tools/perf/util/parse-events.c
1 // SPDX-License-Identifier: GPL-2.0
2 #include <linux/hw_breakpoint.h>
3 #include <linux/err.h>
4 #include <linux/zalloc.h>
5 #include <dirent.h>
6 #include <errno.h>
7 #include <sys/ioctl.h>
8 #include <sys/types.h>
9 #include <sys/stat.h>
10 #include <fcntl.h>
11 #include <sys/param.h>
12 #include "term.h"
13 #include "build-id.h"
14 #include "evlist.h"
15 #include "evsel.h"
16 #include <subcmd/pager.h>
17 #include <subcmd/parse-options.h>
18 #include "parse-events.h"
19 #include <subcmd/exec-cmd.h>
20 #include "string2.h"
21 #include "strlist.h"
22 #include "bpf-loader.h"
23 #include "debug.h"
24 #include <api/fs/tracing_path.h>
25 #include <perf/cpumap.h>
26 #include "parse-events-bison.h"
27 #define YY_EXTRA_TYPE void*
28 #include "parse-events-flex.h"
29 #include "pmu.h"
30 #include "thread_map.h"
31 #include "probe-file.h"
32 #include "asm/bug.h"
33 #include "util/parse-branch-options.h"
34 #include "metricgroup.h"
35 #include "util/evsel_config.h"
36 #include "util/event.h"
37 #include "util/pfm.h"
38 #include "util/parse-events-hybrid.h"
39 #include "util/pmu-hybrid.h"
40 #include "perf.h"
41
42 #define MAX_NAME_LEN 100
43
44 #ifdef PARSER_DEBUG
45 extern int parse_events_debug;
46 #endif
47 int parse_events_parse(void *parse_state, void *scanner);
48 static int get_config_terms(struct list_head *head_config,
49                             struct list_head *head_terms __maybe_unused);
50 static int parse_events__with_hybrid_pmu(struct parse_events_state *parse_state,
51                                          const char *str, char *pmu_name,
52                                          struct list_head *list);
53
54 static struct perf_pmu_event_symbol *perf_pmu_events_list;
55 /*
56  * The variable indicates the number of supported pmu event symbols.
57  * 0 means not initialized and ready to init
58  * -1 means failed to init, don't try anymore
59  * >0 is the number of supported pmu event symbols
60  */
61 static int perf_pmu_events_list_num;
62
63 struct event_symbol event_symbols_hw[PERF_COUNT_HW_MAX] = {
64         [PERF_COUNT_HW_CPU_CYCLES] = {
65                 .symbol = "cpu-cycles",
66                 .alias  = "cycles",
67         },
68         [PERF_COUNT_HW_INSTRUCTIONS] = {
69                 .symbol = "instructions",
70                 .alias  = "",
71         },
72         [PERF_COUNT_HW_CACHE_REFERENCES] = {
73                 .symbol = "cache-references",
74                 .alias  = "",
75         },
76         [PERF_COUNT_HW_CACHE_MISSES] = {
77                 .symbol = "cache-misses",
78                 .alias  = "",
79         },
80         [PERF_COUNT_HW_BRANCH_INSTRUCTIONS] = {
81                 .symbol = "branch-instructions",
82                 .alias  = "branches",
83         },
84         [PERF_COUNT_HW_BRANCH_MISSES] = {
85                 .symbol = "branch-misses",
86                 .alias  = "",
87         },
88         [PERF_COUNT_HW_BUS_CYCLES] = {
89                 .symbol = "bus-cycles",
90                 .alias  = "",
91         },
92         [PERF_COUNT_HW_STALLED_CYCLES_FRONTEND] = {
93                 .symbol = "stalled-cycles-frontend",
94                 .alias  = "idle-cycles-frontend",
95         },
96         [PERF_COUNT_HW_STALLED_CYCLES_BACKEND] = {
97                 .symbol = "stalled-cycles-backend",
98                 .alias  = "idle-cycles-backend",
99         },
100         [PERF_COUNT_HW_REF_CPU_CYCLES] = {
101                 .symbol = "ref-cycles",
102                 .alias  = "",
103         },
104 };
105
106 struct event_symbol event_symbols_sw[PERF_COUNT_SW_MAX] = {
107         [PERF_COUNT_SW_CPU_CLOCK] = {
108                 .symbol = "cpu-clock",
109                 .alias  = "",
110         },
111         [PERF_COUNT_SW_TASK_CLOCK] = {
112                 .symbol = "task-clock",
113                 .alias  = "",
114         },
115         [PERF_COUNT_SW_PAGE_FAULTS] = {
116                 .symbol = "page-faults",
117                 .alias  = "faults",
118         },
119         [PERF_COUNT_SW_CONTEXT_SWITCHES] = {
120                 .symbol = "context-switches",
121                 .alias  = "cs",
122         },
123         [PERF_COUNT_SW_CPU_MIGRATIONS] = {
124                 .symbol = "cpu-migrations",
125                 .alias  = "migrations",
126         },
127         [PERF_COUNT_SW_PAGE_FAULTS_MIN] = {
128                 .symbol = "minor-faults",
129                 .alias  = "",
130         },
131         [PERF_COUNT_SW_PAGE_FAULTS_MAJ] = {
132                 .symbol = "major-faults",
133                 .alias  = "",
134         },
135         [PERF_COUNT_SW_ALIGNMENT_FAULTS] = {
136                 .symbol = "alignment-faults",
137                 .alias  = "",
138         },
139         [PERF_COUNT_SW_EMULATION_FAULTS] = {
140                 .symbol = "emulation-faults",
141                 .alias  = "",
142         },
143         [PERF_COUNT_SW_DUMMY] = {
144                 .symbol = "dummy",
145                 .alias  = "",
146         },
147         [PERF_COUNT_SW_BPF_OUTPUT] = {
148                 .symbol = "bpf-output",
149                 .alias  = "",
150         },
151         [PERF_COUNT_SW_CGROUP_SWITCHES] = {
152                 .symbol = "cgroup-switches",
153                 .alias  = "",
154         },
155 };
156
157 #define __PERF_EVENT_FIELD(config, name) \
158         ((config & PERF_EVENT_##name##_MASK) >> PERF_EVENT_##name##_SHIFT)
159
160 #define PERF_EVENT_RAW(config)          __PERF_EVENT_FIELD(config, RAW)
161 #define PERF_EVENT_CONFIG(config)       __PERF_EVENT_FIELD(config, CONFIG)
162 #define PERF_EVENT_TYPE(config)         __PERF_EVENT_FIELD(config, TYPE)
163 #define PERF_EVENT_ID(config)           __PERF_EVENT_FIELD(config, EVENT)
164
165 #define for_each_subsystem(sys_dir, sys_dirent)                 \
166         while ((sys_dirent = readdir(sys_dir)) != NULL)         \
167                 if (sys_dirent->d_type == DT_DIR &&             \
168                     (strcmp(sys_dirent->d_name, ".")) &&        \
169                     (strcmp(sys_dirent->d_name, "..")))
170
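/*
 * Probe whether a tracepoint directory really describes an event by
 * trying to open its "id" file: returns 0 if <dir_path>/<event>/id can
 * be opened, -EINVAL otherwise.
 */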
171 static int tp_event_has_id(const char *dir_path, struct dirent *evt_dir)
172 {
173         char evt_path[MAXPATHLEN];
174         int fd;
175
176         snprintf(evt_path, MAXPATHLEN, "%s/%s/id", dir_path, evt_dir->d_name);
177         fd = open(evt_path, O_RDONLY);
178         if (fd < 0)
179                 return -EINVAL;
180         close(fd);
181
182         return 0;
183 }
184
185 #define for_each_event(dir_path, evt_dir, evt_dirent)           \
186         while ((evt_dirent = readdir(evt_dir)) != NULL)         \
187                 if (evt_dirent->d_type == DT_DIR &&             \
188                     (strcmp(evt_dirent->d_name, ".")) &&        \
189                     (strcmp(evt_dirent->d_name, "..")) &&       \
190                     (!tp_event_has_id(dir_path, evt_dirent)))
191
192 #define MAX_EVENT_LENGTH 512
193
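/*
 * Scan every tracefs events/<sys>/<event>/id file and return a newly
 * allocated system/name pair for the tracepoint whose id matches
 * 'config', or NULL if no match is found or an allocation fails.
 */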
194 struct tracepoint_path *tracepoint_id_to_path(u64 config)
195 {
196         struct tracepoint_path *path = NULL;
197         DIR *sys_dir, *evt_dir;
198         struct dirent *sys_dirent, *evt_dirent;
199         char id_buf[24];
200         int fd;
201         u64 id;
202         char evt_path[MAXPATHLEN];
203         char *dir_path;
204
205         sys_dir = tracing_events__opendir();
206         if (!sys_dir)
207                 return NULL;
208
209         for_each_subsystem(sys_dir, sys_dirent) {
210                 dir_path = get_events_file(sys_dirent->d_name);
211                 if (!dir_path)
212                         continue;
213                 evt_dir = opendir(dir_path);
214                 if (!evt_dir)
215                         goto next;
216
217                 for_each_event(dir_path, evt_dir, evt_dirent) {
218
219                         scnprintf(evt_path, MAXPATHLEN, "%s/%s/id", dir_path,
220                                   evt_dirent->d_name);
221                         fd = open(evt_path, O_RDONLY);
222                         if (fd < 0)
223                                 continue;
224                         if (read(fd, id_buf, sizeof(id_buf)) < 0) {
225                                 close(fd);
226                                 continue;
227                         }
228                         close(fd);
229                         id = atoll(id_buf);
230                         if (id == config) {
231                                 put_events_file(dir_path);
232                                 closedir(evt_dir);
233                                 closedir(sys_dir);
234                                 path = zalloc(sizeof(*path));
235                                 if (!path)
236                                         return NULL;
237                                 if (asprintf(&path->system, "%.*s", MAX_EVENT_LENGTH, sys_dirent->d_name) < 0) {
238                                         free(path);
239                                         return NULL;
240                                 }
241                                 if (asprintf(&path->name, "%.*s", MAX_EVENT_LENGTH, evt_dirent->d_name) < 0) {
242                                         zfree(&path->system);
243                                         free(path);
244                                         return NULL;
245                                 }
246                                 return path;
247                         }
248                 }
249                 closedir(evt_dir);
250 next:
251                 put_events_file(dir_path);
252         }
253
254         closedir(sys_dir);
255         return NULL;
256 }
257
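/*
 * Split a "sys:name" string at the ':' and duplicate both halves into a
 * new tracepoint_path; returns NULL if the separator is missing or an
 * allocation fails.
 */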
258 struct tracepoint_path *tracepoint_name_to_path(const char *name)
259 {
260         struct tracepoint_path *path = zalloc(sizeof(*path));
261         char *str = strchr(name, ':');
262
263         if (path == NULL || str == NULL) {
264                 free(path);
265                 return NULL;
266         }
267
268         path->system = strndup(name, str - name);
269         path->name = strdup(str+1);
270
271         if (path->system == NULL || path->name == NULL) {
272                 zfree(&path->system);
273                 zfree(&path->name);
274                 zfree(&path);
275         }
276
277         return path;
278 }
279
280 const char *event_type(int type)
281 {
282         switch (type) {
283         case PERF_TYPE_HARDWARE:
284                 return "hardware";
285
286         case PERF_TYPE_SOFTWARE:
287                 return "software";
288
289         case PERF_TYPE_TRACEPOINT:
290                 return "tracepoint";
291
292         case PERF_TYPE_HW_CACHE:
293                 return "hardware-cache";
294
295         default:
296                 break;
297         }
298
299         return "unknown";
300 }
301
302 static char *get_config_str(struct list_head *head_terms, int type_term)
303 {
304         struct parse_events_term *term;
305
306         if (!head_terms)
307                 return NULL;
308
309         list_for_each_entry(term, head_terms, list)
310                 if (term->type_term == type_term)
311                         return term->val.str;
312
313         return NULL;
314 }
315
316 static char *get_config_metric_id(struct list_head *head_terms)
317 {
318         return get_config_str(head_terms, PARSE_EVENTS__TERM_TYPE_METRIC_ID);
319 }
320
321 static char *get_config_name(struct list_head *head_terms)
322 {
323         return get_config_str(head_terms, PARSE_EVENTS__TERM_TYPE_NAME);
324 }
325
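/*
 * Common constructor for all event types: allocate an evsel at the
 * current index, attach the PMU's cpu map (or one built from cpu_list),
 * copy name/metric_id, splice any config terms and, when a list is
 * given, append the new evsel to it.
 */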
326 static struct evsel *
327 __add_event(struct list_head *list, int *idx,
328             struct perf_event_attr *attr,
329             bool init_attr,
330             const char *name, const char *metric_id, struct perf_pmu *pmu,
331             struct list_head *config_terms, bool auto_merge_stats,
332             const char *cpu_list)
333 {
334         struct evsel *evsel;
335         struct perf_cpu_map *cpus = pmu ? perf_cpu_map__get(pmu->cpus) :
336                                cpu_list ? perf_cpu_map__new(cpu_list) : NULL;
337
338         if (pmu && attr->type == PERF_TYPE_RAW)
339                 perf_pmu__warn_invalid_config(pmu, attr->config, name);
340
341         if (init_attr)
342                 event_attr_init(attr);
343
344         evsel = evsel__new_idx(attr, *idx);
345         if (!evsel) {
346                 perf_cpu_map__put(cpus);
347                 return NULL;
348         }
349
350         (*idx)++;
351         evsel->core.cpus = cpus;
352         evsel->core.own_cpus = perf_cpu_map__get(cpus);
353         evsel->core.system_wide = pmu ? pmu->is_uncore : false;
354         evsel->auto_merge_stats = auto_merge_stats;
355
356         if (name)
357                 evsel->name = strdup(name);
358
359         if (metric_id)
360                 evsel->metric_id = strdup(metric_id);
361
362         if (config_terms)
363                 list_splice_init(config_terms, &evsel->config_terms);
364
365         if (list)
366                 list_add_tail(&evsel->core.node, list);
367
368         return evsel;
369 }
370
371 struct evsel *parse_events__add_event(int idx, struct perf_event_attr *attr,
372                                       const char *name, const char *metric_id,
373                                       struct perf_pmu *pmu)
374 {
375         return __add_event(/*list=*/NULL, &idx, attr, /*init_attr=*/false, name,
376                            metric_id, pmu, /*config_terms=*/NULL,
377                            /*auto_merge_stats=*/false, /*cpu_list=*/NULL);
378 }
379
380 static int add_event(struct list_head *list, int *idx,
381                      struct perf_event_attr *attr, const char *name,
382                      const char *metric_id, struct list_head *config_terms)
383 {
384         return __add_event(list, idx, attr, /*init_attr*/true, name, metric_id,
385                            /*pmu=*/NULL, config_terms,
386                            /*auto_merge_stats=*/false, /*cpu_list=*/NULL) ? 0 : -ENOMEM;
387 }
388
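/*
 * Tool events are modelled as a software dummy event pinned to CPU 0;
 * PERF_TOOL_DURATION_TIME additionally gets "ns" as its unit.
 */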
389 static int add_event_tool(struct list_head *list, int *idx,
390                           enum perf_tool_event tool_event)
391 {
392         struct evsel *evsel;
393         struct perf_event_attr attr = {
394                 .type = PERF_TYPE_SOFTWARE,
395                 .config = PERF_COUNT_SW_DUMMY,
396         };
397
398         evsel = __add_event(list, idx, &attr, /*init_attr=*/true, /*name=*/NULL,
399                             /*metric_id=*/NULL, /*pmu=*/NULL,
400                             /*config_terms=*/NULL, /*auto_merge_stats=*/false,
401                             /*cpu_list=*/"0");
402         if (!evsel)
403                 return -ENOMEM;
404         evsel->tool_event = tool_event;
405         if (tool_event == PERF_TOOL_DURATION_TIME) {
406                 free((char *)evsel->unit);
407                 evsel->unit = strdup("ns");
408         }
409         return 0;
410 }
411
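/*
 * Return the index of the first row in 'names' containing an alias that
 * matches the start of 'str' (case-insensitively), or -1 if none does.
 */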
412 static int parse_aliases(char *str, const char *names[][EVSEL__MAX_ALIASES], int size)
413 {
414         int i, j;
415         int n, longest = -1;
416
417         for (i = 0; i < size; i++) {
418                 for (j = 0; j < EVSEL__MAX_ALIASES && names[i][j]; j++) {
419                         n = strlen(names[i][j]);
420                         if (n > longest && !strncasecmp(str, names[i][j], n))
421                                 longest = n;
422                 }
423                 if (longest > 0)
424                         return i;
425         }
426
427         return -1;
428 }
429
430 typedef int config_term_func_t(struct perf_event_attr *attr,
431                                struct parse_events_term *term,
432                                struct parse_events_error *err);
433 static int config_term_common(struct perf_event_attr *attr,
434                               struct parse_events_term *term,
435                               struct parse_events_error *err);
436 static int config_attr(struct perf_event_attr *attr,
437                        struct list_head *head,
438                        struct parse_events_error *err,
439                        config_term_func_t config_term);
440
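/*
 * Build a PERF_TYPE_HW_CACHE event from the "type[-op][-result]" strings,
 * encoding config as type | (op << 8) | (result << 16).  The op falls
 * back to READ and the result to ACCESS when not specified.
 */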
441 int parse_events_add_cache(struct list_head *list, int *idx,
442                            char *type, char *op_result1, char *op_result2,
443                            struct parse_events_error *err,
444                            struct list_head *head_config,
445                            struct parse_events_state *parse_state)
446 {
447         struct perf_event_attr attr;
448         LIST_HEAD(config_terms);
449         char name[MAX_NAME_LEN];
450         const char *config_name, *metric_id;
451         int cache_type = -1, cache_op = -1, cache_result = -1;
452         char *op_result[2] = { op_result1, op_result2 };
453         int i, n, ret;
454         bool hybrid;
455
456         /*
457          * No fallback - if we cannot get a clear cache type
458          * then bail out:
459          */
460         cache_type = parse_aliases(type, evsel__hw_cache, PERF_COUNT_HW_CACHE_MAX);
461         if (cache_type == -1)
462                 return -EINVAL;
463
464         config_name = get_config_name(head_config);
465         n = snprintf(name, MAX_NAME_LEN, "%s", type);
466
467         for (i = 0; (i < 2) && (op_result[i]); i++) {
468                 char *str = op_result[i];
469
470                 n += snprintf(name + n, MAX_NAME_LEN - n, "-%s", str);
471
472                 if (cache_op == -1) {
473                         cache_op = parse_aliases(str, evsel__hw_cache_op,
474                                                  PERF_COUNT_HW_CACHE_OP_MAX);
475                         if (cache_op >= 0) {
476                                 if (!evsel__is_cache_op_valid(cache_type, cache_op))
477                                         return -EINVAL;
478                                 continue;
479                         }
480                 }
481
482                 if (cache_result == -1) {
483                         cache_result = parse_aliases(str, evsel__hw_cache_result,
484                                                      PERF_COUNT_HW_CACHE_RESULT_MAX);
485                         if (cache_result >= 0)
486                                 continue;
487                 }
488         }
489
490         /*
491          * Fall back to reads:
492          */
493         if (cache_op == -1)
494                 cache_op = PERF_COUNT_HW_CACHE_OP_READ;
495
496         /*
497          * Fall back to accesses:
498          */
499         if (cache_result == -1)
500                 cache_result = PERF_COUNT_HW_CACHE_RESULT_ACCESS;
501
502         memset(&attr, 0, sizeof(attr));
503         attr.config = cache_type | (cache_op << 8) | (cache_result << 16);
504         attr.type = PERF_TYPE_HW_CACHE;
505
506         if (head_config) {
507                 if (config_attr(&attr, head_config, err,
508                                 config_term_common))
509                         return -EINVAL;
510
511                 if (get_config_terms(head_config, &config_terms))
512                         return -ENOMEM;
513         }
514
515         metric_id = get_config_metric_id(head_config);
516         ret = parse_events__add_cache_hybrid(list, idx, &attr,
517                                              config_name ? : name,
518                                              metric_id,
519                                              &config_terms,
520                                              &hybrid, parse_state);
521         if (hybrid)
522                 goto out_free_terms;
523
524         ret = add_event(list, idx, &attr, config_name ? : name, metric_id,
525                         &config_terms);
526 out_free_terms:
527         free_config_terms(&config_terms);
528         return ret;
529 }
530
531 static void tracepoint_error(struct parse_events_error *e, int err,
532                              const char *sys, const char *name)
533 {
534         const char *str;
535         char help[BUFSIZ];
536
537         if (!e)
538                 return;
539
540         /*
541          * We get the error directly from the syscall errno (> 0),
542          * or from an encoded pointer's error (< 0).
543          */
544         err = abs(err);
545
546         switch (err) {
547         case EACCES:
548                 str = "can't access trace events";
549                 break;
550         case ENOENT:
551                 str = "unknown tracepoint";
552                 break;
553         default:
554                 str = "failed to add tracepoint";
555                 break;
556         }
557
558         tracing_path__strerror_open_tp(err, help, sizeof(help), sys, name);
559         parse_events_error__handle(e, 0, strdup(str), strdup(help));
560 }
561
562 static int add_tracepoint(struct list_head *list, int *idx,
563                           const char *sys_name, const char *evt_name,
564                           struct parse_events_error *err,
565                           struct list_head *head_config)
566 {
567         struct evsel *evsel = evsel__newtp_idx(sys_name, evt_name, (*idx)++);
568
569         if (IS_ERR(evsel)) {
570                 tracepoint_error(err, PTR_ERR(evsel), sys_name, evt_name);
571                 return PTR_ERR(evsel);
572         }
573
574         if (head_config) {
575                 LIST_HEAD(config_terms);
576
577                 if (get_config_terms(head_config, &config_terms))
578                         return -ENOMEM;
579                 list_splice(&config_terms, &evsel->config_terms);
580         }
581
582         list_add_tail(&evsel->core.node, list);
583         return 0;
584 }
585
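/*
 * Expand a glob in the event name by scanning the system's tracefs
 * directory and adding every matching tracepoint; it is an error if
 * nothing matches.
 */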
586 static int add_tracepoint_multi_event(struct list_head *list, int *idx,
587                                       const char *sys_name, const char *evt_name,
588                                       struct parse_events_error *err,
589                                       struct list_head *head_config)
590 {
591         char *evt_path;
592         struct dirent *evt_ent;
593         DIR *evt_dir;
594         int ret = 0, found = 0;
595
596         evt_path = get_events_file(sys_name);
597         if (!evt_path) {
598                 tracepoint_error(err, errno, sys_name, evt_name);
599                 return -1;
600         }
601         evt_dir = opendir(evt_path);
602         if (!evt_dir) {
603                 put_events_file(evt_path);
604                 tracepoint_error(err, errno, sys_name, evt_name);
605                 return -1;
606         }
607
608         while (!ret && (evt_ent = readdir(evt_dir))) {
609                 if (!strcmp(evt_ent->d_name, ".")
610                     || !strcmp(evt_ent->d_name, "..")
611                     || !strcmp(evt_ent->d_name, "enable")
612                     || !strcmp(evt_ent->d_name, "filter"))
613                         continue;
614
615                 if (!strglobmatch(evt_ent->d_name, evt_name))
616                         continue;
617
618                 found++;
619
620                 ret = add_tracepoint(list, idx, sys_name, evt_ent->d_name,
621                                      err, head_config);
622         }
623
624         if (!found) {
625                 tracepoint_error(err, ENOENT, sys_name, evt_name);
626                 ret = -1;
627         }
628
629         put_events_file(evt_path);
630         closedir(evt_dir);
631         return ret;
632 }
633
634 static int add_tracepoint_event(struct list_head *list, int *idx,
635                                 const char *sys_name, const char *evt_name,
636                                 struct parse_events_error *err,
637                                 struct list_head *head_config)
638 {
639         return strpbrk(evt_name, "*?") ?
640                add_tracepoint_multi_event(list, idx, sys_name, evt_name,
641                                           err, head_config) :
642                add_tracepoint(list, idx, sys_name, evt_name,
643                               err, head_config);
644 }
645
646 static int add_tracepoint_multi_sys(struct list_head *list, int *idx,
647                                     const char *sys_name, const char *evt_name,
648                                     struct parse_events_error *err,
649                                     struct list_head *head_config)
650 {
651         struct dirent *events_ent;
652         DIR *events_dir;
653         int ret = 0;
654
655         events_dir = tracing_events__opendir();
656         if (!events_dir) {
657                 tracepoint_error(err, errno, sys_name, evt_name);
658                 return -1;
659         }
660
661         while (!ret && (events_ent = readdir(events_dir))) {
662                 if (!strcmp(events_ent->d_name, ".")
663                     || !strcmp(events_ent->d_name, "..")
664                     || !strcmp(events_ent->d_name, "enable")
665                     || !strcmp(events_ent->d_name, "header_event")
666                     || !strcmp(events_ent->d_name, "header_page"))
667                         continue;
668
669                 if (!strglobmatch(events_ent->d_name, sys_name))
670                         continue;
671
672                 ret = add_tracepoint_event(list, idx, events_ent->d_name,
673                                            evt_name, err, head_config);
674         }
675
676         closedir(events_dir);
677         return ret;
678 }
679
680 #ifdef HAVE_LIBBPF_SUPPORT
681 struct __add_bpf_event_param {
682         struct parse_events_state *parse_state;
683         struct list_head *list;
684         struct list_head *head_config;
685 };
686
687 static int add_bpf_event(const char *group, const char *event, int fd, struct bpf_object *obj,
688                          void *_param)
689 {
690         LIST_HEAD(new_evsels);
691         struct __add_bpf_event_param *param = _param;
692         struct parse_events_state *parse_state = param->parse_state;
693         struct list_head *list = param->list;
694         struct evsel *pos;
695         int err;
696         /*
697          * Check whether we should add the event: if its group name starts with
698          * a '!', don't add the tracepoint; it will be used for something else,
699          * such as adding to a BPF_MAP_TYPE_PROG_ARRAY.
700          *
701          * See tools/perf/examples/bpf/augmented_raw_syscalls.c
702          */
703         if (group[0] == '!')
704                 return 0;
705
706         pr_debug("add bpf event %s:%s and attach bpf program %d\n",
707                  group, event, fd);
708
709         err = parse_events_add_tracepoint(&new_evsels, &parse_state->idx, group,
710                                           event, parse_state->error,
711                                           param->head_config);
712         if (err) {
713                 struct evsel *evsel, *tmp;
714
715                 pr_debug("Failed to add BPF event %s:%s\n",
716                          group, event);
717                 list_for_each_entry_safe(evsel, tmp, &new_evsels, core.node) {
718                         list_del_init(&evsel->core.node);
719                         evsel__delete(evsel);
720                 }
721                 return err;
722         }
723         pr_debug("adding %s:%s\n", group, event);
724
725         list_for_each_entry(pos, &new_evsels, core.node) {
726                 pr_debug("adding %s:%s to %p\n",
727                          group, event, pos);
728                 pos->bpf_fd = fd;
729                 pos->bpf_obj = obj;
730         }
731         list_splice(&new_evsels, list);
732         return 0;
733 }
734
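/*
 * Probe, load and attach a prepared BPF object: register the bpf__clear()
 * atexit handler, create the probe points, load the programs and add one
 * evsel per attached event via add_bpf_event().
 */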
735 int parse_events_load_bpf_obj(struct parse_events_state *parse_state,
736                               struct list_head *list,
737                               struct bpf_object *obj,
738                               struct list_head *head_config)
739 {
740         int err;
741         char errbuf[BUFSIZ];
742         struct __add_bpf_event_param param = {parse_state, list, head_config};
743         static bool registered_unprobe_atexit = false;
744
745         if (IS_ERR(obj) || !obj) {
746                 snprintf(errbuf, sizeof(errbuf),
747                          "Internal error: load bpf obj with NULL");
748                 err = -EINVAL;
749                 goto errout;
750         }
751
752         /*
753          * Register the atexit handler before calling bpf__probe() so that
754          * bpf__probe() doesn't need to unprobe the probe points it has
755          * already created on failure.
756          */
757         if (!registered_unprobe_atexit) {
758                 atexit(bpf__clear);
759                 registered_unprobe_atexit = true;
760         }
761
762         err = bpf__probe(obj);
763         if (err) {
764                 bpf__strerror_probe(obj, err, errbuf, sizeof(errbuf));
765                 goto errout;
766         }
767
768         err = bpf__load(obj);
769         if (err) {
770                 bpf__strerror_load(obj, err, errbuf, sizeof(errbuf));
771                 goto errout;
772         }
773
774         err = bpf__foreach_event(obj, add_bpf_event, &param);
775         if (err) {
776                 snprintf(errbuf, sizeof(errbuf),
777                          "Attach events in BPF object failed");
778                 goto errout;
779         }
780
781         return 0;
782 errout:
783         parse_events_error__handle(parse_state->error, 0,
784                                 strdup(errbuf), strdup("(add -v to see detail)"));
785         return err;
786 }
787
788 static int
789 parse_events_config_bpf(struct parse_events_state *parse_state,
790                         struct bpf_object *obj,
791                         struct list_head *head_config)
792 {
793         struct parse_events_term *term;
794         int error_pos;
795
796         if (!head_config || list_empty(head_config))
797                 return 0;
798
799         list_for_each_entry(term, head_config, list) {
800                 int err;
801
802                 if (term->type_term != PARSE_EVENTS__TERM_TYPE_USER) {
803                         parse_events_error__handle(parse_state->error, term->err_term,
804                                                 strdup("Invalid config term for BPF object"),
805                                                 NULL);
806                         return -EINVAL;
807                 }
808
809                 err = bpf__config_obj(obj, term, parse_state->evlist, &error_pos);
810                 if (err) {
811                         char errbuf[BUFSIZ];
812                         int idx;
813
814                         bpf__strerror_config_obj(obj, term, parse_state->evlist,
815                                                  &error_pos, err, errbuf,
816                                                  sizeof(errbuf));
817
818                         if (err == -BPF_LOADER_ERRNO__OBJCONF_MAP_VALUE)
819                                 idx = term->err_val;
820                         else
821                                 idx = term->err_term + error_pos;
822
823                         parse_events_error__handle(parse_state->error, idx,
824                                                 strdup(errbuf),
825                                                 strdup(
826 "Hint:\tValid config terms:\n"
827 "     \tmap:[<arraymap>].value<indices>=[value]\n"
828 "     \tmap:[<eventmap>].event<indices>=[event]\n"
829 "\n"
830 "     \twhere <indices> is something like [0,3...5] or [all]\n"
831 "     \t(add -v to see detail)"));
832                         return err;
833                 }
834         }
835         return 0;
836 }
837
838 /*
839  * Split config terms:
840  * perf record -e bpf.c/call-graph=fp,map:array.value[0]=1/ ...
841  *  'call-graph=fp' is 'evt config' and should be applied to each
842  *  event in bpf.c.
843  * 'map:array.value[0]=1' is 'obj config' and should be processed
844  * by parse_events_config_bpf().
845  *
846  * Move object config terms from the first list to obj_head_config.
847  */
848 static void
849 split_bpf_config_terms(struct list_head *evt_head_config,
850                        struct list_head *obj_head_config)
851 {
852         struct parse_events_term *term, *temp;
853
854         /*
855          * Currently, all possible user config terms
856          * belong to the bpf object. parse_events__is_hardcoded_term()
857          * happens to be a good flag.
858          *
859          * See parse_events_config_bpf() and
860          * config_term_tracepoint().
861          */
862         list_for_each_entry_safe(term, temp, evt_head_config, list)
863                 if (!parse_events__is_hardcoded_term(term))
864                         list_move_tail(&term->list, obj_head_config);
865 }
866
867 int parse_events_load_bpf(struct parse_events_state *parse_state,
868                           struct list_head *list,
869                           char *bpf_file_name,
870                           bool source,
871                           struct list_head *head_config)
872 {
873         int err;
874         struct bpf_object *obj;
875         LIST_HEAD(obj_head_config);
876
877         if (head_config)
878                 split_bpf_config_terms(head_config, &obj_head_config);
879
880         obj = bpf__prepare_load(bpf_file_name, source);
881         if (IS_ERR(obj)) {
882                 char errbuf[BUFSIZ];
883
884                 err = PTR_ERR(obj);
885
886                 if (err == -ENOTSUP)
887                         snprintf(errbuf, sizeof(errbuf),
888                                  "BPF support is not compiled");
889                 else
890                         bpf__strerror_prepare_load(bpf_file_name,
891                                                    source,
892                                                    -err, errbuf,
893                                                    sizeof(errbuf));
894
895                 parse_events_error__handle(parse_state->error, 0,
896                                         strdup(errbuf), strdup("(add -v to see detail)"));
897                 return err;
898         }
899
900         err = parse_events_load_bpf_obj(parse_state, list, obj, head_config);
901         if (err)
902                 return err;
903         err = parse_events_config_bpf(parse_state, obj, &obj_head_config);
904
905         /*
906          * Caller doesn't know anything about obj_head_config,
907          * so combine them together again before returning.
908          */
909         if (head_config)
910                 list_splice_tail(&obj_head_config, head_config);
911         return err;
912 }
913 #else // HAVE_LIBBPF_SUPPORT
914 int parse_events_load_bpf_obj(struct parse_events_state *parse_state,
915                               struct list_head *list __maybe_unused,
916                               struct bpf_object *obj __maybe_unused,
917                               struct list_head *head_config __maybe_unused)
918 {
919         parse_events_error__handle(parse_state->error, 0,
920                                    strdup("BPF support is not compiled"),
921                                    strdup("Make sure libbpf-devel is available at build time."));
922         return -ENOTSUP;
923 }
924
925 int parse_events_load_bpf(struct parse_events_state *parse_state,
926                           struct list_head *list __maybe_unused,
927                           char *bpf_file_name __maybe_unused,
928                           bool source __maybe_unused,
929                           struct list_head *head_config __maybe_unused)
930 {
931         parse_events_error__handle(parse_state->error, 0,
932                                    strdup("BPF support is not compiled"),
933                                    strdup("Make sure libbpf-devel is available at build time."));
934         return -ENOTSUP;
935 }
936 #endif // HAVE_LIBBPF_SUPPORT
937
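/*
 * Parse up to three 'r'/'w'/'x' modifier characters into attr->bp_type,
 * rejecting duplicates; an empty type defaults to
 * HW_BREAKPOINT_R | HW_BREAKPOINT_W.
 */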
938 static int
939 parse_breakpoint_type(const char *type, struct perf_event_attr *attr)
940 {
941         int i;
942
943         for (i = 0; i < 3; i++) {
944                 if (!type || !type[i])
945                         break;
946
947 #define CHECK_SET_TYPE(bit)             \
948 do {                                    \
949         if (attr->bp_type & bit)        \
950                 return -EINVAL;         \
951         else                            \
952                 attr->bp_type |= bit;   \
953 } while (0)
954
955                 switch (type[i]) {
956                 case 'r':
957                         CHECK_SET_TYPE(HW_BREAKPOINT_R);
958                         break;
959                 case 'w':
960                         CHECK_SET_TYPE(HW_BREAKPOINT_W);
961                         break;
962                 case 'x':
963                         CHECK_SET_TYPE(HW_BREAKPOINT_X);
964                         break;
965                 default:
966                         return -EINVAL;
967                 }
968         }
969
970 #undef CHECK_SET_TYPE
971
972         if (!attr->bp_type) /* Default */
973                 attr->bp_type = HW_BREAKPOINT_R | HW_BREAKPOINT_W;
974
975         return 0;
976 }
977
978 int parse_events_add_breakpoint(struct list_head *list, int *idx,
979                                 u64 addr, char *type, u64 len)
980 {
981         struct perf_event_attr attr;
982
983         memset(&attr, 0, sizeof(attr));
984         attr.bp_addr = addr;
985
986         if (parse_breakpoint_type(type, &attr))
987                 return -EINVAL;
988
989         /* Provide some defaults if len is not specified */
990         if (!len) {
991                 if (attr.bp_type == HW_BREAKPOINT_X)
992                         len = sizeof(long);
993                 else
994                         len = HW_BREAKPOINT_LEN_4;
995         }
996
997         attr.bp_len = len;
998
999         attr.type = PERF_TYPE_BREAKPOINT;
1000         attr.sample_period = 1;
1001
1002         return add_event(list, idx, &attr, /*name=*/NULL, /*metric_id=*/NULL,
1003                          /*config_terms=*/NULL);
1004 }
1005
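/*
 * Verify that the term carries the value kind (numeric vs. string) the
 * option expects, reporting a parse error otherwise.
 */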
1006 static int check_type_val(struct parse_events_term *term,
1007                           struct parse_events_error *err,
1008                           int type)
1009 {
1010         if (type == term->type_val)
1011                 return 0;
1012
1013         if (err) {
1014                 parse_events_error__handle(err, term->err_val,
1015                                         type == PARSE_EVENTS__TERM_TYPE_NUM
1016                                         ? strdup("expected numeric value")
1017                                         : strdup("expected string value"),
1018                                         NULL);
1019         }
1020         return -EINVAL;
1021 }
1022
1023 /*
1024  * Update according to parse-events.l
1025  */
1026 static const char *config_term_names[__PARSE_EVENTS__TERM_TYPE_NR] = {
1027         [PARSE_EVENTS__TERM_TYPE_USER]                  = "<sysfs term>",
1028         [PARSE_EVENTS__TERM_TYPE_CONFIG]                = "config",
1029         [PARSE_EVENTS__TERM_TYPE_CONFIG1]               = "config1",
1030         [PARSE_EVENTS__TERM_TYPE_CONFIG2]               = "config2",
1031         [PARSE_EVENTS__TERM_TYPE_NAME]                  = "name",
1032         [PARSE_EVENTS__TERM_TYPE_SAMPLE_PERIOD]         = "period",
1033         [PARSE_EVENTS__TERM_TYPE_SAMPLE_FREQ]           = "freq",
1034         [PARSE_EVENTS__TERM_TYPE_BRANCH_SAMPLE_TYPE]    = "branch_type",
1035         [PARSE_EVENTS__TERM_TYPE_TIME]                  = "time",
1036         [PARSE_EVENTS__TERM_TYPE_CALLGRAPH]             = "call-graph",
1037         [PARSE_EVENTS__TERM_TYPE_STACKSIZE]             = "stack-size",
1038         [PARSE_EVENTS__TERM_TYPE_NOINHERIT]             = "no-inherit",
1039         [PARSE_EVENTS__TERM_TYPE_INHERIT]               = "inherit",
1040         [PARSE_EVENTS__TERM_TYPE_MAX_STACK]             = "max-stack",
1041         [PARSE_EVENTS__TERM_TYPE_MAX_EVENTS]            = "nr",
1042         [PARSE_EVENTS__TERM_TYPE_OVERWRITE]             = "overwrite",
1043         [PARSE_EVENTS__TERM_TYPE_NOOVERWRITE]           = "no-overwrite",
1044         [PARSE_EVENTS__TERM_TYPE_DRV_CFG]               = "driver-config",
1045         [PARSE_EVENTS__TERM_TYPE_PERCORE]               = "percore",
1046         [PARSE_EVENTS__TERM_TYPE_AUX_OUTPUT]            = "aux-output",
1047         [PARSE_EVENTS__TERM_TYPE_AUX_SAMPLE_SIZE]       = "aux-sample-size",
1048         [PARSE_EVENTS__TERM_TYPE_METRIC_ID]             = "metric-id",
1049 };
1050
1051 static bool config_term_shrinked;
1052
1053 static bool
1054 config_term_avail(int term_type, struct parse_events_error *err)
1055 {
1056         char *err_str;
1057
1058         if (term_type < 0 || term_type >= __PARSE_EVENTS__TERM_TYPE_NR) {
1059                 parse_events_error__handle(err, -1,
1060                                         strdup("Invalid term_type"), NULL);
1061                 return false;
1062         }
1063         if (!config_term_shrinked)
1064                 return true;
1065
1066         switch (term_type) {
1067         case PARSE_EVENTS__TERM_TYPE_CONFIG:
1068         case PARSE_EVENTS__TERM_TYPE_CONFIG1:
1069         case PARSE_EVENTS__TERM_TYPE_CONFIG2:
1070         case PARSE_EVENTS__TERM_TYPE_NAME:
1071         case PARSE_EVENTS__TERM_TYPE_METRIC_ID:
1072         case PARSE_EVENTS__TERM_TYPE_SAMPLE_PERIOD:
1073         case PARSE_EVENTS__TERM_TYPE_PERCORE:
1074                 return true;
1075         default:
1076                 if (!err)
1077                         return false;
1078
1079                 /* term_type is validated so indexing is safe */
1080                 if (asprintf(&err_str, "'%s' is not usable in 'perf stat'",
1081                                 config_term_names[term_type]) >= 0)
1082                         parse_events_error__handle(err, -1, err_str, NULL);
1083                 return false;
1084         }
1085 }
1086
1087 void parse_events__shrink_config_terms(void)
1088 {
1089         config_term_shrinked = true;
1090 }
1091
1092 static int config_term_common(struct perf_event_attr *attr,
1093                               struct parse_events_term *term,
1094                               struct parse_events_error *err)
1095 {
1096 #define CHECK_TYPE_VAL(type)                                               \
1097 do {                                                                       \
1098         if (check_type_val(term, err, PARSE_EVENTS__TERM_TYPE_ ## type)) \
1099                 return -EINVAL;                                            \
1100 } while (0)
1101
1102         switch (term->type_term) {
1103         case PARSE_EVENTS__TERM_TYPE_CONFIG:
1104                 CHECK_TYPE_VAL(NUM);
1105                 attr->config = term->val.num;
1106                 break;
1107         case PARSE_EVENTS__TERM_TYPE_CONFIG1:
1108                 CHECK_TYPE_VAL(NUM);
1109                 attr->config1 = term->val.num;
1110                 break;
1111         case PARSE_EVENTS__TERM_TYPE_CONFIG2:
1112                 CHECK_TYPE_VAL(NUM);
1113                 attr->config2 = term->val.num;
1114                 break;
1115         case PARSE_EVENTS__TERM_TYPE_SAMPLE_PERIOD:
1116                 CHECK_TYPE_VAL(NUM);
1117                 break;
1118         case PARSE_EVENTS__TERM_TYPE_SAMPLE_FREQ:
1119                 CHECK_TYPE_VAL(NUM);
1120                 break;
1121         case PARSE_EVENTS__TERM_TYPE_BRANCH_SAMPLE_TYPE:
1122                 CHECK_TYPE_VAL(STR);
1123                 if (strcmp(term->val.str, "no") &&
1124                     parse_branch_str(term->val.str,
1125                                     &attr->branch_sample_type)) {
1126                         parse_events_error__handle(err, term->err_val,
1127                                         strdup("invalid branch sample type"),
1128                                         NULL);
1129                         return -EINVAL;
1130                 }
1131                 break;
1132         case PARSE_EVENTS__TERM_TYPE_TIME:
1133                 CHECK_TYPE_VAL(NUM);
1134                 if (term->val.num > 1) {
1135                         parse_events_error__handle(err, term->err_val,
1136                                                 strdup("expected 0 or 1"),
1137                                                 NULL);
1138                         return -EINVAL;
1139                 }
1140                 break;
1141         case PARSE_EVENTS__TERM_TYPE_CALLGRAPH:
1142                 CHECK_TYPE_VAL(STR);
1143                 break;
1144         case PARSE_EVENTS__TERM_TYPE_STACKSIZE:
1145                 CHECK_TYPE_VAL(NUM);
1146                 break;
1147         case PARSE_EVENTS__TERM_TYPE_INHERIT:
1148                 CHECK_TYPE_VAL(NUM);
1149                 break;
1150         case PARSE_EVENTS__TERM_TYPE_NOINHERIT:
1151                 CHECK_TYPE_VAL(NUM);
1152                 break;
1153         case PARSE_EVENTS__TERM_TYPE_OVERWRITE:
1154                 CHECK_TYPE_VAL(NUM);
1155                 break;
1156         case PARSE_EVENTS__TERM_TYPE_NOOVERWRITE:
1157                 CHECK_TYPE_VAL(NUM);
1158                 break;
1159         case PARSE_EVENTS__TERM_TYPE_NAME:
1160                 CHECK_TYPE_VAL(STR);
1161                 break;
1162         case PARSE_EVENTS__TERM_TYPE_METRIC_ID:
1163                 CHECK_TYPE_VAL(STR);
1164                 break;
1165         case PARSE_EVENTS__TERM_TYPE_MAX_STACK:
1166                 CHECK_TYPE_VAL(NUM);
1167                 break;
1168         case PARSE_EVENTS__TERM_TYPE_MAX_EVENTS:
1169                 CHECK_TYPE_VAL(NUM);
1170                 break;
1171         case PARSE_EVENTS__TERM_TYPE_PERCORE:
1172                 CHECK_TYPE_VAL(NUM);
1173                 if ((unsigned int)term->val.num > 1) {
1174                         parse_events_error__handle(err, term->err_val,
1175                                                 strdup("expected 0 or 1"),
1176                                                 NULL);
1177                         return -EINVAL;
1178                 }
1179                 break;
1180         case PARSE_EVENTS__TERM_TYPE_AUX_OUTPUT:
1181                 CHECK_TYPE_VAL(NUM);
1182                 break;
1183         case PARSE_EVENTS__TERM_TYPE_AUX_SAMPLE_SIZE:
1184                 CHECK_TYPE_VAL(NUM);
1185                 if (term->val.num > UINT_MAX) {
1186                         parse_events_error__handle(err, term->err_val,
1187                                                 strdup("too big"),
1188                                                 NULL);
1189                         return -EINVAL;
1190                 }
1191                 break;
1192         default:
1193                 parse_events_error__handle(err, term->err_term,
1194                                 strdup("unknown term"),
1195                                 parse_events_formats_error_string(NULL));
1196                 return -EINVAL;
1197         }
1198
1199         /*
1200          * Check term availability after basic checking so
1201          * PARSE_EVENTS__TERM_TYPE_USER can be found and filtered.
1202          *
1203          * If we checked availability at the entry of this function, the
1204          * user would see "'<sysfs term>' is not usable in 'perf stat'"
1205          * if an invalid config term is provided for legacy events
1206          * (for example, instructions/badterm/...), which is confusing.
1207          */
1208         if (!config_term_avail(term->type_term, err))
1209                 return -EINVAL;
1210         return 0;
1211 #undef CHECK_TYPE_VAL
1212 }
1213
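/*
 * Sysfs and driver-config terms are accepted as-is here, since their
 * required type is only known once the PMU format is applied; everything
 * else goes through the common term checks.
 */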
1214 static int config_term_pmu(struct perf_event_attr *attr,
1215                            struct parse_events_term *term,
1216                            struct parse_events_error *err)
1217 {
1218         if (term->type_term == PARSE_EVENTS__TERM_TYPE_USER ||
1219             term->type_term == PARSE_EVENTS__TERM_TYPE_DRV_CFG)
1220                 /*
1221                  * Always succeed for sysfs terms, as we don't know
1222                  * at this point what type they need to have.
1223                  */
1224                 return 0;
1225         else
1226                 return config_term_common(attr, term, err);
1227 }
1228
1229 static int config_term_tracepoint(struct perf_event_attr *attr,
1230                                   struct parse_events_term *term,
1231                                   struct parse_events_error *err)
1232 {
1233         switch (term->type_term) {
1234         case PARSE_EVENTS__TERM_TYPE_CALLGRAPH:
1235         case PARSE_EVENTS__TERM_TYPE_STACKSIZE:
1236         case PARSE_EVENTS__TERM_TYPE_INHERIT:
1237         case PARSE_EVENTS__TERM_TYPE_NOINHERIT:
1238         case PARSE_EVENTS__TERM_TYPE_MAX_STACK:
1239         case PARSE_EVENTS__TERM_TYPE_MAX_EVENTS:
1240         case PARSE_EVENTS__TERM_TYPE_OVERWRITE:
1241         case PARSE_EVENTS__TERM_TYPE_NOOVERWRITE:
1242         case PARSE_EVENTS__TERM_TYPE_AUX_OUTPUT:
1243         case PARSE_EVENTS__TERM_TYPE_AUX_SAMPLE_SIZE:
1244                 return config_term_common(attr, term, err);
1245         default:
1246                 if (err) {
1247                         parse_events_error__handle(err, term->err_term,
1248                                 strdup("unknown term"),
1249                                 strdup("valid terms: call-graph,stack-size\n"));
1250                 }
1251                 return -EINVAL;
1252         }
1253
1254         return 0;
1255 }
1256
1257 static int config_attr(struct perf_event_attr *attr,
1258                        struct list_head *head,
1259                        struct parse_events_error *err,
1260                        config_term_func_t config_term)
1261 {
1262         struct parse_events_term *term;
1263
1264         list_for_each_entry(term, head, list)
1265                 if (config_term(attr, term, err))
1266                         return -EINVAL;
1267
1268         return 0;
1269 }
1270
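/*
 * Translate parsed event terms (period, freq, call-graph, branch type,
 * percore, aux-output, ...) into evsel config terms appended to
 * head_terms.
 */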
1271 static int get_config_terms(struct list_head *head_config,
1272                             struct list_head *head_terms __maybe_unused)
1273 {
1274 #define ADD_CONFIG_TERM(__type, __weak)                         \
1275         struct evsel_config_term *__t;                  \
1276                                                                 \
1277         __t = zalloc(sizeof(*__t));                             \
1278         if (!__t)                                               \
1279                 return -ENOMEM;                                 \
1280                                                                 \
1281         INIT_LIST_HEAD(&__t->list);                             \
1282         __t->type       = EVSEL__CONFIG_TERM_ ## __type;        \
1283         __t->weak       = __weak;                               \
1284         list_add_tail(&__t->list, head_terms)
1285
1286 #define ADD_CONFIG_TERM_VAL(__type, __name, __val, __weak)      \
1287 do {                                                            \
1288         ADD_CONFIG_TERM(__type, __weak);                        \
1289         __t->val.__name = __val;                                \
1290 } while (0)
1291
1292 #define ADD_CONFIG_TERM_STR(__type, __val, __weak)              \
1293 do {                                                            \
1294         ADD_CONFIG_TERM(__type, __weak);                        \
1295         __t->val.str = strdup(__val);                           \
1296         if (!__t->val.str) {                                    \
1297                 zfree(&__t);                                    \
1298                 return -ENOMEM;                                 \
1299         }                                                       \
1300         __t->free_str = true;                                   \
1301 } while (0)
1302
1303         struct parse_events_term *term;
1304
1305         list_for_each_entry(term, head_config, list) {
1306                 switch (term->type_term) {
1307                 case PARSE_EVENTS__TERM_TYPE_SAMPLE_PERIOD:
1308                         ADD_CONFIG_TERM_VAL(PERIOD, period, term->val.num, term->weak);
1309                         break;
1310                 case PARSE_EVENTS__TERM_TYPE_SAMPLE_FREQ:
1311                         ADD_CONFIG_TERM_VAL(FREQ, freq, term->val.num, term->weak);
1312                         break;
1313                 case PARSE_EVENTS__TERM_TYPE_TIME:
1314                         ADD_CONFIG_TERM_VAL(TIME, time, term->val.num, term->weak);
1315                         break;
1316                 case PARSE_EVENTS__TERM_TYPE_CALLGRAPH:
1317                         ADD_CONFIG_TERM_STR(CALLGRAPH, term->val.str, term->weak);
1318                         break;
1319                 case PARSE_EVENTS__TERM_TYPE_BRANCH_SAMPLE_TYPE:
1320                         ADD_CONFIG_TERM_STR(BRANCH, term->val.str, term->weak);
1321                         break;
1322                 case PARSE_EVENTS__TERM_TYPE_STACKSIZE:
1323                         ADD_CONFIG_TERM_VAL(STACK_USER, stack_user,
1324                                             term->val.num, term->weak);
1325                         break;
1326                 case PARSE_EVENTS__TERM_TYPE_INHERIT:
1327                         ADD_CONFIG_TERM_VAL(INHERIT, inherit,
1328                                             term->val.num ? 1 : 0, term->weak);
1329                         break;
1330                 case PARSE_EVENTS__TERM_TYPE_NOINHERIT:
1331                         ADD_CONFIG_TERM_VAL(INHERIT, inherit,
1332                                             term->val.num ? 0 : 1, term->weak);
1333                         break;
1334                 case PARSE_EVENTS__TERM_TYPE_MAX_STACK:
1335                         ADD_CONFIG_TERM_VAL(MAX_STACK, max_stack,
1336                                             term->val.num, term->weak);
1337                         break;
1338                 case PARSE_EVENTS__TERM_TYPE_MAX_EVENTS:
1339                         ADD_CONFIG_TERM_VAL(MAX_EVENTS, max_events,
1340                                             term->val.num, term->weak);
1341                         break;
1342                 case PARSE_EVENTS__TERM_TYPE_OVERWRITE:
1343                         ADD_CONFIG_TERM_VAL(OVERWRITE, overwrite,
1344                                             term->val.num ? 1 : 0, term->weak);
1345                         break;
1346                 case PARSE_EVENTS__TERM_TYPE_NOOVERWRITE:
1347                         ADD_CONFIG_TERM_VAL(OVERWRITE, overwrite,
1348                                             term->val.num ? 0 : 1, term->weak);
1349                         break;
1350                 case PARSE_EVENTS__TERM_TYPE_DRV_CFG:
1351                         ADD_CONFIG_TERM_STR(DRV_CFG, term->val.str, term->weak);
1352                         break;
1353                 case PARSE_EVENTS__TERM_TYPE_PERCORE:
1354                         ADD_CONFIG_TERM_VAL(PERCORE, percore,
1355                                             term->val.num ? true : false, term->weak);
1356                         break;
1357                 case PARSE_EVENTS__TERM_TYPE_AUX_OUTPUT:
1358                         ADD_CONFIG_TERM_VAL(AUX_OUTPUT, aux_output,
1359                                             term->val.num ? 1 : 0, term->weak);
1360                         break;
1361                 case PARSE_EVENTS__TERM_TYPE_AUX_SAMPLE_SIZE:
1362                         ADD_CONFIG_TERM_VAL(AUX_SAMPLE_SIZE, aux_sample_size,
1363                                             term->val.num, term->weak);
1364                         break;
1365                 default:
1366                         break;
1367                 }
1368         }
1369         return 0;
1370 }
1371
1372 /*
1373  * Add EVSEL__CONFIG_TERM_CFG_CHG where cfg_chg will have a bit set for
1374  * each bit of attr->config that the user has changed.
1375  */
1376 static int get_config_chgs(struct perf_pmu *pmu, struct list_head *head_config,
1377                            struct list_head *head_terms)
1378 {
1379         struct parse_events_term *term;
1380         u64 bits = 0;
1381         int type;
1382
1383         list_for_each_entry(term, head_config, list) {
1384                 switch (term->type_term) {
1385                 case PARSE_EVENTS__TERM_TYPE_USER:
1386                         type = perf_pmu__format_type(&pmu->format, term->config);
1387                         if (type != PERF_PMU_FORMAT_VALUE_CONFIG)
1388                                 continue;
1389                         bits |= perf_pmu__format_bits(&pmu->format, term->config);
1390                         break;
1391                 case PARSE_EVENTS__TERM_TYPE_CONFIG:
1392                         bits = ~(u64)0;
1393                         break;
1394                 default:
1395                         break;
1396                 }
1397         }
1398
1399         if (bits)
1400                 ADD_CONFIG_TERM_VAL(CFG_CHG, cfg_chg, bits, false);
1401
1402 #undef ADD_CONFIG_TERM
1403         return 0;
1404 }
1405
1406 int parse_events_add_tracepoint(struct list_head *list, int *idx,
1407                                 const char *sys, const char *event,
1408                                 struct parse_events_error *err,
1409                                 struct list_head *head_config)
1410 {
1411         if (head_config) {
1412                 struct perf_event_attr attr;
1413
1414                 if (config_attr(&attr, head_config, err,
1415                                 config_term_tracepoint))
1416                         return -EINVAL;
1417         }
1418
1419         if (strpbrk(sys, "*?"))
1420                 return add_tracepoint_multi_sys(list, idx, sys, event,
1421                                                 err, head_config);
1422         else
1423                 return add_tracepoint_event(list, idx, sys, event,
1424                                             err, head_config);
1425 }
1426
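/*
 * Add an event given directly by its perf_event_attr type and config
 * (legacy hardware/software events and raw "rNNN" events end up here).
 * Terms from head_config are applied to the attr and turned into config
 * terms; on hybrid systems parse_events__add_numeric_hybrid() may expand
 * the event to one instance per hybrid PMU instead.
 */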
1427 int parse_events_add_numeric(struct parse_events_state *parse_state,
1428                              struct list_head *list,
1429                              u32 type, u64 config,
1430                              struct list_head *head_config)
1431 {
1432         struct perf_event_attr attr;
1433         LIST_HEAD(config_terms);
1434         const char *name, *metric_id;
1435         bool hybrid;
1436         int ret;
1437
1438         memset(&attr, 0, sizeof(attr));
1439         attr.type = type;
1440         attr.config = config;
1441
1442         if (head_config) {
1443                 if (config_attr(&attr, head_config, parse_state->error,
1444                                 config_term_common))
1445                         return -EINVAL;
1446
1447                 if (get_config_terms(head_config, &config_terms))
1448                         return -ENOMEM;
1449         }
1450
1451         name = get_config_name(head_config);
1452         metric_id = get_config_metric_id(head_config);
1453         ret = parse_events__add_numeric_hybrid(parse_state, list, &attr,
1454                                                name, metric_id,
1455                                                &config_terms, &hybrid);
1456         if (hybrid)
1457                 goto out_free_terms;
1458
1459         ret = add_event(list, &parse_state->idx, &attr, name, metric_id,
1460                         &config_terms);
1461 out_free_terms:
1462         free_config_terms(&config_terms);
1463         return ret;
1464 }
1465
1466 int parse_events_add_tool(struct parse_events_state *parse_state,
1467                           struct list_head *list,
1468                           int tool_event)
1469 {
1470         return add_event_tool(list, &parse_state->idx, tool_event);
1471 }
1472
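/*
 * Return the value of the first PERCORE config term in the list, or
 * false if no such term is present.
 */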
1473 static bool config_term_percore(struct list_head *config_terms)
1474 {
1475         struct evsel_config_term *term;
1476
1477         list_for_each_entry(term, config_terms, list) {
1478                 if (term->type == EVSEL__CONFIG_TERM_PERCORE)
1479                         return term->val.percore;
1480         }
1481
1482         return false;
1483 }
1484
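/*
 * Handle the case where a hybrid PMU name is used as an explicit PMU with
 * a single, non-"event" term inside it (e.g. "cpu_core/cycles/"): re-parse
 * that term as an event restricted to the named hybrid PMU.  Returns 0 on
 * success and -1 when this special case does not apply.
 */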
1485 static int parse_events__inside_hybrid_pmu(struct parse_events_state *parse_state,
1486                                            struct list_head *list, char *name,
1487                                            struct list_head *head_config)
1488 {
1489         struct parse_events_term *term;
1490         int ret = -1;
1491
1492         if (parse_state->fake_pmu || !head_config || list_empty(head_config) ||
1493             !perf_pmu__is_hybrid(name)) {
1494                 return -1;
1495         }
1496
1497         /*
1498          * Bail out if there is more than one term in the list.
1499          */
1500         if (head_config->next && head_config->next->next != head_config)
1501                 return -1;
1502
1503         term = list_first_entry(head_config, struct parse_events_term, list);
1504         if (term && term->config && strcmp(term->config, "event")) {
1505                 ret = parse_events__with_hybrid_pmu(parse_state, term->config,
1506                                                     name, list);
1507         }
1508
1509         return ret;
1510 }
1511
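/*
 * Add an event for an explicitly named PMU, e.g. "cpu/event=0x3c/" or an
 * uncore alias such as "uncore_imc_0/cas_count_read/".  head_config holds
 * the terms from inside the '//': aliases are expanded, hardcoded and
 * format terms are applied to the attr, and the remaining terms become
 * per-evsel config terms.
 */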
1512 int parse_events_add_pmu(struct parse_events_state *parse_state,
1513                          struct list_head *list, char *name,
1514                          struct list_head *head_config,
1515                          bool auto_merge_stats,
1516                          bool use_alias)
1517 {
1518         struct perf_event_attr attr;
1519         struct perf_pmu_info info;
1520         struct perf_pmu *pmu;
1521         struct evsel *evsel;
1522         struct parse_events_error *err = parse_state->error;
1523         bool use_uncore_alias;
1524         LIST_HEAD(config_terms);
1525
1526         pmu = parse_state->fake_pmu ?: perf_pmu__find(name);
1527
1528         if (verbose > 1 && !(pmu && pmu->selectable)) {
1529                 fprintf(stderr, "Attempting to add event pmu '%s' with '",
1530                         name);
1531                 if (head_config) {
1532                         struct parse_events_term *term;
1533
1534                         list_for_each_entry(term, head_config, list) {
1535                                 fprintf(stderr, "%s,", term->config);
1536                         }
1537                 }
1538                 fprintf(stderr, "' that may result in non-fatal errors\n");
1539         }
1540
1541         if (!pmu) {
1542                 char *err_str;
1543
1544                 if (asprintf(&err_str,
1545                                 "Cannot find PMU `%s'. Missing kernel support?",
1546                                 name) >= 0)
1547                         parse_events_error__handle(err, 0, err_str, NULL);
1548                 return -EINVAL;
1549         }
1550
1551         if (pmu->default_config) {
1552                 memcpy(&attr, pmu->default_config,
1553                        sizeof(struct perf_event_attr));
1554         } else {
1555                 memset(&attr, 0, sizeof(attr));
1556         }
1557
1558         use_uncore_alias = (pmu->is_uncore && use_alias);
1559
1560         if (!head_config) {
1561                 attr.type = pmu->type;
1562                 evsel = __add_event(list, &parse_state->idx, &attr,
1563                                     /*init_attr=*/true, /*name=*/NULL,
1564                                     /*metric_id=*/NULL, pmu,
1565                                     /*config_terms=*/NULL, auto_merge_stats,
1566                                     /*cpu_list=*/NULL);
1567                 if (evsel) {
1568                         evsel->pmu_name = name ? strdup(name) : NULL;
1569                         evsel->use_uncore_alias = use_uncore_alias;
1570                         return 0;
1571                 } else {
1572                         return -ENOMEM;
1573                 }
1574         }
1575
1576         if (!parse_state->fake_pmu && perf_pmu__check_alias(pmu, head_config, &info))
1577                 return -EINVAL;
1578
1579         if (verbose > 1) {
1580                 fprintf(stderr, "After aliases, add event pmu '%s' with '",
1581                         name);
1582                 if (head_config) {
1583                         struct parse_events_term *term;
1584
1585                         list_for_each_entry(term, head_config, list) {
1586                                 fprintf(stderr, "%s,", term->config);
1587                         }
1588                 }
1589                 fprintf(stderr, "' that may result in non-fatal errors\n");
1590         }
1591
1592         /*
1593          * Configure hardcoded terms first, no need to check
1594          * return value when called with fail == 0 ;)
1595          */
1596         if (config_attr(&attr, head_config, parse_state->error, config_term_pmu))
1597                 return -EINVAL;
1598
1599         if (get_config_terms(head_config, &config_terms))
1600                 return -ENOMEM;
1601
1602         /*
1603          * When using default config, record which bits of attr->config were
1604          * changed by the user.
1605          */
1606         if (pmu->default_config && get_config_chgs(pmu, head_config, &config_terms))
1607                 return -ENOMEM;
1608
1609         if (!parse_events__inside_hybrid_pmu(parse_state, list, name,
1610                                              head_config)) {
1611                 return 0;
1612         }
1613
1614         if (!parse_state->fake_pmu && perf_pmu__config(pmu, &attr, head_config, parse_state->error)) {
1615                 free_config_terms(&config_terms);
1616                 return -EINVAL;
1617         }
1618
1619         evsel = __add_event(list, &parse_state->idx, &attr, /*init_attr=*/true,
1620                             get_config_name(head_config),
1621                             get_config_metric_id(head_config), pmu,
1622                             &config_terms, auto_merge_stats, /*cpu_list=*/NULL);
1623         if (!evsel)
1624                 return -ENOMEM;
1625
1626         if (evsel->name)
1627                 evsel->use_config_name = true;
1628
1629         evsel->pmu_name = name ? strdup(name) : NULL;
1630         evsel->use_uncore_alias = use_uncore_alias;
1631         evsel->percore = config_term_percore(&evsel->config_terms);
1632
1633         if (parse_state->fake_pmu)
1634                 return 0;
1635
1636         free((char *)evsel->unit);
1637         evsel->unit = strdup(info.unit);
1638         evsel->scale = info.scale;
1639         evsel->per_pkg = info.per_pkg;
1640         evsel->snapshot = info.snapshot;
1641         evsel->metric_expr = info.metric_expr;
1642         evsel->metric_name = info.metric_name;
1643         return 0;
1644 }
1645
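/*
 * Add an event alias that was given without an explicit PMU (e.g. a bare
 * "cas_count_read").  One event is created for every PMU that exposes an
 * alias with that name, so an uncore alias ends up with one evsel per
 * uncore PMU instance; the events are later regrouped per PMU.
 */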
1646 int parse_events_multi_pmu_add(struct parse_events_state *parse_state,
1647                                char *str, struct list_head *head,
1648                                struct list_head **listp)
1649 {
1650         struct parse_events_term *term;
1651         struct list_head *list = NULL;
1652         struct list_head *orig_head = NULL;
1653         struct perf_pmu *pmu = NULL;
1654         int ok = 0;
1655         char *config;
1656
1657         *listp = NULL;
1658
1659         if (!head) {
1660                 head = malloc(sizeof(struct list_head));
1661                 if (!head)
1662                         goto out_err;
1663
1664                 INIT_LIST_HEAD(head);
1665         }
1666         config = strdup(str);
1667         if (!config)
1668                 goto out_err;
1669
1670         if (parse_events_term__num(&term,
1671                                    PARSE_EVENTS__TERM_TYPE_USER,
1672                                    config, 1, false, &config,
1673                                         NULL) < 0) {
1674                 free(config);
1675                 goto out_err;
1676         }
1677         list_add_tail(&term->list, head);
1678
1679         /* Add it for all PMUs that support the alias */
1680         list = malloc(sizeof(struct list_head));
1681         if (!list)
1682                 goto out_err;
1683
1684         INIT_LIST_HEAD(list);
1685
1686         while ((pmu = perf_pmu__scan(pmu)) != NULL) {
1687                 struct perf_pmu_alias *alias;
1688
1689                 list_for_each_entry(alias, &pmu->aliases, list) {
1690                         if (!strcasecmp(alias->name, str)) {
1691                                 parse_events_copy_term_list(head, &orig_head);
1692                                 if (!parse_events_add_pmu(parse_state, list,
1693                                                           pmu->name, orig_head,
1694                                                           true, true)) {
1695                                         pr_debug("%s -> %s/%s/\n", str,
1696                                                  pmu->name, alias->str);
1697                                         ok++;
1698                                 }
1699                                 parse_events_terms__delete(orig_head);
1700                         }
1701                 }
1702         }
1703
1704         if (parse_state->fake_pmu) {
1705                 if (!parse_events_add_pmu(parse_state, list, str, head,
1706                                           true, true)) {
1707                         pr_debug("%s -> %s/%s/\n", str, "fake_pmu", str);
1708                         ok++;
1709                 }
1710         }
1711
1712 out_err:
1713         if (ok)
1714                 *listp = list;
1715         else
1716                 free(list);
1717
1718         parse_events_terms__delete(head);
1719         return ok ? 0 : -1;
1720 }
1721
1722 int parse_events__modifier_group(struct list_head *list,
1723                                  char *event_mod)
1724 {
1725         return parse_events__modifier_event(list, event_mod, true);
1726 }
1727
1728 /*
1729  * Check if the two uncore PMUs are from the same uncore block
1730  * The format of the uncore PMU name is uncore_#blockname_#pmuidx
1731  */
1732 static bool is_same_uncore_block(const char *pmu_name_a, const char *pmu_name_b)
1733 {
1734         char *end_a, *end_b;
1735
1736         end_a = strrchr(pmu_name_a, '_');
1737         end_b = strrchr(pmu_name_b, '_');
1738
1739         if (!end_a || !end_b)
1740                 return false;
1741
1742         if ((end_a - pmu_name_a) != (end_b - pmu_name_b))
1743                 return false;
1744
1745         return (strncmp(pmu_name_a, pmu_name_b, end_a - pmu_name_a) == 0);
1746 }
1747
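/*
 * A group built from an uncore event alias spans several PMUs, but the
 * kernel does not accept groups that mix events from different PMUs.
 * Split such a group into one group per uncore PMU and pick a leader for
 * each of them.  Returns 1 when the group was handled here, 0 otherwise.
 */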
1748 static int
1749 parse_events__set_leader_for_uncore_alias(char *name, struct list_head *list,
1750                                            struct parse_events_state *parse_state)
1751 {
1752         struct evsel *evsel, *leader;
1753         uintptr_t *leaders;
1754         bool is_leader = true;
1755         int i, nr_pmu = 0, total_members, ret = 0;
1756
1757         leader = list_first_entry(list, struct evsel, core.node);
1758         evsel = list_last_entry(list, struct evsel, core.node);
1759         total_members = evsel->core.idx - leader->core.idx + 1;
1760
1761         leaders = calloc(total_members, sizeof(uintptr_t));
1762         if (WARN_ON(!leaders))
1763                 return 0;
1764
1765         /*
1766          * Go through the whole group and do a sanity check:
1767          * all members must use an alias and be from the same uncore block.
1768          * Also, store the leader events in an array.
1769          */
1770         __evlist__for_each_entry(list, evsel) {
1771
1772                 /* Only split the uncore group which members use alias */
1773                 if (!evsel->use_uncore_alias)
1774                         goto out;
1775
1776                 /* The events must be from the same uncore block */
1777                 if (!is_same_uncore_block(leader->pmu_name, evsel->pmu_name))
1778                         goto out;
1779
1780                 if (!is_leader)
1781                         continue;
1782                 /*
1783                  * If the event's PMU name starts to repeat, it must be a new
1784                  * event. That can be used to distinguish the leader from
1785                  * other members, even if they have the same event name.
1786                  */
1787                 if ((leader != evsel) &&
1788                     !strcmp(leader->pmu_name, evsel->pmu_name)) {
1789                         is_leader = false;
1790                         continue;
1791                 }
1792
1793                 /* Store the leader event for each PMU */
1794                 leaders[nr_pmu++] = (uintptr_t) evsel;
1795         }
1796
1797         /* only one event alias */
1798         if (nr_pmu == total_members) {
1799                 parse_state->nr_groups--;
1800                 goto handled;
1801         }
1802
1803         /*
1804          * An uncore event alias is a joint name which means the same event
1805          * runs on all PMUs of a block.
1806          * Perf doesn't support mixed events from different PMUs in the same
1807          * group. The big group has to be split into multiple small groups
1808          * which only include the events from the same PMU.
1809          *
1810          * Here the uncore event aliases must be from the same uncore block.
1811          * The number of PMUs must be the same for each alias. The number of
1812          * new small groups equals the number of PMUs.
1813          * Set the leader event for the corresponding members in each group.
1814          */
1815         i = 0;
1816         __evlist__for_each_entry(list, evsel) {
1817                 if (i >= nr_pmu)
1818                         i = 0;
1819                 evsel__set_leader(evsel, (struct evsel *) leaders[i++]);
1820         }
1821
1822         /* The number of members and the group name are the same for each group */
1823         for (i = 0; i < nr_pmu; i++) {
1824                 evsel = (struct evsel *) leaders[i];
1825                 evsel->core.nr_members = total_members / nr_pmu;
1826                 evsel->group_name = name ? strdup(name) : NULL;
1827         }
1828
1829         /* Take the new small groups into account */
1830         parse_state->nr_groups += nr_pmu - 1;
1831
1832 handled:
1833         ret = 1;
1834 out:
1835         free(leaders);
1836         return ret;
1837 }
1838
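/*
 * Weak default for picking the group leader: the first event in the list.
 * Architectures can override this to prefer a particular event (for
 * instance a topdown slots event) as the leader.
 */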
1839 __weak struct evsel *arch_evlist__leader(struct list_head *list)
1840 {
1841         return list_first_entry(list, struct evsel, core.node);
1842 }
1843
1844 void parse_events__set_leader(char *name, struct list_head *list,
1845                               struct parse_events_state *parse_state)
1846 {
1847         struct evsel *leader;
1848
1849         if (list_empty(list)) {
1850                 WARN_ONCE(true, "WARNING: failed to set leader: empty list");
1851                 return;
1852         }
1853
1854         if (parse_events__set_leader_for_uncore_alias(name, list, parse_state))
1855                 return;
1856
1857         leader = arch_evlist__leader(list);
1858         __perf_evlist__set_leader(list, &leader->core);
1859         leader->group_name = name ? strdup(name) : NULL;
1860         list_move(&leader->core.node, list);
1861 }
1862
1863 /* list_event is assumed to point to malloc'ed memory */
1864 void parse_events_update_lists(struct list_head *list_event,
1865                                struct list_head *list_all)
1866 {
1867         /*
1868          * Called for a single event definition. Splice the events onto the
1869          * 'all events' list and free the (now empty) 'single event' list
1870          * head, ready for the next event definition.
1871          */
1872         list_splice_tail(list_event, list_all);
1873         free(list_event);
1874 }
1875
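/*
 * Parsed form of an event modifier string (the ":ukhGHIpPSDeWb" suffix of
 * an event), with one field per modifier character.
 */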
1876 struct event_modifier {
1877         int eu;
1878         int ek;
1879         int eh;
1880         int eH;
1881         int eG;
1882         int eI;
1883         int precise;
1884         int precise_max;
1885         int exclude_GH;
1886         int sample_read;
1887         int pinned;
1888         int weak;
1889         int exclusive;
1890         int bpf_counter;
1891 };
1892
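/*
 * Parse the modifier string, starting from the current attr settings of
 * 'evsel' (if any):
 *   u/k/h - count in user/kernel/hypervisor mode only
 *   G/H   - count in guest/host only
 *   I     - exclude idle
 *   p     - increase the precise_ip level (may be repeated, e.g. "cycles:pp")
 *   P     - use the maximum precise level available
 *   S     - enable PERF_SAMPLE_READ for the group
 *   D     - pin the event to the PMU
 *   e     - exclusive access to the PMU
 *   W     - weak group (fall back to ungrouped events if it fails to open)
 *   b     - use a BPF program to aggregate the counter
 */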
1893 static int get_event_modifier(struct event_modifier *mod, char *str,
1894                                struct evsel *evsel)
1895 {
1896         int eu = evsel ? evsel->core.attr.exclude_user : 0;
1897         int ek = evsel ? evsel->core.attr.exclude_kernel : 0;
1898         int eh = evsel ? evsel->core.attr.exclude_hv : 0;
1899         int eH = evsel ? evsel->core.attr.exclude_host : 0;
1900         int eG = evsel ? evsel->core.attr.exclude_guest : 0;
1901         int eI = evsel ? evsel->core.attr.exclude_idle : 0;
1902         int precise = evsel ? evsel->core.attr.precise_ip : 0;
1903         int precise_max = 0;
1904         int sample_read = 0;
1905         int pinned = evsel ? evsel->core.attr.pinned : 0;
1906         int exclusive = evsel ? evsel->core.attr.exclusive : 0;
1907
1908         int exclude = eu | ek | eh;
1909         int exclude_GH = evsel ? evsel->exclude_GH : 0;
1910         int weak = 0;
1911         int bpf_counter = 0;
1912
1913         memset(mod, 0, sizeof(*mod));
1914
1915         while (*str) {
1916                 if (*str == 'u') {
1917                         if (!exclude)
1918                                 exclude = eu = ek = eh = 1;
1919                         if (!exclude_GH && !perf_guest)
1920                                 eG = 1;
1921                         eu = 0;
1922                 } else if (*str == 'k') {
1923                         if (!exclude)
1924                                 exclude = eu = ek = eh = 1;
1925                         ek = 0;
1926                 } else if (*str == 'h') {
1927                         if (!exclude)
1928                                 exclude = eu = ek = eh = 1;
1929                         eh = 0;
1930                 } else if (*str == 'G') {
1931                         if (!exclude_GH)
1932                                 exclude_GH = eG = eH = 1;
1933                         eG = 0;
1934                 } else if (*str == 'H') {
1935                         if (!exclude_GH)
1936                                 exclude_GH = eG = eH = 1;
1937                         eH = 0;
1938                 } else if (*str == 'I') {
1939                         eI = 1;
1940                 } else if (*str == 'p') {
1941                         precise++;
1942                         /* use of precise requires exclude_guest */
1943                         if (!exclude_GH)
1944                                 eG = 1;
1945                 } else if (*str == 'P') {
1946                         precise_max = 1;
1947                 } else if (*str == 'S') {
1948                         sample_read = 1;
1949                 } else if (*str == 'D') {
1950                         pinned = 1;
1951                 } else if (*str == 'e') {
1952                         exclusive = 1;
1953                 } else if (*str == 'W') {
1954                         weak = 1;
1955                 } else if (*str == 'b') {
1956                         bpf_counter = 1;
1957                 } else
1958                         break;
1959
1960                 ++str;
1961         }
1962
1963         /*
1964          * precise ip:
1965          *
1966          *  0 - SAMPLE_IP can have arbitrary skid
1967          *  1 - SAMPLE_IP must have constant skid
1968          *  2 - SAMPLE_IP requested to have 0 skid
1969          *  3 - SAMPLE_IP must have 0 skid
1970          *
1971          *  See also PERF_RECORD_MISC_EXACT_IP
1972          */
1973         if (precise > 3)
1974                 return -EINVAL;
1975
1976         mod->eu = eu;
1977         mod->ek = ek;
1978         mod->eh = eh;
1979         mod->eH = eH;
1980         mod->eG = eG;
1981         mod->eI = eI;
1982         mod->precise = precise;
1983         mod->precise_max = precise_max;
1984         mod->exclude_GH = exclude_GH;
1985         mod->sample_read = sample_read;
1986         mod->pinned = pinned;
1987         mod->weak = weak;
1988         mod->bpf_counter = bpf_counter;
1989         mod->exclusive = exclusive;
1990
1991         return 0;
1992 }
1993
1994 /*
1995  * Basic modifier sanity check to validate that the string contains at
1996  * most one instance of each modifier (apart from 'p').
1997  */
1998 static int check_modifier(char *str)
1999 {
2000         char *p = str;
2001
2002         /* The sizeof includes the terminating NUL byte as well. */
2003         if (strlen(str) > (sizeof("ukhGHpppPSDIWeb") - 1))
2004                 return -1;
2005
2006         while (*p) {
2007                 if (*p != 'p' && strchr(p + 1, *p))
2008                         return -1;
2009                 p++;
2010         }
2011
2012         return 0;
2013 }
2014
2015 int parse_events__modifier_event(struct list_head *list, char *str, bool add)
2016 {
2017         struct evsel *evsel;
2018         struct event_modifier mod;
2019
2020         if (str == NULL)
2021                 return 0;
2022
2023         if (check_modifier(str))
2024                 return -EINVAL;
2025
2026         if (!add && get_event_modifier(&mod, str, NULL))
2027                 return -EINVAL;
2028
2029         __evlist__for_each_entry(list, evsel) {
2030                 if (add && get_event_modifier(&mod, str, evsel))
2031                         return -EINVAL;
2032
2033                 evsel->core.attr.exclude_user   = mod.eu;
2034                 evsel->core.attr.exclude_kernel = mod.ek;
2035                 evsel->core.attr.exclude_hv     = mod.eh;
2036                 evsel->core.attr.precise_ip     = mod.precise;
2037                 evsel->core.attr.exclude_host   = mod.eH;
2038                 evsel->core.attr.exclude_guest  = mod.eG;
2039                 evsel->core.attr.exclude_idle   = mod.eI;
2040                 evsel->exclude_GH          = mod.exclude_GH;
2041                 evsel->sample_read         = mod.sample_read;
2042                 evsel->precise_max         = mod.precise_max;
2043                 evsel->weak_group          = mod.weak;
2044                 evsel->bpf_counter         = mod.bpf_counter;
2045
2046                 if (evsel__is_group_leader(evsel)) {
2047                         evsel->core.attr.pinned = mod.pinned;
2048                         evsel->core.attr.exclusive = mod.exclusive;
2049                 }
2050         }
2051
2052         return 0;
2053 }
2054
2055 int parse_events_name(struct list_head *list, const char *name)
2056 {
2057         struct evsel *evsel;
2058
2059         __evlist__for_each_entry(list, evsel) {
2060                 if (!evsel->name)
2061                         evsel->name = strdup(name);
2062         }
2063
2064         return 0;
2065 }
2066
2067 static int
2068 comp_pmu(const void *p1, const void *p2)
2069 {
2070         struct perf_pmu_event_symbol *pmu1 = (struct perf_pmu_event_symbol *) p1;
2071         struct perf_pmu_event_symbol *pmu2 = (struct perf_pmu_event_symbol *) p2;
2072
2073         return strcasecmp(pmu1->symbol, pmu2->symbol);
2074 }
2075
2076 static void perf_pmu__parse_cleanup(void)
2077 {
2078         if (perf_pmu_events_list_num > 0) {
2079                 struct perf_pmu_event_symbol *p;
2080                 int i;
2081
2082                 for (i = 0; i < perf_pmu_events_list_num; i++) {
2083                         p = perf_pmu_events_list + i;
2084                         zfree(&p->symbol);
2085                 }
2086                 zfree(&perf_pmu_events_list);
2087                 perf_pmu_events_list_num = 0;
2088         }
2089 }
2090
2091 #define SET_SYMBOL(str, stype)          \
2092 do {                                    \
2093         p->symbol = str;                \
2094         if (!p->symbol)                 \
2095                 goto err;               \
2096         p->type = stype;                \
2097 } while (0)
2098
2099 /*
2100  * Read the pmu events list from sysfs
2101  * Save it into perf_pmu_events_list
2102  */
2103 static void perf_pmu__parse_init(void)
2104 {
2105
2106         struct perf_pmu *pmu = NULL;
2107         struct perf_pmu_alias *alias;
2108         int len = 0;
2109
2111         while ((pmu = perf_pmu__scan(pmu)) != NULL) {
2112                 list_for_each_entry(alias, &pmu->aliases, list) {
2113                         char *tmp = strchr(alias->name, '-');
2114
2115                         if (tmp) {
2116                                 char *tmp2 = NULL;
2117
2118                                 tmp2 = strchr(tmp + 1, '-');
2119                                 len++;
2120                                 if (tmp2)
2121                                         len++;
2122                         }
2123
2124                         len++;
2125                 }
2126         }
2127
2128         if (len == 0) {
2129                 perf_pmu_events_list_num = -1;
2130                 return;
2131         }
2132         perf_pmu_events_list = malloc(sizeof(struct perf_pmu_event_symbol) * len);
2133         if (!perf_pmu_events_list)
2134                 return;
2135         perf_pmu_events_list_num = len;
2136
2137         len = 0;
2138         pmu = NULL;
2139         while ((pmu = perf_pmu__scan(pmu)) != NULL) {
2140                 list_for_each_entry(alias, &pmu->aliases, list) {
2141                         struct perf_pmu_event_symbol *p = perf_pmu_events_list + len;
2142                         char *tmp = strchr(alias->name, '-');
2143                         char *tmp2 = NULL;
2144
2145                         if (tmp)
2146                                 tmp2 = strchr(tmp + 1, '-');
2147                         if (tmp2) {
2148                                 SET_SYMBOL(strndup(alias->name, tmp - alias->name),
2149                                                 PMU_EVENT_SYMBOL_PREFIX);
2150                                 p++;
2151                                 tmp++;
2152                                 SET_SYMBOL(strndup(tmp, tmp2 - tmp), PMU_EVENT_SYMBOL_SUFFIX);
2153                                 p++;
2154                                 SET_SYMBOL(strdup(++tmp2), PMU_EVENT_SYMBOL_SUFFIX2);
2155                                 len += 3;
2156                         } else if (tmp) {
2157                                 SET_SYMBOL(strndup(alias->name, tmp - alias->name),
2158                                                 PMU_EVENT_SYMBOL_PREFIX);
2159                                 p++;
2160                                 SET_SYMBOL(strdup(++tmp), PMU_EVENT_SYMBOL_SUFFIX);
2161                                 len += 2;
2162                         } else {
2163                                 SET_SYMBOL(strdup(alias->name), PMU_EVENT_SYMBOL);
2164                                 len++;
2165                         }
2166                 }
2167         }
2168         qsort(perf_pmu_events_list, len,
2169                 sizeof(struct perf_pmu_event_symbol), comp_pmu);
2170
2171         return;
2172 err:
2173         perf_pmu__parse_cleanup();
2174 }
2175
2176 /*
2177  * This function injects special terms into
2178  * perf_pmu_events_list so that the test code
2179  * can exercise this functionality.
2180  */
2181 int perf_pmu__test_parse_init(void)
2182 {
2183         struct perf_pmu_event_symbol *list, *tmp, symbols[] = {
2184                 {(char *)"read", PMU_EVENT_SYMBOL},
2185                 {(char *)"event", PMU_EVENT_SYMBOL_PREFIX},
2186                 {(char *)"two", PMU_EVENT_SYMBOL_SUFFIX},
2187                 {(char *)"hyphen", PMU_EVENT_SYMBOL_SUFFIX},
2188                 {(char *)"hyph", PMU_EVENT_SYMBOL_SUFFIX2},
2189         };
2190         unsigned long i, j;
2191
2192         tmp = list = malloc(sizeof(*list) * ARRAY_SIZE(symbols));
2193         if (!list)
2194                 return -ENOMEM;
2195
2196         for (i = 0; i < ARRAY_SIZE(symbols); i++, tmp++) {
2197                 tmp->type = symbols[i].type;
2198                 tmp->symbol = strdup(symbols[i].symbol);
2199                 if (!tmp->symbol)
2200                         goto err_free;
2201         }
2202
2203         perf_pmu_events_list = list;
2204         perf_pmu_events_list_num = ARRAY_SIZE(symbols);
2205
2206         qsort(perf_pmu_events_list, ARRAY_SIZE(symbols),
2207               sizeof(struct perf_pmu_event_symbol), comp_pmu);
2208         return 0;
2209
2210 err_free:
2211         for (j = 0, tmp = list; j < i; j++, tmp++)
2212                 free(tmp->symbol);
2213         free(list);
2214         return -ENOMEM;
2215 }
2216
2217 enum perf_pmu_event_symbol_type
2218 perf_pmu__parse_check(const char *name)
2219 {
2220         struct perf_pmu_event_symbol p, *r;
2221
2222         /* scan kernel pmu events from sysfs if needed */
2223         if (perf_pmu_events_list_num == 0)
2224                 perf_pmu__parse_init();
2225         /*
2226          * The name "cpu" could be a prefix of cpu-cycles or of cpu// events.
2227          * cpu-cycles is already handled by the hardcoded events, so here it
2228          * must be a cpu// event, not a kernel PMU event.
2229          */
2230         if ((perf_pmu_events_list_num <= 0) || !strcmp(name, "cpu"))
2231                 return PMU_EVENT_SYMBOL_ERR;
2232
2233         p.symbol = strdup(name);
2234         r = bsearch(&p, perf_pmu_events_list,
2235                         (size_t) perf_pmu_events_list_num,
2236                         sizeof(struct perf_pmu_event_symbol), comp_pmu);
2237         zfree(&p.symbol);
2238         return r ? r->type : PMU_EVENT_SYMBOL_ERR;
2239 }
2240
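/*
 * Run the flex/bison parser over 'str'.  The results (a list of evsels or
 * a list of terms, depending on parse_state->stoken) are collected in
 * parse_state by the grammar actions.
 */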
2241 static int parse_events__scanner(const char *str,
2242                                  struct parse_events_state *parse_state)
2243 {
2244         YY_BUFFER_STATE buffer;
2245         void *scanner;
2246         int ret;
2247
2248         ret = parse_events_lex_init_extra(parse_state, &scanner);
2249         if (ret)
2250                 return ret;
2251
2252         buffer = parse_events__scan_string(str, scanner);
2253
2254 #ifdef PARSER_DEBUG
2255         parse_events_debug = 1;
2256         parse_events_set_debug(1, scanner);
2257 #endif
2258         ret = parse_events_parse(parse_state, scanner);
2259
2260         parse_events__flush_buffer(buffer, scanner);
2261         parse_events__delete_buffer(buffer, scanner);
2262         parse_events_lex_destroy(scanner);
2263         return ret;
2264 }
2265
2266 /*
2267  * Parse an event config string and return a list of event terms.
2268  */
2269 int parse_events_terms(struct list_head *terms, const char *str)
2270 {
2271         struct parse_events_state parse_state = {
2272                 .terms  = NULL,
2273                 .stoken = PE_START_TERMS,
2274         };
2275         int ret;
2276
2277         ret = parse_events__scanner(str, &parse_state);
2278         perf_pmu__parse_cleanup();
2279
2280         if (!ret) {
2281                 list_splice(parse_state.terms, terms);
2282                 zfree(&parse_state.terms);
2283                 return 0;
2284         }
2285
2286         parse_events_terms__delete(parse_state.terms);
2287         return ret;
2288 }
2289
2290 static int parse_events__with_hybrid_pmu(struct parse_events_state *parse_state,
2291                                          const char *str, char *pmu_name,
2292                                          struct list_head *list)
2293 {
2294         struct parse_events_state ps = {
2295                 .list            = LIST_HEAD_INIT(ps.list),
2296                 .stoken          = PE_START_EVENTS,
2297                 .hybrid_pmu_name = pmu_name,
2298                 .idx             = parse_state->idx,
2299         };
2300         int ret;
2301
2302         ret = parse_events__scanner(str, &ps);
2303         perf_pmu__parse_cleanup();
2304
2305         if (!ret) {
2306                 if (!list_empty(&ps.list)) {
2307                         list_splice(&ps.list, list);
2308                         parse_state->idx = ps.idx;
2309                         return 0;
2310                 } else
2311                         return -1;
2312         }
2313
2314         return ret;
2315 }
2316
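/*
 * Parse the event definition string 'str' and append the resulting evsels
 * to 'evlist'.  'err' collects details for error reporting; 'fake_pmu' is
 * used by the parsing tests so that events for PMUs not present on the
 * running system can still be parsed.
 */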
2317 int __parse_events(struct evlist *evlist, const char *str,
2318                    struct parse_events_error *err, struct perf_pmu *fake_pmu)
2319 {
2320         struct parse_events_state parse_state = {
2321                 .list     = LIST_HEAD_INIT(parse_state.list),
2322                 .idx      = evlist->core.nr_entries,
2323                 .error    = err,
2324                 .evlist   = evlist,
2325                 .stoken   = PE_START_EVENTS,
2326                 .fake_pmu = fake_pmu,
2327         };
2328         int ret;
2329
2330         ret = parse_events__scanner(str, &parse_state);
2331         perf_pmu__parse_cleanup();
2332
2333         if (!ret && list_empty(&parse_state.list)) {
2334                 WARN_ONCE(true, "WARNING: event parser found nothing\n");
2335                 return -1;
2336         }
2337
2338         /*
2339          * Add list to the evlist even with errors to allow callers to clean up.
2340          */
2341         evlist__splice_list_tail(evlist, &parse_state.list);
2342
2343         if (!ret) {
2344                 struct evsel *last;
2345
2346                 evlist->core.nr_groups += parse_state.nr_groups;
2347                 last = evlist__last(evlist);
2348                 last->cmdline_group_boundary = true;
2349
2350                 return 0;
2351         }
2352
2353         /*
2354          * There are two users - the builtin-record and builtin-test objects.
2355          * Both call evlist__delete() in case of error, so we don't
2356          * need to bother here.
2357          */
2358         return ret;
2359 }
2360
2361 void parse_events_error__init(struct parse_events_error *err)
2362 {
2363         bzero(err, sizeof(*err));
2364 }
2365
2366 void parse_events_error__exit(struct parse_events_error *err)
2367 {
2368         zfree(&err->str);
2369         zfree(&err->help);
2370         zfree(&err->first_str);
2371         zfree(&err->first_help);
2372 }
2373
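/*
 * Record a parse error.  The first and the most recent error are kept for
 * parse_events_error__print(); intermediate ones are dropped with a debug
 * message.  Takes ownership of the 'str' and 'help' strings.
 */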
2374 void parse_events_error__handle(struct parse_events_error *err, int idx,
2375                                 char *str, char *help)
2376 {
2377         if (WARN(!str, "WARNING: failed to provide error string\n")) {
2378                 free(help);
2379                 return;
2380         }
2381         switch (err->num_errors) {
2382         case 0:
2383                 err->idx = idx;
2384                 err->str = str;
2385                 err->help = help;
2386                 break;
2387         case 1:
2388                 err->first_idx = err->idx;
2389                 err->idx = idx;
2390                 err->first_str = err->str;
2391                 err->str = str;
2392                 err->first_help = err->help;
2393                 err->help = help;
2394                 break;
2395         default:
2396                 pr_debug("Multiple errors dropping message: %s (%s)\n",
2397                         err->str, err->help);
2398                 free(err->str);
2399                 err->str = str;
2400                 free(err->help);
2401                 err->help = help;
2402                 break;
2403         }
2404         err->num_errors++;
2405 }
2406
2407 #define MAX_WIDTH 1000
2408 static int get_term_width(void)
2409 {
2410         struct winsize ws;
2411
2412         get_term_dimensions(&ws);
2413         return ws.ws_col > MAX_WIDTH ? MAX_WIDTH : ws.ws_col;
2414 }
2415
2416 static void __parse_events_error__print(int err_idx, const char *err_str,
2417                                         const char *err_help, const char *event)
2418 {
2419         const char *str = "invalid or unsupported event: ";
2420         char _buf[MAX_WIDTH];
2421         char *buf = (char *) event;
2422         int idx = 0;
2423         if (err_str) {
2424                 /* -2 for extra '' in the final fprintf */
2425                 int width       = get_term_width() - 2;
2426                 int len_event   = strlen(event);
2427                 int len_str, max_len, cut = 0;
2428
2429                 /*
2430                  * Maximum error index indent, we will cut
2431                  * the event string if it's bigger.
2432                  */
2433                 int max_err_idx = 13;
2434
2435                 /*
2436                  * Let's be specific with the message when
2437                  * we have the precise error.
2438                  */
2439                 str     = "event syntax error: ";
2440                 len_str = strlen(str);
2441                 max_len = width - len_str;
2442
2443                 buf = _buf;
2444
2445                 /* We're cutting from the beginning. */
2446                 if (err_idx > max_err_idx)
2447                         cut = err_idx - max_err_idx;
2448
2449                 strncpy(buf, event + cut, max_len);
2450
2451                 /* Mark cut parts with '..' on both sides. */
2452                 if (cut)
2453                         buf[0] = buf[1] = '.';
2454
2455                 if ((len_event - cut) > max_len) {
2456                         buf[max_len - 1] = buf[max_len - 2] = '.';
2457                         buf[max_len] = 0;
2458                 }
2459
2460                 idx = len_str + err_idx - cut;
2461         }
2462
2463         fprintf(stderr, "%s'%s'\n", str, buf);
2464         if (idx) {
2465                 fprintf(stderr, "%*s\\___ %s\n", idx + 1, "", err_str);
2466                 if (err_help)
2467                         fprintf(stderr, "\n%s\n", err_help);
2468         }
2469 }
2470
2471 void parse_events_error__print(struct parse_events_error *err,
2472                                const char *event)
2473 {
2474         if (!err->num_errors)
2475                 return;
2476
2477         __parse_events_error__print(err->idx, err->str, err->help, event);
2478
2479         if (err->num_errors > 1) {
2480                 fputs("\nInitial error:\n", stderr);
2481                 __parse_events_error__print(err->first_idx, err->first_str,
2482                                         err->first_help, event);
2483         }
2484 }
2485
2486 #undef MAX_WIDTH
2487
2488 int parse_events_option(const struct option *opt, const char *str,
2489                         int unset __maybe_unused)
2490 {
2491         struct evlist *evlist = *(struct evlist **)opt->value;
2492         struct parse_events_error err;
2493         int ret;
2494
2495         parse_events_error__init(&err);
2496         ret = parse_events(evlist, str, &err);
2497
2498         if (ret) {
2499                 parse_events_error__print(&err, str);
2500                 fprintf(stderr, "Run 'perf list' for a list of valid events\n");
2501         }
2502         parse_events_error__exit(&err);
2503
2504         return ret;
2505 }
2506
2507 int parse_events_option_new_evlist(const struct option *opt, const char *str, int unset)
2508 {
2509         struct evlist **evlistp = opt->value;
2510         int ret;
2511
2512         if (*evlistp == NULL) {
2513                 *evlistp = evlist__new();
2514
2515                 if (*evlistp == NULL) {
2516                         fprintf(stderr, "Not enough memory to create evlist\n");
2517                         return -1;
2518                 }
2519         }
2520
2521         ret = parse_events_option(opt, str, unset);
2522         if (ret) {
2523                 evlist__delete(*evlistp);
2524                 *evlistp = NULL;
2525         }
2526
2527         return ret;
2528 }
2529
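/*
 * Call 'func' for every evsel added by the most recent event definition,
 * i.e. walk backwards from the last evsel until the previous
 * cmdline_group_boundary.  Used to apply options such as --filter and
 * --exclude-perf to the events they follow on the command line.
 */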
2530 static int
2531 foreach_evsel_in_last_glob(struct evlist *evlist,
2532                            int (*func)(struct evsel *evsel,
2533                                        const void *arg),
2534                            const void *arg)
2535 {
2536         struct evsel *last = NULL;
2537         int err;
2538
2539         /*
2540          * Don't return when the list is empty; give func a chance to report
2541          * an error when it finds last == NULL.
2542          *
2543          * So no need to WARN here, let *func do this.
2544          */
2545         if (evlist->core.nr_entries > 0)
2546                 last = evlist__last(evlist);
2547
2548         do {
2549                 err = (*func)(last, arg);
2550                 if (err)
2551                         return -1;
2552                 if (!last)
2553                         return 0;
2554
2555                 if (last->core.node.prev == &evlist->core.entries)
2556                         return 0;
2557                 last = list_entry(last->core.node.prev, struct evsel, core.node);
2558         } while (!last->cmdline_group_boundary);
2559
2560         return 0;
2561 }
2562
2563 static int set_filter(struct evsel *evsel, const void *arg)
2564 {
2565         const char *str = arg;
2566         bool found = false;
2567         int nr_addr_filters = 0;
2568         struct perf_pmu *pmu = NULL;
2569
2570         if (evsel == NULL) {
2571                 fprintf(stderr,
2572                         "--filter option should follow a -e tracepoint or HW tracer option\n");
2573                 return -1;
2574         }
2575
2576         if (evsel->core.attr.type == PERF_TYPE_TRACEPOINT) {
2577                 if (evsel__append_tp_filter(evsel, str) < 0) {
2578                         fprintf(stderr,
2579                                 "not enough memory to hold filter string\n");
2580                         return -1;
2581                 }
2582
2583                 return 0;
2584         }
2585
2586         while ((pmu = perf_pmu__scan(pmu)) != NULL)
2587                 if (pmu->type == evsel->core.attr.type) {
2588                         found = true;
2589                         break;
2590                 }
2591
2592         if (found)
2593                 perf_pmu__scan_file(pmu, "nr_addr_filters",
2594                                     "%d", &nr_addr_filters);
2595
2596         if (!nr_addr_filters) {
2597                 fprintf(stderr,
2598                         "This CPU does not support address filtering\n");
2599                 return -1;
2600         }
2601
2602         if (evsel__append_addr_filter(evsel, str) < 0) {
2603                 fprintf(stderr,
2604                         "not enough memory to hold filter string\n");
2605                 return -1;
2606         }
2607
2608         return 0;
2609 }
2610
2611 int parse_filter(const struct option *opt, const char *str,
2612                  int unset __maybe_unused)
2613 {
2614         struct evlist *evlist = *(struct evlist **)opt->value;
2615
2616         return foreach_evsel_in_last_glob(evlist, set_filter,
2617                                           (const void *)str);
2618 }
2619
2620 static int add_exclude_perf_filter(struct evsel *evsel,
2621                                    const void *arg __maybe_unused)
2622 {
2623         char new_filter[64];
2624
2625         if (evsel == NULL || evsel->core.attr.type != PERF_TYPE_TRACEPOINT) {
2626                 fprintf(stderr,
2627                         "--exclude-perf option should follow a -e tracepoint option\n");
2628                 return -1;
2629         }
2630
2631         snprintf(new_filter, sizeof(new_filter), "common_pid != %d", getpid());
2632
2633         if (evsel__append_tp_filter(evsel, new_filter) < 0) {
2634                 fprintf(stderr,
2635                         "not enough memory to hold filter string\n");
2636                 return -1;
2637         }
2638
2639         return 0;
2640 }
2641
2642 int exclude_perf(const struct option *opt,
2643                  const char *arg __maybe_unused,
2644                  int unset __maybe_unused)
2645 {
2646         struct evlist *evlist = *(struct evlist **)opt->value;
2647
2648         return foreach_evsel_in_last_glob(evlist, add_exclude_perf_filter,
2649                                           NULL);
2650 }
2651
2652 static const char * const event_type_descriptors[] = {
2653         "Hardware event",
2654         "Software event",
2655         "Tracepoint event",
2656         "Hardware cache event",
2657         "Raw hardware event descriptor",
2658         "Hardware breakpoint",
2659 };
2660
2661 static int cmp_string(const void *a, const void *b)
2662 {
2663         const char * const *as = a;
2664         const char * const *bs = b;
2665
2666         return strcmp(*as, *bs);
2667 }
2668
2669 /*
2670  * Print the events from <debugfs_mount_point>/tracing/events
2671  */
2672
2673 void print_tracepoint_events(const char *subsys_glob, const char *event_glob,
2674                              bool name_only)
2675 {
2676         DIR *sys_dir, *evt_dir;
2677         struct dirent *sys_dirent, *evt_dirent;
2678         char evt_path[MAXPATHLEN];
2679         char *dir_path;
2680         char **evt_list = NULL;
2681         unsigned int evt_i = 0, evt_num = 0;
2682         bool evt_num_known = false;
2683
2684 restart:
2685         sys_dir = tracing_events__opendir();
2686         if (!sys_dir)
2687                 return;
2688
2689         if (evt_num_known) {
2690                 evt_list = zalloc(sizeof(char *) * evt_num);
2691                 if (!evt_list)
2692                         goto out_close_sys_dir;
2693         }
2694
2695         for_each_subsystem(sys_dir, sys_dirent) {
2696                 if (subsys_glob != NULL &&
2697                     !strglobmatch(sys_dirent->d_name, subsys_glob))
2698                         continue;
2699
2700                 dir_path = get_events_file(sys_dirent->d_name);
2701                 if (!dir_path)
2702                         continue;
2703                 evt_dir = opendir(dir_path);
2704                 if (!evt_dir)
2705                         goto next;
2706
2707                 for_each_event(dir_path, evt_dir, evt_dirent) {
2708                         if (event_glob != NULL &&
2709                             !strglobmatch(evt_dirent->d_name, event_glob))
2710                                 continue;
2711
2712                         if (!evt_num_known) {
2713                                 evt_num++;
2714                                 continue;
2715                         }
2716
2717                         snprintf(evt_path, MAXPATHLEN, "%s:%s",
2718                                  sys_dirent->d_name, evt_dirent->d_name);
2719
2720                         evt_list[evt_i] = strdup(evt_path);
2721                         if (evt_list[evt_i] == NULL) {
2722                                 put_events_file(dir_path);
2723                                 goto out_close_evt_dir;
2724                         }
2725                         evt_i++;
2726                 }
2727                 closedir(evt_dir);
2728 next:
2729                 put_events_file(dir_path);
2730         }
2731         closedir(sys_dir);
2732
2733         if (!evt_num_known) {
2734                 evt_num_known = true;
2735                 goto restart;
2736         }
2737         qsort(evt_list, evt_num, sizeof(char *), cmp_string);
2738         evt_i = 0;
2739         while (evt_i < evt_num) {
2740                 if (name_only) {
2741                         printf("%s ", evt_list[evt_i++]);
2742                         continue;
2743                 }
2744                 printf("  %-50s [%s]\n", evt_list[evt_i++],
2745                                 event_type_descriptors[PERF_TYPE_TRACEPOINT]);
2746         }
2747         if (evt_num && pager_in_use())
2748                 printf("\n");
2749
2750 out_free:
2751         evt_num = evt_i;
2752         for (evt_i = 0; evt_i < evt_num; evt_i++)
2753                 zfree(&evt_list[evt_i]);
2754         zfree(&evt_list);
2755         return;
2756
2757 out_close_evt_dir:
2758         closedir(evt_dir);
2759 out_close_sys_dir:
2760         closedir(sys_dir);
2761
2762         printf("FATAL: not enough memory to print %s\n",
2763                         event_type_descriptors[PERF_TYPE_TRACEPOINT]);
2764         if (evt_list)
2765                 goto out_free;
2766 }
2767
2768 /*
2769  * Check whether event is in <debugfs_mount_point>/tracing/events
2770  */
2771
2772 int is_valid_tracepoint(const char *event_string)
2773 {
2774         DIR *sys_dir, *evt_dir;
2775         struct dirent *sys_dirent, *evt_dirent;
2776         char evt_path[MAXPATHLEN];
2777         char *dir_path;
2778
2779         sys_dir = tracing_events__opendir();
2780         if (!sys_dir)
2781                 return 0;
2782
2783         for_each_subsystem(sys_dir, sys_dirent) {
2784                 dir_path = get_events_file(sys_dirent->d_name);
2785                 if (!dir_path)
2786                         continue;
2787                 evt_dir = opendir(dir_path);
2788                 if (!evt_dir)
2789                         goto next;
2790
2791                 for_each_event(dir_path, evt_dir, evt_dirent) {
2792                         snprintf(evt_path, MAXPATHLEN, "%s:%s",
2793                                  sys_dirent->d_name, evt_dirent->d_name);
2794                         if (!strcmp(evt_path, event_string)) {
2795                                 closedir(evt_dir);
2796                                 closedir(sys_dir);
2797                                 return 1;
2798                         }
2799                 }
2800                 closedir(evt_dir);
2801 next:
2802                 put_events_file(dir_path);
2803         }
2804         closedir(sys_dir);
2805         return 0;
2806 }
2807
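/*
 * Probe whether an event is supported by actually trying to open it on the
 * current thread.  If opening fails with EACCES (perf_event_paranoid), the
 * probe is retried with exclude_kernel set.
 */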
2808 static bool is_event_supported(u8 type, u64 config)
2809 {
2810         bool ret = true;
2811         int open_return;
2812         struct evsel *evsel;
2813         struct perf_event_attr attr = {
2814                 .type = type,
2815                 .config = config,
2816                 .disabled = 1,
2817         };
2818         struct perf_thread_map *tmap = thread_map__new_by_tid(0);
2819
2820         if (tmap == NULL)
2821                 return false;
2822
2823         evsel = evsel__new(&attr);
2824         if (evsel) {
2825                 open_return = evsel__open(evsel, NULL, tmap);
2826                 ret = open_return >= 0;
2827
2828                 if (open_return == -EACCES) {
2829                         /*
2830                          * This happens if the paranoid value
2831                          * /proc/sys/kernel/perf_event_paranoid is set to 2.
2832                          * Re-run with exclude_kernel set; we don't do that
2833                          * by default as some ARM machines do not support it.
2834                          *
2835                          */
2836                         evsel->core.attr.exclude_kernel = 1;
2837                         ret = evsel__open(evsel, NULL, tmap) >= 0;
2838                 }
2839                 evsel__delete(evsel);
2840         }
2841
2842         perf_thread_map__put(tmap);
2843         return ret;
2844 }
2845
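/*
 * List SDT events found in binaries recorded in the build-id cache,
 * optionally filtered by the subsystem/event globs.  When the same
 * group:event name exists in more than one binary, the file path and part
 * of the build-id are printed to disambiguate the entries.
 */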
2846 void print_sdt_events(const char *subsys_glob, const char *event_glob,
2847                       bool name_only)
2848 {
2849         struct probe_cache *pcache;
2850         struct probe_cache_entry *ent;
2851         struct strlist *bidlist, *sdtlist;
2852         struct strlist_config cfg = {.dont_dupstr = true};
2853         struct str_node *nd, *nd2;
2854         char *buf, *path, *ptr = NULL;
2855         bool show_detail = false;
2856         int ret;
2857
2858         sdtlist = strlist__new(NULL, &cfg);
2859         if (!sdtlist) {
2860                 pr_debug("Failed to allocate new strlist for SDT\n");
2861                 return;
2862         }
2863         bidlist = build_id_cache__list_all(true);
2864         if (!bidlist) {
2865                 pr_debug("Failed to get buildids: %d\n", errno);
                strlist__delete(sdtlist);
2866                 return;
2867         }
2868         strlist__for_each_entry(nd, bidlist) {
2869                 pcache = probe_cache__new(nd->s, NULL);
2870                 if (!pcache)
2871                         continue;
2872                 list_for_each_entry(ent, &pcache->entries, node) {
2873                         if (!ent->sdt)
2874                                 continue;
2875                         if (subsys_glob &&
2876                             !strglobmatch(ent->pev.group, subsys_glob))
2877                                 continue;
2878                         if (event_glob &&
2879                             !strglobmatch(ent->pev.event, event_glob))
2880                                 continue;
2881                         ret = asprintf(&buf, "%s:%s@%s", ent->pev.group,
2882                                         ent->pev.event, nd->s);
2883                         if (ret > 0)
2884                                 strlist__add(sdtlist, buf);
2885                 }
2886                 probe_cache__delete(pcache);
2887         }
2888         strlist__delete(bidlist);
2889
2890         strlist__for_each_entry(nd, sdtlist) {
2891                 buf = strchr(nd->s, '@');
2892                 if (buf)
2893                         *(buf++) = '\0';
2894                 if (name_only) {
2895                         printf("%s ", nd->s);
2896                         continue;
2897                 }
2898                 nd2 = strlist__next(nd);
2899                 if (nd2) {
2900                         ptr = strchr(nd2->s, '@');
2901                         if (ptr)
2902                                 *ptr = '\0';
2903                         if (strcmp(nd->s, nd2->s) == 0)
2904                                 show_detail = true;
2905                 }
2906                 if (show_detail) {
2907                         path = build_id_cache__origname(buf);
2908                         ret = asprintf(&buf, "%s@%s(%.12s)", nd->s, path, buf);
2909                         if (ret > 0) {
2910                                 printf("  %-50s [%s]\n", buf, "SDT event");
2911                                 free(buf);
2912                         }
2913                         free(path);
2914                 } else
2915                         printf("  %-50s [%s]\n", nd->s, "SDT event");
2916                 if (nd2) {
2917                         if (strcmp(nd->s, nd2->s) != 0)
2918                                 show_detail = false;
2919                         if (ptr)
2920                                 *ptr = '@';
2921                 }
2922         }
2923         strlist__delete(sdtlist);
2924 }
2925
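/*
 * Hardware cache events are listed in two passes: the first pass (with
 * evt_num_known == false) only counts how many type/op/result combinations
 * the running kernel accepts so that evt_list can be sized, then the code
 * jumps back to "restart" and fills the list.  On hybrid systems every
 * combination is probed once per hybrid PMU, with the PMU type encoded in
 * the upper config bits via PERF_PMU_TYPE_SHIFT; when only a subset of the
 * PMUs accepts a combination, one "pmu/event/" entry is emitted for each
 * supporting PMU instead of the bare event name.
 */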
2926 int print_hwcache_events(const char *event_glob, bool name_only)
2927 {
2928         unsigned int type, op, i, evt_i = 0, evt_num = 0, npmus = 0;
2929         char name[64], new_name[128];
2930         char **evt_list = NULL, **evt_pmus = NULL;
2931         bool evt_num_known = false;
2932         struct perf_pmu *pmu = NULL;
2933
2934         if (perf_pmu__has_hybrid()) {
2935                 npmus = perf_pmu__hybrid_pmu_num();
2936                 evt_pmus = zalloc(sizeof(char *) * npmus);
2937                 if (!evt_pmus)
2938                         goto out_enomem;
2939         }
2940
2941 restart:
2942         if (evt_num_known) {
2943                 evt_list = zalloc(sizeof(char *) * evt_num);
2944                 if (!evt_list)
2945                         goto out_enomem;
2946         }
2947
2948         for (type = 0; type < PERF_COUNT_HW_CACHE_MAX; type++) {
2949                 for (op = 0; op < PERF_COUNT_HW_CACHE_OP_MAX; op++) {
2950                         /* skip invalid cache type/op combinations */
2951                         if (!evsel__is_cache_op_valid(type, op))
2952                                 continue;
2953
2954                         for (i = 0; i < PERF_COUNT_HW_CACHE_RESULT_MAX; i++) {
2955                                 unsigned int hybrid_supported = 0, j;
2956                                 bool supported;
2957
2958                                 __evsel__hw_cache_type_op_res_name(type, op, i, name, sizeof(name));
2959                                 if (event_glob != NULL && !strglobmatch(name, event_glob))
2960                                         continue;
2961
2962                                 if (!perf_pmu__has_hybrid()) {
2963                                         if (!is_event_supported(PERF_TYPE_HW_CACHE,
2964                                                                 type | (op << 8) | (i << 16))) {
2965                                                 continue;
2966                                         }
2967                                 } else {
2968                                         perf_pmu__for_each_hybrid_pmu(pmu) {
2969                                                 if (!evt_num_known) {
2970                                                         evt_num++;
2971                                                         continue;
2972                                                 }
2973
2974                                                 supported = is_event_supported(
2975                                                                         PERF_TYPE_HW_CACHE,
2976                                                                         type | (op << 8) | (i << 16) |
2977                                                                         ((__u64)pmu->type << PERF_PMU_TYPE_SHIFT));
2978                                                 if (supported) {
2979                                                         snprintf(new_name, sizeof(new_name), "%s/%s/",
2980                                                                  pmu->name, name);
2981                                                         evt_pmus[hybrid_supported] = strdup(new_name);
2982                                                         hybrid_supported++;
2983                                                 }
2984                                         }
2985
2986                                         if (hybrid_supported == 0)
2987                                                 continue;
2988                                 }
2989
2990                                 if (!evt_num_known) {
2991                                         evt_num++;
2992                                         continue;
2993                                 }
2994
2995                                 if ((hybrid_supported == 0) ||
2996                                     (hybrid_supported == npmus)) {
2997                                         evt_list[evt_i] = strdup(name);
2998                                         if (npmus > 0) {
2999                                                 for (j = 0; j < npmus; j++)
3000                                                         zfree(&evt_pmus[j]);
3001                                         }
3002                                 } else {
3003                                         for (j = 0; j < hybrid_supported; j++) {
3004                                                 evt_list[evt_i++] = evt_pmus[j];
3005                                                 evt_pmus[j] = NULL;
3006                                         }
3007                                         continue;
3008                                 }
3009
3010                                 if (evt_list[evt_i] == NULL)
3011                                         goto out_enomem;
3012                                 evt_i++;
3013                         }
3014                 }
3015         }
3016
3017         if (!evt_num_known) {
3018                 evt_num_known = true;
3019                 goto restart;
3020         }
3021
3022         for (evt_i = 0; evt_i < evt_num; evt_i++) {
3023                 if (!evt_list[evt_i])
3024                         break;
3025         }
3026
3027         evt_num = evt_i;
3028         qsort(evt_list, evt_num, sizeof(char *), cmp_string);
3029         evt_i = 0;
3030         while (evt_i < evt_num) {
3031                 if (name_only) {
3032                         printf("%s ", evt_list[evt_i++]);
3033                         continue;
3034                 }
3035                 printf("  %-50s [%s]\n", evt_list[evt_i++],
3036                                 event_type_descriptors[PERF_TYPE_HW_CACHE]);
3037         }
3038         if (evt_num && pager_in_use())
3039                 printf("\n");
3040
3041 out_free:
3042         evt_num = evt_i;
3043         for (evt_i = 0; evt_i < evt_num; evt_i++)
3044                 zfree(&evt_list[evt_i]);
3045         zfree(&evt_list);
3046
3047         for (evt_i = 0; evt_i < npmus; evt_i++)
3048                 zfree(&evt_pmus[evt_i]);
3049         zfree(&evt_pmus);
3050         return evt_num;
3051
3052 out_enomem:
3053         printf("FATAL: not enough memory to print %s\n", event_type_descriptors[PERF_TYPE_HW_CACHE]);
3054         if (evt_list)
3055                 goto out_free;
3056         return evt_num;
3057 }
3058
3059 static void print_tool_event(const char *name, const char *event_glob,
3060                              bool name_only)
3061 {
3062         if (event_glob && !strglobmatch(name, event_glob))
3063                 return;
3064         if (name_only)
3065                 printf("%s ", name);
3066         else
3067                 printf("  %-50s [%s]\n", name, "Tool event");
3069 }
3070
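/*
 * Tool events are implemented by perf itself rather than by the kernel;
 * at this point only "duration_time" is listed.
 */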
3071 void print_tool_events(const char *event_glob, bool name_only)
3072 {
3073         print_tool_event("duration_time", event_glob, name_only);
3074         if (pager_in_use())
3075                 printf("\n");
3076 }
3077
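/*
 * Generic two-pass listing for a table of event symbols: the first pass
 * counts how many of them the running kernel supports so evt_list can be
 * sized, then syms is rewound ("syms -= max") and the second pass fills the
 * list, which is sorted and printed.  Symbols with an alias are shown as
 * "name OR alias".
 */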
3078 void print_symbol_events(const char *event_glob, unsigned type,
3079                                 struct event_symbol *syms, unsigned max,
3080                                 bool name_only)
3081 {
3082         unsigned int i, evt_i = 0, evt_num = 0;
3083         char name[MAX_NAME_LEN];
3084         char **evt_list = NULL;
3085         bool evt_num_known = false;
3086
3087 restart:
3088         if (evt_num_known) {
3089                 evt_list = zalloc(sizeof(char *) * evt_num);
3090                 if (!evt_list)
3091                         goto out_enomem;
3092                 syms -= max;
3093         }
3094
3095         for (i = 0; i < max; i++, syms++) {
3096                 /*
3097                  * Newer attr.config values may not have a symbol here yet;
3098                  * the latest example was PERF_COUNT_SW_CGROUP_SWITCHES.
3099                  */
3100                 if (syms->symbol == NULL)
3101                         continue;
3102
3103                 if (event_glob != NULL && !(strglobmatch(syms->symbol, event_glob) ||
3104                       (syms->alias && strglobmatch(syms->alias, event_glob))))
3105                         continue;
3106
3107                 if (!is_event_supported(type, i))
3108                         continue;
3109
3110                 if (!evt_num_known) {
3111                         evt_num++;
3112                         continue;
3113                 }
3114
3115                 if (!name_only && strlen(syms->alias))
3116                         snprintf(name, MAX_NAME_LEN, "%s OR %s", syms->symbol, syms->alias);
3117                 else
3118                         strlcpy(name, syms->symbol, MAX_NAME_LEN);
3119
3120                 evt_list[evt_i] = strdup(name);
3121                 if (evt_list[evt_i] == NULL)
3122                         goto out_enomem;
3123                 evt_i++;
3124         }
3125
3126         if (!evt_num_known) {
3127                 evt_num_known = true;
3128                 goto restart;
3129         }
3130         qsort(evt_list, evt_num, sizeof(char *), cmp_string);
3131         evt_i = 0;
3132         while (evt_i < evt_num) {
3133                 if (name_only) {
3134                         printf("%s ", evt_list[evt_i++]);
3135                         continue;
3136                 }
3137                 printf("  %-50s [%s]\n", evt_list[evt_i++], event_type_descriptors[type]);
3138         }
3139         if (evt_num && pager_in_use())
3140                 printf("\n");
3141
3142 out_free:
3143         evt_num = evt_i;
3144         for (evt_i = 0; evt_i < evt_num; evt_i++)
3145                 zfree(&evt_list[evt_i]);
3146         zfree(&evt_list);
3147         return;
3148
3149 out_enomem:
3150         printf("FATAL: not enough memory to print %s\n", event_type_descriptors[type]);
3151         if (evt_list)
3152                 goto out_free;
3153 }
3154
3155 /*
3156  * Print the help text for the event symbols:
3157  */
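/*
 * The sections are printed in the order of the calls below: hardware and
 * software symbolic events, tool events, hardware cache events, and
 * PMU/sysfs events.  When a glob is given the function stops there;
 * otherwise it also prints the usage lines for raw (rNNN), parameterized
 * (cpu/.../) and breakpoint (mem:) events (skipped in name-only mode),
 * then tracepoints, SDT events, metric groups and, when perf is built with
 * libpfm support, libpfm events.
 */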
3158 void print_events(const char *event_glob, bool name_only, bool quiet_flag,
3159                         bool long_desc, bool details_flag, bool deprecated,
3160                         const char *pmu_name)
3161 {
3162         print_symbol_events(event_glob, PERF_TYPE_HARDWARE,
3163                             event_symbols_hw, PERF_COUNT_HW_MAX, name_only);
3164
3165         print_symbol_events(event_glob, PERF_TYPE_SOFTWARE,
3166                             event_symbols_sw, PERF_COUNT_SW_MAX, name_only);
3167         print_tool_events(event_glob, name_only);
3168
3169         print_hwcache_events(event_glob, name_only);
3170
3171         print_pmu_events(event_glob, name_only, quiet_flag, long_desc,
3172                         details_flag, deprecated, pmu_name);
3173
3174         if (event_glob != NULL)
3175                 return;
3176
3177         if (!name_only) {
3178                 printf("  %-50s [%s]\n",
3179                        "rNNN",
3180                        event_type_descriptors[PERF_TYPE_RAW]);
3181                 printf("  %-50s [%s]\n",
3182                        "cpu/t1=v1[,t2=v2,t3 ...]/modifier",
3183                        event_type_descriptors[PERF_TYPE_RAW]);
3184                 if (pager_in_use())
3185                         printf("   (see 'man perf-list' on how to encode it)\n\n");
3186
3187                 printf("  %-50s [%s]\n",
3188                        "mem:<addr>[/len][:access]",
3189                         event_type_descriptors[PERF_TYPE_BREAKPOINT]);
3190                 if (pager_in_use())
3191                         printf("\n");
3192         }
3193
3194         print_tracepoint_events(NULL, NULL, name_only);
3195
3196         print_sdt_events(NULL, NULL, name_only);
3197
3198         metricgroup__print(true, true, NULL, name_only, details_flag,
3199                            pmu_name);
3200
3201         print_libpfm_events(name_only, long_desc);
3202 }
3203
3204 int parse_events__is_hardcoded_term(struct parse_events_term *term)
3205 {
3206         return term->type_term != PARSE_EVENTS__TERM_TYPE_USER;
3207 }
3208
3209 static int new_term(struct parse_events_term **_term,
3210                     struct parse_events_term *temp,
3211                     char *str, u64 num)
3212 {
3213         struct parse_events_term *term;
3214
3215         term = malloc(sizeof(*term));
3216         if (!term)
3217                 return -ENOMEM;
3218
3219         *term = *temp;
3220         INIT_LIST_HEAD(&term->list);
3221         term->weak = false;
3222
3223         switch (term->type_val) {
3224         case PARSE_EVENTS__TERM_TYPE_NUM:
3225                 term->val.num = num;
3226                 break;
3227         case PARSE_EVENTS__TERM_TYPE_STR:
3228                 term->val.str = str;
3229                 break;
3230         default:
3231                 free(term);
3232                 return -EINVAL;
3233         }
3234
3235         *_term = term;
3236         return 0;
3237 }
3238
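/*
 * Constructors used by the bison grammar for the two kinds of term values.
 * For example, a term such as "config=0x1" in "cpu/config=0x1,name=foo/"
 * is built as a numeric term, roughly (a sketch; the real call sites in
 * parse-events.y also pass the source locations instead of NULLs):
 *
 *	struct parse_events_term *term;
 *	int err = parse_events_term__num(&term, PARSE_EVENTS__TERM_TYPE_CONFIG,
 *					 NULL, 0x1, false, NULL, NULL);
 *
 * while "name=foo" becomes a string term via parse_events_term__str().
 * Both paths end up in new_term() above, which copies the template and
 * records either the numeric or the string value.
 */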
3239 int parse_events_term__num(struct parse_events_term **term,
3240                            int type_term, char *config, u64 num,
3241                            bool no_value,
3242                            void *loc_term_, void *loc_val_)
3243 {
3244         YYLTYPE *loc_term = loc_term_;
3245         YYLTYPE *loc_val = loc_val_;
3246
3247         struct parse_events_term temp = {
3248                 .type_val  = PARSE_EVENTS__TERM_TYPE_NUM,
3249                 .type_term = type_term,
3250                 .config    = config ? : strdup(config_term_names[type_term]),
3251                 .no_value  = no_value,
3252                 .err_term  = loc_term ? loc_term->first_column : 0,
3253                 .err_val   = loc_val  ? loc_val->first_column  : 0,
3254         };
3255
3256         return new_term(term, &temp, NULL, num);
3257 }
3258
3259 int parse_events_term__str(struct parse_events_term **term,
3260                            int type_term, char *config, char *str,
3261                            void *loc_term_, void *loc_val_)
3262 {
3263         YYLTYPE *loc_term = loc_term_;
3264         YYLTYPE *loc_val = loc_val_;
3265
3266         struct parse_events_term temp = {
3267                 .type_val  = PARSE_EVENTS__TERM_TYPE_STR,
3268                 .type_term = type_term,
3269                 .config    = config,
3270                 .err_term  = loc_term ? loc_term->first_column : 0,
3271                 .err_val   = loc_val  ? loc_val->first_column  : 0,
3272         };
3273
3274         return new_term(term, &temp, str, 0);
3275 }
3276
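/*
 * Used by the grammar when a symbolic hardware event name (e.g. "cycles")
 * appears as a term or as a term value inside an event specification: the
 * result is a string term whose value is the canonical symbol from
 * event_symbols_hw[] and whose config name defaults to "event" when none
 * was given.
 */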
3277 int parse_events_term__sym_hw(struct parse_events_term **term,
3278                               char *config, unsigned idx)
3279 {
3280         struct event_symbol *sym;
3281         char *str;
3282         struct parse_events_term temp = {
3283                 .type_val  = PARSE_EVENTS__TERM_TYPE_STR,
3284                 .type_term = PARSE_EVENTS__TERM_TYPE_USER,
3285                 .config    = config,
3286         };
3287
3288         if (!temp.config) {
3289                 temp.config = strdup("event");
3290                 if (!temp.config)
3291                         return -ENOMEM;
3292         }
3293         BUG_ON(idx >= PERF_COUNT_HW_MAX);
3294         sym = &event_symbols_hw[idx];
3295
3296         str = strdup(sym->symbol);
3297         if (!str)
3298                 return -ENOMEM;
3299         return new_term(term, &temp, str, 0);
3300 }
3301
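/*
 * Deep-copy a term: the config name and, for string terms, the value are
 * duplicated so the clone can later be freed independently with
 * parse_events_term__delete().  This is what parse_events_copy_term_list()
 * below relies on when the same term list has to be attached to more than
 * one event (e.g. when a specification expands to several PMUs).
 */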
3302 int parse_events_term__clone(struct parse_events_term **new,
3303                              struct parse_events_term *term)
3304 {
3305         char *str;
3306         struct parse_events_term temp = {
3307                 .type_val  = term->type_val,
3308                 .type_term = term->type_term,
3309                 .config    = NULL,
3310                 .err_term  = term->err_term,
3311                 .err_val   = term->err_val,
3312         };
3313
3314         if (term->config) {
3315                 temp.config = strdup(term->config);
3316                 if (!temp.config)
3317                         return -ENOMEM;
3318         }
3319         if (term->type_val == PARSE_EVENTS__TERM_TYPE_NUM)
3320                 return new_term(new, &temp, NULL, term->val.num);
3321
3322         str = strdup(term->val.str);
3323         if (!str)
3324                 return -ENOMEM;
3325         return new_term(new, &temp, str, 0);
3326 }
3327
3328 void parse_events_term__delete(struct parse_events_term *term)
3329 {
3330         if (term->array.nr_ranges)
3331                 zfree(&term->array.ranges);
3332
3333         if (term->type_val != PARSE_EVENTS__TERM_TYPE_NUM)
3334                 zfree(&term->val.str);
3335
3336         zfree(&term->config);
3337         free(term);
3338 }
3339
3340 int parse_events_copy_term_list(struct list_head *old,
3341                                  struct list_head **new)
3342 {
3343         struct parse_events_term *term, *n;
3344         int ret;
3345
3346         if (!old) {
3347                 *new = NULL;
3348                 return 0;
3349         }
3350
3351         *new = malloc(sizeof(struct list_head));
3352         if (!*new)
3353                 return -ENOMEM;
3354         INIT_LIST_HEAD(*new);
3355
3356         list_for_each_entry(term, old, list) {
3357                 ret = parse_events_term__clone(&n, term);
3358                 if (ret) {
                             /* Don't leak the terms copied so far */
                             parse_events_terms__delete(*new);
                             *new = NULL;
3359                         return ret;
                 }
3360                 list_add_tail(&n->list, *new);
3361         }
3362         return 0;
3363 }
3364
3365 void parse_events_terms__purge(struct list_head *terms)
3366 {
3367         struct parse_events_term *term, *h;
3368
3369         list_for_each_entry_safe(term, h, terms, list) {
3370                 list_del_init(&term->list);
3371                 parse_events_term__delete(term);
3372         }
3373 }
3374
3375 void parse_events_terms__delete(struct list_head *terms)
3376 {
3377         if (!terms)
3378                 return;
3379         parse_events_terms__purge(terms);
3380         free(terms);
3381 }
3382
3383 void parse_events__clear_array(struct parse_events_array *a)
3384 {
3385         zfree(&a->ranges);
3386 }
3387
3388 void parse_events_evlist_error(struct parse_events_state *parse_state,
3389                                int idx, const char *str)
3390 {
3391         if (!parse_state->error)
3392                 return;
3393
3394         parse_events_error__handle(parse_state->error, idx, strdup(str), NULL);
3395 }
3396
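/*
 * Build a comma separated list of the generic config terms that are usable
 * from the command line (entries of config_term_names[] that pass
 * config_term_avail() and are not internal "<...>" placeholders), truncating
 * rather than overflowing buf.  The result feeds
 * parse_events_formats_error_string() below, which produces something like
 * "valid terms: config,config1,config2,name,period,...".
 */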
3397 static void config_terms_list(char *buf, size_t buf_sz)
3398 {
3399         int i;
3400         bool first = true;
3401
3402         buf[0] = '\0';
3403         for (i = 0; i < __PARSE_EVENTS__TERM_TYPE_NR; i++) {
3404                 const char *name = config_term_names[i];
3405
3406                 if (!config_term_avail(i, NULL))
3407                         continue;
3408                 if (!name)
3409                         continue;
3410                 if (name[0] == '<')
3411                         continue;
3412
3413                 if (strlen(buf) + strlen(name) + 2 >= buf_sz)
3414                         return;
3415
3416                 if (!first)
3417                         strcat(buf, ",");
3418                 else
3419                         first = false;
3420                 strcat(buf, name);
3421         }
3422 }
3423
3424 /*
3425  * Return a string listing the valid config terms of an event.
3426  * @additional_terms: extra terms to include, e.g. PMU sysfs format terms.
3427  */
3428 char *parse_events_formats_error_string(char *additional_terms)
3429 {
3430         char *str;
3431         /* "no-overwrite" is the longest name */
3432         char static_terms[__PARSE_EVENTS__TERM_TYPE_NR *
3433                           (sizeof("no-overwrite") - 1)];
3434
3435         config_terms_list(static_terms, sizeof(static_terms));
3436         /* valid terms */
3437         if (additional_terms) {
3438                 if (asprintf(&str, "valid terms: %s,%s",
3439                              additional_terms, static_terms) < 0)
3440                         goto fail;
3441         } else {
3442                 if (asprintf(&str, "valid terms: %s", static_terms) < 0)
3443                         goto fail;
3444         }
3445         return str;
3446
3447 fail:
3448         return NULL;
3449 }
3450
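/*
 * Thin helper for hybrid systems (e.g. the cpu_core/cpu_atom PMUs on recent
 * Intel parts): add one event for the given hybrid PMU by calling
 * __add_event() with attribute initialization enabled, no automatic stat
 * merging and no cpu list.
 */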
3451 struct evsel *parse_events__add_event_hybrid(struct list_head *list, int *idx,
3452                                              struct perf_event_attr *attr,
3453                                              const char *name,
3454                                              const char *metric_id,
3455                                              struct perf_pmu *pmu,
3456                                              struct list_head *config_terms)
3457 {
3458         return __add_event(list, idx, attr, /*init_attr=*/true, name, metric_id,
3459                            pmu, config_terms, /*auto_merge_stats=*/false,
3460                            /*cpu_list=*/NULL);
3461 }