2 * Copyright (C) 2011, Red Hat Inc, Arnaldo Carvalho de Melo <acme@redhat.com>
4 * Parts came from builtin-{top,stat,record}.c, see those files for further
7 * Released under the GPL v2. (and only v2, not any later version)
13 #include <linux/bitops.h>
14 #include <api/fs/fs.h>
15 #include <api/fs/tracing_path.h>
16 #include <traceevent/event-parse.h>
17 #include <linux/hw_breakpoint.h>
18 #include <linux/perf_event.h>
19 #include <linux/compiler.h>
20 #include <linux/err.h>
21 #include <sys/ioctl.h>
22 #include <sys/resource.h>
23 #include <sys/types.h>
26 #include "callchain.h"
33 #include "thread_map.h"
35 #include "perf_regs.h"
37 #include "trace-event.h"
40 #include "util/parse-branch-options.h"
42 #include "sane_ctype.h"
44 struct perf_missing_features perf_missing_features;
46 static clockid_t clockid;
48 static int perf_evsel__no_extra_init(struct perf_evsel *evsel __maybe_unused)
53 void __weak test_attr__ready(void) { }
55 static void perf_evsel__no_extra_fini(struct perf_evsel *evsel __maybe_unused)
61 int (*init)(struct perf_evsel *evsel);
62 void (*fini)(struct perf_evsel *evsel);
63 } perf_evsel__object = {
64 .size = sizeof(struct perf_evsel),
65 .init = perf_evsel__no_extra_init,
66 .fini = perf_evsel__no_extra_fini,
69 int perf_evsel__object_config(size_t object_size,
70 int (*init)(struct perf_evsel *evsel),
71 void (*fini)(struct perf_evsel *evsel))
77 if (perf_evsel__object.size > object_size)
80 perf_evsel__object.size = object_size;
84 perf_evsel__object.init = init;
87 perf_evsel__object.fini = fini;
92 #define FD(e, x, y) (*(int *)xyarray__entry(e->fd, x, y))
94 int __perf_evsel__sample_size(u64 sample_type)
96 u64 mask = sample_type & PERF_SAMPLE_MASK;
100 for (i = 0; i < 64; i++) {
101 if (mask & (1ULL << i))
111 * __perf_evsel__calc_id_pos - calculate id_pos.
112 * @sample_type: sample type
114 * This function returns the position of the event id (PERF_SAMPLE_ID or
115 * PERF_SAMPLE_IDENTIFIER) in a sample event i.e. in the array of struct
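 *
 * Illustration: with sample_type = PERF_SAMPLE_IDENTIFIER the id is the
 * very first u64 of the sample array, so id_pos is 0; with
 * PERF_SAMPLE_IP | PERF_SAMPLE_TID | PERF_SAMPLE_ID the id follows the
 * ip and tid words, so id_pos is 2.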
118 static int __perf_evsel__calc_id_pos(u64 sample_type)
122 if (sample_type & PERF_SAMPLE_IDENTIFIER)
125 if (!(sample_type & PERF_SAMPLE_ID))
128 if (sample_type & PERF_SAMPLE_IP)
131 if (sample_type & PERF_SAMPLE_TID)
134 if (sample_type & PERF_SAMPLE_TIME)
137 if (sample_type & PERF_SAMPLE_ADDR)
144 * __perf_evsel__calc_is_pos - calculate is_pos.
145 * @sample_type: sample type
147 * This function returns the position (counting backwards) of the event id
148 * (PERF_SAMPLE_ID or PERF_SAMPLE_IDENTIFIER) in a non-sample event i.e. if
149 * sample_id_all is used there is an id sample appended to non-sample events.
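 *
 * Illustration: with PERF_SAMPLE_IDENTIFIER the id is the last u64 of
 * the record, so is_pos is 1; with PERF_SAMPLE_ID | PERF_SAMPLE_CPU the
 * cpu/res word trails the id, so is_pos is 2.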
151 static int __perf_evsel__calc_is_pos(u64 sample_type)
155 if (sample_type & PERF_SAMPLE_IDENTIFIER)
158 if (!(sample_type & PERF_SAMPLE_ID))
161 if (sample_type & PERF_SAMPLE_CPU)
164 if (sample_type & PERF_SAMPLE_STREAM_ID)
170 void perf_evsel__calc_id_pos(struct perf_evsel *evsel)
172 evsel->id_pos = __perf_evsel__calc_id_pos(evsel->attr.sample_type);
173 evsel->is_pos = __perf_evsel__calc_is_pos(evsel->attr.sample_type);
176 void __perf_evsel__set_sample_bit(struct perf_evsel *evsel,
177 enum perf_event_sample_format bit)
179 if (!(evsel->attr.sample_type & bit)) {
180 evsel->attr.sample_type |= bit;
181 evsel->sample_size += sizeof(u64);
182 perf_evsel__calc_id_pos(evsel);
186 void __perf_evsel__reset_sample_bit(struct perf_evsel *evsel,
187 enum perf_event_sample_format bit)
189 if (evsel->attr.sample_type & bit) {
190 evsel->attr.sample_type &= ~bit;
191 evsel->sample_size -= sizeof(u64);
192 perf_evsel__calc_id_pos(evsel);
196 void perf_evsel__set_sample_id(struct perf_evsel *evsel,
197 bool can_sample_identifier)
199 if (can_sample_identifier) {
200 perf_evsel__reset_sample_bit(evsel, ID);
201 perf_evsel__set_sample_bit(evsel, IDENTIFIER);
203 perf_evsel__set_sample_bit(evsel, ID);
205 evsel->attr.read_format |= PERF_FORMAT_ID;
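
/*
 * A minimal usage sketch (hypothetical caller): the set/reset helpers
 * above keep sample_size and the id positions consistent, so callers
 * toggle bits through them rather than poking attr.sample_type
 * directly, e.g.:
 *
 *	perf_evsel__set_sample_bit(evsel, CPU);
 *	perf_evsel__reset_sample_bit(evsel, TIME);
 */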
209 * perf_evsel__is_function_event - Return whether given evsel is a function
212 * @evsel - evsel selector to be tested
214 * Return %true if event is function trace event
216 bool perf_evsel__is_function_event(struct perf_evsel *evsel)
218 #define FUNCTION_EVENT "ftrace:function"
220 return evsel->name &&
221 !strncmp(FUNCTION_EVENT, evsel->name, sizeof(FUNCTION_EVENT));
223 #undef FUNCTION_EVENT
226 void perf_evsel__init(struct perf_evsel *evsel,
227 struct perf_event_attr *attr, int idx)
230 evsel->tracking = !idx;
232 evsel->leader = evsel;
235 evsel->evlist = NULL;
237 INIT_LIST_HEAD(&evsel->node);
238 INIT_LIST_HEAD(&evsel->config_terms);
239 perf_evsel__object.init(evsel);
240 evsel->sample_size = __perf_evsel__sample_size(attr->sample_type);
241 perf_evsel__calc_id_pos(evsel);
242 evsel->cmdline_group_boundary = false;
243 evsel->metric_expr = NULL;
244 evsel->metric_name = NULL;
245 evsel->metric_events = NULL;
246 evsel->collect_stat = false;
247 evsel->pmu_name = NULL;
250 struct perf_evsel *perf_evsel__new_idx(struct perf_event_attr *attr, int idx)
252 struct perf_evsel *evsel = zalloc(perf_evsel__object.size);
255 perf_evsel__init(evsel, attr, idx);
257 if (perf_evsel__is_bpf_output(evsel)) {
258 evsel->attr.sample_type |= (PERF_SAMPLE_RAW | PERF_SAMPLE_TIME |
259 PERF_SAMPLE_CPU | PERF_SAMPLE_PERIOD);
260 evsel->attr.sample_period = 1;
263 if (perf_evsel__is_clock(evsel)) {
265 * The evsel->unit points to the static alias->unit,
266 * so it's OK to use a static string here.
268 static const char *unit = "msec";
277 static bool perf_event_can_profile_kernel(void)
279 return geteuid() == 0 || perf_event_paranoid() == -1;
282 struct perf_evsel *perf_evsel__new_cycles(bool precise)
284 struct perf_event_attr attr = {
285 .type = PERF_TYPE_HARDWARE,
286 .config = PERF_COUNT_HW_CPU_CYCLES,
287 .exclude_kernel = !perf_event_can_profile_kernel(),
289 struct perf_evsel *evsel;
291 event_attr_init(&attr);
296 * Unnamed union member, not supported as struct member named
297 * initializer in older compilers such as gcc 4.4.7
299 * Just for probing the precise_ip:
301 attr.sample_period = 1;
303 perf_event_attr__set_max_precise_ip(&attr);
305 * Now let the usual logic that sets up the perf_event_attr defaults
306 * kick in when we return, before perf_evsel__open() is called.
308 attr.sample_period = 0;
310 evsel = perf_evsel__new(&attr);
314 /* use asprintf() because free(evsel) assumes name is allocated */
315 if (asprintf(&evsel->name, "cycles%s%s%.*s",
316 (attr.precise_ip || attr.exclude_kernel) ? ":" : "",
317 attr.exclude_kernel ? "u" : "",
318 attr.precise_ip ? attr.precise_ip + 1 : 0, "ppp") < 0)
323 perf_evsel__delete(evsel);
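
/*
 * Examples of the names built by the format string above (illustrative,
 * not exhaustive): precise_ip == 3 with the kernel included yields
 * "cycles:ppp", while exclude_kernel without precise_ip yields
 * "cycles:u".
 */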
329 * Returns a pointer with the error encoded via the <linux/err.h> interface.
331 struct perf_evsel *perf_evsel__newtp_idx(const char *sys, const char *name, int idx)
333 struct perf_evsel *evsel = zalloc(perf_evsel__object.size);
339 struct perf_event_attr attr = {
340 .type = PERF_TYPE_TRACEPOINT,
341 .sample_type = (PERF_SAMPLE_RAW | PERF_SAMPLE_TIME |
342 PERF_SAMPLE_CPU | PERF_SAMPLE_PERIOD),
345 if (asprintf(&evsel->name, "%s:%s", sys, name) < 0)
348 evsel->tp_format = trace_event__tp_format(sys, name);
349 if (IS_ERR(evsel->tp_format)) {
350 err = PTR_ERR(evsel->tp_format);
354 event_attr_init(&attr);
355 attr.config = evsel->tp_format->id;
356 attr.sample_period = 1;
357 perf_evsel__init(evsel, &attr, idx);
369 const char *perf_evsel__hw_names[PERF_COUNT_HW_MAX] = {
377 "stalled-cycles-frontend",
378 "stalled-cycles-backend",
382 static const char *__perf_evsel__hw_name(u64 config)
384 if (config < PERF_COUNT_HW_MAX && perf_evsel__hw_names[config])
385 return perf_evsel__hw_names[config];
387 return "unknown-hardware";
390 static int perf_evsel__add_modifiers(struct perf_evsel *evsel, char *bf, size_t size)
392 int colon = 0, r = 0;
393 struct perf_event_attr *attr = &evsel->attr;
394 bool exclude_guest_default = false;
396 #define MOD_PRINT(context, mod) do { \
397 if (!attr->exclude_##context) { \
398 if (!colon) colon = ++r; \
399 r += scnprintf(bf + r, size - r, "%c", mod); \
402 if (attr->exclude_kernel || attr->exclude_user || attr->exclude_hv) {
403 MOD_PRINT(kernel, 'k');
404 MOD_PRINT(user, 'u');
406 exclude_guest_default = true;
409 if (attr->precise_ip) {
412 r += scnprintf(bf + r, size - r, "%.*s", attr->precise_ip, "ppp");
413 exclude_guest_default = true;
416 if (attr->exclude_host || attr->exclude_guest == exclude_guest_default) {
417 MOD_PRINT(host, 'H');
418 MOD_PRINT(guest, 'G');
426 static int perf_evsel__hw_name(struct perf_evsel *evsel, char *bf, size_t size)
428 int r = scnprintf(bf, size, "%s", __perf_evsel__hw_name(evsel->attr.config));
429 return r + perf_evsel__add_modifiers(evsel, bf + r, size - r);
432 const char *perf_evsel__sw_names[PERF_COUNT_SW_MAX] = {
445 static const char *__perf_evsel__sw_name(u64 config)
447 if (config < PERF_COUNT_SW_MAX && perf_evsel__sw_names[config])
448 return perf_evsel__sw_names[config];
449 return "unknown-software";
452 static int perf_evsel__sw_name(struct perf_evsel *evsel, char *bf, size_t size)
454 int r = scnprintf(bf, size, "%s", __perf_evsel__sw_name(evsel->attr.config));
455 return r + perf_evsel__add_modifiers(evsel, bf + r, size - r);
458 static int __perf_evsel__bp_name(char *bf, size_t size, u64 addr, u64 type)
462 r = scnprintf(bf, size, "mem:0x%" PRIx64 ":", addr);
464 if (type & HW_BREAKPOINT_R)
465 r += scnprintf(bf + r, size - r, "r");
467 if (type & HW_BREAKPOINT_W)
468 r += scnprintf(bf + r, size - r, "w");
470 if (type & HW_BREAKPOINT_X)
471 r += scnprintf(bf + r, size - r, "x");
476 static int perf_evsel__bp_name(struct perf_evsel *evsel, char *bf, size_t size)
478 struct perf_event_attr *attr = &evsel->attr;
479 int r = __perf_evsel__bp_name(bf, size, attr->bp_addr, attr->bp_type);
480 return r + perf_evsel__add_modifiers(evsel, bf + r, size - r);
483 const char *perf_evsel__hw_cache[PERF_COUNT_HW_CACHE_MAX]
484 [PERF_EVSEL__MAX_ALIASES] = {
485 { "L1-dcache", "l1-d", "l1d", "L1-data", },
486 { "L1-icache", "l1-i", "l1i", "L1-instruction", },
488 { "dTLB", "d-tlb", "Data-TLB", },
489 { "iTLB", "i-tlb", "Instruction-TLB", },
490 { "branch", "branches", "bpu", "btb", "bpc", },
494 const char *perf_evsel__hw_cache_op[PERF_COUNT_HW_CACHE_OP_MAX]
495 [PERF_EVSEL__MAX_ALIASES] = {
496 { "load", "loads", "read", },
497 { "store", "stores", "write", },
498 { "prefetch", "prefetches", "speculative-read", "speculative-load", },
501 const char *perf_evsel__hw_cache_result[PERF_COUNT_HW_CACHE_RESULT_MAX]
502 [PERF_EVSEL__MAX_ALIASES] = {
503 { "refs", "Reference", "ops", "access", },
504 { "misses", "miss", },
507 #define C(x) PERF_COUNT_HW_CACHE_##x
508 #define CACHE_READ (1 << C(OP_READ))
509 #define CACHE_WRITE (1 << C(OP_WRITE))
510 #define CACHE_PREFETCH (1 << C(OP_PREFETCH))
511 #define COP(x) (1 << x)
514 * cache operation stat
515 * L1I : Read and prefetch only
516 * ITLB and BPU : Read-only
518 static unsigned long perf_evsel__hw_cache_stat[C(MAX)] = {
519 [C(L1D)] = (CACHE_READ | CACHE_WRITE | CACHE_PREFETCH),
520 [C(L1I)] = (CACHE_READ | CACHE_PREFETCH),
521 [C(LL)] = (CACHE_READ | CACHE_WRITE | CACHE_PREFETCH),
522 [C(DTLB)] = (CACHE_READ | CACHE_WRITE | CACHE_PREFETCH),
523 [C(ITLB)] = (CACHE_READ),
524 [C(BPU)] = (CACHE_READ),
525 [C(NODE)] = (CACHE_READ | CACHE_WRITE | CACHE_PREFETCH),
528 bool perf_evsel__is_cache_op_valid(u8 type, u8 op)
530 if (perf_evsel__hw_cache_stat[type] & COP(op))
531 return true; /* valid */
533 return false; /* invalid */
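
/*
 * For instance (per the stat table above): ITLB is marked read-only,
 * so perf_evsel__is_cache_op_valid(C(ITLB), C(OP_WRITE)) is false,
 * while (C(L1D), C(OP_WRITE)) is valid.
 */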
536 int __perf_evsel__hw_cache_type_op_res_name(u8 type, u8 op, u8 result,
537 char *bf, size_t size)
540 return scnprintf(bf, size, "%s-%s-%s", perf_evsel__hw_cache[type][0],
541 perf_evsel__hw_cache_op[op][0],
542 perf_evsel__hw_cache_result[result][0]);
545 return scnprintf(bf, size, "%s-%s", perf_evsel__hw_cache[type][0],
546 perf_evsel__hw_cache_op[op][1]);
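
/*
 * Naming illustration for the two returns above: (L1D, OP_READ,
 * RESULT_MISS) yields "L1-dcache-load-misses", while a plain access
 * count falls through to the two-part form "L1-dcache-loads".
 */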
549 static int __perf_evsel__hw_cache_name(u64 config, char *bf, size_t size)
551 u8 op, result, type = (config >> 0) & 0xff;
552 const char *err = "unknown-ext-hardware-cache-type";
554 if (type >= PERF_COUNT_HW_CACHE_MAX)
557 op = (config >> 8) & 0xff;
558 err = "unknown-ext-hardware-cache-op";
559 if (op >= PERF_COUNT_HW_CACHE_OP_MAX)
562 result = (config >> 16) & 0xff;
563 err = "unknown-ext-hardware-cache-result";
564 if (result >= PERF_COUNT_HW_CACHE_RESULT_MAX)
567 err = "invalid-cache";
568 if (!perf_evsel__is_cache_op_valid(type, op))
571 return __perf_evsel__hw_cache_type_op_res_name(type, op, result, bf, size);
573 return scnprintf(bf, size, "%s", err);
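
/*
 * Decoding example for the byte layout above (config bits 0-7 type,
 * 8-15 op, 16-23 result): config == 0x10000 selects
 * L1D/OP_READ/RESULT_MISS and prints as "L1-dcache-load-misses".
 */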
576 static int perf_evsel__hw_cache_name(struct perf_evsel *evsel, char *bf, size_t size)
578 int ret = __perf_evsel__hw_cache_name(evsel->attr.config, bf, size);
579 return ret + perf_evsel__add_modifiers(evsel, bf + ret, size - ret);
582 static int perf_evsel__raw_name(struct perf_evsel *evsel, char *bf, size_t size)
584 int ret = scnprintf(bf, size, "raw 0x%" PRIx64, evsel->attr.config);
585 return ret + perf_evsel__add_modifiers(evsel, bf + ret, size - ret);
588 const char *perf_evsel__name(struct perf_evsel *evsel)
595 switch (evsel->attr.type) {
597 perf_evsel__raw_name(evsel, bf, sizeof(bf));
600 case PERF_TYPE_HARDWARE:
601 perf_evsel__hw_name(evsel, bf, sizeof(bf));
604 case PERF_TYPE_HW_CACHE:
605 perf_evsel__hw_cache_name(evsel, bf, sizeof(bf));
608 case PERF_TYPE_SOFTWARE:
609 perf_evsel__sw_name(evsel, bf, sizeof(bf));
612 case PERF_TYPE_TRACEPOINT:
613 scnprintf(bf, sizeof(bf), "%s", "unknown tracepoint");
616 case PERF_TYPE_BREAKPOINT:
617 perf_evsel__bp_name(evsel, bf, sizeof(bf));
621 scnprintf(bf, sizeof(bf), "unknown attr type: %d",
626 evsel->name = strdup(bf);
628 return evsel->name ?: "unknown";
631 const char *perf_evsel__group_name(struct perf_evsel *evsel)
633 return evsel->group_name ?: "anon group";
637 * Returns the group details for the specified leader,
638 * with the following rules.
640 * For record -e '{cycles,instructions}'
641 * 'anon group { cycles:u, instructions:u }'
643 * For record -e 'cycles,instructions' and report --group
644 * 'cycles:u, instructions:u'
646 int perf_evsel__group_desc(struct perf_evsel *evsel, char *buf, size_t size)
649 struct perf_evsel *pos;
650 const char *group_name = perf_evsel__group_name(evsel);
652 if (!evsel->forced_leader)
653 ret = scnprintf(buf, size, "%s { ", group_name);
655 ret += scnprintf(buf + ret, size - ret, "%s",
656 perf_evsel__name(evsel));
658 for_each_group_member(pos, evsel)
659 ret += scnprintf(buf + ret, size - ret, ", %s",
660 perf_evsel__name(pos));
662 if (!evsel->forced_leader)
663 ret += scnprintf(buf + ret, size - ret, " }");
668 static void __perf_evsel__config_callchain(struct perf_evsel *evsel,
669 struct record_opts *opts,
670 struct callchain_param *param)
672 bool function = perf_evsel__is_function_event(evsel);
673 struct perf_event_attr *attr = &evsel->attr;
675 perf_evsel__set_sample_bit(evsel, CALLCHAIN);
677 attr->sample_max_stack = param->max_stack;
679 if (param->record_mode == CALLCHAIN_LBR) {
680 if (!opts->branch_stack) {
681 if (attr->exclude_user) {
682 pr_warning("LBR callstack option is only available "
683 "to get user callchain information. "
684 "Falling back to framepointers.\n");
686 perf_evsel__set_sample_bit(evsel, BRANCH_STACK);
687 attr->branch_sample_type = PERF_SAMPLE_BRANCH_USER |
688 PERF_SAMPLE_BRANCH_CALL_STACK |
689 PERF_SAMPLE_BRANCH_NO_CYCLES |
690 PERF_SAMPLE_BRANCH_NO_FLAGS;
693 pr_warning("Cannot use LBR callstack with branch stack. "
694 "Falling back to framepointers.\n");
697 if (param->record_mode == CALLCHAIN_DWARF) {
699 perf_evsel__set_sample_bit(evsel, REGS_USER);
700 perf_evsel__set_sample_bit(evsel, STACK_USER);
701 attr->sample_regs_user |= PERF_REGS_MASK;
702 attr->sample_stack_user = param->dump_size;
703 attr->exclude_callchain_user = 1;
705 pr_info("Cannot use DWARF unwind for function trace event,"
706 " falling back to framepointers.\n");
711 pr_info("Disabling user space callchains for function trace event.\n");
712 attr->exclude_callchain_user = 1;
716 void perf_evsel__config_callchain(struct perf_evsel *evsel,
717 struct record_opts *opts,
718 struct callchain_param *param)
721 return __perf_evsel__config_callchain(evsel, opts, param);
725 perf_evsel__reset_callgraph(struct perf_evsel *evsel,
726 struct callchain_param *param)
728 struct perf_event_attr *attr = &evsel->attr;
730 perf_evsel__reset_sample_bit(evsel, CALLCHAIN);
731 if (param->record_mode == CALLCHAIN_LBR) {
732 perf_evsel__reset_sample_bit(evsel, BRANCH_STACK);
733 attr->branch_sample_type &= ~(PERF_SAMPLE_BRANCH_USER |
734 PERF_SAMPLE_BRANCH_CALL_STACK);
736 if (param->record_mode == CALLCHAIN_DWARF) {
737 perf_evsel__reset_sample_bit(evsel, REGS_USER);
738 perf_evsel__reset_sample_bit(evsel, STACK_USER);
742 static void apply_config_terms(struct perf_evsel *evsel,
743 struct record_opts *opts, bool track)
745 struct perf_evsel_config_term *term;
746 struct list_head *config_terms = &evsel->config_terms;
747 struct perf_event_attr *attr = &evsel->attr;
748 /* callgraph default */
749 struct callchain_param param = {
750 .record_mode = callchain_param.record_mode,
754 const char *callgraph_buf = NULL;
756 list_for_each_entry(term, config_terms, list) {
757 switch (term->type) {
758 case PERF_EVSEL__CONFIG_TERM_PERIOD:
759 if (!(term->weak && opts->user_interval != ULLONG_MAX)) {
760 attr->sample_period = term->val.period;
762 perf_evsel__reset_sample_bit(evsel, PERIOD);
765 case PERF_EVSEL__CONFIG_TERM_FREQ:
766 if (!(term->weak && opts->user_freq != UINT_MAX)) {
767 attr->sample_freq = term->val.freq;
769 perf_evsel__set_sample_bit(evsel, PERIOD);
772 case PERF_EVSEL__CONFIG_TERM_TIME:
774 perf_evsel__set_sample_bit(evsel, TIME);
776 perf_evsel__reset_sample_bit(evsel, TIME);
778 case PERF_EVSEL__CONFIG_TERM_CALLGRAPH:
779 callgraph_buf = term->val.callgraph;
781 case PERF_EVSEL__CONFIG_TERM_BRANCH:
782 if (term->val.branch && strcmp(term->val.branch, "no")) {
783 perf_evsel__set_sample_bit(evsel, BRANCH_STACK);
784 parse_branch_str(term->val.branch,
785 &attr->branch_sample_type);
787 perf_evsel__reset_sample_bit(evsel, BRANCH_STACK);
789 case PERF_EVSEL__CONFIG_TERM_STACK_USER:
790 dump_size = term->val.stack_user;
792 case PERF_EVSEL__CONFIG_TERM_MAX_STACK:
793 max_stack = term->val.max_stack;
795 case PERF_EVSEL__CONFIG_TERM_INHERIT:
797 * attr->inherit should have already been set by
798 * perf_evsel__config(). If the user explicitly set
799 * inherit using config terms, override the global
800 * opts->no_inherit setting.
802 attr->inherit = term->val.inherit ? 1 : 0;
804 case PERF_EVSEL__CONFIG_TERM_OVERWRITE:
805 attr->write_backward = term->val.overwrite ? 1 : 0;
807 case PERF_EVSEL__CONFIG_TERM_DRV_CFG:
814 /* User explicitly set per-event callgraph, clear the old setting and reset. */
815 if ((callgraph_buf != NULL) || (dump_size > 0) || max_stack) {
816 bool sample_address = false;
819 param.max_stack = max_stack;
820 if (callgraph_buf == NULL)
821 callgraph_buf = "fp";
824 /* parse callgraph parameters */
825 if (callgraph_buf != NULL) {
826 if (!strcmp(callgraph_buf, "no")) {
827 param.enabled = false;
828 param.record_mode = CALLCHAIN_NONE;
830 param.enabled = true;
831 if (parse_callchain_record(callgraph_buf, &param)) {
832 pr_err("per-event callgraph setting for %s failed. "
833 "Applying the global callgraph setting instead\n",
837 if (param.record_mode == CALLCHAIN_DWARF)
838 sample_address = true;
842 dump_size = round_up(dump_size, sizeof(u64));
843 param.dump_size = dump_size;
846 /* If the global callgraph is set, clear it */
847 if (callchain_param.enabled)
848 perf_evsel__reset_callgraph(evsel, &callchain_param);
850 /* set per-event callgraph */
852 if (sample_address) {
853 perf_evsel__set_sample_bit(evsel, ADDR);
854 perf_evsel__set_sample_bit(evsel, DATA_SRC);
855 evsel->attr.mmap_data = track;
857 perf_evsel__config_callchain(evsel, opts, &param);
862 static bool is_dummy_event(struct perf_evsel *evsel)
864 return (evsel->attr.type == PERF_TYPE_SOFTWARE) &&
865 (evsel->attr.config == PERF_COUNT_SW_DUMMY);
869 * The enable_on_exec/disabled value strategy:
871 * 1) For any type of traced program:
872 * - all independent events and group leaders are disabled
873 * - all group members are enabled
875 * Group members are ruled by group leaders. They need to
876 * be enabled, because the group scheduling relies on that.
878 * 2) For traced programs executed by perf:
879 * - all independent events and group leaders have
881 * - we don't specifically enable or disable any event during
884 * Independent events and group leaders are initially disabled
885 * and get enabled by exec. Group members are ruled by group
886 * leaders as stated in 1).
888 * 3) For traced programs attached by perf (pid/tid):
889 * - we specifically enable or disable all events during
892 * When attaching events to an already running traced
893 * program we enable/disable events explicitly, as there's
894 * no initial traced exec call.
896 void perf_evsel__config(struct perf_evsel *evsel, struct record_opts *opts,
897 struct callchain_param *callchain)
899 struct perf_evsel *leader = evsel->leader;
900 struct perf_event_attr *attr = &evsel->attr;
901 int track = evsel->tracking;
902 bool per_cpu = opts->target.default_per_cpu && !opts->target.per_thread;
904 attr->sample_id_all = perf_missing_features.sample_id_all ? 0 : 1;
905 attr->inherit = !opts->no_inherit;
906 attr->write_backward = opts->overwrite ? 1 : 0;
908 perf_evsel__set_sample_bit(evsel, IP);
909 perf_evsel__set_sample_bit(evsel, TID);
911 if (evsel->sample_read) {
912 perf_evsel__set_sample_bit(evsel, READ);
915 * We need ID even in the case of a single event, because
916 * PERF_SAMPLE_READ processes ID-specific data.
918 perf_evsel__set_sample_id(evsel, false);
921 * Apply the group format only if we belong to a group
922 * with more than one member.
924 if (leader->nr_members > 1) {
925 attr->read_format |= PERF_FORMAT_GROUP;
931 * We default some events to have a default interval, but keep
932 * it a weak assumption, overridable by the user.
934 if (!attr->sample_period || (opts->user_freq != UINT_MAX ||
935 opts->user_interval != ULLONG_MAX)) {
937 perf_evsel__set_sample_bit(evsel, PERIOD);
939 attr->sample_freq = opts->freq;
941 attr->sample_period = opts->default_interval;
946 * Disable sampling for all group members other than
947 * the leader in case the leader 'leads' the sampling.
949 if ((leader != evsel) && leader->sample_read) {
951 attr->sample_freq = 0;
952 attr->sample_period = 0;
953 attr->write_backward = 0;
954 attr->sample_id_all = 0;
957 if (opts->no_samples)
958 attr->sample_freq = 0;
960 if (opts->inherit_stat) {
961 evsel->attr.read_format |=
962 PERF_FORMAT_TOTAL_TIME_ENABLED |
963 PERF_FORMAT_TOTAL_TIME_RUNNING |
965 attr->inherit_stat = 1;
968 if (opts->sample_address) {
969 perf_evsel__set_sample_bit(evsel, ADDR);
970 attr->mmap_data = track;
974 * We don't allow user space callchains for the function trace
975 * event, due to issues with page faults while tracing the page
976 * fault handler and its overall trickiness.
978 if (perf_evsel__is_function_event(evsel))
979 evsel->attr.exclude_callchain_user = 1;
981 if (callchain && callchain->enabled && !evsel->no_aux_samples)
982 perf_evsel__config_callchain(evsel, opts, callchain);
984 if (opts->sample_intr_regs) {
985 attr->sample_regs_intr = opts->sample_intr_regs;
986 perf_evsel__set_sample_bit(evsel, REGS_INTR);
989 if (opts->sample_user_regs) {
990 attr->sample_regs_user |= opts->sample_user_regs;
991 perf_evsel__set_sample_bit(evsel, REGS_USER);
994 if (target__has_cpu(&opts->target) || opts->sample_cpu)
995 perf_evsel__set_sample_bit(evsel, CPU);
998 * When the user explicitly disabled time, don't force it here.
1000 if (opts->sample_time &&
1001 (!perf_missing_features.sample_id_all &&
1002 (!opts->no_inherit || target__has_cpu(&opts->target) || per_cpu ||
1003 opts->sample_time_set)))
1004 perf_evsel__set_sample_bit(evsel, TIME);
1006 if (opts->raw_samples && !evsel->no_aux_samples) {
1007 perf_evsel__set_sample_bit(evsel, TIME);
1008 perf_evsel__set_sample_bit(evsel, RAW);
1009 perf_evsel__set_sample_bit(evsel, CPU);
1012 if (opts->sample_address)
1013 perf_evsel__set_sample_bit(evsel, DATA_SRC);
1015 if (opts->sample_phys_addr)
1016 perf_evsel__set_sample_bit(evsel, PHYS_ADDR);
1018 if (opts->no_buffering) {
1019 attr->watermark = 0;
1020 attr->wakeup_events = 1;
1022 if (opts->branch_stack && !evsel->no_aux_samples) {
1023 perf_evsel__set_sample_bit(evsel, BRANCH_STACK);
1024 attr->branch_sample_type = opts->branch_stack;
1027 if (opts->sample_weight)
1028 perf_evsel__set_sample_bit(evsel, WEIGHT);
1032 attr->mmap2 = track && !perf_missing_features.mmap2;
1035 if (opts->record_namespaces)
1036 attr->namespaces = track;
1038 if (opts->record_switch_events)
1039 attr->context_switch = track;
1041 if (opts->sample_transaction)
1042 perf_evsel__set_sample_bit(evsel, TRANSACTION);
1044 if (opts->running_time) {
1045 evsel->attr.read_format |=
1046 PERF_FORMAT_TOTAL_TIME_ENABLED |
1047 PERF_FORMAT_TOTAL_TIME_RUNNING;
1051 * XXX see the function comment above
1053 * Disabling only independent events or group leaders,
1054 * keeping group members enabled.
1056 if (perf_evsel__is_group_leader(evsel))
1060 * Set enable_on_exec for independent events and
1061 * group leaders for traced programs executed by perf.
1063 if (target__none(&opts->target) && perf_evsel__is_group_leader(evsel) &&
1064 !opts->initial_delay)
1065 attr->enable_on_exec = 1;
1067 if (evsel->immediate) {
1069 attr->enable_on_exec = 0;
1072 clockid = opts->clockid;
1073 if (opts->use_clockid) {
1074 attr->use_clockid = 1;
1075 attr->clockid = opts->clockid;
1078 if (evsel->precise_max)
1079 perf_event_attr__set_max_precise_ip(attr);
1081 if (opts->all_user) {
1082 attr->exclude_kernel = 1;
1083 attr->exclude_user = 0;
1086 if (opts->all_kernel) {
1087 attr->exclude_kernel = 0;
1088 attr->exclude_user = 1;
1092 * Apply event-specific term settings;
1093 * these override any global configuration.
1095 apply_config_terms(evsel, opts, track);
1097 evsel->ignore_missing_thread = opts->ignore_missing_thread;
1099 /* The --period option takes precedence. */
1100 if (opts->period_set) {
1102 perf_evsel__set_sample_bit(evsel, PERIOD);
1104 perf_evsel__reset_sample_bit(evsel, PERIOD);
1108 * For initial_delay, a dummy event is added implicitly.
1109 * The software dummy event will error out with -EOPNOTSUPP
1110 * if the BRANCH_STACK bit is set.
1112 if (opts->initial_delay && is_dummy_event(evsel))
1113 perf_evsel__reset_sample_bit(evsel, BRANCH_STACK);
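
/*
 * A hypothetical caller sketch: tools configure every event once,
 * before opening, typically as
 *
 *	evlist__for_each_entry(evlist, evsel)
 *		perf_evsel__config(evsel, opts, &callchain_param);
 */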
1116 static int perf_evsel__alloc_fd(struct perf_evsel *evsel, int ncpus, int nthreads)
1118 if (evsel->system_wide)
1121 evsel->fd = xyarray__new(ncpus, nthreads, sizeof(int));
1125 for (cpu = 0; cpu < ncpus; cpu++) {
1126 for (thread = 0; thread < nthreads; thread++) {
1127 FD(evsel, cpu, thread) = -1;
1132 return evsel->fd != NULL ? 0 : -ENOMEM;
1135 static int perf_evsel__run_ioctl(struct perf_evsel *evsel,
1140 for (cpu = 0; cpu < xyarray__max_x(evsel->fd); cpu++) {
1141 for (thread = 0; thread < xyarray__max_y(evsel->fd); thread++) {
1142 int fd = FD(evsel, cpu, thread),
1143 err = ioctl(fd, ioc, arg);
1153 int perf_evsel__apply_filter(struct perf_evsel *evsel, const char *filter)
1155 return perf_evsel__run_ioctl(evsel,
1156 PERF_EVENT_IOC_SET_FILTER,
1160 int perf_evsel__set_filter(struct perf_evsel *evsel, const char *filter)
1162 char *new_filter = strdup(filter);
1164 if (new_filter != NULL) {
1165 free(evsel->filter);
1166 evsel->filter = new_filter;
1173 static int perf_evsel__append_filter(struct perf_evsel *evsel,
1174 const char *fmt, const char *filter)
1178 if (evsel->filter == NULL)
1179 return perf_evsel__set_filter(evsel, filter);
1181 if (asprintf(&new_filter, fmt, evsel->filter, filter) > 0) {
1182 free(evsel->filter);
1183 evsel->filter = new_filter;
1190 int perf_evsel__append_tp_filter(struct perf_evsel *evsel, const char *filter)
1192 return perf_evsel__append_filter(evsel, "(%s) && (%s)", filter);
1195 int perf_evsel__append_addr_filter(struct perf_evsel *evsel, const char *filter)
1197 return perf_evsel__append_filter(evsel, "%s,%s", filter);
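
/*
 * Composition example for the two wrappers above (illustrative filter
 * strings): appending "prev_pid == 1" to an existing tracepoint filter
 * "next_pid == 2" yields "(next_pid == 2) && (prev_pid == 1)", while
 * address filters are simply comma separated.
 */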
1200 int perf_evsel__enable(struct perf_evsel *evsel)
1202 return perf_evsel__run_ioctl(evsel,
1203 PERF_EVENT_IOC_ENABLE,
1207 int perf_evsel__disable(struct perf_evsel *evsel)
1209 return perf_evsel__run_ioctl(evsel,
1210 PERF_EVENT_IOC_DISABLE,
1214 int perf_evsel__alloc_id(struct perf_evsel *evsel, int ncpus, int nthreads)
1216 if (ncpus == 0 || nthreads == 0)
1219 if (evsel->system_wide)
1222 evsel->sample_id = xyarray__new(ncpus, nthreads, sizeof(struct perf_sample_id));
1223 if (evsel->sample_id == NULL)
1226 evsel->id = zalloc(ncpus * nthreads * sizeof(u64));
1227 if (evsel->id == NULL) {
1228 xyarray__delete(evsel->sample_id);
1229 evsel->sample_id = NULL;
1236 static void perf_evsel__free_fd(struct perf_evsel *evsel)
1238 xyarray__delete(evsel->fd);
1242 static void perf_evsel__free_id(struct perf_evsel *evsel)
1244 xyarray__delete(evsel->sample_id);
1245 evsel->sample_id = NULL;
1249 static void perf_evsel__free_config_terms(struct perf_evsel *evsel)
1251 struct perf_evsel_config_term *term, *h;
1253 list_for_each_entry_safe(term, h, &evsel->config_terms, list) {
1254 list_del(&term->list);
1259 void perf_evsel__close_fd(struct perf_evsel *evsel)
1263 for (cpu = 0; cpu < xyarray__max_x(evsel->fd); cpu++)
1264 for (thread = 0; thread < xyarray__max_y(evsel->fd); ++thread) {
1265 close(FD(evsel, cpu, thread));
1266 FD(evsel, cpu, thread) = -1;
1270 void perf_evsel__exit(struct perf_evsel *evsel)
1272 assert(list_empty(&evsel->node));
1273 assert(evsel->evlist == NULL);
1274 perf_evsel__free_fd(evsel);
1275 perf_evsel__free_id(evsel);
1276 perf_evsel__free_config_terms(evsel);
1277 cgroup__put(evsel->cgrp);
1278 cpu_map__put(evsel->cpus);
1279 cpu_map__put(evsel->own_cpus);
1280 thread_map__put(evsel->threads);
1281 zfree(&evsel->group_name);
1282 zfree(&evsel->name);
1283 perf_evsel__object.fini(evsel);
1286 void perf_evsel__delete(struct perf_evsel *evsel)
1288 perf_evsel__exit(evsel);
1292 void perf_evsel__compute_deltas(struct perf_evsel *evsel, int cpu, int thread,
1293 struct perf_counts_values *count)
1295 struct perf_counts_values tmp;
1297 if (!evsel->prev_raw_counts)
1301 tmp = evsel->prev_raw_counts->aggr;
1302 evsel->prev_raw_counts->aggr = *count;
1304 tmp = *perf_counts(evsel->prev_raw_counts, cpu, thread);
1305 *perf_counts(evsel->prev_raw_counts, cpu, thread) = *count;
1308 count->val = count->val - tmp.val;
1309 count->ena = count->ena - tmp.ena;
1310 count->run = count->run - tmp.run;
1313 void perf_counts_values__scale(struct perf_counts_values *count,
1314 bool scale, s8 *pscaled)
1319 if (count->run == 0) {
1322 } else if (count->run < count->ena) {
1324 count->val = (u64)((double) count->val * count->ena / count->run + 0.5);
1327 count->ena = count->run = 0;
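
/*
 * Worked example of the multiplexing scale-up above: with val = 1000,
 * ena = 200 and run = 100 the counter ran for half of its enabled
 * time, so the scaled value is 1000 * 200 / 100 = 2000.
 */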
1333 static int perf_evsel__read_size(struct perf_evsel *evsel)
1335 u64 read_format = evsel->attr.read_format;
1336 int entry = sizeof(u64); /* value */
1340 if (read_format & PERF_FORMAT_TOTAL_TIME_ENABLED)
1341 size += sizeof(u64);
1343 if (read_format & PERF_FORMAT_TOTAL_TIME_RUNNING)
1344 size += sizeof(u64);
1346 if (read_format & PERF_FORMAT_ID)
1347 entry += sizeof(u64);
1349 if (read_format & PERF_FORMAT_GROUP) {
1350 nr = evsel->nr_members;
1351 size += sizeof(u64);
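
/*
 * Size example (perf_event_open(2) read format): a single event opened
 * with PERF_FORMAT_TOTAL_TIME_ENABLED | PERF_FORMAT_TOTAL_TIME_RUNNING
 * | PERF_FORMAT_ID reads back value, time_enabled, time_running and
 * id: four u64s, i.e. 32 bytes.
 */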
1358 int perf_evsel__read(struct perf_evsel *evsel, int cpu, int thread,
1359 struct perf_counts_values *count)
1361 size_t size = perf_evsel__read_size(evsel);
1363 memset(count, 0, sizeof(*count));
1365 if (FD(evsel, cpu, thread) < 0)
1368 if (readn(FD(evsel, cpu, thread), count->values, size) <= 0)
1375 perf_evsel__read_one(struct perf_evsel *evsel, int cpu, int thread)
1377 struct perf_counts_values *count = perf_counts(evsel->counts, cpu, thread);
1379 return perf_evsel__read(evsel, cpu, thread, count);
1383 perf_evsel__set_count(struct perf_evsel *counter, int cpu, int thread,
1384 u64 val, u64 ena, u64 run)
1386 struct perf_counts_values *count;
1388 count = perf_counts(counter->counts, cpu, thread);
1393 count->loaded = true;
1397 perf_evsel__process_group_data(struct perf_evsel *leader,
1398 int cpu, int thread, u64 *data)
1400 u64 read_format = leader->attr.read_format;
1401 struct sample_read_value *v;
1402 u64 nr, ena = 0, run = 0, i;
1406 if (nr != (u64) leader->nr_members)
1409 if (read_format & PERF_FORMAT_TOTAL_TIME_ENABLED)
1412 if (read_format & PERF_FORMAT_TOTAL_TIME_RUNNING)
1415 v = (struct sample_read_value *) data;
1417 perf_evsel__set_count(leader, cpu, thread,
1418 v[0].value, ena, run);
1420 for (i = 1; i < nr; i++) {
1421 struct perf_evsel *counter;
1423 counter = perf_evlist__id2evsel(leader->evlist, v[i].id);
1427 perf_evsel__set_count(counter, cpu, thread,
1428 v[i].value, ena, run);
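
/*
 * The group layout parsed above follows the perf_event_open(2) ABI:
 *
 *	{ u64 nr;
 *	  { u64 time_enabled; } && PERF_FORMAT_TOTAL_TIME_ENABLED
 *	  { u64 time_running; } && PERF_FORMAT_TOTAL_TIME_RUNNING
 *	  { u64 value; u64 id; } cntr[nr];
 *	}
 */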
1435 perf_evsel__read_group(struct perf_evsel *leader, int cpu, int thread)
1437 struct perf_stat_evsel *ps = leader->stats;
1438 u64 read_format = leader->attr.read_format;
1439 int size = perf_evsel__read_size(leader);
1440 u64 *data = ps->group_data;
1442 if (!(read_format & PERF_FORMAT_ID))
1445 if (!perf_evsel__is_group_leader(leader))
1449 data = zalloc(size);
1453 ps->group_data = data;
1456 if (FD(leader, cpu, thread) < 0)
1459 if (readn(FD(leader, cpu, thread), data, size) <= 0)
1462 return perf_evsel__process_group_data(leader, cpu, thread, data);
1465 int perf_evsel__read_counter(struct perf_evsel *evsel, int cpu, int thread)
1467 u64 read_format = evsel->attr.read_format;
1469 if (read_format & PERF_FORMAT_GROUP)
1470 return perf_evsel__read_group(evsel, cpu, thread);
1472 return perf_evsel__read_one(evsel, cpu, thread);
1475 int __perf_evsel__read_on_cpu(struct perf_evsel *evsel,
1476 int cpu, int thread, bool scale)
1478 struct perf_counts_values count;
1479 size_t nv = scale ? 3 : 1;
1481 if (FD(evsel, cpu, thread) < 0)
1484 if (evsel->counts == NULL && perf_evsel__alloc_counts(evsel, cpu + 1, thread + 1) < 0)
1487 if (readn(FD(evsel, cpu, thread), &count, nv * sizeof(u64)) <= 0)
1490 perf_evsel__compute_deltas(evsel, cpu, thread, &count);
1491 perf_counts_values__scale(&count, scale, NULL);
1492 *perf_counts(evsel->counts, cpu, thread) = count;
1496 static int get_group_fd(struct perf_evsel *evsel, int cpu, int thread)
1498 struct perf_evsel *leader = evsel->leader;
1501 if (perf_evsel__is_group_leader(evsel))
1505 * The leader must already be processed/open;
1506 * if not, it's a bug.
1508 BUG_ON(!leader->fd);
1510 fd = FD(leader, cpu, thread);
1521 static void __p_bits(char *buf, size_t size, u64 value, struct bit_names *bits)
1523 bool first_bit = true;
1527 if (value & bits[i].bit) {
1528 buf += scnprintf(buf, size, "%s%s", first_bit ? "" : "|", bits[i].name);
1531 } while (bits[++i].name != NULL);
1534 static void __p_sample_type(char *buf, size_t size, u64 value)
1536 #define bit_name(n) { PERF_SAMPLE_##n, #n }
1537 struct bit_names bits[] = {
1538 bit_name(IP), bit_name(TID), bit_name(TIME), bit_name(ADDR),
1539 bit_name(READ), bit_name(CALLCHAIN), bit_name(ID), bit_name(CPU),
1540 bit_name(PERIOD), bit_name(STREAM_ID), bit_name(RAW),
1541 bit_name(BRANCH_STACK), bit_name(REGS_USER), bit_name(STACK_USER),
1542 bit_name(IDENTIFIER), bit_name(REGS_INTR), bit_name(DATA_SRC),
1543 bit_name(WEIGHT), bit_name(PHYS_ADDR),
1547 __p_bits(buf, size, value, bits);
1550 static void __p_branch_sample_type(char *buf, size_t size, u64 value)
1552 #define bit_name(n) { PERF_SAMPLE_BRANCH_##n, #n }
1553 struct bit_names bits[] = {
1554 bit_name(USER), bit_name(KERNEL), bit_name(HV), bit_name(ANY),
1555 bit_name(ANY_CALL), bit_name(ANY_RETURN), bit_name(IND_CALL),
1556 bit_name(ABORT_TX), bit_name(IN_TX), bit_name(NO_TX),
1557 bit_name(COND), bit_name(CALL_STACK), bit_name(IND_JUMP),
1558 bit_name(CALL), bit_name(NO_FLAGS), bit_name(NO_CYCLES),
1562 __p_bits(buf, size, value, bits);
1565 static void __p_read_format(char *buf, size_t size, u64 value)
1567 #define bit_name(n) { PERF_FORMAT_##n, #n }
1568 struct bit_names bits[] = {
1569 bit_name(TOTAL_TIME_ENABLED), bit_name(TOTAL_TIME_RUNNING),
1570 bit_name(ID), bit_name(GROUP),
1574 __p_bits(buf, size, value, bits);
1577 #define BUF_SIZE 1024
1579 #define p_hex(val) snprintf(buf, BUF_SIZE, "%#"PRIx64, (uint64_t)(val))
1580 #define p_unsigned(val) snprintf(buf, BUF_SIZE, "%"PRIu64, (uint64_t)(val))
1581 #define p_signed(val) snprintf(buf, BUF_SIZE, "%"PRId64, (int64_t)(val))
1582 #define p_sample_type(val) __p_sample_type(buf, BUF_SIZE, val)
1583 #define p_branch_sample_type(val) __p_branch_sample_type(buf, BUF_SIZE, val)
1584 #define p_read_format(val) __p_read_format(buf, BUF_SIZE, val)
1586 #define PRINT_ATTRn(_n, _f, _p) \
1590 ret += attr__fprintf(fp, _n, buf, priv);\
1594 #define PRINT_ATTRf(_f, _p) PRINT_ATTRn(#_f, _f, _p)
1596 int perf_event_attr__fprintf(FILE *fp, struct perf_event_attr *attr,
1597 attr__fprintf_f attr__fprintf, void *priv)
1602 PRINT_ATTRf(type, p_unsigned);
1603 PRINT_ATTRf(size, p_unsigned);
1604 PRINT_ATTRf(config, p_hex);
1605 PRINT_ATTRn("{ sample_period, sample_freq }", sample_period, p_unsigned);
1606 PRINT_ATTRf(sample_type, p_sample_type);
1607 PRINT_ATTRf(read_format, p_read_format);
1609 PRINT_ATTRf(disabled, p_unsigned);
1610 PRINT_ATTRf(inherit, p_unsigned);
1611 PRINT_ATTRf(pinned, p_unsigned);
1612 PRINT_ATTRf(exclusive, p_unsigned);
1613 PRINT_ATTRf(exclude_user, p_unsigned);
1614 PRINT_ATTRf(exclude_kernel, p_unsigned);
1615 PRINT_ATTRf(exclude_hv, p_unsigned);
1616 PRINT_ATTRf(exclude_idle, p_unsigned);
1617 PRINT_ATTRf(mmap, p_unsigned);
1618 PRINT_ATTRf(comm, p_unsigned);
1619 PRINT_ATTRf(freq, p_unsigned);
1620 PRINT_ATTRf(inherit_stat, p_unsigned);
1621 PRINT_ATTRf(enable_on_exec, p_unsigned);
1622 PRINT_ATTRf(task, p_unsigned);
1623 PRINT_ATTRf(watermark, p_unsigned);
1624 PRINT_ATTRf(precise_ip, p_unsigned);
1625 PRINT_ATTRf(mmap_data, p_unsigned);
1626 PRINT_ATTRf(sample_id_all, p_unsigned);
1627 PRINT_ATTRf(exclude_host, p_unsigned);
1628 PRINT_ATTRf(exclude_guest, p_unsigned);
1629 PRINT_ATTRf(exclude_callchain_kernel, p_unsigned);
1630 PRINT_ATTRf(exclude_callchain_user, p_unsigned);
1631 PRINT_ATTRf(mmap2, p_unsigned);
1632 PRINT_ATTRf(comm_exec, p_unsigned);
1633 PRINT_ATTRf(use_clockid, p_unsigned);
1634 PRINT_ATTRf(context_switch, p_unsigned);
1635 PRINT_ATTRf(write_backward, p_unsigned);
1636 PRINT_ATTRf(namespaces, p_unsigned);
1638 PRINT_ATTRn("{ wakeup_events, wakeup_watermark }", wakeup_events, p_unsigned);
1639 PRINT_ATTRf(bp_type, p_unsigned);
1640 PRINT_ATTRn("{ bp_addr, config1 }", bp_addr, p_hex);
1641 PRINT_ATTRn("{ bp_len, config2 }", bp_len, p_hex);
1642 PRINT_ATTRf(branch_sample_type, p_branch_sample_type);
1643 PRINT_ATTRf(sample_regs_user, p_hex);
1644 PRINT_ATTRf(sample_stack_user, p_unsigned);
1645 PRINT_ATTRf(clockid, p_signed);
1646 PRINT_ATTRf(sample_regs_intr, p_hex);
1647 PRINT_ATTRf(aux_watermark, p_unsigned);
1648 PRINT_ATTRf(sample_max_stack, p_unsigned);
1653 static int __open_attr__fprintf(FILE *fp, const char *name, const char *val,
1654 void *priv __maybe_unused)
1656 return fprintf(fp, " %-32s %s\n", name, val);
1659 static void perf_evsel__remove_fd(struct perf_evsel *pos,
1660 int nr_cpus, int nr_threads,
1663 for (int cpu = 0; cpu < nr_cpus; cpu++)
1664 for (int thread = thread_idx; thread < nr_threads - 1; thread++)
1665 FD(pos, cpu, thread) = FD(pos, cpu, thread + 1);
1668 static int update_fds(struct perf_evsel *evsel,
1669 int nr_cpus, int cpu_idx,
1670 int nr_threads, int thread_idx)
1672 struct perf_evsel *pos;
1674 if (cpu_idx >= nr_cpus || thread_idx >= nr_threads)
1677 evlist__for_each_entry(evsel->evlist, pos) {
1678 nr_cpus = pos != evsel ? nr_cpus : cpu_idx;
1680 perf_evsel__remove_fd(pos, nr_cpus, nr_threads, thread_idx);
1683 * Since the fds for the next evsel have not been created,
1684 * there is no need to iterate the whole event list.
1692 static bool ignore_missing_thread(struct perf_evsel *evsel,
1693 int nr_cpus, int cpu,
1694 struct thread_map *threads,
1695 int thread, int err)
1697 pid_t ignore_pid = thread_map__pid(threads, thread);
1699 if (!evsel->ignore_missing_thread)
1702 /* The system wide setup does not work with threads. */
1703 if (evsel->system_wide)
1706 /* -ESRCH is the perf event syscall errno for pids that are not found. */
1710 /* If there's only one thread, let it fail. */
1711 if (threads->nr == 1)
1715 * We should remove the fd for the missing thread first,
1716 * because thread_map__remove() will decrease threads->nr.
1718 if (update_fds(evsel, nr_cpus, cpu, threads->nr, thread))
1721 if (thread_map__remove(threads, thread))
1724 pr_warning("WARNING: Ignored open failure for pid %d\n",
1729 int perf_evsel__open(struct perf_evsel *evsel, struct cpu_map *cpus,
1730 struct thread_map *threads)
1732 int cpu, thread, nthreads;
1733 unsigned long flags = PERF_FLAG_FD_CLOEXEC;
1735 enum { NO_CHANGE, SET_TO_MAX, INCREASED_MAX } set_rlimit = NO_CHANGE;
1737 if (perf_missing_features.write_backward && evsel->attr.write_backward)
1741 static struct cpu_map *empty_cpu_map;
1743 if (empty_cpu_map == NULL) {
1744 empty_cpu_map = cpu_map__dummy_new();
1745 if (empty_cpu_map == NULL)
1749 cpus = empty_cpu_map;
1752 if (threads == NULL) {
1753 static struct thread_map *empty_thread_map;
1755 if (empty_thread_map == NULL) {
1756 empty_thread_map = thread_map__new_by_tid(-1);
1757 if (empty_thread_map == NULL)
1761 threads = empty_thread_map;
1764 if (evsel->system_wide)
1767 nthreads = threads->nr;
1769 if (evsel->fd == NULL &&
1770 perf_evsel__alloc_fd(evsel, cpus->nr, nthreads) < 0)
1774 flags |= PERF_FLAG_PID_CGROUP;
1775 pid = evsel->cgrp->fd;
1778 fallback_missing_features:
1779 if (perf_missing_features.clockid_wrong)
1780 evsel->attr.clockid = CLOCK_MONOTONIC; /* should always work */
1781 if (perf_missing_features.clockid) {
1782 evsel->attr.use_clockid = 0;
1783 evsel->attr.clockid = 0;
1785 if (perf_missing_features.cloexec)
1786 flags &= ~(unsigned long)PERF_FLAG_FD_CLOEXEC;
1787 if (perf_missing_features.mmap2)
1788 evsel->attr.mmap2 = 0;
1789 if (perf_missing_features.exclude_guest)
1790 evsel->attr.exclude_guest = evsel->attr.exclude_host = 0;
1791 if (perf_missing_features.lbr_flags)
1792 evsel->attr.branch_sample_type &= ~(PERF_SAMPLE_BRANCH_NO_FLAGS |
1793 PERF_SAMPLE_BRANCH_NO_CYCLES);
1794 if (perf_missing_features.group_read && evsel->attr.inherit)
1795 evsel->attr.read_format &= ~(PERF_FORMAT_GROUP|PERF_FORMAT_ID);
1797 if (perf_missing_features.sample_id_all)
1798 evsel->attr.sample_id_all = 0;
1801 fprintf(stderr, "%.60s\n", graph_dotted_line);
1802 fprintf(stderr, "perf_event_attr:\n");
1803 perf_event_attr__fprintf(stderr, &evsel->attr, __open_attr__fprintf, NULL);
1804 fprintf(stderr, "%.60s\n", graph_dotted_line);
1807 for (cpu = 0; cpu < cpus->nr; cpu++) {
1809 for (thread = 0; thread < nthreads; thread++) {
1812 if (!evsel->cgrp && !evsel->system_wide)
1813 pid = thread_map__pid(threads, thread);
1815 group_fd = get_group_fd(evsel, cpu, thread);
1817 pr_debug2("sys_perf_event_open: pid %d cpu %d group_fd %d flags %#lx",
1818 pid, cpus->map[cpu], group_fd, flags);
1822 fd = sys_perf_event_open(&evsel->attr, pid, cpus->map[cpu],
1825 FD(evsel, cpu, thread) = fd;
1830 if (ignore_missing_thread(evsel, cpus->nr, cpu, threads, thread, err)) {
1832 * We just removed 1 thread, so take a step
1833 * back on thread index and lower the upper
1839 /* ... and pretend like nothing has happened. */
1844 pr_debug2("\nsys_perf_event_open failed, error %d\n",
1849 pr_debug2(" = %d\n", fd);
1851 if (evsel->bpf_fd >= 0) {
1853 int bpf_fd = evsel->bpf_fd;
1856 PERF_EVENT_IOC_SET_BPF,
1858 if (err && errno != EEXIST) {
1859 pr_err("failed to attach bpf fd %d: %s\n",
1860 bpf_fd, strerror(errno));
1866 set_rlimit = NO_CHANGE;
1869 * If we succeeded but had to kill clockid, fail and
1870 * have perf_evsel__open_strerror() print us a nice
1873 if (perf_missing_features.clockid ||
1874 perf_missing_features.clockid_wrong) {
1885 * perf stat needs between 5 and 22 fds per CPU. When we run out
1886 * of them, try to increase the limits.
1888 if (err == -EMFILE && set_rlimit < INCREASED_MAX) {
1890 int old_errno = errno;
1892 if (getrlimit(RLIMIT_NOFILE, &l) == 0) {
1893 if (set_rlimit == NO_CHANGE)
1894 l.rlim_cur = l.rlim_max;
1896 l.rlim_cur = l.rlim_max + 1000;
1897 l.rlim_max = l.rlim_cur;
1899 if (setrlimit(RLIMIT_NOFILE, &l) == 0) {
1908 if (err != -EINVAL || cpu > 0 || thread > 0)
1912 * Must probe features in the order they were added to the
1913 * perf_event_attr interface.
1915 if (!perf_missing_features.write_backward && evsel->attr.write_backward) {
1916 perf_missing_features.write_backward = true;
1917 pr_debug2("switching off write_backward\n");
1919 } else if (!perf_missing_features.clockid_wrong && evsel->attr.use_clockid) {
1920 perf_missing_features.clockid_wrong = true;
1921 pr_debug2("switching off clockid\n");
1922 goto fallback_missing_features;
1923 } else if (!perf_missing_features.clockid && evsel->attr.use_clockid) {
1924 perf_missing_features.clockid = true;
1925 pr_debug2("switching off use_clockid\n");
1926 goto fallback_missing_features;
1927 } else if (!perf_missing_features.cloexec && (flags & PERF_FLAG_FD_CLOEXEC)) {
1928 perf_missing_features.cloexec = true;
1929 pr_debug2("switching off cloexec flag\n");
1930 goto fallback_missing_features;
1931 } else if (!perf_missing_features.mmap2 && evsel->attr.mmap2) {
1932 perf_missing_features.mmap2 = true;
1933 pr_debug2("switching off mmap2\n");
1934 goto fallback_missing_features;
1935 } else if (!perf_missing_features.exclude_guest &&
1936 (evsel->attr.exclude_guest || evsel->attr.exclude_host)) {
1937 perf_missing_features.exclude_guest = true;
1938 pr_debug2("switching off exclude_guest, exclude_host\n");
1939 goto fallback_missing_features;
1940 } else if (!perf_missing_features.sample_id_all) {
1941 perf_missing_features.sample_id_all = true;
1942 pr_debug2("switching off sample_id_all\n");
1943 goto retry_sample_id;
1944 } else if (!perf_missing_features.lbr_flags &&
1945 (evsel->attr.branch_sample_type &
1946 (PERF_SAMPLE_BRANCH_NO_CYCLES |
1947 PERF_SAMPLE_BRANCH_NO_FLAGS))) {
1948 perf_missing_features.lbr_flags = true;
1949 pr_debug2("switching off branch sample type no (cycles/flags)\n");
1950 goto fallback_missing_features;
1951 } else if (!perf_missing_features.group_read &&
1952 evsel->attr.inherit &&
1953 (evsel->attr.read_format & PERF_FORMAT_GROUP) &&
1954 perf_evsel__is_group_leader(evsel)) {
1955 perf_missing_features.group_read = true;
1956 pr_debug2("switching off group read\n");
1957 goto fallback_missing_features;
1961 threads->err_thread = thread;
1964 while (--thread >= 0) {
1965 close(FD(evsel, cpu, thread));
1966 FD(evsel, cpu, thread) = -1;
1969 } while (--cpu >= 0);
1973 void perf_evsel__close(struct perf_evsel *evsel)
1975 if (evsel->fd == NULL)
1978 perf_evsel__close_fd(evsel);
1979 perf_evsel__free_fd(evsel);
1982 int perf_evsel__open_per_cpu(struct perf_evsel *evsel,
1983 struct cpu_map *cpus)
1985 return perf_evsel__open(evsel, cpus, NULL);
1988 int perf_evsel__open_per_thread(struct perf_evsel *evsel,
1989 struct thread_map *threads)
1991 return perf_evsel__open(evsel, NULL, threads);
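
/*
 * Note on the two wrappers above: a counter is opened either per CPU
 * or per thread; perf_evsel__open() substitutes a dummy map for the
 * missing dimension (see the empty_cpu_map/empty_thread_map fallbacks
 * there).
 */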
1994 static int perf_evsel__parse_id_sample(const struct perf_evsel *evsel,
1995 const union perf_event *event,
1996 struct perf_sample *sample)
1998 u64 type = evsel->attr.sample_type;
1999 const u64 *array = event->sample.array;
2000 bool swapped = evsel->needs_swap;
2003 array += ((event->header.size -
2004 sizeof(event->header)) / sizeof(u64)) - 1;
2006 if (type & PERF_SAMPLE_IDENTIFIER) {
2007 sample->id = *array;
2011 if (type & PERF_SAMPLE_CPU) {
2014 /* undo swap of u64, then swap on individual u32s */
2015 u.val64 = bswap_64(u.val64);
2016 u.val32[0] = bswap_32(u.val32[0]);
2019 sample->cpu = u.val32[0];
2023 if (type & PERF_SAMPLE_STREAM_ID) {
2024 sample->stream_id = *array;
2028 if (type & PERF_SAMPLE_ID) {
2029 sample->id = *array;
2033 if (type & PERF_SAMPLE_TIME) {
2034 sample->time = *array;
2038 if (type & PERF_SAMPLE_TID) {
2041 /* undo swap of u64, then swap on individual u32s */
2042 u.val64 = bswap_64(u.val64);
2043 u.val32[0] = bswap_32(u.val32[0]);
2044 u.val32[1] = bswap_32(u.val32[1]);
2047 sample->pid = u.val32[0];
2048 sample->tid = u.val32[1];
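
/*
 * The walk above runs back-to-front because the sample_id_all trailer
 * is appended to non-sample events in declaration order; parsed in
 * reverse that is IDENTIFIER, CPU, STREAM_ID, ID, TIME, TID.
 */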
2055 static inline bool overflow(const void *endp, u16 max_size, const void *offset,
2058 return size > max_size || offset + size > endp;
2061 #define OVERFLOW_CHECK(offset, size, max_size) \
2063 if (overflow(endp, (max_size), (offset), (size))) \
2067 #define OVERFLOW_CHECK_u64(offset) \
2068 OVERFLOW_CHECK(offset, sizeof(u64), sizeof(u64))
2071 perf_event__check_size(union perf_event *event, unsigned int sample_size)
2074 * The evsel's sample_size is based on PERF_SAMPLE_MASK which includes
2075 * up to PERF_SAMPLE_PERIOD. After that overflow() must be used to
2076 * check the format does not go past the end of the event.
2078 if (sample_size + sizeof(event->header) > event->header.size)
2084 int perf_evsel__parse_sample(struct perf_evsel *evsel, union perf_event *event,
2085 struct perf_sample *data)
2087 u64 type = evsel->attr.sample_type;
2088 bool swapped = evsel->needs_swap;
2090 u16 max_size = event->header.size;
2091 const void *endp = (void *)event + max_size;
2095 * used for cross-endian analysis. See git commit 65014ab3
2096 * for why this goofiness is needed.
2100 memset(data, 0, sizeof(*data));
2101 data->cpu = data->pid = data->tid = -1;
2102 data->stream_id = data->id = data->time = -1ULL;
2103 data->period = evsel->attr.sample_period;
2104 data->cpumode = event->header.misc & PERF_RECORD_MISC_CPUMODE_MASK;
2105 data->misc = event->header.misc;
2107 data->data_src = PERF_MEM_DATA_SRC_NONE;
2109 if (event->header.type != PERF_RECORD_SAMPLE) {
2110 if (!evsel->attr.sample_id_all)
2112 return perf_evsel__parse_id_sample(evsel, event, data);
2115 array = event->sample.array;
2117 if (perf_event__check_size(event, evsel->sample_size))
2120 if (type & PERF_SAMPLE_IDENTIFIER) {
2125 if (type & PERF_SAMPLE_IP) {
2130 if (type & PERF_SAMPLE_TID) {
2133 /* undo swap of u64, then swap on individual u32s */
2134 u.val64 = bswap_64(u.val64);
2135 u.val32[0] = bswap_32(u.val32[0]);
2136 u.val32[1] = bswap_32(u.val32[1]);
2139 data->pid = u.val32[0];
2140 data->tid = u.val32[1];
2144 if (type & PERF_SAMPLE_TIME) {
2145 data->time = *array;
2149 if (type & PERF_SAMPLE_ADDR) {
2150 data->addr = *array;
2154 if (type & PERF_SAMPLE_ID) {
2159 if (type & PERF_SAMPLE_STREAM_ID) {
2160 data->stream_id = *array;
2164 if (type & PERF_SAMPLE_CPU) {
2168 /* undo swap of u64, then swap on individual u32s */
2169 u.val64 = bswap_64(u.val64);
2170 u.val32[0] = bswap_32(u.val32[0]);
2173 data->cpu = u.val32[0];
2177 if (type & PERF_SAMPLE_PERIOD) {
2178 data->period = *array;
2182 if (type & PERF_SAMPLE_READ) {
2183 u64 read_format = evsel->attr.read_format;
2185 OVERFLOW_CHECK_u64(array);
2186 if (read_format & PERF_FORMAT_GROUP)
2187 data->read.group.nr = *array;
2189 data->read.one.value = *array;
2193 if (read_format & PERF_FORMAT_TOTAL_TIME_ENABLED) {
2194 OVERFLOW_CHECK_u64(array);
2195 data->read.time_enabled = *array;
2199 if (read_format & PERF_FORMAT_TOTAL_TIME_RUNNING) {
2200 OVERFLOW_CHECK_u64(array);
2201 data->read.time_running = *array;
2205 /* PERF_FORMAT_ID is forced for PERF_SAMPLE_READ */
2206 if (read_format & PERF_FORMAT_GROUP) {
2207 const u64 max_group_nr = UINT64_MAX /
2208 sizeof(struct sample_read_value);
2210 if (data->read.group.nr > max_group_nr)
2212 sz = data->read.group.nr *
2213 sizeof(struct sample_read_value);
2214 OVERFLOW_CHECK(array, sz, max_size);
2215 data->read.group.values =
2216 (struct sample_read_value *)array;
2217 array = (void *)array + sz;
2219 OVERFLOW_CHECK_u64(array);
2220 data->read.one.id = *array;
2225 if (evsel__has_callchain(evsel)) {
2226 const u64 max_callchain_nr = UINT64_MAX / sizeof(u64);
2228 OVERFLOW_CHECK_u64(array);
2229 data->callchain = (struct ip_callchain *)array++;
2230 if (data->callchain->nr > max_callchain_nr)
2232 sz = data->callchain->nr * sizeof(u64);
2233 OVERFLOW_CHECK(array, sz, max_size);
2234 array = (void *)array + sz;
2237 if (type & PERF_SAMPLE_RAW) {
2238 OVERFLOW_CHECK_u64(array);
2242 * Undo swap of u64, then swap on individual u32s,
2243 * get the size of the raw area and undo all of the
2244 * swap. The pevent interface handles endianness by
2248 u.val64 = bswap_64(u.val64);
2249 u.val32[0] = bswap_32(u.val32[0]);
2250 u.val32[1] = bswap_32(u.val32[1]);
2252 data->raw_size = u.val32[0];
2255 * The raw data is aligned on 64 bits including the
2256 * u32 size, so it's safe to use mem_bswap_64.
2259 mem_bswap_64((void *) array, data->raw_size);
2261 array = (void *)array + sizeof(u32);
2263 OVERFLOW_CHECK(array, data->raw_size, max_size);
2264 data->raw_data = (void *)array;
2265 array = (void *)array + data->raw_size;
2268 if (type & PERF_SAMPLE_BRANCH_STACK) {
2269 const u64 max_branch_nr = UINT64_MAX /
2270 sizeof(struct branch_entry);
2272 OVERFLOW_CHECK_u64(array);
2273 data->branch_stack = (struct branch_stack *)array++;
2275 if (data->branch_stack->nr > max_branch_nr)
2277 sz = data->branch_stack->nr * sizeof(struct branch_entry);
2278 OVERFLOW_CHECK(array, sz, max_size);
2279 array = (void *)array + sz;
2282 if (type & PERF_SAMPLE_REGS_USER) {
2283 OVERFLOW_CHECK_u64(array);
2284 data->user_regs.abi = *array;
2287 if (data->user_regs.abi) {
2288 u64 mask = evsel->attr.sample_regs_user;
2290 sz = hweight_long(mask) * sizeof(u64);
2291 OVERFLOW_CHECK(array, sz, max_size);
2292 data->user_regs.mask = mask;
2293 data->user_regs.regs = (u64 *)array;
2294 array = (void *)array + sz;
2298 if (type & PERF_SAMPLE_STACK_USER) {
2299 OVERFLOW_CHECK_u64(array);
2302 data->user_stack.offset = ((char *)(array - 1)
2306 data->user_stack.size = 0;
2308 OVERFLOW_CHECK(array, sz, max_size);
2309 data->user_stack.data = (char *)array;
2310 array = (void *)array + sz;
2311 OVERFLOW_CHECK_u64(array);
2312 data->user_stack.size = *array++;
2313 if (WARN_ONCE(data->user_stack.size > sz,
2314 "user stack dump failure\n"))
2319 if (type & PERF_SAMPLE_WEIGHT) {
2320 OVERFLOW_CHECK_u64(array);
2321 data->weight = *array;
2325 if (type & PERF_SAMPLE_DATA_SRC) {
2326 OVERFLOW_CHECK_u64(array);
2327 data->data_src = *array;
2331 if (type & PERF_SAMPLE_TRANSACTION) {
2332 OVERFLOW_CHECK_u64(array);
2333 data->transaction = *array;
2337 data->intr_regs.abi = PERF_SAMPLE_REGS_ABI_NONE;
2338 if (type & PERF_SAMPLE_REGS_INTR) {
2339 OVERFLOW_CHECK_u64(array);
2340 data->intr_regs.abi = *array;
2343 if (data->intr_regs.abi != PERF_SAMPLE_REGS_ABI_NONE) {
2344 u64 mask = evsel->attr.sample_regs_intr;
2346 sz = hweight_long(mask) * sizeof(u64);
2347 OVERFLOW_CHECK(array, sz, max_size);
2348 data->intr_regs.mask = mask;
2349 data->intr_regs.regs = (u64 *)array;
2350 array = (void *)array + sz;
2354 data->phys_addr = 0;
2355 if (type & PERF_SAMPLE_PHYS_ADDR) {
2356 data->phys_addr = *array;
2363 int perf_evsel__parse_sample_timestamp(struct perf_evsel *evsel,
2364 union perf_event *event,
2367 u64 type = evsel->attr.sample_type;
2370 if (!(type & PERF_SAMPLE_TIME))
2373 if (event->header.type != PERF_RECORD_SAMPLE) {
2374 struct perf_sample data = {
2378 if (!evsel->attr.sample_id_all)
2380 if (perf_evsel__parse_id_sample(evsel, event, &data))
2383 *timestamp = data.time;
2387 array = event->sample.array;
2389 if (perf_event__check_size(event, evsel->sample_size))
2392 if (type & PERF_SAMPLE_IDENTIFIER)
2395 if (type & PERF_SAMPLE_IP)
2398 if (type & PERF_SAMPLE_TID)
2401 if (type & PERF_SAMPLE_TIME)
2402 *timestamp = *array;
size_t perf_event__sample_event_size(const struct perf_sample *sample, u64 type,
				     u64 read_format)
{
	size_t sz, result = sizeof(struct sample_event);

	if (type & PERF_SAMPLE_IDENTIFIER)
		result += sizeof(u64);
	if (type & PERF_SAMPLE_IP)
		result += sizeof(u64);
	if (type & PERF_SAMPLE_TID)
		result += sizeof(u64);
	if (type & PERF_SAMPLE_TIME)
		result += sizeof(u64);
	if (type & PERF_SAMPLE_ADDR)
		result += sizeof(u64);
	if (type & PERF_SAMPLE_ID)
		result += sizeof(u64);
	if (type & PERF_SAMPLE_STREAM_ID)
		result += sizeof(u64);
	if (type & PERF_SAMPLE_CPU)
		result += sizeof(u64);
	if (type & PERF_SAMPLE_PERIOD)
		result += sizeof(u64);

	if (type & PERF_SAMPLE_READ) {
		result += sizeof(u64);
		if (read_format & PERF_FORMAT_TOTAL_TIME_ENABLED)
			result += sizeof(u64);
		if (read_format & PERF_FORMAT_TOTAL_TIME_RUNNING)
			result += sizeof(u64);
		/* PERF_FORMAT_ID is forced for PERF_SAMPLE_READ */
		if (read_format & PERF_FORMAT_GROUP) {
			sz = sample->read.group.nr *
			     sizeof(struct sample_read_value);
			result += sz;
		} else {
			result += sizeof(u64);
		}
	}

	if (type & PERF_SAMPLE_CALLCHAIN) {
		sz = (sample->callchain->nr + 1) * sizeof(u64);
		result += sz;
	}

	if (type & PERF_SAMPLE_RAW) {
		result += sizeof(u32);
		result += sample->raw_size;
	}

	if (type & PERF_SAMPLE_BRANCH_STACK) {
		sz = sample->branch_stack->nr * sizeof(struct branch_entry);
		sz += sizeof(u64);	/* nr */
		result += sz;
	}

	if (type & PERF_SAMPLE_REGS_USER) {
		if (sample->user_regs.abi) {
			result += sizeof(u64);	/* abi */
			sz = hweight_long(sample->user_regs.mask) * sizeof(u64);
			result += sz;
		} else {
			result += sizeof(u64);
		}
	}

	if (type & PERF_SAMPLE_STACK_USER) {
		sz = sample->user_stack.size;
		result += sizeof(u64);	/* size */
		if (sz) {
			result += sz;
			result += sizeof(u64);	/* dyn_size */
		}
	}

	if (type & PERF_SAMPLE_WEIGHT)
		result += sizeof(u64);
	if (type & PERF_SAMPLE_DATA_SRC)
		result += sizeof(u64);
	if (type & PERF_SAMPLE_TRANSACTION)
		result += sizeof(u64);

	if (type & PERF_SAMPLE_REGS_INTR) {
		if (sample->intr_regs.abi) {
			result += sizeof(u64);	/* abi */
			sz = hweight_long(sample->intr_regs.mask) * sizeof(u64);
			result += sz;
		} else {
			result += sizeof(u64);
		}
	}

	if (type & PERF_SAMPLE_PHYS_ADDR)
		result += sizeof(u64);

	return result;
}
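
/*
 * perf_event__synthesize_sample - the inverse of perf_evsel__parse_sample():
 * serialize @sample into the record payload of @event.
 *
 * A minimal caller sketch (illustrative only; deliver() stands in for a
 * hypothetical consumer and is not part of this file):
 *
 *	size_t sz = perf_event__sample_event_size(sample, type, read_format);
 *	union perf_event *ev = zalloc(sz);
 *
 *	if (ev) {
 *		ev->header.type = PERF_RECORD_SAMPLE;
 *		ev->header.size = sz;
 *		if (!perf_event__synthesize_sample(ev, type, read_format, sample))
 *			deliver(ev);
 *		free(ev);
 *	}
 */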
int perf_event__synthesize_sample(union perf_event *event, u64 type,
				  u64 read_format,
				  const struct perf_sample *sample)
{
	u64 *array;
	size_t sz;
	/*
	 * used for cross-endian analysis. See git commit 65014ab3
	 * for why this goofiness is needed.
	 */
	union u64_swap u;

	array = event->sample.array;

	if (type & PERF_SAMPLE_IDENTIFIER)
		*array++ = sample->id;
	if (type & PERF_SAMPLE_IP)
		*array++ = sample->ip;

	if (type & PERF_SAMPLE_TID) {
		u.val32[0] = sample->pid;
		u.val32[1] = sample->tid;
		*array++ = u.val64;
	}

	if (type & PERF_SAMPLE_TIME)
		*array++ = sample->time;
	if (type & PERF_SAMPLE_ADDR)
		*array++ = sample->addr;
	if (type & PERF_SAMPLE_ID)
		*array++ = sample->id;
	if (type & PERF_SAMPLE_STREAM_ID)
		*array++ = sample->stream_id;

	if (type & PERF_SAMPLE_CPU) {
		u.val32[0] = sample->cpu;
		u.val32[1] = 0;
		*array++ = u.val64;
	}

	if (type & PERF_SAMPLE_PERIOD)
		*array++ = sample->period;

	if (type & PERF_SAMPLE_READ) {
		if (read_format & PERF_FORMAT_GROUP)
			*array++ = sample->read.group.nr;
		else
			*array++ = sample->read.one.value;

		if (read_format & PERF_FORMAT_TOTAL_TIME_ENABLED)
			*array++ = sample->read.time_enabled;
		if (read_format & PERF_FORMAT_TOTAL_TIME_RUNNING)
			*array++ = sample->read.time_running;

		/* PERF_FORMAT_ID is forced for PERF_SAMPLE_READ */
		if (read_format & PERF_FORMAT_GROUP) {
			sz = sample->read.group.nr *
			     sizeof(struct sample_read_value);
			memcpy(array, sample->read.group.values, sz);
			array = (void *)array + sz;
		} else {
			*array++ = sample->read.one.id;
		}
	}

	if (type & PERF_SAMPLE_CALLCHAIN) {
		sz = (sample->callchain->nr + 1) * sizeof(u64);
		memcpy(array, sample->callchain, sz);
		array = (void *)array + sz;
	}

	if (type & PERF_SAMPLE_RAW) {
		u.val32[0] = sample->raw_size;
		*array = u.val64;
		array = (void *)array + sizeof(u32);
		memcpy(array, sample->raw_data, sample->raw_size);
		array = (void *)array + sample->raw_size;
	}

	if (type & PERF_SAMPLE_BRANCH_STACK) {
		sz = sample->branch_stack->nr * sizeof(struct branch_entry);
		sz += sizeof(u64);	/* nr */
		memcpy(array, sample->branch_stack, sz);
		array = (void *)array + sz;
	}

	if (type & PERF_SAMPLE_REGS_USER) {
		if (sample->user_regs.abi) {
			*array++ = sample->user_regs.abi;
			sz = hweight_long(sample->user_regs.mask) * sizeof(u64);
			memcpy(array, sample->user_regs.regs, sz);
			array = (void *)array + sz;
		} else {
			*array++ = 0;
		}
	}

	if (type & PERF_SAMPLE_STACK_USER) {
		sz = sample->user_stack.size;
		*array++ = sz;
		if (sz) {
			memcpy(array, sample->user_stack.data, sz);
			array = (void *)array + sz;
			*array++ = sz;	/* dyn_size */
		}
	}

	if (type & PERF_SAMPLE_WEIGHT)
		*array++ = sample->weight;
	if (type & PERF_SAMPLE_DATA_SRC)
		*array++ = sample->data_src;
	if (type & PERF_SAMPLE_TRANSACTION)
		*array++ = sample->transaction;

	if (type & PERF_SAMPLE_REGS_INTR) {
		if (sample->intr_regs.abi) {
			*array++ = sample->intr_regs.abi;
			sz = hweight_long(sample->intr_regs.mask) * sizeof(u64);
			memcpy(array, sample->intr_regs.regs, sz);
			array = (void *)array + sz;
		} else {
			*array++ = 0;
		}
	}

	if (type & PERF_SAMPLE_PHYS_ADDR)
		*array++ = sample->phys_addr;

	return 0;
}
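
/*
 * Tracepoint field access: for tracepoint evsels, evsel->tp_format holds
 * the parsed format description of the event, so named fields can be
 * located inside the raw_data payload of a sample.
 */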
struct format_field *perf_evsel__field(struct perf_evsel *evsel, const char *name)
{
	return tep_find_field(evsel->tp_format, name);
}

void *perf_evsel__rawptr(struct perf_evsel *evsel, struct perf_sample *sample,
			 const char *name)
{
	struct format_field *field = perf_evsel__field(evsel, name);
	int offset;

	if (!field)
		return NULL;

	offset = field->offset;
	if (field->flags & FIELD_IS_DYNAMIC) {
		/* a dynamic field's payload slot encodes (len << 16) | offset */
		offset = *(int *)(sample->raw_data + field->offset);
		offset &= 0xffff;
	}

	return sample->raw_data + offset;
}
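
/*
 * Illustrative sketch (the field names are assumptions that depend on the
 * tracepoint's format file, e.g. sched:sched_switch):
 *
 *	u64 next_pid = perf_evsel__intval(evsel, sample, "next_pid");
 *	char *comm = perf_evsel__rawptr(evsel, sample, "next_comm");
 *
 * Missing fields yield 0 (perf_evsel__intval) or NULL (perf_evsel__rawptr).
 */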
u64 format_field__intval(struct format_field *field, struct perf_sample *sample,
			 bool needs_swap)
{
	u64 value;
	void *ptr = sample->raw_data + field->offset;

	switch (field->size) {
	case 1: return *(u8 *)ptr;
	case 2: value = *(u16 *)ptr; break;
	case 4: value = *(u32 *)ptr; break;
	case 8: memcpy(&value, ptr, sizeof(u64)); break;
	default: return 0;
	}

	if (!needs_swap)
		return value;

	switch (field->size) {
	case 2: return bswap_16(value);
	case 4: return bswap_32(value);
	case 8: return bswap_64(value);
	default: return 0;
	}
}

u64 perf_evsel__intval(struct perf_evsel *evsel, struct perf_sample *sample,
		       const char *name)
{
	struct format_field *field = perf_evsel__field(evsel, name);

	return field ? format_field__intval(field, sample, evsel->needs_swap) : 0;
}
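
/*
 * perf_evsel__fallback - given the errno in @err, rewrite the evsel to a
 * configuration that may still work: cycles falls back to the software
 * cpu-clock event, and EACCES under a restrictive perf_event_paranoid
 * appends the 'u' (exclude kernel) modifier. Returns true if the caller
 * should retry the open; @msg receives a user-visible note about the
 * fallback taken.
 */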
bool perf_evsel__fallback(struct perf_evsel *evsel, int err,
			  char *msg, size_t msgsize)
{
	int paranoid;

	if ((err == ENOENT || err == ENXIO || err == ENODEV) &&
	    evsel->attr.type == PERF_TYPE_HARDWARE &&
	    evsel->attr.config == PERF_COUNT_HW_CPU_CYCLES) {
		/*
		 * If it's cycles then fall back to hrtimer based
		 * cpu-clock-tick sw counter, which is always available even if
		 * no PMU support.
		 *
		 * PPC returns ENXIO until 2.6.37 (behavior changed with commit
		 * b0a873e).
		 */
		scnprintf(msg, msgsize, "%s",
			  "The cycles event is not supported, trying to fall back to cpu-clock-ticks");

		evsel->attr.type = PERF_TYPE_SOFTWARE;
		evsel->attr.config = PERF_COUNT_SW_CPU_CLOCK;

		zfree(&evsel->name);
		return true;
	} else if (err == EACCES && !evsel->attr.exclude_kernel &&
		   (paranoid = perf_event_paranoid()) > 1) {
		const char *name = perf_evsel__name(evsel);
		char *new_name;
		const char *sep = ":";

		/* Is the separator already in the name? */
		if (strchr(name, '/') ||
		    strchr(name, ':'))
			sep = "";

		if (asprintf(&new_name, "%s%su", name, sep) < 0)
			return false;

		if (evsel->name)
			free(evsel->name);
		evsel->name = new_name;
		scnprintf(msg, msgsize,
			  "kernel.perf_event_paranoid=%d, trying to fall back to excluding kernel samples", paranoid);
		evsel->attr.exclude_kernel = 1;

		return true;
	}

	return false;
}
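
/* Return true if a process named @name is found under /proc/<pid>/comm. */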
static bool find_process(const char *name)
{
	size_t len = strlen(name);
	DIR *dir;
	struct dirent *d;
	int ret = -1;

	dir = opendir(procfs__mountpoint());
	if (!dir)
		return false;

	/* Walk through the directory. */
	while (ret && (d = readdir(dir)) != NULL) {
		char path[PATH_MAX];
		char *data;
		size_t size;

		if ((d->d_type != DT_DIR) ||
		     !strcmp(".", d->d_name) ||
		     !strcmp("..", d->d_name))
			continue;

		scnprintf(path, sizeof(path), "%s/%s/comm",
			  procfs__mountpoint(), d->d_name);

		if (filename__read_str(path, &data, &size))
			continue;

		ret = strncmp(name, data, len);
		free(data);
	}

	closedir(dir);
	return ret ? false : true;
}
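
/*
 * perf_evsel__open_strerror - format a human readable explanation of the
 * errno @err returned by sys_perf_event_open() into @msg (at most @size
 * bytes), taking the evsel configuration and the @target into account.
 */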
int perf_evsel__open_strerror(struct perf_evsel *evsel, struct target *target,
			      int err, char *msg, size_t size)
{
	char sbuf[STRERR_BUFSIZE];
	int printed = 0;

	switch (err) {
	case EPERM:
	case EACCES:
		printed = scnprintf(msg, size,
				    "No permission to enable %s event.\n\n",
				    perf_evsel__name(evsel));

		return scnprintf(msg + printed, size - printed,
		 "You may not have permission to collect %sstats.\n\n"
		 "Consider tweaking /proc/sys/kernel/perf_event_paranoid,\n"
		 "which controls use of the performance events system by\n"
		 "unprivileged users (without CAP_SYS_ADMIN).\n\n"
		 "The current value is %d:\n\n"
		 "  -1: Allow use of (almost) all events by all users\n"
		 "      Ignore mlock limit after perf_event_mlock_kb without CAP_IPC_LOCK\n"
		 ">= 0: Disallow ftrace function tracepoint by users without CAP_SYS_ADMIN\n"
		 "      Disallow raw tracepoint access by users without CAP_SYS_ADMIN\n"
		 ">= 1: Disallow CPU event access by users without CAP_SYS_ADMIN\n"
		 ">= 2: Disallow kernel profiling by users without CAP_SYS_ADMIN\n\n"
		 "To make this setting permanent, edit /etc/sysctl.conf too, e.g.:\n\n"
		 "	kernel.perf_event_paranoid = -1\n",
				 target->system_wide ? "system-wide " : "",
				 perf_event_paranoid());
	case ENOENT:
		return scnprintf(msg, size, "The %s event is not supported.",
				 perf_evsel__name(evsel));
	case EMFILE:
		return scnprintf(msg, size, "%s",
			 "Too many events are opened.\n"
			 "Probably the maximum number of open file descriptors has been reached.\n"
			 "Hint: Try again after reducing the number of events.\n"
			 "Hint: Try increasing the limit with 'ulimit -n <limit>'");
	case ENOMEM:
		if (evsel__has_callchain(evsel) &&
		    access("/proc/sys/kernel/perf_event_max_stack", F_OK) == 0)
			return scnprintf(msg, size,
					 "Not enough memory to setup event with callchain.\n"
					 "Hint: Try tweaking /proc/sys/kernel/perf_event_max_stack\n"
					 "Hint: Current value: %d", sysctl__max_stack());
		break;
	case ENODEV:
		if (target->cpu_list)
			return scnprintf(msg, size, "%s",
	 "No such device - did you specify an out-of-range profile CPU?");
		break;
	case EOPNOTSUPP:
		if (evsel->attr.sample_period != 0)
			return scnprintf(msg, size,
	"%s: PMU Hardware doesn't support sampling/overflow-interrupts. Try 'perf stat'",
					 perf_evsel__name(evsel));
		if (evsel->attr.precise_ip)
			return scnprintf(msg, size, "%s",
	"'precise' request may not be supported. Try removing 'p' modifier.");
#if defined(__i386__) || defined(__x86_64__)
		if (evsel->attr.type == PERF_TYPE_HARDWARE)
			return scnprintf(msg, size, "%s",
	"No hardware sampling interrupt available.\n");
#endif
		break;
	case EBUSY:
		if (find_process("oprofiled"))
			return scnprintf(msg, size,
	"The PMU counters are busy/taken by another profiler.\n"
	"We found oprofile daemon running, please stop it and try again.");
		break;
	case EINVAL:
		if (evsel->attr.write_backward && perf_missing_features.write_backward)
			return scnprintf(msg, size,
	"Reading from overwrite event is not supported by this kernel.");
		if (perf_missing_features.clockid)
			return scnprintf(msg, size, "clockid feature not supported.");
		if (perf_missing_features.clockid_wrong)
			return scnprintf(msg, size, "wrong clockid (%d).", clockid);
		break;
	default:
		break;
	}

	return scnprintf(msg, size,
	"The sys_perf_event_open() syscall returned with %d (%s) for event (%s).\n"
	"/bin/dmesg | grep -i perf may provide additional information.\n",
			 err, str_error_r(err, sbuf, sizeof(sbuf)),
			 perf_evsel__name(evsel));
}
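
/* The perf_env of the evlist this evsel is attached to, if any. */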
struct perf_env *perf_evsel__env(struct perf_evsel *evsel)
{
	if (evsel && evsel->evlist)
		return evsel->evlist->env;
	return NULL;
}