1 // SPDX-License-Identifier: GPL-2.0
5 #include "parse-events.h"
10 #include <subcmd/parse-options.h>
11 #include <perf/cpumap.h>
14 #include "../perf-sys.h"
16 typedef void (*setup_probe_fn_t)(struct evsel *evsel);
/*
 * perf_do_probe_api - check whether the kernel accepts an attribute tweak.
 *
 * Builds a throw-away evlist from the event string @str, applies the
 * probe callback @fn to the first evsel's attr, then attempts
 * sys_perf_event_open() on @cpu.  err starts at -EAGAIN so callers can
 * retry with a different event string when this one is unavailable.
 *
 * NOTE(review): this excerpt is truncated — opening/closing braces, the
 * fn(evsel) invocation, error paths and the close(fd) are not visible
 * here; confirm against the full file before changing logic.
 */
18 static int perf_do_probe_api(setup_probe_fn_t fn, int cpu, const char *str)
20 struct evlist *evlist;
/* Cloexec flag support is itself probed once elsewhere. */
22 unsigned long flags = perf_event_open_cloexec_flag();
23 int err = -EAGAIN, fd;
/* static: the pid chosen on the first call is reused on later calls. */
24 static pid_t pid = -1;
26 evlist = evlist__new();
30 if (parse_events(evlist, str, NULL))
33 evsel = evlist__first(evlist);
/* First attempt, normally pid == -1 (CPU-wide on @cpu). */
36 fd = sys_perf_event_open(&evsel->core.attr, pid, cpu, -1, flags);
/* EACCES with pid == -1 means no CPU-wide permission; presumably the
 * missing lines fall back to a per-task open before retrying. */
38 if (pid == -1 && errno == EACCES) {
50 fd = sys_perf_event_open(&evsel->core.attr, pid, cpu, -1, flags);
60 evlist__delete(evlist);
/*
 * perf_probe_api - run probe callback @fn against ubiquitous events.
 *
 * Tries "cycles:u", then "instructions:u", then "cpu-clock:u" on one CPU
 * until perf_do_probe_api() stops returning -EAGAIN or the list is
 * exhausted, i.e. until an event exists to carry the probed attribute.
 *
 * NOTE(review): truncated excerpt — the cpu/i/ret declarations, the CPU
 * selection from the cpu map, and the return statement are not visible.
 */
64 static bool perf_probe_api(setup_probe_fn_t fn)
/* NULL-terminated fallback list of events expected on any system. */
66 const char *try[] = {"cycles:u", "instructions:u", "cpu-clock:u", NULL};
67 struct perf_cpu_map *cpus;
70 cpus = perf_cpu_map__new(NULL);
74 perf_cpu_map__put(cpus);
/* -EAGAIN from the probe means "this event was unusable, try the next". */
77 ret = perf_do_probe_api(fn, cpu, try[i++]);
80 } while (ret == -EAGAIN && try[i]);
85 static void perf_probe_sample_identifier(struct evsel *evsel)
87 evsel->core.attr.sample_type |= PERF_SAMPLE_IDENTIFIER;
90 static void perf_probe_comm_exec(struct evsel *evsel)
92 evsel->core.attr.comm_exec = 1;
95 static void perf_probe_context_switch(struct evsel *evsel)
97 evsel->core.attr.context_switch = 1;
100 bool perf_can_sample_identifier(void)
102 return perf_probe_api(perf_probe_sample_identifier);
105 static bool perf_can_comm_exec(void)
107 return perf_probe_api(perf_probe_comm_exec);
110 bool perf_can_record_switch_events(void)
112 return perf_probe_api(perf_probe_context_switch);
/*
 * perf_can_record_cpu_wide - may we open a CPU-wide (pid == -1) event?
 *
 * Opens a software cpu-clock event for all tasks on one CPU; if the open
 * succeeds, system-wide recording is permitted for this user.
 *
 * NOTE(review): truncated excerpt — the attr initializer's closing brace,
 * the cpu/fd declarations, the CPU selection, close(fd) and the return
 * are not visible here.
 */
115 bool perf_can_record_cpu_wide(void)
117 struct perf_event_attr attr = {
118 .type = PERF_TYPE_SOFTWARE,
119 .config = PERF_COUNT_SW_CPU_CLOCK,
122 struct perf_cpu_map *cpus;
125 cpus = perf_cpu_map__new(NULL);
129 perf_cpu_map__put(cpus);
/* pid == -1 with cpu >= 0: all tasks on one CPU; needs wide permission. */
131 fd = sys_perf_event_open(&attr, -1, cpu, -1, 0);
/* NOTE(review): truncated excerpt — the comment opener above, the fd
 * declaration, close(fd) and the return statements are not visible. */
140 * Architectures are expected to know if AUX area sampling is supported by the
141 * hardware. Here we check for kernel support.
143 bool perf_can_aux_sample(void)
145 struct perf_event_attr attr = {
/* Explicit size tells the kernel how much of attr userspace filled in. */
146 .size = sizeof(struct perf_event_attr),
149 * Non-zero value causes the kernel to calculate the effective
150 * attribute size up to that byte.
152 .aux_sample_size = 1,
/* Dummy open on CPU 0 purely to see how the kernel reacts to the attr. */
156 fd = sys_perf_event_open(&attr, -1, 0, -1, 0);
158 * If the kernel attribute is big enough to contain aux_sample_size
159 * then we assume that it is supported. We are relying on the kernel to
160 * validate the attribute size before anything else that could be wrong.
/* E2BIG: the kernel's known attr size ends before aux_sample_size, i.e.
 * the kernel is too old to support AUX area sampling. */
162 if (fd < 0 && errno == E2BIG)
/*
 * perf_evlist__config - apply record options to every event in @evlist.
 *
 * Sets group leader links, configures each evsel's attr from @opts and
 * @callchain, and decides whether events need PERF_SAMPLE_IDENTIFIER to
 * be told apart in the stream (always for auxtrace, otherwise only when
 * multiple events have differing sample_type).
 *
 * NOTE(review): truncated excerpt — declarations (evsel, use_comm_exec),
 * several braces and conditions (e.g. the guard around set_leader and
 * the sample_id handling) are not visible here.
 */
170 void perf_evlist__config(struct evlist *evlist, struct record_opts *opts,
171 struct callchain_param *callchain)
174 bool use_sample_identifier = false;
176 bool sample_id = opts->sample_id;
179 * Set the evsel leader links before we configure attributes,
180 * since some might depend on this info.
183 perf_evlist__set_leader(evlist);
/* A negative first map entry means "any CPU": per-task monitoring, where
 * inheritance must be disabled. */
185 if (evlist->core.cpus->map[0] < 0)
186 opts->no_inherit = true;
188 use_comm_exec = perf_can_comm_exec();
190 evlist__for_each_entry(evlist, evsel) {
191 perf_evsel__config(evsel, opts, callchain);
/* Only the "tracking" event carries comm/exec side-band data. */
192 if (evsel->tracking && use_comm_exec)
193 evsel->core.attr.comm_exec = 1;
196 if (opts->full_auxtrace) {
198 * Need to be able to synthesize and parse selected events with
199 * arbitrary sample types, which requires always being able to
202 use_sample_identifier = perf_can_sample_identifier();
204 } else if (evlist->core.nr_entries > 1) {
205 struct evsel *first = evlist__first(evlist);
/* If every event shares first's sample_type, no identifier is needed;
 * the mismatch branch requesting one is not visible in this excerpt. */
207 evlist__for_each_entry(evlist, evsel) {
208 if (evsel->core.attr.sample_type == first->core.attr.sample_type)
210 use_sample_identifier = perf_can_sample_identifier();
/* Propagate the sample-id decision to every event, then cache where the
 * id is located within samples. */
217 evlist__for_each_entry(evlist, evsel)
218 perf_evsel__set_sample_id(evsel, use_sample_identifier);
221 perf_evlist__set_id_pos(evlist);
/*
 * get_max_rate - read the kernel's current maximum sampling rate (Hz)
 * from /proc/sys/kernel/perf_event_max_sample_rate into *rate.
 * Returns 0 on success, negative on failure (from sysctl__read_int()).
 *
 * (Restored to a compilable definition: the excerpt's listing dropped the
 * brace lines and embedded stray line numbers.)
 */
static int get_max_rate(unsigned int *rate)
{
	return sysctl__read_int("kernel/perf_event_max_sample_rate", (int *)rate);
}
/*
 * record_opts__config_freq - resolve sampling frequency/period options.
 *
 * Reconciles user-given frequency (-F) and period (-c) with defaults and
 * with the kernel's perf_event_max_sample_rate limit: a period wins over
 * a frequency; a user frequency above the limit either aborts
 * (--strict-freq) or is throttled with a warning; the default frequency
 * is silently lowered to the limit.
 *
 * NOTE(review): truncated excerpt — returns, several braces/else arms and
 * some pr_* argument lines are not visible here.
 */
229 static int record_opts__config_freq(struct record_opts *opts)
/* UINT_MAX marks "user did not pass -F". */
231 bool user_freq = opts->user_freq != UINT_MAX;
232 unsigned int max_rate;
/* ULLONG_MAX marks "user did not pass -c"; otherwise take the period. */
234 if (opts->user_interval != ULLONG_MAX)
235 opts->default_interval = opts->user_interval;
237 opts->freq = opts->user_freq;
240 * User specified count overrides default frequency.
242 if (opts->default_interval)
244 else if (opts->freq) {
245 opts->default_interval = opts->freq;
/* Neither a frequency nor a period: nothing to sample with. */
247 pr_err("frequency and count are zero, aborting\n");
/* If the sysctl can't be read, skip the limit checks below. */
251 if (get_max_rate(&max_rate))
255 * User specified frequency is over current maximum.
257 if (user_freq && (max_rate < opts->freq)) {
258 if (opts->strict_freq) {
259 pr_err("error: Maximum frequency rate (%'u Hz) exceeded.\n"
260 " Please use -F freq option with a lower value or consider\n"
261 " tweaking /proc/sys/kernel/perf_event_max_sample_rate.\n",
265 pr_warning("warning: Maximum frequency rate (%'u Hz) exceeded, throttling from %'u Hz to %'u Hz.\n"
266 " The limit can be raised via /proc/sys/kernel/perf_event_max_sample_rate.\n"
267 " The kernel will lower it when perf's interrupts take too long.\n"
268 " Use --strict-freq to disable this throttling, refusing to record.\n",
269 max_rate, opts->freq, max_rate);
/* Throttle to the kernel limit instead of failing. */
271 opts->freq = max_rate;
276 * Default frequency is over current maximum.
278 if (max_rate < opts->freq) {
279 pr_warning("Lowering default frequency rate to %u.\n"
280 "Please consider tweaking "
281 "/proc/sys/kernel/perf_event_max_sample_rate.\n",
283 opts->freq = max_rate;
/*
 * record_opts__config - finalize record options before use.
 * Currently only resolves the sampling frequency/period settings;
 * returns record_opts__config_freq()'s result.
 *
 * (Restored to a compilable definition: the excerpt's listing dropped the
 * brace lines and embedded stray line numbers.)
 */
int record_opts__config(struct record_opts *opts)
{
	return record_opts__config_freq(opts);
}
/*
 * perf_evlist__can_select_event - can the event string @str be opened?
 *
 * Parses @str into a temporary evlist and attempts a real
 * sys_perf_event_open() on a CPU taken from @evlist's cpu map (or from a
 * fresh all-CPUs map when @evlist is NULL or has an empty map).
 *
 * NOTE(review): truncated excerpt — the evsel/cpu/fd/pid declarations,
 * error paths, the EACCES fallback body, close(fd) and the returns are
 * not visible here.
 */
294 bool perf_evlist__can_select_event(struct evlist *evlist, const char *str)
296 struct evlist *temp_evlist;
302 temp_evlist = evlist__new();
306 err = parse_events(temp_evlist, str, NULL);
/* Probe the most recently parsed event. */
310 evsel = evlist__last(temp_evlist);
/* No usable cpu map on the caller's evlist: pick any online CPU. */
312 if (!evlist || perf_cpu_map__empty(evlist->core.cpus)) {
313 struct perf_cpu_map *cpus = perf_cpu_map__new(NULL);
315 cpu = cpus ? cpus->map[0] : 0;
316 perf_cpu_map__put(cpus);
318 cpu = evlist->core.cpus->map[0];
322 fd = sys_perf_event_open(&evsel->core.attr, pid, cpu, -1,
323 perf_event_open_cloexec_flag());
/* EACCES with pid == -1: presumably retry per-task, as in
 * perf_do_probe_api() — the fallback body is not visible here. */
325 if (pid == -1 && errno == EACCES) {
337 evlist__delete(temp_evlist);
/*
 * record__parse_freq - parse-options callback for the -F/--freq option.
 *
 * Accepts either a number or the keyword "max" (case-insensitive), which
 * resolves to the kernel's perf_event_max_sample_rate sysctl; stores the
 * result in opts->user_freq.
 *
 * NOTE(review): truncated excerpt — the freq declaration, the numeric
 * parse branch, error returns and the final return are not visible.
 */
341 int record__parse_freq(const struct option *opt, const char *str, int unset __maybe_unused)
344 struct record_opts *opts = opt->value;
349 if (strcasecmp(str, "max") == 0) {
350 if (get_max_rate(&freq)) {
351 pr_err("couldn't read /proc/sys/kernel/perf_event_max_sample_rate\n");
354 pr_info("info: Using a maximum frequency rate of %'d Hz\n", freq);
359 opts->user_freq = freq;