goto out;
}
-static int evsel__copy_config_terms(struct evsel *dst, struct evsel *src)
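+/*
+ * Deep-copy the config terms on @src to the tail of @dst, so that the
+ * two lists can be modified and freed independently.
+ */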
+int copy_config_terms(struct list_head *dst, struct list_head *src)
{
struct evsel_config_term *pos, *tmp;
- list_for_each_entry(pos, &src->config_terms, list) {
+ list_for_each_entry(pos, src, list) {
tmp = malloc(sizeof(*tmp));
if (tmp == NULL)
return -ENOMEM;
*tmp = *pos;
if (tmp->free_str) {
tmp->val.str = strdup(pos->val.str);
if (tmp->val.str == NULL) {
free(tmp);
return -ENOMEM;
}
}
- list_add_tail(&tmp->list, &dst->config_terms);
+ list_add_tail(&tmp->list, dst);
}
return 0;
}
+static int evsel__copy_config_terms(struct evsel *dst, struct evsel *src)
+{
+ return copy_config_terms(&dst->config_terms, &src->config_terms);
+}
+
/**
* evsel__clone - create a new evsel copied from @orig
* @orig: original evsel
return err;
}
-static void evsel__free_config_terms(struct evsel *evsel)
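+/*
+ * Unlink and free every term on @config_terms, releasing any string
+ * values the terms own.
+ */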
+void free_config_terms(struct list_head *config_terms)
{
struct evsel_config_term *term, *h;
- list_for_each_entry_safe(term, h, &evsel->config_terms, list) {
+ list_for_each_entry_safe(term, h, config_terms, list) {
list_del_init(&term->list);
if (term->free_str)
zfree(&term->val.str);
free(term);
}
}
+static void evsel__free_config_terms(struct evsel *evsel)
+{
+ free_config_terms(&evsel->config_terms);
+}
+
void evsel__exit(struct evsel *evsel)
{
assert(list_empty(&evsel->core.node));
return 0;
}
-static bool ignore_missing_thread(struct evsel *evsel,
+bool evsel__ignore_missing_thread(struct evsel *evsel,
int nr_cpus, int cpu,
struct perf_thread_map *threads,
int thread, int err)
}
}
-static int perf_event_open(struct evsel *evsel,
- pid_t pid, int cpu, int group_fd)
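+/*
+ * Called when sys_perf_event_open() fails: step attr.precise_ip down by
+ * one and report whether the caller should retry the open. The original
+ * value is saved so it can be restored once every level has failed.
+ */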
+bool evsel__precise_ip_fallback(struct evsel *evsel)
{
- int precise_ip = evsel->core.attr.precise_ip;
- int fd;
-
- while (1) {
- pr_debug2_peo("sys_perf_event_open: pid %d cpu %d group_fd %d flags %#lx",
- pid, cpu, group_fd, evsel->open_flags);
-
- fd = sys_perf_event_open(&evsel->core.attr, pid, cpu, group_fd, evsel->open_flags);
- if (fd >= 0)
- break;
-
- /* Do not try less precise if not requested. */
- if (!evsel->precise_max)
- break;
-
- /*
- * We tried all the precise_ip values, and it's
- * still failing, so leave it to standard fallback.
- */
- if (!evsel->core.attr.precise_ip) {
- evsel->core.attr.precise_ip = precise_ip;
- break;
- }
+ /* Do not try less precise if not requested. */
+ if (!evsel->precise_max)
+ return false;
- pr_debug2_peo("\nsys_perf_event_open failed, error %d\n", -ENOTSUP);
- evsel->core.attr.precise_ip--;
- pr_debug2_peo("decreasing precise_ip by one (%d)\n", evsel->core.attr.precise_ip);
- display_attr(&evsel->core.attr);
+ /*
+ * We tried all the precise_ip values, and it's
+ * still failing, so leave it to standard fallback.
+ */
+ if (!evsel->core.attr.precise_ip) {
+ evsel->core.attr.precise_ip = evsel->precise_ip_original;
+ return false;
}
- return fd;
-}
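+ /* Save the original precise_ip so it can be restored when we give up. */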
+ if (!evsel->precise_ip_original)
+ evsel->precise_ip_original = evsel->core.attr.precise_ip;
+ evsel->core.attr.precise_ip--;
+ pr_debug2_peo("decreasing precise_ip by one (%d)\n", evsel->core.attr.precise_ip);
+ display_attr(&evsel->core.attr);
+ return true;
+}
static struct perf_cpu_map *empty_cpu_map;
static struct perf_thread_map *empty_thread_map;
for (thread = 0; thread < nthreads; thread++) {
int fd, group_fd;
+retry_open:
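+ /*
+ * evsel__ignore_missing_thread() below may shrink nthreads
+ * before jumping back here, so re-check the bound on retry.
+ */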
+ if (thread >= nthreads)
+ break;
if (!evsel->cgrp && !evsel->core.system_wide)
pid = perf_thread_map__pid(threads, thread);
group_fd = get_group_fd(evsel, cpu, thread);
-retry_open:
- test_attr__ready();
- fd = perf_event_open(evsel, pid, cpus->map[cpu],
- group_fd);
+ test_attr__ready();
- FD(evsel, cpu, thread) = fd;
+ pr_debug2_peo("sys_perf_event_open: pid %d cpu %d group_fd %d flags %#lx",
+ pid, cpus->map[cpu], group_fd, evsel->open_flags);
- bpf_counter__install_pe(evsel, cpu, fd);
+ fd = sys_perf_event_open(&evsel->core.attr, pid, cpus->map[cpu],
+ group_fd, evsel->open_flags);
- if (unlikely(test_attr__enabled)) {
- test_attr__open(&evsel->core.attr, pid, cpus->map[cpu],
- fd, group_fd, evsel->open_flags);
- }
+ FD(evsel, cpu, thread) = fd;
if (fd < 0) {
err = -errno;
- if (ignore_missing_thread(evsel, cpus->nr, cpu, threads, thread, err)) {
- /*
- * We just removed 1 thread, so take a step
- * back on thread index and lower the upper
- * nthreads limit.
- */
- nthreads--;
- thread--;
-
- /* ... and pretend like nothing have happened. */
- err = 0;
- continue;
- }
-
pr_debug2_peo("\nsys_perf_event_open failed, error %d\n",
err);
goto try_fallback;
}
+ bpf_counter__install_pe(evsel, cpu, fd);
+
+ if (unlikely(test_attr__enabled)) {
+ test_attr__open(&evsel->core.attr, pid, cpus->map[cpu],
+ fd, group_fd, evsel->open_flags);
+ }
+
pr_debug2_peo(" = %d\n", fd);
if (evsel->bpf_fd >= 0) {
return 0;
try_fallback:
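+ /* First try again with a less precise attr.precise_ip, if allowed. */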
+ if (evsel__precise_ip_fallback(evsel))
+ goto retry_open;
+
+ if (evsel__ignore_missing_thread(evsel, cpus->nr, cpu, threads, thread, err)) {
+ /* We just removed 1 thread, so lower the upper nthreads limit. */
+ nthreads--;
+
+ /* ... and pretend like nothing has happened. */
+ err = 0;
+ goto retry_open;
+ }
/*
* perf stat needs between 5 and 22 fds per CPU. When we run out
* of them try to increase the limits.