perf evsel: Separate open preparation from open itself
author:    Riccardo Mancini <rickyman7@gmail.com>
           Sat, 21 Aug 2021 09:19:21 +0000 (11:19 +0200)
committer: Arnaldo Carvalho de Melo <acme@redhat.com>
           Tue, 31 Aug 2021 19:26:55 +0000 (16:26 -0300)
This is a preparatory patch for the following patches. The goal is to
separate, inside evsel__open_cpu, the actual perf_event_open, which
could be performed in parallel, from the existing fallback mechanisms,
which should be handled sequentially.

This patch splits the preparation code at the start of evsel__open_cpu
out into a new __evsel__prepare_open function.
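
For illustration only (editorial note, not part of the patch): a
minimal sketch of the same prepare/open split on the raw
perf_event_open(2) syscall. The names prepare_event and open_event are
hypothetical, invented here for the sketch; the real preparation in
perf also validates missing-feature flags and substitutes dummy
cpu/thread maps, as the diff below shows.

  /* Illustrative only; prepare_event()/open_event() are hypothetical
   * names, not perf-tools API.  Preparation does validation and fd
   * allocation; the open step is the only part that hits the kernel. */
  #include <linux/perf_event.h>
  #include <sys/syscall.h>
  #include <unistd.h>
  #include <errno.h>
  #include <stdlib.h>
  #include <string.h>

  struct event_state {
          struct perf_event_attr attr;
          int *fds;       /* one fd per CPU */
          int ncpus;
  };

  /* Mirrors the role of __evsel__prepare_open: no perf_event_open()
   * here, only checks and allocations, which can stay sequential. */
  static int prepare_event(struct event_state *st, int ncpus)
  {
          memset(st, 0, sizeof(*st));
          st->attr.size = sizeof(st->attr);
          st->attr.type = PERF_TYPE_SOFTWARE;
          st->attr.config = PERF_COUNT_SW_CPU_CLOCK;
          st->ncpus = ncpus;
          st->fds = calloc(ncpus, sizeof(int));
          return st->fds ? 0 : -ENOMEM;
  }

  /* The open itself: one syscall per CPU.  This is the loop the series
   * wants to be free to run in parallel.  Opening per-CPU events for
   * all pids may need CAP_PERFMON or a low perf_event_paranoid. */
  static int open_event(struct event_state *st)
  {
          for (int cpu = 0; cpu < st->ncpus; cpu++) {
                  st->fds[cpu] = syscall(SYS_perf_event_open, &st->attr,
                                         -1 /* pid */, cpu,
                                         -1 /* group */,
                                         PERF_FLAG_FD_CLOEXEC);
                  if (st->fds[cpu] < 0)
                          return -errno;
          }
          return 0;
  }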

Signed-off-by: Riccardo Mancini <rickyman7@gmail.com>
Cc: Ian Rogers <irogers@google.com>
Cc: Jiri Olsa <jolsa@redhat.com>
Cc: Mark Rutland <mark.rutland@arm.com>
Cc: Namhyung Kim <namhyung@kernel.org>
Cc: Peter Zijlstra <peterz@infradead.org>
Link: http://lore.kernel.org/lkml/e14118b934c338dbbf68b8677f20d0d7dbf9359a.1629490974.git.rickyman7@gmail.com
Signed-off-by: Arnaldo Carvalho de Melo <acme@redhat.com>
tools/perf/util/evsel.c

index 7b4bb32..ddf324e 100644
@@ -1746,22 +1746,20 @@ static int perf_event_open(struct evsel *evsel,
        return fd;
 }
 
-static int evsel__open_cpu(struct evsel *evsel, struct perf_cpu_map *cpus,
-               struct perf_thread_map *threads,
-               int start_cpu, int end_cpu)
+
+static struct perf_cpu_map *empty_cpu_map;
+static struct perf_thread_map *empty_thread_map;
+
+static int __evsel__prepare_open(struct evsel *evsel, struct perf_cpu_map *cpus,
+               struct perf_thread_map *threads)
 {
-       int cpu, thread, nthreads;
-       unsigned long flags = PERF_FLAG_FD_CLOEXEC;
-       int pid = -1, err, old_errno;
-       enum { NO_CHANGE, SET_TO_MAX, INCREASED_MAX } set_rlimit = NO_CHANGE;
+       int nthreads;
 
        if ((perf_missing_features.write_backward && evsel->core.attr.write_backward) ||
            (perf_missing_features.aux_output     && evsel->core.attr.aux_output))
                return -EINVAL;
 
        if (cpus == NULL) {
-               static struct perf_cpu_map *empty_cpu_map;
-
                if (empty_cpu_map == NULL) {
                        empty_cpu_map = perf_cpu_map__dummy_new();
                        if (empty_cpu_map == NULL)
@@ -1772,8 +1770,6 @@ static int evsel__open_cpu(struct evsel *evsel, struct perf_cpu_map *cpus,
        }
 
        if (threads == NULL) {
-               static struct perf_thread_map *empty_thread_map;
-
                if (empty_thread_map == NULL) {
                        empty_thread_map = thread_map__new_by_tid(-1);
                        if (empty_thread_map == NULL)
@@ -1792,6 +1788,33 @@ static int evsel__open_cpu(struct evsel *evsel, struct perf_cpu_map *cpus,
            perf_evsel__alloc_fd(&evsel->core, cpus->nr, nthreads) < 0)
                return -ENOMEM;
 
+       return 0;
+}
+
+static int evsel__open_cpu(struct evsel *evsel, struct perf_cpu_map *cpus,
+               struct perf_thread_map *threads,
+               int start_cpu, int end_cpu)
+{
+       int cpu, thread, nthreads;
+       unsigned long flags = PERF_FLAG_FD_CLOEXEC;
+       int pid = -1, err, old_errno;
+       enum { NO_CHANGE, SET_TO_MAX, INCREASED_MAX } set_rlimit = NO_CHANGE;
+
+       err = __evsel__prepare_open(evsel, cpus, threads);
+       if (err)
+               return err;
+
+       if (cpus == NULL)
+               cpus = empty_cpu_map;
+
+       if (threads == NULL)
+               threads = empty_thread_map;
+
+       if (evsel->core.system_wide)
+               nthreads = 1;
+       else
+               nthreads = threads->nr;
+
        if (evsel->cgrp) {
                flags |= PERF_FLAG_PID_CGROUP;
                pid = evsel->cgrp->fd;
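
Editorial follow-up, not part of the patch: with preparation pulled
out, the remaining perf_event_open loop no longer contains allocation
or map fix-up, which is what makes the parallelism targeted by the
rest of the series possible. A hedged sketch of that idea, assuming
one worker thread per CPU; all names here are illustrative, not the
helpers the series itself adds.

  /* Hypothetical sketch: issue the per-CPU opens from worker threads.
   * Fallbacks for rejected attrs would still run sequentially after
   * the threads are joined, matching the split this patch prepares. */
  #include <linux/perf_event.h>
  #include <sys/syscall.h>
  #include <pthread.h>
  #include <unistd.h>
  #include <errno.h>

  struct open_job {
          struct perf_event_attr *attr;
          int cpu;
          int fd;         /* result: fd on success, -errno on failure */
  };

  static void *open_one_cpu(void *arg)
  {
          struct open_job *job = arg;

          job->fd = syscall(SYS_perf_event_open, job->attr,
                            -1 /* pid */, job->cpu,
                            -1 /* group */, PERF_FLAG_FD_CLOEXEC);
          if (job->fd < 0)
                  job->fd = -errno;
          return NULL;
  }

  static int open_all_cpus(struct perf_event_attr *attr,
                           struct open_job *jobs, int ncpus)
  {
          pthread_t tids[ncpus];
          int cpu, err = 0;

          for (cpu = 0; cpu < ncpus; cpu++) {
                  jobs[cpu] = (struct open_job){ .attr = attr, .cpu = cpu };
                  err = pthread_create(&tids[cpu], NULL, open_one_cpu,
                                       &jobs[cpu]);
                  if (err)
                          return -err; /* earlier threads leak; sketch only */
          }
          for (cpu = 0; cpu < ncpus; cpu++) {
                  pthread_join(tids[cpu], NULL);
                  if (jobs[cpu].fd < 0)
                          err = jobs[cpu].fd;
          }
          return err;
  }

The fallback paths cannot live inside such a loop because they mutate
the shared attr (and rlimits), which is why the commit message insists
they be handled sequentially.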