libperf evlist: Allow mixing per-thread and per-cpu mmaps
author Adrian Hunter <adrian.hunter@intel.com>
Tue, 24 May 2022 07:54:31 +0000 (10:54 +0300)
committer Arnaldo Carvalho de Melo <acme@redhat.com>
Thu, 26 May 2022 15:36:57 +0000 (12:36 -0300)
mmap_per_evsel() will skip events that do not match the CPU, so all CPUs
can be iterated in any case. That makes the separate mmap_per_thread()
path unnecessary: remove it and let mmap_per_cpu() handle per-thread
mmaps too, adjusting the mmap count to add one mmap per thread when the
CPU map contains only the dummy CPU (-1).
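
For reference, a minimal per-thread usage sketch along the lines of the
libperf documentation examples (the event attributes and pid are
illustrative and most error handling is omitted); after this change such
a per-thread mmap is set up by mmap_per_cpu() rather than the removed
mmap_per_thread():

#include <linux/perf_event.h>
#include <perf/cpumap.h>
#include <perf/threadmap.h>
#include <perf/evlist.h>
#include <perf/evsel.h>
#include <perf/mmap.h>
#include <unistd.h>

int per_thread_mmap_example(void)
{
        struct perf_event_attr attr = {
                .type        = PERF_TYPE_HARDWARE,
                .config      = PERF_COUNT_HW_CPU_CYCLES,
                .disabled    = 1,
                .freq        = 1,
                .sample_freq = 4000,
                .sample_type = PERF_SAMPLE_IP | PERF_SAMPLE_TID,
        };
        struct perf_thread_map *threads;
        struct perf_evlist *evlist;
        struct perf_evsel *evsel;

        /* Per-thread: no CPU map, just the current process */
        threads = perf_thread_map__new_dummy();
        perf_thread_map__set_pid(threads, 0, getpid());

        evlist = perf_evlist__new();
        evsel = perf_evsel__new(&attr);
        perf_evlist__add(evlist, evsel);
        perf_evlist__set_maps(evlist, NULL, threads);

        if (perf_evlist__open(evlist))
                return -1;
        /*
         * The mmaps below are now set up by mmap_per_cpu();
         * perf_evlist__nr_mmaps() counts one mmap per CPU, plus one per
         * thread minus the dummy CPU (-1) in the per-thread case, so a
         * pure per-thread evlist still gets one mmap per thread.
         */
        if (perf_evlist__mmap(evlist, 4))
                return -1;

        /* ... consume samples via perf_evlist__for_each_mmap() ... */

        perf_evlist__munmap(evlist);
        perf_evlist__close(evlist);
        perf_evlist__delete(evlist);
        perf_thread_map__put(threads);
        return 0;
}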

Signed-off-by: Adrian Hunter <adrian.hunter@intel.com>
Acked-by: Ian Rogers <irogers@google.com>
Acked-by: Namhyung Kim <namhyung@kernel.org>
Cc: Alexey Bayduraev <alexey.v.bayduraev@linux.intel.com>
Cc: Jiri Olsa <jolsa@kernel.org>
Cc: Leo Yan <leo.yan@linaro.org>
Link: https://lore.kernel.org/r/20220524075436.29144-11-adrian.hunter@intel.com
Signed-off-by: Arnaldo Carvalho de Melo <acme@redhat.com>
tools/lib/perf/evlist.c

index ec0e4b5..eae1f61 100644
@@ -512,29 +512,6 @@ mmap_per_evsel(struct perf_evlist *evlist, struct perf_evlist_mmap_ops *ops,
        return 0;
 }
 
-static int
-mmap_per_thread(struct perf_evlist *evlist, struct perf_evlist_mmap_ops *ops,
-               struct perf_mmap_param *mp)
-{
-       int thread;
-       int nr_threads = perf_thread_map__nr(evlist->threads);
-
-       for (thread = 0; thread < nr_threads; thread++) {
-               int output = -1;
-               int output_overwrite = -1;
-
-               if (mmap_per_evsel(evlist, ops, thread, mp, 0, thread,
-                                  &output, &output_overwrite))
-                       goto out_unmap;
-       }
-
-       return 0;
-
-out_unmap:
-       perf_evlist__munmap(evlist);
-       return -1;
-}
-
 static int
 mmap_per_cpu(struct perf_evlist *evlist, struct perf_evlist_mmap_ops *ops,
             struct perf_mmap_param *mp)
@@ -565,9 +542,14 @@ static int perf_evlist__nr_mmaps(struct perf_evlist *evlist)
 {
        int nr_mmaps;
 
+       /* One for each CPU */
        nr_mmaps = perf_cpu_map__nr(evlist->all_cpus);
-       if (perf_cpu_map__empty(evlist->all_cpus))
-               nr_mmaps = perf_thread_map__nr(evlist->threads);
+       if (perf_cpu_map__empty(evlist->all_cpus)) {
+               /* Plus one for each thread */
+               nr_mmaps += perf_thread_map__nr(evlist->threads);
+               /* Minus the per-thread CPU (-1) */
+               nr_mmaps -= 1;
+       }
 
        return nr_mmaps;
 }
@@ -577,7 +559,6 @@ int perf_evlist__mmap_ops(struct perf_evlist *evlist,
                          struct perf_mmap_param *mp)
 {
        struct perf_evsel *evsel;
-       const struct perf_cpu_map *cpus = evlist->all_cpus;
 
        if (!ops || !ops->get || !ops->mmap)
                return -EINVAL;
@@ -596,9 +577,6 @@ int perf_evlist__mmap_ops(struct perf_evlist *evlist,
        if (evlist->pollfd.entries == NULL && perf_evlist__alloc_pollfd(evlist) < 0)
                return -ENOMEM;
 
-       if (perf_cpu_map__empty(cpus))
-               return mmap_per_thread(evlist, ops, mp);
-
        return mmap_per_cpu(evlist, ops, mp);
 }