perf evsel: Add has_callchain() helper to make code more compact/clear
author     Arnaldo Carvalho de Melo <acme@redhat.com>
           Mon, 28 May 2018 19:00:29 +0000 (16:00 -0300)
committer  Arnaldo Carvalho de Melo <acme@redhat.com>
           Tue, 5 Jun 2018 13:09:54 +0000 (10:09 -0300)
It's common to test (evsel->attr.sample_type & PERF_SAMPLE_CALLCHAIN),
so add an evsel__has_callchain(evsel) helper.

This will get more uses as we switch to checking it instead of
symbol_conf.use_callchain in places where that produces the same result
but makes the decision more fine grained, per evsel.
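
For reference, the helper is a thin wrapper around the sample_type bit
test. A minimal standalone sketch of the pattern follows; note that
struct toy_evsel and toy_evsel__has_callchain() are stand-ins invented
for this example, while the real helper operates on struct perf_evsel
and lives in tools/perf/util/evsel.h:

#include <stdbool.h>
#include <stdio.h>
#include <linux/perf_event.h>	/* struct perf_event_attr, PERF_SAMPLE_* */

/* Stand-in for struct perf_evsel: only the embedded attr matters here. */
struct toy_evsel {
	struct perf_event_attr attr;
};

/* Same bit test as the new evsel__has_callchain() helper. */
static inline bool toy_evsel__has_callchain(const struct toy_evsel *evsel)
{
	return (evsel->attr.sample_type & PERF_SAMPLE_CALLCHAIN) != 0;
}

int main(void)
{
	struct toy_evsel evsel = {
		.attr = { .sample_type = PERF_SAMPLE_IP | PERF_SAMPLE_CALLCHAIN },
	};

	/* Replaces the open coded test of attr.sample_type at call sites. */
	printf("has callchain: %s\n",
	       toy_evsel__has_callchain(&evsel) ? "yes" : "no");
	return 0;
}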

Cc: Adrian Hunter <adrian.hunter@intel.com>
Cc: David Ahern <dsahern@gmail.com>
Cc: Jiri Olsa <jolsa@kernel.org>
Cc: Namhyung Kim <namhyung@kernel.org>
Cc: Wang Nan <wangnan0@huawei.com>
Link: https://lkml.kernel.org/n/tip-145340oytbthatpfeaq1do18@git.kernel.org
Signed-off-by: Arnaldo Carvalho de Melo <acme@redhat.com>
tools/perf/builtin-sched.c
tools/perf/builtin-script.c
tools/perf/builtin-trace.c
tools/perf/tests/parse-events.c
tools/perf/util/evsel.c
tools/perf/util/evsel.h
tools/perf/util/hist.c
tools/perf/util/session.c

diff --git a/tools/perf/builtin-sched.c b/tools/perf/builtin-sched.c
index 4dfdee6..97f9e75 100644
@@ -2933,8 +2933,7 @@ static int timehist_check_attr(struct perf_sched *sched,
                        return -1;
                }
 
-               if (sched->show_callchain &&
-                   !(evsel->attr.sample_type & PERF_SAMPLE_CALLCHAIN)) {
+               if (sched->show_callchain && !evsel__has_callchain(evsel)) {
                        pr_info("Samples do not have callchains.\n");
                        sched->show_callchain = 0;
                        symbol_conf.use_callchain = 0;
diff --git a/tools/perf/builtin-script.c b/tools/perf/builtin-script.c
index cefc881..48e940e 100644
@@ -517,7 +517,7 @@ static int perf_session__check_output_opt(struct perf_session *session)
 
                evlist__for_each_entry(session->evlist, evsel) {
                        not_pipe = true;
-                       if (evsel->attr.sample_type & PERF_SAMPLE_CALLCHAIN) {
+                       if (evsel__has_callchain(evsel)) {
                                use_callchain = true;
                                break;
                        }
@@ -532,22 +532,18 @@ static int perf_session__check_output_opt(struct perf_session *session)
         */
        if (symbol_conf.use_callchain &&
            !output[PERF_TYPE_TRACEPOINT].user_set) {
-               struct perf_event_attr *attr;
-
                j = PERF_TYPE_TRACEPOINT;
 
                evlist__for_each_entry(session->evlist, evsel) {
                        if (evsel->attr.type != j)
                                continue;
 
-                       attr = &evsel->attr;
-
-                       if (attr->sample_type & PERF_SAMPLE_CALLCHAIN) {
+                       if (evsel__has_callchain(evsel)) {
                                output[j].fields |= PERF_OUTPUT_IP;
                                output[j].fields |= PERF_OUTPUT_SYM;
                                output[j].fields |= PERF_OUTPUT_SYMOFFSET;
                                output[j].fields |= PERF_OUTPUT_DSO;
-                               set_print_ip_opts(attr);
+                               set_print_ip_opts(&evsel->attr);
                                goto out;
                        }
                }
diff --git a/tools/perf/builtin-trace.c b/tools/perf/builtin-trace.c
index 560aed7..6a748ec 100644
@@ -2491,7 +2491,7 @@ static int trace__run(struct trace *trace, int argc, const char **argv)
         * to override an explicitely set --max-stack global setting.
         */
        evlist__for_each_entry(evlist, evsel) {
-               if ((evsel->attr.sample_type & PERF_SAMPLE_CALLCHAIN) &&
+               if (evsel__has_callchain(evsel) &&
                    evsel->attr.sample_max_stack == 0)
                        evsel->attr.sample_max_stack = trace->max_stack;
        }
diff --git a/tools/perf/tests/parse-events.c b/tools/perf/tests/parse-events.c
index b9ebe15..7d40770 100644
@@ -499,7 +499,7 @@ static int test__checkevent_pmu_partial_time_callgraph(struct perf_evlist *evlis
         * while this test executes only parse events method.
         */
        TEST_ASSERT_VAL("wrong period",     0 == evsel->attr.sample_period);
-       TEST_ASSERT_VAL("wrong callgraph",  !(PERF_SAMPLE_CALLCHAIN & evsel->attr.sample_type));
+       TEST_ASSERT_VAL("wrong callgraph",  !evsel__has_callchain(evsel));
        TEST_ASSERT_VAL("wrong time",  !(PERF_SAMPLE_TIME & evsel->attr.sample_type));
 
        /* cpu/config=2,call-graph=no,time=0,period=2000/ */
@@ -512,7 +512,7 @@ static int test__checkevent_pmu_partial_time_callgraph(struct perf_evlist *evlis
         * while this test executes only parse events method.
         */
        TEST_ASSERT_VAL("wrong period",     0 == evsel->attr.sample_period);
-       TEST_ASSERT_VAL("wrong callgraph",  !(PERF_SAMPLE_CALLCHAIN & evsel->attr.sample_type));
+       TEST_ASSERT_VAL("wrong callgraph",  !evsel__has_callchain(evsel));
        TEST_ASSERT_VAL("wrong time",  !(PERF_SAMPLE_TIME & evsel->attr.sample_type));
 
        return 0;
diff --git a/tools/perf/util/evsel.c b/tools/perf/util/evsel.c
index 150db5e..94fce4f 100644
@@ -2197,7 +2197,7 @@ int perf_evsel__parse_sample(struct perf_evsel *evsel, union perf_event *event,
                }
        }
 
-       if (type & PERF_SAMPLE_CALLCHAIN) {
+       if (evsel__has_callchain(evsel)) {
                const u64 max_callchain_nr = UINT64_MAX / sizeof(u64);
 
                OVERFLOW_CHECK_u64(array);
@@ -2857,7 +2857,7 @@ int perf_evsel__open_strerror(struct perf_evsel *evsel, struct target *target,
                         "Hint: Try again after reducing the number of events.\n"
                         "Hint: Try increasing the limit with 'ulimit -n <limit>'");
        case ENOMEM:
-               if ((evsel->attr.sample_type & PERF_SAMPLE_CALLCHAIN) != 0 &&
+               if (evsel__has_callchain(evsel) &&
                    access("/proc/sys/kernel/perf_event_max_stack", F_OK) == 0)
                        return scnprintf(msg, size,
                                         "Not enough memory to setup event with callchain.\n"
diff --git a/tools/perf/util/evsel.h b/tools/perf/util/evsel.h
index b13f5f2..d277930 100644
@@ -459,6 +459,11 @@ static inline bool perf_evsel__has_branch_callstack(const struct perf_evsel *evs
        return evsel->attr.branch_sample_type & PERF_SAMPLE_BRANCH_CALL_STACK;
 }
 
+static inline bool evsel__has_callchain(const struct perf_evsel *evsel)
+{
+       return (evsel->attr.sample_type & PERF_SAMPLE_CALLCHAIN) != 0;
+}
+
 typedef int (*attr__fprintf_f)(FILE *, const char *, const char *, void *);
 
 int perf_event_attr__fprintf(FILE *fp, struct perf_event_attr *attr,
diff --git a/tools/perf/util/hist.c b/tools/perf/util/hist.c
index 95333b0..34864c8 100644
@@ -1757,7 +1757,7 @@ void perf_evsel__output_resort(struct perf_evsel *evsel, struct ui_progress *pro
        bool use_callchain;
 
        if (evsel && symbol_conf.use_callchain && !symbol_conf.show_ref_callgraph)
-               use_callchain = evsel->attr.sample_type & PERF_SAMPLE_CALLCHAIN;
+               use_callchain = evsel__has_callchain(evsel);
        else
                use_callchain = symbol_conf.use_callchain;
 
diff --git a/tools/perf/util/session.c b/tools/perf/util/session.c
index b998bb4..8b93693 100644
@@ -1094,7 +1094,7 @@ static void dump_sample(struct perf_evsel *evsel, union perf_event *event,
 
        sample_type = evsel->attr.sample_type;
 
-       if (sample_type & PERF_SAMPLE_CALLCHAIN)
+       if (evsel__has_callchain(evsel))
                callchain__printf(evsel, sample);
 
        if ((sample_type & PERF_SAMPLE_BRANCH_STACK) && !perf_evsel__has_branch_callstack(evsel))