Merge branch 'perf/urgent' into perf/core, to pick up fixes
author Ingo Molnar <mingo@kernel.org>
Sat, 24 Mar 2018 08:21:47 +0000 (09:21 +0100)
committer Ingo Molnar <mingo@kernel.org>
Sat, 24 Mar 2018 08:21:47 +0000 (09:21 +0100)
With the cherry-picked perf/urgent commit merged separately, we can now
merge all the fixes without conflicts.

Signed-off-by: Ingo Molnar <mingo@kernel.org>
194 files changed:
Documentation/trace/coresight.txt
arch/alpha/kernel/perf_event.c
arch/arm/mach-imx/mmdc.c
arch/arm/mm/cache-l2x0-pmu.c
arch/mips/kernel/perf_event_mipsxx.c
arch/powerpc/perf/core-book3s.c
arch/powerpc/perf/core-fsl-emb.c
arch/sparc/kernel/perf_event.c
arch/x86/events/core.c
arch/x86/events/intel/core.c
arch/x86/events/intel/ds.c
arch/x86/events/intel/uncore.c
arch/x86/events/perf_event.h
drivers/bus/arm-cci.c
drivers/bus/arm-ccn.c
drivers/perf/arm_dsu_pmu.c
drivers/perf/arm_pmu.c
drivers/perf/hisilicon/hisi_uncore_pmu.c
drivers/perf/qcom_l2_pmu.c
drivers/perf/qcom_l3_pmu.c
drivers/perf/xgene_pmu.c
include/linux/hw_breakpoint.h
include/linux/perf_event.h
include/linux/trace_events.h
include/uapi/linux/perf_event.h
kernel/events/core.c
kernel/events/hw_breakpoint.c
kernel/trace/trace_event_perf.c
kernel/trace/trace_kprobe.c
kernel/trace/trace_probe.h
kernel/trace/trace_uprobe.c
tools/arch/powerpc/include/uapi/asm/unistd.h [new file with mode: 0644]
tools/build/Makefile.feature
tools/build/feature/Makefile
tools/include/linux/bitmap.h
tools/include/uapi/linux/perf_event.h
tools/lib/api/fs/fs.c
tools/lib/api/fs/fs.h
tools/lib/str_error_r.c
tools/lib/symbol/kallsyms.c
tools/perf/Documentation/perf-annotate.txt
tools/perf/Documentation/perf-c2c.txt
tools/perf/Documentation/perf-data.txt
tools/perf/Documentation/perf-ftrace.txt
tools/perf/Documentation/perf-kallsyms.txt
tools/perf/Documentation/perf-kmem.txt
tools/perf/Documentation/perf-list.txt
tools/perf/Documentation/perf-mem.txt
tools/perf/Documentation/perf-record.txt
tools/perf/Documentation/perf-report.txt
tools/perf/Documentation/perf-sched.txt
tools/perf/Documentation/perf-script-perl.txt
tools/perf/Documentation/perf-script.txt
tools/perf/Documentation/perf-stat.txt
tools/perf/Documentation/perf-top.txt
tools/perf/Documentation/perf-trace.txt
tools/perf/Documentation/perf.data-file-format.txt
tools/perf/Makefile.config
tools/perf/Makefile.perf
tools/perf/arch/arm/util/auxtrace.c
tools/perf/arch/arm/util/cs-etm.c
tools/perf/arch/arm64/include/arch-tests.h [new file with mode: 0644]
tools/perf/arch/arm64/tests/Build
tools/perf/arch/arm64/tests/arch-tests.c [new file with mode: 0644]
tools/perf/arch/arm64/util/Build
tools/perf/arch/arm64/util/unwind-libdw.c [new file with mode: 0644]
tools/perf/arch/powerpc/Makefile
tools/perf/arch/powerpc/entry/syscalls/mksyscalltbl [new file with mode: 0755]
tools/perf/arch/s390/annotate/instructions.c
tools/perf/arch/s390/util/header.c
tools/perf/arch/x86/tests/perf-time-to-tsc.c
tools/perf/arch/x86/util/auxtrace.c
tools/perf/builtin-annotate.c
tools/perf/builtin-c2c.c
tools/perf/builtin-ftrace.c
tools/perf/builtin-kvm.c
tools/perf/builtin-record.c
tools/perf/builtin-report.c
tools/perf/builtin-sched.c
tools/perf/builtin-script.c
tools/perf/builtin-stat.c
tools/perf/builtin-top.c
tools/perf/builtin-trace.c
tools/perf/check-headers.sh
tools/perf/perf.h
tools/perf/pmu-events/Build
tools/perf/pmu-events/README
tools/perf/pmu-events/arch/arm64/arm/cortex-a53/branch.json [new file with mode: 0644]
tools/perf/pmu-events/arch/arm64/arm/cortex-a53/bus.json [new file with mode: 0644]
tools/perf/pmu-events/arch/arm64/arm/cortex-a53/cache.json [new file with mode: 0644]
tools/perf/pmu-events/arch/arm64/arm/cortex-a53/memory.json [new file with mode: 0644]
tools/perf/pmu-events/arch/arm64/arm/cortex-a53/other.json [new file with mode: 0644]
tools/perf/pmu-events/arch/arm64/arm/cortex-a53/pipeline.json [new file with mode: 0644]
tools/perf/pmu-events/arch/arm64/armv8-recommended.json [new file with mode: 0644]
tools/perf/pmu-events/arch/arm64/cavium/thunderx2-imp-def.json [deleted file]
tools/perf/pmu-events/arch/arm64/cavium/thunderx2/core-imp-def.json [new file with mode: 0644]
tools/perf/pmu-events/arch/arm64/cortex-a53/branch.json [deleted file]
tools/perf/pmu-events/arch/arm64/cortex-a53/bus.json [deleted file]
tools/perf/pmu-events/arch/arm64/cortex-a53/cache.json [deleted file]
tools/perf/pmu-events/arch/arm64/cortex-a53/memory.json [deleted file]
tools/perf/pmu-events/arch/arm64/cortex-a53/other.json [deleted file]
tools/perf/pmu-events/arch/arm64/cortex-a53/pipeline.json [deleted file]
tools/perf/pmu-events/arch/arm64/hisilicon/hip08/core-imp-def.json [new file with mode: 0644]
tools/perf/pmu-events/arch/arm64/mapfile.csv
tools/perf/pmu-events/arch/powerpc/power9/cache.json
tools/perf/pmu-events/arch/powerpc/power9/frontend.json
tools/perf/pmu-events/arch/powerpc/power9/marked.json
tools/perf/pmu-events/arch/powerpc/power9/memory.json
tools/perf/pmu-events/arch/powerpc/power9/other.json
tools/perf/pmu-events/arch/powerpc/power9/pipeline.json
tools/perf/pmu-events/arch/powerpc/power9/pmc.json
tools/perf/pmu-events/arch/powerpc/power9/translation.json
tools/perf/pmu-events/jevents.c
tools/perf/python/twatch.py
tools/perf/scripts/python/Perf-Trace-Util/Context.c
tools/perf/tests/Build
tools/perf/tests/attr.c
tools/perf/tests/backward-ring-buffer.c
tools/perf/tests/bp_account.c [new file with mode: 0644]
tools/perf/tests/bpf.c
tools/perf/tests/builtin-test.c
tools/perf/tests/code-reading.c
tools/perf/tests/dwarf-unwind.c
tools/perf/tests/keep-tracking.c
tools/perf/tests/mem.c
tools/perf/tests/mem2node.c [new file with mode: 0644]
tools/perf/tests/mmap-basic.c
tools/perf/tests/openat-syscall-tp-fields.c
tools/perf/tests/perf-record.c
tools/perf/tests/pmu.c
tools/perf/tests/shell/lib/probe_vfs_getname.sh
tools/perf/tests/shell/record+probe_libc_inet_pton.sh [new file with mode: 0755]
tools/perf/tests/shell/trace+probe_libc_inet_pton.sh [deleted file]
tools/perf/tests/sw-clock.c
tools/perf/tests/switch-tracking.c
tools/perf/tests/task-exit.c
tools/perf/tests/tests.h
tools/perf/tests/vmlinux-kallsyms.c
tools/perf/ui/browsers/annotate.c
tools/perf/ui/browsers/hists.c
tools/perf/ui/stdio/hist.c
tools/perf/util/Build
tools/perf/util/annotate.c
tools/perf/util/annotate.h
tools/perf/util/auxtrace.c
tools/perf/util/auxtrace.h
tools/perf/util/build-id.c
tools/perf/util/cgroup.c
tools/perf/util/cgroup.h
tools/perf/util/cs-etm-decoder/cs-etm-decoder.c
tools/perf/util/cs-etm-decoder/cs-etm-decoder.h
tools/perf/util/cs-etm.c
tools/perf/util/debug.c
tools/perf/util/env.c
tools/perf/util/env.h
tools/perf/util/event.c
tools/perf/util/evlist.c
tools/perf/util/evlist.h
tools/perf/util/evsel.c
tools/perf/util/evsel.h
tools/perf/util/header.c
tools/perf/util/header.h
tools/perf/util/hist.c
tools/perf/util/hist.h
tools/perf/util/intel-pt-decoder/intel-pt-decoder.c
tools/perf/util/intel-pt-decoder/intel-pt-decoder.h
tools/perf/util/intel-pt.c
tools/perf/util/llvm-utils.c
tools/perf/util/machine.c
tools/perf/util/machine.h
tools/perf/util/mem2node.c [new file with mode: 0644]
tools/perf/util/mem2node.h [new file with mode: 0644]
tools/perf/util/mmap.c
tools/perf/util/mmap.h
tools/perf/util/parse-events.c
tools/perf/util/parse-events.h
tools/perf/util/parse-events.l
tools/perf/util/parse-events.y
tools/perf/util/pmu.c
tools/perf/util/probe-finder.c
tools/perf/util/python.c
tools/perf/util/record.c
tools/perf/util/scripting-engines/trace-event-python.c
tools/perf/util/setup.py
tools/perf/util/sort.c
tools/perf/util/stat.c
tools/perf/util/stat.h
tools/perf/util/symbol.c
tools/perf/util/symbol.h
tools/perf/util/syscalltbl.c
tools/perf/util/thread.h
tools/perf/util/thread_map.c
tools/perf/util/thread_map.h
tools/perf/util/unwind-libdw.c

index a33c88c..6f0120c 100644 (file)
@@ -330,3 +330,54 @@ Details on how to use the generic STM API can be found here [2].
 
 [1]. Documentation/ABI/testing/sysfs-bus-coresight-devices-stm
 [2]. Documentation/trace/stm.txt
+
+
+Using perf tools
+----------------
+
+perf can be used to record and analyze the execution trace of programs.
+
+Execution can be recorded using 'perf record' with the cs_etm event,
+specifying the name of the sink to record to, e.g.:
+
+    perf record -e cs_etm/@20070000.etr/u --per-thread
+
+The 'perf report' and 'perf script' commands can be used to analyze execution,
+synthesizing instruction and branch events from the instruction trace.
+'perf inject' can be used to replace the trace data with the synthesized events.
+The --itrace option controls the type and frequency of synthesized events
+(see perf documentation).
+
+Note that only 64-bit programs are currently supported - further work is
+required to support instruction decode of 32-bit Arm programs.
+
+
+Generating coverage files for Feedback Directed Optimization: AutoFDO
+---------------------------------------------------------------------
+
+'perf inject' accepts the --itrace option, in which case the trace data is
+removed and replaced with the synthesized events, e.g.:
+
+       perf inject --itrace --strip -i perf.data -o perf.data.new
+
+Below is an example of using ARM ETM for AutoFDO.  It requires autofdo
+(https://github.com/google/autofdo) and gcc version 5.  The bubble
+sort example is from the AutoFDO tutorial (https://gcc.gnu.org/wiki/AutoFDO/Tutorial).
+
+       $ gcc-5 -O3 sort.c -o sort
+       $ taskset -c 2 ./sort
+       Bubble sorting array of 30000 elements
+       5910 ms
+
+       $ perf record -e cs_etm/@20070000.etr/u --per-thread taskset -c 2 ./sort
+       Bubble sorting array of 30000 elements
+       12543 ms
+       [ perf record: Woken up 35 times to write data ]
+       [ perf record: Captured and wrote 69.640 MB perf.data ]
+
+       $ perf inject -i perf.data -o inj.data --itrace=il64 --strip
+       $ create_gcov --binary=./sort --profile=inj.data --gcov=sort.gcov -gcov_version=1
+       $ gcc-5 -O3 -fauto-profile=sort.gcov sort.c -o sort_autofdo
+       $ taskset -c 2 ./sort_autofdo
+       Bubble sorting array of 30000 elements
+       5806 ms
index a1f6bc7..5613aa3 100644 (file)
@@ -351,7 +351,7 @@ static int collect_events(struct perf_event *group, int max_count,
                evtype[n] = group->hw.event_base;
                current_idx[n++] = PMC_NO_INDEX;
        }
-       list_for_each_entry(pe, &group->sibling_list, group_entry) {
+       for_each_sibling_event(pe, group) {
                if (!is_software_event(pe) && pe->state != PERF_EVENT_STATE_OFF) {
                        if (n >= max_count)
                                return -1;
index 5fb1d22..04b3bf7 100644 (file)
@@ -269,7 +269,7 @@ static bool mmdc_pmu_group_is_valid(struct perf_event *event)
                        return false;
        }
 
-       list_for_each_entry(sibling, &leader->sibling_list, group_entry) {
+       for_each_sibling_event(sibling, leader) {
                if (!mmdc_pmu_group_event_is_valid(sibling, pmu, &counter_mask))
                        return false;
        }
index 0a1e228..afe5b4c 100644 (file)
@@ -293,7 +293,7 @@ static bool l2x0_pmu_group_is_valid(struct perf_event *event)
        else if (!is_software_event(leader))
                return false;
 
-       list_for_each_entry(sibling, &leader->sibling_list, group_entry) {
+       for_each_sibling_event(sibling, leader) {
                if (sibling->pmu == pmu)
                        num_hw++;
                else if (!is_software_event(sibling))
index 6668f67..ee73550 100644 (file)
@@ -711,7 +711,7 @@ static int validate_group(struct perf_event *event)
        if (mipsxx_pmu_alloc_counter(&fake_cpuc, &leader->hw) < 0)
                return -EINVAL;
 
-       list_for_each_entry(sibling, &leader->sibling_list, group_entry) {
+       for_each_sibling_event(sibling, leader) {
                if (mipsxx_pmu_alloc_counter(&fake_cpuc, &sibling->hw) < 0)
                        return -EINVAL;
        }
index f89bbd5..f8908ea 100644 (file)
@@ -1426,7 +1426,7 @@ static int collect_events(struct perf_event *group, int max_count,
                flags[n] = group->hw.event_base;
                events[n++] = group->hw.config;
        }
-       list_for_each_entry(event, &group->sibling_list, group_entry) {
+       for_each_sibling_event(event, group) {
                if (event->pmu->task_ctx_nr == perf_hw_context &&
                    event->state != PERF_EVENT_STATE_OFF) {
                        if (n >= max_count)
index 5d747b4..85f1d18 100644 (file)
@@ -277,7 +277,7 @@ static int collect_events(struct perf_event *group, int max_count,
                ctrs[n] = group;
                n++;
        }
-       list_for_each_entry(event, &group->sibling_list, group_entry) {
+       for_each_sibling_event(event, group) {
                if (!is_software_event(event) &&
                    event->state != PERF_EVENT_STATE_OFF) {
                        if (n >= max_count)
index 5c1f547..d3149ba 100644 (file)
@@ -1342,7 +1342,7 @@ static int collect_events(struct perf_event *group, int max_count,
                events[n] = group->hw.event_base;
                current_idx[n++] = PIC_NO_INDEX;
        }
-       list_for_each_entry(event, &group->sibling_list, group_entry) {
+       for_each_sibling_event(event, group) {
                if (!is_software_event(event) &&
                    event->state != PERF_EVENT_STATE_OFF) {
                        if (n >= max_count)
index 88797c8..4823695 100644 (file)
@@ -990,7 +990,7 @@ static int collect_events(struct cpu_hw_events *cpuc, struct perf_event *leader,
        if (!dogrp)
                return n;
 
-       list_for_each_entry(event, &leader->sibling_list, group_entry) {
+       for_each_sibling_event(event, leader) {
                if (!is_x86_event(event) ||
                    event->state <= PERF_EVENT_STATE_OFF)
                        continue;
@@ -1156,16 +1156,13 @@ int x86_perf_event_set_period(struct perf_event *event)
 
        per_cpu(pmc_prev_left[idx], smp_processor_id()) = left;
 
-       if (!(hwc->flags & PERF_X86_EVENT_AUTO_RELOAD) ||
-           local64_read(&hwc->prev_count) != (u64)-left) {
-               /*
-                * The hw event starts counting from this event offset,
-                * mark it to be able to extra future deltas:
-                */
-               local64_set(&hwc->prev_count, (u64)-left);
+       /*
+        * The hw event starts counting from this event offset,
+        * mark it to be able to extract future deltas:
+        */
+       local64_set(&hwc->prev_count, (u64)-left);
 
-               wrmsrl(hwc->event_base, (u64)(-left) & x86_pmu.cntval_mask);
-       }
+       wrmsrl(hwc->event_base, (u64)(-left) & x86_pmu.cntval_mask);
 
        /*
         * Due to erratum on certain cpu we need
@@ -1884,6 +1881,8 @@ early_initcall(init_hw_perf_events);
 
 static inline void x86_pmu_read(struct perf_event *event)
 {
+       if (x86_pmu.read)
+               return x86_pmu.read(event);
        x86_perf_event_update(event);
 }
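
A minimal user-space sketch (toy types and names, not the kernel implementation) of the optional ->read() hook dispatch added above: x86_pmu_read() now prefers a vendor-provided callback and only falls back to the generic x86_perf_event_update() path when none is set.

    #include <stdio.h>

    struct event { long long count; };

    /* Stand-in for the generic x86_perf_event_update() path. */
    static void generic_update(struct event *e)
    {
        e->count += 1;
    }

    struct pmu_ops {
        void (*read)(struct event *e);  /* optional; may be NULL */
    };

    /* Mirrors the shape of x86_pmu_read() after this change. */
    static void pmu_read(const struct pmu_ops *ops, struct event *e)
    {
        if (ops->read) {
            ops->read(e);               /* vendor override, e.g. auto-reload */
            return;
        }
        generic_update(e);
    }

    int main(void)
    {
        struct event e = { 0 };
        const struct pmu_ops plain = { .read = NULL };

        pmu_read(&plain, &e);
        printf("count = %lld\n", e.count);  /* 1, via the fallback */
        return 0;
    }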
 
index 1e41d75..607bf56 100644 (file)
@@ -2060,6 +2060,14 @@ static void intel_pmu_del_event(struct perf_event *event)
                intel_pmu_pebs_del(event);
 }
 
+static void intel_pmu_read_event(struct perf_event *event)
+{
+       if (event->hw.flags & PERF_X86_EVENT_AUTO_RELOAD)
+               intel_pmu_auto_reload_read(event);
+       else
+               x86_perf_event_update(event);
+}
+
 static void intel_pmu_enable_fixed(struct hw_perf_event *hwc)
 {
        int idx = hwc->idx - INTEL_PMC_IDX_FIXED;
@@ -2201,9 +2209,15 @@ static int intel_pmu_handle_irq(struct pt_regs *regs)
        int bit, loops;
        u64 status;
        int handled;
+       int pmu_enabled;
 
        cpuc = this_cpu_ptr(&cpu_hw_events);
 
+       /*
+        * Save the PMU state.
+        * It needs to be restored when leaving the handler.
+        */
+       pmu_enabled = cpuc->enabled;
        /*
         * No known reason to not always do late ACK,
         * but just in case do it opt-in.
@@ -2211,6 +2225,7 @@ static int intel_pmu_handle_irq(struct pt_regs *regs)
        if (!x86_pmu.late_ack)
                apic_write(APIC_LVTPC, APIC_DM_NMI);
        intel_bts_disable_local();
+       cpuc->enabled = 0;
        __intel_pmu_disable_all();
        handled = intel_pmu_drain_bts_buffer();
        handled += intel_bts_interrupt();
@@ -2320,7 +2335,8 @@ again:
 
 done:
        /* Only restore PMU state when it's active. See x86_pmu_disable(). */
-       if (cpuc->enabled)
+       cpuc->enabled = pmu_enabled;
+       if (pmu_enabled)
                __intel_pmu_enable_all(0, true);
        intel_bts_enable_local();
 
@@ -3188,7 +3204,7 @@ glp_get_event_constraints(struct cpu_hw_events *cpuc, int idx,
  * Therefore the effective (average) period matches the requested period,
  * despite coarser hardware granularity.
  */
-static unsigned bdw_limit_period(struct perf_event *event, unsigned left)
+static u64 bdw_limit_period(struct perf_event *event, u64 left)
 {
        if ((event->hw.config & INTEL_ARCH_EVENT_MASK) ==
                        X86_CONFIG(.event=0xc0, .umask=0x01)) {
@@ -3495,6 +3511,7 @@ static __initconst const struct x86_pmu intel_pmu = {
        .disable                = intel_pmu_disable_event,
        .add                    = intel_pmu_add_event,
        .del                    = intel_pmu_del_event,
+       .read                   = intel_pmu_read_event,
        .hw_config              = intel_pmu_hw_config,
        .schedule_events        = x86_schedule_events,
        .eventsel               = MSR_ARCH_PERFMON_EVENTSEL0,
index d801523..209bf7c 100644 (file)
@@ -1306,17 +1306,93 @@ get_next_pebs_record_by_bit(void *base, void *top, int bit)
        return NULL;
 }
 
+void intel_pmu_auto_reload_read(struct perf_event *event)
+{
+       WARN_ON(!(event->hw.flags & PERF_X86_EVENT_AUTO_RELOAD));
+
+       perf_pmu_disable(event->pmu);
+       intel_pmu_drain_pebs_buffer();
+       perf_pmu_enable(event->pmu);
+}
+
+/*
+ * Special variant of intel_pmu_save_and_restart() for auto-reload.
+ */
+static int
+intel_pmu_save_and_restart_reload(struct perf_event *event, int count)
+{
+       struct hw_perf_event *hwc = &event->hw;
+       int shift = 64 - x86_pmu.cntval_bits;
+       u64 period = hwc->sample_period;
+       u64 prev_raw_count, new_raw_count;
+       s64 new, old;
+
+       WARN_ON(!period);
+
+       /*
+        * drain_pebs() only happens when the PMU is disabled.
+        */
+       WARN_ON(this_cpu_read(cpu_hw_events.enabled));
+
+       prev_raw_count = local64_read(&hwc->prev_count);
+       rdpmcl(hwc->event_base_rdpmc, new_raw_count);
+       local64_set(&hwc->prev_count, new_raw_count);
+
+       /*
+        * Since the counter increments a negative counter value and
+        * overflows on the sign switch, giving the interval:
+        *
+        *   [-period, 0]
+        *
+        * the difference between two consecutive reads is:
+        *
+        *   A) value2 - value1;
+        *      when no overflows have happened in between,
+        *
+        *   B) (0 - value1) + (value2 - (-period));
+        *      when one overflow happened in between,
+        *
+        *   C) (0 - value1) + (n - 1) * (period) + (value2 - (-period));
+        *      when @n overflows happened in between.
+        *
+        * Here A) is the obvious difference, B) is the extension to the
+        * discrete interval, where the first term is to the top of the
+        * interval and the second term is from the bottom of the next
+        * interval and C) the extension to multiple intervals, where the
+        * middle term is the whole intervals covered.
+        *
+        * An equivalent of C, by reduction, is:
+        *
+        *   value2 - value1 + n * period
+        */
+       new = ((s64)(new_raw_count << shift) >> shift);
+       old = ((s64)(prev_raw_count << shift) >> shift);
+       local64_add(new - old + count * period, &event->count);
+
+       perf_event_update_userpage(event);
+
+       return 0;
+}
+
 static void __intel_pmu_pebs_event(struct perf_event *event,
                                   struct pt_regs *iregs,
                                   void *base, void *top,
                                   int bit, int count)
 {
+       struct hw_perf_event *hwc = &event->hw;
        struct perf_sample_data data;
        struct pt_regs regs;
        void *at = get_next_pebs_record_by_bit(base, top, bit);
 
-       if (!intel_pmu_save_and_restart(event) &&
-           !(event->hw.flags & PERF_X86_EVENT_AUTO_RELOAD))
+       if (hwc->flags & PERF_X86_EVENT_AUTO_RELOAD) {
+               /*
+                * Now, auto-reload is only enabled in fixed period mode.
+                * The reload value is always hwc->sample_period.
+                * This may need to change if auto-reload is enabled in
+                * freq mode later.
+                */
+               intel_pmu_save_and_restart_reload(event, count);
+       } else if (!intel_pmu_save_and_restart(event))
                return;
 
        while (count > 1) {
@@ -1368,8 +1444,11 @@ static void intel_pmu_drain_pebs_core(struct pt_regs *iregs)
                return;
 
        n = top - at;
-       if (n <= 0)
+       if (n <= 0) {
+               if (event->hw.flags & PERF_X86_EVENT_AUTO_RELOAD)
+                       intel_pmu_save_and_restart_reload(event, 0);
                return;
+       }
 
        __intel_pmu_pebs_event(event, iregs, at, top, 0, n);
 }
@@ -1392,8 +1471,22 @@ static void intel_pmu_drain_pebs_nhm(struct pt_regs *iregs)
 
        ds->pebs_index = ds->pebs_buffer_base;
 
-       if (unlikely(base >= top))
+       if (unlikely(base >= top)) {
+               /*
+                * drain_pebs() can be called twice in a short period
+                * for an auto-reload event in pmu::read(). No overflows
+                * have happened in between, so it needs to call
+                * intel_pmu_save_and_restart_reload() to update
+                * event->count for this case.
+                */
+               for_each_set_bit(bit, (unsigned long *)&cpuc->pebs_enabled,
+                                x86_pmu.max_pebs_events) {
+                       event = cpuc->events[bit];
+                       if (event->hw.flags & PERF_X86_EVENT_AUTO_RELOAD)
+                               intel_pmu_save_and_restart_reload(event, 0);
+               }
                return;
+       }
 
        for (at = base; at < top; at += x86_pmu.pebs_record_size) {
                struct pebs_record_nhm *p = at;
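
A stand-alone arithmetic check of the reduction derived in the comment above (the period and the two raw reads are made-up illustrative values): the counter counts up from -period and overflows at 0, so the increment seen between two reads straddling n overflows is value2 - value1 + n * period.

    #include <assert.h>
    #include <stdio.h>

    int main(void)
    {
        const long long period = 100;
        const long long value1 = -30;   /* first read, 70 counts into the interval */
        const long long value2 = -80;   /* second read, one overflow (n = 1) later */
        const long long n = 1;

        /* Case B from the comment: distance to the top of the old interval
         * plus distance from the bottom of the next one. */
        long long case_b = (0 - value1) + (value2 - (-period));

        /* The reduced form used by intel_pmu_save_and_restart_reload(). */
        long long reduced = value2 - value1 + n * period;

        assert(case_b == reduced);
        printf("delta = %lld events\n", reduced);   /* 50 */
        return 0;
    }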
index 7874c98..a7956fc 100644 (file)
@@ -354,7 +354,7 @@ uncore_collect_events(struct intel_uncore_box *box, struct perf_event *leader,
        if (!dogrp)
                return n;
 
-       list_for_each_entry(event, &leader->sibling_list, group_entry) {
+       for_each_sibling_event(event, leader) {
                if (!is_box_event(box, event) ||
                    event->state <= PERF_EVENT_STATE_OFF)
                        continue;
index 39cd061..9f37114 100644 (file)
@@ -520,6 +520,7 @@ struct x86_pmu {
        void            (*disable)(struct perf_event *);
        void            (*add)(struct perf_event *);
        void            (*del)(struct perf_event *);
+       void            (*read)(struct perf_event *event);
        int             (*hw_config)(struct perf_event *event);
        int             (*schedule_events)(struct cpu_hw_events *cpuc, int n, int *assign);
        unsigned        eventsel;
@@ -557,7 +558,7 @@ struct x86_pmu {
        struct x86_pmu_quirk *quirks;
        int             perfctr_second_write;
        bool            late_ack;
-       unsigned        (*limit_period)(struct perf_event *event, unsigned l);
+       u64             (*limit_period)(struct perf_event *event, u64 l);
 
        /*
         * sysfs attrs
@@ -923,6 +924,8 @@ void intel_pmu_pebs_disable_all(void);
 
 void intel_pmu_pebs_sched_task(struct perf_event_context *ctx, bool sched_in);
 
+void intel_pmu_auto_reload_read(struct perf_event *event);
+
 void intel_ds_init(void);
 
 void intel_pmu_lbr_sched_task(struct perf_event_context *ctx, bool sched_in);
index 5426c04..c4c0c85 100644 (file)
@@ -1311,7 +1311,7 @@ validate_group(struct perf_event *event)
        if (!validate_event(event->pmu, &fake_pmu, leader))
                return -EINVAL;
 
-       list_for_each_entry(sibling, &leader->sibling_list, group_entry) {
+       for_each_sibling_event(sibling, leader) {
                if (!validate_event(event->pmu, &fake_pmu, sibling))
                        return -EINVAL;
        }
index b52332e..65b7e40 100644 (file)
@@ -846,11 +846,11 @@ static int arm_ccn_pmu_event_init(struct perf_event *event)
                        !is_software_event(event->group_leader))
                return -EINVAL;
 
-       list_for_each_entry(sibling, &event->group_leader->sibling_list,
-                       group_entry)
+       for_each_sibling_event(sibling, event->group_leader) {
                if (sibling->pmu != event->pmu &&
                                !is_software_event(sibling))
                        return -EINVAL;
+       }
 
        return 0;
 }
index 38f2cc2..660cb8a 100644 (file)
@@ -536,7 +536,7 @@ static bool dsu_pmu_validate_group(struct perf_event *event)
        memset(fake_hw.used_mask, 0, sizeof(fake_hw.used_mask));
        if (!dsu_pmu_validate_event(event->pmu, &fake_hw, leader))
                return false;
-       list_for_each_entry(sibling, &leader->sibling_list, group_entry) {
+       for_each_sibling_event(sibling, leader) {
                if (!dsu_pmu_validate_event(event->pmu, &fake_hw, sibling))
                        return false;
        }
index f63db34..1a0d340 100644 (file)
@@ -311,7 +311,7 @@ validate_group(struct perf_event *event)
        if (!validate_event(event->pmu, &fake_pmu, leader))
                return -EINVAL;
 
-       list_for_each_entry(sibling, &leader->sibling_list, group_entry) {
+       for_each_sibling_event(sibling, leader) {
                if (!validate_event(event->pmu, &fake_pmu, sibling))
                        return -EINVAL;
        }
index 7ed24b9..44df613 100644 (file)
@@ -82,8 +82,7 @@ static bool hisi_validate_event_group(struct perf_event *event)
                        counters++;
        }
 
-       list_for_each_entry(sibling, &event->group_leader->sibling_list,
-                           group_entry) {
+       for_each_sibling_event(sibling, event->group_leader) {
                if (is_software_event(sibling))
                        continue;
                if (sibling->pmu != event->pmu)
index 4fdc848..842135c 100644 (file)
@@ -534,14 +534,14 @@ static int l2_cache_event_init(struct perf_event *event)
                return -EINVAL;
        }
 
-       list_for_each_entry(sibling, &event->group_leader->sibling_list,
-                           group_entry)
+       for_each_sibling_event(sibling, event->group_leader) {
                if (sibling->pmu != event->pmu &&
                    !is_software_event(sibling)) {
                        dev_dbg_ratelimited(&l2cache_pmu->pdev->dev,
                                 "Can't create mixed PMU group\n");
                        return -EINVAL;
                }
+       }
 
        cluster = get_cluster_pmu(l2cache_pmu, event->cpu);
        if (!cluster) {
@@ -571,8 +571,7 @@ static int l2_cache_event_init(struct perf_event *event)
                return -EINVAL;
        }
 
-       list_for_each_entry(sibling, &event->group_leader->sibling_list,
-                           group_entry) {
+       for_each_sibling_event(sibling, event->group_leader) {
                if ((sibling != event) &&
                    !is_software_event(sibling) &&
                    (L2_EVT_GROUP(sibling->attr.config) ==
index 7f6b62b..2dc63d6 100644 (file)
@@ -468,7 +468,7 @@ static bool qcom_l3_cache__validate_event_group(struct perf_event *event)
        counters = event_num_counters(event);
        counters += event_num_counters(leader);
 
-       list_for_each_entry(sibling, &leader->sibling_list, group_entry) {
+       for_each_sibling_event(sibling, leader) {
                if (is_software_event(sibling))
                        continue;
                if (sibling->pmu != event->pmu)
index eb23311..6bdb1da 100644 (file)
@@ -949,11 +949,11 @@ static int xgene_perf_event_init(struct perf_event *event)
                        !is_software_event(event->group_leader))
                return -EINVAL;
 
-       list_for_each_entry(sibling, &event->group_leader->sibling_list,
-                       group_entry)
+       for_each_sibling_event(sibling, event->group_leader) {
                if (sibling->pmu != event->pmu &&
                                !is_software_event(sibling))
                        return -EINVAL;
+       }
 
        return 0;
 }
index cf04588..6058c38 100644 (file)
@@ -53,6 +53,9 @@ register_user_hw_breakpoint(struct perf_event_attr *attr,
 /* FIXME: only change from the attr, and don't unregister */
 extern int
 modify_user_hw_breakpoint(struct perf_event *bp, struct perf_event_attr *attr);
+extern int
+modify_user_hw_breakpoint_check(struct perf_event *bp, struct perf_event_attr *attr,
+                               bool check);
 
 /*
  * Kernel breakpoints are not associated with any particular thread.
@@ -97,6 +100,10 @@ register_user_hw_breakpoint(struct perf_event_attr *attr,
 static inline int
 modify_user_hw_breakpoint(struct perf_event *bp,
                          struct perf_event_attr *attr) { return -ENOSYS; }
+static inline int
+modify_user_hw_breakpoint_check(struct perf_event *bp, struct perf_event_attr *attr,
+                               bool check)     { return -ENOSYS; }
+
 static inline struct perf_event *
 register_wide_hw_breakpoint_cpu(struct perf_event_attr *attr,
                                perf_overflow_handler_t  triggered,
index 7546822..ff39ab0 100644 (file)
@@ -536,6 +536,10 @@ struct pmu_event_list {
        struct list_head        list;
 };
 
+#define for_each_sibling_event(sibling, event)                 \
+       if ((event)->group_leader == (event))                   \
+               list_for_each_entry((sibling), &(event)->sibling_list, sibling_list)
+
 /**
  * struct perf_event - performance event kernel representation:
  */
@@ -549,16 +553,16 @@ struct perf_event {
        struct list_head                event_entry;
 
        /*
-        * XXX: group_entry and sibling_list should be mutually exclusive;
-        * either you're a sibling on a group, or you're the group leader.
-        * Rework the code to always use the same list element.
-        *
         * Locked for modification by both ctx->mutex and ctx->lock; holding
         * either sufficies for read.
         */
-       struct list_head                group_entry;
        struct list_head                sibling_list;
-
+       struct list_head                active_list;
+       /*
+        * Node on the pinned or flexible tree located at the event context;
+        */
+       struct rb_node                  group_node;
+       u64                             group_index;
        /*
         * We need storage to track the entries in perf_pmu_migrate_context; we
         * cannot use the event_entry because of RCU and we want to keep the
@@ -690,6 +694,12 @@ struct perf_event {
 #endif /* CONFIG_PERF_EVENTS */
 };
 
+
+struct perf_event_groups {
+       struct rb_root  tree;
+       u64             index;
+};
+
 /**
  * struct perf_event_context - event context structure
  *
@@ -710,9 +720,13 @@ struct perf_event_context {
        struct mutex                    mutex;
 
        struct list_head                active_ctx_list;
-       struct list_head                pinned_groups;
-       struct list_head                flexible_groups;
+       struct perf_event_groups        pinned_groups;
+       struct perf_event_groups        flexible_groups;
        struct list_head                event_list;
+
+       struct list_head                pinned_active;
+       struct list_head                flexible_active;
+
        int                             nr_events;
        int                             nr_active;
        int                             is_active;
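
The for_each_sibling_event() iterator added above walks a group only when invoked on its leader; on any other event the guard makes the loop a no-op. A minimal user-space sketch of the same shape, with simplified stand-ins for the kernel's list_head, container_of() and perf_event:

    #include <stdio.h>
    #include <stddef.h>

    struct list_head { struct list_head *next, *prev; };

    static void INIT_LIST_HEAD(struct list_head *h) { h->next = h->prev = h; }

    static void list_add_tail(struct list_head *n, struct list_head *h)
    {
        n->prev = h->prev;
        n->next = h;
        h->prev->next = n;
        h->prev = n;
    }

    #define container_of(ptr, type, member) \
        ((type *)((char *)(ptr) - offsetof(type, member)))

    struct event {
        const char *name;
        struct event *group_leader;
        struct list_head sibling_list;  /* leader: list head; sibling: node */
    };

    /* Same shape as the kernel macro: iteration only happens when the
     * event passed in is its own group leader. */
    #define for_each_sibling_event(sibling, leader)                         \
        if ((leader)->group_leader == (leader))                             \
            for ((sibling) = container_of((leader)->sibling_list.next,      \
                                          struct event, sibling_list);      \
                 &(sibling)->sibling_list != &(leader)->sibling_list;       \
                 (sibling) = container_of((sibling)->sibling_list.next,     \
                                          struct event, sibling_list))

    int main(void)
    {
        struct event leader = { "leader", &leader, { 0, 0 } };
        struct event s1 = { "s1", &leader, { 0, 0 } };
        struct event s2 = { "s2", &leader, { 0, 0 } };
        struct event *sibling;

        INIT_LIST_HEAD(&leader.sibling_list);
        list_add_tail(&s1.sibling_list, &leader.sibling_list);
        list_add_tail(&s2.sibling_list, &leader.sibling_list);

        for_each_sibling_event(sibling, &leader)
            printf("%s\n", sibling->name);  /* s1, then s2 */

        for_each_sibling_event(sibling, &s1)
            printf("never printed: %s\n", sibling->name);
        return 0;
    }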
index 8a1442c..c20acfc 100644 (file)
@@ -540,6 +540,14 @@ extern int  perf_trace_init(struct perf_event *event);
 extern void perf_trace_destroy(struct perf_event *event);
 extern int  perf_trace_add(struct perf_event *event, int flags);
 extern void perf_trace_del(struct perf_event *event, int flags);
+#ifdef CONFIG_KPROBE_EVENTS
+extern int  perf_kprobe_init(struct perf_event *event, bool is_retprobe);
+extern void perf_kprobe_destroy(struct perf_event *event);
+#endif
+#ifdef CONFIG_UPROBE_EVENTS
+extern int  perf_uprobe_init(struct perf_event *event, bool is_retprobe);
+extern void perf_uprobe_destroy(struct perf_event *event);
+#endif
 extern int  ftrace_profile_set_filter(struct perf_event *event, int event_id,
                                     char *filter_str);
 extern void ftrace_profile_free_filter(struct perf_event *event);
index e0739a1..912b85b 100644 (file)
@@ -380,10 +380,14 @@ struct perf_event_attr {
        __u32                   bp_type;
        union {
                __u64           bp_addr;
+               __u64           kprobe_func; /* for perf_kprobe */
+               __u64           uprobe_path; /* for perf_uprobe */
                __u64           config1; /* extension of config */
        };
        union {
                __u64           bp_len;
+               __u64           kprobe_addr; /* when kprobe_func == NULL */
+               __u64           probe_offset; /* for perf_[k,u]probe */
                __u64           config2; /* extension of config1 */
        };
        __u64   branch_sample_type; /* enum perf_branch_sample_type */
@@ -444,17 +448,18 @@ struct perf_event_query_bpf {
 /*
  * Ioctls that can be done on a perf event fd:
  */
-#define PERF_EVENT_IOC_ENABLE          _IO ('$', 0)
-#define PERF_EVENT_IOC_DISABLE         _IO ('$', 1)
-#define PERF_EVENT_IOC_REFRESH         _IO ('$', 2)
-#define PERF_EVENT_IOC_RESET           _IO ('$', 3)
-#define PERF_EVENT_IOC_PERIOD          _IOW('$', 4, __u64)
-#define PERF_EVENT_IOC_SET_OUTPUT      _IO ('$', 5)
-#define PERF_EVENT_IOC_SET_FILTER      _IOW('$', 6, char *)
-#define PERF_EVENT_IOC_ID              _IOR('$', 7, __u64 *)
-#define PERF_EVENT_IOC_SET_BPF         _IOW('$', 8, __u32)
-#define PERF_EVENT_IOC_PAUSE_OUTPUT    _IOW('$', 9, __u32)
-#define PERF_EVENT_IOC_QUERY_BPF       _IOWR('$', 10, struct perf_event_query_bpf *)
+#define PERF_EVENT_IOC_ENABLE                  _IO ('$', 0)
+#define PERF_EVENT_IOC_DISABLE                 _IO ('$', 1)
+#define PERF_EVENT_IOC_REFRESH                 _IO ('$', 2)
+#define PERF_EVENT_IOC_RESET                   _IO ('$', 3)
+#define PERF_EVENT_IOC_PERIOD                  _IOW('$', 4, __u64)
+#define PERF_EVENT_IOC_SET_OUTPUT              _IO ('$', 5)
+#define PERF_EVENT_IOC_SET_FILTER              _IOW('$', 6, char *)
+#define PERF_EVENT_IOC_ID                      _IOR('$', 7, __u64 *)
+#define PERF_EVENT_IOC_SET_BPF                 _IOW('$', 8, __u32)
+#define PERF_EVENT_IOC_PAUSE_OUTPUT            _IOW('$', 9, __u32)
+#define PERF_EVENT_IOC_QUERY_BPF               _IOWR('$', 10, struct perf_event_query_bpf *)
+#define PERF_EVENT_IOC_MODIFY_ATTRIBUTES       _IOW('$', 11, struct perf_event_attr *)
 
 enum perf_event_ioc_flags {
        PERF_IOC_FLAG_GROUP             = 1U << 0,
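
A hedged user-space sketch of the new PERF_EVENT_IOC_MODIFY_ATTRIBUTES ioctl: event_fd is assumed to be an already-opened PERF_TYPE_BREAKPOINT event on a kernel carrying this series, and the function name, watchpoint type and length are illustrative only.

    #include <linux/perf_event.h>
    #include <linux/hw_breakpoint.h>
    #include <sys/ioctl.h>
    #include <string.h>

    /* Re-point an existing hardware breakpoint at a new address without
     * the old close() + perf_event_open() round trip. Returns the ioctl
     * result: 0 on success, -1 with errno set on failure. */
    static int modify_breakpoint(int event_fd, unsigned long long new_addr)
    {
        struct perf_event_attr attr;

        memset(&attr, 0, sizeof(attr));
        attr.type = PERF_TYPE_BREAKPOINT;
        attr.size = sizeof(attr);
        attr.bp_type = HW_BREAKPOINT_W;     /* assumption: write watchpoint */
        attr.bp_addr = new_addr;
        attr.bp_len = HW_BREAKPOINT_LEN_4;

        return ioctl(event_fd, PERF_EVENT_IOC_MODIFY_ATTRIBUTES, &attr);
    }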
index 709a55b..7517b4f 100644 (file)
@@ -430,7 +430,7 @@ static void update_perf_cpu_limits(void)
        WRITE_ONCE(perf_sample_allowed_ns, tmp);
 }
 
-static int perf_rotate_context(struct perf_cpu_context *cpuctx);
+static bool perf_rotate_context(struct perf_cpu_context *cpuctx);
 
 int perf_proc_update_handler(struct ctl_table *table, int write,
                void __user *buffer, size_t *lenp,
@@ -643,7 +643,7 @@ static void perf_event_update_sibling_time(struct perf_event *leader)
 {
        struct perf_event *sibling;
 
-       list_for_each_entry(sibling, &leader->sibling_list, group_entry)
+       for_each_sibling_event(sibling, leader)
                perf_event_update_time(sibling);
 }
 
@@ -948,27 +948,39 @@ list_update_cgroup_event(struct perf_event *event,
        if (!is_cgroup_event(event))
                return;
 
-       if (add && ctx->nr_cgroups++)
-               return;
-       else if (!add && --ctx->nr_cgroups)
-               return;
        /*
         * Because cgroup events are always per-cpu events,
         * this will always be called from the right CPU.
         */
        cpuctx = __get_cpu_context(ctx);
-       cpuctx_entry = &cpuctx->cgrp_cpuctx_entry;
-       /* cpuctx->cgrp is NULL unless a cgroup event is active in this CPU .*/
-       if (add) {
+
+       /*
+        * Since setting cpuctx->cgrp is conditional on the current @cgrp
+        * matching the event's cgroup, we must do this for every new event,
+        * because if the first would mismatch, the second would not try again
+        * and we would leave cpuctx->cgrp unset.
+        */
+       if (add && !cpuctx->cgrp) {
                struct perf_cgroup *cgrp = perf_cgroup_from_task(current, ctx);
 
-               list_add(cpuctx_entry, this_cpu_ptr(&cgrp_cpuctx_list));
                if (cgroup_is_descendant(cgrp->css.cgroup, event->cgrp->css.cgroup))
                        cpuctx->cgrp = cgrp;
-       } else {
-               list_del(cpuctx_entry);
-               cpuctx->cgrp = NULL;
        }
+
+       if (add && ctx->nr_cgroups++)
+               return;
+       else if (!add && --ctx->nr_cgroups)
+               return;
+
+       /* no cgroup running */
+       if (!add)
+               cpuctx->cgrp = NULL;
+
+       cpuctx_entry = &cpuctx->cgrp_cpuctx_entry;
+       if (add)
+               list_add(cpuctx_entry, this_cpu_ptr(&cgrp_cpuctx_list));
+       else
+               list_del(cpuctx_entry);
 }
 
 #else /* !CONFIG_CGROUP_PERF */
@@ -1052,7 +1064,7 @@ list_update_cgroup_event(struct perf_event *event,
 static enum hrtimer_restart perf_mux_hrtimer_handler(struct hrtimer *hr)
 {
        struct perf_cpu_context *cpuctx;
-       int rotations = 0;
+       bool rotations;
 
        lockdep_assert_irqs_disabled();
 
@@ -1471,8 +1483,21 @@ static enum event_type_t get_event_type(struct perf_event *event)
        return event_type;
 }
 
-static struct list_head *
-ctx_group_list(struct perf_event *event, struct perf_event_context *ctx)
+/*
+ * Helper function to initialize event group nodes.
+ */
+static void init_event_group(struct perf_event *event)
+{
+       RB_CLEAR_NODE(&event->group_node);
+       event->group_index = 0;
+}
+
+/*
+ * Extract pinned or flexible groups from the context,
+ * based on the event's attr bits.
+ */
+static struct perf_event_groups *
+get_event_groups(struct perf_event *event, struct perf_event_context *ctx)
 {
        if (event->attr.pinned)
                return &ctx->pinned_groups;
@@ -1480,6 +1505,156 @@ ctx_group_list(struct perf_event *event, struct perf_event_context *ctx)
                return &ctx->flexible_groups;
 }
 
+/*
+ * Helper function to initialize perf_event_groups trees.
+ */
+static void perf_event_groups_init(struct perf_event_groups *groups)
+{
+       groups->tree = RB_ROOT;
+       groups->index = 0;
+}
+
+/*
+ * Compare function for event groups.
+ *
+ * Implements a composite key that first sorts by CPU and then by virtual
+ * index, which provides ordering when rotating groups for the same CPU.
+ */
+static bool
+perf_event_groups_less(struct perf_event *left, struct perf_event *right)
+{
+       if (left->cpu < right->cpu)
+               return true;
+       if (left->cpu > right->cpu)
+               return false;
+
+       if (left->group_index < right->group_index)
+               return true;
+       if (left->group_index > right->group_index)
+               return false;
+
+       return false;
+}
+
+/*
+ * Insert @event into @groups' tree, using {@event->cpu, ++@groups->index} as
+ * the key (see perf_event_groups_less()). This places the event last inside
+ * the CPU subtree.
+ */
+static void
+perf_event_groups_insert(struct perf_event_groups *groups,
+                        struct perf_event *event)
+{
+       struct perf_event *node_event;
+       struct rb_node *parent;
+       struct rb_node **node;
+
+       event->group_index = ++groups->index;
+
+       node = &groups->tree.rb_node;
+       parent = *node;
+
+       while (*node) {
+               parent = *node;
+               node_event = container_of(*node, struct perf_event, group_node);
+
+               if (perf_event_groups_less(event, node_event))
+                       node = &parent->rb_left;
+               else
+                       node = &parent->rb_right;
+       }
+
+       rb_link_node(&event->group_node, parent, node);
+       rb_insert_color(&event->group_node, &groups->tree);
+}
+
+/*
+ * Helper function to insert event into the pinned or flexible groups.
+ */
+static void
+add_event_to_groups(struct perf_event *event, struct perf_event_context *ctx)
+{
+       struct perf_event_groups *groups;
+
+       groups = get_event_groups(event, ctx);
+       perf_event_groups_insert(groups, event);
+}
+
+/*
+ * Delete a group from a tree.
+ */
+static void
+perf_event_groups_delete(struct perf_event_groups *groups,
+                        struct perf_event *event)
+{
+       WARN_ON_ONCE(RB_EMPTY_NODE(&event->group_node) ||
+                    RB_EMPTY_ROOT(&groups->tree));
+
+       rb_erase(&event->group_node, &groups->tree);
+       init_event_group(event);
+}
+
+/*
+ * Helper function to delete event from its groups.
+ */
+static void
+del_event_from_groups(struct perf_event *event, struct perf_event_context *ctx)
+{
+       struct perf_event_groups *groups;
+
+       groups = get_event_groups(event, ctx);
+       perf_event_groups_delete(groups, event);
+}
+
+/*
+ * Get the leftmost event in the @cpu subtree.
+ */
+static struct perf_event *
+perf_event_groups_first(struct perf_event_groups *groups, int cpu)
+{
+       struct perf_event *node_event = NULL, *match = NULL;
+       struct rb_node *node = groups->tree.rb_node;
+
+       while (node) {
+               node_event = container_of(node, struct perf_event, group_node);
+
+               if (cpu < node_event->cpu) {
+                       node = node->rb_left;
+               } else if (cpu > node_event->cpu) {
+                       node = node->rb_right;
+               } else {
+                       match = node_event;
+                       node = node->rb_left;
+               }
+       }
+
+       return match;
+}
+
+/*
+ * Like rb_entry_safe(rb_next(...)), but limited to the @cpu subtree.
+ */
+static struct perf_event *
+perf_event_groups_next(struct perf_event *event)
+{
+       struct perf_event *next;
+
+       next = rb_entry_safe(rb_next(&event->group_node), typeof(*event), group_node);
+       if (next && next->cpu == event->cpu)
+               return next;
+
+       return NULL;
+}
+
+/*
+ * Iterate through the whole groups tree.
+ */
+#define perf_event_groups_for_each(event, groups)                      \
+       for (event = rb_entry_safe(rb_first(&((groups)->tree)),         \
+                               typeof(*event), group_node); event;     \
+               event = rb_entry_safe(rb_next(&event->group_node),      \
+                               typeof(*event), group_node))
+
 /*
  * Add an event to the list for its context.
  * Must be called with ctx->mutex and ctx->lock held.
@@ -1500,12 +1675,8 @@ list_add_event(struct perf_event *event, struct perf_event_context *ctx)
         * perf_group_detach can, at all times, locate all siblings.
         */
        if (event->group_leader == event) {
-               struct list_head *list;
-
                event->group_caps = event->event_caps;
-
-               list = ctx_group_list(event, ctx);
-               list_add_tail(&event->group_entry, list);
+               add_event_to_groups(event, ctx);
        }
 
        list_update_cgroup_event(event, ctx, true);
@@ -1663,12 +1834,12 @@ static void perf_group_attach(struct perf_event *event)
 
        group_leader->group_caps &= event->event_caps;
 
-       list_add_tail(&event->group_entry, &group_leader->sibling_list);
+       list_add_tail(&event->sibling_list, &group_leader->sibling_list);
        group_leader->nr_siblings++;
 
        perf_event__header_size(group_leader);
 
-       list_for_each_entry(pos, &group_leader->sibling_list, group_entry)
+       for_each_sibling_event(pos, group_leader)
                perf_event__header_size(pos);
 }
 
@@ -1699,7 +1870,7 @@ list_del_event(struct perf_event *event, struct perf_event_context *ctx)
        list_del_rcu(&event->event_entry);
 
        if (event->group_leader == event)
-               list_del_init(&event->group_entry);
+               del_event_from_groups(event, ctx);
 
        /*
         * If event was in error state, then keep it
@@ -1717,9 +1888,9 @@ list_del_event(struct perf_event *event, struct perf_event_context *ctx)
 static void perf_group_detach(struct perf_event *event)
 {
        struct perf_event *sibling, *tmp;
-       struct list_head *list = NULL;
+       struct perf_event_context *ctx = event->ctx;
 
-       lockdep_assert_held(&event->ctx->lock);
+       lockdep_assert_held(&ctx->lock);
 
        /*
         * We can have double detach due to exit/hot-unplug + close.
@@ -1733,34 +1904,42 @@ static void perf_group_detach(struct perf_event *event)
         * If this is a sibling, remove it from its group.
         */
        if (event->group_leader != event) {
-               list_del_init(&event->group_entry);
+               list_del_init(&event->sibling_list);
                event->group_leader->nr_siblings--;
                goto out;
        }
 
-       if (!list_empty(&event->group_entry))
-               list = &event->group_entry;
-
        /*
         * If this was a group event with sibling events then
         * upgrade the siblings to singleton events by adding them
         * to whatever list we are on.
         */
-       list_for_each_entry_safe(sibling, tmp, &event->sibling_list, group_entry) {
-               if (list)
-                       list_move_tail(&sibling->group_entry, list);
+       list_for_each_entry_safe(sibling, tmp, &event->sibling_list, sibling_list) {
+
                sibling->group_leader = sibling;
+               list_del_init(&sibling->sibling_list);
 
                /* Inherit group flags from the previous leader */
                sibling->group_caps = event->group_caps;
 
+               if (!RB_EMPTY_NODE(&event->group_node)) {
+                       add_event_to_groups(sibling, event->ctx);
+
+                       if (sibling->state == PERF_EVENT_STATE_ACTIVE) {
+                               struct list_head *list = sibling->attr.pinned ?
+                                       &ctx->pinned_active : &ctx->flexible_active;
+
+                               list_add_tail(&sibling->active_list, list);
+                       }
+               }
+
                WARN_ON_ONCE(sibling->ctx != event->ctx);
        }
 
 out:
        perf_event__header_size(event->group_leader);
 
-       list_for_each_entry(tmp, &event->group_leader->sibling_list, group_entry)
+       for_each_sibling_event(tmp, event->group_leader)
                perf_event__header_size(tmp);
 }
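
The helpers above replace the per-context pinned/flexible group lists with rbtrees keyed by the composite {cpu, group_index} pair: CPU first, then a monotonically growing insertion index, so the leftmost entry of each CPU subtree is the oldest group. A user-space sketch of that ordering, with a plain array and qsort() standing in for the rbtree:

    #include <stdio.h>
    #include <stdlib.h>

    struct group { int cpu; unsigned long long group_index; };

    /* Same ordering as perf_event_groups_less(): cpu, then group_index. */
    static int group_cmp(const void *a, const void *b)
    {
        const struct group *l = a, *r = b;

        if (l->cpu != r->cpu)
            return l->cpu < r->cpu ? -1 : 1;
        if (l->group_index != r->group_index)
            return l->group_index < r->group_index ? -1 : 1;
        return 0;
    }

    int main(void)
    {
        /* cpu == -1 (schedulable on any CPU) sorts before per-CPU groups. */
        struct group g[] = { {2, 5}, {-1, 3}, {2, 1}, {0, 4}, {-1, 2} };
        size_t i, n = sizeof(g) / sizeof(g[0]);

        qsort(g, n, sizeof(g[0]), group_cmp);
        for (i = 0; i < n; i++)
            printf("{cpu=%d, idx=%llu}\n", g[i].cpu, g[i].group_index);
        return 0;
    }

Rotation then becomes "delete and re-insert with a fresh index" (see rotate_ctx() below) instead of list_rotate_left().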
 
@@ -1783,13 +1962,13 @@ static inline int __pmu_filter_match(struct perf_event *event)
  */
 static inline int pmu_filter_match(struct perf_event *event)
 {
-       struct perf_event *child;
+       struct perf_event *sibling;
 
        if (!__pmu_filter_match(event))
                return 0;
 
-       list_for_each_entry(child, &event->sibling_list, group_entry) {
-               if (!__pmu_filter_match(child))
+       for_each_sibling_event(sibling, event) {
+               if (!__pmu_filter_match(sibling))
                        return 0;
        }
 
@@ -1816,6 +1995,13 @@ event_sched_out(struct perf_event *event,
        if (event->state != PERF_EVENT_STATE_ACTIVE)
                return;
 
+       /*
+        * Asymmetry; we only schedule events _IN_ through ctx_sched_in(), but
+        * we can schedule events _OUT_ individually through things like
+        * __perf_remove_from_context().
+        */
+       list_del_init(&event->active_list);
+
        perf_pmu_disable(event->pmu);
 
        event->pmu->del(event, 0);
@@ -1856,7 +2042,7 @@ group_sched_out(struct perf_event *group_event,
        /*
         * Schedule out siblings (if any):
         */
-       list_for_each_entry(event, &group_event->sibling_list, group_entry)
+       for_each_sibling_event(event, group_event)
                event_sched_out(event, cpuctx, ctx);
 
        perf_pmu_enable(ctx->pmu);
@@ -2135,7 +2321,7 @@ group_sched_in(struct perf_event *group_event,
        /*
         * Schedule in siblings as one group (if any):
         */
-       list_for_each_entry(event, &group_event->sibling_list, group_entry) {
+       for_each_sibling_event(event, group_event) {
                if (event_sched_in(event, cpuctx, ctx)) {
                        partial_group = event;
                        goto group_error;
@@ -2151,7 +2337,7 @@ group_error:
         * partial group before returning:
         * The events up to the failed event are scheduled out normally.
         */
-       list_for_each_entry(event, &group_event->sibling_list, group_entry) {
+       for_each_sibling_event(event, group_event) {
                if (event == partial_group)
                        break;
 
@@ -2328,6 +2514,18 @@ static int  __perf_install_in_context(void *info)
                raw_spin_lock(&task_ctx->lock);
        }
 
+#ifdef CONFIG_CGROUP_PERF
+       if (is_cgroup_event(event)) {
+               /*
+                * If the current cgroup doesn't match the event's
+                * cgroup, we should not try to schedule it.
+                */
+               struct perf_cgroup *cgrp = perf_cgroup_from_task(current, ctx);
+               reprogram = cgroup_is_descendant(cgrp->css.cgroup,
+                                       event->cgrp->css.cgroup);
+       }
+#endif
+
        if (reprogram) {
                ctx_sched_out(ctx, cpuctx, EVENT_TIME);
                add_event_to_ctx(event, ctx);
@@ -2661,12 +2859,47 @@ int perf_event_refresh(struct perf_event *event, int refresh)
 }
 EXPORT_SYMBOL_GPL(perf_event_refresh);
 
+static int perf_event_modify_breakpoint(struct perf_event *bp,
+                                        struct perf_event_attr *attr)
+{
+       int err;
+
+       _perf_event_disable(bp);
+
+       err = modify_user_hw_breakpoint_check(bp, attr, true);
+       if (err) {
+               if (!bp->attr.disabled)
+                       _perf_event_enable(bp);
+
+               return err;
+       }
+
+       if (!attr->disabled)
+               _perf_event_enable(bp);
+       return 0;
+}
+
+static int perf_event_modify_attr(struct perf_event *event,
+                                 struct perf_event_attr *attr)
+{
+       if (event->attr.type != attr->type)
+               return -EINVAL;
+
+       switch (event->attr.type) {
+       case PERF_TYPE_BREAKPOINT:
+               return perf_event_modify_breakpoint(event, attr);
+       default:
+               /* Placeholder for future additions. */
+               return -EOPNOTSUPP;
+       }
+}
+
 static void ctx_sched_out(struct perf_event_context *ctx,
                          struct perf_cpu_context *cpuctx,
                          enum event_type_t event_type)
 {
+       struct perf_event *event, *tmp;
        int is_active = ctx->is_active;
-       struct perf_event *event;
 
        lockdep_assert_held(&ctx->lock);
 
@@ -2713,12 +2946,12 @@ static void ctx_sched_out(struct perf_event_context *ctx,
 
        perf_pmu_disable(ctx->pmu);
        if (is_active & EVENT_PINNED) {
-               list_for_each_entry(event, &ctx->pinned_groups, group_entry)
+               list_for_each_entry_safe(event, tmp, &ctx->pinned_active, active_list)
                        group_sched_out(event, cpuctx, ctx);
        }
 
        if (is_active & EVENT_FLEXIBLE) {
-               list_for_each_entry(event, &ctx->flexible_groups, group_entry)
+               list_for_each_entry_safe(event, tmp, &ctx->flexible_active, active_list)
                        group_sched_out(event, cpuctx, ctx);
        }
        perf_pmu_enable(ctx->pmu);
@@ -3005,53 +3238,116 @@ static void cpu_ctx_sched_out(struct perf_cpu_context *cpuctx,
        ctx_sched_out(&cpuctx->ctx, cpuctx, event_type);
 }
 
-static void
-ctx_pinned_sched_in(struct perf_event_context *ctx,
-                   struct perf_cpu_context *cpuctx)
+static int visit_groups_merge(struct perf_event_groups *groups, int cpu,
+                             int (*func)(struct perf_event *, void *), void *data)
 {
-       struct perf_event *event;
+       struct perf_event **evt, *evt1, *evt2;
+       int ret;
 
-       list_for_each_entry(event, &ctx->pinned_groups, group_entry) {
-               if (event->state <= PERF_EVENT_STATE_OFF)
-                       continue;
-               if (!event_filter_match(event))
-                       continue;
+       evt1 = perf_event_groups_first(groups, -1);
+       evt2 = perf_event_groups_first(groups, cpu);
+
+       while (evt1 || evt2) {
+               if (evt1 && evt2) {
+                       if (evt1->group_index < evt2->group_index)
+                               evt = &evt1;
+                       else
+                               evt = &evt2;
+               } else if (evt1) {
+                       evt = &evt1;
+               } else {
+                       evt = &evt2;
+               }
 
-               if (group_can_go_on(event, cpuctx, 1))
-                       group_sched_in(event, cpuctx, ctx);
+               ret = func(*evt, data);
+               if (ret)
+                       return ret;
 
-               /*
-                * If this pinned group hasn't been scheduled,
-                * put it in error state.
-                */
-               if (event->state == PERF_EVENT_STATE_INACTIVE)
-                       perf_event_set_state(event, PERF_EVENT_STATE_ERROR);
+               *evt = perf_event_groups_next(*evt);
+       }
+
+       return 0;
+}
+
+struct sched_in_data {
+       struct perf_event_context *ctx;
+       struct perf_cpu_context *cpuctx;
+       int can_add_hw;
+};
+
+static int pinned_sched_in(struct perf_event *event, void *data)
+{
+       struct sched_in_data *sid = data;
+
+       if (event->state <= PERF_EVENT_STATE_OFF)
+               return 0;
+
+       if (!event_filter_match(event))
+               return 0;
+
+       if (group_can_go_on(event, sid->cpuctx, sid->can_add_hw)) {
+               if (!group_sched_in(event, sid->cpuctx, sid->ctx))
+                       list_add_tail(&event->active_list, &sid->ctx->pinned_active);
        }
+
+       /*
+        * If this pinned group hasn't been scheduled,
+        * put it in error state.
+        */
+       if (event->state == PERF_EVENT_STATE_INACTIVE)
+               perf_event_set_state(event, PERF_EVENT_STATE_ERROR);
+
+       return 0;
+}
+
+static int flexible_sched_in(struct perf_event *event, void *data)
+{
+       struct sched_in_data *sid = data;
+
+       if (event->state <= PERF_EVENT_STATE_OFF)
+               return 0;
+
+       if (!event_filter_match(event))
+               return 0;
+
+       if (group_can_go_on(event, sid->cpuctx, sid->can_add_hw)) {
+               if (!group_sched_in(event, sid->cpuctx, sid->ctx))
+                       list_add_tail(&event->active_list, &sid->ctx->flexible_active);
+               else
+                       sid->can_add_hw = 0;
+       }
+
+       return 0;
+}
+
+static void
+ctx_pinned_sched_in(struct perf_event_context *ctx,
+                   struct perf_cpu_context *cpuctx)
+{
+       struct sched_in_data sid = {
+               .ctx = ctx,
+               .cpuctx = cpuctx,
+               .can_add_hw = 1,
+       };
+
+       visit_groups_merge(&ctx->pinned_groups,
+                          smp_processor_id(),
+                          pinned_sched_in, &sid);
 }
 
 static void
 ctx_flexible_sched_in(struct perf_event_context *ctx,
                      struct perf_cpu_context *cpuctx)
 {
-       struct perf_event *event;
-       int can_add_hw = 1;
-
-       list_for_each_entry(event, &ctx->flexible_groups, group_entry) {
-               /* Ignore events in OFF or ERROR state */
-               if (event->state <= PERF_EVENT_STATE_OFF)
-                       continue;
-               /*
-                * Listen to the 'cpu' scheduling filter constraint
-                * of events:
-                */
-               if (!event_filter_match(event))
-                       continue;
+       struct sched_in_data sid = {
+               .ctx = ctx,
+               .cpuctx = cpuctx,
+               .can_add_hw = 1,
+       };
 
-               if (group_can_go_on(event, cpuctx, can_add_hw)) {
-                       if (group_sched_in(event, cpuctx, ctx))
-                               can_add_hw = 0;
-               }
-       }
+       visit_groups_merge(&ctx->flexible_groups,
+                          smp_processor_id(),
+                          flexible_sched_in, &sid);
 }
 
 static void
@@ -3132,7 +3428,7 @@ static void perf_event_context_sched_in(struct perf_event_context *ctx,
         * However, if task's ctx is not carrying any pinned
         * events, no need to flip the cpuctx's events around.
         */
-       if (!list_empty(&ctx->pinned_groups))
+       if (!RB_EMPTY_ROOT(&ctx->pinned_groups.tree))
                cpu_ctx_sched_out(cpuctx, EVENT_FLEXIBLE);
        perf_event_sched_in(cpuctx, ctx, task);
        perf_pmu_enable(ctx->pmu);
@@ -3361,55 +3657,81 @@ static void perf_adjust_freq_unthr_context(struct perf_event_context *ctx,
 }
 
 /*
- * Round-robin a context's events:
+ * Move @event to the tail of the @ctx's eligible events.
  */
-static void rotate_ctx(struct perf_event_context *ctx)
+static void rotate_ctx(struct perf_event_context *ctx, struct perf_event *event)
 {
        /*
         * Rotate the first entry last of non-pinned groups. Rotation might be
         * disabled by the inheritance code.
         */
-       if (!ctx->rotate_disable)
-               list_rotate_left(&ctx->flexible_groups);
+       if (ctx->rotate_disable)
+               return;
+
+       perf_event_groups_delete(&ctx->flexible_groups, event);
+       perf_event_groups_insert(&ctx->flexible_groups, event);
 }
 
-static int perf_rotate_context(struct perf_cpu_context *cpuctx)
+static inline struct perf_event *
+ctx_first_active(struct perf_event_context *ctx)
 {
+       return list_first_entry_or_null(&ctx->flexible_active,
+                                       struct perf_event, active_list);
+}
+
+static bool perf_rotate_context(struct perf_cpu_context *cpuctx)
+{
+       struct perf_event *cpu_event = NULL, *task_event = NULL;
+       bool cpu_rotate = false, task_rotate = false;
        struct perf_event_context *ctx = NULL;
-       int rotate = 0;
+
+       /*
+        * Since we run this from IRQ context, nobody can install new
+        * events, thus the event count values are stable.
+        */
 
        if (cpuctx->ctx.nr_events) {
                if (cpuctx->ctx.nr_events != cpuctx->ctx.nr_active)
-                       rotate = 1;
+                       cpu_rotate = true;
        }
 
        ctx = cpuctx->task_ctx;
        if (ctx && ctx->nr_events) {
                if (ctx->nr_events != ctx->nr_active)
-                       rotate = 1;
+                       task_rotate = true;
        }
 
-       if (!rotate)
-               goto done;
+       if (!(cpu_rotate || task_rotate))
+               return false;
 
        perf_ctx_lock(cpuctx, cpuctx->task_ctx);
        perf_pmu_disable(cpuctx->ctx.pmu);
 
-       cpu_ctx_sched_out(cpuctx, EVENT_FLEXIBLE);
-       if (ctx)
+       if (task_rotate)
+               task_event = ctx_first_active(ctx);
+       if (cpu_rotate)
+               cpu_event = ctx_first_active(&cpuctx->ctx);
+
+       /*
+        * As per the order given at ctx_resched(), first 'pop' the task
+        * flexible events and then, if needed, the CPU flexible events.
+        */
+       if (task_event || (ctx && cpu_event))
                ctx_sched_out(ctx, cpuctx, EVENT_FLEXIBLE);
+       if (cpu_event)
+               cpu_ctx_sched_out(cpuctx, EVENT_FLEXIBLE);
 
-       rotate_ctx(&cpuctx->ctx);
-       if (ctx)
-               rotate_ctx(ctx);
+       if (task_event)
+               rotate_ctx(ctx, task_event);
+       if (cpu_event)
+               rotate_ctx(&cpuctx->ctx, cpu_event);
 
        perf_event_sched_in(cpuctx, ctx, current);
 
        perf_pmu_enable(cpuctx->ctx.pmu);
        perf_ctx_unlock(cpuctx, cpuctx->task_ctx);
-done:
 
-       return rotate;
+       return true;
 }
 
 void perf_event_task_tick(void)
@@ -3554,7 +3876,7 @@ static void __perf_event_read(void *info)
 
        pmu->read(event);
 
-       list_for_each_entry(sub, &event->sibling_list, group_entry) {
+       for_each_sibling_event(sub, event) {
                if (sub->state == PERF_EVENT_STATE_ACTIVE) {
                        /*
                         * Use sibling's PMU rather than @event's since
@@ -3728,9 +4050,11 @@ static void __perf_event_init_context(struct perf_event_context *ctx)
        raw_spin_lock_init(&ctx->lock);
        mutex_init(&ctx->mutex);
        INIT_LIST_HEAD(&ctx->active_ctx_list);
-       INIT_LIST_HEAD(&ctx->pinned_groups);
-       INIT_LIST_HEAD(&ctx->flexible_groups);
+       perf_event_groups_init(&ctx->pinned_groups);
+       perf_event_groups_init(&ctx->flexible_groups);
        INIT_LIST_HEAD(&ctx->event_list);
+       INIT_LIST_HEAD(&ctx->pinned_active);
+       INIT_LIST_HEAD(&ctx->flexible_active);
        atomic_set(&ctx->refcount, 1);
 }
 
@@ -4400,7 +4724,7 @@ static int __perf_read_group_add(struct perf_event *leader,
        if (read_format & PERF_FORMAT_ID)
                values[n++] = primary_event_id(leader);
 
-       list_for_each_entry(sub, &leader->sibling_list, group_entry) {
+       for_each_sibling_event(sub, leader) {
                values[n++] += perf_event_count(sub);
                if (read_format & PERF_FORMAT_ID)
                        values[n++] = primary_event_id(sub);
@@ -4594,7 +4918,7 @@ static void perf_event_for_each(struct perf_event *event,
        event = event->group_leader;
 
        perf_event_for_each_child(event, func);
-       list_for_each_entry(sibling, &event->sibling_list, group_entry)
+       for_each_sibling_event(sibling, event)
                perf_event_for_each_child(sibling, func);
 }
 
@@ -4676,6 +5000,8 @@ static int perf_event_set_output(struct perf_event *event,
                                 struct perf_event *output_event);
 static int perf_event_set_filter(struct perf_event *event, void __user *arg);
 static int perf_event_set_bpf_prog(struct perf_event *event, u32 prog_fd);
+static int perf_copy_attr(struct perf_event_attr __user *uattr,
+                         struct perf_event_attr *attr);
 
 static long _perf_ioctl(struct perf_event *event, unsigned int cmd, unsigned long arg)
 {
@@ -4748,6 +5074,17 @@ static long _perf_ioctl(struct perf_event *event, unsigned int cmd, unsigned lon
 
        case PERF_EVENT_IOC_QUERY_BPF:
                return perf_event_query_prog_array(event, (void __user *)arg);
+
+       case PERF_EVENT_IOC_MODIFY_ATTRIBUTES: {
+               struct perf_event_attr new_attr;
+               int err = perf_copy_attr((struct perf_event_attr __user *)arg,
+                                        &new_attr);
+
+               if (err)
+                       return err;
+
+               return perf_event_modify_attr(event, &new_attr);
+       }
        default:
                return -ENOTTY;
        }
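
The new PERF_EVENT_IOC_MODIFY_ATTRIBUTES ioctl lets userspace update an existing event's attributes in place; later in this series it is backed by modify_user_hw_breakpoint_check() for hardware breakpoints. A minimal userspace sketch, assuming fd and attr come from an earlier perf_event_open() for a breakpoint event:

#include <linux/perf_event.h>
#include <stdio.h>
#include <sys/ioctl.h>

/* Retarget an existing hardware-breakpoint event without reopening it.
 * Only bp_addr changes here; bp_type and bp_len keep their old values. */
static int move_breakpoint(int fd, struct perf_event_attr *attr,
                           unsigned long long new_addr)
{
        attr->bp_addr = new_addr;
        if (ioctl(fd, PERF_EVENT_IOC_MODIFY_ATTRIBUTES, attr) < 0) {
                perror("PERF_EVENT_IOC_MODIFY_ATTRIBUTES");
                return -1;
        }
        return 0;
}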
@@ -5743,7 +6080,8 @@ static void perf_output_read_group(struct perf_output_handle *handle,
        if (read_format & PERF_FORMAT_TOTAL_TIME_RUNNING)
                values[n++] = running;
 
-       if (leader != event)
+       if ((leader != event) &&
+           (leader->state == PERF_EVENT_STATE_ACTIVE))
                leader->pmu->read(leader);
 
        values[n++] = perf_event_count(leader);
@@ -5752,7 +6090,7 @@ static void perf_output_read_group(struct perf_output_handle *handle,
 
        __output_copy(handle, values, n * sizeof(u64));
 
-       list_for_each_entry(sub, &leader->sibling_list, group_entry) {
+       for_each_sibling_event(sub, leader) {
                n = 0;
 
                if ((sub != event) &&
@@ -8009,9 +8347,119 @@ static struct pmu perf_tracepoint = {
        .read           = perf_swevent_read,
 };
 
+#if defined(CONFIG_KPROBE_EVENTS) || defined(CONFIG_UPROBE_EVENTS)
+/*
+ * Flags in config, used by the dynamic kprobe and uprobe PMUs.
+ * The flags should match the following PMU_FORMAT_ATTR().
+ *
+ * PERF_PROBE_CONFIG_IS_RETPROBE if set, create kretprobe/uretprobe
+ *                               if not set, create kprobe/uprobe
+ */
+enum perf_probe_config {
+       PERF_PROBE_CONFIG_IS_RETPROBE = 1U << 0,  /* [k,u]retprobe */
+};
+
+PMU_FORMAT_ATTR(retprobe, "config:0");
+
+static struct attribute *probe_attrs[] = {
+       &format_attr_retprobe.attr,
+       NULL,
+};
+
+static struct attribute_group probe_format_group = {
+       .name = "format",
+       .attrs = probe_attrs,
+};
+
+static const struct attribute_group *probe_attr_groups[] = {
+       &probe_format_group,
+       NULL,
+};
+#endif
+
+#ifdef CONFIG_KPROBE_EVENTS
+static int perf_kprobe_event_init(struct perf_event *event);
+static struct pmu perf_kprobe = {
+       .task_ctx_nr    = perf_sw_context,
+       .event_init     = perf_kprobe_event_init,
+       .add            = perf_trace_add,
+       .del            = perf_trace_del,
+       .start          = perf_swevent_start,
+       .stop           = perf_swevent_stop,
+       .read           = perf_swevent_read,
+       .attr_groups    = probe_attr_groups,
+};
+
+static int perf_kprobe_event_init(struct perf_event *event)
+{
+       int err;
+       bool is_retprobe;
+
+       if (event->attr.type != perf_kprobe.type)
+               return -ENOENT;
+       /*
+        * no branch sampling for probe events
+        */
+       if (has_branch_stack(event))
+               return -EOPNOTSUPP;
+
+       is_retprobe = event->attr.config & PERF_PROBE_CONFIG_IS_RETPROBE;
+       err = perf_kprobe_init(event, is_retprobe);
+       if (err)
+               return err;
+
+       event->destroy = perf_kprobe_destroy;
+
+       return 0;
+}
+#endif /* CONFIG_KPROBE_EVENTS */
+
+#ifdef CONFIG_UPROBE_EVENTS
+static int perf_uprobe_event_init(struct perf_event *event);
+static struct pmu perf_uprobe = {
+       .task_ctx_nr    = perf_sw_context,
+       .event_init     = perf_uprobe_event_init,
+       .add            = perf_trace_add,
+       .del            = perf_trace_del,
+       .start          = perf_swevent_start,
+       .stop           = perf_swevent_stop,
+       .read           = perf_swevent_read,
+       .attr_groups    = probe_attr_groups,
+};
+
+static int perf_uprobe_event_init(struct perf_event *event)
+{
+       int err;
+       bool is_retprobe;
+
+       if (event->attr.type != perf_uprobe.type)
+               return -ENOENT;
+       /*
+        * no branch sampling for probe events
+        */
+       if (has_branch_stack(event))
+               return -EOPNOTSUPP;
+
+       is_retprobe = event->attr.config & PERF_PROBE_CONFIG_IS_RETPROBE;
+       err = perf_uprobe_init(event, is_retprobe);
+       if (err)
+               return err;
+
+       event->destroy = perf_uprobe_destroy;
+
+       return 0;
+}
+#endif /* CONFIG_UPROBE_EVENTS */
+
 static inline void perf_tp_register(void)
 {
        perf_pmu_register(&perf_tracepoint, "tracepoint", PERF_TYPE_TRACEPOINT);
+#ifdef CONFIG_KPROBE_EVENTS
+       perf_pmu_register(&perf_kprobe, "kprobe", -1);
+#endif
+#ifdef CONFIG_UPROBE_EVENTS
+       perf_pmu_register(&perf_uprobe, "uprobe", -1);
+#endif
 }
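
With the "kprobe" PMU registered, its dynamically assigned type is exported through sysfs and can be passed to perf_event_open() together with the new attr fields. A hedged userspace sketch (the sysfs path follows the usual event_source layout; error handling is minimal):

#include <linux/perf_event.h>
#include <stdio.h>
#include <string.h>
#include <sys/syscall.h>
#include <unistd.h>

/* Open a kprobe on kernel symbol 'func'; pass retprobe=1 for a kretprobe. */
static int open_kprobe(const char *func, int retprobe)
{
        struct perf_event_attr attr;
        FILE *f = fopen("/sys/bus/event_source/devices/kprobe/type", "r");
        int type;

        if (!f)
                return -1;
        if (fscanf(f, "%d", &type) != 1) {
                fclose(f);
                return -1;
        }
        fclose(f);

        memset(&attr, 0, sizeof(attr));
        attr.size = sizeof(attr);
        attr.type = type;                 /* dynamically assigned PMU type */
        attr.kprobe_func = (unsigned long)func;
        attr.probe_offset = 0;            /* offset from the symbol */
        attr.config = retprobe;           /* PERF_PROBE_CONFIG_IS_RETPROBE */

        return syscall(__NR_perf_event_open, &attr, -1, 0, -1, 0);
}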
 
 static void perf_event_free_filter(struct perf_event *event)
@@ -8088,13 +8536,32 @@ static void perf_event_free_bpf_handler(struct perf_event *event)
 }
 #endif
 
+/*
+ * returns true if the event is a tracepoint, or a kprobe/uprobe created
+ * with perf_event_open()
+ */
+static inline bool perf_event_is_tracing(struct perf_event *event)
+{
+       if (event->pmu == &perf_tracepoint)
+               return true;
+#ifdef CONFIG_KPROBE_EVENTS
+       if (event->pmu == &perf_kprobe)
+               return true;
+#endif
+#ifdef CONFIG_UPROBE_EVENTS
+       if (event->pmu == &perf_uprobe)
+               return true;
+#endif
+       return false;
+}
+
 static int perf_event_set_bpf_prog(struct perf_event *event, u32 prog_fd)
 {
        bool is_kprobe, is_tracepoint, is_syscall_tp;
        struct bpf_prog *prog;
        int ret;
 
-       if (event->attr.type != PERF_TYPE_TRACEPOINT)
+       if (!perf_event_is_tracing(event))
                return perf_event_set_bpf_handler(event, prog_fd);
 
        is_kprobe = event->tp_event->flags & TRACE_EVENT_FL_UKPROBE;
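
Since perf_event_is_tracing() now also matches the kprobe/uprobe PMUs, an event created through them can take a BPF program via the existing ioctl. A one-line sketch, assuming kprobe_fd is such an event and prog_fd an already-loaded BPF program:

        if (ioctl(kprobe_fd, PERF_EVENT_IOC_SET_BPF, prog_fd) < 0)
                perror("PERF_EVENT_IOC_SET_BPF");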
@@ -8140,7 +8607,7 @@ static int perf_event_set_bpf_prog(struct perf_event *event, u32 prog_fd)
 
 static void perf_event_free_bpf_prog(struct perf_event *event)
 {
-       if (event->attr.type != PERF_TYPE_TRACEPOINT) {
+       if (!perf_event_is_tracing(event)) {
                perf_event_free_bpf_handler(event);
                return;
        }
@@ -8559,47 +9026,36 @@ fail_clear_files:
        return ret;
 }
 
-static int
-perf_tracepoint_set_filter(struct perf_event *event, char *filter_str)
-{
-       struct perf_event_context *ctx = event->ctx;
-       int ret;
-
-       /*
-        * Beware, here be dragons!!
-        *
-        * the tracepoint muck will deadlock against ctx->mutex, but the tracepoint
-        * stuff does not actually need it. So temporarily drop ctx->mutex. As per
-        * perf_event_ctx_lock() we already have a reference on ctx.
-        *
-        * This can result in event getting moved to a different ctx, but that
-        * does not affect the tracepoint state.
-        */
-       mutex_unlock(&ctx->mutex);
-       ret = ftrace_profile_set_filter(event, event->attr.config, filter_str);
-       mutex_lock(&ctx->mutex);
-
-       return ret;
-}
-
 static int perf_event_set_filter(struct perf_event *event, void __user *arg)
 {
-       char *filter_str;
        int ret = -EINVAL;
-
-       if ((event->attr.type != PERF_TYPE_TRACEPOINT ||
-           !IS_ENABLED(CONFIG_EVENT_TRACING)) &&
-           !has_addr_filter(event))
-               return -EINVAL;
+       char *filter_str;
 
        filter_str = strndup_user(arg, PAGE_SIZE);
        if (IS_ERR(filter_str))
                return PTR_ERR(filter_str);
 
-       if (IS_ENABLED(CONFIG_EVENT_TRACING) &&
-           event->attr.type == PERF_TYPE_TRACEPOINT)
-               ret = perf_tracepoint_set_filter(event, filter_str);
-       else if (has_addr_filter(event))
+#ifdef CONFIG_EVENT_TRACING
+       if (perf_event_is_tracing(event)) {
+               struct perf_event_context *ctx = event->ctx;
+
+               /*
+                * Beware, here be dragons!!
+                *
+                * the tracepoint muck will deadlock against ctx->mutex, but
+                * the tracepoint stuff does not actually need it. So
+                * temporarily drop ctx->mutex. As per perf_event_ctx_lock() we
+                * already have a reference on ctx.
+                *
+                * This can result in event getting moved to a different ctx,
+                * but that does not affect the tracepoint state.
+                */
+               mutex_unlock(&ctx->mutex);
+               ret = ftrace_profile_set_filter(event, event->attr.config, filter_str);
+               mutex_lock(&ctx->mutex);
+       } else
+#endif
+       if (has_addr_filter(event))
                ret = perf_event_set_addr_filter(event, filter_str);
 
        kfree(filter_str);
@@ -9452,9 +9908,10 @@ perf_event_alloc(struct perf_event_attr *attr, int cpu,
        mutex_init(&event->child_mutex);
        INIT_LIST_HEAD(&event->child_list);
 
-       INIT_LIST_HEAD(&event->group_entry);
        INIT_LIST_HEAD(&event->event_entry);
        INIT_LIST_HEAD(&event->sibling_list);
+       INIT_LIST_HEAD(&event->active_list);
+       init_event_group(event);
        INIT_LIST_HEAD(&event->rb_entry);
        INIT_LIST_HEAD(&event->active_entry);
        INIT_LIST_HEAD(&event->addr_filters.list);
@@ -9729,6 +10186,9 @@ static int perf_copy_attr(struct perf_event_attr __user *uattr,
                        ret = -EINVAL;
        }
 
+       if (!attr->sample_max_stack)
+               attr->sample_max_stack = sysctl_perf_event_max_stack;
+
        if (attr->sample_type & PERF_SAMPLE_REGS_INTR)
                ret = perf_reg_validate(attr->sample_regs_intr);
 out:
@@ -9942,9 +10402,6 @@ SYSCALL_DEFINE5(perf_event_open,
            perf_paranoid_kernel() && !capable(CAP_SYS_ADMIN))
                return -EACCES;
 
-       if (!attr.sample_max_stack)
-               attr.sample_max_stack = sysctl_perf_event_max_stack;
-
        /*
         * In cgroup mode, the pid argument is used to pass the fd
         * opened to the cgroup directory in cgroupfs. The cpu argument
@@ -10218,8 +10675,7 @@ SYSCALL_DEFINE5(perf_event_open,
                perf_remove_from_context(group_leader, 0);
                put_ctx(gctx);
 
-               list_for_each_entry(sibling, &group_leader->sibling_list,
-                                   group_entry) {
+               for_each_sibling_event(sibling, group_leader) {
                        perf_remove_from_context(sibling, 0);
                        put_ctx(gctx);
                }
@@ -10240,8 +10696,7 @@ SYSCALL_DEFINE5(perf_event_open,
                 * By installing siblings first we NO-OP because they're not
                 * reachable through the group lists.
                 */
-               list_for_each_entry(sibling, &group_leader->sibling_list,
-                                   group_entry) {
+               for_each_sibling_event(sibling, group_leader) {
                        perf_event__state_init(sibling);
                        perf_install_in_context(ctx, sibling, sibling->cpu);
                        get_ctx(ctx);
@@ -10880,7 +11335,7 @@ static int inherit_group(struct perf_event *parent_event,
         * case inherit_event() will create individual events, similar to what
         * perf_group_detach() would do anyway.
         */
-       list_for_each_entry(sub, &parent_event->sibling_list, group_entry) {
+       for_each_sibling_event(sub, parent_event) {
                child_ctr = inherit_event(sub, parent, parent_ctx,
                                            child, leader, child_ctx);
                if (IS_ERR(child_ctr))
@@ -10979,7 +11434,7 @@ static int perf_event_init_context(struct task_struct *child, int ctxn)
         * We don't have to disable NMIs - we are only looking at
         * the list, not manipulating it:
         */
-       list_for_each_entry(event, &parent_ctx->pinned_groups, group_entry) {
+       perf_event_groups_for_each(event, &parent_ctx->pinned_groups) {
                ret = inherit_task_group(event, parent, parent_ctx,
                                         child, ctxn, &inherited_all);
                if (ret)
@@ -10995,7 +11450,7 @@ static int perf_event_init_context(struct task_struct *child, int ctxn)
        parent_ctx->rotate_disable = 1;
        raw_spin_unlock_irqrestore(&parent_ctx->lock, flags);
 
-       list_for_each_entry(event, &parent_ctx->flexible_groups, group_entry) {
+       perf_event_groups_for_each(event, &parent_ctx->flexible_groups) {
                ret = inherit_task_group(event, parent, parent_ctx,
                                         child, ctxn, &inherited_all);
                if (ret)
index 3f8cb1e..6253d55 100644 (file)
@@ -44,6 +44,7 @@
 #include <linux/list.h>
 #include <linux/cpu.h>
 #include <linux/smp.h>
+#include <linux/bug.h>
 
 #include <linux/hw_breakpoint.h>
 /*
@@ -85,9 +86,9 @@ __weak int hw_breakpoint_weight(struct perf_event *bp)
        return 1;
 }
 
-static inline enum bp_type_idx find_slot_idx(struct perf_event *bp)
+static inline enum bp_type_idx find_slot_idx(u64 bp_type)
 {
-       if (bp->attr.bp_type & HW_BREAKPOINT_RW)
+       if (bp_type & HW_BREAKPOINT_RW)
                return TYPE_DATA;
 
        return TYPE_INST;
@@ -122,7 +123,7 @@ static int task_bp_pinned(int cpu, struct perf_event *bp, enum bp_type_idx type)
 
        list_for_each_entry(iter, &bp_task_head, hw.bp_list) {
                if (iter->hw.target == tsk &&
-                   find_slot_idx(iter) == type &&
+                   find_slot_idx(iter->attr.bp_type) == type &&
                    (iter->cpu < 0 || cpu == iter->cpu))
                        count += hw_breakpoint_weight(iter);
        }
@@ -277,7 +278,7 @@ __weak void arch_unregister_hw_breakpoint(struct perf_event *bp)
  *       ((per_cpu(info->flexible, *) > 1) + max(per_cpu(info->cpu_pinned, *))
  *            + max(per_cpu(info->tsk_pinned, *))) < HBP_NUM
  */
-static int __reserve_bp_slot(struct perf_event *bp)
+static int __reserve_bp_slot(struct perf_event *bp, u64 bp_type)
 {
        struct bp_busy_slots slots = {0};
        enum bp_type_idx type;
@@ -288,11 +289,11 @@ static int __reserve_bp_slot(struct perf_event *bp)
                return -ENOMEM;
 
        /* Basic checks */
-       if (bp->attr.bp_type == HW_BREAKPOINT_EMPTY ||
-           bp->attr.bp_type == HW_BREAKPOINT_INVALID)
+       if (bp_type == HW_BREAKPOINT_EMPTY ||
+           bp_type == HW_BREAKPOINT_INVALID)
                return -EINVAL;
 
-       type = find_slot_idx(bp);
+       type = find_slot_idx(bp_type);
        weight = hw_breakpoint_weight(bp);
 
        fetch_bp_busy_slots(&slots, bp, type);
@@ -317,19 +318,19 @@ int reserve_bp_slot(struct perf_event *bp)
 
        mutex_lock(&nr_bp_mutex);
 
-       ret = __reserve_bp_slot(bp);
+       ret = __reserve_bp_slot(bp, bp->attr.bp_type);
 
        mutex_unlock(&nr_bp_mutex);
 
        return ret;
 }
 
-static void __release_bp_slot(struct perf_event *bp)
+static void __release_bp_slot(struct perf_event *bp, u64 bp_type)
 {
        enum bp_type_idx type;
        int weight;
 
-       type = find_slot_idx(bp);
+       type = find_slot_idx(bp_type);
        weight = hw_breakpoint_weight(bp);
        toggle_bp_slot(bp, false, type, weight);
 }
@@ -339,11 +340,43 @@ void release_bp_slot(struct perf_event *bp)
        mutex_lock(&nr_bp_mutex);
 
        arch_unregister_hw_breakpoint(bp);
-       __release_bp_slot(bp);
+       __release_bp_slot(bp, bp->attr.bp_type);
 
        mutex_unlock(&nr_bp_mutex);
 }
 
+static int __modify_bp_slot(struct perf_event *bp, u64 old_type)
+{
+       int err;
+
+       __release_bp_slot(bp, old_type);
+
+       err = __reserve_bp_slot(bp, bp->attr.bp_type);
+       if (err) {
+               /*
+                * Reserve the old_type slot back in case
+                * there's no space for the new type.
+                *
+                * This must succeed, because we just released
+                * the old_type slot in the __release_bp_slot
+                * call above. If not, something is broken.
+                */
+               WARN_ON(__reserve_bp_slot(bp, old_type));
+       }
+
+       return err;
+}
+
+static int modify_bp_slot(struct perf_event *bp, u64 old_type)
+{
+       int ret;
+
+       mutex_lock(&nr_bp_mutex);
+       ret = __modify_bp_slot(bp, old_type);
+       mutex_unlock(&nr_bp_mutex);
+       return ret;
+}
+
 /*
  * Allow the kernel debugger to reserve breakpoint slots without
  * taking a lock, using the dbg_* variants of the reserve and
@@ -354,7 +387,7 @@ int dbg_reserve_bp_slot(struct perf_event *bp)
        if (mutex_is_locked(&nr_bp_mutex))
                return -1;
 
-       return __reserve_bp_slot(bp);
+       return __reserve_bp_slot(bp, bp->attr.bp_type);
 }
 
 int dbg_release_bp_slot(struct perf_event *bp)
@@ -362,7 +395,7 @@ int dbg_release_bp_slot(struct perf_event *bp)
        if (mutex_is_locked(&nr_bp_mutex))
                return -1;
 
-       __release_bp_slot(bp);
+       __release_bp_slot(bp, bp->attr.bp_type);
 
        return 0;
 }
@@ -423,6 +456,38 @@ register_user_hw_breakpoint(struct perf_event_attr *attr,
 }
 EXPORT_SYMBOL_GPL(register_user_hw_breakpoint);
 
+int
+modify_user_hw_breakpoint_check(struct perf_event *bp, struct perf_event_attr *attr,
+                               bool check)
+{
+       u64 old_addr = bp->attr.bp_addr;
+       u64 old_len  = bp->attr.bp_len;
+       int old_type = bp->attr.bp_type;
+       bool modify  = attr->bp_type != old_type;
+       int err = 0;
+
+       bp->attr.bp_addr = attr->bp_addr;
+       bp->attr.bp_type = attr->bp_type;
+       bp->attr.bp_len  = attr->bp_len;
+
+       if (check && memcmp(&bp->attr, attr, sizeof(*attr)))
+               return -EINVAL;
+
+       err = validate_hw_breakpoint(bp);
+       if (!err && modify)
+               err = modify_bp_slot(bp, old_type);
+
+       if (err) {
+               bp->attr.bp_addr = old_addr;
+               bp->attr.bp_type = old_type;
+               bp->attr.bp_len  = old_len;
+               return err;
+       }
+
+       bp->attr.disabled = attr->disabled;
+       return 0;
+}
+
 /**
  * modify_user_hw_breakpoint - modify a user-space hardware breakpoint
  * @bp: the breakpoint structure to modify
@@ -432,10 +497,7 @@ EXPORT_SYMBOL_GPL(register_user_hw_breakpoint);
  */
 int modify_user_hw_breakpoint(struct perf_event *bp, struct perf_event_attr *attr)
 {
-       u64 old_addr = bp->attr.bp_addr;
-       u64 old_len = bp->attr.bp_len;
-       int old_type = bp->attr.bp_type;
-       int err = 0;
+       int err;
 
        /*
         * modify_user_hw_breakpoint can be invoked with IRQs disabled and hence it
@@ -448,30 +510,17 @@ int modify_user_hw_breakpoint(struct perf_event *bp, struct perf_event_attr *att
        else
                perf_event_disable(bp);
 
-       bp->attr.bp_addr = attr->bp_addr;
-       bp->attr.bp_type = attr->bp_type;
-       bp->attr.bp_len = attr->bp_len;
-
-       if (attr->disabled)
-               goto end;
-
-       err = validate_hw_breakpoint(bp);
-       if (!err)
-               perf_event_enable(bp);
+       err = modify_user_hw_breakpoint_check(bp, attr, false);
 
        if (err) {
-               bp->attr.bp_addr = old_addr;
-               bp->attr.bp_type = old_type;
-               bp->attr.bp_len = old_len;
                if (!bp->attr.disabled)
                        perf_event_enable(bp);
 
                return err;
        }
 
-end:
-       bp->attr.disabled = attr->disabled;
-
+       if (!attr->disabled)
+               perf_event_enable(bp);
        return 0;
 }
 EXPORT_SYMBOL_GPL(modify_user_hw_breakpoint);
index 55d6dff..2c41650 100644 (file)
@@ -8,6 +8,7 @@
 #include <linux/module.h>
 #include <linux/kprobes.h>
 #include "trace.h"
+#include "trace_probe.h"
 
 static char __percpu *perf_trace_buf[PERF_NR_CONTEXTS];
 
@@ -237,6 +238,107 @@ void perf_trace_destroy(struct perf_event *p_event)
        mutex_unlock(&event_mutex);
 }
 
+#ifdef CONFIG_KPROBE_EVENTS
+int perf_kprobe_init(struct perf_event *p_event, bool is_retprobe)
+{
+       int ret;
+       char *func = NULL;
+       struct trace_event_call *tp_event;
+
+       if (p_event->attr.kprobe_func) {
+               func = kzalloc(KSYM_NAME_LEN, GFP_KERNEL);
+               if (!func)
+                       return -ENOMEM;
+               ret = strncpy_from_user(
+                       func, u64_to_user_ptr(p_event->attr.kprobe_func),
+                       KSYM_NAME_LEN);
+               if (ret < 0)
+                       goto out;
+
+               if (func[0] == '\0') {
+                       kfree(func);
+                       func = NULL;
+               }
+       }
+
+       tp_event = create_local_trace_kprobe(
+               func, (void *)(unsigned long)(p_event->attr.kprobe_addr),
+               p_event->attr.probe_offset, is_retprobe);
+       if (IS_ERR(tp_event)) {
+               ret = PTR_ERR(tp_event);
+               goto out;
+       }
+
+       ret = perf_trace_event_init(tp_event, p_event);
+       if (ret)
+               destroy_local_trace_kprobe(tp_event);
+out:
+       kfree(func);
+       return ret;
+}
+
+void perf_kprobe_destroy(struct perf_event *p_event)
+{
+       perf_trace_event_close(p_event);
+       perf_trace_event_unreg(p_event);
+
+       destroy_local_trace_kprobe(p_event->tp_event);
+}
+#endif /* CONFIG_KPROBE_EVENTS */
+
+#ifdef CONFIG_UPROBE_EVENTS
+int perf_uprobe_init(struct perf_event *p_event, bool is_retprobe)
+{
+       int ret;
+       char *path = NULL;
+       struct trace_event_call *tp_event;
+
+       if (!p_event->attr.uprobe_path)
+               return -EINVAL;
+       path = kzalloc(PATH_MAX, GFP_KERNEL);
+       if (!path)
+               return -ENOMEM;
+       ret = strncpy_from_user(
+               path, u64_to_user_ptr(p_event->attr.uprobe_path), PATH_MAX);
+       if (ret < 0)
+               goto out;
+       if (path[0] == '\0') {
+               ret = -EINVAL;
+               goto out;
+       }
+
+       tp_event = create_local_trace_uprobe(
+               path, p_event->attr.probe_offset, is_retprobe);
+       if (IS_ERR(tp_event)) {
+               ret = PTR_ERR(tp_event);
+               goto out;
+       }
+
+       /*
+        * A local trace_uprobe needs to hold event_mutex to call
+        * uprobe_buffer_enable() and uprobe_buffer_disable().
+        * event_mutex is not required for local trace_kprobes.
+        */
+       mutex_lock(&event_mutex);
+       ret = perf_trace_event_init(tp_event, p_event);
+       if (ret)
+               destroy_local_trace_uprobe(tp_event);
+       mutex_unlock(&event_mutex);
+out:
+       kfree(path);
+       return ret;
+}
+
+void perf_uprobe_destroy(struct perf_event *p_event)
+{
+       mutex_lock(&event_mutex);
+       perf_trace_event_close(p_event);
+       perf_trace_event_unreg(p_event);
+       mutex_unlock(&event_mutex);
+       destroy_local_trace_uprobe(p_event->tp_event);
+}
+#endif /* CONFIG_UPROBE_EVENTS */
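
The uprobe side is symmetric from userspace: attr.uprobe_path names the file and attr.probe_offset the offset inside it. A sketch along the lines of the kprobe one above (same includes; the type value comes from .../devices/uprobe/type):

/* Probe 'offset' bytes into the file at 'path'. */
static int open_uprobe(const char *path, unsigned long long offset, int type)
{
        struct perf_event_attr attr;

        memset(&attr, 0, sizeof(attr));
        attr.size = sizeof(attr);
        attr.type = type;                       /* from the uprobe PMU */
        attr.uprobe_path = (unsigned long)path; /* must be a regular file */
        attr.probe_offset = offset;
        attr.config = 0;                        /* set bit 0 for a uretprobe */

        return syscall(__NR_perf_event_open, &attr, -1, 0, -1, 0);
}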
+
 int perf_trace_add(struct perf_event *p_event, int flags)
 {
        struct trace_event_call *tp_event = p_event->tp_event;
index 1fad24a..5ce9b8c 100644 (file)
@@ -462,6 +462,14 @@ disable_trace_kprobe(struct trace_kprobe *tk, struct trace_event_file *file)
                        disable_kprobe(&tk->rp.kp);
                wait = 1;
        }
+
+       /*
+        * If tk is not added to any list, it must be a local trace_kprobe
+        * created with perf_event_open. We don't need to wait for these
+        * trace_kprobes.
+        */
+       if (list_empty(&tk->list))
+               wait = 0;
  out:
        if (wait) {
                /*
@@ -1358,12 +1366,9 @@ static struct trace_event_functions kprobe_funcs = {
        .trace          = print_kprobe_event
 };
 
-static int register_kprobe_event(struct trace_kprobe *tk)
+static inline void init_trace_event_call(struct trace_kprobe *tk,
+                                        struct trace_event_call *call)
 {
-       struct trace_event_call *call = &tk->tp.call;
-       int ret;
-
-       /* Initialize trace_event_call */
        INIT_LIST_HEAD(&call->class->fields);
        if (trace_kprobe_is_return(tk)) {
                call->event.funcs = &kretprobe_funcs;
@@ -1372,6 +1377,19 @@ static int register_kprobe_event(struct trace_kprobe *tk)
                call->event.funcs = &kprobe_funcs;
                call->class->define_fields = kprobe_event_define_fields;
        }
+
+       call->flags = TRACE_EVENT_FL_KPROBE;
+       call->class->reg = kprobe_register;
+       call->data = tk;
+}
+
+static int register_kprobe_event(struct trace_kprobe *tk)
+{
+       struct trace_event_call *call = &tk->tp.call;
+       int ret = 0;
+
+       init_trace_event_call(tk, call);
+
        if (set_print_fmt(&tk->tp, trace_kprobe_is_return(tk)) < 0)
                return -ENOMEM;
        ret = register_trace_event(&call->event);
@@ -1379,9 +1397,6 @@ static int register_kprobe_event(struct trace_kprobe *tk)
                kfree(call->print_fmt);
                return -ENODEV;
        }
-       call->flags = TRACE_EVENT_FL_KPROBE;
-       call->class->reg = kprobe_register;
-       call->data = tk;
        ret = trace_add_event_call(call);
        if (ret) {
                pr_info("Failed to register kprobe event: %s\n",
@@ -1403,6 +1418,66 @@ static int unregister_kprobe_event(struct trace_kprobe *tk)
        return ret;
 }
 
+#ifdef CONFIG_PERF_EVENTS
+/* create a trace_kprobe, but don't add it to global lists */
+struct trace_event_call *
+create_local_trace_kprobe(char *func, void *addr, unsigned long offs,
+                         bool is_return)
+{
+       struct trace_kprobe *tk;
+       int ret;
+       char *event;
+
+       /*
+        * local trace_kprobes are not added to probe_list, so they are never
+        * found by find_trace_kprobe(). Therefore, there is no concern
+        * about duplicate names here.
+        */
+       event = func ? func : "DUMMY_EVENT";
+
+       tk = alloc_trace_kprobe(KPROBE_EVENT_SYSTEM, event, (void *)addr, func,
+                               offs, 0 /* maxactive */, 0 /* nargs */,
+                               is_return);
+
+       if (IS_ERR(tk)) {
+               pr_info("Failed to allocate trace_probe.(%d)\n",
+                       (int)PTR_ERR(tk));
+               return ERR_CAST(tk);
+       }
+
+       init_trace_event_call(tk, &tk->tp.call);
+
+       if (set_print_fmt(&tk->tp, trace_kprobe_is_return(tk)) < 0) {
+               ret = -ENOMEM;
+               goto error;
+       }
+
+       ret = __register_trace_kprobe(tk);
+       if (ret < 0)
+               goto error;
+
+       return &tk->tp.call;
+error:
+       free_trace_kprobe(tk);
+       return ERR_PTR(ret);
+}
+
+void destroy_local_trace_kprobe(struct trace_event_call *event_call)
+{
+       struct trace_kprobe *tk;
+
+       tk = container_of(event_call, struct trace_kprobe, tp.call);
+
+       if (trace_probe_is_enabled(&tk->tp)) {
+               WARN_ON(1);
+               return;
+       }
+
+       __unregister_trace_kprobe(tk);
+       free_trace_kprobe(tk);
+}
+#endif /* CONFIG_PERF_EVENTS */
+
 /* Make a tracefs interface for controlling probe points */
 static __init int init_kprobe_trace(void)
 {
index e101c5b..0745f89 100644 (file)
@@ -416,3 +416,14 @@ store_trace_args(int ent_size, struct trace_probe *tp, struct pt_regs *regs,
 }
 
 extern int set_print_fmt(struct trace_probe *tp, bool is_return);
+
+#ifdef CONFIG_PERF_EVENTS
+extern struct trace_event_call *
+create_local_trace_kprobe(char *func, void *addr, unsigned long offs,
+                         bool is_return);
+extern void destroy_local_trace_kprobe(struct trace_event_call *event_call);
+
+extern struct trace_event_call *
+create_local_trace_uprobe(char *name, unsigned long offs, bool is_return);
+extern void destroy_local_trace_uprobe(struct trace_event_call *event_call);
+#endif
index 268029a..2014f43 100644 (file)
@@ -1292,16 +1292,25 @@ static struct trace_event_functions uprobe_funcs = {
        .trace          = print_uprobe_event
 };
 
-static int register_uprobe_event(struct trace_uprobe *tu)
+static inline void init_trace_event_call(struct trace_uprobe *tu,
+                                        struct trace_event_call *call)
 {
-       struct trace_event_call *call = &tu->tp.call;
-       int ret;
-
-       /* Initialize trace_event_call */
        INIT_LIST_HEAD(&call->class->fields);
        call->event.funcs = &uprobe_funcs;
        call->class->define_fields = uprobe_event_define_fields;
 
+       call->flags = TRACE_EVENT_FL_UPROBE;
+       call->class->reg = trace_uprobe_register;
+       call->data = tu;
+}
+
+static int register_uprobe_event(struct trace_uprobe *tu)
+{
+       struct trace_event_call *call = &tu->tp.call;
+       int ret = 0;
+
+       init_trace_event_call(tu, call);
+
        if (set_print_fmt(&tu->tp, is_ret_probe(tu)) < 0)
                return -ENOMEM;
 
@@ -1311,9 +1320,6 @@ static int register_uprobe_event(struct trace_uprobe *tu)
                return -ENODEV;
        }
 
-       call->flags = TRACE_EVENT_FL_UPROBE;
-       call->class->reg = trace_uprobe_register;
-       call->data = tu;
        ret = trace_add_event_call(call);
 
        if (ret) {
@@ -1339,6 +1345,70 @@ static int unregister_uprobe_event(struct trace_uprobe *tu)
        return 0;
 }
 
+#ifdef CONFIG_PERF_EVENTS
+struct trace_event_call *
+create_local_trace_uprobe(char *name, unsigned long offs, bool is_return)
+{
+       struct trace_uprobe *tu;
+       struct inode *inode;
+       struct path path;
+       int ret;
+
+       ret = kern_path(name, LOOKUP_FOLLOW, &path);
+       if (ret)
+               return ERR_PTR(ret);
+
+       inode = igrab(d_inode(path.dentry));
+       path_put(&path);
+
+       if (!inode || !S_ISREG(inode->i_mode)) {
+               iput(inode);
+               return ERR_PTR(-EINVAL);
+       }
+
+       /*
+        * local trace_uprobes are not added to probe_list, so they are never
+        * found by a name-based lookup. Therefore, there is no concern about
+        * the duplicate name "DUMMY_EVENT" here.
+        */
+       tu = alloc_trace_uprobe(UPROBE_EVENT_SYSTEM, "DUMMY_EVENT", 0,
+                               is_return);
+
+       if (IS_ERR(tu)) {
+               pr_info("Failed to allocate trace_uprobe.(%d)\n",
+                       (int)PTR_ERR(tu));
+               return ERR_CAST(tu);
+       }
+
+       tu->offset = offs;
+       tu->inode = inode;
+       tu->filename = kstrdup(name, GFP_KERNEL);
+       init_trace_event_call(tu, &tu->tp.call);
+
+       if (set_print_fmt(&tu->tp, is_ret_probe(tu)) < 0) {
+               ret = -ENOMEM;
+               goto error;
+       }
+
+       return &tu->tp.call;
+error:
+       free_trace_uprobe(tu);
+       return ERR_PTR(ret);
+}
+
+void destroy_local_trace_uprobe(struct trace_event_call *event_call)
+{
+       struct trace_uprobe *tu;
+
+       tu = container_of(event_call, struct trace_uprobe, tp.call);
+
+       kfree(tu->tp.call.print_fmt);
+       tu->tp.call.print_fmt = NULL;
+
+       free_trace_uprobe(tu);
+}
+#endif /* CONFIG_PERF_EVENTS */
+
 /* Make a trace interface for controlling probe points */
 static __init int init_uprobe_trace(void)
 {
diff --git a/tools/arch/powerpc/include/uapi/asm/unistd.h b/tools/arch/powerpc/include/uapi/asm/unistd.h
new file mode 100644 (file)
index 0000000..389c36f
--- /dev/null
@@ -0,0 +1,402 @@
+/* SPDX-License-Identifier: GPL-2.0+ WITH Linux-syscall-note */
+/*
+ * This file contains the system call numbers.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ */
+#ifndef _UAPI_ASM_POWERPC_UNISTD_H_
+#define _UAPI_ASM_POWERPC_UNISTD_H_
+
+
+#define __NR_restart_syscall     0
+#define __NR_exit                1
+#define __NR_fork                2
+#define __NR_read                3
+#define __NR_write               4
+#define __NR_open                5
+#define __NR_close               6
+#define __NR_waitpid             7
+#define __NR_creat               8
+#define __NR_link                9
+#define __NR_unlink             10
+#define __NR_execve             11
+#define __NR_chdir              12
+#define __NR_time               13
+#define __NR_mknod              14
+#define __NR_chmod              15
+#define __NR_lchown             16
+#define __NR_break              17
+#define __NR_oldstat            18
+#define __NR_lseek              19
+#define __NR_getpid             20
+#define __NR_mount              21
+#define __NR_umount             22
+#define __NR_setuid             23
+#define __NR_getuid             24
+#define __NR_stime              25
+#define __NR_ptrace             26
+#define __NR_alarm              27
+#define __NR_oldfstat           28
+#define __NR_pause              29
+#define __NR_utime              30
+#define __NR_stty               31
+#define __NR_gtty               32
+#define __NR_access             33
+#define __NR_nice               34
+#define __NR_ftime              35
+#define __NR_sync               36
+#define __NR_kill               37
+#define __NR_rename             38
+#define __NR_mkdir              39
+#define __NR_rmdir              40
+#define __NR_dup                41
+#define __NR_pipe               42
+#define __NR_times              43
+#define __NR_prof               44
+#define __NR_brk                45
+#define __NR_setgid             46
+#define __NR_getgid             47
+#define __NR_signal             48
+#define __NR_geteuid            49
+#define __NR_getegid            50
+#define __NR_acct               51
+#define __NR_umount2            52
+#define __NR_lock               53
+#define __NR_ioctl              54
+#define __NR_fcntl              55
+#define __NR_mpx                56
+#define __NR_setpgid            57
+#define __NR_ulimit             58
+#define __NR_oldolduname        59
+#define __NR_umask              60
+#define __NR_chroot             61
+#define __NR_ustat              62
+#define __NR_dup2               63
+#define __NR_getppid            64
+#define __NR_getpgrp            65
+#define __NR_setsid             66
+#define __NR_sigaction          67
+#define __NR_sgetmask           68
+#define __NR_ssetmask           69
+#define __NR_setreuid           70
+#define __NR_setregid           71
+#define __NR_sigsuspend                 72
+#define __NR_sigpending                 73
+#define __NR_sethostname        74
+#define __NR_setrlimit          75
+#define __NR_getrlimit          76
+#define __NR_getrusage          77
+#define __NR_gettimeofday       78
+#define __NR_settimeofday       79
+#define __NR_getgroups          80
+#define __NR_setgroups          81
+#define __NR_select             82
+#define __NR_symlink            83
+#define __NR_oldlstat           84
+#define __NR_readlink           85
+#define __NR_uselib             86
+#define __NR_swapon             87
+#define __NR_reboot             88
+#define __NR_readdir            89
+#define __NR_mmap               90
+#define __NR_munmap             91
+#define __NR_truncate           92
+#define __NR_ftruncate          93
+#define __NR_fchmod             94
+#define __NR_fchown             95
+#define __NR_getpriority        96
+#define __NR_setpriority        97
+#define __NR_profil             98
+#define __NR_statfs             99
+#define __NR_fstatfs           100
+#define __NR_ioperm            101
+#define __NR_socketcall                102
+#define __NR_syslog            103
+#define __NR_setitimer         104
+#define __NR_getitimer         105
+#define __NR_stat              106
+#define __NR_lstat             107
+#define __NR_fstat             108
+#define __NR_olduname          109
+#define __NR_iopl              110
+#define __NR_vhangup           111
+#define __NR_idle              112
+#define __NR_vm86              113
+#define __NR_wait4             114
+#define __NR_swapoff           115
+#define __NR_sysinfo           116
+#define __NR_ipc               117
+#define __NR_fsync             118
+#define __NR_sigreturn         119
+#define __NR_clone             120
+#define __NR_setdomainname     121
+#define __NR_uname             122
+#define __NR_modify_ldt                123
+#define __NR_adjtimex          124
+#define __NR_mprotect          125
+#define __NR_sigprocmask       126
+#define __NR_create_module     127
+#define __NR_init_module       128
+#define __NR_delete_module     129
+#define __NR_get_kernel_syms   130
+#define __NR_quotactl          131
+#define __NR_getpgid           132
+#define __NR_fchdir            133
+#define __NR_bdflush           134
+#define __NR_sysfs             135
+#define __NR_personality       136
+#define __NR_afs_syscall       137 /* Syscall for Andrew File System */
+#define __NR_setfsuid          138
+#define __NR_setfsgid          139
+#define __NR__llseek           140
+#define __NR_getdents          141
+#define __NR__newselect                142
+#define __NR_flock             143
+#define __NR_msync             144
+#define __NR_readv             145
+#define __NR_writev            146
+#define __NR_getsid            147
+#define __NR_fdatasync         148
+#define __NR__sysctl           149
+#define __NR_mlock             150
+#define __NR_munlock           151
+#define __NR_mlockall          152
+#define __NR_munlockall                153
+#define __NR_sched_setparam            154
+#define __NR_sched_getparam            155
+#define __NR_sched_setscheduler                156
+#define __NR_sched_getscheduler                157
+#define __NR_sched_yield               158
+#define __NR_sched_get_priority_max    159
+#define __NR_sched_get_priority_min    160
+#define __NR_sched_rr_get_interval     161
+#define __NR_nanosleep         162
+#define __NR_mremap            163
+#define __NR_setresuid         164
+#define __NR_getresuid         165
+#define __NR_query_module      166
+#define __NR_poll              167
+#define __NR_nfsservctl                168
+#define __NR_setresgid         169
+#define __NR_getresgid         170
+#define __NR_prctl             171
+#define __NR_rt_sigreturn      172
+#define __NR_rt_sigaction      173
+#define __NR_rt_sigprocmask    174
+#define __NR_rt_sigpending     175
+#define __NR_rt_sigtimedwait   176
+#define __NR_rt_sigqueueinfo   177
+#define __NR_rt_sigsuspend     178
+#define __NR_pread64           179
+#define __NR_pwrite64          180
+#define __NR_chown             181
+#define __NR_getcwd            182
+#define __NR_capget            183
+#define __NR_capset            184
+#define __NR_sigaltstack       185
+#define __NR_sendfile          186
+#define __NR_getpmsg           187     /* some people actually want streams */
+#define __NR_putpmsg           188     /* some people actually want streams */
+#define __NR_vfork             189
+#define __NR_ugetrlimit                190     /* SuS compliant getrlimit */
+#define __NR_readahead         191
+#ifndef __powerpc64__                  /* these are 32-bit only */
+#define __NR_mmap2             192
+#define __NR_truncate64                193
+#define __NR_ftruncate64       194
+#define __NR_stat64            195
+#define __NR_lstat64           196
+#define __NR_fstat64           197
+#endif
+#define __NR_pciconfig_read    198
+#define __NR_pciconfig_write   199
+#define __NR_pciconfig_iobase  200
+#define __NR_multiplexer       201
+#define __NR_getdents64                202
+#define __NR_pivot_root                203
+#ifndef __powerpc64__
+#define __NR_fcntl64           204
+#endif
+#define __NR_madvise           205
+#define __NR_mincore           206
+#define __NR_gettid            207
+#define __NR_tkill             208
+#define __NR_setxattr          209
+#define __NR_lsetxattr         210
+#define __NR_fsetxattr         211
+#define __NR_getxattr          212
+#define __NR_lgetxattr         213
+#define __NR_fgetxattr         214
+#define __NR_listxattr         215
+#define __NR_llistxattr                216
+#define __NR_flistxattr                217
+#define __NR_removexattr       218
+#define __NR_lremovexattr      219
+#define __NR_fremovexattr      220
+#define __NR_futex             221
+#define __NR_sched_setaffinity 222
+#define __NR_sched_getaffinity 223
+/* 224 currently unused */
+#define __NR_tuxcall           225
+#ifndef __powerpc64__
+#define __NR_sendfile64                226
+#endif
+#define __NR_io_setup          227
+#define __NR_io_destroy                228
+#define __NR_io_getevents      229
+#define __NR_io_submit         230
+#define __NR_io_cancel         231
+#define __NR_set_tid_address   232
+#define __NR_fadvise64         233
+#define __NR_exit_group                234
+#define __NR_lookup_dcookie    235
+#define __NR_epoll_create      236
+#define __NR_epoll_ctl         237
+#define __NR_epoll_wait                238
+#define __NR_remap_file_pages  239
+#define __NR_timer_create      240
+#define __NR_timer_settime     241
+#define __NR_timer_gettime     242
+#define __NR_timer_getoverrun  243
+#define __NR_timer_delete      244
+#define __NR_clock_settime     245
+#define __NR_clock_gettime     246
+#define __NR_clock_getres      247
+#define __NR_clock_nanosleep   248
+#define __NR_swapcontext       249
+#define __NR_tgkill            250
+#define __NR_utimes            251
+#define __NR_statfs64          252
+#define __NR_fstatfs64         253
+#ifndef __powerpc64__
+#define __NR_fadvise64_64      254
+#endif
+#define __NR_rtas              255
+#define __NR_sys_debug_setcontext 256
+/* Number 257 is reserved for vserver */
+#define __NR_migrate_pages     258
+#define __NR_mbind             259
+#define __NR_get_mempolicy     260
+#define __NR_set_mempolicy     261
+#define __NR_mq_open           262
+#define __NR_mq_unlink         263
+#define __NR_mq_timedsend      264
+#define __NR_mq_timedreceive   265
+#define __NR_mq_notify         266
+#define __NR_mq_getsetattr     267
+#define __NR_kexec_load                268
+#define __NR_add_key           269
+#define __NR_request_key       270
+#define __NR_keyctl            271
+#define __NR_waitid            272
+#define __NR_ioprio_set                273
+#define __NR_ioprio_get                274
+#define __NR_inotify_init      275
+#define __NR_inotify_add_watch 276
+#define __NR_inotify_rm_watch  277
+#define __NR_spu_run           278
+#define __NR_spu_create                279
+#define __NR_pselect6          280
+#define __NR_ppoll             281
+#define __NR_unshare           282
+#define __NR_splice            283
+#define __NR_tee               284
+#define __NR_vmsplice          285
+#define __NR_openat            286
+#define __NR_mkdirat           287
+#define __NR_mknodat           288
+#define __NR_fchownat          289
+#define __NR_futimesat         290
+#ifdef __powerpc64__
+#define __NR_newfstatat                291
+#else
+#define __NR_fstatat64         291
+#endif
+#define __NR_unlinkat          292
+#define __NR_renameat          293
+#define __NR_linkat            294
+#define __NR_symlinkat         295
+#define __NR_readlinkat                296
+#define __NR_fchmodat          297
+#define __NR_faccessat         298
+#define __NR_get_robust_list   299
+#define __NR_set_robust_list   300
+#define __NR_move_pages                301
+#define __NR_getcpu            302
+#define __NR_epoll_pwait       303
+#define __NR_utimensat         304
+#define __NR_signalfd          305
+#define __NR_timerfd_create    306
+#define __NR_eventfd           307
+#define __NR_sync_file_range2  308
+#define __NR_fallocate         309
+#define __NR_subpage_prot      310
+#define __NR_timerfd_settime   311
+#define __NR_timerfd_gettime   312
+#define __NR_signalfd4         313
+#define __NR_eventfd2          314
+#define __NR_epoll_create1     315
+#define __NR_dup3              316
+#define __NR_pipe2             317
+#define __NR_inotify_init1     318
+#define __NR_perf_event_open   319
+#define __NR_preadv            320
+#define __NR_pwritev           321
+#define __NR_rt_tgsigqueueinfo 322
+#define __NR_fanotify_init     323
+#define __NR_fanotify_mark     324
+#define __NR_prlimit64         325
+#define __NR_socket            326
+#define __NR_bind              327
+#define __NR_connect           328
+#define __NR_listen            329
+#define __NR_accept            330
+#define __NR_getsockname       331
+#define __NR_getpeername       332
+#define __NR_socketpair                333
+#define __NR_send              334
+#define __NR_sendto            335
+#define __NR_recv              336
+#define __NR_recvfrom          337
+#define __NR_shutdown          338
+#define __NR_setsockopt                339
+#define __NR_getsockopt                340
+#define __NR_sendmsg           341
+#define __NR_recvmsg           342
+#define __NR_recvmmsg          343
+#define __NR_accept4           344
+#define __NR_name_to_handle_at 345
+#define __NR_open_by_handle_at 346
+#define __NR_clock_adjtime     347
+#define __NR_syncfs            348
+#define __NR_sendmmsg          349
+#define __NR_setns             350
+#define __NR_process_vm_readv  351
+#define __NR_process_vm_writev 352
+#define __NR_finit_module      353
+#define __NR_kcmp              354
+#define __NR_sched_setattr     355
+#define __NR_sched_getattr     356
+#define __NR_renameat2         357
+#define __NR_seccomp           358
+#define __NR_getrandom         359
+#define __NR_memfd_create      360
+#define __NR_bpf               361
+#define __NR_execveat          362
+#define __NR_switch_endian     363
+#define __NR_userfaultfd       364
+#define __NR_membarrier                365
+#define __NR_mlock2            378
+#define __NR_copy_file_range   379
+#define __NR_preadv2           380
+#define __NR_pwritev2          381
+#define __NR_kexec_file_load   382
+#define __NR_statx             383
+#define __NR_pkey_alloc                384
+#define __NR_pkey_free         385
+#define __NR_pkey_mprotect     386
+
+#endif /* _UAPI_ASM_POWERPC_UNISTD_H_ */
index c378f00..5b6dda3 100644 (file)
@@ -82,7 +82,11 @@ FEATURE_TESTS_EXTRA :=                  \
          liberty-z                      \
          libunwind-debug-frame          \
          libunwind-debug-frame-arm      \
-         libunwind-debug-frame-aarch64
+         libunwind-debug-frame-aarch64  \
+         cxx                            \
+         llvm                           \
+         llvm-version                   \
+         clang
 
 FEATURE_TESTS ?= $(FEATURE_TESTS_BASIC)
 
index 0a490cb..dac9563 100644 (file)
@@ -54,7 +54,10 @@ FILES=                                          \
          test-jvmti.bin                                \
          test-sched_getcpu.bin                 \
          test-setns.bin                                \
-         test-libopencsd.bin
+         test-libopencsd.bin                   \
+         test-clang.bin                                \
+         test-llvm.bin                         \
+         test-llvm-version.bin
 
 FILES := $(addprefix $(OUTPUT),$(FILES))
 
@@ -257,11 +260,13 @@ $(OUTPUT)test-llvm.bin:
                -I$(shell $(LLVM_CONFIG) --includedir)          \
                -L$(shell $(LLVM_CONFIG) --libdir)              \
                $(shell $(LLVM_CONFIG) --libs Core BPF)         \
-               $(shell $(LLVM_CONFIG) --system-libs)
+               $(shell $(LLVM_CONFIG) --system-libs)           \
+               > $(@:.bin=.make.output) 2>&1
 
 $(OUTPUT)test-llvm-version.bin:
        $(BUILDXX) -std=gnu++11                                 \
-               -I$(shell $(LLVM_CONFIG) --includedir)
+               -I$(shell $(LLVM_CONFIG) --includedir)          \
+               > $(@:.bin=.make.output) 2>&1
 
 $(OUTPUT)test-clang.bin:
        $(BUILDXX) -std=gnu++11                                 \
@@ -271,7 +276,8 @@ $(OUTPUT)test-clang.bin:
                  -lclangFrontend -lclangEdit -lclangLex        \
                  -lclangAST -Wl,--end-group                    \
                $(shell $(LLVM_CONFIG) --libs Core option)      \
-               $(shell $(LLVM_CONFIG) --system-libs)
+               $(shell $(LLVM_CONFIG) --system-libs)           \
+               > $(@:.bin=.make.output) 2>&1
 
 -include $(OUTPUT)*.d
 
index ca16027..63440cc 100644 (file)
@@ -98,7 +98,7 @@ static inline int test_and_set_bit(int nr, unsigned long *addr)
 
 /**
  * bitmap_alloc - Allocate bitmap
- * @nr: Bit to set
+ * @nbits: Number of bits
  */
 static inline unsigned long *bitmap_alloc(int nbits)
 {
index e0739a1..912b85b 100644 (file)
@@ -380,10 +380,14 @@ struct perf_event_attr {
        __u32                   bp_type;
        union {
                __u64           bp_addr;
+               __u64           kprobe_func; /* for perf_kprobe */
+               __u64           uprobe_path; /* for perf_uprobe */
                __u64           config1; /* extension of config */
        };
        union {
                __u64           bp_len;
+               __u64           kprobe_addr; /* when kprobe_func == NULL */
+               __u64           probe_offset; /* for perf_[k,u]probe */
                __u64           config2; /* extension of config1 */
        };
        __u64   branch_sample_type; /* enum perf_branch_sample_type */
@@ -444,17 +448,18 @@ struct perf_event_query_bpf {
 /*
  * Ioctls that can be done on a perf event fd:
  */
-#define PERF_EVENT_IOC_ENABLE          _IO ('$', 0)
-#define PERF_EVENT_IOC_DISABLE         _IO ('$', 1)
-#define PERF_EVENT_IOC_REFRESH         _IO ('$', 2)
-#define PERF_EVENT_IOC_RESET           _IO ('$', 3)
-#define PERF_EVENT_IOC_PERIOD          _IOW('$', 4, __u64)
-#define PERF_EVENT_IOC_SET_OUTPUT      _IO ('$', 5)
-#define PERF_EVENT_IOC_SET_FILTER      _IOW('$', 6, char *)
-#define PERF_EVENT_IOC_ID              _IOR('$', 7, __u64 *)
-#define PERF_EVENT_IOC_SET_BPF         _IOW('$', 8, __u32)
-#define PERF_EVENT_IOC_PAUSE_OUTPUT    _IOW('$', 9, __u32)
-#define PERF_EVENT_IOC_QUERY_BPF       _IOWR('$', 10, struct perf_event_query_bpf *)
+#define PERF_EVENT_IOC_ENABLE                  _IO ('$', 0)
+#define PERF_EVENT_IOC_DISABLE                 _IO ('$', 1)
+#define PERF_EVENT_IOC_REFRESH                 _IO ('$', 2)
+#define PERF_EVENT_IOC_RESET                   _IO ('$', 3)
+#define PERF_EVENT_IOC_PERIOD                  _IOW('$', 4, __u64)
+#define PERF_EVENT_IOC_SET_OUTPUT              _IO ('$', 5)
+#define PERF_EVENT_IOC_SET_FILTER              _IOW('$', 6, char *)
+#define PERF_EVENT_IOC_ID                      _IOR('$', 7, __u64 *)
+#define PERF_EVENT_IOC_SET_BPF                 _IOW('$', 8, __u32)
+#define PERF_EVENT_IOC_PAUSE_OUTPUT            _IOW('$', 9, __u32)
+#define PERF_EVENT_IOC_QUERY_BPF               _IOWR('$', 10, struct perf_event_query_bpf *)
+#define PERF_EVENT_IOC_MODIFY_ATTRIBUTES       _IOW('$', 11, struct perf_event_attr *)
 
 enum perf_event_ioc_flags {
        PERF_IOC_FLAG_GROUP             = 1U << 0,
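
Together with the new perf_kprobe and perf_uprobe PMUs, the attr unions above
let perf_event_open() create probes without going through tracefs, and
PERF_EVENT_IOC_MODIFY_ATTRIBUTES allows updating an existing event's
attributes (e.g. moving a hw breakpoint) without recreating it. A minimal
sketch of kprobe creation, not part of this patch, assuming the dynamic PMU
type is read from /sys/bus/event_source/devices/kprobe/type (the value 6 and
the probed function are placeholders):

    #include <linux/perf_event.h>
    #include <string.h>
    #include <sys/syscall.h>
    #include <unistd.h>

    static int open_kprobe(const char *func)
    {
            struct perf_event_attr attr;

            memset(&attr, 0, sizeof(attr));
            attr.size = sizeof(attr);
            attr.type = 6;                         /* placeholder: read from sysfs */
            attr.kprobe_func = (__u64)(unsigned long)func; /* e.g. "do_sys_open" */
            attr.probe_offset = 0;                 /* offset within func */

            /* pid == -1, cpu == 0: all tasks on CPU 0 */
            return syscall(__NR_perf_event_open, &attr, -1, 0, -1, 0);
    }
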
index b24afc0..6a12bbf 100644 (file)
@@ -315,12 +315,8 @@ int filename__read_int(const char *filename, int *value)
        return err;
 }
 
-/*
- * Parses @value out of @filename with strtoull.
- * By using 0 for base, the strtoull detects the
- * base automatically (see man strtoull).
- */
-int filename__read_ull(const char *filename, unsigned long long *value)
+static int filename__read_ull_base(const char *filename,
+                                  unsigned long long *value, int base)
 {
        char line[64];
        int fd = open(filename, O_RDONLY), err = -1;
@@ -329,7 +325,7 @@ int filename__read_ull(const char *filename, unsigned long long *value)
                return -1;
 
        if (read(fd, line, sizeof(line)) > 0) {
-               *value = strtoull(line, NULL, 0);
+               *value = strtoull(line, NULL, base);
                if (*value != ULLONG_MAX)
                        err = 0;
        }
@@ -338,6 +334,25 @@ int filename__read_ull(const char *filename, unsigned long long *value)
        return err;
 }
 
+/*
+ * Parses @value out of @filename with strtoull.
+ * By using 16 for base, the number is always treated as hex.
+ */
+int filename__read_xll(const char *filename, unsigned long long *value)
+{
+       return filename__read_ull_base(filename, value, 16);
+}
+
+/*
+ * Parses @value out of @filename with strtoull.
+ * By using 0 for base, strtoull detects the
+ * base automatically (see man strtoull).
+ */
+int filename__read_ull(const char *filename, unsigned long long *value)
+{
+       return filename__read_ull_base(filename, value, 0);
+}
+
 #define STRERR_BUFSIZE  128     /* For the buffer size of strerror_r */
 
 int filename__read_str(const char *filename, char **buf, size_t *sizep)
@@ -417,7 +432,8 @@ int procfs__read_str(const char *entry, char **buf, size_t *sizep)
        return filename__read_str(path, buf, sizep);
 }
 
-int sysfs__read_ull(const char *entry, unsigned long long *value)
+static int sysfs__read_ull_base(const char *entry,
+                               unsigned long long *value, int base)
 {
        char path[PATH_MAX];
        const char *sysfs = sysfs__mountpoint();
@@ -427,7 +443,17 @@ int sysfs__read_ull(const char *entry, unsigned long long *value)
 
        snprintf(path, sizeof(path), "%s/%s", sysfs, entry);
 
-       return filename__read_ull(path, value);
+       return filename__read_ull_base(path, value, base);
+}
+
+int sysfs__read_xll(const char *entry, unsigned long long *value)
+{
+       return sysfs__read_ull_base(entry, value, 16);
+}
+
+int sysfs__read_ull(const char *entry, unsigned long long *value)
+{
+       return sysfs__read_ull_base(entry, value, 0);
 }
 
 int sysfs__read_int(const char *entry, int *value)
index dda49de..92d03b8 100644 (file)
@@ -30,6 +30,7 @@ FS(bpf_fs)
 
 int filename__read_int(const char *filename, int *value);
 int filename__read_ull(const char *filename, unsigned long long *value);
+int filename__read_xll(const char *filename, unsigned long long *value);
 int filename__read_str(const char *filename, char **buf, size_t *sizep);
 
 int filename__write_int(const char *filename, int value);
@@ -39,6 +40,7 @@ int procfs__read_str(const char *entry, char **buf, size_t *sizep);
 int sysctl__read_int(const char *sysctl, int *value);
 int sysfs__read_int(const char *entry, int *value);
 int sysfs__read_ull(const char *entry, unsigned long long *value);
+int sysfs__read_xll(const char *entry, unsigned long long *value);
 int sysfs__read_str(const char *entry, char **buf, size_t *sizep);
 int sysfs__read_bool(const char *entry, bool *value);
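
The new _xll variants are needed because some sysfs attributes print bare hex
without a "0x" prefix, which the base-0 readers misparse ("deadbeef" yields 0,
"123abc" yields 123). A minimal usage sketch, not part of this patch; the
attribute path is hypothetical:

    #include <stdio.h>
    #include <api/fs/fs.h>

    int main(void)
    {
            unsigned long long val;

            /* Any attribute printing bare hex, e.g. "deadbeef\n", needs
             * the base-16 reader; it returns 0 on success. */
            if (sysfs__read_xll("devices/platform/example/reg", &val) == 0)
                    printf("reg = %#llx\n", val);
            return 0;
    }
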
 
index d6d6553..6aad830 100644 (file)
@@ -22,6 +22,6 @@ char *str_error_r(int errnum, char *buf, size_t buflen)
 {
        int err = strerror_r(errnum, buf, buflen);
        if (err)
-               snprintf(buf, buflen, "INTERNAL ERROR: strerror_r(%d, %p, %zd)=%d", errnum, buf, buflen, err);
+               snprintf(buf, buflen, "INTERNAL ERROR: strerror_r(%d, [buf], %zd)=%d", errnum, buflen, err);
        return buf;
 }
index 914cb8e..689b6a1 100644 (file)
@@ -38,6 +38,10 @@ int kallsyms__parse(const char *filename, void *arg,
 
                len = hex2u64(line, &start);
 
+               /* Skip the line if we failed to parse the address. */
+               if (!len)
+                       continue;
+
                len++;
                if (len + 2 >= line_len)
                        continue;
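
hex2u64() returns the number of hex characters it consumed, so a return of 0
means the line does not begin with an address and can be skipped. For
reference, a well-formed /proc/kallsyms line:

    /*
     *   ffffffff810f1af0 T do_sys_open
     *   ^ address (hex)  ^ type ^ symbol name
     */
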
index c635eab..292809c 100644 (file)
@@ -21,7 +21,7 @@ If there is no debug info in the object, then annotated assembly is displayed.
 OPTIONS
 -------
 -i::
---input=::
+--input=<file>::
         Input file name. (default: perf.data unless stdin is a fifo)
 
 -d::
@@ -69,7 +69,7 @@ OPTIONS
 
 --stdio:: Use the stdio interface.
 
---stdio-color::
+--stdio-color=<mode>::
        'always', 'never' or 'auto', allowing configuring color output
        via the command line, in addition to via "color.ui" .perfconfig.
        Use '--stdio-color always' to generate color even when redirecting
@@ -84,7 +84,7 @@ OPTIONS
 --gtk:: Use the GTK interface.
 
 -C::
---cpu:: Only report samples for the list of CPUs provided. Multiple CPUs can
+--cpu=<cpu>:: Only report samples for the list of CPUs provided. Multiple CPUs can
        be provided as a comma-separated list with no space: 0,1. Ranges of
        CPUs are specified with -: 0-2. Default is to report samples on all
        CPUs.
index 8224142..095aebd 100644 (file)
@@ -116,7 +116,7 @@ and calls standard perf record command.
 Following perf record options are configured by default:
 (check perf record man page for details)
 
-  -W,-d,--sample-cpu
+  -W,-d,--phys-data,--sample-cpu
 
 Unless specified otherwise with '-e' option, following events are monitored by
 default:
index 90bb4aa..c871807 100644 (file)
@@ -1,5 +1,5 @@
 perf-data(1)
-==============
+============
 
 NAME
 ----
index 721a447..b80c843 100644 (file)
@@ -1,5 +1,5 @@
 perf-ftrace(1)
-=============
+==============
 
 NAME
 ----
index 479fc32..85b8ac6 100644 (file)
@@ -25,6 +25,10 @@ OPTIONS
 --input=<file>::
        Select the input file (default: perf.data unless stdin is a fifo)
 
+-f::
+--force::
+       Don't do ownership validation
+
 -v::
 --verbose::
         Be more verbose. (show symbol address, etc)
@@ -61,7 +65,7 @@ OPTIONS
        default, but this option shows live (currently allocated) pages
        instead.  (This option works with --page option only)
 
---time::
+--time=<start>,<stop>::
        Only analyze samples within given time window: <start>,<stop>. Times
        have the format seconds.microseconds. If start is not given (i.e., time
        string is ',x.y') then analysis starts at the beginning of the file. If
index e2a897a..2549c34 100644 (file)
@@ -141,7 +141,13 @@ on the first memory controller on socket 0 of a Intel Xeon system
 
 Each memory controller has its own PMU.  Measuring the complete system
 bandwidth would require specifying all imc PMUs (see perf list output),
-and adding the values together.
+and adding the values together. To simplify creation of multiple events,
+prefix and glob matching is supported in the PMU name, and the prefix
+'uncore_' is also ignored when performing the match. So the command above
+can be expanded to all memory controllers using either of these syntaxes:
+
+  perf stat -C 0 -a imc/cas_count_read/,imc/cas_count_write/ -I 1000 ...
+  perf stat -C 0 -a *imc*/cas_count_read/,*imc*/cas_count_write/ -I 1000 ...
 
 This example measures the combined core power every second
 
index 4be08a1..b021141 100644 (file)
@@ -28,6 +28,10 @@ OPTIONS
 <command>...::
        Any command you can specify in a shell.
 
+-f::
+--force::
+       Don't do ownership validation
+
 -t::
 --type=::
        Select the memory operation type: load or store (default: load,store)
index 3eea6de..cc37b3a 100644 (file)
@@ -191,9 +191,16 @@ OPTIONS
 -i::
 --no-inherit::
        Child tasks do not inherit counters.
+
 -F::
 --freq=::
-       Profile at this frequency.
+       Profile at this frequency. Use 'max' to use the current maximum
+       allowed frequency, i.e. the value in the kernel.perf_event_max_sample_rate
+       sysctl. Sampling will throttle down to the current maximum allowed
+       frequency. See --strict-freq.
+
+--strict-freq::
+       Fail if the specified frequency can't be used.
 
 -m::
 --mmap-pages=::
@@ -308,7 +315,11 @@ can be provided. Each cgroup is applied to the corresponding event, i.e., first
 to first event, second cgroup to second event and so on. It is possible to provide
 an empty cgroup (monitor all the time) using, e.g., -G foo,,bar. Cgroups must have
 corresponding events, i.e., they always refer to events defined earlier on the command
-line.
+line. To track multiple events for a specific cgroup, either use
+'-e e1 -e e2 -G foo,foo' or just '-e e1 -e e2 -G foo'.
+
+To monitor, say, 'cycles' both for a cgroup and system wide, this command
+line can be used: 'perf stat -e cycles -G cgroup_name -a -e cycles'.
 
 -b::
 --branch-any::
index 907e505..cba16d8 100644 (file)
@@ -354,7 +354,8 @@ OPTIONS
         Path to objdump binary.
 
 --group::
-       Show event group information together.
+       Show event group information together. It forces group output even
+       when there are no groups defined in the data file.
 
 --demangle::
        Demangle symbol names to human readable form. It's enabled by default,
@@ -367,7 +368,7 @@ OPTIONS
        Use the data addresses of samples in addition to instruction addresses
        to build the histograms.  To generate meaningful output, the perf.data
        file must have been obtained using perf record -d -W and using a
-       special event -e cpu/mem-loads/ or -e cpu/mem-stores/. See
+       special event -e cpu/mem-loads/p or -e cpu/mem-stores/p. See
        'perf mem' for simpler access.
 
 --percent-limit::
index c7e50f2..bb33601 100644 (file)
@@ -1,5 +1,5 @@
 perf-sched(1)
-==============
+=============
 
 NAME
 ----
index 142606c..5a1f681 100644 (file)
@@ -1,5 +1,5 @@
 perf-script-perl(1)
-==================
+===================
 
 NAME
 ----
index 7730c1d..36ec025 100644 (file)
@@ -303,6 +303,9 @@ OPTIONS
 --show-lost-events
        Display lost events i.e. events of type PERF_RECORD_LOST.
 
+--show-round-events
+       Display finished round events i.e. events of type PERF_RECORD_FINISHED_ROUND.
+
 --demangle::
        Demangle symbol names to human readable form. It's enabled by default,
        disable with --no-demangle.
index 823fce7..f15b306 100644 (file)
@@ -49,6 +49,13 @@ report::
          parameters are defined by corresponding entries in
          /sys/bus/event_source/devices/<pmu>/format/*
 
+       Note that the last two syntaxes support prefix and glob matching in
+       the PMU name to simplify creation of events across multiple instances
+       of the same type of PMU in large systems (e.g. memory controller PMUs).
+       Multiple PMU instances are typical for uncore PMUs, so the prefix
+       'uncore_' is also ignored when performing this match.
+
 -i::
 --no-inherit::
         child tasks do not inherit counters
@@ -118,7 +125,11 @@ can be provided. Each cgroup is applied to the corresponding event, i.e., first
 to first event, second cgroup to second event and so on. It is possible to provide
 an empty cgroup (monitor all the time) using, e.g., -G foo,,bar. Cgroups must have
 corresponding events, i.e., they always refer to events defined earlier on the command
-line.
+line. To track multiple events for a specific cgroup, either use
+'-e e1 -e e2 -G foo,foo' or just '-e e1 -e e2 -G foo'.
+
+To monitor, say, 'cycles' both for a cgroup and system wide, this command
+line can be used: 'perf stat -e cycles -G cgroup_name -a -e cycles'.
 
 -o file::
 --output file::
@@ -146,6 +157,16 @@ Print count deltas every N milliseconds (minimum: 10ms)
 The overhead percentage could be high in some cases, for instance with small, sub 100ms intervals.  Use with caution.
        example: 'perf stat -I 1000 -e cycles -a sleep 5'
 
+--interval-count times::
+Print count deltas for a fixed number of times.
+This option should be used together with the "-I" option.
+       example: 'perf stat -I 1000 --interval-count 2 -e cycles -a'
+
+--timeout msecs::
+Stop the 'perf stat' session and print count deltas after N milliseconds (minimum: 10 ms).
+This option is not supported with the "-I" option.
+       example: 'perf stat --timeout 2000 -e cycles -a'
+
 --metric-only::
 Only print computed metrics. Print them in a single line.
 Don't show any raw values. Not supported with --per-thread.
@@ -246,6 +267,16 @@ taskset.
 --no-merge::
 Do not merge results from same PMUs.
 
+When multiple events are created from a single event specification,
+stat will, by default, aggregate the event counts and show the result
+in a single row. This option disables that behavior and shows
+the individual events and counts.
+
+Multiple events are created from a single event specification when:
+1. Prefix or glob matching is used for the PMU name.
+2. Aliases, which are listed immediately after the Kernel PMU events
+   by perf list, are used.
+
 --smi-cost::
 Measure SMI cost if msr/aperf/ and msr/smi/ events are supported.
 
index 8a32cc7..114fda1 100644 (file)
@@ -55,7 +55,9 @@ Default is to monitor all CPUS.
 
 -F <freq>::
 --freq=<freq>::
-       Profile at this frequency.
+       Profile at this frequency. Use 'max' to use the current maximum
+       allowed frequency, i.e. the value in the kernel.perf_event_max_sample_rate
+       sysctl.
 
 -i::
 --inherit::
@@ -65,6 +67,9 @@ Default is to monitor all CPUS.
 --vmlinux=<path>::
        Path to vmlinux.  Required for annotation functionality.
 
+--ignore-vmlinux::
+       Ignore vmlinux files.
+
 -m <pages>::
 --mmap-pages=<pages>::
        Number of mmap data pages (must be a power of two) or size
index 33a88e9..5a7035c 100644 (file)
@@ -63,6 +63,31 @@ filter out the startup phase of the program, which is often very different.
 --uid=::
         Record events in threads owned by uid. Name or number.
 
+-G::
+--cgroup::
+       Record events in threads in a cgroup.
+
+       Look for cgroups to set at the /sys/fs/cgroup/perf_event directory, then
+       remove the /sys/fs/cgroup/perf_event/ part and try:
+
+               perf trace -G A -e sched:*switch
+
+       This will set all raw_syscalls:sys_{enter,exit}, pgfault, vfs_getname,
+       etc. _and_ sched:sched_switch to the 'A' cgroup, while:
+
+               perf trace -e sched:*switch -G A
+
+       will only set the sched:sched_switch event to the 'A' cgroup; all the
+       other events (raw_syscalls:sys_{enter,exit}, etc.) are left without
+       a cgroup (on the root cgroup, system wide, etc).
+
+       Multiple cgroups:
+
+               perf trace -G A -e sched:*switch -G B
+
+       The syscall events go to the 'A' cgroup and sched:sched_switch goes
+       to the 'B' cgroup.
+
 --filter-pids=::
        Filter out events for these pids and for 'trace' itself (comma separated list).
 
index f7d85e8..d00f0d5 100644 (file)
@@ -485,10 +485,5 @@ in pmu-tools parser. This allows to read perf.data from python and dump it.
 quipper
 
 The quipper C++ parser is available at
-https://chromium.googlesource.com/chromiumos/platform2
+http://github.com/google/perf_data_converter/tree/master/src/quipper
 
-It is under the chromiumos-wide-profiling/ subdirectory. This library can
-convert a perf data file to a protobuf and vice versa.
-
-Unfortunately this parser tends to be many versions behind and may not be able
-to parse data files generated by recent perf.
index 0dfdaa9..98ff736 100644 (file)
@@ -27,6 +27,8 @@ NO_SYSCALL_TABLE := 1
 # Additional ARCH settings for ppc
 ifeq ($(SRCARCH),powerpc)
   NO_PERF_REGS := 0
+  NO_SYSCALL_TABLE := 0
+  CFLAGS += -I$(OUTPUT)arch/powerpc/include/generated
   LIBUNWIND_LIBS := -lunwind -lunwind-ppc64
 endif
 
@@ -73,7 +75,7 @@ endif
 # Disable it on all other architectures in case libdw unwind
 # support is detected in system. Add supported architectures
 # to the check.
-ifneq ($(SRCARCH),$(filter $(SRCARCH),x86 arm powerpc s390))
+ifneq ($(SRCARCH),$(filter $(SRCARCH),x86 arm arm64 powerpc s390))
   NO_LIBDW_DWARF_UNWIND := 1
 endif
 
@@ -666,25 +668,10 @@ else
       ifneq ($(feature-libpython), 1)
         $(call disable-python,No 'Python.h' (for Python 2.x support) was found: disables Python support - please install python-devel/python-dev)
       else
-        ifneq ($(feature-libpython-version), 1)
-          $(warning Python 3 is not yet supported; please set)
-          $(warning PYTHON and/or PYTHON_CONFIG appropriately.)
-          $(warning If you also have Python 2 installed, then)
-          $(warning try something like:)
-          $(warning $(and ,))
-          $(warning $(and ,)  make PYTHON=python2)
-          $(warning $(and ,))
-          $(warning Otherwise, disable Python support entirely:)
-          $(warning $(and ,))
-          $(warning $(and ,)  make NO_LIBPYTHON=1)
-          $(warning $(and ,))
-          $(error   $(and ,))
-        else
-          LDFLAGS += $(PYTHON_EMBED_LDFLAGS)
-          EXTLIBS += $(PYTHON_EMBED_LIBADD)
-          LANG_BINDINGS += $(obj-perf)python/perf.so
-          $(call detected,CONFIG_LIBPYTHON)
-        endif
+         LDFLAGS += $(PYTHON_EMBED_LDFLAGS)
+         EXTLIBS += $(PYTHON_EMBED_LIBADD)
+         LANG_BINDINGS += $(obj-perf)python/perf.so
+         $(call detected,CONFIG_LIBPYTHON)
       endif
     endif
   endif
index 0123280..f7517e1 100644 (file)
@@ -296,7 +296,7 @@ PYTHON_EXTBUILD_LIB := $(PYTHON_EXTBUILD)lib/
 PYTHON_EXTBUILD_TMP := $(PYTHON_EXTBUILD)tmp/
 export PYTHON_EXTBUILD_LIB PYTHON_EXTBUILD_TMP
 
-python-clean := $(call QUIET_CLEAN, python) $(RM) -r $(PYTHON_EXTBUILD) $(OUTPUT)python/perf.so
+python-clean := $(call QUIET_CLEAN, python) $(RM) -r $(PYTHON_EXTBUILD) $(OUTPUT)python/perf*.so
 
 PYTHON_EXT_SRCS := $(shell grep -v ^\# util/python-ext-sources)
 PYTHON_EXT_DEPS := util/python-ext-sources util/setup.py $(LIBTRACEEVENT) $(LIBAPI)
@@ -473,7 +473,7 @@ $(OUTPUT)python/perf.so: $(PYTHON_EXT_SRCS) $(PYTHON_EXT_DEPS) $(LIBTRACEEVENT_D
          $(PYTHON_WORD) util/setup.py \
          --quiet build_ext; \
        mkdir -p $(OUTPUT)python && \
-       cp $(PYTHON_EXTBUILD_LIB)perf.so $(OUTPUT)python/
+       cp $(PYTHON_EXTBUILD_LIB)perf*.so $(OUTPUT)python/
 
 please_set_SHELL_PATH_to_a_more_modern_shell:
        $(Q)$$(:)
@@ -708,15 +708,15 @@ TAG_FILES= ../../include/uapi/linux/perf_event.h
 
 TAGS:
        $(QUIET_GEN)$(RM) TAGS; \
-       $(FIND) $(TAG_FOLDERS) -name '*.[hcS]' -print | xargs etags -a $(TAG_FILES)
+       $(FIND) $(TAG_FOLDERS) -name '*.[hcS]' -print -o -name '*.cpp' -print | xargs etags -a $(TAG_FILES)
 
 tags:
        $(QUIET_GEN)$(RM) tags; \
-       $(FIND) $(TAG_FOLDERS) -name '*.[hcS]' -print | xargs ctags -a $(TAG_FILES)
+       $(FIND) $(TAG_FOLDERS) -name '*.[hcS]' -print -o -name '*.cpp' -print | xargs ctags -a $(TAG_FILES)
 
 cscope:
        $(QUIET_GEN)$(RM) cscope*; \
-       $(FIND) $(TAG_FOLDERS) -name '*.[hcS]' -print | xargs cscope -b $(TAG_FILES)
+       $(FIND) $(TAG_FOLDERS) -name '*.[hcS]' -print -o -name '*.cpp' -print | xargs cscope -b $(TAG_FILES)
 
 ### Testing rules
 
index 2323581..fa639e3 100644 (file)
@@ -68,7 +68,7 @@ struct auxtrace_record
        bool found_spe = false;
        static struct perf_pmu **arm_spe_pmus = NULL;
        static int nr_spes = 0;
-       int i;
+       int i = 0;
 
        if (!evlist)
                return NULL;
index fbfc055..5c655ad 100644 (file)
@@ -298,12 +298,17 @@ cs_etm_info_priv_size(struct auxtrace_record *itr __maybe_unused,
 {
        int i;
        int etmv3 = 0, etmv4 = 0;
-       const struct cpu_map *cpus = evlist->cpus;
+       struct cpu_map *event_cpus = evlist->cpus;
+       struct cpu_map *online_cpus = cpu_map__new(NULL);
 
        /* cpu map is not empty, we have specific CPUs to work with */
-       if (!cpu_map__empty(cpus)) {
-               for (i = 0; i < cpu_map__nr(cpus); i++) {
-                       if (cs_etm_is_etmv4(itr, cpus->map[i]))
+       if (!cpu_map__empty(event_cpus)) {
+               for (i = 0; i < cpu__max_cpu(); i++) {
+                       if (!cpu_map__has(event_cpus, i) ||
+                           !cpu_map__has(online_cpus, i))
+                               continue;
+
+                       if (cs_etm_is_etmv4(itr, i))
                                etmv4++;
                        else
                                etmv3++;
@@ -311,6 +316,9 @@ cs_etm_info_priv_size(struct auxtrace_record *itr __maybe_unused,
        } else {
                /* get configuration for all CPUs in the system */
                for (i = 0; i < cpu__max_cpu(); i++) {
+                       if (!cpu_map__has(online_cpus, i))
+                               continue;
+
                        if (cs_etm_is_etmv4(itr, i))
                                etmv4++;
                        else
@@ -318,6 +326,8 @@ cs_etm_info_priv_size(struct auxtrace_record *itr __maybe_unused,
                }
        }
 
+       cpu_map__put(online_cpus);
+
        return (CS_ETM_HEADER_SIZE +
               (etmv4 * CS_ETMV4_PRIV_SIZE) +
               (etmv3 * CS_ETMV3_PRIV_SIZE));
@@ -447,7 +457,9 @@ static int cs_etm_info_fill(struct auxtrace_record *itr,
        int i;
        u32 offset;
        u64 nr_cpu, type;
-       const struct cpu_map *cpus = session->evlist->cpus;
+       struct cpu_map *cpu_map;
+       struct cpu_map *event_cpus = session->evlist->cpus;
+       struct cpu_map *online_cpus = cpu_map__new(NULL);
        struct cs_etm_recording *ptr =
                        container_of(itr, struct cs_etm_recording, itr);
        struct perf_pmu *cs_etm_pmu = ptr->cs_etm_pmu;
@@ -458,8 +470,21 @@ static int cs_etm_info_fill(struct auxtrace_record *itr,
        if (!session->evlist->nr_mmaps)
                return -EINVAL;
 
-       /* If the cpu_map is empty all CPUs are involved */
-       nr_cpu = cpu_map__empty(cpus) ? cpu__max_cpu() : cpu_map__nr(cpus);
+       /* If the cpu_map is empty all online CPUs are involved */
+       if (cpu_map__empty(event_cpus)) {
+               cpu_map = online_cpus;
+       } else {
+               /* Make sure all specified CPUs are online */
+               for (i = 0; i < cpu_map__nr(event_cpus); i++) {
+                       if (cpu_map__has(event_cpus, i) &&
+                           !cpu_map__has(online_cpus, i))
+                               return -EINVAL;
+               }
+
+               cpu_map = event_cpus;
+       }
+
+       nr_cpu = cpu_map__nr(cpu_map);
        /* Get PMU type as dynamically assigned by the core */
        type = cs_etm_pmu->type;
 
@@ -472,15 +497,11 @@ static int cs_etm_info_fill(struct auxtrace_record *itr,
 
        offset = CS_ETM_SNAPSHOT + 1;
 
-       /* cpu map is not empty, we have specific CPUs to work with */
-       if (!cpu_map__empty(cpus)) {
-               for (i = 0; i < cpu_map__nr(cpus) && offset < priv_size; i++)
-                       cs_etm_get_metadata(cpus->map[i], &offset, itr, info);
-       } else {
-               /* get configuration for all CPUs in the system */
-               for (i = 0; i < cpu__max_cpu(); i++)
+       for (i = 0; i < cpu__max_cpu() && offset < priv_size; i++)
+               if (cpu_map__has(cpu_map, i))
                        cs_etm_get_metadata(i, &offset, itr, info);
-       }
+
+       cpu_map__put(online_cpus);
 
        return 0;
 }
diff --git a/tools/perf/arch/arm64/include/arch-tests.h b/tools/perf/arch/arm64/include/arch-tests.h
new file mode 100644 (file)
index 0000000..90ec4c8
--- /dev/null
@@ -0,0 +1,12 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef ARCH_TESTS_H
+#define ARCH_TESTS_H
+
+#ifdef HAVE_DWARF_UNWIND_SUPPORT
+struct thread;
+struct perf_sample;
+#endif
+
+extern struct test arch_tests[];
+
+#endif
index b30eff9..883c57f 100644 (file)
@@ -1,2 +1,4 @@
 libperf-y += regs_load.o
 libperf-y += dwarf-unwind.o
+
+libperf-y += arch-tests.o
diff --git a/tools/perf/arch/arm64/tests/arch-tests.c b/tools/perf/arch/arm64/tests/arch-tests.c
new file mode 100644 (file)
index 0000000..5b1543c
--- /dev/null
@@ -0,0 +1,16 @@
+// SPDX-License-Identifier: GPL-2.0
+#include <string.h>
+#include "tests/tests.h"
+#include "arch-tests.h"
+
+struct test arch_tests[] = {
+#ifdef HAVE_DWARF_UNWIND_SUPPORT
+       {
+               .desc = "DWARF unwind",
+               .func = test__dwarf_unwind,
+       },
+#endif
+       {
+               .func = NULL,
+       },
+};
index c0b8dfe..68f8a8e 100644 (file)
@@ -2,6 +2,7 @@ libperf-y += header.o
 libperf-y += sym-handling.o
 libperf-$(CONFIG_DWARF)     += dwarf-regs.o
 libperf-$(CONFIG_LOCAL_LIBUNWIND) += unwind-libunwind.o
+libperf-$(CONFIG_LIBDW_DWARF_UNWIND) += unwind-libdw.o
 
 libperf-$(CONFIG_AUXTRACE) += ../../arm/util/pmu.o \
                              ../../arm/util/auxtrace.o \
diff --git a/tools/perf/arch/arm64/util/unwind-libdw.c b/tools/perf/arch/arm64/util/unwind-libdw.c
new file mode 100644 (file)
index 0000000..7623d85
--- /dev/null
@@ -0,0 +1,60 @@
+// SPDX-License-Identifier: GPL-2.0
+#include <elfutils/libdwfl.h>
+#include "../../util/unwind-libdw.h"
+#include "../../util/perf_regs.h"
+#include "../../util/event.h"
+
+bool libdw__arch_set_initial_registers(Dwfl_Thread *thread, void *arg)
+{
+       struct unwind_info *ui = arg;
+       struct regs_dump *user_regs = &ui->sample->user_regs;
+       Dwarf_Word dwarf_regs[PERF_REG_ARM64_MAX], dwarf_pc;
+
+#define REG(r) ({                                              \
+       Dwarf_Word val = 0;                                     \
+       perf_reg_value(&val, user_regs, PERF_REG_ARM64_##r);    \
+       val;                                                    \
+})
+
+       dwarf_regs[0]  = REG(X0);
+       dwarf_regs[1]  = REG(X1);
+       dwarf_regs[2]  = REG(X2);
+       dwarf_regs[3]  = REG(X3);
+       dwarf_regs[4]  = REG(X4);
+       dwarf_regs[5]  = REG(X5);
+       dwarf_regs[6]  = REG(X6);
+       dwarf_regs[7]  = REG(X7);
+       dwarf_regs[8]  = REG(X8);
+       dwarf_regs[9]  = REG(X9);
+       dwarf_regs[10] = REG(X10);
+       dwarf_regs[11] = REG(X11);
+       dwarf_regs[12] = REG(X12);
+       dwarf_regs[13] = REG(X13);
+       dwarf_regs[14] = REG(X14);
+       dwarf_regs[15] = REG(X15);
+       dwarf_regs[16] = REG(X16);
+       dwarf_regs[17] = REG(X17);
+       dwarf_regs[18] = REG(X18);
+       dwarf_regs[19] = REG(X19);
+       dwarf_regs[20] = REG(X20);
+       dwarf_regs[21] = REG(X21);
+       dwarf_regs[22] = REG(X22);
+       dwarf_regs[23] = REG(X23);
+       dwarf_regs[24] = REG(X24);
+       dwarf_regs[25] = REG(X25);
+       dwarf_regs[26] = REG(X26);
+       dwarf_regs[27] = REG(X27);
+       dwarf_regs[28] = REG(X28);
+       dwarf_regs[29] = REG(X29);
+       dwarf_regs[30] = REG(LR);
+       dwarf_regs[31] = REG(SP);
+
+       if (!dwfl_thread_state_registers(thread, 0, PERF_REG_ARM64_MAX,
+                                        dwarf_regs))
+               return false;
+
+       dwarf_pc = REG(PC);
+       dwfl_thread_state_register_pc(thread, dwarf_pc);
+
+       return true;
+}
index 42dab7c..a111239 100644 (file)
@@ -6,3 +6,28 @@ endif
 HAVE_KVM_STAT_SUPPORT := 1
 PERF_HAVE_ARCH_REGS_QUERY_REGISTER_OFFSET := 1
 PERF_HAVE_JITDUMP := 1
+
+#
+# Syscall table generation for perf
+#
+
+out    := $(OUTPUT)arch/powerpc/include/generated/asm
+header32 := $(out)/syscalls_32.c
+header64 := $(out)/syscalls_64.c
+sysdef := $(srctree)/tools/arch/powerpc/include/uapi/asm/unistd.h
+sysprf := $(srctree)/tools/perf/arch/powerpc/entry/syscalls/
+systbl := $(sysprf)/mksyscalltbl
+
+# Create output directory if not already present
+_dummy := $(shell [ -d '$(out)' ] || mkdir -p '$(out)')
+
+$(header64): $(sysdef) $(systbl)
+       $(Q)$(SHELL) '$(systbl)' '64' '$(CC)' $(sysdef) > $@
+
+$(header32): $(sysdef) $(systbl)
+       $(Q)$(SHELL) '$(systbl)' '32' '$(CC)' $(sysdef) > $@
+
+clean::
+       $(call QUIET_CLEAN, powerpc) $(RM) $(header32) $(header64)
+
+archheaders: $(header32) $(header64)
diff --git a/tools/perf/arch/powerpc/entry/syscalls/mksyscalltbl b/tools/perf/arch/powerpc/entry/syscalls/mksyscalltbl
new file mode 100755 (executable)
index 0000000..ef52e1d
--- /dev/null
@@ -0,0 +1,37 @@
+#!/bin/sh
+# SPDX-License-Identifier: GPL-2.0
+#
+# Generate system call table for perf. Derived from
+# s390 script.
+#
+# Copyright IBM Corp. 2017
+# Author(s):  Hendrik Brueckner <brueckner@linux.vnet.ibm.com>
+# Changed by: Ravi Bangoria <ravi.bangoria@linux.vnet.ibm.com>
+
+wordsize=$1
+gcc=$2
+input=$3
+
+if ! test -r $input; then
+       echo "Could not read input file" >&2
+       exit 1
+fi
+
+create_table()
+{
+       local wordsize=$1
+       local max_nr
+
+       echo "static const char *syscalltbl_powerpc_${wordsize}[] = {"
+       while read sc nr; do
+               printf '\t[%d] = "%s",\n' $nr $sc
+               max_nr=$nr
+       done
+       echo '};'
+       echo "#define SYSCALLTBL_POWERPC_${wordsize}_MAX_ID $max_nr"
+}
+
+$gcc -m${wordsize} -E -dM -x c  $input        \
+       |sed -ne 's/^#define __NR_//p' \
+       |sort -t' ' -k2 -nu            \
+       |create_table ${wordsize}
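
The generated header has this shape; the entries and the max id are
illustrative, the real values come from the preprocessed unistd.h:

    static const char *syscalltbl_powerpc_64[] = {
            [0] = "restart_syscall",
            [1] = "exit",
            [2] = "fork",
            [3] = "read",
            [4] = "write",
            /* ... */
    };
    #define SYSCALLTBL_POWERPC_64_MAX_ID 383    /* illustrative */
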
index 8c72b44..46c2183 100644 (file)
@@ -1,6 +1,112 @@
 // SPDX-License-Identifier: GPL-2.0
 #include <linux/compiler.h>
 
+static int s390_call__parse(struct arch *arch, struct ins_operands *ops,
+                           struct map *map)
+{
+       char *endptr, *tok, *name;
+       struct addr_map_symbol target = {
+               .map = map,
+       };
+
+       tok = strchr(ops->raw, ',');
+       if (!tok)
+               return -1;
+
+       ops->target.addr = strtoull(tok + 1, &endptr, 16);
+
+       name = strchr(endptr, '<');
+       if (name == NULL)
+               return -1;
+
+       name++;
+
+       if (arch->objdump.skip_functions_char &&
+           strchr(name, arch->objdump.skip_functions_char))
+               return -1;
+
+       tok = strchr(name, '>');
+       if (tok == NULL)
+               return -1;
+
+       *tok = '\0';
+       ops->target.name = strdup(name);
+       *tok = '>';
+
+       if (ops->target.name == NULL)
+               return -1;
+       target.addr = map__objdump_2mem(map, ops->target.addr);
+
+       if (map_groups__find_ams(&target) == 0 &&
+           map__rip_2objdump(target.map, map->map_ip(target.map, target.addr)) == ops->target.addr)
+               ops->target.sym = target.sym;
+
+       return 0;
+}
+
+static int call__scnprintf(struct ins *ins, char *bf, size_t size,
+                          struct ins_operands *ops);
+
+static struct ins_ops s390_call_ops = {
+       .parse     = s390_call__parse,
+       .scnprintf = call__scnprintf,
+};
+
+static int s390_mov__parse(struct arch *arch __maybe_unused,
+                          struct ins_operands *ops,
+                          struct map *map __maybe_unused)
+{
+       char *s = strchr(ops->raw, ','), *target, *endptr;
+
+       if (s == NULL)
+               return -1;
+
+       *s = '\0';
+       ops->source.raw = strdup(ops->raw);
+       *s = ',';
+
+       if (ops->source.raw == NULL)
+               return -1;
+
+       target = ++s;
+       ops->target.raw = strdup(target);
+       if (ops->target.raw == NULL)
+               goto out_free_source;
+
+       ops->target.addr = strtoull(target, &endptr, 16);
+       if (endptr == target)
+               goto out_free_target;
+
+       s = strchr(endptr, '<');
+       if (s == NULL)
+               goto out_free_target;
+       endptr = strchr(s + 1, '>');
+       if (endptr == NULL)
+               goto out_free_target;
+
+       *endptr = '\0';
+       ops->target.name = strdup(s + 1);
+       *endptr = '>';
+       if (ops->target.name == NULL)
+               goto out_free_target;
+
+       return 0;
+
+out_free_target:
+       zfree(&ops->target.raw);
+out_free_source:
+       zfree(&ops->source.raw);
+       return -1;
+}
+
+static int mov__scnprintf(struct ins *ins, char *bf, size_t size,
+                         struct ins_operands *ops);
+
+static struct ins_ops s390_mov_ops = {
+       .parse     = s390_mov__parse,
+       .scnprintf = mov__scnprintf,
+};
+
 static struct ins_ops *s390__associate_ins_ops(struct arch *arch, const char *name)
 {
        struct ins_ops *ops = NULL;
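
Both parsers above work on objdump operand strings; illustrative input lines
of the form they expect (addresses and symbols made up):

    /*
     *   brasl  %r14,4d50 <abort@plt>     s390_call__parse: target address
     *                                    after ',', name between '<' and '>'
     *   lgrl   %r1,1e6c8 <tracepoints>   s390_mov__parse: source before ',',
     *                                    target address and name after it
     */
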
@@ -14,21 +120,54 @@ static struct ins_ops *s390__associate_ins_ops(struct arch *arch, const char *na
        if (!strcmp(name, "bras") ||
            !strcmp(name, "brasl") ||
            !strcmp(name, "basr"))
-               ops = &call_ops;
+               ops = &s390_call_ops;
        if (!strcmp(name, "br"))
                ops = &ret_ops;
+       /* override load/store relative to PC */
+       if (!strcmp(name, "lrl") ||
+           !strcmp(name, "lgrl") ||
+           !strcmp(name, "lgfrl") ||
+           !strcmp(name, "llgfrl") ||
+           !strcmp(name, "strl") ||
+           !strcmp(name, "stgrl"))
+               ops = &s390_mov_ops;
 
        if (ops)
                arch__associate_ins_ops(arch, name, ops);
        return ops;
 }
 
+static int s390__cpuid_parse(struct arch *arch, char *cpuid)
+{
+       unsigned int family;
+       char model[16], model_c[16], cpumf_v[16], cpumf_a[16];
+       int ret;
+
+       /*
+        * cpuid string format:
+        * "IBM,family,model-capacity,model[,cpum_cf-version,cpum_cf-authorization]"
+        */
+       ret = sscanf(cpuid, "%*[^,],%u,%[^,],%[^,],%[^,],%s", &family, model_c,
+                    model, cpumf_v, cpumf_a);
+       if (ret >= 2) {
+               arch->family = family;
+               arch->model = 0;
+               return 0;
+       }
+
+       return -1;
+}
+
 static int s390__annotate_init(struct arch *arch, char *cpuid __maybe_unused)
 {
+       int err = 0;
+
        if (!arch->initialized) {
                arch->initialized = true;
                arch->associate_instruction_ops = s390__associate_ins_ops;
+               if (cpuid)
+                       err = s390__cpuid_parse(arch, cpuid);
        }
 
-       return 0;
+       return err;
 }
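
An illustrative parse of the cpuid string above, with made-up values:

    /*
     *   cpuid   = "IBM,3906,704,M03,3.5,002f"
     *   family  = 3906, model_c = "704", model = "M03",
     *   cpumf_v = "3.5", cpumf_a = "002f"   =>   ret = 5 (>= 2, success)
     */
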
index 9fa6c3e..a4c30f1 100644 (file)
@@ -1,8 +1,9 @@
 /*
  * Implementation of get_cpuid().
  *
- * Copyright 2014 IBM Corp.
+ * Copyright IBM Corp. 2014, 2018
  * Author(s): Alexander Yarygin <yarygin@linux.vnet.ibm.com>
+ *           Thomas Richter <tmricht@linux.vnet.ibm.com>
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License (version 2 only)
 #include <unistd.h>
 #include <stdio.h>
 #include <string.h>
+#include <ctype.h>
 
 #include "../../util/header.h"
+#include "../../util/util.h"
+
+#define SYSINFO_MANU   "Manufacturer:"
+#define SYSINFO_TYPE   "Type:"
+#define SYSINFO_MODEL  "Model:"
+#define SRVLVL_CPUMF   "CPU-MF:"
+#define SRVLVL_VERSION "version="
+#define SRVLVL_AUTHORIZATION   "authorization="
+#define SYSINFO                "/proc/sysinfo"
+#define SRVLVL         "/proc/service_levels"
 
 int get_cpuid(char *buffer, size_t sz)
 {
-       const char *cpuid = "IBM/S390";
+       char *cp, *line = NULL, *line2;
+       char type[8], model[33], version[8], manufacturer[32], authorization[8];
+       int tpsize = 0, mdsize = 0, vssize = 0, mfsize = 0, atsize = 0;
+       ssize_t read;
+       size_t line_sz;
+       size_t nbytes;
+       FILE *sysinfo;
+
+       /*
+        * Scan /proc/sysinfo line by line and read out values for
+        * Manufacturer:, Type: and Model:, for example:
+        * Manufacturer:    IBM
+        * Type:            2964
+        * Model:           702              N96
+        * The first word is the Model Capacity and the second word is
+        * Model (can be omitted). Both words have a maximum size of 16
+        * bytes.
+        */
+       memset(manufacturer, 0, sizeof(manufacturer));
+       memset(type, 0, sizeof(type));
+       memset(model, 0, sizeof(model));
+       memset(version, 0, sizeof(version));
+       memset(authorization, 0, sizeof(authorization));
+
+       sysinfo = fopen(SYSINFO, "r");
+       if (sysinfo == NULL)
+               return -1;
+
+       while ((read = getline(&line, &line_sz, sysinfo)) != -1) {
+               if (!strncmp(line, SYSINFO_MANU, strlen(SYSINFO_MANU))) {
+                       line2 = line + strlen(SYSINFO_MANU);
+
+                       while ((cp = strtok_r(line2, "\n ", &line2))) {
+                               mfsize += scnprintf(manufacturer + mfsize,
+                                                   sizeof(manufacturer) - mfsize, "%s", cp);
+                       }
+               }
+
+               if (!strncmp(line, SYSINFO_TYPE, strlen(SYSINFO_TYPE))) {
+                       line2 = line + strlen(SYSINFO_TYPE);
 
-       if (strlen(cpuid) + 1 > sz)
+                       while ((cp = strtok_r(line2, "\n ", &line2))) {
+                               tpsize += scnprintf(type + tpsize,
+                                                   sizeof(type) - tpsize, "%s", cp);
+                       }
+               }
+
+               if (!strncmp(line, SYSINFO_MODEL, strlen(SYSINFO_MODEL))) {
+                       line2 = line + strlen(SYSINFO_MODEL);
+
+                       while ((cp = strtok_r(line2, "\n ", &line2))) {
+                               mdsize += scnprintf(model + mdsize, sizeof(model) - mdsize,
+                                                   "%s%s", model[0] ? "," : "", cp);
+                       }
+                       break;
+               }
+       }
+       fclose(sysinfo);
+
+       /* Missing manufacturer, type or model information should not happen */
+       if (!manufacturer[0] || !type[0] || !model[0])
                return -1;
 
-       strcpy(buffer, cpuid);
-       return 0;
+       /*
+        * Scan /proc/service_levels and return the CPU-MF counter facility
+        * version number and authorization level.
+        * Optional, does not exist on z/VM guests.
+        */
+       sysinfo = fopen(SRVLVL, "r");
+       if (sysinfo == NULL)
+               goto skip_sysinfo;
+       while ((read = getline(&line, &line_sz, sysinfo)) != -1) {
+               if (strncmp(line, SRVLVL_CPUMF, strlen(SRVLVL_CPUMF)))
+                       continue;
+
+               line2 = line + strlen(SRVLVL_CPUMF);
+               while ((cp = strtok_r(line2, "\n ", &line2))) {
+                       if (!strncmp(cp, SRVLVL_VERSION,
+                                    strlen(SRVLVL_VERSION))) {
+                               char *sep = strchr(cp, '=');
+
+                               vssize += scnprintf(version + vssize,
+                                                   sizeof(version) - vssize, "%s", sep + 1);
+                       }
+                       if (!strncmp(cp, SRVLVL_AUTHORIZATION,
+                                    strlen(SRVLVL_AUTHORIZATION))) {
+                               char *sep = strchr(cp, '=');
+
+                               atsize += scnprintf(authorization + atsize,
+                                                   sizeof(authorization) - atsize, "%s", sep + 1);
+                       }
+               }
+       }
+       fclose(sysinfo);
+
+skip_sysinfo:
+       free(line);
+
+       if (version[0] && authorization[0])
+               nbytes = snprintf(buffer, sz, "%s,%s,%s,%s,%s",
+                                 manufacturer, type, model, version,
+                                 authorization);
+       else
+               nbytes = snprintf(buffer, sz, "%s,%s,%s", manufacturer, type,
+                                 model);
+       return (nbytes >= sz) ? -1 : 0;
+}
+
+char *get_cpuid_str(struct perf_pmu *pmu __maybe_unused)
+{
+       char *buf = malloc(128);
+
+       if (buf && get_cpuid(buf, 128) < 0)
+               zfree(&buf);
+       return buf;
+}
+
+/*
+ * Compare the cpuid string returned by get_cpuid() function
+ * with the name generated by the jevents file read from
+ * pmu-events/arch/s390/mapfile.csv.
+ *
+ * Parameter mapcpuid is the cpuid as stored in the
+ * pmu-events/arch/s390/mapfile.csv. This is just the type number.
+ * Parameter cpuid is the cpuid returned by function get_cpuid().
+ */
+int strcmp_cpuid_str(const char *mapcpuid, const char *cpuid)
+{
+       char *cp = strchr(cpuid, ',');
+
+       if (cp == NULL)
+               return -1;
+       return strncmp(cp + 1, mapcpuid, strlen(mapcpuid));
 }
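
How the two halves fit together, with made-up values: get_cpuid() produces
the full string, mapfile.csv carries just the machine type, and
strcmp_cpuid_str() compares the type field:

    /*
     *   get_cpuid()        -> "IBM,3906,704,M03,3.5,002f"
     *   mapfile.csv cpuid  -> "3906"
     *   strcmp_cpuid_str() -> strncmp("3906,704,...", "3906", 4) == 0
     */
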
index 06abe81..7a77216 100644 (file)
@@ -60,6 +60,7 @@ int test__perf_time_to_tsc(struct test *test __maybe_unused, int subtest __maybe
        union perf_event *event;
        u64 test_tsc, comm1_tsc, comm2_tsc;
        u64 test_time, comm1_time = 0, comm2_time = 0;
+       struct perf_mmap *md;
 
        threads = thread_map__new(-1, getpid(), UINT_MAX);
        CHECK_NOT_NULL__(threads);
@@ -109,7 +110,11 @@ int test__perf_time_to_tsc(struct test *test __maybe_unused, int subtest __maybe
        perf_evlist__disable(evlist);
 
        for (i = 0; i < evlist->nr_mmaps; i++) {
-               while ((event = perf_evlist__mmap_read(evlist, i)) != NULL) {
+               md = &evlist->mmap[i];
+               if (perf_mmap__read_init(md) < 0)
+                       continue;
+
+               while ((event = perf_mmap__read_event(md)) != NULL) {
                        struct perf_sample sample;
 
                        if (event->header.type != PERF_RECORD_COMM ||
@@ -128,8 +133,9 @@ int test__perf_time_to_tsc(struct test *test __maybe_unused, int subtest __maybe
                                comm2_time = sample.time;
                        }
 next_event:
-                       perf_evlist__mmap_consume(evlist, i);
+                       perf_mmap__consume(md);
                }
+               perf_mmap__read_done(md);
        }
 
        if (!comm1_time || !comm2_time)
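
Condensed from the hunk above, the consumer loop the reworked per-mmap read
API expects (a sketch only; declarations as in the surrounding test):

    struct perf_mmap *md;
    union perf_event *event;
    int i;

    for (i = 0; i < evlist->nr_mmaps; i++) {
            md = &evlist->mmap[i];
            if (perf_mmap__read_init(md) < 0)
                    continue;

            while ((event = perf_mmap__read_event(md)) != NULL) {
                    /* ... handle the event ... */
                    perf_mmap__consume(md);
            }
            perf_mmap__read_done(md);
    }
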
index 6aa3f2a..b135af6 100644 (file)
@@ -37,15 +37,11 @@ struct auxtrace_record *auxtrace_record__init_intel(struct perf_evlist *evlist,
        intel_pt_pmu = perf_pmu__find(INTEL_PT_PMU_NAME);
        intel_bts_pmu = perf_pmu__find(INTEL_BTS_PMU_NAME);
 
-       if (evlist) {
-               evlist__for_each_entry(evlist, evsel) {
-                       if (intel_pt_pmu &&
-                           evsel->attr.type == intel_pt_pmu->type)
-                               found_pt = true;
-                       if (intel_bts_pmu &&
-                           evsel->attr.type == intel_bts_pmu->type)
-                               found_bts = true;
-               }
+       evlist__for_each_entry(evlist, evsel) {
+               if (intel_pt_pmu && evsel->attr.type == intel_pt_pmu->type)
+                       found_pt = true;
+               if (intel_bts_pmu && evsel->attr.type == intel_bts_pmu->type)
+                       found_bts = true;
        }
 
        if (found_pt && found_bts) {
index f15731a..ead6ae4 100644 (file)
@@ -44,6 +44,7 @@ struct perf_annotate {
        bool       full_paths;
        bool       print_line;
        bool       skip_missing;
+       bool       has_br_stack;
        const char *sym_hist_filter;
        const char *cpu_list;
        DECLARE_BITMAP(cpu_bitmap, MAX_NR_CPUS);
@@ -146,16 +147,73 @@ static void process_branch_stack(struct branch_stack *bs, struct addr_location *
        free(bi);
 }
 
+static int hist_iter__branch_callback(struct hist_entry_iter *iter,
+                                     struct addr_location *al __maybe_unused,
+                                     bool single __maybe_unused,
+                                     void *arg __maybe_unused)
+{
+       struct hist_entry *he = iter->he;
+       struct branch_info *bi;
+       struct perf_sample *sample = iter->sample;
+       struct perf_evsel *evsel = iter->evsel;
+       int err;
+
+       hist__account_cycles(sample->branch_stack, al, sample, false);
+
+       bi = he->branch_info;
+       err = addr_map_symbol__inc_samples(&bi->from, sample, evsel->idx);
+
+       if (err)
+               goto out;
+
+       err = addr_map_symbol__inc_samples(&bi->to, sample, evsel->idx);
+
+out:
+       return err;
+}
+
+static int process_branch_callback(struct perf_evsel *evsel,
+                                  struct perf_sample *sample,
+                                  struct addr_location *al __maybe_unused,
+                                  struct perf_annotate *ann,
+                                  struct machine *machine)
+{
+       struct hist_entry_iter iter = {
+               .evsel          = evsel,
+               .sample         = sample,
+               .add_entry_cb   = hist_iter__branch_callback,
+               .hide_unresolved        = symbol_conf.hide_unresolved,
+               .ops            = &hist_iter_branch,
+       };
+
+       struct addr_location a;
+       int ret;
+
+       if (machine__resolve(machine, &a, sample) < 0)
+               return -1;
+
+       if (a.sym == NULL)
+               return 0;
+
+       if (a.map != NULL)
+               a.map->dso->hit = 1;
+
+       ret = hist_entry_iter__add(&iter, &a, PERF_MAX_STACK_DEPTH, ann);
+       return ret;
+}
+
 static int perf_evsel__add_sample(struct perf_evsel *evsel,
                                  struct perf_sample *sample,
                                  struct addr_location *al,
-                                 struct perf_annotate *ann)
+                                 struct perf_annotate *ann,
+                                 struct machine *machine)
 {
        struct hists *hists = evsel__hists(evsel);
        struct hist_entry *he;
        int ret;
 
-       if (ann->sym_hist_filter != NULL &&
+       if ((!ann->has_br_stack || !ui__has_annotation()) &&
+           ann->sym_hist_filter != NULL &&
            (al->sym == NULL ||
             strcmp(ann->sym_hist_filter, al->sym->name) != 0)) {
                /* We're only interested in a symbol named sym_hist_filter */
@@ -178,6 +236,9 @@ static int perf_evsel__add_sample(struct perf_evsel *evsel,
         */
        process_branch_stack(sample->branch_stack, al, sample);
 
+       if (ann->has_br_stack && ui__has_annotation())
+               return process_branch_callback(evsel, sample, al, ann, machine);
+
        he = hists__add_entry(hists, al, NULL, NULL, NULL, sample, true);
        if (he == NULL)
                return -ENOMEM;
@@ -206,7 +267,8 @@ static int process_sample_event(struct perf_tool *tool,
        if (ann->cpu_list && !test_bit(sample->cpu, ann->cpu_bitmap))
                goto out_put;
 
-       if (!al.filtered && perf_evsel__add_sample(evsel, sample, &al, ann)) {
+       if (!al.filtered &&
+           perf_evsel__add_sample(evsel, sample, &al, ann, machine)) {
                pr_warning("problem incrementing symbol count, "
                           "skipping event\n");
                ret = -1;
@@ -238,6 +300,10 @@ static void hists__find_annotations(struct hists *hists,
                if (he->ms.sym == NULL || he->ms.map->dso->annotate_warned)
                        goto find_next;
 
+               if (ann->sym_hist_filter &&
+                   (strcmp(he->ms.sym->name, ann->sym_hist_filter) != 0))
+                       goto find_next;
+
                notes = symbol__annotation(he->ms.sym);
                if (notes->src == NULL) {
 find_next:
@@ -269,6 +335,7 @@ find_next:
                        nd = rb_next(nd);
                } else if (use_browser == 1) {
                        key = hist_entry__tui_annotate(he, evsel, NULL);
+
                        switch (key) {
                        case -1:
                                if (!ann->skip_missing)
@@ -489,6 +556,9 @@ int cmd_annotate(int argc, const char **argv)
        if (annotate.session == NULL)
                return -1;
 
+       annotate.has_br_stack = perf_header__has_feat(&annotate.session->header,
+                                                     HEADER_BRANCH_STACK);
+
        ret = symbol__annotation_init();
        if (ret < 0)
                goto out_delete;
@@ -499,9 +569,6 @@ int cmd_annotate(int argc, const char **argv)
        if (ret < 0)
                goto out_delete;
 
-       if (setup_sorting(NULL) < 0)
-               usage_with_options(annotate_usage, options);
-
        if (annotate.use_stdio)
                use_browser = 0;
        else if (annotate.use_tui)
@@ -511,6 +578,15 @@ int cmd_annotate(int argc, const char **argv)
 
        setup_browser(true);
 
+       if (use_browser == 1 && annotate.has_br_stack) {
+               sort__mode = SORT_MODE__BRANCH;
+               if (setup_sorting(annotate.session->evlist) < 0)
+                       usage_with_options(annotate_usage, options);
+       } else {
+               if (setup_sorting(NULL) < 0)
+                       usage_with_options(annotate_usage, options);
+       }
+
        ret = __cmd_annotate(&annotate);
 
 out_delete:
index 539c3d4..2126bfb 100644 (file)
@@ -32,6 +32,7 @@
 #include "evsel.h"
 #include "ui/browsers/hists.h"
 #include "thread.h"
+#include "mem2node.h"
 
 struct c2c_hists {
        struct hists            hists;
@@ -49,6 +50,7 @@ struct c2c_hist_entry {
        struct c2c_hists        *hists;
        struct c2c_stats         stats;
        unsigned long           *cpuset;
+       unsigned long           *nodeset;
        struct c2c_stats        *node_stats;
        unsigned int             cacheline_idx;
 
@@ -59,6 +61,11 @@ struct c2c_hist_entry {
         * because of its callchain dynamic entry
         */
        struct hist_entry       he;
+
+       unsigned long            paddr;
+       unsigned long            paddr_cnt;
+       bool                     paddr_zero;
+       char                    *nodestr;
 };
 
 static char const *coalesce_default = "pid,iaddr";
@@ -66,6 +73,7 @@ static char const *coalesce_default = "pid,iaddr";
 struct perf_c2c {
        struct perf_tool        tool;
        struct c2c_hists        hists;
+       struct mem2node         mem2node;
 
        unsigned long           **nodes;
        int                      nodes_cnt;
@@ -123,6 +131,10 @@ static void *c2c_he_zalloc(size_t size)
        if (!c2c_he->cpuset)
                return NULL;
 
+       c2c_he->nodeset = bitmap_alloc(c2c.nodes_cnt);
+       if (!c2c_he->nodeset)
+               return NULL;
+
        c2c_he->node_stats = zalloc(c2c.nodes_cnt * sizeof(*c2c_he->node_stats));
        if (!c2c_he->node_stats)
                return NULL;
@@ -145,6 +157,8 @@ static void c2c_he_free(void *he)
        }
 
        free(c2c_he->cpuset);
+       free(c2c_he->nodeset);
+       free(c2c_he->nodestr);
        free(c2c_he->node_stats);
        free(c2c_he);
 }
@@ -194,6 +208,28 @@ static void c2c_he__set_cpu(struct c2c_hist_entry *c2c_he,
        set_bit(sample->cpu, c2c_he->cpuset);
 }
 
+static void c2c_he__set_node(struct c2c_hist_entry *c2c_he,
+                            struct perf_sample *sample)
+{
+       int node;
+
+       if (!sample->phys_addr) {
+               c2c_he->paddr_zero = true;
+               return;
+       }
+
+       node = mem2node__node(&c2c.mem2node, sample->phys_addr);
+       if (WARN_ONCE(node < 0, "WARNING: failed to find node\n"))
+               return;
+
+       set_bit(node, c2c_he->nodeset);
+
+       if (c2c_he->paddr != sample->phys_addr) {
+               c2c_he->paddr_cnt++;
+               c2c_he->paddr = sample->phys_addr;
+       }
+}
+
 static void compute_stats(struct c2c_hist_entry *c2c_he,
                          struct c2c_stats *stats,
                          u64 weight)
@@ -237,9 +273,12 @@ static int process_sample_event(struct perf_tool *tool __maybe_unused,
        if (mi == NULL)
                return -ENOMEM;
 
-       mi_dup = memdup(mi, sizeof(*mi));
-       if (!mi_dup)
-               goto free_mi;
+       /*
+        * The mi object is released in hists__add_entry_ops,
+        * if it gets sorted out into existing data, so we need
+        * to take the copy now.
+        */
+       mi_dup = mem_info__get(mi);
 
        c2c_decode_stats(&stats, mi);
 
@@ -247,13 +286,14 @@ static int process_sample_event(struct perf_tool *tool __maybe_unused,
                                  &al, NULL, NULL, mi,
                                  sample, true);
        if (he == NULL)
-               goto free_mi_dup;
+               goto free_mi;
 
        c2c_he = container_of(he, struct c2c_hist_entry, he);
        c2c_add_stats(&c2c_he->stats, &stats);
        c2c_add_stats(&c2c_hists->stats, &stats);
 
        c2c_he__set_cpu(c2c_he, sample);
+       c2c_he__set_node(c2c_he, sample);
 
        hists__inc_nr_samples(&c2c_hists->hists, he->filtered);
        ret = hist_entry__append_callchain(he, sample);
@@ -272,19 +312,15 @@ static int process_sample_event(struct perf_tool *tool __maybe_unused,
 
                mi = mi_dup;
 
-               mi_dup = memdup(mi, sizeof(*mi));
-               if (!mi_dup)
-                       goto free_mi;
-
                c2c_hists = he__get_c2c_hists(he, c2c.cl_sort, 2);
                if (!c2c_hists)
-                       goto free_mi_dup;
+                       goto free_mi;
 
                he = hists__add_entry_ops(&c2c_hists->hists, &c2c_entry_ops,
                                          &al, NULL, NULL, mi,
                                          sample, true);
                if (he == NULL)
-                       goto free_mi_dup;
+                       goto free_mi;
 
                c2c_he = container_of(he, struct c2c_hist_entry, he);
                c2c_add_stats(&c2c_he->stats, &stats);
@@ -294,6 +330,7 @@ static int process_sample_event(struct perf_tool *tool __maybe_unused,
                compute_stats(c2c_he, &stats, sample->weight);
 
                c2c_he__set_cpu(c2c_he, sample);
+               c2c_he__set_node(c2c_he, sample);
 
                hists__inc_nr_samples(&c2c_hists->hists, he->filtered);
                ret = hist_entry__append_callchain(he, sample);
@@ -303,10 +340,9 @@ out:
        addr_location__put(&al);
        return ret;
 
-free_mi_dup:
-       free(mi_dup);
 free_mi:
-       free(mi);
+       mem_info__put(mi_dup);
+       mem_info__put(mi);
        ret = -ENOMEM;
        goto out;
 }
@@ -457,6 +493,31 @@ static int dcacheline_entry(struct perf_hpp_fmt *fmt, struct perf_hpp *hpp,
        return scnprintf(hpp->buf, hpp->size, "%*s", width, HEX_STR(buf, addr));
 }
 
+static int
+dcacheline_node_entry(struct perf_hpp_fmt *fmt, struct perf_hpp *hpp,
+                     struct hist_entry *he)
+{
+       struct c2c_hist_entry *c2c_he;
+       int width = c2c_width(fmt, hpp, he->hists);
+
+       c2c_he = container_of(he, struct c2c_hist_entry, he);
+       if (WARN_ON_ONCE(!c2c_he->nodestr))
+               return 0;
+
+       return scnprintf(hpp->buf, hpp->size, "%*s", width, c2c_he->nodestr);
+}
+
+static int
+dcacheline_node_count(struct perf_hpp_fmt *fmt, struct perf_hpp *hpp,
+                     struct hist_entry *he)
+{
+       struct c2c_hist_entry *c2c_he;
+       int width = c2c_width(fmt, hpp, he->hists);
+
+       c2c_he = container_of(he, struct c2c_hist_entry, he);
+       return scnprintf(hpp->buf, hpp->size, "%*lu", width, c2c_he->paddr_cnt);
+}
+
 static int offset_entry(struct perf_hpp_fmt *fmt, struct perf_hpp *hpp,
                        struct hist_entry *he)
 {
@@ -1202,23 +1263,47 @@ cl_idx_empty_entry(struct perf_hpp_fmt *fmt, struct perf_hpp *hpp,
        }
 
 static struct c2c_dimension dim_dcacheline = {
-       .header         = HEADER_LOW("Cacheline"),
+       .header         = HEADER_SPAN("--- Cacheline ----", "Address", 2),
        .name           = "dcacheline",
        .cmp            = dcacheline_cmp,
        .entry          = dcacheline_entry,
        .width          = 18,
 };
 
-static struct c2c_header header_offset_tui = HEADER_LOW("Off");
+static struct c2c_dimension dim_dcacheline_node = {
+       .header         = HEADER_LOW("Node"),
+       .name           = "dcacheline_node",
+       .cmp            = empty_cmp,
+       .entry          = dcacheline_node_entry,
+       .width          = 4,
+};
+
+static struct c2c_dimension dim_dcacheline_count = {
+       .header         = HEADER_LOW("PA cnt"),
+       .name           = "dcacheline_count",
+       .cmp            = empty_cmp,
+       .entry          = dcacheline_node_count,
+       .width          = 6,
+};
+
+static struct c2c_header header_offset_tui = HEADER_SPAN("-----", "Off", 2);
 
 static struct c2c_dimension dim_offset = {
-       .header         = HEADER_BOTH("Data address", "Offset"),
+       .header         = HEADER_SPAN("--- Data address -", "Offset", 2),
        .name           = "offset",
        .cmp            = offset_cmp,
        .entry          = offset_entry,
        .width          = 18,
 };
 
+static struct c2c_dimension dim_offset_node = {
+       .header         = HEADER_LOW("Node"),
+       .name           = "offset_node",
+       .cmp            = empty_cmp,
+       .entry          = dcacheline_node_entry,
+       .width          = 4,
+};
+
 static struct c2c_dimension dim_iaddr = {
        .header         = HEADER_LOW("Code address"),
        .name           = "iaddr",
@@ -1538,7 +1623,10 @@ static struct c2c_dimension dim_dcacheline_num_empty = {
 
 static struct c2c_dimension *dimensions[] = {
        &dim_dcacheline,
+       &dim_dcacheline_node,
+       &dim_dcacheline_count,
        &dim_offset,
+       &dim_offset_node,
        &dim_iaddr,
        &dim_tot_hitm,
        &dim_lcl_hitm,
@@ -1841,20 +1929,56 @@ static inline int valid_hitm_or_store(struct hist_entry *he)
        return has_hitm || c2c_he->stats.store;
 }
 
-static void calc_width(struct hist_entry *he)
+static void set_node_width(struct c2c_hist_entry *c2c_he, int len)
+{
+       struct c2c_dimension *dim;
+
+       dim = &c2c.hists == c2c_he->hists ?
+             &dim_dcacheline_node : &dim_offset_node;
+
+       if (len > dim->width)
+               dim->width = len;
+}
+
+static int set_nodestr(struct c2c_hist_entry *c2c_he)
+{
+       char buf[30];
+       int len;
+
+       if (c2c_he->nodestr)
+               return 0;
+
+       if (bitmap_weight(c2c_he->nodeset, c2c.nodes_cnt)) {
+               len = bitmap_scnprintf(c2c_he->nodeset, c2c.nodes_cnt,
+                                     buf, sizeof(buf));
+       } else {
+               len = scnprintf(buf, sizeof(buf), "N/A");
+       }
+
+       set_node_width(c2c_he, len);
+       c2c_he->nodestr = strdup(buf);
+       return c2c_he->nodestr ? 0 : -ENOMEM;
+}
+
+static void calc_width(struct c2c_hist_entry *c2c_he)
 {
        struct c2c_hists *c2c_hists;
 
-       c2c_hists = container_of(he->hists, struct c2c_hists, hists);
-       hists__calc_col_len(&c2c_hists->hists, he);
+       c2c_hists = container_of(c2c_he->he.hists, struct c2c_hists, hists);
+       hists__calc_col_len(&c2c_hists->hists, &c2c_he->he);
+       set_nodestr(c2c_he);
 }
 
 static int filter_cb(struct hist_entry *he)
 {
+       struct c2c_hist_entry *c2c_he;
+
+       c2c_he = container_of(he, struct c2c_hist_entry, he);
+
        if (c2c.show_src && !he->srcline)
                he->srcline = hist_entry__get_srcline(he);
 
-       calc_width(he);
+       calc_width(c2c_he);
 
        if (!valid_hitm_or_store(he))
                he->filtered = HIST_FILTER__C2C;
@@ -1871,12 +1995,11 @@ static int resort_cl_cb(struct hist_entry *he)
        c2c_he = container_of(he, struct c2c_hist_entry, he);
        c2c_hists = c2c_he->hists;
 
-       calc_width(he);
-
        if (display && c2c_hists) {
                static unsigned int idx;
 
                c2c_he->cacheline_idx = idx++;
+               calc_width(c2c_he);
 
                c2c_hists__reinit(c2c_hists, c2c.cl_output, c2c.cl_resort);
 
@@ -2350,14 +2473,66 @@ static void perf_c2c_display(struct perf_session *session)
 }
 #endif /* HAVE_SLANG_SUPPORT */
 
-static void ui_quirks(void)
+static char *fill_line(const char *orig, int len)
+{
+       int i, j, olen = strlen(orig);
+       char *buf;
+
+       buf = zalloc(len + 1);
+       if (!buf)
+               return NULL;
+
+       j = len / 2 - olen / 2;
+
+       for (i = 0; i < j - 1; i++)
+               buf[i] = '-';
+
+       buf[i++] = ' ';
+
+       strcpy(buf + i, orig);
+
+       i += olen;
+
+       buf[i++] = ' ';
+
+       for (; i < len; i++)
+               buf[i] = '-';
+
+       return buf;
+}
+
+static int ui_quirks(void)
 {
+       const char *nodestr = "Data address";
+       char *buf;
+
        if (!c2c.use_stdio) {
                dim_offset.width  = 5;
                dim_offset.header = header_offset_tui;
+               nodestr = "CL";
        }
 
        dim_percent_hitm.header = percent_hitm_header[c2c.display];
+
+       /* Fix line zero (the spanned header line) of the dcacheline column. */
+       buf = fill_line("Cacheline", dim_dcacheline.width +
+                                    dim_dcacheline_node.width +
+                                    dim_dcacheline_count.width + 4);
+       if (!buf)
+               return -ENOMEM;
+
+       dim_dcacheline.header.line[0].text = buf;
+
+       /* Fix line zero (the spanned header line) of the offset column. */
+       buf = fill_line(nodestr, dim_offset.width +
+                                dim_offset_node.width +
+                                dim_dcacheline_count.width + 4);
+       if (!buf)
+               return -ENOMEM;
+
+       dim_offset.header.line[0].text = buf;
+
+       return 0;
 }
 
 #define CALLCHAIN_DEFAULT_OPT  "graph,0.5,caller,function,percent"
@@ -2473,7 +2648,7 @@ static int build_cl_output(char *cl_sort, bool no_source)
                "percent_lcl_hitm,"
                "percent_stores_l1hit,"
                "percent_stores_l1miss,"
-               "offset,",
+               "offset,offset_node,dcacheline_count,",
                add_pid   ? "pid," : "",
                add_tid   ? "tid," : "",
                add_iaddr ? "iaddr," : "",
@@ -2602,17 +2777,21 @@ static int perf_c2c__report(int argc, const char **argv)
                goto out;
        }
 
-       err = setup_callchain(session->evlist);
+       err = mem2node__init(&c2c.mem2node, &session->header.env);
        if (err)
                goto out_session;
 
+       err = setup_callchain(session->evlist);
+       if (err)
+               goto out_mem2node;
+
        if (symbol__init(&session->header.env) < 0)
-               goto out_session;
+               goto out_mem2node;
 
        /* No pipe support at the moment. */
        if (perf_data__is_pipe(session->data)) {
                pr_debug("No pipe support at the moment.\n");
-               goto out_session;
+               goto out_mem2node;
        }
 
        if (c2c.use_stdio)
@@ -2625,12 +2804,14 @@ static int perf_c2c__report(int argc, const char **argv)
        err = perf_session__process_events(session);
        if (err) {
                pr_err("failed to process sample\n");
-               goto out_session;
+               goto out_mem2node;
        }
 
        c2c_hists__reinit(&c2c.hists,
                        "cl_idx,"
                        "dcacheline,"
+                       "dcacheline_node,"
+                       "dcacheline_count,"
                        "tot_recs,"
                        "percent_hitm,"
                        "tot_hitm,lcl_hitm,rmt_hitm,"
@@ -2652,10 +2833,15 @@ static int perf_c2c__report(int argc, const char **argv)
 
        ui_progress__finish();
 
-       ui_quirks();
+       if (ui_quirks()) {
+               pr_err("failed to setup UI\n");
+               goto out_mem2node;
+       }
 
        perf_c2c_display(session);
 
+out_mem2node:
+       mem2node__exit(&c2c.mem2node);
 out_session:
        perf_session__delete(session);
 out:
@@ -2706,7 +2892,7 @@ static int perf_c2c__record(int argc, const char **argv)
        argc = parse_options(argc, argv, options, record_mem_usage,
                             PARSE_OPT_KEEP_UNKNOWN);
 
-       rec_argc = argc + 10; /* max number of arguments */
+       rec_argc = argc + 11; /* max number of arguments */
        rec_argv = calloc(rec_argc + 1, sizeof(char *));
        if (!rec_argv)
                return -1;
@@ -2722,6 +2908,7 @@ static int perf_c2c__record(int argc, const char **argv)
                rec_argv[i++] = "-W";
 
        rec_argv[i++] = "-d";
+       rec_argv[i++] = "--phys-data";
        rec_argv[i++] = "--sample-cpu";
 
        for (j = 0; j < PERF_MEM_EVENTS__MAX; j++) {
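
The fill_line() helper above centers a label inside a run of dashes so that
the spanned "zero line" of the header matches the combined width of the
columns beneath it. A minimal standalone sketch of the same centering logic
(the main() harness and the width of 28 are illustrative, not part of perf):

    #include <stdio.h>
    #include <stdlib.h>
    #include <string.h>

    /* Center 'orig' inside a len-wide dashed line, as the c2c spanned
     * headers do. The caller must ensure len > strlen(orig) + 2. */
    static char *fill_line(const char *orig, int len)
    {
            int i, j, olen = strlen(orig);
            char *buf = calloc(len + 1, 1);

            if (!buf)
                    return NULL;

            j = len / 2 - olen / 2;

            for (i = 0; i < j - 1; i++)
                    buf[i] = '-';

            buf[i++] = ' ';
            strcpy(buf + i, orig);
            i += olen;
            buf[i++] = ' ';

            for (; i < len; i++)
                    buf[i] = '-';

            return buf;
    }

    int main(void)
    {
            char *line = fill_line("Cacheline", 28);

            if (line) {
                    puts(line);     /* "--------- Cacheline --------" */
                    free(line);
            }
            return 0;
    }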
index 25a42ac..f42f228 100644 (file)
@@ -72,6 +72,7 @@ static int __write_tracing_file(const char *name, const char *val, bool append)
        ssize_t size = strlen(val);
        int flags = O_WRONLY;
        char errbuf[512];
+       char *val_copy;
 
        file = get_tracing_file(name);
        if (!file) {
@@ -91,12 +92,23 @@ static int __write_tracing_file(const char *name, const char *val, bool append)
                goto out;
        }
 
-       if (write(fd, val, size) == size)
+       /*
+        * Copy the original value and append a '\n'. Without this,
+        * the kernel can hide possible errors.
+        */
+       val_copy = strdup(val);
+       if (!val_copy)
+               goto out_close;
+       val_copy[size] = '\n';
+
+       if (write(fd, val_copy, size + 1) == size + 1)
                ret = 0;
        else
                pr_debug("write '%s' to tracing/%s failed: %s\n",
                         val, name, str_error_r(errno, errbuf, sizeof(errbuf)));
 
+       free(val_copy);
+out_close:
        close(fd);
 out:
        put_tracing_file(file);
@@ -280,8 +292,10 @@ static int __cmd_ftrace(struct perf_ftrace *ftrace, int argc, const char **argv)
        signal(SIGCHLD, sig_handler);
        signal(SIGPIPE, sig_handler);
 
-       if (reset_tracing_files(ftrace) < 0)
+       if (reset_tracing_files(ftrace) < 0) {
+               pr_err("failed to reset ftrace\n");
                goto out;
+       }
 
        /* reset ftrace buffer */
        if (write_tracing_file("trace", "0") < 0)
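
The __write_tracing_file() change above appends a '\n' to the value before
writing, so tracefs parses the whole token and write() reports a failure
instead of the kernel quietly accepting a truncated value. A standalone
sketch of the same pattern (the tracefs path in main() is illustrative):

    #include <fcntl.h>
    #include <stdlib.h>
    #include <string.h>
    #include <unistd.h>

    /* Write "<val>\n" to a tracing control file; returns 0 on success. */
    static int write_tracing_file(const char *path, const char *val)
    {
            size_t size = strlen(val);
            char *buf;
            int fd, ret = -1;

            fd = open(path, O_WRONLY);
            if (fd < 0)
                    return -1;

            buf = malloc(size + 1);
            if (!buf)
                    goto out_close;

            memcpy(buf, val, size);
            buf[size] = '\n';       /* no NUL needed, write() takes a length */

            if (write(fd, buf, size + 1) == (ssize_t)(size + 1))
                    ret = 0;

            free(buf);
    out_close:
            close(fd);
            return ret;
    }

    int main(void)
    {
            /* e.g. clear the ftrace buffer (path is illustrative) */
            return write_tracing_file("/sys/kernel/debug/tracing/trace", "0");
    }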
index 55d919d..72e2ca0 100644 (file)
@@ -743,16 +743,23 @@ static bool verify_vcpu(int vcpu)
 static s64 perf_kvm__mmap_read_idx(struct perf_kvm_stat *kvm, int idx,
                                   u64 *mmap_time)
 {
+       struct perf_evlist *evlist = kvm->evlist;
        union perf_event *event;
+       struct perf_mmap *md;
        u64 timestamp;
        s64 n = 0;
        int err;
 
        *mmap_time = ULLONG_MAX;
-       while ((event = perf_evlist__mmap_read(kvm->evlist, idx)) != NULL) {
-               err = perf_evlist__parse_sample_timestamp(kvm->evlist, event, &timestamp);
+       md = &evlist->mmap[idx];
+       err = perf_mmap__read_init(md);
+       if (err < 0)
+               return (err == -EAGAIN) ? 0 : -1;
+
+       while ((event = perf_mmap__read_event(md)) != NULL) {
+               err = perf_evlist__parse_sample_timestamp(evlist, event, &timestamp);
                if (err) {
-                       perf_evlist__mmap_consume(kvm->evlist, idx);
+                       perf_mmap__consume(md);
                        pr_err("Failed to parse sample\n");
                        return -1;
                }
@@ -762,7 +769,7 @@ static s64 perf_kvm__mmap_read_idx(struct perf_kvm_stat *kvm, int idx,
                 * FIXME: Here we can't consume the event, as perf_session__queue_event will
                 *        point to it, and it may get overwritten by the kernel.
                 */
-               perf_evlist__mmap_consume(kvm->evlist, idx);
+               perf_mmap__consume(md);
 
                if (err) {
                        pr_err("Failed to enqueue sample: %d\n", err);
@@ -779,6 +786,7 @@ static s64 perf_kvm__mmap_read_idx(struct perf_kvm_stat *kvm, int idx,
                        break;
        }
 
+       perf_mmap__read_done(md);
        return n;
 }
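
The conversion from perf_evlist__mmap_read() to the per-mmap API above
always follows the same sequence: perf_mmap__read_init() to snapshot the
ring buffer, perf_mmap__read_event()/perf_mmap__consume() per record, and
perf_mmap__read_done() to release it. A minimal consumer-loop sketch using
this series' tools/perf internal API (util/evlist.h and util/mmap.h are
assumed to be on the include path; this is not a public library API):

    #include "util/evlist.h"
    #include "util/mmap.h"

    /* Drain one ring buffer; returns the number of events seen,
     * or 0 when nothing was ready (read_init returned -EAGAIN). */
    static int drain_one_mmap(struct perf_evlist *evlist, int idx)
    {
            struct perf_mmap *md = &evlist->mmap[idx];
            union perf_event *event;
            int n = 0;

            if (perf_mmap__read_init(md) < 0)
                    return 0;

            while ((event = perf_mmap__read_event(md)) != NULL) {
                    /* ... parse/queue 'event' here ... */
                    perf_mmap__consume(md);
                    n++;
            }

            perf_mmap__read_done(md);
            return n;
    }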
 
index a217623..22ebeb9 100644 (file)
@@ -45,6 +45,7 @@
 
 #include <errno.h>
 #include <inttypes.h>
+#include <locale.h>
 #include <poll.h>
 #include <unistd.h>
 #include <sched.h>
@@ -70,7 +71,6 @@ struct record {
        struct auxtrace_record  *itr;
        struct perf_evlist      *evlist;
        struct perf_session     *session;
-       const char              *progname;
        int                     realtime_prio;
        bool                    no_buildid;
        bool                    no_buildid_set;
@@ -273,6 +273,24 @@ static void record__read_auxtrace_snapshot(struct record *rec)
        }
 }
 
+static int record__auxtrace_init(struct record *rec)
+{
+       int err;
+
+       if (!rec->itr) {
+               rec->itr = auxtrace_record__init(rec->evlist, &err);
+               if (err)
+                       return err;
+       }
+
+       err = auxtrace_parse_snapshot_options(rec->itr, &rec->opts,
+                                             rec->opts.auxtrace_snapshot_opts);
+       if (err)
+               return err;
+
+       return auxtrace_parse_filters(rec->evlist);
+}
+
 #else
 
 static inline
@@ -293,6 +311,11 @@ int auxtrace_record__snapshot_start(struct auxtrace_record *itr __maybe_unused)
        return 0;
 }
 
+static int record__auxtrace_init(struct record *rec __maybe_unused)
+{
+       return 0;
+}
+
 #endif
 
 static int record__mmap_evlist(struct record *rec,
@@ -509,7 +532,7 @@ static int record__mmap_read_evlist(struct record *rec, struct perf_evlist *evli
                struct auxtrace_mmap *mm = &maps[i].auxtrace_mmap;
 
                if (maps[i].base) {
-                       if (perf_mmap__push(&maps[i], overwrite, rec, record__pushfn) != 0) {
+                       if (perf_mmap__push(&maps[i], rec, record__pushfn) != 0) {
                                rc = -1;
                                goto out;
                        }
@@ -731,13 +754,10 @@ static int record__synthesize(struct record *rec, bool tail)
                return 0;
 
        if (data->is_pipe) {
-               err = perf_event__synthesize_features(
-                       tool, session, rec->evlist, process_synthesized_event);
-               if (err < 0) {
-                       pr_err("Couldn't synthesize features.\n");
-                       return err;
-               }
-
+               /*
+                * We need to synthesize events first, because some
+                * features work on top of them (on the report side).
+                */
                err = perf_event__synthesize_attrs(tool, session,
                                                   process_synthesized_event);
                if (err < 0) {
@@ -745,6 +765,13 @@ static int record__synthesize(struct record *rec, bool tail)
                        goto out;
                }
 
+               err = perf_event__synthesize_features(tool, session, rec->evlist,
+                                                     process_synthesized_event);
+               if (err < 0) {
+                       pr_err("Couldn't synthesize features.\n");
+                       return err;
+               }
+
                if (have_tracepoints(&rec->evlist->entries)) {
                        /*
                         * FIXME err <= 0 here actually means that
@@ -830,7 +857,6 @@ static int __cmd_record(struct record *rec, int argc, const char **argv)
        int status = 0;
        unsigned long waking = 0;
        const bool forks = argc > 0;
-       struct machine *machine;
        struct perf_tool *tool = &rec->tool;
        struct record_opts *opts = &rec->opts;
        struct perf_data *data = &rec->data;
@@ -838,8 +864,6 @@ static int __cmd_record(struct record *rec, int argc, const char **argv)
        bool disabled = false, draining = false;
        int fd;
 
-       rec->progname = argv[0];
-
        atexit(record__sig_exit);
        signal(SIGCHLD, sig_handler);
        signal(SIGINT, sig_handler);
@@ -935,8 +959,6 @@ static int __cmd_record(struct record *rec, int argc, const char **argv)
                goto out_child;
        }
 
-       machine = &session->machines.host;
-
        err = record__synthesize(rec, false);
        if (err < 0)
                goto out_child;
@@ -964,6 +986,7 @@ static int __cmd_record(struct record *rec, int argc, const char **argv)
         * Let the child rip
         */
        if (forks) {
+               struct machine *machine = &session->machines.host;
                union perf_event *event;
                pid_t tgid;
 
@@ -1260,10 +1283,12 @@ static int perf_record_config(const char *var, const char *value, void *cb)
                        return -1;
                return 0;
        }
-       if (!strcmp(var, "record.call-graph"))
-               var = "call-graph.record-mode"; /* fall-through */
+       if (!strcmp(var, "record.call-graph")) {
+               var = "call-graph.record-mode";
+               return perf_default_config(var, value, cb);
+       }
 
-       return perf_default_config(var, value, cb);
+       return 0;
 }
 
 struct clockid_map {
@@ -1551,7 +1576,11 @@ static struct option __record_options[] = {
        OPT_BOOLEAN(0, "tail-synthesize", &record.opts.tail_synthesize,
                    "synthesize non-sample events at the end of output"),
        OPT_BOOLEAN(0, "overwrite", &record.opts.overwrite, "use overwrite mode"),
-       OPT_UINTEGER('F', "freq", &record.opts.user_freq, "profile at this frequency"),
+       OPT_BOOLEAN(0, "strict-freq", &record.opts.strict_freq,
+                   "Fail if the specified frequency can't be used"),
+       OPT_CALLBACK('F', "freq", &record.opts, "freq or 'max'",
+                    "profile at this frequency",
+                     record__parse_freq),
        OPT_CALLBACK('m', "mmap-pages", &record.opts, "pages[,pages]",
                     "number of mmap data pages and AUX area tracing mmap pages",
                     record__parse_mmap_pages),
@@ -1660,6 +1689,8 @@ int cmd_record(int argc, const char **argv)
        struct record *rec = &record;
        char errbuf[BUFSIZ];
 
+       setlocale(LC_ALL, "");
+
 #ifndef HAVE_LIBBPF_SUPPORT
 # define set_nobuild(s, l, c) set_option_nobuild(record_options, s, l, "NO_LIBBPF=1", c)
        set_nobuild('\0', "clang-path", true);
@@ -1720,17 +1751,6 @@ int cmd_record(int argc, const char **argv)
                alarm(rec->switch_output.time);
        }
 
-       if (!rec->itr) {
-               rec->itr = auxtrace_record__init(rec->evlist, &err);
-               if (err)
-                       goto out;
-       }
-
-       err = auxtrace_parse_snapshot_options(rec->itr, &rec->opts,
-                                             rec->opts.auxtrace_snapshot_opts);
-       if (err)
-               goto out;
-
        /*
         * Allow aliases to facilitate the lookup of symbols for address
         * filters. Refer to auxtrace_parse_filters().
@@ -1739,7 +1759,7 @@ int cmd_record(int argc, const char **argv)
 
        symbol__init(NULL);
 
-       err = auxtrace_parse_filters(rec->evlist);
+       err = record__auxtrace_init(rec);
        if (err)
                goto out;
 
@@ -1812,7 +1832,7 @@ int cmd_record(int argc, const char **argv)
        err = target__validate(&rec->opts.target);
        if (err) {
                target__strerror(&rec->opts.target, err, errbuf, BUFSIZ);
-               ui__warning("%s", errbuf);
+               ui__warning("%s\n", errbuf);
        }
 
        err = target__parse_uid(&rec->opts.target);
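
The perf_record_config() fix above removes an accidental fall-through: every
unmatched "record.*" key used to reach perf_default_config(), while now only
the remapped call-graph key is forwarded and unknown keys are ignored. A
small sketch of the corrected dispatch shape (handle_build_id() is a
hypothetical stand-in for the real per-key handlers):

    /* Only the remapped key reaches the generic handler. */
    static int record_config(const char *var, const char *value, void *cb)
    {
            if (!strcmp(var, "record.build-id"))
                    return handle_build_id(value);  /* hypothetical */

            if (!strcmp(var, "record.call-graph")) {
                    var = "call-graph.record-mode";
                    return perf_default_config(var, value, cb);
            }

            return 0;   /* other record.* keys no longer fall through */
    }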
index 4ad5dc6..91da129 100644 (file)
@@ -68,6 +68,7 @@ struct report {
        bool                    header;
        bool                    header_only;
        bool                    nonany_branch_mode;
+       bool                    group_set;
        int                     max_stack;
        struct perf_read_values show_threads_values;
        const char              *pretty_printing_style;
@@ -193,6 +194,45 @@ out:
        return err;
 }
 
+/*
+ * Events in the data file are not collected in groups, but we still want
+ * the group display. Set the artificial group and set the leader's
+ * forced_leader flag to notify the display code.
+ */
+static void setup_forced_leader(struct report *report,
+                               struct perf_evlist *evlist)
+{
+       if (report->group_set && !evlist->nr_groups) {
+               struct perf_evsel *leader = perf_evlist__first(evlist);
+
+               perf_evlist__set_leader(evlist);
+               leader->forced_leader = true;
+       }
+}
+
+static int process_feature_event(struct perf_tool *tool,
+                                union perf_event *event,
+                                struct perf_session *session __maybe_unused)
+{
+       struct report *rep = container_of(tool, struct report, tool);
+
+       if (event->feat.feat_id < HEADER_LAST_FEATURE)
+               return perf_event__process_feature(tool, event, session);
+
+       if (event->feat.feat_id != HEADER_LAST_FEATURE) {
+               pr_err("failed: wrong feature ID: %" PRIu64 "\n",
+                      event->feat.feat_id);
+               return -1;
+       }
+
+       /*
+        * All features are received, we can force the
+        * group if needed.
+        */
+       setup_forced_leader(rep, session->evlist);
+       return 0;
+}
+
 static int process_sample_event(struct perf_tool *tool,
                                union perf_event *event,
                                struct perf_sample *sample,
@@ -400,8 +440,10 @@ static size_t hists__fprintf_nr_sample_events(struct hists *hists, struct report
 
        nr_samples = convert_unit(nr_samples, &unit);
        ret = fprintf(fp, "# Samples: %lu%c", nr_samples, unit);
-       if (evname != NULL)
-               ret += fprintf(fp, " of event '%s'", evname);
+       if (evname != NULL) {
+               ret += fprintf(fp, " of event%s '%s'",
+                              evsel->nr_members > 1 ? "s" : "", evname);
+       }
 
        if (rep->time_str)
                ret += fprintf(fp, " (time slices: %s)", rep->time_str);
@@ -614,6 +656,7 @@ static int stats_print(struct report *rep)
 static void tasks_setup(struct report *rep)
 {
        memset(&rep->tool, 0, sizeof(rep->tool));
+       rep->tool.ordered_events = true;
        if (rep->mmaps_mode) {
                rep->tool.mmap = perf_event__process_mmap;
                rep->tool.mmap2 = perf_event__process_mmap2;
@@ -954,7 +997,7 @@ int cmd_report(int argc, const char **argv)
                        .id_index        = perf_event__process_id_index,
                        .auxtrace_info   = perf_event__process_auxtrace_info,
                        .auxtrace        = perf_event__process_auxtrace,
-                       .feature         = perf_event__process_feature,
+                       .feature         = process_feature_event,
                        .ordered_events  = true,
                        .ordering_requires_timestamps = true,
                },
@@ -1056,7 +1099,7 @@ int cmd_report(int argc, const char **argv)
                   "Specify disassembler style (e.g. -M intel for intel syntax)"),
        OPT_BOOLEAN(0, "show-total-period", &symbol_conf.show_total_period,
                    "Show a column with the sum of periods"),
-       OPT_BOOLEAN(0, "group", &symbol_conf.event_group,
+       OPT_BOOLEAN_SET(0, "group", &symbol_conf.event_group, &report.group_set,
                    "Show event group information together"),
        OPT_CALLBACK_NOOPT('b', "branch-stack", &branch_mode, "",
                    "use branch records for per branch histogram filling",
@@ -1173,6 +1216,8 @@ repeat:
        has_br_stack = perf_header__has_feat(&session->header,
                                             HEADER_BRANCH_STACK);
 
+       setup_forced_leader(&report, session->evlist);
+
        if (itrace_synth_opts.last_branch)
                has_br_stack = true;
 
@@ -1332,6 +1377,15 @@ repeat:
                report.range_num = 1;
        }
 
+       if (session->tevent.pevent &&
+           pevent_set_function_resolver(session->tevent.pevent,
+                                        machine__resolve_kernel_addr,
+                                        &session->machines.host) < 0) {
+               pr_err("%s: failed to set libtraceevent function resolver\n",
+                      __func__);
+               return -1;
+       }
+
        sort__setup_elide(stdout);
 
        ret = __cmd_report(&report);
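
The switch to OPT_BOOLEAN_SET above is what lets cmd_report() tell "the user
explicitly passed --group/--no-group" apart from the compiled-in default;
setup_forced_leader() only kicks in when no explicit grouping exists. A
minimal sketch of the parse-options idiom (option table only;
OPT_BOOLEAN_SET comes from tools/lib/subcmd/parse-options.h):

    #include <subcmd/parse-options.h>

    static bool group;      /* the option value */
    static bool group_set;  /* true only if --group/--no-group was given */

    static const struct option options[] = {
            OPT_BOOLEAN_SET(0, "group", &group, &group_set,
                            "Show event group information together"),
            OPT_END()
    };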
index 83283fe..4dfdee6 100644 (file)
@@ -254,6 +254,10 @@ struct thread_runtime {
        u64 total_delay_time;
 
        int last_state;
+
+       char shortname[3];
+       bool comm_changed;
+
        u64 migrations;
 };
 
@@ -897,6 +901,37 @@ struct sort_dimension {
        struct list_head        list;
 };
 
+/*
+ * handle runtime stats saved per thread
+ */
+static struct thread_runtime *thread__init_runtime(struct thread *thread)
+{
+       struct thread_runtime *r;
+
+       r = zalloc(sizeof(struct thread_runtime));
+       if (!r)
+               return NULL;
+
+       init_stats(&r->run_stats);
+       thread__set_priv(thread, r);
+
+       return r;
+}
+
+static struct thread_runtime *thread__get_runtime(struct thread *thread)
+{
+       struct thread_runtime *tr;
+
+       tr = thread__priv(thread);
+       if (tr == NULL) {
+               tr = thread__init_runtime(thread);
+               if (tr == NULL)
+                       pr_debug("Failed to malloc memory for runtime data.\n");
+       }
+
+       return tr;
+}
+
 static int
 thread_lat_cmp(struct list_head *list, struct work_atoms *l, struct work_atoms *r)
 {
@@ -1480,6 +1515,7 @@ static int map_switch_event(struct perf_sched *sched, struct perf_evsel *evsel,
 {
        const u32 next_pid = perf_evsel__intval(evsel, sample, "next_pid");
        struct thread *sched_in;
+       struct thread_runtime *tr;
        int new_shortname;
        u64 timestamp0, timestamp = sample->time;
        s64 delta;
@@ -1519,22 +1555,28 @@ static int map_switch_event(struct perf_sched *sched, struct perf_evsel *evsel,
        if (sched_in == NULL)
                return -1;
 
+       tr = thread__get_runtime(sched_in);
+       if (tr == NULL) {
+               thread__put(sched_in);
+               return -1;
+       }
+
        sched->curr_thread[this_cpu] = thread__get(sched_in);
 
        printf("  ");
 
        new_shortname = 0;
-       if (!sched_in->shortname[0]) {
+       if (!tr->shortname[0]) {
                if (!strcmp(thread__comm_str(sched_in), "swapper")) {
                        /*
                         * Don't allocate a letter-number for swapper:0
                         * as a shortname. Instead, we use '.' for it.
                         */
-                       sched_in->shortname[0] = '.';
-                       sched_in->shortname[1] = ' ';
+                       tr->shortname[0] = '.';
+                       tr->shortname[1] = ' ';
                } else {
-                       sched_in->shortname[0] = sched->next_shortname1;
-                       sched_in->shortname[1] = sched->next_shortname2;
+                       tr->shortname[0] = sched->next_shortname1;
+                       tr->shortname[1] = sched->next_shortname2;
 
                        if (sched->next_shortname1 < 'Z') {
                                sched->next_shortname1++;
@@ -1552,6 +1594,7 @@ static int map_switch_event(struct perf_sched *sched, struct perf_evsel *evsel,
        for (i = 0; i < cpus_nr; i++) {
                int cpu = sched->map.comp ? sched->map.comp_cpus[i] : i;
                struct thread *curr_thread = sched->curr_thread[cpu];
+               struct thread_runtime *curr_tr;
                const char *pid_color = color;
                const char *cpu_color = color;
 
@@ -1569,9 +1612,14 @@ static int map_switch_event(struct perf_sched *sched, struct perf_evsel *evsel,
                else
                        color_fprintf(stdout, cpu_color, "*");
 
-               if (sched->curr_thread[cpu])
-                       color_fprintf(stdout, pid_color, "%2s ", sched->curr_thread[cpu]->shortname);
-               else
+               if (sched->curr_thread[cpu]) {
+                       curr_tr = thread__get_runtime(sched->curr_thread[cpu]);
+                       if (curr_tr == NULL) {
+                               thread__put(sched_in);
+                               return -1;
+                       }
+                       color_fprintf(stdout, pid_color, "%2s ", curr_tr->shortname);
+               } else
                        color_fprintf(stdout, color, "   ");
        }
 
@@ -1580,14 +1628,15 @@ static int map_switch_event(struct perf_sched *sched, struct perf_evsel *evsel,
 
        timestamp__scnprintf_usec(timestamp, stimestamp, sizeof(stimestamp));
        color_fprintf(stdout, color, "  %12s secs ", stimestamp);
-       if (new_shortname || (verbose > 0 && sched_in->tid)) {
+       if (new_shortname || tr->comm_changed || (verbose > 0 && sched_in->tid)) {
                const char *pid_color = color;
 
                if (thread__has_color(sched_in))
                        pid_color = COLOR_PIDS;
 
                color_fprintf(stdout, pid_color, "%s => %s:%d",
-                      sched_in->shortname, thread__comm_str(sched_in), sched_in->tid);
+                      tr->shortname, thread__comm_str(sched_in), sched_in->tid);
+               tr->comm_changed = false;
        }
 
        if (sched->map.comp && new_cpu)
@@ -1691,6 +1740,37 @@ static int perf_sched__process_tracepoint_sample(struct perf_tool *tool __maybe_
        return err;
 }
 
+static int perf_sched__process_comm(struct perf_tool *tool __maybe_unused,
+                                   union perf_event *event,
+                                   struct perf_sample *sample,
+                                   struct machine *machine)
+{
+       struct thread *thread;
+       struct thread_runtime *tr;
+       int err;
+
+       err = perf_event__process_comm(tool, event, sample, machine);
+       if (err)
+               return err;
+
+       thread = machine__find_thread(machine, sample->pid, sample->tid);
+       if (!thread) {
+               pr_err("Internal error: can't find thread\n");
+               return -1;
+       }
+
+       tr = thread__get_runtime(thread);
+       if (tr == NULL) {
+               thread__put(thread);
+               return -1;
+       }
+
+       tr->comm_changed = true;
+       thread__put(thread);
+
+       return 0;
+}
+
 static int perf_sched__read_events(struct perf_sched *sched)
 {
        const struct perf_evsel_str_handler handlers[] = {
@@ -2200,37 +2280,6 @@ static void save_idle_callchain(struct idle_thread_runtime *itr,
        callchain_cursor__copy(&itr->cursor, &callchain_cursor);
 }
 
-/*
- * handle runtime stats saved per thread
- */
-static struct thread_runtime *thread__init_runtime(struct thread *thread)
-{
-       struct thread_runtime *r;
-
-       r = zalloc(sizeof(struct thread_runtime));
-       if (!r)
-               return NULL;
-
-       init_stats(&r->run_stats);
-       thread__set_priv(thread, r);
-
-       return r;
-}
-
-static struct thread_runtime *thread__get_runtime(struct thread *thread)
-{
-       struct thread_runtime *tr;
-
-       tr = thread__priv(thread);
-       if (tr == NULL) {
-               tr = thread__init_runtime(thread);
-               if (tr == NULL)
-                       pr_debug("Failed to malloc memory for runtime data.\n");
-       }
-
-       return tr;
-}
-
 static struct thread *timehist_get_thread(struct perf_sched *sched,
                                          struct perf_sample *sample,
                                          struct machine *machine,
@@ -3291,7 +3340,7 @@ int cmd_sched(int argc, const char **argv)
        struct perf_sched sched = {
                .tool = {
                        .sample          = perf_sched__process_tracepoint_sample,
-                       .comm            = perf_event__process_comm,
+                       .comm            = perf_sched__process_comm,
                        .namespaces      = perf_event__process_namespaces,
                        .lost            = perf_event__process_lost,
                        .fork            = perf_sched__process_fork_event,
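
Moving 'shortname' out of struct thread and into struct thread_runtime works
because tools/perf threads carry an opaque priv pointer, and
thread__get_runtime() attaches the state lazily on first use. A generic
sketch of the same pattern (struct my_state and its field are illustrative;
thread__priv()/thread__set_priv() are the real accessors in util/thread.h):

    #include <stdlib.h>
    #include "util/thread.h"        /* tools/perf internal header */

    struct my_state {               /* illustrative per-thread payload */
            unsigned long events;
    };

    /* Lazily attach state to a thread via its priv pointer. */
    static struct my_state *state_of(struct thread *thread)
    {
            struct my_state *st = thread__priv(thread);

            if (!st) {
                    st = calloc(1, sizeof(*st));
                    if (st)
                            thread__set_priv(thread, st);
            }
            return st;
    }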
index ab19a6e..313c424 100644 (file)
@@ -1489,6 +1489,7 @@ struct perf_script {
        bool                    show_switch_events;
        bool                    show_namespace_events;
        bool                    show_lost_events;
+       bool                    show_round_events;
        bool                    allocated;
        bool                    per_event_dump;
        struct cpu_map          *cpus;
@@ -2104,6 +2105,16 @@ process_lost_event(struct perf_tool *tool,
        return 0;
 }
 
+static int
+process_finished_round_event(struct perf_tool *tool __maybe_unused,
+                            union perf_event *event,
+                            struct ordered_events *oe __maybe_unused)
+
+{
+       perf_event__fprintf(event, stdout);
+       return 0;
+}
+
 static void sig_handler(int sig __maybe_unused)
 {
        session_done = 1;
@@ -2200,6 +2211,10 @@ static int __cmd_script(struct perf_script *script)
                script->tool.namespaces = process_namespaces_event;
        if (script->show_lost_events)
                script->tool.lost = process_lost_event;
+       if (script->show_round_events) {
+               script->tool.ordered_events = false;
+               script->tool.finished_round = process_finished_round_event;
+       }
 
        if (perf_script__setup_per_event_dump(script)) {
                pr_err("Couldn't create the per event dump files\n");
@@ -2659,8 +2674,8 @@ static int list_available_scripts(const struct option *opt __maybe_unused,
        }
 
        for_each_lang(scripts_path, scripts_dir, lang_dirent) {
-               snprintf(lang_path, MAXPATHLEN, "%s/%s/bin", scripts_path,
-                        lang_dirent->d_name);
+               scnprintf(lang_path, MAXPATHLEN, "%s/%s/bin", scripts_path,
+                         lang_dirent->d_name);
                lang_dir = opendir(lang_path);
                if (!lang_dir)
                        continue;
@@ -2669,8 +2684,8 @@ static int list_available_scripts(const struct option *opt __maybe_unused,
                        script_root = get_script_root(script_dirent, REPORT_SUFFIX);
                        if (script_root) {
                                desc = script_desc__findnew(script_root);
-                               snprintf(script_path, MAXPATHLEN, "%s/%s",
-                                        lang_path, script_dirent->d_name);
+                               scnprintf(script_path, MAXPATHLEN, "%s/%s",
+                                         lang_path, script_dirent->d_name);
                                read_script_info(desc, script_path);
                                free(script_root);
                        }
@@ -2706,7 +2721,7 @@ static int check_ev_match(char *dir_name, char *scriptname,
        int match, len;
        FILE *fp;
 
-       sprintf(filename, "%s/bin/%s-record", dir_name, scriptname);
+       scnprintf(filename, MAXPATHLEN, "%s/bin/%s-record", dir_name, scriptname);
 
        fp = fopen(filename, "r");
        if (!fp)
@@ -2784,8 +2799,8 @@ int find_scripts(char **scripts_array, char **scripts_path_array)
        }
 
        for_each_lang(scripts_path, scripts_dir, lang_dirent) {
-               snprintf(lang_path, MAXPATHLEN, "%s/%s", scripts_path,
-                        lang_dirent->d_name);
+               scnprintf(lang_path, MAXPATHLEN, "%s/%s", scripts_path,
+                         lang_dirent->d_name);
 #ifdef NO_LIBPERL
                if (strstr(lang_path, "perl"))
                        continue;
@@ -2840,8 +2855,8 @@ static char *get_script_path(const char *script_root, const char *suffix)
                return NULL;
 
        for_each_lang(scripts_path, scripts_dir, lang_dirent) {
-               snprintf(lang_path, MAXPATHLEN, "%s/%s/bin", scripts_path,
-                        lang_dirent->d_name);
+               scnprintf(lang_path, MAXPATHLEN, "%s/%s/bin", scripts_path,
+                         lang_dirent->d_name);
                lang_dir = opendir(lang_path);
                if (!lang_dir)
                        continue;
@@ -2852,8 +2867,8 @@ static char *get_script_path(const char *script_root, const char *suffix)
                                free(__script_root);
                                closedir(lang_dir);
                                closedir(scripts_dir);
-                               snprintf(script_path, MAXPATHLEN, "%s/%s",
-                                        lang_path, script_dirent->d_name);
+                               scnprintf(script_path, MAXPATHLEN, "%s/%s",
+                                         lang_path, script_dirent->d_name);
                                return strdup(script_path);
                        }
                        free(__script_root);
@@ -3139,6 +3154,8 @@ int cmd_script(int argc, const char **argv)
                    "Show namespace events (if recorded)"),
        OPT_BOOLEAN('\0', "show-lost-events", &script.show_lost_events,
                    "Show lost events (if recorded)"),
+       OPT_BOOLEAN('\0', "show-round-events", &script.show_round_events,
+                   "Show round events (if recorded)"),
        OPT_BOOLEAN('\0', "per-event-dump", &script.per_event_dump,
                    "Dump trace output to files named by the monitored events"),
        OPT_BOOLEAN('f', "force", &symbol_conf.force, "don't complain, do it"),
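
The snprintf()-to-scnprintf() conversions above matter whenever the return
value feeds later arithmetic: snprintf() returns the length the output would
have had, which can exceed the buffer, while scnprintf() returns the number
of characters actually stored. A minimal illustration (scnprintf() is
declared in tools/include/linux/kernel.h and implemented in
tools/lib/vsprintf.c, which must be on the include/link path):

    #include <stdio.h>
    #include <linux/kernel.h>       /* tools/include: scnprintf() */

    int main(void)
    {
            char buf[8];
            int n;

            n = snprintf(buf, sizeof(buf), "0123456789");
            printf("%d\n", n);      /* 10: would-be length > sizeof(buf) - 1 */

            n = scnprintf(buf, sizeof(buf), "0123456789");
            printf("%d\n", n);      /* 7: characters actually stored */

            return 0;
    }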
index 54a4c15..f5c4548 100644 (file)
@@ -168,6 +168,7 @@ static struct timespec              ref_time;
 static struct cpu_map          *aggr_map;
 static aggr_get_id_t           aggr_get_id;
 static bool                    append_file;
+static bool                    interval_count;
 static const char              *output_name;
 static int                     output_fd;
 static int                     print_free_counters_hint;
@@ -507,14 +508,13 @@ static int perf_stat_synthesize_config(bool is_pipe)
 
 #define FD(e, x, y) (*(int *)xyarray__entry(e->fd, x, y))
 
-static int __store_counter_ids(struct perf_evsel *counter,
-                              struct cpu_map *cpus,
-                              struct thread_map *threads)
+static int __store_counter_ids(struct perf_evsel *counter)
 {
        int cpu, thread;
 
-       for (cpu = 0; cpu < cpus->nr; cpu++) {
-               for (thread = 0; thread < threads->nr; thread++) {
+       for (cpu = 0; cpu < xyarray__max_x(counter->fd); cpu++) {
+               for (thread = 0; thread < xyarray__max_y(counter->fd);
+                    thread++) {
                        int fd = FD(counter, cpu, thread);
 
                        if (perf_evlist__id_add_fd(evsel_list, counter,
@@ -534,7 +534,7 @@ static int store_counter_ids(struct perf_evsel *counter)
        if (perf_evsel__alloc_id(counter, cpus->nr, threads->nr))
                return -ENOMEM;
 
-       return __store_counter_ids(counter, cpus, threads);
+       return __store_counter_ids(counter);
 }
 
 static bool perf_evsel__should_store_id(struct perf_evsel *counter)
@@ -571,6 +571,8 @@ static struct perf_evsel *perf_evsel__reset_weak_group(struct perf_evsel *evsel)
 static int __run_perf_stat(int argc, const char **argv)
 {
        int interval = stat_config.interval;
+       int times = stat_config.times;
+       int timeout = stat_config.timeout;
        char msg[BUFSIZ];
        unsigned long long t0, t1;
        struct perf_evsel *counter;
@@ -584,6 +586,9 @@ static int __run_perf_stat(int argc, const char **argv)
        if (interval) {
                ts.tv_sec  = interval / USEC_PER_MSEC;
                ts.tv_nsec = (interval % USEC_PER_MSEC) * NSEC_PER_MSEC;
+       } else if (timeout) {
+               ts.tv_sec  = timeout / USEC_PER_MSEC;
+               ts.tv_nsec = (timeout % USEC_PER_MSEC) * NSEC_PER_MSEC;
        } else {
                ts.tv_sec  = 1;
                ts.tv_nsec = 0;
@@ -632,7 +637,19 @@ try_again:
                                 if (verbose > 0)
                                         ui__warning("%s\n", msg);
                                 goto try_again;
-                        }
+                       } else if (target__has_per_thread(&target) &&
+                                  evsel_list->threads &&
+                                  evsel_list->threads->err_thread != -1) {
+                               /*
+                                * For global --per-thread case, skip current
+                                * error thread.
+                                */
+                               if (!thread_map__remove(evsel_list->threads,
+                                                       evsel_list->threads->err_thread)) {
+                                       evsel_list->threads->err_thread = -1;
+                                       goto try_again;
+                               }
+                       }
 
                        perf_evsel__open_strerror(counter, &target,
                                                  errno, msg, sizeof(msg));
@@ -696,10 +713,14 @@ try_again:
                perf_evlist__start_workload(evsel_list);
                enable_counters();
 
-               if (interval) {
+               if (interval || timeout) {
                        while (!waitpid(child_pid, &status, WNOHANG)) {
                                nanosleep(&ts, NULL);
+                               if (timeout)
+                                       break;
                                process_interval();
+                               if (interval_count && !(--times))
+                                       break;
                        }
                }
                waitpid(child_pid, &status, 0);
@@ -716,8 +737,13 @@ try_again:
                enable_counters();
                while (!done) {
                        nanosleep(&ts, NULL);
-                       if (interval)
+                       if (timeout)
+                               break;
+                       if (interval) {
                                process_interval();
+                               if (interval_count && !(--times))
+                                       break;
+                       }
                }
        }
 
@@ -1225,6 +1251,31 @@ static void aggr_update_shadow(void)
        }
 }
 
+static void uniquify_event_name(struct perf_evsel *counter)
+{
+       char *new_name;
+       char *config;
+
+       if (!counter->pmu_name || !strncmp(counter->name, counter->pmu_name,
+                                          strlen(counter->pmu_name)))
+               return;
+
+       config = strchr(counter->name, '/');
+       if (config) {
+               if (asprintf(&new_name,
+                            "%s%s", counter->pmu_name, config) > 0) {
+                       free(counter->name);
+                       counter->name = new_name;
+               }
+       } else {
+               if (asprintf(&new_name,
+                            "%s [%s]", counter->name, counter->pmu_name) > 0) {
+                       free(counter->name);
+                       counter->name = new_name;
+               }
+       }
+}
+
 static void collect_all_aliases(struct perf_evsel *counter,
                            void (*cb)(struct perf_evsel *counter, void *data,
                                       bool first),
@@ -1253,7 +1304,9 @@ static bool collect_data(struct perf_evsel *counter,
        if (counter->merged_stat)
                return false;
        cb(counter, data, true);
-       if (!no_merge && counter->auto_merge_stats)
+       if (no_merge)
+               uniquify_event_name(counter);
+       else if (counter->auto_merge_stats)
                collect_all_aliases(counter, cb, data);
        return true;
 }
@@ -1891,6 +1944,10 @@ static const struct option stat_options[] = {
                        "command to run after to the measured command"),
        OPT_UINTEGER('I', "interval-print", &stat_config.interval,
                    "print counts at regular interval in ms (>= 10)"),
+       OPT_INTEGER(0, "interval-count", &stat_config.times,
+                   "print counts for fixed number of times"),
+       OPT_UINTEGER(0, "timeout", &stat_config.timeout,
+                   "stop workload and print counts after a timeout period in ms (>= 10ms)"),
        OPT_SET_UINT(0, "per-socket", &stat_config.aggr_mode,
                     "aggregate counts per processor socket", AGGR_SOCKET),
        OPT_SET_UINT(0, "per-core", &stat_config.aggr_mode,
@@ -2274,11 +2331,16 @@ static int add_default_attributes(void)
                return 0;
 
        if (transaction_run) {
+               struct parse_events_error errinfo;
+
                if (pmu_have_event("cpu", "cycles-ct") &&
                    pmu_have_event("cpu", "el-start"))
-                       err = parse_events(evsel_list, transaction_attrs, NULL);
+                       err = parse_events(evsel_list, transaction_attrs,
+                                          &errinfo);
                else
-                       err = parse_events(evsel_list, transaction_limited_attrs, NULL);
+                       err = parse_events(evsel_list,
+                                          transaction_limited_attrs,
+                                          &errinfo);
                if (err) {
                        fprintf(stderr, "Cannot set up transaction events\n");
                        return -1;
@@ -2688,7 +2750,7 @@ int cmd_stat(int argc, const char **argv)
        int status = -EINVAL, run_idx;
        const char *mode;
        FILE *output = stderr;
-       unsigned int interval;
+       unsigned int interval, timeout;
        const char * const stat_subcommands[] = { "record", "report" };
 
        setlocale(LC_ALL, "");
@@ -2719,6 +2781,7 @@ int cmd_stat(int argc, const char **argv)
                return __cmd_report(argc, argv);
 
        interval = stat_config.interval;
+       timeout = stat_config.timeout;
 
        /*
         * For record command the -o is already taken care of.
@@ -2871,6 +2934,33 @@ int cmd_stat(int argc, const char **argv)
                                   "Please proceed with caution.\n");
        }
 
+       if (stat_config.times && interval)
+               interval_count = true;
+       else if (stat_config.times && !interval) {
+               pr_err("interval-count option should be used together with "
+                               "interval-print.\n");
+               parse_options_usage(stat_usage, stat_options, "interval-count", 0);
+               parse_options_usage(stat_usage, stat_options, "I", 1);
+               goto out;
+       }
+
+       if (timeout && timeout < 100) {
+               if (timeout < 10) {
+                       pr_err("timeout must be >= 10ms.\n");
+                       parse_options_usage(stat_usage, stat_options, "timeout", 0);
+                       goto out;
+               } else
+                       pr_warning("timeout < 100ms. "
+                                  "The overhead percentage could be high in some cases. "
+                                  "Please proceed with caution.\n");
+       }
+       if (timeout && interval) {
+               pr_err("timeout option is not supported with interval-print.\n");
+               parse_options_usage(stat_usage, stat_options, "timeout", 0);
+               parse_options_usage(stat_usage, stat_options, "I", 1);
+               goto out;
+       }
+
        if (perf_evlist__alloc_stats(evsel_list, interval))
                goto out;
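
The --timeout and --interval-count plumbing above shares one wait loop: a
timeout run sleeps once and stops, an interval run prints counts on every
tick, and --interval-count bounds the number of ticks. A condensed sketch of
that control flow (print_counts() is a stand-in for process_interval(); the
millisecond-to-timespec math mirrors __run_perf_stat()):

    #include <stdbool.h>
    #include <time.h>

    static void print_counts(void)
    {
            /* stand-in for process_interval() */
    }

    static void wait_loop(unsigned int interval_ms, unsigned int timeout_ms,
                          int times, bool interval_count, volatile bool *done)
    {
            unsigned int ms = interval_ms ? interval_ms :
                              timeout_ms  ? timeout_ms  : 1000;
            struct timespec ts = {
                    .tv_sec  = ms / 1000,
                    .tv_nsec = (ms % 1000) * 1000000L,
            };

            while (!*done) {
                    nanosleep(&ts, NULL);
                    if (timeout_ms)
                            break;          /* one tick, then stop */
                    if (interval_ms) {
                            print_counts();
                            if (interval_count && !(--times))
                                    break;
                    }
            }
    }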
 
index 35ac016..113c298 100644 (file)
@@ -817,14 +817,13 @@ static void perf_top__mmap_read_idx(struct perf_top *top, int idx)
        struct perf_session *session = top->session;
        union perf_event *event;
        struct machine *machine;
-       u64 end, start;
        int ret;
 
        md = opts->overwrite ? &evlist->overwrite_mmap[idx] : &evlist->mmap[idx];
-       if (perf_mmap__read_init(md, opts->overwrite, &start, &end) < 0)
+       if (perf_mmap__read_init(md) < 0)
                return;
 
-       while ((event = perf_mmap__read_event(md, opts->overwrite, &start, end)) != NULL) {
+       while ((event = perf_mmap__read_event(md)) != NULL) {
                ret = perf_evlist__parse_sample(evlist, event, &sample);
                if (ret) {
                        pr_err("Can't parse sample, err = %d\n", ret);
@@ -879,7 +878,7 @@ static void perf_top__mmap_read_idx(struct perf_top *top, int idx)
                } else
                        ++session->evlist->stats.nr_unknown_events;
 next_event:
-               perf_mmap__consume(md, opts->overwrite);
+               perf_mmap__consume(md);
        }
 
        perf_mmap__read_done(md);
@@ -1224,8 +1223,10 @@ parse_callchain_opt(const struct option *opt, const char *arg, int unset)
 
 static int perf_top_config(const char *var, const char *value, void *cb __maybe_unused)
 {
-       if (!strcmp(var, "top.call-graph"))
-               var = "call-graph.record-mode"; /* fall-through */
+       if (!strcmp(var, "top.call-graph")) {
+               var = "call-graph.record-mode";
+               return perf_default_config(var, value, cb);
+       }
        if (!strcmp(var, "top.children")) {
                symbol_conf.cumulate_callchain = perf_config_bool(var, value);
                return 0;
@@ -1307,7 +1308,9 @@ int cmd_top(int argc, const char **argv)
        OPT_STRING(0, "sym-annotate", &top.sym_filter, "symbol name",
                    "symbol to annotate"),
        OPT_BOOLEAN('z', "zero", &top.zero, "zero history across updates"),
-       OPT_UINTEGER('F', "freq", &opts->user_freq, "profile at this frequency"),
+       OPT_CALLBACK('F', "freq", &top.record_opts, "freq or 'max'",
+                    "profile at this frequency",
+                     record__parse_freq),
        OPT_INTEGER('E', "entries", &top.print_entries,
                    "display this many functions"),
        OPT_BOOLEAN('U', "hide_user_symbols", &top.hide_user_symbols,
index e7f1b18..87b95c9 100644 (file)
@@ -19,6 +19,7 @@
 #include <traceevent/event-parse.h>
 #include <api/fs/tracing_path.h>
 #include "builtin.h"
+#include "util/cgroup.h"
 #include "util/color.h"
 #include "util/debug.h"
 #include "util/env.h"
@@ -83,6 +84,7 @@ struct trace {
        struct perf_evlist      *evlist;
        struct machine          *host;
        struct thread           *current;
+       struct cgroup           *cgroup;
        u64                     base_time;
        FILE                    *output;
        unsigned long           nr_events;
@@ -2370,6 +2372,34 @@ static int trace__run(struct trace *trace, int argc, const char **argv)
                                   trace__sched_stat_runtime))
                goto out_error_sched_stat_runtime;
 
+       /*
+        * If a global cgroup was set, apply it to all the events without an
+        * explicit cgroup. I.e.:
+        *
+        *      trace -G A -e sched:*switch
+        *
+        * Will set all raw_syscalls:sys_{enter,exit}, pgfault, vfs_getname, etc
+        * _and_ sched:sched_switch to the 'A' cgroup, while:
+        *
+        * trace -e sched:*switch -G A
+        *
+        * will only set the sched:sched_switch event to the 'A' cgroup, all the
+        * other events (raw_syscalls:sys_{enter,exit}, etc) are left "without"
+        * a cgroup (on the root cgroup, sys wide, etc).
+        *
+        * Multiple cgroups:
+        *
+        * trace -G A -e sched:*switch -G B
+        *
+        * the syscall ones go to the 'A' cgroup, the sched:sched_switch goes
+        * to the 'B' cgroup.
+        *
+        * evlist__set_default_cgroup() grabs a reference of the passed cgroup
+        * only for the evsels still without a cgroup, i.e. evsel->cgroup == NULL.
+        */
+       if (trace->cgroup)
+               evlist__set_default_cgroup(trace->evlist, trace->cgroup);
+
        err = perf_evlist__create_maps(evlist, &trace->opts.target);
        if (err < 0) {
                fprintf(trace->output, "Problems parsing the target to trace, check your options!\n");
@@ -2472,8 +2502,13 @@ again:
 
        for (i = 0; i < evlist->nr_mmaps; i++) {
                union perf_event *event;
+               struct perf_mmap *md;
+
+               md = &evlist->mmap[i];
+               if (perf_mmap__read_init(md) < 0)
+                       continue;
 
-               while ((event = perf_evlist__mmap_read(evlist, i)) != NULL) {
+               while ((event = perf_mmap__read_event(md)) != NULL) {
                        struct perf_sample sample;
 
                        ++trace->nr_events;
@@ -2486,7 +2521,7 @@ again:
 
                        trace__handle_event(trace, event, &sample);
 next_event:
-                       perf_evlist__mmap_consume(evlist, i);
+                       perf_mmap__consume(md);
 
                        if (interrupted)
                                goto out_disable;
@@ -2496,6 +2531,7 @@ next_event:
                                draining = true;
                        }
                }
+               perf_mmap__read_done(md);
        }
 
        if (trace->nr_events == before) {
@@ -2533,6 +2569,7 @@ out_delete_evlist:
        trace__symbols__exit(trace);
 
        perf_evlist__delete(evlist);
+       cgroup__put(trace->cgroup);
        trace->evlist = NULL;
        trace->live = false;
        return err;
@@ -2972,6 +3009,18 @@ out:
        return err;
 }
 
+static int trace__parse_cgroups(const struct option *opt, const char *str, int unset)
+{
+       struct trace *trace = opt->value;
+
+       if (!list_empty(&trace->evlist->entries))
+               return parse_cgroups(opt, str, unset);
+
+       trace->cgroup = evlist__findnew_cgroup(trace->evlist, str);
+
+       return 0;
+}
+
 int cmd_trace(int argc, const char **argv)
 {
        const char *trace_usage[] = {
@@ -3062,6 +3111,8 @@ int cmd_trace(int argc, const char **argv)
                        "print the PERF_RECORD_SAMPLE PERF_SAMPLE_ info, for debugging"),
        OPT_UINTEGER(0, "proc-map-timeout", &trace.opts.proc_map_timeout,
                        "per thread proc mmap processing timeout in ms"),
+       OPT_CALLBACK('G', "cgroup", &trace, "name", "monitor event in cgroup name only",
+                    trace__parse_cgroups),
        OPT_UINTEGER('D', "delay", &trace.opts.initial_delay,
                     "ms to wait before starting measurement after program "
                     "start"),
@@ -3088,6 +3139,11 @@ int cmd_trace(int argc, const char **argv)
        argc = parse_options_subcommand(argc, argv, trace_options, trace_subcommands,
                                 trace_usage, PARSE_OPT_STOP_AT_NON_OPTION);
 
+       if ((nr_cgroups || trace.cgroup) && !trace.opts.target.system_wide) {
+               usage_with_options_msg(trace_usage, trace_options,
+                                      "cgroup monitoring only available in system-wide mode");
+       }
+
        err = bpf__setup_stdout(trace.evlist);
        if (err) {
                bpf__strerror_setup_stdout(trace.evlist, err, bf, sizeof(bf));
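
evlist__set_default_cgroup(), referenced in the comment above, only needs to
touch evsels that did not get an explicit cgroup from a preceding -e/-G
pairing. A hedged sketch of that behavior, reconstructed from the comment's
description rather than quoted from the tree (evsel->cgrp and cgroup__get()
are assumed from this series' cgroup refactoring):

    #include "util/cgroup.h"        /* tools/perf internal headers */
    #include "util/evlist.h"

    /* Give every evsel still without a cgroup a reference to the
     * default one; explicitly-cgrouped events are left untouched. */
    static void set_default_cgroup(struct perf_evlist *evlist,
                                   struct cgroup *cgroup)
    {
            struct perf_evsel *evsel;

            evlist__for_each_entry(evlist, evsel) {
                    if (!evsel->cgrp)
                            evsel->cgrp = cgroup__get(cgroup);
            }
    }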
index 790ec25..bf206ff 100755 (executable)
@@ -42,6 +42,7 @@ arch/parisc/include/uapi/asm/errno.h
 arch/powerpc/include/uapi/asm/errno.h
 arch/sparc/include/uapi/asm/errno.h
 arch/x86/include/uapi/asm/errno.h
+arch/powerpc/include/uapi/asm/unistd.h
 include/asm-generic/bitops/arch_hweight.h
 include/asm-generic/bitops/const_hweight.h
 include/asm-generic/bitops/__fls.h
index 57b9b34..8fec1ab 100644 (file)
@@ -61,6 +61,7 @@ struct record_opts {
        bool         tail_synthesize;
        bool         overwrite;
        bool         ignore_missing_thread;
+       bool         strict_freq;
        bool         sample_id;
        unsigned int freq;
        unsigned int mmap_pages;
@@ -83,4 +84,6 @@ struct record_opts {
 struct option;
 extern const char * const *record_usage;
 extern struct option *record_options;
+
+int record__parse_freq(const struct option *opt, const char *str, int unset);
 #endif
index 999a4e8..1778391 100644 (file)
@@ -1,10 +1,12 @@
 hostprogs := jevents
 
 jevents-y      += json.o jsmn.o jevents.o
+CHOSTFLAGS_jevents.o   = -I$(srctree)/tools/include
 pmu-events-y   += pmu-events.o
 JDIR           =  pmu-events/arch/$(SRCARCH)
 JSON           =  $(shell [ -d $(JDIR) ] &&                            \
                        find $(JDIR) -name '*.json' -o -name 'mapfile.csv')
+
 #
 # Locate/process JSON files in pmu-events/arch/
 # directory and create tables in pmu-events.c.
index c2ee3e4..e62b09b 100644 (file)
@@ -11,12 +11,17 @@ tree tools/perf/pmu-events/arch/foo.
        - Regular files with '.json' extension in the name are assumed to be
          JSON files, each of which describes a set of PMU events.
 
-       - Regular files with basename starting with 'mapfile.csv' are assumed
-         to be a CSV file that maps a specific CPU to its set of PMU events.
-         (see below for mapfile format)
+       - The CSV file that maps a specific CPU to its set of PMU events is to
+         be named 'mapfile.csv' (see below for mapfile format).
 
        - Directories are traversed, but all other files are ignored.
 
+       - To reduce JSON event duplication per architecture, platform JSONs may
+         use the "ArchStdEvent" keyword to reference an "architecture standard
+         event" defined in the architecture standard JSONs.
+         Architecture standard JSONs must be located in the architecture root
+         folder. Matching is based on the "EventName" field.
+
 The PMU events supported by a CPU model are expected to be grouped into topics
 such as Pipelining, Cache, Memory, Floating-point etc. All events for a topic
 should be placed in a separate JSON file - where the file name identifies
@@ -29,6 +34,10 @@ sub directory. Thus for the Silvermont X86 CPU:
        Cache.json      Memory.json     Virtual-Memory.json
        Frontend.json   Pipeline.json
 
+The JSON files for a CPU model/family may be placed in the root arch
+folder, or in a vendor sub-folder under the arch folder for cases where
+the arch and vendor are not the same.
+
 Using the JSON files and the mapfile, 'jevents' generates the C source file,
 'pmu-events.c', which encodes the two sets of tables:
 
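
To make the ArchStdEvent dereferencing above concrete: the architecture root
file added below (armv8-recommended.json) defines, among others,

    {
        "PublicDescription": "Bus access read",
        "EventCode": "0x60",
        "EventName": "BUS_ACCESS_RD",
        "BriefDescription": "Bus access read"
    }

and a platform file such as arm/cortex-a53/bus.json then pulls the whole
entry in by name with just:

    {
        "ArchStdEvent": "BUS_ACCESS_RD"
    }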
diff --git a/tools/perf/pmu-events/arch/arm64/arm/cortex-a53/branch.json b/tools/perf/pmu-events/arch/arm64/arm/cortex-a53/branch.json
new file mode 100644 (file)
index 0000000..0b0e6b2
--- /dev/null
@@ -0,0 +1,25 @@
+[
+  {
+    "ArchStdEvent":  "BR_INDIRECT_SPEC",
+  },
+  {
+    "EventCode": "0xC9",
+    "EventName": "BR_COND",
+    "BriefDescription": "Conditional branch executed"
+  },
+  {
+    "EventCode": "0xCA",
+    "EventName": "BR_INDIRECT_MISPRED",
+    "BriefDescription": "Indirect branch mispredicted"
+  },
+  {
+    "EventCode": "0xCB",
+    "EventName": "BR_INDIRECT_MISPRED_ADDR",
+    "BriefDescription": "Indirect branch mispredicted because of address miscompare"
+  },
+  {
+    "EventCode": "0xCC",
+    "EventName": "BR_COND_MISPRED",
+    "BriefDescription": "Conditional branch mispredicted"
+  }
+]
diff --git a/tools/perf/pmu-events/arch/arm64/arm/cortex-a53/bus.json b/tools/perf/pmu-events/arch/arm64/arm/cortex-a53/bus.json
new file mode 100644 (file)
index 0000000..ce33b25
--- /dev/null
@@ -0,0 +1,8 @@
+[
+  {
+        "ArchStdEvent": "BUS_ACCESS_RD",
+  },
+  {
+        "ArchStdEvent": "BUS_ACCESS_WR",
+  }
+]
diff --git a/tools/perf/pmu-events/arch/arm64/arm/cortex-a53/cache.json b/tools/perf/pmu-events/arch/arm64/arm/cortex-a53/cache.json
new file mode 100644 (file)
index 0000000..5dfbec4
--- /dev/null
@@ -0,0 +1,27 @@
+[
+  {
+        "EventCode": "0xC2",
+        "EventName": "PREFETCH_LINEFILL",
+        "BriefDescription": "Linefill because of prefetch"
+  },
+  {
+        "EventCode": "0xC3",
+        "EventName": "PREFETCH_LINEFILL_DROP",
+        "BriefDescription": "Instruction Cache Throttle occurred"
+  },
+  {
+        "EventCode": "0xC4",
+        "EventName": "READ_ALLOC_ENTER",
+        "BriefDescription": "Entering read allocate mode"
+  },
+  {
+        "EventCode": "0xC5",
+        "EventName": "READ_ALLOC",
+        "BriefDescription": "Read allocate mode"
+  },
+  {
+        "EventCode": "0xC8",
+        "EventName": "EXT_SNOOP",
+        "BriefDescription": "SCU Snooped data from another CPU for this CPU"
+  }
+]
diff --git a/tools/perf/pmu-events/arch/arm64/arm/cortex-a53/memory.json b/tools/perf/pmu-events/arch/arm64/arm/cortex-a53/memory.json
new file mode 100644 (file)
index 0000000..25ae642
--- /dev/null
@@ -0,0 +1,12 @@
+[
+  {
+    "EventCode": "0xC0",
+    "EventName": "EXT_MEM_REQ",
+    "BriefDescription": "External memory request"
+  },
+  {
+    "EventCode": "0xC1",
+    "EventName": "EXT_MEM_REQ_NC",
+    "BriefDescription": "Non-cacheable external memory request"
+  }
+]
diff --git a/tools/perf/pmu-events/arch/arm64/arm/cortex-a53/other.json b/tools/perf/pmu-events/arch/arm64/arm/cortex-a53/other.json
new file mode 100644 (file)
index 0000000..6cc6cbd
--- /dev/null
@@ -0,0 +1,28 @@
+[
+  {
+        "ArchStdEvent": "EXC_IRQ",
+  },
+  {
+        "ArchStdEvent": "EXC_FIQ",
+  },
+  {
+        "EventCode": "0xC6",
+        "EventName": "PRE_DECODE_ERR",
+        "BriefDescription": "Pre-decode error"
+  },
+  {
+        "EventCode": "0xD0",
+        "EventName": "L1I_CACHE_ERR",
+        "BriefDescription": "L1 Instruction Cache (data or tag) memory error"
+  },
+  {
+        "EventCode": "0xD1",
+        "EventName": "L1D_CACHE_ERR",
+        "BriefDescription": "L1 Data Cache (data, tag or dirty) memory error, correctable or non-correctable"
+  },
+  {
+        "EventCode": "0xD2",
+        "EventName": "TLB_ERR",
+        "BriefDescription": "TLB memory error"
+  }
+]
diff --git a/tools/perf/pmu-events/arch/arm64/arm/cortex-a53/pipeline.json b/tools/perf/pmu-events/arch/arm64/arm/cortex-a53/pipeline.json
new file mode 100644 (file)
index 0000000..f45a6b5
--- /dev/null
@@ -0,0 +1,52 @@
+[
+  {
+    "EventCode": "0xC7",
+    "EventName": "STALL_SB_FULL",
+    "BriefDescription": "Data Write operation that stalls the pipeline because the store buffer is full"
+  },
+  {
+    "EventCode": "0xE0",
+    "EventName": "OTHER_IQ_DEP_STALL",
+    "BriefDescription": "Cycles that the DPU IQ is empty and that is not because of a recent micro-TLB miss, instruction cache miss or pre-decode error"
+  },
+  {
+    "EventCode": "0xE1",
+    "EventName": "IC_DEP_STALL",
+    "BriefDescription": "Cycles the DPU IQ is empty and there is an instruction cache miss being processed"
+  },
+  {
+    "EventCode": "0xE2",
+    "EventName": "IUTLB_DEP_STALL",
+    "BriefDescription": "Cycles the DPU IQ is empty and there is an instruction micro-TLB miss being processed"
+  },
+  {
+    "EventCode": "0xE3",
+    "EventName": "DECODE_DEP_STALL",
+    "BriefDescription": "Cycles the DPU IQ is empty and there is a pre-decode error being processed"
+  },
+  {
+    "EventCode": "0xE4",
+    "EventName": "OTHER_INTERLOCK_STALL",
+    "BriefDescription": "Cycles there is an interlock other than  Advanced SIMD/Floating-point instructions or load/store instruction"
+  },
+  {
+    "EventCode": "0xE5",
+    "EventName": "AGU_DEP_STALL",
+    "BriefDescription": "Cycles there is an interlock for a load/store instruction waiting for data to calculate the address in the AGU"
+  },
+  {
+    "EventCode": "0xE6",
+    "EventName": "SIMD_DEP_STALL",
+    "BriefDescription": "Cycles there is an interlock for an Advanced SIMD/Floating-point operation."
+  },
+  {
+    "EventCode": "0xE7",
+    "EventName": "LD_DEP_STALL",
+    "BriefDescription": "Cycles there is a stall in the Wr stage because of a load miss"
+  },
+  {
+    "EventCode": "0xE8",
+    "EventName": "ST_DEP_STALL",
+    "BriefDescription": "Cycles there is a stall in the Wr stage because of a store"
+  }
+]
diff --git a/tools/perf/pmu-events/arch/arm64/armv8-recommended.json b/tools/perf/pmu-events/arch/arm64/armv8-recommended.json
new file mode 100644 (file)
index 0000000..6328828
--- /dev/null
@@ -0,0 +1,452 @@
+[
+    {
+        "PublicDescription": "Attributable Level 1 data cache access, read",
+        "EventCode": "0x40",
+        "EventName": "L1D_CACHE_RD",
+        "BriefDescription": "L1D cache access, read"
+    },
+    {
+        "PublicDescription": "Attributable Level 1 data cache access, write",
+        "EventCode": "0x41",
+        "EventName": "L1D_CACHE_WR",
+        "BriefDescription": "L1D cache access, write"
+    },
+    {
+        "PublicDescription": "Attributable Level 1 data cache refill, read",
+        "EventCode": "0x42",
+        "EventName": "L1D_CACHE_REFILL_RD",
+        "BriefDescription": "L1D cache refill, read"
+    },
+    {
+        "PublicDescription": "Attributable Level 1 data cache refill, write",
+        "EventCode": "0x43",
+        "EventName": "L1D_CACHE_REFILL_WR",
+        "BriefDescription": "L1D cache refill, write"
+    },
+    {
+        "PublicDescription": "Attributable Level 1 data cache refill, inner",
+        "EventCode": "0x44",
+        "EventName": "L1D_CACHE_REFILL_INNER",
+        "BriefDescription": "L1D cache refill, inner"
+    },
+    {
+        "PublicDescription": "Attributable Level 1 data cache refill, outer",
+        "EventCode": "0x45",
+        "EventName": "L1D_CACHE_REFILL_OUTER",
+        "BriefDescription": "L1D cache refill, outer"
+    },
+    {
+        "PublicDescription": "Attributable Level 1 data cache Write-Back, victim",
+        "EventCode": "0x46",
+        "EventName": "L1D_CACHE_WB_VICTIM",
+        "BriefDescription": "L1D cache Write-Back, victim"
+    },
+    {
+        "PublicDescription": "Level 1 data cache Write-Back, cleaning and coherency",
+        "EventCode": "0x47",
+        "EventName": "L1D_CACHE_WB_CLEAN",
+        "BriefDescription": "L1D cache Write-Back, cleaning and coherency"
+    },
+    {
+        "PublicDescription": "Attributable Level 1 data cache invalidate",
+        "EventCode": "0x48",
+        "EventName": "L1D_CACHE_INVAL",
+        "BriefDescription": "L1D cache invalidate"
+    },
+    {
+        "PublicDescription": "Attributable Level 1 data TLB refill, read",
+        "EventCode": "0x4C",
+        "EventName": "L1D_TLB_REFILL_RD",
+        "BriefDescription": "L1D tlb refill, read"
+    },
+    {
+        "PublicDescription": "Attributable Level 1 data TLB refill, write",
+        "EventCode": "0x4D",
+        "EventName": "L1D_TLB_REFILL_WR",
+        "BriefDescription": "L1D tlb refill, write"
+    },
+    {
+        "PublicDescription": "Attributable Level 1 data or unified TLB access, read",
+        "EventCode": "0x4E",
+        "EventName": "L1D_TLB_RD",
+        "BriefDescription": "L1D tlb access, read"
+    },
+    {
+        "PublicDescription": "Attributable Level 1 data or unified TLB access, write",
+        "EventCode": "0x4F",
+        "EventName": "L1D_TLB_WR",
+        "BriefDescription": "L1D tlb access, write"
+    },
+    {
+        "PublicDescription": "Attributable Level 2 data cache access, read",
+        "EventCode": "0x50",
+        "EventName": "L2D_CACHE_RD",
+        "BriefDescription": "L2D cache access, read"
+    },
+    {
+        "PublicDescription": "Attributable Level 2 data cache access, write",
+        "EventCode": "0x51",
+        "EventName": "L2D_CACHE_WR",
+        "BriefDescription": "L2D cache access, write"
+    },
+    {
+        "PublicDescription": "Attributable Level 2 data cache refill, read",
+        "EventCode": "0x52",
+        "EventName": "L2D_CACHE_REFILL_RD",
+        "BriefDescription": "L2D cache refill, read"
+    },
+    {
+        "PublicDescription": "Attributable Level 2 data cache refill, write",
+        "EventCode": "0x53",
+        "EventName": "L2D_CACHE_REFILL_WR",
+        "BriefDescription": "L2D cache refill, write"
+    },
+    {
+        "PublicDescription": "Attributable Level 2 data cache Write-Back, victim",
+        "EventCode": "0x56",
+        "EventName": "L2D_CACHE_WB_VICTIM",
+        "BriefDescription": "L2D cache Write-Back, victim"
+    },
+    {
+        "PublicDescription": "Level 2 data cache Write-Back, cleaning and coherency",
+        "EventCode": "0x57",
+        "EventName": "L2D_CACHE_WB_CLEAN",
+        "BriefDescription": "L2D cache Write-Back, cleaning and coherency"
+    },
+    {
+        "PublicDescription": "Attributable Level 2 data cache invalidate",
+        "EventCode": "0x58",
+        "EventName": "L2D_CACHE_INVAL",
+        "BriefDescription": "L2D cache invalidate"
+    },
+    {
+        "PublicDescription": "Attributable Level 2 data or unified TLB refill, read",
+        "EventCode": "0x5c",
+        "EventName": "L2D_TLB_REFILL_RD",
+        "BriefDescription": "L2D cache refill, read"
+    },
+    {
+        "PublicDescription": "Attributable Level 2 data or unified TLB refill, write",
+        "EventCode": "0x5d",
+        "EventName": "L2D_TLB_REFILL_WR",
+        "BriefDescription": "L2D cache refill, write"
+    },
+    {
+        "PublicDescription": "Attributable Level 2 data or unified TLB access, read",
+        "EventCode": "0x5e",
+        "EventName": "L2D_TLB_RD",
+        "BriefDescription": "L2D cache access, read"
+    },
+    {
+        "PublicDescription": "Attributable Level 2 data or unified TLB access, write",
+        "EventCode": "0x5f",
+        "EventName": "L2D_TLB_WR",
+        "BriefDescription": "L2D cache access, write"
+    },
+    {
+        "PublicDescription": "Bus access read",
+        "EventCode": "0x60",
+        "EventName": "BUS_ACCESS_RD",
+        "BriefDescription": "Bus access read"
+    },
+    {
+        "PublicDescription": "Bus access write",
+        "EventCode": "0x61",
+        "EventName": "BUS_ACCESS_WR",
+        "BriefDescription": "Bus access write"
+    },
+    {
+        "PublicDescription": "Bus access, Normal, Cacheable, Shareable",
+        "EventCode": "0x62",
+        "EventName": "BUS_ACCESS_SHARED",
+        "BriefDescription": "Bus access, Normal, Cacheable, Shareable"
+    },
+    {
+        "PublicDescription": "Bus access, not Normal, Cacheable, Shareable",
+        "EventCode": "0x63",
+        "EventName": "BUS_ACCESS_NOT_SHARED",
+        "BriefDescription": "Bus access, not Normal, Cacheable, Shareable"
+    },
+    {
+        "PublicDescription": "Bus access, Normal",
+        "EventCode": "0x64",
+        "EventName": "BUS_ACCESS_NORMAL",
+        "BriefDescription": "Bus access, Normal"
+    },
+    {
+        "PublicDescription": "Bus access, peripheral",
+        "EventCode": "0x65",
+        "EventName": "BUS_ACCESS_PERIPH",
+        "BriefDescription": "Bus access, peripheral"
+    },
+    {
+        "PublicDescription": "Data memory access, read",
+        "EventCode": "0x66",
+        "EventName": "MEM_ACCESS_RD",
+        "BriefDescription": "Data memory access, read"
+    },
+    {
+        "PublicDescription": "Data memory access, write",
+        "EventCode": "0x67",
+        "EventName": "MEM_ACCESS_WR",
+        "BriefDescription": "Data memory access, write"
+    },
+    {
+        "PublicDescription": "Unaligned access, read",
+        "EventCode": "0x68",
+        "EventName": "UNALIGNED_LD_SPEC",
+        "BriefDescription": "Unaligned access, read"
+    },
+    {
+        "PublicDescription": "Unaligned access, write",
+        "EventCode": "0x69",
+        "EventName": "UNALIGNED_ST_SPEC",
+        "BriefDescription": "Unaligned access, write"
+    },
+    {
+        "PublicDescription": "Unaligned access",
+        "EventCode": "0x6a",
+        "EventName": "UNALIGNED_LDST_SPEC",
+        "BriefDescription": "Unaligned access"
+    },
+    {
+        "PublicDescription": "Exclusive operation speculatively executed, LDREX or LDX",
+        "EventCode": "0x6c",
+        "EventName": "LDREX_SPEC",
+        "BriefDescription": "Exclusive operation speculatively executed, LDREX or LDX"
+    },
+    {
+        "PublicDescription": "Exclusive operation speculatively executed, STREX or STX pass",
+        "EventCode": "0x6d",
+        "EventName": "STREX_PASS_SPEC",
+        "BriefDescription": "Exclusive operation speculatively executed, STREX or STX pass"
+    },
+    {
+        "PublicDescription": "Exclusive operation speculatively executed, STREX or STX fail",
+        "EventCode": "0x6e",
+        "EventName": "STREX_FAIL_SPEC",
+        "BriefDescription": "Exclusive operation speculatively executed, STREX or STX fail"
+    },
+    {
+        "PublicDescription": "Exclusive operation speculatively executed, STREX or STX",
+        "EventCode": "0x6f",
+        "EventName": "STREX_SPEC",
+        "BriefDescription": "Exclusive operation speculatively executed, STREX or STX"
+    },
+    {
+        "PublicDescription": "Operation speculatively executed, load",
+        "EventCode": "0x70",
+        "EventName": "LD_SPEC",
+        "BriefDescription": "Operation speculatively executed, load"
+    },
+    {
+        "PublicDescription": "Operation speculatively executed, store",
+        "EventCode": "0x71",
+        "EventName": "ST_SPEC",
+        "BriefDescription": "Operation speculatively executed, store"
+    },
+    {
+        "PublicDescription": "Operation speculatively executed, load or store",
+        "EventCode": "0x72",
+        "EventName": "LDST_SPEC",
+        "BriefDescription": "Operation speculatively executed, load or store"
+    },
+    {
+        "PublicDescription": "Operation speculatively executed, integer data processing",
+        "EventCode": "0x73",
+        "EventName": "DP_SPEC",
+        "BriefDescription": "Operation speculatively executed, integer data processing"
+    },
+    {
+        "PublicDescription": "Operation speculatively executed, Advanced SIMD instruction",
+        "EventCode": "0x74",
+        "EventName": "ASE_SPEC",
+        "BriefDescription": "Operation speculatively executed, Advanced SIMD instruction"
+    },
+    {
+        "PublicDescription": "Operation speculatively executed, floating-point instruction",
+        "EventCode": "0x75",
+        "EventName": "VFP_SPEC",
+        "BriefDescription": "Operation speculatively executed, floating-point instruction"
+    },
+    {
+        "PublicDescription": "Operation speculatively executed, software change of the PC",
+        "EventCode": "0x76",
+        "EventName": "PC_WRITE_SPEC",
+        "BriefDescription": "Operation speculatively executed, software change of the PC"
+    },
+    {
+        "PublicDescription": "Operation speculatively executed, Cryptographic instruction",
+        "EventCode": "0x77",
+        "EventName": "CRYPTO_SPEC",
+        "BriefDescription": "Operation speculatively executed, Cryptographic instruction"
+    },
+    {
+        "PublicDescription": "Branch speculatively executed, immediate branch",
+        "EventCode": "0x78",
+        "EventName": "BR_IMMED_SPEC",
+        "BriefDescription": "Branch speculatively executed, immediate branch"
+    },
+    {
+        "PublicDescription": "Branch speculatively executed, procedure return",
+        "EventCode": "0x79",
+        "EventName": "BR_RETURN_SPEC",
+        "BriefDescription": "Branch speculatively executed, procedure return"
+    },
+    {
+        "PublicDescription": "Branch speculatively executed, indirect branch",
+        "EventCode": "0x7a",
+        "EventName": "BR_INDIRECT_SPEC",
+        "BriefDescription": "Branch speculatively executed, indirect branch"
+    },
+    {
+        "PublicDescription": "Barrier speculatively executed, ISB",
+        "EventCode": "0x7c",
+        "EventName": "ISB_SPEC",
+        "BriefDescription": "Barrier speculatively executed, ISB"
+    },
+    {
+        "PublicDescription": "Barrier speculatively executed, DSB",
+        "EventCode": "0x7d",
+        "EventName": "DSB_SPEC",
+        "BriefDescription": "Barrier speculatively executed, DSB"
+    },
+    {
+        "PublicDescription": "Barrier speculatively executed, DMB",
+        "EventCode": "0x7e",
+        "EventName": "DMB_SPEC",
+        "BriefDescription": "Barrier speculatively executed, DMB"
+    },
+    {
+        "PublicDescription": "Exception taken, Other synchronous",
+        "EventCode": "0x81",
+        "EventName": "EXC_UNDEF",
+        "BriefDescription": "Exception taken, Other synchronous"
+    },
+    {
+        "PublicDescription": "Exception taken, Supervisor Call",
+        "EventCode": "0x82",
+        "EventName": "EXC_SVC",
+        "BriefDescription": "Exception taken, Supervisor Call"
+    },
+    {
+        "PublicDescription": "Exception taken, Instruction Abort",
+        "EventCode": "0x83",
+        "EventName": "EXC_PABORT",
+        "BriefDescription": "Exception taken, Instruction Abort"
+    },
+    {
+        "PublicDescription": "Exception taken, Data Abort and SError",
+        "EventCode": "0x84",
+        "EventName": "EXC_DABORT",
+        "BriefDescription": "Exception taken, Data Abort and SError"
+    },
+    {
+        "PublicDescription": "Exception taken, IRQ",
+        "EventCode": "0x86",
+        "EventName": "EXC_IRQ",
+        "BriefDescription": "Exception taken, IRQ"
+    },
+    {
+        "PublicDescription": "Exception taken, FIQ",
+        "EventCode": "0x87",
+        "EventName": "EXC_FIQ",
+        "BriefDescription": "Exception taken, FIQ"
+    },
+    {
+        "PublicDescription": "Exception taken, Secure Monitor Call",
+        "EventCode": "0x88",
+        "EventName": "EXC_SMC",
+        "BriefDescription": "Exception taken, Secure Monitor Call"
+    },
+    {
+        "PublicDescription": "Exception taken, Hypervisor Call",
+        "EventCode": "0x8a",
+        "EventName": "EXC_HVC",
+        "BriefDescription": "Exception taken, Hypervisor Call"
+    },
+    {
+        "PublicDescription": "Exception taken, Instruction Abort not taken locally",
+        "EventCode": "0x8b",
+        "EventName": "EXC_TRAP_PABORT",
+        "BriefDescription": "Exception taken, Instruction Abort not taken locally"
+    },
+    {
+        "PublicDescription": "Exception taken, Data Abort or SError not taken locally",
+        "EventCode": "0x8c",
+        "EventName": "EXC_TRAP_DABORT",
+        "BriefDescription": "Exception taken, Data Abort or SError not taken locally"
+    },
+    {
+        "PublicDescription": "Exception taken, Other traps not taken locally",
+        "EventCode": "0x8d",
+        "EventName": "EXC_TRAP_OTHER",
+        "BriefDescription": "Exception taken, Other traps not taken locally"
+    },
+    {
+        "PublicDescription": "Exception taken, IRQ not taken locally",
+        "EventCode": "0x8e",
+        "EventName": "EXC_TRAP_IRQ",
+        "BriefDescription": "Exception taken, IRQ not taken locally"
+    },
+    {
+        "PublicDescription": "Exception taken, FIQ not taken locally",
+        "EventCode": "0x8f",
+        "EventName": "EXC_TRAP_FIQ",
+        "BriefDescription": "Exception taken, FIQ not taken locally"
+    },
+    {
+        "PublicDescription": "Release consistency operation speculatively executed, Load-Acquire",
+        "EventCode": "0x90",
+        "EventName": "RC_LD_SPEC",
+        "BriefDescription": "Release consistency operation speculatively executed, Load-Acquire"
+    },
+    {
+        "PublicDescription": "Release consistency operation speculatively executed, Store-Release",
+        "EventCode": "0x91",
+        "EventName": "RC_ST_SPEC",
+        "BriefDescription": "Release consistency operation speculatively executed, Store-Release"
+    },
+    {
+        "PublicDescription": "Attributable Level 3 data or unified cache access, read",
+        "EventCode": "0xa0",
+        "EventName": "L3D_CACHE_RD",
+        "BriefDescription": "Attributable Level 3 data or unified cache access, read"
+    },
+    {
+        "PublicDescription": "Attributable Level 3 data or unified cache access, write",
+        "EventCode": "0xa1",
+        "EventName": "L3D_CACHE_WR",
+        "BriefDescription": "Attributable Level 3 data or unified cache access, write"
+    },
+    {
+        "PublicDescription": "Attributable Level 3 data or unified cache refill, read",
+        "EventCode": "0xa2",
+        "EventName": "L3D_CACHE_REFILL_RD",
+        "BriefDescription": "Attributable Level 3 data or unified cache refill, read"
+    },
+    {
+        "PublicDescription": "Attributable Level 3 data or unified cache refill, write",
+        "EventCode": "0xa3",
+        "EventName": "L3D_CACHE_REFILL_WR",
+        "BriefDescription": "Attributable Level 3 data or unified cache refill, write"
+    },
+    {
+        "PublicDescription": "Attributable Level 3 data or unified cache Write-Back, victim",
+        "EventCode": "0xa6",
+        "EventName": "L3D_CACHE_WB_VICTIM",
+        "BriefDescription": "Attributable Level 3 data or unified cache Write-Back, victim"
+    },
+    {
+        "PublicDescription": "Attributable Level 3 data or unified cache Write-Back, cache clean",
+        "EventCode": "0xa7",
+        "EventName": "L3D_CACHE_WB_CLEAN",
+        "BriefDescription": "Attributable Level 3 data or unified cache Write-Back, cache clean"
+    },
+    {
+        "PublicDescription": "Attributable Level 3 data or unified cache access, invalidate",
+        "EventCode": "0xa8",
+        "EventName": "L3D_CACHE_INVAL",
+        "BriefDescription": "Attributable Level 3 data or unified cache access, invalidate"
+    }
+]
diff --git a/tools/perf/pmu-events/arch/arm64/cavium/thunderx2-imp-def.json b/tools/perf/pmu-events/arch/arm64/cavium/thunderx2-imp-def.json
deleted file mode 100644 (file)
index 2db45c4..0000000
+++ /dev/null
@@ -1,62 +0,0 @@
-[
-    {
-        "PublicDescription": "Attributable Level 1 data cache access, read",
-        "EventCode": "0x40",
-        "EventName": "l1d_cache_rd",
-        "BriefDescription": "L1D cache read",
-    },
-    {
-        "PublicDescription": "Attributable Level 1 data cache access, write ",
-        "EventCode": "0x41",
-        "EventName": "l1d_cache_wr",
-        "BriefDescription": "L1D cache write",
-    },
-    {
-        "PublicDescription": "Attributable Level 1 data cache refill, read",
-        "EventCode": "0x42",
-        "EventName": "l1d_cache_refill_rd",
-        "BriefDescription": "L1D cache refill read",
-    },
-    {
-        "PublicDescription": "Attributable Level 1 data cache refill, write",
-        "EventCode": "0x43",
-        "EventName": "l1d_cache_refill_wr",
-        "BriefDescription": "L1D refill write",
-    },
-    {
-        "PublicDescription": "Attributable Level 1 data TLB refill, read",
-        "EventCode": "0x4C",
-        "EventName": "l1d_tlb_refill_rd",
-        "BriefDescription": "L1D tlb refill read",
-    },
-    {
-        "PublicDescription": "Attributable Level 1 data TLB refill, write",
-        "EventCode": "0x4D",
-        "EventName": "l1d_tlb_refill_wr",
-        "BriefDescription": "L1D tlb refill write",
-    },
-    {
-        "PublicDescription": "Attributable Level 1 data or unified TLB access, read",
-        "EventCode": "0x4E",
-        "EventName": "l1d_tlb_rd",
-        "BriefDescription": "L1D tlb read",
-    },
-    {
-        "PublicDescription": "Attributable Level 1 data or unified TLB access, write",
-        "EventCode": "0x4F",
-        "EventName": "l1d_tlb_wr",
-        "BriefDescription": "L1D tlb write",
-    },
-    {
-        "PublicDescription": "Bus access read",
-        "EventCode": "0x60",
-        "EventName": "bus_access_rd",
-        "BriefDescription": "Bus access read",
-   },
-   {
-        "PublicDescription": "Bus access write",
-        "EventCode": "0x61",
-        "EventName": "bus_access_wr",
-        "BriefDescription": "Bus access write",
-   }
-]
diff --git a/tools/perf/pmu-events/arch/arm64/cavium/thunderx2/core-imp-def.json b/tools/perf/pmu-events/arch/arm64/cavium/thunderx2/core-imp-def.json
new file mode 100644 (file)
index 0000000..bc03c06
--- /dev/null
@@ -0,0 +1,32 @@
+[
+    {
+        "ArchStdEvent": "L1D_CACHE_RD"
+    },
+    {
+        "ArchStdEvent": "L1D_CACHE_WR"
+    },
+    {
+        "ArchStdEvent": "L1D_CACHE_REFILL_RD"
+    },
+    {
+        "ArchStdEvent": "L1D_CACHE_REFILL_WR"
+    },
+    {
+        "ArchStdEvent": "L1D_TLB_REFILL_RD"
+    },
+    {
+        "ArchStdEvent": "L1D_TLB_REFILL_WR"
+    },
+    {
+        "ArchStdEvent": "L1D_TLB_RD"
+    },
+    {
+        "ArchStdEvent": "L1D_TLB_WR"
+    },
+    {
+        "ArchStdEvent": "BUS_ACCESS_RD"
+    },
+    {
+        "ArchStdEvent": "BUS_ACCESS_WR"
+    }
+]
diff --git a/tools/perf/pmu-events/arch/arm64/cortex-a53/branch.json b/tools/perf/pmu-events/arch/arm64/cortex-a53/branch.json
deleted file mode 100644 (file)
index 3b62087..0000000
+++ /dev/null
@@ -1,27 +0,0 @@
-[
-  {,
-    "EventCode": "0x7A",
-    "EventName": "BR_INDIRECT_SPEC",
-    "BriefDescription": "Branch speculatively executed - Indirect branch"
-  },
-  {,
-    "EventCode": "0xC9",
-    "EventName": "BR_COND",
-    "BriefDescription": "Conditional branch executed"
-  },
-  {,
-    "EventCode": "0xCA",
-    "EventName": "BR_INDIRECT_MISPRED",
-    "BriefDescription": "Indirect branch mispredicted"
-  },
-  {,
-    "EventCode": "0xCB",
-    "EventName": "BR_INDIRECT_MISPRED_ADDR",
-    "BriefDescription": "Indirect branch mispredicted because of address miscompare"
-  },
-  {,
-    "EventCode": "0xCC",
-    "EventName": "BR_COND_MISPRED",
-    "BriefDescription": "Conditional branch mispredicted"
-  }
-]
diff --git a/tools/perf/pmu-events/arch/arm64/cortex-a53/bus.json b/tools/perf/pmu-events/arch/arm64/cortex-a53/bus.json
deleted file mode 100644 (file)
index 480d9f7..0000000
+++ /dev/null
@@ -1,22 +0,0 @@
-[
-  {,
-    "EventCode": "0x60",
-    "EventName": "BUS_ACCESS_LD",
-    "BriefDescription": "Bus access - Read"
-  },
-  {,
-    "EventCode": "0x61",
-    "EventName": "BUS_ACCESS_ST",
-    "BriefDescription": "Bus access - Write"
-  },
-  {,
-    "EventCode": "0xC0",
-    "EventName": "EXT_MEM_REQ",
-    "BriefDescription": "External memory request"
-  },
-  {,
-    "EventCode": "0xC1",
-    "EventName": "EXT_MEM_REQ_NC",
-    "BriefDescription": "Non-cacheable external memory request"
-  }
-]
diff --git a/tools/perf/pmu-events/arch/arm64/cortex-a53/cache.json b/tools/perf/pmu-events/arch/arm64/cortex-a53/cache.json
deleted file mode 100644 (file)
index 11baad6..0000000
+++ /dev/null
@@ -1,27 +0,0 @@
-[
-  {,
-    "EventCode": "0xC2",
-    "EventName": "PREFETCH_LINEFILL",
-    "BriefDescription": "Linefill because of prefetch"
-  },
-  {,
-    "EventCode": "0xC3",
-    "EventName": "PREFETCH_LINEFILL_DROP",
-    "BriefDescription": "Instruction Cache Throttle occurred"
-  },
-  {,
-    "EventCode": "0xC4",
-    "EventName": "READ_ALLOC_ENTER",
-    "BriefDescription": "Entering read allocate mode"
-  },
-  {,
-    "EventCode": "0xC5",
-    "EventName": "READ_ALLOC",
-    "BriefDescription": "Read allocate mode"
-  },
-  {,
-    "EventCode": "0xC8",
-    "EventName": "EXT_SNOOP",
-    "BriefDescription": "SCU Snooped data from another CPU for this CPU"
-  }
-]
diff --git a/tools/perf/pmu-events/arch/arm64/cortex-a53/memory.json b/tools/perf/pmu-events/arch/arm64/cortex-a53/memory.json
deleted file mode 100644 (file)
index 480d9f7..0000000
+++ /dev/null
@@ -1,22 +0,0 @@
-[
-  {,
-    "EventCode": "0x60",
-    "EventName": "BUS_ACCESS_LD",
-    "BriefDescription": "Bus access - Read"
-  },
-  {,
-    "EventCode": "0x61",
-    "EventName": "BUS_ACCESS_ST",
-    "BriefDescription": "Bus access - Write"
-  },
-  {,
-    "EventCode": "0xC0",
-    "EventName": "EXT_MEM_REQ",
-    "BriefDescription": "External memory request"
-  },
-  {,
-    "EventCode": "0xC1",
-    "EventName": "EXT_MEM_REQ_NC",
-    "BriefDescription": "Non-cacheable external memory request"
-  }
-]
diff --git a/tools/perf/pmu-events/arch/arm64/cortex-a53/other.json b/tools/perf/pmu-events/arch/arm64/cortex-a53/other.json
deleted file mode 100644 (file)
index 73a2240..0000000
+++ /dev/null
@@ -1,32 +0,0 @@
-[
-  {,
-    "EventCode": "0x86",
-    "EventName": "EXC_IRQ",
-    "BriefDescription": "Exception taken, IRQ"
-  },
-  {,
-    "EventCode": "0x87",
-    "EventName": "EXC_FIQ",
-    "BriefDescription": "Exception taken, FIQ"
-  },
-  {,
-    "EventCode": "0xC6",
-    "EventName": "PRE_DECODE_ERR",
-    "BriefDescription": "Pre-decode error"
-  },
-  {,
-    "EventCode": "0xD0",
-    "EventName": "L1I_CACHE_ERR",
-    "BriefDescription": "L1 Instruction Cache (data or tag) memory error"
-  },
-  {,
-    "EventCode": "0xD1",
-    "EventName": "L1D_CACHE_ERR",
-    "BriefDescription": "L1 Data Cache (data, tag or dirty) memory error, correctable or non-correctable"
-  },
-  {,
-    "EventCode": "0xD2",
-    "EventName": "TLB_ERR",
-    "BriefDescription": "TLB memory error"
-  }
-]
diff --git a/tools/perf/pmu-events/arch/arm64/cortex-a53/pipeline.json b/tools/perf/pmu-events/arch/arm64/cortex-a53/pipeline.json
deleted file mode 100644 (file)
index 3149fb9..0000000
+++ /dev/null
@@ -1,52 +0,0 @@
-[
-  {,
-    "EventCode": "0xC7",
-    "EventName": "STALL_SB_FULL",
-    "BriefDescription": "Data Write operation that stalls the pipeline because the store buffer is full"
-  },
-  {,
-    "EventCode": "0xE0",
-    "EventName": "OTHER_IQ_DEP_STALL",
-    "BriefDescription": "Cycles that the DPU IQ is empty and that is not because of a recent micro-TLB miss, instruction cache miss or pre-decode error"
-  },
-  {,
-    "EventCode": "0xE1",
-    "EventName": "IC_DEP_STALL",
-    "BriefDescription": "Cycles the DPU IQ is empty and there is an instruction cache miss being processed"
-  },
-  {,
-    "EventCode": "0xE2",
-    "EventName": "IUTLB_DEP_STALL",
-    "BriefDescription": "Cycles the DPU IQ is empty and there is an instruction micro-TLB miss being processed"
-  },
-  {,
-    "EventCode": "0xE3",
-    "EventName": "DECODE_DEP_STALL",
-    "BriefDescription": "Cycles the DPU IQ is empty and there is a pre-decode error being processed"
-  },
-  {,
-    "EventCode": "0xE4",
-    "EventName": "OTHER_INTERLOCK_STALL",
-    "BriefDescription": "Cycles there is an interlock other than  Advanced SIMD/Floating-point instructions or load/store instruction"
-  },
-  {,
-    "EventCode": "0xE5",
-    "EventName": "AGU_DEP_STALL",
-    "BriefDescription": "Cycles there is an interlock for a load/store instruction waiting for data to calculate the address in the AGU"
-  },
-  {,
-    "EventCode": "0xE6",
-    "EventName": "SIMD_DEP_STALL",
-    "BriefDescription": "Cycles there is an interlock for an Advanced SIMD/Floating-point operation."
-  },
-  {,
-    "EventCode": "0xE7",
-    "EventName": "LD_DEP_STALL",
-    "BriefDescription": "Cycles there is a stall in the Wr stage because of a load miss"
-  },
-  {,
-    "EventCode": "0xE8",
-    "EventName": "ST_DEP_STALL",
-    "BriefDescription": "Cycles there is a stall in the Wr stage because of a store"
-  }
-]
diff --git a/tools/perf/pmu-events/arch/arm64/hisilicon/hip08/core-imp-def.json b/tools/perf/pmu-events/arch/arm64/hisilicon/hip08/core-imp-def.json
new file mode 100644 (file)
index 0000000..9f0f15d
--- /dev/null
@@ -0,0 +1,122 @@
+[
+    {
+        "ArchStdEvent": "L1D_CACHE_RD"
+    },
+    {
+        "ArchStdEvent": "L1D_CACHE_WR"
+    },
+    {
+        "ArchStdEvent": "L1D_CACHE_REFILL_RD"
+    },
+    {
+        "ArchStdEvent": "L1D_CACHE_REFILL_WR"
+    },
+    {
+        "ArchStdEvent": "L1D_CACHE_WB_VICTIM"
+    },
+    {
+        "ArchStdEvent": "L1D_CACHE_WB_CLEAN"
+    },
+    {
+        "ArchStdEvent": "L1D_CACHE_INVAL"
+    },
+    {
+        "ArchStdEvent": "L1D_TLB_REFILL_RD"
+    },
+    {
+        "ArchStdEvent": "L1D_TLB_REFILL_WR"
+    },
+    {
+        "ArchStdEvent": "L1D_TLB_RD"
+    },
+    {
+        "ArchStdEvent": "L1D_TLB_WR"
+    },
+    {
+        "ArchStdEvent": "L2D_CACHE_RD"
+    },
+    {
+        "ArchStdEvent": "L2D_CACHE_WR"
+    },
+    {
+        "ArchStdEvent": "L2D_CACHE_REFILL_RD"
+    },
+    {
+        "ArchStdEvent": "L2D_CACHE_REFILL_WR"
+    },
+    {
+        "ArchStdEvent": "L2D_CACHE_WB_VICTIM"
+    },
+    {
+        "ArchStdEvent": "L2D_CACHE_WB_CLEAN"
+    },
+    {
+        "ArchStdEvent": "L2D_CACHE_INVAL"
+    },
+    {
+        "PublicDescription": "Level 1 instruction cache prefetch access count",
+        "EventCode": "0x102e",
+        "EventName": "L1I_CACHE_PRF",
+        "BriefDescription": "L1I cache prefetch access count"
+    },
+    {
+        "PublicDescription": "Level 1 instruction cache miss due to prefetch access count",
+        "EventCode": "0x102f",
+        "EventName": "L1I_CACHE_PRF_REFILL",
+        "BriefDescription": "L1I cache miss due to prefetch access count"
+    },
+    {
+        "PublicDescription": "Instruction queue is empty",
+        "EventCode": "0x1043",
+        "EventName": "IQ_IS_EMPTY",
+        "BriefDescription": "Instruction queue is empty"
+    },
+    {
+        "PublicDescription": "Instruction fetch stall cycles",
+        "EventCode": "0x1044",
+        "EventName": "IF_IS_STALL",
+        "BriefDescription": "Instruction fetch stall cycles"
+    },
+    {
+        "PublicDescription": "Instructions can be received but not sent",
+        "EventCode": "0x2014",
+        "EventName": "FETCH_BUBBLE",
+        "BriefDescription": "Instructions can be received but not sent"
+    },
+    {
+        "PublicDescription": "Prefetch request from LSU",
+        "EventCode": "0x6013",
+        "EventName": "PRF_REQ",
+        "BriefDescription": "Prefetch request from LSU"
+    },
+    {
+        "PublicDescription": "Hit on prefetched data",
+        "EventCode": "0x6014",
+        "EventName": "HIT_ON_PRF",
+        "BriefDescription": "Hit on prefetched data"
+    },
+    {
+        "PublicDescription": "Cycles in which fewer than 4 micro-operations are issued",
+        "EventCode": "0x7001",
+        "EventName": "EXE_STALL_CYCLE",
+        "BriefDescription": "Cycles in which fewer than 4 uops are issued"
+    },
+    {
+        "PublicDescription": "No micro-operation is issued and at least one load operation is not yet resolved",
+        "EventCode": "0x7004",
+        "EventName": "MEM_STALL_ANYLOAD",
+        "BriefDescription": "No micro-operation is issued and at least one load operation is not yet resolved"
+    },
+    {
+        "PublicDescription": "No micro-operation is issued and at least one load operation has missed the L1 cache and is waiting for the data refill",
+        "EventCode": "0x7006",
+        "EventName": "MEM_STALL_L1MISS",
+        "BriefDescription": "No micro-operation is issued and at least one load operation has missed the L1 cache and is waiting for the data refill"
+    },
+    {
+        "PublicDescription": "No micro-operation is issued and at least one load operation has missed both the L1 and L2 caches and is waiting for the data refill from the L3 cache",
+        "EventCode": "0x7007",
+        "EventName": "MEM_STALL_L2MISS",
+        "BriefDescription": "No micro-operation is issued and at least one load operation has missed both the L1 and L2 caches and is waiting for the data refill from the L3 cache"
+    }
+]
index e61c9ca..f03e26e 100644 (file)
@@ -12,5 +12,7 @@
 #
 #
 #Family-model,Version,Filename,EventType
-0x00000000420f5160,v1,cavium,core
-0x00000000410fd03[[:xdigit:]],v1,cortex-a53,core
+0x00000000410fd03[[:xdigit:]],v1,arm/cortex-a53,core
+0x00000000420f5160,v1,cavium/thunderx2,core
+0x00000000430f0af0,v1,cavium/thunderx2,core
+0x00000000480fd010,v1,hisilicon/hip08,core
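
The first mapfile field is treated as a regular expression and matched against
the CPUID string perf reads for the running machine; on arm64 this is the
zero-padded MIDR, so 0x00000000410fd034 (any [[:xdigit:]] in the final
revision nibble) resolves to the arm/cortex-a53 event directory, while
0x00000000480fd010 selects hisilicon/hip08.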
index 7945c51..8510721 100644 (file)
     "EventName": "PM_CMPLU_STALL_FXU",
     "BriefDescription": "Finish stall due to a scalar fixed point or CR instruction in the execution pipeline. These instructions get routed to the ALU, ALU2, and DIV pipes"
   },
-  {,
-    "EventCode": "0x1D15C",
-    "EventName": "PM_MRK_DTLB_MISS_1G",
-    "BriefDescription": "Marked Data TLB reload (after a miss) page size 2M. Implies radix translation was used"
-  },
   {,
     "EventCode": "0x4D12A",
     "EventName": "PM_MRK_DATA_FROM_RL4_CYC",
     "EventName": "PM_THRESH_EXC_4096",
     "BriefDescription": "Threshold counter exceed a count of 4096"
   },
-  {,
-    "EventCode": "0x3D156",
-    "EventName": "PM_MRK_DTLB_MISS_64K",
-    "BriefDescription": "Marked Data TLB Miss page size 64K"
-  },
-  {,
-    "EventCode": "0x4C15E",
-    "EventName": "PM_MRK_DTLB_MISS_16M",
-    "BriefDescription": "Marked Data TLB Miss page size 16M"
-  },
-  {,
-    "EventCode": "0x2D15E",
-    "EventName": "PM_MRK_DTLB_MISS_16G",
-    "BriefDescription": "Marked Data TLB Miss page size 16G"
-  },
   {,
     "EventCode": "0x3F14A",
     "EventName": "PM_MRK_DPTEG_FROM_RMEM",
     "EventCode": "0x1002A",
     "EventName": "PM_CMPLU_STALL_LARX",
     "BriefDescription": "Finish stall because the NTF instruction was a larx waiting to be satisfied"
-  },
-  {,
-    "EventCode": "0x1C058",
-    "EventName": "PM_DTLB_MISS_16G",
-    "BriefDescription": "Data TLB Miss page size 16G"
   }
 ]
\ No newline at end of file
index bd8361b..f9fa84b 100644 (file)
     "EventName": "PM_MRK_DATA_FROM_RL2L3_SHR_CYC",
     "BriefDescription": "Duration in cycles to reload with Shared (S) data from another chip's L2 or L3 on the same Node or Group (Remote), as this chip due to a marked load"
   },
-  {,
-    "EventCode": "0x3C056",
-    "EventName": "PM_DTLB_MISS_64K",
-    "BriefDescription": "Data TLB Miss page size 64K"
-  },
   {,
     "EventCode": "0x30060",
     "EventName": "PM_TM_TRANS_RUN_INST",
     "EventName": "PM_MRK_LARX_FIN",
     "BriefDescription": "Larx finished"
   },
-  {,
-    "EventCode": "0x4C056",
-    "EventName": "PM_DTLB_MISS_16M",
-    "BriefDescription": "Data TLB Miss page size 16M"
-  },
   {,
     "EventCode": "0x1003A",
     "EventName": "PM_CMPLU_STALL_LSU_FIN",
index 22f9f32..b1954c3 100644 (file)
     "EventName": "PM_L1_ICACHE_RELOADED_ALL",
     "BriefDescription": "Counts all Icache reloads includes demand, prefetch, prefetch turned into demand and demand turned into prefetch"
   },
-  {,
-    "EventCode": "0x4003C",
-    "EventName": "PM_DISP_HELD_SYNC_HOLD",
-    "BriefDescription": "Cycles in which dispatch is held because of a synchronizing instruction in the pipeline"
-  },
   {,
     "EventCode": "0x3003C",
     "EventName": "PM_CMPLU_STALL_NESTED_TEND",
index 9960d1c..2e2ebc7 100644 (file)
     "EventName": "PM_LD_CMPL",
     "BriefDescription": "count of Loads completed"
   },
-  {,
-    "EventCode": "0x2D156",
-    "EventName": "PM_MRK_DTLB_MISS_4K",
-    "BriefDescription": "Marked Data TLB Miss page size 4k"
-  },
   {,
     "EventCode": "0x4C042",
     "EventName": "PM_DATA_FROM_L3",
index 5ce3129..48cf4f9 100644 (file)
     "EventName": "PM_THRD_PRIO_0_1_CYC",
     "BriefDescription": "Cycles thread running at priority level 0 or 1"
   },
+  {,
+    "EventCode": "0x4C054",
+    "EventName": "PM_DERAT_MISS_16G_1G",
+    "BriefDescription": "Data ERAT Miss (Data TLB Access) page size 16G (hpt mode) or 1G (radix mode)"
+  },
   {,
     "EventCode": "0x2084",
     "EventName": "PM_FLUSH_HB_RESTORE_CYC",
   {,
     "EventCode": "0x360B2",
     "EventName": "PM_L3_GRP_GUESS_WRONG_LOW",
-    "BriefDescription": "Initial scope=group (GS or NNS) but data from outside group (far or rem). Prediction too Low"
+    "BriefDescription": "Prefetch scope predictor selected GS or NNS, but was wrong because scope was LNS"
   },
   {,
     "EventCode": "0x168A6",
     "EventName": "PM_TM_CAM_OVERFLOW",
-    "BriefDescription": "L3 TM cam overflow during L2 co of SC"
+    "BriefDescription": "L3 TM CAM is full when a L2 castout of TM_SC line occurs.  Line is pushed to memory"
   },
   {,
     "EventCode": "0xE8B0",
     "EventName": "PM_ISU3_ISS_HOLD_ALL",
     "BriefDescription": "All ISU rejects"
   },
-  {,
-    "EventCode": "0x460A6",
-    "EventName": "PM_RD_FORMING_SC",
-    "BriefDescription": "Read forming SC"
-  },
   {,
     "EventCode": "0x468A0",
     "EventName": "PM_L3_PF_OFF_CHIP_MEM",
   {,
     "EventCode": "0x368A6",
     "EventName": "PM_SNP_TM_HIT_T",
-    "BriefDescription": "Snp TM sthit T/Tn/Te"
+    "BriefDescription": "TM snoop that is a store hits line in L3 in T, Tn or Te state (shared modified)"
   },
   {,
     "EventCode": "0x3001A",
     "EventName": "PM_MRK_DATA_FROM_L31_ECO_MOD_CYC",
     "BriefDescription": "Duration in cycles to reload with Modified (M) data from another core's ECO L3 on the same chip due to a marked load"
   },
+  {,
+    "EventCode": "0xF0B4",
+    "EventName": "PM_DC_PREF_CONS_ALLOC",
+    "BriefDescription": "Prefetch stream allocated in the conservative phase by either the hardware prefetch mechanism or software prefetch. The sum of this pair subtracted from the total number of allocs will give the total allocs in normal phase"
+  },
   {,
     "EventCode": "0xF894",
     "EventName": "PM_LSU3_L1_CAM_CANCEL",
   {,
     "EventCode": "0x468A6",
     "EventName": "PM_RD_CLEARING_SC",
-    "BriefDescription": "Read clearing SC"
+    "BriefDescription": "Core TM load hits line in L3 in TM_SC state and causes it to be invalidated"
+  },
+  {,
+    "EventCode": "0xD0B0",
+    "EventName": "PM_HWSYNC",
+    "BriefDescription": ""
   },
   {,
     "EventCode": "0x168B0",
     "EventName": "PM_DC_PREF_HW_ALLOC",
     "BriefDescription": "Prefetch stream allocated by the hardware prefetch mechanism"
   },
+  {,
+    "EventCode": "0xF0BC",
+    "EventName": "PM_LS2_UNALIGNED_ST",
+    "BriefDescription": "Store instructions whose data crosses a double-word boundary, which causes it to require an additional slice than than what normally would be required of the Store of that size.  If the Store wraps from slice 3 to slice 0, thee is an additional 3-cycle penalty"
+  },
   {,
     "EventCode": "0xD0AC",
     "EventName": "PM_SRQ_SYNC_CYC",
     "EventName": "PM_MRK_INST_FROM_L3MISS",
     "BriefDescription": "Marked instruction was reloaded from a location beyond the local chiplet"
   },
+  {,
+    "EventCode": "0x58A8",
+    "EventName": "PM_DECODE_HOLD_ICT_FULL",
+    "BriefDescription": "Counts the number of cycles in which the IFU was not able to decode and transmit one or more instructions because all itags were in use.  This means the ICT is full for this thread"
+  },
   {,
     "EventCode": "0x26082",
     "EventName": "PM_L2_IC_INV",
     "EventName": "PM_MRK_DATA_FROM_OFF_CHIP_CACHE_CYC",
     "BriefDescription": "Duration in cycles to reload either shared or modified data from another core's L2/L3 on a different chip (remote or distant) due to a marked load"
   },
+  {,
+    "EventCode": "0xF888",
+    "EventName": "PM_LSU1_STORE_REJECT",
+    "BriefDescription": "All internal store rejects cause the instruction to go back to the SRQ and go to sleep until woken up to try again after the condition has been met"
+  },
+  {,
+    "EventCode": "0xC098",
+    "EventName": "PM_LS2_UNALIGNED_LD",
+    "BriefDescription": "Load instructions whose data crosses a double-word boundary, which causes it to require an additional slice than than what normally would be required of the load of that size.  If the load wraps from slice 3 to slice 0, thee is an additional 3-cycle penalty"
+  },
   {,
     "EventCode": "0x20058",
     "EventName": "PM_DARQ1_10_12_ENTRIES",
   {,
     "EventCode": "0x360A6",
     "EventName": "PM_SNP_TM_HIT_M",
-    "BriefDescription": "Snp TM st hit M/Mu"
+    "BriefDescription": "TM snoop that is a store hits line in L3 in M or Mu state (exclusive modified)"
   },
   {,
     "EventCode": "0x5898",
     "BriefDescription": "A data line was written to the L1 due to a hardware or software prefetch"
   },
   {,
-    "EventCode": "0xF888",
-    "EventName": "PM_LSU1_STORE_REJECT",
-    "BriefDescription": "All internal store rejects cause the instruction to go back to the SRQ and go to sleep until woken up to try again after the condition has been met"
+    "EventCode": "0x2608E",
+    "EventName": "PM_TM_LD_CONF",
+    "BriefDescription": "TM Load (fav or non-fav) ran into conflict (failed)"
   },
   {,
     "EventCode": "0x1D144",
   {,
     "EventCode": "0x26884",
     "EventName": "PM_DSIDE_MRU_TOUCH",
-    "BriefDescription": "D-side L2 MRU touch sent to L2"
+    "BriefDescription": "D-side L2 MRU touch commands sent to the L2"
   },
   {,
     "EventCode": "0x30134",
     "EventName": "PM_EAT_FORCE_MISPRED",
     "BriefDescription": "XL-form branch was mispredicted due to the predicted target address missing from EAT.  The EAT forces a mispredict in this case since there is no predicated target to validate.  This is a rare case that may occur when the EAT is full and a branch is issued"
   },
+  {,
+    "EventCode": "0xC094",
+    "EventName": "PM_LS0_UNALIGNED_LD",
+    "BriefDescription": "Load instructions whose data crosses a double-word boundary, which causes it to require an additional slice than than what normally would be required of the load of that size.  If the load wraps from slice 3 to slice 0, thee is an additional 3-cycle penalty"
+  },
+  {,
+    "EventCode": "0xF8BC",
+    "EventName": "PM_LS3_UNALIGNED_ST",
+    "BriefDescription": "Store instructions whose data crosses a double-word boundary, which causes it to require an additional slice than than what normally would be required of the Store of that size.  If the Store wraps from slice 3 to slice 0, thee is an additional 3-cycle penalty"
+  },
   {,
     "EventCode": "0x460AE",
     "EventName": "PM_L3_P2_CO_RTY",
   {,
     "EventCode": "0xC880",
     "EventName": "PM_LS1_LD_VECTOR_FIN",
-    "BriefDescription": ""
+    "BriefDescription": "LS1 finished load vector op"
   },
   {,
     "EventCode": "0x2894",
     "EventName": "PM_MRK_LSU_DERAT_MISS",
     "BriefDescription": "Marked derat reload (miss) for any page size"
   },
+  {,
+    "EventCode": "0x160A0",
+    "EventName": "PM_L3_PF_MISS_L3",
+    "BriefDescription": "L3 PF missed in L3"
+  },
   {,
     "EventCode": "0x1C04A",
     "EventName": "PM_DATA_FROM_RL2L3_SHR",
     "EventName": "PM_L2_LOC_GUESS_WRONG",
     "BriefDescription": "L2 guess local (LNS) and guess was not correct (ie data not on chip)"
   },
+  {,
+    "EventCode": "0xC888",
+    "EventName": "PM_LSU_DTLB_MISS_64K",
+    "BriefDescription": "Data TLB Miss page size 64K"
+  },
   {,
     "EventCode": "0xE0A4",
     "EventName": "PM_TMA_REQ_L2",
     "BriefDescription": "addrs only req to L2 only on the first one,Indication that Load footprint is not expanding"
   },
+  {,
+    "EventCode": "0xC088",
+    "EventName": "PM_LSU_DTLB_MISS_4K",
+    "BriefDescription": "Data TLB Miss page size 4K"
+  },
   {,
     "EventCode": "0x3C042",
     "EventName": "PM_DATA_FROM_L3_DISP_CONFLICT",
   {,
     "EventCode": "0x26084",
     "EventName": "PM_L2_RCLD_DISP_FAIL_OTHER",
-    "BriefDescription": "All I-or-D side load dispatch attempts for this thread that failed due to reason other than address collision (excludes i_l2mru_tch_reqs)"
+    "BriefDescription": "All D-side-Ld or I-side-instruction-fetch dispatch attempts for this thread that failed due to reasons other than an address collision conflicts with an L2 machines (e.g. Read-Claim/Snoop machine not available)"
   },
   {,
     "EventCode": "0x101E4",
   {,
     "EventCode": "0x46080",
     "EventName": "PM_L2_DISP_ALL_L2MISS",
-    "BriefDescription": "All successful Ld/St dispatches for this thread that were an L2 miss (excludes i_l2mru_tch_reqs)"
+    "BriefDescription": "All successful D-side-Ld/St or I-side-instruction-fetch dispatches for this thread that were an L2 miss"
   },
   {,
-    "EventCode": "0x160A0",
-    "EventName": "PM_L3_PF_MISS_L3",
-    "BriefDescription": "L3 PF missed in L3"
+    "EventCode": "0xF8B8",
+    "EventName": "PM_LS1_UNALIGNED_ST",
+    "BriefDescription": "Store instructions whose data crosses a double-word boundary, which causes it to require an additional slice than than what normally would be required of the Store of that size.  If the Store wraps from slice 3 to slice 0, thee is an additional 3-cycle penalty"
   },
   {,
     "EventCode": "0x408C",
   {,
     "EventCode": "0x160B2",
     "EventName": "PM_L3_LOC_GUESS_CORRECT",
-    "BriefDescription": "initial scope=node/chip (LNS) and data from local node (local) (pred successful) - always PFs only"
+    "BriefDescription": "Prefetch scope predictor selected LNS and was correct"
   },
   {,
     "EventCode": "0x48B4",
   {,
     "EventCode": "0x36082",
     "EventName": "PM_L2_LD_DISP",
-    "BriefDescription": "All successful I-or-D side load dispatches for this thread (excludes i_l2mru_tch_reqs)"
+    "BriefDescription": "All successful D-side-Ld or I-side-instruction-fetch dispatches for this thread"
   },
   {,
     "EventCode": "0xF8B0",
   {,
     "EventCode": "0x16884",
     "EventName": "PM_L2_RCLD_DISP_FAIL_ADDR",
-    "BriefDescription": "All I-od-D side load dispatch attempts for this thread that failed due to address collision with RC/CO/SN/SQ machine (excludes i_l2mru_tch_reqs)"
+    "BriefDescription": "All D-side-Ld or I-side-instruction-fetch dispatch attempts for this thread that failed due to an address collision conflicts with an L2 machines already working on this line (e.g. ld-hit-stq or Read-claim/Castout/Snoop machines)"
   },
   {,
     "EventCode": "0x460A0",
     "EventName": "PM_IC_PREF_REQ",
     "BriefDescription": "Instruction prefetch requests"
   },
+  {,
+    "EventCode": "0xC898",
+    "EventName": "PM_LS3_UNALIGNED_LD",
+    "BriefDescription": "Load instructions whose data crosses a double-word boundary, which causes it to require an additional slice than than what normally would be required of the load of that size.  If the load wraps from slice 3 to slice 0, thee is an additional 3-cycle penalty"
+  },
   {,
     "EventCode": "0x488C",
     "EventName": "PM_IC_PREF_WRITE",
   {,
     "EventCode": "0xF89C",
     "EventName": "PM_XLATE_MISS",
-    "BriefDescription": "The LSU requested a line from L2 for translation.  It may be satisfied from any source beyond L2.  Includes speculative instructions"
+    "BriefDescription": "The LSU requested a line from L2 for translation.  It may be satisfied from any source beyond L2.  Includes speculative instructions. Includes instruction, prefetch and demand"
   },
   {,
     "EventCode": "0x14158",
     "EventName": "PM_MRK_DATA_FROM_L31_SHR_CYC",
     "BriefDescription": "Duration in cycles to reload with Shared (S) data from another core's L3 on the same chip due to a marked load"
   },
+  {,
+    "EventCode": "0xC88C",
+    "EventName": "PM_LSU_DTLB_MISS_16G_1G",
+    "BriefDescription": "Data TLB Miss page size 16G (HPT) or 1G (Radix)"
+  },
   {,
     "EventCode": "0x268A6",
     "EventName": "PM_TM_RST_SC",
-    "BriefDescription": "TM-snp rst RM SC"
+    "BriefDescription": "TM snoop hits line in L3 that is TM_SC state and causes it to be invalidated"
   },
   {,
     "EventCode": "0x468A4",
   {,
     "EventCode": "0x46086",
     "EventName": "PM_L2_SN_M_RD_DONE",
-    "BriefDescription": "SNP dispatched for a read and was M (true M)"
+    "BriefDescription": "Snoop dispatched for a read and was M (true M)"
   },
   {,
     "EventCode": "0x40154",
     "EventName": "PM_LINK_STACK_CORRECT",
     "BriefDescription": "Link stack predicts right address"
   },
-  {,
-    "EventCode": "0x4C05A",
-    "EventName": "PM_DTLB_MISS_1G",
-    "BriefDescription": "Data TLB reload (after a miss) page size 1G. Implies radix translation was used"
-  },
   {,
     "EventCode": "0x36886",
     "EventName": "PM_L2_SN_SX_I_DONE",
-    "BriefDescription": "SNP dispatched and went from Sx to Ix"
+    "BriefDescription": "Snoop dispatched and went from Sx to Ix"
   },
   {,
     "EventCode": "0x4E04A",
     "EventName": "PM_MRK_DATA_FROM_DL4_CYC",
     "BriefDescription": "Duration in cycles to reload from another chip's L4 on a different Node or Group (Distant) due to a marked load"
   },
-  {,
-    "EventCode": "0x2608E",
-    "EventName": "PM_TM_LD_CONF",
-    "BriefDescription": "TM Load (fav or non-fav) ran into conflict (failed)"
-  },
   {,
     "EventCode": "0x4080",
     "EventName": "PM_INST_FROM_L1",
   {,
     "EventCode": "0x260A6",
     "EventName": "PM_NON_TM_RST_SC",
-    "BriefDescription": "Non-TM snp rst TM SC"
+    "BriefDescription": "Non-TM snoop hits line in L3 that is TM_SC state and causes it to be invalidated"
   },
   {,
     "EventCode": "0x3608A",
     "EventName": "PM_FLUSH_MPRED",
     "BriefDescription": "Branch mispredict flushes.  Includes target and address misprecition"
   },
-  {,
-    "EventCode": "0x508C",
-    "EventName": "PM_SHL_CREATED",
-    "BriefDescription": "Store-Hit-Load Table Entry Created"
-  },
   {,
     "EventCode": "0x1504C",
     "EventName": "PM_IPTEG_FROM_LL4",
   {,
     "EventCode": "0x2608A",
     "EventName": "PM_ISIDE_DISP_FAIL_ADDR",
-    "BriefDescription": "All I-side dispatch attempts for this thread that failed due to a addr collision with another machine (excludes i_l2mru_tch_reqs)"
+    "BriefDescription": "All I-side-instruction-fetch dispatch attempts for this thread that failed due to an address collision conflict with an L2 machine already working on this line (e.g. ld-hit-stq or RC/CO/SN machines)"
   },
   {,
     "EventCode": "0x50B4",
     "BriefDescription": "Number of stcx instructions finished. This includes instructions in the speculative path of a branch that may be flushed"
   },
   {,
-    "EventCode": "0xE0B8",
-    "EventName": "PM_LS2_TM_DISALLOW",
-    "BriefDescription": "A TM-ineligible instruction tries to execute inside a transaction and the LSU disallows it"
+    "EventCode": "0xD8AC",
+    "EventName": "PM_LWSYNC",
+    "BriefDescription": ""
   },
   {,
     "EventCode": "0x2094",
     "EventName": "PM_ICT_NOSLOT_DISP_HELD_HB_FULL",
     "BriefDescription": "Ict empty for this thread due to dispatch holds because the History Buffer was full. Could be GPR/VSR/VMR/FPR/CR/XVF; CR; XVF (XER/VSCR/FPSCR)"
   },
+  {,
+    "EventCode": "0xC894",
+    "EventName": "PM_LS1_UNALIGNED_LD",
+    "BriefDescription": "Load instructions whose data crosses a double-word boundary, which causes it to require an additional slice than than what normally would be required of the load of that size.  If the load wraps from slice 3 to slice 0, thee is an additional 3-cycle penalty"
+  },
   {,
     "EventCode": "0x360A2",
     "EventName": "PM_L3_L2_CO_HIT",
   {,
     "EventCode": "0xC084",
     "EventName": "PM_LS2_LD_VECTOR_FIN",
-    "BriefDescription": ""
+    "BriefDescription": "LS2 finished load vector op"
   },
   {,
     "EventCode": "0x1608E",
     "EventName": "PM_SN_USAGE",
     "BriefDescription": "Continuous 16 cycle (2to1) window where this signals rotates thru sampling each SN machine busy. PMU uses this wave to then do 16 cyc count to sample total number of machs running"
   },
+  {,
+    "EventCode": "0x36084",
+    "EventName": "PM_L2_RCST_DISP",
+    "BriefDescription": "All D-side store dispatch attempts for this thread"
+  },
   {,
     "EventCode": "0x46084",
     "EventName": "PM_L2_RCST_DISP_FAIL_OTHER",
     "EventName": "PM_DC_PREF_STRIDED_CONF",
     "BriefDescription": "A demand load referenced a line in an active strided prefetch stream. The stream could have been allocated through the hardware prefetch mechanism or through software."
   },
-  {,
-    "EventCode": "0x36084",
-    "EventName": "PM_L2_RCST_DISP",
-    "BriefDescription": "All D-side store dispatch attempts for this thread"
-  },
   {,
     "EventCode": "0x45054",
     "EventName": "PM_FMA_CMPL",
   {,
     "EventCode": "0x36080",
     "EventName": "PM_L2_INST",
-    "BriefDescription": "All successful I-side dispatches for this thread   (excludes i_l2mru_tch reqs)"
+    "BriefDescription": "All successful I-side-instruction-fetch (e.g. i-demand, i-prefetch) dispatches for this thread"
   },
   {,
     "EventCode": "0x3504C",
   {,
     "EventCode": "0x1688A",
     "EventName": "PM_ISIDE_DISP",
-    "BriefDescription": "All I-side dispatch attempts for this thread (excludes i_l2mru_tch_reqs)"
+    "BriefDescription": "All I-side-instruction-fetch dispatch attempts for this thread"
   },
   {,
     "EventCode": "0x468AA",
     "EventName": "PM_LSU2_TM_L1_HIT",
     "BriefDescription": "Load tm hit in L1"
   },
+  {,
+    "EventCode": "0xE0B8",
+    "EventName": "PM_LS2_TM_DISALLOW",
+    "BriefDescription": "A TM-ineligible instruction tries to execute inside a transaction and the LSU disallows it"
+  },
   {,
     "EventCode": "0x44044",
     "EventName": "PM_INST_FROM_L31_ECO_MOD",
   {,
     "EventCode": "0x36086",
     "EventName": "PM_L2_RC_ST_DONE",
-    "BriefDescription": "RC did store to line that was Tx or Sx"
+    "BriefDescription": "Read-claim machine did store to line that was in Tx or Sx (Tagged or Shared state)"
   },
   {,
     "EventCode": "0xE8AC",
     "EventName": "PM_IPTEG_FROM_L2_NO_CONFLICT",
     "BriefDescription": "A Page Table Entry was loaded into the TLB from local core's L2 without conflict due to a instruction side request"
   },
+  {,
+    "EventCode": "0x460A6",
+    "EventName": "PM_RD_FORMING_SC",
+    "BriefDescription": "Doesn't occur"
+  },
   {,
     "EventCode": "0x35042",
     "EventName": "PM_IPTEG_FROM_L3_DISP_CONFLICT",
   {,
     "EventCode": "0x36882",
     "EventName": "PM_L2_LD_HIT",
-    "BriefDescription": "All successful I-or-D side load dispatches for this thread that were L2 hits (excludes i_l2mru_tch_reqs)"
+    "BriefDescription": "All successful D-side-Ld or I-side-instruction-fetch dispatches for this thread that were L2 hits"
   },
   {,
     "EventCode": "0x168AC",
     "EventName": "PM_PROBE_NOP_DISP",
     "BriefDescription": "ProbeNops dispatched"
   },
-  {,
-    "EventCode": "0x58A8",
-    "EventName": "PM_DECODE_HOLD_ICT_FULL",
-    "BriefDescription": "Counts the number of cycles in which the IFU was not able to decode and transmit one or more instructions because all itags were in use.  This means the ICT is full for this thread"
-  },
   {,
     "EventCode": "0x10052",
     "EventName": "PM_GRP_PUMP_MPRED_RTY",
   {,
     "EventCode": "0x2688A",
     "EventName": "PM_ISIDE_DISP_FAIL_OTHER",
-    "BriefDescription": "All I-side dispatch attempts for this thread that failed due to a reason other than addrs collision (excludes i_l2mru_tch_reqs)"
+    "BriefDescription": "All I-side-instruction-fetch dispatch attempts for this thread that failed due to reasons other than an address collision conflict with an L2 machine (e.g. no available RC/CO machines)"
   },
   {,
     "EventCode": "0x2001A",
   {,
     "EventCode": "0x46880",
     "EventName": "PM_ISIDE_MRU_TOUCH",
-    "BriefDescription": "I-side L2 MRU touch sent to L2 for this thread"
+    "BriefDescription": "I-side L2 MRU touch sent to L2 for this thread I-side L2 MRU touch commands sent to the L2 for this thread"
   },
   {,
-    "EventCode": "0x1C05C",
-    "EventName": "PM_DTLB_MISS_2M",
-    "BriefDescription": "Data TLB reload (after a miss) page size 2M. Implies radix translation was used"
+    "EventCode": "0x508C",
+    "EventName": "PM_SHL_CREATED",
+    "BriefDescription": "Store-Hit-Load Table Entry Created"
   },
   {,
     "EventCode": "0x50B8",
   {,
     "EventCode": "0x268B2",
     "EventName": "PM_L3_LOC_GUESS_WRONG",
-    "BriefDescription": "Initial scope=node (LNS) but data from out side local node (near or far or rem). Prediction too Low"
+    "BriefDescription": "Prefetch scope predictor selected LNS, but was wrong"
   },
   {,
     "EventCode": "0x36088",
     "EventName": "PM_L3_P2_PF_RTY",
     "BriefDescription": "L3 PF received retry port 2, every retry counted"
   },
+  {,
+    "EventCode": "0xD8B0",
+    "EventName": "PM_PTESYNC",
+    "BriefDescription": ""
+  },
   {,
     "EventCode": "0x26086",
     "EventName": "PM_CO_TM_SC_FOOTPRINT",
     "EventName": "PM_L2_ST_MISS",
     "BriefDescription": "All successful D-Side Store dispatches that were an L2 miss for this thread"
   },
+  {,
+    "EventCode": "0xF8B4",
+    "EventName": "PM_DC_PREF_XCONS_ALLOC",
+    "BriefDescription": "Prefetch stream allocated in the Ultra conservative phase by either the hardware prefetch mechanism or software prefetch"
+  },
   {,
     "EventCode": "0x35048",
     "EventName": "PM_IPTEG_FROM_DL2L3_SHR",
   {,
     "EventCode": "0x460B2",
     "EventName": "PM_L3_SYS_GUESS_WRONG",
-    "BriefDescription": "Initial scope=system (VGS or RNS) but data from local or near. Prediction too high"
+    "BriefDescription": "Prefetch scope predictor selected VGS or RNS, but was wrong"
   },
   {,
     "EventCode": "0x58B8",
     "EventName": "PM_TM_TABORT_TRECLAIM",
     "BriefDescription": "Completion time tabortnoncd, tabortcd, treclaim"
   },
-  {,
-    "EventCode": "0x4C054",
-    "EventName": "PM_DERAT_MISS_16G",
-    "BriefDescription": "Data ERAT Miss (Data TLB Access) page size 16G"
-  },
   {,
     "EventCode": "0x268A0",
     "EventName": "PM_L3_CO_L31",
   {,
     "EventCode": "0x368B2",
     "EventName": "PM_L3_GRP_GUESS_WRONG_HIGH",
-    "BriefDescription": "Initial scope=group (GS or NNS) but data from local node. Prediction too high"
+    "BriefDescription": "Prefetch scope predictor selected GS or NNS, but was wrong because scope was VGS or RNS"
   },
   {,
     "EventCode": "0xE8BC",
   {,
     "EventCode": "0x260B2",
     "EventName": "PM_L3_SYS_GUESS_CORRECT",
-    "BriefDescription": "Initial scope=system (VGS or RNS) and data from outside group (far or rem)(pred successful)"
+    "BriefDescription": "Prefetch scope predictor selected VGS or RNS and was correct"
   },
   {,
     "EventCode": "0x1D146",
     "EventName": "PM_L2_GROUP_PUMP",
     "BriefDescription": "RC requests that were on group (aka nodel) pump attempts"
   },
+  {,
+    "EventCode": "0xC08C",
+    "EventName": "PM_LSU_DTLB_MISS_16M_2M",
+    "BriefDescription": "Data TLB Miss page size 16M (HPT) or 2M (Radix)"
+  },
   {,
     "EventCode": "0x16080",
     "EventName": "PM_L2_LD",
   {,
     "EventCode": "0xC080",
     "EventName": "PM_LS0_LD_VECTOR_FIN",
-    "BriefDescription": ""
+    "BriefDescription": "LS0 finished load vector op"
   },
   {,
     "EventCode": "0x368B0",
     "EventName": "PM_BR_CORECT_PRED_TAKEN_CMPL",
     "BriefDescription": "Conditional Branch Completed in which the HW correctly predicted the direction as taken.  Counted at completion time"
   },
+  {,
+    "EventCode": "0xF0B8",
+    "EventName": "PM_LS0_UNALIGNED_ST",
+    "BriefDescription": "Store instructions whose data crosses a double-word boundary, which causes it to require an additional slice than than what normally would be required of the Store of that size.  If the Store wraps from slice 3 to slice 0, thee is an additional 3-cycle penalty"
+  },
   {,
     "EventCode": "0x20132",
     "EventName": "PM_MRK_DFU_FIN",
   {,
     "EventCode": "0x160A6",
     "EventName": "PM_TM_SC_CO",
-    "BriefDescription": "L3 castout TM SC line"
+    "BriefDescription": "L3 castout of line that was StoreCopy (original value of speculatively written line) in a Transaction"
   },
   {,
     "EventCode": "0xC8B0",
   {,
     "EventCode": "0x16084",
     "EventName": "PM_L2_RCLD_DISP",
-    "BriefDescription": "All I-or-D side load dispatch attempts for this thread (excludes i_l2mru_tch_reqs)"
+    "BriefDescription": "All D-side-Ld or I-side-instruction-fetch dispatch attempts for this thread"
   },
   {,
     "EventCode": "0x3F150",
   {,
     "EventCode": "0x46082",
     "EventName": "PM_L2_ST_DISP",
-    "BriefDescription": "All successful D-side store dispatches for this thread (L2 miss + L2 hits)"
+    "BriefDescription": "All successful D-side store dispatches for this thread"
   },
   {,
     "EventCode": "0x36880",
     "EventName": "PM_L2_INST_MISS",
-    "BriefDescription": "All successful I-side dispatches that were an L2 miss for this thread (excludes i_l2mru_tch reqs)"
+    "BriefDescription": "All successful I-side-instruction-fetch (e.g. i-demand, i-prefetch) dispatches for this thread that were an L2 miss"
   },
   {,
     "EventCode": "0xE084",
   {,
     "EventCode": "0xC884",
     "EventName": "PM_LS3_LD_VECTOR_FIN",
-    "BriefDescription": ""
+    "BriefDescription": "LS3 finished load vector op"
   },
   {,
     "EventCode": "0x360A8",
   {,
     "EventCode": "0x168B2",
     "EventName": "PM_L3_GRP_GUESS_CORRECT",
-    "BriefDescription": "Initial scope=group (GS or NNS) and data from same group (near) (pred successful)"
+    "BriefDescription": "Prefetch scope predictor selected GS or NNS and was correct"
   },
   {,
     "EventCode": "0x48A4",
index 5af1abb..b4772f5 100644 (file)
     "EventName": "PM_DISP_HELD",
     "BriefDescription": "Dispatch Held"
   },
-  {,
-    "EventCode": "0x3D154",
-    "EventName": "PM_MRK_DERAT_MISS_16M",
-    "BriefDescription": "Marked Data ERAT Miss (Data TLB Access) page size 16M"
-  },
   {,
     "EventCode": "0x200F8",
     "EventName": "PM_EXT_INT",
     "EventName": "PM_MRK_DPTEG_FROM_L3_MEPF",
     "BriefDescription": "A Page Table Entry was loaded into the TLB from local core's L3 without dispatch conflicts hit on Mepf state. due to a marked data side request. When using Radix Page Translation, this count excludes PDE reloads. Only PTE reloads are included"
   },
+  {,
+    "EventCode": "0x4C15C",
+    "EventName": "PM_MRK_DERAT_MISS_16G_1G",
+    "BriefDescription": "Marked Data ERAT Miss (Data TLB Access) page size 16G (hpt mode) and 1G (radix mode)"
+  },
   {,
     "EventCode": "0x10024",
     "EventName": "PM_PMC5_OVERFLOW",
     "EventName": "PM_ICT_NOSLOT_IC_MISS",
     "BriefDescription": "Ict empty for this thread due to Icache Miss"
   },
-  {,
-    "EventCode": "0x3D152",
-    "EventName": "PM_MRK_DERAT_MISS_1G",
-    "BriefDescription": "Marked Data ERAT Miss (Data TLB Access) page size 1G. Implies radix translation"
-  },
   {,
     "EventCode": "0x4F14A",
     "EventName": "PM_MRK_DPTEG_FROM_OFF_CHIP_CACHE",
     "EventName": "PM_MRK_DPTEG_FROM_L2_NO_CONFLICT",
     "BriefDescription": "A Page Table Entry was loaded into the TLB from local core's L2 without conflict due to a marked data side request. When using Radix Page Translation, this count excludes PDE reloads. Only PTE reloads are included"
   },
-  {,
-    "EventCode": "0x2C05A",
-    "EventName": "PM_DERAT_MISS_1G",
-    "BriefDescription": "Data ERAT Miss (Data TLB Access) page size 1G. Implies radix translation"
-  },
   {,
     "EventCode": "0x1F058",
     "EventName": "PM_RADIX_PWC_L2_PTE_FROM_L2",
     "EventName": "PM_DTLB_MISS",
     "BriefDescription": "Data PTEG reload"
   },
-  {,
-    "EventCode": "0x2D152",
-    "EventName": "PM_MRK_DERAT_MISS_2M",
-    "BriefDescription": "Marked Data ERAT Miss (Data TLB Access) page size 2M. Implies radix translation"
-  },
   {,
     "EventCode": "0x2C046",
     "EventName": "PM_DATA_FROM_RL2L3_MOD",
     "EventName": "PM_CMPLU_STALL_DFU",
     "BriefDescription": "Finish stall because the NTF instruction was issued to the Decimal Floating Point execution pipe and waiting to finish. Includes decimal floating point instructions + 128 bit binary floating point instructions. Not qualified by multicycle"
   },
+  {,
+    "EventCode": "0x3C054",
+    "EventName": "PM_DERAT_MISS_16M_2M",
+    "BriefDescription": "Data ERAT Miss (Data TLB Access) page size 16M (HPT mode) or 2M (Radix mode)"
+  },
   {,
     "EventCode": "0x4C04C",
     "EventName": "PM_DATA_FROM_DMEM",
     "EventName": "PM_INST_FROM_MEMORY",
     "BriefDescription": "The processor's Instruction cache was reloaded from a memory location including L4 from local remote or distant due to an instruction fetch (not prefetch)"
   },
-  {,
-    "EventCode": "0x1C05A",
-    "EventName": "PM_DERAT_MISS_2M",
-    "BriefDescription": "Data ERAT Miss (Data TLB Access) page size 2M. Implies radix translation"
-  },
   {,
     "EventCode": "0x30024",
     "EventName": "PM_PMC6_OVERFLOW",
     "EventName": "PM_BRU_FIN",
     "BriefDescription": "Branch Instruction Finished"
   },
+  {,
+    "EventCode": "0x3D154",
+    "EventName": "PM_MRK_DERAT_MISS_16M_2M",
+    "BriefDescription": "Marked Data ERAT Miss (Data TLB Access) page size 16M (hpt mode) or 2M (radix mode)"
+  },
   {,
     "EventCode": "0x30020",
     "EventName": "PM_PMC2_REWIND",
     "EventName": "PM_MRK_DPTEG_FROM_L31_MOD",
     "BriefDescription": "A Page Table Entry was loaded into the TLB with Modified (M) data from another core's L3 on the same chip due to a marked data side request. When using Radix Page Translation, this count excludes PDE reloads. Only PTE reloads are included"
   },
-  {,
-    "EventCode": "0x4C15C",
-    "EventName": "PM_MRK_DERAT_MISS_16G",
-    "BriefDescription": "Marked Data ERAT Miss (Data TLB Access) page size 16G"
-  },
   {,
     "EventCode": "0x14052",
     "EventName": "PM_INST_GRP_PUMP_MPRED_RTY",
     "EventName": "PM_IC_DEMAND_CYC",
     "BriefDescription": "Icache miss demand cycles"
   },
-  {,
-    "EventCode": "0x3C054",
-    "EventName": "PM_DERAT_MISS_16M",
-    "BriefDescription": "Data ERAT Miss (Data TLB Access) page size 16M"
-  },
   {,
     "EventCode": "0x2D14E",
     "EventName": "PM_MRK_DATA_FROM_L21_SHR",
index d0b89f9..8b3b0f3 100644 (file)
@@ -9,11 +9,6 @@
     "EventName": "PM_MEM_LOC_THRESH_LSU_HIGH",
     "BriefDescription": "Local memory above threshold for LSU medium"
   },
-  {,
-    "EventCode": "0x2C056",
-    "EventName": "PM_DTLB_MISS_4K",
-    "BriefDescription": "Data TLB Miss page size 4k"
-  },
   {,
     "EventCode": "0x40118",
     "EventName": "PM_MRK_DCACHE_RELOAD_INTV",
index bc8e03d..b276426 100644 (file)
     "EventName": "PM_ST_FIN",
     "BriefDescription": "Store finish count. Includes speculative activity"
   },
-  {,
-    "EventCode": "0x44042",
-    "EventName": "PM_INST_FROM_L3",
-    "BriefDescription": "The processor's Instruction cache was reloaded from local core's L3 due to an instruction fetch (not prefetch)"
-  },
   {,
     "EventCode": "0x1504A",
     "EventName": "PM_IPTEG_FROM_RL2L3_SHR",
     "EventName": "PM_PMC1_SAVED",
     "BriefDescription": "PMC1 Rewind Value saved"
   },
+  {,
+    "EventCode": "0x44042",
+    "EventName": "PM_INST_FROM_L3",
+    "BriefDescription": "The processor's Instruction cache was reloaded from local core's L3 due to an instruction fetch (not prefetch)"
+  },
   {,
     "EventCode": "0x200FE",
     "EventName": "PM_DATA_FROM_L2MISS",
index b578aa2..db3a594 100644 (file)
 #include <unistd.h>
 #include <stdarg.h>
 #include <libgen.h>
+#include <limits.h>
 #include <dirent.h>
 #include <sys/time.h>                  /* getrlimit */
 #include <sys/resource.h>              /* getrlimit */
 #include <ftw.h>
 #include <sys/stat.h>
+#include <linux/list.h>
 #include "jsmn.h"
 #include "json.h"
 #include "jevents.h"
@@ -249,31 +251,25 @@ static const char *field_to_perf(struct map *table, char *map, jsmntok_t *val)
        jsmntok_t *loc = (t);                                   \
        if (!(t)->start && (t) > tokens)                        \
                loc = (t) - 1;                                  \
-               pr_err("%s:%d: " m ", got %s\n", fn,            \
-                       json_line(map, loc),                    \
-                       json_name(t));                          \
+       pr_err("%s:%d: " m ", got %s\n", fn,                    \
+              json_line(map, loc),                             \
+              json_name(t));                                   \
+       err = -EIO;                                             \
        goto out_free;                                          \
 } } while (0)
 
-#define TOPIC_DEPTH 256
-static char *topic_array[TOPIC_DEPTH];
-static int   topic_level;
+static char *topic;
 
 static char *get_topic(void)
 {
-       char *tp_old, *tp = NULL;
+       char *tp;
        int i;
 
-       for (i = 0; i < topic_level + 1; i++) {
-               int n;
-
-               tp_old = tp;
-               n = asprintf(&tp, "%s%s", tp ?: "", topic_array[i]);
-               if (n < 0) {
-                       pr_info("%s: asprintf() error %s\n", prog);
-                       return NULL;
-               }
-               free(tp_old);
+       /* tp is free'd in process_one_file() */
+       i = asprintf(&tp, "%s", topic);
+       if (i < 0) {
+               pr_info("%s: asprintf() error %s\n", prog);
+               return NULL;
        }
 
        for (i = 0; i < (int) strlen(tp); i++) {
@@ -290,25 +286,15 @@ static char *get_topic(void)
        return tp;
 }
 
-static int add_topic(int level, char *bname)
+static int add_topic(char *bname)
 {
-       char *topic;
-
-       level -= 2;
-
-       if (level >= TOPIC_DEPTH)
-               return -EINVAL;
-
+       free(topic);
        topic = strdup(bname);
        if (!topic) {
                pr_info("%s: strdup() error %s for file %s\n", prog,
                                strerror(errno), bname);
                return -ENOMEM;
        }
-
-       free(topic_array[topic_level]);
-       topic_array[topic_level] = topic;
-       topic_level              = level;
        return 0;
 }
 
@@ -366,6 +352,81 @@ static int print_events_table_entry(void *data, char *name, char *event,
        return 0;
 }
 
+struct event_struct {
+       struct list_head list;
+       char *name;
+       char *event;
+       char *desc;
+       char *long_desc;
+       char *pmu;
+       char *unit;
+       char *perpkg;
+       char *metric_expr;
+       char *metric_name;
+       char *metric_group;
+};
+
+#define ADD_EVENT_FIELD(field) do { if (field) {               \
+       es->field = strdup(field);                              \
+       if (!es->field)                                         \
+               goto out_free;                                  \
+} } while (0)
+
+#define FREE_EVENT_FIELD(field) free(es->field)
+
+#define TRY_FIXUP_FIELD(field) do { if (es->field && !*field) {\
+       *field = strdup(es->field);                             \
+       if (!*field)                                            \
+               return -ENOMEM;                                 \
+} } while (0)
+
+#define FOR_ALL_EVENT_STRUCT_FIELDS(op) do {                   \
+       op(name);                                               \
+       op(event);                                              \
+       op(desc);                                               \
+       op(long_desc);                                          \
+       op(pmu);                                                \
+       op(unit);                                               \
+       op(perpkg);                                             \
+       op(metric_expr);                                        \
+       op(metric_name);                                        \
+       op(metric_group);                                       \
+} while (0)
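
The three operation macros above are fanned out across every member of struct event_struct through a single X-macro-style list, so adding a field later means touching only one place. An illustrative expansion, not part of the patch:

    /* FOR_ALL_EVENT_STRUCT_FIELDS(FREE_EVENT_FIELD) expands, in effect, to: */
    free(es->name);
    free(es->event);
    free(es->desc);
    /* ...and likewise for long_desc, pmu, unit, perpkg and the metric fields */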
+
+static LIST_HEAD(arch_std_events);
+
+static void free_arch_std_events(void)
+{
+       struct event_struct *es, *next;
+
+       list_for_each_entry_safe(es, next, &arch_std_events, list) {
+               FOR_ALL_EVENT_STRUCT_FIELDS(FREE_EVENT_FIELD);
+               list_del(&es->list);
+               free(es);
+       }
+}
+
+static int save_arch_std_events(void *data, char *name, char *event,
+                               char *desc, char *long_desc, char *pmu,
+                               char *unit, char *perpkg, char *metric_expr,
+                               char *metric_name, char *metric_group)
+{
+       struct event_struct *es;
+       struct stat *sb = data;
+
+       es = malloc(sizeof(*es));
+       if (!es)
+               return -ENOMEM;
+       memset(es, 0, sizeof(*es));
+       FOR_ALL_EVENT_STRUCT_FIELDS(ADD_EVENT_FIELD);
+       list_add_tail(&es->list, &arch_std_events);
+       return 0;
+out_free:
+       FOR_ALL_EVENT_STRUCT_FIELDS(FREE_EVENT_FIELD);
+       free(es);
+       return -ENOMEM;
+}
+
 static void print_events_table_suffix(FILE *outfp)
 {
        fprintf(outfp, "{\n");
@@ -407,6 +468,32 @@ static char *real_event(const char *name, char *event)
        return event;
 }
 
+static int
+try_fixup(const char *fn, char *arch_std, char **event, char **desc,
+         char **name, char **long_desc, char **pmu, char **filter,
+         char **perpkg, char **unit, char **metric_expr, char **metric_name,
+         char **metric_group, unsigned long long eventcode)
+{
+       /* try to find matching event from arch standard values */
+       struct event_struct *es;
+
+       list_for_each_entry(es, &arch_std_events, list) {
+               if (!strcmp(arch_std, es->name)) {
+                       if (!eventcode && es->event) {
+                               /* allow EventCode to be overridden */
+                               free(*event);
+                               *event = NULL;
+                       }
+                       FOR_ALL_EVENT_STRUCT_FIELDS(TRY_FIXUP_FIELD);
+                       return 0;
+               }
+       }
+
+       pr_err("%s: could not find matching %s for %s\n",
+                                       prog, arch_std, fn);
+       return -1;
+}
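
try_fixup() is what gives the new "ArchStdEvent" field its meaning: a platform JSON file names an architecture-standard event collected in the first tree walk, and any fields the platform file leaves unset are inherited from that standard definition. A hypothetical fragment (event name and code are invented for illustration):

    [
      {
        "ArchStdEvent": "BR_MIS_PRED"
      },
      {
        "ArchStdEvent": "BR_MIS_PRED",
        "EventCode": "0x99"
      }
    ]

The first entry inherits everything, including the event code; the second keeps its own EventCode, which the !eventcode check above deliberately leaves alone.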
+
 /* Call func with each event in the json file */
 int json_events(const char *fn,
          int (*func)(void *data, char *name, char *event, char *desc,
@@ -416,7 +503,7 @@ int json_events(const char *fn,
                      char *metric_name, char *metric_group),
          void *data)
 {
-       int err = -EIO;
+       int err;
        size_t size;
        jsmntok_t *tokens, *tok;
        int i, j, len;
@@ -442,6 +529,7 @@ int json_events(const char *fn,
                char *metric_expr = NULL;
                char *metric_name = NULL;
                char *metric_group = NULL;
+               char *arch_std = NULL;
                unsigned long long eventcode = 0;
                struct msrmap *msr = NULL;
                jsmntok_t *msrval = NULL;
@@ -527,6 +615,10 @@ int json_events(const char *fn,
                                addfield(map, &metric_expr, "", "", val);
                                for (s = metric_expr; *s; s++)
                                        *s = tolower(*s);
+                       } else if (json_streq(map, field, "ArchStdEvent")) {
+                               addfield(map, &arch_std, "", "", val);
+                               for (s = arch_std; *s; s++)
+                                       *s = tolower(*s);
                        }
                        /* ignore unknown fields */
                }
@@ -551,8 +643,21 @@ int json_events(const char *fn,
                if (name)
                        fixname(name);
 
+               if (arch_std) {
+                       /*
+                        * An arch standard event is referenced, so try to
+                        * fixup any unassigned values.
+                        */
+                       err = try_fixup(fn, arch_std, &event, &desc, &name,
+                                       &long_desc, &pmu, &filter, &perpkg,
+                                       &unit, &metric_expr, &metric_name,
+                                       &metric_group, eventcode);
+                       if (err)
+                               goto free_strings;
+               }
                err = func(data, name, real_event(name, event), desc, long_desc,
                           pmu, unit, perpkg, metric_expr, metric_name, metric_group);
+free_strings:
                free(event);
                free(desc);
                free(name);
@@ -565,6 +670,8 @@ int json_events(const char *fn,
                free(metric_expr);
                free(metric_name);
                free(metric_group);
+               free(arch_std);
+
                if (err)
                        break;
                tok += j;
@@ -588,7 +695,7 @@ static char *file_name_to_table_name(char *fname)
         * Derive rest of table name from basename of the JSON file,
         * replacing hyphens and stripping out .json suffix.
         */
-       n = asprintf(&tblname, "pme_%s", basename(fname));
+       n = asprintf(&tblname, "pme_%s", fname);
        if (n < 0) {
                pr_info("%s: asprintf() error %s for file %s\n", prog,
                                strerror(errno), fname);
@@ -598,7 +705,7 @@ static char *file_name_to_table_name(char *fname)
        for (i = 0; i < strlen(tblname); i++) {
                c = tblname[i];
 
-               if (c == '-')
+               if (c == '-' || c == '/')
                        tblname[i] = '_';
                else if (c == '.') {
                        tblname[i] = '\0';
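
With vendor subdirectories, the table name is now derived from more than the basename, which is why '/' joins '-' in the set of characters rewritten to '_'. An illustrative mapping, assuming the function is handed the vendor/platform name that process_one_file derives below:

    arm/cortex-a53  ->  pme_arm_cortex_a53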
@@ -755,25 +862,106 @@ static int get_maxfds(void)
 static FILE *eventsfp;
 static char *mapfile;
 
+static int is_leaf_dir(const char *fpath)
+{
+       DIR *d;
+       struct dirent *dir;
+       int res = 1;
+
+       d = opendir(fpath);
+       if (!d)
+               return 0;
+
+       while ((dir = readdir(d)) != NULL) {
+               if (!strcmp(dir->d_name, ".") || !strcmp(dir->d_name, ".."))
+                       continue;
+
+               if (dir->d_type == DT_DIR) {
+                       res = 0;
+                       break;
+               } else if (dir->d_type == DT_UNKNOWN) {
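+                       /* d_type can be DT_UNKNOWN on some filesystems; stat() to find out */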
+                       char path[PATH_MAX];
+                       struct stat st;
+
+                       sprintf(path, "%s/%s", fpath, dir->d_name);
+                       if (stat(path, &st))
+                               break;
+
+                       if (S_ISDIR(st.st_mode)) {
+                               res = 0;
+                               break;
+                       }
+               }
+       }
+
+       closedir(d);
+
+       return res;
+}
+
+static int is_json_file(const char *name)
+{
+       const char *suffix;
+
+       if (strlen(name) < 5)
+               return 0;
+
+       suffix = name + strlen(name) - 5;
+
+       if (strncmp(suffix, ".json", 5) == 0)
+               return 1;
+       return 0;
+}
+
+static int preprocess_arch_std_files(const char *fpath, const struct stat *sb,
+                               int typeflag, struct FTW *ftwbuf)
+{
+       int level = ftwbuf->level;
+       int is_file = typeflag == FTW_F;
+
+       if (level == 1 && is_file && is_json_file(fpath))
+               return json_events(fpath, save_arch_std_events, (void *)sb);
+
+       return 0;
+}
+
 static int process_one_file(const char *fpath, const struct stat *sb,
                            int typeflag, struct FTW *ftwbuf)
 {
-       char *tblname, *bname  = (char *) fpath + ftwbuf->base;
+       char *tblname, *bname;
        int is_dir  = typeflag == FTW_D;
        int is_file = typeflag == FTW_F;
        int level   = ftwbuf->level;
        int err = 0;
 
+       if (level == 2 && is_dir) {
+               /*
+                * For level 2 directory, bname will include parent name,
+                * like vendor/platform. So search back from platform dir
+                * to find this.
+                */
+               bname = (char *) fpath + ftwbuf->base - 2;
+               for (;;) {
+                       if (*bname == '/')
+                               break;
+                       bname--;
+               }
+               bname++;
+       } else
+               bname = (char *) fpath + ftwbuf->base;
+
        pr_debug("%s %d %7jd %-20s %s\n",
                 is_file ? "f" : is_dir ? "d" : "x",
                 level, sb->st_size, bname, fpath);
 
-       /* base dir */
-       if (level == 0)
+       /* base dir or too deep */
+       if (level == 0 || level > 3)
                return 0;
 
+
        /* model directory, reset topic */
-       if (level == 1 && is_dir) {
+       if ((level == 1 && is_dir && is_leaf_dir(fpath)) ||
+           (level == 2 && is_dir)) {
                if (close_table)
                        print_events_table_suffix(eventsfp);
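
Together with is_leaf_dir(), the level checks encode the directory layout the walker now accepts. A sketch of the expected tree, using the arm64 files added in this series (the walk starts at the per-arch directory, level 0):

    arch/arm64/                            level 0
    arch/arm64/mapfile.csv                 level 1: cpuid-to-table map
    arch/arm64/<std-events>.json           level 1: arch-standard events, read in pass 1
    arch/arm64/arm/                        level 1: vendor directory (not a leaf)
    arch/arm64/arm/cortex-a53/             level 2: platform directory, opens a table
    arch/arm64/arm/cortex-a53/branch.json  level 3: topic file

A level-1 directory that is itself a leaf still opens a table directly, which keeps the old single-level layout working.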
 
@@ -798,16 +986,10 @@ static int process_one_file(const char *fpath, const struct stat *sb,
         * after processing all JSON files (so we can write out the
         * mapping table after all PMU events tables).
         *
-        * TODO: Allow for multiple mapfiles? Punt for now.
         */
        if (level == 1 && is_file) {
-               if (!strncmp(bname, "mapfile.csv", 11)) {
-                       if (mapfile) {
-                               pr_info("%s: Many mapfiles? Using %s, ignoring %s\n",
-                                               prog, mapfile, fpath);
-                       } else {
-                               mapfile = strdup(fpath);
-                       }
+               if (!strcmp(bname, "mapfile.csv")) {
+                       mapfile = strdup(fpath);
                        return 0;
                }
 
@@ -820,16 +1002,14 @@ static int process_one_file(const char *fpath, const struct stat *sb,
         * ignore it. It could be a readme.txt for instance.
         */
        if (is_file) {
-               char *suffix = bname + strlen(bname) - 5;
-
-               if (strncmp(suffix, ".json", 5)) {
+               if (!is_json_file(bname)) {
                        pr_info("%s: Ignoring file without .json suffix %s\n", prog,
                                fpath);
                        return 0;
                }
        }
 
-       if (level > 1 && add_topic(level, bname))
+       if (level > 1 && add_topic(bname))
                return -ENOMEM;
 
        /*
@@ -928,12 +1108,26 @@ int main(int argc, char *argv[])
 
        maxfds = get_maxfds();
        mapfile = NULL;
+       rc = nftw(ldirname, preprocess_arch_std_files, maxfds, 0);
+       if (rc && verbose) {
+               pr_info("%s: Error preprocessing arch standard files %s\n",
+                       prog, ldirname);
+               goto empty_map;
+       } else if (rc < 0) {
+               /* Make build fail */
+               free_arch_std_events();
+               return 1;
+       } else if (rc) {
+               goto empty_map;
+       }
+
        rc = nftw(ldirname, process_one_file, maxfds, 0);
        if (rc && verbose) {
                pr_info("%s: Error walking file tree %s\n", prog, ldirname);
                goto empty_map;
        } else if (rc < 0) {
                /* Make build fail */
+               free_arch_std_events();
                return 1;
        } else if (rc) {
                goto empty_map;
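
main() now walks the tree twice; a condensed sketch of the control flow with error handling elided:

    /* pass 1: collect arch-standard events from level-1 JSON files */
    nftw(ldirname, preprocess_arch_std_files, maxfds, 0);
    /* pass 2: emit one events table per model/platform directory,
     * resolving ArchStdEvent references against what pass 1 saved */
    nftw(ldirname, process_one_file, maxfds, 0);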
@@ -958,5 +1152,6 @@ int main(int argc, char *argv[])
 empty_map:
        fclose(eventsfp);
        create_empty_mapping(output_file);
+       free_arch_std_events();
        return 0;
 }
index c235c22..0a29c5c 100755 (executable)
@@ -42,10 +42,10 @@ def main(context_switch = 0, thread = -1):
                        event = evlist.read_on_cpu(cpu)
                        if not event:
                                continue
-                       print "cpu: %2d, pid: %4d, tid: %4d" % (event.sample_cpu,
-                                                               event.sample_pid,
-                                                               event.sample_tid),
-                       print event
+                       print("cpu: {0}, pid: {1}, tid: {2} {3}".format(event.sample_cpu,
+                                                                        event.sample_pid,
+                                                                        event.sample_tid,
+                                                                        event))
 
 if __name__ == '__main__':
     """
index fcd1dd6..1a0d277 100644 (file)
 #include "../../../perf.h"
 #include "../../../util/trace-event.h"
 
+#if PY_MAJOR_VERSION < 3
+#define _PyCapsule_GetPointer(arg1, arg2) \
+  PyCObject_AsVoidPtr(arg1)
+
 PyMODINIT_FUNC initperf_trace_context(void);
+#else
+#define _PyCapsule_GetPointer(arg1, arg2) \
+  PyCapsule_GetPointer((arg1), (arg2))
+
+PyMODINIT_FUNC PyInit_perf_trace_context(void);
+#endif
 
 static PyObject *perf_trace_context_common_pc(PyObject *obj, PyObject *args)
 {
@@ -34,7 +44,7 @@ static PyObject *perf_trace_context_common_pc(PyObject *obj, PyObject *args)
        if (!PyArg_ParseTuple(args, "O", &context))
                return NULL;
 
-       scripting_context = PyCObject_AsVoidPtr(context);
+       scripting_context = _PyCapsule_GetPointer(context, NULL);
        retval = common_pc(scripting_context);
 
        return Py_BuildValue("i", retval);
@@ -50,7 +60,7 @@ static PyObject *perf_trace_context_common_flags(PyObject *obj,
        if (!PyArg_ParseTuple(args, "O", &context))
                return NULL;
 
-       scripting_context = PyCObject_AsVoidPtr(context);
+       scripting_context = _PyCapsule_GetPointer(context, NULL);
        retval = common_flags(scripting_context);
 
        return Py_BuildValue("i", retval);
@@ -66,7 +76,7 @@ static PyObject *perf_trace_context_common_lock_depth(PyObject *obj,
        if (!PyArg_ParseTuple(args, "O", &context))
                return NULL;
 
-       scripting_context = PyCObject_AsVoidPtr(context);
+       scripting_context = _PyCapsule_GetPointer(context, NULL);
        retval = common_lock_depth(scripting_context);
 
        return Py_BuildValue("i", retval);
@@ -82,7 +92,25 @@ static PyMethodDef ContextMethods[] = {
        { NULL, NULL, 0, NULL}
 };
 
+#if PY_MAJOR_VERSION < 3
 PyMODINIT_FUNC initperf_trace_context(void)
 {
        (void) Py_InitModule("perf_trace_context", ContextMethods);
 }
+#else
+PyMODINIT_FUNC PyInit_perf_trace_context(void)
+{
+       static struct PyModuleDef moduledef = {
+               PyModuleDef_HEAD_INIT,
+               "perf_trace_context",   /* m_name */
+               "",                     /* m_doc */
+               -1,                     /* m_size */
+               ContextMethods,         /* m_methods */
+               NULL,                   /* m_reload */
+               NULL,                   /* m_traverse */
+               NULL,                   /* m_clear */
+               NULL,                   /* m_free */
+       };
+       return PyModule_Create(&moduledef);
+}
+#endif
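
With the capsule shim above, the module builds against both Python 2 and Python 3 embeddings. A minimal sketch of how a perf script consumes it, following the perf-script-python handler convention (treat the exact fields as illustrative):

    # report.py -- a sketch; run with: perf script -s report.py
    from perf_trace_context import common_pc, common_flags

    def trace_unhandled(event_name, context, event_fields_dict):
        # 'context' is the object unwrapped by _PyCapsule_GetPointer() above
        print("%s pc=%d flags=%d" % (event_name, common_pc(context),
                                     common_flags(context)))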
index 87bf3ed..6c108fa 100644 (file)
@@ -20,6 +20,7 @@ perf-y += hists_cumulate.o
 perf-y += python-use.o
 perf-y += bp_signal.o
 perf-y += bp_signal_overflow.o
+perf-y += bp_account.o
 perf-y += task-exit.o
 perf-y += sw-clock.o
 perf-y += mmap-thread-lookup.o
@@ -47,6 +48,7 @@ perf-y += bitmap.o
 perf-y += perf-hooks.o
 perf-y += clang.o
 perf-y += unit_number__scnprintf.o
+perf-y += mem2node.o
 
 $(OUTPUT)tests/llvm-src-base.c: tests/bpf-script-example.c tests/Build
        $(call rule_mkdir)
index 97f64ad..05dfe11 100644 (file)
@@ -170,8 +170,8 @@ static int run_dir(const char *d, const char *perf)
        if (verbose > 0)
                vcnt++;
 
-       snprintf(cmd, 3*PATH_MAX, PYTHON " %s/attr.py -d %s/attr/ -p %s %.*s",
-                d, d, perf, vcnt, v);
+       scnprintf(cmd, 3*PATH_MAX, PYTHON " %s/attr.py -d %s/attr/ -p %s %.*s",
+                 d, d, perf, vcnt, v);
 
        return system(cmd) ? TEST_FAIL : TEST_OK;
 }
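
The snprintf-to-scnprintf conversions scattered through this series all guard the same hazard: snprintf returns the length the output would have had, not what was stored, so a chained append can index past the buffer once truncation happens. A minimal sketch of the safe pattern (buffer and inputs are hypothetical):

    char buf[64];
    const char *header = "hdr:", *payload = "data";
    int n;

    /* scnprintf returns what was actually written, so buf + n can
     * never step past the end of buf, even after truncation */
    n  = scnprintf(buf, sizeof(buf), "%s", header);
    n += scnprintf(buf + n, sizeof(buf) - n, "%s", payload);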
index e0b1b41..6d598cc 100644 (file)
@@ -33,10 +33,9 @@ static int count_samples(struct perf_evlist *evlist, int *sample_count,
        for (i = 0; i < evlist->nr_mmaps; i++) {
                struct perf_mmap *map = &evlist->overwrite_mmap[i];
                union perf_event *event;
-               u64 start, end;
 
-               perf_mmap__read_init(map, true, &start, &end);
-               while ((event = perf_mmap__read_event(map, true, &start, end)) != NULL) {
+               perf_mmap__read_init(map);
+               while ((event = perf_mmap__read_event(map)) != NULL) {
                        const u32 type = event->header.type;
 
                        switch (type) {
diff --git a/tools/perf/tests/bp_account.c b/tools/perf/tests/bp_account.c
new file mode 100644 (file)
index 0000000..a20cbc4
--- /dev/null
@@ -0,0 +1,193 @@
+/*
+ * Powerpc needs __SANE_USERSPACE_TYPES__ before <linux/types.h> to select
+ * 'int-ll64.h' and avoid compile warnings when printing __u64 with %llu.
+ */
+#define __SANE_USERSPACE_TYPES__
+
+#include <stdlib.h>
+#include <stdio.h>
+#include <unistd.h>
+#include <string.h>
+#include <sys/ioctl.h>
+#include <time.h>
+#include <fcntl.h>
+#include <signal.h>
+#include <sys/mman.h>
+#include <linux/compiler.h>
+#include <linux/hw_breakpoint.h>
+
+#include "tests.h"
+#include "debug.h"
+#include "perf.h"
+#include "cloexec.h"
+
+volatile long the_var;
+
+static noinline int test_function(void)
+{
+       return 0;
+}
+
+static int __event(bool is_x, void *addr, struct perf_event_attr *attr)
+{
+       int fd;
+
+       memset(attr, 0, sizeof(struct perf_event_attr));
+       attr->type = PERF_TYPE_BREAKPOINT;
+       attr->size = sizeof(struct perf_event_attr);
+
+       attr->config = 0;
+       attr->bp_type = is_x ? HW_BREAKPOINT_X : HW_BREAKPOINT_W;
+       attr->bp_addr = (unsigned long) addr;
+       attr->bp_len = sizeof(long);
+
+       attr->sample_period = 1;
+       attr->sample_type = PERF_SAMPLE_IP;
+
+       attr->exclude_kernel = 1;
+       attr->exclude_hv = 1;
+
+       fd = sys_perf_event_open(attr, -1, 0, -1,
+                                perf_event_open_cloexec_flag());
+       if (fd < 0) {
+               pr_debug("failed opening event %llx\n", attr->config);
+               return TEST_FAIL;
+       }
+
+       return fd;
+}
+
+static int wp_event(void *addr, struct perf_event_attr *attr)
+{
+       return __event(false, addr, attr);
+}
+
+static int bp_event(void *addr, struct perf_event_attr *attr)
+{
+       return __event(true, addr, attr);
+}
+
+static int bp_accounting(int wp_cnt, int share)
+{
+       struct perf_event_attr attr, attr_mod, attr_new;
+       int i, fd[wp_cnt], fd_wp, ret;
+
+       for (i = 0; i < wp_cnt; i++) {
+               fd[i] = wp_event((void *)&the_var, &attr);
+               TEST_ASSERT_VAL("failed to create wp\n", fd[i] != -1);
+               pr_debug("wp %d created\n", i);
+       }
+
+       attr_mod = attr;
+       attr_mod.bp_type = HW_BREAKPOINT_X;
+       attr_mod.bp_addr = (unsigned long) test_function;
+
+       ret = ioctl(fd[0], PERF_EVENT_IOC_MODIFY_ATTRIBUTES, &attr_mod);
+       TEST_ASSERT_VAL("failed to modify wp\n", ret == 0);
+
+       pr_debug("wp 0 modified to bp\n");
+
+       if (!share) {
+               fd_wp = wp_event((void *)&the_var, &attr_new);
+               TEST_ASSERT_VAL("failed to create max wp\n", fd_wp != -1);
+               pr_debug("wp max created\n");
+       }
+
+       for (i = 0; i < wp_cnt; i++)
+               close(fd[i]);
+
+       return 0;
+}
+
+static int detect_cnt(bool is_x)
+{
+       struct perf_event_attr attr;
+       void *addr = is_x ? (void *)test_function : (void *)&the_var;
+       int fd[100], cnt = 0, i;
+
+       while (1) {
+               if (cnt == 100) {
+                       pr_debug("way too many debug registers, fix the test\n");
+                       return 0;
+               }
+               fd[cnt] = __event(is_x, addr, &attr);
+
+               if (fd[cnt] < 0)
+                       break;
+               cnt++;
+       }
+
+       for (i = 0; i < cnt; i++)
+               close(fd[i]);
+
+       return cnt;
+}
+
+static int detect_ioctl(void)
+{
+       struct perf_event_attr attr;
+       int fd, ret = 1;
+
+       fd = wp_event((void *) &the_var, &attr);
+       if (fd > 0) {
+               ret = ioctl(fd, PERF_EVENT_IOC_MODIFY_ATTRIBUTES, &attr);
+               close(fd);
+       }
+
+       return ret ? 0 : 1;
+}
+
+static int detect_share(int wp_cnt, int bp_cnt)
+{
+       struct perf_event_attr attr;
+       int i, fd[wp_cnt + bp_cnt], ret;
+
+       for (i = 0; i < wp_cnt; i++) {
+               fd[i] = wp_event((void *)&the_var, &attr);
+               TEST_ASSERT_VAL("failed to create wp\n", fd[i] != -1);
+       }
+
+       for (; i < (bp_cnt + wp_cnt); i++) {
+               fd[i] = bp_event((void *)test_function, &attr);
+               if (fd[i] == -1)
+                       break;
+       }
+
+       ret = i != (bp_cnt + wp_cnt);
+
+       while (i--)
+               close(fd[i]);
+
+       return ret;
+}
+
+/*
+ * This test does the following:
+ *   - detects the number of watchpoints and breakpoints,
+ *     skipping the test if either is missing
+ *   - detects the PERF_EVENT_IOC_MODIFY_ATTRIBUTES ioctl,
+ *     skipping the test if it's missing
+ *   - detects whether watchpoints and breakpoints share
+ *     the same slots
+ *   - creates all possible watchpoints on cpu 0
+ *   - changes one of them to a breakpoint
+ *   - in case wp and bp do not share slots,
+ *     creates another watchpoint to ensure
+ *     the slot accounting is correct
+ */
+int test__bp_accounting(struct test *test __maybe_unused, int subtest __maybe_unused)
+{
+       int has_ioctl = detect_ioctl();
+       int wp_cnt = detect_cnt(false);
+       int bp_cnt = detect_cnt(true);
+       int share  = detect_share(wp_cnt, bp_cnt);
+
+       pr_debug("watchpoints count %d, breakpoints count %d, has_ioctl %d, share %d\n",
+                wp_cnt, bp_cnt, has_ioctl, share);
+
+       if (!wp_cnt || !bp_cnt || !has_ioctl)
+               return TEST_SKIP;
+
+       return bp_accounting(wp_cnt, share);
+}
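
A usage note: once built, the new entry appears in the generic test list and can be selected by name substring, e.g.:

    $ perf test -v 'Breakpoint accounting'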
index e8399be..79b54f8 100644 (file)
@@ -176,13 +176,19 @@ static int do_test(struct bpf_object *obj, int (*func)(void),
 
        for (i = 0; i < evlist->nr_mmaps; i++) {
                union perf_event *event;
+               struct perf_mmap *md;
 
-               while ((event = perf_evlist__mmap_read(evlist, i)) != NULL) {
+               md = &evlist->mmap[i];
+               if (perf_mmap__read_init(md) < 0)
+                       continue;
+
+               while ((event = perf_mmap__read_event(md)) != NULL) {
                        const u32 type = event->header.type;
 
                        if (type == PERF_RECORD_SAMPLE)
                                count ++;
                }
+               perf_mmap__read_done(md);
        }
 
        if (count != expect) {
index fafa014..625f5a6 100644 (file)
@@ -115,6 +115,10 @@ static struct test generic_tests[] = {
                .func = test__bp_signal_overflow,
                .is_supported = test__bp_signal_is_supported,
        },
+       {
+               .desc = "Breakpoint accounting",
+               .func = test__bp_accounting,
+       },
        {
                .desc = "Number of exit events of a simple workload",
                .func = test__task_exit,
@@ -270,6 +274,10 @@ static struct test generic_tests[] = {
                .desc = "unit_number__scnprintf",
                .func = test__unit_number__scnprint,
        },
+       {
+               .desc = "mem2node",
+               .func = test__mem2node,
+       },
        {
                .func = NULL,
        },
index 3bf7b14..9993635 100644 (file)
@@ -409,15 +409,21 @@ static int process_events(struct machine *machine, struct perf_evlist *evlist,
                          struct state *state)
 {
        union perf_event *event;
+       struct perf_mmap *md;
        int i, ret;
 
        for (i = 0; i < evlist->nr_mmaps; i++) {
-               while ((event = perf_evlist__mmap_read(evlist, i)) != NULL) {
+               md = &evlist->mmap[i];
+               if (perf_mmap__read_init(md) < 0)
+                       continue;
+
+               while ((event = perf_mmap__read_event(md)) != NULL) {
                        ret = process_event(machine, evlist, event, state);
-                       perf_evlist__mmap_consume(evlist, i);
+                       perf_mmap__consume(md);
                        if (ret < 0)
                                return ret;
                }
+               perf_mmap__read_done(md);
        }
        return 0;
 }
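
Every test converted in this series lands on the same shape: the evlist-level read helpers are replaced by an explicit per-map read window. A sketch of the loop body the new API expects, inside the usual for-each-mmap iteration (process() stands in for whatever the caller does):

    struct perf_mmap *md = &evlist->mmap[i];
    union perf_event *event;

    if (perf_mmap__read_init(md) < 0)   /* nothing to read */
            continue;
    while ((event = perf_mmap__read_event(md)) != NULL) {
            process(event);             /* hypothetical consumer */
            perf_mmap__consume(md);     /* mark the event handled */
    }
    perf_mmap__read_done(md);           /* close the read window */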
@@ -482,6 +488,34 @@ static void fs_something(void)
        }
 }
 
+static const char *do_determine_event(bool excl_kernel)
+{
+       const char *event = excl_kernel ? "cycles:u" : "cycles";
+
+#ifdef __s390x__
+       char cpuid[128], model[16], model_c[16], cpum_cf_v[16];
+       unsigned int family;
+       int ret, cpum_cf_a;
+
+       if (get_cpuid(cpuid, sizeof(cpuid)))
+               goto out_clocks;
+       ret = sscanf(cpuid, "%*[^,],%u,%[^,],%[^,],%[^,],%x", &family, model_c,
+                    model, cpum_cf_v, &cpum_cf_a);
+       if (ret != 5)            /* Not available */
+               goto out_clocks;
+       if (excl_kernel && (cpum_cf_a & 4))
+               return event;
+       if (!excl_kernel && (cpum_cf_a & 2))
+               return event;
+
+       /* Fall through: missing authorization */
+out_clocks:
+       event = excl_kernel ? "cpu-clock:u" : "cpu-clock";
+
+#endif
+       return event;
+}
+
 static void do_something(void)
 {
        fs_something();
@@ -592,10 +626,7 @@ static int do_test_code_reading(bool try_kcore)
 
                perf_evlist__set_maps(evlist, cpus, threads);
 
-               if (excl_kernel)
-                       str = "cycles:u";
-               else
-                       str = "cycles";
+               str = do_determine_event(excl_kernel);
                pr_debug("Parsing event '%s'\n", str);
                ret = parse_events(evlist, str, NULL);
                if (ret < 0) {
index 2604189..2f00806 100644 (file)
@@ -37,6 +37,19 @@ static int init_live_machine(struct machine *machine)
                                                  mmap_handler, machine, true, 500);
 }
 
+/*
+ * We need to keep these functions global, despite the
+ * fact that they are used only locally in this object,
+ * in order to keep them around even if the binary is
+ * stripped. If they are gone, the unwind check for
+ * symbol fails.
+ */
+int test_dwarf_unwind__thread(struct thread *thread);
+int test_dwarf_unwind__compare(void *p1, void *p2);
+int test_dwarf_unwind__krava_3(struct thread *thread);
+int test_dwarf_unwind__krava_2(struct thread *thread);
+int test_dwarf_unwind__krava_1(struct thread *thread);
+
 #define MAX_STACK 8
 
 static int unwind_entry(struct unwind_entry *entry, void *arg)
@@ -45,12 +58,12 @@ static int unwind_entry(struct unwind_entry *entry, void *arg)
        char *symbol = entry->sym ? entry->sym->name : NULL;
        static const char *funcs[MAX_STACK] = {
                "test__arch_unwind_sample",
-               "unwind_thread",
-               "compare",
+               "test_dwarf_unwind__thread",
+               "test_dwarf_unwind__compare",
                "bsearch",
-               "krava_3",
-               "krava_2",
-               "krava_1",
+               "test_dwarf_unwind__krava_3",
+               "test_dwarf_unwind__krava_2",
+               "test_dwarf_unwind__krava_1",
                "test__dwarf_unwind"
        };
        /*
@@ -77,7 +90,7 @@ static int unwind_entry(struct unwind_entry *entry, void *arg)
        return strcmp((const char *) symbol, funcs[idx]);
 }
 
-static noinline int unwind_thread(struct thread *thread)
+noinline int test_dwarf_unwind__thread(struct thread *thread)
 {
        struct perf_sample sample;
        unsigned long cnt = 0;
@@ -108,7 +121,7 @@ static noinline int unwind_thread(struct thread *thread)
 
 static int global_unwind_retval = -INT_MAX;
 
-static noinline int compare(void *p1, void *p2)
+noinline int test_dwarf_unwind__compare(void *p1, void *p2)
 {
        /* Any possible value should be 'thread' */
        struct thread *thread = *(struct thread **)p1;
@@ -117,17 +130,17 @@ static noinline int compare(void *p1, void *p2)
                /* Call unwinder twice for both callchain orders. */
                callchain_param.order = ORDER_CALLER;
 
-               global_unwind_retval = unwind_thread(thread);
+               global_unwind_retval = test_dwarf_unwind__thread(thread);
                if (!global_unwind_retval) {
                        callchain_param.order = ORDER_CALLEE;
-                       global_unwind_retval = unwind_thread(thread);
+                       global_unwind_retval = test_dwarf_unwind__thread(thread);
                }
        }
 
        return p1 - p2;
 }
 
-static noinline int krava_3(struct thread *thread)
+noinline int test_dwarf_unwind__krava_3(struct thread *thread)
 {
        struct thread *array[2] = {thread, thread};
        void *fp = &bsearch;
@@ -141,18 +154,19 @@ static noinline int krava_3(struct thread *thread)
                        size_t, int (*)(void *, void *));
 
        _bsearch = fp;
-       _bsearch(array, &thread, 2, sizeof(struct thread **), compare);
+       _bsearch(array, &thread, 2, sizeof(struct thread **),
+                test_dwarf_unwind__compare);
        return global_unwind_retval;
 }
 
-static noinline int krava_2(struct thread *thread)
+noinline int test_dwarf_unwind__krava_2(struct thread *thread)
 {
-       return krava_3(thread);
+       return test_dwarf_unwind__krava_3(thread);
 }
 
-static noinline int krava_1(struct thread *thread)
+noinline int test_dwarf_unwind__krava_1(struct thread *thread)
 {
-       return krava_2(thread);
+       return test_dwarf_unwind__krava_2(thread);
 }
 
 int test__dwarf_unwind(struct test *test __maybe_unused, int subtest __maybe_unused)
@@ -189,7 +203,7 @@ int test__dwarf_unwind(struct test *test __maybe_unused, int subtest __maybe_unu
                goto out;
        }
 
-       err = krava_1(thread);
+       err = test_dwarf_unwind__krava_1(thread);
        thread__put(thread);
 
  out:
index c465309..17c46f3 100644 (file)
 static int find_comm(struct perf_evlist *evlist, const char *comm)
 {
        union perf_event *event;
+       struct perf_mmap *md;
        int i, found;
 
        found = 0;
        for (i = 0; i < evlist->nr_mmaps; i++) {
-               while ((event = perf_evlist__mmap_read(evlist, i)) != NULL) {
+               md = &evlist->mmap[i];
+               if (perf_mmap__read_init(md) < 0)
+                       continue;
+               while ((event = perf_mmap__read_event(md)) != NULL) {
                        if (event->header.type == PERF_RECORD_COMM &&
                            (pid_t)event->comm.pid == getpid() &&
                            (pid_t)event->comm.tid == getpid() &&
                            strcmp(event->comm.comm, comm) == 0)
                                found += 1;
-                       perf_evlist__mmap_consume(evlist, i);
+                       perf_mmap__consume(md);
                }
+               perf_mmap__read_done(md);
        }
        return found;
 }
index 21952e1..0f82ee9 100644 (file)
@@ -16,7 +16,7 @@ static int check(union perf_mem_data_src data_src,
 
        n = perf_mem__snp_scnprintf(out, sizeof out, &mi);
        n += perf_mem__lvl_scnprintf(out + n, sizeof out - n, &mi);
-       snprintf(failure, sizeof failure, "unexpected %s", out);
+       scnprintf(failure, sizeof failure, "unexpected %s", out);
        TEST_ASSERT_VAL(failure, !strcmp(string, out));
        return 0;
 }
diff --git a/tools/perf/tests/mem2node.c b/tools/perf/tests/mem2node.c
new file mode 100644 (file)
index 0000000..0c3c87f
--- /dev/null
@@ -0,0 +1,75 @@
+#include <linux/compiler.h>
+#include <linux/bitmap.h>
+#include "cpumap.h"
+#include "mem2node.h"
+#include "tests.h"
+
+static struct node {
+       int              node;
+       const char      *map;
+} test_nodes[] = {
+       { .node = 0, .map = "0"     },
+       { .node = 1, .map = "1-2"   },
+       { .node = 3, .map = "5-7,9" },
+};
+
+#define T TEST_ASSERT_VAL
+
+static unsigned long *get_bitmap(const char *str, int nbits)
+{
+       struct cpu_map *map = cpu_map__new(str);
+       unsigned long *bm = NULL;
+       int i;
+
+       bm = bitmap_alloc(nbits);
+
+       if (map && bm) {
+               bitmap_zero(bm, nbits);
+
+               for (i = 0; i < map->nr; i++) {
+                       set_bit(map->map[i], bm);
+               }
+       }
+
+       if (map)
+               cpu_map__put(map);
+       else
+               free(bm);
+
+       return bm && map ? bm : NULL;
+}
+
+int test__mem2node(struct test *t __maybe_unused, int subtest __maybe_unused)
+{
+       struct mem2node map;
+       struct memory_node nodes[3];
+       struct perf_env env = {
+               .memory_nodes    = (struct memory_node *) &nodes[0],
+               .nr_memory_nodes = ARRAY_SIZE(nodes),
+               .memory_bsize    = 0x100,
+       };
+       unsigned int i;
+
+       for (i = 0; i < ARRAY_SIZE(nodes); i++) {
+               nodes[i].node = test_nodes[i].node;
+               nodes[i].size = 10;
+
+               T("failed: alloc bitmap",
+                 (nodes[i].set = get_bitmap(test_nodes[i].map, 10)));
+       }
+
+       T("failed: mem2node__init", !mem2node__init(&map, &env));
+       T("failed: mem2node__node",  0 == mem2node__node(&map,   0x50));
+       T("failed: mem2node__node",  1 == mem2node__node(&map,  0x100));
+       T("failed: mem2node__node",  1 == mem2node__node(&map,  0x250));
+       T("failed: mem2node__node",  3 == mem2node__node(&map,  0x500));
+       T("failed: mem2node__node",  3 == mem2node__node(&map,  0x650));
+       T("failed: mem2node__node", -1 == mem2node__node(&map,  0x450));
+       T("failed: mem2node__node", -1 == mem2node__node(&map, 0x1050));
+
+       for (i = 0; i < ARRAY_SIZE(nodes); i++)
+               free(nodes[i].set);
+
+       mem2node__exit(&map);
+       return 0;
+}
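
The expected values follow directly from the fixture above: memory_bsize is 0x100, so an address selects block addr / 0x100, and the bitmaps place blocks {0} on node 0, {1,2} on node 1 and {5,6,7,9} on node 3. Worked through for two of the assertions:

    0x250 / 0x100 = block 2  ->  node 1
    0x450 / 0x100 = block 4  ->  in no bitmap  ->  -1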
index c0e971d..bb8e6bc 100644 (file)
@@ -38,6 +38,7 @@ int test__basic_mmap(struct test *test __maybe_unused, int subtest __maybe_unuse
                     expected_nr_events[nsyscalls], i, j;
        struct perf_evsel *evsels[nsyscalls], *evsel;
        char sbuf[STRERR_BUFSIZE];
+       struct perf_mmap *md;
 
        threads = thread_map__new(-1, getpid(), UINT_MAX);
        if (threads == NULL) {
@@ -106,7 +107,11 @@ int test__basic_mmap(struct test *test __maybe_unused, int subtest __maybe_unuse
                        ++foo;
                }
 
-       while ((event = perf_evlist__mmap_read(evlist, 0)) != NULL) {
+       md = &evlist->mmap[0];
+       if (perf_mmap__read_init(md) < 0)
+               goto out_init;
+
+       while ((event = perf_mmap__read_event(md)) != NULL) {
                struct perf_sample sample;
 
                if (event->header.type != PERF_RECORD_SAMPLE) {
@@ -129,9 +134,11 @@ int test__basic_mmap(struct test *test __maybe_unused, int subtest __maybe_unuse
                        goto out_delete_evlist;
                }
                nr_events[evsel->idx]++;
-               perf_evlist__mmap_consume(evlist, 0);
+               perf_mmap__consume(md);
        }
+       perf_mmap__read_done(md);
 
+out_init:
        err = 0;
        evlist__for_each_entry(evlist, evsel) {
                if (nr_events[evsel->idx] != expected_nr_events[evsel->idx]) {
index 4351926..344dc3a 100644 (file)
@@ -86,8 +86,13 @@ int test__syscall_openat_tp_fields(struct test *test __maybe_unused, int subtest
 
                for (i = 0; i < evlist->nr_mmaps; i++) {
                        union perf_event *event;
+                       struct perf_mmap *md;
 
-                       while ((event = perf_evlist__mmap_read(evlist, i)) != NULL) {
+                       md = &evlist->mmap[i];
+                       if (perf_mmap__read_init(md) < 0)
+                               continue;
+
+                       while ((event = perf_mmap__read_event(md)) != NULL) {
                                const u32 type = event->header.type;
                                int tp_flags;
                                struct perf_sample sample;
@@ -95,7 +100,7 @@ int test__syscall_openat_tp_fields(struct test *test __maybe_unused, int subtest
                                ++nr_events;
 
                                if (type != PERF_RECORD_SAMPLE) {
-                                       perf_evlist__mmap_consume(evlist, i);
+                                       perf_mmap__consume(md);
                                        continue;
                                }
 
@@ -115,6 +120,7 @@ int test__syscall_openat_tp_fields(struct test *test __maybe_unused, int subtest
 
                                goto out_ok;
                        }
+                       perf_mmap__read_done(md);
                }
 
                if (nr_events == before)
index 0afafab..34394cc 100644 (file)
@@ -164,8 +164,13 @@ int test__PERF_RECORD(struct test *test __maybe_unused, int subtest __maybe_unus
 
                for (i = 0; i < evlist->nr_mmaps; i++) {
                        union perf_event *event;
+                       struct perf_mmap *md;
 
-                       while ((event = perf_evlist__mmap_read(evlist, i)) != NULL) {
+                       md = &evlist->mmap[i];
+                       if (perf_mmap__read_init(md) < 0)
+                               continue;
+
+                       while ((event = perf_mmap__read_event(md)) != NULL) {
                                const u32 type = event->header.type;
                                const char *name = perf_event__name(type);
 
@@ -266,8 +271,9 @@ int test__PERF_RECORD(struct test *test __maybe_unused, int subtest __maybe_unus
                                        ++errs;
                                }
 
-                               perf_evlist__mmap_consume(evlist, i);
+                               perf_mmap__consume(md);
                        }
+                       perf_mmap__read_done(md);
                }
 
                /*
index 9abca26..7bedf86 100644 (file)
@@ -98,7 +98,7 @@ static char *test_format_dir_get(void)
                struct test_format *format = &test_formats[i];
                FILE *file;
 
-               snprintf(name, PATH_MAX, "%s/%s", dir, format->name);
+               scnprintf(name, PATH_MAX, "%s/%s", dir, format->name);
 
                file = fopen(name, "w");
                if (!file)
index 30a950c..1c16e56 100644 (file)
@@ -5,7 +5,7 @@ had_vfs_getname=$?
 
 cleanup_probe_vfs_getname() {
        if [ $had_vfs_getname -eq 1 ] ; then
-               perf probe -q -d probe:vfs_getname
+               perf probe -q -d probe:vfs_getname*
        fi
 }
 
diff --git a/tools/perf/tests/shell/record+probe_libc_inet_pton.sh b/tools/perf/tests/shell/record+probe_libc_inet_pton.sh
new file mode 100755 (executable)
index 0000000..1ecc1f0
--- /dev/null
@@ -0,0 +1,65 @@
+# probe libc's inet_pton & backtrace it with ping
+
+# Installs a probe on libc's inet_pton function, which will use uprobes,
+# then uses 'perf record' and 'perf script' on a ping to localhost asking
+# for just one packet with a backtrace 3 levels deep, and checks that it is what we expect.
+# This needs no debuginfo package, all is done using the libc ELF symtab
+# and the CFI info in the binaries.
+
+# Arnaldo Carvalho de Melo <acme@kernel.org>, 2017
+
+. $(dirname $0)/lib/probe.sh
+
+libc=$(grep -w libc /proc/self/maps | head -1 | sed -r 's/.*[[:space:]](\/.*)/\1/g')
+nm -g $libc 2>/dev/null | fgrep -q inet_pton || exit 254
+
+trace_libc_inet_pton_backtrace() {
+       idx=0
+       expected[0]="ping[][0-9 \.:]+probe_libc:inet_pton: \([[:xdigit:]]+\)"
+       expected[1]=".*inet_pton[[:space:]]\($libc\)$"
+       case "$(uname -m)" in
+       s390x)
+               eventattr='call-graph=dwarf'
+               expected[2]="gaih_inet.*[[:space:]]\($libc|inlined\)$"
+               expected[3]="__GI_getaddrinfo[[:space:]]\($libc|inlined\)$"
+               expected[4]="main[[:space:]]\(.*/bin/ping.*\)$"
+               expected[5]="__libc_start_main[[:space:]]\($libc\)$"
+               expected[6]="_start[[:space:]]\(.*/bin/ping.*\)$"
+               ;;
+       *)
+               eventattr='max-stack=3'
+               expected[2]="getaddrinfo[[:space:]]\($libc\)$"
+               expected[3]=".*\(.*/bin/ping.*\)$"
+               ;;
+       esac
+
+       file=`mktemp -u /tmp/perf.data.XXX`
+
+       perf record -e probe_libc:inet_pton/$eventattr/ -o $file ping -6 -c 1 ::1 > /dev/null 2>&1
+       perf script -i $file | while read line ; do
+               echo $line
+               echo "$line" | egrep -q "${expected[$idx]}"
+               if [ $? -ne 0 ] ; then
+                       printf "FAIL: expected backtrace entry %d \"%s\" got \"%s\"\n" $idx "${expected[$idx]}" "$line"
+                       exit 1
+               fi
+               let idx+=1
+               [ -z "${expected[$idx]}" ] && break
+       done
+
+       # If any statements are executed from this point onwards,
+       # the exit code of the last among these will be reflected
+       # in err below. If the exit code is 0, the test will pass
+       # even if the perf script output does not match.
+}
+
+# Check for IPv6 interface existence
+ip a sh lo | fgrep -q inet6 || exit 2
+
+skip_if_no_perf_probe && \
+perf probe -q $libc inet_pton && \
+trace_libc_inet_pton_backtrace
+err=$?
+rm -f ${file}
+perf probe -q -d probe_libc:inet_pton
+exit $err
diff --git a/tools/perf/tests/shell/trace+probe_libc_inet_pton.sh b/tools/perf/tests/shell/trace+probe_libc_inet_pton.sh
deleted file mode 100755 (executable)
index c446c89..0000000
+++ /dev/null
@@ -1,62 +0,0 @@
-# probe libc's inet_pton & backtrace it with ping
-
-# Installs a probe on libc's inet_pton function, that will use uprobes,
-# then use 'perf trace' on a ping to localhost asking for just one packet
-# with the a backtrace 3 levels deep, check that it is what we expect.
-# This needs no debuginfo package, all is done using the libc ELF symtab
-# and the CFI info in the binaries.
-
-# Arnaldo Carvalho de Melo <acme@kernel.org>, 2017
-
-. $(dirname $0)/lib/probe.sh
-
-libc=$(grep -w libc /proc/self/maps | head -1 | sed -r 's/.*[[:space:]](\/.*)/\1/g')
-nm -g $libc 2>/dev/null | fgrep -q inet_pton || exit 254
-
-trace_libc_inet_pton_backtrace() {
-       idx=0
-       expected[0]="PING.*bytes"
-       expected[1]="64 bytes from ::1.*"
-       expected[2]=".*ping statistics.*"
-       expected[3]=".*packets transmitted.*"
-       expected[4]="rtt min.*"
-       expected[5]="[0-9]+\.[0-9]+[[:space:]]+probe_libc:inet_pton:\([[:xdigit:]]+\)"
-       expected[6]=".*inet_pton[[:space:]]\($libc\)$"
-       case "$(uname -m)" in
-       s390x)
-               eventattr='call-graph=dwarf'
-               expected[7]="gaih_inet[[:space:]]\(inlined\)$"
-               expected[8]="__GI_getaddrinfo[[:space:]]\(inlined\)$"
-               expected[9]="main[[:space:]]\(.*/bin/ping.*\)$"
-               expected[10]="__libc_start_main[[:space:]]\($libc\)$"
-               expected[11]="_start[[:space:]]\(.*/bin/ping.*\)$"
-               ;;
-       *)
-               eventattr='max-stack=3'
-               expected[7]="getaddrinfo[[:space:]]\($libc\)$"
-               expected[8]=".*\(.*/bin/ping.*\)$"
-               ;;
-       esac
-
-       perf trace --no-syscalls -e probe_libc:inet_pton/$eventattr/ ping -6 -c 1 ::1 2>&1 | grep -v ^$ | while read line ; do
-               echo $line
-               echo "$line" | egrep -q "${expected[$idx]}"
-               if [ $? -ne 0 ] ; then
-                       printf "FAIL: expected backtrace entry %d \"%s\" got \"%s\"\n" $idx "${expected[$idx]}" "$line"
-                       exit 1
-               fi
-               let idx+=1
-               [ -z "${expected[$idx]}" ] && break
-       done
-}
-
-# Check for IPv6 interface existence
-ip a sh lo | fgrep -q inet6 || exit 2
-
-skip_if_no_perf_probe && \
-perf probe -q $libc inet_pton && \
-trace_libc_inet_pton_backtrace
-err=$?
-rm -f ${file}
-perf probe -q -d probe_libc:inet_pton
-exit $err
index f6c72f9..f9490b2 100644 (file)
@@ -39,6 +39,7 @@ static int __test__sw_clock_freq(enum perf_sw_ids clock_id)
        };
        struct cpu_map *cpus;
        struct thread_map *threads;
+       struct perf_mmap *md;
 
        attr.sample_freq = 500;
 
@@ -93,7 +94,11 @@ static int __test__sw_clock_freq(enum perf_sw_ids clock_id)
 
        perf_evlist__disable(evlist);
 
-       while ((event = perf_evlist__mmap_read(evlist, 0)) != NULL) {
+       md = &evlist->mmap[0];
+       if (perf_mmap__read_init(md) < 0)
+               goto out_init;
+
+       while ((event = perf_mmap__read_event(md)) != NULL) {
                struct perf_sample sample;
 
                if (event->header.type != PERF_RECORD_SAMPLE)
@@ -108,9 +113,11 @@ static int __test__sw_clock_freq(enum perf_sw_ids clock_id)
                total_periods += sample.period;
                nr_samples++;
 next_event:
-               perf_evlist__mmap_consume(evlist, 0);
+               perf_mmap__consume(md);
        }
+       perf_mmap__read_done(md);
 
+out_init:
        if ((u64) nr_samples == total_periods) {
                pr_debug("All (%d) samples have period value of 1!\n",
                         nr_samples);
index 33e0029..9b5be51 100644 (file)
@@ -258,16 +258,22 @@ static int process_events(struct perf_evlist *evlist,
        unsigned pos, cnt = 0;
        LIST_HEAD(events);
        struct event_node *events_array, *node;
+       struct perf_mmap *md;
        int i, ret;
 
        for (i = 0; i < evlist->nr_mmaps; i++) {
-               while ((event = perf_evlist__mmap_read(evlist, i)) != NULL) {
+               md = &evlist->mmap[i];
+               if (perf_mmap__read_init(md) < 0)
+                       continue;
+
+               while ((event = perf_mmap__read_event(md)) != NULL) {
                        cnt += 1;
                        ret = add_event(evlist, &events, event);
-                       perf_evlist__mmap_consume(evlist, i);
+                       perf_mmap__consume(md);
                        if (ret < 0)
                                goto out_free_nodes;
                }
+               perf_mmap__read_done(md);
        }
 
        events_array = calloc(cnt, sizeof(struct event_node));
index 01b62b8..e92fa60 100644 (file)
@@ -47,6 +47,7 @@ int test__task_exit(struct test *test __maybe_unused, int subtest __maybe_unused
        char sbuf[STRERR_BUFSIZE];
        struct cpu_map *cpus;
        struct thread_map *threads;
+       struct perf_mmap *md;
 
        signal(SIGCHLD, sig_handler);
 
@@ -110,13 +111,19 @@ int test__task_exit(struct test *test __maybe_unused, int subtest __maybe_unused
        perf_evlist__start_workload(evlist);
 
 retry:
-       while ((event = perf_evlist__mmap_read(evlist, 0)) != NULL) {
+       md = &evlist->mmap[0];
+       if (perf_mmap__read_init(md) < 0)
+               goto out_init;
+
+       while ((event = perf_mmap__read_event(md)) != NULL) {
                if (event->header.type == PERF_RECORD_EXIT)
                        nr_exit++;
 
-               perf_evlist__mmap_consume(evlist, 0);
+               perf_mmap__consume(md);
        }
+       perf_mmap__read_done(md);
 
+out_init:
        if (!exited || !nr_exit) {
                perf_evlist__poll(evlist, -1);
                goto retry;
index 2862b80..a9760e7 100644 (file)
@@ -58,6 +58,7 @@ int test__hists_link(struct test *test, int subtest);
 int test__python_use(struct test *test, int subtest);
 int test__bp_signal(struct test *test, int subtest);
 int test__bp_signal_overflow(struct test *test, int subtest);
+int test__bp_accounting(struct test *test, int subtest);
 int test__task_exit(struct test *test, int subtest);
 int test__mem(struct test *test, int subtest);
 int test__sw_clock_freq(struct test *test, int subtest);
@@ -102,6 +103,7 @@ int test__clang(struct test *test, int subtest);
 const char *test__clang_subtest_get_desc(int subtest);
 int test__clang_subtest_get_nr(void);
 int test__unit_number__scnprint(struct test *test, int subtest);
+int test__mem2node(struct test *t, int subtest);
 
 bool test__bp_signal_is_supported(void);
 
index f6789fb..1e5adb6 100644 (file)
@@ -56,7 +56,7 @@ int test__vmlinux_matches_kallsyms(struct test *test __maybe_unused, int subtest
         * be compacted against the list of modules found in the "vmlinux"
         * code and with the one got from /proc/modules from the "kallsyms" code.
         */
-       if (__machine__load_kallsyms(&kallsyms, "/proc/kallsyms", type, true) <= 0) {
+       if (machine__load_kallsyms(&kallsyms, "/proc/kallsyms", type) <= 0) {
                pr_debug("dso__load_kallsyms ");
                goto out;
        }
@@ -125,7 +125,7 @@ int test__vmlinux_matches_kallsyms(struct test *test __maybe_unused, int subtest
 
                if (pair && UM(pair->start) == mem_start) {
 next_pair:
-                       if (strcmp(sym->name, pair->name) == 0) {
+                       if (arch__compare_symbol_names(sym->name, pair->name) == 0) {
                                /*
                                 * kallsyms don't have the symbol end, so we
                                 * set that by using the next symbol start - 1,
index fbf927c..618edf9 100644 (file)
@@ -319,6 +319,7 @@ static void annotate_browser__draw_current_jump(struct ui_browser *browser)
        struct map_symbol *ms = ab->b.priv;
        struct symbol *sym = ms->sym;
        u8 pcnt_width = annotate_browser__pcnt_width(ab);
+       int width = 0;
 
        /* PLT symbols contain external offsets */
        if (strstr(sym->name, "@plt"))
@@ -365,13 +366,17 @@ static void annotate_browser__draw_current_jump(struct ui_browser *browser)
                to = (u64)btarget->idx;
        }
 
+       if (ab->have_cycles)
+               width = IPC_WIDTH + CYCLES_WIDTH;
+
        ui_browser__set_color(browser, HE_COLORSET_JUMP_ARROWS);
-       __ui_browser__line_arrow(browser, pcnt_width + 2 + ab->addr_width,
+       __ui_browser__line_arrow(browser,
+                                pcnt_width + 2 + ab->addr_width + width,
                                 from, to);
 
        if (is_fused(ab, cursor)) {
                ui_browser__mark_fused(browser,
-                                      pcnt_width + 3 + ab->addr_width,
+                                      pcnt_width + 3 + ab->addr_width + width,
                                       from - 1,
                                       to > from ? true : false);
        }
@@ -563,35 +568,28 @@ static bool annotate_browser__callq(struct annotate_browser *browser,
        struct map_symbol *ms = browser->b.priv;
        struct disasm_line *dl = disasm_line(browser->selection);
        struct annotation *notes;
-       struct addr_map_symbol target = {
-               .map = ms->map,
-               .addr = map__objdump_2mem(ms->map, dl->ops.target.addr),
-       };
        char title[SYM_TITLE_MAX_SIZE];
 
        if (!ins__is_call(&dl->ins))
                return false;
 
-       if (map_groups__find_ams(&target) ||
-           map__rip_2objdump(target.map, target.map->map_ip(target.map,
-                                                            target.addr)) !=
-           dl->ops.target.addr) {
+       if (!dl->ops.target.sym) {
                ui_helpline__puts("The called function was not found.");
                return true;
        }
 
-       notes = symbol__annotation(target.sym);
+       notes = symbol__annotation(dl->ops.target.sym);
        pthread_mutex_lock(&notes->lock);
 
-       if (notes->src == NULL && symbol__alloc_hist(target.sym) < 0) {
+       if (notes->src == NULL && symbol__alloc_hist(dl->ops.target.sym) < 0) {
                pthread_mutex_unlock(&notes->lock);
                ui__warning("Not enough memory for annotating '%s' symbol!\n",
-                           target.sym->name);
+                           dl->ops.target.sym->name);
                return true;
        }
 
        pthread_mutex_unlock(&notes->lock);
-       symbol__tui_annotate(target.sym, target.map, evsel, hbt);
+       symbol__tui_annotate(dl->ops.target.sym, ms->map, evsel, hbt);
        sym_title(ms->sym, ms->map, title, sizeof(title));
        ui_browser__show_title(&browser->b, title);
        return true;
index 6495ee5..8b4e825 100644 (file)
@@ -2223,7 +2223,7 @@ static int perf_evsel_browser_title(struct hist_browser *browser,
        u64 nr_events = hists->stats.total_period;
        struct perf_evsel *evsel = hists_to_evsel(hists);
        const char *ev_name = perf_evsel__name(evsel);
-       char buf[512];
+       char buf[512], sample_freq_str[64] = "";
        size_t buflen = sizeof(buf);
        char ref[30] = " show reference callgraph, ";
        bool enable_ref = false;
@@ -2255,10 +2255,15 @@ static int perf_evsel_browser_title(struct hist_browser *browser,
        if (symbol_conf.show_ref_callgraph &&
            strstr(ev_name, "call-graph=no"))
                enable_ref = true;
+
+       if (!is_report_browser(hbt))
+               scnprintf(sample_freq_str, sizeof(sample_freq_str), " %d Hz,", evsel->attr.sample_freq);
+
        nr_samples = convert_unit(nr_samples, &unit);
        printed = scnprintf(bf, size,
-                          "Samples: %lu%c of event '%s',%sEvent count (approx.): %" PRIu64,
-                          nr_samples, unit, ev_name, enable_ref ? ref : " ", nr_events);
+                          "Samples: %lu%c of event%s '%s',%s%sEvent count (approx.): %" PRIu64,
+                          nr_samples, unit, evsel->nr_members > 1 ? "s" : "",
+                          ev_name, sample_freq_str, enable_ref ? ref : " ", nr_events);
 
 
        if (hists->uid_filter_str)
index 25dd1e0..6832fcb 100644 (file)
@@ -840,15 +840,11 @@ size_t events_stats__fprintf(struct events_stats *stats, FILE *fp)
        for (i = 0; i < PERF_RECORD_HEADER_MAX; ++i) {
                const char *name;
 
-               if (stats->nr_events[i] == 0)
-                       continue;
-
                name = perf_event__name(i);
                if (!strcmp(name, "UNKNOWN"))
                        continue;
 
-               ret += fprintf(fp, "%16s events: %10d\n", name,
-                              stats->nr_events[i]);
+               ret += fprintf(fp, "%16s events: %10d\n", name, stats->nr_events[i]);
        }
 
        return ret;
index ea0a452..8052373 100644 (file)
@@ -106,6 +106,7 @@ libperf-y += units.o
 libperf-y += time-utils.o
 libperf-y += expr-bison.o
 libperf-y += branch.o
+libperf-y += mem2node.o
 
 libperf-$(CONFIG_LIBBPF) += bpf-loader.o
 libperf-$(CONFIG_BPF_PROLOGUE) += bpf-prologue.o
index 28b233c..535357c 100644 (file)
@@ -187,6 +187,9 @@ bool ins__is_fused(struct arch *arch, const char *ins1, const char *ins2)
 static int call__parse(struct arch *arch, struct ins_operands *ops, struct map *map)
 {
        char *endptr, *tok, *name;
+       struct addr_map_symbol target = {
+               .map = map,
+       };
 
        ops->target.addr = strtoull(ops->raw, &endptr, 16);
 
@@ -208,32 +211,36 @@ static int call__parse(struct arch *arch, struct ins_operands *ops, struct map *
        ops->target.name = strdup(name);
        *tok = '>';
 
-       return ops->target.name == NULL ? -1 : 0;
+       if (ops->target.name == NULL)
+               return -1;
+find_target:
+       target.addr = map__objdump_2mem(map, ops->target.addr);
 
-indirect_call:
-       tok = strchr(endptr, '*');
-       if (tok == NULL) {
-               struct symbol *sym = map__find_symbol(map, map->map_ip(map, ops->target.addr));
-               if (sym != NULL)
-                       ops->target.name = strdup(sym->name);
-               else
-                       ops->target.addr = 0;
-               return 0;
-       }
+       if (map_groups__find_ams(&target) == 0 &&
+           map__rip_2objdump(target.map, map->map_ip(target.map, target.addr)) == ops->target.addr)
+               ops->target.sym = target.sym;
 
-       ops->target.addr = strtoull(tok + 1, NULL, 16);
        return 0;
+
+indirect_call:
+       tok = strchr(endptr, '*');
+       if (tok != NULL)
+               ops->target.addr = strtoull(tok + 1, NULL, 16);
+       goto find_target;
 }
 
 static int call__scnprintf(struct ins *ins, char *bf, size_t size,
                           struct ins_operands *ops)
 {
-       if (ops->target.name)
-               return scnprintf(bf, size, "%-6s %s", ins->name, ops->target.name);
+       if (ops->target.sym)
+               return scnprintf(bf, size, "%-6s %s", ins->name, ops->target.sym->name);
 
        if (ops->target.addr == 0)
                return ins__raw_scnprintf(ins, bf, size, ops);
 
+       if (ops->target.name)
+               return scnprintf(bf, size, "%-6s %s", ins->name, ops->target.name);
+
        return scnprintf(bf, size, "%-6s *%" PRIx64, ins->name, ops->target.addr);
 }
 
@@ -244,7 +251,7 @@ static struct ins_ops call_ops = {
 
 bool ins__is_call(const struct ins *ins)
 {
-       return ins->ops == &call_ops;
+       return ins->ops == &call_ops || ins->ops == &s390_call_ops;
 }
 
 static int jump__parse(struct arch *arch __maybe_unused, struct ins_operands *ops, struct map *map __maybe_unused)
@@ -1283,8 +1290,8 @@ static int symbol__parse_objdump_line(struct symbol *sym, FILE *file,
                dl->ops.target.offset_avail = true;
        }
 
-       /* kcore has no symbols, so add the call target name */
-       if (dl->ins.ops && ins__is_call(&dl->ins) && !dl->ops.target.name) {
+       /* kcore has no symbols, so add the call target symbol */
+       if (dl->ins.ops && ins__is_call(&dl->ins) && !dl->ops.target.sym) {
                struct addr_map_symbol target = {
                        .map = map,
                        .addr = dl->ops.target.addr,
@@ -1292,7 +1299,7 @@ static int symbol__parse_objdump_line(struct symbol *sym, FILE *file,
 
                if (!map_groups__find_ams(&target) &&
                    target.sym->start == target.al_addr)
-                       dl->ops.target.name = strdup(target.sym->name);
+                       dl->ops.target.sym = target.sym;
        }
 
        annotation_line__add(&dl->al, &notes->src->source);
@@ -1423,7 +1430,7 @@ static int symbol__disassemble(struct symbol *sym, struct annotate_args *args)
 {
        struct map *map = args->map;
        struct dso *dso = map->dso;
-       char command[PATH_MAX * 2];
+       char *command;
        FILE *file;
        char symfs_filename[PATH_MAX];
        struct kcore_extract kce;
@@ -1464,7 +1471,7 @@ static int symbol__disassemble(struct symbol *sym, struct annotate_args *args)
                strcpy(symfs_filename, tmp);
        }
 
-       snprintf(command, sizeof(command),
+       err = asprintf(&command,
                 "%s %s%s --start-address=0x%016" PRIx64
                 " --stop-address=0x%016" PRIx64
                 " -l -d %s %s -C \"%s\" 2>/dev/null|grep -v \"%s:\"|expand",
@@ -1477,12 +1484,17 @@ static int symbol__disassemble(struct symbol *sym, struct annotate_args *args)
                 symbol_conf.annotate_src ? "-S" : "",
                 symfs_filename, symfs_filename);
 
+       if (err < 0) {
+               pr_err("Failure allocating memory for the command to run\n");
+               goto out_remove_tmp;
+       }
+
        pr_debug("Executing: %s\n", command);
 
        err = -1;
        if (pipe(stdout_fd) < 0) {
                pr_err("Failure creating the pipe to run %s\n", command);
-               goto out_remove_tmp;
+               goto out_free_command;
        }
 
        pid = fork();
@@ -1509,7 +1521,7 @@ static int symbol__disassemble(struct symbol *sym, struct annotate_args *args)
                 * If we were using debug info should retry with
                 * original binary.
                 */
-               goto out_remove_tmp;
+               goto out_free_command;
        }
 
        nline = 0;
@@ -1537,6 +1549,8 @@ static int symbol__disassemble(struct symbol *sym, struct annotate_args *args)
 
        fclose(file);
        err = 0;
+out_free_command:
+       free(command);
 out_remove_tmp:
        close(stdout_fd[0]);
 
@@ -1550,7 +1564,7 @@ out:
 
 out_close_stdout:
        close(stdout_fd[1]);
-       goto out_remove_tmp;
+       goto out_free_command;
 }
 
 static void calc_percent(struct sym_hist *hist,
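
The symbol__disassemble() hunks above replace a fixed char command[PATH_MAX * 2] with asprintf(), so a long symfs filename can no longer silently truncate the objdump command line. A standalone illustration of the allocation and ownership pattern (the command string here is made up):

#define _GNU_SOURCE
#include <stdio.h>
#include <stdlib.h>

int main(void)
{
	char *command;

	if (asprintf(&command, "objdump -d -l -C \"%s\"", "some/long/path") < 0)
		return 1;	/* allocation failed, nothing was assigned */

	printf("%s\n", command);
	free(command);		/* the caller owns the buffer */
	return 0;
}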
index ce42744..7e914e8 100644 (file)
@@ -24,6 +24,7 @@ struct ins_operands {
        struct {
                char    *raw;
                char    *name;
+               struct symbol *sym;
                u64     addr;
                s64     offset;
                bool    offset_avail;
index 6470ea2..fb357a0 100644 (file)
@@ -233,9 +233,9 @@ static void *auxtrace_copy_data(u64 size, struct perf_session *session)
        return p;
 }
 
-static int auxtrace_queues__add_buffer(struct auxtrace_queues *queues,
-                                      unsigned int idx,
-                                      struct auxtrace_buffer *buffer)
+static int auxtrace_queues__queue_buffer(struct auxtrace_queues *queues,
+                                        unsigned int idx,
+                                        struct auxtrace_buffer *buffer)
 {
        struct auxtrace_queue *queue;
        int err;
@@ -286,7 +286,7 @@ static int auxtrace_queues__split_buffer(struct auxtrace_queues *queues,
                        return -ENOMEM;
                b->size = BUFFER_LIMIT_FOR_32_BIT;
                b->consecutive = consecutive;
-               err = auxtrace_queues__add_buffer(queues, idx, b);
+               err = auxtrace_queues__queue_buffer(queues, idx, b);
                if (err) {
                        auxtrace_buffer__free(b);
                        return err;
@@ -302,11 +302,14 @@ static int auxtrace_queues__split_buffer(struct auxtrace_queues *queues,
        return 0;
 }
 
-static int auxtrace_queues__add_event_buffer(struct auxtrace_queues *queues,
-                                            struct perf_session *session,
-                                            unsigned int idx,
-                                            struct auxtrace_buffer *buffer)
+static int auxtrace_queues__add_buffer(struct auxtrace_queues *queues,
+                                      struct perf_session *session,
+                                      unsigned int idx,
+                                      struct auxtrace_buffer *buffer,
+                                      struct auxtrace_buffer **buffer_ptr)
 {
+       int err;
+
        if (session->one_mmap) {
                buffer->data = buffer->data_offset - session->one_mmap_offset +
                               session->one_mmap_addr;
@@ -317,14 +320,20 @@ static int auxtrace_queues__add_event_buffer(struct auxtrace_queues *queues,
                buffer->data_needs_freeing = true;
        } else if (BITS_PER_LONG == 32 &&
                   buffer->size > BUFFER_LIMIT_FOR_32_BIT) {
-               int err;
-
                err = auxtrace_queues__split_buffer(queues, idx, buffer);
                if (err)
                        return err;
        }
 
-       return auxtrace_queues__add_buffer(queues, idx, buffer);
+       err = auxtrace_queues__queue_buffer(queues, idx, buffer);
+       if (err)
+               return err;
+
+       /* FIXME: Doesn't work for split buffer */
+       if (buffer_ptr)
+               *buffer_ptr = buffer;
+
+       return 0;
 }
 
 static bool filter_cpu(struct perf_session *session, int cpu)
@@ -359,13 +368,11 @@ int auxtrace_queues__add_event(struct auxtrace_queues *queues,
        buffer->size = event->auxtrace.size;
        idx = event->auxtrace.idx;
 
-       err = auxtrace_queues__add_event_buffer(queues, session, idx, buffer);
+       err = auxtrace_queues__add_buffer(queues, session, idx, buffer,
+                                         buffer_ptr);
        if (err)
                goto out_err;
 
-       if (buffer_ptr)
-               *buffer_ptr = buffer;
-
        return 0;
 
 out_err:
index 453c148..e731f55 100644 (file)
@@ -130,6 +130,7 @@ struct auxtrace_index {
 /**
  * struct auxtrace - session callbacks to allow AUX area data decoding.
  * @process_event: lets the decoder see all session events
+ * @process_auxtrace_event: process a PERF_RECORD_AUXTRACE event
  * @flush_events: process any remaining data
  * @free_events: free resources associated with event processing
  * @free: free resources associated with the session
@@ -301,6 +302,7 @@ struct auxtrace_mmap_params {
  * @parse_snapshot_options: parse snapshot options
  * @reference: provide a 64-bit reference number for auxtrace_event
  * @read_finish: called after reading from an auxtrace mmap
+ * @alignment: alignment (if any) for AUX area data
  */
 struct auxtrace_record {
        int (*recording_options)(struct auxtrace_record *itr,
index 7f85536..537eadd 100644 (file)
@@ -316,7 +316,6 @@ static int machine__write_buildid_table(struct machine *machine,
                                        struct feat_fd *fd)
 {
        int err = 0;
-       char nm[PATH_MAX];
        struct dso *pos;
        u16 kmisc = PERF_RECORD_MISC_KERNEL,
            umisc = PERF_RECORD_MISC_USER;
@@ -338,9 +337,8 @@ static int machine__write_buildid_table(struct machine *machine,
                        name = pos->short_name;
                        name_len = pos->short_name_len;
                } else if (dso__is_kcore(pos)) {
-                       machine__mmap_name(machine, nm, sizeof(nm));
-                       name = nm;
-                       name_len = strlen(nm);
+                       name = machine->mmap_name;
+                       name_len = strlen(name);
                } else {
                        name = pos->long_name;
                        name_len = pos->long_name_len;
@@ -813,12 +811,10 @@ static int dso__cache_build_id(struct dso *dso, struct machine *machine)
        bool is_kallsyms = dso__is_kallsyms(dso);
        bool is_vdso = dso__is_vdso(dso);
        const char *name = dso->long_name;
-       char nm[PATH_MAX];
 
        if (dso__is_kcore(dso)) {
                is_kallsyms = true;
-               machine__mmap_name(machine, nm, sizeof(nm));
-               name = nm;
+               name = machine->mmap_name;
        }
        return build_id_cache__add_b(dso->build_id, sizeof(dso->build_id), name,
                                     dso->nsinfo, is_kallsyms, is_vdso);
index 984f691..decb91f 100644 (file)
@@ -71,7 +71,7 @@ cgroupfs_find_mountpoint(char *buf, size_t maxlen)
        return -1;
 }
 
-static int open_cgroup(char *name)
+static int open_cgroup(const char *name)
 {
        char path[PATH_MAX + 1];
        char mnt[PATH_MAX + 1];
@@ -81,7 +81,7 @@ static int open_cgroup(char *name)
        if (cgroupfs_find_mountpoint(mnt, PATH_MAX + 1))
                return -1;
 
-       snprintf(path, PATH_MAX, "%s/%s", mnt, name);
+       scnprintf(path, PATH_MAX, "%s/%s", mnt, name);
 
        fd = open(path, O_RDONLY);
        if (fd == -1)
@@ -90,41 +90,64 @@ static int open_cgroup(char *name)
        return fd;
 }
 
-static int add_cgroup(struct perf_evlist *evlist, char *str)
+static struct cgroup *evlist__find_cgroup(struct perf_evlist *evlist, const char *str)
 {
        struct perf_evsel *counter;
-       struct cgroup_sel *cgrp = NULL;
-       int n;
+       struct cgroup *cgrp = NULL;
        /*
         * check if cgrp is already defined, if so we reuse it
         */
        evlist__for_each_entry(evlist, counter) {
-               cgrp = counter->cgrp;
-               if (!cgrp)
+               if (!counter->cgrp)
                        continue;
-               if (!strcmp(cgrp->name, str)) {
-                       refcount_inc(&cgrp->refcnt);
+               if (!strcmp(counter->cgrp->name, str)) {
+                       cgrp = cgroup__get(counter->cgrp);
                        break;
                }
-
-               cgrp = NULL;
        }
 
-       if (!cgrp) {
-               cgrp = zalloc(sizeof(*cgrp));
-               if (!cgrp)
-                       return -1;
+       return cgrp;
+}
 
-               cgrp->name = str;
-               refcount_set(&cgrp->refcnt, 1);
+static struct cgroup *cgroup__new(const char *name)
+{
+       struct cgroup *cgroup = zalloc(sizeof(*cgroup));
 
-               cgrp->fd = open_cgroup(str);
-               if (cgrp->fd == -1) {
-                       free(cgrp);
-                       return -1;
-               }
+       if (cgroup != NULL) {
+               refcount_set(&cgroup->refcnt, 1);
+
+               cgroup->name = strdup(name);
+               if (!cgroup->name)
+                       goto out_err;
+               cgroup->fd = open_cgroup(name);
+               if (cgroup->fd == -1)
+                       goto out_free_name;
        }
 
+       return cgroup;
+
+out_free_name:
+       free(cgroup->name);
+out_err:
+       free(cgroup);
+       return NULL;
+}
+
+struct cgroup *evlist__findnew_cgroup(struct perf_evlist *evlist, const char *name)
+{
+       struct cgroup *cgroup = evlist__find_cgroup(evlist, name);
+
+       return cgroup ?: cgroup__new(name);
+}
+
+static int add_cgroup(struct perf_evlist *evlist, const char *str)
+{
+       struct perf_evsel *counter;
+       struct cgroup *cgrp = evlist__findnew_cgroup(evlist, str);
+       int n;
+
+       if (!cgrp)
+               return -1;
        /*
         * find corresponding event
         * if add cgroup N, then need to find event N
@@ -135,31 +158,58 @@ static int add_cgroup(struct perf_evlist *evlist, char *str)
                        goto found;
                n++;
        }
-       if (refcount_dec_and_test(&cgrp->refcnt))
-               free(cgrp);
 
+       cgroup__put(cgrp);
        return -1;
 found:
        counter->cgrp = cgrp;
        return 0;
 }
 
-void close_cgroup(struct cgroup_sel *cgrp)
+static void cgroup__delete(struct cgroup *cgroup)
+{
+       close(cgroup->fd);
+       zfree(&cgroup->name);
+       free(cgroup);
+}
+
+void cgroup__put(struct cgroup *cgrp)
 {
        if (cgrp && refcount_dec_and_test(&cgrp->refcnt)) {
-               close(cgrp->fd);
-               zfree(&cgrp->name);
-               free(cgrp);
+               cgroup__delete(cgrp);
        }
 }
 
-int parse_cgroups(const struct option *opt __maybe_unused, const char *str,
+struct cgroup *cgroup__get(struct cgroup *cgroup)
+{
+       if (cgroup)
+               refcount_inc(&cgroup->refcnt);
+       return cgroup;
+}
+
+static void evsel__set_default_cgroup(struct perf_evsel *evsel, struct cgroup *cgroup)
+{
+       if (evsel->cgrp == NULL)
+               evsel->cgrp = cgroup__get(cgroup);
+}
+
+void evlist__set_default_cgroup(struct perf_evlist *evlist, struct cgroup *cgroup)
+{
+       struct perf_evsel *evsel;
+
+       evlist__for_each_entry(evlist, evsel)
+               evsel__set_default_cgroup(evsel, cgroup);
+}
+
+int parse_cgroups(const struct option *opt, const char *str,
                  int unset __maybe_unused)
 {
        struct perf_evlist *evlist = *(struct perf_evlist **)opt->value;
+       struct perf_evsel *counter;
+       struct cgroup *cgrp = NULL;
        const char *p, *e, *eos = str + strlen(str);
        char *s;
-       int ret;
+       int ret, i;
 
        if (list_empty(&evlist->entries)) {
                fprintf(stderr, "must define events before cgroups\n");
@@ -177,10 +227,9 @@ int parse_cgroups(const struct option *opt __maybe_unused, const char *str,
                        if (!s)
                                return -1;
                        ret = add_cgroup(evlist, s);
-                       if (ret) {
-                               free(s);
+                       free(s);
+                       if (ret)
                                return -1;
-                       }
                }
                /* nr_cgroups is increased even for empty cgroups */
                nr_cgroups++;
@@ -188,5 +237,18 @@ int parse_cgroups(const struct option *opt __maybe_unused, const char *str,
                        break;
                str = p+1;
        }
+       /* for the case where one cgroup applies to multiple events */
+       i = 0;
+       if (nr_cgroups == 1) {
+               evlist__for_each_entry(evlist, counter) {
+                       if (i == 0)
+                               cgrp = counter->cgrp;
+                       else {
+                               counter->cgrp = cgrp;
+                               refcount_inc(&cgrp->refcnt);
+                       }
+                       i++;
+               }
+       }
        return 0;
 }
index afafc87..f033a80 100644 (file)
@@ -6,7 +6,7 @@
 
 struct option;
 
-struct cgroup_sel {
+struct cgroup {
        char *name;
        int fd;
        refcount_t refcnt;
@@ -14,7 +14,16 @@ struct cgroup_sel {
 
 
 extern int nr_cgroups; /* number of explicit cgroups defined */
-void close_cgroup(struct cgroup_sel *cgrp);
+
+struct cgroup *cgroup__get(struct cgroup *cgroup);
+void cgroup__put(struct cgroup *cgroup);
+
+struct perf_evlist;
+
+struct cgroup *evlist__findnew_cgroup(struct perf_evlist *evlist, const char *name);
+
+void evlist__set_default_cgroup(struct perf_evlist *evlist, struct cgroup *cgroup);
+
 int parse_cgroups(const struct option *opt, const char *str, int unset);
 
 #endif /* __CGROUP_H__ */
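
A sketch of how a caller is expected to use the refcounted API this header now exports, assuming an initialized evlist; the cgroup name is made up. evlist__set_default_cgroup() takes one reference per evsel, so the reference owned by the lookup is dropped afterwards:

	struct cgroup *cgrp = evlist__findnew_cgroup(evlist, "mygroup");

	if (cgrp == NULL)
		return -1;

	evlist__set_default_cgroup(evlist, cgrp);	/* each evsel grabs a ref */
	cgroup__put(cgrp);				/* drop the lookup's ref */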
index 1fb0184..640af88 100644 (file)
@@ -78,6 +78,8 @@ int cs_etm_decoder__reset(struct cs_etm_decoder *decoder)
 {
        ocsd_datapath_resp_t dp_ret;
 
+       decoder->prev_return = OCSD_RESP_CONT;
+
        dp_ret = ocsd_dt_process_data(decoder->dcd_tree, OCSD_OP_RESET,
                                      0, 0, NULL, NULL);
        if (OCSD_DATA_RESP_IS_FATAL(dp_ret))
@@ -253,16 +255,16 @@ static void cs_etm_decoder__clear_buffer(struct cs_etm_decoder *decoder)
        decoder->packet_count = 0;
        for (i = 0; i < MAX_BUFFER; i++) {
                decoder->packet_buffer[i].start_addr = 0xdeadbeefdeadbeefUL;
-               decoder->packet_buffer[i].end_addr   = 0xdeadbeefdeadbeefUL;
-               decoder->packet_buffer[i].exc        = false;
-               decoder->packet_buffer[i].exc_ret    = false;
-               decoder->packet_buffer[i].cpu        = INT_MIN;
+               decoder->packet_buffer[i].end_addr = 0xdeadbeefdeadbeefUL;
+               decoder->packet_buffer[i].last_instr_taken_branch = false;
+               decoder->packet_buffer[i].exc = false;
+               decoder->packet_buffer[i].exc_ret = false;
+               decoder->packet_buffer[i].cpu = INT_MIN;
        }
 }
 
 static ocsd_datapath_resp_t
 cs_etm_decoder__buffer_packet(struct cs_etm_decoder *decoder,
-                             const ocsd_generic_trace_elem *elem,
                              const u8 trace_chan_id,
                              enum cs_etm_sample_type sample_type)
 {
@@ -278,18 +280,16 @@ cs_etm_decoder__buffer_packet(struct cs_etm_decoder *decoder,
                return OCSD_RESP_FATAL_SYS_ERR;
 
        et = decoder->tail;
+       et = (et + 1) & (MAX_BUFFER - 1);
+       decoder->tail = et;
+       decoder->packet_count++;
+
        decoder->packet_buffer[et].sample_type = sample_type;
-       decoder->packet_buffer[et].start_addr = elem->st_addr;
-       decoder->packet_buffer[et].end_addr = elem->en_addr;
        decoder->packet_buffer[et].exc = false;
        decoder->packet_buffer[et].exc_ret = false;
        decoder->packet_buffer[et].cpu = *((int *)inode->priv);
-
-       /* Wrap around if need be */
-       et = (et + 1) & (MAX_BUFFER - 1);
-
-       decoder->tail = et;
-       decoder->packet_count++;
+       decoder->packet_buffer[et].start_addr = 0xdeadbeefdeadbeefUL;
+       decoder->packet_buffer[et].end_addr = 0xdeadbeefdeadbeefUL;
 
        if (decoder->packet_count == MAX_BUFFER - 1)
                return OCSD_RESP_WAIT;
@@ -297,6 +297,47 @@ cs_etm_decoder__buffer_packet(struct cs_etm_decoder *decoder,
        return OCSD_RESP_CONT;
 }
 
+static ocsd_datapath_resp_t
+cs_etm_decoder__buffer_range(struct cs_etm_decoder *decoder,
+                            const ocsd_generic_trace_elem *elem,
+                            const uint8_t trace_chan_id)
+{
+       int ret = 0;
+       struct cs_etm_packet *packet;
+
+       ret = cs_etm_decoder__buffer_packet(decoder, trace_chan_id,
+                                           CS_ETM_RANGE);
+       if (ret != OCSD_RESP_CONT && ret != OCSD_RESP_WAIT)
+               return ret;
+
+       packet = &decoder->packet_buffer[decoder->tail];
+
+       packet->start_addr = elem->st_addr;
+       packet->end_addr = elem->en_addr;
+       switch (elem->last_i_type) {
+       case OCSD_INSTR_BR:
+       case OCSD_INSTR_BR_INDIRECT:
+               packet->last_instr_taken_branch = elem->last_instr_exec;
+               break;
+       case OCSD_INSTR_ISB:
+       case OCSD_INSTR_DSB_DMB:
+       case OCSD_INSTR_OTHER:
+       default:
+               packet->last_instr_taken_branch = false;
+               break;
+       }
+
+       return ret;
+}
+
+static ocsd_datapath_resp_t
+cs_etm_decoder__buffer_trace_on(struct cs_etm_decoder *decoder,
+                               const uint8_t trace_chan_id)
+{
+       return cs_etm_decoder__buffer_packet(decoder, trace_chan_id,
+                                            CS_ETM_TRACE_ON);
+}
+
 static ocsd_datapath_resp_t cs_etm_decoder__gen_trace_elem_printer(
                                const void *context,
                                const ocsd_trc_index_t indx __maybe_unused,
@@ -313,12 +354,13 @@ static ocsd_datapath_resp_t cs_etm_decoder__gen_trace_elem_printer(
                decoder->trace_on = false;
                break;
        case OCSD_GEN_TRC_ELEM_TRACE_ON:
+               resp = cs_etm_decoder__buffer_trace_on(decoder,
+                                                      trace_chan_id);
                decoder->trace_on = true;
                break;
        case OCSD_GEN_TRC_ELEM_INSTR_RANGE:
-               resp = cs_etm_decoder__buffer_packet(decoder, elem,
-                                                    trace_chan_id,
-                                                    CS_ETM_RANGE);
+               resp = cs_etm_decoder__buffer_range(decoder, elem,
+                                                   trace_chan_id);
                break;
        case OCSD_GEN_TRC_ELEM_EXCEPTION:
                decoder->packet_buffer[decoder->tail].exc = true;
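
The tail update above, et = (et + 1) & (MAX_BUFFER - 1), wraps with a mask instead of a modulo, which is only correct because MAX_BUFFER is a power of two. A standalone illustration (the value 8 is assumed for the demo, not taken from the decoder):

#include <stdio.h>

#define MAX_BUFFER 8	/* must be a power of two for the mask to wrap */

int main(void)
{
	unsigned int tail = 0;

	for (int i = 0; i < 10; i++) {
		tail = (tail + 1) & (MAX_BUFFER - 1);	/* 7 wraps to 0 */
		printf("%u ", tail);
	}
	printf("\n");	/* prints: 1 2 3 4 5 6 7 0 1 2 */
	return 0;
}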
index 3d2e620..743f5f4 100644 (file)
@@ -24,12 +24,14 @@ struct cs_etm_buffer {
 
 enum cs_etm_sample_type {
        CS_ETM_RANGE = 1 << 0,
+       CS_ETM_TRACE_ON = 1 << 1,
 };
 
 struct cs_etm_packet {
        enum cs_etm_sample_type sample_type;
        u64 start_addr;
        u64 end_addr;
+       u8 last_instr_taken_branch;
        u8 exc;
        u8 exc_ret;
        int cpu;
index b9f0a53..1b0d422 100644 (file)
 
 #define MAX_TIMESTAMP (~0ULL)
 
+/*
+ * A64 instructions are always 4 bytes
+ *
+ * Only A64 is supported, so can use this constant for converting between
+ * addresses and instruction counts, calculating offsets etc
+ */
+#define A64_INSTR_SIZE 4
+
 struct cs_etm_auxtrace {
        struct auxtrace auxtrace;
        struct auxtrace_queues queues;
@@ -45,11 +53,15 @@ struct cs_etm_auxtrace {
        u8 snapshot_mode;
        u8 data_queued;
        u8 sample_branches;
+       u8 sample_instructions;
 
        int num_cpu;
        u32 auxtrace_type;
        u64 branches_sample_type;
        u64 branches_id;
+       u64 instructions_sample_type;
+       u64 instructions_sample_period;
+       u64 instructions_id;
        u64 **metadata;
        u64 kernel_start;
        unsigned int pmu_type;
@@ -68,6 +80,12 @@ struct cs_etm_queue {
        u64 time;
        u64 timestamp;
        u64 offset;
+       u64 period_instructions;
+       struct branch_stack *last_branch;
+       struct branch_stack *last_branch_rb;
+       size_t last_branch_pos;
+       struct cs_etm_packet *prev_packet;
+       struct cs_etm_packet *packet;
 };
 
 static int cs_etm__update_queues(struct cs_etm_auxtrace *etm);
@@ -174,6 +192,16 @@ static void cs_etm__free_queue(void *priv)
 {
        struct cs_etm_queue *etmq = priv;
 
+       if (!etmq)
+               return;
+
+       thread__zput(etmq->thread);
+       cs_etm_decoder__free(etmq->decoder);
+       zfree(&etmq->event_buf);
+       zfree(&etmq->last_branch);
+       zfree(&etmq->last_branch_rb);
+       zfree(&etmq->prev_packet);
+       zfree(&etmq->packet);
        free(etmq);
 }
 
@@ -270,11 +298,35 @@ static struct cs_etm_queue *cs_etm__alloc_queue(struct cs_etm_auxtrace *etm,
        struct cs_etm_decoder_params d_params;
        struct cs_etm_trace_params  *t_params;
        struct cs_etm_queue *etmq;
+       size_t szp = sizeof(struct cs_etm_packet);
 
        etmq = zalloc(sizeof(*etmq));
        if (!etmq)
                return NULL;
 
+       etmq->packet = zalloc(szp);
+       if (!etmq->packet)
+               goto out_free;
+
+       if (etm->synth_opts.last_branch || etm->sample_branches) {
+               etmq->prev_packet = zalloc(szp);
+               if (!etmq->prev_packet)
+                       goto out_free;
+       }
+
+       if (etm->synth_opts.last_branch) {
+               size_t sz = sizeof(struct branch_stack);
+
+               sz += etm->synth_opts.last_branch_sz *
+                     sizeof(struct branch_entry);
+               etmq->last_branch = zalloc(sz);
+               if (!etmq->last_branch)
+                       goto out_free;
+               etmq->last_branch_rb = zalloc(sz);
+               if (!etmq->last_branch_rb)
+                       goto out_free;
+       }
+
        etmq->event_buf = malloc(PERF_SAMPLE_MAX_SIZE);
        if (!etmq->event_buf)
                goto out_free;
@@ -329,6 +381,7 @@ static struct cs_etm_queue *cs_etm__alloc_queue(struct cs_etm_auxtrace *etm,
                goto out_free_decoder;
 
        etmq->offset = 0;
+       etmq->period_instructions = 0;
 
        return etmq;
 
@@ -336,6 +389,10 @@ out_free_decoder:
        cs_etm_decoder__free(etmq->decoder);
 out_free:
        zfree(&etmq->event_buf);
+       zfree(&etmq->last_branch);
+       zfree(&etmq->last_branch_rb);
+       zfree(&etmq->prev_packet);
+       zfree(&etmq->packet);
        free(etmq);
 
        return NULL;
@@ -389,6 +446,129 @@ static int cs_etm__update_queues(struct cs_etm_auxtrace *etm)
        return 0;
 }
 
+static inline void cs_etm__copy_last_branch_rb(struct cs_etm_queue *etmq)
+{
+       struct branch_stack *bs_src = etmq->last_branch_rb;
+       struct branch_stack *bs_dst = etmq->last_branch;
+       size_t nr = 0;
+
+       /*
+        * Set the number of records before early exit: ->nr is used to
+        * determine how many branches to copy from ->entries.
+        */
+       bs_dst->nr = bs_src->nr;
+
+       /*
+        * Early exit when there is nothing to copy.
+        */
+       if (!bs_src->nr)
+               return;
+
+       /*
+        * As bs_src->entries is a circular buffer, we need to copy from it in
+        * two steps.  First, copy the branches from the most recently inserted
+        * branch ->last_branch_pos until the end of bs_src->entries buffer.
+        */
+       nr = etmq->etm->synth_opts.last_branch_sz - etmq->last_branch_pos;
+       memcpy(&bs_dst->entries[0],
+              &bs_src->entries[etmq->last_branch_pos],
+              sizeof(struct branch_entry) * nr);
+
+       /*
+        * If we wrapped around at least once, the branches from the beginning
+        * of the bs_src->entries buffer and until the ->last_branch_pos element
+        * are older valid branches: copy them over.  The total number of
+        * branches copied over will be equal to the number of branches asked by
+        * the user in last_branch_sz.
+        */
+       if (bs_src->nr >= etmq->etm->synth_opts.last_branch_sz) {
+               memcpy(&bs_dst->entries[nr],
+                      &bs_src->entries[0],
+                      sizeof(struct branch_entry) * etmq->last_branch_pos);
+       }
+}
+
+static inline void cs_etm__reset_last_branch_rb(struct cs_etm_queue *etmq)
+{
+       etmq->last_branch_pos = 0;
+       etmq->last_branch_rb->nr = 0;
+}
+
+static inline u64 cs_etm__last_executed_instr(struct cs_etm_packet *packet)
+{
+       /*
+        * The packet records the execution range with an exclusive end address
+        *
+        * A64 instructions are constant size, so the last executed
+        * instruction is A64_INSTR_SIZE before the end address
+        * Will need to do instruction level decode for T32 instructions as
+        * they can be variable size (not yet supported).
+        */
+       return packet->end_addr - A64_INSTR_SIZE;
+}
+
+static inline u64 cs_etm__instr_count(const struct cs_etm_packet *packet)
+{
+       /*
+        * Only A64 instructions are currently supported, so can get
+        * instruction count by dividing.
+        * Will need to do instruction level decode for T32 instructions as
+        * they can be variable size (not yet supported).
+        */
+       return (packet->end_addr - packet->start_addr) / A64_INSTR_SIZE;
+}
+
+static inline u64 cs_etm__instr_addr(const struct cs_etm_packet *packet,
+                                    u64 offset)
+{
+       /*
+        * Only A64 instructions are currently supported, so can get
+        * instruction address by multiplying.
+        * Will need to do instruction level decode for T32 instructions as
+        * they can be variable size (not yet supported).
+        */
+       return packet->start_addr + offset * A64_INSTR_SIZE;
+}
+
+static void cs_etm__update_last_branch_rb(struct cs_etm_queue *etmq)
+{
+       struct branch_stack *bs = etmq->last_branch_rb;
+       struct branch_entry *be;
+
+       /*
+        * The branches are recorded in a circular buffer in reverse
+        * chronological order: we start recording from the last element of the
+        * buffer down.  After writing the first element of the stack, move the
+        * insert position back to the end of the buffer.
+        */
+       if (!etmq->last_branch_pos)
+               etmq->last_branch_pos = etmq->etm->synth_opts.last_branch_sz;
+
+       etmq->last_branch_pos -= 1;
+
+       be       = &bs->entries[etmq->last_branch_pos];
+       be->from = cs_etm__last_executed_instr(etmq->prev_packet);
+       be->to   = etmq->packet->start_addr;
+       /* No support for mispredict */
+       be->flags.mispred = 0;
+       be->flags.predicted = 1;
+
+       /*
+        * Increment bs->nr until reaching the number of last branches asked by
+        * the user on the command line.
+        */
+       if (bs->nr < etmq->etm->synth_opts.last_branch_sz)
+               bs->nr += 1;
+}
+
+static int cs_etm__inject_event(union perf_event *event,
+                              struct perf_sample *sample, u64 type)
+{
+       event->header.size = perf_event__sample_event_size(sample, type, 0);
+       return perf_event__synthesize_sample(event, type, 0, sample);
+}
+
+
 static int
 cs_etm__get_trace(struct cs_etm_buffer *buff, struct cs_etm_queue *etmq)
 {
@@ -453,35 +633,105 @@ static void  cs_etm__set_pid_tid_cpu(struct cs_etm_auxtrace *etm,
        }
 }
 
+static int cs_etm__synth_instruction_sample(struct cs_etm_queue *etmq,
+                                           u64 addr, u64 period)
+{
+       int ret = 0;
+       struct cs_etm_auxtrace *etm = etmq->etm;
+       union perf_event *event = etmq->event_buf;
+       struct perf_sample sample = {.ip = 0,};
+
+       event->sample.header.type = PERF_RECORD_SAMPLE;
+       event->sample.header.misc = PERF_RECORD_MISC_USER;
+       event->sample.header.size = sizeof(struct perf_event_header);
+
+       sample.ip = addr;
+       sample.pid = etmq->pid;
+       sample.tid = etmq->tid;
+       sample.id = etmq->etm->instructions_id;
+       sample.stream_id = etmq->etm->instructions_id;
+       sample.period = period;
+       sample.cpu = etmq->packet->cpu;
+       sample.flags = 0;
+       sample.insn_len = 1;
+       sample.cpumode = event->header.misc;
+
+       if (etm->synth_opts.last_branch) {
+               cs_etm__copy_last_branch_rb(etmq);
+               sample.branch_stack = etmq->last_branch;
+       }
+
+       if (etm->synth_opts.inject) {
+               ret = cs_etm__inject_event(event, &sample,
+                                          etm->instructions_sample_type);
+               if (ret)
+                       return ret;
+       }
+
+       ret = perf_session__deliver_synth_event(etm->session, event, &sample);
+
+       if (ret)
+               pr_err(
+                       "CS ETM Trace: failed to deliver instruction event, error %d\n",
+                       ret);
+
+       if (etm->synth_opts.last_branch)
+               cs_etm__reset_last_branch_rb(etmq);
+
+       return ret;
+}
+
 /*
  * The cs etm packet encodes an instruction range between a branch target
  * and the next taken branch. Generate sample accordingly.
  */
-static int cs_etm__synth_branch_sample(struct cs_etm_queue *etmq,
-                                      struct cs_etm_packet *packet)
+static int cs_etm__synth_branch_sample(struct cs_etm_queue *etmq)
 {
        int ret = 0;
        struct cs_etm_auxtrace *etm = etmq->etm;
        struct perf_sample sample = {.ip = 0,};
        union perf_event *event = etmq->event_buf;
-       u64 start_addr = packet->start_addr;
-       u64 end_addr = packet->end_addr;
+       struct dummy_branch_stack {
+               u64                     nr;
+               struct branch_entry     entries;
+       } dummy_bs;
 
        event->sample.header.type = PERF_RECORD_SAMPLE;
        event->sample.header.misc = PERF_RECORD_MISC_USER;
        event->sample.header.size = sizeof(struct perf_event_header);
 
-       sample.ip = start_addr;
+       sample.ip = cs_etm__last_executed_instr(etmq->prev_packet);
        sample.pid = etmq->pid;
        sample.tid = etmq->tid;
-       sample.addr = end_addr;
+       sample.addr = etmq->packet->start_addr;
        sample.id = etmq->etm->branches_id;
        sample.stream_id = etmq->etm->branches_id;
        sample.period = 1;
-       sample.cpu = packet->cpu;
+       sample.cpu = etmq->packet->cpu;
        sample.flags = 0;
        sample.cpumode = PERF_RECORD_MISC_USER;
 
+       /*
+        * perf report cannot handle events without a branch stack
+        */
+       if (etm->synth_opts.last_branch) {
+               dummy_bs = (struct dummy_branch_stack){
+                       .nr = 1,
+                       .entries = {
+                               .from = sample.ip,
+                               .to = sample.addr,
+                       },
+               };
+               sample.branch_stack = (struct branch_stack *)&dummy_bs;
+       }
+
+       if (etm->synth_opts.inject) {
+               ret = cs_etm__inject_event(event, &sample,
+                                          etm->branches_sample_type);
+               if (ret)
+                       return ret;
+       }
+
        ret = perf_session__deliver_synth_event(etm->session, event, &sample);
 
        if (ret)
@@ -578,6 +828,24 @@ static int cs_etm__synth_events(struct cs_etm_auxtrace *etm,
                etm->sample_branches = true;
                etm->branches_sample_type = attr.sample_type;
                etm->branches_id = id;
+               id += 1;
+               attr.sample_type &= ~(u64)PERF_SAMPLE_ADDR;
+       }
+
+       if (etm->synth_opts.last_branch)
+               attr.sample_type |= PERF_SAMPLE_BRANCH_STACK;
+
+       if (etm->synth_opts.instructions) {
+               attr.config = PERF_COUNT_HW_INSTRUCTIONS;
+               attr.sample_period = etm->synth_opts.period;
+               etm->instructions_sample_period = attr.sample_period;
+               err = cs_etm__synth_event(session, &attr, id);
+               if (err)
+                       return err;
+               etm->sample_instructions = true;
+               etm->instructions_sample_type = attr.sample_type;
+               etm->instructions_id = id;
+               id += 1;
        }
 
        return 0;
@@ -585,25 +853,108 @@ static int cs_etm__synth_events(struct cs_etm_auxtrace *etm,
 
 static int cs_etm__sample(struct cs_etm_queue *etmq)
 {
+       struct cs_etm_auxtrace *etm = etmq->etm;
+       struct cs_etm_packet *tmp;
        int ret;
-       struct cs_etm_packet packet;
+       u64 instrs_executed;
 
-       while (1) {
-               ret = cs_etm_decoder__get_packet(etmq->decoder, &packet);
-               if (ret <= 0)
+       instrs_executed = cs_etm__instr_count(etmq->packet);
+       etmq->period_instructions += instrs_executed;
+
+       /*
+        * Record a branch when the last instruction in
+        * PREV_PACKET is a branch.
+        */
+       if (etm->synth_opts.last_branch &&
+           etmq->prev_packet &&
+           etmq->prev_packet->sample_type == CS_ETM_RANGE &&
+           etmq->prev_packet->last_instr_taken_branch)
+               cs_etm__update_last_branch_rb(etmq);
+
+       if (etm->sample_instructions &&
+           etmq->period_instructions >= etm->instructions_sample_period) {
+               /*
+                * Emit instruction sample periodically
+                * TODO: allow period to be defined in cycles and clock time
+                */
+
+               /* Get number of instructions executed after the sample point */
+               u64 instrs_over = etmq->period_instructions -
+                       etm->instructions_sample_period;
+
+               /*
+                * Calculate the address of the sampled instruction (-1 as
+                * sample is reported as though instruction has just been
+                * executed, but PC has not advanced to next instruction)
+                */
+               u64 offset = (instrs_executed - instrs_over - 1);
+               u64 addr = cs_etm__instr_addr(etmq->packet, offset);
+
+               ret = cs_etm__synth_instruction_sample(
+                       etmq, addr, etm->instructions_sample_period);
+               if (ret)
+                       return ret;
+
+               /* Carry remaining instructions into next sample period */
+               etmq->period_instructions = instrs_over;
+       }
+
+       if (etm->sample_branches &&
+           etmq->prev_packet &&
+           etmq->prev_packet->sample_type == CS_ETM_RANGE &&
+           etmq->prev_packet->last_instr_taken_branch) {
+               ret = cs_etm__synth_branch_sample(etmq);
+               if (ret)
                        return ret;
+       }
 
+       if (etm->sample_branches || etm->synth_opts.last_branch) {
                /*
-                * If the packet contains an instruction range, generate an
-                * instruction sequence event.
+                * Swap PACKET with PREV_PACKET: PACKET becomes PREV_PACKET for
+                * the next incoming packet.
                 */
-               if (packet.sample_type & CS_ETM_RANGE)
-                       cs_etm__synth_branch_sample(etmq, &packet);
+               tmp = etmq->packet;
+               etmq->packet = etmq->prev_packet;
+               etmq->prev_packet = tmp;
        }
 
        return 0;
 }
 
+static int cs_etm__flush(struct cs_etm_queue *etmq)
+{
+       int err = 0;
+       struct cs_etm_packet *tmp;
+
+       if (etmq->etm->synth_opts.last_branch &&
+           etmq->prev_packet &&
+           etmq->prev_packet->sample_type == CS_ETM_RANGE) {
+               /*
+                * Generate a last branch event for the branches left in the
+                * circular buffer at the end of the trace.
+                *
+                * Use the address of the end of the last reported execution
+                * range
+                */
+               u64 addr = cs_etm__last_executed_instr(etmq->prev_packet);
+
+               err = cs_etm__synth_instruction_sample(
+                       etmq, addr,
+                       etmq->period_instructions);
+               etmq->period_instructions = 0;
+
+               /*
+                * Swap PACKET with PREV_PACKET: PACKET becomes PREV_PACKET for
+                * the next incoming packet.
+                */
+               tmp = etmq->packet;
+               etmq->packet = etmq->prev_packet;
+               etmq->prev_packet = tmp;
+       }
+
+       return err;
+}
+
 static int cs_etm__run_decoder(struct cs_etm_queue *etmq)
 {
        struct cs_etm_auxtrace *etm = etmq->etm;
@@ -615,45 +966,72 @@ static int cs_etm__run_decoder(struct cs_etm_queue *etmq)
                etm->kernel_start = machine__kernel_start(etm->machine);
 
        /* Go through each buffer in the queue and decode them one by one */
-more:
-       buffer_used = 0;
-       memset(&buffer, 0, sizeof(buffer));
-       err = cs_etm__get_trace(&buffer, etmq);
-       if (err <= 0)
-               return err;
-       /*
-        * We cannot assume consecutive blocks in the data file are contiguous,
-        * reset the decoder to force re-sync.
-        */
-       err = cs_etm_decoder__reset(etmq->decoder);
-       if (err != 0)
-               return err;
-
-       /* Run trace decoder until buffer consumed or end of trace */
-       do {
-               processed = 0;
-
-               err = cs_etm_decoder__process_data_block(
-                                               etmq->decoder,
-                                               etmq->offset,
-                                               &buffer.buf[buffer_used],
-                                               buffer.len - buffer_used,
-                                               &processed);
-
-               if (err)
+       while (1) {
+               buffer_used = 0;
+               memset(&buffer, 0, sizeof(buffer));
+               err = cs_etm__get_trace(&buffer, etmq);
+               if (err <= 0)
                        return err;
-
-               etmq->offset += processed;
-               buffer_used += processed;
-
                /*
-                * Nothing to do with an error condition, let's hope the next
-                * chunk will be better.
+                * We cannot assume consecutive blocks in the data file are
+                * contiguous, reset the decoder to force re-sync.
                 */
-               err = cs_etm__sample(etmq);
-       } while (buffer.len > buffer_used);
+               err = cs_etm_decoder__reset(etmq->decoder);
+               if (err != 0)
+                       return err;
+
+               /* Run trace decoder until buffer consumed or end of trace */
+               do {
+                       processed = 0;
+                       err = cs_etm_decoder__process_data_block(
+                               etmq->decoder,
+                               etmq->offset,
+                               &buffer.buf[buffer_used],
+                               buffer.len - buffer_used,
+                               &processed);
+                       if (err)
+                               return err;
+
+                       etmq->offset += processed;
+                       buffer_used += processed;
+
+                       /* Process each packet in this chunk */
+                       while (1) {
+                               err = cs_etm_decoder__get_packet(etmq->decoder,
+                                                                etmq->packet);
+                               if (err <= 0)
+                                       /*
+                                        * Stop processing this chunk on
+                                        * end of data or error
+                                        */
+                                       break;
+
+                               switch (etmq->packet->sample_type) {
+                               case CS_ETM_RANGE:
+                                       /*
+                                        * If the packet contains an instruction
+                                        * range, generate instruction sequence
+                                        * events.
+                                        */
+                                       cs_etm__sample(etmq);
+                                       break;
+                               case CS_ETM_TRACE_ON:
+                                       /*
+                                        * Discontinuity in trace, flush
+                                        * previous branch stack
+                                        */
+                                       cs_etm__flush(etmq);
+                                       break;
+                               default:
+                                       break;
+                               }
+                       }
+               } while (buffer.len > buffer_used);
 
-goto more;
+               if (err == 0)
+                       /* Flush any remaining branch stack entries */
+                       err = cs_etm__flush(etmq);
+       }
 
        return err;
 }
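
The sampled-address arithmetic in cs_etm__sample() above is easier to follow in isolation. Below is a minimal standalone sketch with made-up numbers, assuming fixed 4-byte (A64) instructions; it models the patch's arithmetic and is not the perf code itself:

/* Standalone model of the sampling arithmetic in cs_etm__sample();
 * a 4-byte instruction size is assumed for the address math and the
 * numbers in main() are made up. */
#include <stdio.h>
#include <stdint.h>

static void sample_arithmetic(uint64_t instrs_executed, uint64_t *period_insns,
			      uint64_t period, uint64_t packet_start)
{
	*period_insns += instrs_executed;
	if (*period_insns < period)
		return;

	/* Instructions executed past the sample point */
	uint64_t instrs_over = *period_insns - period;

	/*
	 * -1 because the sample is reported as though the instruction
	 * just retired, so the PC still points at it, not the next one.
	 */
	uint64_t offset = instrs_executed - instrs_over - 1;

	printf("sample at %#llx\n",
	       (unsigned long long)(packet_start + offset * 4));

	/* The remainder carries into the next sample period */
	*period_insns = instrs_over;
}

int main(void)
{
	uint64_t acc = 90;

	/* period 100, packet of 25 instrs starting at 0x400000:
	 * 90 + 25 = 115 >= 100, instrs_over = 15, offset = 9,
	 * so the sample lands at 0x400000 + 9 * 4 = 0x400024. */
	sample_arithmetic(25, &acc, 100, 0x400000);
	return 0;
}
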
index f3a71db..3d64596 100644 (file)
@@ -232,7 +232,6 @@ int perf_quiet_option(void)
                var++;
        }
 
-       quiet = true;
        return 0;
 }
 
index 6d31186..4c84276 100644 (file)
@@ -32,6 +32,10 @@ void perf_env__exit(struct perf_env *env)
        for (i = 0; i < env->caches_cnt; i++)
                cpu_cache_level__free(&env->caches[i]);
        zfree(&env->caches);
+
+       for (i = 0; i < env->nr_memory_nodes; i++)
+               free(env->memory_nodes[i].set);
+       zfree(&env->memory_nodes);
 }
 
 int perf_env__set_cmdline(struct perf_env *env, int argc, const char *argv[])
index bf970f5..c4ef2e5 100644 (file)
@@ -27,6 +27,12 @@ struct numa_node {
        struct cpu_map  *map;
 };
 
+struct memory_node {
+       u64              node;
+       u64              size;
+       unsigned long   *set;
+};
+
 struct perf_env {
        char                    *hostname;
        char                    *os_release;
@@ -43,6 +49,7 @@ struct perf_env {
        int                     nr_sibling_cores;
        int                     nr_sibling_threads;
        int                     nr_numa_nodes;
+       int                     nr_memory_nodes;
        int                     nr_pmu_mappings;
        int                     nr_groups;
        char                    *cmdline;
@@ -54,6 +61,8 @@ struct perf_env {
        struct cpu_cache_level  *caches;
        int                      caches_cnt;
        struct numa_node        *numa_nodes;
+       struct memory_node      *memory_nodes;
+       unsigned long long       memory_bsize;
 };
 
 extern struct perf_env perf_env;
index 44e603c..f0a6cbd 100644 (file)
@@ -894,8 +894,6 @@ int perf_event__synthesize_kernel_mmap(struct perf_tool *tool,
                                       struct machine *machine)
 {
        size_t size;
-       const char *mmap_name;
-       char name_buff[PATH_MAX];
        struct map *map = machine__kernel_map(machine);
        struct kmap *kmap;
        int err;
@@ -918,7 +916,6 @@ int perf_event__synthesize_kernel_mmap(struct perf_tool *tool,
                return -1;
        }
 
-       mmap_name = machine__mmap_name(machine, name_buff, sizeof(name_buff));
        if (machine__is_host(machine)) {
                /*
                 * kernel uses PERF_RECORD_MISC_USER for user space maps,
@@ -931,7 +928,7 @@ int perf_event__synthesize_kernel_mmap(struct perf_tool *tool,
 
        kmap = map__kmap(map);
        size = snprintf(event->mmap.filename, sizeof(event->mmap.filename),
-                       "%s%s", mmap_name, kmap->ref_reloc_sym->name) + 1;
+                       "%s%s", machine->mmap_name, kmap->ref_reloc_sym->name) + 1;
        size = PERF_ALIGN(size, sizeof(u64));
        event->mmap.header.type = PERF_RECORD_MMAP;
        event->mmap.header.size = (sizeof(event->mmap) -
@@ -1591,17 +1588,6 @@ int machine__resolve(struct machine *machine, struct addr_location *al,
                return -1;
 
        dump_printf(" ... thread: %s:%d\n", thread__comm_str(thread), thread->tid);
-       /*
-        * Have we already created the kernel maps for this machine?
-        *
-        * This should have happened earlier, when we processed the kernel MMAP
-        * events, but for older perf.data files there was no such thing, so do
-        * it now.
-        */
-       if (sample->cpumode == PERF_RECORD_MISC_KERNEL &&
-           machine__kernel_map(machine) == NULL)
-               machine__create_kernel_maps(machine);
-
        thread__find_addr_map(thread, sample->cpumode, MAP__FUNCTION, sample->ip, al);
        dump_printf(" ...... dso: %s\n",
                    al->map ? al->map->dso->long_name :
index e5fc14e..a59281d 100644 (file)
@@ -702,29 +702,6 @@ static int perf_evlist__resume(struct perf_evlist *evlist)
        return perf_evlist__set_paused(evlist, false);
 }
 
-union perf_event *perf_evlist__mmap_read_forward(struct perf_evlist *evlist, int idx)
-{
-       struct perf_mmap *md = &evlist->mmap[idx];
-
-       /*
-        * Check messup is required for forward overwritable ring buffer:
-        * memory pointed by md->prev can be overwritten in this case.
-        * No need for read-write ring buffer: kernel stop outputting when
-        * it hit md->prev (perf_mmap__consume()).
-        */
-       return perf_mmap__read_forward(md);
-}
-
-union perf_event *perf_evlist__mmap_read(struct perf_evlist *evlist, int idx)
-{
-       return perf_evlist__mmap_read_forward(evlist, idx);
-}
-
-void perf_evlist__mmap_consume(struct perf_evlist *evlist, int idx)
-{
-       perf_mmap__consume(&evlist->mmap[idx], false);
-}
-
 static void perf_evlist__munmap_nofree(struct perf_evlist *evlist)
 {
        int i;
@@ -745,7 +722,8 @@ void perf_evlist__munmap(struct perf_evlist *evlist)
        zfree(&evlist->overwrite_mmap);
 }
 
-static struct perf_mmap *perf_evlist__alloc_mmap(struct perf_evlist *evlist)
+static struct perf_mmap *perf_evlist__alloc_mmap(struct perf_evlist *evlist,
+                                                bool overwrite)
 {
        int i;
        struct perf_mmap *map;
@@ -759,9 +737,10 @@ static struct perf_mmap *perf_evlist__alloc_mmap(struct perf_evlist *evlist)
 
        for (i = 0; i < evlist->nr_mmaps; i++) {
                map[i].fd = -1;
+               map[i].overwrite = overwrite;
                /*
                 * When the perf_mmap() call is made we grab one refcount, plus
-                * one extra to let perf_evlist__mmap_consume() get the last
+                * one extra to let perf_mmap__consume() get the last
                 * events after all real references (perf_mmap__get()) are
                 * dropped.
                 *
@@ -802,7 +781,7 @@ static int perf_evlist__mmap_per_evsel(struct perf_evlist *evlist, int idx,
                        maps = evlist->overwrite_mmap;
 
                        if (!maps) {
-                               maps = perf_evlist__alloc_mmap(evlist);
+                               maps = perf_evlist__alloc_mmap(evlist, true);
                                if (!maps)
                                        return -1;
                                evlist->overwrite_mmap = maps;
@@ -1052,7 +1031,7 @@ int perf_evlist__mmap_ex(struct perf_evlist *evlist, unsigned int pages,
        struct mmap_params mp;
 
        if (!evlist->mmap)
-               evlist->mmap = perf_evlist__alloc_mmap(evlist);
+               evlist->mmap = perf_evlist__alloc_mmap(evlist, false);
        if (!evlist->mmap)
                return -ENOMEM;
 
@@ -1086,11 +1065,30 @@ int perf_evlist__mmap(struct perf_evlist *evlist, unsigned int pages)
 
 int perf_evlist__create_maps(struct perf_evlist *evlist, struct target *target)
 {
+       bool all_threads = (target->per_thread && target->system_wide);
        struct cpu_map *cpus;
        struct thread_map *threads;
 
+       /*
+        * If both '-a' and '--per-thread' are passed to perf record,
+        * '-a' overrides '--per-thread': target->per_thread = false
+        * and target->system_wide = true.
+        *
+        * If only '--per-thread' is passed to perf record,
+        * target->per_thread = true and target->system_wide = false.
+        *
+        * Either way target->per_thread && target->system_wide is
+        * false, so for perf record thread_map__new_str() does not
+        * call thread_map__new_all_cpus(), preserving perf record's
+        * current behavior.
+        *
+        * perf stat, however, allows both target->per_thread and
+        * target->system_wide to be true, meaning system-wide
+        * per-thread data is collected: thread_map__new_str() then
+        * calls thread_map__new_all_cpus() to enumerate all threads.
+        */
        threads = thread_map__new_str(target->pid, target->tid, target->uid,
-                                     target->per_thread);
+                                     all_threads);
 
        if (!threads)
                return -1;
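
The combinations spelled out in the comment above reduce to a small truth table. A standalone sketch follows; the field names mirror perf's struct target, everything else is illustrative:

/* Truth table for the all_threads decision in
 * perf_evlist__create_maps(); illustrative only. */
#include <stdbool.h>
#include <stdio.h>

struct target { bool per_thread, system_wide; };

int main(void)
{
	const struct target cases[] = {
		{ .per_thread = false, .system_wide = true  }, /* record -a */
		{ .per_thread = true,  .system_wide = false }, /* record --per-thread */
		{ .per_thread = true,  .system_wide = true  }, /* stat -a --per-thread */
	};

	for (unsigned int i = 0; i < sizeof(cases) / sizeof(cases[0]); i++) {
		/* Only perf stat's '-a --per-thread' enumerates all threads */
		bool all_threads = cases[i].per_thread && cases[i].system_wide;

		printf("per_thread=%d system_wide=%d -> all_threads=%d\n",
		       cases[i].per_thread, cases[i].system_wide, all_threads);
	}
	return 0;
}
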
index 336b838..6c41b2f 100644 (file)
@@ -129,10 +129,6 @@ struct perf_sample_id *perf_evlist__id2sid(struct perf_evlist *evlist, u64 id);
 
 void perf_evlist__toggle_bkw_mmap(struct perf_evlist *evlist, enum bkw_mmap_state state);
 
-union perf_event *perf_evlist__mmap_read(struct perf_evlist *evlist, int idx);
-
-union perf_event *perf_evlist__mmap_read_forward(struct perf_evlist *evlist,
-                                                int idx);
 void perf_evlist__mmap_consume(struct perf_evlist *evlist, int idx);
 
 int perf_evlist__open(struct perf_evlist *evlist);
index ef35168..1ac8d92 100644 (file)
@@ -244,6 +244,7 @@ void perf_evsel__init(struct perf_evsel *evsel,
        evsel->metric_name   = NULL;
        evsel->metric_events = NULL;
        evsel->collect_stat  = false;
+       evsel->pmu_name      = NULL;
 }
 
 struct perf_evsel *perf_evsel__new_idx(struct perf_event_attr *attr, int idx)
@@ -621,22 +622,34 @@ const char *perf_evsel__group_name(struct perf_evsel *evsel)
        return evsel->group_name ?: "anon group";
 }
 
+/*
+ * Returns the group details for the specified leader,
+ * with the following rules:
+ *
+ *  For record -e '{cycles,instructions}'
+ *    'anon group { cycles:u, instructions:u }'
+ *
+ *  For record -e 'cycles,instructions' and report --group
+ *    'cycles:u, instructions:u'
+ */
 int perf_evsel__group_desc(struct perf_evsel *evsel, char *buf, size_t size)
 {
-       int ret;
+       int ret = 0;
        struct perf_evsel *pos;
        const char *group_name = perf_evsel__group_name(evsel);
 
-       ret = scnprintf(buf, size, "%s", group_name);
+       if (!evsel->forced_leader)
+               ret = scnprintf(buf, size, "%s { ", group_name);
 
-       ret += scnprintf(buf + ret, size - ret, " { %s",
+       ret += scnprintf(buf + ret, size - ret, "%s",
                         perf_evsel__name(evsel));
 
        for_each_group_member(pos, evsel)
                ret += scnprintf(buf + ret, size - ret, ", %s",
                                 perf_evsel__name(pos));
 
-       ret += scnprintf(buf + ret, size - ret, " }");
+       if (!evsel->forced_leader)
+               ret += scnprintf(buf + ret, size - ret, " }");
 
        return ret;
 }
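
The effect of the forced_leader check in perf_evsel__group_desc() above can be seen with a small standalone model; plain snprintf() stands in for perf's scnprintf() (unlike scnprintf() it is unsafe for tiny buffers, which is fine for this illustration), and the event names are hard-coded:

/* Minimal model of the bracketing logic in perf_evsel__group_desc() */
#include <stdbool.h>
#include <stdio.h>

static int group_desc(char *buf, size_t size, bool forced_leader)
{
	int ret = 0;

	if (!forced_leader)
		ret = snprintf(buf, size, "%s { ", "anon group");

	ret += snprintf(buf + ret, size - ret, "%s", "cycles:u");
	ret += snprintf(buf + ret, size - ret, ", %s", "instructions:u");

	if (!forced_leader)
		ret += snprintf(buf + ret, size - ret, " }");

	return ret;
}

int main(void)
{
	char buf[128];

	group_desc(buf, sizeof(buf), false);
	puts(buf);		/* anon group { cycles:u, instructions:u } */

	group_desc(buf, sizeof(buf), true);
	puts(buf);		/* cycles:u, instructions:u */
	return 0;
}
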
@@ -1233,7 +1246,7 @@ void perf_evsel__exit(struct perf_evsel *evsel)
        perf_evsel__free_fd(evsel);
        perf_evsel__free_id(evsel);
        perf_evsel__free_config_terms(evsel);
-       close_cgroup(evsel->cgrp);
+       cgroup__put(evsel->cgrp);
        cpu_map__put(evsel->cpus);
        cpu_map__put(evsel->own_cpus);
        thread_map__put(evsel->threads);
@@ -1915,6 +1928,9 @@ try_fallback:
                goto fallback_missing_features;
        }
 out_close:
+       if (err)
+               threads->err_thread = thread;
+
        do {
                while (--thread >= 0) {
                        close(FD(evsel, cpu, thread));
index a7487c6..d3ee3af 100644 (file)
@@ -30,7 +30,7 @@ struct perf_sample_id {
        u64                     period;
 };
 
-struct cgroup_sel;
+struct cgroup;
 
 /*
  * The 'struct perf_evsel_config_term' is used to pass event
@@ -107,7 +107,7 @@ struct perf_evsel {
        struct perf_stat_evsel  *stats;
        void                    *priv;
        u64                     db_id;
-       struct cgroup_sel       *cgrp;
+       struct cgroup           *cgrp;
        void                    *handler;
        struct cpu_map          *cpus;
        struct cpu_map          *own_cpus;
@@ -125,6 +125,7 @@ struct perf_evsel {
        bool                    per_pkg;
        bool                    precise_max;
        bool                    ignore_missing_thread;
+       bool                    forced_leader;
        /* parse modifier helper */
        int                     exclude_GH;
        int                     nr_members;
@@ -142,6 +143,7 @@ struct perf_evsel {
        struct perf_evsel       **metric_events;
        bool                    collect_stat;
        bool                    weak_group;
+       const char              *pmu_name;
 };
 
 union u64_swap {
index a326e0d..121df16 100644 (file)
@@ -17,6 +17,7 @@
 #include <sys/stat.h>
 #include <sys/utsname.h>
 #include <linux/time64.h>
+#include <dirent.h>
 
 #include "evlist.h"
 #include "evsel.h"
@@ -37,6 +38,7 @@
 #include "asm/bug.h"
 #include "tool.h"
 #include "time-utils.h"
+#include "units.h"
 
 #include "sane_ctype.h"
 
@@ -131,6 +133,25 @@ int do_write(struct feat_fd *ff, const void *buf, size_t size)
        return __do_write_buf(ff, buf, size);
 }
 
+/* Return: 0 if succeeded, -ERR if failed. */
+static int do_write_bitmap(struct feat_fd *ff, unsigned long *set, u64 size)
+{
+       u64 *p = (u64 *) set;
+       int i, ret;
+
+       ret = do_write(ff, &size, sizeof(size));
+       if (ret < 0)
+               return ret;
+
+       for (i = 0; (u64) i < BITS_TO_U64(size); i++) {
+               ret = do_write(ff, p + i, sizeof(*p));
+               if (ret < 0)
+                       return ret;
+       }
+
+       return 0;
+}
+
 /* Return: 0 if succeeded, -ERR if failed. */
 int write_padded(struct feat_fd *ff, const void *bf,
                 size_t count, size_t count_aligned)
@@ -243,6 +264,38 @@ static char *do_read_string(struct feat_fd *ff)
        return NULL;
 }
 
+/* Return: 0 if succeeded, -ERR if failed. */
+static int do_read_bitmap(struct feat_fd *ff, unsigned long **pset, u64 *psize)
+{
+       unsigned long *set;
+       u64 size, *p;
+       int i, ret;
+
+       ret = do_read_u64(ff, &size);
+       if (ret)
+               return ret;
+
+       set = bitmap_alloc(size);
+       if (!set)
+               return -ENOMEM;
+
+       bitmap_zero(set, size);
+
+       p = (u64 *) set;
+
+       for (i = 0; (u64) i < BITS_TO_U64(size); i++) {
+               ret = do_read_u64(ff, p + i);
+               if (ret < 0) {
+                       free(set);
+                       return ret;
+               }
+       }
+
+       *pset  = set;
+       *psize = size;
+       return 0;
+}
+
 static int write_tracing_data(struct feat_fd *ff,
                              struct perf_evlist *evlist)
 {
@@ -1196,6 +1249,176 @@ static int write_sample_time(struct feat_fd *ff,
                        sizeof(evlist->last_sample_time));
 }
 
+
+static int memory_node__read(struct memory_node *n, unsigned long idx)
+{
+       unsigned int phys, size = 0;
+       char path[PATH_MAX];
+       struct dirent *ent;
+       DIR *dir;
+
+#define for_each_memory(mem, dir)                                      \
+       while ((ent = readdir(dir)))                                    \
+               if (strcmp(ent->d_name, ".") &&                         \
+                   strcmp(ent->d_name, "..") &&                        \
+                   sscanf(ent->d_name, "memory%u", &mem) == 1)
+
+       scnprintf(path, PATH_MAX,
+                 "%s/devices/system/node/node%lu",
+                 sysfs__mountpoint(), idx);
+
+       dir = opendir(path);
+       if (!dir) {
+               pr_warning("failed: can't open memory sysfs data\n");
+               return -1;
+       }
+
+       for_each_memory(phys, dir) {
+               size = max(phys, size);
+       }
+
+       size++;
+
+       n->set = bitmap_alloc(size);
+       if (!n->set) {
+               closedir(dir);
+               return -ENOMEM;
+       }
+
+       bitmap_zero(n->set, size);
+       n->node = idx;
+       n->size = size;
+
+       rewinddir(dir);
+
+       for_each_memory(phys, dir) {
+               set_bit(phys, n->set);
+       }
+
+       closedir(dir);
+       return 0;
+}
+
+static int memory_node__sort(const void *a, const void *b)
+{
+       const struct memory_node *na = a;
+       const struct memory_node *nb = b;
+
+       return na->node - nb->node;
+}
+
+static int build_mem_topology(struct memory_node *nodes, u64 size, u64 *cntp)
+{
+       char path[PATH_MAX];
+       struct dirent *ent;
+       DIR *dir;
+       u64 cnt = 0;
+       int ret = 0;
+
+       scnprintf(path, PATH_MAX, "%s/devices/system/node/",
+                 sysfs__mountpoint());
+
+       dir = opendir(path);
+       if (!dir) {
+               pr_warning("failed: can't open node sysfs data\n");
+               return -1;
+       }
+
+       while (!ret && (ent = readdir(dir))) {
+               unsigned int idx;
+               int r;
+
+               if (!strcmp(ent->d_name, ".") ||
+                   !strcmp(ent->d_name, ".."))
+                       continue;
+
+               r = sscanf(ent->d_name, "node%u", &idx);
+               if (r != 1)
+                       continue;
+
+               if (WARN_ONCE(cnt >= size,
+                             "failed to write MEM_TOPOLOGY, way too many nodes\n")) {
+                       closedir(dir);
+                       return -1;
+               }
+
+               ret = memory_node__read(&nodes[cnt++], idx);
+       }
+
+       *cntp = cnt;
+       closedir(dir);
+
+       if (!ret)
+               qsort(nodes, cnt, sizeof(nodes[0]), memory_node__sort);
+
+       return ret;
+}
+
+#define MAX_MEMORY_NODES 2000
+
+/*
+ * The MEM_TOPOLOGY feature holds the physical memory map for every
+ * node in the system. The format of the data is as follows:
+ *
+ *  0 - version          | for future changes
+ *  8 - block_size_bytes | /sys/devices/system/memory/block_size_bytes
+ * 16 - count            | number of nodes
+ *
+ * For each node we then store the map of physical memory
+ * indexes belonging to that node:
+ *
+ * 32 - node id          | node index
+ * 40 - size             | size of bitmap
+ * 48 - bitmap           | bitmap of memory indexes that belong to the node
+ */
+static int write_mem_topology(struct feat_fd *ff __maybe_unused,
+                             struct perf_evlist *evlist __maybe_unused)
+{
+       static struct memory_node nodes[MAX_MEMORY_NODES];
+       u64 bsize, version = 1, i, nr;
+       int ret;
+
+       ret = sysfs__read_xll("devices/system/memory/block_size_bytes",
+                             (unsigned long long *) &bsize);
+       if (ret)
+               return ret;
+
+       ret = build_mem_topology(&nodes[0], MAX_MEMORY_NODES, &nr);
+       if (ret)
+               return ret;
+
+       ret = do_write(ff, &version, sizeof(version));
+       if (ret < 0)
+               goto out;
+
+       ret = do_write(ff, &bsize, sizeof(bsize));
+       if (ret < 0)
+               goto out;
+
+       ret = do_write(ff, &nr, sizeof(nr));
+       if (ret < 0)
+               goto out;
+
+       for (i = 0; i < nr; i++) {
+               struct memory_node *n = &nodes[i];
+
+               #define _W(v)                                           \
+                       ret = do_write(ff, &n->v, sizeof(n->v));        \
+                       if (ret < 0)                                    \
+                               goto out;
+
+               _W(node)
+               _W(size)
+
+               #undef _W
+
+               ret = do_write_bitmap(ff, n->set, n->size);
+               if (ret < 0)
+                       goto out;
+       }
+
+out:
+       return ret;
+}
+
 static void print_hostname(struct feat_fd *ff, FILE *fp)
 {
        fprintf(fp, "# hostname : %s\n", ff->ph->env.hostname);
@@ -1543,6 +1766,35 @@ static void print_sample_time(struct feat_fd *ff, FILE *fp)
        fprintf(fp, "# sample duration : %10.3f ms\n", d);
 }
 
+static void memory_node__fprintf(struct memory_node *n,
+                                unsigned long long bsize, FILE *fp)
+{
+       char buf_map[100], buf_size[50];
+       unsigned long long size;
+
+       size = bsize * bitmap_weight(n->set, n->size);
+       unit_number__scnprintf(buf_size, 50, size);
+
+       bitmap_scnprintf(n->set, n->size, buf_map, 100);
+       fprintf(fp, "#  %3" PRIu64 " [%s]: %s\n", n->node, buf_size, buf_map);
+}
+
+static void print_mem_topology(struct feat_fd *ff, FILE *fp)
+{
+       struct memory_node *nodes;
+       int i, nr;
+
+       nodes = ff->ph->env.memory_nodes;
+       nr    = ff->ph->env.nr_memory_nodes;
+
+       fprintf(fp, "# memory nodes (nr %d, block size 0x%llx):\n",
+               nr, ff->ph->env.memory_bsize);
+
+       for (i = 0; i < nr; i++) {
+               memory_node__fprintf(&nodes[i], ff->ph->env.memory_bsize, fp);
+       }
+}
+
 static int __event_process_build_id(struct build_id_event *bev,
                                    char *filename,
                                    struct perf_session *session)
@@ -2205,6 +2457,58 @@ static int process_sample_time(struct feat_fd *ff, void *data __maybe_unused)
        return 0;
 }
 
+static int process_mem_topology(struct feat_fd *ff,
+                               void *data __maybe_unused)
+{
+       struct memory_node *nodes;
+       u64 version, i, nr, bsize;
+       int ret = -1;
+
+       if (do_read_u64(ff, &version))
+               return -1;
+
+       if (version != 1)
+               return -1;
+
+       if (do_read_u64(ff, &bsize))
+               return -1;
+
+       if (do_read_u64(ff, &nr))
+               return -1;
+
+       nodes = zalloc(sizeof(*nodes) * nr);
+       if (!nodes)
+               return -1;
+
+       for (i = 0; i < nr; i++) {
+               struct memory_node n;
+
+               #define _R(v)                           \
+                       if (do_read_u64(ff, &n.v))      \
+                               goto out;               \
+
+               _R(node)
+               _R(size)
+
+               #undef _R
+
+               if (do_read_bitmap(ff, &n.set, &n.size))
+                       goto out;
+
+               nodes[i] = n;
+       }
+
+       ff->ph->env.memory_bsize    = bsize;
+       ff->ph->env.memory_nodes    = nodes;
+       ff->ph->env.nr_memory_nodes = nr;
+       ret = 0;
+
+out:
+       if (ret)
+               free(nodes);
+       return ret;
+}
+
 struct feature_ops {
        int (*write)(struct feat_fd *ff, struct perf_evlist *evlist);
        void (*print)(struct feat_fd *ff, FILE *fp);
@@ -2263,6 +2567,7 @@ static const struct feature_ops feat_ops[HEADER_LAST_FEATURE] = {
        FEAT_OPN(STAT,          stat,           false),
        FEAT_OPN(CACHE,         cache,          true),
        FEAT_OPR(SAMPLE_TIME,   sample_time,    false),
+       FEAT_OPR(MEM_TOPOLOGY,  mem_topology,   true),
 };
 
 struct header_print_data {
@@ -2318,7 +2623,12 @@ int perf_header__fprintf_info(struct perf_session *session, FILE *fp, bool full)
        if (ret == -1)
                return -1;
 
-       fprintf(fp, "# captured on: %s", ctime(&st.st_ctime));
+       fprintf(fp, "# captured on    : %s", ctime(&st.st_ctime));
+
+       fprintf(fp, "# header version : %u\n", header->version);
+       fprintf(fp, "# data offset    : %" PRIu64 "\n", header->data_offset);
+       fprintf(fp, "# data size      : %" PRIu64 "\n", header->data_size);
+       fprintf(fp, "# feat offset    : %" PRIu64 "\n", header->feat_offset);
 
        perf_header__process_sections(header, fd, &hd,
                                      perf_file_section__fprintf_info);
@@ -3105,8 +3415,17 @@ int perf_event__synthesize_features(struct perf_tool *tool,
                        return ret;
                }
        }
+
+       /* Send HEADER_LAST_FEATURE mark. */
+       fe = ff.buf;
+       fe->feat_id     = HEADER_LAST_FEATURE;
+       fe->header.type = PERF_RECORD_HEADER_FEATURE;
+       fe->header.size = sizeof(*fe);
+
+       ret = process(tool, ff.buf, NULL, NULL);
+
        free(ff.buf);
-       return 0;
+       return ret;
 }
 
 int perf_event__process_feature(struct perf_tool *tool,
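
For reference, here is a sketch of a standalone reader for the MEM_TOPOLOGY layout produced by write_mem_topology() and do_write_bitmap() above. It assumes a little-endian file positioned at the section start; note that do_write_bitmap() stores the bitmap size again in front of the bitmap words, so the size effectively appears twice per node:

/* Illustrative MEM_TOPOLOGY reader; no error recovery, not perf code. */
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

static uint64_t rd_u64(FILE *f)
{
	uint64_t v;

	if (fread(&v, sizeof(v), 1, f) != 1)
		exit(EXIT_FAILURE);	/* illustration only */
	return v;
}

static void read_mem_topology(FILE *f)
{
	uint64_t version = rd_u64(f);	/* must be 1 */
	uint64_t bsize   = rd_u64(f);	/* memory block size in bytes */
	uint64_t nr      = rd_u64(f);	/* number of nodes */

	printf("version %llu, block size %#llx, %llu nodes\n",
	       (unsigned long long)version, (unsigned long long)bsize,
	       (unsigned long long)nr);

	for (uint64_t i = 0; i < nr; i++) {
		uint64_t node  = rd_u64(f);	/* node index */
		uint64_t size  = rd_u64(f);	/* bitmap size in bits */
		uint64_t size2 = rd_u64(f);	/* repeated by do_write_bitmap() */
		uint64_t words = (size2 + 63) / 64;	/* BITS_TO_U64() */

		/* bitmap of memory section indexes owned by this node */
		for (uint64_t w = 0; w < words; w++)
			(void)rd_u64(f);

		printf("node %llu: %llu bits\n",
		       (unsigned long long)node, (unsigned long long)size);
	}
}

int main(void)
{
	read_mem_topology(stdin);	/* e.g. fed the raw feature section */
	return 0;
}
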
index f28aaaa..90d4577 100644 (file)
@@ -36,6 +36,7 @@ enum {
        HEADER_STAT,
        HEADER_CACHE,
        HEADER_SAMPLE_TIME,
+       HEADER_MEM_TOPOLOGY,
        HEADER_LAST_FEATURE,
        HEADER_FEAT_BITS        = 256,
 };
@@ -174,4 +175,5 @@ int write_padded(struct feat_fd *fd, const void *bf,
 int get_cpuid(char *buffer, size_t sz);
 
 char *get_cpuid_str(struct perf_pmu *pmu __maybe_unused);
+int strcmp_cpuid_str(const char *s1, const char *s2);
 #endif /* __PERF_HEADER_H */
index b614095..7d96889 100644 (file)
@@ -536,7 +536,7 @@ static struct hist_entry *hists__findnew_entry(struct hists *hists,
                         * This mem info was allocated from sample__resolve_mem
                         * and will not be used anymore.
                         */
-                       zfree(&entry->mem_info);
+                       mem_info__zput(entry->mem_info);
 
                        /* If the map of an existing hist_entry has
                         * become out-of-date due to an exec() or
@@ -879,7 +879,7 @@ iter_prepare_cumulative_entry(struct hist_entry_iter *iter,
         * cumulated only one time to prevent entries more than 100%
         * overhead.
         */
-       he_cache = malloc(sizeof(*he_cache) * (iter->max_stack + 1));
+       he_cache = malloc(sizeof(*he_cache) * (callchain_cursor.nr + 1));
        if (he_cache == NULL)
                return -ENOMEM;
 
@@ -1045,8 +1045,6 @@ int hist_entry_iter__add(struct hist_entry_iter *iter, struct addr_location *al,
        if (err)
                return err;
 
-       iter->max_stack = max_stack_depth;
-
        err = iter->ops->prepare_entry(iter, al);
        if (err)
                goto out;
@@ -1141,7 +1139,7 @@ void hist_entry__delete(struct hist_entry *he)
        if (he->mem_info) {
                map__zput(he->mem_info->iaddr.map);
                map__zput(he->mem_info->daddr.map);
-               zfree(&he->mem_info);
+               mem_info__zput(he->mem_info);
        }
 
        zfree(&he->stat_acc);
index 02721b5..e869cad 100644 (file)
@@ -107,7 +107,6 @@ struct hist_entry_iter {
        int curr;
 
        bool hide_unresolved;
-       int max_stack;
 
        struct perf_evsel *evsel;
        struct perf_sample *sample;
index aa1593c..f9157ae 100644 (file)
@@ -1378,6 +1378,7 @@ static int intel_pt_overflow(struct intel_pt_decoder *decoder)
        intel_pt_clear_tx_flags(decoder);
        decoder->have_tma = false;
        decoder->cbr = 0;
+       decoder->timestamp_insn_cnt = 0;
        decoder->pkt_state = INTEL_PT_STATE_ERR_RESYNC;
        decoder->overflow = true;
        return -EOVERFLOW;
@@ -1616,6 +1617,7 @@ static int intel_pt_walk_fup_tip(struct intel_pt_decoder *decoder)
                case INTEL_PT_PWRX:
                        intel_pt_log("ERROR: Missing TIP after FUP\n");
                        decoder->pkt_state = INTEL_PT_STATE_ERR3;
+                       decoder->pkt_step = 0;
                        return -ENOENT;
 
                case INTEL_PT_OVF:
@@ -2390,14 +2392,6 @@ const struct intel_pt_state *intel_pt_decode(struct intel_pt_decoder *decoder)
        return &decoder->state;
 }
 
-static bool intel_pt_at_psb(unsigned char *buf, size_t len)
-{
-       if (len < INTEL_PT_PSB_LEN)
-               return false;
-       return memmem(buf, INTEL_PT_PSB_LEN, INTEL_PT_PSB_STR,
-                     INTEL_PT_PSB_LEN);
-}
-
 /**
  * intel_pt_next_psb - move buffer pointer to the start of the next PSB packet.
  * @buf: pointer to buffer pointer
@@ -2486,6 +2480,7 @@ static unsigned char *intel_pt_last_psb(unsigned char *buf, size_t len)
  * @buf: buffer
  * @len: size of buffer
  * @tsc: TSC value returned
+ * @rem: returns remaining size when TSC is found
  *
  * Find a TSC packet in @buf and return the TSC value.  This function assumes
  * that @buf starts at a PSB and that PSB+ will contain TSC and so stops if a
@@ -2493,7 +2488,8 @@ static unsigned char *intel_pt_last_psb(unsigned char *buf, size_t len)
  *
  * Return: %true if TSC is found, false otherwise.
  */
-static bool intel_pt_next_tsc(unsigned char *buf, size_t len, uint64_t *tsc)
+static bool intel_pt_next_tsc(unsigned char *buf, size_t len, uint64_t *tsc,
+                             size_t *rem)
 {
        struct intel_pt_pkt packet;
        int ret;
@@ -2504,6 +2500,7 @@ static bool intel_pt_next_tsc(unsigned char *buf, size_t len, uint64_t *tsc)
                        return false;
                if (packet.type == INTEL_PT_TSC) {
                        *tsc = packet.payload;
+                       *rem = len;
                        return true;
                }
                if (packet.type == INTEL_PT_PSBEND)
@@ -2554,6 +2551,8 @@ static int intel_pt_tsc_cmp(uint64_t tsc1, uint64_t tsc2)
  * @len_a: size of first buffer
  * @buf_b: second buffer
  * @len_b: size of second buffer
+ * @consecutive: returns true if there is data in buf_b that is consecutive
+ *               to buf_a
  *
  * If the trace contains TSC we can look at the last TSC of @buf_a and the
  * first TSC of @buf_b in order to determine if the buffers overlap, and then
@@ -2566,33 +2565,41 @@ static int intel_pt_tsc_cmp(uint64_t tsc1, uint64_t tsc2)
 static unsigned char *intel_pt_find_overlap_tsc(unsigned char *buf_a,
                                                size_t len_a,
                                                unsigned char *buf_b,
-                                               size_t len_b)
+                                               size_t len_b, bool *consecutive)
 {
        uint64_t tsc_a, tsc_b;
        unsigned char *p;
-       size_t len;
+       size_t len, rem_a, rem_b;
 
        p = intel_pt_last_psb(buf_a, len_a);
        if (!p)
                return buf_b; /* No PSB in buf_a => no overlap */
 
        len = len_a - (p - buf_a);
-       if (!intel_pt_next_tsc(p, len, &tsc_a)) {
+       if (!intel_pt_next_tsc(p, len, &tsc_a, &rem_a)) {
                /* The last PSB+ in buf_a is incomplete, so go back one more */
                len_a -= len;
                p = intel_pt_last_psb(buf_a, len_a);
                if (!p)
                        return buf_b; /* No full PSB+ => assume no overlap */
                len = len_a - (p - buf_a);
-               if (!intel_pt_next_tsc(p, len, &tsc_a))
+               if (!intel_pt_next_tsc(p, len, &tsc_a, &rem_a))
                        return buf_b; /* No TSC in buf_a => assume no overlap */
        }
 
        while (1) {
                /* Ignore PSB+ with no TSC */
-               if (intel_pt_next_tsc(buf_b, len_b, &tsc_b) &&
-                   intel_pt_tsc_cmp(tsc_a, tsc_b) < 0)
-                       return buf_b; /* tsc_a < tsc_b => no overlap */
+               if (intel_pt_next_tsc(buf_b, len_b, &tsc_b, &rem_b)) {
+                       int cmp = intel_pt_tsc_cmp(tsc_a, tsc_b);
+
+                       /* Same TSC, so buffers are consecutive */
+                       if (!cmp && rem_b >= rem_a) {
+                               *consecutive = true;
+                               return buf_b + len_b - (rem_b - rem_a);
+                       }
+                       if (cmp < 0)
+                               return buf_b; /* tsc_a < tsc_b => no overlap */
+               }
 
                if (!intel_pt_step_psb(&buf_b, &len_b))
                        return buf_b + len_b; /* No PSB in buf_b => no data */
@@ -2606,6 +2613,8 @@ static unsigned char *intel_pt_find_overlap_tsc(unsigned char *buf_a,
  * @buf_b: second buffer
  * @len_b: size of second buffer
  * @have_tsc: can use TSC packets to detect overlap
+ * @consecutive: returns true if there is data in buf_b that is consecutive
+ *               to buf_a
  *
  * When trace samples or snapshots are recorded there is the possibility that
  * the data overlaps.  Note that, for the purposes of decoding, data is only
@@ -2616,7 +2625,7 @@ static unsigned char *intel_pt_find_overlap_tsc(unsigned char *buf_a,
  */
 unsigned char *intel_pt_find_overlap(unsigned char *buf_a, size_t len_a,
                                     unsigned char *buf_b, size_t len_b,
-                                    bool have_tsc)
+                                    bool have_tsc, bool *consecutive)
 {
        unsigned char *found;
 
@@ -2628,7 +2637,8 @@ unsigned char *intel_pt_find_overlap(unsigned char *buf_a, size_t len_a,
                return buf_b; /* No overlap */
 
        if (have_tsc) {
-               found = intel_pt_find_overlap_tsc(buf_a, len_a, buf_b, len_b);
+               found = intel_pt_find_overlap_tsc(buf_a, len_a, buf_b, len_b,
+                                                 consecutive);
                if (found)
                        return found;
        }
@@ -2643,28 +2653,16 @@ unsigned char *intel_pt_find_overlap(unsigned char *buf_a, size_t len_a,
        }
 
        /* Now len_b >= len_a */
-       if (len_b > len_a) {
-               /* The leftover buffer 'b' must start at a PSB */
-               while (!intel_pt_at_psb(buf_b + len_a, len_b - len_a)) {
-                       if (!intel_pt_step_psb(&buf_a, &len_a))
-                               return buf_b; /* No overlap */
-               }
-       }
-
        while (1) {
                /* Potential overlap so check the bytes */
                found = memmem(buf_a, len_a, buf_b, len_a);
-               if (found)
+               if (found) {
+                       *consecutive = true;
                        return buf_b + len_a;
+               }
 
                /* Try again at next PSB in buffer 'a' */
                if (!intel_pt_step_psb(&buf_a, &len_a))
                        return buf_b; /* No overlap */
-
-               /* The leftover buffer 'b' must start at a PSB */
-               while (!intel_pt_at_psb(buf_b + len_a, len_b - len_a)) {
-                       if (!intel_pt_step_psb(&buf_a, &len_a))
-                               return buf_b; /* No overlap */
-               }
        }
 }
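
The pointer arithmetic behind the new "same TSC" case in intel_pt_find_overlap_tsc() above is worth spelling out. A standalone model with made-up offsets follows; rem_a and rem_b are the bytes remaining from each buffer's TSC packet to the end of that buffer, and equal TSC values mean buf_b restarts inside data buf_a already covered:

/* Model of the consecutive-buffer case; buffers and offsets are made up. */
#include <stdio.h>
#include <stddef.h>

int main(void)
{
	unsigned char buf_b[64];
	size_t len_b = sizeof(buf_b);
	size_t rem_a = 10, rem_b = 40;

	/* Fresh data begins (rem_b - rem_a) bytes before the end of buf_b */
	unsigned char *start = buf_b + len_b - (rem_b - rem_a);

	printf("skip %zu bytes of overlap, decode the remaining %zu\n",
	       (size_t)(start - buf_b), (size_t)(buf_b + len_b - start));
	return 0;
}
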
index 921b22e..fc1752d 100644 (file)
@@ -117,7 +117,7 @@ const struct intel_pt_state *intel_pt_decode(struct intel_pt_decoder *decoder);
 
 unsigned char *intel_pt_find_overlap(unsigned char *buf_a, size_t len_a,
                                     unsigned char *buf_b, size_t len_b,
-                                    bool have_tsc);
+                                    bool have_tsc, bool *consecutive);
 
 int intel_pt__strerror(int code, char *buf, size_t buflen);
 
index 3773d9c..0effaff 100644 (file)
@@ -132,6 +132,7 @@ struct intel_pt_queue {
        struct intel_pt *pt;
        unsigned int queue_nr;
        struct auxtrace_buffer *buffer;
+       struct auxtrace_buffer *old_buffer;
        void *decoder;
        const struct intel_pt_state *state;
        struct ip_callchain *chain;
@@ -143,6 +144,7 @@ struct intel_pt_queue {
        bool stop;
        bool step_through_buffers;
        bool use_buffer_pid_tid;
+       bool sync_switch;
        pid_t pid, tid;
        int cpu;
        int switch_state;
@@ -207,49 +209,28 @@ static void intel_pt_dump_event(struct intel_pt *pt, unsigned char *buf,
 static int intel_pt_do_fix_overlap(struct intel_pt *pt, struct auxtrace_buffer *a,
                                   struct auxtrace_buffer *b)
 {
+       bool consecutive = false;
        void *start;
 
        start = intel_pt_find_overlap(a->data, a->size, b->data, b->size,
-                                     pt->have_tsc);
+                                     pt->have_tsc, &consecutive);
        if (!start)
                return -EINVAL;
        b->use_size = b->data + b->size - start;
        b->use_data = start;
+       if (b->use_size && consecutive)
+               b->consecutive = true;
        return 0;
 }
 
-static void intel_pt_use_buffer_pid_tid(struct intel_pt_queue *ptq,
-                                       struct auxtrace_queue *queue,
-                                       struct auxtrace_buffer *buffer)
-{
-       if (queue->cpu == -1 && buffer->cpu != -1)
-               ptq->cpu = buffer->cpu;
-
-       ptq->pid = buffer->pid;
-       ptq->tid = buffer->tid;
-
-       intel_pt_log("queue %u cpu %d pid %d tid %d\n",
-                    ptq->queue_nr, ptq->cpu, ptq->pid, ptq->tid);
-
-       thread__zput(ptq->thread);
-
-       if (ptq->tid != -1) {
-               if (ptq->pid != -1)
-                       ptq->thread = machine__findnew_thread(ptq->pt->machine,
-                                                             ptq->pid,
-                                                             ptq->tid);
-               else
-                       ptq->thread = machine__find_thread(ptq->pt->machine, -1,
-                                                          ptq->tid);
-       }
-}
-
 /* This function assumes data is processed sequentially only */
 static int intel_pt_get_trace(struct intel_pt_buffer *b, void *data)
 {
        struct intel_pt_queue *ptq = data;
-       struct auxtrace_buffer *buffer = ptq->buffer, *old_buffer = buffer;
+       struct auxtrace_buffer *buffer = ptq->buffer;
+       struct auxtrace_buffer *old_buffer = ptq->old_buffer;
        struct auxtrace_queue *queue;
+       bool might_overlap;
 
        if (ptq->stop) {
                b->len = 0;
@@ -257,7 +238,7 @@ static int intel_pt_get_trace(struct intel_pt_buffer *b, void *data)
        }
 
        queue = &ptq->pt->queues.queue_array[ptq->queue_nr];
-next:
+
        buffer = auxtrace_buffer__next(queue, buffer);
        if (!buffer) {
                if (old_buffer)
@@ -276,7 +257,8 @@ next:
                        return -ENOMEM;
        }
 
-       if (ptq->pt->snapshot_mode && !buffer->consecutive && old_buffer &&
+       might_overlap = ptq->pt->snapshot_mode || ptq->pt->sampling_mode;
+       if (might_overlap && !buffer->consecutive && old_buffer &&
            intel_pt_do_fix_overlap(ptq->pt, old_buffer, buffer))
                return -ENOMEM;
 
@@ -289,33 +271,24 @@ next:
        }
        b->ref_timestamp = buffer->reference;
 
-       /*
-        * If in snapshot mode and the buffer has no usable data, get next
-        * buffer and again check overlap against old_buffer.
-        */
-       if (ptq->pt->snapshot_mode && !b->len)
-               goto next;
-
-       if (old_buffer)
-               auxtrace_buffer__drop_data(old_buffer);
-
-       if (!old_buffer || ptq->pt->sampling_mode || (ptq->pt->snapshot_mode &&
-                                                     !buffer->consecutive)) {
+       if (!old_buffer || (might_overlap && !buffer->consecutive)) {
                b->consecutive = false;
                b->trace_nr = buffer->buffer_nr + 1;
        } else {
                b->consecutive = true;
        }
 
-       if (ptq->use_buffer_pid_tid && (ptq->pid != buffer->pid ||
-                                       ptq->tid != buffer->tid))
-               intel_pt_use_buffer_pid_tid(ptq, queue, buffer);
-
        if (ptq->step_through_buffers)
                ptq->stop = true;
 
-       if (!b->len)
+       if (b->len) {
+               if (old_buffer)
+                       auxtrace_buffer__drop_data(old_buffer);
+               ptq->old_buffer = buffer;
+       } else {
+               auxtrace_buffer__drop_data(buffer);
                return intel_pt_get_trace(b, data);
+       }
 
        return 0;
 }
@@ -954,16 +927,15 @@ static int intel_pt_setup_queue(struct intel_pt *pt,
                        ptq->cpu = queue->cpu;
                ptq->tid = queue->tid;
 
-               if (pt->sampling_mode) {
-                       if (pt->timeless_decoding)
-                               ptq->step_through_buffers = true;
-                       if (pt->timeless_decoding || !pt->have_sched_switch)
-                               ptq->use_buffer_pid_tid = true;
-               }
+               if (pt->sampling_mode && !pt->snapshot_mode &&
+                   pt->timeless_decoding)
+                       ptq->step_through_buffers = true;
+
+               ptq->sync_switch = pt->sync_switch;
        }
 
        if (!ptq->on_heap &&
-           (!pt->sync_switch ||
+           (!ptq->sync_switch ||
             ptq->switch_state != INTEL_PT_SS_EXPECTING_SWITCH_EVENT)) {
                const struct intel_pt_state *state;
                int ret;
@@ -1546,7 +1518,7 @@ static int intel_pt_sample(struct intel_pt_queue *ptq)
        if (pt->synth_opts.last_branch)
                intel_pt_update_last_branch_rb(ptq);
 
-       if (!pt->sync_switch)
+       if (!ptq->sync_switch)
                return 0;
 
        if (intel_pt_is_switch_ip(ptq, state->to_ip)) {
@@ -1627,6 +1599,21 @@ static u64 intel_pt_switch_ip(struct intel_pt *pt, u64 *ptss_ip)
        return switch_ip;
 }
 
+static void intel_pt_enable_sync_switch(struct intel_pt *pt)
+{
+       unsigned int i;
+
+       pt->sync_switch = true;
+
+       for (i = 0; i < pt->queues.nr_queues; i++) {
+               struct auxtrace_queue *queue = &pt->queues.queue_array[i];
+               struct intel_pt_queue *ptq = queue->priv;
+
+               if (ptq)
+                       ptq->sync_switch = true;
+       }
+}
+
 static int intel_pt_run_decoder(struct intel_pt_queue *ptq, u64 *timestamp)
 {
        const struct intel_pt_state *state = ptq->state;
@@ -1643,7 +1630,7 @@ static int intel_pt_run_decoder(struct intel_pt_queue *ptq, u64 *timestamp)
                        if (pt->switch_ip) {
                                intel_pt_log("switch_ip: %"PRIx64" ptss_ip: %"PRIx64"\n",
                                             pt->switch_ip, pt->ptss_ip);
-                               pt->sync_switch = true;
+                               intel_pt_enable_sync_switch(pt);
                        }
                }
        }
@@ -1659,9 +1646,9 @@ static int intel_pt_run_decoder(struct intel_pt_queue *ptq, u64 *timestamp)
                if (state->err) {
                        if (state->err == INTEL_PT_ERR_NODATA)
                                return 1;
-                       if (pt->sync_switch &&
+                       if (ptq->sync_switch &&
                            state->from_ip >= pt->kernel_start) {
-                               pt->sync_switch = false;
+                               ptq->sync_switch = false;
                                intel_pt_next_tid(pt, ptq);
                        }
                        if (pt->synth_opts.errors) {
@@ -1687,7 +1674,7 @@ static int intel_pt_run_decoder(struct intel_pt_queue *ptq, u64 *timestamp)
                                     state->timestamp, state->est_timestamp);
                        ptq->timestamp = state->est_timestamp;
                /* Use estimated TSC in unknown switch state */
-               } else if (pt->sync_switch &&
+               } else if (ptq->sync_switch &&
                           ptq->switch_state == INTEL_PT_SS_UNKNOWN &&
                           intel_pt_is_switch_ip(ptq, state->to_ip) &&
                           ptq->next_tid == -1) {
@@ -1834,7 +1821,7 @@ static int intel_pt_sync_switch(struct intel_pt *pt, int cpu, pid_t tid,
                return 1;
 
        ptq = intel_pt_cpu_to_ptq(pt, cpu);
-       if (!ptq)
+       if (!ptq || !ptq->sync_switch)
                return 1;
 
        switch (ptq->switch_state) {
@@ -2075,9 +2062,6 @@ static int intel_pt_process_auxtrace_event(struct perf_session *session,
        struct intel_pt *pt = container_of(session->auxtrace, struct intel_pt,
                                           auxtrace);
 
-       if (pt->sampling_mode)
-               return 0;
-
        if (!pt->data_queued) {
                struct auxtrace_buffer *buffer;
                off_t data_offset;
index 4952b42..1cca0a2 100644 (file)
@@ -433,6 +433,7 @@ int llvm__compile_bpf(const char *path, void **p_obj_buf,
        char serr[STRERR_BUFSIZE];
        char *kbuild_dir = NULL, *kbuild_include_opts = NULL;
        const char *template = llvm_param.clang_bpf_cmd_template;
+       char *command_echo, *command_out;
 
        if (path[0] != '-' && realpath(path, abspath) == NULL) {
                err = errno;
@@ -487,6 +488,16 @@ int llvm__compile_bpf(const char *path, void **p_obj_buf,
                      (path[0] == '-') ? path : abspath);
 
        pr_debug("llvm compiling command template: %s\n", template);
+
+       if (asprintf(&command_echo, "echo -n \"%s\"", template) < 0)
+               goto errout;
+
+       err = read_from_pipe(command_echo, (void **) &command_out, NULL);
+       if (err)
+               goto errout;
+
+       pr_debug("llvm compiling command : %s\n", command_out);
+
        err = read_from_pipe(template, &obj_buf, &obj_buf_sz);
        if (err) {
                pr_err("ERROR:\tunable to compile %s\n", path);
@@ -497,6 +508,8 @@ int llvm__compile_bpf(const char *path, void **p_obj_buf,
                goto errout;
        }
 
+       free(command_echo);
+       free(command_out);
        free(kbuild_dir);
        free(kbuild_include_opts);
 
@@ -509,6 +522,7 @@ int llvm__compile_bpf(const char *path, void **p_obj_buf,
                *p_obj_buf_sz = obj_buf_sz;
        return 0;
 errout:
+       free(command_echo);
        free(kbuild_dir);
        free(kbuild_include_opts);
        free(obj_buf);
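
The debug aid added above runs the compile-command template through the shell once with 'echo -n' so the fully expanded command can be logged before it is executed. A minimal sketch of the same idea, with popen() standing in for perf's read_from_pipe() and a made-up template:

/* Illustrative only: expand a shell command template for logging. */
#define _GNU_SOURCE		/* for asprintf() */
#include <stdio.h>
#include <stdlib.h>

int main(void)
{
	const char *template = "clang -target bpf -O2 -c $CLANG_SOURCE";
	char *command_echo;
	char out[4096];
	FILE *p;

	if (asprintf(&command_echo, "echo -n \"%s\"", template) < 0)
		return 1;

	p = popen(command_echo, "r");	/* shell expands $CLANG_SOURCE etc. */
	if (p && fgets(out, sizeof(out), p))
		fprintf(stderr, "llvm compiling command : %s\n", out);
	if (p)
		pclose(p);
	free(command_echo);
	return 0;
}
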
index b05a674..2eca847 100644 (file)
@@ -48,8 +48,23 @@ static void machine__threads_init(struct machine *machine)
        }
 }
 
+static int machine__set_mmap_name(struct machine *machine)
+{
+       if (machine__is_host(machine))
+               machine->mmap_name = strdup("[kernel.kallsyms]");
+       else if (machine__is_default_guest(machine))
+               machine->mmap_name = strdup("[guest.kernel.kallsyms]");
+       else if (asprintf(&machine->mmap_name, "[guest.kernel.kallsyms.%d]",
+                         machine->pid) < 0)
+               machine->mmap_name = NULL;
+
+       return machine->mmap_name ? 0 : -ENOMEM;
+}
+
 int machine__init(struct machine *machine, const char *root_dir, pid_t pid)
 {
+       int err = -ENOMEM;
+
        memset(machine, 0, sizeof(*machine));
        map_groups__init(&machine->kmaps, machine);
        RB_CLEAR_NODE(&machine->rb_node);
@@ -73,13 +88,16 @@ int machine__init(struct machine *machine, const char *root_dir, pid_t pid)
        if (machine->root_dir == NULL)
                return -ENOMEM;
 
+       if (machine__set_mmap_name(machine))
+               goto out;
+
        if (pid != HOST_KERNEL_ID) {
                struct thread *thread = machine__findnew_thread(machine, -1,
                                                                pid);
                char comm[64];
 
                if (thread == NULL)
-                       return -ENOMEM;
+                       goto out;
 
                snprintf(comm, sizeof(comm), "[guest/%d]", pid);
                thread__set_comm(thread, comm, 0);
@@ -87,7 +105,13 @@ int machine__init(struct machine *machine, const char *root_dir, pid_t pid)
        }
 
        machine->current_tid = NULL;
+       err = 0;
 
+out:
+       if (err) {
+               zfree(&machine->root_dir);
+               zfree(&machine->mmap_name);
+       }
-       return 0;
+       return err;
 }
 
@@ -119,7 +143,7 @@ struct machine *machine__new_kallsyms(void)
         *    ask for not using the kcore parsing code, once this one is fixed
         *    to create a map per module.
         */
-       if (machine && __machine__load_kallsyms(machine, "/proc/kallsyms", MAP__FUNCTION, true) <= 0) {
+       if (machine && machine__load_kallsyms(machine, "/proc/kallsyms", MAP__FUNCTION) <= 0) {
                machine__delete(machine);
                machine = NULL;
        }
@@ -180,6 +204,7 @@ void machine__exit(struct machine *machine)
        dsos__exit(&machine->dsos);
        machine__exit_vdso(machine);
        zfree(&machine->root_dir);
+       zfree(&machine->mmap_name);
        zfree(&machine->current_tid);
 
        for (i = 0; i < THREADS__TABLE_SIZE; i++) {
@@ -322,20 +347,6 @@ void machines__process_guests(struct machines *machines,
        }
 }
 
-char *machine__mmap_name(struct machine *machine, char *bf, size_t size)
-{
-       if (machine__is_host(machine))
-               snprintf(bf, size, "[%s]", "kernel.kallsyms");
-       else if (machine__is_default_guest(machine))
-               snprintf(bf, size, "[%s]", "guest.kernel.kallsyms");
-       else {
-               snprintf(bf, size, "[%s.%d]", "guest.kernel.kallsyms",
-                        machine->pid);
-       }
-
-       return bf;
-}
-
 void machines__set_id_hdr_size(struct machines *machines, u16 id_hdr_size)
 {
        struct rb_node *node;
@@ -771,24 +782,18 @@ size_t machine__fprintf(struct machine *machine, FILE *fp)
 
 static struct dso *machine__get_kernel(struct machine *machine)
 {
-       const char *vmlinux_name = NULL;
+       const char *vmlinux_name = machine->mmap_name;
        struct dso *kernel;
 
        if (machine__is_host(machine)) {
-               vmlinux_name = symbol_conf.vmlinux_name;
-               if (!vmlinux_name)
-                       vmlinux_name = DSO__NAME_KALLSYMS;
+               if (symbol_conf.vmlinux_name)
+                       vmlinux_name = symbol_conf.vmlinux_name;
 
                kernel = machine__findnew_kernel(machine, vmlinux_name,
                                                 "[kernel]", DSO_TYPE_KERNEL);
        } else {
-               char bf[PATH_MAX];
-
-               if (machine__is_default_guest(machine))
+               if (symbol_conf.default_guest_vmlinux_name)
                        vmlinux_name = symbol_conf.default_guest_vmlinux_name;
-               if (!vmlinux_name)
-                       vmlinux_name = machine__mmap_name(machine, bf,
-                                                         sizeof(bf));
 
                kernel = machine__findnew_kernel(machine, vmlinux_name,
                                                 "[guest.kernel]",
@@ -849,13 +854,10 @@ static int machine__get_running_kernel_start(struct machine *machine,
        return 0;
 }
 
-int __machine__create_kernel_maps(struct machine *machine, struct dso *kernel)
+static int
+__machine__create_kernel_maps(struct machine *machine, struct dso *kernel)
 {
        int type;
-       u64 start = 0;
-
-       if (machine__get_running_kernel_start(machine, NULL, &start))
-               return -1;
 
        /* In case of renewal of the kernel map, destroy the previous one */
        machine__destroy_kernel_maps(machine);
@@ -864,7 +866,7 @@ int __machine__create_kernel_maps(struct machine *machine, struct dso *kernel)
                struct kmap *kmap;
                struct map *map;
 
-               machine->vmlinux_maps[type] = map__new2(start, kernel, type);
+               machine->vmlinux_maps[type] = map__new2(0, kernel, type);
                if (machine->vmlinux_maps[type] == NULL)
                        return -1;
 
@@ -987,11 +989,11 @@ int machines__create_kernel_maps(struct machines *machines, pid_t pid)
        return machine__create_kernel_maps(machine);
 }
 
-int __machine__load_kallsyms(struct machine *machine, const char *filename,
-                            enum map_type type, bool no_kcore)
+int machine__load_kallsyms(struct machine *machine, const char *filename,
+                            enum map_type type)
 {
        struct map *map = machine__kernel_map(machine);
-       int ret = __dso__load_kallsyms(map->dso, filename, map, no_kcore);
+       int ret = __dso__load_kallsyms(map->dso, filename, map, true);
 
        if (ret > 0) {
                dso__set_loaded(map->dso, type);
@@ -1006,12 +1008,6 @@ int __machine__load_kallsyms(struct machine *machine, const char *filename,
        return ret;
 }
 
-int machine__load_kallsyms(struct machine *machine, const char *filename,
-                          enum map_type type)
-{
-       return __machine__load_kallsyms(machine, filename, type, false);
-}
-
 int machine__load_vmlinux_path(struct machine *machine, enum map_type type)
 {
        struct map *map = machine__kernel_map(machine);
@@ -1215,6 +1211,24 @@ static int machine__create_modules(struct machine *machine)
        return 0;
 }
 
+static void machine__set_kernel_mmap(struct machine *machine,
+                                    u64 start, u64 end)
+{
+       int i;
+
+       for (i = 0; i < MAP__NR_TYPES; i++) {
+               machine->vmlinux_maps[i]->start = start;
+               machine->vmlinux_maps[i]->end   = end;
+
+               /*
+                * Be a bit paranoid here, some perf.data files come with
+                * a zero-sized synthesized MMAP event for the kernel.
+                */
+               if (start == 0 && end == 0)
+                       machine->vmlinux_maps[i]->end = ~0ULL;
+       }
+}
+
 int machine__create_kernel_maps(struct machine *machine)
 {
        struct dso *kernel = machine__get_kernel(machine);
@@ -1239,40 +1253,22 @@ int machine__create_kernel_maps(struct machine *machine)
                                 "continuing anyway...\n", machine->pid);
        }
 
-       /*
-        * Now that we have all the maps created, just set the ->end of them:
-        */
-       map_groups__fixup_end(&machine->kmaps);
-
        if (!machine__get_running_kernel_start(machine, &name, &addr)) {
                if (name &&
                    maps__set_kallsyms_ref_reloc_sym(machine->vmlinux_maps, name, addr)) {
                        machine__destroy_kernel_maps(machine);
                        return -1;
                }
+               machine__set_kernel_mmap(machine, addr, 0);
        }
 
+       /*
+        * Now that we have all the maps created, just set the ->end of them:
+        */
+       map_groups__fixup_end(&machine->kmaps);
        return 0;
 }
 
-static void machine__set_kernel_mmap_len(struct machine *machine,
-                                        union perf_event *event)
-{
-       int i;
-
-       for (i = 0; i < MAP__NR_TYPES; i++) {
-               machine->vmlinux_maps[i]->start = event->mmap.start;
-               machine->vmlinux_maps[i]->end   = (event->mmap.start +
-                                                  event->mmap.len);
-               /*
-                * Be a bit paranoid here, some perf.data file came with
-                * a zero sized synthesized MMAP event for the kernel.
-                */
-               if (machine->vmlinux_maps[i]->end == 0)
-                       machine->vmlinux_maps[i]->end = ~0ULL;
-       }
-}
-
 static bool machine__uses_kcore(struct machine *machine)
 {
        struct dso *dso;
@@ -1289,7 +1285,6 @@ static int machine__process_kernel_mmap_event(struct machine *machine,
                                              union perf_event *event)
 {
        struct map *map;
-       char kmmap_prefix[PATH_MAX];
        enum dso_kernel_type kernel_type;
        bool is_kernel_mmap;
 
@@ -1297,15 +1292,14 @@ static int machine__process_kernel_mmap_event(struct machine *machine,
        if (machine__uses_kcore(machine))
                return 0;
 
-       machine__mmap_name(machine, kmmap_prefix, sizeof(kmmap_prefix));
        if (machine__is_host(machine))
                kernel_type = DSO_TYPE_KERNEL;
        else
                kernel_type = DSO_TYPE_GUEST_KERNEL;
 
        is_kernel_mmap = memcmp(event->mmap.filename,
-                               kmmap_prefix,
-                               strlen(kmmap_prefix) - 1) == 0;
+                               machine->mmap_name,
+                               strlen(machine->mmap_name) - 1) == 0;
        if (event->mmap.filename[0] == '/' ||
            (!is_kernel_mmap && event->mmap.filename[0] == '[')) {
                map = machine__findnew_module_map(machine, event->mmap.start,
@@ -1316,7 +1310,7 @@ static int machine__process_kernel_mmap_event(struct machine *machine,
                map->end = map->start + event->mmap.len;
        } else if (is_kernel_mmap) {
                const char *symbol_name = (event->mmap.filename +
-                               strlen(kmmap_prefix));
+                               strlen(machine->mmap_name));
                /*
                 * Should be there already, from the build-id table in
                 * the header.
@@ -1357,7 +1351,7 @@ static int machine__process_kernel_mmap_event(struct machine *machine,
                up_read(&machine->dsos.lock);
 
                if (kernel == NULL)
-                       kernel = machine__findnew_dso(machine, kmmap_prefix);
+                       kernel = machine__findnew_dso(machine, machine->mmap_name);
                if (kernel == NULL)
                        goto out_problem;
 
@@ -1370,7 +1364,8 @@ static int machine__process_kernel_mmap_event(struct machine *machine,
                if (strstr(kernel->long_name, "vmlinux"))
                        dso__set_short_name(kernel, "[kernel.vmlinux]", false);
 
-               machine__set_kernel_mmap_len(machine, event);
+               machine__set_kernel_mmap(machine, event->mmap.start,
+                                        event->mmap.start + event->mmap.len);
 
                /*
                 * Avoid using a zero address (kptr_restrict) for the ref reloc
@@ -1700,7 +1695,7 @@ static void ip__resolve_data(struct thread *thread,
 struct mem_info *sample__resolve_mem(struct perf_sample *sample,
                                     struct addr_location *al)
 {
-       struct mem_info *mi = zalloc(sizeof(*mi));
+       struct mem_info *mi = mem_info__new();
 
        if (!mi)
                return NULL;
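
The new machine->mmap_name caches the string that machine__mmap_name() used to re-format into an on-stack buffer on every call. A minimal standalone sketch of the naming scheme follows; demo_mmap_name() is a hypothetical stand-in, and the two id constants mirror perf's HOST_KERNEL_ID (-1) and default guest kernel id (0).

/*
 * Standalone sketch of the kernel mmap naming scheme set up by
 * machine__set_mmap_name(). demo_mmap_name() is a hypothetical helper;
 * the real code stores the result once in machine->mmap_name.
 */
#define _GNU_SOURCE
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

#define HOST_KERNEL_ID          (-1)
#define DEFAULT_GUEST_KERNEL_ID (0)

static char *demo_mmap_name(int pid)
{
	char *name = NULL;

	if (pid == HOST_KERNEL_ID)
		name = strdup("[kernel.kallsyms]");
	else if (pid == DEFAULT_GUEST_KERNEL_ID)
		name = strdup("[guest.kernel.kallsyms]");
	else if (asprintf(&name, "[guest.kernel.kallsyms.%d]", pid) < 0)
		name = NULL;	/* the real code turns this into -ENOMEM */

	return name;
}

int main(void)
{
	int pids[] = { HOST_KERNEL_ID, DEFAULT_GUEST_KERNEL_ID, 4711 };
	unsigned int i;

	for (i = 0; i < sizeof(pids) / sizeof(pids[0]); i++) {
		char *name = demo_mmap_name(pids[i]);

		printf("pid %d -> %s\n", pids[i], name ? name : "(alloc failed)");
		free(name);
	}
	return 0;
}
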
index 5ce860b..66cc200 100644 (file)
@@ -43,6 +43,7 @@ struct machine {
        bool              comm_exec;
        bool              kptr_restrict_warned;
        char              *root_dir;
+       char              *mmap_name;
        struct threads    threads[THREADS__TABLE_SIZE];
        struct vdso_info  *vdso_info;
        struct perf_env   *env;
@@ -142,8 +143,6 @@ struct machine *machines__find(struct machines *machines, pid_t pid);
 struct machine *machines__findnew(struct machines *machines, pid_t pid);
 
 void machines__set_id_hdr_size(struct machines *machines, u16 id_hdr_size);
-char *machine__mmap_name(struct machine *machine, char *bf, size_t size);
-
 void machines__set_comm_exec(struct machines *machines, bool comm_exec);
 
 struct machine *machine__new_host(void);
@@ -226,8 +225,6 @@ struct map *machine__findnew_module_map(struct machine *machine, u64 start,
                                        const char *filename);
 int arch__fix_module_text_start(u64 *start, const char *name);
 
-int __machine__load_kallsyms(struct machine *machine, const char *filename,
-                            enum map_type type, bool no_kcore);
 int machine__load_kallsyms(struct machine *machine, const char *filename,
                           enum map_type type);
 int machine__load_vmlinux_path(struct machine *machine, enum map_type type);
@@ -239,7 +236,6 @@ size_t machines__fprintf_dsos_buildid(struct machines *machines, FILE *fp,
                                     bool (skip)(struct dso *dso, int parm), int parm);
 
 void machine__destroy_kernel_maps(struct machine *machine);
-int __machine__create_kernel_maps(struct machine *machine, struct dso *kernel);
 int machine__create_kernel_maps(struct machine *machine);
 
 int machines__create_kernel_maps(struct machines *machines, pid_t pid);
diff --git a/tools/perf/util/mem2node.c b/tools/perf/util/mem2node.c
new file mode 100644 (file)
index 0000000..c6fd81c
--- /dev/null
@@ -0,0 +1,134 @@
+#include <errno.h>
+#include <inttypes.h>
+#include <linux/bitmap.h>
+#include "mem2node.h"
+#include "util.h"
+
+struct phys_entry {
+       struct rb_node  rb_node;
+       u64     start;
+       u64     end;
+       u64     node;
+};
+
+static void phys_entry__insert(struct phys_entry *entry, struct rb_root *root)
+{
+       struct rb_node **p = &root->rb_node;
+       struct rb_node *parent = NULL;
+       struct phys_entry *e;
+
+       while (*p != NULL) {
+               parent = *p;
+               e = rb_entry(parent, struct phys_entry, rb_node);
+
+               if (entry->start < e->start)
+                       p = &(*p)->rb_left;
+               else
+                       p = &(*p)->rb_right;
+       }
+
+       rb_link_node(&entry->rb_node, parent, p);
+       rb_insert_color(&entry->rb_node, root);
+}
+
+static void
+phys_entry__init(struct phys_entry *entry, u64 start, u64 bsize, u64 node)
+{
+       entry->start = start;
+       entry->end   = start + bsize;
+       entry->node  = node;
+       RB_CLEAR_NODE(&entry->rb_node);
+}
+
+int mem2node__init(struct mem2node *map, struct perf_env *env)
+{
+       struct memory_node *n, *nodes = &env->memory_nodes[0];
+       struct phys_entry *entries, *tmp_entries;
+       u64 bsize = env->memory_bsize;
+       int i, j = 0, max = 0;
+
+       memset(map, 0x0, sizeof(*map));
+       map->root = RB_ROOT;
+
+       for (i = 0; i < env->nr_memory_nodes; i++) {
+               n = &nodes[i];
+               max += bitmap_weight(n->set, n->size);
+       }
+
+       entries = zalloc(sizeof(*entries) * max);
+       if (!entries)
+               return -ENOMEM;
+
+       for (i = 0; i < env->nr_memory_nodes; i++) {
+               u64 bit;
+
+               n = &nodes[i];
+
+               for (bit = 0; bit < n->size; bit++) {
+                       u64 start;
+
+                       if (!test_bit(bit, n->set))
+                               continue;
+
+                       start = bit * bsize;
+
+                       /*
+                        * Merge adjacent areas; we walk the bitmap
+                        * in order, so there is no need to sort.
+                        */
+                       if (j > 0) {
+                               struct phys_entry *prev = &entries[j - 1];
+
+                               if ((prev->end == start) &&
+                                   (prev->node == n->node)) {
+                                       prev->end += bsize;
+                                       continue;
+                               }
+                       }
+
+                       phys_entry__init(&entries[j++], start, bsize, n->node);
+               }
+       }
+
+       /* Cut unused entries, due to merging. */
+       tmp_entries = realloc(entries, sizeof(*entries) * j);
+       if (tmp_entries)
+               entries = tmp_entries;
+
+       for (i = 0; i < j; i++) {
+               pr_debug("mem2node %03" PRIu64 " [0x%016" PRIx64 "-0x%016" PRIx64 "]\n",
+                        entries[i].node, entries[i].start, entries[i].end);
+
+               phys_entry__insert(&entries[i], &map->root);
+       }
+
+       map->entries = entries;
+       return 0;
+}
+
+void mem2node__exit(struct mem2node *map)
+{
+       zfree(&map->entries);
+}
+
+int mem2node__node(struct mem2node *map, u64 addr)
+{
+       struct rb_node **p, *parent = NULL;
+       struct phys_entry *entry;
+
+       p = &map->root.rb_node;
+       while (*p != NULL) {
+               parent = *p;
+               entry = rb_entry(parent, struct phys_entry, rb_node);
+               if (addr < entry->start)
+                       p = &(*p)->rb_left;
+               else if (addr >= entry->end)
+                       p = &(*p)->rb_right;
+               else
+                       goto out;
+       }
+
+       entry = NULL;
+out:
+       return entry ? (int) entry->node : -1;
+}
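
mem2node expands each node's memory bitmap into [start, end) extents, merges adjacent extents that belong to the same node, and resolves physical addresses through an rbtree. A simplified, self-contained model of the same merge-and-lookup logic (a sorted array and linear scan stand in for perf's rbtree and perf_env input):

/*
 * Simplified model of the mem2node merge + lookup above: extents are
 * built from (start, size, node) blocks in ascending order, adjacent
 * blocks on the same node are merged, and lookup finds the covering
 * extent. The real code reads struct perf_env bitmaps into an rbtree.
 */
#include <stdio.h>

struct extent { unsigned long long start, end; int node; };

static int lookup_node(const struct extent *e, int cnt, unsigned long long addr)
{
	int i;

	for (i = 0; i < cnt; i++) {
		if (addr >= e[i].start && addr < e[i].end)
			return e[i].node;
	}
	return -1;	/* mem2node__node() also returns -1 on a miss */
}

int main(void)
{
	struct extent map[8];
	int cnt = 0;
	unsigned int i;
	/* The two 0x1000-sized blocks on node 0 merge into one extent. */
	struct { unsigned long long start, size; int node; } blocks[] = {
		{ 0x0000, 0x1000, 0 }, { 0x1000, 0x1000, 0 }, { 0x2000, 0x1000, 1 },
	};

	for (i = 0; i < sizeof(blocks) / sizeof(blocks[0]); i++) {
		if (cnt && map[cnt - 1].end == blocks[i].start &&
		    map[cnt - 1].node == blocks[i].node) {
			map[cnt - 1].end += blocks[i].size;	/* merge */
			continue;
		}
		map[cnt].start = blocks[i].start;
		map[cnt].end   = blocks[i].start + blocks[i].size;
		map[cnt].node  = blocks[i].node;
		cnt++;
	}

	printf("%d extents, addr 0x1800 -> node %d, addr 0x2800 -> node %d\n",
	       cnt, lookup_node(map, cnt, 0x1800), lookup_node(map, cnt, 0x2800));
	return 0;
}
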
diff --git a/tools/perf/util/mem2node.h b/tools/perf/util/mem2node.h
new file mode 100644 (file)
index 0000000..59c4752
--- /dev/null
@@ -0,0 +1,19 @@
+#ifndef __MEM2NODE_H
+#define __MEM2NODE_H
+
+#include <linux/rbtree.h>
+#include "env.h"
+
+struct phys_entry;
+
+struct mem2node {
+       struct rb_root           root;
+       struct phys_entry       *entries;
+       int                      cnt;
+};
+
+int  mem2node__init(struct mem2node *map, struct perf_env *env);
+void mem2node__exit(struct mem2node *map);
+int  mem2node__node(struct mem2node *map, u64 addr);
+
+#endif /* __MEM2NODE_H */
index 91531a7..38ca3ff 100644 (file)
@@ -63,25 +63,6 @@ static union perf_event *perf_mmap__read(struct perf_mmap *map,
        return event;
 }
 
-/*
- * legacy interface for mmap read.
- * Don't use it. Use perf_mmap__read_event().
- */
-union perf_event *perf_mmap__read_forward(struct perf_mmap *map)
-{
-       u64 head;
-
-       /*
-        * Check if event was unmapped due to a POLLHUP/POLLERR.
-        */
-       if (!refcount_read(&map->refcnt))
-               return NULL;
-
-       head = perf_mmap__read_head(map);
-
-       return perf_mmap__read(map, &map->prev, head);
-}
-
 /*
  * Read event from ring buffer one by one.
  * Return one event for each call.
@@ -94,9 +75,7 @@ union perf_event *perf_mmap__read_forward(struct perf_mmap *map)
  * }
  * perf_mmap__read_done()
  */
-union perf_event *perf_mmap__read_event(struct perf_mmap *map,
-                                       bool overwrite,
-                                       u64 *startp, u64 end)
+union perf_event *perf_mmap__read_event(struct perf_mmap *map)
 {
        union perf_event *event;
 
@@ -106,17 +85,14 @@ union perf_event *perf_mmap__read_event(struct perf_mmap *map,
        if (!refcount_read(&map->refcnt))
                return NULL;
 
-       if (startp == NULL)
-               return NULL;
-
        /* non-overwrite doesn't pause the ringbuffer */
-       if (!overwrite)
-               end = perf_mmap__read_head(map);
+       if (!map->overwrite)
+               map->end = perf_mmap__read_head(map);
 
-       event = perf_mmap__read(map, startp, end);
+       event = perf_mmap__read(map, &map->start, map->end);
 
-       if (!overwrite)
-               map->prev = *startp;
+       if (!map->overwrite)
+               map->prev = map->start;
 
        return event;
 }
@@ -139,9 +115,9 @@ void perf_mmap__put(struct perf_mmap *map)
                perf_mmap__munmap(map);
 }
 
-void perf_mmap__consume(struct perf_mmap *map, bool overwrite)
+void perf_mmap__consume(struct perf_mmap *map)
 {
-       if (!overwrite) {
+       if (!map->overwrite) {
                u64 old = map->prev;
 
                perf_mmap__write_tail(map, old);
@@ -191,7 +167,7 @@ void perf_mmap__munmap(struct perf_mmap *map)
 int perf_mmap__mmap(struct perf_mmap *map, struct mmap_params *mp, int fd)
 {
        /*
-        * The last one will be done at perf_evlist__mmap_consume(), so that we
+        * The last one will be done at perf_mmap__consume(), so that we
         * make sure we don't prevent tools from consuming every last event in
         * the ring buffer.
         *
@@ -223,19 +199,18 @@ int perf_mmap__mmap(struct perf_mmap *map, struct mmap_params *mp, int fd)
        return 0;
 }
 
-static int overwrite_rb_find_range(void *buf, int mask, u64 head, u64 *start, u64 *end)
+static int overwrite_rb_find_range(void *buf, int mask, u64 *start, u64 *end)
 {
        struct perf_event_header *pheader;
-       u64 evt_head = head;
+       u64 evt_head = *start;
        int size = mask + 1;
 
-       pr_debug2("overwrite_rb_find_range: buf=%p, head=%"PRIx64"\n", buf, head);
-       pheader = (struct perf_event_header *)(buf + (head & mask));
-       *start = head;
+       pr_debug2("%s: buf=%p, start=%"PRIx64"\n", __func__, buf, *start);
+       pheader = (struct perf_event_header *)(buf + (*start & mask));
        while (true) {
-               if (evt_head - head >= (unsigned int)size) {
+               if (evt_head - *start >= (unsigned int)size) {
                        pr_debug("Finished reading overwrite ring buffer: rewind\n");
-                       if (evt_head - head > (unsigned int)size)
+                       if (evt_head - *start > (unsigned int)size)
                                evt_head -= pheader->size;
                        *end = evt_head;
                        return 0;
@@ -259,27 +234,26 @@ static int overwrite_rb_find_range(void *buf, int mask, u64 head, u64 *start, u64 *end)
 /*
  * Report the start and end of the available data in ringbuffer
  */
-int perf_mmap__read_init(struct perf_mmap *md, bool overwrite,
-                        u64 *startp, u64 *endp)
+int perf_mmap__read_init(struct perf_mmap *md)
 {
        u64 head = perf_mmap__read_head(md);
        u64 old = md->prev;
        unsigned char *data = md->base + page_size;
        unsigned long size;
 
-       *startp = overwrite ? head : old;
-       *endp = overwrite ? old : head;
+       md->start = md->overwrite ? head : old;
+       md->end = md->overwrite ? old : head;
 
-       if (*startp == *endp)
+       if (md->start == md->end)
                return -EAGAIN;
 
-       size = *endp - *startp;
+       size = md->end - md->start;
        if (size > (unsigned long)(md->mask) + 1) {
-               if (!overwrite) {
+               if (!md->overwrite) {
                        WARN_ONCE(1, "failed to keep up with mmap data. (warn only once)\n");
 
                        md->prev = head;
-                       perf_mmap__consume(md, overwrite);
+                       perf_mmap__consume(md);
                        return -EAGAIN;
                }
 
@@ -287,33 +261,32 @@ int perf_mmap__read_init(struct perf_mmap *md, bool overwrite,
                 * Backward ring buffer is full. We still have a chance to read
                 * most of data from it.
                 */
-               if (overwrite_rb_find_range(data, md->mask, head, startp, endp))
+               if (overwrite_rb_find_range(data, md->mask, &md->start, &md->end))
                        return -EINVAL;
        }
 
        return 0;
 }
 
-int perf_mmap__push(struct perf_mmap *md, bool overwrite,
-                   void *to, int push(void *to, void *buf, size_t size))
+int perf_mmap__push(struct perf_mmap *md, void *to,
+                   int push(void *to, void *buf, size_t size))
 {
        u64 head = perf_mmap__read_head(md);
-       u64 end, start;
        unsigned char *data = md->base + page_size;
        unsigned long size;
        void *buf;
        int rc = 0;
 
-       rc = perf_mmap__read_init(md, overwrite, &start, &end);
+       rc = perf_mmap__read_init(md);
        if (rc < 0)
                return (rc == -EAGAIN) ? 0 : -1;
 
-       size = end - start;
+       size = md->end - md->start;
 
-       if ((start & md->mask) + size != (end & md->mask)) {
-               buf = &data[start & md->mask];
-               size = md->mask + 1 - (start & md->mask);
-               start += size;
+       if ((md->start & md->mask) + size != (md->end & md->mask)) {
+               buf = &data[md->start & md->mask];
+               size = md->mask + 1 - (md->start & md->mask);
+               md->start += size;
 
                if (push(to, buf, size) < 0) {
                        rc = -1;
@@ -321,9 +294,9 @@ int perf_mmap__push(struct perf_mmap *md, bool overwrite,
                }
        }
 
-       buf = &data[start & md->mask];
-       size = end - start;
-       start += size;
+       buf = &data[md->start & md->mask];
+       size = md->end - md->start;
+       md->start += size;
 
        if (push(to, buf, size) < 0) {
                rc = -1;
@@ -331,7 +304,7 @@ int perf_mmap__push(struct perf_mmap *md, bool overwrite,
        }
 
        md->prev = head;
-       perf_mmap__consume(md, overwrite);
+       perf_mmap__consume(md);
 out:
        return rc;
 }
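
With start, end and overwrite folded into struct perf_mmap, the read loop documented above perf_mmap__read_event() loses all of its by-reference state. A sketch of the resulting caller shape; it assumes perf's internal util/mmap.h and an already-mmapped map, so it only builds inside the perf tree:

/*
 * Sketch of the new per-mmap read loop, matching the usage documented
 * above perf_mmap__read_event(). Assumes "map" was set up by
 * perf_mmap__mmap() and map->overwrite was initialized accordingly.
 */
static void drain_one_mmap(struct perf_mmap *map)
{
	union perf_event *event;

	if (perf_mmap__read_init(map) < 0)	/* -EAGAIN: nothing to read */
		return;

	while ((event = perf_mmap__read_event(map)) != NULL) {
		/* ... process the event, e.g. perf_evsel__parse_sample() ... */
		perf_mmap__consume(map);	/* advance tail (non-overwrite only) */
	}

	perf_mmap__read_done(map);
}
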
index ec7d3a2..d82294d 100644 (file)
@@ -20,6 +20,9 @@ struct perf_mmap {
        int              fd;
        refcount_t       refcnt;
        u64              prev;
+       u64              start;
+       u64              end;
+       bool             overwrite;
        struct auxtrace_mmap auxtrace_mmap;
        char             event_copy[PERF_SAMPLE_MAX_SIZE] __aligned(8);
 };
@@ -63,7 +66,7 @@ void perf_mmap__munmap(struct perf_mmap *map);
 void perf_mmap__get(struct perf_mmap *map);
 void perf_mmap__put(struct perf_mmap *map);
 
-void perf_mmap__consume(struct perf_mmap *map, bool overwrite);
+void perf_mmap__consume(struct perf_mmap *map);
 
 static inline u64 perf_mmap__read_head(struct perf_mmap *mm)
 {
@@ -86,16 +89,13 @@ static inline void perf_mmap__write_tail(struct perf_mmap *md, u64 tail)
 
 union perf_event *perf_mmap__read_forward(struct perf_mmap *map);
 
-union perf_event *perf_mmap__read_event(struct perf_mmap *map,
-                                       bool overwrite,
-                                       u64 *startp, u64 end);
+union perf_event *perf_mmap__read_event(struct perf_mmap *map);
 
-int perf_mmap__push(struct perf_mmap *md, bool backward,
-                   void *to, int push(void *to, void *buf, size_t size));
+int perf_mmap__push(struct perf_mmap *md, void *to,
+                   int push(void *to, void *buf, size_t size));
 
 size_t perf_mmap__mmap_len(struct perf_mmap *map);
 
-int perf_mmap__read_init(struct perf_mmap *md, bool overwrite,
-                        u64 *startp, u64 *endp);
+int perf_mmap__read_init(struct perf_mmap *md);
 void perf_mmap__read_done(struct perf_mmap *map);
 #endif /*__PERF_MMAP_H */
index 34589c4..2fb0272 100644 (file)
@@ -206,8 +206,8 @@ struct tracepoint_path *tracepoint_id_to_path(u64 config)
 
                for_each_event(sys_dirent, evt_dir, evt_dirent) {
 
-                       snprintf(evt_path, MAXPATHLEN, "%s/%s/id", dir_path,
-                                evt_dirent->d_name);
+                       scnprintf(evt_path, MAXPATHLEN, "%s/%s/id", dir_path,
+                                 evt_dirent->d_name);
                        fd = open(evt_path, O_RDONLY);
                        if (fd < 0)
                                continue;
@@ -1217,7 +1217,7 @@ int parse_events_add_numeric(struct parse_events_state *parse_state,
                         get_config_name(head_config), &config_terms);
 }
 
-static int __parse_events_add_pmu(struct parse_events_state *parse_state,
+int parse_events_add_pmu(struct parse_events_state *parse_state,
                         struct list_head *list, char *name,
                         struct list_head *head_config, bool auto_merge_stats)
 {
@@ -1247,7 +1247,12 @@ static int __parse_events_add_pmu(struct parse_events_state *parse_state,
        if (!head_config) {
                attr.type = pmu->type;
                evsel = __add_event(list, &parse_state->idx, &attr, NULL, pmu, NULL, auto_merge_stats);
-               return evsel ? 0 : -ENOMEM;
+               if (evsel) {
+                       evsel->pmu_name = name;
+                       return 0;
+               } else {
+                       return -ENOMEM;
+               }
        }
 
        if (perf_pmu__check_alias(pmu, head_config, &info))
@@ -1276,18 +1281,12 @@ static int __parse_events_add_pmu(struct parse_events_state *parse_state,
                evsel->snapshot = info.snapshot;
                evsel->metric_expr = info.metric_expr;
                evsel->metric_name = info.metric_name;
+               evsel->pmu_name = name;
        }
 
        return evsel ? 0 : -ENOMEM;
 }
 
-int parse_events_add_pmu(struct parse_events_state *parse_state,
-                        struct list_head *list, char *name,
-                        struct list_head *head_config)
-{
-       return __parse_events_add_pmu(parse_state, list, name, head_config, false);
-}
-
 int parse_events_multi_pmu_add(struct parse_events_state *parse_state,
                               char *str, struct list_head **listp)
 {
@@ -1317,8 +1316,8 @@ int parse_events_multi_pmu_add(struct parse_events_state *parse_state,
                                        return -1;
                                list_add_tail(&term->list, head);
 
-                               if (!__parse_events_add_pmu(parse_state, list,
-                                                           pmu->name, head, true)) {
+                               if (!parse_events_add_pmu(parse_state, list,
+                                                         pmu->name, head, true)) {
                                        pr_debug("%s -> %s/%s/\n", str,
                                                 pmu->name, alias->str);
                                        ok++;
index 88108cd..5015cfd 100644 (file)
@@ -167,7 +167,7 @@ int parse_events_add_breakpoint(struct list_head *list, int *idx,
                                void *ptr, char *type, u64 len);
 int parse_events_add_pmu(struct parse_events_state *parse_state,
                         struct list_head *list, char *name,
-                        struct list_head *head_config);
+                        struct list_head *head_config, bool auto_merge_stats);
 
 int parse_events_multi_pmu_add(struct parse_events_state *parse_state,
                               char *str,
index 655ecff..a1a01b1 100644 (file)
@@ -175,7 +175,7 @@ bpf_source  [^,{}]+\.c[a-zA-Z0-9._]*
 num_dec                [0-9]+
 num_hex                0x[a-fA-F0-9]+
 num_raw_hex    [a-fA-F0-9]+
-name           [a-zA-Z_*?][a-zA-Z0-9_*?.]*
+name           [a-zA-Z_*?\[\]][a-zA-Z0-9_*?.\[\]]*
 name_minus     [a-zA-Z_*?][a-zA-Z0-9\-_*?.:]*
 drv_cfg_term   [a-zA-Z0-9_\.]+(=[a-zA-Z0-9_*?\.:]+)?
 /* If you add a modifier you need to update check_modifier() */
index e81a20e..7afeb80 100644 (file)
@@ -8,6 +8,7 @@
 
 #define YYDEBUG 1
 
+#include <fnmatch.h>
 #include <linux/compiler.h>
 #include <linux/list.h>
 #include <linux/types.h>
@@ -231,9 +232,13 @@ PE_NAME opt_event_config
                YYABORT;
 
        ALLOC_LIST(list);
-       if (parse_events_add_pmu(_parse_state, list, $1, $2)) {
+       if (parse_events_add_pmu(_parse_state, list, $1, $2, false)) {
                struct perf_pmu *pmu = NULL;
                int ok = 0;
+               char *pattern;
+
+               if (asprintf(&pattern, "%s*", $1) < 0)
+                       YYABORT;
 
                while ((pmu = perf_pmu__scan(pmu)) != NULL) {
                        char *name = pmu->name;
@@ -241,14 +246,19 @@ PE_NAME opt_event_config
                        if (!strncmp(name, "uncore_", 7) &&
                            strncmp($1, "uncore_", 7))
                                name += 7;
-                       if (!strncmp($1, name, strlen($1))) {
-                               if (parse_events_copy_term_list(orig_terms, &terms))
+                       if (!fnmatch(pattern, name, 0)) {
+                               if (parse_events_copy_term_list(orig_terms, &terms)) {
+                                       free(pattern);
                                        YYABORT;
-                               if (!parse_events_add_pmu(_parse_state, list, pmu->name, terms))
+                               }
+                               if (!parse_events_add_pmu(_parse_state, list, pmu->name, terms, true))
                                        ok++;
                                parse_events_terms__delete(terms);
                        }
                }
+
+               free(pattern);
+
                if (!ok)
                        YYABORT;
        }
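
Replacing the strncmp() prefix test with fnmatch() gives the PMU name full glob semantics, so one event spec can fan out to several uncore PMU instances. A standalone illustration of the matching rule; the PMU names are invented, and the trailing "*" is appended exactly as the asprintf() above builds the pattern:

/* Standalone demo of the fnmatch()-based PMU name matching above. */
#include <fnmatch.h>
#include <stdio.h>

int main(void)
{
	const char *pmus[] = { "uncore_cbox_0", "uncore_cbox_1", "uncore_imc", "cpu" };
	const char *pattern = "uncore_cbox*";	/* from the user spec "uncore_cbox" */
	unsigned int i;

	for (i = 0; i < sizeof(pmus) / sizeof(pmus[0]); i++)
		printf("%-14s %s\n", pmus[i],
		       fnmatch(pattern, pmus[i], 0) == 0 ? "matches" : "-");
	return 0;
}
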
index 57e38fd..064bdcb 100644 (file)
@@ -351,7 +351,7 @@ static int pmu_aliases_parse(char *dir, struct list_head *head)
                if (pmu_alias_info_file(name))
                        continue;
 
-               snprintf(path, PATH_MAX, "%s/%s", dir, name);
+               scnprintf(path, PATH_MAX, "%s/%s", dir, name);
 
                file = fopen(path, "r");
                if (!file) {
@@ -576,6 +576,34 @@ char * __weak get_cpuid_str(struct perf_pmu *pmu __maybe_unused)
        return NULL;
 }
 
+/*
+ * Return zero when the cpuid from the mapfile.csv matches the cpuid
+ * string generated on this platform; otherwise return non-zero.
+ */
+int __weak strcmp_cpuid_str(const char *mapcpuid, const char *cpuid)
+{
+       regex_t re;
+       regmatch_t pmatch[1];
+       int match;
+
+       if (regcomp(&re, mapcpuid, REG_EXTENDED) != 0) {
+               /* Warn unable to generate match particular string. */
+               pr_info("Invalid regular expression %s\n", mapcpuid);
+               return 1;
+       }
+
+       match = !regexec(&re, cpuid, 1, pmatch, 0);
+       regfree(&re);
+       if (match) {
+               size_t match_len = (pmatch[0].rm_eo - pmatch[0].rm_so);
+
+               /* Verify the entire string matched. */
+               if (match_len == strlen(cpuid))
+                       return 0;
+       }
+       return 1;
+}
+
 static char *perf_pmu__getcpuid(struct perf_pmu *pmu)
 {
        char *cpuid;
@@ -610,31 +638,14 @@ struct pmu_events_map *perf_pmu__find_map(struct perf_pmu *pmu)
 
        i = 0;
        for (;;) {
-               regex_t re;
-               regmatch_t pmatch[1];
-               int match;
-
                map = &pmu_events_map[i++];
                if (!map->table) {
                        map = NULL;
                        break;
                }
 
-               if (regcomp(&re, map->cpuid, REG_EXTENDED) != 0) {
-                       /* Warn unable to generate match particular string. */
-                       pr_info("Invalid regular expression %s\n", map->cpuid);
+               if (!strcmp_cpuid_str(map->cpuid, cpuid))
                        break;
-               }
-
-               match = !regexec(&re, cpuid, 1, pmatch, 0);
-               regfree(&re);
-               if (match) {
-                       size_t match_len = (pmatch[0].rm_eo - pmatch[0].rm_so);
-
-                       /* Verify the entire string matched. */
-                       if (match_len == strlen(cpuid))
-                               break;
-               }
        }
        free(cpuid);
        return map;
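
strcmp_cpuid_str() treats the mapfile cpuid as an extended regex but only accepts a match spanning the entire platform cpuid, so a short vendor prefix cannot accidentally claim a longer string. A standalone copy of that check, with invented cpuid strings:

/*
 * Standalone copy of the strcmp_cpuid_str() logic above: compile the
 * mapfile cpuid as an ERE and require the match to cover the whole
 * platform cpuid. The sample strings below are invented.
 */
#include <regex.h>
#include <stdio.h>
#include <string.h>

static int cpuid_cmp(const char *mapcpuid, const char *cpuid)
{
	regex_t re;
	regmatch_t pmatch[1];
	int match;

	if (regcomp(&re, mapcpuid, REG_EXTENDED) != 0)
		return 1;	/* bad pattern: treat as mismatch */

	match = !regexec(&re, cpuid, 1, pmatch, 0);
	regfree(&re);

	/* Length equality implies the match started at offset 0. */
	if (match && (size_t)(pmatch[0].rm_eo - pmatch[0].rm_so) == strlen(cpuid))
		return 0;
	return 1;
}

int main(void)
{
	printf("%d\n", cpuid_cmp("GenuineIntel-6-3[CF]", "GenuineIntel-6-3C"));   /* 0 */
	printf("%d\n", cpuid_cmp("GenuineIntel-6-3[CF]", "GenuineIntel-6-3C-X")); /* 1 */
	return 0;
}
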
index a5731de..c37fbef 100644 (file)
@@ -423,20 +423,20 @@ static int convert_variable_fields(Dwarf_Die *vr_die, const char *varname,
                pr_warning("Failed to get the type of %s.\n", varname);
                return -ENOENT;
        }
-       pr_debug2("Var real type: (%x)\n", (unsigned)dwarf_dieoffset(&type));
+       pr_debug2("Var real type: %s (%x)\n", dwarf_diename(&type),
+                 (unsigned)dwarf_dieoffset(&type));
        tag = dwarf_tag(&type);
 
        if (field->name[0] == '[' &&
            (tag == DW_TAG_array_type || tag == DW_TAG_pointer_type)) {
-               if (field->next)
-                       /* Save original type for next field */
-                       memcpy(die_mem, &type, sizeof(*die_mem));
+               /* Save original type for next field or type */
+               memcpy(die_mem, &type, sizeof(*die_mem));
                /* Get the type of this array */
                if (die_get_real_type(&type, &type) == NULL) {
                        pr_warning("Failed to get the type of %s.\n", varname);
                        return -ENOENT;
                }
-               pr_debug2("Array real type: (%x)\n",
+               pr_debug2("Array real type: %s (%x)\n", dwarf_diename(&type),
                         (unsigned)dwarf_dieoffset(&type));
                if (tag == DW_TAG_pointer_type) {
                        ref = zalloc(sizeof(struct probe_trace_arg_ref));
@@ -448,9 +448,6 @@ static int convert_variable_fields(Dwarf_Die *vr_die, const char *varname,
                                *ref_ptr = ref;
                }
                ref->offset += dwarf_bytesize(&type) * field->index;
-               if (!field->next)
-                       /* Save vr_die for converting types */
-                       memcpy(die_mem, vr_die, sizeof(*die_mem));
                goto next;
        } else if (tag == DW_TAG_pointer_type) {
                /* Check the pointer and dereference */
index b1e999b..b956868 100644 (file)
 #include "print_binary.h"
 #include "thread_map.h"
 
+#if PY_MAJOR_VERSION < 3
+#define _PyUnicode_FromString(arg) \
+  PyString_FromString(arg)
+#define _PyUnicode_AsString(arg) \
+  PyString_AsString(arg)
+#define _PyUnicode_FromFormat(...) \
+  PyString_FromFormat(__VA_ARGS__)
+#define _PyLong_FromLong(arg) \
+  PyInt_FromLong(arg)
+
+#else
+
+#define _PyUnicode_FromString(arg) \
+  PyUnicode_FromString(arg)
+#define _PyUnicode_FromFormat(...) \
+  PyUnicode_FromFormat(__VA_ARGS__)
+#define _PyLong_FromLong(arg) \
+  PyLong_FromLong(arg)
+#endif
+
+#ifndef Py_TYPE
+#define Py_TYPE(ob) (((PyObject*)(ob))->ob_type)
+#endif
+
 /*
  * Provide these two so that we don't have to link against callchain.c and
  * start dragging hist.c, etc.
@@ -49,7 +73,11 @@ int eprintf(int level, int var, const char *fmt, ...)
 # define PyVarObject_HEAD_INIT(type, size) PyObject_HEAD_INIT(type) size,
 #endif
 
+#if PY_MAJOR_VERSION < 3
 PyMODINIT_FUNC initperf(void);
+#else
+PyMODINIT_FUNC PyInit_perf(void);
+#endif
 
 #define member_def(type, member, ptype, help) \
        { #member, ptype, \
@@ -107,7 +135,7 @@ static PyObject *pyrf_mmap_event__repr(struct pyrf_event *pevent)
                     pevent->event.mmap.pgoff, pevent->event.mmap.filename) < 0) {
                ret = PyErr_NoMemory();
        } else {
-               ret = PyString_FromString(s);
+               ret = _PyUnicode_FromString(s);
                free(s);
        }
        return ret;
@@ -138,7 +166,7 @@ static PyMemberDef pyrf_task_event__members[] = {
 
 static PyObject *pyrf_task_event__repr(struct pyrf_event *pevent)
 {
-       return PyString_FromFormat("{ type: %s, pid: %u, ppid: %u, tid: %u, "
+       return _PyUnicode_FromFormat("{ type: %s, pid: %u, ppid: %u, tid: %u, "
                                   "ptid: %u, time: %" PRIu64 "}",
                                   pevent->event.header.type == PERF_RECORD_FORK ? "fork" : "exit",
                                   pevent->event.fork.pid,
@@ -171,7 +199,7 @@ static PyMemberDef pyrf_comm_event__members[] = {
 
 static PyObject *pyrf_comm_event__repr(struct pyrf_event *pevent)
 {
-       return PyString_FromFormat("{ type: comm, pid: %u, tid: %u, comm: %s }",
+       return _PyUnicode_FromFormat("{ type: comm, pid: %u, tid: %u, comm: %s }",
                                   pevent->event.comm.pid,
                                   pevent->event.comm.tid,
                                   pevent->event.comm.comm);
@@ -202,7 +230,7 @@ static PyObject *pyrf_throttle_event__repr(struct pyrf_event *pevent)
 {
        struct throttle_event *te = (struct throttle_event *)(&pevent->event.header + 1);
 
-       return PyString_FromFormat("{ type: %sthrottle, time: %" PRIu64 ", id: %" PRIu64
+       return _PyUnicode_FromFormat("{ type: %sthrottle, time: %" PRIu64 ", id: %" PRIu64
                                   ", stream_id: %" PRIu64 " }",
                                   pevent->event.header.type == PERF_RECORD_THROTTLE ? "" : "un",
                                   te->time, te->id, te->stream_id);
@@ -237,7 +265,7 @@ static PyObject *pyrf_lost_event__repr(struct pyrf_event *pevent)
                     pevent->event.lost.id, pevent->event.lost.lost) < 0) {
                ret = PyErr_NoMemory();
        } else {
-               ret = PyString_FromString(s);
+               ret = _PyUnicode_FromString(s);
                free(s);
        }
        return ret;
@@ -264,7 +292,7 @@ static PyMemberDef pyrf_read_event__members[] = {
 
 static PyObject *pyrf_read_event__repr(struct pyrf_event *pevent)
 {
-       return PyString_FromFormat("{ type: read, pid: %u, tid: %u }",
+       return _PyUnicode_FromFormat("{ type: read, pid: %u, tid: %u }",
                                   pevent->event.read.pid,
                                   pevent->event.read.tid);
        /*
@@ -299,7 +327,7 @@ static PyObject *pyrf_sample_event__repr(struct pyrf_event *pevent)
        if (asprintf(&s, "{ type: sample }") < 0) {
                ret = PyErr_NoMemory();
        } else {
-               ret = PyString_FromString(s);
+               ret = _PyUnicode_FromString(s);
                free(s);
        }
        return ret;
@@ -330,7 +358,7 @@ tracepoint_field(struct pyrf_event *pe, struct format_field *field)
                }
                if (field->flags & FIELD_IS_STRING &&
                    is_printable_array(data + offset, len)) {
-                       ret = PyString_FromString((char *)data + offset);
+                       ret = _PyUnicode_FromString((char *)data + offset);
                } else {
                        ret = PyByteArray_FromStringAndSize((const char *) data + offset, len);
                        field->flags &= ~FIELD_IS_STRING;
@@ -352,7 +380,7 @@ tracepoint_field(struct pyrf_event *pe, struct format_field *field)
 static PyObject*
 get_tracepoint_field(struct pyrf_event *pevent, PyObject *attr_name)
 {
-       const char *str = PyString_AsString(PyObject_Str(attr_name));
+       const char *str = _PyUnicode_AsString(PyObject_Str(attr_name));
        struct perf_evsel *evsel = pevent->evsel;
        struct format_field *field;
 
@@ -416,7 +444,7 @@ static PyObject *pyrf_context_switch_event__repr(struct pyrf_event *pevent)
                     !!(pevent->event.header.misc & PERF_RECORD_MISC_SWITCH_OUT)) < 0) {
                ret = PyErr_NoMemory();
        } else {
-               ret = PyString_FromString(s);
+               ret = _PyUnicode_FromString(s);
                free(s);
        }
        return ret;
@@ -528,7 +556,7 @@ static int pyrf_cpu_map__init(struct pyrf_cpu_map *pcpus,
 static void pyrf_cpu_map__delete(struct pyrf_cpu_map *pcpus)
 {
        cpu_map__put(pcpus->cpus);
-       pcpus->ob_type->tp_free((PyObject*)pcpus);
+       Py_TYPE(pcpus)->tp_free((PyObject*)pcpus);
 }
 
 static Py_ssize_t pyrf_cpu_map__length(PyObject *obj)
@@ -597,7 +625,7 @@ static int pyrf_thread_map__init(struct pyrf_thread_map *pthreads,
 static void pyrf_thread_map__delete(struct pyrf_thread_map *pthreads)
 {
        thread_map__put(pthreads->threads);
-       pthreads->ob_type->tp_free((PyObject*)pthreads);
+       Py_TYPE(pthreads)->tp_free((PyObject*)pthreads);
 }
 
 static Py_ssize_t pyrf_thread_map__length(PyObject *obj)
@@ -759,7 +787,7 @@ static int pyrf_evsel__init(struct pyrf_evsel *pevsel,
 static void pyrf_evsel__delete(struct pyrf_evsel *pevsel)
 {
        perf_evsel__exit(&pevsel->evsel);
-       pevsel->ob_type->tp_free((PyObject*)pevsel);
+       Py_TYPE(pevsel)->tp_free((PyObject*)pevsel);
 }
 
 static PyObject *pyrf_evsel__open(struct pyrf_evsel *pevsel,
@@ -850,7 +878,7 @@ static int pyrf_evlist__init(struct pyrf_evlist *pevlist,
 static void pyrf_evlist__delete(struct pyrf_evlist *pevlist)
 {
        perf_evlist__exit(&pevlist->evlist);
-       pevlist->ob_type->tp_free((PyObject*)pevlist);
+       Py_TYPE(pevlist)->tp_free((PyObject*)pevlist);
 }
 
 static PyObject *pyrf_evlist__mmap(struct pyrf_evlist *pevlist,
@@ -902,12 +930,16 @@ static PyObject *pyrf_evlist__get_pollfd(struct pyrf_evlist *pevlist,
 
        for (i = 0; i < evlist->pollfd.nr; ++i) {
                PyObject *file;
+#if PY_MAJOR_VERSION < 3
                FILE *fp = fdopen(evlist->pollfd.entries[i].fd, "r");
 
                if (fp == NULL)
                        goto free_list;
 
                file = PyFile_FromFile(fp, "perf", "r", NULL);
+#else
+               file = PyFile_FromFd(evlist->pollfd.entries[i].fd, "perf", "r", -1, NULL, NULL, NULL, 1);
+#endif
                if (file == NULL)
                        goto free_list;
 
@@ -951,13 +983,18 @@ static PyObject *pyrf_evlist__read_on_cpu(struct pyrf_evlist *pevlist,
        union perf_event *event;
        int sample_id_all = 1, cpu;
        static char *kwlist[] = { "cpu", "sample_id_all", NULL };
+       struct perf_mmap *md;
        int err;
 
        if (!PyArg_ParseTupleAndKeywords(args, kwargs, "i|i", kwlist,
                                         &cpu, &sample_id_all))
                return NULL;
 
-       event = perf_evlist__mmap_read(evlist, cpu);
+       md = &evlist->mmap[cpu];
+       if (perf_mmap__read_init(md) < 0)
+               goto end;
+
+       event = perf_mmap__read_event(md);
        if (event != NULL) {
                PyObject *pyevent = pyrf_event__new(event);
                struct pyrf_event *pevent = (struct pyrf_event *)pyevent;
@@ -975,14 +1012,14 @@ static PyObject *pyrf_evlist__read_on_cpu(struct pyrf_evlist *pevlist,
                err = perf_evsel__parse_sample(evsel, event, &pevent->sample);
 
                /* Consume the event only after we parsed it out. */
-               perf_evlist__mmap_consume(evlist, cpu);
+               perf_mmap__consume(md);
 
                if (err)
                        return PyErr_Format(PyExc_OSError,
                                            "perf: can't parse sample, err=%d", err);
                return pyevent;
        }
-
+end:
        Py_INCREF(Py_None);
        return Py_None;
 }
@@ -1194,9 +1231,9 @@ static PyObject *pyrf__tracepoint(struct pyrf_evsel *pevsel,
 
        tp_format = trace_event__tp_format(sys, name);
        if (IS_ERR(tp_format))
-               return PyInt_FromLong(-1);
+               return _PyLong_FromLong(-1);
 
-       return PyInt_FromLong(tp_format->id);
+       return _PyLong_FromLong(tp_format->id);
 }
 
 static PyMethodDef perf__methods[] = {
@@ -1209,11 +1246,31 @@ static PyMethodDef perf__methods[] = {
        { .ml_name = NULL, }
 };
 
+#if PY_MAJOR_VERSION < 3
 PyMODINIT_FUNC initperf(void)
+#else
+PyMODINIT_FUNC PyInit_perf(void)
+#endif
 {
        PyObject *obj;
        int i;
-       PyObject *dict, *module = Py_InitModule("perf", perf__methods);
+       PyObject *dict;
+#if PY_MAJOR_VERSION < 3
+       PyObject *module = Py_InitModule("perf", perf__methods);
+#else
+       static struct PyModuleDef moduledef = {
+               PyModuleDef_HEAD_INIT,
+               "perf",                 /* m_name */
+               "",                     /* m_doc */
+               -1,                     /* m_size */
+               perf__methods,          /* m_methods */
+               NULL,                   /* m_reload */
+               NULL,                   /* m_traverse */
+               NULL,                   /* m_clear */
+               NULL,                   /* m_free */
+       };
+       PyObject *module = PyModule_Create(&moduledef);
+#endif
 
        if (module == NULL ||
            pyrf_event__setup_types() < 0 ||
@@ -1221,7 +1278,11 @@ PyMODINIT_FUNC initperf(void)
            pyrf_evsel__setup_types() < 0 ||
            pyrf_thread_map__setup_types() < 0 ||
            pyrf_cpu_map__setup_types() < 0)
+#if PY_MAJOR_VERSION < 3
                return;
+#else
+               return module;
+#endif
 
        /* The page_size is placed in the util object. */
        page_size = sysconf(_SC_PAGE_SIZE);
@@ -1270,7 +1331,7 @@ PyMODINIT_FUNC initperf(void)
                goto error;
 
        for (i = 0; perf__constants[i].name != NULL; i++) {
-               obj = PyInt_FromLong(perf__constants[i].value);
+               obj = _PyLong_FromLong(perf__constants[i].value);
                if (obj == NULL)
                        goto error;
                PyDict_SetItemString(dict, perf__constants[i].name, obj);
@@ -1280,6 +1341,9 @@ PyMODINIT_FUNC initperf(void)
 error:
        if (PyErr_Occurred())
                PyErr_SetString(PyExc_ImportError, "perf: Init failed!");
+#if PY_MAJOR_VERSION >= 3
+       return module;
+#endif
 }
 
 /*
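
The dual-init pattern above is the standard recipe for a C extension that builds against both major Python versions: Python 2 registers via Py_InitModule() and returns nothing, while Python 3 returns a module created from a PyModuleDef. A toy module (name and method invented) showing the same split in isolation:

/*
 * Toy Python extension using the same Python 2/3 dual-init pattern as
 * the perf module above. The module name "toy" and its one method are
 * invented for illustration; build it against the desired Python.h.
 */
#include <Python.h>

static PyObject *toy_answer(PyObject *self, PyObject *args)
{
#if PY_MAJOR_VERSION < 3
	return PyInt_FromLong(42);
#else
	return PyLong_FromLong(42);
#endif
}

static PyMethodDef toy_methods[] = {
	{ "answer", toy_answer, METH_NOARGS, "return 42" },
	{ NULL, NULL, 0, NULL }
};

#if PY_MAJOR_VERSION < 3
PyMODINIT_FUNC inittoy(void)
{
	Py_InitModule("toy", toy_methods);	/* Python 2: no return value */
}
#else
static struct PyModuleDef toy_moduledef = {
	PyModuleDef_HEAD_INIT,
	"toy",		/* m_name */
	"",		/* m_doc */
	-1,		/* m_size */
	toy_methods,	/* m_methods */
	NULL, NULL, NULL, NULL,
};

PyMODINIT_FUNC PyInit_toy(void)
{
	return PyModule_Create(&toy_moduledef);	/* Python 3 must return the module */
}
#endif
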
index 6f09e49..9cfc7bf 100644 (file)
@@ -5,6 +5,7 @@
 #include "parse-events.h"
 #include <errno.h>
 #include <api/fs/fs.h>
+#include <subcmd/parse-options.h>
 #include "util.h"
 #include "cloexec.h"
 
@@ -219,11 +220,21 @@ static int record_opts__config_freq(struct record_opts *opts)
         * User specified frequency is over current maximum.
         */
        if (user_freq && (max_rate < opts->freq)) {
-               pr_err("Maximum frequency rate (%u) reached.\n"
-                  "Please use -F freq option with lower value or consider\n"
-                  "tweaking /proc/sys/kernel/perf_event_max_sample_rate.\n",
-                  max_rate);
-               return -1;
+               if (opts->strict_freq) {
+                       pr_err("error: Maximum frequency rate (%'u Hz) exceeded.\n"
+                              "       Please use -F freq option with a lower value or consider\n"
+                              "       tweaking /proc/sys/kernel/perf_event_max_sample_rate.\n",
+                              max_rate);
+                       return -1;
+               } else {
+                       pr_warning("warning: Maximum frequency rate (%'u Hz) exceeded, throttling from %'u Hz to %'u Hz.\n"
+                                  "         The limit can be raised via /proc/sys/kernel/perf_event_max_sample_rate.\n"
+                                  "         The kernel will lower it when perf's interrupts take too long.\n"
+                                  "         Use --strict-freq to disable this throttling, refusing to record.\n",
+                                  max_rate, opts->freq, max_rate);
+
+                       opts->freq = max_rate;
+               }
        }
 
        /*
@@ -291,3 +302,25 @@ out_delete:
        perf_evlist__delete(temp_evlist);
        return ret;
 }
+
+int record__parse_freq(const struct option *opt, const char *str, int unset __maybe_unused)
+{
+       unsigned int freq;
+       struct record_opts *opts = opt->value;
+
+       if (!str)
+               return -EINVAL;
+
+       if (strcasecmp(str, "max") == 0) {
+               if (get_max_rate(&freq)) {
+                       pr_err("couldn't read /proc/sys/kernel/perf_event_max_sample_rate\n");
+                       return -1;
+               }
+               pr_info("info: Using a maximum frequency rate of %'d Hz\n", freq);
+       } else {
+               freq = atoi(str);
+       }
+
+       opts->user_freq = freq;
+       return 0;
+}
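
record__parse_freq() accepts either a number or the literal "max", which resolves to the perf_event_max_sample_rate sysctl that the throttling warning above points at. A standalone sketch of that parse, reading the sysctl directly and with error handling reduced to returning -1:

/* Standalone sketch of the -F/--freq argument parsing above. */
#include <stdio.h>
#include <stdlib.h>
#include <strings.h>

static int parse_freq(const char *str, unsigned int *freq)
{
	if (!str)
		return -1;

	if (strcasecmp(str, "max") == 0) {
		FILE *f = fopen("/proc/sys/kernel/perf_event_max_sample_rate", "r");

		if (!f || fscanf(f, "%u", freq) != 1) {
			if (f)
				fclose(f);
			return -1;
		}
		fclose(f);
	} else {
		*freq = atoi(str);	/* numeric frequency in Hz */
	}
	return 0;
}

int main(int argc, char **argv)
{
	unsigned int freq;

	if (argc > 1 && !parse_freq(argv[1], &freq)) {
		printf("sampling frequency: %u Hz\n", freq);
		return 0;
	}
	return 1;
}
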
index ea07088..10dd5fc 100644 (file)
 #include "print_binary.h"
 #include "stat.h"
 
+#if PY_MAJOR_VERSION < 3
+#define _PyUnicode_FromString(arg) \
+  PyString_FromString(arg)
+#define _PyUnicode_FromStringAndSize(arg1, arg2) \
+  PyString_FromStringAndSize((arg1), (arg2))
+#define _PyBytes_FromStringAndSize(arg1, arg2) \
+  PyString_FromStringAndSize((arg1), (arg2))
+#define _PyLong_FromLong(arg) \
+  PyInt_FromLong(arg)
+#define _PyLong_AsLong(arg) \
+  PyInt_AsLong(arg)
+#define _PyCapsule_New(arg1, arg2, arg3) \
+  PyCObject_FromVoidPtr((arg1), (arg2))
+
 PyMODINIT_FUNC initperf_trace_context(void);
+#else
+#define _PyUnicode_FromString(arg) \
+  PyUnicode_FromString(arg)
+#define _PyUnicode_FromStringAndSize(arg1, arg2) \
+  PyUnicode_FromStringAndSize((arg1), (arg2))
+#define _PyBytes_FromStringAndSize(arg1, arg2) \
+  PyBytes_FromStringAndSize((arg1), (arg2))
+#define _PyLong_FromLong(arg) \
+  PyLong_FromLong(arg)
+#define _PyLong_AsLong(arg) \
+  PyLong_AsLong(arg)
+#define _PyCapsule_New(arg1, arg2, arg3) \
+  PyCapsule_New((arg1), (arg2), (arg3))
+
+PyMODINIT_FUNC PyInit_perf_trace_context(void);
+#endif
 
 #define TRACE_EVENT_TYPE_MAX                           \
        ((1 << (sizeof(unsigned short) * 8)) - 1)
@@ -135,7 +165,7 @@ static int get_argument_count(PyObject *handler)
                PyObject *arg_count_obj = PyObject_GetAttrString(code_obj,
                        "co_argcount");
                if (arg_count_obj) {
-                       arg_count = (int) PyInt_AsLong(arg_count_obj);
+                       arg_count = (int) _PyLong_AsLong(arg_count_obj);
                        Py_DECREF(arg_count_obj);
                }
                Py_DECREF(code_obj);
@@ -182,10 +212,10 @@ static void define_value(enum print_arg_type field_type,
 
        value = eval_flag(field_value);
 
-       PyTuple_SetItem(t, n++, PyString_FromString(ev_name));
-       PyTuple_SetItem(t, n++, PyString_FromString(field_name));
-       PyTuple_SetItem(t, n++, PyInt_FromLong(value));
-       PyTuple_SetItem(t, n++, PyString_FromString(field_str));
+       PyTuple_SetItem(t, n++, _PyUnicode_FromString(ev_name));
+       PyTuple_SetItem(t, n++, _PyUnicode_FromString(field_name));
+       PyTuple_SetItem(t, n++, _PyLong_FromLong(value));
+       PyTuple_SetItem(t, n++, _PyUnicode_FromString(field_str));
 
        try_call_object(handler_name, t);
 
@@ -223,10 +253,10 @@ static void define_field(enum print_arg_type field_type,
        if (!t)
                Py_FatalError("couldn't create Python tuple");
 
-       PyTuple_SetItem(t, n++, PyString_FromString(ev_name));
-       PyTuple_SetItem(t, n++, PyString_FromString(field_name));
+       PyTuple_SetItem(t, n++, _PyUnicode_FromString(ev_name));
+       PyTuple_SetItem(t, n++, _PyUnicode_FromString(field_name));
        if (field_type == PRINT_FLAGS)
-               PyTuple_SetItem(t, n++, PyString_FromString(delim));
+               PyTuple_SetItem(t, n++, _PyUnicode_FromString(delim));
 
        try_call_object(handler_name, t);
 
@@ -325,12 +355,12 @@ static PyObject *get_field_numeric_entry(struct event_format *event,
                if (field->flags & FIELD_IS_SIGNED) {
                        if ((long long)val >= LONG_MIN &&
                                        (long long)val <= LONG_MAX)
-                               obj = PyInt_FromLong(val);
+                               obj = _PyLong_FromLong(val);
                        else
                                obj = PyLong_FromLongLong(val);
                } else {
                        if (val <= LONG_MAX)
-                               obj = PyInt_FromLong(val);
+                               obj = _PyLong_FromLong(val);
                        else
                                obj = PyLong_FromUnsignedLongLong(val);
                }
@@ -389,9 +419,9 @@ static PyObject *python_process_callchain(struct perf_sample *sample,
                        pydict_set_item_string_decref(pysym, "end",
                                        PyLong_FromUnsignedLongLong(node->sym->end));
                        pydict_set_item_string_decref(pysym, "binding",
-                                       PyInt_FromLong(node->sym->binding));
+                                       _PyLong_FromLong(node->sym->binding));
                        pydict_set_item_string_decref(pysym, "name",
-                                       PyString_FromStringAndSize(node->sym->name,
+                                       _PyUnicode_FromStringAndSize(node->sym->name,
                                                        node->sym->namelen));
                        pydict_set_item_string_decref(pyelem, "sym", pysym);
                }
@@ -406,7 +436,7 @@ static PyObject *python_process_callchain(struct perf_sample *sample,
                                        dsoname = map->dso->name;
                        }
                        pydict_set_item_string_decref(pyelem, "dso",
-                                       PyString_FromString(dsoname));
+                                       _PyUnicode_FromString(dsoname));
                }
 
                callchain_cursor_advance(&callchain_cursor);
@@ -483,16 +513,16 @@ static PyObject *get_perf_sample_dict(struct perf_sample *sample,
        if (!dict_sample)
                Py_FatalError("couldn't create Python dictionary");
 
-       pydict_set_item_string_decref(dict, "ev_name", PyString_FromString(perf_evsel__name(evsel)));
-       pydict_set_item_string_decref(dict, "attr", PyString_FromStringAndSize(
+       pydict_set_item_string_decref(dict, "ev_name", _PyUnicode_FromString(perf_evsel__name(evsel)));
+       pydict_set_item_string_decref(dict, "attr", _PyUnicode_FromStringAndSize(
                        (const char *)&evsel->attr, sizeof(evsel->attr)));
 
        pydict_set_item_string_decref(dict_sample, "pid",
-                       PyInt_FromLong(sample->pid));
+                       _PyLong_FromLong(sample->pid));
        pydict_set_item_string_decref(dict_sample, "tid",
-                       PyInt_FromLong(sample->tid));
+                       _PyLong_FromLong(sample->tid));
        pydict_set_item_string_decref(dict_sample, "cpu",
-                       PyInt_FromLong(sample->cpu));
+                       _PyLong_FromLong(sample->cpu));
        pydict_set_item_string_decref(dict_sample, "ip",
                        PyLong_FromUnsignedLongLong(sample->ip));
        pydict_set_item_string_decref(dict_sample, "time",
@@ -504,17 +534,17 @@ static PyObject *get_perf_sample_dict(struct perf_sample *sample,
        set_sample_read_in_dict(dict_sample, sample, evsel);
        pydict_set_item_string_decref(dict, "sample", dict_sample);
 
-       pydict_set_item_string_decref(dict, "raw_buf", PyString_FromStringAndSize(
+       pydict_set_item_string_decref(dict, "raw_buf", _PyBytes_FromStringAndSize(
                        (const char *)sample->raw_data, sample->raw_size));
        pydict_set_item_string_decref(dict, "comm",
-                       PyString_FromString(thread__comm_str(al->thread)));
+                       _PyUnicode_FromString(thread__comm_str(al->thread)));
        if (al->map) {
                pydict_set_item_string_decref(dict, "dso",
-                       PyString_FromString(al->map->dso->name));
+                       _PyUnicode_FromString(al->map->dso->name));
        }
        if (al->sym) {
                pydict_set_item_string_decref(dict, "symbol",
-                       PyString_FromString(al->sym->name));
+                       _PyUnicode_FromString(al->sym->name));
        }
 
        pydict_set_item_string_decref(dict, "callchain", callchain);
@@ -574,9 +604,9 @@ static void python_process_tracepoint(struct perf_sample *sample,
        scripting_context->event_data = data;
        scripting_context->pevent = evsel->tp_format->pevent;
 
-       context = PyCObject_FromVoidPtr(scripting_context, NULL);
+       context = _PyCapsule_New(scripting_context, NULL, NULL);
 
-       PyTuple_SetItem(t, n++, PyString_FromString(handler_name));
+       PyTuple_SetItem(t, n++, _PyUnicode_FromString(handler_name));
        PyTuple_SetItem(t, n++, context);
 
        /* ip unwinding */
@@ -585,18 +615,18 @@ static void python_process_tracepoint(struct perf_sample *sample,
        Py_INCREF(callchain);
 
        if (!dict) {
-               PyTuple_SetItem(t, n++, PyInt_FromLong(cpu));
-               PyTuple_SetItem(t, n++, PyInt_FromLong(s));
-               PyTuple_SetItem(t, n++, PyInt_FromLong(ns));
-               PyTuple_SetItem(t, n++, PyInt_FromLong(pid));
-               PyTuple_SetItem(t, n++, PyString_FromString(comm));
+               PyTuple_SetItem(t, n++, _PyLong_FromLong(cpu));
+               PyTuple_SetItem(t, n++, _PyLong_FromLong(s));
+               PyTuple_SetItem(t, n++, _PyLong_FromLong(ns));
+               PyTuple_SetItem(t, n++, _PyLong_FromLong(pid));
+               PyTuple_SetItem(t, n++, _PyUnicode_FromString(comm));
                PyTuple_SetItem(t, n++, callchain);
        } else {
-               pydict_set_item_string_decref(dict, "common_cpu", PyInt_FromLong(cpu));
-               pydict_set_item_string_decref(dict, "common_s", PyInt_FromLong(s));
-               pydict_set_item_string_decref(dict, "common_ns", PyInt_FromLong(ns));
-               pydict_set_item_string_decref(dict, "common_pid", PyInt_FromLong(pid));
-               pydict_set_item_string_decref(dict, "common_comm", PyString_FromString(comm));
+               pydict_set_item_string_decref(dict, "common_cpu", _PyLong_FromLong(cpu));
+               pydict_set_item_string_decref(dict, "common_s", _PyLong_FromLong(s));
+               pydict_set_item_string_decref(dict, "common_ns", _PyLong_FromLong(ns));
+               pydict_set_item_string_decref(dict, "common_pid", _PyLong_FromLong(pid));
+               pydict_set_item_string_decref(dict, "common_comm", _PyUnicode_FromString(comm));
                pydict_set_item_string_decref(dict, "common_callchain", callchain);
        }
        for (field = event->format.fields; field; field = field->next) {
@@ -615,7 +645,7 @@ static void python_process_tracepoint(struct perf_sample *sample,
                        }
                        if (field->flags & FIELD_IS_STRING &&
                            is_printable_array(data + offset, len)) {
-                               obj = PyString_FromString((char *) data + offset);
+                               obj = _PyUnicode_FromString((char *) data + offset);
                        } else {
                                obj = PyByteArray_FromStringAndSize((const char *) data + offset, len);
                                field->flags &= ~FIELD_IS_STRING;
@@ -668,7 +698,7 @@ static PyObject *tuple_new(unsigned int sz)
 static int tuple_set_u64(PyObject *t, unsigned int pos, u64 val)
 {
 #if BITS_PER_LONG == 64
-       return PyTuple_SetItem(t, pos, PyInt_FromLong(val));
+       return PyTuple_SetItem(t, pos, _PyLong_FromLong(val));
 #endif
 #if BITS_PER_LONG == 32
        return PyTuple_SetItem(t, pos, PyLong_FromLongLong(val));
@@ -677,12 +707,12 @@ static int tuple_set_u64(PyObject *t, unsigned int pos, u64 val)
 
 static int tuple_set_s32(PyObject *t, unsigned int pos, s32 val)
 {
-       return PyTuple_SetItem(t, pos, PyInt_FromLong(val));
+       return PyTuple_SetItem(t, pos, _PyLong_FromLong(val));
 }
 
 static int tuple_set_string(PyObject *t, unsigned int pos, const char *s)
 {
-       return PyTuple_SetItem(t, pos, PyString_FromString(s));
+       return PyTuple_SetItem(t, pos, _PyUnicode_FromString(s));
 }
 
 static int python_export_evsel(struct db_export *dbe, struct perf_evsel *evsel)
@@ -1029,8 +1059,8 @@ process_stat(struct perf_evsel *counter, int cpu, int thread, u64 tstamp,
                return;
        }
 
-       PyTuple_SetItem(t, n++, PyInt_FromLong(cpu));
-       PyTuple_SetItem(t, n++, PyInt_FromLong(thread));
+       PyTuple_SetItem(t, n++, _PyLong_FromLong(cpu));
+       PyTuple_SetItem(t, n++, _PyLong_FromLong(thread));
 
        tuple_set_u64(t, n++, tstamp);
        tuple_set_u64(t, n++, count->val);
@@ -1212,27 +1242,58 @@ static void set_table_handlers(struct tables *tables)
        SET_TABLE_HANDLER(call_return);
 }
 
+#if PY_MAJOR_VERSION < 3
+static void _free_command_line(const char **command_line, int num)
+{
+       free(command_line);
+}
+#else
+static void _free_command_line(wchar_t **command_line, int num)
+{
+       int i;
+       for (i = 0; i < num; i++)
+               PyMem_RawFree(command_line[i]);
+       free(command_line);
+}
+#endif
+
+
 /*
  * Start trace script
  */
 static int python_start_script(const char *script, int argc, const char **argv)
 {
        struct tables *tables = &tables_global;
+#if PY_MAJOR_VERSION < 3
        const char **command_line;
+#else
+       wchar_t **command_line;
+#endif
        char buf[PATH_MAX];
        int i, err = 0;
        FILE *fp;
 
+#if PY_MAJOR_VERSION < 3
        command_line = malloc((argc + 1) * sizeof(const char *));
        command_line[0] = script;
        for (i = 1; i < argc + 1; i++)
                command_line[i] = argv[i - 1];
+#else
+       command_line = malloc((argc + 1) * sizeof(wchar_t *));
+       command_line[0] = Py_DecodeLocale(script, NULL);
+       for (i = 1; i < argc + 1; i++)
+               command_line[i] = Py_DecodeLocale(argv[i - 1], NULL);
+#endif
 
        Py_Initialize();
 
+#if PY_MAJOR_VERSION < 3
        initperf_trace_context();
-
        PySys_SetArgv(argc + 1, (char **)command_line);
+#else
+       PyInit_perf_trace_context();
+       PySys_SetArgv(argc + 1, command_line);
+#endif
 
        fp = fopen(script, "r");
        if (!fp) {
@@ -1262,12 +1323,12 @@ static int python_start_script(const char *script, int argc, const char **argv)
                        goto error;
        }
 
-       free(command_line);
+       _free_command_line(command_line, argc + 1);
 
        return err;
 error:
        Py_Finalize();
-       free(command_line);
+       _free_command_line(command_line, argc + 1);
 
        return err;
 }
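
Note: the _PyUnicode_*, _PyLong_*, _PyBytes_* and _PyCapsule_New wrappers used throughout these hunks are defined outside the context shown here; presumably they are thin aliases that pick the Python 2 or Python 3 C API at compile time, roughly along these lines (a sketch, not the exact upstream definitions):

    #include <Python.h>

    #if PY_MAJOR_VERSION < 3
    /* Python 2: map the neutral names onto the classic 2.x API. */
    #define _PyUnicode_FromString(arg)             PyString_FromString(arg)
    #define _PyUnicode_FromStringAndSize(s, len)   PyString_FromStringAndSize((s), (len))
    #define _PyBytes_FromStringAndSize(s, len)     PyString_FromStringAndSize((s), (len))
    #define _PyLong_FromLong(arg)                  PyInt_FromLong(arg)
    #define _PyCapsule_New(ptr, name, dtor)        PyCObject_FromVoidPtr((ptr), (dtor))
    #else
    /* Python 3: PyString/PyInt/PyCObject are gone; use their successors. */
    #define _PyUnicode_FromString(arg)             PyUnicode_FromString(arg)
    #define _PyUnicode_FromStringAndSize(s, len)   PyUnicode_FromStringAndSize((s), (len))
    #define _PyBytes_FromStringAndSize(s, len)     PyBytes_FromStringAndSize((s), (len))
    #define _PyLong_FromLong(arg)                  PyLong_FromLong(arg)
    #define _PyCapsule_New(ptr, name, dtor)        PyCapsule_New((ptr), (name), (dtor))
    #endif

With wrappers of this shape in place, the call sites compile unchanged against either interpreter, which is what lets the hunks above replace each PyString_/PyInt_/PyCObject_ call one-for-one.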
index af415fe..001be4f 100644
@@ -1,4 +1,4 @@
-#!/usr/bin/python2
+#!/usr/bin/python
 
 from os import getenv
 
@@ -28,6 +28,8 @@ class install_lib(_install_lib):
 cflags = getenv('CFLAGS', '').split()
 # switch off several checks (need to be at the end of cflags list)
 cflags += ['-fno-strict-aliasing', '-Wno-write-strings', '-Wno-unused-parameter' ]
+if cc != "clang":
+    cflags += ['-Wno-cast-function-type' ]
 
 src_perf  = getenv('srctree') + '/tools/perf'
 build_lib = getenv('PYTHON_EXTBUILD_LIB')
@@ -35,11 +37,11 @@ build_tmp = getenv('PYTHON_EXTBUILD_TMP')
 libtraceevent = getenv('LIBTRACEEVENT')
 libapikfs = getenv('LIBAPI')
 
-ext_sources = [f.strip() for f in file('util/python-ext-sources')
+ext_sources = [f.strip() for f in open('util/python-ext-sources')
                                if len(f.strip()) > 0 and f[0] != '#']
 
 # use full paths with source files
-ext_sources = map(lambda x: '%s/%s' % (src_perf, x) , ext_sources)
+ext_sources = list(map(lambda x: '%s/%s' % (src_perf, x) , ext_sources))
 
 perf = Extension('perf',
                  sources = ext_sources,
index 2da4d04..e8514f6 100644
@@ -111,17 +111,20 @@ struct sort_entry sort_thread = {
 
 /* --sort comm */
 
+/*
+ * We can't use pointer comparison in functions below,
+ * because it gives different results based on pointer
+ * values, which could break some sorting assumptions.
+ */
 static int64_t
 sort__comm_cmp(struct hist_entry *left, struct hist_entry *right)
 {
-       /* Compare the addr that should be unique among comm */
        return strcmp(comm__str(right->comm), comm__str(left->comm));
 }
 
 static int64_t
 sort__comm_collapse(struct hist_entry *left, struct hist_entry *right)
 {
-       /* Compare the addr that should be unique among comm */
        return strcmp(comm__str(right->comm), comm__str(left->comm));
 }
 
index 3223565..a0061e0 100644
@@ -92,7 +92,7 @@ static const char *id_str[PERF_STAT_EVSEL_ID__MAX] = {
 };
 #undef ID
 
-void perf_stat_evsel_id_init(struct perf_evsel *evsel)
+static void perf_stat_evsel_id_init(struct perf_evsel *evsel)
 {
        struct perf_stat_evsel *ps = evsel->stats;
        int i;
index dbc6f71..8f56ba4 100644
@@ -90,6 +90,8 @@ struct perf_stat_config {
        bool            scale;
        FILE            *output;
        unsigned int    interval;
+       unsigned int    timeout;
+       int             times;
        struct runtime_stat *stats;
        int             stats_num;
 };
@@ -126,8 +128,6 @@ bool __perf_evsel_stat__is(struct perf_evsel *evsel,
 #define perf_stat_evsel__is(evsel, id) \
        __perf_evsel_stat__is(evsel, PERF_STAT_EVSEL_ID__ ## id)
 
-void perf_stat_evsel_id_init(struct perf_evsel *evsel);
-
 extern struct runtime_stat rt_stat;
 extern struct stats walltime_nsecs_stats;
 
index cc065d4..62b2dd2 100644
@@ -1582,7 +1582,7 @@ int dso__load(struct dso *dso, struct map *map)
                bool next_slot = false;
                bool is_reg;
                bool nsexit;
-               int sirc;
+               int sirc = -1;
 
                enum dso_binary_type symtab_type = binary_type_symtab[i];
 
@@ -1600,16 +1600,14 @@ int dso__load(struct dso *dso, struct map *map)
                        nsinfo__mountns_exit(&nsc);
 
                is_reg = is_regular_file(name);
-               sirc = symsrc__init(ss, dso, name, symtab_type);
+               if (is_reg)
+                       sirc = symsrc__init(ss, dso, name, symtab_type);
 
                if (nsexit)
                        nsinfo__mountns_enter(dso->nsinfo, &nsc);
 
-               if (!is_reg || sirc < 0) {
-                       if (sirc >= 0)
-                               symsrc__destroy(ss);
+               if (!is_reg || sirc < 0)
                        continue;
-               }
 
                if (!syms_ss && symsrc__has_symtab(ss)) {
                        syms_ss = ss;
@@ -1960,8 +1958,7 @@ static int dso__load_guest_kernel_sym(struct dso *dso, struct map *map)
                pr_debug("Using %s for symbols\n", kallsyms_filename);
        if (err > 0 && !dso__is_kcore(dso)) {
                dso->binary_type = DSO_BINARY_TYPE__GUEST_KALLSYMS;
-               machine__mmap_name(machine, path, sizeof(path));
-               dso__set_long_name(dso, strdup(path), true);
+               dso__set_long_name(dso, machine->mmap_name, false);
                map__fixup_start(map);
                map__fixup_end(map);
        }
@@ -2224,3 +2221,25 @@ int symbol__config_symfs(const struct option *opt __maybe_unused,
        free(bf);
        return 0;
 }
+
+struct mem_info *mem_info__get(struct mem_info *mi)
+{
+       if (mi)
+               refcount_inc(&mi->refcnt);
+       return mi;
+}
+
+void mem_info__put(struct mem_info *mi)
+{
+       if (mi && refcount_dec_and_test(&mi->refcnt))
+               free(mi);
+}
+
+struct mem_info *mem_info__new(void)
+{
+       struct mem_info *mi = zalloc(sizeof(*mi));
+
+       if (mi)
+               refcount_set(&mi->refcnt, 1);
+       return mi;
+}
index 0563f33..70c1674 100644
@@ -200,9 +200,10 @@ struct branch_info {
 };
 
 struct mem_info {
-       struct addr_map_symbol iaddr;
-       struct addr_map_symbol daddr;
-       union perf_mem_data_src data_src;
+       struct addr_map_symbol  iaddr;
+       struct addr_map_symbol  daddr;
+       union perf_mem_data_src data_src;
+       refcount_t              refcnt;
 };
 
 struct addr_location {
@@ -389,4 +390,16 @@ int sdt_notes__get_count(struct list_head *start);
 #define SDT_NOTE_NAME "stapsdt"
 #define NR_ADDR 3
 
+struct mem_info *mem_info__new(void);
+struct mem_info *mem_info__get(struct mem_info *mi);
+void   mem_info__put(struct mem_info *mi);
+
+static inline void __mem_info__zput(struct mem_info **mi)
+{
+       mem_info__put(*mi);
+       *mi = NULL;
+}
+
+#define mem_info__zput(mi) __mem_info__zput(&mi)
+
 #endif /* __PERF_SYMBOL */
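
Together with the symbol.c hunk above, this gives mem_info the usual get/put lifetime rules; a minimal usage sketch (hypothetical caller, not part of this patch):

    static int use_mem_info(void)
    {
            struct mem_info *mi = mem_info__new();  /* refcnt == 1 */

            if (mi == NULL)
                    return -1;

            mem_info__get(mi);      /* hand a second reference to another owner */
            mem_info__put(mi);      /* ... which eventually drops it            */

            mem_info__zput(mi);     /* drop the last reference and NULL the
                                     * local pointer so it cannot be reused     */
            return 0;
    }

mem_info__zput() is the safer form for locals and struct members that outlive the reference, since it leaves no dangling pointer behind.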
index 303bdb8..895122d 100644
@@ -30,6 +30,14 @@ static const char **syscalltbl_native = syscalltbl_x86_64;
 #include <asm/syscalls_64.c>
 const int syscalltbl_native_max_id = SYSCALLTBL_S390_64_MAX_ID;
 static const char **syscalltbl_native = syscalltbl_s390_64;
+#elif defined(__powerpc64__)
+#include <asm/syscalls_64.c>
+const int syscalltbl_native_max_id = SYSCALLTBL_POWERPC_64_MAX_ID;
+static const char **syscalltbl_native = syscalltbl_powerpc_64;
+#elif defined(__powerpc__)
+#include <asm/syscalls_32.c>
+const int syscalltbl_native_max_id = SYSCALLTBL_POWERPC_32_MAX_ID;
+static const char **syscalltbl_native = syscalltbl_powerpc_32;
 #endif
 
 struct syscall {
index 40cfa36..14d44c3 100644
@@ -26,7 +26,6 @@ struct thread {
        pid_t                   ppid;
        int                     cpu;
        refcount_t              refcnt;
-       char                    shortname[3];
        bool                    comm_set;
        int                     comm_len;
        bool                    dead; /* if set thread has exited */
index 3e1038f..5d467d8 100644
@@ -32,6 +32,7 @@ static void thread_map__reset(struct thread_map *map, int start, int nr)
        size_t size = (nr - start) * sizeof(map->map[0]);
 
        memset(&map->map[start], 0, size);
+       map->err_thread = -1;
 }
 
 static struct thread_map *thread_map__realloc(struct thread_map *map, int nr)
@@ -323,7 +324,7 @@ out_free_threads:
 }
 
 struct thread_map *thread_map__new_str(const char *pid, const char *tid,
-                                      uid_t uid, bool per_thread)
+                                      uid_t uid, bool all_threads)
 {
        if (pid)
                return thread_map__new_by_pid_str(pid);
@@ -331,7 +332,7 @@ struct thread_map *thread_map__new_str(const char *pid, const char *tid,
        if (!tid && uid != UINT_MAX)
                return thread_map__new_by_uid(uid);
 
-       if (per_thread)
+       if (all_threads)
                return thread_map__new_all_cpus();
 
        return thread_map__new_by_tid_str(tid);
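
The rename from per_thread to all_threads matches what the flag actually selects: when neither a pid list, a tid list, nor a uid filter is given, passing true falls back to thread_map__new_all_cpus(), i.e. a map covering every thread on the system. A hypothetical call site:

    struct thread_map *map;

    /* No pid/tid/uid filter: uid == UINT_MAX means "no uid", per the
     * check above, so ask for a map of all threads. */
    map = thread_map__new_str(NULL /* pid */, NULL /* tid */,
                              UINT_MAX /* uid */, true /* all_threads */);
    if (map == NULL)
            return -1;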
index 0a806b9..2f689c9 100644
@@ -14,6 +14,7 @@ struct thread_map_data {
 struct thread_map {
        refcount_t refcnt;
        int nr;
+       int err_thread;
        struct thread_map_data map[];
 };
 
@@ -31,7 +32,7 @@ struct thread_map *thread_map__get(struct thread_map *map);
 void thread_map__put(struct thread_map *map);
 
 struct thread_map *thread_map__new_str(const char *pid,
-               const char *tid, uid_t uid, bool per_thread);
+               const char *tid, uid_t uid, bool all_threads);
 
 struct thread_map *thread_map__new_by_tid_str(const char *tid_str);
 
index 1e9c974..8e969f2 100644
@@ -50,7 +50,7 @@ static int __report_module(struct addr_location *al, u64 ip,
 
        if (!mod)
                mod = dwfl_report_elf(ui->dwfl, dso->short_name,
-                                     dso->long_name, -1, al->map->start,
+                                     (dso->symsrc_filename ? dso->symsrc_filename : dso->long_name), -1, al->map->start,
                                      false);
 
        return mod && dwfl_addrmodule(ui->dwfl, ip) == mod ? 0 : -1;