perf trace: Migrate BPF augmentation to use a skeleton
author    Ian Rogers <irogers@google.com>              Thu, 10 Aug 2023 18:48:51 +0000 (11:48 -0700)
committer Arnaldo Carvalho de Melo <acme@redhat.com>   Tue, 15 Aug 2023 19:41:48 +0000 (16:41 -0300)
Previously, a BPF event built from augmented_raw_syscalls.c could be
used to enable augmentation of syscalls by perf trace. As BPF events
are no longer supported, switch to using a BPF skeleton which, when
attached, explicitly opens the sys_enter and sys_exit tracepoints.
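
In practice this makes 'perf trace' follow the standard libbpf skeleton
lifecycle. A simplified sketch of the new builtin-trace.c flow (the
helper name is made up for illustration, error handling trimmed, see
the hunks below for the real code):

  #include "bpf_skel/augmented_raw_syscalls.skel.h"

  static struct augmented_raw_syscalls_bpf *augmented_syscalls__prepare(void)
  {
          struct augmented_raw_syscalls_bpf *skel;
          struct bpf_program *prog;

          skel = augmented_raw_syscalls_bpf__open();
          if (skel == NULL)
                  return NULL;

          /*
           * Only sys_enter/sys_exit attach to their tracepoints directly,
           * the per-syscall augmenters are reached via tail calls, so
           * don't auto-attach them.
           */
          bpf_object__for_each_program(prog, skel->obj) {
                  if (prog != skel->progs.sys_enter && prog != skel->progs.sys_exit)
                          bpf_program__set_autoattach(prog, /*autoattach=*/false);
          }

          if (augmented_raw_syscalls_bpf__load(skel) < 0 ||
              augmented_raw_syscalls_bpf__attach(skel) < 0) {
                  augmented_raw_syscalls_bpf__destroy(skel);
                  return NULL;
          }

          return skel;
  }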

The dump map is removed, as debugging via it wasn't supported by the
augmentation and bpf_printk() can be used when necessary.
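
If ad hoc debugging of the augmenters is needed, a temporary
bpf_printk() can be dropped into any of the BPF programs and its output
read from /sys/kernel/debug/tracing/trace_pipe, e.g. (illustrative
only, not part of this patch):

          /* e.g. inside sys_enter_openat(), temporary debug output: */
          bpf_printk("openat: nr=%ld filename ptr=%lx", args->syscall_nr, args->args[1]);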

Remove tools/perf/examples/bpf/augmented_raw_syscalls.c so that the
rename/migration to a BPF skeleton captures that this was the source.

Committer notes:

Some minor stylistic changes to help visualize the diff.

Use libbpf_strerror when failing to load the augmented raw syscalls BPF.

Use bpf_object__for_each_program(prog, trace.skel->obj) to disable
auto-attachment for all but the sys_enter and sys_exit tracepoints, to
avoid having to add extra lines as we add support for more
pointer-receiving syscalls.

Committer testing:

  # perf trace -e open*  --max-events=10
     0.000 ( 0.022 ms): systemd-oomd/1151 openat(dfd: CWD, filename: "/proc/meminfo", flags: RDONLY|CLOEXEC)    = 11
   208.833 (         ): gnome-terminal/3223 openat(dfd: CWD, filename: "/proc/51250/cmdline")                  ...
   249.993 ( 0.024 ms): systemd-oomd/1151 openat(dfd: CWD, filename: "/proc/meminfo", flags: RDONLY|CLOEXEC)    = 11
   250.118 ( 0.030 ms): systemd-oomd/1151 openat(dfd: CWD, filename: "/sys/fs/cgroup/user.slice/user-1000.slice/user@1000.service/memory.pressure", flags: RDONLY|CLOEXEC) = 11
   250.205 ( 0.016 ms): systemd-oomd/1151 openat(dfd: CWD, filename: "/sys/fs/cgroup/user.slice/user-1000.slice/user@1000.service/memory.current", flags: RDONLY|CLOEXEC) = 11
   250.244 ( 0.014 ms): systemd-oomd/1151 openat(dfd: CWD, filename: "/sys/fs/cgroup/user.slice/user-1000.slice/user@1000.service/memory.min", flags: RDONLY|CLOEXEC) = 11
   250.282 ( 0.014 ms): systemd-oomd/1151 openat(dfd: CWD, filename: "/sys/fs/cgroup/user.slice/user-1000.slice/user@1000.service/memory.low", flags: RDONLY|CLOEXEC) = 11
   250.320 ( 0.014 ms): systemd-oomd/1151 openat(dfd: CWD, filename: "/sys/fs/cgroup/user.slice/user-1000.slice/user@1000.service/memory.swap.current", flags: RDONLY|CLOEXEC) = 11
   250.355 ( 0.014 ms): systemd-oomd/1151 openat(dfd: CWD, filename: "/sys/fs/cgroup/user.slice/user-1000.slice/user@1000.service/memory.stat", flags: RDONLY|CLOEXEC) = 11
   250.717 ( 0.016 ms): systemd-oomd/1151 openat(dfd: CWD, filename: "/sys/fs/cgroup/user.slice/user-1001.slice/user@1001.service/memory.pressure", flags: RDONLY|CLOEXEC) = 11
  #
  # perf trace -e *nanosleep*  --max-events=10
         ? (         ): SCTP timer/28304  ... [continued]: clock_nanosleep())                                  = 0
     0.007 (10.058 ms): SCTP timer/28304 clock_nanosleep(rqtp: { .tv_sec: 0, .tv_nsec: 10000000 }, rmtp: 0x7f0466b78de0) = 0
    10.069 (         ): SCTP timer/28304 clock_nanosleep(rqtp: { .tv_sec: 0, .tv_nsec: 10000000 }, rmtp: 0x7f0466b78de0) ...
    10.069 (10.056 ms): SCTP timer/28304  ... [continued]: clock_nanosleep())                                  = 0
    17.059 (         ): podman/3572 nanosleep(rqtp: 0x7fc4f4d75be0)                                    ...
    17.059 (10.061 ms): podman/3572  ... [continued]: nanosleep())                                        = 0
    20.131 (10.059 ms): SCTP timer/28304 clock_nanosleep(rqtp: { .tv_sec: 0, .tv_nsec: 10000000 }, rmtp: 0x7f0466b78de0) = 0
    30.195 (10.038 ms): SCTP timer/28304 clock_nanosleep(rqtp: { .tv_sec: 0, .tv_nsec: 10000000 }, rmtp: 0x7f0466b78de0) = 0
    40.238 (10.057 ms): SCTP timer/28304 clock_nanosleep(rqtp: { .tv_sec: 0, .tv_nsec: 10000000 }, rmtp: 0x7f0466b78de0) = 0
    50.301 (         ): SCTP timer/28304 clock_nanosleep(rqtp: { .tv_sec: 0, .tv_nsec: 10000000 }, rmtp: 0x7f0466b78de0) ...
  #

  # perf trace -e perf_event*  -- perf stat -e instructions,cycles,cache-misses sleep 0.1
     0.000 ( 0.011 ms): perf/51331 perf_event_open(attr_uptr: { type: 0 (PERF_TYPE_HARDWARE), size: 136, config: 0x1 (PERF_COUNT_HW_INSTRUCTIONS), sample_type: IDENTIFIER, read_format: TOTAL_TIME_ENABLED|TOTAL_TIME_RUNNING, disabled: 1, inherit: 1, enable_on_exec: 1, exclude_guest: 1 }, pid: 51332 (perf), cpu: -1, group_fd: -1, flags: FD_CLOEXEC) = 3
     0.013 ( 0.003 ms): perf/51331 perf_event_open(attr_uptr: { type: 0 (PERF_TYPE_HARDWARE), size: 136, config: 0 (PERF_COUNT_HW_CPU_CYCLES), sample_type: IDENTIFIER, read_format: TOTAL_TIME_ENABLED|TOTAL_TIME_RUNNING, disabled: 1, inherit: 1, enable_on_exec: 1, exclude_guest: 1 }, pid: 51332 (perf), cpu: -1, group_fd: -1, flags: FD_CLOEXEC) = 4
     0.017 ( 0.002 ms): perf/51331 perf_event_open(attr_uptr: { type: 0 (PERF_TYPE_HARDWARE), size: 136, config: 0x3 (PERF_COUNT_HW_CACHE_MISSES), sample_type: IDENTIFIER, read_format: TOTAL_TIME_ENABLED|TOTAL_TIME_RUNNING, disabled: 1, inherit: 1, enable_on_exec: 1, exclude_guest: 1 }, pid: 51332 (perf), cpu: -1, group_fd: -1, flags: FD_CLOEXEC) = 5

 Performance counter stats for 'sleep 0.1':

         1,495,051      instructions                     #    1.11  insn per cycle
         1,347,641      cycles
            35,424      cache-misses

       0.100935279 seconds time elapsed

       0.000924000 seconds user
       0.000000000 seconds sys

  #

  # perf trace -e connect*  ssh localhost
       0.000 ( 0.012 ms): ssh/51346 connect(fd: 4, uservaddr: { .family: LOCAL, path: /var/lib/sss/pipes/nss }, addrlen: 110) = -1 ECONNREFUSED (Connection refused)
       0.118 ( 0.004 ms): ssh/51346 connect(fd: 6, uservaddr: { .family: LOCAL, path: /var/lib/sss/pipes/nss }, addrlen: 110) = -1 ECONNREFUSED (Connection refused)
       0.399 ( 0.007 ms): ssh/51346 connect(fd: 4, uservaddr: { .family: LOCAL, path: /var/lib/sss/pipes/nss }, addrlen: 110) = -1 ECONNREFUSED (Connection refused)
       0.426 ( 0.003 ms): ssh/51346 connect(fd: 4, uservaddr: { .family: LOCAL, path: /var/lib/sss/pipes/nss }, addrlen: 110) = -1 ECONNREFUSED (Connection refused)
       0.754 ( 0.009 ms): ssh/51346 connect(fd: 4, uservaddr: { .family: INET, port: 22, addr: 127.0.0.1 }, addrlen: 16) = 0
       0.771 ( 0.010 ms): ssh/51346 connect(fd: 4, uservaddr: { .family: INET6, port: 22, addr: ::1 }, addrlen: 28) = 0
       0.798 ( 0.053 ms): ssh/51346 connect(fd: 4, uservaddr: { .family: INET6, port: 22, addr: ::1 }, addrlen: 28) = 0
       0.870 ( 0.004 ms): ssh/51346 connect(fd: 5, uservaddr: { .family: LOCAL, path: /var/lib/sss/pipes/nss }, addrlen: 110) = -1 ECONNREFUSED (Connection refused)
       0.904 ( 0.003 ms): ssh/51346 connect(fd: 5, uservaddr: { .family: LOCAL, path: /var/lib/sss/pipes/nss }, addrlen: 110) = -1 ECONNREFUSED (Connection refused)
       0.930 ( 0.003 ms): ssh/51346 connect(fd: 5, uservaddr: { .family: LOCAL, path: /var/lib/sss/pipes/nss }, addrlen: 110) = -1 ECONNREFUSED (Connection refused)
       0.957 ( 0.003 ms): ssh/51346 connect(fd: 5, uservaddr: { .family: LOCAL, path: /var/lib/sss/pipes/nss }, addrlen: 110) = -1 ECONNREFUSED (Connection refused)
       0.981 ( 0.003 ms): ssh/51346 connect(fd: 5, uservaddr: { .family: LOCAL, path: /var/lib/sss/pipes/nss }, addrlen: 110) = -1 ECONNREFUSED (Connection refused)
       1.006 ( 0.004 ms): ssh/51346 connect(fd: 5, uservaddr: { .family: LOCAL, path: /var/lib/sss/pipes/nss }, addrlen: 110) = -1 ECONNREFUSED (Connection refused)
       1.036 ( 0.005 ms): ssh/51346 connect(fd: 5, uservaddr: { .family: LOCAL, path: /var/lib/sss/pipes/nss }, addrlen: 110) = -1 ECONNREFUSED (Connection refused)
      65.077 ( 0.022 ms): ssh/51346 connect(fd: 5, uservaddr: { .family: LOCAL, path: /var/run/.heim_org.h5l.kcm-socket }, addrlen: 110) = 0
      66.608 ( 0.014 ms): ssh/51346 connect(fd: 5, uservaddr: { .family: LOCAL, path: /var/run/.heim_org.h5l.kcm-socket }, addrlen: 110) = 0
  root@localhost's password:
  #

  # perf trace -e sendto*  ping -c 2 localhost
  PING localhost(localhost (::1)) 56 data bytes
  64 bytes from localhost (::1): icmp_seq=1 ttl=64 time=0.024 ms
       0.000 ( 0.011 ms): ping/51357 sendto(fd: 5, buff: 0x7ffcca35e620, len: 20, addr: { .family: NETLINK }, addr_len: 0xc) = 20
       0.135 ( 0.026 ms): ping/51357 sendto(fd: 4, buff: 0x5601398f7b20, len: 64, addr: { .family: INET6, port: 58, addr: ::1 }, addr_len: 0x1c) = 64
    1014.929 ( 0.050 ms): ping/51357 sendto(fd: 4, buff: 0x5601398f7b20, len: 64, flags: CONFIRM, addr: { .family: INET6, port: 58, addr: ::1 }, addr_len: 0x1c) = 64
  64 bytes from localhost (::1): icmp_seq=2 ttl=64 time=0.046 ms

  --- localhost ping statistics ---
  2 packets transmitted, 2 received, 0% packet loss, time 1015ms
  rtt min/avg/max/mdev = 0.024/0.035/0.046/0.011 ms
  #

Signed-off-by: Ian Rogers <irogers@google.com>
Acked-by: Jiri Olsa <jolsa@kernel.org>
Tested-by: Arnaldo Carvalho de Melo <acme@redhat.com>
Cc: Adrian Hunter <adrian.hunter@intel.com>
Cc: Alexander Shishkin <alexander.shishkin@linux.intel.com>
Cc: Andi Kleen <ak@linux.intel.com>
Cc: Andrii Nakryiko <andrii@kernel.org>
Cc: Anshuman Khandual <anshuman.khandual@arm.com>
Cc: Athira Rajeev <atrajeev@linux.vnet.ibm.com>
Cc: Brendan Gregg <brendan.d.gregg@gmail.com>
Cc: Carsten Haitzler <carsten.haitzler@arm.com>
Cc: Eduard Zingerman <eddyz87@gmail.com>
Cc: Fangrui Song <maskray@google.com>
Cc: He Kuang <hekuang@huawei.com>
Cc: Ingo Molnar <mingo@redhat.com>
Cc: James Clark <james.clark@arm.com>
Cc: Kan Liang <kan.liang@linux.intel.com>
Cc: Leo Yan <leo.yan@linaro.org>
Cc: Madhavan Srinivasan <maddy@linux.ibm.com>
Cc: Mark Rutland <mark.rutland@arm.com>
Cc: Namhyung Kim <namhyung@kernel.org>
Cc: Nathan Chancellor <nathan@kernel.org>
Cc: Naveen N. Rao <naveen.n.rao@linux.vnet.ibm.com>
Cc: Nick Desaulniers <ndesaulniers@google.com>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Ravi Bangoria <ravi.bangoria@amd.com>
Cc: Rob Herring <robh@kernel.org>
Cc: Tiezhu Yang <yangtiezhu@loongson.cn>
Cc: Tom Rix <trix@redhat.com>
Cc: Wang Nan <wangnan0@huawei.com>
Cc: Wang ShaoBo <bobo.shaobowang@huawei.com>
Cc: Yang Jihong <yangjihong1@huawei.com>
Cc: Yonghong Song <yhs@fb.com>
Cc: YueHaibing <yuehaibing@huawei.com>
Cc: bpf@vger.kernel.org
Cc: llvm@lists.linux.dev
Link: https://lore.kernel.org/r/20230810184853.2860737-3-irogers@google.com
Signed-off-by: Arnaldo Carvalho de Melo <acme@redhat.com>
tools/perf/Makefile.perf
tools/perf/builtin-trace.c
tools/perf/examples/bpf/augmented_raw_syscalls.c [deleted file]
tools/perf/util/bpf_skel/augmented_raw_syscalls.bpf.c [new file with mode: 0644]

diff --git a/tools/perf/Makefile.perf b/tools/perf/Makefile.perf
index 5370d7b..40663c6 100644
@@ -1038,6 +1038,7 @@ SKELETONS += $(SKEL_OUT)/bperf_cgroup.skel.h $(SKEL_OUT)/func_latency.skel.h
 SKELETONS += $(SKEL_OUT)/off_cpu.skel.h $(SKEL_OUT)/lock_contention.skel.h
 SKELETONS += $(SKEL_OUT)/kwork_trace.skel.h $(SKEL_OUT)/sample_filter.skel.h
 SKELETONS += $(SKEL_OUT)/bench_uprobe.skel.h
+SKELETONS += $(SKEL_OUT)/augmented_raw_syscalls.skel.h
 
 $(SKEL_TMP_OUT) $(LIBAPI_OUTPUT) $(LIBBPF_OUTPUT) $(LIBPERF_OUTPUT) $(LIBSUBCMD_OUTPUT) $(LIBSYMBOL_OUTPUT):
        $(Q)$(MKDIR) -p $@
diff --git a/tools/perf/builtin-trace.c b/tools/perf/builtin-trace.c
index 5986246..0ebfa95 100644
@@ -19,6 +19,9 @@
 #ifdef HAVE_LIBBPF_SUPPORT
 #include <bpf/bpf.h>
 #include <bpf/libbpf.h>
+#ifdef HAVE_BPF_SKEL
+#include "bpf_skel/augmented_raw_syscalls.skel.h"
+#endif
 #endif
 #include "util/bpf_map.h"
 #include "util/rlimit.h"
@@ -127,25 +130,19 @@ struct trace {
        struct syscalltbl       *sctbl;
        struct {
                struct syscall  *table;
-               struct { // per syscall BPF_MAP_TYPE_PROG_ARRAY
-                       struct bpf_map  *sys_enter,
-                                       *sys_exit;
-               }               prog_array;
                struct {
                        struct evsel *sys_enter,
-                                         *sys_exit,
-                                         *augmented;
+                               *sys_exit,
+                               *bpf_output;
                }               events;
-               struct bpf_program *unaugmented_prog;
        } syscalls;
-       struct {
-               struct bpf_map *map;
-       } dump;
+#ifdef HAVE_BPF_SKEL
+       struct augmented_raw_syscalls_bpf *skel;
+#endif
        struct record_opts      opts;
        struct evlist   *evlist;
        struct machine          *host;
        struct thread           *current;
-       struct bpf_object       *bpf_obj;
        struct cgroup           *cgroup;
        u64                     base_time;
        FILE                    *output;
@@ -415,6 +412,7 @@ static int evsel__init_syscall_tp(struct evsel *evsel)
                if (evsel__init_tp_uint_field(evsel, &sc->id, "__syscall_nr") &&
                    evsel__init_tp_uint_field(evsel, &sc->id, "nr"))
                        return -ENOENT;
+
                return 0;
        }
 
@@ -2845,7 +2843,7 @@ static int trace__event_handler(struct trace *trace, struct evsel *evsel,
        if (thread)
                trace__fprintf_comm_tid(trace, thread, trace->output);
 
-       if (evsel == trace->syscalls.events.augmented) {
+       if (evsel == trace->syscalls.events.bpf_output) {
                int id = perf_evsel__sc_tp_uint(evsel, id, sample);
                struct syscall *sc = trace__syscall_info(trace, evsel, id);
 
@@ -3278,24 +3276,16 @@ out_enomem:
        goto out;
 }
 
-#ifdef HAVE_LIBBPF_SUPPORT
-static struct bpf_map *trace__find_bpf_map_by_name(struct trace *trace, const char *name)
-{
-       if (trace->bpf_obj == NULL)
-               return NULL;
-
-       return bpf_object__find_map_by_name(trace->bpf_obj, name);
-}
-
+#ifdef HAVE_BPF_SKEL
 static struct bpf_program *trace__find_bpf_program_by_title(struct trace *trace, const char *name)
 {
        struct bpf_program *pos, *prog = NULL;
        const char *sec_name;
 
-       if (trace->bpf_obj == NULL)
+       if (trace->skel->obj == NULL)
                return NULL;
 
-       bpf_object__for_each_program(pos, trace->bpf_obj) {
+       bpf_object__for_each_program(pos, trace->skel->obj) {
                sec_name = bpf_program__section_name(pos);
                if (sec_name && !strcmp(sec_name, name)) {
                        prog = pos;
@@ -3313,12 +3303,12 @@ static struct bpf_program *trace__find_syscall_bpf_prog(struct trace *trace, str
 
        if (prog_name == NULL) {
                char default_prog_name[256];
-               scnprintf(default_prog_name, sizeof(default_prog_name), "!syscalls:sys_%s_%s", type, sc->name);
+               scnprintf(default_prog_name, sizeof(default_prog_name), "tp/syscalls/sys_%s_%s", type, sc->name);
                prog = trace__find_bpf_program_by_title(trace, default_prog_name);
                if (prog != NULL)
                        goto out_found;
                if (sc->fmt && sc->fmt->alias) {
-                       scnprintf(default_prog_name, sizeof(default_prog_name), "!syscalls:sys_%s_%s", type, sc->fmt->alias);
+                       scnprintf(default_prog_name, sizeof(default_prog_name), "tp/syscalls/sys_%s_%s", type, sc->fmt->alias);
                        prog = trace__find_bpf_program_by_title(trace, default_prog_name);
                        if (prog != NULL)
                                goto out_found;
@@ -3336,7 +3326,7 @@ out_found:
        pr_debug("Couldn't find BPF prog \"%s\" to associate with syscalls:sys_%s_%s, not augmenting it\n",
                 prog_name, type, sc->name);
 out_unaugmented:
-       return trace->syscalls.unaugmented_prog;
+       return trace->skel->progs.syscall_unaugmented;
 }
 
 static void trace__init_syscall_bpf_progs(struct trace *trace, int id)
@@ -3353,13 +3343,13 @@ static void trace__init_syscall_bpf_progs(struct trace *trace, int id)
 static int trace__bpf_prog_sys_enter_fd(struct trace *trace, int id)
 {
        struct syscall *sc = trace__syscall_info(trace, NULL, id);
-       return sc ? bpf_program__fd(sc->bpf_prog.sys_enter) : bpf_program__fd(trace->syscalls.unaugmented_prog);
+       return sc ? bpf_program__fd(sc->bpf_prog.sys_enter) : bpf_program__fd(trace->skel->progs.syscall_unaugmented);
 }
 
 static int trace__bpf_prog_sys_exit_fd(struct trace *trace, int id)
 {
        struct syscall *sc = trace__syscall_info(trace, NULL, id);
-       return sc ? bpf_program__fd(sc->bpf_prog.sys_exit) : bpf_program__fd(trace->syscalls.unaugmented_prog);
+       return sc ? bpf_program__fd(sc->bpf_prog.sys_exit) : bpf_program__fd(trace->skel->progs.syscall_unaugmented);
 }
 
 static struct bpf_program *trace__find_usable_bpf_prog_entry(struct trace *trace, struct syscall *sc)
@@ -3384,7 +3374,7 @@ try_to_find_pair:
                bool is_candidate = false;
 
                if (pair == NULL || pair == sc ||
-                   pair->bpf_prog.sys_enter == trace->syscalls.unaugmented_prog)
+                   pair->bpf_prog.sys_enter == trace->skel->progs.syscall_unaugmented)
                        continue;
 
                for (field = sc->args, candidate_field = pair->args;
@@ -3437,7 +3427,7 @@ try_to_find_pair:
                 */
                if (pair_prog == NULL) {
                        pair_prog = trace__find_syscall_bpf_prog(trace, pair, pair->fmt ? pair->fmt->bpf_prog_name.sys_enter : NULL, "enter");
-                       if (pair_prog == trace->syscalls.unaugmented_prog)
+                       if (pair_prog == trace->skel->progs.syscall_unaugmented)
                                goto next_candidate;
                }
 
@@ -3452,8 +3442,8 @@ try_to_find_pair:
 
 static int trace__init_syscalls_bpf_prog_array_maps(struct trace *trace)
 {
-       int map_enter_fd = bpf_map__fd(trace->syscalls.prog_array.sys_enter),
-           map_exit_fd  = bpf_map__fd(trace->syscalls.prog_array.sys_exit);
+       int map_enter_fd = bpf_map__fd(trace->skel->maps.syscalls_sys_enter);
+       int map_exit_fd  = bpf_map__fd(trace->skel->maps.syscalls_sys_exit);
        int err = 0, key;
 
        for (key = 0; key < trace->sctbl->syscalls.nr_entries; ++key) {
@@ -3515,7 +3505,7 @@ static int trace__init_syscalls_bpf_prog_array_maps(struct trace *trace)
                 * For now we're just reusing the sys_enter prog, and if it
                 * already has an augmenter, we don't need to find one.
                 */
-               if (sc->bpf_prog.sys_enter != trace->syscalls.unaugmented_prog)
+               if (sc->bpf_prog.sys_enter != trace->skel->progs.syscall_unaugmented)
                        continue;
 
                /*
@@ -3538,22 +3528,9 @@ static int trace__init_syscalls_bpf_prog_array_maps(struct trace *trace)
                        break;
        }
 
-
        return err;
 }
-
-#else // HAVE_LIBBPF_SUPPORT
-static struct bpf_map *trace__find_bpf_map_by_name(struct trace *trace __maybe_unused,
-                                                  const char *name __maybe_unused)
-{
-       return NULL;
-}
-
-static int trace__init_syscalls_bpf_prog_array_maps(struct trace *trace __maybe_unused)
-{
-       return 0;
-}
-#endif // HAVE_LIBBPF_SUPPORT
+#endif // HAVE_BPF_SKEL
 
 static int trace__set_ev_qualifier_filter(struct trace *trace)
 {
@@ -3917,13 +3894,31 @@ static int trace__run(struct trace *trace, int argc, const char **argv)
        err = evlist__open(evlist);
        if (err < 0)
                goto out_error_open;
+#ifdef HAVE_BPF_SKEL
+       {
+               struct perf_cpu cpu;
 
+               /*
+                * Set up the __augmented_syscalls__ BPF map to hold for each
+                * CPU the bpf-output event's file descriptor.
+                */
+               perf_cpu_map__for_each_cpu(cpu, i, trace->syscalls.events.bpf_output->core.cpus) {
+                       bpf_map__update_elem(trace->skel->maps.__augmented_syscalls__,
+                                       &cpu.cpu, sizeof(int),
+                                       xyarray__entry(trace->syscalls.events.bpf_output->core.fd,
+                                                      cpu.cpu, 0),
+                                       sizeof(__u32), BPF_ANY);
+               }
+       }
+#endif
        err = trace__set_filter_pids(trace);
        if (err < 0)
                goto out_error_mem;
 
-       if (trace->syscalls.prog_array.sys_enter)
+#ifdef HAVE_BPF_SKEL
+       if (trace->skel->progs.sys_enter)
                trace__init_syscalls_bpf_prog_array_maps(trace);
+#endif
 
        if (trace->ev_qualifier_ids.nr > 0) {
                err = trace__set_ev_qualifier_filter(trace);
@@ -3956,9 +3951,6 @@ static int trace__run(struct trace *trace, int argc, const char **argv)
        if (err < 0)
                goto out_error_apply_filters;
 
-       if (trace->dump.map)
-               bpf_map__fprintf(trace->dump.map, trace->output);
-
        err = evlist__mmap(evlist, trace->opts.mmap_pages);
        if (err < 0)
                goto out_error_mmap;
@@ -4655,6 +4647,18 @@ static void trace__exit(struct trace *trace)
        zfree(&trace->perfconfig_events);
 }
 
+#ifdef HAVE_BPF_SKEL
+static int bpf__setup_bpf_output(struct evlist *evlist)
+{
+       int err = parse_event(evlist, "bpf-output/no-inherit=1,name=__augmented_syscalls__/");
+
+       if (err)
+               pr_debug("ERROR: failed to create the \"__augmented_syscalls__\" bpf-output event\n");
+
+       return err;
+}
+#endif
+
 int cmd_trace(int argc, const char **argv)
 {
        const char *trace_usage[] = {
@@ -4686,7 +4690,6 @@ int cmd_trace(int argc, const char **argv)
                .max_stack = UINT_MAX,
                .max_events = ULONG_MAX,
        };
-       const char *map_dump_str = NULL;
        const char *output_name = NULL;
        const struct option trace_options[] = {
        OPT_CALLBACK('e', "event", &trace, "event",
@@ -4720,9 +4723,6 @@ int cmd_trace(int argc, const char **argv)
        OPT_CALLBACK(0, "duration", &trace, "float",
                     "show only events with duration > N.M ms",
                     trace__set_duration),
-#ifdef HAVE_LIBBPF_SUPPORT
-       OPT_STRING(0, "map-dump", &map_dump_str, "BPF map", "BPF map to periodically dump"),
-#endif
        OPT_BOOLEAN(0, "sched", &trace.sched, "show blocking scheduler events"),
        OPT_INCR('v', "verbose", &verbose, "be more verbose"),
        OPT_BOOLEAN('T', "time", &trace.full_time,
@@ -4849,16 +4849,44 @@ int cmd_trace(int argc, const char **argv)
                                       "cgroup monitoring only available in system-wide mode");
        }
 
-       err = -1;
+#ifdef HAVE_BPF_SKEL
+       trace.skel = augmented_raw_syscalls_bpf__open();
+       if (!trace.skel) {
+               pr_debug("Failed to open augmented syscalls BPF skeleton");
+       } else {
+               /*
+                * Disable attaching the BPF programs except for sys_enter and
+                * sys_exit that tail call into this as necessary.
+                */
+               struct bpf_program *prog;
 
-       if (map_dump_str) {
-               trace.dump.map = trace__find_bpf_map_by_name(&trace, map_dump_str);
-               if (trace.dump.map == NULL) {
-                       pr_err("ERROR: BPF map \"%s\" not found\n", map_dump_str);
-                       goto out;
+               bpf_object__for_each_program(prog, trace.skel->obj) {
+                       if (prog != trace.skel->progs.sys_enter && prog != trace.skel->progs.sys_exit)
+                               bpf_program__set_autoattach(prog, /*autoattach=*/false);
+               }
+
+               err = augmented_raw_syscalls_bpf__load(trace.skel);
+
+               if (err < 0) {
+                       libbpf_strerror(err, bf, sizeof(bf));
+                       pr_debug("Failed to load augmented syscalls BPF skeleton: %s\n", bf);
+               } else {
+                       augmented_raw_syscalls_bpf__attach(trace.skel);
+                       trace__add_syscall_newtp(&trace);
                }
        }
 
+       err = bpf__setup_bpf_output(trace.evlist);
+       if (err) {
+               libbpf_strerror(err, bf, sizeof(bf));
+               pr_err("ERROR: Setup BPF output event failed: %s\n", bf);
+               goto out;
+       }
+       trace.syscalls.events.bpf_output = evlist__last(trace.evlist);
+       assert(!strcmp(evsel__name(trace.syscalls.events.bpf_output), "__augmented_syscalls__"));
+#endif
+       err = -1;
+
        if (trace.trace_pgfaults) {
                trace.opts.sample_address = true;
                trace.opts.sample_time = true;
@@ -4909,7 +4937,7 @@ int cmd_trace(int argc, const char **argv)
         * buffers that are being copied from kernel to userspace, think 'read'
         * syscall.
         */
-       if (trace.syscalls.events.augmented) {
+       if (trace.syscalls.events.bpf_output) {
                evlist__for_each_entry(trace.evlist, evsel) {
                        bool raw_syscalls_sys_exit = strcmp(evsel__name(evsel), "raw_syscalls:sys_exit") == 0;
 
@@ -4918,9 +4946,9 @@ int cmd_trace(int argc, const char **argv)
                                goto init_augmented_syscall_tp;
                        }
 
-                       if (trace.syscalls.events.augmented->priv == NULL &&
+                       if (trace.syscalls.events.bpf_output->priv == NULL &&
                            strstr(evsel__name(evsel), "syscalls:sys_enter")) {
-                               struct evsel *augmented = trace.syscalls.events.augmented;
+                               struct evsel *augmented = trace.syscalls.events.bpf_output;
                                if (evsel__init_augmented_syscall_tp(augmented, evsel) ||
                                    evsel__init_augmented_syscall_tp_args(augmented))
                                        goto out;
@@ -5025,5 +5053,8 @@ out_close:
                fclose(trace.output);
 out:
        trace__exit(&trace);
+#ifdef HAVE_BPF_SKEL
+       augmented_raw_syscalls_bpf__destroy(trace.skel);
+#endif
        return err;
 }
diff --git a/tools/perf/examples/bpf/augmented_raw_syscalls.c b/tools/perf/examples/bpf/augmented_raw_syscalls.c
deleted file mode 100644
index 9a03189..0000000
+++ /dev/null
@@ -1,417 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-/*
- * Augment the raw_syscalls tracepoints with the contents of the pointer arguments.
- *
- * Test it with:
- *
- * perf trace -e tools/perf/examples/bpf/augmented_raw_syscalls.c cat /etc/passwd > /dev/null
- *
- * This exactly matches what is marshalled into the raw_syscall:sys_enter
- * payload expected by the 'perf trace' beautifiers.
- *
- * For now it just uses the existing tracepoint augmentation code in 'perf
- * trace', in the next csets we'll hook up these with the sys_enter/sys_exit
- * code that will combine entry/exit in a strace like way.
- */
-
-#include <linux/bpf.h>
-#include <bpf/bpf_helpers.h>
-#include <linux/limits.h>
-
-// FIXME: These should come from system headers
-typedef char bool;
-typedef int pid_t;
-typedef long long int __s64;
-typedef __s64 time64_t;
-
-struct timespec64 {
-       time64_t        tv_sec;
-       long int        tv_nsec;
-};
-
-/* bpf-output associated map */
-struct __augmented_syscalls__ {
-       __uint(type, BPF_MAP_TYPE_PERF_EVENT_ARRAY);
-       __type(key, int);
-       __type(value, __u32);
-       __uint(max_entries, __NR_CPUS__);
-} __augmented_syscalls__ SEC(".maps");
-
-/*
- * What to augment at entry?
- *
- * Pointer arg payloads (filenames, etc) passed from userspace to the kernel
- */
-struct syscalls_sys_enter {
-       __uint(type, BPF_MAP_TYPE_PROG_ARRAY);
-       __type(key, __u32);
-       __type(value, __u32);
-       __uint(max_entries, 512);
-} syscalls_sys_enter SEC(".maps");
-
-/*
- * What to augment at exit?
- *
- * Pointer arg payloads returned from the kernel (struct stat, etc) to userspace.
- */
-struct syscalls_sys_exit {
-       __uint(type, BPF_MAP_TYPE_PROG_ARRAY);
-       __type(key, __u32);
-       __type(value, __u32);
-       __uint(max_entries, 512);
-} syscalls_sys_exit SEC(".maps");
-
-struct syscall_enter_args {
-       unsigned long long common_tp_fields;
-       long               syscall_nr;
-       unsigned long      args[6];
-};
-
-struct syscall_exit_args {
-       unsigned long long common_tp_fields;
-       long               syscall_nr;
-       long               ret;
-};
-
-struct augmented_arg {
-       unsigned int    size;
-       int             err;
-       char            value[PATH_MAX];
-};
-
-struct pids_filtered {
-       __uint(type, BPF_MAP_TYPE_HASH);
-       __type(key, pid_t);
-       __type(value, bool);
-       __uint(max_entries, 64);
-} pids_filtered SEC(".maps");
-
-/*
- * Desired design of maximum size and alignment (see RFC2553)
- */
-#define SS_MAXSIZE   128     /* Implementation specific max size */
-
-typedef unsigned short sa_family_t;
-
-/*
- * FIXME: Should come from system headers
- *
- * The definition uses anonymous union and struct in order to control the
- * default alignment.
- */
-struct sockaddr_storage {
-       union {
-               struct {
-                       sa_family_t    ss_family; /* address family */
-                       /* Following field(s) are implementation specific */
-                       char __data[SS_MAXSIZE - sizeof(unsigned short)];
-                               /* space to achieve desired size, */
-                               /* _SS_MAXSIZE value minus size of ss_family */
-               };
-               void *__align; /* implementation specific desired alignment */
-       };
-};
-
-struct augmented_args_payload {
-       struct syscall_enter_args args;
-       union {
-               struct {
-                       struct augmented_arg arg, arg2;
-               };
-               struct sockaddr_storage saddr;
-               char   __data[sizeof(struct augmented_arg)];
-       };
-};
-
-// We need more tmp space than the BPF stack can give us
-struct augmented_args_tmp {
-       __uint(type, BPF_MAP_TYPE_PERCPU_ARRAY);
-       __type(key, int);
-       __type(value, struct augmented_args_payload);
-       __uint(max_entries, 1);
-} augmented_args_tmp SEC(".maps");
-
-static inline struct augmented_args_payload *augmented_args_payload(void)
-{
-       int key = 0;
-       return bpf_map_lookup_elem(&augmented_args_tmp, &key);
-}
-
-static inline int augmented__output(void *ctx, struct augmented_args_payload *args, int len)
-{
-       /* If perf_event_output fails, return non-zero so that it gets recorded unaugmented */
-       return bpf_perf_event_output(ctx, &__augmented_syscalls__, BPF_F_CURRENT_CPU, args, len);
-}
-
-static inline
-unsigned int augmented_arg__read_str(struct augmented_arg *augmented_arg, const void *arg, unsigned int arg_len)
-{
-       unsigned int augmented_len = sizeof(*augmented_arg);
-       int string_len = bpf_probe_read_str(&augmented_arg->value, arg_len, arg);
-
-       augmented_arg->size = augmented_arg->err = 0;
-       /*
-        * probe_read_str may return < 0, e.g. -EFAULT
-        * So we leave that in the augmented_arg->size that userspace will
-        */
-       if (string_len > 0) {
-               augmented_len -= sizeof(augmented_arg->value) - string_len;
-               augmented_len &= sizeof(augmented_arg->value) - 1;
-               augmented_arg->size = string_len;
-       } else {
-               /*
-                * So that username notice the error while still being able
-                * to skip this augmented arg record
-                */
-               augmented_arg->err = string_len;
-               augmented_len = offsetof(struct augmented_arg, value);
-       }
-
-       return augmented_len;
-}
-
-SEC("!raw_syscalls:unaugmented")
-int syscall_unaugmented(struct syscall_enter_args *args)
-{
-       return 1;
-}
-
-/*
- * These will be tail_called from SEC("raw_syscalls:sys_enter"), so will find in
- * augmented_args_tmp what was read by that raw_syscalls:sys_enter and go
- * on from there, reading the first syscall arg as a string, i.e. open's
- * filename.
- */
-SEC("!syscalls:sys_enter_connect")
-int sys_enter_connect(struct syscall_enter_args *args)
-{
-       struct augmented_args_payload *augmented_args = augmented_args_payload();
-       const void *sockaddr_arg = (const void *)args->args[1];
-       unsigned int socklen = args->args[2];
-       unsigned int len = sizeof(augmented_args->args);
-
-        if (augmented_args == NULL)
-                return 1; /* Failure: don't filter */
-
-       if (socklen > sizeof(augmented_args->saddr))
-               socklen = sizeof(augmented_args->saddr);
-
-       bpf_probe_read(&augmented_args->saddr, socklen, sockaddr_arg);
-
-       return augmented__output(args, augmented_args, len + socklen);
-}
-
-SEC("!syscalls:sys_enter_sendto")
-int sys_enter_sendto(struct syscall_enter_args *args)
-{
-       struct augmented_args_payload *augmented_args = augmented_args_payload();
-       const void *sockaddr_arg = (const void *)args->args[4];
-       unsigned int socklen = args->args[5];
-       unsigned int len = sizeof(augmented_args->args);
-
-        if (augmented_args == NULL)
-                return 1; /* Failure: don't filter */
-
-       if (socklen > sizeof(augmented_args->saddr))
-               socklen = sizeof(augmented_args->saddr);
-
-       bpf_probe_read(&augmented_args->saddr, socklen, sockaddr_arg);
-
-       return augmented__output(args, augmented_args, len + socklen);
-}
-
-SEC("!syscalls:sys_enter_open")
-int sys_enter_open(struct syscall_enter_args *args)
-{
-       struct augmented_args_payload *augmented_args = augmented_args_payload();
-       const void *filename_arg = (const void *)args->args[0];
-       unsigned int len = sizeof(augmented_args->args);
-
-        if (augmented_args == NULL)
-                return 1; /* Failure: don't filter */
-
-       len += augmented_arg__read_str(&augmented_args->arg, filename_arg, sizeof(augmented_args->arg.value));
-
-       return augmented__output(args, augmented_args, len);
-}
-
-SEC("!syscalls:sys_enter_openat")
-int sys_enter_openat(struct syscall_enter_args *args)
-{
-       struct augmented_args_payload *augmented_args = augmented_args_payload();
-       const void *filename_arg = (const void *)args->args[1];
-       unsigned int len = sizeof(augmented_args->args);
-
-        if (augmented_args == NULL)
-                return 1; /* Failure: don't filter */
-
-       len += augmented_arg__read_str(&augmented_args->arg, filename_arg, sizeof(augmented_args->arg.value));
-
-       return augmented__output(args, augmented_args, len);
-}
-
-SEC("!syscalls:sys_enter_rename")
-int sys_enter_rename(struct syscall_enter_args *args)
-{
-       struct augmented_args_payload *augmented_args = augmented_args_payload();
-       const void *oldpath_arg = (const void *)args->args[0],
-                  *newpath_arg = (const void *)args->args[1];
-       unsigned int len = sizeof(augmented_args->args), oldpath_len;
-
-        if (augmented_args == NULL)
-                return 1; /* Failure: don't filter */
-
-       oldpath_len = augmented_arg__read_str(&augmented_args->arg, oldpath_arg, sizeof(augmented_args->arg.value));
-       len += oldpath_len + augmented_arg__read_str((void *)(&augmented_args->arg) + oldpath_len, newpath_arg, sizeof(augmented_args->arg.value));
-
-       return augmented__output(args, augmented_args, len);
-}
-
-SEC("!syscalls:sys_enter_renameat")
-int sys_enter_renameat(struct syscall_enter_args *args)
-{
-       struct augmented_args_payload *augmented_args = augmented_args_payload();
-       const void *oldpath_arg = (const void *)args->args[1],
-                  *newpath_arg = (const void *)args->args[3];
-       unsigned int len = sizeof(augmented_args->args), oldpath_len;
-
-        if (augmented_args == NULL)
-                return 1; /* Failure: don't filter */
-
-       oldpath_len = augmented_arg__read_str(&augmented_args->arg, oldpath_arg, sizeof(augmented_args->arg.value));
-       len += oldpath_len + augmented_arg__read_str((void *)(&augmented_args->arg) + oldpath_len, newpath_arg, sizeof(augmented_args->arg.value));
-
-       return augmented__output(args, augmented_args, len);
-}
-
-#define PERF_ATTR_SIZE_VER0     64      /* sizeof first published struct */
-
-// we need just the start, get the size to then copy it
-struct perf_event_attr_size {
-        __u32                   type;
-        /*
-         * Size of the attr structure, for fwd/bwd compat.
-         */
-        __u32                   size;
-};
-
-SEC("!syscalls:sys_enter_perf_event_open")
-int sys_enter_perf_event_open(struct syscall_enter_args *args)
-{
-       struct augmented_args_payload *augmented_args = augmented_args_payload();
-       const struct perf_event_attr_size *attr = (const struct perf_event_attr_size *)args->args[0], *attr_read;
-       unsigned int len = sizeof(augmented_args->args);
-
-        if (augmented_args == NULL)
-               goto failure;
-
-       if (bpf_probe_read(&augmented_args->__data, sizeof(*attr), attr) < 0)
-               goto failure;
-
-       attr_read = (const struct perf_event_attr_size *)augmented_args->__data;
-
-       __u32 size = attr_read->size;
-
-       if (!size)
-               size = PERF_ATTR_SIZE_VER0;
-
-       if (size > sizeof(augmented_args->__data))
-                goto failure;
-
-       // Now that we read attr->size and tested it against the size limits, read it completely
-       if (bpf_probe_read(&augmented_args->__data, size, attr) < 0)
-               goto failure;
-
-       return augmented__output(args, augmented_args, len + size);
-failure:
-       return 1; /* Failure: don't filter */
-}
-
-SEC("!syscalls:sys_enter_clock_nanosleep")
-int sys_enter_clock_nanosleep(struct syscall_enter_args *args)
-{
-       struct augmented_args_payload *augmented_args = augmented_args_payload();
-       const void *rqtp_arg = (const void *)args->args[2];
-       unsigned int len = sizeof(augmented_args->args);
-       __u32 size = sizeof(struct timespec64);
-
-        if (augmented_args == NULL)
-               goto failure;
-
-       if (size > sizeof(augmented_args->__data))
-                goto failure;
-
-       bpf_probe_read(&augmented_args->__data, size, rqtp_arg);
-
-       return augmented__output(args, augmented_args, len + size);
-failure:
-       return 1; /* Failure: don't filter */
-}
-
-static pid_t getpid(void)
-{
-       return bpf_get_current_pid_tgid();
-}
-
-static bool pid_filter__has(struct pids_filtered *pids, pid_t pid)
-{
-       return bpf_map_lookup_elem(pids, &pid) != NULL;
-}
-
-SEC("raw_syscalls:sys_enter")
-int sys_enter(struct syscall_enter_args *args)
-{
-       struct augmented_args_payload *augmented_args;
-       /*
-        * We start len, the amount of data that will be in the perf ring
-        * buffer, if this is not filtered out by one of pid_filter__has(),
-        * syscall->enabled, etc, with the non-augmented raw syscall payload,
-        * i.e. sizeof(augmented_args->args).
-        *
-        * We'll add to this as we add augmented syscalls right after that
-        * initial, non-augmented raw_syscalls:sys_enter payload.
-        */
-       unsigned int len = sizeof(augmented_args->args);
-
-       if (pid_filter__has(&pids_filtered, getpid()))
-               return 0;
-
-       augmented_args = augmented_args_payload();
-       if (augmented_args == NULL)
-               return 1;
-
-       bpf_probe_read(&augmented_args->args, sizeof(augmented_args->args), args);
-
-       /*
-        * Jump to syscall specific augmenter, even if the default one,
-        * "!raw_syscalls:unaugmented" that will just return 1 to return the
-        * unaugmented tracepoint payload.
-        */
-       bpf_tail_call(args, &syscalls_sys_enter, augmented_args->args.syscall_nr);
-
-       // If not found on the PROG_ARRAY syscalls map, then we're filtering it:
-       return 0;
-}
-
-SEC("raw_syscalls:sys_exit")
-int sys_exit(struct syscall_exit_args *args)
-{
-       struct syscall_exit_args exit_args;
-
-       if (pid_filter__has(&pids_filtered, getpid()))
-               return 0;
-
-       bpf_probe_read(&exit_args, sizeof(exit_args), args);
-       /*
-        * Jump to syscall specific return augmenter, even if the default one,
-        * "!raw_syscalls:unaugmented" that will just return 1 to return the
-        * unaugmented tracepoint payload.
-        */
-       bpf_tail_call(args, &syscalls_sys_exit, exit_args.syscall_nr);
-       /*
-        * If not found on the PROG_ARRAY syscalls map, then we're filtering it:
-        */
-       return 0;
-}
-
-char _license[] SEC("license") = "GPL";
diff --git a/tools/perf/util/bpf_skel/augmented_raw_syscalls.bpf.c b/tools/perf/util/bpf_skel/augmented_raw_syscalls.bpf.c
new file mode 100644
index 0000000..70478b9
--- /dev/null
@@ -0,0 +1,418 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Augment the raw_syscalls tracepoints with the contents of the pointer arguments.
+ *
+ * Test it with:
+ *
+ * perf trace -e tools/perf/examples/bpf/augmented_raw_syscalls.c cat /etc/passwd > /dev/null
+ *
+ * This exactly matches what is marshalled into the raw_syscall:sys_enter
+ * payload expected by the 'perf trace' beautifiers.
+ *
+ * For now it just uses the existing tracepoint augmentation code in 'perf
+ * trace', in the next csets we'll hook up these with the sys_enter/sys_exit
+ * code that will combine entry/exit in a strace like way.
+ */
+
+#include <linux/bpf.h>
+#include <bpf/bpf_helpers.h>
+#include <linux/limits.h>
+
+#define MAX_CPUS  4096
+
+// FIXME: These should come from system headers
+typedef char bool;
+typedef int pid_t;
+typedef long long int __s64;
+typedef __s64 time64_t;
+
+struct timespec64 {
+       time64_t        tv_sec;
+       long int        tv_nsec;
+};
+
+/* bpf-output associated map */
+struct __augmented_syscalls__ {
+       __uint(type, BPF_MAP_TYPE_PERF_EVENT_ARRAY);
+       __type(key, int);
+       __type(value, __u32);
+       __uint(max_entries, MAX_CPUS);
+} __augmented_syscalls__ SEC(".maps");
+
+/*
+ * What to augment at entry?
+ *
+ * Pointer arg payloads (filenames, etc) passed from userspace to the kernel
+ */
+struct syscalls_sys_enter {
+       __uint(type, BPF_MAP_TYPE_PROG_ARRAY);
+       __type(key, __u32);
+       __type(value, __u32);
+       __uint(max_entries, 512);
+} syscalls_sys_enter SEC(".maps");
+
+/*
+ * What to augment at exit?
+ *
+ * Pointer arg payloads returned from the kernel (struct stat, etc) to userspace.
+ */
+struct syscalls_sys_exit {
+       __uint(type, BPF_MAP_TYPE_PROG_ARRAY);
+       __type(key, __u32);
+       __type(value, __u32);
+       __uint(max_entries, 512);
+} syscalls_sys_exit SEC(".maps");
+
+struct syscall_enter_args {
+       unsigned long long common_tp_fields;
+       long               syscall_nr;
+       unsigned long      args[6];
+};
+
+struct syscall_exit_args {
+       unsigned long long common_tp_fields;
+       long               syscall_nr;
+       long               ret;
+};
+
+struct augmented_arg {
+       unsigned int    size;
+       int             err;
+       char            value[PATH_MAX];
+};
+
+struct pids_filtered {
+       __uint(type, BPF_MAP_TYPE_HASH);
+       __type(key, pid_t);
+       __type(value, bool);
+       __uint(max_entries, 64);
+} pids_filtered SEC(".maps");
+
+/*
+ * Desired design of maximum size and alignment (see RFC2553)
+ */
+#define SS_MAXSIZE   128     /* Implementation specific max size */
+
+typedef unsigned short sa_family_t;
+
+/*
+ * FIXME: Should come from system headers
+ *
+ * The definition uses anonymous union and struct in order to control the
+ * default alignment.
+ */
+struct sockaddr_storage {
+       union {
+               struct {
+                       sa_family_t    ss_family; /* address family */
+                       /* Following field(s) are implementation specific */
+                       char __data[SS_MAXSIZE - sizeof(unsigned short)];
+                               /* space to achieve desired size, */
+                               /* _SS_MAXSIZE value minus size of ss_family */
+               };
+               void *__align; /* implementation specific desired alignment */
+       };
+};
+
+struct augmented_args_payload {
+       struct syscall_enter_args args;
+       union {
+               struct {
+                       struct augmented_arg arg, arg2;
+               };
+               struct sockaddr_storage saddr;
+               char   __data[sizeof(struct augmented_arg)];
+       };
+};
+
+// We need more tmp space than the BPF stack can give us
+struct augmented_args_tmp {
+       __uint(type, BPF_MAP_TYPE_PERCPU_ARRAY);
+       __type(key, int);
+       __type(value, struct augmented_args_payload);
+       __uint(max_entries, 1);
+} augmented_args_tmp SEC(".maps");
+
+static inline struct augmented_args_payload *augmented_args_payload(void)
+{
+       int key = 0;
+       return bpf_map_lookup_elem(&augmented_args_tmp, &key);
+}
+
+static inline int augmented__output(void *ctx, struct augmented_args_payload *args, int len)
+{
+       /* If perf_event_output fails, return non-zero so that it gets recorded unaugmented */
+       return bpf_perf_event_output(ctx, &__augmented_syscalls__, BPF_F_CURRENT_CPU, args, len);
+}
+
+static inline
+unsigned int augmented_arg__read_str(struct augmented_arg *augmented_arg, const void *arg, unsigned int arg_len)
+{
+       unsigned int augmented_len = sizeof(*augmented_arg);
+       int string_len = bpf_probe_read_str(&augmented_arg->value, arg_len, arg);
+
+       augmented_arg->size = augmented_arg->err = 0;
+       /*
+        * probe_read_str may return < 0, e.g. -EFAULT
+        * So we leave that in the augmented_arg->size that userspace will
+        */
+       if (string_len > 0) {
+               augmented_len -= sizeof(augmented_arg->value) - string_len;
+               augmented_len &= sizeof(augmented_arg->value) - 1;
+               augmented_arg->size = string_len;
+       } else {
+               /*
+                * So that username notice the error while still being able
+                * to skip this augmented arg record
+                */
+               augmented_arg->err = string_len;
+               augmented_len = offsetof(struct augmented_arg, value);
+       }
+
+       return augmented_len;
+}
+
+SEC("tp/raw_syscalls/sys_enter")
+int syscall_unaugmented(struct syscall_enter_args *args)
+{
+       return 1;
+}
+
+/*
+ * These will be tail_called from SEC("raw_syscalls:sys_enter"), so will find in
+ * augmented_args_tmp what was read by that raw_syscalls:sys_enter and go
+ * on from there, reading the first syscall arg as a string, i.e. open's
+ * filename.
+ */
+SEC("tp/syscalls/sys_enter_connect")
+int sys_enter_connect(struct syscall_enter_args *args)
+{
+       struct augmented_args_payload *augmented_args = augmented_args_payload();
+       const void *sockaddr_arg = (const void *)args->args[1];
+       unsigned int socklen = args->args[2];
+       unsigned int len = sizeof(augmented_args->args);
+
+        if (augmented_args == NULL)
+                return 1; /* Failure: don't filter */
+
+       if (socklen > sizeof(augmented_args->saddr))
+               socklen = sizeof(augmented_args->saddr);
+
+       bpf_probe_read(&augmented_args->saddr, socklen, sockaddr_arg);
+
+       return augmented__output(args, augmented_args, len + socklen);
+}
+
+SEC("tp/syscalls/sys_enter_sendto")
+int sys_enter_sendto(struct syscall_enter_args *args)
+{
+       struct augmented_args_payload *augmented_args = augmented_args_payload();
+       const void *sockaddr_arg = (const void *)args->args[4];
+       unsigned int socklen = args->args[5];
+       unsigned int len = sizeof(augmented_args->args);
+
+        if (augmented_args == NULL)
+                return 1; /* Failure: don't filter */
+
+       if (socklen > sizeof(augmented_args->saddr))
+               socklen = sizeof(augmented_args->saddr);
+
+       bpf_probe_read(&augmented_args->saddr, socklen, sockaddr_arg);
+
+       return augmented__output(args, augmented_args, len + socklen);
+}
+
+SEC("tp/syscalls/sys_enter_open")
+int sys_enter_open(struct syscall_enter_args *args)
+{
+       struct augmented_args_payload *augmented_args = augmented_args_payload();
+       const void *filename_arg = (const void *)args->args[0];
+       unsigned int len = sizeof(augmented_args->args);
+
+        if (augmented_args == NULL)
+                return 1; /* Failure: don't filter */
+
+       len += augmented_arg__read_str(&augmented_args->arg, filename_arg, sizeof(augmented_args->arg.value));
+
+       return augmented__output(args, augmented_args, len);
+}
+
+SEC("tp/syscalls/sys_enter_openat")
+int sys_enter_openat(struct syscall_enter_args *args)
+{
+       struct augmented_args_payload *augmented_args = augmented_args_payload();
+       const void *filename_arg = (const void *)args->args[1];
+       unsigned int len = sizeof(augmented_args->args);
+
+        if (augmented_args == NULL)
+                return 1; /* Failure: don't filter */
+
+       len += augmented_arg__read_str(&augmented_args->arg, filename_arg, sizeof(augmented_args->arg.value));
+
+       return augmented__output(args, augmented_args, len);
+}
+
+SEC("tp/syscalls/sys_enter_rename")
+int sys_enter_rename(struct syscall_enter_args *args)
+{
+       struct augmented_args_payload *augmented_args = augmented_args_payload();
+       const void *oldpath_arg = (const void *)args->args[0],
+                  *newpath_arg = (const void *)args->args[1];
+       unsigned int len = sizeof(augmented_args->args), oldpath_len;
+
+        if (augmented_args == NULL)
+                return 1; /* Failure: don't filter */
+
+       oldpath_len = augmented_arg__read_str(&augmented_args->arg, oldpath_arg, sizeof(augmented_args->arg.value));
+       len += oldpath_len + augmented_arg__read_str((void *)(&augmented_args->arg) + oldpath_len, newpath_arg, sizeof(augmented_args->arg.value));
+
+       return augmented__output(args, augmented_args, len);
+}
+
+SEC("tp/syscalls/sys_enter_renameat")
+int sys_enter_renameat(struct syscall_enter_args *args)
+{
+       struct augmented_args_payload *augmented_args = augmented_args_payload();
+       const void *oldpath_arg = (const void *)args->args[1],
+                  *newpath_arg = (const void *)args->args[3];
+       unsigned int len = sizeof(augmented_args->args), oldpath_len;
+
+        if (augmented_args == NULL)
+                return 1; /* Failure: don't filter */
+
+       oldpath_len = augmented_arg__read_str(&augmented_args->arg, oldpath_arg, sizeof(augmented_args->arg.value));
+       len += oldpath_len + augmented_arg__read_str((void *)(&augmented_args->arg) + oldpath_len, newpath_arg, sizeof(augmented_args->arg.value));
+
+       return augmented__output(args, augmented_args, len);
+}
+
+#define PERF_ATTR_SIZE_VER0     64      /* sizeof first published struct */
+
+// we need just the start, get the size to then copy it
+struct perf_event_attr_size {
+        __u32                   type;
+        /*
+         * Size of the attr structure, for fwd/bwd compat.
+         */
+        __u32                   size;
+};
+
+SEC("tp/syscalls/sys_enter_perf_event_open")
+int sys_enter_perf_event_open(struct syscall_enter_args *args)
+{
+       struct augmented_args_payload *augmented_args = augmented_args_payload();
+       const struct perf_event_attr_size *attr = (const struct perf_event_attr_size *)args->args[0], *attr_read;
+       unsigned int len = sizeof(augmented_args->args);
+
+        if (augmented_args == NULL)
+               goto failure;
+
+       if (bpf_probe_read(&augmented_args->__data, sizeof(*attr), attr) < 0)
+               goto failure;
+
+       attr_read = (const struct perf_event_attr_size *)augmented_args->__data;
+
+       __u32 size = attr_read->size;
+
+       if (!size)
+               size = PERF_ATTR_SIZE_VER0;
+
+       if (size > sizeof(augmented_args->__data))
+                goto failure;
+
+       // Now that we read attr->size and tested it against the size limits, read it completely
+       if (bpf_probe_read(&augmented_args->__data, size, attr) < 0)
+               goto failure;
+
+       return augmented__output(args, augmented_args, len + size);
+failure:
+       return 1; /* Failure: don't filter */
+}
+
+SEC("tp/syscalls/sys_enter_clock_nanosleep")
+int sys_enter_clock_nanosleep(struct syscall_enter_args *args)
+{
+       struct augmented_args_payload *augmented_args = augmented_args_payload();
+       const void *rqtp_arg = (const void *)args->args[2];
+       unsigned int len = sizeof(augmented_args->args);
+       __u32 size = sizeof(struct timespec64);
+
+        if (augmented_args == NULL)
+               goto failure;
+
+       if (size > sizeof(augmented_args->__data))
+                goto failure;
+
+       bpf_probe_read(&augmented_args->__data, size, rqtp_arg);
+
+       return augmented__output(args, augmented_args, len + size);
+failure:
+       return 1; /* Failure: don't filter */
+}
+
+static pid_t getpid(void)
+{
+       return bpf_get_current_pid_tgid();
+}
+
+static bool pid_filter__has(struct pids_filtered *pids, pid_t pid)
+{
+       return bpf_map_lookup_elem(pids, &pid) != NULL;
+}
+
+SEC("tp/raw_syscalls/sys_enter")
+int sys_enter(struct syscall_enter_args *args)
+{
+       struct augmented_args_payload *augmented_args;
+       /*
+        * We start len, the amount of data that will be in the perf ring
+        * buffer, if this is not filtered out by one of pid_filter__has(),
+        * syscall->enabled, etc, with the non-augmented raw syscall payload,
+        * i.e. sizeof(augmented_args->args).
+        *
+        * We'll add to this as we add augmented syscalls right after that
+        * initial, non-augmented raw_syscalls:sys_enter payload.
+        */
+
+       if (pid_filter__has(&pids_filtered, getpid()))
+               return 0;
+
+       augmented_args = augmented_args_payload();
+       if (augmented_args == NULL)
+               return 1;
+
+       bpf_probe_read(&augmented_args->args, sizeof(augmented_args->args), args);
+
+       /*
+        * Jump to syscall specific augmenter, even if the default one,
+        * "!raw_syscalls:unaugmented" that will just return 1 to return the
+        * unaugmented tracepoint payload.
+        */
+       bpf_tail_call(args, &syscalls_sys_enter, augmented_args->args.syscall_nr);
+
+       // If not found on the PROG_ARRAY syscalls map, then we're filtering it:
+       return 0;
+}
+
+SEC("tp/raw_syscalls/sys_exit")
+int sys_exit(struct syscall_exit_args *args)
+{
+       struct syscall_exit_args exit_args;
+
+       if (pid_filter__has(&pids_filtered, getpid()))
+               return 0;
+
+       bpf_probe_read(&exit_args, sizeof(exit_args), args);
+       /*
+        * Jump to syscall specific return augmenter, even if the default one,
+        * "!raw_syscalls:unaugmented" that will just return 1 to return the
+        * unaugmented tracepoint payload.
+        */
+       bpf_tail_call(args, &syscalls_sys_exit, exit_args.syscall_nr);
+       /*
+        * If not found on the PROG_ARRAY syscalls map, then we're filtering it:
+        */
+       return 0;
+}
+
+char _license[] SEC("license") = "GPL";