bpf: Count the number of times recursion was prevented
author	Alexei Starovoitov <ast@kernel.org>
Wed, 10 Feb 2021 03:36:31 +0000 (19:36 -0800)
committer	Daniel Borkmann <daniel@iogearbox.net>
Thu, 11 Feb 2021 15:19:20 +0000 (16:19 +0100)
Add a per-program counter for the number of times the recursion prevention
mechanism was triggered and expose it via show_fdinfo and bpf_prog_info.
Teach bpftool to print it.
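
For example, user space can read the new counter via BPF_OBJ_GET_INFO_BY_FD.
A minimal sketch using libbpf (the helper name and error handling here are
illustrative, not part of this patch):

  #include <bpf/bpf.h>
  #include <stdio.h>

  /* Print the recursion_misses counter of an already loaded program.
   * prog_fd is assumed to be a valid fd for that program.
   */
  static void dump_recursion_misses(int prog_fd)
  {
          struct bpf_prog_info info = {};
          __u32 len = sizeof(info);

          if (bpf_obj_get_info_by_fd(prog_fd, &info, &len))
                  return;
          printf("recursion_misses: %llu\n",
                 (unsigned long long)info.recursion_misses);
  }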

Signed-off-by: Alexei Starovoitov <ast@kernel.org>
Signed-off-by: Daniel Borkmann <daniel@iogearbox.net>
Acked-by: Andrii Nakryiko <andrii@kernel.org>
Link: https://lore.kernel.org/bpf/20210210033634.62081-7-alexei.starovoitov@gmail.com
include/linux/filter.h
include/uapi/linux/bpf.h
kernel/bpf/syscall.c
kernel/bpf/trampoline.c
tools/bpf/bpftool/prog.c
tools/include/uapi/linux/bpf.h

diff --git a/include/linux/filter.h b/include/linux/filter.h
index 6a06f3c..3b00fc9 100644
@@ -543,6 +543,7 @@ struct bpf_binary_header {
 struct bpf_prog_stats {
        u64 cnt;
        u64 nsecs;
+       u64 misses;
        struct u64_stats_sync syncp;
 } __aligned(2 * sizeof(u64));
 
diff --git a/include/uapi/linux/bpf.h b/include/uapi/linux/bpf.h
index c001766..c547ad1 100644
@@ -4501,6 +4501,7 @@ struct bpf_prog_info {
        __aligned_u64 prog_tags;
        __u64 run_time_ns;
        __u64 run_cnt;
+       __u64 recursion_misses;
 } __attribute__((aligned(8)));
 
 struct bpf_map_info {
diff --git a/kernel/bpf/syscall.c b/kernel/bpf/syscall.c
index f7df56a..c859bc4 100644
@@ -1731,25 +1731,28 @@ static int bpf_prog_release(struct inode *inode, struct file *filp)
 static void bpf_prog_get_stats(const struct bpf_prog *prog,
                               struct bpf_prog_stats *stats)
 {
-       u64 nsecs = 0, cnt = 0;
+       u64 nsecs = 0, cnt = 0, misses = 0;
        int cpu;
 
        for_each_possible_cpu(cpu) {
                const struct bpf_prog_stats *st;
                unsigned int start;
-               u64 tnsecs, tcnt;
+               u64 tnsecs, tcnt, tmisses;
 
                st = per_cpu_ptr(prog->stats, cpu);
                do {
                        start = u64_stats_fetch_begin_irq(&st->syncp);
                        tnsecs = st->nsecs;
                        tcnt = st->cnt;
+                       tmisses = st->misses;
                } while (u64_stats_fetch_retry_irq(&st->syncp, start));
                nsecs += tnsecs;
                cnt += tcnt;
+               misses += tmisses;
        }
        stats->nsecs = nsecs;
        stats->cnt = cnt;
+       stats->misses = misses;
 }
 
 #ifdef CONFIG_PROC_FS
@@ -1768,14 +1771,16 @@ static void bpf_prog_show_fdinfo(struct seq_file *m, struct file *filp)
                   "memlock:\t%llu\n"
                   "prog_id:\t%u\n"
                   "run_time_ns:\t%llu\n"
-                  "run_cnt:\t%llu\n",
+                  "run_cnt:\t%llu\n"
+                  "recursion_misses:\t%llu\n",
                   prog->type,
                   prog->jited,
                   prog_tag,
                   prog->pages * 1ULL << PAGE_SHIFT,
                   prog->aux->id,
                   stats.nsecs,
-                  stats.cnt);
+                  stats.cnt,
+                  stats.misses);
 }
 #endif
 
@@ -3438,6 +3443,7 @@ static int bpf_prog_get_info_by_fd(struct file *file,
        bpf_prog_get_stats(prog, &stats);
        info.run_time_ns = stats.nsecs;
        info.run_cnt = stats.cnt;
+       info.recursion_misses = stats.misses;
 
        if (!bpf_capable()) {
                info.jited_prog_len = 0;
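
With this change the fdinfo of a program fd also gains a line; abridged
example output (values illustrative, field names match the format string
above):

  # cat /proc/<pid>/fdinfo/<prog-fd>
  ...
  run_time_ns:	1423
  run_cnt:	12
  recursion_misses:	1
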
diff --git a/kernel/bpf/trampoline.c b/kernel/bpf/trampoline.c
index 89ef632..7bc3b32 100644
@@ -394,6 +394,16 @@ static u64 notrace bpf_prog_start_time(void)
        return start;
 }
 
+static void notrace inc_misses_counter(struct bpf_prog *prog)
+{
+       struct bpf_prog_stats *stats;
+
+       stats = this_cpu_ptr(prog->stats);
+       u64_stats_update_begin(&stats->syncp);
+       stats->misses++;
+       u64_stats_update_end(&stats->syncp);
+}
+
 /* The logic is similar to BPF_PROG_RUN, but with an explicit
  * rcu_read_lock() and migrate_disable() which are required
  * for the trampoline. The macro is split into
@@ -412,8 +422,10 @@ u64 notrace __bpf_prog_enter(struct bpf_prog *prog)
 {
        rcu_read_lock();
        migrate_disable();
-       if (unlikely(__this_cpu_inc_return(*(prog->active)) != 1))
+       if (unlikely(__this_cpu_inc_return(*(prog->active)) != 1)) {
+               inc_misses_counter(prog);
                return 0;
+       }
        return bpf_prog_start_time();
 }
 
@@ -451,8 +463,10 @@ u64 notrace __bpf_prog_enter_sleepable(struct bpf_prog *prog)
        rcu_read_lock_trace();
        migrate_disable();
        might_fault();
-       if (unlikely(__this_cpu_inc_return(*(prog->active)) != 1))
+       if (unlikely(__this_cpu_inc_return(*(prog->active)) != 1)) {
+               inc_misses_counter(prog);
                return 0;
+       }
        return bpf_prog_start_time();
 }
 
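The guard itself: prog->active is a per-CPU counter incremented on entry;
any result other than 1 means the program is already running on this CPU,
so the nested invocation is skipped and, with this patch, counted. A
minimal user-space analogue of the pattern (thread-local state stands in
for the kernel's per-CPU prog->active; not kernel code):

  static _Thread_local int active;
  static _Thread_local unsigned long long misses;

  static int prog_enter(void)
  {
          if (++active != 1) {    /* already active: recursion */
                  misses++;       /* count the prevented run */
                  return 0;       /* caller must skip the body */
          }
          return 1;
  }

  static void prog_exit(void)
  {
          --active;
  }
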
diff --git a/tools/bpf/bpftool/prog.c b/tools/bpf/bpftool/prog.c
index 1fe3ba2..f2b915b 100644
@@ -368,6 +368,8 @@ static void print_prog_header_json(struct bpf_prog_info *info)
                jsonw_uint_field(json_wtr, "run_time_ns", info->run_time_ns);
                jsonw_uint_field(json_wtr, "run_cnt", info->run_cnt);
        }
+       if (info->recursion_misses)
+               jsonw_uint_field(json_wtr, "recursion_misses", info->recursion_misses);
 }
 
 static void print_prog_json(struct bpf_prog_info *info, int fd)
@@ -446,6 +448,8 @@ static void print_prog_header_plain(struct bpf_prog_info *info)
        if (info->run_time_ns)
                printf(" run_time_ns %lld run_cnt %lld",
                       info->run_time_ns, info->run_cnt);
+       if (info->recursion_misses)
+               printf(" recursion_misses %lld", info->recursion_misses);
        printf("\n");
 }
 
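With the bpftool change, the counter is appended to the program header line
whenever it is non-zero; abridged example (values illustrative):

  $ bpftool prog show id 5
  5: tracing  name fentry_test  run_time_ns 1423 run_cnt 12 recursion_misses 1
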
diff --git a/tools/include/uapi/linux/bpf.h b/tools/include/uapi/linux/bpf.h
index c001766..c547ad1 100644
@@ -4501,6 +4501,7 @@ struct bpf_prog_info {
        __aligned_u64 prog_tags;
        __u64 run_time_ns;
        __u64 run_cnt;
+       __u64 recursion_misses;
 } __attribute__((aligned(8)));
 
 struct bpf_map_info {