selftests/bpf: check if bpf_fastcall is recognized for kfuncs
author: Eduard Zingerman <eddyz87@gmail.com>
Thu, 22 Aug 2024 08:41:12 +0000 (01:41 -0700)
committer: Alexei Starovoitov <ast@kernel.org>
Thu, 22 Aug 2024 15:35:21 +0000 (08:35 -0700)
Use kfunc_bpf_cast_to_kern_ctx() and kfunc_bpf_rdonly_cast() to verify
that bpf_fastcall pattern is recognized for kfunc calls.

Acked-by: Yonghong Song <yonghong.song@linux.dev>
Signed-off-by: Eduard Zingerman <eddyz87@gmail.com>
Link: https://lore.kernel.org/r/20240822084112.3257995-7-eddyz87@gmail.com
Signed-off-by: Alexei Starovoitov <ast@kernel.org>
tools/testing/selftests/bpf/progs/verifier_bpf_fastcall.c

index e30ab9f..9da97d2 100644 (file)
@@ -2,8 +2,11 @@
 
 #include <linux/bpf.h>
 #include <bpf/bpf_helpers.h>
+#include <bpf/bpf_core_read.h>
 #include "../../../include/linux/filter.h"
 #include "bpf_misc.h"
+#include <stdbool.h>
+#include "bpf_kfuncs.h"
 
 SEC("raw_tp")
 __arch_x86_64
@@ -842,4 +845,56 @@ __naked int bpf_fastcall_max_stack_fail(void)
        );
 }
 
+/* Verify that the bpf_fastcall spill/fill pattern around a
+ * bpf_cast_to_kern_ctx() kfunc call is recognized and removed by the
+ * verifier: the spill/fill pair below must not appear in the xlated
+ * program, and the call itself is inlined to "r0 = r1".
+ */
+SEC("cgroup/getsockname_unix")
+__xlated("0: r2 = 1")
+/* bpf_cast_to_kern_ctx is replaced by a single assignment */
+__xlated("1: r0 = r1")
+__xlated("2: r0 = r2")
+__xlated("3: exit")
+__success
+__naked void kfunc_bpf_cast_to_kern_ctx(void)
+{
+       asm volatile (
+       "r2 = 1;"
+       /* spill before / fill after the call: the bpf_fastcall
+        * pattern the verifier is expected to elide
+        */
+       "*(u64 *)(r10 - 32) = r2;"
+       "call %[bpf_cast_to_kern_ctx];"
+       "r2 = *(u64 *)(r10 - 32);"
+       "r0 = r2;"
+       "exit;"
+       :
+       : __imm(bpf_cast_to_kern_ctx)
+       : __clobber_all);
+}
+
+/* Same check as above, but for the bpf_rdonly_cast() kfunc: the
+ * spill/fill pair around the call must be removed and the call inlined
+ * to "r0 = r1".
+ */
+SEC("raw_tp")
+__xlated("3: r3 = 1")
+/* bpf_rdonly_cast is replaced by a single assignment */
+__xlated("4: r0 = r1")
+__xlated("5: r0 = r3")
+__success
+__naked void kfunc_bpf_rdonly_cast(void)
+{
+       asm volatile (
+       "r2 = %[btf_id];"
+       "r3 = 1;"
+       /* spill before / fill after the call: the bpf_fastcall
+        * pattern the verifier is expected to elide
+        */
+       "*(u64 *)(r10 - 32) = r3;"
+       "call %[bpf_rdonly_cast];"
+       "r3 = *(u64 *)(r10 - 32);"
+       "r0 = r3;"
+       "exit;"
+       :
+       : __imm(bpf_rdonly_cast),
+        [btf_id]"r"(bpf_core_type_id_kernel(union bpf_attr))
+       : __clobber_common);
+}
+
+/* BTF FUNC records are not generated for kfuncs referenced
+ * from inline assembly. These records are necessary for
+ * libbpf to link the program. The function below is a hack
+ * to ensure that BTF FUNC records are generated.
+ */
+void kfunc_root(void)
+{
+       /* Plain C call sites force the compiler to emit BTF FUNC
+        * records for the two kfuncs used in the asm tests above;
+        * this function is never meant to be executed.
+        */
+       bpf_cast_to_kern_ctx(0);
+       bpf_rdonly_cast(0, 0);
+}
+
 char _license[] SEC("license") = "GPL";