// SPDX-License-Identifier: GPL-2.0-only
/*
 * Testsuite for eBPF verifier
 *
 * Copyright (c) 2014 PLUMgrid, http://plumgrid.com
 * Copyright (c) 2017 Facebook
 * Copyright (c) 2018 Covalent IO, Inc. http://covalent.io
 */
#include <asm/types.h>
#include <linux/types.h>
#include <sys/capability.h>
#include <linux/unistd.h>
#include <linux/filter.h>
#include <linux/bpf_perf_event.h>
#include <linux/bpf.h>
#include <linux/if_ether.h>
#include <linux/btf.h>

#include <bpf/libbpf.h>

#ifdef HAVE_GENHDR
# include "autoconf.h"
#else
# if defined(__i386) || defined(__x86_64) || defined(__s390x__) || defined(__aarch64__)
#  define CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS 1
# endif
#endif
#include "bpf_rlimit.h"
#include "../../../include/linux/filter.h"
#define MAX_INSNS	BPF_MAXINSNS
#define MAX_TEST_INSNS	1000000
#define MAX_FIXUPS	20
#define MAX_NR_MAPS	20
#define MAX_TEST_RUNS	8
#define POINTER_VALUE	0xcafe4all
#define TEST_DATA_LEN	64

#define F_NEEDS_EFFICIENT_UNALIGNED_ACCESS	(1 << 0)
#define F_LOAD_WITH_STRICT_ALIGNMENT		(1 << 1)

#define UNPRIV_SYSCTL "kernel/unprivileged_bpf_disabled"
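/* This sysctl gates unprivileged BPF. For manual testing it can be read and
 * toggled from the shell (illustrative; 0 = enabled, 1 = disabled one-way
 * until reboot, and on kernels that support it, 2 = disabled but changeable):
 *
 *	# sysctl kernel.unprivileged_bpf_disabled
 *	# sysctl -w kernel.unprivileged_bpf_disabled=1
 */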
static bool unpriv_disabled = false;
static int skips;
static bool verbose = false;

struct bpf_test {
        const char *descr;
        struct bpf_insn insns[MAX_INSNS];
        struct bpf_insn *fill_insns;
        int fixup_map_hash_8b[MAX_FIXUPS];
        int fixup_map_hash_48b[MAX_FIXUPS];
        int fixup_map_hash_16b[MAX_FIXUPS];
        int fixup_map_array_48b[MAX_FIXUPS];
        int fixup_map_sockmap[MAX_FIXUPS];
        int fixup_map_sockhash[MAX_FIXUPS];
        int fixup_map_xskmap[MAX_FIXUPS];
        int fixup_map_stacktrace[MAX_FIXUPS];
        int fixup_prog1[MAX_FIXUPS];
        int fixup_prog2[MAX_FIXUPS];
        int fixup_map_in_map[MAX_FIXUPS];
        int fixup_cgroup_storage[MAX_FIXUPS];
        int fixup_percpu_cgroup_storage[MAX_FIXUPS];
        int fixup_map_spin_lock[MAX_FIXUPS];
        int fixup_map_array_ro[MAX_FIXUPS];
        int fixup_map_array_wo[MAX_FIXUPS];
        int fixup_map_array_small[MAX_FIXUPS];
        int fixup_sk_storage_map[MAX_FIXUPS];
        int fixup_map_event_output[MAX_FIXUPS];
        int fixup_map_reuseport_array[MAX_FIXUPS];
        const char *errstr;
        const char *errstr_unpriv;
        uint32_t insn_processed;
        int prog_len;
        enum {
                UNDEF,
                ACCEPT,
                REJECT,
                VERBOSE_ACCEPT,
        } result, result_unpriv;
        enum bpf_prog_type prog_type;
        uint8_t flags;
        void (*fill_helper)(struct bpf_test *self);
        int runs;
#define bpf_testdata_struct_t                                   \
        struct {                                                \
                uint32_t retval, retval_unpriv;                 \
                union {                                         \
                        __u8 data[TEST_DATA_LEN];               \
                        __u64 data64[TEST_DATA_LEN / 8];        \
                };                                              \
        }
        union {
                bpf_testdata_struct_t;
                bpf_testdata_struct_t retvals[MAX_TEST_RUNS];
        };
        enum bpf_attach_type expected_attach_type;
};
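/* A sketch of what one tests[] entry looks like (the real entries are
 * generated into verifier/tests.h from the verifier/ *.c fragments; the
 * name and instructions here are made up for illustration):
 *
 *	{
 *		"example: mov64 + exit",
 *		.insns = {
 *		BPF_MOV64_IMM(BPF_REG_0, 0),
 *		BPF_EXIT_INSN(),
 *		},
 *		.result = ACCEPT,
 *		.retval = 0,
 *	},
 */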
/* Note we want this to be 64 bit aligned so that the end of our array
 * is actually the end of the structure.
 */
#define MAX_ENTRIES 11

struct test_val {
        unsigned int index;
        int foo[MAX_ENTRIES];
};

struct other_val {
        long long foo;
        long long bar;
};
static void bpf_fill_ld_abs_vlan_push_pop(struct bpf_test *self)
{
        /* test: {skb->data[0], vlan_push} x 51 + {skb->data[0], vlan_pop} x 51 */
#define PUSH_CNT 51
        /* jump range is limited to 16 bits. PUSH_CNT of ld_abs needs room */
        unsigned int len = (1 << 15) - PUSH_CNT * 2 * 5 * 6;
        struct bpf_insn *insn = self->fill_insns;
        int i = 0, j, k = 0;

        insn[i++] = BPF_MOV64_REG(BPF_REG_6, BPF_REG_1);
loop:
        for (j = 0; j < PUSH_CNT; j++) {
                insn[i++] = BPF_LD_ABS(BPF_B, 0);
                /* jump to error label */
                insn[i] = BPF_JMP32_IMM(BPF_JNE, BPF_REG_0, 0x34, len - i - 3);
                i++;
                insn[i++] = BPF_MOV64_REG(BPF_REG_1, BPF_REG_6);
                insn[i++] = BPF_MOV64_IMM(BPF_REG_2, 1);
                insn[i++] = BPF_MOV64_IMM(BPF_REG_3, 2);
                insn[i++] = BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
                                         BPF_FUNC_skb_vlan_push);
                insn[i] = BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, len - i - 3);
                i++;
        }

        for (j = 0; j < PUSH_CNT; j++) {
                insn[i++] = BPF_LD_ABS(BPF_B, 0);
                insn[i] = BPF_JMP32_IMM(BPF_JNE, BPF_REG_0, 0x34, len - i - 3);
                i++;
                insn[i++] = BPF_MOV64_REG(BPF_REG_1, BPF_REG_6);
                insn[i++] = BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
                                         BPF_FUNC_skb_vlan_pop);
                insn[i] = BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, len - i - 3);
                i++;
        }
        if (++k < 5)
                goto loop;

        for (; i < len - 3; i++)
                insn[i] = BPF_ALU64_IMM(BPF_MOV, BPF_REG_0, 0xbef);
        insn[len - 3] = BPF_JMP_A(1);
        /* error label */
        insn[len - 2] = BPF_MOV32_IMM(BPF_REG_0, 0);
        insn[len - 1] = BPF_EXIT_INSN();
        self->prog_len = len;
}
static void bpf_fill_jump_around_ld_abs(struct bpf_test *self)
{
        struct bpf_insn *insn = self->fill_insns;
        /* jump range is limited to 16 bits. Every ld_abs is replaced by 6
         * insns, but on arches like arm and ppc there will be one BPF_ZEXT
         * inserted to extend the error value of the inlined ld_abs sequence,
         * which then contains 7 insns. So set the divisor to 7 so the
         * testcase works on all arches.
         */
        unsigned int len = (1 << 15) / 7;
        int i = 0;

        insn[i++] = BPF_MOV64_REG(BPF_REG_6, BPF_REG_1);
        insn[i++] = BPF_LD_ABS(BPF_B, 0);
        insn[i] = BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 10, len - i - 2);
        i++;
        while (i < len - 1)
                insn[i++] = BPF_LD_ABS(BPF_B, 1);
        insn[i] = BPF_EXIT_INSN();
        self->prog_len = i + 1;
}
static void bpf_fill_rand_ld_dw(struct bpf_test *self)
{
        struct bpf_insn *insn = self->fill_insns;
        uint64_t res = 0;
        int i = 0;

        insn[i++] = BPF_MOV32_IMM(BPF_REG_0, 0);
        while (i < self->retval) {
                uint64_t val = bpf_semi_rand_get();
                struct bpf_insn tmp[2] = { BPF_LD_IMM64(BPF_REG_1, val) };

                res ^= val;
                insn[i++] = tmp[0];
                insn[i++] = tmp[1];
                insn[i++] = BPF_ALU64_REG(BPF_XOR, BPF_REG_0, BPF_REG_1);
        }
        insn[i++] = BPF_MOV64_REG(BPF_REG_1, BPF_REG_0);
        insn[i++] = BPF_ALU64_IMM(BPF_RSH, BPF_REG_1, 32);
        insn[i++] = BPF_ALU64_REG(BPF_XOR, BPF_REG_0, BPF_REG_1);
        insn[i] = BPF_EXIT_INSN();
        self->prog_len = i + 1;
        res ^= (res >> 32);
        self->retval = (uint32_t)res;
}
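/* A note on bpf_fill_rand_ld_dw(): the generated program XORs all random
 * immediates into r0, and the final RSH/XOR pair folds the upper 32 bits
 * into the lower ones, because the test run only reports the low 32 bits
 * of r0. The user-space mirror of that fold is res ^= (res >> 32); e.g.
 * res = 0x1122334455667788 folds to 0x11223344 ^ 0x55667788 = 0x444444cc.
 */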
#define MAX_JMP_SEQ 8192

/* test the sequence of 8k jumps */
static void bpf_fill_scale1(struct bpf_test *self)
{
        struct bpf_insn *insn = self->fill_insns;
        int i = 0, k = 0;

        insn[i++] = BPF_MOV64_REG(BPF_REG_6, BPF_REG_1);
        /* test to check that the long sequence of jumps is acceptable */
        while (k++ < MAX_JMP_SEQ) {
                insn[i++] = BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
                                         BPF_FUNC_get_prandom_u32);
                insn[i++] = BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, bpf_semi_rand_get(), 2);
                insn[i++] = BPF_MOV64_REG(BPF_REG_1, BPF_REG_10);
                insn[i++] = BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_6,
                                        -8 * (k % 64 + 1));
        }
        /* is_state_visited() doesn't allocate state for pruning for every jump.
         * Hence multiply jmps by 4 to accommodate that heuristic
         */
        while (i < MAX_TEST_INSNS - MAX_JMP_SEQ * 4)
                insn[i++] = BPF_ALU64_IMM(BPF_MOV, BPF_REG_0, 42);
        insn[i] = BPF_EXIT_INSN();
        self->prog_len = i + 1;
}
/* test the sequence of 8k jumps in innermost function (function depth 8) */
static void bpf_fill_scale2(struct bpf_test *self)
{
        struct bpf_insn *insn = self->fill_insns;
        int i = 0, k = 0;

#define FUNC_NEST 7
        for (k = 0; k < FUNC_NEST; k++) {
                insn[i++] = BPF_CALL_REL(1);
                insn[i++] = BPF_EXIT_INSN();
        }
        insn[i++] = BPF_MOV64_REG(BPF_REG_6, BPF_REG_1);
        /* test to check that the long sequence of jumps is acceptable */
        k = 0;
        while (k++ < MAX_JMP_SEQ) {
                insn[i++] = BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
                                         BPF_FUNC_get_prandom_u32);
                insn[i++] = BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, bpf_semi_rand_get(), 2);
                insn[i++] = BPF_MOV64_REG(BPF_REG_1, BPF_REG_10);
                insn[i++] = BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_6,
                                        -8 * (k % (64 - 4 * FUNC_NEST) + 1));
        }
        while (i < MAX_TEST_INSNS - MAX_JMP_SEQ * 4)
                insn[i++] = BPF_ALU64_IMM(BPF_MOV, BPF_REG_0, 42);
        insn[i] = BPF_EXIT_INSN();
        self->prog_len = i + 1;
}
static void bpf_fill_scale(struct bpf_test *self)
{
        switch (self->retval) {
        case 1:
                return bpf_fill_scale1(self);
        case 2:
                return bpf_fill_scale2(self);
        default:
                self->prog_len = 0;
                break;
        }
}
/* BPF_SK_LOOKUP contains 13 instructions; account for them if you need to
 * fix up maps.
 */
#define BPF_SK_LOOKUP(func)                                             \
        /* struct bpf_sock_tuple tuple = {} */                          \
        BPF_MOV64_IMM(BPF_REG_2, 0),                                    \
        BPF_STX_MEM(BPF_W, BPF_REG_10, BPF_REG_2, -8),                  \
        BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_2, -16),                \
        BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_2, -24),                \
        BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_2, -32),                \
        BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_2, -40),                \
        BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_2, -48),                \
        /* sk = func(ctx, &tuple, sizeof tuple, 0, 0) */                \
        BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),                           \
        BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -48),                         \
        BPF_MOV64_IMM(BPF_REG_3, sizeof(struct bpf_sock_tuple)),        \
        BPF_MOV64_IMM(BPF_REG_4, 0),                                    \
        BPF_MOV64_IMM(BPF_REG_5, 0),                                    \
        BPF_EMIT_CALL(BPF_FUNC_ ## func)
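/* Example use in a test insns array (illustrative snippet, not one of the
 * real testcases): look up a TCP socket and release it if one was found.
 *
 *	BPF_SK_LOOKUP(sk_lookup_tcp),
 *	BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 2),
 *	BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
 *	BPF_EMIT_CALL(BPF_FUNC_sk_release),
 *	BPF_EXIT_INSN(),
 */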
/* BPF_DIRECT_PKT_R2 contains 7 instructions; it initializes the default
 * return value to 0 and does the necessary preparation for direct packet
 * access through r2. The allowed access range is 8 bytes.
 */
#define BPF_DIRECT_PKT_R2                                               \
        BPF_MOV64_IMM(BPF_REG_0, 0),                                    \
        BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,                        \
                    offsetof(struct __sk_buff, data)),                  \
        BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,                        \
                    offsetof(struct __sk_buff, data_end)),              \
        BPF_MOV64_REG(BPF_REG_4, BPF_REG_2),                            \
        BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, 8),                           \
        BPF_JMP_REG(BPF_JLE, BPF_REG_4, BPF_REG_3, 1),                  \
        BPF_EXIT_INSN()
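/* Example use (illustrative): after BPF_DIRECT_PKT_R2 the program has proved
 * data + 8 <= data_end, so bytes [r2, r2 + 8) may be dereferenced directly;
 * anything wider must be re-checked against r3 (data_end).
 *
 *	BPF_DIRECT_PKT_R2,
 *	BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_2, 0),
 *	BPF_EXIT_INSN(),
 */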
/* BPF_RAND_UEXT_R7 contains 4 instructions; it initializes R7 to a random
 * positive u32 and zero-extends it to 64 bits.
 */
#define BPF_RAND_UEXT_R7                                                \
        BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,                       \
                     BPF_FUNC_get_prandom_u32),                         \
        BPF_MOV64_REG(BPF_REG_7, BPF_REG_0),                            \
        BPF_ALU64_IMM(BPF_LSH, BPF_REG_7, 33),                          \
        BPF_ALU64_IMM(BPF_RSH, BPF_REG_7, 33)
/* BPF_RAND_SEXT_R7 contains 5 instructions; it initializes R7 to a random
 * negative u32 and sign-extends it to 64 bits.
 */
#define BPF_RAND_SEXT_R7                                                \
        BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,                       \
                     BPF_FUNC_get_prandom_u32),                         \
        BPF_MOV64_REG(BPF_REG_7, BPF_REG_0),                            \
        BPF_ALU64_IMM(BPF_OR, BPF_REG_7, 0x80000000),                   \
        BPF_ALU64_IMM(BPF_LSH, BPF_REG_7, 32),                          \
        BPF_ALU64_IMM(BPF_ARSH, BPF_REG_7, 32)
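/* Worked example for the OR/LSH/ARSH sequence above: a prandom value of
 * 0x00000001 becomes 0x80000001 after the OR, 0x8000000100000000 after the
 * left shift by 32, and sign-extends to 0xffffffff80000001 after the
 * arithmetic right shift by 32.
 */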
static struct bpf_test tests[] = {
#define FILL_ARRAY
#include <verifier/tests.h>
#undef FILL_ARRAY
};
static int probe_filter_length(const struct bpf_insn *fp)
{
        int len;

        for (len = MAX_INSNS - 1; len > 0; --len)
                if (fp[len].code != 0 || fp[len].imm != 0)
                        break;
        return len + 1;
}
static bool skip_unsupported_map(enum bpf_map_type map_type)
{
        if (!bpf_probe_map_type(map_type, 0)) {
                printf("SKIP (unsupported map type %d)\n", map_type);
                skips++;
                return true;
        }
        return false;
}
static int __create_map(uint32_t type, uint32_t size_key,
                        uint32_t size_value, uint32_t max_elem,
                        uint32_t extra_flags)
{
        int fd;

        fd = bpf_create_map(type, size_key, size_value, max_elem,
                            (type == BPF_MAP_TYPE_HASH ?
                             BPF_F_NO_PREALLOC : 0) | extra_flags);
        if (fd < 0) {
                if (skip_unsupported_map(type))
                        return -1;
                printf("Failed to create map '%s'!\n", strerror(errno));
        }

        return fd;
}
static int create_map(uint32_t type, uint32_t size_key,
                      uint32_t size_value, uint32_t max_elem)
{
        return __create_map(type, size_key, size_value, max_elem, 0);
}
static void update_map(int fd, int index)
{
        struct test_val value = {
                .index = (6 + 1) * sizeof(int),
                .foo[6] = 0xabcdef12,
        };

        assert(!bpf_map_update_elem(fd, &index, &value, 0));
}
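/* The value written by update_map() above is self-describing: .index is the
 * byte offset of foo[6] within struct test_val ((6 + 1) * sizeof(int) == 28,
 * i.e. 4 bytes of index plus six 4-byte foo entries), which tests can use to
 * exercise variable-offset access into the map value.
 */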
static int create_prog_dummy_simple(enum bpf_prog_type prog_type, int ret)
{
        struct bpf_insn prog[] = {
                BPF_MOV64_IMM(BPF_REG_0, ret),
                BPF_EXIT_INSN(),
        };

        return bpf_load_program(prog_type, prog,
                                ARRAY_SIZE(prog), "GPL", 0, NULL, 0);
}
static int create_prog_dummy_loop(enum bpf_prog_type prog_type, int mfd,
                                  int idx, int ret)
{
        struct bpf_insn prog[] = {
                BPF_MOV64_IMM(BPF_REG_3, idx),
                BPF_LD_MAP_FD(BPF_REG_2, mfd),
                BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
                             BPF_FUNC_tail_call),
                BPF_MOV64_IMM(BPF_REG_0, ret),
                BPF_EXIT_INSN(),
        };

        return bpf_load_program(prog_type, prog,
                                ARRAY_SIZE(prog), "GPL", 0, NULL, 0);
}
static int create_prog_array(enum bpf_prog_type prog_type, uint32_t max_elem,
                             int p1key, int p2key, int p3key)
{
        int mfd, p1fd, p2fd, p3fd;

        mfd = bpf_create_map(BPF_MAP_TYPE_PROG_ARRAY, sizeof(int),
                             sizeof(int), max_elem, 0);
        if (mfd < 0) {
                if (skip_unsupported_map(BPF_MAP_TYPE_PROG_ARRAY))
                        return -1;
                printf("Failed to create prog array '%s'!\n", strerror(errno));
                return -1;
        }

        p1fd = create_prog_dummy_simple(prog_type, 42);
        p2fd = create_prog_dummy_loop(prog_type, mfd, p2key, 41);
        p3fd = create_prog_dummy_simple(prog_type, 24);
        if (p1fd < 0 || p2fd < 0 || p3fd < 0)
                goto err;
        if (bpf_map_update_elem(mfd, &p1key, &p1fd, BPF_ANY) < 0)
                goto err;
        if (bpf_map_update_elem(mfd, &p2key, &p2fd, BPF_ANY) < 0)
                goto err;
        if (bpf_map_update_elem(mfd, &p3key, &p3fd, BPF_ANY) < 0) {
err:
                close(mfd);
                mfd = -1;
        }
        close(p3fd);
        close(p2fd);
        close(p1fd);
        return mfd;
}
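/* The resulting prog array is laid out as: slot p1key -> dummy prog
 * returning 42, slot p2key -> prog that tail-calls back into its own slot
 * and falls through to return 41, slot p3key -> dummy prog returning 24.
 */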
static int create_map_in_map(void)
{
        int inner_map_fd, outer_map_fd;

        inner_map_fd = bpf_create_map(BPF_MAP_TYPE_ARRAY, sizeof(int),
                                      sizeof(int), 1, 0);
        if (inner_map_fd < 0) {
                if (skip_unsupported_map(BPF_MAP_TYPE_ARRAY))
                        return -1;
                printf("Failed to create array '%s'!\n", strerror(errno));
                return inner_map_fd;
        }

        outer_map_fd = bpf_create_map_in_map(BPF_MAP_TYPE_ARRAY_OF_MAPS, NULL,
                                             sizeof(int), inner_map_fd, 1, 0);
        if (outer_map_fd < 0) {
                if (skip_unsupported_map(BPF_MAP_TYPE_ARRAY_OF_MAPS))
                        return -1;
                printf("Failed to create array of maps '%s'!\n",
                       strerror(errno));
        }

        close(inner_map_fd);

        return outer_map_fd;
}
static int create_cgroup_storage(bool percpu)
{
        enum bpf_map_type type = percpu ? BPF_MAP_TYPE_PERCPU_CGROUP_STORAGE :
                                          BPF_MAP_TYPE_CGROUP_STORAGE;
        int fd;

        fd = bpf_create_map(type, sizeof(struct bpf_cgroup_storage_key),
                            TEST_DATA_LEN, 0, 0);
        if (fd < 0) {
                if (skip_unsupported_map(type))
                        return -1;
                printf("Failed to create cgroup storage '%s'!\n",
                       strerror(errno));
        }

        return fd;
}
/* struct bpf_spin_lock {
 *	int val;
 * };
 * struct val {
 *	int cnt;
 *	struct bpf_spin_lock l;
 * };
 */
static const char btf_str_sec[] = "\0bpf_spin_lock\0val\0cnt\0l";
static __u32 btf_raw_types[] = {
        /* int */
        BTF_TYPE_INT_ENC(0, BTF_INT_SIGNED, 0, 32, 4),  /* [1] */
        /* struct bpf_spin_lock */                      /* [2] */
        BTF_TYPE_ENC(1, BTF_INFO_ENC(BTF_KIND_STRUCT, 0, 1), 4),
        BTF_MEMBER_ENC(15, 1, 0), /* int val; */
        /* struct val */                                /* [3] */
        BTF_TYPE_ENC(15, BTF_INFO_ENC(BTF_KIND_STRUCT, 0, 2), 8),
        BTF_MEMBER_ENC(19, 1, 0), /* int cnt; */
        BTF_MEMBER_ENC(23, 2, 32), /* struct bpf_spin_lock l; */
};
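/* The name offsets used above index into btf_str_sec:
 * 1 -> "bpf_spin_lock", 15 -> "val", 19 -> "cnt", 23 -> "l".
 * The member bit offsets are 0 for cnt and 32 for l, matching the 8-byte
 * layout of struct val { int cnt; struct bpf_spin_lock l; }.
 */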
static int load_btf(void)
{
        struct btf_header hdr = {
                .magic = BTF_MAGIC,
                .version = BTF_VERSION,
                .hdr_len = sizeof(struct btf_header),
                .type_len = sizeof(btf_raw_types),
                .str_off = sizeof(btf_raw_types),
                .str_len = sizeof(btf_str_sec),
        };
        void *ptr, *raw_btf;
        int btf_fd;

        ptr = raw_btf = malloc(sizeof(hdr) + sizeof(btf_raw_types) +
                               sizeof(btf_str_sec));

        memcpy(ptr, &hdr, sizeof(hdr));
        ptr += sizeof(hdr);
        memcpy(ptr, btf_raw_types, hdr.type_len);
        ptr += hdr.type_len;
        memcpy(ptr, btf_str_sec, hdr.str_len);
        ptr += hdr.str_len;

        btf_fd = bpf_load_btf(raw_btf, ptr - raw_btf, 0, 0, 0);
        free(raw_btf);
        if (btf_fd < 0)
                return -1;
        return btf_fd;
}
static int create_map_spin_lock(void)
{
        struct bpf_create_map_attr attr = {
                .name = "test_map",
                .map_type = BPF_MAP_TYPE_ARRAY,
                .key_size = 4,
                .value_size = 8,
                .max_entries = 1,
                .btf_key_type_id = 1,
                .btf_value_type_id = 3,
        };
        int fd, btf_fd;

        btf_fd = load_btf();
        if (btf_fd < 0)
                return -1;
        attr.btf_fd = btf_fd;
        fd = bpf_create_map_xattr(&attr);
        if (fd < 0)
                printf("Failed to create map with spin_lock\n");
        return fd;
}
static int create_sk_storage_map(void)
{
        struct bpf_create_map_attr attr = {
                .name = "test_map",
                .map_type = BPF_MAP_TYPE_SK_STORAGE,
                .key_size = 4,
                .value_size = 8,
                .max_entries = 0,
                .map_flags = BPF_F_NO_PREALLOC,
                .btf_key_type_id = 1,
                .btf_value_type_id = 3,
        };
        int fd, btf_fd;

        btf_fd = load_btf();
        if (btf_fd < 0)
                return -1;
        attr.btf_fd = btf_fd;
        fd = bpf_create_map_xattr(&attr);
        close(attr.btf_fd);
        if (fd < 0)
                printf("Failed to create sk_storage_map\n");
        return fd;
}
static char bpf_vlog[UINT_MAX >> 8];
static void do_test_fixup(struct bpf_test *test, enum bpf_prog_type prog_type,
                          struct bpf_insn *prog, int *map_fds)
{
        int *fixup_map_hash_8b = test->fixup_map_hash_8b;
        int *fixup_map_hash_48b = test->fixup_map_hash_48b;
        int *fixup_map_hash_16b = test->fixup_map_hash_16b;
        int *fixup_map_array_48b = test->fixup_map_array_48b;
        int *fixup_map_sockmap = test->fixup_map_sockmap;
        int *fixup_map_sockhash = test->fixup_map_sockhash;
        int *fixup_map_xskmap = test->fixup_map_xskmap;
        int *fixup_map_stacktrace = test->fixup_map_stacktrace;
        int *fixup_prog1 = test->fixup_prog1;
        int *fixup_prog2 = test->fixup_prog2;
        int *fixup_map_in_map = test->fixup_map_in_map;
        int *fixup_cgroup_storage = test->fixup_cgroup_storage;
        int *fixup_percpu_cgroup_storage = test->fixup_percpu_cgroup_storage;
        int *fixup_map_spin_lock = test->fixup_map_spin_lock;
        int *fixup_map_array_ro = test->fixup_map_array_ro;
        int *fixup_map_array_wo = test->fixup_map_array_wo;
        int *fixup_map_array_small = test->fixup_map_array_small;
        int *fixup_sk_storage_map = test->fixup_sk_storage_map;
        int *fixup_map_event_output = test->fixup_map_event_output;
        int *fixup_map_reuseport_array = test->fixup_map_reuseport_array;

        if (test->fill_helper) {
                test->fill_insns = calloc(MAX_TEST_INSNS, sizeof(struct bpf_insn));
                test->fill_helper(test);
        }
        /* Allocating HTs with 1 elem is fine here, since we only exercise
         * the verifier and never do a runtime lookup, so the only thing
         * that really matters is the value size in this case.
         */
        if (*fixup_map_hash_8b) {
                map_fds[0] = create_map(BPF_MAP_TYPE_HASH, sizeof(long long),
                                        sizeof(long long), 1);
                do {
                        prog[*fixup_map_hash_8b].imm = map_fds[0];
                        fixup_map_hash_8b++;
                } while (*fixup_map_hash_8b);
        }

        if (*fixup_map_hash_48b) {
                map_fds[1] = create_map(BPF_MAP_TYPE_HASH, sizeof(long long),
                                        sizeof(struct test_val), 1);
                do {
                        prog[*fixup_map_hash_48b].imm = map_fds[1];
                        fixup_map_hash_48b++;
                } while (*fixup_map_hash_48b);
        }

        if (*fixup_map_hash_16b) {
                map_fds[2] = create_map(BPF_MAP_TYPE_HASH, sizeof(long long),
                                        sizeof(struct other_val), 1);
                do {
                        prog[*fixup_map_hash_16b].imm = map_fds[2];
                        fixup_map_hash_16b++;
                } while (*fixup_map_hash_16b);
        }

        if (*fixup_map_array_48b) {
                map_fds[3] = create_map(BPF_MAP_TYPE_ARRAY, sizeof(int),
                                        sizeof(struct test_val), 1);
                update_map(map_fds[3], 0);
                do {
                        prog[*fixup_map_array_48b].imm = map_fds[3];
                        fixup_map_array_48b++;
                } while (*fixup_map_array_48b);
        }
        if (*fixup_prog1) {
                map_fds[4] = create_prog_array(prog_type, 4, 0, 1, 2);
                do {
                        prog[*fixup_prog1].imm = map_fds[4];
                        fixup_prog1++;
                } while (*fixup_prog1);
        }

        if (*fixup_prog2) {
                map_fds[5] = create_prog_array(prog_type, 8, 7, 1, 2);
                do {
                        prog[*fixup_prog2].imm = map_fds[5];
                        fixup_prog2++;
                } while (*fixup_prog2);
        }

        if (*fixup_map_in_map) {
                map_fds[6] = create_map_in_map();
                do {
                        prog[*fixup_map_in_map].imm = map_fds[6];
                        fixup_map_in_map++;
                } while (*fixup_map_in_map);
        }

        if (*fixup_cgroup_storage) {
                map_fds[7] = create_cgroup_storage(false);
                do {
                        prog[*fixup_cgroup_storage].imm = map_fds[7];
                        fixup_cgroup_storage++;
                } while (*fixup_cgroup_storage);
        }

        if (*fixup_percpu_cgroup_storage) {
                map_fds[8] = create_cgroup_storage(true);
                do {
                        prog[*fixup_percpu_cgroup_storage].imm = map_fds[8];
                        fixup_percpu_cgroup_storage++;
                } while (*fixup_percpu_cgroup_storage);
        }
        if (*fixup_map_sockmap) {
                map_fds[9] = create_map(BPF_MAP_TYPE_SOCKMAP, sizeof(int),
                                        sizeof(int), 1);
                do {
                        prog[*fixup_map_sockmap].imm = map_fds[9];
                        fixup_map_sockmap++;
                } while (*fixup_map_sockmap);
        }
        if (*fixup_map_sockhash) {
                map_fds[10] = create_map(BPF_MAP_TYPE_SOCKHASH, sizeof(int),
                                         sizeof(int), 1);
                do {
                        prog[*fixup_map_sockhash].imm = map_fds[10];
                        fixup_map_sockhash++;
                } while (*fixup_map_sockhash);
        }
        if (*fixup_map_xskmap) {
                map_fds[11] = create_map(BPF_MAP_TYPE_XSKMAP, sizeof(int),
                                         sizeof(int), 1);
                do {
                        prog[*fixup_map_xskmap].imm = map_fds[11];
                        fixup_map_xskmap++;
                } while (*fixup_map_xskmap);
        }
        if (*fixup_map_stacktrace) {
                map_fds[12] = create_map(BPF_MAP_TYPE_STACK_TRACE, sizeof(u32),
                                         sizeof(u64), 1);
                do {
                        prog[*fixup_map_stacktrace].imm = map_fds[12];
                        fixup_map_stacktrace++;
                } while (*fixup_map_stacktrace);
        }
        if (*fixup_map_spin_lock) {
                map_fds[13] = create_map_spin_lock();
                do {
                        prog[*fixup_map_spin_lock].imm = map_fds[13];
                        fixup_map_spin_lock++;
                } while (*fixup_map_spin_lock);
        }
        if (*fixup_map_array_ro) {
                map_fds[14] = __create_map(BPF_MAP_TYPE_ARRAY, sizeof(int),
                                           sizeof(struct test_val), 1,
                                           BPF_F_RDONLY_PROG);
                update_map(map_fds[14], 0);
                do {
                        prog[*fixup_map_array_ro].imm = map_fds[14];
                        fixup_map_array_ro++;
                } while (*fixup_map_array_ro);
        }
        if (*fixup_map_array_wo) {
                map_fds[15] = __create_map(BPF_MAP_TYPE_ARRAY, sizeof(int),
                                           sizeof(struct test_val), 1,
                                           BPF_F_WRONLY_PROG);
                update_map(map_fds[15], 0);
                do {
                        prog[*fixup_map_array_wo].imm = map_fds[15];
                        fixup_map_array_wo++;
                } while (*fixup_map_array_wo);
        }
        if (*fixup_map_array_small) {
                map_fds[16] = __create_map(BPF_MAP_TYPE_ARRAY, sizeof(int),
                                           1, 1, 0);
                update_map(map_fds[16], 0);
                do {
                        prog[*fixup_map_array_small].imm = map_fds[16];
                        fixup_map_array_small++;
                } while (*fixup_map_array_small);
        }
        if (*fixup_sk_storage_map) {
                map_fds[17] = create_sk_storage_map();
                do {
                        prog[*fixup_sk_storage_map].imm = map_fds[17];
                        fixup_sk_storage_map++;
                } while (*fixup_sk_storage_map);
        }
        if (*fixup_map_event_output) {
                map_fds[18] = __create_map(BPF_MAP_TYPE_PERF_EVENT_ARRAY,
                                           sizeof(int), sizeof(int), 1, 0);
                do {
                        prog[*fixup_map_event_output].imm = map_fds[18];
                        fixup_map_event_output++;
                } while (*fixup_map_event_output);
        }
        if (*fixup_map_reuseport_array) {
                map_fds[19] = __create_map(BPF_MAP_TYPE_REUSEPORT_SOCKARRAY,
                                           sizeof(u32), sizeof(u64), 1, 0);
                do {
                        prog[*fixup_map_reuseport_array].imm = map_fds[19];
                        fixup_map_reuseport_array++;
                } while (*fixup_map_reuseport_array);
        }
}
struct libcap {
        struct __user_cap_header_struct hdr;
        struct __user_cap_data_struct data[2];
};
static int set_admin(bool admin)
{
        cap_t caps;
        /* need CAP_BPF, CAP_NET_ADMIN, CAP_PERFMON to load progs */
        const cap_value_t cap_net_admin = CAP_NET_ADMIN;
        const cap_value_t cap_sys_admin = CAP_SYS_ADMIN;
        struct libcap *cap;
        int ret = -1;

        caps = cap_get_proc();
        if (!caps) {
                perror("cap_get_proc");
                return -1;
        }
        cap = (struct libcap *)caps;
        if (cap_set_flag(caps, CAP_EFFECTIVE, 1, &cap_sys_admin, CAP_CLEAR)) {
                perror("cap_set_flag clear admin");
                goto out;
        }
        if (cap_set_flag(caps, CAP_EFFECTIVE, 1, &cap_net_admin,
                         admin ? CAP_SET : CAP_CLEAR)) {
                perror("cap_set_flag set_or_clear net");
                goto out;
        }
        /* libcap is likely old and simply ignores CAP_BPF and CAP_PERFMON,
         * so update the effective bits manually
         */
        if (admin) {
                cap->data[1].effective |= 1 << (38 /* CAP_PERFMON */ - 32);
                cap->data[1].effective |= 1 << (39 /* CAP_BPF */ - 32);
        } else {
                cap->data[1].effective &= ~(1 << (38 - 32));
                cap->data[1].effective &= ~(1 << (39 - 32));
        }
        if (cap_set_proc(caps)) {
                perror("cap_set_proc");
                goto out;
        }
        ret = 0;
out:
        if (cap_free(caps))
                perror("cap_free");
        return ret;
}
static int do_prog_test_run(int fd_prog, bool unpriv, uint32_t expected_val,
                            void *data, size_t size_data)
{
        __u8 tmp[TEST_DATA_LEN << 2];
        __u32 size_tmp = sizeof(tmp);
        uint32_t retval;
        int err;

        if (unpriv)
                set_admin(true);
        err = bpf_prog_test_run(fd_prog, 1, data, size_data,
                                tmp, &size_tmp, &retval, NULL);
        if (unpriv)
                set_admin(false);
        if (err && errno != 524 /* ENOTSUPP */ && errno != EPERM) {
                printf("Unexpected bpf_prog_test_run error ");
                return err;
        }
        if (!err && retval != expected_val &&
            expected_val != POINTER_VALUE) {
                printf("FAIL retval %d != %d ", retval, expected_val);
                return 1;
        }

        return 0;
}
static bool cmp_str_seq(const char *log, const char *exp)
{
        char needle[80];
        const char *p, *q;
        int len;

        do {
                p = strchr(exp, '\t');
                if (!p)
                        p = exp + strlen(exp);

                len = p - exp;
                if (len >= sizeof(needle) || !len) {
                        printf("FAIL\nTestcase bug\n");
                        return false;
                }
                strncpy(needle, exp, len);
                needle[len] = 0;
                q = strstr(log, needle);
                if (!q) {
                        printf("FAIL\nUnexpected verifier log in successful load!\n"
                               "EXP: %s\nRES:\n", needle);
                        return false;
                }
                log = q + len;
                exp = p + 1;
        } while (*p);
        return true;
}
static void do_test_single(struct bpf_test *test, bool unpriv,
                           int *passes, int *errors)
{
        int fd_prog, expected_ret, alignment_prevented_execution;
        int prog_len, prog_type = test->prog_type;
        struct bpf_insn *prog = test->insns;
        struct bpf_load_program_attr attr;
        int run_errs, run_successes;
        int map_fds[MAX_NR_MAPS];
        const char *expected_err;
        int fixup_skips;
        __u32 pflags;
        int i, err;

        for (i = 0; i < MAX_NR_MAPS; i++)
                map_fds[i] = -1;

        if (!prog_type)
                prog_type = BPF_PROG_TYPE_SOCKET_FILTER;
        fixup_skips = skips;
        do_test_fixup(test, prog_type, prog, map_fds);
        if (test->fill_insns) {
                prog = test->fill_insns;
                prog_len = test->prog_len;
        } else {
                prog_len = probe_filter_length(prog);
        }
        /* If there were some map skips during fixup due to missing bpf
         * features, skip this test.
         */
        if (fixup_skips != skips)
                return;
        pflags = BPF_F_TEST_RND_HI32;
        if (test->flags & F_LOAD_WITH_STRICT_ALIGNMENT)
                pflags |= BPF_F_STRICT_ALIGNMENT;
        if (test->flags & F_NEEDS_EFFICIENT_UNALIGNED_ACCESS)
                pflags |= BPF_F_ANY_ALIGNMENT;
        if (test->flags & ~3)
                pflags |= test->flags;

        expected_ret = unpriv && test->result_unpriv != UNDEF ?
                       test->result_unpriv : test->result;
        expected_err = unpriv && test->errstr_unpriv ?
                       test->errstr_unpriv : test->errstr;
        memset(&attr, 0, sizeof(attr));
        attr.prog_type = prog_type;
        attr.expected_attach_type = test->expected_attach_type;
        attr.insns = prog;
        attr.insns_cnt = prog_len;
        attr.license = "GPL";
        if (verbose)
                attr.log_level = 1;
        else if (expected_ret == VERBOSE_ACCEPT)
                attr.log_level = 2;
        else
                attr.log_level = 4;
        attr.prog_flags = pflags;

        fd_prog = bpf_load_program_xattr(&attr, bpf_vlog, sizeof(bpf_vlog));
        if (fd_prog < 0 && !bpf_probe_prog_type(prog_type, 0)) {
                printf("SKIP (unsupported program type %d)\n", prog_type);
                skips++;
                goto close_fds;
        }

        alignment_prevented_execution = 0;
        if (expected_ret == ACCEPT || expected_ret == VERBOSE_ACCEPT) {
                if (fd_prog < 0) {
                        printf("FAIL\nFailed to load prog '%s'!\n",
                               strerror(errno));
                        goto fail_log;
                }
#ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
                if (fd_prog >= 0 &&
                    (test->flags & F_NEEDS_EFFICIENT_UNALIGNED_ACCESS))
                        alignment_prevented_execution = 1;
#endif
                if (expected_ret == VERBOSE_ACCEPT && !cmp_str_seq(bpf_vlog, expected_err)) {
                        goto fail_log;
                }
        } else {
                if (fd_prog >= 0) {
                        printf("FAIL\nUnexpected success loading prog!\n");
                        goto fail_log;
                }
                if (!expected_err || !strstr(bpf_vlog, expected_err)) {
                        printf("FAIL\nUnexpected error message!\n\tEXP: %s\n\tRES: %s\n",
                               expected_err, bpf_vlog);
                        goto fail_log;
                }
        }

        if (test->insn_processed) {
                uint32_t insn_processed;
                char *proc;

                proc = strstr(bpf_vlog, "processed ");
                insn_processed = atoi(proc + 10);
                if (test->insn_processed != insn_processed) {
                        printf("FAIL\nUnexpected insn_processed %u vs %u\n",
                               insn_processed, test->insn_processed);
                        goto fail_log;
                }
        }

        if (verbose)
                printf(", verifier log:\n%s", bpf_vlog);
        run_errs = 0;
        run_successes = 0;
        if (!alignment_prevented_execution && fd_prog >= 0) {
                uint32_t expected_val;
                int i;

                if (!test->runs)
                        test->runs = 1;

                for (i = 0; i < test->runs; i++) {
                        if (unpriv && test->retvals[i].retval_unpriv)
                                expected_val = test->retvals[i].retval_unpriv;
                        else
                                expected_val = test->retvals[i].retval;

                        err = do_prog_test_run(fd_prog, unpriv, expected_val,
                                               test->retvals[i].data,
                                               sizeof(test->retvals[i].data));
                        if (err) {
                                printf("(run %d/%d) ", i + 1, test->runs);
                                run_errs++;
                        } else {
                                run_successes++;
                        }
                }
        }

        if (!run_errs) {
                (*passes)++;
                if (run_successes > 1)
                        printf("%d cases ", run_successes);
                printf("OK");
                if (alignment_prevented_execution)
                        printf(" (NOTE: not executed due to unknown alignment)");
                printf("\n");
        } else {
                printf("\n");
                goto fail_log;
        }
close_fds:
        if (test->fill_insns)
                free(test->fill_insns);
        close(fd_prog);
        for (i = 0; i < MAX_NR_MAPS; i++)
                close(map_fds[i]);
        sched_yield();
        return;
fail_log:
        (*errors)++;
        printf("%s", bpf_vlog);
        goto close_fds;
}
static bool is_admin(void)
{
        cap_flag_value_t net_priv = CAP_CLEAR;
        bool perfmon_priv = false;
        bool bpf_priv = false;
        struct libcap *cap;
        cap_t caps;

#ifdef CAP_IS_SUPPORTED
        if (!CAP_IS_SUPPORTED(CAP_SETFCAP)) {
                perror("cap_get_flag");
                return false;
        }
#endif
        caps = cap_get_proc();
        if (!caps) {
                perror("cap_get_proc");
                return false;
        }
        cap = (struct libcap *)caps;
        bpf_priv = cap->data[1].effective & (1 << (39 /* CAP_BPF */ - 32));
        perfmon_priv = cap->data[1].effective & (1 << (38 /* CAP_PERFMON */ - 32));
        if (cap_get_flag(caps, CAP_NET_ADMIN, CAP_EFFECTIVE, &net_priv))
                perror("cap_get_flag NET");
        if (cap_free(caps))
                perror("cap_free");
        return bpf_priv && perfmon_priv && net_priv == CAP_SET;
}
static void get_unpriv_disabled(void)
{
        char buf[2];
        FILE *fd;

        fd = fopen("/proc/sys/"UNPRIV_SYSCTL, "r");
        if (!fd) {
                perror("fopen /proc/sys/"UNPRIV_SYSCTL);
                unpriv_disabled = true;
                return;
        }
        if (fgets(buf, 2, fd) == buf && atoi(buf))
                unpriv_disabled = true;
        fclose(fd);
}
static bool test_as_unpriv(struct bpf_test *test)
{
        return !test->prog_type ||
               test->prog_type == BPF_PROG_TYPE_SOCKET_FILTER ||
               test->prog_type == BPF_PROG_TYPE_CGROUP_SKB;
}
static int do_test(bool unpriv, unsigned int from, unsigned int to)
{
        int i, passes = 0, errors = 0;

        for (i = from; i < to; i++) {
                struct bpf_test *test = &tests[i];

                /* Program types that are not supported under non-root are
                 * silently skipped right away.
                 */
                if (test_as_unpriv(test) && unpriv_disabled) {
                        printf("#%d/u %s SKIP\n", i, test->descr);
                        skips++;
                } else if (test_as_unpriv(test)) {
                        if (!unpriv)
                                set_admin(false);
                        printf("#%d/u %s ", i, test->descr);
                        do_test_single(test, true, &passes, &errors);
                        if (!unpriv)
                                set_admin(true);
                }

                if (unpriv) {
                        printf("#%d/p %s SKIP\n", i, test->descr);
                        skips++;
                } else {
                        printf("#%d/p %s ", i, test->descr);
                        do_test_single(test, false, &passes, &errors);
                }
        }

        printf("Summary: %d PASSED, %d SKIPPED, %d FAILED\n", passes,
               skips, errors);
        return errors ? EXIT_FAILURE : EXIT_SUCCESS;
}
int main(int argc, char **argv)
{
        unsigned int from = 0, to = ARRAY_SIZE(tests);
        bool unpriv = !is_admin();
        int arg = 1;

        if (argc > 1 && strcmp(argv[1], "-v") == 0) {
                arg++;
                verbose = true;
                argc--;
        }

        if (argc == 3) {
                unsigned int l = atoi(argv[arg]);
                unsigned int u = atoi(argv[arg + 1]);

                if (l < to && u < to) {
                        from = l;
                        to   = u + 1;
                }
        } else if (argc == 2) {
                unsigned int t = atoi(argv[arg]);

                if (t < to) {
                        from = t;
                        to   = t + 1;
                }
        }

        get_unpriv_disabled();
        if (unpriv && unpriv_disabled) {
                printf("Cannot run as unprivileged user with sysctl %s.\n",
                       UNPRIV_SYSCTL);
                return EXIT_FAILURE;
        }

        bpf_semi_rand_init();
        return do_test(unpriv, from, to);
}
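/* Usage (illustrative invocations, matching the argument parsing above):
 *	./test_verifier		run all tests
 *	./test_verifier 5	run only test #5
 *	./test_verifier 5 10	run tests #5 through #10
 *	./test_verifier -v 5	run test #5 with verbose verifier logs
 */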