2 * Testsuite for eBPF verifier
4 * Copyright (c) 2014 PLUMgrid, http://plumgrid.com
5 * Copyright (c) 2017 Facebook
6 * Copyright (c) 2018 Covalent IO, Inc. http://covalent.io
8 * This program is free software; you can redistribute it and/or
9 * modify it under the terms of version 2 of the GNU General Public
10 * License as published by the Free Software Foundation.
14 #include <asm/types.h>
15 #include <linux/types.h>
27 #include <sys/capability.h>
29 #include <linux/unistd.h>
30 #include <linux/filter.h>
31 #include <linux/bpf_perf_event.h>
32 #include <linux/bpf.h>
33 #include <linux/if_ether.h>
38 # include "autoconf.h"
40 # if defined(__i386) || defined(__x86_64) || defined(__s390x__) || defined(__aarch64__)
41 # define CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS 1
44 #include "bpf_rlimit.h"
47 #include "../../../include/linux/filter.h"
49 #define MAX_INSNS BPF_MAXINSNS
51 #define MAX_NR_MAPS 13
52 #define MAX_TEST_RUNS 8
53 #define POINTER_VALUE 0xcafe4all
54 #define TEST_DATA_LEN 64
56 #define F_NEEDS_EFFICIENT_UNALIGNED_ACCESS (1 << 0)
57 #define F_LOAD_WITH_STRICT_ALIGNMENT (1 << 1)
59 #define UNPRIV_SYSCTL "kernel/unprivileged_bpf_disabled"
60 static bool unpriv_disabled = false;
64 struct bpf_insn insns[MAX_INSNS];
65 int fixup_map_hash_8b[MAX_FIXUPS];
66 int fixup_map_hash_48b[MAX_FIXUPS];
67 int fixup_map_hash_16b[MAX_FIXUPS];
68 int fixup_map_array_48b[MAX_FIXUPS];
69 int fixup_map_sockmap[MAX_FIXUPS];
70 int fixup_map_sockhash[MAX_FIXUPS];
71 int fixup_map_xskmap[MAX_FIXUPS];
72 int fixup_map_stacktrace[MAX_FIXUPS];
73 int fixup_prog1[MAX_FIXUPS];
74 int fixup_prog2[MAX_FIXUPS];
75 int fixup_map_in_map[MAX_FIXUPS];
76 int fixup_cgroup_storage[MAX_FIXUPS];
77 int fixup_percpu_cgroup_storage[MAX_FIXUPS];
79 const char *errstr_unpriv;
80 uint32_t retval, retval_unpriv, insn_processed;
85 } result, result_unpriv;
86 enum bpf_prog_type prog_type;
88 __u8 data[TEST_DATA_LEN];
89 void (*fill_helper)(struct bpf_test *self);
92 uint32_t retval, retval_unpriv;
94 __u8 data[TEST_DATA_LEN];
95 __u64 data64[TEST_DATA_LEN / 8];
97 } retvals[MAX_TEST_RUNS];
100 /* Note we want this to be 64 bit aligned so that the end of our array is
101 * actually the end of the structure.
103 #define MAX_ENTRIES 11
107 int foo[MAX_ENTRIES];
/*
 * Fill helper: generates a BPF_MAXINSNS-long program that alternates
 * {load skb->data[0], bpf_skb_vlan_push} blocks with
 * {load skb->data[0], bpf_skb_vlan_pop} blocks, pads the remainder with
 * R0 moves, and terminates with a single exit instruction.
 *
 * NOTE(review): lines appear elided from this chunk (the declarations of
 * 'i'/'j', the "i++;" steps following the conditional jumps, and the
 * loops' closing braces are not visible) -- verify against the full file
 * before editing.
 */
static void bpf_fill_ld_abs_vlan_push_pop(struct bpf_test *self)
/* test: {skb->data[0], vlan_push} x 68 + {skb->data[0], vlan_pop} x 68 */
unsigned int len = BPF_MAXINSNS;
struct bpf_insn *insn = self->insns;
/* preserve ctx (R1) in callee-saved R6 across helper calls */
insn[i++] = BPF_MOV64_REG(BPF_REG_6, BPF_REG_1);
for (j = 0; j < PUSH_CNT; j++) {
insn[i++] = BPF_LD_ABS(BPF_B, 0);
/* bail out towards the trailing exit if byte 0 != 0x34 */
insn[i] = BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0x34, len - i - 2);
/* bpf_skb_vlan_push(skb, proto=1, vlan_tci=2) */
insn[i++] = BPF_MOV64_REG(BPF_REG_1, BPF_REG_6);
insn[i++] = BPF_MOV64_IMM(BPF_REG_2, 1);
insn[i++] = BPF_MOV64_IMM(BPF_REG_3, 2);
insn[i++] = BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
BPF_FUNC_skb_vlan_push),
/* non-zero helper return -> bail out towards the trailing exit */
insn[i] = BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, len - i - 2);
for (j = 0; j < PUSH_CNT; j++) {
insn[i++] = BPF_LD_ABS(BPF_B, 0);
insn[i] = BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0x34, len - i - 2);
/* bpf_skb_vlan_pop(skb) */
insn[i++] = BPF_MOV64_REG(BPF_REG_1, BPF_REG_6);
insn[i++] = BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
BPF_FUNC_skb_vlan_pop),
insn[i] = BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, len - i - 2);
/* pad the remaining slots so insn[len - 1] is the lone exit */
for (; i < len - 1; i++)
insn[i] = BPF_ALU32_IMM(BPF_MOV, BPF_REG_0, 0xbef);
insn[len - 1] = BPF_EXIT_INSN();
/*
 * Fill helper: generates a program that conditionally jumps over a
 * BPF_LD_ABS instruction based on the first payload byte, exercising the
 * verifier's handling of jumps around LD_ABS.
 *
 * NOTE(review): lines appear elided from this chunk (the declaration of
 * 'i', the "i++;" steps and the function's closing brace are not
 * visible) -- verify against the full file before editing.
 */
static void bpf_fill_jump_around_ld_abs(struct bpf_test *self)
struct bpf_insn *insn = self->insns;
unsigned int len = BPF_MAXINSNS;
/* preserve ctx (R1) in callee-saved R6 */
insn[i++] = BPF_MOV64_REG(BPF_REG_6, BPF_REG_1);
insn[i++] = BPF_LD_ABS(BPF_B, 0);
/* if byte 0 == 10, jump forward past the second LD_ABS */
insn[i] = BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 10, len - i - 2);
insn[i++] = BPF_LD_ABS(BPF_B, 1);
insn[i] = BPF_EXIT_INSN();
/*
 * Fill helper: XORs a stream of pseudo-random 64-bit immediates
 * (BPF_LD_IMM64 via bpf_semi_rand_get()) into R0, then folds the upper
 * 32 bits into the lower half so the program's 32-bit return value
 * depends on all 64 accumulated bits.  self->retval serves as the
 * instruction budget on entry and is overwritten with the expected
 * 32-bit result.
 *
 * NOTE(review): lines appear elided from this chunk (the declarations of
 * 'i' and 'res', the host-side accumulation of 'res', and the copy of
 * tmp[] into insn[] are not visible) -- verify against the full file
 * before editing.
 */
static void bpf_fill_rand_ld_dw(struct bpf_test *self)
struct bpf_insn *insn = self->insns;
insn[i++] = BPF_MOV32_IMM(BPF_REG_0, 0);
while (i < self->retval) {
uint64_t val = bpf_semi_rand_get();
struct bpf_insn tmp[2] = { BPF_LD_IMM64(BPF_REG_1, val) };
insn[i++] = BPF_ALU64_REG(BPF_XOR, BPF_REG_0, BPF_REG_1);
/* fold the high 32 bits of the accumulator into the low 32 bits */
insn[i++] = BPF_MOV64_REG(BPF_REG_1, BPF_REG_0);
insn[i++] = BPF_ALU64_IMM(BPF_RSH, BPF_REG_1, 32);
insn[i++] = BPF_ALU64_REG(BPF_XOR, BPF_REG_0, BPF_REG_1);
insn[i] = BPF_EXIT_INSN();
self->retval = (uint32_t)res;
/* BPF_SK_LOOKUP contains 13 instructions, if you need to fix up maps.
 *
 * Zero-fills the stack slots fp-8 .. fp-48 (used as an on-stack
 * struct bpf_sock_tuple) and then calls bpf_sk_lookup_tcp() on that
 * tuple with netns/flags both 0; the looked-up socket pointer (or NULL)
 * is left in R0.  Clobbers R2-R5.
 */
#define BPF_SK_LOOKUP \
/* struct bpf_sock_tuple tuple = {} */ \
BPF_MOV64_IMM(BPF_REG_2, 0), \
BPF_STX_MEM(BPF_W, BPF_REG_10, BPF_REG_2, -8), \
BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_2, -16), \
BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_2, -24), \
BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_2, -32), \
BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_2, -40), \
BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_2, -48), \
/* sk = sk_lookup_tcp(ctx, &tuple, sizeof tuple, 0, 0) */ \
BPF_MOV64_REG(BPF_REG_2, BPF_REG_10), \
BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -48), \
BPF_MOV64_IMM(BPF_REG_3, sizeof(struct bpf_sock_tuple)), \
BPF_MOV64_IMM(BPF_REG_4, 0), \
BPF_MOV64_IMM(BPF_REG_5, 0), \
BPF_EMIT_CALL(BPF_FUNC_sk_lookup_tcp)
213 static struct bpf_test tests[] = {
217 BPF_MOV64_IMM(BPF_REG_1, 1),
218 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 2),
219 BPF_MOV64_IMM(BPF_REG_2, 3),
220 BPF_ALU64_REG(BPF_SUB, BPF_REG_1, BPF_REG_2),
221 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -1),
222 BPF_ALU64_IMM(BPF_MUL, BPF_REG_1, 3),
223 BPF_MOV64_REG(BPF_REG_0, BPF_REG_1),
230 "DIV32 by 0, zero check 1",
232 BPF_MOV32_IMM(BPF_REG_0, 42),
233 BPF_MOV32_IMM(BPF_REG_1, 0),
234 BPF_MOV32_IMM(BPF_REG_2, 1),
235 BPF_ALU32_REG(BPF_DIV, BPF_REG_2, BPF_REG_1),
242 "DIV32 by 0, zero check 2",
244 BPF_MOV32_IMM(BPF_REG_0, 42),
245 BPF_LD_IMM64(BPF_REG_1, 0xffffffff00000000LL),
246 BPF_MOV32_IMM(BPF_REG_2, 1),
247 BPF_ALU32_REG(BPF_DIV, BPF_REG_2, BPF_REG_1),
254 "DIV64 by 0, zero check",
256 BPF_MOV32_IMM(BPF_REG_0, 42),
257 BPF_MOV32_IMM(BPF_REG_1, 0),
258 BPF_MOV32_IMM(BPF_REG_2, 1),
259 BPF_ALU64_REG(BPF_DIV, BPF_REG_2, BPF_REG_1),
266 "MOD32 by 0, zero check 1",
268 BPF_MOV32_IMM(BPF_REG_0, 42),
269 BPF_MOV32_IMM(BPF_REG_1, 0),
270 BPF_MOV32_IMM(BPF_REG_2, 1),
271 BPF_ALU32_REG(BPF_MOD, BPF_REG_2, BPF_REG_1),
278 "MOD32 by 0, zero check 2",
280 BPF_MOV32_IMM(BPF_REG_0, 42),
281 BPF_LD_IMM64(BPF_REG_1, 0xffffffff00000000LL),
282 BPF_MOV32_IMM(BPF_REG_2, 1),
283 BPF_ALU32_REG(BPF_MOD, BPF_REG_2, BPF_REG_1),
290 "MOD64 by 0, zero check",
292 BPF_MOV32_IMM(BPF_REG_0, 42),
293 BPF_MOV32_IMM(BPF_REG_1, 0),
294 BPF_MOV32_IMM(BPF_REG_2, 1),
295 BPF_ALU64_REG(BPF_MOD, BPF_REG_2, BPF_REG_1),
302 "DIV32 by 0, zero check ok, cls",
304 BPF_MOV32_IMM(BPF_REG_0, 42),
305 BPF_MOV32_IMM(BPF_REG_1, 2),
306 BPF_MOV32_IMM(BPF_REG_2, 16),
307 BPF_ALU32_REG(BPF_DIV, BPF_REG_2, BPF_REG_1),
308 BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
311 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
316 "DIV32 by 0, zero check 1, cls",
318 BPF_MOV32_IMM(BPF_REG_1, 0),
319 BPF_MOV32_IMM(BPF_REG_0, 1),
320 BPF_ALU32_REG(BPF_DIV, BPF_REG_0, BPF_REG_1),
323 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
328 "DIV32 by 0, zero check 2, cls",
330 BPF_LD_IMM64(BPF_REG_1, 0xffffffff00000000LL),
331 BPF_MOV32_IMM(BPF_REG_0, 1),
332 BPF_ALU32_REG(BPF_DIV, BPF_REG_0, BPF_REG_1),
335 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
340 "DIV64 by 0, zero check, cls",
342 BPF_MOV32_IMM(BPF_REG_1, 0),
343 BPF_MOV32_IMM(BPF_REG_0, 1),
344 BPF_ALU64_REG(BPF_DIV, BPF_REG_0, BPF_REG_1),
347 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
352 "MOD32 by 0, zero check ok, cls",
354 BPF_MOV32_IMM(BPF_REG_0, 42),
355 BPF_MOV32_IMM(BPF_REG_1, 3),
356 BPF_MOV32_IMM(BPF_REG_2, 5),
357 BPF_ALU32_REG(BPF_MOD, BPF_REG_2, BPF_REG_1),
358 BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
361 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
366 "MOD32 by 0, zero check 1, cls",
368 BPF_MOV32_IMM(BPF_REG_1, 0),
369 BPF_MOV32_IMM(BPF_REG_0, 1),
370 BPF_ALU32_REG(BPF_MOD, BPF_REG_0, BPF_REG_1),
373 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
378 "MOD32 by 0, zero check 2, cls",
380 BPF_LD_IMM64(BPF_REG_1, 0xffffffff00000000LL),
381 BPF_MOV32_IMM(BPF_REG_0, 1),
382 BPF_ALU32_REG(BPF_MOD, BPF_REG_0, BPF_REG_1),
385 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
390 "MOD64 by 0, zero check 1, cls",
392 BPF_MOV32_IMM(BPF_REG_1, 0),
393 BPF_MOV32_IMM(BPF_REG_0, 2),
394 BPF_ALU64_REG(BPF_MOD, BPF_REG_0, BPF_REG_1),
397 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
402 "MOD64 by 0, zero check 2, cls",
404 BPF_MOV32_IMM(BPF_REG_1, 0),
405 BPF_MOV32_IMM(BPF_REG_0, -1),
406 BPF_ALU64_REG(BPF_MOD, BPF_REG_0, BPF_REG_1),
409 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
413 /* Just make sure that JITs used udiv/umod as otherwise we get
414 * an exception from INT_MIN/-1 overflow similarly as with div
418 "DIV32 overflow, check 1",
420 BPF_MOV32_IMM(BPF_REG_1, -1),
421 BPF_MOV32_IMM(BPF_REG_0, INT_MIN),
422 BPF_ALU32_REG(BPF_DIV, BPF_REG_0, BPF_REG_1),
425 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
430 "DIV32 overflow, check 2",
432 BPF_MOV32_IMM(BPF_REG_0, INT_MIN),
433 BPF_ALU32_IMM(BPF_DIV, BPF_REG_0, -1),
436 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
441 "DIV64 overflow, check 1",
443 BPF_MOV64_IMM(BPF_REG_1, -1),
444 BPF_LD_IMM64(BPF_REG_0, LLONG_MIN),
445 BPF_ALU64_REG(BPF_DIV, BPF_REG_0, BPF_REG_1),
448 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
453 "DIV64 overflow, check 2",
455 BPF_LD_IMM64(BPF_REG_0, LLONG_MIN),
456 BPF_ALU64_IMM(BPF_DIV, BPF_REG_0, -1),
459 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
464 "MOD32 overflow, check 1",
466 BPF_MOV32_IMM(BPF_REG_1, -1),
467 BPF_MOV32_IMM(BPF_REG_0, INT_MIN),
468 BPF_ALU32_REG(BPF_MOD, BPF_REG_0, BPF_REG_1),
471 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
476 "MOD32 overflow, check 2",
478 BPF_MOV32_IMM(BPF_REG_0, INT_MIN),
479 BPF_ALU32_IMM(BPF_MOD, BPF_REG_0, -1),
482 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
487 "MOD64 overflow, check 1",
489 BPF_MOV64_IMM(BPF_REG_1, -1),
490 BPF_LD_IMM64(BPF_REG_2, LLONG_MIN),
491 BPF_MOV64_REG(BPF_REG_3, BPF_REG_2),
492 BPF_ALU64_REG(BPF_MOD, BPF_REG_2, BPF_REG_1),
493 BPF_MOV32_IMM(BPF_REG_0, 0),
494 BPF_JMP_REG(BPF_JNE, BPF_REG_3, BPF_REG_2, 1),
495 BPF_MOV32_IMM(BPF_REG_0, 1),
498 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
503 "MOD64 overflow, check 2",
505 BPF_LD_IMM64(BPF_REG_2, LLONG_MIN),
506 BPF_MOV64_REG(BPF_REG_3, BPF_REG_2),
507 BPF_ALU64_IMM(BPF_MOD, BPF_REG_2, -1),
508 BPF_MOV32_IMM(BPF_REG_0, 0),
509 BPF_JMP_REG(BPF_JNE, BPF_REG_3, BPF_REG_2, 1),
510 BPF_MOV32_IMM(BPF_REG_0, 1),
513 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
518 "xor32 zero extend check",
520 BPF_MOV32_IMM(BPF_REG_2, -1),
521 BPF_ALU64_IMM(BPF_LSH, BPF_REG_2, 32),
522 BPF_ALU64_IMM(BPF_OR, BPF_REG_2, 0xffff),
523 BPF_ALU32_REG(BPF_XOR, BPF_REG_2, BPF_REG_2),
524 BPF_MOV32_IMM(BPF_REG_0, 2),
525 BPF_JMP_IMM(BPF_JNE, BPF_REG_2, 0, 1),
526 BPF_MOV32_IMM(BPF_REG_0, 1),
529 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
537 .errstr = "unknown opcode 00",
545 .errstr = "R0 !read_ok",
554 .errstr = "unreachable",
560 BPF_JMP_IMM(BPF_JA, 0, 0, 1),
561 BPF_JMP_IMM(BPF_JA, 0, 0, 0),
564 .errstr = "unreachable",
570 BPF_JMP_IMM(BPF_JA, 0, 0, 1),
573 .errstr = "jump out of range",
577 "out of range jump2",
579 BPF_JMP_IMM(BPF_JA, 0, 0, -2),
582 .errstr = "jump out of range",
588 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, 1),
589 BPF_LD_IMM64(BPF_REG_0, 0),
590 BPF_LD_IMM64(BPF_REG_0, 0),
591 BPF_LD_IMM64(BPF_REG_0, 1),
592 BPF_LD_IMM64(BPF_REG_0, 1),
593 BPF_MOV64_IMM(BPF_REG_0, 2),
596 .errstr = "invalid BPF_LD_IMM insn",
597 .errstr_unpriv = "R1 pointer comparison",
603 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, 1),
604 BPF_LD_IMM64(BPF_REG_0, 0),
605 BPF_LD_IMM64(BPF_REG_0, 0),
606 BPF_LD_IMM64(BPF_REG_0, 1),
607 BPF_LD_IMM64(BPF_REG_0, 1),
610 .errstr = "invalid BPF_LD_IMM insn",
611 .errstr_unpriv = "R1 pointer comparison",
617 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, 1),
618 BPF_RAW_INSN(BPF_LD | BPF_IMM | BPF_DW, 0, 0, 0, 0),
619 BPF_LD_IMM64(BPF_REG_0, 0),
620 BPF_LD_IMM64(BPF_REG_0, 0),
621 BPF_LD_IMM64(BPF_REG_0, 1),
622 BPF_LD_IMM64(BPF_REG_0, 1),
625 .errstr = "invalid bpf_ld_imm64 insn",
631 BPF_RAW_INSN(BPF_LD | BPF_IMM | BPF_DW, 0, 0, 0, 0),
634 .errstr = "invalid bpf_ld_imm64 insn",
640 BPF_RAW_INSN(BPF_LD | BPF_IMM | BPF_DW, 0, 0, 0, 0),
642 .errstr = "invalid bpf_ld_imm64 insn",
648 BPF_RAW_INSN(BPF_LD | BPF_IMM | BPF_DW, 0, 0, 0, 0),
649 BPF_RAW_INSN(0, 0, 0, 0, 0),
657 BPF_RAW_INSN(BPF_LD | BPF_IMM | BPF_DW, 0, 0, 0, 1),
658 BPF_RAW_INSN(0, 0, 0, 0, 1),
667 BPF_RAW_INSN(BPF_LD | BPF_IMM | BPF_DW, 0, 0, 1, 1),
668 BPF_RAW_INSN(0, 0, 0, 0, 1),
671 .errstr = "uses reserved fields",
677 BPF_RAW_INSN(BPF_LD | BPF_IMM | BPF_DW, 0, 0, 0, 1),
678 BPF_RAW_INSN(0, 0, 0, 1, 1),
681 .errstr = "invalid bpf_ld_imm64 insn",
687 BPF_RAW_INSN(BPF_LD | BPF_IMM | BPF_DW, 0, 0, 0, 1),
688 BPF_RAW_INSN(0, BPF_REG_1, 0, 0, 1),
691 .errstr = "invalid bpf_ld_imm64 insn",
697 BPF_RAW_INSN(BPF_LD | BPF_IMM | BPF_DW, 0, 0, 0, 1),
698 BPF_RAW_INSN(0, 0, BPF_REG_1, 0, 1),
701 .errstr = "invalid bpf_ld_imm64 insn",
707 BPF_MOV64_IMM(BPF_REG_1, 0),
708 BPF_RAW_INSN(BPF_LD | BPF_IMM | BPF_DW, 0, BPF_REG_1, 0, 1),
709 BPF_RAW_INSN(0, 0, 0, 0, 1),
712 .errstr = "not pointing to valid bpf_map",
718 BPF_MOV64_IMM(BPF_REG_1, 0),
719 BPF_RAW_INSN(BPF_LD | BPF_IMM | BPF_DW, 0, BPF_REG_1, 0, 1),
720 BPF_RAW_INSN(0, 0, BPF_REG_1, 0, 1),
723 .errstr = "invalid bpf_ld_imm64 insn",
729 BPF_MOV64_IMM(BPF_REG_0, 1),
730 BPF_ALU32_IMM(BPF_ARSH, BPF_REG_0, 5),
739 BPF_LD_IMM64(BPF_REG_0, 0x1122334485667788),
740 BPF_ALU32_IMM(BPF_ARSH, BPF_REG_0, 7),
749 BPF_MOV64_IMM(BPF_REG_0, 1),
750 BPF_MOV64_IMM(BPF_REG_1, 5),
751 BPF_ALU32_REG(BPF_ARSH, BPF_REG_0, BPF_REG_1),
760 BPF_LD_IMM64(BPF_REG_0, 0xffff55667788),
761 BPF_MOV64_IMM(BPF_REG_1, 15),
762 BPF_ALU32_REG(BPF_ARSH, BPF_REG_0, BPF_REG_1),
771 BPF_MOV64_IMM(BPF_REG_0, 1),
772 BPF_ALU64_IMM(BPF_ARSH, BPF_REG_0, 5),
780 BPF_MOV64_IMM(BPF_REG_0, 1),
781 BPF_MOV64_IMM(BPF_REG_1, 5),
782 BPF_ALU64_REG(BPF_ARSH, BPF_REG_0, BPF_REG_1),
790 BPF_ALU64_REG(BPF_MOV, BPF_REG_0, BPF_REG_2),
792 .errstr = "not an exit",
798 BPF_JMP_IMM(BPF_JA, 0, 0, -1),
801 .errstr = "back-edge",
807 BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
808 BPF_MOV64_REG(BPF_REG_2, BPF_REG_0),
809 BPF_MOV64_REG(BPF_REG_3, BPF_REG_0),
810 BPF_JMP_IMM(BPF_JA, 0, 0, -4),
813 .errstr = "back-edge",
819 BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
820 BPF_MOV64_REG(BPF_REG_2, BPF_REG_0),
821 BPF_MOV64_REG(BPF_REG_3, BPF_REG_0),
822 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, -3),
825 .errstr = "back-edge",
829 "read uninitialized register",
831 BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
834 .errstr = "R2 !read_ok",
838 "read invalid register",
840 BPF_MOV64_REG(BPF_REG_0, -1),
843 .errstr = "R15 is invalid",
847 "program doesn't init R0 before exit",
849 BPF_ALU64_REG(BPF_MOV, BPF_REG_2, BPF_REG_1),
852 .errstr = "R0 !read_ok",
856 "program doesn't init R0 before exit in all branches",
858 BPF_JMP_IMM(BPF_JGE, BPF_REG_1, 0, 2),
859 BPF_MOV64_IMM(BPF_REG_0, 1),
860 BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 2),
863 .errstr = "R0 !read_ok",
864 .errstr_unpriv = "R1 pointer comparison",
868 "stack out of bounds",
870 BPF_ST_MEM(BPF_DW, BPF_REG_10, 8, 0),
873 .errstr = "invalid stack",
877 "invalid call insn1",
879 BPF_RAW_INSN(BPF_JMP | BPF_CALL | BPF_X, 0, 0, 0, 0),
882 .errstr = "unknown opcode 8d",
886 "invalid call insn2",
888 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 1, 0),
891 .errstr = "BPF_CALL uses reserved",
895 "invalid function call",
897 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, 1234567),
900 .errstr = "invalid func unknown#1234567",
904 "uninitialized stack1",
906 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
907 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
908 BPF_LD_MAP_FD(BPF_REG_1, 0),
909 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
910 BPF_FUNC_map_lookup_elem),
913 .fixup_map_hash_8b = { 2 },
914 .errstr = "invalid indirect read from stack",
918 "uninitialized stack2",
920 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
921 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_2, -8),
924 .errstr = "invalid read from stack",
928 "invalid fp arithmetic",
929 /* If this gets ever changed, make sure JITs can deal with it. */
931 BPF_MOV64_IMM(BPF_REG_0, 0),
932 BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
933 BPF_ALU64_IMM(BPF_SUB, BPF_REG_1, 8),
934 BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_0, 0),
937 .errstr = "R1 subtraction from stack pointer",
941 "non-invalid fp arithmetic",
943 BPF_MOV64_IMM(BPF_REG_0, 0),
944 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -8),
950 "invalid argument register",
952 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
953 BPF_FUNC_get_cgroup_classid),
954 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
955 BPF_FUNC_get_cgroup_classid),
958 .errstr = "R1 !read_ok",
960 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
963 "non-invalid argument register",
965 BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_1),
966 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
967 BPF_FUNC_get_cgroup_classid),
968 BPF_ALU64_REG(BPF_MOV, BPF_REG_1, BPF_REG_6),
969 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
970 BPF_FUNC_get_cgroup_classid),
974 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
977 "check valid spill/fill",
979 /* spill R1(ctx) into stack */
980 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_1, -8),
981 /* fill it back into R2 */
982 BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_10, -8),
983 /* should be able to access R0 = *(R2 + 8) */
984 /* BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_2, 8), */
985 BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
988 .errstr_unpriv = "R0 leaks addr",
990 .result_unpriv = REJECT,
991 .retval = POINTER_VALUE,
994 "check valid spill/fill, skb mark",
996 BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_1),
997 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_6, -8),
998 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_10, -8),
999 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_0,
1000 offsetof(struct __sk_buff, mark)),
1004 .result_unpriv = ACCEPT,
1007 "check corrupted spill/fill",
1009 /* spill R1(ctx) into stack */
1010 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_1, -8),
1011 /* mess up with R1 pointer on stack */
1012 BPF_ST_MEM(BPF_B, BPF_REG_10, -7, 0x23),
1013 /* fill back into R0 is fine for priv.
1014 * R0 now becomes SCALAR_VALUE.
1016 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_10, -8),
1017 /* Load from R0 should fail. */
1018 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_0, 8),
1021 .errstr_unpriv = "attempt to corrupt spilled",
1022 .errstr = "R0 invalid mem access 'inv",
1026 "check corrupted spill/fill, LSB",
1028 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_1, -8),
1029 BPF_ST_MEM(BPF_H, BPF_REG_10, -8, 0xcafe),
1030 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_10, -8),
1033 .errstr_unpriv = "attempt to corrupt spilled",
1034 .result_unpriv = REJECT,
1036 .retval = POINTER_VALUE,
1039 "check corrupted spill/fill, MSB",
1041 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_1, -8),
1042 BPF_ST_MEM(BPF_W, BPF_REG_10, -4, 0x12345678),
1043 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_10, -8),
1046 .errstr_unpriv = "attempt to corrupt spilled",
1047 .result_unpriv = REJECT,
1049 .retval = POINTER_VALUE,
1052 "invalid src register in STX",
1054 BPF_STX_MEM(BPF_B, BPF_REG_10, -1, -1),
1057 .errstr = "R15 is invalid",
1061 "invalid dst register in STX",
1063 BPF_STX_MEM(BPF_B, 14, BPF_REG_10, -1),
1066 .errstr = "R14 is invalid",
1070 "invalid dst register in ST",
1072 BPF_ST_MEM(BPF_B, 14, -1, -1),
1075 .errstr = "R14 is invalid",
1079 "invalid src register in LDX",
1081 BPF_LDX_MEM(BPF_B, BPF_REG_0, 12, 0),
1084 .errstr = "R12 is invalid",
1088 "invalid dst register in LDX",
1090 BPF_LDX_MEM(BPF_B, 11, BPF_REG_1, 0),
1093 .errstr = "R11 is invalid",
1099 BPF_RAW_INSN(0, 0, 0, 0, 0),
1102 .errstr = "unknown opcode 00",
1108 BPF_RAW_INSN(1, 0, 0, 0, 0),
1111 .errstr = "BPF_LDX uses reserved fields",
1117 BPF_RAW_INSN(-1, 0, 0, 0, 0),
1120 .errstr = "unknown opcode ff",
1126 BPF_RAW_INSN(-1, -1, -1, -1, -1),
1129 .errstr = "unknown opcode ff",
1135 BPF_RAW_INSN(0x7f, -1, -1, -1, -1),
1138 .errstr = "BPF_ALU uses reserved fields",
1142 "misaligned read from stack",
1144 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
1145 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_2, -4),
1148 .errstr = "misaligned stack access",
1152 "invalid map_fd for function call",
1154 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
1155 BPF_ALU64_REG(BPF_MOV, BPF_REG_2, BPF_REG_10),
1156 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
1157 BPF_LD_MAP_FD(BPF_REG_1, 0),
1158 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
1159 BPF_FUNC_map_delete_elem),
1162 .errstr = "fd 0 is not pointing to valid bpf_map",
1166 "don't check return value before access",
1168 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
1169 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
1170 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
1171 BPF_LD_MAP_FD(BPF_REG_1, 0),
1172 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
1173 BPF_FUNC_map_lookup_elem),
1174 BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, 0),
1177 .fixup_map_hash_8b = { 3 },
1178 .errstr = "R0 invalid mem access 'map_value_or_null'",
1182 "access memory with incorrect alignment",
1184 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
1185 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
1186 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
1187 BPF_LD_MAP_FD(BPF_REG_1, 0),
1188 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
1189 BPF_FUNC_map_lookup_elem),
1190 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 1),
1191 BPF_ST_MEM(BPF_DW, BPF_REG_0, 4, 0),
1194 .fixup_map_hash_8b = { 3 },
1195 .errstr = "misaligned value access",
1197 .flags = F_LOAD_WITH_STRICT_ALIGNMENT,
1200 "sometimes access memory with incorrect alignment",
1202 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
1203 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
1204 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
1205 BPF_LD_MAP_FD(BPF_REG_1, 0),
1206 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
1207 BPF_FUNC_map_lookup_elem),
1208 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 2),
1209 BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, 0),
1211 BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, 1),
1214 .fixup_map_hash_8b = { 3 },
1215 .errstr = "R0 invalid mem access",
1216 .errstr_unpriv = "R0 leaks addr",
1218 .flags = F_LOAD_WITH_STRICT_ALIGNMENT,
1223 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
1224 BPF_STX_MEM(BPF_DW, BPF_REG_2, BPF_REG_1, -8),
1225 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, 1),
1226 BPF_ST_MEM(BPF_DW, BPF_REG_2, -8, 0),
1227 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 1, 1),
1228 BPF_ST_MEM(BPF_DW, BPF_REG_2, -16, 1),
1229 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 2, 1),
1230 BPF_ST_MEM(BPF_DW, BPF_REG_2, -8, 2),
1231 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 3, 1),
1232 BPF_ST_MEM(BPF_DW, BPF_REG_2, -16, 3),
1233 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 4, 1),
1234 BPF_ST_MEM(BPF_DW, BPF_REG_2, -8, 4),
1235 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 5, 1),
1236 BPF_ST_MEM(BPF_DW, BPF_REG_2, -32, 5),
1237 BPF_MOV64_IMM(BPF_REG_0, 0),
1240 .errstr_unpriv = "R1 pointer comparison",
1241 .result_unpriv = REJECT,
1247 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
1248 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, 2),
1249 BPF_ST_MEM(BPF_DW, BPF_REG_2, -8, 0),
1250 BPF_JMP_IMM(BPF_JA, 0, 0, 14),
1251 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 1, 2),
1252 BPF_ST_MEM(BPF_DW, BPF_REG_2, -16, 0),
1253 BPF_JMP_IMM(BPF_JA, 0, 0, 11),
1254 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 2, 2),
1255 BPF_ST_MEM(BPF_DW, BPF_REG_2, -32, 0),
1256 BPF_JMP_IMM(BPF_JA, 0, 0, 8),
1257 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 3, 2),
1258 BPF_ST_MEM(BPF_DW, BPF_REG_2, -40, 0),
1259 BPF_JMP_IMM(BPF_JA, 0, 0, 5),
1260 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 4, 2),
1261 BPF_ST_MEM(BPF_DW, BPF_REG_2, -48, 0),
1262 BPF_JMP_IMM(BPF_JA, 0, 0, 2),
1263 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 5, 1),
1264 BPF_ST_MEM(BPF_DW, BPF_REG_2, -56, 0),
1265 BPF_MOV64_IMM(BPF_REG_0, 0),
1268 .errstr_unpriv = "R1 pointer comparison",
1269 .result_unpriv = REJECT,
1275 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
1276 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, 3),
1277 BPF_ST_MEM(BPF_DW, BPF_REG_2, -8, 0),
1278 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
1279 BPF_JMP_IMM(BPF_JA, 0, 0, 19),
1280 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 1, 3),
1281 BPF_ST_MEM(BPF_DW, BPF_REG_2, -16, 0),
1282 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -16),
1283 BPF_JMP_IMM(BPF_JA, 0, 0, 15),
1284 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 2, 3),
1285 BPF_ST_MEM(BPF_DW, BPF_REG_2, -32, 0),
1286 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -32),
1287 BPF_JMP_IMM(BPF_JA, 0, 0, 11),
1288 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 3, 3),
1289 BPF_ST_MEM(BPF_DW, BPF_REG_2, -40, 0),
1290 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -40),
1291 BPF_JMP_IMM(BPF_JA, 0, 0, 7),
1292 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 4, 3),
1293 BPF_ST_MEM(BPF_DW, BPF_REG_2, -48, 0),
1294 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -48),
1295 BPF_JMP_IMM(BPF_JA, 0, 0, 3),
1296 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 5, 0),
1297 BPF_ST_MEM(BPF_DW, BPF_REG_2, -56, 0),
1298 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -56),
1299 BPF_LD_MAP_FD(BPF_REG_1, 0),
1300 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
1301 BPF_FUNC_map_delete_elem),
1304 .fixup_map_hash_8b = { 24 },
1305 .errstr_unpriv = "R1 pointer comparison",
1306 .result_unpriv = REJECT,
1313 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 1),
1314 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 2),
1315 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 3),
1316 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 4),
1317 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 1),
1318 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 2),
1319 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 3),
1320 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 4),
1321 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 1),
1322 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 2),
1323 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 3),
1324 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 4),
1325 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 1),
1326 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 2),
1327 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 3),
1328 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 4),
1329 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 1),
1330 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 2),
1331 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 3),
1332 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 4),
1333 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 1),
1334 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 2),
1335 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 3),
1336 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 4),
1337 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 1),
1338 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 2),
1339 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 3),
1340 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 4),
1341 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 1),
1342 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 2),
1343 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 3),
1344 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 4),
1345 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 1),
1346 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 2),
1347 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 3),
1348 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 4),
1349 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 0),
1350 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 0),
1351 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 0),
1352 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 0),
1353 BPF_MOV64_IMM(BPF_REG_0, 0),
1356 .errstr_unpriv = "R1 pointer comparison",
1357 .result_unpriv = REJECT,
1363 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
1364 BPF_MOV64_REG(BPF_REG_3, BPF_REG_2),
1365 BPF_JMP_IMM(BPF_JGE, BPF_REG_1, 0, 2),
1366 BPF_STX_MEM(BPF_DW, BPF_REG_2, BPF_REG_3, -8),
1367 BPF_JMP_IMM(BPF_JA, 0, 0, 2),
1368 BPF_STX_MEM(BPF_DW, BPF_REG_2, BPF_REG_2, -8),
1369 BPF_JMP_IMM(BPF_JA, 0, 0, 0),
1370 BPF_MOV64_IMM(BPF_REG_0, 0),
1371 BPF_JMP_IMM(BPF_JGE, BPF_REG_1, 0, 2),
1372 BPF_STX_MEM(BPF_DW, BPF_REG_2, BPF_REG_3, -8),
1373 BPF_JMP_IMM(BPF_JA, 0, 0, 2),
1374 BPF_STX_MEM(BPF_DW, BPF_REG_2, BPF_REG_2, -8),
1375 BPF_JMP_IMM(BPF_JA, 0, 0, 0),
1376 BPF_MOV64_IMM(BPF_REG_0, 0),
1377 BPF_JMP_IMM(BPF_JGE, BPF_REG_1, 0, 2),
1378 BPF_STX_MEM(BPF_DW, BPF_REG_2, BPF_REG_3, -8),
1379 BPF_JMP_IMM(BPF_JA, 0, 0, 2),
1380 BPF_STX_MEM(BPF_DW, BPF_REG_2, BPF_REG_2, -8),
1381 BPF_JMP_IMM(BPF_JA, 0, 0, 0),
1382 BPF_MOV64_IMM(BPF_REG_0, 0),
1383 BPF_JMP_IMM(BPF_JGE, BPF_REG_1, 0, 2),
1384 BPF_STX_MEM(BPF_DW, BPF_REG_2, BPF_REG_3, -8),
1385 BPF_JMP_IMM(BPF_JA, 0, 0, 2),
1386 BPF_STX_MEM(BPF_DW, BPF_REG_2, BPF_REG_2, -8),
1387 BPF_JMP_IMM(BPF_JA, 0, 0, 0),
1388 BPF_MOV64_IMM(BPF_REG_0, 0),
1389 BPF_JMP_IMM(BPF_JGE, BPF_REG_1, 0, 2),
1390 BPF_STX_MEM(BPF_DW, BPF_REG_2, BPF_REG_3, -8),
1391 BPF_JMP_IMM(BPF_JA, 0, 0, 2),
1392 BPF_STX_MEM(BPF_DW, BPF_REG_2, BPF_REG_2, -8),
1393 BPF_JMP_IMM(BPF_JA, 0, 0, 0),
1394 BPF_MOV64_IMM(BPF_REG_0, 0),
1397 .errstr_unpriv = "R1 pointer comparison",
1398 .result_unpriv = REJECT,
1402 "access skb fields ok",
1404 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
1405 offsetof(struct __sk_buff, len)),
1406 BPF_JMP_IMM(BPF_JGE, BPF_REG_0, 0, 1),
1407 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
1408 offsetof(struct __sk_buff, mark)),
1409 BPF_JMP_IMM(BPF_JGE, BPF_REG_0, 0, 1),
1410 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
1411 offsetof(struct __sk_buff, pkt_type)),
1412 BPF_JMP_IMM(BPF_JGE, BPF_REG_0, 0, 1),
1413 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
1414 offsetof(struct __sk_buff, queue_mapping)),
1415 BPF_JMP_IMM(BPF_JGE, BPF_REG_0, 0, 0),
1416 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
1417 offsetof(struct __sk_buff, protocol)),
1418 BPF_JMP_IMM(BPF_JGE, BPF_REG_0, 0, 0),
1419 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
1420 offsetof(struct __sk_buff, vlan_present)),
1421 BPF_JMP_IMM(BPF_JGE, BPF_REG_0, 0, 0),
1422 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
1423 offsetof(struct __sk_buff, vlan_tci)),
1424 BPF_JMP_IMM(BPF_JGE, BPF_REG_0, 0, 0),
1425 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
1426 offsetof(struct __sk_buff, napi_id)),
1427 BPF_JMP_IMM(BPF_JGE, BPF_REG_0, 0, 0),
1433 "access skb fields bad1",
1435 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1, -4),
1438 .errstr = "invalid bpf_context access",
1442 "access skb fields bad2",
1444 BPF_JMP_IMM(BPF_JGE, BPF_REG_1, 0, 9),
1445 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
1446 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
1447 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
1448 BPF_LD_MAP_FD(BPF_REG_1, 0),
1449 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
1450 BPF_FUNC_map_lookup_elem),
1451 BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 1),
1453 BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
1454 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
1455 offsetof(struct __sk_buff, pkt_type)),
1458 .fixup_map_hash_8b = { 4 },
1459 .errstr = "different pointers",
1460 .errstr_unpriv = "R1 pointer comparison",
1464 "access skb fields bad3",
1466 BPF_JMP_IMM(BPF_JGE, BPF_REG_1, 0, 2),
1467 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
1468 offsetof(struct __sk_buff, pkt_type)),
1470 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
1471 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
1472 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
1473 BPF_LD_MAP_FD(BPF_REG_1, 0),
1474 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
1475 BPF_FUNC_map_lookup_elem),
1476 BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 1),
1478 BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
1479 BPF_JMP_IMM(BPF_JA, 0, 0, -12),
1481 .fixup_map_hash_8b = { 6 },
1482 .errstr = "different pointers",
1483 .errstr_unpriv = "R1 pointer comparison",
1487 "access skb fields bad4",
1489 BPF_JMP_IMM(BPF_JGE, BPF_REG_1, 0, 3),
1490 BPF_LDX_MEM(BPF_W, BPF_REG_1, BPF_REG_1,
1491 offsetof(struct __sk_buff, len)),
1492 BPF_MOV64_IMM(BPF_REG_0, 0),
1494 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
1495 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
1496 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
1497 BPF_LD_MAP_FD(BPF_REG_1, 0),
1498 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
1499 BPF_FUNC_map_lookup_elem),
1500 BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 1),
1502 BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
1503 BPF_JMP_IMM(BPF_JA, 0, 0, -13),
1505 .fixup_map_hash_8b = { 7 },
1506 .errstr = "different pointers",
1507 .errstr_unpriv = "R1 pointer comparison",
1511 "invalid access __sk_buff family",
1513 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
1514 offsetof(struct __sk_buff, family)),
1517 .errstr = "invalid bpf_context access",
1521 "invalid access __sk_buff remote_ip4",
1523 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
1524 offsetof(struct __sk_buff, remote_ip4)),
1527 .errstr = "invalid bpf_context access",
1531 "invalid access __sk_buff local_ip4",
1533 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
1534 offsetof(struct __sk_buff, local_ip4)),
1537 .errstr = "invalid bpf_context access",
1541 "invalid access __sk_buff remote_ip6",
1543 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
1544 offsetof(struct __sk_buff, remote_ip6)),
1547 .errstr = "invalid bpf_context access",
1551 "invalid access __sk_buff local_ip6",
1553 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
1554 offsetof(struct __sk_buff, local_ip6)),
1557 .errstr = "invalid bpf_context access",
1561 "invalid access __sk_buff remote_port",
1563 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
1564 offsetof(struct __sk_buff, remote_port)),
1567 .errstr = "invalid bpf_context access",
1571	"invalid access __sk_buff local_port", /* was mis-named "remote_port" (copy-paste); insn below reads local_port */
1573	BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
1574		    offsetof(struct __sk_buff, local_port)),
1577	.errstr = "invalid bpf_context access",
1581 "valid access __sk_buff family",
1583 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
1584 offsetof(struct __sk_buff, family)),
1588 .prog_type = BPF_PROG_TYPE_SK_SKB,
1591 "valid access __sk_buff remote_ip4",
1593 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
1594 offsetof(struct __sk_buff, remote_ip4)),
1598 .prog_type = BPF_PROG_TYPE_SK_SKB,
1601 "valid access __sk_buff local_ip4",
1603 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
1604 offsetof(struct __sk_buff, local_ip4)),
1608 .prog_type = BPF_PROG_TYPE_SK_SKB,
1611 "valid access __sk_buff remote_ip6",
1613 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
1614 offsetof(struct __sk_buff, remote_ip6[0])),
1615 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
1616 offsetof(struct __sk_buff, remote_ip6[1])),
1617 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
1618 offsetof(struct __sk_buff, remote_ip6[2])),
1619 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
1620 offsetof(struct __sk_buff, remote_ip6[3])),
1624 .prog_type = BPF_PROG_TYPE_SK_SKB,
1627 "valid access __sk_buff local_ip6",
1629 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
1630 offsetof(struct __sk_buff, local_ip6[0])),
1631 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
1632 offsetof(struct __sk_buff, local_ip6[1])),
1633 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
1634 offsetof(struct __sk_buff, local_ip6[2])),
1635 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
1636 offsetof(struct __sk_buff, local_ip6[3])),
1640 .prog_type = BPF_PROG_TYPE_SK_SKB,
1643 "valid access __sk_buff remote_port",
1645 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
1646 offsetof(struct __sk_buff, remote_port)),
1650 .prog_type = BPF_PROG_TYPE_SK_SKB,
1653	"valid access __sk_buff local_port", /* was mis-named "remote_port" (copy-paste); insn below reads local_port */
1655	BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
1656		    offsetof(struct __sk_buff, local_port)),
1660	.prog_type = BPF_PROG_TYPE_SK_SKB,
1663 "invalid access of tc_classid for SK_SKB",
1665 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
1666 offsetof(struct __sk_buff, tc_classid)),
1670 .prog_type = BPF_PROG_TYPE_SK_SKB,
1671 .errstr = "invalid bpf_context access",
1674 "invalid access of skb->mark for SK_SKB",
1676 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
1677 offsetof(struct __sk_buff, mark)),
1681 .prog_type = BPF_PROG_TYPE_SK_SKB,
1682 .errstr = "invalid bpf_context access",
1685 "check skb->mark is not writeable by SK_SKB",
1687 BPF_MOV64_IMM(BPF_REG_0, 0),
1688 BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_0,
1689 offsetof(struct __sk_buff, mark)),
1693 .prog_type = BPF_PROG_TYPE_SK_SKB,
1694 .errstr = "invalid bpf_context access",
1697 "check skb->tc_index is writeable by SK_SKB",
1699 BPF_MOV64_IMM(BPF_REG_0, 0),
1700 BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_0,
1701 offsetof(struct __sk_buff, tc_index)),
1705 .prog_type = BPF_PROG_TYPE_SK_SKB,
1708 "check skb->priority is writeable by SK_SKB",
1710 BPF_MOV64_IMM(BPF_REG_0, 0),
1711 BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_0,
1712 offsetof(struct __sk_buff, priority)),
1716 .prog_type = BPF_PROG_TYPE_SK_SKB,
1719 "direct packet read for SK_SKB",
1721 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
1722 offsetof(struct __sk_buff, data)),
1723 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
1724 offsetof(struct __sk_buff, data_end)),
1725 BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
1726 BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
1727 BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 1),
1728 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_2, 0),
1729 BPF_MOV64_IMM(BPF_REG_0, 0),
1733 .prog_type = BPF_PROG_TYPE_SK_SKB,
1736 "direct packet write for SK_SKB",
1738 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
1739 offsetof(struct __sk_buff, data)),
1740 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
1741 offsetof(struct __sk_buff, data_end)),
1742 BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
1743 BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
1744 BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 1),
1745 BPF_STX_MEM(BPF_B, BPF_REG_2, BPF_REG_2, 0),
1746 BPF_MOV64_IMM(BPF_REG_0, 0),
1750 .prog_type = BPF_PROG_TYPE_SK_SKB,
1753 "overlapping checks for direct packet access SK_SKB",
1755 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
1756 offsetof(struct __sk_buff, data)),
1757 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
1758 offsetof(struct __sk_buff, data_end)),
1759 BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
1760 BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
1761 BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 4),
1762 BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
1763 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 6),
1764 BPF_JMP_REG(BPF_JGT, BPF_REG_1, BPF_REG_3, 1),
1765 BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_2, 6),
1766 BPF_MOV64_IMM(BPF_REG_0, 0),
1770 .prog_type = BPF_PROG_TYPE_SK_SKB,
1773 "valid access family in SK_MSG",
1775 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
1776 offsetof(struct sk_msg_md, family)),
1780 .prog_type = BPF_PROG_TYPE_SK_MSG,
1783 "valid access remote_ip4 in SK_MSG",
1785 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
1786 offsetof(struct sk_msg_md, remote_ip4)),
1790 .prog_type = BPF_PROG_TYPE_SK_MSG,
1793 "valid access local_ip4 in SK_MSG",
1795 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
1796 offsetof(struct sk_msg_md, local_ip4)),
1800 .prog_type = BPF_PROG_TYPE_SK_MSG,
1803 "valid access remote_port in SK_MSG",
1805 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
1806 offsetof(struct sk_msg_md, remote_port)),
1810 .prog_type = BPF_PROG_TYPE_SK_MSG,
1813 "valid access local_port in SK_MSG",
1815 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
1816 offsetof(struct sk_msg_md, local_port)),
1820 .prog_type = BPF_PROG_TYPE_SK_MSG,
1823 "valid access remote_ip6 in SK_MSG",
1825 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
1826 offsetof(struct sk_msg_md, remote_ip6[0])),
1827 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
1828 offsetof(struct sk_msg_md, remote_ip6[1])),
1829 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
1830 offsetof(struct sk_msg_md, remote_ip6[2])),
1831 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
1832 offsetof(struct sk_msg_md, remote_ip6[3])),
1836	.prog_type = BPF_PROG_TYPE_SK_MSG, /* fix copy-paste: entry accesses sk_msg_md fields, so must load as SK_MSG, not SK_SKB */
1839 "valid access local_ip6 in SK_MSG",
1841 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
1842 offsetof(struct sk_msg_md, local_ip6[0])),
1843 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
1844 offsetof(struct sk_msg_md, local_ip6[1])),
1845 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
1846 offsetof(struct sk_msg_md, local_ip6[2])),
1847 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
1848 offsetof(struct sk_msg_md, local_ip6[3])),
1852	.prog_type = BPF_PROG_TYPE_SK_MSG, /* fix copy-paste: entry accesses sk_msg_md fields, so must load as SK_MSG, not SK_SKB */
1855 "valid access size in SK_MSG",
1857 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
1858 offsetof(struct sk_msg_md, size)),
1862 .prog_type = BPF_PROG_TYPE_SK_MSG,
1865 "invalid 64B read of size in SK_MSG",
1867 BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_1,
1868 offsetof(struct sk_msg_md, size)),
1871 .errstr = "invalid bpf_context access",
1873 .prog_type = BPF_PROG_TYPE_SK_MSG,
1876 "invalid read past end of SK_MSG",
1878 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
1879 offsetof(struct sk_msg_md, size) + 4),
1882 .errstr = "invalid bpf_context access",
1884 .prog_type = BPF_PROG_TYPE_SK_MSG,
1887 "invalid read offset in SK_MSG",
1889 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
1890 offsetof(struct sk_msg_md, family) + 1),
1893 .errstr = "invalid bpf_context access",
1895 .prog_type = BPF_PROG_TYPE_SK_MSG,
1896 .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
1899 "direct packet read for SK_MSG",
1901 BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_1,
1902 offsetof(struct sk_msg_md, data)),
1903 BPF_LDX_MEM(BPF_DW, BPF_REG_3, BPF_REG_1,
1904 offsetof(struct sk_msg_md, data_end)),
1905 BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
1906 BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
1907 BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 1),
1908 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_2, 0),
1909 BPF_MOV64_IMM(BPF_REG_0, 0),
1913 .prog_type = BPF_PROG_TYPE_SK_MSG,
1916 "direct packet write for SK_MSG",
1918 BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_1,
1919 offsetof(struct sk_msg_md, data)),
1920 BPF_LDX_MEM(BPF_DW, BPF_REG_3, BPF_REG_1,
1921 offsetof(struct sk_msg_md, data_end)),
1922 BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
1923 BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
1924 BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 1),
1925 BPF_STX_MEM(BPF_B, BPF_REG_2, BPF_REG_2, 0),
1926 BPF_MOV64_IMM(BPF_REG_0, 0),
1930 .prog_type = BPF_PROG_TYPE_SK_MSG,
1933 "overlapping checks for direct packet access SK_MSG",
1935 BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_1,
1936 offsetof(struct sk_msg_md, data)),
1937 BPF_LDX_MEM(BPF_DW, BPF_REG_3, BPF_REG_1,
1938 offsetof(struct sk_msg_md, data_end)),
1939 BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
1940 BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
1941 BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 4),
1942 BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
1943 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 6),
1944 BPF_JMP_REG(BPF_JGT, BPF_REG_1, BPF_REG_3, 1),
1945 BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_2, 6),
1946 BPF_MOV64_IMM(BPF_REG_0, 0),
1950 .prog_type = BPF_PROG_TYPE_SK_MSG,
1953 "check skb->mark is not writeable by sockets",
1955 BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_1,
1956 offsetof(struct __sk_buff, mark)),
1959 .errstr = "invalid bpf_context access",
1960 .errstr_unpriv = "R1 leaks addr",
1964 "check skb->tc_index is not writeable by sockets",
1966 BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_1,
1967 offsetof(struct __sk_buff, tc_index)),
1970 .errstr = "invalid bpf_context access",
1971 .errstr_unpriv = "R1 leaks addr",
1975 "check cb access: byte",
1977 BPF_MOV64_IMM(BPF_REG_0, 0),
1978 BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0,
1979 offsetof(struct __sk_buff, cb[0])),
1980 BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0,
1981 offsetof(struct __sk_buff, cb[0]) + 1),
1982 BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0,
1983 offsetof(struct __sk_buff, cb[0]) + 2),
1984 BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0,
1985 offsetof(struct __sk_buff, cb[0]) + 3),
1986 BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0,
1987 offsetof(struct __sk_buff, cb[1])),
1988 BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0,
1989 offsetof(struct __sk_buff, cb[1]) + 1),
1990 BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0,
1991 offsetof(struct __sk_buff, cb[1]) + 2),
1992 BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0,
1993 offsetof(struct __sk_buff, cb[1]) + 3),
1994 BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0,
1995 offsetof(struct __sk_buff, cb[2])),
1996 BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0,
1997 offsetof(struct __sk_buff, cb[2]) + 1),
1998 BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0,
1999 offsetof(struct __sk_buff, cb[2]) + 2),
2000 BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0,
2001 offsetof(struct __sk_buff, cb[2]) + 3),
2002 BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0,
2003 offsetof(struct __sk_buff, cb[3])),
2004 BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0,
2005 offsetof(struct __sk_buff, cb[3]) + 1),
2006 BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0,
2007 offsetof(struct __sk_buff, cb[3]) + 2),
2008 BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0,
2009 offsetof(struct __sk_buff, cb[3]) + 3),
2010 BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0,
2011 offsetof(struct __sk_buff, cb[4])),
2012 BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0,
2013 offsetof(struct __sk_buff, cb[4]) + 1),
2014 BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0,
2015 offsetof(struct __sk_buff, cb[4]) + 2),
2016 BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0,
2017 offsetof(struct __sk_buff, cb[4]) + 3),
2018 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
2019 offsetof(struct __sk_buff, cb[0])),
2020 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
2021 offsetof(struct __sk_buff, cb[0]) + 1),
2022 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
2023 offsetof(struct __sk_buff, cb[0]) + 2),
2024 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
2025 offsetof(struct __sk_buff, cb[0]) + 3),
2026 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
2027 offsetof(struct __sk_buff, cb[1])),
2028 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
2029 offsetof(struct __sk_buff, cb[1]) + 1),
2030 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
2031 offsetof(struct __sk_buff, cb[1]) + 2),
2032 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
2033 offsetof(struct __sk_buff, cb[1]) + 3),
2034 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
2035 offsetof(struct __sk_buff, cb[2])),
2036 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
2037 offsetof(struct __sk_buff, cb[2]) + 1),
2038 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
2039 offsetof(struct __sk_buff, cb[2]) + 2),
2040 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
2041 offsetof(struct __sk_buff, cb[2]) + 3),
2042 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
2043 offsetof(struct __sk_buff, cb[3])),
2044 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
2045 offsetof(struct __sk_buff, cb[3]) + 1),
2046 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
2047 offsetof(struct __sk_buff, cb[3]) + 2),
2048 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
2049 offsetof(struct __sk_buff, cb[3]) + 3),
2050 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
2051 offsetof(struct __sk_buff, cb[4])),
2052 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
2053 offsetof(struct __sk_buff, cb[4]) + 1),
2054 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
2055 offsetof(struct __sk_buff, cb[4]) + 2),
2056 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
2057 offsetof(struct __sk_buff, cb[4]) + 3),
2063 "__sk_buff->hash, offset 0, byte store not permitted",
2065 BPF_MOV64_IMM(BPF_REG_0, 0),
2066 BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0,
2067 offsetof(struct __sk_buff, hash)),
2070 .errstr = "invalid bpf_context access",
2074 "__sk_buff->tc_index, offset 3, byte store not permitted",
2076 BPF_MOV64_IMM(BPF_REG_0, 0),
2077 BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0,
2078 offsetof(struct __sk_buff, tc_index) + 3),
2081 .errstr = "invalid bpf_context access",
2085 "check skb->hash byte load permitted",
2087 BPF_MOV64_IMM(BPF_REG_0, 0),
2088 #if __BYTE_ORDER == __LITTLE_ENDIAN
2089 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
2090 offsetof(struct __sk_buff, hash)),
2092 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
2093 offsetof(struct __sk_buff, hash) + 3),
2100 "check skb->hash byte load permitted 1",
2102 BPF_MOV64_IMM(BPF_REG_0, 0),
2103 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
2104 offsetof(struct __sk_buff, hash) + 1),
2110 "check skb->hash byte load permitted 2",
2112 BPF_MOV64_IMM(BPF_REG_0, 0),
2113 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
2114 offsetof(struct __sk_buff, hash) + 2),
2120 "check skb->hash byte load permitted 3",
2122 BPF_MOV64_IMM(BPF_REG_0, 0),
2123 #if __BYTE_ORDER == __LITTLE_ENDIAN
2124 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
2125 offsetof(struct __sk_buff, hash) + 3),
2127 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
2128 offsetof(struct __sk_buff, hash)),
2135 "check cb access: byte, wrong type",
2137 BPF_MOV64_IMM(BPF_REG_0, 0),
2138 BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0,
2139 offsetof(struct __sk_buff, cb[0])),
2142 .errstr = "invalid bpf_context access",
2144 .prog_type = BPF_PROG_TYPE_CGROUP_SOCK,
2147 "check cb access: half",
2149 BPF_MOV64_IMM(BPF_REG_0, 0),
2150 BPF_STX_MEM(BPF_H, BPF_REG_1, BPF_REG_0,
2151 offsetof(struct __sk_buff, cb[0])),
2152 BPF_STX_MEM(BPF_H, BPF_REG_1, BPF_REG_0,
2153 offsetof(struct __sk_buff, cb[0]) + 2),
2154 BPF_STX_MEM(BPF_H, BPF_REG_1, BPF_REG_0,
2155 offsetof(struct __sk_buff, cb[1])),
2156 BPF_STX_MEM(BPF_H, BPF_REG_1, BPF_REG_0,
2157 offsetof(struct __sk_buff, cb[1]) + 2),
2158 BPF_STX_MEM(BPF_H, BPF_REG_1, BPF_REG_0,
2159 offsetof(struct __sk_buff, cb[2])),
2160 BPF_STX_MEM(BPF_H, BPF_REG_1, BPF_REG_0,
2161 offsetof(struct __sk_buff, cb[2]) + 2),
2162 BPF_STX_MEM(BPF_H, BPF_REG_1, BPF_REG_0,
2163 offsetof(struct __sk_buff, cb[3])),
2164 BPF_STX_MEM(BPF_H, BPF_REG_1, BPF_REG_0,
2165 offsetof(struct __sk_buff, cb[3]) + 2),
2166 BPF_STX_MEM(BPF_H, BPF_REG_1, BPF_REG_0,
2167 offsetof(struct __sk_buff, cb[4])),
2168 BPF_STX_MEM(BPF_H, BPF_REG_1, BPF_REG_0,
2169 offsetof(struct __sk_buff, cb[4]) + 2),
2170 BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_1,
2171 offsetof(struct __sk_buff, cb[0])),
2172 BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_1,
2173 offsetof(struct __sk_buff, cb[0]) + 2),
2174 BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_1,
2175 offsetof(struct __sk_buff, cb[1])),
2176 BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_1,
2177 offsetof(struct __sk_buff, cb[1]) + 2),
2178 BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_1,
2179 offsetof(struct __sk_buff, cb[2])),
2180 BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_1,
2181 offsetof(struct __sk_buff, cb[2]) + 2),
2182 BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_1,
2183 offsetof(struct __sk_buff, cb[3])),
2184 BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_1,
2185 offsetof(struct __sk_buff, cb[3]) + 2),
2186 BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_1,
2187 offsetof(struct __sk_buff, cb[4])),
2188 BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_1,
2189 offsetof(struct __sk_buff, cb[4]) + 2),
2195 "check cb access: half, unaligned",
2197 BPF_MOV64_IMM(BPF_REG_0, 0),
2198 BPF_STX_MEM(BPF_H, BPF_REG_1, BPF_REG_0,
2199 offsetof(struct __sk_buff, cb[0]) + 1),
2202 .errstr = "misaligned context access",
2204 .flags = F_LOAD_WITH_STRICT_ALIGNMENT,
2207 "check __sk_buff->hash, offset 0, half store not permitted",
2209 BPF_MOV64_IMM(BPF_REG_0, 0),
2210 BPF_STX_MEM(BPF_H, BPF_REG_1, BPF_REG_0,
2211 offsetof(struct __sk_buff, hash)),
2214 .errstr = "invalid bpf_context access",
2218 "check __sk_buff->tc_index, offset 2, half store not permitted",
2220 BPF_MOV64_IMM(BPF_REG_0, 0),
2221 BPF_STX_MEM(BPF_H, BPF_REG_1, BPF_REG_0,
2222 offsetof(struct __sk_buff, tc_index) + 2),
2225 .errstr = "invalid bpf_context access",
2229 "check skb->hash half load permitted",
2231 BPF_MOV64_IMM(BPF_REG_0, 0),
2232 #if __BYTE_ORDER == __LITTLE_ENDIAN
2233 BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_1,
2234 offsetof(struct __sk_buff, hash)),
2236 BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_1,
2237 offsetof(struct __sk_buff, hash) + 2),
2244 "check skb->hash half load permitted 2",
2246 BPF_MOV64_IMM(BPF_REG_0, 0),
2247 #if __BYTE_ORDER == __LITTLE_ENDIAN
2248 BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_1,
2249 offsetof(struct __sk_buff, hash) + 2),
2251 BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_1,
2252 offsetof(struct __sk_buff, hash)),
2259 "check skb->hash half load not permitted, unaligned 1",
2261 BPF_MOV64_IMM(BPF_REG_0, 0),
2262 #if __BYTE_ORDER == __LITTLE_ENDIAN
2263 BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_1,
2264 offsetof(struct __sk_buff, hash) + 1),
2266 BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_1,
2267 offsetof(struct __sk_buff, hash) + 3),
2271 .errstr = "invalid bpf_context access",
2275 "check skb->hash half load not permitted, unaligned 3",
2277 BPF_MOV64_IMM(BPF_REG_0, 0),
2278 #if __BYTE_ORDER == __LITTLE_ENDIAN
2279 BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_1,
2280 offsetof(struct __sk_buff, hash) + 3),
2282 BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_1,
2283 offsetof(struct __sk_buff, hash) + 1),
2287 .errstr = "invalid bpf_context access",
2289	.flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS, /* drop duplicated initializer of the same field (-Woverride-init; second copy was dead) */
2293 "check cb access: half, wrong type",
2295 BPF_MOV64_IMM(BPF_REG_0, 0),
2296 BPF_STX_MEM(BPF_H, BPF_REG_1, BPF_REG_0,
2297 offsetof(struct __sk_buff, cb[0])),
2300 .errstr = "invalid bpf_context access",
2302 .prog_type = BPF_PROG_TYPE_CGROUP_SOCK,
2305 "check cb access: word",
2307 BPF_MOV64_IMM(BPF_REG_0, 0),
2308 BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_0,
2309 offsetof(struct __sk_buff, cb[0])),
2310 BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_0,
2311 offsetof(struct __sk_buff, cb[1])),
2312 BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_0,
2313 offsetof(struct __sk_buff, cb[2])),
2314 BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_0,
2315 offsetof(struct __sk_buff, cb[3])),
2316 BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_0,
2317 offsetof(struct __sk_buff, cb[4])),
2318 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
2319 offsetof(struct __sk_buff, cb[0])),
2320 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
2321 offsetof(struct __sk_buff, cb[1])),
2322 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
2323 offsetof(struct __sk_buff, cb[2])),
2324 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
2325 offsetof(struct __sk_buff, cb[3])),
2326 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
2327 offsetof(struct __sk_buff, cb[4])),
2333 "check cb access: word, unaligned 1",
2335 BPF_MOV64_IMM(BPF_REG_0, 0),
2336 BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_0,
2337 offsetof(struct __sk_buff, cb[0]) + 2),
2340 .errstr = "misaligned context access",
2342 .flags = F_LOAD_WITH_STRICT_ALIGNMENT,
2345 "check cb access: word, unaligned 2",
2347 BPF_MOV64_IMM(BPF_REG_0, 0),
2348 BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_0,
2349 offsetof(struct __sk_buff, cb[4]) + 1),
2352 .errstr = "misaligned context access",
2354 .flags = F_LOAD_WITH_STRICT_ALIGNMENT,
2357 "check cb access: word, unaligned 3",
2359 BPF_MOV64_IMM(BPF_REG_0, 0),
2360 BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_0,
2361 offsetof(struct __sk_buff, cb[4]) + 2),
2364 .errstr = "misaligned context access",
2366 .flags = F_LOAD_WITH_STRICT_ALIGNMENT,
2369 "check cb access: word, unaligned 4",
2371 BPF_MOV64_IMM(BPF_REG_0, 0),
2372 BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_0,
2373 offsetof(struct __sk_buff, cb[4]) + 3),
2376 .errstr = "misaligned context access",
2378 .flags = F_LOAD_WITH_STRICT_ALIGNMENT,
2381 "check cb access: double",
2383 BPF_MOV64_IMM(BPF_REG_0, 0),
2384 BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_0,
2385 offsetof(struct __sk_buff, cb[0])),
2386 BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_0,
2387 offsetof(struct __sk_buff, cb[2])),
2388 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1,
2389 offsetof(struct __sk_buff, cb[0])),
2390 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1,
2391 offsetof(struct __sk_buff, cb[2])),
2397 "check cb access: double, unaligned 1",
2399 BPF_MOV64_IMM(BPF_REG_0, 0),
2400 BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_0,
2401 offsetof(struct __sk_buff, cb[1])),
2404 .errstr = "misaligned context access",
2406 .flags = F_LOAD_WITH_STRICT_ALIGNMENT,
2409 "check cb access: double, unaligned 2",
2411 BPF_MOV64_IMM(BPF_REG_0, 0),
2412 BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_0,
2413 offsetof(struct __sk_buff, cb[3])),
2416 .errstr = "misaligned context access",
2418 .flags = F_LOAD_WITH_STRICT_ALIGNMENT,
2421 "check cb access: double, oob 1",
2423 BPF_MOV64_IMM(BPF_REG_0, 0),
2424 BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_0,
2425 offsetof(struct __sk_buff, cb[4])),
2428 .errstr = "invalid bpf_context access",
2432 "check cb access: double, oob 2",
2434 BPF_MOV64_IMM(BPF_REG_0, 0),
2435 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1,
2436 offsetof(struct __sk_buff, cb[4])),
2439 .errstr = "invalid bpf_context access",
2443 "check __sk_buff->ifindex dw store not permitted",
2445 BPF_MOV64_IMM(BPF_REG_0, 0),
2446 BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_0,
2447 offsetof(struct __sk_buff, ifindex)),
2450 .errstr = "invalid bpf_context access",
2454 "check __sk_buff->ifindex dw load not permitted",
2456 BPF_MOV64_IMM(BPF_REG_0, 0),
2457 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1,
2458 offsetof(struct __sk_buff, ifindex)),
2461 .errstr = "invalid bpf_context access",
2465 "check cb access: double, wrong type",
2467 BPF_MOV64_IMM(BPF_REG_0, 0),
2468 BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_0,
2469 offsetof(struct __sk_buff, cb[0])),
2472 .errstr = "invalid bpf_context access",
2474 .prog_type = BPF_PROG_TYPE_CGROUP_SOCK,
2477 "check out of range skb->cb access",
2479 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
2480 offsetof(struct __sk_buff, cb[0]) + 256),
2483 .errstr = "invalid bpf_context access",
2484 .errstr_unpriv = "",
2486 .prog_type = BPF_PROG_TYPE_SCHED_ACT,
2489 "write skb fields from socket prog",
2491 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
2492 offsetof(struct __sk_buff, cb[4])),
2493 BPF_JMP_IMM(BPF_JGE, BPF_REG_0, 0, 1),
2494 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
2495 offsetof(struct __sk_buff, mark)),
2496 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
2497 offsetof(struct __sk_buff, tc_index)),
2498 BPF_JMP_IMM(BPF_JGE, BPF_REG_0, 0, 1),
2499 BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_1,
2500 offsetof(struct __sk_buff, cb[0])),
2501 BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_1,
2502 offsetof(struct __sk_buff, cb[2])),
2506 .errstr_unpriv = "R1 leaks addr",
2507 .result_unpriv = REJECT,
2510 "write skb fields from tc_cls_act prog",
2512 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
2513 offsetof(struct __sk_buff, cb[0])),
2514 BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_0,
2515 offsetof(struct __sk_buff, mark)),
2516 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
2517 offsetof(struct __sk_buff, tc_index)),
2518 BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_0,
2519 offsetof(struct __sk_buff, tc_index)),
2520 BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_0,
2521 offsetof(struct __sk_buff, cb[3])),
2522 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1,
2523 offsetof(struct __sk_buff, tstamp)),
2524 BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_0,
2525 offsetof(struct __sk_buff, tstamp)),
2528 .errstr_unpriv = "",
2529 .result_unpriv = REJECT,
2531 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
2534 "PTR_TO_STACK store/load",
2536 BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
2537 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -10),
2538 BPF_ST_MEM(BPF_DW, BPF_REG_1, 2, 0xfaceb00c),
2539 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, 2),
2543 .retval = 0xfaceb00c,
2546 "PTR_TO_STACK store/load - bad alignment on off",
2548 BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
2549 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -8),
2550 BPF_ST_MEM(BPF_DW, BPF_REG_1, 2, 0xfaceb00c),
2551 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, 2),
2555 .errstr = "misaligned stack access off (0x0; 0x0)+-8+2 size 8",
2558 "PTR_TO_STACK store/load - bad alignment on reg",
2560 BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
2561 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -10),
2562 BPF_ST_MEM(BPF_DW, BPF_REG_1, 8, 0xfaceb00c),
2563 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, 8),
2567 .errstr = "misaligned stack access off (0x0; 0x0)+-10+8 size 8",
2570 "PTR_TO_STACK store/load - out of bounds low",
2572 BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
2573 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -80000),
2574 BPF_ST_MEM(BPF_DW, BPF_REG_1, 8, 0xfaceb00c),
2575 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, 8),
2579 .errstr = "invalid stack off=-79992 size=8",
2582 "PTR_TO_STACK store/load - out of bounds high",
2584 BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
2585 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -8),
2586 BPF_ST_MEM(BPF_DW, BPF_REG_1, 8, 0xfaceb00c),
2587 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, 8),
2591 .errstr = "invalid stack off=0 size=8",
2594 "unpriv: return pointer",
2596 BPF_MOV64_REG(BPF_REG_0, BPF_REG_10),
2600 .result_unpriv = REJECT,
2601 .errstr_unpriv = "R0 leaks addr",
2602 .retval = POINTER_VALUE,
2605 "unpriv: add const to pointer",
2607 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
2608 BPF_MOV64_IMM(BPF_REG_0, 0),
2614 "unpriv: add pointer to pointer",
2616 BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_10),
2617 BPF_MOV64_IMM(BPF_REG_0, 0),
2621 .errstr = "R1 pointer += pointer",
2624 "unpriv: neg pointer",
2626 BPF_ALU64_IMM(BPF_NEG, BPF_REG_1, 0),
2627 BPF_MOV64_IMM(BPF_REG_0, 0),
2631 .result_unpriv = REJECT,
2632 .errstr_unpriv = "R1 pointer arithmetic",
2635 "unpriv: cmp pointer with const",
2637 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, 0),
2638 BPF_MOV64_IMM(BPF_REG_0, 0),
2642 .result_unpriv = REJECT,
2643 .errstr_unpriv = "R1 pointer comparison",
2646 "unpriv: cmp pointer with pointer",
2648 BPF_JMP_REG(BPF_JEQ, BPF_REG_1, BPF_REG_10, 0),
2649 BPF_MOV64_IMM(BPF_REG_0, 0),
2653 .result_unpriv = REJECT,
2654 .errstr_unpriv = "R10 pointer comparison",
2657 "unpriv: check that printk is disallowed",
2659 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
2660 BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
2661 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -8),
2662 BPF_MOV64_IMM(BPF_REG_2, 8),
2663 BPF_MOV64_REG(BPF_REG_3, BPF_REG_1),
2664 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
2665 BPF_FUNC_trace_printk),
2666 BPF_MOV64_IMM(BPF_REG_0, 0),
2669 .errstr_unpriv = "unknown func bpf_trace_printk#6",
2670 .result_unpriv = REJECT,
2674 "unpriv: pass pointer to helper function",
2676 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
2677 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
2678 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
2679 BPF_LD_MAP_FD(BPF_REG_1, 0),
2680 BPF_MOV64_REG(BPF_REG_3, BPF_REG_2),
2681 BPF_MOV64_REG(BPF_REG_4, BPF_REG_2),
2682 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
2683 BPF_FUNC_map_update_elem),
2684 BPF_MOV64_IMM(BPF_REG_0, 0),
2687 .fixup_map_hash_8b = { 3 },
2688 .errstr_unpriv = "R4 leaks addr",
2689 .result_unpriv = REJECT,
2693 "unpriv: indirectly pass pointer on stack to helper function",
2695 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_10, -8),
2696 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
2697 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
2698 BPF_LD_MAP_FD(BPF_REG_1, 0),
2699 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
2700 BPF_FUNC_map_lookup_elem),
2701 BPF_MOV64_IMM(BPF_REG_0, 0),
2704 .fixup_map_hash_8b = { 3 },
2705 .errstr = "invalid indirect read from stack off -8+0 size 8",
2709 "unpriv: mangle pointer on stack 1",
2711 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_10, -8),
2712 BPF_ST_MEM(BPF_W, BPF_REG_10, -8, 0),
2713 BPF_MOV64_IMM(BPF_REG_0, 0),
2716 .errstr_unpriv = "attempt to corrupt spilled",
2717 .result_unpriv = REJECT,
2721 "unpriv: mangle pointer on stack 2",
2723 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_10, -8),
2724 BPF_ST_MEM(BPF_B, BPF_REG_10, -1, 0),
2725 BPF_MOV64_IMM(BPF_REG_0, 0),
2728 .errstr_unpriv = "attempt to corrupt spilled",
2729 .result_unpriv = REJECT,
2733 "unpriv: read pointer from stack in small chunks",
2735 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_10, -8),
2736 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_10, -8),
2737 BPF_MOV64_IMM(BPF_REG_0, 0),
2740 .errstr = "invalid size",
2744 "unpriv: write pointer into ctx",
2746 BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_1, 0),
2747 BPF_MOV64_IMM(BPF_REG_0, 0),
2750 .errstr_unpriv = "R1 leaks addr",
2751 .result_unpriv = REJECT,
2752 .errstr = "invalid bpf_context access",
2756 "unpriv: spill/fill of ctx",
2758 BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_10),
2759 BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, -8),
2760 BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_1, 0),
2761 BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_6, 0),
2762 BPF_MOV64_IMM(BPF_REG_0, 0),
2768 "unpriv: spill/fill of ctx 2",
2770 BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_10),
2771 BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, -8),
2772 BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_1, 0),
2773 BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_6, 0),
2774 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
2775 BPF_FUNC_get_hash_recalc),
2776 BPF_MOV64_IMM(BPF_REG_0, 0),
2780 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
2783 "unpriv: spill/fill of ctx 3",
2785 BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_10),
2786 BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, -8),
2787 BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_1, 0),
2788 BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_10, 0),
2789 BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_6, 0),
2790 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
2791 BPF_FUNC_get_hash_recalc),
2795 .errstr = "R1 type=fp expected=ctx",
2796 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
2799 "unpriv: spill/fill of ctx 4",
2801 BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_10),
2802 BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, -8),
2803 BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_1, 0),
2804 BPF_MOV64_IMM(BPF_REG_0, 1),
2805 BPF_RAW_INSN(BPF_STX | BPF_XADD | BPF_DW, BPF_REG_10,
2807 BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_6, 0),
2808 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
2809 BPF_FUNC_get_hash_recalc),
2813 .errstr = "R1 type=inv expected=ctx",
2814 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
2817 "unpriv: spill/fill of different pointers stx",
2819 BPF_MOV64_IMM(BPF_REG_3, 42),
2820 BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_10),
2821 BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, -8),
2822 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, 3),
2823 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
2824 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -16),
2825 BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_2, 0),
2826 BPF_JMP_IMM(BPF_JNE, BPF_REG_1, 0, 1),
2827 BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_1, 0),
2828 BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_6, 0),
2829 BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_3,
2830 offsetof(struct __sk_buff, mark)),
2831 BPF_MOV64_IMM(BPF_REG_0, 0),
2835 .errstr = "same insn cannot be used with different pointers",
2836 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
2839 "unpriv: spill/fill of different pointers stx - ctx and sock",
2841 BPF_MOV64_REG(BPF_REG_8, BPF_REG_1),
2842 /* struct bpf_sock *sock = bpf_sock_lookup(...); */
2844 BPF_MOV64_REG(BPF_REG_2, BPF_REG_0),
2846 /* void *target = &foo; */
2847 BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_10),
2848 BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, -8),
2849 BPF_MOV64_REG(BPF_REG_1, BPF_REG_8),
2850 /* if (skb == NULL) *target = sock; */
2851 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, 1),
2852 BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_2, 0),
2853 /* else *target = skb; */
2854 BPF_JMP_IMM(BPF_JNE, BPF_REG_1, 0, 1),
2855 BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_1, 0),
2856 /* struct __sk_buff *skb = *target; */
2857 BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_6, 0),
2858 /* skb->mark = 42; */
2859 BPF_MOV64_IMM(BPF_REG_3, 42),
2860 BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_3,
2861 offsetof(struct __sk_buff, mark)),
2862 /* if (sk) bpf_sk_release(sk) */
2863 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, 1),
2864 BPF_EMIT_CALL(BPF_FUNC_sk_release),
2865 BPF_MOV64_IMM(BPF_REG_0, 0),
2869 .errstr = "type=ctx expected=sock",
2870 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
2873 "unpriv: spill/fill of different pointers stx - leak sock",
2875 BPF_MOV64_REG(BPF_REG_8, BPF_REG_1),
2876 /* struct bpf_sock *sock = bpf_sock_lookup(...); */
2878 BPF_MOV64_REG(BPF_REG_2, BPF_REG_0),
2880 /* void *target = &foo; */
2881 BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_10),
2882 BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, -8),
2883 BPF_MOV64_REG(BPF_REG_1, BPF_REG_8),
2884 /* if (skb == NULL) *target = sock; */
2885 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, 1),
2886 BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_2, 0),
2887 /* else *target = skb; */
2888 BPF_JMP_IMM(BPF_JNE, BPF_REG_1, 0, 1),
2889 BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_1, 0),
2890 /* struct __sk_buff *skb = *target; */
2891 BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_6, 0),
2892 /* skb->mark = 42; */
2893 BPF_MOV64_IMM(BPF_REG_3, 42),
2894 BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_3,
2895 offsetof(struct __sk_buff, mark)),
2899 //.errstr = "same insn cannot be used with different pointers",
2900 .errstr = "Unreleased reference",
2901 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
2904 "unpriv: spill/fill of different pointers stx - sock and ctx (read)",
2906 BPF_MOV64_REG(BPF_REG_8, BPF_REG_1),
2907 /* struct bpf_sock *sock = bpf_sock_lookup(...); */
2909 BPF_MOV64_REG(BPF_REG_2, BPF_REG_0),
2911 /* void *target = &foo; */
2912 BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_10),
2913 BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, -8),
2914 BPF_MOV64_REG(BPF_REG_1, BPF_REG_8),
2915 /* if (skb) *target = skb */
2916 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, 1),
2917 BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_1, 0),
2918 /* else *target = sock */
2919 BPF_JMP_IMM(BPF_JNE, BPF_REG_1, 0, 1),
2920 BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_2, 0),
2921 /* struct bpf_sock *sk = *target; */
2922 BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_6, 0),
2923 /* if (sk) u32 foo = sk->mark; bpf_sk_release(sk); */
2924 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, 2),
2925 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
2926 offsetof(struct bpf_sock, mark)),
2927 BPF_EMIT_CALL(BPF_FUNC_sk_release),
2928 BPF_MOV64_IMM(BPF_REG_0, 0),
2932 .errstr = "same insn cannot be used with different pointers",
2933 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
2936 "unpriv: spill/fill of different pointers stx - sock and ctx (write)",
2938 BPF_MOV64_REG(BPF_REG_8, BPF_REG_1),
2939 /* struct bpf_sock *sock = bpf_sock_lookup(...); */
2941 BPF_MOV64_REG(BPF_REG_2, BPF_REG_0),
2943 /* void *target = &foo; */
2944 BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_10),
2945 BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, -8),
2946 BPF_MOV64_REG(BPF_REG_1, BPF_REG_8),
2947 /* if (skb) *target = skb */
2948 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, 1),
2949 BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_1, 0),
2950 /* else *target = sock */
2951 BPF_JMP_IMM(BPF_JNE, BPF_REG_1, 0, 1),
2952 BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_2, 0),
2953 /* struct bpf_sock *sk = *target; */
2954 BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_6, 0),
2955 /* if (sk) sk->mark = 42; bpf_sk_release(sk); */
2956 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, 3),
2957 BPF_MOV64_IMM(BPF_REG_3, 42),
2958 BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_3,
2959 offsetof(struct bpf_sock, mark)),
2960 BPF_EMIT_CALL(BPF_FUNC_sk_release),
2961 BPF_MOV64_IMM(BPF_REG_0, 0),
2965 //.errstr = "same insn cannot be used with different pointers",
2966 .errstr = "cannot write into socket",
2967 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
2970 "unpriv: spill/fill of different pointers ldx",
2972 BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_10),
2973 BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, -8),
2974 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, 3),
2975 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
2976 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2,
2977 -(__s32)offsetof(struct bpf_perf_event_data,
2978 sample_period) - 8),
2979 BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_2, 0),
2980 BPF_JMP_IMM(BPF_JNE, BPF_REG_1, 0, 1),
2981 BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_1, 0),
2982 BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_6, 0),
2983 BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_1,
2984 offsetof(struct bpf_perf_event_data,
2986 BPF_MOV64_IMM(BPF_REG_0, 0),
2990 .errstr = "same insn cannot be used with different pointers",
2991 .prog_type = BPF_PROG_TYPE_PERF_EVENT,
2994 "unpriv: write pointer into map elem value",
2996 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
2997 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
2998 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
2999 BPF_LD_MAP_FD(BPF_REG_1, 0),
3000 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
3001 BPF_FUNC_map_lookup_elem),
3002 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 1),
3003 BPF_STX_MEM(BPF_DW, BPF_REG_0, BPF_REG_0, 0),
3006 .fixup_map_hash_8b = { 3 },
3007 .errstr_unpriv = "R0 leaks addr",
3008 .result_unpriv = REJECT,
3012 "alu32: mov u32 const",
3014 BPF_MOV32_IMM(BPF_REG_7, 0),
3015 BPF_ALU32_IMM(BPF_AND, BPF_REG_7, 1),
3016 BPF_MOV32_REG(BPF_REG_0, BPF_REG_7),
3017 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 1),
3018 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_7, 0),
3025 "unpriv: partial copy of pointer",
3027 BPF_MOV32_REG(BPF_REG_1, BPF_REG_10),
3028 BPF_MOV64_IMM(BPF_REG_0, 0),
3031 .errstr_unpriv = "R10 partial copy",
3032 .result_unpriv = REJECT,
3036 "unpriv: pass pointer to tail_call",
3038 BPF_MOV64_REG(BPF_REG_3, BPF_REG_1),
3039 BPF_LD_MAP_FD(BPF_REG_2, 0),
3040 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
3041 BPF_FUNC_tail_call),
3042 BPF_MOV64_IMM(BPF_REG_0, 0),
3045 .fixup_prog1 = { 1 },
3046 .errstr_unpriv = "R3 leaks addr into helper",
3047 .result_unpriv = REJECT,
3051 "unpriv: cmp map pointer with zero",
3053 BPF_MOV64_IMM(BPF_REG_1, 0),
3054 BPF_LD_MAP_FD(BPF_REG_1, 0),
3055 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, 0),
3056 BPF_MOV64_IMM(BPF_REG_0, 0),
3059 .fixup_map_hash_8b = { 1 },
3060 .errstr_unpriv = "R1 pointer comparison",
3061 .result_unpriv = REJECT,
3065 "unpriv: write into frame pointer",
3067 BPF_MOV64_REG(BPF_REG_10, BPF_REG_1),
3068 BPF_MOV64_IMM(BPF_REG_0, 0),
3071 .errstr = "frame pointer is read only",
3075 "unpriv: spill/fill frame pointer",
3077 BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_10),
3078 BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, -8),
3079 BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_10, 0),
3080 BPF_LDX_MEM(BPF_DW, BPF_REG_10, BPF_REG_6, 0),
3081 BPF_MOV64_IMM(BPF_REG_0, 0),
3084 .errstr = "frame pointer is read only",
3088 "unpriv: cmp of frame pointer",
3090 BPF_JMP_IMM(BPF_JEQ, BPF_REG_10, 0, 0),
3091 BPF_MOV64_IMM(BPF_REG_0, 0),
3094 .errstr_unpriv = "R10 pointer comparison",
3095 .result_unpriv = REJECT,
3099 "unpriv: adding of fp",
3101 BPF_MOV64_IMM(BPF_REG_0, 0),
3102 BPF_MOV64_IMM(BPF_REG_1, 0),
3103 BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_10),
3104 BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_0, -8),
3110 "unpriv: cmp of stack pointer",
3112 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
3113 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
3114 BPF_JMP_IMM(BPF_JEQ, BPF_REG_2, 0, 0),
3115 BPF_MOV64_IMM(BPF_REG_0, 0),
3118 .errstr_unpriv = "R2 pointer comparison",
3119 .result_unpriv = REJECT,
3123 "runtime/jit: tail_call within bounds, prog once",
3125 BPF_MOV64_IMM(BPF_REG_3, 0),
3126 BPF_LD_MAP_FD(BPF_REG_2, 0),
3127 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
3128 BPF_FUNC_tail_call),
3129 BPF_MOV64_IMM(BPF_REG_0, 1),
3132 .fixup_prog1 = { 1 },
3137 "runtime/jit: tail_call within bounds, prog loop",
3139 BPF_MOV64_IMM(BPF_REG_3, 1),
3140 BPF_LD_MAP_FD(BPF_REG_2, 0),
3141 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
3142 BPF_FUNC_tail_call),
3143 BPF_MOV64_IMM(BPF_REG_0, 1),
3146 .fixup_prog1 = { 1 },
3151 "runtime/jit: tail_call within bounds, no prog",
3153 BPF_MOV64_IMM(BPF_REG_3, 2),
3154 BPF_LD_MAP_FD(BPF_REG_2, 0),
3155 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
3156 BPF_FUNC_tail_call),
3157 BPF_MOV64_IMM(BPF_REG_0, 1),
3160 .fixup_prog1 = { 1 },
3165 "runtime/jit: tail_call out of bounds",
3167 BPF_MOV64_IMM(BPF_REG_3, 256),
3168 BPF_LD_MAP_FD(BPF_REG_2, 0),
3169 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
3170 BPF_FUNC_tail_call),
3171 BPF_MOV64_IMM(BPF_REG_0, 2),
3174 .fixup_prog1 = { 1 },
3179 "runtime/jit: pass negative index to tail_call",
3181 BPF_MOV64_IMM(BPF_REG_3, -1),
3182 BPF_LD_MAP_FD(BPF_REG_2, 0),
3183 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
3184 BPF_FUNC_tail_call),
3185 BPF_MOV64_IMM(BPF_REG_0, 2),
3188 .fixup_prog1 = { 1 },
3193 "runtime/jit: pass > 32bit index to tail_call",
3195 BPF_LD_IMM64(BPF_REG_3, 0x100000000ULL),
3196 BPF_LD_MAP_FD(BPF_REG_2, 0),
3197 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
3198 BPF_FUNC_tail_call),
3199 BPF_MOV64_IMM(BPF_REG_0, 2),
3202 .fixup_prog1 = { 2 },
3205 /* Verifier rewrite for unpriv skips tail call here. */
3209 "stack pointer arithmetic",
3211 BPF_MOV64_IMM(BPF_REG_1, 4),
3212 BPF_JMP_IMM(BPF_JA, 0, 0, 0),
3213 BPF_MOV64_REG(BPF_REG_7, BPF_REG_10),
3214 BPF_ALU64_IMM(BPF_ADD, BPF_REG_7, -10),
3215 BPF_ALU64_IMM(BPF_ADD, BPF_REG_7, -10),
3216 BPF_MOV64_REG(BPF_REG_2, BPF_REG_7),
3217 BPF_ALU64_REG(BPF_ADD, BPF_REG_2, BPF_REG_1),
3218 BPF_ST_MEM(0, BPF_REG_2, 4, 0),
3219 BPF_MOV64_REG(BPF_REG_2, BPF_REG_7),
3220 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, 8),
3221 BPF_ST_MEM(0, BPF_REG_2, 4, 0),
3222 BPF_MOV64_IMM(BPF_REG_0, 0),
3228 "raw_stack: no skb_load_bytes",
3230 BPF_MOV64_IMM(BPF_REG_2, 4),
3231 BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_10),
3232 BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, -8),
3233 BPF_MOV64_REG(BPF_REG_3, BPF_REG_6),
3234 BPF_MOV64_IMM(BPF_REG_4, 8),
3235 /* Call to skb_load_bytes() omitted. */
3236 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_6, 0),
3240 .errstr = "invalid read from stack off -8+0 size 8",
3241 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
3244 "raw_stack: skb_load_bytes, negative len",
3246 BPF_MOV64_IMM(BPF_REG_2, 4),
3247 BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_10),
3248 BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, -8),
3249 BPF_MOV64_REG(BPF_REG_3, BPF_REG_6),
3250 BPF_MOV64_IMM(BPF_REG_4, -8),
3251 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
3252 BPF_FUNC_skb_load_bytes),
3253 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_6, 0),
3257 .errstr = "R4 min value is negative",
3258 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
3261 "raw_stack: skb_load_bytes, negative len 2",
3263 BPF_MOV64_IMM(BPF_REG_2, 4),
3264 BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_10),
3265 BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, -8),
3266 BPF_MOV64_REG(BPF_REG_3, BPF_REG_6),
3267 BPF_MOV64_IMM(BPF_REG_4, ~0),
3268 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
3269 BPF_FUNC_skb_load_bytes),
3270 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_6, 0),
3274 .errstr = "R4 min value is negative",
3275 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
3278 "raw_stack: skb_load_bytes, zero len",
3280 BPF_MOV64_IMM(BPF_REG_2, 4),
3281 BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_10),
3282 BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, -8),
3283 BPF_MOV64_REG(BPF_REG_3, BPF_REG_6),
3284 BPF_MOV64_IMM(BPF_REG_4, 0),
3285 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
3286 BPF_FUNC_skb_load_bytes),
3287 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_6, 0),
3291 .errstr = "invalid stack type R3",
3292 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
3295 "raw_stack: skb_load_bytes, no init",
3297 BPF_MOV64_IMM(BPF_REG_2, 4),
3298 BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_10),
3299 BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, -8),
3300 BPF_MOV64_REG(BPF_REG_3, BPF_REG_6),
3301 BPF_MOV64_IMM(BPF_REG_4, 8),
3302 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
3303 BPF_FUNC_skb_load_bytes),
3304 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_6, 0),
3308 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
3311 "raw_stack: skb_load_bytes, init",
3313 BPF_MOV64_IMM(BPF_REG_2, 4),
3314 BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_10),
3315 BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, -8),
3316 BPF_ST_MEM(BPF_DW, BPF_REG_6, 0, 0xcafe),
3317 BPF_MOV64_REG(BPF_REG_3, BPF_REG_6),
3318 BPF_MOV64_IMM(BPF_REG_4, 8),
3319 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
3320 BPF_FUNC_skb_load_bytes),
3321 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_6, 0),
3325 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
3328 "raw_stack: skb_load_bytes, spilled regs around bounds",
3330 BPF_MOV64_IMM(BPF_REG_2, 4),
3331 BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_10),
3332 BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, -16),
3333 BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_1, -8),
3334 BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_1, 8),
3335 BPF_MOV64_REG(BPF_REG_3, BPF_REG_6),
3336 BPF_MOV64_IMM(BPF_REG_4, 8),
3337 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
3338 BPF_FUNC_skb_load_bytes),
3339 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_6, -8),
3340 BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_6, 8),
3341 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_0,
3342 offsetof(struct __sk_buff, mark)),
3343 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_2,
3344 offsetof(struct __sk_buff, priority)),
3345 BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_2),
3349 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
3352 "raw_stack: skb_load_bytes, spilled regs corruption",
3354 BPF_MOV64_IMM(BPF_REG_2, 4),
3355 BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_10),
3356 BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, -8),
3357 BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_1, 0),
3358 BPF_MOV64_REG(BPF_REG_3, BPF_REG_6),
3359 BPF_MOV64_IMM(BPF_REG_4, 8),
3360 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
3361 BPF_FUNC_skb_load_bytes),
3362 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_6, 0),
3363 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_0,
3364 offsetof(struct __sk_buff, mark)),
3368 .errstr = "R0 invalid mem access 'inv'",
3369 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
3370 .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
3373 "raw_stack: skb_load_bytes, spilled regs corruption 2",
3375 BPF_MOV64_IMM(BPF_REG_2, 4),
3376 BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_10),
3377 BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, -16),
3378 BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_1, -8),
3379 BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_1, 0),
3380 BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_1, 8),
3381 BPF_MOV64_REG(BPF_REG_3, BPF_REG_6),
3382 BPF_MOV64_IMM(BPF_REG_4, 8),
3383 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
3384 BPF_FUNC_skb_load_bytes),
3385 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_6, -8),
3386 BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_6, 8),
3387 BPF_LDX_MEM(BPF_DW, BPF_REG_3, BPF_REG_6, 0),
3388 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_0,
3389 offsetof(struct __sk_buff, mark)),
3390 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_2,
3391 offsetof(struct __sk_buff, priority)),
3392 BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_2),
3393 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_3,
3394 offsetof(struct __sk_buff, pkt_type)),
3395 BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_3),
3399 .errstr = "R3 invalid mem access 'inv'",
3400 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
3401 .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
3404 "raw_stack: skb_load_bytes, spilled regs + data",
3406 BPF_MOV64_IMM(BPF_REG_2, 4),
3407 BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_10),
3408 BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, -16),
3409 BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_1, -8),
3410 BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_1, 0),
3411 BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_1, 8),
3412 BPF_MOV64_REG(BPF_REG_3, BPF_REG_6),
3413 BPF_MOV64_IMM(BPF_REG_4, 8),
3414 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
3415 BPF_FUNC_skb_load_bytes),
3416 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_6, -8),
3417 BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_6, 8),
3418 BPF_LDX_MEM(BPF_DW, BPF_REG_3, BPF_REG_6, 0),
3419 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_0,
3420 offsetof(struct __sk_buff, mark)),
3421 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_2,
3422 offsetof(struct __sk_buff, priority)),
3423 BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_2),
3424 BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_3),
3428 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
3431 "raw_stack: skb_load_bytes, invalid access 1",
3433 BPF_MOV64_IMM(BPF_REG_2, 4),
3434 BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_10),
3435 BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, -513),
3436 BPF_MOV64_REG(BPF_REG_3, BPF_REG_6),
3437 BPF_MOV64_IMM(BPF_REG_4, 8),
3438 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
3439 BPF_FUNC_skb_load_bytes),
3440 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_6, 0),
3444 .errstr = "invalid stack type R3 off=-513 access_size=8",
3445 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
3448 "raw_stack: skb_load_bytes, invalid access 2",
3450 BPF_MOV64_IMM(BPF_REG_2, 4),
3451 BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_10),
3452 BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, -1),
3453 BPF_MOV64_REG(BPF_REG_3, BPF_REG_6),
3454 BPF_MOV64_IMM(BPF_REG_4, 8),
3455 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
3456 BPF_FUNC_skb_load_bytes),
3457 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_6, 0),
3461 .errstr = "invalid stack type R3 off=-1 access_size=8",
3462 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
3465 "raw_stack: skb_load_bytes, invalid access 3",
3467 BPF_MOV64_IMM(BPF_REG_2, 4),
3468 BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_10),
3469 BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, 0xffffffff),
3470 BPF_MOV64_REG(BPF_REG_3, BPF_REG_6),
3471 BPF_MOV64_IMM(BPF_REG_4, 0xffffffff),
3472 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
3473 BPF_FUNC_skb_load_bytes),
3474 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_6, 0),
3478 .errstr = "R4 min value is negative",
3479 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
3482 "raw_stack: skb_load_bytes, invalid access 4",
3484 BPF_MOV64_IMM(BPF_REG_2, 4),
3485 BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_10),
3486 BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, -1),
3487 BPF_MOV64_REG(BPF_REG_3, BPF_REG_6),
3488 BPF_MOV64_IMM(BPF_REG_4, 0x7fffffff),
3489 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
3490 BPF_FUNC_skb_load_bytes),
3491 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_6, 0),
3495 .errstr = "R4 unbounded memory access, use 'var &= const' or 'if (var < const)'",
3496 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
3499 "raw_stack: skb_load_bytes, invalid access 5",
3501 BPF_MOV64_IMM(BPF_REG_2, 4),
3502 BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_10),
3503 BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, -512),
3504 BPF_MOV64_REG(BPF_REG_3, BPF_REG_6),
3505 BPF_MOV64_IMM(BPF_REG_4, 0x7fffffff),
3506 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
3507 BPF_FUNC_skb_load_bytes),
3508 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_6, 0),
3512 .errstr = "R4 unbounded memory access, use 'var &= const' or 'if (var < const)'",
3513 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
3516 "raw_stack: skb_load_bytes, invalid access 6",
3518 BPF_MOV64_IMM(BPF_REG_2, 4),
3519 BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_10),
3520 BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, -512),
3521 BPF_MOV64_REG(BPF_REG_3, BPF_REG_6),
3522 BPF_MOV64_IMM(BPF_REG_4, 0),
3523 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
3524 BPF_FUNC_skb_load_bytes),
3525 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_6, 0),
3529 .errstr = "invalid stack type R3 off=-512 access_size=0",
3530 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
3533 "raw_stack: skb_load_bytes, large access",
3535 BPF_MOV64_IMM(BPF_REG_2, 4),
3536 BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_10),
3537 BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, -512),
3538 BPF_MOV64_REG(BPF_REG_3, BPF_REG_6),
3539 BPF_MOV64_IMM(BPF_REG_4, 512),
3540 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
3541 BPF_FUNC_skb_load_bytes),
3542 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_6, 0),
3546 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
3549 "context stores via ST",
3551 BPF_MOV64_IMM(BPF_REG_0, 0),
3552 BPF_ST_MEM(BPF_DW, BPF_REG_1, offsetof(struct __sk_buff, mark), 0),
3555 .errstr = "BPF_ST stores into R1 ctx is not allowed",
3557 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
3560 "context stores via XADD",
3562 BPF_MOV64_IMM(BPF_REG_0, 0),
3563 BPF_RAW_INSN(BPF_STX | BPF_XADD | BPF_W, BPF_REG_1,
3564 BPF_REG_0, offsetof(struct __sk_buff, mark), 0),
3567 .errstr = "BPF_XADD stores into R1 ctx is not allowed",
3569 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
3572 "direct packet access: test1",
3574 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
3575 offsetof(struct __sk_buff, data)),
3576 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
3577 offsetof(struct __sk_buff, data_end)),
3578 BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
3579 BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
3580 BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 1),
3581 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_2, 0),
3582 BPF_MOV64_IMM(BPF_REG_0, 0),
3586 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
3589 "direct packet access: test2",
3591 BPF_MOV64_IMM(BPF_REG_0, 1),
3592 BPF_LDX_MEM(BPF_W, BPF_REG_4, BPF_REG_1,
3593 offsetof(struct __sk_buff, data_end)),
3594 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
3595 offsetof(struct __sk_buff, data)),
3596 BPF_MOV64_REG(BPF_REG_5, BPF_REG_3),
3597 BPF_ALU64_IMM(BPF_ADD, BPF_REG_5, 14),
3598 BPF_JMP_REG(BPF_JGT, BPF_REG_5, BPF_REG_4, 15),
3599 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_3, 7),
3600 BPF_LDX_MEM(BPF_B, BPF_REG_4, BPF_REG_3, 12),
3601 BPF_ALU64_IMM(BPF_MUL, BPF_REG_4, 14),
3602 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
3603 offsetof(struct __sk_buff, data)),
3604 BPF_ALU64_REG(BPF_ADD, BPF_REG_3, BPF_REG_4),
3605 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
3606 offsetof(struct __sk_buff, len)),
3607 BPF_ALU64_IMM(BPF_LSH, BPF_REG_2, 49),
3608 BPF_ALU64_IMM(BPF_RSH, BPF_REG_2, 49),
3609 BPF_ALU64_REG(BPF_ADD, BPF_REG_3, BPF_REG_2),
3610 BPF_MOV64_REG(BPF_REG_2, BPF_REG_3),
3611 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, 8),
3612 BPF_LDX_MEM(BPF_W, BPF_REG_1, BPF_REG_1,
3613 offsetof(struct __sk_buff, data_end)),
3614 BPF_JMP_REG(BPF_JGT, BPF_REG_2, BPF_REG_1, 1),
3615 BPF_LDX_MEM(BPF_B, BPF_REG_1, BPF_REG_3, 4),
3616 BPF_MOV64_IMM(BPF_REG_0, 0),
3620 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
3623 "direct packet access: test3",
3625 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
3626 offsetof(struct __sk_buff, data)),
3627 BPF_MOV64_IMM(BPF_REG_0, 0),
3630 .errstr = "invalid bpf_context access off=76",
3632 .prog_type = BPF_PROG_TYPE_SOCKET_FILTER,
3635 "direct packet access: test4 (write)",
3637 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
3638 offsetof(struct __sk_buff, data)),
3639 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
3640 offsetof(struct __sk_buff, data_end)),
3641 BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
3642 BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
3643 BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 1),
3644 BPF_STX_MEM(BPF_B, BPF_REG_2, BPF_REG_2, 0),
3645 BPF_MOV64_IMM(BPF_REG_0, 0),
3649 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
3652 "direct packet access: test5 (pkt_end >= reg, good access)",
3654 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
3655 offsetof(struct __sk_buff, data)),
3656 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
3657 offsetof(struct __sk_buff, data_end)),
3658 BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
3659 BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
3660 BPF_JMP_REG(BPF_JGE, BPF_REG_3, BPF_REG_0, 2),
3661 BPF_MOV64_IMM(BPF_REG_0, 1),
3663 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_2, 0),
3664 BPF_MOV64_IMM(BPF_REG_0, 0),
3668 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
3671 "direct packet access: test6 (pkt_end >= reg, bad access)",
3673 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
3674 offsetof(struct __sk_buff, data)),
3675 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
3676 offsetof(struct __sk_buff, data_end)),
3677 BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
3678 BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
3679 BPF_JMP_REG(BPF_JGE, BPF_REG_3, BPF_REG_0, 3),
3680 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_2, 0),
3681 BPF_MOV64_IMM(BPF_REG_0, 1),
3683 BPF_MOV64_IMM(BPF_REG_0, 0),
3686 .errstr = "invalid access to packet",
3688 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
3691 "direct packet access: test7 (pkt_end >= reg, both accesses)",
3693 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
3694 offsetof(struct __sk_buff, data)),
3695 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
3696 offsetof(struct __sk_buff, data_end)),
3697 BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
3698 BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
3699 BPF_JMP_REG(BPF_JGE, BPF_REG_3, BPF_REG_0, 3),
3700 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_2, 0),
3701 BPF_MOV64_IMM(BPF_REG_0, 1),
3703 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_2, 0),
3704 BPF_MOV64_IMM(BPF_REG_0, 0),
3707 .errstr = "invalid access to packet",
3709 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
3712 "direct packet access: test8 (double test, variant 1)",
3714 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
3715 offsetof(struct __sk_buff, data)),
3716 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
3717 offsetof(struct __sk_buff, data_end)),
3718 BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
3719 BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
3720 BPF_JMP_REG(BPF_JGE, BPF_REG_3, BPF_REG_0, 4),
3721 BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 1),
3722 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_2, 0),
3723 BPF_MOV64_IMM(BPF_REG_0, 1),
3725 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_2, 0),
3726 BPF_MOV64_IMM(BPF_REG_0, 0),
3730 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
3733 "direct packet access: test9 (double test, variant 2)",
3735 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
3736 offsetof(struct __sk_buff, data)),
3737 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
3738 offsetof(struct __sk_buff, data_end)),
3739 BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
3740 BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
3741 BPF_JMP_REG(BPF_JGE, BPF_REG_3, BPF_REG_0, 2),
3742 BPF_MOV64_IMM(BPF_REG_0, 1),
3744 BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 1),
3745 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_2, 0),
3746 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_2, 0),
3747 BPF_MOV64_IMM(BPF_REG_0, 0),
3751 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
3754 "direct packet access: test10 (write invalid)",
3756 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
3757 offsetof(struct __sk_buff, data)),
3758 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
3759 offsetof(struct __sk_buff, data_end)),
3760 BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
3761 BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
3762 BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 2),
3763 BPF_MOV64_IMM(BPF_REG_0, 0),
3765 BPF_STX_MEM(BPF_B, BPF_REG_2, BPF_REG_2, 0),
3766 BPF_MOV64_IMM(BPF_REG_0, 0),
3769 .errstr = "invalid access to packet",
3771 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
3774 "direct packet access: test11 (shift, good access)",
3776 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
3777 offsetof(struct __sk_buff, data)),
3778 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
3779 offsetof(struct __sk_buff, data_end)),
3780 BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
3781 BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 22),
3782 BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 8),
3783 BPF_MOV64_IMM(BPF_REG_3, 144),
3784 BPF_MOV64_REG(BPF_REG_5, BPF_REG_3),
3785 BPF_ALU64_IMM(BPF_ADD, BPF_REG_5, 23),
3786 BPF_ALU64_IMM(BPF_RSH, BPF_REG_5, 3),
3787 BPF_MOV64_REG(BPF_REG_6, BPF_REG_2),
3788 BPF_ALU64_REG(BPF_ADD, BPF_REG_6, BPF_REG_5),
3789 BPF_MOV64_IMM(BPF_REG_0, 1),
3791 BPF_MOV64_IMM(BPF_REG_0, 0),
3795 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
3799 "direct packet access: test12 (and, good access)",
3801 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
3802 offsetof(struct __sk_buff, data)),
3803 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
3804 offsetof(struct __sk_buff, data_end)),
3805 BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
3806 BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 22),
3807 BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 8),
3808 BPF_MOV64_IMM(BPF_REG_3, 144),
3809 BPF_MOV64_REG(BPF_REG_5, BPF_REG_3),
3810 BPF_ALU64_IMM(BPF_ADD, BPF_REG_5, 23),
3811 BPF_ALU64_IMM(BPF_AND, BPF_REG_5, 15),
3812 BPF_MOV64_REG(BPF_REG_6, BPF_REG_2),
3813 BPF_ALU64_REG(BPF_ADD, BPF_REG_6, BPF_REG_5),
3814 BPF_MOV64_IMM(BPF_REG_0, 1),
3816 BPF_MOV64_IMM(BPF_REG_0, 0),
3820 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
3824 "direct packet access: test13 (branches, good access)",
3826 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
3827 offsetof(struct __sk_buff, data)),
3828 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
3829 offsetof(struct __sk_buff, data_end)),
3830 BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
3831 BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 22),
3832 BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 13),
3833 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
3834 offsetof(struct __sk_buff, mark)),
3835 BPF_MOV64_IMM(BPF_REG_4, 1),
3836 BPF_JMP_REG(BPF_JGT, BPF_REG_3, BPF_REG_4, 2),
3837 BPF_MOV64_IMM(BPF_REG_3, 14),
3838 BPF_JMP_IMM(BPF_JA, 0, 0, 1),
3839 BPF_MOV64_IMM(BPF_REG_3, 24),
3840 BPF_MOV64_REG(BPF_REG_5, BPF_REG_3),
3841 BPF_ALU64_IMM(BPF_ADD, BPF_REG_5, 23),
3842 BPF_ALU64_IMM(BPF_AND, BPF_REG_5, 15),
3843 BPF_MOV64_REG(BPF_REG_6, BPF_REG_2),
3844 BPF_ALU64_REG(BPF_ADD, BPF_REG_6, BPF_REG_5),
3845 BPF_MOV64_IMM(BPF_REG_0, 1),
3847 BPF_MOV64_IMM(BPF_REG_0, 0),
3851 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
3855 "direct packet access: test14 (pkt_ptr += 0, CONST_IMM, good access)",
3857 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
3858 offsetof(struct __sk_buff, data)),
3859 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
3860 offsetof(struct __sk_buff, data_end)),
3861 BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
3862 BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 22),
3863 BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 7),
3864 BPF_MOV64_IMM(BPF_REG_5, 12),
3865 BPF_ALU64_IMM(BPF_RSH, BPF_REG_5, 4),
3866 BPF_MOV64_REG(BPF_REG_6, BPF_REG_2),
3867 BPF_ALU64_REG(BPF_ADD, BPF_REG_6, BPF_REG_5),
3868 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_6, 0),
3869 BPF_MOV64_IMM(BPF_REG_0, 1),
3871 BPF_MOV64_IMM(BPF_REG_0, 0),
3875 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
3879 "direct packet access: test15 (spill with xadd)",
3881 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
3882 offsetof(struct __sk_buff, data)),
3883 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
3884 offsetof(struct __sk_buff, data_end)),
3885 BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
3886 BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
3887 BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 8),
3888 BPF_MOV64_IMM(BPF_REG_5, 4096),
3889 BPF_MOV64_REG(BPF_REG_4, BPF_REG_10),
3890 BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, -8),
3891 BPF_STX_MEM(BPF_DW, BPF_REG_4, BPF_REG_2, 0),
3892 BPF_STX_XADD(BPF_DW, BPF_REG_4, BPF_REG_5, 0),
3893 BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_4, 0),
3894 BPF_STX_MEM(BPF_W, BPF_REG_2, BPF_REG_5, 0),
3895 BPF_MOV64_IMM(BPF_REG_0, 0),
3898 .errstr = "R2 invalid mem access 'inv'",
3900 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
3901 .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
3904 "direct packet access: test16 (arith on data_end)",
3906 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
3907 offsetof(struct __sk_buff, data)),
3908 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
3909 offsetof(struct __sk_buff, data_end)),
3910 BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
3911 BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
3912 BPF_ALU64_IMM(BPF_ADD, BPF_REG_3, 16),
3913 BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 1),
3914 BPF_STX_MEM(BPF_B, BPF_REG_2, BPF_REG_2, 0),
3915 BPF_MOV64_IMM(BPF_REG_0, 0),
3918 .errstr = "R3 pointer arithmetic on pkt_end",
3920 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
3923 "direct packet access: test17 (pruning, alignment)",
3925 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
3926 offsetof(struct __sk_buff, data)),
3927 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
3928 offsetof(struct __sk_buff, data_end)),
3929 BPF_LDX_MEM(BPF_W, BPF_REG_7, BPF_REG_1,
3930 offsetof(struct __sk_buff, mark)),
3931 BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
3932 BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 14),
3933 BPF_JMP_IMM(BPF_JGT, BPF_REG_7, 1, 4),
3934 BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 1),
3935 BPF_STX_MEM(BPF_W, BPF_REG_0, BPF_REG_0, -4),
3936 BPF_MOV64_IMM(BPF_REG_0, 0),
3938 BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 1),
3941 .errstr = "misaligned packet access off 2+(0x0; 0x0)+15+-4 size 4",
3943 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
3944 .flags = F_LOAD_WITH_STRICT_ALIGNMENT,
3947 "direct packet access: test18 (imm += pkt_ptr, 1)",
3949 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
3950 offsetof(struct __sk_buff, data)),
3951 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
3952 offsetof(struct __sk_buff, data_end)),
3953 BPF_MOV64_IMM(BPF_REG_0, 8),
3954 BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_2),
3955 BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 1),
3956 BPF_STX_MEM(BPF_B, BPF_REG_2, BPF_REG_2, 0),
3957 BPF_MOV64_IMM(BPF_REG_0, 0),
3961 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
3964 "direct packet access: test19 (imm += pkt_ptr, 2)",
3966 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
3967 offsetof(struct __sk_buff, data)),
3968 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
3969 offsetof(struct __sk_buff, data_end)),
3970 BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
3971 BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
3972 BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 3),
3973 BPF_MOV64_IMM(BPF_REG_4, 4),
3974 BPF_ALU64_REG(BPF_ADD, BPF_REG_4, BPF_REG_2),
3975 BPF_STX_MEM(BPF_B, BPF_REG_4, BPF_REG_4, 0),
3976 BPF_MOV64_IMM(BPF_REG_0, 0),
3980 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
3983 "direct packet access: test20 (x += pkt_ptr, 1)",
3985 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
3986 offsetof(struct __sk_buff, data)),
3987 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
3988 offsetof(struct __sk_buff, data_end)),
3989 BPF_MOV64_IMM(BPF_REG_0, 0xffffffff),
3990 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -8),
3991 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_10, -8),
3992 BPF_ALU64_IMM(BPF_AND, BPF_REG_0, 0x7fff),
3993 BPF_MOV64_REG(BPF_REG_4, BPF_REG_0),
3994 BPF_ALU64_REG(BPF_ADD, BPF_REG_4, BPF_REG_2),
3995 BPF_MOV64_REG(BPF_REG_5, BPF_REG_4),
3996 BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, 0x7fff - 1),
3997 BPF_JMP_REG(BPF_JGT, BPF_REG_4, BPF_REG_3, 1),
3998 BPF_STX_MEM(BPF_DW, BPF_REG_5, BPF_REG_4, 0),
3999 BPF_MOV64_IMM(BPF_REG_0, 0),
4002 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
4004 .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
4007 "direct packet access: test21 (x += pkt_ptr, 2)",
4009 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
4010 offsetof(struct __sk_buff, data)),
4011 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
4012 offsetof(struct __sk_buff, data_end)),
4013 BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
4014 BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
4015 BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 9),
4016 BPF_MOV64_IMM(BPF_REG_4, 0xffffffff),
4017 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_4, -8),
4018 BPF_LDX_MEM(BPF_DW, BPF_REG_4, BPF_REG_10, -8),
4019 BPF_ALU64_IMM(BPF_AND, BPF_REG_4, 0x7fff),
4020 BPF_ALU64_REG(BPF_ADD, BPF_REG_4, BPF_REG_2),
4021 BPF_MOV64_REG(BPF_REG_5, BPF_REG_4),
4022 BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, 0x7fff - 1),
4023 BPF_JMP_REG(BPF_JGT, BPF_REG_4, BPF_REG_3, 1),
4024 BPF_STX_MEM(BPF_DW, BPF_REG_5, BPF_REG_4, 0),
4025 BPF_MOV64_IMM(BPF_REG_0, 0),
4028 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
4030 .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
4033 "direct packet access: test22 (x += pkt_ptr, 3)",
4035 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
4036 offsetof(struct __sk_buff, data)),
4037 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
4038 offsetof(struct __sk_buff, data_end)),
4039 BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
4040 BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
4041 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_2, -8),
4042 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_3, -16),
4043 BPF_LDX_MEM(BPF_DW, BPF_REG_3, BPF_REG_10, -16),
4044 BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 11),
4045 BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_10, -8),
4046 BPF_MOV64_IMM(BPF_REG_4, 0xffffffff),
4047 BPF_STX_XADD(BPF_DW, BPF_REG_10, BPF_REG_4, -8),
4048 BPF_LDX_MEM(BPF_DW, BPF_REG_4, BPF_REG_10, -8),
4049 BPF_ALU64_IMM(BPF_RSH, BPF_REG_4, 49),
4050 BPF_ALU64_REG(BPF_ADD, BPF_REG_4, BPF_REG_2),
4051 BPF_MOV64_REG(BPF_REG_0, BPF_REG_4),
4052 BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 2),
4053 BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 2),
4054 BPF_MOV64_IMM(BPF_REG_2, 1),
4055 BPF_STX_MEM(BPF_H, BPF_REG_4, BPF_REG_2, 0),
4056 BPF_MOV64_IMM(BPF_REG_0, 0),
4059 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
4061 .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
4064 "direct packet access: test23 (x += pkt_ptr, 4)",
4066 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
4067 offsetof(struct __sk_buff, data)),
4068 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
4069 offsetof(struct __sk_buff, data_end)),
4070 BPF_MOV64_IMM(BPF_REG_0, 0xffffffff),
4071 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -8),
4072 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_10, -8),
4073 BPF_ALU64_IMM(BPF_AND, BPF_REG_0, 0xffff),
4074 BPF_MOV64_REG(BPF_REG_4, BPF_REG_0),
4075 BPF_MOV64_IMM(BPF_REG_0, 31),
4076 BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_4),
4077 BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_2),
4078 BPF_MOV64_REG(BPF_REG_5, BPF_REG_0),
4079 BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 0xffff - 1),
4080 BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 1),
4081 BPF_STX_MEM(BPF_DW, BPF_REG_5, BPF_REG_0, 0),
4082 BPF_MOV64_IMM(BPF_REG_0, 0),
4085 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
4087 .errstr = "invalid access to packet, off=0 size=8, R5(id=1,off=0,r=0)",
4088 .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
4091 "direct packet access: test24 (x += pkt_ptr, 5)",
4093 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
4094 offsetof(struct __sk_buff, data)),
4095 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
4096 offsetof(struct __sk_buff, data_end)),
4097 BPF_MOV64_IMM(BPF_REG_0, 0xffffffff),
4098 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -8),
4099 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_10, -8),
4100 BPF_ALU64_IMM(BPF_AND, BPF_REG_0, 0xff),
4101 BPF_MOV64_REG(BPF_REG_4, BPF_REG_0),
4102 BPF_MOV64_IMM(BPF_REG_0, 64),
4103 BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_4),
4104 BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_2),
4105 BPF_MOV64_REG(BPF_REG_5, BPF_REG_0),
4106 BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 0x7fff - 1),
4107 BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 1),
4108 BPF_STX_MEM(BPF_DW, BPF_REG_5, BPF_REG_0, 0),
4109 BPF_MOV64_IMM(BPF_REG_0, 0),
4112 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
4114 .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
4117 "direct packet access: test25 (marking on <, good access)",
4119 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
4120 offsetof(struct __sk_buff, data)),
4121 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
4122 offsetof(struct __sk_buff, data_end)),
4123 BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
4124 BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
4125 BPF_JMP_REG(BPF_JLT, BPF_REG_0, BPF_REG_3, 2),
4126 BPF_MOV64_IMM(BPF_REG_0, 0),
4128 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_2, 0),
4129 BPF_JMP_IMM(BPF_JA, 0, 0, -4),
4132 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
4135 "direct packet access: test26 (marking on <, bad access)",
4137 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
4138 offsetof(struct __sk_buff, data)),
4139 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
4140 offsetof(struct __sk_buff, data_end)),
4141 BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
4142 BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
4143 BPF_JMP_REG(BPF_JLT, BPF_REG_0, BPF_REG_3, 3),
4144 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_2, 0),
4145 BPF_MOV64_IMM(BPF_REG_0, 0),
4147 BPF_JMP_IMM(BPF_JA, 0, 0, -3),
4150 .errstr = "invalid access to packet",
4151 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
4154 "direct packet access: test27 (marking on <=, good access)",
4156 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
4157 offsetof(struct __sk_buff, data)),
4158 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
4159 offsetof(struct __sk_buff, data_end)),
4160 BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
4161 BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
4162 BPF_JMP_REG(BPF_JLE, BPF_REG_3, BPF_REG_0, 1),
4163 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_2, 0),
4164 BPF_MOV64_IMM(BPF_REG_0, 1),
4168 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
4172 "direct packet access: test28 (marking on <=, bad access)",
4174 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
4175 offsetof(struct __sk_buff, data)),
4176 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
4177 offsetof(struct __sk_buff, data_end)),
4178 BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
4179 BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
4180 BPF_JMP_REG(BPF_JLE, BPF_REG_3, BPF_REG_0, 2),
4181 BPF_MOV64_IMM(BPF_REG_0, 1),
4183 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_2, 0),
4184 BPF_JMP_IMM(BPF_JA, 0, 0, -4),
4187 .errstr = "invalid access to packet",
4188 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
4191 "helper access to packet: test1, valid packet_ptr range",
4193 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
4194 offsetof(struct xdp_md, data)),
4195 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
4196 offsetof(struct xdp_md, data_end)),
4197 BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
4198 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
4199 BPF_JMP_REG(BPF_JGT, BPF_REG_1, BPF_REG_3, 5),
4200 BPF_LD_MAP_FD(BPF_REG_1, 0),
4201 BPF_MOV64_REG(BPF_REG_3, BPF_REG_2),
4202 BPF_MOV64_IMM(BPF_REG_4, 0),
4203 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
4204 BPF_FUNC_map_update_elem),
4205 BPF_MOV64_IMM(BPF_REG_0, 0),
4208 .fixup_map_hash_8b = { 5 },
4209 .result_unpriv = ACCEPT,
4211 .prog_type = BPF_PROG_TYPE_XDP,
4214 "helper access to packet: test2, unchecked packet_ptr",
4216 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
4217 offsetof(struct xdp_md, data)),
4218 BPF_LD_MAP_FD(BPF_REG_1, 0),
4219 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
4220 BPF_FUNC_map_lookup_elem),
4221 BPF_MOV64_IMM(BPF_REG_0, 0),
4224 .fixup_map_hash_8b = { 1 },
4226 .errstr = "invalid access to packet",
4227 .prog_type = BPF_PROG_TYPE_XDP,
4230 "helper access to packet: test3, variable add",
4232 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
4233 offsetof(struct xdp_md, data)),
4234 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
4235 offsetof(struct xdp_md, data_end)),
4236 BPF_MOV64_REG(BPF_REG_4, BPF_REG_2),
4237 BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, 8),
4238 BPF_JMP_REG(BPF_JGT, BPF_REG_4, BPF_REG_3, 10),
4239 BPF_LDX_MEM(BPF_B, BPF_REG_5, BPF_REG_2, 0),
4240 BPF_MOV64_REG(BPF_REG_4, BPF_REG_2),
4241 BPF_ALU64_REG(BPF_ADD, BPF_REG_4, BPF_REG_5),
4242 BPF_MOV64_REG(BPF_REG_5, BPF_REG_4),
4243 BPF_ALU64_IMM(BPF_ADD, BPF_REG_5, 8),
4244 BPF_JMP_REG(BPF_JGT, BPF_REG_5, BPF_REG_3, 4),
4245 BPF_LD_MAP_FD(BPF_REG_1, 0),
4246 BPF_MOV64_REG(BPF_REG_2, BPF_REG_4),
4247 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
4248 BPF_FUNC_map_lookup_elem),
4249 BPF_MOV64_IMM(BPF_REG_0, 0),
4252 .fixup_map_hash_8b = { 11 },
4254 .prog_type = BPF_PROG_TYPE_XDP,
4257 "helper access to packet: test4, packet_ptr with bad range",
4259 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
4260 offsetof(struct xdp_md, data)),
4261 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
4262 offsetof(struct xdp_md, data_end)),
4263 BPF_MOV64_REG(BPF_REG_4, BPF_REG_2),
4264 BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, 4),
4265 BPF_JMP_REG(BPF_JGT, BPF_REG_4, BPF_REG_3, 2),
4266 BPF_MOV64_IMM(BPF_REG_0, 0),
4268 BPF_LD_MAP_FD(BPF_REG_1, 0),
4269 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
4270 BPF_FUNC_map_lookup_elem),
4271 BPF_MOV64_IMM(BPF_REG_0, 0),
4274 .fixup_map_hash_8b = { 7 },
4276 .errstr = "invalid access to packet",
4277 .prog_type = BPF_PROG_TYPE_XDP,
4280 "helper access to packet: test5, packet_ptr with too short range",
4282 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
4283 offsetof(struct xdp_md, data)),
4284 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
4285 offsetof(struct xdp_md, data_end)),
4286 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, 1),
4287 BPF_MOV64_REG(BPF_REG_4, BPF_REG_2),
4288 BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, 7),
4289 BPF_JMP_REG(BPF_JGT, BPF_REG_4, BPF_REG_3, 3),
4290 BPF_LD_MAP_FD(BPF_REG_1, 0),
4291 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
4292 BPF_FUNC_map_lookup_elem),
4293 BPF_MOV64_IMM(BPF_REG_0, 0),
4296 .fixup_map_hash_8b = { 6 },
4298 .errstr = "invalid access to packet",
4299 .prog_type = BPF_PROG_TYPE_XDP,
4302 "helper access to packet: test6, cls valid packet_ptr range",
4304 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
4305 offsetof(struct __sk_buff, data)),
4306 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
4307 offsetof(struct __sk_buff, data_end)),
4308 BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
4309 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
4310 BPF_JMP_REG(BPF_JGT, BPF_REG_1, BPF_REG_3, 5),
4311 BPF_LD_MAP_FD(BPF_REG_1, 0),
4312 BPF_MOV64_REG(BPF_REG_3, BPF_REG_2),
4313 BPF_MOV64_IMM(BPF_REG_4, 0),
4314 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
4315 BPF_FUNC_map_update_elem),
4316 BPF_MOV64_IMM(BPF_REG_0, 0),
4319 .fixup_map_hash_8b = { 5 },
4321 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
4324 "helper access to packet: test7, cls unchecked packet_ptr",
4326 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
4327 offsetof(struct __sk_buff, data)),
4328 BPF_LD_MAP_FD(BPF_REG_1, 0),
4329 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
4330 BPF_FUNC_map_lookup_elem),
4331 BPF_MOV64_IMM(BPF_REG_0, 0),
4334 .fixup_map_hash_8b = { 1 },
4336 .errstr = "invalid access to packet",
4337 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
4340 "helper access to packet: test8, cls variable add",
4342 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
4343 offsetof(struct __sk_buff, data)),
4344 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
4345 offsetof(struct __sk_buff, data_end)),
4346 BPF_MOV64_REG(BPF_REG_4, BPF_REG_2),
4347 BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, 8),
4348 BPF_JMP_REG(BPF_JGT, BPF_REG_4, BPF_REG_3, 10),
4349 BPF_LDX_MEM(BPF_B, BPF_REG_5, BPF_REG_2, 0),
4350 BPF_MOV64_REG(BPF_REG_4, BPF_REG_2),
4351 BPF_ALU64_REG(BPF_ADD, BPF_REG_4, BPF_REG_5),
4352 BPF_MOV64_REG(BPF_REG_5, BPF_REG_4),
4353 BPF_ALU64_IMM(BPF_ADD, BPF_REG_5, 8),
4354 BPF_JMP_REG(BPF_JGT, BPF_REG_5, BPF_REG_3, 4),
4355 BPF_LD_MAP_FD(BPF_REG_1, 0),
4356 BPF_MOV64_REG(BPF_REG_2, BPF_REG_4),
4357 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
4358 BPF_FUNC_map_lookup_elem),
4359 BPF_MOV64_IMM(BPF_REG_0, 0),
4362 .fixup_map_hash_8b = { 11 },
4364 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
4367 "helper access to packet: test9, cls packet_ptr with bad range",
4369 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
4370 offsetof(struct __sk_buff, data)),
4371 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
4372 offsetof(struct __sk_buff, data_end)),
4373 BPF_MOV64_REG(BPF_REG_4, BPF_REG_2),
4374 BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, 4),
4375 BPF_JMP_REG(BPF_JGT, BPF_REG_4, BPF_REG_3, 2),
4376 BPF_MOV64_IMM(BPF_REG_0, 0),
4378 BPF_LD_MAP_FD(BPF_REG_1, 0),
4379 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
4380 BPF_FUNC_map_lookup_elem),
4381 BPF_MOV64_IMM(BPF_REG_0, 0),
4384 .fixup_map_hash_8b = { 7 },
4386 .errstr = "invalid access to packet",
4387 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
4390 "helper access to packet: test10, cls packet_ptr with too short range",
4392 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
4393 offsetof(struct __sk_buff, data)),
4394 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
4395 offsetof(struct __sk_buff, data_end)),
4396 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, 1),
4397 BPF_MOV64_REG(BPF_REG_4, BPF_REG_2),
4398 BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, 7),
4399 BPF_JMP_REG(BPF_JGT, BPF_REG_4, BPF_REG_3, 3),
4400 BPF_LD_MAP_FD(BPF_REG_1, 0),
4401 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
4402 BPF_FUNC_map_lookup_elem),
4403 BPF_MOV64_IMM(BPF_REG_0, 0),
4406 .fixup_map_hash_8b = { 6 },
4408 .errstr = "invalid access to packet",
4409 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
4412 "helper access to packet: test11, cls unsuitable helper 1",
4414 BPF_LDX_MEM(BPF_W, BPF_REG_6, BPF_REG_1,
4415 offsetof(struct __sk_buff, data)),
4416 BPF_LDX_MEM(BPF_W, BPF_REG_7, BPF_REG_1,
4417 offsetof(struct __sk_buff, data_end)),
4418 BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, 1),
4419 BPF_MOV64_REG(BPF_REG_3, BPF_REG_6),
4420 BPF_ALU64_IMM(BPF_ADD, BPF_REG_3, 7),
4421 BPF_JMP_REG(BPF_JGT, BPF_REG_3, BPF_REG_7, 4),
4422 BPF_MOV64_IMM(BPF_REG_2, 0),
4423 BPF_MOV64_IMM(BPF_REG_4, 42),
4424 BPF_MOV64_IMM(BPF_REG_5, 0),
4425 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
4426 BPF_FUNC_skb_store_bytes),
4427 BPF_MOV64_IMM(BPF_REG_0, 0),
4431 .errstr = "helper access to the packet",
4432 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
4435 "helper access to packet: test12, cls unsuitable helper 2",
4437 BPF_LDX_MEM(BPF_W, BPF_REG_6, BPF_REG_1,
4438 offsetof(struct __sk_buff, data)),
4439 BPF_LDX_MEM(BPF_W, BPF_REG_7, BPF_REG_1,
4440 offsetof(struct __sk_buff, data_end)),
4441 BPF_MOV64_REG(BPF_REG_3, BPF_REG_6),
4442 BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, 8),
4443 BPF_JMP_REG(BPF_JGT, BPF_REG_6, BPF_REG_7, 3),
4444 BPF_MOV64_IMM(BPF_REG_2, 0),
4445 BPF_MOV64_IMM(BPF_REG_4, 4),
4446 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
4447 BPF_FUNC_skb_load_bytes),
4448 BPF_MOV64_IMM(BPF_REG_0, 0),
4452 .errstr = "helper access to the packet",
4453 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
4456 "helper access to packet: test13, cls helper ok",
4458 BPF_LDX_MEM(BPF_W, BPF_REG_6, BPF_REG_1,
4459 offsetof(struct __sk_buff, data)),
4460 BPF_LDX_MEM(BPF_W, BPF_REG_7, BPF_REG_1,
4461 offsetof(struct __sk_buff, data_end)),
4462 BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, 1),
4463 BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
4464 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 7),
4465 BPF_JMP_REG(BPF_JGT, BPF_REG_1, BPF_REG_7, 6),
4466 BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
4467 BPF_MOV64_IMM(BPF_REG_2, 4),
4468 BPF_MOV64_IMM(BPF_REG_3, 0),
4469 BPF_MOV64_IMM(BPF_REG_4, 0),
4470 BPF_MOV64_IMM(BPF_REG_5, 0),
4471 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
4472 BPF_FUNC_csum_diff),
4473 BPF_MOV64_IMM(BPF_REG_0, 0),
4477 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
4480 "helper access to packet: test14, cls helper ok sub",
4482 BPF_LDX_MEM(BPF_W, BPF_REG_6, BPF_REG_1,
4483 offsetof(struct __sk_buff, data)),
4484 BPF_LDX_MEM(BPF_W, BPF_REG_7, BPF_REG_1,
4485 offsetof(struct __sk_buff, data_end)),
4486 BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, 1),
4487 BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
4488 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 7),
4489 BPF_JMP_REG(BPF_JGT, BPF_REG_1, BPF_REG_7, 6),
4490 BPF_ALU64_IMM(BPF_SUB, BPF_REG_1, 4),
4491 BPF_MOV64_IMM(BPF_REG_2, 4),
4492 BPF_MOV64_IMM(BPF_REG_3, 0),
4493 BPF_MOV64_IMM(BPF_REG_4, 0),
4494 BPF_MOV64_IMM(BPF_REG_5, 0),
4495 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
4496 BPF_FUNC_csum_diff),
4497 BPF_MOV64_IMM(BPF_REG_0, 0),
4501 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
4504 "helper access to packet: test15, cls helper fail sub",
4506 BPF_LDX_MEM(BPF_W, BPF_REG_6, BPF_REG_1,
4507 offsetof(struct __sk_buff, data)),
4508 BPF_LDX_MEM(BPF_W, BPF_REG_7, BPF_REG_1,
4509 offsetof(struct __sk_buff, data_end)),
4510 BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, 1),
4511 BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
4512 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 7),
4513 BPF_JMP_REG(BPF_JGT, BPF_REG_1, BPF_REG_7, 6),
4514 BPF_ALU64_IMM(BPF_SUB, BPF_REG_1, 12),
4515 BPF_MOV64_IMM(BPF_REG_2, 4),
4516 BPF_MOV64_IMM(BPF_REG_3, 0),
4517 BPF_MOV64_IMM(BPF_REG_4, 0),
4518 BPF_MOV64_IMM(BPF_REG_5, 0),
4519 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
4520 BPF_FUNC_csum_diff),
4521 BPF_MOV64_IMM(BPF_REG_0, 0),
4525 .errstr = "invalid access to packet",
4526 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
4529 "helper access to packet: test16, cls helper fail range 1",
4531 BPF_LDX_MEM(BPF_W, BPF_REG_6, BPF_REG_1,
4532 offsetof(struct __sk_buff, data)),
4533 BPF_LDX_MEM(BPF_W, BPF_REG_7, BPF_REG_1,
4534 offsetof(struct __sk_buff, data_end)),
4535 BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, 1),
4536 BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
4537 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 7),
4538 BPF_JMP_REG(BPF_JGT, BPF_REG_1, BPF_REG_7, 6),
4539 BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
4540 BPF_MOV64_IMM(BPF_REG_2, 8),
4541 BPF_MOV64_IMM(BPF_REG_3, 0),
4542 BPF_MOV64_IMM(BPF_REG_4, 0),
4543 BPF_MOV64_IMM(BPF_REG_5, 0),
4544 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
4545 BPF_FUNC_csum_diff),
4546 BPF_MOV64_IMM(BPF_REG_0, 0),
4550 .errstr = "invalid access to packet",
4551 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
4554 "helper access to packet: test17, cls helper fail range 2",
4556 BPF_LDX_MEM(BPF_W, BPF_REG_6, BPF_REG_1,
4557 offsetof(struct __sk_buff, data)),
4558 BPF_LDX_MEM(BPF_W, BPF_REG_7, BPF_REG_1,
4559 offsetof(struct __sk_buff, data_end)),
4560 BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, 1),
4561 BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
4562 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 7),
4563 BPF_JMP_REG(BPF_JGT, BPF_REG_1, BPF_REG_7, 6),
4564 BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
4565 BPF_MOV64_IMM(BPF_REG_2, -9),
4566 BPF_MOV64_IMM(BPF_REG_3, 0),
4567 BPF_MOV64_IMM(BPF_REG_4, 0),
4568 BPF_MOV64_IMM(BPF_REG_5, 0),
4569 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
4570 BPF_FUNC_csum_diff),
4571 BPF_MOV64_IMM(BPF_REG_0, 0),
4575 .errstr = "R2 min value is negative",
4576 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
4579 "helper access to packet: test18, cls helper fail range 3",
4581 BPF_LDX_MEM(BPF_W, BPF_REG_6, BPF_REG_1,
4582 offsetof(struct __sk_buff, data)),
4583 BPF_LDX_MEM(BPF_W, BPF_REG_7, BPF_REG_1,
4584 offsetof(struct __sk_buff, data_end)),
4585 BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, 1),
4586 BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
4587 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 7),
4588 BPF_JMP_REG(BPF_JGT, BPF_REG_1, BPF_REG_7, 6),
4589 BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
4590 BPF_MOV64_IMM(BPF_REG_2, ~0),
4591 BPF_MOV64_IMM(BPF_REG_3, 0),
4592 BPF_MOV64_IMM(BPF_REG_4, 0),
4593 BPF_MOV64_IMM(BPF_REG_5, 0),
4594 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
4595 BPF_FUNC_csum_diff),
4596 BPF_MOV64_IMM(BPF_REG_0, 0),
4600 .errstr = "R2 min value is negative",
4601 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
4604 "helper access to packet: test19, cls helper range zero",
4606 BPF_LDX_MEM(BPF_W, BPF_REG_6, BPF_REG_1,
4607 offsetof(struct __sk_buff, data)),
4608 BPF_LDX_MEM(BPF_W, BPF_REG_7, BPF_REG_1,
4609 offsetof(struct __sk_buff, data_end)),
4610 BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, 1),
4611 BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
4612 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 7),
4613 BPF_JMP_REG(BPF_JGT, BPF_REG_1, BPF_REG_7, 6),
4614 BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
4615 BPF_MOV64_IMM(BPF_REG_2, 0),
4616 BPF_MOV64_IMM(BPF_REG_3, 0),
4617 BPF_MOV64_IMM(BPF_REG_4, 0),
4618 BPF_MOV64_IMM(BPF_REG_5, 0),
4619 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
4620 BPF_FUNC_csum_diff),
4621 BPF_MOV64_IMM(BPF_REG_0, 0),
4625 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
4628 "helper access to packet: test20, pkt end as input",
4630 BPF_LDX_MEM(BPF_W, BPF_REG_6, BPF_REG_1,
4631 offsetof(struct __sk_buff, data)),
4632 BPF_LDX_MEM(BPF_W, BPF_REG_7, BPF_REG_1,
4633 offsetof(struct __sk_buff, data_end)),
4634 BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, 1),
4635 BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
4636 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 7),
4637 BPF_JMP_REG(BPF_JGT, BPF_REG_1, BPF_REG_7, 6),
4638 BPF_MOV64_REG(BPF_REG_1, BPF_REG_7),
4639 BPF_MOV64_IMM(BPF_REG_2, 4),
4640 BPF_MOV64_IMM(BPF_REG_3, 0),
4641 BPF_MOV64_IMM(BPF_REG_4, 0),
4642 BPF_MOV64_IMM(BPF_REG_5, 0),
4643 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
4644 BPF_FUNC_csum_diff),
4645 BPF_MOV64_IMM(BPF_REG_0, 0),
4649 .errstr = "R1 type=pkt_end expected=fp",
4650 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
4653 "helper access to packet: test21, wrong reg",
4655 BPF_LDX_MEM(BPF_W, BPF_REG_6, BPF_REG_1,
4656 offsetof(struct __sk_buff, data)),
4657 BPF_LDX_MEM(BPF_W, BPF_REG_7, BPF_REG_1,
4658 offsetof(struct __sk_buff, data_end)),
4659 BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, 1),
4660 BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
4661 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 7),
4662 BPF_JMP_REG(BPF_JGT, BPF_REG_1, BPF_REG_7, 6),
4663 BPF_MOV64_IMM(BPF_REG_2, 4),
4664 BPF_MOV64_IMM(BPF_REG_3, 0),
4665 BPF_MOV64_IMM(BPF_REG_4, 0),
4666 BPF_MOV64_IMM(BPF_REG_5, 0),
4667 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
4668 BPF_FUNC_csum_diff),
4669 BPF_MOV64_IMM(BPF_REG_0, 0),
4673 .errstr = "invalid access to packet",
4674 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
4677 "prevent map lookup in sockmap",
4679 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
4680 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
4681 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
4682 BPF_LD_MAP_FD(BPF_REG_1, 0),
4683 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
4684 BPF_FUNC_map_lookup_elem),
4687 .fixup_map_sockmap = { 3 },
4689 .errstr = "cannot pass map_type 15 into func bpf_map_lookup_elem",
4690 .prog_type = BPF_PROG_TYPE_SOCK_OPS,
4693 "prevent map lookup in sockhash",
4695 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
4696 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
4697 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
4698 BPF_LD_MAP_FD(BPF_REG_1, 0),
4699 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
4700 BPF_FUNC_map_lookup_elem),
4703 .fixup_map_sockhash = { 3 },
4705 .errstr = "cannot pass map_type 18 into func bpf_map_lookup_elem",
4706 .prog_type = BPF_PROG_TYPE_SOCK_OPS,
4709 "prevent map lookup in xskmap",
4711 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
4712 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
4713 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
4714 BPF_LD_MAP_FD(BPF_REG_1, 0),
4715 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
4716 BPF_FUNC_map_lookup_elem),
4719 .fixup_map_xskmap = { 3 },
4721 .errstr = "cannot pass map_type 17 into func bpf_map_lookup_elem",
4722 .prog_type = BPF_PROG_TYPE_XDP,
4725 "prevent map lookup in stack trace",
4727 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
4728 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
4729 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
4730 BPF_LD_MAP_FD(BPF_REG_1, 0),
4731 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
4732 BPF_FUNC_map_lookup_elem),
4735 .fixup_map_stacktrace = { 3 },
4737 .errstr = "cannot pass map_type 7 into func bpf_map_lookup_elem",
4738 .prog_type = BPF_PROG_TYPE_PERF_EVENT,
4741 "prevent map lookup in prog array",
4743 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
4744 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
4745 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
4746 BPF_LD_MAP_FD(BPF_REG_1, 0),
4747 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
4748 BPF_FUNC_map_lookup_elem),
4751 .fixup_prog2 = { 3 },
4753 .errstr = "cannot pass map_type 3 into func bpf_map_lookup_elem",
4756 "valid map access into an array with a constant",
4758 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
4759 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
4760 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
4761 BPF_LD_MAP_FD(BPF_REG_1, 0),
4762 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
4763 BPF_FUNC_map_lookup_elem),
4764 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 1),
4765 BPF_ST_MEM(BPF_DW, BPF_REG_0, 0,
4766 offsetof(struct test_val, foo)),
4769 .fixup_map_hash_48b = { 3 },
4770 .errstr_unpriv = "R0 leaks addr",
4771 .result_unpriv = REJECT,
4775 "valid map access into an array with a register",
4777 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
4778 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
4779 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
4780 BPF_LD_MAP_FD(BPF_REG_1, 0),
4781 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
4782 BPF_FUNC_map_lookup_elem),
4783 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 4),
4784 BPF_MOV64_IMM(BPF_REG_1, 4),
4785 BPF_ALU64_IMM(BPF_LSH, BPF_REG_1, 2),
4786 BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
4787 BPF_ST_MEM(BPF_DW, BPF_REG_0, 0,
4788 offsetof(struct test_val, foo)),
4791 .fixup_map_hash_48b = { 3 },
4792 .errstr_unpriv = "R0 leaks addr",
4793 .result_unpriv = REJECT,
4795 .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
4798 "valid map access into an array with a variable",
4800 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
4801 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
4802 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
4803 BPF_LD_MAP_FD(BPF_REG_1, 0),
4804 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
4805 BPF_FUNC_map_lookup_elem),
4806 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 5),
4807 BPF_LDX_MEM(BPF_W, BPF_REG_1, BPF_REG_0, 0),
4808 BPF_JMP_IMM(BPF_JGE, BPF_REG_1, MAX_ENTRIES, 3),
4809 BPF_ALU64_IMM(BPF_LSH, BPF_REG_1, 2),
4810 BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
4811 BPF_ST_MEM(BPF_DW, BPF_REG_0, 0,
4812 offsetof(struct test_val, foo)),
4815 .fixup_map_hash_48b = { 3 },
4816 .errstr_unpriv = "R0 leaks addr",
4817 .result_unpriv = REJECT,
4819 .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
4822 "valid map access into an array with a signed variable",
4824 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
4825 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
4826 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
4827 BPF_LD_MAP_FD(BPF_REG_1, 0),
4828 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
4829 BPF_FUNC_map_lookup_elem),
4830 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 9),
4831 BPF_LDX_MEM(BPF_W, BPF_REG_1, BPF_REG_0, 0),
4832 BPF_JMP_IMM(BPF_JSGT, BPF_REG_1, 0xffffffff, 1),
4833 BPF_MOV32_IMM(BPF_REG_1, 0),
4834 BPF_MOV32_IMM(BPF_REG_2, MAX_ENTRIES),
4835 BPF_JMP_REG(BPF_JSGT, BPF_REG_2, BPF_REG_1, 1),
4836 BPF_MOV32_IMM(BPF_REG_1, 0),
4837 BPF_ALU32_IMM(BPF_LSH, BPF_REG_1, 2),
4838 BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
4839 BPF_ST_MEM(BPF_DW, BPF_REG_0, 0,
4840 offsetof(struct test_val, foo)),
4843 .fixup_map_hash_48b = { 3 },
4844 .errstr_unpriv = "R0 leaks addr",
4845 .result_unpriv = REJECT,
4847 .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
4850 "invalid map access into an array with a constant",
4852 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
4853 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
4854 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
4855 BPF_LD_MAP_FD(BPF_REG_1, 0),
4856 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
4857 BPF_FUNC_map_lookup_elem),
4858 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 1),
4859 BPF_ST_MEM(BPF_DW, BPF_REG_0, (MAX_ENTRIES + 1) << 2,
4860 offsetof(struct test_val, foo)),
4863 .fixup_map_hash_48b = { 3 },
4864 .errstr = "invalid access to map value, value_size=48 off=48 size=8",
4868 "invalid map access into an array with a register",
4870 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
4871 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
4872 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
4873 BPF_LD_MAP_FD(BPF_REG_1, 0),
4874 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
4875 BPF_FUNC_map_lookup_elem),
4876 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 4),
4877 BPF_MOV64_IMM(BPF_REG_1, MAX_ENTRIES + 1),
4878 BPF_ALU64_IMM(BPF_LSH, BPF_REG_1, 2),
4879 BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
4880 BPF_ST_MEM(BPF_DW, BPF_REG_0, 0,
4881 offsetof(struct test_val, foo)),
4884 .fixup_map_hash_48b = { 3 },
4885 .errstr = "R0 min value is outside of the array range",
4887 .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
4890 "invalid map access into an array with a variable",
4892 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
4893 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
4894 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
4895 BPF_LD_MAP_FD(BPF_REG_1, 0),
4896 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
4897 BPF_FUNC_map_lookup_elem),
4898 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 4),
4899 BPF_LDX_MEM(BPF_W, BPF_REG_1, BPF_REG_0, 0),
4900 BPF_ALU64_IMM(BPF_LSH, BPF_REG_1, 2),
4901 BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
4902 BPF_ST_MEM(BPF_DW, BPF_REG_0, 0,
4903 offsetof(struct test_val, foo)),
4906 .fixup_map_hash_48b = { 3 },
4907 .errstr = "R0 unbounded memory access, make sure to bounds check any array access into a map",
4909 .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
4912 "invalid map access into an array with no floor check",
4914 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
4915 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
4916 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
4917 BPF_LD_MAP_FD(BPF_REG_1, 0),
4918 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
4919 BPF_FUNC_map_lookup_elem),
4920 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 7),
4921 BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_0, 0),
4922 BPF_MOV32_IMM(BPF_REG_2, MAX_ENTRIES),
4923 BPF_JMP_REG(BPF_JSGT, BPF_REG_2, BPF_REG_1, 1),
4924 BPF_MOV32_IMM(BPF_REG_1, 0),
4925 BPF_ALU32_IMM(BPF_LSH, BPF_REG_1, 2),
4926 BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
4927 BPF_ST_MEM(BPF_DW, BPF_REG_0, 0,
4928 offsetof(struct test_val, foo)),
4931 .fixup_map_hash_48b = { 3 },
4932 .errstr_unpriv = "R0 leaks addr",
4933 .errstr = "R0 unbounded memory access",
4934 .result_unpriv = REJECT,
4936 .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
4939 "invalid map access into an array with a invalid max check",
4941 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
4942 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
4943 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
4944 BPF_LD_MAP_FD(BPF_REG_1, 0),
4945 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
4946 BPF_FUNC_map_lookup_elem),
4947 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 7),
4948 BPF_LDX_MEM(BPF_W, BPF_REG_1, BPF_REG_0, 0),
4949 BPF_MOV32_IMM(BPF_REG_2, MAX_ENTRIES + 1),
4950 BPF_JMP_REG(BPF_JGT, BPF_REG_2, BPF_REG_1, 1),
4951 BPF_MOV32_IMM(BPF_REG_1, 0),
4952 BPF_ALU32_IMM(BPF_LSH, BPF_REG_1, 2),
4953 BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
4954 BPF_ST_MEM(BPF_DW, BPF_REG_0, 0,
4955 offsetof(struct test_val, foo)),
4958 .fixup_map_hash_48b = { 3 },
4959 .errstr_unpriv = "R0 leaks addr",
4960 .errstr = "invalid access to map value, value_size=48 off=44 size=8",
4961 .result_unpriv = REJECT,
4963 .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
4966 "invalid map access into an array with a invalid max check",
4968 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
4969 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
4970 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
4971 BPF_LD_MAP_FD(BPF_REG_1, 0),
4972 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
4973 BPF_FUNC_map_lookup_elem),
4974 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 10),
4975 BPF_MOV64_REG(BPF_REG_8, BPF_REG_0),
4976 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
4977 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
4978 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
4979 BPF_LD_MAP_FD(BPF_REG_1, 0),
4980 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
4981 BPF_FUNC_map_lookup_elem),
4982 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 2),
4983 BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_8),
4984 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_0,
4985 offsetof(struct test_val, foo)),
4988 .fixup_map_hash_48b = { 3, 11 },
4989 .errstr = "R0 pointer += pointer",
4991 .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
4994 "direct packet read test#1 for CGROUP_SKB",
4996 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
4997 offsetof(struct __sk_buff, data)),
4998 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
4999 offsetof(struct __sk_buff, data_end)),
5000 BPF_LDX_MEM(BPF_W, BPF_REG_4, BPF_REG_1,
5001 offsetof(struct __sk_buff, len)),
5002 BPF_LDX_MEM(BPF_W, BPF_REG_5, BPF_REG_1,
5003 offsetof(struct __sk_buff, pkt_type)),
5004 BPF_LDX_MEM(BPF_W, BPF_REG_6, BPF_REG_1,
5005 offsetof(struct __sk_buff, mark)),
5006 BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_6,
5007 offsetof(struct __sk_buff, mark)),
5008 BPF_LDX_MEM(BPF_W, BPF_REG_7, BPF_REG_1,
5009 offsetof(struct __sk_buff, queue_mapping)),
5010 BPF_LDX_MEM(BPF_W, BPF_REG_8, BPF_REG_1,
5011 offsetof(struct __sk_buff, protocol)),
5012 BPF_LDX_MEM(BPF_W, BPF_REG_9, BPF_REG_1,
5013 offsetof(struct __sk_buff, vlan_present)),
5014 BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
5015 BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
5016 BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 1),
5017 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_2, 0),
5018 BPF_MOV64_IMM(BPF_REG_0, 0),
5022 .result_unpriv = REJECT,
5023 .errstr_unpriv = "invalid bpf_context access off=76 size=4",
5024 .prog_type = BPF_PROG_TYPE_CGROUP_SKB,
5027 "direct packet read test#2 for CGROUP_SKB",
5029 BPF_LDX_MEM(BPF_W, BPF_REG_4, BPF_REG_1,
5030 offsetof(struct __sk_buff, vlan_tci)),
5031 BPF_LDX_MEM(BPF_W, BPF_REG_5, BPF_REG_1,
5032 offsetof(struct __sk_buff, vlan_proto)),
5033 BPF_LDX_MEM(BPF_W, BPF_REG_6, BPF_REG_1,
5034 offsetof(struct __sk_buff, priority)),
5035 BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_6,
5036 offsetof(struct __sk_buff, priority)),
5037 BPF_LDX_MEM(BPF_W, BPF_REG_7, BPF_REG_1,
5038 offsetof(struct __sk_buff,
5040 BPF_LDX_MEM(BPF_W, BPF_REG_8, BPF_REG_1,
5041 offsetof(struct __sk_buff, tc_index)),
5042 BPF_LDX_MEM(BPF_W, BPF_REG_9, BPF_REG_1,
5043 offsetof(struct __sk_buff, hash)),
5044 BPF_MOV64_IMM(BPF_REG_0, 0),
5048 .prog_type = BPF_PROG_TYPE_CGROUP_SKB,
5051 "direct packet read test#3 for CGROUP_SKB",
5053 BPF_LDX_MEM(BPF_W, BPF_REG_4, BPF_REG_1,
5054 offsetof(struct __sk_buff, cb[0])),
5055 BPF_LDX_MEM(BPF_W, BPF_REG_5, BPF_REG_1,
5056 offsetof(struct __sk_buff, cb[1])),
5057 BPF_LDX_MEM(BPF_W, BPF_REG_6, BPF_REG_1,
5058 offsetof(struct __sk_buff, cb[2])),
5059 BPF_LDX_MEM(BPF_W, BPF_REG_7, BPF_REG_1,
5060 offsetof(struct __sk_buff, cb[3])),
5061 BPF_LDX_MEM(BPF_W, BPF_REG_8, BPF_REG_1,
5062 offsetof(struct __sk_buff, cb[4])),
5063 BPF_LDX_MEM(BPF_W, BPF_REG_9, BPF_REG_1,
5064 offsetof(struct __sk_buff, napi_id)),
5065 BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_4,
5066 offsetof(struct __sk_buff, cb[0])),
5067 BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_5,
5068 offsetof(struct __sk_buff, cb[1])),
5069 BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_6,
5070 offsetof(struct __sk_buff, cb[2])),
5071 BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_7,
5072 offsetof(struct __sk_buff, cb[3])),
5073 BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_8,
5074 offsetof(struct __sk_buff, cb[4])),
5075 BPF_MOV64_IMM(BPF_REG_0, 0),
5079 .prog_type = BPF_PROG_TYPE_CGROUP_SKB,
5082 "direct packet read test#4 for CGROUP_SKB",
5084 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
5085 offsetof(struct __sk_buff, family)),
5086 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
5087 offsetof(struct __sk_buff, remote_ip4)),
5088 BPF_LDX_MEM(BPF_W, BPF_REG_4, BPF_REG_1,
5089 offsetof(struct __sk_buff, local_ip4)),
5090 BPF_LDX_MEM(BPF_W, BPF_REG_5, BPF_REG_1,
5091 offsetof(struct __sk_buff, remote_ip6[0])),
5092 BPF_LDX_MEM(BPF_W, BPF_REG_5, BPF_REG_1,
5093 offsetof(struct __sk_buff, remote_ip6[1])),
5094 BPF_LDX_MEM(BPF_W, BPF_REG_5, BPF_REG_1,
5095 offsetof(struct __sk_buff, remote_ip6[2])),
5096 BPF_LDX_MEM(BPF_W, BPF_REG_5, BPF_REG_1,
5097 offsetof(struct __sk_buff, remote_ip6[3])),
5098 BPF_LDX_MEM(BPF_W, BPF_REG_6, BPF_REG_1,
5099 offsetof(struct __sk_buff, local_ip6[0])),
5100 BPF_LDX_MEM(BPF_W, BPF_REG_6, BPF_REG_1,
5101 offsetof(struct __sk_buff, local_ip6[1])),
5102 BPF_LDX_MEM(BPF_W, BPF_REG_6, BPF_REG_1,
5103 offsetof(struct __sk_buff, local_ip6[2])),
5104 BPF_LDX_MEM(BPF_W, BPF_REG_6, BPF_REG_1,
5105 offsetof(struct __sk_buff, local_ip6[3])),
5106 BPF_LDX_MEM(BPF_W, BPF_REG_7, BPF_REG_1,
5107 offsetof(struct __sk_buff, remote_port)),
5108 BPF_LDX_MEM(BPF_W, BPF_REG_8, BPF_REG_1,
5109 offsetof(struct __sk_buff, local_port)),
5110 BPF_MOV64_IMM(BPF_REG_0, 0),
5114 .prog_type = BPF_PROG_TYPE_CGROUP_SKB,
5117 "invalid access of tc_classid for CGROUP_SKB",
5119 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
5120 offsetof(struct __sk_buff, tc_classid)),
5121 BPF_MOV64_IMM(BPF_REG_0, 0),
5125 .errstr = "invalid bpf_context access",
5126 .prog_type = BPF_PROG_TYPE_CGROUP_SKB,
5129 "invalid access of data_meta for CGROUP_SKB",
5131 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
5132 offsetof(struct __sk_buff, data_meta)),
5133 BPF_MOV64_IMM(BPF_REG_0, 0),
5137 .errstr = "invalid bpf_context access",
5138 .prog_type = BPF_PROG_TYPE_CGROUP_SKB,
5141 "invalid access of flow_keys for CGROUP_SKB",
5143 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
5144 offsetof(struct __sk_buff, flow_keys)),
5145 BPF_MOV64_IMM(BPF_REG_0, 0),
5149 .errstr = "invalid bpf_context access",
5150 .prog_type = BPF_PROG_TYPE_CGROUP_SKB,
5153 "invalid write access to napi_id for CGROUP_SKB",
5155 BPF_LDX_MEM(BPF_W, BPF_REG_9, BPF_REG_1,
5156 offsetof(struct __sk_buff, napi_id)),
5157 BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_9,
5158 offsetof(struct __sk_buff, napi_id)),
5159 BPF_MOV64_IMM(BPF_REG_0, 0),
5163 .errstr = "invalid bpf_context access",
5164 .prog_type = BPF_PROG_TYPE_CGROUP_SKB,
5167 "valid cgroup storage access",
5169 BPF_MOV64_IMM(BPF_REG_2, 0),
5170 BPF_LD_MAP_FD(BPF_REG_1, 0),
5171 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
5172 BPF_FUNC_get_local_storage),
5173 BPF_LDX_MEM(BPF_W, BPF_REG_1, BPF_REG_0, 0),
5174 BPF_MOV64_REG(BPF_REG_0, BPF_REG_1),
5175 BPF_ALU64_IMM(BPF_AND, BPF_REG_0, 1),
5178 .fixup_cgroup_storage = { 1 },
5180 .prog_type = BPF_PROG_TYPE_CGROUP_SKB,
5183 "invalid cgroup storage access 1",
5185 BPF_MOV64_IMM(BPF_REG_2, 0),
5186 BPF_LD_MAP_FD(BPF_REG_1, 0),
5187 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
5188 BPF_FUNC_get_local_storage),
5189 BPF_LDX_MEM(BPF_W, BPF_REG_1, BPF_REG_0, 0),
5190 BPF_MOV64_REG(BPF_REG_0, BPF_REG_1),
5191 BPF_ALU64_IMM(BPF_AND, BPF_REG_0, 1),
5194 .fixup_map_hash_8b = { 1 },
5196 .errstr = "cannot pass map_type 1 into func bpf_get_local_storage",
5197 .prog_type = BPF_PROG_TYPE_CGROUP_SKB,
5200 "invalid cgroup storage access 2",
5202 BPF_MOV64_IMM(BPF_REG_2, 0),
5203 BPF_LD_MAP_FD(BPF_REG_1, 1),
5204 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
5205 BPF_FUNC_get_local_storage),
5206 BPF_ALU64_IMM(BPF_AND, BPF_REG_0, 1),
5210 .errstr = "fd 1 is not pointing to valid bpf_map",
5211 .prog_type = BPF_PROG_TYPE_CGROUP_SKB,
5214 "invalid cgroup storage access 3",
5216 BPF_MOV64_IMM(BPF_REG_2, 0),
5217 BPF_LD_MAP_FD(BPF_REG_1, 0),
5218 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
5219 BPF_FUNC_get_local_storage),
5220 BPF_LDX_MEM(BPF_W, BPF_REG_1, BPF_REG_0, 256),
5221 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 1),
5222 BPF_MOV64_IMM(BPF_REG_0, 0),
5225 .fixup_cgroup_storage = { 1 },
5227 .errstr = "invalid access to map value, value_size=64 off=256 size=4",
5228 .prog_type = BPF_PROG_TYPE_CGROUP_SKB,
5231 "invalid cgroup storage access 4",
5233 BPF_MOV64_IMM(BPF_REG_2, 0),
5234 BPF_LD_MAP_FD(BPF_REG_1, 0),
5235 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
5236 BPF_FUNC_get_local_storage),
5237 BPF_LDX_MEM(BPF_W, BPF_REG_1, BPF_REG_0, -2),
5238 BPF_MOV64_REG(BPF_REG_0, BPF_REG_1),
5239 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 1),
5242 .fixup_cgroup_storage = { 1 },
5244 .errstr = "invalid access to map value, value_size=64 off=-2 size=4",
5245 .prog_type = BPF_PROG_TYPE_CGROUP_SKB,
5246 .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
5249 "invalid cgroup storage access 5",
5251 BPF_MOV64_IMM(BPF_REG_2, 7),
5252 BPF_LD_MAP_FD(BPF_REG_1, 0),
5253 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
5254 BPF_FUNC_get_local_storage),
5255 BPF_LDX_MEM(BPF_W, BPF_REG_1, BPF_REG_0, 0),
5256 BPF_MOV64_REG(BPF_REG_0, BPF_REG_1),
5257 BPF_ALU64_IMM(BPF_AND, BPF_REG_0, 1),
5260 .fixup_cgroup_storage = { 1 },
5262 .errstr = "get_local_storage() doesn't support non-zero flags",
5263 .prog_type = BPF_PROG_TYPE_CGROUP_SKB,
5266 "invalid cgroup storage access 6",
5268 BPF_MOV64_REG(BPF_REG_2, BPF_REG_1),
5269 BPF_LD_MAP_FD(BPF_REG_1, 0),
5270 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
5271 BPF_FUNC_get_local_storage),
5272 BPF_LDX_MEM(BPF_W, BPF_REG_1, BPF_REG_0, 0),
5273 BPF_MOV64_REG(BPF_REG_0, BPF_REG_1),
5274 BPF_ALU64_IMM(BPF_AND, BPF_REG_0, 1),
5277 .fixup_cgroup_storage = { 1 },
5279 .errstr = "get_local_storage() doesn't support non-zero flags",
5280 .errstr_unpriv = "R2 leaks addr into helper function",
5281 .prog_type = BPF_PROG_TYPE_CGROUP_SKB,
5284 "valid per-cpu cgroup storage access",
5286 BPF_MOV64_IMM(BPF_REG_2, 0),
5287 BPF_LD_MAP_FD(BPF_REG_1, 0),
5288 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
5289 BPF_FUNC_get_local_storage),
5290 BPF_LDX_MEM(BPF_W, BPF_REG_1, BPF_REG_0, 0),
5291 BPF_MOV64_REG(BPF_REG_0, BPF_REG_1),
5292 BPF_ALU64_IMM(BPF_AND, BPF_REG_0, 1),
5295 .fixup_percpu_cgroup_storage = { 1 },
5297 .prog_type = BPF_PROG_TYPE_CGROUP_SKB,
5300 "invalid per-cpu cgroup storage access 1",
5302 BPF_MOV64_IMM(BPF_REG_2, 0),
5303 BPF_LD_MAP_FD(BPF_REG_1, 0),
5304 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
5305 BPF_FUNC_get_local_storage),
5306 BPF_LDX_MEM(BPF_W, BPF_REG_1, BPF_REG_0, 0),
5307 BPF_MOV64_REG(BPF_REG_0, BPF_REG_1),
5308 BPF_ALU64_IMM(BPF_AND, BPF_REG_0, 1),
5311 .fixup_map_hash_8b = { 1 },
5313 .errstr = "cannot pass map_type 1 into func bpf_get_local_storage",
5314 .prog_type = BPF_PROG_TYPE_CGROUP_SKB,
5317 "invalid per-cpu cgroup storage access 2",
5319 BPF_MOV64_IMM(BPF_REG_2, 0),
5320 BPF_LD_MAP_FD(BPF_REG_1, 1),
5321 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
5322 BPF_FUNC_get_local_storage),
5323 BPF_ALU64_IMM(BPF_AND, BPF_REG_0, 1),
5327 .errstr = "fd 1 is not pointing to valid bpf_map",
5328 .prog_type = BPF_PROG_TYPE_CGROUP_SKB,
5331 "invalid per-cpu cgroup storage access 3",
5333 BPF_MOV64_IMM(BPF_REG_2, 0),
5334 BPF_LD_MAP_FD(BPF_REG_1, 0),
5335 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
5336 BPF_FUNC_get_local_storage),
5337 BPF_LDX_MEM(BPF_W, BPF_REG_1, BPF_REG_0, 256),
5338 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 1),
5339 BPF_MOV64_IMM(BPF_REG_0, 0),
5342 .fixup_percpu_cgroup_storage = { 1 },
5344 .errstr = "invalid access to map value, value_size=64 off=256 size=4",
5345 .prog_type = BPF_PROG_TYPE_CGROUP_SKB,
5348 "invalid per-cpu cgroup storage access 4",
5350 BPF_MOV64_IMM(BPF_REG_2, 0),
5351 BPF_LD_MAP_FD(BPF_REG_1, 0),
5352 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
5353 BPF_FUNC_get_local_storage),
5354 BPF_LDX_MEM(BPF_W, BPF_REG_1, BPF_REG_0, -2),
5355 BPF_MOV64_REG(BPF_REG_0, BPF_REG_1),
5356 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 1),
5359 .fixup_cgroup_storage = { 1 },
5361 .errstr = "invalid access to map value, value_size=64 off=-2 size=4",
5362 .prog_type = BPF_PROG_TYPE_CGROUP_SKB,
5363 .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
5366 "invalid per-cpu cgroup storage access 5",
5368 BPF_MOV64_IMM(BPF_REG_2, 7),
5369 BPF_LD_MAP_FD(BPF_REG_1, 0),
5370 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
5371 BPF_FUNC_get_local_storage),
5372 BPF_LDX_MEM(BPF_W, BPF_REG_1, BPF_REG_0, 0),
5373 BPF_MOV64_REG(BPF_REG_0, BPF_REG_1),
5374 BPF_ALU64_IMM(BPF_AND, BPF_REG_0, 1),
5377 .fixup_percpu_cgroup_storage = { 1 },
5379 .errstr = "get_local_storage() doesn't support non-zero flags",
5380 .prog_type = BPF_PROG_TYPE_CGROUP_SKB,
5383 "invalid per-cpu cgroup storage access 6",
5385 BPF_MOV64_REG(BPF_REG_2, BPF_REG_1),
5386 BPF_LD_MAP_FD(BPF_REG_1, 0),
5387 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
5388 BPF_FUNC_get_local_storage),
5389 BPF_LDX_MEM(BPF_W, BPF_REG_1, BPF_REG_0, 0),
5390 BPF_MOV64_REG(BPF_REG_0, BPF_REG_1),
5391 BPF_ALU64_IMM(BPF_AND, BPF_REG_0, 1),
5394 .fixup_percpu_cgroup_storage = { 1 },
5396 .errstr = "get_local_storage() doesn't support non-zero flags",
5397 .errstr_unpriv = "R2 leaks addr into helper function",
5398 .prog_type = BPF_PROG_TYPE_CGROUP_SKB,
5401 "write tstamp from CGROUP_SKB",
5403 BPF_MOV64_IMM(BPF_REG_0, 0),
5404 BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_0,
5405 offsetof(struct __sk_buff, tstamp)),
5406 BPF_MOV64_IMM(BPF_REG_0, 0),
5410 .result_unpriv = REJECT,
5411 .errstr_unpriv = "invalid bpf_context access off=152 size=8",
5412 .prog_type = BPF_PROG_TYPE_CGROUP_SKB,
5415 "read tstamp from CGROUP_SKB",
5417 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1,
5418 offsetof(struct __sk_buff, tstamp)),
5419 BPF_MOV64_IMM(BPF_REG_0, 0),
5423 .prog_type = BPF_PROG_TYPE_CGROUP_SKB,
5426 "multiple registers share map_lookup_elem result",
5428 BPF_MOV64_IMM(BPF_REG_1, 10),
5429 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_1, -8),
5430 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
5431 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
5432 BPF_LD_MAP_FD(BPF_REG_1, 0),
5433 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
5434 BPF_FUNC_map_lookup_elem),
5435 BPF_MOV64_REG(BPF_REG_4, BPF_REG_0),
5436 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 1),
5437 BPF_ST_MEM(BPF_DW, BPF_REG_4, 0, 0),
5440 .fixup_map_hash_8b = { 4 },
5442 .prog_type = BPF_PROG_TYPE_SCHED_CLS
5445 "alu ops on ptr_to_map_value_or_null, 1",
5447 BPF_MOV64_IMM(BPF_REG_1, 10),
5448 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_1, -8),
5449 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
5450 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
5451 BPF_LD_MAP_FD(BPF_REG_1, 0),
5452 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
5453 BPF_FUNC_map_lookup_elem),
5454 BPF_MOV64_REG(BPF_REG_4, BPF_REG_0),
5455 BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, -2),
5456 BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, 2),
5457 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 1),
5458 BPF_ST_MEM(BPF_DW, BPF_REG_4, 0, 0),
5461 .fixup_map_hash_8b = { 4 },
5462 .errstr = "R4 pointer arithmetic on map_value_or_null",
5464 .prog_type = BPF_PROG_TYPE_SCHED_CLS
5467 "alu ops on ptr_to_map_value_or_null, 2",
5469 BPF_MOV64_IMM(BPF_REG_1, 10),
5470 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_1, -8),
5471 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
5472 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
5473 BPF_LD_MAP_FD(BPF_REG_1, 0),
5474 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
5475 BPF_FUNC_map_lookup_elem),
5476 BPF_MOV64_REG(BPF_REG_4, BPF_REG_0),
5477 BPF_ALU64_IMM(BPF_AND, BPF_REG_4, -1),
5478 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 1),
5479 BPF_ST_MEM(BPF_DW, BPF_REG_4, 0, 0),
5482 .fixup_map_hash_8b = { 4 },
5483 .errstr = "R4 pointer arithmetic on map_value_or_null",
5485 .prog_type = BPF_PROG_TYPE_SCHED_CLS
5488 "alu ops on ptr_to_map_value_or_null, 3",
5490 BPF_MOV64_IMM(BPF_REG_1, 10),
5491 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_1, -8),
5492 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
5493 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
5494 BPF_LD_MAP_FD(BPF_REG_1, 0),
5495 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
5496 BPF_FUNC_map_lookup_elem),
5497 BPF_MOV64_REG(BPF_REG_4, BPF_REG_0),
5498 BPF_ALU64_IMM(BPF_LSH, BPF_REG_4, 1),
5499 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 1),
5500 BPF_ST_MEM(BPF_DW, BPF_REG_4, 0, 0),
5503 .fixup_map_hash_8b = { 4 },
5504 .errstr = "R4 pointer arithmetic on map_value_or_null",
5506 .prog_type = BPF_PROG_TYPE_SCHED_CLS
5509 "invalid memory access with multiple map_lookup_elem calls",
5511 BPF_MOV64_IMM(BPF_REG_1, 10),
5512 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_1, -8),
5513 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
5514 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
5515 BPF_LD_MAP_FD(BPF_REG_1, 0),
5516 BPF_MOV64_REG(BPF_REG_8, BPF_REG_1),
5517 BPF_MOV64_REG(BPF_REG_7, BPF_REG_2),
5518 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
5519 BPF_FUNC_map_lookup_elem),
5520 BPF_MOV64_REG(BPF_REG_4, BPF_REG_0),
5521 BPF_MOV64_REG(BPF_REG_1, BPF_REG_8),
5522 BPF_MOV64_REG(BPF_REG_2, BPF_REG_7),
5523 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
5524 BPF_FUNC_map_lookup_elem),
5525 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 1),
5526 BPF_ST_MEM(BPF_DW, BPF_REG_4, 0, 0),
5529 .fixup_map_hash_8b = { 4 },
5531 .errstr = "R4 !read_ok",
5532 .prog_type = BPF_PROG_TYPE_SCHED_CLS
5535 "valid indirect map_lookup_elem access with 2nd lookup in branch",
5537 BPF_MOV64_IMM(BPF_REG_1, 10),
5538 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_1, -8),
5539 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
5540 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
5541 BPF_LD_MAP_FD(BPF_REG_1, 0),
5542 BPF_MOV64_REG(BPF_REG_8, BPF_REG_1),
5543 BPF_MOV64_REG(BPF_REG_7, BPF_REG_2),
5544 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
5545 BPF_FUNC_map_lookup_elem),
5546 BPF_MOV64_IMM(BPF_REG_2, 10),
5547 BPF_JMP_IMM(BPF_JNE, BPF_REG_2, 0, 3),
5548 BPF_MOV64_REG(BPF_REG_1, BPF_REG_8),
5549 BPF_MOV64_REG(BPF_REG_2, BPF_REG_7),
5550 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
5551 BPF_FUNC_map_lookup_elem),
5552 BPF_MOV64_REG(BPF_REG_4, BPF_REG_0),
5553 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 1),
5554 BPF_ST_MEM(BPF_DW, BPF_REG_4, 0, 0),
5557 .fixup_map_hash_8b = { 4 },
5559 .prog_type = BPF_PROG_TYPE_SCHED_CLS
5562 "invalid map access from else condition",
5564 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
5565 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
5566 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
5567 BPF_LD_MAP_FD(BPF_REG_1, 0),
5568 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem),
5569 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 6),
5570 BPF_LDX_MEM(BPF_W, BPF_REG_1, BPF_REG_0, 0),
5571 BPF_JMP_IMM(BPF_JGE, BPF_REG_1, MAX_ENTRIES-1, 1),
5572 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 1),
5573 BPF_ALU64_IMM(BPF_LSH, BPF_REG_1, 2),
5574 BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
5575 BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, offsetof(struct test_val, foo)),
5578 .fixup_map_hash_48b = { 3 },
5579 .errstr = "R0 unbounded memory access",
5581 .errstr_unpriv = "R0 leaks addr",
5582 .result_unpriv = REJECT,
5583 .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
5586 "constant register |= constant should keep constant type",
5588 BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
5589 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -48),
5590 BPF_MOV64_IMM(BPF_REG_2, 34),
5591 BPF_ALU64_IMM(BPF_OR, BPF_REG_2, 13),
5592 BPF_MOV64_IMM(BPF_REG_3, 0),
5593 BPF_EMIT_CALL(BPF_FUNC_probe_read),
5597 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
5600 "constant register |= constant should not bypass stack boundary checks",
5602 BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
5603 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -48),
5604 BPF_MOV64_IMM(BPF_REG_2, 34),
5605 BPF_ALU64_IMM(BPF_OR, BPF_REG_2, 24),
5606 BPF_MOV64_IMM(BPF_REG_3, 0),
5607 BPF_EMIT_CALL(BPF_FUNC_probe_read),
5610 .errstr = "invalid stack type R1 off=-48 access_size=58",
5612 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
5615 "constant register |= constant register should keep constant type",
5617 BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
5618 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -48),
5619 BPF_MOV64_IMM(BPF_REG_2, 34),
5620 BPF_MOV64_IMM(BPF_REG_4, 13),
5621 BPF_ALU64_REG(BPF_OR, BPF_REG_2, BPF_REG_4),
5622 BPF_MOV64_IMM(BPF_REG_3, 0),
5623 BPF_EMIT_CALL(BPF_FUNC_probe_read),
5627 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
5630 "constant register |= constant register should not bypass stack boundary checks",
5632 BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
5633 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -48),
5634 BPF_MOV64_IMM(BPF_REG_2, 34),
5635 BPF_MOV64_IMM(BPF_REG_4, 24),
5636 BPF_ALU64_REG(BPF_OR, BPF_REG_2, BPF_REG_4),
5637 BPF_MOV64_IMM(BPF_REG_3, 0),
5638 BPF_EMIT_CALL(BPF_FUNC_probe_read),
5641 .errstr = "invalid stack type R1 off=-48 access_size=58",
5643 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
5646 "invalid direct packet write for LWT_IN",
5648 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
5649 offsetof(struct __sk_buff, data)),
5650 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
5651 offsetof(struct __sk_buff, data_end)),
5652 BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
5653 BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
5654 BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 1),
5655 BPF_STX_MEM(BPF_B, BPF_REG_2, BPF_REG_2, 0),
5656 BPF_MOV64_IMM(BPF_REG_0, 0),
5659 .errstr = "cannot write into packet",
5661 .prog_type = BPF_PROG_TYPE_LWT_IN,
5664 "invalid direct packet write for LWT_OUT",
5666 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
5667 offsetof(struct __sk_buff, data)),
5668 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
5669 offsetof(struct __sk_buff, data_end)),
5670 BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
5671 BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
5672 BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 1),
5673 BPF_STX_MEM(BPF_B, BPF_REG_2, BPF_REG_2, 0),
5674 BPF_MOV64_IMM(BPF_REG_0, 0),
5677 .errstr = "cannot write into packet",
5679 .prog_type = BPF_PROG_TYPE_LWT_OUT,
5682 "direct packet write for LWT_XMIT",
5684 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
5685 offsetof(struct __sk_buff, data)),
5686 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
5687 offsetof(struct __sk_buff, data_end)),
5688 BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
5689 BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
5690 BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 1),
5691 BPF_STX_MEM(BPF_B, BPF_REG_2, BPF_REG_2, 0),
5692 BPF_MOV64_IMM(BPF_REG_0, 0),
5696 .prog_type = BPF_PROG_TYPE_LWT_XMIT,
5699 "direct packet read for LWT_IN",
5701 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
5702 offsetof(struct __sk_buff, data)),
5703 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
5704 offsetof(struct __sk_buff, data_end)),
5705 BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
5706 BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
5707 BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 1),
5708 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_2, 0),
5709 BPF_MOV64_IMM(BPF_REG_0, 0),
5713 .prog_type = BPF_PROG_TYPE_LWT_IN,
5716 "direct packet read for LWT_OUT",
5718 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
5719 offsetof(struct __sk_buff, data)),
5720 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
5721 offsetof(struct __sk_buff, data_end)),
5722 BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
5723 BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
5724 BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 1),
5725 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_2, 0),
5726 BPF_MOV64_IMM(BPF_REG_0, 0),
5730 .prog_type = BPF_PROG_TYPE_LWT_OUT,
5733 "direct packet read for LWT_XMIT",
5735 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
5736 offsetof(struct __sk_buff, data)),
5737 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
5738 offsetof(struct __sk_buff, data_end)),
5739 BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
5740 BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
5741 BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 1),
5742 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_2, 0),
5743 BPF_MOV64_IMM(BPF_REG_0, 0),
5747 .prog_type = BPF_PROG_TYPE_LWT_XMIT,
5750 "overlapping checks for direct packet access",
5752 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
5753 offsetof(struct __sk_buff, data)),
5754 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
5755 offsetof(struct __sk_buff, data_end)),
5756 BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
5757 BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
5758 BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 4),
5759 BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
5760 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 6),
5761 BPF_JMP_REG(BPF_JGT, BPF_REG_1, BPF_REG_3, 1),
5762 BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_2, 6),
5763 BPF_MOV64_IMM(BPF_REG_0, 0),
5767 .prog_type = BPF_PROG_TYPE_LWT_XMIT,
5770 "make headroom for LWT_XMIT",
5772 BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
5773 BPF_MOV64_IMM(BPF_REG_2, 34),
5774 BPF_MOV64_IMM(BPF_REG_3, 0),
5775 BPF_EMIT_CALL(BPF_FUNC_skb_change_head),
5776 /* split for s390 to succeed */
5777 BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
5778 BPF_MOV64_IMM(BPF_REG_2, 42),
5779 BPF_MOV64_IMM(BPF_REG_3, 0),
5780 BPF_EMIT_CALL(BPF_FUNC_skb_change_head),
5781 BPF_MOV64_IMM(BPF_REG_0, 0),
5785 .prog_type = BPF_PROG_TYPE_LWT_XMIT,
5788 "invalid access of tc_classid for LWT_IN",
5790 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
5791 offsetof(struct __sk_buff, tc_classid)),
5795 .errstr = "invalid bpf_context access",
5798 "invalid access of tc_classid for LWT_OUT",
5800 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
5801 offsetof(struct __sk_buff, tc_classid)),
5805 .errstr = "invalid bpf_context access",
5808 "invalid access of tc_classid for LWT_XMIT",
5810 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
5811 offsetof(struct __sk_buff, tc_classid)),
5815 .errstr = "invalid bpf_context access",
5818 "leak pointer into ctx 1",
5820 BPF_MOV64_IMM(BPF_REG_0, 0),
5821 BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_0,
5822 offsetof(struct __sk_buff, cb[0])),
5823 BPF_LD_MAP_FD(BPF_REG_2, 0),
5824 BPF_STX_XADD(BPF_DW, BPF_REG_1, BPF_REG_2,
5825 offsetof(struct __sk_buff, cb[0])),
5828 .fixup_map_hash_8b = { 2 },
5829 .errstr_unpriv = "R2 leaks addr into mem",
5830 .result_unpriv = REJECT,
5832 .errstr = "BPF_XADD stores into R1 ctx is not allowed",
5835 "leak pointer into ctx 2",
5837 BPF_MOV64_IMM(BPF_REG_0, 0),
5838 BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_0,
5839 offsetof(struct __sk_buff, cb[0])),
5840 BPF_STX_XADD(BPF_DW, BPF_REG_1, BPF_REG_10,
5841 offsetof(struct __sk_buff, cb[0])),
5844 .errstr_unpriv = "R10 leaks addr into mem",
5845 .result_unpriv = REJECT,
5847 .errstr = "BPF_XADD stores into R1 ctx is not allowed",
5850 "leak pointer into ctx 3",
5852 BPF_MOV64_IMM(BPF_REG_0, 0),
5853 BPF_LD_MAP_FD(BPF_REG_2, 0),
5854 BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_2,
5855 offsetof(struct __sk_buff, cb[0])),
5858 .fixup_map_hash_8b = { 1 },
5859 .errstr_unpriv = "R2 leaks addr into ctx",
5860 .result_unpriv = REJECT,
5864 "leak pointer into map val",
5866 BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
5867 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
5868 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
5869 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
5870 BPF_LD_MAP_FD(BPF_REG_1, 0),
5871 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
5872 BPF_FUNC_map_lookup_elem),
5873 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 3),
5874 BPF_MOV64_IMM(BPF_REG_3, 0),
5875 BPF_STX_MEM(BPF_DW, BPF_REG_0, BPF_REG_3, 0),
5876 BPF_STX_XADD(BPF_DW, BPF_REG_0, BPF_REG_6, 0),
5877 BPF_MOV64_IMM(BPF_REG_0, 0),
5880 .fixup_map_hash_8b = { 4 },
5881 .errstr_unpriv = "R6 leaks addr into mem",
5882 .result_unpriv = REJECT,
5886 "helper access to map: full range",
5888 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
5889 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
5890 BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
5891 BPF_LD_MAP_FD(BPF_REG_1, 0),
5892 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
5893 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 4),
5894 BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
5895 BPF_MOV64_IMM(BPF_REG_2, sizeof(struct test_val)),
5896 BPF_MOV64_IMM(BPF_REG_3, 0),
5897 BPF_EMIT_CALL(BPF_FUNC_probe_read),
5900 .fixup_map_hash_48b = { 3 },
5902 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
5905 "helper access to map: partial range",
5907 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
5908 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
5909 BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
5910 BPF_LD_MAP_FD(BPF_REG_1, 0),
5911 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
5912 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 4),
5913 BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
5914 BPF_MOV64_IMM(BPF_REG_2, 8),
5915 BPF_MOV64_IMM(BPF_REG_3, 0),
5916 BPF_EMIT_CALL(BPF_FUNC_probe_read),
5919 .fixup_map_hash_48b = { 3 },
5921 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
5924 "helper access to map: empty range",
5926 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
5927 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
5928 BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
5929 BPF_LD_MAP_FD(BPF_REG_1, 0),
5930 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
5931 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 3),
5932 BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
5933 BPF_MOV64_IMM(BPF_REG_2, 0),
5934 BPF_EMIT_CALL(BPF_FUNC_trace_printk),
5937 .fixup_map_hash_48b = { 3 },
5938 .errstr = "invalid access to map value, value_size=48 off=0 size=0",
5940 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
5943 "helper access to map: out-of-bound range",
5945 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
5946 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
5947 BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
5948 BPF_LD_MAP_FD(BPF_REG_1, 0),
5949 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
5950 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 4),
5951 BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
5952 BPF_MOV64_IMM(BPF_REG_2, sizeof(struct test_val) + 8),
5953 BPF_MOV64_IMM(BPF_REG_3, 0),
5954 BPF_EMIT_CALL(BPF_FUNC_probe_read),
5957 .fixup_map_hash_48b = { 3 },
5958 .errstr = "invalid access to map value, value_size=48 off=0 size=56",
5960 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
5963 "helper access to map: negative range",
5965 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
5966 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
5967 BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
5968 BPF_LD_MAP_FD(BPF_REG_1, 0),
5969 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
5970 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 4),
5971 BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
5972 BPF_MOV64_IMM(BPF_REG_2, -8),
5973 BPF_MOV64_IMM(BPF_REG_3, 0),
5974 BPF_EMIT_CALL(BPF_FUNC_probe_read),
5977 .fixup_map_hash_48b = { 3 },
5978 .errstr = "R2 min value is negative",
5980 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
5983 "helper access to adjusted map (via const imm): full range",
5985 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
5986 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
5987 BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
5988 BPF_LD_MAP_FD(BPF_REG_1, 0),
5989 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
5990 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 5),
5991 BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
5992 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1,
5993 offsetof(struct test_val, foo)),
5994 BPF_MOV64_IMM(BPF_REG_2,
5995 sizeof(struct test_val) -
5996 offsetof(struct test_val, foo)),
5997 BPF_MOV64_IMM(BPF_REG_3, 0),
5998 BPF_EMIT_CALL(BPF_FUNC_probe_read),
6001 .fixup_map_hash_48b = { 3 },
6003 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
/*
 * NOTE(review): this chunk is a damaged extraction of the bpf_test table —
 * each line carries a stray leading number, and repeated structural lines
 * ({, }, .insns = {, BPF_EXIT_INSN(), BPF_MOV64_IMM(BPF_REG_0, 0),
 * .result = ACCEPT/REJECT) appear to have been dropped (see the gaps in the
 * embedded numbering). Code is left byte-identical; comments only. TODO:
 * restore from the upstream kernel selftest before compiling.
 */
/*
 * Map value pointer adjusted by a constant immediate (value + offsetof(foo)),
 * then passed to bpf_probe_read with an 8-byte size: in-bounds partial read.
 * No .errstr visible — presumably an expected-ACCEPT case (.result line lost
 * in extraction; confirm against upstream).
 */
6006 "helper access to adjusted map (via const imm): partial range",
6008 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
6009 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
6010 BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
6011 BPF_LD_MAP_FD(BPF_REG_1, 0),
6012 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
6013 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 5),
6014 BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
6015 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1,
6016 offsetof(struct test_val, foo)),
6017 BPF_MOV64_IMM(BPF_REG_2, 8),
6018 BPF_MOV64_IMM(BPF_REG_3, 0),
6019 BPF_EMIT_CALL(BPF_FUNC_probe_read),
6022 .fixup_map_hash_48b = { 3 },
6024 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
/*
 * Size 0 passed with a non-NULL adjusted map-value pointer: the verifier
 * must reject (errstr pins "size=0" at the adjusted offset 4).
 */
6027 "helper access to adjusted map (via const imm): empty range",
6029 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
6030 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
6031 BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
6032 BPF_LD_MAP_FD(BPF_REG_1, 0),
6033 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
6034 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 4),
6035 BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
6036 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1,
6037 offsetof(struct test_val, foo)),
6038 BPF_MOV64_IMM(BPF_REG_2, 0),
6039 BPF_EMIT_CALL(BPF_FUNC_trace_printk),
6042 .fixup_map_hash_48b = { 3 },
6043 .errstr = "invalid access to map value, value_size=48 off=4 size=0",
6045 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
/*
 * Size is (value_size - off) + 8 = 52 bytes from offset 4 of a 48-byte
 * value: overruns the map value, must be rejected.
 */
6048 "helper access to adjusted map (via const imm): out-of-bound range",
6050 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
6051 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
6052 BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
6053 BPF_LD_MAP_FD(BPF_REG_1, 0),
6054 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
6055 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 5),
6056 BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
6057 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1,
6058 offsetof(struct test_val, foo)),
6059 BPF_MOV64_IMM(BPF_REG_2,
6060 sizeof(struct test_val) -
6061 offsetof(struct test_val, foo) + 8),
6062 BPF_MOV64_IMM(BPF_REG_3, 0),
6063 BPF_EMIT_CALL(BPF_FUNC_probe_read),
6066 .fixup_map_hash_48b = { 3 },
6067 .errstr = "invalid access to map value, value_size=48 off=4 size=52",
6069 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
/*
 * Negative size (-8), larger in magnitude than the +4 adjustment:
 * rejected as a negative minimum size argument (R2).
 */
6072 "helper access to adjusted map (via const imm): negative range (> adjustment)",
6074 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
6075 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
6076 BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
6077 BPF_LD_MAP_FD(BPF_REG_1, 0),
6078 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
6079 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 5),
6080 BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
6081 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1,
6082 offsetof(struct test_val, foo)),
6083 BPF_MOV64_IMM(BPF_REG_2, -8),
6084 BPF_MOV64_IMM(BPF_REG_3, 0),
6085 BPF_EMIT_CALL(BPF_FUNC_probe_read),
6088 .fixup_map_hash_48b = { 3 },
6089 .errstr = "R2 min value is negative",
6091 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
/*
 * Negative size (-1), smaller in magnitude than the adjustment — still a
 * negative size, still rejected.
 */
6094 "helper access to adjusted map (via const imm): negative range (< adjustment)",
6096 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
6097 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
6098 BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
6099 BPF_LD_MAP_FD(BPF_REG_1, 0),
6100 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
6101 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 5),
6102 BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
6103 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1,
6104 offsetof(struct test_val, foo)),
6105 BPF_MOV64_IMM(BPF_REG_2, -1),
6106 BPF_MOV64_IMM(BPF_REG_3, 0),
6107 BPF_EMIT_CALL(BPF_FUNC_probe_read),
6110 .fixup_map_hash_48b = { 3 },
6111 .errstr = "R2 min value is negative",
6113 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
/*
 * Same family as the "via const imm" tests above, but the constant offset
 * is first materialized in a register (R3) and added with BPF_ALU64_REG, so
 * the verifier must track a known-constant scalar through register math.
 * Full in-bounds read: no .errstr visible — presumably expected-ACCEPT
 * (.result line lost in extraction; confirm against upstream).
 */
6116 "helper access to adjusted map (via const reg): full range",
6118 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
6119 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
6120 BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
6121 BPF_LD_MAP_FD(BPF_REG_1, 0),
6122 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
6123 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 6),
6124 BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
6125 BPF_MOV64_IMM(BPF_REG_3,
6126 offsetof(struct test_val, foo)),
6127 BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_3),
6128 BPF_MOV64_IMM(BPF_REG_2,
6129 sizeof(struct test_val) -
6130 offsetof(struct test_val, foo)),
6131 BPF_MOV64_IMM(BPF_REG_3, 0),
6132 BPF_EMIT_CALL(BPF_FUNC_probe_read),
6135 .fixup_map_hash_48b = { 3 },
6137 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
/* In-bounds 8-byte read through a register-held constant offset. */
6140 "helper access to adjusted map (via const reg): partial range",
6142 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
6143 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
6144 BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
6145 BPF_LD_MAP_FD(BPF_REG_1, 0),
6146 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
6147 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 6),
6148 BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
6149 BPF_MOV64_IMM(BPF_REG_3,
6150 offsetof(struct test_val, foo)),
6151 BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_3),
6152 BPF_MOV64_IMM(BPF_REG_2, 8),
6153 BPF_MOV64_IMM(BPF_REG_3, 0),
6154 BPF_EMIT_CALL(BPF_FUNC_probe_read),
6157 .fixup_map_hash_48b = { 3 },
6159 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
/*
 * Zero offset added via register and size 0: rejected — note the errstr
 * here blames R1 ("outside of the array range") rather than the size.
 */
6162 "helper access to adjusted map (via const reg): empty range",
6164 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
6165 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
6166 BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
6167 BPF_LD_MAP_FD(BPF_REG_1, 0),
6168 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
6169 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 5),
6170 BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
6171 BPF_MOV64_IMM(BPF_REG_3, 0),
6172 BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_3),
6173 BPF_MOV64_IMM(BPF_REG_2, 0),
6174 BPF_EMIT_CALL(BPF_FUNC_trace_printk),
6177 .fixup_map_hash_48b = { 3 },
6178 .errstr = "R1 min value is outside of the array range",
6180 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
/* 52-byte read from offset 4 of a 48-byte value: rejected. */
6183 "helper access to adjusted map (via const reg): out-of-bound range",
6185 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
6186 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
6187 BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
6188 BPF_LD_MAP_FD(BPF_REG_1, 0),
6189 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
6190 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 6),
6191 BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
6192 BPF_MOV64_IMM(BPF_REG_3,
6193 offsetof(struct test_val, foo)),
6194 BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_3),
6195 BPF_MOV64_IMM(BPF_REG_2,
6196 sizeof(struct test_val) -
6197 offsetof(struct test_val, foo) + 8),
6198 BPF_MOV64_IMM(BPF_REG_3, 0),
6199 BPF_EMIT_CALL(BPF_FUNC_probe_read),
6202 .fixup_map_hash_48b = { 3 },
6203 .errstr = "invalid access to map value, value_size=48 off=4 size=52",
6205 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
/* Negative size -8 (|size| > adjustment): rejected on R2. */
6208 "helper access to adjusted map (via const reg): negative range (> adjustment)",
6210 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
6211 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
6212 BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
6213 BPF_LD_MAP_FD(BPF_REG_1, 0),
6214 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
6215 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 6),
6216 BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
6217 BPF_MOV64_IMM(BPF_REG_3,
6218 offsetof(struct test_val, foo)),
6219 BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_3),
6220 BPF_MOV64_IMM(BPF_REG_2, -8),
6221 BPF_MOV64_IMM(BPF_REG_3, 0),
6222 BPF_EMIT_CALL(BPF_FUNC_probe_read),
6225 .fixup_map_hash_48b = { 3 },
6226 .errstr = "R2 min value is negative",
6228 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
/* Negative size -1 (|size| < adjustment): rejected on R2. */
6231 "helper access to adjusted map (via const reg): negative range (< adjustment)",
6233 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
6234 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
6235 BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
6236 BPF_LD_MAP_FD(BPF_REG_1, 0),
6237 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
6238 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 6),
6239 BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
6240 BPF_MOV64_IMM(BPF_REG_3,
6241 offsetof(struct test_val, foo)),
6242 BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_3),
6243 BPF_MOV64_IMM(BPF_REG_2, -1),
6244 BPF_MOV64_IMM(BPF_REG_3, 0),
6245 BPF_EMIT_CALL(BPF_FUNC_probe_read),
6248 .fixup_map_hash_48b = { 3 },
6249 .errstr = "R2 min value is negative",
6251 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
/*
 * "via variable" family: the offset comes from map data (BPF_LDX from the
 * value), so it is an unknown scalar that the program must bound with a
 * conditional jump (BPF_JGT) before using it as pointer arithmetic.
 * Full range, properly bounded: no .errstr visible — presumably
 * expected-ACCEPT (.result line lost in extraction; confirm upstream).
 */
6254 "helper access to adjusted map (via variable): full range",
6256 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
6257 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
6258 BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
6259 BPF_LD_MAP_FD(BPF_REG_1, 0),
6260 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
6261 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 7),
6262 BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
6263 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_0, 0),
6264 BPF_JMP_IMM(BPF_JGT, BPF_REG_3,
6265 offsetof(struct test_val, foo), 4),
6266 BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_3),
6267 BPF_MOV64_IMM(BPF_REG_2,
6268 sizeof(struct test_val) -
6269 offsetof(struct test_val, foo)),
6270 BPF_MOV64_IMM(BPF_REG_3, 0),
6271 BPF_EMIT_CALL(BPF_FUNC_probe_read),
6274 .fixup_map_hash_48b = { 3 },
6276 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
/* Bounded variable offset, 8-byte read: in-bounds. */
6279 "helper access to adjusted map (via variable): partial range",
6281 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
6282 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
6283 BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
6284 BPF_LD_MAP_FD(BPF_REG_1, 0),
6285 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
6286 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 7),
6287 BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
6288 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_0, 0),
6289 BPF_JMP_IMM(BPF_JGT, BPF_REG_3,
6290 offsetof(struct test_val, foo), 4),
6291 BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_3),
6292 BPF_MOV64_IMM(BPF_REG_2, 8),
6293 BPF_MOV64_IMM(BPF_REG_3, 0),
6294 BPF_EMIT_CALL(BPF_FUNC_probe_read),
6297 .fixup_map_hash_48b = { 3 },
6299 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
/* Bounded variable offset but size 0: rejected on R1 range. */
6302 "helper access to adjusted map (via variable): empty range",
6304 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
6305 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
6306 BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
6307 BPF_LD_MAP_FD(BPF_REG_1, 0),
6308 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
6309 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 6),
6310 BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
6311 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_0, 0),
6312 BPF_JMP_IMM(BPF_JGT, BPF_REG_3,
6313 offsetof(struct test_val, foo), 3),
6314 BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_3),
6315 BPF_MOV64_IMM(BPF_REG_2, 0),
6316 BPF_EMIT_CALL(BPF_FUNC_trace_printk),
6319 .fixup_map_hash_48b = { 3 },
6320 .errstr = "R1 min value is outside of the array range",
6322 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
/*
 * Variable offset added with NO upper-bound check at all: the pointer is
 * unbounded and the verifier must reject.
 */
6325 "helper access to adjusted map (via variable): no max check",
6327 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
6328 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
6329 BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
6330 BPF_LD_MAP_FD(BPF_REG_1, 0),
6331 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
6332 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 6),
6333 BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
6334 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_0, 0),
6335 BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_3),
6336 BPF_MOV64_IMM(BPF_REG_2, 1),
6337 BPF_MOV64_IMM(BPF_REG_3, 0),
6338 BPF_EMIT_CALL(BPF_FUNC_probe_read),
6341 .fixup_map_hash_48b = { 3 },
6342 .errstr = "R1 unbounded memory access",
6344 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
/*
 * Upper bound is checked, but the access size is one byte too large for
 * the worst-case offset: off=4 + size=45 overruns the 48-byte value.
 */
6347 "helper access to adjusted map (via variable): wrong max check",
6349 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
6350 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
6351 BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
6352 BPF_LD_MAP_FD(BPF_REG_1, 0),
6353 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
6354 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 7),
6355 BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
6356 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_0, 0),
6357 BPF_JMP_IMM(BPF_JGT, BPF_REG_3,
6358 offsetof(struct test_val, foo), 4),
6359 BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_3),
6360 BPF_MOV64_IMM(BPF_REG_2,
6361 sizeof(struct test_val) -
6362 offsetof(struct test_val, foo) + 1),
6363 BPF_MOV64_IMM(BPF_REG_3, 0),
6364 BPF_EMIT_CALL(BPF_FUNC_probe_read),
6367 .fixup_map_hash_48b = { 3 },
6368 .errstr = "invalid access to map value, value_size=48 off=4 size=45",
6370 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
6373 "helper access to map: bounds check using <, good access",
6375 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
6376 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
6377 BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
6378 BPF_LD_MAP_FD(BPF_REG_1, 0),
6379 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
6380 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 4),
6381 BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
6382 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_0, 0),
6383 BPF_JMP_IMM(BPF_JLT, BPF_REG_3, 32, 2),
6384 BPF_MOV64_IMM(BPF_REG_0, 0),
6386 BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_3),
6387 BPF_ST_MEM(BPF_B, BPF_REG_1, 0, 0),
6388 BPF_MOV64_IMM(BPF_REG_0, 0),
6391 .fixup_map_hash_48b = { 3 },
6393 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
6396 "helper access to map: bounds check using <, bad access",
6398 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
6399 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
6400 BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
6401 BPF_LD_MAP_FD(BPF_REG_1, 0),
6402 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
6403 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 4),
6404 BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
6405 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_0, 0),
6406 BPF_JMP_IMM(BPF_JLT, BPF_REG_3, 32, 4),
6407 BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_3),
6408 BPF_ST_MEM(BPF_B, BPF_REG_1, 0, 0),
6409 BPF_MOV64_IMM(BPF_REG_0, 0),
6411 BPF_MOV64_IMM(BPF_REG_0, 0),
6414 .fixup_map_hash_48b = { 3 },
6416 .errstr = "R1 unbounded memory access",
6417 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
6420 "helper access to map: bounds check using <=, good access",
6422 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
6423 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
6424 BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
6425 BPF_LD_MAP_FD(BPF_REG_1, 0),
6426 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
6427 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 4),
6428 BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
6429 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_0, 0),
6430 BPF_JMP_IMM(BPF_JLE, BPF_REG_3, 32, 2),
6431 BPF_MOV64_IMM(BPF_REG_0, 0),
6433 BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_3),
6434 BPF_ST_MEM(BPF_B, BPF_REG_1, 0, 0),
6435 BPF_MOV64_IMM(BPF_REG_0, 0),
6438 .fixup_map_hash_48b = { 3 },
6440 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
6443 "helper access to map: bounds check using <=, bad access",
6445 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
6446 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
6447 BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
6448 BPF_LD_MAP_FD(BPF_REG_1, 0),
6449 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
6450 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 4),
6451 BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
6452 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_0, 0),
6453 BPF_JMP_IMM(BPF_JLE, BPF_REG_3, 32, 4),
6454 BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_3),
6455 BPF_ST_MEM(BPF_B, BPF_REG_1, 0, 0),
6456 BPF_MOV64_IMM(BPF_REG_0, 0),
6458 BPF_MOV64_IMM(BPF_REG_0, 0),
6461 .fixup_map_hash_48b = { 3 },
6463 .errstr = "R1 unbounded memory access",
6464 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
6467 "helper access to map: bounds check using s<, good access",
6469 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
6470 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
6471 BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
6472 BPF_LD_MAP_FD(BPF_REG_1, 0),
6473 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
6474 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 4),
6475 BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
6476 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_0, 0),
6477 BPF_JMP_IMM(BPF_JSLT, BPF_REG_3, 32, 2),
6478 BPF_MOV64_IMM(BPF_REG_0, 0),
6480 BPF_JMP_IMM(BPF_JSLT, BPF_REG_3, 0, -3),
6481 BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_3),
6482 BPF_ST_MEM(BPF_B, BPF_REG_1, 0, 0),
6483 BPF_MOV64_IMM(BPF_REG_0, 0),
6486 .fixup_map_hash_48b = { 3 },
6488 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
6491 "helper access to map: bounds check using s<, good access 2",
6493 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
6494 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
6495 BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
6496 BPF_LD_MAP_FD(BPF_REG_1, 0),
6497 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
6498 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 4),
6499 BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
6500 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_0, 0),
6501 BPF_JMP_IMM(BPF_JSLT, BPF_REG_3, 32, 2),
6502 BPF_MOV64_IMM(BPF_REG_0, 0),
6504 BPF_JMP_IMM(BPF_JSLT, BPF_REG_3, -3, -3),
6505 BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_3),
6506 BPF_ST_MEM(BPF_B, BPF_REG_1, 0, 0),
6507 BPF_MOV64_IMM(BPF_REG_0, 0),
6510 .fixup_map_hash_48b = { 3 },
6512 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
6515 "helper access to map: bounds check using s<, bad access",
6517 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
6518 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
6519 BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
6520 BPF_LD_MAP_FD(BPF_REG_1, 0),
6521 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
6522 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 4),
6523 BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
6524 BPF_LDX_MEM(BPF_DW, BPF_REG_3, BPF_REG_0, 0),
6525 BPF_JMP_IMM(BPF_JSLT, BPF_REG_3, 32, 2),
6526 BPF_MOV64_IMM(BPF_REG_0, 0),
6528 BPF_JMP_IMM(BPF_JSLT, BPF_REG_3, -3, -3),
6529 BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_3),
6530 BPF_ST_MEM(BPF_B, BPF_REG_1, 0, 0),
6531 BPF_MOV64_IMM(BPF_REG_0, 0),
6534 .fixup_map_hash_48b = { 3 },
6536 .errstr = "R1 min value is negative",
6537 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
6540 "helper access to map: bounds check using s<=, good access",
6542 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
6543 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
6544 BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
6545 BPF_LD_MAP_FD(BPF_REG_1, 0),
6546 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
6547 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 4),
6548 BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
6549 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_0, 0),
6550 BPF_JMP_IMM(BPF_JSLE, BPF_REG_3, 32, 2),
6551 BPF_MOV64_IMM(BPF_REG_0, 0),
6553 BPF_JMP_IMM(BPF_JSLE, BPF_REG_3, 0, -3),
6554 BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_3),
6555 BPF_ST_MEM(BPF_B, BPF_REG_1, 0, 0),
6556 BPF_MOV64_IMM(BPF_REG_0, 0),
6559 .fixup_map_hash_48b = { 3 },
6561 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
6564 "helper access to map: bounds check using s<=, good access 2",
6566 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
6567 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
6568 BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
6569 BPF_LD_MAP_FD(BPF_REG_1, 0),
6570 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
6571 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 4),
6572 BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
6573 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_0, 0),
6574 BPF_JMP_IMM(BPF_JSLE, BPF_REG_3, 32, 2),
6575 BPF_MOV64_IMM(BPF_REG_0, 0),
6577 BPF_JMP_IMM(BPF_JSLE, BPF_REG_3, -3, -3),
6578 BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_3),
6579 BPF_ST_MEM(BPF_B, BPF_REG_1, 0, 0),
6580 BPF_MOV64_IMM(BPF_REG_0, 0),
6583 .fixup_map_hash_48b = { 3 },
6585 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
6588 "helper access to map: bounds check using s<=, bad access",
6590 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
6591 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
6592 BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
6593 BPF_LD_MAP_FD(BPF_REG_1, 0),
6594 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
6595 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 4),
6596 BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
6597 BPF_LDX_MEM(BPF_DW, BPF_REG_3, BPF_REG_0, 0),
6598 BPF_JMP_IMM(BPF_JSLE, BPF_REG_3, 32, 2),
6599 BPF_MOV64_IMM(BPF_REG_0, 0),
6601 BPF_JMP_IMM(BPF_JSLE, BPF_REG_3, -3, -3),
6602 BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_3),
6603 BPF_ST_MEM(BPF_B, BPF_REG_1, 0, 0),
6604 BPF_MOV64_IMM(BPF_REG_0, 0),
6607 .fixup_map_hash_48b = { 3 },
6609 .errstr = "R1 min value is negative",
6610 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
6613 "map access: known scalar += value_ptr",
6615 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
6616 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
6617 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
6618 BPF_LD_MAP_FD(BPF_REG_1, 0),
6619 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
6620 BPF_FUNC_map_lookup_elem),
6621 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 3),
6622 BPF_MOV64_IMM(BPF_REG_1, 4),
6623 BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_0),
6624 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1, 0),
6625 BPF_MOV64_IMM(BPF_REG_0, 1),
6628 .fixup_map_array_48b = { 3 },
6633 "map access: value_ptr += known scalar",
6635 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
6636 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
6637 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
6638 BPF_LD_MAP_FD(BPF_REG_1, 0),
6639 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
6640 BPF_FUNC_map_lookup_elem),
6641 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 3),
6642 BPF_MOV64_IMM(BPF_REG_1, 4),
6643 BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
6644 BPF_LDX_MEM(BPF_B, BPF_REG_1, BPF_REG_0, 0),
6645 BPF_MOV64_IMM(BPF_REG_0, 1),
6648 .fixup_map_array_48b = { 3 },
6653 "map access: unknown scalar += value_ptr",
6655 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
6656 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
6657 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
6658 BPF_LD_MAP_FD(BPF_REG_1, 0),
6659 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
6660 BPF_FUNC_map_lookup_elem),
6661 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 4),
6662 BPF_LDX_MEM(BPF_B, BPF_REG_1, BPF_REG_0, 0),
6663 BPF_ALU64_IMM(BPF_AND, BPF_REG_1, 0xf),
6664 BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_0),
6665 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1, 0),
6666 BPF_MOV64_IMM(BPF_REG_0, 1),
6669 .fixup_map_array_48b = { 3 },
6674 "map access: value_ptr += unknown scalar",
6676 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
6677 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
6678 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
6679 BPF_LD_MAP_FD(BPF_REG_1, 0),
6680 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
6681 BPF_FUNC_map_lookup_elem),
6682 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 4),
6683 BPF_LDX_MEM(BPF_B, BPF_REG_1, BPF_REG_0, 0),
6684 BPF_ALU64_IMM(BPF_AND, BPF_REG_1, 0xf),
6685 BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
6686 BPF_LDX_MEM(BPF_B, BPF_REG_1, BPF_REG_0, 0),
6687 BPF_MOV64_IMM(BPF_REG_0, 1),
6690 .fixup_map_array_48b = { 3 },
6695 "map access: value_ptr += value_ptr",
6697 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
6698 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
6699 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
6700 BPF_LD_MAP_FD(BPF_REG_1, 0),
6701 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
6702 BPF_FUNC_map_lookup_elem),
6703 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 2),
6704 BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_0),
6705 BPF_LDX_MEM(BPF_B, BPF_REG_1, BPF_REG_0, 0),
6706 BPF_MOV64_IMM(BPF_REG_0, 1),
6709 .fixup_map_array_48b = { 3 },
6711 .errstr = "R0 pointer += pointer prohibited",
6714 "map access: known scalar -= value_ptr",
6716 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
6717 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
6718 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
6719 BPF_LD_MAP_FD(BPF_REG_1, 0),
6720 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
6721 BPF_FUNC_map_lookup_elem),
6722 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 3),
6723 BPF_MOV64_IMM(BPF_REG_1, 4),
6724 BPF_ALU64_REG(BPF_SUB, BPF_REG_1, BPF_REG_0),
6725 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1, 0),
6726 BPF_MOV64_IMM(BPF_REG_0, 1),
6729 .fixup_map_array_48b = { 3 },
6731 .errstr = "R1 tried to subtract pointer from scalar",
6734 "map access: value_ptr -= known scalar",
6736 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
6737 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
6738 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
6739 BPF_LD_MAP_FD(BPF_REG_1, 0),
6740 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
6741 BPF_FUNC_map_lookup_elem),
6742 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 3),
6743 BPF_MOV64_IMM(BPF_REG_1, 4),
6744 BPF_ALU64_REG(BPF_SUB, BPF_REG_0, BPF_REG_1),
6745 BPF_LDX_MEM(BPF_B, BPF_REG_1, BPF_REG_0, 0),
6746 BPF_MOV64_IMM(BPF_REG_0, 1),
6749 .fixup_map_array_48b = { 3 },
6751 .errstr = "R0 min value is outside of the array range",
6754 "map access: value_ptr -= known scalar, 2",
6756 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
6757 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
6758 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
6759 BPF_LD_MAP_FD(BPF_REG_1, 0),
6760 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
6761 BPF_FUNC_map_lookup_elem),
6762 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 5),
6763 BPF_MOV64_IMM(BPF_REG_1, 6),
6764 BPF_MOV64_IMM(BPF_REG_2, 4),
6765 BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
6766 BPF_ALU64_REG(BPF_SUB, BPF_REG_0, BPF_REG_2),
6767 BPF_LDX_MEM(BPF_B, BPF_REG_1, BPF_REG_0, 0),
6768 BPF_MOV64_IMM(BPF_REG_0, 1),
6771 .fixup_map_array_48b = { 3 },
6776 "map access: unknown scalar -= value_ptr",
6778 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
6779 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
6780 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
6781 BPF_LD_MAP_FD(BPF_REG_1, 0),
6782 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
6783 BPF_FUNC_map_lookup_elem),
6784 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 4),
6785 BPF_LDX_MEM(BPF_B, BPF_REG_1, BPF_REG_0, 0),
6786 BPF_ALU64_IMM(BPF_AND, BPF_REG_1, 0xf),
6787 BPF_ALU64_REG(BPF_SUB, BPF_REG_1, BPF_REG_0),
6788 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1, 0),
6789 BPF_MOV64_IMM(BPF_REG_0, 1),
6792 .fixup_map_array_48b = { 3 },
6794 .errstr = "R1 tried to subtract pointer from scalar",
6797 "map access: value_ptr -= unknown scalar",
6799 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
6800 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
6801 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
6802 BPF_LD_MAP_FD(BPF_REG_1, 0),
6803 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
6804 BPF_FUNC_map_lookup_elem),
6805 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 4),
6806 BPF_LDX_MEM(BPF_B, BPF_REG_1, BPF_REG_0, 0),
6807 BPF_ALU64_IMM(BPF_AND, BPF_REG_1, 0xf),
6808 BPF_ALU64_REG(BPF_SUB, BPF_REG_0, BPF_REG_1),
6809 BPF_LDX_MEM(BPF_B, BPF_REG_1, BPF_REG_0, 0),
6810 BPF_MOV64_IMM(BPF_REG_0, 1),
6813 .fixup_map_array_48b = { 3 },
6815 .errstr = "R0 min value is negative",
6818 "map access: value_ptr -= unknown scalar, 2",
6820 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
6821 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
6822 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
6823 BPF_LD_MAP_FD(BPF_REG_1, 0),
6824 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
6825 BPF_FUNC_map_lookup_elem),
6826 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 8),
6827 BPF_LDX_MEM(BPF_B, BPF_REG_1, BPF_REG_0, 0),
6828 BPF_ALU64_IMM(BPF_AND, BPF_REG_1, 0xf),
6829 BPF_ALU64_IMM(BPF_OR, BPF_REG_1, 0x7),
6830 BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
6831 BPF_LDX_MEM(BPF_B, BPF_REG_1, BPF_REG_0, 0),
6832 BPF_ALU64_IMM(BPF_AND, BPF_REG_1, 0x7),
6833 BPF_ALU64_REG(BPF_SUB, BPF_REG_0, BPF_REG_1),
6834 BPF_LDX_MEM(BPF_B, BPF_REG_1, BPF_REG_0, 0),
6835 BPF_MOV64_IMM(BPF_REG_0, 1),
6838 .fixup_map_array_48b = { 3 },
6843 "map access: value_ptr -= value_ptr",
6845 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
6846 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
6847 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
6848 BPF_LD_MAP_FD(BPF_REG_1, 0),
6849 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
6850 BPF_FUNC_map_lookup_elem),
6851 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 2),
6852 BPF_ALU64_REG(BPF_SUB, BPF_REG_0, BPF_REG_0),
6853 BPF_LDX_MEM(BPF_B, BPF_REG_1, BPF_REG_0, 0),
6854 BPF_MOV64_IMM(BPF_REG_0, 1),
6857 .fixup_map_array_48b = { 3 },
6859 .errstr = "R0 invalid mem access 'inv'",
6860 .errstr_unpriv = "R0 pointer -= pointer prohibited",
6863 "map lookup helper access to map",
6865 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
6866 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
6867 BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
6868 BPF_LD_MAP_FD(BPF_REG_1, 0),
6869 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
6870 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 4),
6871 BPF_MOV64_REG(BPF_REG_2, BPF_REG_0),
6872 BPF_LD_MAP_FD(BPF_REG_1, 0),
6873 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
6876 .fixup_map_hash_16b = { 3, 8 },
6878 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
6881 "map update helper access to map",
6883 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
6884 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
6885 BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
6886 BPF_LD_MAP_FD(BPF_REG_1, 0),
6887 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
6888 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 6),
6889 BPF_MOV64_IMM(BPF_REG_4, 0),
6890 BPF_MOV64_REG(BPF_REG_3, BPF_REG_0),
6891 BPF_MOV64_REG(BPF_REG_2, BPF_REG_0),
6892 BPF_LD_MAP_FD(BPF_REG_1, 0),
6893 BPF_EMIT_CALL(BPF_FUNC_map_update_elem),
6896 .fixup_map_hash_16b = { 3, 10 },
6898 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
6901 "map update helper access to map: wrong size",
6903 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
6904 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
6905 BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
6906 BPF_LD_MAP_FD(BPF_REG_1, 0),
6907 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
6908 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 6),
6909 BPF_MOV64_IMM(BPF_REG_4, 0),
6910 BPF_MOV64_REG(BPF_REG_3, BPF_REG_0),
6911 BPF_MOV64_REG(BPF_REG_2, BPF_REG_0),
6912 BPF_LD_MAP_FD(BPF_REG_1, 0),
6913 BPF_EMIT_CALL(BPF_FUNC_map_update_elem),
6916 .fixup_map_hash_8b = { 3 },
6917 .fixup_map_hash_16b = { 10 },
6919 .errstr = "invalid access to map value, value_size=8 off=0 size=16",
6920 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
6923 "map helper access to adjusted map (via const imm)",
6925 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
6926 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
6927 BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
6928 BPF_LD_MAP_FD(BPF_REG_1, 0),
6929 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
6930 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 5),
6931 BPF_MOV64_REG(BPF_REG_2, BPF_REG_0),
6932 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2,
6933 offsetof(struct other_val, bar)),
6934 BPF_LD_MAP_FD(BPF_REG_1, 0),
6935 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
6938 .fixup_map_hash_16b = { 3, 9 },
6940 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
6943 "map helper access to adjusted map (via const imm): out-of-bound 1",
6945 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
6946 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
6947 BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
6948 BPF_LD_MAP_FD(BPF_REG_1, 0),
6949 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
6950 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 5),
6951 BPF_MOV64_REG(BPF_REG_2, BPF_REG_0),
6952 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2,
6953 sizeof(struct other_val) - 4),
6954 BPF_LD_MAP_FD(BPF_REG_1, 0),
6955 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
6958 .fixup_map_hash_16b = { 3, 9 },
6960 .errstr = "invalid access to map value, value_size=16 off=12 size=8",
6961 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
6964 "map helper access to adjusted map (via const imm): out-of-bound 2",
6966 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
6967 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
6968 BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
6969 BPF_LD_MAP_FD(BPF_REG_1, 0),
6970 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
6971 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 5),
6972 BPF_MOV64_REG(BPF_REG_2, BPF_REG_0),
6973 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -4),
6974 BPF_LD_MAP_FD(BPF_REG_1, 0),
6975 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
6978 .fixup_map_hash_16b = { 3, 9 },
6980 .errstr = "invalid access to map value, value_size=16 off=-4 size=8",
6981 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
6984 "map helper access to adjusted map (via const reg)",
6986 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
6987 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
6988 BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
6989 BPF_LD_MAP_FD(BPF_REG_1, 0),
6990 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
6991 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 6),
6992 BPF_MOV64_REG(BPF_REG_2, BPF_REG_0),
6993 BPF_MOV64_IMM(BPF_REG_3,
6994 offsetof(struct other_val, bar)),
6995 BPF_ALU64_REG(BPF_ADD, BPF_REG_2, BPF_REG_3),
6996 BPF_LD_MAP_FD(BPF_REG_1, 0),
6997 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
7000 .fixup_map_hash_16b = { 3, 10 },
7002 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
7005 "map helper access to adjusted map (via const reg): out-of-bound 1",
7007 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
7008 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
7009 BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
7010 BPF_LD_MAP_FD(BPF_REG_1, 0),
7011 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
7012 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 6),
7013 BPF_MOV64_REG(BPF_REG_2, BPF_REG_0),
7014 BPF_MOV64_IMM(BPF_REG_3,
7015 sizeof(struct other_val) - 4),
7016 BPF_ALU64_REG(BPF_ADD, BPF_REG_2, BPF_REG_3),
7017 BPF_LD_MAP_FD(BPF_REG_1, 0),
7018 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
7021 .fixup_map_hash_16b = { 3, 10 },
7023 .errstr = "invalid access to map value, value_size=16 off=12 size=8",
7024 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
7027 "map helper access to adjusted map (via const reg): out-of-bound 2",
7029 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
7030 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
7031 BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
7032 BPF_LD_MAP_FD(BPF_REG_1, 0),
7033 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
7034 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 6),
7035 BPF_MOV64_REG(BPF_REG_2, BPF_REG_0),
7036 BPF_MOV64_IMM(BPF_REG_3, -4),
7037 BPF_ALU64_REG(BPF_ADD, BPF_REG_2, BPF_REG_3),
7038 BPF_LD_MAP_FD(BPF_REG_1, 0),
7039 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
7042 .fixup_map_hash_16b = { 3, 10 },
7044 .errstr = "invalid access to map value, value_size=16 off=-4 size=8",
7045 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
7048 "map helper access to adjusted map (via variable)",
7050 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
7051 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
7052 BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
7053 BPF_LD_MAP_FD(BPF_REG_1, 0),
7054 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
7055 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 7),
7056 BPF_MOV64_REG(BPF_REG_2, BPF_REG_0),
7057 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_0, 0),
7058 BPF_JMP_IMM(BPF_JGT, BPF_REG_3,
7059 offsetof(struct other_val, bar), 4),
7060 BPF_ALU64_REG(BPF_ADD, BPF_REG_2, BPF_REG_3),
7061 BPF_LD_MAP_FD(BPF_REG_1, 0),
7062 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
7065 .fixup_map_hash_16b = { 3, 11 },
7067 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
7070 "map helper access to adjusted map (via variable): no max check",
7072 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
7073 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
7074 BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
7075 BPF_LD_MAP_FD(BPF_REG_1, 0),
7076 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
7077 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 6),
7078 BPF_MOV64_REG(BPF_REG_2, BPF_REG_0),
7079 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_0, 0),
7080 BPF_ALU64_REG(BPF_ADD, BPF_REG_2, BPF_REG_3),
7081 BPF_LD_MAP_FD(BPF_REG_1, 0),
7082 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
7085 .fixup_map_hash_16b = { 3, 10 },
7087 .errstr = "R2 unbounded memory access, make sure to bounds check any array access into a map",
7088 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
7091 "map helper access to adjusted map (via variable): wrong max check",
7093 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
7094 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
7095 BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
7096 BPF_LD_MAP_FD(BPF_REG_1, 0),
7097 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
7098 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 7),
7099 BPF_MOV64_REG(BPF_REG_2, BPF_REG_0),
7100 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_0, 0),
7101 BPF_JMP_IMM(BPF_JGT, BPF_REG_3,
7102 offsetof(struct other_val, bar) + 1, 4),
7103 BPF_ALU64_REG(BPF_ADD, BPF_REG_2, BPF_REG_3),
7104 BPF_LD_MAP_FD(BPF_REG_1, 0),
7105 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
7108 .fixup_map_hash_16b = { 3, 11 },
7110 .errstr = "invalid access to map value, value_size=16 off=9 size=8",
7111 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
7114 "map element value is preserved across register spilling",
7116 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
7117 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
7118 BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
7119 BPF_LD_MAP_FD(BPF_REG_1, 0),
7120 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
7121 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 6),
7122 BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, 42),
7123 BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
7124 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -184),
7125 BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_0, 0),
7126 BPF_LDX_MEM(BPF_DW, BPF_REG_3, BPF_REG_1, 0),
7127 BPF_ST_MEM(BPF_DW, BPF_REG_3, 0, 42),
7130 .fixup_map_hash_48b = { 3 },
7131 .errstr_unpriv = "R0 leaks addr",
7133 .result_unpriv = REJECT,
7136 "map element value or null is marked on register spilling",
7138 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
7139 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
7140 BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
7141 BPF_LD_MAP_FD(BPF_REG_1, 0),
7142 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
7143 BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
7144 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -152),
7145 BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_0, 0),
7146 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 2),
7147 BPF_LDX_MEM(BPF_DW, BPF_REG_3, BPF_REG_1, 0),
7148 BPF_ST_MEM(BPF_DW, BPF_REG_3, 0, 42),
7151 .fixup_map_hash_48b = { 3 },
7152 .errstr_unpriv = "R0 leaks addr",
7154 .result_unpriv = REJECT,
7157 "map element value store of cleared call register",
7159 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
7160 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
7161 BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
7162 BPF_LD_MAP_FD(BPF_REG_1, 0),
7163 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
7164 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 1),
7165 BPF_STX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, 0),
7168 .fixup_map_hash_48b = { 3 },
7169 .errstr_unpriv = "R1 !read_ok",
7170 .errstr = "R1 !read_ok",
7172 .result_unpriv = REJECT,
7175 "map element value with unaligned store",
7177 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
7178 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
7179 BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
7180 BPF_LD_MAP_FD(BPF_REG_1, 0),
7181 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
7182 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 17),
7183 BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 3),
7184 BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, 42),
7185 BPF_ST_MEM(BPF_DW, BPF_REG_0, 2, 43),
7186 BPF_ST_MEM(BPF_DW, BPF_REG_0, -2, 44),
7187 BPF_MOV64_REG(BPF_REG_8, BPF_REG_0),
7188 BPF_ST_MEM(BPF_DW, BPF_REG_8, 0, 32),
7189 BPF_ST_MEM(BPF_DW, BPF_REG_8, 2, 33),
7190 BPF_ST_MEM(BPF_DW, BPF_REG_8, -2, 34),
7191 BPF_ALU64_IMM(BPF_ADD, BPF_REG_8, 5),
7192 BPF_ST_MEM(BPF_DW, BPF_REG_8, 0, 22),
7193 BPF_ST_MEM(BPF_DW, BPF_REG_8, 4, 23),
7194 BPF_ST_MEM(BPF_DW, BPF_REG_8, -7, 24),
7195 BPF_MOV64_REG(BPF_REG_7, BPF_REG_8),
7196 BPF_ALU64_IMM(BPF_ADD, BPF_REG_7, 3),
7197 BPF_ST_MEM(BPF_DW, BPF_REG_7, 0, 22),
7198 BPF_ST_MEM(BPF_DW, BPF_REG_7, 4, 23),
7199 BPF_ST_MEM(BPF_DW, BPF_REG_7, -4, 24),
7202 .fixup_map_hash_48b = { 3 },
7203 .errstr_unpriv = "R0 leaks addr",
7205 .result_unpriv = REJECT,
7206 .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
7209 "map element value with unaligned load",
7211 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
7212 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
7213 BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
7214 BPF_LD_MAP_FD(BPF_REG_1, 0),
7215 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
7216 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 11),
7217 BPF_LDX_MEM(BPF_W, BPF_REG_1, BPF_REG_0, 0),
7218 BPF_JMP_IMM(BPF_JGE, BPF_REG_1, MAX_ENTRIES, 9),
7219 BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 3),
7220 BPF_LDX_MEM(BPF_DW, BPF_REG_7, BPF_REG_0, 0),
7221 BPF_LDX_MEM(BPF_DW, BPF_REG_7, BPF_REG_0, 2),
7222 BPF_MOV64_REG(BPF_REG_8, BPF_REG_0),
7223 BPF_LDX_MEM(BPF_DW, BPF_REG_7, BPF_REG_8, 0),
7224 BPF_LDX_MEM(BPF_DW, BPF_REG_7, BPF_REG_8, 2),
7225 BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 5),
7226 BPF_LDX_MEM(BPF_DW, BPF_REG_7, BPF_REG_0, 0),
7227 BPF_LDX_MEM(BPF_DW, BPF_REG_7, BPF_REG_0, 4),
7230 .fixup_map_hash_48b = { 3 },
7231 .errstr_unpriv = "R0 leaks addr",
7233 .result_unpriv = REJECT,
7234 .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
7237 "map element value illegal alu op, 1",
7239 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
7240 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
7241 BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
7242 BPF_LD_MAP_FD(BPF_REG_1, 0),
7243 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
7244 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 2),
7245 BPF_ALU64_IMM(BPF_AND, BPF_REG_0, 8),
7246 BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, 22),
7249 .fixup_map_hash_48b = { 3 },
7250 .errstr = "R0 bitwise operator &= on pointer",
7254 "map element value illegal alu op, 2",
7256 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
7257 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
7258 BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
7259 BPF_LD_MAP_FD(BPF_REG_1, 0),
7260 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
7261 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 2),
7262 BPF_ALU32_IMM(BPF_ADD, BPF_REG_0, 0),
7263 BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, 22),
7266 .fixup_map_hash_48b = { 3 },
7267 .errstr = "R0 32-bit pointer arithmetic prohibited",
7271 "map element value illegal alu op, 3",
7273 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
7274 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
7275 BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
7276 BPF_LD_MAP_FD(BPF_REG_1, 0),
7277 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
7278 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 2),
7279 BPF_ALU64_IMM(BPF_DIV, BPF_REG_0, 42),
7280 BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, 22),
7283 .fixup_map_hash_48b = { 3 },
7284 .errstr = "R0 pointer arithmetic with /= operator",
7288 "map element value illegal alu op, 4",
7290 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
7291 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
7292 BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
7293 BPF_LD_MAP_FD(BPF_REG_1, 0),
7294 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
7295 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 2),
7296 BPF_ENDIAN(BPF_FROM_BE, BPF_REG_0, 64),
7297 BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, 22),
7300 .fixup_map_hash_48b = { 3 },
7301 .errstr_unpriv = "R0 pointer arithmetic prohibited",
7302 .errstr = "invalid mem access 'inv'",
7304 .result_unpriv = REJECT,
7305 .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
7308 "map element value illegal alu op, 5",
7310 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
7311 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
7312 BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
7313 BPF_LD_MAP_FD(BPF_REG_1, 0),
7314 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
7315 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 7),
7316 BPF_MOV64_IMM(BPF_REG_3, 4096),
7317 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
7318 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
7319 BPF_STX_MEM(BPF_DW, BPF_REG_2, BPF_REG_0, 0),
7320 BPF_STX_XADD(BPF_DW, BPF_REG_2, BPF_REG_3, 0),
7321 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_2, 0),
7322 BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, 22),
7325 .fixup_map_hash_48b = { 3 },
7326 .errstr = "R0 invalid mem access 'inv'",
7328 .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
7331 "map element value is preserved across register spilling",
7333 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
7334 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
7335 BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
7336 BPF_LD_MAP_FD(BPF_REG_1, 0),
7337 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
7338 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 7),
7339 BPF_ALU64_IMM(BPF_ADD, BPF_REG_0,
7340 offsetof(struct test_val, foo)),
7341 BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, 42),
7342 BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
7343 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -184),
7344 BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_0, 0),
7345 BPF_LDX_MEM(BPF_DW, BPF_REG_3, BPF_REG_1, 0),
7346 BPF_ST_MEM(BPF_DW, BPF_REG_3, 0, 42),
7349 .fixup_map_hash_48b = { 3 },
7350 .errstr_unpriv = "R0 leaks addr",
7352 .result_unpriv = REJECT,
7353 .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
7356 "helper access to variable memory: stack, bitwise AND + JMP, correct bounds",
7358 BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
7359 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -64),
7360 BPF_MOV64_IMM(BPF_REG_0, 0),
7361 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -64),
7362 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -56),
7363 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -48),
7364 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -40),
7365 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -32),
7366 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -24),
7367 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -16),
7368 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -8),
7369 BPF_MOV64_IMM(BPF_REG_2, 16),
7370 BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_2, -128),
7371 BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_1, -128),
7372 BPF_ALU64_IMM(BPF_AND, BPF_REG_2, 64),
7373 BPF_MOV64_IMM(BPF_REG_4, 0),
7374 BPF_JMP_REG(BPF_JGE, BPF_REG_4, BPF_REG_2, 2),
7375 BPF_MOV64_IMM(BPF_REG_3, 0),
7376 BPF_EMIT_CALL(BPF_FUNC_probe_read),
7377 BPF_MOV64_IMM(BPF_REG_0, 0),
7381 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
7384 "helper access to variable memory: stack, bitwise AND, zero included",
7386 BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
7387 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -64),
7388 BPF_MOV64_IMM(BPF_REG_2, 16),
7389 BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_2, -128),
7390 BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_1, -128),
7391 BPF_ALU64_IMM(BPF_AND, BPF_REG_2, 64),
7392 BPF_MOV64_IMM(BPF_REG_3, 0),
7393 BPF_EMIT_CALL(BPF_FUNC_probe_read),
7396 .errstr = "invalid indirect read from stack off -64+0 size 64",
7398 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
7401 "helper access to variable memory: stack, bitwise AND + JMP, wrong max",
7403 BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
7404 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -64),
7405 BPF_MOV64_IMM(BPF_REG_2, 16),
7406 BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_2, -128),
7407 BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_1, -128),
7408 BPF_ALU64_IMM(BPF_AND, BPF_REG_2, 65),
7409 BPF_MOV64_IMM(BPF_REG_4, 0),
7410 BPF_JMP_REG(BPF_JGE, BPF_REG_4, BPF_REG_2, 2),
7411 BPF_MOV64_IMM(BPF_REG_3, 0),
7412 BPF_EMIT_CALL(BPF_FUNC_probe_read),
7413 BPF_MOV64_IMM(BPF_REG_0, 0),
7416 .errstr = "invalid stack type R1 off=-64 access_size=65",
7418 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
7421 "helper access to variable memory: stack, JMP, correct bounds",
7423 BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
7424 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -64),
7425 BPF_MOV64_IMM(BPF_REG_0, 0),
7426 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -64),
7427 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -56),
7428 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -48),
7429 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -40),
7430 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -32),
7431 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -24),
7432 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -16),
7433 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -8),
7434 BPF_MOV64_IMM(BPF_REG_2, 16),
7435 BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_2, -128),
7436 BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_1, -128),
7437 BPF_JMP_IMM(BPF_JGT, BPF_REG_2, 64, 4),
7438 BPF_MOV64_IMM(BPF_REG_4, 0),
7439 BPF_JMP_REG(BPF_JGE, BPF_REG_4, BPF_REG_2, 2),
7440 BPF_MOV64_IMM(BPF_REG_3, 0),
7441 BPF_EMIT_CALL(BPF_FUNC_probe_read),
7442 BPF_MOV64_IMM(BPF_REG_0, 0),
7446 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
7449 "helper access to variable memory: stack, JMP (signed), correct bounds",
7451 BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
7452 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -64),
7453 BPF_MOV64_IMM(BPF_REG_0, 0),
7454 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -64),
7455 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -56),
7456 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -48),
7457 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -40),
7458 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -32),
7459 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -24),
7460 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -16),
7461 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -8),
7462 BPF_MOV64_IMM(BPF_REG_2, 16),
7463 BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_2, -128),
7464 BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_1, -128),
7465 BPF_JMP_IMM(BPF_JSGT, BPF_REG_2, 64, 4),
7466 BPF_MOV64_IMM(BPF_REG_4, 0),
7467 BPF_JMP_REG(BPF_JSGE, BPF_REG_4, BPF_REG_2, 2),
7468 BPF_MOV64_IMM(BPF_REG_3, 0),
7469 BPF_EMIT_CALL(BPF_FUNC_probe_read),
7470 BPF_MOV64_IMM(BPF_REG_0, 0),
7474 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
7477 "helper access to variable memory: stack, JMP, bounds + offset",
7479 BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
7480 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -64),
7481 BPF_MOV64_IMM(BPF_REG_2, 16),
7482 BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_2, -128),
7483 BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_1, -128),
7484 BPF_JMP_IMM(BPF_JGT, BPF_REG_2, 64, 5),
7485 BPF_MOV64_IMM(BPF_REG_4, 0),
7486 BPF_JMP_REG(BPF_JGE, BPF_REG_4, BPF_REG_2, 3),
7487 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, 1),
7488 BPF_MOV64_IMM(BPF_REG_3, 0),
7489 BPF_EMIT_CALL(BPF_FUNC_probe_read),
7490 BPF_MOV64_IMM(BPF_REG_0, 0),
7493 .errstr = "invalid stack type R1 off=-64 access_size=65",
7495 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
7498 "helper access to variable memory: stack, JMP, wrong max",
7500 BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
7501 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -64),
7502 BPF_MOV64_IMM(BPF_REG_2, 16),
7503 BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_2, -128),
7504 BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_1, -128),
7505 BPF_JMP_IMM(BPF_JGT, BPF_REG_2, 65, 4),
7506 BPF_MOV64_IMM(BPF_REG_4, 0),
7507 BPF_JMP_REG(BPF_JGE, BPF_REG_4, BPF_REG_2, 2),
7508 BPF_MOV64_IMM(BPF_REG_3, 0),
7509 BPF_EMIT_CALL(BPF_FUNC_probe_read),
7510 BPF_MOV64_IMM(BPF_REG_0, 0),
7513 .errstr = "invalid stack type R1 off=-64 access_size=65",
7515 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
7518 "helper access to variable memory: stack, JMP, no max check",
7520 BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
7521 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -64),
7522 BPF_MOV64_IMM(BPF_REG_2, 16),
7523 BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_2, -128),
7524 BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_1, -128),
7525 BPF_MOV64_IMM(BPF_REG_4, 0),
7526 BPF_JMP_REG(BPF_JGE, BPF_REG_4, BPF_REG_2, 2),
7527 BPF_MOV64_IMM(BPF_REG_3, 0),
7528 BPF_EMIT_CALL(BPF_FUNC_probe_read),
7529 BPF_MOV64_IMM(BPF_REG_0, 0),
7532 /* because max wasn't checked, signed min is negative */
7533 .errstr = "R2 min value is negative, either use unsigned or 'var &= const'",
7535 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
7538 "helper access to variable memory: stack, JMP, no min check",
7540 BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
7541 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -64),
7542 BPF_MOV64_IMM(BPF_REG_2, 16),
7543 BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_2, -128),
7544 BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_1, -128),
7545 BPF_JMP_IMM(BPF_JGT, BPF_REG_2, 64, 3),
7546 BPF_MOV64_IMM(BPF_REG_3, 0),
7547 BPF_EMIT_CALL(BPF_FUNC_probe_read),
7548 BPF_MOV64_IMM(BPF_REG_0, 0),
7551 .errstr = "invalid indirect read from stack off -64+0 size 64",
7553 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
7556 "helper access to variable memory: stack, JMP (signed), no min check",
7558 BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
7559 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -64),
7560 BPF_MOV64_IMM(BPF_REG_2, 16),
7561 BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_2, -128),
7562 BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_1, -128),
7563 BPF_JMP_IMM(BPF_JSGT, BPF_REG_2, 64, 3),
7564 BPF_MOV64_IMM(BPF_REG_3, 0),
7565 BPF_EMIT_CALL(BPF_FUNC_probe_read),
7566 BPF_MOV64_IMM(BPF_REG_0, 0),
7569 .errstr = "R2 min value is negative",
7571 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
7574 "helper access to variable memory: map, JMP, correct bounds",
7576 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
7577 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
7578 BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
7579 BPF_LD_MAP_FD(BPF_REG_1, 0),
7580 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
7581 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 10),
7582 BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
7583 BPF_MOV64_IMM(BPF_REG_2, sizeof(struct test_val)),
7584 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_2, -128),
7585 BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_10, -128),
7586 BPF_JMP_IMM(BPF_JSGT, BPF_REG_2,
7587 sizeof(struct test_val), 4),
7588 BPF_MOV64_IMM(BPF_REG_4, 0),
7589 BPF_JMP_REG(BPF_JSGE, BPF_REG_4, BPF_REG_2, 2),
7590 BPF_MOV64_IMM(BPF_REG_3, 0),
7591 BPF_EMIT_CALL(BPF_FUNC_probe_read),
7592 BPF_MOV64_IMM(BPF_REG_0, 0),
7595 .fixup_map_hash_48b = { 3 },
7597 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
7600 "helper access to variable memory: map, JMP, wrong max",
7602 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
7603 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
7604 BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
7605 BPF_LD_MAP_FD(BPF_REG_1, 0),
7606 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
7607 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 10),
7608 BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
7609 BPF_MOV64_IMM(BPF_REG_2, sizeof(struct test_val)),
7610 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_2, -128),
7611 BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_10, -128),
7612 BPF_JMP_IMM(BPF_JSGT, BPF_REG_2,
7613 sizeof(struct test_val) + 1, 4),
7614 BPF_MOV64_IMM(BPF_REG_4, 0),
7615 BPF_JMP_REG(BPF_JSGE, BPF_REG_4, BPF_REG_2, 2),
7616 BPF_MOV64_IMM(BPF_REG_3, 0),
7617 BPF_EMIT_CALL(BPF_FUNC_probe_read),
7618 BPF_MOV64_IMM(BPF_REG_0, 0),
7621 .fixup_map_hash_48b = { 3 },
7622 .errstr = "invalid access to map value, value_size=48 off=0 size=49",
7624 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
7627 "helper access to variable memory: map adjusted, JMP, correct bounds",
7629 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
7630 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
7631 BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
7632 BPF_LD_MAP_FD(BPF_REG_1, 0),
7633 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
7634 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 11),
7635 BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
7636 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 20),
7637 BPF_MOV64_IMM(BPF_REG_2, sizeof(struct test_val)),
7638 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_2, -128),
7639 BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_10, -128),
7640 BPF_JMP_IMM(BPF_JSGT, BPF_REG_2,
7641 sizeof(struct test_val) - 20, 4),
7642 BPF_MOV64_IMM(BPF_REG_4, 0),
7643 BPF_JMP_REG(BPF_JSGE, BPF_REG_4, BPF_REG_2, 2),
7644 BPF_MOV64_IMM(BPF_REG_3, 0),
7645 BPF_EMIT_CALL(BPF_FUNC_probe_read),
7646 BPF_MOV64_IMM(BPF_REG_0, 0),
7649 .fixup_map_hash_48b = { 3 },
7651 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
7654 "helper access to variable memory: map adjusted, JMP, wrong max",
7656 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
7657 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
7658 BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
7659 BPF_LD_MAP_FD(BPF_REG_1, 0),
7660 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
7661 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 11),
7662 BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
7663 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 20),
7664 BPF_MOV64_IMM(BPF_REG_2, sizeof(struct test_val)),
7665 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_2, -128),
7666 BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_10, -128),
7667 BPF_JMP_IMM(BPF_JSGT, BPF_REG_2,
7668 sizeof(struct test_val) - 19, 4),
7669 BPF_MOV64_IMM(BPF_REG_4, 0),
7670 BPF_JMP_REG(BPF_JSGE, BPF_REG_4, BPF_REG_2, 2),
7671 BPF_MOV64_IMM(BPF_REG_3, 0),
7672 BPF_EMIT_CALL(BPF_FUNC_probe_read),
7673 BPF_MOV64_IMM(BPF_REG_0, 0),
7676 .fixup_map_hash_48b = { 3 },
7677 .errstr = "R1 min value is outside of the array range",
7679 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
7682 "helper access to variable memory: size = 0 allowed on NULL (ARG_PTR_TO_MEM_OR_NULL)",
7684 BPF_MOV64_IMM(BPF_REG_1, 0),
7685 BPF_MOV64_IMM(BPF_REG_2, 0),
7686 BPF_MOV64_IMM(BPF_REG_3, 0),
7687 BPF_MOV64_IMM(BPF_REG_4, 0),
7688 BPF_MOV64_IMM(BPF_REG_5, 0),
7689 BPF_EMIT_CALL(BPF_FUNC_csum_diff),
7693 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
7696 "helper access to variable memory: size > 0 not allowed on NULL (ARG_PTR_TO_MEM_OR_NULL)",
7698 BPF_MOV64_IMM(BPF_REG_1, 0),
7699 BPF_MOV64_IMM(BPF_REG_2, 1),
7700 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_2, -128),
7701 BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_10, -128),
7702 BPF_ALU64_IMM(BPF_AND, BPF_REG_2, 64),
7703 BPF_MOV64_IMM(BPF_REG_3, 0),
7704 BPF_MOV64_IMM(BPF_REG_4, 0),
7705 BPF_MOV64_IMM(BPF_REG_5, 0),
7706 BPF_EMIT_CALL(BPF_FUNC_csum_diff),
7709 .errstr = "R1 type=inv expected=fp",
7711 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
7714 "helper access to variable memory: size = 0 allowed on != NULL stack pointer (ARG_PTR_TO_MEM_OR_NULL)",
7716 BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
7717 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -8),
7718 BPF_MOV64_IMM(BPF_REG_2, 0),
7719 BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_2, 0),
7720 BPF_ALU64_IMM(BPF_AND, BPF_REG_2, 8),
7721 BPF_MOV64_IMM(BPF_REG_3, 0),
7722 BPF_MOV64_IMM(BPF_REG_4, 0),
7723 BPF_MOV64_IMM(BPF_REG_5, 0),
7724 BPF_EMIT_CALL(BPF_FUNC_csum_diff),
7728 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
7731 "helper access to variable memory: size = 0 allowed on != NULL map pointer (ARG_PTR_TO_MEM_OR_NULL)",
7733 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
7734 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
7735 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
7736 BPF_LD_MAP_FD(BPF_REG_1, 0),
7737 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
7738 BPF_FUNC_map_lookup_elem),
7739 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 6),
7740 BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
7741 BPF_MOV64_IMM(BPF_REG_2, 0),
7742 BPF_MOV64_IMM(BPF_REG_3, 0),
7743 BPF_MOV64_IMM(BPF_REG_4, 0),
7744 BPF_MOV64_IMM(BPF_REG_5, 0),
7745 BPF_EMIT_CALL(BPF_FUNC_csum_diff),
7748 .fixup_map_hash_8b = { 3 },
7750 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
7753 "helper access to variable memory: size possible = 0 allowed on != NULL stack pointer (ARG_PTR_TO_MEM_OR_NULL)",
7755 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
7756 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
7757 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
7758 BPF_LD_MAP_FD(BPF_REG_1, 0),
7759 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
7760 BPF_FUNC_map_lookup_elem),
7761 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 9),
7762 BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_0, 0),
7763 BPF_JMP_IMM(BPF_JGT, BPF_REG_2, 8, 7),
7764 BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
7765 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -8),
7766 BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_2, 0),
7767 BPF_MOV64_IMM(BPF_REG_3, 0),
7768 BPF_MOV64_IMM(BPF_REG_4, 0),
7769 BPF_MOV64_IMM(BPF_REG_5, 0),
7770 BPF_EMIT_CALL(BPF_FUNC_csum_diff),
7773 .fixup_map_hash_8b = { 3 },
7775 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
7778 "helper access to variable memory: size possible = 0 allowed on != NULL map pointer (ARG_PTR_TO_MEM_OR_NULL)",
7780 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
7781 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
7782 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
7783 BPF_LD_MAP_FD(BPF_REG_1, 0),
7784 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
7785 BPF_FUNC_map_lookup_elem),
7786 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 7),
7787 BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
7788 BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_0, 0),
7789 BPF_JMP_IMM(BPF_JGT, BPF_REG_2, 8, 4),
7790 BPF_MOV64_IMM(BPF_REG_3, 0),
7791 BPF_MOV64_IMM(BPF_REG_4, 0),
7792 BPF_MOV64_IMM(BPF_REG_5, 0),
7793 BPF_EMIT_CALL(BPF_FUNC_csum_diff),
7796 .fixup_map_hash_8b = { 3 },
7798 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
7801 "helper access to variable memory: size possible = 0 allowed on != NULL packet pointer (ARG_PTR_TO_MEM_OR_NULL)",
7803 BPF_LDX_MEM(BPF_W, BPF_REG_6, BPF_REG_1,
7804 offsetof(struct __sk_buff, data)),
7805 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
7806 offsetof(struct __sk_buff, data_end)),
7807 BPF_MOV64_REG(BPF_REG_0, BPF_REG_6),
7808 BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
7809 BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 7),
7810 BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
7811 BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_6, 0),
7812 BPF_JMP_IMM(BPF_JGT, BPF_REG_2, 8, 4),
7813 BPF_MOV64_IMM(BPF_REG_3, 0),
7814 BPF_MOV64_IMM(BPF_REG_4, 0),
7815 BPF_MOV64_IMM(BPF_REG_5, 0),
7816 BPF_EMIT_CALL(BPF_FUNC_csum_diff),
7820 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
7821 .retval = 0 /* csum_diff of 64-byte packet */,
7822 .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
7825 "helper access to variable memory: size = 0 not allowed on NULL (!ARG_PTR_TO_MEM_OR_NULL)",
7827 BPF_MOV64_IMM(BPF_REG_1, 0),
7828 BPF_MOV64_IMM(BPF_REG_2, 0),
7829 BPF_MOV64_IMM(BPF_REG_3, 0),
7830 BPF_EMIT_CALL(BPF_FUNC_probe_read),
7833 .errstr = "R1 type=inv expected=fp",
7835 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
7838 "helper access to variable memory: size > 0 not allowed on NULL (!ARG_PTR_TO_MEM_OR_NULL)",
7840 BPF_MOV64_IMM(BPF_REG_1, 0),
7841 BPF_MOV64_IMM(BPF_REG_2, 1),
7842 BPF_MOV64_IMM(BPF_REG_3, 0),
7843 BPF_EMIT_CALL(BPF_FUNC_probe_read),
7846 .errstr = "R1 type=inv expected=fp",
7848 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
7851 "helper access to variable memory: size = 0 allowed on != NULL stack pointer (!ARG_PTR_TO_MEM_OR_NULL)",
7853 BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
7854 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -8),
7855 BPF_MOV64_IMM(BPF_REG_2, 0),
7856 BPF_MOV64_IMM(BPF_REG_3, 0),
7857 BPF_EMIT_CALL(BPF_FUNC_probe_read),
7861 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
7864 "helper access to variable memory: size = 0 allowed on != NULL map pointer (!ARG_PTR_TO_MEM_OR_NULL)",
7866 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
7867 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
7868 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
7869 BPF_LD_MAP_FD(BPF_REG_1, 0),
7870 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
7871 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 4),
7872 BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
7873 BPF_MOV64_IMM(BPF_REG_2, 0),
7874 BPF_MOV64_IMM(BPF_REG_3, 0),
7875 BPF_EMIT_CALL(BPF_FUNC_probe_read),
7878 .fixup_map_hash_8b = { 3 },
7880 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
7883 "helper access to variable memory: size possible = 0 allowed on != NULL stack pointer (!ARG_PTR_TO_MEM_OR_NULL)",
7885 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
7886 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
7887 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
7888 BPF_LD_MAP_FD(BPF_REG_1, 0),
7889 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
7890 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 6),
7891 BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_0, 0),
7892 BPF_JMP_IMM(BPF_JGT, BPF_REG_2, 8, 4),
7893 BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
7894 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -8),
7895 BPF_MOV64_IMM(BPF_REG_3, 0),
7896 BPF_EMIT_CALL(BPF_FUNC_probe_read),
7899 .fixup_map_hash_8b = { 3 },
7901 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
7904 "helper access to variable memory: size possible = 0 allowed on != NULL map pointer (!ARG_PTR_TO_MEM_OR_NULL)",
7906 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
7907 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
7908 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
7909 BPF_LD_MAP_FD(BPF_REG_1, 0),
7910 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
7911 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 5),
7912 BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
7913 BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_0, 0),
7914 BPF_JMP_IMM(BPF_JGT, BPF_REG_2, 8, 2),
7915 BPF_MOV64_IMM(BPF_REG_3, 0),
7916 BPF_EMIT_CALL(BPF_FUNC_probe_read),
7919 .fixup_map_hash_8b = { 3 },
7921 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
7924 "helper access to variable memory: 8 bytes leak",
7926 BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
7927 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -64),
7928 BPF_MOV64_IMM(BPF_REG_0, 0),
7929 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -64),
7930 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -56),
7931 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -48),
7932 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -40),
7933 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -24),
7934 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -16),
7935 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -8),
7936 BPF_MOV64_IMM(BPF_REG_2, 1),
7937 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_2, -128),
7938 BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_10, -128),
7939 BPF_ALU64_IMM(BPF_AND, BPF_REG_2, 63),
7940 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, 1),
7941 BPF_MOV64_IMM(BPF_REG_3, 0),
7942 BPF_EMIT_CALL(BPF_FUNC_probe_read),
7943 BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_10, -16),
7946 .errstr = "invalid indirect read from stack off -64+32 size 64",
7948 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
7951 "helper access to variable memory: 8 bytes no leak (init memory)",
7953 BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
7954 BPF_MOV64_IMM(BPF_REG_0, 0),
7955 BPF_MOV64_IMM(BPF_REG_0, 0),
7956 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -64),
7957 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -56),
7958 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -48),
7959 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -40),
7960 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -32),
7961 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -24),
7962 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -16),
7963 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -8),
7964 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -64),
7965 BPF_MOV64_IMM(BPF_REG_2, 0),
7966 BPF_ALU64_IMM(BPF_AND, BPF_REG_2, 32),
7967 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, 32),
7968 BPF_MOV64_IMM(BPF_REG_3, 0),
7969 BPF_EMIT_CALL(BPF_FUNC_probe_read),
7970 BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_10, -16),
7974 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
7977 "invalid and of negative number",
7979 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
7980 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
7981 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
7982 BPF_LD_MAP_FD(BPF_REG_1, 0),
7983 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
7984 BPF_FUNC_map_lookup_elem),
7985 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 4),
7986 BPF_LDX_MEM(BPF_B, BPF_REG_1, BPF_REG_0, 0),
7987 BPF_ALU64_IMM(BPF_AND, BPF_REG_1, -4),
7988 BPF_ALU64_IMM(BPF_LSH, BPF_REG_1, 2),
7989 BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
7990 BPF_ST_MEM(BPF_DW, BPF_REG_0, 0,
7991 offsetof(struct test_val, foo)),
7994 .fixup_map_hash_48b = { 3 },
7995 .errstr = "R0 max value is outside of the array range",
7997 .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
8000 "invalid range check",
8002 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
8003 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
8004 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
8005 BPF_LD_MAP_FD(BPF_REG_1, 0),
8006 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
8007 BPF_FUNC_map_lookup_elem),
8008 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 12),
8009 BPF_LDX_MEM(BPF_W, BPF_REG_1, BPF_REG_0, 0),
8010 BPF_MOV64_IMM(BPF_REG_9, 1),
8011 BPF_ALU32_IMM(BPF_MOD, BPF_REG_1, 2),
8012 BPF_ALU32_IMM(BPF_ADD, BPF_REG_1, 1),
8013 BPF_ALU32_REG(BPF_AND, BPF_REG_9, BPF_REG_1),
8014 BPF_ALU32_IMM(BPF_ADD, BPF_REG_9, 1),
8015 BPF_ALU32_IMM(BPF_RSH, BPF_REG_9, 1),
8016 BPF_MOV32_IMM(BPF_REG_3, 1),
8017 BPF_ALU32_REG(BPF_SUB, BPF_REG_3, BPF_REG_9),
8018 BPF_ALU32_IMM(BPF_MUL, BPF_REG_3, 0x10000000),
8019 BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_3),
8020 BPF_STX_MEM(BPF_W, BPF_REG_0, BPF_REG_3, 0),
8021 BPF_MOV64_REG(BPF_REG_0, 0),
8024 .fixup_map_hash_48b = { 3 },
8025 .errstr = "R0 max value is outside of the array range",
8027 .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
8030 "map in map access",
8032 BPF_ST_MEM(0, BPF_REG_10, -4, 0),
8033 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
8034 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -4),
8035 BPF_LD_MAP_FD(BPF_REG_1, 0),
8036 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
8037 BPF_FUNC_map_lookup_elem),
8038 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 5),
8039 BPF_ST_MEM(0, BPF_REG_10, -4, 0),
8040 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
8041 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -4),
8042 BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
8043 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
8044 BPF_FUNC_map_lookup_elem),
8045 BPF_MOV64_IMM(BPF_REG_0, 0),
8048 .fixup_map_in_map = { 3 },
8052 "invalid inner map pointer",
8054 BPF_ST_MEM(0, BPF_REG_10, -4, 0),
8055 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
8056 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -4),
8057 BPF_LD_MAP_FD(BPF_REG_1, 0),
8058 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
8059 BPF_FUNC_map_lookup_elem),
8060 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 6),
8061 BPF_ST_MEM(0, BPF_REG_10, -4, 0),
8062 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
8063 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -4),
8064 BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
8065 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
8066 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
8067 BPF_FUNC_map_lookup_elem),
8068 BPF_MOV64_IMM(BPF_REG_0, 0),
8071 .fixup_map_in_map = { 3 },
8072 .errstr = "R1 pointer arithmetic on map_ptr prohibited",
8076 "forgot null checking on the inner map pointer",
8078 BPF_ST_MEM(0, BPF_REG_10, -4, 0),
8079 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
8080 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -4),
8081 BPF_LD_MAP_FD(BPF_REG_1, 0),
8082 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
8083 BPF_FUNC_map_lookup_elem),
8084 BPF_ST_MEM(0, BPF_REG_10, -4, 0),
8085 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
8086 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -4),
8087 BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
8088 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
8089 BPF_FUNC_map_lookup_elem),
8090 BPF_MOV64_IMM(BPF_REG_0, 0),
8093 .fixup_map_in_map = { 3 },
8094 .errstr = "R1 type=map_value_or_null expected=map_ptr",
8098 "ld_abs: check calling conv, r1",
8100 BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
8101 BPF_MOV64_IMM(BPF_REG_1, 0),
8102 BPF_LD_ABS(BPF_W, -0x200000),
8103 BPF_MOV64_REG(BPF_REG_0, BPF_REG_1),
8106 .errstr = "R1 !read_ok",
8110 "ld_abs: check calling conv, r2",
8112 BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
8113 BPF_MOV64_IMM(BPF_REG_2, 0),
8114 BPF_LD_ABS(BPF_W, -0x200000),
8115 BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
8118 .errstr = "R2 !read_ok",
8122 "ld_abs: check calling conv, r3",
8124 BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
8125 BPF_MOV64_IMM(BPF_REG_3, 0),
8126 BPF_LD_ABS(BPF_W, -0x200000),
8127 BPF_MOV64_REG(BPF_REG_0, BPF_REG_3),
8130 .errstr = "R3 !read_ok",
8134 "ld_abs: check calling conv, r4",
8136 BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
8137 BPF_MOV64_IMM(BPF_REG_4, 0),
8138 BPF_LD_ABS(BPF_W, -0x200000),
8139 BPF_MOV64_REG(BPF_REG_0, BPF_REG_4),
8142 .errstr = "R4 !read_ok",
8146 "ld_abs: check calling conv, r5",
8148 BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
8149 BPF_MOV64_IMM(BPF_REG_5, 0),
8150 BPF_LD_ABS(BPF_W, -0x200000),
8151 BPF_MOV64_REG(BPF_REG_0, BPF_REG_5),
8154 .errstr = "R5 !read_ok",
8158 "ld_abs: check calling conv, r7",
8160 BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
8161 BPF_MOV64_IMM(BPF_REG_7, 0),
8162 BPF_LD_ABS(BPF_W, -0x200000),
8163 BPF_MOV64_REG(BPF_REG_0, BPF_REG_7),
8169 "ld_abs: tests on r6 and skb data reload helper",
8171 BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
8172 BPF_LD_ABS(BPF_B, 0),
8173 BPF_LD_ABS(BPF_H, 0),
8174 BPF_LD_ABS(BPF_W, 0),
8175 BPF_MOV64_REG(BPF_REG_7, BPF_REG_6),
8176 BPF_MOV64_IMM(BPF_REG_6, 0),
8177 BPF_MOV64_REG(BPF_REG_1, BPF_REG_7),
8178 BPF_MOV64_IMM(BPF_REG_2, 1),
8179 BPF_MOV64_IMM(BPF_REG_3, 2),
8180 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
8181 BPF_FUNC_skb_vlan_push),
8182 BPF_MOV64_REG(BPF_REG_6, BPF_REG_7),
8183 BPF_LD_ABS(BPF_B, 0),
8184 BPF_LD_ABS(BPF_H, 0),
8185 BPF_LD_ABS(BPF_W, 0),
8186 BPF_MOV64_IMM(BPF_REG_0, 42),
8189 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
8191 .retval = 42 /* ultimate return value */,
8194 "ld_ind: check calling conv, r1",
8196 BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
8197 BPF_MOV64_IMM(BPF_REG_1, 1),
8198 BPF_LD_IND(BPF_W, BPF_REG_1, -0x200000),
8199 BPF_MOV64_REG(BPF_REG_0, BPF_REG_1),
8202 .errstr = "R1 !read_ok",
8206 "ld_ind: check calling conv, r2",
8208 BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
8209 BPF_MOV64_IMM(BPF_REG_2, 1),
8210 BPF_LD_IND(BPF_W, BPF_REG_2, -0x200000),
8211 BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
8214 .errstr = "R2 !read_ok",
8218 "ld_ind: check calling conv, r3",
8220 BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
8221 BPF_MOV64_IMM(BPF_REG_3, 1),
8222 BPF_LD_IND(BPF_W, BPF_REG_3, -0x200000),
8223 BPF_MOV64_REG(BPF_REG_0, BPF_REG_3),
8226 .errstr = "R3 !read_ok",
8230 "ld_ind: check calling conv, r4",
8232 BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
8233 BPF_MOV64_IMM(BPF_REG_4, 1),
8234 BPF_LD_IND(BPF_W, BPF_REG_4, -0x200000),
8235 BPF_MOV64_REG(BPF_REG_0, BPF_REG_4),
8238 .errstr = "R4 !read_ok",
8242 "ld_ind: check calling conv, r5",
8244 BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
8245 BPF_MOV64_IMM(BPF_REG_5, 1),
8246 BPF_LD_IND(BPF_W, BPF_REG_5, -0x200000),
8247 BPF_MOV64_REG(BPF_REG_0, BPF_REG_5),
8250 .errstr = "R5 !read_ok",
8254 "ld_ind: check calling conv, r7",
8256 BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
8257 BPF_MOV64_IMM(BPF_REG_7, 1),
8258 BPF_LD_IND(BPF_W, BPF_REG_7, -0x200000),
8259 BPF_MOV64_REG(BPF_REG_0, BPF_REG_7),
8266 "check bpf_perf_event_data->sample_period byte load permitted",
8268 BPF_MOV64_IMM(BPF_REG_0, 0),
8269 #if __BYTE_ORDER == __LITTLE_ENDIAN
8270 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
8271 offsetof(struct bpf_perf_event_data, sample_period)),
8273 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
8274 offsetof(struct bpf_perf_event_data, sample_period) + 7),
8279 .prog_type = BPF_PROG_TYPE_PERF_EVENT,
8282 "check bpf_perf_event_data->sample_period half load permitted",
8284 BPF_MOV64_IMM(BPF_REG_0, 0),
8285 #if __BYTE_ORDER == __LITTLE_ENDIAN
8286 BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_1,
8287 offsetof(struct bpf_perf_event_data, sample_period)),
8289 BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_1,
8290 offsetof(struct bpf_perf_event_data, sample_period) + 6),
8295 .prog_type = BPF_PROG_TYPE_PERF_EVENT,
8298 "check bpf_perf_event_data->sample_period word load permitted",
8300 BPF_MOV64_IMM(BPF_REG_0, 0),
8301 #if __BYTE_ORDER == __LITTLE_ENDIAN
8302 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
8303 offsetof(struct bpf_perf_event_data, sample_period)),
8305 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
8306 offsetof(struct bpf_perf_event_data, sample_period) + 4),
8311 .prog_type = BPF_PROG_TYPE_PERF_EVENT,
8314 "check bpf_perf_event_data->sample_period dword load permitted",
8316 BPF_MOV64_IMM(BPF_REG_0, 0),
8317 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1,
8318 offsetof(struct bpf_perf_event_data, sample_period)),
8322 .prog_type = BPF_PROG_TYPE_PERF_EVENT,
8325 "check skb->data half load not permitted",
8327 BPF_MOV64_IMM(BPF_REG_0, 0),
8328 #if __BYTE_ORDER == __LITTLE_ENDIAN
8329 BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_1,
8330 offsetof(struct __sk_buff, data)),
8332 BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_1,
8333 offsetof(struct __sk_buff, data) + 2),
8338 .errstr = "invalid bpf_context access",
8341 "check skb->tc_classid half load not permitted for lwt prog",
8343 BPF_MOV64_IMM(BPF_REG_0, 0),
8344 #if __BYTE_ORDER == __LITTLE_ENDIAN
8345 BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_1,
8346 offsetof(struct __sk_buff, tc_classid)),
8348 BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_1,
8349 offsetof(struct __sk_buff, tc_classid) + 2),
8354 .errstr = "invalid bpf_context access",
8355 .prog_type = BPF_PROG_TYPE_LWT_IN,
8358 "bounds checks mixing signed and unsigned, positive bounds",
8360 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
8361 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
8362 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
8363 BPF_LD_MAP_FD(BPF_REG_1, 0),
8364 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
8365 BPF_FUNC_map_lookup_elem),
8366 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 7),
8367 BPF_ST_MEM(BPF_DW, BPF_REG_10, -16, -8),
8368 BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_10, -16),
8369 BPF_MOV64_IMM(BPF_REG_2, 2),
8370 BPF_JMP_REG(BPF_JGE, BPF_REG_2, BPF_REG_1, 3),
8371 BPF_JMP_IMM(BPF_JSGT, BPF_REG_1, 4, 2),
8372 BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
8373 BPF_ST_MEM(BPF_B, BPF_REG_0, 0, 0),
8374 BPF_MOV64_IMM(BPF_REG_0, 0),
8377 .fixup_map_hash_8b = { 3 },
8378 .errstr = "unbounded min value",
8382 "bounds checks mixing signed and unsigned",
8384 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
8385 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
8386 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
8387 BPF_LD_MAP_FD(BPF_REG_1, 0),
8388 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
8389 BPF_FUNC_map_lookup_elem),
8390 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 7),
8391 BPF_ST_MEM(BPF_DW, BPF_REG_10, -16, -8),
8392 BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_10, -16),
8393 BPF_MOV64_IMM(BPF_REG_2, -1),
8394 BPF_JMP_REG(BPF_JGT, BPF_REG_1, BPF_REG_2, 3),
8395 BPF_JMP_IMM(BPF_JSGT, BPF_REG_1, 1, 2),
8396 BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
8397 BPF_ST_MEM(BPF_B, BPF_REG_0, 0, 0),
8398 BPF_MOV64_IMM(BPF_REG_0, 0),
8401 .fixup_map_hash_8b = { 3 },
8402 .errstr = "unbounded min value",
8406 "bounds checks mixing signed and unsigned, variant 2",
8408 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
8409 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
8410 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
8411 BPF_LD_MAP_FD(BPF_REG_1, 0),
8412 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
8413 BPF_FUNC_map_lookup_elem),
8414 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 9),
8415 BPF_ST_MEM(BPF_DW, BPF_REG_10, -16, -8),
8416 BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_10, -16),
8417 BPF_MOV64_IMM(BPF_REG_2, -1),
8418 BPF_JMP_REG(BPF_JGT, BPF_REG_1, BPF_REG_2, 5),
8419 BPF_MOV64_IMM(BPF_REG_8, 0),
8420 BPF_ALU64_REG(BPF_ADD, BPF_REG_8, BPF_REG_1),
8421 BPF_JMP_IMM(BPF_JSGT, BPF_REG_8, 1, 2),
8422 BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_8),
8423 BPF_ST_MEM(BPF_B, BPF_REG_8, 0, 0),
8424 BPF_MOV64_IMM(BPF_REG_0, 0),
8427 .fixup_map_hash_8b = { 3 },
8428 .errstr = "unbounded min value",
8432 "bounds checks mixing signed and unsigned, variant 3",
8434 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
8435 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
8436 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
8437 BPF_LD_MAP_FD(BPF_REG_1, 0),
8438 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
8439 BPF_FUNC_map_lookup_elem),
8440 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 8),
8441 BPF_ST_MEM(BPF_DW, BPF_REG_10, -16, -8),
8442 BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_10, -16),
8443 BPF_MOV64_IMM(BPF_REG_2, -1),
8444 BPF_JMP_REG(BPF_JGT, BPF_REG_1, BPF_REG_2, 4),
8445 BPF_MOV64_REG(BPF_REG_8, BPF_REG_1),
8446 BPF_JMP_IMM(BPF_JSGT, BPF_REG_8, 1, 2),
8447 BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_8),
8448 BPF_ST_MEM(BPF_B, BPF_REG_8, 0, 0),
8449 BPF_MOV64_IMM(BPF_REG_0, 0),
8452 .fixup_map_hash_8b = { 3 },
8453 .errstr = "unbounded min value",
8457 "bounds checks mixing signed and unsigned, variant 4",
8459 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
8460 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
8461 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
8462 BPF_LD_MAP_FD(BPF_REG_1, 0),
8463 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
8464 BPF_FUNC_map_lookup_elem),
8465 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 7),
8466 BPF_ST_MEM(BPF_DW, BPF_REG_10, -16, -8),
8467 BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_10, -16),
8468 BPF_MOV64_IMM(BPF_REG_2, 1),
8469 BPF_ALU64_REG(BPF_AND, BPF_REG_1, BPF_REG_2),
8470 BPF_JMP_IMM(BPF_JSGT, BPF_REG_1, 1, 2),
8471 BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
8472 BPF_ST_MEM(BPF_B, BPF_REG_0, 0, 0),
8473 BPF_MOV64_IMM(BPF_REG_0, 0),
8476 .fixup_map_hash_8b = { 3 },
8480 "bounds checks mixing signed and unsigned, variant 5",
8482 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
8483 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
8484 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
8485 BPF_LD_MAP_FD(BPF_REG_1, 0),
8486 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
8487 BPF_FUNC_map_lookup_elem),
8488 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 9),
8489 BPF_ST_MEM(BPF_DW, BPF_REG_10, -16, -8),
8490 BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_10, -16),
8491 BPF_MOV64_IMM(BPF_REG_2, -1),
8492 BPF_JMP_REG(BPF_JGT, BPF_REG_1, BPF_REG_2, 5),
8493 BPF_JMP_IMM(BPF_JSGT, BPF_REG_1, 1, 4),
8494 BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 4),
8495 BPF_ALU64_REG(BPF_SUB, BPF_REG_0, BPF_REG_1),
8496 BPF_ST_MEM(BPF_B, BPF_REG_0, 0, 0),
8497 BPF_MOV64_IMM(BPF_REG_0, 0),
8500 .fixup_map_hash_8b = { 3 },
8501 .errstr = "unbounded min value",
8505 "bounds checks mixing signed and unsigned, variant 6",
8507 BPF_MOV64_IMM(BPF_REG_2, 0),
8508 BPF_MOV64_REG(BPF_REG_3, BPF_REG_10),
8509 BPF_ALU64_IMM(BPF_ADD, BPF_REG_3, -512),
8510 BPF_ST_MEM(BPF_DW, BPF_REG_10, -16, -8),
8511 BPF_LDX_MEM(BPF_DW, BPF_REG_4, BPF_REG_10, -16),
8512 BPF_MOV64_IMM(BPF_REG_6, -1),
8513 BPF_JMP_REG(BPF_JGT, BPF_REG_4, BPF_REG_6, 5),
8514 BPF_JMP_IMM(BPF_JSGT, BPF_REG_4, 1, 4),
8515 BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, 1),
8516 BPF_MOV64_IMM(BPF_REG_5, 0),
8517 BPF_ST_MEM(BPF_H, BPF_REG_10, -512, 0),
8518 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
8519 BPF_FUNC_skb_load_bytes),
8520 BPF_MOV64_IMM(BPF_REG_0, 0),
8523 .errstr = "R4 min value is negative, either use unsigned",
8527 "bounds checks mixing signed and unsigned, variant 7",
8529 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
8530 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
8531 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
8532 BPF_LD_MAP_FD(BPF_REG_1, 0),
8533 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
8534 BPF_FUNC_map_lookup_elem),
8535 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 7),
8536 BPF_ST_MEM(BPF_DW, BPF_REG_10, -16, -8),
8537 BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_10, -16),
8538 BPF_MOV64_IMM(BPF_REG_2, 1024 * 1024 * 1024),
8539 BPF_JMP_REG(BPF_JGT, BPF_REG_1, BPF_REG_2, 3),
8540 BPF_JMP_IMM(BPF_JSGT, BPF_REG_1, 1, 2),
8541 BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
8542 BPF_ST_MEM(BPF_B, BPF_REG_0, 0, 0),
8543 BPF_MOV64_IMM(BPF_REG_0, 0),
8546 .fixup_map_hash_8b = { 3 },
8550 "bounds checks mixing signed and unsigned, variant 8",
8552 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
8553 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
8554 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
8555 BPF_LD_MAP_FD(BPF_REG_1, 0),
8556 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
8557 BPF_FUNC_map_lookup_elem),
8558 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 9),
8559 BPF_ST_MEM(BPF_DW, BPF_REG_10, -16, -8),
8560 BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_10, -16),
8561 BPF_MOV64_IMM(BPF_REG_2, -1),
8562 BPF_JMP_REG(BPF_JGT, BPF_REG_2, BPF_REG_1, 2),
8563 BPF_MOV64_IMM(BPF_REG_0, 0),
8565 BPF_JMP_IMM(BPF_JSGT, BPF_REG_1, 1, 2),
8566 BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
8567 BPF_ST_MEM(BPF_B, BPF_REG_0, 0, 0),
8568 BPF_MOV64_IMM(BPF_REG_0, 0),
8571 .fixup_map_hash_8b = { 3 },
8572 .errstr = "unbounded min value",
8576 "bounds checks mixing signed and unsigned, variant 9",
8578 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
8579 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
8580 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
8581 BPF_LD_MAP_FD(BPF_REG_1, 0),
8582 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
8583 BPF_FUNC_map_lookup_elem),
8584 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 10),
8585 BPF_ST_MEM(BPF_DW, BPF_REG_10, -16, -8),
8586 BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_10, -16),
8587 BPF_LD_IMM64(BPF_REG_2, -9223372036854775808ULL),
8588 BPF_JMP_REG(BPF_JGT, BPF_REG_2, BPF_REG_1, 2),
8589 BPF_MOV64_IMM(BPF_REG_0, 0),
8591 BPF_JMP_IMM(BPF_JSGT, BPF_REG_1, 1, 2),
8592 BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
8593 BPF_ST_MEM(BPF_B, BPF_REG_0, 0, 0),
8594 BPF_MOV64_IMM(BPF_REG_0, 0),
8597 .fixup_map_hash_8b = { 3 },
8601 "bounds checks mixing signed and unsigned, variant 10",
8603 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
8604 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
8605 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
8606 BPF_LD_MAP_FD(BPF_REG_1, 0),
8607 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
8608 BPF_FUNC_map_lookup_elem),
8609 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 9),
8610 BPF_ST_MEM(BPF_DW, BPF_REG_10, -16, -8),
8611 BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_10, -16),
8612 BPF_MOV64_IMM(BPF_REG_2, 0),
8613 BPF_JMP_REG(BPF_JGT, BPF_REG_2, BPF_REG_1, 2),
8614 BPF_MOV64_IMM(BPF_REG_0, 0),
8616 BPF_JMP_IMM(BPF_JSGT, BPF_REG_1, 1, 2),
8617 BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
8618 BPF_ST_MEM(BPF_B, BPF_REG_0, 0, 0),
8619 BPF_MOV64_IMM(BPF_REG_0, 0),
8622 .fixup_map_hash_8b = { 3 },
8623 .errstr = "unbounded min value",
8627 "bounds checks mixing signed and unsigned, variant 11",
8629 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
8630 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
8631 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
8632 BPF_LD_MAP_FD(BPF_REG_1, 0),
8633 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
8634 BPF_FUNC_map_lookup_elem),
8635 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 9),
8636 BPF_ST_MEM(BPF_DW, BPF_REG_10, -16, -8),
8637 BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_10, -16),
8638 BPF_MOV64_IMM(BPF_REG_2, -1),
8639 BPF_JMP_REG(BPF_JGE, BPF_REG_2, BPF_REG_1, 2),
8641 BPF_MOV64_IMM(BPF_REG_0, 0),
8643 BPF_JMP_IMM(BPF_JSGT, BPF_REG_1, 1, 2),
8644 BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
8645 BPF_ST_MEM(BPF_B, BPF_REG_0, 0, 0),
8646 BPF_MOV64_IMM(BPF_REG_0, 0),
8649 .fixup_map_hash_8b = { 3 },
8650 .errstr = "unbounded min value",
8654 "bounds checks mixing signed and unsigned, variant 12",
8656 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
8657 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
8658 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
8659 BPF_LD_MAP_FD(BPF_REG_1, 0),
8660 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
8661 BPF_FUNC_map_lookup_elem),
8662 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 9),
8663 BPF_ST_MEM(BPF_DW, BPF_REG_10, -16, -8),
8664 BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_10, -16),
8665 BPF_MOV64_IMM(BPF_REG_2, -6),
8666 BPF_JMP_REG(BPF_JGE, BPF_REG_2, BPF_REG_1, 2),
8667 BPF_MOV64_IMM(BPF_REG_0, 0),
8669 BPF_JMP_IMM(BPF_JSGT, BPF_REG_1, 1, 2),
8670 BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
8671 BPF_ST_MEM(BPF_B, BPF_REG_0, 0, 0),
8672 BPF_MOV64_IMM(BPF_REG_0, 0),
8675 .fixup_map_hash_8b = { 3 },
8676 .errstr = "unbounded min value",
8680 "bounds checks mixing signed and unsigned, variant 13",
8682 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
8683 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
8684 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
8685 BPF_LD_MAP_FD(BPF_REG_1, 0),
8686 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
8687 BPF_FUNC_map_lookup_elem),
8688 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 6),
8689 BPF_ST_MEM(BPF_DW, BPF_REG_10, -16, -8),
8690 BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_10, -16),
8691 BPF_MOV64_IMM(BPF_REG_2, 2),
8692 BPF_JMP_REG(BPF_JGE, BPF_REG_2, BPF_REG_1, 2),
8693 BPF_MOV64_IMM(BPF_REG_7, 1),
8694 BPF_JMP_IMM(BPF_JSGT, BPF_REG_7, 0, 2),
8695 BPF_MOV64_IMM(BPF_REG_0, 0),
8697 BPF_ALU64_REG(BPF_ADD, BPF_REG_7, BPF_REG_1),
8698 BPF_JMP_IMM(BPF_JSGT, BPF_REG_7, 4, 2),
8699 BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_7),
8700 BPF_ST_MEM(BPF_B, BPF_REG_0, 0, 0),
8701 BPF_MOV64_IMM(BPF_REG_0, 0),
8704 .fixup_map_hash_8b = { 3 },
8705 .errstr = "unbounded min value",
8709 "bounds checks mixing signed and unsigned, variant 14",
8711 BPF_LDX_MEM(BPF_W, BPF_REG_9, BPF_REG_1,
8712 offsetof(struct __sk_buff, mark)),
8713 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
8714 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
8715 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
8716 BPF_LD_MAP_FD(BPF_REG_1, 0),
8717 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
8718 BPF_FUNC_map_lookup_elem),
8719 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 8),
8720 BPF_ST_MEM(BPF_DW, BPF_REG_10, -16, -8),
8721 BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_10, -16),
8722 BPF_MOV64_IMM(BPF_REG_2, -1),
8723 BPF_MOV64_IMM(BPF_REG_8, 2),
8724 BPF_JMP_IMM(BPF_JEQ, BPF_REG_9, 42, 6),
8725 BPF_JMP_REG(BPF_JSGT, BPF_REG_8, BPF_REG_1, 3),
8726 BPF_JMP_IMM(BPF_JSGT, BPF_REG_1, 1, 2),
8727 BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
8728 BPF_ST_MEM(BPF_B, BPF_REG_0, 0, 0),
8729 BPF_MOV64_IMM(BPF_REG_0, 0),
8731 BPF_JMP_REG(BPF_JGT, BPF_REG_1, BPF_REG_2, -3),
8732 BPF_JMP_IMM(BPF_JA, 0, 0, -7),
8734 .fixup_map_hash_8b = { 4 },
8735 .errstr = "unbounded min value",
8739 "bounds checks mixing signed and unsigned, variant 15",
8741 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
8742 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
8743 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
8744 BPF_LD_MAP_FD(BPF_REG_1, 0),
8745 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
8746 BPF_FUNC_map_lookup_elem),
8747 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 4),
8748 BPF_ST_MEM(BPF_DW, BPF_REG_10, -16, -8),
8749 BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_10, -16),
8750 BPF_MOV64_IMM(BPF_REG_2, -6),
8751 BPF_JMP_REG(BPF_JGE, BPF_REG_2, BPF_REG_1, 2),
8752 BPF_MOV64_IMM(BPF_REG_0, 0),
8754 BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
8755 BPF_JMP_IMM(BPF_JGT, BPF_REG_0, 1, 2),
8756 BPF_MOV64_IMM(BPF_REG_0, 0),
8758 BPF_ST_MEM(BPF_B, BPF_REG_0, 0, 0),
8759 BPF_MOV64_IMM(BPF_REG_0, 0),
8762 .fixup_map_hash_8b = { 3 },
8763 .errstr = "unbounded min value",
8765 .result_unpriv = REJECT,
8768 "subtraction bounds (map value) variant 1",
8770 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
8771 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
8772 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
8773 BPF_LD_MAP_FD(BPF_REG_1, 0),
8774 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
8775 BPF_FUNC_map_lookup_elem),
8776 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 9),
8777 BPF_LDX_MEM(BPF_B, BPF_REG_1, BPF_REG_0, 0),
8778 BPF_JMP_IMM(BPF_JGT, BPF_REG_1, 0xff, 7),
8779 BPF_LDX_MEM(BPF_B, BPF_REG_3, BPF_REG_0, 1),
8780 BPF_JMP_IMM(BPF_JGT, BPF_REG_3, 0xff, 5),
8781 BPF_ALU64_REG(BPF_SUB, BPF_REG_1, BPF_REG_3),
8782 BPF_ALU64_IMM(BPF_RSH, BPF_REG_1, 56),
8783 BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
8784 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_0, 0),
8786 BPF_MOV64_IMM(BPF_REG_0, 0),
8789 .fixup_map_hash_8b = { 3 },
8790 .errstr = "R0 max value is outside of the array range",
8794 "subtraction bounds (map value) variant 2",
8796 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
8797 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
8798 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
8799 BPF_LD_MAP_FD(BPF_REG_1, 0),
8800 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
8801 BPF_FUNC_map_lookup_elem),
8802 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 8),
8803 BPF_LDX_MEM(BPF_B, BPF_REG_1, BPF_REG_0, 0),
8804 BPF_JMP_IMM(BPF_JGT, BPF_REG_1, 0xff, 6),
8805 BPF_LDX_MEM(BPF_B, BPF_REG_3, BPF_REG_0, 1),
8806 BPF_JMP_IMM(BPF_JGT, BPF_REG_3, 0xff, 4),
8807 BPF_ALU64_REG(BPF_SUB, BPF_REG_1, BPF_REG_3),
8808 BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
8809 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_0, 0),
8811 BPF_MOV64_IMM(BPF_REG_0, 0),
8814 .fixup_map_hash_8b = { 3 },
8815 .errstr = "R0 min value is negative, either use unsigned index or do a if (index >=0) check.",
8819 "bounds check based on zero-extended MOV",
8821 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
8822 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
8823 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
8824 BPF_LD_MAP_FD(BPF_REG_1, 0),
8825 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
8826 BPF_FUNC_map_lookup_elem),
8827 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 4),
8828 /* r2 = 0x0000'0000'ffff'ffff */
8829 BPF_MOV32_IMM(BPF_REG_2, 0xffffffff),
8831 BPF_ALU64_IMM(BPF_RSH, BPF_REG_2, 32),
8833 BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_2),
8834 /* access at offset 0 */
8835 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_0, 0),
8837 BPF_MOV64_IMM(BPF_REG_0, 0),
8840 .fixup_map_hash_8b = { 3 },
8844 "bounds check based on sign-extended MOV. test1",
8846 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
8847 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
8848 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
8849 BPF_LD_MAP_FD(BPF_REG_1, 0),
8850 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
8851 BPF_FUNC_map_lookup_elem),
8852 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 4),
8853 /* r2 = 0xffff'ffff'ffff'ffff */
8854 BPF_MOV64_IMM(BPF_REG_2, 0xffffffff),
8855 /* r2 = 0xffff'ffff */
8856 BPF_ALU64_IMM(BPF_RSH, BPF_REG_2, 32),
8857 /* r0 = <oob pointer> */
8858 BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_2),
8859 /* access to OOB pointer */
8860 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_0, 0),
8862 BPF_MOV64_IMM(BPF_REG_0, 0),
8865 .fixup_map_hash_8b = { 3 },
8866 .errstr = "map_value pointer and 4294967295",
8870 "bounds check based on sign-extended MOV. test2",
8872 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
8873 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
8874 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
8875 BPF_LD_MAP_FD(BPF_REG_1, 0),
8876 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
8877 BPF_FUNC_map_lookup_elem),
8878 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 4),
8879 /* r2 = 0xffff'ffff'ffff'ffff */
8880 BPF_MOV64_IMM(BPF_REG_2, 0xffffffff),
8881 /* r2 = 0xfff'ffff */
8882 BPF_ALU64_IMM(BPF_RSH, BPF_REG_2, 36),
8883 /* r0 = <oob pointer> */
8884 BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_2),
8885 /* access to OOB pointer */
8886 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_0, 0),
8888 BPF_MOV64_IMM(BPF_REG_0, 0),
8891 .fixup_map_hash_8b = { 3 },
8892 .errstr = "R0 min value is outside of the array range",
8896 "bounds check based on reg_off + var_off + insn_off. test1",
8898 BPF_LDX_MEM(BPF_W, BPF_REG_6, BPF_REG_1,
8899 offsetof(struct __sk_buff, mark)),
8900 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
8901 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
8902 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
8903 BPF_LD_MAP_FD(BPF_REG_1, 0),
8904 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
8905 BPF_FUNC_map_lookup_elem),
8906 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 4),
8907 BPF_ALU64_IMM(BPF_AND, BPF_REG_6, 1),
8908 BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, (1 << 29) - 1),
8909 BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_6),
8910 BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, (1 << 29) - 1),
8911 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_0, 3),
8912 BPF_MOV64_IMM(BPF_REG_0, 0),
8915 .fixup_map_hash_8b = { 4 },
8916 .errstr = "value_size=8 off=1073741825",
8918 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
8921 "bounds check based on reg_off + var_off + insn_off. test2",
8923 BPF_LDX_MEM(BPF_W, BPF_REG_6, BPF_REG_1,
8924 offsetof(struct __sk_buff, mark)),
8925 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
8926 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
8927 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
8928 BPF_LD_MAP_FD(BPF_REG_1, 0),
8929 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
8930 BPF_FUNC_map_lookup_elem),
8931 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 4),
8932 BPF_ALU64_IMM(BPF_AND, BPF_REG_6, 1),
8933 BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, (1 << 30) - 1),
8934 BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_6),
8935 BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, (1 << 29) - 1),
8936 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_0, 3),
8937 BPF_MOV64_IMM(BPF_REG_0, 0),
8940 .fixup_map_hash_8b = { 4 },
8941 .errstr = "value 1073741823",
8943 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
8946 "bounds check after truncation of non-boundary-crossing range",
8948 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
8949 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
8950 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
8951 BPF_LD_MAP_FD(BPF_REG_1, 0),
8952 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
8953 BPF_FUNC_map_lookup_elem),
8954 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 9),
8955 /* r1 = [0x00, 0xff] */
8956 BPF_LDX_MEM(BPF_B, BPF_REG_1, BPF_REG_0, 0),
8957 BPF_MOV64_IMM(BPF_REG_2, 1),
8958 /* r2 = 0x10'0000'0000 */
8959 BPF_ALU64_IMM(BPF_LSH, BPF_REG_2, 36),
8960 /* r1 = [0x10'0000'0000, 0x10'0000'00ff] */
8961 BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_2),
8962 /* r1 = [0x10'7fff'ffff, 0x10'8000'00fe] */
8963 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 0x7fffffff),
8964 /* r1 = [0x00, 0xff] */
8965 BPF_ALU32_IMM(BPF_SUB, BPF_REG_1, 0x7fffffff),
8967 BPF_ALU64_IMM(BPF_RSH, BPF_REG_1, 8),
8969 BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
8970 /* access at offset 0 */
8971 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_0, 0),
8973 BPF_MOV64_IMM(BPF_REG_0, 0),
8976 .fixup_map_hash_8b = { 3 },
8980 "bounds check after truncation of boundary-crossing range (1)",
8982 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
8983 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
8984 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
8985 BPF_LD_MAP_FD(BPF_REG_1, 0),
8986 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
8987 BPF_FUNC_map_lookup_elem),
8988 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 9),
8989 /* r1 = [0x00, 0xff] */
8990 BPF_LDX_MEM(BPF_B, BPF_REG_1, BPF_REG_0, 0),
8991 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 0xffffff80 >> 1),
8992 /* r1 = [0xffff'ff80, 0x1'0000'007f] */
8993 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 0xffffff80 >> 1),
8994 /* r1 = [0xffff'ff80, 0xffff'ffff] or
8995 * [0x0000'0000, 0x0000'007f]
8997 BPF_ALU32_IMM(BPF_ADD, BPF_REG_1, 0),
8998 BPF_ALU64_IMM(BPF_SUB, BPF_REG_1, 0xffffff80 >> 1),
8999 /* r1 = [0x00, 0xff] or
9000 * [0xffff'ffff'0000'0080, 0xffff'ffff'ffff'ffff]
9002 BPF_ALU64_IMM(BPF_SUB, BPF_REG_1, 0xffffff80 >> 1),
9004 * [0x00ff'ffff'ff00'0000, 0x00ff'ffff'ffff'ffff]
9006 BPF_ALU64_IMM(BPF_RSH, BPF_REG_1, 8),
9007 /* no-op or OOB pointer computation */
9008 BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
9009 /* potentially OOB access */
9010 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_0, 0),
9012 BPF_MOV64_IMM(BPF_REG_0, 0),
9015 .fixup_map_hash_8b = { 3 },
9016 /* not actually fully unbounded, but the bound is very high */
9017 .errstr = "R0 unbounded memory access",
9021 "bounds check after truncation of boundary-crossing range (2)",
9023 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
9024 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
9025 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
9026 BPF_LD_MAP_FD(BPF_REG_1, 0),
9027 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
9028 BPF_FUNC_map_lookup_elem),
9029 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 9),
9030 /* r1 = [0x00, 0xff] */
9031 BPF_LDX_MEM(BPF_B, BPF_REG_1, BPF_REG_0, 0),
9032 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 0xffffff80 >> 1),
9033 /* r1 = [0xffff'ff80, 0x1'0000'007f] */
9034 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 0xffffff80 >> 1),
9035 /* r1 = [0xffff'ff80, 0xffff'ffff] or
9036 * [0x0000'0000, 0x0000'007f]
9037 * difference to previous test: truncation via MOV32
9040 BPF_MOV32_REG(BPF_REG_1, BPF_REG_1),
9041 BPF_ALU64_IMM(BPF_SUB, BPF_REG_1, 0xffffff80 >> 1),
9042 /* r1 = [0x00, 0xff] or
9043 * [0xffff'ffff'0000'0080, 0xffff'ffff'ffff'ffff]
9045 BPF_ALU64_IMM(BPF_SUB, BPF_REG_1, 0xffffff80 >> 1),
9047 * [0x00ff'ffff'ff00'0000, 0x00ff'ffff'ffff'ffff]
9049 BPF_ALU64_IMM(BPF_RSH, BPF_REG_1, 8),
9050 /* no-op or OOB pointer computation */
9051 BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
9052 /* potentially OOB access */
9053 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_0, 0),
9055 BPF_MOV64_IMM(BPF_REG_0, 0),
9058 .fixup_map_hash_8b = { 3 },
9059 /* not actually fully unbounded, but the bound is very high */
9060 .errstr = "R0 unbounded memory access",
9064 "bounds check after wrapping 32-bit addition",
9066 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
9067 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
9068 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
9069 BPF_LD_MAP_FD(BPF_REG_1, 0),
9070 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
9071 BPF_FUNC_map_lookup_elem),
9072 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 5),
9073 /* r1 = 0x7fff'ffff */
9074 BPF_MOV64_IMM(BPF_REG_1, 0x7fffffff),
9075 /* r1 = 0xffff'fffe */
9076 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 0x7fffffff),
9078 BPF_ALU32_IMM(BPF_ADD, BPF_REG_1, 2),
9080 BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
9081 /* access at offset 0 */
9082 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_0, 0),
9084 BPF_MOV64_IMM(BPF_REG_0, 0),
9087 .fixup_map_hash_8b = { 3 },
9091 "bounds check after shift with oversized count operand",
9093 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
9094 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
9095 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
9096 BPF_LD_MAP_FD(BPF_REG_1, 0),
9097 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
9098 BPF_FUNC_map_lookup_elem),
9099 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 6),
9100 BPF_MOV64_IMM(BPF_REG_2, 32),
9101 BPF_MOV64_IMM(BPF_REG_1, 1),
9102 /* r1 = (u32)1 << (u32)32 = ? */
9103 BPF_ALU32_REG(BPF_LSH, BPF_REG_1, BPF_REG_2),
9104 /* r1 = [0x0000, 0xffff] */
9105 BPF_ALU64_IMM(BPF_AND, BPF_REG_1, 0xffff),
9106 /* computes unknown pointer, potentially OOB */
9107 BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
9108 /* potentially OOB access */
9109 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_0, 0),
9111 BPF_MOV64_IMM(BPF_REG_0, 0),
9114 .fixup_map_hash_8b = { 3 },
9115 .errstr = "R0 max value is outside of the array range",
9119 "bounds check after right shift of maybe-negative number",
9121 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
9122 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
9123 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
9124 BPF_LD_MAP_FD(BPF_REG_1, 0),
9125 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
9126 BPF_FUNC_map_lookup_elem),
9127 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 6),
9128 /* r1 = [0x00, 0xff] */
9129 BPF_LDX_MEM(BPF_B, BPF_REG_1, BPF_REG_0, 0),
9130 /* r1 = [-0x01, 0xfe] */
9131 BPF_ALU64_IMM(BPF_SUB, BPF_REG_1, 1),
9132 /* r1 = 0 or 0xff'ffff'ffff'ffff */
9133 BPF_ALU64_IMM(BPF_RSH, BPF_REG_1, 8),
9134 /* r1 = 0 or 0xffff'ffff'ffff */
9135 BPF_ALU64_IMM(BPF_RSH, BPF_REG_1, 8),
9136 /* computes unknown pointer, potentially OOB */
9137 BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
9138 /* potentially OOB access */
9139 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_0, 0),
9141 BPF_MOV64_IMM(BPF_REG_0, 0),
9144 .fixup_map_hash_8b = { 3 },
9145 .errstr = "R0 unbounded memory access",
9149 "bounds check map access with off+size signed 32bit overflow. test1",
9151 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
9152 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
9153 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
9154 BPF_LD_MAP_FD(BPF_REG_1, 0),
9155 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
9156 BPF_FUNC_map_lookup_elem),
9157 BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 1),
9159 BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 0x7ffffffe),
9160 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_0, 0),
9164 .fixup_map_hash_8b = { 3 },
9165 .errstr = "map_value pointer and 2147483646",
9169 "bounds check map access with off+size signed 32bit overflow. test2",
9171 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
9172 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
9173 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
9174 BPF_LD_MAP_FD(BPF_REG_1, 0),
9175 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
9176 BPF_FUNC_map_lookup_elem),
9177 BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 1),
9179 BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 0x1fffffff),
9180 BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 0x1fffffff),
9181 BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 0x1fffffff),
9182 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_0, 0),
9186 .fixup_map_hash_8b = { 3 },
9187 .errstr = "pointer offset 1073741822",
9191 "bounds check map access with off+size signed 32bit overflow. test3",
9193 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
9194 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
9195 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
9196 BPF_LD_MAP_FD(BPF_REG_1, 0),
9197 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
9198 BPF_FUNC_map_lookup_elem),
9199 BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 1),
9201 BPF_ALU64_IMM(BPF_SUB, BPF_REG_0, 0x1fffffff),
9202 BPF_ALU64_IMM(BPF_SUB, BPF_REG_0, 0x1fffffff),
9203 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_0, 2),
9207 .fixup_map_hash_8b = { 3 },
9208 .errstr = "pointer offset -1073741822",
9212 "bounds check map access with off+size signed 32bit overflow. test4",
9214 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
9215 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
9216 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
9217 BPF_LD_MAP_FD(BPF_REG_1, 0),
9218 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
9219 BPF_FUNC_map_lookup_elem),
9220 BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 1),
9222 BPF_MOV64_IMM(BPF_REG_1, 1000000),
9223 BPF_ALU64_IMM(BPF_MUL, BPF_REG_1, 1000000),
9224 BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
9225 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_0, 2),
9229 .fixup_map_hash_8b = { 3 },
9230 .errstr = "map_value pointer and 1000000000000",
9234 "pointer/scalar confusion in state equality check (way 1)",
9236 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
9237 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
9238 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
9239 BPF_LD_MAP_FD(BPF_REG_1, 0),
9240 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
9241 BPF_FUNC_map_lookup_elem),
9242 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 2),
9243 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_0, 0),
9245 BPF_MOV64_REG(BPF_REG_0, BPF_REG_10),
9249 .fixup_map_hash_8b = { 3 },
9251 .retval = POINTER_VALUE,
9252 .result_unpriv = REJECT,
9253 .errstr_unpriv = "R0 leaks addr as return value"
9256 "pointer/scalar confusion in state equality check (way 2)",
9258 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
9259 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
9260 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
9261 BPF_LD_MAP_FD(BPF_REG_1, 0),
9262 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
9263 BPF_FUNC_map_lookup_elem),
9264 BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 2),
9265 BPF_MOV64_REG(BPF_REG_0, BPF_REG_10),
9267 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_0, 0),
9270 .fixup_map_hash_8b = { 3 },
9272 .retval = POINTER_VALUE,
9273 .result_unpriv = REJECT,
9274 .errstr_unpriv = "R0 leaks addr as return value"
9277 "variable-offset ctx access",
9279 /* Get an unknown value */
9280 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1, 0),
9281 /* Make it small and 4-byte aligned */
9282 BPF_ALU64_IMM(BPF_AND, BPF_REG_2, 4),
9283 /* add it to skb. We now have either &skb->len or
9284 * &skb->pkt_type, but we don't know which
9286 BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_2),
9287 /* dereference it */
9288 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1, 0),
9291 .errstr = "variable ctx access var_off=(0x0; 0x4)",
9293 .prog_type = BPF_PROG_TYPE_LWT_IN,
9296 "variable-offset stack access",
9298 /* Fill the top 8 bytes of the stack */
9299 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
9300 /* Get an unknown value */
9301 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1, 0),
9302 /* Make it small and 4-byte aligned */
9303 BPF_ALU64_IMM(BPF_AND, BPF_REG_2, 4),
9304 BPF_ALU64_IMM(BPF_SUB, BPF_REG_2, 8),
9305 /* add it to fp. We now have either fp-4 or fp-8, but
9306 * we don't know which
9308 BPF_ALU64_REG(BPF_ADD, BPF_REG_2, BPF_REG_10),
9309 /* dereference it */
9310 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_2, 0),
9313 .errstr = "variable stack access var_off=(0xfffffffffffffff8; 0x4)",
9315 .prog_type = BPF_PROG_TYPE_LWT_IN,
9318 "indirect variable-offset stack access",
9320 /* Fill the top 8 bytes of the stack */
9321 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
9322 /* Get an unknown value */
9323 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1, 0),
9324 /* Make it small and 4-byte aligned */
9325 BPF_ALU64_IMM(BPF_AND, BPF_REG_2, 4),
9326 BPF_ALU64_IMM(BPF_SUB, BPF_REG_2, 8),
9327 /* add it to fp. We now have either fp-4 or fp-8, but
9328 * we don't know which
9330 BPF_ALU64_REG(BPF_ADD, BPF_REG_2, BPF_REG_10),
9331 /* dereference it indirectly */
9332 BPF_LD_MAP_FD(BPF_REG_1, 0),
9333 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
9334 BPF_FUNC_map_lookup_elem),
9335 BPF_MOV64_IMM(BPF_REG_0, 0),
9338 .fixup_map_hash_8b = { 5 },
9339 .errstr = "variable stack read R2",
9341 .prog_type = BPF_PROG_TYPE_LWT_IN,
9344 "direct stack access with 32-bit wraparound. test1",
9346 BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
9347 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 0x7fffffff),
9348 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 0x7fffffff),
9349 BPF_MOV32_IMM(BPF_REG_0, 0),
9350 BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0, 0),
9353 .errstr = "fp pointer and 2147483647",
9357 "direct stack access with 32-bit wraparound. test2",
9359 BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
9360 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 0x3fffffff),
9361 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 0x3fffffff),
9362 BPF_MOV32_IMM(BPF_REG_0, 0),
9363 BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0, 0),
9366 .errstr = "fp pointer and 1073741823",
9370 "direct stack access with 32-bit wraparound. test3",
9372 BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
9373 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 0x1fffffff),
9374 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 0x1fffffff),
9375 BPF_MOV32_IMM(BPF_REG_0, 0),
9376 BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0, 0),
9379 .errstr = "fp pointer offset 1073741822",
9383 "liveness pruning and write screening",
9385 /* Get an unknown value */
9386 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1, 0),
9387 /* branch conditions teach us nothing about R2 */
9388 BPF_JMP_IMM(BPF_JGE, BPF_REG_2, 0, 1),
9389 BPF_MOV64_IMM(BPF_REG_0, 0),
9390 BPF_JMP_IMM(BPF_JGE, BPF_REG_2, 0, 1),
9391 BPF_MOV64_IMM(BPF_REG_0, 0),
9394 .errstr = "R0 !read_ok",
9396 .prog_type = BPF_PROG_TYPE_LWT_IN,
9399 "varlen_map_value_access pruning",
9401 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
9402 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
9403 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
9404 BPF_LD_MAP_FD(BPF_REG_1, 0),
9405 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
9406 BPF_FUNC_map_lookup_elem),
9407 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 8),
9408 BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_0, 0),
9409 BPF_MOV32_IMM(BPF_REG_2, MAX_ENTRIES),
9410 BPF_JMP_REG(BPF_JSGT, BPF_REG_2, BPF_REG_1, 1),
9411 BPF_MOV32_IMM(BPF_REG_1, 0),
9412 BPF_ALU32_IMM(BPF_LSH, BPF_REG_1, 2),
9413 BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
9414 BPF_JMP_IMM(BPF_JA, 0, 0, 0),
9415 BPF_ST_MEM(BPF_DW, BPF_REG_0, 0,
9416 offsetof(struct test_val, foo)),
9419 .fixup_map_hash_48b = { 3 },
9420 .errstr_unpriv = "R0 leaks addr",
9421 .errstr = "R0 unbounded memory access",
9422 .result_unpriv = REJECT,
9424 .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
9427 "invalid 64-bit BPF_END",
9429 BPF_MOV32_IMM(BPF_REG_0, 0),
9431 .code = BPF_ALU64 | BPF_END | BPF_TO_LE,
9432 .dst_reg = BPF_REG_0,
9439 .errstr = "unknown opcode d7",
9443 "XDP, using ifindex from netdev",
9445 BPF_MOV64_IMM(BPF_REG_0, 0),
9446 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
9447 offsetof(struct xdp_md, ingress_ifindex)),
9448 BPF_JMP_IMM(BPF_JLT, BPF_REG_2, 1, 1),
9449 BPF_MOV64_IMM(BPF_REG_0, 1),
9453 .prog_type = BPF_PROG_TYPE_XDP,
9457 "meta access, test1",
9459 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
9460 offsetof(struct xdp_md, data_meta)),
9461 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
9462 offsetof(struct xdp_md, data)),
9463 BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
9464 BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
9465 BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 1),
9466 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_2, 0),
9467 BPF_MOV64_IMM(BPF_REG_0, 0),
9471 .prog_type = BPF_PROG_TYPE_XDP,
9474 "meta access, test2",
9476 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
9477 offsetof(struct xdp_md, data_meta)),
9478 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
9479 offsetof(struct xdp_md, data)),
9480 BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
9481 BPF_ALU64_IMM(BPF_SUB, BPF_REG_0, 8),
9482 BPF_MOV64_REG(BPF_REG_4, BPF_REG_2),
9483 BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, 8),
9484 BPF_JMP_REG(BPF_JGT, BPF_REG_4, BPF_REG_3, 1),
9485 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_0, 0),
9486 BPF_MOV64_IMM(BPF_REG_0, 0),
9490 .errstr = "invalid access to packet, off=-8",
9491 .prog_type = BPF_PROG_TYPE_XDP,
9494 "meta access, test3",
9496 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
9497 offsetof(struct xdp_md, data_meta)),
9498 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
9499 offsetof(struct xdp_md, data_end)),
9500 BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
9501 BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
9502 BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 1),
9503 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_2, 0),
9504 BPF_MOV64_IMM(BPF_REG_0, 0),
9508 .errstr = "invalid access to packet",
9509 .prog_type = BPF_PROG_TYPE_XDP,
9512 "meta access, test4",
9514 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
9515 offsetof(struct xdp_md, data_meta)),
9516 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
9517 offsetof(struct xdp_md, data_end)),
9518 BPF_LDX_MEM(BPF_W, BPF_REG_4, BPF_REG_1,
9519 offsetof(struct xdp_md, data)),
9520 BPF_MOV64_REG(BPF_REG_0, BPF_REG_4),
9521 BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
9522 BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 1),
9523 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_2, 0),
9524 BPF_MOV64_IMM(BPF_REG_0, 0),
9528 .errstr = "invalid access to packet",
9529 .prog_type = BPF_PROG_TYPE_XDP,
9532 "meta access, test5",
9534 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
9535 offsetof(struct xdp_md, data_meta)),
9536 BPF_LDX_MEM(BPF_W, BPF_REG_4, BPF_REG_1,
9537 offsetof(struct xdp_md, data)),
9538 BPF_MOV64_REG(BPF_REG_0, BPF_REG_3),
9539 BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
9540 BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_4, 3),
9541 BPF_MOV64_IMM(BPF_REG_2, -8),
9542 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
9543 BPF_FUNC_xdp_adjust_meta),
9544 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_3, 0),
9545 BPF_MOV64_IMM(BPF_REG_0, 0),
9549 .errstr = "R3 !read_ok",
9550 .prog_type = BPF_PROG_TYPE_XDP,
9553 "meta access, test6",
9555 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
9556 offsetof(struct xdp_md, data_meta)),
9557 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
9558 offsetof(struct xdp_md, data)),
9559 BPF_MOV64_REG(BPF_REG_0, BPF_REG_3),
9560 BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
9561 BPF_MOV64_REG(BPF_REG_4, BPF_REG_2),
9562 BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, 8),
9563 BPF_JMP_REG(BPF_JGT, BPF_REG_4, BPF_REG_0, 1),
9564 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_2, 0),
9565 BPF_MOV64_IMM(BPF_REG_0, 0),
9569 .errstr = "invalid access to packet",
9570 .prog_type = BPF_PROG_TYPE_XDP,
9573 "meta access, test7",
9575 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
9576 offsetof(struct xdp_md, data_meta)),
9577 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
9578 offsetof(struct xdp_md, data)),
9579 BPF_MOV64_REG(BPF_REG_0, BPF_REG_3),
9580 BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
9581 BPF_MOV64_REG(BPF_REG_4, BPF_REG_2),
9582 BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, 8),
9583 BPF_JMP_REG(BPF_JGT, BPF_REG_4, BPF_REG_3, 1),
9584 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_2, 0),
9585 BPF_MOV64_IMM(BPF_REG_0, 0),
9589 .prog_type = BPF_PROG_TYPE_XDP,
9592 "meta access, test8",
9594 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
9595 offsetof(struct xdp_md, data_meta)),
9596 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
9597 offsetof(struct xdp_md, data)),
9598 BPF_MOV64_REG(BPF_REG_4, BPF_REG_2),
9599 BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, 0xFFFF),
9600 BPF_JMP_REG(BPF_JGT, BPF_REG_4, BPF_REG_3, 1),
9601 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_2, 0),
9602 BPF_MOV64_IMM(BPF_REG_0, 0),
9606 .prog_type = BPF_PROG_TYPE_XDP,
9609 "meta access, test9",
9611 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
9612 offsetof(struct xdp_md, data_meta)),
9613 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
9614 offsetof(struct xdp_md, data)),
9615 BPF_MOV64_REG(BPF_REG_4, BPF_REG_2),
9616 BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, 0xFFFF),
9617 BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, 1),
9618 BPF_JMP_REG(BPF_JGT, BPF_REG_4, BPF_REG_3, 1),
9619 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_2, 0),
9620 BPF_MOV64_IMM(BPF_REG_0, 0),
9624 .errstr = "invalid access to packet",
9625 .prog_type = BPF_PROG_TYPE_XDP,
9628 "meta access, test10",
9630 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
9631 offsetof(struct xdp_md, data_meta)),
9632 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
9633 offsetof(struct xdp_md, data)),
9634 BPF_LDX_MEM(BPF_W, BPF_REG_4, BPF_REG_1,
9635 offsetof(struct xdp_md, data_end)),
9636 BPF_MOV64_IMM(BPF_REG_5, 42),
9637 BPF_MOV64_IMM(BPF_REG_6, 24),
9638 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_5, -8),
9639 BPF_STX_XADD(BPF_DW, BPF_REG_10, BPF_REG_6, -8),
9640 BPF_LDX_MEM(BPF_DW, BPF_REG_5, BPF_REG_10, -8),
9641 BPF_JMP_IMM(BPF_JGT, BPF_REG_5, 100, 6),
9642 BPF_ALU64_REG(BPF_ADD, BPF_REG_3, BPF_REG_5),
9643 BPF_MOV64_REG(BPF_REG_5, BPF_REG_3),
9644 BPF_MOV64_REG(BPF_REG_6, BPF_REG_2),
9645 BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, 8),
9646 BPF_JMP_REG(BPF_JGT, BPF_REG_6, BPF_REG_5, 1),
9647 BPF_LDX_MEM(BPF_B, BPF_REG_2, BPF_REG_2, 0),
9648 BPF_MOV64_IMM(BPF_REG_0, 0),
9652 .errstr = "invalid access to packet",
9653 .prog_type = BPF_PROG_TYPE_XDP,
9656 "meta access, test11",
9658 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
9659 offsetof(struct xdp_md, data_meta)),
9660 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
9661 offsetof(struct xdp_md, data)),
9662 BPF_MOV64_IMM(BPF_REG_5, 42),
9663 BPF_MOV64_IMM(BPF_REG_6, 24),
9664 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_5, -8),
9665 BPF_STX_XADD(BPF_DW, BPF_REG_10, BPF_REG_6, -8),
9666 BPF_LDX_MEM(BPF_DW, BPF_REG_5, BPF_REG_10, -8),
9667 BPF_JMP_IMM(BPF_JGT, BPF_REG_5, 100, 6),
9668 BPF_ALU64_REG(BPF_ADD, BPF_REG_2, BPF_REG_5),
9669 BPF_MOV64_REG(BPF_REG_5, BPF_REG_2),
9670 BPF_MOV64_REG(BPF_REG_6, BPF_REG_2),
9671 BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, 8),
9672 BPF_JMP_REG(BPF_JGT, BPF_REG_6, BPF_REG_3, 1),
9673 BPF_LDX_MEM(BPF_B, BPF_REG_5, BPF_REG_5, 0),
9674 BPF_MOV64_IMM(BPF_REG_0, 0),
9678 .prog_type = BPF_PROG_TYPE_XDP,
9681 "meta access, test12",
9683 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
9684 offsetof(struct xdp_md, data_meta)),
9685 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
9686 offsetof(struct xdp_md, data)),
9687 BPF_LDX_MEM(BPF_W, BPF_REG_4, BPF_REG_1,
9688 offsetof(struct xdp_md, data_end)),
9689 BPF_MOV64_REG(BPF_REG_5, BPF_REG_3),
9690 BPF_ALU64_IMM(BPF_ADD, BPF_REG_5, 16),
9691 BPF_JMP_REG(BPF_JGT, BPF_REG_5, BPF_REG_4, 5),
9692 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_3, 0),
9693 BPF_MOV64_REG(BPF_REG_5, BPF_REG_2),
9694 BPF_ALU64_IMM(BPF_ADD, BPF_REG_5, 16),
9695 BPF_JMP_REG(BPF_JGT, BPF_REG_5, BPF_REG_3, 1),
9696 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_2, 0),
9697 BPF_MOV64_IMM(BPF_REG_0, 0),
9701 .prog_type = BPF_PROG_TYPE_XDP,
9704 "arithmetic ops make PTR_TO_CTX unusable",
9706 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1,
9707 offsetof(struct __sk_buff, data) -
9708 offsetof(struct __sk_buff, mark)),
9709 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
9710 offsetof(struct __sk_buff, mark)),
9713 .errstr = "dereference of modified ctx ptr",
9715 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
9718 "pkt_end - pkt_start is allowed",
9720 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
9721 offsetof(struct __sk_buff, data_end)),
9722 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
9723 offsetof(struct __sk_buff, data)),
9724 BPF_ALU64_REG(BPF_SUB, BPF_REG_0, BPF_REG_2),
9728 .retval = TEST_DATA_LEN,
9729 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
9732 "XDP pkt read, pkt_end mangling, bad access 1",
9734 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
9735 offsetof(struct xdp_md, data)),
9736 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
9737 offsetof(struct xdp_md, data_end)),
9738 BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
9739 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
9740 BPF_ALU64_IMM(BPF_ADD, BPF_REG_3, 8),
9741 BPF_JMP_REG(BPF_JGT, BPF_REG_1, BPF_REG_3, 1),
9742 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -8),
9743 BPF_MOV64_IMM(BPF_REG_0, 0),
9746 .errstr = "R3 pointer arithmetic on pkt_end",
9748 .prog_type = BPF_PROG_TYPE_XDP,
9751 "XDP pkt read, pkt_end mangling, bad access 2",
9753 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
9754 offsetof(struct xdp_md, data)),
9755 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
9756 offsetof(struct xdp_md, data_end)),
9757 BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
9758 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
9759 BPF_ALU64_IMM(BPF_SUB, BPF_REG_3, 8),
9760 BPF_JMP_REG(BPF_JGT, BPF_REG_1, BPF_REG_3, 1),
9761 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -8),
9762 BPF_MOV64_IMM(BPF_REG_0, 0),
9765 .errstr = "R3 pointer arithmetic on pkt_end",
9767 .prog_type = BPF_PROG_TYPE_XDP,
9770 "XDP pkt read, pkt_data' > pkt_end, good access",
9772 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
9773 offsetof(struct xdp_md, data)),
9774 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
9775 offsetof(struct xdp_md, data_end)),
9776 BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
9777 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
9778 BPF_JMP_REG(BPF_JGT, BPF_REG_1, BPF_REG_3, 1),
9779 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -8),
9780 BPF_MOV64_IMM(BPF_REG_0, 0),
9784 .prog_type = BPF_PROG_TYPE_XDP,
9785 .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
9788 "XDP pkt read, pkt_data' > pkt_end, bad access 1",
9790 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
9791 offsetof(struct xdp_md, data)),
9792 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
9793 offsetof(struct xdp_md, data_end)),
9794 BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
9795 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
9796 BPF_JMP_REG(BPF_JGT, BPF_REG_1, BPF_REG_3, 1),
9797 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -4),
9798 BPF_MOV64_IMM(BPF_REG_0, 0),
9801 .errstr = "R1 offset is outside of the packet",
9803 .prog_type = BPF_PROG_TYPE_XDP,
9804 .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
9807 "XDP pkt read, pkt_data' > pkt_end, bad access 2",
9809 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
9810 offsetof(struct xdp_md, data)),
9811 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
9812 offsetof(struct xdp_md, data_end)),
9813 BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
9814 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
9815 BPF_JMP_REG(BPF_JGT, BPF_REG_1, BPF_REG_3, 0),
9816 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -8),
9817 BPF_MOV64_IMM(BPF_REG_0, 0),
9820 .errstr = "R1 offset is outside of the packet",
9822 .prog_type = BPF_PROG_TYPE_XDP,
9823 .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
9826 "XDP pkt read, pkt_end > pkt_data', good access",
9828 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
9829 offsetof(struct xdp_md, data)),
9830 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
9831 offsetof(struct xdp_md, data_end)),
9832 BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
9833 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
9834 BPF_JMP_REG(BPF_JGT, BPF_REG_3, BPF_REG_1, 1),
9835 BPF_JMP_IMM(BPF_JA, 0, 0, 1),
9836 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1, -5),
9837 BPF_MOV64_IMM(BPF_REG_0, 0),
9841 .prog_type = BPF_PROG_TYPE_XDP,
9842 .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
9845 "XDP pkt read, pkt_end > pkt_data', bad access 1",
9847 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
9848 offsetof(struct xdp_md, data)),
9849 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
9850 offsetof(struct xdp_md, data_end)),
9851 BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
9852 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
9853 BPF_JMP_REG(BPF_JGT, BPF_REG_3, BPF_REG_1, 1),
9854 BPF_JMP_IMM(BPF_JA, 0, 0, 1),
9855 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -8),
9856 BPF_MOV64_IMM(BPF_REG_0, 0),
9859 .errstr = "R1 offset is outside of the packet",
9861 .prog_type = BPF_PROG_TYPE_XDP,
9862 .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
9865 "XDP pkt read, pkt_end > pkt_data', bad access 2",
9867 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
9868 offsetof(struct xdp_md, data)),
9869 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
9870 offsetof(struct xdp_md, data_end)),
9871 BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
9872 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
9873 BPF_JMP_REG(BPF_JGT, BPF_REG_3, BPF_REG_1, 1),
9874 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -8),
9875 BPF_MOV64_IMM(BPF_REG_0, 0),
9878 .errstr = "R1 offset is outside of the packet",
9880 .prog_type = BPF_PROG_TYPE_XDP,
9881 .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
9884 "XDP pkt read, pkt_data' < pkt_end, good access",
9886 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
9887 offsetof(struct xdp_md, data)),
9888 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
9889 offsetof(struct xdp_md, data_end)),
9890 BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
9891 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
9892 BPF_JMP_REG(BPF_JLT, BPF_REG_1, BPF_REG_3, 1),
9893 BPF_JMP_IMM(BPF_JA, 0, 0, 1),
9894 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1, -5),
9895 BPF_MOV64_IMM(BPF_REG_0, 0),
9899 .prog_type = BPF_PROG_TYPE_XDP,
9900 .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
9903 "XDP pkt read, pkt_data' < pkt_end, bad access 1",
9905 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
9906 offsetof(struct xdp_md, data)),
9907 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
9908 offsetof(struct xdp_md, data_end)),
9909 BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
9910 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
9911 BPF_JMP_REG(BPF_JLT, BPF_REG_1, BPF_REG_3, 1),
9912 BPF_JMP_IMM(BPF_JA, 0, 0, 1),
9913 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -8),
9914 BPF_MOV64_IMM(BPF_REG_0, 0),
9917 .errstr = "R1 offset is outside of the packet",
9919 .prog_type = BPF_PROG_TYPE_XDP,
9920 .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
9923 "XDP pkt read, pkt_data' < pkt_end, bad access 2",
9925 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
9926 offsetof(struct xdp_md, data)),
9927 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
9928 offsetof(struct xdp_md, data_end)),
9929 BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
9930 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
9931 BPF_JMP_REG(BPF_JLT, BPF_REG_1, BPF_REG_3, 1),
9932 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -8),
9933 BPF_MOV64_IMM(BPF_REG_0, 0),
9936 .errstr = "R1 offset is outside of the packet",
9938 .prog_type = BPF_PROG_TYPE_XDP,
9939 .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
9942 "XDP pkt read, pkt_end < pkt_data', good access",
9944 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
9945 offsetof(struct xdp_md, data)),
9946 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
9947 offsetof(struct xdp_md, data_end)),
9948 BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
9949 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
9950 BPF_JMP_REG(BPF_JLT, BPF_REG_3, BPF_REG_1, 1),
9951 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -8),
9952 BPF_MOV64_IMM(BPF_REG_0, 0),
9956 .prog_type = BPF_PROG_TYPE_XDP,
9957 .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
9960 "XDP pkt read, pkt_end < pkt_data', bad access 1",
9962 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
9963 offsetof(struct xdp_md, data)),
9964 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
9965 offsetof(struct xdp_md, data_end)),
9966 BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
9967 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
9968 BPF_JMP_REG(BPF_JLT, BPF_REG_3, BPF_REG_1, 1),
9969 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -4),
9970 BPF_MOV64_IMM(BPF_REG_0, 0),
9973 .errstr = "R1 offset is outside of the packet",
9975 .prog_type = BPF_PROG_TYPE_XDP,
9976 .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
9979 "XDP pkt read, pkt_end < pkt_data', bad access 2",
9981 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
9982 offsetof(struct xdp_md, data)),
9983 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
9984 offsetof(struct xdp_md, data_end)),
9985 BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
9986 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
9987 BPF_JMP_REG(BPF_JLT, BPF_REG_3, BPF_REG_1, 0),
9988 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -8),
9989 BPF_MOV64_IMM(BPF_REG_0, 0),
9992 .errstr = "R1 offset is outside of the packet",
9994 .prog_type = BPF_PROG_TYPE_XDP,
9995 .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
9998 "XDP pkt read, pkt_data' >= pkt_end, good access",
10000 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
10001 offsetof(struct xdp_md, data)),
10002 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
10003 offsetof(struct xdp_md, data_end)),
10004 BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
10005 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
10006 BPF_JMP_REG(BPF_JGE, BPF_REG_1, BPF_REG_3, 1),
10007 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1, -5),
10008 BPF_MOV64_IMM(BPF_REG_0, 0),
10012 .prog_type = BPF_PROG_TYPE_XDP,
10013 .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
10016 "XDP pkt read, pkt_data' >= pkt_end, bad access 1",
10018 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
10019 offsetof(struct xdp_md, data)),
10020 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
10021 offsetof(struct xdp_md, data_end)),
10022 BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
10023 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
10024 BPF_JMP_REG(BPF_JGE, BPF_REG_1, BPF_REG_3, 1),
10025 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -8),
10026 BPF_MOV64_IMM(BPF_REG_0, 0),
10029 .errstr = "R1 offset is outside of the packet",
10031 .prog_type = BPF_PROG_TYPE_XDP,
10032 .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
10035 "XDP pkt read, pkt_data' >= pkt_end, bad access 2",
10037 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
10038 offsetof(struct xdp_md, data)),
10039 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
10040 offsetof(struct xdp_md, data_end)),
10041 BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
10042 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
10043 BPF_JMP_REG(BPF_JGE, BPF_REG_1, BPF_REG_3, 0),
10044 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1, -5),
10045 BPF_MOV64_IMM(BPF_REG_0, 0),
10048 .errstr = "R1 offset is outside of the packet",
10050 .prog_type = BPF_PROG_TYPE_XDP,
10051 .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
10054 "XDP pkt read, pkt_end >= pkt_data', good access",
10056 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
10057 offsetof(struct xdp_md, data)),
10058 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
10059 offsetof(struct xdp_md, data_end)),
10060 BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
10061 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
10062 BPF_JMP_REG(BPF_JGE, BPF_REG_3, BPF_REG_1, 1),
10063 BPF_JMP_IMM(BPF_JA, 0, 0, 1),
10064 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -8),
10065 BPF_MOV64_IMM(BPF_REG_0, 0),
10069 .prog_type = BPF_PROG_TYPE_XDP,
10070 .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
10073 "XDP pkt read, pkt_end >= pkt_data', bad access 1",
10075 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
10076 offsetof(struct xdp_md, data)),
10077 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
10078 offsetof(struct xdp_md, data_end)),
10079 BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
10080 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
10081 BPF_JMP_REG(BPF_JGE, BPF_REG_3, BPF_REG_1, 1),
10082 BPF_JMP_IMM(BPF_JA, 0, 0, 1),
10083 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -4),
10084 BPF_MOV64_IMM(BPF_REG_0, 0),
10087 .errstr = "R1 offset is outside of the packet",
10089 .prog_type = BPF_PROG_TYPE_XDP,
10090 .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
10093 "XDP pkt read, pkt_end >= pkt_data', bad access 2",
10095 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
10096 offsetof(struct xdp_md, data)),
10097 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
10098 offsetof(struct xdp_md, data_end)),
10099 BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
10100 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
10101 BPF_JMP_REG(BPF_JGE, BPF_REG_3, BPF_REG_1, 1),
10102 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -8),
10103 BPF_MOV64_IMM(BPF_REG_0, 0),
10106 .errstr = "R1 offset is outside of the packet",
10108 .prog_type = BPF_PROG_TYPE_XDP,
10109 .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
10112 "XDP pkt read, pkt_data' <= pkt_end, good access",
10114 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
10115 offsetof(struct xdp_md, data)),
10116 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
10117 offsetof(struct xdp_md, data_end)),
10118 BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
10119 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
10120 BPF_JMP_REG(BPF_JLE, BPF_REG_1, BPF_REG_3, 1),
10121 BPF_JMP_IMM(BPF_JA, 0, 0, 1),
10122 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -8),
10123 BPF_MOV64_IMM(BPF_REG_0, 0),
10127 .prog_type = BPF_PROG_TYPE_XDP,
10128 .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
10131 "XDP pkt read, pkt_data' <= pkt_end, bad access 1",
10133 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
10134 offsetof(struct xdp_md, data)),
10135 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
10136 offsetof(struct xdp_md, data_end)),
10137 BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
10138 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
10139 BPF_JMP_REG(BPF_JLE, BPF_REG_1, BPF_REG_3, 1),
10140 BPF_JMP_IMM(BPF_JA, 0, 0, 1),
10141 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -4),
10142 BPF_MOV64_IMM(BPF_REG_0, 0),
10145 .errstr = "R1 offset is outside of the packet",
10147 .prog_type = BPF_PROG_TYPE_XDP,
10148 .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
10151 "XDP pkt read, pkt_data' <= pkt_end, bad access 2",
10153 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
10154 offsetof(struct xdp_md, data)),
10155 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
10156 offsetof(struct xdp_md, data_end)),
10157 BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
10158 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
10159 BPF_JMP_REG(BPF_JLE, BPF_REG_1, BPF_REG_3, 1),
10160 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -8),
10161 BPF_MOV64_IMM(BPF_REG_0, 0),
10164 .errstr = "R1 offset is outside of the packet",
10166 .prog_type = BPF_PROG_TYPE_XDP,
10167 .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
10170 "XDP pkt read, pkt_end <= pkt_data', good access",
10172 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
10173 offsetof(struct xdp_md, data)),
10174 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
10175 offsetof(struct xdp_md, data_end)),
10176 BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
10177 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
10178 BPF_JMP_REG(BPF_JLE, BPF_REG_3, BPF_REG_1, 1),
10179 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1, -5),
10180 BPF_MOV64_IMM(BPF_REG_0, 0),
10184 .prog_type = BPF_PROG_TYPE_XDP,
10185 .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
10188 "XDP pkt read, pkt_end <= pkt_data', bad access 1",
10190 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
10191 offsetof(struct xdp_md, data)),
10192 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
10193 offsetof(struct xdp_md, data_end)),
10194 BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
10195 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
10196 BPF_JMP_REG(BPF_JLE, BPF_REG_3, BPF_REG_1, 1),
10197 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -8),
10198 BPF_MOV64_IMM(BPF_REG_0, 0),
10201 .errstr = "R1 offset is outside of the packet",
10203 .prog_type = BPF_PROG_TYPE_XDP,
10204 .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
10207 "XDP pkt read, pkt_end <= pkt_data', bad access 2",
10209 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
10210 offsetof(struct xdp_md, data)),
10211 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
10212 offsetof(struct xdp_md, data_end)),
10213 BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
10214 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
10215 BPF_JMP_REG(BPF_JLE, BPF_REG_3, BPF_REG_1, 0),
10216 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1, -5),
10217 BPF_MOV64_IMM(BPF_REG_0, 0),
10220 .errstr = "R1 offset is outside of the packet",
10222 .prog_type = BPF_PROG_TYPE_XDP,
10223 .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
10226 "XDP pkt read, pkt_meta' > pkt_data, good access",
10228 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
10229 offsetof(struct xdp_md, data_meta)),
10230 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
10231 offsetof(struct xdp_md, data)),
10232 BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
10233 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
10234 BPF_JMP_REG(BPF_JGT, BPF_REG_1, BPF_REG_3, 1),
10235 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -8),
10236 BPF_MOV64_IMM(BPF_REG_0, 0),
10240 .prog_type = BPF_PROG_TYPE_XDP,
10241 .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
10244 "XDP pkt read, pkt_meta' > pkt_data, bad access 1",
10246 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
10247 offsetof(struct xdp_md, data_meta)),
10248 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
10249 offsetof(struct xdp_md, data)),
10250 BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
10251 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
10252 BPF_JMP_REG(BPF_JGT, BPF_REG_1, BPF_REG_3, 1),
10253 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -4),
10254 BPF_MOV64_IMM(BPF_REG_0, 0),
10257 .errstr = "R1 offset is outside of the packet",
10259 .prog_type = BPF_PROG_TYPE_XDP,
10260 .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
10263 "XDP pkt read, pkt_meta' > pkt_data, bad access 2",
10265 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
10266 offsetof(struct xdp_md, data_meta)),
10267 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
10268 offsetof(struct xdp_md, data)),
10269 BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
10270 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
10271 BPF_JMP_REG(BPF_JGT, BPF_REG_1, BPF_REG_3, 0),
10272 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -8),
10273 BPF_MOV64_IMM(BPF_REG_0, 0),
10276 .errstr = "R1 offset is outside of the packet",
10278 .prog_type = BPF_PROG_TYPE_XDP,
10279 .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
10282 "XDP pkt read, pkt_data > pkt_meta', good access",
10284 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
10285 offsetof(struct xdp_md, data_meta)),
10286 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
10287 offsetof(struct xdp_md, data)),
10288 BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
10289 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
10290 BPF_JMP_REG(BPF_JGT, BPF_REG_3, BPF_REG_1, 1),
10291 BPF_JMP_IMM(BPF_JA, 0, 0, 1),
10292 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1, -5),
10293 BPF_MOV64_IMM(BPF_REG_0, 0),
10297 .prog_type = BPF_PROG_TYPE_XDP,
10298 .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
10301 "XDP pkt read, pkt_data > pkt_meta', bad access 1",
10303 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
10304 offsetof(struct xdp_md, data_meta)),
10305 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
10306 offsetof(struct xdp_md, data)),
10307 BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
10308 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
10309 BPF_JMP_REG(BPF_JGT, BPF_REG_3, BPF_REG_1, 1),
10310 BPF_JMP_IMM(BPF_JA, 0, 0, 1),
10311 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -8),
10312 BPF_MOV64_IMM(BPF_REG_0, 0),
10315 .errstr = "R1 offset is outside of the packet",
10317 .prog_type = BPF_PROG_TYPE_XDP,
10318 .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
10321 "XDP pkt read, pkt_data > pkt_meta', bad access 2",
10323 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
10324 offsetof(struct xdp_md, data_meta)),
10325 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
10326 offsetof(struct xdp_md, data)),
10327 BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
10328 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
10329 BPF_JMP_REG(BPF_JGT, BPF_REG_3, BPF_REG_1, 1),
10330 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -8),
10331 BPF_MOV64_IMM(BPF_REG_0, 0),
10334 .errstr = "R1 offset is outside of the packet",
10336 .prog_type = BPF_PROG_TYPE_XDP,
10337 .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
10340 "XDP pkt read, pkt_meta' < pkt_data, good access",
10342 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
10343 offsetof(struct xdp_md, data_meta)),
10344 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
10345 offsetof(struct xdp_md, data)),
10346 BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
10347 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
10348 BPF_JMP_REG(BPF_JLT, BPF_REG_1, BPF_REG_3, 1),
10349 BPF_JMP_IMM(BPF_JA, 0, 0, 1),
10350 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1, -5),
10351 BPF_MOV64_IMM(BPF_REG_0, 0),
10355 .prog_type = BPF_PROG_TYPE_XDP,
10356 .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
10359 "XDP pkt read, pkt_meta' < pkt_data, bad access 1",
10361 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
10362 offsetof(struct xdp_md, data_meta)),
10363 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
10364 offsetof(struct xdp_md, data)),
10365 BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
10366 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
10367 BPF_JMP_REG(BPF_JLT, BPF_REG_1, BPF_REG_3, 1),
10368 BPF_JMP_IMM(BPF_JA, 0, 0, 1),
10369 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -8),
10370 BPF_MOV64_IMM(BPF_REG_0, 0),
10373 .errstr = "R1 offset is outside of the packet",
10375 .prog_type = BPF_PROG_TYPE_XDP,
10376 .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
10379 "XDP pkt read, pkt_meta' < pkt_data, bad access 2",
10381 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
10382 offsetof(struct xdp_md, data_meta)),
10383 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
10384 offsetof(struct xdp_md, data)),
10385 BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
10386 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
10387 BPF_JMP_REG(BPF_JLT, BPF_REG_1, BPF_REG_3, 1),
10388 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -8),
10389 BPF_MOV64_IMM(BPF_REG_0, 0),
10392 .errstr = "R1 offset is outside of the packet",
10394 .prog_type = BPF_PROG_TYPE_XDP,
10395 .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
10398 "XDP pkt read, pkt_data < pkt_meta', good access",
10400 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
10401 offsetof(struct xdp_md, data_meta)),
10402 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
10403 offsetof(struct xdp_md, data)),
10404 BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
10405 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
10406 BPF_JMP_REG(BPF_JLT, BPF_REG_3, BPF_REG_1, 1),
10407 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -8),
10408 BPF_MOV64_IMM(BPF_REG_0, 0),
10412 .prog_type = BPF_PROG_TYPE_XDP,
10413 .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
10416 "XDP pkt read, pkt_data < pkt_meta', bad access 1",
10418 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
10419 offsetof(struct xdp_md, data_meta)),
10420 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
10421 offsetof(struct xdp_md, data)),
10422 BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
10423 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
10424 BPF_JMP_REG(BPF_JLT, BPF_REG_3, BPF_REG_1, 1),
10425 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -4),
10426 BPF_MOV64_IMM(BPF_REG_0, 0),
10429 .errstr = "R1 offset is outside of the packet",
10431 .prog_type = BPF_PROG_TYPE_XDP,
10432 .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
10435 "XDP pkt read, pkt_data < pkt_meta', bad access 2",
10437 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
10438 offsetof(struct xdp_md, data_meta)),
10439 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
10440 offsetof(struct xdp_md, data)),
10441 BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
10442 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
10443 BPF_JMP_REG(BPF_JLT, BPF_REG_3, BPF_REG_1, 0),
10444 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -8),
10445 BPF_MOV64_IMM(BPF_REG_0, 0),
10448 .errstr = "R1 offset is outside of the packet",
10450 .prog_type = BPF_PROG_TYPE_XDP,
10451 .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
10454 "XDP pkt read, pkt_meta' >= pkt_data, good access",
10456 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
10457 offsetof(struct xdp_md, data_meta)),
10458 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
10459 offsetof(struct xdp_md, data)),
10460 BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
10461 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
10462 BPF_JMP_REG(BPF_JGE, BPF_REG_1, BPF_REG_3, 1),
10463 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1, -5),
10464 BPF_MOV64_IMM(BPF_REG_0, 0),
10468 .prog_type = BPF_PROG_TYPE_XDP,
10469 .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
10472 "XDP pkt read, pkt_meta' >= pkt_data, bad access 1",
10474 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
10475 offsetof(struct xdp_md, data_meta)),
10476 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
10477 offsetof(struct xdp_md, data)),
10478 BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
10479 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
10480 BPF_JMP_REG(BPF_JGE, BPF_REG_1, BPF_REG_3, 1),
10481 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -8),
10482 BPF_MOV64_IMM(BPF_REG_0, 0),
10485 .errstr = "R1 offset is outside of the packet",
10487 .prog_type = BPF_PROG_TYPE_XDP,
10488 .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
10491 "XDP pkt read, pkt_meta' >= pkt_data, bad access 2",
10493 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
10494 offsetof(struct xdp_md, data_meta)),
10495 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
10496 offsetof(struct xdp_md, data)),
10497 BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
10498 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
10499 BPF_JMP_REG(BPF_JGE, BPF_REG_1, BPF_REG_3, 0),
10500 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1, -5),
10501 BPF_MOV64_IMM(BPF_REG_0, 0),
10504 .errstr = "R1 offset is outside of the packet",
10506 .prog_type = BPF_PROG_TYPE_XDP,
10507 .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
10510 "XDP pkt read, pkt_data >= pkt_meta', good access",
10512 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
10513 offsetof(struct xdp_md, data_meta)),
10514 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
10515 offsetof(struct xdp_md, data)),
10516 BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
10517 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
10518 BPF_JMP_REG(BPF_JGE, BPF_REG_3, BPF_REG_1, 1),
10519 BPF_JMP_IMM(BPF_JA, 0, 0, 1),
10520 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -8),
10521 BPF_MOV64_IMM(BPF_REG_0, 0),
10525 .prog_type = BPF_PROG_TYPE_XDP,
10526 .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
10529 "XDP pkt read, pkt_data >= pkt_meta', bad access 1",
10531 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
10532 offsetof(struct xdp_md, data_meta)),
10533 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
10534 offsetof(struct xdp_md, data)),
10535 BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
10536 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
10537 BPF_JMP_REG(BPF_JGE, BPF_REG_3, BPF_REG_1, 1),
10538 BPF_JMP_IMM(BPF_JA, 0, 0, 1),
10539 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -4),
10540 BPF_MOV64_IMM(BPF_REG_0, 0),
10543 .errstr = "R1 offset is outside of the packet",
10545 .prog_type = BPF_PROG_TYPE_XDP,
10546 .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
10549 "XDP pkt read, pkt_data >= pkt_meta', bad access 2",
10551 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
10552 offsetof(struct xdp_md, data_meta)),
10553 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
10554 offsetof(struct xdp_md, data)),
10555 BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
10556 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
10557 BPF_JMP_REG(BPF_JGE, BPF_REG_3, BPF_REG_1, 1),
10558 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -8),
10559 BPF_MOV64_IMM(BPF_REG_0, 0),
10562 .errstr = "R1 offset is outside of the packet",
10564 .prog_type = BPF_PROG_TYPE_XDP,
10565 .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
10568 "XDP pkt read, pkt_meta' <= pkt_data, good access",
10570 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
10571 offsetof(struct xdp_md, data_meta)),
10572 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
10573 offsetof(struct xdp_md, data)),
10574 BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
10575 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
10576 BPF_JMP_REG(BPF_JLE, BPF_REG_1, BPF_REG_3, 1),
10577 BPF_JMP_IMM(BPF_JA, 0, 0, 1),
10578 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -8),
10579 BPF_MOV64_IMM(BPF_REG_0, 0),
10583 .prog_type = BPF_PROG_TYPE_XDP,
10584 .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
10587 "XDP pkt read, pkt_meta' <= pkt_data, bad access 1",
10589 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
10590 offsetof(struct xdp_md, data_meta)),
10591 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
10592 offsetof(struct xdp_md, data)),
10593 BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
10594 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
10595 BPF_JMP_REG(BPF_JLE, BPF_REG_1, BPF_REG_3, 1),
10596 BPF_JMP_IMM(BPF_JA, 0, 0, 1),
10597 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -4),
10598 BPF_MOV64_IMM(BPF_REG_0, 0),
10601 .errstr = "R1 offset is outside of the packet",
10603 .prog_type = BPF_PROG_TYPE_XDP,
10604 .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
10607 "XDP pkt read, pkt_meta' <= pkt_data, bad access 2",
10609 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
10610 offsetof(struct xdp_md, data_meta)),
10611 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
10612 offsetof(struct xdp_md, data)),
10613 BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
10614 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
10615 BPF_JMP_REG(BPF_JLE, BPF_REG_1, BPF_REG_3, 1),
10616 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -8),
10617 BPF_MOV64_IMM(BPF_REG_0, 0),
10620 .errstr = "R1 offset is outside of the packet",
10622 .prog_type = BPF_PROG_TYPE_XDP,
10623 .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
10626 "XDP pkt read, pkt_data <= pkt_meta', good access",
10628 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
10629 offsetof(struct xdp_md, data_meta)),
10630 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
10631 offsetof(struct xdp_md, data)),
10632 BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
10633 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
10634 BPF_JMP_REG(BPF_JLE, BPF_REG_3, BPF_REG_1, 1),
10635 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1, -5),
10636 BPF_MOV64_IMM(BPF_REG_0, 0),
10640 .prog_type = BPF_PROG_TYPE_XDP,
10641 .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
10644 "XDP pkt read, pkt_data <= pkt_meta', bad access 1",
10646 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
10647 offsetof(struct xdp_md, data_meta)),
10648 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
10649 offsetof(struct xdp_md, data)),
10650 BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
10651 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
10652 BPF_JMP_REG(BPF_JLE, BPF_REG_3, BPF_REG_1, 1),
10653 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -8),
10654 BPF_MOV64_IMM(BPF_REG_0, 0),
10657 .errstr = "R1 offset is outside of the packet",
10659 .prog_type = BPF_PROG_TYPE_XDP,
10660 .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
10663 "XDP pkt read, pkt_data <= pkt_meta', bad access 2",
10665 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
10666 offsetof(struct xdp_md, data_meta)),
10667 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
10668 offsetof(struct xdp_md, data)),
10669 BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
10670 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
10671 BPF_JMP_REG(BPF_JLE, BPF_REG_3, BPF_REG_1, 0),
10672 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1, -5),
10673 BPF_MOV64_IMM(BPF_REG_0, 0),
10676 .errstr = "R1 offset is outside of the packet",
10678 .prog_type = BPF_PROG_TYPE_XDP,
10679 .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
10682 "check deducing bounds from const, 1",
10684 BPF_MOV64_IMM(BPF_REG_0, 1),
10685 BPF_JMP_IMM(BPF_JSGE, BPF_REG_0, 1, 0),
10686 BPF_ALU64_REG(BPF_SUB, BPF_REG_0, BPF_REG_1),
10690 .errstr = "R0 tried to subtract pointer from scalar",
10693 "check deducing bounds from const, 2",
10695 BPF_MOV64_IMM(BPF_REG_0, 1),
10696 BPF_JMP_IMM(BPF_JSGE, BPF_REG_0, 1, 1),
10698 BPF_JMP_IMM(BPF_JSLE, BPF_REG_0, 1, 1),
10700 BPF_ALU64_REG(BPF_SUB, BPF_REG_1, BPF_REG_0),
10707 "check deducing bounds from const, 3",
10709 BPF_MOV64_IMM(BPF_REG_0, 0),
10710 BPF_JMP_IMM(BPF_JSLE, BPF_REG_0, 0, 0),
10711 BPF_ALU64_REG(BPF_SUB, BPF_REG_0, BPF_REG_1),
10715 .errstr = "R0 tried to subtract pointer from scalar",
10718 "check deducing bounds from const, 4",
10720 BPF_MOV64_IMM(BPF_REG_0, 0),
10721 BPF_JMP_IMM(BPF_JSLE, BPF_REG_0, 0, 1),
10723 BPF_JMP_IMM(BPF_JSGE, BPF_REG_0, 0, 1),
10725 BPF_ALU64_REG(BPF_SUB, BPF_REG_1, BPF_REG_0),
10731 "check deducing bounds from const, 5",
10733 BPF_MOV64_IMM(BPF_REG_0, 0),
10734 BPF_JMP_IMM(BPF_JSGE, BPF_REG_0, 1, 1),
10735 BPF_ALU64_REG(BPF_SUB, BPF_REG_0, BPF_REG_1),
10739 .errstr = "R0 tried to subtract pointer from scalar",
10742 "check deducing bounds from const, 6",
10744 BPF_MOV64_IMM(BPF_REG_0, 0),
10745 BPF_JMP_IMM(BPF_JSGE, BPF_REG_0, 0, 1),
10747 BPF_ALU64_REG(BPF_SUB, BPF_REG_0, BPF_REG_1),
10751 .errstr = "R0 tried to subtract pointer from scalar",
10754 "check deducing bounds from const, 7",
10756 BPF_MOV64_IMM(BPF_REG_0, ~0),
10757 BPF_JMP_IMM(BPF_JSGE, BPF_REG_0, 0, 0),
10758 BPF_ALU64_REG(BPF_SUB, BPF_REG_1, BPF_REG_0),
10759 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
10760 offsetof(struct __sk_buff, mark)),
10764 .errstr = "dereference of modified ctx ptr",
10765 .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
10768 "check deducing bounds from const, 8",
10770 BPF_MOV64_IMM(BPF_REG_0, ~0),
10771 BPF_JMP_IMM(BPF_JSGE, BPF_REG_0, 0, 1),
10772 BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_0),
10773 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
10774 offsetof(struct __sk_buff, mark)),
10778 .errstr = "dereference of modified ctx ptr",
10779 .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
10782 "check deducing bounds from const, 9",
10784 BPF_MOV64_IMM(BPF_REG_0, 0),
10785 BPF_JMP_IMM(BPF_JSGE, BPF_REG_0, 0, 0),
10786 BPF_ALU64_REG(BPF_SUB, BPF_REG_0, BPF_REG_1),
10790 .errstr = "R0 tried to subtract pointer from scalar",
10793 "check deducing bounds from const, 10",
10795 BPF_MOV64_IMM(BPF_REG_0, 0),
10796 BPF_JMP_IMM(BPF_JSLE, BPF_REG_0, 0, 0),
10797 /* Marks reg as unknown. */
10798 BPF_ALU64_IMM(BPF_NEG, BPF_REG_0, 0),
10799 BPF_ALU64_REG(BPF_SUB, BPF_REG_0, BPF_REG_1),
10803 .errstr = "math between ctx pointer and register with unbounded min value is not allowed",
10806 "bpf_exit with invalid return code. test1",
10808 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1, 0),
10811 .errstr = "R0 has value (0x0; 0xffffffff)",
10813 .prog_type = BPF_PROG_TYPE_CGROUP_SOCK,
10816 "bpf_exit with invalid return code. test2",
10818 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1, 0),
10819 BPF_ALU64_IMM(BPF_AND, BPF_REG_0, 1),
10823 .prog_type = BPF_PROG_TYPE_CGROUP_SOCK,
10826 "bpf_exit with invalid return code. test3",
10828 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1, 0),
10829 BPF_ALU64_IMM(BPF_AND, BPF_REG_0, 3),
10832 .errstr = "R0 has value (0x0; 0x3)",
10834 .prog_type = BPF_PROG_TYPE_CGROUP_SOCK,
10837 "bpf_exit with invalid return code. test4",
10839 BPF_MOV64_IMM(BPF_REG_0, 1),
10843 .prog_type = BPF_PROG_TYPE_CGROUP_SOCK,
10846 "bpf_exit with invalid return code. test5",
10848 BPF_MOV64_IMM(BPF_REG_0, 2),
10851 .errstr = "R0 has value (0x2; 0x0)",
10853 .prog_type = BPF_PROG_TYPE_CGROUP_SOCK,
10856 "bpf_exit with invalid return code. test6",
10858 BPF_MOV64_REG(BPF_REG_0, BPF_REG_1),
10861 .errstr = "R0 is not a known value (ctx)",
10863 .prog_type = BPF_PROG_TYPE_CGROUP_SOCK,
10866 "bpf_exit with invalid return code. test7",
10868 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1, 0),
10869 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1, 4),
10870 BPF_ALU64_REG(BPF_MUL, BPF_REG_0, BPF_REG_2),
10873 .errstr = "R0 has unknown scalar value",
10875 .prog_type = BPF_PROG_TYPE_CGROUP_SOCK,
10878 "calls: basic sanity",
10880 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 2),
10881 BPF_MOV64_IMM(BPF_REG_0, 1),
10883 BPF_MOV64_IMM(BPF_REG_0, 2),
10886 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
10890 "calls: not on unpriviledged",
10892 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 2),
10893 BPF_MOV64_IMM(BPF_REG_0, 1),
10895 BPF_MOV64_IMM(BPF_REG_0, 2),
10898 .errstr_unpriv = "function calls to other bpf functions are allowed for root only",
10899 .result_unpriv = REJECT,
10904 "calls: div by 0 in subprog",
10906 BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
10907 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 8),
10908 BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
10909 BPF_LDX_MEM(BPF_W, BPF_REG_1, BPF_REG_1,
10910 offsetof(struct __sk_buff, data_end)),
10911 BPF_MOV64_REG(BPF_REG_2, BPF_REG_0),
10912 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, 8),
10913 BPF_JMP_REG(BPF_JGT, BPF_REG_2, BPF_REG_1, 1),
10914 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_0, 0),
10915 BPF_MOV64_IMM(BPF_REG_0, 1),
10917 BPF_MOV32_IMM(BPF_REG_2, 0),
10918 BPF_MOV32_IMM(BPF_REG_3, 1),
10919 BPF_ALU32_REG(BPF_DIV, BPF_REG_3, BPF_REG_2),
10920 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
10921 offsetof(struct __sk_buff, data)),
10924 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
10929 "calls: multiple ret types in subprog 1",
10931 BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
10932 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 8),
10933 BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
10934 BPF_LDX_MEM(BPF_W, BPF_REG_1, BPF_REG_1,
10935 offsetof(struct __sk_buff, data_end)),
10936 BPF_MOV64_REG(BPF_REG_2, BPF_REG_0),
10937 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, 8),
10938 BPF_JMP_REG(BPF_JGT, BPF_REG_2, BPF_REG_1, 1),
10939 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_0, 0),
10940 BPF_MOV64_IMM(BPF_REG_0, 1),
10942 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
10943 offsetof(struct __sk_buff, data)),
10944 BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 1),
10945 BPF_MOV32_IMM(BPF_REG_0, 42),
10948 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
10950 .errstr = "R0 invalid mem access 'inv'",
10953 "calls: multiple ret types in subprog 2",
10955 BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
10956 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 8),
10957 BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
10958 BPF_LDX_MEM(BPF_W, BPF_REG_1, BPF_REG_1,
10959 offsetof(struct __sk_buff, data_end)),
10960 BPF_MOV64_REG(BPF_REG_2, BPF_REG_0),
10961 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, 8),
10962 BPF_JMP_REG(BPF_JGT, BPF_REG_2, BPF_REG_1, 1),
10963 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_0, 0),
10964 BPF_MOV64_IMM(BPF_REG_0, 1),
10966 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
10967 offsetof(struct __sk_buff, data)),
10968 BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
10969 BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 9),
10970 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
10971 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
10972 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
10973 BPF_LD_MAP_FD(BPF_REG_1, 0),
10974 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
10975 BPF_FUNC_map_lookup_elem),
10976 BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 1),
10977 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_6,
10978 offsetof(struct __sk_buff, data)),
10979 BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 64),
10982 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
10983 .fixup_map_hash_8b = { 16 },
10985 .errstr = "R0 min value is outside of the array range",
10988 "calls: overlapping caller/callee",
10990 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 0),
10991 BPF_MOV64_IMM(BPF_REG_0, 1),
10994 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
10995 .errstr = "last insn is not an exit or jmp",
10999 "calls: wrong recursive calls",
11001 BPF_JMP_IMM(BPF_JA, 0, 0, 4),
11002 BPF_JMP_IMM(BPF_JA, 0, 0, 4),
11003 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, -2),
11004 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, -2),
11005 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, -2),
11006 BPF_MOV64_IMM(BPF_REG_0, 1),
11009 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
11010 .errstr = "jump out of range",
11014 "calls: wrong src reg",
11016 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 2, 0, 0),
11017 BPF_MOV64_IMM(BPF_REG_0, 1),
11020 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
11021 .errstr = "BPF_CALL uses reserved fields",
11025 "calls: wrong off value",
11027 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, -1, 2),
11028 BPF_MOV64_IMM(BPF_REG_0, 1),
11030 BPF_MOV64_IMM(BPF_REG_0, 2),
11033 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
11034 .errstr = "BPF_CALL uses reserved fields",
11038 "calls: jump back loop",
11040 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, -1),
11041 BPF_MOV64_IMM(BPF_REG_0, 1),
11044 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
11045 .errstr = "back-edge from insn 0 to 0",
11049 "calls: conditional call",
11051 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
11052 offsetof(struct __sk_buff, mark)),
11053 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 3),
11054 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 2),
11055 BPF_MOV64_IMM(BPF_REG_0, 1),
11057 BPF_MOV64_IMM(BPF_REG_0, 2),
11060 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
11061 .errstr = "jump out of range",
11065 "calls: conditional call 2",
11067 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
11068 offsetof(struct __sk_buff, mark)),
11069 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 3),
11070 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 4),
11071 BPF_MOV64_IMM(BPF_REG_0, 1),
11073 BPF_MOV64_IMM(BPF_REG_0, 2),
11075 BPF_MOV64_IMM(BPF_REG_0, 3),
11078 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
11082 "calls: conditional call 3",
11084 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
11085 offsetof(struct __sk_buff, mark)),
11086 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 3),
11087 BPF_JMP_IMM(BPF_JA, 0, 0, 4),
11088 BPF_MOV64_IMM(BPF_REG_0, 1),
11090 BPF_MOV64_IMM(BPF_REG_0, 1),
11091 BPF_JMP_IMM(BPF_JA, 0, 0, -6),
11092 BPF_MOV64_IMM(BPF_REG_0, 3),
11093 BPF_JMP_IMM(BPF_JA, 0, 0, -6),
11095 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
11096 .errstr = "back-edge from insn",
11100 "calls: conditional call 4",
11102 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
11103 offsetof(struct __sk_buff, mark)),
11104 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 3),
11105 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 4),
11106 BPF_MOV64_IMM(BPF_REG_0, 1),
11108 BPF_MOV64_IMM(BPF_REG_0, 1),
11109 BPF_JMP_IMM(BPF_JA, 0, 0, -5),
11110 BPF_MOV64_IMM(BPF_REG_0, 3),
11113 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
11117 "calls: conditional call 5",
11119 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
11120 offsetof(struct __sk_buff, mark)),
11121 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 3),
11122 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 4),
11123 BPF_MOV64_IMM(BPF_REG_0, 1),
11125 BPF_MOV64_IMM(BPF_REG_0, 1),
11126 BPF_JMP_IMM(BPF_JA, 0, 0, -6),
11127 BPF_MOV64_IMM(BPF_REG_0, 3),
11130 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
11131 .errstr = "back-edge from insn",
11135 "calls: conditional call 6",
11137 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 2),
11138 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, -2),
11140 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
11141 offsetof(struct __sk_buff, mark)),
11144 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
11145 .errstr = "back-edge from insn",
11149 "calls: using r0 returned by callee",
11151 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1),
11153 BPF_MOV64_IMM(BPF_REG_0, 2),
11156 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
11160 "calls: using uninit r0 from callee",
11162 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1),
11166 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
11167 .errstr = "!read_ok",
11171 "calls: callee is using r1",
11173 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1),
11175 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
11176 offsetof(struct __sk_buff, len)),
11179 .prog_type = BPF_PROG_TYPE_SCHED_ACT,
11181 .retval = TEST_DATA_LEN,
11184 "calls: callee using args1",
11186 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1),
11188 BPF_MOV64_REG(BPF_REG_0, BPF_REG_1),
11191 .errstr_unpriv = "allowed for root only",
11192 .result_unpriv = REJECT,
11194 .retval = POINTER_VALUE,
11197 "calls: callee using wrong args2",
11199 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1),
11201 BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
11204 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
11205 .errstr = "R2 !read_ok",
11209 "calls: callee using two args",
11211 BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
11212 BPF_LDX_MEM(BPF_W, BPF_REG_1, BPF_REG_6,
11213 offsetof(struct __sk_buff, len)),
11214 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_6,
11215 offsetof(struct __sk_buff, len)),
11216 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1),
11218 BPF_MOV64_REG(BPF_REG_0, BPF_REG_1),
11219 BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_2),
11222 .errstr_unpriv = "allowed for root only",
11223 .result_unpriv = REJECT,
11225 .retval = TEST_DATA_LEN + TEST_DATA_LEN - ETH_HLEN - ETH_HLEN,
11228 "calls: callee changing pkt pointers",
11230 BPF_LDX_MEM(BPF_W, BPF_REG_6, BPF_REG_1,
11231 offsetof(struct xdp_md, data)),
11232 BPF_LDX_MEM(BPF_W, BPF_REG_7, BPF_REG_1,
11233 offsetof(struct xdp_md, data_end)),
11234 BPF_MOV64_REG(BPF_REG_8, BPF_REG_6),
11235 BPF_ALU64_IMM(BPF_ADD, BPF_REG_8, 8),
11236 BPF_JMP_REG(BPF_JGT, BPF_REG_8, BPF_REG_7, 2),
11237 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 3),
11238 /* clear_all_pkt_pointers() has to walk all frames
11239 * to make sure that pkt pointers in the caller
11240 * are cleared when callee is calling a helper that
11241 * adjusts packet size
11243 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_6, 0),
11244 BPF_MOV32_IMM(BPF_REG_0, 0),
11246 BPF_MOV64_IMM(BPF_REG_2, 0),
11247 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
11248 BPF_FUNC_xdp_adjust_head),
11252 .errstr = "R6 invalid mem access 'inv'",
11253 .prog_type = BPF_PROG_TYPE_XDP,
11254 .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
11257 "calls: two calls with args",
11259 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1),
11261 BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
11262 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 6),
11263 BPF_MOV64_REG(BPF_REG_7, BPF_REG_0),
11264 BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
11265 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 3),
11266 BPF_ALU64_REG(BPF_ADD, BPF_REG_7, BPF_REG_0),
11267 BPF_MOV64_REG(BPF_REG_0, BPF_REG_7),
11269 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
11270 offsetof(struct __sk_buff, len)),
11273 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
11275 .retval = TEST_DATA_LEN + TEST_DATA_LEN,
11278 "calls: calls with stack arith",
11280 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
11281 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -64),
11282 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1),
11284 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -64),
11285 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1),
11287 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -64),
11288 BPF_MOV64_IMM(BPF_REG_0, 42),
11289 BPF_STX_MEM(BPF_DW, BPF_REG_2, BPF_REG_0, 0),
11292 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
11297 "calls: calls with misaligned stack access",
11299 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
11300 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -63),
11301 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1),
11303 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -61),
11304 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1),
11306 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -63),
11307 BPF_MOV64_IMM(BPF_REG_0, 42),
11308 BPF_STX_MEM(BPF_DW, BPF_REG_2, BPF_REG_0, 0),
11311 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
11312 .flags = F_LOAD_WITH_STRICT_ALIGNMENT,
11313 .errstr = "misaligned stack access",
11317 "calls: calls control flow, jump test",
11319 BPF_MOV64_IMM(BPF_REG_0, 42),
11320 BPF_JMP_IMM(BPF_JA, 0, 0, 2),
11321 BPF_MOV64_IMM(BPF_REG_0, 43),
11322 BPF_JMP_IMM(BPF_JA, 0, 0, 1),
11323 BPF_JMP_IMM(BPF_JA, 0, 0, -3),
11326 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
11331 "calls: calls control flow, jump test 2",
11333 BPF_MOV64_IMM(BPF_REG_0, 42),
11334 BPF_JMP_IMM(BPF_JA, 0, 0, 2),
11335 BPF_MOV64_IMM(BPF_REG_0, 43),
11336 BPF_JMP_IMM(BPF_JA, 0, 0, 1),
11337 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, -3),
11340 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
11341 .errstr = "jump out of range from insn 1 to 4",
11345 "calls: two calls with bad jump",
11347 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1),
11349 BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
11350 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 6),
11351 BPF_MOV64_REG(BPF_REG_7, BPF_REG_0),
11352 BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
11353 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 3),
11354 BPF_ALU64_REG(BPF_ADD, BPF_REG_7, BPF_REG_0),
11355 BPF_MOV64_REG(BPF_REG_0, BPF_REG_7),
11357 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
11358 offsetof(struct __sk_buff, len)),
11359 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, -3),
11362 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
11363 .errstr = "jump out of range from insn 11 to 9",
11367 "calls: recursive call. test1",
11369 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1),
11371 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, -1),
11374 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
11375 .errstr = "back-edge",
11379 "calls: recursive call. test2",
11381 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1),
11383 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, -3),
11386 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
11387 .errstr = "back-edge",
11391 "calls: unreachable code",
11393 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1),
11395 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1),
11397 BPF_MOV64_IMM(BPF_REG_0, 0),
11399 BPF_MOV64_IMM(BPF_REG_0, 0),
11402 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
11403 .errstr = "unreachable insn 6",
11407 "calls: invalid call",
11409 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1),
11411 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, -4),
11414 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
11415 .errstr = "invalid destination",
11419 "calls: invalid call 2",
11421 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1),
11423 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 0x7fffffff),
11426 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
11427 .errstr = "invalid destination",
11431 "calls: jumping across function bodies. test1",
11433 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 2),
11434 BPF_MOV64_IMM(BPF_REG_0, 0),
11436 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, -3),
11439 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
11440 .errstr = "jump out of range",
11444 "calls: jumping across function bodies. test2",
11446 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, 3),
11447 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 2),
11448 BPF_MOV64_IMM(BPF_REG_0, 0),
11452 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
11453 .errstr = "jump out of range",
11457 "calls: call without exit",
11459 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1),
11461 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1),
11463 BPF_MOV64_IMM(BPF_REG_0, 0),
11464 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, -2),
11466 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
11467 .errstr = "not an exit",
11471 "calls: call into middle of ld_imm64",
11473 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 3),
11474 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 3),
11475 BPF_MOV64_IMM(BPF_REG_0, 0),
11477 BPF_LD_IMM64(BPF_REG_0, 0),
11480 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
11481 .errstr = "last insn",
11485 "calls: call into middle of other call",
11487 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 3),
11488 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 3),
11489 BPF_MOV64_IMM(BPF_REG_0, 0),
11491 BPF_MOV64_IMM(BPF_REG_0, 0),
11492 BPF_MOV64_IMM(BPF_REG_0, 0),
11495 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
11496 .errstr = "last insn",
/* Mixing BPF_LD_ABS with a callee that changes packet data: the main
 * prog issues LD_ABS loads, calls a subprog that invokes
 * bpf_skb_vlan_push() (which may reallocate/shift skb data), then issues
 * more LD_ABS loads.  The verifier must reject this combination — see
 * the expected "cannot be mixed" error below.
 */
11500 "calls: ld_abs with changing ctx data in callee",
11502 BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
11503 BPF_LD_ABS(BPF_B, 0),
11504 BPF_LD_ABS(BPF_H, 0),
11505 BPF_LD_ABS(BPF_W, 0),
11506 BPF_MOV64_REG(BPF_REG_7, BPF_REG_6),
11507 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 5),
11508 BPF_MOV64_REG(BPF_REG_6, BPF_REG_7),
/* LD_ABS after the ctx-mutating call -> must be rejected */
11509 BPF_LD_ABS(BPF_B, 0),
11510 BPF_LD_ABS(BPF_H, 0),
11511 BPF_LD_ABS(BPF_W, 0),
/* subprog: pushes a VLAN tag, i.e. modifies skb data behind LD_ABS */
11513 BPF_MOV64_IMM(BPF_REG_2, 1),
11514 BPF_MOV64_IMM(BPF_REG_3, 2),
11515 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
11516 BPF_FUNC_skb_vlan_push),
11519 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
11520 .errstr = "BPF_LD_[ABS|IND] instructions cannot be mixed",
11524 "calls: two calls with bad fallthrough",
11526 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1),
11528 BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
11529 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 6),
11530 BPF_MOV64_REG(BPF_REG_7, BPF_REG_0),
11531 BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
11532 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 3),
11533 BPF_ALU64_REG(BPF_ADD, BPF_REG_7, BPF_REG_0),
11534 BPF_MOV64_REG(BPF_REG_0, BPF_REG_7),
11535 BPF_MOV64_REG(BPF_REG_0, BPF_REG_0),
11536 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
11537 offsetof(struct __sk_buff, len)),
11540 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
11541 .errstr = "not an exit",
11545 "calls: two calls with stack read",
11547 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
11548 BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
11549 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -8),
11550 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1),
11552 BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
11553 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 6),
11554 BPF_MOV64_REG(BPF_REG_7, BPF_REG_0),
11555 BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
11556 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 3),
11557 BPF_ALU64_REG(BPF_ADD, BPF_REG_7, BPF_REG_0),
11558 BPF_MOV64_REG(BPF_REG_0, BPF_REG_7),
11560 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1, 0),
11563 .prog_type = BPF_PROG_TYPE_XDP,
11567 "calls: two calls with stack write",
11570 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
11571 BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
11572 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -8),
11573 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
11574 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -16),
11575 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 2),
11576 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_10, -16),
11580 BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
11581 BPF_MOV64_REG(BPF_REG_7, BPF_REG_2),
11582 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 7),
11583 BPF_MOV64_REG(BPF_REG_8, BPF_REG_0),
11584 BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
11585 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 4),
11586 BPF_ALU64_REG(BPF_ADD, BPF_REG_8, BPF_REG_0),
11587 BPF_MOV64_REG(BPF_REG_0, BPF_REG_8),
11588 /* write into stack frame of main prog */
11589 BPF_STX_MEM(BPF_DW, BPF_REG_7, BPF_REG_0, 0),
11593 /* read from stack frame of main prog */
11594 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1, 0),
11597 .prog_type = BPF_PROG_TYPE_XDP,
/* Combined stack-depth check, access before the call: main touches
 * fp-300 and the callee touches fp-300 again, so the summed frame
 * depth across the call chain exceeds the limit (512 bytes per the
 * sibling "three frames" tests) -> "combined stack size" error.
 */
11601 "calls: stack overflow using two frames (pre-call access)",
/* main frame: write at depth 300 before calling the subprog */
11604 BPF_ST_MEM(BPF_B, BPF_REG_10, -300, 0),
11605 BPF_RAW_INSN(BPF_JMP|BPF_CALL, 0, 1, 0, 1),
/* callee frame: another 300 bytes -> combined depth too large */
11609 BPF_ST_MEM(BPF_B, BPF_REG_10, -300, 0),
11610 BPF_MOV64_IMM(BPF_REG_0, 0),
11613 .prog_type = BPF_PROG_TYPE_XDP,
11614 .errstr = "combined stack size",
/* Same combined stack-depth overflow as the pre-call variant, but main's
 * deep access (fp-300) happens after the call returns.  Ordering must
 * not matter: the verifier still rejects with "combined stack size".
 */
11618 "calls: stack overflow using two frames (post-call access)",
11621 BPF_RAW_INSN(BPF_JMP|BPF_CALL, 0, 1, 0, 2),
/* main frame deep access, post-call this time */
11622 BPF_ST_MEM(BPF_B, BPF_REG_10, -300, 0),
/* callee frame: another 300 bytes */
11626 BPF_ST_MEM(BPF_B, BPF_REG_10, -300, 0),
11627 BPF_MOV64_IMM(BPF_REG_0, 0),
11630 .prog_type = BPF_PROG_TYPE_XDP,
11631 .errstr = "combined stack size",
11635 "calls: stack depth check using three frames. test1",
11638 BPF_RAW_INSN(BPF_JMP|BPF_CALL, 0, 1, 0, 4), /* call A */
11639 BPF_RAW_INSN(BPF_JMP|BPF_CALL, 0, 1, 0, 5), /* call B */
11640 BPF_ST_MEM(BPF_B, BPF_REG_10, -32, 0),
11641 BPF_MOV64_IMM(BPF_REG_0, 0),
11644 BPF_ST_MEM(BPF_B, BPF_REG_10, -256, 0),
11647 BPF_RAW_INSN(BPF_JMP|BPF_CALL, 0, 1, 0, -3), /* call A */
11648 BPF_ST_MEM(BPF_B, BPF_REG_10, -64, 0),
11651 .prog_type = BPF_PROG_TYPE_XDP,
11652 /* stack_main=32, stack_A=256, stack_B=64
11653 * and max(main+A, main+A+B) < 512
11658 "calls: stack depth check using three frames. test2",
11661 BPF_RAW_INSN(BPF_JMP|BPF_CALL, 0, 1, 0, 4), /* call A */
11662 BPF_RAW_INSN(BPF_JMP|BPF_CALL, 0, 1, 0, 5), /* call B */
11663 BPF_ST_MEM(BPF_B, BPF_REG_10, -32, 0),
11664 BPF_MOV64_IMM(BPF_REG_0, 0),
11667 BPF_ST_MEM(BPF_B, BPF_REG_10, -64, 0),
11670 BPF_RAW_INSN(BPF_JMP|BPF_CALL, 0, 1, 0, -3), /* call A */
11671 BPF_ST_MEM(BPF_B, BPF_REG_10, -256, 0),
11674 .prog_type = BPF_PROG_TYPE_XDP,
11675 /* stack_main=32, stack_A=64, stack_B=256
11676 * and max(main+A, main+A+B) < 512
11681 "calls: stack depth check using three frames. test3",
11684 BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
11685 BPF_RAW_INSN(BPF_JMP|BPF_CALL, 0, 1, 0, 6), /* call A */
11686 BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
11687 BPF_RAW_INSN(BPF_JMP|BPF_CALL, 0, 1, 0, 8), /* call B */
11688 BPF_JMP_IMM(BPF_JGE, BPF_REG_6, 0, 1),
11689 BPF_ST_MEM(BPF_B, BPF_REG_10, -64, 0),
11690 BPF_MOV64_IMM(BPF_REG_0, 0),
11693 BPF_JMP_IMM(BPF_JLT, BPF_REG_1, 10, 1),
11695 BPF_ST_MEM(BPF_B, BPF_REG_10, -224, 0),
11696 BPF_JMP_IMM(BPF_JA, 0, 0, -3),
11698 BPF_JMP_IMM(BPF_JGT, BPF_REG_1, 2, 1),
11699 BPF_RAW_INSN(BPF_JMP|BPF_CALL, 0, 1, 0, -6), /* call A */
11700 BPF_ST_MEM(BPF_B, BPF_REG_10, -256, 0),
11703 .prog_type = BPF_PROG_TYPE_XDP,
11704 /* stack_main=64, stack_A=224, stack_B=256
11705 * and max(main+A, main+A+B) > 512
11707 .errstr = "combined stack",
11711 "calls: stack depth check using three frames. test4",
11712 /* void main(void) {
11717 * void func1(int alloc_or_recurse) {
11718 * if (alloc_or_recurse) {
11719 * frame_pointer[-300] = 1;
11721 * func2(alloc_or_recurse);
11724 * void func2(int alloc_or_recurse) {
11725 * if (alloc_or_recurse) {
11726 * frame_pointer[-300] = 1;
11732 BPF_MOV64_IMM(BPF_REG_1, 0),
11733 BPF_RAW_INSN(BPF_JMP|BPF_CALL, 0, 1, 0, 6), /* call A */
11734 BPF_MOV64_IMM(BPF_REG_1, 1),
11735 BPF_RAW_INSN(BPF_JMP|BPF_CALL, 0, 1, 0, 4), /* call A */
11736 BPF_MOV64_IMM(BPF_REG_1, 1),
11737 BPF_RAW_INSN(BPF_JMP|BPF_CALL, 0, 1, 0, 7), /* call B */
11738 BPF_MOV64_IMM(BPF_REG_0, 0),
11741 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, 2),
11742 BPF_ST_MEM(BPF_B, BPF_REG_10, -300, 0),
11744 BPF_RAW_INSN(BPF_JMP|BPF_CALL, 0, 1, 0, 1), /* call B */
11747 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, 1),
11748 BPF_ST_MEM(BPF_B, BPF_REG_10, -300, 0),
11751 .prog_type = BPF_PROG_TYPE_XDP,
11753 .errstr = "combined stack",
11756 "calls: stack depth check using three frames. test5",
11759 BPF_RAW_INSN(BPF_JMP|BPF_CALL, 0, 1, 0, 1), /* call A */
11762 BPF_RAW_INSN(BPF_JMP|BPF_CALL, 0, 1, 0, 1), /* call B */
11765 BPF_RAW_INSN(BPF_JMP|BPF_CALL, 0, 1, 0, 1), /* call C */
11768 BPF_RAW_INSN(BPF_JMP|BPF_CALL, 0, 1, 0, 1), /* call D */
11771 BPF_RAW_INSN(BPF_JMP|BPF_CALL, 0, 1, 0, 1), /* call E */
11774 BPF_RAW_INSN(BPF_JMP|BPF_CALL, 0, 1, 0, 1), /* call F */
11777 BPF_RAW_INSN(BPF_JMP|BPF_CALL, 0, 1, 0, 1), /* call G */
11780 BPF_RAW_INSN(BPF_JMP|BPF_CALL, 0, 1, 0, 1), /* call H */
11783 BPF_MOV64_IMM(BPF_REG_0, 0),
11786 .prog_type = BPF_PROG_TYPE_XDP,
11787 .errstr = "call stack",
/* Caller passes fp-8 (a pointer into its own stack) as arg1; the callee
 * then tries to store that stack pointer through the slot
 * (*(u64 *)R1 = R1).  Spilling a stack pointer into the caller's frame
 * this way must be rejected -> "cannot spill".
 */
11791 "calls: spill into caller stack frame",
11793 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
11794 BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
11795 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -8),
11796 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1),
/* callee: stores the stack pointer itself into the caller's slot */
11798 BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_1, 0),
11799 BPF_MOV64_IMM(BPF_REG_0, 0),
11802 .prog_type = BPF_PROG_TYPE_XDP,
11803 .errstr = "cannot spill",
11807 "calls: write into caller stack frame",
11809 BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
11810 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -8),
11811 BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
11812 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 2),
11813 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_6, 0),
11815 BPF_ST_MEM(BPF_DW, BPF_REG_1, 0, 42),
11816 BPF_MOV64_IMM(BPF_REG_0, 0),
11819 .prog_type = BPF_PROG_TYPE_XDP,
/* The callee returns a pointer into its own (about-to-die) stack frame
 * (R0 = fp-8), and the caller then writes through it.  Returning a stack
 * pointer out of a subprog must be rejected -> "cannot return stack
 * pointer" (prevents use of a dead frame).
 */
11824 "calls: write into callee stack frame",
11826 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 2),
/* caller dereferences the (invalid) returned callee-stack pointer */
11827 BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, 42),
/* callee: R0 = own frame pointer - 8, then (implicitly) returns it */
11829 BPF_MOV64_REG(BPF_REG_0, BPF_REG_10),
11830 BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, -8),
11833 .prog_type = BPF_PROG_TYPE_XDP,
11834 .errstr = "cannot return stack pointer",
11838 "calls: two calls with stack write and void return",
11841 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
11842 BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
11843 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -8),
11844 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
11845 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -16),
11846 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 2),
11847 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_10, -16),
11851 BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
11852 BPF_MOV64_REG(BPF_REG_7, BPF_REG_2),
11853 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 3),
11854 BPF_MOV64_REG(BPF_REG_1, BPF_REG_7),
11855 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1),
11859 /* write into stack frame of main prog */
11860 BPF_ST_MEM(BPF_DW, BPF_REG_1, 0, 0),
11861 BPF_EXIT_INSN(), /* void return */
11863 .prog_type = BPF_PROG_TYPE_XDP,
11867 "calls: ambiguous return value",
11869 BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
11870 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 5),
11871 BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
11872 BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
11873 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 2),
11874 BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
11876 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, 1),
11877 BPF_MOV64_IMM(BPF_REG_0, 0),
11880 .errstr_unpriv = "allowed for root only",
11881 .result_unpriv = REJECT,
11882 .errstr = "R0 !read_ok",
11886 "calls: two calls that return map_value",
11889 /* pass fp-16, fp-8 into a function */
11890 BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
11891 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -8),
11892 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
11893 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -16),
11894 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 8),
11896 /* fetch map_value_ptr from the stack of this function */
11897 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_10, -8),
11898 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 1),
11899 /* write into map value */
11900 BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, 0),
11901 /* fetch secound map_value_ptr from the stack */
11902 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_10, -16),
11903 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 1),
11904 /* write into map value */
11905 BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, 0),
11906 BPF_MOV64_IMM(BPF_REG_0, 0),
11910 /* call 3rd function twice */
11911 BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
11912 BPF_MOV64_REG(BPF_REG_7, BPF_REG_2),
11913 /* first time with fp-8 */
11914 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 3),
11915 BPF_MOV64_REG(BPF_REG_1, BPF_REG_7),
11916 /* second time with fp-16 */
11917 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1),
11921 BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
11922 /* lookup from map */
11923 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
11924 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
11925 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
11926 BPF_LD_MAP_FD(BPF_REG_1, 0),
11927 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
11928 BPF_FUNC_map_lookup_elem),
11929 /* write map_value_ptr into stack frame of main prog */
11930 BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_0, 0),
11931 BPF_MOV64_IMM(BPF_REG_0, 0),
11932 BPF_EXIT_INSN(), /* return 0 */
11934 .prog_type = BPF_PROG_TYPE_XDP,
11935 .fixup_map_hash_8b = { 23 },
11939 "calls: two calls that return map_value with bool condition",
11942 /* pass fp-16, fp-8 into a function */
11943 BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
11944 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -8),
11945 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
11946 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -16),
11947 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 2),
11948 BPF_MOV64_IMM(BPF_REG_0, 0),
11952 /* call 3rd function twice */
11953 BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
11954 BPF_MOV64_REG(BPF_REG_7, BPF_REG_2),
11955 /* first time with fp-8 */
11956 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 9),
11957 BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 1, 2),
11958 /* fetch map_value_ptr from the stack of this function */
11959 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_6, 0),
11960 /* write into map value */
11961 BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, 0),
11962 BPF_MOV64_REG(BPF_REG_1, BPF_REG_7),
11963 /* second time with fp-16 */
11964 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 4),
11965 BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 1, 2),
11966 /* fetch secound map_value_ptr from the stack */
11967 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_7, 0),
11968 /* write into map value */
11969 BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, 0),
11973 BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
11974 /* lookup from map */
11975 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
11976 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
11977 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
11978 BPF_LD_MAP_FD(BPF_REG_1, 0),
11979 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
11980 BPF_FUNC_map_lookup_elem),
11981 BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 2),
11982 BPF_MOV64_IMM(BPF_REG_0, 0),
11983 BPF_EXIT_INSN(), /* return 0 */
11984 /* write map_value_ptr into stack frame of main prog */
11985 BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_0, 0),
11986 BPF_MOV64_IMM(BPF_REG_0, 1),
11987 BPF_EXIT_INSN(), /* return 1 */
11989 .prog_type = BPF_PROG_TYPE_XDP,
11990 .fixup_map_hash_8b = { 23 },
11994 "calls: two calls that return map_value with incorrect bool check",
11997 /* pass fp-16, fp-8 into a function */
11998 BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
11999 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -8),
12000 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
12001 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -16),
12002 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 2),
12003 BPF_MOV64_IMM(BPF_REG_0, 0),
12007 /* call 3rd function twice */
12008 BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
12009 BPF_MOV64_REG(BPF_REG_7, BPF_REG_2),
12010 /* first time with fp-8 */
12011 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 9),
12012 BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 1, 2),
12013 /* fetch map_value_ptr from the stack of this function */
12014 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_6, 0),
12015 /* write into map value */
12016 BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, 0),
12017 BPF_MOV64_REG(BPF_REG_1, BPF_REG_7),
12018 /* second time with fp-16 */
12019 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 4),
12020 BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 2),
12021 /* fetch secound map_value_ptr from the stack */
12022 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_7, 0),
12023 /* write into map value */
12024 BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, 0),
12028 BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
12029 /* lookup from map */
12030 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
12031 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
12032 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
12033 BPF_LD_MAP_FD(BPF_REG_1, 0),
12034 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
12035 BPF_FUNC_map_lookup_elem),
12036 BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 2),
12037 BPF_MOV64_IMM(BPF_REG_0, 0),
12038 BPF_EXIT_INSN(), /* return 0 */
12039 /* write map_value_ptr into stack frame of main prog */
12040 BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_0, 0),
12041 BPF_MOV64_IMM(BPF_REG_0, 1),
12042 BPF_EXIT_INSN(), /* return 1 */
12044 .prog_type = BPF_PROG_TYPE_XDP,
12045 .fixup_map_hash_8b = { 23 },
12047 .errstr = "invalid read from stack off -16+0 size 8",
12050 "calls: two calls that receive map_value via arg=ptr_stack_of_caller. test1",
12053 /* pass fp-16, fp-8 into a function */
12054 BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
12055 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -8),
12056 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
12057 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -16),
12058 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 2),
12059 BPF_MOV64_IMM(BPF_REG_0, 0),
12063 BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
12064 BPF_MOV64_REG(BPF_REG_7, BPF_REG_2),
12065 /* 1st lookup from map */
12066 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
12067 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
12068 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
12069 BPF_LD_MAP_FD(BPF_REG_1, 0),
12070 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
12071 BPF_FUNC_map_lookup_elem),
12072 BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 2),
12073 BPF_MOV64_IMM(BPF_REG_8, 0),
12074 BPF_JMP_IMM(BPF_JA, 0, 0, 2),
12075 /* write map_value_ptr into stack frame of main prog at fp-8 */
12076 BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_0, 0),
12077 BPF_MOV64_IMM(BPF_REG_8, 1),
12079 /* 2nd lookup from map */
12080 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10), /* 20 */
12081 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
12082 BPF_LD_MAP_FD(BPF_REG_1, 0),
12083 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, /* 24 */
12084 BPF_FUNC_map_lookup_elem),
12085 BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 2),
12086 BPF_MOV64_IMM(BPF_REG_9, 0),
12087 BPF_JMP_IMM(BPF_JA, 0, 0, 2),
12088 /* write map_value_ptr into stack frame of main prog at fp-16 */
12089 BPF_STX_MEM(BPF_DW, BPF_REG_7, BPF_REG_0, 0),
12090 BPF_MOV64_IMM(BPF_REG_9, 1),
12092 /* call 3rd func with fp-8, 0|1, fp-16, 0|1 */
12093 BPF_MOV64_REG(BPF_REG_1, BPF_REG_6), /* 30 */
12094 BPF_MOV64_REG(BPF_REG_2, BPF_REG_8),
12095 BPF_MOV64_REG(BPF_REG_3, BPF_REG_7),
12096 BPF_MOV64_REG(BPF_REG_4, BPF_REG_9),
12097 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1), /* 34 */
12101 /* if arg2 == 1 do *arg1 = 0 */
12102 BPF_JMP_IMM(BPF_JNE, BPF_REG_2, 1, 2),
12103 /* fetch map_value_ptr from the stack of this function */
12104 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, 0),
12105 /* write into map value */
12106 BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, 0),
12108 /* if arg4 == 1 do *arg3 = 0 */
12109 BPF_JMP_IMM(BPF_JNE, BPF_REG_4, 1, 2),
12110 /* fetch map_value_ptr from the stack of this function */
12111 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_3, 0),
12112 /* write into map value */
12113 BPF_ST_MEM(BPF_DW, BPF_REG_0, 2, 0),
12116 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
12117 .fixup_map_hash_8b = { 12, 22 },
12119 .errstr = "invalid access to map value, value_size=8 off=2 size=8",
12120 .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
12123 "calls: two calls that receive map_value via arg=ptr_stack_of_caller. test2",
12126 /* pass fp-16, fp-8 into a function */
12127 BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
12128 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -8),
12129 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
12130 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -16),
12131 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 2),
12132 BPF_MOV64_IMM(BPF_REG_0, 0),
12136 BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
12137 BPF_MOV64_REG(BPF_REG_7, BPF_REG_2),
12138 /* 1st lookup from map */
12139 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
12140 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
12141 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
12142 BPF_LD_MAP_FD(BPF_REG_1, 0),
12143 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
12144 BPF_FUNC_map_lookup_elem),
12145 BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 2),
12146 BPF_MOV64_IMM(BPF_REG_8, 0),
12147 BPF_JMP_IMM(BPF_JA, 0, 0, 2),
12148 /* write map_value_ptr into stack frame of main prog at fp-8 */
12149 BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_0, 0),
12150 BPF_MOV64_IMM(BPF_REG_8, 1),
12152 /* 2nd lookup from map */
12153 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10), /* 20 */
12154 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
12155 BPF_LD_MAP_FD(BPF_REG_1, 0),
12156 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, /* 24 */
12157 BPF_FUNC_map_lookup_elem),
12158 BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 2),
12159 BPF_MOV64_IMM(BPF_REG_9, 0),
12160 BPF_JMP_IMM(BPF_JA, 0, 0, 2),
12161 /* write map_value_ptr into stack frame of main prog at fp-16 */
12162 BPF_STX_MEM(BPF_DW, BPF_REG_7, BPF_REG_0, 0),
12163 BPF_MOV64_IMM(BPF_REG_9, 1),
12165 /* call 3rd func with fp-8, 0|1, fp-16, 0|1 */
12166 BPF_MOV64_REG(BPF_REG_1, BPF_REG_6), /* 30 */
12167 BPF_MOV64_REG(BPF_REG_2, BPF_REG_8),
12168 BPF_MOV64_REG(BPF_REG_3, BPF_REG_7),
12169 BPF_MOV64_REG(BPF_REG_4, BPF_REG_9),
12170 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1), /* 34 */
12174 /* if arg2 == 1 do *arg1 = 0 */
12175 BPF_JMP_IMM(BPF_JNE, BPF_REG_2, 1, 2),
12176 /* fetch map_value_ptr from the stack of this function */
12177 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, 0),
12178 /* write into map value */
12179 BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, 0),
12181 /* if arg4 == 1 do *arg3 = 0 */
12182 BPF_JMP_IMM(BPF_JNE, BPF_REG_4, 1, 2),
12183 /* fetch map_value_ptr from the stack of this function */
12184 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_3, 0),
12185 /* write into map value */
12186 BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, 0),
12189 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
12190 .fixup_map_hash_8b = { 12, 22 },
12194 "calls: two jumps that receive map_value via arg=ptr_stack_of_jumper. test3",
12197 /* pass fp-16, fp-8 into a function */
12198 BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
12199 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -8),
12200 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
12201 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -16),
12202 BPF_JMP_IMM(BPF_JNE, BPF_REG_1, 0, 2),
12203 BPF_MOV64_IMM(BPF_REG_0, 0),
12207 BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
12208 BPF_MOV64_REG(BPF_REG_7, BPF_REG_2),
12209 /* 1st lookup from map */
12210 BPF_ST_MEM(BPF_DW, BPF_REG_10, -24, 0),
12211 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
12212 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -24),
12213 BPF_LD_MAP_FD(BPF_REG_1, 0),
12214 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
12215 BPF_FUNC_map_lookup_elem),
12216 BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 2),
12217 BPF_MOV64_IMM(BPF_REG_8, 0),
12218 BPF_JMP_IMM(BPF_JA, 0, 0, 2),
12219 /* write map_value_ptr into stack frame of main prog at fp-8 */
12220 BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_0, 0),
12221 BPF_MOV64_IMM(BPF_REG_8, 1),
12223 /* 2nd lookup from map */
12224 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
12225 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -24),
12226 BPF_LD_MAP_FD(BPF_REG_1, 0),
12227 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
12228 BPF_FUNC_map_lookup_elem),
12229 BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 2),
12230 BPF_MOV64_IMM(BPF_REG_9, 0), // 26
12231 BPF_JMP_IMM(BPF_JA, 0, 0, 2),
12232 /* write map_value_ptr into stack frame of main prog at fp-16 */
12233 BPF_STX_MEM(BPF_DW, BPF_REG_7, BPF_REG_0, 0),
12234 BPF_MOV64_IMM(BPF_REG_9, 1),
12236 /* call 3rd func with fp-8, 0|1, fp-16, 0|1 */
12237 BPF_MOV64_REG(BPF_REG_1, BPF_REG_6), // 30
12238 BPF_MOV64_REG(BPF_REG_2, BPF_REG_8),
12239 BPF_MOV64_REG(BPF_REG_3, BPF_REG_7),
12240 BPF_MOV64_REG(BPF_REG_4, BPF_REG_9),
12241 BPF_JMP_IMM(BPF_JNE, BPF_REG_1, 0, 1), // 34
12242 BPF_JMP_IMM(BPF_JA, 0, 0, -30),
12245 /* if arg2 == 1 do *arg1 = 0 */
12246 BPF_JMP_IMM(BPF_JNE, BPF_REG_2, 1, 2),
12247 /* fetch map_value_ptr from the stack of this function */
12248 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, 0),
12249 /* write into map value */
12250 BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, 0),
12252 /* if arg4 == 1 do *arg3 = 0 */
12253 BPF_JMP_IMM(BPF_JNE, BPF_REG_4, 1, 2),
12254 /* fetch map_value_ptr from the stack of this function */
12255 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_3, 0),
12256 /* write into map value */
12257 BPF_ST_MEM(BPF_DW, BPF_REG_0, 2, 0),
12258 BPF_JMP_IMM(BPF_JA, 0, 0, -8),
12260 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
12261 .fixup_map_hash_8b = { 12, 22 },
12263 .errstr = "invalid access to map value, value_size=8 off=2 size=8",
12264 .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
12267 "calls: two calls that receive map_value_ptr_or_null via arg. test1",
12270 /* pass fp-16, fp-8 into a function */
12271 BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
12272 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -8),
12273 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
12274 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -16),
12275 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 2),
12276 BPF_MOV64_IMM(BPF_REG_0, 0),
12280 BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
12281 BPF_MOV64_REG(BPF_REG_7, BPF_REG_2),
12282 /* 1st lookup from map */
12283 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
12284 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
12285 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
12286 BPF_LD_MAP_FD(BPF_REG_1, 0),
12287 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
12288 BPF_FUNC_map_lookup_elem),
12289 /* write map_value_ptr_or_null into stack frame of main prog at fp-8 */
12290 BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_0, 0),
12291 BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 2),
12292 BPF_MOV64_IMM(BPF_REG_8, 0),
12293 BPF_JMP_IMM(BPF_JA, 0, 0, 1),
12294 BPF_MOV64_IMM(BPF_REG_8, 1),
12296 /* 2nd lookup from map */
12297 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
12298 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
12299 BPF_LD_MAP_FD(BPF_REG_1, 0),
12300 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
12301 BPF_FUNC_map_lookup_elem),
12302 /* write map_value_ptr_or_null into stack frame of main prog at fp-16 */
12303 BPF_STX_MEM(BPF_DW, BPF_REG_7, BPF_REG_0, 0),
12304 BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 2),
12305 BPF_MOV64_IMM(BPF_REG_9, 0),
12306 BPF_JMP_IMM(BPF_JA, 0, 0, 1),
12307 BPF_MOV64_IMM(BPF_REG_9, 1),
12309 /* call 3rd func with fp-8, 0|1, fp-16, 0|1 */
12310 BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
12311 BPF_MOV64_REG(BPF_REG_2, BPF_REG_8),
12312 BPF_MOV64_REG(BPF_REG_3, BPF_REG_7),
12313 BPF_MOV64_REG(BPF_REG_4, BPF_REG_9),
12314 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1),
12318 /* if arg2 == 1 do *arg1 = 0 */
12319 BPF_JMP_IMM(BPF_JNE, BPF_REG_2, 1, 2),
12320 /* fetch map_value_ptr from the stack of this function */
12321 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, 0),
12322 /* write into map value */
12323 BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, 0),
12325 /* if arg4 == 1 do *arg3 = 0 */
12326 BPF_JMP_IMM(BPF_JNE, BPF_REG_4, 1, 2),
12327 /* fetch map_value_ptr from the stack of this function */
12328 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_3, 0),
12329 /* write into map value */
12330 BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, 0),
12333 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
12334 .fixup_map_hash_8b = { 12, 22 },
12338 "calls: two calls that receive map_value_ptr_or_null via arg. test2",
12341 /* pass fp-16, fp-8 into a function */
12342 BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
12343 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -8),
12344 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
12345 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -16),
12346 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 2),
12347 BPF_MOV64_IMM(BPF_REG_0, 0),
12351 BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
12352 BPF_MOV64_REG(BPF_REG_7, BPF_REG_2),
12353 /* 1st lookup from map */
12354 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
12355 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
12356 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
12357 BPF_LD_MAP_FD(BPF_REG_1, 0),
12358 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
12359 BPF_FUNC_map_lookup_elem),
12360 /* write map_value_ptr_or_null into stack frame of main prog at fp-8 */
12361 BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_0, 0),
12362 BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 2),
12363 BPF_MOV64_IMM(BPF_REG_8, 0),
12364 BPF_JMP_IMM(BPF_JA, 0, 0, 1),
12365 BPF_MOV64_IMM(BPF_REG_8, 1),
12367 /* 2nd lookup from map */
12368 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
12369 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
12370 BPF_LD_MAP_FD(BPF_REG_1, 0),
12371 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
12372 BPF_FUNC_map_lookup_elem),
12373 /* write map_value_ptr_or_null into stack frame of main prog at fp-16 */
12374 BPF_STX_MEM(BPF_DW, BPF_REG_7, BPF_REG_0, 0),
12375 BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 2),
12376 BPF_MOV64_IMM(BPF_REG_9, 0),
12377 BPF_JMP_IMM(BPF_JA, 0, 0, 1),
12378 BPF_MOV64_IMM(BPF_REG_9, 1),
12380 /* call 3rd func with fp-8, 0|1, fp-16, 0|1 */
12381 BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
12382 BPF_MOV64_REG(BPF_REG_2, BPF_REG_8),
12383 BPF_MOV64_REG(BPF_REG_3, BPF_REG_7),
12384 BPF_MOV64_REG(BPF_REG_4, BPF_REG_9),
12385 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1),
12389 /* if arg2 == 1 do *arg1 = 0 */
12390 BPF_JMP_IMM(BPF_JNE, BPF_REG_2, 1, 2),
12391 /* fetch map_value_ptr from the stack of this function */
12392 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, 0),
12393 /* write into map value */
12394 BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, 0),
12396 /* if arg4 == 0 do *arg3 = 0 */
12397 BPF_JMP_IMM(BPF_JNE, BPF_REG_4, 0, 2),
12398 /* fetch map_value_ptr from the stack of this function */
12399 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_3, 0),
12400 /* write into map value */
12401 BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, 0),
12404 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
12405 .fixup_map_hash_8b = { 12, 22 },
12407 .errstr = "R0 invalid mem access 'inv'",
/* Packet-pointer spill through a caller-stack slot: main passes fp-8 in
 * R4; the callee spills a *range-unchecked* pkt_ptr into that slot,
 * performs the data_end bounds check, and only then reads the spilled
 * pointer back and writes through it.  Since the check dominates the
 * read-back, the access is safe.  .retval = POINTER_VALUE suggests this
 * variant is expected to load and run successfully (the .result line is
 * not visible in this chunk — NOTE(review): confirm against full file).
 */
12410 "calls: pkt_ptr spill into caller stack",
12412 BPF_MOV64_REG(BPF_REG_4, BPF_REG_10),
12413 BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, -8),
12414 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1),
/* callee: load pkt data/data_end from the skb ctx */
12418 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
12419 offsetof(struct __sk_buff, data)),
12420 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
12421 offsetof(struct __sk_buff, data_end)),
12422 BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
12423 BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
12424 /* spill unchecked pkt_ptr into stack of caller */
12425 BPF_STX_MEM(BPF_DW, BPF_REG_4, BPF_REG_2, 0),
/* bounds check: skip the packet access if data+8 > data_end */
12426 BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 2),
12427 /* now the pkt range is verified, read pkt_ptr from stack */
12428 BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_4, 0),
12429 /* write 4 bytes into packet */
12430 BPF_ST_MEM(BPF_W, BPF_REG_2, 0, 0),
12434 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
12435 .retval = POINTER_VALUE,
12436 .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
12439 "calls: pkt_ptr spill into caller stack 2",
12441 BPF_MOV64_REG(BPF_REG_4, BPF_REG_10),
12442 BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, -8),
12443 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 3),
12444 /* Marking is still kept, but not in all cases safe. */
12445 BPF_LDX_MEM(BPF_DW, BPF_REG_4, BPF_REG_10, -8),
12446 BPF_ST_MEM(BPF_W, BPF_REG_4, 0, 0),
12450 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
12451 offsetof(struct __sk_buff, data)),
12452 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
12453 offsetof(struct __sk_buff, data_end)),
12454 BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
12455 BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
12456 /* spill unchecked pkt_ptr into stack of caller */
12457 BPF_STX_MEM(BPF_DW, BPF_REG_4, BPF_REG_2, 0),
12458 BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 2),
12459 /* now the pkt range is verified, read pkt_ptr from stack */
12460 BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_4, 0),
12461 /* write 4 bytes into packet */
12462 BPF_ST_MEM(BPF_W, BPF_REG_2, 0, 0),
12465 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
12466 .errstr = "invalid access to packet",
12468 .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
12471 "calls: pkt_ptr spill into caller stack 3",
12473 BPF_MOV64_REG(BPF_REG_4, BPF_REG_10),
12474 BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, -8),
12475 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 4),
12476 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 2),
12477 /* Marking is still kept and safe here. */
12478 BPF_LDX_MEM(BPF_DW, BPF_REG_4, BPF_REG_10, -8),
12479 BPF_ST_MEM(BPF_W, BPF_REG_4, 0, 0),
12483 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
12484 offsetof(struct __sk_buff, data)),
12485 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
12486 offsetof(struct __sk_buff, data_end)),
12487 BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
12488 BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
12489 /* spill unchecked pkt_ptr into stack of caller */
12490 BPF_STX_MEM(BPF_DW, BPF_REG_4, BPF_REG_2, 0),
12491 BPF_MOV64_IMM(BPF_REG_5, 0),
12492 BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 3),
12493 BPF_MOV64_IMM(BPF_REG_5, 1),
12494 /* now the pkt range is verified, read pkt_ptr from stack */
12495 BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_4, 0),
12496 /* write 4 bytes into packet */
12497 BPF_ST_MEM(BPF_W, BPF_REG_2, 0, 0),
12498 BPF_MOV64_REG(BPF_REG_0, BPF_REG_5),
12501 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
12504 .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
12507 "calls: pkt_ptr spill into caller stack 4",
12509 BPF_MOV64_REG(BPF_REG_4, BPF_REG_10),
12510 BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, -8),
12511 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 4),
12512 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 2),
12513 /* Check marking propagated. */
12514 BPF_LDX_MEM(BPF_DW, BPF_REG_4, BPF_REG_10, -8),
12515 BPF_ST_MEM(BPF_W, BPF_REG_4, 0, 0),
12519 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
12520 offsetof(struct __sk_buff, data)),
12521 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
12522 offsetof(struct __sk_buff, data_end)),
12523 BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
12524 BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
12525 /* spill unchecked pkt_ptr into stack of caller */
12526 BPF_STX_MEM(BPF_DW, BPF_REG_4, BPF_REG_2, 0),
12527 BPF_MOV64_IMM(BPF_REG_5, 0),
12528 BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 2),
12529 BPF_MOV64_IMM(BPF_REG_5, 1),
12530 /* don't read back pkt_ptr from stack here */
12531 /* write 4 bytes into packet */
12532 BPF_ST_MEM(BPF_W, BPF_REG_2, 0, 0),
12533 BPF_MOV64_REG(BPF_REG_0, BPF_REG_5),
12536 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
12539 .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
12542 "calls: pkt_ptr spill into caller stack 5",
12544 BPF_MOV64_REG(BPF_REG_4, BPF_REG_10),
12545 BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, -8),
12546 BPF_STX_MEM(BPF_DW, BPF_REG_4, BPF_REG_1, 0),
12547 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 3),
12548 BPF_LDX_MEM(BPF_DW, BPF_REG_4, BPF_REG_10, -8),
12549 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_4, 0),
12553 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
12554 offsetof(struct __sk_buff, data)),
12555 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
12556 offsetof(struct __sk_buff, data_end)),
12557 BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
12558 BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
12559 BPF_MOV64_IMM(BPF_REG_5, 0),
12560 BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 3),
12561 /* spill checked pkt_ptr into stack of caller */
12562 BPF_STX_MEM(BPF_DW, BPF_REG_4, BPF_REG_2, 0),
12563 BPF_MOV64_IMM(BPF_REG_5, 1),
12564 /* don't read back pkt_ptr from stack here */
12565 /* write 4 bytes into packet */
12566 BPF_ST_MEM(BPF_W, BPF_REG_2, 0, 0),
12567 BPF_MOV64_REG(BPF_REG_0, BPF_REG_5),
12570 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
12571 .errstr = "same insn cannot be used with different",
12573 .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
12576 "calls: pkt_ptr spill into caller stack 6",
12578 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
12579 offsetof(struct __sk_buff, data_end)),
12580 BPF_MOV64_REG(BPF_REG_4, BPF_REG_10),
12581 BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, -8),
12582 BPF_STX_MEM(BPF_DW, BPF_REG_4, BPF_REG_2, 0),
12583 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 3),
12584 BPF_LDX_MEM(BPF_DW, BPF_REG_4, BPF_REG_10, -8),
12585 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_4, 0),
12589 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
12590 offsetof(struct __sk_buff, data)),
12591 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
12592 offsetof(struct __sk_buff, data_end)),
12593 BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
12594 BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
12595 BPF_MOV64_IMM(BPF_REG_5, 0),
12596 BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 3),
12597 /* spill checked pkt_ptr into stack of caller */
12598 BPF_STX_MEM(BPF_DW, BPF_REG_4, BPF_REG_2, 0),
12599 BPF_MOV64_IMM(BPF_REG_5, 1),
12600 /* don't read back pkt_ptr from stack here */
12601 /* write 4 bytes into packet */
12602 BPF_ST_MEM(BPF_W, BPF_REG_2, 0, 0),
12603 BPF_MOV64_REG(BPF_REG_0, BPF_REG_5),
12606 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
12607 .errstr = "R4 invalid mem access",
12609 .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
12612 "calls: pkt_ptr spill into caller stack 7",
12614 BPF_MOV64_IMM(BPF_REG_2, 0),
12615 BPF_MOV64_REG(BPF_REG_4, BPF_REG_10),
12616 BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, -8),
12617 BPF_STX_MEM(BPF_DW, BPF_REG_4, BPF_REG_2, 0),
12618 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 3),
12619 BPF_LDX_MEM(BPF_DW, BPF_REG_4, BPF_REG_10, -8),
12620 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_4, 0),
12624 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
12625 offsetof(struct __sk_buff, data)),
12626 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
12627 offsetof(struct __sk_buff, data_end)),
12628 BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
12629 BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
12630 BPF_MOV64_IMM(BPF_REG_5, 0),
12631 BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 3),
12632 /* spill checked pkt_ptr into stack of caller */
12633 BPF_STX_MEM(BPF_DW, BPF_REG_4, BPF_REG_2, 0),
12634 BPF_MOV64_IMM(BPF_REG_5, 1),
12635 /* don't read back pkt_ptr from stack here */
12636 /* write 4 bytes into packet */
12637 BPF_ST_MEM(BPF_W, BPF_REG_2, 0, 0),
12638 BPF_MOV64_REG(BPF_REG_0, BPF_REG_5),
12641 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
12642 .errstr = "R4 invalid mem access",
12644 .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
12647 "calls: pkt_ptr spill into caller stack 8",
12649 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
12650 offsetof(struct __sk_buff, data)),
12651 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
12652 offsetof(struct __sk_buff, data_end)),
12653 BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
12654 BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
12655 BPF_JMP_REG(BPF_JLE, BPF_REG_0, BPF_REG_3, 1),
12657 BPF_MOV64_REG(BPF_REG_4, BPF_REG_10),
12658 BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, -8),
12659 BPF_STX_MEM(BPF_DW, BPF_REG_4, BPF_REG_2, 0),
12660 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 3),
12661 BPF_LDX_MEM(BPF_DW, BPF_REG_4, BPF_REG_10, -8),
12662 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_4, 0),
12666 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
12667 offsetof(struct __sk_buff, data)),
12668 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
12669 offsetof(struct __sk_buff, data_end)),
12670 BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
12671 BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
12672 BPF_MOV64_IMM(BPF_REG_5, 0),
12673 BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 3),
12674 /* spill checked pkt_ptr into stack of caller */
12675 BPF_STX_MEM(BPF_DW, BPF_REG_4, BPF_REG_2, 0),
12676 BPF_MOV64_IMM(BPF_REG_5, 1),
12677 /* don't read back pkt_ptr from stack here */
12678 /* write 4 bytes into packet */
12679 BPF_ST_MEM(BPF_W, BPF_REG_2, 0, 0),
12680 BPF_MOV64_REG(BPF_REG_0, BPF_REG_5),
12683 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
12685 .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
12688 "calls: pkt_ptr spill into caller stack 9",
12690 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
12691 offsetof(struct __sk_buff, data)),
12692 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
12693 offsetof(struct __sk_buff, data_end)),
12694 BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
12695 BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
12696 BPF_JMP_REG(BPF_JLE, BPF_REG_0, BPF_REG_3, 1),
12698 BPF_MOV64_REG(BPF_REG_4, BPF_REG_10),
12699 BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, -8),
12700 BPF_STX_MEM(BPF_DW, BPF_REG_4, BPF_REG_2, 0),
12701 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 3),
12702 BPF_LDX_MEM(BPF_DW, BPF_REG_4, BPF_REG_10, -8),
12703 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_4, 0),
12707 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
12708 offsetof(struct __sk_buff, data)),
12709 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
12710 offsetof(struct __sk_buff, data_end)),
12711 BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
12712 BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
12713 BPF_MOV64_IMM(BPF_REG_5, 0),
12714 /* spill unchecked pkt_ptr into stack of caller */
12715 BPF_STX_MEM(BPF_DW, BPF_REG_4, BPF_REG_2, 0),
12716 BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 2),
12717 BPF_MOV64_IMM(BPF_REG_5, 1),
12718 /* don't read back pkt_ptr from stack here */
12719 /* write 4 bytes into packet */
12720 BPF_ST_MEM(BPF_W, BPF_REG_2, 0, 0),
12721 BPF_MOV64_REG(BPF_REG_0, BPF_REG_5),
12724 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
12725 .errstr = "invalid access to packet",
12727 .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
12730 "calls: caller stack init to zero or map_value_or_null",
12732 BPF_MOV64_IMM(BPF_REG_0, 0),
12733 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -8),
12734 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
12735 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
12736 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 4),
12737 /* fetch map_value_or_null or const_zero from stack */
12738 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_10, -8),
12739 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 1),
12740 /* store into map_value */
12741 BPF_ST_MEM(BPF_W, BPF_REG_0, 0, 0),
12745 /* if (ctx == 0) return; */
12746 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, 8),
12747 /* else bpf_map_lookup() and *(fp - 8) = r0 */
12748 BPF_MOV64_REG(BPF_REG_6, BPF_REG_2),
12749 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
12750 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
12751 BPF_LD_MAP_FD(BPF_REG_1, 0),
12752 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
12753 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
12754 BPF_FUNC_map_lookup_elem),
12755 /* write map_value_ptr_or_null into stack frame of main prog at fp-8 */
12756 BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_0, 0),
12759 .fixup_map_hash_8b = { 13 },
12761 .prog_type = BPF_PROG_TYPE_XDP,
12764 "calls: stack init to zero and pruning",
12766 /* first make allocated_stack 16 byte */
12767 BPF_ST_MEM(BPF_DW, BPF_REG_10, -16, 0),
12768 /* now fork the execution such that the false branch
12769 * of JGT insn will be verified second and it skips zero
12770 * init of fp-8 stack slot. If stack liveness marking
12771 * is missing live_read marks from call map_lookup
12772 * processing then pruning will incorrectly assume
12773 * that fp-8 stack slot was unused in the fall-through
12774 * branch and will accept the program incorrectly
12776 BPF_JMP_IMM(BPF_JGT, BPF_REG_1, 2, 2),
12777 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
12778 BPF_JMP_IMM(BPF_JA, 0, 0, 0),
12779 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
12780 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
12781 BPF_LD_MAP_FD(BPF_REG_1, 0),
12782 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
12783 BPF_FUNC_map_lookup_elem),
12786 .fixup_map_hash_48b = { 6 },
12787 .errstr = "invalid indirect read from stack off -8+0 size 8",
12789 .prog_type = BPF_PROG_TYPE_XDP,
12792 "calls: two calls returning different map pointers for lookup (hash, array)",
12795 BPF_JMP_IMM(BPF_JNE, BPF_REG_1, 0, 2),
12797 BPF_JMP_IMM(BPF_JA, 0, 0, 1),
12799 BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
12800 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
12801 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
12802 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
12803 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
12804 BPF_FUNC_map_lookup_elem),
12805 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 2),
12806 BPF_ST_MEM(BPF_DW, BPF_REG_0, 0,
12807 offsetof(struct test_val, foo)),
12808 BPF_MOV64_IMM(BPF_REG_0, 1),
12811 BPF_LD_MAP_FD(BPF_REG_0, 0),
12814 BPF_LD_MAP_FD(BPF_REG_0, 0),
12817 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
12818 .fixup_map_hash_48b = { 13 },
12819 .fixup_map_array_48b = { 16 },
12824 "calls: two calls returning different map pointers for lookup (hash, map in map)",
12827 BPF_JMP_IMM(BPF_JNE, BPF_REG_1, 0, 2),
12829 BPF_JMP_IMM(BPF_JA, 0, 0, 1),
12831 BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
12832 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
12833 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
12834 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
12835 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
12836 BPF_FUNC_map_lookup_elem),
12837 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 2),
12838 BPF_ST_MEM(BPF_DW, BPF_REG_0, 0,
12839 offsetof(struct test_val, foo)),
12840 BPF_MOV64_IMM(BPF_REG_0, 1),
12843 BPF_LD_MAP_FD(BPF_REG_0, 0),
12846 BPF_LD_MAP_FD(BPF_REG_0, 0),
12849 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
12850 .fixup_map_in_map = { 16 },
12851 .fixup_map_array_48b = { 13 },
12853 .errstr = "R0 invalid mem access 'map_ptr'",
12856 "cond: two branches returning different map pointers for lookup (tail, tail)",
12858 BPF_LDX_MEM(BPF_W, BPF_REG_6, BPF_REG_1,
12859 offsetof(struct __sk_buff, mark)),
12860 BPF_JMP_IMM(BPF_JNE, BPF_REG_6, 0, 3),
12861 BPF_LD_MAP_FD(BPF_REG_2, 0),
12862 BPF_JMP_IMM(BPF_JA, 0, 0, 2),
12863 BPF_LD_MAP_FD(BPF_REG_2, 0),
12864 BPF_MOV64_IMM(BPF_REG_3, 7),
12865 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
12866 BPF_FUNC_tail_call),
12867 BPF_MOV64_IMM(BPF_REG_0, 1),
12870 .fixup_prog1 = { 5 },
12871 .fixup_prog2 = { 2 },
12872 .result_unpriv = REJECT,
12873 .errstr_unpriv = "tail_call abusing map_ptr",
12878 "cond: two branches returning same map pointers for lookup (tail, tail)",
12880 BPF_LDX_MEM(BPF_W, BPF_REG_6, BPF_REG_1,
12881 offsetof(struct __sk_buff, mark)),
12882 BPF_JMP_IMM(BPF_JEQ, BPF_REG_6, 0, 3),
12883 BPF_LD_MAP_FD(BPF_REG_2, 0),
12884 BPF_JMP_IMM(BPF_JA, 0, 0, 2),
12885 BPF_LD_MAP_FD(BPF_REG_2, 0),
12886 BPF_MOV64_IMM(BPF_REG_3, 7),
12887 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
12888 BPF_FUNC_tail_call),
12889 BPF_MOV64_IMM(BPF_REG_0, 1),
12892 .fixup_prog2 = { 2, 5 },
12893 .result_unpriv = ACCEPT,
12898 "search pruning: all branches should be verified (nop operation)",
12900 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
12901 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
12902 BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
12903 BPF_LD_MAP_FD(BPF_REG_1, 0),
12904 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
12905 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 11),
12906 BPF_LDX_MEM(BPF_DW, BPF_REG_3, BPF_REG_0, 0),
12907 BPF_JMP_IMM(BPF_JEQ, BPF_REG_3, 0xbeef, 2),
12908 BPF_MOV64_IMM(BPF_REG_4, 0),
12910 BPF_MOV64_IMM(BPF_REG_4, 1),
12911 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_4, -16),
12912 BPF_EMIT_CALL(BPF_FUNC_ktime_get_ns),
12913 BPF_LDX_MEM(BPF_DW, BPF_REG_5, BPF_REG_10, -16),
12914 BPF_JMP_IMM(BPF_JEQ, BPF_REG_5, 0, 2),
12915 BPF_MOV64_IMM(BPF_REG_6, 0),
12916 BPF_ST_MEM(BPF_DW, BPF_REG_6, 0, 0xdead),
12919 .fixup_map_hash_8b = { 3 },
12920 .errstr = "R6 invalid mem access 'inv'",
12922 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
12925 "search pruning: all branches should be verified (invalid stack access)",
12927 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
12928 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
12929 BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
12930 BPF_LD_MAP_FD(BPF_REG_1, 0),
12931 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
12932 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 8),
12933 BPF_LDX_MEM(BPF_DW, BPF_REG_3, BPF_REG_0, 0),
12934 BPF_MOV64_IMM(BPF_REG_4, 0),
12935 BPF_JMP_IMM(BPF_JEQ, BPF_REG_3, 0xbeef, 2),
12936 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_4, -16),
12938 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_4, -24),
12939 BPF_EMIT_CALL(BPF_FUNC_ktime_get_ns),
12940 BPF_LDX_MEM(BPF_DW, BPF_REG_5, BPF_REG_10, -16),
12943 .fixup_map_hash_8b = { 3 },
12944 .errstr = "invalid read from stack off -16+0 size 8",
12946 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
12949 "jit: lsh, rsh, arsh by 1",
12951 BPF_MOV64_IMM(BPF_REG_0, 1),
12952 BPF_MOV64_IMM(BPF_REG_1, 0xff),
12953 BPF_ALU64_IMM(BPF_LSH, BPF_REG_1, 1),
12954 BPF_ALU32_IMM(BPF_LSH, BPF_REG_1, 1),
12955 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0x3fc, 1),
12957 BPF_ALU64_IMM(BPF_RSH, BPF_REG_1, 1),
12958 BPF_ALU32_IMM(BPF_RSH, BPF_REG_1, 1),
12959 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0xff, 1),
12961 BPF_ALU64_IMM(BPF_ARSH, BPF_REG_1, 1),
12962 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0x7f, 1),
12964 BPF_MOV64_IMM(BPF_REG_0, 2),
12971 "jit: mov32 for ldimm64, 1",
12973 BPF_MOV64_IMM(BPF_REG_0, 2),
12974 BPF_LD_IMM64(BPF_REG_1, 0xfeffffffffffffffULL),
12975 BPF_ALU64_IMM(BPF_RSH, BPF_REG_1, 32),
12976 BPF_LD_IMM64(BPF_REG_2, 0xfeffffffULL),
12977 BPF_JMP_REG(BPF_JEQ, BPF_REG_1, BPF_REG_2, 1),
12978 BPF_MOV64_IMM(BPF_REG_0, 1),
12985 "jit: mov32 for ldimm64, 2",
12987 BPF_MOV64_IMM(BPF_REG_0, 1),
12988 BPF_LD_IMM64(BPF_REG_1, 0x1ffffffffULL),
12989 BPF_LD_IMM64(BPF_REG_2, 0xffffffffULL),
12990 BPF_JMP_REG(BPF_JEQ, BPF_REG_1, BPF_REG_2, 1),
12991 BPF_MOV64_IMM(BPF_REG_0, 2),
12998 "jit: various mul tests",
13000 BPF_LD_IMM64(BPF_REG_2, 0xeeff0d413122ULL),
13001 BPF_LD_IMM64(BPF_REG_0, 0xfefefeULL),
13002 BPF_LD_IMM64(BPF_REG_1, 0xefefefULL),
13003 BPF_ALU64_REG(BPF_MUL, BPF_REG_0, BPF_REG_1),
13004 BPF_JMP_REG(BPF_JEQ, BPF_REG_0, BPF_REG_2, 2),
13005 BPF_MOV64_IMM(BPF_REG_0, 1),
13007 BPF_LD_IMM64(BPF_REG_3, 0xfefefeULL),
13008 BPF_ALU64_REG(BPF_MUL, BPF_REG_3, BPF_REG_1),
13009 BPF_JMP_REG(BPF_JEQ, BPF_REG_3, BPF_REG_2, 2),
13010 BPF_MOV64_IMM(BPF_REG_0, 1),
13012 BPF_MOV32_REG(BPF_REG_2, BPF_REG_2),
13013 BPF_LD_IMM64(BPF_REG_0, 0xfefefeULL),
13014 BPF_ALU32_REG(BPF_MUL, BPF_REG_0, BPF_REG_1),
13015 BPF_JMP_REG(BPF_JEQ, BPF_REG_0, BPF_REG_2, 2),
13016 BPF_MOV64_IMM(BPF_REG_0, 1),
13018 BPF_LD_IMM64(BPF_REG_3, 0xfefefeULL),
13019 BPF_ALU32_REG(BPF_MUL, BPF_REG_3, BPF_REG_1),
13020 BPF_JMP_REG(BPF_JEQ, BPF_REG_3, BPF_REG_2, 2),
13021 BPF_MOV64_IMM(BPF_REG_0, 1),
13023 BPF_LD_IMM64(BPF_REG_0, 0x952a7bbcULL),
13024 BPF_LD_IMM64(BPF_REG_1, 0xfefefeULL),
13025 BPF_LD_IMM64(BPF_REG_2, 0xeeff0d413122ULL),
13026 BPF_ALU32_REG(BPF_MUL, BPF_REG_2, BPF_REG_1),
13027 BPF_JMP_REG(BPF_JEQ, BPF_REG_2, BPF_REG_0, 2),
13028 BPF_MOV64_IMM(BPF_REG_0, 1),
13030 BPF_MOV64_IMM(BPF_REG_0, 2),
13037 "xadd/w check unaligned stack",
13039 BPF_MOV64_IMM(BPF_REG_0, 1),
13040 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -8),
13041 BPF_STX_XADD(BPF_W, BPF_REG_10, BPF_REG_0, -7),
13042 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_10, -8),
13046 .errstr = "misaligned stack access off",
13047 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
13050 "xadd/w check unaligned map",
13052 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
13053 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
13054 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
13055 BPF_LD_MAP_FD(BPF_REG_1, 0),
13056 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
13057 BPF_FUNC_map_lookup_elem),
13058 BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 1),
13060 BPF_MOV64_IMM(BPF_REG_1, 1),
13061 BPF_STX_XADD(BPF_W, BPF_REG_0, BPF_REG_1, 3),
13062 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_0, 3),
13065 .fixup_map_hash_8b = { 3 },
13067 .errstr = "misaligned value access off",
13068 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
13071 "xadd/w check unaligned pkt",
13073 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
13074 offsetof(struct xdp_md, data)),
13075 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
13076 offsetof(struct xdp_md, data_end)),
13077 BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
13078 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
13079 BPF_JMP_REG(BPF_JLT, BPF_REG_1, BPF_REG_3, 2),
13080 BPF_MOV64_IMM(BPF_REG_0, 99),
13081 BPF_JMP_IMM(BPF_JA, 0, 0, 6),
13082 BPF_MOV64_IMM(BPF_REG_0, 1),
13083 BPF_ST_MEM(BPF_W, BPF_REG_2, 0, 0),
13084 BPF_ST_MEM(BPF_W, BPF_REG_2, 3, 0),
13085 BPF_STX_XADD(BPF_W, BPF_REG_2, BPF_REG_0, 1),
13086 BPF_STX_XADD(BPF_W, BPF_REG_2, BPF_REG_0, 2),
13087 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_2, 1),
13091 .errstr = "BPF_XADD stores into R2 pkt is not allowed",
13092 .prog_type = BPF_PROG_TYPE_XDP,
13093 .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
13096 "xadd/w check whether src/dst got mangled, 1",
13098 BPF_MOV64_IMM(BPF_REG_0, 1),
13099 BPF_MOV64_REG(BPF_REG_6, BPF_REG_0),
13100 BPF_MOV64_REG(BPF_REG_7, BPF_REG_10),
13101 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -8),
13102 BPF_STX_XADD(BPF_DW, BPF_REG_10, BPF_REG_0, -8),
13103 BPF_STX_XADD(BPF_DW, BPF_REG_10, BPF_REG_0, -8),
13104 BPF_JMP_REG(BPF_JNE, BPF_REG_6, BPF_REG_0, 3),
13105 BPF_JMP_REG(BPF_JNE, BPF_REG_7, BPF_REG_10, 2),
13106 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_10, -8),
13108 BPF_MOV64_IMM(BPF_REG_0, 42),
13112 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
13116 "xadd/w check whether src/dst got mangled, 2",
13118 BPF_MOV64_IMM(BPF_REG_0, 1),
13119 BPF_MOV64_REG(BPF_REG_6, BPF_REG_0),
13120 BPF_MOV64_REG(BPF_REG_7, BPF_REG_10),
13121 BPF_STX_MEM(BPF_W, BPF_REG_10, BPF_REG_0, -8),
13122 BPF_STX_XADD(BPF_W, BPF_REG_10, BPF_REG_0, -8),
13123 BPF_STX_XADD(BPF_W, BPF_REG_10, BPF_REG_0, -8),
13124 BPF_JMP_REG(BPF_JNE, BPF_REG_6, BPF_REG_0, 3),
13125 BPF_JMP_REG(BPF_JNE, BPF_REG_7, BPF_REG_10, 2),
13126 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_10, -8),
13128 BPF_MOV64_IMM(BPF_REG_0, 42),
13132 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
13136 "bpf_get_stack return R0 within range",
13138 BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
13139 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
13140 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
13141 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
13142 BPF_LD_MAP_FD(BPF_REG_1, 0),
13143 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
13144 BPF_FUNC_map_lookup_elem),
13145 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 28),
13146 BPF_MOV64_REG(BPF_REG_7, BPF_REG_0),
13147 BPF_MOV64_IMM(BPF_REG_9, sizeof(struct test_val)),
13148 BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
13149 BPF_MOV64_REG(BPF_REG_2, BPF_REG_7),
13150 BPF_MOV64_IMM(BPF_REG_3, sizeof(struct test_val)),
13151 BPF_MOV64_IMM(BPF_REG_4, 256),
13152 BPF_EMIT_CALL(BPF_FUNC_get_stack),
13153 BPF_MOV64_IMM(BPF_REG_1, 0),
13154 BPF_MOV64_REG(BPF_REG_8, BPF_REG_0),
13155 BPF_ALU64_IMM(BPF_LSH, BPF_REG_8, 32),
13156 BPF_ALU64_IMM(BPF_ARSH, BPF_REG_8, 32),
13157 BPF_JMP_REG(BPF_JSLT, BPF_REG_1, BPF_REG_8, 16),
13158 BPF_ALU64_REG(BPF_SUB, BPF_REG_9, BPF_REG_8),
13159 BPF_MOV64_REG(BPF_REG_2, BPF_REG_7),
13160 BPF_ALU64_REG(BPF_ADD, BPF_REG_2, BPF_REG_8),
13161 BPF_MOV64_REG(BPF_REG_1, BPF_REG_9),
13162 BPF_ALU64_IMM(BPF_LSH, BPF_REG_1, 32),
13163 BPF_ALU64_IMM(BPF_ARSH, BPF_REG_1, 32),
13164 BPF_MOV64_REG(BPF_REG_3, BPF_REG_2),
13165 BPF_ALU64_REG(BPF_ADD, BPF_REG_3, BPF_REG_1),
13166 BPF_MOV64_REG(BPF_REG_1, BPF_REG_7),
13167 BPF_MOV64_IMM(BPF_REG_5, sizeof(struct test_val)),
13168 BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_5),
13169 BPF_JMP_REG(BPF_JGE, BPF_REG_3, BPF_REG_1, 4),
13170 BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
13171 BPF_MOV64_REG(BPF_REG_3, BPF_REG_9),
13172 BPF_MOV64_IMM(BPF_REG_4, 0),
13173 BPF_EMIT_CALL(BPF_FUNC_get_stack),
13176 .fixup_map_hash_48b = { 4 },
13178 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
13181 "ld_abs: invalid op 1",
13183 BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
13184 BPF_LD_ABS(BPF_DW, 0),
13187 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
13189 .errstr = "unknown opcode",
13192 "ld_abs: invalid op 2",
13194 BPF_MOV32_IMM(BPF_REG_0, 256),
13195 BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
13196 BPF_LD_IND(BPF_DW, BPF_REG_0, 0),
13199 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
13201 .errstr = "unknown opcode",
13204 "ld_abs: nmap reduced",
13206 BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
13207 BPF_LD_ABS(BPF_H, 12),
13208 BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0x806, 28),
13209 BPF_LD_ABS(BPF_H, 12),
13210 BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0x806, 26),
13211 BPF_MOV32_IMM(BPF_REG_0, 18),
13212 BPF_STX_MEM(BPF_W, BPF_REG_10, BPF_REG_0, -64),
13213 BPF_LDX_MEM(BPF_W, BPF_REG_7, BPF_REG_10, -64),
13214 BPF_LD_IND(BPF_W, BPF_REG_7, 14),
13215 BPF_STX_MEM(BPF_W, BPF_REG_10, BPF_REG_0, -60),
13216 BPF_MOV32_IMM(BPF_REG_0, 280971478),
13217 BPF_STX_MEM(BPF_W, BPF_REG_10, BPF_REG_0, -56),
13218 BPF_LDX_MEM(BPF_W, BPF_REG_7, BPF_REG_10, -56),
13219 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_10, -60),
13220 BPF_ALU32_REG(BPF_SUB, BPF_REG_0, BPF_REG_7),
13221 BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 15),
13222 BPF_LD_ABS(BPF_H, 12),
13223 BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0x806, 13),
13224 BPF_MOV32_IMM(BPF_REG_0, 22),
13225 BPF_STX_MEM(BPF_W, BPF_REG_10, BPF_REG_0, -56),
13226 BPF_LDX_MEM(BPF_W, BPF_REG_7, BPF_REG_10, -56),
13227 BPF_LD_IND(BPF_H, BPF_REG_7, 14),
13228 BPF_STX_MEM(BPF_W, BPF_REG_10, BPF_REG_0, -52),
13229 BPF_MOV32_IMM(BPF_REG_0, 17366),
13230 BPF_STX_MEM(BPF_W, BPF_REG_10, BPF_REG_0, -48),
13231 BPF_LDX_MEM(BPF_W, BPF_REG_7, BPF_REG_10, -48),
13232 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_10, -52),
13233 BPF_ALU32_REG(BPF_SUB, BPF_REG_0, BPF_REG_7),
13234 BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 2),
13235 BPF_MOV32_IMM(BPF_REG_0, 256),
13237 BPF_MOV32_IMM(BPF_REG_0, 0),
13241 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0x08, 0x06, 0,
13242 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
13243 0x10, 0xbf, 0x48, 0xd6, 0x43, 0xd6,
13245 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
13250 "ld_abs: div + abs, test 1",
13252 BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_1),
13253 BPF_LD_ABS(BPF_B, 3),
13254 BPF_ALU64_IMM(BPF_MOV, BPF_REG_2, 2),
13255 BPF_ALU32_REG(BPF_DIV, BPF_REG_0, BPF_REG_2),
13256 BPF_ALU64_REG(BPF_MOV, BPF_REG_8, BPF_REG_0),
13257 BPF_LD_ABS(BPF_B, 4),
13258 BPF_ALU64_REG(BPF_ADD, BPF_REG_8, BPF_REG_0),
13259 BPF_LD_IND(BPF_B, BPF_REG_8, -70),
13263 10, 20, 30, 40, 50,
13265 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
13270 "ld_abs: div + abs, test 2",
13272 BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_1),
13273 BPF_LD_ABS(BPF_B, 3),
13274 BPF_ALU64_IMM(BPF_MOV, BPF_REG_2, 2),
13275 BPF_ALU32_REG(BPF_DIV, BPF_REG_0, BPF_REG_2),
13276 BPF_ALU64_REG(BPF_MOV, BPF_REG_8, BPF_REG_0),
13277 BPF_LD_ABS(BPF_B, 128),
13278 BPF_ALU64_REG(BPF_ADD, BPF_REG_8, BPF_REG_0),
13279 BPF_LD_IND(BPF_B, BPF_REG_8, -70),
13283 10, 20, 30, 40, 50,
13285 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
13290 "ld_abs: div + abs, test 3",
13292 BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_1),
13293 BPF_ALU64_IMM(BPF_MOV, BPF_REG_7, 0),
13294 BPF_LD_ABS(BPF_B, 3),
13295 BPF_ALU32_REG(BPF_DIV, BPF_REG_0, BPF_REG_7),
13299 10, 20, 30, 40, 50,
13301 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
13306 "ld_abs: div + abs, test 4",
13308 BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_1),
13309 BPF_ALU64_IMM(BPF_MOV, BPF_REG_7, 0),
13310 BPF_LD_ABS(BPF_B, 256),
13311 BPF_ALU32_REG(BPF_DIV, BPF_REG_0, BPF_REG_7),
13315 10, 20, 30, 40, 50,
13317 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
13322 "ld_abs: vlan + abs, test 1",
13327 .fill_helper = bpf_fill_ld_abs_vlan_push_pop,
13328 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
13333 "ld_abs: vlan + abs, test 2",
13335 BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
13336 BPF_LD_ABS(BPF_B, 0),
13337 BPF_LD_ABS(BPF_H, 0),
13338 BPF_LD_ABS(BPF_W, 0),
13339 BPF_MOV64_REG(BPF_REG_7, BPF_REG_6),
13340 BPF_MOV64_IMM(BPF_REG_6, 0),
13341 BPF_MOV64_REG(BPF_REG_1, BPF_REG_7),
13342 BPF_MOV64_IMM(BPF_REG_2, 1),
13343 BPF_MOV64_IMM(BPF_REG_3, 2),
13344 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
13345 BPF_FUNC_skb_vlan_push),
13346 BPF_MOV64_REG(BPF_REG_6, BPF_REG_7),
13347 BPF_LD_ABS(BPF_B, 0),
13348 BPF_LD_ABS(BPF_H, 0),
13349 BPF_LD_ABS(BPF_W, 0),
13350 BPF_MOV64_IMM(BPF_REG_0, 42),
13356 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
13361 "ld_abs: jump around ld_abs",
13366 .fill_helper = bpf_fill_jump_around_ld_abs,
13367 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
13372 "ld_dw: xor semi-random 64 bit imms, test 1",
13375 .fill_helper = bpf_fill_rand_ld_dw,
13376 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
13381 "ld_dw: xor semi-random 64 bit imms, test 2",
13384 .fill_helper = bpf_fill_rand_ld_dw,
13385 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
13390 "ld_dw: xor semi-random 64 bit imms, test 3",
13393 .fill_helper = bpf_fill_rand_ld_dw,
13394 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
13399 "ld_dw: xor semi-random 64 bit imms, test 4",
13402 .fill_helper = bpf_fill_rand_ld_dw,
13403 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
13408 "pass unmodified ctx pointer to helper",
13410 BPF_MOV64_IMM(BPF_REG_2, 0),
13411 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
13412 BPF_FUNC_csum_update),
13413 BPF_MOV64_IMM(BPF_REG_0, 0),
13416 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
13420 "reference tracking: leak potential reference",
13423 BPF_MOV64_REG(BPF_REG_6, BPF_REG_0), /* leak reference */
13426 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
13427 .errstr = "Unreleased reference",
13431 "reference tracking: leak potential reference on stack",
13434 BPF_MOV64_REG(BPF_REG_4, BPF_REG_10),
13435 BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, -8),
13436 BPF_STX_MEM(BPF_DW, BPF_REG_4, BPF_REG_0, 0),
13437 BPF_MOV64_IMM(BPF_REG_0, 0),
13440 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
13441 .errstr = "Unreleased reference",
13445 "reference tracking: leak potential reference on stack 2",
13448 BPF_MOV64_REG(BPF_REG_4, BPF_REG_10),
13449 BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, -8),
13450 BPF_STX_MEM(BPF_DW, BPF_REG_4, BPF_REG_0, 0),
13451 BPF_MOV64_IMM(BPF_REG_0, 0),
13452 BPF_ST_MEM(BPF_DW, BPF_REG_4, 0, 0),
13455 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
13456 .errstr = "Unreleased reference",
13460 "reference tracking: zero potential reference",
13463 BPF_MOV64_IMM(BPF_REG_0, 0), /* leak reference */
13466 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
13467 .errstr = "Unreleased reference",
13471 "reference tracking: copy and zero potential references",
13474 BPF_MOV64_REG(BPF_REG_7, BPF_REG_0),
13475 BPF_MOV64_IMM(BPF_REG_0, 0),
13476 BPF_MOV64_IMM(BPF_REG_7, 0), /* leak reference */
13479 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
13480 .errstr = "Unreleased reference",
13484 "reference tracking: release reference without check",
13487 /* reference in r0 may be NULL */
13488 BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
13489 BPF_MOV64_IMM(BPF_REG_2, 0),
13490 BPF_EMIT_CALL(BPF_FUNC_sk_release),
13493 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
13494 .errstr = "type=sock_or_null expected=sock",
13498 "reference tracking: release reference",
13501 BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
13502 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 1),
13503 BPF_EMIT_CALL(BPF_FUNC_sk_release),
13506 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
13510 "reference tracking: release reference 2",
13513 BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
13514 BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 1),
13516 BPF_EMIT_CALL(BPF_FUNC_sk_release),
13519 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
13523 "reference tracking: release reference twice",
13526 BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
13527 BPF_MOV64_REG(BPF_REG_6, BPF_REG_0),
13528 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 1),
13529 BPF_EMIT_CALL(BPF_FUNC_sk_release),
13530 BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
13531 BPF_EMIT_CALL(BPF_FUNC_sk_release),
13534 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
13535 .errstr = "type=inv expected=sock",
13539 "reference tracking: release reference twice inside branch",
13542 BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
13543 BPF_MOV64_REG(BPF_REG_6, BPF_REG_0),
13544 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 3), /* goto end */
13545 BPF_EMIT_CALL(BPF_FUNC_sk_release),
13546 BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
13547 BPF_EMIT_CALL(BPF_FUNC_sk_release),
13550 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
13551 .errstr = "type=inv expected=sock",
13555 "reference tracking: alloc, check, free in one subbranch",
13557 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
13558 offsetof(struct __sk_buff, data)),
13559 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
13560 offsetof(struct __sk_buff, data_end)),
13561 BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
13562 BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 16),
13563 /* if (offsetof(skb, mark) > data_len) exit; */
13564 BPF_JMP_REG(BPF_JLE, BPF_REG_0, BPF_REG_3, 1),
13566 BPF_LDX_MEM(BPF_W, BPF_REG_6, BPF_REG_2,
13567 offsetof(struct __sk_buff, mark)),
13569 BPF_JMP_IMM(BPF_JEQ, BPF_REG_6, 0, 1), /* mark == 0? */
13570 /* Leak reference in R0 */
13572 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 2), /* sk NULL? */
13573 BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
13574 BPF_EMIT_CALL(BPF_FUNC_sk_release),
13577 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
13578 .errstr = "Unreleased reference",
13580 .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
13583 "reference tracking: alloc, check, free in both subbranches",
13585 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
13586 offsetof(struct __sk_buff, data)),
13587 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
13588 offsetof(struct __sk_buff, data_end)),
13589 BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
13590 BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 16),
13591 /* if (offsetof(skb, mark) > data_len) exit; */
13592 BPF_JMP_REG(BPF_JLE, BPF_REG_0, BPF_REG_3, 1),
13594 BPF_LDX_MEM(BPF_W, BPF_REG_6, BPF_REG_2,
13595 offsetof(struct __sk_buff, mark)),
13597 BPF_JMP_IMM(BPF_JEQ, BPF_REG_6, 0, 4), /* mark == 0? */
13598 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 2), /* sk NULL? */
13599 BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
13600 BPF_EMIT_CALL(BPF_FUNC_sk_release),
13602 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 2), /* sk NULL? */
13603 BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
13604 BPF_EMIT_CALL(BPF_FUNC_sk_release),
13607 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
13609 .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
13612 "reference tracking in call: free reference in subprog",
13615 BPF_MOV64_REG(BPF_REG_1, BPF_REG_0), /* unchecked reference */
13616 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 2),
13617 BPF_MOV64_IMM(BPF_REG_0, 0),
13621 BPF_MOV64_REG(BPF_REG_2, BPF_REG_1),
13622 BPF_JMP_IMM(BPF_JEQ, BPF_REG_2, 0, 1),
13623 BPF_EMIT_CALL(BPF_FUNC_sk_release),
13626 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
13630 "pass modified ctx pointer to helper, 1",
13632 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -612),
13633 BPF_MOV64_IMM(BPF_REG_2, 0),
13634 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
13635 BPF_FUNC_csum_update),
13636 BPF_MOV64_IMM(BPF_REG_0, 0),
13639 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
13641 .errstr = "dereference of modified ctx ptr",
13644 "pass modified ctx pointer to helper, 2",
13646 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -612),
13647 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
13648 BPF_FUNC_get_socket_cookie),
13649 BPF_MOV64_IMM(BPF_REG_0, 0),
13652 .result_unpriv = REJECT,
13654 .errstr_unpriv = "dereference of modified ctx ptr",
13655 .errstr = "dereference of modified ctx ptr",
13658 "pass modified ctx pointer to helper, 3",
13660 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1, 0),
13661 BPF_ALU64_IMM(BPF_AND, BPF_REG_3, 4),
13662 BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_3),
13663 BPF_MOV64_IMM(BPF_REG_2, 0),
13664 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
13665 BPF_FUNC_csum_update),
13666 BPF_MOV64_IMM(BPF_REG_0, 0),
13669 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
13671 .errstr = "variable ctx access var_off=(0x0; 0x4)",
13674 "mov64 src == dst",
13676 BPF_MOV64_IMM(BPF_REG_2, 0),
13677 BPF_MOV64_REG(BPF_REG_2, BPF_REG_2),
13678 // Check bounds are OK
13679 BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_2),
13680 BPF_MOV64_IMM(BPF_REG_0, 0),
13683 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
13687 "mov64 src != dst",
13689 BPF_MOV64_IMM(BPF_REG_3, 0),
13690 BPF_MOV64_REG(BPF_REG_2, BPF_REG_3),
13691 // Check bounds are OK
13692 BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_2),
13693 BPF_MOV64_IMM(BPF_REG_0, 0),
13696 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
13702 BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_1),
13703 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_get_prandom_u32),
13704 BPF_ALU64_REG(BPF_MOV, BPF_REG_7, BPF_REG_0),
13705 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 5),
13706 BPF_MOV64_IMM(BPF_REG_0, 0),
13707 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_6, -8),
13708 BPF_LDX_MEM(BPF_DW, BPF_REG_6, BPF_REG_10, -8),
13709 BPF_STX_MEM(BPF_B, BPF_REG_10, BPF_REG_7, -9),
13710 BPF_LDX_MEM(BPF_B, BPF_REG_7, BPF_REG_10, -9),
13711 BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 0),
13712 BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 0),
13713 BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 0),
13714 BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 0),
13718 .result_unpriv = ACCEPT,
13719 .insn_processed = 15,
13722 "reference tracking in call: free reference in subprog and outside",
13725 BPF_MOV64_REG(BPF_REG_1, BPF_REG_0), /* unchecked reference */
13726 BPF_MOV64_REG(BPF_REG_6, BPF_REG_0),
13727 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 3),
13728 BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
13729 BPF_EMIT_CALL(BPF_FUNC_sk_release),
13733 BPF_MOV64_REG(BPF_REG_2, BPF_REG_1),
13734 BPF_JMP_IMM(BPF_JEQ, BPF_REG_2, 0, 1),
13735 BPF_EMIT_CALL(BPF_FUNC_sk_release),
13738 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
13739 .errstr = "type=inv expected=sock",
13743 "reference tracking in call: alloc & leak reference in subprog",
13745 BPF_MOV64_REG(BPF_REG_4, BPF_REG_10),
13746 BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, -8),
13747 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 3),
13748 BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
13749 BPF_MOV64_IMM(BPF_REG_0, 0),
13753 BPF_MOV64_REG(BPF_REG_6, BPF_REG_4),
13755 /* spill unchecked sk_ptr into stack of caller */
13756 BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_0, 0),
13757 BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
13760 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
13761 .errstr = "Unreleased reference",
13765 "reference tracking in call: alloc in subprog, release outside",
13767 BPF_MOV64_REG(BPF_REG_4, BPF_REG_10),
13768 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 4),
13769 BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
13770 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 1),
13771 BPF_EMIT_CALL(BPF_FUNC_sk_release),
13776 BPF_EXIT_INSN(), /* return sk */
13778 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
13779 .retval = POINTER_VALUE,
13783 "reference tracking in call: sk_ptr leak into caller stack",
13785 BPF_MOV64_REG(BPF_REG_4, BPF_REG_10),
13786 BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, -8),
13787 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 2),
13788 BPF_MOV64_IMM(BPF_REG_0, 0),
13792 BPF_MOV64_REG(BPF_REG_5, BPF_REG_10),
13793 BPF_ALU64_IMM(BPF_ADD, BPF_REG_5, -8),
13794 BPF_STX_MEM(BPF_DW, BPF_REG_5, BPF_REG_4, 0),
13795 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 5),
13796 /* spill unchecked sk_ptr into stack of caller */
13797 BPF_MOV64_REG(BPF_REG_5, BPF_REG_10),
13798 BPF_ALU64_IMM(BPF_ADD, BPF_REG_5, -8),
13799 BPF_LDX_MEM(BPF_DW, BPF_REG_4, BPF_REG_5, 0),
13800 BPF_STX_MEM(BPF_DW, BPF_REG_4, BPF_REG_0, 0),
13807 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
13808 .errstr = "Unreleased reference",
13812 "reference tracking in call: sk_ptr spill into caller stack",
13814 BPF_MOV64_REG(BPF_REG_4, BPF_REG_10),
13815 BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, -8),
13816 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 2),
13817 BPF_MOV64_IMM(BPF_REG_0, 0),
13821 BPF_MOV64_REG(BPF_REG_5, BPF_REG_10),
13822 BPF_ALU64_IMM(BPF_ADD, BPF_REG_5, -8),
13823 BPF_STX_MEM(BPF_DW, BPF_REG_5, BPF_REG_4, 0),
13824 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 8),
13825 /* spill unchecked sk_ptr into stack of caller */
13826 BPF_MOV64_REG(BPF_REG_5, BPF_REG_10),
13827 BPF_ALU64_IMM(BPF_ADD, BPF_REG_5, -8),
13828 BPF_LDX_MEM(BPF_DW, BPF_REG_4, BPF_REG_5, 0),
13829 BPF_STX_MEM(BPF_DW, BPF_REG_4, BPF_REG_0, 0),
13830 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 2),
13831 /* now the sk_ptr is verified, free the reference */
13832 BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_4, 0),
13833 BPF_EMIT_CALL(BPF_FUNC_sk_release),
13840 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
13844 "reference tracking: allow LD_ABS",
13846 BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
13848 BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
13849 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 1),
13850 BPF_EMIT_CALL(BPF_FUNC_sk_release),
13851 BPF_LD_ABS(BPF_B, 0),
13852 BPF_LD_ABS(BPF_H, 0),
13853 BPF_LD_ABS(BPF_W, 0),
13856 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
13860 "reference tracking: forbid LD_ABS while holding reference",
13862 BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
13864 BPF_LD_ABS(BPF_B, 0),
13865 BPF_LD_ABS(BPF_H, 0),
13866 BPF_LD_ABS(BPF_W, 0),
13867 BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
13868 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 1),
13869 BPF_EMIT_CALL(BPF_FUNC_sk_release),
13872 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
13873 .errstr = "BPF_LD_[ABS|IND] cannot be mixed with socket references",
13877 "reference tracking: allow LD_IND",
13879 BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
13881 BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
13882 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 1),
13883 BPF_EMIT_CALL(BPF_FUNC_sk_release),
13884 BPF_MOV64_IMM(BPF_REG_7, 1),
13885 BPF_LD_IND(BPF_W, BPF_REG_7, -0x200000),
13886 BPF_MOV64_REG(BPF_REG_0, BPF_REG_7),
13889 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
13894 "reference tracking: forbid LD_IND while holding reference",
13896 BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
13898 BPF_MOV64_REG(BPF_REG_4, BPF_REG_0),
13899 BPF_MOV64_IMM(BPF_REG_7, 1),
13900 BPF_LD_IND(BPF_W, BPF_REG_7, -0x200000),
13901 BPF_MOV64_REG(BPF_REG_0, BPF_REG_7),
13902 BPF_MOV64_REG(BPF_REG_1, BPF_REG_4),
13903 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, 1),
13904 BPF_EMIT_CALL(BPF_FUNC_sk_release),
13907 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
13908 .errstr = "BPF_LD_[ABS|IND] cannot be mixed with socket references",
13912 "reference tracking: check reference or tail call",
13914 BPF_MOV64_REG(BPF_REG_7, BPF_REG_1),
13916 /* if (sk) bpf_sk_release() */
13917 BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
13918 BPF_JMP_IMM(BPF_JNE, BPF_REG_1, 0, 7),
13919 /* bpf_tail_call() */
13920 BPF_MOV64_IMM(BPF_REG_3, 2),
13921 BPF_LD_MAP_FD(BPF_REG_2, 0),
13922 BPF_MOV64_REG(BPF_REG_1, BPF_REG_7),
13923 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
13924 BPF_FUNC_tail_call),
13925 BPF_MOV64_IMM(BPF_REG_0, 0),
13927 BPF_EMIT_CALL(BPF_FUNC_sk_release),
13930 .fixup_prog1 = { 17 },
13931 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
13935 "reference tracking: release reference then tail call",
13937 BPF_MOV64_REG(BPF_REG_7, BPF_REG_1),
13939 /* if (sk) bpf_sk_release() */
13940 BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
13941 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, 1),
13942 BPF_EMIT_CALL(BPF_FUNC_sk_release),
13943 /* bpf_tail_call() */
13944 BPF_MOV64_IMM(BPF_REG_3, 2),
13945 BPF_LD_MAP_FD(BPF_REG_2, 0),
13946 BPF_MOV64_REG(BPF_REG_1, BPF_REG_7),
13947 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
13948 BPF_FUNC_tail_call),
13949 BPF_MOV64_IMM(BPF_REG_0, 0),
13952 .fixup_prog1 = { 18 },
13953 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
13957 "reference tracking: leak possible reference over tail call",
13959 BPF_MOV64_REG(BPF_REG_7, BPF_REG_1),
13960 /* Look up socket and store in REG_6 */
13962 /* bpf_tail_call() */
13963 BPF_MOV64_REG(BPF_REG_6, BPF_REG_0),
13964 BPF_MOV64_IMM(BPF_REG_3, 2),
13965 BPF_LD_MAP_FD(BPF_REG_2, 0),
13966 BPF_MOV64_REG(BPF_REG_1, BPF_REG_7),
13967 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
13968 BPF_FUNC_tail_call),
13969 BPF_MOV64_IMM(BPF_REG_0, 0),
13970 /* if (sk) bpf_sk_release() */
13971 BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
13972 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, 1),
13973 BPF_EMIT_CALL(BPF_FUNC_sk_release),
13976 .fixup_prog1 = { 16 },
13977 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
13978 .errstr = "tail_call would lead to reference leak",
13982 "reference tracking: leak checked reference over tail call",
13984 BPF_MOV64_REG(BPF_REG_7, BPF_REG_1),
13985 /* Look up socket and store in REG_6 */
13987 BPF_MOV64_REG(BPF_REG_6, BPF_REG_0),
13988 /* if (!sk) goto end */
13989 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 7),
13990 /* bpf_tail_call() */
13991 BPF_MOV64_IMM(BPF_REG_3, 0),
13992 BPF_LD_MAP_FD(BPF_REG_2, 0),
13993 BPF_MOV64_REG(BPF_REG_1, BPF_REG_7),
13994 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
13995 BPF_FUNC_tail_call),
13996 BPF_MOV64_IMM(BPF_REG_0, 0),
13997 BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
13998 BPF_EMIT_CALL(BPF_FUNC_sk_release),
14001 .fixup_prog1 = { 17 },
14002 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
14003 .errstr = "tail_call would lead to reference leak",
14007 "reference tracking: mangle and release sock_or_null",
14010 BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
14011 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 5),
14012 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 1),
14013 BPF_EMIT_CALL(BPF_FUNC_sk_release),
14016 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
14017 .errstr = "R1 pointer arithmetic on sock_or_null prohibited",
14021 "reference tracking: mangle and release sock",
14024 BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
14025 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 2),
14026 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 5),
14027 BPF_EMIT_CALL(BPF_FUNC_sk_release),
14030 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
14031 .errstr = "R1 pointer arithmetic on sock prohibited",
14035 "reference tracking: access member",
14038 BPF_MOV64_REG(BPF_REG_6, BPF_REG_0),
14039 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 3),
14040 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_0, 4),
14041 BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
14042 BPF_EMIT_CALL(BPF_FUNC_sk_release),
14045 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
14049 "reference tracking: write to member",
14052 BPF_MOV64_REG(BPF_REG_6, BPF_REG_0),
14053 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 5),
14054 BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
14055 BPF_LD_IMM64(BPF_REG_2, 42),
14056 BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_2,
14057 offsetof(struct bpf_sock, mark)),
14058 BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
14059 BPF_EMIT_CALL(BPF_FUNC_sk_release),
14060 BPF_LD_IMM64(BPF_REG_0, 0),
14063 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
14064 .errstr = "cannot write into socket",
14068 "reference tracking: invalid 64-bit access of member",
14071 BPF_MOV64_REG(BPF_REG_6, BPF_REG_0),
14072 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 3),
14073 BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_0, 0),
14074 BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
14075 BPF_EMIT_CALL(BPF_FUNC_sk_release),
14078 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
14079 .errstr = "invalid bpf_sock access off=0 size=8",
14083 "reference tracking: access after release",
14086 BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
14087 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 2),
14088 BPF_EMIT_CALL(BPF_FUNC_sk_release),
14089 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1, 0),
14092 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
14093 .errstr = "!read_ok",
14097 "reference tracking: direct access for lookup",
14099 /* Check that the packet is at least 64B long */
14100 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
14101 offsetof(struct __sk_buff, data)),
14102 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
14103 offsetof(struct __sk_buff, data_end)),
14104 BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
14105 BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 64),
14106 BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 9),
14107 /* sk = sk_lookup_tcp(ctx, skb->data, ...) */
14108 BPF_MOV64_IMM(BPF_REG_3, sizeof(struct bpf_sock_tuple)),
14109 BPF_MOV64_IMM(BPF_REG_4, 0),
14110 BPF_MOV64_IMM(BPF_REG_5, 0),
14111 BPF_EMIT_CALL(BPF_FUNC_sk_lookup_tcp),
14112 BPF_MOV64_REG(BPF_REG_6, BPF_REG_0),
14113 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 3),
14114 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_0, 4),
14115 BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
14116 BPF_EMIT_CALL(BPF_FUNC_sk_release),
14119 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
14123 "calls: ctx read at start of subprog",
14125 BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
14126 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 5),
14127 BPF_JMP_REG(BPF_JSGT, BPF_REG_0, BPF_REG_0, 0),
14128 BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
14129 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 2),
14130 BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
14132 BPF_LDX_MEM(BPF_B, BPF_REG_9, BPF_REG_1, 0),
14133 BPF_MOV64_IMM(BPF_REG_0, 0),
14136 .prog_type = BPF_PROG_TYPE_SOCKET_FILTER,
14137 .errstr_unpriv = "function calls to other bpf functions are allowed for root only",
14138 .result_unpriv = REJECT,
14142 "check wire_len is not readable by sockets",
14144 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
14145 offsetof(struct __sk_buff, wire_len)),
14148 .errstr = "invalid bpf_context access",
14152 "check wire_len is readable by tc classifier",
14154 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
14155 offsetof(struct __sk_buff, wire_len)),
14158 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
14162 "check wire_len is not writable by tc classifier",
14164 BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_1,
14165 offsetof(struct __sk_buff, wire_len)),
14168 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
14169 .errstr = "invalid bpf_context access",
14170 .errstr_unpriv = "R1 leaks addr",
14174 "calls: cross frame pruning",
14176 /* r8 = !!random();
14179 * do something bad;
14181 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
14182 BPF_FUNC_get_prandom_u32),
14183 BPF_MOV64_IMM(BPF_REG_8, 0),
14184 BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 1),
14185 BPF_MOV64_IMM(BPF_REG_8, 1),
14186 BPF_MOV64_REG(BPF_REG_1, BPF_REG_8),
14187 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 4),
14188 BPF_JMP_IMM(BPF_JEQ, BPF_REG_8, 1, 1),
14189 BPF_LDX_MEM(BPF_B, BPF_REG_9, BPF_REG_1, 0),
14190 BPF_MOV64_IMM(BPF_REG_0, 0),
14192 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, 0),
14195 .prog_type = BPF_PROG_TYPE_SOCKET_FILTER,
14196 .errstr_unpriv = "function calls to other bpf functions are allowed for root only",
14197 .errstr = "!read_ok",
14201 "jset: functional",
14204 BPF_MOV64_IMM(BPF_REG_0, 0),
14205 /* prep for direct packet access via r2 */
14206 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
14207 offsetof(struct __sk_buff, data)),
14208 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
14209 offsetof(struct __sk_buff, data_end)),
14210 BPF_MOV64_REG(BPF_REG_4, BPF_REG_2),
14211 BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, 8),
14212 BPF_JMP_REG(BPF_JLE, BPF_REG_4, BPF_REG_3, 1),
14215 BPF_LDX_MEM(BPF_DW, BPF_REG_7, BPF_REG_2, 0),
14217 /* reg, bit 63 or bit 0 set, taken */
14218 BPF_LD_IMM64(BPF_REG_8, 0x8000000000000001),
14219 BPF_JMP_REG(BPF_JSET, BPF_REG_7, BPF_REG_8, 1),
14222 /* reg, bit 62, not taken */
14223 BPF_LD_IMM64(BPF_REG_8, 0x4000000000000000),
14224 BPF_JMP_REG(BPF_JSET, BPF_REG_7, BPF_REG_8, 1),
14225 BPF_JMP_IMM(BPF_JA, 0, 0, 1),
14228 /* imm, any bit set, taken */
14229 BPF_JMP_IMM(BPF_JSET, BPF_REG_7, -1, 1),
14232 /* imm, bit 31 set, taken */
14233 BPF_JMP_IMM(BPF_JSET, BPF_REG_7, 0x80000000, 1),
14236 /* all good - return r0 == 2 */
14237 BPF_MOV64_IMM(BPF_REG_0, 2),
14240 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
14245 .data64 = { (1ULL << 63) | (1U << 31) | (1U << 0), }
14248 .data64 = { (1ULL << 63) | (1U << 31), }
14251 .data64 = { (1ULL << 31) | (1U << 0), }
14254 .data64 = { (__u32)-1, }
14257 .data64 = { ~0x4000000000000000ULL, }
14263 .data64 = { ~0ULL, }
14268 "jset: sign-extend",
14271 BPF_MOV64_IMM(BPF_REG_0, 0),
14272 /* prep for direct packet access via r2 */
14273 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
14274 offsetof(struct __sk_buff, data)),
14275 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
14276 offsetof(struct __sk_buff, data_end)),
14277 BPF_MOV64_REG(BPF_REG_4, BPF_REG_2),
14278 BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, 8),
14279 BPF_JMP_REG(BPF_JLE, BPF_REG_4, BPF_REG_3, 1),
14282 BPF_LDX_MEM(BPF_DW, BPF_REG_7, BPF_REG_2, 0),
14284 BPF_JMP_IMM(BPF_JSET, BPF_REG_7, 0x80000000, 1),
14287 BPF_MOV64_IMM(BPF_REG_0, 2),
14290 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
14293 .data = { 1, 0, 0, 0, 0, 0, 0, 1, },
14296 "jset: known const compare",
14298 BPF_MOV64_IMM(BPF_REG_0, 1),
14299 BPF_JMP_IMM(BPF_JSET, BPF_REG_0, 1, 1),
14300 BPF_LDX_MEM(BPF_B, BPF_REG_8, BPF_REG_9, 0),
14303 .prog_type = BPF_PROG_TYPE_SOCKET_FILTER,
14304 .retval_unpriv = 1,
14305 .result_unpriv = ACCEPT,
14310 "jset: known const compare bad",
14312 BPF_MOV64_IMM(BPF_REG_0, 0),
14313 BPF_JMP_IMM(BPF_JSET, BPF_REG_0, 1, 1),
14314 BPF_LDX_MEM(BPF_B, BPF_REG_8, BPF_REG_9, 0),
14317 .prog_type = BPF_PROG_TYPE_SOCKET_FILTER,
14318 .errstr_unpriv = "!read_ok",
14319 .result_unpriv = REJECT,
14320 .errstr = "!read_ok",
14324 "jset: unknown const compare taken",
14326 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
14327 BPF_FUNC_get_prandom_u32),
14328 BPF_JMP_IMM(BPF_JSET, BPF_REG_0, 1, 1),
14329 BPF_JMP_IMM(BPF_JA, 0, 0, 1),
14330 BPF_LDX_MEM(BPF_B, BPF_REG_8, BPF_REG_9, 0),
14333 .prog_type = BPF_PROG_TYPE_SOCKET_FILTER,
14334 .errstr_unpriv = "!read_ok",
14335 .result_unpriv = REJECT,
14336 .errstr = "!read_ok",
14340 "jset: unknown const compare not taken",
14342 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
14343 BPF_FUNC_get_prandom_u32),
14344 BPF_JMP_IMM(BPF_JSET, BPF_REG_0, 1, 1),
14345 BPF_LDX_MEM(BPF_B, BPF_REG_8, BPF_REG_9, 0),
14348 .prog_type = BPF_PROG_TYPE_SOCKET_FILTER,
14349 .errstr_unpriv = "!read_ok",
14350 .result_unpriv = REJECT,
14351 .errstr = "!read_ok",
14355 "jset: half-known const compare",
14357 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
14358 BPF_FUNC_get_prandom_u32),
14359 BPF_ALU64_IMM(BPF_OR, BPF_REG_0, 2),
14360 BPF_JMP_IMM(BPF_JSET, BPF_REG_0, 3, 1),
14361 BPF_LDX_MEM(BPF_B, BPF_REG_8, BPF_REG_9, 0),
14362 BPF_MOV64_IMM(BPF_REG_0, 0),
14365 .prog_type = BPF_PROG_TYPE_SOCKET_FILTER,
14366 .result_unpriv = ACCEPT,
14372 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
14373 BPF_FUNC_get_prandom_u32),
14374 BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
14375 BPF_MOV64_IMM(BPF_REG_0, 0),
14376 BPF_ALU64_IMM(BPF_AND, BPF_REG_1, 0xff),
14377 BPF_JMP_IMM(BPF_JSET, BPF_REG_1, 0xf0, 3),
14378 BPF_JMP_IMM(BPF_JLT, BPF_REG_1, 0x10, 1),
14379 BPF_LDX_MEM(BPF_B, BPF_REG_8, BPF_REG_9, 0),
14381 BPF_JMP_IMM(BPF_JSET, BPF_REG_1, 0x10, 1),
14383 BPF_JMP_IMM(BPF_JGE, BPF_REG_1, 0x10, 1),
14384 BPF_LDX_MEM(BPF_B, BPF_REG_8, BPF_REG_9, 0),
14387 .prog_type = BPF_PROG_TYPE_SOCKET_FILTER,
14388 .result_unpriv = ACCEPT,
/* Determine the effective instruction count of a test program: scan the
 * fixed-size insns[] array backwards from MAX_INSNS for the last slot that
 * is not all-zero (an untouched BPF_EXIT/zero slot has code == 0 and
 * imm == 0).
 * NOTE(review): this extract is missing the function's interior lines
 * (local declaration, loop break and return) — presumably it breaks at the
 * first non-zero slot and returns len + 1; confirm against the full file.
 */
14393 static int probe_filter_length(const struct bpf_insn *fp)
14397 	for (len = MAX_INSNS - 1; len > 0; --len)
14398 		if (fp[len].code != 0 || fp[len].imm != 0)
/* Thin wrapper around bpf_create_map().  Hash maps are created with
 * BPF_F_NO_PREALLOC so the verifier exercises the non-preallocated path;
 * all other map types get no extra flags.  On failure the error is printed
 * (message says "hash map" even for non-hash types) and the negative fd is
 * presumably returned to the caller — error-path lines are not visible in
 * this extract.
 */
14403 static int create_map(uint32_t type, uint32_t size_key,
14404 		      uint32_t size_value, uint32_t max_elem)
14408 	fd = bpf_create_map(type, size_key, size_value, max_elem,
14409 			    type == BPF_MAP_TYPE_HASH ? BPF_F_NO_PREALLOC : 0);
14411 		printf("Failed to create hash map '%s'!\n", strerror(errno));
/* Load a minimal dummy program (just "return 42") of the given type.
 * Used as a tail-call target installed into the prog-array maps built by
 * create_prog_array().  Returns the program fd from bpf_load_program()
 * (negative on error).
 */
14416 static int create_prog_dummy1(enum bpf_prog_type prog_type)
14418 	struct bpf_insn prog[] = {
14419 		BPF_MOV64_IMM(BPF_REG_0, 42),
14423 	return bpf_load_program(prog_type, prog,
14424 				ARRAY_SIZE(prog), "GPL", 0, NULL, 0);
/* Load a dummy program that immediately tail-calls into slot @idx of the
 * prog-array map @mfd, and returns 41 if the tail call falls through
 * (e.g. empty slot).  Gives the test prog-array a self-referencing entry.
 * Returns the program fd (negative on error).
 */
14427 static int create_prog_dummy2(enum bpf_prog_type prog_type, int mfd, int idx)
14429 	struct bpf_insn prog[] = {
14430 		BPF_MOV64_IMM(BPF_REG_3, idx),
14431 		BPF_LD_MAP_FD(BPF_REG_2, mfd),
14432 		BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
14433 			     BPF_FUNC_tail_call),
14434 		BPF_MOV64_IMM(BPF_REG_0, 41),
14438 	return bpf_load_program(prog_type, prog,
14439 				ARRAY_SIZE(prog), "GPL", 0, NULL, 0);
/* Build a BPF_MAP_TYPE_PROG_ARRAY of @max_elem slots and populate it with
 * the two dummy programs: dummy1 at key p1key (declared on a line not
 * visible in this extract — presumably 0) and dummy2 at @p2key.  dummy2 is
 * given the map's own fd so it can tail-call back into the array.
 * Returns the map fd on success; the error-path cleanup (closing fds,
 * returning a negative value) is not visible in this extract — confirm
 * against the full file.
 */
14442 static int create_prog_array(enum bpf_prog_type prog_type, uint32_t max_elem,
14446 	int mfd, p1fd, p2fd;
14448 	mfd = bpf_create_map(BPF_MAP_TYPE_PROG_ARRAY, sizeof(int),
14449 			     sizeof(int), max_elem, 0);
14451 		printf("Failed to create prog array '%s'!\n", strerror(errno));
14455 	p1fd = create_prog_dummy1(prog_type);
14456 	p2fd = create_prog_dummy2(prog_type, mfd, p2key);
14457 	if (p1fd < 0 || p2fd < 0)
14459 	if (bpf_map_update_elem(mfd, &p1key, &p1fd, BPF_ANY) < 0)
14461 	if (bpf_map_update_elem(mfd, &p2key, &p2fd, BPF_ANY) < 0)
/* Create a one-element ARRAY_OF_MAPS whose inner template is a one-element
 * int array.  The inner map only serves as the template for the outer
 * map's value type and is closed before returning; callers get the outer
 * map's fd (negative on error, with a message printed).
 */
14474 static int create_map_in_map(void)
14476 	int inner_map_fd, outer_map_fd;
14478 	inner_map_fd = bpf_create_map(BPF_MAP_TYPE_ARRAY, sizeof(int),
14479 				      sizeof(int), 1, 0);
14480 	if (inner_map_fd < 0) {
14481 		printf("Failed to create array '%s'!\n", strerror(errno));
14482 		return inner_map_fd;
14485 	outer_map_fd = bpf_create_map_in_map(BPF_MAP_TYPE_ARRAY_OF_MAPS, NULL,
14486 					     sizeof(int), inner_map_fd, 1, 0);
14487 	if (outer_map_fd < 0)
14488 		printf("Failed to create array of maps '%s'!\n",
	/* Inner fd is no longer needed once the outer map holds the template. */
14491 	close(inner_map_fd);
14493 	return outer_map_fd;
/* Create a (per-cpu) cgroup-storage map keyed by bpf_cgroup_storage_key
 * with a TEST_DATA_LEN-byte value.  max_entries is 0 — the kernel sizes
 * cgroup storage implicitly.  Returns the map fd; error path prints a
 * message (the return on failure is on a line not visible in this extract).
 */
14496 static int create_cgroup_storage(bool percpu)
14498 	enum bpf_map_type type = percpu ? BPF_MAP_TYPE_PERCPU_CGROUP_STORAGE :
14499 					  BPF_MAP_TYPE_CGROUP_STORAGE;
14502 	fd = bpf_create_map(type, sizeof(struct bpf_cgroup_storage_key),
14503 			    TEST_DATA_LEN, 0, 0);
14505 		printf("Failed to create cgroup storage '%s'!\n",
14511 static char bpf_vlog[UINT_MAX >> 8];
/* Patch a test's instruction stream before loading: each fixup_* array in
 * struct bpf_test is a zero-terminated list of instruction indices whose
 * imm field must be overwritten with the fd of a freshly created map (or
 * prog array).  Created fds are recorded in map_fds[] (slot per map kind,
 * MAX_NR_MAPS total) so do_test_single() can close them afterwards.
 * NOTE(review): the "do {" openers of each fixup loop are on lines not
 * visible in this extract; each group reads as do { patch; advance; }
 * while (*fixup_x) in the full file — confirm there.
 */
14513 static void do_test_fixup(struct bpf_test *test, enum bpf_prog_type prog_type,
14514 			  struct bpf_insn *prog, int *map_fds)
14516 	int *fixup_map_hash_8b = test->fixup_map_hash_8b;
14517 	int *fixup_map_hash_48b = test->fixup_map_hash_48b;
14518 	int *fixup_map_hash_16b = test->fixup_map_hash_16b;
14519 	int *fixup_map_array_48b = test->fixup_map_array_48b;
14520 	int *fixup_map_sockmap = test->fixup_map_sockmap;
14521 	int *fixup_map_sockhash = test->fixup_map_sockhash;
14522 	int *fixup_map_xskmap = test->fixup_map_xskmap;
14523 	int *fixup_map_stacktrace = test->fixup_map_stacktrace;
14524 	int *fixup_prog1 = test->fixup_prog1;
14525 	int *fixup_prog2 = test->fixup_prog2;
14526 	int *fixup_map_in_map = test->fixup_map_in_map;
14527 	int *fixup_cgroup_storage = test->fixup_cgroup_storage;
14528 	int *fixup_percpu_cgroup_storage = test->fixup_percpu_cgroup_storage;
	/* Tests with generated instruction streams build them here first. */
14530 	if (test->fill_helper)
14531 		test->fill_helper(test);
14533 	/* Allocating HTs with 1 elem is fine here, since we only test
14534 	 * for verifier and not do a runtime lookup, so the only thing
14535 	 * that really matters is value size in this case.
14537 	if (*fixup_map_hash_8b) {
14538 		map_fds[0] = create_map(BPF_MAP_TYPE_HASH, sizeof(long long),
14539 					sizeof(long long), 1);
14541 			prog[*fixup_map_hash_8b].imm = map_fds[0];
14542 			fixup_map_hash_8b++;
14543 		} while (*fixup_map_hash_8b);
14546 	if (*fixup_map_hash_48b) {
14547 		map_fds[1] = create_map(BPF_MAP_TYPE_HASH, sizeof(long long),
14548 					sizeof(struct test_val), 1);
14550 			prog[*fixup_map_hash_48b].imm = map_fds[1];
14551 			fixup_map_hash_48b++;
14552 		} while (*fixup_map_hash_48b);
14555 	if (*fixup_map_hash_16b) {
14556 		map_fds[2] = create_map(BPF_MAP_TYPE_HASH, sizeof(long long),
14557 					sizeof(struct other_val), 1);
14559 			prog[*fixup_map_hash_16b].imm = map_fds[2];
14560 			fixup_map_hash_16b++;
14561 		} while (*fixup_map_hash_16b);
14564 	if (*fixup_map_array_48b) {
14565 		map_fds[3] = create_map(BPF_MAP_TYPE_ARRAY, sizeof(int),
14566 					sizeof(struct test_val), 1);
14568 			prog[*fixup_map_array_48b].imm = map_fds[3];
14569 			fixup_map_array_48b++;
14570 		} while (*fixup_map_array_48b);
	/* Prog arrays for tail-call tests: 4-slot and 8-slot variants. */
14573 	if (*fixup_prog1) {
14574 		map_fds[4] = create_prog_array(prog_type, 4, 0);
14576 			prog[*fixup_prog1].imm = map_fds[4];
14578 		} while (*fixup_prog1);
14581 	if (*fixup_prog2) {
14582 		map_fds[5] = create_prog_array(prog_type, 8, 7);
14584 			prog[*fixup_prog2].imm = map_fds[5];
14586 		} while (*fixup_prog2);
14589 	if (*fixup_map_in_map) {
14590 		map_fds[6] = create_map_in_map();
14592 			prog[*fixup_map_in_map].imm = map_fds[6];
14593 			fixup_map_in_map++;
14594 		} while (*fixup_map_in_map);
14597 	if (*fixup_cgroup_storage) {
14598 		map_fds[7] = create_cgroup_storage(false);
14600 			prog[*fixup_cgroup_storage].imm = map_fds[7];
14601 			fixup_cgroup_storage++;
14602 		} while (*fixup_cgroup_storage);
14605 	if (*fixup_percpu_cgroup_storage) {
14606 		map_fds[8] = create_cgroup_storage(true);
14608 			prog[*fixup_percpu_cgroup_storage].imm = map_fds[8];
14609 			fixup_percpu_cgroup_storage++;
14610 		} while (*fixup_percpu_cgroup_storage);
14612 	if (*fixup_map_sockmap) {
14613 		map_fds[9] = create_map(BPF_MAP_TYPE_SOCKMAP, sizeof(int),
14616 			prog[*fixup_map_sockmap].imm = map_fds[9];
14617 			fixup_map_sockmap++;
14618 		} while (*fixup_map_sockmap);
14620 	if (*fixup_map_sockhash) {
14621 		map_fds[10] = create_map(BPF_MAP_TYPE_SOCKHASH, sizeof(int),
14624 			prog[*fixup_map_sockhash].imm = map_fds[10];
14625 			fixup_map_sockhash++;
14626 		} while (*fixup_map_sockhash);
14628 	if (*fixup_map_xskmap) {
14629 		map_fds[11] = create_map(BPF_MAP_TYPE_XSKMAP, sizeof(int),
14632 			prog[*fixup_map_xskmap].imm = map_fds[11];
14633 			fixup_map_xskmap++;
14634 		} while (*fixup_map_xskmap);
14636 	if (*fixup_map_stacktrace) {
14637 		map_fds[12] = create_map(BPF_MAP_TYPE_STACK_TRACE, sizeof(u32),
14640 			prog[*fixup_map_stacktrace].imm = map_fds[12];
14641 			fixup_map_stacktrace++;
14642 		} while (*fixup_map_stacktrace);
/* Raise (admin == true) or drop (admin == false) the CAP_SYS_ADMIN
 * effective capability of the current process, so privileged tests can be
 * re-run in an unprivileged context and vice versa.  Uses libcap
 * (cap_get_proc / cap_set_flag / cap_set_proc).  Return-value lines are
 * not visible in this extract — presumably 0 on success, -1 on failure.
 */
14646 static int set_admin(bool admin)
14649 	const cap_value_t cap_val = CAP_SYS_ADMIN;
14652 	caps = cap_get_proc();
14654 		perror("cap_get_proc");
14657 	if (cap_set_flag(caps, CAP_EFFECTIVE, 1, &cap_val,
14658 			 admin ? CAP_SET : CAP_CLEAR)) {
14659 		perror("cap_set_flag");
14662 	if (cap_set_proc(caps)) {
14663 		perror("cap_set_proc");
	/* Free the working capability state; failure here is only reported. */
14668 	if (cap_free(caps))
14669 		perror("cap_free");
/* Execute a loaded program once via bpf_prog_test_run() with the given
 * input data and compare its return value against @expected_val.
 * Kernels that lack test_run support return ENOTSUPP (524, no userspace
 * define) and unprivileged callers get EPERM — both are tolerated rather
 * than failed.  POINTER_VALUE as the expectation means "don't compare"
 * (the actual pointer value is unpredictable).  The output buffer is
 * sized 4x TEST_DATA_LEN to leave room for programs that grow the packet.
 */
14673 static int do_prog_test_run(int fd_prog, bool unpriv, uint32_t expected_val,
14674 			    void *data, size_t size_data)
14676 	__u8 tmp[TEST_DATA_LEN << 2];
14677 	__u32 size_tmp = sizeof(tmp);
14683 	err = bpf_prog_test_run(fd_prog, 1, data, size_data,
14684 				tmp, &size_tmp, &retval, NULL);
14687 	if (err && errno != 524/*ENOTSUPP*/ && errno != EPERM) {
14688 		printf("Unexpected bpf_prog_test_run error ");
14691 	if (!err && retval != expected_val &&
14692 	    expected_val != POINTER_VALUE) {
14693 		printf("FAIL retval %d != %d ", retval, expected_val);
/* Run one verifier test case, in privileged or unprivileged (@unpriv)
 * mode: apply map-fd fixups, load the program through bpf_verify_program()
 * (capturing the verifier log in bpf_vlog), compare the accept/reject
 * outcome and error string against the test's expectations, optionally
 * check the "processed N insns" count, then execute the program against
 * the test's data (or per-run retvals[]) via do_prog_test_run().
 * Increments *passes or *errors accordingly (the increment and cleanup
 * goto lines are not visible in this extract).
 */
14700 static void do_test_single(struct bpf_test *test, bool unpriv,
14701 			   int *passes, int *errors)
14703 	int fd_prog, expected_ret, alignment_prevented_execution;
14704 	int prog_len, prog_type = test->prog_type;
14705 	struct bpf_insn *prog = test->insns;
14706 	int run_errs, run_successes;
14707 	int map_fds[MAX_NR_MAPS];
14708 	const char *expected_err;
	/* Invalidate all map-fd slots so cleanup can close only created ones. */
14712 	for (i = 0; i < MAX_NR_MAPS; i++)
	/* Tests without an explicit type default to SOCKET_FILTER. */
14716 		prog_type = BPF_PROG_TYPE_SOCKET_FILTER;
14717 	do_test_fixup(test, prog_type, prog, map_fds);
14718 	prog_len = probe_filter_length(prog);
	/* Translate test flags into program-load flags. */
14721 	if (test->flags & F_LOAD_WITH_STRICT_ALIGNMENT)
14722 		pflags |= BPF_F_STRICT_ALIGNMENT;
14723 	if (test->flags & F_NEEDS_EFFICIENT_UNALIGNED_ACCESS)
14724 		pflags |= BPF_F_ANY_ALIGNMENT;
14725 	fd_prog = bpf_verify_program(prog_type, prog, prog_len, pflags,
14726 				     "GPL", 0, bpf_vlog, sizeof(bpf_vlog), 1);
	/* Unpriv expectations override the default ones when provided. */
14728 	expected_ret = unpriv && test->result_unpriv != UNDEF ?
14729 		       test->result_unpriv : test->result;
14730 	expected_err = unpriv && test->errstr_unpriv ?
14731 		       test->errstr_unpriv : test->errstr;
14733 	alignment_prevented_execution = 0;
14735 	if (expected_ret == ACCEPT) {
14737 			printf("FAIL\nFailed to load prog '%s'!\n",
	/* Without efficient unaligned access the prog may load but not run. */
14741 #ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
14742 		if (fd_prog >= 0 &&
14743 		    (test->flags & F_NEEDS_EFFICIENT_UNALIGNED_ACCESS))
14744 			alignment_prevented_execution = 1;
	/* Expected REJECT: loading must fail and the log must match. */
14747 		if (fd_prog >= 0) {
14748 			printf("FAIL\nUnexpected success to load!\n");
14751 		if (!strstr(bpf_vlog, expected_err)) {
14752 			printf("FAIL\nUnexpected error message!\n\tEXP: %s\n\tRES: %s\n",
14753 			       expected_err, bpf_vlog);
	/* Optionally pin the verifier's "processed N insns" statistic. */
14758 	if (test->insn_processed) {
14759 		uint32_t insn_processed;
14762 		proc = strstr(bpf_vlog, "processed ");
14763 		insn_processed = atoi(proc + 10);
14764 		if (test->insn_processed != insn_processed) {
14765 			printf("FAIL\nUnexpected insn_processed %u vs %u\n",
14766 			       insn_processed, test->insn_processed);
	/* Execute the program unless alignment constraints forbade it. */
14773 	if (!alignment_prevented_execution && fd_prog >= 0) {
14774 		uint32_t expected_val;
14778 			expected_val = unpriv && test->retval_unpriv ?
14779 				test->retval_unpriv : test->retval;
14781 			err = do_prog_test_run(fd_prog, unpriv, expected_val,
14782 					       test->data, sizeof(test->data));
	/* Multi-run tests: one data/retval pair per run. */
14789 		for (i = 0; i < test->runs; i++) {
14790 			if (unpriv && test->retvals[i].retval_unpriv)
14791 				expected_val = test->retvals[i].retval_unpriv;
14793 				expected_val = test->retvals[i].retval;
14795 			err = do_prog_test_run(fd_prog, unpriv, expected_val,
14796 					       test->retvals[i].data,
14797 					       sizeof(test->retvals[i].data));
14799 				printf("(run %d/%d) ", i + 1, test->runs);
14809 	if (run_successes > 1)
14810 		printf("%d cases ", run_successes);
14812 	if (alignment_prevented_execution)
14813 		printf(" (NOTE: not executed due to unknown alignment)");
	/* Close every map fd created by do_test_fixup(). */
14821 	for (i = 0; i < MAX_NR_MAPS; i++)
14827 	printf("%s", bpf_vlog);
/* Report whether the process currently has CAP_SYS_ADMIN effective,
 * i.e. whether tests run privileged.  Defaults to CAP_CLEAR so any libcap
 * failure is treated as "not admin".
 * NOTE(review): the CAP_IS_SUPPORTED probe tests CAP_SETFCAP, not
 * CAP_SYS_ADMIN — it is only a "does this libcap/kernel support the
 * capability API at all" check, but looks inconsistent; worth confirming
 * upstream intent.
 */
14831 static bool is_admin(void)
14834 	cap_flag_value_t sysadmin = CAP_CLEAR;
14835 	const cap_value_t cap_val = CAP_SYS_ADMIN;
14837 #ifdef CAP_IS_SUPPORTED
14838 	if (!CAP_IS_SUPPORTED(CAP_SETFCAP)) {
14839 		perror("cap_get_flag");
14843 	caps = cap_get_proc();
14845 		perror("cap_get_proc");
14848 	if (cap_get_flag(caps, cap_val, CAP_EFFECTIVE, &sysadmin))
14849 		perror("cap_get_flag");
14850 	if (cap_free(caps))
14851 		perror("cap_free");
14852 	return (sysadmin == CAP_SET);
/* Read the kernel.unprivileged_bpf_disabled sysctl and set the global
 * unpriv_disabled flag.  A missing/unreadable sysctl file is treated as
 * "disabled" (conservative: skip unpriv tests).  fgets with size 2 reads
 * just the first digit, which is all atoi() needs here.
 */
14855 static void get_unpriv_disabled()
14860 	fd = fopen("/proc/sys/"UNPRIV_SYSCTL, "r");
14862 		perror("fopen /proc/sys/"UNPRIV_SYSCTL);
14863 		unpriv_disabled = true;
14866 	if (fgets(buf, 2, fd) == buf && atoi(buf))
14867 		unpriv_disabled = true;
/* A test is additionally run in unprivileged mode only for program types
 * loadable without CAP_SYS_ADMIN: the implicit default (0, i.e. unset,
 * mapped to SOCKET_FILTER by do_test_single), SOCKET_FILTER, and
 * CGROUP_SKB.
 */
14871 static bool test_as_unpriv(struct bpf_test *test)
14873 	return !test->prog_type ||
14874 	       test->prog_type == BPF_PROG_TYPE_SOCKET_FILTER ||
14875 	       test->prog_type == BPF_PROG_TYPE_CGROUP_SKB;
/* Drive tests[from..to): each eligible test runs once unprivileged
 * ("#N/u", with CAP_SYS_ADMIN temporarily dropped via set_admin() on
 * lines not visible in this extract) and once privileged ("#N/p"),
 * with SKIP lines for combinations the environment cannot run.
 * Prints a summary and returns EXIT_FAILURE iff any test failed.
 */
14878 static int do_test(bool unpriv, unsigned int from, unsigned int to)
14880 	int i, passes = 0, errors = 0, skips = 0;
14882 	for (i = from; i < to; i++) {
14883 		struct bpf_test *test = &tests[i];
14885 		/* Program types that are not supported by non-root we
14888 		if (test_as_unpriv(test) && unpriv_disabled) {
14889 			printf("#%d/u %s SKIP\n", i, test->descr);
14891 		} else if (test_as_unpriv(test)) {
14894 			printf("#%d/u %s ", i, test->descr);
14895 			do_test_single(test, true, &passes, &errors);
14901 			printf("#%d/p %s SKIP\n", i, test->descr);
14904 			printf("#%d/p %s ", i, test->descr);
14905 			do_test_single(test, false, &passes, &errors);
14909 	printf("Summary: %d PASSED, %d SKIPPED, %d FAILED\n", passes,
14911 	return errors ? EXIT_FAILURE : EXIT_SUCCESS;
14914 int main(int argc, char **argv)
14916 unsigned int from = 0, to = ARRAY_SIZE(tests);
14917 bool unpriv = !is_admin();
14920 unsigned int l = atoi(argv[argc - 2]);
14921 unsigned int u = atoi(argv[argc - 1]);
14923 if (l < to && u < to) {
14927 } else if (argc == 2) {
14928 unsigned int t = atoi(argv[argc - 1]);
14936 get_unpriv_disabled();
14937 if (unpriv && unpriv_disabled) {
14938 printf("Cannot run as unprivileged user with sysctl %s.\n",
14940 return EXIT_FAILURE;
14943 bpf_semi_rand_init();
14944 return do_test(unpriv, from, to);