2 * Testsuite for eBPF verifier
4 * Copyright (c) 2014 PLUMgrid, http://plumgrid.com
6 * This program is free software; you can redistribute it and/or
7 * modify it under the terms of version 2 of the GNU General Public
8 * License as published by the Free Software Foundation.
12 #include <asm/types.h>
13 #include <linux/types.h>
24 #include <sys/capability.h>
25 #include <sys/resource.h>
27 #include <linux/unistd.h>
28 #include <linux/filter.h>
29 #include <linux/bpf_perf_event.h>
30 #include <linux/bpf.h>
35 # include "autoconf.h"
37 # if defined(__i386) || defined(__x86_64) || defined(__s390x__) || defined(__aarch64__)
38 # define CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS 1
42 #include "../../../include/linux/filter.h"
45 # define ARRAY_SIZE(x) (sizeof(x) / sizeof((x)[0]))
52 #define F_NEEDS_EFFICIENT_UNALIGNED_ACCESS (1 << 0)
53 #define F_LOAD_WITH_STRICT_ALIGNMENT (1 << 1)
57 struct bpf_insn insns[MAX_INSNS];
58 int fixup_map1[MAX_FIXUPS];
59 int fixup_map2[MAX_FIXUPS];
60 int fixup_prog[MAX_FIXUPS];
61 int fixup_map_in_map[MAX_FIXUPS];
63 const char *errstr_unpriv;
68 } result, result_unpriv;
69 enum bpf_prog_type prog_type;
73 /* Note we want this to be 64 bit aligned so that the end of our array is
74 * actually the end of the structure.
76 #define MAX_ENTRIES 11
83 static struct bpf_test tests[] = {
87 BPF_MOV64_IMM(BPF_REG_1, 1),
88 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 2),
89 BPF_MOV64_IMM(BPF_REG_2, 3),
90 BPF_ALU64_REG(BPF_SUB, BPF_REG_1, BPF_REG_2),
91 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -1),
92 BPF_ALU64_IMM(BPF_MUL, BPF_REG_1, 3),
93 BPF_MOV64_REG(BPF_REG_0, BPF_REG_1),
104 .errstr = "unreachable",
110 BPF_JMP_IMM(BPF_JA, 0, 0, 1),
111 BPF_JMP_IMM(BPF_JA, 0, 0, 0),
114 .errstr = "unreachable",
120 BPF_JMP_IMM(BPF_JA, 0, 0, 1),
123 .errstr = "jump out of range",
127 "out of range jump2",
129 BPF_JMP_IMM(BPF_JA, 0, 0, -2),
132 .errstr = "jump out of range",
138 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, 1),
139 BPF_LD_IMM64(BPF_REG_0, 0),
140 BPF_LD_IMM64(BPF_REG_0, 0),
141 BPF_LD_IMM64(BPF_REG_0, 1),
142 BPF_LD_IMM64(BPF_REG_0, 1),
143 BPF_MOV64_IMM(BPF_REG_0, 2),
146 .errstr = "invalid BPF_LD_IMM insn",
147 .errstr_unpriv = "R1 pointer comparison",
153 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, 1),
154 BPF_LD_IMM64(BPF_REG_0, 0),
155 BPF_LD_IMM64(BPF_REG_0, 0),
156 BPF_LD_IMM64(BPF_REG_0, 1),
157 BPF_LD_IMM64(BPF_REG_0, 1),
160 .errstr = "invalid BPF_LD_IMM insn",
161 .errstr_unpriv = "R1 pointer comparison",
167 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, 1),
168 BPF_RAW_INSN(BPF_LD | BPF_IMM | BPF_DW, 0, 0, 0, 0),
169 BPF_LD_IMM64(BPF_REG_0, 0),
170 BPF_LD_IMM64(BPF_REG_0, 0),
171 BPF_LD_IMM64(BPF_REG_0, 1),
172 BPF_LD_IMM64(BPF_REG_0, 1),
175 .errstr = "invalid bpf_ld_imm64 insn",
181 BPF_RAW_INSN(BPF_LD | BPF_IMM | BPF_DW, 0, 0, 0, 0),
184 .errstr = "invalid bpf_ld_imm64 insn",
190 BPF_RAW_INSN(BPF_LD | BPF_IMM | BPF_DW, 0, 0, 0, 0),
192 .errstr = "invalid bpf_ld_imm64 insn",
198 BPF_RAW_INSN(BPF_LD | BPF_IMM | BPF_DW, 0, 0, 0, 0),
199 BPF_RAW_INSN(0, 0, 0, 0, 0),
207 BPF_RAW_INSN(BPF_LD | BPF_IMM | BPF_DW, 0, 0, 0, 1),
208 BPF_RAW_INSN(0, 0, 0, 0, 1),
216 BPF_RAW_INSN(BPF_LD | BPF_IMM | BPF_DW, 0, 0, 1, 1),
217 BPF_RAW_INSN(0, 0, 0, 0, 1),
220 .errstr = "uses reserved fields",
226 BPF_RAW_INSN(BPF_LD | BPF_IMM | BPF_DW, 0, 0, 0, 1),
227 BPF_RAW_INSN(0, 0, 0, 1, 1),
230 .errstr = "invalid bpf_ld_imm64 insn",
236 BPF_RAW_INSN(BPF_LD | BPF_IMM | BPF_DW, 0, 0, 0, 1),
237 BPF_RAW_INSN(0, BPF_REG_1, 0, 0, 1),
240 .errstr = "invalid bpf_ld_imm64 insn",
246 BPF_RAW_INSN(BPF_LD | BPF_IMM | BPF_DW, 0, 0, 0, 1),
247 BPF_RAW_INSN(0, 0, BPF_REG_1, 0, 1),
250 .errstr = "invalid bpf_ld_imm64 insn",
256 BPF_MOV64_IMM(BPF_REG_1, 0),
257 BPF_RAW_INSN(BPF_LD | BPF_IMM | BPF_DW, 0, BPF_REG_1, 0, 1),
258 BPF_RAW_INSN(0, 0, 0, 0, 1),
261 .errstr = "not pointing to valid bpf_map",
267 BPF_MOV64_IMM(BPF_REG_1, 0),
268 BPF_RAW_INSN(BPF_LD | BPF_IMM | BPF_DW, 0, BPF_REG_1, 0, 1),
269 BPF_RAW_INSN(0, 0, BPF_REG_1, 0, 1),
272 .errstr = "invalid bpf_ld_imm64 insn",
278 BPF_ALU64_REG(BPF_MOV, BPF_REG_0, BPF_REG_2),
280 .errstr = "jump out of range",
286 BPF_JMP_IMM(BPF_JA, 0, 0, -1),
289 .errstr = "back-edge",
295 BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
296 BPF_MOV64_REG(BPF_REG_2, BPF_REG_0),
297 BPF_MOV64_REG(BPF_REG_3, BPF_REG_0),
298 BPF_JMP_IMM(BPF_JA, 0, 0, -4),
301 .errstr = "back-edge",
307 BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
308 BPF_MOV64_REG(BPF_REG_2, BPF_REG_0),
309 BPF_MOV64_REG(BPF_REG_3, BPF_REG_0),
310 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, -3),
313 .errstr = "back-edge",
317 "read uninitialized register",
319 BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
322 .errstr = "R2 !read_ok",
326 "read invalid register",
328 BPF_MOV64_REG(BPF_REG_0, -1),
331 .errstr = "R15 is invalid",
335 "program doesn't init R0 before exit",
337 BPF_ALU64_REG(BPF_MOV, BPF_REG_2, BPF_REG_1),
340 .errstr = "R0 !read_ok",
344 "program doesn't init R0 before exit in all branches",
346 BPF_JMP_IMM(BPF_JGE, BPF_REG_1, 0, 2),
347 BPF_MOV64_IMM(BPF_REG_0, 1),
348 BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 2),
351 .errstr = "R0 !read_ok",
352 .errstr_unpriv = "R1 pointer comparison",
356 "stack out of bounds",
358 BPF_ST_MEM(BPF_DW, BPF_REG_10, 8, 0),
361 .errstr = "invalid stack",
365 "invalid call insn1",
367 BPF_RAW_INSN(BPF_JMP | BPF_CALL | BPF_X, 0, 0, 0, 0),
370 .errstr = "BPF_CALL uses reserved",
374 "invalid call insn2",
376 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 1, 0),
379 .errstr = "BPF_CALL uses reserved",
383 "invalid function call",
385 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, 1234567),
388 .errstr = "invalid func unknown#1234567",
392 "uninitialized stack1",
394 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
395 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
396 BPF_LD_MAP_FD(BPF_REG_1, 0),
397 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
398 BPF_FUNC_map_lookup_elem),
402 .errstr = "invalid indirect read from stack",
406 "uninitialized stack2",
408 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
409 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_2, -8),
412 .errstr = "invalid read from stack",
416 "invalid fp arithmetic",
417 /* If this gets ever changed, make sure JITs can deal with it. */
419 BPF_MOV64_IMM(BPF_REG_0, 0),
420 BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
421 BPF_ALU64_IMM(BPF_SUB, BPF_REG_1, 8),
422 BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_0, 0),
425 .errstr_unpriv = "R1 subtraction from stack pointer",
426 .result_unpriv = REJECT,
427 .errstr = "R1 invalid mem access",
431 "non-invalid fp arithmetic",
433 BPF_MOV64_IMM(BPF_REG_0, 0),
434 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -8),
440 "invalid argument register",
442 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
443 BPF_FUNC_get_cgroup_classid),
444 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
445 BPF_FUNC_get_cgroup_classid),
448 .errstr = "R1 !read_ok",
450 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
453 "non-invalid argument register",
455 BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_1),
456 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
457 BPF_FUNC_get_cgroup_classid),
458 BPF_ALU64_REG(BPF_MOV, BPF_REG_1, BPF_REG_6),
459 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
460 BPF_FUNC_get_cgroup_classid),
464 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
467 "check valid spill/fill",
469 /* spill R1(ctx) into stack */
470 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_1, -8),
471 /* fill it back into R2 */
472 BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_10, -8),
473 /* should be able to access R0 = *(R2 + 8) */
474 /* BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_2, 8), */
475 BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
478 .errstr_unpriv = "R0 leaks addr",
480 .result_unpriv = REJECT,
483 "check valid spill/fill, skb mark",
485 BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_1),
486 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_6, -8),
487 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_10, -8),
488 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_0,
489 offsetof(struct __sk_buff, mark)),
493 .result_unpriv = ACCEPT,
496 "check corrupted spill/fill",
498 /* spill R1(ctx) into stack */
499 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_1, -8),
500 /* mess up with R1 pointer on stack */
501 BPF_ST_MEM(BPF_B, BPF_REG_10, -7, 0x23),
502 /* fill back into R0 should fail */
503 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_10, -8),
506 .errstr_unpriv = "attempt to corrupt spilled",
507 .errstr = "corrupted spill",
511 "invalid src register in STX",
513 BPF_STX_MEM(BPF_B, BPF_REG_10, -1, -1),
516 .errstr = "R15 is invalid",
520 "invalid dst register in STX",
522 BPF_STX_MEM(BPF_B, 14, BPF_REG_10, -1),
525 .errstr = "R14 is invalid",
529 "invalid dst register in ST",
531 BPF_ST_MEM(BPF_B, 14, -1, -1),
534 .errstr = "R14 is invalid",
538 "invalid src register in LDX",
540 BPF_LDX_MEM(BPF_B, BPF_REG_0, 12, 0),
543 .errstr = "R12 is invalid",
547 "invalid dst register in LDX",
549 BPF_LDX_MEM(BPF_B, 11, BPF_REG_1, 0),
552 .errstr = "R11 is invalid",
558 BPF_RAW_INSN(0, 0, 0, 0, 0),
561 .errstr = "invalid BPF_LD_IMM",
567 BPF_RAW_INSN(1, 0, 0, 0, 0),
570 .errstr = "BPF_LDX uses reserved fields",
576 BPF_RAW_INSN(-1, 0, 0, 0, 0),
579 .errstr = "invalid BPF_ALU opcode f0",
585 BPF_RAW_INSN(-1, -1, -1, -1, -1),
588 .errstr = "invalid BPF_ALU opcode f0",
594 BPF_RAW_INSN(0x7f, -1, -1, -1, -1),
597 .errstr = "BPF_ALU uses reserved fields",
601 "misaligned read from stack",
603 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
604 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_2, -4),
607 .errstr = "misaligned stack access",
609 .flags = F_LOAD_WITH_STRICT_ALIGNMENT,
612 "invalid map_fd for function call",
614 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
615 BPF_ALU64_REG(BPF_MOV, BPF_REG_2, BPF_REG_10),
616 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
617 BPF_LD_MAP_FD(BPF_REG_1, 0),
618 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
619 BPF_FUNC_map_delete_elem),
622 .errstr = "fd 0 is not pointing to valid bpf_map",
626 "don't check return value before access",
628 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
629 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
630 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
631 BPF_LD_MAP_FD(BPF_REG_1, 0),
632 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
633 BPF_FUNC_map_lookup_elem),
634 BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, 0),
638 .errstr = "R0 invalid mem access 'map_value_or_null'",
642 "access memory with incorrect alignment",
644 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
645 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
646 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
647 BPF_LD_MAP_FD(BPF_REG_1, 0),
648 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
649 BPF_FUNC_map_lookup_elem),
650 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 1),
651 BPF_ST_MEM(BPF_DW, BPF_REG_0, 4, 0),
655 .errstr = "misaligned value access",
657 .flags = F_LOAD_WITH_STRICT_ALIGNMENT,
660 "sometimes access memory with incorrect alignment",
662 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
663 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
664 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
665 BPF_LD_MAP_FD(BPF_REG_1, 0),
666 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
667 BPF_FUNC_map_lookup_elem),
668 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 2),
669 BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, 0),
671 BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, 1),
675 .errstr = "R0 invalid mem access",
676 .errstr_unpriv = "R0 leaks addr",
678 .flags = F_LOAD_WITH_STRICT_ALIGNMENT,
683 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
684 BPF_STX_MEM(BPF_DW, BPF_REG_2, BPF_REG_1, -8),
685 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, 1),
686 BPF_ST_MEM(BPF_DW, BPF_REG_2, -8, 0),
687 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 1, 1),
688 BPF_ST_MEM(BPF_DW, BPF_REG_2, -16, 1),
689 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 2, 1),
690 BPF_ST_MEM(BPF_DW, BPF_REG_2, -8, 2),
691 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 3, 1),
692 BPF_ST_MEM(BPF_DW, BPF_REG_2, -16, 3),
693 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 4, 1),
694 BPF_ST_MEM(BPF_DW, BPF_REG_2, -8, 4),
695 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 5, 1),
696 BPF_ST_MEM(BPF_DW, BPF_REG_2, -32, 5),
697 BPF_MOV64_IMM(BPF_REG_0, 0),
700 .errstr_unpriv = "R1 pointer comparison",
701 .result_unpriv = REJECT,
707 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
708 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, 2),
709 BPF_ST_MEM(BPF_DW, BPF_REG_2, -8, 0),
710 BPF_JMP_IMM(BPF_JA, 0, 0, 14),
711 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 1, 2),
712 BPF_ST_MEM(BPF_DW, BPF_REG_2, -16, 0),
713 BPF_JMP_IMM(BPF_JA, 0, 0, 11),
714 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 2, 2),
715 BPF_ST_MEM(BPF_DW, BPF_REG_2, -32, 0),
716 BPF_JMP_IMM(BPF_JA, 0, 0, 8),
717 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 3, 2),
718 BPF_ST_MEM(BPF_DW, BPF_REG_2, -40, 0),
719 BPF_JMP_IMM(BPF_JA, 0, 0, 5),
720 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 4, 2),
721 BPF_ST_MEM(BPF_DW, BPF_REG_2, -48, 0),
722 BPF_JMP_IMM(BPF_JA, 0, 0, 2),
723 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 5, 1),
724 BPF_ST_MEM(BPF_DW, BPF_REG_2, -56, 0),
725 BPF_MOV64_IMM(BPF_REG_0, 0),
728 .errstr_unpriv = "R1 pointer comparison",
729 .result_unpriv = REJECT,
735 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
736 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, 3),
737 BPF_ST_MEM(BPF_DW, BPF_REG_2, -8, 0),
738 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
739 BPF_JMP_IMM(BPF_JA, 0, 0, 19),
740 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 1, 3),
741 BPF_ST_MEM(BPF_DW, BPF_REG_2, -16, 0),
742 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -16),
743 BPF_JMP_IMM(BPF_JA, 0, 0, 15),
744 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 2, 3),
745 BPF_ST_MEM(BPF_DW, BPF_REG_2, -32, 0),
746 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -32),
747 BPF_JMP_IMM(BPF_JA, 0, 0, 11),
748 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 3, 3),
749 BPF_ST_MEM(BPF_DW, BPF_REG_2, -40, 0),
750 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -40),
751 BPF_JMP_IMM(BPF_JA, 0, 0, 7),
752 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 4, 3),
753 BPF_ST_MEM(BPF_DW, BPF_REG_2, -48, 0),
754 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -48),
755 BPF_JMP_IMM(BPF_JA, 0, 0, 3),
756 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 5, 0),
757 BPF_ST_MEM(BPF_DW, BPF_REG_2, -56, 0),
758 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -56),
759 BPF_LD_MAP_FD(BPF_REG_1, 0),
760 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
761 BPF_FUNC_map_delete_elem),
764 .fixup_map1 = { 24 },
765 .errstr_unpriv = "R1 pointer comparison",
766 .result_unpriv = REJECT,
772 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 1),
773 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 2),
774 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 3),
775 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 4),
776 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 1),
777 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 2),
778 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 3),
779 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 4),
780 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 1),
781 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 2),
782 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 3),
783 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 4),
784 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 1),
785 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 2),
786 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 3),
787 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 4),
788 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 1),
789 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 2),
790 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 3),
791 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 4),
792 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 1),
793 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 2),
794 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 3),
795 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 4),
796 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 1),
797 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 2),
798 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 3),
799 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 4),
800 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 1),
801 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 2),
802 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 3),
803 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 4),
804 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 1),
805 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 2),
806 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 3),
807 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 4),
808 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 0),
809 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 0),
810 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 0),
811 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 0),
812 BPF_MOV64_IMM(BPF_REG_0, 0),
815 .errstr_unpriv = "R1 pointer comparison",
816 .result_unpriv = REJECT,
822 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
823 BPF_MOV64_REG(BPF_REG_3, BPF_REG_2),
824 BPF_JMP_IMM(BPF_JGE, BPF_REG_1, 0, 2),
825 BPF_STX_MEM(BPF_DW, BPF_REG_2, BPF_REG_3, -8),
826 BPF_JMP_IMM(BPF_JA, 0, 0, 2),
827 BPF_STX_MEM(BPF_DW, BPF_REG_2, BPF_REG_2, -8),
828 BPF_JMP_IMM(BPF_JA, 0, 0, 0),
829 BPF_MOV64_IMM(BPF_REG_0, 0),
830 BPF_JMP_IMM(BPF_JGE, BPF_REG_1, 0, 2),
831 BPF_STX_MEM(BPF_DW, BPF_REG_2, BPF_REG_3, -8),
832 BPF_JMP_IMM(BPF_JA, 0, 0, 2),
833 BPF_STX_MEM(BPF_DW, BPF_REG_2, BPF_REG_2, -8),
834 BPF_JMP_IMM(BPF_JA, 0, 0, 0),
835 BPF_MOV64_IMM(BPF_REG_0, 0),
836 BPF_JMP_IMM(BPF_JGE, BPF_REG_1, 0, 2),
837 BPF_STX_MEM(BPF_DW, BPF_REG_2, BPF_REG_3, -8),
838 BPF_JMP_IMM(BPF_JA, 0, 0, 2),
839 BPF_STX_MEM(BPF_DW, BPF_REG_2, BPF_REG_2, -8),
840 BPF_JMP_IMM(BPF_JA, 0, 0, 0),
841 BPF_MOV64_IMM(BPF_REG_0, 0),
842 BPF_JMP_IMM(BPF_JGE, BPF_REG_1, 0, 2),
843 BPF_STX_MEM(BPF_DW, BPF_REG_2, BPF_REG_3, -8),
844 BPF_JMP_IMM(BPF_JA, 0, 0, 2),
845 BPF_STX_MEM(BPF_DW, BPF_REG_2, BPF_REG_2, -8),
846 BPF_JMP_IMM(BPF_JA, 0, 0, 0),
847 BPF_MOV64_IMM(BPF_REG_0, 0),
848 BPF_JMP_IMM(BPF_JGE, BPF_REG_1, 0, 2),
849 BPF_STX_MEM(BPF_DW, BPF_REG_2, BPF_REG_3, -8),
850 BPF_JMP_IMM(BPF_JA, 0, 0, 2),
851 BPF_STX_MEM(BPF_DW, BPF_REG_2, BPF_REG_2, -8),
852 BPF_JMP_IMM(BPF_JA, 0, 0, 0),
853 BPF_MOV64_IMM(BPF_REG_0, 0),
856 .errstr_unpriv = "R1 pointer comparison",
857 .result_unpriv = REJECT,
861 "access skb fields ok",
863 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
864 offsetof(struct __sk_buff, len)),
865 BPF_JMP_IMM(BPF_JGE, BPF_REG_0, 0, 1),
866 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
867 offsetof(struct __sk_buff, mark)),
868 BPF_JMP_IMM(BPF_JGE, BPF_REG_0, 0, 1),
869 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
870 offsetof(struct __sk_buff, pkt_type)),
871 BPF_JMP_IMM(BPF_JGE, BPF_REG_0, 0, 1),
872 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
873 offsetof(struct __sk_buff, queue_mapping)),
874 BPF_JMP_IMM(BPF_JGE, BPF_REG_0, 0, 0),
875 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
876 offsetof(struct __sk_buff, protocol)),
877 BPF_JMP_IMM(BPF_JGE, BPF_REG_0, 0, 0),
878 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
879 offsetof(struct __sk_buff, vlan_present)),
880 BPF_JMP_IMM(BPF_JGE, BPF_REG_0, 0, 0),
881 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
882 offsetof(struct __sk_buff, vlan_tci)),
883 BPF_JMP_IMM(BPF_JGE, BPF_REG_0, 0, 0),
884 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
885 offsetof(struct __sk_buff, napi_id)),
886 BPF_JMP_IMM(BPF_JGE, BPF_REG_0, 0, 0),
892 "access skb fields bad1",
894 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1, -4),
897 .errstr = "invalid bpf_context access",
901 "access skb fields bad2",
903 BPF_JMP_IMM(BPF_JGE, BPF_REG_1, 0, 9),
904 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
905 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
906 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
907 BPF_LD_MAP_FD(BPF_REG_1, 0),
908 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
909 BPF_FUNC_map_lookup_elem),
910 BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 1),
912 BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
913 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
914 offsetof(struct __sk_buff, pkt_type)),
918 .errstr = "different pointers",
919 .errstr_unpriv = "R1 pointer comparison",
923 "access skb fields bad3",
925 BPF_JMP_IMM(BPF_JGE, BPF_REG_1, 0, 2),
926 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
927 offsetof(struct __sk_buff, pkt_type)),
929 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
930 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
931 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
932 BPF_LD_MAP_FD(BPF_REG_1, 0),
933 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
934 BPF_FUNC_map_lookup_elem),
935 BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 1),
937 BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
938 BPF_JMP_IMM(BPF_JA, 0, 0, -12),
941 .errstr = "different pointers",
942 .errstr_unpriv = "R1 pointer comparison",
946 "access skb fields bad4",
948 BPF_JMP_IMM(BPF_JGE, BPF_REG_1, 0, 3),
949 BPF_LDX_MEM(BPF_W, BPF_REG_1, BPF_REG_1,
950 offsetof(struct __sk_buff, len)),
951 BPF_MOV64_IMM(BPF_REG_0, 0),
953 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
954 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
955 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
956 BPF_LD_MAP_FD(BPF_REG_1, 0),
957 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
958 BPF_FUNC_map_lookup_elem),
959 BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 1),
961 BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
962 BPF_JMP_IMM(BPF_JA, 0, 0, -13),
965 .errstr = "different pointers",
966 .errstr_unpriv = "R1 pointer comparison",
970 "invalid access __sk_buff family",
972 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
973 offsetof(struct __sk_buff, family)),
976 .errstr = "invalid bpf_context access",
980 "invalid access __sk_buff remote_ip4",
982 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
983 offsetof(struct __sk_buff, remote_ip4)),
986 .errstr = "invalid bpf_context access",
990 "invalid access __sk_buff local_ip4",
992 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
993 offsetof(struct __sk_buff, local_ip4)),
996 .errstr = "invalid bpf_context access",
1000 "invalid access __sk_buff remote_ip6",
1002 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
1003 offsetof(struct __sk_buff, remote_ip6)),
1006 .errstr = "invalid bpf_context access",
1010 "invalid access __sk_buff local_ip6",
1012 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
1013 offsetof(struct __sk_buff, local_ip6)),
1016 .errstr = "invalid bpf_context access",
1020 "invalid access __sk_buff remote_port",
1022 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
1023 offsetof(struct __sk_buff, remote_port)),
1026 .errstr = "invalid bpf_context access",
1030 "invalid access __sk_buff remote_port",
1032 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
1033 offsetof(struct __sk_buff, local_port)),
1036 .errstr = "invalid bpf_context access",
1040 "valid access __sk_buff family",
1042 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
1043 offsetof(struct __sk_buff, family)),
1047 .prog_type = BPF_PROG_TYPE_SK_SKB,
1050 "valid access __sk_buff remote_ip4",
1052 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
1053 offsetof(struct __sk_buff, remote_ip4)),
1057 .prog_type = BPF_PROG_TYPE_SK_SKB,
1060 "valid access __sk_buff local_ip4",
1062 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
1063 offsetof(struct __sk_buff, local_ip4)),
1067 .prog_type = BPF_PROG_TYPE_SK_SKB,
1070 "valid access __sk_buff remote_ip6",
1072 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
1073 offsetof(struct __sk_buff, remote_ip6[0])),
1074 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
1075 offsetof(struct __sk_buff, remote_ip6[1])),
1076 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
1077 offsetof(struct __sk_buff, remote_ip6[2])),
1078 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
1079 offsetof(struct __sk_buff, remote_ip6[3])),
1083 .prog_type = BPF_PROG_TYPE_SK_SKB,
1086 "valid access __sk_buff local_ip6",
1088 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
1089 offsetof(struct __sk_buff, local_ip6[0])),
1090 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
1091 offsetof(struct __sk_buff, local_ip6[1])),
1092 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
1093 offsetof(struct __sk_buff, local_ip6[2])),
1094 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
1095 offsetof(struct __sk_buff, local_ip6[3])),
1099 .prog_type = BPF_PROG_TYPE_SK_SKB,
1102 "valid access __sk_buff remote_port",
1104 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
1105 offsetof(struct __sk_buff, remote_port)),
1109 .prog_type = BPF_PROG_TYPE_SK_SKB,
1112 "valid access __sk_buff remote_port",
1114 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
1115 offsetof(struct __sk_buff, local_port)),
1119 .prog_type = BPF_PROG_TYPE_SK_SKB,
1122 "check skb->mark is not writeable by sockets",
1124 BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_1,
1125 offsetof(struct __sk_buff, mark)),
1128 .errstr = "invalid bpf_context access",
1129 .errstr_unpriv = "R1 leaks addr",
1133 "check skb->tc_index is not writeable by sockets",
1135 BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_1,
1136 offsetof(struct __sk_buff, tc_index)),
1139 .errstr = "invalid bpf_context access",
1140 .errstr_unpriv = "R1 leaks addr",
1144 "check cb access: byte",
1146 BPF_MOV64_IMM(BPF_REG_0, 0),
1147 BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0,
1148 offsetof(struct __sk_buff, cb[0])),
1149 BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0,
1150 offsetof(struct __sk_buff, cb[0]) + 1),
1151 BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0,
1152 offsetof(struct __sk_buff, cb[0]) + 2),
1153 BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0,
1154 offsetof(struct __sk_buff, cb[0]) + 3),
1155 BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0,
1156 offsetof(struct __sk_buff, cb[1])),
1157 BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0,
1158 offsetof(struct __sk_buff, cb[1]) + 1),
1159 BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0,
1160 offsetof(struct __sk_buff, cb[1]) + 2),
1161 BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0,
1162 offsetof(struct __sk_buff, cb[1]) + 3),
1163 BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0,
1164 offsetof(struct __sk_buff, cb[2])),
1165 BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0,
1166 offsetof(struct __sk_buff, cb[2]) + 1),
1167 BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0,
1168 offsetof(struct __sk_buff, cb[2]) + 2),
1169 BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0,
1170 offsetof(struct __sk_buff, cb[2]) + 3),
1171 BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0,
1172 offsetof(struct __sk_buff, cb[3])),
1173 BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0,
1174 offsetof(struct __sk_buff, cb[3]) + 1),
1175 BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0,
1176 offsetof(struct __sk_buff, cb[3]) + 2),
1177 BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0,
1178 offsetof(struct __sk_buff, cb[3]) + 3),
1179 BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0,
1180 offsetof(struct __sk_buff, cb[4])),
1181 BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0,
1182 offsetof(struct __sk_buff, cb[4]) + 1),
1183 BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0,
1184 offsetof(struct __sk_buff, cb[4]) + 2),
1185 BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0,
1186 offsetof(struct __sk_buff, cb[4]) + 3),
1187 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
1188 offsetof(struct __sk_buff, cb[0])),
1189 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
1190 offsetof(struct __sk_buff, cb[0]) + 1),
1191 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
1192 offsetof(struct __sk_buff, cb[0]) + 2),
1193 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
1194 offsetof(struct __sk_buff, cb[0]) + 3),
1195 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
1196 offsetof(struct __sk_buff, cb[1])),
1197 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
1198 offsetof(struct __sk_buff, cb[1]) + 1),
1199 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
1200 offsetof(struct __sk_buff, cb[1]) + 2),
1201 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
1202 offsetof(struct __sk_buff, cb[1]) + 3),
1203 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
1204 offsetof(struct __sk_buff, cb[2])),
1205 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
1206 offsetof(struct __sk_buff, cb[2]) + 1),
1207 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
1208 offsetof(struct __sk_buff, cb[2]) + 2),
1209 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
1210 offsetof(struct __sk_buff, cb[2]) + 3),
1211 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
1212 offsetof(struct __sk_buff, cb[3])),
1213 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
1214 offsetof(struct __sk_buff, cb[3]) + 1),
1215 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
1216 offsetof(struct __sk_buff, cb[3]) + 2),
1217 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
1218 offsetof(struct __sk_buff, cb[3]) + 3),
1219 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
1220 offsetof(struct __sk_buff, cb[4])),
1221 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
1222 offsetof(struct __sk_buff, cb[4]) + 1),
1223 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
1224 offsetof(struct __sk_buff, cb[4]) + 2),
1225 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
1226 offsetof(struct __sk_buff, cb[4]) + 3),
1232 "__sk_buff->hash, offset 0, byte store not permitted",
1234 BPF_MOV64_IMM(BPF_REG_0, 0),
1235 BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0,
1236 offsetof(struct __sk_buff, hash)),
1239 .errstr = "invalid bpf_context access",
1243 "__sk_buff->tc_index, offset 3, byte store not permitted",
1245 BPF_MOV64_IMM(BPF_REG_0, 0),
1246 BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0,
1247 offsetof(struct __sk_buff, tc_index) + 3),
1250 .errstr = "invalid bpf_context access",
1254 "check skb->hash byte load permitted",
1256 BPF_MOV64_IMM(BPF_REG_0, 0),
1257 #if __BYTE_ORDER == __LITTLE_ENDIAN
1258 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
1259 offsetof(struct __sk_buff, hash)),
1261 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
1262 offsetof(struct __sk_buff, hash) + 3),
1269 "check skb->hash byte load not permitted 1",
1271 BPF_MOV64_IMM(BPF_REG_0, 0),
1272 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
1273 offsetof(struct __sk_buff, hash) + 1),
1276 .errstr = "invalid bpf_context access",
1280 "check skb->hash byte load not permitted 2",
1282 BPF_MOV64_IMM(BPF_REG_0, 0),
1283 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
1284 offsetof(struct __sk_buff, hash) + 2),
1287 .errstr = "invalid bpf_context access",
1291 "check skb->hash byte load not permitted 3",
1293 BPF_MOV64_IMM(BPF_REG_0, 0),
1294 #if __BYTE_ORDER == __LITTLE_ENDIAN
1295 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
1296 offsetof(struct __sk_buff, hash) + 3),
1298 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
1299 offsetof(struct __sk_buff, hash)),
1303 .errstr = "invalid bpf_context access",
1307 "check cb access: byte, wrong type",
1309 BPF_MOV64_IMM(BPF_REG_0, 0),
1310 BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0,
1311 offsetof(struct __sk_buff, cb[0])),
1314 .errstr = "invalid bpf_context access",
1316 .prog_type = BPF_PROG_TYPE_CGROUP_SOCK,
1319 "check cb access: half",
1321 BPF_MOV64_IMM(BPF_REG_0, 0),
1322 BPF_STX_MEM(BPF_H, BPF_REG_1, BPF_REG_0,
1323 offsetof(struct __sk_buff, cb[0])),
1324 BPF_STX_MEM(BPF_H, BPF_REG_1, BPF_REG_0,
1325 offsetof(struct __sk_buff, cb[0]) + 2),
1326 BPF_STX_MEM(BPF_H, BPF_REG_1, BPF_REG_0,
1327 offsetof(struct __sk_buff, cb[1])),
1328 BPF_STX_MEM(BPF_H, BPF_REG_1, BPF_REG_0,
1329 offsetof(struct __sk_buff, cb[1]) + 2),
1330 BPF_STX_MEM(BPF_H, BPF_REG_1, BPF_REG_0,
1331 offsetof(struct __sk_buff, cb[2])),
1332 BPF_STX_MEM(BPF_H, BPF_REG_1, BPF_REG_0,
1333 offsetof(struct __sk_buff, cb[2]) + 2),
1334 BPF_STX_MEM(BPF_H, BPF_REG_1, BPF_REG_0,
1335 offsetof(struct __sk_buff, cb[3])),
1336 BPF_STX_MEM(BPF_H, BPF_REG_1, BPF_REG_0,
1337 offsetof(struct __sk_buff, cb[3]) + 2),
1338 BPF_STX_MEM(BPF_H, BPF_REG_1, BPF_REG_0,
1339 offsetof(struct __sk_buff, cb[4])),
1340 BPF_STX_MEM(BPF_H, BPF_REG_1, BPF_REG_0,
1341 offsetof(struct __sk_buff, cb[4]) + 2),
1342 BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_1,
1343 offsetof(struct __sk_buff, cb[0])),
1344 BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_1,
1345 offsetof(struct __sk_buff, cb[0]) + 2),
1346 BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_1,
1347 offsetof(struct __sk_buff, cb[1])),
1348 BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_1,
1349 offsetof(struct __sk_buff, cb[1]) + 2),
1350 BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_1,
1351 offsetof(struct __sk_buff, cb[2])),
1352 BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_1,
1353 offsetof(struct __sk_buff, cb[2]) + 2),
1354 BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_1,
1355 offsetof(struct __sk_buff, cb[3])),
1356 BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_1,
1357 offsetof(struct __sk_buff, cb[3]) + 2),
1358 BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_1,
1359 offsetof(struct __sk_buff, cb[4])),
1360 BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_1,
1361 offsetof(struct __sk_buff, cb[4]) + 2),
1367 "check cb access: half, unaligned",
1369 BPF_MOV64_IMM(BPF_REG_0, 0),
1370 BPF_STX_MEM(BPF_H, BPF_REG_1, BPF_REG_0,
1371 offsetof(struct __sk_buff, cb[0]) + 1),
1374 .errstr = "misaligned context access",
1376 .flags = F_LOAD_WITH_STRICT_ALIGNMENT,
1379 "check __sk_buff->hash, offset 0, half store not permitted",
1381 BPF_MOV64_IMM(BPF_REG_0, 0),
1382 BPF_STX_MEM(BPF_H, BPF_REG_1, BPF_REG_0,
1383 offsetof(struct __sk_buff, hash)),
1386 .errstr = "invalid bpf_context access",
1390 "check __sk_buff->tc_index, offset 2, half store not permitted",
1392 BPF_MOV64_IMM(BPF_REG_0, 0),
1393 BPF_STX_MEM(BPF_H, BPF_REG_1, BPF_REG_0,
1394 offsetof(struct __sk_buff, tc_index) + 2),
1397 .errstr = "invalid bpf_context access",
1401 "check skb->hash half load permitted",
1403 BPF_MOV64_IMM(BPF_REG_0, 0),
1404 #if __BYTE_ORDER == __LITTLE_ENDIAN
1405 BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_1,
1406 offsetof(struct __sk_buff, hash)),
1408 BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_1,
1409 offsetof(struct __sk_buff, hash) + 2),
1416 "check skb->hash half load not permitted",
1418 BPF_MOV64_IMM(BPF_REG_0, 0),
1419 #if __BYTE_ORDER == __LITTLE_ENDIAN
1420 BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_1,
1421 offsetof(struct __sk_buff, hash) + 2),
1423 BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_1,
1424 offsetof(struct __sk_buff, hash)),
1428 .errstr = "invalid bpf_context access",
1432 "check cb access: half, wrong type",
1434 BPF_MOV64_IMM(BPF_REG_0, 0),
1435 BPF_STX_MEM(BPF_H, BPF_REG_1, BPF_REG_0,
1436 offsetof(struct __sk_buff, cb[0])),
1439 .errstr = "invalid bpf_context access",
1441 .prog_type = BPF_PROG_TYPE_CGROUP_SOCK,
1444 "check cb access: word",
1446 BPF_MOV64_IMM(BPF_REG_0, 0),
1447 BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_0,
1448 offsetof(struct __sk_buff, cb[0])),
1449 BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_0,
1450 offsetof(struct __sk_buff, cb[1])),
1451 BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_0,
1452 offsetof(struct __sk_buff, cb[2])),
1453 BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_0,
1454 offsetof(struct __sk_buff, cb[3])),
1455 BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_0,
1456 offsetof(struct __sk_buff, cb[4])),
1457 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
1458 offsetof(struct __sk_buff, cb[0])),
1459 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
1460 offsetof(struct __sk_buff, cb[1])),
1461 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
1462 offsetof(struct __sk_buff, cb[2])),
1463 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
1464 offsetof(struct __sk_buff, cb[3])),
1465 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
1466 offsetof(struct __sk_buff, cb[4])),
1472 "check cb access: word, unaligned 1",
1474 BPF_MOV64_IMM(BPF_REG_0, 0),
1475 BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_0,
1476 offsetof(struct __sk_buff, cb[0]) + 2),
1479 .errstr = "misaligned context access",
1481 .flags = F_LOAD_WITH_STRICT_ALIGNMENT,
1484 "check cb access: word, unaligned 2",
1486 BPF_MOV64_IMM(BPF_REG_0, 0),
1487 BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_0,
1488 offsetof(struct __sk_buff, cb[4]) + 1),
1491 .errstr = "misaligned context access",
1493 .flags = F_LOAD_WITH_STRICT_ALIGNMENT,
1496 "check cb access: word, unaligned 3",
1498 BPF_MOV64_IMM(BPF_REG_0, 0),
1499 BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_0,
1500 offsetof(struct __sk_buff, cb[4]) + 2),
1503 .errstr = "misaligned context access",
1505 .flags = F_LOAD_WITH_STRICT_ALIGNMENT,
1508 "check cb access: word, unaligned 4",
1510 BPF_MOV64_IMM(BPF_REG_0, 0),
1511 BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_0,
1512 offsetof(struct __sk_buff, cb[4]) + 3),
1515 .errstr = "misaligned context access",
1517 .flags = F_LOAD_WITH_STRICT_ALIGNMENT,
1520 "check cb access: double",
1522 BPF_MOV64_IMM(BPF_REG_0, 0),
1523 BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_0,
1524 offsetof(struct __sk_buff, cb[0])),
1525 BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_0,
1526 offsetof(struct __sk_buff, cb[2])),
1527 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1,
1528 offsetof(struct __sk_buff, cb[0])),
1529 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1,
1530 offsetof(struct __sk_buff, cb[2])),
1536 "check cb access: double, unaligned 1",
1538 BPF_MOV64_IMM(BPF_REG_0, 0),
1539 BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_0,
1540 offsetof(struct __sk_buff, cb[1])),
1543 .errstr = "misaligned context access",
1545 .flags = F_LOAD_WITH_STRICT_ALIGNMENT,
1548 "check cb access: double, unaligned 2",
1550 BPF_MOV64_IMM(BPF_REG_0, 0),
1551 BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_0,
1552 offsetof(struct __sk_buff, cb[3])),
1555 .errstr = "misaligned context access",
1557 .flags = F_LOAD_WITH_STRICT_ALIGNMENT,
1560 "check cb access: double, oob 1",
1562 BPF_MOV64_IMM(BPF_REG_0, 0),
1563 BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_0,
1564 offsetof(struct __sk_buff, cb[4])),
1567 .errstr = "invalid bpf_context access",
1571 "check cb access: double, oob 2",
1573 BPF_MOV64_IMM(BPF_REG_0, 0),
1574 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1,
1575 offsetof(struct __sk_buff, cb[4])),
1578 .errstr = "invalid bpf_context access",
1582 "check __sk_buff->ifindex dw store not permitted",
1584 BPF_MOV64_IMM(BPF_REG_0, 0),
1585 BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_0,
1586 offsetof(struct __sk_buff, ifindex)),
1589 .errstr = "invalid bpf_context access",
1593 "check __sk_buff->ifindex dw load not permitted",
1595 BPF_MOV64_IMM(BPF_REG_0, 0),
1596 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1,
1597 offsetof(struct __sk_buff, ifindex)),
1600 .errstr = "invalid bpf_context access",
1604 "check cb access: double, wrong type",
1606 BPF_MOV64_IMM(BPF_REG_0, 0),
1607 BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_0,
1608 offsetof(struct __sk_buff, cb[0])),
1611 .errstr = "invalid bpf_context access",
1613 .prog_type = BPF_PROG_TYPE_CGROUP_SOCK,
1616 "check out of range skb->cb access",
1618 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
1619 offsetof(struct __sk_buff, cb[0]) + 256),
1622 .errstr = "invalid bpf_context access",
1623 .errstr_unpriv = "",
1625 .prog_type = BPF_PROG_TYPE_SCHED_ACT,
1628 "write skb fields from socket prog",
1630 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
1631 offsetof(struct __sk_buff, cb[4])),
1632 BPF_JMP_IMM(BPF_JGE, BPF_REG_0, 0, 1),
1633 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
1634 offsetof(struct __sk_buff, mark)),
1635 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
1636 offsetof(struct __sk_buff, tc_index)),
1637 BPF_JMP_IMM(BPF_JGE, BPF_REG_0, 0, 1),
1638 BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_1,
1639 offsetof(struct __sk_buff, cb[0])),
1640 BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_1,
1641 offsetof(struct __sk_buff, cb[2])),
1645 .errstr_unpriv = "R1 leaks addr",
1646 .result_unpriv = REJECT,
1649 "write skb fields from tc_cls_act prog",
1651 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
1652 offsetof(struct __sk_buff, cb[0])),
1653 BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_0,
1654 offsetof(struct __sk_buff, mark)),
1655 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
1656 offsetof(struct __sk_buff, tc_index)),
1657 BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_0,
1658 offsetof(struct __sk_buff, tc_index)),
1659 BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_0,
1660 offsetof(struct __sk_buff, cb[3])),
1663 .errstr_unpriv = "",
1664 .result_unpriv = REJECT,
1666 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
1669 "PTR_TO_STACK store/load",
1671 BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
1672 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -10),
1673 BPF_ST_MEM(BPF_DW, BPF_REG_1, 2, 0xfaceb00c),
1674 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, 2),
1680 "PTR_TO_STACK store/load - bad alignment on off",
1682 BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
1683 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -8),
1684 BPF_ST_MEM(BPF_DW, BPF_REG_1, 2, 0xfaceb00c),
1685 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, 2),
1689 .errstr = "misaligned stack access off (0x0; 0x0)+-8+2 size 8",
1690 .flags = F_LOAD_WITH_STRICT_ALIGNMENT,
1693 "PTR_TO_STACK store/load - bad alignment on reg",
1695 BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
1696 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -10),
1697 BPF_ST_MEM(BPF_DW, BPF_REG_1, 8, 0xfaceb00c),
1698 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, 8),
1702 .errstr = "misaligned stack access off (0x0; 0x0)+-10+8 size 8",
1703 .flags = F_LOAD_WITH_STRICT_ALIGNMENT,
1706 "PTR_TO_STACK store/load - out of bounds low",
1708 BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
1709 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -80000),
1710 BPF_ST_MEM(BPF_DW, BPF_REG_1, 8, 0xfaceb00c),
1711 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, 8),
1715 .errstr = "invalid stack off=-79992 size=8",
1718 "PTR_TO_STACK store/load - out of bounds high",
1720 BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
1721 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -8),
1722 BPF_ST_MEM(BPF_DW, BPF_REG_1, 8, 0xfaceb00c),
1723 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, 8),
1727 .errstr = "invalid stack off=0 size=8",
1730 "unpriv: return pointer",
1732 BPF_MOV64_REG(BPF_REG_0, BPF_REG_10),
1736 .result_unpriv = REJECT,
1737 .errstr_unpriv = "R0 leaks addr",
1740 "unpriv: add const to pointer",
1742 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
1743 BPF_MOV64_IMM(BPF_REG_0, 0),
1749 "unpriv: add pointer to pointer",
1751 BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_10),
1752 BPF_MOV64_IMM(BPF_REG_0, 0),
1756 .result_unpriv = REJECT,
1757 .errstr_unpriv = "R1 pointer += pointer",
1760 "unpriv: neg pointer",
1762 BPF_ALU64_IMM(BPF_NEG, BPF_REG_1, 0),
1763 BPF_MOV64_IMM(BPF_REG_0, 0),
1767 .result_unpriv = REJECT,
1768 .errstr_unpriv = "R1 pointer arithmetic",
1771 "unpriv: cmp pointer with const",
1773 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, 0),
1774 BPF_MOV64_IMM(BPF_REG_0, 0),
1778 .result_unpriv = REJECT,
1779 .errstr_unpriv = "R1 pointer comparison",
1782 "unpriv: cmp pointer with pointer",
1784 BPF_JMP_REG(BPF_JEQ, BPF_REG_1, BPF_REG_10, 0),
1785 BPF_MOV64_IMM(BPF_REG_0, 0),
1789 .result_unpriv = REJECT,
1790 .errstr_unpriv = "R10 pointer comparison",
1793 "unpriv: check that printk is disallowed",
1795 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
1796 BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
1797 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -8),
1798 BPF_MOV64_IMM(BPF_REG_2, 8),
1799 BPF_MOV64_REG(BPF_REG_3, BPF_REG_1),
1800 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
1801 BPF_FUNC_trace_printk),
1802 BPF_MOV64_IMM(BPF_REG_0, 0),
1805 .errstr_unpriv = "unknown func bpf_trace_printk#6",
1806 .result_unpriv = REJECT,
1810 "unpriv: pass pointer to helper function",
1812 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
1813 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
1814 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
1815 BPF_LD_MAP_FD(BPF_REG_1, 0),
1816 BPF_MOV64_REG(BPF_REG_3, BPF_REG_2),
1817 BPF_MOV64_REG(BPF_REG_4, BPF_REG_2),
1818 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
1819 BPF_FUNC_map_update_elem),
1820 BPF_MOV64_IMM(BPF_REG_0, 0),
1823 .fixup_map1 = { 3 },
1824 .errstr_unpriv = "R4 leaks addr",
1825 .result_unpriv = REJECT,
1829 "unpriv: indirectly pass pointer on stack to helper function",
1831 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_10, -8),
1832 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
1833 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
1834 BPF_LD_MAP_FD(BPF_REG_1, 0),
1835 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
1836 BPF_FUNC_map_lookup_elem),
1837 BPF_MOV64_IMM(BPF_REG_0, 0),
1840 .fixup_map1 = { 3 },
1841 .errstr = "invalid indirect read from stack off -8+0 size 8",
1845 "unpriv: mangle pointer on stack 1",
1847 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_10, -8),
1848 BPF_ST_MEM(BPF_W, BPF_REG_10, -8, 0),
1849 BPF_MOV64_IMM(BPF_REG_0, 0),
1852 .errstr_unpriv = "attempt to corrupt spilled",
1853 .result_unpriv = REJECT,
1857 "unpriv: mangle pointer on stack 2",
1859 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_10, -8),
1860 BPF_ST_MEM(BPF_B, BPF_REG_10, -1, 0),
1861 BPF_MOV64_IMM(BPF_REG_0, 0),
1864 .errstr_unpriv = "attempt to corrupt spilled",
1865 .result_unpriv = REJECT,
1869 "unpriv: read pointer from stack in small chunks",
1871 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_10, -8),
1872 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_10, -8),
1873 BPF_MOV64_IMM(BPF_REG_0, 0),
1876 .errstr = "invalid size",
1880 "unpriv: write pointer into ctx",
1882 BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_1, 0),
1883 BPF_MOV64_IMM(BPF_REG_0, 0),
1886 .errstr_unpriv = "R1 leaks addr",
1887 .result_unpriv = REJECT,
1888 .errstr = "invalid bpf_context access",
1892 "unpriv: spill/fill of ctx",
1894 BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_10),
1895 BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, -8),
1896 BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_1, 0),
1897 BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_6, 0),
1898 BPF_MOV64_IMM(BPF_REG_0, 0),
1904 "unpriv: spill/fill of ctx 2",
1906 BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_10),
1907 BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, -8),
1908 BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_1, 0),
1909 BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_6, 0),
1910 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
1911 BPF_FUNC_get_hash_recalc),
1915 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
1918 "unpriv: spill/fill of ctx 3",
1920 BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_10),
1921 BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, -8),
1922 BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_1, 0),
1923 BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_10, 0),
1924 BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_6, 0),
1925 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
1926 BPF_FUNC_get_hash_recalc),
1930 .errstr = "R1 type=fp expected=ctx",
1931 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
1934 "unpriv: spill/fill of ctx 4",
1936 BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_10),
1937 BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, -8),
1938 BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_1, 0),
1939 BPF_MOV64_IMM(BPF_REG_0, 1),
1940 BPF_RAW_INSN(BPF_STX | BPF_XADD | BPF_DW, BPF_REG_10,
1942 BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_6, 0),
1943 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
1944 BPF_FUNC_get_hash_recalc),
1948 .errstr = "R1 type=inv expected=ctx",
1949 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
1952 "unpriv: spill/fill of different pointers stx",
1954 BPF_MOV64_IMM(BPF_REG_3, 42),
1955 BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_10),
1956 BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, -8),
1957 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, 3),
1958 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
1959 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -16),
1960 BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_2, 0),
1961 BPF_JMP_IMM(BPF_JNE, BPF_REG_1, 0, 1),
1962 BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_1, 0),
1963 BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_6, 0),
1964 BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_3,
1965 offsetof(struct __sk_buff, mark)),
1966 BPF_MOV64_IMM(BPF_REG_0, 0),
1970 .errstr = "same insn cannot be used with different pointers",
1971 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
1974 "unpriv: spill/fill of different pointers ldx",
1976 BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_10),
1977 BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, -8),
1978 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, 3),
1979 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
1980 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2,
1981 -(__s32)offsetof(struct bpf_perf_event_data,
1982 sample_period) - 8),
1983 BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_2, 0),
1984 BPF_JMP_IMM(BPF_JNE, BPF_REG_1, 0, 1),
1985 BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_1, 0),
1986 BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_6, 0),
1987 BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_1,
1988 offsetof(struct bpf_perf_event_data,
1990 BPF_MOV64_IMM(BPF_REG_0, 0),
1994 .errstr = "same insn cannot be used with different pointers",
1995 .prog_type = BPF_PROG_TYPE_PERF_EVENT,
1998 "unpriv: write pointer into map elem value",
2000 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
2001 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
2002 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
2003 BPF_LD_MAP_FD(BPF_REG_1, 0),
2004 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
2005 BPF_FUNC_map_lookup_elem),
2006 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 1),
2007 BPF_STX_MEM(BPF_DW, BPF_REG_0, BPF_REG_0, 0),
2010 .fixup_map1 = { 3 },
2011 .errstr_unpriv = "R0 leaks addr",
2012 .result_unpriv = REJECT,
2016 "unpriv: partial copy of pointer",
2018 BPF_MOV32_REG(BPF_REG_1, BPF_REG_10),
2019 BPF_MOV64_IMM(BPF_REG_0, 0),
2022 .errstr_unpriv = "R10 partial copy",
2023 .result_unpriv = REJECT,
2027 "unpriv: pass pointer to tail_call",
2029 BPF_MOV64_REG(BPF_REG_3, BPF_REG_1),
2030 BPF_LD_MAP_FD(BPF_REG_2, 0),
2031 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
2032 BPF_FUNC_tail_call),
2033 BPF_MOV64_IMM(BPF_REG_0, 0),
2036 .fixup_prog = { 1 },
2037 .errstr_unpriv = "R3 leaks addr into helper",
2038 .result_unpriv = REJECT,
2042 "unpriv: cmp map pointer with zero",
2044 BPF_MOV64_IMM(BPF_REG_1, 0),
2045 BPF_LD_MAP_FD(BPF_REG_1, 0),
2046 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, 0),
2047 BPF_MOV64_IMM(BPF_REG_0, 0),
2050 .fixup_map1 = { 1 },
2051 .errstr_unpriv = "R1 pointer comparison",
2052 .result_unpriv = REJECT,
2056 "unpriv: write into frame pointer",
2058 BPF_MOV64_REG(BPF_REG_10, BPF_REG_1),
2059 BPF_MOV64_IMM(BPF_REG_0, 0),
2062 .errstr = "frame pointer is read only",
2066 "unpriv: spill/fill frame pointer",
2068 BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_10),
2069 BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, -8),
2070 BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_10, 0),
2071 BPF_LDX_MEM(BPF_DW, BPF_REG_10, BPF_REG_6, 0),
2072 BPF_MOV64_IMM(BPF_REG_0, 0),
2075 .errstr = "frame pointer is read only",
2079 "unpriv: cmp of frame pointer",
2081 BPF_JMP_IMM(BPF_JEQ, BPF_REG_10, 0, 0),
2082 BPF_MOV64_IMM(BPF_REG_0, 0),
2085 .errstr_unpriv = "R10 pointer comparison",
2086 .result_unpriv = REJECT,
2090 "unpriv: adding of fp",
2092 BPF_MOV64_IMM(BPF_REG_0, 0),
2093 BPF_MOV64_IMM(BPF_REG_1, 0),
2094 BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_10),
2095 BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_0, -8),
2101 "unpriv: cmp of stack pointer",
2103 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
2104 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
2105 BPF_JMP_IMM(BPF_JEQ, BPF_REG_2, 0, 0),
2106 BPF_MOV64_IMM(BPF_REG_0, 0),
2109 .errstr_unpriv = "R2 pointer comparison",
2110 .result_unpriv = REJECT,
2114 "stack pointer arithmetic",
2116 BPF_MOV64_IMM(BPF_REG_1, 4),
2117 BPF_JMP_IMM(BPF_JA, 0, 0, 0),
2118 BPF_MOV64_REG(BPF_REG_7, BPF_REG_10),
2119 BPF_ALU64_IMM(BPF_ADD, BPF_REG_7, -10),
2120 BPF_ALU64_IMM(BPF_ADD, BPF_REG_7, -10),
2121 BPF_MOV64_REG(BPF_REG_2, BPF_REG_7),
2122 BPF_ALU64_REG(BPF_ADD, BPF_REG_2, BPF_REG_1),
2123 BPF_ST_MEM(0, BPF_REG_2, 4, 0),
2124 BPF_MOV64_REG(BPF_REG_2, BPF_REG_7),
2125 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, 8),
2126 BPF_ST_MEM(0, BPF_REG_2, 4, 0),
2127 BPF_MOV64_IMM(BPF_REG_0, 0),
2133 "raw_stack: no skb_load_bytes",
2135 BPF_MOV64_IMM(BPF_REG_2, 4),
2136 BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_10),
2137 BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, -8),
2138 BPF_MOV64_REG(BPF_REG_3, BPF_REG_6),
2139 BPF_MOV64_IMM(BPF_REG_4, 8),
2140 /* Call to skb_load_bytes() omitted. */
2141 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_6, 0),
2145 .errstr = "invalid read from stack off -8+0 size 8",
2146 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
2149 "raw_stack: skb_load_bytes, negative len",
2151 BPF_MOV64_IMM(BPF_REG_2, 4),
2152 BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_10),
2153 BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, -8),
2154 BPF_MOV64_REG(BPF_REG_3, BPF_REG_6),
2155 BPF_MOV64_IMM(BPF_REG_4, -8),
2156 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
2157 BPF_FUNC_skb_load_bytes),
2158 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_6, 0),
2162 .errstr = "R4 min value is negative",
2163 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
2166 "raw_stack: skb_load_bytes, negative len 2",
2168 BPF_MOV64_IMM(BPF_REG_2, 4),
2169 BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_10),
2170 BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, -8),
2171 BPF_MOV64_REG(BPF_REG_3, BPF_REG_6),
2172 BPF_MOV64_IMM(BPF_REG_4, ~0),
2173 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
2174 BPF_FUNC_skb_load_bytes),
2175 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_6, 0),
2179 .errstr = "R4 min value is negative",
2180 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
2183 "raw_stack: skb_load_bytes, zero len",
2185 BPF_MOV64_IMM(BPF_REG_2, 4),
2186 BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_10),
2187 BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, -8),
2188 BPF_MOV64_REG(BPF_REG_3, BPF_REG_6),
2189 BPF_MOV64_IMM(BPF_REG_4, 0),
2190 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
2191 BPF_FUNC_skb_load_bytes),
2192 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_6, 0),
2196 .errstr = "invalid stack type R3",
2197 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
2200 "raw_stack: skb_load_bytes, no init",
2202 BPF_MOV64_IMM(BPF_REG_2, 4),
2203 BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_10),
2204 BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, -8),
2205 BPF_MOV64_REG(BPF_REG_3, BPF_REG_6),
2206 BPF_MOV64_IMM(BPF_REG_4, 8),
2207 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
2208 BPF_FUNC_skb_load_bytes),
2209 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_6, 0),
2213 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
2216 "raw_stack: skb_load_bytes, init",
2218 BPF_MOV64_IMM(BPF_REG_2, 4),
2219 BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_10),
2220 BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, -8),
2221 BPF_ST_MEM(BPF_DW, BPF_REG_6, 0, 0xcafe),
2222 BPF_MOV64_REG(BPF_REG_3, BPF_REG_6),
2223 BPF_MOV64_IMM(BPF_REG_4, 8),
2224 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
2225 BPF_FUNC_skb_load_bytes),
2226 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_6, 0),
2230 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
2233 "raw_stack: skb_load_bytes, spilled regs around bounds",
2235 BPF_MOV64_IMM(BPF_REG_2, 4),
2236 BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_10),
2237 BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, -16),
2238 BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_1, -8),
2239 BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_1, 8),
2240 BPF_MOV64_REG(BPF_REG_3, BPF_REG_6),
2241 BPF_MOV64_IMM(BPF_REG_4, 8),
2242 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
2243 BPF_FUNC_skb_load_bytes),
2244 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_6, -8),
2245 BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_6, 8),
2246 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_0,
2247 offsetof(struct __sk_buff, mark)),
2248 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_2,
2249 offsetof(struct __sk_buff, priority)),
2250 BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_2),
2254 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
2257 "raw_stack: skb_load_bytes, spilled regs corruption",
2259 BPF_MOV64_IMM(BPF_REG_2, 4),
2260 BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_10),
2261 BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, -8),
2262 BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_1, 0),
2263 BPF_MOV64_REG(BPF_REG_3, BPF_REG_6),
2264 BPF_MOV64_IMM(BPF_REG_4, 8),
2265 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
2266 BPF_FUNC_skb_load_bytes),
2267 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_6, 0),
2268 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_0,
2269 offsetof(struct __sk_buff, mark)),
2273 .errstr = "R0 invalid mem access 'inv'",
2274 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
2277 "raw_stack: skb_load_bytes, spilled regs corruption 2",
2279 BPF_MOV64_IMM(BPF_REG_2, 4),
2280 BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_10),
2281 BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, -16),
2282 BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_1, -8),
2283 BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_1, 0),
2284 BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_1, 8),
2285 BPF_MOV64_REG(BPF_REG_3, BPF_REG_6),
2286 BPF_MOV64_IMM(BPF_REG_4, 8),
2287 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
2288 BPF_FUNC_skb_load_bytes),
2289 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_6, -8),
2290 BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_6, 8),
2291 BPF_LDX_MEM(BPF_DW, BPF_REG_3, BPF_REG_6, 0),
2292 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_0,
2293 offsetof(struct __sk_buff, mark)),
2294 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_2,
2295 offsetof(struct __sk_buff, priority)),
2296 BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_2),
2297 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_3,
2298 offsetof(struct __sk_buff, pkt_type)),
2299 BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_3),
2303 .errstr = "R3 invalid mem access 'inv'",
2304 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
2307 "raw_stack: skb_load_bytes, spilled regs + data",
2309 BPF_MOV64_IMM(BPF_REG_2, 4),
2310 BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_10),
2311 BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, -16),
2312 BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_1, -8),
2313 BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_1, 0),
2314 BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_1, 8),
2315 BPF_MOV64_REG(BPF_REG_3, BPF_REG_6),
2316 BPF_MOV64_IMM(BPF_REG_4, 8),
2317 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
2318 BPF_FUNC_skb_load_bytes),
2319 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_6, -8),
2320 BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_6, 8),
2321 BPF_LDX_MEM(BPF_DW, BPF_REG_3, BPF_REG_6, 0),
2322 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_0,
2323 offsetof(struct __sk_buff, mark)),
2324 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_2,
2325 offsetof(struct __sk_buff, priority)),
2326 BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_2),
2327 BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_3),
2331 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
2334 "raw_stack: skb_load_bytes, invalid access 1",
2336 BPF_MOV64_IMM(BPF_REG_2, 4),
2337 BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_10),
2338 BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, -513),
2339 BPF_MOV64_REG(BPF_REG_3, BPF_REG_6),
2340 BPF_MOV64_IMM(BPF_REG_4, 8),
2341 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
2342 BPF_FUNC_skb_load_bytes),
2343 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_6, 0),
2347 .errstr = "invalid stack type R3 off=-513 access_size=8",
2348 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
2351 "raw_stack: skb_load_bytes, invalid access 2",
2353 BPF_MOV64_IMM(BPF_REG_2, 4),
2354 BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_10),
2355 BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, -1),
2356 BPF_MOV64_REG(BPF_REG_3, BPF_REG_6),
2357 BPF_MOV64_IMM(BPF_REG_4, 8),
2358 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
2359 BPF_FUNC_skb_load_bytes),
2360 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_6, 0),
2364 .errstr = "invalid stack type R3 off=-1 access_size=8",
2365 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
2368 "raw_stack: skb_load_bytes, invalid access 3",
2370 BPF_MOV64_IMM(BPF_REG_2, 4),
2371 BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_10),
2372 BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, 0xffffffff),
2373 BPF_MOV64_REG(BPF_REG_3, BPF_REG_6),
2374 BPF_MOV64_IMM(BPF_REG_4, 0xffffffff),
2375 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
2376 BPF_FUNC_skb_load_bytes),
2377 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_6, 0),
2381 .errstr = "R4 min value is negative",
2382 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
2385 "raw_stack: skb_load_bytes, invalid access 4",
2387 BPF_MOV64_IMM(BPF_REG_2, 4),
2388 BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_10),
2389 BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, -1),
2390 BPF_MOV64_REG(BPF_REG_3, BPF_REG_6),
2391 BPF_MOV64_IMM(BPF_REG_4, 0x7fffffff),
2392 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
2393 BPF_FUNC_skb_load_bytes),
2394 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_6, 0),
2398 .errstr = "R4 unbounded memory access, use 'var &= const' or 'if (var < const)'",
2399 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
2402 "raw_stack: skb_load_bytes, invalid access 5",
2404 BPF_MOV64_IMM(BPF_REG_2, 4),
2405 BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_10),
2406 BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, -512),
2407 BPF_MOV64_REG(BPF_REG_3, BPF_REG_6),
2408 BPF_MOV64_IMM(BPF_REG_4, 0x7fffffff),
2409 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
2410 BPF_FUNC_skb_load_bytes),
2411 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_6, 0),
2415 .errstr = "R4 unbounded memory access, use 'var &= const' or 'if (var < const)'",
2416 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
2419 "raw_stack: skb_load_bytes, invalid access 6",
2421 BPF_MOV64_IMM(BPF_REG_2, 4),
2422 BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_10),
2423 BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, -512),
2424 BPF_MOV64_REG(BPF_REG_3, BPF_REG_6),
2425 BPF_MOV64_IMM(BPF_REG_4, 0),
2426 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
2427 BPF_FUNC_skb_load_bytes),
2428 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_6, 0),
2432 .errstr = "invalid stack type R3 off=-512 access_size=0",
2433 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
2436 "raw_stack: skb_load_bytes, large access",
2438 BPF_MOV64_IMM(BPF_REG_2, 4),
2439 BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_10),
2440 BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, -512),
2441 BPF_MOV64_REG(BPF_REG_3, BPF_REG_6),
2442 BPF_MOV64_IMM(BPF_REG_4, 512),
2443 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
2444 BPF_FUNC_skb_load_bytes),
2445 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_6, 0),
2449 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
2452 "direct packet access: test1",
2454 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
2455 offsetof(struct __sk_buff, data)),
2456 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
2457 offsetof(struct __sk_buff, data_end)),
2458 BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
2459 BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
2460 BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 1),
2461 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_2, 0),
2462 BPF_MOV64_IMM(BPF_REG_0, 0),
2466 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
2469 "direct packet access: test2",
2471 BPF_MOV64_IMM(BPF_REG_0, 1),
2472 BPF_LDX_MEM(BPF_W, BPF_REG_4, BPF_REG_1,
2473 offsetof(struct __sk_buff, data_end)),
2474 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
2475 offsetof(struct __sk_buff, data)),
2476 BPF_MOV64_REG(BPF_REG_5, BPF_REG_3),
2477 BPF_ALU64_IMM(BPF_ADD, BPF_REG_5, 14),
2478 BPF_JMP_REG(BPF_JGT, BPF_REG_5, BPF_REG_4, 15),
2479 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_3, 7),
2480 BPF_LDX_MEM(BPF_B, BPF_REG_4, BPF_REG_3, 12),
2481 BPF_ALU64_IMM(BPF_MUL, BPF_REG_4, 14),
2482 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
2483 offsetof(struct __sk_buff, data)),
2484 BPF_ALU64_REG(BPF_ADD, BPF_REG_3, BPF_REG_4),
2485 BPF_MOV64_REG(BPF_REG_2, BPF_REG_1),
2486 BPF_ALU64_IMM(BPF_LSH, BPF_REG_2, 49),
2487 BPF_ALU64_IMM(BPF_RSH, BPF_REG_2, 49),
2488 BPF_ALU64_REG(BPF_ADD, BPF_REG_3, BPF_REG_2),
2489 BPF_MOV64_REG(BPF_REG_2, BPF_REG_3),
2490 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, 8),
2491 BPF_LDX_MEM(BPF_W, BPF_REG_1, BPF_REG_1,
2492 offsetof(struct __sk_buff, data_end)),
2493 BPF_JMP_REG(BPF_JGT, BPF_REG_2, BPF_REG_1, 1),
2494 BPF_LDX_MEM(BPF_B, BPF_REG_1, BPF_REG_3, 4),
2495 BPF_MOV64_IMM(BPF_REG_0, 0),
2499 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
2502 "direct packet access: test3",
2504 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
2505 offsetof(struct __sk_buff, data)),
2506 BPF_MOV64_IMM(BPF_REG_0, 0),
2509 .errstr = "invalid bpf_context access off=76",
2511 .prog_type = BPF_PROG_TYPE_SOCKET_FILTER,
2514 "direct packet access: test4 (write)",
2516 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
2517 offsetof(struct __sk_buff, data)),
2518 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
2519 offsetof(struct __sk_buff, data_end)),
2520 BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
2521 BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
2522 BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 1),
2523 BPF_STX_MEM(BPF_B, BPF_REG_2, BPF_REG_2, 0),
2524 BPF_MOV64_IMM(BPF_REG_0, 0),
2528 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
2531 "direct packet access: test5 (pkt_end >= reg, good access)",
2533 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
2534 offsetof(struct __sk_buff, data)),
2535 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
2536 offsetof(struct __sk_buff, data_end)),
2537 BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
2538 BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
2539 BPF_JMP_REG(BPF_JGE, BPF_REG_3, BPF_REG_0, 2),
2540 BPF_MOV64_IMM(BPF_REG_0, 1),
2542 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_2, 0),
2543 BPF_MOV64_IMM(BPF_REG_0, 0),
2547 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
2550 "direct packet access: test6 (pkt_end >= reg, bad access)",
2552 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
2553 offsetof(struct __sk_buff, data)),
2554 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
2555 offsetof(struct __sk_buff, data_end)),
2556 BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
2557 BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
2558 BPF_JMP_REG(BPF_JGE, BPF_REG_3, BPF_REG_0, 3),
2559 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_2, 0),
2560 BPF_MOV64_IMM(BPF_REG_0, 1),
2562 BPF_MOV64_IMM(BPF_REG_0, 0),
2565 .errstr = "invalid access to packet",
2567 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
2570 "direct packet access: test7 (pkt_end >= reg, both accesses)",
2572 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
2573 offsetof(struct __sk_buff, data)),
2574 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
2575 offsetof(struct __sk_buff, data_end)),
2576 BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
2577 BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
2578 BPF_JMP_REG(BPF_JGE, BPF_REG_3, BPF_REG_0, 3),
2579 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_2, 0),
2580 BPF_MOV64_IMM(BPF_REG_0, 1),
2582 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_2, 0),
2583 BPF_MOV64_IMM(BPF_REG_0, 0),
2586 .errstr = "invalid access to packet",
2588 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
2591 "direct packet access: test8 (double test, variant 1)",
2593 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
2594 offsetof(struct __sk_buff, data)),
2595 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
2596 offsetof(struct __sk_buff, data_end)),
2597 BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
2598 BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
2599 BPF_JMP_REG(BPF_JGE, BPF_REG_3, BPF_REG_0, 4),
2600 BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 1),
2601 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_2, 0),
2602 BPF_MOV64_IMM(BPF_REG_0, 1),
2604 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_2, 0),
2605 BPF_MOV64_IMM(BPF_REG_0, 0),
2609 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
2612 "direct packet access: test9 (double test, variant 2)",
2614 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
2615 offsetof(struct __sk_buff, data)),
2616 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
2617 offsetof(struct __sk_buff, data_end)),
2618 BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
2619 BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
2620 BPF_JMP_REG(BPF_JGE, BPF_REG_3, BPF_REG_0, 2),
2621 BPF_MOV64_IMM(BPF_REG_0, 1),
2623 BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 1),
2624 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_2, 0),
2625 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_2, 0),
2626 BPF_MOV64_IMM(BPF_REG_0, 0),
2630 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
2633 "direct packet access: test10 (write invalid)",
2635 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
2636 offsetof(struct __sk_buff, data)),
2637 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
2638 offsetof(struct __sk_buff, data_end)),
2639 BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
2640 BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
2641 BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 2),
2642 BPF_MOV64_IMM(BPF_REG_0, 0),
2644 BPF_STX_MEM(BPF_B, BPF_REG_2, BPF_REG_2, 0),
2645 BPF_MOV64_IMM(BPF_REG_0, 0),
2648 .errstr = "invalid access to packet",
2650 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
2653 "direct packet access: test11 (shift, good access)",
2655 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
2656 offsetof(struct __sk_buff, data)),
2657 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
2658 offsetof(struct __sk_buff, data_end)),
2659 BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
2660 BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 22),
2661 BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 8),
2662 BPF_MOV64_IMM(BPF_REG_3, 144),
2663 BPF_MOV64_REG(BPF_REG_5, BPF_REG_3),
2664 BPF_ALU64_IMM(BPF_ADD, BPF_REG_5, 23),
2665 BPF_ALU64_IMM(BPF_RSH, BPF_REG_5, 3),
2666 BPF_MOV64_REG(BPF_REG_6, BPF_REG_2),
2667 BPF_ALU64_REG(BPF_ADD, BPF_REG_6, BPF_REG_5),
2668 BPF_MOV64_IMM(BPF_REG_0, 1),
2670 BPF_MOV64_IMM(BPF_REG_0, 0),
2674 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
2677 "direct packet access: test12 (and, good access)",
2679 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
2680 offsetof(struct __sk_buff, data)),
2681 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
2682 offsetof(struct __sk_buff, data_end)),
2683 BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
2684 BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 22),
2685 BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 8),
2686 BPF_MOV64_IMM(BPF_REG_3, 144),
2687 BPF_MOV64_REG(BPF_REG_5, BPF_REG_3),
2688 BPF_ALU64_IMM(BPF_ADD, BPF_REG_5, 23),
2689 BPF_ALU64_IMM(BPF_AND, BPF_REG_5, 15),
2690 BPF_MOV64_REG(BPF_REG_6, BPF_REG_2),
2691 BPF_ALU64_REG(BPF_ADD, BPF_REG_6, BPF_REG_5),
2692 BPF_MOV64_IMM(BPF_REG_0, 1),
2694 BPF_MOV64_IMM(BPF_REG_0, 0),
2698 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
2701 "direct packet access: test13 (branches, good access)",
2703 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
2704 offsetof(struct __sk_buff, data)),
2705 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
2706 offsetof(struct __sk_buff, data_end)),
2707 BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
2708 BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 22),
2709 BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 13),
2710 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
2711 offsetof(struct __sk_buff, mark)),
2712 BPF_MOV64_IMM(BPF_REG_4, 1),
2713 BPF_JMP_REG(BPF_JGT, BPF_REG_3, BPF_REG_4, 2),
2714 BPF_MOV64_IMM(BPF_REG_3, 14),
2715 BPF_JMP_IMM(BPF_JA, 0, 0, 1),
2716 BPF_MOV64_IMM(BPF_REG_3, 24),
2717 BPF_MOV64_REG(BPF_REG_5, BPF_REG_3),
2718 BPF_ALU64_IMM(BPF_ADD, BPF_REG_5, 23),
2719 BPF_ALU64_IMM(BPF_AND, BPF_REG_5, 15),
2720 BPF_MOV64_REG(BPF_REG_6, BPF_REG_2),
2721 BPF_ALU64_REG(BPF_ADD, BPF_REG_6, BPF_REG_5),
2722 BPF_MOV64_IMM(BPF_REG_0, 1),
2724 BPF_MOV64_IMM(BPF_REG_0, 0),
2728 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
2731 "direct packet access: test14 (pkt_ptr += 0, CONST_IMM, good access)",
2733 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
2734 offsetof(struct __sk_buff, data)),
2735 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
2736 offsetof(struct __sk_buff, data_end)),
2737 BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
2738 BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 22),
2739 BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 7),
2740 BPF_MOV64_IMM(BPF_REG_5, 12),
2741 BPF_ALU64_IMM(BPF_RSH, BPF_REG_5, 4),
2742 BPF_MOV64_REG(BPF_REG_6, BPF_REG_2),
2743 BPF_ALU64_REG(BPF_ADD, BPF_REG_6, BPF_REG_5),
2744 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_6, 0),
2745 BPF_MOV64_IMM(BPF_REG_0, 1),
2747 BPF_MOV64_IMM(BPF_REG_0, 0),
2751 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
2754 "direct packet access: test15 (spill with xadd)",
2756 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
2757 offsetof(struct __sk_buff, data)),
2758 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
2759 offsetof(struct __sk_buff, data_end)),
2760 BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
2761 BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
2762 BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 8),
2763 BPF_MOV64_IMM(BPF_REG_5, 4096),
2764 BPF_MOV64_REG(BPF_REG_4, BPF_REG_10),
2765 BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, -8),
2766 BPF_STX_MEM(BPF_DW, BPF_REG_4, BPF_REG_2, 0),
2767 BPF_STX_XADD(BPF_DW, BPF_REG_4, BPF_REG_5, 0),
2768 BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_4, 0),
2769 BPF_STX_MEM(BPF_W, BPF_REG_2, BPF_REG_5, 0),
2770 BPF_MOV64_IMM(BPF_REG_0, 0),
2773 .errstr = "R2 invalid mem access 'inv'",
2775 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
2778 "direct packet access: test16 (arith on data_end)",
2780 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
2781 offsetof(struct __sk_buff, data)),
2782 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
2783 offsetof(struct __sk_buff, data_end)),
2784 BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
2785 BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
2786 BPF_ALU64_IMM(BPF_ADD, BPF_REG_3, 16),
2787 BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 1),
2788 BPF_STX_MEM(BPF_B, BPF_REG_2, BPF_REG_2, 0),
2789 BPF_MOV64_IMM(BPF_REG_0, 0),
2792 .errstr = "invalid access to packet",
2794 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
2797 "direct packet access: test17 (pruning, alignment)",
2799 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
2800 offsetof(struct __sk_buff, data)),
2801 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
2802 offsetof(struct __sk_buff, data_end)),
2803 BPF_LDX_MEM(BPF_W, BPF_REG_7, BPF_REG_1,
2804 offsetof(struct __sk_buff, mark)),
2805 BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
2806 BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 14),
2807 BPF_JMP_IMM(BPF_JGT, BPF_REG_7, 1, 4),
2808 BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 1),
2809 BPF_STX_MEM(BPF_W, BPF_REG_0, BPF_REG_0, -4),
2810 BPF_MOV64_IMM(BPF_REG_0, 0),
2812 BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 1),
2815 .errstr = "misaligned packet access off 2+(0x0; 0x0)+15+-4 size 4",
2817 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
2818 .flags = F_LOAD_WITH_STRICT_ALIGNMENT,
2821 "direct packet access: test18 (imm += pkt_ptr, 1)",
2823 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
2824 offsetof(struct __sk_buff, data)),
2825 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
2826 offsetof(struct __sk_buff, data_end)),
2827 BPF_MOV64_IMM(BPF_REG_0, 8),
2828 BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_2),
2829 BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 1),
2830 BPF_STX_MEM(BPF_B, BPF_REG_2, BPF_REG_2, 0),
2831 BPF_MOV64_IMM(BPF_REG_0, 0),
2835 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
2838 "direct packet access: test19 (imm += pkt_ptr, 2)",
2840 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
2841 offsetof(struct __sk_buff, data)),
2842 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
2843 offsetof(struct __sk_buff, data_end)),
2844 BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
2845 BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
2846 BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 3),
2847 BPF_MOV64_IMM(BPF_REG_4, 4),
2848 BPF_ALU64_REG(BPF_ADD, BPF_REG_4, BPF_REG_2),
2849 BPF_STX_MEM(BPF_B, BPF_REG_4, BPF_REG_4, 0),
2850 BPF_MOV64_IMM(BPF_REG_0, 0),
2854 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
2857 "direct packet access: test20 (x += pkt_ptr, 1)",
2859 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
2860 offsetof(struct __sk_buff, data)),
2861 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
2862 offsetof(struct __sk_buff, data_end)),
2863 BPF_MOV64_IMM(BPF_REG_0, 0xffffffff),
2864 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -8),
2865 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_10, -8),
2866 BPF_ALU64_IMM(BPF_AND, BPF_REG_0, 0x7fff),
2867 BPF_MOV64_REG(BPF_REG_4, BPF_REG_0),
2868 BPF_ALU64_REG(BPF_ADD, BPF_REG_4, BPF_REG_2),
2869 BPF_MOV64_REG(BPF_REG_5, BPF_REG_4),
2870 BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, 0x7fff - 1),
2871 BPF_JMP_REG(BPF_JGT, BPF_REG_4, BPF_REG_3, 1),
2872 BPF_STX_MEM(BPF_DW, BPF_REG_5, BPF_REG_4, 0),
2873 BPF_MOV64_IMM(BPF_REG_0, 0),
2876 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
2880 "direct packet access: test21 (x += pkt_ptr, 2)",
2882 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
2883 offsetof(struct __sk_buff, data)),
2884 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
2885 offsetof(struct __sk_buff, data_end)),
2886 BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
2887 BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
2888 BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 9),
2889 BPF_MOV64_IMM(BPF_REG_4, 0xffffffff),
2890 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_4, -8),
2891 BPF_LDX_MEM(BPF_DW, BPF_REG_4, BPF_REG_10, -8),
2892 BPF_ALU64_IMM(BPF_AND, BPF_REG_4, 0x7fff),
2893 BPF_ALU64_REG(BPF_ADD, BPF_REG_4, BPF_REG_2),
2894 BPF_MOV64_REG(BPF_REG_5, BPF_REG_4),
2895 BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, 0x7fff - 1),
2896 BPF_JMP_REG(BPF_JGT, BPF_REG_4, BPF_REG_3, 1),
2897 BPF_STX_MEM(BPF_DW, BPF_REG_5, BPF_REG_4, 0),
2898 BPF_MOV64_IMM(BPF_REG_0, 0),
2901 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
2905 "direct packet access: test22 (x += pkt_ptr, 3)",
2907 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
2908 offsetof(struct __sk_buff, data)),
2909 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
2910 offsetof(struct __sk_buff, data_end)),
2911 BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
2912 BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
2913 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_2, -8),
2914 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_3, -16),
2915 BPF_LDX_MEM(BPF_DW, BPF_REG_3, BPF_REG_10, -16),
2916 BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 11),
2917 BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_10, -8),
2918 BPF_MOV64_IMM(BPF_REG_4, 0xffffffff),
2919 BPF_STX_XADD(BPF_DW, BPF_REG_10, BPF_REG_4, -8),
2920 BPF_LDX_MEM(BPF_DW, BPF_REG_4, BPF_REG_10, -8),
2921 BPF_ALU64_IMM(BPF_RSH, BPF_REG_4, 49),
2922 BPF_ALU64_REG(BPF_ADD, BPF_REG_4, BPF_REG_2),
2923 BPF_MOV64_REG(BPF_REG_0, BPF_REG_4),
2924 BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 2),
2925 BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 2),
2926 BPF_MOV64_IMM(BPF_REG_2, 1),
2927 BPF_STX_MEM(BPF_H, BPF_REG_4, BPF_REG_2, 0),
2928 BPF_MOV64_IMM(BPF_REG_0, 0),
2931 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
2935 "direct packet access: test23 (x += pkt_ptr, 4)",
2937 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
2938 offsetof(struct __sk_buff, data)),
2939 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
2940 offsetof(struct __sk_buff, data_end)),
2941 BPF_MOV64_IMM(BPF_REG_0, 0xffffffff),
2942 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -8),
2943 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_10, -8),
2944 BPF_ALU64_IMM(BPF_AND, BPF_REG_0, 0xffff),
2945 BPF_MOV64_REG(BPF_REG_4, BPF_REG_0),
2946 BPF_MOV64_IMM(BPF_REG_0, 31),
2947 BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_4),
2948 BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_2),
2949 BPF_MOV64_REG(BPF_REG_5, BPF_REG_0),
2950 BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 0xffff - 1),
2951 BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 1),
2952 BPF_STX_MEM(BPF_DW, BPF_REG_5, BPF_REG_0, 0),
2953 BPF_MOV64_IMM(BPF_REG_0, 0),
2956 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
2958 .errstr = "invalid access to packet, off=0 size=8, R5(id=1,off=0,r=0)",
2961 "direct packet access: test24 (x += pkt_ptr, 5)",
2963 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
2964 offsetof(struct __sk_buff, data)),
2965 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
2966 offsetof(struct __sk_buff, data_end)),
2967 BPF_MOV64_IMM(BPF_REG_0, 0xffffffff),
2968 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -8),
2969 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_10, -8),
2970 BPF_ALU64_IMM(BPF_AND, BPF_REG_0, 0xff),
2971 BPF_MOV64_REG(BPF_REG_4, BPF_REG_0),
2972 BPF_MOV64_IMM(BPF_REG_0, 64),
2973 BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_4),
2974 BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_2),
2975 BPF_MOV64_REG(BPF_REG_5, BPF_REG_0),
2976 BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 0x7fff - 1),
2977 BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 1),
2978 BPF_STX_MEM(BPF_DW, BPF_REG_5, BPF_REG_0, 0),
2979 BPF_MOV64_IMM(BPF_REG_0, 0),
2982 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
2986 "direct packet access: test25 (marking on <, good access)",
2988 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
2989 offsetof(struct __sk_buff, data)),
2990 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
2991 offsetof(struct __sk_buff, data_end)),
2992 BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
2993 BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
2994 BPF_JMP_REG(BPF_JLT, BPF_REG_0, BPF_REG_3, 2),
2995 BPF_MOV64_IMM(BPF_REG_0, 0),
2997 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_2, 0),
2998 BPF_JMP_IMM(BPF_JA, 0, 0, -4),
3001 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
3004 "direct packet access: test26 (marking on <, bad access)",
3006 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
3007 offsetof(struct __sk_buff, data)),
3008 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
3009 offsetof(struct __sk_buff, data_end)),
3010 BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
3011 BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
3012 BPF_JMP_REG(BPF_JLT, BPF_REG_0, BPF_REG_3, 3),
3013 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_2, 0),
3014 BPF_MOV64_IMM(BPF_REG_0, 0),
3016 BPF_JMP_IMM(BPF_JA, 0, 0, -3),
3019 .errstr = "invalid access to packet",
3020 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
3023 "direct packet access: test27 (marking on <=, good access)",
3025 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
3026 offsetof(struct __sk_buff, data)),
3027 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
3028 offsetof(struct __sk_buff, data_end)),
3029 BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
3030 BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
3031 BPF_JMP_REG(BPF_JLE, BPF_REG_3, BPF_REG_0, 1),
3032 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_2, 0),
3033 BPF_MOV64_IMM(BPF_REG_0, 1),
3037 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
3040 "direct packet access: test28 (marking on <=, bad access)",
3042 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
3043 offsetof(struct __sk_buff, data)),
3044 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
3045 offsetof(struct __sk_buff, data_end)),
3046 BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
3047 BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
3048 BPF_JMP_REG(BPF_JLE, BPF_REG_3, BPF_REG_0, 2),
3049 BPF_MOV64_IMM(BPF_REG_0, 1),
3051 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_2, 0),
3052 BPF_JMP_IMM(BPF_JA, 0, 0, -4),
3055 .errstr = "invalid access to packet",
3056 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
3059 "helper access to packet: test1, valid packet_ptr range",
3061 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
3062 offsetof(struct xdp_md, data)),
3063 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
3064 offsetof(struct xdp_md, data_end)),
3065 BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
3066 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
3067 BPF_JMP_REG(BPF_JGT, BPF_REG_1, BPF_REG_3, 5),
3068 BPF_LD_MAP_FD(BPF_REG_1, 0),
3069 BPF_MOV64_REG(BPF_REG_3, BPF_REG_2),
3070 BPF_MOV64_IMM(BPF_REG_4, 0),
3071 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
3072 BPF_FUNC_map_update_elem),
3073 BPF_MOV64_IMM(BPF_REG_0, 0),
3076 .fixup_map1 = { 5 },
3077 .result_unpriv = ACCEPT,
3079 .prog_type = BPF_PROG_TYPE_XDP,
3082 "helper access to packet: test2, unchecked packet_ptr",
3084 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
3085 offsetof(struct xdp_md, data)),
3086 BPF_LD_MAP_FD(BPF_REG_1, 0),
3087 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
3088 BPF_FUNC_map_lookup_elem),
3089 BPF_MOV64_IMM(BPF_REG_0, 0),
3092 .fixup_map1 = { 1 },
3094 .errstr = "invalid access to packet",
3095 .prog_type = BPF_PROG_TYPE_XDP,
3098 "helper access to packet: test3, variable add",
3100 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
3101 offsetof(struct xdp_md, data)),
3102 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
3103 offsetof(struct xdp_md, data_end)),
3104 BPF_MOV64_REG(BPF_REG_4, BPF_REG_2),
3105 BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, 8),
3106 BPF_JMP_REG(BPF_JGT, BPF_REG_4, BPF_REG_3, 10),
3107 BPF_LDX_MEM(BPF_B, BPF_REG_5, BPF_REG_2, 0),
3108 BPF_MOV64_REG(BPF_REG_4, BPF_REG_2),
3109 BPF_ALU64_REG(BPF_ADD, BPF_REG_4, BPF_REG_5),
3110 BPF_MOV64_REG(BPF_REG_5, BPF_REG_4),
3111 BPF_ALU64_IMM(BPF_ADD, BPF_REG_5, 8),
3112 BPF_JMP_REG(BPF_JGT, BPF_REG_5, BPF_REG_3, 4),
3113 BPF_LD_MAP_FD(BPF_REG_1, 0),
3114 BPF_MOV64_REG(BPF_REG_2, BPF_REG_4),
3115 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
3116 BPF_FUNC_map_lookup_elem),
3117 BPF_MOV64_IMM(BPF_REG_0, 0),
3120 .fixup_map1 = { 11 },
3122 .prog_type = BPF_PROG_TYPE_XDP,
3125 "helper access to packet: test4, packet_ptr with bad range",
3127 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
3128 offsetof(struct xdp_md, data)),
3129 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
3130 offsetof(struct xdp_md, data_end)),
3131 BPF_MOV64_REG(BPF_REG_4, BPF_REG_2),
3132 BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, 4),
3133 BPF_JMP_REG(BPF_JGT, BPF_REG_4, BPF_REG_3, 2),
3134 BPF_MOV64_IMM(BPF_REG_0, 0),
3136 BPF_LD_MAP_FD(BPF_REG_1, 0),
3137 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
3138 BPF_FUNC_map_lookup_elem),
3139 BPF_MOV64_IMM(BPF_REG_0, 0),
3142 .fixup_map1 = { 7 },
3144 .errstr = "invalid access to packet",
3145 .prog_type = BPF_PROG_TYPE_XDP,
3148 "helper access to packet: test5, packet_ptr with too short range",
3150 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
3151 offsetof(struct xdp_md, data)),
3152 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
3153 offsetof(struct xdp_md, data_end)),
3154 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, 1),
3155 BPF_MOV64_REG(BPF_REG_4, BPF_REG_2),
3156 BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, 7),
3157 BPF_JMP_REG(BPF_JGT, BPF_REG_4, BPF_REG_3, 3),
3158 BPF_LD_MAP_FD(BPF_REG_1, 0),
3159 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
3160 BPF_FUNC_map_lookup_elem),
3161 BPF_MOV64_IMM(BPF_REG_0, 0),
3164 .fixup_map1 = { 6 },
3166 .errstr = "invalid access to packet",
3167 .prog_type = BPF_PROG_TYPE_XDP,
3170 "helper access to packet: test6, cls valid packet_ptr range",
3172 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
3173 offsetof(struct __sk_buff, data)),
3174 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
3175 offsetof(struct __sk_buff, data_end)),
3176 BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
3177 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
3178 BPF_JMP_REG(BPF_JGT, BPF_REG_1, BPF_REG_3, 5),
3179 BPF_LD_MAP_FD(BPF_REG_1, 0),
3180 BPF_MOV64_REG(BPF_REG_3, BPF_REG_2),
3181 BPF_MOV64_IMM(BPF_REG_4, 0),
3182 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
3183 BPF_FUNC_map_update_elem),
3184 BPF_MOV64_IMM(BPF_REG_0, 0),
3187 .fixup_map1 = { 5 },
3189 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
3192 "helper access to packet: test7, cls unchecked packet_ptr",
3194 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
3195 offsetof(struct __sk_buff, data)),
3196 BPF_LD_MAP_FD(BPF_REG_1, 0),
3197 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
3198 BPF_FUNC_map_lookup_elem),
3199 BPF_MOV64_IMM(BPF_REG_0, 0),
3202 .fixup_map1 = { 1 },
3204 .errstr = "invalid access to packet",
3205 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
3208 "helper access to packet: test8, cls variable add",
3210 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
3211 offsetof(struct __sk_buff, data)),
3212 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
3213 offsetof(struct __sk_buff, data_end)),
3214 BPF_MOV64_REG(BPF_REG_4, BPF_REG_2),
3215 BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, 8),
3216 BPF_JMP_REG(BPF_JGT, BPF_REG_4, BPF_REG_3, 10),
3217 BPF_LDX_MEM(BPF_B, BPF_REG_5, BPF_REG_2, 0),
3218 BPF_MOV64_REG(BPF_REG_4, BPF_REG_2),
3219 BPF_ALU64_REG(BPF_ADD, BPF_REG_4, BPF_REG_5),
3220 BPF_MOV64_REG(BPF_REG_5, BPF_REG_4),
3221 BPF_ALU64_IMM(BPF_ADD, BPF_REG_5, 8),
3222 BPF_JMP_REG(BPF_JGT, BPF_REG_5, BPF_REG_3, 4),
3223 BPF_LD_MAP_FD(BPF_REG_1, 0),
3224 BPF_MOV64_REG(BPF_REG_2, BPF_REG_4),
3225 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
3226 BPF_FUNC_map_lookup_elem),
3227 BPF_MOV64_IMM(BPF_REG_0, 0),
3230 .fixup_map1 = { 11 },
3232 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
3235 "helper access to packet: test9, cls packet_ptr with bad range",
3237 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
3238 offsetof(struct __sk_buff, data)),
3239 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
3240 offsetof(struct __sk_buff, data_end)),
3241 BPF_MOV64_REG(BPF_REG_4, BPF_REG_2),
3242 BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, 4),
3243 BPF_JMP_REG(BPF_JGT, BPF_REG_4, BPF_REG_3, 2),
3244 BPF_MOV64_IMM(BPF_REG_0, 0),
3246 BPF_LD_MAP_FD(BPF_REG_1, 0),
3247 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
3248 BPF_FUNC_map_lookup_elem),
3249 BPF_MOV64_IMM(BPF_REG_0, 0),
3252 .fixup_map1 = { 7 },
3254 .errstr = "invalid access to packet",
3255 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
3258 "helper access to packet: test10, cls packet_ptr with too short range",
3260 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
3261 offsetof(struct __sk_buff, data)),
3262 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
3263 offsetof(struct __sk_buff, data_end)),
3264 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, 1),
3265 BPF_MOV64_REG(BPF_REG_4, BPF_REG_2),
3266 BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, 7),
3267 BPF_JMP_REG(BPF_JGT, BPF_REG_4, BPF_REG_3, 3),
3268 BPF_LD_MAP_FD(BPF_REG_1, 0),
3269 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
3270 BPF_FUNC_map_lookup_elem),
3271 BPF_MOV64_IMM(BPF_REG_0, 0),
3274 .fixup_map1 = { 6 },
3276 .errstr = "invalid access to packet",
3277 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
3280 "helper access to packet: test11, cls unsuitable helper 1",
3282 BPF_LDX_MEM(BPF_W, BPF_REG_6, BPF_REG_1,
3283 offsetof(struct __sk_buff, data)),
3284 BPF_LDX_MEM(BPF_W, BPF_REG_7, BPF_REG_1,
3285 offsetof(struct __sk_buff, data_end)),
3286 BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, 1),
3287 BPF_MOV64_REG(BPF_REG_3, BPF_REG_6),
3288 BPF_ALU64_IMM(BPF_ADD, BPF_REG_3, 7),
3289 BPF_JMP_REG(BPF_JGT, BPF_REG_3, BPF_REG_7, 4),
3290 BPF_MOV64_IMM(BPF_REG_2, 0),
3291 BPF_MOV64_IMM(BPF_REG_4, 42),
3292 BPF_MOV64_IMM(BPF_REG_5, 0),
3293 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
3294 BPF_FUNC_skb_store_bytes),
3295 BPF_MOV64_IMM(BPF_REG_0, 0),
3299 .errstr = "helper access to the packet",
3300 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
3303 "helper access to packet: test12, cls unsuitable helper 2",
3305 BPF_LDX_MEM(BPF_W, BPF_REG_6, BPF_REG_1,
3306 offsetof(struct __sk_buff, data)),
3307 BPF_LDX_MEM(BPF_W, BPF_REG_7, BPF_REG_1,
3308 offsetof(struct __sk_buff, data_end)),
3309 BPF_MOV64_REG(BPF_REG_3, BPF_REG_6),
3310 BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, 8),
3311 BPF_JMP_REG(BPF_JGT, BPF_REG_6, BPF_REG_7, 3),
3312 BPF_MOV64_IMM(BPF_REG_2, 0),
3313 BPF_MOV64_IMM(BPF_REG_4, 4),
3314 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
3315 BPF_FUNC_skb_load_bytes),
3316 BPF_MOV64_IMM(BPF_REG_0, 0),
3320 .errstr = "helper access to the packet",
3321 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
3324 "helper access to packet: test13, cls helper ok",
3326 BPF_LDX_MEM(BPF_W, BPF_REG_6, BPF_REG_1,
3327 offsetof(struct __sk_buff, data)),
3328 BPF_LDX_MEM(BPF_W, BPF_REG_7, BPF_REG_1,
3329 offsetof(struct __sk_buff, data_end)),
3330 BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, 1),
3331 BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
3332 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 7),
3333 BPF_JMP_REG(BPF_JGT, BPF_REG_1, BPF_REG_7, 6),
3334 BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
3335 BPF_MOV64_IMM(BPF_REG_2, 4),
3336 BPF_MOV64_IMM(BPF_REG_3, 0),
3337 BPF_MOV64_IMM(BPF_REG_4, 0),
3338 BPF_MOV64_IMM(BPF_REG_5, 0),
3339 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
3340 BPF_FUNC_csum_diff),
3341 BPF_MOV64_IMM(BPF_REG_0, 0),
3345 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
3348 "helper access to packet: test14, cls helper ok sub",
3350 BPF_LDX_MEM(BPF_W, BPF_REG_6, BPF_REG_1,
3351 offsetof(struct __sk_buff, data)),
3352 BPF_LDX_MEM(BPF_W, BPF_REG_7, BPF_REG_1,
3353 offsetof(struct __sk_buff, data_end)),
3354 BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, 1),
3355 BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
3356 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 7),
3357 BPF_JMP_REG(BPF_JGT, BPF_REG_1, BPF_REG_7, 6),
3358 BPF_ALU64_IMM(BPF_SUB, BPF_REG_1, 4),
3359 BPF_MOV64_IMM(BPF_REG_2, 4),
3360 BPF_MOV64_IMM(BPF_REG_3, 0),
3361 BPF_MOV64_IMM(BPF_REG_4, 0),
3362 BPF_MOV64_IMM(BPF_REG_5, 0),
3363 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
3364 BPF_FUNC_csum_diff),
3365 BPF_MOV64_IMM(BPF_REG_0, 0),
3369 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
3372 "helper access to packet: test15, cls helper fail sub",
3374 BPF_LDX_MEM(BPF_W, BPF_REG_6, BPF_REG_1,
3375 offsetof(struct __sk_buff, data)),
3376 BPF_LDX_MEM(BPF_W, BPF_REG_7, BPF_REG_1,
3377 offsetof(struct __sk_buff, data_end)),
3378 BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, 1),
3379 BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
3380 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 7),
3381 BPF_JMP_REG(BPF_JGT, BPF_REG_1, BPF_REG_7, 6),
3382 BPF_ALU64_IMM(BPF_SUB, BPF_REG_1, 12),
3383 BPF_MOV64_IMM(BPF_REG_2, 4),
3384 BPF_MOV64_IMM(BPF_REG_3, 0),
3385 BPF_MOV64_IMM(BPF_REG_4, 0),
3386 BPF_MOV64_IMM(BPF_REG_5, 0),
3387 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
3388 BPF_FUNC_csum_diff),
3389 BPF_MOV64_IMM(BPF_REG_0, 0),
3393 .errstr = "invalid access to packet",
3394 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
3397 "helper access to packet: test16, cls helper fail range 1",
3399 BPF_LDX_MEM(BPF_W, BPF_REG_6, BPF_REG_1,
3400 offsetof(struct __sk_buff, data)),
3401 BPF_LDX_MEM(BPF_W, BPF_REG_7, BPF_REG_1,
3402 offsetof(struct __sk_buff, data_end)),
3403 BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, 1),
3404 BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
3405 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 7),
3406 BPF_JMP_REG(BPF_JGT, BPF_REG_1, BPF_REG_7, 6),
3407 BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
3408 BPF_MOV64_IMM(BPF_REG_2, 8),
3409 BPF_MOV64_IMM(BPF_REG_3, 0),
3410 BPF_MOV64_IMM(BPF_REG_4, 0),
3411 BPF_MOV64_IMM(BPF_REG_5, 0),
3412 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
3413 BPF_FUNC_csum_diff),
3414 BPF_MOV64_IMM(BPF_REG_0, 0),
3418 .errstr = "invalid access to packet",
3419 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
3422 "helper access to packet: test17, cls helper fail range 2",
3424 BPF_LDX_MEM(BPF_W, BPF_REG_6, BPF_REG_1,
3425 offsetof(struct __sk_buff, data)),
3426 BPF_LDX_MEM(BPF_W, BPF_REG_7, BPF_REG_1,
3427 offsetof(struct __sk_buff, data_end)),
3428 BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, 1),
3429 BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
3430 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 7),
3431 BPF_JMP_REG(BPF_JGT, BPF_REG_1, BPF_REG_7, 6),
3432 BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
3433 BPF_MOV64_IMM(BPF_REG_2, -9),
3434 BPF_MOV64_IMM(BPF_REG_3, 0),
3435 BPF_MOV64_IMM(BPF_REG_4, 0),
3436 BPF_MOV64_IMM(BPF_REG_5, 0),
3437 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
3438 BPF_FUNC_csum_diff),
3439 BPF_MOV64_IMM(BPF_REG_0, 0),
3443 .errstr = "R2 min value is negative",
3444 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
3447 "helper access to packet: test18, cls helper fail range 3",
3449 BPF_LDX_MEM(BPF_W, BPF_REG_6, BPF_REG_1,
3450 offsetof(struct __sk_buff, data)),
3451 BPF_LDX_MEM(BPF_W, BPF_REG_7, BPF_REG_1,
3452 offsetof(struct __sk_buff, data_end)),
3453 BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, 1),
3454 BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
3455 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 7),
3456 BPF_JMP_REG(BPF_JGT, BPF_REG_1, BPF_REG_7, 6),
3457 BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
3458 BPF_MOV64_IMM(BPF_REG_2, ~0),
3459 BPF_MOV64_IMM(BPF_REG_3, 0),
3460 BPF_MOV64_IMM(BPF_REG_4, 0),
3461 BPF_MOV64_IMM(BPF_REG_5, 0),
3462 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
3463 BPF_FUNC_csum_diff),
3464 BPF_MOV64_IMM(BPF_REG_0, 0),
3468 .errstr = "R2 min value is negative",
3469 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
3472 "helper access to packet: test19, cls helper fail range zero",
3474 BPF_LDX_MEM(BPF_W, BPF_REG_6, BPF_REG_1,
3475 offsetof(struct __sk_buff, data)),
3476 BPF_LDX_MEM(BPF_W, BPF_REG_7, BPF_REG_1,
3477 offsetof(struct __sk_buff, data_end)),
3478 BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, 1),
3479 BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
3480 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 7),
3481 BPF_JMP_REG(BPF_JGT, BPF_REG_1, BPF_REG_7, 6),
3482 BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
3483 BPF_MOV64_IMM(BPF_REG_2, 0),
3484 BPF_MOV64_IMM(BPF_REG_3, 0),
3485 BPF_MOV64_IMM(BPF_REG_4, 0),
3486 BPF_MOV64_IMM(BPF_REG_5, 0),
3487 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
3488 BPF_FUNC_csum_diff),
3489 BPF_MOV64_IMM(BPF_REG_0, 0),
3493 .errstr = "invalid access to packet",
3494 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
3497 "helper access to packet: test20, pkt end as input",
3499 BPF_LDX_MEM(BPF_W, BPF_REG_6, BPF_REG_1,
3500 offsetof(struct __sk_buff, data)),
3501 BPF_LDX_MEM(BPF_W, BPF_REG_7, BPF_REG_1,
3502 offsetof(struct __sk_buff, data_end)),
3503 BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, 1),
3504 BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
3505 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 7),
3506 BPF_JMP_REG(BPF_JGT, BPF_REG_1, BPF_REG_7, 6),
3507 BPF_MOV64_REG(BPF_REG_1, BPF_REG_7),
3508 BPF_MOV64_IMM(BPF_REG_2, 4),
3509 BPF_MOV64_IMM(BPF_REG_3, 0),
3510 BPF_MOV64_IMM(BPF_REG_4, 0),
3511 BPF_MOV64_IMM(BPF_REG_5, 0),
3512 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
3513 BPF_FUNC_csum_diff),
3514 BPF_MOV64_IMM(BPF_REG_0, 0),
3518 .errstr = "R1 type=pkt_end expected=fp",
3519 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
3522 "helper access to packet: test21, wrong reg",
3524 BPF_LDX_MEM(BPF_W, BPF_REG_6, BPF_REG_1,
3525 offsetof(struct __sk_buff, data)),
3526 BPF_LDX_MEM(BPF_W, BPF_REG_7, BPF_REG_1,
3527 offsetof(struct __sk_buff, data_end)),
3528 BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, 1),
3529 BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
3530 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 7),
3531 BPF_JMP_REG(BPF_JGT, BPF_REG_1, BPF_REG_7, 6),
3532 BPF_MOV64_IMM(BPF_REG_2, 4),
3533 BPF_MOV64_IMM(BPF_REG_3, 0),
3534 BPF_MOV64_IMM(BPF_REG_4, 0),
3535 BPF_MOV64_IMM(BPF_REG_5, 0),
3536 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
3537 BPF_FUNC_csum_diff),
3538 BPF_MOV64_IMM(BPF_REG_0, 0),
3542 .errstr = "invalid access to packet",
3543 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
3546 "valid map access into an array with a constant",
3548 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
3549 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
3550 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
3551 BPF_LD_MAP_FD(BPF_REG_1, 0),
3552 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
3553 BPF_FUNC_map_lookup_elem),
3554 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 1),
3555 BPF_ST_MEM(BPF_DW, BPF_REG_0, 0,
3556 offsetof(struct test_val, foo)),
3559 .fixup_map2 = { 3 },
3560 .errstr_unpriv = "R0 leaks addr",
3561 .result_unpriv = REJECT,
3565 "valid map access into an array with a register",
3567 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
3568 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
3569 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
3570 BPF_LD_MAP_FD(BPF_REG_1, 0),
3571 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
3572 BPF_FUNC_map_lookup_elem),
3573 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 4),
3574 BPF_MOV64_IMM(BPF_REG_1, 4),
3575 BPF_ALU64_IMM(BPF_LSH, BPF_REG_1, 2),
3576 BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
3577 BPF_ST_MEM(BPF_DW, BPF_REG_0, 0,
3578 offsetof(struct test_val, foo)),
3581 .fixup_map2 = { 3 },
3582 .errstr_unpriv = "R0 leaks addr",
3583 .result_unpriv = REJECT,
3585 .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
3588 "valid map access into an array with a variable",
3590 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
3591 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
3592 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
3593 BPF_LD_MAP_FD(BPF_REG_1, 0),
3594 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
3595 BPF_FUNC_map_lookup_elem),
3596 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 5),
3597 BPF_LDX_MEM(BPF_W, BPF_REG_1, BPF_REG_0, 0),
3598 BPF_JMP_IMM(BPF_JGE, BPF_REG_1, MAX_ENTRIES, 3),
3599 BPF_ALU64_IMM(BPF_LSH, BPF_REG_1, 2),
3600 BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
3601 BPF_ST_MEM(BPF_DW, BPF_REG_0, 0,
3602 offsetof(struct test_val, foo)),
3605 .fixup_map2 = { 3 },
3606 .errstr_unpriv = "R0 leaks addr",
3607 .result_unpriv = REJECT,
3609 .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
3612 "valid map access into an array with a signed variable",
3614 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
3615 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
3616 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
3617 BPF_LD_MAP_FD(BPF_REG_1, 0),
3618 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
3619 BPF_FUNC_map_lookup_elem),
3620 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 9),
3621 BPF_LDX_MEM(BPF_W, BPF_REG_1, BPF_REG_0, 0),
3622 BPF_JMP_IMM(BPF_JSGT, BPF_REG_1, 0xffffffff, 1),
3623 BPF_MOV32_IMM(BPF_REG_1, 0),
3624 BPF_MOV32_IMM(BPF_REG_2, MAX_ENTRIES),
3625 BPF_JMP_REG(BPF_JSGT, BPF_REG_2, BPF_REG_1, 1),
3626 BPF_MOV32_IMM(BPF_REG_1, 0),
3627 BPF_ALU32_IMM(BPF_LSH, BPF_REG_1, 2),
3628 BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
3629 BPF_ST_MEM(BPF_DW, BPF_REG_0, 0,
3630 offsetof(struct test_val, foo)),
3633 .fixup_map2 = { 3 },
3634 .errstr_unpriv = "R0 leaks addr",
3635 .result_unpriv = REJECT,
3637 .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
3640 "invalid map access into an array with a constant",
3642 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
3643 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
3644 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
3645 BPF_LD_MAP_FD(BPF_REG_1, 0),
3646 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
3647 BPF_FUNC_map_lookup_elem),
3648 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 1),
3649 BPF_ST_MEM(BPF_DW, BPF_REG_0, (MAX_ENTRIES + 1) << 2,
3650 offsetof(struct test_val, foo)),
3653 .fixup_map2 = { 3 },
3654 .errstr = "invalid access to map value, value_size=48 off=48 size=8",
3658 "invalid map access into an array with a register",
3660 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
3661 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
3662 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
3663 BPF_LD_MAP_FD(BPF_REG_1, 0),
3664 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
3665 BPF_FUNC_map_lookup_elem),
3666 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 4),
3667 BPF_MOV64_IMM(BPF_REG_1, MAX_ENTRIES + 1),
3668 BPF_ALU64_IMM(BPF_LSH, BPF_REG_1, 2),
3669 BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
3670 BPF_ST_MEM(BPF_DW, BPF_REG_0, 0,
3671 offsetof(struct test_val, foo)),
3674 .fixup_map2 = { 3 },
3675 .errstr = "R0 min value is outside of the array range",
3677 .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
3680 "invalid map access into an array with a variable",
3682 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
3683 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
3684 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
3685 BPF_LD_MAP_FD(BPF_REG_1, 0),
3686 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
3687 BPF_FUNC_map_lookup_elem),
3688 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 4),
3689 BPF_LDX_MEM(BPF_W, BPF_REG_1, BPF_REG_0, 0),
3690 BPF_ALU64_IMM(BPF_LSH, BPF_REG_1, 2),
3691 BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
3692 BPF_ST_MEM(BPF_DW, BPF_REG_0, 0,
3693 offsetof(struct test_val, foo)),
3696 .fixup_map2 = { 3 },
3697 .errstr = "R0 unbounded memory access, make sure to bounds check any array access into a map",
3699 .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
3702 "invalid map access into an array with no floor check",
3704 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
3705 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
3706 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
3707 BPF_LD_MAP_FD(BPF_REG_1, 0),
3708 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
3709 BPF_FUNC_map_lookup_elem),
3710 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 7),
3711 BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_0, 0),
3712 BPF_MOV32_IMM(BPF_REG_2, MAX_ENTRIES),
3713 BPF_JMP_REG(BPF_JSGT, BPF_REG_2, BPF_REG_1, 1),
3714 BPF_MOV32_IMM(BPF_REG_1, 0),
3715 BPF_ALU32_IMM(BPF_LSH, BPF_REG_1, 2),
3716 BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
3717 BPF_ST_MEM(BPF_DW, BPF_REG_0, 0,
3718 offsetof(struct test_val, foo)),
3721 .fixup_map2 = { 3 },
3722 .errstr_unpriv = "R0 leaks addr",
3723 .errstr = "R0 unbounded memory access",
3724 .result_unpriv = REJECT,
3726 .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
3729 "invalid map access into an array with a invalid max check",
3731 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
3732 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
3733 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
3734 BPF_LD_MAP_FD(BPF_REG_1, 0),
3735 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
3736 BPF_FUNC_map_lookup_elem),
3737 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 7),
3738 BPF_LDX_MEM(BPF_W, BPF_REG_1, BPF_REG_0, 0),
3739 BPF_MOV32_IMM(BPF_REG_2, MAX_ENTRIES + 1),
3740 BPF_JMP_REG(BPF_JGT, BPF_REG_2, BPF_REG_1, 1),
3741 BPF_MOV32_IMM(BPF_REG_1, 0),
3742 BPF_ALU32_IMM(BPF_LSH, BPF_REG_1, 2),
3743 BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
3744 BPF_ST_MEM(BPF_DW, BPF_REG_0, 0,
3745 offsetof(struct test_val, foo)),
3748 .fixup_map2 = { 3 },
3749 .errstr_unpriv = "R0 leaks addr",
3750 .errstr = "invalid access to map value, value_size=48 off=44 size=8",
3751 .result_unpriv = REJECT,
3753 .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
3756 "invalid map access into an array with a invalid max check",
3758 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
3759 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
3760 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
3761 BPF_LD_MAP_FD(BPF_REG_1, 0),
3762 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
3763 BPF_FUNC_map_lookup_elem),
3764 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 10),
3765 BPF_MOV64_REG(BPF_REG_8, BPF_REG_0),
3766 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
3767 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
3768 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
3769 BPF_LD_MAP_FD(BPF_REG_1, 0),
3770 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
3771 BPF_FUNC_map_lookup_elem),
3772 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 2),
3773 BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_8),
3774 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_0,
3775 offsetof(struct test_val, foo)),
3778 .fixup_map2 = { 3, 11 },
3779 .errstr_unpriv = "R0 pointer += pointer",
3780 .errstr = "R0 invalid mem access 'inv'",
3781 .result_unpriv = REJECT,
3783 .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
3786 "multiple registers share map_lookup_elem result",
3788 BPF_MOV64_IMM(BPF_REG_1, 10),
3789 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_1, -8),
3790 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
3791 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
3792 BPF_LD_MAP_FD(BPF_REG_1, 0),
3793 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
3794 BPF_FUNC_map_lookup_elem),
3795 BPF_MOV64_REG(BPF_REG_4, BPF_REG_0),
3796 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 1),
3797 BPF_ST_MEM(BPF_DW, BPF_REG_4, 0, 0),
3800 .fixup_map1 = { 4 },
3802 .prog_type = BPF_PROG_TYPE_SCHED_CLS
3805 "alu ops on ptr_to_map_value_or_null, 1",
3807 BPF_MOV64_IMM(BPF_REG_1, 10),
3808 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_1, -8),
3809 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
3810 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
3811 BPF_LD_MAP_FD(BPF_REG_1, 0),
3812 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
3813 BPF_FUNC_map_lookup_elem),
3814 BPF_MOV64_REG(BPF_REG_4, BPF_REG_0),
3815 BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, -2),
3816 BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, 2),
3817 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 1),
3818 BPF_ST_MEM(BPF_DW, BPF_REG_4, 0, 0),
3821 .fixup_map1 = { 4 },
3822 .errstr = "R4 invalid mem access",
3824 .prog_type = BPF_PROG_TYPE_SCHED_CLS
3827 "alu ops on ptr_to_map_value_or_null, 2",
3829 BPF_MOV64_IMM(BPF_REG_1, 10),
3830 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_1, -8),
3831 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
3832 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
3833 BPF_LD_MAP_FD(BPF_REG_1, 0),
3834 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
3835 BPF_FUNC_map_lookup_elem),
3836 BPF_MOV64_REG(BPF_REG_4, BPF_REG_0),
3837 BPF_ALU64_IMM(BPF_AND, BPF_REG_4, -1),
3838 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 1),
3839 BPF_ST_MEM(BPF_DW, BPF_REG_4, 0, 0),
3842 .fixup_map1 = { 4 },
3843 .errstr = "R4 invalid mem access",
3845 .prog_type = BPF_PROG_TYPE_SCHED_CLS
3848 "alu ops on ptr_to_map_value_or_null, 3",
3850 BPF_MOV64_IMM(BPF_REG_1, 10),
3851 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_1, -8),
3852 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
3853 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
3854 BPF_LD_MAP_FD(BPF_REG_1, 0),
3855 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
3856 BPF_FUNC_map_lookup_elem),
3857 BPF_MOV64_REG(BPF_REG_4, BPF_REG_0),
3858 BPF_ALU64_IMM(BPF_LSH, BPF_REG_4, 1),
3859 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 1),
3860 BPF_ST_MEM(BPF_DW, BPF_REG_4, 0, 0),
3863 .fixup_map1 = { 4 },
3864 .errstr = "R4 invalid mem access",
3866 .prog_type = BPF_PROG_TYPE_SCHED_CLS
3869 "invalid memory access with multiple map_lookup_elem calls",
3871 BPF_MOV64_IMM(BPF_REG_1, 10),
3872 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_1, -8),
3873 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
3874 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
3875 BPF_LD_MAP_FD(BPF_REG_1, 0),
3876 BPF_MOV64_REG(BPF_REG_8, BPF_REG_1),
3877 BPF_MOV64_REG(BPF_REG_7, BPF_REG_2),
3878 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
3879 BPF_FUNC_map_lookup_elem),
3880 BPF_MOV64_REG(BPF_REG_4, BPF_REG_0),
3881 BPF_MOV64_REG(BPF_REG_1, BPF_REG_8),
3882 BPF_MOV64_REG(BPF_REG_2, BPF_REG_7),
3883 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
3884 BPF_FUNC_map_lookup_elem),
3885 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 1),
3886 BPF_ST_MEM(BPF_DW, BPF_REG_4, 0, 0),
3889 .fixup_map1 = { 4 },
3891 .errstr = "R4 !read_ok",
3892 .prog_type = BPF_PROG_TYPE_SCHED_CLS
3895 "valid indirect map_lookup_elem access with 2nd lookup in branch",
3897 BPF_MOV64_IMM(BPF_REG_1, 10),
3898 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_1, -8),
3899 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
3900 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
3901 BPF_LD_MAP_FD(BPF_REG_1, 0),
3902 BPF_MOV64_REG(BPF_REG_8, BPF_REG_1),
3903 BPF_MOV64_REG(BPF_REG_7, BPF_REG_2),
3904 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
3905 BPF_FUNC_map_lookup_elem),
3906 BPF_MOV64_IMM(BPF_REG_2, 10),
3907 BPF_JMP_IMM(BPF_JNE, BPF_REG_2, 0, 3),
3908 BPF_MOV64_REG(BPF_REG_1, BPF_REG_8),
3909 BPF_MOV64_REG(BPF_REG_2, BPF_REG_7),
3910 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
3911 BPF_FUNC_map_lookup_elem),
3912 BPF_MOV64_REG(BPF_REG_4, BPF_REG_0),
3913 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 1),
3914 BPF_ST_MEM(BPF_DW, BPF_REG_4, 0, 0),
3917 .fixup_map1 = { 4 },
3919 .prog_type = BPF_PROG_TYPE_SCHED_CLS
3922 "invalid map access from else condition",
3924 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
3925 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
3926 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
3927 BPF_LD_MAP_FD(BPF_REG_1, 0),
3928 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem),
3929 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 6),
3930 BPF_LDX_MEM(BPF_W, BPF_REG_1, BPF_REG_0, 0),
3931 BPF_JMP_IMM(BPF_JGE, BPF_REG_1, MAX_ENTRIES-1, 1),
3932 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 1),
3933 BPF_ALU64_IMM(BPF_LSH, BPF_REG_1, 2),
3934 BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
3935 BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, offsetof(struct test_val, foo)),
3938 .fixup_map2 = { 3 },
3939 .errstr = "R0 unbounded memory access",
3941 .errstr_unpriv = "R0 leaks addr",
3942 .result_unpriv = REJECT,
3943 .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
3946 "constant register |= constant should keep constant type",
3948 BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
3949 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -48),
3950 BPF_MOV64_IMM(BPF_REG_2, 34),
3951 BPF_ALU64_IMM(BPF_OR, BPF_REG_2, 13),
3952 BPF_MOV64_IMM(BPF_REG_3, 0),
3953 BPF_EMIT_CALL(BPF_FUNC_probe_read),
3957 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
3960 "constant register |= constant should not bypass stack boundary checks",
3962 BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
3963 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -48),
3964 BPF_MOV64_IMM(BPF_REG_2, 34),
3965 BPF_ALU64_IMM(BPF_OR, BPF_REG_2, 24),
3966 BPF_MOV64_IMM(BPF_REG_3, 0),
3967 BPF_EMIT_CALL(BPF_FUNC_probe_read),
3970 .errstr = "invalid stack type R1 off=-48 access_size=58",
3972 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
3975 "constant register |= constant register should keep constant type",
3977 BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
3978 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -48),
3979 BPF_MOV64_IMM(BPF_REG_2, 34),
3980 BPF_MOV64_IMM(BPF_REG_4, 13),
3981 BPF_ALU64_REG(BPF_OR, BPF_REG_2, BPF_REG_4),
3982 BPF_MOV64_IMM(BPF_REG_3, 0),
3983 BPF_EMIT_CALL(BPF_FUNC_probe_read),
3987 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
3990 "constant register |= constant register should not bypass stack boundary checks",
3992 BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
3993 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -48),
3994 BPF_MOV64_IMM(BPF_REG_2, 34),
3995 BPF_MOV64_IMM(BPF_REG_4, 24),
3996 BPF_ALU64_REG(BPF_OR, BPF_REG_2, BPF_REG_4),
3997 BPF_MOV64_IMM(BPF_REG_3, 0),
3998 BPF_EMIT_CALL(BPF_FUNC_probe_read),
4001 .errstr = "invalid stack type R1 off=-48 access_size=58",
4003 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
4006 "invalid direct packet write for LWT_IN",
4008 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
4009 offsetof(struct __sk_buff, data)),
4010 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
4011 offsetof(struct __sk_buff, data_end)),
4012 BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
4013 BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
4014 BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 1),
4015 BPF_STX_MEM(BPF_B, BPF_REG_2, BPF_REG_2, 0),
4016 BPF_MOV64_IMM(BPF_REG_0, 0),
4019 .errstr = "cannot write into packet",
4021 .prog_type = BPF_PROG_TYPE_LWT_IN,
4024 "invalid direct packet write for LWT_OUT",
4026 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
4027 offsetof(struct __sk_buff, data)),
4028 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
4029 offsetof(struct __sk_buff, data_end)),
4030 BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
4031 BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
4032 BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 1),
4033 BPF_STX_MEM(BPF_B, BPF_REG_2, BPF_REG_2, 0),
4034 BPF_MOV64_IMM(BPF_REG_0, 0),
4037 .errstr = "cannot write into packet",
4039 .prog_type = BPF_PROG_TYPE_LWT_OUT,
4042 "direct packet write for LWT_XMIT",
4044 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
4045 offsetof(struct __sk_buff, data)),
4046 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
4047 offsetof(struct __sk_buff, data_end)),
4048 BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
4049 BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
4050 BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 1),
4051 BPF_STX_MEM(BPF_B, BPF_REG_2, BPF_REG_2, 0),
4052 BPF_MOV64_IMM(BPF_REG_0, 0),
4056 .prog_type = BPF_PROG_TYPE_LWT_XMIT,
4059 "direct packet read for LWT_IN",
4061 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
4062 offsetof(struct __sk_buff, data)),
4063 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
4064 offsetof(struct __sk_buff, data_end)),
4065 BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
4066 BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
4067 BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 1),
4068 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_2, 0),
4069 BPF_MOV64_IMM(BPF_REG_0, 0),
4073 .prog_type = BPF_PROG_TYPE_LWT_IN,
4076 "direct packet read for LWT_OUT",
4078 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
4079 offsetof(struct __sk_buff, data)),
4080 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
4081 offsetof(struct __sk_buff, data_end)),
4082 BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
4083 BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
4084 BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 1),
4085 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_2, 0),
4086 BPF_MOV64_IMM(BPF_REG_0, 0),
4090 .prog_type = BPF_PROG_TYPE_LWT_OUT,
4093 "direct packet read for LWT_XMIT",
4095 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
4096 offsetof(struct __sk_buff, data)),
4097 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
4098 offsetof(struct __sk_buff, data_end)),
4099 BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
4100 BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
4101 BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 1),
4102 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_2, 0),
4103 BPF_MOV64_IMM(BPF_REG_0, 0),
4107 .prog_type = BPF_PROG_TYPE_LWT_XMIT,
4110 "overlapping checks for direct packet access",
4112 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
4113 offsetof(struct __sk_buff, data)),
4114 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
4115 offsetof(struct __sk_buff, data_end)),
4116 BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
4117 BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
4118 BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 4),
4119 BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
4120 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 6),
4121 BPF_JMP_REG(BPF_JGT, BPF_REG_1, BPF_REG_3, 1),
4122 BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_2, 6),
4123 BPF_MOV64_IMM(BPF_REG_0, 0),
4127 .prog_type = BPF_PROG_TYPE_LWT_XMIT,
4130 "invalid access of tc_classid for LWT_IN",
4132 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
4133 offsetof(struct __sk_buff, tc_classid)),
4137 .errstr = "invalid bpf_context access",
4140 "invalid access of tc_classid for LWT_OUT",
4142 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
4143 offsetof(struct __sk_buff, tc_classid)),
4147 .errstr = "invalid bpf_context access",
4150 "invalid access of tc_classid for LWT_XMIT",
4152 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
4153 offsetof(struct __sk_buff, tc_classid)),
4157 .errstr = "invalid bpf_context access",
4160 "leak pointer into ctx 1",
4162 BPF_MOV64_IMM(BPF_REG_0, 0),
4163 BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_0,
4164 offsetof(struct __sk_buff, cb[0])),
4165 BPF_LD_MAP_FD(BPF_REG_2, 0),
4166 BPF_STX_XADD(BPF_DW, BPF_REG_1, BPF_REG_2,
4167 offsetof(struct __sk_buff, cb[0])),
4170 .fixup_map1 = { 2 },
4171 .errstr_unpriv = "R2 leaks addr into mem",
4172 .result_unpriv = REJECT,
4176 "leak pointer into ctx 2",
4178 BPF_MOV64_IMM(BPF_REG_0, 0),
4179 BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_0,
4180 offsetof(struct __sk_buff, cb[0])),
4181 BPF_STX_XADD(BPF_DW, BPF_REG_1, BPF_REG_10,
4182 offsetof(struct __sk_buff, cb[0])),
4185 .errstr_unpriv = "R10 leaks addr into mem",
4186 .result_unpriv = REJECT,
4190 "leak pointer into ctx 3",
4192 BPF_MOV64_IMM(BPF_REG_0, 0),
4193 BPF_LD_MAP_FD(BPF_REG_2, 0),
4194 BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_2,
4195 offsetof(struct __sk_buff, cb[0])),
4198 .fixup_map1 = { 1 },
4199 .errstr_unpriv = "R2 leaks addr into ctx",
4200 .result_unpriv = REJECT,
4204 "leak pointer into map val",
4206 BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
4207 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
4208 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
4209 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
4210 BPF_LD_MAP_FD(BPF_REG_1, 0),
4211 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
4212 BPF_FUNC_map_lookup_elem),
4213 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 3),
4214 BPF_MOV64_IMM(BPF_REG_3, 0),
4215 BPF_STX_MEM(BPF_DW, BPF_REG_0, BPF_REG_3, 0),
4216 BPF_STX_XADD(BPF_DW, BPF_REG_0, BPF_REG_6, 0),
4217 BPF_MOV64_IMM(BPF_REG_0, 0),
4220 .fixup_map1 = { 4 },
4221 .errstr_unpriv = "R6 leaks addr into mem",
4222 .result_unpriv = REJECT,
4226 "helper access to map: full range",
4228 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
4229 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
4230 BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
4231 BPF_LD_MAP_FD(BPF_REG_1, 0),
4232 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
4233 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 4),
4234 BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
4235 BPF_MOV64_IMM(BPF_REG_2, sizeof(struct test_val)),
4236 BPF_MOV64_IMM(BPF_REG_3, 0),
4237 BPF_EMIT_CALL(BPF_FUNC_probe_read),
4240 .fixup_map2 = { 3 },
4242 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
4245 "helper access to map: partial range",
4247 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
4248 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
4249 BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
4250 BPF_LD_MAP_FD(BPF_REG_1, 0),
4251 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
4252 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 4),
4253 BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
4254 BPF_MOV64_IMM(BPF_REG_2, 8),
4255 BPF_MOV64_IMM(BPF_REG_3, 0),
4256 BPF_EMIT_CALL(BPF_FUNC_probe_read),
4259 .fixup_map2 = { 3 },
4261 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
4264 "helper access to map: empty range",
4266 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
4267 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
4268 BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
4269 BPF_LD_MAP_FD(BPF_REG_1, 0),
4270 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
4271 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 4),
4272 BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
4273 BPF_MOV64_IMM(BPF_REG_2, 0),
4274 BPF_MOV64_IMM(BPF_REG_3, 0),
4275 BPF_EMIT_CALL(BPF_FUNC_probe_read),
4278 .fixup_map2 = { 3 },
4279 .errstr = "invalid access to map value, value_size=48 off=0 size=0",
4281 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
4284 "helper access to map: out-of-bound range",
4286 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
4287 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
4288 BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
4289 BPF_LD_MAP_FD(BPF_REG_1, 0),
4290 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
4291 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 4),
4292 BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
4293 BPF_MOV64_IMM(BPF_REG_2, sizeof(struct test_val) + 8),
4294 BPF_MOV64_IMM(BPF_REG_3, 0),
4295 BPF_EMIT_CALL(BPF_FUNC_probe_read),
4298 .fixup_map2 = { 3 },
4299 .errstr = "invalid access to map value, value_size=48 off=0 size=56",
4301 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
4304 "helper access to map: negative range",
4306 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
4307 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
4308 BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
4309 BPF_LD_MAP_FD(BPF_REG_1, 0),
4310 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
4311 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 4),
4312 BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
4313 BPF_MOV64_IMM(BPF_REG_2, -8),
4314 BPF_MOV64_IMM(BPF_REG_3, 0),
4315 BPF_EMIT_CALL(BPF_FUNC_probe_read),
4318 .fixup_map2 = { 3 },
4319 .errstr = "R2 min value is negative",
4321 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
4324 "helper access to adjusted map (via const imm): full range",
4326 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
4327 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
4328 BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
4329 BPF_LD_MAP_FD(BPF_REG_1, 0),
4330 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
4331 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 5),
4332 BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
4333 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1,
4334 offsetof(struct test_val, foo)),
4335 BPF_MOV64_IMM(BPF_REG_2,
4336 sizeof(struct test_val) -
4337 offsetof(struct test_val, foo)),
4338 BPF_MOV64_IMM(BPF_REG_3, 0),
4339 BPF_EMIT_CALL(BPF_FUNC_probe_read),
4342 .fixup_map2 = { 3 },
4344 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
4347 "helper access to adjusted map (via const imm): partial range",
4349 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
4350 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
4351 BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
4352 BPF_LD_MAP_FD(BPF_REG_1, 0),
4353 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
4354 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 5),
4355 BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
4356 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1,
4357 offsetof(struct test_val, foo)),
4358 BPF_MOV64_IMM(BPF_REG_2, 8),
4359 BPF_MOV64_IMM(BPF_REG_3, 0),
4360 BPF_EMIT_CALL(BPF_FUNC_probe_read),
4363 .fixup_map2 = { 3 },
4365 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
4368 "helper access to adjusted map (via const imm): empty range",
4370 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
4371 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
4372 BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
4373 BPF_LD_MAP_FD(BPF_REG_1, 0),
4374 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
4375 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 5),
4376 BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
4377 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1,
4378 offsetof(struct test_val, foo)),
4379 BPF_MOV64_IMM(BPF_REG_2, 0),
4380 BPF_MOV64_IMM(BPF_REG_3, 0),
4381 BPF_EMIT_CALL(BPF_FUNC_probe_read),
4384 .fixup_map2 = { 3 },
4385 .errstr = "invalid access to map value, value_size=48 off=4 size=0",
4387 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
4390 "helper access to adjusted map (via const imm): out-of-bound range",
4392 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
4393 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
4394 BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
4395 BPF_LD_MAP_FD(BPF_REG_1, 0),
4396 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
4397 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 5),
4398 BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
4399 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1,
4400 offsetof(struct test_val, foo)),
4401 BPF_MOV64_IMM(BPF_REG_2,
4402 sizeof(struct test_val) -
4403 offsetof(struct test_val, foo) + 8),
4404 BPF_MOV64_IMM(BPF_REG_3, 0),
4405 BPF_EMIT_CALL(BPF_FUNC_probe_read),
4408 .fixup_map2 = { 3 },
4409 .errstr = "invalid access to map value, value_size=48 off=4 size=52",
4411 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
4414 "helper access to adjusted map (via const imm): negative range (> adjustment)",
4416 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
4417 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
4418 BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
4419 BPF_LD_MAP_FD(BPF_REG_1, 0),
4420 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
4421 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 5),
4422 BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
4423 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1,
4424 offsetof(struct test_val, foo)),
4425 BPF_MOV64_IMM(BPF_REG_2, -8),
4426 BPF_MOV64_IMM(BPF_REG_3, 0),
4427 BPF_EMIT_CALL(BPF_FUNC_probe_read),
4430 .fixup_map2 = { 3 },
4431 .errstr = "R2 min value is negative",
4433 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
4436 "helper access to adjusted map (via const imm): negative range (< adjustment)",
4438 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
4439 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
4440 BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
4441 BPF_LD_MAP_FD(BPF_REG_1, 0),
4442 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
4443 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 5),
4444 BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
4445 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1,
4446 offsetof(struct test_val, foo)),
4447 BPF_MOV64_IMM(BPF_REG_2, -1),
4448 BPF_MOV64_IMM(BPF_REG_3, 0),
4449 BPF_EMIT_CALL(BPF_FUNC_probe_read),
4452 .fixup_map2 = { 3 },
4453 .errstr = "R2 min value is negative",
4455 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
4458 "helper access to adjusted map (via const reg): full range",
4460 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
4461 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
4462 BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
4463 BPF_LD_MAP_FD(BPF_REG_1, 0),
4464 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
4465 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 6),
4466 BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
4467 BPF_MOV64_IMM(BPF_REG_3,
4468 offsetof(struct test_val, foo)),
4469 BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_3),
4470 BPF_MOV64_IMM(BPF_REG_2,
4471 sizeof(struct test_val) -
4472 offsetof(struct test_val, foo)),
4473 BPF_MOV64_IMM(BPF_REG_3, 0),
4474 BPF_EMIT_CALL(BPF_FUNC_probe_read),
4477 .fixup_map2 = { 3 },
4479 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
4482 "helper access to adjusted map (via const reg): partial range",
4484 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
4485 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
4486 BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
4487 BPF_LD_MAP_FD(BPF_REG_1, 0),
4488 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
4489 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 6),
4490 BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
4491 BPF_MOV64_IMM(BPF_REG_3,
4492 offsetof(struct test_val, foo)),
4493 BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_3),
4494 BPF_MOV64_IMM(BPF_REG_2, 8),
4495 BPF_MOV64_IMM(BPF_REG_3, 0),
4496 BPF_EMIT_CALL(BPF_FUNC_probe_read),
4499 .fixup_map2 = { 3 },
4501 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
4504 "helper access to adjusted map (via const reg): empty range",
4506 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
4507 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
4508 BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
4509 BPF_LD_MAP_FD(BPF_REG_1, 0),
4510 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
4511 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 6),
4512 BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
4513 BPF_MOV64_IMM(BPF_REG_3, 0),
4514 BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_3),
4515 BPF_MOV64_IMM(BPF_REG_2, 0),
4516 BPF_MOV64_IMM(BPF_REG_3, 0),
4517 BPF_EMIT_CALL(BPF_FUNC_probe_read),
4520 .fixup_map2 = { 3 },
4521 .errstr = "R1 min value is outside of the array range",
4523 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
4526 "helper access to adjusted map (via const reg): out-of-bound range",
4528 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
4529 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
4530 BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
4531 BPF_LD_MAP_FD(BPF_REG_1, 0),
4532 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
4533 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 6),
4534 BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
4535 BPF_MOV64_IMM(BPF_REG_3,
4536 offsetof(struct test_val, foo)),
4537 BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_3),
4538 BPF_MOV64_IMM(BPF_REG_2,
4539 sizeof(struct test_val) -
4540 offsetof(struct test_val, foo) + 8),
4541 BPF_MOV64_IMM(BPF_REG_3, 0),
4542 BPF_EMIT_CALL(BPF_FUNC_probe_read),
4545 .fixup_map2 = { 3 },
4546 .errstr = "invalid access to map value, value_size=48 off=4 size=52",
4548 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
4551 "helper access to adjusted map (via const reg): negative range (> adjustment)",
4553 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
4554 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
4555 BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
4556 BPF_LD_MAP_FD(BPF_REG_1, 0),
4557 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
4558 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 6),
4559 BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
4560 BPF_MOV64_IMM(BPF_REG_3,
4561 offsetof(struct test_val, foo)),
4562 BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_3),
4563 BPF_MOV64_IMM(BPF_REG_2, -8),
4564 BPF_MOV64_IMM(BPF_REG_3, 0),
4565 BPF_EMIT_CALL(BPF_FUNC_probe_read),
4568 .fixup_map2 = { 3 },
4569 .errstr = "R2 min value is negative",
4571 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
4574 "helper access to adjusted map (via const reg): negative range (< adjustment)",
4576 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
4577 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
4578 BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
4579 BPF_LD_MAP_FD(BPF_REG_1, 0),
4580 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
4581 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 6),
4582 BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
4583 BPF_MOV64_IMM(BPF_REG_3,
4584 offsetof(struct test_val, foo)),
4585 BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_3),
4586 BPF_MOV64_IMM(BPF_REG_2, -1),
4587 BPF_MOV64_IMM(BPF_REG_3, 0),
4588 BPF_EMIT_CALL(BPF_FUNC_probe_read),
4591 .fixup_map2 = { 3 },
4592 .errstr = "R2 min value is negative",
4594 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
4597 "helper access to adjusted map (via variable): full range",
4599 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
4600 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
4601 BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
4602 BPF_LD_MAP_FD(BPF_REG_1, 0),
4603 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
4604 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 7),
4605 BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
4606 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_0, 0),
4607 BPF_JMP_IMM(BPF_JGT, BPF_REG_3,
4608 offsetof(struct test_val, foo), 4),
4609 BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_3),
4610 BPF_MOV64_IMM(BPF_REG_2,
4611 sizeof(struct test_val) -
4612 offsetof(struct test_val, foo)),
4613 BPF_MOV64_IMM(BPF_REG_3, 0),
4614 BPF_EMIT_CALL(BPF_FUNC_probe_read),
4617 .fixup_map2 = { 3 },
4619 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
4622 "helper access to adjusted map (via variable): partial range",
4624 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
4625 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
4626 BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
4627 BPF_LD_MAP_FD(BPF_REG_1, 0),
4628 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
4629 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 7),
4630 BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
4631 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_0, 0),
4632 BPF_JMP_IMM(BPF_JGT, BPF_REG_3,
4633 offsetof(struct test_val, foo), 4),
4634 BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_3),
4635 BPF_MOV64_IMM(BPF_REG_2, 8),
4636 BPF_MOV64_IMM(BPF_REG_3, 0),
4637 BPF_EMIT_CALL(BPF_FUNC_probe_read),
4640 .fixup_map2 = { 3 },
4642 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
4645 "helper access to adjusted map (via variable): empty range",
4647 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
4648 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
4649 BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
4650 BPF_LD_MAP_FD(BPF_REG_1, 0),
4651 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
4652 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 7),
4653 BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
4654 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_0, 0),
4655 BPF_JMP_IMM(BPF_JGT, BPF_REG_3,
4656 offsetof(struct test_val, foo), 4),
4657 BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_3),
4658 BPF_MOV64_IMM(BPF_REG_2, 0),
4659 BPF_MOV64_IMM(BPF_REG_3, 0),
4660 BPF_EMIT_CALL(BPF_FUNC_probe_read),
4663 .fixup_map2 = { 3 },
4664 .errstr = "R1 min value is outside of the array range",
4666 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
4669 "helper access to adjusted map (via variable): no max check",
4671 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
4672 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
4673 BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
4674 BPF_LD_MAP_FD(BPF_REG_1, 0),
4675 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
4676 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 6),
4677 BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
4678 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_0, 0),
4679 BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_3),
4680 BPF_MOV64_IMM(BPF_REG_2, 1),
4681 BPF_MOV64_IMM(BPF_REG_3, 0),
4682 BPF_EMIT_CALL(BPF_FUNC_probe_read),
4685 .fixup_map2 = { 3 },
4686 .errstr = "R1 unbounded memory access",
4688 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
4691 "helper access to adjusted map (via variable): wrong max check",
4693 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
4694 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
4695 BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
4696 BPF_LD_MAP_FD(BPF_REG_1, 0),
4697 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
4698 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 7),
4699 BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
4700 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_0, 0),
4701 BPF_JMP_IMM(BPF_JGT, BPF_REG_3,
4702 offsetof(struct test_val, foo), 4),
4703 BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_3),
4704 BPF_MOV64_IMM(BPF_REG_2,
4705 sizeof(struct test_val) -
4706 offsetof(struct test_val, foo) + 1),
4707 BPF_MOV64_IMM(BPF_REG_3, 0),
4708 BPF_EMIT_CALL(BPF_FUNC_probe_read),
4711 .fixup_map2 = { 3 },
4712 .errstr = "invalid access to map value, value_size=48 off=4 size=45",
4714 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
4717 "helper access to map: bounds check using <, good access",
4719 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
4720 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
4721 BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
4722 BPF_LD_MAP_FD(BPF_REG_1, 0),
4723 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
4724 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 4),
4725 BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
4726 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_0, 0),
4727 BPF_JMP_IMM(BPF_JLT, BPF_REG_3, 32, 2),
4728 BPF_MOV64_IMM(BPF_REG_0, 0),
4730 BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_3),
4731 BPF_ST_MEM(BPF_B, BPF_REG_1, 0, 0),
4732 BPF_MOV64_IMM(BPF_REG_0, 0),
4735 .fixup_map2 = { 3 },
4737 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
4740 "helper access to map: bounds check using <, bad access",
4742 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
4743 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
4744 BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
4745 BPF_LD_MAP_FD(BPF_REG_1, 0),
4746 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
4747 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 4),
4748 BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
4749 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_0, 0),
4750 BPF_JMP_IMM(BPF_JLT, BPF_REG_3, 32, 4),
4751 BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_3),
4752 BPF_ST_MEM(BPF_B, BPF_REG_1, 0, 0),
4753 BPF_MOV64_IMM(BPF_REG_0, 0),
4755 BPF_MOV64_IMM(BPF_REG_0, 0),
4758 .fixup_map2 = { 3 },
4760 .errstr = "R1 unbounded memory access",
4761 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
4764 "helper access to map: bounds check using <=, good access",
4766 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
4767 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
4768 BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
4769 BPF_LD_MAP_FD(BPF_REG_1, 0),
4770 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
4771 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 4),
4772 BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
4773 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_0, 0),
4774 BPF_JMP_IMM(BPF_JLE, BPF_REG_3, 32, 2),
4775 BPF_MOV64_IMM(BPF_REG_0, 0),
4777 BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_3),
4778 BPF_ST_MEM(BPF_B, BPF_REG_1, 0, 0),
4779 BPF_MOV64_IMM(BPF_REG_0, 0),
4782 .fixup_map2 = { 3 },
4784 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
4787 "helper access to map: bounds check using <=, bad access",
4789 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
4790 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
4791 BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
4792 BPF_LD_MAP_FD(BPF_REG_1, 0),
4793 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
4794 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 4),
4795 BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
4796 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_0, 0),
4797 BPF_JMP_IMM(BPF_JLE, BPF_REG_3, 32, 4),
4798 BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_3),
4799 BPF_ST_MEM(BPF_B, BPF_REG_1, 0, 0),
4800 BPF_MOV64_IMM(BPF_REG_0, 0),
4802 BPF_MOV64_IMM(BPF_REG_0, 0),
4805 .fixup_map2 = { 3 },
4807 .errstr = "R1 unbounded memory access",
4808 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
4811 "helper access to map: bounds check using s<, good access",
4813 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
4814 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
4815 BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
4816 BPF_LD_MAP_FD(BPF_REG_1, 0),
4817 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
4818 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 4),
4819 BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
4820 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_0, 0),
4821 BPF_JMP_IMM(BPF_JSLT, BPF_REG_3, 32, 2),
4822 BPF_MOV64_IMM(BPF_REG_0, 0),
4824 BPF_JMP_IMM(BPF_JSLT, BPF_REG_3, 0, -3),
4825 BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_3),
4826 BPF_ST_MEM(BPF_B, BPF_REG_1, 0, 0),
4827 BPF_MOV64_IMM(BPF_REG_0, 0),
4830 .fixup_map2 = { 3 },
4832 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
4835 "helper access to map: bounds check using s<, good access 2",
4837 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
4838 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
4839 BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
4840 BPF_LD_MAP_FD(BPF_REG_1, 0),
4841 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
4842 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 4),
4843 BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
4844 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_0, 0),
4845 BPF_JMP_IMM(BPF_JSLT, BPF_REG_3, 32, 2),
4846 BPF_MOV64_IMM(BPF_REG_0, 0),
4848 BPF_JMP_IMM(BPF_JSLT, BPF_REG_3, -3, -3),
4849 BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_3),
4850 BPF_ST_MEM(BPF_B, BPF_REG_1, 0, 0),
4851 BPF_MOV64_IMM(BPF_REG_0, 0),
4854 .fixup_map2 = { 3 },
4856 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
4859 "helper access to map: bounds check using s<, bad access",
4861 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
4862 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
4863 BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
4864 BPF_LD_MAP_FD(BPF_REG_1, 0),
4865 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
4866 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 4),
4867 BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
4868 BPF_LDX_MEM(BPF_DW, BPF_REG_3, BPF_REG_0, 0),
4869 BPF_JMP_IMM(BPF_JSLT, BPF_REG_3, 32, 2),
4870 BPF_MOV64_IMM(BPF_REG_0, 0),
4872 BPF_JMP_IMM(BPF_JSLT, BPF_REG_3, -3, -3),
4873 BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_3),
4874 BPF_ST_MEM(BPF_B, BPF_REG_1, 0, 0),
4875 BPF_MOV64_IMM(BPF_REG_0, 0),
4878 .fixup_map2 = { 3 },
4880 .errstr = "R1 min value is negative",
4881 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
4884 "helper access to map: bounds check using s<=, good access",
4886 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
4887 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
4888 BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
4889 BPF_LD_MAP_FD(BPF_REG_1, 0),
4890 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
4891 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 4),
4892 BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
4893 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_0, 0),
4894 BPF_JMP_IMM(BPF_JSLE, BPF_REG_3, 32, 2),
4895 BPF_MOV64_IMM(BPF_REG_0, 0),
4897 BPF_JMP_IMM(BPF_JSLE, BPF_REG_3, 0, -3),
4898 BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_3),
4899 BPF_ST_MEM(BPF_B, BPF_REG_1, 0, 0),
4900 BPF_MOV64_IMM(BPF_REG_0, 0),
4903 .fixup_map2 = { 3 },
4905 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
4908 "helper access to map: bounds check using s<=, good access 2",
4910 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
4911 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
4912 BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
4913 BPF_LD_MAP_FD(BPF_REG_1, 0),
4914 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
4915 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 4),
4916 BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
4917 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_0, 0),
4918 BPF_JMP_IMM(BPF_JSLE, BPF_REG_3, 32, 2),
4919 BPF_MOV64_IMM(BPF_REG_0, 0),
4921 BPF_JMP_IMM(BPF_JSLE, BPF_REG_3, -3, -3),
4922 BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_3),
4923 BPF_ST_MEM(BPF_B, BPF_REG_1, 0, 0),
4924 BPF_MOV64_IMM(BPF_REG_0, 0),
4927 .fixup_map2 = { 3 },
4929 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
4932 "helper access to map: bounds check using s<=, bad access",
4934 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
4935 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
4936 BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
4937 BPF_LD_MAP_FD(BPF_REG_1, 0),
4938 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
4939 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 4),
4940 BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
4941 BPF_LDX_MEM(BPF_DW, BPF_REG_3, BPF_REG_0, 0),
4942 BPF_JMP_IMM(BPF_JSLE, BPF_REG_3, 32, 2),
4943 BPF_MOV64_IMM(BPF_REG_0, 0),
4945 BPF_JMP_IMM(BPF_JSLE, BPF_REG_3, -3, -3),
4946 BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_3),
4947 BPF_ST_MEM(BPF_B, BPF_REG_1, 0, 0),
4948 BPF_MOV64_IMM(BPF_REG_0, 0),
4951 .fixup_map2 = { 3 },
4953 .errstr = "R1 min value is negative",
4954 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
4957 "map element value is preserved across register spilling",
4959 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
4960 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
4961 BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
4962 BPF_LD_MAP_FD(BPF_REG_1, 0),
4963 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
4964 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 6),
4965 BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, 42),
4966 BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
4967 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -184),
4968 BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_0, 0),
4969 BPF_LDX_MEM(BPF_DW, BPF_REG_3, BPF_REG_1, 0),
4970 BPF_ST_MEM(BPF_DW, BPF_REG_3, 0, 42),
4973 .fixup_map2 = { 3 },
4974 .errstr_unpriv = "R0 leaks addr",
4976 .result_unpriv = REJECT,
4979 "map element value or null is marked on register spilling",
4981 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
4982 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
4983 BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
4984 BPF_LD_MAP_FD(BPF_REG_1, 0),
4985 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
4986 BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
4987 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -152),
4988 BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_0, 0),
4989 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 2),
4990 BPF_LDX_MEM(BPF_DW, BPF_REG_3, BPF_REG_1, 0),
4991 BPF_ST_MEM(BPF_DW, BPF_REG_3, 0, 42),
4994 .fixup_map2 = { 3 },
4995 .errstr_unpriv = "R0 leaks addr",
4997 .result_unpriv = REJECT,
5000 "map element value store of cleared call register",
5002 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
5003 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
5004 BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
5005 BPF_LD_MAP_FD(BPF_REG_1, 0),
5006 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
5007 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 1),
5008 BPF_STX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, 0),
5011 .fixup_map2 = { 3 },
5012 .errstr_unpriv = "R1 !read_ok",
5013 .errstr = "R1 !read_ok",
5015 .result_unpriv = REJECT,
5018 "map element value with unaligned store",
5020 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
5021 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
5022 BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
5023 BPF_LD_MAP_FD(BPF_REG_1, 0),
5024 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
5025 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 17),
5026 BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 3),
5027 BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, 42),
5028 BPF_ST_MEM(BPF_DW, BPF_REG_0, 2, 43),
5029 BPF_ST_MEM(BPF_DW, BPF_REG_0, -2, 44),
5030 BPF_MOV64_REG(BPF_REG_8, BPF_REG_0),
5031 BPF_ST_MEM(BPF_DW, BPF_REG_8, 0, 32),
5032 BPF_ST_MEM(BPF_DW, BPF_REG_8, 2, 33),
5033 BPF_ST_MEM(BPF_DW, BPF_REG_8, -2, 34),
5034 BPF_ALU64_IMM(BPF_ADD, BPF_REG_8, 5),
5035 BPF_ST_MEM(BPF_DW, BPF_REG_8, 0, 22),
5036 BPF_ST_MEM(BPF_DW, BPF_REG_8, 4, 23),
5037 BPF_ST_MEM(BPF_DW, BPF_REG_8, -7, 24),
5038 BPF_MOV64_REG(BPF_REG_7, BPF_REG_8),
5039 BPF_ALU64_IMM(BPF_ADD, BPF_REG_7, 3),
5040 BPF_ST_MEM(BPF_DW, BPF_REG_7, 0, 22),
5041 BPF_ST_MEM(BPF_DW, BPF_REG_7, 4, 23),
5042 BPF_ST_MEM(BPF_DW, BPF_REG_7, -4, 24),
5045 .fixup_map2 = { 3 },
5046 .errstr_unpriv = "R0 leaks addr",
5048 .result_unpriv = REJECT,
5049 .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
5052 "map element value with unaligned load",
5054 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
5055 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
5056 BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
5057 BPF_LD_MAP_FD(BPF_REG_1, 0),
5058 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
5059 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 11),
5060 BPF_LDX_MEM(BPF_W, BPF_REG_1, BPF_REG_0, 0),
5061 BPF_JMP_IMM(BPF_JGE, BPF_REG_1, MAX_ENTRIES, 9),
5062 BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 3),
5063 BPF_LDX_MEM(BPF_DW, BPF_REG_7, BPF_REG_0, 0),
5064 BPF_LDX_MEM(BPF_DW, BPF_REG_7, BPF_REG_0, 2),
5065 BPF_MOV64_REG(BPF_REG_8, BPF_REG_0),
5066 BPF_LDX_MEM(BPF_DW, BPF_REG_7, BPF_REG_8, 0),
5067 BPF_LDX_MEM(BPF_DW, BPF_REG_7, BPF_REG_8, 2),
5068 BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 5),
5069 BPF_LDX_MEM(BPF_DW, BPF_REG_7, BPF_REG_0, 0),
5070 BPF_LDX_MEM(BPF_DW, BPF_REG_7, BPF_REG_0, 4),
5073 .fixup_map2 = { 3 },
5074 .errstr_unpriv = "R0 leaks addr",
5076 .result_unpriv = REJECT,
5077 .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
5080 "map element value illegal alu op, 1",
5082 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
5083 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
5084 BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
5085 BPF_LD_MAP_FD(BPF_REG_1, 0),
5086 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
5087 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 2),
5088 BPF_ALU64_IMM(BPF_AND, BPF_REG_0, 8),
5089 BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, 22),
5092 .fixup_map2 = { 3 },
5093 .errstr_unpriv = "R0 bitwise operator &= on pointer",
5094 .errstr = "invalid mem access 'inv'",
5096 .result_unpriv = REJECT,
5099 "map element value illegal alu op, 2",
5101 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
5102 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
5103 BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
5104 BPF_LD_MAP_FD(BPF_REG_1, 0),
5105 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
5106 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 2),
5107 BPF_ALU32_IMM(BPF_ADD, BPF_REG_0, 0),
5108 BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, 22),
5111 .fixup_map2 = { 3 },
5112 .errstr_unpriv = "R0 32-bit pointer arithmetic prohibited",
5113 .errstr = "invalid mem access 'inv'",
5115 .result_unpriv = REJECT,
5118 "map element value illegal alu op, 3",
5120 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
5121 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
5122 BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
5123 BPF_LD_MAP_FD(BPF_REG_1, 0),
5124 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
5125 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 2),
5126 BPF_ALU64_IMM(BPF_DIV, BPF_REG_0, 42),
5127 BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, 22),
5130 .fixup_map2 = { 3 },
5131 .errstr_unpriv = "R0 pointer arithmetic with /= operator",
5132 .errstr = "invalid mem access 'inv'",
5134 .result_unpriv = REJECT,
5137 "map element value illegal alu op, 4",
5139 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
5140 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
5141 BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
5142 BPF_LD_MAP_FD(BPF_REG_1, 0),
5143 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
5144 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 2),
5145 BPF_ENDIAN(BPF_FROM_BE, BPF_REG_0, 64),
5146 BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, 22),
5149 .fixup_map2 = { 3 },
5150 .errstr_unpriv = "R0 pointer arithmetic prohibited",
5151 .errstr = "invalid mem access 'inv'",
5153 .result_unpriv = REJECT,
5156 "map element value illegal alu op, 5",
5158 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
5159 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
5160 BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
5161 BPF_LD_MAP_FD(BPF_REG_1, 0),
5162 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
5163 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 7),
5164 BPF_MOV64_IMM(BPF_REG_3, 4096),
5165 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
5166 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
5167 BPF_STX_MEM(BPF_DW, BPF_REG_2, BPF_REG_0, 0),
5168 BPF_STX_XADD(BPF_DW, BPF_REG_2, BPF_REG_3, 0),
5169 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_2, 0),
5170 BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, 22),
5173 .fixup_map2 = { 3 },
5174 .errstr = "R0 invalid mem access 'inv'",
5178 "map element value is preserved across register spilling",
5180 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
5181 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
5182 BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
5183 BPF_LD_MAP_FD(BPF_REG_1, 0),
5184 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
5185 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 7),
5186 BPF_ALU64_IMM(BPF_ADD, BPF_REG_0,
5187 offsetof(struct test_val, foo)),
5188 BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, 42),
5189 BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
5190 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -184),
5191 BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_0, 0),
5192 BPF_LDX_MEM(BPF_DW, BPF_REG_3, BPF_REG_1, 0),
5193 BPF_ST_MEM(BPF_DW, BPF_REG_3, 0, 42),
5196 .fixup_map2 = { 3 },
5197 .errstr_unpriv = "R0 leaks addr",
5199 .result_unpriv = REJECT,
5200 .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
5203 "helper access to variable memory: stack, bitwise AND + JMP, correct bounds",
5205 BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
5206 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -64),
5207 BPF_MOV64_IMM(BPF_REG_0, 0),
5208 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -64),
5209 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -56),
5210 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -48),
5211 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -40),
5212 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -32),
5213 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -24),
5214 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -16),
5215 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -8),
5216 BPF_MOV64_IMM(BPF_REG_2, 16),
5217 BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_2, -128),
5218 BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_1, -128),
5219 BPF_ALU64_IMM(BPF_AND, BPF_REG_2, 64),
5220 BPF_MOV64_IMM(BPF_REG_4, 0),
5221 BPF_JMP_REG(BPF_JGE, BPF_REG_4, BPF_REG_2, 2),
5222 BPF_MOV64_IMM(BPF_REG_3, 0),
5223 BPF_EMIT_CALL(BPF_FUNC_probe_read),
5224 BPF_MOV64_IMM(BPF_REG_0, 0),
5228 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
5231 "helper access to variable memory: stack, bitwise AND, zero included",
5233 BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
5234 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -64),
5235 BPF_MOV64_IMM(BPF_REG_2, 16),
5236 BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_2, -128),
5237 BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_1, -128),
5238 BPF_ALU64_IMM(BPF_AND, BPF_REG_2, 64),
5239 BPF_MOV64_IMM(BPF_REG_3, 0),
5240 BPF_EMIT_CALL(BPF_FUNC_probe_read),
5243 .errstr = "invalid stack type R1 off=-64 access_size=0",
5245 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
5248 "helper access to variable memory: stack, bitwise AND + JMP, wrong max",
5250 BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
5251 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -64),
5252 BPF_MOV64_IMM(BPF_REG_2, 16),
5253 BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_2, -128),
5254 BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_1, -128),
5255 BPF_ALU64_IMM(BPF_AND, BPF_REG_2, 65),
5256 BPF_MOV64_IMM(BPF_REG_4, 0),
5257 BPF_JMP_REG(BPF_JGE, BPF_REG_4, BPF_REG_2, 2),
5258 BPF_MOV64_IMM(BPF_REG_3, 0),
5259 BPF_EMIT_CALL(BPF_FUNC_probe_read),
5260 BPF_MOV64_IMM(BPF_REG_0, 0),
5263 .errstr = "invalid stack type R1 off=-64 access_size=65",
5265 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
5268 "helper access to variable memory: stack, JMP, correct bounds",
5270 BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
5271 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -64),
5272 BPF_MOV64_IMM(BPF_REG_0, 0),
5273 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -64),
5274 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -56),
5275 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -48),
5276 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -40),
5277 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -32),
5278 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -24),
5279 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -16),
5280 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -8),
5281 BPF_MOV64_IMM(BPF_REG_2, 16),
5282 BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_2, -128),
5283 BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_1, -128),
5284 BPF_JMP_IMM(BPF_JGT, BPF_REG_2, 64, 4),
5285 BPF_MOV64_IMM(BPF_REG_4, 0),
5286 BPF_JMP_REG(BPF_JGE, BPF_REG_4, BPF_REG_2, 2),
5287 BPF_MOV64_IMM(BPF_REG_3, 0),
5288 BPF_EMIT_CALL(BPF_FUNC_probe_read),
5289 BPF_MOV64_IMM(BPF_REG_0, 0),
5293 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
5296 "helper access to variable memory: stack, JMP (signed), correct bounds",
5298 BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
5299 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -64),
5300 BPF_MOV64_IMM(BPF_REG_0, 0),
5301 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -64),
5302 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -56),
5303 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -48),
5304 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -40),
5305 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -32),
5306 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -24),
5307 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -16),
5308 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -8),
5309 BPF_MOV64_IMM(BPF_REG_2, 16),
5310 BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_2, -128),
5311 BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_1, -128),
5312 BPF_JMP_IMM(BPF_JSGT, BPF_REG_2, 64, 4),
5313 BPF_MOV64_IMM(BPF_REG_4, 0),
5314 BPF_JMP_REG(BPF_JSGE, BPF_REG_4, BPF_REG_2, 2),
5315 BPF_MOV64_IMM(BPF_REG_3, 0),
5316 BPF_EMIT_CALL(BPF_FUNC_probe_read),
5317 BPF_MOV64_IMM(BPF_REG_0, 0),
5321 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
5324 "helper access to variable memory: stack, JMP, bounds + offset",
5326 BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
5327 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -64),
5328 BPF_MOV64_IMM(BPF_REG_2, 16),
5329 BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_2, -128),
5330 BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_1, -128),
5331 BPF_JMP_IMM(BPF_JGT, BPF_REG_2, 64, 5),
5332 BPF_MOV64_IMM(BPF_REG_4, 0),
5333 BPF_JMP_REG(BPF_JGE, BPF_REG_4, BPF_REG_2, 3),
5334 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, 1),
5335 BPF_MOV64_IMM(BPF_REG_3, 0),
5336 BPF_EMIT_CALL(BPF_FUNC_probe_read),
5337 BPF_MOV64_IMM(BPF_REG_0, 0),
5340 .errstr = "invalid stack type R1 off=-64 access_size=65",
5342 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
5345 "helper access to variable memory: stack, JMP, wrong max",
5347 BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
5348 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -64),
5349 BPF_MOV64_IMM(BPF_REG_2, 16),
5350 BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_2, -128),
5351 BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_1, -128),
5352 BPF_JMP_IMM(BPF_JGT, BPF_REG_2, 65, 4),
5353 BPF_MOV64_IMM(BPF_REG_4, 0),
5354 BPF_JMP_REG(BPF_JGE, BPF_REG_4, BPF_REG_2, 2),
5355 BPF_MOV64_IMM(BPF_REG_3, 0),
5356 BPF_EMIT_CALL(BPF_FUNC_probe_read),
5357 BPF_MOV64_IMM(BPF_REG_0, 0),
5360 .errstr = "invalid stack type R1 off=-64 access_size=65",
5362 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
5365 "helper access to variable memory: stack, JMP, no max check",
5367 BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
5368 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -64),
5369 BPF_MOV64_IMM(BPF_REG_2, 16),
5370 BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_2, -128),
5371 BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_1, -128),
5372 BPF_MOV64_IMM(BPF_REG_4, 0),
5373 BPF_JMP_REG(BPF_JGE, BPF_REG_4, BPF_REG_2, 2),
5374 BPF_MOV64_IMM(BPF_REG_3, 0),
5375 BPF_EMIT_CALL(BPF_FUNC_probe_read),
5376 BPF_MOV64_IMM(BPF_REG_0, 0),
5379 /* because max wasn't checked, signed min is negative */
5380 .errstr = "R2 min value is negative, either use unsigned or 'var &= const'",
5382 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
5385 "helper access to variable memory: stack, JMP, no min check",
5387 BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
5388 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -64),
5389 BPF_MOV64_IMM(BPF_REG_2, 16),
5390 BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_2, -128),
5391 BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_1, -128),
5392 BPF_JMP_IMM(BPF_JGT, BPF_REG_2, 64, 3),
5393 BPF_MOV64_IMM(BPF_REG_3, 0),
5394 BPF_EMIT_CALL(BPF_FUNC_probe_read),
5395 BPF_MOV64_IMM(BPF_REG_0, 0),
5398 .errstr = "invalid stack type R1 off=-64 access_size=0",
5400 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
5403 "helper access to variable memory: stack, JMP (signed), no min check",
5405 BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
5406 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -64),
5407 BPF_MOV64_IMM(BPF_REG_2, 16),
5408 BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_2, -128),
5409 BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_1, -128),
5410 BPF_JMP_IMM(BPF_JSGT, BPF_REG_2, 64, 3),
5411 BPF_MOV64_IMM(BPF_REG_3, 0),
5412 BPF_EMIT_CALL(BPF_FUNC_probe_read),
5413 BPF_MOV64_IMM(BPF_REG_0, 0),
5416 .errstr = "R2 min value is negative",
5418 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
5421 "helper access to variable memory: map, JMP, correct bounds",
5423 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
5424 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
5425 BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
5426 BPF_LD_MAP_FD(BPF_REG_1, 0),
5427 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
5428 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 10),
5429 BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
5430 BPF_MOV64_IMM(BPF_REG_2, sizeof(struct test_val)),
5431 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_2, -128),
5432 BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_10, -128),
5433 BPF_JMP_IMM(BPF_JSGT, BPF_REG_2,
5434 sizeof(struct test_val), 4),
5435 BPF_MOV64_IMM(BPF_REG_4, 0),
5436 BPF_JMP_REG(BPF_JSGE, BPF_REG_4, BPF_REG_2, 2),
5437 BPF_MOV64_IMM(BPF_REG_3, 0),
5438 BPF_EMIT_CALL(BPF_FUNC_probe_read),
5439 BPF_MOV64_IMM(BPF_REG_0, 0),
5442 .fixup_map2 = { 3 },
5444 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
5447 "helper access to variable memory: map, JMP, wrong max",
5449 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
5450 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
5451 BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
5452 BPF_LD_MAP_FD(BPF_REG_1, 0),
5453 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
5454 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 10),
5455 BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
5456 BPF_MOV64_IMM(BPF_REG_2, sizeof(struct test_val)),
5457 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_2, -128),
5458 BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_10, -128),
5459 BPF_JMP_IMM(BPF_JSGT, BPF_REG_2,
5460 sizeof(struct test_val) + 1, 4),
5461 BPF_MOV64_IMM(BPF_REG_4, 0),
5462 BPF_JMP_REG(BPF_JSGE, BPF_REG_4, BPF_REG_2, 2),
5463 BPF_MOV64_IMM(BPF_REG_3, 0),
5464 BPF_EMIT_CALL(BPF_FUNC_probe_read),
5465 BPF_MOV64_IMM(BPF_REG_0, 0),
5468 .fixup_map2 = { 3 },
5469 .errstr = "invalid access to map value, value_size=48 off=0 size=49",
5471 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
5474 "helper access to variable memory: map adjusted, JMP, correct bounds",
5476 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
5477 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
5478 BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
5479 BPF_LD_MAP_FD(BPF_REG_1, 0),
5480 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
5481 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 11),
5482 BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
5483 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 20),
5484 BPF_MOV64_IMM(BPF_REG_2, sizeof(struct test_val)),
5485 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_2, -128),
5486 BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_10, -128),
5487 BPF_JMP_IMM(BPF_JSGT, BPF_REG_2,
5488 sizeof(struct test_val) - 20, 4),
5489 BPF_MOV64_IMM(BPF_REG_4, 0),
5490 BPF_JMP_REG(BPF_JSGE, BPF_REG_4, BPF_REG_2, 2),
5491 BPF_MOV64_IMM(BPF_REG_3, 0),
5492 BPF_EMIT_CALL(BPF_FUNC_probe_read),
5493 BPF_MOV64_IMM(BPF_REG_0, 0),
5496 .fixup_map2 = { 3 },
5498 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
5501 "helper access to variable memory: map adjusted, JMP, wrong max",
5503 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
5504 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
5505 BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
5506 BPF_LD_MAP_FD(BPF_REG_1, 0),
5507 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
5508 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 11),
5509 BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
5510 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 20),
5511 BPF_MOV64_IMM(BPF_REG_2, sizeof(struct test_val)),
5512 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_2, -128),
5513 BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_10, -128),
5514 BPF_JMP_IMM(BPF_JSGT, BPF_REG_2,
5515 sizeof(struct test_val) - 19, 4),
5516 BPF_MOV64_IMM(BPF_REG_4, 0),
5517 BPF_JMP_REG(BPF_JSGE, BPF_REG_4, BPF_REG_2, 2),
5518 BPF_MOV64_IMM(BPF_REG_3, 0),
5519 BPF_EMIT_CALL(BPF_FUNC_probe_read),
5520 BPF_MOV64_IMM(BPF_REG_0, 0),
5523 .fixup_map2 = { 3 },
5524 .errstr = "R1 min value is outside of the array range",
5526 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
5529 "helper access to variable memory: size = 0 allowed on NULL",
5531 BPF_MOV64_IMM(BPF_REG_1, 0),
5532 BPF_MOV64_IMM(BPF_REG_2, 0),
5533 BPF_MOV64_IMM(BPF_REG_3, 0),
5534 BPF_MOV64_IMM(BPF_REG_4, 0),
5535 BPF_MOV64_IMM(BPF_REG_5, 0),
5536 BPF_EMIT_CALL(BPF_FUNC_csum_diff),
5540 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
5543 "helper access to variable memory: size > 0 not allowed on NULL",
5545 BPF_MOV64_IMM(BPF_REG_1, 0),
5546 BPF_MOV64_IMM(BPF_REG_2, 0),
5547 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_2, -128),
5548 BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_10, -128),
5549 BPF_ALU64_IMM(BPF_AND, BPF_REG_2, 64),
5550 BPF_MOV64_IMM(BPF_REG_3, 0),
5551 BPF_MOV64_IMM(BPF_REG_4, 0),
5552 BPF_MOV64_IMM(BPF_REG_5, 0),
5553 BPF_EMIT_CALL(BPF_FUNC_csum_diff),
5556 .errstr = "R1 type=inv expected=fp",
5558 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
5561 "helper access to variable memory: size = 0 not allowed on != NULL",
5563 BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
5564 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -8),
5565 BPF_MOV64_IMM(BPF_REG_2, 0),
5566 BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_2, 0),
5567 BPF_ALU64_IMM(BPF_AND, BPF_REG_2, 8),
5568 BPF_MOV64_IMM(BPF_REG_3, 0),
5569 BPF_MOV64_IMM(BPF_REG_4, 0),
5570 BPF_MOV64_IMM(BPF_REG_5, 0),
5571 BPF_EMIT_CALL(BPF_FUNC_csum_diff),
5574 .errstr = "invalid stack type R1 off=-8 access_size=0",
5576 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
5579 "helper access to variable memory: 8 bytes leak",
5581 BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
5582 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -64),
5583 BPF_MOV64_IMM(BPF_REG_0, 0),
5584 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -64),
5585 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -56),
5586 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -48),
5587 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -40),
5588 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -24),
5589 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -16),
5590 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -8),
5591 BPF_MOV64_IMM(BPF_REG_2, 0),
5592 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_2, -128),
5593 BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_10, -128),
5594 BPF_ALU64_IMM(BPF_AND, BPF_REG_2, 63),
5595 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, 1),
5596 BPF_MOV64_IMM(BPF_REG_3, 0),
5597 BPF_EMIT_CALL(BPF_FUNC_probe_read),
5598 BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_10, -16),
5601 .errstr = "invalid indirect read from stack off -64+32 size 64",
5603 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
5606 "helper access to variable memory: 8 bytes no leak (init memory)",
5608 BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
5609 BPF_MOV64_IMM(BPF_REG_0, 0),
5610 BPF_MOV64_IMM(BPF_REG_0, 0),
5611 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -64),
5612 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -56),
5613 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -48),
5614 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -40),
5615 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -32),
5616 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -24),
5617 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -16),
5618 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -8),
5619 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -64),
5620 BPF_MOV64_IMM(BPF_REG_2, 0),
5621 BPF_ALU64_IMM(BPF_AND, BPF_REG_2, 32),
5622 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, 32),
5623 BPF_MOV64_IMM(BPF_REG_3, 0),
5624 BPF_EMIT_CALL(BPF_FUNC_probe_read),
5625 BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_10, -16),
5629 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
5632 "invalid and of negative number",
5634 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
5635 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
5636 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
5637 BPF_LD_MAP_FD(BPF_REG_1, 0),
5638 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
5639 BPF_FUNC_map_lookup_elem),
5640 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 4),
5641 BPF_LDX_MEM(BPF_B, BPF_REG_1, BPF_REG_0, 0),
5642 BPF_ALU64_IMM(BPF_AND, BPF_REG_1, -4),
5643 BPF_ALU64_IMM(BPF_LSH, BPF_REG_1, 2),
5644 BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
5645 BPF_ST_MEM(BPF_DW, BPF_REG_0, 0,
5646 offsetof(struct test_val, foo)),
5649 .fixup_map2 = { 3 },
5650 .errstr = "R0 max value is outside of the array range",
5652 .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
5655 "invalid range check",
5657 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
5658 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
5659 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
5660 BPF_LD_MAP_FD(BPF_REG_1, 0),
5661 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
5662 BPF_FUNC_map_lookup_elem),
5663 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 12),
5664 BPF_LDX_MEM(BPF_W, BPF_REG_1, BPF_REG_0, 0),
5665 BPF_MOV64_IMM(BPF_REG_9, 1),
5666 BPF_ALU32_IMM(BPF_MOD, BPF_REG_1, 2),
5667 BPF_ALU32_IMM(BPF_ADD, BPF_REG_1, 1),
5668 BPF_ALU32_REG(BPF_AND, BPF_REG_9, BPF_REG_1),
5669 BPF_ALU32_IMM(BPF_ADD, BPF_REG_9, 1),
5670 BPF_ALU32_IMM(BPF_RSH, BPF_REG_9, 1),
5671 BPF_MOV32_IMM(BPF_REG_3, 1),
5672 BPF_ALU32_REG(BPF_SUB, BPF_REG_3, BPF_REG_9),
5673 BPF_ALU32_IMM(BPF_MUL, BPF_REG_3, 0x10000000),
5674 BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_3),
5675 BPF_STX_MEM(BPF_W, BPF_REG_0, BPF_REG_3, 0),
5676 BPF_MOV64_REG(BPF_REG_0, 0),
5679 .fixup_map2 = { 3 },
5680 .errstr = "R0 max value is outside of the array range",
5682 .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
5685 "map in map access",
5687 BPF_ST_MEM(0, BPF_REG_10, -4, 0),
5688 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
5689 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -4),
5690 BPF_LD_MAP_FD(BPF_REG_1, 0),
5691 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
5692 BPF_FUNC_map_lookup_elem),
5693 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 5),
5694 BPF_ST_MEM(0, BPF_REG_10, -4, 0),
5695 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
5696 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -4),
5697 BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
5698 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
5699 BPF_FUNC_map_lookup_elem),
5700 BPF_MOV64_REG(BPF_REG_0, 0),
5703 .fixup_map_in_map = { 3 },
5707 "invalid inner map pointer",
5709 BPF_ST_MEM(0, BPF_REG_10, -4, 0),
5710 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
5711 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -4),
5712 BPF_LD_MAP_FD(BPF_REG_1, 0),
5713 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
5714 BPF_FUNC_map_lookup_elem),
5715 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 6),
5716 BPF_ST_MEM(0, BPF_REG_10, -4, 0),
5717 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
5718 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -4),
5719 BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
5720 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
5721 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
5722 BPF_FUNC_map_lookup_elem),
5723 BPF_MOV64_REG(BPF_REG_0, 0),
5726 .fixup_map_in_map = { 3 },
5727 .errstr = "R1 type=inv expected=map_ptr",
5728 .errstr_unpriv = "R1 pointer arithmetic on CONST_PTR_TO_MAP prohibited",
5732 "forgot null checking on the inner map pointer",
5734 BPF_ST_MEM(0, BPF_REG_10, -4, 0),
5735 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
5736 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -4),
5737 BPF_LD_MAP_FD(BPF_REG_1, 0),
5738 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
5739 BPF_FUNC_map_lookup_elem),
5740 BPF_ST_MEM(0, BPF_REG_10, -4, 0),
5741 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
5742 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -4),
5743 BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
5744 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
5745 BPF_FUNC_map_lookup_elem),
5746 BPF_MOV64_REG(BPF_REG_0, 0),
5749 .fixup_map_in_map = { 3 },
5750 .errstr = "R1 type=map_value_or_null expected=map_ptr",
5754 "ld_abs: check calling conv, r1",
5756 BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
5757 BPF_MOV64_IMM(BPF_REG_1, 0),
5758 BPF_LD_ABS(BPF_W, -0x200000),
5759 BPF_MOV64_REG(BPF_REG_0, BPF_REG_1),
5762 .errstr = "R1 !read_ok",
5766 "ld_abs: check calling conv, r2",
5768 BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
5769 BPF_MOV64_IMM(BPF_REG_2, 0),
5770 BPF_LD_ABS(BPF_W, -0x200000),
5771 BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
5774 .errstr = "R2 !read_ok",
5778 "ld_abs: check calling conv, r3",
5780 BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
5781 BPF_MOV64_IMM(BPF_REG_3, 0),
5782 BPF_LD_ABS(BPF_W, -0x200000),
5783 BPF_MOV64_REG(BPF_REG_0, BPF_REG_3),
5786 .errstr = "R3 !read_ok",
5790 "ld_abs: check calling conv, r4",
5792 BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
5793 BPF_MOV64_IMM(BPF_REG_4, 0),
5794 BPF_LD_ABS(BPF_W, -0x200000),
5795 BPF_MOV64_REG(BPF_REG_0, BPF_REG_4),
5798 .errstr = "R4 !read_ok",
5802 "ld_abs: check calling conv, r5",
5804 BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
5805 BPF_MOV64_IMM(BPF_REG_5, 0),
5806 BPF_LD_ABS(BPF_W, -0x200000),
5807 BPF_MOV64_REG(BPF_REG_0, BPF_REG_5),
5810 .errstr = "R5 !read_ok",
5814 "ld_abs: check calling conv, r7",
5816 BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
5817 BPF_MOV64_IMM(BPF_REG_7, 0),
5818 BPF_LD_ABS(BPF_W, -0x200000),
5819 BPF_MOV64_REG(BPF_REG_0, BPF_REG_7),
5825 "ld_ind: check calling conv, r1",
5827 BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
5828 BPF_MOV64_IMM(BPF_REG_1, 1),
5829 BPF_LD_IND(BPF_W, BPF_REG_1, -0x200000),
5830 BPF_MOV64_REG(BPF_REG_0, BPF_REG_1),
5833 .errstr = "R1 !read_ok",
5837 "ld_ind: check calling conv, r2",
5839 BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
5840 BPF_MOV64_IMM(BPF_REG_2, 1),
5841 BPF_LD_IND(BPF_W, BPF_REG_2, -0x200000),
5842 BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
5845 .errstr = "R2 !read_ok",
5849 "ld_ind: check calling conv, r3",
5851 BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
5852 BPF_MOV64_IMM(BPF_REG_3, 1),
5853 BPF_LD_IND(BPF_W, BPF_REG_3, -0x200000),
5854 BPF_MOV64_REG(BPF_REG_0, BPF_REG_3),
5857 .errstr = "R3 !read_ok",
5861 "ld_ind: check calling conv, r4",
5863 BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
5864 BPF_MOV64_IMM(BPF_REG_4, 1),
5865 BPF_LD_IND(BPF_W, BPF_REG_4, -0x200000),
5866 BPF_MOV64_REG(BPF_REG_0, BPF_REG_4),
5869 .errstr = "R4 !read_ok",
5873 "ld_ind: check calling conv, r5",
5875 BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
5876 BPF_MOV64_IMM(BPF_REG_5, 1),
5877 BPF_LD_IND(BPF_W, BPF_REG_5, -0x200000),
5878 BPF_MOV64_REG(BPF_REG_0, BPF_REG_5),
5881 .errstr = "R5 !read_ok",
5885 "ld_ind: check calling conv, r7",
5887 BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
5888 BPF_MOV64_IMM(BPF_REG_7, 1),
5889 BPF_LD_IND(BPF_W, BPF_REG_7, -0x200000),
5890 BPF_MOV64_REG(BPF_REG_0, BPF_REG_7),
5896 "check bpf_perf_event_data->sample_period byte load permitted",
5898 BPF_MOV64_IMM(BPF_REG_0, 0),
5899 #if __BYTE_ORDER == __LITTLE_ENDIAN
5900 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
5901 offsetof(struct bpf_perf_event_data, sample_period)),
5903 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
5904 offsetof(struct bpf_perf_event_data, sample_period) + 7),
5909 .prog_type = BPF_PROG_TYPE_PERF_EVENT,
5912 "check bpf_perf_event_data->sample_period half load permitted",
5914 BPF_MOV64_IMM(BPF_REG_0, 0),
5915 #if __BYTE_ORDER == __LITTLE_ENDIAN
5916 BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_1,
5917 offsetof(struct bpf_perf_event_data, sample_period)),
5919 BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_1,
5920 offsetof(struct bpf_perf_event_data, sample_period) + 6),
5925 .prog_type = BPF_PROG_TYPE_PERF_EVENT,
5928 "check bpf_perf_event_data->sample_period word load permitted",
5930 BPF_MOV64_IMM(BPF_REG_0, 0),
5931 #if __BYTE_ORDER == __LITTLE_ENDIAN
5932 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
5933 offsetof(struct bpf_perf_event_data, sample_period)),
5935 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
5936 offsetof(struct bpf_perf_event_data, sample_period) + 4),
5941 .prog_type = BPF_PROG_TYPE_PERF_EVENT,
5944 "check bpf_perf_event_data->sample_period dword load permitted",
5946 BPF_MOV64_IMM(BPF_REG_0, 0),
5947 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1,
5948 offsetof(struct bpf_perf_event_data, sample_period)),
5952 .prog_type = BPF_PROG_TYPE_PERF_EVENT,
5955 "check skb->data half load not permitted",
5957 BPF_MOV64_IMM(BPF_REG_0, 0),
5958 #if __BYTE_ORDER == __LITTLE_ENDIAN
5959 BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_1,
5960 offsetof(struct __sk_buff, data)),
5962 BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_1,
5963 offsetof(struct __sk_buff, data) + 2),
5968 .errstr = "invalid bpf_context access",
5971 "check skb->tc_classid half load not permitted for lwt prog",
5973 BPF_MOV64_IMM(BPF_REG_0, 0),
5974 #if __BYTE_ORDER == __LITTLE_ENDIAN
5975 BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_1,
5976 offsetof(struct __sk_buff, tc_classid)),
5978 BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_1,
5979 offsetof(struct __sk_buff, tc_classid) + 2),
5984 .errstr = "invalid bpf_context access",
5985 .prog_type = BPF_PROG_TYPE_LWT_IN,
5988 "bounds checks mixing signed and unsigned, positive bounds",
5990 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
5991 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
5992 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
5993 BPF_LD_MAP_FD(BPF_REG_1, 0),
5994 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
5995 BPF_FUNC_map_lookup_elem),
5996 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 7),
5997 BPF_ST_MEM(BPF_DW, BPF_REG_10, -16, -8),
5998 BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_10, -16),
5999 BPF_MOV64_IMM(BPF_REG_2, 2),
6000 BPF_JMP_REG(BPF_JGE, BPF_REG_2, BPF_REG_1, 3),
6001 BPF_JMP_IMM(BPF_JSGT, BPF_REG_1, 4, 2),
6002 BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
6003 BPF_ST_MEM(BPF_B, BPF_REG_0, 0, 0),
6004 BPF_MOV64_IMM(BPF_REG_0, 0),
6007 .fixup_map1 = { 3 },
6008 .errstr = "R0 min value is negative",
6012 "bounds checks mixing signed and unsigned",
6014 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
6015 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
6016 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
6017 BPF_LD_MAP_FD(BPF_REG_1, 0),
6018 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
6019 BPF_FUNC_map_lookup_elem),
6020 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 7),
6021 BPF_ST_MEM(BPF_DW, BPF_REG_10, -16, -8),
6022 BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_10, -16),
6023 BPF_MOV64_IMM(BPF_REG_2, -1),
6024 BPF_JMP_REG(BPF_JGT, BPF_REG_1, BPF_REG_2, 3),
6025 BPF_JMP_IMM(BPF_JSGT, BPF_REG_1, 1, 2),
6026 BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
6027 BPF_ST_MEM(BPF_B, BPF_REG_0, 0, 0),
6028 BPF_MOV64_IMM(BPF_REG_0, 0),
6031 .fixup_map1 = { 3 },
6032 .errstr = "R0 min value is negative",
6036 "bounds checks mixing signed and unsigned, variant 2",
6038 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
6039 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
6040 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
6041 BPF_LD_MAP_FD(BPF_REG_1, 0),
6042 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
6043 BPF_FUNC_map_lookup_elem),
6044 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 9),
6045 BPF_ST_MEM(BPF_DW, BPF_REG_10, -16, -8),
6046 BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_10, -16),
6047 BPF_MOV64_IMM(BPF_REG_2, -1),
6048 BPF_JMP_REG(BPF_JGT, BPF_REG_1, BPF_REG_2, 5),
6049 BPF_MOV64_IMM(BPF_REG_8, 0),
6050 BPF_ALU64_REG(BPF_ADD, BPF_REG_8, BPF_REG_1),
6051 BPF_JMP_IMM(BPF_JSGT, BPF_REG_8, 1, 2),
6052 BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_8),
6053 BPF_ST_MEM(BPF_B, BPF_REG_8, 0, 0),
6054 BPF_MOV64_IMM(BPF_REG_0, 0),
6057 .fixup_map1 = { 3 },
6058 .errstr = "R8 invalid mem access 'inv'",
6062 "bounds checks mixing signed and unsigned, variant 3",
6064 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
6065 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
6066 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
6067 BPF_LD_MAP_FD(BPF_REG_1, 0),
6068 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
6069 BPF_FUNC_map_lookup_elem),
6070 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 8),
6071 BPF_ST_MEM(BPF_DW, BPF_REG_10, -16, -8),
6072 BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_10, -16),
6073 BPF_MOV64_IMM(BPF_REG_2, -1),
6074 BPF_JMP_REG(BPF_JGT, BPF_REG_1, BPF_REG_2, 4),
6075 BPF_MOV64_REG(BPF_REG_8, BPF_REG_1),
6076 BPF_JMP_IMM(BPF_JSGT, BPF_REG_8, 1, 2),
6077 BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_8),
6078 BPF_ST_MEM(BPF_B, BPF_REG_8, 0, 0),
6079 BPF_MOV64_IMM(BPF_REG_0, 0),
6082 .fixup_map1 = { 3 },
6083 .errstr = "R8 invalid mem access 'inv'",
6087 "bounds checks mixing signed and unsigned, variant 4",
6089 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
6090 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
6091 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
6092 BPF_LD_MAP_FD(BPF_REG_1, 0),
6093 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
6094 BPF_FUNC_map_lookup_elem),
6095 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 7),
6096 BPF_ST_MEM(BPF_DW, BPF_REG_10, -16, -8),
6097 BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_10, -16),
6098 BPF_MOV64_IMM(BPF_REG_2, 1),
6099 BPF_ALU64_REG(BPF_AND, BPF_REG_1, BPF_REG_2),
6100 BPF_JMP_IMM(BPF_JSGT, BPF_REG_1, 1, 2),
6101 BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
6102 BPF_ST_MEM(BPF_B, BPF_REG_0, 0, 0),
6103 BPF_MOV64_IMM(BPF_REG_0, 0),
6106 .fixup_map1 = { 3 },
6110 "bounds checks mixing signed and unsigned, variant 5",
6112 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
6113 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
6114 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
6115 BPF_LD_MAP_FD(BPF_REG_1, 0),
6116 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
6117 BPF_FUNC_map_lookup_elem),
6118 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 9),
6119 BPF_ST_MEM(BPF_DW, BPF_REG_10, -16, -8),
6120 BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_10, -16),
6121 BPF_MOV64_IMM(BPF_REG_2, -1),
6122 BPF_JMP_REG(BPF_JGT, BPF_REG_1, BPF_REG_2, 5),
6123 BPF_JMP_IMM(BPF_JSGT, BPF_REG_1, 1, 4),
6124 BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 4),
6125 BPF_ALU64_REG(BPF_SUB, BPF_REG_0, BPF_REG_1),
6126 BPF_ST_MEM(BPF_B, BPF_REG_0, 0, 0),
6127 BPF_MOV64_IMM(BPF_REG_0, 0),
6130 .fixup_map1 = { 3 },
6131 .errstr = "R0 min value is negative",
6135 "bounds checks mixing signed and unsigned, variant 6",
6137 BPF_MOV64_IMM(BPF_REG_2, 0),
6138 BPF_MOV64_REG(BPF_REG_3, BPF_REG_10),
6139 BPF_ALU64_IMM(BPF_ADD, BPF_REG_3, -512),
6140 BPF_ST_MEM(BPF_DW, BPF_REG_10, -16, -8),
6141 BPF_LDX_MEM(BPF_DW, BPF_REG_4, BPF_REG_10, -16),
6142 BPF_MOV64_IMM(BPF_REG_6, -1),
6143 BPF_JMP_REG(BPF_JGT, BPF_REG_4, BPF_REG_6, 5),
6144 BPF_JMP_IMM(BPF_JSGT, BPF_REG_4, 1, 4),
6145 BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, 1),
6146 BPF_MOV64_IMM(BPF_REG_5, 0),
6147 BPF_ST_MEM(BPF_H, BPF_REG_10, -512, 0),
6148 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
6149 BPF_FUNC_skb_load_bytes),
6150 BPF_MOV64_IMM(BPF_REG_0, 0),
6153 .errstr = "R4 min value is negative, either use unsigned",
6157 "bounds checks mixing signed and unsigned, variant 7",
6159 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
6160 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
6161 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
6162 BPF_LD_MAP_FD(BPF_REG_1, 0),
6163 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
6164 BPF_FUNC_map_lookup_elem),
6165 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 7),
6166 BPF_ST_MEM(BPF_DW, BPF_REG_10, -16, -8),
6167 BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_10, -16),
6168 BPF_MOV64_IMM(BPF_REG_2, 1024 * 1024 * 1024),
6169 BPF_JMP_REG(BPF_JGT, BPF_REG_1, BPF_REG_2, 3),
6170 BPF_JMP_IMM(BPF_JSGT, BPF_REG_1, 1, 2),
6171 BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
6172 BPF_ST_MEM(BPF_B, BPF_REG_0, 0, 0),
6173 BPF_MOV64_IMM(BPF_REG_0, 0),
6176 .fixup_map1 = { 3 },
6180 "bounds checks mixing signed and unsigned, variant 8",
6182 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
6183 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
6184 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
6185 BPF_LD_MAP_FD(BPF_REG_1, 0),
6186 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
6187 BPF_FUNC_map_lookup_elem),
6188 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 9),
6189 BPF_ST_MEM(BPF_DW, BPF_REG_10, -16, -8),
6190 BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_10, -16),
6191 BPF_MOV64_IMM(BPF_REG_2, -1),
6192 BPF_JMP_REG(BPF_JGT, BPF_REG_2, BPF_REG_1, 2),
6193 BPF_MOV64_IMM(BPF_REG_0, 0),
6195 BPF_JMP_IMM(BPF_JSGT, BPF_REG_1, 1, 2),
6196 BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
6197 BPF_ST_MEM(BPF_B, BPF_REG_0, 0, 0),
6198 BPF_MOV64_IMM(BPF_REG_0, 0),
6201 .fixup_map1 = { 3 },
6202 .errstr = "R0 min value is negative",
6206 "bounds checks mixing signed and unsigned, variant 9",
6208 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
6209 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
6210 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
6211 BPF_LD_MAP_FD(BPF_REG_1, 0),
6212 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
6213 BPF_FUNC_map_lookup_elem),
6214 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 10),
6215 BPF_ST_MEM(BPF_DW, BPF_REG_10, -16, -8),
6216 BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_10, -16),
6217 BPF_LD_IMM64(BPF_REG_2, -9223372036854775808ULL),
6218 BPF_JMP_REG(BPF_JGT, BPF_REG_2, BPF_REG_1, 2),
6219 BPF_MOV64_IMM(BPF_REG_0, 0),
6221 BPF_JMP_IMM(BPF_JSGT, BPF_REG_1, 1, 2),
6222 BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
6223 BPF_ST_MEM(BPF_B, BPF_REG_0, 0, 0),
6224 BPF_MOV64_IMM(BPF_REG_0, 0),
6227 .fixup_map1 = { 3 },
6231 "bounds checks mixing signed and unsigned, variant 10",
6233 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
6234 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
6235 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
6236 BPF_LD_MAP_FD(BPF_REG_1, 0),
6237 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
6238 BPF_FUNC_map_lookup_elem),
6239 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 9),
6240 BPF_ST_MEM(BPF_DW, BPF_REG_10, -16, -8),
6241 BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_10, -16),
6242 BPF_MOV64_IMM(BPF_REG_2, 0),
6243 BPF_JMP_REG(BPF_JGT, BPF_REG_2, BPF_REG_1, 2),
6244 BPF_MOV64_IMM(BPF_REG_0, 0),
6246 BPF_JMP_IMM(BPF_JSGT, BPF_REG_1, 1, 2),
6247 BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
6248 BPF_ST_MEM(BPF_B, BPF_REG_0, 0, 0),
6249 BPF_MOV64_IMM(BPF_REG_0, 0),
6252 .fixup_map1 = { 3 },
6253 .errstr = "R0 min value is negative",
6257 "bounds checks mixing signed and unsigned, variant 11",
6259 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
6260 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
6261 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
6262 BPF_LD_MAP_FD(BPF_REG_1, 0),
6263 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
6264 BPF_FUNC_map_lookup_elem),
6265 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 9),
6266 BPF_ST_MEM(BPF_DW, BPF_REG_10, -16, -8),
6267 BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_10, -16),
6268 BPF_MOV64_IMM(BPF_REG_2, -1),
6269 BPF_JMP_REG(BPF_JGE, BPF_REG_2, BPF_REG_1, 2),
6271 BPF_MOV64_IMM(BPF_REG_0, 0),
6273 BPF_JMP_IMM(BPF_JSGT, BPF_REG_1, 1, 2),
6274 BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
6275 BPF_ST_MEM(BPF_B, BPF_REG_0, 0, 0),
6276 BPF_MOV64_IMM(BPF_REG_0, 0),
6279 .fixup_map1 = { 3 },
6280 .errstr = "R0 min value is negative",
6284 "bounds checks mixing signed and unsigned, variant 12",
6286 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
6287 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
6288 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
6289 BPF_LD_MAP_FD(BPF_REG_1, 0),
6290 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
6291 BPF_FUNC_map_lookup_elem),
6292 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 9),
6293 BPF_ST_MEM(BPF_DW, BPF_REG_10, -16, -8),
6294 BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_10, -16),
6295 BPF_MOV64_IMM(BPF_REG_2, -6),
6296 BPF_JMP_REG(BPF_JGE, BPF_REG_2, BPF_REG_1, 2),
6297 BPF_MOV64_IMM(BPF_REG_0, 0),
6299 BPF_JMP_IMM(BPF_JSGT, BPF_REG_1, 1, 2),
6300 BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
6301 BPF_ST_MEM(BPF_B, BPF_REG_0, 0, 0),
6302 BPF_MOV64_IMM(BPF_REG_0, 0),
6305 .fixup_map1 = { 3 },
6306 .errstr = "R0 min value is negative",
6310 "bounds checks mixing signed and unsigned, variant 13",
6312 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
6313 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
6314 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
6315 BPF_LD_MAP_FD(BPF_REG_1, 0),
6316 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
6317 BPF_FUNC_map_lookup_elem),
6318 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 6),
6319 BPF_ST_MEM(BPF_DW, BPF_REG_10, -16, -8),
6320 BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_10, -16),
6321 BPF_MOV64_IMM(BPF_REG_2, 2),
6322 BPF_JMP_REG(BPF_JGE, BPF_REG_2, BPF_REG_1, 2),
6323 BPF_MOV64_IMM(BPF_REG_7, 1),
6324 BPF_JMP_IMM(BPF_JSGT, BPF_REG_7, 0, 2),
6325 BPF_MOV64_IMM(BPF_REG_0, 0),
6327 BPF_ALU64_REG(BPF_ADD, BPF_REG_7, BPF_REG_1),
6328 BPF_JMP_IMM(BPF_JSGT, BPF_REG_7, 4, 2),
6329 BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_7),
6330 BPF_ST_MEM(BPF_B, BPF_REG_0, 0, 0),
6331 BPF_MOV64_IMM(BPF_REG_0, 0),
6334 .fixup_map1 = { 3 },
6335 .errstr = "R0 min value is negative",
6339 "bounds checks mixing signed and unsigned, variant 14",
6341 BPF_LDX_MEM(BPF_W, BPF_REG_9, BPF_REG_1,
6342 offsetof(struct __sk_buff, mark)),
6343 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
6344 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
6345 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
6346 BPF_LD_MAP_FD(BPF_REG_1, 0),
6347 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
6348 BPF_FUNC_map_lookup_elem),
6349 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 8),
6350 BPF_ST_MEM(BPF_DW, BPF_REG_10, -16, -8),
6351 BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_10, -16),
6352 BPF_MOV64_IMM(BPF_REG_2, -1),
6353 BPF_MOV64_IMM(BPF_REG_8, 2),
6354 BPF_JMP_IMM(BPF_JEQ, BPF_REG_9, 42, 6),
6355 BPF_JMP_REG(BPF_JSGT, BPF_REG_8, BPF_REG_1, 3),
6356 BPF_JMP_IMM(BPF_JSGT, BPF_REG_1, 1, 2),
6357 BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
6358 BPF_ST_MEM(BPF_B, BPF_REG_0, 0, 0),
6359 BPF_MOV64_IMM(BPF_REG_0, 0),
6361 BPF_JMP_REG(BPF_JGT, BPF_REG_1, BPF_REG_2, -3),
6362 BPF_JMP_IMM(BPF_JA, 0, 0, -7),
6364 .fixup_map1 = { 4 },
6365 .errstr = "R0 min value is negative",
6369 "bounds checks mixing signed and unsigned, variant 15",
6371 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
6372 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
6373 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
6374 BPF_LD_MAP_FD(BPF_REG_1, 0),
6375 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
6376 BPF_FUNC_map_lookup_elem),
6377 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 4),
6378 BPF_ST_MEM(BPF_DW, BPF_REG_10, -16, -8),
6379 BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_10, -16),
6380 BPF_MOV64_IMM(BPF_REG_2, -6),
6381 BPF_JMP_REG(BPF_JGE, BPF_REG_2, BPF_REG_1, 2),
6382 BPF_MOV64_IMM(BPF_REG_0, 0),
6384 BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
6385 BPF_JMP_IMM(BPF_JGT, BPF_REG_0, 1, 2),
6386 BPF_MOV64_IMM(BPF_REG_0, 0),
6388 BPF_ST_MEM(BPF_B, BPF_REG_0, 0, 0),
6389 BPF_MOV64_IMM(BPF_REG_0, 0),
6392 .fixup_map1 = { 3 },
6393 .errstr_unpriv = "R0 pointer comparison prohibited",
6394 .errstr = "R0 min value is negative",
6396 .result_unpriv = REJECT,
6399 "subtraction bounds (map value) variant 1",
6401 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
6402 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
6403 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
6404 BPF_LD_MAP_FD(BPF_REG_1, 0),
6405 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
6406 BPF_FUNC_map_lookup_elem),
6407 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 9),
6408 BPF_LDX_MEM(BPF_B, BPF_REG_1, BPF_REG_0, 0),
6409 BPF_JMP_IMM(BPF_JGT, BPF_REG_1, 0xff, 7),
6410 BPF_LDX_MEM(BPF_B, BPF_REG_3, BPF_REG_0, 1),
6411 BPF_JMP_IMM(BPF_JGT, BPF_REG_3, 0xff, 5),
6412 BPF_ALU64_REG(BPF_SUB, BPF_REG_1, BPF_REG_3),
6413 BPF_ALU64_IMM(BPF_RSH, BPF_REG_1, 56),
6414 BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
6415 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_0, 0),
6417 BPF_MOV64_IMM(BPF_REG_0, 0),
6420 .fixup_map1 = { 3 },
6421 .errstr = "R0 max value is outside of the array range",
6425 "subtraction bounds (map value) variant 2",
6427 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
6428 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
6429 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
6430 BPF_LD_MAP_FD(BPF_REG_1, 0),
6431 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
6432 BPF_FUNC_map_lookup_elem),
6433 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 8),
6434 BPF_LDX_MEM(BPF_B, BPF_REG_1, BPF_REG_0, 0),
6435 BPF_JMP_IMM(BPF_JGT, BPF_REG_1, 0xff, 6),
6436 BPF_LDX_MEM(BPF_B, BPF_REG_3, BPF_REG_0, 1),
6437 BPF_JMP_IMM(BPF_JGT, BPF_REG_3, 0xff, 4),
6438 BPF_ALU64_REG(BPF_SUB, BPF_REG_1, BPF_REG_3),
6439 BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
6440 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_0, 0),
6442 BPF_MOV64_IMM(BPF_REG_0, 0),
6445 .fixup_map1 = { 3 },
6446 .errstr = "R0 min value is negative, either use unsigned index or do a if (index >=0) check.",
6450 "variable-offset ctx access",
6452 /* Get an unknown value */
6453 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1, 0),
6454 /* Make it small and 4-byte aligned */
6455 BPF_ALU64_IMM(BPF_AND, BPF_REG_2, 4),
6456 /* add it to skb. We now have either &skb->len or
6457 * &skb->pkt_type, but we don't know which
6459 BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_2),
6460 /* dereference it */
6461 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1, 0),
6464 .errstr = "variable ctx access var_off=(0x0; 0x4)",
6466 .prog_type = BPF_PROG_TYPE_LWT_IN,
6469 "variable-offset stack access",
6471 /* Fill the top 8 bytes of the stack */
6472 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
6473 /* Get an unknown value */
6474 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1, 0),
6475 /* Make it small and 4-byte aligned */
6476 BPF_ALU64_IMM(BPF_AND, BPF_REG_2, 4),
6477 BPF_ALU64_IMM(BPF_SUB, BPF_REG_2, 8),
6478 /* add it to fp. We now have either fp-4 or fp-8, but
6479 * we don't know which
6481 BPF_ALU64_REG(BPF_ADD, BPF_REG_2, BPF_REG_10),
6482 /* dereference it */
6483 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_2, 0),
6486 .errstr = "variable stack access var_off=(0xfffffffffffffff8; 0x4)",
6488 .prog_type = BPF_PROG_TYPE_LWT_IN,
6491 "liveness pruning and write screening",
6493 /* Get an unknown value */
6494 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1, 0),
6495 /* branch conditions teach us nothing about R2 */
6496 BPF_JMP_IMM(BPF_JGE, BPF_REG_2, 0, 1),
6497 BPF_MOV64_IMM(BPF_REG_0, 0),
6498 BPF_JMP_IMM(BPF_JGE, BPF_REG_2, 0, 1),
6499 BPF_MOV64_IMM(BPF_REG_0, 0),
6502 .errstr = "R0 !read_ok",
6504 .prog_type = BPF_PROG_TYPE_LWT_IN,
/*
 * probe_filter_length() - determine the effective length of a test program.
 *
 * Scans the fixed-size insns[] array backwards for the last instruction
 * whose code/imm are non-zero; trailing all-zero entries are padding left
 * by the static initializer.
 * NOTE(review): this extraction is missing several lines of the function
 * body (the opening brace, the 'len' declaration, 'break' and the final
 * 'return len + 1;') — confirm against the upstream file before editing.
 */
6508 static int probe_filter_length(const struct bpf_insn *fp)
6512 	for (len = MAX_INSNS - 1; len > 0; --len)
6513 		if (fp[len].code != 0 || fp[len].imm != 0)
/*
 * create_map() - create a no-prealloc hash map with 'long long' keys.
 *
 * @size_value: value size in bytes (the only property the verifier tests
 *              actually depend on — no runtime lookups are performed).
 * @max_elem:   maximum number of elements.
 *
 * Prints a diagnostic with strerror(errno) on failure; presumably returns
 * the map fd (or the negative error) — the return statement is not visible
 * in this extraction.
 */
6518 static int create_map(uint32_t size_value, uint32_t max_elem)
6522 	fd = bpf_create_map(BPF_MAP_TYPE_HASH, sizeof(long long),
6523 			    size_value, max_elem, BPF_F_NO_PREALLOC);
6525 		printf("Failed to create hash map '%s'!\n", strerror(errno));
/*
 * create_prog_array() - create a BPF_MAP_TYPE_PROG_ARRAY used by tests
 * that exercise tail calls (fixup_prog entries patch its fd into the
 * test program).  Prints a diagnostic on failure.
 * NOTE(review): the trailing arguments of bpf_create_map() and the
 * return path are missing from this extraction.
 */
6530 static int create_prog_array(void)
6534 	fd = bpf_create_map(BPF_MAP_TYPE_PROG_ARRAY, sizeof(int),
6537 		printf("Failed to create prog array '%s'!\n", strerror(errno));
/*
 * create_map_in_map() - build an ARRAY_OF_MAPS whose inner map template
 * is a plain int-keyed array.
 *
 * The inner map only serves as a template for the outer map's value type,
 * so its fd is closed before returning.  Returns the outer map fd, or a
 * negative value on error (diagnostics are printed with strerror(errno)).
 */
6542 static int create_map_in_map(void)
6544 	int inner_map_fd, outer_map_fd;
6546 	inner_map_fd = bpf_create_map(BPF_MAP_TYPE_ARRAY, sizeof(int),
6548 	if (inner_map_fd < 0) {
6549 		printf("Failed to create array '%s'!\n", strerror(errno));
6550 		return inner_map_fd;
	/* The inner fd is passed as the template for the outer map's values. */
6553 	outer_map_fd = bpf_create_map_in_map(BPF_MAP_TYPE_ARRAY_OF_MAPS,
6554 					     sizeof(int), inner_map_fd, 1, 0);
6555 	if (outer_map_fd < 0)
6556 		printf("Failed to create array of maps '%s'!\n",
	/* Inner template fd is no longer needed once the outer map exists. */
6559 	close(inner_map_fd);
6561 	return outer_map_fd;
/* Shared buffer that receives the kernel verifier's log for each load. */
6564 static char bpf_vlog[32768];
/*
 * do_test_fixup() - patch map fds into a test program before loading.
 *
 * Each fixup_* array in the test lists instruction indices whose imm field
 * must be overwritten with the fd of a freshly created map of the matching
 * kind (hash map, test_val hash map, prog array, map-in-map).  The created
 * fds are also recorded in map_fds[] so the caller can close them later.
 * Index 0 is a valid instruction slot, so the arrays are terminated by a
 * zero entry and iterated with do/while.
 */
6566 static void do_test_fixup(struct bpf_test *test, struct bpf_insn *prog,
6569 	int *fixup_map1 = test->fixup_map1;
6570 	int *fixup_map2 = test->fixup_map2;
6571 	int *fixup_prog = test->fixup_prog;
6572 	int *fixup_map_in_map = test->fixup_map_in_map;
6574 	/* Allocating HTs with 1 elem is fine here, since we only test
6575 	 * for verifier and not do a runtime lookup, so the only thing
6576 	 * that really matters is value size in this case.
	/* Plain hash map keyed by long long. */
6579 		map_fds[0] = create_map(sizeof(long long), 1);
6581 			prog[*fixup_map1].imm = map_fds[0];
6583 		} while (*fixup_map1);
	/* Hash map whose value is a struct test_val. */
6587 		map_fds[1] = create_map(sizeof(struct test_val), 1);
6589 			prog[*fixup_map2].imm = map_fds[1];
6591 		} while (*fixup_map2);
	/* Prog array for tail-call tests. */
6595 		map_fds[2] = create_prog_array();
6597 			prog[*fixup_prog].imm = map_fds[2];
6599 		} while (*fixup_prog);
	/* Outer map for map-in-map tests. */
6602 	if (*fixup_map_in_map) {
6603 		map_fds[3] = create_map_in_map();
6605 			prog[*fixup_map_in_map].imm = map_fds[3];
6607 		} while (*fixup_map_in_map);
/*
 * do_test_single() - load one test program and compare the outcome
 * (accept/reject and, on reject, the verifier log message) against the
 * test's expectations, honouring the unprivileged variants when @unpriv
 * is set.  Updates *passes / *errors accordingly.
 *
 * A rejection caused purely by "Unknown alignment." on a test flagged
 * F_NEEDS_EFFICIENT_UNALIGNED_ACCESS is tolerated on architectures
 * without efficient unaligned access, and reported as a hard failure on
 * architectures that have it (CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS).
 */
6611 static void do_test_single(struct bpf_test *test, bool unpriv,
6612 			   int *passes, int *errors)
6614 	int fd_prog, expected_ret, reject_from_alignment;
6615 	struct bpf_insn *prog = test->insns;
6616 	int prog_len = probe_filter_length(prog);
6617 	int prog_type = test->prog_type;
6618 	int map_fds[MAX_NR_MAPS];
6619 	const char *expected_err;
6622 	for (i = 0; i < MAX_NR_MAPS; i++)
6625 	do_test_fixup(test, prog, map_fds);
	/* prog_type 0 means "unspecified": default to SOCKET_FILTER, which
	 * is loadable by unprivileged users.
	 */
6627 	fd_prog = bpf_verify_program(prog_type ? : BPF_PROG_TYPE_SOCKET_FILTER,
6628 		     prog, prog_len, test->flags & F_LOAD_WITH_STRICT_ALIGNMENT,
6629 		     "GPL", 0, bpf_vlog, sizeof(bpf_vlog), 1);
	/* Unprivileged expectations override the defaults when provided. */
6631 	expected_ret = unpriv && test->result_unpriv != UNDEF ?
6632 		       test->result_unpriv : test->result;
6633 	expected_err = unpriv && test->errstr_unpriv ?
6634 		       test->errstr_unpriv : test->errstr;
6636 	reject_from_alignment = fd_prog < 0 &&
6637 				(test->flags & F_NEEDS_EFFICIENT_UNALIGNED_ACCESS) &&
6638 				strstr(bpf_vlog, "Unknown alignment.");
6639 #ifdef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
	/* On these arches an alignment rejection is never acceptable. */
6640 	if (reject_from_alignment) {
6641 		printf("FAIL\nFailed due to alignment despite having efficient unaligned access: '%s'!\n",
6646 	if (expected_ret == ACCEPT) {
6647 		if (fd_prog < 0 && !reject_from_alignment) {
6648 			printf("FAIL\nFailed to load prog '%s'!\n",
6654 			printf("FAIL\nUnexpected success to load!\n");
	/* The expected error must appear somewhere in the verifier log. */
6657 		if (!strstr(bpf_vlog, expected_err) && !reject_from_alignment) {
6658 			printf("FAIL\nUnexpected error message!\n");
6664 	printf("OK%s\n", reject_from_alignment ?
6665 	       " (NOTE: reject due to unknown alignment)" : "");
	/* Close all map fds created by do_test_fixup(). */
6668 	for (i = 0; i < MAX_NR_MAPS; i++)
	/* Failure path: dump the verifier log for debugging. */
6674 	printf("%s", bpf_vlog);
/*
 * is_admin() - report whether the process currently holds an effective
 * CAP_SYS_ADMIN, using libcap (cap_get_proc()/cap_get_flag()).
 * Used to decide whether the unprivileged test variants apply.
 *
 * NOTE(review): the support probe checks CAP_IS_SUPPORTED(CAP_SETFCAP)
 * while the capability actually queried is CAP_SYS_ADMIN — looks
 * inconsistent; confirm this is intentional.
 */
6678 static bool is_admin(void)
6681 	cap_flag_value_t sysadmin = CAP_CLEAR;
6682 	const cap_value_t cap_val = CAP_SYS_ADMIN;
6684 #ifdef CAP_IS_SUPPORTED
6685 	if (!CAP_IS_SUPPORTED(CAP_SETFCAP)) {
6686 		perror("cap_get_flag");
6690 	caps = cap_get_proc();
6692 		perror("cap_get_proc");
6695 	if (cap_get_flag(caps, cap_val, CAP_EFFECTIVE, &sysadmin))
6696 		perror("cap_get_flag");
6699 	return (sysadmin == CAP_SET);
/*
 * set_admin() - raise (admin == true) or drop (admin == false) the
 * effective CAP_SYS_ADMIN of the current process via libcap, so the test
 * harness can run each test both privileged and unprivileged.
 * Diagnostics are printed with perror() on any libcap failure.
 */
6702 static int set_admin(bool admin)
6705 	const cap_value_t cap_val = CAP_SYS_ADMIN;
6708 	caps = cap_get_proc();
6710 		perror("cap_get_proc");
6713 	if (cap_set_flag(caps, CAP_EFFECTIVE, 1, &cap_val,
6714 				admin ? CAP_SET : CAP_CLEAR)) {
6715 		perror("cap_set_flag");
6718 	if (cap_set_proc(caps)) {
6719 		perror("cap_set_proc");
/*
 * do_test() - run tests[from..to) and print a pass/fail summary.
 *
 * @unpriv: true when the harness itself runs without CAP_SYS_ADMIN.
 * Tests with an unspecified prog_type (0) are additionally exercised in
 * an unprivileged pass ("#N/u"); every test gets a privileged pass
 * ("#N/p").  Returns EXIT_FAILURE if any test failed, EXIT_SUCCESS
 * otherwise.
 */
6729 static int do_test(bool unpriv, unsigned int from, unsigned int to)
6731 	int i, passes = 0, errors = 0;
6733 	for (i = from; i < to; i++) {
6734 		struct bpf_test *test = &tests[i];
6736 		/* Program types that are not supported by non-root we
		 * only run in the privileged pass below.
		 */
6739 		if (!test->prog_type) {
6742 			printf("#%d/u %s ", i, test->descr);
6743 			do_test_single(test, true, &passes, &errors);
		/* Privileged pass: always executed. */
6749 		printf("#%d/p %s ", i, test->descr);
6750 		do_test_single(test, false, &passes, &errors);
6754 	printf("Summary: %d PASSED, %d FAILED\n", passes, errors);
6755 	return errors ? EXIT_FAILURE : EXIT_SUCCESS;
6758 int main(int argc, char **argv)
6760 struct rlimit rinf = { RLIM_INFINITY, RLIM_INFINITY };
6761 struct rlimit rlim = { 1 << 20, 1 << 20 };
6762 unsigned int from = 0, to = ARRAY_SIZE(tests);
6763 bool unpriv = !is_admin();
6766 unsigned int l = atoi(argv[argc - 2]);
6767 unsigned int u = atoi(argv[argc - 1]);
6769 if (l < to && u < to) {
6773 } else if (argc == 2) {
6774 unsigned int t = atoi(argv[argc - 1]);
6782 setrlimit(RLIMIT_MEMLOCK, unpriv ? &rlim : &rinf);
6783 return do_test(unpriv, from, to);