4 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 2),
5 BPF_MOV64_IMM(BPF_REG_0, 1),
7 BPF_MOV64_IMM(BPF_REG_0, 2),
10 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
14 "calls: not on unpriviledged",
16 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 2),
17 BPF_MOV64_IMM(BPF_REG_0, 1),
19 BPF_MOV64_IMM(BPF_REG_0, 2),
22 .errstr_unpriv = "function calls to other bpf functions are allowed for root only",
23 .result_unpriv = REJECT,
28 "calls: div by 0 in subprog",
30 BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
31 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 8),
32 BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
33 BPF_LDX_MEM(BPF_W, BPF_REG_1, BPF_REG_1,
34 offsetof(struct __sk_buff, data_end)),
35 BPF_MOV64_REG(BPF_REG_2, BPF_REG_0),
36 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, 8),
37 BPF_JMP_REG(BPF_JGT, BPF_REG_2, BPF_REG_1, 1),
38 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_0, 0),
39 BPF_MOV64_IMM(BPF_REG_0, 1),
41 BPF_MOV32_IMM(BPF_REG_2, 0),
42 BPF_MOV32_IMM(BPF_REG_3, 1),
43 BPF_ALU32_REG(BPF_DIV, BPF_REG_3, BPF_REG_2),
44 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
45 offsetof(struct __sk_buff, data)),
48 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
53 "calls: multiple ret types in subprog 1",
55 BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
56 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 8),
57 BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
58 BPF_LDX_MEM(BPF_W, BPF_REG_1, BPF_REG_1,
59 offsetof(struct __sk_buff, data_end)),
60 BPF_MOV64_REG(BPF_REG_2, BPF_REG_0),
61 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, 8),
62 BPF_JMP_REG(BPF_JGT, BPF_REG_2, BPF_REG_1, 1),
63 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_0, 0),
64 BPF_MOV64_IMM(BPF_REG_0, 1),
66 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
67 offsetof(struct __sk_buff, data)),
68 BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 1),
69 BPF_MOV32_IMM(BPF_REG_0, 42),
72 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
74 .errstr = "R0 invalid mem access 'inv'",
77 "calls: multiple ret types in subprog 2",
79 BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
80 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 8),
81 BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
82 BPF_LDX_MEM(BPF_W, BPF_REG_1, BPF_REG_1,
83 offsetof(struct __sk_buff, data_end)),
84 BPF_MOV64_REG(BPF_REG_2, BPF_REG_0),
85 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, 8),
86 BPF_JMP_REG(BPF_JGT, BPF_REG_2, BPF_REG_1, 1),
87 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_0, 0),
88 BPF_MOV64_IMM(BPF_REG_0, 1),
90 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
91 offsetof(struct __sk_buff, data)),
92 BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
93 BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 9),
94 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
95 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
96 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
97 BPF_LD_MAP_FD(BPF_REG_1, 0),
98 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem),
99 BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 1),
100 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_6,
101 offsetof(struct __sk_buff, data)),
102 BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 64),
105 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
106 .fixup_map_hash_8b = { 16 },
108 .errstr = "R0 min value is outside of the array range",
111 "calls: overlapping caller/callee",
113 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 0),
114 BPF_MOV64_IMM(BPF_REG_0, 1),
117 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
118 .errstr = "last insn is not an exit or jmp",
122 "calls: wrong recursive calls",
124 BPF_JMP_IMM(BPF_JA, 0, 0, 4),
125 BPF_JMP_IMM(BPF_JA, 0, 0, 4),
126 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, -2),
127 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, -2),
128 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, -2),
129 BPF_MOV64_IMM(BPF_REG_0, 1),
132 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
133 .errstr = "jump out of range",
137 "calls: wrong src reg",
139 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 2, 0, 0),
140 BPF_MOV64_IMM(BPF_REG_0, 1),
143 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
144 .errstr = "BPF_CALL uses reserved fields",
148 "calls: wrong off value",
150 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, -1, 2),
151 BPF_MOV64_IMM(BPF_REG_0, 1),
153 BPF_MOV64_IMM(BPF_REG_0, 2),
156 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
157 .errstr = "BPF_CALL uses reserved fields",
161 "calls: jump back loop",
163 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, -1),
164 BPF_MOV64_IMM(BPF_REG_0, 1),
167 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
168 .errstr = "back-edge from insn 0 to 0",
172 "calls: conditional call",
174 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
175 offsetof(struct __sk_buff, mark)),
176 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 3),
177 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 2),
178 BPF_MOV64_IMM(BPF_REG_0, 1),
180 BPF_MOV64_IMM(BPF_REG_0, 2),
183 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
184 .errstr = "jump out of range",
188 "calls: conditional call 2",
190 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
191 offsetof(struct __sk_buff, mark)),
192 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 3),
193 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 4),
194 BPF_MOV64_IMM(BPF_REG_0, 1),
196 BPF_MOV64_IMM(BPF_REG_0, 2),
198 BPF_MOV64_IMM(BPF_REG_0, 3),
201 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
205 "calls: conditional call 3",
207 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
208 offsetof(struct __sk_buff, mark)),
209 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 3),
210 BPF_JMP_IMM(BPF_JA, 0, 0, 4),
211 BPF_MOV64_IMM(BPF_REG_0, 1),
213 BPF_MOV64_IMM(BPF_REG_0, 1),
214 BPF_JMP_IMM(BPF_JA, 0, 0, -6),
215 BPF_MOV64_IMM(BPF_REG_0, 3),
216 BPF_JMP_IMM(BPF_JA, 0, 0, -6),
218 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
219 .errstr = "back-edge from insn",
223 "calls: conditional call 4",
225 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
226 offsetof(struct __sk_buff, mark)),
227 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 3),
228 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 4),
229 BPF_MOV64_IMM(BPF_REG_0, 1),
231 BPF_MOV64_IMM(BPF_REG_0, 1),
232 BPF_JMP_IMM(BPF_JA, 0, 0, -5),
233 BPF_MOV64_IMM(BPF_REG_0, 3),
236 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
240 "calls: conditional call 5",
242 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
243 offsetof(struct __sk_buff, mark)),
244 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 3),
245 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 4),
246 BPF_MOV64_IMM(BPF_REG_0, 1),
248 BPF_MOV64_IMM(BPF_REG_0, 1),
249 BPF_JMP_IMM(BPF_JA, 0, 0, -6),
250 BPF_MOV64_IMM(BPF_REG_0, 3),
253 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
254 .errstr = "back-edge from insn",
258 "calls: conditional call 6",
260 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 2),
261 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, -2),
263 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
264 offsetof(struct __sk_buff, mark)),
267 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
268 .errstr = "back-edge from insn",
272 "calls: using r0 returned by callee",
274 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1),
276 BPF_MOV64_IMM(BPF_REG_0, 2),
279 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
283 "calls: using uninit r0 from callee",
285 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1),
289 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
290 .errstr = "!read_ok",
294 "calls: callee is using r1",
296 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1),
298 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
299 offsetof(struct __sk_buff, len)),
302 .prog_type = BPF_PROG_TYPE_SCHED_ACT,
304 .retval = TEST_DATA_LEN,
307 "calls: callee using args1",
309 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1),
311 BPF_MOV64_REG(BPF_REG_0, BPF_REG_1),
314 .errstr_unpriv = "allowed for root only",
315 .result_unpriv = REJECT,
317 .retval = POINTER_VALUE,
320 "calls: callee using wrong args2",
322 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1),
324 BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
327 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
328 .errstr = "R2 !read_ok",
332 "calls: callee using two args",
334 BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
335 BPF_LDX_MEM(BPF_W, BPF_REG_1, BPF_REG_6,
336 offsetof(struct __sk_buff, len)),
337 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_6,
338 offsetof(struct __sk_buff, len)),
339 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1),
341 BPF_MOV64_REG(BPF_REG_0, BPF_REG_1),
342 BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_2),
345 .errstr_unpriv = "allowed for root only",
346 .result_unpriv = REJECT,
348 .retval = TEST_DATA_LEN + TEST_DATA_LEN - ETH_HLEN - ETH_HLEN,
351 "calls: callee changing pkt pointers",
353 BPF_LDX_MEM(BPF_W, BPF_REG_6, BPF_REG_1, offsetof(struct xdp_md, data)),
354 BPF_LDX_MEM(BPF_W, BPF_REG_7, BPF_REG_1,
355 offsetof(struct xdp_md, data_end)),
356 BPF_MOV64_REG(BPF_REG_8, BPF_REG_6),
357 BPF_ALU64_IMM(BPF_ADD, BPF_REG_8, 8),
358 BPF_JMP_REG(BPF_JGT, BPF_REG_8, BPF_REG_7, 2),
359 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 3),
360 /* clear_all_pkt_pointers() has to walk all frames
361 * to make sure that pkt pointers in the caller
362 * are cleared when callee is calling a helper that
363 * adjusts packet size */
365 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_6, 0),
366 BPF_MOV32_IMM(BPF_REG_0, 0),
368 BPF_MOV64_IMM(BPF_REG_2, 0),
369 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_xdp_adjust_head),
373 .errstr = "R6 invalid mem access 'inv'",
374 .prog_type = BPF_PROG_TYPE_XDP,
375 .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
378 "calls: ptr null check in subprog",
380 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
381 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
382 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
383 BPF_LD_MAP_FD(BPF_REG_1, 0),
384 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
385 BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
386 BPF_MOV64_REG(BPF_REG_6, BPF_REG_0),
387 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 3),
388 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 1),
389 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_6, 0),
391 BPF_MOV64_IMM(BPF_REG_0, 0),
392 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, 1),
393 BPF_MOV64_IMM(BPF_REG_0, 1),
396 .errstr_unpriv = "function calls to other bpf functions are allowed for root only",
397 .fixup_map_hash_48b = { 3 },
398 .result_unpriv = REJECT,
403 "calls: two calls with args",
405 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1),
407 BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
408 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 6),
409 BPF_MOV64_REG(BPF_REG_7, BPF_REG_0),
410 BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
411 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 3),
412 BPF_ALU64_REG(BPF_ADD, BPF_REG_7, BPF_REG_0),
413 BPF_MOV64_REG(BPF_REG_0, BPF_REG_7),
415 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
416 offsetof(struct __sk_buff, len)),
419 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
421 .retval = TEST_DATA_LEN + TEST_DATA_LEN,
424 "calls: calls with stack arith",
426 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
427 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -64),
428 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1),
430 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -64),
431 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1),
433 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -64),
434 BPF_MOV64_IMM(BPF_REG_0, 42),
435 BPF_STX_MEM(BPF_DW, BPF_REG_2, BPF_REG_0, 0),
438 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
443 "calls: calls with misaligned stack access",
445 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
446 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -63),
447 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1),
449 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -61),
450 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1),
452 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -63),
453 BPF_MOV64_IMM(BPF_REG_0, 42),
454 BPF_STX_MEM(BPF_DW, BPF_REG_2, BPF_REG_0, 0),
457 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
458 .flags = F_LOAD_WITH_STRICT_ALIGNMENT,
459 .errstr = "misaligned stack access",
463 "calls: calls control flow, jump test",
465 BPF_MOV64_IMM(BPF_REG_0, 42),
466 BPF_JMP_IMM(BPF_JA, 0, 0, 2),
467 BPF_MOV64_IMM(BPF_REG_0, 43),
468 BPF_JMP_IMM(BPF_JA, 0, 0, 1),
469 BPF_JMP_IMM(BPF_JA, 0, 0, -3),
472 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
477 "calls: calls control flow, jump test 2",
479 BPF_MOV64_IMM(BPF_REG_0, 42),
480 BPF_JMP_IMM(BPF_JA, 0, 0, 2),
481 BPF_MOV64_IMM(BPF_REG_0, 43),
482 BPF_JMP_IMM(BPF_JA, 0, 0, 1),
483 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, -3),
486 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
487 .errstr = "jump out of range from insn 1 to 4",
491 "calls: two calls with bad jump",
493 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1),
495 BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
496 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 6),
497 BPF_MOV64_REG(BPF_REG_7, BPF_REG_0),
498 BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
499 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 3),
500 BPF_ALU64_REG(BPF_ADD, BPF_REG_7, BPF_REG_0),
501 BPF_MOV64_REG(BPF_REG_0, BPF_REG_7),
503 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
504 offsetof(struct __sk_buff, len)),
505 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, -3),
508 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
509 .errstr = "jump out of range from insn 11 to 9",
513 "calls: recursive call. test1",
515 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1),
517 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, -1),
520 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
521 .errstr = "back-edge",
525 "calls: recursive call. test2",
527 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1),
529 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, -3),
532 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
533 .errstr = "back-edge",
537 "calls: unreachable code",
539 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1),
541 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1),
543 BPF_MOV64_IMM(BPF_REG_0, 0),
545 BPF_MOV64_IMM(BPF_REG_0, 0),
548 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
549 .errstr = "unreachable insn 6",
553 "calls: invalid call",
555 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1),
557 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, -4),
560 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
561 .errstr = "invalid destination",
565 "calls: invalid call 2",
567 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1),
569 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 0x7fffffff),
572 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
573 .errstr = "invalid destination",
577 "calls: jumping across function bodies. test1",
579 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 2),
580 BPF_MOV64_IMM(BPF_REG_0, 0),
582 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, -3),
585 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
586 .errstr = "jump out of range",
590 "calls: jumping across function bodies. test2",
592 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, 3),
593 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 2),
594 BPF_MOV64_IMM(BPF_REG_0, 0),
598 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
599 .errstr = "jump out of range",
603 "calls: call without exit",
605 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1),
607 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1),
609 BPF_MOV64_IMM(BPF_REG_0, 0),
610 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, -2),
612 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
613 .errstr = "not an exit",
617 "calls: call into middle of ld_imm64",
619 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 3),
620 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 3),
621 BPF_MOV64_IMM(BPF_REG_0, 0),
623 BPF_LD_IMM64(BPF_REG_0, 0),
626 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
627 .errstr = "last insn",
631 "calls: call into middle of other call",
633 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 3),
634 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 3),
635 BPF_MOV64_IMM(BPF_REG_0, 0),
637 BPF_MOV64_IMM(BPF_REG_0, 0),
638 BPF_MOV64_IMM(BPF_REG_0, 0),
641 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
642 .errstr = "last insn",
646 "calls: ld_abs with changing ctx data in callee",
648 BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
649 BPF_LD_ABS(BPF_B, 0),
650 BPF_LD_ABS(BPF_H, 0),
651 BPF_LD_ABS(BPF_W, 0),
652 BPF_MOV64_REG(BPF_REG_7, BPF_REG_6),
653 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 5),
654 BPF_MOV64_REG(BPF_REG_6, BPF_REG_7),
655 BPF_LD_ABS(BPF_B, 0),
656 BPF_LD_ABS(BPF_H, 0),
657 BPF_LD_ABS(BPF_W, 0),
659 BPF_MOV64_IMM(BPF_REG_2, 1),
660 BPF_MOV64_IMM(BPF_REG_3, 2),
661 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_skb_vlan_push),
664 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
665 .errstr = "BPF_LD_[ABS|IND] instructions cannot be mixed",
669 "calls: two calls with bad fallthrough",
671 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1),
673 BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
674 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 6),
675 BPF_MOV64_REG(BPF_REG_7, BPF_REG_0),
676 BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
677 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 3),
678 BPF_ALU64_REG(BPF_ADD, BPF_REG_7, BPF_REG_0),
679 BPF_MOV64_REG(BPF_REG_0, BPF_REG_7),
680 BPF_MOV64_REG(BPF_REG_0, BPF_REG_0),
681 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
682 offsetof(struct __sk_buff, len)),
685 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
686 .errstr = "not an exit",
690 "calls: two calls with stack read",
692 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
693 BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
694 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -8),
695 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1),
697 BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
698 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 6),
699 BPF_MOV64_REG(BPF_REG_7, BPF_REG_0),
700 BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
701 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 3),
702 BPF_ALU64_REG(BPF_ADD, BPF_REG_7, BPF_REG_0),
703 BPF_MOV64_REG(BPF_REG_0, BPF_REG_7),
705 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1, 0),
708 .prog_type = BPF_PROG_TYPE_XDP,
712 "calls: two calls with stack write",
715 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
716 BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
717 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -8),
718 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
719 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -16),
720 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 2),
721 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_10, -16),
725 BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
726 BPF_MOV64_REG(BPF_REG_7, BPF_REG_2),
727 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 7),
728 BPF_MOV64_REG(BPF_REG_8, BPF_REG_0),
729 BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
730 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 4),
731 BPF_ALU64_REG(BPF_ADD, BPF_REG_8, BPF_REG_0),
732 BPF_MOV64_REG(BPF_REG_0, BPF_REG_8),
733 /* write into stack frame of main prog */
734 BPF_STX_MEM(BPF_DW, BPF_REG_7, BPF_REG_0, 0),
738 /* read from stack frame of main prog */
739 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1, 0),
742 .prog_type = BPF_PROG_TYPE_XDP,
746 "calls: stack overflow using two frames (pre-call access)",
749 BPF_ST_MEM(BPF_B, BPF_REG_10, -300, 0),
750 BPF_RAW_INSN(BPF_JMP|BPF_CALL, 0, 1, 0, 1),
754 BPF_ST_MEM(BPF_B, BPF_REG_10, -300, 0),
755 BPF_MOV64_IMM(BPF_REG_0, 0),
758 .prog_type = BPF_PROG_TYPE_XDP,
759 .errstr = "combined stack size",
763 "calls: stack overflow using two frames (post-call access)",
766 BPF_RAW_INSN(BPF_JMP|BPF_CALL, 0, 1, 0, 2),
767 BPF_ST_MEM(BPF_B, BPF_REG_10, -300, 0),
771 BPF_ST_MEM(BPF_B, BPF_REG_10, -300, 0),
772 BPF_MOV64_IMM(BPF_REG_0, 0),
775 .prog_type = BPF_PROG_TYPE_XDP,
776 .errstr = "combined stack size",
780 "calls: stack depth check using three frames. test1",
783 BPF_RAW_INSN(BPF_JMP|BPF_CALL, 0, 1, 0, 4), /* call A */
784 BPF_RAW_INSN(BPF_JMP|BPF_CALL, 0, 1, 0, 5), /* call B */
785 BPF_ST_MEM(BPF_B, BPF_REG_10, -32, 0),
786 BPF_MOV64_IMM(BPF_REG_0, 0),
789 BPF_ST_MEM(BPF_B, BPF_REG_10, -256, 0),
792 BPF_RAW_INSN(BPF_JMP|BPF_CALL, 0, 1, 0, -3), /* call A */
793 BPF_ST_MEM(BPF_B, BPF_REG_10, -64, 0),
796 .prog_type = BPF_PROG_TYPE_XDP,
797 /* stack_main=32, stack_A=256, stack_B=64
798 * and max(main+A, main+A+B) < 512 */
803 "calls: stack depth check using three frames. test2",
806 BPF_RAW_INSN(BPF_JMP|BPF_CALL, 0, 1, 0, 4), /* call A */
807 BPF_RAW_INSN(BPF_JMP|BPF_CALL, 0, 1, 0, 5), /* call B */
808 BPF_ST_MEM(BPF_B, BPF_REG_10, -32, 0),
809 BPF_MOV64_IMM(BPF_REG_0, 0),
812 BPF_ST_MEM(BPF_B, BPF_REG_10, -64, 0),
815 BPF_RAW_INSN(BPF_JMP|BPF_CALL, 0, 1, 0, -3), /* call A */
816 BPF_ST_MEM(BPF_B, BPF_REG_10, -256, 0),
819 .prog_type = BPF_PROG_TYPE_XDP,
820 /* stack_main=32, stack_A=64, stack_B=256
821 * and max(main+A, main+A+B) < 512 */
826 "calls: stack depth check using three frames. test3",
829 BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
830 BPF_RAW_INSN(BPF_JMP|BPF_CALL, 0, 1, 0, 6), /* call A */
831 BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
832 BPF_RAW_INSN(BPF_JMP|BPF_CALL, 0, 1, 0, 8), /* call B */
833 BPF_JMP_IMM(BPF_JGE, BPF_REG_6, 0, 1),
834 BPF_ST_MEM(BPF_B, BPF_REG_10, -64, 0),
835 BPF_MOV64_IMM(BPF_REG_0, 0),
838 BPF_JMP_IMM(BPF_JLT, BPF_REG_1, 10, 1),
840 BPF_ST_MEM(BPF_B, BPF_REG_10, -224, 0),
841 BPF_JMP_IMM(BPF_JA, 0, 0, -3),
843 BPF_JMP_IMM(BPF_JGT, BPF_REG_1, 2, 1),
844 BPF_RAW_INSN(BPF_JMP|BPF_CALL, 0, 1, 0, -6), /* call A */
845 BPF_ST_MEM(BPF_B, BPF_REG_10, -256, 0),
848 .prog_type = BPF_PROG_TYPE_XDP,
849 /* stack_main=64, stack_A=224, stack_B=256
850 * and max(main+A, main+A+B) > 512 */
852 .errstr = "combined stack",
856 "calls: stack depth check using three frames. test4",
862 /* void func1(int alloc_or_recurse) {
863 * if (alloc_or_recurse) {
864 * frame_pointer[-300] = 1;
866 * func2(alloc_or_recurse);
869 * void func2(int alloc_or_recurse) {
870 * if (alloc_or_recurse) {
871 * frame_pointer[-300] = 1; */
877 BPF_MOV64_IMM(BPF_REG_1, 0),
878 BPF_RAW_INSN(BPF_JMP|BPF_CALL, 0, 1, 0, 6), /* call A */
879 BPF_MOV64_IMM(BPF_REG_1, 1),
880 BPF_RAW_INSN(BPF_JMP|BPF_CALL, 0, 1, 0, 4), /* call A */
881 BPF_MOV64_IMM(BPF_REG_1, 1),
882 BPF_RAW_INSN(BPF_JMP|BPF_CALL, 0, 1, 0, 7), /* call B */
883 BPF_MOV64_IMM(BPF_REG_0, 0),
886 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, 2),
887 BPF_ST_MEM(BPF_B, BPF_REG_10, -300, 0),
889 BPF_RAW_INSN(BPF_JMP|BPF_CALL, 0, 1, 0, 1), /* call B */
892 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, 1),
893 BPF_ST_MEM(BPF_B, BPF_REG_10, -300, 0),
896 .prog_type = BPF_PROG_TYPE_XDP,
898 .errstr = "combined stack",
901 "calls: stack depth check using three frames. test5",
904 BPF_RAW_INSN(BPF_JMP|BPF_CALL, 0, 1, 0, 1), /* call A */
907 BPF_RAW_INSN(BPF_JMP|BPF_CALL, 0, 1, 0, 1), /* call B */
910 BPF_RAW_INSN(BPF_JMP|BPF_CALL, 0, 1, 0, 1), /* call C */
913 BPF_RAW_INSN(BPF_JMP|BPF_CALL, 0, 1, 0, 1), /* call D */
916 BPF_RAW_INSN(BPF_JMP|BPF_CALL, 0, 1, 0, 1), /* call E */
919 BPF_RAW_INSN(BPF_JMP|BPF_CALL, 0, 1, 0, 1), /* call F */
922 BPF_RAW_INSN(BPF_JMP|BPF_CALL, 0, 1, 0, 1), /* call G */
925 BPF_RAW_INSN(BPF_JMP|BPF_CALL, 0, 1, 0, 1), /* call H */
928 BPF_MOV64_IMM(BPF_REG_0, 0),
931 .prog_type = BPF_PROG_TYPE_XDP,
932 .errstr = "call stack",
936 "calls: stack depth check in dead code",
939 BPF_MOV64_IMM(BPF_REG_1, 0),
940 BPF_RAW_INSN(BPF_JMP|BPF_CALL, 0, 1, 0, 1), /* call A */
943 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, 1),
944 BPF_RAW_INSN(BPF_JMP|BPF_CALL, 0, 1, 0, 2), /* call B */
945 BPF_MOV64_IMM(BPF_REG_0, 0),
948 BPF_RAW_INSN(BPF_JMP|BPF_CALL, 0, 1, 0, 1), /* call C */
951 BPF_RAW_INSN(BPF_JMP|BPF_CALL, 0, 1, 0, 1), /* call D */
954 BPF_RAW_INSN(BPF_JMP|BPF_CALL, 0, 1, 0, 1), /* call E */
957 BPF_RAW_INSN(BPF_JMP|BPF_CALL, 0, 1, 0, 1), /* call F */
960 BPF_RAW_INSN(BPF_JMP|BPF_CALL, 0, 1, 0, 1), /* call G */
963 BPF_RAW_INSN(BPF_JMP|BPF_CALL, 0, 1, 0, 1), /* call H */
966 BPF_MOV64_IMM(BPF_REG_0, 0),
969 .prog_type = BPF_PROG_TYPE_XDP,
970 .errstr = "call stack",
974 "calls: spill into caller stack frame",
976 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
977 BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
978 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -8),
979 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1),
981 BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_1, 0),
982 BPF_MOV64_IMM(BPF_REG_0, 0),
985 .prog_type = BPF_PROG_TYPE_XDP,
986 .errstr = "cannot spill",
990 "calls: write into caller stack frame",
992 BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
993 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -8),
994 BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
995 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 2),
996 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_6, 0),
998 BPF_ST_MEM(BPF_DW, BPF_REG_1, 0, 42),
999 BPF_MOV64_IMM(BPF_REG_0, 0),
1002 .prog_type = BPF_PROG_TYPE_XDP,
1007 "calls: write into callee stack frame",
1009 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 2),
1010 BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, 42),
1012 BPF_MOV64_REG(BPF_REG_0, BPF_REG_10),
1013 BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, -8),
1016 .prog_type = BPF_PROG_TYPE_XDP,
1017 .errstr = "cannot return stack pointer",
1021 "calls: two calls with stack write and void return",
1024 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
1025 BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
1026 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -8),
1027 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
1028 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -16),
1029 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 2),
1030 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_10, -16),
1034 BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
1035 BPF_MOV64_REG(BPF_REG_7, BPF_REG_2),
1036 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 3),
1037 BPF_MOV64_REG(BPF_REG_1, BPF_REG_7),
1038 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1),
1042 /* write into stack frame of main prog */
1043 BPF_ST_MEM(BPF_DW, BPF_REG_1, 0, 0),
1044 BPF_EXIT_INSN(), /* void return */
1046 .prog_type = BPF_PROG_TYPE_XDP,
1050 "calls: ambiguous return value",
1052 BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
1053 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 5),
1054 BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
1055 BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
1056 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 2),
1057 BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
1059 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, 1),
1060 BPF_MOV64_IMM(BPF_REG_0, 0),
1063 .errstr_unpriv = "allowed for root only",
1064 .result_unpriv = REJECT,
1065 .errstr = "R0 !read_ok",
1069 "calls: two calls that return map_value",
1072 /* pass fp-16, fp-8 into a function */
1073 BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
1074 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -8),
1075 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
1076 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -16),
1077 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 8),
1079 /* fetch map_value_ptr from the stack of this function */
1080 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_10, -8),
1081 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 1),
1082 /* write into map value */
1083 BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, 0),
1084 /* fetch secound map_value_ptr from the stack */
1085 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_10, -16),
1086 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 1),
1087 /* write into map value */
1088 BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, 0),
1089 BPF_MOV64_IMM(BPF_REG_0, 0),
1093 /* call 3rd function twice */
1094 BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
1095 BPF_MOV64_REG(BPF_REG_7, BPF_REG_2),
1096 /* first time with fp-8 */
1097 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 3),
1098 BPF_MOV64_REG(BPF_REG_1, BPF_REG_7),
1099 /* second time with fp-16 */
1100 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1),
1104 BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
1105 /* lookup from map */
1106 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
1107 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
1108 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
1109 BPF_LD_MAP_FD(BPF_REG_1, 0),
1110 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem),
1111 /* write map_value_ptr into stack frame of main prog */
1112 BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_0, 0),
1113 BPF_MOV64_IMM(BPF_REG_0, 0),
1114 BPF_EXIT_INSN(), /* return 0 */
1116 .prog_type = BPF_PROG_TYPE_XDP,
1117 .fixup_map_hash_8b = { 23 },
1121 "calls: two calls that return map_value with bool condition",
1124 /* pass fp-16, fp-8 into a function */
1125 BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
1126 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -8),
1127 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
1128 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -16),
1129 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 2),
1130 BPF_MOV64_IMM(BPF_REG_0, 0),
1134 /* call 3rd function twice */
1135 BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
1136 BPF_MOV64_REG(BPF_REG_7, BPF_REG_2),
1137 /* first time with fp-8 */
1138 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 9),
1139 BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 1, 2),
1140 /* fetch map_value_ptr from the stack of this function */
1141 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_6, 0),
1142 /* write into map value */
1143 BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, 0),
1144 BPF_MOV64_REG(BPF_REG_1, BPF_REG_7),
1145 /* second time with fp-16 */
1146 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 4),
1147 BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 1, 2),
1148 /* fetch secound map_value_ptr from the stack */
1149 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_7, 0),
1150 /* write into map value */
1151 BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, 0),
1155 BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
1156 /* lookup from map */
1157 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
1158 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
1159 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
1160 BPF_LD_MAP_FD(BPF_REG_1, 0),
1161 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem),
1162 BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 2),
1163 BPF_MOV64_IMM(BPF_REG_0, 0),
1164 BPF_EXIT_INSN(), /* return 0 */
1165 /* write map_value_ptr into stack frame of main prog */
1166 BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_0, 0),
1167 BPF_MOV64_IMM(BPF_REG_0, 1),
1168 BPF_EXIT_INSN(), /* return 1 */
1170 .prog_type = BPF_PROG_TYPE_XDP,
1171 .fixup_map_hash_8b = { 23 },
1175 "calls: two calls that return map_value with incorrect bool check",
1178 /* pass fp-16, fp-8 into a function */
1179 BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
1180 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -8),
1181 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
1182 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -16),
1183 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 2),
1184 BPF_MOV64_IMM(BPF_REG_0, 0),
1188 /* call 3rd function twice */
1189 BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
1190 BPF_MOV64_REG(BPF_REG_7, BPF_REG_2),
1191 /* first time with fp-8 */
1192 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 9),
1193 BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 1, 2),
1194 /* fetch map_value_ptr from the stack of this function */
1195 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_6, 0),
1196 /* write into map value */
1197 BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, 0),
1198 BPF_MOV64_REG(BPF_REG_1, BPF_REG_7),
1199 /* second time with fp-16 */
1200 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 4),
1201 BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 2),
1202 /* fetch secound map_value_ptr from the stack */
1203 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_7, 0),
1204 /* write into map value */
1205 BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, 0),
1209 BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
1210 /* lookup from map */
1211 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
1212 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
1213 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
1214 BPF_LD_MAP_FD(BPF_REG_1, 0),
1215 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem),
1216 BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 2),
1217 BPF_MOV64_IMM(BPF_REG_0, 0),
1218 BPF_EXIT_INSN(), /* return 0 */
1219 /* write map_value_ptr into stack frame of main prog */
1220 BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_0, 0),
1221 BPF_MOV64_IMM(BPF_REG_0, 1),
1222 BPF_EXIT_INSN(), /* return 1 */
1224 .prog_type = BPF_PROG_TYPE_XDP,
1225 .fixup_map_hash_8b = { 23 },
1227 .errstr = "invalid read from stack off -16+0 size 8",
/* Subprog 1 receives fp-8/fp-16 pointers from the caller and stores
 * map_value pointers through them; subprog 2 dereferences them guarded by
 * the 0|1 flags in arg2/arg4.  Rejected: the second store is 8 bytes at
 * offset 2 of an 8-byte map value (see .errstr below).
 */
1230 "calls: two calls that receive map_value via arg=ptr_stack_of_caller. test1",
1233 /* pass fp-16, fp-8 into a function */
1234 BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
1235 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -8),
1236 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
1237 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -16),
1238 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 2),
1239 BPF_MOV64_IMM(BPF_REG_0, 0),
1243 BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
1244 BPF_MOV64_REG(BPF_REG_7, BPF_REG_2),
1245 /* 1st lookup from map */
1246 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
1247 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
1248 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
1249 BPF_LD_MAP_FD(BPF_REG_1, 0),
1250 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem),
1251 BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 2),
1252 BPF_MOV64_IMM(BPF_REG_8, 0),
1253 BPF_JMP_IMM(BPF_JA, 0, 0, 2),
1254 /* write map_value_ptr into stack frame of main prog at fp-8 */
1255 BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_0, 0),
1256 BPF_MOV64_IMM(BPF_REG_8, 1),
1258 /* 2nd lookup from map */
1259 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10), /* 20 */
1260 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
1261 BPF_LD_MAP_FD(BPF_REG_1, 0),
1262 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, /* 24 */
1263 BPF_FUNC_map_lookup_elem),
1264 BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 2),
1265 BPF_MOV64_IMM(BPF_REG_9, 0),
1266 BPF_JMP_IMM(BPF_JA, 0, 0, 2),
1267 /* write map_value_ptr into stack frame of main prog at fp-16 */
1268 BPF_STX_MEM(BPF_DW, BPF_REG_7, BPF_REG_0, 0),
1269 BPF_MOV64_IMM(BPF_REG_9, 1),
1271 /* call 3rd func with fp-8, 0|1, fp-16, 0|1 */
1272 BPF_MOV64_REG(BPF_REG_1, BPF_REG_6), /* 30 */
1273 BPF_MOV64_REG(BPF_REG_2, BPF_REG_8),
1274 BPF_MOV64_REG(BPF_REG_3, BPF_REG_7),
1275 BPF_MOV64_REG(BPF_REG_4, BPF_REG_9),
1276 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1), /* 34 */
1280 /* if arg2 == 1 do *arg1 = 0 */
1281 BPF_JMP_IMM(BPF_JNE, BPF_REG_2, 1, 2),
1282 /* fetch map_value_ptr from the stack of this function */
1283 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, 0),
1284 /* write into map value */
1285 BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, 0),
1287 /* if arg4 == 1 do *arg3 = 0 */
1288 BPF_JMP_IMM(BPF_JNE, BPF_REG_4, 1, 2),
1289 /* fetch map_value_ptr from the stack of this function */
1290 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_3, 0),
1291 /* write into map value at off=2, out of bounds for value_size=8 */
1292 BPF_ST_MEM(BPF_DW, BPF_REG_0, 2, 0),
1295 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
1296 .fixup_map_hash_8b = { 12, 22 },
1298 .errstr = "invalid access to map value, value_size=8 off=2 size=8",
1299 .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
/* Same layout as test1 above, but both guarded stores are at offset 0,
 * which is in-bounds for the 8-byte map value.
 * NOTE(review): the .result field for this test is not visible in this
 * chunk of the file.
 */
1302 "calls: two calls that receive map_value via arg=ptr_stack_of_caller. test2",
1305 /* pass fp-16, fp-8 into a function */
1306 BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
1307 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -8),
1308 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
1309 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -16),
1310 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 2),
1311 BPF_MOV64_IMM(BPF_REG_0, 0),
1315 BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
1316 BPF_MOV64_REG(BPF_REG_7, BPF_REG_2),
1317 /* 1st lookup from map */
1318 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
1319 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
1320 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
1321 BPF_LD_MAP_FD(BPF_REG_1, 0),
1322 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem),
1323 BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 2),
1324 BPF_MOV64_IMM(BPF_REG_8, 0),
1325 BPF_JMP_IMM(BPF_JA, 0, 0, 2),
1326 /* write map_value_ptr into stack frame of main prog at fp-8 */
1327 BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_0, 0),
1328 BPF_MOV64_IMM(BPF_REG_8, 1),
1330 /* 2nd lookup from map */
1331 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10), /* 20 */
1332 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
1333 BPF_LD_MAP_FD(BPF_REG_1, 0),
1334 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, /* 24 */
1335 BPF_FUNC_map_lookup_elem),
1336 BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 2),
1337 BPF_MOV64_IMM(BPF_REG_9, 0),
1338 BPF_JMP_IMM(BPF_JA, 0, 0, 2),
1339 /* write map_value_ptr into stack frame of main prog at fp-16 */
1340 BPF_STX_MEM(BPF_DW, BPF_REG_7, BPF_REG_0, 0),
1341 BPF_MOV64_IMM(BPF_REG_9, 1),
1343 /* call 3rd func with fp-8, 0|1, fp-16, 0|1 */
1344 BPF_MOV64_REG(BPF_REG_1, BPF_REG_6), /* 30 */
1345 BPF_MOV64_REG(BPF_REG_2, BPF_REG_8),
1346 BPF_MOV64_REG(BPF_REG_3, BPF_REG_7),
1347 BPF_MOV64_REG(BPF_REG_4, BPF_REG_9),
1348 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1), /* 34 */
1352 /* if arg2 == 1 do *arg1 = 0 */
1353 BPF_JMP_IMM(BPF_JNE, BPF_REG_2, 1, 2),
1354 /* fetch map_value_ptr from the stack of this function */
1355 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, 0),
1356 /* write into map value */
1357 BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, 0),
1359 /* if arg4 == 1 do *arg3 = 0 */
1360 BPF_JMP_IMM(BPF_JNE, BPF_REG_4, 1, 2),
1361 /* fetch map_value_ptr from the stack of this function */
1362 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_3, 0),
1363 /* write into map value */
1364 BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, 0),
1367 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
1368 .fixup_map_hash_8b = { 12, 22 },
/* Variant of the previous tests that reaches the shared code via
 * conditional jumps (JNE/JA) instead of BPF_CALL, including a backward
 * jump at the end.  Like test1, it performs an 8-byte store at offset 2
 * of an 8-byte map value (see .errstr below).
 */
1372 "calls: two jumps that receive map_value via arg=ptr_stack_of_jumper. test3",
1375 /* pass fp-16, fp-8 into a function */
1376 BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
1377 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -8),
1378 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
1379 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -16),
1380 BPF_JMP_IMM(BPF_JNE, BPF_REG_1, 0, 2),
1381 BPF_MOV64_IMM(BPF_REG_0, 0),
1385 BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
1386 BPF_MOV64_REG(BPF_REG_7, BPF_REG_2),
1387 /* 1st lookup from map */
1388 BPF_ST_MEM(BPF_DW, BPF_REG_10, -24, 0),
1389 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
1390 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -24),
1391 BPF_LD_MAP_FD(BPF_REG_1, 0),
1392 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem),
1393 BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 2),
1394 BPF_MOV64_IMM(BPF_REG_8, 0),
1395 BPF_JMP_IMM(BPF_JA, 0, 0, 2),
1396 /* write map_value_ptr into stack frame of main prog at fp-8 */
1397 BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_0, 0),
1398 BPF_MOV64_IMM(BPF_REG_8, 1),
1400 /* 2nd lookup from map */
1401 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
1402 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -24),
1403 BPF_LD_MAP_FD(BPF_REG_1, 0),
1404 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem),
1405 BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 2),
1406 BPF_MOV64_IMM(BPF_REG_9, 0), // 26
1407 BPF_JMP_IMM(BPF_JA, 0, 0, 2),
1408 /* write map_value_ptr into stack frame of main prog at fp-16 */
1409 BPF_STX_MEM(BPF_DW, BPF_REG_7, BPF_REG_0, 0),
1410 BPF_MOV64_IMM(BPF_REG_9, 1),
1412 /* call 3rd func with fp-8, 0|1, fp-16, 0|1 */
1413 BPF_MOV64_REG(BPF_REG_1, BPF_REG_6), // 30
1414 BPF_MOV64_REG(BPF_REG_2, BPF_REG_8),
1415 BPF_MOV64_REG(BPF_REG_3, BPF_REG_7),
1416 BPF_MOV64_REG(BPF_REG_4, BPF_REG_9),
1417 BPF_JMP_IMM(BPF_JNE, BPF_REG_1, 0, 1), // 34
1418 BPF_JMP_IMM(BPF_JA, 0, 0, -30),
1421 /* if arg2 == 1 do *arg1 = 0 */
1422 BPF_JMP_IMM(BPF_JNE, BPF_REG_2, 1, 2),
1423 /* fetch map_value_ptr from the stack of this function */
1424 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, 0),
1425 /* write into map value */
1426 BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, 0),
1428 /* if arg4 == 1 do *arg3 = 0 */
1429 BPF_JMP_IMM(BPF_JNE, BPF_REG_4, 1, 2),
1430 /* fetch map_value_ptr from the stack of this function */
1431 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_3, 0),
1432 /* write into map value at off=2, out of bounds for value_size=8 */
1433 BPF_ST_MEM(BPF_DW, BPF_REG_0, 2, 0),
1434 BPF_JMP_IMM(BPF_JA, 0, 0, -8),
1436 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
1437 .fixup_map_hash_8b = { 12, 22 },
1439 .errstr = "invalid access to map value, value_size=8 off=2 size=8",
1440 .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
/* Here the subprog stores the raw lookup result (map_value_ptr_or_null)
 * into the caller's stack BEFORE the NULL check, and encodes the check's
 * outcome in the 0|1 flag.  The third subprog only dereferences when the
 * flag is 1, i.e. when the pointer was proven non-NULL.
 * NOTE(review): the .result field for this test is not visible in this
 * chunk of the file.
 */
1443 "calls: two calls that receive map_value_ptr_or_null via arg. test1",
1446 /* pass fp-16, fp-8 into a function */
1447 BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
1448 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -8),
1449 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
1450 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -16),
1451 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 2),
1452 BPF_MOV64_IMM(BPF_REG_0, 0),
1456 BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
1457 BPF_MOV64_REG(BPF_REG_7, BPF_REG_2),
1458 /* 1st lookup from map */
1459 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
1460 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
1461 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
1462 BPF_LD_MAP_FD(BPF_REG_1, 0),
1463 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem),
1464 /* write map_value_ptr_or_null into stack frame of main prog at fp-8 */
1465 BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_0, 0),
1466 BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 2),
1467 BPF_MOV64_IMM(BPF_REG_8, 0),
1468 BPF_JMP_IMM(BPF_JA, 0, 0, 1),
1469 BPF_MOV64_IMM(BPF_REG_8, 1),
1471 /* 2nd lookup from map */
1472 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
1473 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
1474 BPF_LD_MAP_FD(BPF_REG_1, 0),
1475 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem),
1476 /* write map_value_ptr_or_null into stack frame of main prog at fp-16 */
1477 BPF_STX_MEM(BPF_DW, BPF_REG_7, BPF_REG_0, 0),
1478 BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 2),
1479 BPF_MOV64_IMM(BPF_REG_9, 0),
1480 BPF_JMP_IMM(BPF_JA, 0, 0, 1),
1481 BPF_MOV64_IMM(BPF_REG_9, 1),
1483 /* call 3rd func with fp-8, 0|1, fp-16, 0|1 */
1484 BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
1485 BPF_MOV64_REG(BPF_REG_2, BPF_REG_8),
1486 BPF_MOV64_REG(BPF_REG_3, BPF_REG_7),
1487 BPF_MOV64_REG(BPF_REG_4, BPF_REG_9),
1488 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1),
1492 /* if arg2 == 1 do *arg1 = 0 */
1493 BPF_JMP_IMM(BPF_JNE, BPF_REG_2, 1, 2),
1494 /* fetch map_value_ptr from the stack of this function */
1495 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, 0),
1496 /* write into map value */
1497 BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, 0),
1499 /* if arg4 == 1 do *arg3 = 0 */
1500 BPF_JMP_IMM(BPF_JNE, BPF_REG_4, 1, 2),
1501 /* fetch map_value_ptr from the stack of this function */
1502 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_3, 0),
1503 /* write into map value */
1504 BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, 0),
1507 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
1508 .fixup_map_hash_8b = { 12, 22 },
/* Same as test1 above except the second guard is inverted: it
 * dereferences arg3 when arg4 == 0, i.e. exactly when the second lookup
 * returned NULL.  Rejected with the .errstr below.
 */
1512 "calls: two calls that receive map_value_ptr_or_null via arg. test2",
1515 /* pass fp-16, fp-8 into a function */
1516 BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
1517 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -8),
1518 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
1519 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -16),
1520 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 2),
1521 BPF_MOV64_IMM(BPF_REG_0, 0),
1525 BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
1526 BPF_MOV64_REG(BPF_REG_7, BPF_REG_2),
1527 /* 1st lookup from map */
1528 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
1529 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
1530 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
1531 BPF_LD_MAP_FD(BPF_REG_1, 0),
1532 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem),
1533 /* write map_value_ptr_or_null into stack frame of main prog at fp-8 */
1534 BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_0, 0),
1535 BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 2),
1536 BPF_MOV64_IMM(BPF_REG_8, 0),
1537 BPF_JMP_IMM(BPF_JA, 0, 0, 1),
1538 BPF_MOV64_IMM(BPF_REG_8, 1),
1540 /* 2nd lookup from map */
1541 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
1542 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
1543 BPF_LD_MAP_FD(BPF_REG_1, 0),
1544 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem),
1545 /* write map_value_ptr_or_null into stack frame of main prog at fp-16 */
1546 BPF_STX_MEM(BPF_DW, BPF_REG_7, BPF_REG_0, 0),
1547 BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 2),
1548 BPF_MOV64_IMM(BPF_REG_9, 0),
1549 BPF_JMP_IMM(BPF_JA, 0, 0, 1),
1550 BPF_MOV64_IMM(BPF_REG_9, 1),
1552 /* call 3rd func with fp-8, 0|1, fp-16, 0|1 */
1553 BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
1554 BPF_MOV64_REG(BPF_REG_2, BPF_REG_8),
1555 BPF_MOV64_REG(BPF_REG_3, BPF_REG_7),
1556 BPF_MOV64_REG(BPF_REG_4, BPF_REG_9),
1557 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1),
1561 /* if arg2 == 1 do *arg1 = 0 */
1562 BPF_JMP_IMM(BPF_JNE, BPF_REG_2, 1, 2),
1563 /* fetch map_value_ptr from the stack of this function */
1564 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, 0),
1565 /* write into map value */
1566 BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, 0),
1568 /* if arg4 == 0 do *arg3 = 0 — wrong polarity: flag 0 means NULL */
1569 BPF_JMP_IMM(BPF_JNE, BPF_REG_4, 0, 2),
1570 /* fetch map_value_ptr from the stack of this function */
1571 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_3, 0),
1572 /* write into map value */
1573 BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, 0),
1576 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
1577 .fixup_map_hash_8b = { 12, 22 },
1579 .errstr = "R0 invalid mem access 'inv'",
/* Subprog spills an unchecked pkt_ptr into the caller's stack slot, then
 * does the data_end bounds check before reading it back and writing to
 * the packet.  Carries .retval = POINTER_VALUE (see below).
 */
1582 "calls: pkt_ptr spill into caller stack",
1584 BPF_MOV64_REG(BPF_REG_4, BPF_REG_10),
1585 BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, -8),
1586 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1),
1590 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
1591 offsetof(struct __sk_buff, data)),
1592 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
1593 offsetof(struct __sk_buff, data_end)),
1594 BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
1595 BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
1596 /* spill unchecked pkt_ptr into stack of caller */
1597 BPF_STX_MEM(BPF_DW, BPF_REG_4, BPF_REG_2, 0),
1598 BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 2),
1599 /* now the pkt range is verified, read pkt_ptr from stack */
1600 BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_4, 0),
1601 /* write 4 bytes into packet */
1602 BPF_ST_MEM(BPF_W, BPF_REG_2, 0, 0),
1606 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
1607 .retval = POINTER_VALUE,
1608 .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
/* Like spill test 1, but the CALLER reads the spilled pointer back and
 * writes through it unconditionally, without repeating the bounds check.
 * Rejected: "invalid access to packet" (see .errstr below).
 */
1611 "calls: pkt_ptr spill into caller stack 2",
1613 BPF_MOV64_REG(BPF_REG_4, BPF_REG_10),
1614 BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, -8),
1615 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 3),
1616 /* Marking is still kept, but not in all cases safe. */
1617 BPF_LDX_MEM(BPF_DW, BPF_REG_4, BPF_REG_10, -8),
1618 BPF_ST_MEM(BPF_W, BPF_REG_4, 0, 0),
1622 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
1623 offsetof(struct __sk_buff, data)),
1624 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
1625 offsetof(struct __sk_buff, data_end)),
1626 BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
1627 BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
1628 /* spill unchecked pkt_ptr into stack of caller */
1629 BPF_STX_MEM(BPF_DW, BPF_REG_4, BPF_REG_2, 0),
1630 BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 2),
1631 /* now the pkt range is verified, read pkt_ptr from stack */
1632 BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_4, 0),
1633 /* write 4 bytes into packet */
1634 BPF_ST_MEM(BPF_W, BPF_REG_2, 0, 0),
1637 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
1638 .errstr = "invalid access to packet",
1640 .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
/* The caller only dereferences the spilled pointer when the subprog's
 * return value (r5: 1 = range checked, 0 = not) says it is safe.
 * NOTE(review): the .result/.retval fields for this test are not visible
 * in this chunk of the file.
 */
1643 "calls: pkt_ptr spill into caller stack 3",
1645 BPF_MOV64_REG(BPF_REG_4, BPF_REG_10),
1646 BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, -8),
1647 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 4),
1648 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 2),
1649 /* Marking is still kept and safe here. */
1650 BPF_LDX_MEM(BPF_DW, BPF_REG_4, BPF_REG_10, -8),
1651 BPF_ST_MEM(BPF_W, BPF_REG_4, 0, 0),
1655 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
1656 offsetof(struct __sk_buff, data)),
1657 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
1658 offsetof(struct __sk_buff, data_end)),
1659 BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
1660 BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
1661 /* spill unchecked pkt_ptr into stack of caller */
1662 BPF_STX_MEM(BPF_DW, BPF_REG_4, BPF_REG_2, 0),
1663 BPF_MOV64_IMM(BPF_REG_5, 0),
1664 BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 3),
1665 BPF_MOV64_IMM(BPF_REG_5, 1),
1666 /* now the pkt range is verified, read pkt_ptr from stack */
1667 BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_4, 0),
1668 /* write 4 bytes into packet */
1669 BPF_ST_MEM(BPF_W, BPF_REG_2, 0, 0),
1670 BPF_MOV64_REG(BPF_REG_0, BPF_REG_5),
1673 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
1676 .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
/* Like spill test 3 but the subprog never reads the spilled pointer back
 * itself; the caller's guarded read checks that the verifier's stack-slot
 * marking propagated across the call boundary.
 * NOTE(review): the .result/.retval fields for this test are not visible
 * in this chunk of the file.
 */
1679 "calls: pkt_ptr spill into caller stack 4",
1681 BPF_MOV64_REG(BPF_REG_4, BPF_REG_10),
1682 BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, -8),
1683 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 4),
1684 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 2),
1685 /* Check marking propagated. */
1686 BPF_LDX_MEM(BPF_DW, BPF_REG_4, BPF_REG_10, -8),
1687 BPF_ST_MEM(BPF_W, BPF_REG_4, 0, 0),
1691 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
1692 offsetof(struct __sk_buff, data)),
1693 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
1694 offsetof(struct __sk_buff, data_end)),
1695 BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
1696 BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
1697 /* spill unchecked pkt_ptr into stack of caller */
1698 BPF_STX_MEM(BPF_DW, BPF_REG_4, BPF_REG_2, 0),
1699 BPF_MOV64_IMM(BPF_REG_5, 0),
1700 BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 2),
1701 BPF_MOV64_IMM(BPF_REG_5, 1),
1702 /* don't read back pkt_ptr from stack here */
1703 /* write 4 bytes into packet */
1704 BPF_ST_MEM(BPF_W, BPF_REG_2, 0, 0),
1705 BPF_MOV64_REG(BPF_REG_0, BPF_REG_5),
1708 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
1711 .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
/* The caller pre-stores the ctx pointer in the slot; the subprog
 * conditionally overwrites the same slot/insn with a pkt_ptr.  Rejected:
 * "same insn cannot be used with different" (see .errstr below).
 */
1714 "calls: pkt_ptr spill into caller stack 5",
1716 BPF_MOV64_REG(BPF_REG_4, BPF_REG_10),
1717 BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, -8),
1718 BPF_STX_MEM(BPF_DW, BPF_REG_4, BPF_REG_1, 0),
1719 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 3),
1720 BPF_LDX_MEM(BPF_DW, BPF_REG_4, BPF_REG_10, -8),
1721 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_4, 0),
1725 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
1726 offsetof(struct __sk_buff, data)),
1727 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
1728 offsetof(struct __sk_buff, data_end)),
1729 BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
1730 BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
1731 BPF_MOV64_IMM(BPF_REG_5, 0),
1732 BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 3),
1733 /* spill checked pkt_ptr into stack of caller */
1734 BPF_STX_MEM(BPF_DW, BPF_REG_4, BPF_REG_2, 0),
1735 BPF_MOV64_IMM(BPF_REG_5, 1),
1736 /* don't read back pkt_ptr from stack here */
1737 /* write 4 bytes into packet */
1738 BPF_ST_MEM(BPF_W, BPF_REG_2, 0, 0),
1739 BPF_MOV64_REG(BPF_REG_0, BPF_REG_5),
1742 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
1743 .errstr = "same insn cannot be used with different",
1745 .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
/* The caller pre-stores data_end in the slot; after the call it reads the
 * slot into r4 and dereferences it.  Rejected: "R4 invalid mem access"
 * (see .errstr below).
 */
1748 "calls: pkt_ptr spill into caller stack 6",
1750 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
1751 offsetof(struct __sk_buff, data_end)),
1752 BPF_MOV64_REG(BPF_REG_4, BPF_REG_10),
1753 BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, -8),
1754 BPF_STX_MEM(BPF_DW, BPF_REG_4, BPF_REG_2, 0),
1755 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 3),
1756 BPF_LDX_MEM(BPF_DW, BPF_REG_4, BPF_REG_10, -8),
1757 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_4, 0),
1761 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
1762 offsetof(struct __sk_buff, data)),
1763 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
1764 offsetof(struct __sk_buff, data_end)),
1765 BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
1766 BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
1767 BPF_MOV64_IMM(BPF_REG_5, 0),
1768 BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 3),
1769 /* spill checked pkt_ptr into stack of caller */
1770 BPF_STX_MEM(BPF_DW, BPF_REG_4, BPF_REG_2, 0),
1771 BPF_MOV64_IMM(BPF_REG_5, 1),
1772 /* don't read back pkt_ptr from stack here */
1773 /* write 4 bytes into packet */
1774 BPF_ST_MEM(BPF_W, BPF_REG_2, 0, 0),
1775 BPF_MOV64_REG(BPF_REG_0, BPF_REG_5),
1778 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
1779 .errstr = "R4 invalid mem access",
1781 .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
/* The caller pre-stores scalar 0 in the slot; the post-call read of that
 * slot into r4 is then dereferenced.  Rejected: "R4 invalid mem access"
 * (see .errstr below).
 */
1784 "calls: pkt_ptr spill into caller stack 7",
1786 BPF_MOV64_IMM(BPF_REG_2, 0),
1787 BPF_MOV64_REG(BPF_REG_4, BPF_REG_10),
1788 BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, -8),
1789 BPF_STX_MEM(BPF_DW, BPF_REG_4, BPF_REG_2, 0),
1790 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 3),
1791 BPF_LDX_MEM(BPF_DW, BPF_REG_4, BPF_REG_10, -8),
1792 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_4, 0),
1796 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
1797 offsetof(struct __sk_buff, data)),
1798 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
1799 offsetof(struct __sk_buff, data_end)),
1800 BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
1801 BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
1802 BPF_MOV64_IMM(BPF_REG_5, 0),
1803 BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 3),
1804 /* spill checked pkt_ptr into stack of caller */
1805 BPF_STX_MEM(BPF_DW, BPF_REG_4, BPF_REG_2, 0),
1806 BPF_MOV64_IMM(BPF_REG_5, 1),
1807 /* don't read back pkt_ptr from stack here */
1808 /* write 4 bytes into packet */
1809 BPF_ST_MEM(BPF_W, BPF_REG_2, 0, 0),
1810 BPF_MOV64_REG(BPF_REG_0, BPF_REG_5),
1813 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
1814 .errstr = "R4 invalid mem access",
1816 .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
/* The caller bounds-checks the pkt_ptr (JLE exit) BEFORE storing it into
 * the slot, so the post-call read yields an already-checked pkt_ptr.
 * NOTE(review): the .result field for this test is not visible in this
 * chunk of the file.
 */
1819 "calls: pkt_ptr spill into caller stack 8",
1821 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
1822 offsetof(struct __sk_buff, data)),
1823 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
1824 offsetof(struct __sk_buff, data_end)),
1825 BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
1826 BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
1827 BPF_JMP_REG(BPF_JLE, BPF_REG_0, BPF_REG_3, 1),
1829 BPF_MOV64_REG(BPF_REG_4, BPF_REG_10),
1830 BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, -8),
1831 BPF_STX_MEM(BPF_DW, BPF_REG_4, BPF_REG_2, 0),
1832 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 3),
1833 BPF_LDX_MEM(BPF_DW, BPF_REG_4, BPF_REG_10, -8),
1834 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_4, 0),
1838 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
1839 offsetof(struct __sk_buff, data)),
1840 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
1841 offsetof(struct __sk_buff, data_end)),
1842 BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
1843 BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
1844 BPF_MOV64_IMM(BPF_REG_5, 0),
1845 BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 3),
1846 /* spill checked pkt_ptr into stack of caller */
1847 BPF_STX_MEM(BPF_DW, BPF_REG_4, BPF_REG_2, 0),
1848 BPF_MOV64_IMM(BPF_REG_5, 1),
1849 /* don't read back pkt_ptr from stack here */
1850 /* write 4 bytes into packet */
1851 BPF_ST_MEM(BPF_W, BPF_REG_2, 0, 0),
1852 BPF_MOV64_REG(BPF_REG_0, BPF_REG_5),
1855 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
1857 .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
/* Like spill test 8, but the subprog overwrites the slot with an
 * UNCHECKED pkt_ptr before its own bounds check, invalidating the
 * caller's earlier check.  Rejected: "invalid access to packet"
 * (see .errstr below).
 */
1860 "calls: pkt_ptr spill into caller stack 9",
1862 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
1863 offsetof(struct __sk_buff, data)),
1864 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
1865 offsetof(struct __sk_buff, data_end)),
1866 BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
1867 BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
1868 BPF_JMP_REG(BPF_JLE, BPF_REG_0, BPF_REG_3, 1),
1870 BPF_MOV64_REG(BPF_REG_4, BPF_REG_10),
1871 BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, -8),
1872 BPF_STX_MEM(BPF_DW, BPF_REG_4, BPF_REG_2, 0),
1873 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 3),
1874 BPF_LDX_MEM(BPF_DW, BPF_REG_4, BPF_REG_10, -8),
1875 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_4, 0),
1879 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
1880 offsetof(struct __sk_buff, data)),
1881 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
1882 offsetof(struct __sk_buff, data_end)),
1883 BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
1884 BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
1885 BPF_MOV64_IMM(BPF_REG_5, 0),
1886 /* spill unchecked pkt_ptr into stack of caller */
1887 BPF_STX_MEM(BPF_DW, BPF_REG_4, BPF_REG_2, 0),
1888 BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 2),
1889 BPF_MOV64_IMM(BPF_REG_5, 1),
1890 /* don't read back pkt_ptr from stack here */
1891 /* write 4 bytes into packet */
1892 BPF_ST_MEM(BPF_W, BPF_REG_2, 0, 0),
1893 BPF_MOV64_REG(BPF_REG_0, BPF_REG_5),
1896 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
1897 .errstr = "invalid access to packet",
1899 .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
/* The caller zero-initializes fp-8; the subprog either leaves it zero
 * (ctx == 0 path) or overwrites it with a lookup result, so after the
 * call fp-8 holds zero or map_value_or_null; the caller checks for
 * non-zero before the store.  Map fixup at insn 13.
 */
1902 "calls: caller stack init to zero or map_value_or_null",
1904 BPF_MOV64_IMM(BPF_REG_0, 0),
1905 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -8),
1906 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
1907 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
1908 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 4),
1909 /* fetch map_value_or_null or const_zero from stack */
1910 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_10, -8),
1911 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 1),
1912 /* store into map_value */
1913 BPF_ST_MEM(BPF_W, BPF_REG_0, 0, 0),
1917 /* if (ctx == 0) return; */
1918 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, 8),
1919 /* else bpf_map_lookup() and *(fp - 8) = r0 */
1920 BPF_MOV64_REG(BPF_REG_6, BPF_REG_2),
1921 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
1922 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
1923 BPF_LD_MAP_FD(BPF_REG_1, 0),
1924 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
1925 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem),
1926 /* write map_value_ptr_or_null into stack frame of main prog at fp-8 */
1927 BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_0, 0),
1930 .fixup_map_hash_8b = { 13 },
1932 .prog_type = BPF_PROG_TYPE_XDP,
/* Pruning test: one branch skips zero-init of fp-8, then map_lookup
 * reads the key from fp-8.  Expects "invalid indirect read from stack"
 * (see .errstr below).
 */
1935 "calls: stack init to zero and pruning",
1937 /* first make allocated_stack 16 byte */
1938 BPF_ST_MEM(BPF_DW, BPF_REG_10, -16, 0),
1939 /* now fork the execution such that the false branch
1940 * of JGT insn will be verified second and it skips zero
1941 * init of fp-8 stack slot. If stack liveness marking
1942 * is missing live_read marks from call map_lookup
1943 * processing then pruning will incorrectly assume
1944 * that fp-8 stack slot was unused in the fall-through
1945 * branch and will accept the program incorrectly
 */
1947 BPF_JMP_IMM(BPF_JGT, BPF_REG_1, 2, 2),
1948 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
1949 BPF_JMP_IMM(BPF_JA, 0, 0, 0),
1950 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
1951 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
1952 BPF_LD_MAP_FD(BPF_REG_1, 0),
1953 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem),
1956 .fixup_map_hash_48b = { 6 },
1957 .errstr = "invalid indirect read from stack off -8+0 size 8",
1959 .prog_type = BPF_PROG_TYPE_XDP,
/* The second subprog reads one byte from its r1 argument at entry; the
 * caller passes the saved ctx (r6).  Rejected for unprivileged users only
 * (see .errstr_unpriv / .result_unpriv below).
 */
1962 "calls: ctx read at start of subprog",
1964 BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
1965 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 5),
1966 BPF_JMP_REG(BPF_JSGT, BPF_REG_0, BPF_REG_0, 0),
1967 BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
1968 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 2),
1969 BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
1971 BPF_LDX_MEM(BPF_B, BPF_REG_9, BPF_REG_1, 0),
1972 BPF_MOV64_IMM(BPF_REG_0, 0),
1975 .prog_type = BPF_PROG_TYPE_SOCKET_FILTER,
1976 .errstr_unpriv = "function calls to other bpf functions are allowed for root only",
1977 .result_unpriv = REJECT,
/* r8 is randomly 0 or 1; the load through r1 only happens when r8 != 1,
 * where r1 holds the scalar r8 instead of a pointer.  Expects "!read_ok"
 * (see .errstr below); unprivileged users are rejected earlier for using
 * a bpf-to-bpf call.
 */
1981 "calls: cross frame pruning",
1988 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_get_prandom_u32),
1989 BPF_MOV64_IMM(BPF_REG_8, 0),
1990 BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 1),
1991 BPF_MOV64_IMM(BPF_REG_8, 1),
1992 BPF_MOV64_REG(BPF_REG_1, BPF_REG_8),
1993 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 4),
1994 BPF_JMP_IMM(BPF_JEQ, BPF_REG_8, 1, 1),
1995 BPF_LDX_MEM(BPF_B, BPF_REG_9, BPF_REG_1, 0),
1996 BPF_MOV64_IMM(BPF_REG_0, 0),
1998 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, 0),
2001 .prog_type = BPF_PROG_TYPE_SOCKET_FILTER,
2002 .errstr_unpriv = "function calls to other bpf functions are allowed for root only",
2003 .errstr = "!read_ok",
2007 "calls: cross frame pruning - liveness propagation",
2009 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_get_prandom_u32),
2010 BPF_MOV64_IMM(BPF_REG_8, 0),
2011 BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 1),
2012 BPF_MOV64_IMM(BPF_REG_8, 1),
2013 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_get_prandom_u32),
2014 BPF_MOV64_IMM(BPF_REG_9, 0),
2015 BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 1),
2016 BPF_MOV64_IMM(BPF_REG_9, 1),
2017 BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
2018 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 4),
2019 BPF_JMP_IMM(BPF_JEQ, BPF_REG_8, 1, 1),
2020 BPF_LDX_MEM(BPF_B, BPF_REG_1, BPF_REG_2, 0),
2021 BPF_MOV64_IMM(BPF_REG_0, 0),
2023 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, 0),
2026 .prog_type = BPF_PROG_TYPE_SOCKET_FILTER,
2027 .errstr_unpriv = "function calls to other bpf functions are allowed for root only",
2028 .errstr = "!read_ok",