2 "calls: invalid kfunc call not eliminated",
4 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, BPF_PSEUDO_KFUNC_CALL, 0, 0),
5 BPF_MOV64_IMM(BPF_REG_0, 1),
8 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
10 .errstr = "invalid kernel function call not eliminated in verifier pass",
13 "calls: invalid kfunc call unreachable",
15 BPF_MOV64_IMM(BPF_REG_0, 1),
16 BPF_JMP_IMM(BPF_JGT, BPF_REG_0, 0, 2),
17 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, BPF_PSEUDO_KFUNC_CALL, 0, 0),
18 BPF_MOV64_IMM(BPF_REG_0, 1),
21 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
25 "calls: invalid kfunc call: ptr_to_mem to struct with non-scalar",
27 BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
28 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -8),
29 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, BPF_PSEUDO_KFUNC_CALL, 0, 0),
32 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
34 .errstr = "arg#0 pointer type STRUCT prog_test_fail1 must point to scalar",
35 .fixup_kfunc_btf_id = {
36 { "bpf_kfunc_call_test_fail1", 2 },
40 "calls: invalid kfunc call: ptr_to_mem to struct with nesting depth > 4",
42 BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
43 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -8),
44 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, BPF_PSEUDO_KFUNC_CALL, 0, 0),
47 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
49 .errstr = "max struct nesting depth exceeded\narg#0 pointer type STRUCT prog_test_fail2",
50 .fixup_kfunc_btf_id = {
51 { "bpf_kfunc_call_test_fail2", 2 },
55 "calls: invalid kfunc call: ptr_to_mem to struct with FAM",
57 BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
58 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -8),
59 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, BPF_PSEUDO_KFUNC_CALL, 0, 0),
62 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
64 .errstr = "arg#0 pointer type STRUCT prog_test_fail3 must point to scalar",
65 .fixup_kfunc_btf_id = {
66 { "bpf_kfunc_call_test_fail3", 2 },
70 "calls: invalid kfunc call: reg->type != PTR_TO_CTX",
72 BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
73 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -8),
74 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, BPF_PSEUDO_KFUNC_CALL, 0, 0),
77 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
79 .errstr = "arg#0 expected pointer to ctx, but got PTR",
80 .fixup_kfunc_btf_id = {
81 { "bpf_kfunc_call_test_pass_ctx", 2 },
85 "calls: invalid kfunc call: void * not allowed in func proto without mem size arg",
87 BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
88 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -8),
89 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, BPF_PSEUDO_KFUNC_CALL, 0, 0),
92 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
94 .errstr = "arg#0 pointer type UNKNOWN must point to scalar",
95 .fixup_kfunc_btf_id = {
96 { "bpf_kfunc_call_test_mem_len_fail1", 2 },
100 "calls: trigger reg2btf_ids[reg->type] for reg->type > __BPF_REG_TYPE_MAX",
102 BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
103 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -8),
104 BPF_ST_MEM(BPF_DW, BPF_REG_1, 0, 0),
105 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, BPF_PSEUDO_KFUNC_CALL, 0, 0),
106 BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
107 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, BPF_PSEUDO_KFUNC_CALL, 0, 0),
110 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
112 .errstr = "arg#0 pointer type STRUCT prog_test_ref_kfunc must point",
113 .fixup_kfunc_btf_id = {
114 { "bpf_kfunc_call_test_acquire", 3 },
115 { "bpf_kfunc_call_test_release", 5 },
119 "calls: invalid kfunc call: reg->off must be zero when passed to release kfunc",
121 BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
122 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -8),
123 BPF_ST_MEM(BPF_DW, BPF_REG_1, 0, 0),
124 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, BPF_PSEUDO_KFUNC_CALL, 0, 0),
125 BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 1),
127 BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
128 BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
129 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, BPF_PSEUDO_KFUNC_CALL, 0, 0),
130 BPF_MOV64_IMM(BPF_REG_0, 0),
133 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
135 .errstr = "R1 must have zero offset when passed to release func",
136 .fixup_kfunc_btf_id = {
137 { "bpf_kfunc_call_test_acquire", 3 },
138 { "bpf_kfunc_call_memb_release", 8 },
142 "calls: invalid kfunc call: don't match first member type when passed to release kfunc",
144 BPF_MOV64_IMM(BPF_REG_0, 0),
145 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, BPF_PSEUDO_KFUNC_CALL, 0, 0),
146 BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 1),
148 BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
149 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, BPF_PSEUDO_KFUNC_CALL, 0, 0),
150 BPF_MOV64_IMM(BPF_REG_0, 0),
153 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
155 .errstr = "kernel function bpf_kfunc_call_memb1_release args#0 expected pointer",
156 .fixup_kfunc_btf_id = {
157 { "bpf_kfunc_call_memb_acquire", 1 },
158 { "bpf_kfunc_call_memb1_release", 5 },
162 "calls: invalid kfunc call: PTR_TO_BTF_ID with negative offset",
164 BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
165 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -8),
166 BPF_ST_MEM(BPF_DW, BPF_REG_1, 0, 0),
167 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, BPF_PSEUDO_KFUNC_CALL, 0, 0),
168 BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 1),
170 BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
171 BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_1, 16),
172 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -4),
173 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, BPF_PSEUDO_KFUNC_CALL, 0, 0),
174 BPF_MOV64_IMM(BPF_REG_0, 0),
177 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
178 .fixup_kfunc_btf_id = {
179 { "bpf_kfunc_call_test_acquire", 3 },
180 { "bpf_kfunc_call_test_release", 9 },
182 .result_unpriv = REJECT,
184 .errstr = "negative offset ptr_ ptr R1 off=-4 disallowed",
187 "calls: invalid kfunc call: PTR_TO_BTF_ID with variable offset",
189 BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
190 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -8),
191 BPF_ST_MEM(BPF_DW, BPF_REG_1, 0, 0),
192 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, BPF_PSEUDO_KFUNC_CALL, 0, 0),
193 BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 1),
195 BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
196 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_0, 4),
197 BPF_JMP_IMM(BPF_JLE, BPF_REG_2, 4, 3),
198 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, BPF_PSEUDO_KFUNC_CALL, 0, 0),
199 BPF_MOV64_IMM(BPF_REG_0, 0),
201 BPF_JMP_IMM(BPF_JGE, BPF_REG_2, 0, 3),
202 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, BPF_PSEUDO_KFUNC_CALL, 0, 0),
203 BPF_MOV64_IMM(BPF_REG_0, 0),
205 BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_2),
206 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, BPF_PSEUDO_KFUNC_CALL, 0, 0),
207 BPF_MOV64_IMM(BPF_REG_0, 0),
210 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
211 .fixup_kfunc_btf_id = {
212 { "bpf_kfunc_call_test_acquire", 3 },
213 { "bpf_kfunc_call_test_release", 9 },
214 { "bpf_kfunc_call_test_release", 13 },
215 { "bpf_kfunc_call_test_release", 17 },
217 .result_unpriv = REJECT,
219 .errstr = "variable ptr_ access var_off=(0x0; 0x7) disallowed",
222 "calls: basic sanity",
224 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 2),
225 BPF_MOV64_IMM(BPF_REG_0, 1),
227 BPF_MOV64_IMM(BPF_REG_0, 2),
230 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
234 "calls: not on unpriviledged",
236 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 2),
237 BPF_MOV64_IMM(BPF_REG_0, 1),
239 BPF_MOV64_IMM(BPF_REG_0, 2),
242 .errstr_unpriv = "loading/calling other bpf or kernel functions are allowed for",
243 .result_unpriv = REJECT,
248 "calls: div by 0 in subprog",
250 BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
251 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 8),
252 BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
253 BPF_LDX_MEM(BPF_W, BPF_REG_1, BPF_REG_1,
254 offsetof(struct __sk_buff, data_end)),
255 BPF_MOV64_REG(BPF_REG_2, BPF_REG_0),
256 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, 8),
257 BPF_JMP_REG(BPF_JGT, BPF_REG_2, BPF_REG_1, 1),
258 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_0, 0),
259 BPF_MOV64_IMM(BPF_REG_0, 1),
261 BPF_MOV32_IMM(BPF_REG_2, 0),
262 BPF_MOV32_IMM(BPF_REG_3, 1),
263 BPF_ALU32_REG(BPF_DIV, BPF_REG_3, BPF_REG_2),
264 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
265 offsetof(struct __sk_buff, data)),
268 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
273 "calls: multiple ret types in subprog 1",
275 BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
276 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 8),
277 BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
278 BPF_LDX_MEM(BPF_W, BPF_REG_1, BPF_REG_1,
279 offsetof(struct __sk_buff, data_end)),
280 BPF_MOV64_REG(BPF_REG_2, BPF_REG_0),
281 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, 8),
282 BPF_JMP_REG(BPF_JGT, BPF_REG_2, BPF_REG_1, 1),
283 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_0, 0),
284 BPF_MOV64_IMM(BPF_REG_0, 1),
286 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
287 offsetof(struct __sk_buff, data)),
288 BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 1),
289 BPF_MOV32_IMM(BPF_REG_0, 42),
292 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
294 .errstr = "R0 invalid mem access 'scalar'",
297 "calls: multiple ret types in subprog 2",
299 BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
300 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 8),
301 BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
302 BPF_LDX_MEM(BPF_W, BPF_REG_1, BPF_REG_1,
303 offsetof(struct __sk_buff, data_end)),
304 BPF_MOV64_REG(BPF_REG_2, BPF_REG_0),
305 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, 8),
306 BPF_JMP_REG(BPF_JGT, BPF_REG_2, BPF_REG_1, 1),
307 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_0, 0),
308 BPF_MOV64_IMM(BPF_REG_0, 1),
310 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
311 offsetof(struct __sk_buff, data)),
312 BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
313 BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 9),
314 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
315 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
316 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
317 BPF_LD_MAP_FD(BPF_REG_1, 0),
318 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem),
319 BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 1),
320 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_6,
321 offsetof(struct __sk_buff, data)),
322 BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 64),
325 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
326 .fixup_map_hash_8b = { 16 },
328 .errstr = "R0 min value is outside of the allowed memory range",
331 "calls: overlapping caller/callee",
333 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 0),
334 BPF_MOV64_IMM(BPF_REG_0, 1),
337 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
338 .errstr = "last insn is not an exit or jmp",
342 "calls: wrong recursive calls",
344 BPF_JMP_IMM(BPF_JA, 0, 0, 4),
345 BPF_JMP_IMM(BPF_JA, 0, 0, 4),
346 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, -2),
347 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, -2),
348 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, -2),
349 BPF_MOV64_IMM(BPF_REG_0, 1),
352 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
353 .errstr = "jump out of range",
357 "calls: wrong src reg",
359 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 3, 0, 0),
360 BPF_MOV64_IMM(BPF_REG_0, 1),
363 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
364 .errstr = "BPF_CALL uses reserved fields",
368 "calls: wrong off value",
370 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, -1, 2),
371 BPF_MOV64_IMM(BPF_REG_0, 1),
373 BPF_MOV64_IMM(BPF_REG_0, 2),
376 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
377 .errstr = "BPF_CALL uses reserved fields",
381 "calls: jump back loop",
383 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, -1),
384 BPF_MOV64_IMM(BPF_REG_0, 1),
387 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
388 .errstr = "back-edge from insn 0 to 0",
392 "calls: conditional call",
394 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
395 offsetof(struct __sk_buff, mark)),
396 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 3),
397 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 2),
398 BPF_MOV64_IMM(BPF_REG_0, 1),
400 BPF_MOV64_IMM(BPF_REG_0, 2),
403 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
404 .errstr = "jump out of range",
408 "calls: conditional call 2",
410 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
411 offsetof(struct __sk_buff, mark)),
412 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 3),
413 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 4),
414 BPF_MOV64_IMM(BPF_REG_0, 1),
416 BPF_MOV64_IMM(BPF_REG_0, 2),
418 BPF_MOV64_IMM(BPF_REG_0, 3),
421 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
425 "calls: conditional call 3",
427 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
428 offsetof(struct __sk_buff, mark)),
429 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 3),
430 BPF_JMP_IMM(BPF_JA, 0, 0, 4),
431 BPF_MOV64_IMM(BPF_REG_0, 1),
433 BPF_MOV64_IMM(BPF_REG_0, 1),
434 BPF_JMP_IMM(BPF_JA, 0, 0, -6),
435 BPF_MOV64_IMM(BPF_REG_0, 3),
436 BPF_JMP_IMM(BPF_JA, 0, 0, -6),
438 .prog_type = BPF_PROG_TYPE_SOCKET_FILTER,
439 .errstr_unpriv = "back-edge from insn",
440 .result_unpriv = REJECT,
445 "calls: conditional call 4",
447 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
448 offsetof(struct __sk_buff, mark)),
449 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 3),
450 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 4),
451 BPF_MOV64_IMM(BPF_REG_0, 1),
453 BPF_MOV64_IMM(BPF_REG_0, 1),
454 BPF_JMP_IMM(BPF_JA, 0, 0, -5),
455 BPF_MOV64_IMM(BPF_REG_0, 3),
458 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
462 "calls: conditional call 5",
464 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
465 offsetof(struct __sk_buff, mark)),
466 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 3),
467 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 4),
468 BPF_MOV64_IMM(BPF_REG_0, 1),
470 BPF_MOV64_IMM(BPF_REG_0, 1),
471 BPF_JMP_IMM(BPF_JA, 0, 0, -6),
472 BPF_MOV64_IMM(BPF_REG_0, 3),
475 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
480 "calls: conditional call 6",
482 BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
483 BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
484 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 2),
485 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, -3),
487 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
488 offsetof(struct __sk_buff, mark)),
491 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
492 .errstr = "infinite loop detected",
496 "calls: using r0 returned by callee",
498 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1),
500 BPF_MOV64_IMM(BPF_REG_0, 2),
503 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
507 "calls: using uninit r0 from callee",
509 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1),
513 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
514 .errstr = "!read_ok",
518 "calls: callee is using r1",
520 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1),
522 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
523 offsetof(struct __sk_buff, len)),
526 .prog_type = BPF_PROG_TYPE_SCHED_ACT,
528 .retval = TEST_DATA_LEN,
531 "calls: callee using args1",
533 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1),
535 BPF_MOV64_REG(BPF_REG_0, BPF_REG_1),
538 .errstr_unpriv = "allowed for",
539 .result_unpriv = REJECT,
541 .retval = POINTER_VALUE,
544 "calls: callee using wrong args2",
546 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1),
548 BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
551 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
552 .errstr = "R2 !read_ok",
556 "calls: callee using two args",
558 BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
559 BPF_LDX_MEM(BPF_W, BPF_REG_1, BPF_REG_6,
560 offsetof(struct __sk_buff, len)),
561 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_6,
562 offsetof(struct __sk_buff, len)),
563 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1),
565 BPF_MOV64_REG(BPF_REG_0, BPF_REG_1),
566 BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_2),
569 .errstr_unpriv = "allowed for",
570 .result_unpriv = REJECT,
572 .retval = TEST_DATA_LEN + TEST_DATA_LEN - ETH_HLEN - ETH_HLEN,
575 "calls: callee changing pkt pointers",
577 BPF_LDX_MEM(BPF_W, BPF_REG_6, BPF_REG_1, offsetof(struct xdp_md, data)),
578 BPF_LDX_MEM(BPF_W, BPF_REG_7, BPF_REG_1,
579 offsetof(struct xdp_md, data_end)),
580 BPF_MOV64_REG(BPF_REG_8, BPF_REG_6),
581 BPF_ALU64_IMM(BPF_ADD, BPF_REG_8, 8),
582 BPF_JMP_REG(BPF_JGT, BPF_REG_8, BPF_REG_7, 2),
583 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 3),
584 /* clear_all_pkt_pointers() has to walk all frames
585 * to make sure that pkt pointers in the caller
586 * are cleared when callee is calling a helper that
587 * adjusts packet size
589 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_6, 0),
590 BPF_MOV32_IMM(BPF_REG_0, 0),
592 BPF_MOV64_IMM(BPF_REG_2, 0),
593 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_xdp_adjust_head),
597 .errstr = "R6 invalid mem access 'scalar'",
598 .prog_type = BPF_PROG_TYPE_XDP,
599 .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
602 "calls: ptr null check in subprog",
604 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
605 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
606 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
607 BPF_LD_MAP_FD(BPF_REG_1, 0),
608 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
609 BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
610 BPF_MOV64_REG(BPF_REG_6, BPF_REG_0),
611 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 3),
612 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 1),
613 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_6, 0),
615 BPF_MOV64_IMM(BPF_REG_0, 0),
616 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, 1),
617 BPF_MOV64_IMM(BPF_REG_0, 1),
620 .errstr_unpriv = "loading/calling other bpf or kernel functions are allowed for",
621 .fixup_map_hash_48b = { 3 },
622 .result_unpriv = REJECT,
627 "calls: two calls with args",
629 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1),
631 BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
632 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 6),
633 BPF_MOV64_REG(BPF_REG_7, BPF_REG_0),
634 BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
635 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 3),
636 BPF_ALU64_REG(BPF_ADD, BPF_REG_7, BPF_REG_0),
637 BPF_MOV64_REG(BPF_REG_0, BPF_REG_7),
639 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
640 offsetof(struct __sk_buff, len)),
643 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
645 .retval = TEST_DATA_LEN + TEST_DATA_LEN,
648 "calls: calls with stack arith",
650 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
651 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -64),
652 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1),
654 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -64),
655 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1),
657 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -64),
658 BPF_MOV64_IMM(BPF_REG_0, 42),
659 BPF_STX_MEM(BPF_DW, BPF_REG_2, BPF_REG_0, 0),
662 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
667 "calls: calls with misaligned stack access",
669 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
670 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -63),
671 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1),
673 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -61),
674 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1),
676 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -63),
677 BPF_MOV64_IMM(BPF_REG_0, 42),
678 BPF_STX_MEM(BPF_DW, BPF_REG_2, BPF_REG_0, 0),
681 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
682 .flags = F_LOAD_WITH_STRICT_ALIGNMENT,
683 .errstr = "misaligned stack access",
687 "calls: calls control flow, jump test",
689 BPF_MOV64_IMM(BPF_REG_0, 42),
690 BPF_JMP_IMM(BPF_JA, 0, 0, 2),
691 BPF_MOV64_IMM(BPF_REG_0, 43),
692 BPF_JMP_IMM(BPF_JA, 0, 0, 1),
693 BPF_JMP_IMM(BPF_JA, 0, 0, -3),
696 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
701 "calls: calls control flow, jump test 2",
703 BPF_MOV64_IMM(BPF_REG_0, 42),
704 BPF_JMP_IMM(BPF_JA, 0, 0, 2),
705 BPF_MOV64_IMM(BPF_REG_0, 43),
706 BPF_JMP_IMM(BPF_JA, 0, 0, 1),
707 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, -3),
710 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
711 .errstr = "jump out of range from insn 1 to 4",
715 "calls: two calls with bad jump",
717 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1),
719 BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
720 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 6),
721 BPF_MOV64_REG(BPF_REG_7, BPF_REG_0),
722 BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
723 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 3),
724 BPF_ALU64_REG(BPF_ADD, BPF_REG_7, BPF_REG_0),
725 BPF_MOV64_REG(BPF_REG_0, BPF_REG_7),
727 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
728 offsetof(struct __sk_buff, len)),
729 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, -3),
732 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
733 .errstr = "jump out of range from insn 11 to 9",
737 "calls: recursive call. test1",
739 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1),
741 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, -1),
744 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
745 .errstr = "back-edge",
749 "calls: recursive call. test2",
751 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1),
753 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, -3),
756 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
757 .errstr = "back-edge",
761 "calls: unreachable code",
763 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1),
765 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1),
767 BPF_MOV64_IMM(BPF_REG_0, 0),
769 BPF_MOV64_IMM(BPF_REG_0, 0),
772 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
773 .errstr = "unreachable insn 6",
777 "calls: invalid call",
779 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1),
781 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, -4),
784 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
785 .errstr = "invalid destination",
789 "calls: invalid call 2",
791 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1),
793 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 0x7fffffff),
796 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
797 .errstr = "invalid destination",
801 "calls: jumping across function bodies. test1",
803 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 2),
804 BPF_MOV64_IMM(BPF_REG_0, 0),
806 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, -3),
809 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
810 .errstr = "jump out of range",
814 "calls: jumping across function bodies. test2",
816 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, 3),
817 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 2),
818 BPF_MOV64_IMM(BPF_REG_0, 0),
822 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
823 .errstr = "jump out of range",
827 "calls: call without exit",
829 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1),
831 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1),
833 BPF_MOV64_IMM(BPF_REG_0, 0),
834 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, -2),
836 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
837 .errstr = "not an exit",
841 "calls: call into middle of ld_imm64",
843 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 3),
844 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 3),
845 BPF_MOV64_IMM(BPF_REG_0, 0),
847 BPF_LD_IMM64(BPF_REG_0, 0),
850 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
851 .errstr = "last insn",
855 "calls: call into middle of other call",
857 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 3),
858 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 3),
859 BPF_MOV64_IMM(BPF_REG_0, 0),
861 BPF_MOV64_IMM(BPF_REG_0, 0),
862 BPF_MOV64_IMM(BPF_REG_0, 0),
865 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
866 .errstr = "last insn",
870 "calls: subprog call with ld_abs in main prog",
872 BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
873 BPF_LD_ABS(BPF_B, 0),
874 BPF_LD_ABS(BPF_H, 0),
875 BPF_LD_ABS(BPF_W, 0),
876 BPF_MOV64_REG(BPF_REG_7, BPF_REG_6),
877 BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
878 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 5),
879 BPF_MOV64_REG(BPF_REG_6, BPF_REG_7),
880 BPF_LD_ABS(BPF_B, 0),
881 BPF_LD_ABS(BPF_H, 0),
882 BPF_LD_ABS(BPF_W, 0),
884 BPF_MOV64_IMM(BPF_REG_2, 1),
885 BPF_MOV64_IMM(BPF_REG_3, 2),
886 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_skb_vlan_push),
889 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
893 "calls: two calls with bad fallthrough",
895 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1),
897 BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
898 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 6),
899 BPF_MOV64_REG(BPF_REG_7, BPF_REG_0),
900 BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
901 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 3),
902 BPF_ALU64_REG(BPF_ADD, BPF_REG_7, BPF_REG_0),
903 BPF_MOV64_REG(BPF_REG_0, BPF_REG_7),
904 BPF_MOV64_REG(BPF_REG_0, BPF_REG_0),
905 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
906 offsetof(struct __sk_buff, len)),
909 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
910 .errstr = "not an exit",
914 "calls: two calls with stack read",
916 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
917 BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
918 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -8),
919 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1),
921 BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
922 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 6),
923 BPF_MOV64_REG(BPF_REG_7, BPF_REG_0),
924 BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
925 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 3),
926 BPF_ALU64_REG(BPF_ADD, BPF_REG_7, BPF_REG_0),
927 BPF_MOV64_REG(BPF_REG_0, BPF_REG_7),
929 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1, 0),
932 .prog_type = BPF_PROG_TYPE_XDP,
936 "calls: two calls with stack write",
939 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
940 BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
941 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -8),
942 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
943 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -16),
944 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 2),
945 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_10, -16),
949 BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
950 BPF_MOV64_REG(BPF_REG_7, BPF_REG_2),
951 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 7),
952 BPF_MOV64_REG(BPF_REG_8, BPF_REG_0),
953 BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
954 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 4),
955 BPF_ALU64_REG(BPF_ADD, BPF_REG_8, BPF_REG_0),
956 BPF_MOV64_REG(BPF_REG_0, BPF_REG_8),
957 /* write into stack frame of main prog */
958 BPF_STX_MEM(BPF_DW, BPF_REG_7, BPF_REG_0, 0),
962 /* read from stack frame of main prog */
963 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1, 0),
966 .prog_type = BPF_PROG_TYPE_XDP,
970 "calls: stack overflow using two frames (pre-call access)",
973 BPF_ST_MEM(BPF_B, BPF_REG_10, -300, 0),
974 BPF_RAW_INSN(BPF_JMP|BPF_CALL, 0, 1, 0, 1),
978 BPF_ST_MEM(BPF_B, BPF_REG_10, -300, 0),
979 BPF_MOV64_IMM(BPF_REG_0, 0),
982 .prog_type = BPF_PROG_TYPE_XDP,
983 .errstr = "combined stack size",
987 "calls: stack overflow using two frames (post-call access)",
990 BPF_RAW_INSN(BPF_JMP|BPF_CALL, 0, 1, 0, 2),
991 BPF_ST_MEM(BPF_B, BPF_REG_10, -300, 0),
995 BPF_ST_MEM(BPF_B, BPF_REG_10, -300, 0),
996 BPF_MOV64_IMM(BPF_REG_0, 0),
999 .prog_type = BPF_PROG_TYPE_XDP,
1000 .errstr = "combined stack size",
1004 "calls: stack depth check using three frames. test1",
1007 BPF_RAW_INSN(BPF_JMP|BPF_CALL, 0, 1, 0, 4), /* call A */
1008 BPF_RAW_INSN(BPF_JMP|BPF_CALL, 0, 1, 0, 5), /* call B */
1009 BPF_ST_MEM(BPF_B, BPF_REG_10, -32, 0),
1010 BPF_MOV64_IMM(BPF_REG_0, 0),
1013 BPF_ST_MEM(BPF_B, BPF_REG_10, -256, 0),
1016 BPF_RAW_INSN(BPF_JMP|BPF_CALL, 0, 1, 0, -3), /* call A */
1017 BPF_ST_MEM(BPF_B, BPF_REG_10, -64, 0),
1020 .prog_type = BPF_PROG_TYPE_XDP,
1021 /* stack_main=32, stack_A=256, stack_B=64
1022 * and max(main+A, main+A+B) < 512
1027 "calls: stack depth check using three frames. test2",
1030 BPF_RAW_INSN(BPF_JMP|BPF_CALL, 0, 1, 0, 4), /* call A */
1031 BPF_RAW_INSN(BPF_JMP|BPF_CALL, 0, 1, 0, 5), /* call B */
1032 BPF_ST_MEM(BPF_B, BPF_REG_10, -32, 0),
1033 BPF_MOV64_IMM(BPF_REG_0, 0),
1036 BPF_ST_MEM(BPF_B, BPF_REG_10, -64, 0),
1039 BPF_RAW_INSN(BPF_JMP|BPF_CALL, 0, 1, 0, -3), /* call A */
1040 BPF_ST_MEM(BPF_B, BPF_REG_10, -256, 0),
1043 .prog_type = BPF_PROG_TYPE_XDP,
1044 /* stack_main=32, stack_A=64, stack_B=256
1045 * and max(main+A, main+A+B) < 512
1050 "calls: stack depth check using three frames. test3",
1053 BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
1054 BPF_RAW_INSN(BPF_JMP|BPF_CALL, 0, 1, 0, 6), /* call A */
1055 BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
1056 BPF_RAW_INSN(BPF_JMP|BPF_CALL, 0, 1, 0, 8), /* call B */
1057 BPF_JMP_IMM(BPF_JGE, BPF_REG_6, 0, 1),
1058 BPF_ST_MEM(BPF_B, BPF_REG_10, -64, 0),
1059 BPF_MOV64_IMM(BPF_REG_0, 0),
1062 BPF_JMP_IMM(BPF_JLT, BPF_REG_1, 10, 1),
1064 BPF_ST_MEM(BPF_B, BPF_REG_10, -224, 0),
1065 BPF_JMP_IMM(BPF_JA, 0, 0, -3),
1067 BPF_JMP_IMM(BPF_JGT, BPF_REG_1, 2, 1),
1068 BPF_RAW_INSN(BPF_JMP|BPF_CALL, 0, 1, 0, -6), /* call A */
1069 BPF_ST_MEM(BPF_B, BPF_REG_10, -256, 0),
1072 .prog_type = BPF_PROG_TYPE_XDP,
1073 /* stack_main=64, stack_A=224, stack_B=256
1074 * and max(main+A, main+A+B) > 512
1076 .errstr = "combined stack",
1080 "calls: stack depth check using three frames. test4",
1081 /* void main(void) {
1086 * void func1(int alloc_or_recurse) {
1087 * if (alloc_or_recurse) {
1088 * frame_pointer[-300] = 1;
1090 * func2(alloc_or_recurse);
1093 * void func2(int alloc_or_recurse) {
1094 * if (alloc_or_recurse) {
1095 * frame_pointer[-300] = 1;
1101 BPF_MOV64_IMM(BPF_REG_1, 0),
1102 BPF_RAW_INSN(BPF_JMP|BPF_CALL, 0, 1, 0, 6), /* call A */
1103 BPF_MOV64_IMM(BPF_REG_1, 1),
1104 BPF_RAW_INSN(BPF_JMP|BPF_CALL, 0, 1, 0, 4), /* call A */
1105 BPF_MOV64_IMM(BPF_REG_1, 1),
1106 BPF_RAW_INSN(BPF_JMP|BPF_CALL, 0, 1, 0, 7), /* call B */
1107 BPF_MOV64_IMM(BPF_REG_0, 0),
1110 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, 2),
1111 BPF_ST_MEM(BPF_B, BPF_REG_10, -300, 0),
1113 BPF_RAW_INSN(BPF_JMP|BPF_CALL, 0, 1, 0, 1), /* call B */
1116 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, 1),
1117 BPF_ST_MEM(BPF_B, BPF_REG_10, -300, 0),
1120 .prog_type = BPF_PROG_TYPE_XDP,
1122 .errstr = "combined stack",
1125 "calls: stack depth check using three frames. test5",
1128 BPF_RAW_INSN(BPF_JMP|BPF_CALL, 0, 1, 0, 1), /* call A */
1131 BPF_RAW_INSN(BPF_JMP|BPF_CALL, 0, 1, 0, 1), /* call B */
1134 BPF_RAW_INSN(BPF_JMP|BPF_CALL, 0, 1, 0, 1), /* call C */
1137 BPF_RAW_INSN(BPF_JMP|BPF_CALL, 0, 1, 0, 1), /* call D */
1140 BPF_RAW_INSN(BPF_JMP|BPF_CALL, 0, 1, 0, 1), /* call E */
1143 BPF_RAW_INSN(BPF_JMP|BPF_CALL, 0, 1, 0, 1), /* call F */
1146 BPF_RAW_INSN(BPF_JMP|BPF_CALL, 0, 1, 0, 1), /* call G */
1149 BPF_RAW_INSN(BPF_JMP|BPF_CALL, 0, 1, 0, 1), /* call H */
1152 BPF_MOV64_IMM(BPF_REG_0, 0),
1155 .prog_type = BPF_PROG_TYPE_XDP,
1156 .errstr = "call stack",
1160 "calls: stack depth check in dead code",
1163 BPF_MOV64_IMM(BPF_REG_1, 0),
1164 BPF_RAW_INSN(BPF_JMP|BPF_CALL, 0, 1, 0, 1), /* call A */
1167 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, 1),
1168 BPF_RAW_INSN(BPF_JMP|BPF_CALL, 0, 1, 0, 2), /* call B */
1169 BPF_MOV64_IMM(BPF_REG_0, 0),
1172 BPF_RAW_INSN(BPF_JMP|BPF_CALL, 0, 1, 0, 1), /* call C */
1175 BPF_RAW_INSN(BPF_JMP|BPF_CALL, 0, 1, 0, 1), /* call D */
1178 BPF_RAW_INSN(BPF_JMP|BPF_CALL, 0, 1, 0, 1), /* call E */
1181 BPF_RAW_INSN(BPF_JMP|BPF_CALL, 0, 1, 0, 1), /* call F */
1184 BPF_RAW_INSN(BPF_JMP|BPF_CALL, 0, 1, 0, 1), /* call G */
1187 BPF_RAW_INSN(BPF_JMP|BPF_CALL, 0, 1, 0, 1), /* call H */
1190 BPF_MOV64_IMM(BPF_REG_0, 0),
1193 .prog_type = BPF_PROG_TYPE_XDP,
1194 .errstr = "call stack",
1198 "calls: spill into caller stack frame",
1200 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
1201 BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
1202 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -8),
1203 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1),
1205 BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_1, 0),
1206 BPF_MOV64_IMM(BPF_REG_0, 0),
1209 .prog_type = BPF_PROG_TYPE_XDP,
1210 .errstr = "cannot spill",
1214 "calls: write into caller stack frame",
1216 BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
1217 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -8),
1218 BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
1219 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 2),
1220 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_6, 0),
1222 BPF_ST_MEM(BPF_DW, BPF_REG_1, 0, 42),
1223 BPF_MOV64_IMM(BPF_REG_0, 0),
1226 .prog_type = BPF_PROG_TYPE_XDP,
1231 "calls: write into callee stack frame",
1233 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 2),
1234 BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, 42),
1236 BPF_MOV64_REG(BPF_REG_0, BPF_REG_10),
1237 BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, -8),
1240 .prog_type = BPF_PROG_TYPE_XDP,
1241 .errstr = "cannot return stack pointer",
1245 "calls: two calls with stack write and void return",
1248 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
1249 BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
1250 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -8),
1251 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
1252 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -16),
1253 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 2),
1254 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_10, -16),
1258 BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
1259 BPF_MOV64_REG(BPF_REG_7, BPF_REG_2),
1260 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 3),
1261 BPF_MOV64_REG(BPF_REG_1, BPF_REG_7),
1262 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1),
1266 /* write into stack frame of main prog */
1267 BPF_ST_MEM(BPF_DW, BPF_REG_1, 0, 0),
1268 BPF_EXIT_INSN(), /* void return */
1270 .prog_type = BPF_PROG_TYPE_XDP,
1274 "calls: ambiguous return value",
1276 BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
1277 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 5),
1278 BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
1279 BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
1280 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 2),
1281 BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
1283 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, 1),
1284 BPF_MOV64_IMM(BPF_REG_0, 0),
1287 .errstr_unpriv = "allowed for",
1288 .result_unpriv = REJECT,
1289 .errstr = "R0 !read_ok",
/* A subprog performs a map lookup and stores the resulting pointer into
 * the caller's stack slot; main prog fetches both slots, null-checks each,
 * and writes through them. Expected verdict fields are not visible in this
 * extract (presumably ACCEPT — the writes are guarded by null checks).
 */
1293 "calls: two calls that return map_value",
1296 /* pass fp-16, fp-8 into a function */
1297 BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
1298 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -8),
1299 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
1300 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -16),
1301 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 8),
1303 /* fetch map_value_ptr from the stack of this function */
1304 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_10, -8),
1305 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 1),
1306 /* write into map value */
1307 BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, 0),
1308 /* fetch second map_value_ptr from the stack */
1309 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_10, -16),
1310 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 1),
1311 /* write into map value */
1312 BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, 0),
1313 BPF_MOV64_IMM(BPF_REG_0, 0),
1317 /* call 3rd function twice */
1318 BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
1319 BPF_MOV64_REG(BPF_REG_7, BPF_REG_2),
1320 /* first time with fp-8 */
1321 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 3),
1322 BPF_MOV64_REG(BPF_REG_1, BPF_REG_7),
1323 /* second time with fp-16 */
1324 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1),
1328 BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
1329 /* lookup from map */
1330 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
1331 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
1332 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
1333 BPF_LD_MAP_FD(BPF_REG_1, 0),
1334 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem),
1335 /* write map_value_ptr into stack frame of main prog */
1336 BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_0, 0),
1337 BPF_MOV64_IMM(BPF_REG_0, 0),
1338 BPF_EXIT_INSN(), /* return 0 */
1340 .prog_type = BPF_PROG_TYPE_XDP,
1341 .fixup_map_hash_8b = { 23 },
/* Like the previous test, but the lookup subprog returns a boolean
 * (1 = pointer stored, 0 = lookup failed) and the middle subprog only
 * dereferences a stored pointer after checking that flag == 1, which is
 * the correct guard. Expected verdict fields are not visible here.
 */
1345 "calls: two calls that return map_value with bool condition",
1348 /* pass fp-16, fp-8 into a function */
1349 BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
1350 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -8),
1351 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
1352 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -16),
1353 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 2),
1354 BPF_MOV64_IMM(BPF_REG_0, 0),
1358 /* call 3rd function twice */
1359 BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
1360 BPF_MOV64_REG(BPF_REG_7, BPF_REG_2),
1361 /* first time with fp-8 */
1362 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 9),
1363 BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 1, 2),
1364 /* fetch map_value_ptr from the stack of this function */
1365 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_6, 0),
1366 /* write into map value */
1367 BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, 0),
1368 BPF_MOV64_REG(BPF_REG_1, BPF_REG_7),
1369 /* second time with fp-16 */
1370 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 4),
1371 BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 1, 2),
1372 /* fetch second map_value_ptr from the stack */
1373 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_7, 0),
1374 /* write into map value */
1375 BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, 0),
1379 BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
1380 /* lookup from map */
1381 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
1382 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
1383 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
1384 BPF_LD_MAP_FD(BPF_REG_1, 0),
1385 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem),
1386 BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 2),
1387 BPF_MOV64_IMM(BPF_REG_0, 0),
1388 BPF_EXIT_INSN(), /* return 0 */
1389 /* write map_value_ptr into stack frame of main prog */
1390 BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_0, 0),
1391 BPF_MOV64_IMM(BPF_REG_0, 1),
1392 BPF_EXIT_INSN(), /* return 1 */
1394 .prog_type = BPF_PROG_TYPE_XDP,
1395 .fixup_map_hash_8b = { 23 },
/* Negative variant of the bool-condition test: the second guard tests
 * R0 != 0 instead of R0 != 1, so fp-16 may be dereferenced even when the
 * lookup never stored a pointer there. The verifier must reject it with
 * "invalid read from stack R7 off=-16 size=8".
 */
1399 "calls: two calls that return map_value with incorrect bool check",
1402 /* pass fp-16, fp-8 into a function */
1403 BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
1404 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -8),
1405 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
1406 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -16),
1407 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 2),
1408 BPF_MOV64_IMM(BPF_REG_0, 0),
1412 /* call 3rd function twice */
1413 BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
1414 BPF_MOV64_REG(BPF_REG_7, BPF_REG_2),
1415 /* first time with fp-8 */
1416 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 9),
1417 BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 1, 2),
1418 /* fetch map_value_ptr from the stack of this function */
1419 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_6, 0),
1420 /* write into map value */
1421 BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, 0),
1422 BPF_MOV64_REG(BPF_REG_1, BPF_REG_7),
1423 /* second time with fp-16 */
1424 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 4),
1425 BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 2), /* wrong guard: should test against 1 */
1426 /* fetch second map_value_ptr from the stack */
1427 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_7, 0),
1428 /* write into map value */
1429 BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, 0),
1433 BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
1434 /* lookup from map */
1435 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
1436 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
1437 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
1438 BPF_LD_MAP_FD(BPF_REG_1, 0),
1439 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem),
1440 BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 2),
1441 BPF_MOV64_IMM(BPF_REG_0, 0),
1442 BPF_EXIT_INSN(), /* return 0 */
1443 /* write map_value_ptr into stack frame of main prog */
1444 BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_0, 0),
1445 BPF_MOV64_IMM(BPF_REG_0, 1),
1446 BPF_EXIT_INSN(), /* return 1 */
1448 .prog_type = BPF_PROG_TYPE_XDP,
1449 .fixup_map_hash_8b = { 23 },
1451 .errstr = "invalid read from stack R7 off=-16 size=8",
/* Map-value pointers are stored into the caller's stack and their validity
 * is tracked via 0/1 flags (R8/R9) passed as args. The second write uses
 * offset 2 with an 8-byte store into an 8-byte value, so the verifier must
 * reject: "invalid access to map value, value_size=8 off=2 size=8".
 */
1454 "calls: two calls that receive map_value via arg=ptr_stack_of_caller. test1",
1457 /* pass fp-16, fp-8 into a function */
1458 BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
1459 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -8),
1460 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
1461 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -16),
1462 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 2),
1463 BPF_MOV64_IMM(BPF_REG_0, 0),
1467 BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
1468 BPF_MOV64_REG(BPF_REG_7, BPF_REG_2),
1469 /* 1st lookup from map */
1470 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
1471 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
1472 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
1473 BPF_LD_MAP_FD(BPF_REG_1, 0),
1474 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem),
1475 BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 2),
1476 BPF_MOV64_IMM(BPF_REG_8, 0),
1477 BPF_JMP_IMM(BPF_JA, 0, 0, 2),
1478 /* write map_value_ptr into stack frame of main prog at fp-8 */
1479 BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_0, 0),
1480 BPF_MOV64_IMM(BPF_REG_8, 1),
1482 /* 2nd lookup from map */
1483 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10), /* 20 */
1484 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
1485 BPF_LD_MAP_FD(BPF_REG_1, 0),
1486 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, /* 24 */
1487 BPF_FUNC_map_lookup_elem),
1488 BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 2),
1489 BPF_MOV64_IMM(BPF_REG_9, 0),
1490 BPF_JMP_IMM(BPF_JA, 0, 0, 2),
1491 /* write map_value_ptr into stack frame of main prog at fp-16 */
1492 BPF_STX_MEM(BPF_DW, BPF_REG_7, BPF_REG_0, 0),
1493 BPF_MOV64_IMM(BPF_REG_9, 1),
1495 /* call 3rd func with fp-8, 0|1, fp-16, 0|1 */
1496 BPF_MOV64_REG(BPF_REG_1, BPF_REG_6), /* 30 */
1497 BPF_MOV64_REG(BPF_REG_2, BPF_REG_8),
1498 BPF_MOV64_REG(BPF_REG_3, BPF_REG_7),
1499 BPF_MOV64_REG(BPF_REG_4, BPF_REG_9),
1500 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1), /* 34 */
1504 /* if arg2 == 1 do *arg1 = 0 */
1505 BPF_JMP_IMM(BPF_JNE, BPF_REG_2, 1, 2),
1506 /* fetch map_value_ptr from the stack of this function */
1507 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, 0),
1508 /* write into map value */
1509 BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, 0),
1511 /* if arg4 == 1 do *arg3 = 0 */
1512 BPF_JMP_IMM(BPF_JNE, BPF_REG_4, 1, 2),
1513 /* fetch map_value_ptr from the stack of this function */
1514 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_3, 0),
1515 /* write into map value */
1516 BPF_ST_MEM(BPF_DW, BPF_REG_0, 2, 0), /* off=2 with 8-byte store -> rejected */
1519 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
1520 .fixup_map_hash_8b = { 12, 22 },
1522 .errstr = "invalid access to map value, value_size=8 off=2 size=8",
1523 .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
/* Same construction as test1, but both guarded writes use offset 0, which
 * fits in the 8-byte map value. Expected verdict fields are not visible in
 * this extract (presumably ACCEPT, as the off=2 defect of test1 is gone).
 */
1526 "calls: two calls that receive map_value via arg=ptr_stack_of_caller. test2",
1529 /* pass fp-16, fp-8 into a function */
1530 BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
1531 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -8),
1532 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
1533 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -16),
1534 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 2),
1535 BPF_MOV64_IMM(BPF_REG_0, 0),
1539 BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
1540 BPF_MOV64_REG(BPF_REG_7, BPF_REG_2),
1541 /* 1st lookup from map */
1542 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
1543 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
1544 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
1545 BPF_LD_MAP_FD(BPF_REG_1, 0),
1546 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem),
1547 BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 2),
1548 BPF_MOV64_IMM(BPF_REG_8, 0),
1549 BPF_JMP_IMM(BPF_JA, 0, 0, 2),
1550 /* write map_value_ptr into stack frame of main prog at fp-8 */
1551 BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_0, 0),
1552 BPF_MOV64_IMM(BPF_REG_8, 1),
1554 /* 2nd lookup from map */
1555 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10), /* 20 */
1556 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
1557 BPF_LD_MAP_FD(BPF_REG_1, 0),
1558 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, /* 24 */
1559 BPF_FUNC_map_lookup_elem),
1560 BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 2),
1561 BPF_MOV64_IMM(BPF_REG_9, 0),
1562 BPF_JMP_IMM(BPF_JA, 0, 0, 2),
1563 /* write map_value_ptr into stack frame of main prog at fp-16 */
1564 BPF_STX_MEM(BPF_DW, BPF_REG_7, BPF_REG_0, 0),
1565 BPF_MOV64_IMM(BPF_REG_9, 1),
1567 /* call 3rd func with fp-8, 0|1, fp-16, 0|1 */
1568 BPF_MOV64_REG(BPF_REG_1, BPF_REG_6), /* 30 */
1569 BPF_MOV64_REG(BPF_REG_2, BPF_REG_8),
1570 BPF_MOV64_REG(BPF_REG_3, BPF_REG_7),
1571 BPF_MOV64_REG(BPF_REG_4, BPF_REG_9),
1572 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1), /* 34 */
1576 /* if arg2 == 1 do *arg1 = 0 */
1577 BPF_JMP_IMM(BPF_JNE, BPF_REG_2, 1, 2),
1578 /* fetch map_value_ptr from the stack of this function */
1579 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, 0),
1580 /* write into map value */
1581 BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, 0),
1583 /* if arg4 == 1 do *arg3 = 0 */
1584 BPF_JMP_IMM(BPF_JNE, BPF_REG_4, 1, 2),
1585 /* fetch map_value_ptr from the stack of this function */
1586 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_3, 0),
1587 /* write into map value */
1588 BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, 0),
1591 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
1592 .fixup_map_hash_8b = { 12, 22 },
/* Variant of test1 using plain jumps instead of BPF_CALLs to reach the
 * same logic (single stack frame, scratch slot at fp-24). The final write
 * again uses off=2 with an 8-byte store into an 8-byte value, so the
 * verifier must reject: "invalid access to map value, value_size=8 off=2
 * size=8".
 */
1596 "calls: two jumps that receive map_value via arg=ptr_stack_of_jumper. test3",
1599 /* pass fp-16, fp-8 into a function */
1600 BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
1601 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -8),
1602 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
1603 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -16),
1604 BPF_JMP_IMM(BPF_JNE, BPF_REG_1, 0, 2),
1605 BPF_MOV64_IMM(BPF_REG_0, 0),
1609 BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
1610 BPF_MOV64_REG(BPF_REG_7, BPF_REG_2),
1611 /* 1st lookup from map */
1612 BPF_ST_MEM(BPF_DW, BPF_REG_10, -24, 0),
1613 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
1614 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -24),
1615 BPF_LD_MAP_FD(BPF_REG_1, 0),
1616 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem),
1617 BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 2),
1618 BPF_MOV64_IMM(BPF_REG_8, 0),
1619 BPF_JMP_IMM(BPF_JA, 0, 0, 2),
1620 /* write map_value_ptr into stack frame of main prog at fp-8 */
1621 BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_0, 0),
1622 BPF_MOV64_IMM(BPF_REG_8, 1),
1624 /* 2nd lookup from map */
1625 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
1626 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -24),
1627 BPF_LD_MAP_FD(BPF_REG_1, 0),
1628 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem),
1629 BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 2),
1630 BPF_MOV64_IMM(BPF_REG_9, 0), // 26
1631 BPF_JMP_IMM(BPF_JA, 0, 0, 2),
1632 /* write map_value_ptr into stack frame of main prog at fp-16 */
1633 BPF_STX_MEM(BPF_DW, BPF_REG_7, BPF_REG_0, 0),
1634 BPF_MOV64_IMM(BPF_REG_9, 1),
1636 /* call 3rd func with fp-8, 0|1, fp-16, 0|1 */
1637 BPF_MOV64_REG(BPF_REG_1, BPF_REG_6), // 30
1638 BPF_MOV64_REG(BPF_REG_2, BPF_REG_8),
1639 BPF_MOV64_REG(BPF_REG_3, BPF_REG_7),
1640 BPF_MOV64_REG(BPF_REG_4, BPF_REG_9),
1641 BPF_JMP_IMM(BPF_JNE, BPF_REG_1, 0, 1), // 34
1642 BPF_JMP_IMM(BPF_JA, 0, 0, -30),
1645 /* if arg2 == 1 do *arg1 = 0 */
1646 BPF_JMP_IMM(BPF_JNE, BPF_REG_2, 1, 2),
1647 /* fetch map_value_ptr from the stack of this function */
1648 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, 0),
1649 /* write into map value */
1650 BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, 0),
1652 /* if arg4 == 1 do *arg3 = 0 */
1653 BPF_JMP_IMM(BPF_JNE, BPF_REG_4, 1, 2),
1654 /* fetch map_value_ptr from the stack of this function */
1655 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_3, 0),
1656 /* write into map value */
1657 BPF_ST_MEM(BPF_DW, BPF_REG_0, 2, 0), /* off=2 with 8-byte store -> rejected */
1658 BPF_JMP_IMM(BPF_JA, 0, 0, -8),
1660 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
1661 .fixup_map_hash_8b = { 12, 22 },
1663 .errstr = "invalid access to map value, value_size=8 off=2 size=8",
1664 .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
/* The lookup subprog stores the raw map_value_or_null result into the
 * caller's stack BEFORE null-checking it; validity is tracked separately
 * in the 0/1 flags R8/R9, and the leaf subprog only dereferences when the
 * matching flag == 1. Expected verdict fields are not visible here.
 */
1667 "calls: two calls that receive map_value_ptr_or_null via arg. test1",
1670 /* pass fp-16, fp-8 into a function */
1671 BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
1672 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -8),
1673 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
1674 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -16),
1675 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 2),
1676 BPF_MOV64_IMM(BPF_REG_0, 0),
1680 BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
1681 BPF_MOV64_REG(BPF_REG_7, BPF_REG_2),
1682 /* 1st lookup from map */
1683 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
1684 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
1685 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
1686 BPF_LD_MAP_FD(BPF_REG_1, 0),
1687 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem),
1688 /* write map_value_ptr_or_null into stack frame of main prog at fp-8 */
1689 BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_0, 0),
1690 BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 2),
1691 BPF_MOV64_IMM(BPF_REG_8, 0),
1692 BPF_JMP_IMM(BPF_JA, 0, 0, 1),
1693 BPF_MOV64_IMM(BPF_REG_8, 1),
1695 /* 2nd lookup from map */
1696 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
1697 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
1698 BPF_LD_MAP_FD(BPF_REG_1, 0),
1699 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem),
1700 /* write map_value_ptr_or_null into stack frame of main prog at fp-16 */
1701 BPF_STX_MEM(BPF_DW, BPF_REG_7, BPF_REG_0, 0),
1702 BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 2),
1703 BPF_MOV64_IMM(BPF_REG_9, 0),
1704 BPF_JMP_IMM(BPF_JA, 0, 0, 1),
1705 BPF_MOV64_IMM(BPF_REG_9, 1),
1707 /* call 3rd func with fp-8, 0|1, fp-16, 0|1 */
1708 BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
1709 BPF_MOV64_REG(BPF_REG_2, BPF_REG_8),
1710 BPF_MOV64_REG(BPF_REG_3, BPF_REG_7),
1711 BPF_MOV64_REG(BPF_REG_4, BPF_REG_9),
1712 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1),
1716 /* if arg2 == 1 do *arg1 = 0 */
1717 BPF_JMP_IMM(BPF_JNE, BPF_REG_2, 1, 2),
1718 /* fetch map_value_ptr from the stack of this function */
1719 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, 0),
1720 /* write into map value */
1721 BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, 0),
1723 /* if arg4 == 1 do *arg3 = 0 */
1724 BPF_JMP_IMM(BPF_JNE, BPF_REG_4, 1, 2),
1725 /* fetch map_value_ptr from the stack of this function */
1726 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_3, 0),
1727 /* write into map value */
1728 BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, 0),
1731 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
1732 .fixup_map_hash_8b = { 12, 22 },
/* Negative variant of the ptr_or_null test: the leaf's second guard tests
 * arg4 == 0 (inverted polarity), so it dereferences fp-16 exactly when the
 * lookup FAILED and the slot holds scalar NULL. The verifier must reject
 * with "R0 invalid mem access 'scalar'".
 */
1736 "calls: two calls that receive map_value_ptr_or_null via arg. test2",
1739 /* pass fp-16, fp-8 into a function */
1740 BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
1741 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -8),
1742 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
1743 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -16),
1744 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 2),
1745 BPF_MOV64_IMM(BPF_REG_0, 0),
1749 BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
1750 BPF_MOV64_REG(BPF_REG_7, BPF_REG_2),
1751 /* 1st lookup from map */
1752 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
1753 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
1754 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
1755 BPF_LD_MAP_FD(BPF_REG_1, 0),
1756 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem),
1757 /* write map_value_ptr_or_null into stack frame of main prog at fp-8 */
1758 BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_0, 0),
1759 BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 2),
1760 BPF_MOV64_IMM(BPF_REG_8, 0),
1761 BPF_JMP_IMM(BPF_JA, 0, 0, 1),
1762 BPF_MOV64_IMM(BPF_REG_8, 1),
1764 /* 2nd lookup from map */
1765 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
1766 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
1767 BPF_LD_MAP_FD(BPF_REG_1, 0),
1768 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem),
1769 /* write map_value_ptr_or_null into stack frame of main prog at fp-16 */
1770 BPF_STX_MEM(BPF_DW, BPF_REG_7, BPF_REG_0, 0),
1771 BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 2),
1772 BPF_MOV64_IMM(BPF_REG_9, 0),
1773 BPF_JMP_IMM(BPF_JA, 0, 0, 1),
1774 BPF_MOV64_IMM(BPF_REG_9, 1),
1776 /* call 3rd func with fp-8, 0|1, fp-16, 0|1 */
1777 BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
1778 BPF_MOV64_REG(BPF_REG_2, BPF_REG_8),
1779 BPF_MOV64_REG(BPF_REG_3, BPF_REG_7),
1780 BPF_MOV64_REG(BPF_REG_4, BPF_REG_9),
1781 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1),
1785 /* if arg2 == 1 do *arg1 = 0 */
1786 BPF_JMP_IMM(BPF_JNE, BPF_REG_2, 1, 2),
1787 /* fetch map_value_ptr from the stack of this function */
1788 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, 0),
1789 /* write into map value */
1790 BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, 0),
1792 /* if arg4 == 0 do *arg3 = 0 */
1793 BPF_JMP_IMM(BPF_JNE, BPF_REG_4, 0, 2), /* inverted guard: derefs on failed lookup */
1794 /* fetch map_value_ptr from the stack of this function */
1795 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_3, 0),
1796 /* write into map value */
1797 BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, 0),
1800 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
1801 .fixup_map_hash_8b = { 12, 22 },
1803 .errstr = "R0 invalid mem access 'scalar'",
/* Subprog spills an unchecked pkt_ptr into the caller's stack, then the
 * range check happens before the spilled pointer is read back and used,
 * which is safe. SCHED_CLS; .retval is checked (POINTER_VALUE).
 */
1806 "calls: pkt_ptr spill into caller stack",
1808 BPF_MOV64_REG(BPF_REG_4, BPF_REG_10),
1809 BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, -8),
1810 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1),
1814 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
1815 offsetof(struct __sk_buff, data)),
1816 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
1817 offsetof(struct __sk_buff, data_end)),
1818 BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
1819 BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
1820 /* spill unchecked pkt_ptr into stack of caller */
1821 BPF_STX_MEM(BPF_DW, BPF_REG_4, BPF_REG_2, 0),
1822 BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 2),
1823 /* now the pkt range is verified, read pkt_ptr from stack */
1824 BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_4, 0),
1825 /* write 4 bytes into packet */
1826 BPF_ST_MEM(BPF_W, BPF_REG_2, 0, 0),
1830 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
1831 .retval = POINTER_VALUE,
1832 .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
/* The caller reads the spilled pkt_ptr back AFTER the subprog returns and
 * writes through it unconditionally; the in-callee range check does not
 * cover that path, so the verifier must reject with "invalid access to
 * packet".
 */
1835 "calls: pkt_ptr spill into caller stack 2",
1837 BPF_MOV64_REG(BPF_REG_4, BPF_REG_10),
1838 BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, -8),
1839 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 3),
1840 /* Marking is still kept, but not in all cases safe. */
1841 BPF_LDX_MEM(BPF_DW, BPF_REG_4, BPF_REG_10, -8),
1842 BPF_ST_MEM(BPF_W, BPF_REG_4, 0, 0),
1846 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
1847 offsetof(struct __sk_buff, data)),
1848 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
1849 offsetof(struct __sk_buff, data_end)),
1850 BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
1851 BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
1852 /* spill unchecked pkt_ptr into stack of caller */
1853 BPF_STX_MEM(BPF_DW, BPF_REG_4, BPF_REG_2, 0),
1854 BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 2),
1855 /* now the pkt range is verified, read pkt_ptr from stack */
1856 BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_4, 0),
1857 /* write 4 bytes into packet */
1858 BPF_ST_MEM(BPF_W, BPF_REG_2, 0, 0),
1861 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
1862 .errstr = "invalid access to packet",
1864 .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
/* Subprog returns a flag (R5: 1 = range verified) and the caller only
 * dereferences the spilled pkt_ptr when that flag is nonzero, so the
 * spilled marking stays safe. Expected verdict fields are not visible in
 * this extract.
 */
1867 "calls: pkt_ptr spill into caller stack 3",
1869 BPF_MOV64_REG(BPF_REG_4, BPF_REG_10),
1870 BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, -8),
1871 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 4),
1872 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 2),
1873 /* Marking is still kept and safe here. */
1874 BPF_LDX_MEM(BPF_DW, BPF_REG_4, BPF_REG_10, -8),
1875 BPF_ST_MEM(BPF_W, BPF_REG_4, 0, 0),
1879 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
1880 offsetof(struct __sk_buff, data)),
1881 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
1882 offsetof(struct __sk_buff, data_end)),
1883 BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
1884 BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
1885 /* spill unchecked pkt_ptr into stack of caller */
1886 BPF_STX_MEM(BPF_DW, BPF_REG_4, BPF_REG_2, 0),
1887 BPF_MOV64_IMM(BPF_REG_5, 0),
1888 BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 3),
1889 BPF_MOV64_IMM(BPF_REG_5, 1),
1890 /* now the pkt range is verified, read pkt_ptr from stack */
1891 BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_4, 0),
1892 /* write 4 bytes into packet */
1893 BPF_ST_MEM(BPF_W, BPF_REG_2, 0, 0),
1894 BPF_MOV64_REG(BPF_REG_0, BPF_REG_5),
1897 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
1900 .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
/* As in test 3 the subprog reports success via R5, but it never reads the
 * spilled pointer back itself; the caller's flag-guarded read checks that
 * the range marking propagated through the spill. Expected verdict fields
 * are not visible in this extract.
 */
1903 "calls: pkt_ptr spill into caller stack 4",
1905 BPF_MOV64_REG(BPF_REG_4, BPF_REG_10),
1906 BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, -8),
1907 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 4),
1908 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 2),
1909 /* Check marking propagated. */
1910 BPF_LDX_MEM(BPF_DW, BPF_REG_4, BPF_REG_10, -8),
1911 BPF_ST_MEM(BPF_W, BPF_REG_4, 0, 0),
1915 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
1916 offsetof(struct __sk_buff, data)),
1917 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
1918 offsetof(struct __sk_buff, data_end)),
1919 BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
1920 BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
1921 /* spill unchecked pkt_ptr into stack of caller */
1922 BPF_STX_MEM(BPF_DW, BPF_REG_4, BPF_REG_2, 0),
1923 BPF_MOV64_IMM(BPF_REG_5, 0),
1924 BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 2),
1925 BPF_MOV64_IMM(BPF_REG_5, 1),
1926 /* don't read back pkt_ptr from stack here */
1927 /* write 4 bytes into packet */
1928 BPF_ST_MEM(BPF_W, BPF_REG_2, 0, 0),
1929 BPF_MOV64_REG(BPF_REG_0, BPF_REG_5),
1932 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
1935 .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
/* The caller first spills its ctx pointer into fp-8, then the subprog
 * conditionally overwrites the same slot with a checked pkt_ptr. The same
 * spill/fill slot ends up holding two different pointer types, which the
 * verifier must reject: "same insn cannot be used with different".
 */
1938 "calls: pkt_ptr spill into caller stack 5",
1940 BPF_MOV64_REG(BPF_REG_4, BPF_REG_10),
1941 BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, -8),
1942 BPF_STX_MEM(BPF_DW, BPF_REG_4, BPF_REG_1, 0),
1943 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 3),
1944 BPF_LDX_MEM(BPF_DW, BPF_REG_4, BPF_REG_10, -8),
1945 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_4, 0),
1949 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
1950 offsetof(struct __sk_buff, data)),
1951 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
1952 offsetof(struct __sk_buff, data_end)),
1953 BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
1954 BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
1955 BPF_MOV64_IMM(BPF_REG_5, 0),
1956 BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 3),
1957 /* spill checked pkt_ptr into stack of caller */
1958 BPF_STX_MEM(BPF_DW, BPF_REG_4, BPF_REG_2, 0),
1959 BPF_MOV64_IMM(BPF_REG_5, 1),
1960 /* don't read back pkt_ptr from stack here */
1961 /* write 4 bytes into packet */
1962 BPF_ST_MEM(BPF_W, BPF_REG_2, 0, 0),
1963 BPF_MOV64_REG(BPF_REG_0, BPF_REG_5),
1966 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
1967 .errstr = "same insn cannot be used with different",
1969 .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
/* The caller pre-fills fp-8 with data_end (a pkt_end pointer), the subprog
 * conditionally overwrites it with a pkt_ptr; the caller's unconditional
 * fill-and-deref can therefore hit the wrong pointer type and must be
 * rejected: "R4 invalid mem access".
 */
1972 "calls: pkt_ptr spill into caller stack 6",
1974 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
1975 offsetof(struct __sk_buff, data_end)),
1976 BPF_MOV64_REG(BPF_REG_4, BPF_REG_10),
1977 BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, -8),
1978 BPF_STX_MEM(BPF_DW, BPF_REG_4, BPF_REG_2, 0),
1979 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 3),
1980 BPF_LDX_MEM(BPF_DW, BPF_REG_4, BPF_REG_10, -8),
1981 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_4, 0),
1985 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
1986 offsetof(struct __sk_buff, data)),
1987 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
1988 offsetof(struct __sk_buff, data_end)),
1989 BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
1990 BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
1991 BPF_MOV64_IMM(BPF_REG_5, 0),
1992 BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 3),
1993 /* spill checked pkt_ptr into stack of caller */
1994 BPF_STX_MEM(BPF_DW, BPF_REG_4, BPF_REG_2, 0),
1995 BPF_MOV64_IMM(BPF_REG_5, 1),
1996 /* don't read back pkt_ptr from stack here */
1997 /* write 4 bytes into packet */
1998 BPF_ST_MEM(BPF_W, BPF_REG_2, 0, 0),
1999 BPF_MOV64_REG(BPF_REG_0, BPF_REG_5),
2002 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
2003 .errstr = "R4 invalid mem access",
2005 .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
/* Like test 6 but fp-8 is pre-filled with scalar 0 instead of a pointer;
 * the slot may hold either a scalar or a pkt_ptr after the call, so the
 * caller's deref must be rejected: "R4 invalid mem access".
 */
2008 "calls: pkt_ptr spill into caller stack 7",
2010 BPF_MOV64_IMM(BPF_REG_2, 0),
2011 BPF_MOV64_REG(BPF_REG_4, BPF_REG_10),
2012 BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, -8),
2013 BPF_STX_MEM(BPF_DW, BPF_REG_4, BPF_REG_2, 0),
2014 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 3),
2015 BPF_LDX_MEM(BPF_DW, BPF_REG_4, BPF_REG_10, -8),
2016 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_4, 0),
2020 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
2021 offsetof(struct __sk_buff, data)),
2022 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
2023 offsetof(struct __sk_buff, data_end)),
2024 BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
2025 BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
2026 BPF_MOV64_IMM(BPF_REG_5, 0),
2027 BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 3),
2028 /* spill checked pkt_ptr into stack of caller */
2029 BPF_STX_MEM(BPF_DW, BPF_REG_4, BPF_REG_2, 0),
2030 BPF_MOV64_IMM(BPF_REG_5, 1),
2031 /* don't read back pkt_ptr from stack here */
2032 /* write 4 bytes into packet */
2033 BPF_ST_MEM(BPF_W, BPF_REG_2, 0, 0),
2034 BPF_MOV64_REG(BPF_REG_0, BPF_REG_5),
2037 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
2038 .errstr = "R4 invalid mem access",
2040 .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
/* The caller range-checks its own pkt_ptr (JLE guard) before spilling it
 * into fp-8 and calling the subprog, which may overwrite the slot with its
 * own checked pkt_ptr; the caller then fills and dereferences. Expected
 * verdict fields are not visible in this extract.
 */
2043 "calls: pkt_ptr spill into caller stack 8",
2045 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
2046 offsetof(struct __sk_buff, data)),
2047 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
2048 offsetof(struct __sk_buff, data_end)),
2049 BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
2050 BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
2051 BPF_JMP_REG(BPF_JLE, BPF_REG_0, BPF_REG_3, 1),
2053 BPF_MOV64_REG(BPF_REG_4, BPF_REG_10),
2054 BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, -8),
2055 BPF_STX_MEM(BPF_DW, BPF_REG_4, BPF_REG_2, 0),
2056 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 3),
2057 BPF_LDX_MEM(BPF_DW, BPF_REG_4, BPF_REG_10, -8),
2058 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_4, 0),
2062 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
2063 offsetof(struct __sk_buff, data)),
2064 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
2065 offsetof(struct __sk_buff, data_end)),
2066 BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
2067 BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
2068 BPF_MOV64_IMM(BPF_REG_5, 0),
2069 BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 3),
2070 /* spill checked pkt_ptr into stack of caller */
2071 BPF_STX_MEM(BPF_DW, BPF_REG_4, BPF_REG_2, 0),
2072 BPF_MOV64_IMM(BPF_REG_5, 1),
2073 /* don't read back pkt_ptr from stack here */
2074 /* write 4 bytes into packet */
2075 BPF_ST_MEM(BPF_W, BPF_REG_2, 0, 0),
2076 BPF_MOV64_REG(BPF_REG_0, BPF_REG_5),
2079 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
2081 .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
/* Like test 8, but the subprog spills its pkt_ptr BEFORE its own range
 * check, so the caller's unconditional fill-and-deref can see an
 * unverified pointer and must be rejected: "invalid access to packet".
 */
2084 "calls: pkt_ptr spill into caller stack 9",
2086 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
2087 offsetof(struct __sk_buff, data)),
2088 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
2089 offsetof(struct __sk_buff, data_end)),
2090 BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
2091 BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
2092 BPF_JMP_REG(BPF_JLE, BPF_REG_0, BPF_REG_3, 1),
2094 BPF_MOV64_REG(BPF_REG_4, BPF_REG_10),
2095 BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, -8),
2096 BPF_STX_MEM(BPF_DW, BPF_REG_4, BPF_REG_2, 0),
2097 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 3),
2098 BPF_LDX_MEM(BPF_DW, BPF_REG_4, BPF_REG_10, -8),
2099 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_4, 0),
2103 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
2104 offsetof(struct __sk_buff, data)),
2105 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
2106 offsetof(struct __sk_buff, data_end)),
2107 BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
2108 BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
2109 BPF_MOV64_IMM(BPF_REG_5, 0),
2110 /* spill unchecked pkt_ptr into stack of caller */
2111 BPF_STX_MEM(BPF_DW, BPF_REG_4, BPF_REG_2, 0),
2112 BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 2),
2113 BPF_MOV64_IMM(BPF_REG_5, 1),
2114 /* don't read back pkt_ptr from stack here */
2115 /* write 4 bytes into packet */
2116 BPF_ST_MEM(BPF_W, BPF_REG_2, 0, 0),
2117 BPF_MOV64_REG(BPF_REG_0, BPF_REG_5),
2120 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
2121 .errstr = "invalid access to packet",
2123 .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
/* The caller zero-initializes fp-8; the subprog either leaves it zero
 * (ctx == 0 path) or overwrites it with a map_value_or_null from a lookup.
 * The caller null-checks the slot before storing through it, covering both
 * cases. Expected verdict fields are not visible in this extract.
 */
2126 "calls: caller stack init to zero or map_value_or_null",
2128 BPF_MOV64_IMM(BPF_REG_0, 0),
2129 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -8),
2130 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
2131 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
2132 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 4),
2133 /* fetch map_value_or_null or const_zero from stack */
2134 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_10, -8),
2135 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 1),
2136 /* store into map_value */
2137 BPF_ST_MEM(BPF_W, BPF_REG_0, 0, 0),
2141 /* if (ctx == 0) return; */
2142 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, 8),
2143 /* else bpf_map_lookup() and *(fp - 8) = r0 */
2144 BPF_MOV64_REG(BPF_REG_6, BPF_REG_2),
2145 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
2146 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
2147 BPF_LD_MAP_FD(BPF_REG_1, 0),
2148 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
2149 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem),
2150 /* write map_value_ptr_or_null into stack frame of main prog at fp-8 */
2151 BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_0, 0),
2154 .fixup_map_hash_8b = { 13 },
2156 .prog_type = BPF_PROG_TYPE_XDP,
/* Pruning regression test: one branch skips the zero-init of fp-8; without
 * liveness read marks from the map_lookup path, state pruning would wrongly
 * accept the program. Must reject with "invalid indirect read from stack
 * R2 off -8+0 size 8".
 */
2159 "calls: stack init to zero and pruning",
2161 /* first make allocated_stack 16 byte */
2162 BPF_ST_MEM(BPF_DW, BPF_REG_10, -16, 0),
2163 /* now fork the execution such that the false branch
2164 * of JGT insn will be verified second and it skips zero
2165 * init of fp-8 stack slot. If stack liveness marking
2166 * is missing live_read marks from call map_lookup
2167 * processing then pruning will incorrectly assume
2168 * that fp-8 stack slot was unused in the fall-through
2169 * branch and will accept the program incorrectly
2171 BPF_JMP_IMM(BPF_JGT, BPF_REG_1, 2, 2),
2172 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
2173 BPF_JMP_IMM(BPF_JA, 0, 0, 0),
2174 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
2175 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
2176 BPF_LD_MAP_FD(BPF_REG_1, 0),
2177 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem),
2180 .fixup_map_hash_48b = { 6 },
2181 .errstr = "invalid indirect read from stack R2 off -8+0 size 8",
2183 .prog_type = BPF_PROG_TYPE_XDP,
/* A subprog's first instruction reads one byte of the ctx pointer in R1.
 * For a socket filter this is rejected for unprivileged users ("loading/
 * calling other bpf or kernel functions are allowed for ...").
 */
2186 "calls: ctx read at start of subprog",
2188 BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
2189 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 5),
2190 BPF_JMP_REG(BPF_JSGT, BPF_REG_0, BPF_REG_0, 0),
2191 BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
2192 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 2),
2193 BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
2195 BPF_LDX_MEM(BPF_B, BPF_REG_9, BPF_REG_1, 0),
2196 BPF_MOV64_IMM(BPF_REG_0, 0),
2199 .prog_type = BPF_PROG_TYPE_SOCKET_FILTER,
2200 .errstr_unpriv = "loading/calling other bpf or kernel functions are allowed for",
2201 .result_unpriv = REJECT,
/* Pruning across call frames: R8 (random 0/1) in the caller decides
 * whether the subprog's uninitialized R1 read is reached; pruning must not
 * merge the two caller states. Must reject with "!read_ok" (and for
 * unprivileged with the bpf-calls message).
 */
2205 "calls: cross frame pruning",
2212 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_get_prandom_u32),
2213 BPF_MOV64_IMM(BPF_REG_8, 0),
2214 BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 1),
2215 BPF_MOV64_IMM(BPF_REG_8, 1),
2216 BPF_MOV64_REG(BPF_REG_1, BPF_REG_8),
2217 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 4),
2218 BPF_JMP_IMM(BPF_JEQ, BPF_REG_8, 1, 1),
2219 BPF_LDX_MEM(BPF_B, BPF_REG_9, BPF_REG_1, 0),
2220 BPF_MOV64_IMM(BPF_REG_0, 0),
2222 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, 0),
2225 .prog_type = BPF_PROG_TYPE_SOCKET_FILTER,
2226 .errstr_unpriv = "loading/calling other bpf or kernel functions are allowed for",
2227 .errstr = "!read_ok",
2231 "calls: cross frame pruning - liveness propagation",
2233 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_get_prandom_u32),
2234 BPF_MOV64_IMM(BPF_REG_8, 0),
2235 BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 1),
2236 BPF_MOV64_IMM(BPF_REG_8, 1),
2237 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_get_prandom_u32),
2238 BPF_MOV64_IMM(BPF_REG_9, 0),
2239 BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 1),
2240 BPF_MOV64_IMM(BPF_REG_9, 1),
2241 BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
2242 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 4),
2243 BPF_JMP_IMM(BPF_JEQ, BPF_REG_8, 1, 1),
2244 BPF_LDX_MEM(BPF_B, BPF_REG_1, BPF_REG_2, 0),
2245 BPF_MOV64_IMM(BPF_REG_0, 0),
2247 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, 0),
2250 .prog_type = BPF_PROG_TYPE_SOCKET_FILTER,
2251 .errstr_unpriv = "loading/calling other bpf or kernel functions are allowed for",
2252 .errstr = "!read_ok",