/* NOTE(review): this chunk is a fragmentary view of a BPF verifier test
 * table; the embedded original line numbers jump (2, 4, 8, ...), so the
 * struct braces, .insns = { } wrappers, BPF_EXIT_INSN() and .result
 * lines are missing from view.  Comments below state only what the
 * visible lines establish.
 */
/* Test: move frame pointer R10 into return register R0.  For an
 * unprivileged loader this must be rejected with "R0 leaks addr";
 * .retval = POINTER_VALUE suggests a privileged-accept path exists
 * on lines not visible here — TODO confirm against the full file.
 */
2 "unpriv: return pointer",
4 BPF_MOV64_REG(BPF_REG_0, BPF_REG_10),
8 .result_unpriv = REJECT,
9 .errstr_unpriv = "R0 leaks addr",
10 .retval = POINTER_VALUE,
/* Test: constant arithmetic on the R1 (ctx) pointer.  No .errstr lines
 * are visible in this view; the expected verdict lives on missing lines.
 */
13 "unpriv: add const to pointer",
15 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
16 BPF_MOV64_IMM(BPF_REG_0, 0),
/* Test: pointer += pointer (R1 += R10) — rejected with
 * "R1 pointer += pointer".
 */
22 "unpriv: add pointer to pointer",
24 BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_10),
25 BPF_MOV64_IMM(BPF_REG_0, 0),
29 .errstr = "R1 pointer += pointer",
/* Test: BPF_NEG on the R1 pointer — unprivileged load rejected with
 * "R1 pointer arithmetic".  (Fragmentary view: .insns wrapper and
 * .result lines are missing from this chunk.)
 */
32 "unpriv: neg pointer",
34 BPF_ALU64_IMM(BPF_NEG, BPF_REG_1, 0),
35 BPF_MOV64_IMM(BPF_REG_0, 0),
39 .result_unpriv = REJECT,
40 .errstr_unpriv = "R1 pointer arithmetic",
/* Test: conditional jump comparing pointer R1 against immediate 0 —
 * unprivileged load rejected with "R1 pointer comparison".
 */
43 "unpriv: cmp pointer with const",
45 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, 0),
46 BPF_MOV64_IMM(BPF_REG_0, 0),
50 .result_unpriv = REJECT,
51 .errstr_unpriv = "R1 pointer comparison",
/* Test: conditional jump comparing two pointers (R1 vs R10) —
 * unprivileged load rejected with "R10 pointer comparison".
 */
54 "unpriv: cmp pointer with pointer",
56 BPF_JMP_REG(BPF_JEQ, BPF_REG_1, BPF_REG_10, 0),
57 BPF_MOV64_IMM(BPF_REG_0, 0),
61 .result_unpriv = REJECT,
62 .errstr_unpriv = "R10 pointer comparison",
/* Test: calling bpf_trace_printk from an unprivileged TRACEPOINT
 * program.  Builds a stack string buffer (R1 = fp-8, R2 = len 8) and
 * calls the helper; unprivileged load rejected with
 * "unknown func bpf_trace_printk#6".
 */
65 "unpriv: check that printk is disallowed",
67 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
68 BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
69 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -8),
70 BPF_MOV64_IMM(BPF_REG_2, 8),
71 BPF_MOV64_REG(BPF_REG_3, BPF_REG_1),
72 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_trace_printk),
73 BPF_MOV64_IMM(BPF_REG_0, 0),
76 .errstr_unpriv = "unknown func bpf_trace_printk#6",
77 .result_unpriv = REJECT,
79 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
/* Test: passing a stack pointer as the value argument (R4 = fp-8) to
 * bpf_map_update_elem.  The map fd is patched into insn 3 via
 * .fixup_map_hash_8b; unprivileged load rejected with "R4 leaks addr".
 */
82 "unpriv: pass pointer to helper function",
84 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
85 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
86 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
87 BPF_LD_MAP_FD(BPF_REG_1, 0),
88 BPF_MOV64_REG(BPF_REG_3, BPF_REG_2),
89 BPF_MOV64_REG(BPF_REG_4, BPF_REG_2),
90 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_update_elem),
91 BPF_MOV64_IMM(BPF_REG_0, 0),
94 .fixup_map_hash_8b = { 3 },
95 .errstr_unpriv = "R4 leaks addr",
96 .result_unpriv = REJECT,
/* Test: spill R10 to fp-8, then use fp-8 as the key pointer for
 * bpf_map_lookup_elem.  Unprivileged load rejected with the
 * "invalid indirect read from stack" message (the spilled slot may not
 * be read indirectly by a helper as data).
 */
100 "unpriv: indirectly pass pointer on stack to helper function",
102 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_10, -8),
103 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
104 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
105 BPF_LD_MAP_FD(BPF_REG_1, 0),
106 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem),
107 BPF_MOV64_IMM(BPF_REG_0, 0),
110 .fixup_map_hash_8b = { 3 },
111 .errstr_unpriv = "invalid indirect read from stack R2 off -8+0 size 8",
112 .result_unpriv = REJECT,
/* Test: spill a pointer (R10) to fp-8 then overwrite part of the slot
 * with a 4-byte store — unpriv rejected: "attempt to corrupt spilled".
 */
116 "unpriv: mangle pointer on stack 1",
118 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_10, -8),
119 BPF_ST_MEM(BPF_W, BPF_REG_10, -8, 0),
120 BPF_MOV64_IMM(BPF_REG_0, 0),
123 .errstr_unpriv = "attempt to corrupt spilled",
124 .result_unpriv = REJECT,
/* Test: same as above but corrupting only the top byte (fp-1) of the
 * spilled pointer slot — same rejection.
 */
128 "unpriv: mangle pointer on stack 2",
130 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_10, -8),
131 BPF_ST_MEM(BPF_B, BPF_REG_10, -1, 0),
132 BPF_MOV64_IMM(BPF_REG_0, 0),
135 .errstr_unpriv = "attempt to corrupt spilled",
136 .result_unpriv = REJECT,
/* Test: 8-byte pointer spill followed by a 4-byte read of the slot —
 * rejected (for both priv levels, per the unqualified .errstr) with
 * "invalid size".
 */
140 "unpriv: read pointer from stack in small chunks",
142 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_10, -8),
143 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_10, -8),
144 BPF_MOV64_IMM(BPF_REG_0, 0),
147 .errstr = "invalid size",
/* Test: store the ctx pointer R1 into the ctx itself (*(u64 *)(r1+0)).
 * Privileged: "invalid bpf_context access"; unprivileged additionally
 * rejected with "R1 leaks addr".
 */
151 "unpriv: write pointer into ctx",
153 BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_1, 0),
154 BPF_MOV64_IMM(BPF_REG_0, 0),
157 .errstr_unpriv = "R1 leaks addr",
158 .result_unpriv = REJECT,
159 .errstr = "invalid bpf_context access",
/* Test: round-trip spill/fill of the ctx pointer through fp-8.  No
 * errstr lines are visible; the verdict (.result) lies on lines missing
 * from this view — presumably ACCEPT, TODO confirm.
 */
163 "unpriv: spill/fill of ctx",
165 BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_10),
166 BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, -8),
167 BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_1, 0),
168 BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_6, 0),
169 BPF_MOV64_IMM(BPF_REG_0, 0),
/* Test: spill/fill of ctx, then use the refilled R1 as ctx for the
 * get_hash_recalc helper (SCHED_CLS program).  Verdict lines not
 * visible in this view.
 */
175 "unpriv: spill/fill of ctx 2",
177 BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_10),
178 BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, -8),
179 BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_1, 0),
180 BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_6, 0),
181 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_get_hash_recalc),
182 BPF_MOV64_IMM(BPF_REG_0, 0),
186 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
/* Test: spill ctx to fp-8, then overwrite the slot with R10 before the
 * fill, so R1 becomes a frame pointer when passed to get_hash_recalc —
 * rejected: "R1 type=fp expected=ctx".
 */
189 "unpriv: spill/fill of ctx 3",
191 BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_10),
192 BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, -8),
193 BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_1, 0),
194 BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_10, 0),
195 BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_6, 0),
196 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_get_hash_recalc),
200 .errstr = "R1 type=fp expected=ctx",
201 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
/* Test: spill ctx to fp-8, then mangle the slot with an atomic
 * fetch-add (BPF_ATOMIC | BPF_DW, fp-8 += R0).  The refilled R1 is a
 * scalar, so the helper call is rejected: "R1 type=inv expected=ctx".
 */
204 "unpriv: spill/fill of ctx 4",
206 BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_10),
207 BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, -8),
208 BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_1, 0),
209 BPF_MOV64_IMM(BPF_REG_0, 1),
210 BPF_RAW_INSN(BPF_STX | BPF_ATOMIC | BPF_DW,
211 BPF_REG_10, BPF_REG_0, -8, BPF_ADD),
212 BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_6, 0),
213 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_get_hash_recalc),
217 .errstr = "R1 type=inv expected=ctx",
218 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
/* Test: two branches spill different pointer types (stack pointer
 * fp-16 vs ctx R1) into the same fp-8 slot, then one shared BPF_STX_MEM
 * insn writes through the refilled pointer.  The verifier must reject:
 * "same insn cannot be used with different pointers".
 */
221 "unpriv: spill/fill of different pointers stx",
223 BPF_MOV64_IMM(BPF_REG_3, 42),
224 BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_10),
225 BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, -8),
226 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, 3),
227 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
228 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -16),
229 BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_2, 0),
230 BPF_JMP_IMM(BPF_JNE, BPF_REG_1, 0, 1),
231 BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_1, 0),
232 BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_6, 0),
233 BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_3,
234 offsetof(struct __sk_buff, mark)),
235 BPF_MOV64_IMM(BPF_REG_0, 0),
239 .errstr = "same insn cannot be used with different pointers",
240 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
/* Test: branches spill either a looked-up socket (R2, from
 * BPF_SK_LOOKUP) or the ctx/skb pointer (R1) into the same fp-8 slot;
 * the shared skb->mark store then sees mixed pointer types and the
 * sk_release path gets a ctx instead of a sock — rejected:
 * "type=ctx expected=sock".
 */
243 "unpriv: spill/fill of different pointers stx - ctx and sock",
245 BPF_MOV64_REG(BPF_REG_8, BPF_REG_1),
246 /* struct bpf_sock *sock = bpf_sock_lookup(...); */
247 BPF_SK_LOOKUP(sk_lookup_tcp),
248 BPF_MOV64_REG(BPF_REG_2, BPF_REG_0),
250 /* void *target = &foo; */
251 BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_10),
252 BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, -8),
253 BPF_MOV64_REG(BPF_REG_1, BPF_REG_8),
254 /* if (skb == NULL) *target = sock; */
255 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, 1),
256 BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_2, 0),
257 /* else *target = skb; */
258 BPF_JMP_IMM(BPF_JNE, BPF_REG_1, 0, 1),
259 BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_1, 0),
260 /* struct __sk_buff *skb = *target; */
261 BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_6, 0),
262 /* skb->mark = 42; */
263 BPF_MOV64_IMM(BPF_REG_3, 42),
264 BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_3,
265 offsetof(struct __sk_buff, mark)),
266 /* if (sk) bpf_sk_release(sk) */
267 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, 1),
268 BPF_EMIT_CALL(BPF_FUNC_sk_release),
269 BPF_MOV64_IMM(BPF_REG_0, 0),
273 .errstr = "type=ctx expected=sock",
274 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
/* Test: same shape as above but with no bpf_sk_release call at all, so
 * the acquired socket reference escapes — rejected:
 * "Unreleased reference".  The commented-out .errstr preserves the
 * message this test triggered before the reference-tracking check
 * started firing first.
 */
277 "unpriv: spill/fill of different pointers stx - leak sock",
279 BPF_MOV64_REG(BPF_REG_8, BPF_REG_1),
280 /* struct bpf_sock *sock = bpf_sock_lookup(...); */
281 BPF_SK_LOOKUP(sk_lookup_tcp),
282 BPF_MOV64_REG(BPF_REG_2, BPF_REG_0),
284 /* void *target = &foo; */
285 BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_10),
286 BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, -8),
287 BPF_MOV64_REG(BPF_REG_1, BPF_REG_8),
288 /* if (skb == NULL) *target = sock; */
289 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, 1),
290 BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_2, 0),
291 /* else *target = skb; */
292 BPF_JMP_IMM(BPF_JNE, BPF_REG_1, 0, 1),
293 BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_1, 0),
294 /* struct __sk_buff *skb = *target; */
295 BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_6, 0),
296 /* skb->mark = 42; */
297 BPF_MOV64_IMM(BPF_REG_3, 42),
298 BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_3,
299 offsetof(struct __sk_buff, mark)),
303 //.errstr = "same insn cannot be used with different pointers",
304 .errstr = "Unreleased reference",
305 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
/* Test: branches spill either skb (ctx) or the looked-up sock into
 * fp-8, then one shared BPF_LDX_MEM reads sk->mark through the refilled
 * pointer — rejected: "same insn cannot be used with different
 * pointers".
 */
308 "unpriv: spill/fill of different pointers stx - sock and ctx (read)",
310 BPF_MOV64_REG(BPF_REG_8, BPF_REG_1),
311 /* struct bpf_sock *sock = bpf_sock_lookup(...); */
312 BPF_SK_LOOKUP(sk_lookup_tcp),
313 BPF_MOV64_REG(BPF_REG_2, BPF_REG_0),
315 /* void *target = &foo; */
316 BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_10),
317 BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, -8),
318 BPF_MOV64_REG(BPF_REG_1, BPF_REG_8),
319 /* if (skb) *target = skb */
320 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, 1),
321 BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_1, 0),
322 /* else *target = sock */
323 BPF_JMP_IMM(BPF_JNE, BPF_REG_1, 0, 1),
324 BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_2, 0),
325 /* struct bpf_sock *sk = *target; */
326 BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_6, 0),
327 /* if (sk) u32 foo = sk->mark; bpf_sk_release(sk); */
328 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, 2),
329 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
330 offsetof(struct bpf_sock, mark)),
331 BPF_EMIT_CALL(BPF_FUNC_sk_release),
332 BPF_MOV64_IMM(BPF_REG_0, 0),
336 .errstr = "same insn cannot be used with different pointers",
337 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
/* Test: same mixed spill, but the shared insn WRITES sk->mark.  The
 * write-to-sock check fires first — rejected: "cannot write into sock"
 * (the commented-out .errstr records the previously-expected message).
 */
340 "unpriv: spill/fill of different pointers stx - sock and ctx (write)",
342 BPF_MOV64_REG(BPF_REG_8, BPF_REG_1),
343 /* struct bpf_sock *sock = bpf_sock_lookup(...); */
344 BPF_SK_LOOKUP(sk_lookup_tcp),
345 BPF_MOV64_REG(BPF_REG_2, BPF_REG_0),
347 /* void *target = &foo; */
348 BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_10),
349 BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, -8),
350 BPF_MOV64_REG(BPF_REG_1, BPF_REG_8),
351 /* if (skb) *target = skb */
352 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, 1),
353 BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_1, 0),
354 /* else *target = sock */
355 BPF_JMP_IMM(BPF_JNE, BPF_REG_1, 0, 1),
356 BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_2, 0),
357 /* struct bpf_sock *sk = *target; */
358 BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_6, 0),
359 /* if (sk) sk->mark = 42; bpf_sk_release(sk); */
360 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, 3),
361 BPF_MOV64_IMM(BPF_REG_3, 42),
362 BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_3,
363 offsetof(struct bpf_sock, mark)),
364 BPF_EMIT_CALL(BPF_FUNC_sk_release),
365 BPF_MOV64_IMM(BPF_REG_0, 0),
369 //.errstr = "same insn cannot be used with different pointers",
370 .errstr = "cannot write into sock",
371 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
/* Test: LDX variant of the mixed-pointer spill — branches spill either
 * a stack pointer (offset into bpf_perf_event_data) or the ctx R1, and
 * a shared BPF_LDX_MEM then reads sample_period through it — rejected:
 * "same insn cannot be used with different pointers".
 * NOTE(review): original line 382 (the tail of the offsetof expression
 * started on line 381) is missing from this view.
 */
374 "unpriv: spill/fill of different pointers ldx",
376 BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_10),
377 BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, -8),
378 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, 3),
379 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
380 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2,
381 -(__s32)offsetof(struct bpf_perf_event_data,
383 BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_2, 0),
384 BPF_JMP_IMM(BPF_JNE, BPF_REG_1, 0, 1),
385 BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_1, 0),
386 BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_6, 0),
387 BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_1,
388 offsetof(struct bpf_perf_event_data, sample_period)),
389 BPF_MOV64_IMM(BPF_REG_0, 0),
393 .errstr = "same insn cannot be used with different pointers",
394 .prog_type = BPF_PROG_TYPE_PERF_EVENT,
/* Test: store a map-value pointer into the map value itself
 * (*(u64 *)(r0+0) = r0 after lookup) — unprivileged load rejected with
 * "R0 leaks addr"; map fd patched into insn 3.
 */
397 "unpriv: write pointer into map elem value",
399 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
400 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
401 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
402 BPF_LD_MAP_FD(BPF_REG_1, 0),
403 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem),
404 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 1),
405 BPF_STX_MEM(BPF_DW, BPF_REG_0, BPF_REG_0, 0),
408 .fixup_map_hash_8b = { 3 },
409 .errstr_unpriv = "R0 leaks addr",
410 .result_unpriv = REJECT,
/* Test: 32-bit ALU on a scalar then dereferencing R7, which only ever
 * held scalars — unprivileged load rejected with
 * "R7 invalid mem access 'inv'".
 */
414 "alu32: mov u32 const",
416 BPF_MOV32_IMM(BPF_REG_7, 0),
417 BPF_ALU32_IMM(BPF_AND, BPF_REG_7, 1),
418 BPF_MOV32_REG(BPF_REG_0, BPF_REG_7),
419 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 1),
420 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_7, 0),
423 .errstr_unpriv = "R7 invalid mem access 'inv'",
424 .result_unpriv = REJECT,
/* Test: 32-bit move of the 64-bit frame pointer (truncating copy) —
 * unprivileged load rejected with "R10 partial copy".
 */
429 "unpriv: partial copy of pointer",
431 BPF_MOV32_REG(BPF_REG_1, BPF_REG_10),
432 BPF_MOV64_IMM(BPF_REG_0, 0),
435 .errstr_unpriv = "R10 partial copy",
436 .result_unpriv = REJECT,
/* Test: passing the ctx pointer as the index argument (R3) to
 * bpf_tail_call — unprivileged load rejected with
 * "R3 leaks addr into helper"; prog-array fd patched into insn 1.
 */
440 "unpriv: pass pointer to tail_call",
442 BPF_MOV64_REG(BPF_REG_3, BPF_REG_1),
443 BPF_LD_MAP_FD(BPF_REG_2, 0),
444 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_tail_call),
445 BPF_MOV64_IMM(BPF_REG_0, 0),
448 .fixup_prog1 = { 1 },
449 .errstr_unpriv = "R3 leaks addr into helper",
450 .result_unpriv = REJECT,
/* Test: comparing a map pointer (loaded via BPF_LD_MAP_FD) against 0 —
 * unprivileged load rejected with "R1 pointer comparison".
 */
454 "unpriv: cmp map pointer with zero",
456 BPF_MOV64_IMM(BPF_REG_1, 0),
457 BPF_LD_MAP_FD(BPF_REG_1, 0),
458 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, 0),
459 BPF_MOV64_IMM(BPF_REG_0, 0),
462 .fixup_map_hash_8b = { 1 },
463 .errstr_unpriv = "R1 pointer comparison",
464 .result_unpriv = REJECT,
/* Test: direct write to R10 — rejected (unqualified .errstr) with
 * "frame pointer is read only".
 */
468 "unpriv: write into frame pointer",
470 BPF_MOV64_REG(BPF_REG_10, BPF_REG_1),
471 BPF_MOV64_IMM(BPF_REG_0, 0),
474 .errstr = "frame pointer is read only",
/* Test: spilling R10 and filling it back INTO R10 — the fill is a
 * write to the frame pointer, same rejection as above.
 */
478 "unpriv: spill/fill frame pointer",
480 BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_10),
481 BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, -8),
482 BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_10, 0),
483 BPF_LDX_MEM(BPF_DW, BPF_REG_10, BPF_REG_6, 0),
484 BPF_MOV64_IMM(BPF_REG_0, 0),
487 .errstr = "frame pointer is read only",
/* Test: comparing R10 against an immediate — unprivileged load
 * rejected with "R10 pointer comparison".
 */
491 "unpriv: cmp of frame pointer",
493 BPF_JMP_IMM(BPF_JEQ, BPF_REG_10, 0, 0),
494 BPF_MOV64_IMM(BPF_REG_0, 0),
497 .errstr_unpriv = "R10 pointer comparison",
498 .result_unpriv = REJECT,
/* Test: register-based addition onto a copy of fp then a store through
 * it — unprivileged load rejected with "R1 stack pointer arithmetic
 * goes out of range".
 */
502 "unpriv: adding of fp, reg",
504 BPF_MOV64_IMM(BPF_REG_0, 0),
505 BPF_MOV64_IMM(BPF_REG_1, 0),
506 BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_10),
507 BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_0, -8),
510 .errstr_unpriv = "R1 stack pointer arithmetic goes out of range",
511 .result_unpriv = REJECT,
/* Test: immediate variant of the above (R1 = fp; R1 += 0) — same
 * unprivileged rejection.
 */
515 "unpriv: adding of fp, imm",
517 BPF_MOV64_IMM(BPF_REG_0, 0),
518 BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
519 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 0),
520 BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_0, -8),
523 .errstr_unpriv = "R1 stack pointer arithmetic goes out of range",
524 .result_unpriv = REJECT,
/* Test: comparing a derived stack pointer (fp-8 in R2) against 0 —
 * unprivileged load rejected with "R2 pointer comparison".
 */
528 "unpriv: cmp of stack pointer",
530 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
531 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
532 BPF_JMP_IMM(BPF_JEQ, BPF_REG_2, 0, 0),
533 BPF_MOV64_IMM(BPF_REG_0, 0),
536 .errstr_unpriv = "R2 pointer comparison",
537 .result_unpriv = REJECT,