1 /* Copyright (c) 2011-2014 PLUMgrid, http://plumgrid.com
2 * Copyright (c) 2016 Facebook
4 * This program is free software; you can redistribute it and/or
5 * modify it under the terms of version 2 of the GNU General Public
6 * License as published by the Free Software Foundation.
8 * This program is distributed in the hope that it will be useful, but
9 * WITHOUT ANY WARRANTY; without even the implied warranty of
10 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
11 * General Public License for more details.
13 #include <linux/kernel.h>
14 #include <linux/types.h>
15 #include <linux/slab.h>
16 #include <linux/bpf.h>
17 #include <linux/bpf_verifier.h>
18 #include <linux/filter.h>
19 #include <net/netlink.h>
20 #include <linux/file.h>
21 #include <linux/vmalloc.h>
22 #include <linux/stringify.h>
23 #include <linux/bsearch.h>
24 #include <linux/sort.h>
25 #include <linux/perf_event.h>
29 static const struct bpf_verifier_ops * const bpf_verifier_ops[] = {
30 #define BPF_PROG_TYPE(_id, _name) \
31 [_id] = & _name ## _verifier_ops,
32 #define BPF_MAP_TYPE(_id, _ops)
33 #include <linux/bpf_types.h>
38 /* bpf_check() is a static code analyzer that walks eBPF program
39 * instruction by instruction and updates register/stack state.
40 * All paths of conditional branches are analyzed until 'bpf_exit' insn.
42 * The first pass is depth-first-search to check that the program is a DAG.
43 * It rejects the following programs:
44 * - larger than BPF_MAXINSNS insns
45 * - if loop is present (detected via back-edge)
46 * - unreachable insns exist (shouldn't be a forest. program = one function)
47 * - out of bounds or malformed jumps
48 * The second pass is all possible path descent from the 1st insn.
49 * Since it's analyzing all paths through the program, the length of the
50 * analysis is limited to 64k insn, which may be hit even if total number of
51 * insn is less than 4K, but there are too many branches that change stack/regs.
52 * Number of 'branches to be analyzed' is limited to 1k
54 * On entry to each instruction, each register has a type, and the instruction
55 * changes the types of the registers depending on instruction semantics.
56 * If instruction is BPF_MOV64_REG(BPF_REG_1, BPF_REG_5), then type of R5 is
57 * copied to R1.
59 * All registers are 64-bit.
60 * R0 - return register
61 * R1-R5 argument passing registers
62 * R6-R9 callee saved registers
63 * R10 - frame pointer read-only
65 * At the start of BPF program the register R1 contains a pointer to bpf_context
66 * and has type PTR_TO_CTX.
68 * Verifier tracks arithmetic operations on pointers in case:
69 * BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
70 * BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -20),
71 * 1st insn copies R10 (which has FRAME_PTR) type into R1
72 * and 2nd arithmetic instruction is pattern matched to recognize
73 * that it wants to construct a pointer to some element within stack.
74 * So after 2nd insn, the register R1 has type PTR_TO_STACK
75 * (and -20 constant is saved for further stack bounds checking).
76 * Meaning that this reg is a pointer to stack plus known immediate constant.
78 * Most of the time the registers have SCALAR_VALUE type, which
79 * means the register has some value, but it's not a valid pointer.
80 * (like pointer plus pointer becomes SCALAR_VALUE type)
82 * When verifier sees load or store instructions the type of base register
83 * can be: PTR_TO_MAP_VALUE, PTR_TO_CTX, PTR_TO_STACK. These are three pointer
84 * types recognized by check_mem_access() function.
86 * PTR_TO_MAP_VALUE means that this register is pointing to 'map element value'
87 * and the range of [ptr, ptr + map's value_size) is accessible.
89 * registers used to pass values to function calls are checked against
90 * function argument constraints.
92 * ARG_PTR_TO_MAP_KEY is one of such argument constraints.
93 * It means that the register type passed to this function must be
94 * PTR_TO_STACK and it will be used inside the function as
95 * 'pointer to map element key'
97 * For example the argument constraints for bpf_map_lookup_elem():
98 * .ret_type = RET_PTR_TO_MAP_VALUE_OR_NULL,
99 * .arg1_type = ARG_CONST_MAP_PTR,
100 * .arg2_type = ARG_PTR_TO_MAP_KEY,
102 * ret_type says that this function returns 'pointer to map elem value or null'
103 * function expects 1st argument to be a const pointer to 'struct bpf_map' and
104 * 2nd argument should be a pointer to stack, which will be used inside
105 * the helper function as a pointer to map element key.
107 * On the kernel side the helper function looks like:
108 * u64 bpf_map_lookup_elem(u64 r1, u64 r2, u64 r3, u64 r4, u64 r5)
110 * struct bpf_map *map = (struct bpf_map *) (unsigned long) r1;
111 * void *key = (void *) (unsigned long) r2;
114 * here kernel can access 'key' and 'map' pointers safely, knowing that
115 * [key, key + map->key_size) bytes are valid and were initialized on
116 * the stack of eBPF program.
119 * Corresponding eBPF program may look like:
120 * BPF_MOV64_REG(BPF_REG_2, BPF_REG_10), // after this insn R2 type is FRAME_PTR
121 * BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -4), // after this insn R2 type is PTR_TO_STACK
122 * BPF_LD_MAP_FD(BPF_REG_1, map_fd), // after this insn R1 type is CONST_PTR_TO_MAP
123 * BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem),
124 * here verifier looks at prototype of map_lookup_elem() and sees:
125 * .arg1_type == ARG_CONST_MAP_PTR and R1->type == CONST_PTR_TO_MAP, which is ok,
126 * Now verifier knows that this map has key of R1->map_ptr->key_size bytes
128 * Then .arg2_type == ARG_PTR_TO_MAP_KEY and R2->type == PTR_TO_STACK, ok so far,
129 * Now verifier checks that [R2, R2 + map's key_size) are within stack limits
130 * and were initialized prior to this call.
131 * If it's ok, then verifier allows this BPF_CALL insn and looks at
132 * .ret_type which is RET_PTR_TO_MAP_VALUE_OR_NULL, so it sets
133 * R0->type = PTR_TO_MAP_VALUE_OR_NULL which means bpf_map_lookup_elem() function
134 * returns either a pointer to map value or NULL.
136 * When type PTR_TO_MAP_VALUE_OR_NULL passes through 'if (reg != 0) goto +off'
137 * insn, the register holding that pointer in the true branch changes state to
138 * PTR_TO_MAP_VALUE and the same register changes state to a known zero SCALAR_VALUE in the false
139 * branch. See check_cond_jmp_op().
141 * After the call R0 is set to return type of the function and registers R1-R5
142 * are set to NOT_INIT to indicate that they are no longer readable.
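/* Illustrative sketch (not part of the verifier): the eBPF sequence above is
 * roughly what LLVM emits for restricted-C source along these lines, assuming
 * a hypothetical array map 'my_map' with a 4-byte key:
 *
 *   u32 key = 0;                                   // lives at fp-4 on the stack
 *   void *value = bpf_map_lookup_elem(&my_map, &key);
 *   if (!value)                                    // NULL check required by the
 *           return 0;                              // verifier before dereference
 *   // here 'value' has type PTR_TO_MAP_VALUE and may be accessed
 *   // within [value, value + my_map.value_size)
 */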
145 /* verifier_state + insn_idx are pushed to stack when branch is encountered */
146 struct bpf_verifier_stack_elem {
147 /* verifier state is 'st'
148 * before processing instruction 'insn_idx'
149 * and after processing instruction 'prev_insn_idx'
151 struct bpf_verifier_state st;
154 struct bpf_verifier_stack_elem *next;
157 #define BPF_COMPLEXITY_LIMIT_INSNS 131072
158 #define BPF_COMPLEXITY_LIMIT_STACK 1024
160 #define BPF_MAP_PTR_UNPRIV 1UL
161 #define BPF_MAP_PTR_POISON ((void *)((0xeB9FUL << 1) + \
162 POISON_POINTER_DELTA))
163 #define BPF_MAP_PTR(X) ((struct bpf_map *)((X) & ~BPF_MAP_PTR_UNPRIV))
165 static bool bpf_map_ptr_poisoned(const struct bpf_insn_aux_data *aux)
167 return BPF_MAP_PTR(aux->map_state) == BPF_MAP_PTR_POISON;
170 static bool bpf_map_ptr_unpriv(const struct bpf_insn_aux_data *aux)
172 return aux->map_state & BPF_MAP_PTR_UNPRIV;
175 static void bpf_map_ptr_store(struct bpf_insn_aux_data *aux,
176 const struct bpf_map *map, bool unpriv)
178 BUILD_BUG_ON((unsigned long)BPF_MAP_PTR_POISON & BPF_MAP_PTR_UNPRIV);
179 unpriv |= bpf_map_ptr_unpriv(aux);
180 aux->map_state = (unsigned long)map |
181 (unpriv ? BPF_MAP_PTR_UNPRIV : 0UL);
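/* Illustrative example of the encoding above (addresses are made up): with
 * map == 0xffff888012345000 and unpriv == true, aux->map_state becomes
 * 0xffff888012345001; BPF_MAP_PTR() masks off the low bit to recover the
 * map pointer and bpf_map_ptr_unpriv() reads the flag back. The
 * BUILD_BUG_ON() guarantees the poison value never sets the unpriv bit.
 */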
184 struct bpf_call_arg_meta {
185 struct bpf_map *map_ptr;
190 s64 msize_smax_value;
191 u64 msize_umax_value;
194 static DEFINE_MUTEX(bpf_verifier_lock);
196 void bpf_verifier_vlog(struct bpf_verifier_log *log, const char *fmt,
201 n = vscnprintf(log->kbuf, BPF_VERIFIER_TMP_LOG_SIZE, fmt, args);
203 WARN_ONCE(n >= BPF_VERIFIER_TMP_LOG_SIZE - 1,
204 "verifier log line truncated - local buffer too short\n");
206 n = min(log->len_total - log->len_used - 1, n);
209 if (!copy_to_user(log->ubuf + log->len_used, log->kbuf, n + 1))
215 /* log_level controls verbosity level of eBPF verifier.
216 * bpf_verifier_log_write() is used to dump the verification trace to the log,
217 * so the user can figure out what's wrong with the program
219 __printf(2, 3) void bpf_verifier_log_write(struct bpf_verifier_env *env,
220 const char *fmt, ...)
224 if (!bpf_verifier_log_needed(&env->log))
228 bpf_verifier_vlog(&env->log, fmt, args);
231 EXPORT_SYMBOL_GPL(bpf_verifier_log_write);
233 __printf(2, 3) static void verbose(void *private_data, const char *fmt, ...)
235 struct bpf_verifier_env *env = private_data;
238 if (!bpf_verifier_log_needed(&env->log))
242 bpf_verifier_vlog(&env->log, fmt, args);
246 static bool type_is_pkt_pointer(enum bpf_reg_type type)
248 return type == PTR_TO_PACKET ||
249 type == PTR_TO_PACKET_META;
252 /* string representation of 'enum bpf_reg_type' */
253 static const char * const reg_type_str[] = {
255 [SCALAR_VALUE] = "inv",
256 [PTR_TO_CTX] = "ctx",
257 [CONST_PTR_TO_MAP] = "map_ptr",
258 [PTR_TO_MAP_VALUE] = "map_value",
259 [PTR_TO_MAP_VALUE_OR_NULL] = "map_value_or_null",
260 [PTR_TO_STACK] = "fp",
261 [PTR_TO_PACKET] = "pkt",
262 [PTR_TO_PACKET_META] = "pkt_meta",
263 [PTR_TO_PACKET_END] = "pkt_end",
266 static void print_liveness(struct bpf_verifier_env *env,
267 enum bpf_reg_liveness live)
269 if (live & (REG_LIVE_READ | REG_LIVE_WRITTEN))
271 if (live & REG_LIVE_READ)
273 if (live & REG_LIVE_WRITTEN)
277 static struct bpf_func_state *func(struct bpf_verifier_env *env,
278 const struct bpf_reg_state *reg)
280 struct bpf_verifier_state *cur = env->cur_state;
282 return cur->frame[reg->frameno];
285 static void print_verifier_state(struct bpf_verifier_env *env,
286 const struct bpf_func_state *state)
288 const struct bpf_reg_state *reg;
293 verbose(env, " frame%d:", state->frameno);
294 for (i = 0; i < MAX_BPF_REG; i++) {
295 reg = &state->regs[i];
299 verbose(env, " R%d", i);
300 print_liveness(env, reg->live);
301 verbose(env, "=%s", reg_type_str[t]);
302 if ((t == SCALAR_VALUE || t == PTR_TO_STACK) &&
303 tnum_is_const(reg->var_off)) {
304 /* reg->off should be 0 for SCALAR_VALUE */
305 verbose(env, "%lld", reg->var_off.value + reg->off);
306 if (t == PTR_TO_STACK)
307 verbose(env, ",call_%d", func(env, reg)->callsite);
309 verbose(env, "(id=%d", reg->id);
310 if (t != SCALAR_VALUE)
311 verbose(env, ",off=%d", reg->off);
312 if (type_is_pkt_pointer(t))
313 verbose(env, ",r=%d", reg->range);
314 else if (t == CONST_PTR_TO_MAP ||
315 t == PTR_TO_MAP_VALUE ||
316 t == PTR_TO_MAP_VALUE_OR_NULL)
317 verbose(env, ",ks=%d,vs=%d",
318 reg->map_ptr->key_size,
319 reg->map_ptr->value_size);
320 if (tnum_is_const(reg->var_off)) {
321 /* Typically an immediate SCALAR_VALUE, but
322 * could be a pointer whose offset is too big
325 verbose(env, ",imm=%llx", reg->var_off.value);
327 if (reg->smin_value != reg->umin_value &&
328 reg->smin_value != S64_MIN)
329 verbose(env, ",smin_value=%lld",
330 (long long)reg->smin_value);
331 if (reg->smax_value != reg->umax_value &&
332 reg->smax_value != S64_MAX)
333 verbose(env, ",smax_value=%lld",
334 (long long)reg->smax_value);
335 if (reg->umin_value != 0)
336 verbose(env, ",umin_value=%llu",
337 (unsigned long long)reg->umin_value);
338 if (reg->umax_value != U64_MAX)
339 verbose(env, ",umax_value=%llu",
340 (unsigned long long)reg->umax_value);
341 if (!tnum_is_unknown(reg->var_off)) {
344 tnum_strn(tn_buf, sizeof(tn_buf), reg->var_off);
345 verbose(env, ",var_off=%s", tn_buf);
351 for (i = 0; i < state->allocated_stack / BPF_REG_SIZE; i++) {
352 if (state->stack[i].slot_type[0] == STACK_SPILL) {
353 verbose(env, " fp%d",
354 (-i - 1) * BPF_REG_SIZE);
355 print_liveness(env, state->stack[i].spilled_ptr.live);
357 reg_type_str[state->stack[i].spilled_ptr.type]);
359 if (state->stack[i].slot_type[0] == STACK_ZERO)
360 verbose(env, " fp%d=0", (-i - 1) * BPF_REG_SIZE);
365 static int copy_stack_state(struct bpf_func_state *dst,
366 const struct bpf_func_state *src)
370 if (WARN_ON_ONCE(dst->allocated_stack < src->allocated_stack)) {
371 /* internal bug, make state invalid to reject the program */
372 memset(dst, 0, sizeof(*dst));
375 memcpy(dst->stack, src->stack,
376 sizeof(*src->stack) * (src->allocated_stack / BPF_REG_SIZE));
380 /* do_check() starts with a zero-sized stack in struct bpf_verifier_state to
381 * make it consume a minimal amount of memory. When the program accesses the
382 * stack, check_stack_write() calls into realloc_func_state() to grow the stack size.
383 * Note there is a non-zero 'parent' pointer inside bpf_verifier_state
384 * which this function copies over. It points to previous bpf_verifier_state
385 * which is never reallocated
387 static int realloc_func_state(struct bpf_func_state *state, int size,
390 u32 old_size = state->allocated_stack;
391 struct bpf_stack_state *new_stack;
392 int slot = size / BPF_REG_SIZE;
394 if (size <= old_size || !size) {
397 state->allocated_stack = slot * BPF_REG_SIZE;
398 if (!size && old_size) {
404 new_stack = kmalloc_array(slot, sizeof(struct bpf_stack_state),
410 memcpy(new_stack, state->stack,
411 sizeof(*new_stack) * (old_size / BPF_REG_SIZE));
412 memset(new_stack + old_size / BPF_REG_SIZE, 0,
413 sizeof(*new_stack) * (size - old_size) / BPF_REG_SIZE);
415 state->allocated_stack = slot * BPF_REG_SIZE;
417 state->stack = new_stack;
421 static void free_func_state(struct bpf_func_state *state)
429 static void free_verifier_state(struct bpf_verifier_state *state,
434 for (i = 0; i <= state->curframe; i++) {
435 free_func_state(state->frame[i]);
436 state->frame[i] = NULL;
442 /* copy verifier state from src to dst growing dst stack space
443 * when necessary to accommodate larger src stack
445 static int copy_func_state(struct bpf_func_state *dst,
446 const struct bpf_func_state *src)
450 err = realloc_func_state(dst, src->allocated_stack, false);
453 memcpy(dst, src, offsetof(struct bpf_func_state, allocated_stack));
454 return copy_stack_state(dst, src);
457 static int copy_verifier_state(struct bpf_verifier_state *dst_state,
458 const struct bpf_verifier_state *src)
460 struct bpf_func_state *dst;
463 /* if dst has more stack frames than src, free them */
464 for (i = src->curframe + 1; i <= dst_state->curframe; i++) {
465 free_func_state(dst_state->frame[i]);
466 dst_state->frame[i] = NULL;
468 dst_state->curframe = src->curframe;
469 dst_state->parent = src->parent;
470 for (i = 0; i <= src->curframe; i++) {
471 dst = dst_state->frame[i];
473 dst = kzalloc(sizeof(*dst), GFP_KERNEL);
476 dst_state->frame[i] = dst;
478 err = copy_func_state(dst, src->frame[i]);
485 static int pop_stack(struct bpf_verifier_env *env, int *prev_insn_idx,
488 struct bpf_verifier_state *cur = env->cur_state;
489 struct bpf_verifier_stack_elem *elem, *head = env->head;
492 if (env->head == NULL)
496 err = copy_verifier_state(cur, &head->st);
501 *insn_idx = head->insn_idx;
503 *prev_insn_idx = head->prev_insn_idx;
505 free_verifier_state(&head->st, false);
512 static struct bpf_verifier_state *push_stack(struct bpf_verifier_env *env,
513 int insn_idx, int prev_insn_idx)
515 struct bpf_verifier_state *cur = env->cur_state;
516 struct bpf_verifier_stack_elem *elem;
519 elem = kzalloc(sizeof(struct bpf_verifier_stack_elem), GFP_KERNEL);
523 elem->insn_idx = insn_idx;
524 elem->prev_insn_idx = prev_insn_idx;
525 elem->next = env->head;
528 err = copy_verifier_state(&elem->st, cur);
531 if (env->stack_size > BPF_COMPLEXITY_LIMIT_STACK) {
532 verbose(env, "BPF program is too complex\n");
537 free_verifier_state(env->cur_state, true);
538 env->cur_state = NULL;
539 /* pop all elements and return */
540 while (!pop_stack(env, NULL, NULL));
544 #define CALLER_SAVED_REGS 6
545 static const int caller_saved[CALLER_SAVED_REGS] = {
546 BPF_REG_0, BPF_REG_1, BPF_REG_2, BPF_REG_3, BPF_REG_4, BPF_REG_5
549 static void __mark_reg_not_init(struct bpf_reg_state *reg);
551 /* Mark the unknown part of a register (variable offset or scalar value) as
552 * known to have the value @imm.
554 static void __mark_reg_known(struct bpf_reg_state *reg, u64 imm)
557 reg->var_off = tnum_const(imm);
558 reg->smin_value = (s64)imm;
559 reg->smax_value = (s64)imm;
560 reg->umin_value = imm;
561 reg->umax_value = imm;
564 /* Mark the 'variable offset' part of a register as zero. This should be
565 * used only on registers holding a pointer type.
567 static void __mark_reg_known_zero(struct bpf_reg_state *reg)
569 __mark_reg_known(reg, 0);
572 static void __mark_reg_const_zero(struct bpf_reg_state *reg)
574 __mark_reg_known(reg, 0);
576 reg->type = SCALAR_VALUE;
579 static void mark_reg_known_zero(struct bpf_verifier_env *env,
580 struct bpf_reg_state *regs, u32 regno)
582 if (WARN_ON(regno >= MAX_BPF_REG)) {
583 verbose(env, "mark_reg_known_zero(regs, %u)\n", regno);
584 /* Something bad happened, let's kill all regs */
585 for (regno = 0; regno < MAX_BPF_REG; regno++)
586 __mark_reg_not_init(regs + regno);
589 __mark_reg_known_zero(regs + regno);
592 static bool reg_is_pkt_pointer(const struct bpf_reg_state *reg)
594 return type_is_pkt_pointer(reg->type);
597 static bool reg_is_pkt_pointer_any(const struct bpf_reg_state *reg)
599 return reg_is_pkt_pointer(reg) ||
600 reg->type == PTR_TO_PACKET_END;
603 /* Unmodified PTR_TO_PACKET[_META,_END] register from ctx access. */
604 static bool reg_is_init_pkt_pointer(const struct bpf_reg_state *reg,
605 enum bpf_reg_type which)
607 /* The register can already have a range from prior markings.
608 * This is fine as long as it hasn't been advanced from its
611 return reg->type == which &&
614 tnum_equals_const(reg->var_off, 0);
617 /* Attempts to improve min/max values based on var_off information */
618 static void __update_reg_bounds(struct bpf_reg_state *reg)
620 /* min signed is max(sign bit) | min(other bits) */
621 reg->smin_value = max_t(s64, reg->smin_value,
622 reg->var_off.value | (reg->var_off.mask & S64_MIN));
623 /* max signed is min(sign bit) | max(other bits) */
624 reg->smax_value = min_t(s64, reg->smax_value,
625 reg->var_off.value | (reg->var_off.mask & S64_MAX));
626 reg->umin_value = max(reg->umin_value, reg->var_off.value);
627 reg->umax_value = min(reg->umax_value,
628 reg->var_off.value | reg->var_off.mask);
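/* Worked example (illustrative): if var_off has value 0x4 and mask 0x3
 * (bit 2 known to be 1, bits 0-1 unknown, all higher bits known 0), the
 * formulas above tighten the bounds to umin >= 4 and umax <= 7, and
 * likewise smin >= 4, smax <= 7, since the sign bit is known to be clear.
 */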
631 /* Uses signed min/max values to inform unsigned, and vice-versa */
632 static void __reg_deduce_bounds(struct bpf_reg_state *reg)
634 /* Learn sign from signed bounds.
635 * If we cannot cross the sign boundary, then signed and unsigned bounds
636 * are the same, so combine. This works even in the negative case, e.g.
637 * -3 s<= x s<= -1 implies 0xf...fd u<= x u<= 0xf...ff.
639 if (reg->smin_value >= 0 || reg->smax_value < 0) {
640 reg->smin_value = reg->umin_value = max_t(u64, reg->smin_value,
642 reg->smax_value = reg->umax_value = min_t(u64, reg->smax_value,
646 /* Learn sign from unsigned bounds. Signed bounds cross the sign
647 * boundary, so we must be careful.
649 if ((s64)reg->umax_value >= 0) {
650 /* Positive. We can't learn anything from the smin, but smax
651 * is positive, hence safe.
653 reg->smin_value = reg->umin_value;
654 reg->smax_value = reg->umax_value = min_t(u64, reg->smax_value,
656 } else if ((s64)reg->umin_value < 0) {
657 /* Negative. We can't learn anything from the smax, but smin
658 * is negative, hence safe.
660 reg->smin_value = reg->umin_value = max_t(u64, reg->smin_value,
662 reg->smax_value = reg->umax_value;
666 /* Attempts to improve var_off based on unsigned min/max information */
667 static void __reg_bound_offset(struct bpf_reg_state *reg)
669 reg->var_off = tnum_intersect(reg->var_off,
670 tnum_range(reg->umin_value,
674 /* Reset the min/max bounds of a register */
675 static void __mark_reg_unbounded(struct bpf_reg_state *reg)
677 reg->smin_value = S64_MIN;
678 reg->smax_value = S64_MAX;
680 reg->umax_value = U64_MAX;
683 /* Mark a register as having a completely unknown (scalar) value. */
684 static void __mark_reg_unknown(struct bpf_reg_state *reg)
686 reg->type = SCALAR_VALUE;
689 reg->var_off = tnum_unknown;
691 __mark_reg_unbounded(reg);
694 static void mark_reg_unknown(struct bpf_verifier_env *env,
695 struct bpf_reg_state *regs, u32 regno)
697 if (WARN_ON(regno >= MAX_BPF_REG)) {
698 verbose(env, "mark_reg_unknown(regs, %u)\n", regno);
699 /* Something bad happened, let's kill all regs except FP */
700 for (regno = 0; regno < BPF_REG_FP; regno++)
701 __mark_reg_not_init(regs + regno);
704 __mark_reg_unknown(regs + regno);
707 static void __mark_reg_not_init(struct bpf_reg_state *reg)
709 __mark_reg_unknown(reg);
710 reg->type = NOT_INIT;
713 static void mark_reg_not_init(struct bpf_verifier_env *env,
714 struct bpf_reg_state *regs, u32 regno)
716 if (WARN_ON(regno >= MAX_BPF_REG)) {
717 verbose(env, "mark_reg_not_init(regs, %u)\n", regno);
718 /* Something bad happened, let's kill all regs except FP */
719 for (regno = 0; regno < BPF_REG_FP; regno++)
720 __mark_reg_not_init(regs + regno);
723 __mark_reg_not_init(regs + regno);
726 static void init_reg_state(struct bpf_verifier_env *env,
727 struct bpf_func_state *state)
729 struct bpf_reg_state *regs = state->regs;
732 for (i = 0; i < MAX_BPF_REG; i++) {
733 mark_reg_not_init(env, regs, i);
734 regs[i].live = REG_LIVE_NONE;
738 regs[BPF_REG_FP].type = PTR_TO_STACK;
739 mark_reg_known_zero(env, regs, BPF_REG_FP);
740 regs[BPF_REG_FP].frameno = state->frameno;
742 /* 1st arg to a function */
743 regs[BPF_REG_1].type = PTR_TO_CTX;
744 mark_reg_known_zero(env, regs, BPF_REG_1);
747 #define BPF_MAIN_FUNC (-1)
748 static void init_func_state(struct bpf_verifier_env *env,
749 struct bpf_func_state *state,
750 int callsite, int frameno, int subprogno)
752 state->callsite = callsite;
753 state->frameno = frameno;
754 state->subprogno = subprogno;
755 init_reg_state(env, state);
759 SRC_OP, /* register is used as source operand */
760 DST_OP, /* register is used as destination operand */
761 DST_OP_NO_MARK /* same as above, check only, don't mark */
764 static int cmp_subprogs(const void *a, const void *b)
766 return ((struct bpf_subprog_info *)a)->start -
767 ((struct bpf_subprog_info *)b)->start;
770 static int find_subprog(struct bpf_verifier_env *env, int off)
772 struct bpf_subprog_info *p;
774 p = bsearch(&off, env->subprog_info, env->subprog_cnt,
775 sizeof(env->subprog_info[0]), cmp_subprogs);
778 return p - env->subprog_info;
782 static int add_subprog(struct bpf_verifier_env *env, int off)
784 int insn_cnt = env->prog->len;
787 if (off >= insn_cnt || off < 0) {
788 verbose(env, "call to invalid destination\n");
791 ret = find_subprog(env, off);
794 if (env->subprog_cnt >= BPF_MAX_SUBPROGS) {
795 verbose(env, "too many subprograms\n");
798 env->subprog_info[env->subprog_cnt++].start = off;
799 sort(env->subprog_info, env->subprog_cnt,
800 sizeof(env->subprog_info[0]), cmp_subprogs, NULL);
804 static int check_subprogs(struct bpf_verifier_env *env)
806 int i, ret, subprog_start, subprog_end, off, cur_subprog = 0;
807 struct bpf_subprog_info *subprog = env->subprog_info;
808 struct bpf_insn *insn = env->prog->insnsi;
809 int insn_cnt = env->prog->len;
811 /* Add entry function. */
812 ret = add_subprog(env, 0);
816 /* determine subprog starts. The end is one before the next starts */
817 for (i = 0; i < insn_cnt; i++) {
818 if (insn[i].code != (BPF_JMP | BPF_CALL))
820 if (insn[i].src_reg != BPF_PSEUDO_CALL)
822 if (!env->allow_ptr_leaks) {
823 verbose(env, "function calls to other bpf functions are allowed for root only\n");
826 if (bpf_prog_is_dev_bound(env->prog->aux)) {
827 verbose(env, "function calls in offloaded programs are not supported yet\n");
830 ret = add_subprog(env, i + insn[i].imm + 1);
835 /* Add a fake 'exit' subprog which could simplify subprog iteration
836 * logic. 'subprog_cnt' should not be increased.
838 subprog[env->subprog_cnt].start = insn_cnt;
840 if (env->log.level > 1)
841 for (i = 0; i < env->subprog_cnt; i++)
842 verbose(env, "func#%d @%d\n", i, subprog[i].start);
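/* Illustrative example (made-up program): for a 10-insn program where insn 3
 * is a BPF_CALL with src_reg == BPF_PSEUDO_CALL and imm == 2, the callee
 * starts at insn 3 + 2 + 1 = 6, so the subprog starts become {0, 6} plus the
 * fake exit entry at insn 10. Every jump is then required to stay within
 * [0, 6) or [6, 10), and insns 5 and 9 must be an exit or unconditional JA.
 */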
844 /* now check that all jumps are within the same subprog */
845 subprog_start = subprog[cur_subprog].start;
846 subprog_end = subprog[cur_subprog + 1].start;
847 for (i = 0; i < insn_cnt; i++) {
848 u8 code = insn[i].code;
850 if (BPF_CLASS(code) != BPF_JMP)
852 if (BPF_OP(code) == BPF_EXIT || BPF_OP(code) == BPF_CALL)
854 off = i + insn[i].off + 1;
855 if (off < subprog_start || off >= subprog_end) {
856 verbose(env, "jump out of range from insn %d to %d\n", i, off);
860 if (i == subprog_end - 1) {
861 /* to avoid fall-through from one subprog into another
862 * the last insn of the subprog should be either exit
863 * or unconditional jump back
865 if (code != (BPF_JMP | BPF_EXIT) &&
866 code != (BPF_JMP | BPF_JA)) {
867 verbose(env, "last insn is not an exit or jmp\n");
870 subprog_start = subprog_end;
872 if (cur_subprog < env->subprog_cnt)
873 subprog_end = subprog[cur_subprog + 1].start;
880 struct bpf_verifier_state *skip_callee(struct bpf_verifier_env *env,
881 const struct bpf_verifier_state *state,
882 struct bpf_verifier_state *parent,
885 struct bpf_verifier_state *tmp = NULL;
887 /* 'parent' could be a state of caller and
888 * 'state' could be a state of callee. In such case
889 * parent->curframe < state->curframe
890 * and it's ok for r1 - r5 registers
892 * 'parent' could be a callee's state after it bpf_exit-ed.
893 * In such case parent->curframe > state->curframe
894 * and it's ok for r0 only
896 if (parent->curframe == state->curframe ||
897 (parent->curframe < state->curframe &&
898 regno >= BPF_REG_1 && regno <= BPF_REG_5) ||
899 (parent->curframe > state->curframe &&
903 if (parent->curframe > state->curframe &&
904 regno >= BPF_REG_6) {
905 /* for callee saved regs we have to skip the whole chain
906 * of states that belong to callee and mark as LIVE_READ
907 * the registers before the call
910 while (tmp && tmp->curframe != state->curframe) {
921 verbose(env, "verifier bug regno %d tmp %p\n", regno, tmp);
922 verbose(env, "regno %d parent frame %d current frame %d\n",
923 regno, parent->curframe, state->curframe);
927 static int mark_reg_read(struct bpf_verifier_env *env,
928 const struct bpf_verifier_state *state,
929 struct bpf_verifier_state *parent,
932 bool writes = parent == state->parent; /* Observe write marks */
934 if (regno == BPF_REG_FP)
935 /* We don't need to worry about FP liveness because it's read-only */
939 /* if read wasn't screened by an earlier write ... */
940 if (writes && state->frame[state->curframe]->regs[regno].live & REG_LIVE_WRITTEN)
942 parent = skip_callee(env, state, parent, regno);
945 /* ... then we depend on parent's value */
946 parent->frame[parent->curframe]->regs[regno].live |= REG_LIVE_READ;
948 parent = state->parent;
954 static int check_reg_arg(struct bpf_verifier_env *env, u32 regno,
957 struct bpf_verifier_state *vstate = env->cur_state;
958 struct bpf_func_state *state = vstate->frame[vstate->curframe];
959 struct bpf_reg_state *regs = state->regs;
961 if (regno >= MAX_BPF_REG) {
962 verbose(env, "R%d is invalid\n", regno);
967 /* check whether register used as source operand can be read */
968 if (regs[regno].type == NOT_INIT) {
969 verbose(env, "R%d !read_ok\n", regno);
972 return mark_reg_read(env, vstate, vstate->parent, regno);
974 /* check whether register used as dest operand can be written to */
975 if (regno == BPF_REG_FP) {
976 verbose(env, "frame pointer is read only\n");
979 regs[regno].live |= REG_LIVE_WRITTEN;
981 mark_reg_unknown(env, regs, regno);
986 static bool is_spillable_regtype(enum bpf_reg_type type)
989 case PTR_TO_MAP_VALUE:
990 case PTR_TO_MAP_VALUE_OR_NULL:
994 case PTR_TO_PACKET_META:
995 case PTR_TO_PACKET_END:
996 case CONST_PTR_TO_MAP:
1003 /* Does this register contain a constant zero? */
1004 static bool register_is_null(struct bpf_reg_state *reg)
1006 return reg->type == SCALAR_VALUE && tnum_equals_const(reg->var_off, 0);
1009 /* check_stack_read/write functions track spill/fill of registers,
1010 * stack boundary and alignment are checked in check_mem_access()
1012 static int check_stack_write(struct bpf_verifier_env *env,
1013 struct bpf_func_state *state, /* func where register points to */
1014 int off, int size, int value_regno, int insn_idx)
1016 struct bpf_func_state *cur; /* state of the current function */
1017 int i, slot = -off - 1, spi = slot / BPF_REG_SIZE, err;
1018 enum bpf_reg_type type;
1020 err = realloc_func_state(state, round_up(slot + 1, BPF_REG_SIZE),
1024 /* caller checked that off % size == 0 and -MAX_BPF_STACK <= off < 0,
1025 * so it's aligned access and [off, off + size) are within stack limits
1027 if (!env->allow_ptr_leaks &&
1028 state->stack[spi].slot_type[0] == STACK_SPILL &&
1029 size != BPF_REG_SIZE) {
1030 verbose(env, "attempt to corrupt spilled pointer on stack\n");
1034 cur = env->cur_state->frame[env->cur_state->curframe];
1035 if (value_regno >= 0 &&
1036 is_spillable_regtype((type = cur->regs[value_regno].type))) {
1038 /* register containing pointer is being spilled into stack */
1039 if (size != BPF_REG_SIZE) {
1040 verbose(env, "invalid size of register spill\n");
1044 if (state != cur && type == PTR_TO_STACK) {
1045 verbose(env, "cannot spill pointers to stack into stack frame of the caller\n");
1049 /* save register state */
1050 state->stack[spi].spilled_ptr = cur->regs[value_regno];
1051 state->stack[spi].spilled_ptr.live |= REG_LIVE_WRITTEN;
1053 for (i = 0; i < BPF_REG_SIZE; i++) {
1054 if (state->stack[spi].slot_type[i] == STACK_MISC &&
1055 !env->allow_ptr_leaks) {
1056 int *poff = &env->insn_aux_data[insn_idx].sanitize_stack_off;
1057 int soff = (-spi - 1) * BPF_REG_SIZE;
1059 /* detected reuse of integer stack slot with a pointer
1060 * which means either llvm is reusing stack slot or
1061 * an attacker is trying to exploit CVE-2018-3639
1062 * (speculative store bypass)
1063 * Have to sanitize that slot with preemptive
1066 if (*poff && *poff != soff) {
1067 /* disallow programs where single insn stores
1068 * into two different stack slots, since verifier
1069 * cannot sanitize them
1072 "insn %d cannot access two stack slots fp%d and fp%d",
1073 insn_idx, *poff, soff);
1078 state->stack[spi].slot_type[i] = STACK_SPILL;
1081 u8 type = STACK_MISC;
1083 /* regular write of data into stack */
1084 state->stack[spi].spilled_ptr = (struct bpf_reg_state) {};
1086 /* only mark the slot as written if all 8 bytes were written
1087 * otherwise read propagation may incorrectly stop too soon
1088 * when stack slots are partially written.
1089 * This heuristic means that read propagation will be
1090 * conservative, since it will add reg_live_read marks
1091 * to stack slots all the way to the first state when a program
1092 * writes+reads less than 8 bytes
1094 if (size == BPF_REG_SIZE)
1095 state->stack[spi].spilled_ptr.live |= REG_LIVE_WRITTEN;
1097 /* when we zero initialize stack slots mark them as such */
1098 if (value_regno >= 0 &&
1099 register_is_null(&cur->regs[value_regno]))
1102 for (i = 0; i < size; i++)
1103 state->stack[spi].slot_type[(slot - i) % BPF_REG_SIZE] =
1109 /* registers of every function are unique and mark_reg_read() propagates
1110 * the liveness in the following cases:
1111 * - from callee into caller for R1 - R5 that were used as arguments
1112 * - from caller into callee for R0 that is used as the result of the call
1113 * - from caller to the same caller skipping states of the callee for R6 - R9,
1114 * since R6 - R9 are callee saved by implicit function prologue and
1115 * caller's R6 != callee's R6, so when we propagate liveness up to
1116 * parent states we need to skip callee states for R6 - R9.
1118 * stack slot marking is different, since stacks of caller and callee are
1119 * accessible in both (since caller can pass a pointer to caller's stack to
1120 * callee which can pass it to another function), hence mark_stack_slot_read()
1121 * has to propagate the stack liveness to all parent states at given frame number.
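 *
 * An example of the situation described above (illustrative sketch):
 *    f1() {
 *        ptr = fp - 8;
 *        *ptr = ctx;
 *        call f2 {
 *            .. = *ptr;
 *        }
 *        .. = *ptr;
 *    }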
1131 * First *ptr is reading from f1's stack and mark_stack_slot_read() has
1132 * to mark liveness at the f1's frame and not f2's frame.
1133 * Second *ptr is also reading from f1's stack and mark_stack_slot_read() has
1134 * to propagate liveness to f2 states at f1's frame level and further into
1135 * f1 states at f1's frame level until write into that stack slot
1137 static void mark_stack_slot_read(struct bpf_verifier_env *env,
1138 const struct bpf_verifier_state *state,
1139 struct bpf_verifier_state *parent,
1140 int slot, int frameno)
1142 bool writes = parent == state->parent; /* Observe write marks */
1145 if (parent->frame[frameno]->allocated_stack <= slot * BPF_REG_SIZE)
1146 /* since LIVE_WRITTEN mark is only done for full 8-byte
1147 * write the read marks are conservative and parent
1148 * state may not even have the stack allocated. In such case
1149 * end the propagation, since the loop reached beginning
1153 /* if read wasn't screened by an earlier write ... */
1154 if (writes && state->frame[frameno]->stack[slot].spilled_ptr.live & REG_LIVE_WRITTEN)
1156 /* ... then we depend on parent's value */
1157 parent->frame[frameno]->stack[slot].spilled_ptr.live |= REG_LIVE_READ;
1159 parent = state->parent;
1164 static int check_stack_read(struct bpf_verifier_env *env,
1165 struct bpf_func_state *reg_state /* func where register points to */,
1166 int off, int size, int value_regno)
1168 struct bpf_verifier_state *vstate = env->cur_state;
1169 struct bpf_func_state *state = vstate->frame[vstate->curframe];
1170 int i, slot = -off - 1, spi = slot / BPF_REG_SIZE;
1173 if (reg_state->allocated_stack <= slot) {
1174 verbose(env, "invalid read from stack off %d+0 size %d\n",
1178 stype = reg_state->stack[spi].slot_type;
1180 if (stype[0] == STACK_SPILL) {
1181 if (size != BPF_REG_SIZE) {
1182 verbose(env, "invalid size of register spill\n");
1185 for (i = 1; i < BPF_REG_SIZE; i++) {
1186 if (stype[(slot - i) % BPF_REG_SIZE] != STACK_SPILL) {
1187 verbose(env, "corrupted spill memory\n");
1192 if (value_regno >= 0) {
1193 /* restore register state from stack */
1194 state->regs[value_regno] = reg_state->stack[spi].spilled_ptr;
1195 /* mark reg as written since spilled pointer state likely
1196 * has its liveness marks cleared by is_state_visited()
1197 * which resets stack/reg liveness for state transitions
1199 state->regs[value_regno].live |= REG_LIVE_WRITTEN;
1201 mark_stack_slot_read(env, vstate, vstate->parent, spi,
1202 reg_state->frameno);
1207 for (i = 0; i < size; i++) {
1208 if (stype[(slot - i) % BPF_REG_SIZE] == STACK_MISC)
1210 if (stype[(slot - i) % BPF_REG_SIZE] == STACK_ZERO) {
1214 verbose(env, "invalid read from stack off %d+%d size %d\n",
1218 mark_stack_slot_read(env, vstate, vstate->parent, spi,
1219 reg_state->frameno);
1220 if (value_regno >= 0) {
1221 if (zeros == size) {
1222 /* any size read into register is zero extended,
1223 * so the whole register == const_zero
1225 __mark_reg_const_zero(&state->regs[value_regno]);
1227 /* have read misc data from the stack */
1228 mark_reg_unknown(env, state->regs, value_regno);
1230 state->regs[value_regno].live |= REG_LIVE_WRITTEN;
1236 /* check read/write into map element returned by bpf_map_lookup_elem() */
1237 static int __check_map_access(struct bpf_verifier_env *env, u32 regno, int off,
1238 int size, bool zero_size_allowed)
1240 struct bpf_reg_state *regs = cur_regs(env);
1241 struct bpf_map *map = regs[regno].map_ptr;
1243 if (off < 0 || size < 0 || (size == 0 && !zero_size_allowed) ||
1244 off + size > map->value_size) {
1245 verbose(env, "invalid access to map value, value_size=%d off=%d size=%d\n",
1246 map->value_size, off, size);
1252 /* check read/write into a map element with possible variable offset */
1253 static int check_map_access(struct bpf_verifier_env *env, u32 regno,
1254 int off, int size, bool zero_size_allowed)
1256 struct bpf_verifier_state *vstate = env->cur_state;
1257 struct bpf_func_state *state = vstate->frame[vstate->curframe];
1258 struct bpf_reg_state *reg = &state->regs[regno];
1261 /* We may have adjusted the register to this map value, so we
1262 * need to try adding each of min_value and max_value to off
1263 * to make sure our theoretical access will be safe.
1266 print_verifier_state(env, state);
1267 /* The minimum value is only important with signed
1268 * comparisons where we can't assume the floor of a
1269 * value is 0. If we are using signed variables for our
1270 * indexes we need to make sure that whatever we use
1271 * will have a set floor within our range.
1273 if (reg->smin_value < 0) {
1274 verbose(env, "R%d min value is negative, either use unsigned index or do a if (index >=0) check.\n",
1278 err = __check_map_access(env, regno, reg->smin_value + off, size,
1281 verbose(env, "R%d min value is outside of the array range\n",
1286 /* If we haven't set a max value then we need to bail since we can't be
1287 * sure we won't do bad things.
1288 * If reg->umax_value + off could overflow, treat that as unbounded too.
1290 if (reg->umax_value >= BPF_MAX_VAR_OFF) {
1291 verbose(env, "R%d unbounded memory access, make sure to bounds check any array access into a map\n",
1295 err = __check_map_access(env, regno, reg->umax_value + off, size,
1298 verbose(env, "R%d max value is outside of the array range\n",
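/* Worked example (illustrative, made-up numbers): for a map with
 * value_size == 16 and a register with smin_value == 0 and umax_value == 12,
 * an 8-byte access at off == 0 first checks the minimum (0 + 0 + 8 <= 16, ok)
 * and then the maximum: 12 + 0 + 8 == 20 > 16, so the access is rejected as
 * potentially outside of the array range.
 */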
1303 #define MAX_PACKET_OFF 0xffff
1305 static bool may_access_direct_pkt_data(struct bpf_verifier_env *env,
1306 const struct bpf_call_arg_meta *meta,
1307 enum bpf_access_type t)
1309 switch (env->prog->type) {
1310 case BPF_PROG_TYPE_LWT_IN:
1311 case BPF_PROG_TYPE_LWT_OUT:
1312 case BPF_PROG_TYPE_LWT_SEG6LOCAL:
1313 case BPF_PROG_TYPE_SK_REUSEPORT:
1314 /* dst_input() and dst_output() can't write for now */
1318 case BPF_PROG_TYPE_SCHED_CLS:
1319 case BPF_PROG_TYPE_SCHED_ACT:
1320 case BPF_PROG_TYPE_XDP:
1321 case BPF_PROG_TYPE_LWT_XMIT:
1322 case BPF_PROG_TYPE_SK_SKB:
1323 case BPF_PROG_TYPE_SK_MSG:
1325 return meta->pkt_access;
1327 env->seen_direct_write = true;
1334 static int __check_packet_access(struct bpf_verifier_env *env, u32 regno,
1335 int off, int size, bool zero_size_allowed)
1337 struct bpf_reg_state *regs = cur_regs(env);
1338 struct bpf_reg_state *reg = &regs[regno];
1340 if (off < 0 || size < 0 || (size == 0 && !zero_size_allowed) ||
1341 (u64)off + size > reg->range) {
1342 verbose(env, "invalid access to packet, off=%d size=%d, R%d(id=%d,off=%d,r=%d)\n",
1343 off, size, regno, reg->id, reg->off, reg->range);
1349 static int check_packet_access(struct bpf_verifier_env *env, u32 regno, int off,
1350 int size, bool zero_size_allowed)
1352 struct bpf_reg_state *regs = cur_regs(env);
1353 struct bpf_reg_state *reg = &regs[regno];
1356 /* We may have added a variable offset to the packet pointer; but any
1357 * reg->range we have comes after that. We are only checking the fixed
1361 /* We don't allow negative numbers, because we aren't tracking enough
1362 * detail to prove they're safe.
1364 if (reg->smin_value < 0) {
1365 verbose(env, "R%d min value is negative, either use unsigned index or do a if (index >=0) check.\n",
1369 err = __check_packet_access(env, regno, off, size, zero_size_allowed);
1371 verbose(env, "R%d offset is outside of the packet\n", regno);
1377 /* check access to 'struct bpf_context' fields. Supports fixed offsets only */
1378 static int check_ctx_access(struct bpf_verifier_env *env, int insn_idx, int off, int size,
1379 enum bpf_access_type t, enum bpf_reg_type *reg_type)
1381 struct bpf_insn_access_aux info = {
1382 .reg_type = *reg_type,
1385 if (env->ops->is_valid_access &&
1386 env->ops->is_valid_access(off, size, t, env->prog, &info)) {
1387 /* A non zero info.ctx_field_size indicates that this field is a
1388 * candidate for later verifier transformation to load the whole
1389 * field and then apply a mask when accessed with a narrower
1390 * access than actual ctx access size. A zero info.ctx_field_size
1391 * will only allow for whole field access and rejects any other
1392 * type of narrower access.
1394 *reg_type = info.reg_type;
1396 env->insn_aux_data[insn_idx].ctx_field_size = info.ctx_field_size;
1397 /* remember the offset of last byte accessed in ctx */
1398 if (env->prog->aux->max_ctx_offset < off + size)
1399 env->prog->aux->max_ctx_offset = off + size;
1403 verbose(env, "invalid bpf_context access off=%d size=%d\n", off, size);
1407 static bool __is_pointer_value(bool allow_ptr_leaks,
1408 const struct bpf_reg_state *reg)
1410 if (allow_ptr_leaks)
1413 return reg->type != SCALAR_VALUE;
1416 static bool is_pointer_value(struct bpf_verifier_env *env, int regno)
1418 return __is_pointer_value(env->allow_ptr_leaks, cur_regs(env) + regno);
1421 static bool is_ctx_reg(struct bpf_verifier_env *env, int regno)
1423 const struct bpf_reg_state *reg = cur_regs(env) + regno;
1425 return reg->type == PTR_TO_CTX;
1428 static bool is_pkt_reg(struct bpf_verifier_env *env, int regno)
1430 const struct bpf_reg_state *reg = cur_regs(env) + regno;
1432 return type_is_pkt_pointer(reg->type);
1435 static int check_pkt_ptr_alignment(struct bpf_verifier_env *env,
1436 const struct bpf_reg_state *reg,
1437 int off, int size, bool strict)
1439 struct tnum reg_off;
1442 /* Byte size accesses are always allowed. */
1443 if (!strict || size == 1)
1446 /* For platforms that do not have a Kconfig enabling
1447 * CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS the value of
1448 * NET_IP_ALIGN is universally set to '2'. And on platforms
1449 * that do set CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS, we get
1450 * to this code only in strict mode where we want to emulate
1451 * the NET_IP_ALIGN==2 checking. Therefore use an
1452 * unconditional IP align value of '2'.
1456 reg_off = tnum_add(reg->var_off, tnum_const(ip_align + reg->off + off));
1457 if (!tnum_is_aligned(reg_off, size)) {
1460 tnum_strn(tn_buf, sizeof(tn_buf), reg->var_off);
1462 "misaligned packet access off %d+%s+%d+%d size %d\n",
1463 ip_align, tn_buf, reg->off, off, size);
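/* Worked example (illustrative): with NET_IP_ALIGN emulated as 2, a 4-byte
 * load from a packet pointer with reg->off == 0, var_off == 0 and insn
 * offset 14 checks 2 + 0 + 0 + 14 == 16, which is 4-byte aligned and thus
 * allowed in strict mode; the same load at offset 15 (total 17) would be
 * rejected as misaligned.
 */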
1470 static int check_generic_ptr_alignment(struct bpf_verifier_env *env,
1471 const struct bpf_reg_state *reg,
1472 const char *pointer_desc,
1473 int off, int size, bool strict)
1475 struct tnum reg_off;
1477 /* Byte size accesses are always allowed. */
1478 if (!strict || size == 1)
1481 reg_off = tnum_add(reg->var_off, tnum_const(reg->off + off));
1482 if (!tnum_is_aligned(reg_off, size)) {
1485 tnum_strn(tn_buf, sizeof(tn_buf), reg->var_off);
1486 verbose(env, "misaligned %saccess off %s+%d+%d size %d\n",
1487 pointer_desc, tn_buf, reg->off, off, size);
1494 static int check_ptr_alignment(struct bpf_verifier_env *env,
1495 const struct bpf_reg_state *reg, int off,
1496 int size, bool strict_alignment_once)
1498 bool strict = env->strict_alignment || strict_alignment_once;
1499 const char *pointer_desc = "";
1501 switch (reg->type) {
1503 case PTR_TO_PACKET_META:
1504 /* Special case, because of NET_IP_ALIGN. Given metadata sits
1505 * right in front, treat it the very same way.
1507 return check_pkt_ptr_alignment(env, reg, off, size, strict);
1508 case PTR_TO_MAP_VALUE:
1509 pointer_desc = "value ";
1512 pointer_desc = "context ";
1515 pointer_desc = "stack ";
1516 /* The stack spill tracking logic in check_stack_write()
1517 * and check_stack_read() relies on stack accesses being
1525 return check_generic_ptr_alignment(env, reg, pointer_desc, off, size,
1529 static int update_stack_depth(struct bpf_verifier_env *env,
1530 const struct bpf_func_state *func,
1533 u16 stack = env->subprog_info[func->subprogno].stack_depth;
1538 /* update known max for given subprogram */
1539 env->subprog_info[func->subprogno].stack_depth = -off;
1543 /* starting from main bpf function walk all instructions of the function
1544 * and recursively walk all callees that given function can call.
1545 * Ignore jump and exit insns.
1546 * Since recursion is prevented by check_cfg() this algorithm
1547 * only needs a local stack of MAX_CALL_FRAMES to remember callsites
1549 static int check_max_stack_depth(struct bpf_verifier_env *env)
1551 int depth = 0, frame = 0, idx = 0, i = 0, subprog_end;
1552 struct bpf_subprog_info *subprog = env->subprog_info;
1553 struct bpf_insn *insn = env->prog->insnsi;
1554 int ret_insn[MAX_CALL_FRAMES];
1555 int ret_prog[MAX_CALL_FRAMES];
1558 /* round up to 32-bytes, since this is granularity
1559 * of interpreter stack size
1561 depth += round_up(max_t(u32, subprog[idx].stack_depth, 1), 32);
1562 if (depth > MAX_BPF_STACK) {
1563 verbose(env, "combined stack size of %d calls is %d. Too large\n",
1568 subprog_end = subprog[idx + 1].start;
1569 for (; i < subprog_end; i++) {
1570 if (insn[i].code != (BPF_JMP | BPF_CALL))
1572 if (insn[i].src_reg != BPF_PSEUDO_CALL)
1574 /* remember insn and function to return to */
1575 ret_insn[frame] = i + 1;
1576 ret_prog[frame] = idx;
1578 /* find the callee */
1579 i = i + insn[i].imm + 1;
1580 idx = find_subprog(env, i);
1582 WARN_ONCE(1, "verifier bug. No program starts at insn %d\n",
1587 if (frame >= MAX_CALL_FRAMES) {
1588 WARN_ONCE(1, "verifier bug. Call stack is too deep\n");
1593 /* end of for() loop means the last insn of the 'subprog'
1594 * was reached. Doesn't matter whether it was JA or EXIT
1598 depth -= round_up(max_t(u32, subprog[idx].stack_depth, 1), 32);
1600 i = ret_insn[frame];
1601 idx = ret_prog[frame];
1605 #ifndef CONFIG_BPF_JIT_ALWAYS_ON
1606 static int get_callee_stack_depth(struct bpf_verifier_env *env,
1607 const struct bpf_insn *insn, int idx)
1609 int start = idx + insn->imm + 1, subprog;
1611 subprog = find_subprog(env, start);
1613 WARN_ONCE(1, "verifier bug. No program starts at insn %d\n",
1617 return env->subprog_info[subprog].stack_depth;
1621 static int check_ctx_reg(struct bpf_verifier_env *env,
1622 const struct bpf_reg_state *reg, int regno)
1624 /* Access to ctx or passing it to a helper is only allowed in
1625 * its original, unmodified form.
1629 verbose(env, "dereference of modified ctx ptr R%d off=%d disallowed\n",
1634 if (!tnum_is_const(reg->var_off) || reg->var_off.value) {
1637 tnum_strn(tn_buf, sizeof(tn_buf), reg->var_off);
1638 verbose(env, "variable ctx access var_off=%s disallowed\n", tn_buf);
1645 /* truncate register to smaller size (in bytes)
1646 * must be called with size < BPF_REG_SIZE
1648 static void coerce_reg_to_size(struct bpf_reg_state *reg, int size)
1652 /* clear high bits in bit representation */
1653 reg->var_off = tnum_cast(reg->var_off, size);
1655 /* fix arithmetic bounds */
1656 mask = ((u64)1 << (size * 8)) - 1;
1657 if ((reg->umin_value & ~mask) == (reg->umax_value & ~mask)) {
1658 reg->umin_value &= mask;
1659 reg->umax_value &= mask;
1661 reg->umin_value = 0;
1662 reg->umax_value = mask;
1664 reg->smin_value = reg->umin_value;
1665 reg->smax_value = reg->umax_value;
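/* Worked example (illustrative): coercing a register with umin_value == 0
 * and umax_value == 0x1ff down to size == 1 first clears the high bits of
 * var_off; since the old bounds disagree above the 0xff mask, the bounds
 * collapse to the conservative [0, 0xff] (and the signed bounds follow).
 */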
1668 /* check whether memory at (regno + off) is accessible for t = (read | write)
1669 * if t==write, value_regno is a register whose value is stored into memory
1670 * if t==read, value_regno is a register which will receive the value from memory
1671 * if t==write && value_regno==-1, some unknown value is stored into memory
1672 * if t==read && value_regno==-1, don't care what we read from memory
1674 static int check_mem_access(struct bpf_verifier_env *env, int insn_idx, u32 regno,
1675 int off, int bpf_size, enum bpf_access_type t,
1676 int value_regno, bool strict_alignment_once)
1678 struct bpf_reg_state *regs = cur_regs(env);
1679 struct bpf_reg_state *reg = regs + regno;
1680 struct bpf_func_state *state;
1683 size = bpf_size_to_bytes(bpf_size);
1687 /* alignment checks will add in reg->off themselves */
1688 err = check_ptr_alignment(env, reg, off, size, strict_alignment_once);
1692 /* for access checks, reg->off is just part of off */
1695 if (reg->type == PTR_TO_MAP_VALUE) {
1696 if (t == BPF_WRITE && value_regno >= 0 &&
1697 is_pointer_value(env, value_regno)) {
1698 verbose(env, "R%d leaks addr into map\n", value_regno);
1702 err = check_map_access(env, regno, off, size, false);
1703 if (!err && t == BPF_READ && value_regno >= 0)
1704 mark_reg_unknown(env, regs, value_regno);
1706 } else if (reg->type == PTR_TO_CTX) {
1707 enum bpf_reg_type reg_type = SCALAR_VALUE;
1709 if (t == BPF_WRITE && value_regno >= 0 &&
1710 is_pointer_value(env, value_regno)) {
1711 verbose(env, "R%d leaks addr into ctx\n", value_regno);
1715 err = check_ctx_reg(env, reg, regno);
1719 err = check_ctx_access(env, insn_idx, off, size, t, &reg_type);
1720 if (!err && t == BPF_READ && value_regno >= 0) {
1721 /* ctx access returns either a scalar, or a
1722 * PTR_TO_PACKET[_META,_END]. In the latter
1723 * case, we know the offset is zero.
1725 if (reg_type == SCALAR_VALUE)
1726 mark_reg_unknown(env, regs, value_regno);
1728 mark_reg_known_zero(env, regs,
1730 regs[value_regno].id = 0;
1731 regs[value_regno].off = 0;
1732 regs[value_regno].range = 0;
1733 regs[value_regno].type = reg_type;
1736 } else if (reg->type == PTR_TO_STACK) {
1737 /* stack accesses must be at a fixed offset, so that we can
1738 * determine what type of data were returned.
1739 * See check_stack_read().
1741 if (!tnum_is_const(reg->var_off)) {
1744 tnum_strn(tn_buf, sizeof(tn_buf), reg->var_off);
1745 verbose(env, "variable stack access var_off=%s off=%d size=%d",
1749 off += reg->var_off.value;
1750 if (off >= 0 || off < -MAX_BPF_STACK) {
1751 verbose(env, "invalid stack off=%d size=%d\n", off,
1756 state = func(env, reg);
1757 err = update_stack_depth(env, state, off);
1762 err = check_stack_write(env, state, off, size,
1763 value_regno, insn_idx);
1765 err = check_stack_read(env, state, off, size,
1767 } else if (reg_is_pkt_pointer(reg)) {
1768 if (t == BPF_WRITE && !may_access_direct_pkt_data(env, NULL, t)) {
1769 verbose(env, "cannot write into packet\n");
1772 if (t == BPF_WRITE && value_regno >= 0 &&
1773 is_pointer_value(env, value_regno)) {
1774 verbose(env, "R%d leaks addr into packet\n",
1778 err = check_packet_access(env, regno, off, size, false);
1779 if (!err && t == BPF_READ && value_regno >= 0)
1780 mark_reg_unknown(env, regs, value_regno);
1782 verbose(env, "R%d invalid mem access '%s'\n", regno,
1783 reg_type_str[reg->type]);
1787 if (!err && size < BPF_REG_SIZE && value_regno >= 0 && t == BPF_READ &&
1788 regs[value_regno].type == SCALAR_VALUE) {
1789 /* b/h/w load zero-extends, mark upper bits as known 0 */
1790 coerce_reg_to_size(&regs[value_regno], size);
1795 static int check_xadd(struct bpf_verifier_env *env, int insn_idx, struct bpf_insn *insn)
1799 if ((BPF_SIZE(insn->code) != BPF_W && BPF_SIZE(insn->code) != BPF_DW) ||
1801 verbose(env, "BPF_XADD uses reserved fields\n");
1805 /* check src1 operand */
1806 err = check_reg_arg(env, insn->src_reg, SRC_OP);
1810 /* check src2 operand */
1811 err = check_reg_arg(env, insn->dst_reg, SRC_OP);
1815 if (is_pointer_value(env, insn->src_reg)) {
1816 verbose(env, "R%d leaks addr into mem\n", insn->src_reg);
1820 if (is_ctx_reg(env, insn->dst_reg) ||
1821 is_pkt_reg(env, insn->dst_reg)) {
1822 verbose(env, "BPF_XADD stores into R%d %s is not allowed\n",
1823 insn->dst_reg, is_ctx_reg(env, insn->dst_reg) ?
1824 "context" : "packet");
1828 /* check whether atomic_add can read the memory */
1829 err = check_mem_access(env, insn_idx, insn->dst_reg, insn->off,
1830 BPF_SIZE(insn->code), BPF_READ, -1, true);
1834 /* check whether atomic_add can write into the same memory */
1835 return check_mem_access(env, insn_idx, insn->dst_reg, insn->off,
1836 BPF_SIZE(insn->code), BPF_WRITE, -1, true);
1839 /* when register 'regno' is passed into function that will read 'access_size'
1840 * bytes from that pointer, make sure that it's within stack boundary
1841 * and all elements of stack are initialized.
1842 * Unlike most pointer bounds-checking functions, this one doesn't take an
1843 * 'off' argument, so it has to add in reg->off itself.
1845 static int check_stack_boundary(struct bpf_verifier_env *env, int regno,
1846 int access_size, bool zero_size_allowed,
1847 struct bpf_call_arg_meta *meta)
1849 struct bpf_reg_state *reg = cur_regs(env) + regno;
1850 struct bpf_func_state *state = func(env, reg);
1851 int off, i, slot, spi;
1853 if (reg->type != PTR_TO_STACK) {
1854 /* Allow zero-byte read from NULL, regardless of pointer type */
1855 if (zero_size_allowed && access_size == 0 &&
1856 register_is_null(reg))
1859 verbose(env, "R%d type=%s expected=%s\n", regno,
1860 reg_type_str[reg->type],
1861 reg_type_str[PTR_TO_STACK]);
1865 /* Only allow fixed-offset stack reads */
1866 if (!tnum_is_const(reg->var_off)) {
1869 tnum_strn(tn_buf, sizeof(tn_buf), reg->var_off);
1870 verbose(env, "invalid variable stack read R%d var_off=%s\n",
1874 off = reg->off + reg->var_off.value;
1875 if (off >= 0 || off < -MAX_BPF_STACK || off + access_size > 0 ||
1876 access_size < 0 || (access_size == 0 && !zero_size_allowed)) {
1877 verbose(env, "invalid stack type R%d off=%d access_size=%d\n",
1878 regno, off, access_size);
1882 if (meta && meta->raw_mode) {
1883 meta->access_size = access_size;
1884 meta->regno = regno;
1888 for (i = 0; i < access_size; i++) {
1891 slot = -(off + i) - 1;
1892 spi = slot / BPF_REG_SIZE;
1893 if (state->allocated_stack <= slot)
1895 stype = &state->stack[spi].slot_type[slot % BPF_REG_SIZE];
1896 if (*stype == STACK_MISC)
1898 if (*stype == STACK_ZERO) {
1899 /* helper can write anything into the stack */
1900 *stype = STACK_MISC;
1904 verbose(env, "invalid indirect read from stack off %d+%d size %d\n",
1905 off, i, access_size);
1908 /* reading any byte out of 8-byte 'spill_slot' will cause
1909 * the whole slot to be marked as 'read'
1911 mark_stack_slot_read(env, env->cur_state, env->cur_state->parent,
1912 spi, state->frameno);
1914 return update_stack_depth(env, state, off);
1917 static int check_helper_mem_access(struct bpf_verifier_env *env, int regno,
1918 int access_size, bool zero_size_allowed,
1919 struct bpf_call_arg_meta *meta)
1921 struct bpf_reg_state *regs = cur_regs(env), *reg = &regs[regno];
1923 switch (reg->type) {
1925 case PTR_TO_PACKET_META:
1926 return check_packet_access(env, regno, reg->off, access_size,
1928 case PTR_TO_MAP_VALUE:
1929 return check_map_access(env, regno, reg->off, access_size,
1931 default: /* scalar_value|ptr_to_stack or invalid ptr */
1932 return check_stack_boundary(env, regno, access_size,
1933 zero_size_allowed, meta);
1937 static bool arg_type_is_mem_ptr(enum bpf_arg_type type)
1939 return type == ARG_PTR_TO_MEM ||
1940 type == ARG_PTR_TO_MEM_OR_NULL ||
1941 type == ARG_PTR_TO_UNINIT_MEM;
1944 static bool arg_type_is_mem_size(enum bpf_arg_type type)
1946 return type == ARG_CONST_SIZE ||
1947 type == ARG_CONST_SIZE_OR_ZERO;
1950 static int check_func_arg(struct bpf_verifier_env *env, u32 regno,
1951 enum bpf_arg_type arg_type,
1952 struct bpf_call_arg_meta *meta)
1954 struct bpf_reg_state *regs = cur_regs(env), *reg = &regs[regno];
1955 enum bpf_reg_type expected_type, type = reg->type;
1958 if (arg_type == ARG_DONTCARE)
1961 err = check_reg_arg(env, regno, SRC_OP);
1965 if (arg_type == ARG_ANYTHING) {
1966 if (is_pointer_value(env, regno)) {
1967 verbose(env, "R%d leaks addr into helper function\n",
1974 if (type_is_pkt_pointer(type) &&
1975 !may_access_direct_pkt_data(env, meta, BPF_READ)) {
1976 verbose(env, "helper access to the packet is not allowed\n");
1980 if (arg_type == ARG_PTR_TO_MAP_KEY ||
1981 arg_type == ARG_PTR_TO_MAP_VALUE) {
1982 expected_type = PTR_TO_STACK;
1983 if (!type_is_pkt_pointer(type) && type != PTR_TO_MAP_VALUE &&
1984 type != expected_type)
1986 } else if (arg_type == ARG_CONST_SIZE ||
1987 arg_type == ARG_CONST_SIZE_OR_ZERO) {
1988 expected_type = SCALAR_VALUE;
1989 if (type != expected_type)
1991 } else if (arg_type == ARG_CONST_MAP_PTR) {
1992 expected_type = CONST_PTR_TO_MAP;
1993 if (type != expected_type)
1995 } else if (arg_type == ARG_PTR_TO_CTX) {
1996 expected_type = PTR_TO_CTX;
1997 if (type != expected_type)
1999 err = check_ctx_reg(env, reg, regno);
2002 } else if (arg_type_is_mem_ptr(arg_type)) {
2003 expected_type = PTR_TO_STACK;
2004 /* One exception here. In case function allows for NULL to be
2005 * passed in as argument, it's a SCALAR_VALUE type. Final test
2006 * happens during stack boundary checking.
2008 if (register_is_null(reg) &&
2009 arg_type == ARG_PTR_TO_MEM_OR_NULL)
2010 /* final test in check_stack_boundary() */;
2011 else if (!type_is_pkt_pointer(type) &&
2012 type != PTR_TO_MAP_VALUE &&
2013 type != expected_type)
2015 meta->raw_mode = arg_type == ARG_PTR_TO_UNINIT_MEM;
2017 verbose(env, "unsupported arg_type %d\n", arg_type);
2021 if (arg_type == ARG_CONST_MAP_PTR) {
2022 /* bpf_map_xxx(map_ptr) call: remember that map_ptr */
2023 meta->map_ptr = reg->map_ptr;
2024 } else if (arg_type == ARG_PTR_TO_MAP_KEY) {
2025 /* bpf_map_xxx(..., map_ptr, ..., key) call:
2026 * check that [key, key + map->key_size) are within
2027 * stack limits and initialized
2029 if (!meta->map_ptr) {
2030 /* in function declaration map_ptr must come before
2031 * map_key, so that it's verified and known before
2032 * we have to check map_key here. Otherwise it means
2033 * that the kernel subsystem misconfigured the verifier
2035 verbose(env, "invalid map_ptr to access map->key\n");
2038 err = check_helper_mem_access(env, regno,
2039 meta->map_ptr->key_size, false,
2041 } else if (arg_type == ARG_PTR_TO_MAP_VALUE) {
2042 /* bpf_map_xxx(..., map_ptr, ..., value) call:
2043 * check [value, value + map->value_size) validity
2045 if (!meta->map_ptr) {
2046 /* kernel subsystem misconfigured verifier */
2047 verbose(env, "invalid map_ptr to access map->value\n");
2050 err = check_helper_mem_access(env, regno,
2051 meta->map_ptr->value_size, false,
2053 } else if (arg_type_is_mem_size(arg_type)) {
2054 bool zero_size_allowed = (arg_type == ARG_CONST_SIZE_OR_ZERO);
2056 /* remember the mem_size which may be used later
2057 * to refine return values.
2059 meta->msize_smax_value = reg->smax_value;
2060 meta->msize_umax_value = reg->umax_value;
2062 /* The register is SCALAR_VALUE; the access check
2063 * happens using its boundaries.
2065 if (!tnum_is_const(reg->var_off))
2066 /* For unprivileged variable accesses, disable raw
2067 * mode so that the program is required to
2068 * initialize all the memory that the helper could
2069 * just partially fill up.
2073 if (reg->smin_value < 0) {
2074 verbose(env, "R%d min value is negative, either use unsigned or 'var &= const'\n",
2079 if (reg->umin_value == 0) {
2080 err = check_helper_mem_access(env, regno - 1, 0,
2087 if (reg->umax_value >= BPF_MAX_VAR_SIZ) {
2088 verbose(env, "R%d unbounded memory access, use 'var &= const' or 'if (var < const)'\n",
2092 err = check_helper_mem_access(env, regno - 1,
2094 zero_size_allowed, meta);
2099 verbose(env, "R%d type=%s expected=%s\n", regno,
2100 reg_type_str[type], reg_type_str[expected_type]);
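/* Editor's illustration, not part of the verifier itself: a hedged sketch of
 * the kind of sequence the argument checks above accept for a helper whose
 * prototype pairs ARG_PTR_TO_UNINIT_MEM with ARG_CONST_SIZE (for example
 * bpf_get_current_comm(buf, size_of_buf)):
 *
 *	BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),	// fp[-8..-1] initialized
 *	BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),	// r1 = fp
 *	BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -8),	// r1 = fp - 8, PTR_TO_STACK
 *	BPF_MOV64_IMM(BPF_REG_2, 8),		// r2 = 8, known SCALAR_VALUE
 *	BPF_EMIT_CALL(BPF_FUNC_get_current_comm),
 *
 * R1 is validated via check_helper_mem_access()/check_stack_boundary() and
 * R2 satisfies arg_type_is_mem_size() because it is a bounded scalar.
 */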
2104 static int check_map_func_compatibility(struct bpf_verifier_env *env,
2105 struct bpf_map *map, int func_id)
2110 /* We need a two way check, first is from map perspective ... */
2111 switch (map->map_type) {
2112 case BPF_MAP_TYPE_PROG_ARRAY:
2113 if (func_id != BPF_FUNC_tail_call)
2116 case BPF_MAP_TYPE_PERF_EVENT_ARRAY:
2117 if (func_id != BPF_FUNC_perf_event_read &&
2118 func_id != BPF_FUNC_perf_event_output &&
2119 func_id != BPF_FUNC_perf_event_read_value)
2122 case BPF_MAP_TYPE_STACK_TRACE:
2123 if (func_id != BPF_FUNC_get_stackid)
2126 case BPF_MAP_TYPE_CGROUP_ARRAY:
2127 if (func_id != BPF_FUNC_skb_under_cgroup &&
2128 func_id != BPF_FUNC_current_task_under_cgroup)
2131 case BPF_MAP_TYPE_CGROUP_STORAGE:
2132 if (func_id != BPF_FUNC_get_local_storage)
2135 /* devmap returns a pointer to a live net_device ifindex that we cannot
2136 * allow to be modified from bpf side. So do not allow lookup elements
2139 case BPF_MAP_TYPE_DEVMAP:
2140 if (func_id != BPF_FUNC_redirect_map)
2143 /* Restrict bpf side of cpumap and xskmap, open when use-cases
2146 case BPF_MAP_TYPE_CPUMAP:
2147 case BPF_MAP_TYPE_XSKMAP:
2148 if (func_id != BPF_FUNC_redirect_map)
2151 case BPF_MAP_TYPE_ARRAY_OF_MAPS:
2152 case BPF_MAP_TYPE_HASH_OF_MAPS:
2153 if (func_id != BPF_FUNC_map_lookup_elem)
2156 case BPF_MAP_TYPE_SOCKMAP:
2157 if (func_id != BPF_FUNC_sk_redirect_map &&
2158 func_id != BPF_FUNC_sock_map_update &&
2159 func_id != BPF_FUNC_map_delete_elem &&
2160 func_id != BPF_FUNC_msg_redirect_map)
2163 case BPF_MAP_TYPE_SOCKHASH:
2164 if (func_id != BPF_FUNC_sk_redirect_hash &&
2165 func_id != BPF_FUNC_sock_hash_update &&
2166 func_id != BPF_FUNC_map_delete_elem &&
2167 func_id != BPF_FUNC_msg_redirect_hash)
2170 case BPF_MAP_TYPE_REUSEPORT_SOCKARRAY:
2171 if (func_id != BPF_FUNC_sk_select_reuseport)
2178 /* ... and second from the function itself. */
2180 case BPF_FUNC_tail_call:
2181 if (map->map_type != BPF_MAP_TYPE_PROG_ARRAY)
2183 if (env->subprog_cnt > 1) {
2184 verbose(env, "tail_calls are not allowed in programs with bpf-to-bpf calls\n");
2188 case BPF_FUNC_perf_event_read:
2189 case BPF_FUNC_perf_event_output:
2190 case BPF_FUNC_perf_event_read_value:
2191 if (map->map_type != BPF_MAP_TYPE_PERF_EVENT_ARRAY)
2194 case BPF_FUNC_get_stackid:
2195 if (map->map_type != BPF_MAP_TYPE_STACK_TRACE)
2198 case BPF_FUNC_current_task_under_cgroup:
2199 case BPF_FUNC_skb_under_cgroup:
2200 if (map->map_type != BPF_MAP_TYPE_CGROUP_ARRAY)
2203 case BPF_FUNC_redirect_map:
2204 if (map->map_type != BPF_MAP_TYPE_DEVMAP &&
2205 map->map_type != BPF_MAP_TYPE_CPUMAP &&
2206 map->map_type != BPF_MAP_TYPE_XSKMAP)
2209 case BPF_FUNC_sk_redirect_map:
2210 case BPF_FUNC_msg_redirect_map:
2211 case BPF_FUNC_sock_map_update:
2212 if (map->map_type != BPF_MAP_TYPE_SOCKMAP)
2215 case BPF_FUNC_sk_redirect_hash:
2216 case BPF_FUNC_msg_redirect_hash:
2217 case BPF_FUNC_sock_hash_update:
2218 if (map->map_type != BPF_MAP_TYPE_SOCKHASH)
2221 case BPF_FUNC_get_local_storage:
2222 if (map->map_type != BPF_MAP_TYPE_CGROUP_STORAGE)
2225 case BPF_FUNC_sk_select_reuseport:
2226 if (map->map_type != BPF_MAP_TYPE_REUSEPORT_SOCKARRAY)
2235 verbose(env, "cannot pass map_type %d into func %s#%d\n",
2236 map->map_type, func_id_name(func_id), func_id);
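/* Editor's illustration (not authoritative): the two-way check above means,
 * for instance, that a BPF_MAP_TYPE_PROG_ARRAY may only be used with
 * bpf_tail_call() and bpf_tail_call() only with a PROG_ARRAY, so a sketch
 * like
 *
 *	BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),	// r1 = ctx saved earlier
 *	BPF_LD_MAP_FD(BPF_REG_2, prog_array_fd),// r2 = prog array map
 *	BPF_MOV64_IMM(BPF_REG_3, 0),		// r3 = index
 *	BPF_EMIT_CALL(BPF_FUNC_tail_call),
 *
 * is accepted (prog_array_fd is a placeholder fd), while passing the same
 * map to bpf_map_lookup_elem() is rejected here.
 */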
2240 static bool check_raw_mode_ok(const struct bpf_func_proto *fn)
2244 if (fn->arg1_type == ARG_PTR_TO_UNINIT_MEM)
2246 if (fn->arg2_type == ARG_PTR_TO_UNINIT_MEM)
2248 if (fn->arg3_type == ARG_PTR_TO_UNINIT_MEM)
2250 if (fn->arg4_type == ARG_PTR_TO_UNINIT_MEM)
2252 if (fn->arg5_type == ARG_PTR_TO_UNINIT_MEM)
2255 /* We only support one arg being in raw mode at the moment,
2256 * which is sufficient for the helper functions we have
2262 static bool check_args_pair_invalid(enum bpf_arg_type arg_curr,
2263 enum bpf_arg_type arg_next)
2265 return (arg_type_is_mem_ptr(arg_curr) &&
2266 !arg_type_is_mem_size(arg_next)) ||
2267 (!arg_type_is_mem_ptr(arg_curr) &&
2268 arg_type_is_mem_size(arg_next));
2271 static bool check_arg_pair_ok(const struct bpf_func_proto *fn)
2273 /* bpf_xxx(..., buf, len) call will access 'len'
2274 * bytes from memory 'buf'. Both arg types need
2275 * to be paired, so make sure there's no buggy
2276 * helper function specification.
2278 if (arg_type_is_mem_size(fn->arg1_type) ||
2279 arg_type_is_mem_ptr(fn->arg5_type) ||
2280 check_args_pair_invalid(fn->arg1_type, fn->arg2_type) ||
2281 check_args_pair_invalid(fn->arg2_type, fn->arg3_type) ||
2282 check_args_pair_invalid(fn->arg3_type, fn->arg4_type) ||
2283 check_args_pair_invalid(fn->arg4_type, fn->arg5_type))
2289 static int check_func_proto(const struct bpf_func_proto *fn)
2291 return check_raw_mode_ok(fn) &&
2292 check_arg_pair_ok(fn) ? 0 : -EINVAL;
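/* Editor's sketch of what these proto checks enforce, using a hypothetical
 * helper definition: the size argument must immediately follow the memory
 * pointer it describes, e.g.
 *
 *	static const struct bpf_func_proto example_proto = {
 *		.ret_type  = RET_INTEGER,
 *		.arg1_type = ARG_PTR_TO_MEM,
 *		.arg2_type = ARG_CONST_SIZE,
 *	};
 *
 * A proto whose first argument is a size, or whose mem pointer is not
 * followed by a size, fails check_arg_pair_ok(); more than one
 * ARG_PTR_TO_UNINIT_MEM argument fails check_raw_mode_ok().
 */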
2295 /* Packet data might have moved, any old PTR_TO_PACKET[_META,_END]
2296 * are now invalid, so turn them into unknown SCALAR_VALUE.
2298 static void __clear_all_pkt_pointers(struct bpf_verifier_env *env,
2299 struct bpf_func_state *state)
2301 struct bpf_reg_state *regs = state->regs, *reg;
2304 for (i = 0; i < MAX_BPF_REG; i++)
2305 if (reg_is_pkt_pointer_any(&regs[i]))
2306 mark_reg_unknown(env, regs, i);
2308 for (i = 0; i < state->allocated_stack / BPF_REG_SIZE; i++) {
2309 if (state->stack[i].slot_type[0] != STACK_SPILL)
2311 reg = &state->stack[i].spilled_ptr;
2312 if (reg_is_pkt_pointer_any(reg))
2313 __mark_reg_unknown(reg);
2317 static void clear_all_pkt_pointers(struct bpf_verifier_env *env)
2319 struct bpf_verifier_state *vstate = env->cur_state;
2322 for (i = 0; i <= vstate->curframe; i++)
2323 __clear_all_pkt_pointers(env, vstate->frame[i]);
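/* Editor's illustration: after a helper with changes_pkt_data (for example
 * bpf_skb_pull_data()), any previously verified packet pointer is turned
 * into an unknown scalar by the code above, so a program has to re-derive
 * and re-check it, roughly:
 *
 *	r2 = *(u32 *)(r6 + offsetof(struct __sk_buff, data));
 *	r3 = *(u32 *)(r6 + offsetof(struct __sk_buff, data_end));
 *	if (r2 + 8 > r3) goto out;	// re-establish the verified range
 *
 * (pseudo-asm sketch; the exact loads come from the ctx access rewrite).
 */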
2326 static int check_func_call(struct bpf_verifier_env *env, struct bpf_insn *insn,
2329 struct bpf_verifier_state *state = env->cur_state;
2330 struct bpf_func_state *caller, *callee;
2331 int i, subprog, target_insn;
2333 if (state->curframe + 1 >= MAX_CALL_FRAMES) {
2334 verbose(env, "the call stack of %d frames is too deep\n",
2335 state->curframe + 2);
2339 target_insn = *insn_idx + insn->imm;
2340 subprog = find_subprog(env, target_insn + 1);
2342 verbose(env, "verifier bug. No program starts at insn %d\n",
2347 caller = state->frame[state->curframe];
2348 if (state->frame[state->curframe + 1]) {
2349 verbose(env, "verifier bug. Frame %d already allocated\n",
2350 state->curframe + 1);
2354 callee = kzalloc(sizeof(*callee), GFP_KERNEL);
2357 state->frame[state->curframe + 1] = callee;
2359 /* callee cannot access r0, r6 - r9 for reading and has to write
2360 * into its own stack before reading from it.
2361 * callee can read/write into caller's stack
2363 init_func_state(env, callee,
2364 /* remember the callsite, it will be used by bpf_exit */
2365 *insn_idx /* callsite */,
2366 state->curframe + 1 /* frameno within this callchain */,
2367 subprog /* subprog number within this prog */);
2369 /* copy r1 - r5 args that callee can access */
2370 for (i = BPF_REG_1; i <= BPF_REG_5; i++)
2371 callee->regs[i] = caller->regs[i];
2373 /* after the call registers r0 - r5 were scratched */
2374 for (i = 0; i < CALLER_SAVED_REGS; i++) {
2375 mark_reg_not_init(env, caller->regs, caller_saved[i]);
2376 check_reg_arg(env, caller_saved[i], DST_OP_NO_MARK);
2379 /* only increment it after check_reg_arg() finished */
2382 /* and go analyze first insn of the callee */
2383 *insn_idx = target_insn;
2385 if (env->log.level) {
2386 verbose(env, "caller:\n");
2387 print_verifier_state(env, caller);
2388 verbose(env, "callee:\n");
2389 print_verifier_state(env, callee);
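/* Editor's sketch of a bpf-to-bpf call as seen by the code above, assuming
 * the callee starts two instructions after the pseudo call (offsets are
 * counted from the instruction following the call):
 *
 *	BPF_MOV64_IMM(BPF_REG_1, 1),	// r1..r5 are copied into the callee
 *	BPF_CALL_REL(1),		// pseudo call, new frame is allocated
 *	BPF_EXIT_INSN(),
 *	// callee:
 *	BPF_MOV64_REG(BPF_REG_0, BPF_REG_1),
 *	BPF_EXIT_INSN(),
 *
 * In the callee, r6-r9 read as not-initialized until written, and the
 * caller's r0-r5 are scratched once the call returns.
 */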
2394 static int prepare_func_exit(struct bpf_verifier_env *env, int *insn_idx)
2396 struct bpf_verifier_state *state = env->cur_state;
2397 struct bpf_func_state *caller, *callee;
2398 struct bpf_reg_state *r0;
2400 callee = state->frame[state->curframe];
2401 r0 = &callee->regs[BPF_REG_0];
2402 if (r0->type == PTR_TO_STACK) {
2403 /* technically it's ok to return caller's stack pointer
2404 * (or caller's caller's pointer) back to the caller,
2405 * since these pointers are valid. Only current stack
2406 * pointer will be invalid as soon as function exits,
2407 * but let's be conservative
2409 verbose(env, "cannot return stack pointer to the caller\n");
2414 caller = state->frame[state->curframe];
2415 /* return to the caller whatever r0 had in the callee */
2416 caller->regs[BPF_REG_0] = *r0;
2418 *insn_idx = callee->callsite + 1;
2419 if (env->log.level) {
2420 verbose(env, "returning from callee:\n");
2421 print_verifier_state(env, callee);
2422 verbose(env, "to caller at %d:\n", *insn_idx);
2423 print_verifier_state(env, caller);
2425 /* clear everything in the callee */
2426 free_func_state(callee);
2427 state->frame[state->curframe + 1] = NULL;
2431 static void do_refine_retval_range(struct bpf_reg_state *regs, int ret_type,
2433 struct bpf_call_arg_meta *meta)
2435 struct bpf_reg_state *ret_reg = &regs[BPF_REG_0];
2437 if (ret_type != RET_INTEGER ||
2438 (func_id != BPF_FUNC_get_stack &&
2439 func_id != BPF_FUNC_probe_read_str))
2442 ret_reg->smax_value = meta->msize_smax_value;
2443 ret_reg->umax_value = meta->msize_umax_value;
2444 __reg_deduce_bounds(ret_reg);
2445 __reg_bound_offset(ret_reg);
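/* Editor's worked example of the refinement above (assuming r1 and r3 were
 * already set up as a valid destination buffer and source pointer):
 *
 *	BPF_MOV64_IMM(BPF_REG_2, 64),		// size argument = 64
 *	BPF_EMIT_CALL(BPF_FUNC_probe_read_str),
 *	// here r0 is RET_INTEGER and its smax/umax are clamped to 64,
 *	// so a following bounds check against 64 can be proven.
 */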
2449 record_func_map(struct bpf_verifier_env *env, struct bpf_call_arg_meta *meta,
2450 int func_id, int insn_idx)
2452 struct bpf_insn_aux_data *aux = &env->insn_aux_data[insn_idx];
2454 if (func_id != BPF_FUNC_tail_call &&
2455 func_id != BPF_FUNC_map_lookup_elem &&
2456 func_id != BPF_FUNC_map_update_elem &&
2457 func_id != BPF_FUNC_map_delete_elem)
2460 if (meta->map_ptr == NULL) {
2461 verbose(env, "kernel subsystem misconfigured verifier\n");
2465 if (!BPF_MAP_PTR(aux->map_state))
2466 bpf_map_ptr_store(aux, meta->map_ptr,
2467 meta->map_ptr->unpriv_array);
2468 else if (BPF_MAP_PTR(aux->map_state) != meta->map_ptr)
2469 bpf_map_ptr_store(aux, BPF_MAP_PTR_POISON,
2470 meta->map_ptr->unpriv_array);
2474 static int check_helper_call(struct bpf_verifier_env *env, int func_id, int insn_idx)
2476 const struct bpf_func_proto *fn = NULL;
2477 struct bpf_reg_state *regs;
2478 struct bpf_call_arg_meta meta;
2482 /* find function prototype */
2483 if (func_id < 0 || func_id >= __BPF_FUNC_MAX_ID) {
2484 verbose(env, "invalid func %s#%d\n", func_id_name(func_id),
2489 if (env->ops->get_func_proto)
2490 fn = env->ops->get_func_proto(func_id, env->prog);
2492 verbose(env, "unknown func %s#%d\n", func_id_name(func_id),
2497 /* eBPF programs must be GPL compatible to use GPL-ed functions */
2498 if (!env->prog->gpl_compatible && fn->gpl_only) {
2499 verbose(env, "cannot call GPL-restricted function from non-GPL compatible program\n");
2503 /* With LD_ABS/IND some JITs save/restore skb from r1. */
2504 changes_data = bpf_helper_changes_pkt_data(fn->func);
2505 if (changes_data && fn->arg1_type != ARG_PTR_TO_CTX) {
2506 verbose(env, "kernel subsystem misconfigured func %s#%d: r1 != ctx\n",
2507 func_id_name(func_id), func_id);
2511 memset(&meta, 0, sizeof(meta));
2512 meta.pkt_access = fn->pkt_access;
2514 err = check_func_proto(fn);
2516 verbose(env, "kernel subsystem misconfigured func %s#%d\n",
2517 func_id_name(func_id), func_id);
2522 err = check_func_arg(env, BPF_REG_1, fn->arg1_type, &meta);
2525 err = check_func_arg(env, BPF_REG_2, fn->arg2_type, &meta);
2528 err = check_func_arg(env, BPF_REG_3, fn->arg3_type, &meta);
2531 err = check_func_arg(env, BPF_REG_4, fn->arg4_type, &meta);
2534 err = check_func_arg(env, BPF_REG_5, fn->arg5_type, &meta);
2538 err = record_func_map(env, &meta, func_id, insn_idx);
2542 /* Mark slots with STACK_MISC in case of raw mode, stack offset
2543 * is inferred from register state.
2545 for (i = 0; i < meta.access_size; i++) {
2546 err = check_mem_access(env, insn_idx, meta.regno, i, BPF_B,
2547 BPF_WRITE, -1, false);
2552 regs = cur_regs(env);
2554 /* check that flags argument in get_local_storage(map, flags) is 0,
2555 * this is required because get_local_storage() can't return an error.
2557 if (func_id == BPF_FUNC_get_local_storage &&
2558 !register_is_null(&regs[BPF_REG_2])) {
2559 verbose(env, "get_local_storage() doesn't support non-zero flags\n");
2563 /* reset caller saved regs */
2564 for (i = 0; i < CALLER_SAVED_REGS; i++) {
2565 mark_reg_not_init(env, regs, caller_saved[i]);
2566 check_reg_arg(env, caller_saved[i], DST_OP_NO_MARK);
2569 /* update return register (already marked as written above) */
2570 if (fn->ret_type == RET_INTEGER) {
2571 /* sets type to SCALAR_VALUE */
2572 mark_reg_unknown(env, regs, BPF_REG_0);
2573 } else if (fn->ret_type == RET_VOID) {
2574 regs[BPF_REG_0].type = NOT_INIT;
2575 } else if (fn->ret_type == RET_PTR_TO_MAP_VALUE_OR_NULL ||
2576 fn->ret_type == RET_PTR_TO_MAP_VALUE) {
2577 if (fn->ret_type == RET_PTR_TO_MAP_VALUE)
2578 regs[BPF_REG_0].type = PTR_TO_MAP_VALUE;
2580 regs[BPF_REG_0].type = PTR_TO_MAP_VALUE_OR_NULL;
2581 /* There is no offset yet applied, variable or fixed */
2582 mark_reg_known_zero(env, regs, BPF_REG_0);
2583 regs[BPF_REG_0].off = 0;
2584 /* remember map_ptr, so that check_map_access()
2585 * can check 'value_size' boundary of memory access
2586 * to map element returned from bpf_map_lookup_elem()
2588 if (meta.map_ptr == NULL) {
2590 "kernel subsystem misconfigured verifier\n");
2593 regs[BPF_REG_0].map_ptr = meta.map_ptr;
2594 regs[BPF_REG_0].id = ++env->id_gen;
2596 verbose(env, "unknown return type %d of func %s#%d\n",
2597 fn->ret_type, func_id_name(func_id), func_id);
2601 do_refine_retval_range(regs, fn->ret_type, func_id, &meta);
2603 err = check_map_func_compatibility(env, meta.map_ptr, func_id);
2607 if (func_id == BPF_FUNC_get_stack && !env->prog->has_callchain_buf) {
2608 const char *err_str;
2610 #ifdef CONFIG_PERF_EVENTS
2611 err = get_callchain_buffers(sysctl_perf_event_max_stack);
2612 err_str = "cannot get callchain buffer for func %s#%d\n";
2615 err_str = "func %s#%d not supported without CONFIG_PERF_EVENTS\n";
2618 verbose(env, err_str, func_id_name(func_id), func_id);
2622 env->prog->has_callchain_buf = true;
2626 clear_all_pkt_pointers(env);
2630 static bool signed_add_overflows(s64 a, s64 b)
2632 /* Do the add in u64, where overflow is well-defined */
2633 s64 res = (s64)((u64)a + (u64)b);
2640 static bool signed_sub_overflows(s64 a, s64 b)
2642 /* Do the sub in u64, where overflow is well-defined */
2643 s64 res = (s64)((u64)a - (u64)b);
2650 static bool check_reg_sane_offset(struct bpf_verifier_env *env,
2651 const struct bpf_reg_state *reg,
2652 enum bpf_reg_type type)
2654 bool known = tnum_is_const(reg->var_off);
2655 s64 val = reg->var_off.value;
2656 s64 smin = reg->smin_value;
2658 if (known && (val >= BPF_MAX_VAR_OFF || val <= -BPF_MAX_VAR_OFF)) {
2659 verbose(env, "math between %s pointer and %lld is not allowed\n",
2660 reg_type_str[type], val);
2664 if (reg->off >= BPF_MAX_VAR_OFF || reg->off <= -BPF_MAX_VAR_OFF) {
2665 verbose(env, "%s pointer offset %d is not allowed\n",
2666 reg_type_str[type], reg->off);
2670 if (smin == S64_MIN) {
2671 verbose(env, "math between %s pointer and register with unbounded min value is not allowed\n",
2672 reg_type_str[type]);
2676 if (smin >= BPF_MAX_VAR_OFF || smin <= -BPF_MAX_VAR_OFF) {
2677 verbose(env, "value %lld makes %s pointer be out of bounds\n",
2678 smin, reg_type_str[type]);
2685 /* Handles arithmetic on a pointer and a scalar: computes new min/max and var_off.
2686 * Caller should also handle BPF_MOV case separately.
2687 * If we return -EACCES, caller may want to try again treating pointer as a
2688 * scalar. So we only emit a diagnostic if !env->allow_ptr_leaks.
2690 static int adjust_ptr_min_max_vals(struct bpf_verifier_env *env,
2691 struct bpf_insn *insn,
2692 const struct bpf_reg_state *ptr_reg,
2693 const struct bpf_reg_state *off_reg)
2695 struct bpf_verifier_state *vstate = env->cur_state;
2696 struct bpf_func_state *state = vstate->frame[vstate->curframe];
2697 struct bpf_reg_state *regs = state->regs, *dst_reg;
2698 bool known = tnum_is_const(off_reg->var_off);
2699 s64 smin_val = off_reg->smin_value, smax_val = off_reg->smax_value,
2700 smin_ptr = ptr_reg->smin_value, smax_ptr = ptr_reg->smax_value;
2701 u64 umin_val = off_reg->umin_value, umax_val = off_reg->umax_value,
2702 umin_ptr = ptr_reg->umin_value, umax_ptr = ptr_reg->umax_value;
2703 u8 opcode = BPF_OP(insn->code);
2704 u32 dst = insn->dst_reg;
2706 dst_reg = &regs[dst];
2708 if ((known && (smin_val != smax_val || umin_val != umax_val)) ||
2709 smin_val > smax_val || umin_val > umax_val) {
2710 /* Taint dst register if offset had invalid bounds derived from
2711 * e.g. dead branches.
2713 __mark_reg_unknown(dst_reg);
2717 if (BPF_CLASS(insn->code) != BPF_ALU64) {
2718 /* 32-bit ALU ops on pointers produce (meaningless) scalars */
2720 "R%d 32-bit pointer arithmetic prohibited\n",
2725 if (ptr_reg->type == PTR_TO_MAP_VALUE_OR_NULL) {
2726 verbose(env, "R%d pointer arithmetic on PTR_TO_MAP_VALUE_OR_NULL prohibited, null-check it first\n",
2730 if (ptr_reg->type == CONST_PTR_TO_MAP) {
2731 verbose(env, "R%d pointer arithmetic on CONST_PTR_TO_MAP prohibited\n",
2735 if (ptr_reg->type == PTR_TO_PACKET_END) {
2736 verbose(env, "R%d pointer arithmetic on PTR_TO_PACKET_END prohibited\n",
2741 /* In case of 'scalar += pointer', dst_reg inherits pointer type and id.
2742 * The id may be overwritten later if we create a new variable offset.
2744 dst_reg->type = ptr_reg->type;
2745 dst_reg->id = ptr_reg->id;
2747 if (!check_reg_sane_offset(env, off_reg, ptr_reg->type) ||
2748 !check_reg_sane_offset(env, ptr_reg, ptr_reg->type))
2753 /* We can take a fixed offset as long as it doesn't overflow
2754 * the s32 'off' field
2756 if (known && (ptr_reg->off + smin_val ==
2757 (s64)(s32)(ptr_reg->off + smin_val))) {
2758 /* pointer += K. Accumulate it into fixed offset */
2759 dst_reg->smin_value = smin_ptr;
2760 dst_reg->smax_value = smax_ptr;
2761 dst_reg->umin_value = umin_ptr;
2762 dst_reg->umax_value = umax_ptr;
2763 dst_reg->var_off = ptr_reg->var_off;
2764 dst_reg->off = ptr_reg->off + smin_val;
2765 dst_reg->range = ptr_reg->range;
2768 /* A new variable offset is created. Note that off_reg->off
2769 * == 0, since it's a scalar.
2770 * dst_reg gets the pointer type and since some positive
2771 * integer value was added to the pointer, give it a new 'id'
2772 * if it's a PTR_TO_PACKET.
2773 * this creates a new 'base' pointer, off_reg (variable) gets
2774 * added into the variable offset, and we copy the fixed offset
2777 if (signed_add_overflows(smin_ptr, smin_val) ||
2778 signed_add_overflows(smax_ptr, smax_val)) {
2779 dst_reg->smin_value = S64_MIN;
2780 dst_reg->smax_value = S64_MAX;
2782 dst_reg->smin_value = smin_ptr + smin_val;
2783 dst_reg->smax_value = smax_ptr + smax_val;
2785 if (umin_ptr + umin_val < umin_ptr ||
2786 umax_ptr + umax_val < umax_ptr) {
2787 dst_reg->umin_value = 0;
2788 dst_reg->umax_value = U64_MAX;
2790 dst_reg->umin_value = umin_ptr + umin_val;
2791 dst_reg->umax_value = umax_ptr + umax_val;
2793 dst_reg->var_off = tnum_add(ptr_reg->var_off, off_reg->var_off);
2794 dst_reg->off = ptr_reg->off;
2795 if (reg_is_pkt_pointer(ptr_reg)) {
2796 dst_reg->id = ++env->id_gen;
2797 /* something was added to pkt_ptr, set range to zero */
2802 if (dst_reg == off_reg) {
2803 /* scalar -= pointer. Creates an unknown scalar */
2804 verbose(env, "R%d tried to subtract pointer from scalar\n",
2808 /* We don't allow subtraction from FP, because (according to
2809 * test_verifier.c test "invalid fp arithmetic", JITs might not
2810 * be able to deal with it.
2812 if (ptr_reg->type == PTR_TO_STACK) {
2813 verbose(env, "R%d subtraction from stack pointer prohibited\n",
2817 if (known && (ptr_reg->off - smin_val ==
2818 (s64)(s32)(ptr_reg->off - smin_val))) {
2819 /* pointer -= K. Subtract it from fixed offset */
2820 dst_reg->smin_value = smin_ptr;
2821 dst_reg->smax_value = smax_ptr;
2822 dst_reg->umin_value = umin_ptr;
2823 dst_reg->umax_value = umax_ptr;
2824 dst_reg->var_off = ptr_reg->var_off;
2825 dst_reg->id = ptr_reg->id;
2826 dst_reg->off = ptr_reg->off - smin_val;
2827 dst_reg->range = ptr_reg->range;
2830 /* A new variable offset is created. If the subtrahend is known
2831 * nonnegative, then any reg->range we had before is still good.
2833 if (signed_sub_overflows(smin_ptr, smax_val) ||
2834 signed_sub_overflows(smax_ptr, smin_val)) {
2835 /* Overflow possible, we know nothing */
2836 dst_reg->smin_value = S64_MIN;
2837 dst_reg->smax_value = S64_MAX;
2839 dst_reg->smin_value = smin_ptr - smax_val;
2840 dst_reg->smax_value = smax_ptr - smin_val;
2842 if (umin_ptr < umax_val) {
2843 /* Overflow possible, we know nothing */
2844 dst_reg->umin_value = 0;
2845 dst_reg->umax_value = U64_MAX;
2847 /* Cannot overflow (as long as bounds are consistent) */
2848 dst_reg->umin_value = umin_ptr - umax_val;
2849 dst_reg->umax_value = umax_ptr - umin_val;
2851 dst_reg->var_off = tnum_sub(ptr_reg->var_off, off_reg->var_off);
2852 dst_reg->off = ptr_reg->off;
2853 if (reg_is_pkt_pointer(ptr_reg)) {
2854 dst_reg->id = ++env->id_gen;
2855 /* something was subtracted from pkt_ptr, set range to zero */
2863 /* bitwise ops on pointers are troublesome, prohibit. */
2864 verbose(env, "R%d bitwise operator %s on pointer prohibited\n",
2865 dst, bpf_alu_string[opcode >> 4]);
2868 /* other operators (e.g. MUL,LSH) produce non-pointer results */
2869 verbose(env, "R%d pointer arithmetic with %s operator prohibited\n",
2870 dst, bpf_alu_string[opcode >> 4]);
2874 if (!check_reg_sane_offset(env, dst_reg, ptr_reg->type))
2877 __update_reg_bounds(dst_reg);
2878 __reg_deduce_bounds(dst_reg);
2879 __reg_bound_offset(dst_reg);
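/* Editor's sketch of the two shapes handled above, assuming r0 already
 * holds a NULL-checked PTR_TO_MAP_VALUE and r1 a bounded scalar:
 *
 *	BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 4),		// fixed offset: off += 4
 *	BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),	// variable offset:
 *							// smin/smax/umin/umax and
 *							// var_off absorb r1's bounds
 *
 * A later check_map_access() then has to prove that both the fixed and the
 * variable part stay inside the map value.
 */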
2883 /* WARNING: This function does calculations on 64-bit values, but the actual
2884 * execution may occur on 32-bit values. Therefore, things like bitshifts
2885 * need extra checks in the 32-bit case.
2887 static int adjust_scalar_min_max_vals(struct bpf_verifier_env *env,
2888 struct bpf_insn *insn,
2889 struct bpf_reg_state *dst_reg,
2890 struct bpf_reg_state src_reg)
2892 struct bpf_reg_state *regs = cur_regs(env);
2893 u8 opcode = BPF_OP(insn->code);
2894 bool src_known, dst_known;
2895 s64 smin_val, smax_val;
2896 u64 umin_val, umax_val;
2897 u64 insn_bitness = (BPF_CLASS(insn->code) == BPF_ALU64) ? 64 : 32;
2899 smin_val = src_reg.smin_value;
2900 smax_val = src_reg.smax_value;
2901 umin_val = src_reg.umin_value;
2902 umax_val = src_reg.umax_value;
2903 src_known = tnum_is_const(src_reg.var_off);
2904 dst_known = tnum_is_const(dst_reg->var_off);
2906 if ((src_known && (smin_val != smax_val || umin_val != umax_val)) ||
2907 smin_val > smax_val || umin_val > umax_val) {
2908 /* Taint dst register if offset had invalid bounds derived from
2909 * e.g. dead branches.
2911 __mark_reg_unknown(dst_reg);
2916 opcode != BPF_ADD && opcode != BPF_SUB && opcode != BPF_AND) {
2917 __mark_reg_unknown(dst_reg);
2923 if (signed_add_overflows(dst_reg->smin_value, smin_val) ||
2924 signed_add_overflows(dst_reg->smax_value, smax_val)) {
2925 dst_reg->smin_value = S64_MIN;
2926 dst_reg->smax_value = S64_MAX;
2928 dst_reg->smin_value += smin_val;
2929 dst_reg->smax_value += smax_val;
2931 if (dst_reg->umin_value + umin_val < umin_val ||
2932 dst_reg->umax_value + umax_val < umax_val) {
2933 dst_reg->umin_value = 0;
2934 dst_reg->umax_value = U64_MAX;
2936 dst_reg->umin_value += umin_val;
2937 dst_reg->umax_value += umax_val;
2939 dst_reg->var_off = tnum_add(dst_reg->var_off, src_reg.var_off);
2942 if (signed_sub_overflows(dst_reg->smin_value, smax_val) ||
2943 signed_sub_overflows(dst_reg->smax_value, smin_val)) {
2944 /* Overflow possible, we know nothing */
2945 dst_reg->smin_value = S64_MIN;
2946 dst_reg->smax_value = S64_MAX;
2948 dst_reg->smin_value -= smax_val;
2949 dst_reg->smax_value -= smin_val;
2951 if (dst_reg->umin_value < umax_val) {
2952 /* Overflow possible, we know nothing */
2953 dst_reg->umin_value = 0;
2954 dst_reg->umax_value = U64_MAX;
2956 /* Cannot overflow (as long as bounds are consistent) */
2957 dst_reg->umin_value -= umax_val;
2958 dst_reg->umax_value -= umin_val;
2960 dst_reg->var_off = tnum_sub(dst_reg->var_off, src_reg.var_off);
2963 dst_reg->var_off = tnum_mul(dst_reg->var_off, src_reg.var_off);
2964 if (smin_val < 0 || dst_reg->smin_value < 0) {
2965 /* Ain't nobody got time to multiply that sign */
2966 __mark_reg_unbounded(dst_reg);
2967 __update_reg_bounds(dst_reg);
2970 /* Both values are positive, so we can work with unsigned and
2971 * copy the result to signed (unless it exceeds S64_MAX).
2973 if (umax_val > U32_MAX || dst_reg->umax_value > U32_MAX) {
2974 /* Potential overflow, we know nothing */
2975 __mark_reg_unbounded(dst_reg);
2976 /* (except what we can learn from the var_off) */
2977 __update_reg_bounds(dst_reg);
2980 dst_reg->umin_value *= umin_val;
2981 dst_reg->umax_value *= umax_val;
2982 if (dst_reg->umax_value > S64_MAX) {
2983 /* Overflow possible, we know nothing */
2984 dst_reg->smin_value = S64_MIN;
2985 dst_reg->smax_value = S64_MAX;
2987 dst_reg->smin_value = dst_reg->umin_value;
2988 dst_reg->smax_value = dst_reg->umax_value;
2992 if (src_known && dst_known) {
2993 __mark_reg_known(dst_reg, dst_reg->var_off.value &
2994 src_reg.var_off.value);
2997 /* We get our minimum from the var_off, since that's inherently
2998 * bitwise. Our maximum is the minimum of the operands' maxima.
3000 dst_reg->var_off = tnum_and(dst_reg->var_off, src_reg.var_off);
3001 dst_reg->umin_value = dst_reg->var_off.value;
3002 dst_reg->umax_value = min(dst_reg->umax_value, umax_val);
3003 if (dst_reg->smin_value < 0 || smin_val < 0) {
3004 /* Lose signed bounds when ANDing negative numbers,
3005 * ain't nobody got time for that.
3007 dst_reg->smin_value = S64_MIN;
3008 dst_reg->smax_value = S64_MAX;
3010 /* ANDing two positives gives a positive, so safe to
3011 * cast result into s64.
3013 dst_reg->smin_value = dst_reg->umin_value;
3014 dst_reg->smax_value = dst_reg->umax_value;
3016 /* We may learn something more from the var_off */
3017 __update_reg_bounds(dst_reg);
3020 if (src_known && dst_known) {
3021 __mark_reg_known(dst_reg, dst_reg->var_off.value |
3022 src_reg.var_off.value);
3025 /* We get our maximum from the var_off, and our minimum is the
3026 * maximum of the operands' minima
3028 dst_reg->var_off = tnum_or(dst_reg->var_off, src_reg.var_off);
3029 dst_reg->umin_value = max(dst_reg->umin_value, umin_val);
3030 dst_reg->umax_value = dst_reg->var_off.value |
3031 dst_reg->var_off.mask;
3032 if (dst_reg->smin_value < 0 || smin_val < 0) {
3033 /* Lose signed bounds when ORing negative numbers,
3034 * ain't nobody got time for that.
3036 dst_reg->smin_value = S64_MIN;
3037 dst_reg->smax_value = S64_MAX;
3039 /* ORing two positives gives a positive, so safe to
3040 * cast result into s64.
3042 dst_reg->smin_value = dst_reg->umin_value;
3043 dst_reg->smax_value = dst_reg->umax_value;
3045 /* We may learn something more from the var_off */
3046 __update_reg_bounds(dst_reg);
3049 if (umax_val >= insn_bitness) {
3050 /* Shifts greater than 31 or 63 are undefined.
3051 * This includes shifts by a negative number.
3053 mark_reg_unknown(env, regs, insn->dst_reg);
3056 /* We lose all sign bit information (except what we can pick
3059 dst_reg->smin_value = S64_MIN;
3060 dst_reg->smax_value = S64_MAX;
3061 /* If we might shift our top bit out, then we know nothing */
3062 if (dst_reg->umax_value > 1ULL << (63 - umax_val)) {
3063 dst_reg->umin_value = 0;
3064 dst_reg->umax_value = U64_MAX;
3066 dst_reg->umin_value <<= umin_val;
3067 dst_reg->umax_value <<= umax_val;
3069 dst_reg->var_off = tnum_lshift(dst_reg->var_off, umin_val);
3070 /* We may learn something more from the var_off */
3071 __update_reg_bounds(dst_reg);
3074 if (umax_val >= insn_bitness) {
3075 /* Shifts greater than 31 or 63 are undefined.
3076 * This includes shifts by a negative number.
3078 mark_reg_unknown(env, regs, insn->dst_reg);
3081 /* BPF_RSH is an unsigned shift. If the value in dst_reg might
3082 * be negative, then either:
3083 * 1) src_reg might be zero, so the sign bit of the result is
3084 * unknown, so we lose our signed bounds
3085 * 2) it's known negative, thus the unsigned bounds capture the
3087 * 3) the signed bounds cross zero, so they tell us nothing
3089 * If the value in dst_reg is known nonnegative, then again the
3090 * unsigned bounds capture the signed bounds.
3091 * Thus, in all cases it suffices to blow away our signed bounds
3092 * and rely on inferring new ones from the unsigned bounds and
3093 * var_off of the result.
3095 dst_reg->smin_value = S64_MIN;
3096 dst_reg->smax_value = S64_MAX;
3097 dst_reg->var_off = tnum_rshift(dst_reg->var_off, umin_val);
3098 dst_reg->umin_value >>= umax_val;
3099 dst_reg->umax_value >>= umin_val;
3100 /* We may learn something more from the var_off */
3101 __update_reg_bounds(dst_reg);
3104 if (umax_val >= insn_bitness) {
3105 /* Shifts greater than 31 or 63 are undefined.
3106 * This includes shifts by a negative number.
3108 mark_reg_unknown(env, regs, insn->dst_reg);
3112 /* Upon reaching here, src_known is true and
3113 * umax_val is equal to umin_val.
3115 dst_reg->smin_value >>= umin_val;
3116 dst_reg->smax_value >>= umin_val;
3117 dst_reg->var_off = tnum_arshift(dst_reg->var_off, umin_val);
3119 /* blow away the dst_reg umin_value/umax_value and rely on
3120 * dst_reg var_off to refine the result.
3122 dst_reg->umin_value = 0;
3123 dst_reg->umax_value = U64_MAX;
3124 __update_reg_bounds(dst_reg);
3127 mark_reg_unknown(env, regs, insn->dst_reg);
3131 if (BPF_CLASS(insn->code) != BPF_ALU64) {
3132 /* 32-bit ALU ops are (32,32)->32 */
3133 coerce_reg_to_size(dst_reg, 4);
3134 coerce_reg_to_size(&src_reg, 4);
3137 __reg_deduce_bounds(dst_reg);
3138 __reg_bound_offset(dst_reg);
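/* Editor's worked example of the scalar tracking above:
 *
 *	BPF_ALU64_IMM(BPF_AND, BPF_REG_1, 0xff),	// var_off (0; 0xff), bounds [0, 255]
 *	BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 1),		// bounds become [1, 256]
 *	BPF_ALU64_IMM(BPF_LSH, BPF_REG_1, 3),		// umin 8, umax 2048
 *
 * so r1's maximum of 2048 can be used in later bounds checks.
 */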
3142 /* Handles ALU ops other than BPF_END, BPF_NEG and BPF_MOV: computes new min/max
3145 static int adjust_reg_min_max_vals(struct bpf_verifier_env *env,
3146 struct bpf_insn *insn)
3148 struct bpf_verifier_state *vstate = env->cur_state;
3149 struct bpf_func_state *state = vstate->frame[vstate->curframe];
3150 struct bpf_reg_state *regs = state->regs, *dst_reg, *src_reg;
3151 struct bpf_reg_state *ptr_reg = NULL, off_reg = {0};
3152 u8 opcode = BPF_OP(insn->code);
3154 dst_reg = &regs[insn->dst_reg];
3156 if (dst_reg->type != SCALAR_VALUE)
3158 if (BPF_SRC(insn->code) == BPF_X) {
3159 src_reg = &regs[insn->src_reg];
3160 if (src_reg->type != SCALAR_VALUE) {
3161 if (dst_reg->type != SCALAR_VALUE) {
3162 /* Combining two pointers by any ALU op yields
3163 * an arbitrary scalar. Disallow all math except
3164 * pointer subtraction
3166 if (opcode == BPF_SUB) {
3167 mark_reg_unknown(env, regs, insn->dst_reg);
3170 verbose(env, "R%d pointer %s pointer prohibited\n",
3172 bpf_alu_string[opcode >> 4]);
3175 /* scalar += pointer
3176 * This is legal, but we have to reverse our
3177 * src/dest handling in computing the range
3179 return adjust_ptr_min_max_vals(env, insn,
3182 } else if (ptr_reg) {
3183 /* pointer += scalar */
3184 return adjust_ptr_min_max_vals(env, insn,
3188 /* Pretend the src is a reg with a known value, since we only
3189 * need to be able to read from this state.
3191 off_reg.type = SCALAR_VALUE;
3192 __mark_reg_known(&off_reg, insn->imm);
3194 if (ptr_reg) /* pointer += K */
3195 return adjust_ptr_min_max_vals(env, insn,
3199 /* Got here implies adding two SCALAR_VALUEs */
3200 if (WARN_ON_ONCE(ptr_reg)) {
3201 print_verifier_state(env, state);
3202 verbose(env, "verifier internal error: unexpected ptr_reg\n");
3205 if (WARN_ON(!src_reg)) {
3206 print_verifier_state(env, state);
3207 verbose(env, "verifier internal error: no src_reg\n");
3210 return adjust_scalar_min_max_vals(env, insn, dst_reg, *src_reg);
3213 /* check validity of 32-bit and 64-bit arithmetic operations */
3214 static int check_alu_op(struct bpf_verifier_env *env, struct bpf_insn *insn)
3216 struct bpf_reg_state *regs = cur_regs(env);
3217 u8 opcode = BPF_OP(insn->code);
3220 if (opcode == BPF_END || opcode == BPF_NEG) {
3221 if (opcode == BPF_NEG) {
3222 if (BPF_SRC(insn->code) != 0 ||
3223 insn->src_reg != BPF_REG_0 ||
3224 insn->off != 0 || insn->imm != 0) {
3225 verbose(env, "BPF_NEG uses reserved fields\n");
3229 if (insn->src_reg != BPF_REG_0 || insn->off != 0 ||
3230 (insn->imm != 16 && insn->imm != 32 && insn->imm != 64) ||
3231 BPF_CLASS(insn->code) == BPF_ALU64) {
3232 verbose(env, "BPF_END uses reserved fields\n");
3237 /* check src operand */
3238 err = check_reg_arg(env, insn->dst_reg, SRC_OP);
3242 if (is_pointer_value(env, insn->dst_reg)) {
3243 verbose(env, "R%d pointer arithmetic prohibited\n",
3248 /* check dest operand */
3249 err = check_reg_arg(env, insn->dst_reg, DST_OP);
3253 } else if (opcode == BPF_MOV) {
3255 if (BPF_SRC(insn->code) == BPF_X) {
3256 if (insn->imm != 0 || insn->off != 0) {
3257 verbose(env, "BPF_MOV uses reserved fields\n");
3261 /* check src operand */
3262 err = check_reg_arg(env, insn->src_reg, SRC_OP);
3266 if (insn->src_reg != BPF_REG_0 || insn->off != 0) {
3267 verbose(env, "BPF_MOV uses reserved fields\n");
3272 /* check dest operand, mark as required later */
3273 err = check_reg_arg(env, insn->dst_reg, DST_OP_NO_MARK);
3277 if (BPF_SRC(insn->code) == BPF_X) {
3278 if (BPF_CLASS(insn->code) == BPF_ALU64) {
3280 * copy register state to dest reg
3282 regs[insn->dst_reg] = regs[insn->src_reg];
3283 regs[insn->dst_reg].live |= REG_LIVE_WRITTEN;
3286 if (is_pointer_value(env, insn->src_reg)) {
3288 "R%d partial copy of pointer\n",
3292 mark_reg_unknown(env, regs, insn->dst_reg);
3293 coerce_reg_to_size(®s[insn->dst_reg], 4);
3297 * remember the value we stored into this reg
3299 /* clear any state __mark_reg_known doesn't set */
3300 mark_reg_unknown(env, regs, insn->dst_reg);
3301 regs[insn->dst_reg].type = SCALAR_VALUE;
3302 if (BPF_CLASS(insn->code) == BPF_ALU64) {
3303 __mark_reg_known(regs + insn->dst_reg,
3306 __mark_reg_known(regs + insn->dst_reg,
3311 } else if (opcode > BPF_END) {
3312 verbose(env, "invalid BPF_ALU opcode %x\n", opcode);
3315 } else { /* all other ALU ops: and, sub, xor, add, ... */
3317 if (BPF_SRC(insn->code) == BPF_X) {
3318 if (insn->imm != 0 || insn->off != 0) {
3319 verbose(env, "BPF_ALU uses reserved fields\n");
3322 /* check src1 operand */
3323 err = check_reg_arg(env, insn->src_reg, SRC_OP);
3327 if (insn->src_reg != BPF_REG_0 || insn->off != 0) {
3328 verbose(env, "BPF_ALU uses reserved fields\n");
3333 /* check src2 operand */
3334 err = check_reg_arg(env, insn->dst_reg, SRC_OP);
3338 if ((opcode == BPF_MOD || opcode == BPF_DIV) &&
3339 BPF_SRC(insn->code) == BPF_K && insn->imm == 0) {
3340 verbose(env, "div by zero\n");
3344 if (opcode == BPF_ARSH && BPF_CLASS(insn->code) != BPF_ALU64) {
3345 verbose(env, "BPF_ARSH not supported for 32 bit ALU\n");
3349 if ((opcode == BPF_LSH || opcode == BPF_RSH ||
3350 opcode == BPF_ARSH) && BPF_SRC(insn->code) == BPF_K) {
3351 int size = BPF_CLASS(insn->code) == BPF_ALU64 ? 64 : 32;
3353 if (insn->imm < 0 || insn->imm >= size) {
3354 verbose(env, "invalid shift %d\n", insn->imm);
3359 /* check dest operand */
3360 err = check_reg_arg(env, insn->dst_reg, DST_OP_NO_MARK);
3364 return adjust_reg_min_max_vals(env, insn);
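/* Editor's examples of ALU forms rejected by the checks above (sketch only):
 *
 *	BPF_ALU32_IMM(BPF_ARSH, BPF_REG_1, 2),	// ARSH not allowed in 32-bit ALU
 *	BPF_ALU64_IMM(BPF_DIV, BPF_REG_1, 0),	// div by zero immediate
 *	BPF_ALU64_IMM(BPF_LSH, BPF_REG_1, 64),	// shift >= register width
 */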
3370 static void find_good_pkt_pointers(struct bpf_verifier_state *vstate,
3371 struct bpf_reg_state *dst_reg,
3372 enum bpf_reg_type type,
3373 bool range_right_open)
3375 struct bpf_func_state *state = vstate->frame[vstate->curframe];
3376 struct bpf_reg_state *regs = state->regs, *reg;
3380 if (dst_reg->off < 0 ||
3381 (dst_reg->off == 0 && range_right_open))
3382 /* This doesn't give us any range */
3385 if (dst_reg->umax_value > MAX_PACKET_OFF ||
3386 dst_reg->umax_value + dst_reg->off > MAX_PACKET_OFF)
3387 /* Risk of overflow. For instance, ptr + (1<<63) may be less
3388 * than pkt_end, but that's because it's also less than pkt.
3392 new_range = dst_reg->off;
3393 if (range_right_open)
3396 /* Examples for register markings:
3398 * pkt_data in dst register:
3402 * if (r2 > pkt_end) goto <handle exception>
3407 * if (r2 < pkt_end) goto <access okay>
3408 * <handle exception>
3411 * r2 == dst_reg, pkt_end == src_reg
3412 * r2=pkt(id=n,off=8,r=0)
3413 * r3=pkt(id=n,off=0,r=0)
3415 * pkt_data in src register:
3419 * if (pkt_end >= r2) goto <access okay>
3420 * <handle exception>
3424 * if (pkt_end <= r2) goto <handle exception>
3428 * pkt_end == dst_reg, r2 == src_reg
3429 * r2=pkt(id=n,off=8,r=0)
3430 * r3=pkt(id=n,off=0,r=0)
3432 * Find register r3 and mark its range as r3=pkt(id=n,off=0,r=8)
3433 * or r3=pkt(id=n,off=0,r=8-1), so that range of bytes [r3, r3 + 8)
3434 * and [r3, r3 + 8-1) respectively is safe to access depending on
3438 /* If our ids match, then we must have the same max_value. And we
3439 * don't care about the other reg's fixed offset, since if it's too big
3440 * the range won't allow anything.
3441 * dst_reg->off is known < MAX_PACKET_OFF, therefore it fits in a u16.
3443 for (i = 0; i < MAX_BPF_REG; i++)
3444 if (regs[i].type == type && regs[i].id == dst_reg->id)
3445 /* keep the maximum range already checked */
3446 regs[i].range = max(regs[i].range, new_range);
3448 for (j = 0; j <= vstate->curframe; j++) {
3449 state = vstate->frame[j];
3450 for (i = 0; i < state->allocated_stack / BPF_REG_SIZE; i++) {
3451 if (state->stack[i].slot_type[0] != STACK_SPILL)
3453 reg = &state->stack[i].spilled_ptr;
3454 if (reg->type == type && reg->id == dst_reg->id)
3455 reg->range = max(reg->range, new_range);
3460 /* Adjusts the register min/max values in the case that the dst_reg is the
3461 * variable register that we are working on, and src_reg is a constant or we're
3462 * simply doing a BPF_K check.
3463 * In JEQ/JNE cases we also adjust the var_off values.
3465 static void reg_set_min_max(struct bpf_reg_state *true_reg,
3466 struct bpf_reg_state *false_reg, u64 val,
3469 /* If the dst_reg is a pointer, we can't learn anything about its
3470 * variable offset from the compare (unless src_reg were a pointer into
3471 * the same object, but we don't bother with that.
3472 * Since false_reg and true_reg have the same type by construction, we
3473 * only need to check one of them for pointerness.
3475 if (__is_pointer_value(false, false_reg))
3480 /* If this is false then we know nothing Jon Snow, but if it is
3481 * true then we know for sure.
3483 __mark_reg_known(true_reg, val);
3486 /* If this is true we know nothing Jon Snow, but if it is false
3487 * we know the value for sure;
3489 __mark_reg_known(false_reg, val);
3492 false_reg->umax_value = min(false_reg->umax_value, val);
3493 true_reg->umin_value = max(true_reg->umin_value, val + 1);
3496 false_reg->smax_value = min_t(s64, false_reg->smax_value, val);
3497 true_reg->smin_value = max_t(s64, true_reg->smin_value, val + 1);
3500 false_reg->umin_value = max(false_reg->umin_value, val);
3501 true_reg->umax_value = min(true_reg->umax_value, val - 1);
3504 false_reg->smin_value = max_t(s64, false_reg->smin_value, val);
3505 true_reg->smax_value = min_t(s64, true_reg->smax_value, val - 1);
3508 false_reg->umax_value = min(false_reg->umax_value, val - 1);
3509 true_reg->umin_value = max(true_reg->umin_value, val);
3512 false_reg->smax_value = min_t(s64, false_reg->smax_value, val - 1);
3513 true_reg->smin_value = max_t(s64, true_reg->smin_value, val);
3516 false_reg->umin_value = max(false_reg->umin_value, val + 1);
3517 true_reg->umax_value = min(true_reg->umax_value, val);
3520 false_reg->smin_value = max_t(s64, false_reg->smin_value, val + 1);
3521 true_reg->smax_value = min_t(s64, true_reg->smax_value, val);
3527 __reg_deduce_bounds(false_reg);
3528 __reg_deduce_bounds(true_reg);
3529 /* We might have learned some bits from the bounds. */
3530 __reg_bound_offset(false_reg);
3531 __reg_bound_offset(true_reg);
3532 /* Intersecting with the old var_off might have improved our bounds
3533 * slightly. e.g. if umax was 0x7f...f and var_off was (0; 0xf...fc),
3534 * then new var_off is (0; 0x7f...fc) which improves our umax.
3536 __update_reg_bounds(false_reg);
3537 __update_reg_bounds(true_reg);
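/* Editor's worked example for the branch-based refinement above: given a
 * scalar r1 and
 *
 *	BPF_JMP_IMM(BPF_JGT, BPF_REG_1, 7, <off>),	// <off> is a placeholder
 *
 * the branch state (true_reg) gets umin_value = 8 while the fall-through
 * state (false_reg) gets umax_value = 7; the signed bounds then follow via
 * __reg_deduce_bounds().
 */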
3540 /* Same as above, but for the case that dst_reg holds a constant and src_reg is
3543 static void reg_set_min_max_inv(struct bpf_reg_state *true_reg,
3544 struct bpf_reg_state *false_reg, u64 val,
3547 if (__is_pointer_value(false, false_reg))
3552 /* If this is false then we know nothing Jon Snow, but if it is
3553 * true then we know for sure.
3555 __mark_reg_known(true_reg, val);
3558 /* If this is true we know nothing Jon Snow, but if it is false
3559 * we know the value for sure;
3561 __mark_reg_known(false_reg, val);
3564 true_reg->umax_value = min(true_reg->umax_value, val - 1);
3565 false_reg->umin_value = max(false_reg->umin_value, val);
3568 true_reg->smax_value = min_t(s64, true_reg->smax_value, val - 1);
3569 false_reg->smin_value = max_t(s64, false_reg->smin_value, val);
3572 true_reg->umin_value = max(true_reg->umin_value, val + 1);
3573 false_reg->umax_value = min(false_reg->umax_value, val);
3576 true_reg->smin_value = max_t(s64, true_reg->smin_value, val + 1);
3577 false_reg->smax_value = min_t(s64, false_reg->smax_value, val);
3580 true_reg->umax_value = min(true_reg->umax_value, val);
3581 false_reg->umin_value = max(false_reg->umin_value, val + 1);
3584 true_reg->smax_value = min_t(s64, true_reg->smax_value, val);
3585 false_reg->smin_value = max_t(s64, false_reg->smin_value, val + 1);
3588 true_reg->umin_value = max(true_reg->umin_value, val);
3589 false_reg->umax_value = min(false_reg->umax_value, val - 1);
3592 true_reg->smin_value = max_t(s64, true_reg->smin_value, val);
3593 false_reg->smax_value = min_t(s64, false_reg->smax_value, val - 1);
3599 __reg_deduce_bounds(false_reg);
3600 __reg_deduce_bounds(true_reg);
3601 /* We might have learned some bits from the bounds. */
3602 __reg_bound_offset(false_reg);
3603 __reg_bound_offset(true_reg);
3604 /* Intersecting with the old var_off might have improved our bounds
3605 * slightly. e.g. if umax was 0x7f...f and var_off was (0; 0xf...fc),
3606 * then new var_off is (0; 0x7f...fc) which improves our umax.
3608 __update_reg_bounds(false_reg);
3609 __update_reg_bounds(true_reg);
3612 /* Regs are known to be equal, so intersect their min/max/var_off */
3613 static void __reg_combine_min_max(struct bpf_reg_state *src_reg,
3614 struct bpf_reg_state *dst_reg)
3616 src_reg->umin_value = dst_reg->umin_value = max(src_reg->umin_value,
3617 dst_reg->umin_value);
3618 src_reg->umax_value = dst_reg->umax_value = min(src_reg->umax_value,
3619 dst_reg->umax_value);
3620 src_reg->smin_value = dst_reg->smin_value = max(src_reg->smin_value,
3621 dst_reg->smin_value);
3622 src_reg->smax_value = dst_reg->smax_value = min(src_reg->smax_value,
3623 dst_reg->smax_value);
3624 src_reg->var_off = dst_reg->var_off = tnum_intersect(src_reg->var_off,
3626 /* We might have learned new bounds from the var_off. */
3627 __update_reg_bounds(src_reg);
3628 __update_reg_bounds(dst_reg);
3629 /* We might have learned something about the sign bit. */
3630 __reg_deduce_bounds(src_reg);
3631 __reg_deduce_bounds(dst_reg);
3632 /* We might have learned some bits from the bounds. */
3633 __reg_bound_offset(src_reg);
3634 __reg_bound_offset(dst_reg);
3635 /* Intersecting with the old var_off might have improved our bounds
3636 * slightly. e.g. if umax was 0x7f...f and var_off was (0; 0xf...fc),
3637 * then new var_off is (0; 0x7f...fc) which improves our umax.
3639 __update_reg_bounds(src_reg);
3640 __update_reg_bounds(dst_reg);
3643 static void reg_combine_min_max(struct bpf_reg_state *true_src,
3644 struct bpf_reg_state *true_dst,
3645 struct bpf_reg_state *false_src,
3646 struct bpf_reg_state *false_dst,
3651 __reg_combine_min_max(true_src, true_dst);
3654 __reg_combine_min_max(false_src, false_dst);
3659 static void mark_map_reg(struct bpf_reg_state *regs, u32 regno, u32 id,
3662 struct bpf_reg_state *reg = &regs[regno];
3664 if (reg->type == PTR_TO_MAP_VALUE_OR_NULL && reg->id == id) {
3665 /* Old offset (both fixed and variable parts) should
3666 * have been known-zero, because we don't allow pointer
3667 * arithmetic on pointers that might be NULL.
3669 if (WARN_ON_ONCE(reg->smin_value || reg->smax_value ||
3670 !tnum_equals_const(reg->var_off, 0) ||
3672 __mark_reg_known_zero(reg);
3676 reg->type = SCALAR_VALUE;
3677 } else if (reg->map_ptr->inner_map_meta) {
3678 reg->type = CONST_PTR_TO_MAP;
3679 reg->map_ptr = reg->map_ptr->inner_map_meta;
3681 reg->type = PTR_TO_MAP_VALUE;
3683 /* We don't need id from this point onwards anymore, thus we
3684 * should better reset it, so that state pruning has chances
3691 /* The logic is similar to find_good_pkt_pointers(), both could eventually
3692 * be folded together at some point.
3694 static void mark_map_regs(struct bpf_verifier_state *vstate, u32 regno,
3697 struct bpf_func_state *state = vstate->frame[vstate->curframe];
3698 struct bpf_reg_state *regs = state->regs;
3699 u32 id = regs[regno].id;
3702 for (i = 0; i < MAX_BPF_REG; i++)
3703 mark_map_reg(regs, i, id, is_null);
3705 for (j = 0; j <= vstate->curframe; j++) {
3706 state = vstate->frame[j];
3707 for (i = 0; i < state->allocated_stack / BPF_REG_SIZE; i++) {
3708 if (state->stack[i].slot_type[0] != STACK_SPILL)
3710 mark_map_reg(&state->stack[i].spilled_ptr, 0, id, is_null);
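/* Editor's illustration of the NULL-check pattern that drives mark_map_regs()
 * (sketch; assumes r1/r2 were set up with the map and key as usual):
 *
 *	BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
 *	BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, <exit>),	// <exit> is a placeholder
 *	// fall-through: every register carrying this r0 id becomes PTR_TO_MAP_VALUE
 *	BPF_LDX_MEM(BPF_W, BPF_REG_1, BPF_REG_0, 0),
 *
 * In the taken branch, the same registers become known-zero scalars.
 */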
3715 static bool try_match_pkt_pointers(const struct bpf_insn *insn,
3716 struct bpf_reg_state *dst_reg,
3717 struct bpf_reg_state *src_reg,
3718 struct bpf_verifier_state *this_branch,
3719 struct bpf_verifier_state *other_branch)
3721 if (BPF_SRC(insn->code) != BPF_X)
3724 switch (BPF_OP(insn->code)) {
3726 if ((dst_reg->type == PTR_TO_PACKET &&
3727 src_reg->type == PTR_TO_PACKET_END) ||
3728 (dst_reg->type == PTR_TO_PACKET_META &&
3729 reg_is_init_pkt_pointer(src_reg, PTR_TO_PACKET))) {
3730 /* pkt_data' > pkt_end, pkt_meta' > pkt_data */
3731 find_good_pkt_pointers(this_branch, dst_reg,
3732 dst_reg->type, false);
3733 } else if ((dst_reg->type == PTR_TO_PACKET_END &&
3734 src_reg->type == PTR_TO_PACKET) ||
3735 (reg_is_init_pkt_pointer(dst_reg, PTR_TO_PACKET) &&
3736 src_reg->type == PTR_TO_PACKET_META)) {
3737 /* pkt_end > pkt_data', pkt_data > pkt_meta' */
3738 find_good_pkt_pointers(other_branch, src_reg,
3739 src_reg->type, true);
3745 if ((dst_reg->type == PTR_TO_PACKET &&
3746 src_reg->type == PTR_TO_PACKET_END) ||
3747 (dst_reg->type == PTR_TO_PACKET_META &&
3748 reg_is_init_pkt_pointer(src_reg, PTR_TO_PACKET))) {
3749 /* pkt_data' < pkt_end, pkt_meta' < pkt_data */
3750 find_good_pkt_pointers(other_branch, dst_reg,
3751 dst_reg->type, true);
3752 } else if ((dst_reg->type == PTR_TO_PACKET_END &&
3753 src_reg->type == PTR_TO_PACKET) ||
3754 (reg_is_init_pkt_pointer(dst_reg, PTR_TO_PACKET) &&
3755 src_reg->type == PTR_TO_PACKET_META)) {
3756 /* pkt_end < pkt_data', pkt_data < pkt_meta' */
3757 find_good_pkt_pointers(this_branch, src_reg,
3758 src_reg->type, false);
3764 if ((dst_reg->type == PTR_TO_PACKET &&
3765 src_reg->type == PTR_TO_PACKET_END) ||
3766 (dst_reg->type == PTR_TO_PACKET_META &&
3767 reg_is_init_pkt_pointer(src_reg, PTR_TO_PACKET))) {
3768 /* pkt_data' >= pkt_end, pkt_meta' >= pkt_data */
3769 find_good_pkt_pointers(this_branch, dst_reg,
3770 dst_reg->type, true);
3771 } else if ((dst_reg->type == PTR_TO_PACKET_END &&
3772 src_reg->type == PTR_TO_PACKET) ||
3773 (reg_is_init_pkt_pointer(dst_reg, PTR_TO_PACKET) &&
3774 src_reg->type == PTR_TO_PACKET_META)) {
3775 /* pkt_end >= pkt_data', pkt_data >= pkt_meta' */
3776 find_good_pkt_pointers(other_branch, src_reg,
3777 src_reg->type, false);
3783 if ((dst_reg->type == PTR_TO_PACKET &&
3784 src_reg->type == PTR_TO_PACKET_END) ||
3785 (dst_reg->type == PTR_TO_PACKET_META &&
3786 reg_is_init_pkt_pointer(src_reg, PTR_TO_PACKET))) {
3787 /* pkt_data' <= pkt_end, pkt_meta' <= pkt_data */
3788 find_good_pkt_pointers(other_branch, dst_reg,
3789 dst_reg->type, false);
3790 } else if ((dst_reg->type == PTR_TO_PACKET_END &&
3791 src_reg->type == PTR_TO_PACKET) ||
3792 (reg_is_init_pkt_pointer(dst_reg, PTR_TO_PACKET) &&
3793 src_reg->type == PTR_TO_PACKET_META)) {
3794 /* pkt_end <= pkt_data', pkt_data <= pkt_meta' */
3795 find_good_pkt_pointers(this_branch, src_reg,
3796 src_reg->type, true);
3808 static int check_cond_jmp_op(struct bpf_verifier_env *env,
3809 struct bpf_insn *insn, int *insn_idx)
3811 struct bpf_verifier_state *this_branch = env->cur_state;
3812 struct bpf_verifier_state *other_branch;
3813 struct bpf_reg_state *regs = this_branch->frame[this_branch->curframe]->regs;
3814 struct bpf_reg_state *dst_reg, *other_branch_regs;
3815 u8 opcode = BPF_OP(insn->code);
3818 if (opcode > BPF_JSLE) {
3819 verbose(env, "invalid BPF_JMP opcode %x\n", opcode);
3823 if (BPF_SRC(insn->code) == BPF_X) {
3824 if (insn->imm != 0) {
3825 verbose(env, "BPF_JMP uses reserved fields\n");
3829 /* check src1 operand */
3830 err = check_reg_arg(env, insn->src_reg, SRC_OP);
3834 if (is_pointer_value(env, insn->src_reg)) {
3835 verbose(env, "R%d pointer comparison prohibited\n",
3840 if (insn->src_reg != BPF_REG_0) {
3841 verbose(env, "BPF_JMP uses reserved fields\n");
3846 /* check src2 operand */
3847 err = check_reg_arg(env, insn->dst_reg, SRC_OP);
3851 dst_reg = &regs[insn->dst_reg];
3853 /* detect if R == 0 where R was initialized to zero earlier */
3854 if (BPF_SRC(insn->code) == BPF_K &&
3855 (opcode == BPF_JEQ || opcode == BPF_JNE) &&
3856 dst_reg->type == SCALAR_VALUE &&
3857 tnum_is_const(dst_reg->var_off)) {
3858 if ((opcode == BPF_JEQ && dst_reg->var_off.value == insn->imm) ||
3859 (opcode == BPF_JNE && dst_reg->var_off.value != insn->imm)) {
3860 /* if (imm == imm) goto pc+off;
3861 * only follow the goto, ignore fall-through
3863 *insn_idx += insn->off;
3866 /* if (imm != imm) goto pc+off;
3867 * only follow fall-through branch, since
3868 * that's where the program will go
3874 other_branch = push_stack(env, *insn_idx + insn->off + 1, *insn_idx);
3877 other_branch_regs = other_branch->frame[other_branch->curframe]->regs;
3879 /* detect if we are comparing against a constant value so we can adjust
3880 * our min/max values for our dst register.
3881 * this is only legit if both are scalars (or pointers to the same
3882 * object, I suppose, but we don't support that right now), because
3883 * otherwise the different base pointers mean the offsets aren't
3886 if (BPF_SRC(insn->code) == BPF_X) {
3887 if (dst_reg->type == SCALAR_VALUE &&
3888 regs[insn->src_reg].type == SCALAR_VALUE) {
3889 if (tnum_is_const(regs[insn->src_reg].var_off))
3890 reg_set_min_max(&other_branch_regs[insn->dst_reg],
3891 dst_reg, regs[insn->src_reg].var_off.value,
3893 else if (tnum_is_const(dst_reg->var_off))
3894 reg_set_min_max_inv(&other_branch_regs[insn->src_reg],
3895 &regs[insn->src_reg],
3896 dst_reg->var_off.value, opcode);
3897 else if (opcode == BPF_JEQ || opcode == BPF_JNE)
3898 /* Comparing for equality, we can combine knowledge */
3899 reg_combine_min_max(&other_branch_regs[insn->src_reg],
3900 &other_branch_regs[insn->dst_reg],
3901 &regs[insn->src_reg],
3902 &regs[insn->dst_reg], opcode);
3904 } else if (dst_reg->type == SCALAR_VALUE) {
3905 reg_set_min_max(&other_branch_regs[insn->dst_reg],
3906 dst_reg, insn->imm, opcode);
3909 /* detect if R == 0 where R is returned from bpf_map_lookup_elem() */
3910 if (BPF_SRC(insn->code) == BPF_K &&
3911 insn->imm == 0 && (opcode == BPF_JEQ || opcode == BPF_JNE) &&
3912 dst_reg->type == PTR_TO_MAP_VALUE_OR_NULL) {
3913 /* Mark all identical map registers in each branch as either
3914 * safe or unknown depending on the R == 0 or R != 0 conditional.
3916 mark_map_regs(this_branch, insn->dst_reg, opcode == BPF_JNE);
3917 mark_map_regs(other_branch, insn->dst_reg, opcode == BPF_JEQ);
3918 } else if (!try_match_pkt_pointers(insn, dst_reg, &regs[insn->src_reg],
3919 this_branch, other_branch) &&
3920 is_pointer_value(env, insn->dst_reg)) {
3921 verbose(env, "R%d pointer comparison prohibited\n",
3926 print_verifier_state(env, this_branch->frame[this_branch->curframe]);
3930 /* return the map pointer stored inside BPF_LD_IMM64 instruction */
3931 static struct bpf_map *ld_imm64_to_map_ptr(struct bpf_insn *insn)
3933 u64 imm64 = ((u64) (u32) insn[0].imm) | ((u64) (u32) insn[1].imm) << 32;
3935 return (struct bpf_map *) (unsigned long) imm64;
3938 /* verify BPF_LD_IMM64 instruction */
3939 static int check_ld_imm(struct bpf_verifier_env *env, struct bpf_insn *insn)
3941 struct bpf_reg_state *regs = cur_regs(env);
3944 if (BPF_SIZE(insn->code) != BPF_DW) {
3945 verbose(env, "invalid BPF_LD_IMM insn\n");
3948 if (insn->off != 0) {
3949 verbose(env, "BPF_LD_IMM64 uses reserved fields\n");
3953 err = check_reg_arg(env, insn->dst_reg, DST_OP);
3957 if (insn->src_reg == 0) {
3958 u64 imm = ((u64)(insn + 1)->imm << 32) | (u32)insn->imm;
3960 regs[insn->dst_reg].type = SCALAR_VALUE;
3961 __mark_reg_known(&regs[insn->dst_reg], imm);
3965 /* replace_map_fd_with_map_ptr() should have caught bad ld_imm64 */
3966 BUG_ON(insn->src_reg != BPF_PSEUDO_MAP_FD);
3968 regs[insn->dst_reg].type = CONST_PTR_TO_MAP;
3969 regs[insn->dst_reg].map_ptr = ld_imm64_to_map_ptr(insn);
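/* Editor's note: the usual source of CONST_PTR_TO_MAP above is a loader
 * pseudo instruction such as
 *
 *	BPF_LD_MAP_FD(BPF_REG_1, map_fd),	// map_fd is a placeholder
 *
 * where replace_map_fd_with_map_ptr() has already rewritten the fd into a
 * kernel map pointer before this check runs.
 */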
3973 static bool may_access_skb(enum bpf_prog_type type)
3976 case BPF_PROG_TYPE_SOCKET_FILTER:
3977 case BPF_PROG_TYPE_SCHED_CLS:
3978 case BPF_PROG_TYPE_SCHED_ACT:
3985 /* verify safety of LD_ABS|LD_IND instructions:
3986 * - they can only appear in the programs where ctx == skb
3987 * - since they are wrappers of function calls, they scratch R1-R5 registers,
3988 * preserve R6-R9, and store return value into R0
3991 * ctx == skb == R6 == CTX
3994 * SRC == any register
3995 * IMM == 32-bit immediate
3998 * R0 - 8/16/32-bit skb data converted to cpu endianness
4000 static int check_ld_abs(struct bpf_verifier_env *env, struct bpf_insn *insn)
4002 struct bpf_reg_state *regs = cur_regs(env);
4003 u8 mode = BPF_MODE(insn->code);
4006 if (!may_access_skb(env->prog->type)) {
4007 verbose(env, "BPF_LD_[ABS|IND] instructions not allowed for this program type\n");
4011 if (!env->ops->gen_ld_abs) {
4012 verbose(env, "bpf verifier is misconfigured\n");
4016 if (env->subprog_cnt > 1) {
4017 /* when a program has an LD_ABS insn, JITs and the interpreter assume
4018 * that r1 == ctx == skb, which is not the case for callees
4019 * that can have arbitrary arguments. It's problematic
4020 * for main prog as well since JITs would need to analyze
4021 * all functions in order to make proper register save/restore
4022 * decisions in the main prog. Hence disallow LD_ABS with calls
4024 verbose(env, "BPF_LD_[ABS|IND] instructions cannot be mixed with bpf-to-bpf calls\n");
4028 if (insn->dst_reg != BPF_REG_0 || insn->off != 0 ||
4029 BPF_SIZE(insn->code) == BPF_DW ||
4030 (mode == BPF_ABS && insn->src_reg != BPF_REG_0)) {
4031 verbose(env, "BPF_LD_[ABS|IND] uses reserved fields\n");
4035 /* check whether implicit source operand (register R6) is readable */
4036 err = check_reg_arg(env, BPF_REG_6, SRC_OP);
4040 if (regs[BPF_REG_6].type != PTR_TO_CTX) {
4042 "at the time of BPF_LD_ABS|IND R6 != pointer to skb\n");
4046 if (mode == BPF_IND) {
4047 /* check explicit source operand */
4048 err = check_reg_arg(env, insn->src_reg, SRC_OP);
4053 /* reset caller saved regs to unreadable */
4054 for (i = 0; i < CALLER_SAVED_REGS; i++) {
4055 mark_reg_not_init(env, regs, caller_saved[i]);
4056 check_reg_arg(env, caller_saved[i], DST_OP_NO_MARK);
4059 /* mark destination R0 register as readable, since it contains
4060 * the value fetched from the packet.
4061 * Already marked as written above.
4063 mark_reg_unknown(env, regs, BPF_REG_0);
4067 static int check_return_code(struct bpf_verifier_env *env)
4069 struct bpf_reg_state *reg;
4070 struct tnum range = tnum_range(0, 1);
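/* tnum_range(0, 1) yields the tnum {value = 0, mask = 1}, i.e. exactly the
 * set {0, 1} of return values these program types are allowed to use.
 */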
4072 switch (env->prog->type) {
4073 case BPF_PROG_TYPE_CGROUP_SKB:
4074 case BPF_PROG_TYPE_CGROUP_SOCK:
4075 case BPF_PROG_TYPE_CGROUP_SOCK_ADDR:
4076 case BPF_PROG_TYPE_SOCK_OPS:
4077 case BPF_PROG_TYPE_CGROUP_DEVICE:
4083 reg = cur_regs(env) + BPF_REG_0;
4084 if (reg->type != SCALAR_VALUE) {
4085 verbose(env, "At program exit the register R0 is not a known value (%s)\n",
4086 reg_type_str[reg->type]);
4090 if (!tnum_in(range, reg->var_off)) {
4091 verbose(env, "At program exit the register R0 ");
4092 if (!tnum_is_unknown(reg->var_off)) {
4095 tnum_strn(tn_buf, sizeof(tn_buf), reg->var_off);
4096 verbose(env, "has value %s", tn_buf);
4098 verbose(env, "has unknown scalar value");
4100 verbose(env, " should have been 0 or 1\n");
4106 /* non-recursive DFS pseudo code
4107 * 1 procedure DFS-iterative(G,v):
4108 * 2 label v as discovered
4109 * 3 let S be a stack
4111 * 5 while S is not empty
4113 * 7 if t is what we're looking for:
4115 * 9 for all edges e in G.adjacentEdges(t) do
4116 * 10 if edge e is already labelled
4117 * 11 continue with the next edge
4118 * 12 w <- G.adjacentVertex(t,e)
4119 * 13 if vertex w is not discovered and not explored
4120 * 14 label e as tree-edge
4121 * 15 label w as discovered
4124 * 18 else if vertex w is discovered
4125 * 19 label e as back-edge
4127 * 21 // vertex w is explored
4128 * 22 label e as forward- or cross-edge
4129 * 23 label t as explored
4134 * 0x11 - discovered and fall-through edge labelled
4135 * 0x12 - discovered and fall-through and branch edges labelled
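 *
 * Back-edge example (illustrative fragment): in
 *   0: r0 = 0
 *   1: goto -2        target = 1 + (-2) + 1 = insn 0
 * insn 0 is still DISCOVERED (still on the stack) when the edge 1 -> 0 is
 * pushed, so push_insn() reports "back-edge from insn 1 to 0" and the
 * program is rejected as containing a loop.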
4146 #define STATE_LIST_MARK ((struct bpf_verifier_state_list *) -1L)
4148 static int *insn_stack; /* stack of insns to process */
4149 static int cur_stack; /* current stack index */
4150 static int *insn_state;
4152 /* t, w, e - match pseudo-code above:
4153 * t - index of current instruction
4154 * w - next instruction
4157 static int push_insn(int t, int w, int e, struct bpf_verifier_env *env)
4159 if (e == FALLTHROUGH && insn_state[t] >= (DISCOVERED | FALLTHROUGH))
4162 if (e == BRANCH && insn_state[t] >= (DISCOVERED | BRANCH))
4165 if (w < 0 || w >= env->prog->len) {
4166 verbose(env, "jump out of range from insn %d to %d\n", t, w);
4171 /* mark branch target for state pruning */
4172 env->explored_states[w] = STATE_LIST_MARK;
4174 if (insn_state[w] == 0) {
4176 insn_state[t] = DISCOVERED | e;
4177 insn_state[w] = DISCOVERED;
4178 if (cur_stack >= env->prog->len)
4180 insn_stack[cur_stack++] = w;
4182 } else if ((insn_state[w] & 0xF0) == DISCOVERED) {
4183 verbose(env, "back-edge from insn %d to %d\n", t, w);
4185 } else if (insn_state[w] == EXPLORED) {
4186 /* forward- or cross-edge */
4187 insn_state[t] = DISCOVERED | e;
4189 verbose(env, "insn state internal bug\n");
4195 /* non-recursive depth-first-search to detect loops in BPF program
4196 * loop == back-edge in directed graph
4198 static int check_cfg(struct bpf_verifier_env *env)
4200 struct bpf_insn *insns = env->prog->insnsi;
4201 int insn_cnt = env->prog->len;
4205 ret = check_subprogs(env);
4209 insn_state = kcalloc(insn_cnt, sizeof(int), GFP_KERNEL);
4213 insn_stack = kcalloc(insn_cnt, sizeof(int), GFP_KERNEL);
4219 insn_state[0] = DISCOVERED; /* mark 1st insn as discovered */
4220 insn_stack[0] = 0; /* 0 is the first instruction */
4226 t = insn_stack[cur_stack - 1];
4228 if (BPF_CLASS(insns[t].code) == BPF_JMP) {
4229 u8 opcode = BPF_OP(insns[t].code);
4231 if (opcode == BPF_EXIT) {
4233 } else if (opcode == BPF_CALL) {
4234 ret = push_insn(t, t + 1, FALLTHROUGH, env);
4239 if (t + 1 < insn_cnt)
4240 env->explored_states[t + 1] = STATE_LIST_MARK;
4241 if (insns[t].src_reg == BPF_PSEUDO_CALL) {
4242 env->explored_states[t] = STATE_LIST_MARK;
4243 ret = push_insn(t, t + insns[t].imm + 1, BRANCH, env);
4249 } else if (opcode == BPF_JA) {
4250 if (BPF_SRC(insns[t].code) != BPF_K) {
4254 /* unconditional jump with single edge */
4255 ret = push_insn(t, t + insns[t].off + 1,
4261 /* tell verifier to check for equivalent states
4262 * after every call and jump
4264 if (t + 1 < insn_cnt)
4265 env->explored_states[t + 1] = STATE_LIST_MARK;
4267 /* conditional jump with two edges */
4268 env->explored_states[t] = STATE_LIST_MARK;
4269 ret = push_insn(t, t + 1, FALLTHROUGH, env);
4275 ret = push_insn(t, t + insns[t].off + 1, BRANCH, env);
4282 /* all other non-branch instructions with single
4285 ret = push_insn(t, t + 1, FALLTHROUGH, env);
4293 insn_state[t] = EXPLORED;
4294 if (cur_stack-- <= 0) {
4295 verbose(env, "pop stack internal bug\n");
4302 for (i = 0; i < insn_cnt; i++) {
4303 if (insn_state[i] != EXPLORED) {
4304 verbose(env, "unreachable insn %d\n", i);
4309 ret = 0; /* cfg looks good */
4317 /* check %cur's range satisfies %old's */
4318 static bool range_within(struct bpf_reg_state *old,
4319 struct bpf_reg_state *cur)
4321 return old->umin_value <= cur->umin_value &&
4322 old->umax_value >= cur->umax_value &&
4323 old->smin_value <= cur->smin_value &&
4324 old->smax_value >= cur->smax_value;
4327 /* Maximum number of register states that can exist at once */
4328 #define ID_MAP_SIZE (MAX_BPF_REG + MAX_BPF_STACK / BPF_REG_SIZE)
4334 /* If in the old state two registers had the same id, then they need to have
4335 * the same id in the new state as well. But that id could be different from
4336 * the old state, so we need to track the mapping from old to new ids.
4337 * Once we have seen that, say, a reg with old id 5 had new id 9, any subsequent
4338 * regs with old id 5 must also have new id 9 for the new state to be safe. But
4339 * regs with a different old id could still have new id 9, we don't care about that.
4341 * So we look through our idmap to see if this old id has been seen before. If
4342 * so, we require the new id to match; otherwise, we add the id pair to the map.
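 *
 * Concretely (sketch): if in the old state r1 and r2 both carried id 5
 * (e.g. both copies of the same map_lookup_elem() result) and the current
 * state maps old id 5 to new id 9 when r1 is checked, then r2 must also
 * carry id 9; otherwise a NULL check on one register would no longer be
 * known to cover the other.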
4344 static bool check_ids(u32 old_id, u32 cur_id, struct idpair *idmap)
4348 for (i = 0; i < ID_MAP_SIZE; i++) {
4349 if (!idmap[i].old) {
4350 /* Reached an empty slot; haven't seen this id before */
4351 idmap[i].old = old_id;
4352 idmap[i].cur = cur_id;
4355 if (idmap[i].old == old_id)
4356 return idmap[i].cur == cur_id;
4358 /* We ran out of idmap slots, which should be impossible */
4363 /* Returns true if (rold safe implies rcur safe) */
4364 static bool regsafe(struct bpf_reg_state *rold, struct bpf_reg_state *rcur,
4365 struct idpair *idmap)
4369 if (!(rold->live & REG_LIVE_READ))
4370 /* explored state didn't use this */
4373 equal = memcmp(rold, rcur, offsetof(struct bpf_reg_state, frameno)) == 0;
4375 if (rold->type == PTR_TO_STACK)
4376 /* two stack pointers are equal only if they're pointing to
4377 * the same stack frame, since fp-8 in foo != fp-8 in bar
4379 return equal && rold->frameno == rcur->frameno;
4384 if (rold->type == NOT_INIT)
4385 /* explored state can't have used this */
4387 if (rcur->type == NOT_INIT)
4389 switch (rold->type) {
4391 if (rcur->type == SCALAR_VALUE) {
4392 /* new val must satisfy old val knowledge */
4393 return range_within(rold, rcur) &&
4394 tnum_in(rold->var_off, rcur->var_off);
4396 /* We're trying to use a pointer in place of a scalar.
4397 * Even if the scalar was unbounded, this could lead to
4398 * pointer leaks because scalars are allowed to leak
4399 * while pointers are not. We could make this safe in
4400 * special cases if root is calling us, but it's
4401 * probably not worth the hassle.
4405 case PTR_TO_MAP_VALUE:
4406 /* If the new min/max/var_off satisfy the old ones and
4407 * everything else matches, we are OK.
4408 * We don't care about the 'id' value, because nothing
4409 * uses it for PTR_TO_MAP_VALUE (only for ..._OR_NULL)
4411 return memcmp(rold, rcur, offsetof(struct bpf_reg_state, id)) == 0 &&
4412 range_within(rold, rcur) &&
4413 tnum_in(rold->var_off, rcur->var_off);
4414 case PTR_TO_MAP_VALUE_OR_NULL:
4415 /* a PTR_TO_MAP_VALUE could be safe to use as a
4416 * PTR_TO_MAP_VALUE_OR_NULL into the same map.
4417 * However, if the old PTR_TO_MAP_VALUE_OR_NULL then got NULL-
4418 * checked, doing so could have affected others with the same
4419 * id, and we can't check for that because we lost the id when
4420 * we converted to a PTR_TO_MAP_VALUE.
4422 if (rcur->type != PTR_TO_MAP_VALUE_OR_NULL)
4424 if (memcmp(rold, rcur, offsetof(struct bpf_reg_state, id)))
4426 /* Check our ids match any regs they're supposed to */
4427 return check_ids(rold->id, rcur->id, idmap);
4428 case PTR_TO_PACKET_META:
4430 if (rcur->type != rold->type)
4432 /* We must have at least as much range as the old ptr
4433 * did, so that any accesses which were safe before are
4434 * still safe. This is true even if old range < old off,
4435 * since someone could have accessed through (ptr - k), or
4436 * even done ptr -= k in a register, to get a safe access.
4438 if (rold->range > rcur->range)
4440 /* If the offsets don't match, we can't trust our alignment;
4441 * nor can we be sure that we won't fall out of range.
4443 if (rold->off != rcur->off)
4445 /* id relations must be preserved */
4446 if (rold->id && !check_ids(rold->id, rcur->id, idmap))
4448 /* new val must satisfy old val knowledge */
4449 return range_within(rold, rcur) &&
4450 tnum_in(rold->var_off, rcur->var_off);
4452 case CONST_PTR_TO_MAP:
4453 case PTR_TO_PACKET_END:
4454 /* Only valid matches are exact, which memcmp() above
4455 * would have accepted
4458 /* Don't know what's going on, just say it's not safe */
4462 /* Shouldn't get here; if we do, say it's not safe */
4467 static bool stacksafe(struct bpf_func_state *old,
4468 struct bpf_func_state *cur,
4469 struct idpair *idmap)
4473 /* if the explored stack has more populated slots than the current stack,
4474 * such stacks are not equivalent
4476 if (old->allocated_stack > cur->allocated_stack)
4479 /* walk slots of the explored stack and ignore any additional
4480 * slots in the current stack, since explored(safe) state
4483 for (i = 0; i < old->allocated_stack; i++) {
4484 spi = i / BPF_REG_SIZE;
4486 if (!(old->stack[spi].spilled_ptr.live & REG_LIVE_READ))
4487 /* explored state didn't use this */
4490 if (old->stack[spi].slot_type[i % BPF_REG_SIZE] == STACK_INVALID)
4492 /* if old state was safe with misc data in the stack
4493 * it will be safe with zero-initialized stack.
4494 * The opposite is not true
4496 if (old->stack[spi].slot_type[i % BPF_REG_SIZE] == STACK_MISC &&
4497 cur->stack[spi].slot_type[i % BPF_REG_SIZE] == STACK_ZERO)
4499 if (old->stack[spi].slot_type[i % BPF_REG_SIZE] !=
4500 cur->stack[spi].slot_type[i % BPF_REG_SIZE])
4501 /* Ex: old explored (safe) state has STACK_SPILL in
4502 * this stack slot, but current has STACK_MISC ->
4503 * these verifier states are not equivalent,
4504 * return false to continue verification of this path
4507 if (i % BPF_REG_SIZE)
4509 if (old->stack[spi].slot_type[0] != STACK_SPILL)
4511 if (!regsafe(&old->stack[spi].spilled_ptr,
4512 &cur->stack[spi].spilled_ptr,
4514 /* when explored and current stack slot are both storing
4515 * spilled registers, check that stored pointers types
4516 * are the same as well.
4517 * Ex: explored safe path could have stored
4518 * (bpf_reg_state) {.type = PTR_TO_STACK, .off = -8}
4519 * but current path has stored:
4520 * (bpf_reg_state) {.type = PTR_TO_STACK, .off = -16}
4521 * such verifier states are not equivalent.
4522 * return false to continue verification of this path
4529 /* compare two verifier states
4531 * all states stored in state_list are known to be valid, since
4532 * verifier reached 'bpf_exit' instruction through them
4534 * this function is called when verifier exploring different branches of
4535 * execution popped from the state stack. If it sees an old state that has
4536 * more strict register state and more strict stack state then this execution
4537 * branch doesn't need to be explored further, since verifier already
4538 * concluded that more strict state leads to valid finish.
4540 * Therefore two states are equivalent if register state is more conservative
4541 * and explored stack state is more conservative than the current one.
4544 * (slot1=INV slot2=MISC) == (slot1=MISC slot2=MISC)
4545 * (slot1=MISC slot2=MISC) != (slot1=INV slot2=MISC)
4547 * In other words if current stack state (one being explored) has more
4548 * valid slots than old one that already passed validation, it means
4549 * the verifier can stop exploring and conclude that current state is valid too
4551 * Similarly with registers. If explored state has register type as invalid
4552 * whereas register type in current state is meaningful, it means that
4553 * the current state will reach 'bpf_exit' instruction safely
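 *
 * The same containment applies to scalar bounds (illustrative): an old
 * state where R2 was known to lie in [0, 255] covers a current state where
 * R2 lies in [16, 31], but not one where R2 may reach 1024, since accesses
 * proven safe under the old bounds need not be safe under wider ones.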
4555 static bool func_states_equal(struct bpf_func_state *old,
4556 struct bpf_func_state *cur)
4558 struct idpair *idmap;
4562 idmap = kcalloc(ID_MAP_SIZE, sizeof(struct idpair), GFP_KERNEL);
4563 /* If we failed to allocate the idmap, just say it's not safe */
4567 for (i = 0; i < MAX_BPF_REG; i++) {
4568 if (!regsafe(&old->regs[i], &cur->regs[i], idmap))
4572 if (!stacksafe(old, cur, idmap))
4580 static bool states_equal(struct bpf_verifier_env *env,
4581 struct bpf_verifier_state *old,
4582 struct bpf_verifier_state *cur)
4586 if (old->curframe != cur->curframe)
4589 /* for states to be equal callsites have to be the same
4590 * and all frame states need to be equivalent
4592 for (i = 0; i <= old->curframe; i++) {
4593 if (old->frame[i]->callsite != cur->frame[i]->callsite)
4595 if (!func_states_equal(old->frame[i], cur->frame[i]))
4601 /* A write screens off any subsequent reads; but write marks come from the
4602 * straight-line code between a state and its parent. When we arrive at an
4603 * equivalent state (jump target or such) we didn't arrive by the straight-line
4604 * code, so read marks in the state must propagate to the parent regardless
4605 * of the state's write marks. That's what 'parent == state->parent' comparison
4606 * in mark_reg_read() and mark_stack_slot_read() is for.
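 *
 * Illustrative consequence: when the current state is pruned against an
 * explored state whose continuation reads r2, that read mark is propagated
 * into the current state's parentage chain, so r2 is not treated as dead
 * along the path we arrived by.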
4608 static int propagate_liveness(struct bpf_verifier_env *env,
4609 const struct bpf_verifier_state *vstate,
4610 struct bpf_verifier_state *vparent)
4612 int i, frame, err = 0;
4613 struct bpf_func_state *state, *parent;
4615 if (vparent->curframe != vstate->curframe) {
4616 WARN(1, "propagate_live: parent frame %d current frame %d\n",
4617 vparent->curframe, vstate->curframe);
4620 /* Propagate read liveness of registers... */
4621 BUILD_BUG_ON(BPF_REG_FP + 1 != MAX_BPF_REG);
4622 /* We don't need to worry about FP liveness because it's read-only */
4623 for (i = 0; i < BPF_REG_FP; i++) {
4624 if (vparent->frame[vparent->curframe]->regs[i].live & REG_LIVE_READ)
4626 if (vstate->frame[vstate->curframe]->regs[i].live & REG_LIVE_READ) {
4627 err = mark_reg_read(env, vstate, vparent, i);
4633 /* ... and stack slots */
4634 for (frame = 0; frame <= vstate->curframe; frame++) {
4635 state = vstate->frame[frame];
4636 parent = vparent->frame[frame];
4637 for (i = 0; i < state->allocated_stack / BPF_REG_SIZE &&
4638 i < parent->allocated_stack / BPF_REG_SIZE; i++) {
4639 if (parent->stack[i].spilled_ptr.live & REG_LIVE_READ)
4641 if (state->stack[i].spilled_ptr.live & REG_LIVE_READ)
4642 mark_stack_slot_read(env, vstate, vparent, i, frame);
4648 static int is_state_visited(struct bpf_verifier_env *env, int insn_idx)
4650 struct bpf_verifier_state_list *new_sl;
4651 struct bpf_verifier_state_list *sl;
4652 struct bpf_verifier_state *cur = env->cur_state;
4655 sl = env->explored_states[insn_idx];
4657 /* this 'insn_idx' instruction wasn't marked, so we will not
4658 * be doing state search here
4662 while (sl != STATE_LIST_MARK) {
4663 if (states_equal(env, &sl->state, cur)) {
4664 /* reached equivalent register/stack state,
4666 * Registers read by the continuation are read by us.
4667 * If we have any write marks in env->cur_state, they
4668 * will prevent corresponding reads in the continuation
4669 * from reaching our parent (an explored_state). Our
4670 * own state will get the read marks recorded, but
4671 * they'll be immediately forgotten as we're pruning
4672 * this state and will pop a new one.
4674 err = propagate_liveness(env, &sl->state, cur);
4682 /* there were no equivalent states, remember current one.
4683 * technically the current state is not proven to be safe yet,
4684 * but it will either reach outer most bpf_exit (which means it's safe)
4685 * or it will be rejected. Since there are no loops, we won't be
4686 * seeing this tuple (frame[0].callsite, frame[1].callsite, .. insn_idx)
4687 * again on the way to bpf_exit
4689 new_sl = kzalloc(sizeof(struct bpf_verifier_state_list), GFP_KERNEL);
4693 /* add new state to the head of linked list */
4694 err = copy_verifier_state(&new_sl->state, cur);
4696 free_verifier_state(&new_sl->state, false);
4700 new_sl->next = env->explored_states[insn_idx];
4701 env->explored_states[insn_idx] = new_sl;
4702 /* connect new state to parentage chain */
4703 cur->parent = &new_sl->state;
4704 /* clear write marks in current state: the writes we did are not writes
4705 * our child did, so they don't screen off its reads from us.
4706 * (There are no read marks in current state, because reads always mark
4707 * their parent and current state never has children yet. Only
4708 * explored_states can get read marks.)
4710 for (i = 0; i < BPF_REG_FP; i++)
4711 cur->frame[cur->curframe]->regs[i].live = REG_LIVE_NONE;
4713 /* all stack frames are accessible from callee, clear them all */
4714 for (j = 0; j <= cur->curframe; j++) {
4715 struct bpf_func_state *frame = cur->frame[j];
4717 for (i = 0; i < frame->allocated_stack / BPF_REG_SIZE; i++)
4718 frame->stack[i].spilled_ptr.live = REG_LIVE_NONE;
4723 static int do_check(struct bpf_verifier_env *env)
4725 struct bpf_verifier_state *state;
4726 struct bpf_insn *insns = env->prog->insnsi;
4727 struct bpf_reg_state *regs;
4728 int insn_cnt = env->prog->len, i;
4729 int insn_idx, prev_insn_idx = 0;
4730 int insn_processed = 0;
4731 bool do_print_state = false;
4733 state = kzalloc(sizeof(struct bpf_verifier_state), GFP_KERNEL);
4736 state->curframe = 0;
4737 state->parent = NULL;
4738 state->frame[0] = kzalloc(sizeof(struct bpf_func_state), GFP_KERNEL);
4739 if (!state->frame[0]) {
4743 env->cur_state = state;
4744 init_func_state(env, state->frame[0],
4745 BPF_MAIN_FUNC /* callsite */,
4747 0 /* subprogno, zero == main subprog */);
4750 struct bpf_insn *insn;
4754 if (insn_idx >= insn_cnt) {
4755 verbose(env, "invalid insn idx %d insn_cnt %d\n",
4756 insn_idx, insn_cnt);
4760 insn = &insns[insn_idx];
4761 class = BPF_CLASS(insn->code);
4763 if (++insn_processed > BPF_COMPLEXITY_LIMIT_INSNS) {
4765 "BPF program is too large. Processed %d insn\n",
4770 err = is_state_visited(env, insn_idx);
4774 /* found equivalent state, can prune the search */
4775 if (env->log.level) {
4777 verbose(env, "\nfrom %d to %d: safe\n",
4778 prev_insn_idx, insn_idx);
4780 verbose(env, "%d: safe\n", insn_idx);
4782 goto process_bpf_exit;
4788 if (env->log.level > 1 || (env->log.level && do_print_state)) {
4789 if (env->log.level > 1)
4790 verbose(env, "%d:", insn_idx);
4792 verbose(env, "\nfrom %d to %d:",
4793 prev_insn_idx, insn_idx);
4794 print_verifier_state(env, state->frame[state->curframe]);
4795 do_print_state = false;
4798 if (env->log.level) {
4799 const struct bpf_insn_cbs cbs = {
4800 .cb_print = verbose,
4801 .private_data = env,
4804 verbose(env, "%d: ", insn_idx);
4805 print_bpf_insn(&cbs, insn, env->allow_ptr_leaks);
4808 if (bpf_prog_is_dev_bound(env->prog->aux)) {
4809 err = bpf_prog_offload_verify_insn(env, insn_idx,
4815 regs = cur_regs(env);
4816 env->insn_aux_data[insn_idx].seen = true;
4817 if (class == BPF_ALU || class == BPF_ALU64) {
4818 err = check_alu_op(env, insn);
4822 } else if (class == BPF_LDX) {
4823 enum bpf_reg_type *prev_src_type, src_reg_type;
4825 /* check for reserved fields is already done */
4827 /* check src operand */
4828 err = check_reg_arg(env, insn->src_reg, SRC_OP);
4832 err = check_reg_arg(env, insn->dst_reg, DST_OP_NO_MARK);
4836 src_reg_type = regs[insn->src_reg].type;
4838 /* check that memory (src_reg + off) is readable,
4839 * the state of dst_reg will be updated by this func
4841 err = check_mem_access(env, insn_idx, insn->src_reg, insn->off,
4842 BPF_SIZE(insn->code), BPF_READ,
4843 insn->dst_reg, false);
4847 prev_src_type = &env->insn_aux_data[insn_idx].ptr_type;
4849 if (*prev_src_type == NOT_INIT) {
4851 * dst_reg = *(u32 *)(src_reg + off)
4852 * save type to validate intersecting paths
4854 *prev_src_type = src_reg_type;
4856 } else if (src_reg_type != *prev_src_type &&
4857 (src_reg_type == PTR_TO_CTX ||
4858 *prev_src_type == PTR_TO_CTX)) {
4859 /* An abuser program is trying to use the same insn
4860 * dst_reg = *(u32*) (src_reg + off)
4861 * with different pointer types:
4862 * src_reg == ctx in one branch and
4863 * src_reg == stack|map in some other branch.
4866 verbose(env, "same insn cannot be used with different pointers\n");
4870 } else if (class == BPF_STX) {
4871 enum bpf_reg_type *prev_dst_type, dst_reg_type;
4873 if (BPF_MODE(insn->code) == BPF_XADD) {
4874 err = check_xadd(env, insn_idx, insn);
4881 /* check src1 operand */
4882 err = check_reg_arg(env, insn->src_reg, SRC_OP);
4885 /* check src2 operand */
4886 err = check_reg_arg(env, insn->dst_reg, SRC_OP);
4890 dst_reg_type = regs[insn->dst_reg].type;
4892 /* check that memory (dst_reg + off) is writeable */
4893 err = check_mem_access(env, insn_idx, insn->dst_reg, insn->off,
4894 BPF_SIZE(insn->code), BPF_WRITE,
4895 insn->src_reg, false);
4899 prev_dst_type = &env->insn_aux_data[insn_idx].ptr_type;
4901 if (*prev_dst_type == NOT_INIT) {
4902 *prev_dst_type = dst_reg_type;
4903 } else if (dst_reg_type != *prev_dst_type &&
4904 (dst_reg_type == PTR_TO_CTX ||
4905 *prev_dst_type == PTR_TO_CTX)) {
4906 verbose(env, "same insn cannot be used with different pointers\n");
4910 } else if (class == BPF_ST) {
4911 if (BPF_MODE(insn->code) != BPF_MEM ||
4912 insn->src_reg != BPF_REG_0) {
4913 verbose(env, "BPF_ST uses reserved fields\n");
4916 /* check src operand */
4917 err = check_reg_arg(env, insn->dst_reg, SRC_OP);
4921 if (is_ctx_reg(env, insn->dst_reg)) {
4922 verbose(env, "BPF_ST stores into R%d context is not allowed\n",
4927 /* check that memory (dst_reg + off) is writeable */
4928 err = check_mem_access(env, insn_idx, insn->dst_reg, insn->off,
4929 BPF_SIZE(insn->code), BPF_WRITE,
4934 } else if (class == BPF_JMP) {
4935 u8 opcode = BPF_OP(insn->code);
4937 if (opcode == BPF_CALL) {
4938 if (BPF_SRC(insn->code) != BPF_K ||
4940 (insn->src_reg != BPF_REG_0 &&
4941 insn->src_reg != BPF_PSEUDO_CALL) ||
4942 insn->dst_reg != BPF_REG_0) {
4943 verbose(env, "BPF_CALL uses reserved fields\n");
4947 if (insn->src_reg == BPF_PSEUDO_CALL)
4948 err = check_func_call(env, insn, &insn_idx);
4950 err = check_helper_call(env, insn->imm, insn_idx);
4954 } else if (opcode == BPF_JA) {
4955 if (BPF_SRC(insn->code) != BPF_K ||
4957 insn->src_reg != BPF_REG_0 ||
4958 insn->dst_reg != BPF_REG_0) {
4959 verbose(env, "BPF_JA uses reserved fields\n");
4963 insn_idx += insn->off + 1;
4966 } else if (opcode == BPF_EXIT) {
4967 if (BPF_SRC(insn->code) != BPF_K ||
4969 insn->src_reg != BPF_REG_0 ||
4970 insn->dst_reg != BPF_REG_0) {
4971 verbose(env, "BPF_EXIT uses reserved fields\n");
4975 if (state->curframe) {
4976 /* exit from nested function */
4977 prev_insn_idx = insn_idx;
4978 err = prepare_func_exit(env, &insn_idx);
4981 do_print_state = true;
4985 /* eBPF calling convention is such that R0 is used
4986 * to return the value from eBPF program.
4987 * Make sure that it's readable at the time
4988 * of bpf_exit, which means that the program wrote
4989 * something into it earlier
4991 err = check_reg_arg(env, BPF_REG_0, SRC_OP);
4995 if (is_pointer_value(env, BPF_REG_0)) {
4996 verbose(env, "R0 leaks addr as return value\n");
5000 err = check_return_code(env);
5004 err = pop_stack(env, &prev_insn_idx, &insn_idx);
5010 do_print_state = true;
5014 err = check_cond_jmp_op(env, insn, &insn_idx);
5018 } else if (class == BPF_LD) {
5019 u8 mode = BPF_MODE(insn->code);
5021 if (mode == BPF_ABS || mode == BPF_IND) {
5022 err = check_ld_abs(env, insn);
5026 } else if (mode == BPF_IMM) {
5027 err = check_ld_imm(env, insn);
5032 env->insn_aux_data[insn_idx].seen = true;
5034 verbose(env, "invalid BPF_LD mode\n");
5038 verbose(env, "unknown insn class %d\n", class);
5045 verbose(env, "processed %d insns (limit %d), stack depth ",
5046 insn_processed, BPF_COMPLEXITY_LIMIT_INSNS);
5047 for (i = 0; i < env->subprog_cnt; i++) {
5048 u32 depth = env->subprog_info[i].stack_depth;
5050 verbose(env, "%d", depth);
5051 if (i + 1 < env->subprog_cnt)
5055 env->prog->aux->stack_depth = env->subprog_info[0].stack_depth;
5059 static int check_map_prealloc(struct bpf_map *map)
5061 return (map->map_type != BPF_MAP_TYPE_HASH &&
5062 map->map_type != BPF_MAP_TYPE_PERCPU_HASH &&
5063 map->map_type != BPF_MAP_TYPE_HASH_OF_MAPS) ||
5064 !(map->map_flags & BPF_F_NO_PREALLOC);
5067 static int check_map_prog_compatibility(struct bpf_verifier_env *env,
5068 struct bpf_map *map,
5069 struct bpf_prog *prog)
5072 /* Make sure that BPF_PROG_TYPE_PERF_EVENT programs only use
5073 * preallocated hash maps, since doing memory allocation
5074 * in overflow_handler can crash depending on where nmi got
5077 if (prog->type == BPF_PROG_TYPE_PERF_EVENT) {
5078 if (!check_map_prealloc(map)) {
5079 verbose(env, "perf_event programs can only use preallocated hash map\n");
5082 if (map->inner_map_meta &&
5083 !check_map_prealloc(map->inner_map_meta)) {
5084 verbose(env, "perf_event programs can only use preallocated inner hash map\n");
5089 if ((bpf_prog_is_dev_bound(prog->aux) || bpf_map_is_dev_bound(map)) &&
5090 !bpf_offload_prog_map_match(prog, map)) {
5091 verbose(env, "offload device mismatch between prog and map\n");
5098 /* look for pseudo eBPF instructions that access map FDs and
5099 * replace them with actual map pointers
5101 static int replace_map_fd_with_map_ptr(struct bpf_verifier_env *env)
5103 struct bpf_insn *insn = env->prog->insnsi;
5104 int insn_cnt = env->prog->len;
5107 err = bpf_prog_calc_tag(env->prog);
5111 for (i = 0; i < insn_cnt; i++, insn++) {
5112 if (BPF_CLASS(insn->code) == BPF_LDX &&
5113 (BPF_MODE(insn->code) != BPF_MEM || insn->imm != 0)) {
5114 verbose(env, "BPF_LDX uses reserved fields\n");
5118 if (BPF_CLASS(insn->code) == BPF_STX &&
5119 ((BPF_MODE(insn->code) != BPF_MEM &&
5120 BPF_MODE(insn->code) != BPF_XADD) || insn->imm != 0)) {
5121 verbose(env, "BPF_STX uses reserved fields\n");
5125 if (insn[0].code == (BPF_LD | BPF_IMM | BPF_DW)) {
5126 struct bpf_map *map;
5129 if (i == insn_cnt - 1 || insn[1].code != 0 ||
5130 insn[1].dst_reg != 0 || insn[1].src_reg != 0 ||
5132 verbose(env, "invalid bpf_ld_imm64 insn\n");
5136 if (insn->src_reg == 0)
5137 /* valid generic load 64-bit imm */
5140 if (insn->src_reg != BPF_PSEUDO_MAP_FD) {
5142 "unrecognized bpf_ld_imm64 insn\n");
5146 f = fdget(insn->imm);
5147 map = __bpf_map_get(f);
5149 verbose(env, "fd %d is not pointing to valid bpf_map\n",
5151 return PTR_ERR(map);
5154 err = check_map_prog_compatibility(env, map, env->prog);
5160 /* store map pointer inside BPF_LD_IMM64 instruction */
5161 insn[0].imm = (u32) (unsigned long) map;
5162 insn[1].imm = ((u64) (unsigned long) map) >> 32;
5164 /* check whether we recorded this map already */
5165 for (j = 0; j < env->used_map_cnt; j++)
5166 if (env->used_maps[j] == map) {
5171 if (env->used_map_cnt >= MAX_USED_MAPS) {
5176 /* hold the map. If the program is rejected by verifier,
5177 * the map will be released by release_maps() or it
5178 * will be used by the valid program until it's unloaded
5179 * and all maps are released in free_used_maps()
5181 map = bpf_map_inc(map, false);
5184 return PTR_ERR(map);
5186 env->used_maps[env->used_map_cnt++] = map;
5188 if (map->map_type == BPF_MAP_TYPE_CGROUP_STORAGE &&
5189 bpf_cgroup_storage_assign(env->prog, map)) {
5191 "only one cgroup storage is allowed\n");
5203 /* Basic sanity check before we invest more work here. */
5204 if (!bpf_opcode_in_insntable(insn->code)) {
5205 verbose(env, "unknown opcode %02x\n", insn->code);
5210 /* now all pseudo BPF_LD_IMM64 instructions load valid
5211 * 'struct bpf_map *' into a register instead of user map_fd.
5212 * These pointers will be used later by verifier to validate map access.
5217 /* drop refcnt of maps used by the rejected program */
5218 static void release_maps(struct bpf_verifier_env *env)
5222 if (env->prog->aux->cgroup_storage)
5223 bpf_cgroup_storage_release(env->prog,
5224 env->prog->aux->cgroup_storage);
5226 for (i = 0; i < env->used_map_cnt; i++)
5227 bpf_map_put(env->used_maps[i]);
5230 /* convert pseudo BPF_LD_IMM64 into generic BPF_LD_IMM64 */
5231 static void convert_pseudo_ld_imm64(struct bpf_verifier_env *env)
5233 struct bpf_insn *insn = env->prog->insnsi;
5234 int insn_cnt = env->prog->len;
5237 for (i = 0; i < insn_cnt; i++, insn++)
5238 if (insn->code == (BPF_LD | BPF_IMM | BPF_DW))
5242 /* single env->prog->insnsi[off] instruction was replaced with the range
5243 * insnsi[off, off + cnt). Adjust corresponding insn_aux_data by copying
5244 * [0, off) and [off, end) to new locations, so the patched range stays zero
5246 static int adjust_insn_aux_data(struct bpf_verifier_env *env, u32 prog_len,
5249 struct bpf_insn_aux_data *new_data, *old_data = env->insn_aux_data;
5254 new_data = vzalloc(array_size(prog_len,
5255 sizeof(struct bpf_insn_aux_data)));
5258 memcpy(new_data, old_data, sizeof(struct bpf_insn_aux_data) * off);
5259 memcpy(new_data + off + cnt - 1, old_data + off,
5260 sizeof(struct bpf_insn_aux_data) * (prog_len - off - cnt + 1));
5261 for (i = off; i < off + cnt - 1; i++)
5262 new_data[i].seen = true;
5263 env->insn_aux_data = new_data;
5268 static void adjust_subprog_starts(struct bpf_verifier_env *env, u32 off, u32 len)
5274 /* NOTE: fake 'exit' subprog should be updated as well. */
5275 for (i = 0; i <= env->subprog_cnt; i++) {
5276 if (env->subprog_info[i].start < off)
5278 env->subprog_info[i].start += len - 1;
5282 static struct bpf_prog *bpf_patch_insn_data(struct bpf_verifier_env *env, u32 off,
5283 const struct bpf_insn *patch, u32 len)
5285 struct bpf_prog *new_prog;
5287 new_prog = bpf_patch_insn_single(env->prog, off, patch, len);
5290 if (adjust_insn_aux_data(env, new_prog->len, off, len))
5292 adjust_subprog_starts(env, off, len);
5296 /* The verifier does more data flow analysis than llvm and will not
5297 * explore branches that are dead at run time. Malicious programs can
5298 * have dead code too. Therefore replace all dead at-run-time code with a 'ja -1' trap.
5301 * Just nops are not optimal, e.g. if they would sit at the end of the
5302 * program and through another bug we would manage to jump there, then
5303 * we'd execute beyond program memory otherwise. Returning exception
5304 * code also wouldn't work since we can have subprogs where the dead
5305 * code could be located.
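 *
 * Illustrative example: after
 *   r1 = 0
 *   if r1 == 0 goto +1
 *   r0 = 1            never explored, so never marked 'seen'
 * the verifier follows only the always-taken branch, and the unreachable
 * insn is overwritten with the 'ja -1' trap below.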
5307 static void sanitize_dead_code(struct bpf_verifier_env *env)
5309 struct bpf_insn_aux_data *aux_data = env->insn_aux_data;
5310 struct bpf_insn trap = BPF_JMP_IMM(BPF_JA, 0, 0, -1);
5311 struct bpf_insn *insn = env->prog->insnsi;
5312 const int insn_cnt = env->prog->len;
5315 for (i = 0; i < insn_cnt; i++) {
5316 if (aux_data[i].seen)
5318 memcpy(insn + i, &trap, sizeof(trap));
5322 /* convert load instructions that access fields of 'struct __sk_buff'
5323 * into sequence of instructions that access fields of 'struct sk_buff'
5325 static int convert_ctx_accesses(struct bpf_verifier_env *env)
5327 const struct bpf_verifier_ops *ops = env->ops;
5328 int i, cnt, size, ctx_field_size, delta = 0;
5329 const int insn_cnt = env->prog->len;
5330 struct bpf_insn insn_buf[16], *insn;
5331 struct bpf_prog *new_prog;
5332 enum bpf_access_type type;
5333 bool is_narrower_load;
5336 if (ops->gen_prologue) {
5337 cnt = ops->gen_prologue(insn_buf, env->seen_direct_write,
5339 if (cnt >= ARRAY_SIZE(insn_buf)) {
5340 verbose(env, "bpf verifier is misconfigured\n");
5343 new_prog = bpf_patch_insn_data(env, 0, insn_buf, cnt);
5347 env->prog = new_prog;
5352 if (!ops->convert_ctx_access || bpf_prog_is_dev_bound(env->prog->aux))
5355 insn = env->prog->insnsi + delta;
5357 for (i = 0; i < insn_cnt; i++, insn++) {
5358 if (insn->code == (BPF_LDX | BPF_MEM | BPF_B) ||
5359 insn->code == (BPF_LDX | BPF_MEM | BPF_H) ||
5360 insn->code == (BPF_LDX | BPF_MEM | BPF_W) ||
5361 insn->code == (BPF_LDX | BPF_MEM | BPF_DW))
5363 else if (insn->code == (BPF_STX | BPF_MEM | BPF_B) ||
5364 insn->code == (BPF_STX | BPF_MEM | BPF_H) ||
5365 insn->code == (BPF_STX | BPF_MEM | BPF_W) ||
5366 insn->code == (BPF_STX | BPF_MEM | BPF_DW))
5371 if (type == BPF_WRITE &&
5372 env->insn_aux_data[i + delta].sanitize_stack_off) {
5373 struct bpf_insn patch[] = {
5374 /* Sanitize suspicious stack slot with zero.
5375 * There are no memory dependencies for this store,
5376 * since it's only using frame pointer and immediate
5379 BPF_ST_MEM(BPF_DW, BPF_REG_FP,
5380 env->insn_aux_data[i + delta].sanitize_stack_off,
5382 /* the original STX instruction will immediately
5383 * overwrite the same stack slot with appropriate value
5388 cnt = ARRAY_SIZE(patch);
5389 new_prog = bpf_patch_insn_data(env, i + delta, patch, cnt);
5394 env->prog = new_prog;
5395 insn = new_prog->insnsi + i + delta;
5399 if (env->insn_aux_data[i + delta].ptr_type != PTR_TO_CTX)
5402 ctx_field_size = env->insn_aux_data[i + delta].ctx_field_size;
5403 size = BPF_LDST_BYTES(insn);
5405 /* If the read access is a narrower load of the field,
5406 * convert it to a 4/8-byte load, to minimize program type specific
5407 * convert_ctx_access changes. If conversion is successful,
5408 * we will apply proper mask to the result.
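 *
 * E.g. (sketch): a 1-byte read of a 4-byte context field becomes a 4-byte
 * load of the whole field, followed further below by
 *   BPF_ALU32_IMM(BPF_AND, dst_reg, 0xff)
 * whenever size < target_size.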
5410 is_narrower_load = size < ctx_field_size;
5411 if (is_narrower_load) {
5412 u32 size_default = bpf_ctx_off_adjust_machine(ctx_field_size);
5413 u32 off = insn->off;
5416 if (type == BPF_WRITE) {
5417 verbose(env, "bpf verifier narrow ctx access misconfigured\n");
5422 if (ctx_field_size == 4)
5424 else if (ctx_field_size == 8)
5427 insn->off = off & ~(size_default - 1);
5428 insn->code = BPF_LDX | BPF_MEM | size_code;
5432 cnt = ops->convert_ctx_access(type, insn, insn_buf, env->prog,
5434 if (cnt == 0 || cnt >= ARRAY_SIZE(insn_buf) ||
5435 (ctx_field_size && !target_size)) {
5436 verbose(env, "bpf verifier is misconfigured\n");
5440 if (is_narrower_load && size < target_size) {
5441 if (ctx_field_size <= 4)
5442 insn_buf[cnt++] = BPF_ALU32_IMM(BPF_AND, insn->dst_reg,
5443 (1 << size * 8) - 1);
5445 insn_buf[cnt++] = BPF_ALU64_IMM(BPF_AND, insn->dst_reg,
5446 (1 << size * 8) - 1);
5449 new_prog = bpf_patch_insn_data(env, i + delta, insn_buf, cnt);
5455 /* keep walking new program and skip insns we just inserted */
5456 env->prog = new_prog;
5457 insn = new_prog->insnsi + i + delta;
5463 static int jit_subprogs(struct bpf_verifier_env *env)
5465 struct bpf_prog *prog = env->prog, **func, *tmp;
5466 int i, j, subprog_start, subprog_end = 0, len, subprog;
5467 struct bpf_insn *insn;
5471 if (env->subprog_cnt <= 1)
5474 for (i = 0, insn = prog->insnsi; i < prog->len; i++, insn++) {
5475 if (insn->code != (BPF_JMP | BPF_CALL) ||
5476 insn->src_reg != BPF_PSEUDO_CALL)
5478 /* Upon error here we cannot fall back to interpreter but
5479 * need a hard reject of the program. Thus -EFAULT is
5480 * propagated in any case.
5482 subprog = find_subprog(env, i + insn->imm + 1);
5484 WARN_ONCE(1, "verifier bug. No program starts at insn %d\n",
5488 /* temporarily remember subprog id inside insn instead of
5489 * aux_data, since next loop will split up all insns into funcs
5491 insn->off = subprog;
5492 /* remember original imm in case JIT fails and fallback
5493 * to interpreter will be needed
5495 env->insn_aux_data[i].call_imm = insn->imm;
5496 /* point imm to __bpf_call_base+1 from JITs point of view */
5500 func = kcalloc(env->subprog_cnt, sizeof(prog), GFP_KERNEL);
5504 for (i = 0; i < env->subprog_cnt; i++) {
5505 subprog_start = subprog_end;
5506 subprog_end = env->subprog_info[i + 1].start;
5508 len = subprog_end - subprog_start;
5509 func[i] = bpf_prog_alloc(bpf_prog_size(len), GFP_USER);
5512 memcpy(func[i]->insnsi, &prog->insnsi[subprog_start],
5513 len * sizeof(struct bpf_insn));
5514 func[i]->type = prog->type;
5516 if (bpf_prog_calc_tag(func[i]))
5518 func[i]->is_func = 1;
5519 /* Use bpf_prog_F_tag to indicate functions in stack traces.
5520 * Long term would need debug info to populate names
5522 func[i]->aux->name[0] = 'F';
5523 func[i]->aux->stack_depth = env->subprog_info[i].stack_depth;
5524 func[i]->jit_requested = 1;
5525 func[i] = bpf_int_jit_compile(func[i]);
5526 if (!func[i]->jited) {
5532 /* at this point all bpf functions were successfully JITed
5533 * now populate all bpf_calls with correct addresses and
5534 * run last pass of JIT
5536 for (i = 0; i < env->subprog_cnt; i++) {
5537 insn = func[i]->insnsi;
5538 for (j = 0; j < func[i]->len; j++, insn++) {
5539 if (insn->code != (BPF_JMP | BPF_CALL) ||
5540 insn->src_reg != BPF_PSEUDO_CALL)
5542 subprog = insn->off;
5543 insn->imm = (u64 (*)(u64, u64, u64, u64, u64))
5544 func[subprog]->bpf_func -
5548 /* we use the aux data to keep a list of the start addresses
5549 * of the JITed images for each function in the program
5551 * for some architectures, such as powerpc64, the imm field
5552 * might not be large enough to hold the offset of the start
5553 * address of the callee's JITed image from __bpf_call_base
5555 * in such cases, we can lookup the start address of a callee
5556 * by using its subprog id, available from the off field of
5557 * the call instruction, as an index for this list
5559 func[i]->aux->func = func;
5560 func[i]->aux->func_cnt = env->subprog_cnt;
5562 for (i = 0; i < env->subprog_cnt; i++) {
5563 old_bpf_func = func[i]->bpf_func;
5564 tmp = bpf_int_jit_compile(func[i]);
5565 if (tmp != func[i] || func[i]->bpf_func != old_bpf_func) {
5566 verbose(env, "JIT doesn't support bpf-to-bpf calls\n");
5573 /* finally lock prog and jit images for all functions and
5576 for (i = 0; i < env->subprog_cnt; i++) {
5577 bpf_prog_lock_ro(func[i]);
5578 bpf_prog_kallsyms_add(func[i]);
5581 /* Last step: make now unused interpreter insns from main
5582 * prog consistent for later dump requests, so they can
5583 * later look the same as if they were interpreted only.
5585 for (i = 0, insn = prog->insnsi; i < prog->len; i++, insn++) {
5586 if (insn->code != (BPF_JMP | BPF_CALL) ||
5587 insn->src_reg != BPF_PSEUDO_CALL)
5589 insn->off = env->insn_aux_data[i].call_imm;
5590 subprog = find_subprog(env, i + insn->off + 1);
5591 insn->imm = subprog;
5595 prog->bpf_func = func[0]->bpf_func;
5596 prog->aux->func = func;
5597 prog->aux->func_cnt = env->subprog_cnt;
5600 for (i = 0; i < env->subprog_cnt; i++)
5602 bpf_jit_free(func[i]);
5605 /* cleanup main prog to be interpreted */
5606 prog->jit_requested = 0;
5607 for (i = 0, insn = prog->insnsi; i < prog->len; i++, insn++) {
5608 if (insn->code != (BPF_JMP | BPF_CALL) ||
5609 insn->src_reg != BPF_PSEUDO_CALL)
5612 insn->imm = env->insn_aux_data[i].call_imm;
5617 static int fixup_call_args(struct bpf_verifier_env *env)
5619 #ifndef CONFIG_BPF_JIT_ALWAYS_ON
5620 struct bpf_prog *prog = env->prog;
5621 struct bpf_insn *insn = prog->insnsi;
5627 if (env->prog->jit_requested) {
5628 err = jit_subprogs(env);
5634 #ifndef CONFIG_BPF_JIT_ALWAYS_ON
5635 for (i = 0; i < prog->len; i++, insn++) {
5636 if (insn->code != (BPF_JMP | BPF_CALL) ||
5637 insn->src_reg != BPF_PSEUDO_CALL)
5639 depth = get_callee_stack_depth(env, insn, i);
5642 bpf_patch_call_args(insn, depth);
5649 /* fixup insn->imm field of bpf_call instructions
5650 * and inline eligible helpers as explicit sequence of BPF instructions
5652 * this function is called after eBPF program passed verification
5654 static int fixup_bpf_calls(struct bpf_verifier_env *env)
5656 struct bpf_prog *prog = env->prog;
5657 struct bpf_insn *insn = prog->insnsi;
5658 const struct bpf_func_proto *fn;
5659 const int insn_cnt = prog->len;
5660 const struct bpf_map_ops *ops;
5661 struct bpf_insn_aux_data *aux;
5662 struct bpf_insn insn_buf[16];
5663 struct bpf_prog *new_prog;
5664 struct bpf_map *map_ptr;
5665 int i, cnt, delta = 0;
5667 for (i = 0; i < insn_cnt; i++, insn++) {
5668 if (insn->code == (BPF_ALU64 | BPF_MOD | BPF_X) ||
5669 insn->code == (BPF_ALU64 | BPF_DIV | BPF_X) ||
5670 insn->code == (BPF_ALU | BPF_MOD | BPF_X) ||
5671 insn->code == (BPF_ALU | BPF_DIV | BPF_X)) {
5672 bool is64 = BPF_CLASS(insn->code) == BPF_ALU64;
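/* Patch a runtime divisor check around BPF_DIV/BPF_MOD by register so that
 * a zero divisor yields 0 for div and leaves the dividend unchanged for mod,
 * rather than relying on architecture-specific divide-by-zero behaviour.
 * For the 32-bit forms the leading MOV32 also truncates the divisor; that
 * insn is skipped for the 64-bit forms.
 */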
5673 struct bpf_insn mask_and_div[] = {
5674 BPF_MOV32_REG(insn->src_reg, insn->src_reg),
5676 BPF_JMP_IMM(BPF_JNE, insn->src_reg, 0, 2),
5677 BPF_ALU32_REG(BPF_XOR, insn->dst_reg, insn->dst_reg),
5678 BPF_JMP_IMM(BPF_JA, 0, 0, 1),
5681 struct bpf_insn mask_and_mod[] = {
5682 BPF_MOV32_REG(insn->src_reg, insn->src_reg),
5683 /* Rx mod 0 -> Rx */
5684 BPF_JMP_IMM(BPF_JEQ, insn->src_reg, 0, 1),
5687 struct bpf_insn *patchlet;
5689 if (insn->code == (BPF_ALU64 | BPF_DIV | BPF_X) ||
5690 insn->code == (BPF_ALU | BPF_DIV | BPF_X)) {
5691 patchlet = mask_and_div + (is64 ? 1 : 0);
5692 cnt = ARRAY_SIZE(mask_and_div) - (is64 ? 1 : 0);
5694 patchlet = mask_and_mod + (is64 ? 1 : 0);
5695 cnt = ARRAY_SIZE(mask_and_mod) - (is64 ? 1 : 0);
5698 new_prog = bpf_patch_insn_data(env, i + delta, patchlet, cnt);
5703 env->prog = prog = new_prog;
5704 insn = new_prog->insnsi + i + delta;
5708 if (BPF_CLASS(insn->code) == BPF_LD &&
5709 (BPF_MODE(insn->code) == BPF_ABS ||
5710 BPF_MODE(insn->code) == BPF_IND)) {
5711 cnt = env->ops->gen_ld_abs(insn, insn_buf);
5712 if (cnt == 0 || cnt >= ARRAY_SIZE(insn_buf)) {
5713 verbose(env, "bpf verifier is misconfigured\n");
5717 new_prog = bpf_patch_insn_data(env, i + delta, insn_buf, cnt);
5722 env->prog = prog = new_prog;
5723 insn = new_prog->insnsi + i + delta;
5727 if (insn->code != (BPF_JMP | BPF_CALL))
5729 if (insn->src_reg == BPF_PSEUDO_CALL)
5732 if (insn->imm == BPF_FUNC_get_route_realm)
5733 prog->dst_needed = 1;
5734 if (insn->imm == BPF_FUNC_get_prandom_u32)
5735 bpf_user_rnd_init_once();
5736 if (insn->imm == BPF_FUNC_override_return)
5737 prog->kprobe_override = 1;
5738 if (insn->imm == BPF_FUNC_tail_call) {
5739 /* If we tail call into other programs, we
5740 * cannot make any assumptions since they can
5741 * be replaced dynamically during runtime in
5742 * the program array.
5744 prog->cb_access = 1;
5745 env->prog->aux->stack_depth = MAX_BPF_STACK;
5747 /* mark bpf_tail_call as different opcode to avoid
5748 * conditional branch in the interpreter for every normal
5749 * call and to prevent accidental JITing by JIT compiler
5750 * that doesn't support bpf_tail_call yet
5753 insn->code = BPF_JMP | BPF_TAIL_CALL;
5755 aux = &env->insn_aux_data[i + delta];
5756 if (!bpf_map_ptr_unpriv(aux))
5759 /* instead of changing every JIT dealing with tail_call
5760 * emit two extra insns:
5761 * if (index >= max_entries) goto out;
5762 * index &= array->index_mask;
5763 * to avoid out-of-bounds cpu speculation
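 *
 * i.e. (sketch) the patched call site becomes:
 *   if r3 >= map->max_entries goto +2   skip the masked call entirely
 *   w3 &= array->index_mask
 *   call bpf_tail_call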
5765 if (bpf_map_ptr_poisoned(aux)) {
5766 verbose(env, "tail_call abusing map_ptr\n");
5770 map_ptr = BPF_MAP_PTR(aux->map_state);
5771 insn_buf[0] = BPF_JMP_IMM(BPF_JGE, BPF_REG_3,
5772 map_ptr->max_entries, 2);
5773 insn_buf[1] = BPF_ALU32_IMM(BPF_AND, BPF_REG_3,
5774 container_of(map_ptr,
5777 insn_buf[2] = *insn;
5779 new_prog = bpf_patch_insn_data(env, i + delta, insn_buf, cnt);
5784 env->prog = prog = new_prog;
5785 insn = new_prog->insnsi + i + delta;
5789 /* BPF_EMIT_CALL() assumptions in some of the map_gen_lookup
5790 * and other inlining handlers are currently limited to 64 bit
5793 if (prog->jit_requested && BITS_PER_LONG == 64 &&
5794 (insn->imm == BPF_FUNC_map_lookup_elem ||
5795 insn->imm == BPF_FUNC_map_update_elem ||
5796 insn->imm == BPF_FUNC_map_delete_elem)) {
5797 aux = &env->insn_aux_data[i + delta];
5798 if (bpf_map_ptr_poisoned(aux))
5799 goto patch_call_imm;
5801 map_ptr = BPF_MAP_PTR(aux->map_state);
5803 if (insn->imm == BPF_FUNC_map_lookup_elem &&
5804 ops->map_gen_lookup) {
5805 cnt = ops->map_gen_lookup(map_ptr, insn_buf);
5806 if (cnt == 0 || cnt >= ARRAY_SIZE(insn_buf)) {
5807 verbose(env, "bpf verifier is misconfigured\n");
5811 new_prog = bpf_patch_insn_data(env, i + delta,
5817 env->prog = prog = new_prog;
5818 insn = new_prog->insnsi + i + delta;
5822 BUILD_BUG_ON(!__same_type(ops->map_lookup_elem,
5823 (void *(*)(struct bpf_map *map, void *key))NULL));
5824 BUILD_BUG_ON(!__same_type(ops->map_delete_elem,
5825 (int (*)(struct bpf_map *map, void *key))NULL));
5826 BUILD_BUG_ON(!__same_type(ops->map_update_elem,
5827 (int (*)(struct bpf_map *map, void *key, void *value,
5829 switch (insn->imm) {
5830 case BPF_FUNC_map_lookup_elem:
5831 insn->imm = BPF_CAST_CALL(ops->map_lookup_elem) -
5834 case BPF_FUNC_map_update_elem:
5835 insn->imm = BPF_CAST_CALL(ops->map_update_elem) -
5838 case BPF_FUNC_map_delete_elem:
5839 insn->imm = BPF_CAST_CALL(ops->map_delete_elem) -
5844 goto patch_call_imm;
5848 fn = env->ops->get_func_proto(insn->imm, env->prog);
5849 /* all functions that have a prototype and that the verifier allowed
5850 * programs to call must be real in-kernel functions
5854 "kernel subsystem misconfigured func %s#%d\n",
5855 func_id_name(insn->imm), insn->imm);
5858 insn->imm = fn->func - __bpf_call_base;
5864 static void free_states(struct bpf_verifier_env *env)
5866 struct bpf_verifier_state_list *sl, *sln;
5869 if (!env->explored_states)
5872 for (i = 0; i < env->prog->len; i++) {
5873 sl = env->explored_states[i];
5876 while (sl != STATE_LIST_MARK) {
5878 free_verifier_state(&sl->state, false);
5884 kfree(env->explored_states);
5887 int bpf_check(struct bpf_prog **prog, union bpf_attr *attr)
5889 struct bpf_verifier_env *env;
5890 struct bpf_verifier_log *log;
5893 /* no program is valid */
5894 if (ARRAY_SIZE(bpf_verifier_ops) == 0)
5897 /* 'struct bpf_verifier_env' can be global, but since it's not small,
5898 * allocate/free it every time bpf_check() is called
5900 env = kzalloc(sizeof(struct bpf_verifier_env), GFP_KERNEL);
5905 env->insn_aux_data =
5906 vzalloc(array_size(sizeof(struct bpf_insn_aux_data),
5909 if (!env->insn_aux_data)
5912 env->ops = bpf_verifier_ops[env->prog->type];
5914 /* grab the mutex to protect few globals used by verifier */
5915 mutex_lock(&bpf_verifier_lock);
5917 if (attr->log_level || attr->log_buf || attr->log_size) {
5918 /* user requested verbose verifier output
5919 * and supplied buffer to store the verification trace
5921 log->level = attr->log_level;
5922 log->ubuf = (char __user *) (unsigned long) attr->log_buf;
5923 log->len_total = attr->log_size;
5926 /* log attributes have to be sane */
5927 if (log->len_total < 128 || log->len_total > UINT_MAX >> 8 ||
5928 !log->level || !log->ubuf)
5932 env->strict_alignment = !!(attr->prog_flags & BPF_F_STRICT_ALIGNMENT);
5933 if (!IS_ENABLED(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS))
5934 env->strict_alignment = true;
5936 ret = replace_map_fd_with_map_ptr(env);
5938 goto skip_full_check;
5940 if (bpf_prog_is_dev_bound(env->prog->aux)) {
5941 ret = bpf_prog_offload_verifier_prep(env);
5943 goto skip_full_check;
5946 env->explored_states = kcalloc(env->prog->len,
5947 sizeof(struct bpf_verifier_state_list *),
5950 if (!env->explored_states)
5951 goto skip_full_check;
5953 env->allow_ptr_leaks = capable(CAP_SYS_ADMIN);
5955 ret = check_cfg(env);
5957 goto skip_full_check;
5959 ret = do_check(env);
5960 if (env->cur_state) {
5961 free_verifier_state(env->cur_state, true);
5962 env->cur_state = NULL;
5966 while (!pop_stack(env, NULL, NULL));
5970 sanitize_dead_code(env);
5973 ret = check_max_stack_depth(env);
5976 /* program is valid, convert *(u32*)(ctx + off) accesses */
5977 ret = convert_ctx_accesses(env);
5980 ret = fixup_bpf_calls(env);
5983 ret = fixup_call_args(env);
5985 if (log->level && bpf_verifier_log_full(log))
5987 if (log->level && !log->ubuf) {
5989 goto err_release_maps;
5992 if (ret == 0 && env->used_map_cnt) {
5993 /* if program passed verifier, update used_maps in bpf_prog_info */
5994 env->prog->aux->used_maps = kmalloc_array(env->used_map_cnt,
5995 sizeof(env->used_maps[0]),
5998 if (!env->prog->aux->used_maps) {
6000 goto err_release_maps;
6003 memcpy(env->prog->aux->used_maps, env->used_maps,
6004 sizeof(env->used_maps[0]) * env->used_map_cnt);
6005 env->prog->aux->used_map_cnt = env->used_map_cnt;
6007 /* program is valid. Convert pseudo bpf_ld_imm64 into generic
6008 * bpf_ld_imm64 instructions
6010 convert_pseudo_ld_imm64(env);
6014 if (!env->prog->aux->used_maps)
6015 /* if we didn't copy map pointers into bpf_prog_info, release
6016 * them now. Otherwise free_used_maps() will release them.
6021 mutex_unlock(&bpf_verifier_lock);
6022 vfree(env->insn_aux_data);