/* Copyright (c) 2011-2014 PLUMgrid, http://plumgrid.com
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of version 2 of the GNU General Public
 * License as published by the Free Software Foundation.
 */
#ifndef _LINUX_BPF_VERIFIER_H
#define _LINUX_BPF_VERIFIER_H 1

#include <linux/bpf.h> /* for enum bpf_reg_type */
#include <linux/filter.h> /* for MAX_BPF_STACK */
#include <linux/tnum.h>
/* Maximum variable offset umax_value permitted when resolving memory accesses.
 * In practice this is far bigger than any realistic pointer offset; this limit
 * ensures that umax_value + (int)off + (int)size cannot overflow a u64.
 */
#define BPF_MAX_VAR_OFF	(1 << 29)
/* Maximum variable size permitted for ARG_CONST_SIZE[_OR_ZERO]. This ensures
 * that converting umax_value to int cannot overflow.
 */
#define BPF_MAX_VAR_SIZ	(1 << 29)
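
/* Illustrative note (not from the original source): with the variable part
 * capped below 2^29 and 'off'/'size' being 32-bit ints (each below 2^31 in
 * magnitude), the worst-case sum umax_value + off + size stays below 2^33,
 * nowhere near the 2^64 wrap-around point; likewise a umax_value below 2^29
 * always survives a cast to int, which is what BPF_MAX_VAR_SIZ relies on.
 */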
/* Liveness marks, used for registers and spilled-regs (in stack slots).
 * Read marks propagate upwards until they find a write mark; they record that
 * "one of this state's descendants read this reg" (and therefore the reg is
 * relevant for states_equal() checks).
 * Write marks collect downwards and do not propagate; they record that "the
 * straight-line code that reached this state (from its parent) wrote this reg"
 * (and therefore that reads propagated from this state or its descendants
 * should not propagate to its parent).
 * A state with a write mark can receive read marks; it just won't propagate
 * them to its parent, since the write mark is a property, not of the state,
 * but of the link between it and its parent. See mark_reg_read() and
 * mark_stack_slot_read() in kernel/bpf/verifier.c.
 */
enum bpf_reg_liveness {
	REG_LIVE_NONE = 0, /* reg hasn't been read or written this branch */
	REG_LIVE_READ, /* reg was read, so we're sensitive to initial value */
	REG_LIVE_WRITTEN, /* reg was written first, screening off later reads */
};
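
/* Illustrative example (not from the original source): suppose state S2 is
 * explored from parent S1, and the straight-line code between them writes r6
 * before any use, so S2's link carries REG_LIVE_WRITTEN for r6. If a
 * descendant of S2 later reads r6, the read mark propagates up to S2 but
 * stops there: S1 never learns of the read, because whatever value r6 held in
 * S1 was screened off by the write. Conversely, a read of r7 (never written
 * on that path) keeps propagating until it reaches a link with a write mark
 * for r7, or the root state.
 */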
struct bpf_reg_state {
	/* Ordering of fields matters. See states_equal() */
	enum bpf_reg_type type;
	union {
		/* valid when type == PTR_TO_PACKET */
		u16 range;

		/* valid when type == CONST_PTR_TO_MAP | PTR_TO_MAP_VALUE |
		 *   PTR_TO_MAP_VALUE_OR_NULL
		 */
		struct bpf_map *map_ptr;
	};
	/* Fixed part of pointer offset, pointer types only */
	s32 off;
	/* For PTR_TO_PACKET, used to find other pointers with the same variable
	 * offset, so they can share range knowledge.
	 * For PTR_TO_MAP_VALUE_OR_NULL this is used to share which map value we
	 * came from, when one is tested for != NULL.
	 * For PTR_TO_SOCKET this is used to share which pointers retain the
	 * same reference to the socket, to determine proper reference freeing.
	 */
	u32 id;
	/* For scalar types (SCALAR_VALUE), this represents our knowledge of
	 * the actual value.
	 * For pointer types, this represents the variable part of the offset
	 * from the pointed-to object, and is shared with all bpf_reg_states
	 * with the same id as us.
	 */
	struct tnum var_off;
	/* Used to determine if any memory access using this register will
	 * result in a bad access.
	 * These refer to the same value as var_off, not necessarily the actual
	 * contents of the register.
	 */
	s64 smin_value; /* minimum possible (s64)value */
	s64 smax_value; /* maximum possible (s64)value */
	u64 umin_value; /* minimum possible (u64)value */
	u64 umax_value; /* maximum possible (u64)value */
	/* parentage chain for liveness checking */
	struct bpf_reg_state *parent;
	/* Inside the callee two registers can be both PTR_TO_STACK like
	 * R1=fp-8 and R2=fp-8, but one of them points to this function stack
	 * while another to the caller's stack. To differentiate them 'frameno'
	 * is used which is an index in bpf_verifier_state->frame[] array
	 * pointing to bpf_func_state.
	 */
	u32 frameno;
	enum bpf_reg_liveness live;
};
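
/* Illustrative only: a minimal sketch (not part of the kernel API) of how the
 * bounds above are meant to be consumed. The helper name and 'buf_size'
 * parameter are hypothetical; the real access checks live in
 * kernel/bpf/verifier.c and are considerably more involved.
 */
static inline bool bpf_example_access_in_bounds(const struct bpf_reg_state *reg,
						int off, int size, u32 buf_size)
{
	/* Only forward, non-empty accesses are considered here. */
	if (off < 0 || size <= 0)
		return false;
	/* A possibly-negative variable part could reach below the object. */
	if (reg->smin_value < 0)
		return false;
	/* Cap the variable part so the additions below cannot overflow,
	 * mirroring the BPF_MAX_VAR_OFF limit above.
	 */
	if (reg->umax_value >= BPF_MAX_VAR_OFF)
		return false;
	/* Worst-case end of the access must stay inside the buffer. */
	return reg->umax_value + off + size <= buf_size;
}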
enum bpf_stack_slot_type {
	STACK_INVALID,    /* nothing was stored in this stack slot */
	STACK_SPILL,      /* register spilled into stack */
	STACK_MISC,       /* BPF program wrote some data into this slot */
	STACK_ZERO,       /* BPF program wrote constant zero */
};

#define BPF_REG_SIZE 8	/* size of eBPF register in bytes */

struct bpf_stack_state {
	struct bpf_reg_state spilled_ptr;
	u8 slot_type[BPF_REG_SIZE];
};
struct bpf_reference_state {
	/* Track each reference created with a unique id, even if the same
	 * instruction creates the reference multiple times (eg, via CALL).
	 */
	int id;
	/* Instruction where the allocation of this reference occurred. This
	 * is used purely to inform the user of a reference leak.
	 */
	int insn_idx;
};
/* state of the program:
 * type of all registers and stack info
 */
struct bpf_func_state {
	struct bpf_reg_state regs[MAX_BPF_REG];
	/* index of call instruction that called into this func */
	int callsite;
	/* stack frame number of this function state from pov of
	 * enclosing bpf_verifier_state.
	 * 0 = main function, 1 = first callee.
	 */
	u32 frameno;
	/* subprog number == index within subprog_stack_depth
	 * zero == main subprog
	 */
	u32 subprogno;

	/* The following fields should be last. See copy_func_state() */
	int acquired_refs;
	struct bpf_reference_state *refs;
	int allocated_stack;
	struct bpf_stack_state *stack;
};
#define MAX_CALL_FRAMES 8
struct bpf_verifier_state {
	/* call stack tracking */
	struct bpf_func_state *frame[MAX_CALL_FRAMES];
	u32 curframe;
};
#define bpf_get_spilled_reg(slot, frame)				\
	(((slot < frame->allocated_stack / BPF_REG_SIZE) &&		\
	  (frame->stack[slot].slot_type[0] == STACK_SPILL))		\
	 ? &frame->stack[slot].spilled_ptr : NULL)

/* Iterate over 'frame', setting 'reg' to either NULL or a spilled register. */
#define bpf_for_each_spilled_reg(iter, frame, reg)			\
	for (iter = 0, reg = bpf_get_spilled_reg(iter, frame);		\
	     iter < frame->allocated_stack / BPF_REG_SIZE;		\
	     iter++, reg = bpf_get_spilled_reg(iter, frame))
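
/* Illustrative only: a hypothetical helper showing the intended use of
 * bpf_for_each_spilled_reg(). 'reg' is NULL for slots that do not currently
 * hold a spilled register, so every iteration must check it.
 */
static inline int bpf_example_count_spilled(struct bpf_func_state *frame)
{
	struct bpf_reg_state *reg;
	int i, count = 0;

	bpf_for_each_spilled_reg(i, frame, reg) {
		if (!reg)
			continue;
		count++;
	}
	return count;
}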
/* linked list of verifier states used to prune search */
struct bpf_verifier_state_list {
	struct bpf_verifier_state state;
	struct bpf_verifier_state_list *next;
};
struct bpf_insn_aux_data {
	union {
		enum bpf_reg_type ptr_type;	/* pointer type for load/store insns */
		unsigned long map_state;	/* pointer/poison value for maps */
		s32 call_imm;			/* saved imm field of call insn */
	};
	int ctx_field_size; /* the ctx field size for load insn, maybe 0 */
	int sanitize_stack_off; /* stack slot to be cleared */
	bool seen; /* this insn was processed by the verifier */
};
#define MAX_USED_MAPS 64 /* max number of maps accessed by one eBPF program */

#define BPF_VERIFIER_TMP_LOG_SIZE	1024

struct bpf_verifier_log {
	u32 level;
	char kbuf[BPF_VERIFIER_TMP_LOG_SIZE];
	char __user *ubuf;
	u32 len_used;
	u32 len_total;
};
static inline bool bpf_verifier_log_full(const struct bpf_verifier_log *log)
{
	return log->len_used >= log->len_total - 1;
}

static inline bool bpf_verifier_log_needed(const struct bpf_verifier_log *log)
{
	return log->level && log->ubuf && !bpf_verifier_log_full(log);
}
#define BPF_MAX_SUBPROGS 256

struct bpf_subprog_info {
	u32 start; /* insn idx of function entry point */
	u16 stack_depth; /* max. stack depth used by this function */
};
/* single container for all structs
 * one verifier_env per bpf_check() call
 */
struct bpf_verifier_env {
	struct bpf_prog *prog;		/* eBPF program being verified */
	const struct bpf_verifier_ops *ops;
	struct bpf_verifier_stack_elem *head; /* stack of verifier states to be processed */
	int stack_size;			/* number of states to be processed */
	bool strict_alignment;		/* perform strict pointer alignment checks */
	struct bpf_verifier_state *cur_state; /* current verifier state */
	struct bpf_verifier_state_list **explored_states; /* search pruning optimization */
	struct bpf_map *used_maps[MAX_USED_MAPS]; /* array of maps used by eBPF program */
	u32 used_map_cnt;		/* number of used maps */
	u32 id_gen;			/* used to generate unique reg IDs */
	bool allow_ptr_leaks;
	bool seen_direct_write;
	struct bpf_insn_aux_data *insn_aux_data; /* array of per-insn state */
	struct bpf_verifier_log log;
	struct bpf_subprog_info subprog_info[BPF_MAX_SUBPROGS + 1];
	u32 subprog_cnt;
};
__printf(2, 0) void bpf_verifier_vlog(struct bpf_verifier_log *log,
				      const char *fmt, va_list args);
__printf(2, 3) void bpf_verifier_log_write(struct bpf_verifier_env *env,
					   const char *fmt, ...);
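
/* Illustrative only: a hypothetical wrapper showing the expected pattern of
 * guarding log writes with bpf_verifier_log_needed() so that no formatting
 * work is done when logging is disabled or the user buffer is already full.
 * The function name and message are made up for this example.
 */
static inline void bpf_example_log_insn(struct bpf_verifier_env *env,
					int insn_idx)
{
	if (!bpf_verifier_log_needed(&env->log))
		return;
	bpf_verifier_log_write(env, "insn %d: example diagnostic\n", insn_idx);
}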
static inline struct bpf_func_state *cur_func(struct bpf_verifier_env *env)
{
	struct bpf_verifier_state *cur = env->cur_state;

	return cur->frame[cur->curframe];
}

static inline struct bpf_reg_state *cur_regs(struct bpf_verifier_env *env)
{
	return cur_func(env)->regs;
}
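
/* Illustrative only: a hypothetical accessor built on cur_regs(), showing how
 * the state of one register in the innermost call frame is reached from the
 * verifier env. 'regno' is assumed to already be validated as < MAX_BPF_REG.
 */
static inline struct bpf_reg_state *bpf_example_reg(struct bpf_verifier_env *env,
						    u32 regno)
{
	return &cur_regs(env)[regno];
}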
int bpf_prog_offload_verifier_prep(struct bpf_verifier_env *env);
int bpf_prog_offload_verify_insn(struct bpf_verifier_env *env,
				 int insn_idx, int prev_insn_idx);
int bpf_prog_offload_finalize(struct bpf_verifier_env *env);

#endif /* _LINUX_BPF_VERIFIER_H */