/* SPDX-License-Identifier: GPL-2.0-only */
/* Copyright (c) 2011-2014 PLUMgrid, http://plumgrid.com
 */
#ifndef _LINUX_BPF_VERIFIER_H
#define _LINUX_BPF_VERIFIER_H 1

#include <linux/bpf.h> /* for enum bpf_reg_type */
#include <linux/btf.h> /* for struct btf and btf_id() */
#include <linux/filter.h> /* for MAX_BPF_STACK */
#include <linux/tnum.h>
/* Maximum variable offset umax_value permitted when resolving memory accesses.
 * In practice this is far bigger than any realistic pointer offset; this limit
 * ensures that umax_value + (int)off + (int)size cannot overflow a u64.
 */
#define BPF_MAX_VAR_OFF	(1 << 29)
/* Maximum variable size permitted for ARG_CONST_SIZE[_OR_ZERO].  This ensures
 * that converting umax_value to int cannot overflow.
 */
#define BPF_MAX_VAR_SIZ	(1 << 29)
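/* Hedged arithmetic sketch (illustrative, not kernel text): with umax_value
 * capped below 2^29 and (int)off and (int)size each at most 2^31 in
 * magnitude, umax_value + (int)off + (int)size stays below 2^33, far from
 * u64 overflow.
 */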
/* size of type_str_buf in bpf_verifier. */
#define TYPE_STR_BUF_LEN 128
/* Liveness marks, used for registers and spilled-regs (in stack slots).
 * Read marks propagate upwards until they find a write mark; they record that
 * "one of this state's descendants read this reg" (and therefore the reg is
 * relevant for states_equal() checks).
 * Write marks collect downwards and do not propagate; they record that "the
 * straight-line code that reached this state (from its parent) wrote this reg"
 * (and therefore that reads propagated from this state or its descendants
 * should not propagate to its parent).
 * A state with a write mark can receive read marks; it just won't propagate
 * them to its parent, since the write mark is a property, not of the state,
 * but of the link between it and its parent.  See mark_reg_read() and
 * mark_stack_slot_read() in kernel/bpf/verifier.c.
 */
enum bpf_reg_liveness {
	REG_LIVE_NONE = 0, /* reg hasn't been read or written this branch */
	REG_LIVE_READ32 = 0x1, /* reg was read, so we're sensitive to initial value */
	REG_LIVE_READ64 = 0x2, /* likewise, but full 64-bit content matters */
	REG_LIVE_READ = REG_LIVE_READ32 | REG_LIVE_READ64,
	REG_LIVE_WRITTEN = 0x4, /* reg was written first, screening off later reads */
	REG_LIVE_DONE = 0x8, /* liveness won't be updating this register anymore */
};
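/* Illustrative sketch of the marks above (an assumption for exposition, not
 * verifier output), for a state whose straight-line code is:
 *
 *	r1 = 0		// r1 becomes REG_LIVE_WRITTEN here; later reads of
 *			// r1 are screened off and never reach the parent
 *	r2 += r3	// r2 and r3 are read before any write in this state,
 *			// so REG_LIVE_READ64 propagates up the parentage chain
 */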
struct bpf_reg_state {
	/* Ordering of fields matters.  See states_equal() */
	enum bpf_reg_type type;
	/* Fixed part of pointer offset, pointer types only */
	s32 off;
	union {
		/* valid when type == PTR_TO_PACKET */
		int range;

		/* valid when type == CONST_PTR_TO_MAP | PTR_TO_MAP_VALUE |
		 *   PTR_TO_MAP_VALUE_OR_NULL
		 */
		struct {
			struct bpf_map *map_ptr;
			/* To distinguish map lookups from outer map
			 * the map_uid is non-zero for registers
			 * pointing to inner maps.
			 */
			u32 map_uid;
		};

		/* for PTR_TO_BTF_ID */
		struct {
			struct btf *btf;
			u32 btf_id;
		};

		u32 mem_size; /* for PTR_TO_MEM | PTR_TO_MEM_OR_NULL */

		/* For dynptr stack slots */
		struct {
			enum bpf_dynptr_type type;
			/* A dynptr is 16 bytes so it takes up 2 stack slots.
			 * We need to track which slot is the first slot
			 * to protect against cases where the user may try to
			 * pass in an address starting at the second slot of the
			 * dynptr.
			 */
			bool first_slot;
		} dynptr;

		/* Max size from any of the above. */
		struct {
			unsigned long raw1;
			unsigned long raw2;
		} raw;

		u32 subprogno; /* for PTR_TO_FUNC */
	};
	/* For PTR_TO_PACKET, used to find other pointers with the same variable
	 * offset, so they can share range knowledge.
	 * For PTR_TO_MAP_VALUE_OR_NULL this is used to share which map value we
	 * came from, when one is tested for != NULL.
	 * For PTR_TO_MEM_OR_NULL this is used to identify memory allocation
	 * for the purpose of tracking that it's freed.
	 * For PTR_TO_SOCKET this is used to share which pointers retain the
	 * same reference to the socket, to determine proper reference freeing.
	 * For stack slots that are dynptrs, this is used to track references to
	 * the dynptr to determine proper reference freeing.
	 */
	u32 id;
	/* PTR_TO_SOCKET and PTR_TO_TCP_SOCK could be a ptr returned
	 * from a pointer-cast helper, bpf_sk_fullsock() and
	 * bpf_tcp_sock().
	 *
	 * Consider the following where "sk" is a reference counted
	 * pointer returned from "sk = bpf_sk_lookup_tcp();":
	 *
	 * 1: sk = bpf_sk_lookup_tcp();
	 * 2: if (!sk) { return 0; }
	 * 3: fullsock = bpf_sk_fullsock(sk);
	 * 4: if (!fullsock) { bpf_sk_release(sk); return 0; }
	 * 5: tp = bpf_tcp_sock(fullsock);
	 * 6: if (!tp) { bpf_sk_release(sk); return 0; }
	 * 7: bpf_sk_release(sk);
	 * 8: snd_cwnd = tp->snd_cwnd;  // verifier will complain
	 *
	 * After bpf_sk_release(sk) at line 7, both "fullsock" ptr and
	 * "tp" ptr should be invalidated also.  In order to do that,
	 * the reg holding "fullsock" and "sk" need to remember
	 * the original refcounted ptr id (i.e. sk_reg->id) in ref_obj_id
	 * such that the verifier can reset all regs which have
	 * ref_obj_id matching the sk_reg->id.
	 *
	 * sk_reg->ref_obj_id is set to sk_reg->id at line 1.
	 * sk_reg->id will stay as NULL-marking purpose only.
	 * After NULL-marking is done, sk_reg->id can be reset to 0.
	 *
	 * After "fullsock = bpf_sk_fullsock(sk);" at line 3,
	 * fullsock_reg->ref_obj_id is set to sk_reg->ref_obj_id.
	 *
	 * After "tp = bpf_tcp_sock(fullsock);" at line 5,
	 * tp_reg->ref_obj_id is set to fullsock_reg->ref_obj_id
	 * which is the same as sk_reg->ref_obj_id.
	 *
	 * From the verifier perspective, if sk, fullsock and tp
	 * are not NULL, they are the same ptr with different
	 * reg->type.  In particular, bpf_sk_release(tp) is also
	 * allowed and has the same effect as bpf_sk_release(sk).
	 */
	u32 ref_obj_id;
	/* For scalar types (SCALAR_VALUE), this represents our knowledge of
	 * the actual value.
	 * For pointer types, this represents the variable part of the offset
	 * from the pointed-to object, and is shared with all bpf_reg_states
	 * with the same id as us.
	 */
	struct tnum var_off;
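	/* Hedged illustration (not kernel text): after "r0 &= 0xff" on an
	 * unknown scalar, var_off would be the tnum { .value = 0, .mask = 0xff }:
	 * the upper 56 bits are known zero, the low byte is unknown.
	 */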
	/* Used to determine if any memory access using this register will
	 * result in a bad access.
	 * These refer to the same value as var_off, not necessarily the actual
	 * contents of the register.
	 */
	s64 smin_value; /* minimum possible (s64)value */
	s64 smax_value; /* maximum possible (s64)value */
	u64 umin_value; /* minimum possible (u64)value */
	u64 umax_value; /* maximum possible (u64)value */
	s32 s32_min_value; /* minimum possible (s32)value */
	s32 s32_max_value; /* maximum possible (s32)value */
	u32 u32_min_value; /* minimum possible (u32)value */
	u32 u32_max_value; /* maximum possible (u32)value */
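	/* Hedged illustration (not kernel text): on the fall-through path of
	 * "if r1 > 15 goto ...", an unknown scalar r1 is narrowed to
	 * umin_value == 0 and umax_value == 15, with the signed and 32-bit
	 * bounds tightened to the same [0, 15] range.
	 */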
	/* parentage chain for liveness checking */
	struct bpf_reg_state *parent;
	/* Inside the callee two registers can be both PTR_TO_STACK like
	 * R1=fp-8 and R2=fp-8, but one of them points to this function stack
	 * while another to the caller's stack. To differentiate them 'frameno'
	 * is used which is an index in bpf_verifier_state->frame[] array
	 * pointing to bpf_func_state.
	 */
	u32 frameno;
	/* Tracks subreg definition. The stored value is the insn_idx of the
	 * writing insn. This is safe because subreg_def is used before any insn
	 * patching which only happens after main verification finished.
	 */
	s32 subreg_def;
	enum bpf_reg_liveness live;
	/* if (!precise && SCALAR_VALUE) min/max/tnum don't affect safety */
	bool precise;
};
enum bpf_stack_slot_type {
	STACK_INVALID,    /* nothing was stored in this stack slot */
	STACK_SPILL,      /* register spilled into stack */
	STACK_MISC,	  /* BPF program wrote some data into this slot */
	STACK_ZERO,	  /* BPF program wrote constant zero */
	/* A dynptr is stored in this stack slot. The type of dynptr
	 * is stored in bpf_stack_state->spilled_ptr.dynptr.type
	 */
	STACK_DYNPTR,
};

#define BPF_REG_SIZE 8	/* size of eBPF register in bytes */
#define BPF_DYNPTR_SIZE		sizeof(struct bpf_dynptr_kern)
#define BPF_DYNPTR_NR_SLOTS	(BPF_DYNPTR_SIZE / BPF_REG_SIZE)
struct bpf_stack_state {
	struct bpf_reg_state spilled_ptr;
	u8 slot_type[BPF_REG_SIZE];
};
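/* Hedged illustration (not kernel text): after "*(u64 *)(r10 - 8) = r1" all
 * eight slot_type[] bytes of that slot become STACK_SPILL and r1's state is
 * snapshotted into spilled_ptr; after "*(u32 *)(r10 - 8) = 0" the written
 * bytes become STACK_ZERO while never-written bytes stay STACK_INVALID.
 */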
struct bpf_reference_state {
	/* Track each reference created with a unique id, even if the same
	 * instruction creates the reference multiple times (e.g., via CALL).
	 */
	int id;
	/* Instruction where the allocation of this reference occurred. This
	 * is used purely to inform the user of a reference leak.
	 */
	int insn_idx;
	/* There can be a case like:
	 * main (frame 0)
	 *  cb (frame 1)
	 *   func (frame 2)
	 *    cb (frame 3)
	 * Hence for frame 3, if callback_ref just stored boolean, it would be
	 * impossible to distinguish nested callback refs. Hence store the
	 * frameno and compare that to callback_ref in check_reference_leak when
	 * exiting a callback function.
	 */
	int callback_ref;
	/* Mark the reference state to release the registers sharing the same id
	 * on bpf_spin_unlock (for nodes that we will lose ownership to but are
	 * safe to access inside the critical section).
	 */
	bool release_on_unlock;
};
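/* Hedged sketch (not kernel text) of the lifecycle this struct tracks:
 * "sk = bpf_sk_lookup_tcp(...);" acquires a reference, recording a fresh id
 * and the acquiring insn_idx; the matching "bpf_sk_release(sk);" drops the
 * entry again. An entry still live at BPF_EXIT is reported as a leak, with
 * insn_idx pointing the user at the allocating instruction.
 */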
/* state of the program:
 * type of all registers and stack info
 */
struct bpf_func_state {
	struct bpf_reg_state regs[MAX_BPF_REG];
	/* index of call instruction that called into this func */
	int callsite;
	/* stack frame number of this function state from pov of
	 * enclosing bpf_verifier_state.
	 * 0 = main function, 1 = first callee.
	 */
	u32 frameno;
	/* subprog number == index within subprog_info
	 * zero == main subprog
	 */
	u32 subprogno;
	/* Every bpf_timer_start will increment async_entry_cnt.
	 * It's used to distinguish:
	 * void foo(void) { for(;;); }
	 * void foo(void) { bpf_timer_set_callback(,foo); }
	 */
	u32 async_entry_cnt;
	struct tnum callback_ret_range;
	bool in_async_callback_fn;

	/* The following fields should be last. See copy_func_state() */
	int acquired_refs;
	struct bpf_reference_state *refs;
	int allocated_stack;
	struct bpf_stack_state *stack;
};
struct bpf_idx_pair {
	u32 prev_idx;
	u32 idx;
};

struct bpf_id_pair {
	u32 old;
	u32 cur;
};

#define MAX_CALL_FRAMES 8
/* Maximum number of register states that can exist at once */
#define BPF_ID_MAP_SIZE ((MAX_BPF_REG + MAX_BPF_STACK / BPF_REG_SIZE) * MAX_CALL_FRAMES)
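/* Worked example (assuming this era's MAX_BPF_REG == 11 and
 * MAX_BPF_STACK == 512): BPF_ID_MAP_SIZE == (11 + 512 / 8) * 8 == 600
 * bpf_id_pair entries in idmap_scratch below.
 */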
struct bpf_verifier_state {
	/* call stack tracking */
	struct bpf_func_state *frame[MAX_CALL_FRAMES];
	struct bpf_verifier_state *parent;
	/*
	 * 'branches' field is the number of branches left to explore:
	 * 0 - all possible paths from this state reached bpf_exit or
	 *     were safely pruned
	 * 1 - at least one path is being explored.
	 *     This state hasn't reached bpf_exit
	 * 2 - at least two paths are being explored.
	 *     This state is an immediate parent of two children.
	 *     One is fallthrough branch with branches==1 and another
	 *     state is pushed into stack (to be explored later) also with
	 *     branches==1. The parent of this state has branches==1.
	 *     The verifier state tree connected via 'parent' pointer looks like:
	 *     1
	 *     1
	 *     2 -> 1 (first 'if' pushed into stack)
	 *     1
	 *     2 -> 1 (second 'if' pushed into stack)
	 *     1
	 *     1
	 *     1
	 *
	 *     Once do_check() reaches bpf_exit, it calls update_branch_counts()
	 *     and the verifier state tree will look:
	 *     1
	 *     1
	 *     2 -> 1 (first 'if' pushed into stack)
	 *     1
	 *     1 -> 1 (second 'if' pushed into stack)
	 *     0
	 *     0
	 *     0
	 *
	 *     After pop_stack() the do_check() will resume at second 'if'.
	 *
	 *     If is_state_visited() sees a state with branches > 0 it means
	 *     there is a loop. If such state is exactly equal to the current state
	 *     it's an infinite loop. Note states_equal() checks for states
	 *     equivalency, so two states being 'states_equal' does not mean
	 *     infinite loop. The exact comparison is provided by
	 *     states_maybe_looping() function. It's a stronger pre-check and
	 *     much faster than states_equal().
	 *
	 *     This algorithm may not find all possible infinite loops or
	 *     loop iteration count may be too high.
	 *     In such cases BPF_COMPLEXITY_LIMIT_INSNS limit kicks in.
	 */
	u32 branches;
	u32 insn_idx;
	u32 curframe;

	/* For every reg representing a map value or allocated object pointer,
	 * we consider the tuple of (ptr, id) for them to be unique in verifier
	 * context and consider them to not alias each other for the purposes of
	 * tracking lock state.
	 */
	struct {
		/* This can either be reg->map_ptr or reg->btf. If ptr is NULL,
		 * there's no active lock held, and other fields have no
		 * meaning. If non-NULL, it indicates that a lock is held and
		 * id member has the reg->id of the register which can be >= 0.
		 */
		void *ptr;
		/* This will be reg->id */
		u32 id;
	} active_lock;

	bool speculative;
	bool active_rcu_lock;

	/* first and last insn idx of this verifier state */
	u32 first_insn_idx;
	u32 last_insn_idx;
	/* jmp history recorded from first to last.
	 * backtracking is using it to go from last to first.
	 * For most states jmp_history_cnt is [0-3].
	 * For loops can go up to ~40.
	 */
	struct bpf_idx_pair *jmp_history;
	u32 jmp_history_cnt;
};
#define bpf_get_spilled_reg(slot, frame)				\
	(((slot < frame->allocated_stack / BPF_REG_SIZE) &&		\
	  (frame->stack[slot].slot_type[0] == STACK_SPILL))		\
	 ? &frame->stack[slot].spilled_ptr : NULL)
/* Iterate over 'frame', setting 'reg' to either NULL or a spilled register. */
#define bpf_for_each_spilled_reg(iter, frame, reg)			\
	for (iter = 0, reg = bpf_get_spilled_reg(iter, frame);		\
	     iter < frame->allocated_stack / BPF_REG_SIZE;		\
	     iter++, reg = bpf_get_spilled_reg(iter, frame))
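/* Hedged usage sketch (not kernel code): walk one frame's stack slots and
 * clear the id of every spilled register, skipping non-spill slots, which
 * the iterator yields as NULL:
 *
 *	int i;
 *	struct bpf_reg_state *reg;
 *
 *	bpf_for_each_spilled_reg(i, state, reg) {
 *		if (!reg)
 *			continue;
 *		reg->id = 0;
 *	}
 */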
/* Invoke __expr over registers in __vst, setting __state and __reg */
#define bpf_for_each_reg_in_vstate(__vst, __state, __reg, __expr)   \
	({                                                               \
		struct bpf_verifier_state *___vstate = __vst;            \
		int ___i, ___j;                                          \
		for (___i = 0; ___i <= ___vstate->curframe; ___i++) {    \
			struct bpf_reg_state *___regs;                   \
			__state = ___vstate->frame[___i];                \
			___regs = __state->regs;                         \
			for (___j = 0; ___j < MAX_BPF_REG; ___j++) {     \
				__reg = &___regs[___j];                  \
				(void)(__expr);                          \
			}                                                \
			bpf_for_each_spilled_reg(___j, __state, __reg) { \
				if (!__reg)                              \
					continue;                        \
				(void)(__expr);                          \
			}                                                \
		}                                                        \
	})
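/* Hedged usage sketch (not kernel code): apply an expression to every
 * register and spilled register across all frames of a verifier state,
 * e.g. invalidating registers that share a released ref_obj_id:
 *
 *	struct bpf_func_state *state;
 *	struct bpf_reg_state *reg;
 *
 *	bpf_for_each_reg_in_vstate(env->cur_state, state, reg, ({
 *		if (reg->ref_obj_id == released_id)
 *			reg->type = NOT_INIT;
 *	}));
 */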
/* linked list of verifier states used to prune search */
struct bpf_verifier_state_list {
	struct bpf_verifier_state state;
	struct bpf_verifier_state_list *next;
	int miss_cnt, hit_cnt;
};
struct bpf_loop_inline_state {
	unsigned int initialized:1; /* set to true upon first entry */
	unsigned int fit_for_inline:1; /* true if callback function is the same
					* at each call and flags are always zero
					*/
	u32 callback_subprogno; /* valid when fit_for_inline is true */
};
/* Possible states for alu_state member. */
#define BPF_ALU_SANITIZE_SRC	(1U << 0)
#define BPF_ALU_SANITIZE_DST	(1U << 1)
#define BPF_ALU_NEG_VALUE	(1U << 2)
#define BPF_ALU_NON_POINTER	(1U << 3)
#define BPF_ALU_IMMEDIATE	(1U << 4)
#define BPF_ALU_SANITIZE	(BPF_ALU_SANITIZE_SRC |	\
				 BPF_ALU_SANITIZE_DST)
struct bpf_insn_aux_data {
	union {
		enum bpf_reg_type ptr_type;	/* pointer type for load/store insns */
		unsigned long map_ptr_state;	/* pointer/poison value for maps */
		s32 call_imm;			/* saved imm field of call insn */
		u32 alu_limit;			/* limit for add/sub register with pointer */
		struct {
			u32 map_index;		/* index into used_maps[] */
			u32 map_off;		/* offset from value base address */
		};
		struct {
			enum bpf_reg_type reg_type;	/* type of pseudo_btf_id */
			union {
				struct {
					struct btf *btf;
					u32 btf_id;	/* btf_id for struct typed var */
				};
				u32 mem_size;	/* mem_size for non-struct typed var */
			};
		} btf_var;
		/* if instruction is a call to bpf_loop this field tracks
		 * the state of the relevant registers to make decision about inlining
		 */
		struct bpf_loop_inline_state loop_inline_state;
	};
	u64 obj_new_size; /* remember the size of type passed to bpf_obj_new to rewrite R1 */
	struct btf_struct_meta *kptr_struct_meta;
	u64 map_key_state; /* constant (32 bit) key tracking for maps */
	int ctx_field_size; /* the ctx field size for load insn, maybe 0 */
	u32 seen; /* this insn was processed by the verifier at env->pass_cnt */
	bool sanitize_stack_spill; /* subject to Spectre v4 sanitation */
	bool zext_dst; /* this insn zero extends dst reg */
	bool storage_get_func_atomic; /* bpf_*_storage_get() with atomic memory alloc */
	u8 alu_state; /* used in combination with alu_limit */

	/* below fields are initialized once */
	unsigned int orig_idx; /* original instruction index */
	bool prune_point;
	bool jmp_point;
};
#define MAX_USED_MAPS 64 /* max number of maps accessed by one eBPF program */
#define MAX_USED_BTFS 64 /* max number of BTFs accessed by one BPF program */

#define BPF_VERIFIER_TMP_LOG_SIZE	1024

struct bpf_verifier_log {
	u32 level;
	char kbuf[BPF_VERIFIER_TMP_LOG_SIZE];
	char __user *ubuf;
	u32 len_used;
	u32 len_total;
};

static inline bool bpf_verifier_log_full(const struct bpf_verifier_log *log)
{
	return log->len_used >= log->len_total - 1;
}
#define BPF_LOG_LEVEL1	1
#define BPF_LOG_LEVEL2	2
#define BPF_LOG_STATS	4
#define BPF_LOG_LEVEL	(BPF_LOG_LEVEL1 | BPF_LOG_LEVEL2)
#define BPF_LOG_MASK	(BPF_LOG_LEVEL | BPF_LOG_STATS)
#define BPF_LOG_KERNEL	(BPF_LOG_MASK + 1) /* kernel internal flag */
#define BPF_LOG_MIN_ALIGNMENT 8U
#define BPF_LOG_ALIGNMENT 40U
static inline bool bpf_verifier_log_needed(const struct bpf_verifier_log *log)
{
	return log &&
		((log->level && log->ubuf && !bpf_verifier_log_full(log)) ||
		 log->level == BPF_LOG_KERNEL);
}

static inline bool
bpf_verifier_log_attr_valid(const struct bpf_verifier_log *log)
{
	return log->len_total >= 128 && log->len_total <= UINT_MAX >> 2 &&
	       log->level && log->ubuf && !(log->level & ~BPF_LOG_MASK);
}
#define BPF_MAX_SUBPROGS 256

struct bpf_subprog_info {
	/* 'start' has to be the first field otherwise find_subprog() won't work */
	u32 start; /* insn idx of function entry point */
	u32 linfo_idx; /* The idx to the main_prog->aux->linfo */
	u16 stack_depth; /* max. stack depth used by this function */
	bool has_tail_call;
	bool tail_call_reachable;
	bool has_ld_abs;
	bool is_async_cb;
};
/* single container for all structs
 * one verifier_env per bpf_check() call
 */
struct bpf_verifier_env {
	u32 insn_idx;
	u32 prev_insn_idx;
	struct bpf_prog *prog;		/* eBPF program being verified */
	const struct bpf_verifier_ops *ops;
	struct bpf_verifier_stack_elem *head; /* stack of verifier states to be processed */
	int stack_size;			/* number of states to be processed */
	bool strict_alignment;		/* perform strict pointer alignment checks */
	bool test_state_freq;		/* test verifier with different pruning frequency */
	struct bpf_verifier_state *cur_state; /* current verifier state */
	struct bpf_verifier_state_list **explored_states; /* search pruning optimization */
	struct bpf_verifier_state_list *free_list;
	struct bpf_map *used_maps[MAX_USED_MAPS]; /* array of maps used by eBPF program */
	struct btf_mod_pair used_btfs[MAX_USED_BTFS]; /* array of BTFs used by BPF program */
	u32 used_map_cnt;		/* number of used maps */
	u32 used_btf_cnt;		/* number of used BTF objects */
	u32 id_gen;			/* used to generate unique reg IDs */
	bool explore_alu_limits;
	bool allow_ptr_leaks;
	bool allow_uninit_stack;
	bool bpf_capable;
	bool bypass_spec_v1;
	bool bypass_spec_v4;
	bool seen_direct_write;
	bool rcu_tag_supported;
	struct bpf_insn_aux_data *insn_aux_data; /* array of per-insn state */
	const struct bpf_line_info *prev_linfo;
	struct bpf_verifier_log log;
	struct bpf_subprog_info subprog_info[BPF_MAX_SUBPROGS + 1];
	struct bpf_id_pair idmap_scratch[BPF_ID_MAP_SIZE];
	struct {
		int *insn_state;
		int *insn_stack;
		int cur_stack;
	} cfg;
	u32 pass_cnt; /* number of times do_check() was called */
	u32 subprog_cnt;
	/* number of instructions analyzed by the verifier */
	u32 prev_insn_processed, insn_processed;
	/* number of jmps, calls, exits analyzed so far */
	u32 prev_jmps_processed, jmps_processed;
	/* total verification time */
	u64 verification_time;
	/* maximum number of verifier states kept in 'branching' instructions */
	u32 max_states_per_insn;
	/* total number of allocated verifier states */
	u32 total_states;
	/* some states are freed during program analysis.
	 * this is peak number of states. this number dominates kernel
	 * memory consumption during verification
	 */
	u32 peak_states;
	/* longest register parentage chain walked for liveness marking */
	u32 longest_mark_read_walk;
	bpfptr_t fd_array;

	/* bit mask to keep track of whether a register has been accessed
	 * since the last time the function state was printed
	 */
	u32 scratched_regs;
	/* Same as scratched_regs but for stack slots */
	u64 scratched_stack_slots;
	u32 prev_log_len, prev_insn_print_len;
	/* buffer used in reg_type_str() to generate reg_type string */
	char type_str_buf[TYPE_STR_BUF_LEN];
};
__printf(2, 0) void bpf_verifier_vlog(struct bpf_verifier_log *log,
				      const char *fmt, va_list args);
__printf(2, 3) void bpf_verifier_log_write(struct bpf_verifier_env *env,
					   const char *fmt, ...);
__printf(2, 3) void bpf_log(struct bpf_verifier_log *log,
			    const char *fmt, ...);
static inline struct bpf_func_state *cur_func(struct bpf_verifier_env *env)
{
	struct bpf_verifier_state *cur = env->cur_state;

	return cur->frame[cur->curframe];
}

static inline struct bpf_reg_state *cur_regs(struct bpf_verifier_env *env)
{
	return cur_func(env)->regs;
}
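/* Hedged usage sketch (not kernel code): inspect R0 of the innermost frame:
 *
 *	struct bpf_reg_state *r0 = &cur_regs(env)[BPF_REG_0];
 *
 *	if (r0->type == SCALAR_VALUE)
 *		...
 */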
int bpf_prog_offload_verifier_prep(struct bpf_prog *prog);
int bpf_prog_offload_verify_insn(struct bpf_verifier_env *env,
				 int insn_idx, int prev_insn_idx);
int bpf_prog_offload_finalize(struct bpf_verifier_env *env);
void
bpf_prog_offload_replace_insn(struct bpf_verifier_env *env, u32 off,
			      struct bpf_insn *insn);
void
bpf_prog_offload_remove_insns(struct bpf_verifier_env *env, u32 off, u32 cnt);
int check_ptr_off_reg(struct bpf_verifier_env *env,
		      const struct bpf_reg_state *reg, int regno);
int check_func_arg_reg_off(struct bpf_verifier_env *env,
			   const struct bpf_reg_state *reg, int regno,
			   enum bpf_arg_type arg_type);
int check_mem_reg(struct bpf_verifier_env *env, struct bpf_reg_state *reg,
		  u32 regno, u32 mem_size);
struct bpf_call_arg_meta;
int process_dynptr_func(struct bpf_verifier_env *env, int regno,
			enum bpf_arg_type arg_type, struct bpf_call_arg_meta *meta);
/* this lives here instead of in bpf.h because it needs to dereference tgt_prog */
static inline u64 bpf_trampoline_compute_key(const struct bpf_prog *tgt_prog,
					     struct btf *btf, u32 btf_id)
{
	if (tgt_prog)
		return ((u64)tgt_prog->aux->id << 32) | btf_id;
	else
		return ((u64)btf_obj_id(btf) << 32) | 0x80000000 | btf_id;
}

/* unpack the IDs from the key as constructed above */
static inline void bpf_trampoline_unpack_key(u64 key, u32 *obj_id, u32 *btf_id)
{
	if (obj_id)
		*obj_id = key >> 32;
	if (btf_id)
		*btf_id = key & 0x7FFFFFFF;
}
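/* Hedged round-trip example (not kernel text): with no tgt_prog, a BTF whose
 * btf_obj_id() is 3, and btf_id 0x2a, the key is
 * (3ULL << 32) | 0x80000000 | 0x2a; unpacking recovers obj_id == 3 and
 * btf_id == 0x2a, bit 31 having only distinguished it from a tgt_prog key.
 */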
int bpf_check_attach_target(struct bpf_verifier_log *log,
			    const struct bpf_prog *prog,
			    const struct bpf_prog *tgt_prog,
			    u32 btf_id,
			    struct bpf_attach_target_info *tgt_info);
void bpf_free_kfunc_btf_tab(struct bpf_kfunc_btf_tab *tab);

int mark_chain_precision(struct bpf_verifier_env *env, int regno);
#define BPF_BASE_TYPE_MASK	GENMASK(BPF_BASE_TYPE_BITS - 1, 0)

/* extract base type from bpf_{arg, return, reg}_type. */
static inline u32 base_type(u32 type)
{
	return type & BPF_BASE_TYPE_MASK;
}

/* extract flags from an extended type. See bpf_type_flag in bpf.h. */
static inline u32 type_flag(u32 type)
{
	return type & ~BPF_BASE_TYPE_MASK;
}
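/* Hedged example (not kernel text): for a register typed
 * PTR_TO_MAP_VALUE | PTR_MAYBE_NULL, base_type() returns PTR_TO_MAP_VALUE
 * and type_flag() returns PTR_MAYBE_NULL.
 */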
/* only use after check_attach_btf_id() */
static inline enum bpf_prog_type resolve_prog_type(const struct bpf_prog *prog)
{
	return prog->type == BPF_PROG_TYPE_EXT ?
		prog->aux->dst_prog->type : prog->type;
}

static inline bool bpf_prog_check_recur(const struct bpf_prog *prog)
{
	switch (resolve_prog_type(prog)) {
	case BPF_PROG_TYPE_TRACING:
		return prog->expected_attach_type != BPF_TRACE_ITER;
	case BPF_PROG_TYPE_STRUCT_OPS:
	case BPF_PROG_TYPE_LSM:
		return false;
	default:
		return true;
	}
}
#define BPF_REG_TRUSTED_MODIFIERS (MEM_ALLOC | PTR_TRUSTED)

static inline bool bpf_type_has_unsafe_modifiers(u32 type)
{
	return type_flag(type) & ~BPF_REG_TRUSTED_MODIFIERS;
}

#endif /* _LINUX_BPF_VERIFIER_H */