/* SPDX-License-Identifier: GPL-2.0-only */
/* Copyright (c) 2011-2014 PLUMgrid, http://plumgrid.com */
#include <uapi/linux/bpf.h>
#include <uapi/linux/filter.h>
#include <linux/workqueue.h>
#include <linux/file.h>
#include <linux/percpu.h>
#include <linux/err.h>
#include <linux/rbtree_latch.h>
#include <linux/numa.h>
#include <linux/mm_types.h>
#include <linux/wait.h>
#include <linux/refcount.h>
#include <linux/mutex.h>
#include <linux/module.h>
#include <linux/kallsyms.h>
#include <linux/capability.h>
#include <linux/sched/mm.h>
#include <linux/slab.h>
#include <linux/percpu-refcount.h>
#include <linux/stddef.h>
#include <linux/bpfptr.h>
#include <linux/btf.h>
#include <linux/rcupdate_trace.h>
#include <linux/static_call.h>
#include <linux/memcontrol.h>
#include <linux/cfi.h>
struct bpf_verifier_env;
struct bpf_verifier_log;
struct exception_table_entry;
struct seq_operations;
struct bpf_iter_aux_info;
struct bpf_local_storage;
struct bpf_local_storage_map;
struct bpf_func_state;

extern struct idr btf_idr;
extern spinlock_t btf_idr_lock;
extern struct kobject *btf_kobj;
extern struct bpf_mem_alloc bpf_global_ma, bpf_global_percpu_ma;
extern bool bpf_global_ma_set;

typedef u64 (*bpf_callback_t)(u64, u64, u64, u64, u64);
typedef int (*bpf_iter_init_seq_priv_t)(void *private_data,
					struct bpf_iter_aux_info *aux);
typedef void (*bpf_iter_fini_seq_priv_t)(void *private_data);
typedef unsigned int (*bpf_func_t)(const void *,
				   const struct bpf_insn *);
struct bpf_iter_seq_info {
	const struct seq_operations *seq_ops;
	bpf_iter_init_seq_priv_t init_seq_private;
	bpf_iter_fini_seq_priv_t fini_seq_private;
/* map is generic key/value storage optionally accessible by eBPF programs */
	/* funcs callable from userspace (via syscall) */
	int (*map_alloc_check)(union bpf_attr *attr);
	struct bpf_map *(*map_alloc)(union bpf_attr *attr);
	void (*map_release)(struct bpf_map *map, struct file *map_file);
	void (*map_free)(struct bpf_map *map);
	int (*map_get_next_key)(struct bpf_map *map, void *key, void *next_key);
	void (*map_release_uref)(struct bpf_map *map);
	void *(*map_lookup_elem_sys_only)(struct bpf_map *map, void *key);
	int (*map_lookup_batch)(struct bpf_map *map, const union bpf_attr *attr,
				union bpf_attr __user *uattr);
	int (*map_lookup_and_delete_elem)(struct bpf_map *map, void *key,
					  void *value, u64 flags);
	int (*map_lookup_and_delete_batch)(struct bpf_map *map,
					   const union bpf_attr *attr,
					   union bpf_attr __user *uattr);
	int (*map_update_batch)(struct bpf_map *map, struct file *map_file,
				const union bpf_attr *attr,
				union bpf_attr __user *uattr);
	int (*map_delete_batch)(struct bpf_map *map, const union bpf_attr *attr,
				union bpf_attr __user *uattr);

	/* funcs callable from userspace and from eBPF programs */
	void *(*map_lookup_elem)(struct bpf_map *map, void *key);
	long (*map_update_elem)(struct bpf_map *map, void *key, void *value, u64 flags);
	long (*map_delete_elem)(struct bpf_map *map, void *key);
	long (*map_push_elem)(struct bpf_map *map, void *value, u64 flags);
	long (*map_pop_elem)(struct bpf_map *map, void *value);
	long (*map_peek_elem)(struct bpf_map *map, void *value);
	void *(*map_lookup_percpu_elem)(struct bpf_map *map, void *key, u32 cpu);
	/* funcs called by prog_array and perf_event_array map */
	void *(*map_fd_get_ptr)(struct bpf_map *map, struct file *map_file,
	/* If need_defer is true, the implementation should guarantee that
	 * the to-be-put element is still alive before the bpf program, which
	 * may manipulate it, exits.
	 */
	void (*map_fd_put_ptr)(struct bpf_map *map, void *ptr, bool need_defer);
	int (*map_gen_lookup)(struct bpf_map *map, struct bpf_insn *insn_buf);
	u32 (*map_fd_sys_lookup_elem)(void *ptr);
	void (*map_seq_show_elem)(struct bpf_map *map, void *key,
	int (*map_check_btf)(const struct bpf_map *map,
			     const struct btf *btf,
			     const struct btf_type *key_type,
			     const struct btf_type *value_type);
	/* Prog poke tracking helpers. */
	int (*map_poke_track)(struct bpf_map *map, struct bpf_prog_aux *aux);
	void (*map_poke_untrack)(struct bpf_map *map, struct bpf_prog_aux *aux);
	void (*map_poke_run)(struct bpf_map *map, u32 key, struct bpf_prog *old,
			     struct bpf_prog *new);

	/* Direct value access helpers. */
	int (*map_direct_value_addr)(const struct bpf_map *map,
	int (*map_direct_value_meta)(const struct bpf_map *map,
	int (*map_mmap)(struct bpf_map *map, struct vm_area_struct *vma);
	__poll_t (*map_poll)(struct bpf_map *map, struct file *filp,
			     struct poll_table_struct *pts);

	/* Functions called by bpf_local_storage maps */
	int (*map_local_storage_charge)(struct bpf_local_storage_map *smap,
					void *owner, u32 size);
	void (*map_local_storage_uncharge)(struct bpf_local_storage_map *smap,
					   void *owner, u32 size);
	struct bpf_local_storage __rcu ** (*map_owner_storage_ptr)(void *owner);

	long (*map_redirect)(struct bpf_map *map, u64 key, u64 flags);
	/* map_meta_equal must be implemented for maps that can be
	 * used as an inner map. It is a runtime check to ensure
	 * an inner map can be inserted to an outer map.
	 *
	 * Some properties of the inner map have been used during
	 * verification time. When inserting an inner map at runtime,
	 * map_meta_equal has to ensure the inserted map has the same
	 * properties that the verifier has used earlier.
	 */
	bool (*map_meta_equal)(const struct bpf_map *meta0,
			       const struct bpf_map *meta1);

	int (*map_set_for_each_callback_args)(struct bpf_verifier_env *env,
					      struct bpf_func_state *caller,
					      struct bpf_func_state *callee);
	long (*map_for_each_callback)(struct bpf_map *map,
				      bpf_callback_t callback_fn,
				      void *callback_ctx, u64 flags);

	u64 (*map_mem_usage)(const struct bpf_map *map);

	/* BTF id of struct allocated by map_alloc */

	/* bpf_iter info used to open a seq_file */
	const struct bpf_iter_seq_info *iter_seq_info;
/* Support at most 10 fields in a BTF type */

enum btf_field_type {
	BPF_SPIN_LOCK   = (1 << 0),
	BPF_TIMER       = (1 << 1),
	BPF_KPTR_UNREF  = (1 << 2),
	BPF_KPTR_REF    = (1 << 3),
	BPF_KPTR_PERCPU = (1 << 4),
	BPF_KPTR        = BPF_KPTR_UNREF | BPF_KPTR_REF | BPF_KPTR_PERCPU,
	BPF_LIST_HEAD   = (1 << 5),
	BPF_LIST_NODE   = (1 << 6),
	BPF_RB_ROOT     = (1 << 7),
	BPF_RB_NODE     = (1 << 8),
	BPF_GRAPH_NODE  = BPF_RB_NODE | BPF_LIST_NODE,
	BPF_GRAPH_ROOT  = BPF_RB_ROOT | BPF_LIST_HEAD,
	BPF_REFCOUNT    = (1 << 9),
typedef void (*btf_dtor_kfunc_t)(void *);

struct btf_field_kptr {
	struct module *module;
	/* dtor used if btf_is_kernel(btf), otherwise the type is
	 * program-allocated, dtor is NULL, and __bpf_obj_drop_impl is used
	 */
	btf_dtor_kfunc_t dtor;

struct btf_field_graph_root {
	struct btf_record *value_rec;

	enum btf_field_type type;
		struct btf_field_kptr kptr;
		struct btf_field_graph_root graph_root;

	struct btf_field fields[];
/* Non-opaque version of bpf_rb_node in uapi/linux/bpf.h */
struct bpf_rb_node_kern {
	struct rb_node rb_node;
} __attribute__((aligned(8)));

/* Non-opaque version of bpf_list_node in uapi/linux/bpf.h */
struct bpf_list_node_kern {
	struct list_head list_head;
} __attribute__((aligned(8)));
	/* The first two cachelines with read-mostly members of which some
	 * are also accessed in fast-path (e.g. ops, max_entries).
	 */
	const struct bpf_map_ops *ops ____cacheline_aligned;
	struct bpf_map *inner_map_meta;
#ifdef CONFIG_SECURITY
	enum bpf_map_type map_type;
	u64 map_extra; /* any per-map-type extra fields */
	struct btf_record *record;
	u32 btf_value_type_id;
	u32 btf_vmlinux_value_type_id;
#ifdef CONFIG_MEMCG_KMEM
	struct obj_cgroup *objcg;
	char name[BPF_OBJ_NAME_LEN];
	/* The 3rd and 4th cacheline with misc members to avoid false sharing
	 * particularly with refcounting.
	 */
	atomic64_t refcnt ____cacheline_aligned;
	/* rcu is used before freeing and work is only used during freeing */
		struct work_struct work;
	struct mutex freeze_mutex;
	/* 'Ownership' of program-containing map is claimed by the first program
	 * that is going to use this map or by the first program whose FD is
	 * stored in the map, to make sure that all callers and callees have the
	 * same prog type, JITed flag and xdp_has_frags flag.
	 */
		enum bpf_prog_type type;
	bool frozen; /* write-once; write-protected by freeze_mutex */
	bool free_after_mult_rcu_gp;
	bool free_after_rcu_gp;
	atomic64_t sleepable_refcnt;
	s64 __percpu *elem_count;
static inline const char *btf_field_type_name(enum btf_field_type type)
		return "bpf_spin_lock";
	case BPF_KPTR_PERCPU:
		return "percpu_kptr";
		return "bpf_list_head";
		return "bpf_list_node";
		return "bpf_rb_root";
		return "bpf_rb_node";
		return "bpf_refcount";
static inline u32 btf_field_type_size(enum btf_field_type type)
		return sizeof(struct bpf_spin_lock);
		return sizeof(struct bpf_timer);
	case BPF_KPTR_PERCPU:
		return sizeof(struct bpf_list_head);
		return sizeof(struct bpf_list_node);
		return sizeof(struct bpf_rb_root);
		return sizeof(struct bpf_rb_node);
		return sizeof(struct bpf_refcount);

static inline u32 btf_field_type_align(enum btf_field_type type)
		return __alignof__(struct bpf_spin_lock);
		return __alignof__(struct bpf_timer);
	case BPF_KPTR_PERCPU:
		return __alignof__(u64);
		return __alignof__(struct bpf_list_head);
		return __alignof__(struct bpf_list_node);
		return __alignof__(struct bpf_rb_root);
		return __alignof__(struct bpf_rb_node);
		return __alignof__(struct bpf_refcount);
static inline void bpf_obj_init_field(const struct btf_field *field, void *addr)
	memset(addr, 0, field->size);

	switch (field->type) {
		refcount_set((refcount_t *)addr, 1);
		RB_CLEAR_NODE((struct rb_node *)addr);
		INIT_LIST_HEAD((struct list_head *)addr);
		/* RB_ROOT_CACHED 0-inits, no need to do anything after memset */
	case BPF_KPTR_PERCPU:

static inline bool btf_record_has_field(const struct btf_record *rec, enum btf_field_type type)
	if (IS_ERR_OR_NULL(rec))
	return rec->field_mask & type;
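/*
 * Example (illustrative, not from this header): a map implementation that
 * cannot support timers in its values could reject them at allocation-check
 * time with:
 *
 *	if (btf_record_has_field(map->record, BPF_TIMER))
 *		return -EOPNOTSUPP;
 */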
static inline void bpf_obj_init(const struct btf_record *rec, void *obj)
	if (IS_ERR_OR_NULL(rec))
	for (i = 0; i < rec->cnt; i++)
		bpf_obj_init_field(&rec->fields[i], obj + rec->fields[i].offset);

/* 'dst' must be a temporary buffer and should not point to memory that is being
 * used in parallel by a bpf program or bpf syscall, otherwise the access from
 * the bpf program or bpf syscall may be corrupted by the reinitialization,
 * leading to weird problems. Even if 'dst' is newly-allocated from the bpf
 * memory allocator, it is still possible for 'dst' to be used in parallel by a
 * bpf program or bpf syscall.
 */
static inline void check_and_init_map_value(struct bpf_map *map, void *dst)
	bpf_obj_init(map->record, dst);
/* memcpy that is used with 8-byte aligned pointers, power-of-8 size and
 * forced to use 'long' read/writes to try to atomically copy long counters.
 * Best-effort only. No barriers here, since it _will_ race with concurrent
 * updates from BPF programs. Called from bpf syscall and mostly used with
 * size 8 or 16 bytes, so ask compiler to inline it.
 */
static inline void bpf_long_memcpy(void *dst, const void *src, u32 size)
	const long *lsrc = src;

	size /= sizeof(long);
		data_race(*ldst++ = *lsrc++);
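/*
 * Example (illustrative; pcpu_pair is a hypothetical per-cpu variable):
 * draining a 16-byte {packets, bytes} counter pair into a syscall-side
 * buffer:
 *
 *	struct pair { u64 packets; u64 bytes; } snap;
 *
 *	bpf_long_memcpy(&snap, this_cpu_ptr(pcpu_pair), sizeof(snap));
 *
 * Each 8-byte word is copied with a single long access, so a concurrently
 * updating BPF program can tear the pair but not an individual counter.
 */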
/* copy everything but bpf_spin_lock, bpf_timer, and kptrs. There could be one of each. */
static inline void bpf_obj_memcpy(struct btf_record *rec,
				  void *dst, void *src, u32 size,
	if (IS_ERR_OR_NULL(rec)) {
			bpf_long_memcpy(dst, src, round_up(size, 8));
			memcpy(dst, src, size);

	for (i = 0; i < rec->cnt; i++) {
		u32 next_off = rec->fields[i].offset;
		u32 sz = next_off - curr_off;

		memcpy(dst + curr_off, src + curr_off, sz);
		curr_off += rec->fields[i].size + sz;
	memcpy(dst + curr_off, src + curr_off, size - curr_off);

static inline void copy_map_value(struct bpf_map *map, void *dst, void *src)
	bpf_obj_memcpy(map->record, dst, src, map->value_size, false);

static inline void copy_map_value_long(struct bpf_map *map, void *dst, void *src)
	bpf_obj_memcpy(map->record, dst, src, map->value_size, true);
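/*
 * Example (illustrative): for a map value laid out as
 *
 *	struct val { u64 a; struct bpf_spin_lock lock; u64 b; };
 *
 * copy_map_value() copies the byte ranges before and after 'lock' and leaves
 * the lock word in 'dst' untouched, so a copy can never corrupt lock state.
 */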
static inline void bpf_obj_memzero(struct btf_record *rec, void *dst, u32 size)
	if (IS_ERR_OR_NULL(rec)) {
		memset(dst, 0, size);

	for (i = 0; i < rec->cnt; i++) {
		u32 next_off = rec->fields[i].offset;
		u32 sz = next_off - curr_off;

		memset(dst + curr_off, 0, sz);
		curr_off += rec->fields[i].size + sz;
	memset(dst + curr_off, 0, size - curr_off);

static inline void zero_map_value(struct bpf_map *map, void *dst)
	bpf_obj_memzero(map->record, dst, map->value_size);

void copy_map_value_locked(struct bpf_map *map, void *dst, void *src,
void bpf_timer_cancel_and_free(void *timer);
void bpf_list_head_free(const struct btf_field *field, void *list_head,
			struct bpf_spin_lock *spin_lock);
void bpf_rb_root_free(const struct btf_field *field, void *rb_root,
		      struct bpf_spin_lock *spin_lock);

int bpf_obj_name_cpy(char *dst, const char *src, unsigned int size);
struct bpf_offload_dev;
struct bpf_offloaded_map;

struct bpf_map_dev_ops {
	int (*map_get_next_key)(struct bpf_offloaded_map *map,
				void *key, void *next_key);
	int (*map_lookup_elem)(struct bpf_offloaded_map *map,
			       void *key, void *value);
	int (*map_update_elem)(struct bpf_offloaded_map *map,
			       void *key, void *value, u64 flags);
	int (*map_delete_elem)(struct bpf_offloaded_map *map, void *key);

struct bpf_offloaded_map {
	struct net_device *netdev;
	const struct bpf_map_dev_ops *dev_ops;
	struct list_head offloads;

static inline struct bpf_offloaded_map *map_to_offmap(struct bpf_map *map)
	return container_of(map, struct bpf_offloaded_map, map);

static inline bool bpf_map_offload_neutral(const struct bpf_map *map)
	return map->map_type == BPF_MAP_TYPE_PERF_EVENT_ARRAY;

static inline bool bpf_map_support_seq_show(const struct bpf_map *map)
	return (map->btf_value_type_id || map->btf_vmlinux_value_type_id) &&
	       map->ops->map_seq_show_elem;

int map_check_no_btf(const struct bpf_map *map,
		     const struct btf *btf,
		     const struct btf_type *key_type,
		     const struct btf_type *value_type);

bool bpf_map_meta_equal(const struct bpf_map *meta0,
			const struct bpf_map *meta1);
extern const struct bpf_map_ops bpf_map_offload_ops;

/* bpf_type_flag contains a set of flags that are applicable to the values of
 * arg_type, ret_type and reg_type. For example, a pointer value may be null,
 * or a memory is read-only. We classify types into two categories: base types
 * and extended types. Extended types are base types combined with a type flag.
 *
 * Currently there are no more than 32 base types in arg_type, ret_type and
 * reg_type.
 */
#define BPF_BASE_TYPE_BITS	8

	/* PTR may be NULL. */
	PTR_MAYBE_NULL		= BIT(0 + BPF_BASE_TYPE_BITS),

	/* MEM is read-only. When applied on bpf_arg, it indicates the arg is
	 * compatible with both mutable and immutable memory.
	 */
	MEM_RDONLY		= BIT(1 + BPF_BASE_TYPE_BITS),

	/* MEM points to BPF ring buffer reservation. */
	MEM_RINGBUF		= BIT(2 + BPF_BASE_TYPE_BITS),

	/* MEM is in user address space. */
	MEM_USER		= BIT(3 + BPF_BASE_TYPE_BITS),

	/* MEM is a percpu memory. MEM_PERCPU tags PTR_TO_BTF_ID. When tagged
	 * with MEM_PERCPU, PTR_TO_BTF_ID _cannot_ be directly accessed. In
	 * order to drop this tag, it must be passed into bpf_per_cpu_ptr()
	 * or bpf_this_cpu_ptr(), which will return the pointer corresponding
	 * to the specified cpu.
	 */
	MEM_PERCPU		= BIT(4 + BPF_BASE_TYPE_BITS),

	/* Indicates that the argument will be released. */
	OBJ_RELEASE		= BIT(5 + BPF_BASE_TYPE_BITS),

	/* PTR is not trusted. This is only used with PTR_TO_BTF_ID, to mark
	 * unreferenced and referenced kptr loaded from map value using a load
	 * instruction, so that they can only be dereferenced but not escape the
	 * BPF program into the kernel (i.e. cannot be passed as arguments to
	 * kfunc or bpf helpers).
	 */
	PTR_UNTRUSTED		= BIT(6 + BPF_BASE_TYPE_BITS),

	MEM_UNINIT		= BIT(7 + BPF_BASE_TYPE_BITS),
	/* DYNPTR points to memory local to the bpf program. */
	DYNPTR_TYPE_LOCAL	= BIT(8 + BPF_BASE_TYPE_BITS),

	/* DYNPTR points to a kernel-produced ringbuf record. */
	DYNPTR_TYPE_RINGBUF	= BIT(9 + BPF_BASE_TYPE_BITS),

	/* Size is known at compile time. */
	MEM_FIXED_SIZE		= BIT(10 + BPF_BASE_TYPE_BITS),

	/* MEM is of an allocated object of type in program BTF. This is used to
	 * tag PTR_TO_BTF_ID allocated using bpf_obj_new.
	 */
	MEM_ALLOC		= BIT(11 + BPF_BASE_TYPE_BITS),

	/* PTR was passed from the kernel in a trusted context, and may be
	 * passed to KF_TRUSTED_ARGS kfuncs or BPF helper functions.
	 * Confusingly, this is _not_ the opposite of PTR_UNTRUSTED above.
	 * PTR_UNTRUSTED refers to a kptr that was read directly from a map
	 * without invoking bpf_kptr_xchg(). What we really need to know is
	 * whether a pointer is safe to pass to a kfunc or BPF helper function.
	 * While PTR_UNTRUSTED pointers are unsafe to pass to kfuncs and BPF
	 * helpers, they do not cover all possible instances of unsafe
	 * pointers. For example, a pointer that was obtained from walking a
	 * struct will _not_ get the PTR_UNTRUSTED type modifier, despite the
	 * fact that it may be NULL, invalid, etc. This is due to backwards
	 * compatibility requirements, as this was the behavior that was first
	 * introduced when kptrs were added. The behavior is now considered
	 * deprecated, and PTR_UNTRUSTED will eventually be removed.
	 *
	 * PTR_TRUSTED, on the other hand, is a pointer that the kernel
	 * guarantees to be valid and safe to pass to kfuncs and BPF helpers.
	 * For example, pointers passed to tracepoint arguments are considered
	 * PTR_TRUSTED, as are pointers that are passed to struct_ops
	 * callbacks. As alluded to above, pointers that are obtained from
	 * walking PTR_TRUSTED pointers are _not_ trusted. For example, if a
	 * struct task_struct *task is PTR_TRUSTED, then accessing
	 * task->last_wakee will lose the PTR_TRUSTED modifier when it's stored
	 * in a BPF register. Similarly, pointers passed to certain program
	 * types such as kretprobes are not guaranteed to be valid, as they may
	 * for example contain an object that was recently freed.
	 */
	PTR_TRUSTED		= BIT(12 + BPF_BASE_TYPE_BITS),

	/* MEM is tagged with rcu and memory access needs rcu_read_lock protection. */
	MEM_RCU			= BIT(13 + BPF_BASE_TYPE_BITS),

	/* Used to tag PTR_TO_BTF_ID | MEM_ALLOC references which are non-owning.
	 * Currently only valid for linked-list and rbtree nodes. If the nodes
	 * have a bpf_refcount_field, they must be tagged MEM_RCU as well.
	 */
	NON_OWN_REF		= BIT(14 + BPF_BASE_TYPE_BITS),

	/* DYNPTR points to sk_buff */
	DYNPTR_TYPE_SKB		= BIT(15 + BPF_BASE_TYPE_BITS),

	/* DYNPTR points to xdp_buff */
	DYNPTR_TYPE_XDP		= BIT(16 + BPF_BASE_TYPE_BITS),

	__BPF_TYPE_FLAG_MAX,
	__BPF_TYPE_LAST_FLAG	= __BPF_TYPE_FLAG_MAX - 1,

#define DYNPTR_TYPE_FLAG_MASK	(DYNPTR_TYPE_LOCAL | DYNPTR_TYPE_RINGBUF | DYNPTR_TYPE_SKB \

/* Max number of base types. */
#define BPF_BASE_TYPE_LIMIT	(1UL << BPF_BASE_TYPE_BITS)

/* Max number of all types. */
#define BPF_TYPE_LIMIT		(__BPF_TYPE_LAST_FLAG | (__BPF_TYPE_LAST_FLAG - 1))
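/*
 * Example (illustrative): an extended type is a base type OR'ed with flags,
 * so the two halves can be split with BPF_BASE_TYPE_LIMIT; the verifier has
 * dedicated helpers for this (base_type()/type_flag() in bpf_verifier.h):
 *
 *	u32 t     = PTR_TO_SOCKET | PTR_MAYBE_NULL;  // PTR_TO_SOCKET_OR_NULL
 *	u32 base  = t & (BPF_BASE_TYPE_LIMIT - 1);   // PTR_TO_SOCKET
 *	u32 flags = t & ~(BPF_BASE_TYPE_LIMIT - 1);  // PTR_MAYBE_NULL
 */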
/* function argument constraints */
	ARG_DONTCARE = 0,	/* unused argument in helper function */

	/* the following constraints used to prototype
	 * bpf_map_lookup/update/delete_elem() functions
	 */
	ARG_CONST_MAP_PTR,	/* const argument used as pointer to bpf_map */
	ARG_PTR_TO_MAP_KEY,	/* pointer to stack used as map key */
	ARG_PTR_TO_MAP_VALUE,	/* pointer to stack used as map value */

	/* Used to prototype bpf_memcmp() and other functions that access data
	 * on eBPF program stack
	 */
	ARG_PTR_TO_MEM,		/* pointer to valid memory (stack, packet, map value) */

	ARG_CONST_SIZE,		/* number of bytes accessed from memory */
	ARG_CONST_SIZE_OR_ZERO,	/* number of bytes accessed from memory or 0 */

	ARG_PTR_TO_CTX,		/* pointer to context */
	ARG_ANYTHING,		/* any (initialized) argument is ok */
	ARG_PTR_TO_SPIN_LOCK,	/* pointer to bpf_spin_lock */
	ARG_PTR_TO_SOCK_COMMON,	/* pointer to sock_common */
	ARG_PTR_TO_INT,		/* pointer to int */
	ARG_PTR_TO_LONG,	/* pointer to long */
	ARG_PTR_TO_SOCKET,	/* pointer to bpf_sock (fullsock) */
	ARG_PTR_TO_BTF_ID,	/* pointer to in-kernel struct */
	ARG_PTR_TO_RINGBUF_MEM,	/* pointer to dynamically reserved ringbuf memory */
	ARG_CONST_ALLOC_SIZE_OR_ZERO,	/* number of allocated bytes requested */
	ARG_PTR_TO_BTF_ID_SOCK_COMMON,	/* pointer to in-kernel sock_common or bpf-mirrored bpf_sock */
	ARG_PTR_TO_PERCPU_BTF_ID,	/* pointer to in-kernel percpu type */
	ARG_PTR_TO_FUNC,	/* pointer to a bpf program function */
	ARG_PTR_TO_STACK,	/* pointer to stack */
	ARG_PTR_TO_CONST_STR,	/* pointer to a null terminated read-only string */
	ARG_PTR_TO_TIMER,	/* pointer to bpf_timer */
	ARG_PTR_TO_KPTR,	/* pointer to referenced kptr */
	ARG_PTR_TO_DYNPTR,	/* pointer to bpf_dynptr. See bpf_type_flag for dynptr type */

	/* Extended arg_types. */
	ARG_PTR_TO_MAP_VALUE_OR_NULL	= PTR_MAYBE_NULL | ARG_PTR_TO_MAP_VALUE,
	ARG_PTR_TO_MEM_OR_NULL		= PTR_MAYBE_NULL | ARG_PTR_TO_MEM,
	ARG_PTR_TO_CTX_OR_NULL		= PTR_MAYBE_NULL | ARG_PTR_TO_CTX,
	ARG_PTR_TO_SOCKET_OR_NULL	= PTR_MAYBE_NULL | ARG_PTR_TO_SOCKET,
	ARG_PTR_TO_STACK_OR_NULL	= PTR_MAYBE_NULL | ARG_PTR_TO_STACK,
	ARG_PTR_TO_BTF_ID_OR_NULL	= PTR_MAYBE_NULL | ARG_PTR_TO_BTF_ID,
	/* pointer to memory does not need to be initialized, helper function must fill
	 * all bytes or clear them in error case.
	 */
	ARG_PTR_TO_UNINIT_MEM		= MEM_UNINIT | ARG_PTR_TO_MEM,
	/* Pointer to valid memory of size known at compile time. */
	ARG_PTR_TO_FIXED_SIZE_MEM	= MEM_FIXED_SIZE | ARG_PTR_TO_MEM,

	/* This must be the last entry. Its purpose is to ensure the enum is
	 * wide enough to hold the higher bits reserved for bpf_type_flag.
	 */
	__BPF_ARG_TYPE_LIMIT		= BPF_TYPE_LIMIT,

static_assert(__BPF_ARG_TYPE_MAX <= BPF_BASE_TYPE_LIMIT);
/* type of values returned from helper functions */
enum bpf_return_type {
	RET_INTEGER,			/* function returns integer */
	RET_VOID,			/* function doesn't return anything */
	RET_PTR_TO_MAP_VALUE,		/* returns a pointer to map elem value */
	RET_PTR_TO_SOCKET,		/* returns a pointer to a socket */
	RET_PTR_TO_TCP_SOCK,		/* returns a pointer to a tcp_sock */
	RET_PTR_TO_SOCK_COMMON,		/* returns a pointer to a sock_common */
	RET_PTR_TO_MEM,			/* returns a pointer to memory */
	RET_PTR_TO_MEM_OR_BTF_ID,	/* returns a pointer to a valid memory or a btf_id */
	RET_PTR_TO_BTF_ID,		/* returns a pointer to a btf_id */

	/* Extended ret_types. */
	RET_PTR_TO_MAP_VALUE_OR_NULL	= PTR_MAYBE_NULL | RET_PTR_TO_MAP_VALUE,
	RET_PTR_TO_SOCKET_OR_NULL	= PTR_MAYBE_NULL | RET_PTR_TO_SOCKET,
	RET_PTR_TO_TCP_SOCK_OR_NULL	= PTR_MAYBE_NULL | RET_PTR_TO_TCP_SOCK,
	RET_PTR_TO_SOCK_COMMON_OR_NULL	= PTR_MAYBE_NULL | RET_PTR_TO_SOCK_COMMON,
	RET_PTR_TO_RINGBUF_MEM_OR_NULL	= PTR_MAYBE_NULL | MEM_RINGBUF | RET_PTR_TO_MEM,
	RET_PTR_TO_DYNPTR_MEM_OR_NULL	= PTR_MAYBE_NULL | RET_PTR_TO_MEM,
	RET_PTR_TO_BTF_ID_OR_NULL	= PTR_MAYBE_NULL | RET_PTR_TO_BTF_ID,
	RET_PTR_TO_BTF_ID_TRUSTED	= PTR_TRUSTED | RET_PTR_TO_BTF_ID,

	/* This must be the last entry. Its purpose is to ensure the enum is
	 * wide enough to hold the higher bits reserved for bpf_type_flag.
	 */
	__BPF_RET_TYPE_LIMIT		= BPF_TYPE_LIMIT,

static_assert(__BPF_RET_TYPE_MAX <= BPF_BASE_TYPE_LIMIT);
/* eBPF function prototype used by verifier to allow BPF_CALLs from eBPF programs
 * to in-kernel helper functions and for adjusting imm32 field in BPF_CALL
 * instructions after verifying
 */
struct bpf_func_proto {
	u64 (*func)(u64 r1, u64 r2, u64 r3, u64 r4, u64 r5);
	enum bpf_return_type ret_type;
			enum bpf_arg_type arg1_type;
			enum bpf_arg_type arg2_type;
			enum bpf_arg_type arg3_type;
			enum bpf_arg_type arg4_type;
			enum bpf_arg_type arg5_type;
		enum bpf_arg_type arg_type[5];
	int *ret_btf_id; /* return value btf_id */
	bool (*allowed)(const struct bpf_prog *prog);
/* bpf_context is intentionally undefined structure. Pointer to bpf_context is
 * the first argument to eBPF programs.
 * For socket filters: 'struct bpf_context *' == 'struct sk_buff *'
 */

enum bpf_access_type {

/* types of values stored in eBPF registers */
/* Pointer types represent:
 * pointer + (u16) var
 * pointer + (u16) var + imm
 * if (range > 0) then [ptr, ptr + range - off) is safe to access
 * if (id > 0) means that some 'var' was added
 * if (off > 0) means that 'imm' was added
 */
	NOT_INIT = 0,		 /* nothing was written into register */
	SCALAR_VALUE,		 /* reg doesn't contain a valid pointer */
	PTR_TO_CTX,		 /* reg points to bpf_context */
	CONST_PTR_TO_MAP,	 /* reg points to struct bpf_map */
	PTR_TO_MAP_VALUE,	 /* reg points to map element value */
	PTR_TO_MAP_KEY,		 /* reg points to a map element key */
	PTR_TO_STACK,		 /* reg == frame_pointer + offset */
	PTR_TO_PACKET_META,	 /* skb->data - meta_len */
	PTR_TO_PACKET,		 /* reg points to skb->data */
	PTR_TO_PACKET_END,	 /* skb->data + headlen */
	PTR_TO_FLOW_KEYS,	 /* reg points to bpf_flow_keys */
	PTR_TO_SOCKET,		 /* reg points to struct bpf_sock */
	PTR_TO_SOCK_COMMON,	 /* reg points to sock_common */
	PTR_TO_TCP_SOCK,	 /* reg points to struct tcp_sock */
	PTR_TO_TP_BUFFER,	 /* reg points to a writable raw tp's buffer */
	PTR_TO_XDP_SOCK,	 /* reg points to struct xdp_sock */
	/* PTR_TO_BTF_ID points to a kernel struct that does not need
	 * to be null checked by the BPF program. This does not imply the
	 * pointer is _not_ null and in practice this can easily be a null
	 * pointer when reading pointer chains. The assumption is program
	 * context will handle null pointer dereference typically via fault
	 * handling. The verifier must keep this in mind and can make no
	 * assumptions about null or non-null when doing branch analysis.
	 * Further, when passed into helpers the helpers can not, without
	 * additional context, assume the value is non-null.
	 */
	PTR_TO_BTF_ID,		 /* reg points to in-kernel struct */
	/* PTR_TO_BTF_ID_OR_NULL points to a kernel struct that has not
	 * been checked for null. Used primarily to inform the verifier
	 * an explicit null check is required for this struct.
	 */
	PTR_TO_MEM,		 /* reg points to valid memory region */
	PTR_TO_BUF,		 /* reg points to a read/write buffer */
	PTR_TO_FUNC,		 /* reg points to a bpf program function */
	CONST_PTR_TO_DYNPTR,	 /* reg points to a const struct bpf_dynptr */

	/* Extended reg_types. */
	PTR_TO_MAP_VALUE_OR_NULL	= PTR_MAYBE_NULL | PTR_TO_MAP_VALUE,
	PTR_TO_SOCKET_OR_NULL		= PTR_MAYBE_NULL | PTR_TO_SOCKET,
	PTR_TO_SOCK_COMMON_OR_NULL	= PTR_MAYBE_NULL | PTR_TO_SOCK_COMMON,
	PTR_TO_TCP_SOCK_OR_NULL		= PTR_MAYBE_NULL | PTR_TO_TCP_SOCK,
	PTR_TO_BTF_ID_OR_NULL		= PTR_MAYBE_NULL | PTR_TO_BTF_ID,

	/* This must be the last entry. Its purpose is to ensure the enum is
	 * wide enough to hold the higher bits reserved for bpf_type_flag.
	 */
	__BPF_REG_TYPE_LIMIT	= BPF_TYPE_LIMIT,

static_assert(__BPF_REG_TYPE_MAX <= BPF_BASE_TYPE_LIMIT);
/* The information passed from prog-specific *_is_valid_access
 * back to the verifier.
 */
struct bpf_insn_access_aux {
	enum bpf_reg_type reg_type;
	struct bpf_verifier_log *log; /* for verbose logs */

bpf_ctx_record_field_size(struct bpf_insn_access_aux *aux, u32 size)
	aux->ctx_field_size = size;

static inline bool bpf_is_ldimm64(const struct bpf_insn *insn)
	return insn->code == (BPF_LD | BPF_IMM | BPF_DW);

static inline bool bpf_pseudo_func(const struct bpf_insn *insn)
	return bpf_is_ldimm64(insn) && insn->src_reg == BPF_PSEUDO_FUNC;
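/*
 * Example (illustrative): ld_imm64 is the only 16-byte BPF instruction, so
 * anything walking an instruction array must skip its second half; a
 * BPF_PSEUDO_FUNC ld_imm64 is how the verifier encodes "address of a
 * subprog" for callbacks such as the one taken by bpf_loop():
 *
 *	for (i = 0; i < prog->len; i++) {
 *		if (bpf_pseudo_func(&insnsi[i]))
 *			;	// loads a subprog address, not a plain constant
 *		if (bpf_is_ldimm64(&insnsi[i]))
 *			i++;	// skip the second half of the 16-byte insn
 *	}
 */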
struct bpf_prog_ops {
	int (*test_run)(struct bpf_prog *prog, const union bpf_attr *kattr,
			union bpf_attr __user *uattr);

struct bpf_reg_state;
struct bpf_verifier_ops {
	/* return eBPF function prototype for verification */
	const struct bpf_func_proto *
	(*get_func_proto)(enum bpf_func_id func_id,
			  const struct bpf_prog *prog);

	/* return true if 'size' wide access at offset 'off' within bpf_context
	 * with 'type' (read or write) is allowed
	 */
	bool (*is_valid_access)(int off, int size, enum bpf_access_type type,
				const struct bpf_prog *prog,
				struct bpf_insn_access_aux *info);
	int (*gen_prologue)(struct bpf_insn *insn, bool direct_write,
			    const struct bpf_prog *prog);
	int (*gen_ld_abs)(const struct bpf_insn *orig,
			  struct bpf_insn *insn_buf);
	u32 (*convert_ctx_access)(enum bpf_access_type type,
				  const struct bpf_insn *src,
				  struct bpf_insn *dst,
				  struct bpf_prog *prog, u32 *target_size);
	int (*btf_struct_access)(struct bpf_verifier_log *log,
				 const struct bpf_reg_state *reg,
struct bpf_prog_offload_ops {
	/* verifier basic callbacks */
	int (*insn_hook)(struct bpf_verifier_env *env,
			 int insn_idx, int prev_insn_idx);
	int (*finalize)(struct bpf_verifier_env *env);
	/* verifier optimization callbacks (called after .finalize) */
	int (*replace_insn)(struct bpf_verifier_env *env, u32 off,
			    struct bpf_insn *insn);
	int (*remove_insns)(struct bpf_verifier_env *env, u32 off, u32 cnt);
	/* program management callbacks */
	int (*prepare)(struct bpf_prog *prog);
	int (*translate)(struct bpf_prog *prog);
	void (*destroy)(struct bpf_prog *prog);

struct bpf_prog_offload {
	struct bpf_prog *prog;
	struct net_device *netdev;
	struct bpf_offload_dev *offdev;
	struct list_head offloads;

enum bpf_cgroup_storage_type {
	BPF_CGROUP_STORAGE_SHARED,
	BPF_CGROUP_STORAGE_PERCPU,
	__BPF_CGROUP_STORAGE_MAX

#define MAX_BPF_CGROUP_STORAGE_TYPE __BPF_CGROUP_STORAGE_MAX
/* The longest tracepoint has 12 args.
 * See include/trace/bpf_probe.h
 */
#define MAX_BPF_FUNC_ARGS	12

/* The maximum number of arguments passed through registers
 * a single function may have.
 */
#define MAX_BPF_FUNC_REG_ARGS	5

/* The argument is a structure. */
#define BTF_FMODEL_STRUCT_ARG	BIT(0)

/* The argument is signed. */
#define BTF_FMODEL_SIGNED_ARG	BIT(1)

struct btf_func_model {
	u8 arg_size[MAX_BPF_FUNC_ARGS];
	u8 arg_flags[MAX_BPF_FUNC_ARGS];
/* Restore arguments before returning from trampoline to let original function
 * continue executing. This flag is used for fentry progs when there are no
#define BPF_TRAMP_F_RESTORE_REGS	BIT(0)

/* Call original function after fentry progs, but before fexit progs.
 * Makes sense for fentry/fexit, normal calls and indirect calls.
 */
#define BPF_TRAMP_F_CALL_ORIG		BIT(1)

/* Skip current frame and return to parent. Makes sense for fentry/fexit
 * programs only. Should not be used with normal calls and indirect calls.
 */
#define BPF_TRAMP_F_SKIP_FRAME		BIT(2)

/* Store IP address of the caller on the trampoline stack,
 * so it's available for trampoline's programs.
 */
#define BPF_TRAMP_F_IP_ARG		BIT(3)

/* Return the return value of fentry prog. Only used by bpf_struct_ops. */
#define BPF_TRAMP_F_RET_FENTRY_RET	BIT(4)

/* Get original function from stack instead of from provided direct address.
 * Makes sense for trampolines with fexit or fmod_ret programs.
 */
#define BPF_TRAMP_F_ORIG_STACK		BIT(5)

/* This trampoline is on a function with another ftrace_ops with IPMODIFY,
 * e.g., a live patch. This flag is set and cleared by ftrace callbacks,
#define BPF_TRAMP_F_SHARE_IPMODIFY	BIT(6)

/* Indicate that current trampoline is in a tail call context. Then, it has to
 * cache and restore tail_call_cnt to avoid infinite tail call loop.
 */
#define BPF_TRAMP_F_TAIL_CALL_CTX	BIT(7)

/*
 * Indicate the trampoline should be suitable to receive indirect calls;
 * without this indirectly calling the generated code can result in #UD/#CP,
 * depending on the CFI options.
 *
 * Used by bpf_struct_ops.
 *
 * Incompatible with FENTRY usage, overloads @func_addr argument.
 */
#define BPF_TRAMP_F_INDIRECT		BIT(8)

/* Each call __bpf_prog_enter + call bpf_func + call __bpf_prog_exit is ~50
#if defined(__s390x__)
	BPF_MAX_TRAMP_LINKS = 27,
#else
	BPF_MAX_TRAMP_LINKS = 38,
struct bpf_tramp_links {
	struct bpf_tramp_link *links[BPF_MAX_TRAMP_LINKS];

struct bpf_tramp_run_ctx;

/* Different use cases for BPF trampoline:
 * 1. replace nop at the function entry (kprobe equivalent)
 *    flags = BPF_TRAMP_F_RESTORE_REGS
 *    fentry = a set of programs to run before returning from trampoline
 *
 * 2. replace nop at the function entry (kprobe + kretprobe equivalent)
 *    flags = BPF_TRAMP_F_CALL_ORIG | BPF_TRAMP_F_SKIP_FRAME
 *    orig_call = fentry_ip + MCOUNT_INSN_SIZE
 *    fentry = a set of programs to run before calling original function
 *    fexit = a set of programs to run after original function
 *
 * 3. replace direct call instruction anywhere in the function body
 *    or assign a function pointer for indirect call (like tcp_congestion_ops->cong_avoid)
 *    fentry = a set of programs to run before returning from trampoline
 *    With flags = BPF_TRAMP_F_CALL_ORIG
 *    orig_call = original callback addr or direct function addr
 *    fentry = a set of programs to run before calling original function
 *    fexit = a set of programs to run after original function
 */
struct bpf_tramp_image;
int arch_prepare_bpf_trampoline(struct bpf_tramp_image *im, void *image, void *image_end,
				const struct btf_func_model *m, u32 flags,
				struct bpf_tramp_links *tlinks,
void *arch_alloc_bpf_trampoline(unsigned int size);
void arch_free_bpf_trampoline(void *image, unsigned int size);
void arch_protect_bpf_trampoline(void *image, unsigned int size);
void arch_unprotect_bpf_trampoline(void *image, unsigned int size);
int arch_bpf_trampoline_size(const struct btf_func_model *m, u32 flags,
			     struct bpf_tramp_links *tlinks, void *func_addr);
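/*
 * Example (illustrative sketch of how these fit together; the final,
 * elided func_addr argument of arch_prepare_bpf_trampoline() is assumed
 * here):
 *
 *	size = arch_bpf_trampoline_size(m, flags, tlinks, func_addr);
 *	image = arch_alloc_bpf_trampoline(size);
 *	err = arch_prepare_bpf_trampoline(im, image, image + size,
 *					  m, flags, tlinks, func_addr);
 *	if (!err)
 *		arch_protect_bpf_trampoline(image, size);
 */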
u64 notrace __bpf_prog_enter_sleepable_recur(struct bpf_prog *prog,
					     struct bpf_tramp_run_ctx *run_ctx);
void notrace __bpf_prog_exit_sleepable_recur(struct bpf_prog *prog, u64 start,
					     struct bpf_tramp_run_ctx *run_ctx);
void notrace __bpf_tramp_enter(struct bpf_tramp_image *tr);
void notrace __bpf_tramp_exit(struct bpf_tramp_image *tr);
typedef u64 (*bpf_trampoline_enter_t)(struct bpf_prog *prog,
				      struct bpf_tramp_run_ctx *run_ctx);
typedef void (*bpf_trampoline_exit_t)(struct bpf_prog *prog, u64 start,
				      struct bpf_tramp_run_ctx *run_ctx);
bpf_trampoline_enter_t bpf_trampoline_enter(const struct bpf_prog *prog);
bpf_trampoline_exit_t bpf_trampoline_exit(const struct bpf_prog *prog);

	unsigned long start;
	char name[KSYM_NAME_LEN];
	struct list_head lnode;
	struct latch_tree_node tnode;
enum bpf_tramp_prog_type {
	BPF_TRAMP_MODIFY_RETURN,
	BPF_TRAMP_REPLACE, /* more than MAX */

struct bpf_tramp_image {
	struct bpf_ksym ksym;
	struct percpu_ref pcref;
	void *ip_after_call;
		struct rcu_head rcu;
		struct work_struct work;

struct bpf_trampoline {
	/* hlist for trampoline_table */
	struct hlist_node hlist;
	struct ftrace_ops *fops;
	/* serializes access to fields of this trampoline */
	struct btf_func_model model;
	bool ftrace_managed;
	/* if !NULL this is BPF_PROG_TYPE_EXT program that extends another BPF
	 * program by replacing one of its functions. func.addr is the address
	 * of the function it replaced.
	 */
	struct bpf_prog *extension_prog;
	/* list of BPF programs using this trampoline */
	struct hlist_head progs_hlist[BPF_TRAMP_MAX];
	/* Number of attached programs. A counter per kind. */
	int progs_cnt[BPF_TRAMP_MAX];
	/* Executable image of trampoline */
	struct bpf_tramp_image *cur_image;

struct bpf_attach_target_info {
	struct btf_func_model fmodel;
	struct module *tgt_mod;
	const char *tgt_name;
	const struct btf_type *tgt_type;
#define BPF_DISPATCHER_MAX	48 /* Fits in 2048B */

struct bpf_dispatcher_prog {
	struct bpf_prog *prog;

struct bpf_dispatcher {
	/* dispatcher mutex */
	struct bpf_dispatcher_prog progs[BPF_DISPATCHER_MAX];
	struct bpf_ksym ksym;
#ifdef CONFIG_HAVE_STATIC_CALL
	struct static_call_key *sc_key;

#define __bpfcall __nocfi

static __always_inline __bpfcall unsigned int bpf_dispatcher_nop_func(
	const struct bpf_insn *insnsi,
	bpf_func_t bpf_func)
	return bpf_func(ctx, insnsi);
/* the implementation of the opaque uapi struct bpf_dynptr */
struct bpf_dynptr_kern {
	/* Size represents the number of usable bytes of dynptr data.
	 * If for example the offset is at 4 for a local dynptr whose data is
	 * of type u64, the number of usable bytes is 4.
	 *
	 * The upper 8 bits are reserved. It is as follows:
	 * Bits 0 - 23 = size
	 * Bits 24 - 30 = dynptr type
	 * Bit 31 = whether dynptr is read-only
	 */

enum bpf_dynptr_type {
	BPF_DYNPTR_TYPE_INVALID,
	/* Points to memory that is local to the bpf program */
	BPF_DYNPTR_TYPE_LOCAL,
	/* Underlying data is a ringbuf record */
	BPF_DYNPTR_TYPE_RINGBUF,
	/* Underlying data is a sk_buff */
	BPF_DYNPTR_TYPE_SKB,
	/* Underlying data is a xdp_buff */
	BPF_DYNPTR_TYPE_XDP,

int bpf_dynptr_check_size(u32 size);
u32 __bpf_dynptr_size(const struct bpf_dynptr_kern *ptr);
const void *__bpf_dynptr_data(const struct bpf_dynptr_kern *ptr, u32 len);
void *__bpf_dynptr_data_rw(const struct bpf_dynptr_kern *ptr, u32 len);
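/*
 * Example (illustrative): with the bit layout described above, a read-only
 * skb dynptr wrapping 100 bytes would carry, in its size word:
 *
 *	100 | (BPF_DYNPTR_TYPE_SKB << 24) | (1U << 31)
 *
 * so __bpf_dynptr_size() masks out the top byte to recover 100.
 */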
#ifdef CONFIG_BPF_JIT
int bpf_trampoline_link_prog(struct bpf_tramp_link *link, struct bpf_trampoline *tr);
int bpf_trampoline_unlink_prog(struct bpf_tramp_link *link, struct bpf_trampoline *tr);
struct bpf_trampoline *bpf_trampoline_get(u64 key,
					  struct bpf_attach_target_info *tgt_info);
void bpf_trampoline_put(struct bpf_trampoline *tr);
int arch_prepare_bpf_dispatcher(void *image, void *buf, s64 *funcs, int num_funcs);

/*
 * When the architecture supports STATIC_CALL replace the bpf_dispatcher_fn
 * indirection with a direct call to the bpf program. If the architecture does
 * not have STATIC_CALL, avoid a double-indirection.
 */
#ifdef CONFIG_HAVE_STATIC_CALL

#define __BPF_DISPATCHER_SC_INIT(_name)				\
	.sc_key = &STATIC_CALL_KEY(_name),			\
	.sc_tramp = STATIC_CALL_TRAMP_ADDR(_name),

#define __BPF_DISPATCHER_SC(name)				\
	DEFINE_STATIC_CALL(bpf_dispatcher_##name##_call, bpf_dispatcher_nop_func)

#define __BPF_DISPATCHER_CALL(name)				\
	static_call(bpf_dispatcher_##name##_call)(ctx, insnsi, bpf_func)

#define __BPF_DISPATCHER_UPDATE(_d, _new)			\
	__static_call_update((_d)->sc_key, (_d)->sc_tramp, (_new))

#else
#define __BPF_DISPATCHER_SC_INIT(name)
#define __BPF_DISPATCHER_SC(name)
#define __BPF_DISPATCHER_CALL(name)	bpf_func(ctx, insnsi)
#define __BPF_DISPATCHER_UPDATE(_d, _new)
#define BPF_DISPATCHER_INIT(_name) {				\
	.mutex = __MUTEX_INITIALIZER(_name.mutex),		\
	.func = &_name##_func,					\
	.lnode = LIST_HEAD_INIT(_name.ksym.lnode),		\
	__BPF_DISPATCHER_SC_INIT(_name##_call)			\

#define DEFINE_BPF_DISPATCHER(name)					\
	__BPF_DISPATCHER_SC(name);					\
	noinline __bpfcall unsigned int bpf_dispatcher_##name##_func(	\
		const struct bpf_insn *insnsi,				\
		bpf_func_t bpf_func)					\
		return __BPF_DISPATCHER_CALL(name);			\
	EXPORT_SYMBOL(bpf_dispatcher_##name##_func);			\
	struct bpf_dispatcher bpf_dispatcher_##name =			\
		BPF_DISPATCHER_INIT(bpf_dispatcher_##name);

#define DECLARE_BPF_DISPATCHER(name)				\
	unsigned int bpf_dispatcher_##name##_func(		\
		const struct bpf_insn *insnsi,			\
		bpf_func_t bpf_func);				\
	extern struct bpf_dispatcher bpf_dispatcher_##name;

#define BPF_DISPATCHER_FUNC(name)	bpf_dispatcher_##name##_func
#define BPF_DISPATCHER_PTR(name)	(&bpf_dispatcher_##name)
void bpf_dispatcher_change_prog(struct bpf_dispatcher *d, struct bpf_prog *from,
				struct bpf_prog *to);
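/*
 * Example (illustrative; the kernel wires up the XDP dispatcher in
 * net/core/filter.c along these lines, with "my_disp" a hypothetical name):
 *
 *	DEFINE_BPF_DISPATCHER(my_disp)
 *
 *	static void my_disp_change(struct bpf_prog *prev, struct bpf_prog *prog)
 *	{
 *		bpf_dispatcher_change_prog(BPF_DISPATCHER_PTR(my_disp),
 *					   prev, prog);
 *	}
 *
 * and a program is then invoked through the dispatcher with:
 *
 *	BPF_DISPATCHER_FUNC(my_disp)(ctx, prog->insnsi, prog->bpf_func);
 */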
/* Called only from JIT-enabled code, so there's no need for stubs. */
void bpf_image_ksym_add(void *data, unsigned int size, struct bpf_ksym *ksym);
void bpf_image_ksym_del(struct bpf_ksym *ksym);
void bpf_ksym_add(struct bpf_ksym *ksym);
void bpf_ksym_del(struct bpf_ksym *ksym);
int bpf_jit_charge_modmem(u32 size);
void bpf_jit_uncharge_modmem(u32 size);
bool bpf_prog_has_trampoline(const struct bpf_prog *prog);
static inline int bpf_trampoline_link_prog(struct bpf_tramp_link *link,
					   struct bpf_trampoline *tr)
static inline int bpf_trampoline_unlink_prog(struct bpf_tramp_link *link,
					     struct bpf_trampoline *tr)
static inline struct bpf_trampoline *bpf_trampoline_get(u64 key,
							struct bpf_attach_target_info *tgt_info)
static inline void bpf_trampoline_put(struct bpf_trampoline *tr) {}
#define DEFINE_BPF_DISPATCHER(name)
#define DECLARE_BPF_DISPATCHER(name)
#define BPF_DISPATCHER_FUNC(name)	bpf_dispatcher_nop_func
#define BPF_DISPATCHER_PTR(name)	NULL
static inline void bpf_dispatcher_change_prog(struct bpf_dispatcher *d,
					      struct bpf_prog *from,
					      struct bpf_prog *to) {}
static inline bool is_bpf_image_address(unsigned long address)
static inline bool bpf_prog_has_trampoline(const struct bpf_prog *prog)
struct bpf_func_info_aux {

enum bpf_jit_poke_reason {
	BPF_POKE_REASON_TAIL_CALL,

/* Descriptor of pokes pointing /into/ the JITed image. */
struct bpf_jit_poke_descriptor {
	void *tailcall_target;
	void *tailcall_bypass;
		struct bpf_map *map;
	bool tailcall_target_stable;

/* reg_type info for ctx arguments */
struct bpf_ctx_arg_aux {
	enum bpf_reg_type reg_type;

struct btf_mod_pair {
	struct module *module;

struct bpf_kfunc_desc_tab;
struct bpf_prog_aux {
	u32 func_cnt; /* used by non-func prog as the number of func progs */
	u32 real_func_cnt; /* includes hidden progs, only used for JIT and freeing progs */
	u32 func_idx; /* 0 for non-func prog, the index in func array for func prog */
	u32 attach_btf_id; /* in-kernel BTF type id to attach to */
	u32 ctx_arg_info_size;
	u32 max_rdonly_access;
	u32 max_rdwr_access;
	struct btf *attach_btf;
	const struct bpf_ctx_arg_aux *ctx_arg_info;
	struct mutex dst_mutex; /* protects dst_* pointers below, *after* prog becomes visible */
	struct bpf_prog *dst_prog;
	struct bpf_trampoline *dst_trampoline;
	enum bpf_prog_type saved_dst_prog_type;
	enum bpf_attach_type saved_dst_attach_type;
	bool verifier_zext; /* Zero extensions have been inserted by verifier. */
	bool dev_bound; /* Program is bound to the netdev. */
	bool offload_requested; /* Program is bound and offloaded to the netdev. */
	bool attach_btf_trace; /* true if attaching to BTF-enabled raw tp */
	bool attach_tracing_prog; /* true if tracing another tracing program */
	bool func_proto_unreliable;
	bool tail_call_reachable;
	bool exception_boundary;
	/* BTF_KIND_FUNC_PROTO for valid attach_btf_id */
	const struct btf_type *attach_func_proto;
	/* function name for valid attach_btf_id */
	const char *attach_func_name;
	struct bpf_prog **func;
	void *jit_data; /* JIT specific data. arch dependent */
	struct bpf_jit_poke_descriptor *poke_tab;
	struct bpf_kfunc_desc_tab *kfunc_tab;
	struct bpf_kfunc_btf_tab *kfunc_btf_tab;
#ifdef CONFIG_FINEIBT
	struct bpf_ksym ksym_prefix;
	struct bpf_ksym ksym;
	const struct bpf_prog_ops *ops;
	struct bpf_map **used_maps;
	struct mutex used_maps_mutex; /* mutex for used_maps and used_map_cnt */
	struct btf_mod_pair *used_btfs;
	struct bpf_prog *prog;
	struct user_struct *user;
	u64 load_time; /* ns since boottime */
	int cgroup_atype; /* enum cgroup_bpf_attach_type */
	struct bpf_map *cgroup_storage[MAX_BPF_CGROUP_STORAGE_TYPE];
	char name[BPF_OBJ_NAME_LEN];
	u64 (*bpf_exception_cb)(u64 cookie, u64 sp, u64 bp, u64, u64);
#ifdef CONFIG_SECURITY
	struct bpf_prog_offload *offload;
	struct bpf_func_info *func_info;
	struct bpf_func_info_aux *func_info_aux;
	/* bpf_line_info loaded from userspace. linfo->insn_off
	 * has the xlated insn offset.
	 * Both the main and sub prog share the same linfo.
	 * The subprog can access its first linfo by
	 * using the linfo_idx.
	 */
	struct bpf_line_info *linfo;
	/* jited_linfo is the jited addr of the linfo. It has a
	 * one to one mapping to linfo:
	 * jited_linfo[i] is the jited addr for the linfo[i]->insn_off.
	 * Both the main and sub prog share the same jited_linfo.
	 * The subprog can access its first jited_linfo by
	 * using the linfo_idx.
	 */
	/* subprog can use linfo_idx to access its first linfo and
	 * jited_linfo.
	 * main prog always has linfo_idx == 0
	 */
	struct exception_table_entry *extable;
		struct work_struct work;
		struct rcu_head rcu;
	u16 pages;		/* Number of allocated pages */
	u16 jited:1,		/* Is our filter JIT'ed? */
	    jit_requested:1,	/* archs need to JIT the prog */
	    gpl_compatible:1,	/* Is filter GPL compatible? */
	    cb_access:1,	/* Is control block accessed? */
	    dst_needed:1,	/* Do we need dst entry? */
	    blinding_requested:1, /* needs constant blinding */
	    blinded:1,		/* Was blinded */
	    is_func:1,		/* program is a bpf function */
	    kprobe_override:1,	/* Do we override a kprobe? */
	    has_callchain_buf:1, /* callchain buffer allocated? */
	    enforce_expected_attach_type:1, /* Enforce expected_attach_type checking at attach time */
	    call_get_stack:1,	/* Do we call bpf_get_stack() or bpf_get_stackid() */
	    call_get_func_ip:1,	/* Do we call get_func_ip() */
	    tstamp_type_access:1; /* Accessed __sk_buff->tstamp_type */
	enum bpf_prog_type type; /* Type of BPF program */
	enum bpf_attach_type expected_attach_type; /* For some prog types */
	u32 len;		/* Number of filter blocks */
	u32 jited_len;		/* Size of jited insns in bytes */
	u8 tag[BPF_TAG_SIZE];
	struct bpf_prog_stats __percpu *stats;
	int __percpu *active;
	unsigned int (*bpf_func)(const void *ctx,
				 const struct bpf_insn *insn);
	struct bpf_prog_aux *aux; /* Auxiliary fields */
	struct sock_fprog_kern *orig_prog; /* Original BPF program */
	/* Instructions for interpreter */
		DECLARE_FLEX_ARRAY(struct sock_filter, insns);
		DECLARE_FLEX_ARRAY(struct bpf_insn, insnsi);
struct bpf_array_aux {
	/* Programs with direct jumps into programs part of this array. */
	struct list_head poke_progs;
	struct bpf_map *map;
	struct mutex poke_mutex;
	struct work_struct work;

	enum bpf_link_type type;
	const struct bpf_link_ops *ops;
	struct bpf_prog *prog;
	struct work_struct work;

struct bpf_link_ops {
	void (*release)(struct bpf_link *link);
	void (*dealloc)(struct bpf_link *link);
	int (*detach)(struct bpf_link *link);
	int (*update_prog)(struct bpf_link *link, struct bpf_prog *new_prog,
			   struct bpf_prog *old_prog);
	void (*show_fdinfo)(const struct bpf_link *link, struct seq_file *seq);
	int (*fill_link_info)(const struct bpf_link *link,
			      struct bpf_link_info *info);
	int (*update_map)(struct bpf_link *link, struct bpf_map *new_map,
			  struct bpf_map *old_map);

struct bpf_tramp_link {
	struct bpf_link link;
	struct hlist_node tramp_hlist;

struct bpf_shim_tramp_link {
	struct bpf_tramp_link link;
	struct bpf_trampoline *trampoline;

struct bpf_tracing_link {
	struct bpf_tramp_link link;
	enum bpf_attach_type attach_type;
	struct bpf_trampoline *trampoline;
	struct bpf_prog *tgt_prog;

struct bpf_link_primer {
	struct bpf_link *link;
struct bpf_struct_ops_value;

#define BPF_STRUCT_OPS_MAX_NR_MEMBERS 64

/**
 * struct bpf_struct_ops - A structure of callbacks allowing a subsystem to
 *			   define a BPF_MAP_TYPE_STRUCT_OPS map type composed
 *			   of BPF_PROG_TYPE_STRUCT_OPS progs.
 * @verifier_ops: A structure of callbacks that are invoked by the verifier
 *		  when determining whether the struct_ops progs in the
 *		  struct_ops map are valid.
 * @init: A callback that is invoked a single time, and before any other
 *	  callback, to initialize the structure. A nonzero return value means
 *	  the subsystem could not be initialized.
 * @check_member: When defined, a callback invoked by the verifier to allow
 *		  the subsystem to determine if an entry in the struct_ops map
 *		  is valid. A nonzero return value means that the map is
 *		  invalid and should be rejected by the verifier.
 * @init_member: A callback that is invoked for each member of the struct_ops
 *		 map to allow the subsystem to initialize the member. A nonzero
 *		 value means the member could not be initialized. This callback
 *		 is exclusive with the @type, @type_id, @value_type, and
 * @reg: A callback that is invoked when the struct_ops map has been
 *	 initialized and is being attached to. Zero means the struct_ops map
 *	 has been successfully registered and is live. A nonzero return value
 *	 means the struct_ops map could not be registered.
 * @unreg: A callback that is invoked when the struct_ops map should be
 *	   unregistered.
 * @update: A callback that is invoked when the live struct_ops map is being
 *	    updated to contain new values. This callback is only invoked when
 *	    the struct_ops map is loaded with BPF_F_LINK. If not defined, it is
 *	    assumed that the struct_ops map cannot be updated.
 * @validate: A callback that is invoked after all of the members have been
 *	      initialized. This callback should perform static checks on the
 *	      map, meaning that it should either fail or succeed
 *	      deterministically. A struct_ops map that has been validated may
 *	      not necessarily succeed in being registered if the call to @reg
 *	      fails. For example, a valid struct_ops map may be loaded, but
 *	      then fail to be registered due to there being another active
 *	      struct_ops map on the system in the subsystem already. For this
 *	      reason, if this callback is not defined, the check is skipped as
 *	      the struct_ops map will have final verification performed in
 * @value_type: Value type.
 * @name: The name of the struct bpf_struct_ops object.
 * @func_models: Func models
 * @type_id: BTF type id.
 * @value_id: BTF value id.
 */
struct bpf_struct_ops {
	const struct bpf_verifier_ops *verifier_ops;
	int (*init)(struct btf *btf);
	int (*check_member)(const struct btf_type *t,
			    const struct btf_member *member,
			    const struct bpf_prog *prog);
	int (*init_member)(const struct btf_type *t,
			   const struct btf_member *member,
			   void *kdata, const void *udata);
	int (*reg)(void *kdata);
	void (*unreg)(void *kdata);
	int (*update)(void *kdata, void *old_kdata);
	int (*validate)(void *kdata);
	const struct btf_type *type;
	const struct btf_type *value_type;
	struct btf_func_model func_models[BPF_STRUCT_OPS_MAX_NR_MEMBERS];
#if defined(CONFIG_BPF_JIT) && defined(CONFIG_BPF_SYSCALL)
#define BPF_MODULE_OWNER ((void *)((0xeB9FUL << 2) + POISON_POINTER_DELTA))
const struct bpf_struct_ops *bpf_struct_ops_find(u32 type_id);
void bpf_struct_ops_init(struct btf *btf, struct bpf_verifier_log *log);
bool bpf_struct_ops_get(const void *kdata);
void bpf_struct_ops_put(const void *kdata);
int bpf_struct_ops_map_sys_lookup_elem(struct bpf_map *map, void *key,
int bpf_struct_ops_prepare_trampoline(struct bpf_tramp_links *tlinks,
				      struct bpf_tramp_link *link,
				      const struct btf_func_model *model,
				      void *image, void *image_end);
static inline bool bpf_try_module_get(const void *data, struct module *owner)
	if (owner == BPF_MODULE_OWNER)
		return bpf_struct_ops_get(data);
	else
		return try_module_get(owner);

static inline void bpf_module_put(const void *data, struct module *owner)
	if (owner == BPF_MODULE_OWNER)
		bpf_struct_ops_put(data);

int bpf_struct_ops_link_create(union bpf_attr *attr);
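/*
 * Example (illustrative): a subsystem such as tcp congestion control can hold
 * a reference on an ops table without caring whether it came from a module or
 * from a struct_ops map, because struct_ops tables advertise BPF_MODULE_OWNER
 * as their owner:
 *
 *	if (!bpf_try_module_get(ca, ca->owner))
 *		return -EBUSY;
 *	...
 *	bpf_module_put(ca, ca->owner);
 *
 * (ca->owner here follows the tcp_congestion_ops convention.)
 */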
/* Define it here to avoid the use of forward declaration */
struct bpf_dummy_ops_state {

struct bpf_dummy_ops {
	int (*test_1)(struct bpf_dummy_ops_state *cb);
	int (*test_2)(struct bpf_dummy_ops_state *cb, int a1, unsigned short a2,
		      char a3, unsigned long a4);
	int (*test_sleepable)(struct bpf_dummy_ops_state *cb);

int bpf_struct_ops_test_run(struct bpf_prog *prog, const union bpf_attr *kattr,
			    union bpf_attr __user *uattr);
static inline const struct bpf_struct_ops *bpf_struct_ops_find(u32 type_id)
static inline void bpf_struct_ops_init(struct btf *btf,
				       struct bpf_verifier_log *log)
static inline bool bpf_try_module_get(const void *data, struct module *owner)
	return try_module_get(owner);
static inline void bpf_module_put(const void *data, struct module *owner)
static inline int bpf_struct_ops_map_sys_lookup_elem(struct bpf_map *map,
static inline int bpf_struct_ops_link_create(union bpf_attr *attr)

#if defined(CONFIG_CGROUP_BPF) && defined(CONFIG_BPF_LSM)
int bpf_trampoline_link_cgroup_shim(struct bpf_prog *prog,
void bpf_trampoline_unlink_cgroup_shim(struct bpf_prog *prog);
static inline int bpf_trampoline_link_cgroup_shim(struct bpf_prog *prog,
static inline void bpf_trampoline_unlink_cgroup_shim(struct bpf_prog *prog)
	struct bpf_array_aux *aux;
		DECLARE_FLEX_ARRAY(char, value) __aligned(8);
		DECLARE_FLEX_ARRAY(void *, ptrs) __aligned(8);
		DECLARE_FLEX_ARRAY(void __percpu *, pptrs) __aligned(8);

#define BPF_COMPLEXITY_LIMIT_INSNS	1000000 /* yes. 1M insns */
#define MAX_TAIL_CALL_CNT		33

/* Maximum number of loops for bpf_loop and bpf_iter_num.
 * It's enum to expose it (and thus make it discoverable) through BTF.
 */
	BPF_MAX_LOOPS = 8 * 1024 * 1024,

#define BPF_F_ACCESS_MASK	(BPF_F_RDONLY |		\
				 BPF_F_RDONLY_PROG |	\

#define BPF_MAP_CAN_READ	BIT(0)
#define BPF_MAP_CAN_WRITE	BIT(1)

/* Maximum number of user-producer ring buffer samples that can be drained in
 * a call to bpf_user_ringbuf_drain().
 */
#define BPF_MAX_USER_RINGBUF_SAMPLES	(128 * 1024)
static inline u32 bpf_map_flags_to_cap(struct bpf_map *map)
	u32 access_flags = map->map_flags & (BPF_F_RDONLY_PROG | BPF_F_WRONLY_PROG);

	/* Combination of BPF_F_RDONLY_PROG | BPF_F_WRONLY_PROG is
	 * not possible.
	 */
	if (access_flags & BPF_F_RDONLY_PROG)
		return BPF_MAP_CAN_READ;
	else if (access_flags & BPF_F_WRONLY_PROG)
		return BPF_MAP_CAN_WRITE;
	else
		return BPF_MAP_CAN_READ | BPF_MAP_CAN_WRITE;

static inline bool bpf_map_flags_access_ok(u32 access_flags)
	return (access_flags & (BPF_F_RDONLY_PROG | BPF_F_WRONLY_PROG)) !=
	       (BPF_F_RDONLY_PROG | BPF_F_WRONLY_PROG);
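/*
 * Example (illustrative): BPF_F_RDONLY_PROG alone restricts program-side
 * access to reads, while asking for both directions is rejected:
 *
 *	bpf_map_flags_access_ok(BPF_F_RDONLY_PROG)			-> true
 *	bpf_map_flags_access_ok(BPF_F_RDONLY_PROG | BPF_F_WRONLY_PROG)	-> false
 */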
struct bpf_event_entry {
	struct perf_event *event;
	struct file *perf_file;
	struct file *map_file;
	struct rcu_head rcu;

static inline bool map_type_contains_progs(struct bpf_map *map)
	return map->map_type == BPF_MAP_TYPE_PROG_ARRAY ||
	       map->map_type == BPF_MAP_TYPE_DEVMAP ||
	       map->map_type == BPF_MAP_TYPE_CPUMAP;

bool bpf_prog_map_compatible(struct bpf_map *map, const struct bpf_prog *fp);
int bpf_prog_calc_tag(struct bpf_prog *fp);

const struct bpf_func_proto *bpf_get_trace_printk_proto(void);
const struct bpf_func_proto *bpf_get_trace_vprintk_proto(void);

typedef unsigned long (*bpf_ctx_copy_t)(void *dst, const void *src,
					unsigned long off, unsigned long len);
typedef u32 (*bpf_convert_ctx_access_t)(enum bpf_access_type type,
					const struct bpf_insn *src,
					struct bpf_insn *dst,
					struct bpf_prog *prog,

u64 bpf_event_output(struct bpf_map *map, u64 flags, void *meta, u64 meta_size,
		     void *ctx, u64 ctx_size, bpf_ctx_copy_t ctx_copy);
1862 /* an array of programs to be executed under rcu_lock.
1864 * Typical usage:
1865 * ret = bpf_prog_run_array(rcu_dereference(&bpf_prog_array), ctx, bpf_prog_run);
1867 * the structure returned by bpf_prog_array_alloc() should be populated
1868 * with program pointers and the last pointer must be NULL.
1869 * The user has to keep refcnt on the program and make sure the program
1870 * is removed from the array before bpf_prog_put().
1871 * The 'struct bpf_prog_array *' should only be replaced with xchg()
1872 * since other cpus are walking the array of pointers in parallel.
1874 struct bpf_prog_array_item {
1875 struct bpf_prog *prog;
1877 struct bpf_cgroup_storage *cgroup_storage[MAX_BPF_CGROUP_STORAGE_TYPE];
1882 struct bpf_prog_array {
1883 struct rcu_head rcu;
1884 struct bpf_prog_array_item items[];
1887 struct bpf_empty_prog_array {
1888 struct bpf_prog_array hdr;
1889 struct bpf_prog *null_prog;
1892 /* To avoid allocating an empty bpf_prog_array for cgroups that
1893 * don't have a bpf program attached, use one global 'bpf_empty_prog_array'.
1894 * It will not be modified by the caller of bpf_prog_array_alloc()
1895 * (since the caller requested prog_cnt == 0);
1896 * that pointer should still be 'freed' by bpf_prog_array_free().
1898 extern struct bpf_empty_prog_array bpf_empty_prog_array;
1900 struct bpf_prog_array *bpf_prog_array_alloc(u32 prog_cnt, gfp_t flags);
1901 void bpf_prog_array_free(struct bpf_prog_array *progs);
1902 /* Use when traversal over the bpf_prog_array uses tasks_trace rcu */
1903 void bpf_prog_array_free_sleepable(struct bpf_prog_array *progs);
1904 int bpf_prog_array_length(struct bpf_prog_array *progs);
1905 bool bpf_prog_array_is_empty(struct bpf_prog_array *array);
1906 int bpf_prog_array_copy_to_user(struct bpf_prog_array *progs,
1907 __u32 __user *prog_ids, u32 cnt);
1909 void bpf_prog_array_delete_safe(struct bpf_prog_array *progs,
1910 struct bpf_prog *old_prog);
1911 int bpf_prog_array_delete_safe_at(struct bpf_prog_array *array, int index);
1912 int bpf_prog_array_update_at(struct bpf_prog_array *array, int index,
1913 struct bpf_prog *prog);
1914 int bpf_prog_array_copy_info(struct bpf_prog_array *array,
1915 u32 *prog_ids, u32 request_cnt,
1917 int bpf_prog_array_copy(struct bpf_prog_array *old_array,
1918 struct bpf_prog *exclude_prog,
1919 struct bpf_prog *include_prog,
1921 struct bpf_prog_array **new_array);
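/* Editor's sketch of the usage contract documented above; kernel context
 * assumed, example_attach() and 'attach_point' are hypothetical. The new
 * array is published atomically with xchg() because other CPUs may be
 * walking the old items in parallel; readers then do, under rcu_read_lock():
 *
 *	ret = bpf_prog_run_array(rcu_dereference(*attach_point), ctx, bpf_prog_run);
 */
static int example_attach(struct bpf_prog_array **attach_point,
			  struct bpf_prog *prog)
{
	struct bpf_prog_array *new_array, *old_array;

	new_array = bpf_prog_array_alloc(1, GFP_KERNEL);
	if (!new_array)
		return -ENOMEM;

	new_array->items[0].prog = prog; /* caller must hold a prog refcnt */
	/* bpf_prog_array_alloc() already allocated items[1] as the NULL terminator */

	old_array = xchg(attach_point, new_array);
	bpf_prog_array_free(old_array); /* NULL-safe; frees via kfree_rcu() */
	return 0;
}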
1923 struct bpf_run_ctx {};
1925 struct bpf_cg_run_ctx {
1926 struct bpf_run_ctx run_ctx;
1927 const struct bpf_prog_array_item *prog_item;
1931 struct bpf_trace_run_ctx {
1932 struct bpf_run_ctx run_ctx;
1937 struct bpf_tramp_run_ctx {
1938 struct bpf_run_ctx run_ctx;
1940 struct bpf_run_ctx *saved_run_ctx;
1943 static inline struct bpf_run_ctx *bpf_set_run_ctx(struct bpf_run_ctx *new_ctx)
1945 struct bpf_run_ctx *old_ctx = NULL;
1947 #ifdef CONFIG_BPF_SYSCALL
1948 old_ctx = current->bpf_ctx;
1949 current->bpf_ctx = new_ctx;
1954 static inline void bpf_reset_run_ctx(struct bpf_run_ctx *old_ctx)
1956 #ifdef CONFIG_BPF_SYSCALL
1957 current->bpf_ctx = old_ctx;
1961 /* BPF program asks to bypass CAP_NET_BIND_SERVICE in bind. */
1962 #define BPF_RET_BIND_NO_CAP_NET_BIND_SERVICE (1 << 0)
1963 /* BPF program asks to set CN on the packet (consumed by a different hook than the bind flag above, so reusing bit 0 is intentional). */
1964 #define BPF_RET_SET_CN (1 << 0)
1966 typedef u32 (*bpf_prog_run_fn)(const struct bpf_prog *prog, const void *ctx);
1968 static __always_inline u32
1969 bpf_prog_run_array(const struct bpf_prog_array *array,
1970 const void *ctx, bpf_prog_run_fn run_prog)
1972 const struct bpf_prog_array_item *item;
1973 const struct bpf_prog *prog;
1974 struct bpf_run_ctx *old_run_ctx;
1975 struct bpf_trace_run_ctx run_ctx;
1978 RCU_LOCKDEP_WARN(!rcu_read_lock_held(), "no rcu lock held");
1980 if (unlikely(!array))
1983 run_ctx.is_uprobe = false;
1986 old_run_ctx = bpf_set_run_ctx(&run_ctx.run_ctx);
1987 item = &array->items[0];
1988 while ((prog = READ_ONCE(item->prog))) {
1989 run_ctx.bpf_cookie = item->bpf_cookie;
1990 ret &= run_prog(prog, ctx);
1993 bpf_reset_run_ctx(old_run_ctx);
1998 /* Notes on RCU design for bpf_prog_arrays containing sleepable programs:
2000 * We use the tasks_trace rcu flavor read section to protect the bpf_prog_array
2001 * overall. As a result, the array must be freed via bpf_prog_array_free_sleepable(),
2002 * so that a tasks_trace rcu grace period elapses before the memory is reused.
2004 * When a non-sleepable program is inside the array, we take the rcu read
2005 * section and disable preemption for that program alone, so it can access
2006 * rcu-protected dynamically sized maps.
2008 static __always_inline u32
2009 bpf_prog_run_array_uprobe(const struct bpf_prog_array __rcu *array_rcu,
2010 const void *ctx, bpf_prog_run_fn run_prog)
2012 const struct bpf_prog_array_item *item;
2013 const struct bpf_prog *prog;
2014 const struct bpf_prog_array *array;
2015 struct bpf_run_ctx *old_run_ctx;
2016 struct bpf_trace_run_ctx run_ctx;
2021 rcu_read_lock_trace();
2024 run_ctx.is_uprobe = true;
2026 array = rcu_dereference_check(array_rcu, rcu_read_lock_trace_held());
2027 if (unlikely(!array))
2029 old_run_ctx = bpf_set_run_ctx(&run_ctx.run_ctx);
2030 item = &array->items[0];
2031 while ((prog = READ_ONCE(item->prog))) {
2032 if (!prog->aux->sleepable)
2035 run_ctx.bpf_cookie = item->bpf_cookie;
2036 ret &= run_prog(prog, ctx);
2039 if (!prog->aux->sleepable)
2042 bpf_reset_run_ctx(old_run_ctx);
2045 rcu_read_unlock_trace();
2049 #ifdef CONFIG_BPF_SYSCALL
2050 DECLARE_PER_CPU(int, bpf_prog_active);
2051 extern struct mutex bpf_stats_enabled_mutex;
2054 * Block execution of BPF programs attached to instrumentation (perf,
2055 * kprobes, tracepoints) to prevent deadlocks on map operations: any of
2056 * these events can fire inside a region that holds a map bucket lock
2057 * and would deadlock on it.
2059 static inline void bpf_disable_instrumentation(void)
2062 this_cpu_inc(bpf_prog_active);
2065 static inline void bpf_enable_instrumentation(void)
2067 this_cpu_dec(bpf_prog_active);
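/* Editor's sketch of the guard the comment above describes; kernel context
 * assumed, example_sys_map_update() is hypothetical. Wrapping the update
 * keeps a kprobe/perf program that fires inside the bucket-lock region
 * from recursing into the same map and deadlocking.
 */
static long example_sys_map_update(struct bpf_map *map, void *key,
				   void *value, u64 flags)
{
	long err;

	bpf_disable_instrumentation();
	rcu_read_lock();
	err = map->ops->map_update_elem(map, key, value, flags);
	rcu_read_unlock();
	bpf_enable_instrumentation();
	return err;
}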
2071 extern const struct file_operations bpf_map_fops;
2072 extern const struct file_operations bpf_prog_fops;
2073 extern const struct file_operations bpf_iter_fops;
2075 #define BPF_PROG_TYPE(_id, _name, prog_ctx_type, kern_ctx_type) \
2076 extern const struct bpf_prog_ops _name ## _prog_ops; \
2077 extern const struct bpf_verifier_ops _name ## _verifier_ops;
2078 #define BPF_MAP_TYPE(_id, _ops) \
2079 extern const struct bpf_map_ops _ops;
2080 #define BPF_LINK_TYPE(_id, _name)
2081 #include <linux/bpf_types.h>
2082 #undef BPF_PROG_TYPE
2084 #undef BPF_LINK_TYPE
2086 extern const struct bpf_prog_ops bpf_offload_prog_ops;
2087 extern const struct bpf_verifier_ops tc_cls_act_analyzer_ops;
2088 extern const struct bpf_verifier_ops xdp_analyzer_ops;
2090 struct bpf_prog *bpf_prog_get(u32 ufd);
2091 struct bpf_prog *bpf_prog_get_type_dev(u32 ufd, enum bpf_prog_type type,
2093 void bpf_prog_add(struct bpf_prog *prog, int i);
2094 void bpf_prog_sub(struct bpf_prog *prog, int i);
2095 void bpf_prog_inc(struct bpf_prog *prog);
2096 struct bpf_prog * __must_check bpf_prog_inc_not_zero(struct bpf_prog *prog);
2097 void bpf_prog_put(struct bpf_prog *prog);
2099 void bpf_prog_free_id(struct bpf_prog *prog);
2100 void bpf_map_free_id(struct bpf_map *map);
2102 struct btf_field *btf_record_find(const struct btf_record *rec,
2103 u32 offset, u32 field_mask);
2104 void btf_record_free(struct btf_record *rec);
2105 void bpf_map_free_record(struct bpf_map *map);
2106 struct btf_record *btf_record_dup(const struct btf_record *rec);
2107 bool btf_record_equal(const struct btf_record *rec_a, const struct btf_record *rec_b);
2108 void bpf_obj_free_timer(const struct btf_record *rec, void *obj);
2109 void bpf_obj_free_fields(const struct btf_record *rec, void *obj);
2110 void __bpf_obj_drop_impl(void *p, const struct btf_record *rec, bool percpu);
2112 struct bpf_map *bpf_map_get(u32 ufd);
2113 struct bpf_map *bpf_map_get_with_uref(u32 ufd);
2114 struct bpf_map *__bpf_map_get(struct fd f);
2115 void bpf_map_inc(struct bpf_map *map);
2116 void bpf_map_inc_with_uref(struct bpf_map *map);
2117 struct bpf_map *__bpf_map_inc_not_zero(struct bpf_map *map, bool uref);
2118 struct bpf_map * __must_check bpf_map_inc_not_zero(struct bpf_map *map);
2119 void bpf_map_put_with_uref(struct bpf_map *map);
2120 void bpf_map_put(struct bpf_map *map);
2121 void *bpf_map_area_alloc(u64 size, int numa_node);
2122 void *bpf_map_area_mmapable_alloc(u64 size, int numa_node);
2123 void bpf_map_area_free(void *base);
2124 bool bpf_map_write_active(const struct bpf_map *map);
2125 void bpf_map_init_from_attr(struct bpf_map *map, union bpf_attr *attr);
2126 int generic_map_lookup_batch(struct bpf_map *map,
2127 const union bpf_attr *attr,
2128 union bpf_attr __user *uattr);
2129 int generic_map_update_batch(struct bpf_map *map, struct file *map_file,
2130 const union bpf_attr *attr,
2131 union bpf_attr __user *uattr);
2132 int generic_map_delete_batch(struct bpf_map *map,
2133 const union bpf_attr *attr,
2134 union bpf_attr __user *uattr);
2135 struct bpf_map *bpf_map_get_curr_or_next(u32 *id);
2136 struct bpf_prog *bpf_prog_get_curr_or_next(u32 *id);
2138 #ifdef CONFIG_MEMCG_KMEM
2139 void *bpf_map_kmalloc_node(const struct bpf_map *map, size_t size, gfp_t flags,
2141 void *bpf_map_kzalloc(const struct bpf_map *map, size_t size, gfp_t flags);
2142 void *bpf_map_kvcalloc(struct bpf_map *map, size_t n, size_t size,
2144 void __percpu *bpf_map_alloc_percpu(const struct bpf_map *map, size_t size,
2145 size_t align, gfp_t flags);
2147 static inline void *
2148 bpf_map_kmalloc_node(const struct bpf_map *map, size_t size, gfp_t flags,
2151 return kmalloc_node(size, flags, node);
2154 static inline void *
2155 bpf_map_kzalloc(const struct bpf_map *map, size_t size, gfp_t flags)
2157 return kzalloc(size, flags);
2160 static inline void *
2161 bpf_map_kvcalloc(struct bpf_map *map, size_t n, size_t size, gfp_t flags)
2163 return kvcalloc(n, size, flags);
2166 static inline void __percpu *
2167 bpf_map_alloc_percpu(const struct bpf_map *map, size_t size, size_t align,
2170 return __alloc_percpu_gfp(size, align, flags);
2175 bpf_map_init_elem_count(struct bpf_map *map)
2177 size_t size = sizeof(*map->elem_count), align = size;
2178 gfp_t flags = GFP_USER | __GFP_NOWARN;
2180 map->elem_count = bpf_map_alloc_percpu(map, size, align, flags);
2181 if (!map->elem_count)
2188 bpf_map_free_elem_count(struct bpf_map *map)
2190 free_percpu(map->elem_count);
2193 static inline void bpf_map_inc_elem_count(struct bpf_map *map)
2195 this_cpu_inc(*map->elem_count);
2198 static inline void bpf_map_dec_elem_count(struct bpf_map *map)
2200 this_cpu_dec(*map->elem_count);
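/* Editor's sketch tying the memcg-aware allocators and the element counter
 * together; example_* is hypothetical. This assumes the map implementation
 * called bpf_map_init_elem_count() in its ->map_alloc() and calls
 * bpf_map_free_elem_count() in ->map_free().
 */
static void *example_alloc_elem(struct bpf_map *map, size_t size)
{
	void *elem = bpf_map_kzalloc(map, size, GFP_ATOMIC | __GFP_NOWARN);

	if (elem)
		bpf_map_inc_elem_count(map); /* charged to the map's memcg */
	return elem;
}

static void example_free_elem(struct bpf_map *map, void *elem)
{
	bpf_map_dec_elem_count(map);
	kfree(elem);
}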
2203 extern int sysctl_unprivileged_bpf_disabled;
2205 static inline bool bpf_allow_ptr_leaks(void)
2207 return perfmon_capable();
2210 static inline bool bpf_allow_uninit_stack(void)
2212 return perfmon_capable();
2215 static inline bool bpf_bypass_spec_v1(void)
2217 return cpu_mitigations_off() || perfmon_capable();
2220 static inline bool bpf_bypass_spec_v4(void)
2222 return cpu_mitigations_off() || perfmon_capable();
2225 int bpf_map_new_fd(struct bpf_map *map, int flags);
2226 int bpf_prog_new_fd(struct bpf_prog *prog);
2228 void bpf_link_init(struct bpf_link *link, enum bpf_link_type type,
2229 const struct bpf_link_ops *ops, struct bpf_prog *prog);
2230 int bpf_link_prime(struct bpf_link *link, struct bpf_link_primer *primer);
2231 int bpf_link_settle(struct bpf_link_primer *primer);
2232 void bpf_link_cleanup(struct bpf_link_primer *primer);
2233 void bpf_link_inc(struct bpf_link *link);
2234 void bpf_link_put(struct bpf_link *link);
2235 int bpf_link_new_fd(struct bpf_link *link);
2236 struct bpf_link *bpf_link_get_from_fd(u32 ufd);
2237 struct bpf_link *bpf_link_get_curr_or_next(u32 *id);
2239 int bpf_obj_pin_user(u32 ufd, int path_fd, const char __user *pathname);
2240 int bpf_obj_get_user(int path_fd, const char __user *pathname, int flags);
2242 #define BPF_ITER_FUNC_PREFIX "bpf_iter_"
2243 #define DEFINE_BPF_ITER_FUNC(target, args...) \
2244 extern int bpf_iter_ ## target(args); \
2245 int __init bpf_iter_ ## target(args) { return 0; }
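/* Editor's example of the macro above, modeled on the in-tree task
 * iterator:
 *
 *	DEFINE_BPF_ITER_FUNC(task, struct bpf_iter_meta *meta,
 *			     struct task_struct *task)
 *
 * expands to a dummy bpf_iter_task() whose BTF signature is what
 * "iter/task" programs are type-checked against; the function itself
 * is never meant to be called.
 */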
2248 * The task type of iterators.
2250 * BPF task iterators can be parameterized to visit only
2251 * a subset of tasks.
2253 * BPF_TASK_ITER_ALL (default)
2254 * Iterate over resources of every task.
2256 * BPF_TASK_ITER_TID
2257 * Iterate over resources of a task/tid.
2259 * BPF_TASK_ITER_TGID
2260 * Iterate over resources of every task of a process / task group.
2262 enum bpf_iter_task_type {
2263 BPF_TASK_ITER_ALL = 0,
2268 struct bpf_iter_aux_info {
2269 /* for map_elem iter */
2270 struct bpf_map *map;
2272 /* for cgroup iter */
2274 struct cgroup *start; /* starting cgroup */
2275 enum bpf_cgroup_iter_order order;
2278 enum bpf_iter_task_type type;
2283 typedef int (*bpf_iter_attach_target_t)(struct bpf_prog *prog,
2284 union bpf_iter_link_info *linfo,
2285 struct bpf_iter_aux_info *aux);
2286 typedef void (*bpf_iter_detach_target_t)(struct bpf_iter_aux_info *aux);
2287 typedef void (*bpf_iter_show_fdinfo_t) (const struct bpf_iter_aux_info *aux,
2288 struct seq_file *seq);
2289 typedef int (*bpf_iter_fill_link_info_t)(const struct bpf_iter_aux_info *aux,
2290 struct bpf_link_info *info);
2291 typedef const struct bpf_func_proto *
2292 (*bpf_iter_get_func_proto_t)(enum bpf_func_id func_id,
2293 const struct bpf_prog *prog);
2295 enum bpf_iter_feature {
2296 BPF_ITER_RESCHED = BIT(0),
2299 #define BPF_ITER_CTX_ARG_MAX 2
2300 struct bpf_iter_reg {
2302 bpf_iter_attach_target_t attach_target;
2303 bpf_iter_detach_target_t detach_target;
2304 bpf_iter_show_fdinfo_t show_fdinfo;
2305 bpf_iter_fill_link_info_t fill_link_info;
2306 bpf_iter_get_func_proto_t get_func_proto;
2307 u32 ctx_arg_info_size;
2309 struct bpf_ctx_arg_aux ctx_arg_info[BPF_ITER_CTX_ARG_MAX];
2310 const struct bpf_iter_seq_info *seq_info;
2313 struct bpf_iter_meta {
2314 __bpf_md_ptr(struct seq_file *, seq);
2319 struct bpf_iter__bpf_map_elem {
2320 __bpf_md_ptr(struct bpf_iter_meta *, meta);
2321 __bpf_md_ptr(struct bpf_map *, map);
2322 __bpf_md_ptr(void *, key);
2323 __bpf_md_ptr(void *, value);
2326 int bpf_iter_reg_target(const struct bpf_iter_reg *reg_info);
2327 void bpf_iter_unreg_target(const struct bpf_iter_reg *reg_info);
2328 bool bpf_iter_prog_supported(struct bpf_prog *prog);
2329 const struct bpf_func_proto *
2330 bpf_iter_get_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog);
2331 int bpf_iter_link_attach(const union bpf_attr *attr, bpfptr_t uattr, struct bpf_prog *prog);
2332 int bpf_iter_new_fd(struct bpf_link *link);
2333 bool bpf_link_is_iter(struct bpf_link *link);
2334 struct bpf_prog *bpf_iter_get_info(struct bpf_iter_meta *meta, bool in_stop);
2335 int bpf_iter_run_prog(struct bpf_prog *prog, void *ctx);
2336 void bpf_iter_map_show_fdinfo(const struct bpf_iter_aux_info *aux,
2337 struct seq_file *seq);
2338 int bpf_iter_map_fill_link_info(const struct bpf_iter_aux_info *aux,
2339 struct bpf_link_info *info);
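/* Editor's sketch of registering an iterator target with the API above,
 * modeled on in-tree targets; all foo_* names and struct bpf_iter__foo
 * are hypothetical and assumed to be defined by the target.
 */
static const struct bpf_iter_seq_info foo_seq_info = {
	.seq_ops		= &foo_seq_ops, /* start/next/stop/show */
	.init_seq_private	= foo_init_seq_priv,
	.fini_seq_private	= foo_fini_seq_priv,
};

static struct bpf_iter_reg foo_iter_reg = {
	.target			= "foo",
	.feature		= BPF_ITER_RESCHED,
	.ctx_arg_info_size	= 1,
	.ctx_arg_info		= {
		{ offsetof(struct bpf_iter__foo, foo),
		  PTR_TO_BTF_ID_OR_NULL },
	},
	.seq_info		= &foo_seq_info,
};

static int __init foo_iter_init(void)
{
	return bpf_iter_reg_target(&foo_iter_reg);
}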
2341 int map_set_for_each_callback_args(struct bpf_verifier_env *env,
2342 struct bpf_func_state *caller,
2343 struct bpf_func_state *callee);
2345 int bpf_percpu_hash_copy(struct bpf_map *map, void *key, void *value);
2346 int bpf_percpu_array_copy(struct bpf_map *map, void *key, void *value);
2347 int bpf_percpu_hash_update(struct bpf_map *map, void *key, void *value,
2349 int bpf_percpu_array_update(struct bpf_map *map, void *key, void *value,
2352 int bpf_stackmap_copy(struct bpf_map *map, void *key, void *value);
2354 int bpf_fd_array_map_update_elem(struct bpf_map *map, struct file *map_file,
2355 void *key, void *value, u64 map_flags);
2356 int bpf_fd_array_map_lookup_elem(struct bpf_map *map, void *key, u32 *value);
2357 int bpf_fd_htab_map_update_elem(struct bpf_map *map, struct file *map_file,
2358 void *key, void *value, u64 map_flags);
2359 int bpf_fd_htab_map_lookup_elem(struct bpf_map *map, void *key, u32 *value);
2361 int bpf_get_file_flag(int flags);
2362 int bpf_check_uarg_tail_zero(bpfptr_t uaddr, size_t expected_size,
2363 size_t actual_size);
2365 /* verify correctness of eBPF program */
2366 int bpf_check(struct bpf_prog **fp, union bpf_attr *attr, bpfptr_t uattr, u32 uattr_size);
2368 #ifndef CONFIG_BPF_JIT_ALWAYS_ON
2369 void bpf_patch_call_args(struct bpf_insn *insn, u32 stack_depth);
2372 struct btf *bpf_get_btf_vmlinux(void);
2377 struct bpf_dtab_netdev;
2378 struct bpf_cpu_map_entry;
2380 void __dev_flush(void);
2381 int dev_xdp_enqueue(struct net_device *dev, struct xdp_frame *xdpf,
2382 struct net_device *dev_rx);
2383 int dev_map_enqueue(struct bpf_dtab_netdev *dst, struct xdp_frame *xdpf,
2384 struct net_device *dev_rx);
2385 int dev_map_enqueue_multi(struct xdp_frame *xdpf, struct net_device *dev_rx,
2386 struct bpf_map *map, bool exclude_ingress);
2387 int dev_map_generic_redirect(struct bpf_dtab_netdev *dst, struct sk_buff *skb,
2388 struct bpf_prog *xdp_prog);
2389 int dev_map_redirect_multi(struct net_device *dev, struct sk_buff *skb,
2390 struct bpf_prog *xdp_prog, struct bpf_map *map,
2391 bool exclude_ingress);
2393 void __cpu_map_flush(void);
2394 int cpu_map_enqueue(struct bpf_cpu_map_entry *rcpu, struct xdp_frame *xdpf,
2395 struct net_device *dev_rx);
2396 int cpu_map_generic_redirect(struct bpf_cpu_map_entry *rcpu,
2397 struct sk_buff *skb);
2399 /* Return the map's NUMA node as specified by userspace */
2400 static inline int bpf_map_attr_numa_node(const union bpf_attr *attr)
2402 return (attr->map_flags & BPF_F_NUMA_NODE) ?
2403 attr->numa_node : NUMA_NO_NODE;
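/* Editor's sketch: a ->map_alloc() implementation typically feeds the
 * helper above straight into bpf_map_area_alloc(), honoring
 * BPF_F_NUMA_NODE when set and falling back to NUMA_NO_NODE otherwise.
 * example_alloc_backing() is hypothetical.
 */
static void *example_alloc_backing(union bpf_attr *attr, u64 size)
{
	return bpf_map_area_alloc(size, bpf_map_attr_numa_node(attr));
}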
2406 struct bpf_prog *bpf_prog_get_type_path(const char *name, enum bpf_prog_type type);
2407 int array_map_alloc_check(union bpf_attr *attr);
2409 int bpf_prog_test_run_xdp(struct bpf_prog *prog, const union bpf_attr *kattr,
2410 union bpf_attr __user *uattr);
2411 int bpf_prog_test_run_skb(struct bpf_prog *prog, const union bpf_attr *kattr,
2412 union bpf_attr __user *uattr);
2413 int bpf_prog_test_run_tracing(struct bpf_prog *prog,
2414 const union bpf_attr *kattr,
2415 union bpf_attr __user *uattr);
2416 int bpf_prog_test_run_flow_dissector(struct bpf_prog *prog,
2417 const union bpf_attr *kattr,
2418 union bpf_attr __user *uattr);
2419 int bpf_prog_test_run_raw_tp(struct bpf_prog *prog,
2420 const union bpf_attr *kattr,
2421 union bpf_attr __user *uattr);
2422 int bpf_prog_test_run_sk_lookup(struct bpf_prog *prog,
2423 const union bpf_attr *kattr,
2424 union bpf_attr __user *uattr);
2425 int bpf_prog_test_run_nf(struct bpf_prog *prog,
2426 const union bpf_attr *kattr,
2427 union bpf_attr __user *uattr);
2428 bool btf_ctx_access(int off, int size, enum bpf_access_type type,
2429 const struct bpf_prog *prog,
2430 struct bpf_insn_access_aux *info);
2432 static inline bool bpf_tracing_ctx_access(int off, int size,
2433 enum bpf_access_type type)
2435 if (off < 0 || off >= sizeof(__u64) * MAX_BPF_FUNC_ARGS)
2437 if (type != BPF_READ)
2439 if (off % size != 0)
2444 static inline bool bpf_tracing_btf_ctx_access(int off, int size,
2445 enum bpf_access_type type,
2446 const struct bpf_prog *prog,
2447 struct bpf_insn_access_aux *info)
2449 if (!bpf_tracing_ctx_access(off, size, type))
2451 return btf_ctx_access(off, size, type, prog, info);
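/* Editor's sketch: verifier ops for BTF-typed tracing programs can
 * implement .is_valid_access as a thin wrapper over the helper above,
 * which is essentially what the in-tree tracing verifier ops do;
 * example_is_valid_access() is hypothetical.
 */
static bool example_is_valid_access(int off, int size,
				    enum bpf_access_type type,
				    const struct bpf_prog *prog,
				    struct bpf_insn_access_aux *info)
{
	return bpf_tracing_btf_ctx_access(off, size, type, prog, info);
}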
2454 int btf_struct_access(struct bpf_verifier_log *log,
2455 const struct bpf_reg_state *reg,
2456 int off, int size, enum bpf_access_type atype,
2457 u32 *next_btf_id, enum bpf_type_flag *flag, const char **field_name);
2458 bool btf_struct_ids_match(struct bpf_verifier_log *log,
2459 const struct btf *btf, u32 id, int off,
2460 const struct btf *need_btf, u32 need_type_id,
2463 int btf_distill_func_proto(struct bpf_verifier_log *log,
2465 const struct btf_type *func_proto,
2466 const char *func_name,
2467 struct btf_func_model *m);
2469 struct bpf_reg_state;
2470 int btf_prepare_func_args(struct bpf_verifier_env *env, int subprog);
2471 int btf_check_type_match(struct bpf_verifier_log *log, const struct bpf_prog *prog,
2472 struct btf *btf, const struct btf_type *t);
2473 const char *btf_find_decl_tag_value(const struct btf *btf, const struct btf_type *pt,
2474 int comp_idx, const char *tag_key);
2476 struct bpf_prog *bpf_prog_by_id(u32 id);
2477 struct bpf_link *bpf_link_by_id(u32 id);
2479 const struct bpf_func_proto *bpf_base_func_proto(enum bpf_func_id func_id);
2480 void bpf_task_storage_free(struct task_struct *task);
2481 void bpf_cgrp_storage_free(struct cgroup *cgroup);
2482 bool bpf_prog_has_kfunc_call(const struct bpf_prog *prog);
2483 const struct btf_func_model *
2484 bpf_jit_find_kfunc_model(const struct bpf_prog *prog,
2485 const struct bpf_insn *insn);
2486 int bpf_get_kfunc_addr(const struct bpf_prog *prog, u32 func_id,
2487 u16 btf_fd_idx, u8 **func_addr);
2489 struct bpf_core_ctx {
2490 struct bpf_verifier_log *log;
2491 const struct btf *btf;
2494 bool btf_nested_type_is_trusted(struct bpf_verifier_log *log,
2495 const struct bpf_reg_state *reg,
2496 const char *field_name, u32 btf_id, const char *suffix);
2498 bool btf_type_ids_nocast_alias(struct bpf_verifier_log *log,
2499 const struct btf *reg_btf, u32 reg_id,
2500 const struct btf *arg_btf, u32 arg_id);
2502 int bpf_core_apply(struct bpf_core_ctx *ctx, const struct bpf_core_relo *relo,
2503 int relo_idx, void *insn);
2505 static inline bool unprivileged_ebpf_enabled(void)
2507 return !sysctl_unprivileged_bpf_disabled;
2510 /* Not all bpf prog types have the bpf_ctx.
2511 * For the bpf prog types that have initialized the bpf_ctx,
2512 * this function can be used to decide if a kernel function
2513 * is called by a bpf program.
2515 static inline bool has_current_bpf_ctx(void)
2517 return !!current->bpf_ctx;
2520 void notrace bpf_prog_inc_misses_counter(struct bpf_prog *prog);
2522 void bpf_dynptr_init(struct bpf_dynptr_kern *ptr, void *data,
2523 enum bpf_dynptr_type type, u32 offset, u32 size);
2524 void bpf_dynptr_set_null(struct bpf_dynptr_kern *ptr);
2525 void bpf_dynptr_set_rdonly(struct bpf_dynptr_kern *ptr);
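/* Editor's sketch of the dynptr primitives above, following the shape of
 * the in-tree bpf_dynptr_from_mem helper: initialize a LOCAL dynptr over
 * caller-owned memory, or poison it with set_null on failure.
 * example_dynptr_from_mem() is hypothetical.
 */
static int example_dynptr_from_mem(void *data, u32 size,
				   struct bpf_dynptr_kern *ptr)
{
	if (!data) {
		bpf_dynptr_set_null(ptr);
		return -EINVAL;
	}
	bpf_dynptr_init(ptr, data, BPF_DYNPTR_TYPE_LOCAL, 0, size);
	return 0;
}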
2527 bool dev_check_flush(void);
2528 bool cpu_map_check_flush(void);
2529 #else /* !CONFIG_BPF_SYSCALL */
2530 static inline struct bpf_prog *bpf_prog_get(u32 ufd)
2532 return ERR_PTR(-EOPNOTSUPP);
2535 static inline struct bpf_prog *bpf_prog_get_type_dev(u32 ufd,
2536 enum bpf_prog_type type,
2539 return ERR_PTR(-EOPNOTSUPP);
2542 static inline void bpf_prog_add(struct bpf_prog *prog, int i)
2546 static inline void bpf_prog_sub(struct bpf_prog *prog, int i)
2550 static inline void bpf_prog_put(struct bpf_prog *prog)
2554 static inline void bpf_prog_inc(struct bpf_prog *prog)
2558 static inline struct bpf_prog *__must_check
2559 bpf_prog_inc_not_zero(struct bpf_prog *prog)
2561 return ERR_PTR(-EOPNOTSUPP);
2564 static inline void bpf_link_init(struct bpf_link *link, enum bpf_link_type type,
2565 const struct bpf_link_ops *ops,
2566 struct bpf_prog *prog)
2570 static inline int bpf_link_prime(struct bpf_link *link,
2571 struct bpf_link_primer *primer)
2576 static inline int bpf_link_settle(struct bpf_link_primer *primer)
2581 static inline void bpf_link_cleanup(struct bpf_link_primer *primer)
2585 static inline void bpf_link_inc(struct bpf_link *link)
2589 static inline void bpf_link_put(struct bpf_link *link)
2593 static inline int bpf_obj_get_user(int path_fd, const char __user *pathname, int flags)
2598 static inline void __dev_flush(void)
2603 struct bpf_dtab_netdev;
2604 struct bpf_cpu_map_entry;
2607 int dev_xdp_enqueue(struct net_device *dev, struct xdp_frame *xdpf,
2608 struct net_device *dev_rx)
2614 int dev_map_enqueue(struct bpf_dtab_netdev *dst, struct xdp_frame *xdpf,
2615 struct net_device *dev_rx)
2621 int dev_map_enqueue_multi(struct xdp_frame *xdpf, struct net_device *dev_rx,
2622 struct bpf_map *map, bool exclude_ingress)
2629 static inline int dev_map_generic_redirect(struct bpf_dtab_netdev *dst,
2630 struct sk_buff *skb,
2631 struct bpf_prog *xdp_prog)
2637 int dev_map_redirect_multi(struct net_device *dev, struct sk_buff *skb,
2638 struct bpf_prog *xdp_prog, struct bpf_map *map,
2639 bool exclude_ingress)
2644 static inline void __cpu_map_flush(void)
2648 static inline int cpu_map_enqueue(struct bpf_cpu_map_entry *rcpu,
2649 struct xdp_frame *xdpf,
2650 struct net_device *dev_rx)
2655 static inline int cpu_map_generic_redirect(struct bpf_cpu_map_entry *rcpu,
2656 struct sk_buff *skb)
2661 static inline struct bpf_prog *bpf_prog_get_type_path(const char *name,
2662 enum bpf_prog_type type)
2664 return ERR_PTR(-EOPNOTSUPP);
2667 static inline int bpf_prog_test_run_xdp(struct bpf_prog *prog,
2668 const union bpf_attr *kattr,
2669 union bpf_attr __user *uattr)
2674 static inline int bpf_prog_test_run_skb(struct bpf_prog *prog,
2675 const union bpf_attr *kattr,
2676 union bpf_attr __user *uattr)
2681 static inline int bpf_prog_test_run_tracing(struct bpf_prog *prog,
2682 const union bpf_attr *kattr,
2683 union bpf_attr __user *uattr)
2688 static inline int bpf_prog_test_run_flow_dissector(struct bpf_prog *prog,
2689 const union bpf_attr *kattr,
2690 union bpf_attr __user *uattr)
2695 static inline int bpf_prog_test_run_sk_lookup(struct bpf_prog *prog,
2696 const union bpf_attr *kattr,
2697 union bpf_attr __user *uattr)
2702 static inline void bpf_map_put(struct bpf_map *map)
2706 static inline struct bpf_prog *bpf_prog_by_id(u32 id)
2708 return ERR_PTR(-ENOTSUPP);
2711 static inline int btf_struct_access(struct bpf_verifier_log *log,
2712 const struct bpf_reg_state *reg,
2713 int off, int size, enum bpf_access_type atype,
2714 u32 *next_btf_id, enum bpf_type_flag *flag,
2715 const char **field_name)
2720 static inline const struct bpf_func_proto *
2721 bpf_base_func_proto(enum bpf_func_id func_id)
2726 static inline void bpf_task_storage_free(struct task_struct *task)
2730 static inline bool bpf_prog_has_kfunc_call(const struct bpf_prog *prog)
2735 static inline const struct btf_func_model *
2736 bpf_jit_find_kfunc_model(const struct bpf_prog *prog,
2737 const struct bpf_insn *insn)
2743 bpf_get_kfunc_addr(const struct bpf_prog *prog, u32 func_id,
2744 u16 btf_fd_idx, u8 **func_addr)
2749 static inline bool unprivileged_ebpf_enabled(void)
2754 static inline bool has_current_bpf_ctx(void)
2759 static inline void bpf_prog_inc_misses_counter(struct bpf_prog *prog)
2763 static inline void bpf_cgrp_storage_free(struct cgroup *cgroup)
2767 static inline void bpf_dynptr_init(struct bpf_dynptr_kern *ptr, void *data,
2768 enum bpf_dynptr_type type, u32 offset, u32 size)
2772 static inline void bpf_dynptr_set_null(struct bpf_dynptr_kern *ptr)
2776 static inline void bpf_dynptr_set_rdonly(struct bpf_dynptr_kern *ptr)
2779 #endif /* CONFIG_BPF_SYSCALL */
2781 static __always_inline int
2782 bpf_probe_read_kernel_common(void *dst, u32 size, const void *unsafe_ptr)
2786 if (IS_ENABLED(CONFIG_BPF_EVENTS))
2787 ret = copy_from_kernel_nofault(dst, unsafe_ptr, size);
2788 if (unlikely(ret < 0))
2789 memset(dst, 0, size);
2793 void __bpf_free_used_btfs(struct bpf_prog_aux *aux,
2794 struct btf_mod_pair *used_btfs, u32 len);
2796 static inline struct bpf_prog *bpf_prog_get_type(u32 ufd,
2797 enum bpf_prog_type type)
2799 return bpf_prog_get_type_dev(ufd, type, false);
2802 void __bpf_free_used_maps(struct bpf_prog_aux *aux,
2803 struct bpf_map **used_maps, u32 len);
2805 bool bpf_prog_get_ok(struct bpf_prog *, enum bpf_prog_type *, bool);
2807 int bpf_prog_offload_compile(struct bpf_prog *prog);
2808 void bpf_prog_dev_bound_destroy(struct bpf_prog *prog);
2809 int bpf_prog_offload_info_fill(struct bpf_prog_info *info,
2810 struct bpf_prog *prog);
2812 int bpf_map_offload_info_fill(struct bpf_map_info *info, struct bpf_map *map);
2814 int bpf_map_offload_lookup_elem(struct bpf_map *map, void *key, void *value);
2815 int bpf_map_offload_update_elem(struct bpf_map *map,
2816 void *key, void *value, u64 flags);
2817 int bpf_map_offload_delete_elem(struct bpf_map *map, void *key);
2818 int bpf_map_offload_get_next_key(struct bpf_map *map,
2819 void *key, void *next_key);
2821 bool bpf_offload_prog_map_match(struct bpf_prog *prog, struct bpf_map *map);
2823 struct bpf_offload_dev *
2824 bpf_offload_dev_create(const struct bpf_prog_offload_ops *ops, void *priv);
2825 void bpf_offload_dev_destroy(struct bpf_offload_dev *offdev);
2826 void *bpf_offload_dev_priv(struct bpf_offload_dev *offdev);
2827 int bpf_offload_dev_netdev_register(struct bpf_offload_dev *offdev,
2828 struct net_device *netdev);
2829 void bpf_offload_dev_netdev_unregister(struct bpf_offload_dev *offdev,
2830 struct net_device *netdev);
2831 bool bpf_offload_dev_match(struct bpf_prog *prog, struct net_device *netdev);
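/* Editor's sketch of the device-offload registration flow above, modeled
 * on the netdevsim driver; example_offload_ops and example_offload_init()
 * are hypothetical.
 */
static const struct bpf_prog_offload_ops example_offload_ops = {
	/* ->prepare/->translate/->destroy verifier callbacks go here */
};

static int example_offload_init(struct net_device *netdev, void *priv)
{
	struct bpf_offload_dev *offdev;
	int err;

	offdev = bpf_offload_dev_create(&example_offload_ops, priv);
	if (IS_ERR(offdev))
		return PTR_ERR(offdev);

	err = bpf_offload_dev_netdev_register(offdev, netdev);
	if (err)
		bpf_offload_dev_destroy(offdev);
	return err;
}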
2833 void unpriv_ebpf_notify(int new_state);
2835 #if defined(CONFIG_NET) && defined(CONFIG_BPF_SYSCALL)
2836 int bpf_dev_bound_kfunc_check(struct bpf_verifier_log *log,
2837 struct bpf_prog_aux *prog_aux);
2838 void *bpf_dev_bound_resolve_kfunc(struct bpf_prog *prog, u32 func_id);
2839 int bpf_prog_dev_bound_init(struct bpf_prog *prog, union bpf_attr *attr);
2840 int bpf_prog_dev_bound_inherit(struct bpf_prog *new_prog, struct bpf_prog *old_prog);
2841 void bpf_dev_bound_netdev_unregister(struct net_device *dev);
2843 static inline bool bpf_prog_is_dev_bound(const struct bpf_prog_aux *aux)
2845 return aux->dev_bound;
2848 static inline bool bpf_prog_is_offloaded(const struct bpf_prog_aux *aux)
2850 return aux->offload_requested;
2853 bool bpf_prog_dev_bound_match(const struct bpf_prog *lhs, const struct bpf_prog *rhs);
2855 static inline bool bpf_map_is_offloaded(struct bpf_map *map)
2857 return unlikely(map->ops == &bpf_map_offload_ops);
2860 struct bpf_map *bpf_map_offload_map_alloc(union bpf_attr *attr);
2861 void bpf_map_offload_map_free(struct bpf_map *map);
2862 u64 bpf_map_offload_map_mem_usage(const struct bpf_map *map);
2863 int bpf_prog_test_run_syscall(struct bpf_prog *prog,
2864 const union bpf_attr *kattr,
2865 union bpf_attr __user *uattr);
2867 int sock_map_get_from_fd(const union bpf_attr *attr, struct bpf_prog *prog);
2868 int sock_map_prog_detach(const union bpf_attr *attr, enum bpf_prog_type ptype);
2869 int sock_map_update_elem_sys(struct bpf_map *map, void *key, void *value, u64 flags);
2870 int sock_map_bpf_prog_query(const union bpf_attr *attr,
2871 union bpf_attr __user *uattr);
2873 void sock_map_unhash(struct sock *sk);
2874 void sock_map_destroy(struct sock *sk);
2875 void sock_map_close(struct sock *sk, long timeout);
2877 static inline int bpf_dev_bound_kfunc_check(struct bpf_verifier_log *log,
2878 struct bpf_prog_aux *prog_aux)
2883 static inline void *bpf_dev_bound_resolve_kfunc(struct bpf_prog *prog,
2889 static inline int bpf_prog_dev_bound_init(struct bpf_prog *prog,
2890 union bpf_attr *attr)
2895 static inline int bpf_prog_dev_bound_inherit(struct bpf_prog *new_prog,
2896 struct bpf_prog *old_prog)
2901 static inline void bpf_dev_bound_netdev_unregister(struct net_device *dev)
2905 static inline bool bpf_prog_is_dev_bound(const struct bpf_prog_aux *aux)
2910 static inline bool bpf_prog_is_offloaded(const struct bpf_prog_aux *aux)
2915 static inline bool bpf_prog_dev_bound_match(const struct bpf_prog *lhs, const struct bpf_prog *rhs)
2920 static inline bool bpf_map_is_offloaded(struct bpf_map *map)
2925 static inline struct bpf_map *bpf_map_offload_map_alloc(union bpf_attr *attr)
2927 return ERR_PTR(-EOPNOTSUPP);
2930 static inline void bpf_map_offload_map_free(struct bpf_map *map)
2934 static inline u64 bpf_map_offload_map_mem_usage(const struct bpf_map *map)
2939 static inline int bpf_prog_test_run_syscall(struct bpf_prog *prog,
2940 const union bpf_attr *kattr,
2941 union bpf_attr __user *uattr)
2946 #ifdef CONFIG_BPF_SYSCALL
2947 static inline int sock_map_get_from_fd(const union bpf_attr *attr,
2948 struct bpf_prog *prog)
2953 static inline int sock_map_prog_detach(const union bpf_attr *attr,
2954 enum bpf_prog_type ptype)
2959 static inline int sock_map_update_elem_sys(struct bpf_map *map, void *key, void *value,
2965 static inline int sock_map_bpf_prog_query(const union bpf_attr *attr,
2966 union bpf_attr __user *uattr)
2970 #endif /* CONFIG_BPF_SYSCALL */
2971 #endif /* CONFIG_NET && CONFIG_BPF_SYSCALL */
2973 static __always_inline void
2974 bpf_prog_inc_misses_counters(const struct bpf_prog_array *array)
2976 const struct bpf_prog_array_item *item;
2977 struct bpf_prog *prog;
2979 if (unlikely(!array))
2982 item = &array->items[0];
2983 while ((prog = READ_ONCE(item->prog))) {
2984 bpf_prog_inc_misses_counter(prog);
2989 #if defined(CONFIG_INET) && defined(CONFIG_BPF_SYSCALL)
2990 void bpf_sk_reuseport_detach(struct sock *sk);
2991 int bpf_fd_reuseport_array_lookup_elem(struct bpf_map *map, void *key,
2993 int bpf_fd_reuseport_array_update_elem(struct bpf_map *map, void *key,
2994 void *value, u64 map_flags);
2996 static inline void bpf_sk_reuseport_detach(struct sock *sk)
3000 #ifdef CONFIG_BPF_SYSCALL
3001 static inline int bpf_fd_reuseport_array_lookup_elem(struct bpf_map *map,
3002 void *key, void *value)
3007 static inline int bpf_fd_reuseport_array_update_elem(struct bpf_map *map,
3008 void *key, void *value,
3013 #endif /* CONFIG_BPF_SYSCALL */
3014 #endif /* defined(CONFIG_INET) && defined(CONFIG_BPF_SYSCALL) */
3016 /* verifier prototypes for helper functions called from eBPF programs */
3017 extern const struct bpf_func_proto bpf_map_lookup_elem_proto;
3018 extern const struct bpf_func_proto bpf_map_update_elem_proto;
3019 extern const struct bpf_func_proto bpf_map_delete_elem_proto;
3020 extern const struct bpf_func_proto bpf_map_push_elem_proto;
3021 extern const struct bpf_func_proto bpf_map_pop_elem_proto;
3022 extern const struct bpf_func_proto bpf_map_peek_elem_proto;
3023 extern const struct bpf_func_proto bpf_map_lookup_percpu_elem_proto;
3025 extern const struct bpf_func_proto bpf_get_prandom_u32_proto;
3026 extern const struct bpf_func_proto bpf_get_smp_processor_id_proto;
3027 extern const struct bpf_func_proto bpf_get_numa_node_id_proto;
3028 extern const struct bpf_func_proto bpf_tail_call_proto;
3029 extern const struct bpf_func_proto bpf_ktime_get_ns_proto;
3030 extern const struct bpf_func_proto bpf_ktime_get_boot_ns_proto;
3031 extern const struct bpf_func_proto bpf_ktime_get_tai_ns_proto;
3032 extern const struct bpf_func_proto bpf_get_current_pid_tgid_proto;
3033 extern const struct bpf_func_proto bpf_get_current_uid_gid_proto;
3034 extern const struct bpf_func_proto bpf_get_current_comm_proto;
3035 extern const struct bpf_func_proto bpf_get_stackid_proto;
3036 extern const struct bpf_func_proto bpf_get_stack_proto;
3037 extern const struct bpf_func_proto bpf_get_task_stack_proto;
3038 extern const struct bpf_func_proto bpf_get_stackid_proto_pe;
3039 extern const struct bpf_func_proto bpf_get_stack_proto_pe;
3040 extern const struct bpf_func_proto bpf_sock_map_update_proto;
3041 extern const struct bpf_func_proto bpf_sock_hash_update_proto;
3042 extern const struct bpf_func_proto bpf_get_current_cgroup_id_proto;
3043 extern const struct bpf_func_proto bpf_get_current_ancestor_cgroup_id_proto;
3044 extern const struct bpf_func_proto bpf_get_cgroup_classid_curr_proto;
3045 extern const struct bpf_func_proto bpf_msg_redirect_hash_proto;
3046 extern const struct bpf_func_proto bpf_msg_redirect_map_proto;
3047 extern const struct bpf_func_proto bpf_sk_redirect_hash_proto;
3048 extern const struct bpf_func_proto bpf_sk_redirect_map_proto;
3049 extern const struct bpf_func_proto bpf_spin_lock_proto;
3050 extern const struct bpf_func_proto bpf_spin_unlock_proto;
3051 extern const struct bpf_func_proto bpf_get_local_storage_proto;
3052 extern const struct bpf_func_proto bpf_strtol_proto;
3053 extern const struct bpf_func_proto bpf_strtoul_proto;
3054 extern const struct bpf_func_proto bpf_tcp_sock_proto;
3055 extern const struct bpf_func_proto bpf_jiffies64_proto;
3056 extern const struct bpf_func_proto bpf_get_ns_current_pid_tgid_proto;
3057 extern const struct bpf_func_proto bpf_event_output_data_proto;
3058 extern const struct bpf_func_proto bpf_ringbuf_output_proto;
3059 extern const struct bpf_func_proto bpf_ringbuf_reserve_proto;
3060 extern const struct bpf_func_proto bpf_ringbuf_submit_proto;
3061 extern const struct bpf_func_proto bpf_ringbuf_discard_proto;
3062 extern const struct bpf_func_proto bpf_ringbuf_query_proto;
3063 extern const struct bpf_func_proto bpf_ringbuf_reserve_dynptr_proto;
3064 extern const struct bpf_func_proto bpf_ringbuf_submit_dynptr_proto;
3065 extern const struct bpf_func_proto bpf_ringbuf_discard_dynptr_proto;
3066 extern const struct bpf_func_proto bpf_skc_to_tcp6_sock_proto;
3067 extern const struct bpf_func_proto bpf_skc_to_tcp_sock_proto;
3068 extern const struct bpf_func_proto bpf_skc_to_tcp_timewait_sock_proto;
3069 extern const struct bpf_func_proto bpf_skc_to_tcp_request_sock_proto;
3070 extern const struct bpf_func_proto bpf_skc_to_udp6_sock_proto;
3071 extern const struct bpf_func_proto bpf_skc_to_unix_sock_proto;
3072 extern const struct bpf_func_proto bpf_skc_to_mptcp_sock_proto;
3073 extern const struct bpf_func_proto bpf_copy_from_user_proto;
3074 extern const struct bpf_func_proto bpf_snprintf_btf_proto;
3075 extern const struct bpf_func_proto bpf_snprintf_proto;
3076 extern const struct bpf_func_proto bpf_per_cpu_ptr_proto;
3077 extern const struct bpf_func_proto bpf_this_cpu_ptr_proto;
3078 extern const struct bpf_func_proto bpf_ktime_get_coarse_ns_proto;
3079 extern const struct bpf_func_proto bpf_sock_from_file_proto;
3080 extern const struct bpf_func_proto bpf_get_socket_ptr_cookie_proto;
3081 extern const struct bpf_func_proto bpf_task_storage_get_recur_proto;
3082 extern const struct bpf_func_proto bpf_task_storage_get_proto;
3083 extern const struct bpf_func_proto bpf_task_storage_delete_recur_proto;
3084 extern const struct bpf_func_proto bpf_task_storage_delete_proto;
3085 extern const struct bpf_func_proto bpf_for_each_map_elem_proto;
3086 extern const struct bpf_func_proto bpf_btf_find_by_name_kind_proto;
3087 extern const struct bpf_func_proto bpf_sk_setsockopt_proto;
3088 extern const struct bpf_func_proto bpf_sk_getsockopt_proto;
3089 extern const struct bpf_func_proto bpf_unlocked_sk_setsockopt_proto;
3090 extern const struct bpf_func_proto bpf_unlocked_sk_getsockopt_proto;
3091 extern const struct bpf_func_proto bpf_find_vma_proto;
3092 extern const struct bpf_func_proto bpf_loop_proto;
3093 extern const struct bpf_func_proto bpf_copy_from_user_task_proto;
3094 extern const struct bpf_func_proto bpf_set_retval_proto;
3095 extern const struct bpf_func_proto bpf_get_retval_proto;
3096 extern const struct bpf_func_proto bpf_user_ringbuf_drain_proto;
3097 extern const struct bpf_func_proto bpf_cgrp_storage_get_proto;
3098 extern const struct bpf_func_proto bpf_cgrp_storage_delete_proto;
3100 const struct bpf_func_proto *tracing_prog_func_proto(
3101 enum bpf_func_id func_id, const struct bpf_prog *prog);
3103 /* Shared helpers among cBPF and eBPF. */
3104 void bpf_user_rnd_init_once(void);
3105 u64 bpf_user_rnd_u32(u64 r1, u64 r2, u64 r3, u64 r4, u64 r5);
3106 u64 bpf_get_raw_cpu_id(u64 r1, u64 r2, u64 r3, u64 r4, u64 r5);
3108 #if defined(CONFIG_NET)
3109 bool bpf_sock_common_is_valid_access(int off, int size,
3110 enum bpf_access_type type,
3111 struct bpf_insn_access_aux *info);
3112 bool bpf_sock_is_valid_access(int off, int size, enum bpf_access_type type,
3113 struct bpf_insn_access_aux *info);
3114 u32 bpf_sock_convert_ctx_access(enum bpf_access_type type,
3115 const struct bpf_insn *si,
3116 struct bpf_insn *insn_buf,
3117 struct bpf_prog *prog,
3119 int bpf_dynptr_from_skb_rdonly(struct sk_buff *skb, u64 flags,
3120 struct bpf_dynptr_kern *ptr);
3122 static inline bool bpf_sock_common_is_valid_access(int off, int size,
3123 enum bpf_access_type type,
3124 struct bpf_insn_access_aux *info)
3128 static inline bool bpf_sock_is_valid_access(int off, int size,
3129 enum bpf_access_type type,
3130 struct bpf_insn_access_aux *info)
3134 static inline u32 bpf_sock_convert_ctx_access(enum bpf_access_type type,
3135 const struct bpf_insn *si,
3136 struct bpf_insn *insn_buf,
3137 struct bpf_prog *prog,
3142 static inline int bpf_dynptr_from_skb_rdonly(struct sk_buff *skb, u64 flags,
3143 struct bpf_dynptr_kern *ptr)
3150 struct sk_reuseport_kern {
3151 struct sk_buff *skb;
3153 struct sock *selected_sk;
3154 struct sock *migrating_sk;
3160 bool bpf_tcp_sock_is_valid_access(int off, int size, enum bpf_access_type type,
3161 struct bpf_insn_access_aux *info);
3163 u32 bpf_tcp_sock_convert_ctx_access(enum bpf_access_type type,
3164 const struct bpf_insn *si,
3165 struct bpf_insn *insn_buf,
3166 struct bpf_prog *prog,
3169 bool bpf_xdp_sock_is_valid_access(int off, int size, enum bpf_access_type type,
3170 struct bpf_insn_access_aux *info);
3172 u32 bpf_xdp_sock_convert_ctx_access(enum bpf_access_type type,
3173 const struct bpf_insn *si,
3174 struct bpf_insn *insn_buf,
3175 struct bpf_prog *prog,
3178 static inline bool bpf_tcp_sock_is_valid_access(int off, int size,
3179 enum bpf_access_type type,
3180 struct bpf_insn_access_aux *info)
3185 static inline u32 bpf_tcp_sock_convert_ctx_access(enum bpf_access_type type,
3186 const struct bpf_insn *si,
3187 struct bpf_insn *insn_buf,
3188 struct bpf_prog *prog,
3193 static inline bool bpf_xdp_sock_is_valid_access(int off, int size,
3194 enum bpf_access_type type,
3195 struct bpf_insn_access_aux *info)
3200 static inline u32 bpf_xdp_sock_convert_ctx_access(enum bpf_access_type type,
3201 const struct bpf_insn *si,
3202 struct bpf_insn *insn_buf,
3203 struct bpf_prog *prog,
3208 #endif /* CONFIG_INET */
3210 enum bpf_text_poke_type {
3215 int bpf_arch_text_poke(void *ip, enum bpf_text_poke_type t,
3216 void *addr1, void *addr2);
3218 void bpf_arch_poke_desc_update(struct bpf_jit_poke_descriptor *poke,
3219 struct bpf_prog *new, struct bpf_prog *old);
3221 void *bpf_arch_text_copy(void *dst, void *src, size_t len);
3222 int bpf_arch_text_invalidate(void *dst, size_t len);
3225 bool btf_id_set_contains(const struct btf_id_set *set, u32 id);
3227 #define MAX_BPRINTF_VARARGS 12
3228 #define MAX_BPRINTF_BUF 1024
3230 struct bpf_bprintf_data {
3237 int bpf_bprintf_prepare(char *fmt, u32 fmt_size, const u64 *raw_args,
3238 u32 num_args, struct bpf_bprintf_data *data);
3239 void bpf_bprintf_cleanup(struct bpf_bprintf_data *data);
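/* Editor's sketch of the prepare/cleanup pairing above, following the
 * bpf_trace_printk pattern; example_bprintf() is hypothetical. prepare()
 * decodes up to MAX_BPRINTF_VARARGS arguments into bin_args, bstr_printf()
 * consumes them, and cleanup() releases the per-cpu buffers.
 */
static int example_bprintf(char *out, size_t out_sz, char *fmt, u32 fmt_size,
			   const u64 *raw_args, u32 num_args)
{
	struct bpf_bprintf_data data = { .get_bin_args = true };
	int ret;

	ret = bpf_bprintf_prepare(fmt, fmt_size, raw_args, num_args, &data);
	if (ret < 0)
		return ret;

	ret = bstr_printf(out, out_sz, fmt, data.bin_args);
	bpf_bprintf_cleanup(&data);
	return ret;
}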
3241 #ifdef CONFIG_BPF_LSM
3242 void bpf_cgroup_atype_get(u32 attach_btf_id, int cgroup_atype);
3243 void bpf_cgroup_atype_put(int cgroup_atype);
3245 static inline void bpf_cgroup_atype_get(u32 attach_btf_id, int cgroup_atype) {}
3246 static inline void bpf_cgroup_atype_put(int cgroup_atype) {}
3247 #endif /* CONFIG_BPF_LSM */
3256 #endif /* CONFIG_KEYS */
3258 static inline bool type_is_alloc(u32 type)
3260 return type & MEM_ALLOC;
3263 static inline gfp_t bpf_memcg_flags(gfp_t flags)
3265 if (memcg_bpf_enabled())
3266 return flags | __GFP_ACCOUNT;
3270 static inline bool bpf_is_subprog(const struct bpf_prog *prog)
3272 return prog->aux->func_idx != 0;
3275 #endif /* _LINUX_BPF_H */