1 // SPDX-License-Identifier: GPL-2.0-or-later
3 * Linux Socket Filter - Kernel level socket filtering
5 * Based on the design of the Berkeley Packet Filter. The new
6 * internal format has been designed by PLUMgrid:
8 * Copyright (c) 2011 - 2014 PLUMgrid, http://plumgrid.com
12 * Jay Schulist <jschlst@samba.org>
13 * Alexei Starovoitov <ast@plumgrid.com>
14 * Daniel Borkmann <dborkman@redhat.com>
16 * Andi Kleen - Fix a few bad bugs and races.
17 * Kris Katterjohn - Added many additional checks in bpf_check_classic()
20 #include <uapi/linux/btf.h>
21 #include <linux/filter.h>
22 #include <linux/skbuff.h>
23 #include <linux/vmalloc.h>
24 #include <linux/random.h>
25 #include <linux/moduleloader.h>
26 #include <linux/bpf.h>
27 #include <linux/btf.h>
28 #include <linux/objtool.h>
29 #include <linux/rbtree_latch.h>
30 #include <linux/kallsyms.h>
31 #include <linux/rcupdate.h>
32 #include <linux/perf_event.h>
33 #include <linux/extable.h>
34 #include <linux/log2.h>
35 #include <asm/unaligned.h>
38 #define BPF_R0 regs[BPF_REG_0]
39 #define BPF_R1 regs[BPF_REG_1]
40 #define BPF_R2 regs[BPF_REG_2]
41 #define BPF_R3 regs[BPF_REG_3]
42 #define BPF_R4 regs[BPF_REG_4]
43 #define BPF_R5 regs[BPF_REG_5]
44 #define BPF_R6 regs[BPF_REG_6]
45 #define BPF_R7 regs[BPF_REG_7]
46 #define BPF_R8 regs[BPF_REG_8]
47 #define BPF_R9 regs[BPF_REG_9]
48 #define BPF_R10 regs[BPF_REG_10]
51 #define DST regs[insn->dst_reg]
52 #define SRC regs[insn->src_reg]
53 #define FP regs[BPF_REG_FP]
54 #define AX regs[BPF_REG_AX]
55 #define ARG1 regs[BPF_REG_ARG1]
56 #define CTX regs[BPF_REG_CTX]
59 /* No hurry in this branch
61 * Exported for the bpf jit load helper.
63 void *bpf_internal_load_pointer_neg_helper(const struct sk_buff *skb, int k, unsigned int size)
68 ptr = skb_network_header(skb) + k - SKF_NET_OFF;
69 else if (k >= SKF_LL_OFF)
70 ptr = skb_mac_header(skb) + k - SKF_LL_OFF;
72 if (ptr >= skb->head && ptr + size <= skb_tail_pointer(skb))
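/* Illustrative example (not part of the original source): a classic BPF load
 * such as "ld [SKF_NET_OFF + 12]" reaches this helper with k = SKF_NET_OFF + 12,
 * which resolves to 12 bytes past the network header -- the IPv4 source
 * address for a plain IPv4 packet -- provided the bounds check above passes.
 */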
78 struct bpf_prog *bpf_prog_alloc_no_stats(unsigned int size, gfp_t gfp_extra_flags)
80 gfp_t gfp_flags = GFP_KERNEL_ACCOUNT | __GFP_ZERO | gfp_extra_flags;
81 struct bpf_prog_aux *aux;
84 size = round_up(size, PAGE_SIZE);
85 fp = __vmalloc(size, gfp_flags);
89 aux = kzalloc(sizeof(*aux), GFP_KERNEL_ACCOUNT | gfp_extra_flags);
94 fp->active = alloc_percpu_gfp(int, GFP_KERNEL_ACCOUNT | gfp_extra_flags);
101 fp->pages = size / PAGE_SIZE;
104 fp->jit_requested = ebpf_jit_enabled();
106 INIT_LIST_HEAD_RCU(&fp->aux->ksym.lnode);
107 mutex_init(&fp->aux->used_maps_mutex);
108 mutex_init(&fp->aux->dst_mutex);
113 struct bpf_prog *bpf_prog_alloc(unsigned int size, gfp_t gfp_extra_flags)
115 gfp_t gfp_flags = GFP_KERNEL_ACCOUNT | __GFP_ZERO | gfp_extra_flags;
116 struct bpf_prog *prog;
119 prog = bpf_prog_alloc_no_stats(size, gfp_extra_flags);
123 prog->stats = alloc_percpu_gfp(struct bpf_prog_stats, gfp_flags);
125 free_percpu(prog->active);
131 for_each_possible_cpu(cpu) {
132 struct bpf_prog_stats *pstats;
134 pstats = per_cpu_ptr(prog->stats, cpu);
135 u64_stats_init(&pstats->syncp);
139 EXPORT_SYMBOL_GPL(bpf_prog_alloc);
141 int bpf_prog_alloc_jited_linfo(struct bpf_prog *prog)
143 if (!prog->aux->nr_linfo || !prog->jit_requested)
146 prog->aux->jited_linfo = kcalloc(prog->aux->nr_linfo,
147 sizeof(*prog->aux->jited_linfo),
148 GFP_KERNEL_ACCOUNT | __GFP_NOWARN);
149 if (!prog->aux->jited_linfo)
155 void bpf_prog_free_jited_linfo(struct bpf_prog *prog)
157 kfree(prog->aux->jited_linfo);
158 prog->aux->jited_linfo = NULL;
161 void bpf_prog_free_unused_jited_linfo(struct bpf_prog *prog)
163 if (prog->aux->jited_linfo && !prog->aux->jited_linfo[0])
164 bpf_prog_free_jited_linfo(prog);
167 /* The JIT engine is responsible for providing an array
168 * for the insn_off to jited_off mapping (insn_to_jit_off).
170 * The idx to this array is the insn_off. Hence, the insn_off
171 * here is relative to the prog itself instead of the main prog.
172 * This array has one entry for each xlated bpf insn.
174 * jited_off is the byte off to the last byte of the jited insn.
178 * insn_start is the first bpf insn off of the prog. The insn off
179 * here is relative to the main prog;
180 * e.g. if prog is a subprog, insn_start > 0.
182 * linfo_idx is the prog's idx into prog->aux->linfo and jited_linfo, so
184 * jited_linfo[linfo_idx] = prog->bpf_func
188 * and, for i > linfo_idx, jited_linfo[i] = prog->bpf_func +
189 * insn_to_jit_off[linfo[i].insn_off - insn_start - 1]
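/* Illustrative example (values invented): for a subprog with insn_start = 10
 * and linfo_idx = 3, jited_linfo[3] is simply prog->bpf_func, and a line info
 * entry with linfo[4].insn_off = 13 maps to
 *   prog->bpf_func + insn_to_jit_off[13 - 10 - 1],
 * i.e. the jited offset recorded for the xlated insn preceding it.
 */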
191 void bpf_prog_fill_jited_linfo(struct bpf_prog *prog,
192 const u32 *insn_to_jit_off)
194 u32 linfo_idx, insn_start, insn_end, nr_linfo, i;
195 const struct bpf_line_info *linfo;
198 if (!prog->aux->jited_linfo)
199 /* Userspace did not provide linfo */
202 linfo_idx = prog->aux->linfo_idx;
203 linfo = &prog->aux->linfo[linfo_idx];
204 insn_start = linfo[0].insn_off;
205 insn_end = insn_start + prog->len;
207 jited_linfo = &prog->aux->jited_linfo[linfo_idx];
208 jited_linfo[0] = prog->bpf_func;
210 nr_linfo = prog->aux->nr_linfo - linfo_idx;
212 for (i = 1; i < nr_linfo && linfo[i].insn_off < insn_end; i++)
213 /* The verifier ensures that linfo[i].insn_off is
214 * strictly increasing
216 jited_linfo[i] = prog->bpf_func +
217 insn_to_jit_off[linfo[i].insn_off - insn_start - 1];
220 void bpf_prog_free_linfo(struct bpf_prog *prog)
222 bpf_prog_free_jited_linfo(prog);
223 kvfree(prog->aux->linfo);
226 struct bpf_prog *bpf_prog_realloc(struct bpf_prog *fp_old, unsigned int size,
227 gfp_t gfp_extra_flags)
229 gfp_t gfp_flags = GFP_KERNEL_ACCOUNT | __GFP_ZERO | gfp_extra_flags;
233 size = round_up(size, PAGE_SIZE);
234 pages = size / PAGE_SIZE;
235 if (pages <= fp_old->pages)
238 fp = __vmalloc(size, gfp_flags);
240 memcpy(fp, fp_old, fp_old->pages * PAGE_SIZE);
244 /* We keep fp->aux from fp_old around in the new
245 * reallocated structure.
248 fp_old->stats = NULL;
249 fp_old->active = NULL;
250 __bpf_prog_free(fp_old);
256 void __bpf_prog_free(struct bpf_prog *fp)
259 mutex_destroy(&fp->aux->used_maps_mutex);
260 mutex_destroy(&fp->aux->dst_mutex);
261 kfree(fp->aux->poke_tab);
264 free_percpu(fp->stats);
265 free_percpu(fp->active);
269 int bpf_prog_calc_tag(struct bpf_prog *fp)
271 const u32 bits_offset = SHA1_BLOCK_SIZE - sizeof(__be64);
272 u32 raw_size = bpf_prog_tag_scratch_size(fp);
273 u32 digest[SHA1_DIGEST_WORDS];
274 u32 ws[SHA1_WORKSPACE_WORDS];
275 u32 i, bsize, psize, blocks;
276 struct bpf_insn *dst;
282 raw = vmalloc(raw_size);
287 memset(ws, 0, sizeof(ws));
289 /* We need to take out the map fds for the digest calculation
290 * since they are unstable from the user space side.
293 for (i = 0, was_ld_map = false; i < fp->len; i++) {
294 dst[i] = fp->insnsi[i];
296 dst[i].code == (BPF_LD | BPF_IMM | BPF_DW) &&
297 (dst[i].src_reg == BPF_PSEUDO_MAP_FD ||
298 dst[i].src_reg == BPF_PSEUDO_MAP_VALUE)) {
301 } else if (was_ld_map &&
303 dst[i].dst_reg == 0 &&
304 dst[i].src_reg == 0 &&
313 psize = bpf_prog_insn_size(fp);
314 memset(&raw[psize], 0, raw_size - psize);
317 bsize = round_up(psize, SHA1_BLOCK_SIZE);
318 blocks = bsize / SHA1_BLOCK_SIZE;
320 if (bsize - psize >= sizeof(__be64)) {
321 bits = (__be64 *)(todo + bsize - sizeof(__be64));
323 bits = (__be64 *)(todo + bsize + bits_offset);
326 *bits = cpu_to_be64((psize - 1) << 3);
329 sha1_transform(digest, todo, ws);
330 todo += SHA1_BLOCK_SIZE;
333 result = (__force __be32 *)digest;
334 for (i = 0; i < SHA1_DIGEST_WORDS; i++)
335 result[i] = cpu_to_be32(digest[i]);
336 memcpy(fp->tag, result, sizeof(fp->tag));
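/* The resulting tag is the leading bytes of the big-endian SHA-1 digest of
 * the (map-fd-sanitized) instruction image; it is what shows up, for example,
 * as the "prog_tag" line in the prog fd's fdinfo and in bpftool output.
 */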
342 static int bpf_adj_delta_to_imm(struct bpf_insn *insn, u32 pos, s32 end_old,
343 s32 end_new, s32 curr, const bool probe_pass)
345 const s64 imm_min = S32_MIN, imm_max = S32_MAX;
346 s32 delta = end_new - end_old;
349 if (curr < pos && curr + imm + 1 >= end_old)
351 else if (curr >= end_new && curr + imm + 1 < end_new)
353 if (imm < imm_min || imm > imm_max)
360 static int bpf_adj_delta_to_off(struct bpf_insn *insn, u32 pos, s32 end_old,
361 s32 end_new, s32 curr, const bool probe_pass)
363 const s32 off_min = S16_MIN, off_max = S16_MAX;
364 s32 delta = end_new - end_old;
367 if (curr < pos && curr + off + 1 >= end_old)
369 else if (curr >= end_new && curr + off + 1 < end_new)
371 if (off < off_min || off > off_max)
378 static int bpf_adj_branches(struct bpf_prog *prog, u32 pos, s32 end_old,
379 s32 end_new, const bool probe_pass)
381 u32 i, insn_cnt = prog->len + (probe_pass ? end_new - end_old : 0);
382 struct bpf_insn *insn = prog->insnsi;
385 for (i = 0; i < insn_cnt; i++, insn++) {
388 /* In the probing pass we still operate on the original,
389 * unpatched image in order to check overflows before we
390 * do any other adjustments. Therefore skip the patchlet.
392 if (probe_pass && i == pos) {
394 insn = prog->insnsi + end_old;
397 if ((BPF_CLASS(code) != BPF_JMP &&
398 BPF_CLASS(code) != BPF_JMP32) ||
399 BPF_OP(code) == BPF_EXIT)
401 /* Adjust offset of jmps if we cross patch boundaries. */
402 if (BPF_OP(code) == BPF_CALL) {
403 if (insn->src_reg != BPF_PSEUDO_CALL)
405 ret = bpf_adj_delta_to_imm(insn, pos, end_old,
406 end_new, i, probe_pass);
408 ret = bpf_adj_delta_to_off(insn, pos, end_old,
409 end_new, i, probe_pass);
418 static void bpf_adj_linfo(struct bpf_prog *prog, u32 off, u32 delta)
420 struct bpf_line_info *linfo;
423 nr_linfo = prog->aux->nr_linfo;
424 if (!nr_linfo || !delta)
427 linfo = prog->aux->linfo;
429 for (i = 0; i < nr_linfo; i++)
430 if (off < linfo[i].insn_off)
433 /* Push all off < linfo[i].insn_off by delta */
434 for (; i < nr_linfo; i++)
435 linfo[i].insn_off += delta;
438 struct bpf_prog *bpf_patch_insn_single(struct bpf_prog *prog, u32 off,
439 const struct bpf_insn *patch, u32 len)
441 u32 insn_adj_cnt, insn_rest, insn_delta = len - 1;
442 const u32 cnt_max = S16_MAX;
443 struct bpf_prog *prog_adj;
446 /* Since our patchlet doesn't expand the image, we're done. */
447 if (insn_delta == 0) {
448 memcpy(prog->insnsi + off, patch, sizeof(*patch));
452 insn_adj_cnt = prog->len + insn_delta;
454 /* Reject anything that would potentially let the insn->off
455 * target overflow when we have excessive program expansions.
456 * We need to probe here before we do any reallocation, since
457 * afterwards we may no longer be able to fail.
459 if (insn_adj_cnt > cnt_max &&
460 (err = bpf_adj_branches(prog, off, off + 1, off + len, true)))
463 /* Several new instructions need to be inserted. Make room
464 * for them. Likely, there's no need for a new allocation as
465 * the last page could have large enough tailroom.
467 prog_adj = bpf_prog_realloc(prog, bpf_prog_size(insn_adj_cnt),
470 return ERR_PTR(-ENOMEM);
472 prog_adj->len = insn_adj_cnt;
474 /* Patching happens in 3 steps:
476 * 1) Move over tail of insnsi from next instruction onwards,
477 * so we can patch the single target insn with one or more
478 * new ones (patching is always from 1 to n insns, n > 0).
479 * 2) Inject new instructions at the target location.
480 * 3) Adjust branch offsets if necessary.
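/* Worked example (illustrative): patching off = 5 with a 3-insn patchlet
 * gives insn_delta = 2; the tail starting at insn 6 moves to insn 8, the
 * three new insns land at 5..7, and branches crossing the patched region
 * are adjusted by +2 via bpf_adj_branches().
 */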
482 insn_rest = insn_adj_cnt - off - len;
484 memmove(prog_adj->insnsi + off + len, prog_adj->insnsi + off + 1,
485 sizeof(*patch) * insn_rest);
486 memcpy(prog_adj->insnsi + off, patch, sizeof(*patch) * len);
488 /* We are guaranteed not to fail at this point; otherwise
489 * the ship has sailed and we cannot revert to the original state. An
490 * overflow cannot happen at this point.
492 BUG_ON(bpf_adj_branches(prog_adj, off, off + 1, off + len, false));
494 bpf_adj_linfo(prog_adj, off, insn_delta);
499 int bpf_remove_insns(struct bpf_prog *prog, u32 off, u32 cnt)
501 /* Branch offsets can't overflow when the program is shrinking, so there
502 * is no need to call bpf_adj_branches(..., true) here.
504 memmove(prog->insnsi + off, prog->insnsi + off + cnt,
505 sizeof(struct bpf_insn) * (prog->len - off - cnt));
508 return WARN_ON_ONCE(bpf_adj_branches(prog, off, off + cnt, off, false));
511 static void bpf_prog_kallsyms_del_subprogs(struct bpf_prog *fp)
515 for (i = 0; i < fp->aux->func_cnt; i++)
516 bpf_prog_kallsyms_del(fp->aux->func[i]);
519 void bpf_prog_kallsyms_del_all(struct bpf_prog *fp)
521 bpf_prog_kallsyms_del_subprogs(fp);
522 bpf_prog_kallsyms_del(fp);
525 #ifdef CONFIG_BPF_JIT
526 /* All BPF JIT sysctl knobs here. */
527 int bpf_jit_enable __read_mostly = IS_BUILTIN(CONFIG_BPF_JIT_DEFAULT_ON);
528 int bpf_jit_kallsyms __read_mostly = IS_BUILTIN(CONFIG_BPF_JIT_DEFAULT_ON);
529 int bpf_jit_harden __read_mostly;
530 long bpf_jit_limit __read_mostly;
533 bpf_prog_ksym_set_addr(struct bpf_prog *prog)
535 const struct bpf_binary_header *hdr = bpf_jit_binary_hdr(prog);
536 unsigned long addr = (unsigned long)hdr;
538 WARN_ON_ONCE(!bpf_prog_ebpf_jited(prog));
540 prog->aux->ksym.start = (unsigned long) prog->bpf_func;
541 prog->aux->ksym.end = addr + hdr->pages * PAGE_SIZE;
545 bpf_prog_ksym_set_name(struct bpf_prog *prog)
547 char *sym = prog->aux->ksym.name;
548 const char *end = sym + KSYM_NAME_LEN;
549 const struct btf_type *type;
550 const char *func_name;
552 BUILD_BUG_ON(sizeof("bpf_prog_") +
553 sizeof(prog->tag) * 2 +
554 /* name has been null terminated.
555 * We would need +1 for the '_' preceding
556 * the name. However, the null character
557 * is double counted between the name and the
558 * sizeof("bpf_prog_") above, so we omit it here.
561 sizeof(prog->aux->name) > KSYM_NAME_LEN);
563 sym += snprintf(sym, KSYM_NAME_LEN, "bpf_prog_");
564 sym = bin2hex(sym, prog->tag, sizeof(prog->tag));
566 /* prog->aux->name will be ignored if full btf name is available */
567 if (prog->aux->func_info_cnt) {
568 type = btf_type_by_id(prog->aux->btf,
569 prog->aux->func_info[prog->aux->func_idx].type_id);
570 func_name = btf_name_by_offset(prog->aux->btf, type->name_off);
571 snprintf(sym, (size_t)(end - sym), "_%s", func_name);
575 if (prog->aux->name[0])
576 snprintf(sym, (size_t)(end - sym), "_%s", prog->aux->name);
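/* The resulting kallsyms entry therefore has the form
 * "bpf_prog_<16 hex chars of tag>_<btf func or prog name>", e.g. something
 * like bpf_prog_5a8e02f383621307_xdp_prog (the name here is invented).
 */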
581 static unsigned long bpf_get_ksym_start(struct latch_tree_node *n)
583 return container_of(n, struct bpf_ksym, tnode)->start;
586 static __always_inline bool bpf_tree_less(struct latch_tree_node *a,
587 struct latch_tree_node *b)
589 return bpf_get_ksym_start(a) < bpf_get_ksym_start(b);
592 static __always_inline int bpf_tree_comp(void *key, struct latch_tree_node *n)
594 unsigned long val = (unsigned long)key;
595 const struct bpf_ksym *ksym;
597 ksym = container_of(n, struct bpf_ksym, tnode);
599 if (val < ksym->start)
601 if (val >= ksym->end)
607 static const struct latch_tree_ops bpf_tree_ops = {
608 .less = bpf_tree_less,
609 .comp = bpf_tree_comp,
612 static DEFINE_SPINLOCK(bpf_lock);
613 static LIST_HEAD(bpf_kallsyms);
614 static struct latch_tree_root bpf_tree __cacheline_aligned;
616 void bpf_ksym_add(struct bpf_ksym *ksym)
618 spin_lock_bh(&bpf_lock);
619 WARN_ON_ONCE(!list_empty(&ksym->lnode));
620 list_add_tail_rcu(&ksym->lnode, &bpf_kallsyms);
621 latch_tree_insert(&ksym->tnode, &bpf_tree, &bpf_tree_ops);
622 spin_unlock_bh(&bpf_lock);
625 static void __bpf_ksym_del(struct bpf_ksym *ksym)
627 if (list_empty(&ksym->lnode))
630 latch_tree_erase(&ksym->tnode, &bpf_tree, &bpf_tree_ops);
631 list_del_rcu(&ksym->lnode);
634 void bpf_ksym_del(struct bpf_ksym *ksym)
636 spin_lock_bh(&bpf_lock);
637 __bpf_ksym_del(ksym);
638 spin_unlock_bh(&bpf_lock);
641 static bool bpf_prog_kallsyms_candidate(const struct bpf_prog *fp)
643 return fp->jited && !bpf_prog_was_classic(fp);
646 static bool bpf_prog_kallsyms_verify_off(const struct bpf_prog *fp)
648 return list_empty(&fp->aux->ksym.lnode) ||
649 fp->aux->ksym.lnode.prev == LIST_POISON2;
652 void bpf_prog_kallsyms_add(struct bpf_prog *fp)
654 if (!bpf_prog_kallsyms_candidate(fp) ||
658 bpf_prog_ksym_set_addr(fp);
659 bpf_prog_ksym_set_name(fp);
660 fp->aux->ksym.prog = true;
662 bpf_ksym_add(&fp->aux->ksym);
665 void bpf_prog_kallsyms_del(struct bpf_prog *fp)
667 if (!bpf_prog_kallsyms_candidate(fp))
670 bpf_ksym_del(&fp->aux->ksym);
673 static struct bpf_ksym *bpf_ksym_find(unsigned long addr)
675 struct latch_tree_node *n;
677 n = latch_tree_find((void *)addr, &bpf_tree, &bpf_tree_ops);
678 return n ? container_of(n, struct bpf_ksym, tnode) : NULL;
681 const char *__bpf_address_lookup(unsigned long addr, unsigned long *size,
682 unsigned long *off, char *sym)
684 struct bpf_ksym *ksym;
688 ksym = bpf_ksym_find(addr);
690 unsigned long symbol_start = ksym->start;
691 unsigned long symbol_end = ksym->end;
693 strncpy(sym, ksym->name, KSYM_NAME_LEN);
697 *size = symbol_end - symbol_start;
699 *off = addr - symbol_start;
706 bool is_bpf_text_address(unsigned long addr)
711 ret = bpf_ksym_find(addr) != NULL;
717 static struct bpf_prog *bpf_prog_ksym_find(unsigned long addr)
719 struct bpf_ksym *ksym = bpf_ksym_find(addr);
721 return ksym && ksym->prog ?
722 container_of(ksym, struct bpf_prog_aux, ksym)->prog :
726 const struct exception_table_entry *search_bpf_extables(unsigned long addr)
728 const struct exception_table_entry *e = NULL;
729 struct bpf_prog *prog;
732 prog = bpf_prog_ksym_find(addr);
735 if (!prog->aux->num_exentries)
738 e = search_extable(prog->aux->extable, prog->aux->num_exentries, addr);
744 int bpf_get_kallsym(unsigned int symnum, unsigned long *value, char *type,
747 struct bpf_ksym *ksym;
751 if (!bpf_jit_kallsyms_enabled())
755 list_for_each_entry_rcu(ksym, &bpf_kallsyms, lnode) {
759 strncpy(sym, ksym->name, KSYM_NAME_LEN);
761 *value = ksym->start;
762 *type = BPF_SYM_ELF_TYPE;
772 int bpf_jit_add_poke_descriptor(struct bpf_prog *prog,
773 struct bpf_jit_poke_descriptor *poke)
775 struct bpf_jit_poke_descriptor *tab = prog->aux->poke_tab;
776 static const u32 poke_tab_max = 1024;
777 u32 slot = prog->aux->size_poke_tab;
780 if (size > poke_tab_max)
782 if (poke->tailcall_target || poke->tailcall_target_stable ||
783 poke->tailcall_bypass || poke->adj_off || poke->bypass_addr)
786 switch (poke->reason) {
787 case BPF_POKE_REASON_TAIL_CALL:
788 if (!poke->tail_call.map)
795 tab = krealloc(tab, size * sizeof(*poke), GFP_KERNEL);
799 memcpy(&tab[slot], poke, sizeof(*poke));
800 prog->aux->size_poke_tab = size;
801 prog->aux->poke_tab = tab;
806 static atomic_long_t bpf_jit_current;
808 /* Can be overridden by an arch's JIT compiler if it has a custom,
809 * dedicated BPF backend memory area, or if neither of the two applies.
812 u64 __weak bpf_jit_alloc_exec_limit(void)
814 #if defined(MODULES_VADDR)
815 return MODULES_END - MODULES_VADDR;
817 return VMALLOC_END - VMALLOC_START;
821 static int __init bpf_jit_charge_init(void)
823 /* Only used as heuristic here to derive limit. */
824 bpf_jit_limit = min_t(u64, round_up(bpf_jit_alloc_exec_limit() >> 2,
825 PAGE_SIZE), LONG_MAX);
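/* E.g. on an arch whose module area spans 1 GiB, bpf_jit_alloc_exec_limit()
 * returns 1 GiB and the default bpf_jit_limit becomes 256 MiB (one quarter),
 * page-rounded and capped at LONG_MAX. Purely an illustrative figure.
 */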
828 pure_initcall(bpf_jit_charge_init);
830 int bpf_jit_charge_modmem(u32 pages)
832 if (atomic_long_add_return(pages, &bpf_jit_current) >
833 (bpf_jit_limit >> PAGE_SHIFT)) {
834 if (!capable(CAP_SYS_ADMIN)) {
835 atomic_long_sub(pages, &bpf_jit_current);
843 void bpf_jit_uncharge_modmem(u32 pages)
845 atomic_long_sub(pages, &bpf_jit_current);
848 void *__weak bpf_jit_alloc_exec(unsigned long size)
850 return module_alloc(size);
853 void __weak bpf_jit_free_exec(void *addr)
855 module_memfree(addr);
858 struct bpf_binary_header *
859 bpf_jit_binary_alloc(unsigned int proglen, u8 **image_ptr,
860 unsigned int alignment,
861 bpf_jit_fill_hole_t bpf_fill_ill_insns)
863 struct bpf_binary_header *hdr;
864 u32 size, hole, start, pages;
866 WARN_ON_ONCE(!is_power_of_2(alignment) ||
867 alignment > BPF_IMAGE_ALIGNMENT);
869 /* Most BPF filters are really small, but if some of them
870 * fill a page, allow at least 128 extra bytes to insert a
871 * random section of illegal instructions.
873 size = round_up(proglen + sizeof(*hdr) + 128, PAGE_SIZE);
874 pages = size / PAGE_SIZE;
876 if (bpf_jit_charge_modmem(pages))
878 hdr = bpf_jit_alloc_exec(size);
880 bpf_jit_uncharge_modmem(pages);
884 /* Fill space with illegal/arch-dep instructions. */
885 bpf_fill_ill_insns(hdr, size);
888 hole = min_t(unsigned int, size - (proglen + sizeof(*hdr)),
889 PAGE_SIZE - sizeof(*hdr));
890 start = (get_random_int() % hole) & ~(alignment - 1);
892 /* Leave a random number of instructions before BPF code. */
893 *image_ptr = &hdr->image[start];
898 void bpf_jit_binary_free(struct bpf_binary_header *hdr)
900 u32 pages = hdr->pages;
902 bpf_jit_free_exec(hdr);
903 bpf_jit_uncharge_modmem(pages);
906 /* This symbol is only overridden by archs that have different
907 * requirements than the usual eBPF JITs, e.g. when they only
908 * implement cBPF JIT, do not set images read-only, etc.
910 void __weak bpf_jit_free(struct bpf_prog *fp)
913 struct bpf_binary_header *hdr = bpf_jit_binary_hdr(fp);
915 bpf_jit_binary_free(hdr);
917 WARN_ON_ONCE(!bpf_prog_kallsyms_verify_off(fp));
920 bpf_prog_unlock_free(fp);
923 int bpf_jit_get_func_addr(const struct bpf_prog *prog,
924 const struct bpf_insn *insn, bool extra_pass,
925 u64 *func_addr, bool *func_addr_fixed)
931 *func_addr_fixed = insn->src_reg != BPF_PSEUDO_CALL;
932 if (!*func_addr_fixed) {
933 /* Placeholder address until the last pass has collected
934 * all addresses for JITed subprograms, in which case we
935 * can pick them up from prog->aux.
939 else if (prog->aux->func &&
940 off >= 0 && off < prog->aux->func_cnt)
941 addr = (u8 *)prog->aux->func[off]->bpf_func;
945 /* Address of a BPF helper call. Since it is part of the core
946 * kernel, it's always at a fixed location. __bpf_call_base
947 * and the helper with imm relative to it are both in the core kernel.
950 addr = (u8 *)__bpf_call_base + imm;
953 *func_addr = (unsigned long)addr;
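/* Example (illustrative): for a non-pseudo call the verifier has already
 * rewritten insn->imm to "helper address - __bpf_call_base", so a JIT can
 * emit a direct call to __bpf_call_base + imm and reach e.g.
 * bpf_map_lookup_elem() without any further relocation.
 */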
957 static int bpf_jit_blind_insn(const struct bpf_insn *from,
958 const struct bpf_insn *aux,
959 struct bpf_insn *to_buff,
962 struct bpf_insn *to = to_buff;
963 u32 imm_rnd = get_random_int();
966 BUILD_BUG_ON(BPF_REG_AX + 1 != MAX_BPF_JIT_REG);
967 BUILD_BUG_ON(MAX_BPF_REG + 1 != MAX_BPF_JIT_REG);
969 /* Constraints on AX register:
971 * AX register is inaccessible from user space. It is mapped in
972 * all JITs, and used here for constant blinding rewrites. It is
973 * typically "stateless" meaning its contents are only valid within
974 * the executed instruction, but not across several instructions.
975 * There are a few exceptions however, which are further detailed below.
978 * Constant blinding is only used by JITs, not in the interpreter.
979 * The interpreter uses AX on some occasions as a local temporary
980 * register, e.g. in DIV or MOD instructions.
982 * In restricted circumstances, the verifier can also use the AX
983 * register for rewrites, as long as they do not interfere with the cases above.
986 if (from->dst_reg == BPF_REG_AX || from->src_reg == BPF_REG_AX)
989 if (from->imm == 0 &&
990 (from->code == (BPF_ALU | BPF_MOV | BPF_K) ||
991 from->code == (BPF_ALU64 | BPF_MOV | BPF_K))) {
992 *to++ = BPF_ALU64_REG(BPF_XOR, from->dst_reg, from->dst_reg);
996 switch (from->code) {
997 case BPF_ALU | BPF_ADD | BPF_K:
998 case BPF_ALU | BPF_SUB | BPF_K:
999 case BPF_ALU | BPF_AND | BPF_K:
1000 case BPF_ALU | BPF_OR | BPF_K:
1001 case BPF_ALU | BPF_XOR | BPF_K:
1002 case BPF_ALU | BPF_MUL | BPF_K:
1003 case BPF_ALU | BPF_MOV | BPF_K:
1004 case BPF_ALU | BPF_DIV | BPF_K:
1005 case BPF_ALU | BPF_MOD | BPF_K:
1006 *to++ = BPF_ALU32_IMM(BPF_MOV, BPF_REG_AX, imm_rnd ^ from->imm);
1007 *to++ = BPF_ALU32_IMM(BPF_XOR, BPF_REG_AX, imm_rnd);
1008 *to++ = BPF_ALU32_REG(from->code, from->dst_reg, BPF_REG_AX);
1011 case BPF_ALU64 | BPF_ADD | BPF_K:
1012 case BPF_ALU64 | BPF_SUB | BPF_K:
1013 case BPF_ALU64 | BPF_AND | BPF_K:
1014 case BPF_ALU64 | BPF_OR | BPF_K:
1015 case BPF_ALU64 | BPF_XOR | BPF_K:
1016 case BPF_ALU64 | BPF_MUL | BPF_K:
1017 case BPF_ALU64 | BPF_MOV | BPF_K:
1018 case BPF_ALU64 | BPF_DIV | BPF_K:
1019 case BPF_ALU64 | BPF_MOD | BPF_K:
1020 *to++ = BPF_ALU64_IMM(BPF_MOV, BPF_REG_AX, imm_rnd ^ from->imm);
1021 *to++ = BPF_ALU64_IMM(BPF_XOR, BPF_REG_AX, imm_rnd);
1022 *to++ = BPF_ALU64_REG(from->code, from->dst_reg, BPF_REG_AX);
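/* Illustrative rewrite performed by the case above: an insn like
 * BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, 0x1234) is emitted as
 *   AX = imm_rnd ^ 0x1234;  AX ^= imm_rnd;  R2 += AX;
 * so the attacker-chosen constant never appears verbatim in the JIT image.
 */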
1025 case BPF_JMP | BPF_JEQ | BPF_K:
1026 case BPF_JMP | BPF_JNE | BPF_K:
1027 case BPF_JMP | BPF_JGT | BPF_K:
1028 case BPF_JMP | BPF_JLT | BPF_K:
1029 case BPF_JMP | BPF_JGE | BPF_K:
1030 case BPF_JMP | BPF_JLE | BPF_K:
1031 case BPF_JMP | BPF_JSGT | BPF_K:
1032 case BPF_JMP | BPF_JSLT | BPF_K:
1033 case BPF_JMP | BPF_JSGE | BPF_K:
1034 case BPF_JMP | BPF_JSLE | BPF_K:
1035 case BPF_JMP | BPF_JSET | BPF_K:
1036 /* Accommodate for extra offset in case of a backjump. */
1040 *to++ = BPF_ALU64_IMM(BPF_MOV, BPF_REG_AX, imm_rnd ^ from->imm);
1041 *to++ = BPF_ALU64_IMM(BPF_XOR, BPF_REG_AX, imm_rnd);
1042 *to++ = BPF_JMP_REG(from->code, from->dst_reg, BPF_REG_AX, off);
1045 case BPF_JMP32 | BPF_JEQ | BPF_K:
1046 case BPF_JMP32 | BPF_JNE | BPF_K:
1047 case BPF_JMP32 | BPF_JGT | BPF_K:
1048 case BPF_JMP32 | BPF_JLT | BPF_K:
1049 case BPF_JMP32 | BPF_JGE | BPF_K:
1050 case BPF_JMP32 | BPF_JLE | BPF_K:
1051 case BPF_JMP32 | BPF_JSGT | BPF_K:
1052 case BPF_JMP32 | BPF_JSLT | BPF_K:
1053 case BPF_JMP32 | BPF_JSGE | BPF_K:
1054 case BPF_JMP32 | BPF_JSLE | BPF_K:
1055 case BPF_JMP32 | BPF_JSET | BPF_K:
1056 /* Accommodate for extra offset in case of a backjump. */
1060 *to++ = BPF_ALU32_IMM(BPF_MOV, BPF_REG_AX, imm_rnd ^ from->imm);
1061 *to++ = BPF_ALU32_IMM(BPF_XOR, BPF_REG_AX, imm_rnd);
1062 *to++ = BPF_JMP32_REG(from->code, from->dst_reg, BPF_REG_AX,
1066 case BPF_LD | BPF_IMM | BPF_DW:
1067 *to++ = BPF_ALU64_IMM(BPF_MOV, BPF_REG_AX, imm_rnd ^ aux[1].imm);
1068 *to++ = BPF_ALU64_IMM(BPF_XOR, BPF_REG_AX, imm_rnd);
1069 *to++ = BPF_ALU64_IMM(BPF_LSH, BPF_REG_AX, 32);
1070 *to++ = BPF_ALU64_REG(BPF_MOV, aux[0].dst_reg, BPF_REG_AX);
1072 case 0: /* Part 2 of BPF_LD | BPF_IMM | BPF_DW. */
1073 *to++ = BPF_ALU32_IMM(BPF_MOV, BPF_REG_AX, imm_rnd ^ aux[0].imm);
1074 *to++ = BPF_ALU32_IMM(BPF_XOR, BPF_REG_AX, imm_rnd);
1076 *to++ = BPF_ZEXT_REG(BPF_REG_AX);
1077 *to++ = BPF_ALU64_REG(BPF_OR, aux[0].dst_reg, BPF_REG_AX);
1080 case BPF_ST | BPF_MEM | BPF_DW:
1081 case BPF_ST | BPF_MEM | BPF_W:
1082 case BPF_ST | BPF_MEM | BPF_H:
1083 case BPF_ST | BPF_MEM | BPF_B:
1084 *to++ = BPF_ALU64_IMM(BPF_MOV, BPF_REG_AX, imm_rnd ^ from->imm);
1085 *to++ = BPF_ALU64_IMM(BPF_XOR, BPF_REG_AX, imm_rnd);
1086 *to++ = BPF_STX_MEM(from->code, from->dst_reg, BPF_REG_AX, from->off);
1090 return to - to_buff;
1093 static struct bpf_prog *bpf_prog_clone_create(struct bpf_prog *fp_other,
1094 gfp_t gfp_extra_flags)
1096 gfp_t gfp_flags = GFP_KERNEL | __GFP_ZERO | gfp_extra_flags;
1097 struct bpf_prog *fp;
1099 fp = __vmalloc(fp_other->pages * PAGE_SIZE, gfp_flags);
1101 /* aux->prog still points to the fp_other one, so
1102 * when promoting the clone to the real program,
1103 * this still needs to be adapted.
1105 memcpy(fp, fp_other, fp_other->pages * PAGE_SIZE);
1111 static void bpf_prog_clone_free(struct bpf_prog *fp)
1113 /* aux was stolen by the other clone, so we cannot free
1114 * it from this path! It will be freed eventually by the
1115 * other program on release.
1117 * At this point, we don't need a deferred release since
1118 * the clone is guaranteed not to be locked.
1123 __bpf_prog_free(fp);
1126 void bpf_jit_prog_release_other(struct bpf_prog *fp, struct bpf_prog *fp_other)
1128 /* We have to repoint aux->prog to self, as we don't
1129 * know whether fp here is the clone or the original.
1132 bpf_prog_clone_free(fp_other);
1135 struct bpf_prog *bpf_jit_blind_constants(struct bpf_prog *prog)
1137 struct bpf_insn insn_buff[16], aux[2];
1138 struct bpf_prog *clone, *tmp;
1139 int insn_delta, insn_cnt;
1140 struct bpf_insn *insn;
1143 if (!bpf_jit_blinding_enabled(prog) || prog->blinded)
1146 clone = bpf_prog_clone_create(prog, GFP_USER);
1148 return ERR_PTR(-ENOMEM);
1150 insn_cnt = clone->len;
1151 insn = clone->insnsi;
1153 for (i = 0; i < insn_cnt; i++, insn++) {
1154 /* We temporarily need to hold the original ld64 insn
1155 * so that we can still access the first part in the
1156 * second blinding run.
1158 if (insn[0].code == (BPF_LD | BPF_IMM | BPF_DW) &&
1160 memcpy(aux, insn, sizeof(aux));
1162 rewritten = bpf_jit_blind_insn(insn, aux, insn_buff,
1163 clone->aux->verifier_zext);
1167 tmp = bpf_patch_insn_single(clone, i, insn_buff, rewritten);
1169 /* Patching may have repointed aux->prog during
1170 * realloc from the original one, so we need to
1171 * fix it up here on error.
1173 bpf_jit_prog_release_other(prog, clone);
1178 insn_delta = rewritten - 1;
1180 /* Walk new program and skip insns we just inserted. */
1181 insn = clone->insnsi + i + insn_delta;
1182 insn_cnt += insn_delta;
1189 #endif /* CONFIG_BPF_JIT */
1191 /* Base function for offset calculation. Needs to go into .text section,
1192 * therefore keeping it non-static as well; will also be used by JITs
1193 * anyway later on, so do not let the compiler omit it. This also needs
1194 * to go into kallsyms for correlation from e.g. bpftool, so naming must not change.
1197 noinline u64 __bpf_call_base(u64 r1, u64 r2, u64 r3, u64 r4, u64 r5)
1201 EXPORT_SYMBOL_GPL(__bpf_call_base);
1203 /* All UAPI available opcodes. */
1204 #define BPF_INSN_MAP(INSN_2, INSN_3) \
1205 /* 32 bit ALU operations. */ \
1206 /* Register based. */ \
1207 INSN_3(ALU, ADD, X), \
1208 INSN_3(ALU, SUB, X), \
1209 INSN_3(ALU, AND, X), \
1210 INSN_3(ALU, OR, X), \
1211 INSN_3(ALU, LSH, X), \
1212 INSN_3(ALU, RSH, X), \
1213 INSN_3(ALU, XOR, X), \
1214 INSN_3(ALU, MUL, X), \
1215 INSN_3(ALU, MOV, X), \
1216 INSN_3(ALU, ARSH, X), \
1217 INSN_3(ALU, DIV, X), \
1218 INSN_3(ALU, MOD, X), \
1220 INSN_3(ALU, END, TO_BE), \
1221 INSN_3(ALU, END, TO_LE), \
1222 /* Immediate based. */ \
1223 INSN_3(ALU, ADD, K), \
1224 INSN_3(ALU, SUB, K), \
1225 INSN_3(ALU, AND, K), \
1226 INSN_3(ALU, OR, K), \
1227 INSN_3(ALU, LSH, K), \
1228 INSN_3(ALU, RSH, K), \
1229 INSN_3(ALU, XOR, K), \
1230 INSN_3(ALU, MUL, K), \
1231 INSN_3(ALU, MOV, K), \
1232 INSN_3(ALU, ARSH, K), \
1233 INSN_3(ALU, DIV, K), \
1234 INSN_3(ALU, MOD, K), \
1235 /* 64 bit ALU operations. */ \
1236 /* Register based. */ \
1237 INSN_3(ALU64, ADD, X), \
1238 INSN_3(ALU64, SUB, X), \
1239 INSN_3(ALU64, AND, X), \
1240 INSN_3(ALU64, OR, X), \
1241 INSN_3(ALU64, LSH, X), \
1242 INSN_3(ALU64, RSH, X), \
1243 INSN_3(ALU64, XOR, X), \
1244 INSN_3(ALU64, MUL, X), \
1245 INSN_3(ALU64, MOV, X), \
1246 INSN_3(ALU64, ARSH, X), \
1247 INSN_3(ALU64, DIV, X), \
1248 INSN_3(ALU64, MOD, X), \
1249 INSN_2(ALU64, NEG), \
1250 /* Immediate based. */ \
1251 INSN_3(ALU64, ADD, K), \
1252 INSN_3(ALU64, SUB, K), \
1253 INSN_3(ALU64, AND, K), \
1254 INSN_3(ALU64, OR, K), \
1255 INSN_3(ALU64, LSH, K), \
1256 INSN_3(ALU64, RSH, K), \
1257 INSN_3(ALU64, XOR, K), \
1258 INSN_3(ALU64, MUL, K), \
1259 INSN_3(ALU64, MOV, K), \
1260 INSN_3(ALU64, ARSH, K), \
1261 INSN_3(ALU64, DIV, K), \
1262 INSN_3(ALU64, MOD, K), \
1263 /* Call instruction. */ \
1264 INSN_2(JMP, CALL), \
1265 /* Exit instruction. */ \
1266 INSN_2(JMP, EXIT), \
1267 /* 32-bit Jump instructions. */ \
1268 /* Register based. */ \
1269 INSN_3(JMP32, JEQ, X), \
1270 INSN_3(JMP32, JNE, X), \
1271 INSN_3(JMP32, JGT, X), \
1272 INSN_3(JMP32, JLT, X), \
1273 INSN_3(JMP32, JGE, X), \
1274 INSN_3(JMP32, JLE, X), \
1275 INSN_3(JMP32, JSGT, X), \
1276 INSN_3(JMP32, JSLT, X), \
1277 INSN_3(JMP32, JSGE, X), \
1278 INSN_3(JMP32, JSLE, X), \
1279 INSN_3(JMP32, JSET, X), \
1280 /* Immediate based. */ \
1281 INSN_3(JMP32, JEQ, K), \
1282 INSN_3(JMP32, JNE, K), \
1283 INSN_3(JMP32, JGT, K), \
1284 INSN_3(JMP32, JLT, K), \
1285 INSN_3(JMP32, JGE, K), \
1286 INSN_3(JMP32, JLE, K), \
1287 INSN_3(JMP32, JSGT, K), \
1288 INSN_3(JMP32, JSLT, K), \
1289 INSN_3(JMP32, JSGE, K), \
1290 INSN_3(JMP32, JSLE, K), \
1291 INSN_3(JMP32, JSET, K), \
1292 /* Jump instructions. */ \
1293 /* Register based. */ \
1294 INSN_3(JMP, JEQ, X), \
1295 INSN_3(JMP, JNE, X), \
1296 INSN_3(JMP, JGT, X), \
1297 INSN_3(JMP, JLT, X), \
1298 INSN_3(JMP, JGE, X), \
1299 INSN_3(JMP, JLE, X), \
1300 INSN_3(JMP, JSGT, X), \
1301 INSN_3(JMP, JSLT, X), \
1302 INSN_3(JMP, JSGE, X), \
1303 INSN_3(JMP, JSLE, X), \
1304 INSN_3(JMP, JSET, X), \
1305 /* Immediate based. */ \
1306 INSN_3(JMP, JEQ, K), \
1307 INSN_3(JMP, JNE, K), \
1308 INSN_3(JMP, JGT, K), \
1309 INSN_3(JMP, JLT, K), \
1310 INSN_3(JMP, JGE, K), \
1311 INSN_3(JMP, JLE, K), \
1312 INSN_3(JMP, JSGT, K), \
1313 INSN_3(JMP, JSLT, K), \
1314 INSN_3(JMP, JSGE, K), \
1315 INSN_3(JMP, JSLE, K), \
1316 INSN_3(JMP, JSET, K), \
1318 /* Store instructions. */ \
1319 /* Register based. */ \
1320 INSN_3(STX, MEM, B), \
1321 INSN_3(STX, MEM, H), \
1322 INSN_3(STX, MEM, W), \
1323 INSN_3(STX, MEM, DW), \
1324 INSN_3(STX, ATOMIC, W), \
1325 INSN_3(STX, ATOMIC, DW), \
1326 /* Immediate based. */ \
1327 INSN_3(ST, MEM, B), \
1328 INSN_3(ST, MEM, H), \
1329 INSN_3(ST, MEM, W), \
1330 INSN_3(ST, MEM, DW), \
1331 /* Load instructions. */ \
1332 /* Register based. */ \
1333 INSN_3(LDX, MEM, B), \
1334 INSN_3(LDX, MEM, H), \
1335 INSN_3(LDX, MEM, W), \
1336 INSN_3(LDX, MEM, DW), \
1337 /* Immediate based. */ \
1340 bool bpf_opcode_in_insntable(u8 code)
1342 #define BPF_INSN_2_TBL(x, y) [BPF_##x | BPF_##y] = true
1343 #define BPF_INSN_3_TBL(x, y, z) [BPF_##x | BPF_##y | BPF_##z] = true
1344 static const bool public_insntable[256] = {
1345 [0 ... 255] = false,
1346 /* Now overwrite non-defaults ... */
1347 BPF_INSN_MAP(BPF_INSN_2_TBL, BPF_INSN_3_TBL),
1348 /* UAPI exposed, but rewritten opcodes. cBPF carry-over. */
1349 [BPF_LD | BPF_ABS | BPF_B] = true,
1350 [BPF_LD | BPF_ABS | BPF_H] = true,
1351 [BPF_LD | BPF_ABS | BPF_W] = true,
1352 [BPF_LD | BPF_IND | BPF_B] = true,
1353 [BPF_LD | BPF_IND | BPF_H] = true,
1354 [BPF_LD | BPF_IND | BPF_W] = true,
1356 #undef BPF_INSN_3_TBL
1357 #undef BPF_INSN_2_TBL
1358 return public_insntable[code];
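/* E.g. bpf_opcode_in_insntable(BPF_ALU64 | BPF_ADD | BPF_X) is true, while a
 * kernel-internal opcode such as BPF_JMP | BPF_CALL_ARGS (not part of UAPI)
 * stays false here even though the interpreter below can execute it.
 */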
1361 #ifndef CONFIG_BPF_JIT_ALWAYS_ON
1362 u64 __weak bpf_probe_read_kernel(void *dst, u32 size, const void *unsafe_ptr)
1364 memset(dst, 0, size);
1369 * ___bpf_prog_run - run eBPF program on a given context
1370 * @regs: is the array of MAX_BPF_EXT_REG eBPF pseudo-registers
1371 * @insn: is the array of eBPF instructions
1372 * @stack: is the eBPF storage stack
1374 * Decode and execute eBPF instructions.
1376 static u64 ___bpf_prog_run(u64 *regs, const struct bpf_insn *insn, u64 *stack)
1378 #define BPF_INSN_2_LBL(x, y) [BPF_##x | BPF_##y] = &&x##_##y
1379 #define BPF_INSN_3_LBL(x, y, z) [BPF_##x | BPF_##y | BPF_##z] = &&x##_##y##_##z
1380 static const void * const jumptable[256] __annotate_jump_table = {
1381 [0 ... 255] = &&default_label,
1382 /* Now overwrite non-defaults ... */
1383 BPF_INSN_MAP(BPF_INSN_2_LBL, BPF_INSN_3_LBL),
1384 /* Non-UAPI available opcodes. */
1385 [BPF_JMP | BPF_CALL_ARGS] = &&JMP_CALL_ARGS,
1386 [BPF_JMP | BPF_TAIL_CALL] = &&JMP_TAIL_CALL,
1387 [BPF_LDX | BPF_PROBE_MEM | BPF_B] = &&LDX_PROBE_MEM_B,
1388 [BPF_LDX | BPF_PROBE_MEM | BPF_H] = &&LDX_PROBE_MEM_H,
1389 [BPF_LDX | BPF_PROBE_MEM | BPF_W] = &&LDX_PROBE_MEM_W,
1390 [BPF_LDX | BPF_PROBE_MEM | BPF_DW] = &&LDX_PROBE_MEM_DW,
1392 #undef BPF_INSN_3_LBL
1393 #undef BPF_INSN_2_LBL
1394 u32 tail_call_cnt = 0;
1396 #define CONT ({ insn++; goto select_insn; })
1397 #define CONT_JMP ({ insn++; goto select_insn; })
1400 goto *jumptable[insn->code];
1403 #define ALU(OPCODE, OP) \
1404 ALU64_##OPCODE##_X: \
1408 DST = (u32) DST OP (u32) SRC; \
1410 ALU64_##OPCODE##_K: \
1414 DST = (u32) DST OP (u32) IMM; \
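	/* The ALU() macro is instantiated for ADD, SUB, AND, OR, LSH, RSH, XOR
	 * and MUL; e.g. ALU(ADD, +) stamps out the ALU64_ADD_X, ALU_ADD_X,
	 * ALU64_ADD_K and ALU_ADD_K jump-table targets.
	 */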
1445 DST = (u64) (u32) insn[0].imm | ((u64) (u32) insn[1].imm) << 32;
1449 DST = (u64) (u32) (((s32) DST) >> SRC);
1452 DST = (u64) (u32) (((s32) DST) >> IMM);
1455 (*(s64 *) &DST) >>= SRC;
1458 (*(s64 *) &DST) >>= IMM;
1461 div64_u64_rem(DST, SRC, &AX);
1466 DST = do_div(AX, (u32) SRC);
1469 div64_u64_rem(DST, IMM, &AX);
1474 DST = do_div(AX, (u32) IMM);
1477 DST = div64_u64(DST, SRC);
1481 do_div(AX, (u32) SRC);
1485 DST = div64_u64(DST, IMM);
1489 do_div(AX, (u32) IMM);
1495 DST = (__force u16) cpu_to_be16(DST);
1498 DST = (__force u32) cpu_to_be32(DST);
1501 DST = (__force u64) cpu_to_be64(DST);
1508 DST = (__force u16) cpu_to_le16(DST);
1511 DST = (__force u32) cpu_to_le32(DST);
1514 DST = (__force u64) cpu_to_le64(DST);
1521 /* Function call scratches BPF_R1-BPF_R5 registers,
1522 * preserves BPF_R6-BPF_R9, and stores the return value into BPF_R0.
1525 BPF_R0 = (__bpf_call_base + insn->imm)(BPF_R1, BPF_R2, BPF_R3,
1530 BPF_R0 = (__bpf_call_base_args + insn->imm)(BPF_R1, BPF_R2,
1533 insn + insn->off + 1);
1537 struct bpf_map *map = (struct bpf_map *) (unsigned long) BPF_R2;
1538 struct bpf_array *array = container_of(map, struct bpf_array, map);
1539 struct bpf_prog *prog;
1542 if (unlikely(index >= array->map.max_entries))
1544 if (unlikely(tail_call_cnt > MAX_TAIL_CALL_CNT))
1549 prog = READ_ONCE(array->ptrs[index]);
1553 /* ARG1 at this point is guaranteed to point to CTX from
1554 * the verifier side due to the fact that the tail call is
1555 * handled like a helper, that is, bpf_tail_call_proto,
1556 * where arg1_type is ARG_PTR_TO_CTX.
1558 insn = prog->insnsi;
1569 #define COND_JMP(SIGN, OPCODE, CMP_OP) \
1571 if ((SIGN##64) DST CMP_OP (SIGN##64) SRC) { \
1572 insn += insn->off; \
1576 JMP32_##OPCODE##_X: \
1577 if ((SIGN##32) DST CMP_OP (SIGN##32) SRC) { \
1578 insn += insn->off; \
1583 if ((SIGN##64) DST CMP_OP (SIGN##64) IMM) { \
1584 insn += insn->off; \
1588 JMP32_##OPCODE##_K: \
1589 if ((SIGN##32) DST CMP_OP (SIGN##32) IMM) { \
1590 insn += insn->off; \
1594 COND_JMP(u, JEQ, ==)
1595 COND_JMP(u, JNE, !=)
1598 COND_JMP(u, JGE, >=)
1599 COND_JMP(u, JLE, <=)
1600 COND_JMP(u, JSET, &)
1601 COND_JMP(s, JSGT, >)
1602 COND_JMP(s, JSLT, <)
1603 COND_JMP(s, JSGE, >=)
1604 COND_JMP(s, JSLE, <=)
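	/* Each COND_JMP(SIGN, OPCODE, CMP_OP) instantiation above provides the
	 * four handlers JMP_<OP>_X, JMP32_<OP>_X, JMP_<OP>_K and JMP32_<OP>_K;
	 * e.g. COND_JMP(u, JEQ, ==) covers both the 64-bit and the 32-bit JEQ
	 * variants against a register or an immediate.
	 */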
1606 /* STX and ST and LDX */
1607 #define LDST(SIZEOP, SIZE) \
1609 *(SIZE *)(unsigned long) (DST + insn->off) = SRC; \
1612 *(SIZE *)(unsigned long) (DST + insn->off) = IMM; \
1615 DST = *(SIZE *)(unsigned long) (SRC + insn->off); \
1623 #define LDX_PROBE(SIZEOP, SIZE) \
1624 LDX_PROBE_MEM_##SIZEOP: \
1625 bpf_probe_read_kernel(&DST, SIZE, (const void *)(long) (SRC + insn->off)); \
1633 #define ATOMIC_ALU_OP(BOP, KOP) \
1635 if (BPF_SIZE(insn->code) == BPF_W) \
1636 atomic_##KOP((u32) SRC, (atomic_t *)(unsigned long) \
1637 (DST + insn->off)); \
1639 atomic64_##KOP((u64) SRC, (atomic64_t *)(unsigned long) \
1640 (DST + insn->off)); \
1642 case BOP | BPF_FETCH: \
1643 if (BPF_SIZE(insn->code) == BPF_W) \
1644 SRC = (u32) atomic_fetch_##KOP( \
1646 (atomic_t *)(unsigned long) (DST + insn->off)); \
1648 SRC = (u64) atomic64_fetch_##KOP( \
1650 (atomic64_t *)(unsigned long) (DST + insn->off)); \
1656 ATOMIC_ALU_OP(BPF_ADD, add)
1657 ATOMIC_ALU_OP(BPF_AND, and)
1658 ATOMIC_ALU_OP(BPF_OR, or)
1659 ATOMIC_ALU_OP(BPF_XOR, xor)
1660 #undef ATOMIC_ALU_OP
1663 if (BPF_SIZE(insn->code) == BPF_W)
1664 SRC = (u32) atomic_xchg(
1665 (atomic_t *)(unsigned long) (DST + insn->off),
1668 SRC = (u64) atomic64_xchg(
1669 (atomic64_t *)(unsigned long) (DST + insn->off),
1673 if (BPF_SIZE(insn->code) == BPF_W)
1674 BPF_R0 = (u32) atomic_cmpxchg(
1675 (atomic_t *)(unsigned long) (DST + insn->off),
1676 (u32) BPF_R0, (u32) SRC);
1678 BPF_R0 = (u64) atomic64_cmpxchg(
1679 (atomic64_t *)(unsigned long) (DST + insn->off),
1680 (u64) BPF_R0, (u64) SRC);
1689 /* If we ever reach this, we have a bug somewhere. Die hard here
1690 * instead of just returning 0; we could be somewhere in a subprog,
1691 * so execution could continue otherwise which we do /not/ want.
1693 * Note, verifier whitelists all opcodes in bpf_opcode_in_insntable().
1695 pr_warn("BPF interpreter: unknown opcode %02x (imm: 0x%x)\n",
1696 insn->code, insn->imm);
1701 #define PROG_NAME(stack_size) __bpf_prog_run##stack_size
1702 #define DEFINE_BPF_PROG_RUN(stack_size) \
1703 static unsigned int PROG_NAME(stack_size)(const void *ctx, const struct bpf_insn *insn) \
1705 u64 stack[stack_size / sizeof(u64)]; \
1706 u64 regs[MAX_BPF_EXT_REG]; \
1708 FP = (u64) (unsigned long) &stack[ARRAY_SIZE(stack)]; \
1709 ARG1 = (u64) (unsigned long) ctx; \
1710 return ___bpf_prog_run(regs, insn, stack); \
1713 #define PROG_NAME_ARGS(stack_size) __bpf_prog_run_args##stack_size
1714 #define DEFINE_BPF_PROG_RUN_ARGS(stack_size) \
1715 static u64 PROG_NAME_ARGS(stack_size)(u64 r1, u64 r2, u64 r3, u64 r4, u64 r5, \
1716 const struct bpf_insn *insn) \
1718 u64 stack[stack_size / sizeof(u64)]; \
1719 u64 regs[MAX_BPF_EXT_REG]; \
1721 FP = (u64) (unsigned long) &stack[ARRAY_SIZE(stack)]; \
1727 return ___bpf_prog_run(regs, insn, stack); \
1730 #define EVAL1(FN, X) FN(X)
1731 #define EVAL2(FN, X, Y...) FN(X) EVAL1(FN, Y)
1732 #define EVAL3(FN, X, Y...) FN(X) EVAL2(FN, Y)
1733 #define EVAL4(FN, X, Y...) FN(X) EVAL3(FN, Y)
1734 #define EVAL5(FN, X, Y...) FN(X) EVAL4(FN, Y)
1735 #define EVAL6(FN, X, Y...) FN(X) EVAL5(FN, Y)
1737 EVAL6(DEFINE_BPF_PROG_RUN, 32, 64, 96, 128, 160, 192);
1738 EVAL6(DEFINE_BPF_PROG_RUN, 224, 256, 288, 320, 352, 384);
1739 EVAL4(DEFINE_BPF_PROG_RUN, 416, 448, 480, 512);
1741 EVAL6(DEFINE_BPF_PROG_RUN_ARGS, 32, 64, 96, 128, 160, 192);
1742 EVAL6(DEFINE_BPF_PROG_RUN_ARGS, 224, 256, 288, 320, 352, 384);
1743 EVAL4(DEFINE_BPF_PROG_RUN_ARGS, 416, 448, 480, 512);
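/* The EVAL*() expansions above stamp out one interpreter entry per 32-byte
 * stack-size bucket, i.e. __bpf_prog_run32 ... __bpf_prog_run512 and the
 * matching __bpf_prog_run_args32 ... __bpf_prog_run_args512 variants.
 */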
1745 #define PROG_NAME_LIST(stack_size) PROG_NAME(stack_size),
1747 static unsigned int (*interpreters[])(const void *ctx,
1748 const struct bpf_insn *insn) = {
1749 EVAL6(PROG_NAME_LIST, 32, 64, 96, 128, 160, 192)
1750 EVAL6(PROG_NAME_LIST, 224, 256, 288, 320, 352, 384)
1751 EVAL4(PROG_NAME_LIST, 416, 448, 480, 512)
1753 #undef PROG_NAME_LIST
1754 #define PROG_NAME_LIST(stack_size) PROG_NAME_ARGS(stack_size),
1755 static u64 (*interpreters_args[])(u64 r1, u64 r2, u64 r3, u64 r4, u64 r5,
1756 const struct bpf_insn *insn) = {
1757 EVAL6(PROG_NAME_LIST, 32, 64, 96, 128, 160, 192)
1758 EVAL6(PROG_NAME_LIST, 224, 256, 288, 320, 352, 384)
1759 EVAL4(PROG_NAME_LIST, 416, 448, 480, 512)
1761 #undef PROG_NAME_LIST
1763 void bpf_patch_call_args(struct bpf_insn *insn, u32 stack_depth)
1765 stack_depth = max_t(u32, stack_depth, 1);
1766 insn->off = (s16) insn->imm;
1767 insn->imm = interpreters_args[(round_up(stack_depth, 32) / 32) - 1] -
1768 __bpf_call_base_args;
1769 insn->code = BPF_JMP | BPF_CALL_ARGS;
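/* E.g. a callee with stack_depth = 40 rounds up to 64, selecting
 * interpreters_args[1] (__bpf_prog_run_args64); the imm then encodes that
 * entry point relative to __bpf_call_base_args for the interpreter's
 * JMP_CALL_ARGS handler above.
 */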
1773 static unsigned int __bpf_prog_ret0_warn(const void *ctx,
1774 const struct bpf_insn *insn)
1776 /* If this handler ever gets executed, then BPF_JIT_ALWAYS_ON
1777 * is not working properly, so warn about it!
1784 bool bpf_prog_array_compatible(struct bpf_array *array,
1785 const struct bpf_prog *fp)
1787 if (fp->kprobe_override)
1790 if (!array->aux->type) {
1791 /* There's no owner yet where we could check for compatibility.
1794 array->aux->type = fp->type;
1795 array->aux->jited = fp->jited;
1799 return array->aux->type == fp->type &&
1800 array->aux->jited == fp->jited;
1803 static int bpf_check_tail_call(const struct bpf_prog *fp)
1805 struct bpf_prog_aux *aux = fp->aux;
1808 mutex_lock(&aux->used_maps_mutex);
1809 for (i = 0; i < aux->used_map_cnt; i++) {
1810 struct bpf_map *map = aux->used_maps[i];
1811 struct bpf_array *array;
1813 if (map->map_type != BPF_MAP_TYPE_PROG_ARRAY)
1816 array = container_of(map, struct bpf_array, map);
1817 if (!bpf_prog_array_compatible(array, fp)) {
1824 mutex_unlock(&aux->used_maps_mutex);
1828 static void bpf_prog_select_func(struct bpf_prog *fp)
1830 #ifndef CONFIG_BPF_JIT_ALWAYS_ON
1831 u32 stack_depth = max_t(u32, fp->aux->stack_depth, 1);
1833 fp->bpf_func = interpreters[(round_up(stack_depth, 32) / 32) - 1];
1835 fp->bpf_func = __bpf_prog_ret0_warn;
1840 * bpf_prog_select_runtime - select exec runtime for BPF program
1841 * @fp: bpf_prog populated with internal BPF program
1842 * @err: pointer to error variable
1844 * Try to JIT eBPF program, if JIT is not available, use interpreter.
1845 * The BPF program will be executed via BPF_PROG_RUN() macro.
1847 struct bpf_prog *bpf_prog_select_runtime(struct bpf_prog *fp, int *err)
1849 /* In case of BPF to BPF calls, the verifier did all the prep
1850 * work with regard to JITing, etc.
1855 bpf_prog_select_func(fp);
1857 /* eBPF JITs can rewrite the program in case constant
1858 * blinding is active. However, in case of error during
1859 * blinding, bpf_int_jit_compile() must always return a
1860 * valid program, which in this case would simply not
1861 * be JITed, but would fall back to the interpreter.
1863 if (!bpf_prog_is_dev_bound(fp->aux)) {
1864 *err = bpf_prog_alloc_jited_linfo(fp);
1868 fp = bpf_int_jit_compile(fp);
1870 bpf_prog_free_jited_linfo(fp);
1871 #ifdef CONFIG_BPF_JIT_ALWAYS_ON
1876 bpf_prog_free_unused_jited_linfo(fp);
1879 *err = bpf_prog_offload_compile(fp);
1885 bpf_prog_lock_ro(fp);
1887 /* The tail call compatibility check can only be done at
1888 * this late stage, as we need to determine whether we deal
1889 * with JITed or non-JITed program concatenations, and not
1890 * all eBPF JITs might immediately support all features.
1892 *err = bpf_check_tail_call(fp);
1896 EXPORT_SYMBOL_GPL(bpf_prog_select_runtime);
1898 static unsigned int __bpf_prog_ret1(const void *ctx,
1899 const struct bpf_insn *insn)
1904 static struct bpf_prog_dummy {
1905 struct bpf_prog prog;
1906 } dummy_bpf_prog = {
1908 .bpf_func = __bpf_prog_ret1,
1912 /* To avoid allocating an empty bpf_prog_array for cgroups that
1913 * don't have a bpf program attached, use one global 'empty_prog_array'.
1914 * It will not be modified by the caller of bpf_prog_array_alloc()
1915 * (since the caller requested prog_cnt == 0), and
1916 * that pointer should be 'freed' by bpf_prog_array_free().
1919 struct bpf_prog_array hdr;
1920 struct bpf_prog *null_prog;
1921 } empty_prog_array = {
1925 struct bpf_prog_array *bpf_prog_array_alloc(u32 prog_cnt, gfp_t flags)
1928 return kzalloc(sizeof(struct bpf_prog_array) +
1929 sizeof(struct bpf_prog_array_item) *
1933 return &empty_prog_array.hdr;
1936 void bpf_prog_array_free(struct bpf_prog_array *progs)
1938 if (!progs || progs == &empty_prog_array.hdr)
1940 kfree_rcu(progs, rcu);
1943 int bpf_prog_array_length(struct bpf_prog_array *array)
1945 struct bpf_prog_array_item *item;
1948 for (item = array->items; item->prog; item++)
1949 if (item->prog != &dummy_bpf_prog.prog)
1954 bool bpf_prog_array_is_empty(struct bpf_prog_array *array)
1956 struct bpf_prog_array_item *item;
1958 for (item = array->items; item->prog; item++)
1959 if (item->prog != &dummy_bpf_prog.prog)
1964 static bool bpf_prog_array_copy_core(struct bpf_prog_array *array,
1968 struct bpf_prog_array_item *item;
1971 for (item = array->items; item->prog; item++) {
1972 if (item->prog == &dummy_bpf_prog.prog)
1974 prog_ids[i] = item->prog->aux->id;
1975 if (++i == request_cnt) {
1981 return !!(item->prog);
1984 int bpf_prog_array_copy_to_user(struct bpf_prog_array *array,
1985 __u32 __user *prog_ids, u32 cnt)
1987 unsigned long err = 0;
1991 /* users of this function are doing:
1992 * cnt = bpf_prog_array_length();
1994 * bpf_prog_array_copy_to_user(..., cnt);
1995 * so the kcalloc below doesn't need an extra cnt > 0 check.
1997 ids = kcalloc(cnt, sizeof(u32), GFP_USER | __GFP_NOWARN);
2000 nospc = bpf_prog_array_copy_core(array, ids, cnt);
2001 err = copy_to_user(prog_ids, ids, cnt * sizeof(u32));
2010 void bpf_prog_array_delete_safe(struct bpf_prog_array *array,
2011 struct bpf_prog *old_prog)
2013 struct bpf_prog_array_item *item;
2015 for (item = array->items; item->prog; item++)
2016 if (item->prog == old_prog) {
2017 WRITE_ONCE(item->prog, &dummy_bpf_prog.prog);
2023 * bpf_prog_array_delete_safe_at() - Replaces the program at the given
2024 * index into the program array with
2025 * a dummy no-op program.
2026 * @array: a bpf_prog_array
2027 * @index: the index of the program to replace
2029 * Skips over dummy programs, by not counting them, when calculating
2030 * the position of the program to replace.
2034 * * -EINVAL - Invalid index value. Must be a non-negative integer.
2035 * * -ENOENT - Index out of range
2037 int bpf_prog_array_delete_safe_at(struct bpf_prog_array *array, int index)
2039 return bpf_prog_array_update_at(array, index, &dummy_bpf_prog.prog);
2043 * bpf_prog_array_update_at() - Updates the program at the given index
2044 * into the program array.
2045 * @array: a bpf_prog_array
2046 * @index: the index of the program to update
2047 * @prog: the program to insert into the array
2049 * Skips over dummy programs, by not counting them, when calculating
2050 * the position of the program to update.
2054 * * -EINVAL - Invalid index value. Must be a non-negative integer.
2055 * * -ENOENT - Index out of range
2057 int bpf_prog_array_update_at(struct bpf_prog_array *array, int index,
2058 struct bpf_prog *prog)
2060 struct bpf_prog_array_item *item;
2062 if (unlikely(index < 0))
2065 for (item = array->items; item->prog; item++) {
2066 if (item->prog == &dummy_bpf_prog.prog)
2069 WRITE_ONCE(item->prog, prog);
2077 int bpf_prog_array_copy(struct bpf_prog_array *old_array,
2078 struct bpf_prog *exclude_prog,
2079 struct bpf_prog *include_prog,
2080 struct bpf_prog_array **new_array)
2082 int new_prog_cnt, carry_prog_cnt = 0;
2083 struct bpf_prog_array_item *existing;
2084 struct bpf_prog_array *array;
2085 bool found_exclude = false;
2086 int new_prog_idx = 0;
2088 /* Figure out how many existing progs we need to carry over to the new array.
2092 existing = old_array->items;
2093 for (; existing->prog; existing++) {
2094 if (existing->prog == exclude_prog) {
2095 found_exclude = true;
2098 if (existing->prog != &dummy_bpf_prog.prog)
2100 if (existing->prog == include_prog)
2105 if (exclude_prog && !found_exclude)
2108 /* How many progs (not NULL) will be in the new array? */
2109 new_prog_cnt = carry_prog_cnt;
2113 /* Do we have any prog (not NULL) in the new array? */
2114 if (!new_prog_cnt) {
2119 /* +1 as the end of prog_array is marked with NULL */
2120 array = bpf_prog_array_alloc(new_prog_cnt + 1, GFP_KERNEL);
2124 /* Fill in the new prog array */
2125 if (carry_prog_cnt) {
2126 existing = old_array->items;
2127 for (; existing->prog; existing++)
2128 if (existing->prog != exclude_prog &&
2129 existing->prog != &dummy_bpf_prog.prog) {
2130 array->items[new_prog_idx++].prog =
2135 array->items[new_prog_idx++].prog = include_prog;
2136 array->items[new_prog_idx].prog = NULL;
2141 int bpf_prog_array_copy_info(struct bpf_prog_array *array,
2142 u32 *prog_ids, u32 request_cnt,
2148 cnt = bpf_prog_array_length(array);
2152 /* return early if user requested only program count or nothing to copy */
2153 if (!request_cnt || !cnt)
2156 /* this function is called under trace/bpf_trace.c: bpf_event_mutex */
2157 return bpf_prog_array_copy_core(array, prog_ids, request_cnt) ? -ENOSPC
2161 void __bpf_free_used_maps(struct bpf_prog_aux *aux,
2162 struct bpf_map **used_maps, u32 len)
2164 struct bpf_map *map;
2167 for (i = 0; i < len; i++) {
2169 if (map->ops->map_poke_untrack)
2170 map->ops->map_poke_untrack(map, aux);
2175 static void bpf_free_used_maps(struct bpf_prog_aux *aux)
2177 __bpf_free_used_maps(aux, aux->used_maps, aux->used_map_cnt);
2178 kfree(aux->used_maps);
2181 void __bpf_free_used_btfs(struct bpf_prog_aux *aux,
2182 struct btf_mod_pair *used_btfs, u32 len)
2184 #ifdef CONFIG_BPF_SYSCALL
2185 struct btf_mod_pair *btf_mod;
2188 for (i = 0; i < len; i++) {
2189 btf_mod = &used_btfs[i];
2190 if (btf_mod->module)
2191 module_put(btf_mod->module);
2192 btf_put(btf_mod->btf);
2197 static void bpf_free_used_btfs(struct bpf_prog_aux *aux)
2199 __bpf_free_used_btfs(aux, aux->used_btfs, aux->used_btf_cnt);
2200 kfree(aux->used_btfs);
2203 static void bpf_prog_free_deferred(struct work_struct *work)
2205 struct bpf_prog_aux *aux;
2208 aux = container_of(work, struct bpf_prog_aux, work);
2209 bpf_free_used_maps(aux);
2210 bpf_free_used_btfs(aux);
2211 if (bpf_prog_is_dev_bound(aux))
2212 bpf_prog_offload_destroy(aux->prog);
2213 #ifdef CONFIG_PERF_EVENTS
2214 if (aux->prog->has_callchain_buf)
2215 put_callchain_buffers();
2217 if (aux->dst_trampoline)
2218 bpf_trampoline_put(aux->dst_trampoline);
2219 for (i = 0; i < aux->func_cnt; i++)
2220 bpf_jit_free(aux->func[i]);
2221 if (aux->func_cnt) {
2223 bpf_prog_unlock_free(aux->prog);
2225 bpf_jit_free(aux->prog);
2229 /* Free internal BPF program */
2230 void bpf_prog_free(struct bpf_prog *fp)
2232 struct bpf_prog_aux *aux = fp->aux;
2235 bpf_prog_put(aux->dst_prog);
2236 INIT_WORK(&aux->work, bpf_prog_free_deferred);
2237 schedule_work(&aux->work);
2239 EXPORT_SYMBOL_GPL(bpf_prog_free);
2241 /* RNG for unprivileged user space with separate state from prandom_u32(). */
2242 static DEFINE_PER_CPU(struct rnd_state, bpf_user_rnd_state);
2244 void bpf_user_rnd_init_once(void)
2246 prandom_init_once(&bpf_user_rnd_state);
2249 BPF_CALL_0(bpf_user_rnd_u32)
2251 /* Should someone ever have the rather unwise idea to use some
2252 * of the registers passed into this function, then note that
2253 * this function is called from native eBPF and classic-to-eBPF
2254 * transformations. Register assignments from both sides are
2255 * different, e.g. classic always sets fn(ctx, A, X) here.
2257 struct rnd_state *state;
2260 state = &get_cpu_var(bpf_user_rnd_state);
2261 res = prandom_u32_state(state);
2262 put_cpu_var(bpf_user_rnd_state);
2267 BPF_CALL_0(bpf_get_raw_cpu_id)
2269 return raw_smp_processor_id();
2272 /* Weak definitions of helper functions in case we don't have bpf syscall. */
2273 const struct bpf_func_proto bpf_map_lookup_elem_proto __weak;
2274 const struct bpf_func_proto bpf_map_update_elem_proto __weak;
2275 const struct bpf_func_proto bpf_map_delete_elem_proto __weak;
2276 const struct bpf_func_proto bpf_map_push_elem_proto __weak;
2277 const struct bpf_func_proto bpf_map_pop_elem_proto __weak;
2278 const struct bpf_func_proto bpf_map_peek_elem_proto __weak;
2279 const struct bpf_func_proto bpf_spin_lock_proto __weak;
2280 const struct bpf_func_proto bpf_spin_unlock_proto __weak;
2281 const struct bpf_func_proto bpf_jiffies64_proto __weak;
2283 const struct bpf_func_proto bpf_get_prandom_u32_proto __weak;
2284 const struct bpf_func_proto bpf_get_smp_processor_id_proto __weak;
2285 const struct bpf_func_proto bpf_get_numa_node_id_proto __weak;
2286 const struct bpf_func_proto bpf_ktime_get_ns_proto __weak;
2287 const struct bpf_func_proto bpf_ktime_get_boot_ns_proto __weak;
2288 const struct bpf_func_proto bpf_ktime_get_coarse_ns_proto __weak;
2290 const struct bpf_func_proto bpf_get_current_pid_tgid_proto __weak;
2291 const struct bpf_func_proto bpf_get_current_uid_gid_proto __weak;
2292 const struct bpf_func_proto bpf_get_current_comm_proto __weak;
2293 const struct bpf_func_proto bpf_get_current_cgroup_id_proto __weak;
2294 const struct bpf_func_proto bpf_get_current_ancestor_cgroup_id_proto __weak;
2295 const struct bpf_func_proto bpf_get_local_storage_proto __weak;
2296 const struct bpf_func_proto bpf_get_ns_current_pid_tgid_proto __weak;
2297 const struct bpf_func_proto bpf_snprintf_btf_proto __weak;
2298 const struct bpf_func_proto bpf_seq_printf_btf_proto __weak;
2300 const struct bpf_func_proto * __weak bpf_get_trace_printk_proto(void)
2306 bpf_event_output(struct bpf_map *map, u64 flags, void *meta, u64 meta_size,
2307 void *ctx, u64 ctx_size, bpf_ctx_copy_t ctx_copy)
2311 EXPORT_SYMBOL_GPL(bpf_event_output);
2313 /* Always built-in helper functions. */
2314 const struct bpf_func_proto bpf_tail_call_proto = {
2317 .ret_type = RET_VOID,
2318 .arg1_type = ARG_PTR_TO_CTX,
2319 .arg2_type = ARG_CONST_MAP_PTR,
2320 .arg3_type = ARG_ANYTHING,
2323 /* Stub for JITs that only support cBPF. eBPF programs are interpreted.
2324 * It is encouraged to implement bpf_int_jit_compile() instead, so that
2325 * eBPF and implicitly also cBPF can get JITed!
2327 struct bpf_prog * __weak bpf_int_jit_compile(struct bpf_prog *prog)
2332 /* Stub for JITs that support eBPF. All cBPF code gets transformed into
2333 * eBPF by the kernel and is later compiled by bpf_int_jit_compile().
2335 void __weak bpf_jit_compile(struct bpf_prog *prog)
2339 bool __weak bpf_helper_changes_pkt_data(void *func)
2344 /* Return TRUE if the JIT backend wants verifier to enable sub-register usage
2345 * analysis code and wants explicit zero extension inserted by verifier.
2346 * Otherwise, return FALSE.
2348 * The verifier inserts an explicit zero extension after BPF_CMPXCHGs even if
2349 * you don't override this. JITs that don't want these extra insns can detect
2350 * them using insn_is_zext.
2352 bool __weak bpf_jit_needs_zext(void)
2357 /* To execute LD_ABS/LD_IND instructions __bpf_prog_run() may call
2358 * skb_copy_bits(), so provide a weak definition of it for NET-less config.
2360 int __weak skb_copy_bits(const struct sk_buff *skb, int offset, void *to,
2366 int __weak bpf_arch_text_poke(void *ip, enum bpf_text_poke_type t,
2367 void *addr1, void *addr2)
2372 DEFINE_STATIC_KEY_FALSE(bpf_stats_enabled_key);
2373 EXPORT_SYMBOL(bpf_stats_enabled_key);
2375 /* All definitions of tracepoints related to BPF. */
2376 #define CREATE_TRACE_POINTS
2377 #include <linux/bpf_trace.h>
2379 EXPORT_TRACEPOINT_SYMBOL_GPL(xdp_exception);
2380 EXPORT_TRACEPOINT_SYMBOL_GPL(xdp_bulk_tx);