2 * Linux Socket Filter - Kernel level socket filtering
4 * Based on the design of the Berkeley Packet Filter. The new
5 * internal format has been designed by PLUMgrid:
7 * Copyright (c) 2011 - 2014 PLUMgrid, http://plumgrid.com
11 * Jay Schulist <jschlst@samba.org>
12 * Alexei Starovoitov <ast@plumgrid.com>
13 * Daniel Borkmann <dborkman@redhat.com>
15 * This program is free software; you can redistribute it and/or
16 * modify it under the terms of the GNU General Public License
17 * as published by the Free Software Foundation; either version
18 * 2 of the License, or (at your option) any later version.
20 * Andi Kleen - Fix a few bad bugs and races.
21 * Kris Katterjohn - Added many additional checks in bpf_check_classic()
24 #include <uapi/linux/btf.h>
25 #include <linux/filter.h>
26 #include <linux/skbuff.h>
27 #include <linux/vmalloc.h>
28 #include <linux/random.h>
29 #include <linux/moduleloader.h>
30 #include <linux/bpf.h>
31 #include <linux/btf.h>
32 #include <linux/frame.h>
33 #include <linux/rbtree_latch.h>
34 #include <linux/kallsyms.h>
35 #include <linux/rcupdate.h>
36 #include <linux/perf_event.h>
38 #include <asm/unaligned.h>
41 #define BPF_R0 regs[BPF_REG_0]
42 #define BPF_R1 regs[BPF_REG_1]
43 #define BPF_R2 regs[BPF_REG_2]
44 #define BPF_R3 regs[BPF_REG_3]
45 #define BPF_R4 regs[BPF_REG_4]
46 #define BPF_R5 regs[BPF_REG_5]
47 #define BPF_R6 regs[BPF_REG_6]
48 #define BPF_R7 regs[BPF_REG_7]
49 #define BPF_R8 regs[BPF_REG_8]
50 #define BPF_R9 regs[BPF_REG_9]
51 #define BPF_R10 regs[BPF_REG_10]
54 #define DST regs[insn->dst_reg]
55 #define SRC regs[insn->src_reg]
56 #define FP regs[BPF_REG_FP]
57 #define AX regs[BPF_REG_AX]
58 #define ARG1 regs[BPF_REG_ARG1]
59 #define CTX regs[BPF_REG_CTX]
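/* Illustration (not part of the interpreter itself): these macros simply
 * index the interpreter's register file with the fields of the current
 * insn. For a hypothetical instruction BPF_ALU64_REG(BPF_ADD, BPF_REG_2,
 * BPF_REG_3), DST is regs[2] and SRC is regs[3], so an ALU handler below
 * operating on DST and SRC performs r2 += r3.
 */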
62 /* No hurry in this branch
 *
 * Exported for the bpf jit load helper.
 */
66 void *bpf_internal_load_pointer_neg_helper(const struct sk_buff *skb, int k, unsigned int size)
71 ptr = skb_network_header(skb) + k - SKF_NET_OFF;
72 else if (k >= SKF_LL_OFF)
73 ptr = skb_mac_header(skb) + k - SKF_LL_OFF;
75 if (ptr >= skb->head && ptr + size <= skb_tail_pointer(skb))
81 struct bpf_prog *bpf_prog_alloc(unsigned int size, gfp_t gfp_extra_flags)
83 gfp_t gfp_flags = GFP_KERNEL | __GFP_ZERO | gfp_extra_flags;
84 struct bpf_prog_aux *aux;
87 size = round_up(size, PAGE_SIZE);
88 fp = __vmalloc(size, gfp_flags, PAGE_KERNEL);
92 aux = kzalloc(sizeof(*aux), GFP_KERNEL | gfp_extra_flags);
98 fp->pages = size / PAGE_SIZE;
101 fp->jit_requested = ebpf_jit_enabled();
103 INIT_LIST_HEAD_RCU(&fp->aux->ksym_lnode);
107 EXPORT_SYMBOL_GPL(bpf_prog_alloc);
109 int bpf_prog_alloc_jited_linfo(struct bpf_prog *prog)
111 if (!prog->aux->nr_linfo || !prog->jit_requested)
114 prog->aux->jited_linfo = kcalloc(prog->aux->nr_linfo,
115 sizeof(*prog->aux->jited_linfo),
116 GFP_KERNEL | __GFP_NOWARN);
117 if (!prog->aux->jited_linfo)
123 void bpf_prog_free_jited_linfo(struct bpf_prog *prog)
125 kfree(prog->aux->jited_linfo);
126 prog->aux->jited_linfo = NULL;
129 void bpf_prog_free_unused_jited_linfo(struct bpf_prog *prog)
131 if (prog->aux->jited_linfo && !prog->aux->jited_linfo[0])
132 bpf_prog_free_jited_linfo(prog);
/* The JIT engine is responsible for providing an array
 * that maps insn_off to jited_off (insn_to_jit_off).
 *
 * The index into this array is the insn_off. Hence, the insn_off
139 * here is relative to the prog itself instead of the main prog.
140 * This array has one entry for each xlated bpf insn.
142 * jited_off is the byte off to the last byte of the jited insn.
 * Hence, with
 * insn_start:
 *      The first bpf insn off of the prog. The insn off
 *      here is relative to the main prog,
 *      e.g. if prog is a subprog, insn_start > 0.
 * linfo_idx:
 *      The prog's idx to prog->aux->linfo and jited_linfo
 *
 * jited_linfo[linfo_idx] = prog->bpf_func
 *
 * For i > linfo_idx,
 * jited_linfo[i] = prog->bpf_func +
 *      insn_to_jit_off[linfo[i].insn_off - insn_start - 1]
 */
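/* A small worked example (purely illustrative numbers): for a subprog with
 * insn_start = 10 and linfo_idx = 3, the entry linfo[4] with insn_off = 12
 * maps to
 *
 *   jited_linfo[4] = prog->bpf_func + insn_to_jit_off[12 - 10 - 1]
 *
 * i.e. the address where the jited code for insn_off 12 begins, right past
 * the jited bytes of the previous insn of this subprog.
 */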
159 void bpf_prog_fill_jited_linfo(struct bpf_prog *prog,
160 const u32 *insn_to_jit_off)
162 u32 linfo_idx, insn_start, insn_end, nr_linfo, i;
163 const struct bpf_line_info *linfo;
166 if (!prog->aux->jited_linfo)
167 /* Userspace did not provide linfo */
170 linfo_idx = prog->aux->linfo_idx;
171 linfo = &prog->aux->linfo[linfo_idx];
172 insn_start = linfo[0].insn_off;
173 insn_end = insn_start + prog->len;
175 jited_linfo = &prog->aux->jited_linfo[linfo_idx];
176 jited_linfo[0] = prog->bpf_func;
178 nr_linfo = prog->aux->nr_linfo - linfo_idx;
180 for (i = 1; i < nr_linfo && linfo[i].insn_off < insn_end; i++)
181 /* The verifier ensures that linfo[i].insn_off is
182 * strictly increasing
184 jited_linfo[i] = prog->bpf_func +
185 insn_to_jit_off[linfo[i].insn_off - insn_start - 1];
188 void bpf_prog_free_linfo(struct bpf_prog *prog)
190 bpf_prog_free_jited_linfo(prog);
191 kvfree(prog->aux->linfo);
194 struct bpf_prog *bpf_prog_realloc(struct bpf_prog *fp_old, unsigned int size,
195 gfp_t gfp_extra_flags)
197 gfp_t gfp_flags = GFP_KERNEL | __GFP_ZERO | gfp_extra_flags;
202 BUG_ON(fp_old == NULL);
204 size = round_up(size, PAGE_SIZE);
205 pages = size / PAGE_SIZE;
206 if (pages <= fp_old->pages)
209 delta = pages - fp_old->pages;
210 ret = __bpf_prog_charge(fp_old->aux->user, delta);
214 fp = __vmalloc(size, gfp_flags, PAGE_KERNEL);
216 __bpf_prog_uncharge(fp_old->aux->user, delta);
218 memcpy(fp, fp_old, fp_old->pages * PAGE_SIZE);
222 /* We keep fp->aux from fp_old around in the new
223 * reallocated structure.
226 __bpf_prog_free(fp_old);
232 void __bpf_prog_free(struct bpf_prog *fp)
238 int bpf_prog_calc_tag(struct bpf_prog *fp)
240 const u32 bits_offset = SHA_MESSAGE_BYTES - sizeof(__be64);
241 u32 raw_size = bpf_prog_tag_scratch_size(fp);
242 u32 digest[SHA_DIGEST_WORDS];
243 u32 ws[SHA_WORKSPACE_WORDS];
244 u32 i, bsize, psize, blocks;
245 struct bpf_insn *dst;
251 raw = vmalloc(raw_size);
256 memset(ws, 0, sizeof(ws));
/* We need to take out the map fds for the digest calculation
 * since they are unstable from the user space side.
 */
262 for (i = 0, was_ld_map = false; i < fp->len; i++) {
263 dst[i] = fp->insnsi[i];
265 dst[i].code == (BPF_LD | BPF_IMM | BPF_DW) &&
266 dst[i].src_reg == BPF_PSEUDO_MAP_FD) {
269 } else if (was_ld_map &&
271 dst[i].dst_reg == 0 &&
272 dst[i].src_reg == 0 &&
281 psize = bpf_prog_insn_size(fp);
282 memset(&raw[psize], 0, raw_size - psize);
285 bsize = round_up(psize, SHA_MESSAGE_BYTES);
286 blocks = bsize / SHA_MESSAGE_BYTES;
288 if (bsize - psize >= sizeof(__be64)) {
289 bits = (__be64 *)(todo + bsize - sizeof(__be64));
291 bits = (__be64 *)(todo + bsize + bits_offset);
294 *bits = cpu_to_be64((psize - 1) << 3);
297 sha_transform(digest, todo, ws);
298 todo += SHA_MESSAGE_BYTES;
301 result = (__force __be32 *)digest;
302 for (i = 0; i < SHA_DIGEST_WORDS; i++)
303 result[i] = cpu_to_be32(digest[i]);
304 memcpy(fp->tag, result, sizeof(fp->tag));
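/* Note: fp->tag thus holds the leading BPF_TAG_SIZE bytes of the SHA-1
 * digest computed over the instruction stream with map fds zeroed out,
 * which is why the same program text always yields the same tag no matter
 * which map fds it was loaded with.
 */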
310 static int bpf_adj_delta_to_imm(struct bpf_insn *insn, u32 pos, s32 end_old,
311 s32 end_new, u32 curr, const bool probe_pass)
313 const s64 imm_min = S32_MIN, imm_max = S32_MAX;
314 s32 delta = end_new - end_old;
317 if (curr < pos && curr + imm + 1 >= end_old)
319 else if (curr >= end_new && curr + imm + 1 < end_new)
321 if (imm < imm_min || imm > imm_max)
328 static int bpf_adj_delta_to_off(struct bpf_insn *insn, u32 pos, s32 end_old,
329 s32 end_new, u32 curr, const bool probe_pass)
331 const s32 off_min = S16_MIN, off_max = S16_MAX;
332 s32 delta = end_new - end_old;
335 if (curr < pos && curr + off + 1 >= end_old)
337 else if (curr >= end_new && curr + off + 1 < end_new)
339 if (off < off_min || off > off_max)
346 static int bpf_adj_branches(struct bpf_prog *prog, u32 pos, s32 end_old,
347 s32 end_new, const bool probe_pass)
349 u32 i, insn_cnt = prog->len + (probe_pass ? end_new - end_old : 0);
350 struct bpf_insn *insn = prog->insnsi;
353 for (i = 0; i < insn_cnt; i++, insn++) {
356 /* In the probing pass we still operate on the original,
357 * unpatched image in order to check overflows before we
358 * do any other adjustments. Therefore skip the patchlet.
360 if (probe_pass && i == pos) {
362 insn = prog->insnsi + end_old;
365 if ((BPF_CLASS(code) != BPF_JMP &&
366 BPF_CLASS(code) != BPF_JMP32) ||
367 BPF_OP(code) == BPF_EXIT)
369 /* Adjust offset of jmps if we cross patch boundaries. */
370 if (BPF_OP(code) == BPF_CALL) {
371 if (insn->src_reg != BPF_PSEUDO_CALL)
373 ret = bpf_adj_delta_to_imm(insn, pos, end_old,
374 end_new, i, probe_pass);
376 ret = bpf_adj_delta_to_off(insn, pos, end_old,
377 end_new, i, probe_pass);
386 static void bpf_adj_linfo(struct bpf_prog *prog, u32 off, u32 delta)
388 struct bpf_line_info *linfo;
391 nr_linfo = prog->aux->nr_linfo;
392 if (!nr_linfo || !delta)
395 linfo = prog->aux->linfo;
397 for (i = 0; i < nr_linfo; i++)
398 if (off < linfo[i].insn_off)
401 /* Push all off < linfo[i].insn_off by delta */
402 for (; i < nr_linfo; i++)
403 linfo[i].insn_off += delta;
406 struct bpf_prog *bpf_patch_insn_single(struct bpf_prog *prog, u32 off,
407 const struct bpf_insn *patch, u32 len)
409 u32 insn_adj_cnt, insn_rest, insn_delta = len - 1;
410 const u32 cnt_max = S16_MAX;
411 struct bpf_prog *prog_adj;
413 /* Since our patchlet doesn't expand the image, we're done. */
414 if (insn_delta == 0) {
415 memcpy(prog->insnsi + off, patch, sizeof(*patch));
419 insn_adj_cnt = prog->len + insn_delta;
421 /* Reject anything that would potentially let the insn->off
422 * target overflow when we have excessive program expansions.
 * We need to probe here before we do any reallocation, since
 * afterwards we may not be able to fail anymore.
 */
426 if (insn_adj_cnt > cnt_max &&
427 bpf_adj_branches(prog, off, off + 1, off + len, true))
430 /* Several new instructions need to be inserted. Make room
 * for them. Likely, there's no need for a new allocation as the
 * last page could have enough tailroom.
 */
434 prog_adj = bpf_prog_realloc(prog, bpf_prog_size(insn_adj_cnt),
439 prog_adj->len = insn_adj_cnt;
441 /* Patching happens in 3 steps:
443 * 1) Move over tail of insnsi from next instruction onwards,
444 * so we can patch the single target insn with one or more
445 * new ones (patching is always from 1 to n insns, n > 0).
446 * 2) Inject new instructions at the target location.
447 * 3) Adjust branch offsets if necessary.
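/* For illustration (numbers made up): patching insn 5 with a 3 insn
 * patchlet in a 10 insn program gives insn_adj_cnt = 12, so insns 6..9
 * are first moved to slots 8..11, the patchlet is copied into slots 5..7,
 * and branches that jump across the patched region get their off/imm
 * adjusted by insn_delta = 2 via bpf_adj_branches().
 */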
449 insn_rest = insn_adj_cnt - off - len;
451 memmove(prog_adj->insnsi + off + len, prog_adj->insnsi + off + 1,
452 sizeof(*patch) * insn_rest);
453 memcpy(prog_adj->insnsi + off, patch, sizeof(*patch) * len);
/* We are guaranteed not to fail at this point; otherwise the ship
 * has sailed and there is no way to revert to the original state.
 * An overflow cannot happen at this point.
459 BUG_ON(bpf_adj_branches(prog_adj, off, off + 1, off + len, false));
461 bpf_adj_linfo(prog_adj, off, insn_delta);
466 int bpf_remove_insns(struct bpf_prog *prog, u32 off, u32 cnt)
/* Branch offsets can't overflow when the program is shrinking, so
 * there is no need to call bpf_adj_branches(..., true) here.
 */
471 memmove(prog->insnsi + off, prog->insnsi + off + cnt,
472 sizeof(struct bpf_insn) * (prog->len - off - cnt));
475 return WARN_ON_ONCE(bpf_adj_branches(prog, off, off + cnt, off, false));
478 void bpf_prog_kallsyms_del_subprogs(struct bpf_prog *fp)
482 for (i = 0; i < fp->aux->func_cnt; i++)
483 bpf_prog_kallsyms_del(fp->aux->func[i]);
486 void bpf_prog_kallsyms_del_all(struct bpf_prog *fp)
488 bpf_prog_kallsyms_del_subprogs(fp);
489 bpf_prog_kallsyms_del(fp);
492 #ifdef CONFIG_BPF_JIT
493 /* All BPF JIT sysctl knobs here. */
494 int bpf_jit_enable __read_mostly = IS_BUILTIN(CONFIG_BPF_JIT_ALWAYS_ON);
495 int bpf_jit_harden __read_mostly;
496 int bpf_jit_kallsyms __read_mostly;
497 long bpf_jit_limit __read_mostly;
499 static __always_inline void
500 bpf_get_prog_addr_region(const struct bpf_prog *prog,
501 unsigned long *symbol_start,
502 unsigned long *symbol_end)
504 const struct bpf_binary_header *hdr = bpf_jit_binary_hdr(prog);
505 unsigned long addr = (unsigned long)hdr;
507 WARN_ON_ONCE(!bpf_prog_ebpf_jited(prog));
509 *symbol_start = addr;
510 *symbol_end = addr + hdr->pages * PAGE_SIZE;
513 static void bpf_get_prog_name(const struct bpf_prog *prog, char *sym)
515 const char *end = sym + KSYM_NAME_LEN;
516 const struct btf_type *type;
517 const char *func_name;
519 BUILD_BUG_ON(sizeof("bpf_prog_") +
520 sizeof(prog->tag) * 2 +
/* name has been null terminated.
 * We would need +1 for the '_' preceding
 * the name. However, the null character
 * is double counted between the name and the
 * sizeof("bpf_prog_") above, so we omit
 * the '+1' here.
 */
528 sizeof(prog->aux->name) > KSYM_NAME_LEN);
530 sym += snprintf(sym, KSYM_NAME_LEN, "bpf_prog_");
531 sym = bin2hex(sym, prog->tag, sizeof(prog->tag));
533 /* prog->aux->name will be ignored if full btf name is available */
534 if (prog->aux->func_info_cnt) {
535 type = btf_type_by_id(prog->aux->btf,
536 prog->aux->func_info[prog->aux->func_idx].type_id);
537 func_name = btf_name_by_offset(prog->aux->btf, type->name_off);
538 snprintf(sym, (size_t)(end - sym), "_%s", func_name);
542 if (prog->aux->name[0])
543 snprintf(sym, (size_t)(end - sym), "_%s", prog->aux->name);
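/* The resulting ksym thus looks like "bpf_prog_<tag>", optionally followed
 * by "_<btf func name>" or "_<prog name>", e.g. (tag and name purely
 * illustrative):
 *
 *   bpf_prog_8937c9e909f66735_my_prog
 */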
548 static __always_inline unsigned long
549 bpf_get_prog_addr_start(struct latch_tree_node *n)
551 unsigned long symbol_start, symbol_end;
552 const struct bpf_prog_aux *aux;
554 aux = container_of(n, struct bpf_prog_aux, ksym_tnode);
555 bpf_get_prog_addr_region(aux->prog, &symbol_start, &symbol_end);
560 static __always_inline bool bpf_tree_less(struct latch_tree_node *a,
561 struct latch_tree_node *b)
563 return bpf_get_prog_addr_start(a) < bpf_get_prog_addr_start(b);
566 static __always_inline int bpf_tree_comp(void *key, struct latch_tree_node *n)
568 unsigned long val = (unsigned long)key;
569 unsigned long symbol_start, symbol_end;
570 const struct bpf_prog_aux *aux;
572 aux = container_of(n, struct bpf_prog_aux, ksym_tnode);
573 bpf_get_prog_addr_region(aux->prog, &symbol_start, &symbol_end);
575 if (val < symbol_start)
577 if (val >= symbol_end)
583 static const struct latch_tree_ops bpf_tree_ops = {
584 .less = bpf_tree_less,
585 .comp = bpf_tree_comp,
588 static DEFINE_SPINLOCK(bpf_lock);
589 static LIST_HEAD(bpf_kallsyms);
590 static struct latch_tree_root bpf_tree __cacheline_aligned;
592 static void bpf_prog_ksym_node_add(struct bpf_prog_aux *aux)
594 WARN_ON_ONCE(!list_empty(&aux->ksym_lnode));
595 list_add_tail_rcu(&aux->ksym_lnode, &bpf_kallsyms);
596 latch_tree_insert(&aux->ksym_tnode, &bpf_tree, &bpf_tree_ops);
599 static void bpf_prog_ksym_node_del(struct bpf_prog_aux *aux)
601 if (list_empty(&aux->ksym_lnode))
604 latch_tree_erase(&aux->ksym_tnode, &bpf_tree, &bpf_tree_ops);
605 list_del_rcu(&aux->ksym_lnode);
608 static bool bpf_prog_kallsyms_candidate(const struct bpf_prog *fp)
610 return fp->jited && !bpf_prog_was_classic(fp);
613 static bool bpf_prog_kallsyms_verify_off(const struct bpf_prog *fp)
615 return list_empty(&fp->aux->ksym_lnode) ||
616 fp->aux->ksym_lnode.prev == LIST_POISON2;
619 void bpf_prog_kallsyms_add(struct bpf_prog *fp)
621 if (!bpf_prog_kallsyms_candidate(fp) ||
622 !capable(CAP_SYS_ADMIN))
625 spin_lock_bh(&bpf_lock);
626 bpf_prog_ksym_node_add(fp->aux);
627 spin_unlock_bh(&bpf_lock);
630 void bpf_prog_kallsyms_del(struct bpf_prog *fp)
632 if (!bpf_prog_kallsyms_candidate(fp))
635 spin_lock_bh(&bpf_lock);
636 bpf_prog_ksym_node_del(fp->aux);
637 spin_unlock_bh(&bpf_lock);
640 static struct bpf_prog *bpf_prog_kallsyms_find(unsigned long addr)
642 struct latch_tree_node *n;
644 if (!bpf_jit_kallsyms_enabled())
647 n = latch_tree_find((void *)addr, &bpf_tree, &bpf_tree_ops);
649 container_of(n, struct bpf_prog_aux, ksym_tnode)->prog :
653 const char *__bpf_address_lookup(unsigned long addr, unsigned long *size,
654 unsigned long *off, char *sym)
656 unsigned long symbol_start, symbol_end;
657 struct bpf_prog *prog;
661 prog = bpf_prog_kallsyms_find(addr);
663 bpf_get_prog_addr_region(prog, &symbol_start, &symbol_end);
664 bpf_get_prog_name(prog, sym);
668 *size = symbol_end - symbol_start;
670 *off = addr - symbol_start;
677 bool is_bpf_text_address(unsigned long addr)
682 ret = bpf_prog_kallsyms_find(addr) != NULL;
688 int bpf_get_kallsym(unsigned int symnum, unsigned long *value, char *type,
691 struct bpf_prog_aux *aux;
695 if (!bpf_jit_kallsyms_enabled())
699 list_for_each_entry_rcu(aux, &bpf_kallsyms, ksym_lnode) {
703 bpf_get_prog_name(aux->prog, sym);
705 *value = (unsigned long)aux->prog->bpf_func;
706 *type = BPF_SYM_ELF_TYPE;
716 static atomic_long_t bpf_jit_current;
718 /* Can be overridden by an arch's JIT compiler if it has a custom,
 * dedicated BPF backend memory area, or if neither of the two
 * below apply.
 */
722 u64 __weak bpf_jit_alloc_exec_limit(void)
724 #if defined(MODULES_VADDR)
725 return MODULES_END - MODULES_VADDR;
727 return VMALLOC_END - VMALLOC_START;
731 static int __init bpf_jit_charge_init(void)
733 /* Only used as heuristic here to derive limit. */
734 bpf_jit_limit = min_t(u64, round_up(bpf_jit_alloc_exec_limit() >> 2,
735 PAGE_SIZE), LONG_MAX);
738 pure_initcall(bpf_jit_charge_init);
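/* With the above heuristic the default bpf_jit_limit ends up being roughly
 * a quarter of the arch's module (or, failing that, vmalloc) address space;
 * once the sum of charged pages exceeds it, only CAP_SYS_ADMIN may allocate
 * further JIT images (see bpf_jit_charge_modmem() below).
 */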
740 static int bpf_jit_charge_modmem(u32 pages)
742 if (atomic_long_add_return(pages, &bpf_jit_current) >
743 (bpf_jit_limit >> PAGE_SHIFT)) {
744 if (!capable(CAP_SYS_ADMIN)) {
745 atomic_long_sub(pages, &bpf_jit_current);
753 static void bpf_jit_uncharge_modmem(u32 pages)
755 atomic_long_sub(pages, &bpf_jit_current);
758 void *__weak bpf_jit_alloc_exec(unsigned long size)
760 return module_alloc(size);
763 void __weak bpf_jit_free_exec(void *addr)
765 module_memfree(addr);
768 struct bpf_binary_header *
769 bpf_jit_binary_alloc(unsigned int proglen, u8 **image_ptr,
770 unsigned int alignment,
771 bpf_jit_fill_hole_t bpf_fill_ill_insns)
773 struct bpf_binary_header *hdr;
774 u32 size, hole, start, pages;
/* Most BPF filters are really small, but if some of them
777 * fill a page, allow at least 128 extra bytes to insert a
778 * random section of illegal instructions.
780 size = round_up(proglen + sizeof(*hdr) + 128, PAGE_SIZE);
781 pages = size / PAGE_SIZE;
783 if (bpf_jit_charge_modmem(pages))
785 hdr = bpf_jit_alloc_exec(size);
787 bpf_jit_uncharge_modmem(pages);
791 /* Fill space with illegal/arch-dep instructions. */
792 bpf_fill_ill_insns(hdr, size);
795 hole = min_t(unsigned int, size - (proglen + sizeof(*hdr)),
796 PAGE_SIZE - sizeof(*hdr));
797 start = (get_random_int() % hole) & ~(alignment - 1);
799 /* Leave a random number of instructions before BPF code. */
800 *image_ptr = &hdr->image[start];
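/* Resulting layout of the allocation (sketch):
 *
 *   [ hdr ][ random hole, illegal insns ][ JITed image ][ illegal insns ]
 *                                         ^ *image_ptr = &hdr->image[start]
 *
 * so the image begins at a randomized, alignment-respecting offset and any
 * stray jump into the surrounding space hits an illegal instruction.
 */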
805 void bpf_jit_binary_free(struct bpf_binary_header *hdr)
807 u32 pages = hdr->pages;
809 bpf_jit_free_exec(hdr);
810 bpf_jit_uncharge_modmem(pages);
813 /* This symbol is only overridden by archs that have different
814 * requirements than the usual eBPF JITs, f.e. when they only
815 * implement cBPF JIT, do not set images read-only, etc.
817 void __weak bpf_jit_free(struct bpf_prog *fp)
820 struct bpf_binary_header *hdr = bpf_jit_binary_hdr(fp);
822 bpf_jit_binary_unlock_ro(hdr);
823 bpf_jit_binary_free(hdr);
825 WARN_ON_ONCE(!bpf_prog_kallsyms_verify_off(fp));
828 bpf_prog_unlock_free(fp);
831 int bpf_jit_get_func_addr(const struct bpf_prog *prog,
832 const struct bpf_insn *insn, bool extra_pass,
833 u64 *func_addr, bool *func_addr_fixed)
839 *func_addr_fixed = insn->src_reg != BPF_PSEUDO_CALL;
840 if (!*func_addr_fixed) {
841 /* Place-holder address till the last pass has collected
842 * all addresses for JITed subprograms in which case we
843 * can pick them up from prog->aux.
847 else if (prog->aux->func &&
848 off >= 0 && off < prog->aux->func_cnt)
849 addr = (u8 *)prog->aux->func[off]->bpf_func;
853 /* Address of a BPF helper call. Since part of the core
854 * kernel, it's always at a fixed location. __bpf_call_base
 * and the helper with imm relative to it are both in the
 * core kernel.
 */
858 addr = (u8 *)__bpf_call_base + imm;
861 *func_addr = (unsigned long)addr;
865 static int bpf_jit_blind_insn(const struct bpf_insn *from,
866 const struct bpf_insn *aux,
867 struct bpf_insn *to_buff)
869 struct bpf_insn *to = to_buff;
870 u32 imm_rnd = get_random_int();
873 BUILD_BUG_ON(BPF_REG_AX + 1 != MAX_BPF_JIT_REG);
874 BUILD_BUG_ON(MAX_BPF_REG + 1 != MAX_BPF_JIT_REG);
876 /* Constraints on AX register:
878 * AX register is inaccessible from user space. It is mapped in
879 * all JITs, and used here for constant blinding rewrites. It is
880 * typically "stateless" meaning its contents are only valid within
881 * the executed instruction, but not across several instructions.
 * There are a few exceptions however which are further detailed
 * below.
 *
885 * Constant blinding is only used by JITs, not in the interpreter.
886 * The interpreter uses AX in some occasions as a local temporary
887 * register e.g. in DIV or MOD instructions.
889 * In restricted circumstances, the verifier can also use the AX
 * register for rewrites as long as they do not interfere with
 * the above cases!
 */
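/* Worked example of the rewrite done below (illustrative values only):
 * a constant operand such as
 *
 *   BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, 0x1234)
 *
 * becomes, with a per-insn random imm_rnd:
 *
 *   BPF_ALU64_IMM(BPF_MOV, BPF_REG_AX, 0x1234 ^ imm_rnd)
 *   BPF_ALU64_IMM(BPF_XOR, BPF_REG_AX, imm_rnd)
 *   BPF_ALU64_REG(BPF_ADD, BPF_REG_2, BPF_REG_AX)
 *
 * so the original constant never appears verbatim in the JITed image.
 */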
893 if (from->dst_reg == BPF_REG_AX || from->src_reg == BPF_REG_AX)
896 if (from->imm == 0 &&
897 (from->code == (BPF_ALU | BPF_MOV | BPF_K) ||
898 from->code == (BPF_ALU64 | BPF_MOV | BPF_K))) {
899 *to++ = BPF_ALU64_REG(BPF_XOR, from->dst_reg, from->dst_reg);
903 switch (from->code) {
904 case BPF_ALU | BPF_ADD | BPF_K:
905 case BPF_ALU | BPF_SUB | BPF_K:
906 case BPF_ALU | BPF_AND | BPF_K:
907 case BPF_ALU | BPF_OR | BPF_K:
908 case BPF_ALU | BPF_XOR | BPF_K:
909 case BPF_ALU | BPF_MUL | BPF_K:
910 case BPF_ALU | BPF_MOV | BPF_K:
911 case BPF_ALU | BPF_DIV | BPF_K:
912 case BPF_ALU | BPF_MOD | BPF_K:
913 *to++ = BPF_ALU32_IMM(BPF_MOV, BPF_REG_AX, imm_rnd ^ from->imm);
914 *to++ = BPF_ALU32_IMM(BPF_XOR, BPF_REG_AX, imm_rnd);
915 *to++ = BPF_ALU32_REG(from->code, from->dst_reg, BPF_REG_AX);
918 case BPF_ALU64 | BPF_ADD | BPF_K:
919 case BPF_ALU64 | BPF_SUB | BPF_K:
920 case BPF_ALU64 | BPF_AND | BPF_K:
921 case BPF_ALU64 | BPF_OR | BPF_K:
922 case BPF_ALU64 | BPF_XOR | BPF_K:
923 case BPF_ALU64 | BPF_MUL | BPF_K:
924 case BPF_ALU64 | BPF_MOV | BPF_K:
925 case BPF_ALU64 | BPF_DIV | BPF_K:
926 case BPF_ALU64 | BPF_MOD | BPF_K:
927 *to++ = BPF_ALU64_IMM(BPF_MOV, BPF_REG_AX, imm_rnd ^ from->imm);
928 *to++ = BPF_ALU64_IMM(BPF_XOR, BPF_REG_AX, imm_rnd);
929 *to++ = BPF_ALU64_REG(from->code, from->dst_reg, BPF_REG_AX);
932 case BPF_JMP | BPF_JEQ | BPF_K:
933 case BPF_JMP | BPF_JNE | BPF_K:
934 case BPF_JMP | BPF_JGT | BPF_K:
935 case BPF_JMP | BPF_JLT | BPF_K:
936 case BPF_JMP | BPF_JGE | BPF_K:
937 case BPF_JMP | BPF_JLE | BPF_K:
938 case BPF_JMP | BPF_JSGT | BPF_K:
939 case BPF_JMP | BPF_JSLT | BPF_K:
940 case BPF_JMP | BPF_JSGE | BPF_K:
941 case BPF_JMP | BPF_JSLE | BPF_K:
942 case BPF_JMP | BPF_JSET | BPF_K:
/* Account for the extra offset in case of a backjump. */
947 *to++ = BPF_ALU64_IMM(BPF_MOV, BPF_REG_AX, imm_rnd ^ from->imm);
948 *to++ = BPF_ALU64_IMM(BPF_XOR, BPF_REG_AX, imm_rnd);
949 *to++ = BPF_JMP_REG(from->code, from->dst_reg, BPF_REG_AX, off);
952 case BPF_JMP32 | BPF_JEQ | BPF_K:
953 case BPF_JMP32 | BPF_JNE | BPF_K:
954 case BPF_JMP32 | BPF_JGT | BPF_K:
955 case BPF_JMP32 | BPF_JLT | BPF_K:
956 case BPF_JMP32 | BPF_JGE | BPF_K:
957 case BPF_JMP32 | BPF_JLE | BPF_K:
958 case BPF_JMP32 | BPF_JSGT | BPF_K:
959 case BPF_JMP32 | BPF_JSLT | BPF_K:
960 case BPF_JMP32 | BPF_JSGE | BPF_K:
961 case BPF_JMP32 | BPF_JSLE | BPF_K:
962 case BPF_JMP32 | BPF_JSET | BPF_K:
/* Account for the extra offset in case of a backjump. */
967 *to++ = BPF_ALU32_IMM(BPF_MOV, BPF_REG_AX, imm_rnd ^ from->imm);
968 *to++ = BPF_ALU32_IMM(BPF_XOR, BPF_REG_AX, imm_rnd);
969 *to++ = BPF_JMP32_REG(from->code, from->dst_reg, BPF_REG_AX,
973 case BPF_LD | BPF_IMM | BPF_DW:
974 *to++ = BPF_ALU64_IMM(BPF_MOV, BPF_REG_AX, imm_rnd ^ aux[1].imm);
975 *to++ = BPF_ALU64_IMM(BPF_XOR, BPF_REG_AX, imm_rnd);
976 *to++ = BPF_ALU64_IMM(BPF_LSH, BPF_REG_AX, 32);
977 *to++ = BPF_ALU64_REG(BPF_MOV, aux[0].dst_reg, BPF_REG_AX);
979 case 0: /* Part 2 of BPF_LD | BPF_IMM | BPF_DW. */
980 *to++ = BPF_ALU32_IMM(BPF_MOV, BPF_REG_AX, imm_rnd ^ aux[0].imm);
981 *to++ = BPF_ALU32_IMM(BPF_XOR, BPF_REG_AX, imm_rnd);
982 *to++ = BPF_ALU64_REG(BPF_OR, aux[0].dst_reg, BPF_REG_AX);
985 case BPF_ST | BPF_MEM | BPF_DW:
986 case BPF_ST | BPF_MEM | BPF_W:
987 case BPF_ST | BPF_MEM | BPF_H:
988 case BPF_ST | BPF_MEM | BPF_B:
989 *to++ = BPF_ALU64_IMM(BPF_MOV, BPF_REG_AX, imm_rnd ^ from->imm);
990 *to++ = BPF_ALU64_IMM(BPF_XOR, BPF_REG_AX, imm_rnd);
991 *to++ = BPF_STX_MEM(from->code, from->dst_reg, BPF_REG_AX, from->off);
998 static struct bpf_prog *bpf_prog_clone_create(struct bpf_prog *fp_other,
999 gfp_t gfp_extra_flags)
1001 gfp_t gfp_flags = GFP_KERNEL | __GFP_ZERO | gfp_extra_flags;
1002 struct bpf_prog *fp;
1004 fp = __vmalloc(fp_other->pages * PAGE_SIZE, gfp_flags, PAGE_KERNEL);
1006 /* aux->prog still points to the fp_other one, so
1007 * when promoting the clone to the real program,
1008 * this still needs to be adapted.
1010 memcpy(fp, fp_other, fp_other->pages * PAGE_SIZE);
1016 static void bpf_prog_clone_free(struct bpf_prog *fp)
1018 /* aux was stolen by the other clone, so we cannot free
1019 * it from this path! It will be freed eventually by the
1020 * other program on release.
1022 * At this point, we don't need a deferred release since
1023 * clone is guaranteed to not be locked.
1026 __bpf_prog_free(fp);
1029 void bpf_jit_prog_release_other(struct bpf_prog *fp, struct bpf_prog *fp_other)
1031 /* We have to repoint aux->prog to self, as we don't
1032 * know whether fp here is the clone or the original.
1035 bpf_prog_clone_free(fp_other);
1038 struct bpf_prog *bpf_jit_blind_constants(struct bpf_prog *prog)
1040 struct bpf_insn insn_buff[16], aux[2];
1041 struct bpf_prog *clone, *tmp;
1042 int insn_delta, insn_cnt;
1043 struct bpf_insn *insn;
1046 if (!bpf_jit_blinding_enabled(prog) || prog->blinded)
1049 clone = bpf_prog_clone_create(prog, GFP_USER);
1051 return ERR_PTR(-ENOMEM);
1053 insn_cnt = clone->len;
1054 insn = clone->insnsi;
1056 for (i = 0; i < insn_cnt; i++, insn++) {
1057 /* We temporarily need to hold the original ld64 insn
1058 * so that we can still access the first part in the
1059 * second blinding run.
1061 if (insn[0].code == (BPF_LD | BPF_IMM | BPF_DW) &&
1063 memcpy(aux, insn, sizeof(aux));
1065 rewritten = bpf_jit_blind_insn(insn, aux, insn_buff);
1069 tmp = bpf_patch_insn_single(clone, i, insn_buff, rewritten);
1071 /* Patching may have repointed aux->prog during
1072 * realloc from the original one, so we need to
1073 * fix it up here on error.
1075 bpf_jit_prog_release_other(prog, clone);
1076 return ERR_PTR(-ENOMEM);
1080 insn_delta = rewritten - 1;
1082 /* Walk new program and skip insns we just inserted. */
1083 insn = clone->insnsi + i + insn_delta;
1084 insn_cnt += insn_delta;
1091 #endif /* CONFIG_BPF_JIT */
1093 /* Base function for offset calculation. Needs to go into .text section,
1094 * therefore keeping it non-static as well; will also be used by JITs
1095 * anyway later on, so do not let the compiler omit it. This also needs
 * to go into kallsyms for correlation from e.g. bpftool, so naming
 * must not change.
 */
1099 noinline u64 __bpf_call_base(u64 r1, u64 r2, u64 r3, u64 r4, u64 r5)
1103 EXPORT_SYMBOL_GPL(__bpf_call_base);
1105 /* All UAPI available opcodes. */
1106 #define BPF_INSN_MAP(INSN_2, INSN_3) \
1107 /* 32 bit ALU operations. */ \
1108 /* Register based. */ \
1109 INSN_3(ALU, ADD, X), \
1110 INSN_3(ALU, SUB, X), \
1111 INSN_3(ALU, AND, X), \
1112 INSN_3(ALU, OR, X), \
1113 INSN_3(ALU, LSH, X), \
1114 INSN_3(ALU, RSH, X), \
1115 INSN_3(ALU, XOR, X), \
1116 INSN_3(ALU, MUL, X), \
1117 INSN_3(ALU, MOV, X), \
1118 INSN_3(ALU, ARSH, X), \
1119 INSN_3(ALU, DIV, X), \
1120 INSN_3(ALU, MOD, X), \
1122 INSN_3(ALU, END, TO_BE), \
1123 INSN_3(ALU, END, TO_LE), \
1124 /* Immediate based. */ \
1125 INSN_3(ALU, ADD, K), \
1126 INSN_3(ALU, SUB, K), \
1127 INSN_3(ALU, AND, K), \
1128 INSN_3(ALU, OR, K), \
1129 INSN_3(ALU, LSH, K), \
1130 INSN_3(ALU, RSH, K), \
1131 INSN_3(ALU, XOR, K), \
1132 INSN_3(ALU, MUL, K), \
1133 INSN_3(ALU, MOV, K), \
1134 INSN_3(ALU, ARSH, K), \
1135 INSN_3(ALU, DIV, K), \
1136 INSN_3(ALU, MOD, K), \
1137 /* 64 bit ALU operations. */ \
1138 /* Register based. */ \
1139 INSN_3(ALU64, ADD, X), \
1140 INSN_3(ALU64, SUB, X), \
1141 INSN_3(ALU64, AND, X), \
1142 INSN_3(ALU64, OR, X), \
1143 INSN_3(ALU64, LSH, X), \
1144 INSN_3(ALU64, RSH, X), \
1145 INSN_3(ALU64, XOR, X), \
1146 INSN_3(ALU64, MUL, X), \
1147 INSN_3(ALU64, MOV, X), \
1148 INSN_3(ALU64, ARSH, X), \
1149 INSN_3(ALU64, DIV, X), \
1150 INSN_3(ALU64, MOD, X), \
1151 INSN_2(ALU64, NEG), \
1152 /* Immediate based. */ \
1153 INSN_3(ALU64, ADD, K), \
1154 INSN_3(ALU64, SUB, K), \
1155 INSN_3(ALU64, AND, K), \
1156 INSN_3(ALU64, OR, K), \
1157 INSN_3(ALU64, LSH, K), \
1158 INSN_3(ALU64, RSH, K), \
1159 INSN_3(ALU64, XOR, K), \
1160 INSN_3(ALU64, MUL, K), \
1161 INSN_3(ALU64, MOV, K), \
1162 INSN_3(ALU64, ARSH, K), \
1163 INSN_3(ALU64, DIV, K), \
1164 INSN_3(ALU64, MOD, K), \
1165 /* Call instruction. */ \
1166 INSN_2(JMP, CALL), \
1167 /* Exit instruction. */ \
1168 INSN_2(JMP, EXIT), \
1169 /* 32-bit Jump instructions. */ \
1170 /* Register based. */ \
1171 INSN_3(JMP32, JEQ, X), \
1172 INSN_3(JMP32, JNE, X), \
1173 INSN_3(JMP32, JGT, X), \
1174 INSN_3(JMP32, JLT, X), \
1175 INSN_3(JMP32, JGE, X), \
1176 INSN_3(JMP32, JLE, X), \
1177 INSN_3(JMP32, JSGT, X), \
1178 INSN_3(JMP32, JSLT, X), \
1179 INSN_3(JMP32, JSGE, X), \
1180 INSN_3(JMP32, JSLE, X), \
1181 INSN_3(JMP32, JSET, X), \
1182 /* Immediate based. */ \
1183 INSN_3(JMP32, JEQ, K), \
1184 INSN_3(JMP32, JNE, K), \
1185 INSN_3(JMP32, JGT, K), \
1186 INSN_3(JMP32, JLT, K), \
1187 INSN_3(JMP32, JGE, K), \
1188 INSN_3(JMP32, JLE, K), \
1189 INSN_3(JMP32, JSGT, K), \
1190 INSN_3(JMP32, JSLT, K), \
1191 INSN_3(JMP32, JSGE, K), \
1192 INSN_3(JMP32, JSLE, K), \
1193 INSN_3(JMP32, JSET, K), \
1194 /* Jump instructions. */ \
1195 /* Register based. */ \
1196 INSN_3(JMP, JEQ, X), \
1197 INSN_3(JMP, JNE, X), \
1198 INSN_3(JMP, JGT, X), \
1199 INSN_3(JMP, JLT, X), \
1200 INSN_3(JMP, JGE, X), \
1201 INSN_3(JMP, JLE, X), \
1202 INSN_3(JMP, JSGT, X), \
1203 INSN_3(JMP, JSLT, X), \
1204 INSN_3(JMP, JSGE, X), \
1205 INSN_3(JMP, JSLE, X), \
1206 INSN_3(JMP, JSET, X), \
1207 /* Immediate based. */ \
1208 INSN_3(JMP, JEQ, K), \
1209 INSN_3(JMP, JNE, K), \
1210 INSN_3(JMP, JGT, K), \
1211 INSN_3(JMP, JLT, K), \
1212 INSN_3(JMP, JGE, K), \
1213 INSN_3(JMP, JLE, K), \
1214 INSN_3(JMP, JSGT, K), \
1215 INSN_3(JMP, JSLT, K), \
1216 INSN_3(JMP, JSGE, K), \
1217 INSN_3(JMP, JSLE, K), \
1218 INSN_3(JMP, JSET, K), \
1220 /* Store instructions. */ \
1221 /* Register based. */ \
1222 INSN_3(STX, MEM, B), \
1223 INSN_3(STX, MEM, H), \
1224 INSN_3(STX, MEM, W), \
1225 INSN_3(STX, MEM, DW), \
1226 INSN_3(STX, XADD, W), \
1227 INSN_3(STX, XADD, DW), \
1228 /* Immediate based. */ \
1229 INSN_3(ST, MEM, B), \
1230 INSN_3(ST, MEM, H), \
1231 INSN_3(ST, MEM, W), \
1232 INSN_3(ST, MEM, DW), \
1233 /* Load instructions. */ \
1234 /* Register based. */ \
1235 INSN_3(LDX, MEM, B), \
1236 INSN_3(LDX, MEM, H), \
1237 INSN_3(LDX, MEM, W), \
1238 INSN_3(LDX, MEM, DW), \
1239 /* Immediate based. */ \
1242 bool bpf_opcode_in_insntable(u8 code)
1244 #define BPF_INSN_2_TBL(x, y) [BPF_##x | BPF_##y] = true
1245 #define BPF_INSN_3_TBL(x, y, z) [BPF_##x | BPF_##y | BPF_##z] = true
1246 static const bool public_insntable[256] = {
1247 [0 ... 255] = false,
1248 /* Now overwrite non-defaults ... */
1249 BPF_INSN_MAP(BPF_INSN_2_TBL, BPF_INSN_3_TBL),
1250 /* UAPI exposed, but rewritten opcodes. cBPF carry-over. */
1251 [BPF_LD | BPF_ABS | BPF_B] = true,
1252 [BPF_LD | BPF_ABS | BPF_H] = true,
1253 [BPF_LD | BPF_ABS | BPF_W] = true,
1254 [BPF_LD | BPF_IND | BPF_B] = true,
1255 [BPF_LD | BPF_IND | BPF_H] = true,
1256 [BPF_LD | BPF_IND | BPF_W] = true,
1258 #undef BPF_INSN_3_TBL
1259 #undef BPF_INSN_2_TBL
1260 return public_insntable[code];
1263 #ifndef CONFIG_BPF_JIT_ALWAYS_ON
 * ___bpf_prog_run - run eBPF program on a given context
1266 * @regs: is the array of MAX_BPF_EXT_REG eBPF pseudo-registers
1267 * @insn: is the array of eBPF instructions
1268 * @stack: is the eBPF storage stack
1270 * Decode and execute eBPF instructions.
1272 static u64 ___bpf_prog_run(u64 *regs, const struct bpf_insn *insn, u64 *stack)
1274 #define BPF_INSN_2_LBL(x, y) [BPF_##x | BPF_##y] = &&x##_##y
1275 #define BPF_INSN_3_LBL(x, y, z) [BPF_##x | BPF_##y | BPF_##z] = &&x##_##y##_##z
1276 static const void *jumptable[256] = {
1277 [0 ... 255] = &&default_label,
1278 /* Now overwrite non-defaults ... */
1279 BPF_INSN_MAP(BPF_INSN_2_LBL, BPF_INSN_3_LBL),
1280 /* Non-UAPI available opcodes. */
1281 [BPF_JMP | BPF_CALL_ARGS] = &&JMP_CALL_ARGS,
1282 [BPF_JMP | BPF_TAIL_CALL] = &&JMP_TAIL_CALL,
1284 #undef BPF_INSN_3_LBL
1285 #undef BPF_INSN_2_LBL
1286 u32 tail_call_cnt = 0;
1288 #define CONT ({ insn++; goto select_insn; })
1289 #define CONT_JMP ({ insn++; goto select_insn; })
1292 goto *jumptable[insn->code];
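/* Dispatch uses GCC's computed goto ("labels as values"): every opcode's
 * handler is a label whose address sits in jumptable[], so each executed
 * insn costs one table load plus an indirect jump, with no central switch.
 */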
1295 #define ALU(OPCODE, OP) \
1296 ALU64_##OPCODE##_X: \
1300 DST = (u32) DST OP (u32) SRC; \
1302 ALU64_##OPCODE##_K: \
1306 DST = (u32) DST OP (u32) IMM; \
1337 DST = (u64) (u32) insn[0].imm | ((u64) (u32) insn[1].imm) << 32;
1341 DST = (u64) (u32) ((*(s32 *) &DST) >> SRC);
1344 DST = (u64) (u32) ((*(s32 *) &DST) >> IMM);
1347 (*(s64 *) &DST) >>= SRC;
1350 (*(s64 *) &DST) >>= IMM;
1353 div64_u64_rem(DST, SRC, &AX);
1358 DST = do_div(AX, (u32) SRC);
1361 div64_u64_rem(DST, IMM, &AX);
1366 DST = do_div(AX, (u32) IMM);
1369 DST = div64_u64(DST, SRC);
1373 do_div(AX, (u32) SRC);
1377 DST = div64_u64(DST, IMM);
1381 do_div(AX, (u32) IMM);
1387 DST = (__force u16) cpu_to_be16(DST);
1390 DST = (__force u32) cpu_to_be32(DST);
1393 DST = (__force u64) cpu_to_be64(DST);
1400 DST = (__force u16) cpu_to_le16(DST);
1403 DST = (__force u32) cpu_to_le32(DST);
1406 DST = (__force u64) cpu_to_le64(DST);
1413 /* Function call scratches BPF_R1-BPF_R5 registers,
 * preserves BPF_R6-BPF_R9, and stores the return value
 * into BPF_R0.
 */
1417 BPF_R0 = (__bpf_call_base + insn->imm)(BPF_R1, BPF_R2, BPF_R3,
1422 BPF_R0 = (__bpf_call_base_args + insn->imm)(BPF_R1, BPF_R2,
1425 insn + insn->off + 1);
1429 struct bpf_map *map = (struct bpf_map *) (unsigned long) BPF_R2;
1430 struct bpf_array *array = container_of(map, struct bpf_array, map);
1431 struct bpf_prog *prog;
1434 if (unlikely(index >= array->map.max_entries))
1436 if (unlikely(tail_call_cnt > MAX_TAIL_CALL_CNT))
1441 prog = READ_ONCE(array->ptrs[index]);
1445 /* ARG1 at this point is guaranteed to point to CTX from
1446 * the verifier side due to the fact that the tail call is
 * handled like a helper, that is, bpf_tail_call_proto,
1448 * where arg1_type is ARG_PTR_TO_CTX.
1450 insn = prog->insnsi;
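/* Note that a tail call never returns to the caller: execution simply
 * restarts at the target program's first insn with the current register
 * file and stack, and tail_call_cnt bounds how deep such chains may grow
 * (MAX_TAIL_CALL_CNT).
 */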
1461 #define COND_JMP(SIGN, OPCODE, CMP_OP) \
1463 if ((SIGN##64) DST CMP_OP (SIGN##64) SRC) { \
1464 insn += insn->off; \
1468 JMP32_##OPCODE##_X: \
1469 if ((SIGN##32) DST CMP_OP (SIGN##32) SRC) { \
1470 insn += insn->off; \
1475 if ((SIGN##64) DST CMP_OP (SIGN##64) IMM) { \
1476 insn += insn->off; \
1480 JMP32_##OPCODE##_K: \
1481 if ((SIGN##32) DST CMP_OP (SIGN##32) IMM) { \
1482 insn += insn->off; \
1486 COND_JMP(u, JEQ, ==)
1487 COND_JMP(u, JNE, !=)
1490 COND_JMP(u, JGE, >=)
1491 COND_JMP(u, JLE, <=)
1492 COND_JMP(u, JSET, &)
1493 COND_JMP(s, JSGT, >)
1494 COND_JMP(s, JSLT, <)
1495 COND_JMP(s, JSGE, >=)
1496 COND_JMP(s, JSLE, <=)
/* STX and ST and LDX */
1499 #define LDST(SIZEOP, SIZE) \
1501 *(SIZE *)(unsigned long) (DST + insn->off) = SRC; \
1504 *(SIZE *)(unsigned long) (DST + insn->off) = IMM; \
1507 DST = *(SIZE *)(unsigned long) (SRC + insn->off); \
1515 STX_XADD_W: /* lock xadd *(u32 *)(dst_reg + off16) += src_reg */
1516 atomic_add((u32) SRC, (atomic_t *)(unsigned long)
1519 STX_XADD_DW: /* lock xadd *(u64 *)(dst_reg + off16) += src_reg */
1520 atomic64_add((u64) SRC, (atomic64_t *)(unsigned long)
1525 /* If we ever reach this, we have a bug somewhere. Die hard here
1526 * instead of just returning 0; we could be somewhere in a subprog,
1527 * so execution could continue otherwise which we do /not/ want.
1529 * Note, verifier whitelists all opcodes in bpf_opcode_in_insntable().
1531 pr_warn("BPF interpreter: unknown opcode %02x\n", insn->code);
1535 STACK_FRAME_NON_STANDARD(___bpf_prog_run); /* jump table */
1537 #define PROG_NAME(stack_size) __bpf_prog_run##stack_size
1538 #define DEFINE_BPF_PROG_RUN(stack_size) \
1539 static unsigned int PROG_NAME(stack_size)(const void *ctx, const struct bpf_insn *insn) \
1541 u64 stack[stack_size / sizeof(u64)]; \
1542 u64 regs[MAX_BPF_EXT_REG]; \
1544 FP = (u64) (unsigned long) &stack[ARRAY_SIZE(stack)]; \
1545 ARG1 = (u64) (unsigned long) ctx; \
1546 return ___bpf_prog_run(regs, insn, stack); \
1549 #define PROG_NAME_ARGS(stack_size) __bpf_prog_run_args##stack_size
1550 #define DEFINE_BPF_PROG_RUN_ARGS(stack_size) \
1551 static u64 PROG_NAME_ARGS(stack_size)(u64 r1, u64 r2, u64 r3, u64 r4, u64 r5, \
1552 const struct bpf_insn *insn) \
1554 u64 stack[stack_size / sizeof(u64)]; \
1555 u64 regs[MAX_BPF_EXT_REG]; \
1557 FP = (u64) (unsigned long) &stack[ARRAY_SIZE(stack)]; \
1563 return ___bpf_prog_run(regs, insn, stack); \
1566 #define EVAL1(FN, X) FN(X)
1567 #define EVAL2(FN, X, Y...) FN(X) EVAL1(FN, Y)
1568 #define EVAL3(FN, X, Y...) FN(X) EVAL2(FN, Y)
1569 #define EVAL4(FN, X, Y...) FN(X) EVAL3(FN, Y)
1570 #define EVAL5(FN, X, Y...) FN(X) EVAL4(FN, Y)
1571 #define EVAL6(FN, X, Y...) FN(X) EVAL5(FN, Y)
1573 EVAL6(DEFINE_BPF_PROG_RUN, 32, 64, 96, 128, 160, 192);
1574 EVAL6(DEFINE_BPF_PROG_RUN, 224, 256, 288, 320, 352, 384);
1575 EVAL4(DEFINE_BPF_PROG_RUN, 416, 448, 480, 512);
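/* This generates one interpreter entry point per 32 byte stack-size bucket
 * (__bpf_prog_run32() ... __bpf_prog_run512()). bpf_prog_select_func()
 * below then picks the bucket from the verifier-computed stack depth, e.g.
 * a program using 72 bytes of stack rounds up to 96 and runs via
 * __bpf_prog_run96().
 */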
1577 EVAL6(DEFINE_BPF_PROG_RUN_ARGS, 32, 64, 96, 128, 160, 192);
1578 EVAL6(DEFINE_BPF_PROG_RUN_ARGS, 224, 256, 288, 320, 352, 384);
1579 EVAL4(DEFINE_BPF_PROG_RUN_ARGS, 416, 448, 480, 512);
1581 #define PROG_NAME_LIST(stack_size) PROG_NAME(stack_size),
1583 static unsigned int (*interpreters[])(const void *ctx,
1584 const struct bpf_insn *insn) = {
1585 EVAL6(PROG_NAME_LIST, 32, 64, 96, 128, 160, 192)
1586 EVAL6(PROG_NAME_LIST, 224, 256, 288, 320, 352, 384)
1587 EVAL4(PROG_NAME_LIST, 416, 448, 480, 512)
1589 #undef PROG_NAME_LIST
1590 #define PROG_NAME_LIST(stack_size) PROG_NAME_ARGS(stack_size),
1591 static u64 (*interpreters_args[])(u64 r1, u64 r2, u64 r3, u64 r4, u64 r5,
1592 const struct bpf_insn *insn) = {
1593 EVAL6(PROG_NAME_LIST, 32, 64, 96, 128, 160, 192)
1594 EVAL6(PROG_NAME_LIST, 224, 256, 288, 320, 352, 384)
1595 EVAL4(PROG_NAME_LIST, 416, 448, 480, 512)
1597 #undef PROG_NAME_LIST
1599 void bpf_patch_call_args(struct bpf_insn *insn, u32 stack_depth)
1601 stack_depth = max_t(u32, stack_depth, 1);
1602 insn->off = (s16) insn->imm;
1603 insn->imm = interpreters_args[(round_up(stack_depth, 32) / 32) - 1] -
1604 __bpf_call_base_args;
1605 insn->code = BPF_JMP | BPF_CALL_ARGS;
1609 static unsigned int __bpf_prog_ret0_warn(const void *ctx,
1610 const struct bpf_insn *insn)
1612 /* If this handler ever gets executed, then BPF_JIT_ALWAYS_ON
1613 * is not working properly, so warn about it!
1620 bool bpf_prog_array_compatible(struct bpf_array *array,
1621 const struct bpf_prog *fp)
1623 if (fp->kprobe_override)
1626 if (!array->owner_prog_type) {
/* There's no owner yet where we could check for
 * compatibility.
 */
1630 array->owner_prog_type = fp->type;
1631 array->owner_jited = fp->jited;
1636 return array->owner_prog_type == fp->type &&
1637 array->owner_jited == fp->jited;
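/* In other words: all programs sharing one prog_array used for tail calls
 * must agree on program type and on whether they are JITed, as mixing
 * JITed and interpreted programs within one tail call map is not supported.
 */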
1640 static int bpf_check_tail_call(const struct bpf_prog *fp)
1642 struct bpf_prog_aux *aux = fp->aux;
1645 for (i = 0; i < aux->used_map_cnt; i++) {
1646 struct bpf_map *map = aux->used_maps[i];
1647 struct bpf_array *array;
1649 if (map->map_type != BPF_MAP_TYPE_PROG_ARRAY)
1652 array = container_of(map, struct bpf_array, map);
1653 if (!bpf_prog_array_compatible(array, fp))
1660 static void bpf_prog_select_func(struct bpf_prog *fp)
1662 #ifndef CONFIG_BPF_JIT_ALWAYS_ON
1663 u32 stack_depth = max_t(u32, fp->aux->stack_depth, 1);
1665 fp->bpf_func = interpreters[(round_up(stack_depth, 32) / 32) - 1];
1667 fp->bpf_func = __bpf_prog_ret0_warn;
1672 * bpf_prog_select_runtime - select exec runtime for BPF program
1673 * @fp: bpf_prog populated with internal BPF program
1674 * @err: pointer to error variable
1676 * Try to JIT eBPF program, if JIT is not available, use interpreter.
1677 * The BPF program will be executed via BPF_PROG_RUN() macro.
1679 struct bpf_prog *bpf_prog_select_runtime(struct bpf_prog *fp, int *err)
1681 /* In case of BPF to BPF calls, verifier did all the prep
1682 * work with regards to JITing, etc.
1687 bpf_prog_select_func(fp);
1689 /* eBPF JITs can rewrite the program in case constant
1690 * blinding is active. However, in case of error during
1691 * blinding, bpf_int_jit_compile() must always return a
1692 * valid program, which in this case would simply not
1693 * be JITed, but falls back to the interpreter.
1695 if (!bpf_prog_is_dev_bound(fp->aux)) {
1696 *err = bpf_prog_alloc_jited_linfo(fp);
1700 fp = bpf_int_jit_compile(fp);
1702 bpf_prog_free_jited_linfo(fp);
1703 #ifdef CONFIG_BPF_JIT_ALWAYS_ON
1708 bpf_prog_free_unused_jited_linfo(fp);
1711 *err = bpf_prog_offload_compile(fp);
1717 bpf_prog_lock_ro(fp);
1719 /* The tail call compatibility check can only be done at
 * this late stage as we need to determine whether we deal
 * with JITed or non-JITed program concatenations, and not
1722 * all eBPF JITs might immediately support all features.
1724 *err = bpf_check_tail_call(fp);
1728 EXPORT_SYMBOL_GPL(bpf_prog_select_runtime);
1730 static unsigned int __bpf_prog_ret1(const void *ctx,
1731 const struct bpf_insn *insn)
1736 static struct bpf_prog_dummy {
1737 struct bpf_prog prog;
1738 } dummy_bpf_prog = {
1740 .bpf_func = __bpf_prog_ret1,
/* To avoid allocating an empty bpf_prog_array for cgroups that
 * don't have a bpf program attached, use one global 'empty_prog_array'.
 * It will not be modified by the caller of bpf_prog_array_alloc()
 * (since the caller requested prog_cnt == 0), and that pointer
 * should still be 'freed' by bpf_prog_array_free().
 */
1751 struct bpf_prog_array hdr;
1752 struct bpf_prog *null_prog;
1753 } empty_prog_array = {
1757 struct bpf_prog_array *bpf_prog_array_alloc(u32 prog_cnt, gfp_t flags)
1760 return kzalloc(sizeof(struct bpf_prog_array) +
1761 sizeof(struct bpf_prog_array_item) *
1765 return &empty_prog_array.hdr;
1768 void bpf_prog_array_free(struct bpf_prog_array __rcu *progs)
1771 progs == (struct bpf_prog_array __rcu *)&empty_prog_array.hdr)
1773 kfree_rcu(progs, rcu);
1776 int bpf_prog_array_length(struct bpf_prog_array __rcu *array)
1778 struct bpf_prog_array_item *item;
1782 item = rcu_dereference(array)->items;
1783 for (; item->prog; item++)
1784 if (item->prog != &dummy_bpf_prog.prog)
1791 static bool bpf_prog_array_copy_core(struct bpf_prog_array __rcu *array,
1795 struct bpf_prog_array_item *item;
1798 item = rcu_dereference_check(array, 1)->items;
1799 for (; item->prog; item++) {
1800 if (item->prog == &dummy_bpf_prog.prog)
1802 prog_ids[i] = item->prog->aux->id;
1803 if (++i == request_cnt) {
1809 return !!(item->prog);
1812 int bpf_prog_array_copy_to_user(struct bpf_prog_array __rcu *array,
1813 __u32 __user *prog_ids, u32 cnt)
1815 unsigned long err = 0;
1819 /* users of this function are doing:
1820 * cnt = bpf_prog_array_length();
 * if (cnt > 0)
 *         bpf_prog_array_copy_to_user(..., cnt);
1823 * so below kcalloc doesn't need extra cnt > 0 check, but
1824 * bpf_prog_array_length() releases rcu lock and
1825 * prog array could have been swapped with empty or larger array,
1826 * so always copy 'cnt' prog_ids to the user.
1827 * In a rare race the user will see zero prog_ids
1829 ids = kcalloc(cnt, sizeof(u32), GFP_USER | __GFP_NOWARN);
1833 nospc = bpf_prog_array_copy_core(array, ids, cnt);
1835 err = copy_to_user(prog_ids, ids, cnt * sizeof(u32));
1844 void bpf_prog_array_delete_safe(struct bpf_prog_array __rcu *array,
1845 struct bpf_prog *old_prog)
1847 struct bpf_prog_array_item *item = array->items;
1849 for (; item->prog; item++)
1850 if (item->prog == old_prog) {
1851 WRITE_ONCE(item->prog, &dummy_bpf_prog.prog);
1856 int bpf_prog_array_copy(struct bpf_prog_array __rcu *old_array,
1857 struct bpf_prog *exclude_prog,
1858 struct bpf_prog *include_prog,
1859 struct bpf_prog_array **new_array)
1861 int new_prog_cnt, carry_prog_cnt = 0;
1862 struct bpf_prog_array_item *existing;
1863 struct bpf_prog_array *array;
1864 bool found_exclude = false;
1865 int new_prog_idx = 0;
/* Figure out how many existing progs we need to carry over to
 * the new array.
 */
1871 existing = old_array->items;
1872 for (; existing->prog; existing++) {
1873 if (existing->prog == exclude_prog) {
1874 found_exclude = true;
1877 if (existing->prog != &dummy_bpf_prog.prog)
1879 if (existing->prog == include_prog)
1884 if (exclude_prog && !found_exclude)
1887 /* How many progs (not NULL) will be in the new array? */
1888 new_prog_cnt = carry_prog_cnt;
1892 /* Do we have any prog (not NULL) in the new array? */
1893 if (!new_prog_cnt) {
1898 /* +1 as the end of prog_array is marked with NULL */
1899 array = bpf_prog_array_alloc(new_prog_cnt + 1, GFP_KERNEL);
1903 /* Fill in the new prog array */
1904 if (carry_prog_cnt) {
1905 existing = old_array->items;
1906 for (; existing->prog; existing++)
1907 if (existing->prog != exclude_prog &&
1908 existing->prog != &dummy_bpf_prog.prog) {
1909 array->items[new_prog_idx++].prog =
1914 array->items[new_prog_idx++].prog = include_prog;
1915 array->items[new_prog_idx].prog = NULL;
1920 int bpf_prog_array_copy_info(struct bpf_prog_array __rcu *array,
1921 u32 *prog_ids, u32 request_cnt,
1927 cnt = bpf_prog_array_length(array);
1931 /* return early if user requested only program count or nothing to copy */
1932 if (!request_cnt || !cnt)
1935 /* this function is called under trace/bpf_trace.c: bpf_event_mutex */
1936 return bpf_prog_array_copy_core(array, prog_ids, request_cnt) ? -ENOSPC
1940 static void bpf_prog_free_deferred(struct work_struct *work)
1942 struct bpf_prog_aux *aux;
1945 aux = container_of(work, struct bpf_prog_aux, work);
1946 if (bpf_prog_is_dev_bound(aux))
1947 bpf_prog_offload_destroy(aux->prog);
1948 #ifdef CONFIG_PERF_EVENTS
1949 if (aux->prog->has_callchain_buf)
1950 put_callchain_buffers();
1952 for (i = 0; i < aux->func_cnt; i++)
1953 bpf_jit_free(aux->func[i]);
1954 if (aux->func_cnt) {
1956 bpf_prog_unlock_free(aux->prog);
1958 bpf_jit_free(aux->prog);
1962 /* Free internal BPF program */
1963 void bpf_prog_free(struct bpf_prog *fp)
1965 struct bpf_prog_aux *aux = fp->aux;
1967 INIT_WORK(&aux->work, bpf_prog_free_deferred);
1968 schedule_work(&aux->work);
1970 EXPORT_SYMBOL_GPL(bpf_prog_free);
/* RNG for unprivileged user space with separated state from prandom_u32(). */
1973 static DEFINE_PER_CPU(struct rnd_state, bpf_user_rnd_state);
1975 void bpf_user_rnd_init_once(void)
1977 prandom_init_once(&bpf_user_rnd_state);
1980 BPF_CALL_0(bpf_user_rnd_u32)
1982 /* Should someone ever have the rather unwise idea to use some
1983 * of the registers passed into this function, then note that
1984 * this function is called from native eBPF and classic-to-eBPF
1985 * transformations. Register assignments from both sides are
1986 * different, f.e. classic always sets fn(ctx, A, X) here.
1988 struct rnd_state *state;
1991 state = &get_cpu_var(bpf_user_rnd_state);
1992 res = prandom_u32_state(state);
1993 put_cpu_var(bpf_user_rnd_state);
1998 /* Weak definitions of helper functions in case we don't have bpf syscall. */
1999 const struct bpf_func_proto bpf_map_lookup_elem_proto __weak;
2000 const struct bpf_func_proto bpf_map_update_elem_proto __weak;
2001 const struct bpf_func_proto bpf_map_delete_elem_proto __weak;
2002 const struct bpf_func_proto bpf_map_push_elem_proto __weak;
2003 const struct bpf_func_proto bpf_map_pop_elem_proto __weak;
2004 const struct bpf_func_proto bpf_map_peek_elem_proto __weak;
2005 const struct bpf_func_proto bpf_spin_lock_proto __weak;
2006 const struct bpf_func_proto bpf_spin_unlock_proto __weak;
2008 const struct bpf_func_proto bpf_get_prandom_u32_proto __weak;
2009 const struct bpf_func_proto bpf_get_smp_processor_id_proto __weak;
2010 const struct bpf_func_proto bpf_get_numa_node_id_proto __weak;
2011 const struct bpf_func_proto bpf_ktime_get_ns_proto __weak;
2013 const struct bpf_func_proto bpf_get_current_pid_tgid_proto __weak;
2014 const struct bpf_func_proto bpf_get_current_uid_gid_proto __weak;
2015 const struct bpf_func_proto bpf_get_current_comm_proto __weak;
2016 const struct bpf_func_proto bpf_get_current_cgroup_id_proto __weak;
2017 const struct bpf_func_proto bpf_get_local_storage_proto __weak;
2019 const struct bpf_func_proto * __weak bpf_get_trace_printk_proto(void)
2025 bpf_event_output(struct bpf_map *map, u64 flags, void *meta, u64 meta_size,
2026 void *ctx, u64 ctx_size, bpf_ctx_copy_t ctx_copy)
2030 EXPORT_SYMBOL_GPL(bpf_event_output);
2032 /* Always built-in helper functions. */
2033 const struct bpf_func_proto bpf_tail_call_proto = {
2036 .ret_type = RET_VOID,
2037 .arg1_type = ARG_PTR_TO_CTX,
2038 .arg2_type = ARG_CONST_MAP_PTR,
2039 .arg3_type = ARG_ANYTHING,
2042 /* Stub for JITs that only support cBPF. eBPF programs are interpreted.
2043 * It is encouraged to implement bpf_int_jit_compile() instead, so that
2044 * eBPF and implicitly also cBPF can get JITed!
2046 struct bpf_prog * __weak bpf_int_jit_compile(struct bpf_prog *prog)
2051 /* Stub for JITs that support eBPF. All cBPF code gets transformed into
2052 * eBPF by the kernel and is later compiled by bpf_int_jit_compile().
2054 void __weak bpf_jit_compile(struct bpf_prog *prog)
2058 bool __weak bpf_helper_changes_pkt_data(void *func)
2063 /* To execute LD_ABS/LD_IND instructions __bpf_prog_run() may call
2064 * skb_copy_bits(), so provide a weak definition of it for NET-less config.
2066 int __weak skb_copy_bits(const struct sk_buff *skb, int offset, void *to,
2072 /* All definitions of tracepoints related to BPF. */
2073 #define CREATE_TRACE_POINTS
2074 #include <linux/bpf_trace.h>
2076 EXPORT_TRACEPOINT_SYMBOL_GPL(xdp_exception);