// SPDX-License-Identifier: GPL-2.0-only
/* Copyright (c) 2019 Facebook */
#include <linux/hash.h>
#include <linux/bpf.h>
#include <linux/filter.h>
#include <linux/ftrace.h>
#include <linux/rbtree_latch.h>
#include <linux/perf_event.h>
#include <linux/btf.h>
#include <linux/rcupdate_trace.h>
#include <linux/rcupdate_wait.h>
#include <linux/module.h>
#include <linux/static_call.h>
#include <linux/bpf_verifier.h>
#include <linux/bpf_lsm.h>
#include <linux/delay.h>

/* dummy _ops. The verifier will operate on target program's ops. */
const struct bpf_verifier_ops bpf_extension_verifier_ops = {
};
const struct bpf_prog_ops bpf_extension_prog_ops = {
};

/* btf_vmlinux has ~22k attachable functions. 1k htab is enough. */
#define TRAMPOLINE_HASH_BITS 10
#define TRAMPOLINE_TABLE_SIZE (1 << TRAMPOLINE_HASH_BITS)

static struct hlist_head trampoline_table[TRAMPOLINE_TABLE_SIZE];

/* serializes access to trampoline_table */
static DEFINE_MUTEX(trampoline_mutex);

#ifdef CONFIG_DYNAMIC_FTRACE_WITH_DIRECT_CALLS
static int bpf_trampoline_update(struct bpf_trampoline *tr, bool lock_direct_mutex);

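/* Called back from ftrace when another IPMODIFY user (e.g. livepatch) needs
 * to share the target function with this trampoline, or when the trampoline
 * itself is being registered on a function that already has an IPMODIFY
 * user. Roughly: flip BPF_TRAMP_F_SHARE_IPMODIFY and regenerate/retry as
 * needed.
 */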
static int bpf_tramp_ftrace_ops_func(struct ftrace_ops *ops, enum ftrace_ops_cmd cmd)
{
	struct bpf_trampoline *tr = ops->private;
	int ret = 0;

	if (cmd == FTRACE_OPS_CMD_ENABLE_SHARE_IPMODIFY_SELF) {
		/* This is called inside register_ftrace_direct_multi(), so
		 * tr->mutex is already locked.
		 */
		lockdep_assert_held_once(&tr->mutex);

		/* Instead of updating the trampoline here, we propagate
		 * -EAGAIN to register_ftrace_direct_multi(). Then we can
		 * retry register_ftrace_direct_multi() after updating the
		 * trampoline.
		 */
		if ((tr->flags & BPF_TRAMP_F_CALL_ORIG) &&
		    !(tr->flags & BPF_TRAMP_F_ORIG_STACK)) {
			if (WARN_ON_ONCE(tr->flags & BPF_TRAMP_F_SHARE_IPMODIFY))
				return -EBUSY;
			tr->flags |= BPF_TRAMP_F_SHARE_IPMODIFY;
			return -EAGAIN;
		}
		return 0;
	}

	/* The normal locking order is
	 *    tr->mutex => direct_mutex (ftrace.c) => ftrace_lock (ftrace.c)
	 *
	 * The following two commands are called from
	 *    prepare_direct_functions_for_ipmodify
	 *    cleanup_direct_functions_after_ipmodify
	 *
	 * In both cases, direct_mutex is already locked. Use
	 * mutex_trylock(&tr->mutex) to avoid deadlock in race condition
	 * (something else is making changes to this same trampoline).
	 */
	if (!mutex_trylock(&tr->mutex)) {
		/* sleep 1 ms to make sure whatever holding tr->mutex makes
		 * some progress.
		 */
		msleep(1);
		return -EAGAIN;
	}

	switch (cmd) {
	case FTRACE_OPS_CMD_ENABLE_SHARE_IPMODIFY_PEER:
		tr->flags |= BPF_TRAMP_F_SHARE_IPMODIFY;
		if ((tr->flags & BPF_TRAMP_F_CALL_ORIG) &&
		    !(tr->flags & BPF_TRAMP_F_ORIG_STACK))
			ret = bpf_trampoline_update(tr, false /* lock_direct_mutex */);
		break;
	case FTRACE_OPS_CMD_DISABLE_SHARE_IPMODIFY_PEER:
		tr->flags &= ~BPF_TRAMP_F_SHARE_IPMODIFY;
		if (tr->flags & BPF_TRAMP_F_ORIG_STACK)
			ret = bpf_trampoline_update(tr, false /* lock_direct_mutex */);
		break;
	default:
		ret = -EINVAL;
		break;
	}

	mutex_unlock(&tr->mutex);
	return ret;
}
#endif

bool bpf_prog_has_trampoline(const struct bpf_prog *prog)
{
	enum bpf_attach_type eatype = prog->expected_attach_type;
	enum bpf_prog_type ptype = prog->type;

	return (ptype == BPF_PROG_TYPE_TRACING &&
		(eatype == BPF_TRACE_FENTRY || eatype == BPF_TRACE_FEXIT ||
		 eatype == BPF_MODIFY_RETURN)) ||
		(ptype == BPF_PROG_TYPE_LSM && eatype == BPF_LSM_MAC);
}

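/* Each trampoline image occupies exactly one page, allocated below and
 * released with bpf_jit_free_exec() during image teardown.
 */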
void *bpf_jit_alloc_exec_page(void)
{
	void *image;

	image = bpf_jit_alloc_exec(PAGE_SIZE);
	if (!image)
		return NULL;

	set_vm_flush_reset_perms(image);
	/* Keep image as writeable. The alternative is to keep flipping ro/rw
	 * every time new program is attached or detached.
	 */
	set_memory_x((long)image, 1);
	return image;
}

void bpf_image_ksym_add(void *data, struct bpf_ksym *ksym)
{
	ksym->start = (unsigned long) data;
	ksym->end = ksym->start + PAGE_SIZE;
	bpf_ksym_add(ksym);
	perf_event_ksymbol(PERF_RECORD_KSYMBOL_TYPE_BPF, ksym->start,
			   PAGE_SIZE, false, ksym->name);
}

void bpf_image_ksym_del(struct bpf_ksym *ksym)
{
	bpf_ksym_del(ksym);
	perf_event_ksymbol(PERF_RECORD_KSYMBOL_TYPE_BPF, ksym->start,
			   PAGE_SIZE, true, ksym->name);
}

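/* Look up the trampoline for @key (derived from the attach target) and take
 * a reference. A new trampoline is allocated on first use; returns NULL only
 * on allocation failure.
 */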
static struct bpf_trampoline *bpf_trampoline_lookup(u64 key)
{
	struct bpf_trampoline *tr;
	struct hlist_head *head;
	int i;

	mutex_lock(&trampoline_mutex);
	head = &trampoline_table[hash_64(key, TRAMPOLINE_HASH_BITS)];
	hlist_for_each_entry(tr, head, hlist) {
		if (tr->key == key) {
			refcount_inc(&tr->refcnt);
			goto out;
		}
	}
	tr = kzalloc(sizeof(*tr), GFP_KERNEL);
	if (!tr)
		goto out;
#ifdef CONFIG_DYNAMIC_FTRACE_WITH_DIRECT_CALLS
	tr->fops = kzalloc(sizeof(struct ftrace_ops), GFP_KERNEL);
	if (!tr->fops) {
		kfree(tr);
		tr = NULL;
		goto out;
	}
	tr->fops->private = tr;
	tr->fops->ops_func = bpf_tramp_ftrace_ops_func;
#endif

	tr->key = key;
	INIT_HLIST_NODE(&tr->hlist);
	hlist_add_head(&tr->hlist, head);
	refcount_set(&tr->refcnt, 1);
	mutex_init(&tr->mutex);
	for (i = 0; i < BPF_TRAMP_MAX; i++)
		INIT_HLIST_HEAD(&tr->progs_hlist[i]);
out:
	mutex_unlock(&trampoline_mutex);
	return tr;
}

static int bpf_trampoline_module_get(struct bpf_trampoline *tr)
{
	struct module *mod;
	int err = 0;

	preempt_disable();
	mod = __module_text_address((unsigned long) tr->func.addr);
	if (mod && !try_module_get(mod))
		err = -ENOENT;
	preempt_enable();
	tr->mod = mod;
	return err;
}

static void bpf_trampoline_module_put(struct bpf_trampoline *tr)
{
	module_put(tr->mod);
	tr->mod = NULL;
}

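/* The three helpers below patch the target function's entry. When the target
 * is ftrace-managed (ftrace_location() finds a patchable site), the ftrace
 * direct-call API is used; otherwise the kernel text is poked directly via
 * bpf_arch_text_poke().
 */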
static int unregister_fentry(struct bpf_trampoline *tr, void *old_addr)
{
	void *ip = tr->func.addr;
	int ret;

	if (tr->func.ftrace_managed)
		ret = unregister_ftrace_direct_multi(tr->fops, (long)old_addr);
	else
		ret = bpf_arch_text_poke(ip, BPF_MOD_CALL, old_addr, NULL);

	if (!ret)
		bpf_trampoline_module_put(tr);
	return ret;
}

static int modify_fentry(struct bpf_trampoline *tr, void *old_addr, void *new_addr,
			 bool lock_direct_mutex)
{
	void *ip = tr->func.addr;
	int ret;

	if (tr->func.ftrace_managed) {
		if (lock_direct_mutex)
			ret = modify_ftrace_direct_multi(tr->fops, (long)new_addr);
		else
			ret = modify_ftrace_direct_multi_nolock(tr->fops, (long)new_addr);
	} else {
		ret = bpf_arch_text_poke(ip, BPF_MOD_CALL, old_addr, new_addr);
	}
	return ret;
}

/* first time registering */
static int register_fentry(struct bpf_trampoline *tr, void *new_addr)
{
	void *ip = tr->func.addr;
	unsigned long faddr;
	int ret;

	faddr = ftrace_location((unsigned long)ip);
	if (faddr) {
		if (!tr->fops)
			return -ENOTSUPP;
		tr->func.ftrace_managed = true;
	}

	if (bpf_trampoline_module_get(tr))
		return -ENOENT;

	if (tr->func.ftrace_managed) {
		ftrace_set_filter_ip(tr->fops, (unsigned long)ip, 0, 1);
		ret = register_ftrace_direct_multi(tr->fops, (long)new_addr);
	} else {
		ret = bpf_arch_text_poke(ip, BPF_MOD_CALL, NULL, new_addr);
	}

	if (ret)
		bpf_trampoline_module_put(tr);
	return ret;
}

static struct bpf_tramp_links *
bpf_trampoline_get_progs(const struct bpf_trampoline *tr, int *total, bool *ip_arg)
{
	struct bpf_tramp_link *link;
	struct bpf_tramp_links *tlinks;
	struct bpf_tramp_link **links;
	int kind;

	*total = 0;
	tlinks = kcalloc(BPF_TRAMP_MAX, sizeof(*tlinks), GFP_KERNEL);
	if (!tlinks)
		return ERR_PTR(-ENOMEM);

	for (kind = 0; kind < BPF_TRAMP_MAX; kind++) {
		tlinks[kind].nr_links = tr->progs_cnt[kind];
		*total += tr->progs_cnt[kind];
		links = tlinks[kind].links;

		hlist_for_each_entry(link, &tr->progs_hlist[kind], tramp_hlist) {
			*ip_arg |= link->link.prog->call_get_func_ip;
			*links++ = link;
		}
	}
	return tlinks;
}

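/* Trampoline image teardown is staged through the callbacks below; the
 * "step" comments on each callback refer to the sequence laid out in
 * bpf_tramp_image_put().
 */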
static void __bpf_tramp_image_put_deferred(struct work_struct *work)
{
	struct bpf_tramp_image *im;

	im = container_of(work, struct bpf_tramp_image, work);
	bpf_image_ksym_del(&im->ksym);
	bpf_jit_free_exec(im->image);
	bpf_jit_uncharge_modmem(PAGE_SIZE);
	percpu_ref_exit(&im->pcref);
	kfree_rcu(im, rcu);
}

/* callback, fexit step 3 or fentry step 2 */
static void __bpf_tramp_image_put_rcu(struct rcu_head *rcu)
{
	struct bpf_tramp_image *im;

	im = container_of(rcu, struct bpf_tramp_image, rcu);
	INIT_WORK(&im->work, __bpf_tramp_image_put_deferred);
	schedule_work(&im->work);
}

/* callback, fexit step 2. Called after percpu_ref_kill confirms. */
static void __bpf_tramp_image_release(struct percpu_ref *pcref)
{
	struct bpf_tramp_image *im;

	im = container_of(pcref, struct bpf_tramp_image, pcref);
	call_rcu_tasks(&im->rcu, __bpf_tramp_image_put_rcu);
}

/* callback, fexit or fentry step 1 */
static void __bpf_tramp_image_put_rcu_tasks(struct rcu_head *rcu)
{
	struct bpf_tramp_image *im;

	im = container_of(rcu, struct bpf_tramp_image, rcu);
	if (im->ip_after_call)
		/* the case of fmod_ret/fexit trampoline and CONFIG_PREEMPTION=y */
		percpu_ref_kill(&im->pcref);
	else
		/* the case of fentry trampoline */
		call_rcu_tasks(&im->rcu, __bpf_tramp_image_put_rcu);
}

static void bpf_tramp_image_put(struct bpf_tramp_image *im)
{
	/* The trampoline image that calls original function is using:
	 * rcu_read_lock_trace to protect sleepable bpf progs
	 * rcu_read_lock to protect normal bpf progs
	 * percpu_ref to protect trampoline itself
	 * rcu tasks to protect trampoline asm not covered by percpu_ref
	 * (which are few asm insns before __bpf_tramp_enter and
	 *  after __bpf_tramp_exit)
	 *
	 * The trampoline is unreachable before bpf_tramp_image_put().
	 *
	 * First, patch the trampoline to avoid calling into fexit progs.
	 * The progs will be freed even if the original function is still
	 * executing or sleeping.
	 * In case of CONFIG_PREEMPT=y use call_rcu_tasks() to wait on
	 * first few asm instructions to execute and call into
	 * __bpf_tramp_enter->percpu_ref_get.
	 * Then use percpu_ref_kill to wait for the trampoline and the original
	 * function to finish.
	 * Then use call_rcu_tasks() to make sure few asm insns in
	 * the trampoline epilogue are done as well.
	 *
	 * In !PREEMPT case the task that got interrupted in the first asm
	 * insns won't go through an RCU quiescent state which the
	 * percpu_ref_kill will be waiting for. Hence the first
	 * call_rcu_tasks() is not necessary.
	 */
	if (im->ip_after_call) {
		int err = bpf_arch_text_poke(im->ip_after_call, BPF_MOD_JUMP,
					     NULL, im->ip_epilogue);
		WARN_ON(err);
		if (IS_ENABLED(CONFIG_PREEMPTION))
			call_rcu_tasks(&im->rcu, __bpf_tramp_image_put_rcu_tasks);
		else
			percpu_ref_kill(&im->pcref);
		return;
	}

	/* The trampoline without fexit and fmod_ret progs doesn't call original
	 * function and doesn't use percpu_ref.
	 * Use call_rcu_tasks_trace() to wait for sleepable progs to finish.
	 * Then use call_rcu_tasks() to wait for the rest of trampoline asm
	 * to finish.
	 */
	call_rcu_tasks_trace(&im->rcu, __bpf_tramp_image_put_rcu_tasks);
}

static struct bpf_tramp_image *bpf_tramp_image_alloc(u64 key, u32 idx)
{
	struct bpf_tramp_image *im;
	struct bpf_ksym *ksym;
	void *image;
	int err = -ENOMEM;

	im = kzalloc(sizeof(*im), GFP_KERNEL);
	if (!im)
		goto out;

	err = bpf_jit_charge_modmem(PAGE_SIZE);
	if (err)
		goto out_free_im;

	err = -ENOMEM;
	im->image = image = bpf_jit_alloc_exec_page();
	if (!image)
		goto out_uncharge;

	err = percpu_ref_init(&im->pcref, __bpf_tramp_image_release, 0, GFP_KERNEL);
	if (err)
		goto out_free_image;

	ksym = &im->ksym;
	INIT_LIST_HEAD_RCU(&ksym->lnode);
	snprintf(ksym->name, KSYM_NAME_LEN, "bpf_trampoline_%llu_%u", key, idx);
	bpf_image_ksym_add(image, ksym);
	return im;

out_free_image:
	bpf_jit_free_exec(im->image);
out_uncharge:
	bpf_jit_uncharge_modmem(PAGE_SIZE);
out_free_im:
	kfree(im);
out:
	return ERR_PTR(err);
}

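/* Regenerate the trampoline image for the current set of attached links and
 * swap it in at the target address. Called with tr->mutex held.
 */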
static int bpf_trampoline_update(struct bpf_trampoline *tr, bool lock_direct_mutex)
{
	struct bpf_tramp_image *im;
	struct bpf_tramp_links *tlinks;
	u32 orig_flags = tr->flags;
	bool ip_arg = false;
	int err, total;

	tlinks = bpf_trampoline_get_progs(tr, &total, &ip_arg);
	if (IS_ERR(tlinks))
		return PTR_ERR(tlinks);

	if (total == 0) {
		err = unregister_fentry(tr, tr->cur_image->image);
		bpf_tramp_image_put(tr->cur_image);
		tr->cur_image = NULL;
		tr->selector = 0;
		goto out;
	}

	im = bpf_tramp_image_alloc(tr->key, tr->selector);
	if (IS_ERR(im)) {
		err = PTR_ERR(im);
		goto out;
	}

	/* clear all bits except SHARE_IPMODIFY */
	tr->flags &= BPF_TRAMP_F_SHARE_IPMODIFY;

	if (tlinks[BPF_TRAMP_FEXIT].nr_links ||
	    tlinks[BPF_TRAMP_MODIFY_RETURN].nr_links) {
		/* NOTE: BPF_TRAMP_F_RESTORE_REGS and BPF_TRAMP_F_SKIP_FRAME
		 * should not be set together.
		 */
		tr->flags |= BPF_TRAMP_F_CALL_ORIG | BPF_TRAMP_F_SKIP_FRAME;
	} else {
		tr->flags |= BPF_TRAMP_F_RESTORE_REGS;
	}

	if (ip_arg)
		tr->flags |= BPF_TRAMP_F_IP_ARG;

#ifdef CONFIG_DYNAMIC_FTRACE_WITH_DIRECT_CALLS
again:
	if ((tr->flags & BPF_TRAMP_F_SHARE_IPMODIFY) &&
	    (tr->flags & BPF_TRAMP_F_CALL_ORIG))
		tr->flags |= BPF_TRAMP_F_ORIG_STACK;
#endif

	err = arch_prepare_bpf_trampoline(im, im->image, im->image + PAGE_SIZE,
					  &tr->func.model, tr->flags, tlinks,
					  tr->func.addr);
	if (err < 0)
		goto out;

	WARN_ON(tr->cur_image && tr->selector == 0);
	WARN_ON(!tr->cur_image && tr->selector);
	if (tr->cur_image)
		/* progs already running at this address */
		err = modify_fentry(tr, tr->cur_image->image, im->image, lock_direct_mutex);
	else
		/* first time registering */
		err = register_fentry(tr, im->image);

#ifdef CONFIG_DYNAMIC_FTRACE_WITH_DIRECT_CALLS
	if (err == -EAGAIN) {
		/* -EAGAIN from bpf_tramp_ftrace_ops_func. Now
		 * BPF_TRAMP_F_SHARE_IPMODIFY is set, we can generate the
		 * trampoline again, and retry register.
		 */
		/* reset fops->func and fops->trampoline for re-register */
		tr->fops->func = NULL;
		tr->fops->trampoline = 0;
		goto again;
	}
#endif
	if (err)
		goto out;

	if (tr->cur_image)
		bpf_tramp_image_put(tr->cur_image);
	tr->cur_image = im;
	tr->selector++;
out:
	/* If any error happens, restore previous flags */
	if (err)
		tr->flags = orig_flags;
	kfree(tlinks);
	return err;
}

static enum bpf_tramp_prog_type bpf_attach_type_to_tramp(struct bpf_prog *prog)
{
	switch (prog->expected_attach_type) {
	case BPF_TRACE_FENTRY:
		return BPF_TRAMP_FENTRY;
	case BPF_MODIFY_RETURN:
		return BPF_TRAMP_MODIFY_RETURN;
	case BPF_TRACE_FEXIT:
		return BPF_TRAMP_FEXIT;
	case BPF_LSM_MAC:
		if (!prog->aux->attach_func_proto->type)
			/* The function returns void, we cannot modify its
			 * return value.
			 */
			return BPF_TRAMP_FEXIT;
		else
			return BPF_TRAMP_MODIFY_RETURN;
	default:
		return BPF_TRAMP_REPLACE;
	}
}

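/* Attach rules enforced below: an extension (freplace) prog excludes all
 * fentry/fexit/fmod_ret progs and vice versa, a prog can be linked only
 * once, and at most BPF_MAX_TRAMP_LINKS progs can share one trampoline.
 */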
static int __bpf_trampoline_link_prog(struct bpf_tramp_link *link, struct bpf_trampoline *tr)
{
	enum bpf_tramp_prog_type kind;
	struct bpf_tramp_link *link_exiting;
	int err = 0;
	int cnt = 0, i;

	kind = bpf_attach_type_to_tramp(link->link.prog);
	if (tr->extension_prog)
		/* cannot attach fentry/fexit if extension prog is attached.
		 * cannot overwrite extension prog either.
		 */
		return -EBUSY;

	for (i = 0; i < BPF_TRAMP_MAX; i++)
		cnt += tr->progs_cnt[i];

	if (kind == BPF_TRAMP_REPLACE) {
		/* Cannot attach extension if fentry/fexit are in use. */
		if (cnt)
			return -EBUSY;
		tr->extension_prog = link->link.prog;
		return bpf_arch_text_poke(tr->func.addr, BPF_MOD_JUMP, NULL,
					  link->link.prog->bpf_func);
	}
	if (cnt >= BPF_MAX_TRAMP_LINKS)
		return -E2BIG;
	if (!hlist_unhashed(&link->tramp_hlist))
		/* prog already linked */
		return -EBUSY;
	hlist_for_each_entry(link_exiting, &tr->progs_hlist[kind], tramp_hlist) {
		if (link_exiting->link.prog != link->link.prog)
			continue;
		/* prog already linked */
		return -EBUSY;
	}

	hlist_add_head(&link->tramp_hlist, &tr->progs_hlist[kind]);
	tr->progs_cnt[kind]++;
	err = bpf_trampoline_update(tr, true /* lock_direct_mutex */);
	if (err) {
		hlist_del_init(&link->tramp_hlist);
		tr->progs_cnt[kind]--;
	}
	return err;
}

int bpf_trampoline_link_prog(struct bpf_tramp_link *link, struct bpf_trampoline *tr)
{
	int err;

	mutex_lock(&tr->mutex);
	err = __bpf_trampoline_link_prog(link, tr);
	mutex_unlock(&tr->mutex);
	return err;
}

static int __bpf_trampoline_unlink_prog(struct bpf_tramp_link *link, struct bpf_trampoline *tr)
{
	enum bpf_tramp_prog_type kind;
	int err;

	kind = bpf_attach_type_to_tramp(link->link.prog);
	if (kind == BPF_TRAMP_REPLACE) {
		WARN_ON_ONCE(!tr->extension_prog);
		err = bpf_arch_text_poke(tr->func.addr, BPF_MOD_JUMP,
					 tr->extension_prog->bpf_func, NULL);
		tr->extension_prog = NULL;
		return err;
	}
	hlist_del_init(&link->tramp_hlist);
	tr->progs_cnt[kind]--;
	return bpf_trampoline_update(tr, true /* lock_direct_mutex */);
}

/* bpf_trampoline_unlink_prog() should never fail. */
int bpf_trampoline_unlink_prog(struct bpf_tramp_link *link, struct bpf_trampoline *tr)
{
	int err;

	mutex_lock(&tr->mutex);
	err = __bpf_trampoline_unlink_prog(link, tr);
	mutex_unlock(&tr->mutex);
	return err;
}

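/* BPF_LSM_CGROUP: a single shim prog is attached to the LSM hook's
 * trampoline and multiplexes into per-cgroup programs. The shim is shared
 * and refcounted via its bpf_link; conceptually (a sketch of the lifetime,
 * not literal code):
 *
 *	first attach  -> cgroup_shim_alloc() + __bpf_trampoline_link_prog()
 *	later attach  -> cgroup_shim_find() + bpf_link_inc()
 *	each detach   -> bpf_link_put(); the last put unlinks the shim
 */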
#if defined(CONFIG_CGROUP_BPF) && defined(CONFIG_BPF_LSM)
static void bpf_shim_tramp_link_release(struct bpf_link *link)
{
	struct bpf_shim_tramp_link *shim_link =
		container_of(link, struct bpf_shim_tramp_link, link.link);

	/* paired with 'shim_link->trampoline = tr' in bpf_trampoline_link_cgroup_shim */
	if (!shim_link->trampoline)
		return;

	WARN_ON_ONCE(bpf_trampoline_unlink_prog(&shim_link->link, shim_link->trampoline));
	bpf_trampoline_put(shim_link->trampoline);
}

static void bpf_shim_tramp_link_dealloc(struct bpf_link *link)
{
	struct bpf_shim_tramp_link *shim_link =
		container_of(link, struct bpf_shim_tramp_link, link.link);

	kfree(shim_link);
}

static const struct bpf_link_ops bpf_shim_tramp_link_lops = {
	.release = bpf_shim_tramp_link_release,
	.dealloc = bpf_shim_tramp_link_dealloc,
};

static struct bpf_shim_tramp_link *cgroup_shim_alloc(const struct bpf_prog *prog,
						     bpf_func_t bpf_func,
						     int cgroup_atype)
{
	struct bpf_shim_tramp_link *shim_link = NULL;
	struct bpf_prog *p;

	shim_link = kzalloc(sizeof(*shim_link), GFP_USER);
	if (!shim_link)
		return NULL;

	p = bpf_prog_alloc(1, 0);
	if (!p) {
		kfree(shim_link);
		return NULL;
	}

	p->jited = false;
	p->bpf_func = bpf_func;

	p->aux->cgroup_atype = cgroup_atype;
	p->aux->attach_func_proto = prog->aux->attach_func_proto;
	p->aux->attach_btf_id = prog->aux->attach_btf_id;
	p->aux->attach_btf = prog->aux->attach_btf;
	btf_get(p->aux->attach_btf);
	p->type = BPF_PROG_TYPE_LSM;
	p->expected_attach_type = BPF_LSM_MAC;
	bpf_prog_inc(p);
	bpf_link_init(&shim_link->link.link, BPF_LINK_TYPE_UNSPEC,
		      &bpf_shim_tramp_link_lops, p);
	bpf_cgroup_atype_get(p->aux->attach_btf_id, cgroup_atype);

	return shim_link;
}

static struct bpf_shim_tramp_link *cgroup_shim_find(struct bpf_trampoline *tr,
						    bpf_func_t bpf_func)
{
	struct bpf_tramp_link *link;
	int kind;

	for (kind = 0; kind < BPF_TRAMP_MAX; kind++) {
		hlist_for_each_entry(link, &tr->progs_hlist[kind], tramp_hlist) {
			struct bpf_prog *p = link->link.prog;

			if (p->bpf_func == bpf_func)
				return container_of(link, struct bpf_shim_tramp_link, link);
		}
	}
	return NULL;
}

int bpf_trampoline_link_cgroup_shim(struct bpf_prog *prog,
				    int cgroup_atype)
{
	struct bpf_shim_tramp_link *shim_link = NULL;
	struct bpf_attach_target_info tgt_info = {};
	struct bpf_trampoline *tr;
	bpf_func_t bpf_func;
	u64 key;
	int err;

	err = bpf_check_attach_target(NULL, prog, NULL,
				      prog->aux->attach_btf_id,
				      &tgt_info);
	if (err)
		return err;

	key = bpf_trampoline_compute_key(NULL, prog->aux->attach_btf,
					 prog->aux->attach_btf_id);

	bpf_lsm_find_cgroup_shim(prog, &bpf_func);
	tr = bpf_trampoline_get(key, &tgt_info);
	if (!tr)
		return -ENOMEM;

	mutex_lock(&tr->mutex);
	shim_link = cgroup_shim_find(tr, bpf_func);
	if (shim_link) {
		/* Reusing existing shim attached by the other program. */
		bpf_link_inc(&shim_link->link.link);

		mutex_unlock(&tr->mutex);
		bpf_trampoline_put(tr); /* bpf_trampoline_get above */
		return 0;
	}

	/* Allocate and install new shim. */
	shim_link = cgroup_shim_alloc(prog, bpf_func, cgroup_atype);
	if (!shim_link) {
		err = -ENOMEM;
		goto err;
	}

	err = __bpf_trampoline_link_prog(&shim_link->link, tr);
	if (err)
		goto err;

	shim_link->trampoline = tr;
	/* note, we're still holding tr refcnt from above */

	mutex_unlock(&tr->mutex);
	return 0;
err:
	mutex_unlock(&tr->mutex);

	if (shim_link)
		bpf_link_put(&shim_link->link.link);

	/* have to release tr while _not_ holding its mutex */
	bpf_trampoline_put(tr); /* bpf_trampoline_get above */
	return err;
}

void bpf_trampoline_unlink_cgroup_shim(struct bpf_prog *prog)
{
	struct bpf_shim_tramp_link *shim_link = NULL;
	struct bpf_trampoline *tr;
	bpf_func_t bpf_func;
	u64 key;

	key = bpf_trampoline_compute_key(NULL, prog->aux->attach_btf,
					 prog->aux->attach_btf_id);

	bpf_lsm_find_cgroup_shim(prog, &bpf_func);
	tr = bpf_trampoline_lookup(key);
	if (WARN_ON_ONCE(!tr))
		return;

	mutex_lock(&tr->mutex);
	shim_link = cgroup_shim_find(tr, bpf_func);
	mutex_unlock(&tr->mutex);

	if (shim_link)
		bpf_link_put(&shim_link->link.link);

	bpf_trampoline_put(tr); /* bpf_trampoline_lookup above */
}
#endif

struct bpf_trampoline *bpf_trampoline_get(u64 key,
					  struct bpf_attach_target_info *tgt_info)
{
	struct bpf_trampoline *tr;

	tr = bpf_trampoline_lookup(key);
	if (!tr)
		return NULL;

	mutex_lock(&tr->mutex);
	if (tr->func.addr)
		goto out;

	memcpy(&tr->func.model, &tgt_info->fmodel, sizeof(tgt_info->fmodel));
	tr->func.addr = (void *)tgt_info->tgt_addr;
out:
	mutex_unlock(&tr->mutex);
	return tr;
}

void bpf_trampoline_put(struct bpf_trampoline *tr)
{
	int i;

	if (!tr)
		return;
	mutex_lock(&trampoline_mutex);
	if (!refcount_dec_and_test(&tr->refcnt))
		goto out;
	WARN_ON_ONCE(mutex_is_locked(&tr->mutex));

	for (i = 0; i < BPF_TRAMP_MAX; i++)
		if (WARN_ON_ONCE(!hlist_empty(&tr->progs_hlist[i])))
			goto out;

	/* This code will be executed even when the last bpf_tramp_image
	 * is alive. All progs are detached from the trampoline and the
	 * trampoline image is patched with jmp into epilogue to skip
	 * fexit progs. The fentry-only trampoline will be freed via
	 * multiple rcu callbacks.
	 */
	hlist_del(&tr->hlist);
	kfree(tr->fops);
	kfree(tr);
out:
	mutex_unlock(&trampoline_mutex);
}

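/* The enter/exit pairs below are called by the code that
 * arch_prepare_bpf_trampoline() emits around each prog invocation; they
 * handle RCU, migration, recursion protection and optional runtime stats.
 */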
#define NO_START_TIME 1
static __always_inline u64 notrace bpf_prog_start_time(void)
{
	u64 start = NO_START_TIME;

	if (static_branch_unlikely(&bpf_stats_enabled_key)) {
		start = sched_clock();
		if (unlikely(!start))
			start = NO_START_TIME;
	}
	return start;
}

static void notrace inc_misses_counter(struct bpf_prog *prog)
{
	struct bpf_prog_stats *stats;
	unsigned long flags;

	stats = this_cpu_ptr(prog->stats);
	flags = u64_stats_update_begin_irqsave(&stats->syncp);
	u64_stats_inc(&stats->misses);
	u64_stats_update_end_irqrestore(&stats->syncp, flags);
}

/* The logic is similar to bpf_prog_run(), but with an explicit
 * rcu_read_lock() and migrate_disable() which are required
 * for the trampoline. The macro is split into
 *
 * call __bpf_prog_enter
 * call prog->bpf_func
 * call __bpf_prog_exit
 *
 * __bpf_prog_enter returns:
 * 0 - skip execution of the bpf prog
 * 1 - execute bpf prog
 * [2..MAX_U64] - execute bpf prog and record execution time.
 *     This is start time.
 */
u64 notrace __bpf_prog_enter(struct bpf_prog *prog, struct bpf_tramp_run_ctx *run_ctx)
	__acquires(RCU)
{
	rcu_read_lock();
	migrate_disable();

	run_ctx->saved_run_ctx = bpf_set_run_ctx(&run_ctx->run_ctx);

	if (unlikely(__this_cpu_inc_return(*(prog->active)) != 1)) {
		inc_misses_counter(prog);
		return 0;
	}
	return bpf_prog_start_time();
}

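/* Conceptually, the generated trampoline invokes each fentry prog roughly as
 * follows (a sketch, not the literal emitted assembly):
 *
 *	start = __bpf_prog_enter(prog, &run_ctx);
 *	if (start)
 *		prog->bpf_func(args);
 *	__bpf_prog_exit(prog, start, &run_ctx);
 */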
static void notrace update_prog_stats(struct bpf_prog *prog,
				      u64 start)
{
	struct bpf_prog_stats *stats;

	if (static_branch_unlikely(&bpf_stats_enabled_key) &&
	    /* static_key could be enabled in __bpf_prog_enter*
	     * and disabled in __bpf_prog_exit*.
	     * And vice versa.
	     * Hence check that 'start' is valid.
	     */
	    start > NO_START_TIME) {
		unsigned long flags;

		stats = this_cpu_ptr(prog->stats);
		flags = u64_stats_update_begin_irqsave(&stats->syncp);
		u64_stats_inc(&stats->cnt);
		u64_stats_add(&stats->nsecs, sched_clock() - start);
		u64_stats_update_end_irqrestore(&stats->syncp, flags);
	}
}

void notrace __bpf_prog_exit(struct bpf_prog *prog, u64 start, struct bpf_tramp_run_ctx *run_ctx)
	__releases(RCU)
{
	bpf_reset_run_ctx(run_ctx->saved_run_ctx);

	update_prog_stats(prog, start);
	__this_cpu_dec(*(prog->active));
	migrate_enable();
	rcu_read_unlock();
}

u64 notrace __bpf_prog_enter_lsm_cgroup(struct bpf_prog *prog,
					struct bpf_tramp_run_ctx *run_ctx)
	__acquires(RCU)
{
	/* Runtime stats are exported via actual BPF_LSM_CGROUP
	 * programs, not the shims.
	 */
	rcu_read_lock();
	migrate_disable();

	run_ctx->saved_run_ctx = bpf_set_run_ctx(&run_ctx->run_ctx);

	return NO_START_TIME;
}

void notrace __bpf_prog_exit_lsm_cgroup(struct bpf_prog *prog, u64 start,
					struct bpf_tramp_run_ctx *run_ctx)
	__releases(RCU)
{
	bpf_reset_run_ctx(run_ctx->saved_run_ctx);

	migrate_enable();
	rcu_read_unlock();
}

u64 notrace __bpf_prog_enter_sleepable(struct bpf_prog *prog, struct bpf_tramp_run_ctx *run_ctx)
{
	rcu_read_lock_trace();
	migrate_disable();
	might_fault();

	if (unlikely(__this_cpu_inc_return(*(prog->active)) != 1)) {
		inc_misses_counter(prog);
		return 0;
	}

	run_ctx->saved_run_ctx = bpf_set_run_ctx(&run_ctx->run_ctx);

	return bpf_prog_start_time();
}

void notrace __bpf_prog_exit_sleepable(struct bpf_prog *prog, u64 start,
				       struct bpf_tramp_run_ctx *run_ctx)
{
	bpf_reset_run_ctx(run_ctx->saved_run_ctx);

	update_prog_stats(prog, start);
	__this_cpu_dec(*(prog->active));
	migrate_enable();
	rcu_read_unlock_trace();
}

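/* Entry/exit bumps of the image's percpu_ref; these keep the image alive
 * while the original function (called from within the trampoline) runs.
 */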
void notrace __bpf_tramp_enter(struct bpf_tramp_image *tr)
{
	percpu_ref_get(&tr->pcref);
}

void notrace __bpf_tramp_exit(struct bpf_tramp_image *tr)
{
	percpu_ref_put(&tr->pcref);
}

int __weak
arch_prepare_bpf_trampoline(struct bpf_tramp_image *tr, void *image, void *image_end,
			    const struct btf_func_model *m, u32 flags,
			    struct bpf_tramp_links *tlinks,
			    void *orig_call)
{
	return -ENOTSUPP;
}

static int __init init_trampolines(void)
{
	int i;

	for (i = 0; i < TRAMPOLINE_TABLE_SIZE; i++)
		INIT_HLIST_HEAD(&trampoline_table[i]);
	return 0;
}
late_initcall(init_trampolines);