// SPDX-License-Identifier: GPL-2.0-only
/* Copyright (c) 2019 Facebook */
#include <linux/hash.h>
#include <linux/bpf.h>
#include <linux/filter.h>
#include <linux/ftrace.h>
#include <linux/rbtree_latch.h>
#include <linux/perf_event.h>
#include <linux/btf.h>
#include <linux/rcupdate_trace.h>
#include <linux/rcupdate_wait.h>
#include <linux/module.h>
#include <linux/static_call.h>

/* dummy _ops. The verifier will operate on target program's ops. */
const struct bpf_verifier_ops bpf_extension_verifier_ops = {
};
const struct bpf_prog_ops bpf_extension_prog_ops = {
};

/* btf_vmlinux has ~22k attachable functions. 1k htab is enough. */
#define TRAMPOLINE_HASH_BITS 10
#define TRAMPOLINE_TABLE_SIZE (1 << TRAMPOLINE_HASH_BITS)

static struct hlist_head trampoline_table[TRAMPOLINE_TABLE_SIZE];

/* serializes access to trampoline_table */
static DEFINE_MUTEX(trampoline_mutex);

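/* Note (summary of the code below): each attach target is identified by a
 * 64-bit key; bpf_trampoline_lookup() hashes the key into trampoline_table
 * and refcounts the entry, so all progs attaching to the same target share
 * a single struct bpf_trampoline.
 */
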
bool bpf_prog_has_trampoline(const struct bpf_prog *prog)
{
	enum bpf_attach_type eatype = prog->expected_attach_type;

	return eatype == BPF_TRACE_FENTRY || eatype == BPF_TRACE_FEXIT ||
	       eatype == BPF_MODIFY_RETURN;
}

void *bpf_jit_alloc_exec_page(void)
{
	void *image;

	image = bpf_jit_alloc_exec(PAGE_SIZE);
	if (!image)
		return NULL;

	set_vm_flush_reset_perms(image);
	/* Keep image as writeable. The alternative is to keep flipping ro/rw
	 * every time new program is attached or detached.
	 */
	set_memory_x((long)image, 1);
	return image;
}

void bpf_image_ksym_add(void *data, struct bpf_ksym *ksym)
{
	ksym->start = (unsigned long) data;
	ksym->end = ksym->start + PAGE_SIZE;
	bpf_ksym_add(ksym);
	perf_event_ksymbol(PERF_RECORD_KSYMBOL_TYPE_BPF, ksym->start,
			   PAGE_SIZE, false, ksym->name);
}

void bpf_image_ksym_del(struct bpf_ksym *ksym)
{
	bpf_ksym_del(ksym);
	perf_event_ksymbol(PERF_RECORD_KSYMBOL_TYPE_BPF, ksym->start,
			   PAGE_SIZE, true, ksym->name);
}

static struct bpf_trampoline *bpf_trampoline_lookup(u64 key)
{
	struct bpf_trampoline *tr;
	struct hlist_head *head;
	int i;

	mutex_lock(&trampoline_mutex);
	head = &trampoline_table[hash_64(key, TRAMPOLINE_HASH_BITS)];
	hlist_for_each_entry(tr, head, hlist) {
		if (tr->key == key) {
			refcount_inc(&tr->refcnt);
			goto out;
		}
	}
	tr = kzalloc(sizeof(*tr), GFP_KERNEL);
	if (!tr)
		goto out;

	tr->key = key;
	INIT_HLIST_NODE(&tr->hlist);
	hlist_add_head(&tr->hlist, head);
	refcount_set(&tr->refcnt, 1);
	mutex_init(&tr->mutex);
	for (i = 0; i < BPF_TRAMP_MAX; i++)
		INIT_HLIST_HEAD(&tr->progs_hlist[i]);
out:
	mutex_unlock(&trampoline_mutex);
	return tr;
}

static int bpf_trampoline_module_get(struct bpf_trampoline *tr)
{
	struct module *mod;
	int err = 0;

	preempt_disable();
	mod = __module_text_address((unsigned long) tr->func.addr);
	if (mod && !try_module_get(mod))
		err = -ENOENT;
	preempt_enable();
	tr->mod = mod;
	return err;
}

static void bpf_trampoline_module_put(struct bpf_trampoline *tr)
{
	module_put(tr->mod);
	tr->mod = NULL;
}

static int unregister_fentry(struct bpf_trampoline *tr, void *old_addr)
{
	void *ip = tr->func.addr;
	int ret;

	if (tr->func.ftrace_managed)
		ret = unregister_ftrace_direct((long)ip, (long)old_addr);
	else
		ret = bpf_arch_text_poke(ip, BPF_MOD_CALL, old_addr, NULL);

	if (!ret)
		bpf_trampoline_module_put(tr);
	return ret;
}

static int modify_fentry(struct bpf_trampoline *tr, void *old_addr, void *new_addr)
{
	void *ip = tr->func.addr;
	int ret;

	if (tr->func.ftrace_managed)
		ret = modify_ftrace_direct((long)ip, (long)old_addr, (long)new_addr);
	else
		ret = bpf_arch_text_poke(ip, BPF_MOD_CALL, old_addr, new_addr);
	return ret;
}

/* first time registering */
static int register_fentry(struct bpf_trampoline *tr, void *new_addr)
{
	void *ip = tr->func.addr;
	unsigned long faddr;
	int ret;

	faddr = ftrace_location((unsigned long)ip);
	if (faddr)
		tr->func.ftrace_managed = true;

	if (bpf_trampoline_module_get(tr))
		return -ENOENT;

	if (tr->func.ftrace_managed)
		ret = register_ftrace_direct((long)ip, (long)new_addr);
	else
		ret = bpf_arch_text_poke(ip, BPF_MOD_CALL, NULL, new_addr);

	if (ret)
		bpf_trampoline_module_put(tr);
	return ret;
}

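/* Summary of the three helpers above: when the attach address has an
 * ftrace-managed fentry site (ftrace_location() found one), the trampoline
 * is installed, updated and removed through the ftrace direct-call API;
 * otherwise the call instruction at the function entry is patched directly
 * with bpf_arch_text_poke(BPF_MOD_CALL).
 */
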
static struct bpf_tramp_links *
bpf_trampoline_get_progs(const struct bpf_trampoline *tr, int *total, bool *ip_arg)
{
	struct bpf_tramp_link *link;
	struct bpf_tramp_links *tlinks;
	struct bpf_tramp_link **links;
	int kind;

	*total = 0;
	tlinks = kcalloc(BPF_TRAMP_MAX, sizeof(*tlinks), GFP_KERNEL);
	if (!tlinks)
		return ERR_PTR(-ENOMEM);

	for (kind = 0; kind < BPF_TRAMP_MAX; kind++) {
		tlinks[kind].nr_links = tr->progs_cnt[kind];
		*total += tr->progs_cnt[kind];
		links = tlinks[kind].links;

		hlist_for_each_entry(link, &tr->progs_hlist[kind], tramp_hlist) {
			*ip_arg |= link->link.prog->call_get_func_ip;
			*links++ = link;
		}
	}
	return tlinks;
}

static void __bpf_tramp_image_put_deferred(struct work_struct *work)
{
	struct bpf_tramp_image *im;

	im = container_of(work, struct bpf_tramp_image, work);
	bpf_image_ksym_del(&im->ksym);
	bpf_jit_free_exec(im->image);
	bpf_jit_uncharge_modmem(PAGE_SIZE);
	percpu_ref_exit(&im->pcref);
	kfree_rcu(im, rcu);
}

/* callback, fexit step 3 or fentry step 2 */
static void __bpf_tramp_image_put_rcu(struct rcu_head *rcu)
{
	struct bpf_tramp_image *im;

	im = container_of(rcu, struct bpf_tramp_image, rcu);
	INIT_WORK(&im->work, __bpf_tramp_image_put_deferred);
	schedule_work(&im->work);
}

/* callback, fexit step 2. Called after percpu_ref_kill confirms. */
static void __bpf_tramp_image_release(struct percpu_ref *pcref)
{
	struct bpf_tramp_image *im;

	im = container_of(pcref, struct bpf_tramp_image, pcref);
	call_rcu_tasks(&im->rcu, __bpf_tramp_image_put_rcu);
}

/* callback, fexit or fentry step 1 */
static void __bpf_tramp_image_put_rcu_tasks(struct rcu_head *rcu)
{
	struct bpf_tramp_image *im;

	im = container_of(rcu, struct bpf_tramp_image, rcu);
	if (im->ip_after_call)
		/* the case of fmod_ret/fexit trampoline and CONFIG_PREEMPTION=y */
		percpu_ref_kill(&im->pcref);
	else
		/* the case of fentry trampoline */
		call_rcu_tasks(&im->rcu, __bpf_tramp_image_put_rcu);
}

static void bpf_tramp_image_put(struct bpf_tramp_image *im)
{
	/* The trampoline image that calls original function is using:
	 * rcu_read_lock_trace to protect sleepable bpf progs
	 * rcu_read_lock to protect normal bpf progs
	 * percpu_ref to protect trampoline itself
	 * rcu tasks to protect trampoline asm not covered by percpu_ref
	 * (which are few asm insns before __bpf_tramp_enter and
	 *  after __bpf_tramp_exit)
	 *
	 * The trampoline is unreachable before bpf_tramp_image_put().
	 *
	 * First, patch the trampoline to avoid calling into fexit progs.
	 * The progs will be freed even if the original function is still
	 * executing or sleeping.
	 * In case of CONFIG_PREEMPT=y use call_rcu_tasks() to wait on
	 * first few asm instructions to execute and call into
	 * __bpf_tramp_enter->percpu_ref_get.
	 * Then use percpu_ref_kill to wait for the trampoline and the original
	 * function to finish.
	 * Then use call_rcu_tasks() to make sure few asm insns in
	 * the trampoline epilogue are done as well.
	 *
	 * In !PREEMPT case the task that got interrupted in the first asm
	 * insns won't go through an RCU quiescent state which the
	 * percpu_ref_kill will be waiting for. Hence the first
	 * call_rcu_tasks() is not necessary.
	 */
	if (im->ip_after_call) {
		int err = bpf_arch_text_poke(im->ip_after_call, BPF_MOD_JUMP,
					     NULL, im->ip_epilogue);
		WARN_ON(err);
		if (IS_ENABLED(CONFIG_PREEMPTION))
			call_rcu_tasks(&im->rcu, __bpf_tramp_image_put_rcu_tasks);
		else
			percpu_ref_kill(&im->pcref);
		return;
	}

	/* The trampoline without fexit and fmod_ret progs doesn't call original
	 * function and doesn't use percpu_ref.
	 * Use call_rcu_tasks_trace() to wait for sleepable progs to finish.
	 * Then use call_rcu_tasks() to wait for the rest of trampoline asm
	 * to finish.
	 */
	call_rcu_tasks_trace(&im->rcu, __bpf_tramp_image_put_rcu_tasks);
}

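/* Summary of the teardown sequences implemented above:
 *
 * fexit/fmod_ret image (calls the original function):
 *   patch out the fexit calls
 *     -> call_rcu_tasks() (only if CONFIG_PREEMPTION)        step 1
 *     -> percpu_ref_kill() -> __bpf_tramp_image_release()    step 2
 *     -> call_rcu_tasks() -> __bpf_tramp_image_put_rcu()     step 3
 *     -> schedule_work() -> __bpf_tramp_image_put_deferred() frees the image
 *
 * fentry-only image (never calls the original function):
 *   call_rcu_tasks_trace() -> call_rcu_tasks() -> work -> free
 */
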
static struct bpf_tramp_image *bpf_tramp_image_alloc(u64 key, u32 idx)
{
	struct bpf_tramp_image *im;
	struct bpf_ksym *ksym;
	void *image;
	int err = -ENOMEM;

	im = kzalloc(sizeof(*im), GFP_KERNEL);
	if (!im)
		goto out;

	err = bpf_jit_charge_modmem(PAGE_SIZE);
	if (err)
		goto out_free_im;

	err = -ENOMEM;
	im->image = image = bpf_jit_alloc_exec_page();
	if (!image)
		goto out_uncharge;

	err = percpu_ref_init(&im->pcref, __bpf_tramp_image_release, 0, GFP_KERNEL);
	if (err)
		goto out_free_image;

	ksym = &im->ksym;
	INIT_LIST_HEAD_RCU(&ksym->lnode);
	snprintf(ksym->name, KSYM_NAME_LEN, "bpf_trampoline_%llu_%u", key, idx);
	bpf_image_ksym_add(image, ksym);
	return im;

out_free_image:
	bpf_jit_free_exec(im->image);
out_uncharge:
	bpf_jit_uncharge_modmem(PAGE_SIZE);
out_free_im:
	kfree(im);
out:
	return ERR_PTR(err);
}

static int bpf_trampoline_update(struct bpf_trampoline *tr)
{
	struct bpf_tramp_image *im;
	struct bpf_tramp_links *tlinks;
	u32 flags = BPF_TRAMP_F_RESTORE_REGS;
	bool ip_arg = false;
	int err, total;

	tlinks = bpf_trampoline_get_progs(tr, &total, &ip_arg);
	if (IS_ERR(tlinks))
		return PTR_ERR(tlinks);

	if (total == 0) {
		err = unregister_fentry(tr, tr->cur_image->image);
		bpf_tramp_image_put(tr->cur_image);
		tr->cur_image = NULL;
		tr->selector = 0;
		goto out;
	}

	im = bpf_tramp_image_alloc(tr->key, tr->selector);
	if (IS_ERR(im)) {
		err = PTR_ERR(im);
		goto out;
	}

	if (tlinks[BPF_TRAMP_FEXIT].nr_links ||
	    tlinks[BPF_TRAMP_MODIFY_RETURN].nr_links)
		flags = BPF_TRAMP_F_CALL_ORIG | BPF_TRAMP_F_SKIP_FRAME;

	if (ip_arg)
		flags |= BPF_TRAMP_F_IP_ARG;

	err = arch_prepare_bpf_trampoline(im, im->image, im->image + PAGE_SIZE,
					  &tr->func.model, flags, tlinks,
					  tr->func.addr);
	if (err < 0)
		goto out;

	WARN_ON(tr->cur_image && tr->selector == 0);
	WARN_ON(!tr->cur_image && tr->selector);
	if (tr->cur_image)
		/* progs already running at this address */
		err = modify_fentry(tr, tr->cur_image->image, im->image);
	else
		/* first time registering */
		err = register_fentry(tr, im->image);
	if (err)
		goto out;
	if (tr->cur_image)
		bpf_tramp_image_put(tr->cur_image);
	tr->cur_image = im;
	tr->selector++;
out:
	kfree(tlinks);
	return err;
}

static enum bpf_tramp_prog_type bpf_attach_type_to_tramp(struct bpf_prog *prog)
{
	switch (prog->expected_attach_type) {
	case BPF_TRACE_FENTRY:
		return BPF_TRAMP_FENTRY;
	case BPF_MODIFY_RETURN:
		return BPF_TRAMP_MODIFY_RETURN;
	case BPF_TRACE_FEXIT:
		return BPF_TRAMP_FEXIT;
	case BPF_LSM_MAC:
		if (!prog->aux->attach_func_proto->type)
			/* The function returns void, we cannot modify its
			 * return value.
			 */
			return BPF_TRAMP_FEXIT;
		else
			return BPF_TRAMP_MODIFY_RETURN;
	default:
		return BPF_TRAMP_REPLACE;
	}
}

int bpf_trampoline_link_prog(struct bpf_tramp_link *link, struct bpf_trampoline *tr)
{
	enum bpf_tramp_prog_type kind;
	struct bpf_tramp_link *link_exiting;
	int err = 0;
	int cnt;

	kind = bpf_attach_type_to_tramp(link->link.prog);
	mutex_lock(&tr->mutex);
	if (tr->extension_prog) {
		/* cannot attach fentry/fexit if extension prog is attached.
		 * cannot overwrite extension prog either.
		 */
		err = -EBUSY;
		goto out;
	}
	cnt = tr->progs_cnt[BPF_TRAMP_FENTRY] + tr->progs_cnt[BPF_TRAMP_FEXIT];
	if (kind == BPF_TRAMP_REPLACE) {
		/* Cannot attach extension if fentry/fexit are in use. */
		if (cnt) {
			err = -EBUSY;
			goto out;
		}
		tr->extension_prog = link->link.prog;
		err = bpf_arch_text_poke(tr->func.addr, BPF_MOD_JUMP, NULL,
					 link->link.prog->bpf_func);
		goto out;
	}
	if (cnt >= BPF_MAX_TRAMP_LINKS) {
		err = -E2BIG;
		goto out;
	}
	if (!hlist_unhashed(&link->tramp_hlist)) {
		/* prog already linked */
		err = -EBUSY;
		goto out;
	}
	hlist_for_each_entry(link_exiting, &tr->progs_hlist[kind], tramp_hlist) {
		if (link_exiting->link.prog != link->link.prog)
			continue;
		/* prog already linked */
		err = -EBUSY;
		goto out;
	}

	hlist_add_head(&link->tramp_hlist, &tr->progs_hlist[kind]);
	tr->progs_cnt[kind]++;
	err = bpf_trampoline_update(tr);
	if (err) {
		hlist_del_init(&link->tramp_hlist);
		tr->progs_cnt[kind]--;
	}
out:
	mutex_unlock(&tr->mutex);
	return err;
}

/* bpf_trampoline_unlink_prog() should never fail. */
int bpf_trampoline_unlink_prog(struct bpf_tramp_link *link, struct bpf_trampoline *tr)
{
	enum bpf_tramp_prog_type kind;
	int err;

	kind = bpf_attach_type_to_tramp(link->link.prog);
	mutex_lock(&tr->mutex);
	if (kind == BPF_TRAMP_REPLACE) {
		WARN_ON_ONCE(!tr->extension_prog);
		err = bpf_arch_text_poke(tr->func.addr, BPF_MOD_JUMP,
					 tr->extension_prog->bpf_func, NULL);
		tr->extension_prog = NULL;
		goto out;
	}
	hlist_del_init(&link->tramp_hlist);
	tr->progs_cnt[kind]--;
	err = bpf_trampoline_update(tr);
out:
	mutex_unlock(&tr->mutex);
	return err;
}

struct bpf_trampoline *bpf_trampoline_get(u64 key,
					  struct bpf_attach_target_info *tgt_info)
{
	struct bpf_trampoline *tr;

	tr = bpf_trampoline_lookup(key);
	if (!tr)
		return NULL;

	mutex_lock(&tr->mutex);
	if (tr->func.addr)
		goto out;

	memcpy(&tr->func.model, &tgt_info->fmodel, sizeof(tgt_info->fmodel));
	tr->func.addr = (void *)tgt_info->tgt_addr;
out:
	mutex_unlock(&tr->mutex);
	return tr;
}

void bpf_trampoline_put(struct bpf_trampoline *tr)
{
	if (!tr)
		return;
	mutex_lock(&trampoline_mutex);
	if (!refcount_dec_and_test(&tr->refcnt))
		goto out;
	WARN_ON_ONCE(mutex_is_locked(&tr->mutex));
	if (WARN_ON_ONCE(!hlist_empty(&tr->progs_hlist[BPF_TRAMP_FENTRY])))
		goto out;
	if (WARN_ON_ONCE(!hlist_empty(&tr->progs_hlist[BPF_TRAMP_FEXIT])))
		goto out;
	/* This code will be executed even when the last bpf_tramp_image
	 * is alive. All progs are detached from the trampoline and the
	 * trampoline image is patched with jmp into epilogue to skip
	 * fexit progs. The fentry-only trampoline will be freed via
	 * multiple rcu callbacks.
	 */
	hlist_del(&tr->hlist);
	kfree(tr);
out:
	mutex_unlock(&trampoline_mutex);
}

#define NO_START_TIME 1
static __always_inline u64 notrace bpf_prog_start_time(void)
{
	u64 start = NO_START_TIME;

	if (static_branch_unlikely(&bpf_stats_enabled_key)) {
		start = sched_clock();
		if (unlikely(!start))
			start = NO_START_TIME;
	}
	return start;
}

static void notrace inc_misses_counter(struct bpf_prog *prog)
{
	struct bpf_prog_stats *stats;
	unsigned long flags;

	stats = this_cpu_ptr(prog->stats);
	flags = u64_stats_update_begin_irqsave(&stats->syncp);
	u64_stats_inc(&stats->misses);
	u64_stats_update_end_irqrestore(&stats->syncp, flags);
}

/* The logic is similar to bpf_prog_run(), but with an explicit
 * rcu_read_lock() and migrate_disable() which are required
 * for the trampoline. The macro is split into
 * call __bpf_prog_enter
 * call prog->bpf_func
 * call __bpf_prog_exit
 *
 * __bpf_prog_enter returns:
 * 0 - skip execution of the bpf prog
 * 1 - execute bpf prog
 * [2..MAX_U64] - execute bpf prog and record execution time.
 *     This is start time.
 */
u64 notrace __bpf_prog_enter(struct bpf_prog *prog)
	__acquires(RCU)
{
	rcu_read_lock();
	migrate_disable();
	if (unlikely(__this_cpu_inc_return(*(prog->active)) != 1)) {
		inc_misses_counter(prog);
		return 0;
	}
	return bpf_prog_start_time();
}

static void notrace update_prog_stats(struct bpf_prog *prog,
				      u64 start)
{
	struct bpf_prog_stats *stats;

	if (static_branch_unlikely(&bpf_stats_enabled_key) &&
	    /* static_key could be enabled in __bpf_prog_enter*
	     * and disabled in __bpf_prog_exit*.
	     * And vice versa.
	     * Hence check that 'start' is valid.
	     */
	    start > NO_START_TIME) {
		unsigned long flags;

		stats = this_cpu_ptr(prog->stats);
		flags = u64_stats_update_begin_irqsave(&stats->syncp);
		u64_stats_inc(&stats->cnt);
		u64_stats_add(&stats->nsecs, sched_clock() - start);
		u64_stats_update_end_irqrestore(&stats->syncp, flags);
	}
}

void notrace __bpf_prog_exit(struct bpf_prog *prog, u64 start)
	__releases(RCU)
{
	update_prog_stats(prog, start);
	__this_cpu_dec(*(prog->active));
	migrate_enable();
	rcu_read_unlock();
}

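/* Conceptually, the code that arch_prepare_bpf_trampoline() emits for each
 * attached prog behaves like the sketch below (illustrative C, not the
 * actual generated assembly):
 *
 *	u64 start = __bpf_prog_enter(prog);	// or the _sleepable variant
 *	if (start)
 *		prog->bpf_func(ctx, prog->insnsi);
 *	__bpf_prog_exit(prog, start);		// records nsecs if start > NO_START_TIME
 */
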
u64 notrace __bpf_prog_enter_sleepable(struct bpf_prog *prog)
{
	rcu_read_lock_trace();
	migrate_disable();
	might_fault();
	if (unlikely(__this_cpu_inc_return(*(prog->active)) != 1)) {
		inc_misses_counter(prog);
		return 0;
	}
	return bpf_prog_start_time();
}

void notrace __bpf_prog_exit_sleepable(struct bpf_prog *prog, u64 start)
{
	update_prog_stats(prog, start);
	__this_cpu_dec(*(prog->active));
	migrate_enable();
	rcu_read_unlock_trace();
}

void notrace __bpf_tramp_enter(struct bpf_tramp_image *tr)
{
	percpu_ref_get(&tr->pcref);
}

void notrace __bpf_tramp_exit(struct bpf_tramp_image *tr)
{
	percpu_ref_put(&tr->pcref);
}

int __weak
arch_prepare_bpf_trampoline(struct bpf_tramp_image *tr, void *image, void *image_end,
			    const struct btf_func_model *m, u32 flags,
			    struct bpf_tramp_links *tlinks,
			    void *orig_call)
{
	return -ENOTSUPP;
}

static int __init init_trampolines(void)
{
	int i;

	for (i = 0; i < TRAMPOLINE_TABLE_SIZE; i++)
		INIT_HLIST_HEAD(&trampoline_table[i]);
	return 0;
}
late_initcall(init_trampolines);