// SPDX-License-Identifier: GPL-2.0-only
/* Copyright (c) 2019 Facebook */
#include <linux/hash.h>
#include <linux/bpf.h>
#include <linux/filter.h>
#include <linux/ftrace.h>
#include <linux/rbtree_latch.h>
#include <linux/perf_event.h>
#include <linux/btf.h>
#include <linux/rcupdate_trace.h>
#include <linux/rcupdate_wait.h>
#include <linux/module.h>

/* dummy _ops. The verifier will operate on target program's ops. */
const struct bpf_verifier_ops bpf_extension_verifier_ops = {
};

const struct bpf_prog_ops bpf_extension_prog_ops = {
};

/* btf_vmlinux has ~22k attachable functions. 1k htab is enough. */
#define TRAMPOLINE_HASH_BITS 10
#define TRAMPOLINE_TABLE_SIZE (1 << TRAMPOLINE_HASH_BITS)

static struct hlist_head trampoline_table[TRAMPOLINE_TABLE_SIZE];

/* serializes access to trampoline_table */
static DEFINE_MUTEX(trampoline_mutex);

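/* Allocate one executable page for a trampoline image. Callers pair this
 * with bpf_jit_free_exec() on the same page (see bpf_tramp_image_alloc()
 * and __bpf_tramp_image_put_deferred() below).
 */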
void *bpf_jit_alloc_exec_page(void)
{
	void *image;

	image = bpf_jit_alloc_exec(PAGE_SIZE);
	if (!image)
		return NULL;

	set_vm_flush_reset_perms(image);
	/* Keep the image writable. The alternative is to flip it between
	 * ro and rw every time a program is attached or detached.
	 */
	set_memory_x((long)image, 1);
	return image;
}

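/* Publish the trampoline image as a kernel symbol (kallsyms) and emit a
 * PERF_RECORD_KSYMBOL event so profilers can symbolize hits in it.
 */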
void bpf_image_ksym_add(void *data, struct bpf_ksym *ksym)
{
	ksym->start = (unsigned long) data;
	ksym->end = ksym->start + PAGE_SIZE;
	bpf_ksym_add(ksym);
	perf_event_ksymbol(PERF_RECORD_KSYMBOL_TYPE_BPF, ksym->start,
			   PAGE_SIZE, false, ksym->name);
}

void bpf_image_ksym_del(struct bpf_ksym *ksym)
{
	bpf_ksym_del(ksym);
	perf_event_ksymbol(PERF_RECORD_KSYMBOL_TYPE_BPF, ksym->start,
			   PAGE_SIZE, true, ksym->name);
}

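/* Find an existing trampoline for @key or allocate a new one. Returns the
 * trampoline with its refcount incremented, or NULL on allocation failure.
 */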
static struct bpf_trampoline *bpf_trampoline_lookup(u64 key)
{
	struct bpf_trampoline *tr;
	struct hlist_head *head;
	int i;

	mutex_lock(&trampoline_mutex);
	head = &trampoline_table[hash_64(key, TRAMPOLINE_HASH_BITS)];
	hlist_for_each_entry(tr, head, hlist) {
		if (tr->key == key) {
			refcount_inc(&tr->refcnt);
			goto out;
		}
	}
	tr = kzalloc(sizeof(*tr), GFP_KERNEL);
	if (!tr)
		goto out;

	tr->key = key;
	INIT_HLIST_NODE(&tr->hlist);
	hlist_add_head(&tr->hlist, head);
	refcount_set(&tr->refcnt, 1);
	mutex_init(&tr->mutex);
	for (i = 0; i < BPF_TRAMP_MAX; i++)
		INIT_HLIST_HEAD(&tr->progs_hlist[i]);
out:
	mutex_unlock(&trampoline_mutex);
	return tr;
}

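/* If the attach target lives in a module, pin that module so it cannot be
 * unloaded while the trampoline is patched into it.
 */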
static int bpf_trampoline_module_get(struct bpf_trampoline *tr)
{
	struct module *mod;
	int err = 0;

	preempt_disable();
	mod = __module_text_address((unsigned long) tr->func.addr);
	if (mod && !try_module_get(mod))
		err = -ENOENT;
	preempt_enable();
	tr->mod = mod;
	return err;
}

static void bpf_trampoline_module_put(struct bpf_trampoline *tr)
{
	module_put(tr->mod);
	tr->mod = NULL;
}

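/* Returns 1 if @ip is an ftrace-managed callsite, 0 if it is not, and a
 * negative error if ftrace reports an address other than @ip itself.
 */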
static int is_ftrace_location(void *ip)
{
	long addr;

	addr = ftrace_location((long)ip);
	if (!addr)
		return 0;
	if (WARN_ON_ONCE(addr != (long)ip))
		return -EFAULT;
	return 1;
}

static int unregister_fentry(struct bpf_trampoline *tr, void *old_addr)
{
	void *ip = tr->func.addr;
	int ret;

	if (tr->func.ftrace_managed)
		ret = unregister_ftrace_direct((long)ip, (long)old_addr);
	else
		ret = bpf_arch_text_poke(ip, BPF_MOD_CALL, old_addr, NULL);

	if (!ret)
		bpf_trampoline_module_put(tr);
	return ret;
}

static int modify_fentry(struct bpf_trampoline *tr, void *old_addr, void *new_addr)
{
	void *ip = tr->func.addr;
	int ret;

	if (tr->func.ftrace_managed)
		ret = modify_ftrace_direct((long)ip, (long)old_addr, (long)new_addr);
	else
		ret = bpf_arch_text_poke(ip, BPF_MOD_CALL, old_addr, new_addr);
	return ret;
}

/* first time registering */
static int register_fentry(struct bpf_trampoline *tr, void *new_addr)
{
	void *ip = tr->func.addr;
	int ret;

	ret = is_ftrace_location(ip);
	if (ret < 0)
		return ret;
	tr->func.ftrace_managed = ret;

	if (bpf_trampoline_module_get(tr))
		return -ENOENT;

	if (tr->func.ftrace_managed)
		ret = register_ftrace_direct((long)ip, (long)new_addr);
	else
		ret = bpf_arch_text_poke(ip, BPF_MOD_CALL, NULL, new_addr);

	if (ret)
		bpf_trampoline_module_put(tr);
	return ret;
}

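/* Snapshot the progs currently attached to @tr into a freshly allocated
 * bpf_tramp_progs array, count them into *total and note in *ip_arg
 * whether any prog uses bpf_get_func_ip().
 */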
static struct bpf_tramp_progs *
bpf_trampoline_get_progs(const struct bpf_trampoline *tr, int *total, bool *ip_arg)
{
	const struct bpf_prog_aux *aux;
	struct bpf_tramp_progs *tprogs;
	struct bpf_prog **progs;
	int kind;

	*total = 0;
	tprogs = kcalloc(BPF_TRAMP_MAX, sizeof(*tprogs), GFP_KERNEL);
	if (!tprogs)
		return ERR_PTR(-ENOMEM);

	for (kind = 0; kind < BPF_TRAMP_MAX; kind++) {
		tprogs[kind].nr_progs = tr->progs_cnt[kind];
		*total += tr->progs_cnt[kind];
		progs = tprogs[kind].progs;

		hlist_for_each_entry(aux, &tr->progs_hlist[kind], tramp_hlist) {
			*ip_arg |= aux->prog->call_get_func_ip;
			*progs++ = aux->prog;
		}
	}
	return tprogs;
}

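/* Final teardown step, reached once all relevant grace periods have
 * elapsed: free the image page and its bookkeeping from a workqueue.
 */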
static void __bpf_tramp_image_put_deferred(struct work_struct *work)
{
	struct bpf_tramp_image *im;

	im = container_of(work, struct bpf_tramp_image, work);
	bpf_image_ksym_del(&im->ksym);
	bpf_jit_free_exec(im->image);
	bpf_jit_uncharge_modmem(1);
	percpu_ref_exit(&im->pcref);
	kfree_rcu(im, rcu);
}

/* callback, fexit step 3 or fentry step 2 */
static void __bpf_tramp_image_put_rcu(struct rcu_head *rcu)
{
	struct bpf_tramp_image *im;

	im = container_of(rcu, struct bpf_tramp_image, rcu);
	INIT_WORK(&im->work, __bpf_tramp_image_put_deferred);
	schedule_work(&im->work);
}

/* callback, fexit step 2. Called after percpu_ref_kill confirms. */
static void __bpf_tramp_image_release(struct percpu_ref *pcref)
{
	struct bpf_tramp_image *im;

	im = container_of(pcref, struct bpf_tramp_image, pcref);
	call_rcu_tasks(&im->rcu, __bpf_tramp_image_put_rcu);
}

/* callback, fexit or fentry step 1 */
static void __bpf_tramp_image_put_rcu_tasks(struct rcu_head *rcu)
{
	struct bpf_tramp_image *im;

	im = container_of(rcu, struct bpf_tramp_image, rcu);
	if (im->ip_after_call)
		/* the case of fmod_ret/fexit trampoline and CONFIG_PREEMPTION=y */
		percpu_ref_kill(&im->pcref);
	else
		/* the case of fentry trampoline */
		call_rcu_tasks(&im->rcu, __bpf_tramp_image_put_rcu);
}

static void bpf_tramp_image_put(struct bpf_tramp_image *im)
{
	/* The trampoline image that calls the original function is using:
	 * rcu_read_lock_trace to protect sleepable bpf progs
	 * rcu_read_lock to protect normal bpf progs
	 * percpu_ref to protect trampoline itself
	 * rcu tasks to protect trampoline asm not covered by percpu_ref
	 * (which are the few asm insns before __bpf_tramp_enter and
	 *  after __bpf_tramp_exit)
	 *
	 * The trampoline is unreachable before bpf_tramp_image_put().
	 *
	 * First, patch the trampoline to avoid calling into fexit progs.
	 * The progs will be freed even if the original function is still
	 * executing or sleeping.
	 * In case of CONFIG_PREEMPT=y use call_rcu_tasks() to wait for
	 * the first few asm instructions to execute and call into
	 * __bpf_tramp_enter->percpu_ref_get.
	 * Then use percpu_ref_kill to wait for the trampoline and the original
	 * function to finish.
	 * Then use call_rcu_tasks() to make sure the few asm insns in
	 * the trampoline epilogue are done as well.
	 *
	 * In the !PREEMPT case a task that got interrupted in the first asm
	 * insns won't go through an RCU quiescent state which the
	 * percpu_ref_kill will be waiting for. Hence the first
	 * call_rcu_tasks() is not necessary.
	 */
	if (im->ip_after_call) {
		int err = bpf_arch_text_poke(im->ip_after_call, BPF_MOD_JUMP,
					     NULL, im->ip_epilogue);
		WARN_ON(err);
		if (IS_ENABLED(CONFIG_PREEMPTION))
			call_rcu_tasks(&im->rcu, __bpf_tramp_image_put_rcu_tasks);
		else
			percpu_ref_kill(&im->pcref);
		return;
	}

	/* The trampoline without fexit and fmod_ret progs doesn't call the
	 * original function and doesn't use percpu_ref.
	 * Use call_rcu_tasks_trace() to wait for sleepable progs to finish.
	 * Then use call_rcu_tasks() to wait for the rest of the trampoline
	 * asm and normal progs.
	 */
	call_rcu_tasks_trace(&im->rcu, __bpf_tramp_image_put_rcu_tasks);
}

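/* Allocate a trampoline image: an executable page charged against the JIT
 * memory limit, a percpu_ref tracking in-flight calls through the image,
 * and a ksym entry named bpf_trampoline_<key>_<idx>.
 */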
static struct bpf_tramp_image *bpf_tramp_image_alloc(u64 key, u32 idx)
{
	struct bpf_tramp_image *im;
	struct bpf_ksym *ksym;
	void *image;
	int err = -ENOMEM;

	im = kzalloc(sizeof(*im), GFP_KERNEL);
	if (!im)
		goto out;

	err = bpf_jit_charge_modmem(1);
	if (err)
		goto out_free_im;

	err = -ENOMEM;
	im->image = image = bpf_jit_alloc_exec_page();
	if (!image)
		goto out_uncharge;

	err = percpu_ref_init(&im->pcref, __bpf_tramp_image_release, 0, GFP_KERNEL);
	if (err)
		goto out_free_image;

	ksym = &im->ksym;
	INIT_LIST_HEAD_RCU(&ksym->lnode);
	snprintf(ksym->name, KSYM_NAME_LEN, "bpf_trampoline_%llu_%u", key, idx);
	bpf_image_ksym_add(image, ksym);
	return im;

out_free_image:
	bpf_jit_free_exec(im->image);
out_uncharge:
	bpf_jit_uncharge_modmem(1);
out_free_im:
	kfree(im);
out:
	return ERR_PTR(err);
}

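/* Re-generate the trampoline image to match the currently attached progs
 * and switch the patched callsite over to it. When no progs remain, the
 * callsite is unpatched instead.
 */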
static int bpf_trampoline_update(struct bpf_trampoline *tr)
{
	struct bpf_tramp_image *im;
	struct bpf_tramp_progs *tprogs;
	u32 flags = BPF_TRAMP_F_RESTORE_REGS;
	bool ip_arg = false;
	int err, total;

	tprogs = bpf_trampoline_get_progs(tr, &total, &ip_arg);
	if (IS_ERR(tprogs))
		return PTR_ERR(tprogs);

	if (total == 0) {
		err = unregister_fentry(tr, tr->cur_image->image);
		bpf_tramp_image_put(tr->cur_image);
		tr->cur_image = NULL;
		tr->selector = 0;
		goto out;
	}

	im = bpf_tramp_image_alloc(tr->key, tr->selector);
	if (IS_ERR(im)) {
		err = PTR_ERR(im);
		goto out;
	}

	if (tprogs[BPF_TRAMP_FEXIT].nr_progs ||
	    tprogs[BPF_TRAMP_MODIFY_RETURN].nr_progs)
		flags = BPF_TRAMP_F_CALL_ORIG | BPF_TRAMP_F_SKIP_FRAME;

	if (ip_arg)
		flags |= BPF_TRAMP_F_IP_ARG;

	err = arch_prepare_bpf_trampoline(im, im->image, im->image + PAGE_SIZE,
					  &tr->func.model, flags, tprogs,
					  tr->func.addr);
	if (err < 0)
		goto out;

	WARN_ON(tr->cur_image && tr->selector == 0);
	WARN_ON(!tr->cur_image && tr->selector);
	if (tr->cur_image)
		/* progs already running at this address */
		err = modify_fentry(tr, tr->cur_image->image, im->image);
	else
		/* first time registering */
		err = register_fentry(tr, im->image);
	if (err)
		goto out;
	if (tr->cur_image)
		bpf_tramp_image_put(tr->cur_image);
	tr->cur_image = im;
	tr->selector++;
out:
	kfree(tprogs);
	return err;
}

static enum bpf_tramp_prog_type bpf_attach_type_to_tramp(struct bpf_prog *prog)
{
	switch (prog->expected_attach_type) {
	case BPF_TRACE_FENTRY:
		return BPF_TRAMP_FENTRY;
	case BPF_MODIFY_RETURN:
		return BPF_TRAMP_MODIFY_RETURN;
	case BPF_TRACE_FEXIT:
		return BPF_TRAMP_FEXIT;
	case BPF_LSM_MAC:
		if (!prog->aux->attach_func_proto->type)
			/* The function returns void, we cannot modify its
			 * return value.
			 */
			return BPF_TRAMP_FEXIT;
		else
			return BPF_TRAMP_MODIFY_RETURN;
	default:
		return BPF_TRAMP_REPLACE;
	}
}

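/* Attach @prog to @tr. Extension (freplace) progs and fentry/fexit progs
 * are mutually exclusive on the same target, hence the -EBUSY checks.
 */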
int bpf_trampoline_link_prog(struct bpf_prog *prog, struct bpf_trampoline *tr)
{
	enum bpf_tramp_prog_type kind;
	int err = 0;
	int cnt;

	kind = bpf_attach_type_to_tramp(prog);
	mutex_lock(&tr->mutex);
	if (tr->extension_prog) {
		/* cannot attach fentry/fexit if extension prog is attached.
		 * cannot overwrite extension prog either.
		 */
		err = -EBUSY;
		goto out;
	}
	cnt = tr->progs_cnt[BPF_TRAMP_FENTRY] + tr->progs_cnt[BPF_TRAMP_FEXIT];
	if (kind == BPF_TRAMP_REPLACE) {
		/* Cannot attach extension if fentry/fexit are in use. */
		if (cnt) {
			err = -EBUSY;
			goto out;
		}
		tr->extension_prog = prog;
		err = bpf_arch_text_poke(tr->func.addr, BPF_MOD_JUMP, NULL,
					 prog->bpf_func);
		goto out;
	}
	if (cnt >= BPF_MAX_TRAMP_PROGS) {
		err = -E2BIG;
		goto out;
	}
	if (!hlist_unhashed(&prog->aux->tramp_hlist)) {
		/* prog already linked */
		err = -EBUSY;
		goto out;
	}
	hlist_add_head(&prog->aux->tramp_hlist, &tr->progs_hlist[kind]);
	tr->progs_cnt[kind]++;
	err = bpf_trampoline_update(tr);
	if (err) {
		hlist_del_init(&prog->aux->tramp_hlist);
		tr->progs_cnt[kind]--;
	}
out:
	mutex_unlock(&tr->mutex);
	return err;
}

/* bpf_trampoline_unlink_prog() should never fail. */
int bpf_trampoline_unlink_prog(struct bpf_prog *prog, struct bpf_trampoline *tr)
{
	enum bpf_tramp_prog_type kind;
	int err;

	kind = bpf_attach_type_to_tramp(prog);
	mutex_lock(&tr->mutex);
	if (kind == BPF_TRAMP_REPLACE) {
		WARN_ON_ONCE(!tr->extension_prog);
		err = bpf_arch_text_poke(tr->func.addr, BPF_MOD_JUMP,
					 tr->extension_prog->bpf_func, NULL);
		tr->extension_prog = NULL;
		goto out;
	}
	hlist_del_init(&prog->aux->tramp_hlist);
	tr->progs_cnt[kind]--;
	err = bpf_trampoline_update(tr);
out:
	mutex_unlock(&tr->mutex);
	return err;
}

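/* Roughly, the attach path pairs these as:
 *
 *	tr = bpf_trampoline_get(key, &tgt_info);
 *	err = bpf_trampoline_link_prog(prog, tr);
 *	...
 *	bpf_trampoline_unlink_prog(prog, tr);
 *	bpf_trampoline_put(tr);
 */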
struct bpf_trampoline *bpf_trampoline_get(u64 key,
					  struct bpf_attach_target_info *tgt_info)
{
	struct bpf_trampoline *tr;

	tr = bpf_trampoline_lookup(key);
	if (!tr)
		return NULL;

	mutex_lock(&tr->mutex);
	if (tr->func.addr)
		goto out;

	memcpy(&tr->func.model, &tgt_info->fmodel, sizeof(tgt_info->fmodel));
	tr->func.addr = (void *)tgt_info->tgt_addr;
out:
	mutex_unlock(&tr->mutex);
	return tr;
}

void bpf_trampoline_put(struct bpf_trampoline *tr)
{
	if (!tr)
		return;
	mutex_lock(&trampoline_mutex);
	if (!refcount_dec_and_test(&tr->refcnt))
		goto out;
	WARN_ON_ONCE(mutex_is_locked(&tr->mutex));
	if (WARN_ON_ONCE(!hlist_empty(&tr->progs_hlist[BPF_TRAMP_FENTRY])))
		goto out;
	if (WARN_ON_ONCE(!hlist_empty(&tr->progs_hlist[BPF_TRAMP_FEXIT])))
		goto out;
	/* This code will be executed even when the last bpf_tramp_image
	 * is alive. All progs are detached from the trampoline and the
	 * trampoline image is patched with jmp into epilogue to skip
	 * fexit progs. The fentry-only trampoline will be freed via
	 * multiple rcu callbacks.
	 */
	hlist_del(&tr->hlist);
	kfree(tr);
out:
	mutex_unlock(&trampoline_mutex);
}

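/* sched_clock() can legitimately return 0, so NO_START_TIME (1) doubles as
 * the "stats enabled but no usable timestamp" sentinel; see the
 * __bpf_prog_enter return contract below.
 */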
#define NO_START_TIME 1
static u64 notrace bpf_prog_start_time(void)
{
	u64 start = NO_START_TIME;

	if (static_branch_unlikely(&bpf_stats_enabled_key)) {
		start = sched_clock();
		if (unlikely(!start))
			start = NO_START_TIME;
	}
	return start;
}

static void notrace inc_misses_counter(struct bpf_prog *prog)
{
	struct bpf_prog_stats *stats;

	stats = this_cpu_ptr(prog->stats);
	u64_stats_update_begin(&stats->syncp);
	stats->misses++;
	u64_stats_update_end(&stats->syncp);
}

/* The logic is similar to bpf_prog_run(), but with an explicit
 * rcu_read_lock() and migrate_disable() which are required
 * for the trampoline. The macro is split into
 * call __bpf_prog_enter
 * call prog->bpf_func
 * call __bpf_prog_exit
 *
 * __bpf_prog_enter returns:
 * 0 - skip execution of the bpf prog
 * 1 - execute bpf prog
 * [2..MAX_U64] - execute bpf prog and record execution time.
 *     This is start time.
 */
u64 notrace __bpf_prog_enter(struct bpf_prog *prog)
	__acquires(RCU)
{
	rcu_read_lock();
	migrate_disable();
	if (unlikely(__this_cpu_inc_return(*(prog->active)) != 1)) {
		inc_misses_counter(prog);
		return 0;
	}
	return bpf_prog_start_time();
}

static void notrace update_prog_stats(struct bpf_prog *prog,
				      u64 start)
{
	struct bpf_prog_stats *stats;

	if (static_branch_unlikely(&bpf_stats_enabled_key) &&
	    /* static_key could be enabled in __bpf_prog_enter*
	     * and disabled in __bpf_prog_exit*.
	     * And vice versa.
	     * Hence check that 'start' is valid.
	     */
	    start > NO_START_TIME) {
		stats = this_cpu_ptr(prog->stats);
		u64_stats_update_begin(&stats->syncp);
		stats->cnt++;
		stats->nsecs += sched_clock() - start;
		u64_stats_update_end(&stats->syncp);
	}
}

void notrace __bpf_prog_exit(struct bpf_prog *prog, u64 start)
	__releases(RCU)
{
	update_prog_stats(prog, start);
	__this_cpu_dec(*(prog->active));
	migrate_enable();
	rcu_read_unlock();
}

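/* Sleepable progs are protected by rcu_read_lock_trace() rather than
 * rcu_read_lock(), so they are allowed to fault and sleep while attached.
 */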
u64 notrace __bpf_prog_enter_sleepable(struct bpf_prog *prog)
{
	rcu_read_lock_trace();
	migrate_disable();
	might_fault();
	if (unlikely(__this_cpu_inc_return(*(prog->active)) != 1)) {
		inc_misses_counter(prog);
		return 0;
	}
	return bpf_prog_start_time();
}

void notrace __bpf_prog_exit_sleepable(struct bpf_prog *prog, u64 start)
{
	update_prog_stats(prog, start);
	__this_cpu_dec(*(prog->active));
	migrate_enable();
	rcu_read_unlock_trace();
}

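/* Called from the trampoline asm itself to hold the image alive while it
 * calls into the original function (the fexit/fmod_ret case).
 */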
void notrace __bpf_tramp_enter(struct bpf_tramp_image *tr)
{
	percpu_ref_get(&tr->pcref);
}

void notrace __bpf_tramp_exit(struct bpf_tramp_image *tr)
{
	percpu_ref_put(&tr->pcref);
}

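/* Weak stub for architectures without trampoline support; arch code
 * overrides this to emit the actual trampoline instructions.
 */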
int __weak
arch_prepare_bpf_trampoline(struct bpf_tramp_image *tr, void *image, void *image_end,
			    const struct btf_func_model *m, u32 flags,
			    struct bpf_tramp_progs *tprogs,
			    void *orig_call)
{
	return -ENOTSUPP;
}

static int __init init_trampolines(void)
{
	int i;

	for (i = 0; i < TRAMPOLINE_TABLE_SIZE; i++)
		INIT_HLIST_HEAD(&trampoline_table[i]);
	return 0;
}
late_initcall(init_trampolines);