// SPDX-License-Identifier: GPL-2.0-only
/* Copyright (c) 2011-2014 PLUMgrid, http://plumgrid.com
 */
#include <linux/bpf.h>
#include <linux/bpf-cgroup.h>
#include <linux/rcupdate.h>
#include <linux/random.h>
#include <linux/smp.h>
#include <linux/topology.h>
#include <linux/ktime.h>
#include <linux/sched.h>
#include <linux/uidgid.h>
#include <linux/filter.h>
#include <linux/ctype.h>
#include <linux/jiffies.h>
#include <linux/pid_namespace.h>
#include <linux/proc_ns.h>
#include <linux/security.h>

#include "../../lib/kstrtox.h"
/* If a kernel subsystem allows eBPF programs to call this function,
 * it should return bpf_map_lookup_elem_proto from its own
 * verifier_ops->get_func_proto() callback, so that the verifier can
 * properly check the arguments. Otherwise, return NULL.
 *
 * Different map implementations will rely on rcu in map methods
 * lookup/update/delete, therefore eBPF programs must run under rcu lock
 * if the program is allowed to access maps, so check rcu_read_lock_held()
 * in all three functions.
 */
BPF_CALL_2(bpf_map_lookup_elem, struct bpf_map *, map, void *, key)
{
	WARN_ON_ONCE(!rcu_read_lock_held() && !rcu_read_lock_bh_held());
	return (unsigned long) map->ops->map_lookup_elem(map, key);
}

const struct bpf_func_proto bpf_map_lookup_elem_proto = {
	.func		= bpf_map_lookup_elem,
	.gpl_only	= false,
	.pkt_access	= true,
	.ret_type	= RET_PTR_TO_MAP_VALUE_OR_NULL,
	.arg1_type	= ARG_CONST_MAP_PTR,
	.arg2_type	= ARG_PTR_TO_MAP_KEY,
};
BPF_CALL_4(bpf_map_update_elem, struct bpf_map *, map, void *, key,
	   void *, value, u64, flags)
{
	WARN_ON_ONCE(!rcu_read_lock_held() && !rcu_read_lock_bh_held());
	return map->ops->map_update_elem(map, key, value, flags);
}

const struct bpf_func_proto bpf_map_update_elem_proto = {
	.func		= bpf_map_update_elem,
	.gpl_only	= false,
	.pkt_access	= true,
	.ret_type	= RET_INTEGER,
	.arg1_type	= ARG_CONST_MAP_PTR,
	.arg2_type	= ARG_PTR_TO_MAP_KEY,
	.arg3_type	= ARG_PTR_TO_MAP_VALUE,
	.arg4_type	= ARG_ANYTHING,
};

BPF_CALL_2(bpf_map_delete_elem, struct bpf_map *, map, void *, key)
{
	WARN_ON_ONCE(!rcu_read_lock_held() && !rcu_read_lock_bh_held());
	return map->ops->map_delete_elem(map, key);
}

const struct bpf_func_proto bpf_map_delete_elem_proto = {
	.func		= bpf_map_delete_elem,
	.gpl_only	= false,
	.pkt_access	= true,
	.ret_type	= RET_INTEGER,
	.arg1_type	= ARG_CONST_MAP_PTR,
	.arg2_type	= ARG_PTR_TO_MAP_KEY,
};
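
/* Illustrative sketch, not part of this file: from a BPF program the three
 * helpers above are reached through the libbpf wrappers of the same name.
 * Assuming a hypothetical hash map "counters" declared in the program, a
 * common lookup-or-init pattern looks like:
 *
 *	struct {
 *		__uint(type, BPF_MAP_TYPE_HASH);
 *		__uint(max_entries, 1024);
 *		__type(key, u32);
 *		__type(value, u64);
 *	} counters SEC(".maps");
 *
 *	u32 key = 0;
 *	u64 init = 1, *val;
 *
 *	val = bpf_map_lookup_elem(&counters, &key);
 *	if (val)
 *		__sync_fetch_and_add(val, 1);
 *	else
 *		bpf_map_update_elem(&counters, &key, &init, BPF_NOEXIST);
 */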
BPF_CALL_3(bpf_map_push_elem, struct bpf_map *, map, void *, value, u64, flags)
{
	return map->ops->map_push_elem(map, value, flags);
}

const struct bpf_func_proto bpf_map_push_elem_proto = {
	.func		= bpf_map_push_elem,
	.gpl_only	= false,
	.pkt_access	= true,
	.ret_type	= RET_INTEGER,
	.arg1_type	= ARG_CONST_MAP_PTR,
	.arg2_type	= ARG_PTR_TO_MAP_VALUE,
	.arg3_type	= ARG_ANYTHING,
};

BPF_CALL_2(bpf_map_pop_elem, struct bpf_map *, map, void *, value)
{
	return map->ops->map_pop_elem(map, value);
}

const struct bpf_func_proto bpf_map_pop_elem_proto = {
	.func		= bpf_map_pop_elem,
	.gpl_only	= false,
	.ret_type	= RET_INTEGER,
	.arg1_type	= ARG_CONST_MAP_PTR,
	.arg2_type	= ARG_PTR_TO_UNINIT_MAP_VALUE,
};

BPF_CALL_2(bpf_map_peek_elem, struct bpf_map *, map, void *, value)
{
	return map->ops->map_peek_elem(map, value);
}

const struct bpf_func_proto bpf_map_peek_elem_proto = {
	.func		= bpf_map_peek_elem,
	.gpl_only	= false,
	.ret_type	= RET_INTEGER,
	.arg1_type	= ARG_CONST_MAP_PTR,
	.arg2_type	= ARG_PTR_TO_UNINIT_MAP_VALUE,
};
const struct bpf_func_proto bpf_get_prandom_u32_proto = {
	.func		= bpf_user_rnd_u32,
	.gpl_only	= false,
	.ret_type	= RET_INTEGER,
};
BPF_CALL_0(bpf_get_smp_processor_id)
{
	return smp_processor_id();
}

const struct bpf_func_proto bpf_get_smp_processor_id_proto = {
	.func		= bpf_get_smp_processor_id,
	.gpl_only	= false,
	.ret_type	= RET_INTEGER,
};

BPF_CALL_0(bpf_get_numa_node_id)
{
	return numa_node_id();
}

const struct bpf_func_proto bpf_get_numa_node_id_proto = {
	.func		= bpf_get_numa_node_id,
	.gpl_only	= false,
	.ret_type	= RET_INTEGER,
};

BPF_CALL_0(bpf_ktime_get_ns)
{
	/* NMI safe access to clock monotonic */
	return ktime_get_mono_fast_ns();
}

const struct bpf_func_proto bpf_ktime_get_ns_proto = {
	.func		= bpf_ktime_get_ns,
	.gpl_only	= false,
	.ret_type	= RET_INTEGER,
};

BPF_CALL_0(bpf_ktime_get_boot_ns)
{
	/* NMI safe access to clock boottime */
	return ktime_get_boot_fast_ns();
}

const struct bpf_func_proto bpf_ktime_get_boot_ns_proto = {
	.func		= bpf_ktime_get_boot_ns,
	.gpl_only	= false,
	.ret_type	= RET_INTEGER,
};

BPF_CALL_0(bpf_ktime_get_coarse_ns)
{
	return ktime_get_coarse_ns();
}

const struct bpf_func_proto bpf_ktime_get_coarse_ns_proto = {
	.func		= bpf_ktime_get_coarse_ns,
	.gpl_only	= false,
	.ret_type	= RET_INTEGER,
};
BPF_CALL_0(bpf_get_current_pid_tgid)
{
	struct task_struct *task = current;

	if (unlikely(!task))
		return -EINVAL;

	return (u64) task->tgid << 32 | task->pid;
}

const struct bpf_func_proto bpf_get_current_pid_tgid_proto = {
	.func		= bpf_get_current_pid_tgid,
	.gpl_only	= false,
	.ret_type	= RET_INTEGER,
};
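
/* Illustrative sketch, not part of this file: since the helper packs the
 * tgid into the upper 32 bits and the pid into the lower 32 bits, a BPF
 * program typically unpacks the return value like this:
 *
 *	u64 pid_tgid = bpf_get_current_pid_tgid();
 *	u32 tgid = pid_tgid >> 32;	// what user space calls the PID
 *	u32 pid = (u32)pid_tgid;	// what user space calls the TID
 */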
BPF_CALL_0(bpf_get_current_uid_gid)
{
	struct task_struct *task = current;
	kuid_t uid;
	kgid_t gid;

	if (unlikely(!task))
		return -EINVAL;

	current_uid_gid(&uid, &gid);
	return (u64) from_kgid(&init_user_ns, gid) << 32 |
		     from_kuid(&init_user_ns, uid);
}

const struct bpf_func_proto bpf_get_current_uid_gid_proto = {
	.func		= bpf_get_current_uid_gid,
	.gpl_only	= false,
	.ret_type	= RET_INTEGER,
};
BPF_CALL_2(bpf_get_current_comm, char *, buf, u32, size)
{
	struct task_struct *task = current;

	if (unlikely(!task))
		goto err_clear;

	strncpy(buf, task->comm, size);

	/* Verifier guarantees that size > 0. For task->comm exceeding
	 * size, guarantee that buf is %NUL-terminated. Unconditionally
	 * done here to save the size test.
	 */
	buf[size - 1] = 0;
	return 0;
err_clear:
	memset(buf, 0, size);
	return -EINVAL;
}

const struct bpf_func_proto bpf_get_current_comm_proto = {
	.func		= bpf_get_current_comm,
	.gpl_only	= false,
	.ret_type	= RET_INTEGER,
	.arg1_type	= ARG_PTR_TO_UNINIT_MEM,
	.arg2_type	= ARG_CONST_SIZE,
};
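
/* Illustrative sketch, not part of this file: the buffer must be at least
 * one byte and the helper NUL-terminates it, so a fixed-size buffer on the
 * BPF stack sized like TASK_COMM_LEN is the usual pattern:
 *
 *	char comm[16];
 *
 *	if (bpf_get_current_comm(comm, sizeof(comm)) == 0)
 *		bpf_printk("comm=%s", comm);
 */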
#if defined(CONFIG_QUEUED_SPINLOCKS) || defined(CONFIG_BPF_ARCH_SPINLOCK)

static inline void __bpf_spin_lock(struct bpf_spin_lock *lock)
{
	arch_spinlock_t *l = (void *)lock;
	union {
		__u32 val;
		arch_spinlock_t lock;
	} u = { .lock = __ARCH_SPIN_LOCK_UNLOCKED };

	compiletime_assert(u.val == 0, "__ARCH_SPIN_LOCK_UNLOCKED not 0");
	BUILD_BUG_ON(sizeof(*l) != sizeof(__u32));
	BUILD_BUG_ON(sizeof(*lock) != sizeof(__u32));
	arch_spin_lock(l);
}

static inline void __bpf_spin_unlock(struct bpf_spin_lock *lock)
{
	arch_spinlock_t *l = (void *)lock;

	arch_spin_unlock(l);
}

#else

static inline void __bpf_spin_lock(struct bpf_spin_lock *lock)
{
	atomic_t *l = (void *)lock;

	BUILD_BUG_ON(sizeof(*l) != sizeof(*lock));
	do {
		atomic_cond_read_relaxed(l, !VAL);
	} while (atomic_xchg(l, 1));
}

static inline void __bpf_spin_unlock(struct bpf_spin_lock *lock)
{
	atomic_t *l = (void *)lock;

	atomic_set_release(l, 0);
}

#endif
static DEFINE_PER_CPU(unsigned long, irqsave_flags);

static inline void __bpf_spin_lock_irqsave(struct bpf_spin_lock *lock)
{
	unsigned long flags;

	local_irq_save(flags);
	__bpf_spin_lock(lock);
	__this_cpu_write(irqsave_flags, flags);
}

notrace BPF_CALL_1(bpf_spin_lock, struct bpf_spin_lock *, lock)
{
	__bpf_spin_lock_irqsave(lock);
	return 0;
}

const struct bpf_func_proto bpf_spin_lock_proto = {
	.func		= bpf_spin_lock,
	.gpl_only	= false,
	.ret_type	= RET_VOID,
	.arg1_type	= ARG_PTR_TO_SPIN_LOCK,
};

static inline void __bpf_spin_unlock_irqrestore(struct bpf_spin_lock *lock)
{
	unsigned long flags;

	flags = __this_cpu_read(irqsave_flags);
	__bpf_spin_unlock(lock);
	local_irq_restore(flags);
}

notrace BPF_CALL_1(bpf_spin_unlock, struct bpf_spin_lock *, lock)
{
	__bpf_spin_unlock_irqrestore(lock);
	return 0;
}

const struct bpf_func_proto bpf_spin_unlock_proto = {
	.func		= bpf_spin_unlock,
	.gpl_only	= false,
	.ret_type	= RET_VOID,
	.arg1_type	= ARG_PTR_TO_SPIN_LOCK,
};
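
/* Illustrative sketch, not part of this file: BPF programs use these two
 * helpers on a struct bpf_spin_lock embedded in a map value. Assuming a
 * hypothetical value type protecting a counter against concurrent updates:
 *
 *	struct val_t {
 *		struct bpf_spin_lock lock;
 *		u64 count;
 *	};
 *
 *	struct val_t *val = bpf_map_lookup_elem(&m, &key);
 *	if (val) {
 *		bpf_spin_lock(&val->lock);
 *		val->count++;
 *		bpf_spin_unlock(&val->lock);
 *	}
 */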
void copy_map_value_locked(struct bpf_map *map, void *dst, void *src,
			   bool lock_src)
{
	struct bpf_spin_lock *lock;

	if (lock_src)
		lock = src + map->spin_lock_off;
	else
		lock = dst + map->spin_lock_off;
	preempt_disable();
	__bpf_spin_lock_irqsave(lock);
	copy_map_value(map, dst, src);
	__bpf_spin_unlock_irqrestore(lock);
	preempt_enable();
}
BPF_CALL_0(bpf_jiffies64)
{
	return get_jiffies_64();
}

const struct bpf_func_proto bpf_jiffies64_proto = {
	.func		= bpf_jiffies64,
	.gpl_only	= false,
	.ret_type	= RET_INTEGER,
};
#ifdef CONFIG_CGROUPS
BPF_CALL_0(bpf_get_current_cgroup_id)
{
	struct cgroup *cgrp;
	u64 cgrp_id;

	rcu_read_lock();
	cgrp = task_dfl_cgroup(current);
	cgrp_id = cgroup_id(cgrp);
	rcu_read_unlock();

	return cgrp_id;
}

const struct bpf_func_proto bpf_get_current_cgroup_id_proto = {
	.func		= bpf_get_current_cgroup_id,
	.gpl_only	= false,
	.ret_type	= RET_INTEGER,
};

BPF_CALL_1(bpf_get_current_ancestor_cgroup_id, int, ancestor_level)
{
	struct cgroup *cgrp;
	struct cgroup *ancestor;
	u64 cgrp_id;

	rcu_read_lock();
	cgrp = task_dfl_cgroup(current);
	ancestor = cgroup_ancestor(cgrp, ancestor_level);
	cgrp_id = ancestor ? cgroup_id(ancestor) : 0;
	rcu_read_unlock();

	return cgrp_id;
}

const struct bpf_func_proto bpf_get_current_ancestor_cgroup_id_proto = {
	.func		= bpf_get_current_ancestor_cgroup_id,
	.gpl_only	= false,
	.ret_type	= RET_INTEGER,
	.arg1_type	= ARG_ANYTHING,
};
#endif /* CONFIG_CGROUPS */
#ifdef CONFIG_CGROUP_BPF

BPF_CALL_2(bpf_get_local_storage, struct bpf_map *, map, u64, flags)
{
	/* flags argument is not used now,
	 * but provides an ability to extend the API.
	 * verifier checks that its value is correct.
	 */
	enum bpf_cgroup_storage_type stype = cgroup_storage_type(map);
	struct bpf_cgroup_storage *storage;
	struct bpf_cg_run_ctx *ctx;
	void *ptr;

	/* get current cgroup storage from BPF run context */
	ctx = container_of(current->bpf_ctx, struct bpf_cg_run_ctx, run_ctx);
	storage = ctx->prog_item->cgroup_storage[stype];

	if (stype == BPF_CGROUP_STORAGE_SHARED)
		ptr = &READ_ONCE(storage->buf)->data[0];
	else
		ptr = this_cpu_ptr(storage->percpu_buf);

	return (unsigned long)ptr;
}

const struct bpf_func_proto bpf_get_local_storage_proto = {
	.func		= bpf_get_local_storage,
	.gpl_only	= false,
	.ret_type	= RET_PTR_TO_MAP_VALUE,
	.arg1_type	= ARG_CONST_MAP_PTR,
	.arg2_type	= ARG_ANYTHING,
};
#endif
#define BPF_STRTOX_BASE_MASK 0x1F

static int __bpf_strtoull(const char *buf, size_t buf_len, u64 flags,
			  unsigned long long *res, bool *is_negative)
{
	unsigned int base = flags & BPF_STRTOX_BASE_MASK;
	const char *cur_buf = buf;
	size_t cur_len = buf_len;
	unsigned int consumed;
	size_t val_len;
	char str[64];

	if (!buf || !buf_len || !res || !is_negative)
		return -EINVAL;

	if (base != 0 && base != 8 && base != 10 && base != 16)
		return -EINVAL;

	if (flags & ~BPF_STRTOX_BASE_MASK)
		return -EINVAL;

	while (cur_buf < buf + buf_len && isspace(*cur_buf))
		++cur_buf;

	*is_negative = (cur_buf < buf + buf_len && *cur_buf == '-');
	if (*is_negative)
		++cur_buf;

	consumed = cur_buf - buf;
	cur_len -= consumed;
	if (!cur_len)
		return -EINVAL;

	cur_len = min(cur_len, sizeof(str) - 1);
	memcpy(str, cur_buf, cur_len);
	str[cur_len] = '\0';
	cur_buf = str;

	cur_buf = _parse_integer_fixup_radix(cur_buf, &base);
	val_len = _parse_integer(cur_buf, base, res);

	if (val_len & KSTRTOX_OVERFLOW)
		return -ERANGE;

	if (val_len == 0)
		return -EINVAL;

	cur_buf += val_len;
	consumed += cur_buf - str;

	return consumed;
}
static int __bpf_strtoll(const char *buf, size_t buf_len, u64 flags,
			 long long *res)
{
	unsigned long long _res;
	bool is_negative;
	int err;

	err = __bpf_strtoull(buf, buf_len, flags, &_res, &is_negative);
	if (err < 0)
		return err;
	if (is_negative) {
		if ((long long)-_res > 0)
			return -ERANGE;
		*res = -_res;
	} else {
		if ((long long)_res < 0)
			return -ERANGE;
		*res = _res;
	}
	return err;
}

BPF_CALL_4(bpf_strtol, const char *, buf, size_t, buf_len, u64, flags,
	   long *, res)
{
	long long _res;
	int err;

	err = __bpf_strtoll(buf, buf_len, flags, &_res);
	if (err < 0)
		return err;
	if (_res != (long)_res)
		return -ERANGE;
	*res = _res;
	return err;
}

const struct bpf_func_proto bpf_strtol_proto = {
	.func		= bpf_strtol,
	.gpl_only	= false,
	.ret_type	= RET_INTEGER,
	.arg1_type	= ARG_PTR_TO_MEM | MEM_RDONLY,
	.arg2_type	= ARG_CONST_SIZE,
	.arg3_type	= ARG_ANYTHING,
	.arg4_type	= ARG_PTR_TO_LONG,
};
BPF_CALL_4(bpf_strtoul, const char *, buf, size_t, buf_len, u64, flags,
	   unsigned long *, res)
{
	unsigned long long _res;
	bool is_negative;
	int err;

	err = __bpf_strtoull(buf, buf_len, flags, &_res, &is_negative);
	if (err < 0)
		return err;
	if (is_negative)
		return -EINVAL;
	if (_res != (unsigned long)_res)
		return -ERANGE;
	*res = _res;
	return err;
}

const struct bpf_func_proto bpf_strtoul_proto = {
	.func		= bpf_strtoul,
	.gpl_only	= false,
	.ret_type	= RET_INTEGER,
	.arg1_type	= ARG_PTR_TO_MEM | MEM_RDONLY,
	.arg2_type	= ARG_CONST_SIZE,
	.arg3_type	= ARG_ANYTHING,
	.arg4_type	= ARG_PTR_TO_LONG,
};
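
/* Illustrative sketch, not part of this file: the flags argument carries
 * the numeric base in its low bits (BPF_STRTOX_BASE_MASK); base 0 means
 * auto-detect the radix, as with kstrtol(). On success the helpers return
 * the number of characters consumed, including leading whitespace and sign:
 *
 *	const char s[] = "  -42";
 *	long v;
 *
 *	int n = bpf_strtol(s, sizeof(s), 0, &v);
 *	// on success n == 5 and v == -42; n < 0 on error
 */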
BPF_CALL_3(bpf_strncmp, const char *, s1, u32, s1_sz, const char *, s2)
{
	return strncmp(s1, s2, s1_sz);
}

const struct bpf_func_proto bpf_strncmp_proto = {
	.func		= bpf_strncmp,
	.gpl_only	= false,
	.ret_type	= RET_INTEGER,
	.arg1_type	= ARG_PTR_TO_MEM,
	.arg2_type	= ARG_CONST_SIZE,
	.arg3_type	= ARG_PTR_TO_CONST_STR,
};
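
/* Illustrative sketch, not part of this file: s2 must be a constant
 * NUL-terminated string known to the verifier, while s1 may be any readable
 * buffer of at least s1_sz bytes. Comparing 5 bytes includes the NUL, so
 * the check below matches exactly "sshd":
 *
 *	char comm[16];
 *
 *	bpf_get_current_comm(comm, sizeof(comm));
 *	if (bpf_strncmp(comm, 5, "sshd") == 0)
 *		return handle_sshd();	// hypothetical helper in the program
 */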
BPF_CALL_4(bpf_get_ns_current_pid_tgid, u64, dev, u64, ino,
	   struct bpf_pidns_info *, nsdata, u32, size)
{
	struct task_struct *task = current;
	struct pid_namespace *pidns;
	int err = -EINVAL;

	if (unlikely(size != sizeof(struct bpf_pidns_info)))
		goto clear;

	if (unlikely((u64)(dev_t)dev != dev))
		goto clear;

	if (unlikely(!task))
		goto clear;

	pidns = task_active_pid_ns(task);
	if (unlikely(!pidns)) {
		err = -ENOENT;
		goto clear;
	}

	if (!ns_match(&pidns->ns, (dev_t)dev, ino))
		goto clear;

	nsdata->pid = task_pid_nr_ns(task, pidns);
	nsdata->tgid = task_tgid_nr_ns(task, pidns);
	return 0;
clear:
	memset((void *)nsdata, 0, (size_t) size);
	return err;
}

const struct bpf_func_proto bpf_get_ns_current_pid_tgid_proto = {
	.func		= bpf_get_ns_current_pid_tgid,
	.gpl_only	= false,
	.ret_type	= RET_INTEGER,
	.arg1_type	= ARG_ANYTHING,
	.arg2_type	= ARG_ANYTHING,
	.arg3_type	= ARG_PTR_TO_UNINIT_MEM,
	.arg4_type	= ARG_CONST_SIZE,
};
static const struct bpf_func_proto bpf_get_raw_smp_processor_id_proto = {
	.func		= bpf_get_raw_cpu_id,
	.gpl_only	= false,
	.ret_type	= RET_INTEGER,
};

BPF_CALL_5(bpf_event_output_data, void *, ctx, struct bpf_map *, map,
	   u64, flags, void *, data, u64, size)
{
	if (unlikely(flags & ~(BPF_F_INDEX_MASK)))
		return -EINVAL;

	return bpf_event_output(map, flags, data, size, NULL, 0, NULL);
}

const struct bpf_func_proto bpf_event_output_data_proto =  {
	.func		= bpf_event_output_data,
	.gpl_only       = true,
	.ret_type       = RET_INTEGER,
	.arg1_type      = ARG_PTR_TO_CTX,
	.arg2_type      = ARG_CONST_MAP_PTR,
	.arg3_type      = ARG_ANYTHING,
	.arg4_type      = ARG_PTR_TO_MEM | MEM_RDONLY,
	.arg5_type      = ARG_CONST_SIZE_OR_ZERO,
};
BPF_CALL_3(bpf_copy_from_user, void *, dst, u32, size,
	   const void __user *, user_ptr)
{
	int ret = copy_from_user(dst, user_ptr, size);

	if (unlikely(ret)) {
		memset(dst, 0, size);
		ret = -EFAULT;
	}

	return ret;
}

const struct bpf_func_proto bpf_copy_from_user_proto = {
	.func		= bpf_copy_from_user,
	.gpl_only	= false,
	.ret_type	= RET_INTEGER,
	.arg1_type	= ARG_PTR_TO_UNINIT_MEM,
	.arg2_type	= ARG_CONST_SIZE_OR_ZERO,
	.arg3_type	= ARG_ANYTHING,
};
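
/* Illustrative sketch, not part of this file: copy_from_user() may fault
 * and sleep, so this helper is only offered to sleepable programs (e.g. an
 * fentry.s or sleepable LSM program). On failure the destination has
 * already been zeroed by the helper:
 *
 *	char tmp[64] = {};
 *
 *	if (bpf_copy_from_user(tmp, sizeof(tmp), user_ptr))
 *		return 0;	// faulted; tmp was zeroed by the helper
 */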
BPF_CALL_2(bpf_per_cpu_ptr, const void *, ptr, u32, cpu)
{
	if (cpu >= nr_cpu_ids)
		return (unsigned long)NULL;

	return (unsigned long)per_cpu_ptr((const void __percpu *)ptr, cpu);
}

const struct bpf_func_proto bpf_per_cpu_ptr_proto = {
	.func		= bpf_per_cpu_ptr,
	.gpl_only	= false,
	.ret_type	= RET_PTR_TO_MEM_OR_BTF_ID | PTR_MAYBE_NULL | MEM_RDONLY,
	.arg1_type	= ARG_PTR_TO_PERCPU_BTF_ID,
	.arg2_type	= ARG_ANYTHING,
};

BPF_CALL_1(bpf_this_cpu_ptr, const void *, percpu_ptr)
{
	return (unsigned long)this_cpu_ptr((const void __percpu *)percpu_ptr);
}

const struct bpf_func_proto bpf_this_cpu_ptr_proto = {
	.func		= bpf_this_cpu_ptr,
	.gpl_only	= false,
	.ret_type	= RET_PTR_TO_MEM_OR_BTF_ID | MEM_RDONLY,
	.arg1_type	= ARG_PTR_TO_PERCPU_BTF_ID,
};
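
/* Illustrative sketch, not part of this file: both helpers take a pointer
 * to a per-cpu kernel variable resolved through a BTF ksym, for example
 * the scheduler's per-cpu runqueues:
 *
 *	extern const struct rq runqueues __ksym;	// per-cpu kernel var
 *
 *	const struct rq *rq;
 *
 *	rq = bpf_per_cpu_ptr(&runqueues, cpu);	// NULL if cpu is out of range
 *	if (!rq)
 *		return 0;
 *	rq = bpf_this_cpu_ptr(&runqueues);	// current cpu, never NULL
 */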
static int bpf_trace_copy_string(char *buf, void *unsafe_ptr, char fmt_ptype,
		size_t bufsz)
{
	void __user *user_ptr = (__force void __user *)unsafe_ptr;

	buf[0] = 0;

	switch (fmt_ptype) {
	case 's':
#ifdef CONFIG_ARCH_HAS_NON_OVERLAPPING_ADDRESS_SPACE
		if ((unsigned long)unsafe_ptr < TASK_SIZE)
			return strncpy_from_user_nofault(buf, user_ptr, bufsz);
		fallthrough;
#endif
	case 'k':
		return strncpy_from_kernel_nofault(buf, unsafe_ptr, bufsz);
	case 'u':
		return strncpy_from_user_nofault(buf, user_ptr, bufsz);
	}

	return -EINVAL;
}
/* Per-cpu temp buffers used by printf-like helpers to store the bprintf binary
 * arguments representation.
 */
#define MAX_BPRINTF_BUF_LEN	512

/* Support executing three nested bprintf helper calls on a given CPU */
#define MAX_BPRINTF_NEST_LEVEL	3
struct bpf_bprintf_buffers {
	char tmp_bufs[MAX_BPRINTF_NEST_LEVEL][MAX_BPRINTF_BUF_LEN];
};
static DEFINE_PER_CPU(struct bpf_bprintf_buffers, bpf_bprintf_bufs);
static DEFINE_PER_CPU(int, bpf_bprintf_nest_level);

static int try_get_fmt_tmp_buf(char **tmp_buf)
{
	struct bpf_bprintf_buffers *bufs;
	int nest_level;

	preempt_disable();
	nest_level = this_cpu_inc_return(bpf_bprintf_nest_level);
	if (WARN_ON_ONCE(nest_level > MAX_BPRINTF_NEST_LEVEL)) {
		this_cpu_dec(bpf_bprintf_nest_level);
		preempt_enable();
		return -EBUSY;
	}
	bufs = this_cpu_ptr(&bpf_bprintf_bufs);
	*tmp_buf = bufs->tmp_bufs[nest_level - 1];

	return 0;
}
void bpf_bprintf_cleanup(void)
{
	if (this_cpu_read(bpf_bprintf_nest_level)) {
		this_cpu_dec(bpf_bprintf_nest_level);
		preempt_enable();
	}
}

/*
 * bpf_bprintf_prepare - Generic pass on format strings for bprintf-like helpers
 *
 * Returns a negative value if fmt is an invalid format string or 0 otherwise.
 *
 * This can be used in two ways:
 * - Format string verification only: when bin_args is NULL
 * - Arguments preparation: in addition to the above verification, it writes in
 *   bin_args a binary representation of arguments usable by bstr_printf where
 *   pointers from BPF have been sanitized.
 *
 * In argument preparation mode, if 0 is returned, safe temporary buffers are
 * allocated and bpf_bprintf_cleanup should be called to free them after use.
 */
int bpf_bprintf_prepare(char *fmt, u32 fmt_size, const u64 *raw_args,
			u32 **bin_args, u32 num_args)
{
	char *unsafe_ptr = NULL, *tmp_buf = NULL, *tmp_buf_end, *fmt_end;
	size_t sizeof_cur_arg, sizeof_cur_ip;
	int err, i, num_spec = 0;
	u64 cur_arg;
	char fmt_ptype, cur_ip[16], ip_spec[] = "%pXX";

	fmt_end = strnchr(fmt, fmt_size, 0);
	if (!fmt_end)
		return -EINVAL;
	fmt_size = fmt_end - fmt;

	if (bin_args) {
		if (num_args && try_get_fmt_tmp_buf(&tmp_buf))
			return -EBUSY;

		tmp_buf_end = tmp_buf + MAX_BPRINTF_BUF_LEN;
		*bin_args = (u32 *)tmp_buf;
	}

	for (i = 0; i < fmt_size; i++) {
		if ((!isprint(fmt[i]) && !isspace(fmt[i])) || !isascii(fmt[i])) {
			err = -EINVAL;
			goto out;
		}

		if (fmt[i] != '%')
			continue;

		if (fmt[i + 1] == '%') {
			i++;
			continue;
		}

		if (num_spec >= num_args) {
			err = -EINVAL;
			goto out;
		}

		/* The string is zero-terminated so if fmt[i] != 0, we can
		 * always access fmt[i + 1], in the worst case it will be a 0
		 */
		i++;

		/* skip optional "[0 +-][num]" width formatting field */
		while (fmt[i] == '0' || fmt[i] == '+' || fmt[i] == '-' ||
		       fmt[i] == ' ')
			i++;
		if (fmt[i] >= '1' && fmt[i] <= '9') {
			i++;
			while (fmt[i] >= '0' && fmt[i] <= '9')
				i++;
		}

		if (fmt[i] == 'p') {
			sizeof_cur_arg = sizeof(long);

			if ((fmt[i + 1] == 'k' || fmt[i + 1] == 'u') &&
			    fmt[i + 2] == 's') {
				fmt_ptype = fmt[i + 1];
				i += 2;
				goto fmt_str;
			}

			if (fmt[i + 1] == 0 || isspace(fmt[i + 1]) ||
			    ispunct(fmt[i + 1]) || fmt[i + 1] == 'K' ||
			    fmt[i + 1] == 'x' || fmt[i + 1] == 's' ||
			    fmt[i + 1] == 'S') {
				/* just kernel pointers */
				if (tmp_buf)
					cur_arg = raw_args[num_spec];
				i++;
				goto nocopy_fmt;
			}

			if (fmt[i + 1] == 'B') {
				if (tmp_buf) {
					err = snprintf(tmp_buf,
						       (tmp_buf_end - tmp_buf),
						       "%pB",
						       (void *)(long)raw_args[num_spec]);
					tmp_buf += (err + 1);
				}

				i++;
				num_spec++;
				continue;
			}

			/* only support "%pI4", "%pi4", "%pI6" and "%pi6". */
			if ((fmt[i + 1] != 'i' && fmt[i + 1] != 'I') ||
			    (fmt[i + 2] != '4' && fmt[i + 2] != '6')) {
				err = -EINVAL;
				goto out;
			}

			i += 2;
			if (!tmp_buf)
				goto nocopy_fmt;

			sizeof_cur_ip = (fmt[i] == '4') ? 4 : 16;
			if (tmp_buf_end - tmp_buf < sizeof_cur_ip) {
				err = -ENOSPC;
				goto out;
			}

			unsafe_ptr = (char *)(long)raw_args[num_spec];
			err = copy_from_kernel_nofault(cur_ip, unsafe_ptr,
						       sizeof_cur_ip);
			if (err < 0)
				memset(cur_ip, 0, sizeof_cur_ip);

			/* hack: bstr_printf expects IP addresses to be
			 * pre-formatted as strings, ironically, the easiest way
			 * to do that is to call snprintf.
			 */
			ip_spec[2] = fmt[i - 1];
			ip_spec[3] = fmt[i];
			err = snprintf(tmp_buf, tmp_buf_end - tmp_buf,
				       ip_spec, &cur_ip);

			tmp_buf += err + 1;
			num_spec++;

			continue;
		} else if (fmt[i] == 's') {
			fmt_ptype = fmt[i];
fmt_str:
			if (fmt[i + 1] != 0 &&
			    !isspace(fmt[i + 1]) &&
			    !ispunct(fmt[i + 1])) {
				err = -EINVAL;
				goto out;
			}

			if (!tmp_buf)
				goto nocopy_fmt;

			if (tmp_buf_end == tmp_buf) {
				err = -ENOSPC;
				goto out;
			}

			unsafe_ptr = (char *)(long)raw_args[num_spec];
			err = bpf_trace_copy_string(tmp_buf, unsafe_ptr,
						    fmt_ptype,
						    tmp_buf_end - tmp_buf);
			if (err < 0) {
				tmp_buf[0] = '\0';
				err = 1;
			}

			tmp_buf += err;
			num_spec++;

			continue;
		} else if (fmt[i] == 'c') {
			if (!tmp_buf)
				goto nocopy_fmt;

			if (tmp_buf_end == tmp_buf) {
				err = -ENOSPC;
				goto out;
			}

			*tmp_buf = raw_args[num_spec];
			tmp_buf++;
			num_spec++;

			continue;
		}

		sizeof_cur_arg = sizeof(int);

		if (fmt[i] == 'l') {
			sizeof_cur_arg = sizeof(long);
			i++;
		}
		if (fmt[i] == 'l') {
			sizeof_cur_arg = sizeof(long long);
			i++;
		}

		if (fmt[i] != 'i' && fmt[i] != 'd' && fmt[i] != 'u' &&
		    fmt[i] != 'x' && fmt[i] != 'X') {
			err = -EINVAL;
			goto out;
		}

		if (tmp_buf)
			cur_arg = raw_args[num_spec];
nocopy_fmt:
		if (tmp_buf) {
			tmp_buf = PTR_ALIGN(tmp_buf, sizeof(u32));
			if (tmp_buf_end - tmp_buf < sizeof_cur_arg) {
				err = -ENOSPC;
				goto out;
			}

			if (sizeof_cur_arg == 8) {
				*(u32 *)tmp_buf = *(u32 *)&cur_arg;
				*(u32 *)(tmp_buf + 4) = *((u32 *)&cur_arg + 1);
			} else {
				*(u32 *)tmp_buf = (u32)(long)cur_arg;
			}
			tmp_buf += sizeof_cur_arg;
		}
		num_spec++;
	}

	err = 0;
out:
	if (err)
		bpf_bprintf_cleanup();
	return err;
}
BPF_CALL_5(bpf_snprintf, char *, str, u32, str_size, char *, fmt,
	   const void *, data, u32, data_len)
{
	int err, num_args;
	u32 *bin_args;

	if (data_len % 8 || data_len > MAX_BPRINTF_VARARGS * 8 ||
	    (data_len && !data))
		return -EINVAL;
	num_args = data_len / 8;

	/* ARG_PTR_TO_CONST_STR guarantees that fmt is zero-terminated so we
	 * can safely give an unbounded size.
	 */
	err = bpf_bprintf_prepare(fmt, UINT_MAX, data, &bin_args, num_args);
	if (err < 0)
		return err;

	err = bstr_printf(str, str_size, fmt, bin_args);

	bpf_bprintf_cleanup();

	return err + 1;
}

const struct bpf_func_proto bpf_snprintf_proto = {
	.func		= bpf_snprintf,
	.gpl_only	= true,
	.ret_type	= RET_INTEGER,
	.arg1_type	= ARG_PTR_TO_MEM_OR_NULL,
	.arg2_type	= ARG_CONST_SIZE_OR_ZERO,
	.arg3_type	= ARG_PTR_TO_CONST_STR,
	.arg4_type	= ARG_PTR_TO_MEM | PTR_MAYBE_NULL | MEM_RDONLY,
	.arg5_type	= ARG_CONST_SIZE_OR_ZERO,
};
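
/* Illustrative sketch, not part of this file: a BPF program passes the
 * variadic arguments as an array of u64 cells, with data_len giving its
 * size in bytes (a multiple of 8). Names below are hypothetical:
 *
 *	char out[64];
 *	u64 args[] = { (u64)pid, (u64)(long)name_ptr };
 *
 *	bpf_snprintf(out, sizeof(out), "pid=%d comm=%s",
 *		     args, sizeof(args));
 */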
/* BPF map elements can contain 'struct bpf_timer'.
 * Such a map owns all of its BPF timers.
 * 'struct bpf_timer' is allocated as part of map element allocation
 * and it's zero initialized.
 * That space is used to keep 'struct bpf_timer_kern'.
 * bpf_timer_init() allocates 'struct bpf_hrtimer', inits hrtimer, and
 * remembers 'struct bpf_map *' pointer it's part of.
 * bpf_timer_set_callback() increments prog refcnt and assigns bpf callback_fn.
 * bpf_timer_start() arms the timer.
 * If the user space reference to a map goes to zero at this point,
 * the ops->map_release_uref callback is responsible for cancelling the timers,
 * freeing their memory, and decrementing prog's refcnts.
 * bpf_timer_cancel() cancels the timer and decrements prog's refcnt.
 * Inner maps can contain bpf timers as well. ops->map_release_uref is
 * freeing the timers when the inner map is replaced or deleted by user space.
 */
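
/* Illustrative sketch, not part of this file: a BPF program drives this
 * lifecycle roughly as follows, with a struct bpf_timer embedded in a
 * hypothetical map value type:
 *
 *	struct map_val {
 *		struct bpf_timer t;
 *	};
 *
 *	static int timer_cb(void *map, int *key, struct map_val *val)
 *	{
 *		return 0;	// must return 0, enforced by the verifier
 *	}
 *
 *	struct map_val *val = bpf_map_lookup_elem(&m, &key);
 *	if (val) {
 *		bpf_timer_init(&val->t, &m, CLOCK_MONOTONIC);
 *		bpf_timer_set_callback(&val->t, timer_cb);
 *		bpf_timer_start(&val->t, 1000000, 0);	// fires in 1ms
 *	}
 */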
struct bpf_hrtimer {
	struct hrtimer timer;
	struct bpf_map *map;
	struct bpf_prog *prog;
	void __rcu *callback_fn;
	void *value;
};

/* the actual struct hidden inside uapi struct bpf_timer */
struct bpf_timer_kern {
	struct bpf_hrtimer *timer;
	/* bpf_spin_lock is used here instead of spinlock_t to make
	 * sure that it always fits into space reserved by struct bpf_timer
	 * regardless of LOCKDEP and spinlock debug flags.
	 */
	struct bpf_spin_lock lock;
} __attribute__((aligned(8)));
static DEFINE_PER_CPU(struct bpf_hrtimer *, hrtimer_running);

static enum hrtimer_restart bpf_timer_cb(struct hrtimer *hrtimer)
{
	struct bpf_hrtimer *t = container_of(hrtimer, struct bpf_hrtimer, timer);
	struct bpf_map *map = t->map;
	void *value = t->value;
	bpf_callback_t callback_fn;
	void *key;
	u32 idx;

	BTF_TYPE_EMIT(struct bpf_timer);
	callback_fn = rcu_dereference_check(t->callback_fn, rcu_read_lock_bh_held());
	if (!callback_fn)
		goto out;

	/* bpf_timer_cb() runs in hrtimer_run_softirq. It doesn't migrate and
	 * cannot be preempted by another bpf_timer_cb() on the same cpu.
	 * Remember the timer this callback is servicing to prevent
	 * deadlock if callback_fn() calls bpf_timer_cancel() or
	 * bpf_map_delete_elem() on the same timer.
	 */
	this_cpu_write(hrtimer_running, t);
	if (map->map_type == BPF_MAP_TYPE_ARRAY) {
		struct bpf_array *array = container_of(map, struct bpf_array, map);

		/* compute the key */
		idx = ((char *)value - array->value) / array->elem_size;
		key = &idx;
	} else { /* hash or lru */
		key = value - round_up(map->key_size, 8);
	}

	callback_fn((u64)(long)map, (u64)(long)key, (u64)(long)value, 0, 0);
	/* The verifier checked that return value is zero. */

	this_cpu_write(hrtimer_running, NULL);
out:
	return HRTIMER_NORESTART;
}
BPF_CALL_3(bpf_timer_init, struct bpf_timer_kern *, timer, struct bpf_map *, map,
	   u64, flags)
{
	clockid_t clockid = flags & (MAX_CLOCKS - 1);
	struct bpf_hrtimer *t;
	int ret = 0;

	BUILD_BUG_ON(MAX_CLOCKS != 16);
	BUILD_BUG_ON(sizeof(struct bpf_timer_kern) > sizeof(struct bpf_timer));
	BUILD_BUG_ON(__alignof__(struct bpf_timer_kern) != __alignof__(struct bpf_timer));

	if (in_nmi())
		return -EOPNOTSUPP;

	if (flags >= MAX_CLOCKS ||
	    /* similar to timerfd except _ALARM variants are not supported */
	    (clockid != CLOCK_MONOTONIC &&
	     clockid != CLOCK_REALTIME &&
	     clockid != CLOCK_BOOTTIME))
		return -EINVAL;
	__bpf_spin_lock_irqsave(&timer->lock);
	t = timer->timer;
	if (t) {
		ret = -EBUSY;
		goto out;
	}
	if (!atomic64_read(&map->usercnt)) {
		/* maps with timers must be either held by user space
		 * or pinned in bpffs.
		 */
		ret = -EPERM;
		goto out;
	}
	/* allocate hrtimer via map_kmalloc to use memcg accounting */
	t = bpf_map_kmalloc_node(map, sizeof(*t), GFP_ATOMIC, map->numa_node);
	if (!t) {
		ret = -ENOMEM;
		goto out;
	}
	t->value = (void *)timer - map->timer_off;
	t->map = map;
	t->prog = NULL;
	rcu_assign_pointer(t->callback_fn, NULL);
	hrtimer_init(&t->timer, clockid, HRTIMER_MODE_REL_SOFT);
	t->timer.function = bpf_timer_cb;
	timer->timer = t;
out:
	__bpf_spin_unlock_irqrestore(&timer->lock);
	return ret;
}

static const struct bpf_func_proto bpf_timer_init_proto = {
	.func		= bpf_timer_init,
	.gpl_only	= true,
	.ret_type	= RET_INTEGER,
	.arg1_type	= ARG_PTR_TO_TIMER,
	.arg2_type	= ARG_CONST_MAP_PTR,
	.arg3_type	= ARG_ANYTHING,
};
BPF_CALL_3(bpf_timer_set_callback, struct bpf_timer_kern *, timer, void *, callback_fn,
	   struct bpf_prog_aux *, aux)
{
	struct bpf_prog *prev, *prog = aux->prog;
	struct bpf_hrtimer *t;
	int ret = 0;

	if (in_nmi())
		return -EOPNOTSUPP;
	__bpf_spin_lock_irqsave(&timer->lock);
	t = timer->timer;
	if (!t) {
		ret = -EINVAL;
		goto out;
	}
	if (!atomic64_read(&t->map->usercnt)) {
		/* maps with timers must be either held by user space
		 * or pinned in bpffs. Otherwise timer might still be
		 * running even when bpf prog is detached and user space
		 * is gone, since map_release_uref won't ever be called.
		 */
		ret = -EPERM;
		goto out;
	}
	prev = t->prog;
	if (prev != prog) {
		/* Bump prog refcnt once. Every bpf_timer_set_callback()
		 * can pick different callback_fn-s within the same prog.
		 */
		prog = bpf_prog_inc_not_zero(prog);
		if (IS_ERR(prog)) {
			ret = PTR_ERR(prog);
			goto out;
		}
		if (prev)
			/* Drop prev prog refcnt when swapping with new prog */
			bpf_prog_put(prev);
		t->prog = prog;
	}
	rcu_assign_pointer(t->callback_fn, callback_fn);
out:
	__bpf_spin_unlock_irqrestore(&timer->lock);
	return ret;
}

static const struct bpf_func_proto bpf_timer_set_callback_proto = {
	.func		= bpf_timer_set_callback,
	.gpl_only	= true,
	.ret_type	= RET_INTEGER,
	.arg1_type	= ARG_PTR_TO_TIMER,
	.arg2_type	= ARG_PTR_TO_FUNC,
};
BPF_CALL_3(bpf_timer_start, struct bpf_timer_kern *, timer, u64, nsecs, u64, flags)
{
	struct bpf_hrtimer *t;
	int ret = 0;

	if (in_nmi())
		return -EOPNOTSUPP;
	if (flags)
		return -EINVAL;
	__bpf_spin_lock_irqsave(&timer->lock);
	t = timer->timer;
	if (!t || !t->prog) {
		ret = -EINVAL;
		goto out;
	}
	hrtimer_start(&t->timer, ns_to_ktime(nsecs), HRTIMER_MODE_REL_SOFT);
out:
	__bpf_spin_unlock_irqrestore(&timer->lock);
	return ret;
}

static const struct bpf_func_proto bpf_timer_start_proto = {
	.func		= bpf_timer_start,
	.gpl_only	= true,
	.ret_type	= RET_INTEGER,
	.arg1_type	= ARG_PTR_TO_TIMER,
	.arg2_type	= ARG_ANYTHING,
	.arg3_type	= ARG_ANYTHING,
};
static void drop_prog_refcnt(struct bpf_hrtimer *t)
{
	struct bpf_prog *prog = t->prog;

	if (prog) {
		bpf_prog_put(prog);
		t->prog = NULL;
		rcu_assign_pointer(t->callback_fn, NULL);
	}
}

BPF_CALL_1(bpf_timer_cancel, struct bpf_timer_kern *, timer)
{
	struct bpf_hrtimer *t;
	int ret = 0;

	if (in_nmi())
		return -EOPNOTSUPP;
	__bpf_spin_lock_irqsave(&timer->lock);
	t = timer->timer;
	if (!t) {
		ret = -EINVAL;
		goto out;
	}
	if (this_cpu_read(hrtimer_running) == t) {
		/* If bpf callback_fn is trying to bpf_timer_cancel()
		 * its own timer the hrtimer_cancel() will deadlock
		 * since it waits for callback_fn to finish
		 */
		ret = -EDEADLK;
		goto out;
	}
	drop_prog_refcnt(t);
out:
	__bpf_spin_unlock_irqrestore(&timer->lock);
	/* Cancel the timer and wait for associated callback to finish
	 * if it was running.
	 */
	ret = ret ?: hrtimer_cancel(&t->timer);
	return ret;
}

static const struct bpf_func_proto bpf_timer_cancel_proto = {
	.func		= bpf_timer_cancel,
	.gpl_only	= true,
	.ret_type	= RET_INTEGER,
	.arg1_type	= ARG_PTR_TO_TIMER,
};
/* This function is called by map_delete/update_elem for individual element and
 * by ops->map_release_uref when the user space reference to a map reaches zero.
 */
void bpf_timer_cancel_and_free(void *val)
{
	struct bpf_timer_kern *timer = val;
	struct bpf_hrtimer *t;

	/* Performance optimization: read timer->timer without lock first. */
	if (!READ_ONCE(timer->timer))
		return;

	__bpf_spin_lock_irqsave(&timer->lock);
	/* re-read it under lock */
	t = timer->timer;
	if (!t)
		goto out;
	drop_prog_refcnt(t);
	/* The subsequent bpf_timer_start/cancel() helpers won't be able to use
	 * this timer, since it won't be initialized.
	 */
	timer->timer = NULL;
out:
	__bpf_spin_unlock_irqrestore(&timer->lock);
	if (!t)
		return;
	/* Cancel the timer and wait for callback to complete if it was running.
	 * If hrtimer_cancel() can be safely called it's safe to call kfree(t)
	 * right after for both preallocated and non-preallocated maps.
	 * The timer->timer = NULL was already done and no code path can
	 * see address 't' anymore.
	 *
	 * Check that bpf_map_delete/update_elem() wasn't called from timer
	 * callback_fn. In such case don't call hrtimer_cancel() (since it will
	 * deadlock) and don't call hrtimer_try_to_cancel() (since it will just
	 * return -1). Though callback_fn is still running on this cpu it's
	 * safe to do kfree(t) because bpf_timer_cb() read everything it needed
	 * from 't'. The bpf subprog callback_fn won't be able to access 't',
	 * since timer->timer = NULL was already done. The timer will be
	 * effectively cancelled because bpf_timer_cb() will return
	 * HRTIMER_NORESTART.
	 */
	if (this_cpu_read(hrtimer_running) != t)
		hrtimer_cancel(&t->timer);
	kfree(t);
}
const struct bpf_func_proto bpf_get_current_task_proto __weak;
const struct bpf_func_proto bpf_get_current_task_btf_proto __weak;
const struct bpf_func_proto bpf_probe_read_user_proto __weak;
const struct bpf_func_proto bpf_probe_read_user_str_proto __weak;
const struct bpf_func_proto bpf_probe_read_kernel_proto __weak;
const struct bpf_func_proto bpf_probe_read_kernel_str_proto __weak;
const struct bpf_func_proto bpf_task_pt_regs_proto __weak;
const struct bpf_func_proto *
bpf_base_func_proto(enum bpf_func_id func_id)
{
	switch (func_id) {
	case BPF_FUNC_map_lookup_elem:
		return &bpf_map_lookup_elem_proto;
	case BPF_FUNC_map_update_elem:
		return &bpf_map_update_elem_proto;
	case BPF_FUNC_map_delete_elem:
		return &bpf_map_delete_elem_proto;
	case BPF_FUNC_map_push_elem:
		return &bpf_map_push_elem_proto;
	case BPF_FUNC_map_pop_elem:
		return &bpf_map_pop_elem_proto;
	case BPF_FUNC_map_peek_elem:
		return &bpf_map_peek_elem_proto;
	case BPF_FUNC_get_prandom_u32:
		return &bpf_get_prandom_u32_proto;
	case BPF_FUNC_get_smp_processor_id:
		return &bpf_get_raw_smp_processor_id_proto;
	case BPF_FUNC_get_numa_node_id:
		return &bpf_get_numa_node_id_proto;
	case BPF_FUNC_tail_call:
		return &bpf_tail_call_proto;
	case BPF_FUNC_ktime_get_ns:
		return &bpf_ktime_get_ns_proto;
	case BPF_FUNC_ktime_get_boot_ns:
		return &bpf_ktime_get_boot_ns_proto;
	case BPF_FUNC_ringbuf_output:
		return &bpf_ringbuf_output_proto;
	case BPF_FUNC_ringbuf_reserve:
		return &bpf_ringbuf_reserve_proto;
	case BPF_FUNC_ringbuf_submit:
		return &bpf_ringbuf_submit_proto;
	case BPF_FUNC_ringbuf_discard:
		return &bpf_ringbuf_discard_proto;
	case BPF_FUNC_ringbuf_query:
		return &bpf_ringbuf_query_proto;
	case BPF_FUNC_for_each_map_elem:
		return &bpf_for_each_map_elem_proto;
	case BPF_FUNC_loop:
		return &bpf_loop_proto;
	case BPF_FUNC_strncmp:
		return &bpf_strncmp_proto;
	default:
		break;
	}

	if (!bpf_capable())
		return NULL;

	switch (func_id) {
	case BPF_FUNC_spin_lock:
		return &bpf_spin_lock_proto;
	case BPF_FUNC_spin_unlock:
		return &bpf_spin_unlock_proto;
	case BPF_FUNC_jiffies64:
		return &bpf_jiffies64_proto;
	case BPF_FUNC_per_cpu_ptr:
		return &bpf_per_cpu_ptr_proto;
	case BPF_FUNC_this_cpu_ptr:
		return &bpf_this_cpu_ptr_proto;
	case BPF_FUNC_timer_init:
		return &bpf_timer_init_proto;
	case BPF_FUNC_timer_set_callback:
		return &bpf_timer_set_callback_proto;
	case BPF_FUNC_timer_start:
		return &bpf_timer_start_proto;
	case BPF_FUNC_timer_cancel:
		return &bpf_timer_cancel_proto;
	default:
		break;
	}

	if (!perfmon_capable())
		return NULL;

	switch (func_id) {
	case BPF_FUNC_trace_printk:
		return bpf_get_trace_printk_proto();
	case BPF_FUNC_get_current_task:
		return &bpf_get_current_task_proto;
	case BPF_FUNC_get_current_task_btf:
		return &bpf_get_current_task_btf_proto;
	case BPF_FUNC_probe_read_user:
		return &bpf_probe_read_user_proto;
	case BPF_FUNC_probe_read_kernel:
		return security_locked_down(LOCKDOWN_BPF_READ_KERNEL) < 0 ?
		       NULL : &bpf_probe_read_kernel_proto;
	case BPF_FUNC_probe_read_user_str:
		return &bpf_probe_read_user_str_proto;
	case BPF_FUNC_probe_read_kernel_str:
		return security_locked_down(LOCKDOWN_BPF_READ_KERNEL) < 0 ?
		       NULL : &bpf_probe_read_kernel_str_proto;
	case BPF_FUNC_snprintf_btf:
		return &bpf_snprintf_btf_proto;
	case BPF_FUNC_snprintf:
		return &bpf_snprintf_proto;
	case BPF_FUNC_task_pt_regs:
		return &bpf_task_pt_regs_proto;
	case BPF_FUNC_trace_vprintk:
		return bpf_get_trace_vprintk_proto();
	default:
		return NULL;
	}
}