// SPDX-License-Identifier: GPL-2.0-only
/* Copyright (c) 2011-2014 PLUMgrid, http://plumgrid.com
 */
#include <linux/bpf.h>
#include <linux/rcupdate.h>
#include <linux/random.h>
#include <linux/smp.h>
#include <linux/topology.h>
#include <linux/ktime.h>
#include <linux/sched.h>
#include <linux/uidgid.h>
#include <linux/filter.h>
#include <linux/ctype.h>
#include <linux/jiffies.h>
#include <linux/pid_namespace.h>
#include <linux/proc_ns.h>

#include "../../lib/kstrtox.h"

/* If kernel subsystem is allowing eBPF programs to call this function,
 * inside its own verifier_ops->get_func_proto() callback it should return
 * bpf_map_lookup_elem_proto, so that verifier can properly check the
 * arguments.
 *
 * Different map implementations will rely on rcu in map methods
 * lookup/update/delete, therefore eBPF programs must run under rcu lock
 * if program is allowed to access maps, so check rcu_read_lock_held in
 * all three functions.
 */
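/* Illustrative sketch, not part of the original file: a subsystem exposes
 * these protos to its programs from the verifier_ops->get_func_proto()
 * callback, roughly as below. The name "example_get_func_proto" is
 * hypothetical; real callbacks live in the individual subsystems.
 */
static inline const struct bpf_func_proto *
example_get_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog)
{
	switch (func_id) {
	case BPF_FUNC_map_lookup_elem:
		return &bpf_map_lookup_elem_proto;
	case BPF_FUNC_map_update_elem:
		return &bpf_map_update_elem_proto;
	case BPF_FUNC_map_delete_elem:
		return &bpf_map_delete_elem_proto;
	default:
		return NULL;
	}
}
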
BPF_CALL_2(bpf_map_lookup_elem, struct bpf_map *, map, void *, key)
{
	WARN_ON_ONCE(!rcu_read_lock_held());
	return (unsigned long) map->ops->map_lookup_elem(map, key);
}

const struct bpf_func_proto bpf_map_lookup_elem_proto = {
	.func		= bpf_map_lookup_elem,
	.gpl_only	= false,
	.pkt_access	= true,
	.ret_type	= RET_PTR_TO_MAP_VALUE_OR_NULL,
	.arg1_type	= ARG_CONST_MAP_PTR,
	.arg2_type	= ARG_PTR_TO_MAP_KEY,
};

BPF_CALL_4(bpf_map_update_elem, struct bpf_map *, map, void *, key,
	   void *, value, u64, flags)
{
	WARN_ON_ONCE(!rcu_read_lock_held());
	return map->ops->map_update_elem(map, key, value, flags);
}

const struct bpf_func_proto bpf_map_update_elem_proto = {
	.func		= bpf_map_update_elem,
	.gpl_only	= false,
	.pkt_access	= true,
	.ret_type	= RET_INTEGER,
	.arg1_type	= ARG_CONST_MAP_PTR,
	.arg2_type	= ARG_PTR_TO_MAP_KEY,
	.arg3_type	= ARG_PTR_TO_MAP_VALUE,
	.arg4_type	= ARG_ANYTHING,
};

BPF_CALL_2(bpf_map_delete_elem, struct bpf_map *, map, void *, key)
{
	WARN_ON_ONCE(!rcu_read_lock_held());
	return map->ops->map_delete_elem(map, key);
}

const struct bpf_func_proto bpf_map_delete_elem_proto = {
	.func		= bpf_map_delete_elem,
	.gpl_only	= false,
	.pkt_access	= true,
	.ret_type	= RET_INTEGER,
	.arg1_type	= ARG_CONST_MAP_PTR,
	.arg2_type	= ARG_PTR_TO_MAP_KEY,
};

BPF_CALL_3(bpf_map_push_elem, struct bpf_map *, map, void *, value, u64, flags)
{
	return map->ops->map_push_elem(map, value, flags);
}

const struct bpf_func_proto bpf_map_push_elem_proto = {
	.func		= bpf_map_push_elem,
	.gpl_only	= false,
	.pkt_access	= true,
	.ret_type	= RET_INTEGER,
	.arg1_type	= ARG_CONST_MAP_PTR,
	.arg2_type	= ARG_PTR_TO_MAP_VALUE,
	.arg3_type	= ARG_ANYTHING,
};

BPF_CALL_2(bpf_map_pop_elem, struct bpf_map *, map, void *, value)
{
	return map->ops->map_pop_elem(map, value);
}

const struct bpf_func_proto bpf_map_pop_elem_proto = {
	.func		= bpf_map_pop_elem,
	.gpl_only	= false,
	.ret_type	= RET_INTEGER,
	.arg1_type	= ARG_CONST_MAP_PTR,
	.arg2_type	= ARG_PTR_TO_UNINIT_MAP_VALUE,
};

BPF_CALL_2(bpf_map_peek_elem, struct bpf_map *, map, void *, value)
{
	return map->ops->map_peek_elem(map, value);
}

const struct bpf_func_proto bpf_map_peek_elem_proto = {
	.func		= bpf_map_peek_elem,
	.gpl_only	= false,
	.ret_type	= RET_INTEGER,
	.arg1_type	= ARG_CONST_MAP_PTR,
	.arg2_type	= ARG_PTR_TO_UNINIT_MAP_VALUE,
};

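/* Usage sketch (illustrative, BPF program side, not built in this file):
 * queue/stack maps are keyless, so push/pop/peek take only a value. The
 * map "queue_map" is a hypothetical BPF_MAP_TYPE_QUEUE owned by the program:
 *
 *	__u64 val = 42;
 *
 *	if (!bpf_map_push_elem(&queue_map, &val, 0))
 *		bpf_map_pop_elem(&queue_map, &val);
 */
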
const struct bpf_func_proto bpf_get_prandom_u32_proto = {
	.func		= bpf_user_rnd_u32,
	.gpl_only	= false,
	.ret_type	= RET_INTEGER,
};

BPF_CALL_0(bpf_get_smp_processor_id)
{
	return smp_processor_id();
}

const struct bpf_func_proto bpf_get_smp_processor_id_proto = {
	.func		= bpf_get_smp_processor_id,
	.gpl_only	= false,
	.ret_type	= RET_INTEGER,
};

BPF_CALL_0(bpf_get_numa_node_id)
{
	return numa_node_id();
}

const struct bpf_func_proto bpf_get_numa_node_id_proto = {
	.func		= bpf_get_numa_node_id,
	.gpl_only	= false,
	.ret_type	= RET_INTEGER,
};

BPF_CALL_0(bpf_ktime_get_ns)
{
	/* NMI safe access to clock monotonic */
	return ktime_get_mono_fast_ns();
}

const struct bpf_func_proto bpf_ktime_get_ns_proto = {
	.func		= bpf_ktime_get_ns,
	.gpl_only	= true,
	.ret_type	= RET_INTEGER,
};

BPF_CALL_0(bpf_get_current_pid_tgid)
{
	struct task_struct *task = current;

	if (unlikely(!task))
		return -EINVAL;

	return (u64) task->tgid << 32 | task->pid;
}

const struct bpf_func_proto bpf_get_current_pid_tgid_proto = {
	.func		= bpf_get_current_pid_tgid,
	.gpl_only	= false,
	.ret_type	= RET_INTEGER,
};

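/* Usage sketch (illustrative, BPF program side, not built in this file):
 * the return value packs tgid into the upper 32 bits and the thread's pid
 * into the lower 32 bits:
 *
 *	__u64 id = bpf_get_current_pid_tgid();
 *	__u32 tgid = id >> 32;
 *	__u32 pid = (__u32)id;
 */
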
BPF_CALL_0(bpf_get_current_uid_gid)
{
	struct task_struct *task = current;
	kuid_t uid;
	kgid_t gid;

	if (unlikely(!task))
		return -EINVAL;

	current_uid_gid(&uid, &gid);
	return (u64) from_kgid(&init_user_ns, gid) << 32 |
		     from_kuid(&init_user_ns, uid);
}

const struct bpf_func_proto bpf_get_current_uid_gid_proto = {
	.func		= bpf_get_current_uid_gid,
	.gpl_only	= false,
	.ret_type	= RET_INTEGER,
};

BPF_CALL_2(bpf_get_current_comm, char *, buf, u32, size)
{
	struct task_struct *task = current;

	if (unlikely(!task))
		goto err_clear;

	strncpy(buf, task->comm, size);

	/* Verifier guarantees that size > 0. For task->comm exceeding
	 * size, guarantee that buf is %NUL-terminated. Unconditionally
	 * done here to save the size test.
	 */
	buf[size - 1] = 0;
	return 0;
err_clear:
	memset(buf, 0, size);
	return -EINVAL;
}

const struct bpf_func_proto bpf_get_current_comm_proto = {
	.func		= bpf_get_current_comm,
	.gpl_only	= false,
	.ret_type	= RET_INTEGER,
	.arg1_type	= ARG_PTR_TO_UNINIT_MEM,
	.arg2_type	= ARG_CONST_SIZE,
};

#if defined(CONFIG_QUEUED_SPINLOCKS) || defined(CONFIG_BPF_ARCH_SPINLOCK)

static inline void __bpf_spin_lock(struct bpf_spin_lock *lock)
{
	arch_spinlock_t *l = (void *)lock;
	union {
		__u32 val;
		arch_spinlock_t lock;
	} u = { .lock = __ARCH_SPIN_LOCK_UNLOCKED };

	compiletime_assert(u.val == 0, "__ARCH_SPIN_LOCK_UNLOCKED not 0");
	BUILD_BUG_ON(sizeof(*l) != sizeof(__u32));
	BUILD_BUG_ON(sizeof(*lock) != sizeof(__u32));
	arch_spin_lock(l);
}

static inline void __bpf_spin_unlock(struct bpf_spin_lock *lock)
{
	arch_spinlock_t *l = (void *)lock;

	arch_spin_unlock(l);
}

#else

static inline void __bpf_spin_lock(struct bpf_spin_lock *lock)
{
	atomic_t *l = (void *)lock;

	BUILD_BUG_ON(sizeof(*l) != sizeof(*lock));
	do {
		/* Spin on a plain read first (test-and-test-and-set) to
		 * avoid hammering the cacheline with atomic xchg.
		 */
		atomic_cond_read_relaxed(l, !VAL);
	} while (atomic_xchg(l, 1));
}

static inline void __bpf_spin_unlock(struct bpf_spin_lock *lock)
{
	atomic_t *l = (void *)lock;

	atomic_set_release(l, 0);
}

#endif

static DEFINE_PER_CPU(unsigned long, irqsave_flags);

notrace BPF_CALL_1(bpf_spin_lock, struct bpf_spin_lock *, lock)
{
	unsigned long flags;

	local_irq_save(flags);
	__bpf_spin_lock(lock);
	__this_cpu_write(irqsave_flags, flags);
	return 0;
}

const struct bpf_func_proto bpf_spin_lock_proto = {
	.func		= bpf_spin_lock,
	.gpl_only	= false,
	.ret_type	= RET_VOID,
	.arg1_type	= ARG_PTR_TO_SPIN_LOCK,
};

notrace BPF_CALL_1(bpf_spin_unlock, struct bpf_spin_lock *, lock)
{
	unsigned long flags;

	flags = __this_cpu_read(irqsave_flags);
	__bpf_spin_unlock(lock);
	local_irq_restore(flags);
	return 0;
}

const struct bpf_func_proto bpf_spin_unlock_proto = {
	.func		= bpf_spin_unlock,
	.gpl_only	= false,
	.ret_type	= RET_VOID,
	.arg1_type	= ARG_PTR_TO_SPIN_LOCK,
};

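/* Usage sketch (illustrative, BPF program side, not built in this file):
 * the verifier only accepts a bpf_spin_lock embedded in a map value, and
 * enforces that every lock is paired with an unlock in the same program:
 *
 *	struct val { struct bpf_spin_lock lock; int cnt; } *v;
 *
 *	v = bpf_map_lookup_elem(&map, &key);
 *	if (v) {
 *		bpf_spin_lock(&v->lock);
 *		v->cnt++;
 *		bpf_spin_unlock(&v->lock);
 *	}
 */
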
void copy_map_value_locked(struct bpf_map *map, void *dst, void *src,
			   bool lock_src)
{
	struct bpf_spin_lock *lock;

	if (lock_src)
		lock = src + map->spin_lock_off;
	else
		lock = dst + map->spin_lock_off;
	preempt_disable();
	____bpf_spin_lock(lock);
	copy_map_value(map, dst, src);
	____bpf_spin_unlock(lock);
	preempt_enable();
}

BPF_CALL_0(bpf_jiffies64)
{
	return get_jiffies_64();
}

const struct bpf_func_proto bpf_jiffies64_proto = {
	.func		= bpf_jiffies64,
	.gpl_only	= false,
	.ret_type	= RET_INTEGER,
};

#ifdef CONFIG_CGROUPS
BPF_CALL_0(bpf_get_current_cgroup_id)
{
	struct cgroup *cgrp = task_dfl_cgroup(current);

	return cgroup_id(cgrp);
}

const struct bpf_func_proto bpf_get_current_cgroup_id_proto = {
	.func		= bpf_get_current_cgroup_id,
	.gpl_only	= false,
	.ret_type	= RET_INTEGER,
};

BPF_CALL_1(bpf_get_current_ancestor_cgroup_id, int, ancestor_level)
{
	struct cgroup *cgrp = task_dfl_cgroup(current);
	struct cgroup *ancestor;

	ancestor = cgroup_ancestor(cgrp, ancestor_level);
	if (!ancestor)
		return 0;
	return cgroup_id(ancestor);
}

const struct bpf_func_proto bpf_get_current_ancestor_cgroup_id_proto = {
	.func		= bpf_get_current_ancestor_cgroup_id,
	.gpl_only	= false,
	.ret_type	= RET_INTEGER,
	.arg1_type	= ARG_ANYTHING,
};

#ifdef CONFIG_CGROUP_BPF
DECLARE_PER_CPU(struct bpf_cgroup_storage*,
		bpf_cgroup_storage[MAX_BPF_CGROUP_STORAGE_TYPE]);

BPF_CALL_2(bpf_get_local_storage, struct bpf_map *, map, u64, flags)
{
	/* flags argument is not used now,
	 * but provides an ability to extend the API.
	 * verifier checks that its value is correct.
	 */
	enum bpf_cgroup_storage_type stype = cgroup_storage_type(map);
	struct bpf_cgroup_storage *storage;
	void *ptr;

	storage = this_cpu_read(bpf_cgroup_storage[stype]);

	if (stype == BPF_CGROUP_STORAGE_SHARED)
		ptr = &READ_ONCE(storage->buf)->data[0];
	else
		ptr = this_cpu_ptr(storage->percpu_buf);

	return (unsigned long)ptr;
}

const struct bpf_func_proto bpf_get_local_storage_proto = {
	.func		= bpf_get_local_storage,
	.gpl_only	= false,
	.ret_type	= RET_PTR_TO_MAP_VALUE,
	.arg1_type	= ARG_CONST_MAP_PTR,
	.arg2_type	= ARG_ANYTHING,
};
#endif /* CONFIG_CGROUP_BPF */
#endif /* CONFIG_CGROUPS */

#define BPF_STRTOX_BASE_MASK 0x1F

static int __bpf_strtoull(const char *buf, size_t buf_len, u64 flags,
			  unsigned long long *res, bool *is_negative)
{
	unsigned int base = flags & BPF_STRTOX_BASE_MASK;
	const char *cur_buf = buf;
	size_t cur_len = buf_len;
	unsigned int consumed;
	size_t val_len;
	char str[64];

	if (!buf || !buf_len || !res || !is_negative)
		return -EINVAL;

	if (base != 0 && base != 8 && base != 10 && base != 16)
		return -EINVAL;

	if (flags & ~BPF_STRTOX_BASE_MASK)
		return -EINVAL;

	while (cur_buf < buf + buf_len && isspace(*cur_buf))
		++cur_buf;

	*is_negative = (cur_buf < buf + buf_len && *cur_buf == '-');
	if (*is_negative)
		++cur_buf;

	consumed = cur_buf - buf;
	cur_len -= consumed;
	if (!cur_len)
		return -EINVAL;

	/* Copy into a local NUL-terminated buffer, since the source buffer
	 * is not guaranteed to be NUL-terminated.
	 */
	cur_len = min(cur_len, sizeof(str) - 1);
	memcpy(str, cur_buf, cur_len);
	str[cur_len] = '\0';
	cur_buf = str;

	cur_buf = _parse_integer_fixup_radix(cur_buf, &base);
	val_len = _parse_integer(cur_buf, base, res);

	if (val_len & KSTRTOX_OVERFLOW)
		return -ERANGE;

	if (val_len == 0)
		return -EINVAL;

	cur_buf += val_len;
	consumed += cur_buf - str;

	return consumed;
}

static int __bpf_strtoll(const char *buf, size_t buf_len, u64 flags,
			 long long *res)
{
	unsigned long long _res;
	bool is_negative;
	int err;

	err = __bpf_strtoull(buf, buf_len, flags, &_res, &is_negative);
	if (err < 0)
		return err;
	if (is_negative) {
		if ((long long)-_res > 0)
			return -ERANGE;
		*res = -_res;
	} else {
		if ((long long)_res < 0)
			return -ERANGE;
		*res = _res;
	}
	return err;
}

BPF_CALL_4(bpf_strtol, const char *, buf, size_t, buf_len, u64, flags,
	   long *, res)
{
	long long _res;
	int err;

	err = __bpf_strtoll(buf, buf_len, flags, &_res);
	if (err < 0)
		return err;
	if (_res != (long)_res)
		return -ERANGE;
	*res = _res;
	return err;
}

const struct bpf_func_proto bpf_strtol_proto = {
	.func		= bpf_strtol,
	.gpl_only	= false,
	.ret_type	= RET_INTEGER,
	.arg1_type	= ARG_PTR_TO_MEM,
	.arg2_type	= ARG_CONST_SIZE,
	.arg3_type	= ARG_ANYTHING,
	.arg4_type	= ARG_PTR_TO_LONG,
};

BPF_CALL_4(bpf_strtoul, const char *, buf, size_t, buf_len, u64, flags,
	   unsigned long *, res)
{
	unsigned long long _res;
	bool is_negative;
	int err;

	err = __bpf_strtoull(buf, buf_len, flags, &_res, &is_negative);
	if (err < 0)
		return err;
	if (is_negative)
		return -EINVAL;
	if (_res != (unsigned long)_res)
		return -ERANGE;
	*res = _res;
	return err;
}

const struct bpf_func_proto bpf_strtoul_proto = {
	.func		= bpf_strtoul,
	.gpl_only	= false,
	.ret_type	= RET_INTEGER,
	.arg1_type	= ARG_PTR_TO_MEM,
	.arg2_type	= ARG_CONST_SIZE,
	.arg3_type	= ARG_ANYTHING,
	.arg4_type	= ARG_PTR_TO_LONG,
};

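/* Usage sketch (illustrative, BPF program side, not built in this file):
 * the low five bits of "flags" select the base; base 0 auto-detects an
 * "0x"/"0" prefix like kstrtoul(). On success the return value is the
 * number of characters consumed. Assuming str points to the 5 bytes
 * "-0x1f":
 *
 *	long num;
 *
 *	if (bpf_strtol(str, 5, 0, &num) == 5)
 *		do_something(num);	// num == -31; do_something() is hypothetical
 */
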
BPF_CALL_4(bpf_get_ns_current_pid_tgid, u64, dev, u64, ino,
	   struct bpf_pidns_info *, nsdata, u32, size)
{
	struct task_struct *task = current;
	struct pid_namespace *pidns;
	int err = -EINVAL;

	if (unlikely(size != sizeof(struct bpf_pidns_info)))
		goto clear;

	if (unlikely((u64)(dev_t)dev != dev))
		goto clear;

	if (unlikely(!task))
		goto clear;

	pidns = task_active_pid_ns(task);
	if (unlikely(!pidns)) {
		err = -ENOENT;
		goto clear;
	}

	if (!ns_match(&pidns->ns, (dev_t)dev, ino))
		goto clear;

	nsdata->pid = task_pid_nr_ns(task, pidns);
	nsdata->tgid = task_tgid_nr_ns(task, pidns);
	return 0;
clear:
	memset((void *)nsdata, 0, (size_t) size);
	return err;
}

const struct bpf_func_proto bpf_get_ns_current_pid_tgid_proto = {
	.func		= bpf_get_ns_current_pid_tgid,
	.gpl_only	= false,
	.ret_type	= RET_INTEGER,
	.arg1_type	= ARG_ANYTHING,
	.arg2_type	= ARG_ANYTHING,
	.arg3_type	= ARG_PTR_TO_UNINIT_MEM,
	.arg4_type	= ARG_CONST_SIZE,
};

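/* Usage sketch (illustrative, not built in this file): userspace typically
 * stat()s /proc/self/ns/pid and hands st_dev/st_ino to the program, which
 * then reads ids relative to that pid namespace. bpf_printk() is the usual
 * libbpf convenience macro; dev and ino are assumed to come from the loader:
 *
 *	struct bpf_pidns_info ns;
 *
 *	if (!bpf_get_ns_current_pid_tgid(dev, ino, &ns, sizeof(ns)))
 *		bpf_printk("pid=%u tgid=%u", ns.pid, ns.tgid);
 */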