kernel/bpf/helpers.c
// SPDX-License-Identifier: GPL-2.0-only
/* Copyright (c) 2011-2014 PLUMgrid, http://plumgrid.com
 */
#include <linux/bpf.h>
#include <linux/bpf-cgroup.h>
#include <linux/rcupdate.h>
#include <linux/random.h>
#include <linux/smp.h>
#include <linux/topology.h>
#include <linux/ktime.h>
#include <linux/sched.h>
#include <linux/uidgid.h>
#include <linux/filter.h>
#include <linux/ctype.h>
#include <linux/jiffies.h>
#include <linux/pid_namespace.h>
#include <linux/proc_ns.h>
#include <linux/security.h>

#include "../../lib/kstrtox.h"

/* If a kernel subsystem allows eBPF programs to call this function, it
 * should return bpf_map_lookup_elem_proto from its own
 * verifier_ops->get_func_proto() callback, so that the verifier can
 * properly check the arguments.
 *
 * Different map implementations rely on rcu in the map methods
 * lookup/update/delete, therefore eBPF programs must run under an rcu lock
 * if a program is allowed to access maps, so check rcu_read_lock_held() in
 * all three functions.
 */
BPF_CALL_2(bpf_map_lookup_elem, struct bpf_map *, map, void *, key)
{
	WARN_ON_ONCE(!rcu_read_lock_held() && !rcu_read_lock_bh_held());
	return (unsigned long) map->ops->map_lookup_elem(map, key);
}

const struct bpf_func_proto bpf_map_lookup_elem_proto = {
	.func		= bpf_map_lookup_elem,
	.gpl_only	= false,
	.pkt_access	= true,
	.ret_type	= RET_PTR_TO_MAP_VALUE_OR_NULL,
	.arg1_type	= ARG_CONST_MAP_PTR,
	.arg2_type	= ARG_PTR_TO_MAP_KEY,
};
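
/* Usage sketch (illustrative, not part of this file): a subsystem exposes
 * the map helpers to its program type by returning these protos from its
 * bpf_verifier_ops->get_func_proto() callback, falling back to
 * bpf_base_func_proto() (defined at the bottom of this file) for the
 * generic helpers. The function name is made up for the example.
 *
 *	static const struct bpf_func_proto *
 *	example_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog)
 *	{
 *		switch (func_id) {
 *		case BPF_FUNC_map_lookup_elem:
 *			return &bpf_map_lookup_elem_proto;
 *		case BPF_FUNC_map_update_elem:
 *			return &bpf_map_update_elem_proto;
 *		default:
 *			return bpf_base_func_proto(func_id);
 *		}
 *	}
 */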

BPF_CALL_4(bpf_map_update_elem, struct bpf_map *, map, void *, key,
	   void *, value, u64, flags)
{
	WARN_ON_ONCE(!rcu_read_lock_held() && !rcu_read_lock_bh_held());
	return map->ops->map_update_elem(map, key, value, flags);
}

const struct bpf_func_proto bpf_map_update_elem_proto = {
	.func		= bpf_map_update_elem,
	.gpl_only	= false,
	.pkt_access	= true,
	.ret_type	= RET_INTEGER,
	.arg1_type	= ARG_CONST_MAP_PTR,
	.arg2_type	= ARG_PTR_TO_MAP_KEY,
	.arg3_type	= ARG_PTR_TO_MAP_VALUE,
	.arg4_type	= ARG_ANYTHING,
};

BPF_CALL_2(bpf_map_delete_elem, struct bpf_map *, map, void *, key)
{
	WARN_ON_ONCE(!rcu_read_lock_held() && !rcu_read_lock_bh_held());
	return map->ops->map_delete_elem(map, key);
}

const struct bpf_func_proto bpf_map_delete_elem_proto = {
	.func		= bpf_map_delete_elem,
	.gpl_only	= false,
	.pkt_access	= true,
	.ret_type	= RET_INTEGER,
	.arg1_type	= ARG_CONST_MAP_PTR,
	.arg2_type	= ARG_PTR_TO_MAP_KEY,
};

BPF_CALL_3(bpf_map_push_elem, struct bpf_map *, map, void *, value, u64, flags)
{
	return map->ops->map_push_elem(map, value, flags);
}

const struct bpf_func_proto bpf_map_push_elem_proto = {
	.func		= bpf_map_push_elem,
	.gpl_only	= false,
	.pkt_access	= true,
	.ret_type	= RET_INTEGER,
	.arg1_type	= ARG_CONST_MAP_PTR,
	.arg2_type	= ARG_PTR_TO_MAP_VALUE,
	.arg3_type	= ARG_ANYTHING,
};

BPF_CALL_2(bpf_map_pop_elem, struct bpf_map *, map, void *, value)
{
	return map->ops->map_pop_elem(map, value);
}

const struct bpf_func_proto bpf_map_pop_elem_proto = {
	.func		= bpf_map_pop_elem,
	.gpl_only	= false,
	.ret_type	= RET_INTEGER,
	.arg1_type	= ARG_CONST_MAP_PTR,
	.arg2_type	= ARG_PTR_TO_UNINIT_MAP_VALUE,
};

BPF_CALL_2(bpf_map_peek_elem, struct bpf_map *, map, void *, value)
{
	return map->ops->map_peek_elem(map, value);
}

const struct bpf_func_proto bpf_map_peek_elem_proto = {
	.func		= bpf_map_peek_elem,
	.gpl_only	= false,
	.ret_type	= RET_INTEGER,
	.arg1_type	= ARG_CONST_MAP_PTR,
	.arg2_type	= ARG_PTR_TO_UNINIT_MAP_VALUE,
};
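
/* Usage sketch (illustrative, not part of this file): push/pop/peek back
 * BPF_MAP_TYPE_QUEUE and BPF_MAP_TYPE_STACK. A BPF program built with
 * libbpf's <bpf/bpf_helpers.h> might use a queue like this (the map name
 * is made up for the example):
 *
 *	struct {
 *		__uint(type, BPF_MAP_TYPE_QUEUE);
 *		__uint(max_entries, 64);
 *		__type(value, __u64);
 *	} example_queue SEC(".maps");
 *
 *	__u64 v = 42;
 *
 *	bpf_map_push_elem(&example_queue, &v, BPF_ANY);
 *	if (bpf_map_pop_elem(&example_queue, &v) == 0)
 *		bpf_printk("popped %llu", v);
 */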

const struct bpf_func_proto bpf_get_prandom_u32_proto = {
	.func		= bpf_user_rnd_u32,
	.gpl_only	= false,
	.ret_type	= RET_INTEGER,
};

BPF_CALL_0(bpf_get_smp_processor_id)
{
	return smp_processor_id();
}

const struct bpf_func_proto bpf_get_smp_processor_id_proto = {
	.func		= bpf_get_smp_processor_id,
	.gpl_only	= false,
	.ret_type	= RET_INTEGER,
};

BPF_CALL_0(bpf_get_numa_node_id)
{
	return numa_node_id();
}

const struct bpf_func_proto bpf_get_numa_node_id_proto = {
	.func		= bpf_get_numa_node_id,
	.gpl_only	= false,
	.ret_type	= RET_INTEGER,
};

BPF_CALL_0(bpf_ktime_get_ns)
{
	/* NMI safe access to clock monotonic */
	return ktime_get_mono_fast_ns();
}

const struct bpf_func_proto bpf_ktime_get_ns_proto = {
	.func		= bpf_ktime_get_ns,
	.gpl_only	= false,
	.ret_type	= RET_INTEGER,
};

BPF_CALL_0(bpf_ktime_get_boot_ns)
{
	/* NMI safe access to clock boottime */
	return ktime_get_boot_fast_ns();
}

const struct bpf_func_proto bpf_ktime_get_boot_ns_proto = {
	.func		= bpf_ktime_get_boot_ns,
	.gpl_only	= false,
	.ret_type	= RET_INTEGER,
};

BPF_CALL_0(bpf_ktime_get_coarse_ns)
{
	return ktime_get_coarse_ns();
}

const struct bpf_func_proto bpf_ktime_get_coarse_ns_proto = {
	.func		= bpf_ktime_get_coarse_ns,
	.gpl_only	= false,
	.ret_type	= RET_INTEGER,
};

BPF_CALL_0(bpf_get_current_pid_tgid)
{
	struct task_struct *task = current;

	if (unlikely(!task))
		return -EINVAL;

	return (u64) task->tgid << 32 | task->pid;
}

const struct bpf_func_proto bpf_get_current_pid_tgid_proto = {
	.func		= bpf_get_current_pid_tgid,
	.gpl_only	= false,
	.ret_type	= RET_INTEGER,
};
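
/* Usage sketch (illustrative, not part of this file): the returned u64
 * packs the thread group id (userspace "PID") in the upper 32 bits and
 * the thread id (userspace "TID") in the lower 32 bits, so a BPF program
 * typically unpacks it like this:
 *
 *	__u64 pid_tgid = bpf_get_current_pid_tgid();
 *	__u32 tgid = pid_tgid >> 32;
 *	__u32 tid = (__u32)pid_tgid;
 */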

BPF_CALL_0(bpf_get_current_uid_gid)
{
	struct task_struct *task = current;
	kuid_t uid;
	kgid_t gid;

	if (unlikely(!task))
		return -EINVAL;

	current_uid_gid(&uid, &gid);
	return (u64) from_kgid(&init_user_ns, gid) << 32 |
		     from_kuid(&init_user_ns, uid);
}

const struct bpf_func_proto bpf_get_current_uid_gid_proto = {
	.func		= bpf_get_current_uid_gid,
	.gpl_only	= false,
	.ret_type	= RET_INTEGER,
};

BPF_CALL_2(bpf_get_current_comm, char *, buf, u32, size)
{
	struct task_struct *task = current;

	if (unlikely(!task))
		goto err_clear;

	strncpy(buf, task->comm, size);

	/* Verifier guarantees that size > 0. For task->comm exceeding
	 * size, guarantee that buf is %NUL-terminated. Unconditionally
	 * done here to save the size test.
	 */
	buf[size - 1] = 0;
	return 0;
err_clear:
	memset(buf, 0, size);
	return -EINVAL;
}

const struct bpf_func_proto bpf_get_current_comm_proto = {
	.func		= bpf_get_current_comm,
	.gpl_only	= false,
	.ret_type	= RET_INTEGER,
	.arg1_type	= ARG_PTR_TO_UNINIT_MEM,
	.arg2_type	= ARG_CONST_SIZE,
};
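
/* Usage sketch (illustrative, not part of this file): because arg2 is
 * ARG_CONST_SIZE, the buffer size must be a constant the verifier can
 * see. From a BPF program:
 *
 *	char comm[16];
 *
 *	if (bpf_get_current_comm(comm, sizeof(comm)) == 0)
 *		bpf_printk("running in %s", comm);
 */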

#if defined(CONFIG_QUEUED_SPINLOCKS) || defined(CONFIG_BPF_ARCH_SPINLOCK)

static inline void __bpf_spin_lock(struct bpf_spin_lock *lock)
{
	arch_spinlock_t *l = (void *)lock;
	union {
		__u32 val;
		arch_spinlock_t lock;
	} u = { .lock = __ARCH_SPIN_LOCK_UNLOCKED };

	compiletime_assert(u.val == 0, "__ARCH_SPIN_LOCK_UNLOCKED not 0");
	BUILD_BUG_ON(sizeof(*l) != sizeof(__u32));
	BUILD_BUG_ON(sizeof(*lock) != sizeof(__u32));
	arch_spin_lock(l);
}

static inline void __bpf_spin_unlock(struct bpf_spin_lock *lock)
{
	arch_spinlock_t *l = (void *)lock;

	arch_spin_unlock(l);
}

#else

static inline void __bpf_spin_lock(struct bpf_spin_lock *lock)
{
	atomic_t *l = (void *)lock;

	BUILD_BUG_ON(sizeof(*l) != sizeof(*lock));
	do {
		atomic_cond_read_relaxed(l, !VAL);
	} while (atomic_xchg(l, 1));
}

static inline void __bpf_spin_unlock(struct bpf_spin_lock *lock)
{
	atomic_t *l = (void *)lock;

	atomic_set_release(l, 0);
}

#endif

static DEFINE_PER_CPU(unsigned long, irqsave_flags);

static inline void __bpf_spin_lock_irqsave(struct bpf_spin_lock *lock)
{
	unsigned long flags;

	local_irq_save(flags);
	__bpf_spin_lock(lock);
	__this_cpu_write(irqsave_flags, flags);
}

notrace BPF_CALL_1(bpf_spin_lock, struct bpf_spin_lock *, lock)
{
	__bpf_spin_lock_irqsave(lock);
	return 0;
}

const struct bpf_func_proto bpf_spin_lock_proto = {
	.func		= bpf_spin_lock,
	.gpl_only	= false,
	.ret_type	= RET_VOID,
	.arg1_type	= ARG_PTR_TO_SPIN_LOCK,
};

static inline void __bpf_spin_unlock_irqrestore(struct bpf_spin_lock *lock)
{
	unsigned long flags;

	flags = __this_cpu_read(irqsave_flags);
	__bpf_spin_unlock(lock);
	local_irq_restore(flags);
}

notrace BPF_CALL_1(bpf_spin_unlock, struct bpf_spin_lock *, lock)
{
	__bpf_spin_unlock_irqrestore(lock);
	return 0;
}

const struct bpf_func_proto bpf_spin_unlock_proto = {
	.func		= bpf_spin_unlock,
	.gpl_only	= false,
	.ret_type	= RET_VOID,
	.arg1_type	= ARG_PTR_TO_SPIN_LOCK,
};
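
/* Usage sketch (illustrative, not part of this file): a BPF program embeds
 * struct bpf_spin_lock in a map value and brackets the critical section
 * with the two helpers. The map and struct names are made up.
 *
 *	struct example_val {
 *		struct bpf_spin_lock lock;
 *		__u64 counter;
 *	};
 *
 *	struct example_val *val = bpf_map_lookup_elem(&example_map, &key);
 *	if (val) {
 *		bpf_spin_lock(&val->lock);
 *		val->counter++;
 *		bpf_spin_unlock(&val->lock);
 *	}
 */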

void copy_map_value_locked(struct bpf_map *map, void *dst, void *src,
			   bool lock_src)
{
	struct bpf_spin_lock *lock;

	if (lock_src)
		lock = src + map->spin_lock_off;
	else
		lock = dst + map->spin_lock_off;
	preempt_disable();
	__bpf_spin_lock_irqsave(lock);
	copy_map_value(map, dst, src);
	__bpf_spin_unlock_irqrestore(lock);
	preempt_enable();
}

BPF_CALL_0(bpf_jiffies64)
{
	return get_jiffies_64();
}

const struct bpf_func_proto bpf_jiffies64_proto = {
	.func		= bpf_jiffies64,
	.gpl_only	= false,
	.ret_type	= RET_INTEGER,
};

#ifdef CONFIG_CGROUPS
BPF_CALL_0(bpf_get_current_cgroup_id)
{
	struct cgroup *cgrp;
	u64 cgrp_id;

	rcu_read_lock();
	cgrp = task_dfl_cgroup(current);
	cgrp_id = cgroup_id(cgrp);
	rcu_read_unlock();

	return cgrp_id;
}

const struct bpf_func_proto bpf_get_current_cgroup_id_proto = {
	.func		= bpf_get_current_cgroup_id,
	.gpl_only	= false,
	.ret_type	= RET_INTEGER,
};

BPF_CALL_1(bpf_get_current_ancestor_cgroup_id, int, ancestor_level)
{
	struct cgroup *cgrp;
	struct cgroup *ancestor;
	u64 cgrp_id;

	rcu_read_lock();
	cgrp = task_dfl_cgroup(current);
	ancestor = cgroup_ancestor(cgrp, ancestor_level);
	cgrp_id = ancestor ? cgroup_id(ancestor) : 0;
	rcu_read_unlock();

	return cgrp_id;
}

const struct bpf_func_proto bpf_get_current_ancestor_cgroup_id_proto = {
	.func		= bpf_get_current_ancestor_cgroup_id,
	.gpl_only	= false,
	.ret_type	= RET_INTEGER,
	.arg1_type	= ARG_ANYTHING,
};
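
/* Usage sketch (illustrative, not part of this file): a common pattern is
 * filtering events to a single cgroup. target_cgid is a made-up global
 * that a loader would set before attaching the program:
 *
 *	const volatile __u64 target_cgid;
 *
 *	if (bpf_get_current_cgroup_id() != target_cgid)
 *		return 0;
 */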

#ifdef CONFIG_CGROUP_BPF

BPF_CALL_2(bpf_get_local_storage, struct bpf_map *, map, u64, flags)
{
	/* The flags argument is not used now, but provides room to extend
	 * the API. The verifier checks that its value is correct.
	 */
	enum bpf_cgroup_storage_type stype = cgroup_storage_type(map);
	struct bpf_cgroup_storage *storage;
	struct bpf_cg_run_ctx *ctx;
	void *ptr;

	/* get current cgroup storage from BPF run context */
	ctx = container_of(current->bpf_ctx, struct bpf_cg_run_ctx, run_ctx);
	storage = ctx->prog_item->cgroup_storage[stype];

	if (stype == BPF_CGROUP_STORAGE_SHARED)
		ptr = &READ_ONCE(storage->buf)->data[0];
	else
		ptr = this_cpu_ptr(storage->percpu_buf);

	return (unsigned long)ptr;
}

const struct bpf_func_proto bpf_get_local_storage_proto = {
	.func		= bpf_get_local_storage,
	.gpl_only	= false,
	.ret_type	= RET_PTR_TO_MAP_VALUE,
	.arg1_type	= ARG_CONST_MAP_PTR,
	.arg2_type	= ARG_ANYTHING,
};
#endif

#define BPF_STRTOX_BASE_MASK 0x1F

static int __bpf_strtoull(const char *buf, size_t buf_len, u64 flags,
			  unsigned long long *res, bool *is_negative)
{
	unsigned int base = flags & BPF_STRTOX_BASE_MASK;
	const char *cur_buf = buf;
	size_t cur_len = buf_len;
	unsigned int consumed;
	size_t val_len;
	char str[64];

	if (!buf || !buf_len || !res || !is_negative)
		return -EINVAL;

	if (base != 0 && base != 8 && base != 10 && base != 16)
		return -EINVAL;

	if (flags & ~BPF_STRTOX_BASE_MASK)
		return -EINVAL;

	while (cur_buf < buf + buf_len && isspace(*cur_buf))
		++cur_buf;

	*is_negative = (cur_buf < buf + buf_len && *cur_buf == '-');
	if (*is_negative)
		++cur_buf;

	consumed = cur_buf - buf;
	cur_len -= consumed;
	if (!cur_len)
		return -EINVAL;

	cur_len = min(cur_len, sizeof(str) - 1);
	memcpy(str, cur_buf, cur_len);
	str[cur_len] = '\0';
	cur_buf = str;

	cur_buf = _parse_integer_fixup_radix(cur_buf, &base);
	val_len = _parse_integer(cur_buf, base, res);

	if (val_len & KSTRTOX_OVERFLOW)
		return -ERANGE;

	if (val_len == 0)
		return -EINVAL;

	cur_buf += val_len;
	consumed += cur_buf - str;

	return consumed;
}

static int __bpf_strtoll(const char *buf, size_t buf_len, u64 flags,
			 long long *res)
{
	unsigned long long _res;
	bool is_negative;
	int err;

	err = __bpf_strtoull(buf, buf_len, flags, &_res, &is_negative);
	if (err < 0)
		return err;
	if (is_negative) {
		if ((long long)-_res > 0)
			return -ERANGE;
		*res = -_res;
	} else {
		if ((long long)_res < 0)
			return -ERANGE;
		*res = _res;
	}
	return err;
}

BPF_CALL_4(bpf_strtol, const char *, buf, size_t, buf_len, u64, flags,
	   long *, res)
{
	long long _res;
	int err;

	err = __bpf_strtoll(buf, buf_len, flags, &_res);
	if (err < 0)
		return err;
	if (_res != (long)_res)
		return -ERANGE;
	*res = _res;
	return err;
}

const struct bpf_func_proto bpf_strtol_proto = {
	.func		= bpf_strtol,
	.gpl_only	= false,
	.ret_type	= RET_INTEGER,
	.arg1_type	= ARG_PTR_TO_MEM | MEM_RDONLY,
	.arg2_type	= ARG_CONST_SIZE,
	.arg3_type	= ARG_ANYTHING,
	.arg4_type	= ARG_PTR_TO_LONG,
};

BPF_CALL_4(bpf_strtoul, const char *, buf, size_t, buf_len, u64, flags,
	   unsigned long *, res)
{
	unsigned long long _res;
	bool is_negative;
	int err;

	err = __bpf_strtoull(buf, buf_len, flags, &_res, &is_negative);
	if (err < 0)
		return err;
	if (is_negative)
		return -EINVAL;
	if (_res != (unsigned long)_res)
		return -ERANGE;
	*res = _res;
	return err;
}

const struct bpf_func_proto bpf_strtoul_proto = {
	.func		= bpf_strtoul,
	.gpl_only	= false,
	.ret_type	= RET_INTEGER,
	.arg1_type	= ARG_PTR_TO_MEM | MEM_RDONLY,
	.arg2_type	= ARG_CONST_SIZE,
	.arg3_type	= ARG_ANYTHING,
	.arg4_type	= ARG_PTR_TO_LONG,
};
#endif
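
/* Usage sketch (illustrative, not part of this file): on success
 * bpf_strtol() returns the number of bytes consumed, so callers can parse
 * and then advance past the number. A BPF_PROG_TYPE_CGROUP_SYSCTL program
 * might validate a written value like this:
 *
 *	char buf[16] = {};
 *	long val = 0;
 *
 *	if (bpf_sysctl_get_new_value(ctx, buf, sizeof(buf)) < 0)
 *		return 1;
 *	if (bpf_strtol(buf, sizeof(buf), 0, &val) < 0 || val > 100)
 *		return 0;
 *	return 1;
 */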

BPF_CALL_3(bpf_strncmp, const char *, s1, u32, s1_sz, const char *, s2)
{
	return strncmp(s1, s2, s1_sz);
}

const struct bpf_func_proto bpf_strncmp_proto = {
	.func		= bpf_strncmp,
	.gpl_only	= false,
	.ret_type	= RET_INTEGER,
	.arg1_type	= ARG_PTR_TO_MEM,
	.arg2_type	= ARG_CONST_SIZE,
	.arg3_type	= ARG_PTR_TO_CONST_STR,
};
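
/* Usage sketch (illustrative, not part of this file): arg3 must be a
 * string constant in the BPF program's read-only data, e.g.:
 *
 *	char comm[16];
 *
 *	bpf_get_current_comm(comm, sizeof(comm));
 *	if (bpf_strncmp(comm, 5, "sshd") == 0)
 *		return 0;
 */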

BPF_CALL_4(bpf_get_ns_current_pid_tgid, u64, dev, u64, ino,
	   struct bpf_pidns_info *, nsdata, u32, size)
{
	struct task_struct *task = current;
	struct pid_namespace *pidns;
	int err = -EINVAL;

	if (unlikely(size != sizeof(struct bpf_pidns_info)))
		goto clear;

	if (unlikely((u64)(dev_t)dev != dev))
		goto clear;

	if (unlikely(!task))
		goto clear;

	pidns = task_active_pid_ns(task);
	if (unlikely(!pidns)) {
		err = -ENOENT;
		goto clear;
	}

	if (!ns_match(&pidns->ns, (dev_t)dev, ino))
		goto clear;

	nsdata->pid = task_pid_nr_ns(task, pidns);
	nsdata->tgid = task_tgid_nr_ns(task, pidns);
	return 0;
clear:
	memset((void *)nsdata, 0, (size_t) size);
	return err;
}

const struct bpf_func_proto bpf_get_ns_current_pid_tgid_proto = {
	.func		= bpf_get_ns_current_pid_tgid,
	.gpl_only	= false,
	.ret_type	= RET_INTEGER,
	.arg1_type	= ARG_ANYTHING,
	.arg2_type	= ARG_ANYTHING,
	.arg3_type	= ARG_PTR_TO_UNINIT_MEM,
	.arg4_type	= ARG_CONST_SIZE,
};

static const struct bpf_func_proto bpf_get_raw_smp_processor_id_proto = {
	.func		= bpf_get_raw_cpu_id,
	.gpl_only	= false,
	.ret_type	= RET_INTEGER,
};

BPF_CALL_5(bpf_event_output_data, void *, ctx, struct bpf_map *, map,
	   u64, flags, void *, data, u64, size)
{
	if (unlikely(flags & ~(BPF_F_INDEX_MASK)))
		return -EINVAL;

	return bpf_event_output(map, flags, data, size, NULL, 0, NULL);
}

const struct bpf_func_proto bpf_event_output_data_proto = {
	.func		= bpf_event_output_data,
	.gpl_only	= true,
	.ret_type	= RET_INTEGER,
	.arg1_type	= ARG_PTR_TO_CTX,
	.arg2_type	= ARG_CONST_MAP_PTR,
	.arg3_type	= ARG_ANYTHING,
	.arg4_type	= ARG_PTR_TO_MEM | MEM_RDONLY,
	.arg5_type	= ARG_CONST_SIZE_OR_ZERO,
};

BPF_CALL_3(bpf_copy_from_user, void *, dst, u32, size,
	   const void __user *, user_ptr)
{
	int ret = copy_from_user(dst, user_ptr, size);

	if (unlikely(ret)) {
		memset(dst, 0, size);
		ret = -EFAULT;
	}

	return ret;
}

const struct bpf_func_proto bpf_copy_from_user_proto = {
	.func		= bpf_copy_from_user,
	.gpl_only	= false,
	.ret_type	= RET_INTEGER,
	.arg1_type	= ARG_PTR_TO_UNINIT_MEM,
	.arg2_type	= ARG_CONST_SIZE_OR_ZERO,
	.arg3_type	= ARG_ANYTHING,
};

BPF_CALL_2(bpf_per_cpu_ptr, const void *, ptr, u32, cpu)
{
	if (cpu >= nr_cpu_ids)
		return (unsigned long)NULL;

	return (unsigned long)per_cpu_ptr((const void __percpu *)ptr, cpu);
}

const struct bpf_func_proto bpf_per_cpu_ptr_proto = {
	.func		= bpf_per_cpu_ptr,
	.gpl_only	= false,
	.ret_type	= RET_PTR_TO_MEM_OR_BTF_ID | PTR_MAYBE_NULL | MEM_RDONLY,
	.arg1_type	= ARG_PTR_TO_PERCPU_BTF_ID,
	.arg2_type	= ARG_ANYTHING,
};

BPF_CALL_1(bpf_this_cpu_ptr, const void *, percpu_ptr)
{
	return (unsigned long)this_cpu_ptr((const void __percpu *)percpu_ptr);
}

const struct bpf_func_proto bpf_this_cpu_ptr_proto = {
	.func		= bpf_this_cpu_ptr,
	.gpl_only	= false,
	.ret_type	= RET_PTR_TO_MEM_OR_BTF_ID | MEM_RDONLY,
	.arg1_type	= ARG_PTR_TO_PERCPU_BTF_ID,
};
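
/* Usage sketch (illustrative, not part of this file): arg1 must be a BTF
 * pointer to a percpu kernel symbol, declared in the BPF program as a
 * ksym. For example, using the kernel's percpu bpf_prog_active counter:
 *
 *	extern const int bpf_prog_active __ksym;
 *
 *	const int *active = bpf_per_cpu_ptr(&bpf_prog_active, 0);
 *	if (active)
 *		bpf_printk("cpu0 bpf_prog_active = %d", *active);
 */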

static int bpf_trace_copy_string(char *buf, void *unsafe_ptr, char fmt_ptype,
		size_t bufsz)
{
	void __user *user_ptr = (__force void __user *)unsafe_ptr;

	buf[0] = 0;

	switch (fmt_ptype) {
	case 's':
#ifdef CONFIG_ARCH_HAS_NON_OVERLAPPING_ADDRESS_SPACE
		if ((unsigned long)unsafe_ptr < TASK_SIZE)
			return strncpy_from_user_nofault(buf, user_ptr, bufsz);
		fallthrough;
#endif
	case 'k':
		return strncpy_from_kernel_nofault(buf, unsafe_ptr, bufsz);
	case 'u':
		return strncpy_from_user_nofault(buf, user_ptr, bufsz);
	}

	return -EINVAL;
}

/* Per-cpu temp buffers used by printf-like helpers to store the bprintf binary
 * arguments representation.
 */
#define MAX_BPRINTF_BUF_LEN	512

/* Support executing three nested bprintf helper calls on a given CPU */
#define MAX_BPRINTF_NEST_LEVEL	3
struct bpf_bprintf_buffers {
	char tmp_bufs[MAX_BPRINTF_NEST_LEVEL][MAX_BPRINTF_BUF_LEN];
};
static DEFINE_PER_CPU(struct bpf_bprintf_buffers, bpf_bprintf_bufs);
static DEFINE_PER_CPU(int, bpf_bprintf_nest_level);

static int try_get_fmt_tmp_buf(char **tmp_buf)
{
	struct bpf_bprintf_buffers *bufs;
	int nest_level;

	preempt_disable();
	nest_level = this_cpu_inc_return(bpf_bprintf_nest_level);
	if (WARN_ON_ONCE(nest_level > MAX_BPRINTF_NEST_LEVEL)) {
		this_cpu_dec(bpf_bprintf_nest_level);
		preempt_enable();
		return -EBUSY;
	}
	bufs = this_cpu_ptr(&bpf_bprintf_bufs);
	*tmp_buf = bufs->tmp_bufs[nest_level - 1];

	return 0;
}

void bpf_bprintf_cleanup(void)
{
	if (this_cpu_read(bpf_bprintf_nest_level)) {
		this_cpu_dec(bpf_bprintf_nest_level);
		preempt_enable();
	}
}

/*
 * bpf_bprintf_prepare - Generic pass on format strings for bprintf-like helpers
 *
 * Returns a negative value if fmt is an invalid format string or 0 otherwise.
 *
 * This can be used in two ways:
 * - Format string verification only: when bin_args is NULL
 * - Arguments preparation: in addition to the above verification, it writes in
 *   bin_args a binary representation of arguments usable by bstr_printf where
 *   pointers from BPF have been sanitized.
 *
 * In argument preparation mode, if 0 is returned, safe temporary buffers are
 * allocated and bpf_bprintf_cleanup should be called to free them after use.
 */
int bpf_bprintf_prepare(char *fmt, u32 fmt_size, const u64 *raw_args,
			u32 **bin_args, u32 num_args)
{
	char *unsafe_ptr = NULL, *tmp_buf = NULL, *tmp_buf_end, *fmt_end;
	size_t sizeof_cur_arg, sizeof_cur_ip;
	int err, i, num_spec = 0;
	u64 cur_arg;
	char fmt_ptype, cur_ip[16], ip_spec[] = "%pXX";

	fmt_end = strnchr(fmt, fmt_size, 0);
	if (!fmt_end)
		return -EINVAL;
	fmt_size = fmt_end - fmt;

	if (bin_args) {
		if (num_args && try_get_fmt_tmp_buf(&tmp_buf))
			return -EBUSY;

		tmp_buf_end = tmp_buf + MAX_BPRINTF_BUF_LEN;
		*bin_args = (u32 *)tmp_buf;
	}

	for (i = 0; i < fmt_size; i++) {
		if ((!isprint(fmt[i]) && !isspace(fmt[i])) || !isascii(fmt[i])) {
			err = -EINVAL;
			goto out;
		}

		if (fmt[i] != '%')
			continue;

		if (fmt[i + 1] == '%') {
			i++;
			continue;
		}

		if (num_spec >= num_args) {
			err = -EINVAL;
			goto out;
		}

		/* The string is zero-terminated so if fmt[i] != 0, we can
		 * always access fmt[i + 1], in the worst case it will be a 0
		 */
		i++;

		/* skip optional "[0 +-][num]" width formatting field */
		while (fmt[i] == '0' || fmt[i] == '+' || fmt[i] == '-' ||
		       fmt[i] == ' ')
			i++;
		if (fmt[i] >= '1' && fmt[i] <= '9') {
			i++;
			while (fmt[i] >= '0' && fmt[i] <= '9')
				i++;
		}

		if (fmt[i] == 'p') {
			sizeof_cur_arg = sizeof(long);

			if ((fmt[i + 1] == 'k' || fmt[i + 1] == 'u') &&
			    fmt[i + 2] == 's') {
				fmt_ptype = fmt[i + 1];
				i += 2;
				goto fmt_str;
			}

			if (fmt[i + 1] == 0 || isspace(fmt[i + 1]) ||
			    ispunct(fmt[i + 1]) || fmt[i + 1] == 'K' ||
			    fmt[i + 1] == 'x' || fmt[i + 1] == 's' ||
			    fmt[i + 1] == 'S') {
				/* just kernel pointers */
				if (tmp_buf)
					cur_arg = raw_args[num_spec];
				i++;
				goto nocopy_fmt;
			}

			if (fmt[i + 1] == 'B') {
				if (tmp_buf) {
					err = snprintf(tmp_buf,
						       (tmp_buf_end - tmp_buf),
						       "%pB",
						       (void *)(long)raw_args[num_spec]);
					tmp_buf += (err + 1);
				}

				i++;
				num_spec++;
				continue;
			}

			/* only support "%pI4", "%pi4", "%pI6" and "%pi6". */
			if ((fmt[i + 1] != 'i' && fmt[i + 1] != 'I') ||
			    (fmt[i + 2] != '4' && fmt[i + 2] != '6')) {
				err = -EINVAL;
				goto out;
			}

			i += 2;
			if (!tmp_buf)
				goto nocopy_fmt;

			sizeof_cur_ip = (fmt[i] == '4') ? 4 : 16;
			if (tmp_buf_end - tmp_buf < sizeof_cur_ip) {
				err = -ENOSPC;
				goto out;
			}

			unsafe_ptr = (char *)(long)raw_args[num_spec];
			err = copy_from_kernel_nofault(cur_ip, unsafe_ptr,
						       sizeof_cur_ip);
			if (err < 0)
				memset(cur_ip, 0, sizeof_cur_ip);

			/* hack: bstr_printf expects IP addresses to be
			 * pre-formatted as strings, ironically, the easiest way
			 * to do that is to call snprintf.
			 */
			ip_spec[2] = fmt[i - 1];
			ip_spec[3] = fmt[i];
			err = snprintf(tmp_buf, tmp_buf_end - tmp_buf,
				       ip_spec, &cur_ip);

			tmp_buf += err + 1;
			num_spec++;

			continue;
		} else if (fmt[i] == 's') {
			fmt_ptype = fmt[i];
fmt_str:
			if (fmt[i + 1] != 0 &&
			    !isspace(fmt[i + 1]) &&
			    !ispunct(fmt[i + 1])) {
				err = -EINVAL;
				goto out;
			}

			if (!tmp_buf)
				goto nocopy_fmt;

			if (tmp_buf_end == tmp_buf) {
				err = -ENOSPC;
				goto out;
			}

			unsafe_ptr = (char *)(long)raw_args[num_spec];
			err = bpf_trace_copy_string(tmp_buf, unsafe_ptr,
						    fmt_ptype,
						    tmp_buf_end - tmp_buf);
			if (err < 0) {
				tmp_buf[0] = '\0';
				err = 1;
			}

			tmp_buf += err;
			num_spec++;

			continue;
		} else if (fmt[i] == 'c') {
			if (!tmp_buf)
				goto nocopy_fmt;

			if (tmp_buf_end == tmp_buf) {
				err = -ENOSPC;
				goto out;
			}

			*tmp_buf = raw_args[num_spec];
			tmp_buf++;
			num_spec++;

			continue;
		}

		sizeof_cur_arg = sizeof(int);

		if (fmt[i] == 'l') {
			sizeof_cur_arg = sizeof(long);
			i++;
		}
		if (fmt[i] == 'l') {
			sizeof_cur_arg = sizeof(long long);
			i++;
		}

		if (fmt[i] != 'i' && fmt[i] != 'd' && fmt[i] != 'u' &&
		    fmt[i] != 'x' && fmt[i] != 'X') {
			err = -EINVAL;
			goto out;
		}

		if (tmp_buf)
			cur_arg = raw_args[num_spec];
nocopy_fmt:
		if (tmp_buf) {
			tmp_buf = PTR_ALIGN(tmp_buf, sizeof(u32));
			if (tmp_buf_end - tmp_buf < sizeof_cur_arg) {
				err = -ENOSPC;
				goto out;
			}

			if (sizeof_cur_arg == 8) {
				*(u32 *)tmp_buf = *(u32 *)&cur_arg;
				*(u32 *)(tmp_buf + 4) = *((u32 *)&cur_arg + 1);
			} else {
				*(u32 *)tmp_buf = (u32)(long)cur_arg;
			}
			tmp_buf += sizeof_cur_arg;
		}
		num_spec++;
	}

	err = 0;
out:
	if (err)
		bpf_bprintf_cleanup();
	return err;
}

BPF_CALL_5(bpf_snprintf, char *, str, u32, str_size, char *, fmt,
	   const void *, data, u32, data_len)
{
	int err, num_args;
	u32 *bin_args;

	if (data_len % 8 || data_len > MAX_BPRINTF_VARARGS * 8 ||
	    (data_len && !data))
		return -EINVAL;
	num_args = data_len / 8;

	/* ARG_PTR_TO_CONST_STR guarantees that fmt is zero-terminated so we
	 * can safely give an unbounded size.
	 */
	err = bpf_bprintf_prepare(fmt, UINT_MAX, data, &bin_args, num_args);
	if (err < 0)
		return err;

	err = bstr_printf(str, str_size, fmt, bin_args);

	bpf_bprintf_cleanup();

	return err + 1;
}

const struct bpf_func_proto bpf_snprintf_proto = {
	.func		= bpf_snprintf,
	.gpl_only	= true,
	.ret_type	= RET_INTEGER,
	.arg1_type	= ARG_PTR_TO_MEM_OR_NULL,
	.arg2_type	= ARG_CONST_SIZE_OR_ZERO,
	.arg3_type	= ARG_PTR_TO_CONST_STR,
	.arg4_type	= ARG_PTR_TO_MEM | PTR_MAYBE_NULL | MEM_RDONLY,
	.arg5_type	= ARG_CONST_SIZE_OR_ZERO,
};
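
/* Usage sketch (illustrative, not part of this file): the variadic
 * arguments arrive packed as an array of u64 (data_len = 8 * number of
 * arguments); libbpf's BPF_SNPRINTF() macro builds this array for you.
 * Done by hand it looks like:
 *
 *	char out[64];
 *	__u64 args[] = { bpf_get_smp_processor_id(), 42 };
 *
 *	bpf_snprintf(out, sizeof(out), "cpu=%u val=%u", args, sizeof(args));
 */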

/* BPF map elements can contain 'struct bpf_timer'.
 * Such a map owns all of its BPF timers.
 * 'struct bpf_timer' is allocated as part of map element allocation
 * and it's zero initialized.
 * That space is used to keep 'struct bpf_timer_kern'.
 * bpf_timer_init() allocates 'struct bpf_hrtimer', inits hrtimer, and
 * remembers the 'struct bpf_map *' pointer it's part of.
 * bpf_timer_set_callback() increments prog refcnt and assigns bpf callback_fn.
 * bpf_timer_start() arms the timer.
 * If the user space reference to a map goes to zero at this point,
 * the ops->map_release_uref callback is responsible for cancelling the timers,
 * freeing their memory, and decrementing the progs' refcnts.
 * bpf_timer_cancel() cancels the timer and decrements prog's refcnt.
 * Inner maps can contain bpf timers as well. ops->map_release_uref frees
 * the timers when an inner map is replaced or deleted by user space.
 */
struct bpf_hrtimer {
	struct hrtimer timer;
	struct bpf_map *map;
	struct bpf_prog *prog;
	void __rcu *callback_fn;
	void *value;
};

/* the actual struct hidden inside uapi struct bpf_timer */
struct bpf_timer_kern {
	struct bpf_hrtimer *timer;
	/* bpf_spin_lock is used here instead of spinlock_t to make
	 * sure that it always fits into space reserved by struct bpf_timer
	 * regardless of LOCKDEP and spinlock debug flags.
	 */
	struct bpf_spin_lock lock;
} __attribute__((aligned(8)));

static DEFINE_PER_CPU(struct bpf_hrtimer *, hrtimer_running);

static enum hrtimer_restart bpf_timer_cb(struct hrtimer *hrtimer)
{
	struct bpf_hrtimer *t = container_of(hrtimer, struct bpf_hrtimer, timer);
	struct bpf_map *map = t->map;
	void *value = t->value;
	bpf_callback_t callback_fn;
	void *key;
	u32 idx;

	callback_fn = rcu_dereference_check(t->callback_fn, rcu_read_lock_bh_held());
	if (!callback_fn)
		goto out;

	/* bpf_timer_cb() runs in hrtimer_run_softirq. It doesn't migrate and
	 * cannot be preempted by another bpf_timer_cb() on the same cpu.
	 * Remember the timer this callback is servicing to prevent
	 * deadlock if callback_fn() calls bpf_timer_cancel() or
	 * bpf_map_delete_elem() on the same timer.
	 */
	this_cpu_write(hrtimer_running, t);
	if (map->map_type == BPF_MAP_TYPE_ARRAY) {
		struct bpf_array *array = container_of(map, struct bpf_array, map);

		/* compute the key */
		idx = ((char *)value - array->value) / array->elem_size;
		key = &idx;
	} else { /* hash or lru */
		key = value - round_up(map->key_size, 8);
	}

	callback_fn((u64)(long)map, (u64)(long)key, (u64)(long)value, 0, 0);
	/* The verifier checked that return value is zero. */

	this_cpu_write(hrtimer_running, NULL);
out:
	return HRTIMER_NORESTART;
}

BPF_CALL_3(bpf_timer_init, struct bpf_timer_kern *, timer, struct bpf_map *, map,
	   u64, flags)
{
	clockid_t clockid = flags & (MAX_CLOCKS - 1);
	struct bpf_hrtimer *t;
	int ret = 0;

	BUILD_BUG_ON(MAX_CLOCKS != 16);
	BUILD_BUG_ON(sizeof(struct bpf_timer_kern) > sizeof(struct bpf_timer));
	BUILD_BUG_ON(__alignof__(struct bpf_timer_kern) != __alignof__(struct bpf_timer));

	if (in_nmi())
		return -EOPNOTSUPP;

	if (flags >= MAX_CLOCKS ||
	    /* similar to timerfd except _ALARM variants are not supported */
	    (clockid != CLOCK_MONOTONIC &&
	     clockid != CLOCK_REALTIME &&
	     clockid != CLOCK_BOOTTIME))
		return -EINVAL;
	__bpf_spin_lock_irqsave(&timer->lock);
	t = timer->timer;
	if (t) {
		ret = -EBUSY;
		goto out;
	}
	if (!atomic64_read(&map->usercnt)) {
		/* maps with timers must be either held by user space
		 * or pinned in bpffs.
		 */
		ret = -EPERM;
		goto out;
	}
	/* allocate hrtimer via map_kmalloc to use memcg accounting */
	t = bpf_map_kmalloc_node(map, sizeof(*t), GFP_ATOMIC, map->numa_node);
	if (!t) {
		ret = -ENOMEM;
		goto out;
	}
	t->value = (void *)timer - map->timer_off;
	t->map = map;
	t->prog = NULL;
	rcu_assign_pointer(t->callback_fn, NULL);
	hrtimer_init(&t->timer, clockid, HRTIMER_MODE_REL_SOFT);
	t->timer.function = bpf_timer_cb;
	timer->timer = t;
out:
	__bpf_spin_unlock_irqrestore(&timer->lock);
	return ret;
}

static const struct bpf_func_proto bpf_timer_init_proto = {
	.func		= bpf_timer_init,
	.gpl_only	= true,
	.ret_type	= RET_INTEGER,
	.arg1_type	= ARG_PTR_TO_TIMER,
	.arg2_type	= ARG_CONST_MAP_PTR,
	.arg3_type	= ARG_ANYTHING,
};

BPF_CALL_3(bpf_timer_set_callback, struct bpf_timer_kern *, timer, void *, callback_fn,
	   struct bpf_prog_aux *, aux)
{
	struct bpf_prog *prev, *prog = aux->prog;
	struct bpf_hrtimer *t;
	int ret = 0;

	if (in_nmi())
		return -EOPNOTSUPP;
	__bpf_spin_lock_irqsave(&timer->lock);
	t = timer->timer;
	if (!t) {
		ret = -EINVAL;
		goto out;
	}
	if (!atomic64_read(&t->map->usercnt)) {
		/* maps with timers must be either held by user space
		 * or pinned in bpffs. Otherwise timer might still be
		 * running even when bpf prog is detached and user space
		 * is gone, since map_release_uref won't ever be called.
		 */
		ret = -EPERM;
		goto out;
	}
	prev = t->prog;
	if (prev != prog) {
		/* Bump prog refcnt once. Every bpf_timer_set_callback()
		 * can pick different callback_fn-s within the same prog.
		 */
		prog = bpf_prog_inc_not_zero(prog);
		if (IS_ERR(prog)) {
			ret = PTR_ERR(prog);
			goto out;
		}
		if (prev)
			/* Drop prev prog refcnt when swapping with new prog */
			bpf_prog_put(prev);
		t->prog = prog;
	}
	rcu_assign_pointer(t->callback_fn, callback_fn);
out:
	__bpf_spin_unlock_irqrestore(&timer->lock);
	return ret;
}

static const struct bpf_func_proto bpf_timer_set_callback_proto = {
	.func		= bpf_timer_set_callback,
	.gpl_only	= true,
	.ret_type	= RET_INTEGER,
	.arg1_type	= ARG_PTR_TO_TIMER,
	.arg2_type	= ARG_PTR_TO_FUNC,
};

BPF_CALL_3(bpf_timer_start, struct bpf_timer_kern *, timer, u64, nsecs, u64, flags)
{
	struct bpf_hrtimer *t;
	int ret = 0;

	if (in_nmi())
		return -EOPNOTSUPP;
	if (flags)
		return -EINVAL;
	__bpf_spin_lock_irqsave(&timer->lock);
	t = timer->timer;
	if (!t || !t->prog) {
		ret = -EINVAL;
		goto out;
	}
	hrtimer_start(&t->timer, ns_to_ktime(nsecs), HRTIMER_MODE_REL_SOFT);
out:
	__bpf_spin_unlock_irqrestore(&timer->lock);
	return ret;
}

static const struct bpf_func_proto bpf_timer_start_proto = {
	.func		= bpf_timer_start,
	.gpl_only	= true,
	.ret_type	= RET_INTEGER,
	.arg1_type	= ARG_PTR_TO_TIMER,
	.arg2_type	= ARG_ANYTHING,
	.arg3_type	= ARG_ANYTHING,
};

static void drop_prog_refcnt(struct bpf_hrtimer *t)
{
	struct bpf_prog *prog = t->prog;

	if (prog) {
		bpf_prog_put(prog);
		t->prog = NULL;
		rcu_assign_pointer(t->callback_fn, NULL);
	}
}

BPF_CALL_1(bpf_timer_cancel, struct bpf_timer_kern *, timer)
{
	struct bpf_hrtimer *t;
	int ret = 0;

	if (in_nmi())
		return -EOPNOTSUPP;
	__bpf_spin_lock_irqsave(&timer->lock);
	t = timer->timer;
	if (!t) {
		ret = -EINVAL;
		goto out;
	}
	if (this_cpu_read(hrtimer_running) == t) {
		/* If bpf callback_fn is trying to bpf_timer_cancel()
		 * its own timer the hrtimer_cancel() will deadlock
		 * since it waits for callback_fn to finish
		 */
		ret = -EDEADLK;
		goto out;
	}
	drop_prog_refcnt(t);
out:
	__bpf_spin_unlock_irqrestore(&timer->lock);
	/* Cancel the timer and wait for associated callback to finish
	 * if it was running.
	 */
	ret = ret ?: hrtimer_cancel(&t->timer);
	return ret;
}

static const struct bpf_func_proto bpf_timer_cancel_proto = {
	.func		= bpf_timer_cancel,
	.gpl_only	= true,
	.ret_type	= RET_INTEGER,
	.arg1_type	= ARG_PTR_TO_TIMER,
};
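
/* Usage sketch (illustrative, not part of this file): a BPF program keeps
 * a struct bpf_timer inside a map value and drives it with the helpers
 * above. The map and callback names are made up; the timer below fires
 * once, one second after being armed.
 *
 *	struct elem { struct bpf_timer t; };
 *
 *	static int timer_cb(void *map, int *key, struct elem *val)
 *	{
 *		return 0;
 *	}
 *
 *	struct elem *val = bpf_map_lookup_elem(&example_array, &key);
 *	if (val) {
 *		bpf_timer_init(&val->t, &example_array, CLOCK_MONOTONIC);
 *		bpf_timer_set_callback(&val->t, timer_cb);
 *		bpf_timer_start(&val->t, 1000000000ULL, 0);
 *	}
 */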

/* This function is called by map_delete/update_elem for individual element and
 * by ops->map_release_uref when the user space reference to a map reaches zero.
 */
void bpf_timer_cancel_and_free(void *val)
{
	struct bpf_timer_kern *timer = val;
	struct bpf_hrtimer *t;

	/* Performance optimization: read timer->timer without lock first. */
	if (!READ_ONCE(timer->timer))
		return;

	__bpf_spin_lock_irqsave(&timer->lock);
	/* re-read it under lock */
	t = timer->timer;
	if (!t)
		goto out;
	drop_prog_refcnt(t);
	/* The subsequent bpf_timer_start/cancel() helpers won't be able to use
	 * this timer, since it won't be initialized.
	 */
	timer->timer = NULL;
out:
	__bpf_spin_unlock_irqrestore(&timer->lock);
	if (!t)
		return;
	/* Cancel the timer and wait for callback to complete if it was running.
	 * If hrtimer_cancel() can be safely called it's safe to call kfree(t)
	 * right after for both preallocated and non-preallocated maps.
	 * The timer->timer = NULL was already done and no code path can
	 * see address 't' anymore.
	 *
	 * Check that bpf_map_delete/update_elem() wasn't called from timer
	 * callback_fn. In such case don't call hrtimer_cancel() (since it will
	 * deadlock) and don't call hrtimer_try_to_cancel() (since it will just
	 * return -1). Though callback_fn is still running on this cpu it's
	 * safe to do kfree(t) because bpf_timer_cb() read everything it needed
	 * from 't'. The bpf subprog callback_fn won't be able to access 't',
	 * since timer->timer = NULL was already done. The timer will be
	 * effectively cancelled because bpf_timer_cb() will return
	 * HRTIMER_NORESTART.
	 */
	if (this_cpu_read(hrtimer_running) != t)
		hrtimer_cancel(&t->timer);
	kfree(t);
}

const struct bpf_func_proto bpf_get_current_task_proto __weak;
const struct bpf_func_proto bpf_get_current_task_btf_proto __weak;
const struct bpf_func_proto bpf_probe_read_user_proto __weak;
const struct bpf_func_proto bpf_probe_read_user_str_proto __weak;
const struct bpf_func_proto bpf_probe_read_kernel_proto __weak;
const struct bpf_func_proto bpf_probe_read_kernel_str_proto __weak;
const struct bpf_func_proto bpf_task_pt_regs_proto __weak;

const struct bpf_func_proto *
bpf_base_func_proto(enum bpf_func_id func_id)
{
	switch (func_id) {
	case BPF_FUNC_map_lookup_elem:
		return &bpf_map_lookup_elem_proto;
	case BPF_FUNC_map_update_elem:
		return &bpf_map_update_elem_proto;
	case BPF_FUNC_map_delete_elem:
		return &bpf_map_delete_elem_proto;
	case BPF_FUNC_map_push_elem:
		return &bpf_map_push_elem_proto;
	case BPF_FUNC_map_pop_elem:
		return &bpf_map_pop_elem_proto;
	case BPF_FUNC_map_peek_elem:
		return &bpf_map_peek_elem_proto;
	case BPF_FUNC_get_prandom_u32:
		return &bpf_get_prandom_u32_proto;
	case BPF_FUNC_get_smp_processor_id:
		return &bpf_get_raw_smp_processor_id_proto;
	case BPF_FUNC_get_numa_node_id:
		return &bpf_get_numa_node_id_proto;
	case BPF_FUNC_tail_call:
		return &bpf_tail_call_proto;
	case BPF_FUNC_ktime_get_ns:
		return &bpf_ktime_get_ns_proto;
	case BPF_FUNC_ktime_get_boot_ns:
		return &bpf_ktime_get_boot_ns_proto;
	case BPF_FUNC_ringbuf_output:
		return &bpf_ringbuf_output_proto;
	case BPF_FUNC_ringbuf_reserve:
		return &bpf_ringbuf_reserve_proto;
	case BPF_FUNC_ringbuf_submit:
		return &bpf_ringbuf_submit_proto;
	case BPF_FUNC_ringbuf_discard:
		return &bpf_ringbuf_discard_proto;
	case BPF_FUNC_ringbuf_query:
		return &bpf_ringbuf_query_proto;
	case BPF_FUNC_for_each_map_elem:
		return &bpf_for_each_map_elem_proto;
	case BPF_FUNC_loop:
		return &bpf_loop_proto;
	case BPF_FUNC_strncmp:
		return &bpf_strncmp_proto;
	default:
		break;
	}

	if (!bpf_capable())
		return NULL;

	switch (func_id) {
	case BPF_FUNC_spin_lock:
		return &bpf_spin_lock_proto;
	case BPF_FUNC_spin_unlock:
		return &bpf_spin_unlock_proto;
	case BPF_FUNC_jiffies64:
		return &bpf_jiffies64_proto;
	case BPF_FUNC_per_cpu_ptr:
		return &bpf_per_cpu_ptr_proto;
	case BPF_FUNC_this_cpu_ptr:
		return &bpf_this_cpu_ptr_proto;
	case BPF_FUNC_timer_init:
		return &bpf_timer_init_proto;
	case BPF_FUNC_timer_set_callback:
		return &bpf_timer_set_callback_proto;
	case BPF_FUNC_timer_start:
		return &bpf_timer_start_proto;
	case BPF_FUNC_timer_cancel:
		return &bpf_timer_cancel_proto;
	default:
		break;
	}

	if (!perfmon_capable())
		return NULL;

	switch (func_id) {
	case BPF_FUNC_trace_printk:
		return bpf_get_trace_printk_proto();
	case BPF_FUNC_get_current_task:
		return &bpf_get_current_task_proto;
	case BPF_FUNC_get_current_task_btf:
		return &bpf_get_current_task_btf_proto;
	case BPF_FUNC_probe_read_user:
		return &bpf_probe_read_user_proto;
	case BPF_FUNC_probe_read_kernel:
		return security_locked_down(LOCKDOWN_BPF_READ_KERNEL) < 0 ?
		       NULL : &bpf_probe_read_kernel_proto;
	case BPF_FUNC_probe_read_user_str:
		return &bpf_probe_read_user_str_proto;
	case BPF_FUNC_probe_read_kernel_str:
		return security_locked_down(LOCKDOWN_BPF_READ_KERNEL) < 0 ?
		       NULL : &bpf_probe_read_kernel_str_proto;
	case BPF_FUNC_snprintf_btf:
		return &bpf_snprintf_btf_proto;
	case BPF_FUNC_snprintf:
		return &bpf_snprintf_proto;
	case BPF_FUNC_task_pt_regs:
		return &bpf_task_pt_regs_proto;
	case BPF_FUNC_trace_vprintk:
		return bpf_get_trace_vprintk_proto();
	default:
		return NULL;
	}
}