/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _BPF_CGROUP_H
#define _BPF_CGROUP_H

#include <linux/bpf.h>
#include <linux/errno.h>
#include <linux/jump_label.h>
#include <linux/percpu.h>
#include <linux/percpu-refcount.h>
#include <linux/rbtree.h>
#include <uapi/linux/bpf.h>

struct sock;
struct sockaddr;
struct cgroup;
struct sk_buff;
struct bpf_map;
struct bpf_prog;
struct bpf_sock_ops_kern;
struct bpf_cgroup_storage;
struct ctl_table;
struct ctl_table_header;

#ifdef CONFIG_CGROUP_BPF

extern struct static_key_false cgroup_bpf_enabled_key;
#define cgroup_bpf_enabled static_branch_unlikely(&cgroup_bpf_enabled_key)

DECLARE_PER_CPU(struct bpf_cgroup_storage*,
                bpf_cgroup_storage[MAX_BPF_CGROUP_STORAGE_TYPE]);

#define for_each_cgroup_storage_type(stype) \
        for (stype = 0; stype < MAX_BPF_CGROUP_STORAGE_TYPE; stype++)

struct bpf_cgroup_storage_map;

struct bpf_storage_buffer {
        struct rcu_head rcu;
        char data[];
};

struct bpf_cgroup_storage {
        union {
                struct bpf_storage_buffer *buf;
                void __percpu *percpu_buf;
        };
        struct bpf_cgroup_storage_map *map;
        struct bpf_cgroup_storage_key key;
        struct list_head list_map;
        struct list_head list_cg;
        struct rb_node node;
        struct rcu_head rcu;
};

struct bpf_cgroup_link {
        struct bpf_link link;
        struct cgroup *cgroup;
        enum bpf_attach_type type;
};

struct bpf_prog_list {
        struct list_head node;
        struct bpf_prog *prog;
        struct bpf_cgroup_link *link;
        struct bpf_cgroup_storage *storage[MAX_BPF_CGROUP_STORAGE_TYPE];
};

struct bpf_prog_array;

struct cgroup_bpf {
        /* array of effective progs in this cgroup */
        struct bpf_prog_array __rcu *effective[MAX_BPF_ATTACH_TYPE];

        /* attached progs to this cgroup and attach flags
         * when flags == 0 or BPF_F_ALLOW_OVERRIDE the progs list will
         * have either zero or one element
         * when BPF_F_ALLOW_MULTI the list can have up to BPF_CGROUP_MAX_PROGS
         */
        struct list_head progs[MAX_BPF_ATTACH_TYPE];
        u32 flags[MAX_BPF_ATTACH_TYPE];

        /* list of cgroup shared storages */
        struct list_head storages;

        /* temp storage for effective prog array used by prog_attach/detach */
        struct bpf_prog_array *inactive;

        /* reference counter used to detach bpf programs after cgroup removal */
        struct percpu_ref refcnt;

        /* cgroup_bpf is released using a work queue */
        struct work_struct release_work;
};

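/* Illustrative example of how the attach flags above shape what runs.
 * Assume a hierarchy A -> B (B is a child of A) and one attach type,
 * e.g. BPF_CGROUP_INET_EGRESS:
 *
 *  - A attaches progA and B attaches progB, both with BPF_F_ALLOW_MULTI:
 *    B's effective[] holds both programs and both run for B's tasks.
 *  - A attaches progA with BPF_F_ALLOW_OVERRIDE, B attaches progB:
 *    B's effective[] holds only progB; A's tasks still run progA.
 *  - A attaches progA with flags == 0: attaching in B is rejected.
 *
 * The exact ordering of ancestor programs inside effective[] is an
 * implementation detail of kernel/bpf/cgroup.c.
 */
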
int cgroup_bpf_inherit(struct cgroup *cgrp);
void cgroup_bpf_offline(struct cgroup *cgrp);

int __cgroup_bpf_attach(struct cgroup *cgrp,
                        struct bpf_prog *prog, struct bpf_prog *replace_prog,
                        struct bpf_cgroup_link *link,
                        enum bpf_attach_type type, u32 flags);
int __cgroup_bpf_detach(struct cgroup *cgrp, struct bpf_prog *prog,
                        struct bpf_cgroup_link *link,
                        enum bpf_attach_type type);
int __cgroup_bpf_query(struct cgroup *cgrp, const union bpf_attr *attr,
                       union bpf_attr __user *uattr);

/* Wrapper for __cgroup_bpf_*() protected by cgroup_mutex */
int cgroup_bpf_attach(struct cgroup *cgrp,
                      struct bpf_prog *prog, struct bpf_prog *replace_prog,
                      struct bpf_cgroup_link *link, enum bpf_attach_type type,
                      u32 flags);
int cgroup_bpf_detach(struct cgroup *cgrp, struct bpf_prog *prog,
                      enum bpf_attach_type type);
int cgroup_bpf_query(struct cgroup *cgrp, const union bpf_attr *attr,
                     union bpf_attr __user *uattr);
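
/* Illustrative sketch of a hypothetical in-kernel caller using the locked
 * wrappers above; error handling is abbreviated and the surrounding context
 * (where cgroup_fd and prog_fd come from) is assumed:
 *
 *      struct cgroup *cgrp = cgroup_get_from_fd(cgroup_fd);
 *      struct bpf_prog *prog = bpf_prog_get_type(prog_fd,
 *                                                 BPF_PROG_TYPE_CGROUP_SKB);
 *
 *      if (!IS_ERR(cgrp) && !IS_ERR(prog))
 *              err = cgroup_bpf_attach(cgrp, prog, NULL, NULL,
 *                                      BPF_CGROUP_INET_EGRESS,
 *                                      BPF_F_ALLOW_MULTI);
 *
 * cgroup_bpf_attach() itself takes cgroup_mutex, so the caller only needs
 * to hold references on cgrp and prog.
 */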

int __cgroup_bpf_run_filter_skb(struct sock *sk,
                                struct sk_buff *skb,
                                enum bpf_attach_type type);

int __cgroup_bpf_run_filter_sk(struct sock *sk,
                               enum bpf_attach_type type);

int __cgroup_bpf_run_filter_sock_addr(struct sock *sk,
                                      struct sockaddr *uaddr,
                                      enum bpf_attach_type type,
                                      void *t_ctx);

int __cgroup_bpf_run_filter_sock_ops(struct sock *sk,
                                     struct bpf_sock_ops_kern *sock_ops,
                                     enum bpf_attach_type type);

int __cgroup_bpf_check_dev_permission(short dev_type, u32 major, u32 minor,
                                      short access, enum bpf_attach_type type);

int __cgroup_bpf_run_filter_sysctl(struct ctl_table_header *head,
                                   struct ctl_table *table, int write,
                                   char **buf, size_t *pcount, loff_t *ppos,
                                   enum bpf_attach_type type);

int __cgroup_bpf_run_filter_setsockopt(struct sock *sock, int *level,
                                       int *optname, char __user *optval,
                                       int *optlen, char **kernel_optval);
int __cgroup_bpf_run_filter_getsockopt(struct sock *sk, int level,
                                       int optname, char __user *optval,
                                       int __user *optlen, int max_optlen,
                                       int retval);

static inline enum bpf_cgroup_storage_type cgroup_storage_type(
        struct bpf_map *map)
{
        if (map->map_type == BPF_MAP_TYPE_PERCPU_CGROUP_STORAGE)
                return BPF_CGROUP_STORAGE_PERCPU;

        return BPF_CGROUP_STORAGE_SHARED;
}

static inline void bpf_cgroup_storage_set(struct bpf_cgroup_storage
                                          *storage[MAX_BPF_CGROUP_STORAGE_TYPE])
{
        enum bpf_cgroup_storage_type stype;

        for_each_cgroup_storage_type(stype)
                this_cpu_write(bpf_cgroup_storage[stype], storage[stype]);
}
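
/* Illustrative sketch of the run-time contract of bpf_cgroup_storage_set():
 * before a cgroup program is run, the storage pointers selected for that
 * program are published to the per-cpu bpf_cgroup_storage[] slots so the
 * bpf_get_local_storage() helper can find them, roughly:
 *
 *      preempt_disable();                      // stay on one CPU while running
 *      bpf_cgroup_storage_set(pl->storage);    // pl is a struct bpf_prog_list
 *      ret = BPF_PROG_RUN(pl->prog, ctx);      // prog may call
 *                                              // bpf_get_local_storage()
 *      preempt_enable();
 *
 * The real dispatch lives in the BPF_PROG_RUN_ARRAY machinery in
 * <linux/bpf.h>; the snippet only shows the required ordering.
 */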

struct bpf_cgroup_storage *
cgroup_storage_lookup(struct bpf_cgroup_storage_map *map,
                      void *key, bool locked);
struct bpf_cgroup_storage *bpf_cgroup_storage_alloc(struct bpf_prog *prog,
                                        enum bpf_cgroup_storage_type stype);
void bpf_cgroup_storage_free(struct bpf_cgroup_storage *storage);
void bpf_cgroup_storage_link(struct bpf_cgroup_storage *storage,
                             struct cgroup *cgroup,
                             enum bpf_attach_type type);
void bpf_cgroup_storage_unlink(struct bpf_cgroup_storage *storage);
int bpf_cgroup_storage_assign(struct bpf_prog_aux *aux, struct bpf_map *map);

int bpf_percpu_cgroup_storage_copy(struct bpf_map *map, void *key, void *value);
int bpf_percpu_cgroup_storage_update(struct bpf_map *map, void *key,
                                     void *value, u64 flags);

/* Wrappers for __cgroup_bpf_run_filter_skb() guarded by cgroup_bpf_enabled. */
#define BPF_CGROUP_RUN_PROG_INET_INGRESS(sk, skb)                             \
({                                                                            \
        int __ret = 0;                                                        \
        if (cgroup_bpf_enabled)                                               \
                __ret = __cgroup_bpf_run_filter_skb(sk, skb,                  \
                                                    BPF_CGROUP_INET_INGRESS); \
                                                                              \
        __ret;                                                                \
})

#define BPF_CGROUP_RUN_PROG_INET_EGRESS(sk, skb)                               \
({                                                                             \
        int __ret = 0;                                                         \
        if (cgroup_bpf_enabled && sk && sk == skb->sk) {                       \
                typeof(sk) __sk = sk_to_full_sk(sk);                           \
                if (sk_fullsock(__sk))                                         \
                        __ret = __cgroup_bpf_run_filter_skb(__sk, skb,         \
                                                      BPF_CGROUP_INET_EGRESS); \
        }                                                                      \
        __ret;                                                                 \
})
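
/* Illustrative sketch of how a networking path consumes the two skb
 * wrappers above.  A non-zero return is an error code (commonly -EPERM)
 * and the caller is expected to drop the packet:
 *
 *      err = BPF_CGROUP_RUN_PROG_INET_INGRESS(sk, skb);
 *      if (err) {
 *              kfree_skb(skb);
 *              return err;
 *      }
 *
 * The egress variant additionally requires sk == skb->sk and resolves
 * request/timewait sockets to the full socket via sk_to_full_sk() before
 * running the programs, as its body shows.
 */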

#define BPF_CGROUP_RUN_SK_PROG(sk, type)                                       \
({                                                                             \
        int __ret = 0;                                                         \
        if (cgroup_bpf_enabled) {                                              \
                __ret = __cgroup_bpf_run_filter_sk(sk, type);                  \
        }                                                                      \
        __ret;                                                                 \
})

#define BPF_CGROUP_RUN_PROG_INET_SOCK(sk)                                      \
        BPF_CGROUP_RUN_SK_PROG(sk, BPF_CGROUP_INET_SOCK_CREATE)

#define BPF_CGROUP_RUN_PROG_INET_SOCK_RELEASE(sk)                              \
        BPF_CGROUP_RUN_SK_PROG(sk, BPF_CGROUP_INET_SOCK_RELEASE)

#define BPF_CGROUP_RUN_PROG_INET4_POST_BIND(sk)                                \
        BPF_CGROUP_RUN_SK_PROG(sk, BPF_CGROUP_INET4_POST_BIND)

#define BPF_CGROUP_RUN_PROG_INET6_POST_BIND(sk)                                \
        BPF_CGROUP_RUN_SK_PROG(sk, BPF_CGROUP_INET6_POST_BIND)

#define BPF_CGROUP_RUN_SA_PROG(sk, uaddr, type)                                \
({                                                                             \
        int __ret = 0;                                                         \
        if (cgroup_bpf_enabled)                                                \
                __ret = __cgroup_bpf_run_filter_sock_addr(sk, uaddr, type,     \
                                                          NULL);               \
        __ret;                                                                 \
})

#define BPF_CGROUP_RUN_SA_PROG_LOCK(sk, uaddr, type, t_ctx)                    \
({                                                                             \
        int __ret = 0;                                                         \
        if (cgroup_bpf_enabled) {                                              \
                lock_sock(sk);                                                 \
                __ret = __cgroup_bpf_run_filter_sock_addr(sk, uaddr, type,     \
                                                          t_ctx);              \
                release_sock(sk);                                              \
        }                                                                      \
        __ret;                                                                 \
})

#define BPF_CGROUP_RUN_PROG_INET4_BIND_LOCK(sk, uaddr)                         \
        BPF_CGROUP_RUN_SA_PROG_LOCK(sk, uaddr, BPF_CGROUP_INET4_BIND, NULL)

#define BPF_CGROUP_RUN_PROG_INET6_BIND_LOCK(sk, uaddr)                         \
        BPF_CGROUP_RUN_SA_PROG_LOCK(sk, uaddr, BPF_CGROUP_INET6_BIND, NULL)
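
/* Illustrative sketch of a hypothetical bind() implementation using the
 * *_BIND_LOCK wrappers.  The macro takes and releases the socket lock
 * itself, so it must be invoked without lock_sock() already held:
 *
 *      err = BPF_CGROUP_RUN_PROG_INET4_BIND_LOCK(sk, uaddr);
 *      if (err)
 *              return err;
 *      // continue with the normal bind logic
 */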

#define BPF_CGROUP_PRE_CONNECT_ENABLED(sk) (cgroup_bpf_enabled && \
                                            sk->sk_prot->pre_connect)

#define BPF_CGROUP_RUN_PROG_INET4_CONNECT(sk, uaddr)                           \
        BPF_CGROUP_RUN_SA_PROG(sk, uaddr, BPF_CGROUP_INET4_CONNECT)

#define BPF_CGROUP_RUN_PROG_INET6_CONNECT(sk, uaddr)                           \
        BPF_CGROUP_RUN_SA_PROG(sk, uaddr, BPF_CGROUP_INET6_CONNECT)

#define BPF_CGROUP_RUN_PROG_INET4_CONNECT_LOCK(sk, uaddr)                      \
        BPF_CGROUP_RUN_SA_PROG_LOCK(sk, uaddr, BPF_CGROUP_INET4_CONNECT, NULL)

#define BPF_CGROUP_RUN_PROG_INET6_CONNECT_LOCK(sk, uaddr)                      \
        BPF_CGROUP_RUN_SA_PROG_LOCK(sk, uaddr, BPF_CGROUP_INET6_CONNECT, NULL)

#define BPF_CGROUP_RUN_PROG_UDP4_SENDMSG_LOCK(sk, uaddr, t_ctx)                \
        BPF_CGROUP_RUN_SA_PROG_LOCK(sk, uaddr, BPF_CGROUP_UDP4_SENDMSG, t_ctx)

#define BPF_CGROUP_RUN_PROG_UDP6_SENDMSG_LOCK(sk, uaddr, t_ctx)                \
        BPF_CGROUP_RUN_SA_PROG_LOCK(sk, uaddr, BPF_CGROUP_UDP6_SENDMSG, t_ctx)

#define BPF_CGROUP_RUN_PROG_UDP4_RECVMSG_LOCK(sk, uaddr)                       \
        BPF_CGROUP_RUN_SA_PROG_LOCK(sk, uaddr, BPF_CGROUP_UDP4_RECVMSG, NULL)

#define BPF_CGROUP_RUN_PROG_UDP6_RECVMSG_LOCK(sk, uaddr)                       \
        BPF_CGROUP_RUN_SA_PROG_LOCK(sk, uaddr, BPF_CGROUP_UDP6_RECVMSG, NULL)

/* The SOCK_OPS"_SK" macro should be used when sock_ops->sk is not a
 * fullsock and its parent fullsock cannot be reached via
 * sk_to_full_sk().
 *
 * e.g. sock_ops->sk is a request_sock under syncookie mode, where
 * the listener-sk is not attached to rsk_listener.
 * In this case, the caller holds the listener-sk (unlocked),
 * sets sock_ops->sk to the req_sk, and calls this SOCK_OPS"_SK" macro with
 * the listener-sk so that the cgroup-bpf-progs of the
 * listener-sk will be run.
 *
 * Regardless of syncookie mode or not,
 * calling bpf_setsockopt on the listener-sk would not make sense anyway,
 * so passing 'sock_ops->sk == req_sk' to the bpf prog is appropriate here.
 */
#define BPF_CGROUP_RUN_PROG_SOCK_OPS_SK(sock_ops, sk)                   \
({                                                                      \
        int __ret = 0;                                                  \
        if (cgroup_bpf_enabled)                                         \
                __ret = __cgroup_bpf_run_filter_sock_ops(sk,            \
                                                         sock_ops,      \
                                                         BPF_CGROUP_SOCK_OPS); \
        __ret;                                                          \
})
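
/* Illustrative sketch of the syncookie case described above.  A hypothetical
 * caller that only has a request_sock (req) and the unlocked listener (sk)
 * would do roughly the following; the choice of ->op and the remaining
 * bpf_sock_ops_kern fields are assumptions of this sketch:
 *
 *      struct bpf_sock_ops_kern sock_ops;
 *
 *      memset(&sock_ops, 0, sizeof(sock_ops));
 *      sock_ops.op = ...;                      // one of the BPF_SOCK_OPS_*_CB ops
 *      sock_ops.sk = req_to_sk(req);           // not a fullsock
 *      ret = BPF_CGROUP_RUN_PROG_SOCK_OPS_SK(&sock_ops, sk);
 *
 * i.e. the listener's cgroup programs are run while the program itself sees
 * the request socket through sock_ops->sk.
 */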

#define BPF_CGROUP_RUN_PROG_SOCK_OPS(sock_ops)                                 \
({                                                                             \
        int __ret = 0;                                                         \
        if (cgroup_bpf_enabled && (sock_ops)->sk) {                            \
                typeof((sock_ops)->sk) __sk = sk_to_full_sk((sock_ops)->sk);   \
                if (__sk && sk_fullsock(__sk))                                 \
                        __ret = __cgroup_bpf_run_filter_sock_ops(__sk,         \
                                                                 sock_ops,     \
                                                         BPF_CGROUP_SOCK_OPS); \
        }                                                                      \
        __ret;                                                                 \
})

#define BPF_CGROUP_RUN_PROG_DEVICE_CGROUP(type, major, minor, access)         \
({                                                                            \
        int __ret = 0;                                                        \
        if (cgroup_bpf_enabled)                                               \
                __ret = __cgroup_bpf_check_dev_permission(type, major, minor, \
                                                          access,             \
                                                          BPF_CGROUP_DEVICE); \
                                                                              \
        __ret;                                                                \
})

#define BPF_CGROUP_RUN_PROG_SYSCTL(head, table, write, buf, count, pos)        \
({                                                                             \
        int __ret = 0;                                                         \
        if (cgroup_bpf_enabled)                                                \
                __ret = __cgroup_bpf_run_filter_sysctl(head, table, write,     \
                                                       buf, count, pos,        \
                                                       BPF_CGROUP_SYSCTL);     \
        __ret;                                                                 \
})

#define BPF_CGROUP_RUN_PROG_SETSOCKOPT(sock, level, optname, optval, optlen,   \
                                       kernel_optval)                          \
({                                                                             \
        int __ret = 0;                                                         \
        if (cgroup_bpf_enabled)                                                \
                __ret = __cgroup_bpf_run_filter_setsockopt(sock, level,        \
                                                           optname, optval,    \
                                                           optlen,             \
                                                           kernel_optval);     \
        __ret;                                                                 \
})
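
/* Illustrative sketch of the setsockopt hook's calling convention.  The
 * attached programs may rewrite level/optname/optlen and may hand back a
 * kernel buffer in kernel_optval, which the caller should then use instead
 * of copying from user space again:
 *
 *      char *kernel_optval = NULL;
 *
 *      ret = BPF_CGROUP_RUN_PROG_SETSOCKOPT(sk, &level, &optname, optval,
 *                                           &optlen, &kernel_optval);
 *      if (ret < 0)
 *              return ret;
 *      if (kernel_optval) {
 *              // pass kernel_optval and the updated optlen to the
 *              // protocol's setsockopt handler instead of the user pointer
 *      }
 *
 * This mirrors the pointer parameters of
 * __cgroup_bpf_run_filter_setsockopt() declared earlier in this file.
 */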

#define BPF_CGROUP_GETSOCKOPT_MAX_OPTLEN(optlen)                               \
({                                                                             \
        int __ret = 0;                                                         \
        if (cgroup_bpf_enabled)                                                \
                get_user(__ret, optlen);                                       \
        __ret;                                                                 \
})

#define BPF_CGROUP_RUN_PROG_GETSOCKOPT(sock, level, optname, optval, optlen,   \
                                       max_optlen, retval)                     \
({                                                                             \
        int __ret = retval;                                                    \
        if (cgroup_bpf_enabled)                                                \
                __ret = __cgroup_bpf_run_filter_getsockopt(sock, level,        \
                                                           optname, optval,    \
                                                           optlen, max_optlen, \
                                                           retval);            \
        __ret;                                                                 \
})
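
/* Illustrative sketch of how the two getsockopt wrappers compose.  The
 * caller snapshots the user-supplied optlen (0 when no program can run),
 * lets the protocol handler produce its result, and then gives the cgroup
 * programs a chance to inspect or rewrite it:
 *
 *      int max_optlen, err;
 *
 *      max_optlen = BPF_CGROUP_GETSOCKOPT_MAX_OPTLEN(optlen);
 *
 *      err = sock->ops->getsockopt(sock, level, optname, optval, optlen);
 *
 *      err = BPF_CGROUP_RUN_PROG_GETSOCKOPT(sock->sk, level, optname,
 *                                           optval, optlen, max_optlen, err);
 *      return err;
 *
 * When cgroup_bpf_enabled is false the second macro simply passes the
 * protocol handler's return value (retval) through unchanged.
 */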

int cgroup_bpf_prog_attach(const union bpf_attr *attr,
                           enum bpf_prog_type ptype, struct bpf_prog *prog);
int cgroup_bpf_prog_detach(const union bpf_attr *attr,
                           enum bpf_prog_type ptype);
int cgroup_bpf_link_attach(const union bpf_attr *attr, struct bpf_prog *prog);
int cgroup_bpf_prog_query(const union bpf_attr *attr,
                          union bpf_attr __user *uattr);
#else

struct bpf_prog;
struct cgroup_bpf {};
static inline int cgroup_bpf_inherit(struct cgroup *cgrp) { return 0; }
static inline void cgroup_bpf_offline(struct cgroup *cgrp) {}

static inline int cgroup_bpf_prog_attach(const union bpf_attr *attr,
                                         enum bpf_prog_type ptype,
                                         struct bpf_prog *prog)
{
        return -EINVAL;
}

static inline int cgroup_bpf_prog_detach(const union bpf_attr *attr,
                                         enum bpf_prog_type ptype)
{
        return -EINVAL;
}

static inline int cgroup_bpf_link_attach(const union bpf_attr *attr,
                                         struct bpf_prog *prog)
{
        return -EINVAL;
}

static inline int cgroup_bpf_prog_query(const union bpf_attr *attr,
                                        union bpf_attr __user *uattr)
{
        return -EINVAL;
}

static inline void bpf_cgroup_storage_set(
        struct bpf_cgroup_storage *storage[MAX_BPF_CGROUP_STORAGE_TYPE]) {}
static inline int bpf_cgroup_storage_assign(struct bpf_prog_aux *aux,
                                            struct bpf_map *map) { return 0; }
static inline struct bpf_cgroup_storage *bpf_cgroup_storage_alloc(
        struct bpf_prog *prog, enum bpf_cgroup_storage_type stype) { return NULL; }
static inline void bpf_cgroup_storage_free(
        struct bpf_cgroup_storage *storage) {}
static inline int bpf_percpu_cgroup_storage_copy(struct bpf_map *map, void *key,
                                                 void *value) {
        return 0;
}
static inline int bpf_percpu_cgroup_storage_update(struct bpf_map *map,
                                        void *key, void *value, u64 flags) {
        return 0;
}

#define cgroup_bpf_enabled (0)
#define BPF_CGROUP_RUN_SA_PROG_LOCK(sk, uaddr, type, t_ctx) ({ 0; })
#define BPF_CGROUP_PRE_CONNECT_ENABLED(sk) (0)
#define BPF_CGROUP_RUN_PROG_INET_INGRESS(sk, skb) ({ 0; })
#define BPF_CGROUP_RUN_PROG_INET_EGRESS(sk, skb) ({ 0; })
#define BPF_CGROUP_RUN_PROG_INET_SOCK(sk) ({ 0; })
#define BPF_CGROUP_RUN_PROG_INET_SOCK_RELEASE(sk) ({ 0; })
#define BPF_CGROUP_RUN_PROG_INET4_BIND_LOCK(sk, uaddr) ({ 0; })
#define BPF_CGROUP_RUN_PROG_INET6_BIND_LOCK(sk, uaddr) ({ 0; })
#define BPF_CGROUP_RUN_PROG_INET4_POST_BIND(sk) ({ 0; })
#define BPF_CGROUP_RUN_PROG_INET6_POST_BIND(sk) ({ 0; })
#define BPF_CGROUP_RUN_PROG_INET4_CONNECT(sk, uaddr) ({ 0; })
#define BPF_CGROUP_RUN_PROG_INET4_CONNECT_LOCK(sk, uaddr) ({ 0; })
#define BPF_CGROUP_RUN_PROG_INET6_CONNECT(sk, uaddr) ({ 0; })
#define BPF_CGROUP_RUN_PROG_INET6_CONNECT_LOCK(sk, uaddr) ({ 0; })
#define BPF_CGROUP_RUN_PROG_UDP4_SENDMSG_LOCK(sk, uaddr, t_ctx) ({ 0; })
#define BPF_CGROUP_RUN_PROG_UDP6_SENDMSG_LOCK(sk, uaddr, t_ctx) ({ 0; })
#define BPF_CGROUP_RUN_PROG_UDP4_RECVMSG_LOCK(sk, uaddr) ({ 0; })
#define BPF_CGROUP_RUN_PROG_UDP6_RECVMSG_LOCK(sk, uaddr) ({ 0; })
#define BPF_CGROUP_RUN_PROG_SOCK_OPS(sock_ops) ({ 0; })
#define BPF_CGROUP_RUN_PROG_DEVICE_CGROUP(type, major, minor, access) ({ 0; })
#define BPF_CGROUP_RUN_PROG_SYSCTL(head, table, write, buf, count, pos) ({ 0; })
#define BPF_CGROUP_GETSOCKOPT_MAX_OPTLEN(optlen) ({ 0; })
#define BPF_CGROUP_RUN_PROG_GETSOCKOPT(sock, level, optname, optval, \
                                       optlen, max_optlen, retval) ({ retval; })
#define BPF_CGROUP_RUN_PROG_SETSOCKOPT(sock, level, optname, optval, optlen, \
                                       kernel_optval) ({ 0; })

#define for_each_cgroup_storage_type(stype) for (; false; )

#endif /* CONFIG_CGROUP_BPF */

#endif /* _BPF_CGROUP_H */