include/linux/bpf-cgroup.h
/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _BPF_CGROUP_H
#define _BPF_CGROUP_H

#include <linux/bpf.h>
#include <linux/errno.h>
#include <linux/jump_label.h>
#include <linux/percpu.h>
#include <linux/percpu-refcount.h>
#include <linux/rbtree.h>
#include <uapi/linux/bpf.h>

struct sock;
struct sockaddr;
struct cgroup;
struct sk_buff;
struct bpf_map;
struct bpf_prog;
struct bpf_sock_ops_kern;
struct bpf_cgroup_storage;
struct ctl_table;
struct ctl_table_header;

#ifdef CONFIG_CGROUP_BPF

extern struct static_key_false cgroup_bpf_enabled_key;
#define cgroup_bpf_enabled static_branch_unlikely(&cgroup_bpf_enabled_key)

DECLARE_PER_CPU(struct bpf_cgroup_storage*,
                bpf_cgroup_storage[MAX_BPF_CGROUP_STORAGE_TYPE]);

#define for_each_cgroup_storage_type(stype) \
        for (stype = 0; stype < MAX_BPF_CGROUP_STORAGE_TYPE; stype++)

struct bpf_cgroup_storage_map;

struct bpf_storage_buffer {
        struct rcu_head rcu;
        char data[];
};

struct bpf_cgroup_storage {
        union {
                struct bpf_storage_buffer *buf;
                void __percpu *percpu_buf;
        };
        struct bpf_cgroup_storage_map *map;
        struct bpf_cgroup_storage_key key;
        struct list_head list;
        struct rb_node node;
        struct rcu_head rcu;
};

struct bpf_prog_list {
        struct list_head node;
        struct bpf_prog *prog;
        struct bpf_cgroup_storage *storage[MAX_BPF_CGROUP_STORAGE_TYPE];
};

struct bpf_prog_array;

struct cgroup_bpf {
        /* array of effective progs in this cgroup */
        struct bpf_prog_array __rcu *effective[MAX_BPF_ATTACH_TYPE];

        /* progs attached to this cgroup, together with their attach flags;
         * when flags == 0 or BPF_F_ALLOW_OVERRIDE, the list has either zero
         * or one element; with BPF_F_ALLOW_MULTI it may hold up to
         * BPF_CGROUP_MAX_PROGS entries
         */
        struct list_head progs[MAX_BPF_ATTACH_TYPE];
        u32 flags[MAX_BPF_ATTACH_TYPE];

        /* temp storage for effective prog array used by prog_attach/detach */
        struct bpf_prog_array *inactive;

        /* reference counter used to detach bpf programs after cgroup removal */
        struct percpu_ref refcnt;

        /* cgroup_bpf is released using a work queue */
        struct work_struct release_work;
};
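
/*
 * Illustration (a sketch, not part of this header): the per-type flags above
 * mirror what user space passes to BPF_PROG_ATTACH.  With libbpf's
 * bpf_prog_attach(), attaching two egress programs to the same cgroup could
 * look like this; prog1_fd, prog2_fd and cgroup_fd are assumed to be valid
 * descriptors obtained elsewhere:
 *
 *	bpf_prog_attach(prog1_fd, cgroup_fd, BPF_CGROUP_INET_EGRESS,
 *			BPF_F_ALLOW_MULTI);
 *	bpf_prog_attach(prog2_fd, cgroup_fd, BPF_CGROUP_INET_EGRESS,
 *			BPF_F_ALLOW_MULTI);
 *
 * Both calls succeed only because BPF_F_ALLOW_MULTI is set; with flags == 0
 * the second attach would be rejected.
 */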

int cgroup_bpf_inherit(struct cgroup *cgrp);
void cgroup_bpf_offline(struct cgroup *cgrp);

int __cgroup_bpf_attach(struct cgroup *cgrp, struct bpf_prog *prog,
                        struct bpf_prog *replace_prog,
                        enum bpf_attach_type type, u32 flags);
int __cgroup_bpf_detach(struct cgroup *cgrp, struct bpf_prog *prog,
                        enum bpf_attach_type type);
int __cgroup_bpf_query(struct cgroup *cgrp, const union bpf_attr *attr,
                       union bpf_attr __user *uattr);

/* Wrappers for __cgroup_bpf_*(), protected by cgroup_mutex */
int cgroup_bpf_attach(struct cgroup *cgrp, struct bpf_prog *prog,
                      struct bpf_prog *replace_prog, enum bpf_attach_type type,
                      u32 flags);
int cgroup_bpf_detach(struct cgroup *cgrp, struct bpf_prog *prog,
                      enum bpf_attach_type type, u32 flags);
int cgroup_bpf_query(struct cgroup *cgrp, const union bpf_attr *attr,
                     union bpf_attr __user *uattr);

int __cgroup_bpf_run_filter_skb(struct sock *sk,
                                struct sk_buff *skb,
                                enum bpf_attach_type type);

int __cgroup_bpf_run_filter_sk(struct sock *sk,
                               enum bpf_attach_type type);

int __cgroup_bpf_run_filter_sock_addr(struct sock *sk,
                                      struct sockaddr *uaddr,
                                      enum bpf_attach_type type,
                                      void *t_ctx);

int __cgroup_bpf_run_filter_sock_ops(struct sock *sk,
                                     struct bpf_sock_ops_kern *sock_ops,
                                     enum bpf_attach_type type);

int __cgroup_bpf_check_dev_permission(short dev_type, u32 major, u32 minor,
                                      short access, enum bpf_attach_type type);

int __cgroup_bpf_run_filter_sysctl(struct ctl_table_header *head,
                                   struct ctl_table *table, int write,
                                   void __user *buf, size_t *pcount,
                                   loff_t *ppos, void **new_buf,
                                   enum bpf_attach_type type);

int __cgroup_bpf_run_filter_setsockopt(struct sock *sock, int *level,
                                       int *optname, char __user *optval,
                                       int *optlen, char **kernel_optval);
int __cgroup_bpf_run_filter_getsockopt(struct sock *sk, int level,
                                       int optname, char __user *optval,
                                       int __user *optlen, int max_optlen,
                                       int retval);

static inline enum bpf_cgroup_storage_type cgroup_storage_type(
        struct bpf_map *map)
{
        if (map->map_type == BPF_MAP_TYPE_PERCPU_CGROUP_STORAGE)
                return BPF_CGROUP_STORAGE_PERCPU;

        return BPF_CGROUP_STORAGE_SHARED;
}

static inline void bpf_cgroup_storage_set(struct bpf_cgroup_storage
                                          *storage[MAX_BPF_CGROUP_STORAGE_TYPE])
{
        enum bpf_cgroup_storage_type stype;

        for_each_cgroup_storage_type(stype)
                this_cpu_write(bpf_cgroup_storage[stype], storage[stype]);
}
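
/*
 * Calling-pattern sketch: the per-cpu pointers written here are what the
 * bpf_get_local_storage() helper dereferences, so they must be populated
 * immediately before a program runs.  Condensed from the prog-array runner
 * in linux/bpf.h ("item" stands for one bpf_prog_array_item):
 *
 *	bpf_cgroup_storage_set(item->cgroup_storage);
 *	ret = BPF_PROG_RUN(item->prog, ctx);
 */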

struct bpf_cgroup_storage *bpf_cgroup_storage_alloc(struct bpf_prog *prog,
                                        enum bpf_cgroup_storage_type stype);
void bpf_cgroup_storage_free(struct bpf_cgroup_storage *storage);
void bpf_cgroup_storage_link(struct bpf_cgroup_storage *storage,
                             struct cgroup *cgroup,
                             enum bpf_attach_type type);
void bpf_cgroup_storage_unlink(struct bpf_cgroup_storage *storage);
int bpf_cgroup_storage_assign(struct bpf_prog_aux *aux, struct bpf_map *map);
void bpf_cgroup_storage_release(struct bpf_prog_aux *aux, struct bpf_map *map);

int bpf_percpu_cgroup_storage_copy(struct bpf_map *map, void *key, void *value);
int bpf_percpu_cgroup_storage_update(struct bpf_map *map, void *key,
                                     void *value, u64 flags);

/* Wrappers for __cgroup_bpf_run_filter_skb() guarded by cgroup_bpf_enabled. */
#define BPF_CGROUP_RUN_PROG_INET_INGRESS(sk, skb)                             \
({                                                                            \
        int __ret = 0;                                                        \
        if (cgroup_bpf_enabled)                                               \
                __ret = __cgroup_bpf_run_filter_skb(sk, skb,                  \
                                                    BPF_CGROUP_INET_INGRESS); \
                                                                              \
        __ret;                                                                \
})

#define BPF_CGROUP_RUN_PROG_INET_EGRESS(sk, skb)                               \
({                                                                             \
        int __ret = 0;                                                         \
        if (cgroup_bpf_enabled && sk && sk == skb->sk) {                       \
                typeof(sk) __sk = sk_to_full_sk(sk);                           \
                if (sk_fullsock(__sk))                                         \
                        __ret = __cgroup_bpf_run_filter_skb(__sk, skb,         \
                                                      BPF_CGROUP_INET_EGRESS); \
        }                                                                      \
        __ret;                                                                 \
})
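
/*
 * Call-site sketch, condensed from sk_filter_trim_cap() (net/core/filter.c)
 * and ip_finish_output() (net/ipv4/ip_output.c); error handling trimmed:
 *
 *	// ingress: runs before the socket filter
 *	err = BPF_CGROUP_RUN_PROG_INET_INGRESS(sk, skb);
 *	if (err)
 *		return err;
 *
 *	// egress: a non-zero result makes the caller drop the packet
 *	ret = BPF_CGROUP_RUN_PROG_INET_EGRESS(sk, skb);
 */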

#define BPF_CGROUP_RUN_SK_PROG(sk, type)                                       \
({                                                                             \
        int __ret = 0;                                                         \
        if (cgroup_bpf_enabled) {                                              \
                __ret = __cgroup_bpf_run_filter_sk(sk, type);                  \
        }                                                                      \
        __ret;                                                                 \
})

#define BPF_CGROUP_RUN_PROG_INET_SOCK(sk)                                      \
        BPF_CGROUP_RUN_SK_PROG(sk, BPF_CGROUP_INET_SOCK_CREATE)

#define BPF_CGROUP_RUN_PROG_INET4_POST_BIND(sk)                                \
        BPF_CGROUP_RUN_SK_PROG(sk, BPF_CGROUP_INET4_POST_BIND)

#define BPF_CGROUP_RUN_PROG_INET6_POST_BIND(sk)                                \
        BPF_CGROUP_RUN_SK_PROG(sk, BPF_CGROUP_INET6_POST_BIND)

#define BPF_CGROUP_RUN_SA_PROG(sk, uaddr, type)                                \
({                                                                             \
        int __ret = 0;                                                         \
        if (cgroup_bpf_enabled)                                                \
                __ret = __cgroup_bpf_run_filter_sock_addr(sk, uaddr, type,     \
                                                          NULL);               \
        __ret;                                                                 \
})

#define BPF_CGROUP_RUN_SA_PROG_LOCK(sk, uaddr, type, t_ctx)                    \
({                                                                             \
        int __ret = 0;                                                         \
        if (cgroup_bpf_enabled) {                                              \
                lock_sock(sk);                                                 \
                __ret = __cgroup_bpf_run_filter_sock_addr(sk, uaddr, type,     \
                                                          t_ctx);              \
                release_sock(sk);                                              \
        }                                                                      \
        __ret;                                                                 \
})

#define BPF_CGROUP_RUN_PROG_INET4_BIND(sk, uaddr)                              \
        BPF_CGROUP_RUN_SA_PROG(sk, uaddr, BPF_CGROUP_INET4_BIND)

#define BPF_CGROUP_RUN_PROG_INET6_BIND(sk, uaddr)                              \
        BPF_CGROUP_RUN_SA_PROG(sk, uaddr, BPF_CGROUP_INET6_BIND)

#define BPF_CGROUP_PRE_CONNECT_ENABLED(sk) (cgroup_bpf_enabled && \
                                            sk->sk_prot->pre_connect)

#define BPF_CGROUP_RUN_PROG_INET4_CONNECT(sk, uaddr)                           \
        BPF_CGROUP_RUN_SA_PROG(sk, uaddr, BPF_CGROUP_INET4_CONNECT)

#define BPF_CGROUP_RUN_PROG_INET6_CONNECT(sk, uaddr)                           \
        BPF_CGROUP_RUN_SA_PROG(sk, uaddr, BPF_CGROUP_INET6_CONNECT)

#define BPF_CGROUP_RUN_PROG_INET4_CONNECT_LOCK(sk, uaddr)                      \
        BPF_CGROUP_RUN_SA_PROG_LOCK(sk, uaddr, BPF_CGROUP_INET4_CONNECT, NULL)

#define BPF_CGROUP_RUN_PROG_INET6_CONNECT_LOCK(sk, uaddr)                      \
        BPF_CGROUP_RUN_SA_PROG_LOCK(sk, uaddr, BPF_CGROUP_INET6_CONNECT, NULL)
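
/*
 * Usage sketch: protocols reach the *_CONNECT hooks through their
 * ->pre_connect() handler, gated by BPF_CGROUP_PRE_CONNECT_ENABLED().
 * Condensed from tcp_v4_pre_connect() in net/ipv4/tcp_ipv4.c (its
 * addr_len sanity check is omitted here):
 *
 *	static int tcp_v4_pre_connect(struct sock *sk, struct sockaddr *uaddr,
 *				      int addr_len)
 *	{
 *		sock_owned_by_me(sk);
 *		return BPF_CGROUP_RUN_PROG_INET4_CONNECT(sk, uaddr);
 *	}
 *
 * The socket is already locked on this path, which is why the non-_LOCK
 * flavor is used.
 */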

#define BPF_CGROUP_RUN_PROG_UDP4_SENDMSG_LOCK(sk, uaddr, t_ctx)                \
        BPF_CGROUP_RUN_SA_PROG_LOCK(sk, uaddr, BPF_CGROUP_UDP4_SENDMSG, t_ctx)

#define BPF_CGROUP_RUN_PROG_UDP6_SENDMSG_LOCK(sk, uaddr, t_ctx)                \
        BPF_CGROUP_RUN_SA_PROG_LOCK(sk, uaddr, BPF_CGROUP_UDP6_SENDMSG, t_ctx)

#define BPF_CGROUP_RUN_PROG_UDP4_RECVMSG_LOCK(sk, uaddr)                       \
        BPF_CGROUP_RUN_SA_PROG_LOCK(sk, uaddr, BPF_CGROUP_UDP4_RECVMSG, NULL)

#define BPF_CGROUP_RUN_PROG_UDP6_RECVMSG_LOCK(sk, uaddr)                       \
        BPF_CGROUP_RUN_SA_PROG_LOCK(sk, uaddr, BPF_CGROUP_UDP6_RECVMSG, NULL)

#define BPF_CGROUP_RUN_PROG_SOCK_OPS(sock_ops)                                 \
({                                                                             \
        int __ret = 0;                                                         \
        if (cgroup_bpf_enabled && (sock_ops)->sk) {                            \
                typeof((sock_ops)->sk) __sk = sk_to_full_sk((sock_ops)->sk);   \
                if (__sk && sk_fullsock(__sk))                                 \
                        __ret = __cgroup_bpf_run_filter_sock_ops(__sk,         \
                                                                 sock_ops,     \
                                                         BPF_CGROUP_SOCK_OPS); \
        }                                                                      \
        __ret;                                                                 \
})
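
/*
 * Usage sketch, condensed from tcp_call_bpf() in include/net/tcp.h: the
 * caller builds a bpf_sock_ops_kern on the stack and passes its address
 * (field setup abbreviated):
 *
 *	struct bpf_sock_ops_kern sock_ops;
 *
 *	memset(&sock_ops, 0, offsetof(struct bpf_sock_ops_kern, temp));
 *	sock_ops.sk = sk;
 *	sock_ops.op = op;
 *	ret = BPF_CGROUP_RUN_PROG_SOCK_OPS(&sock_ops);
 */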

#define BPF_CGROUP_RUN_PROG_DEVICE_CGROUP(type, major, minor, access)         \
({                                                                            \
        int __ret = 0;                                                        \
        if (cgroup_bpf_enabled)                                               \
                __ret = __cgroup_bpf_check_dev_permission(type, major, minor, \
                                                          access,             \
                                                          BPF_CGROUP_DEVICE); \
                                                                              \
        __ret;                                                                \
})
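
/*
 * Usage sketch, condensed from devcgroup_check_permission(): a non-zero
 * return from the hook denies access to the device node:
 *
 *	int rc = BPF_CGROUP_RUN_PROG_DEVICE_CGROUP(type, major, minor,
 *						   access);
 *	if (rc)
 *		return -EPERM;
 */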

#define BPF_CGROUP_RUN_PROG_SYSCTL(head, table, write, buf, count, pos, nbuf)  \
({                                                                             \
        int __ret = 0;                                                         \
        if (cgroup_bpf_enabled)                                                \
                __ret = __cgroup_bpf_run_filter_sysctl(head, table, write,     \
                                                       buf, count, pos, nbuf,  \
                                                       BPF_CGROUP_SYSCTL);     \
        __ret;                                                                 \
})
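
/*
 * Usage sketch, condensed from proc_sys_call_handler() in
 * fs/proc/proc_sysctl.c; the hook may veto the access or, via new_buf,
 * substitute the value being written:
 *
 *	error = BPF_CGROUP_RUN_PROG_SYSCTL(head, table, write, buf, &count,
 *					   ppos, &new_buf);
 *	if (error)
 *		goto out;
 */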

#define BPF_CGROUP_RUN_PROG_SETSOCKOPT(sock, level, optname, optval, optlen,   \
                                       kernel_optval)                          \
({                                                                             \
        int __ret = 0;                                                         \
        if (cgroup_bpf_enabled)                                                \
                __ret = __cgroup_bpf_run_filter_setsockopt(sock, level,        \
                                                           optname, optval,    \
                                                           optlen,             \
                                                           kernel_optval);     \
        __ret;                                                                 \
})

#define BPF_CGROUP_GETSOCKOPT_MAX_OPTLEN(optlen)                               \
({                                                                             \
        int __ret = 0;                                                         \
        if (cgroup_bpf_enabled)                                                \
                get_user(__ret, optlen);                                       \
        __ret;                                                                 \
})

#define BPF_CGROUP_RUN_PROG_GETSOCKOPT(sock, level, optname, optval, optlen,   \
                                       max_optlen, retval)                     \
({                                                                             \
        int __ret = retval;                                                    \
        if (cgroup_bpf_enabled)                                                \
                __ret = __cgroup_bpf_run_filter_getsockopt(sock, level,        \
                                                           optname, optval,    \
                                                           optlen, max_optlen, \
                                                           retval);            \
        __ret;                                                                 \
})
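
/*
 * Usage sketch, condensed from __sys_setsockopt()/__sys_getsockopt() in
 * net/socket.c.  For setsockopt the program may hand back kernel_optval,
 * which then replaces the user buffer; for getsockopt, max_optlen is read
 * up front so the program sees a stable buffer size:
 *
 *	err = BPF_CGROUP_RUN_PROG_SETSOCKOPT(sock->sk, &level, &optname,
 *					     optval, &optlen, &kernel_optval);
 *
 *	max_optlen = BPF_CGROUP_GETSOCKOPT_MAX_OPTLEN(optlen);
 *	...
 *	err = BPF_CGROUP_RUN_PROG_GETSOCKOPT(sock->sk, level, optname,
 *					     optval, optlen, max_optlen, err);
 */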

int cgroup_bpf_prog_attach(const union bpf_attr *attr,
                           enum bpf_prog_type ptype, struct bpf_prog *prog);
int cgroup_bpf_prog_detach(const union bpf_attr *attr,
                           enum bpf_prog_type ptype);
int cgroup_bpf_prog_query(const union bpf_attr *attr,
                          union bpf_attr __user *uattr);
#else

struct bpf_prog;
struct cgroup_bpf {};
static inline int cgroup_bpf_inherit(struct cgroup *cgrp) { return 0; }
static inline void cgroup_bpf_offline(struct cgroup *cgrp) {}

static inline int cgroup_bpf_prog_attach(const union bpf_attr *attr,
                                         enum bpf_prog_type ptype,
                                         struct bpf_prog *prog)
{
        return -EINVAL;
}

static inline int cgroup_bpf_prog_detach(const union bpf_attr *attr,
                                         enum bpf_prog_type ptype)
{
        return -EINVAL;
}

static inline int cgroup_bpf_prog_query(const union bpf_attr *attr,
                                        union bpf_attr __user *uattr)
{
        return -EINVAL;
}

static inline void bpf_cgroup_storage_set(
        struct bpf_cgroup_storage *storage[MAX_BPF_CGROUP_STORAGE_TYPE]) {}
static inline int bpf_cgroup_storage_assign(struct bpf_prog_aux *aux,
                                            struct bpf_map *map) { return 0; }
static inline void bpf_cgroup_storage_release(struct bpf_prog_aux *aux,
                                              struct bpf_map *map) {}
static inline struct bpf_cgroup_storage *bpf_cgroup_storage_alloc(
        struct bpf_prog *prog, enum bpf_cgroup_storage_type stype) { return NULL; }
static inline void bpf_cgroup_storage_free(
        struct bpf_cgroup_storage *storage) {}
static inline int bpf_percpu_cgroup_storage_copy(struct bpf_map *map, void *key,
                                                 void *value)
{
        return 0;
}
static inline int bpf_percpu_cgroup_storage_update(struct bpf_map *map,
                                                   void *key, void *value,
                                                   u64 flags)
{
        return 0;
}

#define cgroup_bpf_enabled (0)
#define BPF_CGROUP_PRE_CONNECT_ENABLED(sk) (0)
#define BPF_CGROUP_RUN_PROG_INET_INGRESS(sk, skb) ({ 0; })
#define BPF_CGROUP_RUN_PROG_INET_EGRESS(sk, skb) ({ 0; })
#define BPF_CGROUP_RUN_PROG_INET_SOCK(sk) ({ 0; })
#define BPF_CGROUP_RUN_PROG_INET4_BIND(sk, uaddr) ({ 0; })
#define BPF_CGROUP_RUN_PROG_INET6_BIND(sk, uaddr) ({ 0; })
#define BPF_CGROUP_RUN_PROG_INET4_POST_BIND(sk) ({ 0; })
#define BPF_CGROUP_RUN_PROG_INET6_POST_BIND(sk) ({ 0; })
#define BPF_CGROUP_RUN_PROG_INET4_CONNECT(sk, uaddr) ({ 0; })
#define BPF_CGROUP_RUN_PROG_INET4_CONNECT_LOCK(sk, uaddr) ({ 0; })
#define BPF_CGROUP_RUN_PROG_INET6_CONNECT(sk, uaddr) ({ 0; })
#define BPF_CGROUP_RUN_PROG_INET6_CONNECT_LOCK(sk, uaddr) ({ 0; })
#define BPF_CGROUP_RUN_PROG_UDP4_SENDMSG_LOCK(sk, uaddr, t_ctx) ({ 0; })
#define BPF_CGROUP_RUN_PROG_UDP6_SENDMSG_LOCK(sk, uaddr, t_ctx) ({ 0; })
#define BPF_CGROUP_RUN_PROG_UDP4_RECVMSG_LOCK(sk, uaddr) ({ 0; })
#define BPF_CGROUP_RUN_PROG_UDP6_RECVMSG_LOCK(sk, uaddr) ({ 0; })
#define BPF_CGROUP_RUN_PROG_SOCK_OPS(sock_ops) ({ 0; })
#define BPF_CGROUP_RUN_PROG_DEVICE_CGROUP(type, major, minor, access) ({ 0; })
#define BPF_CGROUP_RUN_PROG_SYSCTL(head, table, write, buf, count, pos, nbuf) ({ 0; })
#define BPF_CGROUP_GETSOCKOPT_MAX_OPTLEN(optlen) ({ 0; })
#define BPF_CGROUP_RUN_PROG_GETSOCKOPT(sock, level, optname, optval, \
                                       optlen, max_optlen, retval) ({ retval; })
#define BPF_CGROUP_RUN_PROG_SETSOCKOPT(sock, level, optname, optval, optlen, \
                                       kernel_optval) ({ 0; })

#define for_each_cgroup_storage_type(stype) for (; false; )

#endif /* CONFIG_CGROUP_BPF */

#endif /* _BPF_CGROUP_H */