1 /* SPDX-License-Identifier: GPL-2.0 */
5 #include <linux/errno.h>
6 #include <linux/jump_label.h>
7 #include <uapi/linux/bpf.h>
13 struct bpf_sock_ops_kern;
15 #ifdef CONFIG_CGROUP_BPF
/* Static-branch guard: the BPF_CGROUP_RUN_* wrapper macros below compile to
 * an (almost) free not-taken branch when no cgroup bpf program is in use.
 */
extern struct static_key_false cgroup_bpf_enabled_key;
#define cgroup_bpf_enabled static_branch_unlikely(&cgroup_bpf_enabled_key)
/* One attached program; entries live on a cgroup's per-attach-type list. */
struct bpf_prog_list {
	struct list_head node;	/* linkage in cgroup_bpf::progs[type] */
	struct bpf_prog *prog;
25 struct bpf_prog_array;
	/* array of effective progs in this cgroup: the programs that
	 * actually run for each attach type (presumably including ones
	 * propagated from ancestors via cgroup_bpf_inherit() — confirm).
	 */
	struct bpf_prog_array __rcu *effective[MAX_BPF_ATTACH_TYPE];

	/* attached progs to this cgroup and attach flags
	 * when flags == 0 or BPF_F_ALLOW_OVERRIDE the progs list will
	 * have either zero or one element
	 * when BPF_F_ALLOW_MULTI the list can have up to BPF_CGROUP_MAX_PROGS
	 */
	struct list_head progs[MAX_BPF_ATTACH_TYPE];
	u32 flags[MAX_BPF_ATTACH_TYPE];

	/* temp storage for effective prog array used by prog_attach/detach */
	struct bpf_prog_array __rcu *inactive;
/* Release @cgrp's bpf state (effective arrays, attached prog lists). */
void cgroup_bpf_put(struct cgroup *cgrp);
/* Initialize @cgrp's bpf state from its parent; returns 0 or -errno. */
int cgroup_bpf_inherit(struct cgroup *cgrp);
/* Low-level attach/detach/query primitives. Callers must already hold
 * cgroup_mutex — use the cgroup_bpf_*() wrappers below otherwise.
 */
int __cgroup_bpf_attach(struct cgroup *cgrp, struct bpf_prog *prog,
			enum bpf_attach_type type, u32 flags);
int __cgroup_bpf_detach(struct cgroup *cgrp, struct bpf_prog *prog,
			enum bpf_attach_type type, u32 flags);
int __cgroup_bpf_query(struct cgroup *cgrp, const union bpf_attr *attr,
		       union bpf_attr __user *uattr);
/* Wrapper for __cgroup_bpf_*() protected by cgroup_mutex */
int cgroup_bpf_attach(struct cgroup *cgrp, struct bpf_prog *prog,
		      enum bpf_attach_type type, u32 flags);
int cgroup_bpf_detach(struct cgroup *cgrp, struct bpf_prog *prog,
		      enum bpf_attach_type type, u32 flags);
/* Copy attachment info for @cgrp out to userspace via @uattr. */
int cgroup_bpf_query(struct cgroup *cgrp, const union bpf_attr *attr,
		     union bpf_attr __user *uattr);
/* Run the effective progs of @type for @sk's cgroup on an skb.
 * NOTE(review): the wrappers below invoke this with (sk, skb, type) but
 * only two parameters are visible here — the skb parameter line appears
 * truncated in this excerpt; confirm against the full header.
 */
int __cgroup_bpf_run_filter_skb(struct sock *sk,
				enum bpf_attach_type type);

/* Socket-level hook with no skb (sock_create / post_bind, see below). */
int __cgroup_bpf_run_filter_sk(struct sock *sk,
			       enum bpf_attach_type type);

/* Sockaddr hooks (bind/connect/sendmsg).
 * NOTE(review): the parameter list ends with a comma — the trailing
 * t_ctx parameter (passed by BPF_CGROUP_RUN_SA_PROG_LOCK below) appears
 * truncated in this excerpt; confirm.
 */
int __cgroup_bpf_run_filter_sock_addr(struct sock *sk,
				      struct sockaddr *uaddr,
				      enum bpf_attach_type type,

/* sock_ops hook (BPF_CGROUP_SOCK_OPS). */
int __cgroup_bpf_run_filter_sock_ops(struct sock *sk,
				     struct bpf_sock_ops_kern *sock_ops,
				     enum bpf_attach_type type);

/* Device access check; used by BPF_CGROUP_RUN_PROG_DEVICE_CGROUP(). */
int __cgroup_bpf_check_dev_permission(short dev_type, u32 major, u32 minor,
				      short access, enum bpf_attach_type type);
/* Wrappers for __cgroup_bpf_run_filter_skb() guarded by cgroup_bpf_enabled. */
#define BPF_CGROUP_RUN_PROG_INET_INGRESS(sk, skb) \
	if (cgroup_bpf_enabled) \
		__ret = __cgroup_bpf_run_filter_skb(sk, skb, \
						    BPF_CGROUP_INET_INGRESS); \

/* Egress: only when @sk owns @skb; resolve request/timewait sockets to the
 * owning full socket first, and run the prog only if one exists.
 */
#define BPF_CGROUP_RUN_PROG_INET_EGRESS(sk, skb) \
	if (cgroup_bpf_enabled && sk && sk == skb->sk) { \
		typeof(sk) __sk = sk_to_full_sk(sk); \
		if (sk_fullsock(__sk)) \
			__ret = __cgroup_bpf_run_filter_skb(__sk, skb, \
							    BPF_CGROUP_INET_EGRESS); \

/* Common helper for the socket-level (no skb) wrappers below. */
#define BPF_CGROUP_RUN_SK_PROG(sk, type) \
	if (cgroup_bpf_enabled) { \
		__ret = __cgroup_bpf_run_filter_sk(sk, type); \
/* Socket-creation hook (BPF_CGROUP_INET_SOCK_CREATE). */
#define BPF_CGROUP_RUN_PROG_INET_SOCK(sk) \
	BPF_CGROUP_RUN_SK_PROG(sk, BPF_CGROUP_INET_SOCK_CREATE)

/* Post-bind hooks, per address family. */
#define BPF_CGROUP_RUN_PROG_INET4_POST_BIND(sk) \
	BPF_CGROUP_RUN_SK_PROG(sk, BPF_CGROUP_INET4_POST_BIND)

#define BPF_CGROUP_RUN_PROG_INET6_POST_BIND(sk) \
	BPF_CGROUP_RUN_SK_PROG(sk, BPF_CGROUP_INET6_POST_BIND)
/* Run sockaddr progs of @type for @sk / @uaddr (bind/connect/sendmsg). */
#define BPF_CGROUP_RUN_SA_PROG(sk, uaddr, type) \
	if (cgroup_bpf_enabled) \
		__ret = __cgroup_bpf_run_filter_sock_addr(sk, uaddr, type, \

/* Variant taking an extra @t_ctx passed through to the program.
 * NOTE(review): body truncated in this excerpt; the _LOCK name suggests it
 * holds the socket lock around the run — confirm against the full header.
 */
#define BPF_CGROUP_RUN_SA_PROG_LOCK(sk, uaddr, type, t_ctx) \
	if (cgroup_bpf_enabled) { \
		__ret = __cgroup_bpf_run_filter_sock_addr(sk, uaddr, type, \
/* Per-hook conveniences over BPF_CGROUP_RUN_SA_PROG{,_LOCK}. */
#define BPF_CGROUP_RUN_PROG_INET4_BIND(sk, uaddr) \
	BPF_CGROUP_RUN_SA_PROG(sk, uaddr, BPF_CGROUP_INET4_BIND)

#define BPF_CGROUP_RUN_PROG_INET6_BIND(sk, uaddr) \
	BPF_CGROUP_RUN_SA_PROG(sk, uaddr, BPF_CGROUP_INET6_BIND)

/* True when cgroup-bpf is active and the protocol has a pre_connect hook. */
#define BPF_CGROUP_PRE_CONNECT_ENABLED(sk) (cgroup_bpf_enabled && \
					    sk->sk_prot->pre_connect)

#define BPF_CGROUP_RUN_PROG_INET4_CONNECT(sk, uaddr) \
	BPF_CGROUP_RUN_SA_PROG(sk, uaddr, BPF_CGROUP_INET4_CONNECT)

#define BPF_CGROUP_RUN_PROG_INET6_CONNECT(sk, uaddr) \
	BPF_CGROUP_RUN_SA_PROG(sk, uaddr, BPF_CGROUP_INET6_CONNECT)

#define BPF_CGROUP_RUN_PROG_INET4_CONNECT_LOCK(sk, uaddr) \
	BPF_CGROUP_RUN_SA_PROG_LOCK(sk, uaddr, BPF_CGROUP_INET4_CONNECT, NULL)

#define BPF_CGROUP_RUN_PROG_INET6_CONNECT_LOCK(sk, uaddr) \
	BPF_CGROUP_RUN_SA_PROG_LOCK(sk, uaddr, BPF_CGROUP_INET6_CONNECT, NULL)

#define BPF_CGROUP_RUN_PROG_UDP4_SENDMSG_LOCK(sk, uaddr, t_ctx) \
	BPF_CGROUP_RUN_SA_PROG_LOCK(sk, uaddr, BPF_CGROUP_UDP4_SENDMSG, t_ctx)

#define BPF_CGROUP_RUN_PROG_UDP6_SENDMSG_LOCK(sk, uaddr, t_ctx) \
	BPF_CGROUP_RUN_SA_PROG_LOCK(sk, uaddr, BPF_CGROUP_UDP6_SENDMSG, t_ctx)
/* Run sock_ops progs for the full socket owning (sock_ops)->sk, if any.
 * NOTE(review): typeof(sk) refers to a variable named "sk" at the
 * expansion site, not a macro parameter — fragile; confirm intentional.
 */
#define BPF_CGROUP_RUN_PROG_SOCK_OPS(sock_ops) \
	if (cgroup_bpf_enabled && (sock_ops)->sk) { \
		typeof(sk) __sk = sk_to_full_sk((sock_ops)->sk); \
		if (__sk && sk_fullsock(__sk)) \
			__ret = __cgroup_bpf_run_filter_sock_ops(__sk, \
								 BPF_CGROUP_SOCK_OPS); \

/* Device cgroup permission check (BPF_CGROUP_DEVICE). */
#define BPF_CGROUP_RUN_PROG_DEVICE_CGROUP(type, major, minor, access) \
	if (cgroup_bpf_enabled) \
		__ret = __cgroup_bpf_check_dev_permission(type, major, minor, \
							  BPF_CGROUP_DEVICE); \
/* sys_bpf() entry points for cgroup program attach/detach/query; the
 * target cgroup is resolved from @attr. Return 0 or a negative errno.
 */
int cgroup_bpf_prog_attach(const union bpf_attr *attr,
			   enum bpf_prog_type ptype, struct bpf_prog *prog);
int cgroup_bpf_prog_detach(const union bpf_attr *attr,
			   enum bpf_prog_type ptype);
int cgroup_bpf_prog_query(const union bpf_attr *attr,
			  union bpf_attr __user *uattr);
/* !CONFIG_CGROUP_BPF: zero-size placeholder so struct cgroup still builds. */
struct cgroup_bpf {};
/* Nothing to release when cgroup-bpf support is compiled out. */
static inline void cgroup_bpf_put(struct cgroup *cgrp)
{
}
/* Inheritance is a no-op when cgroup-bpf is compiled out; report success. */
static inline int cgroup_bpf_inherit(struct cgroup *cgrp)
{
	return 0;
}
/* !CONFIG_CGROUP_BPF stubs for the sys_bpf() helpers.
 * NOTE(review): the function bodies are not visible in this excerpt;
 * presumably each returns -EINVAL — confirm against the full header.
 */
static inline int cgroup_bpf_prog_attach(const union bpf_attr *attr,
					 enum bpf_prog_type ptype,
					 struct bpf_prog *prog)

static inline int cgroup_bpf_prog_detach(const union bpf_attr *attr,
					 enum bpf_prog_type ptype)

static inline int cgroup_bpf_prog_query(const union bpf_attr *attr,
					union bpf_attr __user *uattr)
/* With CONFIG_CGROUP_BPF disabled, the guard is constant-false and every
 * run-prog wrapper collapses to 0 ("no program ran, allow"), so callers
 * need no #ifdefs of their own.
 */
#define cgroup_bpf_enabled (0)
#define BPF_CGROUP_PRE_CONNECT_ENABLED(sk) (0)
#define BPF_CGROUP_RUN_PROG_INET_INGRESS(sk,skb) ({ 0; })
#define BPF_CGROUP_RUN_PROG_INET_EGRESS(sk,skb) ({ 0; })
#define BPF_CGROUP_RUN_PROG_INET_SOCK(sk) ({ 0; })
#define BPF_CGROUP_RUN_PROG_INET4_BIND(sk, uaddr) ({ 0; })
#define BPF_CGROUP_RUN_PROG_INET6_BIND(sk, uaddr) ({ 0; })
#define BPF_CGROUP_RUN_PROG_INET4_POST_BIND(sk) ({ 0; })
#define BPF_CGROUP_RUN_PROG_INET6_POST_BIND(sk) ({ 0; })
#define BPF_CGROUP_RUN_PROG_INET4_CONNECT(sk, uaddr) ({ 0; })
#define BPF_CGROUP_RUN_PROG_INET4_CONNECT_LOCK(sk, uaddr) ({ 0; })
#define BPF_CGROUP_RUN_PROG_INET6_CONNECT(sk, uaddr) ({ 0; })
#define BPF_CGROUP_RUN_PROG_INET6_CONNECT_LOCK(sk, uaddr) ({ 0; })
#define BPF_CGROUP_RUN_PROG_UDP4_SENDMSG_LOCK(sk, uaddr, t_ctx) ({ 0; })
#define BPF_CGROUP_RUN_PROG_UDP6_SENDMSG_LOCK(sk, uaddr, t_ctx) ({ 0; })
#define BPF_CGROUP_RUN_PROG_SOCK_OPS(sock_ops) ({ 0; })
#define BPF_CGROUP_RUN_PROG_DEVICE_CGROUP(type,major,minor,access) ({ 0; })
242 #endif /* CONFIG_CGROUP_BPF */
244 #endif /* _BPF_CGROUP_H */