// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2019 Facebook  */

#include <linux/types.h>
#include <linux/bpf_verifier.h>
#include <linux/bpf.h>
#include <linux/btf.h>
#include <linux/btf_ids.h>
#include <linux/filter.h>
#include <net/tcp.h>
#include <net/bpf_sk_storage.h>

static u32 optional_ops[] = {
	offsetof(struct tcp_congestion_ops, init),
	offsetof(struct tcp_congestion_ops, release),
	offsetof(struct tcp_congestion_ops, set_state),
	offsetof(struct tcp_congestion_ops, cwnd_event),
	offsetof(struct tcp_congestion_ops, in_ack_event),
	offsetof(struct tcp_congestion_ops, pkts_acked),
	offsetof(struct tcp_congestion_ops, min_tso_segs),
	offsetof(struct tcp_congestion_ops, sndbuf_expand),
	offsetof(struct tcp_congestion_ops, cong_control),
};
static u32 unsupported_ops[] = {
	offsetof(struct tcp_congestion_ops, get_info),
};

static const struct btf_type *tcp_sock_type;
static u32 tcp_sock_id, sock_id;

static int bpf_tcp_ca_init(struct btf *btf)
{
	s32 type_id;

	type_id = btf_find_by_name_kind(btf, "sock", BTF_KIND_STRUCT);
	if (type_id < 0)
		return -EINVAL;
	sock_id = type_id;

	type_id = btf_find_by_name_kind(btf, "tcp_sock", BTF_KIND_STRUCT);
	if (type_id < 0)
		return -EINVAL;
	tcp_sock_id = type_id;
	tcp_sock_type = btf_type_by_id(btf, tcp_sock_id);

	return 0;
}

static bool is_optional(u32 member_offset)
{
	unsigned int i;

	for (i = 0; i < ARRAY_SIZE(optional_ops); i++) {
		if (member_offset == optional_ops[i])
			return true;
	}

	return false;
}

static bool is_unsupported(u32 member_offset)
{
	unsigned int i;

	for (i = 0; i < ARRAY_SIZE(unsupported_ops); i++) {
		if (member_offset == unsupported_ops[i])
			return true;
	}

	return false;
}

extern struct btf *btf_vmlinux;
static bool bpf_tcp_ca_is_valid_access(int off, int size,
				       enum bpf_access_type type,
				       const struct bpf_prog *prog,
				       struct bpf_insn_access_aux *info)
{
	if (off < 0 || off >= sizeof(__u64) * MAX_BPF_FUNC_ARGS)
		return false;
	if (type != BPF_READ)
		return false;
	if (off % size != 0)
		return false;

	if (!btf_ctx_access(off, size, type, prog, info))
		return false;

	if (info->reg_type == PTR_TO_BTF_ID && info->btf_id == sock_id)
		/* promote it to tcp_sock */
		info->btf_id = tcp_sock_id;

	return true;
}
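/* Illustrative sketch, not part of this file: because of the promotion
 * above, a struct_ops prog can treat the "struct sock *" argument of a
 * tcp_congestion_ops callback as a tcp_sock after a plain cast, e.g.
 * (assuming tcp_sk() and tcp_slow_start() as provided by the selftests'
 * bpf_tcp_helpers.h):
 *
 *	SEC("struct_ops/bpf_ca_cong_avoid")
 *	void BPF_PROG(bpf_ca_cong_avoid, struct sock *sk, __u32 ack, __u32 acked)
 *	{
 *		struct tcp_sock *tp = tcp_sk(sk);
 *
 *		if (tp->snd_cwnd < tp->snd_ssthresh)
 *			tcp_slow_start(tp, acked);
 *	}
 */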
static int bpf_tcp_ca_btf_struct_access(struct bpf_verifier_log *log,
					const struct btf *btf,
					const struct btf_type *t, int off,
					int size, enum bpf_access_type atype,
					u32 *next_btf_id)
{
	size_t end;

	if (atype == BPF_READ)
		return btf_struct_access(log, btf, t, off, size, atype,
					 next_btf_id);

	if (t != tcp_sock_type) {
		bpf_log(log, "only read is supported\n");
		return -EACCES;
	}

	switch (off) {
	case bpf_ctx_range(struct inet_connection_sock, icsk_ca_priv):
		end = offsetofend(struct inet_connection_sock, icsk_ca_priv);
		break;
	case offsetof(struct inet_connection_sock, icsk_ack.pending):
		end = offsetofend(struct inet_connection_sock,
				  icsk_ack.pending);
		break;
	case offsetof(struct tcp_sock, snd_cwnd):
		end = offsetofend(struct tcp_sock, snd_cwnd);
		break;
	case offsetof(struct tcp_sock, snd_cwnd_cnt):
		end = offsetofend(struct tcp_sock, snd_cwnd_cnt);
		break;
	case offsetof(struct tcp_sock, snd_ssthresh):
		end = offsetofend(struct tcp_sock, snd_ssthresh);
		break;
	case offsetof(struct tcp_sock, ecn_flags):
		end = offsetofend(struct tcp_sock, ecn_flags);
		break;
	default:
		bpf_log(log, "no write support to tcp_sock at off %d\n", off);
		return -EACCES;
	}

	if (off + size > end) {
		bpf_log(log,
			"write access at off %d with size %d beyond the member of tcp_sock ended at %zu\n",
			off, size, end);
		return -EACCES;
	}

	return NOT_INIT;
}
BPF_CALL_2(bpf_tcp_send_ack, struct tcp_sock *, tp, u32, rcv_nxt)
{
	/* bpf_tcp_ca prog cannot have NULL tp */
	__tcp_send_ack((struct sock *)tp, rcv_nxt);
	return 0;
}

static const struct bpf_func_proto bpf_tcp_send_ack_proto = {
	.func		= bpf_tcp_send_ack,
	.gpl_only	= false,
	/* In case we want to report error later */
	.ret_type	= RET_INTEGER,
	.arg1_type	= ARG_PTR_TO_BTF_ID,
	.arg1_btf_id	= &tcp_sock_id,
	.arg2_type	= ARG_ANYTHING,
};

static const struct bpf_func_proto *
bpf_tcp_ca_get_func_proto(enum bpf_func_id func_id,
			  const struct bpf_prog *prog)
{
	switch (func_id) {
	case BPF_FUNC_tcp_send_ack:
		return &bpf_tcp_send_ack_proto;
	case BPF_FUNC_sk_storage_get:
		return &bpf_sk_storage_get_proto;
	case BPF_FUNC_sk_storage_delete:
		return &bpf_sk_storage_delete_proto;
	default:
		return bpf_base_func_proto(func_id);
	}
}
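/* Illustrative call site in a struct_ops prog (sketch, loosely mirroring
 * the bpf_dctcp selftest): bpf_tcp_send_ack() takes the verifier-checked
 * tcp_sock directly, so no NULL check is needed on the BPF side:
 *
 *	SEC("struct_ops/bpf_ca_cwnd_event")
 *	void BPF_PROG(bpf_ca_cwnd_event, struct sock *sk, enum tcp_ca_event ev)
 *	{
 *		if (ev == CA_EVENT_DELAYED_ACK || ev == CA_EVENT_NON_DELAYED_ACK)
 *			bpf_tcp_send_ack(tcp_sk(sk), tcp_sk(sk)->rcv_nxt);
 *	}
 */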
BTF_SET_START(bpf_tcp_ca_kfunc_ids)
BTF_ID(func, tcp_reno_ssthresh)
BTF_ID(func, tcp_reno_cong_avoid)
BTF_ID(func, tcp_reno_undo_cwnd)
BTF_ID(func, tcp_slow_start)
BTF_ID(func, tcp_cong_avoid_ai)
#ifdef CONFIG_X86
#ifdef CONFIG_DYNAMIC_FTRACE
#if IS_BUILTIN(CONFIG_TCP_CONG_CUBIC)
BTF_ID(func, cubictcp_init)
BTF_ID(func, cubictcp_recalc_ssthresh)
BTF_ID(func, cubictcp_cong_avoid)
BTF_ID(func, cubictcp_state)
BTF_ID(func, cubictcp_cwnd_event)
BTF_ID(func, cubictcp_acked)
#endif
#if IS_BUILTIN(CONFIG_TCP_CONG_DCTCP)
BTF_ID(func, dctcp_init)
BTF_ID(func, dctcp_update_alpha)
BTF_ID(func, dctcp_cwnd_event)
BTF_ID(func, dctcp_ssthresh)
BTF_ID(func, dctcp_cwnd_undo)
BTF_ID(func, dctcp_state)
#endif
#if IS_BUILTIN(CONFIG_TCP_CONG_BBR)
BTF_ID(func, bbr_init)
BTF_ID(func, bbr_main)
BTF_ID(func, bbr_sndbuf_expand)
BTF_ID(func, bbr_undo_cwnd)
BTF_ID(func, bbr_cwnd_event)
BTF_ID(func, bbr_ssthresh)
BTF_ID(func, bbr_min_tso_segs)
BTF_ID(func, bbr_set_state)
#endif
#endif /* CONFIG_DYNAMIC_FTRACE */
#endif /* CONFIG_X86 */
BTF_SET_END(bpf_tcp_ca_kfunc_ids)

static bool bpf_tcp_ca_check_kfunc_call(u32 kfunc_btf_id)
{
	return btf_id_set_contains(&bpf_tcp_ca_kfunc_ids, kfunc_btf_id);
}
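/* Effect of the allow-list (sketch): a BPF CA may call these kernel
 * functions directly instead of reimplementing them, e.g. to fall back
 * to reno behaviour for one callback.  Assuming an __ksym declaration
 * on the BPF side:
 *
 *	extern __u32 tcp_reno_ssthresh(struct sock *sk) __ksym;
 *
 *	SEC("struct_ops/bpf_ca_ssthresh")
 *	__u32 BPF_PROG(bpf_ca_ssthresh, struct sock *sk)
 *	{
 *		return tcp_reno_ssthresh(sk);
 *	}
 */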
static const struct bpf_verifier_ops bpf_tcp_ca_verifier_ops = {
	.get_func_proto		= bpf_tcp_ca_get_func_proto,
	.is_valid_access	= bpf_tcp_ca_is_valid_access,
	.btf_struct_access	= bpf_tcp_ca_btf_struct_access,
	.check_kfunc_call	= bpf_tcp_ca_check_kfunc_call,
};

static int bpf_tcp_ca_init_member(const struct btf_type *t,
				  const struct btf_member *member,
				  void *kdata, const void *udata)
{
	const struct tcp_congestion_ops *utcp_ca;
	struct tcp_congestion_ops *tcp_ca;
	int prog_fd;
	u32 moff;

	utcp_ca = (const struct tcp_congestion_ops *)udata;
	tcp_ca = (struct tcp_congestion_ops *)kdata;

	moff = btf_member_bit_offset(t, member) / 8;
	switch (moff) {
	case offsetof(struct tcp_congestion_ops, flags):
		if (utcp_ca->flags & ~TCP_CONG_MASK)
			return -EINVAL;
		tcp_ca->flags = utcp_ca->flags;
		return 1;
	case offsetof(struct tcp_congestion_ops, name):
		if (bpf_obj_name_cpy(tcp_ca->name, utcp_ca->name,
				     sizeof(tcp_ca->name)) <= 0)
			return -EINVAL;
		if (tcp_ca_find(utcp_ca->name))
			return -EEXIST;
		return 1;
	}

	if (!btf_type_resolve_func_ptr(btf_vmlinux, member->type, NULL))
		return 0;

	/* Ensure bpf_prog is provided for compulsory func ptr */
	prog_fd = (int)(*(unsigned long *)(udata + moff));
	if (!prog_fd && !is_optional(moff) && !is_unsupported(moff))
		return -EINVAL;

	return 0;
}
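/* Shape of the udata consumed above (sketch, following the selftests'
 * conventions; the bpf_reno_* prog names are hypothetical): libbpf builds
 * it from a SEC(".struct_ops") map, copying scalar members verbatim and
 * storing the fd of the loaded prog in each function-pointer slot, which
 * is why a zero fd in a compulsory slot is rejected:
 *
 *	SEC(".struct_ops")
 *	struct tcp_congestion_ops bpf_reno = {
 *		.flags		= TCP_CONG_NON_RESTRICTED,
 *		.name		= "bpf_reno",
 *		.ssthresh	= (void *)bpf_reno_ssthresh,
 *		.cong_avoid	= (void *)bpf_reno_cong_avoid,
 *		.undo_cwnd	= (void *)bpf_reno_undo_cwnd,
 *	};
 */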
static int bpf_tcp_ca_check_member(const struct btf_type *t,
				   const struct btf_member *member)
{
	if (is_unsupported(btf_member_bit_offset(t, member) / 8))
		return -ENOTSUPP;
	return 0;
}

static int bpf_tcp_ca_reg(void *kdata)
{
	return tcp_register_congestion_control(kdata);
}

static void bpf_tcp_ca_unreg(void *kdata)
{
	tcp_unregister_congestion_control(kdata);
}

/* Avoid sparse warning.  It is only used in bpf_struct_ops.c. */
extern struct bpf_struct_ops bpf_tcp_congestion_ops;

struct bpf_struct_ops bpf_tcp_congestion_ops = {
	.verifier_ops = &bpf_tcp_ca_verifier_ops,
	.reg = bpf_tcp_ca_reg,
	.unreg = bpf_tcp_ca_unreg,
	.check_member = bpf_tcp_ca_check_member,
	.init_member = bpf_tcp_ca_init_member,
	.init = bpf_tcp_ca_init,
	.name = "tcp_congestion_ops",
};
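/* Registering from user space (illustrative; "bpf_ca" is a hypothetical
 * libbpf skeleton built from the SEC(".struct_ops") sketch above):
 *
 *	struct bpf_ca *skel = bpf_ca__open_and_load();
 *	struct bpf_link *link =
 *		bpf_map__attach_struct_ops(skel->maps.bpf_reno);
 *
 * The attach ends up in bpf_tcp_ca_reg() above; afterwards "bpf_reno" is
 * selectable like any other CA, e.g. via the TCP_CONGESTION sockopt or
 * the net.ipv4.tcp_congestion_control sysctl.
 */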