1 // SPDX-License-Identifier: GPL-2.0-only
2 /* Copyright (c) 2017 Facebook
5 #include <linux/slab.h>
6 #include <linux/vmalloc.h>
7 #include <linux/etherdevice.h>
8 #include <linux/filter.h>
9 #include <linux/sched/signal.h>
10 #include <net/bpf_sk_storage.h>
13 #include <linux/error-injection.h>
15 #define CREATE_TRACE_POINTS
16 #include <trace/events/bpf_test_run.h>
/*
 * bpf_test_run - run @prog against @ctx @repeat times, reporting the last
 * program return value through @retval and the average per-iteration
 * runtime (ns) through @time.  @xdp selects the XDP run helper.
 *
 * NOTE(review): this listing is elided (original line numbers are embedded
 * at the start of each line; braces and several statements are missing),
 * so comments below describe only what is visible.
 */
18 static int bpf_test_run(struct bpf_prog *prog, void *ctx, u32 repeat,
19 u32 *retval, u32 *time, bool xdp)
21 struct bpf_cgroup_storage *storage[MAX_BPF_CGROUP_STORAGE_TYPE] = { NULL };
22 enum bpf_cgroup_storage_type stype;
23 u64 time_start, time_spent = 0;
/* Pre-allocate every cgroup-storage type the program might touch. */
27 for_each_cgroup_storage_type(stype) {
28 storage[stype] = bpf_cgroup_storage_alloc(prog, stype);
29 if (IS_ERR(storage[stype])) {
/* Clear the failed slot so the cleanup loop can free the rest safely
 * (bpf_cgroup_storage_free of the already-allocated entries). */
30 storage[stype] = NULL;
31 for_each_cgroup_storage_type(stype)
32 bpf_cgroup_storage_free(storage[stype]);
/* Timed run loop — only program invocations are accounted. */
42 time_start = ktime_get_ns();
43 for (i = 0; i < repeat; i++) {
44 bpf_cgroup_storage_set(storage);
/* XDP programs use the dedicated runner; everything else the generic one
 * (the if/else between these two lines is elided). */
47 *retval = bpf_prog_run_xdp(prog, ctx);
49 *retval = BPF_PROG_RUN(prog, ctx);
/* A pending signal aborts the repeat loop; accumulate time spent so far. */
51 if (signal_pending(current)) {
57 time_spent += ktime_get_ns() - time_start;
/* Restart the clock after what is presumably a cond_resched() point
 * (elided) so scheduling pauses are not charged to the program. */
65 time_start = ktime_get_ns();
68 time_spent += ktime_get_ns() - time_start;
/* Average over the repetitions and clamp to fit the u32 user field. */
72 do_div(time_spent, repeat);
73 *time = time_spent > U32_MAX ? U32_MAX : (u32)time_spent;
75 for_each_cgroup_storage_type(stype)
76 bpf_cgroup_storage_free(storage[stype]);
/*
 * bpf_test_finish - copy the run results (output @data, its @size, the
 * program @retval and the measured @duration) back to user space via
 * @uattr.  Error paths between the visible lines are elided; each failed
 * copy_to_user() presumably bails out with err left at -EFAULT.
 */
81 static int bpf_test_finish(const union bpf_attr *kattr,
82 union bpf_attr __user *uattr, const void *data,
83 u32 size, u32 retval, u32 duration)
85 void __user *data_out = u64_to_user_ptr(kattr->test.data_out);
89 /* Clamp copy if the user has provided a size hint, but copy the full
90 * buffer if not to retain old behaviour.
92 if (kattr->test.data_size_out &&
93 copy_size > kattr->test.data_size_out) {
94 copy_size = kattr->test.data_size_out;
/* data_out may be NULL when the caller only wants sizes/retval back. */
98 if (data_out && copy_to_user(data_out, data, copy_size))
/* Report the full (unclamped) size so the caller can detect truncation. */
100 if (copy_to_user(&uattr->test.data_size_out, &size, sizeof(size)))
102 if (copy_to_user(&uattr->test.retval, &retval, sizeof(retval)))
104 if (copy_to_user(&uattr->test.duration, &duration, sizeof(duration)))
109 trace_bpf_test_finish(&err);
113 /* Integer types of various sizes and pointer combinations cover variety of
114 * architecture dependent calling conventions. 7+ can be supported in the
118 __diag_ignore(GCC, 8, "-Wmissing-prototypes",
119 "Global functions as their definitions will be in vmlinux BTF");
/*
 * Trampoline test targets for fentry/fexit programs.  test4..test6 visibly
 * return the sum of their arguments; the bodies of test1..test3 are elided
 * here (from the caller's expectations below, test1 presumably returns
 * a + 1 and test2/test3 the argument sums — TODO confirm against full file).
 */
120 int noinline bpf_fentry_test1(int a)
125 int noinline bpf_fentry_test2(int a, u64 b)
130 int noinline bpf_fentry_test3(char a, int b, u64 c)
135 int noinline bpf_fentry_test4(void *a, char b, int c, u64 d)
137 return (long)a + b + c + d;
140 int noinline bpf_fentry_test5(u64 a, void *b, short c, int d, u64 e)
142 return a + (long)b + c + d + e;
145 int noinline bpf_fentry_test6(u64 a, void *b, short c, int d, void *e, u64 f)
147 return a + (long)b + c + d + (long)e + f;
/* Target for BPF_MODIFY_RETURN test programs; marked so error injection
 * (and thus return-value modification) is permitted on it. */
150 int noinline bpf_modify_return_test(int a, int *b)
157 ALLOW_ERROR_INJECTION(bpf_modify_return_test, ERRNO);
/*
 * bpf_test_init - allocate a zeroed kernel buffer of @size plus
 * @headroom/@tailroom and fill it from the user-supplied test data.
 * Returns the buffer on success or an ERR_PTR on failure.
 */
159 static void *bpf_test_init(const union bpf_attr *kattr, u32 size,
160 u32 headroom, u32 tailroom)
162 void __user *data_in = u64_to_user_ptr(kattr->test.data_in);
163 u32 user_size = kattr->test.data_size_in;
/* Packet must hold at least an Ethernet header and fit in one page. */
166 if (size < ETH_HLEN || size > PAGE_SIZE - headroom - tailroom)
167 return ERR_PTR(-EINVAL);
/* User may provide less data than @size (rest stays zeroed), never more. */
169 if (user_size > size)
170 return ERR_PTR(-EMSGSIZE);
172 data = kzalloc(size + headroom + tailroom, GFP_USER);
174 return ERR_PTR(-ENOMEM);
/* User bytes land after the headroom; the elided error path presumably
 * frees @data before returning. */
176 if (copy_from_user(data + headroom, data_in, user_size)) {
178 return ERR_PTR(-EFAULT);
/*
 * bpf_prog_test_run_tracing - exercise the bpf_fentry_test*() /
 * bpf_modify_return_test() targets so an attached fentry/fexit/fmod_ret
 * program runs, then report (side_effect << 16 | ret) to user space.
 */
184 int bpf_prog_test_run_tracing(struct bpf_prog *prog,
185 const union bpf_attr *kattr,
186 union bpf_attr __user *uattr)
188 u16 side_effect = 0, ret = 0;
189 int b = 2, err = -EFAULT;
192 switch (prog->expected_attach_type) {
193 case BPF_TRACE_FENTRY:
194 case BPF_TRACE_FEXIT:
/* Each comparison pins the expected unmodified return value; a wrong
 * result means the trampoline corrupted arguments or the return. */
195 if (bpf_fentry_test1(1) != 2 ||
196 bpf_fentry_test2(2, 3) != 5 ||
197 bpf_fentry_test3(4, 5, 6) != 15 ||
198 bpf_fentry_test4((void *)7, 8, 9, 10) != 34 ||
199 bpf_fentry_test5(11, (void *)12, 13, 14, 15) != 65 ||
200 bpf_fentry_test6(16, (void *)17, 18, 19, (void *)20, 21) != 111)
203 case BPF_MODIFY_RETURN:
204 ret = bpf_modify_return_test(1, &b);
/* Pack any observed side effect in the high half, the (possibly
 * program-modified) return value in the low half. */
212 retval = ((u32)side_effect << 16) | ret;
213 if (copy_to_user(&uattr->test.retval, &retval, sizeof(retval)))
218 trace_bpf_test_finish(&err);
/*
 * bpf_ctx_init - copy the optional user-supplied context (ctx_in) into a
 * zeroed kernel buffer of @max_size.  Returns NULL when no context was
 * requested in either direction, the buffer on success, or an ERR_PTR.
 */
222 static void *bpf_ctx_init(const union bpf_attr *kattr, u32 max_size)
224 void __user *data_in = u64_to_user_ptr(kattr->test.ctx_in);
225 void __user *data_out = u64_to_user_ptr(kattr->test.ctx_out);
226 u32 size = kattr->test.ctx_size_in;
/* Neither input nor output context wanted — the elided line presumably
 * returns NULL here. */
230 if (!data_in && !data_out)
233 data = kzalloc(max_size, GFP_USER);
235 return ERR_PTR(-ENOMEM);
/* A context larger than the kernel knows about is only accepted if the
 * excess tail is all zero (forward compatibility). */
238 err = bpf_check_uarg_tail_zero(data_in, max_size, size);
244 size = min_t(u32, max_size, size);
245 if (copy_from_user(data, data_in, size)) {
247 return ERR_PTR(-EFAULT);
/*
 * bpf_ctx_finish - copy the (possibly program-modified) context back to
 * the user's ctx_out buffer and report its size.  The @size parameter is
 * declared on an elided continuation line.
 */
253 static int bpf_ctx_finish(const union bpf_attr *kattr,
254 union bpf_attr __user *uattr, const void *data,
257 void __user *data_out = u64_to_user_ptr(kattr->test.ctx_out);
259 u32 copy_size = size;
/* Nothing to copy, or the user did not ask for the context back. */
261 if (!data || !data_out)
/* Clamp to the user's buffer; the full size is still reported below so
 * truncation is detectable. */
264 if (copy_size > kattr->test.ctx_size_out) {
265 copy_size = kattr->test.ctx_size_out;
269 if (copy_to_user(data_out, data, copy_size))
271 if (copy_to_user(&uattr->test.ctx_size_out, &size, sizeof(size)))
280 * range_is_zero - test whether buffer is initialized
281 * @buf: buffer to check
282 * @from: check from this position
283 * @to: check up until (excluding) this position
285  * This function returns true if there is no non-zero byte
286  * in the buf in the range [from,to), i.e. the range is all zero.
/* True when bytes [from, to) of @buf are all zero (memchr_inv returns
 * NULL when every byte matches). */
288 static inline bool range_is_zero(void *buf, size_t from, size_t to)
290 return !memchr_inv((u8 *)buf + from, 0, to - from);
/*
 * convert___skb_to_skb - apply the user-visible fields of a __sk_buff
 * test context onto a real skb, rejecting any context whose unsupported
 * fields are non-zero.  Error returns between the visible range_is_zero()
 * checks are elided (presumably -EINVAL).
 */
293 static int convert___skb_to_skb(struct sk_buff *skb, struct __sk_buff *__skb)
295 struct qdisc_skb_cb *cb = (struct qdisc_skb_cb *)skb->cb;
300 /* make sure the fields we don't use are zeroed */
301 if (!range_is_zero(__skb, 0, offsetof(struct __sk_buff, mark)))
304 /* mark is allowed */
306 if (!range_is_zero(__skb, offsetofend(struct __sk_buff, mark),
307 offsetof(struct __sk_buff, priority)))
310 /* priority is allowed */
312 if (!range_is_zero(__skb, offsetofend(struct __sk_buff, priority),
313 offsetof(struct __sk_buff, cb)))
318 if (!range_is_zero(__skb, offsetofend(struct __sk_buff, cb),
319 offsetof(struct __sk_buff, tstamp)))
322 /* tstamp is allowed */
323 /* wire_len is allowed */
324 /* gso_segs is allowed */
326 if (!range_is_zero(__skb, offsetofend(struct __sk_buff, gso_segs),
327 offsetof(struct __sk_buff, gso_size)))
330 /* gso_size is allowed */
332 if (!range_is_zero(__skb, offsetofend(struct __sk_buff, gso_size),
333 sizeof(struct __sk_buff)))
/* All whitelisted fields transfer directly onto the skb. */
336 skb->mark = __skb->mark;
337 skb->priority = __skb->priority;
338 skb->tstamp = __skb->tstamp;
339 memcpy(&cb->data, __skb->cb, QDISC_CB_PRIV_LEN);
/* wire_len of 0 means "use the real skb length"; otherwise it must be at
 * least the linear length and no more than GSO_MAX_SIZE. */
341 if (__skb->wire_len == 0) {
342 cb->pkt_len = skb->len;
344 if (__skb->wire_len < skb->len ||
345 __skb->wire_len > GSO_MAX_SIZE)
347 cb->pkt_len = __skb->wire_len;
350 if (__skb->gso_segs > GSO_MAX_SEGS)
352 skb_shinfo(skb)->gso_segs = __skb->gso_segs;
353 skb_shinfo(skb)->gso_size = __skb->gso_size;
/*
 * convert_skb_to___skb - mirror of convert___skb_to_skb(): copy the
 * program-visible skb fields back into the user-facing __sk_buff context
 * after the test run.  (An elided line presumably guards __skb == NULL.)
 */
358 static void convert_skb_to___skb(struct sk_buff *skb, struct __sk_buff *__skb)
360 struct qdisc_skb_cb *cb = (struct qdisc_skb_cb *)skb->cb;
365 __skb->mark = skb->mark;
366 __skb->priority = skb->priority;
367 __skb->tstamp = skb->tstamp;
368 memcpy(__skb->cb, &cb->data, QDISC_CB_PRIV_LEN);
369 __skb->wire_len = cb->pkt_len;
370 __skb->gso_segs = skb_shinfo(skb)->gso_segs;
/*
 * bpf_prog_test_run_skb - BPF_PROG_TEST_RUN handler for skb-based program
 * types: builds a fake skb (with a bare sock in the caller's netns) from
 * the user data, runs the program, and copies data/ctx results back.
 * Cleanup labels and several error branches are elided from this listing.
 */
373 int bpf_prog_test_run_skb(struct bpf_prog *prog, const union bpf_attr *kattr,
374 union bpf_attr __user *uattr)
376 bool is_l2 = false, is_direct_pkt_access = false;
377 u32 size = kattr->test.data_size_in;
378 u32 repeat = kattr->test.repeat;
379 struct __sk_buff *ctx = NULL;
380 u32 retval, duration;
381 int hh_len = ETH_HLEN;
/* Reserve standard skb headroom plus room for the shared info tail. */
387 data = bpf_test_init(kattr, size, NET_SKB_PAD + NET_IP_ALIGN,
388 SKB_DATA_ALIGN(sizeof(struct skb_shared_info)));
390 return PTR_ERR(data);
392 ctx = bpf_ctx_init(kattr, sizeof(struct __sk_buff));
/* Program type determines L2 framing and direct packet access (the
 * assignments for the SCHED_* cases are on elided lines). */
398 switch (prog->type) {
399 case BPF_PROG_TYPE_SCHED_CLS:
400 case BPF_PROG_TYPE_SCHED_ACT:
403 case BPF_PROG_TYPE_LWT_IN:
404 case BPF_PROG_TYPE_LWT_OUT:
405 case BPF_PROG_TYPE_LWT_XMIT:
406 is_direct_pkt_access = true;
/* Minimal sock so programs touching skb->sk don't dereference NULL. */
412 sk = kzalloc(sizeof(struct sock), GFP_USER);
418 sock_net_set(sk, current->nsproxy->net_ns);
419 sock_init_data(NULL, sk);
421 skb = build_skb(data, 0);
430 skb_reserve(skb, NET_SKB_PAD + NET_IP_ALIGN);
431 __skb_put(skb, size);
432 skb->protocol = eth_type_trans(skb, current->nsproxy->net_ns->loopback_dev);
433 skb_reset_network_header(skb);
/* L2 programs expect the MAC header in the linear data; push it back
 * (eth_type_trans pulled it). */
436 __skb_push(skb, hh_len);
437 if (is_direct_pkt_access)
438 bpf_compute_data_pointers(skb);
439 ret = convert___skb_to_skb(skb, ctx);
442 ret = bpf_test_run(prog, skb, repeat, &retval, &duration, false);
/* The program may have consumed the MAC header's headroom; re-expand and
 * restore a zeroed L2 header so the output is well-formed. */
446 if (skb_headroom(skb) < hh_len) {
447 int nhead = HH_DATA_ALIGN(hh_len - skb_headroom(skb));
449 if (pskb_expand_head(skb, nhead, 0, GFP_USER)) {
454 memset(__skb_push(skb, hh_len), 0, hh_len);
456 convert_skb_to___skb(skb, ctx);
459 /* bpf program can never convert linear skb to non-linear */
460 if (WARN_ON_ONCE(skb_is_nonlinear(skb)))
461 size = skb_headlen(skb);
462 ret = bpf_test_finish(kattr, uattr, skb->data, size, retval, duration);
464 ret = bpf_ctx_finish(kattr, uattr, ctx,
465 sizeof(struct __sk_buff));
468 bpf_sk_storage_free(sk);
/*
 * bpf_prog_test_run_xdp - BPF_PROG_TEST_RUN handler for XDP programs:
 * builds an xdp_buff over a page-sized buffer, runs the program, and
 * copies the (possibly resized) packet back to user space.
 */
474 int bpf_prog_test_run_xdp(struct bpf_prog *prog, const union bpf_attr *kattr,
475 union bpf_attr __user *uattr)
477 u32 tailroom = SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
478 u32 headroom = XDP_PACKET_HEADROOM;
479 u32 size = kattr->test.data_size_in;
480 u32 repeat = kattr->test.repeat;
481 struct netdev_rx_queue *rxqueue;
482 struct xdp_buff xdp = {};
483 u32 retval, duration;
/* XDP test runs take no user-supplied context. */
488 if (kattr->test.ctx_in || kattr->test.ctx_out)
491 /* XDP have extra tailroom as (most) drivers use full page */
/* NOTE(review): hard-coded 4096 assumes a 4K PAGE_SIZE — verify whether
 * this should be PAGE_SIZE on architectures with larger pages. */
492 max_data_sz = 4096 - headroom - tailroom;
494 data = bpf_test_init(kattr, max_data_sz, headroom, tailroom);
496 return PTR_ERR(data);
/* Lay the xdp_buff out over the buffer: headroom, packet, tailroom. */
498 xdp.data_hard_start = data;
499 xdp.data = data + headroom;
500 xdp.data_meta = xdp.data;
501 xdp.data_end = xdp.data + size;
502 xdp.frame_sz = headroom + max_data_sz + tailroom;
/* Attach the loopback device's rx queue so programs reading rxq work. */
504 rxqueue = __netif_get_rx_queue(current->nsproxy->net_ns->loopback_dev, 0);
505 xdp.rxq = &rxqueue->xdp_rxq;
/* Let dependent code (e.g. dispatcher) see an XDP program "attached"
 * for the duration of the run; undone below. */
506 bpf_prog_change_xdp(NULL, prog);
507 ret = bpf_test_run(prog, &xdp, repeat, &retval, &duration, true);
/* Recompute size if the program moved data/data_end (head/tail adjust). */
510 if (xdp.data != data + headroom || xdp.data_end != xdp.data + size)
511 size = xdp.data_end - xdp.data;
512 ret = bpf_test_finish(kattr, uattr, xdp.data, size, retval, duration);
514 bpf_prog_change_xdp(prog, NULL);
/*
 * verify_user_bpf_flow_keys - reject a user-supplied flow_keys context
 * unless every field other than `flags` is zero (error returns between
 * the checks are elided, presumably -EINVAL).
 */
519 static int verify_user_bpf_flow_keys(struct bpf_flow_keys *ctx)
521 /* make sure the fields we don't use are zeroed */
522 if (!range_is_zero(ctx, 0, offsetof(struct bpf_flow_keys, flags)))
525 /* flags is allowed */
527 if (!range_is_zero(ctx, offsetofend(struct bpf_flow_keys, flags),
528 sizeof(struct bpf_flow_keys)))
534 int bpf_prog_test_run_flow_dissector(struct bpf_prog *prog,
535 const union bpf_attr *kattr,
536 union bpf_attr __user *uattr)
538 u32 size = kattr->test.data_size_in;
539 struct bpf_flow_dissector ctx = {};
540 u32 repeat = kattr->test.repeat;
541 struct bpf_flow_keys *user_ctx;
542 struct bpf_flow_keys flow_keys;
543 u64 time_start, time_spent = 0;
544 const struct ethhdr *eth;
545 unsigned int flags = 0;
546 u32 retval, duration;
551 if (prog->type != BPF_PROG_TYPE_FLOW_DISSECTOR)
557 data = bpf_test_init(kattr, size, 0, 0);
559 return PTR_ERR(data);
561 eth = (struct ethhdr *)data;
566 user_ctx = bpf_ctx_init(kattr, sizeof(struct bpf_flow_keys));
567 if (IS_ERR(user_ctx)) {
569 return PTR_ERR(user_ctx);
572 ret = verify_user_bpf_flow_keys(user_ctx);
575 flags = user_ctx->flags;
578 ctx.flow_keys = &flow_keys;
580 ctx.data_end = (__u8 *)data + size;
584 time_start = ktime_get_ns();
585 for (i = 0; i < repeat; i++) {
586 retval = bpf_flow_dissect(prog, &ctx, eth->h_proto, ETH_HLEN,
589 if (signal_pending(current)) {
597 if (need_resched()) {
598 time_spent += ktime_get_ns() - time_start;
606 time_start = ktime_get_ns();
609 time_spent += ktime_get_ns() - time_start;
613 do_div(time_spent, repeat);
614 duration = time_spent > U32_MAX ? U32_MAX : (u32)time_spent;
616 ret = bpf_test_finish(kattr, uattr, &flow_keys, sizeof(flow_keys),
619 ret = bpf_ctx_finish(kattr, uattr, user_ctx,
620 sizeof(struct bpf_flow_keys));