// SPDX-License-Identifier: GPL-2.0-only
/* Copyright (c) 2017 Facebook
 */
#include <linux/bpf.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/etherdevice.h>
#include <linux/filter.h>
#include <linux/sched/signal.h>
#include <net/bpf_sk_storage.h>
#include <net/sock.h>
#include <net/tcp.h>
#include <net/net_namespace.h>
#include <linux/error-injection.h>
#include <linux/smp.h>
#include <linux/sock_diag.h>

#define CREATE_TRACE_POINTS
#include <trace/events/bpf_test_run.h>
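/* The helpers below implement the BPF_PROG_TEST_RUN command of the bpf(2)
 * syscall: user space supplies packet data and/or a context through
 * union bpf_attr, the program is run against it (optionally repeated for
 * timing), and the resulting data, return value and average duration are
 * copied back. A rough, illustrative user-space sketch using libbpf (not
 * part of this file, field names abbreviated):
 *
 *	DECLARE_LIBBPF_OPTS(bpf_test_run_opts, opts,
 *			    .data_in = pkt,
 *			    .data_size_in = sizeof(pkt),
 *			    .repeat = 100);
 *	err = bpf_prog_test_run_opts(prog_fd, &opts);
 *	// on success, opts.retval and opts.duration hold the results
 */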
struct bpf_test_timer {
	enum { NO_PREEMPT, NO_MIGRATE } mode;
	u32 i;
	u64 time_start, time_spent;
};
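/* Measures the average run time of a test program over a number of
 * iterations. Callers bracket the run loop with bpf_test_timer_enter()
 * and bpf_test_timer_leave() and drive it as:
 *
 *	bpf_test_timer_enter(&t);
 *	do {
 *		<run the program once>
 *	} while (bpf_test_timer_continue(&t, repeat, &err, &duration));
 *	bpf_test_timer_leave(&t);
 *
 * bpf_test_timer_continue() accumulates elapsed time, honours pending
 * signals and reschedule requests, and reports the average duration once
 * @repeat iterations have completed.
 */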
static void bpf_test_timer_enter(struct bpf_test_timer *t)
	__acquires(rcu)
{
	rcu_read_lock();
	if (t->mode == NO_PREEMPT)
		preempt_disable();
	else
		migrate_disable();

	t->time_start = ktime_get_ns();
}

static void bpf_test_timer_leave(struct bpf_test_timer *t)
	__releases(rcu)
{
	t->time_start = 0;

	if (t->mode == NO_PREEMPT)
		preempt_enable();
	else
		migrate_enable();

	rcu_read_unlock();
}
static bool bpf_test_timer_continue(struct bpf_test_timer *t, u32 repeat, int *err, u32 *duration)
	__must_hold(rcu)
{
	t->i++;
	if (t->i >= repeat) {
		/* We're done. */
		t->time_spent += ktime_get_ns() - t->time_start;
		do_div(t->time_spent, t->i);
		*duration = t->time_spent > U32_MAX ? U32_MAX : (u32)t->time_spent;
		*err = 0;
		goto reset;
	}

	if (signal_pending(current)) {
		/* During iteration: we've been cancelled, abort. */
		*err = -EINTR;
		goto reset;
	}

	if (need_resched()) {
		/* During iteration: we need to reschedule between runs. */
		t->time_spent += ktime_get_ns() - t->time_start;
		bpf_test_timer_leave(t);
		cond_resched();
		bpf_test_timer_enter(t);
	}

	/* Do another round. */
	return true;

reset:
	t->i = 0;
	return false;
}
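/* Run @prog against @ctx (an sk_buff or xdp_buff, selected by @xdp) for
 * @repeat iterations, with per-program cgroup storage set up for the run.
 * The program's last return code is reported through @retval and the
 * average run time in nanoseconds through @time.
 */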
static int bpf_test_run(struct bpf_prog *prog, void *ctx, u32 repeat,
			u32 *retval, u32 *time, bool xdp)
{
	struct bpf_cgroup_storage *storage[MAX_BPF_CGROUP_STORAGE_TYPE] = { NULL };
	struct bpf_test_timer t = { NO_MIGRATE };
	enum bpf_cgroup_storage_type stype;
	int ret;

	for_each_cgroup_storage_type(stype) {
		storage[stype] = bpf_cgroup_storage_alloc(prog, stype);
		if (IS_ERR(storage[stype])) {
			storage[stype] = NULL;
			for_each_cgroup_storage_type(stype)
				bpf_cgroup_storage_free(storage[stype]);
			return -ENOMEM;
		}
	}

	if (!repeat)
		repeat = 1;

	bpf_test_timer_enter(&t);
	do {
		bpf_cgroup_storage_set(storage);

		if (xdp)
			*retval = bpf_prog_run_xdp(prog, ctx);
		else
			*retval = BPF_PROG_RUN(prog, ctx);
	} while (bpf_test_timer_continue(&t, repeat, &ret, time));
	bpf_test_timer_leave(&t);

	for_each_cgroup_storage_type(stype)
		bpf_cgroup_storage_free(storage[stype]);

	return ret;
}
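/* Copy the resulting packet data, its size, the program's return value and
 * the measured duration back to user space. If the user supplied a smaller
 * data_size_out than the result, the copy is clamped and -ENOSPC is
 * returned so the caller can detect truncation.
 */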
static int bpf_test_finish(const union bpf_attr *kattr,
			   union bpf_attr __user *uattr, const void *data,
			   u32 size, u32 retval, u32 duration)
{
	void __user *data_out = u64_to_user_ptr(kattr->test.data_out);
	int err = -EFAULT;
	u32 copy_size = size;

	/* Clamp copy if the user has provided a size hint, but copy the full
	 * buffer if not to retain old behaviour.
	 */
	if (kattr->test.data_size_out &&
	    copy_size > kattr->test.data_size_out) {
		copy_size = kattr->test.data_size_out;
		err = -ENOSPC;
	}

	if (data_out && copy_to_user(data_out, data, copy_size))
		goto out;
	if (copy_to_user(&uattr->test.data_size_out, &size, sizeof(size)))
		goto out;
	if (copy_to_user(&uattr->test.retval, &retval, sizeof(retval)))
		goto out;
	if (copy_to_user(&uattr->test.duration, &duration, sizeof(duration)))
		goto out;
	if (err != -ENOSPC)
		err = 0;
out:
	trace_bpf_test_finish(&err);
	return err;
}
/* Integer types of various sizes and pointer combinations cover a variety
 * of architecture-dependent calling conventions. 7+ can be supported in
 * the future.
 */
__diag_push();
__diag_ignore(GCC, 8, "-Wmissing-prototypes",
	      "Global functions as their definitions will be in vmlinux BTF");
int noinline bpf_fentry_test1(int a)
{
	return a + 1;
}

int noinline bpf_fentry_test2(int a, u64 b)
{
	return a + b;
}

int noinline bpf_fentry_test3(char a, int b, u64 c)
{
	return a + b + c;
}

int noinline bpf_fentry_test4(void *a, char b, int c, u64 d)
{
	return (long)a + b + c + d;
}

int noinline bpf_fentry_test5(u64 a, void *b, short c, int d, u64 e)
{
	return a + (long)b + c + d + e;
}

int noinline bpf_fentry_test6(u64 a, void *b, short c, int d, void *e, u64 f)
{
	return a + (long)b + c + d + (long)e + f;
}

struct bpf_fentry_test_t {
	struct bpf_fentry_test_t *a;
};

int noinline bpf_fentry_test7(struct bpf_fentry_test_t *arg)
{
	return (long)arg;
}

int noinline bpf_fentry_test8(struct bpf_fentry_test_t *arg)
{
	return (long)arg->a;
}

int noinline bpf_modify_return_test(int a, int *b)
{
	*b += 1;
	return a + *b;
}
__diag_pop();

ALLOW_ERROR_INJECTION(bpf_modify_return_test, ERRNO);
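/* Allocate and populate the test packet buffer: @headroom bytes of
 * headroom, @size bytes of packet data (the first data_size_in bytes
 * copied from user space, the remainder zeroed) and @tailroom bytes of
 * tailroom, all of which must fit within a single page.
 */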
static void *bpf_test_init(const union bpf_attr *kattr, u32 size,
			   u32 headroom, u32 tailroom)
{
	void __user *data_in = u64_to_user_ptr(kattr->test.data_in);
	u32 user_size = kattr->test.data_size_in;
	void *data;

	if (size < ETH_HLEN || size > PAGE_SIZE - headroom - tailroom)
		return ERR_PTR(-EINVAL);

	if (user_size > size)
		return ERR_PTR(-EMSGSIZE);

	data = kzalloc(size + headroom + tailroom, GFP_USER);
	if (!data)
		return ERR_PTR(-ENOMEM);

	if (copy_from_user(data + headroom, data_in, user_size)) {
		kfree(data);
		return ERR_PTR(-EFAULT);
	}

	return data;
}
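/* Test runner for fentry/fexit and fmod_ret programs. The kernel calls the
 * bpf_fentry_test*() and bpf_modify_return_test() functions above with
 * known arguments and checks the expected results. The low 16 bits of the
 * reported retval carry bpf_modify_return_test()'s return value; the upper
 * 16 bits flag whether its side effect (the write through @b) was observed.
 */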
int bpf_prog_test_run_tracing(struct bpf_prog *prog,
			      const union bpf_attr *kattr,
			      union bpf_attr __user *uattr)
{
	struct bpf_fentry_test_t arg = {};
	u16 side_effect = 0, ret = 0;
	int b = 2, err = -EFAULT;
	u32 retval = 0;

	if (kattr->test.flags || kattr->test.cpu)
		return -EINVAL;

	switch (prog->expected_attach_type) {
	case BPF_TRACE_FENTRY:
	case BPF_TRACE_FEXIT:
		if (bpf_fentry_test1(1) != 2 ||
		    bpf_fentry_test2(2, 3) != 5 ||
		    bpf_fentry_test3(4, 5, 6) != 15 ||
		    bpf_fentry_test4((void *)7, 8, 9, 10) != 34 ||
		    bpf_fentry_test5(11, (void *)12, 13, 14, 15) != 65 ||
		    bpf_fentry_test6(16, (void *)17, 18, 19, (void *)20, 21) != 111 ||
		    bpf_fentry_test7((struct bpf_fentry_test_t *)0) != 0 ||
		    bpf_fentry_test8(&arg) != 0)
			goto out;
		break;
	case BPF_MODIFY_RETURN:
		ret = bpf_modify_return_test(1, &b);
		if (b != 2)
			side_effect = 1;
		break;
	default:
		goto out;
	}

	retval = ((u32)side_effect << 16) | ret;
	if (copy_to_user(&uattr->test.retval, &retval, sizeof(retval)))
		goto out;

	err = 0;
out:
	trace_bpf_test_finish(&err);
	return err;
}
struct bpf_raw_tp_test_run_info {
	struct bpf_prog *prog;
	void *ctx;
	u32 retval;
};

static void
__bpf_prog_test_run_raw_tp(void *data)
{
	struct bpf_raw_tp_test_run_info *info = data;

	rcu_read_lock();
	info->retval = BPF_PROG_RUN(info->prog, info->ctx);
	rcu_read_unlock();
}
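/* Test runner for raw tracepoint programs. The user-supplied context is
 * copied into the kernel and the program is run exactly once, either on
 * the current CPU or, when BPF_F_TEST_RUN_ON_CPU is set, on the CPU
 * requested in kattr->test.cpu via smp_call_function_single().
 */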
int bpf_prog_test_run_raw_tp(struct bpf_prog *prog,
			     const union bpf_attr *kattr,
			     union bpf_attr __user *uattr)
{
	void __user *ctx_in = u64_to_user_ptr(kattr->test.ctx_in);
	__u32 ctx_size_in = kattr->test.ctx_size_in;
	struct bpf_raw_tp_test_run_info info;
	int cpu = kattr->test.cpu, err = 0;
	int current_cpu;

	/* doesn't support data_in/out, ctx_out, duration, or repeat */
	if (kattr->test.data_in || kattr->test.data_out ||
	    kattr->test.ctx_out || kattr->test.duration ||
	    kattr->test.repeat)
		return -EINVAL;

	if (ctx_size_in < prog->aux->max_ctx_offset ||
	    ctx_size_in > MAX_BPF_FUNC_ARGS * sizeof(u64))
		return -EINVAL;

	if ((kattr->test.flags & BPF_F_TEST_RUN_ON_CPU) == 0 && cpu != 0)
		return -EINVAL;

	if (ctx_size_in) {
		info.ctx = kzalloc(ctx_size_in, GFP_USER);
		if (!info.ctx)
			return -ENOMEM;
		if (copy_from_user(info.ctx, ctx_in, ctx_size_in)) {
			err = -EFAULT;
			goto out;
		}
	} else {
		info.ctx = NULL;
	}

	info.prog = prog;

	current_cpu = get_cpu();
	if ((kattr->test.flags & BPF_F_TEST_RUN_ON_CPU) == 0 ||
	    cpu == current_cpu) {
		__bpf_prog_test_run_raw_tp(&info);
	} else if (cpu >= nr_cpu_ids || !cpu_online(cpu)) {
		/* smp_call_function_single() also checks cpu_online()
		 * after csd_lock(). However, since cpu is from user
		 * space, let's do an extra quick check to filter out
		 * invalid value before smp_call_function_single().
		 */
		err = -ENXIO;
	} else {
		err = smp_call_function_single(cpu, __bpf_prog_test_run_raw_tp,
					       &info, 1);
	}
	put_cpu();

	if (!err &&
	    copy_to_user(&uattr->test.retval, &info.retval, sizeof(u32)))
		err = -EFAULT;

out:
	kfree(info.ctx);
	return err;
}
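/* Copy the user-supplied context (if any) into a zeroed kernel buffer of
 * @max_size bytes. Input larger than @max_size is accepted only if the
 * excess bytes are zero (bpf_check_uarg_tail_zero()), so a newer, larger
 * context layout is still usable as long as the fields unknown to this
 * kernel are left unset.
 */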
static void *bpf_ctx_init(const union bpf_attr *kattr, u32 max_size)
{
	void __user *data_in = u64_to_user_ptr(kattr->test.ctx_in);
	void __user *data_out = u64_to_user_ptr(kattr->test.ctx_out);
	u32 size = kattr->test.ctx_size_in;
	void *data;
	int err;

	if (!data_in && !data_out)
		return NULL;

	data = kzalloc(max_size, GFP_USER);
	if (!data)
		return ERR_PTR(-ENOMEM);

	if (data_in) {
		err = bpf_check_uarg_tail_zero(data_in, max_size, size);
		if (err) {
			kfree(data);
			return ERR_PTR(err);
		}

		size = min_t(u32, max_size, size);
		if (copy_from_user(data, data_in, size)) {
			kfree(data);
			return ERR_PTR(-EFAULT);
		}
	}
	return data;
}
static int bpf_ctx_finish(const union bpf_attr *kattr,
			  union bpf_attr __user *uattr, const void *data,
			  u32 size)
{
	void __user *data_out = u64_to_user_ptr(kattr->test.ctx_out);
	int err = -EFAULT;
	u32 copy_size = size;

	if (!data || !data_out)
		return 0;

	if (copy_size > kattr->test.ctx_size_out) {
		copy_size = kattr->test.ctx_size_out;
		err = -ENOSPC;
	}

	if (copy_to_user(data_out, data, copy_size))
		goto out;
	if (copy_to_user(&uattr->test.ctx_size_out, &size, sizeof(size)))
		goto out;
	if (err != -ENOSPC)
		err = 0;
out:
	return err;
}
/**
 * range_is_zero - test whether buffer is initialized
 * @buf: buffer to check
 * @from: check from this position
 * @to: check up until (excluding) this position
 *
 * This function returns true if there is no non-zero byte
 * in the buf in the range [from, to).
 */
static inline bool range_is_zero(void *buf, size_t from, size_t to)
{
	return !memchr_inv((u8 *)buf + from, 0, to - from);
}
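/* Apply the user-provided __sk_buff fields to the skb under test. Only a
 * handful of fields (mark, priority, ifindex, cb, tstamp, wire_len,
 * gso_segs, gso_size) may be set; every other byte of the context must be
 * zero or the run is rejected with -EINVAL.
 */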
static int convert___skb_to_skb(struct sk_buff *skb, struct __sk_buff *__skb)
{
	struct qdisc_skb_cb *cb = (struct qdisc_skb_cb *)skb->cb;

	if (!__skb)
		return 0;

	/* make sure the fields we don't use are zeroed */
	if (!range_is_zero(__skb, 0, offsetof(struct __sk_buff, mark)))
		return -EINVAL;

	/* mark is allowed */

	if (!range_is_zero(__skb, offsetofend(struct __sk_buff, mark),
			   offsetof(struct __sk_buff, priority)))
		return -EINVAL;

	/* priority is allowed */

	if (!range_is_zero(__skb, offsetofend(struct __sk_buff, priority),
			   offsetof(struct __sk_buff, ifindex)))
		return -EINVAL;

	/* ifindex is allowed */

	if (!range_is_zero(__skb, offsetofend(struct __sk_buff, ifindex),
			   offsetof(struct __sk_buff, cb)))
		return -EINVAL;

	/* cb is allowed */

	if (!range_is_zero(__skb, offsetofend(struct __sk_buff, cb),
			   offsetof(struct __sk_buff, tstamp)))
		return -EINVAL;

	/* tstamp is allowed */
	/* wire_len is allowed */
	/* gso_segs is allowed */

	if (!range_is_zero(__skb, offsetofend(struct __sk_buff, gso_segs),
			   offsetof(struct __sk_buff, gso_size)))
		return -EINVAL;

	/* gso_size is allowed */

	if (!range_is_zero(__skb, offsetofend(struct __sk_buff, gso_size),
			   sizeof(struct __sk_buff)))
		return -EINVAL;

	skb->mark = __skb->mark;
	skb->priority = __skb->priority;
	skb->tstamp = __skb->tstamp;
	memcpy(&cb->data, __skb->cb, QDISC_CB_PRIV_LEN);

	if (__skb->wire_len == 0) {
		cb->pkt_len = skb->len;
	} else {
		if (__skb->wire_len < skb->len ||
		    __skb->wire_len > GSO_MAX_SIZE)
			return -EINVAL;
		cb->pkt_len = __skb->wire_len;
	}

	if (__skb->gso_segs > GSO_MAX_SEGS)
		return -EINVAL;
	skb_shinfo(skb)->gso_segs = __skb->gso_segs;
	skb_shinfo(skb)->gso_size = __skb->gso_size;

	return 0;
}
static void convert_skb_to___skb(struct sk_buff *skb, struct __sk_buff *__skb)
{
	struct qdisc_skb_cb *cb = (struct qdisc_skb_cb *)skb->cb;

	if (!__skb)
		return;

	__skb->mark = skb->mark;
	__skb->priority = skb->priority;
	__skb->ifindex = skb->dev->ifindex;
	__skb->tstamp = skb->tstamp;
	memcpy(__skb->cb, &cb->data, QDISC_CB_PRIV_LEN);
	__skb->wire_len = cb->pkt_len;
	__skb->gso_segs = skb_shinfo(skb)->gso_segs;
}
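/* Test runner for skb-based program types (TC, LWT, ...). Builds a real
 * sk_buff around the user-supplied packet, attaches a dummy socket and the
 * requested device, runs the program, and returns the (possibly modified)
 * packet data and __sk_buff context to user space.
 */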
int bpf_prog_test_run_skb(struct bpf_prog *prog, const union bpf_attr *kattr,
			  union bpf_attr __user *uattr)
{
	bool is_l2 = false, is_direct_pkt_access = false;
	struct net *net = current->nsproxy->net_ns;
	struct net_device *dev = net->loopback_dev;
	u32 size = kattr->test.data_size_in;
	u32 repeat = kattr->test.repeat;
	struct __sk_buff *ctx = NULL;
	u32 retval, duration;
	int hh_len = ETH_HLEN;
	struct sk_buff *skb;
	struct sock *sk;
	void *data;
	int ret;

	if (kattr->test.flags || kattr->test.cpu)
		return -EINVAL;

	data = bpf_test_init(kattr, size, NET_SKB_PAD + NET_IP_ALIGN,
			     SKB_DATA_ALIGN(sizeof(struct skb_shared_info)));
	if (IS_ERR(data))
		return PTR_ERR(data);

	ctx = bpf_ctx_init(kattr, sizeof(struct __sk_buff));
	if (IS_ERR(ctx)) {
		kfree(data);
		return PTR_ERR(ctx);
	}

	switch (prog->type) {
	case BPF_PROG_TYPE_SCHED_CLS:
	case BPF_PROG_TYPE_SCHED_ACT:
		is_l2 = true;
		fallthrough;
	case BPF_PROG_TYPE_LWT_IN:
	case BPF_PROG_TYPE_LWT_OUT:
	case BPF_PROG_TYPE_LWT_XMIT:
		is_direct_pkt_access = true;
		break;
	default:
		break;
	}

	sk = kzalloc(sizeof(struct sock), GFP_USER);
	if (!sk) {
		kfree(data);
		kfree(ctx);
		return -ENOMEM;
	}
	sock_net_set(sk, net);
	sock_init_data(NULL, sk);

	skb = build_skb(data, 0);
	if (!skb) {
		kfree(data);
		kfree(ctx);
		kfree(sk);
		return -ENOMEM;
	}
	skb->sk = sk;

	skb_reserve(skb, NET_SKB_PAD + NET_IP_ALIGN);
	__skb_put(skb, size);
	if (ctx && ctx->ifindex > 1) {
		dev = dev_get_by_index(net, ctx->ifindex);
		if (!dev) {
			ret = -ENODEV;
			goto out;
		}
	}
	skb->protocol = eth_type_trans(skb, dev);
	skb_reset_network_header(skb);

	switch (skb->protocol) {
	case htons(ETH_P_IP):
		sk->sk_family = AF_INET;
		if (sizeof(struct iphdr) <= skb_headlen(skb)) {
			sk->sk_rcv_saddr = ip_hdr(skb)->saddr;
			sk->sk_daddr = ip_hdr(skb)->daddr;
		}
		break;
#if IS_ENABLED(CONFIG_IPV6)
	case htons(ETH_P_IPV6):
		sk->sk_family = AF_INET6;
		if (sizeof(struct ipv6hdr) <= skb_headlen(skb)) {
			sk->sk_v6_rcv_saddr = ipv6_hdr(skb)->saddr;
			sk->sk_v6_daddr = ipv6_hdr(skb)->daddr;
		}
		break;
#endif
	default:
		break;
	}

	if (is_l2)
		__skb_push(skb, hh_len);
	if (is_direct_pkt_access)
		bpf_compute_data_pointers(skb);
	ret = convert___skb_to_skb(skb, ctx);
	if (ret)
		goto out;
	ret = bpf_test_run(prog, skb, repeat, &retval, &duration, false);
	if (ret)
		goto out;
	if (!is_l2) {
		if (skb_headroom(skb) < hh_len) {
			int nhead = HH_DATA_ALIGN(hh_len - skb_headroom(skb));

			if (pskb_expand_head(skb, nhead, 0, GFP_USER)) {
				ret = -ENOMEM;
				goto out;
			}
		}
		memset(__skb_push(skb, hh_len), 0, hh_len);
	}
	convert_skb_to___skb(skb, ctx);

	size = skb->len;
	/* bpf program can never convert linear skb to non-linear */
	if (WARN_ON_ONCE(skb_is_nonlinear(skb)))
		size = skb_headlen(skb);
	ret = bpf_test_finish(kattr, uattr, skb->data, size, retval, duration);
	if (!ret)
		ret = bpf_ctx_finish(kattr, uattr, ctx,
				     sizeof(struct __sk_buff));
out:
	if (dev && dev != net->loopback_dev)
		dev_put(dev);
	kfree_skb(skb);
	bpf_sk_storage_free(sk);
	kfree(sk);
	kfree(ctx);
	return ret;
}
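/* Test runner for XDP programs. The packet is placed in a page-sized
 * buffer with XDP_PACKET_HEADROOM in front and skb_shared_info-sized
 * tailroom behind, mimicking what most drivers provide, so the program can
 * grow or shrink the packet with the usual head/tail adjustments.
 */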
int bpf_prog_test_run_xdp(struct bpf_prog *prog, const union bpf_attr *kattr,
			  union bpf_attr __user *uattr)
{
	u32 tailroom = SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
	u32 headroom = XDP_PACKET_HEADROOM;
	u32 size = kattr->test.data_size_in;
	u32 repeat = kattr->test.repeat;
	struct netdev_rx_queue *rxqueue;
	struct xdp_buff xdp = {};
	u32 retval, duration;
	u32 max_data_sz;
	void *data;
	int ret;

	if (kattr->test.ctx_in || kattr->test.ctx_out)
		return -EINVAL;

	/* XDP has extra tailroom as (most) drivers use full page */
	max_data_sz = 4096 - headroom - tailroom;

	data = bpf_test_init(kattr, max_data_sz, headroom, tailroom);
	if (IS_ERR(data))
		return PTR_ERR(data);

	rxqueue = __netif_get_rx_queue(current->nsproxy->net_ns->loopback_dev, 0);
	xdp_init_buff(&xdp, headroom + max_data_sz + tailroom,
		      &rxqueue->xdp_rxq);
	xdp_prepare_buff(&xdp, data, headroom, size, true);

	bpf_prog_change_xdp(NULL, prog);
	ret = bpf_test_run(prog, &xdp, repeat, &retval, &duration, true);
	if (ret)
		goto out;
	if (xdp.data != data + headroom || xdp.data_end != xdp.data + size)
		size = xdp.data_end - xdp.data;
	ret = bpf_test_finish(kattr, uattr, xdp.data, size, retval, duration);
out:
	bpf_prog_change_xdp(prog, NULL);
	kfree(data);
	return ret;
}
static int verify_user_bpf_flow_keys(struct bpf_flow_keys *ctx)
{
	/* make sure the fields we don't use are zeroed */
	if (!range_is_zero(ctx, 0, offsetof(struct bpf_flow_keys, flags)))
		return -EINVAL;

	/* flags is allowed */

	if (!range_is_zero(ctx, offsetofend(struct bpf_flow_keys, flags),
			   sizeof(struct bpf_flow_keys)))
		return -EINVAL;

	return 0;
}
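/* Test runner for flow dissector programs. The packet is dissected
 * starting at the Ethernet header and the resulting bpf_flow_keys are
 * returned as the output data; dissection flags may be passed in through
 * the optional context.
 */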
int bpf_prog_test_run_flow_dissector(struct bpf_prog *prog,
				     const union bpf_attr *kattr,
				     union bpf_attr __user *uattr)
{
	struct bpf_test_timer t = { NO_PREEMPT };
	u32 size = kattr->test.data_size_in;
	struct bpf_flow_dissector ctx = {};
	u32 repeat = kattr->test.repeat;
	struct bpf_flow_keys *user_ctx;
	struct bpf_flow_keys flow_keys;
	const struct ethhdr *eth;
	unsigned int flags = 0;
	u32 retval, duration;
	void *data;
	int ret;

	if (prog->type != BPF_PROG_TYPE_FLOW_DISSECTOR)
		return -EINVAL;

	if (kattr->test.flags || kattr->test.cpu)
		return -EINVAL;

	if (size < ETH_HLEN)
		return -EINVAL;

	data = bpf_test_init(kattr, size, 0, 0);
	if (IS_ERR(data))
		return PTR_ERR(data);

	eth = (struct ethhdr *)data;

	if (!repeat)
		repeat = 1;

	user_ctx = bpf_ctx_init(kattr, sizeof(struct bpf_flow_keys));
	if (IS_ERR(user_ctx)) {
		kfree(data);
		return PTR_ERR(user_ctx);
	}
	if (user_ctx) {
		ret = verify_user_bpf_flow_keys(user_ctx);
		if (ret)
			goto out;
		flags = user_ctx->flags;
	}

	ctx.flow_keys = &flow_keys;
	ctx.data = data;
	ctx.data_end = (__u8 *)data + size;

	bpf_test_timer_enter(&t);
	do {
		retval = bpf_flow_dissect(prog, &ctx, eth->h_proto, ETH_HLEN,
					  size, flags);
	} while (bpf_test_timer_continue(&t, repeat, &ret, &duration));
	bpf_test_timer_leave(&t);

	if (ret < 0)
		goto out;

	ret = bpf_test_finish(kattr, uattr, &flow_keys, sizeof(flow_keys),
			      retval, duration);
	if (!ret)
		ret = bpf_ctx_finish(kattr, uattr, user_ctx,
				     sizeof(struct bpf_flow_keys));

out:
	kfree(user_ctx);
	kfree(data);
	return ret;
}
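/* Test runner for sk_lookup programs. The bpf_sk_lookup context supplied by
 * user space describes the lookup tuple; the program is run through a
 * one-element program array (as it would be on a real lookup) and the
 * cookie of the socket it selected, if any, is reported back in the
 * context.
 */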
int bpf_prog_test_run_sk_lookup(struct bpf_prog *prog, const union bpf_attr *kattr,
				union bpf_attr __user *uattr)
{
	struct bpf_test_timer t = { NO_PREEMPT };
	struct bpf_prog_array *progs = NULL;
	struct bpf_sk_lookup_kern ctx = {};
	u32 repeat = kattr->test.repeat;
	struct bpf_sk_lookup *user_ctx;
	u32 retval, duration;
	int ret = -EINVAL;

	if (prog->type != BPF_PROG_TYPE_SK_LOOKUP)
		return -EINVAL;

	if (kattr->test.flags || kattr->test.cpu)
		return -EINVAL;

	if (kattr->test.data_in || kattr->test.data_size_in || kattr->test.data_out ||
	    kattr->test.data_size_out)
		return -EINVAL;

	if (!repeat)
		repeat = 1;

	user_ctx = bpf_ctx_init(kattr, sizeof(*user_ctx));
	if (IS_ERR(user_ctx))
		return PTR_ERR(user_ctx);

	if (!user_ctx)
		return -EINVAL;

	if (user_ctx->sk)
		goto out;

	if (!range_is_zero(user_ctx, offsetofend(typeof(*user_ctx), local_port), sizeof(*user_ctx)))
		goto out;

	if (user_ctx->local_port > U16_MAX || user_ctx->remote_port > U16_MAX) {
		ret = -ERANGE;
		goto out;
	}

	ctx.family = (u16)user_ctx->family;
	ctx.protocol = (u16)user_ctx->protocol;
	ctx.dport = (u16)user_ctx->local_port;
	ctx.sport = (__force __be16)user_ctx->remote_port;

	switch (ctx.family) {
	case AF_INET:
		ctx.v4.daddr = (__force __be32)user_ctx->local_ip4;
		ctx.v4.saddr = (__force __be32)user_ctx->remote_ip4;
		break;

#if IS_ENABLED(CONFIG_IPV6)
	case AF_INET6:
		ctx.v6.daddr = (struct in6_addr *)user_ctx->local_ip6;
		ctx.v6.saddr = (struct in6_addr *)user_ctx->remote_ip6;
		break;
#endif

	default:
		ret = -EAFNOSUPPORT;
		goto out;
	}

	progs = bpf_prog_array_alloc(1, GFP_KERNEL);
	if (!progs) {
		ret = -ENOMEM;
		goto out;
	}

	progs->items[0].prog = prog;

	bpf_test_timer_enter(&t);
	do {
		ctx.selected_sk = NULL;
		retval = BPF_PROG_SK_LOOKUP_RUN_ARRAY(progs, ctx, BPF_PROG_RUN);
	} while (bpf_test_timer_continue(&t, repeat, &ret, &duration));
	bpf_test_timer_leave(&t);

	if (ret < 0)
		goto out;

	user_ctx->cookie = 0;
	if (ctx.selected_sk) {
		if (ctx.selected_sk->sk_reuseport && !ctx.no_reuseport) {
			ret = -EOPNOTSUPP;
			goto out;
		}

		user_ctx->cookie = sock_gen_cookie(ctx.selected_sk);
	}

	ret = bpf_test_finish(kattr, uattr, NULL, 0, retval, duration);
	if (!ret)
		ret = bpf_ctx_finish(kattr, uattr, user_ctx, sizeof(*user_ctx));

out:
	bpf_prog_array_free(progs);
	kfree(user_ctx);
	return ret;
}