1 // SPDX-License-Identifier: GPL-2.0
2 #include <test_progs.h>
3 #include <network_helpers.h>
6 #include <linux/if_tun.h>
9 #include "bpf_flow.skel.h"
/*
 * CHECK_FLOW_KEYS - fail test @desc if the dissected bpf_flow_keys @got
 * differs byte-wise (memcmp) from @expected, printing each field as a
 * got/expected pair (nhoff, thoff, addr_proto, frag bits, encap flag,
 * ip_proto, n_proto, flow_label, sport, dport).
 * NOTE(review): this chunk looks sampled/truncated -- several
 * continuation lines of the format string are not visible here, so no
 * comments are inserted inside the backslash-continued macro body.
 */
15 #define CHECK_FLOW_KEYS(desc, got, expected) \
16 CHECK_ATTR(memcmp(&got, &expected, sizeof(got)) != 0, \
20 "addr_proto=0x%x/0x%x " \
22 "is_first_frag=%u/%u " \
24 "ip_proto=0x%x/0x%x " \
25 "n_proto=0x%x/0x%x " \
26 "flow_label=0x%x/0x%x " \
29 got.nhoff, expected.nhoff, \
30 got.thoff, expected.thoff, \
31 got.addr_proto, expected.addr_proto, \
32 got.is_frag, expected.is_frag, \
33 got.is_first_frag, expected.is_first_frag, \
34 got.is_encap, expected.is_encap, \
35 got.ip_proto, expected.ip_proto, \
36 got.n_proto, expected.n_proto, \
37 got.flow_label, expected.flow_label, \
38 got.sport, expected.sport, \
39 got.dport, expected.dport)
/*
 * Packet templates and per-test bookkeeping.
 * NOTE(review): this chunk is sampled -- only fragments of the struct
 * definitions below are visible; most headers, fields and closing
 * braces are elided.
 */
/* Inner IPv4 header -- presumably part of an IPIP packet template whose
 * enclosing struct is not visible here; confirm against full source.
 */
50 struct iphdr iph_inner;
/* Single-tagged (802.1Q) VLAN + IPv4 frame template. */
54 struct svlan_ipv4_pkt {
/* IPv6 frame carrying a fragment extension header. */
68 struct ipv6_frag_pkt {
75 __be32 identification;
/* Double-tagged (802.1AD outer) VLAN + IPv6 frame template. */
80 struct dvlan_ipv6_pkt {
/* Presumably members of the per-test packet union -- its header and
 * remaining members are elided in this chunk.
 */
94 struct svlan_ipv4_pkt svlan_ipv4;
97 struct ipv6_frag_pkt ipv6_frag;
98 struct dvlan_ipv6_pkt dvlan_ipv6;
/* Expected dissector output for one test case. */
100 struct bpf_flow_keys keys;
/* NOTE(review): presumably consumed by the CHECK()/CHECK_ATTR() macros
 * from test_progs.h -- confirm against the macro definitions.
 */
106 static __u32 duration;
/*
 * Flow dissector test matrix: each entry pairs an input packet with the
 * bpf_flow_keys the BPF dissector must produce, plus optional input
 * flags (BPF_FLOW_DISSECTOR_F_*).
 * NOTE(review): chunk is sampled -- entry delimiters, .pkt/.keys
 * sub-object braces and several .name fields are not visible.
 */
107 struct test tests[] = {
/* Plain IPv4/TCP (name line not visible). */
111 .eth.h_proto = __bpf_constant_htons(ETH_P_IP),
113 .iph.protocol = IPPROTO_TCP,
114 .iph.tot_len = __bpf_constant_htons(MAGIC_BYTES),
121 .thoff = ETH_HLEN + sizeof(struct iphdr),
122 .addr_proto = ETH_P_IP,
123 .ip_proto = IPPROTO_TCP,
124 .n_proto = __bpf_constant_htons(ETH_P_IP),
/* Plain IPv6/TCP (name line not visible). */
132 .eth.h_proto = __bpf_constant_htons(ETH_P_IPV6),
133 .iph.nexthdr = IPPROTO_TCP,
134 .iph.payload_len = __bpf_constant_htons(MAGIC_BYTES),
141 .thoff = ETH_HLEN + sizeof(struct ipv6hdr),
142 .addr_proto = ETH_P_IPV6,
143 .ip_proto = IPPROTO_TCP,
144 .n_proto = __bpf_constant_htons(ETH_P_IPV6),
/* Single 802.1Q tag: offsets shift by one VLAN_HLEN. */
150 .name = "802.1q-ipv4",
152 .eth.h_proto = __bpf_constant_htons(ETH_P_8021Q),
153 .vlan_proto = __bpf_constant_htons(ETH_P_IP),
155 .iph.protocol = IPPROTO_TCP,
156 .iph.tot_len = __bpf_constant_htons(MAGIC_BYTES),
162 .nhoff = ETH_HLEN + VLAN_HLEN,
163 .thoff = ETH_HLEN + VLAN_HLEN + sizeof(struct iphdr),
164 .addr_proto = ETH_P_IP,
165 .ip_proto = IPPROTO_TCP,
166 .n_proto = __bpf_constant_htons(ETH_P_IP),
/* Double-tagged (802.1AD outer, 802.1Q inner): offsets shift by 2 tags. */
172 .name = "802.1ad-ipv6",
174 .eth.h_proto = __bpf_constant_htons(ETH_P_8021AD),
175 .vlan_proto = __bpf_constant_htons(ETH_P_8021Q),
176 .vlan_proto2 = __bpf_constant_htons(ETH_P_IPV6),
177 .iph.nexthdr = IPPROTO_TCP,
178 .iph.payload_len = __bpf_constant_htons(MAGIC_BYTES),
184 .nhoff = ETH_HLEN + VLAN_HLEN * 2,
185 .thoff = ETH_HLEN + VLAN_HLEN * 2 +
186 sizeof(struct ipv6hdr),
187 .addr_proto = ETH_P_IPV6,
188 .ip_proto = IPPROTO_TCP,
189 .n_proto = __bpf_constant_htons(ETH_P_IPV6),
/* IPv4 first fragment (IP_MF set), dissected with PARSE_1ST_FRAG
 * (name line not visible in this chunk).
 */
197 .eth.h_proto = __bpf_constant_htons(ETH_P_IP),
199 .iph.protocol = IPPROTO_TCP,
200 .iph.tot_len = __bpf_constant_htons(MAGIC_BYTES),
201 .iph.frag_off = __bpf_constant_htons(IP_MF),
207 .flags = BPF_FLOW_DISSECTOR_F_PARSE_1ST_FRAG,
209 .thoff = ETH_HLEN + sizeof(struct iphdr),
210 .addr_proto = ETH_P_IP,
211 .ip_proto = IPPROTO_TCP,
212 .n_proto = __bpf_constant_htons(ETH_P_IP),
214 .is_first_frag = true,
218 .flags = BPF_FLOW_DISSECTOR_F_PARSE_1ST_FRAG,
/* Same fragmented IPv4 packet, dissected without PARSE_1ST_FRAG. */
221 .name = "ipv4-no-frag",
223 .eth.h_proto = __bpf_constant_htons(ETH_P_IP),
225 .iph.protocol = IPPROTO_TCP,
226 .iph.tot_len = __bpf_constant_htons(MAGIC_BYTES),
227 .iph.frag_off = __bpf_constant_htons(IP_MF),
234 .thoff = ETH_HLEN + sizeof(struct iphdr),
235 .addr_proto = ETH_P_IP,
236 .ip_proto = IPPROTO_TCP,
237 .n_proto = __bpf_constant_htons(ETH_P_IP),
239 .is_first_frag = true,
/* IPv6 with fragment extension header, PARSE_1ST_FRAG set; thoff lands
 * past both the IPv6 header and the frag header (name line not visible).
 */
245 .eth.h_proto = __bpf_constant_htons(ETH_P_IPV6),
246 .iph.nexthdr = IPPROTO_FRAGMENT,
247 .iph.payload_len = __bpf_constant_htons(MAGIC_BYTES),
248 .ipf.nexthdr = IPPROTO_TCP,
254 .flags = BPF_FLOW_DISSECTOR_F_PARSE_1ST_FRAG,
256 .thoff = ETH_HLEN + sizeof(struct ipv6hdr) +
257 sizeof(struct frag_hdr),
258 .addr_proto = ETH_P_IPV6,
259 .ip_proto = IPPROTO_TCP,
260 .n_proto = __bpf_constant_htons(ETH_P_IPV6),
262 .is_first_frag = true,
266 .flags = BPF_FLOW_DISSECTOR_F_PARSE_1ST_FRAG,
/* Same fragmented IPv6 packet, dissected without PARSE_1ST_FRAG. */
269 .name = "ipv6-no-frag",
271 .eth.h_proto = __bpf_constant_htons(ETH_P_IPV6),
272 .iph.nexthdr = IPPROTO_FRAGMENT,
273 .iph.payload_len = __bpf_constant_htons(MAGIC_BYTES),
274 .ipf.nexthdr = IPPROTO_TCP,
281 .thoff = ETH_HLEN + sizeof(struct ipv6hdr) +
282 sizeof(struct frag_hdr),
283 .addr_proto = ETH_P_IPV6,
284 .ip_proto = IPPROTO_TCP,
285 .n_proto = __bpf_constant_htons(ETH_P_IPV6),
287 .is_first_frag = true,
/* IPv6 flow label 0xbeeef must be exported in the dissected keys. */
291 .name = "ipv6-flow-label",
293 .eth.h_proto = __bpf_constant_htons(ETH_P_IPV6),
294 .iph.nexthdr = IPPROTO_TCP,
295 .iph.payload_len = __bpf_constant_htons(MAGIC_BYTES),
296 .iph.flow_lbl = { 0xb, 0xee, 0xef },
303 .thoff = ETH_HLEN + sizeof(struct ipv6hdr),
304 .addr_proto = ETH_P_IPV6,
305 .ip_proto = IPPROTO_TCP,
306 .n_proto = __bpf_constant_htons(ETH_P_IPV6),
309 .flow_label = __bpf_constant_htonl(0xbeeef),
/* Same packet with STOP_AT_FLOW_LABEL: dissection halts at the label. */
313 .name = "ipv6-no-flow-label",
315 .eth.h_proto = __bpf_constant_htons(ETH_P_IPV6),
316 .iph.nexthdr = IPPROTO_TCP,
317 .iph.payload_len = __bpf_constant_htons(MAGIC_BYTES),
318 .iph.flow_lbl = { 0xb, 0xee, 0xef },
324 .flags = BPF_FLOW_DISSECTOR_F_STOP_AT_FLOW_LABEL,
326 .thoff = ETH_HLEN + sizeof(struct ipv6hdr),
327 .addr_proto = ETH_P_IPV6,
328 .ip_proto = IPPROTO_TCP,
329 .n_proto = __bpf_constant_htons(ETH_P_IPV6),
330 .flow_label = __bpf_constant_htonl(0xbeeef),
332 .flags = BPF_FLOW_DISSECTOR_F_STOP_AT_FLOW_LABEL,
/* IPIP tunnel: dissector walks through to the inner TCP header, so
 * thoff spans two IPv4 headers.
 */
335 .name = "ipip-encap",
337 .eth.h_proto = __bpf_constant_htons(ETH_P_IP),
339 .iph.protocol = IPPROTO_IPIP,
340 .iph.tot_len = __bpf_constant_htons(MAGIC_BYTES),
342 .iph_inner.protocol = IPPROTO_TCP,
344 __bpf_constant_htons(MAGIC_BYTES) -
345 sizeof(struct iphdr),
352 .thoff = ETH_HLEN + sizeof(struct iphdr) +
353 sizeof(struct iphdr),
354 .addr_proto = ETH_P_IP,
355 .ip_proto = IPPROTO_TCP,
356 .n_proto = __bpf_constant_htons(ETH_P_IP),
/* Same IPIP packet with STOP_AT_ENCAP: only the outer header is
 * dissected, so ip_proto stays IPPROTO_IPIP.
 */
363 .name = "ipip-no-encap",
365 .eth.h_proto = __bpf_constant_htons(ETH_P_IP),
367 .iph.protocol = IPPROTO_IPIP,
368 .iph.tot_len = __bpf_constant_htons(MAGIC_BYTES),
370 .iph_inner.protocol = IPPROTO_TCP,
372 __bpf_constant_htons(MAGIC_BYTES) -
373 sizeof(struct iphdr),
379 .flags = BPF_FLOW_DISSECTOR_F_STOP_AT_ENCAP,
381 .thoff = ETH_HLEN + sizeof(struct iphdr),
382 .addr_proto = ETH_P_IP,
383 .ip_proto = IPPROTO_IPIP,
384 .n_proto = __bpf_constant_htons(ETH_P_IP),
387 .flags = BPF_FLOW_DISSECTOR_F_STOP_AT_ENCAP,
/*
 * create_tap() - open /dev/net/tun and create a TAP interface named
 * @ifname via TUNSETIFF with IFF_TAP | IFF_NO_PI | IFF_NAPI |
 * IFF_NAPI_FRAGS, returning the tun fd used to inject frames.
 * NOTE(review): this chunk is sampled -- declarations, error paths and
 * the return statement of the function are elided.
 */
391 static int create_tap(const char *ifname)
394 .ifr_flags = IFF_TAP | IFF_NO_PI | IFF_NAPI | IFF_NAPI_FRAGS,
398 strncpy(ifr.ifr_name, ifname, sizeof(ifr.ifr_name));
400 fd = open("/dev/net/tun", O_RDWR);
404 ret = ioctl(fd, TUNSETIFF, &ifr);
/*
 * tx_tap() - inject one packet (@pkt, @len) into the tap device @fd with
 * a single writev(); returns writev()'s result (bytes written or -1).
 * NOTE(review): the iovec initializer lines are elided in this chunk.
 */
411 static int tx_tap(int fd, void *pkt, size_t len)
413 struct iovec iov[] = {
419 return writev(fd, iov, ARRAY_SIZE(iov));
/*
 * ifup() - bring network interface @ifname up: read the current flags
 * with SIOCGIFFLAGS over a PF_INET datagram socket, OR in IFF_UP, and
 * write them back with SIOCSIFFLAGS.
 * NOTE(review): declarations, error checks and the return statement are
 * elided in this sampled chunk.
 */
422 static int ifup(const char *ifname)
424 struct ifreq ifr = {};
427 strncpy(ifr.ifr_name, ifname, sizeof(ifr.ifr_name));
429 sk = socket(PF_INET, SOCK_DGRAM, 0);
433 ret = ioctl(sk, SIOCGIFFLAGS, &ifr);
439 ifr.ifr_flags |= IFF_UP;
440 ret = ioctl(sk, SIOCSIFFLAGS, &ifr);
/*
 * init_prog_array() - fill @prog_array so that slot i holds the fd of
 * the BPF program titled "flow_dissector/i" from @obj, for every index
 * up to the map's max_entries.  Presumably a prog-array map used for
 * tail calls between dissector stages -- confirm against the BPF side.
 * NOTE(review): buffer declaration, error checks and return are elided
 * in this sampled chunk.
 */
450 static int init_prog_array(struct bpf_object *obj, struct bpf_map *prog_array)
452 int i, err, map_fd, prog_fd;
453 struct bpf_program *prog;
456 map_fd = bpf_map__fd(prog_array);
460 for (i = 0; i < bpf_map__def(prog_array)->max_entries; i++) {
461 snprintf(prog_name, sizeof(prog_name), "flow_dissector/%i", i);
463 prog = bpf_object__find_program_by_title(obj, prog_name);
467 prog_fd = bpf_program__fd(prog);
471 err = bpf_map_update_elem(map_fd, &i, &prog_fd, BPF_ANY);
/*
 * run_tests_skb_less() - exercise the skb-less dissector path: inject
 * each eligible test packet through @tap_fd, read the dissected
 * bpf_flow_keys back out of the @keys map (keyed by sport shifted into
 * the high 16 bits -- the rest of the key expression is elided here;
 * presumably OR'd with dport), compare against the expected keys, then
 * delete the map entry.  Only tests whose .flags match the fixed
 * eth_get_headlen flag set are run, since input flags cannot be passed
 * on this path.
 * NOTE(review): sampled chunk -- several lines, including one comment
 * terminator, are elided; no comments inserted mid-body for that reason.
 */
478 static void run_tests_skb_less(int tap_fd, struct bpf_map *keys)
482 keys_fd = bpf_map__fd(keys);
483 if (CHECK(keys_fd < 0, "bpf_map__fd", "err %d\n", keys_fd))
486 for (i = 0; i < ARRAY_SIZE(tests); i++) {
487 /* Keep in sync with 'flags' from eth_get_headlen. */
488 __u32 eth_get_headlen_flags =
489 BPF_FLOW_DISSECTOR_F_PARSE_1ST_FRAG;
490 struct bpf_prog_test_run_attr tattr = {};
491 struct bpf_flow_keys flow_keys = {};
492 __u32 key = (__u32)(tests[i].keys.sport) << 16 |
495 /* For skb-less case we can't pass input flags; run
496 * only the tests that have a matching set of flags.
499 if (tests[i].flags != eth_get_headlen_flags)
502 err = tx_tap(tap_fd, &tests[i].pkt, sizeof(tests[i].pkt));
503 CHECK(err < 0, "tx_tap", "err %d errno %d\n", err, errno);
505 err = bpf_map_lookup_elem(keys_fd, &key, &flow_keys);
506 CHECK_ATTR(err, tests[i].name, "bpf_map_lookup_elem %d\n", err);
508 CHECK_ATTR(err, tests[i].name, "skb-less err %d\n", err);
509 CHECK_FLOW_KEYS(tests[i].name, flow_keys, tests[i].keys);
511 err = bpf_map_delete_elem(keys_fd, &key);
512 CHECK_ATTR(err, tests[i].name, "bpf_map_delete_elem %d\n", err);
/*
 * test_skb_less_prog_attach() - attach the _dissect program with the
 * legacy bpf_prog_attach(BPF_FLOW_DISSECTOR) API, run the skb-less
 * tests against the last_dissection map, then detach with
 * bpf_prog_detach2().
 * NOTE(review): variable declarations are elided in this sampled chunk.
 */
516 static void test_skb_less_prog_attach(struct bpf_flow *skel, int tap_fd)
520 prog_fd = bpf_program__fd(skel->progs._dissect);
521 if (CHECK(prog_fd < 0, "bpf_program__fd", "err %d\n", prog_fd))
524 err = bpf_prog_attach(prog_fd, 0, BPF_FLOW_DISSECTOR, 0);
525 if (CHECK(err, "bpf_prog_attach", "err %d errno %d\n", err, errno))
528 run_tests_skb_less(tap_fd, skel->maps.last_dissection);
530 err = bpf_prog_detach2(prog_fd, 0, BPF_FLOW_DISSECTOR);
531 CHECK(err, "bpf_prog_detach2", "err %d errno %d\n", err, errno);
/*
 * test_skb_less_link_create() - attach the _dissect program to the
 * current network namespace via a BPF link (bpf_program__attach_netns
 * on /proc/self/ns/net), run the skb-less tests, then destroy the link.
 * NOTE(review): some declarations and cleanup lines are elided in this
 * sampled chunk.
 */
534 static void test_skb_less_link_create(struct bpf_flow *skel, int tap_fd)
536 struct bpf_link *link;
539 net_fd = open("/proc/self/ns/net", O_RDONLY);
540 if (CHECK(net_fd < 0, "open(/proc/self/ns/net)", "err %d\n", errno))
543 link = bpf_program__attach_netns(skel->progs._dissect, net_fd);
544 if (CHECK(IS_ERR(link), "attach_netns", "err %ld\n", PTR_ERR(link)))
547 run_tests_skb_less(tap_fd, skel->maps.last_dissection);
549 err = bpf_link__destroy(link);
550 CHECK(err, "bpf_link__destroy", "err %d\n", err);
/*
 * test_flow_dissector() - test entry point.  Loads the bpf_flow
 * skeleton, populates its jmp_table prog array via init_prog_array(),
 * then (1) runs every entry of tests[] through bpf_prog_test_run_xattr
 * (passing per-test flags through the bpf_flow_keys ctx when set) and
 * compares the dissected keys, and (2) re-runs the flag-compatible
 * cases through a real tap device to cover the skb-less path.
 * NOTE(review): sampled chunk -- cleanup labels, the ifup() call that
 * sets err before line 609, and one comment terminator are elided; no
 * comments inserted near the truncated comment for that reason.
 */
555 void test_flow_dissector(void)
557 int i, err, prog_fd, keys_fd = -1, tap_fd;
558 struct bpf_flow *skel;
560 skel = bpf_flow__open_and_load();
561 if (CHECK(!skel, "skel", "failed to open/load skeleton\n"))
564 prog_fd = bpf_program__fd(skel->progs._dissect);
565 if (CHECK(prog_fd < 0, "bpf_program__fd", "err %d\n", prog_fd))
566 goto out_destroy_skel;
567 keys_fd = bpf_map__fd(skel->maps.last_dissection);
568 if (CHECK(keys_fd < 0, "bpf_map__fd", "err %d\n", keys_fd))
569 goto out_destroy_skel;
570 err = init_prog_array(skel->obj, skel->maps.jmp_table);
571 if (CHECK(err, "init_prog_array", "err %d\n", err))
572 goto out_destroy_skel;
/* skb mode: feed each test packet through BPF_PROG_TEST_RUN. */
574 for (i = 0; i < ARRAY_SIZE(tests); i++) {
575 struct bpf_flow_keys flow_keys;
576 struct bpf_prog_test_run_attr tattr = {
578 .data_in = &tests[i].pkt,
579 .data_size_in = sizeof(tests[i].pkt),
580 .data_out = &flow_keys,
582 static struct bpf_flow_keys ctx = {};
584 if (tests[i].flags) {
586 tattr.ctx_size_in = sizeof(ctx);
587 ctx.flags = tests[i].flags;
590 err = bpf_prog_test_run_xattr(&tattr);
591 CHECK_ATTR(tattr.data_size_out != sizeof(flow_keys) ||
592 err || tattr.retval != 1,
594 "err %d errno %d retval %d duration %d size %u/%lu\n",
595 err, errno, tattr.retval, tattr.duration,
596 tattr.data_size_out, sizeof(flow_keys));
597 CHECK_FLOW_KEYS(tests[i].name, flow_keys, tests[i].keys);
600 /* Do the same tests but for skb-less flow dissector.
601 * We use a known path in the net/tun driver that calls
602 * eth_get_headlen and we manually export bpf_flow_keys
603 * via BPF map in this case.
606 tap_fd = create_tap("tap0");
607 CHECK(tap_fd < 0, "create_tap", "tap_fd %d errno %d\n", tap_fd, errno);
609 CHECK(err, "ifup", "err %d errno %d\n", err, errno);
611 /* Test direct prog attachment */
612 test_skb_less_prog_attach(skel, tap_fd);
613 /* Test indirect prog attachment via link */
614 test_skb_less_link_create(skel, tap_fd);
618 bpf_flow__destroy(skel);