1 /* Copyright (c) 2017 Facebook
3 * This program is free software; you can redistribute it and/or
4 * modify it under the terms of version 2 of the GNU General Public
5 * License as published by the Free Software Foundation.
15 #include <linux/types.h>
16 typedef __u16 __sum16;
17 #include <arpa/inet.h>
18 #include <linux/if_ether.h>
19 #include <linux/if_packet.h>
21 #include <linux/ipv6.h>
22 #include <linux/tcp.h>
23 #include <linux/filter.h>
24 #include <linux/perf_event.h>
25 #include <linux/unistd.h>
27 #include <sys/ioctl.h>
29 #include <sys/types.h>
32 #include <linux/bpf.h>
33 #include <linux/err.h>
35 #include <bpf/libbpf.h>
37 #include "test_iptunnel_common.h"
39 #include "bpf_endian.h"
40 #include "bpf_rlimit.h"
/* Global pass/fail counters, updated by the CHECK() macro and reported
 * by main() at the end of the run.
 */
42 static int error_cnt, pass_cnt;
/* Payload length written into the IP headers of the test packets below. */
44 #define MAGIC_BYTES 123
/* NOTE(review): this listing is line-sampled; the declarations of the
 * pkt_v4/pkt_v6 test-vector structs themselves are absent -- only selected
 * designated-initializer lines are visible.  Both vectors set an Ethernet
 * protocol and an IP length of MAGIC_BYTES.
 */
46 /* ipv4 test vector */
52 .eth.h_proto = bpf_htons(ETH_P_IP),
55 .iph.tot_len = bpf_htons(MAGIC_BYTES),
59 /* ipv6 test vector */
65 .eth.h_proto = bpf_htons(ETH_P_IPV6),
67 .iph.payload_len = bpf_htons(MAGIC_BYTES),
/* CHECK(condition, tag, fmt...): evaluate @condition once; print a
 * FAIL line (tagged) when true, otherwise a PASS line with @duration.
 * The statement expression yields __ret so callers can branch on it.
 * NOTE(review): the branch bodies that bump error_cnt/pass_cnt and print
 * the format arguments are absent from this sampled listing -- the macro
 * presumably updates those counters; confirm against the full source.
 */
71 #define CHECK(condition, tag, format...) ({ \
72 int __ret = !!(condition); \
75 printf("%s:FAIL:%s ", __func__, tag); \
79 printf("%s:PASS:%s %d nsec\n", __func__, tag, duration);\
/* Find map @name inside loaded object @obj and return its fd.
 * On lookup failure a FAIL line tagged with @test is printed; the
 * error-path return statement is absent from this sampled listing.
 */
84 static int bpf_find_map(const char *test, struct bpf_object *obj,
89 map = bpf_object__find_map_by_name(obj, name);
91 printf("%s:FAIL:map '%s' not found\n", test, name);
95 return bpf_map__fd(map);
/* Load test_pkt_access.o as a SCHED_CLS program and run it 100000
 * iterations against both the ipv4 and ipv6 test packets; the program
 * is expected to succeed with retval 0 and errno 0 in both cases.
 */
98 static void test_pkt_access(void)
100 const char *file = "./test_pkt_access.o";
101 struct bpf_object *obj;
102 __u32 duration, retval;
105 err = bpf_prog_load(file, BPF_PROG_TYPE_SCHED_CLS, &obj, &prog_fd);
111 err = bpf_prog_test_run(prog_fd, 100000, &pkt_v4, sizeof(pkt_v4),
112 NULL, NULL, &retval, &duration);
113 CHECK(err || errno || retval, "ipv4",
114 "err %d errno %d retval %d duration %d\n",
115 err, errno, retval, duration);
117 err = bpf_prog_test_run(prog_fd, 100000, &pkt_v6, sizeof(pkt_v6),
118 NULL, NULL, &retval, &duration);
119 CHECK(err || errno || retval, "ipv6",
120 "err %d errno %d retval %d duration %d\n",
121 err, errno, retval, duration);
122 bpf_object__close(obj);
/* Load test_xdp.o (an IP tunnel XDP program), seed its vip2tnl map with
 * one TCP (protocol 6) vip entry per address family, then run it on the
 * ipv4 and ipv6 test packets.  Expected: XDP_TX with the packet grown by
 * an outer encapsulation header (74 / 114 bytes total) whose protocol
 * field is IPPROTO_IPIP / IPPROTO_IPV6 respectively.
 */
125 static void test_xdp(void)
127 struct vip key4 = {.protocol = 6, .family = AF_INET};
128 struct vip key6 = {.protocol = 6, .family = AF_INET6};
129 struct iptnl_info value4 = {.family = AF_INET};
130 struct iptnl_info value6 = {.family = AF_INET6};
131 const char *file = "./test_xdp.o";
132 struct bpf_object *obj;
/* Both header pointers alias the start of the output buffer past the
 * Ethernet header; which one is meaningful depends on the test packet.
 */
134 struct ipv6hdr *iph6 = (void *)buf + sizeof(struct ethhdr);
135 struct iphdr *iph = (void *)buf + sizeof(struct ethhdr);
136 __u32 duration, retval, size;
137 int err, prog_fd, map_fd;
139 err = bpf_prog_load(file, BPF_PROG_TYPE_XDP, &obj, &prog_fd);
145 map_fd = bpf_find_map(__func__, obj, "vip2tnl");
148 bpf_map_update_elem(map_fd, &key4, &value4, 0);
149 bpf_map_update_elem(map_fd, &key6, &value6, 0);
151 err = bpf_prog_test_run(prog_fd, 1, &pkt_v4, sizeof(pkt_v4),
152 buf, &size, &retval, &duration);
154 CHECK(err || errno || retval != XDP_TX || size != 74 ||
155 iph->protocol != IPPROTO_IPIP, "ipv4",
156 "err %d errno %d retval %d size %d\n",
157 err, errno, retval, size);
159 err = bpf_prog_test_run(prog_fd, 1, &pkt_v6, sizeof(pkt_v6),
160 buf, &size, &retval, &duration);
161 CHECK(err || errno || retval != XDP_TX || size != 114 ||
162 iph6->nexthdr != IPPROTO_IPV6, "ipv6",
163 "err %d errno %d retval %d size %d\n",
164 err, errno, retval, size);
166 bpf_object__close(obj);
/* Load test_adjust_tail.o (XDP) and run it on both test packets.
 * Expected: the ipv4 packet is dropped (XDP_DROP); the ipv6 packet is
 * transmitted (XDP_TX) after its tail is trimmed to 54 bytes.
 */
169 static void test_xdp_adjust_tail(void)
171 const char *file = "./test_adjust_tail.o";
172 struct bpf_object *obj;
174 __u32 duration, retval, size;
177 err = bpf_prog_load(file, BPF_PROG_TYPE_XDP, &obj, &prog_fd);
183 err = bpf_prog_test_run(prog_fd, 1, &pkt_v4, sizeof(pkt_v4),
184 buf, &size, &retval, &duration);
186 CHECK(err || errno || retval != XDP_DROP,
187 "ipv4", "err %d errno %d retval %d size %d\n",
188 err, errno, retval, size);
190 err = bpf_prog_test_run(prog_fd, 1, &pkt_v6, sizeof(pkt_v6),
191 buf, &size, &retval, &duration);
192 CHECK(err || errno || retval != XDP_TX || size != 54,
193 "ipv6", "err %d errno %d retval %d size %d\n",
194 err, errno, retval, size);
195 bpf_object__close(obj);
/* Sentinel destination value stored in the "reals" map and expected at
 * the head of the rewritten packet.
 */
200 #define MAGIC_VAL 0x1234
/* Number of bpf_prog_test_run repetitions per packet. */
201 #define NUM_ITER 100000
/* Load @file (an L4 load-balancer, SCHED_CLS flavor), seed its vip_map /
 * ch_rings / reals maps, run both test packets NUM_ITER times each, and
 * verify: retval 7 (TC_ACT_REDIRECT), rewritten size (54 ipv4 / 74 ipv6),
 * MAGIC_VAL at the packet head, and per-cpu byte/packet stats summing to
 * MAGIC_BYTES * NUM_ITER * 2 and NUM_ITER * 2.
 */
204 static void test_l4lb(const char *file)
206 unsigned int nr_cpus = bpf_num_possible_cpus();
207 struct vip key = {.protocol = 6};
211 } value = {.vip_num = VIP_NUM};
212 __u32 stats_key = VIP_NUM;
217 struct real_definition {
223 } real_def = {.dst = MAGIC_VAL};
224 __u32 ch_key = 11, real_num = 3;
225 __u32 duration, retval, size;
226 int err, i, prog_fd, map_fd;
227 __u64 bytes = 0, pkts = 0;
228 struct bpf_object *obj;
230 u32 *magic = (u32 *)buf;
232 err = bpf_prog_load(file, BPF_PROG_TYPE_SCHED_CLS, &obj, &prog_fd);
238 map_fd = bpf_find_map(__func__, obj, "vip_map");
241 bpf_map_update_elem(map_fd, &key, &value, 0);
243 map_fd = bpf_find_map(__func__, obj, "ch_rings");
246 bpf_map_update_elem(map_fd, &ch_key, &real_num, 0);
248 map_fd = bpf_find_map(__func__, obj, "reals");
251 bpf_map_update_elem(map_fd, &real_num, &real_def, 0);
253 err = bpf_prog_test_run(prog_fd, NUM_ITER, &pkt_v4, sizeof(pkt_v4),
254 buf, &size, &retval, &duration);
255 CHECK(err || errno || retval != 7/*TC_ACT_REDIRECT*/ || size != 54 ||
256 *magic != MAGIC_VAL, "ipv4",
257 "err %d errno %d retval %d size %d magic %x\n",
258 err, errno, retval, size, *magic);
260 err = bpf_prog_test_run(prog_fd, NUM_ITER, &pkt_v6, sizeof(pkt_v6),
261 buf, &size, &retval, &duration);
262 CHECK(err || errno || retval != 7/*TC_ACT_REDIRECT*/ || size != 74 ||
263 *magic != MAGIC_VAL, "ipv6",
264 "err %d errno %d retval %d size %d magic %x\n",
265 err, errno, retval, size, *magic);
/* Sum per-cpu counters; "stats" is a per-cpu map so the lookup fills
 * one slot per possible cpu.
 */
267 map_fd = bpf_find_map(__func__, obj, "stats");
270 bpf_map_lookup_elem(map_fd, &stats_key, stats);
271 for (i = 0; i < nr_cpus; i++) {
272 bytes += stats[i].bytes;
273 pkts += stats[i].pkts;
275 if (bytes != MAGIC_BYTES * NUM_ITER * 2 || pkts != NUM_ITER * 2) {
277 printf("test_l4lb:FAIL:stats %lld %lld\n", bytes, pkts);
280 bpf_object__close(obj);
/* Run test_l4lb() against both the inlined and the noinline builds of
 * the load balancer.  NOTE(review): the call statements themselves are
 * absent from this sampled listing.
 */
283 static void test_l4lb_all(void)
285 const char *file1 = "./test_l4lb.o";
286 const char *file2 = "./test_l4lb_noinline.o";
/* XDP variant of the L4 load-balancer test: same map seeding and stats
 * verification as test_l4lb(), but loads test_xdp_noinline.o as
 * BPF_PROG_TYPE_XDP and expects retval 1 (XDP_TX-level pass) with sizes
 * 54 (ipv4) and 74 (ipv6).
 */
292 static void test_xdp_noinline(void)
294 const char *file = "./test_xdp_noinline.o";
295 unsigned int nr_cpus = bpf_num_possible_cpus();
296 struct vip key = {.protocol = 6};
300 } value = {.vip_num = VIP_NUM};
301 __u32 stats_key = VIP_NUM;
306 struct real_definition {
312 } real_def = {.dst = MAGIC_VAL};
313 __u32 ch_key = 11, real_num = 3;
314 __u32 duration, retval, size;
315 int err, i, prog_fd, map_fd;
316 __u64 bytes = 0, pkts = 0;
317 struct bpf_object *obj;
319 u32 *magic = (u32 *)buf;
321 err = bpf_prog_load(file, BPF_PROG_TYPE_XDP, &obj, &prog_fd);
327 map_fd = bpf_find_map(__func__, obj, "vip_map");
330 bpf_map_update_elem(map_fd, &key, &value, 0);
332 map_fd = bpf_find_map(__func__, obj, "ch_rings");
335 bpf_map_update_elem(map_fd, &ch_key, &real_num, 0);
337 map_fd = bpf_find_map(__func__, obj, "reals");
340 bpf_map_update_elem(map_fd, &real_num, &real_def, 0);
342 err = bpf_prog_test_run(prog_fd, NUM_ITER, &pkt_v4, sizeof(pkt_v4),
343 buf, &size, &retval, &duration);
344 CHECK(err || errno || retval != 1 || size != 54 ||
345 *magic != MAGIC_VAL, "ipv4",
346 "err %d errno %d retval %d size %d magic %x\n",
347 err, errno, retval, size, *magic);
349 err = bpf_prog_test_run(prog_fd, NUM_ITER, &pkt_v6, sizeof(pkt_v6),
350 buf, &size, &retval, &duration);
351 CHECK(err || errno || retval != 1 || size != 74 ||
352 *magic != MAGIC_VAL, "ipv6",
353 "err %d errno %d retval %d size %d magic %x\n",
354 err, errno, retval, size, *magic);
/* Aggregate per-cpu stats, as in test_l4lb(). */
356 map_fd = bpf_find_map(__func__, obj, "stats");
359 bpf_map_lookup_elem(map_fd, &stats_key, stats);
360 for (i = 0; i < nr_cpus; i++) {
361 bytes += stats[i].bytes;
362 pkts += stats[i].pkts;
364 if (bytes != MAGIC_BYTES * NUM_ITER * 2 || pkts != NUM_ITER * 2) {
366 printf("test_xdp_noinline:FAIL:stats %lld %lld\n", bytes, pkts);
369 bpf_object__close(obj);
/* Smoke-test: load test_tcp_estats.o as a TRACEPOINT program and verify
 * the load succeeds; no packet is run.
 */
372 static void test_tcp_estats(void)
374 const char *file = "./test_tcp_estats.o";
376 struct bpf_object *obj;
379 err = bpf_prog_load(file, BPF_PROG_TYPE_TRACEPOINT, &obj, &prog_fd);
380 CHECK(err, "", "err %d errno %d\n", err, errno);
386 bpf_object__close(obj);
/* Convert a pointer to the __u64 representation used by bpf_attr fields
 * (the kernel ABI passes user pointers as 64-bit integers).
 */
389 static inline __u64 ptr_to_u64(const void *ptr)
391 return (__u64) (unsigned long) ptr;
/* Exercise the BPF object-id APIs: load nr_iters copies of test_obj_id.o,
 * then verify bpf_obj_get_info_by_fd() contents for both the prog and its
 * map (name, type, sizes, load time, owner uid, map ids), and walk
 * bpf_prog_get_next_id()/bpf_map_get_next_id() to confirm every loaded
 * object is discoverable by id.  JIT state is probed via
 * /proc/sys/net/core/bpf_jit_enable to decide whether jited_prog_len
 * must be non-zero.
 */
394 static void test_bpf_obj_id(void)
396 const __u64 array_magic_value = 0xfaceb00c;
397 const __u32 array_key = 0;
398 const int nr_iters = 2;
399 const char *file = "./test_obj_id.o";
400 const char *jit_sysctl = "/proc/sys/net/core/bpf_jit_enable";
401 const char *expected_prog_name = "test_obj_id";
402 const char *expected_map_name = "test_map_id";
403 const __u64 nsec_per_sec = 1000000000;
405 struct bpf_object *objs[nr_iters];
406 int prog_fds[nr_iters], map_fds[nr_iters];
407 /* +1 to test for the info_len returned by kernel */
408 struct bpf_prog_info prog_infos[nr_iters + 1];
409 struct bpf_map_info map_infos[nr_iters + 1];
410 /* Each prog only uses one map. +1 to test nr_map_ids
411 * returned by kernel.
413 __u32 map_ids[nr_iters + 1];
414 char jited_insns[128], xlated_insns[128], zeros[128];
415 __u32 i, next_id, info_len, nr_id_found, duration = 0;
416 struct timespec real_time_ts, boot_time_ts;
417 int sysctl_fd, jit_enabled = 0, err = 0;
419 uid_t my_uid = getuid();
420 time_t now, load_time;
/* Read one character of the sysctl; any value other than '0' means the
 * JIT is enabled.
 */
422 sysctl_fd = open(jit_sysctl, 0, O_RDONLY);
423 if (sysctl_fd != -1) {
426 if (read(sysctl_fd, &tmpc, sizeof(tmpc)) == 1)
427 jit_enabled = (tmpc != '0');
/* Looking up id 0 must fail with ENOENT for both progs and maps. */
431 err = bpf_prog_get_fd_by_id(0);
432 CHECK(err >= 0 || errno != ENOENT,
433 "get-fd-by-notexist-prog-id", "err %d errno %d\n", err, errno);
435 err = bpf_map_get_fd_by_id(0);
436 CHECK(err >= 0 || errno != ENOENT,
437 "get-fd-by-notexist-map-id", "err %d errno %d\n", err, errno);
439 for (i = 0; i < nr_iters; i++)
442 /* Check bpf_obj_get_info_by_fd() */
443 bzero(zeros, sizeof(zeros));
444 for (i = 0; i < nr_iters; i++) {
446 err = bpf_prog_load(file, BPF_PROG_TYPE_SOCKET_FILTER,
447 &objs[i], &prog_fds[i]);
448 /* test_obj_id.o is a dumb prog. It should never fail
455 /* Insert a magic value to the map */
456 map_fds[i] = bpf_find_map(__func__, objs[i], "test_map_id");
457 assert(map_fds[i] >= 0);
458 err = bpf_map_update_elem(map_fds[i], &array_key,
459 &array_magic_value, 0);
/* Pass a doubled info_len; the kernel must trim it back to the real
 * struct size.
 */
462 /* Check getting map info */
463 info_len = sizeof(struct bpf_map_info) * 2;
464 bzero(&map_infos[i], info_len);
465 err = bpf_obj_get_info_by_fd(map_fds[i], &map_infos[i],
468 map_infos[i].type != BPF_MAP_TYPE_ARRAY ||
469 map_infos[i].key_size != sizeof(__u32) ||
470 map_infos[i].value_size != sizeof(__u64) ||
471 map_infos[i].max_entries != 1 ||
472 map_infos[i].map_flags != 0 ||
473 info_len != sizeof(struct bpf_map_info) ||
474 strcmp((char *)map_infos[i].name, expected_map_name),
476 "err %d errno %d type %d(%d) info_len %u(%Zu) key_size %u value_size %u max_entries %u map_flags %X name %s(%s)\n",
478 map_infos[i].type, BPF_MAP_TYPE_ARRAY,
479 info_len, sizeof(struct bpf_map_info),
480 map_infos[i].key_size,
481 map_infos[i].value_size,
482 map_infos[i].max_entries,
483 map_infos[i].map_flags,
484 map_infos[i].name, expected_map_name))
487 /* Check getting prog info */
488 info_len = sizeof(struct bpf_prog_info) * 2;
489 bzero(&prog_infos[i], info_len);
490 bzero(jited_insns, sizeof(jited_insns));
491 bzero(xlated_insns, sizeof(xlated_insns));
492 prog_infos[i].jited_prog_insns = ptr_to_u64(jited_insns);
493 prog_infos[i].jited_prog_len = sizeof(jited_insns);
494 prog_infos[i].xlated_prog_insns = ptr_to_u64(xlated_insns);
495 prog_infos[i].xlated_prog_len = sizeof(xlated_insns);
496 prog_infos[i].map_ids = ptr_to_u64(map_ids + i);
497 prog_infos[i].nr_map_ids = 2;
498 err = clock_gettime(CLOCK_REALTIME, &real_time_ts);
500 err = clock_gettime(CLOCK_BOOTTIME, &boot_time_ts);
502 err = bpf_obj_get_info_by_fd(prog_fds[i], &prog_infos[i],
/* load_time is reported relative to boot; translate it to wall-clock
 * seconds and allow +/-60s of slack in the comparison below.
 */
504 load_time = (real_time_ts.tv_sec - boot_time_ts.tv_sec)
505 + (prog_infos[i].load_time / nsec_per_sec);
507 prog_infos[i].type != BPF_PROG_TYPE_SOCKET_FILTER ||
508 info_len != sizeof(struct bpf_prog_info) ||
509 (jit_enabled && !prog_infos[i].jited_prog_len) ||
511 !memcmp(jited_insns, zeros, sizeof(zeros))) ||
512 !prog_infos[i].xlated_prog_len ||
513 !memcmp(xlated_insns, zeros, sizeof(zeros)) ||
514 load_time < now - 60 || load_time > now + 60 ||
515 prog_infos[i].created_by_uid != my_uid ||
516 prog_infos[i].nr_map_ids != 1 ||
517 *(int *)prog_infos[i].map_ids != map_infos[i].id ||
518 strcmp((char *)prog_infos[i].name, expected_prog_name),
520 "err %d errno %d i %d type %d(%d) info_len %u(%Zu) jit_enabled %d jited_prog_len %u xlated_prog_len %u jited_prog %d xlated_prog %d load_time %lu(%lu) uid %u(%u) nr_map_ids %u(%u) map_id %u(%u) name %s(%s)\n",
522 prog_infos[i].type, BPF_PROG_TYPE_SOCKET_FILTER,
523 info_len, sizeof(struct bpf_prog_info),
525 prog_infos[i].jited_prog_len,
526 prog_infos[i].xlated_prog_len,
527 !!memcmp(jited_insns, zeros, sizeof(zeros)),
528 !!memcmp(xlated_insns, zeros, sizeof(zeros)),
530 prog_infos[i].created_by_uid, my_uid,
531 prog_infos[i].nr_map_ids, 1,
532 *(int *)prog_infos[i].map_ids, map_infos[i].id,
533 prog_infos[i].name, expected_prog_name))
537 /* Check bpf_prog_get_next_id() */
540 while (!bpf_prog_get_next_id(next_id, &next_id)) {
541 struct bpf_prog_info prog_info = {};
545 info_len = sizeof(prog_info);
547 prog_fd = bpf_prog_get_fd_by_id(next_id);
548 if (prog_fd < 0 && errno == ENOENT)
549 /* The bpf_prog is in the dead row */
551 if (CHECK(prog_fd < 0, "get-prog-fd(next_id)",
552 "prog_fd %d next_id %d errno %d\n",
553 prog_fd, next_id, errno))
/* Only validate ids belonging to the progs this test loaded. */
556 for (i = 0; i < nr_iters; i++)
557 if (prog_infos[i].id == next_id)
566 * prog_info.nr_map_ids = 1
567 * prog_info.map_ids = NULL
569 prog_info.nr_map_ids = 1;
570 err = bpf_obj_get_info_by_fd(prog_fd, &prog_info, &info_len);
571 if (CHECK(!err || errno != EFAULT,
572 "get-prog-fd-bad-nr-map-ids", "err %d errno %d(%d)",
575 bzero(&prog_info, sizeof(prog_info));
576 info_len = sizeof(prog_info);
578 saved_map_id = *(int *)(prog_infos[i].map_ids);
579 prog_info.map_ids = prog_infos[i].map_ids;
580 prog_info.nr_map_ids = 2;
581 err = bpf_obj_get_info_by_fd(prog_fd, &prog_info, &info_len);
/* Zero the insn pointers so memcmp() compares only the stable fields. */
582 prog_infos[i].jited_prog_insns = 0;
583 prog_infos[i].xlated_prog_insns = 0;
584 CHECK(err || info_len != sizeof(struct bpf_prog_info) ||
585 memcmp(&prog_info, &prog_infos[i], info_len) ||
586 *(int *)prog_info.map_ids != saved_map_id,
587 "get-prog-info(next_id->fd)",
588 "err %d errno %d info_len %u(%Zu) memcmp %d map_id %u(%u)\n",
589 err, errno, info_len, sizeof(struct bpf_prog_info),
590 memcmp(&prog_info, &prog_infos[i], info_len),
591 *(int *)prog_info.map_ids, saved_map_id);
594 CHECK(nr_id_found != nr_iters,
595 "check total prog id found by get_next_id",
596 "nr_id_found %u(%u)\n",
597 nr_id_found, nr_iters);
599 /* Check bpf_map_get_next_id() */
602 while (!bpf_map_get_next_id(next_id, &next_id)) {
603 struct bpf_map_info map_info = {};
606 info_len = sizeof(map_info);
608 map_fd = bpf_map_get_fd_by_id(next_id);
609 if (map_fd < 0 && errno == ENOENT)
610 /* The bpf_map is in the dead row */
612 if (CHECK(map_fd < 0, "get-map-fd(next_id)",
613 "map_fd %d next_id %u errno %d\n",
614 map_fd, next_id, errno))
617 for (i = 0; i < nr_iters; i++)
618 if (map_infos[i].id == next_id)
626 err = bpf_map_lookup_elem(map_fd, &array_key, &array_value);
629 err = bpf_obj_get_info_by_fd(map_fd, &map_info, &info_len);
630 CHECK(err || info_len != sizeof(struct bpf_map_info) ||
631 memcmp(&map_info, &map_infos[i], info_len) ||
632 array_value != array_magic_value,
633 "check get-map-info(next_id->fd)",
634 "err %d errno %d info_len %u(%Zu) memcmp %d array_value %llu(%llu)\n",
635 err, errno, info_len, sizeof(struct bpf_map_info),
636 memcmp(&map_info, &map_infos[i], info_len),
637 array_value, array_magic_value);
641 CHECK(nr_id_found != nr_iters,
642 "check total map id found by get_next_id",
643 "nr_id_found %u(%u)\n",
644 nr_id_found, nr_iters);
647 for (i = 0; i < nr_iters; i++)
648 bpf_object__close(objs[i]);
/* Load test_pkt_md_access.o as SCHED_CLS and run it 10 times on the
 * ipv4 packet; the program validates skb metadata access and must
 * return 0.
 */
651 static void test_pkt_md_access(void)
653 const char *file = "./test_pkt_md_access.o";
654 struct bpf_object *obj;
655 __u32 duration, retval;
658 err = bpf_prog_load(file, BPF_PROG_TYPE_SCHED_CLS, &obj, &prog_fd);
664 err = bpf_prog_test_run(prog_fd, 10, &pkt_v4, sizeof(pkt_v4),
665 NULL, NULL, &retval, &duration);
666 CHECK(err || retval, "",
667 "err %d errno %d retval %d duration %d\n",
668 err, errno, retval, duration);
670 bpf_object__close(obj);
/* Validate kernel name checking for BPF_PROG_LOAD (attr.prog_name) and
 * BPF_MAP_CREATE (attr.map_name) via raw bpf(2) syscalls: a 15-char
 * name is accepted, a 16-char name and a name containing '\n' are
 * rejected with EINVAL.  A trivial one-insn program is used as the load
 * payload.  NOTE(review): the test-table header and some table entries
 * are absent from this sampled listing.
 */
673 static void test_obj_name(void)
681 { "_123456789ABCDE", 1, 0 },
682 { "_123456789ABCDEF", 0, EINVAL },
683 { "_123456789ABCD\n", 0, EINVAL },
685 struct bpf_insn prog[] = {
686 BPF_ALU64_IMM(BPF_MOV, BPF_REG_0, 0),
692 for (i = 0; i < sizeof(tests) / sizeof(tests[0]); i++) {
693 size_t name_len = strlen(tests[i].name) + 1;
/* ncopy is clamped so memcpy never overruns the fixed-size attr
 * name field; over-long names are truncated at copy time and the
 * rejection comes from the kernel, not from this test.
 */
698 /* test different attr.prog_name during BPF_PROG_LOAD */
699 ncopy = name_len < sizeof(attr.prog_name) ?
700 name_len : sizeof(attr.prog_name);
701 bzero(&attr, sizeof(attr));
702 attr.prog_type = BPF_PROG_TYPE_SCHED_CLS;
704 attr.insns = ptr_to_u64(prog);
705 attr.license = ptr_to_u64("");
706 memcpy(attr.prog_name, tests[i].name, ncopy);
708 fd = syscall(__NR_bpf, BPF_PROG_LOAD, &attr, sizeof(attr));
709 CHECK((tests[i].success && fd < 0) ||
710 (!tests[i].success && fd != -1) ||
711 (!tests[i].success && errno != tests[i].expected_errno),
712 "check-bpf-prog-name",
713 "fd %d(%d) errno %d(%d)\n",
714 fd, tests[i].success, errno, tests[i].expected_errno);
719 /* test different attr.map_name during BPF_MAP_CREATE */
720 ncopy = name_len < sizeof(attr.map_name) ?
721 name_len : sizeof(attr.map_name);
722 bzero(&attr, sizeof(attr));
723 attr.map_type = BPF_MAP_TYPE_ARRAY;
726 attr.max_entries = 1;
728 memcpy(attr.map_name, tests[i].name, ncopy);
729 fd = syscall(__NR_bpf, BPF_MAP_CREATE, &attr, sizeof(attr));
730 CHECK((tests[i].success && fd < 0) ||
731 (!tests[i].success && fd != -1) ||
732 (!tests[i].success && errno != tests[i].expected_errno),
733 "check-bpf-map-name",
734 "fd %d(%d) errno %d(%d)\n",
735 fd, tests[i].success, errno, tests[i].expected_errno);
/* Attach num_progs tracepoint programs to sched:sched_switch perf
 * events one by one and exercise PERF_EVENT_IOC_QUERY_BPF after each
 * attach: empty prog array, prog-count-only query, EFAULT on a bad
 * query pointer, ENOSPC when ids_len is too small, and finally a full
 * query whose returned ids must match the saved prog ids in order.
 * Cleans up (disable + close) in reverse on exit.
 */
742 static void test_tp_attach_query(void)
744 const int num_progs = 3;
745 int i, j, bytes, efd, err, prog_fd[num_progs], pmu_fd[num_progs];
746 __u32 duration = 0, info_len, saved_prog_ids[num_progs];
747 const char *file = "./test_tracepoint.o";
748 struct perf_event_query_bpf *query;
749 struct perf_event_attr attr = {};
750 struct bpf_object *obj[num_progs];
751 struct bpf_prog_info prog_info;
/* Resolve the tracepoint id from debugfs for perf_event_open(). */
754 snprintf(buf, sizeof(buf),
755 "/sys/kernel/debug/tracing/events/sched/sched_switch/id");
756 efd = open(buf, O_RDONLY, 0);
757 if (CHECK(efd < 0, "open", "err %d errno %d\n", efd, errno))
759 bytes = read(efd, buf, sizeof(buf));
761 if (CHECK(bytes <= 0 || bytes >= sizeof(buf),
762 "read", "bytes %d errno %d\n", bytes, errno))
765 attr.config = strtol(buf, NULL, 0);
766 attr.type = PERF_TYPE_TRACEPOINT;
767 attr.sample_type = PERF_SAMPLE_RAW | PERF_SAMPLE_CALLCHAIN;
768 attr.sample_period = 1;
769 attr.wakeup_events = 1;
/* query has a flexible ids[] tail sized for num_progs entries.
 * NOTE(review): no malloc() failure check is visible in this sample.
 */
771 query = malloc(sizeof(*query) + sizeof(__u32) * num_progs);
772 for (i = 0; i < num_progs; i++) {
773 err = bpf_prog_load(file, BPF_PROG_TYPE_TRACEPOINT, &obj[i],
775 if (CHECK(err, "prog_load", "err %d errno %d\n", err, errno))
778 bzero(&prog_info, sizeof(prog_info));
779 prog_info.jited_prog_len = 0;
780 prog_info.xlated_prog_len = 0;
781 prog_info.nr_map_ids = 0;
782 info_len = sizeof(prog_info);
783 err = bpf_obj_get_info_by_fd(prog_fd[i], &prog_info, &info_len);
784 if (CHECK(err, "bpf_obj_get_info_by_fd", "err %d errno %d\n",
787 saved_prog_ids[i] = prog_info.id;
789 pmu_fd[i] = syscall(__NR_perf_event_open, &attr, -1 /* pid */,
790 0 /* cpu 0 */, -1 /* group id */,
792 if (CHECK(pmu_fd[i] < 0, "perf_event_open", "err %d errno %d\n",
795 err = ioctl(pmu_fd[i], PERF_EVENT_IOC_ENABLE, 0);
796 if (CHECK(err, "perf_event_ioc_enable", "err %d errno %d\n",
801 /* check NULL prog array query */
802 query->ids_len = num_progs;
803 err = ioctl(pmu_fd[i], PERF_EVENT_IOC_QUERY_BPF, query);
804 if (CHECK(err || query->prog_cnt != 0,
805 "perf_event_ioc_query_bpf",
806 "err %d errno %d query->prog_cnt %u\n",
807 err, errno, query->prog_cnt))
811 err = ioctl(pmu_fd[i], PERF_EVENT_IOC_SET_BPF, prog_fd[i]);
812 if (CHECK(err, "perf_event_ioc_set_bpf", "err %d errno %d\n",
817 /* try to get # of programs only */
819 err = ioctl(pmu_fd[i], PERF_EVENT_IOC_QUERY_BPF, query);
820 if (CHECK(err || query->prog_cnt != 2,
821 "perf_event_ioc_query_bpf",
822 "err %d errno %d query->prog_cnt %u\n",
823 err, errno, query->prog_cnt))
826 /* try a few negative tests */
827 /* invalid query pointer */
828 err = ioctl(pmu_fd[i], PERF_EVENT_IOC_QUERY_BPF,
829 (struct perf_event_query_bpf *)0x1);
830 if (CHECK(!err || errno != EFAULT,
831 "perf_event_ioc_query_bpf",
832 "err %d errno %d\n", err, errno))
835 /* no enough space */
837 err = ioctl(pmu_fd[i], PERF_EVENT_IOC_QUERY_BPF, query);
838 if (CHECK(!err || errno != ENOSPC || query->prog_cnt != 2,
839 "perf_event_ioc_query_bpf",
840 "err %d errno %d query->prog_cnt %u\n",
841 err, errno, query->prog_cnt))
845 query->ids_len = num_progs;
846 err = ioctl(pmu_fd[i], PERF_EVENT_IOC_QUERY_BPF, query);
847 if (CHECK(err || query->prog_cnt != (i + 1),
848 "perf_event_ioc_query_bpf",
849 "err %d errno %d query->prog_cnt %u\n",
850 err, errno, query->prog_cnt))
852 for (j = 0; j < i + 1; j++)
853 if (CHECK(saved_prog_ids[j] != query->ids[j],
854 "perf_event_ioc_query_bpf",
855 "#%d saved_prog_id %x query prog_id %x\n",
856 j, saved_prog_ids[j], query->ids[j]))
/* Teardown runs in reverse attach order. */
861 for (; i >= 0; i--) {
863 ioctl(pmu_fd[i], PERF_EVENT_IOC_DISABLE);
867 bpf_object__close(obj[i]);
/* Walk every key of @map1_fd and verify each also exists in @map2_fd
 * (value contents are ignored; val_buf is sized for the largest
 * stack-trace value).  Returns non-zero on the first missing key or
 * lookup error; error-path returns are absent from this sampled listing.
 */
872 static int compare_map_keys(int map1_fd, int map2_fd)
875 char val_buf[PERF_MAX_STACK_DEPTH *
876 sizeof(struct bpf_stack_build_id)];
879 err = bpf_map_get_next_key(map1_fd, NULL, &key);
882 err = bpf_map_lookup_elem(map2_fd, &key, val_buf);
886 while (bpf_map_get_next_key(map1_fd, &key, &next_key) == 0) {
887 err = bpf_map_lookup_elem(map2_fd, &next_key, val_buf);
/* Attach test_stacktrace_map.o to the sched:sched_switch tracepoint via
 * perf_event_open + IOC_SET_BPF, let it collect stack ids for a while,
 * disable collection via control_map, then verify stackid_hmap and
 * stackmap hold the same key set in both directions.
 */
899 static void test_stacktrace_map()
901 int control_map_fd, stackid_hmap_fd, stackmap_fd;
902 const char *file = "./test_stacktrace_map.o";
903 int bytes, efd, err, pmu_fd, prog_fd;
904 struct perf_event_attr attr = {};
905 __u32 key, val, duration = 0;
906 struct bpf_object *obj;
909 err = bpf_prog_load(file, BPF_PROG_TYPE_TRACEPOINT, &obj, &prog_fd);
910 if (CHECK(err, "prog_load", "err %d errno %d\n", err, errno))
913 /* Get the ID for the sched/sched_switch tracepoint */
914 snprintf(buf, sizeof(buf),
915 "/sys/kernel/debug/tracing/events/sched/sched_switch/id");
916 efd = open(buf, O_RDONLY, 0);
917 if (CHECK(efd < 0, "open", "err %d errno %d\n", efd, errno))
920 bytes = read(efd, buf, sizeof(buf));
922 if (bytes <= 0 || bytes >= sizeof(buf))
925 /* Open the perf event and attach bpf progrram */
926 attr.config = strtol(buf, NULL, 0);
927 attr.type = PERF_TYPE_TRACEPOINT;
928 attr.sample_type = PERF_SAMPLE_RAW | PERF_SAMPLE_CALLCHAIN;
929 attr.sample_period = 1;
930 attr.wakeup_events = 1;
931 pmu_fd = syscall(__NR_perf_event_open, &attr, -1 /* pid */,
932 0 /* cpu 0 */, -1 /* group id */,
934 if (CHECK(pmu_fd < 0, "perf_event_open", "err %d errno %d\n",
938 err = ioctl(pmu_fd, PERF_EVENT_IOC_ENABLE, 0);
942 err = ioctl(pmu_fd, PERF_EVENT_IOC_SET_BPF, prog_fd);
947 control_map_fd = bpf_find_map(__func__, obj, "control_map");
948 if (control_map_fd < 0)
951 stackid_hmap_fd = bpf_find_map(__func__, obj, "stackid_hmap");
952 if (stackid_hmap_fd < 0)
955 stackmap_fd = bpf_find_map(__func__, obj, "stackmap");
959 /* give some time for bpf program run */
962 /* disable stack trace collection */
965 bpf_map_update_elem(control_map_fd, &key, &val, 0);
967 /* for every element in stackid_hmap, we can find a corresponding one
968 * in stackmap, and vise versa.
970 err = compare_map_keys(stackid_hmap_fd, stackmap_fd);
971 if (CHECK(err, "compare_map_keys stackid_hmap vs. stackmap",
972 "err %d errno %d\n", err, errno))
973 goto disable_pmu_noerr;
975 err = compare_map_keys(stackmap_fd, stackid_hmap_fd);
976 if (CHECK(err, "compare_map_keys stackmap vs. stackid_hmap",
977 "err %d errno %d\n", err, errno))
978 goto disable_pmu_noerr;
980 goto disable_pmu_noerr;
/* Common teardown: detach the perf event, then close the object. */
984 ioctl(pmu_fd, PERF_EVENT_IOC_DISABLE);
987 bpf_object__close(obj);
/* Same map-consistency check as test_stacktrace_map(), but attaches the
 * program as a RAW_TRACEPOINT via bpf_raw_tracepoint_open() instead of
 * going through perf_event_open.
 */
990 static void test_stacktrace_map_raw_tp()
992 int control_map_fd, stackid_hmap_fd, stackmap_fd;
993 const char *file = "./test_stacktrace_map.o";
994 int efd, err, prog_fd;
995 __u32 key, val, duration = 0;
996 struct bpf_object *obj;
998 err = bpf_prog_load(file, BPF_PROG_TYPE_RAW_TRACEPOINT, &obj, &prog_fd);
999 if (CHECK(err, "prog_load raw tp", "err %d errno %d\n", err, errno))
1002 efd = bpf_raw_tracepoint_open("sched_switch", prog_fd);
1003 if (CHECK(efd < 0, "raw_tp_open", "err %d errno %d\n", efd, errno))
1007 control_map_fd = bpf_find_map(__func__, obj, "control_map");
1008 if (control_map_fd < 0)
1011 stackid_hmap_fd = bpf_find_map(__func__, obj, "stackid_hmap");
1012 if (stackid_hmap_fd < 0)
1015 stackmap_fd = bpf_find_map(__func__, obj, "stackmap");
1016 if (stackmap_fd < 0)
1019 /* give some time for bpf program run */
1022 /* disable stack trace collection */
1025 bpf_map_update_elem(control_map_fd, &key, &val, 0);
1027 /* for every element in stackid_hmap, we can find a corresponding one
1028 * in stackmap, and vise versa.
1030 err = compare_map_keys(stackid_hmap_fd, stackmap_fd);
1031 if (CHECK(err, "compare_map_keys stackid_hmap vs. stackmap",
1032 "err %d errno %d\n", err, errno))
1035 err = compare_map_keys(stackmap_fd, stackid_hmap_fd);
1036 if (CHECK(err, "compare_map_keys stackmap vs. stackid_hmap",
1037 "err %d errno %d\n", err, errno))
1040 goto close_prog_noerr;
1044 bpf_object__close(obj);
/* Shell out to readelf to extract ./urandom_read's "Build ID" line into
 * @build_id.  NOTE(review): only a fragment is visible here -- the code
 * that trims the line to the hex id, the @size bound check, and the
 * pclose()/free() cleanup are absent from this sampled listing; as shown,
 * build_id[len] could index one past @size -- confirm against full source.
 */
1047 static int extract_build_id(char *build_id, size_t size)
1053 fp = popen("readelf -n ./urandom_read | grep 'Build ID'", "r");
1057 if (getline(&line, &len, fp) == -1)
1063 memcpy(build_id, line, len);
1064 build_id[len] = '\0';
/* Attach test_stacktrace_build_id.o to the random:urandom_read
 * tracepoint, trigger it by running ./urandom_read, verify stackid_hmap
 * and stackmap key sets match, then walk the stackmap entries and check
 * at least one collected frame carries a BPF_STACK_BUILD_ID_VALID
 * build id equal (as a hex string) to the binary's readelf build id.
 */
1071 static void test_stacktrace_build_id(void)
1073 int control_map_fd, stackid_hmap_fd, stackmap_fd;
1074 const char *file = "./test_stacktrace_build_id.o";
1075 int bytes, efd, err, pmu_fd, prog_fd;
1076 struct perf_event_attr attr = {};
1077 __u32 key, previous_key, val, duration = 0;
1078 struct bpf_object *obj;
1081 struct bpf_stack_build_id id_offs[PERF_MAX_STACK_DEPTH];
1082 int build_id_matches = 0;
1084 err = bpf_prog_load(file, BPF_PROG_TYPE_TRACEPOINT, &obj, &prog_fd);
1085 if (CHECK(err, "prog_load", "err %d errno %d\n", err, errno))
/* NOTE(review): comment says sched_switch but the path below is the
 * random/urandom_read tracepoint -- the path is what is actually used.
 */
1088 /* Get the ID for the sched/sched_switch tracepoint */
1089 snprintf(buf, sizeof(buf),
1090 "/sys/kernel/debug/tracing/events/random/urandom_read/id");
1091 efd = open(buf, O_RDONLY, 0);
1092 if (CHECK(efd < 0, "open", "err %d errno %d\n", efd, errno))
1095 bytes = read(efd, buf, sizeof(buf));
1097 if (CHECK(bytes <= 0 || bytes >= sizeof(buf),
1098 "read", "bytes %d errno %d\n", bytes, errno))
1101 /* Open the perf event and attach bpf progrram */
1102 attr.config = strtol(buf, NULL, 0);
1103 attr.type = PERF_TYPE_TRACEPOINT;
1104 attr.sample_type = PERF_SAMPLE_RAW | PERF_SAMPLE_CALLCHAIN;
1105 attr.sample_period = 1;
1106 attr.wakeup_events = 1;
1107 pmu_fd = syscall(__NR_perf_event_open, &attr, -1 /* pid */,
1108 0 /* cpu 0 */, -1 /* group id */,
1110 if (CHECK(pmu_fd < 0, "perf_event_open", "err %d errno %d\n",
1114 err = ioctl(pmu_fd, PERF_EVENT_IOC_ENABLE, 0);
1115 if (CHECK(err, "perf_event_ioc_enable", "err %d errno %d\n",
1119 err = ioctl(pmu_fd, PERF_EVENT_IOC_SET_BPF, prog_fd);
1120 if (CHECK(err, "perf_event_ioc_set_bpf", "err %d errno %d\n",
1125 control_map_fd = bpf_find_map(__func__, obj, "control_map");
1126 if (CHECK(control_map_fd < 0, "bpf_find_map control_map",
1127 "err %d errno %d\n", err, errno))
1130 stackid_hmap_fd = bpf_find_map(__func__, obj, "stackid_hmap");
1131 if (CHECK(stackid_hmap_fd < 0, "bpf_find_map stackid_hmap",
1132 "err %d errno %d\n", err, errno))
1135 stackmap_fd = bpf_find_map(__func__, obj, "stackmap");
1136 if (CHECK(stackmap_fd < 0, "bpf_find_map stackmap", "err %d errno %d\n",
/* Trigger urandom_read events while the program is attached. */
1140 assert(system("dd if=/dev/urandom of=/dev/zero count=4 2> /dev/null")
1142 assert(system("./urandom_read if=/dev/urandom of=/dev/zero count=4 2> /dev/null") == 0);
1143 /* disable stack trace collection */
1146 bpf_map_update_elem(control_map_fd, &key, &val, 0);
1148 /* for every element in stackid_hmap, we can find a corresponding one
1149 * in stackmap, and vise versa.
1151 err = compare_map_keys(stackid_hmap_fd, stackmap_fd);
1152 if (CHECK(err, "compare_map_keys stackid_hmap vs. stackmap",
1153 "err %d errno %d\n", err, errno))
1156 err = compare_map_keys(stackmap_fd, stackid_hmap_fd);
1157 if (CHECK(err, "compare_map_keys stackmap vs. stackid_hmap",
1158 "err %d errno %d\n", err, errno))
1161 err = extract_build_id(buf, 256);
1163 if (CHECK(err, "get build_id with readelf",
1164 "err %d errno %d\n", err, errno))
1167 err = bpf_map_get_next_key(stackmap_fd, NULL, &key);
1168 if (CHECK(err, "get_next_key from stackmap",
1169 "err %d, errno %d\n", err, errno))
1175 err = bpf_map_lookup_elem(stackmap_fd, &key, id_offs);
1176 if (CHECK(err, "lookup_elem from stackmap",
1177 "err %d, errno %d\n", err, errno))
/* Render each valid 20-byte build id as hex and compare against the
 * readelf output held in buf.
 */
1179 for (i = 0; i < PERF_MAX_STACK_DEPTH; ++i)
1180 if (id_offs[i].status == BPF_STACK_BUILD_ID_VALID &&
1181 id_offs[i].offset != 0) {
1182 for (j = 0; j < 20; ++j)
1183 sprintf(build_id + 2 * j, "%02x",
1184 id_offs[i].build_id[j] & 0xff);
1185 if (strstr(buf, build_id) != NULL)
1186 build_id_matches = 1;
1189 } while (bpf_map_get_next_key(stackmap_fd, &previous_key, &key) == 0);
1191 CHECK(build_id_matches < 1, "build id match",
1192 "Didn't find expected build ID from the map");
1195 ioctl(pmu_fd, PERF_EVENT_IOC_DISABLE);
1201 bpf_object__close(obj);
/* NOTE(review): body fragment of main() -- the function header and
 * several test invocations are absent from this sampled listing.  Runs
 * the visible test suite, then reports the counters maintained by
 * CHECK() and maps any failure to EXIT_FAILURE.
 */
1211 test_xdp_adjust_tail();
1213 test_xdp_noinline();
1216 test_pkt_md_access();
1218 test_tp_attach_query();
1219 test_stacktrace_map();
1220 test_stacktrace_build_id();
1221 test_stacktrace_map_raw_tp();
1223 printf("Summary: %d PASSED, %d FAILED\n", pass_cnt, error_cnt);
1224 return error_cnt ? EXIT_FAILURE : EXIT_SUCCESS;