// SPDX-License-Identifier: GPL-2.0-only
/* Copyright(c) 2017 Jesper Dangaard Brouer, Red Hat, Inc.
 */
static const char *__doc__ =
	" XDP redirect with a CPU-map type \"BPF_MAP_TYPE_CPUMAP\"";
#include <errno.h>
#include <signal.h>
#include <stdio.h>
#include <stdlib.h>
#include <stdbool.h>
#include <string.h>
#include <unistd.h>
#include <locale.h>
#include <sys/resource.h>
#include <sys/sysinfo.h>
#include <getopt.h>
#include <net/if.h>
#include <time.h>
#include <linux/limits.h>

#include <arpa/inet.h>
#include <linux/if_link.h>

/* How many xdp_progs are defined in _kern.c */
#define MAX_PROG 6

#include <bpf/bpf.h>
#include <bpf/libbpf.h>

#include "bpf_util.h"
static int ifindex = -1;
static char ifname_buf[IF_NAMESIZE];
static char *ifname;
static __u32 prog_id;

static __u32 xdp_flags = XDP_FLAGS_UPDATE_IF_NOEXIST;
static int n_cpus;
static int cpu_map_fd;
static int rx_cnt_map_fd;
static int redirect_err_cnt_map_fd;
static int cpumap_enqueue_cnt_map_fd;
static int cpumap_kthread_cnt_map_fd;
static int cpus_available_map_fd;
static int cpus_count_map_fd;
static int cpus_iterator_map_fd;
static int exception_cnt_map_fd;

#define NUM_TP 5
struct bpf_link *tp_links[NUM_TP] = { 0 };
static int tp_cnt = 0;
/* Exit return codes */
#define EXIT_OK			0
#define EXIT_FAIL		1
#define EXIT_FAIL_OPTION	2
#define EXIT_FAIL_XDP		3
#define EXIT_FAIL_BPF		4
#define EXIT_FAIL_MEM		5
static const struct option long_options[] = {
	{"help",	no_argument,		NULL, 'h' },
	{"dev",		required_argument,	NULL, 'd' },
	{"skb-mode",	no_argument,		NULL, 'S' },
	{"sec",		required_argument,	NULL, 's' },
	{"progname",	required_argument,	NULL, 'p' },
	{"qsize",	required_argument,	NULL, 'q' },
	{"cpu",		required_argument,	NULL, 'c' },
	{"stress-mode", no_argument,		NULL, 'x' },
	{"no-separators", no_argument,		NULL, 'z' },
	{"force",	no_argument,		NULL, 'F' },
	{0, 0, NULL, 0 }
};
static void int_exit(int sig)
{
	__u32 curr_prog_id = 0;

	if (ifindex > -1) {
		if (bpf_get_link_xdp_id(ifindex, &curr_prog_id, xdp_flags)) {
			printf("bpf_get_link_xdp_id failed\n");
			exit(EXIT_FAIL);
		}
		if (prog_id == curr_prog_id) {
			fprintf(stderr,
				"Interrupted: Removing XDP program on ifindex:%d device:%s\n",
				ifindex, ifname);
			bpf_set_link_xdp_fd(ifindex, -1, xdp_flags);
		} else if (!curr_prog_id) {
			printf("couldn't find a prog id on the given interface\n");
		} else {
			printf("program on interface changed, not removing\n");
		}
	}
	/* Detach tracepoints */
	while (tp_cnt)
		bpf_link__destroy(tp_links[--tp_cnt]);

	exit(EXIT_OK);
}
static void print_avail_progs(struct bpf_object *obj)
{
	struct bpf_program *pos;

	bpf_object__for_each_program(pos, obj) {
		if (bpf_program__is_xdp(pos))
			printf(" %s\n", bpf_program__title(pos, false));
	}
}
static void usage(char *argv[], struct bpf_object *obj)
{
	int i;

	printf("\nDOCUMENTATION:\n%s\n", __doc__);
	printf("\n");
	printf(" Usage: %s (options-see-below)\n", argv[0]);
	printf(" Listing options:\n");
	for (i = 0; long_options[i].name != 0; i++) {
		printf(" --%-12s", long_options[i].name);
		if (long_options[i].flag != NULL)
			printf(" flag (internal value:%d)",
			       *long_options[i].flag);
		else
			printf(" short-option: -%c",
			       long_options[i].val);
		printf("\n");
	}
	printf("\n Programs to be used for --progname:\n");
	print_avail_progs(obj);
	printf("\n");
}
/* gettime returns the current monotonic clock reading in nanoseconds.
 * Cost: clock_gettime (ns) => 26ns (CLOCK_MONOTONIC)
 *       clock_gettime (ns) => 9ns (CLOCK_MONOTONIC_COARSE)
 */
#define NANOSEC_PER_SEC 1000000000 /* 10^9 */
static __u64 gettime(void)
{
	struct timespec t;
	int res;

	res = clock_gettime(CLOCK_MONOTONIC, &t);
	if (res < 0) {
		fprintf(stderr, "Error with clock_gettime! (%i)\n", res);
		exit(EXIT_FAIL);
	}
	return (__u64) t.tv_sec * NANOSEC_PER_SEC + t.tv_nsec;
}
/* Common stats data record shared with _kern.c */
struct datarec {
	__u64 processed;
	__u64 dropped;
	__u64 issue;
};
struct record {
	__u64 timestamp;
	struct datarec total;
	struct datarec *cpu;
};
struct stats_record {
	struct record rx_cnt;
	struct record redir_err;
	struct record kthread;
	struct record exception;
	struct record enq[];
};
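/* A minimal sketch of the matching kernel-side definition that
 * _kern.c is assumed to contain (the _kern.c source is authoritative;
 * the value layout must match byte-for-byte for the map lookups below
 * to be meaningful):
 *
 *   struct datarec {
 *       __u64 processed;
 *       __u64 dropped;
 *       __u64 issue;
 *   };
 *   struct bpf_map_def SEC("maps") rx_cnt = {
 *       .type        = BPF_MAP_TYPE_PERCPU_ARRAY,
 *       .key_size    = sizeof(u32),
 *       .value_size  = sizeof(struct datarec),
 *       .max_entries = 1,
 *   };
 */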
static bool map_collect_percpu(int fd, __u32 key, struct record *rec)
{
	/* For percpu maps, userspace gets a value per possible CPU */
	unsigned int nr_cpus = bpf_num_possible_cpus();
	struct datarec values[nr_cpus];
	__u64 sum_processed = 0;
	__u64 sum_dropped = 0;
	__u64 sum_issue = 0;
	int i;

	if ((bpf_map_lookup_elem(fd, &key, values)) != 0) {
		fprintf(stderr,
			"ERR: bpf_map_lookup_elem failed key:0x%X\n", key);
		return false;
	}
	/* Get time as close as possible to reading map contents */
	rec->timestamp = gettime();

	/* Record and sum values from each CPU */
	for (i = 0; i < nr_cpus; i++) {
		rec->cpu[i].processed = values[i].processed;
		sum_processed        += values[i].processed;
		rec->cpu[i].dropped   = values[i].dropped;
		sum_dropped          += values[i].dropped;
		rec->cpu[i].issue     = values[i].issue;
		sum_issue            += values[i].issue;
	}
	rec->total.processed = sum_processed;
	rec->total.dropped   = sum_dropped;
	rec->total.issue     = sum_issue;
	return true;
}
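/* Example: on a machine with 4 possible CPUs, a single
 * bpf_map_lookup_elem() on a per-CPU map fills values[0..3], and the
 * totals above become the sums across those four slots. Note the
 * array is sized by *possible* CPUs, which can exceed online CPUs. */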
static struct datarec *alloc_record_per_cpu(void)
{
	unsigned int nr_cpus = bpf_num_possible_cpus();
	struct datarec *array;

	array = calloc(nr_cpus, sizeof(struct datarec));
	if (!array) {
		fprintf(stderr, "Mem alloc error (nr_cpus:%u)\n", nr_cpus);
		exit(EXIT_FAIL_MEM);
	}
	return array;
}
static struct stats_record *alloc_stats_record(void)
{
	struct stats_record *rec;
	int i, size;

	size = sizeof(*rec) + n_cpus * sizeof(struct record);
	rec = malloc(size);
	if (!rec) {
		fprintf(stderr, "Mem alloc error\n");
		exit(EXIT_FAIL_MEM);
	}
	memset(rec, 0, size);
	rec->rx_cnt.cpu    = alloc_record_per_cpu();
	rec->redir_err.cpu = alloc_record_per_cpu();
	rec->kthread.cpu   = alloc_record_per_cpu();
	rec->exception.cpu = alloc_record_per_cpu();
	for (i = 0; i < n_cpus; i++)
		rec->enq[i].cpu = alloc_record_per_cpu();

	return rec;
}
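/* Note: stats_record ends in a flexible array member (enq[]), which is
 * why the allocation above is sizeof(*rec) plus one struct record per
 * CPU that packets can be enqueued to. */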
static void free_stats_record(struct stats_record *r)
{
	int i;

	for (i = 0; i < n_cpus; i++)
		free(r->enq[i].cpu);
	free(r->exception.cpu);
	free(r->kthread.cpu);
	free(r->redir_err.cpu);
	free(r->rx_cnt.cpu);
	free(r);
}
static double calc_period(struct record *r, struct record *p)
{
	double period_ = 0;
	__u64 period = 0;

	period = r->timestamp - p->timestamp;
	if (period > 0)
		period_ = ((double) period / NANOSEC_PER_SEC);

	return period_;
}
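/* Example: two snapshots taken 2,000,000,000 ns apart yield
 * period_ = 2.0, so the calc_*_pps() helpers below divide the packet
 * deltas by 2.0 to produce per-second rates. */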
static __u64 calc_pps(struct datarec *r, struct datarec *p, double period_)
{
	__u64 packets = 0;
	__u64 pps = 0;

	if (period_ > 0) {
		packets = r->processed - p->processed;
		pps = packets / period_;
	}
	return pps;
}

static __u64 calc_drop_pps(struct datarec *r, struct datarec *p, double period_)
{
	__u64 packets = 0;
	__u64 pps = 0;

	if (period_ > 0) {
		packets = r->dropped - p->dropped;
		pps = packets / period_;
	}
	return pps;
}

static __u64 calc_errs_pps(struct datarec *r,
			   struct datarec *p, double period_)
{
	__u64 packets = 0;
	__u64 pps = 0;

	if (period_ > 0) {
		packets = r->issue - p->issue;
		pps = packets / period_;
	}
	return pps;
}
static void stats_print(struct stats_record *stats_rec,
			struct stats_record *stats_prev,
			char *prog_name)
{
	unsigned int nr_cpus = bpf_num_possible_cpus();
	double pps = 0, drop = 0, err = 0;
	struct record *rec, *prev;
	int to_cpu;
	double t;
	int i;

	/* Header */
	printf("Running XDP/eBPF prog_name:%s\n", prog_name);
	printf("%-15s %-7s %-14s %-11s %-9s\n",
	       "XDP-cpumap", "CPU:to", "pps", "drop-pps", "extra-info");

	/* XDP rx_cnt */
	{
		char *fmt_rx = "%-15s %-7d %'-14.0f %'-11.0f %'-10.0f %s\n";
		char *fm2_rx = "%-15s %-7s %'-14.0f %'-11.0f\n";
		char *errstr = "";

		rec  = &stats_rec->rx_cnt;
		prev = &stats_prev->rx_cnt;
		t = calc_period(rec, prev);
		for (i = 0; i < nr_cpus; i++) {
			struct datarec *r = &rec->cpu[i];
			struct datarec *p = &prev->cpu[i];

			pps  = calc_pps(r, p, t);
			drop = calc_drop_pps(r, p, t);
			err  = calc_errs_pps(r, p, t);
			if (err > 0)
				errstr = "cpu-dest/err";
			if (pps > 0)
				printf(fmt_rx, "XDP-RX",
				       i, pps, drop, err, errstr);
		}
		pps  = calc_pps(&rec->total, &prev->total, t);
		drop = calc_drop_pps(&rec->total, &prev->total, t);
		err  = calc_errs_pps(&rec->total, &prev->total, t);
		printf(fm2_rx, "XDP-RX", "total", pps, drop);
	}

	/* cpumap enqueue stats */
	for (to_cpu = 0; to_cpu < n_cpus; to_cpu++) {
		char *fmt = "%-15s %3d:%-3d %'-14.0f %'-11.0f %'-10.2f %s\n";
		char *fm2 = "%-15s %3s:%-3d %'-14.0f %'-11.0f %'-10.2f %s\n";
		char *errstr = "";

		rec  = &stats_rec->enq[to_cpu];
		prev = &stats_prev->enq[to_cpu];
		t = calc_period(rec, prev);
		for (i = 0; i < nr_cpus; i++) {
			struct datarec *r = &rec->cpu[i];
			struct datarec *p = &prev->cpu[i];

			pps  = calc_pps(r, p, t);
			drop = calc_drop_pps(r, p, t);
			err  = calc_errs_pps(r, p, t);
			if (err > 0) {
				errstr = "bulk-average";
				err = pps / err; /* calc average bulk size */
			}
			if (pps > 0)
				printf(fmt, "cpumap-enqueue",
				       i, to_cpu, pps, drop, err, errstr);
		}
		pps = calc_pps(&rec->total, &prev->total, t);
		if (pps > 0) {
			drop = calc_drop_pps(&rec->total, &prev->total, t);
			err  = calc_errs_pps(&rec->total, &prev->total, t);
			if (err > 0) {
				errstr = "bulk-average";
				err = pps / err; /* calc average bulk size */
			}
			printf(fm2, "cpumap-enqueue",
			       "sum", to_cpu, pps, drop, err, errstr);
		}
	}
	/* cpumap kthread stats */
	{
		char *fmt_k = "%-15s %-7d %'-14.0f %'-11.0f %'-10.0f %s\n";
		char *fm2_k = "%-15s %-7s %'-14.0f %'-11.0f %'-10.0f %s\n";
		char *e_str = "";

		rec  = &stats_rec->kthread;
		prev = &stats_prev->kthread;
		t = calc_period(rec, prev);
		for (i = 0; i < nr_cpus; i++) {
			struct datarec *r = &rec->cpu[i];
			struct datarec *p = &prev->cpu[i];

			pps  = calc_pps(r, p, t);
			drop = calc_drop_pps(r, p, t);
			err  = calc_errs_pps(r, p, t);
			if (err > 0)
				e_str = "sched";
			if (pps > 0)
				printf(fmt_k, "cpumap_kthread",
				       i, pps, drop, err, e_str);
		}
		pps  = calc_pps(&rec->total, &prev->total, t);
		drop = calc_drop_pps(&rec->total, &prev->total, t);
		err  = calc_errs_pps(&rec->total, &prev->total, t);
		if (err > 0)
			e_str = "sched-sum";
		printf(fm2_k, "cpumap_kthread", "total", pps, drop, err, e_str);
	}

	/* XDP redirect err tracepoints (very unlikely) */
	{
		char *fmt_err = "%-15s %-7d %'-14.0f %'-11.0f\n";
		char *fm2_err = "%-15s %-7s %'-14.0f %'-11.0f\n";

		rec  = &stats_rec->redir_err;
		prev = &stats_prev->redir_err;
		t = calc_period(rec, prev);
		for (i = 0; i < nr_cpus; i++) {
			struct datarec *r = &rec->cpu[i];
			struct datarec *p = &prev->cpu[i];

			pps  = calc_pps(r, p, t);
			drop = calc_drop_pps(r, p, t);
			if (pps > 0)
				printf(fmt_err, "redirect_err", i, pps, drop);
		}
		pps  = calc_pps(&rec->total, &prev->total, t);
		drop = calc_drop_pps(&rec->total, &prev->total, t);
		printf(fm2_err, "redirect_err", "total", pps, drop);
	}

	/* XDP general exception tracepoints */
	{
		char *fmt_err = "%-15s %-7d %'-14.0f %'-11.0f\n";
		char *fm2_err = "%-15s %-7s %'-14.0f %'-11.0f\n";

		rec  = &stats_rec->exception;
		prev = &stats_prev->exception;
		t = calc_period(rec, prev);
		for (i = 0; i < nr_cpus; i++) {
			struct datarec *r = &rec->cpu[i];
			struct datarec *p = &prev->cpu[i];

			pps  = calc_pps(r, p, t);
			drop = calc_drop_pps(r, p, t);
			if (pps > 0)
				printf(fmt_err, "xdp_exception", i, pps, drop);
		}
		pps  = calc_pps(&rec->total, &prev->total, t);
		drop = calc_drop_pps(&rec->total, &prev->total, t);
		printf(fm2_err, "xdp_exception", "total", pps, drop);
	}

	printf("\n");
	fflush(stdout);
}
static void stats_collect(struct stats_record *rec)
{
	int fd, i;

	fd = rx_cnt_map_fd;
	map_collect_percpu(fd, 0, &rec->rx_cnt);

	/* Slot 1 in redirect_err_cnt is the error counter; slot 0 counts
	 * successful redirects (layout defined in _kern.c). */
	fd = redirect_err_cnt_map_fd;
	map_collect_percpu(fd, 1, &rec->redir_err);

	fd = cpumap_enqueue_cnt_map_fd;
	for (i = 0; i < n_cpus; i++)
		map_collect_percpu(fd, i, &rec->enq[i]);

	fd = cpumap_kthread_cnt_map_fd;
	map_collect_percpu(fd, 0, &rec->kthread);

	fd = exception_cnt_map_fd;
	map_collect_percpu(fd, 0, &rec->exception);
}
/* Pointer swap trick */
static inline void swap(struct stats_record **a, struct stats_record **b)
{
	struct stats_record *tmp;

	tmp = *a;
	*a = *b;
	*b = tmp;
}
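/* Used by stats_poll() below: swapping the 'record' and 'prev' pointers
 * each interval reuses the two allocations instead of copying the
 * per-CPU data between snapshots. */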
static int create_cpu_entry(__u32 cpu, __u32 queue_size,
			    __u32 avail_idx, bool new)
{
	__u32 curr_cpus_count = 0;
	__u32 key = 0;
	int ret;

	/* Add a CPU entry to cpumap, as this allocates a cpu entry in
	 * the kernel for the cpu.
	 */
	ret = bpf_map_update_elem(cpu_map_fd, &cpu, &queue_size, 0);
	if (ret) {
		fprintf(stderr, "Create CPU entry failed (err:%d)\n", ret);
		exit(EXIT_FAIL_BPF);
	}

	/* Inform bpf_prog's that a new CPU is available to select
	 * from via some control maps.
	 */
	ret = bpf_map_update_elem(cpus_available_map_fd, &avail_idx, &cpu, 0);
	if (ret) {
		fprintf(stderr, "Add to avail CPUs failed\n");
		exit(EXIT_FAIL_BPF);
	}

	/* When not replacing/updating existing entry, bump the count */
	ret = bpf_map_lookup_elem(cpus_count_map_fd, &key, &curr_cpus_count);
	if (ret) {
		fprintf(stderr, "Failed reading curr cpus_count\n");
		exit(EXIT_FAIL_BPF);
	}
	if (new) {
		curr_cpus_count++;
		ret = bpf_map_update_elem(cpus_count_map_fd, &key,
					  &curr_cpus_count, 0);
		if (ret) {
			fprintf(stderr, "Failed writing curr cpus_count\n");
			exit(EXIT_FAIL_BPF);
		}
	}

	printf("%s CPU:%u as idx:%u queue_size:%d (total cpus_count:%u)\n",
	       new ? "Add-new" : "Replace", cpu, avail_idx,
	       queue_size, curr_cpus_count);

	return 0;
}
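/* Example: "--cpu 2 --cpu 3" on the command line ends up as:
 *   create_cpu_entry(2, qsize, 0, true);
 *   create_cpu_entry(3, qsize, 1, true);
 * i.e. avail_idx grows with each --cpu option (see main()). */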
/* CPUs are zero-indexed. Thus, add a special sentinel default value
 * in map cpus_available to mark CPU indexes not configured
 */
static void mark_cpus_unavailable(void)
{
	__u32 invalid_cpu = n_cpus;
	int ret, i;

	for (i = 0; i < n_cpus; i++) {
		ret = bpf_map_update_elem(cpus_available_map_fd, &i,
					  &invalid_cpu, 0);
		if (ret) {
			fprintf(stderr, "Failed marking CPU unavailable\n");
			exit(EXIT_FAIL_BPF);
		}
	}
}
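/* The sentinel works because valid CPU ids are 0..n_cpus-1; storing
 * n_cpus lets the kernel-side programs recognize an unconfigured index
 * (how they react to that case is defined in _kern.c). */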
/* Stress cpumap management code by concurrently changing underlying cpumap */
static void stress_cpumap(void)
{
	/* Changing qsize will cause kernel to free and alloc a new
	 * bpf_cpu_map_entry, with an associated/complicated tear-down
	 * procedure.
	 */
	create_cpu_entry(1, 1024, 0, false);
	create_cpu_entry(1, 8, 0, false);
	create_cpu_entry(1, 16000, 0, false);
}
static void stats_poll(int interval, bool use_separators, char *prog_name,
		       bool stress_mode)
{
	struct stats_record *record, *prev;

	record = alloc_stats_record();
	prev   = alloc_stats_record();
	stats_collect(record);

	/* Trick to pretty-print with thousands separators: use %' in the
	 * printf formats, which requires a locale to be set */
	if (use_separators)
		setlocale(LC_NUMERIC, "en_US");

	while (1) {
		swap(&prev, &record);
		stats_collect(record);
		stats_print(record, prev, prog_name);
		sleep(interval);
		if (stress_mode)
			stress_cpumap();
	}

	free_stats_record(record);
	free_stats_record(prev);
}
static struct bpf_link *attach_tp(struct bpf_object *obj,
				  const char *tp_category,
				  const char *tp_name)
{
	struct bpf_program *prog;
	struct bpf_link *link;
	char sec_name[PATH_MAX];
	int len;

	len = snprintf(sec_name, PATH_MAX, "tracepoint/%s/%s",
		       tp_category, tp_name);
	if (len < 0)
		exit(EXIT_FAIL);

	prog = bpf_object__find_program_by_title(obj, sec_name);
	if (!prog) {
		fprintf(stderr, "ERR: finding progsec: %s\n", sec_name);
		exit(EXIT_FAIL_BPF);
	}

	link = bpf_program__attach_tracepoint(prog, tp_category, tp_name);
	if (libbpf_get_error(link))
		exit(EXIT_FAIL_BPF);

	return link;
}
static void init_tracepoints(struct bpf_object *obj)
{
	tp_links[tp_cnt++] = attach_tp(obj, "xdp", "xdp_redirect_err");
	tp_links[tp_cnt++] = attach_tp(obj, "xdp", "xdp_redirect_map_err");
	tp_links[tp_cnt++] = attach_tp(obj, "xdp", "xdp_exception");
	tp_links[tp_cnt++] = attach_tp(obj, "xdp", "xdp_cpumap_enqueue");
	tp_links[tp_cnt++] = attach_tp(obj, "xdp", "xdp_cpumap_kthread");
}
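/* Keep NUM_TP in sync with the number of attach_tp() calls above: the
 * tp_links[] array is sized by it, and int_exit() destroys tp_cnt
 * entries on shutdown. */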
static int init_map_fds(struct bpf_object *obj)
{
	/* Maps updated by tracepoints */
	redirect_err_cnt_map_fd =
		bpf_object__find_map_fd_by_name(obj, "redirect_err_cnt");
	exception_cnt_map_fd =
		bpf_object__find_map_fd_by_name(obj, "exception_cnt");
	cpumap_enqueue_cnt_map_fd =
		bpf_object__find_map_fd_by_name(obj, "cpumap_enqueue_cnt");
	cpumap_kthread_cnt_map_fd =
		bpf_object__find_map_fd_by_name(obj, "cpumap_kthread_cnt");

	/* Maps used by XDP */
	rx_cnt_map_fd = bpf_object__find_map_fd_by_name(obj, "rx_cnt");
	cpu_map_fd = bpf_object__find_map_fd_by_name(obj, "cpu_map");
	cpus_available_map_fd =
		bpf_object__find_map_fd_by_name(obj, "cpus_available");
	cpus_count_map_fd = bpf_object__find_map_fd_by_name(obj, "cpus_count");
	cpus_iterator_map_fd =
		bpf_object__find_map_fd_by_name(obj, "cpus_iterator");

	if (cpu_map_fd < 0 || rx_cnt_map_fd < 0 ||
	    redirect_err_cnt_map_fd < 0 || cpumap_enqueue_cnt_map_fd < 0 ||
	    cpumap_kthread_cnt_map_fd < 0 || cpus_available_map_fd < 0 ||
	    cpus_count_map_fd < 0 || cpus_iterator_map_fd < 0 ||
	    exception_cnt_map_fd < 0)
		return -ENOENT;

	return 0;
}
int main(int argc, char **argv)
{
	struct rlimit r = {10 * 1024 * 1024, RLIM_INFINITY};
	char *prog_name = "xdp_cpu_map5_lb_hash_ip_pairs";
	struct bpf_prog_load_attr prog_load_attr = {
		.prog_type	= BPF_PROG_TYPE_UNSPEC,
	};
	struct bpf_prog_info info = {};
	__u32 info_len = sizeof(info);
	bool use_separators = true;
	bool stress_mode = false;
	struct bpf_program *prog;
	struct bpf_object *obj;
	char filename[256];
	int added_cpus = 0;
	int longindex = 0;
	int interval = 2;
	int add_cpu = -1;
	int opt, err;
	int prog_fd;
	__u32 qsize;

	n_cpus = get_nprocs_conf();

	/* Notice: choosing the queue size is very important with the
	 * ixgbe driver, because its driver page recycling trick is
	 * dependent on pages being returned quickly. The number of
	 * outstanding packets in the system must be less than 2x
	 * RX-ring size.
	 */
	qsize = 128+64;

	snprintf(filename, sizeof(filename), "%s_kern.o", argv[0]);
	prog_load_attr.file = filename;

	if (setrlimit(RLIMIT_MEMLOCK, &r)) {
		perror("setrlimit(RLIMIT_MEMLOCK)");
		return 1;
	}

	if (bpf_prog_load_xattr(&prog_load_attr, &obj, &prog_fd))
		return EXIT_FAIL;

	if (prog_fd < 0) {
		fprintf(stderr, "ERR: bpf_prog_load_xattr: %s\n",
			strerror(errno));
		return EXIT_FAIL;
	}
	init_tracepoints(obj);
	if (init_map_fds(obj) < 0) {
		fprintf(stderr, "bpf_object__find_map_fd_by_name failed\n");
		return EXIT_FAIL;
	}
	mark_cpus_unavailable();
	/* Parse command line args */
	while ((opt = getopt_long(argc, argv, "hSd:s:p:q:c:xzF",
				  long_options, &longindex)) != -1) {
		switch (opt) {
		case 'd':
			if (strlen(optarg) >= IF_NAMESIZE) {
				fprintf(stderr, "ERR: --dev name too long\n");
				goto error;
			}
			ifname = (char *)&ifname_buf;
			strncpy(ifname, optarg, IF_NAMESIZE);
			ifindex = if_nametoindex(ifname);
			if (ifindex == 0) {
				fprintf(stderr,
					"ERR: --dev name unknown err(%d):%s\n",
					errno, strerror(errno));
				goto error;
			}
			break;
		case 's':
			interval = atoi(optarg);
			break;
		case 'S':
			xdp_flags |= XDP_FLAGS_SKB_MODE;
			break;
		case 'x':
			stress_mode = true;
			break;
		case 'z':
			use_separators = false;
			break;
		case 'p':
			/* Selecting eBPF prog to load */
			prog_name = optarg;
			break;
		case 'c':
			/* Add multiple CPUs */
			add_cpu = strtoul(optarg, NULL, 0);
			if (add_cpu >= n_cpus) {
				fprintf(stderr,
				"--cpu nr too large for cpumap err(%d):%s\n",
					errno, strerror(errno));
				goto error;
			}
			create_cpu_entry(add_cpu, qsize, added_cpus, true);
			added_cpus++;
			break;
		case 'q':
			qsize = atoi(optarg);
			break;
		case 'F':
			xdp_flags &= ~XDP_FLAGS_UPDATE_IF_NOEXIST;
			break;
		case 'h':
		error:
		default:
			usage(argv, obj);
			return EXIT_FAIL_OPTION;
		}
	}

	if (!(xdp_flags & XDP_FLAGS_SKB_MODE))
		xdp_flags |= XDP_FLAGS_DRV_MODE;

	/* Required option */
	if (ifindex == -1) {
		fprintf(stderr, "ERR: required option --dev missing\n");
		usage(argv, obj);
		return EXIT_FAIL_OPTION;
	}

	/* Required option */
	if (add_cpu == -1) {
		fprintf(stderr, "ERR: required option --cpu missing\n");
		fprintf(stderr, " Specify multiple --cpu options to add more\n");
		usage(argv, obj);
		return EXIT_FAIL_OPTION;
	}
	/* Remove XDP program when program is interrupted or killed */
	signal(SIGINT, int_exit);
	signal(SIGTERM, int_exit);

	prog = bpf_object__find_program_by_title(obj, prog_name);
	if (!prog) {
		fprintf(stderr, "bpf_object__find_program_by_title failed\n");
		return EXIT_FAIL;
	}

	prog_fd = bpf_program__fd(prog);
	if (prog_fd < 0) {
		fprintf(stderr, "bpf_program__fd failed\n");
		return EXIT_FAIL;
	}

	if (bpf_set_link_xdp_fd(ifindex, prog_fd, xdp_flags) < 0) {
		fprintf(stderr, "link set xdp fd failed\n");
		return EXIT_FAIL_XDP;
	}

	err = bpf_obj_get_info_by_fd(prog_fd, &info, &info_len);
	if (err) {
		printf("can't get prog info - %s\n", strerror(errno));
		return err;
	}
	prog_id = info.id;

	stats_poll(interval, use_separators, prog_name, stress_mode);
	return EXIT_OK;
}