1 // SPDX-License-Identifier: GPL-2.0-only
/* Copyright(c) 2017 Jesper Dangaard Brouer, Red Hat, Inc.
 */
4 static const char *__doc__ =
5 " XDP redirect with a CPU-map type \"BPF_MAP_TYPE_CPUMAP\"";
15 #include <sys/resource.h>
16 #include <sys/sysinfo.h>
20 #include <linux/limits.h>
22 #include <arpa/inet.h>
23 #include <linux/if_link.h>
25 /* How many xdp_progs are defined in _kern.c */
29 #include <bpf/libbpf.h>
/* Target net_device for XDP attach; -1 means "not set yet" (checked in
 * int_exit() before detaching).
 */
static int ifindex = -1;
/* Buffer backing the --dev interface name (ifname points here). */
static char ifname_buf[IF_NAMESIZE];
/* Flags passed to bpf_set_link_xdp_fd(); NOEXIST is cleared by --force. */
static __u32 xdp_flags = XDP_FLAGS_UPDATE_IF_NOEXIST;
53 static const char *const map_type_strings[] = {
54 [CPU_MAP] = "cpu_map",
56 [REDIRECT_ERR_CNT] = "redirect_err_cnt",
57 [CPUMAP_ENQUEUE_CNT] = "cpumap_enqueue_cnt",
58 [CPUMAP_KTHREAD_CNT] = "cpumap_kthread_cnt",
59 [CPUS_AVAILABLE] = "cpus_available",
60 [CPUS_COUNT] = "cpus_count",
61 [CPUS_ITERATOR] = "cpus_iterator",
62 [EXCEPTION_CNT] = "exception_cnt",
67 struct bpf_link *tp_links[NUM_TP] = {};
68 static int map_fds[NUM_MAP];
69 static int tp_cnt = 0;
/* Exit return codes — distinct values so scripts can tell failure modes
 * apart. EXIT_OK/EXIT_FAIL are reconstructed; they are referenced by the
 * error paths in this file and match EXIT_SUCCESS/EXIT_FAILURE.
 */
#define EXIT_OK			0
#define EXIT_FAIL		1
#define EXIT_FAIL_OPTION	2	/* bad/missing command line option */
#define EXIT_FAIL_XDP		3	/* attaching XDP prog to device failed */
#define EXIT_FAIL_BPF		4	/* BPF map/prog operation failed */
#define EXIT_FAIL_MEM		5	/* userspace memory allocation failed */
/* Command line options; usage() iterates this table until .name == 0 and
 * getopt_long() requires a zero-filled terminator entry — added here.
 */
static const struct option long_options[] = {
	{"help",		no_argument,		NULL, 'h' },
	{"dev",			required_argument,	NULL, 'd' },
	{"skb-mode",		no_argument,		NULL, 'S' },
	{"sec",			required_argument,	NULL, 's' },
	{"progname",		required_argument,	NULL, 'p' },
	{"qsize",		required_argument,	NULL, 'q' },
	{"cpu",			required_argument,	NULL, 'c' },
	{"stress-mode",		no_argument,		NULL, 'x' },
	{"no-separators",	no_argument,		NULL, 'z' },
	{"force",		no_argument,		NULL, 'F' },
	{"mprog-disable",	no_argument,		NULL, 'n' },
	{"mprog-name",		required_argument,	NULL, 'e' },
	{"mprog-filename",	required_argument,	NULL, 'f' },
	{"redirect-device",	required_argument,	NULL, 'r' },
	{"redirect-map",	required_argument,	NULL, 'm' },
	{0, 0, NULL, 0 }
};
98 static void int_exit(int sig)
100 __u32 curr_prog_id = 0;
103 if (bpf_get_link_xdp_id(ifindex, &curr_prog_id, xdp_flags)) {
104 printf("bpf_get_link_xdp_id failed\n");
107 if (prog_id == curr_prog_id) {
109 "Interrupted: Removing XDP program on ifindex:%d device:%s\n",
111 bpf_set_link_xdp_fd(ifindex, -1, xdp_flags);
112 } else if (!curr_prog_id) {
113 printf("couldn't find a prog id on a given iface\n");
115 printf("program on interface changed, not removing\n");
118 /* Detach tracepoints */
120 bpf_link__destroy(tp_links[--tp_cnt]);
125 static void print_avail_progs(struct bpf_object *obj)
127 struct bpf_program *pos;
129 bpf_object__for_each_program(pos, obj) {
130 if (bpf_program__is_xdp(pos))
131 printf(" %s\n", bpf_program__section_name(pos));
135 static void usage(char *argv[], struct bpf_object *obj)
139 printf("\nDOCUMENTATION:\n%s\n", __doc__);
141 printf(" Usage: %s (options-see-below)\n", argv[0]);
142 printf(" Listing options:\n");
143 for (i = 0; long_options[i].name != 0; i++) {
144 printf(" --%-12s", long_options[i].name);
145 if (long_options[i].flag != NULL)
146 printf(" flag (internal value:%d)",
147 *long_options[i].flag);
149 printf(" short-option: -%c",
150 long_options[i].val);
153 printf("\n Programs to be used for --progname:\n");
154 print_avail_progs(obj);
158 /* gettime returns the current time of day in nanoseconds.
159 * Cost: clock_gettime (ns) => 26ns (CLOCK_MONOTONIC)
160 * clock_gettime (ns) => 9ns (CLOCK_MONOTONIC_COARSE)
162 #define NANOSEC_PER_SEC 1000000000 /* 10^9 */
163 static __u64 gettime(void)
168 res = clock_gettime(CLOCK_MONOTONIC, &t);
170 fprintf(stderr, "Error with gettimeofday! (%i)\n", res);
173 return (__u64) t.tv_sec * NANOSEC_PER_SEC + t.tv_nsec;
/* Common stats data record shared with _kern.c.
 * NOTE(review): field set reconstructed from the accesses in
 * map_collect_percpu()/calc_* below (processed, dropped, issue,
 * xdp_pass, xdp_drop, xdp_redirect) — confirm against the _kern.c side.
 */
struct datarec {
	__u64 processed;
	__u64 dropped;
	__u64 issue;
	__u64 xdp_pass;
	__u64 xdp_drop;
	__u64 xdp_redirect;
};

/* One sampled snapshot of a per-CPU map: per-CPU values plus their sum */
struct record {
	__u64 timestamp;	/* taken via gettime() right after map read */
	struct datarec total;	/* sum over all CPUs */
	struct datarec *cpu;	/* array[nr_cpus], alloc_record_per_cpu() */
};

struct stats_record {
	struct record rx_cnt;
	struct record redir_err;
	struct record kthread;
	struct record exception;
	/* Flexible member: one enqueue record per destination CPU; sized
	 * with n_cpus in alloc_stats_record() and indexed as enq[to_cpu].
	 */
	struct record enq[];
};
198 static bool map_collect_percpu(int fd, __u32 key, struct record *rec)
200 /* For percpu maps, userspace gets a value per possible CPU */
201 unsigned int nr_cpus = bpf_num_possible_cpus();
202 struct datarec values[nr_cpus];
203 __u64 sum_xdp_redirect = 0;
204 __u64 sum_xdp_pass = 0;
205 __u64 sum_xdp_drop = 0;
206 __u64 sum_processed = 0;
207 __u64 sum_dropped = 0;
211 if ((bpf_map_lookup_elem(fd, &key, values)) != 0) {
213 "ERR: bpf_map_lookup_elem failed key:0x%X\n", key);
216 /* Get time as close as possible to reading map contents */
217 rec->timestamp = gettime();
219 /* Record and sum values from each CPU */
220 for (i = 0; i < nr_cpus; i++) {
221 rec->cpu[i].processed = values[i].processed;
222 sum_processed += values[i].processed;
223 rec->cpu[i].dropped = values[i].dropped;
224 sum_dropped += values[i].dropped;
225 rec->cpu[i].issue = values[i].issue;
226 sum_issue += values[i].issue;
227 rec->cpu[i].xdp_pass = values[i].xdp_pass;
228 sum_xdp_pass += values[i].xdp_pass;
229 rec->cpu[i].xdp_drop = values[i].xdp_drop;
230 sum_xdp_drop += values[i].xdp_drop;
231 rec->cpu[i].xdp_redirect = values[i].xdp_redirect;
232 sum_xdp_redirect += values[i].xdp_redirect;
234 rec->total.processed = sum_processed;
235 rec->total.dropped = sum_dropped;
236 rec->total.issue = sum_issue;
237 rec->total.xdp_pass = sum_xdp_pass;
238 rec->total.xdp_drop = sum_xdp_drop;
239 rec->total.xdp_redirect = sum_xdp_redirect;
243 static struct datarec *alloc_record_per_cpu(void)
245 unsigned int nr_cpus = bpf_num_possible_cpus();
246 struct datarec *array;
248 array = calloc(nr_cpus, sizeof(struct datarec));
250 fprintf(stderr, "Mem alloc error (nr_cpus:%u)\n", nr_cpus);
256 static struct stats_record *alloc_stats_record(void)
258 struct stats_record *rec;
261 size = sizeof(*rec) + n_cpus * sizeof(struct record);
264 fprintf(stderr, "Mem alloc error\n");
267 memset(rec, 0, size);
268 rec->rx_cnt.cpu = alloc_record_per_cpu();
269 rec->redir_err.cpu = alloc_record_per_cpu();
270 rec->kthread.cpu = alloc_record_per_cpu();
271 rec->exception.cpu = alloc_record_per_cpu();
272 for (i = 0; i < n_cpus; i++)
273 rec->enq[i].cpu = alloc_record_per_cpu();
278 static void free_stats_record(struct stats_record *r)
282 for (i = 0; i < n_cpus; i++)
284 free(r->exception.cpu);
285 free(r->kthread.cpu);
286 free(r->redir_err.cpu);
291 static double calc_period(struct record *r, struct record *p)
296 period = r->timestamp - p->timestamp;
298 period_ = ((double) period / NANOSEC_PER_SEC);
303 static __u64 calc_pps(struct datarec *r, struct datarec *p, double period_)
309 packets = r->processed - p->processed;
310 pps = packets / period_;
315 static __u64 calc_drop_pps(struct datarec *r, struct datarec *p, double period_)
321 packets = r->dropped - p->dropped;
322 pps = packets / period_;
327 static __u64 calc_errs_pps(struct datarec *r,
328 struct datarec *p, double period_)
334 packets = r->issue - p->issue;
335 pps = packets / period_;
340 static void calc_xdp_pps(struct datarec *r, struct datarec *p,
341 double *xdp_pass, double *xdp_drop,
342 double *xdp_redirect, double period_)
344 *xdp_pass = 0, *xdp_drop = 0, *xdp_redirect = 0;
346 *xdp_redirect = (r->xdp_redirect - p->xdp_redirect) / period_;
347 *xdp_pass = (r->xdp_pass - p->xdp_pass) / period_;
348 *xdp_drop = (r->xdp_drop - p->xdp_drop) / period_;
352 static void stats_print(struct stats_record *stats_rec,
353 struct stats_record *stats_prev,
354 char *prog_name, char *mprog_name, int mprog_fd)
356 unsigned int nr_cpus = bpf_num_possible_cpus();
357 double pps = 0, drop = 0, err = 0;
358 bool mprog_enabled = false;
359 struct record *rec, *prev;
365 mprog_enabled = true;
368 printf("Running XDP/eBPF prog_name:%s\n", prog_name);
369 printf("%-15s %-7s %-14s %-11s %-9s\n",
370 "XDP-cpumap", "CPU:to", "pps", "drop-pps", "extra-info");
374 char *fmt_rx = "%-15s %-7d %'-14.0f %'-11.0f %'-10.0f %s\n";
375 char *fm2_rx = "%-15s %-7s %'-14.0f %'-11.0f\n";
378 rec = &stats_rec->rx_cnt;
379 prev = &stats_prev->rx_cnt;
380 t = calc_period(rec, prev);
381 for (i = 0; i < nr_cpus; i++) {
382 struct datarec *r = &rec->cpu[i];
383 struct datarec *p = &prev->cpu[i];
385 pps = calc_pps(r, p, t);
386 drop = calc_drop_pps(r, p, t);
387 err = calc_errs_pps(r, p, t);
389 errstr = "cpu-dest/err";
391 printf(fmt_rx, "XDP-RX",
392 i, pps, drop, err, errstr);
394 pps = calc_pps(&rec->total, &prev->total, t);
395 drop = calc_drop_pps(&rec->total, &prev->total, t);
396 err = calc_errs_pps(&rec->total, &prev->total, t);
397 printf(fm2_rx, "XDP-RX", "total", pps, drop);
400 /* cpumap enqueue stats */
401 for (to_cpu = 0; to_cpu < n_cpus; to_cpu++) {
402 char *fmt = "%-15s %3d:%-3d %'-14.0f %'-11.0f %'-10.2f %s\n";
403 char *fm2 = "%-15s %3s:%-3d %'-14.0f %'-11.0f %'-10.2f %s\n";
406 rec = &stats_rec->enq[to_cpu];
407 prev = &stats_prev->enq[to_cpu];
408 t = calc_period(rec, prev);
409 for (i = 0; i < nr_cpus; i++) {
410 struct datarec *r = &rec->cpu[i];
411 struct datarec *p = &prev->cpu[i];
413 pps = calc_pps(r, p, t);
414 drop = calc_drop_pps(r, p, t);
415 err = calc_errs_pps(r, p, t);
417 errstr = "bulk-average";
418 err = pps / err; /* calc average bulk size */
421 printf(fmt, "cpumap-enqueue",
422 i, to_cpu, pps, drop, err, errstr);
424 pps = calc_pps(&rec->total, &prev->total, t);
426 drop = calc_drop_pps(&rec->total, &prev->total, t);
427 err = calc_errs_pps(&rec->total, &prev->total, t);
429 errstr = "bulk-average";
430 err = pps / err; /* calc average bulk size */
432 printf(fm2, "cpumap-enqueue",
433 "sum", to_cpu, pps, drop, err, errstr);
437 /* cpumap kthread stats */
439 char *fmt_k = "%-15s %-7d %'-14.0f %'-11.0f %'-10.0f %s\n";
440 char *fm2_k = "%-15s %-7s %'-14.0f %'-11.0f %'-10.0f %s\n";
443 rec = &stats_rec->kthread;
444 prev = &stats_prev->kthread;
445 t = calc_period(rec, prev);
446 for (i = 0; i < nr_cpus; i++) {
447 struct datarec *r = &rec->cpu[i];
448 struct datarec *p = &prev->cpu[i];
450 pps = calc_pps(r, p, t);
451 drop = calc_drop_pps(r, p, t);
452 err = calc_errs_pps(r, p, t);
456 printf(fmt_k, "cpumap_kthread",
457 i, pps, drop, err, e_str);
459 pps = calc_pps(&rec->total, &prev->total, t);
460 drop = calc_drop_pps(&rec->total, &prev->total, t);
461 err = calc_errs_pps(&rec->total, &prev->total, t);
464 printf(fm2_k, "cpumap_kthread", "total", pps, drop, err, e_str);
467 /* XDP redirect err tracepoints (very unlikely) */
469 char *fmt_err = "%-15s %-7d %'-14.0f %'-11.0f\n";
470 char *fm2_err = "%-15s %-7s %'-14.0f %'-11.0f\n";
472 rec = &stats_rec->redir_err;
473 prev = &stats_prev->redir_err;
474 t = calc_period(rec, prev);
475 for (i = 0; i < nr_cpus; i++) {
476 struct datarec *r = &rec->cpu[i];
477 struct datarec *p = &prev->cpu[i];
479 pps = calc_pps(r, p, t);
480 drop = calc_drop_pps(r, p, t);
482 printf(fmt_err, "redirect_err", i, pps, drop);
484 pps = calc_pps(&rec->total, &prev->total, t);
485 drop = calc_drop_pps(&rec->total, &prev->total, t);
486 printf(fm2_err, "redirect_err", "total", pps, drop);
489 /* XDP general exception tracepoints */
491 char *fmt_err = "%-15s %-7d %'-14.0f %'-11.0f\n";
492 char *fm2_err = "%-15s %-7s %'-14.0f %'-11.0f\n";
494 rec = &stats_rec->exception;
495 prev = &stats_prev->exception;
496 t = calc_period(rec, prev);
497 for (i = 0; i < nr_cpus; i++) {
498 struct datarec *r = &rec->cpu[i];
499 struct datarec *p = &prev->cpu[i];
501 pps = calc_pps(r, p, t);
502 drop = calc_drop_pps(r, p, t);
504 printf(fmt_err, "xdp_exception", i, pps, drop);
506 pps = calc_pps(&rec->total, &prev->total, t);
507 drop = calc_drop_pps(&rec->total, &prev->total, t);
508 printf(fm2_err, "xdp_exception", "total", pps, drop);
511 /* CPUMAP attached XDP program that runs on remote/destination CPU */
513 char *fmt_k = "%-15s %-7d %'-14.0f %'-11.0f %'-10.0f\n";
514 char *fm2_k = "%-15s %-7s %'-14.0f %'-11.0f %'-10.0f\n";
515 double xdp_pass, xdp_drop, xdp_redirect;
517 printf("\n2nd remote XDP/eBPF prog_name: %s\n", mprog_name);
518 printf("%-15s %-7s %-14s %-11s %-9s\n",
519 "XDP-cpumap", "CPU:to", "xdp-pass", "xdp-drop", "xdp-redir");
521 rec = &stats_rec->kthread;
522 prev = &stats_prev->kthread;
523 t = calc_period(rec, prev);
524 for (i = 0; i < nr_cpus; i++) {
525 struct datarec *r = &rec->cpu[i];
526 struct datarec *p = &prev->cpu[i];
528 calc_xdp_pps(r, p, &xdp_pass, &xdp_drop,
530 if (xdp_pass > 0 || xdp_drop > 0 || xdp_redirect > 0)
531 printf(fmt_k, "xdp-in-kthread", i, xdp_pass, xdp_drop,
534 calc_xdp_pps(&rec->total, &prev->total, &xdp_pass, &xdp_drop,
536 printf(fm2_k, "xdp-in-kthread", "total", xdp_pass, xdp_drop, xdp_redirect);
543 static void stats_collect(struct stats_record *rec)
547 fd = map_fds[RX_CNT];
548 map_collect_percpu(fd, 0, &rec->rx_cnt);
550 fd = map_fds[REDIRECT_ERR_CNT];
551 map_collect_percpu(fd, 1, &rec->redir_err);
553 fd = map_fds[CPUMAP_ENQUEUE_CNT];
554 for (i = 0; i < n_cpus; i++)
555 map_collect_percpu(fd, i, &rec->enq[i]);
557 fd = map_fds[CPUMAP_KTHREAD_CNT];
558 map_collect_percpu(fd, 0, &rec->kthread);
560 fd = map_fds[EXCEPTION_CNT];
561 map_collect_percpu(fd, 0, &rec->exception);
/* Forward declaration so the prototype below does not introduce a
 * parameter-scoped struct tag when compiled in isolation.
 */
struct stats_record;

/* Pointer swap trick: exchange the two stats_record pointers so the old
 * "current" snapshot becomes "previous" without copying any data.
 */
static inline void swap(struct stats_record **a, struct stats_record **b)
{
	struct stats_record *tmp;

	tmp = *a;
	*a = *b;
	*b = tmp;
}
575 static int create_cpu_entry(__u32 cpu, struct bpf_cpumap_val *value,
576 __u32 avail_idx, bool new)
578 __u32 curr_cpus_count = 0;
582 /* Add a CPU entry to cpumap, as this allocate a cpu entry in
583 * the kernel for the cpu.
585 ret = bpf_map_update_elem(map_fds[CPU_MAP], &cpu, value, 0);
587 fprintf(stderr, "Create CPU entry failed (err:%d)\n", ret);
591 /* Inform bpf_prog's that a new CPU is available to select
592 * from via some control maps.
594 ret = bpf_map_update_elem(map_fds[CPUS_AVAILABLE], &avail_idx, &cpu, 0);
596 fprintf(stderr, "Add to avail CPUs failed\n");
600 /* When not replacing/updating existing entry, bump the count */
601 ret = bpf_map_lookup_elem(map_fds[CPUS_COUNT], &key, &curr_cpus_count);
603 fprintf(stderr, "Failed reading curr cpus_count\n");
608 ret = bpf_map_update_elem(map_fds[CPUS_COUNT], &key,
609 &curr_cpus_count, 0);
611 fprintf(stderr, "Failed write curr cpus_count\n");
615 /* map_fd[7] = cpus_iterator */
616 printf("%s CPU:%u as idx:%u qsize:%d prog_fd: %d (cpus_count:%u)\n",
617 new ? "Add-new":"Replace", cpu, avail_idx,
618 value->qsize, value->bpf_prog.fd, curr_cpus_count);
623 /* CPUs are zero-indexed. Thus, add a special sentinel default value
624 * in map cpus_available to mark CPU index'es not configured
626 static void mark_cpus_unavailable(void)
628 __u32 invalid_cpu = n_cpus;
631 for (i = 0; i < n_cpus; i++) {
632 ret = bpf_map_update_elem(map_fds[CPUS_AVAILABLE], &i,
635 fprintf(stderr, "Failed marking CPU unavailable\n");
641 /* Stress cpumap management code by concurrently changing underlying cpumap */
642 static void stress_cpumap(struct bpf_cpumap_val *value)
644 /* Changing qsize will cause kernel to free and alloc a new
645 * bpf_cpu_map_entry, with an associated/complicated tear-down
649 create_cpu_entry(1, value, 0, false);
651 create_cpu_entry(1, value, 0, false);
652 value->qsize = 16000;
653 create_cpu_entry(1, value, 0, false);
656 static void stats_poll(int interval, bool use_separators, char *prog_name,
657 char *mprog_name, struct bpf_cpumap_val *value,
660 struct stats_record *record, *prev;
663 record = alloc_stats_record();
664 prev = alloc_stats_record();
665 stats_collect(record);
667 /* Trick to pretty printf with thousands separators use %' */
669 setlocale(LC_NUMERIC, "en_US");
672 swap(&prev, &record);
673 mprog_fd = value->bpf_prog.fd;
674 stats_collect(record);
675 stats_print(record, prev, prog_name, mprog_name, mprog_fd);
678 stress_cpumap(value);
681 free_stats_record(record);
682 free_stats_record(prev);
685 static int init_tracepoints(struct bpf_object *obj)
687 struct bpf_program *prog;
689 bpf_object__for_each_program(prog, obj) {
690 if (bpf_program__is_tracepoint(prog) != true)
693 tp_links[tp_cnt] = bpf_program__attach(prog);
694 if (libbpf_get_error(tp_links[tp_cnt])) {
695 tp_links[tp_cnt] = NULL;
704 static int init_map_fds(struct bpf_object *obj)
708 for (type = 0; type < NUM_MAP; type++) {
710 bpf_object__find_map_fd_by_name(obj,
711 map_type_strings[type]);
713 if (map_fds[type] < 0)
720 static int load_cpumap_prog(char *file_name, char *prog_name,
721 char *redir_interface, char *redir_map)
723 struct bpf_prog_load_attr prog_load_attr = {
724 .prog_type = BPF_PROG_TYPE_XDP,
725 .expected_attach_type = BPF_XDP_CPUMAP,
728 struct bpf_program *prog;
729 struct bpf_object *obj;
732 if (bpf_prog_load_xattr(&prog_load_attr, &obj, &fd))
736 fprintf(stderr, "ERR: bpf_prog_load_xattr: %s\n",
741 if (redir_interface && redir_map) {
742 int err, map_fd, ifindex_out, key = 0;
744 map_fd = bpf_object__find_map_fd_by_name(obj, redir_map);
748 ifindex_out = if_nametoindex(redir_interface);
752 err = bpf_map_update_elem(map_fd, &key, &ifindex_out, 0);
757 prog = bpf_object__find_program_by_title(obj, prog_name);
759 fprintf(stderr, "bpf_object__find_program_by_title failed\n");
763 return bpf_program__fd(prog);
/* Entry point: loads the companion <argv0>_kern.o object, parses options,
 * configures the cpumap (one entry per --cpu), attaches the chosen XDP
 * program to --dev and enters the stats_poll() loop.
 * NOTE(review): this chunk is elided — the opening brace, several local
 * declarations (filename[], cpu[], opt/longindex, interval, prog_fd, err,
 * qsize, add_cpu, added_cpus, ifname, i) and the tail after stats_poll()
 * are not visible here; code lines below are kept byte-identical.
 */
int main(int argc, char **argv)
	/* Default XDP program section to load from the _kern.o object */
	char *prog_name = "xdp_cpu_map5_lb_hash_ip_pairs";
	/* Defaults for the optional 2nd-level (cpumap-attached) program */
	char *mprog_filename = "xdp_redirect_kern.o";
	char *redir_interface = NULL, *redir_map = NULL;
	char *mprog_name = "xdp_redirect_dummy";
	bool mprog_disable = false;
	struct bpf_prog_load_attr prog_load_attr = {
		.prog_type	= BPF_PROG_TYPE_UNSPEC,
	struct bpf_prog_info info = {};
	__u32 info_len = sizeof(info);
	struct bpf_cpumap_val value;
	bool use_separators = true;
	bool stress_mode = false;
	struct bpf_program *prog;
	struct bpf_object *obj;
	/* Size cpumap-related arrays by the number of configured CPUs */
	n_cpus = get_nprocs_conf();
	/* Notice: choosing the queue size is very important with the
	 * ixgbe driver, because its driver page recycling trick is
	 * dependent on pages being returned quickly.  The number of
	 * out-standing packets in the system must be less-than 2x
	 */
	/* Object file name is derived from our own binary name */
	snprintf(filename, sizeof(filename), "%s_kern.o", argv[0]);
	prog_load_attr.file = filename;
	if (bpf_prog_load_xattr(&prog_load_attr, &obj, &prog_fd))
		fprintf(stderr, "ERR: bpf_prog_load_xattr: %s\n",
	if (init_tracepoints(obj) < 0) {
		fprintf(stderr, "ERR: bpf_program__attach failed\n");
	if (init_map_fds(obj) < 0) {
		fprintf(stderr, "bpf_object__find_map_fd_by_name failed\n");
	/* Pre-fill cpus_available with the "unconfigured" sentinel */
	mark_cpus_unavailable();
	cpu = malloc(n_cpus * sizeof(int));
		fprintf(stderr, "failed to allocate cpu array\n");
	memset(cpu, 0, n_cpus * sizeof(int));
	/* Parse commands line args */
	while ((opt = getopt_long(argc, argv, "hSd:s:p:q:c:xzFf:e:r:m:",
				  long_options, &longindex)) != -1) {
		/* -d/--dev: target interface */
		if (strlen(optarg) >= IF_NAMESIZE) {
			fprintf(stderr, "ERR: --dev name too long\n");
		ifname = (char *)&ifname_buf;
		/* NOTE(review): strncpy may leave ifname unterminated at
		 * exactly IF_NAMESIZE; guarded by the length check above.
		 */
		strncpy(ifname, optarg, IF_NAMESIZE);
		ifindex = if_nametoindex(ifname);
			"ERR: --dev name unknown err(%d):%s\n",
			errno, strerror(errno));
		/* -s/--sec: reporting interval in seconds */
		interval = atoi(optarg);
		/* -S/--skb-mode: generic XDP instead of driver mode */
		xdp_flags |= XDP_FLAGS_SKB_MODE;
		/* -z/--no-separators: plain numbers in stats output */
		use_separators = false;
		/* Selecting eBPF prog to load */
		mprog_disable = true;
		mprog_filename = optarg;
		redir_interface = optarg;
		/* Add multiple CPUs */
		add_cpu = strtoul(optarg, NULL, 0);
		if (add_cpu >= n_cpus) {
			"--cpu nr too large for cpumap err(%d):%s\n",
			errno, strerror(errno));
		cpu[added_cpus++] = add_cpu;
		/* -q/--qsize: cpumap entry queue size */
		qsize = atoi(optarg);
		/* -F/--force: replace an already-attached XDP program */
		xdp_flags &= ~XDP_FLAGS_UPDATE_IF_NOEXIST;
		return EXIT_FAIL_OPTION;
	/* Default to native driver mode unless SKB mode was requested */
	if (!(xdp_flags & XDP_FLAGS_SKB_MODE))
		xdp_flags |= XDP_FLAGS_DRV_MODE;
	/* Required option */
		fprintf(stderr, "ERR: required option --dev missing\n");
		err = EXIT_FAIL_OPTION;
	/* Required option */
		fprintf(stderr, "ERR: required option --cpu missing\n");
		fprintf(stderr, " Specify multiple --cpu option to add more\n");
		err = EXIT_FAIL_OPTION;
	/* fd 0 means: no 2nd-level program attached to cpumap entries */
	value.bpf_prog.fd = 0;
		value.bpf_prog.fd = load_cpumap_prog(mprog_filename, mprog_name,
						     redir_interface, redir_map);
		if (value.bpf_prog.fd < 0) {
			err = value.bpf_prog.fd;
	/* Populate the cpumap with every CPU given on the command line */
	for (i = 0; i < added_cpus; i++)
		create_cpu_entry(cpu[i], &value, i, true);
	/* Remove XDP program when program is interrupted or killed */
	signal(SIGINT, int_exit);
	signal(SIGTERM, int_exit);
	prog = bpf_object__find_program_by_title(obj, prog_name);
		fprintf(stderr, "bpf_object__find_program_by_title failed\n");
	prog_fd = bpf_program__fd(prog);
		fprintf(stderr, "bpf_program__fd failed\n");
	if (bpf_set_link_xdp_fd(ifindex, prog_fd, xdp_flags) < 0) {
		fprintf(stderr, "link set xdp fd failed\n");
	/* Remember our prog id so int_exit() only detaches our own prog */
	err = bpf_obj_get_info_by_fd(prog_fd, &info, &info_len);
		printf("can't get prog info - %s\n", strerror(errno));
	/* Never returns under normal operation; see stats_poll() */
	stats_poll(interval, use_separators, prog_name, mprog_name,
		   &value, stress_mode);