// SPDX-License-Identifier: GPL-2.0
/* Copyright(c) 2017 - 2018 Intel Corporation. */

#include <errno.h>
#include <getopt.h>
#include <libgen.h>
#include <linux/bpf.h>
#include <linux/if_link.h>
#include <linux/if_xdp.h>
#include <linux/if_ether.h>
#include <linux/ip.h>
#include <linux/limits.h>
#include <linux/udp.h>
#include <arpa/inet.h>
#include <locale.h>
#include <net/ethernet.h>
#include <netinet/ether.h>
#include <net/if.h>
#include <poll.h>
#include <pthread.h>
#include <sched.h>
#include <signal.h>
#include <stdbool.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <sys/capability.h>
#include <sys/mman.h>
#include <sys/resource.h>
#include <sys/socket.h>
#include <sys/types.h>
#include <sys/un.h>
#include <time.h>
#include <unistd.h>

#include <bpf/libbpf.h>
#include <bpf/xsk.h>
#include <bpf/bpf.h>
#include "xdpsock.h"

/* libbpf APIs for AF_XDP are deprecated starting from v0.7 */
#pragma GCC diagnostic ignored "-Wdeprecated-declarations"
#define NUM_FRAMES (4 * 1024)
#define MIN_PKT_SIZE 64

#define DEBUG_HEXDUMP 0

#define VLAN_PRIO_MASK		0xe000 /* Priority Code Point */
#define VLAN_PRIO_SHIFT		13
#define VLAN_VID_MASK		0x0fff /* VLAN Identifier */
#define VLAN_VID__DEFAULT	1
#define VLAN_PRI__DEFAULT	0
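
/*
 * Example: the 16-bit VLAN TCI packs its fields as PCP(3) | DEI(1) | VID(12),
 * which is what the masks above express. With the defaults (pri 0, vid 1) the
 * tag is (0 << 13) | 1 = 0x0001; a priority of 5 with VID 100 would give
 * (5 << 13) | 100 = 0xa064.
 */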

#define NSEC_PER_SEC		1000000000UL
#define NSEC_PER_USEC		1000

#define SCHED_PRI__DEFAULT	0

static unsigned long prev_time;
static long tx_cycle_diff_min;
static long tx_cycle_diff_max;
static double tx_cycle_diff_ave;
static long tx_cycle_cnt;

enum benchmark_type {
	BENCH_RXDROP = 0,
	BENCH_TXONLY = 1,
	BENCH_L2FWD = 2,
};

static enum benchmark_type opt_bench = BENCH_RXDROP;
static u32 opt_xdp_flags = XDP_FLAGS_UPDATE_IF_NOEXIST;
static const char *opt_if = "";
static int opt_ifindex;
static int opt_queue;
static unsigned long opt_duration;
static unsigned long start_time;
static bool benchmark_done;
static u32 opt_batch_size = 64;
static int opt_pkt_count;
static u16 opt_pkt_size = MIN_PKT_SIZE;
static u32 opt_pkt_fill_pattern = 0x12345678;
static bool opt_vlan_tag;
static u16 opt_pkt_vlan_id = VLAN_VID__DEFAULT;
static u16 opt_pkt_vlan_pri = VLAN_PRI__DEFAULT;
static struct ether_addr opt_txdmac = {{ 0x3c, 0xfd, 0xfe,
					 0x9e, 0x7f, 0x71 }};
static struct ether_addr opt_txsmac = {{ 0xec, 0xb1, 0xd7,
					 0x98, 0x3a, 0xc0 }};
static bool opt_extra_stats;
static bool opt_quiet;
static bool opt_app_stats;
static const char *opt_irq_str = "";
static u32 irq_no;
static int irqs_at_init = -1;
static u32 sequence;
static int opt_poll;
static int sock;
static int opt_interval = 1;
static int opt_retries = 3;
static u32 opt_xdp_bind_flags = XDP_USE_NEED_WAKEUP;
static u32 opt_umem_flags;
static int opt_unaligned_chunks;
static int opt_mmap_flags;
static int opt_xsk_frame_size = XSK_UMEM__DEFAULT_FRAME_SIZE;
static int opt_timeout = 1000;
static bool opt_need_wakeup = true;
static u32 opt_num_xsks = 1;
static u32 prog_id;
static bool opt_busy_poll;
static bool opt_reduced_cap;
static clockid_t opt_clock = CLOCK_MONOTONIC;
static unsigned long opt_tx_cycle_ns;
static int opt_schpolicy = SCHED_OTHER;
static int opt_schprio = SCHED_PRI__DEFAULT;
static bool opt_tstamp;

struct vlan_ethhdr {
	unsigned char h_dest[6];
	unsigned char h_source[6];
	__be16 h_vlan_proto;
	__be16 h_vlan_TCI;
	__be16 h_vlan_encapsulated_proto;
};

#define PKTGEN_MAGIC 0xbe9be955

struct pktgen_hdr {
	__be32 pgh_magic;
	__be32 seq_num;
	__be32 tv_sec;
	__be32 tv_usec;
};

struct xsk_ring_stats {
	unsigned long rx_npkts;
	unsigned long tx_npkts;
	unsigned long rx_dropped_npkts;
	unsigned long rx_invalid_npkts;
	unsigned long tx_invalid_npkts;
	unsigned long rx_full_npkts;
	unsigned long rx_fill_empty_npkts;
	unsigned long tx_empty_npkts;
	unsigned long prev_rx_npkts;
	unsigned long prev_tx_npkts;
	unsigned long prev_rx_dropped_npkts;
	unsigned long prev_rx_invalid_npkts;
	unsigned long prev_tx_invalid_npkts;
	unsigned long prev_rx_full_npkts;
	unsigned long prev_rx_fill_empty_npkts;
	unsigned long prev_tx_empty_npkts;
};

struct xsk_driver_stats {
	unsigned long intrs;
	unsigned long prev_intrs;
};

struct xsk_app_stats {
	unsigned long rx_empty_polls;
	unsigned long fill_fail_polls;
	unsigned long copy_tx_sendtos;
	unsigned long tx_wakeup_sendtos;
	unsigned long opt_polls;
	unsigned long prev_rx_empty_polls;
	unsigned long prev_fill_fail_polls;
	unsigned long prev_copy_tx_sendtos;
	unsigned long prev_tx_wakeup_sendtos;
	unsigned long prev_opt_polls;
};

struct xsk_umem_info {
	struct xsk_ring_prod fq;
	struct xsk_ring_cons cq;
	struct xsk_umem *umem;
	void *buffer;
};

struct xsk_socket_info {
	struct xsk_ring_cons rx;
	struct xsk_ring_prod tx;
	struct xsk_umem_info *umem;
	struct xsk_socket *xsk;
	struct xsk_ring_stats ring_stats;
	struct xsk_app_stats app_stats;
	struct xsk_driver_stats drv_stats;
	u32 outstanding_tx;
};

static const struct clockid_map {
	const char *name;
	clockid_t clockid;
} clockids_map[] = {
	{ "REALTIME", CLOCK_REALTIME },
	{ "TAI", CLOCK_TAI },
	{ "BOOTTIME", CLOCK_BOOTTIME },
	{ "MONOTONIC", CLOCK_MONOTONIC },
	{ NULL }
};

static const struct sched_map {
	const char *name;
	int policy;
} schmap[] = {
	{ "OTHER", SCHED_OTHER },
	{ "FIFO", SCHED_FIFO },
	{ NULL }
};

static int num_socks;
struct xsk_socket_info *xsks[MAX_SOCKS];

static int get_clockid(clockid_t *id, const char *name)
{
	const struct clockid_map *clk;

	for (clk = clockids_map; clk->name; clk++) {
		if (strcasecmp(clk->name, name) == 0) {
			*id = clk->clockid;
			return 0;
		}
	}

	return -1;
}

static int get_schpolicy(int *policy, const char *name)
{
	const struct sched_map *sch;

	for (sch = schmap; sch->name; sch++) {
		if (strcasecmp(sch->name, name) == 0) {
			*policy = sch->policy;
			return 0;
		}
	}

	return -1;
}

static unsigned long get_nsecs(void)
{
	struct timespec ts;

	clock_gettime(opt_clock, &ts);
	return ts.tv_sec * 1000000000UL + ts.tv_nsec;
}

static void print_benchmark(bool running)
{
	const char *bench_str = "INVALID";

	if (opt_bench == BENCH_RXDROP)
		bench_str = "rxdrop";
	else if (opt_bench == BENCH_TXONLY)
		bench_str = "txonly";
	else if (opt_bench == BENCH_L2FWD)
		bench_str = "l2fwd";

	printf("%s:%d %s ", opt_if, opt_queue, bench_str);
	if (opt_xdp_flags & XDP_FLAGS_SKB_MODE)
		printf("xdp-skb ");
	else if (opt_xdp_flags & XDP_FLAGS_DRV_MODE)
		printf("xdp-drv ");

	if (opt_poll)
		printf("poll() ");

	if (running) {
		printf("running...");
		fflush(stdout);
	}
}

static int xsk_get_xdp_stats(int fd, struct xsk_socket_info *xsk)
{
	struct xdp_statistics stats;
	socklen_t optlen;
	int err;

	optlen = sizeof(stats);
	err = getsockopt(fd, SOL_XDP, XDP_STATISTICS, &stats, &optlen);
	if (err)
		return err;

	if (optlen == sizeof(struct xdp_statistics)) {
		xsk->ring_stats.rx_dropped_npkts = stats.rx_dropped;
		xsk->ring_stats.rx_invalid_npkts = stats.rx_invalid_descs;
		xsk->ring_stats.tx_invalid_npkts = stats.tx_invalid_descs;
		xsk->ring_stats.rx_full_npkts = stats.rx_ring_full;
		xsk->ring_stats.rx_fill_empty_npkts = stats.rx_fill_ring_empty_descs;
		xsk->ring_stats.tx_empty_npkts = stats.tx_ring_empty_descs;
		return 0;
	}

	return -EINVAL;
}
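
/* Note: struct xdp_statistics has grown over kernel releases (the ring-full
 * and ring-empty counters were added later than rx_dropped), so an older
 * kernel may return a shorter struct here; the optlen check above only trusts
 * the extended counters when the full-size struct came back.
 */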

static void dump_app_stats(long dt)
{
	int i;

	for (i = 0; i < num_socks && xsks[i]; i++) {
		char *fmt = "%-18s %'-14.0f %'-14lu\n";
		double rx_empty_polls_ps, fill_fail_polls_ps, copy_tx_sendtos_ps,
				tx_wakeup_sendtos_ps, opt_polls_ps;

		rx_empty_polls_ps = (xsks[i]->app_stats.rx_empty_polls -
					xsks[i]->app_stats.prev_rx_empty_polls) * 1000000000. / dt;
		fill_fail_polls_ps = (xsks[i]->app_stats.fill_fail_polls -
					xsks[i]->app_stats.prev_fill_fail_polls) * 1000000000. / dt;
		copy_tx_sendtos_ps = (xsks[i]->app_stats.copy_tx_sendtos -
					xsks[i]->app_stats.prev_copy_tx_sendtos) * 1000000000. / dt;
		tx_wakeup_sendtos_ps = (xsks[i]->app_stats.tx_wakeup_sendtos -
					xsks[i]->app_stats.prev_tx_wakeup_sendtos)
										* 1000000000. / dt;
		opt_polls_ps = (xsks[i]->app_stats.opt_polls -
					xsks[i]->app_stats.prev_opt_polls) * 1000000000. / dt;

		printf("\n%-18s %-14s %-14s\n", "", "calls/s", "count");
		printf(fmt, "rx empty polls", rx_empty_polls_ps, xsks[i]->app_stats.rx_empty_polls);
		printf(fmt, "fill fail polls", fill_fail_polls_ps,
		       xsks[i]->app_stats.fill_fail_polls);
		printf(fmt, "copy tx sendtos", copy_tx_sendtos_ps,
		       xsks[i]->app_stats.copy_tx_sendtos);
		printf(fmt, "tx wakeup sendtos", tx_wakeup_sendtos_ps,
		       xsks[i]->app_stats.tx_wakeup_sendtos);
		printf(fmt, "opt polls", opt_polls_ps, xsks[i]->app_stats.opt_polls);

		xsks[i]->app_stats.prev_rx_empty_polls = xsks[i]->app_stats.rx_empty_polls;
		xsks[i]->app_stats.prev_fill_fail_polls = xsks[i]->app_stats.fill_fail_polls;
		xsks[i]->app_stats.prev_copy_tx_sendtos = xsks[i]->app_stats.copy_tx_sendtos;
		xsks[i]->app_stats.prev_tx_wakeup_sendtos = xsks[i]->app_stats.tx_wakeup_sendtos;
		xsks[i]->app_stats.prev_opt_polls = xsks[i]->app_stats.opt_polls;
	}

	if (opt_tx_cycle_ns) {
		printf("\n%-18s %-10s %-10s %-10s %-10s %-10s\n",
		       "", "period", "min", "ave", "max", "cycle");
		printf("%-18s %-10lu %-10lu %-10lu %-10lu %-10lu\n",
		       "Cyclic TX", opt_tx_cycle_ns, tx_cycle_diff_min,
		       (long)(tx_cycle_diff_ave / tx_cycle_cnt),
		       tx_cycle_diff_max, tx_cycle_cnt);
	}
}

static bool get_interrupt_number(void)
{
	FILE *f_int_proc;
	char line[4096];
	bool found = false;

	f_int_proc = fopen("/proc/interrupts", "r");
	if (f_int_proc == NULL) {
		printf("Failed to open /proc/interrupts.\n");
		return found;
	}

	while (!feof(f_int_proc) && !found) {
		/* Make sure to read a full line at a time */
		if (fgets(line, sizeof(line), f_int_proc) == NULL ||
		    line[strlen(line) - 1] != '\n') {
			printf("Error reading from interrupts file\n");
			break;
		}

		/* Extract interrupt number from line */
		if (strstr(line, opt_irq_str) != NULL) {
			irq_no = atoi(line);
			found = true;
			break;
		}
	}

	fclose(f_int_proc);

	return found;
}

static int get_irqs(void)
{
	char count_path[PATH_MAX];
	int total_intrs = -1;
	FILE *f_count_proc;
	char line[4096];

	snprintf(count_path, sizeof(count_path),
		 "/sys/kernel/irq/%i/per_cpu_count", irq_no);
	f_count_proc = fopen(count_path, "r");
	if (f_count_proc == NULL) {
		printf("Failed to open %s\n", count_path);
		return total_intrs;
	}

	if (fgets(line, sizeof(line), f_count_proc) == NULL ||
	    line[strlen(line) - 1] != '\n') {
		printf("Error reading from %s\n", count_path);
	} else {
		static const char com[2] = ",";
		char *token;

		total_intrs = 0;
		token = strtok(line, com);
		while (token != NULL) {
			/* sum up interrupts across all cores */
			total_intrs += atoi(token);
			token = strtok(NULL, com);
		}
	}

	fclose(f_count_proc);

	return total_intrs;
}

static void dump_driver_stats(long dt)
{
	int i;

	for (i = 0; i < num_socks && xsks[i]; i++) {
		char *fmt = "%-18s %'-14.0f %'-14lu\n";
		double intrs_ps;
		int n_ints = get_irqs();

		if (n_ints < 0) {
			printf("error getting intr info for intr %i\n", irq_no);
			return;
		}
		xsks[i]->drv_stats.intrs = n_ints - irqs_at_init;

		intrs_ps = (xsks[i]->drv_stats.intrs - xsks[i]->drv_stats.prev_intrs) *
			 1000000000. / dt;

		printf("\n%-18s %-14s %-14s\n", "", "intrs/s", "count");
		printf(fmt, "irqs", intrs_ps, xsks[i]->drv_stats.intrs);

		xsks[i]->drv_stats.prev_intrs = xsks[i]->drv_stats.intrs;
	}
}

static void dump_stats(void)
{
	unsigned long now = get_nsecs();
	long dt = now - prev_time;
	int i;

	prev_time = now;

	for (i = 0; i < num_socks && xsks[i]; i++) {
		char *fmt = "%-18s %'-14.0f %'-14lu\n";
		double rx_pps, tx_pps, dropped_pps, rx_invalid_pps, full_pps, fill_empty_pps,
			tx_invalid_pps, tx_empty_pps;

		rx_pps = (xsks[i]->ring_stats.rx_npkts - xsks[i]->ring_stats.prev_rx_npkts) *
			 1000000000. / dt;
		tx_pps = (xsks[i]->ring_stats.tx_npkts - xsks[i]->ring_stats.prev_tx_npkts) *
			 1000000000. / dt;

		printf("\n sock%d@", i);
		print_benchmark(false);
		printf("\n");

		printf("%-18s %-14s %-14s %-14.2f\n", "", "pps", "pkts",
		       dt / 1000000000.);
		printf(fmt, "rx", rx_pps, xsks[i]->ring_stats.rx_npkts);
		printf(fmt, "tx", tx_pps, xsks[i]->ring_stats.tx_npkts);

		xsks[i]->ring_stats.prev_rx_npkts = xsks[i]->ring_stats.rx_npkts;
		xsks[i]->ring_stats.prev_tx_npkts = xsks[i]->ring_stats.tx_npkts;

		if (opt_extra_stats) {
			if (!xsk_get_xdp_stats(xsk_socket__fd(xsks[i]->xsk), xsks[i])) {
				dropped_pps = (xsks[i]->ring_stats.rx_dropped_npkts -
						xsks[i]->ring_stats.prev_rx_dropped_npkts) *
							1000000000. / dt;
				rx_invalid_pps = (xsks[i]->ring_stats.rx_invalid_npkts -
						xsks[i]->ring_stats.prev_rx_invalid_npkts) *
							1000000000. / dt;
				tx_invalid_pps = (xsks[i]->ring_stats.tx_invalid_npkts -
						xsks[i]->ring_stats.prev_tx_invalid_npkts) *
							1000000000. / dt;
				full_pps = (xsks[i]->ring_stats.rx_full_npkts -
						xsks[i]->ring_stats.prev_rx_full_npkts) *
							1000000000. / dt;
				fill_empty_pps = (xsks[i]->ring_stats.rx_fill_empty_npkts -
						xsks[i]->ring_stats.prev_rx_fill_empty_npkts) *
							1000000000. / dt;
				tx_empty_pps = (xsks[i]->ring_stats.tx_empty_npkts -
						xsks[i]->ring_stats.prev_tx_empty_npkts) *
							1000000000. / dt;

				printf(fmt, "rx dropped", dropped_pps,
				       xsks[i]->ring_stats.rx_dropped_npkts);
				printf(fmt, "rx invalid", rx_invalid_pps,
				       xsks[i]->ring_stats.rx_invalid_npkts);
				printf(fmt, "tx invalid", tx_invalid_pps,
				       xsks[i]->ring_stats.tx_invalid_npkts);
				printf(fmt, "rx queue full", full_pps,
				       xsks[i]->ring_stats.rx_full_npkts);
				printf(fmt, "fill ring empty", fill_empty_pps,
				       xsks[i]->ring_stats.rx_fill_empty_npkts);
				printf(fmt, "tx ring empty", tx_empty_pps,
				       xsks[i]->ring_stats.tx_empty_npkts);

				xsks[i]->ring_stats.prev_rx_dropped_npkts =
					xsks[i]->ring_stats.rx_dropped_npkts;
				xsks[i]->ring_stats.prev_rx_invalid_npkts =
					xsks[i]->ring_stats.rx_invalid_npkts;
				xsks[i]->ring_stats.prev_tx_invalid_npkts =
					xsks[i]->ring_stats.tx_invalid_npkts;
				xsks[i]->ring_stats.prev_rx_full_npkts =
					xsks[i]->ring_stats.rx_full_npkts;
				xsks[i]->ring_stats.prev_rx_fill_empty_npkts =
					xsks[i]->ring_stats.rx_fill_empty_npkts;
				xsks[i]->ring_stats.prev_tx_empty_npkts =
					xsks[i]->ring_stats.tx_empty_npkts;
			} else {
				printf("%-15s\n", "Error retrieving extra stats");
			}
		}
	}

	if (opt_app_stats)
		dump_app_stats(dt);
	if (irq_no)
		dump_driver_stats(dt);
}

static bool is_benchmark_done(void)
{
	if (opt_duration > 0) {
		unsigned long dt = (get_nsecs() - start_time);

		if (dt >= opt_duration)
			benchmark_done = true;
	}
	return benchmark_done;
}

static void *poller(void *arg)
{
	(void)arg;
	while (!is_benchmark_done()) {
		sleep(opt_interval);
		dump_stats();
	}

	return NULL;
}

static void remove_xdp_program(void)
{
	u32 curr_prog_id = 0;

	if (bpf_xdp_query_id(opt_ifindex, opt_xdp_flags, &curr_prog_id)) {
		printf("bpf_xdp_query_id failed\n");
		exit(EXIT_FAILURE);
	}

	if (prog_id == curr_prog_id)
		bpf_xdp_detach(opt_ifindex, opt_xdp_flags, NULL);
	else if (!curr_prog_id)
		printf("couldn't find a prog id on a given interface\n");
	else
		printf("program on interface changed, not removing\n");
}

static void int_exit(int sig)
{
	benchmark_done = true;
}

static void __exit_with_error(int error, const char *file, const char *func,
			      int line)
{
	fprintf(stderr, "%s:%s:%i: errno: %d/\"%s\"\n", file, func,
		line, error, strerror(error));

	if (opt_num_xsks > 1)
		remove_xdp_program();
	exit(EXIT_FAILURE);
}

#define exit_with_error(error) __exit_with_error(error, __FILE__, __func__, __LINE__)

static void xdpsock_cleanup(void)
{
	struct xsk_umem *umem = xsks[0]->umem->umem;
	int i, cmd = CLOSE_CONN;

	dump_stats();
	for (i = 0; i < num_socks; i++)
		xsk_socket__delete(xsks[i]->xsk);
	(void)xsk_umem__delete(umem);

	if (opt_reduced_cap) {
		if (write(sock, &cmd, sizeof(int)) < 0)
			exit_with_error(errno);
	}

	if (opt_num_xsks > 1)
		remove_xdp_program();
}

static void swap_mac_addresses(void *data)
{
	struct ether_header *eth = (struct ether_header *)data;
	struct ether_addr *src_addr = (struct ether_addr *)&eth->ether_shost;
	struct ether_addr *dst_addr = (struct ether_addr *)&eth->ether_dhost;
	struct ether_addr tmp;

	tmp = *src_addr;
	*src_addr = *dst_addr;
	*dst_addr = tmp;
}

static void hex_dump(void *pkt, size_t length, u64 addr)
{
	const unsigned char *address = (unsigned char *)pkt;
	const unsigned char *line = address;
	size_t line_size = 32;
	unsigned char c;
	char buf[32];
	int i = 0;

	if (!DEBUG_HEXDUMP)
		return;

	sprintf(buf, "addr=%llu", addr);
	printf("length = %zu\n", length);
	printf("%s | ", buf);
	while (length-- > 0) {
		printf("%02X ", *address++);
		if (!(++i % line_size) || (length == 0 && i % line_size)) {
			if (length == 0) {
				while (i++ % line_size)
					printf("__ ");
			}
			printf(" | ");	/* right close */
			while (line < address) {
				c = *line++;
				printf("%c", (c < 33 || c == 255) ? 0x2E : c);
			}
			printf("\n");
			if (length > 0)
				printf("%s | ", buf);
		}
	}
	printf("\n");
}

static void *memset32_htonl(void *dest, u32 val, u32 size)
{
	u32 *ptr = (u32 *)dest;
	int i;

	val = htonl(val);

	for (i = 0; i < (size & (~0x3)); i += 4)
		ptr[i >> 2] = val;

	for (; i < size; i++)
		((char *)dest)[i] = ((char *)&val)[i & 3];

	return dest;
}
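
/* Example: because the value is converted with htonl() first, the fill bytes
 * land in network byte order regardless of host endianness;
 * memset32_htonl(buf, 0x12345678, 8) writes 12 34 56 78 12 34 56 78.
 */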

/*
 * This function code has been taken from
 * Linux kernel lib/checksum.c
 */
static inline unsigned short from32to16(unsigned int x)
{
	/* add up 16-bit and 16-bit for 16+c bit */
	x = (x & 0xffff) + (x >> 16);
	/* add up carry.. */
	x = (x & 0xffff) + (x >> 16);
	return x;
}

/*
 * This function code has been taken from
 * Linux kernel lib/checksum.c
 */
static unsigned int do_csum(const unsigned char *buff, int len)
{
	unsigned int result = 0;
	int odd;

	if (len <= 0)
		goto out;
	odd = 1 & (unsigned long)buff;
	if (odd) {
#ifdef __LITTLE_ENDIAN
		result += (*buff << 8);
#else
		result = *buff;
#endif
		len--;
		buff++;
	}
	if (len >= 2) {
		if (2 & (unsigned long)buff) {
			result += *(unsigned short *)buff;
			len -= 2;
			buff += 2;
		}
		if (len >= 4) {
			const unsigned char *end = buff +
						   ((unsigned int)len & ~3);
			unsigned int carry = 0;

			do {
				unsigned int w = *(unsigned int *)buff;

				buff += 4;
				result += carry;
				result += w;
				carry = (w > result);
			} while (buff < end);
			result += carry;
			result = (result & 0xffff) + (result >> 16);
		}
		if (len & 2) {
			result += *(unsigned short *)buff;
			buff += 2;
		}
	}
	if (len & 1)
#ifdef __LITTLE_ENDIAN
		result += *buff;
#else
		result += (*buff << 8);
#endif
	result = from32to16(result);
	if (odd)
		result = ((result >> 8) & 0xff) | ((result & 0xff) << 8);
out:
	return result;
}

/*
 * This is a version of ip_compute_csum() optimized for IP headers,
 * which always checksum on 4 octet boundaries.
 * This function code has been taken from
 * Linux kernel lib/checksum.c
 */
static inline __sum16 ip_fast_csum(const void *iph, unsigned int ihl)
{
	return (__sum16)~do_csum(iph, ihl * 4);
}
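
/* ihl counts 32-bit words, so ihl * 4 is the header length in bytes; the
 * 20-byte header built in gen_eth_hdr_data() below is summed with ihl == 5.
 */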

/*
 * Fold a partial checksum
 * This function code has been taken from
 * Linux kernel include/asm-generic/checksum.h
 */
static inline __sum16 csum_fold(__wsum csum)
{
	u32 sum = (u32)csum;

	sum = (sum & 0xffff) + (sum >> 16);
	sum = (sum & 0xffff) + (sum >> 16);
	return (__sum16)~sum;
}
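
/* Worked example: folding csum = 0x12345 gives 0x2345 + 0x1 = 0x2346 on the
 * first pass (the second pass is a no-op here), and the final one's
 * complement yields (__sum16)~0x2346 = 0xdcb9.
 */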

/*
 * This function code has been taken from
 * Linux kernel lib/checksum.c
 */
static inline u32 from64to32(u64 x)
{
	/* add up 32-bit and 32-bit for 32+c bit */
	x = (x & 0xffffffff) + (x >> 32);
	/* add up carry.. */
	x = (x & 0xffffffff) + (x >> 32);
	return (u32)x;
}

__wsum csum_tcpudp_nofold(__be32 saddr, __be32 daddr,
			  __u32 len, __u8 proto, __wsum sum);

/*
 * This function code has been taken from
 * Linux kernel lib/checksum.c
 */
__wsum csum_tcpudp_nofold(__be32 saddr, __be32 daddr,
			  __u32 len, __u8 proto, __wsum sum)
{
	unsigned long long s = (u32)sum;

	s += (u32)saddr;
	s += (u32)daddr;
#ifdef __BIG_ENDIAN__
	s += proto + len;
#else
	s += (proto + len) << 8;
#endif
	return (__wsum)from64to32(s);
}

/*
 * This function has been taken from
 * Linux kernel include/asm-generic/checksum.h
 */
static inline __sum16
csum_tcpudp_magic(__be32 saddr, __be32 daddr, __u32 len,
		  __u8 proto, __wsum sum)
{
	return csum_fold(csum_tcpudp_nofold(saddr, daddr, len, proto, sum));
}

static inline u16 udp_csum(u32 saddr, u32 daddr, u32 len,
			   u8 proto, u16 *udp_pkt)
{
	u32 csum = 0;
	u32 cnt = 0;

	/* udp hdr and data */
	for (; cnt < len; cnt += 2)
		csum += udp_pkt[cnt >> 1];

	return csum_tcpudp_magic(saddr, daddr, len, proto, csum);
}
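
/* The loop above sums the UDP header and payload one 16-bit word at a time,
 * so it effectively assumes an even length; for an odd length the final
 * iteration also pulls in the byte after the payload, which matches the
 * RFC 768 convention only if that trailing pad byte is zero.
 */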

#define ETH_FCS_SIZE 4

#define ETH_HDR_SIZE (opt_vlan_tag ? sizeof(struct vlan_ethhdr) : \
		      sizeof(struct ethhdr))
#define PKTGEN_HDR_SIZE (opt_tstamp ? sizeof(struct pktgen_hdr) : 0)
#define PKT_HDR_SIZE (ETH_HDR_SIZE + sizeof(struct iphdr) + \
		      sizeof(struct udphdr) + PKTGEN_HDR_SIZE)
#define PKTGEN_HDR_OFFSET (ETH_HDR_SIZE + sizeof(struct iphdr) + \
			   sizeof(struct udphdr))
#define PKTGEN_SIZE_MIN (PKTGEN_HDR_OFFSET + sizeof(struct pktgen_hdr) + \
			 ETH_FCS_SIZE)

#define PKT_SIZE		(opt_pkt_size - ETH_FCS_SIZE)
#define IP_PKT_SIZE		(PKT_SIZE - ETH_HDR_SIZE)
#define UDP_PKT_SIZE		(IP_PKT_SIZE - sizeof(struct iphdr))
#define UDP_PKT_DATA_SIZE	(UDP_PKT_SIZE - \
				 (sizeof(struct udphdr) + PKTGEN_HDR_SIZE))
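
/* Worked example with the defaults (opt_pkt_size = 64, no VLAN tag, no
 * tstamp): PKT_SIZE = 64 - 4 = 60 bytes handed to the NIC (the FCS is added
 * by hardware), IP_PKT_SIZE = 60 - 14 = 46, UDP_PKT_SIZE = 46 - 20 = 26, and
 * UDP_PKT_DATA_SIZE = 26 - 8 = 18 bytes of fill pattern.
 */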

static u8 pkt_data[XSK_UMEM__DEFAULT_FRAME_SIZE];

static void gen_eth_hdr_data(void)
{
	struct pktgen_hdr *pktgen_hdr;
	struct udphdr *udp_hdr;
	struct iphdr *ip_hdr;

	if (opt_vlan_tag) {
		struct vlan_ethhdr *veth_hdr = (struct vlan_ethhdr *)pkt_data;
		u16 vlan_tci = 0;

		udp_hdr = (struct udphdr *)(pkt_data +
					    sizeof(struct vlan_ethhdr) +
					    sizeof(struct iphdr));
		ip_hdr = (struct iphdr *)(pkt_data +
					  sizeof(struct vlan_ethhdr));
		pktgen_hdr = (struct pktgen_hdr *)(pkt_data +
						   sizeof(struct vlan_ethhdr) +
						   sizeof(struct iphdr) +
						   sizeof(struct udphdr));
		/* ethernet & VLAN header */
		memcpy(veth_hdr->h_dest, &opt_txdmac, ETH_ALEN);
		memcpy(veth_hdr->h_source, &opt_txsmac, ETH_ALEN);
		veth_hdr->h_vlan_proto = htons(ETH_P_8021Q);
		vlan_tci = opt_pkt_vlan_id & VLAN_VID_MASK;
		vlan_tci |= (opt_pkt_vlan_pri << VLAN_PRIO_SHIFT) & VLAN_PRIO_MASK;
		veth_hdr->h_vlan_TCI = htons(vlan_tci);
		veth_hdr->h_vlan_encapsulated_proto = htons(ETH_P_IP);
	} else {
		struct ethhdr *eth_hdr = (struct ethhdr *)pkt_data;

		udp_hdr = (struct udphdr *)(pkt_data +
					    sizeof(struct ethhdr) +
					    sizeof(struct iphdr));
		ip_hdr = (struct iphdr *)(pkt_data +
					  sizeof(struct ethhdr));
		pktgen_hdr = (struct pktgen_hdr *)(pkt_data +
						   sizeof(struct ethhdr) +
						   sizeof(struct iphdr) +
						   sizeof(struct udphdr));
		/* ethernet header */
		memcpy(eth_hdr->h_dest, &opt_txdmac, ETH_ALEN);
		memcpy(eth_hdr->h_source, &opt_txsmac, ETH_ALEN);
		eth_hdr->h_proto = htons(ETH_P_IP);
	}

	/* IP header */
	ip_hdr->version = IPVERSION;
	ip_hdr->ihl = 0x5; /* 20 byte header */
	ip_hdr->tos = 0x0;
	ip_hdr->tot_len = htons(IP_PKT_SIZE);
	ip_hdr->id = 0;
	ip_hdr->frag_off = 0;
	ip_hdr->ttl = IPDEFTTL;
	ip_hdr->protocol = IPPROTO_UDP;
	ip_hdr->saddr = htonl(0x0a0a0a10);
	ip_hdr->daddr = htonl(0x0a0a0a20);

	/* IP header checksum */
	ip_hdr->check = 0;
	ip_hdr->check = ip_fast_csum((const void *)ip_hdr, ip_hdr->ihl);

	/* UDP header */
	udp_hdr->source = htons(0x1000);
	udp_hdr->dest = htons(0x1000);
	udp_hdr->len = htons(UDP_PKT_SIZE);

	if (opt_tstamp)
		pktgen_hdr->pgh_magic = htonl(PKTGEN_MAGIC);

	/* UDP data */
	memset32_htonl(pkt_data + PKT_HDR_SIZE, opt_pkt_fill_pattern,
		       UDP_PKT_DATA_SIZE);

	/* UDP header checksum */
	udp_hdr->check = 0;
	udp_hdr->check = udp_csum(ip_hdr->saddr, ip_hdr->daddr, UDP_PKT_SIZE,
				  IPPROTO_UDP, (u16 *)udp_hdr);
}

static void gen_eth_frame(struct xsk_umem_info *umem, u64 addr)
{
	memcpy(xsk_umem__get_data(umem->buffer, addr), pkt_data,
	       PKT_SIZE);
}

static struct xsk_umem_info *xsk_configure_umem(void *buffer, u64 size)
{
	struct xsk_umem_info *umem;
	struct xsk_umem_config cfg = {
		/* We recommend that you set the fill ring size >= HW RX ring size +
		 * AF_XDP RX ring size. Make sure you fill up the fill ring
		 * with buffers at regular intervals, and you will with this setting
		 * avoid allocation failures in the driver. These are usually quite
		 * expensive since drivers have not been written to assume that
		 * allocation failures are common. For regular sockets, kernel
		 * allocated memory is used that only runs out in OOM situations
		 * that should be rare.
		 */
		.fill_size = XSK_RING_PROD__DEFAULT_NUM_DESCS * 2,
		.comp_size = XSK_RING_CONS__DEFAULT_NUM_DESCS,
		.frame_size = opt_xsk_frame_size,
		.frame_headroom = XSK_UMEM__DEFAULT_FRAME_HEADROOM,
		.flags = opt_umem_flags
	};
	int ret;

	umem = calloc(1, sizeof(*umem));
	if (!umem)
		exit_with_error(errno);

	ret = xsk_umem__create(&umem->umem, buffer, size, &umem->fq, &umem->cq,
			       &cfg);
	if (ret)
		exit_with_error(-ret);

	umem->buffer = buffer;
	return umem;
}

static void xsk_populate_fill_ring(struct xsk_umem_info *umem)
{
	int ret, i;
	u32 idx;

	ret = xsk_ring_prod__reserve(&umem->fq,
				     XSK_RING_PROD__DEFAULT_NUM_DESCS * 2, &idx);
	if (ret != XSK_RING_PROD__DEFAULT_NUM_DESCS * 2)
		exit_with_error(-ret);
	for (i = 0; i < XSK_RING_PROD__DEFAULT_NUM_DESCS * 2; i++)
		*xsk_ring_prod__fill_addr(&umem->fq, idx++) =
			i * opt_xsk_frame_size;
	xsk_ring_prod__submit(&umem->fq, XSK_RING_PROD__DEFAULT_NUM_DESCS * 2);
}
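
/* The fill ring follows libbpf's generic producer pattern:
 * xsk_ring_prod__reserve() claims descriptor slots and returns the starting
 * index, xsk_ring_prod__fill_addr() gives a pointer for writing each UMEM
 * offset, and xsk_ring_prod__submit() publishes the slots to the kernel.
 * The TX ring in tx_only() below uses the same reserve/write/submit sequence.
 */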

static struct xsk_socket_info *xsk_configure_socket(struct xsk_umem_info *umem,
						    bool rx, bool tx)
{
	struct xsk_socket_config cfg;
	struct xsk_socket_info *xsk;
	struct xsk_ring_cons *rxr;
	struct xsk_ring_prod *txr;
	int ret;

	xsk = calloc(1, sizeof(*xsk));
	if (!xsk)
		exit_with_error(errno);

	xsk->umem = umem;
	cfg.rx_size = XSK_RING_CONS__DEFAULT_NUM_DESCS;
	cfg.tx_size = XSK_RING_PROD__DEFAULT_NUM_DESCS;
	if (opt_num_xsks > 1 || opt_reduced_cap)
		cfg.libbpf_flags = XSK_LIBBPF_FLAGS__INHIBIT_PROG_LOAD;
	else
		cfg.libbpf_flags = 0;
	cfg.xdp_flags = opt_xdp_flags;
	cfg.bind_flags = opt_xdp_bind_flags;

	rxr = rx ? &xsk->rx : NULL;
	txr = tx ? &xsk->tx : NULL;
	ret = xsk_socket__create(&xsk->xsk, opt_if, opt_queue, umem->umem,
				 rxr, txr, &cfg);
	if (ret)
		exit_with_error(-ret);

	ret = bpf_xdp_query_id(opt_ifindex, opt_xdp_flags, &prog_id);
	if (ret)
		exit_with_error(-ret);

	xsk->app_stats.rx_empty_polls = 0;
	xsk->app_stats.fill_fail_polls = 0;
	xsk->app_stats.copy_tx_sendtos = 0;
	xsk->app_stats.tx_wakeup_sendtos = 0;
	xsk->app_stats.opt_polls = 0;
	xsk->app_stats.prev_rx_empty_polls = 0;
	xsk->app_stats.prev_fill_fail_polls = 0;
	xsk->app_stats.prev_copy_tx_sendtos = 0;
	xsk->app_stats.prev_tx_wakeup_sendtos = 0;
	xsk->app_stats.prev_opt_polls = 0;

	return xsk;
}

static struct option long_options[] = {
	{"rxdrop", no_argument, 0, 'r'},
	{"txonly", no_argument, 0, 't'},
	{"l2fwd", no_argument, 0, 'l'},
	{"interface", required_argument, 0, 'i'},
	{"queue", required_argument, 0, 'q'},
	{"poll", no_argument, 0, 'p'},
	{"xdp-skb", no_argument, 0, 'S'},
	{"xdp-native", no_argument, 0, 'N'},
	{"interval", required_argument, 0, 'n'},
	{"retries", required_argument, 0, 'O'},
	{"zero-copy", no_argument, 0, 'z'},
	{"copy", no_argument, 0, 'c'},
	{"frame-size", required_argument, 0, 'f'},
	{"no-need-wakeup", no_argument, 0, 'm'},
	{"unaligned", no_argument, 0, 'u'},
	{"shared-umem", no_argument, 0, 'M'},
	{"force", no_argument, 0, 'F'},
	{"duration", required_argument, 0, 'd'},
	{"clock", required_argument, 0, 'w'},
	{"batch-size", required_argument, 0, 'b'},
	{"tx-pkt-count", required_argument, 0, 'C'},
	{"tx-pkt-size", required_argument, 0, 's'},
	{"tx-pkt-pattern", required_argument, 0, 'P'},
	{"tx-vlan", no_argument, 0, 'V'},
	{"tx-vlan-id", required_argument, 0, 'J'},
	{"tx-vlan-pri", required_argument, 0, 'K'},
	{"tx-dmac", required_argument, 0, 'G'},
	{"tx-smac", required_argument, 0, 'H'},
	{"tx-cycle", required_argument, 0, 'T'},
	{"tstamp", no_argument, 0, 'y'},
	{"policy", required_argument, 0, 'W'},
	{"schpri", required_argument, 0, 'U'},
	{"extra-stats", no_argument, 0, 'x'},
	{"quiet", no_argument, 0, 'Q'},
	{"app-stats", no_argument, 0, 'a'},
	{"irq-string", required_argument, 0, 'I'},
	{"busy-poll", no_argument, 0, 'B'},
	{"reduce-cap", no_argument, 0, 'R'},
	{0, 0, 0, 0}
};

static void usage(const char *prog)
{
	const char *str =
		"  Usage: %s [OPTIONS]\n"
		"  Options:\n"
		"  -r, --rxdrop		Discard all incoming packets (default)\n"
		"  -t, --txonly		Only send packets\n"
		"  -l, --l2fwd		MAC swap L2 forwarding\n"
		"  -i, --interface=n	Run on interface n\n"
		"  -q, --queue=n	Use queue n (default 0)\n"
		"  -p, --poll		Use poll syscall\n"
		"  -S, --xdp-skb=n	Use XDP skb mode\n"
		"  -N, --xdp-native=n	Enforce XDP native mode\n"
		"  -n, --interval=n	Specify statistics update interval (default 1 sec).\n"
		"  -O, --retries=n	Specify time-out retries (1s interval) attempt (default 3).\n"
		"  -z, --zero-copy	Force zero-copy mode.\n"
		"  -c, --copy		Force copy mode.\n"
		"  -m, --no-need-wakeup	Turn off use of driver need wakeup flag.\n"
		"  -f, --frame-size=n	Set the frame size (must be a power of two in aligned mode, default is %d).\n"
		"  -u, --unaligned	Enable unaligned chunk placement\n"
		"  -M, --shared-umem	Enable XDP_SHARED_UMEM (cannot be used with -R)\n"
		"  -F, --force		Force loading the XDP prog\n"
		"  -d, --duration=n	Duration in secs to run command.\n"
		"			Default: forever.\n"
		"  -w, --clock=CLOCK	Clock NAME (default MONOTONIC).\n"
		"  -b, --batch-size=n	Batch size for sending or receiving\n"
		"			packets. Default: %d\n"
		"  -C, --tx-pkt-count=n	Number of packets to send.\n"
		"			Default: Continuous packets.\n"
		"  -s, --tx-pkt-size=n	Transmit packet size.\n"
		"			(Default: %d bytes)\n"
		"			Min size: %d, Max size %d.\n"
		"  -P, --tx-pkt-pattern=n	Packet fill pattern. Default: 0x%x\n"
		"  -V, --tx-vlan	Send VLAN tagged packets (For -t|--txonly)\n"
		"  -J, --tx-vlan-id=n	Tx VLAN ID [1-4095]. Default: %d (For -V|--tx-vlan)\n"
		"  -K, --tx-vlan-pri=n	Tx VLAN Priority [0-7]. Default: %d (For -V|--tx-vlan)\n"
		"  -G, --tx-dmac=<MAC>	Dest MAC addr of TX frame in aa:bb:cc:dd:ee:ff format (For -V|--tx-vlan)\n"
		"  -H, --tx-smac=<MAC>	Src MAC addr of TX frame in aa:bb:cc:dd:ee:ff format (For -V|--tx-vlan)\n"
		"  -T, --tx-cycle=n	Tx cycle time in micro-seconds (For -t|--txonly).\n"
		"  -y, --tstamp		Add time-stamp to packet (For -t|--txonly).\n"
		"  -W, --policy=POLICY	Schedule policy. Default: SCHED_OTHER\n"
		"  -U, --schpri=n	Schedule priority. Default: %d\n"
		"  -x, --extra-stats	Display extra statistics.\n"
		"  -Q, --quiet		Do not display any stats.\n"
		"  -a, --app-stats	Display application (syscall) statistics.\n"
		"  -I, --irq-string	Display driver interrupt statistics for interface associated with irq-string.\n"
		"  -B, --busy-poll	Busy poll.\n"
		"  -R, --reduce-cap	Use reduced capabilities (cannot be used with -M)\n"
		"\n";
	fprintf(stderr, str, prog, XSK_UMEM__DEFAULT_FRAME_SIZE,
		opt_batch_size, MIN_PKT_SIZE, MIN_PKT_SIZE,
		XSK_UMEM__DEFAULT_FRAME_SIZE, opt_pkt_fill_pattern,
		VLAN_VID__DEFAULT, VLAN_PRI__DEFAULT,
		SCHED_PRI__DEFAULT);

	exit(EXIT_FAILURE);
}

static void parse_command_line(int argc, char **argv)
{
	int option_index, c;

	opterr = 0;

	for (;;) {
		c = getopt_long(argc, argv,
				"Frtli:q:pSNn:w:O:czf:muMd:b:C:s:P:VJ:K:G:H:T:yW:U:xQaI:BR",
				long_options, &option_index);
		if (c == -1)
			break;

		switch (c) {
		case 'r':
			opt_bench = BENCH_RXDROP;
			break;
		case 't':
			opt_bench = BENCH_TXONLY;
			break;
		case 'l':
			opt_bench = BENCH_L2FWD;
			break;
		case 'i':
			opt_if = optarg;
			break;
		case 'q':
			opt_queue = atoi(optarg);
			break;
		case 'p':
			opt_poll = 1;
			break;
		case 'S':
			opt_xdp_flags |= XDP_FLAGS_SKB_MODE;
			opt_xdp_bind_flags |= XDP_COPY;
			break;
		case 'N':
			/* default, set below */
			break;
		case 'n':
			opt_interval = atoi(optarg);
			break;
		case 'w':
			if (get_clockid(&opt_clock, optarg)) {
				fprintf(stderr,
					"ERROR: Invalid clock %s. Default to CLOCK_MONOTONIC.\n",
					optarg);
				opt_clock = CLOCK_MONOTONIC;
			}
			break;
		case 'O':
			opt_retries = atoi(optarg);
			break;
		case 'z':
			opt_xdp_bind_flags |= XDP_ZEROCOPY;
			break;
		case 'c':
			opt_xdp_bind_flags |= XDP_COPY;
			break;
		case 'u':
			opt_umem_flags |= XDP_UMEM_UNALIGNED_CHUNK_FLAG;
			opt_unaligned_chunks = 1;
			opt_mmap_flags = MAP_HUGETLB;
			break;
		case 'F':
			opt_xdp_flags &= ~XDP_FLAGS_UPDATE_IF_NOEXIST;
			break;
		case 'f':
			opt_xsk_frame_size = atoi(optarg);
			break;
		case 'm':
			opt_need_wakeup = false;
			opt_xdp_bind_flags &= ~XDP_USE_NEED_WAKEUP;
			break;
		case 'M':
			opt_num_xsks = MAX_SOCKS;
			break;
		case 'd':
			opt_duration = atoi(optarg);
			opt_duration *= 1000000000;
			break;
		case 'b':
			opt_batch_size = atoi(optarg);
			break;
		case 'C':
			opt_pkt_count = atoi(optarg);
			break;
		case 's':
			opt_pkt_size = atoi(optarg);
			if (opt_pkt_size > (XSK_UMEM__DEFAULT_FRAME_SIZE) ||
			    opt_pkt_size < MIN_PKT_SIZE) {
				fprintf(stderr,
					"ERROR: Invalid frame size %d\n",
					opt_pkt_size);
				usage(basename(argv[0]));
			}
			break;
		case 'P':
			opt_pkt_fill_pattern = strtol(optarg, NULL, 16);
			break;
		case 'V':
			opt_vlan_tag = true;
			break;
		case 'J':
			opt_pkt_vlan_id = atoi(optarg);
			break;
		case 'K':
			opt_pkt_vlan_pri = atoi(optarg);
			break;
		case 'G':
			if (!ether_aton_r(optarg,
					  (struct ether_addr *)&opt_txdmac)) {
				fprintf(stderr, "Invalid dmac address:%s\n",
					optarg);
				usage(basename(argv[0]));
			}
			break;
		case 'H':
			if (!ether_aton_r(optarg,
					  (struct ether_addr *)&opt_txsmac)) {
				fprintf(stderr, "Invalid smac address:%s\n",
					optarg);
				usage(basename(argv[0]));
			}
			break;
		case 'T':
			opt_tx_cycle_ns = atoi(optarg);
			opt_tx_cycle_ns *= NSEC_PER_USEC;
			break;
		case 'y':
			opt_tstamp = 1;
			break;
		case 'W':
			if (get_schpolicy(&opt_schpolicy, optarg)) {
				fprintf(stderr,
					"ERROR: Invalid policy %s. Default to SCHED_OTHER.\n",
					optarg);
				opt_schpolicy = SCHED_OTHER;
			}
			break;
		case 'U':
			opt_schprio = atoi(optarg);
			break;
		case 'x':
			opt_extra_stats = 1;
			break;
		case 'Q':
			opt_quiet = 1;
			break;
		case 'a':
			opt_app_stats = 1;
			break;
		case 'I':
			opt_irq_str = optarg;
			if (get_interrupt_number())
				irqs_at_init = get_irqs();
			if (irqs_at_init < 0) {
				fprintf(stderr, "ERROR: Failed to get irqs for %s\n", opt_irq_str);
				usage(basename(argv[0]));
			}
			break;
		case 'B':
			opt_busy_poll = 1;
			break;
		case 'R':
			opt_reduced_cap = true;
			break;
		default:
			usage(basename(argv[0]));
		}
	}

	if (!(opt_xdp_flags & XDP_FLAGS_SKB_MODE))
		opt_xdp_flags |= XDP_FLAGS_DRV_MODE;

	opt_ifindex = if_nametoindex(opt_if);
	if (!opt_ifindex) {
		fprintf(stderr, "ERROR: interface \"%s\" does not exist\n",
			opt_if);
		usage(basename(argv[0]));
	}

	if ((opt_xsk_frame_size & (opt_xsk_frame_size - 1)) &&
	    !opt_unaligned_chunks) {
		fprintf(stderr, "--frame-size=%d is not a power of two\n",
			opt_xsk_frame_size);
		usage(basename(argv[0]));
	}

	if (opt_reduced_cap && opt_num_xsks > 1) {
		fprintf(stderr, "ERROR: -M and -R cannot be used together\n");
		usage(basename(argv[0]));
	}
}

static void kick_tx(struct xsk_socket_info *xsk)
{
	int ret;

	ret = sendto(xsk_socket__fd(xsk->xsk), NULL, 0, MSG_DONTWAIT, NULL, 0);
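	/* These errnos are expected under load: ENOBUFS, EAGAIN and EBUSY mean
	 * the kernel cannot accept more packets right now, and ENETDOWN that
	 * the link went away, so they are treated as transient rather than
	 * fatal.
	 */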
	if (ret >= 0 || errno == ENOBUFS || errno == EAGAIN ||
	    errno == EBUSY || errno == ENETDOWN)
		return;
	exit_with_error(errno);
}

static inline void complete_tx_l2fwd(struct xsk_socket_info *xsk)
{
	struct xsk_umem_info *umem = xsk->umem;
	u32 idx_cq = 0, idx_fq = 0;
	unsigned int rcvd;
	size_t ndescs;

	if (!xsk->outstanding_tx)
		return;

	/* In copy mode, Tx is driven by a syscall so we need to use e.g. sendto() to
	 * really send the packets. In zero-copy mode we do not have to do this, since Tx
	 * is driven by the NAPI loop. So as an optimization, we do not have to call
	 * sendto() all the time in zero-copy mode for l2fwd.
	 */
	if (opt_xdp_bind_flags & XDP_COPY) {
		xsk->app_stats.copy_tx_sendtos++;
		kick_tx(xsk);
	}

	ndescs = (xsk->outstanding_tx > opt_batch_size) ? opt_batch_size :
		xsk->outstanding_tx;

	/* re-add completed Tx buffers */
	rcvd = xsk_ring_cons__peek(&umem->cq, ndescs, &idx_cq);
	if (rcvd > 0) {
		unsigned int i;
		int ret;

		ret = xsk_ring_prod__reserve(&umem->fq, rcvd, &idx_fq);
		while (ret != rcvd) {
			if (ret < 0)
				exit_with_error(-ret);
			if (opt_busy_poll || xsk_ring_prod__needs_wakeup(&umem->fq)) {
				xsk->app_stats.fill_fail_polls++;
				recvfrom(xsk_socket__fd(xsk->xsk), NULL, 0, MSG_DONTWAIT, NULL,
					 NULL);
			}
			ret = xsk_ring_prod__reserve(&umem->fq, rcvd, &idx_fq);
		}

		for (i = 0; i < rcvd; i++)
			*xsk_ring_prod__fill_addr(&umem->fq, idx_fq++) =
				*xsk_ring_cons__comp_addr(&umem->cq, idx_cq++);

		xsk_ring_prod__submit(&xsk->umem->fq, rcvd);
		xsk_ring_cons__release(&xsk->umem->cq, rcvd);
		xsk->outstanding_tx -= rcvd;
	}
}

static inline void complete_tx_only(struct xsk_socket_info *xsk,
				    int batch_size)
{
	unsigned int rcvd;
	u32 idx;

	if (!xsk->outstanding_tx)
		return;

	if (!opt_need_wakeup || xsk_ring_prod__needs_wakeup(&xsk->tx)) {
		xsk->app_stats.tx_wakeup_sendtos++;
		kick_tx(xsk);
	}

	rcvd = xsk_ring_cons__peek(&xsk->umem->cq, batch_size, &idx);
	if (rcvd > 0) {
		xsk_ring_cons__release(&xsk->umem->cq, rcvd);
		xsk->outstanding_tx -= rcvd;
	}
}

static void rx_drop(struct xsk_socket_info *xsk)
{
	unsigned int rcvd, i;
	u32 idx_rx = 0, idx_fq = 0;
	int ret;

	rcvd = xsk_ring_cons__peek(&xsk->rx, opt_batch_size, &idx_rx);
	if (!rcvd) {
		if (opt_busy_poll || xsk_ring_prod__needs_wakeup(&xsk->umem->fq)) {
			xsk->app_stats.rx_empty_polls++;
			recvfrom(xsk_socket__fd(xsk->xsk), NULL, 0, MSG_DONTWAIT, NULL, NULL);
		}
		return;
	}

	ret = xsk_ring_prod__reserve(&xsk->umem->fq, rcvd, &idx_fq);
	while (ret != rcvd) {
		if (ret < 0)
			exit_with_error(-ret);
		if (opt_busy_poll || xsk_ring_prod__needs_wakeup(&xsk->umem->fq)) {
			xsk->app_stats.fill_fail_polls++;
			recvfrom(xsk_socket__fd(xsk->xsk), NULL, 0, MSG_DONTWAIT, NULL, NULL);
		}
		ret = xsk_ring_prod__reserve(&xsk->umem->fq, rcvd, &idx_fq);
	}

	for (i = 0; i < rcvd; i++) {
		u64 addr = xsk_ring_cons__rx_desc(&xsk->rx, idx_rx)->addr;
		u32 len = xsk_ring_cons__rx_desc(&xsk->rx, idx_rx++)->len;
		u64 orig = xsk_umem__extract_addr(addr);

		addr = xsk_umem__add_offset_to_addr(addr);
		char *pkt = xsk_umem__get_data(xsk->umem->buffer, addr);

		hex_dump(pkt, len, addr);
		*xsk_ring_prod__fill_addr(&xsk->umem->fq, idx_fq++) = orig;
	}

	xsk_ring_prod__submit(&xsk->umem->fq, rcvd);
	xsk_ring_cons__release(&xsk->rx, rcvd);
	xsk->ring_stats.rx_npkts += rcvd;
}

static void rx_drop_all(void)
{
	struct pollfd fds[MAX_SOCKS] = {};
	int i, ret;

	for (i = 0; i < num_socks; i++) {
		fds[i].fd = xsk_socket__fd(xsks[i]->xsk);
		fds[i].events = POLLIN;
	}

	for (;;) {
		if (opt_poll) {
			for (i = 0; i < num_socks; i++)
				xsks[i]->app_stats.opt_polls++;
			ret = poll(fds, num_socks, opt_timeout);
			if (ret <= 0)
				continue;
		}

		for (i = 0; i < num_socks; i++)
			rx_drop(xsks[i]);

		if (benchmark_done)
			break;
	}
}

static int tx_only(struct xsk_socket_info *xsk, u32 *frame_nb,
		   int batch_size, unsigned long tx_ns)
{
	u32 idx, tv_sec, tv_usec;
	unsigned int i;

	while (xsk_ring_prod__reserve(&xsk->tx, batch_size, &idx) <
				      batch_size) {
		complete_tx_only(xsk, batch_size);
		if (benchmark_done)
			return 0;
	}

	if (opt_tstamp) {
		tv_sec = (u32)(tx_ns / NSEC_PER_SEC);
		tv_usec = (u32)((tx_ns % NSEC_PER_SEC) / 1000);
	}

	for (i = 0; i < batch_size; i++) {
		struct xdp_desc *tx_desc = xsk_ring_prod__tx_desc(&xsk->tx,
								  idx + i);
		tx_desc->addr = (*frame_nb + i) * opt_xsk_frame_size;
		tx_desc->len = PKT_SIZE;

		if (opt_tstamp) {
			struct pktgen_hdr *pktgen_hdr;
			u64 addr = tx_desc->addr;
			char *pkt;

			pkt = xsk_umem__get_data(xsk->umem->buffer, addr);
			pktgen_hdr = (struct pktgen_hdr *)(pkt + PKTGEN_HDR_OFFSET);

			pktgen_hdr->seq_num = htonl(sequence++);
			pktgen_hdr->tv_sec = htonl(tv_sec);
			pktgen_hdr->tv_usec = htonl(tv_usec);

			hex_dump(pkt, PKT_SIZE, addr);
		}
	}

	xsk_ring_prod__submit(&xsk->tx, batch_size);
	xsk->ring_stats.tx_npkts += batch_size;
	xsk->outstanding_tx += batch_size;
	*frame_nb += batch_size;
	*frame_nb %= NUM_FRAMES;
	complete_tx_only(xsk, batch_size);

	return batch_size;
}

static inline int get_batch_size(int pkt_cnt)
{
	if (!opt_pkt_count)
		return opt_batch_size;

	if (pkt_cnt + opt_batch_size <= opt_pkt_count)
		return opt_batch_size;

	return opt_pkt_count - pkt_cnt;
}
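
/* Example: with --tx-pkt-count=100 and the default batch size of 64, the
 * batches come out as 64 and then 36, so exactly opt_pkt_count frames are
 * sent.
 */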

static void complete_tx_only_all(void)
{
	bool pending;
	int i;

	do {
		pending = false;
		for (i = 0; i < num_socks; i++) {
			if (xsks[i]->outstanding_tx) {
				complete_tx_only(xsks[i], opt_batch_size);
				pending = !!xsks[i]->outstanding_tx;
			}
		}
		sleep(1);
	} while (pending && opt_retries-- > 0);
}

static void tx_only_all(void)
{
	struct pollfd fds[MAX_SOCKS] = {};
	u32 frame_nb[MAX_SOCKS] = {};
	unsigned long next_tx_ns = 0;
	int pkt_cnt = 0;
	int i, ret;

	if (opt_poll && opt_tx_cycle_ns) {
		fprintf(stderr,
			"Error: --poll and --tx-cycles are both set\n");
		return;
	}

	for (i = 0; i < num_socks; i++) {
		fds[i].fd = xsk_socket__fd(xsks[i]->xsk);
		fds[i].events = POLLOUT;
	}

	if (opt_tx_cycle_ns) {
		/* Align Tx time to micro-second boundary */
		next_tx_ns = (get_nsecs() / NSEC_PER_USEC + 1) *
			     NSEC_PER_USEC;
		next_tx_ns += opt_tx_cycle_ns;

		/* Initialize periodic Tx scheduling variance */
		tx_cycle_diff_min = 1000000000;
		tx_cycle_diff_max = 0;
		tx_cycle_diff_ave = 0.0;
	}

	while ((opt_pkt_count && pkt_cnt < opt_pkt_count) || !opt_pkt_count) {
		int batch_size = get_batch_size(pkt_cnt);
		unsigned long tx_ns = 0;
		struct timespec next;
		int tx_cnt = 0;
		long diff;
		int err;

		if (opt_poll) {
			for (i = 0; i < num_socks; i++)
				xsks[i]->app_stats.opt_polls++;
			ret = poll(fds, num_socks, opt_timeout);
			if (ret <= 0)
				continue;

			if (!(fds[0].revents & POLLOUT))
				continue;
		}

		if (opt_tx_cycle_ns) {
			next.tv_sec = next_tx_ns / NSEC_PER_SEC;
			next.tv_nsec = next_tx_ns % NSEC_PER_SEC;
			err = clock_nanosleep(opt_clock, TIMER_ABSTIME, &next, NULL);
			if (err) {
				if (err != EINTR)
					fprintf(stderr,
						"clock_nanosleep failed. Err:%d errno:%d\n",
						err, errno);
				break;
			}

			/* Measure periodic Tx scheduling variance */
			tx_ns = get_nsecs();
			diff = tx_ns - next_tx_ns;
			if (diff < tx_cycle_diff_min)
				tx_cycle_diff_min = diff;

			if (diff > tx_cycle_diff_max)
				tx_cycle_diff_max = diff;

			tx_cycle_diff_ave += (double)diff;
			tx_cycle_cnt++;
		} else if (opt_tstamp) {
			tx_ns = get_nsecs();
		}

		for (i = 0; i < num_socks; i++)
			tx_cnt += tx_only(xsks[i], &frame_nb[i], batch_size, tx_ns);

		pkt_cnt += tx_cnt;

		if (benchmark_done)
			break;

		if (opt_tx_cycle_ns)
			next_tx_ns += opt_tx_cycle_ns;
	}

	if (opt_pkt_count)
		complete_tx_only_all();
}

static void l2fwd(struct xsk_socket_info *xsk)
{
	unsigned int rcvd, i;
	u32 idx_rx = 0, idx_tx = 0;
	int ret;

	complete_tx_l2fwd(xsk);

	rcvd = xsk_ring_cons__peek(&xsk->rx, opt_batch_size, &idx_rx);
	if (!rcvd) {
		if (opt_busy_poll || xsk_ring_prod__needs_wakeup(&xsk->umem->fq)) {
			xsk->app_stats.rx_empty_polls++;
			recvfrom(xsk_socket__fd(xsk->xsk), NULL, 0, MSG_DONTWAIT, NULL, NULL);
		}
		return;
	}
	xsk->ring_stats.rx_npkts += rcvd;

	ret = xsk_ring_prod__reserve(&xsk->tx, rcvd, &idx_tx);
	while (ret != rcvd) {
		if (ret < 0)
			exit_with_error(-ret);
		complete_tx_l2fwd(xsk);
		if (opt_busy_poll || xsk_ring_prod__needs_wakeup(&xsk->tx)) {
			xsk->app_stats.tx_wakeup_sendtos++;
			kick_tx(xsk);
		}
		ret = xsk_ring_prod__reserve(&xsk->tx, rcvd, &idx_tx);
	}

	for (i = 0; i < rcvd; i++) {
		u64 addr = xsk_ring_cons__rx_desc(&xsk->rx, idx_rx)->addr;
		u32 len = xsk_ring_cons__rx_desc(&xsk->rx, idx_rx++)->len;
		u64 orig = xsk_umem__extract_addr(addr);

		addr = xsk_umem__add_offset_to_addr(addr);
		char *pkt = xsk_umem__get_data(xsk->umem->buffer, addr);

		swap_mac_addresses(pkt);

		hex_dump(pkt, len, addr);
		xsk_ring_prod__tx_desc(&xsk->tx, idx_tx)->addr = orig;
		xsk_ring_prod__tx_desc(&xsk->tx, idx_tx++)->len = len;
	}

	xsk_ring_prod__submit(&xsk->tx, rcvd);
	xsk_ring_cons__release(&xsk->rx, rcvd);

	xsk->ring_stats.tx_npkts += rcvd;
	xsk->outstanding_tx += rcvd;
}

static void l2fwd_all(void)
{
	struct pollfd fds[MAX_SOCKS] = {};
	int i, ret;

	for (;;) {
		if (opt_poll) {
			for (i = 0; i < num_socks; i++) {
				fds[i].fd = xsk_socket__fd(xsks[i]->xsk);
				fds[i].events = POLLOUT | POLLIN;
				xsks[i]->app_stats.opt_polls++;
			}
			ret = poll(fds, num_socks, opt_timeout);
			if (ret <= 0)
				continue;
		}

		for (i = 0; i < num_socks; i++)
			l2fwd(xsks[i]);

		if (benchmark_done)
			break;
	}
}

static void load_xdp_program(char **argv, struct bpf_object **obj)
{
	struct bpf_prog_load_attr prog_load_attr = {
		.prog_type	= BPF_PROG_TYPE_XDP,
	};
	char xdp_filename[256];
	int prog_fd;

	snprintf(xdp_filename, sizeof(xdp_filename), "%s_kern.o", argv[0]);
	prog_load_attr.file = xdp_filename;

	if (bpf_prog_load_xattr(&prog_load_attr, obj, &prog_fd))
		exit(EXIT_FAILURE);
	if (prog_fd < 0) {
		fprintf(stderr, "ERROR: no program found: %s\n",
			strerror(prog_fd));
		exit(EXIT_FAILURE);
	}

	if (bpf_xdp_attach(opt_ifindex, prog_fd, opt_xdp_flags, NULL) < 0) {
		fprintf(stderr, "ERROR: link set xdp fd failed\n");
		exit(EXIT_FAILURE);
	}
}

static void enter_xsks_into_map(struct bpf_object *obj)
{
	struct bpf_map *map;
	int i, xsks_map;

	map = bpf_object__find_map_by_name(obj, "xsks_map");
	xsks_map = bpf_map__fd(map);
	if (xsks_map < 0) {
		fprintf(stderr, "ERROR: no xsks map found: %s\n",
			strerror(xsks_map));
		exit(EXIT_FAILURE);
	}

	for (i = 0; i < num_socks; i++) {
		int fd = xsk_socket__fd(xsks[i]->xsk);
		int key, ret;

		key = i;
		ret = bpf_map_update_elem(xsks_map, &key, &fd, 0);
		if (ret) {
			fprintf(stderr, "ERROR: bpf_map_update_elem %d\n", i);
			exit(EXIT_FAILURE);
		}
	}
}

static void apply_setsockopt(struct xsk_socket_info *xsk)
{
	int sock_opt;

	if (!opt_busy_poll)
		return;

	sock_opt = 1;
	if (setsockopt(xsk_socket__fd(xsk->xsk), SOL_SOCKET, SO_PREFER_BUSY_POLL,
		       (void *)&sock_opt, sizeof(sock_opt)) < 0)
		exit_with_error(errno);

	sock_opt = 20;
	if (setsockopt(xsk_socket__fd(xsk->xsk), SOL_SOCKET, SO_BUSY_POLL,
		       (void *)&sock_opt, sizeof(sock_opt)) < 0)
		exit_with_error(errno);

	sock_opt = opt_batch_size;
	if (setsockopt(xsk_socket__fd(xsk->xsk), SOL_SOCKET, SO_BUSY_POLL_BUDGET,
		       (void *)&sock_opt, sizeof(sock_opt)) < 0)
		exit_with_error(errno);
}
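
/* A rough sketch of what the three knobs above do: SO_PREFER_BUSY_POLL asks
 * the kernel to favor busy polling over interrupt-driven processing for this
 * socket, SO_BUSY_POLL sets the polling interval in microseconds, and
 * SO_BUSY_POLL_BUDGET bounds how many packets one busy-poll pass may process,
 * here tied to the application batch size.
 */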

static int recv_xsks_map_fd_from_ctrl_node(int sock, int *_fd)
{
	char cms[CMSG_SPACE(sizeof(int))];
	struct cmsghdr *cmsg;
	struct msghdr msg;
	struct iovec iov;
	int value;
	int len;

	iov.iov_base = &value;
	iov.iov_len = sizeof(int);

	msg.msg_name = NULL;
	msg.msg_namelen = 0;
	msg.msg_iov = &iov;
	msg.msg_iovlen = 1;
	msg.msg_flags = 0;
	msg.msg_control = (caddr_t)cms;
	msg.msg_controllen = sizeof(cms);

	len = recvmsg(sock, &msg, 0);

	if (len < 0) {
		fprintf(stderr, "Recvmsg failed length incorrect.\n");
		return -EINVAL;
	}

	if (len == 0) {
		fprintf(stderr, "Recvmsg failed no data\n");
		return -EINVAL;
	}

	cmsg = CMSG_FIRSTHDR(&msg);
	*_fd = *(int *)CMSG_DATA(cmsg);

	return 0;
}
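
/* The peer (the control process, see xdpsock_ctrl_proc.c) passes the map fd
 * over the UNIX socket as an SCM_RIGHTS ancillary message. A minimal sketch
 * of the sending side, assuming a connected socket 'sock' and a map fd 'fd':
 *
 *	struct msghdr msg = {};
 *	struct cmsghdr *cmsg;
 *	char cms[CMSG_SPACE(sizeof(int))] = {};
 *	int value = 0;
 *	struct iovec iov = { .iov_base = &value, .iov_len = sizeof(int) };
 *
 *	msg.msg_iov = &iov;
 *	msg.msg_iovlen = 1;
 *	msg.msg_control = cms;
 *	msg.msg_controllen = CMSG_LEN(sizeof(int));
 *	cmsg = CMSG_FIRSTHDR(&msg);
 *	cmsg->cmsg_level = SOL_SOCKET;
 *	cmsg->cmsg_type = SCM_RIGHTS;
 *	cmsg->cmsg_len = CMSG_LEN(sizeof(int));
 *	*(int *)CMSG_DATA(cmsg) = fd;
 *	sendmsg(sock, &msg, 0);
 */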

static int
recv_xsks_map_fd(int *xsks_map_fd)
{
	struct sockaddr_un server;
	int err;

	sock = socket(AF_UNIX, SOCK_STREAM, 0);
	if (sock < 0) {
		fprintf(stderr, "Error opening socket stream: %s", strerror(errno));
		return errno;
	}

	server.sun_family = AF_UNIX;
	strcpy(server.sun_path, SOCKET_NAME);

	if (connect(sock, (struct sockaddr *)&server, sizeof(struct sockaddr_un)) < 0) {
		close(sock);
		fprintf(stderr, "Error connecting stream socket: %s", strerror(errno));
		return errno;
	}

	err = recv_xsks_map_fd_from_ctrl_node(sock, xsks_map_fd);
	if (err) {
		fprintf(stderr, "Error %d receiving fd\n", err);
		return err;
	}
	return 0;
}

int main(int argc, char **argv)
{
	struct __user_cap_header_struct hdr = { _LINUX_CAPABILITY_VERSION_3, 0 };
	struct __user_cap_data_struct data[2] = { { 0 } };
	struct rlimit r = {RLIM_INFINITY, RLIM_INFINITY};
	bool rx = false, tx = false;
	struct sched_param schparam;
	struct xsk_umem_info *umem;
	struct bpf_object *obj;
	int xsks_map_fd = 0;
	pthread_t pt;
	int i, ret;
	void *bufs;

	parse_command_line(argc, argv);

	if (opt_reduced_cap) {
		if (capget(&hdr, data) < 0)
			fprintf(stderr, "Error getting capabilities\n");

		data->effective &= CAP_TO_MASK(CAP_NET_RAW);
		data->permitted &= CAP_TO_MASK(CAP_NET_RAW);

		if (capset(&hdr, data) < 0)
			fprintf(stderr, "Setting capabilities failed\n");

		if (capget(&hdr, data) < 0) {
			fprintf(stderr, "Error getting capabilities\n");
		} else {
			fprintf(stderr, "Capabilities EFF %x Caps INH %x Caps Per %x\n",
				data[0].effective, data[0].inheritable, data[0].permitted);
			fprintf(stderr, "Capabilities EFF %x Caps INH %x Caps Per %x\n",
				data[1].effective, data[1].inheritable, data[1].permitted);
		}
	}

	if (setrlimit(RLIMIT_MEMLOCK, &r)) {
		fprintf(stderr, "ERROR: setrlimit(RLIMIT_MEMLOCK) \"%s\"\n",
			strerror(errno));
		exit(EXIT_FAILURE);
	}

	if (opt_num_xsks > 1)
		load_xdp_program(argv, &obj);

	/* Reserve memory for the umem. Use hugepages if unaligned chunk mode */
	bufs = mmap(NULL, NUM_FRAMES * opt_xsk_frame_size,
		    PROT_READ | PROT_WRITE,
		    MAP_PRIVATE | MAP_ANONYMOUS | opt_mmap_flags, -1, 0);
	if (bufs == MAP_FAILED) {
		printf("ERROR: mmap failed\n");
		exit(EXIT_FAILURE);
	}

	/* Create sockets... */
	umem = xsk_configure_umem(bufs, NUM_FRAMES * opt_xsk_frame_size);
	if (opt_bench == BENCH_RXDROP || opt_bench == BENCH_L2FWD) {
		rx = true;
		xsk_populate_fill_ring(umem);
	}
	if (opt_bench == BENCH_L2FWD || opt_bench == BENCH_TXONLY)
		tx = true;
	for (i = 0; i < opt_num_xsks; i++)
		xsks[num_socks++] = xsk_configure_socket(umem, rx, tx);

	for (i = 0; i < opt_num_xsks; i++)
		apply_setsockopt(xsks[i]);

	if (opt_bench == BENCH_TXONLY) {
		if (opt_tstamp && opt_pkt_size < PKTGEN_SIZE_MIN)
			opt_pkt_size = PKTGEN_SIZE_MIN;

		gen_eth_hdr_data();

		for (i = 0; i < NUM_FRAMES; i++)
			gen_eth_frame(umem, i * opt_xsk_frame_size);
	}

	if (opt_num_xsks > 1 && opt_bench != BENCH_TXONLY)
		enter_xsks_into_map(obj);

	if (opt_reduced_cap) {
		ret = recv_xsks_map_fd(&xsks_map_fd);
		if (ret) {
			fprintf(stderr, "Error %d receiving xsks_map_fd\n", ret);
			exit_with_error(ret);
		}
		ret = xsk_socket__update_xskmap(xsks[0]->xsk, xsks_map_fd);
		if (ret) {
			fprintf(stderr, "Update of BPF map failed(%d)\n", ret);
			exit_with_error(ret);
		}
	}

	signal(SIGINT, int_exit);
	signal(SIGTERM, int_exit);
	signal(SIGABRT, int_exit);

	setlocale(LC_ALL, "");

	prev_time = get_nsecs();
	start_time = prev_time;

	if (!opt_quiet) {
		ret = pthread_create(&pt, NULL, poller, NULL);
		if (ret)
			exit_with_error(ret);
	}

	/* Configure sched priority for better wake-up accuracy */
	memset(&schparam, 0, sizeof(schparam));
	schparam.sched_priority = opt_schprio;
	ret = sched_setscheduler(0, opt_schpolicy, &schparam);
	if (ret) {
		fprintf(stderr, "Error(%d) in setting priority(%d): %s\n",
			errno, opt_schprio, strerror(errno));
		goto out;
	}

	if (opt_bench == BENCH_RXDROP)
		rx_drop_all();
	else if (opt_bench == BENCH_TXONLY)
		tx_only_all();
	else
		l2fwd_all();

out:
	benchmark_done = true;

	if (!opt_quiet)
		pthread_join(pt, NULL);

	xdpsock_cleanup();

	munmap(bufs, NUM_FRAMES * opt_xsk_frame_size);

	return 0;
}