// SPDX-License-Identifier: (LGPL-2.1 OR BSD-2-Clause)

/*
 * AF_XDP user-space access library.
 *
 * Copyright(c) 2018 - 2019 Intel Corporation.
 *
 * Author(s): Magnus Karlsson <magnus.karlsson@intel.com>
 */
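
/* Illustrative usage sketch (added for this document, not part of the
 * original file). It shows the intended call order of the public API below;
 * NUM_FRAMES, umem_area and the error handling are placeholders.
 *
 *	struct xsk_ring_prod fill, tx;
 *	struct xsk_ring_cons comp, rx;
 *	struct xsk_umem *umem;
 *	struct xsk_socket *xsk;
 *	__u64 size = NUM_FRAMES * XSK_UMEM__DEFAULT_FRAME_SIZE;
 *	void *umem_area = ...;	// page-aligned buffer of 'size' bytes
 *
 *	if (xsk_umem__create(&umem, umem_area, size, &fill, &comp, NULL))
 *		exit(EXIT_FAILURE);
 *	if (xsk_socket__create(&xsk, "eth0", 0, umem, &rx, &tx, NULL))
 *		exit(EXIT_FAILURE);
 */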

#include <errno.h>
#include <stdlib.h>
#include <string.h>
#include <unistd.h>
#include <arpa/inet.h>
#include <asm/barrier.h>
#include <linux/compiler.h>
#include <linux/ethtool.h>
#include <linux/filter.h>
#include <linux/if_ether.h>
#include <linux/if_packet.h>
#include <linux/if_xdp.h>
#include <linux/sockios.h>
#include <net/if.h>
#include <sys/ioctl.h>
#include <sys/mman.h>
#include <sys/socket.h>
#include <sys/types.h>

#include "bpf.h"
#include "libbpf.h"
#include "libbpf_util.h"
#include "xsk.h"

struct xsk_umem {
	struct xsk_ring_prod *fill;
	struct xsk_ring_cons *comp;
	char *umem_area;
	struct xsk_umem_config config;
	int fd;
	int refcount;
};

struct xsk_socket {
	struct xsk_ring_cons *rx;
	struct xsk_ring_prod *tx;
	__u64 outstanding_tx;
	struct xsk_umem *umem;
	struct xsk_socket_config config;
	int fd;
	int ifindex;
	int prog_fd;
	int qidconf_map_fd;
	int xsks_map_fd;
	__u32 queue_id;
	char ifname[IFNAMSIZ];
	bool xdp_prog_attached;
};

/* For 32-bit systems, we need to use mmap2 as the offsets are 64-bit.
 * Unfortunately, it is not part of glibc.
 */
static inline void *xsk_mmap(void *addr, size_t length, int prot, int flags,
			     int fd, __u64 offset)
{
#ifdef __NR_mmap2
	/* mmap2 takes the offset in units of pages rather than bytes. */
	unsigned int page_shift = __builtin_ffs(getpagesize()) - 1;
	long ret = syscall(__NR_mmap2, addr, length, prot, flags, fd,
			   (off_t)(offset >> page_shift));

	return (void *)ret;
#else
	return mmap(addr, length, prot, flags, fd, offset);
#endif
}

int xsk_umem__fd(const struct xsk_umem *umem)
{
	return umem ? umem->fd : -EINVAL;
}

int xsk_socket__fd(const struct xsk_socket *xsk)
{
	return xsk ? xsk->fd : -EINVAL;
}

static bool xsk_page_aligned(void *buffer)
{
	unsigned long addr = (unsigned long)buffer;

	return !(addr & (getpagesize() - 1));
}

static void xsk_set_umem_config(struct xsk_umem_config *cfg,
				const struct xsk_umem_config *usr_cfg)
{
	if (!usr_cfg) {
		cfg->fill_size = XSK_RING_PROD__DEFAULT_NUM_DESCS;
		cfg->comp_size = XSK_RING_CONS__DEFAULT_NUM_DESCS;
		cfg->frame_size = XSK_UMEM__DEFAULT_FRAME_SIZE;
		cfg->frame_headroom = XSK_UMEM__DEFAULT_FRAME_HEADROOM;
		return;
	}

	cfg->fill_size = usr_cfg->fill_size;
	cfg->comp_size = usr_cfg->comp_size;
	cfg->frame_size = usr_cfg->frame_size;
	cfg->frame_headroom = usr_cfg->frame_headroom;
}

static int xsk_set_xdp_socket_config(struct xsk_socket_config *cfg,
				     const struct xsk_socket_config *usr_cfg)
{
	if (!usr_cfg) {
		cfg->rx_size = XSK_RING_CONS__DEFAULT_NUM_DESCS;
		cfg->tx_size = XSK_RING_PROD__DEFAULT_NUM_DESCS;
		cfg->libbpf_flags = 0;
		cfg->xdp_flags = 0;
		cfg->bind_flags = 0;
		return 0;
	}

	if (usr_cfg->libbpf_flags & ~XSK_LIBBPF_FLAGS__INHIBIT_PROG_LOAD)
		return -EINVAL;

	cfg->rx_size = usr_cfg->rx_size;
	cfg->tx_size = usr_cfg->tx_size;
	cfg->libbpf_flags = usr_cfg->libbpf_flags;
	cfg->xdp_flags = usr_cfg->xdp_flags;
	cfg->bind_flags = usr_cfg->bind_flags;

	return 0;
}
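
/* xsk_umem__create(): register the caller's page-aligned memory area with
 * the kernel as an AF_XDP UMEM, size the fill and completion rings (defaults
 * unless usr_config is given), and mmap both rings so they can be driven
 * from user space.
 */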
int xsk_umem__create(struct xsk_umem **umem_ptr, void *umem_area, __u64 size,
		     struct xsk_ring_prod *fill, struct xsk_ring_cons *comp,
		     const struct xsk_umem_config *usr_config)
{
	struct xdp_mmap_offsets off;
	struct xdp_umem_reg mr;
	struct xsk_umem *umem;
	socklen_t optlen;
	void *map;
	int err;

	if (!umem_area || !umem_ptr || !fill || !comp)
		return -EFAULT;
	if (!size && !xsk_page_aligned(umem_area))
		return -EINVAL;

	umem = calloc(1, sizeof(*umem));

	umem->fd = socket(AF_XDP, SOCK_RAW, 0);

	umem->umem_area = umem_area;
	xsk_set_umem_config(&umem->config, usr_config);

	mr.addr = (uintptr_t)umem_area;
	mr.len = size;
	mr.chunk_size = umem->config.frame_size;
	mr.headroom = umem->config.frame_headroom;

	err = setsockopt(umem->fd, SOL_XDP, XDP_UMEM_REG, &mr, sizeof(mr));

	err = setsockopt(umem->fd, SOL_XDP, XDP_UMEM_FILL_RING,
			 &umem->config.fill_size,
			 sizeof(umem->config.fill_size));

	err = setsockopt(umem->fd, SOL_XDP, XDP_UMEM_COMPLETION_RING,
			 &umem->config.comp_size,
			 sizeof(umem->config.comp_size));

	optlen = sizeof(off);
	err = getsockopt(umem->fd, SOL_XDP, XDP_MMAP_OFFSETS, &off, &optlen);

	map = xsk_mmap(NULL, off.fr.desc +
		       umem->config.fill_size * sizeof(__u64),
		       PROT_READ | PROT_WRITE, MAP_SHARED | MAP_POPULATE,
		       umem->fd, XDP_UMEM_PGOFF_FILL_RING);
	if (map == MAP_FAILED) {

	fill->mask = umem->config.fill_size - 1;
	fill->size = umem->config.fill_size;
	fill->producer = map + off.fr.producer;
	fill->consumer = map + off.fr.consumer;
	fill->ring = map + off.fr.desc;
	fill->cached_cons = umem->config.fill_size;

	map = xsk_mmap(NULL,
		       off.cr.desc + umem->config.comp_size * sizeof(__u64),
		       PROT_READ | PROT_WRITE, MAP_SHARED | MAP_POPULATE,
		       umem->fd, XDP_UMEM_PGOFF_COMPLETION_RING);
	if (map == MAP_FAILED) {

	comp->mask = umem->config.comp_size - 1;
	comp->size = umem->config.comp_size;
	comp->producer = map + off.cr.producer;
	comp->consumer = map + off.cr.consumer;
	comp->ring = map + off.cr.desc;

	munmap(map, off.fr.desc + umem->config.fill_size * sizeof(__u64));
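
/* xsk_load_xdp_prog(): assemble the default XDP program from raw BPF
 * instructions, load it with bpf_load_program() and attach it to the
 * interface with bpf_set_link_xdp_fd(). The program redirects traffic from
 * queues that have an active AF_XDP socket into xsks_map.
 */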
static int xsk_load_xdp_prog(struct xsk_socket *xsk)
{
	static const int log_buf_size = 16 * 1024;
	char log_buf[log_buf_size];
	int err, prog_fd;

	/* This is the C-program:
	 * SEC("xdp_sock") int xdp_sock_prog(struct xdp_md *ctx)
	 * {
	 *     int *qidconf, index = ctx->rx_queue_index;
	 *
	 *     // A set entry here means that the corresponding queue_id
	 *     // has an active AF_XDP socket bound to it.
	 *     qidconf = bpf_map_lookup_elem(&qidconf_map, &index);
	 *     if (!qidconf)
	 *         return XDP_ABORTED;
	 *
	 *     if (*qidconf)
	 *         return bpf_redirect_map(&xsks_map, index, 0);
	 *
	 *     return XDP_PASS;
	 * }
	 */
	struct bpf_insn prog[] = {
		/* r1 = *(u32 *)(r1 + 16), i.e. r1 = ctx->rx_queue_index */
		BPF_LDX_MEM(BPF_W, BPF_REG_1, BPF_REG_1, 16),
		/* *(u32 *)(r10 - 4) = r1, stash the queue index on the stack */
		BPF_STX_MEM(BPF_W, BPF_REG_10, BPF_REG_1, -4),
		/* r2 = r10 - 4, i.e. r2 = &index */
		BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
		BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -4),
		/* r0 = bpf_map_lookup_elem(&qidconf_map, &index) */
		BPF_LD_MAP_FD(BPF_REG_1, xsk->qidconf_map_fd),
		BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
		BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
		/* r0 = XDP_ABORTED */
		BPF_MOV32_IMM(BPF_REG_0, 0),
		/* if r1 == 0 goto +8: lookup failed, return XDP_ABORTED */
		BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, 8),
		/* r0 = XDP_PASS */
		BPF_MOV32_IMM(BPF_REG_0, 2),
		/* r1 = *(u32 *)(r1 + 0), i.e. r1 = *qidconf */
		BPF_LDX_MEM(BPF_W, BPF_REG_1, BPF_REG_1, 0),
		/* if r1 == 0 goto +5: no socket on this queue, return XDP_PASS */
		BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, 5),
		BPF_LD_MAP_FD(BPF_REG_1, xsk->xsks_map_fd),
		/* r2 = *(u32 *)(r10 - 4), reload the queue index */
		BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_10, -4),
		BPF_MOV32_IMM(BPF_REG_3, 0),
		/* r0 = bpf_redirect_map(&xsks_map, index, 0) */
		BPF_EMIT_CALL(BPF_FUNC_redirect_map),
		/* The jumps are to this instruction */
		BPF_EXIT_INSN(),
	};
	size_t insns_cnt = sizeof(prog) / sizeof(struct bpf_insn);

	prog_fd = bpf_load_program(BPF_PROG_TYPE_XDP, prog, insns_cnt,
				   "LGPL-2.1 or BSD-2-Clause", 0, log_buf,
				   log_buf_size);
	if (prog_fd < 0) {
		pr_warning("BPF log buffer:\n%s", log_buf);
		return prog_fd;
	}

	err = bpf_set_link_xdp_fd(xsk->ifindex, prog_fd, xsk->config.xdp_flags);

	xsk->prog_fd = prog_fd;
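
/* xsk_get_max_queues(): ask the driver how many channels the device has via
 * the ETHTOOL_GCHANNELS ioctl; the result is used to size the per-queue BPF
 * maps. Falls back to one queue when the device reports no channels or does
 * not support the ioctl.
 */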
static int xsk_get_max_queues(struct xsk_socket *xsk)
{
	struct ethtool_channels channels;
	struct ifreq ifr;
	int fd, err, ret;

	fd = socket(AF_INET, SOCK_DGRAM, 0);

	channels.cmd = ETHTOOL_GCHANNELS;
	ifr.ifr_data = (void *)&channels;
	/* Make sure ifr_name is always NUL-terminated. */
	memcpy(ifr.ifr_name, xsk->ifname, IFNAMSIZ - 1);
	ifr.ifr_name[IFNAMSIZ - 1] = '\0';
	err = ioctl(fd, SIOCETHTOOL, &ifr);
	if (err && errno != EOPNOTSUPP) {

	if (channels.max_combined == 0 || errno == EOPNOTSUPP)
		/* If the device says it has no channels, then all traffic
		 * is sent to a single stream, so max queues = 1.
		 */
		ret = 1;
	else
		ret = channels.max_combined;
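
/* xsk_create_bpf_maps(): create the two maps the default XDP program uses:
 * "qidconf_map" (BPF_MAP_TYPE_ARRAY, queue_id -> active flag) and "xsks_map"
 * (BPF_MAP_TYPE_XSKMAP, queue_id -> AF_XDP socket), each sized to the
 * device's maximum number of queues.
 */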
static int xsk_create_bpf_maps(struct xsk_socket *xsk)
{
	int max_queues;
	int fd;

	max_queues = xsk_get_max_queues(xsk);

	fd = bpf_create_map_name(BPF_MAP_TYPE_ARRAY, "qidconf_map",
				 sizeof(int), sizeof(int), max_queues, 0);

	xsk->qidconf_map_fd = fd;

	fd = bpf_create_map_name(BPF_MAP_TYPE_XSKMAP, "xsks_map",
				 sizeof(int), sizeof(int), max_queues, 0);
	if (fd < 0) {
		close(xsk->qidconf_map_fd);

	xsk->xsks_map_fd = fd;

static void xsk_delete_bpf_maps(struct xsk_socket *xsk)
{
	close(xsk->qidconf_map_fd);
	close(xsk->xsks_map_fd);
	xsk->qidconf_map_fd = -1;
	xsk->xsks_map_fd = -1;
}
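
/* xsk_lookup_bpf_maps(): when an XDP program is already attached to the
 * interface, walk the program's map ids with bpf_obj_get_info_by_fd() and
 * recover the fds of "qidconf_map" and "xsks_map" by name so they can be
 * reused for this socket.
 */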
static int xsk_lookup_bpf_maps(struct xsk_socket *xsk)
{
	__u32 i, *map_ids, num_maps, prog_len = sizeof(struct bpf_prog_info);
	__u32 map_len = sizeof(struct bpf_map_info);
	struct bpf_prog_info prog_info = {};
	struct bpf_map_info map_info;
	int fd, err;

	err = bpf_obj_get_info_by_fd(xsk->prog_fd, &prog_info, &prog_len);

	num_maps = prog_info.nr_map_ids;

	map_ids = calloc(prog_info.nr_map_ids, sizeof(*map_ids));

	memset(&prog_info, 0, prog_len);
	prog_info.nr_map_ids = num_maps;
	prog_info.map_ids = (__u64)(unsigned long)map_ids;

	err = bpf_obj_get_info_by_fd(xsk->prog_fd, &prog_info, &prog_len);

	for (i = 0; i < prog_info.nr_map_ids; i++) {
		if (xsk->qidconf_map_fd != -1 && xsk->xsks_map_fd != -1)
			break;

		fd = bpf_map_get_fd_by_id(map_ids[i]);

		err = bpf_obj_get_info_by_fd(fd, &map_info, &map_len);

		if (!strcmp(map_info.name, "qidconf_map")) {
			xsk->qidconf_map_fd = fd;

		if (!strcmp(map_info.name, "xsks_map")) {
			xsk->xsks_map_fd = fd;

	if (xsk->qidconf_map_fd < 0 || xsk->xsks_map_fd < 0) {
		xsk_delete_bpf_maps(xsk);
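
/* xsk_clear_bpf_maps()/xsk_set_bpf_maps(): mark this socket's queue_id as
 * inactive/active in qidconf_map and remove/insert the socket fd in
 * xsks_map, which stops or starts redirection for that queue.
 */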
static void xsk_clear_bpf_maps(struct xsk_socket *xsk)
{
	int qid = false;

	bpf_map_update_elem(xsk->qidconf_map_fd, &xsk->queue_id, &qid, 0);
	bpf_map_delete_elem(xsk->xsks_map_fd, &xsk->queue_id);
}

static int xsk_set_bpf_maps(struct xsk_socket *xsk)
{
	int qid = true, fd = xsk->fd, err;

	err = bpf_map_update_elem(xsk->qidconf_map_fd, &xsk->queue_id, &qid, 0);

	err = bpf_map_update_elem(xsk->xsks_map_fd, &xsk->queue_id, &fd, 0);

	xsk_clear_bpf_maps(xsk);
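
/* xsk_setup_xdp_prog(): if no XDP program is attached to the interface yet,
 * create the BPF maps and load the default program; otherwise reuse the
 * attached program and look up its maps. Either way, the maps are then
 * populated for this socket's queue.
 */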
static int xsk_setup_xdp_prog(struct xsk_socket *xsk)
{
	__u32 prog_id = 0;
	int err;

	err = bpf_get_link_xdp_id(xsk->ifindex, &prog_id,
				  xsk->config.xdp_flags);

	if (!prog_id) {
		err = xsk_create_bpf_maps(xsk);

		err = xsk_load_xdp_prog(xsk);
	} else {
		xsk->prog_fd = bpf_prog_get_fd_by_id(prog_id);
		err = xsk_lookup_bpf_maps(xsk);
	}

	err = xsk_set_bpf_maps(xsk);

	xsk_delete_bpf_maps(xsk);
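
/* xsk_socket__create(): create an AF_XDP socket (or reuse the UMEM's fd for
 * the first socket), size and mmap the RX/TX descriptor rings, bind the
 * socket to <ifname, queue_id>, and, unless
 * XSK_LIBBPF_FLAGS__INHIBIT_PROG_LOAD is set, install the default XDP
 * program and BPF maps for that queue.
 */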
int xsk_socket__create(struct xsk_socket **xsk_ptr, const char *ifname,
		       __u32 queue_id, struct xsk_umem *umem,
		       struct xsk_ring_cons *rx, struct xsk_ring_prod *tx,
		       const struct xsk_socket_config *usr_config)
{
	void *rx_map = NULL, *tx_map = NULL;
	struct sockaddr_xdp sxdp = {};
	struct xdp_mmap_offsets off;
	struct xsk_socket *xsk;
	socklen_t optlen;
	int err;

	if (!umem || !xsk_ptr || !rx || !tx)
		return -EFAULT;

	if (umem->refcount) {
		pr_warning("Error: shared umems not supported by libbpf.\n");
		return -EBUSY;
	}

	xsk = calloc(1, sizeof(*xsk));

	if (umem->refcount++ > 0) {
		xsk->fd = socket(AF_XDP, SOCK_RAW, 0);

	xsk->outstanding_tx = 0;
	xsk->queue_id = queue_id;

	xsk->ifindex = if_nametoindex(ifname);

	/* Make sure the copied interface name is always NUL-terminated. */
	memcpy(xsk->ifname, ifname, IFNAMSIZ - 1);
	xsk->ifname[IFNAMSIZ - 1] = '\0';

	err = xsk_set_xdp_socket_config(&xsk->config, usr_config);

	err = setsockopt(xsk->fd, SOL_XDP, XDP_RX_RING,
			 &xsk->config.rx_size,
			 sizeof(xsk->config.rx_size));

	err = setsockopt(xsk->fd, SOL_XDP, XDP_TX_RING,
			 &xsk->config.tx_size,
			 sizeof(xsk->config.tx_size));

	optlen = sizeof(off);
	err = getsockopt(xsk->fd, SOL_XDP, XDP_MMAP_OFFSETS, &off, &optlen);

	rx_map = xsk_mmap(NULL, off.rx.desc +
			  xsk->config.rx_size * sizeof(struct xdp_desc),
			  PROT_READ | PROT_WRITE,
			  MAP_SHARED | MAP_POPULATE,
			  xsk->fd, XDP_PGOFF_RX_RING);
	if (rx_map == MAP_FAILED) {

	rx->mask = xsk->config.rx_size - 1;
	rx->size = xsk->config.rx_size;
	rx->producer = rx_map + off.rx.producer;
	rx->consumer = rx_map + off.rx.consumer;
	rx->ring = rx_map + off.rx.desc;

	tx_map = xsk_mmap(NULL, off.tx.desc +
			  xsk->config.tx_size * sizeof(struct xdp_desc),
			  PROT_READ | PROT_WRITE,
			  MAP_SHARED | MAP_POPULATE,
			  xsk->fd, XDP_PGOFF_TX_RING);
	if (tx_map == MAP_FAILED) {

	tx->mask = xsk->config.tx_size - 1;
	tx->size = xsk->config.tx_size;
	tx->producer = tx_map + off.tx.producer;
	tx->consumer = tx_map + off.tx.consumer;
	tx->ring = tx_map + off.tx.desc;
	tx->cached_cons = xsk->config.tx_size;

	sxdp.sxdp_family = PF_XDP;
	sxdp.sxdp_ifindex = xsk->ifindex;
	sxdp.sxdp_queue_id = xsk->queue_id;
	sxdp.sxdp_flags = xsk->config.bind_flags;

	err = bind(xsk->fd, (struct sockaddr *)&sxdp, sizeof(sxdp));

	xsk->qidconf_map_fd = -1;
	xsk->xsks_map_fd = -1;

	if (!(xsk->config.libbpf_flags & XSK_LIBBPF_FLAGS__INHIBIT_PROG_LOAD)) {
		err = xsk_setup_xdp_prog(xsk);

	munmap(tx_map, off.tx.desc +
	       xsk->config.tx_size * sizeof(struct xdp_desc));

	munmap(rx_map, off.rx.desc +
	       xsk->config.rx_size * sizeof(struct xdp_desc));

	if (--umem->refcount)
		close(xsk->fd);
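
/* xsk_umem__delete(): tear down a UMEM that no socket references any more:
 * unmap the fill and completion rings and close the backing fd.
 */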
int xsk_umem__delete(struct xsk_umem *umem)
{
	struct xdp_mmap_offsets off;
	socklen_t optlen;
	int err;

	optlen = sizeof(off);
	err = getsockopt(umem->fd, SOL_XDP, XDP_MMAP_OFFSETS, &off, &optlen);
	if (!err) {
		munmap(umem->fill->ring - off.fr.desc,
		       off.fr.desc + umem->config.fill_size * sizeof(__u64));
		munmap(umem->comp->ring - off.cr.desc,
		       off.cr.desc + umem->config.comp_size * sizeof(__u64));
	}
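
/* xsk_socket__delete(): undo xsk_socket__create(): clear and close the BPF
 * maps, unmap the RX/TX rings, drop the UMEM reference and close the socket
 * fd unless it is shared with the UMEM.
 */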
void xsk_socket__delete(struct xsk_socket *xsk)
{
	size_t desc_sz = sizeof(struct xdp_desc);
	struct xdp_mmap_offsets off;
	socklen_t optlen;
	int err;

	xsk_clear_bpf_maps(xsk);
	xsk_delete_bpf_maps(xsk);

	optlen = sizeof(off);
	err = getsockopt(xsk->fd, SOL_XDP, XDP_MMAP_OFFSETS, &off, &optlen);
	if (!err) {
		munmap(xsk->rx->ring - off.rx.desc,
		       off.rx.desc + xsk->config.rx_size * desc_sz);
		munmap(xsk->tx->ring - off.tx.desc,
		       off.tx.desc + xsk->config.tx_size * desc_sz);
	}

	xsk->umem->refcount--;
	/* Do not close an fd that also has an associated umem connected
	 * to it.
	 */
	if (xsk->fd != xsk->umem->fd)
		close(xsk->fd);