// SPDX-License-Identifier: GPL-2.0
/* XDP sockets
 *
 * AF_XDP sockets allow a channel between XDP programs and userspace
 * applications.
 * Copyright(c) 2018 Intel Corporation.
 *
 * Author(s): Björn Töpel <bjorn.topel@intel.com>
 *	      Magnus Karlsson <magnus.karlsson@intel.com>
 */

#define pr_fmt(fmt) "AF_XDP: %s: " fmt, __func__
#include <linux/if_xdp.h>
#include <linux/init.h>
#include <linux/sched/mm.h>
#include <linux/sched/signal.h>
#include <linux/sched/task.h>
#include <linux/socket.h>
#include <linux/file.h>
#include <linux/uaccess.h>
#include <linux/net.h>
#include <linux/netdevice.h>
#include <linux/rculist.h>
#include <net/xdp_sock_drv.h>
#include <net/xdp.h>

#include "xsk_queue.h"
#include "xdp_umem.h"
#include "xsk.h"

#define TX_BATCH_SIZE 16

static DEFINE_PER_CPU(struct list_head, xskmap_flush_list);
bool xsk_is_setup_for_bpf_map(struct xdp_sock *xs)
{
	return READ_ONCE(xs->rx) && READ_ONCE(xs->umem) &&
		(xs->pool->fq || READ_ONCE(xs->fq_tmp));
}

void xsk_set_rx_need_wakeup(struct xsk_buff_pool *pool)
{
	if (pool->cached_need_wakeup & XDP_WAKEUP_RX)
		return;

	pool->fq->ring->flags |= XDP_RING_NEED_WAKEUP;
	pool->cached_need_wakeup |= XDP_WAKEUP_RX;
}
EXPORT_SYMBOL(xsk_set_rx_need_wakeup);

void xsk_set_tx_need_wakeup(struct xsk_buff_pool *pool)
{
	struct xdp_sock *xs;

	if (pool->cached_need_wakeup & XDP_WAKEUP_TX)
		return;

	rcu_read_lock();
	list_for_each_entry_rcu(xs, &pool->xsk_tx_list, tx_list) {
		xs->tx->ring->flags |= XDP_RING_NEED_WAKEUP;
	}
	rcu_read_unlock();

	pool->cached_need_wakeup |= XDP_WAKEUP_TX;
}
EXPORT_SYMBOL(xsk_set_tx_need_wakeup);

void xsk_clear_rx_need_wakeup(struct xsk_buff_pool *pool)
{
	if (!(pool->cached_need_wakeup & XDP_WAKEUP_RX))
		return;

	pool->fq->ring->flags &= ~XDP_RING_NEED_WAKEUP;
	pool->cached_need_wakeup &= ~XDP_WAKEUP_RX;
}
EXPORT_SYMBOL(xsk_clear_rx_need_wakeup);

void xsk_clear_tx_need_wakeup(struct xsk_buff_pool *pool)
{
	struct xdp_sock *xs;

	if (!(pool->cached_need_wakeup & XDP_WAKEUP_TX))
		return;

	rcu_read_lock();
	list_for_each_entry_rcu(xs, &pool->xsk_tx_list, tx_list) {
		xs->tx->ring->flags &= ~XDP_RING_NEED_WAKEUP;
	}
	rcu_read_unlock();

	pool->cached_need_wakeup &= ~XDP_WAKEUP_TX;
}
EXPORT_SYMBOL(xsk_clear_tx_need_wakeup);

bool xsk_uses_need_wakeup(struct xsk_buff_pool *pool)
{
	return pool->uses_need_wakeup;
}
EXPORT_SYMBOL(xsk_uses_need_wakeup);
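/* Example (userspace, sketch; not part of this file): the flags set and
 * cleared above are what an application checks before paying for a syscall.
 * A minimal Tx kick, assuming the socket was bound with XDP_USE_NEED_WAKEUP
 * and that tx_flags points at the Tx ring's flags word located via
 * XDP_MMAP_OFFSETS (variable names are illustrative only):
 *
 *	#include <linux/if_xdp.h>
 *	#include <sys/socket.h>
 *
 *	static void kick_tx_if_needed(int xsk_fd, const __u32 *tx_flags)
 *	{
 *		// Only issue the syscall when the kernel asked for a wakeup.
 *		if (*tx_flags & XDP_RING_NEED_WAKEUP)
 *			sendto(xsk_fd, NULL, 0, MSG_DONTWAIT, NULL, 0);
 *	}
 */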
struct xsk_buff_pool *xsk_get_pool_from_qid(struct net_device *dev,
					    u16 queue_id)
{
	if (queue_id < dev->real_num_rx_queues)
		return dev->_rx[queue_id].pool;
	if (queue_id < dev->real_num_tx_queues)
		return dev->_tx[queue_id].pool;

	return NULL;
}
EXPORT_SYMBOL(xsk_get_pool_from_qid);

void xsk_clear_pool_at_qid(struct net_device *dev, u16 queue_id)
{
	if (queue_id < dev->real_num_rx_queues)
		dev->_rx[queue_id].pool = NULL;
	if (queue_id < dev->real_num_tx_queues)
		dev->_tx[queue_id].pool = NULL;
}

/* The buffer pool is stored both in the _rx struct and the _tx struct as we do
 * not know if the device has more tx queues than rx, or the opposite.
 * This might also change during run time.
 */
int xsk_reg_pool_at_qid(struct net_device *dev, struct xsk_buff_pool *pool,
			u16 queue_id)
{
	if (queue_id >= max_t(unsigned int,
			      dev->real_num_rx_queues,
			      dev->real_num_tx_queues))
		return -EINVAL;

	if (queue_id < dev->real_num_rx_queues)
		dev->_rx[queue_id].pool = pool;
	if (queue_id < dev->real_num_tx_queues)
		dev->_tx[queue_id].pool = pool;

	return 0;
}
void xp_release(struct xdp_buff_xsk *xskb)
{
	xskb->pool->free_heads[xskb->pool->free_heads_cnt++] = xskb;
}

static u64 xp_get_handle(struct xdp_buff_xsk *xskb)
{
	u64 offset = xskb->xdp.data - xskb->xdp.data_hard_start;

	offset += xskb->pool->headroom;
	if (!xskb->pool->unaligned)
		return xskb->orig_addr + offset;
	return xskb->orig_addr + (offset << XSK_UNALIGNED_BUF_OFFSET_SHIFT);
}
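/* Example (sketch): in unaligned chunk mode, xp_get_handle() above packs the
 * offset into the upper bits of the 64-bit handle. A consumer can split such
 * a handle with the UAPI constants from <linux/if_xdp.h>; the helper names
 * below are illustrative and mirror libbpf's xsk_umem__extract_addr() and
 * xsk_umem__extract_offset():
 *
 *	static inline __u64 xsk_handle_base(__u64 handle)
 *	{
 *		return handle & XSK_UNALIGNED_BUF_ADDR_MASK;
 *	}
 *
 *	static inline __u64 xsk_handle_offset(__u64 handle)
 *	{
 *		return handle >> XSK_UNALIGNED_BUF_OFFSET_SHIFT;
 *	}
 */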
static int __xsk_rcv_zc(struct xdp_sock *xs, struct xdp_buff *xdp, u32 len)
{
	struct xdp_buff_xsk *xskb = container_of(xdp, struct xdp_buff_xsk, xdp);
	u64 addr;
	int err;

	addr = xp_get_handle(xskb);
	err = xskq_prod_reserve_desc(xs->rx, addr, len);
	if (err) {
		xs->rx_queue_full++;
		return err;
	}

	xp_release(xskb);
	return 0;
}

static void xsk_copy_xdp(struct xdp_buff *to, struct xdp_buff *from, u32 len)
{
	void *from_buf, *to_buf;
	u32 metalen;

	if (unlikely(xdp_data_meta_unsupported(from))) {
		from_buf = from->data;
		to_buf = to->data;
		metalen = 0;
	} else {
		from_buf = from->data_meta;
		metalen = from->data - from->data_meta;
		to_buf = to->data - metalen;
	}

	memcpy(to_buf, from_buf, len + metalen);
}

static int __xsk_rcv(struct xdp_sock *xs, struct xdp_buff *xdp, u32 len,
		     bool explicit_free)
{
	struct xdp_buff *xsk_xdp;
	int err;

	if (len > xsk_pool_get_rx_frame_size(xs->pool)) {
		xs->rx_dropped++;
		return -ENOSPC;
	}

	xsk_xdp = xsk_buff_alloc(xs->pool);
	if (!xsk_xdp) {
		xs->rx_dropped++;
		return -ENOSPC;
	}

	xsk_copy_xdp(xsk_xdp, xdp, len);
	err = __xsk_rcv_zc(xs, xsk_xdp, len);
	if (err) {
		xsk_buff_free(xsk_xdp);
		return err;
	}
	if (explicit_free)
		xdp_return_buff(xdp);
	return 0;
}
static bool xsk_is_bound(struct xdp_sock *xs)
{
	if (READ_ONCE(xs->state) == XSK_BOUND) {
		/* Matches smp_wmb() in bind(). */
		smp_rmb();
		return true;
	}
	return false;
}

static int xsk_rcv(struct xdp_sock *xs, struct xdp_buff *xdp,
		   bool explicit_free)
{
	u32 len;

	if (!xsk_is_bound(xs))
		return -EINVAL;

	if (xs->dev != xdp->rxq->dev || xs->queue_id != xdp->rxq->queue_index)
		return -EINVAL;

	len = xdp->data_end - xdp->data;

	return xdp->rxq->mem.type == MEM_TYPE_XSK_BUFF_POOL ?
		__xsk_rcv_zc(xs, xdp, len) :
		__xsk_rcv(xs, xdp, len, explicit_free);
}

static void xsk_flush(struct xdp_sock *xs)
{
	xskq_prod_submit(xs->rx);
	__xskq_cons_release(xs->pool->fq);
	sock_def_readable(&xs->sk);
}
int xsk_generic_rcv(struct xdp_sock *xs, struct xdp_buff *xdp)
{
	int err;

	spin_lock_bh(&xs->rx_lock);
	err = xsk_rcv(xs, xdp, false);
	xsk_flush(xs);
	spin_unlock_bh(&xs->rx_lock);
	return err;
}

int __xsk_map_redirect(struct xdp_sock *xs, struct xdp_buff *xdp)
{
	struct list_head *flush_list = this_cpu_ptr(&xskmap_flush_list);
	int err;

	err = xsk_rcv(xs, xdp, true);
	if (err)
		return err;

	if (!xs->flush_node.prev)
		list_add(&xs->flush_node, flush_list);

	return 0;
}

void __xsk_map_flush(void)
{
	struct list_head *flush_list = this_cpu_ptr(&xskmap_flush_list);
	struct xdp_sock *xs, *tmp;

	list_for_each_entry_safe(xs, tmp, flush_list, flush_node) {
		xsk_flush(xs);
		__list_del_clearprev(&xs->flush_node);
	}
}
void xsk_tx_completed(struct xsk_buff_pool *pool, u32 nb_entries)
{
	xskq_prod_submit_n(pool->cq, nb_entries);
}
EXPORT_SYMBOL(xsk_tx_completed);

void xsk_tx_release(struct xsk_buff_pool *pool)
{
	struct xdp_sock *xs;

	rcu_read_lock();
	list_for_each_entry_rcu(xs, &pool->xsk_tx_list, tx_list) {
		__xskq_cons_release(xs->tx);
		xs->sk.sk_write_space(&xs->sk);
	}
	rcu_read_unlock();
}
EXPORT_SYMBOL(xsk_tx_release);
bool xsk_tx_peek_desc(struct xsk_buff_pool *pool, struct xdp_desc *desc)
{
	struct xdp_sock *xs;

	rcu_read_lock();
	list_for_each_entry_rcu(xs, &pool->xsk_tx_list, tx_list) {
		if (!xskq_cons_peek_desc(xs->tx, desc, pool)) {
			xs->tx->queue_empty_descs++;
			continue;
		}

		/* This is the backpressure mechanism for the Tx path.
		 * Reserve space in the completion queue and only proceed
		 * if there is space in it. This avoids having to implement
		 * any buffering in the Tx path.
		 */
		if (xskq_prod_reserve_addr(pool->cq, desc->addr))
			goto out;

		xskq_cons_release(xs->tx);
		rcu_read_unlock();
		return true;
	}

out:
	rcu_read_unlock();
	return false;
}
EXPORT_SYMBOL(xsk_tx_peek_desc);
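/* Example (sketch): how a zero-copy driver's Tx path typically consumes the
 * exports above. xsk_buff_raw_get_dma() comes from <net/xdp_sock_drv.h>;
 * my_hw_xmit() and struct my_ring stand in for the driver's own
 * descriptor-posting code and are hypothetical.
 *
 *	static u32 my_xsk_xmit(struct my_ring *ring, struct xsk_buff_pool *pool,
 *			       u32 budget)
 *	{
 *		struct xdp_desc desc;
 *		u32 sent = 0;
 *
 *		while (sent < budget && xsk_tx_peek_desc(pool, &desc)) {
 *			dma_addr_t dma = xsk_buff_raw_get_dma(pool, desc.addr);
 *
 *			my_hw_xmit(ring, dma, desc.len);
 *			sent++;
 *		}
 *		if (sent)
 *			xsk_tx_release(pool);
 *		// Later, from the Tx completion interrupt, the driver reports
 *		// finished frames with xsk_tx_completed(pool, done).
 *		return sent;
 *	}
 */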
static int xsk_wakeup(struct xdp_sock *xs, u8 flags)
{
	struct net_device *dev = xs->dev;
	int err;

	rcu_read_lock();
	err = dev->netdev_ops->ndo_xsk_wakeup(dev, xs->queue_id, flags);
	rcu_read_unlock();

	return err;
}

static int xsk_zc_xmit(struct xdp_sock *xs)
{
	return xsk_wakeup(xs, XDP_WAKEUP_TX);
}

static void xsk_destruct_skb(struct sk_buff *skb)
{
	u64 addr = (u64)(long)skb_shinfo(skb)->destructor_arg;
	struct xdp_sock *xs = xdp_sk(skb->sk);
	unsigned long flags;

	spin_lock_irqsave(&xs->tx_completion_lock, flags);
	xskq_prod_submit_addr(xs->pool->cq, addr);
	spin_unlock_irqrestore(&xs->tx_completion_lock, flags);

	sock_wfree(skb);
}
static int xsk_generic_xmit(struct sock *sk)
{
	struct xdp_sock *xs = xdp_sk(sk);
	u32 max_batch = TX_BATCH_SIZE;
	bool sent_frame = false;
	struct xdp_desc desc;
	struct sk_buff *skb;
	int err = 0;

	mutex_lock(&xs->mutex);

	if (xs->queue_id >= xs->dev->real_num_tx_queues)
		goto out;

	while (xskq_cons_peek_desc(xs->tx, &desc, xs->pool)) {
		char *buffer;
		u64 addr;
		u32 len;

		if (max_batch-- == 0) {
			err = -EAGAIN;
			goto out;
		}

		len = desc.len;
		skb = sock_alloc_send_skb(sk, len, 1, &err);
		if (unlikely(!skb))
			goto out;

		skb_put(skb, len);
		addr = desc.addr;
		buffer = xsk_buff_raw_get_data(xs->pool, addr);
		err = skb_store_bits(skb, 0, buffer, len);
		/* This is the backpressure mechanism for the Tx path.
		 * Reserve space in the completion queue and only proceed
		 * if there is space in it. This avoids having to implement
		 * any buffering in the Tx path.
		 */
		if (unlikely(err) || xskq_prod_reserve(xs->pool->cq)) {
			kfree_skb(skb);
			goto out;
		}

		skb->dev = xs->dev;
		skb->priority = sk->sk_priority;
		skb->mark = sk->sk_mark;
		skb_shinfo(skb)->destructor_arg = (void *)(long)desc.addr;
		skb->destructor = xsk_destruct_skb;

		err = dev_direct_xmit(skb, xs->queue_id);
		xskq_cons_release(xs->tx);
		/* Ignore NET_XMIT_CN as packet might have been sent */
		if (err == NET_XMIT_DROP || err == NETDEV_TX_BUSY) {
			/* SKB completed but not sent */
			err = -EBUSY;
			goto out;
		}

		sent_frame = true;
	}

	xs->tx->queue_empty_descs++;

out:
	if (sent_frame)
		sk->sk_write_space(sk);

	mutex_unlock(&xs->mutex);
	return err;
}
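/* Example (userspace, sketch): the addresses reserved in the completion queue
 * above come back to the application once xsk_destruct_skb() has run. A
 * simplified completion-ring reaper, assuming cons, prod and ring point into
 * the mmap'ed completion ring (names illustrative; real code needs the
 * load-acquire/store-release ordering that libbpf's ring helpers provide):
 *
 *	static unsigned int reap_completions(__u32 *cons, const __u32 *prod,
 *					     const __u64 *ring, __u32 ring_size,
 *					     __u64 *addrs, unsigned int max)
 *	{
 *		unsigned int n = 0;
 *
 *		while (n < max && *cons != *prod) {
 *			addrs[n++] = ring[*cons & (ring_size - 1)];
 *			(*cons)++;
 *		}
 *		return n;	// these frames can be posted for Tx again
 *	}
 */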
static int __xsk_sendmsg(struct sock *sk)
{
	struct xdp_sock *xs = xdp_sk(sk);

	if (unlikely(!(xs->dev->flags & IFF_UP)))
		return -ENETDOWN;
	if (unlikely(!xs->tx))
		return -ENOBUFS;

	return xs->zc ? xsk_zc_xmit(xs) : xsk_generic_xmit(sk);
}

static int xsk_sendmsg(struct socket *sock, struct msghdr *m, size_t total_len)
{
	bool need_wait = !(m->msg_flags & MSG_DONTWAIT);
	struct sock *sk = sock->sk;
	struct xdp_sock *xs = xdp_sk(sk);

	if (unlikely(!xsk_is_bound(xs)))
		return -ENXIO;
	if (unlikely(need_wait))
		return -EOPNOTSUPP;

	return __xsk_sendmsg(sk);
}
static __poll_t xsk_poll(struct file *file, struct socket *sock,
			 struct poll_table_struct *wait)
{
	__poll_t mask = datagram_poll(file, sock, wait);
	struct sock *sk = sock->sk;
	struct xdp_sock *xs = xdp_sk(sk);
	struct xsk_buff_pool *pool;

	if (unlikely(!xsk_is_bound(xs)))
		return mask;

	pool = xs->pool;

	if (pool->cached_need_wakeup) {
		if (xs->zc)
			xsk_wakeup(xs, pool->cached_need_wakeup);
		else
			/* Poll needs to drive Tx also in copy mode */
			__xsk_sendmsg(sk);
	}

	if (xs->rx && !xskq_prod_is_empty(xs->rx))
		mask |= EPOLLIN | EPOLLRDNORM;
	if (xs->tx && !xskq_cons_is_full(xs->tx))
		mask |= EPOLLOUT | EPOLLWRNORM;

	return mask;
}
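/* Example (userspace, sketch): a poll()-driven loop matching the semantics
 * above: POLLIN means the Rx ring has entries, and in copy mode poll() itself
 * also drives Tx. process_rx_ring() is a hypothetical application routine.
 *
 *	#include <poll.h>
 *
 *	static void rx_loop(int xsk_fd)
 *	{
 *		struct pollfd pfd = { .fd = xsk_fd, .events = POLLIN };
 *
 *		for (;;) {
 *			if (poll(&pfd, 1, -1) <= 0)
 *				continue;
 *			if (pfd.revents & POLLIN)
 *				process_rx_ring();	// consume Rx descriptors
 *		}
 *	}
 */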
static int xsk_init_queue(u32 entries, struct xsk_queue **queue,
			  bool umem_queue)
{
	struct xsk_queue *q;

	if (entries == 0 || *queue || !is_power_of_2(entries))
		return -EINVAL;

	q = xskq_create(entries, umem_queue);
	if (!q)
		return -ENOMEM;

	/* Make sure queue is ready before it can be seen by others */
	smp_wmb();
	WRITE_ONCE(*queue, q);
	return 0;
}

static void xsk_unbind_dev(struct xdp_sock *xs)
{
	struct net_device *dev = xs->dev;

	if (xs->state != XSK_BOUND)
		return;
	WRITE_ONCE(xs->state, XSK_UNBOUND);

	/* Wait for driver to stop using the xdp socket. */
	xp_del_xsk(xs->pool, xs);
	xs->dev = NULL;
	synchronize_net();
	dev_put(dev);
}
static struct xsk_map *xsk_get_map_list_entry(struct xdp_sock *xs,
					      struct xdp_sock ***map_entry)
{
	struct xsk_map *map = NULL;
	struct xsk_map_node *node;

	*map_entry = NULL;

	spin_lock_bh(&xs->map_list_lock);
	node = list_first_entry_or_null(&xs->map_list, struct xsk_map_node,
					node);
	if (node) {
		WARN_ON(xsk_map_inc(node->map));
		map = node->map;
		*map_entry = node->map_entry;
	}
	spin_unlock_bh(&xs->map_list_lock);
	return map;
}

static void xsk_delete_from_maps(struct xdp_sock *xs)
{
	/* This function removes the current XDP socket from all the
	 * maps it resides in. We need to take extra care here, due to
	 * the two locks involved. Each map has a lock synchronizing
	 * updates to the entries, and each socket has a lock that
	 * synchronizes access to the list of maps (map_list). For
	 * deadlock avoidance the locks need to be taken in the order
	 * "map lock"->"socket map list lock". We start off by
	 * accessing the socket map list, and take a reference to the
	 * map to guarantee existence between the
	 * xsk_get_map_list_entry() and xsk_map_try_sock_delete()
	 * calls. Then we ask the map to remove the socket, which
	 * tries to remove the socket from the map. Note that there
	 * might be updates to the map between
	 * xsk_get_map_list_entry() and xsk_map_try_sock_delete().
	 */
	struct xdp_sock **map_entry = NULL;
	struct xsk_map *map;

	while ((map = xsk_get_map_list_entry(xs, &map_entry))) {
		xsk_map_try_sock_delete(map, xs, map_entry);
		xsk_map_put(map);
	}
}
static int xsk_release(struct socket *sock)
{
	struct sock *sk = sock->sk;
	struct xdp_sock *xs = xdp_sk(sk);
	struct net *net;

	if (!sk)
		return 0;

	net = sock_net(sk);

	mutex_lock(&net->xdp.lock);
	sk_del_node_init_rcu(sk);
	mutex_unlock(&net->xdp.lock);

	local_bh_disable();
	sock_prot_inuse_add(net, sk->sk_prot, -1);
	local_bh_enable();

	xsk_delete_from_maps(xs);
	mutex_lock(&xs->mutex);
	xsk_unbind_dev(xs);
	mutex_unlock(&xs->mutex);

	xskq_destroy(xs->rx);
	xskq_destroy(xs->tx);
	xskq_destroy(xs->fq_tmp);
	xskq_destroy(xs->cq_tmp);

	sock_orphan(sk);
	sock->sk = NULL;

	sk_refcnt_debug_release(sk);
	sock_put(sk);

	return 0;
}
static struct socket *xsk_lookup_xsk_from_fd(int fd)
{
	struct socket *sock;
	int err;

	sock = sockfd_lookup(fd, &err);
	if (!sock)
		return ERR_PTR(-ENOTSOCK);

	if (sock->sk->sk_family != PF_XDP) {
		sockfd_put(sock);
		return ERR_PTR(-ENOPROTOOPT);
	}

	return sock;
}

static bool xsk_validate_queues(struct xdp_sock *xs)
{
	return xs->fq_tmp && xs->cq_tmp;
}
static int xsk_bind(struct socket *sock, struct sockaddr *addr, int addr_len)
{
	struct sockaddr_xdp *sxdp = (struct sockaddr_xdp *)addr;
	struct sock *sk = sock->sk;
	struct xdp_sock *xs = xdp_sk(sk);
	struct net_device *dev;
	u32 flags, qid;
	int err = 0;

	if (addr_len < sizeof(struct sockaddr_xdp))
		return -EINVAL;
	if (sxdp->sxdp_family != AF_XDP)
		return -EINVAL;

	flags = sxdp->sxdp_flags;
	if (flags & ~(XDP_SHARED_UMEM | XDP_COPY | XDP_ZEROCOPY |
		      XDP_USE_NEED_WAKEUP))
		return -EINVAL;

	rtnl_lock();
	mutex_lock(&xs->mutex);
	if (xs->state != XSK_READY) {
		err = -EBUSY;
		goto out_release;
	}

	dev = dev_get_by_index(sock_net(sk), sxdp->sxdp_ifindex);
	if (!dev) {
		err = -ENODEV;
		goto out_release;
	}

	if (!xs->rx && !xs->tx) {
		err = -EINVAL;
		goto out_unlock;
	}

	qid = sxdp->sxdp_queue_id;

	if (flags & XDP_SHARED_UMEM) {
		struct xdp_sock *umem_xs;
		struct socket *sock;

		if ((flags & XDP_COPY) || (flags & XDP_ZEROCOPY) ||
		    (flags & XDP_USE_NEED_WAKEUP)) {
			/* Cannot specify flags for shared sockets. */
			err = -EINVAL;
			goto out_unlock;
		}

		if (xs->umem) {
			/* We have already our own. */
			err = -EINVAL;
			goto out_unlock;
		}

		sock = xsk_lookup_xsk_from_fd(sxdp->sxdp_shared_umem_fd);
		if (IS_ERR(sock)) {
			err = PTR_ERR(sock);
			goto out_unlock;
		}

		umem_xs = xdp_sk(sock->sk);
		if (!xsk_is_bound(umem_xs)) {
			err = -EBADF;
			sockfd_put(sock);
			goto out_unlock;
		}
		if (umem_xs->dev != dev) {
			err = -EINVAL;
			sockfd_put(sock);
			goto out_unlock;
		}

		if (umem_xs->queue_id != qid) {
			/* Share the umem with another socket on another qid */
			xs->pool = xp_create_and_assign_umem(xs,
							     umem_xs->umem);
			if (!xs->pool) {
				err = -ENOMEM;
				sockfd_put(sock);
				goto out_unlock;
			}

			err = xp_assign_dev_shared(xs->pool, umem_xs->umem,
						   dev, qid);
			if (err) {
				xp_destroy(xs->pool);
				xs->pool = NULL;
				sockfd_put(sock);
				goto out_unlock;
			}
		} else {
			/* Share the buffer pool with the other socket. */
			if (xs->fq_tmp || xs->cq_tmp) {
				/* Do not allow setting your own fq or cq. */
				err = -EINVAL;
				sockfd_put(sock);
				goto out_unlock;
			}

			xp_get_pool(umem_xs->pool);
			xs->pool = umem_xs->pool;
		}

		xdp_get_umem(umem_xs->umem);
		WRITE_ONCE(xs->umem, umem_xs->umem);
		sockfd_put(sock);
	} else if (!xs->umem || !xsk_validate_queues(xs)) {
		err = -EINVAL;
		goto out_unlock;
	} else {
		/* This xsk has its own umem. */
		xs->pool = xp_create_and_assign_umem(xs, xs->umem);
		if (!xs->pool) {
			err = -ENOMEM;
			goto out_unlock;
		}

		err = xp_assign_dev(xs->pool, dev, qid, flags);
		if (err) {
			xp_destroy(xs->pool);
			xs->pool = NULL;
			goto out_unlock;
		}
	}

	xs->dev = dev;
	xs->zc = xs->umem->zc;
	xs->queue_id = qid;
	xp_add_xsk(xs->pool, xs);

out_unlock:
	if (err) {
		dev_put(dev);
	} else {
		/* Matches smp_rmb() in bind() for shared umem
		 * sockets, and xsk_is_bound().
		 */
		smp_wmb();
		WRITE_ONCE(xs->state, XSK_BOUND);
	}
out_release:
	mutex_unlock(&xs->mutex);
	rtnl_unlock();
	return err;
}
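/* Example (userspace, sketch): binding one socket to queue 0 of an interface
 * and a second socket that shares the first one's umem via XDP_SHARED_UMEM.
 * The fd and ifindex values are illustrative.
 *
 *	#include <linux/if_xdp.h>
 *	#include <sys/socket.h>
 *
 *	static int bind_xsk(int fd, int ifindex, __u32 queue_id, __u16 flags,
 *			    int shared_umem_fd)
 *	{
 *		struct sockaddr_xdp sxdp = {
 *			.sxdp_family = AF_XDP,
 *			.sxdp_ifindex = ifindex,
 *			.sxdp_queue_id = queue_id,
 *			.sxdp_flags = flags,
 *			.sxdp_shared_umem_fd = shared_umem_fd,
 *		};
 *
 *		return bind(fd, (struct sockaddr *)&sxdp, sizeof(sxdp));
 *	}
 *
 *	// First socket, its own umem, wakeup driven:
 *	//	bind_xsk(fd1, ifindex, 0, XDP_USE_NEED_WAKEUP, 0);
 *	// Second socket on the same dev/queue, sharing fd1's buffer pool
 *	// (no other flags may be combined with XDP_SHARED_UMEM):
 *	//	bind_xsk(fd2, ifindex, 0, XDP_SHARED_UMEM, fd1);
 */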
struct xdp_umem_reg_v1 {
	__u64 addr; /* Start of packet data area */
	__u64 len; /* Length of packet data area */
	__u32 chunk_size;
	__u32 headroom;
};
static int xsk_setsockopt(struct socket *sock, int level, int optname,
			  sockptr_t optval, unsigned int optlen)
{
	struct sock *sk = sock->sk;
	struct xdp_sock *xs = xdp_sk(sk);
	int err;

	if (level != SOL_XDP)
		return -ENOPROTOOPT;

	switch (optname) {
	case XDP_RX_RING:
	case XDP_TX_RING:
	{
		struct xsk_queue **q;
		int entries;

		if (optlen < sizeof(entries))
			return -EINVAL;
		if (copy_from_sockptr(&entries, optval, sizeof(entries)))
			return -EFAULT;

		mutex_lock(&xs->mutex);
		if (xs->state != XSK_READY) {
			mutex_unlock(&xs->mutex);
			return -EBUSY;
		}
		q = (optname == XDP_TX_RING) ? &xs->tx : &xs->rx;
		err = xsk_init_queue(entries, q, false);
		if (!err && optname == XDP_TX_RING)
			/* Tx needs to be explicitly woken up the first time */
			xs->tx->ring->flags |= XDP_RING_NEED_WAKEUP;
		mutex_unlock(&xs->mutex);
		return err;
	}
	case XDP_UMEM_REG:
	{
		size_t mr_size = sizeof(struct xdp_umem_reg);
		struct xdp_umem_reg mr = {};
		struct xdp_umem *umem;

		if (optlen < sizeof(struct xdp_umem_reg_v1))
			return -EINVAL;
		else if (optlen < sizeof(mr))
			mr_size = sizeof(struct xdp_umem_reg_v1);

		if (copy_from_sockptr(&mr, optval, mr_size))
			return -EFAULT;

		/* Make sure umem isn't registered twice */
		mutex_lock(&xs->mutex);
		if (xs->state != XSK_READY || xs->umem) {
			mutex_unlock(&xs->mutex);
			return -EBUSY;
		}

		umem = xdp_umem_create(&mr);
		if (IS_ERR(umem)) {
			mutex_unlock(&xs->mutex);
			return PTR_ERR(umem);
		}

		/* Make sure umem is ready before it can be seen by others */
		smp_wmb();
		WRITE_ONCE(xs->umem, umem);
		mutex_unlock(&xs->mutex);
		return 0;
	}
	case XDP_UMEM_FILL_RING:
	case XDP_UMEM_COMPLETION_RING:
	{
		struct xsk_queue **q;
		int entries;

		if (copy_from_sockptr(&entries, optval, sizeof(entries)))
			return -EFAULT;

		mutex_lock(&xs->mutex);
		if (xs->state != XSK_READY) {
			mutex_unlock(&xs->mutex);
			return -EBUSY;
		}

		q = (optname == XDP_UMEM_FILL_RING) ? &xs->fq_tmp :
			&xs->cq_tmp;
		err = xsk_init_queue(entries, q, true);
		mutex_unlock(&xs->mutex);
		return err;
	}
	default:
		break;
	}

	return -ENOPROTOOPT;
}
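/* Example (userspace, sketch): the setup order the options above expect:
 * register the umem first, then size the four rings. Sizes are illustrative;
 * xsk_init_queue() rejects ring sizes that are not powers of two.
 *
 *	#include <linux/if_xdp.h>
 *	#include <sys/socket.h>
 *
 *	static int setup_xsk_rings(int fd, void *umem_area, __u64 umem_len)
 *	{
 *		struct xdp_umem_reg mr = {
 *			.addr = (__u64)(unsigned long)umem_area,
 *			.len = umem_len,
 *			.chunk_size = 2048,
 *			.headroom = 0,
 *		};
 *		int ring_sz = 2048;
 *
 *		if (setsockopt(fd, SOL_XDP, XDP_UMEM_REG, &mr, sizeof(mr)))
 *			return -1;
 *		if (setsockopt(fd, SOL_XDP, XDP_UMEM_FILL_RING,
 *			       &ring_sz, sizeof(ring_sz)) ||
 *		    setsockopt(fd, SOL_XDP, XDP_UMEM_COMPLETION_RING,
 *			       &ring_sz, sizeof(ring_sz)) ||
 *		    setsockopt(fd, SOL_XDP, XDP_RX_RING,
 *			       &ring_sz, sizeof(ring_sz)) ||
 *		    setsockopt(fd, SOL_XDP, XDP_TX_RING,
 *			       &ring_sz, sizeof(ring_sz)))
 *			return -1;
 *		return 0;
 *	}
 */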
static void xsk_enter_rxtx_offsets(struct xdp_ring_offset_v1 *ring)
{
	ring->producer = offsetof(struct xdp_rxtx_ring, ptrs.producer);
	ring->consumer = offsetof(struct xdp_rxtx_ring, ptrs.consumer);
	ring->desc = offsetof(struct xdp_rxtx_ring, desc);
}

static void xsk_enter_umem_offsets(struct xdp_ring_offset_v1 *ring)
{
	ring->producer = offsetof(struct xdp_umem_ring, ptrs.producer);
	ring->consumer = offsetof(struct xdp_umem_ring, ptrs.consumer);
	ring->desc = offsetof(struct xdp_umem_ring, desc);
}

struct xdp_statistics_v1 {
	__u64 rx_dropped;
	__u64 rx_invalid_descs;
	__u64 tx_invalid_descs;
};
static int xsk_getsockopt(struct socket *sock, int level, int optname,
			  char __user *optval, int __user *optlen)
{
	struct sock *sk = sock->sk;
	struct xdp_sock *xs = xdp_sk(sk);
	int len;

	if (level != SOL_XDP)
		return -ENOPROTOOPT;

	if (get_user(len, optlen))
		return -EFAULT;
	if (len < 0)
		return -EINVAL;

	switch (optname) {
	case XDP_STATISTICS:
	{
		struct xdp_statistics stats = {};
		bool extra_stats = true;
		size_t stats_size;

		if (len < sizeof(struct xdp_statistics_v1)) {
			return -EINVAL;
		} else if (len < sizeof(stats)) {
			extra_stats = false;
			stats_size = sizeof(struct xdp_statistics_v1);
		} else {
			stats_size = sizeof(stats);
		}

		mutex_lock(&xs->mutex);
		stats.rx_dropped = xs->rx_dropped;
		if (extra_stats) {
			stats.rx_ring_full = xs->rx_queue_full;
			stats.rx_fill_ring_empty_descs =
				xs->pool ? xskq_nb_queue_empty_descs(xs->pool->fq) : 0;
			stats.tx_ring_empty_descs = xskq_nb_queue_empty_descs(xs->tx);
		} else {
			stats.rx_dropped += xs->rx_queue_full;
		}
		stats.rx_invalid_descs = xskq_nb_invalid_descs(xs->rx);
		stats.tx_invalid_descs = xskq_nb_invalid_descs(xs->tx);
		mutex_unlock(&xs->mutex);

		if (copy_to_user(optval, &stats, stats_size))
			return -EFAULT;
		if (put_user(stats_size, optlen))
			return -EFAULT;

		return 0;
	}
	case XDP_MMAP_OFFSETS:
	{
		struct xdp_mmap_offsets off;
		struct xdp_mmap_offsets_v1 off_v1;
		bool flags_supported = true;
		void *to_copy;

		if (len < sizeof(off_v1))
			return -EINVAL;
		else if (len < sizeof(off))
			flags_supported = false;

		if (flags_supported) {
			/* xdp_ring_offset is identical to xdp_ring_offset_v1
			 * except for the flags field added to the end.
			 */
			xsk_enter_rxtx_offsets((struct xdp_ring_offset_v1 *)
					       &off.rx);
			xsk_enter_rxtx_offsets((struct xdp_ring_offset_v1 *)
					       &off.tx);
			xsk_enter_umem_offsets((struct xdp_ring_offset_v1 *)
					       &off.fr);
			xsk_enter_umem_offsets((struct xdp_ring_offset_v1 *)
					       &off.cr);
			off.rx.flags = offsetof(struct xdp_rxtx_ring,
						ptrs.flags);
			off.tx.flags = offsetof(struct xdp_rxtx_ring,
						ptrs.flags);
			off.fr.flags = offsetof(struct xdp_umem_ring,
						ptrs.flags);
			off.cr.flags = offsetof(struct xdp_umem_ring,
						ptrs.flags);

			len = sizeof(off);
			to_copy = &off;
		} else {
			xsk_enter_rxtx_offsets(&off_v1.rx);
			xsk_enter_rxtx_offsets(&off_v1.tx);
			xsk_enter_umem_offsets(&off_v1.fr);
			xsk_enter_umem_offsets(&off_v1.cr);

			len = sizeof(off_v1);
			to_copy = &off_v1;
		}

		if (copy_to_user(optval, to_copy, len))
			return -EFAULT;
		if (put_user(len, optlen))
			return -EFAULT;

		return 0;
	}
	case XDP_OPTIONS:
	{
		struct xdp_options opts = {};

		if (len < sizeof(opts))
			return -EINVAL;

		mutex_lock(&xs->mutex);
		if (xs->zc)
			opts.flags |= XDP_OPTIONS_ZEROCOPY;
		mutex_unlock(&xs->mutex);

		len = sizeof(opts);
		if (copy_to_user(optval, &opts, len))
			return -EFAULT;
		if (put_user(len, optlen))
			return -EFAULT;

		return 0;
	}
	default:
		break;
	}

	return -EOPNOTSUPP;
}
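/* Example (userspace, sketch): reading the statistics and ring offsets served
 * by the getsockopt() handlers above.
 *
 *	#include <linux/if_xdp.h>
 *	#include <stdio.h>
 *	#include <sys/socket.h>
 *
 *	static void dump_xsk_state(int fd)
 *	{
 *		struct xdp_statistics stats;
 *		struct xdp_mmap_offsets off;
 *		socklen_t optlen;
 *
 *		optlen = sizeof(stats);
 *		if (!getsockopt(fd, SOL_XDP, XDP_STATISTICS, &stats, &optlen))
 *			printf("rx_dropped: %llu\n",
 *			       (unsigned long long)stats.rx_dropped);
 *
 *		optlen = sizeof(off);
 *		if (!getsockopt(fd, SOL_XDP, XDP_MMAP_OFFSETS, &off, &optlen))
 *			printf("rx desc offset: %llu\n",
 *			       (unsigned long long)off.rx.desc);
 *	}
 */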
static int xsk_mmap(struct file *file, struct socket *sock,
		    struct vm_area_struct *vma)
{
	loff_t offset = (loff_t)vma->vm_pgoff << PAGE_SHIFT;
	unsigned long size = vma->vm_end - vma->vm_start;
	struct xdp_sock *xs = xdp_sk(sock->sk);
	struct xsk_queue *q = NULL;
	unsigned long pfn;
	struct page *qpg;

	if (READ_ONCE(xs->state) != XSK_READY)
		return -EBUSY;

	if (offset == XDP_PGOFF_RX_RING) {
		q = READ_ONCE(xs->rx);
	} else if (offset == XDP_PGOFF_TX_RING) {
		q = READ_ONCE(xs->tx);
	} else {
		/* Matches the smp_wmb() in XDP_UMEM_REG */
		smp_rmb();
		if (offset == XDP_UMEM_PGOFF_FILL_RING)
			q = READ_ONCE(xs->fq_tmp);
		else if (offset == XDP_UMEM_PGOFF_COMPLETION_RING)
			q = READ_ONCE(xs->cq_tmp);
	}

	if (!q)
		return -EINVAL;

	/* Matches the smp_wmb() in xsk_init_queue */
	smp_rmb();
	qpg = virt_to_head_page(q->ring);
	if (size > page_size(qpg))
		return -EINVAL;

	pfn = virt_to_phys(q->ring) >> PAGE_SHIFT;
	return remap_pfn_range(vma, vma->vm_start, pfn,
			       size, vma->vm_page_prot);
}
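/* Example (userspace, sketch): mapping the Rx ring with the page offset this
 * function dispatches on, sized from the XDP_MMAP_OFFSETS reply. ring_sz is
 * the entry count configured through XDP_RX_RING.
 *
 *	#include <linux/if_xdp.h>
 *	#include <sys/mman.h>
 *
 *	static void *map_rx_ring(int fd, const struct xdp_mmap_offsets *off,
 *				 __u32 ring_sz)
 *	{
 *		size_t len = off->rx.desc + ring_sz * sizeof(struct xdp_desc);
 *
 *		return mmap(NULL, len, PROT_READ | PROT_WRITE,
 *			    MAP_SHARED | MAP_POPULATE, fd, XDP_PGOFF_RX_RING);
 *	}
 *
 *	// The fill ring is mapped the same way with off->fr.desc, __u64
 *	// entries and XDP_UMEM_PGOFF_FILL_RING.
 */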
static int xsk_notifier(struct notifier_block *this,
			unsigned long msg, void *ptr)
{
	struct net_device *dev = netdev_notifier_info_to_dev(ptr);
	struct net *net = dev_net(dev);
	struct sock *sk;

	switch (msg) {
	case NETDEV_UNREGISTER:
		mutex_lock(&net->xdp.lock);
		sk_for_each(sk, &net->xdp.list) {
			struct xdp_sock *xs = xdp_sk(sk);

			mutex_lock(&xs->mutex);
			if (xs->dev == dev) {
				sk->sk_err = ENETDOWN;
				if (!sock_flag(sk, SOCK_DEAD))
					sk->sk_error_report(sk);

				xsk_unbind_dev(xs);

				/* Clear device references. */
				xp_clear_dev(xs->pool);
			}
			mutex_unlock(&xs->mutex);
		}
		mutex_unlock(&net->xdp.lock);
		break;
	}

	return NOTIFY_DONE;
}
static struct proto xsk_proto = {
	.name = "XDP",
	.owner = THIS_MODULE,
	.obj_size = sizeof(struct xdp_sock),
};

static const struct proto_ops xsk_proto_ops = {
	.family = PF_XDP,
	.owner = THIS_MODULE,
	.release = xsk_release,
	.bind = xsk_bind,
	.connect = sock_no_connect,
	.socketpair = sock_no_socketpair,
	.accept = sock_no_accept,
	.getname = sock_no_getname,
	.poll = xsk_poll,
	.ioctl = sock_no_ioctl,
	.listen = sock_no_listen,
	.shutdown = sock_no_shutdown,
	.setsockopt = xsk_setsockopt,
	.getsockopt = xsk_getsockopt,
	.sendmsg = xsk_sendmsg,
	.recvmsg = sock_no_recvmsg,
	.mmap = xsk_mmap,
	.sendpage = sock_no_sendpage,
};
static void xsk_destruct(struct sock *sk)
{
	struct xdp_sock *xs = xdp_sk(sk);

	if (!sock_flag(sk, SOCK_DEAD))
		return;

	xp_put_pool(xs->pool);

	sk_refcnt_debug_dec(sk);
}
static int xsk_create(struct net *net, struct socket *sock, int protocol,
		      int kern)
{
	struct xdp_sock *xs;
	struct sock *sk;

	if (!ns_capable(net->user_ns, CAP_NET_RAW))
		return -EPERM;
	if (sock->type != SOCK_RAW)
		return -ESOCKTNOSUPPORT;

	if (protocol)
		return -EPROTONOSUPPORT;

	sock->state = SS_UNCONNECTED;

	sk = sk_alloc(net, PF_XDP, GFP_KERNEL, &xsk_proto, kern);
	if (!sk)
		return -ENOBUFS;

	sock->ops = &xsk_proto_ops;

	sock_init_data(sock, sk);

	sk->sk_family = PF_XDP;

	sk->sk_destruct = xsk_destruct;
	sk_refcnt_debug_inc(sk);

	sock_set_flag(sk, SOCK_RCU_FREE);

	xs = xdp_sk(sk);
	xs->state = XSK_READY;
	mutex_init(&xs->mutex);
	spin_lock_init(&xs->rx_lock);
	spin_lock_init(&xs->tx_completion_lock);

	INIT_LIST_HEAD(&xs->map_list);
	spin_lock_init(&xs->map_list_lock);

	mutex_lock(&net->xdp.lock);
	sk_add_node_rcu(sk, &net->xdp.list);
	mutex_unlock(&net->xdp.lock);

	local_bh_disable();
	sock_prot_inuse_add(net, &xsk_proto, 1);
	local_bh_enable();

	return 0;
}
static const struct net_proto_family xsk_family_ops = {
	.family = PF_XDP,
	.create = xsk_create,
	.owner = THIS_MODULE,
};

static struct notifier_block xsk_netdev_notifier = {
	.notifier_call = xsk_notifier,
};

static int __net_init xsk_net_init(struct net *net)
{
	mutex_init(&net->xdp.lock);
	INIT_HLIST_HEAD(&net->xdp.list);
	return 0;
}

static void __net_exit xsk_net_exit(struct net *net)
{
	WARN_ON_ONCE(!hlist_empty(&net->xdp.list));
}

static struct pernet_operations xsk_net_ops = {
	.init = xsk_net_init,
	.exit = xsk_net_exit,
};
static int __init xsk_init(void)
{
	int err, cpu;

	err = proto_register(&xsk_proto, 0 /* no slab */);
	if (err)
		goto out;

	err = sock_register(&xsk_family_ops);
	if (err)
		goto out_proto;

	err = register_pernet_subsys(&xsk_net_ops);
	if (err)
		goto out_sk;

	err = register_netdevice_notifier(&xsk_netdev_notifier);
	if (err)
		goto out_pernet;

	for_each_possible_cpu(cpu)
		INIT_LIST_HEAD(&per_cpu(xskmap_flush_list, cpu));

	return 0;

out_pernet:
	unregister_pernet_subsys(&xsk_net_ops);
out_sk:
	sock_unregister(PF_XDP);
out_proto:
	proto_unregister(&xsk_proto);
out:
	return err;
}

fs_initcall(xsk_init);