// SPDX-License-Identifier: GPL-2.0
/* XDP sockets
 *
 * AF_XDP sockets allows a channel between XDP programs and userspace
 * applications.
 * Copyright(c) 2018 Intel Corporation.
 *
 * Author(s): Björn Töpel <bjorn.topel@intel.com>
 *	      Magnus Karlsson <magnus.karlsson@intel.com>
 */

#define pr_fmt(fmt) "AF_XDP: %s: " fmt, __func__

#include <linux/if_xdp.h>
#include <linux/init.h>
#include <linux/sched/mm.h>
#include <linux/sched/signal.h>
#include <linux/sched/task.h>
#include <linux/socket.h>
#include <linux/file.h>
#include <linux/uaccess.h>
#include <linux/net.h>
#include <linux/netdevice.h>
#include <linux/rculist.h>
#include <net/xdp_sock.h>
#include <net/xdp.h>

#include "xsk_queue.h"
#include "xdp_umem.h"
#include "xsk.h"

#define TX_BATCH_SIZE 16

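/* True once the socket has both an Rx ring and a umem with a fill queue,
 * i.e. it is sufficiently set up to be inserted into an XSKMAP and receive
 * redirected packets.
 */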
bool xsk_is_setup_for_bpf_map(struct xdp_sock *xs)
{
	return READ_ONCE(xs->rx) && READ_ONCE(xs->umem) &&
		READ_ONCE(xs->umem->fq);
}

bool xsk_umem_has_addrs(struct xdp_umem *umem, u32 cnt)
{
	return xskq_has_addrs(umem->fq, cnt);
}
EXPORT_SYMBOL(xsk_umem_has_addrs);

u64 *xsk_umem_peek_addr(struct xdp_umem *umem, u64 *addr)
{
	return xskq_peek_addr(umem->fq, addr, umem);
}
EXPORT_SYMBOL(xsk_umem_peek_addr);

void xsk_umem_discard_addr(struct xdp_umem *umem)
{
	xskq_discard_addr(umem->fq);
}
EXPORT_SYMBOL(xsk_umem_discard_addr);

void xsk_set_rx_need_wakeup(struct xdp_umem *umem)
{
	if (umem->need_wakeup & XDP_WAKEUP_RX)
		return;

	umem->fq->ring->flags |= XDP_RING_NEED_WAKEUP;
	umem->need_wakeup |= XDP_WAKEUP_RX;
}
EXPORT_SYMBOL(xsk_set_rx_need_wakeup);

void xsk_set_tx_need_wakeup(struct xdp_umem *umem)
{
	struct xdp_sock *xs;

	if (umem->need_wakeup & XDP_WAKEUP_TX)
		return;

	rcu_read_lock();
	list_for_each_entry_rcu(xs, &umem->xsk_list, list) {
		xs->tx->ring->flags |= XDP_RING_NEED_WAKEUP;
	}
	rcu_read_unlock();

	umem->need_wakeup |= XDP_WAKEUP_TX;
}
EXPORT_SYMBOL(xsk_set_tx_need_wakeup);

void xsk_clear_rx_need_wakeup(struct xdp_umem *umem)
{
	if (!(umem->need_wakeup & XDP_WAKEUP_RX))
		return;

	umem->fq->ring->flags &= ~XDP_RING_NEED_WAKEUP;
	umem->need_wakeup &= ~XDP_WAKEUP_RX;
}
EXPORT_SYMBOL(xsk_clear_rx_need_wakeup);

void xsk_clear_tx_need_wakeup(struct xdp_umem *umem)
{
	struct xdp_sock *xs;

	if (!(umem->need_wakeup & XDP_WAKEUP_TX))
		return;

	rcu_read_lock();
	list_for_each_entry_rcu(xs, &umem->xsk_list, list) {
		xs->tx->ring->flags &= ~XDP_RING_NEED_WAKEUP;
	}
	rcu_read_unlock();

	umem->need_wakeup &= ~XDP_WAKEUP_TX;
}
EXPORT_SYMBOL(xsk_clear_tx_need_wakeup);

bool xsk_umem_uses_need_wakeup(struct xdp_umem *umem)
{
	return umem->flags & XDP_UMEM_USES_NEED_WAKEUP;
}
EXPORT_SYMBOL(xsk_umem_uses_need_wakeup);

/* If a buffer crosses a page boundary, we need to do 2 memcpy's, one for
 * each page. This is only required in copy mode.
 */
static void __xsk_rcv_memcpy(struct xdp_umem *umem, u64 addr, void *from_buf,
			     u32 len, u32 metalen)
{
	void *to_buf = xdp_umem_get_data(umem, addr);

	addr = xsk_umem_add_offset_to_addr(addr);
	if (xskq_crosses_non_contig_pg(umem, addr, len + metalen)) {
		void *next_pg_addr = umem->pages[(addr >> PAGE_SHIFT) + 1].addr;
		u64 page_start = addr & ~(PAGE_SIZE - 1);
		u64 first_len = PAGE_SIZE - (addr - page_start);

		memcpy(to_buf, from_buf, first_len + metalen);
		memcpy(next_pg_addr, from_buf + first_len, len - first_len);

		return;
	}

	memcpy(to_buf, from_buf, len + metalen);
}

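/* Copy-mode receive: take a buffer address off the fill queue, copy the
 * packet (plus any XDP metadata) into the umem and post a descriptor on
 * the Rx ring. The xdp_buff is handed back to the XDP layer on success.
 */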
static int __xsk_rcv(struct xdp_sock *xs, struct xdp_buff *xdp, u32 len)
{
	u64 offset = xs->umem->headroom;
	u64 addr, memcpy_addr;
	void *from_buf;
	u32 metalen;
	int err;

	if (!xskq_peek_addr(xs->umem->fq, &addr, xs->umem) ||
	    len > xs->umem->chunk_size_nohr - XDP_PACKET_HEADROOM) {
		xs->rx_dropped++;
		return -ENOSPC;
	}

	if (unlikely(xdp_data_meta_unsupported(xdp))) {
		from_buf = xdp->data;
		metalen = 0;
	} else {
		from_buf = xdp->data_meta;
		metalen = xdp->data - xdp->data_meta;
	}

	memcpy_addr = xsk_umem_adjust_offset(xs->umem, addr, offset);
	__xsk_rcv_memcpy(xs->umem, memcpy_addr, from_buf, len, metalen);

	offset += metalen;
	addr = xsk_umem_adjust_offset(xs->umem, addr, offset);
	err = xskq_produce_batch_desc(xs->rx, addr, len);
	if (!err) {
		xskq_discard_addr(xs->umem->fq);
		xdp_return_buff(xdp);
		return 0;
	}

	xs->rx_dropped++;
	return err;
}

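/* Zero-copy receive: the frame already sits in the umem, so only a
 * descriptor referencing the driver-provided handle is posted on the
 * Rx ring.
 */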
static int __xsk_rcv_zc(struct xdp_sock *xs, struct xdp_buff *xdp, u32 len)
{
	int err = xskq_produce_batch_desc(xs->rx, (u64)xdp->handle, len);

	if (err)
		xs->rx_dropped++;

	return err;
}

int xsk_rcv(struct xdp_sock *xs, struct xdp_buff *xdp)
{
	u32 len;

	if (xs->dev != xdp->rxq->dev || xs->queue_id != xdp->rxq->queue_index)
		return -EINVAL;

	len = xdp->data_end - xdp->data;

	return (xdp->rxq->mem.type == MEM_TYPE_ZERO_COPY) ?
		__xsk_rcv_zc(xs, xdp, len) : __xsk_rcv(xs, xdp, len);
}

void xsk_flush(struct xdp_sock *xs)
{
	xskq_produce_flush_desc(xs->rx);
	xs->sk.sk_data_ready(&xs->sk);
}

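/* Receive path for generic XDP (XDP_SKB). Unlike the driver path above,
 * this runs without a later xsk_flush() call, so the Rx ring is flushed
 * and the socket woken up before returning, under the socket's rx_lock.
 */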
int xsk_generic_rcv(struct xdp_sock *xs, struct xdp_buff *xdp)
{
	u32 metalen = xdp->data - xdp->data_meta;
	u32 len = xdp->data_end - xdp->data;
	u64 offset = xs->umem->headroom;
	void *buffer;
	u64 addr;
	int err;

	spin_lock_bh(&xs->rx_lock);

	if (xs->dev != xdp->rxq->dev || xs->queue_id != xdp->rxq->queue_index) {
		err = -EINVAL;
		goto out_unlock;
	}

	if (!xskq_peek_addr(xs->umem->fq, &addr, xs->umem) ||
	    len > xs->umem->chunk_size_nohr - XDP_PACKET_HEADROOM) {
		err = -ENOSPC;
		goto out_drop;
	}

	addr = xsk_umem_adjust_offset(xs->umem, addr, offset);
	buffer = xdp_umem_get_data(xs->umem, addr);
	memcpy(buffer, xdp->data_meta, len + metalen);

	addr = xsk_umem_adjust_offset(xs->umem, addr, metalen);
	err = xskq_produce_batch_desc(xs->rx, addr, len);
	if (err)
		goto out_drop;

	xskq_discard_addr(xs->umem->fq);
	xskq_produce_flush_desc(xs->rx);

	spin_unlock_bh(&xs->rx_lock);

	xs->sk.sk_data_ready(&xs->sk);
	return 0;

out_drop:
	xs->rx_dropped++;
out_unlock:
	spin_unlock_bh(&xs->rx_lock);
	return err;
}

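/* Tx completion helpers used by zero-copy drivers: completed frames are
 * posted to the completion ring and any sockets sharing the umem are
 * woken up for more Tx work.
 */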
void xsk_umem_complete_tx(struct xdp_umem *umem, u32 nb_entries)
{
	xskq_produce_flush_addr_n(umem->cq, nb_entries);
}
EXPORT_SYMBOL(xsk_umem_complete_tx);

void xsk_umem_consume_tx_done(struct xdp_umem *umem)
{
	struct xdp_sock *xs;

	rcu_read_lock();
	list_for_each_entry_rcu(xs, &umem->xsk_list, list) {
		xs->sk.sk_write_space(&xs->sk);
	}
	rcu_read_unlock();
}
EXPORT_SYMBOL(xsk_umem_consume_tx_done);

bool xsk_umem_consume_tx(struct xdp_umem *umem, struct xdp_desc *desc)
{
	struct xdp_sock *xs;

	rcu_read_lock();
	list_for_each_entry_rcu(xs, &umem->xsk_list, list) {
		if (!xskq_peek_desc(xs->tx, desc, umem))
			continue;

		if (xskq_produce_addr_lazy(umem->cq, desc->addr))
			goto out;

		xskq_discard_desc(xs->tx);
		rcu_read_unlock();
		return true;
	}

out:
	rcu_read_unlock();
	return false;
}
EXPORT_SYMBOL(xsk_umem_consume_tx);

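/* Zero-copy send only needs to kick the driver; the frames themselves are
 * picked up from the Tx ring by the driver via xsk_umem_consume_tx().
 */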
static int xsk_zc_xmit(struct sock *sk)
{
	struct xdp_sock *xs = xdp_sk(sk);
	struct net_device *dev = xs->dev;

	return dev->netdev_ops->ndo_xsk_wakeup(dev, xs->queue_id,
					       XDP_WAKEUP_TX);
}

static void xsk_destruct_skb(struct sk_buff *skb)
{
	u64 addr = (u64)(long)skb_shinfo(skb)->destructor_arg;
	struct xdp_sock *xs = xdp_sk(skb->sk);
	unsigned long flags;

	spin_lock_irqsave(&xs->tx_completion_lock, flags);
	WARN_ON_ONCE(xskq_produce_addr(xs->umem->cq, addr));
	spin_unlock_irqrestore(&xs->tx_completion_lock, flags);

	sock_wfree(skb);
}

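/* Copy-mode send: each Tx descriptor is copied into a freshly allocated
 * skb and transmitted with dev_direct_xmit() on the bound queue. A
 * completion ring slot is reserved up front and filled in from the skb
 * destructor once the skb is freed.
 */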
static int xsk_generic_xmit(struct sock *sk, struct msghdr *m,
			    size_t total_len)
{
	u32 max_batch = TX_BATCH_SIZE;
	struct xdp_sock *xs = xdp_sk(sk);
	bool sent_frame = false;
	struct xdp_desc desc;
	struct sk_buff *skb;
	int err = 0;

	mutex_lock(&xs->mutex);

	if (xs->queue_id >= xs->dev->real_num_tx_queues)
		goto out;

	while (xskq_peek_desc(xs->tx, &desc, xs->umem)) {
		char *buffer;
		u64 addr;
		u32 len;

		if (max_batch-- == 0) {
			err = -EAGAIN;
			goto out;
		}

		len = desc.len;
		skb = sock_alloc_send_skb(sk, len, 1, &err);
		if (unlikely(!skb)) {
			err = -EAGAIN;
			goto out;
		}

		skb_put(skb, len);
		addr = desc.addr;
		buffer = xdp_umem_get_data(xs->umem, addr);
		err = skb_store_bits(skb, 0, buffer, len);
		if (unlikely(err) || xskq_reserve_addr(xs->umem->cq)) {
			kfree_skb(skb);
			goto out;
		}

		skb->dev = xs->dev;
		skb->priority = sk->sk_priority;
		skb->mark = sk->sk_mark;
		skb_shinfo(skb)->destructor_arg = (void *)(long)desc.addr;
		skb->destructor = xsk_destruct_skb;

		err = dev_direct_xmit(skb, xs->queue_id);
		xskq_discard_desc(xs->tx);
		/* Ignore NET_XMIT_CN as packet might have been sent */
		if (err == NET_XMIT_DROP || err == NETDEV_TX_BUSY) {
			/* SKB completed but not sent */
			err = -EBUSY;
			goto out;
		}

		sent_frame = true;
	}

out:
	if (sent_frame)
		sk->sk_write_space(sk);

	mutex_unlock(&xs->mutex);
	return err;
}

static int xsk_sendmsg(struct socket *sock, struct msghdr *m, size_t total_len)
{
	bool need_wait = !(m->msg_flags & MSG_DONTWAIT);
	struct sock *sk = sock->sk;
	struct xdp_sock *xs = xdp_sk(sk);

	if (unlikely(!xs->dev))
		return -ENXIO;
	if (unlikely(!(xs->dev->flags & IFF_UP)))
		return -ENETDOWN;
	if (unlikely(!xs->tx))
		return -ENOBUFS;
	if (need_wait)
		return -EOPNOTSUPP;

	return (xs->zc) ? xsk_zc_xmit(sk) : xsk_generic_xmit(sk, m, total_len);
}

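/* With the need_wakeup feature enabled, poll() doubles as the kick that
 * tells the driver to start processing the rings.
 */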
static unsigned int xsk_poll(struct file *file, struct socket *sock,
			     struct poll_table_struct *wait)
{
	unsigned int mask = datagram_poll(file, sock, wait);
	struct sock *sk = sock->sk;
	struct xdp_sock *xs = xdp_sk(sk);
	struct net_device *dev = xs->dev;
	struct xdp_umem *umem = xs->umem;

	if (umem->need_wakeup)
		dev->netdev_ops->ndo_xsk_wakeup(dev, xs->queue_id,
						umem->need_wakeup);

	if (xs->rx && !xskq_empty_desc(xs->rx))
		mask |= POLLIN | POLLRDNORM;
	if (xs->tx && !xskq_full_desc(xs->tx))
		mask |= POLLOUT | POLLWRNORM;

	return mask;
}

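/* Ring sizes must be a power of two so that ring indices can be wrapped
 * with a simple mask.
 */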
static int xsk_init_queue(u32 entries, struct xsk_queue **queue,
			  bool umem_queue)
{
	struct xsk_queue *q;

	if (entries == 0 || *queue || !is_power_of_2(entries))
		return -EINVAL;

	q = xskq_create(entries, umem_queue);
	if (!q)
		return -ENOMEM;

	/* Make sure queue is ready before it can be seen by others */
	smp_wmb();
	WRITE_ONCE(*queue, q);
	return 0;
}

static void xsk_unbind_dev(struct xdp_sock *xs)
{
	struct net_device *dev = xs->dev;

	if (!dev || xs->state != XSK_BOUND)
		return;

	xs->state = XSK_UNBOUND;

	/* Wait for driver to stop using the xdp socket. */
	xdp_del_sk_umem(xs->umem, xs);
	xs->dev = NULL;
	synchronize_net();
	dev_put(dev);
}

static struct xsk_map *xsk_get_map_list_entry(struct xdp_sock *xs,
					      struct xdp_sock ***map_entry)
{
	struct xsk_map *map = NULL;
	struct xsk_map_node *node;

	*map_entry = NULL;

	spin_lock_bh(&xs->map_list_lock);
	node = list_first_entry_or_null(&xs->map_list, struct xsk_map_node,
					node);
	if (node) {
		WARN_ON(xsk_map_inc(node->map));
		map = node->map;
		*map_entry = node->map_entry;
	}
	spin_unlock_bh(&xs->map_list_lock);
	return map;
}

static void xsk_delete_from_maps(struct xdp_sock *xs)
{
	/* This function removes the current XDP socket from all the
	 * maps it resides in. We need to take extra care here, due to
	 * the two locks involved. Each map has a lock synchronizing
	 * updates to the entries, and each socket has a lock that
	 * synchronizes access to the list of maps (map_list). For
	 * deadlock avoidance the locks need to be taken in the order
	 * "map lock"->"socket map list lock". We start off by
	 * accessing the socket map list, and take a reference to the
	 * map to guarantee existence between the
	 * xsk_get_map_list_entry() and xsk_map_try_sock_delete()
	 * calls. Then we ask the map to remove the socket, which
	 * tries to remove the socket from the map. Note that there
	 * might be updates to the map between
	 * xsk_get_map_list_entry() and xsk_map_try_sock_delete().
	 */
	struct xdp_sock **map_entry = NULL;
	struct xsk_map *map;

	while ((map = xsk_get_map_list_entry(xs, &map_entry))) {
		xsk_map_try_sock_delete(map, xs, map_entry);
		xsk_map_put(map);
	}
}

static int xsk_release(struct socket *sock)
{
	struct sock *sk = sock->sk;
	struct xdp_sock *xs = xdp_sk(sk);
	struct net *net;

	if (!sk)
		return 0;

	net = sock_net(sk);

	mutex_lock(&net->xdp.lock);
	sk_del_node_init_rcu(sk);
	mutex_unlock(&net->xdp.lock);

	local_bh_disable();
	sock_prot_inuse_add(net, sk->sk_prot, -1);
	local_bh_enable();

	xsk_delete_from_maps(xs);
	xsk_unbind_dev(xs);

	xskq_destroy(xs->rx);
	xskq_destroy(xs->tx);

	sock_orphan(sk);
	sock->sk = NULL;

	sk_refcnt_debug_release(sk);
	sock_put(sk);

	return 0;
}

static struct socket *xsk_lookup_xsk_from_fd(int fd)
{
	struct socket *sock;
	int err;

	sock = sockfd_lookup(fd, &err);
	if (!sock)
		return ERR_PTR(-ENOTSOCK);

	if (sock->sk->sk_family != PF_XDP) {
		sockfd_put(sock);
		return ERR_PTR(-ENOPROTOOPT);
	}

	return sock;
}

/* Check if umem pages are contiguous.
 * If zero-copy mode, use the DMA address to do the page contiguity check
 * For all other modes we use addr (kernel virtual address)
 * Store the result in the low bits of addr.
 */
static void xsk_check_page_contiguity(struct xdp_umem *umem, u32 flags)
{
	struct xdp_umem_page *pgs = umem->pages;
	int i, is_contig;

	for (i = 0; i < umem->npgs - 1; i++) {
		is_contig = (flags & XDP_ZEROCOPY) ?
			(pgs[i].dma + PAGE_SIZE == pgs[i + 1].dma) :
			(pgs[i].addr + PAGE_SIZE == pgs[i + 1].addr);
		pgs[i].addr += is_contig << XSK_NEXT_PG_CONTIG_SHIFT;
	}
}

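/* Bind the socket to a <netdev, queue id> pair. The umem is either the
 * socket's own (registered earlier with XDP_UMEM_REG) or, when
 * XDP_SHARED_UMEM is set, inherited from another socket already bound to
 * the same device and queue.
 */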
static int xsk_bind(struct socket *sock, struct sockaddr *addr, int addr_len)
{
	struct sockaddr_xdp *sxdp = (struct sockaddr_xdp *)addr;
	struct sock *sk = sock->sk;
	struct xdp_sock *xs = xdp_sk(sk);
	struct net_device *dev;
	u32 flags, qid;
	int err = 0;

	if (addr_len < sizeof(struct sockaddr_xdp))
		return -EINVAL;
	if (sxdp->sxdp_family != AF_XDP)
		return -EINVAL;

	flags = sxdp->sxdp_flags;
	if (flags & ~(XDP_SHARED_UMEM | XDP_COPY | XDP_ZEROCOPY |
		      XDP_USE_NEED_WAKEUP))
		return -EINVAL;

	rtnl_lock();
	mutex_lock(&xs->mutex);
	if (xs->state != XSK_READY) {
		err = -EBUSY;
		goto out_release;
	}

	dev = dev_get_by_index(sock_net(sk), sxdp->sxdp_ifindex);
	if (!dev) {
		err = -ENODEV;
		goto out_release;
	}

	if (!xs->rx && !xs->tx) {
		err = -EINVAL;
		goto out_unlock;
	}

	qid = sxdp->sxdp_queue_id;

	if (flags & XDP_SHARED_UMEM) {
		struct xdp_sock *umem_xs;
		struct socket *sock;

		if ((flags & XDP_COPY) || (flags & XDP_ZEROCOPY) ||
		    (flags & XDP_USE_NEED_WAKEUP)) {
			/* Cannot specify flags for shared sockets. */
			err = -EINVAL;
			goto out_unlock;
		}

		if (xs->umem) {
			/* We have already our own. */
			err = -EINVAL;
			goto out_unlock;
		}

		sock = xsk_lookup_xsk_from_fd(sxdp->sxdp_shared_umem_fd);
		if (IS_ERR(sock)) {
			err = PTR_ERR(sock);
			goto out_unlock;
		}

		umem_xs = xdp_sk(sock->sk);
		if (!umem_xs->umem) {
			/* No umem to inherit. */
			err = -EBADF;
			sockfd_put(sock);
			goto out_unlock;
		} else if (umem_xs->dev != dev || umem_xs->queue_id != qid) {
			err = -EINVAL;
			sockfd_put(sock);
			goto out_unlock;
		}

		xdp_get_umem(umem_xs->umem);
		WRITE_ONCE(xs->umem, umem_xs->umem);
		sockfd_put(sock);
	} else if (!xs->umem || !xdp_umem_validate_queues(xs->umem)) {
		err = -EINVAL;
		goto out_unlock;
	} else {
		/* This xsk has its own umem. */
		xskq_set_umem(xs->umem->fq, xs->umem->size,
			      xs->umem->chunk_mask);
		xskq_set_umem(xs->umem->cq, xs->umem->size,
			      xs->umem->chunk_mask);

		err = xdp_umem_assign_dev(xs->umem, dev, qid, flags);
		if (err)
			goto out_unlock;

		xsk_check_page_contiguity(xs->umem, flags);
	}

	xs->dev = dev;
	xs->zc = xs->umem->zc;
	xs->queue_id = qid;
	xskq_set_umem(xs->rx, xs->umem->size, xs->umem->chunk_mask);
	xskq_set_umem(xs->tx, xs->umem->size, xs->umem->chunk_mask);
	xdp_add_sk_umem(xs->umem, xs);

out_unlock:
	if (err)
		dev_put(dev);
	else
		xs->state = XSK_BOUND;
out_release:
	mutex_unlock(&xs->mutex);
	rtnl_unlock();
	return err;
}

struct xdp_umem_reg_v1 {
	__u64 addr; /* Start of packet data area */
	__u64 len; /* Length of packet data area */
	__u32 chunk_size;
	__u32 headroom;
};

static int xsk_setsockopt(struct socket *sock, int level, int optname,
			  char __user *optval, unsigned int optlen)
{
	struct sock *sk = sock->sk;
	struct xdp_sock *xs = xdp_sk(sk);
	int err;

	if (level != SOL_XDP)
		return -ENOPROTOOPT;

	switch (optname) {
	case XDP_RX_RING:
	case XDP_TX_RING:
	{
		struct xsk_queue **q;
		int entries;

		if (optlen < sizeof(entries))
			return -EINVAL;
		if (copy_from_user(&entries, optval, sizeof(entries)))
			return -EFAULT;

		mutex_lock(&xs->mutex);
		if (xs->state != XSK_READY) {
			mutex_unlock(&xs->mutex);
			return -EBUSY;
		}
		q = (optname == XDP_TX_RING) ? &xs->tx : &xs->rx;
		err = xsk_init_queue(entries, q, false);
		if (!err && optname == XDP_TX_RING)
			/* Tx needs to be explicitly woken up the first time */
			xs->tx->ring->flags |= XDP_RING_NEED_WAKEUP;
		mutex_unlock(&xs->mutex);
		return err;
	}
	case XDP_UMEM_REG:
	{
		size_t mr_size = sizeof(struct xdp_umem_reg);
		struct xdp_umem_reg mr = {};
		struct xdp_umem *umem;

		if (optlen < sizeof(struct xdp_umem_reg_v1))
			return -EINVAL;
		else if (optlen < sizeof(mr))
			mr_size = sizeof(struct xdp_umem_reg_v1);

		if (copy_from_user(&mr, optval, mr_size))
			return -EFAULT;

		mutex_lock(&xs->mutex);
		if (xs->state != XSK_READY || xs->umem) {
			mutex_unlock(&xs->mutex);
			return -EBUSY;
		}

		umem = xdp_umem_create(&mr);
		if (IS_ERR(umem)) {
			mutex_unlock(&xs->mutex);
			return PTR_ERR(umem);
		}

		/* Make sure umem is ready before it can be seen by others */
		smp_wmb();
		WRITE_ONCE(xs->umem, umem);
		mutex_unlock(&xs->mutex);
		return 0;
	}
	case XDP_UMEM_FILL_RING:
	case XDP_UMEM_COMPLETION_RING:
	{
		struct xsk_queue **q;
		int entries;

		if (copy_from_user(&entries, optval, sizeof(entries)))
			return -EFAULT;

		mutex_lock(&xs->mutex);
		if (xs->state != XSK_READY) {
			mutex_unlock(&xs->mutex);
			return -EBUSY;
		}
		if (!xs->umem) {
			mutex_unlock(&xs->mutex);
			return -EINVAL;
		}

		q = (optname == XDP_UMEM_FILL_RING) ? &xs->umem->fq :
			&xs->umem->cq;
		err = xsk_init_queue(entries, q, true);
		mutex_unlock(&xs->mutex);
		return err;
	}
	default:
		break;
	}

	return -ENOPROTOOPT;
}

static void xsk_enter_rxtx_offsets(struct xdp_ring_offset_v1 *ring)
{
	ring->producer = offsetof(struct xdp_rxtx_ring, ptrs.producer);
	ring->consumer = offsetof(struct xdp_rxtx_ring, ptrs.consumer);
	ring->desc = offsetof(struct xdp_rxtx_ring, desc);
}

static void xsk_enter_umem_offsets(struct xdp_ring_offset_v1 *ring)
{
	ring->producer = offsetof(struct xdp_umem_ring, ptrs.producer);
	ring->consumer = offsetof(struct xdp_umem_ring, ptrs.consumer);
	ring->desc = offsetof(struct xdp_umem_ring, desc);
}

static int xsk_getsockopt(struct socket *sock, int level, int optname,
			  char __user *optval, int __user *optlen)
{
	struct sock *sk = sock->sk;
	struct xdp_sock *xs = xdp_sk(sk);
	int len;

	if (level != SOL_XDP)
		return -ENOPROTOOPT;

	if (get_user(len, optlen))
		return -EFAULT;
	if (len < 0)
		return -EINVAL;

	switch (optname) {
	case XDP_STATISTICS:
	{
		struct xdp_statistics stats;

		if (len < sizeof(stats))
			return -EINVAL;

		mutex_lock(&xs->mutex);
		stats.rx_dropped = xs->rx_dropped;
		stats.rx_invalid_descs = xskq_nb_invalid_descs(xs->rx);
		stats.tx_invalid_descs = xskq_nb_invalid_descs(xs->tx);
		mutex_unlock(&xs->mutex);

		if (copy_to_user(optval, &stats, sizeof(stats)))
			return -EFAULT;
		if (put_user(sizeof(stats), optlen))
			return -EFAULT;

		return 0;
	}
	case XDP_MMAP_OFFSETS:
	{
		struct xdp_mmap_offsets off;
		struct xdp_mmap_offsets_v1 off_v1;
		bool flags_supported = true;
		void *to_copy;

		if (len < sizeof(off_v1))
			return -EINVAL;
		else if (len < sizeof(off))
			flags_supported = false;

		if (flags_supported) {
			/* xdp_ring_offset is identical to xdp_ring_offset_v1
			 * except for the flags field added to the end.
			 */
			xsk_enter_rxtx_offsets((struct xdp_ring_offset_v1 *)
					       &off.rx);
			xsk_enter_rxtx_offsets((struct xdp_ring_offset_v1 *)
					       &off.tx);
			xsk_enter_umem_offsets((struct xdp_ring_offset_v1 *)
					       &off.fr);
			xsk_enter_umem_offsets((struct xdp_ring_offset_v1 *)
					       &off.cr);
			off.rx.flags = offsetof(struct xdp_rxtx_ring,
						ptrs.flags);
			off.tx.flags = offsetof(struct xdp_rxtx_ring,
						ptrs.flags);
			off.fr.flags = offsetof(struct xdp_umem_ring,
						ptrs.flags);
			off.cr.flags = offsetof(struct xdp_umem_ring,
						ptrs.flags);

			len = sizeof(off);
			to_copy = &off;
		} else {
			xsk_enter_rxtx_offsets(&off_v1.rx);
			xsk_enter_rxtx_offsets(&off_v1.tx);
			xsk_enter_umem_offsets(&off_v1.fr);
			xsk_enter_umem_offsets(&off_v1.cr);

			len = sizeof(off_v1);
			to_copy = &off_v1;
		}

		if (copy_to_user(optval, to_copy, len))
			return -EFAULT;
		if (put_user(len, optlen))
			return -EFAULT;

		return 0;
	}
	case XDP_OPTIONS:
	{
		struct xdp_options opts = {};

		if (len < sizeof(opts))
			return -EINVAL;

		mutex_lock(&xs->mutex);
		if (xs->zc)
			opts.flags |= XDP_OPTIONS_ZEROCOPY;
		mutex_unlock(&xs->mutex);

		len = sizeof(opts);
		if (copy_to_user(optval, &opts, len))
			return -EFAULT;
		if (put_user(len, optlen))
			return -EFAULT;

		return 0;
	}
	default:
		break;
	}

	return -EOPNOTSUPP;
}

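/* mmap() exposes one of the four rings to userspace; which ring is
 * selected by the page offset constants passed in vm_pgoff.
 */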
static int xsk_mmap(struct file *file, struct socket *sock,
		    struct vm_area_struct *vma)
{
	loff_t offset = (loff_t)vma->vm_pgoff << PAGE_SHIFT;
	unsigned long size = vma->vm_end - vma->vm_start;
	struct xdp_sock *xs = xdp_sk(sock->sk);
	struct xsk_queue *q = NULL;
	struct xdp_umem *umem;
	unsigned long pfn;
	struct page *qpg;

	if (xs->state != XSK_READY)
		return -EBUSY;

	if (offset == XDP_PGOFF_RX_RING) {
		q = READ_ONCE(xs->rx);
	} else if (offset == XDP_PGOFF_TX_RING) {
		q = READ_ONCE(xs->tx);
	} else {
		umem = READ_ONCE(xs->umem);
		if (!umem)
			return -EINVAL;

		/* Matches the smp_wmb() in XDP_UMEM_REG */
		smp_rmb();
		if (offset == XDP_UMEM_PGOFF_FILL_RING)
			q = READ_ONCE(umem->fq);
		else if (offset == XDP_UMEM_PGOFF_COMPLETION_RING)
			q = READ_ONCE(umem->cq);
	}

	if (!q)
		return -EINVAL;

	/* Matches the smp_wmb() in xsk_init_queue */
	smp_rmb();
	qpg = virt_to_head_page(q->ring);
	if (size > (PAGE_SIZE << compound_order(qpg)))
		return -EINVAL;

	pfn = virt_to_phys(q->ring) >> PAGE_SHIFT;
	return remap_pfn_range(vma, vma->vm_start, pfn,
			       size, vma->vm_page_prot);
}

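/* Netdev notifier: when the device a socket is bound to is unregistered,
 * flag the socket with ENETDOWN, unbind it and clear the umem's device
 * references.
 */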
static int xsk_notifier(struct notifier_block *this,
			unsigned long msg, void *ptr)
{
	struct net_device *dev = netdev_notifier_info_to_dev(ptr);
	struct net *net = dev_net(dev);
	struct sock *sk;

	switch (msg) {
	case NETDEV_UNREGISTER:
		mutex_lock(&net->xdp.lock);
		sk_for_each(sk, &net->xdp.list) {
			struct xdp_sock *xs = xdp_sk(sk);

			mutex_lock(&xs->mutex);
			if (xs->dev == dev) {
				sk->sk_err = ENETDOWN;
				if (!sock_flag(sk, SOCK_DEAD))
					sk->sk_error_report(sk);

				xsk_unbind_dev(xs);

				/* Clear device references in umem. */
				xdp_umem_clear_dev(xs->umem);
			}
			mutex_unlock(&xs->mutex);
		}
		mutex_unlock(&net->xdp.lock);
		break;
	}
	return NOTIFY_DONE;
}

static struct proto xsk_proto = {
	.name =		"XDP",
	.owner =	THIS_MODULE,
	.obj_size =	sizeof(struct xdp_sock),
};

static const struct proto_ops xsk_proto_ops = {
	.family		= PF_XDP,
	.owner		= THIS_MODULE,
	.release	= xsk_release,
	.bind		= xsk_bind,
	.connect	= sock_no_connect,
	.socketpair	= sock_no_socketpair,
	.accept		= sock_no_accept,
	.getname	= sock_no_getname,
	.poll		= xsk_poll,
	.ioctl		= sock_no_ioctl,
	.listen		= sock_no_listen,
	.shutdown	= sock_no_shutdown,
	.setsockopt	= xsk_setsockopt,
	.getsockopt	= xsk_getsockopt,
	.sendmsg	= xsk_sendmsg,
	.recvmsg	= sock_no_recvmsg,
	.mmap		= xsk_mmap,
	.sendpage	= sock_no_sendpage,
};

static void xsk_destruct(struct sock *sk)
{
	struct xdp_sock *xs = xdp_sk(sk);

	if (!sock_flag(sk, SOCK_DEAD))
		return;

	xdp_put_umem(xs->umem);

	sk_refcnt_debug_dec(sk);
}

static int xsk_create(struct net *net, struct socket *sock, int protocol,
		      int kern)
{
	struct sock *sk;
	struct xdp_sock *xs;

	if (!ns_capable(net->user_ns, CAP_NET_RAW))
		return -EPERM;
	if (sock->type != SOCK_RAW)
		return -ESOCKTNOSUPPORT;

	if (protocol)
		return -EPROTONOSUPPORT;

	sock->state = SS_UNCONNECTED;

	sk = sk_alloc(net, PF_XDP, GFP_KERNEL, &xsk_proto, kern);
	if (!sk)
		return -ENOBUFS;

	sock->ops = &xsk_proto_ops;

	sock_init_data(sock, sk);

	sk->sk_family = PF_XDP;

	sk->sk_destruct = xsk_destruct;
	sk_refcnt_debug_inc(sk);

	sock_set_flag(sk, SOCK_RCU_FREE);

	xs = xdp_sk(sk);
	xs->state = XSK_READY;
	mutex_init(&xs->mutex);
	spin_lock_init(&xs->rx_lock);
	spin_lock_init(&xs->tx_completion_lock);

	INIT_LIST_HEAD(&xs->map_list);
	spin_lock_init(&xs->map_list_lock);

	mutex_lock(&net->xdp.lock);
	sk_add_node_rcu(sk, &net->xdp.list);
	mutex_unlock(&net->xdp.lock);

	local_bh_disable();
	sock_prot_inuse_add(net, &xsk_proto, 1);
	local_bh_enable();

	return 0;
}

static const struct net_proto_family xsk_family_ops = {
	.family = PF_XDP,
	.create = xsk_create,
	.owner	= THIS_MODULE,
};

static struct notifier_block xsk_netdev_notifier = {
	.notifier_call	= xsk_notifier,
};

static int __net_init xsk_net_init(struct net *net)
{
	mutex_init(&net->xdp.lock);
	INIT_HLIST_HEAD(&net->xdp.list);
	return 0;
}

static void __net_exit xsk_net_exit(struct net *net)
{
	WARN_ON_ONCE(!hlist_empty(&net->xdp.list));
}

static struct pernet_operations xsk_net_ops = {
	.init = xsk_net_init,
	.exit = xsk_net_exit,
};

static int __init xsk_init(void)
{
	int err;

	err = proto_register(&xsk_proto, 0 /* no slab */);
	if (err)
		goto out;

	err = sock_register(&xsk_family_ops);
	if (err)
		goto out_proto;

	err = register_pernet_subsys(&xsk_net_ops);
	if (err)
		goto out_sk;

	err = register_netdevice_notifier(&xsk_netdev_notifier);
	if (err)
		goto out_pernet;

	return 0;

out_pernet:
	unregister_pernet_subsys(&xsk_net_ops);
out_sk:
	sock_unregister(PF_XDP);
out_proto:
	proto_unregister(&xsk_proto);
out:
	return err;
}

fs_initcall(xsk_init);