// SPDX-License-Identifier: GPL-2.0
/* XDP sockets
 *
 * AF_XDP sockets allow a channel between XDP programs and userspace
 * applications.
 * Copyright(c) 2018 Intel Corporation.
 *
 * Author(s): Björn Töpel <bjorn.topel@intel.com>
 *	      Magnus Karlsson <magnus.karlsson@intel.com>
 */

#define pr_fmt(fmt) "AF_XDP: %s: " fmt, __func__
#include <linux/if_xdp.h>
#include <linux/init.h>
#include <linux/sched/mm.h>
#include <linux/sched/signal.h>
#include <linux/sched/task.h>
#include <linux/socket.h>
#include <linux/file.h>
#include <linux/uaccess.h>
#include <linux/net.h>
#include <linux/netdevice.h>
#include <linux/rculist.h>
#include <net/xdp_sock_drv.h>
#include <net/xdp.h>

#include "xsk_queue.h"
#include "xdp_umem.h"
#include "xsk.h"
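/* Upper bound on the number of Tx descriptors handled per xsk_generic_xmit()
 * call, so that a single sendmsg() on the copy-mode path cannot monopolize
 * the CPU; the send path backs off with -EAGAIN once the budget is exhausted.
 */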
#define TX_BATCH_SIZE 16

static DEFINE_PER_CPU(struct list_head, xskmap_flush_list);

bool xsk_is_setup_for_bpf_map(struct xdp_sock *xs)
{
	return READ_ONCE(xs->rx) && READ_ONCE(xs->umem) &&
		READ_ONCE(xs->umem->fq);
}
void xsk_set_rx_need_wakeup(struct xdp_umem *umem)
{
	if (umem->need_wakeup & XDP_WAKEUP_RX)
		return;

	umem->fq->ring->flags |= XDP_RING_NEED_WAKEUP;
	umem->need_wakeup |= XDP_WAKEUP_RX;
}
EXPORT_SYMBOL(xsk_set_rx_need_wakeup);

void xsk_set_tx_need_wakeup(struct xdp_umem *umem)
{
	struct xdp_sock *xs;

	if (umem->need_wakeup & XDP_WAKEUP_TX)
		return;

	rcu_read_lock();
	list_for_each_entry_rcu(xs, &umem->xsk_tx_list, list) {
		xs->tx->ring->flags |= XDP_RING_NEED_WAKEUP;
	}
	rcu_read_unlock();

	umem->need_wakeup |= XDP_WAKEUP_TX;
}
EXPORT_SYMBOL(xsk_set_tx_need_wakeup);

void xsk_clear_rx_need_wakeup(struct xdp_umem *umem)
{
	if (!(umem->need_wakeup & XDP_WAKEUP_RX))
		return;

	umem->fq->ring->flags &= ~XDP_RING_NEED_WAKEUP;
	umem->need_wakeup &= ~XDP_WAKEUP_RX;
}
EXPORT_SYMBOL(xsk_clear_rx_need_wakeup);

void xsk_clear_tx_need_wakeup(struct xdp_umem *umem)
{
	struct xdp_sock *xs;

	if (!(umem->need_wakeup & XDP_WAKEUP_TX))
		return;

	rcu_read_lock();
	list_for_each_entry_rcu(xs, &umem->xsk_tx_list, list) {
		xs->tx->ring->flags &= ~XDP_RING_NEED_WAKEUP;
	}
	rcu_read_unlock();

	umem->need_wakeup &= ~XDP_WAKEUP_TX;
}
EXPORT_SYMBOL(xsk_clear_tx_need_wakeup);
bool xsk_umem_uses_need_wakeup(struct xdp_umem *umem)
{
	return umem->flags & XDP_UMEM_USES_NEED_WAKEUP;
}
EXPORT_SYMBOL(xsk_umem_uses_need_wakeup);

void xp_release(struct xdp_buff_xsk *xskb)
{
	xskb->pool->free_heads[xskb->pool->free_heads_cnt++] = xskb;
}
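/* Derive the umem address ("handle") posted on the Rx ring from an
 * xdp_buff_xsk. In aligned mode the current data offset is simply added to
 * the chunk's original address; in unaligned mode the offset travels in the
 * upper bits of the handle (shifted by XSK_UNALIGNED_BUF_OFFSET_SHIFT) so the
 * base address in the lower bits stays intact.
 */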
static u64 xp_get_handle(struct xdp_buff_xsk *xskb)
{
	u64 offset = xskb->xdp.data - xskb->xdp.data_hard_start;

	offset += xskb->pool->headroom;
	if (!xskb->pool->unaligned)
		return xskb->orig_addr + offset;
	return xskb->orig_addr + (offset << XSK_UNALIGNED_BUF_OFFSET_SHIFT);
}

static int __xsk_rcv_zc(struct xdp_sock *xs, struct xdp_buff *xdp, u32 len)
{
	struct xdp_buff_xsk *xskb = container_of(xdp, struct xdp_buff_xsk, xdp);
	u64 addr;
	int err;

	addr = xp_get_handle(xskb);
	err = xskq_prod_reserve_desc(xs->rx, addr, len);
	if (err) {
		xs->rx_dropped++;
		return err;
	}

	xp_release(xskb);
	return 0;
}
static void xsk_copy_xdp(struct xdp_buff *to, struct xdp_buff *from, u32 len)
{
	void *from_buf, *to_buf;
	u32 metalen;

	if (unlikely(xdp_data_meta_unsupported(from))) {
		from_buf = from->data;
		to_buf = to->data;
		metalen = 0;
	} else {
		from_buf = from->data_meta;
		metalen = from->data - from->data_meta;
		to_buf = to->data - metalen;
	}

	memcpy(to_buf, from_buf, len + metalen);
}

static int __xsk_rcv(struct xdp_sock *xs, struct xdp_buff *xdp, u32 len,
		     bool explicit_free)
{
	struct xdp_buff *xsk_xdp;
	int err;

	if (len > xsk_umem_get_rx_frame_size(xs->umem)) {
		xs->rx_dropped++;
		return -ENOSPC;
	}

	xsk_xdp = xsk_buff_alloc(xs->umem);
	if (!xsk_xdp) {
		xs->rx_dropped++;
		return -ENOSPC;
	}

	xsk_copy_xdp(xsk_xdp, xdp, len);
	err = __xsk_rcv_zc(xs, xsk_xdp, len);
	if (err) {
		xsk_buff_free(xsk_xdp);
		return err;
	}
	if (explicit_free)
		xdp_return_buff(xdp);
	return 0;
}
static bool xsk_is_bound(struct xdp_sock *xs)
{
	if (READ_ONCE(xs->state) == XSK_BOUND) {
		/* Matches smp_wmb() in bind(). */
		smp_rmb();
		return true;
	}
	return false;
}

static int xsk_rcv(struct xdp_sock *xs, struct xdp_buff *xdp,
		   bool explicit_free)
{
	u32 len;

	if (!xsk_is_bound(xs))
		return -EINVAL;

	if (xs->dev != xdp->rxq->dev || xs->queue_id != xdp->rxq->queue_index)
		return -EINVAL;

	len = xdp->data_end - xdp->data;

	return xdp->rxq->mem.type == MEM_TYPE_XSK_BUFF_POOL ?
		__xsk_rcv_zc(xs, xdp, len) :
		__xsk_rcv(xs, xdp, len, explicit_free);
}
static void xsk_flush(struct xdp_sock *xs)
{
	xskq_prod_submit(xs->rx);
	__xskq_cons_release(xs->umem->fq);
	sock_def_readable(&xs->sk);
}

int xsk_generic_rcv(struct xdp_sock *xs, struct xdp_buff *xdp)
{
	int err;

	spin_lock_bh(&xs->rx_lock);
	err = xsk_rcv(xs, xdp, false);
	xsk_flush(xs);
	spin_unlock_bh(&xs->rx_lock);
	return err;
}
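/* Receive path used when the socket is the target of an XDP_REDIRECT via the
 * xskmap: the frame is queued on the socket's Rx ring and the socket is put
 * on this CPU's flush list, so that __xsk_map_flush() (typically run from
 * xdp_do_flush() at the end of a NAPI poll) publishes the producer index and
 * wakes the receiver once per batch instead of once per packet.
 */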
int __xsk_map_redirect(struct xdp_sock *xs, struct xdp_buff *xdp)
{
	struct list_head *flush_list = this_cpu_ptr(&xskmap_flush_list);
	int err;

	err = xsk_rcv(xs, xdp, true);
	if (err)
		return err;

	if (!xs->flush_node.prev)
		list_add(&xs->flush_node, flush_list);

	return 0;
}

void __xsk_map_flush(void)
{
	struct list_head *flush_list = this_cpu_ptr(&xskmap_flush_list);
	struct xdp_sock *xs, *tmp;

	list_for_each_entry_safe(xs, tmp, flush_list, flush_node) {
		xsk_flush(xs);
		__list_del_clearprev(&xs->flush_node);
	}
}
void xsk_umem_complete_tx(struct xdp_umem *umem, u32 nb_entries)
{
	xskq_prod_submit_n(umem->cq, nb_entries);
}
EXPORT_SYMBOL(xsk_umem_complete_tx);

void xsk_umem_consume_tx_done(struct xdp_umem *umem)
{
	struct xdp_sock *xs;

	rcu_read_lock();
	list_for_each_entry_rcu(xs, &umem->xsk_tx_list, list) {
		__xskq_cons_release(xs->tx);
		xs->sk.sk_write_space(&xs->sk);
	}
	rcu_read_unlock();
}
EXPORT_SYMBOL(xsk_umem_consume_tx_done);
bool xsk_umem_consume_tx(struct xdp_umem *umem, struct xdp_desc *desc)
{
	struct xdp_sock *xs;

	rcu_read_lock();
	list_for_each_entry_rcu(xs, &umem->xsk_tx_list, list) {
		if (!xskq_cons_peek_desc(xs->tx, desc, umem))
			continue;

		/* This is the backpressure mechanism for the Tx path.
		 * Reserve space in the completion queue and only proceed
		 * if there is space in it. This avoids having to implement
		 * any buffering in the Tx path.
		 */
		if (xskq_prod_reserve_addr(umem->cq, desc->addr))
			goto out;

		xskq_cons_release(xs->tx);
		rcu_read_unlock();
		return true;
	}

out:
	rcu_read_unlock();
	return false;
}
EXPORT_SYMBOL(xsk_umem_consume_tx);
static int xsk_wakeup(struct xdp_sock *xs, u8 flags)
{
	struct net_device *dev = xs->dev;
	int err;

	rcu_read_lock();
	err = dev->netdev_ops->ndo_xsk_wakeup(dev, xs->queue_id, flags);
	rcu_read_unlock();

	return err;
}

static int xsk_zc_xmit(struct xdp_sock *xs)
{
	return xsk_wakeup(xs, XDP_WAKEUP_TX);
}

static void xsk_destruct_skb(struct sk_buff *skb)
{
	u64 addr = (u64)(long)skb_shinfo(skb)->destructor_arg;
	struct xdp_sock *xs = xdp_sk(skb->sk);
	unsigned long flags;

	spin_lock_irqsave(&xs->tx_completion_lock, flags);
	xskq_prod_submit_addr(xs->umem->cq, addr);
	spin_unlock_irqrestore(&xs->tx_completion_lock, flags);

	sock_wfree(skb);
}
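/* Copy-mode ("generic") Tx path: each descriptor taken off the Tx ring is
 * copied into a freshly allocated skb and handed straight to the driver's Tx
 * queue with dev_direct_xmit(). A completion-queue slot is reserved up front
 * as backpressure; the address is published from xsk_destruct_skb() once the
 * skb has been consumed.
 */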
static int xsk_generic_xmit(struct sock *sk)
{
	struct xdp_sock *xs = xdp_sk(sk);
	u32 max_batch = TX_BATCH_SIZE;
	bool sent_frame = false;
	struct xdp_desc desc;
	struct sk_buff *skb;
	int err = 0;

	mutex_lock(&xs->mutex);

	if (xs->queue_id >= xs->dev->real_num_tx_queues)
		goto out;

	while (xskq_cons_peek_desc(xs->tx, &desc, xs->umem)) {
		char *buffer;
		u64 addr;
		u32 len;

		if (max_batch-- == 0) {
			err = -EAGAIN;
			goto out;
		}

		len = desc.len;
		skb = sock_alloc_send_skb(sk, len, 1, &err);
		if (unlikely(!skb))
			goto out;

		skb_put(skb, len);
		addr = desc.addr;
		buffer = xsk_buff_raw_get_data(xs->umem, addr);
		err = skb_store_bits(skb, 0, buffer, len);
		/* This is the backpressure mechanism for the Tx path.
		 * Reserve space in the completion queue and only proceed
		 * if there is space in it. This avoids having to implement
		 * any buffering in the Tx path.
		 */
		if (unlikely(err) || xskq_prod_reserve(xs->umem->cq)) {
			kfree_skb(skb);
			goto out;
		}

		skb->dev = xs->dev;
		skb->priority = sk->sk_priority;
		skb->mark = sk->sk_mark;
		skb_shinfo(skb)->destructor_arg = (void *)(long)desc.addr;
		skb->destructor = xsk_destruct_skb;

		err = dev_direct_xmit(skb, xs->queue_id);
		xskq_cons_release(xs->tx);
		/* Ignore NET_XMIT_CN as packet might have been sent */
		if (err == NET_XMIT_DROP || err == NETDEV_TX_BUSY) {
			/* SKB completed but not sent */
			err = -EBUSY;
			goto out;
		}

		sent_frame = true;
	}

out:
	if (sent_frame)
		sk->sk_write_space(sk);

	mutex_unlock(&xs->mutex);
	return err;
}
static int __xsk_sendmsg(struct sock *sk)
{
	struct xdp_sock *xs = xdp_sk(sk);

	if (unlikely(!(xs->dev->flags & IFF_UP)))
		return -ENETDOWN;
	if (unlikely(!xs->tx))
		return -ENOBUFS;

	return xs->zc ? xsk_zc_xmit(xs) : xsk_generic_xmit(sk);
}

static int xsk_sendmsg(struct socket *sock, struct msghdr *m, size_t total_len)
{
	bool need_wait = !(m->msg_flags & MSG_DONTWAIT);
	struct sock *sk = sock->sk;
	struct xdp_sock *xs = xdp_sk(sk);

	if (unlikely(!xsk_is_bound(xs)))
		return -ENXIO;
	if (unlikely(need_wait))
		return -EOPNOTSUPP;

	return __xsk_sendmsg(sk);
}
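/* poll() does double duty for AF_XDP: it reports ring state (Rx ring
 * non-empty, Tx ring not full) and, when the driver has requested wakeups via
 * the need_wakeup flags, it either kicks the driver (zero-copy mode) or
 * drives the copy-mode Tx path directly.
 */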
static __poll_t xsk_poll(struct file *file, struct socket *sock,
			 struct poll_table_struct *wait)
{
	__poll_t mask = datagram_poll(file, sock, wait);
	struct sock *sk = sock->sk;
	struct xdp_sock *xs = xdp_sk(sk);
	struct xdp_umem *umem;

	if (unlikely(!xsk_is_bound(xs)))
		return mask;

	umem = xs->umem;

	if (umem->need_wakeup) {
		if (xs->zc)
			xsk_wakeup(xs, umem->need_wakeup);
		else
			/* Poll needs to drive Tx also in copy mode */
			__xsk_sendmsg(sk);
	}

	if (xs->rx && !xskq_prod_is_empty(xs->rx))
		mask |= EPOLLIN | EPOLLRDNORM;
	if (xs->tx && !xskq_cons_is_full(xs->tx))
		mask |= EPOLLOUT | EPOLLWRNORM;

	return mask;
}
static int xsk_init_queue(u32 entries, struct xsk_queue **queue,
			  bool umem_queue)
{
	struct xsk_queue *q;

	if (entries == 0 || *queue || !is_power_of_2(entries))
		return -EINVAL;

	q = xskq_create(entries, umem_queue);
	if (!q)
		return -ENOMEM;

	/* Make sure queue is ready before it can be seen by others */
	smp_wmb();
	WRITE_ONCE(*queue, q);
	return 0;
}
static void xsk_unbind_dev(struct xdp_sock *xs)
{
	struct net_device *dev = xs->dev;

	if (xs->state != XSK_BOUND)
		return;
	WRITE_ONCE(xs->state, XSK_UNBOUND);

	/* Wait for driver to stop using the xdp socket. */
	xdp_del_sk_umem(xs->umem, xs);
	xs->dev = NULL;
	synchronize_net();
	dev_put(dev);
}
static struct xsk_map *xsk_get_map_list_entry(struct xdp_sock *xs,
					      struct xdp_sock ***map_entry)
{
	struct xsk_map *map = NULL;
	struct xsk_map_node *node;

	*map_entry = NULL;

	spin_lock_bh(&xs->map_list_lock);
	node = list_first_entry_or_null(&xs->map_list, struct xsk_map_node,
					node);
	if (node) {
		WARN_ON(xsk_map_inc(node->map));
		map = node->map;
		*map_entry = node->map_entry;
	}
	spin_unlock_bh(&xs->map_list_lock);
	return map;
}
static void xsk_delete_from_maps(struct xdp_sock *xs)
{
	/* This function removes the current XDP socket from all the
	 * maps it resides in. We need to take extra care here, due to
	 * the two locks involved. Each map has a lock synchronizing
	 * updates to the entries, and each socket has a lock that
	 * synchronizes access to the list of maps (map_list). For
	 * deadlock avoidance the locks need to be taken in the order
	 * "map lock"->"socket map list lock". We start off by
	 * accessing the socket map list, and take a reference to the
	 * map to guarantee existence between the
	 * xsk_get_map_list_entry() and xsk_map_try_sock_delete()
	 * calls. Then we ask the map to remove the socket, which
	 * tries to remove the socket from the map. Note that there
	 * might be updates to the map between
	 * xsk_get_map_list_entry() and xsk_map_try_sock_delete().
	 */
	struct xdp_sock **map_entry = NULL;
	struct xsk_map *map;

	while ((map = xsk_get_map_list_entry(xs, &map_entry))) {
		xsk_map_try_sock_delete(map, xs, map_entry);
		xsk_map_put(map);
	}
}
static int xsk_release(struct socket *sock)
{
	struct sock *sk = sock->sk;
	struct xdp_sock *xs = xdp_sk(sk);
	struct net *net;

	if (!sk)
		return 0;

	net = sock_net(sk);

	mutex_lock(&net->xdp.lock);
	sk_del_node_init_rcu(sk);
	mutex_unlock(&net->xdp.lock);

	local_bh_disable();
	sock_prot_inuse_add(net, sk->sk_prot, -1);
	local_bh_enable();

	xsk_delete_from_maps(xs);
	mutex_lock(&xs->mutex);
	xsk_unbind_dev(xs);
	mutex_unlock(&xs->mutex);

	xskq_destroy(xs->rx);
	xskq_destroy(xs->tx);

	sock_orphan(sk);
	sock->sk = NULL;

	sk_refcnt_debug_release(sk);
	sock_put(sk);

	return 0;
}
static struct socket *xsk_lookup_xsk_from_fd(int fd)
{
	struct socket *sock;
	int err;

	sock = sockfd_lookup(fd, &err);
	if (!sock)
		return ERR_PTR(-ENOTSOCK);

	if (sock->sk->sk_family != PF_XDP) {
		sockfd_put(sock);
		return ERR_PTR(-ENOPROTOOPT);
	}

	return sock;
}
static int xsk_bind(struct socket *sock, struct sockaddr *addr, int addr_len)
{
	struct sockaddr_xdp *sxdp = (struct sockaddr_xdp *)addr;
	struct sock *sk = sock->sk;
	struct xdp_sock *xs = xdp_sk(sk);
	struct net_device *dev;
	u32 flags, qid;
	int err = 0;

	if (addr_len < sizeof(struct sockaddr_xdp))
		return -EINVAL;
	if (sxdp->sxdp_family != AF_XDP)
		return -EINVAL;

	flags = sxdp->sxdp_flags;
	if (flags & ~(XDP_SHARED_UMEM | XDP_COPY | XDP_ZEROCOPY |
		      XDP_USE_NEED_WAKEUP))
		return -EINVAL;

	mutex_lock(&xs->mutex);
	if (xs->state != XSK_READY) {
		err = -EBUSY;
		goto out_release;
	}

	dev = dev_get_by_index(sock_net(sk), sxdp->sxdp_ifindex);
	if (!dev) {
		err = -ENODEV;
		goto out_release;
	}

	if (!xs->rx && !xs->tx) {
		err = -EINVAL;
		goto out_unlock;
	}

	qid = sxdp->sxdp_queue_id;

	if (flags & XDP_SHARED_UMEM) {
		struct xdp_sock *umem_xs;
		struct socket *sock;

		if ((flags & XDP_COPY) || (flags & XDP_ZEROCOPY) ||
		    (flags & XDP_USE_NEED_WAKEUP)) {
			/* Cannot specify flags for shared sockets. */
			err = -EINVAL;
			goto out_unlock;
		}

		if (xs->umem) {
			/* We already have our own. */
			err = -EINVAL;
			goto out_unlock;
		}

		sock = xsk_lookup_xsk_from_fd(sxdp->sxdp_shared_umem_fd);
		if (IS_ERR(sock)) {
			err = PTR_ERR(sock);
			goto out_unlock;
		}

		umem_xs = xdp_sk(sock->sk);
		if (!xsk_is_bound(umem_xs)) {
			err = -EBADF;
			sockfd_put(sock);
			goto out_unlock;
		}
		if (umem_xs->dev != dev || umem_xs->queue_id != qid) {
			err = -EINVAL;
			sockfd_put(sock);
			goto out_unlock;
		}

		xdp_get_umem(umem_xs->umem);
		WRITE_ONCE(xs->umem, umem_xs->umem);
		sockfd_put(sock);
	} else if (!xs->umem || !xdp_umem_validate_queues(xs->umem)) {
		err = -EINVAL;
		goto out_unlock;
	} else {
		/* This xsk has its own umem. */
		err = xdp_umem_assign_dev(xs->umem, dev, qid, flags);
		if (err)
			goto out_unlock;
	}

	xs->dev = dev;
	xs->zc = xs->umem->zc;
	xs->queue_id = qid;
	xdp_add_sk_umem(xs->umem, xs);

out_unlock:
	if (err) {
		dev_put(dev);
	} else {
		/* Matches smp_rmb() in bind() for shared umem
		 * sockets, and xsk_is_bound().
		 */
		smp_wmb();
		WRITE_ONCE(xs->state, XSK_BOUND);
	}
out_release:
	mutex_unlock(&xs->mutex);
	return err;
}
struct xdp_umem_reg_v1 {
	__u64 addr; /* Start of packet data area */
	__u64 len; /* Length of packet data area */
	__u32 chunk_size;
	__u32 headroom;
};
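/* The _v1 layout above predates the flags field of the current struct
 * xdp_umem_reg and is kept so XDP_UMEM_REG keeps working for applications
 * built against older headers; xsk_setsockopt() picks the copy size from the
 * optlen the application passes in.
 */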
static int xsk_setsockopt(struct socket *sock, int level, int optname,
			  char __user *optval, unsigned int optlen)
{
	struct sock *sk = sock->sk;
	struct xdp_sock *xs = xdp_sk(sk);
	int err;

	if (level != SOL_XDP)
		return -ENOPROTOOPT;

	switch (optname) {
	case XDP_RX_RING:
	case XDP_TX_RING:
	{
		struct xsk_queue **q;
		int entries;

		if (optlen < sizeof(entries))
			return -EINVAL;
		if (copy_from_user(&entries, optval, sizeof(entries)))
			return -EFAULT;

		mutex_lock(&xs->mutex);
		if (xs->state != XSK_READY) {
			mutex_unlock(&xs->mutex);
			return -EBUSY;
		}
		q = (optname == XDP_TX_RING) ? &xs->tx : &xs->rx;
		err = xsk_init_queue(entries, q, false);
		if (!err && optname == XDP_TX_RING)
			/* Tx needs to be explicitly woken up the first time */
			xs->tx->ring->flags |= XDP_RING_NEED_WAKEUP;
		mutex_unlock(&xs->mutex);
		return err;
	}
	case XDP_UMEM_REG:
	{
		size_t mr_size = sizeof(struct xdp_umem_reg);
		struct xdp_umem_reg mr = {};
		struct xdp_umem *umem;

		if (optlen < sizeof(struct xdp_umem_reg_v1))
			return -EINVAL;
		else if (optlen < sizeof(mr))
			mr_size = sizeof(struct xdp_umem_reg_v1);

		if (copy_from_user(&mr, optval, mr_size))
			return -EFAULT;

		mutex_lock(&xs->mutex);
		if (xs->state != XSK_READY || xs->umem) {
			mutex_unlock(&xs->mutex);
			return -EBUSY;
		}

		umem = xdp_umem_create(&mr);
		if (IS_ERR(umem)) {
			mutex_unlock(&xs->mutex);
			return PTR_ERR(umem);
		}

		/* Make sure umem is ready before it can be seen by others */
		smp_wmb();
		WRITE_ONCE(xs->umem, umem);
		mutex_unlock(&xs->mutex);
		return 0;
	}
	case XDP_UMEM_FILL_RING:
	case XDP_UMEM_COMPLETION_RING:
	{
		struct xsk_queue **q;
		int entries;

		if (copy_from_user(&entries, optval, sizeof(entries)))
			return -EFAULT;

		mutex_lock(&xs->mutex);
		if (xs->state != XSK_READY) {
			mutex_unlock(&xs->mutex);
			return -EBUSY;
		}
		if (!xs->umem) {
			mutex_unlock(&xs->mutex);
			return -EINVAL;
		}

		q = (optname == XDP_UMEM_FILL_RING) ? &xs->umem->fq :
			&xs->umem->cq;
		err = xsk_init_queue(entries, q, true);
		if (optname == XDP_UMEM_FILL_RING)
			xp_set_fq(xs->umem->pool, *q);
		mutex_unlock(&xs->mutex);
		return err;
	}
	default:
		break;
	}

	return -ENOPROTOOPT;
}
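/* Helpers that fill in the flags-less xdp_ring_offset_v1 layout. The
 * XDP_MMAP_OFFSETS handler below uses them for both layouts and only appends
 * the flags offsets when the caller's buffer is large enough for the current
 * struct xdp_mmap_offsets.
 */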
static void xsk_enter_rxtx_offsets(struct xdp_ring_offset_v1 *ring)
{
	ring->producer = offsetof(struct xdp_rxtx_ring, ptrs.producer);
	ring->consumer = offsetof(struct xdp_rxtx_ring, ptrs.consumer);
	ring->desc = offsetof(struct xdp_rxtx_ring, desc);
}

static void xsk_enter_umem_offsets(struct xdp_ring_offset_v1 *ring)
{
	ring->producer = offsetof(struct xdp_umem_ring, ptrs.producer);
	ring->consumer = offsetof(struct xdp_umem_ring, ptrs.consumer);
	ring->desc = offsetof(struct xdp_umem_ring, desc);
}
static int xsk_getsockopt(struct socket *sock, int level, int optname,
			  char __user *optval, int __user *optlen)
{
	struct sock *sk = sock->sk;
	struct xdp_sock *xs = xdp_sk(sk);
	int len;

	if (level != SOL_XDP)
		return -ENOPROTOOPT;

	if (get_user(len, optlen))
		return -EFAULT;
	if (len < 0)
		return -EINVAL;

	switch (optname) {
	case XDP_STATISTICS:
	{
		struct xdp_statistics stats;

		if (len < sizeof(stats))
			return -EINVAL;

		mutex_lock(&xs->mutex);
		stats.rx_dropped = xs->rx_dropped;
		stats.rx_invalid_descs = xskq_nb_invalid_descs(xs->rx);
		stats.tx_invalid_descs = xskq_nb_invalid_descs(xs->tx);
		mutex_unlock(&xs->mutex);

		if (copy_to_user(optval, &stats, sizeof(stats)))
			return -EFAULT;
		if (put_user(sizeof(stats), optlen))
			return -EFAULT;

		return 0;
	}
	case XDP_MMAP_OFFSETS:
	{
		struct xdp_mmap_offsets off;
		struct xdp_mmap_offsets_v1 off_v1;
		bool flags_supported = true;
		void *to_copy;

		if (len < sizeof(off_v1))
			return -EINVAL;
		else if (len < sizeof(off))
			flags_supported = false;

		if (flags_supported) {
			/* xdp_ring_offset is identical to xdp_ring_offset_v1
			 * except for the flags field added to the end.
			 */
			xsk_enter_rxtx_offsets((struct xdp_ring_offset_v1 *)
					       &off.rx);
			xsk_enter_rxtx_offsets((struct xdp_ring_offset_v1 *)
					       &off.tx);
			xsk_enter_umem_offsets((struct xdp_ring_offset_v1 *)
					       &off.fr);
			xsk_enter_umem_offsets((struct xdp_ring_offset_v1 *)
					       &off.cr);
			off.rx.flags = offsetof(struct xdp_rxtx_ring,
						ptrs.flags);
			off.tx.flags = offsetof(struct xdp_rxtx_ring,
						ptrs.flags);
			off.fr.flags = offsetof(struct xdp_umem_ring,
						ptrs.flags);
			off.cr.flags = offsetof(struct xdp_umem_ring,
						ptrs.flags);

			len = sizeof(off);
			to_copy = &off;
		} else {
			xsk_enter_rxtx_offsets(&off_v1.rx);
			xsk_enter_rxtx_offsets(&off_v1.tx);
			xsk_enter_umem_offsets(&off_v1.fr);
			xsk_enter_umem_offsets(&off_v1.cr);

			len = sizeof(off_v1);
			to_copy = &off_v1;
		}

		if (copy_to_user(optval, to_copy, len))
			return -EFAULT;
		if (put_user(len, optlen))
			return -EFAULT;

		return 0;
	}
	case XDP_OPTIONS:
	{
		struct xdp_options opts = {};

		if (len < sizeof(opts))
			return -EINVAL;

		mutex_lock(&xs->mutex);
		if (xs->zc)
			opts.flags |= XDP_OPTIONS_ZEROCOPY;
		mutex_unlock(&xs->mutex);

		len = sizeof(opts);
		if (copy_to_user(optval, &opts, len))
			return -EFAULT;
		if (put_user(len, optlen))
			return -EFAULT;

		return 0;
	}
	default:
		break;
	}

	return -EOPNOTSUPP;
}
static int xsk_mmap(struct file *file, struct socket *sock,
		    struct vm_area_struct *vma)
{
	loff_t offset = (loff_t)vma->vm_pgoff << PAGE_SHIFT;
	unsigned long size = vma->vm_end - vma->vm_start;
	struct xdp_sock *xs = xdp_sk(sock->sk);
	struct xsk_queue *q = NULL;
	struct xdp_umem *umem;
	unsigned long pfn;
	struct page *qpg;

	if (READ_ONCE(xs->state) != XSK_READY)
		return -EBUSY;

	if (offset == XDP_PGOFF_RX_RING) {
		q = READ_ONCE(xs->rx);
	} else if (offset == XDP_PGOFF_TX_RING) {
		q = READ_ONCE(xs->tx);
	} else {
		umem = READ_ONCE(xs->umem);
		if (!umem)
			return -EINVAL;

		/* Matches the smp_wmb() in XDP_UMEM_REG */
		smp_rmb();
		if (offset == XDP_UMEM_PGOFF_FILL_RING)
			q = READ_ONCE(umem->fq);
		else if (offset == XDP_UMEM_PGOFF_COMPLETION_RING)
			q = READ_ONCE(umem->cq);
	}

	if (!q)
		return -EINVAL;

	/* Matches the smp_wmb() in xsk_init_queue */
	smp_rmb();
	qpg = virt_to_head_page(q->ring);
	if (size > page_size(qpg))
		return -EINVAL;

	pfn = virt_to_phys(q->ring) >> PAGE_SHIFT;
	return remap_pfn_range(vma, vma->vm_start, pfn,
			       size, vma->vm_page_prot);
}
static int xsk_notifier(struct notifier_block *this,
			unsigned long msg, void *ptr)
{
	struct net_device *dev = netdev_notifier_info_to_dev(ptr);
	struct net *net = dev_net(dev);
	struct sock *sk;

	switch (msg) {
	case NETDEV_UNREGISTER:
		mutex_lock(&net->xdp.lock);
		sk_for_each(sk, &net->xdp.list) {
			struct xdp_sock *xs = xdp_sk(sk);

			mutex_lock(&xs->mutex);
			if (xs->dev == dev) {
				sk->sk_err = ENETDOWN;
				if (!sock_flag(sk, SOCK_DEAD))
					sk->sk_error_report(sk);

				xsk_unbind_dev(xs);

				/* Clear device references in umem. */
				xdp_umem_clear_dev(xs->umem);
			}
			mutex_unlock(&xs->mutex);
		}
		mutex_unlock(&net->xdp.lock);
		break;
	}
	return NOTIFY_DONE;
}
static struct proto xsk_proto = {
	.name =		"XDP",
	.owner =	THIS_MODULE,
	.obj_size =	sizeof(struct xdp_sock),
};

static const struct proto_ops xsk_proto_ops = {
	.family		= PF_XDP,
	.owner		= THIS_MODULE,
	.release	= xsk_release,
	.bind		= xsk_bind,
	.connect	= sock_no_connect,
	.socketpair	= sock_no_socketpair,
	.accept		= sock_no_accept,
	.getname	= sock_no_getname,
	.poll		= xsk_poll,
	.ioctl		= sock_no_ioctl,
	.listen		= sock_no_listen,
	.shutdown	= sock_no_shutdown,
	.setsockopt	= xsk_setsockopt,
	.getsockopt	= xsk_getsockopt,
	.sendmsg	= xsk_sendmsg,
	.recvmsg	= sock_no_recvmsg,
	.mmap		= xsk_mmap,
	.sendpage	= sock_no_sendpage,
};
static void xsk_destruct(struct sock *sk)
{
	struct xdp_sock *xs = xdp_sk(sk);

	if (!sock_flag(sk, SOCK_DEAD))
		return;

	xdp_put_umem(xs->umem);

	sk_refcnt_debug_dec(sk);
}
static int xsk_create(struct net *net, struct socket *sock, int protocol,
		      int kern)
{
	struct sock *sk;
	struct xdp_sock *xs;

	if (!ns_capable(net->user_ns, CAP_NET_RAW))
		return -EPERM;
	if (sock->type != SOCK_RAW)
		return -ESOCKTNOSUPPORT;

	if (protocol)
		return -EPROTONOSUPPORT;

	sock->state = SS_UNCONNECTED;

	sk = sk_alloc(net, PF_XDP, GFP_KERNEL, &xsk_proto, kern);
	if (!sk)
		return -ENOBUFS;

	sock->ops = &xsk_proto_ops;

	sock_init_data(sock, sk);

	sk->sk_family = PF_XDP;

	sk->sk_destruct = xsk_destruct;
	sk_refcnt_debug_inc(sk);

	sock_set_flag(sk, SOCK_RCU_FREE);

	xs = xdp_sk(sk);
	xs->state = XSK_READY;
	mutex_init(&xs->mutex);
	spin_lock_init(&xs->rx_lock);
	spin_lock_init(&xs->tx_completion_lock);

	INIT_LIST_HEAD(&xs->map_list);
	spin_lock_init(&xs->map_list_lock);

	mutex_lock(&net->xdp.lock);
	sk_add_node_rcu(sk, &net->xdp.list);
	mutex_unlock(&net->xdp.lock);

	local_bh_disable();
	sock_prot_inuse_add(net, &xsk_proto, 1);
	local_bh_enable();

	return 0;
}
static const struct net_proto_family xsk_family_ops = {
	.family = PF_XDP,
	.create = xsk_create,
	.owner	= THIS_MODULE,
};

static struct notifier_block xsk_netdev_notifier = {
	.notifier_call	= xsk_notifier,
};

static int __net_init xsk_net_init(struct net *net)
{
	mutex_init(&net->xdp.lock);
	INIT_HLIST_HEAD(&net->xdp.list);
	return 0;
}

static void __net_exit xsk_net_exit(struct net *net)
{
	WARN_ON_ONCE(!hlist_empty(&net->xdp.list));
}

static struct pernet_operations xsk_net_ops = {
	.init = xsk_net_init,
	.exit = xsk_net_exit,
};
static int __init xsk_init(void)
{
	int err, cpu;

	err = proto_register(&xsk_proto, 0 /* no slab */);
	if (err)
		goto out;

	err = sock_register(&xsk_family_ops);
	if (err)
		goto out_proto;

	err = register_pernet_subsys(&xsk_net_ops);
	if (err)
		goto out_sk;

	err = register_netdevice_notifier(&xsk_netdev_notifier);
	if (err)
		goto out_pernet;

	for_each_possible_cpu(cpu)
		INIT_LIST_HEAD(&per_cpu(xskmap_flush_list, cpu));

	return 0;

out_pernet:
	unregister_pernet_subsys(&xsk_net_ops);
out_sk:
	sock_unregister(PF_XDP);
out_proto:
	proto_unregister(&xsk_proto);
out:
	return err;
}

fs_initcall(xsk_init);