// SPDX-License-Identifier: GPL-2.0
/* XDP sockets
 *
 * AF_XDP sockets allow a channel between XDP programs and userspace
 * applications.
 * Copyright(c) 2018 Intel Corporation.
 *
 * Author(s): Björn Töpel <bjorn.topel@intel.com>
 *	      Magnus Karlsson <magnus.karlsson@intel.com>
 */

#define pr_fmt(fmt) "AF_XDP: %s: " fmt, __func__

#include <linux/if_xdp.h>
#include <linux/init.h>
#include <linux/sched/mm.h>
#include <linux/sched/signal.h>
#include <linux/sched/task.h>
#include <linux/socket.h>
#include <linux/file.h>
#include <linux/uaccess.h>
#include <linux/net.h>
#include <linux/netdevice.h>
#include <linux/rculist.h>
#include <net/xdp_sock.h>
#include <net/xdp.h>

#include "xsk_queue.h"
#include "xdp_umem.h"

#define TX_BATCH_SIZE 16

static struct xdp_sock *xdp_sk(struct sock *sk)
{
	return (struct xdp_sock *)sk;
}

bool xsk_is_setup_for_bpf_map(struct xdp_sock *xs)
{
	return READ_ONCE(xs->rx) && READ_ONCE(xs->umem) &&
		READ_ONCE(xs->umem->fq);
}

u64 *xsk_umem_peek_addr(struct xdp_umem *umem, u64 *addr)
{
	return xskq_peek_addr(umem->fq, addr);
}
EXPORT_SYMBOL(xsk_umem_peek_addr);

void xsk_umem_discard_addr(struct xdp_umem *umem)
{
	xskq_discard_addr(umem->fq);
}
EXPORT_SYMBOL(xsk_umem_discard_addr);
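
/* Copy-mode RX: grab a chunk address from the fill queue, copy the
 * frame (preceded by any XDP metadata) into it, and post a descriptor
 * on the RX ring. On success the fill entry is consumed and the
 * xdp_buff is returned to the driver; on failure the frame is counted
 * as dropped.
 */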
static int __xsk_rcv(struct xdp_sock *xs, struct xdp_buff *xdp, u32 len)
{
	void *to_buf, *from_buf;
	u32 metalen;
	u64 addr;
	int err;

	if (!xskq_peek_addr(xs->umem->fq, &addr) ||
	    len > xs->umem->chunk_size_nohr - XDP_PACKET_HEADROOM) {
		xs->rx_dropped++;
		return -ENOSPC;
	}

	addr += xs->umem->headroom;

	if (unlikely(xdp_data_meta_unsupported(xdp))) {
		from_buf = xdp->data;
		metalen = 0;
	} else {
		from_buf = xdp->data_meta;
		metalen = xdp->data - xdp->data_meta;
	}

	to_buf = xdp_umem_get_data(xs->umem, addr);
	memcpy(to_buf, from_buf, len + metalen);
	addr += metalen;
	err = xskq_produce_batch_desc(xs->rx, addr, len);
	if (!err) {
		xskq_discard_addr(xs->umem->fq);
		xdp_return_buff(xdp);
		return 0;
	}

	xs->rx_dropped++;
	return err;
}
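
/* Zero-copy RX: the frame is already in umem, so only a descriptor
 * carrying the buffer handle and length goes on the RX ring.
 */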
static int __xsk_rcv_zc(struct xdp_sock *xs, struct xdp_buff *xdp, u32 len)
{
	int err = xskq_produce_batch_desc(xs->rx, (u64)xdp->handle, len);

	if (err)
		xs->rx_dropped++;

	return err;
}

int xsk_rcv(struct xdp_sock *xs, struct xdp_buff *xdp)
{
	u32 len;

	if (xs->dev != xdp->rxq->dev || xs->queue_id != xdp->rxq->queue_index)
		return -EINVAL;

	len = xdp->data_end - xdp->data;

	return (xdp->rxq->mem.type == MEM_TYPE_ZERO_COPY) ?
		__xsk_rcv_zc(xs, xdp, len) : __xsk_rcv(xs, xdp, len);
}

void xsk_flush(struct xdp_sock *xs)
{
	xskq_produce_flush_desc(xs->rx);
	xs->sk.sk_data_ready(&xs->sk);
}
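
/* Generic (SKB mode) RX entry point: same copy scheme as __xsk_rcv(),
 * but the RX ring is flushed and the socket woken immediately instead
 * of waiting for a driver-side flush of batched descriptors.
 */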
int xsk_generic_rcv(struct xdp_sock *xs, struct xdp_buff *xdp)
{
	u32 metalen = xdp->data - xdp->data_meta;
	u32 len = xdp->data_end - xdp->data;
	void *buffer;
	u64 addr;
	int err;

	if (xs->dev != xdp->rxq->dev || xs->queue_id != xdp->rxq->queue_index)
		return -EINVAL;

	if (!xskq_peek_addr(xs->umem->fq, &addr) ||
	    len > xs->umem->chunk_size_nohr - XDP_PACKET_HEADROOM) {
		xs->rx_dropped++;
		return -ENOSPC;
	}

	addr += xs->umem->headroom;

	buffer = xdp_umem_get_data(xs->umem, addr);
	memcpy(buffer, xdp->data_meta, len + metalen);
	addr += metalen;
	err = xskq_produce_batch_desc(xs->rx, addr, len);
	if (!err) {
		xskq_discard_addr(xs->umem->fq);
		xsk_flush(xs);
		return 0;
	}

	xs->rx_dropped++;
	return err;
}

void xsk_umem_complete_tx(struct xdp_umem *umem, u32 nb_entries)
{
	xskq_produce_flush_addr_n(umem->cq, nb_entries);
}
EXPORT_SYMBOL(xsk_umem_complete_tx);

void xsk_umem_consume_tx_done(struct xdp_umem *umem)
{
	struct xdp_sock *xs;

	rcu_read_lock();
	list_for_each_entry_rcu(xs, &umem->xsk_list, list) {
		xs->sk.sk_write_space(&xs->sk);
	}
	rcu_read_unlock();
}
EXPORT_SYMBOL(xsk_umem_consume_tx_done);
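
/* Driver TX helper: scan the sockets sharing this umem for a pending
 * TX descriptor, reserve its completion entry up front with
 * xskq_produce_addr_lazy(), and return the frame's DMA address and
 * length. Returns false when nothing is queued for transmission.
 */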
bool xsk_umem_consume_tx(struct xdp_umem *umem, dma_addr_t *dma, u32 *len)
{
	struct xdp_desc desc;
	struct xdp_sock *xs;

	rcu_read_lock();
	list_for_each_entry_rcu(xs, &umem->xsk_list, list) {
		if (!xskq_peek_desc(xs->tx, &desc))
			continue;

		if (xskq_produce_addr_lazy(umem->cq, desc.addr))
			goto out;

		*dma = xdp_umem_get_dma(umem, desc.addr);
		*len = desc.len;

		xskq_discard_desc(xs->tx);
		rcu_read_unlock();
		return true;
	}

out:
	rcu_read_unlock();
	return false;
}
EXPORT_SYMBOL(xsk_umem_consume_tx);

static int xsk_zc_xmit(struct sock *sk)
{
	struct xdp_sock *xs = xdp_sk(sk);
	struct net_device *dev = xs->dev;

	return dev->netdev_ops->ndo_xsk_async_xmit(dev, xs->queue_id);
}
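
/* skb destructor for copy-mode TX: when the stack releases the skb,
 * the originating umem address is posted on the completion queue so
 * userspace can reuse the chunk. This may run from interrupt context,
 * hence the irqsave spinlock around the completion-queue producer.
 */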
static void xsk_destruct_skb(struct sk_buff *skb)
{
	u64 addr = (u64)(long)skb_shinfo(skb)->destructor_arg;
	struct xdp_sock *xs = xdp_sk(skb->sk);
	unsigned long flags;

	spin_lock_irqsave(&xs->tx_completion_lock, flags);
	WARN_ON_ONCE(xskq_produce_addr(xs->umem->cq, addr));
	spin_unlock_irqrestore(&xs->tx_completion_lock, flags);

	sock_wfree(skb);
}
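
/* Copy-mode TX: drain up to TX_BATCH_SIZE descriptors from the TX
 * ring, copying each frame into an skb sent with dev_direct_xmit().
 * A completion entry is reserved before every send so that
 * xsk_destruct_skb() is guaranteed a slot when the skb is freed.
 */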
static int xsk_generic_xmit(struct sock *sk, struct msghdr *m,
			    size_t total_len)
{
	u32 max_batch = TX_BATCH_SIZE;
	struct xdp_sock *xs = xdp_sk(sk);
	bool sent_frame = false;
	struct xdp_desc desc;
	struct sk_buff *skb;
	int err = 0;

	mutex_lock(&xs->mutex);

	while (xskq_peek_desc(xs->tx, &desc)) {
		char *buffer;
		u64 addr;
		u32 len;

		if (max_batch-- == 0) {
			err = -EAGAIN;
			goto out;
		}

		if (xskq_reserve_addr(xs->umem->cq))
			goto out;

		if (xs->queue_id >= xs->dev->real_num_tx_queues)
			goto out;

		len = desc.len;
		skb = sock_alloc_send_skb(sk, len, 1, &err);
		if (unlikely(!skb)) {
			err = -EAGAIN;
			goto out;
		}

		skb_put(skb, len);
		addr = desc.addr;
		buffer = xdp_umem_get_data(xs->umem, addr);
		err = skb_store_bits(skb, 0, buffer, len);
		if (unlikely(err)) {
			kfree_skb(skb);
			goto out;
		}

		skb->dev = xs->dev;
		skb->priority = sk->sk_priority;
		skb->mark = sk->sk_mark;
		skb_shinfo(skb)->destructor_arg = (void *)(long)addr;
		skb->destructor = xsk_destruct_skb;

		err = dev_direct_xmit(skb, xs->queue_id);
		xskq_discard_desc(xs->tx);
		/* Ignore NET_XMIT_CN as packet might have been sent */
		if (err == NET_XMIT_DROP || err == NETDEV_TX_BUSY) {
			/* SKB completed but not sent */
			err = -EBUSY;
			goto out;
		}

		sent_frame = true;
	}

out:
	if (sent_frame)
		sk->sk_write_space(sk);

	mutex_unlock(&xs->mutex);
	return err;
}

static int xsk_sendmsg(struct socket *sock, struct msghdr *m, size_t total_len)
{
	bool need_wait = !(m->msg_flags & MSG_DONTWAIT);
	struct sock *sk = sock->sk;
	struct xdp_sock *xs = xdp_sk(sk);

	if (unlikely(!xs->dev))
		return -ENXIO;
	if (unlikely(!(xs->dev->flags & IFF_UP)))
		return -ENETDOWN;
	if (unlikely(!xs->tx))
		return -ENOBUFS;
	if (need_wait)
		return -EOPNOTSUPP;

	return (xs->zc) ? xsk_zc_xmit(sk) : xsk_generic_xmit(sk, m, total_len);
}
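
/* poll() adds readiness on top of datagram_poll(): readable while the
 * RX ring holds descriptors, writable while the TX ring has room.
 */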
static unsigned int xsk_poll(struct file *file, struct socket *sock,
			     struct poll_table_struct *wait)
{
	unsigned int mask = datagram_poll(file, sock, wait);
	struct sock *sk = sock->sk;
	struct xdp_sock *xs = xdp_sk(sk);

	if (xs->rx && !xskq_empty_desc(xs->rx))
		mask |= POLLIN | POLLRDNORM;
	if (xs->tx && !xskq_full_desc(xs->tx))
		mask |= POLLOUT | POLLWRNORM;

	return mask;
}

static int xsk_init_queue(u32 entries, struct xsk_queue **queue,
			  bool umem_queue)
{
	struct xsk_queue *q;

	if (entries == 0 || *queue || !is_power_of_2(entries))
		return -EINVAL;

	q = xskq_create(entries, umem_queue);
	if (!q)
		return -ENOMEM;

	/* Make sure queue is ready before it can be seen by others */
	smp_wmb();
	*queue = q;
	return 0;
}

static int xsk_release(struct socket *sock)
{
	struct sock *sk = sock->sk;
	struct xdp_sock *xs = xdp_sk(sk);
	struct net *net;

	if (!sk)
		return 0;

	net = sock_net(sk);

	mutex_lock(&net->xdp.lock);
	sk_del_node_init_rcu(sk);
	mutex_unlock(&net->xdp.lock);

	local_bh_disable();
	sock_prot_inuse_add(net, sk->sk_prot, -1);
	local_bh_enable();

	if (xs->dev) {
		struct net_device *dev = xs->dev;

		/* Wait for driver to stop using the xdp socket. */
		xdp_del_sk_umem(xs->umem, xs);
		xs->dev = NULL;
		synchronize_net();
		dev_put(dev);
	}

	xskq_destroy(xs->rx);
	xskq_destroy(xs->tx);
	xdp_put_umem(xs->umem);

	sock_orphan(sk);
	sock->sk = NULL;

	sk_refcnt_debug_release(sk);
	sock_put(sk);

	return 0;
}

static struct socket *xsk_lookup_xsk_from_fd(int fd)
{
	struct socket *sock;
	int err;

	sock = sockfd_lookup(fd, &err);
	if (!sock)
		return ERR_PTR(-ENOTSOCK);

	if (sock->sk->sk_family != PF_XDP) {
		sockfd_put(sock);
		return ERR_PTR(-ENOPROTOOPT);
	}

	return sock;
}
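
/* bind() ties the socket to a <netdev, queue_id> pair. With
 * XDP_SHARED_UMEM the umem is inherited from an already-bound socket
 * (given via sxdp_shared_umem_fd), which must target the same device
 * and queue; otherwise the socket's own umem is validated and
 * assigned to the device, switching to zero-copy when the driver
 * supports it.
 */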
static int xsk_bind(struct socket *sock, struct sockaddr *addr, int addr_len)
{
	struct sockaddr_xdp *sxdp = (struct sockaddr_xdp *)addr;
	struct sock *sk = sock->sk;
	struct xdp_sock *xs = xdp_sk(sk);
	struct net_device *dev;
	u32 flags, qid;
	int err = 0;

	if (addr_len < sizeof(struct sockaddr_xdp))
		return -EINVAL;
	if (sxdp->sxdp_family != AF_XDP)
		return -EINVAL;

	mutex_lock(&xs->mutex);
	if (xs->dev) {
		err = -EBUSY;
		goto out_release;
	}

	dev = dev_get_by_index(sock_net(sk), sxdp->sxdp_ifindex);
	if (!dev) {
		err = -ENODEV;
		goto out_release;
	}

	if (!xs->rx && !xs->tx) {
		err = -EINVAL;
		goto out_unlock;
	}

	qid = sxdp->sxdp_queue_id;
	flags = sxdp->sxdp_flags;

	if (flags & XDP_SHARED_UMEM) {
		struct xdp_sock *umem_xs;
		struct socket *sock;

		if ((flags & XDP_COPY) || (flags & XDP_ZEROCOPY)) {
			/* Cannot specify flags for shared sockets. */
			err = -EINVAL;
			goto out_unlock;
		}

		if (xs->umem) {
			/* We have already our own. */
			err = -EINVAL;
			goto out_unlock;
		}

		sock = xsk_lookup_xsk_from_fd(sxdp->sxdp_shared_umem_fd);
		if (IS_ERR(sock)) {
			err = PTR_ERR(sock);
			goto out_unlock;
		}

		umem_xs = xdp_sk(sock->sk);
		if (!umem_xs->umem) {
			/* No umem to inherit. */
			err = -EBADF;
			sockfd_put(sock);
			goto out_unlock;
		} else if (umem_xs->dev != dev || umem_xs->queue_id != qid) {
			err = -EINVAL;
			sockfd_put(sock);
			goto out_unlock;
		}

		xdp_get_umem(umem_xs->umem);
		xs->umem = umem_xs->umem;
		sockfd_put(sock);
	} else if (!xs->umem || !xdp_umem_validate_queues(xs->umem)) {
		err = -EINVAL;
		goto out_unlock;
	} else {
		/* This xsk has its own umem. */
		xskq_set_umem(xs->umem->fq, xs->umem->size,
			      xs->umem->chunk_mask);
		xskq_set_umem(xs->umem->cq, xs->umem->size,
			      xs->umem->chunk_mask);

		err = xdp_umem_assign_dev(xs->umem, dev, qid, flags);
		if (err)
			goto out_unlock;
	}

	xs->dev = dev;
	xs->zc = xs->umem->zc;
	xs->queue_id = qid;
	xskq_set_umem(xs->rx, xs->umem->size, xs->umem->chunk_mask);
	xskq_set_umem(xs->tx, xs->umem->size, xs->umem->chunk_mask);
	xdp_add_sk_umem(xs->umem, xs);

out_unlock:
	if (err)
		dev_put(dev);
out_release:
	mutex_unlock(&xs->mutex);
	return err;
}
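
/* Setup order expected by the code above, sketched from userspace.
 * A minimal, illustrative sequence only (error handling and the
 * mmap() of each ring omitted; buf, BUF_SZ, n and ifindex are
 * placeholders supplied by the caller, not part of this file):
 *
 *	int fd = socket(AF_XDP, SOCK_RAW, 0);
 *	struct xdp_umem_reg mr = { .addr = (__u64)buf, .len = BUF_SZ,
 *				   .chunk_size = 2048, .headroom = 0 };
 *	setsockopt(fd, SOL_XDP, XDP_UMEM_REG, &mr, sizeof(mr));
 *	setsockopt(fd, SOL_XDP, XDP_UMEM_FILL_RING, &n, sizeof(n));
 *	setsockopt(fd, SOL_XDP, XDP_UMEM_COMPLETION_RING, &n, sizeof(n));
 *	setsockopt(fd, SOL_XDP, XDP_RX_RING, &n, sizeof(n));
 *	struct sockaddr_xdp sxdp = { .sxdp_family = AF_XDP,
 *				     .sxdp_ifindex = ifindex,
 *				     .sxdp_queue_id = 0 };
 *	bind(fd, (struct sockaddr *)&sxdp, sizeof(sxdp));
 */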
static int xsk_setsockopt(struct socket *sock, int level, int optname,
			  char __user *optval, unsigned int optlen)
{
	struct sock *sk = sock->sk;
	struct xdp_sock *xs = xdp_sk(sk);
	int err;

	if (level != SOL_XDP)
		return -ENOPROTOOPT;

	switch (optname) {
	case XDP_RX_RING:
	case XDP_TX_RING:
	{
		struct xsk_queue **q;
		int entries;

		if (optlen < sizeof(entries))
			return -EINVAL;
		if (copy_from_user(&entries, optval, sizeof(entries)))
			return -EFAULT;

		mutex_lock(&xs->mutex);
		q = (optname == XDP_TX_RING) ? &xs->tx : &xs->rx;
		err = xsk_init_queue(entries, q, false);
		mutex_unlock(&xs->mutex);
		return err;
	}
	case XDP_UMEM_REG:
	{
		struct xdp_umem_reg mr;
		struct xdp_umem *umem;

		if (copy_from_user(&mr, optval, sizeof(mr)))
			return -EFAULT;

		mutex_lock(&xs->mutex);
		if (xs->umem) {
			mutex_unlock(&xs->mutex);
			return -EBUSY;
		}

		umem = xdp_umem_create(&mr);
		if (IS_ERR(umem)) {
			mutex_unlock(&xs->mutex);
			return PTR_ERR(umem);
		}

		/* Make sure umem is ready before it can be seen by others */
		smp_wmb();
		xs->umem = umem;
		mutex_unlock(&xs->mutex);
		return 0;
	}
	case XDP_UMEM_FILL_RING:
	case XDP_UMEM_COMPLETION_RING:
	{
		struct xsk_queue **q;
		int entries;

		if (copy_from_user(&entries, optval, sizeof(entries)))
			return -EFAULT;

		mutex_lock(&xs->mutex);
		if (!xs->umem) {
			mutex_unlock(&xs->mutex);
			return -EINVAL;
		}

		q = (optname == XDP_UMEM_FILL_RING) ? &xs->umem->fq :
			&xs->umem->cq;
		err = xsk_init_queue(entries, q, true);
		mutex_unlock(&xs->mutex);
		return err;
	}
	default:
		break;
	}

	return -ENOPROTOOPT;
}
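
/* XDP_MMAP_OFFSETS exports where the producer/consumer pointers and
 * descriptor arrays live within each mmap()ed ring, so the kernel can
 * change the ring layout without breaking the userspace ABI.
 */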
static int xsk_getsockopt(struct socket *sock, int level, int optname,
			  char __user *optval, int __user *optlen)
{
	struct sock *sk = sock->sk;
	struct xdp_sock *xs = xdp_sk(sk);
	int len;

	if (level != SOL_XDP)
		return -ENOPROTOOPT;

	if (get_user(len, optlen))
		return -EFAULT;
	if (len < 0)
		return -EINVAL;

	switch (optname) {
	case XDP_STATISTICS:
	{
		struct xdp_statistics stats;

		if (len < sizeof(stats))
			return -EINVAL;

		mutex_lock(&xs->mutex);
		stats.rx_dropped = xs->rx_dropped;
		stats.rx_invalid_descs = xskq_nb_invalid_descs(xs->rx);
		stats.tx_invalid_descs = xskq_nb_invalid_descs(xs->tx);
		mutex_unlock(&xs->mutex);

		if (copy_to_user(optval, &stats, sizeof(stats)))
			return -EFAULT;
		if (put_user(sizeof(stats), optlen))
			return -EFAULT;

		return 0;
	}
	case XDP_MMAP_OFFSETS:
	{
		struct xdp_mmap_offsets off;

		if (len < sizeof(off))
			return -EINVAL;

		off.rx.producer = offsetof(struct xdp_rxtx_ring, ptrs.producer);
		off.rx.consumer = offsetof(struct xdp_rxtx_ring, ptrs.consumer);
		off.rx.desc = offsetof(struct xdp_rxtx_ring, desc);
		off.tx.producer = offsetof(struct xdp_rxtx_ring, ptrs.producer);
		off.tx.consumer = offsetof(struct xdp_rxtx_ring, ptrs.consumer);
		off.tx.desc = offsetof(struct xdp_rxtx_ring, desc);

		off.fr.producer = offsetof(struct xdp_umem_ring, ptrs.producer);
		off.fr.consumer = offsetof(struct xdp_umem_ring, ptrs.consumer);
		off.fr.desc = offsetof(struct xdp_umem_ring, desc);
		off.cr.producer = offsetof(struct xdp_umem_ring, ptrs.producer);
		off.cr.consumer = offsetof(struct xdp_umem_ring, ptrs.consumer);
		off.cr.desc = offsetof(struct xdp_umem_ring, desc);

		len = sizeof(off);
		if (copy_to_user(optval, &off, len))
			return -EFAULT;
		if (put_user(len, optlen))
			return -EFAULT;

		return 0;
	}
	default:
		break;
	}

	return -EOPNOTSUPP;
}
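
/* mmap() maps exactly one ring per call; vma->vm_pgoff encodes which
 * of the four rings (RX, TX, fill, completion) is requested, and the
 * mapping must fit within the pages backing that ring.
 */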
static int xsk_mmap(struct file *file, struct socket *sock,
		    struct vm_area_struct *vma)
{
	loff_t offset = (loff_t)vma->vm_pgoff << PAGE_SHIFT;
	unsigned long size = vma->vm_end - vma->vm_start;
	struct xdp_sock *xs = xdp_sk(sock->sk);
	struct xsk_queue *q = NULL;
	struct xdp_umem *umem;
	unsigned long pfn;
	struct page *qpg;

	if (offset == XDP_PGOFF_RX_RING) {
		q = READ_ONCE(xs->rx);
	} else if (offset == XDP_PGOFF_TX_RING) {
		q = READ_ONCE(xs->tx);
	} else {
		umem = READ_ONCE(xs->umem);
		if (!umem)
			return -EINVAL;

		if (offset == XDP_UMEM_PGOFF_FILL_RING)
			q = READ_ONCE(umem->fq);
		else if (offset == XDP_UMEM_PGOFF_COMPLETION_RING)
			q = READ_ONCE(umem->cq);
	}

	if (!q)
		return -EINVAL;

	qpg = virt_to_head_page(q->ring);
	if (size > (PAGE_SIZE << compound_order(qpg)))
		return -EINVAL;

	pfn = virt_to_phys(q->ring) >> PAGE_SHIFT;
	return remap_pfn_range(vma, vma->vm_start, pfn,
			       size, vma->vm_page_prot);
}

static struct proto xsk_proto = {
	.name =		"XDP",
	.owner =	THIS_MODULE,
	.obj_size =	sizeof(struct xdp_sock),
};

static const struct proto_ops xsk_proto_ops = {
	.family		= PF_XDP,
	.owner		= THIS_MODULE,
	.release	= xsk_release,
	.bind		= xsk_bind,
	.connect	= sock_no_connect,
	.socketpair	= sock_no_socketpair,
	.accept		= sock_no_accept,
	.getname	= sock_no_getname,
	.poll		= xsk_poll,
	.ioctl		= sock_no_ioctl,
	.listen		= sock_no_listen,
	.shutdown	= sock_no_shutdown,
	.setsockopt	= xsk_setsockopt,
	.getsockopt	= xsk_getsockopt,
	.sendmsg	= xsk_sendmsg,
	.recvmsg	= sock_no_recvmsg,
	.mmap		= xsk_mmap,
	.sendpage	= sock_no_sendpage,
};

static int xsk_create(struct net *net, struct socket *sock, int protocol,
		      int kern)
{
	struct sock *sk;
	struct xdp_sock *xs;

	if (!ns_capable(net->user_ns, CAP_NET_RAW))
		return -EPERM;
	if (sock->type != SOCK_RAW)
		return -ESOCKTNOSUPPORT;

	if (protocol)
		return -EPROTONOSUPPORT;

	sock->state = SS_UNCONNECTED;

	sk = sk_alloc(net, PF_XDP, GFP_KERNEL, &xsk_proto, kern);
	if (!sk)
		return -ENOBUFS;

	sock->ops = &xsk_proto_ops;

	sock_init_data(sock, sk);

	sk->sk_family = PF_XDP;

	sock_set_flag(sk, SOCK_RCU_FREE);

	xs = xdp_sk(sk);
	mutex_init(&xs->mutex);
	spin_lock_init(&xs->tx_completion_lock);

	mutex_lock(&net->xdp.lock);
	sk_add_node_rcu(sk, &net->xdp.list);
	mutex_unlock(&net->xdp.lock);

	local_bh_disable();
	sock_prot_inuse_add(net, &xsk_proto, 1);
	local_bh_enable();

	return 0;
}

static const struct net_proto_family xsk_family_ops = {
	.family = PF_XDP,
	.create = xsk_create,
	.owner	= THIS_MODULE,
};

static int __net_init xsk_net_init(struct net *net)
{
	mutex_init(&net->xdp.lock);
	INIT_HLIST_HEAD(&net->xdp.list);
	return 0;
}

static void __net_exit xsk_net_exit(struct net *net)
{
	WARN_ON_ONCE(!hlist_empty(&net->xdp.list));
}

static struct pernet_operations xsk_net_ops = {
	.init = xsk_net_init,
	.exit = xsk_net_exit,
};

static int __init xsk_init(void)
{
	int err;

	err = proto_register(&xsk_proto, 0 /* no slab */);
	if (err)
		goto out;

	err = sock_register(&xsk_family_ops);
	if (err)
		goto out_proto;

	err = register_pernet_subsys(&xsk_net_ops);
	if (err)
		goto out_sk;

	return 0;

out_sk:
	sock_unregister(PF_XDP);
out_proto:
	proto_unregister(&xsk_proto);
out:
	return err;
}

fs_initcall(xsk_init);