// SPDX-License-Identifier: GPL-2.0
/* XDP sockets
 *
 * AF_XDP sockets allow a channel between XDP programs and userspace
 * applications.
 * Copyright(c) 2018 Intel Corporation.
 *
 * Author(s): Björn Töpel <bjorn.topel@intel.com>
 *            Magnus Karlsson <magnus.karlsson@intel.com>
 */

#define pr_fmt(fmt) "AF_XDP: %s: " fmt, __func__

#include <linux/if_xdp.h>
#include <linux/init.h>
#include <linux/sched/mm.h>
#include <linux/sched/signal.h>
#include <linux/sched/task.h>
#include <linux/socket.h>
#include <linux/file.h>
#include <linux/uaccess.h>
#include <linux/net.h>
#include <linux/netdevice.h>
#include <linux/rculist.h>
#include <net/xdp_sock.h>
#include <net/xdp.h>

#include "xsk_queue.h"
#include "xdp_umem.h"

#define TX_BATCH_SIZE 16

static struct xdp_sock *xdp_sk(struct sock *sk)
{
        return (struct xdp_sock *)sk;
}

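/* A socket may be inserted into an XSKMAP and receive redirected frames
 * only once it has an rx ring and a umem with a fill queue.
 */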
bool xsk_is_setup_for_bpf_map(struct xdp_sock *xs)
{
        return READ_ONCE(xs->rx) && READ_ONCE(xs->umem) &&
                READ_ONCE(xs->umem->fq);
}

u64 *xsk_umem_peek_addr(struct xdp_umem *umem, u64 *addr)
{
        return xskq_peek_addr(umem->fq, addr);
}
EXPORT_SYMBOL(xsk_umem_peek_addr);

void xsk_umem_discard_addr(struct xdp_umem *umem)
{
        xskq_discard_addr(umem->fq);
}
EXPORT_SYMBOL(xsk_umem_discard_addr);

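/* Copy-mode receive: take a frame address off the fill queue, copy the
 * packet into the umem and post a descriptor on the rx ring. The fill
 * queue entry is only consumed once the descriptor has been produced
 * successfully.
 */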
static int __xsk_rcv(struct xdp_sock *xs, struct xdp_buff *xdp, u32 len)
{
        void *buffer;
        u64 addr;
        int err;

        if (!xskq_peek_addr(xs->umem->fq, &addr) ||
            len > xs->umem->chunk_size_nohr) {
                xs->rx_dropped++;
                return -ENOSPC;
        }

        addr += xs->umem->headroom;

        buffer = xdp_umem_get_data(xs->umem, addr);
        memcpy(buffer, xdp->data, len);
        err = xskq_produce_batch_desc(xs->rx, addr, len);
        if (!err) {
                xskq_discard_addr(xs->umem->fq);
                xdp_return_buff(xdp);
                return 0;
        }

        xs->rx_dropped++;
        return err;
}

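/* Zero-copy receive: the frame already lives in the umem, so only an rx
 * descriptor referencing the existing buffer needs to be produced.
 */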
static int __xsk_rcv_zc(struct xdp_sock *xs, struct xdp_buff *xdp, u32 len)
{
        int err = xskq_produce_batch_desc(xs->rx, (u64)xdp->handle, len);

        if (err) {
                xdp_return_buff(xdp);
                xs->rx_dropped++;
        }

        return err;
}

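/* Receive a frame redirected to this socket. The frame must arrive on the
 * device and queue the socket is bound to; it then takes the zero-copy or
 * the copy path depending on the rx queue's memory model.
 */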
int xsk_rcv(struct xdp_sock *xs, struct xdp_buff *xdp)
{
        u32 len;

        if (xs->dev != xdp->rxq->dev || xs->queue_id != xdp->rxq->queue_index)
                return -EINVAL;

        len = xdp->data_end - xdp->data;

        return (xdp->rxq->mem.type == MEM_TYPE_ZERO_COPY) ?
                __xsk_rcv_zc(xs, xdp, len) : __xsk_rcv(xs, xdp, len);
}

void xsk_flush(struct xdp_sock *xs)
{
        xskq_produce_flush_desc(xs->rx);
        xs->sk.sk_data_ready(&xs->sk);
}

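/* Receive path for generic (skb-mode) XDP. Like __xsk_rcv(), but the rx
 * ring is flushed and the socket signalled immediately for each frame,
 * instead of relying on a batched flush at the end of a driver napi poll.
 */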
int xsk_generic_rcv(struct xdp_sock *xs, struct xdp_buff *xdp)
{
        u32 len = xdp->data_end - xdp->data;
        void *buffer;
        u64 addr;
        int err;

        if (!xskq_peek_addr(xs->umem->fq, &addr) ||
            len > xs->umem->chunk_size_nohr) {
                xs->rx_dropped++;
                return -ENOSPC;
        }

        addr += xs->umem->headroom;

        buffer = xdp_umem_get_data(xs->umem, addr);
        memcpy(buffer, xdp->data, len);
        err = xskq_produce_batch_desc(xs->rx, addr, len);
        if (!err) {
                xskq_discard_addr(xs->umem->fq);
                xsk_flush(xs);
                return 0;
        }

        xs->rx_dropped++;
        return err;
}

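/* Driver API: make @nb_entries completion ring entries, queued earlier by
 * xsk_umem_consume_tx(), visible to userspace once the frames have been
 * transmitted.
 */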
void xsk_umem_complete_tx(struct xdp_umem *umem, u32 nb_entries)
{
        xskq_produce_flush_addr_n(umem->cq, nb_entries);
}
EXPORT_SYMBOL(xsk_umem_complete_tx);

void xsk_umem_consume_tx_done(struct xdp_umem *umem)
{
        struct xdp_sock *xs;

        rcu_read_lock();
        list_for_each_entry_rcu(xs, &umem->xsk_list, list) {
                xs->sk.sk_write_space(&xs->sk);
        }
        rcu_read_unlock();
}
EXPORT_SYMBOL(xsk_umem_consume_tx_done);

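/* Driver API: fetch the next tx descriptor from any socket sharing this
 * umem. A completion ring entry is produced lazily up front (it becomes
 * visible only at xsk_umem_complete_tx() time), and the descriptor is only
 * consumed from the tx ring once that reservation has succeeded.
 */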
bool xsk_umem_consume_tx(struct xdp_umem *umem, dma_addr_t *dma, u32 *len)
{
        struct xdp_desc desc;
        struct xdp_sock *xs;

        rcu_read_lock();
        list_for_each_entry_rcu(xs, &umem->xsk_list, list) {
                if (!xskq_peek_desc(xs->tx, &desc))
                        continue;

                if (xskq_produce_addr_lazy(umem->cq, desc.addr))
                        goto out;

                *dma = xdp_umem_get_dma(umem, desc.addr);
                *len = desc.len;

                xskq_discard_desc(xs->tx);
                rcu_read_unlock();
                return true;
        }

out:
        rcu_read_unlock();
        return false;
}
EXPORT_SYMBOL(xsk_umem_consume_tx);

static int xsk_zc_xmit(struct sock *sk)
{
        struct xdp_sock *xs = xdp_sk(sk);
        struct net_device *dev = xs->dev;

        return dev->netdev_ops->ndo_xsk_async_xmit(dev, xs->queue_id);
}

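/* skb destructor for copy-mode tx: return the frame to userspace by
 * posting its address on the completion ring. Ring space was reserved in
 * xsk_generic_xmit(), so producing the entry here cannot fail.
 */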
static void xsk_destruct_skb(struct sk_buff *skb)
{
        u64 addr = (u64)(long)skb_shinfo(skb)->destructor_arg;
        struct xdp_sock *xs = xdp_sk(skb->sk);

        WARN_ON_ONCE(xskq_produce_addr(xs->umem->cq, addr));

        sock_wfree(skb);
}

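/* Copy-mode transmit: turn up to TX_BATCH_SIZE tx descriptors into skbs
 * and push them straight to the bound queue with dev_direct_xmit().
 * Completion ring space is reserved before every send so that the skb
 * destructor can always complete the frame back to userspace.
 */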
static int xsk_generic_xmit(struct sock *sk, struct msghdr *m,
                            size_t total_len)
{
        u32 max_batch = TX_BATCH_SIZE;
        struct xdp_sock *xs = xdp_sk(sk);
        bool sent_frame = false;
        struct xdp_desc desc;
        struct sk_buff *skb;
        int err = 0;

        if (unlikely(!xs->tx))
                return -ENOBUFS;

        mutex_lock(&xs->mutex);

        while (xskq_peek_desc(xs->tx, &desc)) {
                char *buffer;
                u64 addr;
                u32 len;

                if (max_batch-- == 0) {
                        err = -EAGAIN;
                        goto out;
                }

                if (xskq_reserve_addr(xs->umem->cq)) {
                        err = -EAGAIN;
                        goto out;
                }

                len = desc.len;
                if (unlikely(len > xs->dev->mtu)) {
                        err = -EMSGSIZE;
                        goto out;
                }

                if (xs->queue_id >= xs->dev->real_num_tx_queues) {
                        err = -ENXIO;
                        goto out;
                }

                skb = sock_alloc_send_skb(sk, len, 1, &err);
                if (unlikely(!skb)) {
                        err = -EAGAIN;
                        goto out;
                }

                skb_put(skb, len);
                addr = desc.addr;
                buffer = xdp_umem_get_data(xs->umem, addr);
                err = skb_store_bits(skb, 0, buffer, len);
                if (unlikely(err)) {
                        kfree_skb(skb);
                        goto out;
                }

                skb->dev = xs->dev;
                skb->priority = sk->sk_priority;
                skb->mark = sk->sk_mark;
                skb_shinfo(skb)->destructor_arg = (void *)(long)addr;
                skb->destructor = xsk_destruct_skb;

                err = dev_direct_xmit(skb, xs->queue_id);
                /* Ignore NET_XMIT_CN as packet might have been sent */
                if (err == NET_XMIT_DROP || err == NETDEV_TX_BUSY) {
                        err = -EAGAIN;
                        /* SKB consumed by dev_direct_xmit() */
                        goto out;
                }

                sent_frame = true;
                xskq_discard_desc(xs->tx);
        }

out:
        if (sent_frame)
                sk->sk_write_space(sk);

        mutex_unlock(&xs->mutex);
        return err;
}

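/* sendmsg() kicks transmission: zero-copy sockets ask the driver to start
 * sending asynchronously, copy-mode sockets transmit synchronously from
 * process context. Blocking sends are not supported yet, so callers must
 * use MSG_DONTWAIT.
 */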
static int xsk_sendmsg(struct socket *sock, struct msghdr *m, size_t total_len)
{
        bool need_wait = !(m->msg_flags & MSG_DONTWAIT);
        struct sock *sk = sock->sk;
        struct xdp_sock *xs = xdp_sk(sk);

        if (unlikely(!xs->dev))
                return -ENXIO;
        if (unlikely(!(xs->dev->flags & IFF_UP)))
                return -ENETDOWN;
        if (need_wait)
                return -EOPNOTSUPP;

        return (xs->zc) ? xsk_zc_xmit(sk) : xsk_generic_xmit(sk, m, total_len);
}

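/* datagram_poll() supplies the generic socket state; readability and
 * writability are derived directly from the rx and tx ring states.
 */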
static unsigned int xsk_poll(struct file *file, struct socket *sock,
                             struct poll_table_struct *wait)
{
        unsigned int mask = datagram_poll(file, sock, wait);
        struct sock *sk = sock->sk;
        struct xdp_sock *xs = xdp_sk(sk);

        if (xs->rx && !xskq_empty_desc(xs->rx))
                mask |= POLLIN | POLLRDNORM;
        if (xs->tx && !xskq_full_desc(xs->tx))
                mask |= POLLOUT | POLLWRNORM;

        return mask;
}

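/* Allocate one descriptor ring. A ring can only be set up once and its
 * size must be a non-zero power of two. The smp_wmb() makes sure the ring
 * is fully initialized before the pointer publishing it becomes visible
 * to lockless readers.
 */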
static int xsk_init_queue(u32 entries, struct xsk_queue **queue,
                          bool umem_queue)
{
        struct xsk_queue *q;

        if (entries == 0 || *queue || !is_power_of_2(entries))
                return -EINVAL;

        q = xskq_create(entries, umem_queue);
        if (!q)
                return -ENOMEM;

        /* Make sure queue is ready before it can be seen by others */
        smp_wmb();
        *queue = q;
        return 0;
}

static int xsk_release(struct socket *sock)
{
        struct sock *sk = sock->sk;
        struct xdp_sock *xs = xdp_sk(sk);
        struct net *net;

        if (!sk)
                return 0;

        net = sock_net(sk);

        local_bh_disable();
        sock_prot_inuse_add(net, sk->sk_prot, -1);
        local_bh_enable();

        if (xs->dev) {
                /* Wait for driver to stop using the xdp socket. */
                synchronize_net();
                dev_put(xs->dev);
                xs->dev = NULL;
        }

        sock_orphan(sk);
        sock->sk = NULL;

        sk_refcnt_debug_release(sk);
        sock_put(sk);

        return 0;
}

static struct socket *xsk_lookup_xsk_from_fd(int fd)
{
        struct socket *sock;
        int err;

        sock = sockfd_lookup(fd, &err);
        if (!sock)
                return ERR_PTR(-ENOTSOCK);

        if (sock->sk->sk_family != PF_XDP) {
                sockfd_put(sock);
                return ERR_PTR(-ENOPROTOOPT);
        }

        return sock;
}

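/* Bind the socket to a device and queue id. The socket either brings its
 * own umem, or, with XDP_SHARED_UMEM, inherits the umem of another socket
 * that is already bound to the very same device and queue.
 */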
static int xsk_bind(struct socket *sock, struct sockaddr *addr, int addr_len)
{
        struct sockaddr_xdp *sxdp = (struct sockaddr_xdp *)addr;
        struct sock *sk = sock->sk;
        struct xdp_sock *xs = xdp_sk(sk);
        struct net_device *dev;
        u32 flags, qid;
        int err = 0;

        if (addr_len < sizeof(struct sockaddr_xdp))
                return -EINVAL;
        if (sxdp->sxdp_family != AF_XDP)
                return -EINVAL;

        mutex_lock(&xs->mutex);
        if (xs->dev) {
                err = -EBUSY;
                goto out_release;
        }

        dev = dev_get_by_index(sock_net(sk), sxdp->sxdp_ifindex);
        if (!dev) {
                err = -ENODEV;
                goto out_release;
        }

        if (!xs->rx && !xs->tx) {
                err = -EINVAL;
                goto out_unlock;
        }

        qid = sxdp->sxdp_queue_id;

        if ((xs->rx && qid >= dev->real_num_rx_queues) ||
            (xs->tx && qid >= dev->real_num_tx_queues)) {
                err = -EINVAL;
                goto out_unlock;
        }

        flags = sxdp->sxdp_flags;

        if (flags & XDP_SHARED_UMEM) {
                struct xdp_sock *umem_xs;
                struct socket *sock;

                if ((flags & XDP_COPY) || (flags & XDP_ZEROCOPY)) {
                        /* Cannot specify flags for shared sockets. */
                        err = -EINVAL;
                        goto out_unlock;
                }

                if (xs->umem) {
                        /* We already have our own. */
                        err = -EINVAL;
                        goto out_unlock;
                }

                sock = xsk_lookup_xsk_from_fd(sxdp->sxdp_shared_umem_fd);
                if (IS_ERR(sock)) {
                        err = PTR_ERR(sock);
                        goto out_unlock;
                }

                umem_xs = xdp_sk(sock->sk);
                if (!umem_xs->umem) {
                        /* No umem to inherit. */
                        err = -EBADF;
                        sockfd_put(sock);
                        goto out_unlock;
                } else if (umem_xs->dev != dev || umem_xs->queue_id != qid) {
                        err = -EINVAL;
                        sockfd_put(sock);
                        goto out_unlock;
                }

                xdp_get_umem(umem_xs->umem);
                xs->umem = umem_xs->umem;
                sockfd_put(sock);
        } else if (!xs->umem || !xdp_umem_validate_queues(xs->umem)) {
                err = -EINVAL;
                goto out_unlock;
        } else {
                /* This xsk has its own umem. */
                xskq_set_umem(xs->umem->fq, &xs->umem->props);
                xskq_set_umem(xs->umem->cq, &xs->umem->props);

                err = xdp_umem_assign_dev(xs->umem, dev, qid, flags);
                if (err)
                        goto out_unlock;
        }

        xs->dev = dev;
        xs->zc = xs->umem->zc;
        xs->queue_id = qid;
        xskq_set_umem(xs->rx, &xs->umem->props);
        xskq_set_umem(xs->tx, &xs->umem->props);
        xdp_add_sk_umem(xs->umem, xs);

out_unlock:
        if (err)
                dev_put(dev);
out_release:
        mutex_unlock(&xs->mutex);
        return err;
}

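/* Socket options create the four rings (rx, tx, fill and completion) and
 * register the umem. The umem must be registered before its fill and
 * completion rings can be created.
 */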
static int xsk_setsockopt(struct socket *sock, int level, int optname,
                          char __user *optval, unsigned int optlen)
{
        struct sock *sk = sock->sk;
        struct xdp_sock *xs = xdp_sk(sk);
        int err;

        if (level != SOL_XDP)
                return -ENOPROTOOPT;

        switch (optname) {
        case XDP_RX_RING:
        case XDP_TX_RING:
        {
                struct xsk_queue **q;
                int entries;

                if (optlen < sizeof(entries))
                        return -EINVAL;
                if (copy_from_user(&entries, optval, sizeof(entries)))
                        return -EFAULT;

                mutex_lock(&xs->mutex);
                q = (optname == XDP_TX_RING) ? &xs->tx : &xs->rx;
                err = xsk_init_queue(entries, q, false);
                mutex_unlock(&xs->mutex);
                return err;
        }
        case XDP_UMEM_REG:
        {
                struct xdp_umem_reg mr;
                struct xdp_umem *umem;

                if (copy_from_user(&mr, optval, sizeof(mr)))
                        return -EFAULT;

                mutex_lock(&xs->mutex);
                if (xs->umem) {
                        mutex_unlock(&xs->mutex);
                        return -EBUSY;
                }

                umem = xdp_umem_create(&mr);
                if (IS_ERR(umem)) {
                        mutex_unlock(&xs->mutex);
                        return PTR_ERR(umem);
                }

                /* Make sure umem is ready before it can be seen by others */
                smp_wmb();
                xs->umem = umem;
                mutex_unlock(&xs->mutex);
                return 0;
        }
        case XDP_UMEM_FILL_RING:
        case XDP_UMEM_COMPLETION_RING:
        {
                struct xsk_queue **q;
                int entries;

                if (copy_from_user(&entries, optval, sizeof(entries)))
                        return -EFAULT;

                mutex_lock(&xs->mutex);
                if (!xs->umem) {
                        mutex_unlock(&xs->mutex);
                        return -EINVAL;
                }

                q = (optname == XDP_UMEM_FILL_RING) ? &xs->umem->fq :
                        &xs->umem->cq;
                err = xsk_init_queue(entries, q, true);
                mutex_unlock(&xs->mutex);
                return err;
        }
        default:
                break;
        }

        return -ENOPROTOOPT;
}

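/* XDP_STATISTICS reports the socket's drop and invalid-descriptor
 * counters; XDP_MMAP_OFFSETS tells userspace where the producer/consumer
 * pointers and the descriptor array live within each mmapped ring.
 */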
static int xsk_getsockopt(struct socket *sock, int level, int optname,
                          char __user *optval, int __user *optlen)
{
        struct sock *sk = sock->sk;
        struct xdp_sock *xs = xdp_sk(sk);
        int len;

        if (level != SOL_XDP)
                return -ENOPROTOOPT;

        if (get_user(len, optlen))
                return -EFAULT;
        if (len < 0)
                return -EINVAL;

        switch (optname) {
        case XDP_STATISTICS:
        {
                struct xdp_statistics stats;

                if (len < sizeof(stats))
                        return -EINVAL;

                mutex_lock(&xs->mutex);
                stats.rx_dropped = xs->rx_dropped;
                stats.rx_invalid_descs = xskq_nb_invalid_descs(xs->rx);
                stats.tx_invalid_descs = xskq_nb_invalid_descs(xs->tx);
                mutex_unlock(&xs->mutex);

                if (copy_to_user(optval, &stats, sizeof(stats)))
                        return -EFAULT;
                if (put_user(sizeof(stats), optlen))
                        return -EFAULT;

                return 0;
        }
        case XDP_MMAP_OFFSETS:
        {
                struct xdp_mmap_offsets off;

                if (len < sizeof(off))
                        return -EINVAL;

                off.rx.producer = offsetof(struct xdp_rxtx_ring, ptrs.producer);
                off.rx.consumer = offsetof(struct xdp_rxtx_ring, ptrs.consumer);
                off.rx.desc     = offsetof(struct xdp_rxtx_ring, desc);
                off.tx.producer = offsetof(struct xdp_rxtx_ring, ptrs.producer);
                off.tx.consumer = offsetof(struct xdp_rxtx_ring, ptrs.consumer);
                off.tx.desc     = offsetof(struct xdp_rxtx_ring, desc);

                off.fr.producer = offsetof(struct xdp_umem_ring, ptrs.producer);
                off.fr.consumer = offsetof(struct xdp_umem_ring, ptrs.consumer);
                off.fr.desc     = offsetof(struct xdp_umem_ring, desc);
                off.cr.producer = offsetof(struct xdp_umem_ring, ptrs.producer);
                off.cr.consumer = offsetof(struct xdp_umem_ring, ptrs.consumer);
                off.cr.desc     = offsetof(struct xdp_umem_ring, desc);

                len = sizeof(off);
                if (copy_to_user(optval, &off, len))
                        return -EFAULT;
                if (put_user(len, optlen))
                        return -EFAULT;

                return 0;
        }
        default:
                break;
        }

        return -EOPNOTSUPP;
}

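/* Map one of the four rings into userspace. The ring is selected by the
 * mmap offset, and the requested size must not exceed the ring's backing
 * allocation.
 */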
static int xsk_mmap(struct file *file, struct socket *sock,
                    struct vm_area_struct *vma)
{
        unsigned long offset = vma->vm_pgoff << PAGE_SHIFT;
        unsigned long size = vma->vm_end - vma->vm_start;
        struct xdp_sock *xs = xdp_sk(sock->sk);
        struct xsk_queue *q = NULL;
        struct xdp_umem *umem;
        unsigned long pfn;
        struct page *qpg;

        if (offset == XDP_PGOFF_RX_RING) {
                q = READ_ONCE(xs->rx);
        } else if (offset == XDP_PGOFF_TX_RING) {
                q = READ_ONCE(xs->tx);
        } else {
                umem = READ_ONCE(xs->umem);
                if (!umem)
                        return -EINVAL;

                if (offset == XDP_UMEM_PGOFF_FILL_RING)
                        q = READ_ONCE(umem->fq);
                else if (offset == XDP_UMEM_PGOFF_COMPLETION_RING)
                        q = READ_ONCE(umem->cq);
        }

        if (!q)
                return -EINVAL;

        qpg = virt_to_head_page(q->ring);
        if (size > (PAGE_SIZE << compound_order(qpg)))
                return -EINVAL;

        pfn = virt_to_phys(q->ring) >> PAGE_SHIFT;
        return remap_pfn_range(vma, vma->vm_start, pfn,
                               size, vma->vm_page_prot);
}

static struct proto xsk_proto = {
        .name =         "XDP",
        .owner =        THIS_MODULE,
        .obj_size =     sizeof(struct xdp_sock),
};

static const struct proto_ops xsk_proto_ops = {
        .family         = PF_XDP,
        .owner          = THIS_MODULE,
        .release        = xsk_release,
        .bind           = xsk_bind,
        .connect        = sock_no_connect,
        .socketpair     = sock_no_socketpair,
        .accept         = sock_no_accept,
        .getname        = sock_no_getname,
        .poll           = xsk_poll,
        .ioctl          = sock_no_ioctl,
        .listen         = sock_no_listen,
        .shutdown       = sock_no_shutdown,
        .setsockopt     = xsk_setsockopt,
        .getsockopt     = xsk_getsockopt,
        .sendmsg        = xsk_sendmsg,
        .recvmsg        = sock_no_recvmsg,
        .mmap           = xsk_mmap,
        .sendpage       = sock_no_sendpage,
};

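/* Final destruction of the socket: tear down the rings and drop the umem
 * reference.
 */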
static void xsk_destruct(struct sock *sk)
{
        struct xdp_sock *xs = xdp_sk(sk);

        if (!sock_flag(sk, SOCK_DEAD))
                return;

        xskq_destroy(xs->rx);
        xskq_destroy(xs->tx);
        xdp_del_sk_umem(xs->umem, xs);
        xdp_put_umem(xs->umem);

        sk_refcnt_debug_dec(sk);
}

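/* Create an AF_XDP socket: requires CAP_NET_RAW, type SOCK_RAW and a zero
 * protocol.
 */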
static int xsk_create(struct net *net, struct socket *sock, int protocol,
                      int kern)
{
        struct sock *sk;
        struct xdp_sock *xs;

        if (!ns_capable(net->user_ns, CAP_NET_RAW))
                return -EPERM;
        if (sock->type != SOCK_RAW)
                return -ESOCKTNOSUPPORT;

        if (protocol)
                return -EPROTONOSUPPORT;

        sock->state = SS_UNCONNECTED;

        sk = sk_alloc(net, PF_XDP, GFP_KERNEL, &xsk_proto, kern);
        if (!sk)
                return -ENOBUFS;

        sock->ops = &xsk_proto_ops;

        sock_init_data(sock, sk);

        sk->sk_family = PF_XDP;

        sk->sk_destruct = xsk_destruct;
        sk_refcnt_debug_inc(sk);

        xs = xdp_sk(sk);
        mutex_init(&xs->mutex);

        local_bh_disable();
        sock_prot_inuse_add(net, &xsk_proto, 1);
        local_bh_enable();

        return 0;
}

static const struct net_proto_family xsk_family_ops = {
        .family = PF_XDP,
        .create = xsk_create,
        .owner  = THIS_MODULE,
};

static int __init xsk_init(void)
{
        int err;

        err = proto_register(&xsk_proto, 0 /* no slab */);
        if (err)
                goto out;

        err = sock_register(&xsk_family_ops);
        if (err)
                goto out_proto;

        return 0;

out_proto:
        proto_unregister(&xsk_proto);
out:
        return err;
}

fs_initcall(xsk_init);