1 // SPDX-License-Identifier: GPL-2.0-only
3 * Kernel Connection Multiplexor
5 * Copyright (c) 2016 Tom Herbert <tom@herbertland.com>
9 #include <linux/errno.h>
10 #include <linux/errqueue.h>
11 #include <linux/file.h>
13 #include <linux/kernel.h>
14 #include <linux/module.h>
15 #include <linux/net.h>
16 #include <linux/netdevice.h>
17 #include <linux/poll.h>
18 #include <linux/rculist.h>
19 #include <linux/skbuff.h>
20 #include <linux/socket.h>
21 #include <linux/uaccess.h>
22 #include <linux/workqueue.h>
23 #include <linux/syscalls.h>
24 #include <linux/sched/signal.h>
27 #include <net/netns/generic.h>
29 #include <uapi/linux/kcm.h>
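/* Overview (summarized from the code below): a KCM multiplexor ("mux") ties
 * together a set of KCM sockets (struct kcm_sock) used by applications and a
 * set of attached transport sockets ("psocks", struct kcm_psock). Each psock
 * runs a strparser instance with a BPF program that delimits messages on the
 * byte stream; parsed messages are steered to whichever KCM socket is ready
 * to receive, and outgoing messages are fanned out over available psocks.
 */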
31 unsigned int kcm_net_id;
33 static struct kmem_cache *kcm_psockp __read_mostly;
34 static struct kmem_cache *kcm_muxp __read_mostly;
35 static struct workqueue_struct *kcm_wq;
37 static inline struct kcm_sock *kcm_sk(const struct sock *sk)
39 return (struct kcm_sock *)sk;
42 static inline struct kcm_tx_msg *kcm_tx_msg(struct sk_buff *skb)
44 return (struct kcm_tx_msg *)skb->cb;
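/* Per-message transmit state (sent bytes, fragidx, frag_offset, last_skb) is
 * kept in the control buffer of the message's head skb via kcm_tx_msg(); see
 * its use in kcm_write_msgs() and kcm_sendmsg() below.
 */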
47 static void report_csk_error(struct sock *csk, int err)
50 csk->sk_error_report(csk);
53 static void kcm_abort_tx_psock(struct kcm_psock *psock, int err,
56 struct sock *csk = psock->sk;
57 struct kcm_mux *mux = psock->mux;
59 /* Unrecoverable error in transmit */
61 spin_lock_bh(&mux->lock);
63 if (psock->tx_stopped) {
64 spin_unlock_bh(&mux->lock);
68 psock->tx_stopped = 1;
69 KCM_STATS_INCR(psock->stats.tx_aborts);
72 /* Take off psocks_avail list */
73 list_del(&psock->psock_avail_list);
74 } else if (wakeup_kcm) {
75 /* In this case psock is being aborted while outside of
76 * write_msgs and psock is reserved. Schedule tx_work
77 * to handle the failure there. Need to commit tx_stopped
78 * before queuing work.
82 queue_work(kcm_wq, &psock->tx_kcm->tx_work);
85 spin_unlock_bh(&mux->lock);
87 /* Report error on lower socket */
88 report_csk_error(csk, err);
91 /* RX mux lock held. */
92 static void kcm_update_rx_mux_stats(struct kcm_mux *mux,
93 struct kcm_psock *psock)
95 STRP_STATS_ADD(mux->stats.rx_bytes,
96 psock->strp.stats.bytes -
97 psock->saved_rx_bytes);
99 psock->strp.stats.msgs - psock->saved_rx_msgs;
100 psock->saved_rx_msgs = psock->strp.stats.msgs;
101 psock->saved_rx_bytes = psock->strp.stats.bytes;
104 static void kcm_update_tx_mux_stats(struct kcm_mux *mux,
105 struct kcm_psock *psock)
107 KCM_STATS_ADD(mux->stats.tx_bytes,
108 psock->stats.tx_bytes - psock->saved_tx_bytes);
109 mux->stats.tx_msgs +=
110 psock->stats.tx_msgs - psock->saved_tx_msgs;
111 psock->saved_tx_msgs = psock->stats.tx_msgs;
112 psock->saved_tx_bytes = psock->stats.tx_bytes;
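/* Note on the two helpers above: mux-level stats are updated by delta. The
 * per-psock counters are monotonic, so only the difference since the last
 * snapshot (saved_rx_msgs, saved_rx_bytes, saved_tx_msgs, saved_tx_bytes) is
 * added to the mux totals before the snapshot is advanced.
 */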
115 static int kcm_queue_rcv_skb(struct sock *sk, struct sk_buff *skb);
117 /* KCM is ready to receive messages on its queue-- either the KCM is new or
118 * has become unblocked after being blocked on full socket buffer. Queue any
119 * pending ready messages on a psock. RX mux lock held.
121 static void kcm_rcv_ready(struct kcm_sock *kcm)
123 struct kcm_mux *mux = kcm->mux;
124 struct kcm_psock *psock;
127 if (unlikely(kcm->rx_wait || kcm->rx_psock || kcm->rx_disabled))
130 while (unlikely((skb = __skb_dequeue(&mux->rx_hold_queue)))) {
131 if (kcm_queue_rcv_skb(&kcm->sk, skb)) {
132 /* Assuming buffer limit has been reached */
133 skb_queue_head(&mux->rx_hold_queue, skb);
134 WARN_ON(!sk_rmem_alloc_get(&kcm->sk));
139 while (!list_empty(&mux->psocks_ready)) {
140 psock = list_first_entry(&mux->psocks_ready, struct kcm_psock,
143 if (kcm_queue_rcv_skb(&kcm->sk, psock->ready_rx_msg)) {
144 /* Assuming buffer limit has been reached */
145 WARN_ON(!sk_rmem_alloc_get(&kcm->sk));
149 /* Consumed the ready message on the psock. Schedule rx_work to
152 list_del(&psock->psock_ready_list);
153 psock->ready_rx_msg = NULL;
154 /* Commit clearing of ready_rx_msg for queuing work */
157 strp_unpause(&psock->strp);
158 strp_check_rcv(&psock->strp);
161 /* Buffer limit is okay now, add to ready list */
162 list_add_tail(&kcm->wait_rx_list,
163 &kcm->mux->kcm_rx_waiters);
167 static void kcm_rfree(struct sk_buff *skb)
169 struct sock *sk = skb->sk;
170 struct kcm_sock *kcm = kcm_sk(sk);
171 struct kcm_mux *mux = kcm->mux;
172 unsigned int len = skb->truesize;
174 sk_mem_uncharge(sk, len);
175 atomic_sub(len, &sk->sk_rmem_alloc);
177 /* For reading rx_wait and rx_psock without holding lock */
178 smp_mb__after_atomic();
180 if (!kcm->rx_wait && !kcm->rx_psock &&
181 sk_rmem_alloc_get(sk) < sk->sk_rcvlowat) {
182 spin_lock_bh(&mux->rx_lock);
184 spin_unlock_bh(&mux->rx_lock);
188 static int kcm_queue_rcv_skb(struct sock *sk, struct sk_buff *skb)
190 struct sk_buff_head *list = &sk->sk_receive_queue;
192 if (atomic_read(&sk->sk_rmem_alloc) >= sk->sk_rcvbuf)
195 if (!sk_rmem_schedule(sk, skb, skb->truesize))
202 skb->destructor = kcm_rfree;
203 atomic_add(skb->truesize, &sk->sk_rmem_alloc);
204 sk_mem_charge(sk, skb->truesize);
206 skb_queue_tail(list, skb);
208 if (!sock_flag(sk, SOCK_DEAD))
209 sk->sk_data_ready(sk);
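/* Receive memory accounting: skbs queued on a KCM socket are charged against
 * sk_rmem_alloc with kcm_rfree as the destructor. When enough data has been
 * read that sk_rmem_alloc drops below sk_rcvlowat (see kcm_rfree above), the
 * socket is marked ready again via kcm_rcv_ready under the mux rx_lock.
 */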
214 /* Requeue received messages for a kcm socket to other kcm sockets. This is
215 * called when a kcm socket is receive disabled.
218 static void requeue_rx_msgs(struct kcm_mux *mux, struct sk_buff_head *head)
221 struct kcm_sock *kcm;
223 while ((skb = __skb_dequeue(head))) {
224 /* Reset destructor to avoid calling kcm_rcv_ready */
225 skb->destructor = sock_rfree;
228 if (list_empty(&mux->kcm_rx_waiters)) {
229 skb_queue_tail(&mux->rx_hold_queue, skb);
233 kcm = list_first_entry(&mux->kcm_rx_waiters,
234 struct kcm_sock, wait_rx_list);
236 if (kcm_queue_rcv_skb(&kcm->sk, skb)) {
237 /* Should mean socket buffer full */
238 list_del(&kcm->wait_rx_list);
239 kcm->rx_wait = false;
241 /* Commit rx_wait to read in kcm_rfree */
249 /* Lower sock lock held */
250 static struct kcm_sock *reserve_rx_kcm(struct kcm_psock *psock,
251 struct sk_buff *head)
253 struct kcm_mux *mux = psock->mux;
254 struct kcm_sock *kcm;
256 WARN_ON(psock->ready_rx_msg);
259 return psock->rx_kcm;
261 spin_lock_bh(&mux->rx_lock);
264 spin_unlock_bh(&mux->rx_lock);
265 return psock->rx_kcm;
268 kcm_update_rx_mux_stats(mux, psock);
270 if (list_empty(&mux->kcm_rx_waiters)) {
271 psock->ready_rx_msg = head;
272 strp_pause(&psock->strp);
273 list_add_tail(&psock->psock_ready_list,
275 spin_unlock_bh(&mux->rx_lock);
279 kcm = list_first_entry(&mux->kcm_rx_waiters,
280 struct kcm_sock, wait_rx_list);
281 list_del(&kcm->wait_rx_list);
282 kcm->rx_wait = false;
285 kcm->rx_psock = psock;
287 spin_unlock_bh(&mux->rx_lock);
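/* Receive-side reservation: a psock delivering a parsed message reserves a
 * KCM socket (psock->rx_kcm) for the duration of the message. If no KCM
 * socket is waiting, the message is parked in ready_rx_msg, the strparser is
 * paused, and the psock sits on psocks_ready until kcm_rcv_ready picks it up.
 */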
292 static void kcm_done(struct kcm_sock *kcm);
294 static void kcm_done_work(struct work_struct *w)
296 kcm_done(container_of(w, struct kcm_sock, done_work));
299 /* Lower sock held */
300 static void unreserve_rx_kcm(struct kcm_psock *psock,
303 struct kcm_sock *kcm = psock->rx_kcm;
304 struct kcm_mux *mux = psock->mux;
309 spin_lock_bh(&mux->rx_lock);
311 psock->rx_kcm = NULL;
312 kcm->rx_psock = NULL;
314 /* Commit kcm->rx_psock before sk_rmem_alloc_get to sync with
319 if (unlikely(kcm->done)) {
320 spin_unlock_bh(&mux->rx_lock);
322 /* Need to run kcm_done in a task since we need to acquire
323 * callback locks which may already be held here.
325 INIT_WORK(&kcm->done_work, kcm_done_work);
326 schedule_work(&kcm->done_work);
330 if (unlikely(kcm->rx_disabled)) {
331 requeue_rx_msgs(mux, &kcm->sk.sk_receive_queue);
332 } else if (rcv_ready || unlikely(!sk_rmem_alloc_get(&kcm->sk))) {
333 /* Check for the degenerate race with rx_wait where all
334 * data was dequeued (accounted for in kcm_rfree).
338 spin_unlock_bh(&mux->rx_lock);
341 /* Lower sock lock held */
342 static void psock_data_ready(struct sock *sk)
344 struct kcm_psock *psock;
346 read_lock_bh(&sk->sk_callback_lock);
348 psock = (struct kcm_psock *)sk->sk_user_data;
350 strp_data_ready(&psock->strp);
352 read_unlock_bh(&sk->sk_callback_lock);
355 /* Called with lower sock held */
356 static void kcm_rcv_strparser(struct strparser *strp, struct sk_buff *skb)
358 struct kcm_psock *psock = container_of(strp, struct kcm_psock, strp);
359 struct kcm_sock *kcm;
362 kcm = reserve_rx_kcm(psock, skb);
364 /* Unable to reserve a KCM, message is held in psock and strp
370 if (kcm_queue_rcv_skb(&kcm->sk, skb)) {
371 /* Should mean socket buffer full */
372 unreserve_rx_kcm(psock, false);
377 static int kcm_parse_func_strparser(struct strparser *strp, struct sk_buff *skb)
379 struct kcm_psock *psock = container_of(strp, struct kcm_psock, strp);
380 struct bpf_prog *prog = psock->bpf_prog;
383 res = bpf_prog_run_pin_on_cpu(prog, skb);
387 static int kcm_read_sock_done(struct strparser *strp, int err)
389 struct kcm_psock *psock = container_of(strp, struct kcm_psock, strp);
391 unreserve_rx_kcm(psock, true);
396 static void psock_state_change(struct sock *sk)
398 /* TCP only does an EPOLLIN for a half close. Do an EPOLLHUP here
399 * since the application will normally not poll with EPOLLIN
400 * on the TCP sockets.
403 report_csk_error(sk, EPIPE);
406 static void psock_write_space(struct sock *sk)
408 struct kcm_psock *psock;
410 struct kcm_sock *kcm;
412 read_lock_bh(&sk->sk_callback_lock);
414 psock = (struct kcm_psock *)sk->sk_user_data;
415 if (unlikely(!psock))
419 spin_lock_bh(&mux->lock);
421 /* If the psock is reserved, the owning kcm may be waiting for write space; kick its tx_work. */
423 if (kcm && !unlikely(kcm->tx_stopped))
424 queue_work(kcm_wq, &kcm->tx_work);
426 spin_unlock_bh(&mux->lock);
428 read_unlock_bh(&sk->sk_callback_lock);
431 static void unreserve_psock(struct kcm_sock *kcm);
433 /* kcm sock is locked. */
434 static struct kcm_psock *reserve_psock(struct kcm_sock *kcm)
436 struct kcm_mux *mux = kcm->mux;
437 struct kcm_psock *psock;
439 psock = kcm->tx_psock;
441 smp_rmb(); /* Must read tx_psock before tx_wait */
444 WARN_ON(kcm->tx_wait);
445 if (unlikely(psock->tx_stopped))
446 unreserve_psock(kcm);
448 return kcm->tx_psock;
451 spin_lock_bh(&mux->lock);
453 /* Check again under lock to see if a psock was reserved for this
454 * kcm via psock_now_avail.
456 psock = kcm->tx_psock;
457 if (unlikely(psock)) {
458 WARN_ON(kcm->tx_wait);
459 spin_unlock_bh(&mux->lock);
460 return kcm->tx_psock;
463 if (!list_empty(&mux->psocks_avail)) {
464 psock = list_first_entry(&mux->psocks_avail,
467 list_del(&psock->psock_avail_list);
469 list_del(&kcm->wait_psock_list);
470 kcm->tx_wait = false;
472 kcm->tx_psock = psock;
474 KCM_STATS_INCR(psock->stats.reserved);
475 } else if (!kcm->tx_wait) {
476 list_add_tail(&kcm->wait_psock_list,
477 &mux->kcm_tx_waiters);
481 spin_unlock_bh(&mux->lock);
487 static void psock_now_avail(struct kcm_psock *psock)
489 struct kcm_mux *mux = psock->mux;
490 struct kcm_sock *kcm;
492 if (list_empty(&mux->kcm_tx_waiters)) {
493 list_add_tail(&psock->psock_avail_list,
496 kcm = list_first_entry(&mux->kcm_tx_waiters,
499 list_del(&kcm->wait_psock_list);
500 kcm->tx_wait = false;
503 /* Commit before changing tx_psock since that is read in
504 * reserve_psock before queuing work.
508 kcm->tx_psock = psock;
509 KCM_STATS_INCR(psock->stats.reserved);
510 queue_work(kcm_wq, &kcm->tx_work);
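/* Transmit-side handoff: when a psock becomes usable it is either placed on
 * psocks_avail or handed directly to the first KCM socket on kcm_tx_waiters,
 * whose tx_work is then queued to resume kcm_write_msgs.
 */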
514 /* kcm sock is locked. */
515 static void unreserve_psock(struct kcm_sock *kcm)
517 struct kcm_psock *psock;
518 struct kcm_mux *mux = kcm->mux;
520 spin_lock_bh(&mux->lock);
522 psock = kcm->tx_psock;
524 if (WARN_ON(!psock)) {
525 spin_unlock_bh(&mux->lock);
529 smp_rmb(); /* Read tx_psock before tx_wait */
531 kcm_update_tx_mux_stats(mux, psock);
533 WARN_ON(kcm->tx_wait);
535 kcm->tx_psock = NULL;
536 psock->tx_kcm = NULL;
537 KCM_STATS_INCR(psock->stats.unreserved);
539 if (unlikely(psock->tx_stopped)) {
542 list_del(&psock->psock_list);
545 fput(psock->sk->sk_socket->file);
546 kmem_cache_free(kcm_psockp, psock);
549 /* Don't put back on available list */
551 spin_unlock_bh(&mux->lock);
556 psock_now_avail(psock);
558 spin_unlock_bh(&mux->lock);
561 static void kcm_report_tx_retry(struct kcm_sock *kcm)
563 struct kcm_mux *mux = kcm->mux;
565 spin_lock_bh(&mux->lock);
566 KCM_STATS_INCR(mux->stats.tx_retries);
567 spin_unlock_bh(&mux->lock);
570 /* Write any messages ready on the kcm socket. Called with kcm sock lock
571 * held. Return bytes actually sent or error.
573 static int kcm_write_msgs(struct kcm_sock *kcm)
575 struct sock *sk = &kcm->sk;
576 struct kcm_psock *psock;
577 struct sk_buff *skb, *head;
578 struct kcm_tx_msg *txm;
579 unsigned short fragidx, frag_offset;
580 unsigned int sent, total_sent = 0;
583 kcm->tx_wait_more = false;
584 psock = kcm->tx_psock;
585 if (unlikely(psock && psock->tx_stopped)) {
586 /* A reserved psock was aborted asynchronously. Unreserve
587 * it and we'll retry the message.
589 unreserve_psock(kcm);
590 kcm_report_tx_retry(kcm);
591 if (skb_queue_empty(&sk->sk_write_queue))
594 kcm_tx_msg(skb_peek(&sk->sk_write_queue))->sent = 0;
596 } else if (skb_queue_empty(&sk->sk_write_queue)) {
600 head = skb_peek(&sk->sk_write_queue);
601 txm = kcm_tx_msg(head);
604 /* Send of first skbuff in queue already in progress */
605 if (WARN_ON(!psock)) {
610 frag_offset = txm->frag_offset;
611 fragidx = txm->fragidx;
618 psock = reserve_psock(kcm);
624 txm = kcm_tx_msg(head);
628 if (WARN_ON(!skb_shinfo(skb)->nr_frags)) {
633 for (fragidx = 0; fragidx < skb_shinfo(skb)->nr_frags;
639 frag = &skb_shinfo(skb)->frags[fragidx];
640 if (WARN_ON(!skb_frag_size(frag))) {
645 ret = kernel_sendpage(psock->sk->sk_socket,
647 skb_frag_off(frag) + frag_offset,
648 skb_frag_size(frag) - frag_offset,
651 if (ret == -EAGAIN) {
652 /* Save state to try again when there's
653 * write space on the socket
656 txm->frag_offset = frag_offset;
657 txm->fragidx = fragidx;
664 /* Hard failure in sending message, abort this
665 * psock since it has lost framing
666 * synchronization and retry sending the
667 * message from the beginning.
669 kcm_abort_tx_psock(psock, ret ? -ret : EPIPE,
671 unreserve_psock(kcm);
674 kcm_report_tx_retry(kcm);
682 KCM_STATS_ADD(psock->stats.tx_bytes, ret);
683 if (frag_offset < skb_frag_size(frag)) {
684 /* Not finished with this frag */
690 if (skb_has_frag_list(skb)) {
691 skb = skb_shinfo(skb)->frag_list;
694 } else if (skb->next) {
699 /* Successfully sent the whole packet, account for it. */
700 skb_dequeue(&sk->sk_write_queue);
702 sk->sk_wmem_queued -= sent;
704 KCM_STATS_INCR(psock->stats.tx_msgs);
705 } while ((head = skb_peek(&sk->sk_write_queue)));
708 /* Done with all queued messages. */
709 WARN_ON(!skb_queue_empty(&sk->sk_write_queue));
710 unreserve_psock(kcm);
713 /* Check if write space is available */
714 sk->sk_write_space(sk);
716 return total_sent ? : ret;
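/* kcm_write_msgs walks sk_write_queue and pushes each message frag by frag
 * with kernel_sendpage on the reserved psock. On -EAGAIN the current
 * fragidx/frag_offset/sent are saved in kcm_tx_msg(head) so a later call (or
 * tx_work) resumes where it left off; on a hard error the psock is aborted,
 * since framing sync is lost, and the message is retried from the start on
 * another psock.
 */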
719 static void kcm_tx_work(struct work_struct *w)
721 struct kcm_sock *kcm = container_of(w, struct kcm_sock, tx_work);
722 struct sock *sk = &kcm->sk;
727 /* Primarily for SOCK_DGRAM sockets, also handle asynchronous tx
730 err = kcm_write_msgs(kcm);
732 /* Hard failure in write, report error on KCM socket */
733 pr_warn("KCM: Hard failure on kcm_write_msgs %d\n", err);
734 report_csk_error(&kcm->sk, -err);
738 /* Primarily for SOCK_SEQPACKET sockets */
739 if (likely(sk->sk_socket) &&
740 test_bit(SOCK_NOSPACE, &sk->sk_socket->flags)) {
741 clear_bit(SOCK_NOSPACE, &sk->sk_socket->flags);
742 sk->sk_write_space(sk);
749 static void kcm_push(struct kcm_sock *kcm)
751 if (kcm->tx_wait_more)
755 static ssize_t kcm_sendpage(struct socket *sock, struct page *page,
756 int offset, size_t size, int flags)
759 struct sock *sk = sock->sk;
760 struct kcm_sock *kcm = kcm_sk(sk);
761 struct sk_buff *skb = NULL, *head = NULL;
762 long timeo = sock_sndtimeo(sk, flags & MSG_DONTWAIT);
767 if (flags & MSG_SENDPAGE_NOTLAST)
770 /* No MSG_EOR from splice, only look at MSG_MORE */
771 eor = !(flags & MSG_MORE);
775 sk_clear_bit(SOCKWQ_ASYNC_NOSPACE, sk);
782 /* Previously opened message */
784 skb = kcm_tx_msg(head)->last_skb;
785 i = skb_shinfo(skb)->nr_frags;
787 if (skb_can_coalesce(skb, i, page, offset)) {
788 skb_frag_size_add(&skb_shinfo(skb)->frags[i - 1], size);
789 skb_shinfo(skb)->flags |= SKBFL_SHARED_FRAG;
793 if (i >= MAX_SKB_FRAGS) {
794 struct sk_buff *tskb;
796 tskb = alloc_skb(0, sk->sk_allocation);
799 err = sk_stream_wait_memory(sk, &timeo);
805 skb_shinfo(head)->frag_list = tskb;
810 skb->ip_summed = CHECKSUM_UNNECESSARY;
814 /* Call the sk_stream functions to manage the sndbuf mem. */
815 if (!sk_stream_memory_free(sk)) {
817 set_bit(SOCK_NOSPACE, &sk->sk_socket->flags);
818 err = sk_stream_wait_memory(sk, &timeo);
823 head = alloc_skb(0, sk->sk_allocation);
826 err = sk_stream_wait_memory(sk, &timeo);
836 skb_fill_page_desc(skb, i, page, offset, size);
837 skb_shinfo(skb)->flags |= SKBFL_SHARED_FRAG;
841 skb->data_len += size;
842 skb->truesize += size;
843 sk->sk_wmem_queued += size;
844 sk_mem_charge(sk, size);
848 head->data_len += size;
849 head->truesize += size;
853 bool not_busy = skb_queue_empty(&sk->sk_write_queue);
855 /* Message complete, queue it on send buffer */
856 __skb_queue_tail(&sk->sk_write_queue, head);
858 KCM_STATS_INCR(kcm->stats.tx_msgs);
860 if (flags & MSG_BATCH) {
861 kcm->tx_wait_more = true;
862 } else if (kcm->tx_wait_more || not_busy) {
863 err = kcm_write_msgs(kcm);
865 /* We got a hard error in write_msgs but have
866 * already queued this message. Report an error
867 * in the socket, but don't affect return value
870 pr_warn("KCM: Hard failure on kcm_write_msgs\n");
871 report_csk_error(&kcm->sk, -err);
875 /* Message not complete, save state */
877 kcm_tx_msg(head)->last_skb = skb;
880 KCM_STATS_ADD(kcm->stats.tx_bytes, size);
888 err = sk_stream_error(sk, flags, err);
890 /* make sure we wake any epoll edge trigger waiter */
891 if (unlikely(skb_queue_len(&sk->sk_write_queue) == 0 && err == -EAGAIN))
892 sk->sk_write_space(sk);
898 static int kcm_sendmsg(struct socket *sock, struct msghdr *msg, size_t len)
900 struct sock *sk = sock->sk;
901 struct kcm_sock *kcm = kcm_sk(sk);
902 struct sk_buff *skb = NULL, *head = NULL;
903 size_t copy, copied = 0;
904 long timeo = sock_sndtimeo(sk, msg->msg_flags & MSG_DONTWAIT);
905 int eor = (sock->type == SOCK_DGRAM) ?
906 !(msg->msg_flags & MSG_MORE) : !!(msg->msg_flags & MSG_EOR);
911 /* Per tcp_sendmsg this should be in poll */
912 sk_clear_bit(SOCKWQ_ASYNC_NOSPACE, sk);
918 /* Previously opened message */
920 skb = kcm_tx_msg(head)->last_skb;
924 /* Call the sk_stream functions to manage the sndbuf mem. */
925 if (!sk_stream_memory_free(sk)) {
927 set_bit(SOCK_NOSPACE, &sk->sk_socket->flags);
928 err = sk_stream_wait_memory(sk, &timeo);
933 if (msg_data_left(msg)) {
934 /* New message, alloc head skb */
935 head = alloc_skb(0, sk->sk_allocation);
938 err = sk_stream_wait_memory(sk, &timeo);
942 head = alloc_skb(0, sk->sk_allocation);
947 /* Set ip_summed to CHECKSUM_UNNECESSARY to avoid calling
948 * csum_and_copy_from_iter from skb_do_copy_data_nocache.
950 skb->ip_summed = CHECKSUM_UNNECESSARY;
954 while (msg_data_left(msg)) {
956 int i = skb_shinfo(skb)->nr_frags;
957 struct page_frag *pfrag = sk_page_frag(sk);
959 if (!sk_page_frag_refill(sk, pfrag))
960 goto wait_for_memory;
962 if (!skb_can_coalesce(skb, i, pfrag->page,
964 if (i == MAX_SKB_FRAGS) {
965 struct sk_buff *tskb;
967 tskb = alloc_skb(0, sk->sk_allocation);
969 goto wait_for_memory;
972 skb_shinfo(head)->frag_list = tskb;
977 skb->ip_summed = CHECKSUM_UNNECESSARY;
983 copy = min_t(int, msg_data_left(msg),
984 pfrag->size - pfrag->offset);
986 if (!sk_wmem_schedule(sk, copy))
987 goto wait_for_memory;
989 err = skb_copy_to_page_nocache(sk, &msg->msg_iter, skb,
996 /* Update the skb. */
998 skb_frag_size_add(&skb_shinfo(skb)->frags[i - 1], copy);
1000 skb_fill_page_desc(skb, i, pfrag->page,
1001 pfrag->offset, copy);
1002 get_page(pfrag->page);
1005 pfrag->offset += copy;
1009 head->data_len += copy;
1016 err = sk_stream_wait_memory(sk, &timeo);
1022 bool not_busy = skb_queue_empty(&sk->sk_write_queue);
1025 /* Message complete, queue it on send buffer */
1026 __skb_queue_tail(&sk->sk_write_queue, head);
1027 kcm->seq_skb = NULL;
1028 KCM_STATS_INCR(kcm->stats.tx_msgs);
1031 if (msg->msg_flags & MSG_BATCH) {
1032 kcm->tx_wait_more = true;
1033 } else if (kcm->tx_wait_more || not_busy) {
1034 err = kcm_write_msgs(kcm);
1036 /* We got a hard error in write_msgs but have
1037 * already queued this message. Report an error
1038 * in the socket, but don't affect return value
1041 pr_warn("KCM: Hard failure on kcm_write_msgs\n");
1042 report_csk_error(&kcm->sk, -err);
1046 /* Message not complete, save state */
1049 kcm->seq_skb = head;
1050 kcm_tx_msg(head)->last_skb = skb;
1054 KCM_STATS_ADD(kcm->stats.tx_bytes, copied);
1062 if (copied && sock->type == SOCK_SEQPACKET) {
1063 /* Wrote some bytes before encountering an
1064 * error, return partial success.
1066 goto partial_message;
1069 if (skb_has_frag_list(head)) {
1070 kfree_skb_list(skb_shinfo(head)->frag_list);
1071 skb_shinfo(head)->frag_list = NULL;
1074 if (head != kcm->seq_skb)
1077 err = sk_stream_error(sk, msg->msg_flags, err);
1079 /* make sure we wake any epoll edge trigger waiter */
1080 if (unlikely(skb_queue_len(&sk->sk_write_queue) == 0 && err == -EAGAIN))
1081 sk->sk_write_space(sk);
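/* Send-side framing: for SOCK_DGRAM every sendmsg without MSG_MORE ends a
 * message, for SOCK_SEQPACKET MSG_EOR ends it. A message left open is
 * remembered in kcm->seq_skb and continued by the next sendmsg. MSG_BATCH
 * queues the completed message but defers transmission (tx_wait_more) until
 * a later send without MSG_BATCH flushes the queue via kcm_write_msgs.
 */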
1087 static struct sk_buff *kcm_wait_data(struct sock *sk, int flags,
1088 long timeo, int *err)
1090 struct sk_buff *skb;
1092 while (!(skb = skb_peek(&sk->sk_receive_queue))) {
1094 *err = sock_error(sk);
1098 if (sock_flag(sk, SOCK_DONE))
1101 if ((flags & MSG_DONTWAIT) || !timeo) {
1106 sk_wait_data(sk, &timeo, NULL);
1108 /* Handle signals */
1109 if (signal_pending(current)) {
1110 *err = sock_intr_errno(timeo);
1118 static int kcm_recvmsg(struct socket *sock, struct msghdr *msg,
1119 size_t len, int flags)
1121 struct sock *sk = sock->sk;
1122 struct kcm_sock *kcm = kcm_sk(sk);
1125 struct strp_msg *stm;
1127 struct sk_buff *skb;
1129 timeo = sock_rcvtimeo(sk, flags & MSG_DONTWAIT);
1133 skb = kcm_wait_data(sk, flags, timeo, &err);
1137 /* Okay, have a message on the receive queue */
1139 stm = strp_msg(skb);
1141 if (len > stm->full_len)
1142 len = stm->full_len;
1144 err = skb_copy_datagram_msg(skb, stm->offset, msg, len);
1149 if (likely(!(flags & MSG_PEEK))) {
1150 KCM_STATS_ADD(kcm->stats.rx_bytes, copied);
1151 if (copied < stm->full_len) {
1152 if (sock->type == SOCK_DGRAM) {
1153 /* Truncated message */
1154 msg->msg_flags |= MSG_TRUNC;
1157 stm->offset += copied;
1158 stm->full_len -= copied;
1161 /* Finished with message */
1162 msg->msg_flags |= MSG_EOR;
1163 KCM_STATS_INCR(kcm->stats.rx_msgs);
1164 skb_unlink(skb, &sk->sk_receive_queue);
1172 return copied ? : err;
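/* Receive semantics: recvmsg returns data from at most one parsed message.
 * For SOCK_DGRAM a short read truncates the message and sets MSG_TRUNC; for
 * SOCK_SEQPACKET the unread remainder stays queued (stm->offset and
 * stm->full_len are advanced) for later reads. MSG_EOR is set once the
 * message is fully consumed and it is then unlinked from the receive queue.
 */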
1175 static ssize_t kcm_splice_read(struct socket *sock, loff_t *ppos,
1176 struct pipe_inode_info *pipe, size_t len,
1179 struct sock *sk = sock->sk;
1180 struct kcm_sock *kcm = kcm_sk(sk);
1182 struct strp_msg *stm;
1185 struct sk_buff *skb;
1187 /* Only support splice for SOCK_SEQPACKET */
1189 timeo = sock_rcvtimeo(sk, flags & MSG_DONTWAIT);
1193 skb = kcm_wait_data(sk, flags, timeo, &err);
1197 /* Okay, have a message on the receive queue */
1199 stm = strp_msg(skb);
1201 if (len > stm->full_len)
1202 len = stm->full_len;
1204 copied = skb_splice_bits(skb, sk, stm->offset, pipe, len, flags);
1210 KCM_STATS_ADD(kcm->stats.rx_bytes, copied);
1212 stm->offset += copied;
1213 stm->full_len -= copied;
1215 /* We have no way to return MSG_EOR. If all the bytes have been
1216 * read we still leave the message in the receive socket buffer.
1217 * A subsequent recvmsg needs to be done to return MSG_EOR and
1218 * finish reading the message.
1231 /* kcm sock lock held */
1232 static void kcm_recv_disable(struct kcm_sock *kcm)
1234 struct kcm_mux *mux = kcm->mux;
1236 if (kcm->rx_disabled)
1239 spin_lock_bh(&mux->rx_lock);
1241 kcm->rx_disabled = 1;
1243 /* If a psock is reserved we'll do cleanup in unreserve */
1244 if (!kcm->rx_psock) {
1246 list_del(&kcm->wait_rx_list);
1247 kcm->rx_wait = false;
1250 requeue_rx_msgs(mux, &kcm->sk.sk_receive_queue);
1253 spin_unlock_bh(&mux->rx_lock);
1256 /* kcm sock lock held */
1257 static void kcm_recv_enable(struct kcm_sock *kcm)
1259 struct kcm_mux *mux = kcm->mux;
1261 if (!kcm->rx_disabled)
1264 spin_lock_bh(&mux->rx_lock);
1266 kcm->rx_disabled = 0;
1269 spin_unlock_bh(&mux->rx_lock);
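/* KCM_RECV_DISABLE (see kcm_setsockopt below) toggles the two helpers above:
 * disabling receive takes the socket off the rx waiter list and requeues any
 * already-queued messages to other KCM sockets on the mux; enabling makes it
 * eligible to receive again.
 */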
1272 static int kcm_setsockopt(struct socket *sock, int level, int optname,
1273 sockptr_t optval, unsigned int optlen)
1275 struct kcm_sock *kcm = kcm_sk(sock->sk);
1279 if (level != SOL_KCM)
1280 return -ENOPROTOOPT;
1282 if (optlen < sizeof(int))
1285 if (copy_from_sockptr(&val, optval, sizeof(int)))
1288 valbool = val ? 1 : 0;
1291 case KCM_RECV_DISABLE:
1292 lock_sock(&kcm->sk);
1294 kcm_recv_disable(kcm);
1296 kcm_recv_enable(kcm);
1297 release_sock(&kcm->sk);
1306 static int kcm_getsockopt(struct socket *sock, int level, int optname,
1307 char __user *optval, int __user *optlen)
1309 struct kcm_sock *kcm = kcm_sk(sock->sk);
1312 if (level != SOL_KCM)
1313 return -ENOPROTOOPT;
1315 if (get_user(len, optlen))
1318 len = min_t(unsigned int, len, sizeof(int));
1323 case KCM_RECV_DISABLE:
1324 val = kcm->rx_disabled;
1327 return -ENOPROTOOPT;
1330 if (put_user(len, optlen))
1332 if (copy_to_user(optval, &val, len))
1337 static void init_kcm_sock(struct kcm_sock *kcm, struct kcm_mux *mux)
1339 struct kcm_sock *tkcm;
1340 struct list_head *head;
1343 /* For SOCK_SEQPACKET sock type, datagram_poll checks the sk_state, so
1344 * we set sk_state, otherwise epoll_wait always returns right away with EPOLLHUP.
1347 kcm->sk.sk_state = TCP_ESTABLISHED;
1349 /* Add to mux's kcm sockets list */
1351 spin_lock_bh(&mux->lock);
1353 head = &mux->kcm_socks;
1354 list_for_each_entry(tkcm, &mux->kcm_socks, kcm_sock_list) {
1355 if (tkcm->index != index)
1357 head = &tkcm->kcm_sock_list;
1361 list_add(&kcm->kcm_sock_list, head);
1364 mux->kcm_socks_cnt++;
1365 spin_unlock_bh(&mux->lock);
1367 INIT_WORK(&kcm->tx_work, kcm_tx_work);
1369 spin_lock_bh(&mux->rx_lock);
1371 spin_unlock_bh(&mux->rx_lock);
1374 static int kcm_attach(struct socket *sock, struct socket *csock,
1375 struct bpf_prog *prog)
1377 struct kcm_sock *kcm = kcm_sk(sock->sk);
1378 struct kcm_mux *mux = kcm->mux;
1380 struct kcm_psock *psock = NULL, *tpsock;
1381 struct list_head *head;
1383 static const struct strp_callbacks cb = {
1384 .rcv_msg = kcm_rcv_strparser,
1385 .parse_msg = kcm_parse_func_strparser,
1386 .read_sock_done = kcm_read_sock_done,
1396 /* Only allow TCP sockets to be attached for now */
1397 if ((csk->sk_family != AF_INET && csk->sk_family != AF_INET6) ||
1398 csk->sk_protocol != IPPROTO_TCP) {
1403 /* Don't allow listeners or closed sockets */
1404 if (csk->sk_state == TCP_LISTEN || csk->sk_state == TCP_CLOSE) {
1409 psock = kmem_cache_zalloc(kcm_psockp, GFP_KERNEL);
1417 psock->bpf_prog = prog;
1419 err = strp_init(&psock->strp, csk, &cb);
1421 kmem_cache_free(kcm_psockp, psock);
1425 write_lock_bh(&csk->sk_callback_lock);
1427 /* Check if sk_user_data is already used by KCM or someone else.
1428 * Must be done under lock to prevent race conditions.
1430 if (csk->sk_user_data) {
1431 write_unlock_bh(&csk->sk_callback_lock);
1432 strp_stop(&psock->strp);
1433 strp_done(&psock->strp);
1434 kmem_cache_free(kcm_psockp, psock);
1439 psock->save_data_ready = csk->sk_data_ready;
1440 psock->save_write_space = csk->sk_write_space;
1441 psock->save_state_change = csk->sk_state_change;
1442 csk->sk_user_data = psock;
1443 csk->sk_data_ready = psock_data_ready;
1444 csk->sk_write_space = psock_write_space;
1445 csk->sk_state_change = psock_state_change;
1447 write_unlock_bh(&csk->sk_callback_lock);
1451 /* Finished initialization, now add the psock to the MUX. */
1452 spin_lock_bh(&mux->lock);
1453 head = &mux->psocks;
1454 list_for_each_entry(tpsock, &mux->psocks, psock_list) {
1455 if (tpsock->index != index)
1457 head = &tpsock->psock_list;
1461 list_add(&psock->psock_list, head);
1462 psock->index = index;
1464 KCM_STATS_INCR(mux->stats.psock_attach);
1466 psock_now_avail(psock);
1467 spin_unlock_bh(&mux->lock);
1469 /* Schedule RX work in case there are already bytes queued */
1470 strp_check_rcv(&psock->strp);
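/* Attach path summary: kcm_attach takes over the TCP socket's callbacks
 * under sk_callback_lock (sk_user_data now points at the psock), starts a
 * strparser instance whose parse callback runs the supplied BPF program to
 * find message boundaries, links the psock into the mux, and immediately
 * marks it available for transmit and checks for already-queued receive data.
 */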
1478 static int kcm_attach_ioctl(struct socket *sock, struct kcm_attach *info)
1480 struct socket *csock;
1481 struct bpf_prog *prog;
1484 csock = sockfd_lookup(info->fd, &err);
1488 prog = bpf_prog_get_type(info->bpf_fd, BPF_PROG_TYPE_SOCKET_FILTER);
1490 err = PTR_ERR(prog);
1494 err = kcm_attach(sock, csock, prog);
1500 /* Keep reference on file also */
1508 static void kcm_unattach(struct kcm_psock *psock)
1510 struct sock *csk = psock->sk;
1511 struct kcm_mux *mux = psock->mux;
1515 /* Stop getting callbacks from TCP socket. After this there should
1516 * be no way to reserve a kcm for this psock.
1518 write_lock_bh(&csk->sk_callback_lock);
1519 csk->sk_user_data = NULL;
1520 csk->sk_data_ready = psock->save_data_ready;
1521 csk->sk_write_space = psock->save_write_space;
1522 csk->sk_state_change = psock->save_state_change;
1523 strp_stop(&psock->strp);
1525 if (WARN_ON(psock->rx_kcm)) {
1526 write_unlock_bh(&csk->sk_callback_lock);
1531 spin_lock_bh(&mux->rx_lock);
1533 /* Stop receiver activities. After this point psock should not be
1534 * able to get onto ready list either through callbacks or work.
1536 if (psock->ready_rx_msg) {
1537 list_del(&psock->psock_ready_list);
1538 kfree_skb(psock->ready_rx_msg);
1539 psock->ready_rx_msg = NULL;
1540 KCM_STATS_INCR(mux->stats.rx_ready_drops);
1543 spin_unlock_bh(&mux->rx_lock);
1545 write_unlock_bh(&csk->sk_callback_lock);
1547 /* Call strp_done without sock lock */
1549 strp_done(&psock->strp);
1552 bpf_prog_put(psock->bpf_prog);
1554 spin_lock_bh(&mux->lock);
1556 aggregate_psock_stats(&psock->stats, &mux->aggregate_psock_stats);
1557 save_strp_stats(&psock->strp, &mux->aggregate_strp_stats);
1559 KCM_STATS_INCR(mux->stats.psock_unattach);
1561 if (psock->tx_kcm) {
1562 /* psock was reserved. Just mark it finished and we will clean
1563 * up in the kcm paths; we need the kcm lock, which cannot be acquired here.
1566 KCM_STATS_INCR(mux->stats.psock_unattach_rsvd);
1567 spin_unlock_bh(&mux->lock);
1569 /* We are unattaching a socket that is reserved. Abort the
1570 * socket since we may be out of sync in sending on it. We need
1571 * to do this without the mux lock.
1573 kcm_abort_tx_psock(psock, EPIPE, false);
1575 spin_lock_bh(&mux->lock);
1576 if (!psock->tx_kcm) {
1577 /* psock was unreserved in the window while the mux was unlocked */
1582 /* Commit done before queuing work to process it */
1585 /* Queue tx work to make sure psock->done is handled */
1586 queue_work(kcm_wq, &psock->tx_kcm->tx_work);
1587 spin_unlock_bh(&mux->lock);
1590 if (!psock->tx_stopped)
1591 list_del(&psock->psock_avail_list);
1592 list_del(&psock->psock_list);
1594 spin_unlock_bh(&mux->lock);
1597 fput(csk->sk_socket->file);
1598 kmem_cache_free(kcm_psockp, psock);
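/* Unattach ordering matters: the saved socket callbacks are restored and the
 * strparser stopped before receive state is torn down, and strp_done runs
 * without the callback lock held. If the psock is still reserved for
 * transmit, teardown is deferred to tx_work via psock->done rather than
 * completed here.
 */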
1604 static int kcm_unattach_ioctl(struct socket *sock, struct kcm_unattach *info)
1606 struct kcm_sock *kcm = kcm_sk(sock->sk);
1607 struct kcm_mux *mux = kcm->mux;
1608 struct kcm_psock *psock;
1609 struct socket *csock;
1613 csock = sockfd_lookup(info->fd, &err);
1625 spin_lock_bh(&mux->lock);
1627 list_for_each_entry(psock, &mux->psocks, psock_list) {
1628 if (psock->sk != csk)
1631 /* Found the matching psock */
1633 if (psock->unattaching || WARN_ON(psock->done)) {
1638 psock->unattaching = 1;
1640 spin_unlock_bh(&mux->lock);
1642 /* Lower socket lock should already be held */
1643 kcm_unattach(psock);
1649 spin_unlock_bh(&mux->lock);
1656 static struct proto kcm_proto = {
1658 .owner = THIS_MODULE,
1659 .obj_size = sizeof(struct kcm_sock),
1662 /* Clone a kcm socket. */
1663 static struct file *kcm_clone(struct socket *osock)
1665 struct socket *newsock;
1668 newsock = sock_alloc();
1670 return ERR_PTR(-ENFILE);
1672 newsock->type = osock->type;
1673 newsock->ops = osock->ops;
1675 __module_get(newsock->ops->owner);
1677 newsk = sk_alloc(sock_net(osock->sk), PF_KCM, GFP_KERNEL,
1680 sock_release(newsock);
1681 return ERR_PTR(-ENOMEM);
1683 sock_init_data(newsock, newsk);
1684 init_kcm_sock(kcm_sk(newsk), kcm_sk(osock->sk)->mux);
1686 return sock_alloc_file(newsock, 0, osock->sk->sk_prot_creator->name);
1689 static int kcm_ioctl(struct socket *sock, unsigned int cmd, unsigned long arg)
1694 case SIOCKCMATTACH: {
1695 struct kcm_attach info;
1697 if (copy_from_user(&info, (void __user *)arg, sizeof(info)))
1700 err = kcm_attach_ioctl(sock, &info);
1704 case SIOCKCMUNATTACH: {
1705 struct kcm_unattach info;
1707 if (copy_from_user(&info, (void __user *)arg, sizeof(info)))
1710 err = kcm_unattach_ioctl(sock, &info);
1714 case SIOCKCMCLONE: {
1715 struct kcm_clone info;
1718 info.fd = get_unused_fd_flags(0);
1719 if (unlikely(info.fd < 0))
1722 file = kcm_clone(sock);
1724 put_unused_fd(info.fd);
1725 return PTR_ERR(file);
1727 if (copy_to_user((void __user *)arg, &info,
1729 put_unused_fd(info.fd);
1733 fd_install(info.fd, file);
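/* A minimal userspace sketch of the ioctls handled above (illustrative only;
 * tcp_fd is assumed to be a connected TCP socket and prog_fd a loaded
 * BPF_PROG_TYPE_SOCKET_FILTER program returning the message length):
 *
 *	int kcm_fd = socket(AF_KCM, SOCK_SEQPACKET, KCMPROTO_CONNECTED);
 *	struct kcm_attach attach = { .fd = tcp_fd, .bpf_fd = prog_fd };
 *	ioctl(kcm_fd, SIOCKCMATTACH, &attach);       // attach the psock
 *	struct kcm_clone clone_info;
 *	ioctl(kcm_fd, SIOCKCMCLONE, &clone_info);    // clone_info.fd is a new KCM socket on the same mux
 */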
1745 static void free_mux(struct rcu_head *rcu)
1747 struct kcm_mux *mux = container_of(rcu,
1748 struct kcm_mux, rcu);
1750 kmem_cache_free(kcm_muxp, mux);
1753 static void release_mux(struct kcm_mux *mux)
1755 struct kcm_net *knet = mux->knet;
1756 struct kcm_psock *psock, *tmp_psock;
1758 /* Release psocks */
1759 list_for_each_entry_safe(psock, tmp_psock,
1760 &mux->psocks, psock_list) {
1761 if (!WARN_ON(psock->unattaching))
1762 kcm_unattach(psock);
1765 if (WARN_ON(mux->psocks_cnt))
1768 __skb_queue_purge(&mux->rx_hold_queue);
1770 mutex_lock(&knet->mutex);
1771 aggregate_mux_stats(&mux->stats, &knet->aggregate_mux_stats);
1772 aggregate_psock_stats(&mux->aggregate_psock_stats,
1773 &knet->aggregate_psock_stats);
1774 aggregate_strp_stats(&mux->aggregate_strp_stats,
1775 &knet->aggregate_strp_stats);
1776 list_del_rcu(&mux->kcm_mux_list);
1778 mutex_unlock(&knet->mutex);
1780 call_rcu(&mux->rcu, free_mux);
1783 static void kcm_done(struct kcm_sock *kcm)
1785 struct kcm_mux *mux = kcm->mux;
1786 struct sock *sk = &kcm->sk;
1789 spin_lock_bh(&mux->rx_lock);
1790 if (kcm->rx_psock) {
1791 /* Cleanup in unreserve_rx_kcm */
1793 kcm->rx_disabled = 1;
1795 spin_unlock_bh(&mux->rx_lock);
1800 list_del(&kcm->wait_rx_list);
1801 kcm->rx_wait = false;
1803 /* Move any pending receive messages to other kcm sockets */
1804 requeue_rx_msgs(mux, &sk->sk_receive_queue);
1806 spin_unlock_bh(&mux->rx_lock);
1808 if (WARN_ON(sk_rmem_alloc_get(sk)))
1811 /* Detach from MUX */
1812 spin_lock_bh(&mux->lock);
1814 list_del(&kcm->kcm_sock_list);
1815 mux->kcm_socks_cnt--;
1816 socks_cnt = mux->kcm_socks_cnt;
1818 spin_unlock_bh(&mux->lock);
1821 /* We are done with the mux now. */
1825 WARN_ON(kcm->rx_wait);
1830 /* Called by kcm_release to close a KCM socket.
1831 * If this is the last KCM socket on the MUX, destroy the MUX.
1833 static int kcm_release(struct socket *sock)
1835 struct sock *sk = sock->sk;
1836 struct kcm_sock *kcm;
1837 struct kcm_mux *mux;
1838 struct kcm_psock *psock;
1847 kfree_skb(kcm->seq_skb);
1850 /* Purge queue under lock to avoid race condition with tx_work trying
1851 * to act when queue is nonempty. If tx_work runs after this point
1852 * it will just return.
1854 __skb_queue_purge(&sk->sk_write_queue);
1856 /* Set tx_stopped. This is checked when psock is bound to a kcm and we
1857 * get a writespace callback. This prevents further work being queued
1858 * from the callback (unbinding the psock occurs after canceling work).
1860 kcm->tx_stopped = 1;
1864 spin_lock_bh(&mux->lock);
1866 /* Take off the tx_wait list; after this point there should be no way
1867 * that a psock will be assigned to this kcm.
1869 list_del(&kcm->wait_psock_list);
1870 kcm->tx_wait = false;
1872 spin_unlock_bh(&mux->lock);
1874 /* Cancel work. After this point there should be no outside references
1875 * to the kcm socket.
1877 cancel_work_sync(&kcm->tx_work);
1880 psock = kcm->tx_psock;
1882 /* A psock was reserved, so we need to kill it since it
1883 * may already have some bytes queued from a message. We
1884 * need to do this after removing kcm from tx_wait list.
1886 kcm_abort_tx_psock(psock, EPIPE, false);
1887 unreserve_psock(kcm);
1891 WARN_ON(kcm->tx_wait);
1892 WARN_ON(kcm->tx_psock);
1901 static const struct proto_ops kcm_dgram_ops = {
1903 .owner = THIS_MODULE,
1904 .release = kcm_release,
1905 .bind = sock_no_bind,
1906 .connect = sock_no_connect,
1907 .socketpair = sock_no_socketpair,
1908 .accept = sock_no_accept,
1909 .getname = sock_no_getname,
1910 .poll = datagram_poll,
1912 .listen = sock_no_listen,
1913 .shutdown = sock_no_shutdown,
1914 .setsockopt = kcm_setsockopt,
1915 .getsockopt = kcm_getsockopt,
1916 .sendmsg = kcm_sendmsg,
1917 .recvmsg = kcm_recvmsg,
1918 .mmap = sock_no_mmap,
1919 .sendpage = kcm_sendpage,
1922 static const struct proto_ops kcm_seqpacket_ops = {
1924 .owner = THIS_MODULE,
1925 .release = kcm_release,
1926 .bind = sock_no_bind,
1927 .connect = sock_no_connect,
1928 .socketpair = sock_no_socketpair,
1929 .accept = sock_no_accept,
1930 .getname = sock_no_getname,
1931 .poll = datagram_poll,
1933 .listen = sock_no_listen,
1934 .shutdown = sock_no_shutdown,
1935 .setsockopt = kcm_setsockopt,
1936 .getsockopt = kcm_getsockopt,
1937 .sendmsg = kcm_sendmsg,
1938 .recvmsg = kcm_recvmsg,
1939 .mmap = sock_no_mmap,
1940 .sendpage = kcm_sendpage,
1941 .splice_read = kcm_splice_read,
1944 /* Create proto operations for kcm sockets */
1945 static int kcm_create(struct net *net, struct socket *sock,
1946 int protocol, int kern)
1948 struct kcm_net *knet = net_generic(net, kcm_net_id);
1950 struct kcm_mux *mux;
1952 switch (sock->type) {
1954 sock->ops = &kcm_dgram_ops;
1956 case SOCK_SEQPACKET:
1957 sock->ops = &kcm_seqpacket_ops;
1960 return -ESOCKTNOSUPPORT;
1963 if (protocol != KCMPROTO_CONNECTED)
1964 return -EPROTONOSUPPORT;
1966 sk = sk_alloc(net, PF_KCM, GFP_KERNEL, &kcm_proto, kern);
1970 /* Allocate a kcm mux, shared between KCM sockets */
1971 mux = kmem_cache_zalloc(kcm_muxp, GFP_KERNEL);
1977 spin_lock_init(&mux->lock);
1978 spin_lock_init(&mux->rx_lock);
1979 INIT_LIST_HEAD(&mux->kcm_socks);
1980 INIT_LIST_HEAD(&mux->kcm_rx_waiters);
1981 INIT_LIST_HEAD(&mux->kcm_tx_waiters);
1983 INIT_LIST_HEAD(&mux->psocks);
1984 INIT_LIST_HEAD(&mux->psocks_ready);
1985 INIT_LIST_HEAD(&mux->psocks_avail);
1989 /* Add new MUX to list */
1990 mutex_lock(&knet->mutex);
1991 list_add_rcu(&mux->kcm_mux_list, &knet->mux_list);
1993 mutex_unlock(&knet->mutex);
1995 skb_queue_head_init(&mux->rx_hold_queue);
1997 /* Init KCM socket */
1998 sock_init_data(sock, sk);
1999 init_kcm_sock(kcm_sk(sk), mux);
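/* Each socket(AF_KCM, ...) call creates a fresh mux with exactly one KCM
 * socket on it; additional KCM sockets sharing the same mux are obtained
 * with SIOCKCMCLONE (see kcm_clone above), and the mux is released when its
 * last KCM socket is closed (see kcm_release/kcm_done).
 */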
2004 static const struct net_proto_family kcm_family_ops = {
2006 .create = kcm_create,
2007 .owner = THIS_MODULE,
2010 static __net_init int kcm_init_net(struct net *net)
2012 struct kcm_net *knet = net_generic(net, kcm_net_id);
2014 INIT_LIST_HEAD_RCU(&knet->mux_list);
2015 mutex_init(&knet->mutex);
2020 static __net_exit void kcm_exit_net(struct net *net)
2022 struct kcm_net *knet = net_generic(net, kcm_net_id);
2024 /* All KCM sockets should be closed at this point, which should mean
2025 * that all multiplexors and psocks have been destroyed.
2027 WARN_ON(!list_empty(&knet->mux_list));
2030 static struct pernet_operations kcm_net_ops = {
2031 .init = kcm_init_net,
2032 .exit = kcm_exit_net,
2034 .size = sizeof(struct kcm_net),
2037 static int __init kcm_init(void)
2041 kcm_muxp = kmem_cache_create("kcm_mux_cache",
2042 sizeof(struct kcm_mux), 0,
2043 SLAB_HWCACHE_ALIGN, NULL);
2047 kcm_psockp = kmem_cache_create("kcm_psock_cache",
2048 sizeof(struct kcm_psock), 0,
2049 SLAB_HWCACHE_ALIGN, NULL);
2053 kcm_wq = create_singlethread_workqueue("kkcmd");
2057 err = proto_register(&kcm_proto, 1);
2061 err = register_pernet_device(&kcm_net_ops);
2065 err = sock_register(&kcm_family_ops);
2067 goto sock_register_fail;
2069 err = kcm_proc_init();
2071 goto proc_init_fail;
2076 sock_unregister(PF_KCM);
2079 unregister_pernet_device(&kcm_net_ops);
2082 proto_unregister(&kcm_proto);
2085 kmem_cache_destroy(kcm_muxp);
2086 kmem_cache_destroy(kcm_psockp);
2089 destroy_workqueue(kcm_wq);
2094 static void __exit kcm_exit(void)
2097 sock_unregister(PF_KCM);
2098 unregister_pernet_device(&kcm_net_ops);
2099 proto_unregister(&kcm_proto);
2100 destroy_workqueue(kcm_wq);
2102 kmem_cache_destroy(kcm_muxp);
2103 kmem_cache_destroy(kcm_psockp);
2106 module_init(kcm_init);
2107 module_exit(kcm_exit);
2109 MODULE_LICENSE("GPL");
2110 MODULE_ALIAS_NETPROTO(PF_KCM);