// SPDX-License-Identifier: GPL-2.0
/* Multipath TCP
 *
 * Copyright (c) 2017 - 2019, Intel Corporation.
 */

#define pr_fmt(fmt) "MPTCP: " fmt
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/netdevice.h>
#include <linux/sched/signal.h>
#include <linux/atomic.h>
#include <net/sock.h>
#include <net/inet_common.h>
#include <net/inet_hashtables.h>
#include <net/protocol.h>
#include <net/tcp.h>
#if IS_ENABLED(CONFIG_MPTCP_IPV6)
#include <net/transp_v6.h>
#endif
#include <net/mptcp.h>
#include "protocol.h"
#include "mib.h"
#define MPTCP_SAME_STATE TCP_MAX_STATES

#if IS_ENABLED(CONFIG_MPTCP_IPV6)
struct mptcp6_sock {
	struct mptcp_sock msk;
	struct ipv6_pinfo np;
};
#endif

struct mptcp_skb_cb {
	u32 offset;
};

#define MPTCP_SKB_CB(__skb)	((struct mptcp_skb_cb *)&((__skb)->cb[0]))
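/* Note: MPTCP-level metadata is stashed in the skb control buffer only once
 * the skb has been moved to the msk receive queue, when TCP no longer owns
 * skb->cb.  The size assumption is enforced by the BUILD_BUG_ON() in
 * mptcp_proto_init() below.
 */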
static struct percpu_counter mptcp_sockets_allocated;
/* If msk has an initial subflow socket, and the MP_CAPABLE handshake has not
 * completed yet or has failed, return the subflow socket.
 * Otherwise return NULL.
 */
static struct socket *__mptcp_nmpc_socket(const struct mptcp_sock *msk)
{
	if (!msk->subflow || READ_ONCE(msk->can_ack))
		return NULL;

	return msk->subflow;
}
static bool __mptcp_needs_tcp_fallback(const struct mptcp_sock *msk)
{
	return msk->first && !sk_is_mptcp(msk->first);
}
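/* Fallback to plain TCP happens when the first subflow completed the
 * handshake without the MP_CAPABLE option being negotiated; from that point
 * on the msk redirects all socket ops to that single subflow, see the
 * helpers below.
 */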
static struct socket *mptcp_is_tcpsk(struct sock *sk)
{
	struct socket *sock = sk->sk_socket;

	if (sock->sk != sk)
		return NULL;

	if (unlikely(sk->sk_prot == &tcp_prot)) {
		/* we are being invoked after mptcp_accept() has
		 * accepted a non-mp-capable flow: sk is a tcp_sk,
		 * not an mptcp one.
		 *
		 * Hand the socket over to tcp so all further socket ops
		 * bypass mptcp.
		 */
		sock->ops = &inet_stream_ops;
		return sock;
#if IS_ENABLED(CONFIG_MPTCP_IPV6)
	} else if (unlikely(sk->sk_prot == &tcpv6_prot)) {
		sock->ops = &inet6_stream_ops;
		return sock;
#endif
	}

	return NULL;
}
static struct socket *__mptcp_tcp_fallback(struct mptcp_sock *msk)
{
	struct socket *sock;

	sock_owned_by_me((const struct sock *)msk);

	sock = mptcp_is_tcpsk((struct sock *)msk);
	if (unlikely(sock))
		return sock;

	if (likely(!__mptcp_needs_tcp_fallback(msk)))
		return NULL;

	return msk->subflow;
}
static bool __mptcp_can_create_subflow(const struct mptcp_sock *msk)
{
	return !msk->pm.server_side;
}
static struct socket *__mptcp_socket_create(struct mptcp_sock *msk, int state)
{
	struct mptcp_subflow_context *subflow;
	struct sock *sk = (struct sock *)msk;
	struct socket *ssock;
	int err;

	ssock = __mptcp_tcp_fallback(msk);
	if (unlikely(ssock))
		return ssock;

	ssock = __mptcp_nmpc_socket(msk);
	if (ssock)
		goto set_state;

	if (!__mptcp_can_create_subflow(msk))
		return ERR_PTR(-EINVAL);

	err = mptcp_subflow_create_socket(sk, &ssock);
	if (err)
		return ERR_PTR(err);

	msk->first = ssock->sk;
	msk->subflow = ssock;
	subflow = mptcp_subflow_ctx(ssock->sk);
	list_add(&subflow->node, &msk->conn_list);
	subflow->request_mptcp = 1;

set_state:
	if (state != MPTCP_SAME_STATE)
		inet_sk_state_store(sk, state);
	return ssock;
}
static void __mptcp_move_skb(struct mptcp_sock *msk, struct sock *ssk,
			     struct sk_buff *skb,
			     unsigned int offset, size_t copy_len)
{
	struct sock *sk = (struct sock *)msk;

	__skb_unlink(skb, &ssk->sk_receive_queue);
	skb_set_owner_r(skb, sk);
	__skb_queue_tail(&sk->sk_receive_queue, skb);

	msk->ack_seq += copy_len;
	MPTCP_SKB_CB(skb)->offset = offset;
}
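/* Note: moving the skb charges its truesize to the msk receive queue via
 * skb_set_owner_r() and advances the MPTCP-level ack_seq by the amount of
 * data made available to the application.
 */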
/* both sockets must be locked */
static bool mptcp_subflow_dsn_valid(const struct mptcp_sock *msk,
				    struct sock *ssk)
{
	struct mptcp_subflow_context *subflow = mptcp_subflow_ctx(ssk);
	u64 dsn = mptcp_subflow_get_mapped_dsn(subflow);

	/* revalidate data sequence number.
	 *
	 * mptcp_subflow_data_available() is usually called
	 * without msk lock.  It's unlikely (but possible)
	 * that msk->ack_seq has been advanced since the last
	 * call found in-sequence data.
	 */
	if (likely(dsn == msk->ack_seq))
		return true;

	subflow->data_avail = 0;
	return mptcp_subflow_data_available(ssk);
}
static bool __mptcp_move_skbs_from_subflow(struct mptcp_sock *msk,
					   struct sock *ssk,
					   unsigned int *bytes)
{
	struct mptcp_subflow_context *subflow = mptcp_subflow_ctx(ssk);
	struct sock *sk = (struct sock *)msk;
	struct tcp_sock *tp = tcp_sk(ssk);
	unsigned int moved = 0;
	bool more_data_avail;
	bool done = false;

	if (!mptcp_subflow_dsn_valid(msk, ssk)) {
		*bytes = 0;
		return false;
	}

	/* mirror subflow rcvbuf growth on the msk */
	if (!(sk->sk_userlocks & SOCK_RCVBUF_LOCK)) {
		int rcvbuf = max(ssk->sk_rcvbuf, sk->sk_rcvbuf);

		if (rcvbuf > sk->sk_rcvbuf)
			sk->sk_rcvbuf = rcvbuf;
	}

	do {
		u32 map_remaining, offset;
		u32 seq = tp->copied_seq;
		struct sk_buff *skb;
		bool fin;

		/* try to move as much data as available */
		map_remaining = subflow->map_data_len -
				mptcp_subflow_get_map_offset(subflow);

		skb = skb_peek(&ssk->sk_receive_queue);
		if (!skb)
			break;

		offset = seq - TCP_SKB_CB(skb)->seq;
		fin = TCP_SKB_CB(skb)->tcp_flags & TCPHDR_FIN;
		if (fin) {
			done = true;
			seq++;
		}

		if (offset < skb->len) {
			size_t len = skb->len - offset;

			__mptcp_move_skb(msk, ssk, skb, offset, len);
			seq += len;
			moved += len;

			if (WARN_ON_ONCE(map_remaining < len))
				break;
		} else {
			WARN_ON_ONCE(!fin);
			sk_eat_skb(ssk, skb);
			done = true;
		}

		WRITE_ONCE(tp->copied_seq, seq);
		more_data_avail = mptcp_subflow_data_available(ssk);

		if (atomic_read(&sk->sk_rmem_alloc) > READ_ONCE(sk->sk_rcvbuf)) {
			done = true;
			break;
		}
	} while (more_data_avail);

	*bytes = moved;

	return done;
}
/* In most cases we will be able to lock the mptcp socket.  If it's already
 * owned, we need to defer to the work queue to avoid ABBA deadlock.
 */
static bool move_skbs_to_msk(struct mptcp_sock *msk, struct sock *ssk)
{
	struct sock *sk = (struct sock *)msk;
	unsigned int moved = 0;

	if (READ_ONCE(sk->sk_lock.owned))
		return false;

	if (unlikely(!spin_trylock_bh(&sk->sk_lock.slock)))
		return false;

	/* must re-check after taking the lock */
	if (!READ_ONCE(sk->sk_lock.owned))
		__mptcp_move_skbs_from_subflow(msk, ssk, &moved);

	spin_unlock_bh(&sk->sk_lock.slock);

	return moved > 0;
}
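/* Note: the trylock fast path above runs from the subflow data_ready
 * callback, i.e. softirq context; when the msk is owned by user context
 * mptcp_data_ready() below defers the skb move to mptcp_release_cb()
 * or to the work queue instead.
 */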
void mptcp_data_ready(struct sock *sk, struct sock *ssk)
{
	struct mptcp_sock *msk = mptcp_sk(sk);

	set_bit(MPTCP_DATA_READY, &msk->flags);

	if (atomic_read(&sk->sk_rmem_alloc) < READ_ONCE(sk->sk_rcvbuf) &&
	    move_skbs_to_msk(msk, ssk))
		goto wake;

	/* don't schedule if mptcp sk is (still) over limit */
	if (atomic_read(&sk->sk_rmem_alloc) > READ_ONCE(sk->sk_rcvbuf))
		goto wake;

	/* mptcp socket is owned, release_cb should retry */
	if (!test_and_set_bit(TCP_DELACK_TIMER_DEFERRED,
			      &sk->sk_tsq_flags)) {
		sock_hold(sk);

		/* need to try again, it's possible release_cb() has already
		 * been called after the test_and_set_bit() above.
		 */
		move_skbs_to_msk(msk, ssk);
	}

wake:
	sk->sk_data_ready(sk);
}
static void __mptcp_flush_join_list(struct mptcp_sock *msk)
{
	if (likely(list_empty(&msk->join_list)))
		return;

	spin_lock_bh(&msk->join_list_lock);
	list_splice_tail_init(&msk->join_list, &msk->conn_list);
	spin_unlock_bh(&msk->join_list_lock);
}
static void mptcp_set_timeout(const struct sock *sk, const struct sock *ssk)
{
	long tout = ssk && inet_csk(ssk)->icsk_pending ?
				      inet_csk(ssk)->icsk_timeout - jiffies : 0;

	if (tout <= 0)
		tout = mptcp_sk(sk)->timer_ival;
	mptcp_sk(sk)->timer_ival = tout > 0 ? tout : TCP_RTO_MIN;
}
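/* Note: the MPTCP-level retransmit timeout piggy-backs on subflow state:
 * if the ssk has a pending ICSK timer its residual timeout is reused,
 * otherwise the previous value is kept, floored at TCP_RTO_MIN.
 */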
static bool mptcp_timer_pending(struct sock *sk)
{
	return timer_pending(&inet_csk(sk)->icsk_retransmit_timer);
}

static void mptcp_reset_timer(struct sock *sk)
{
	struct inet_connection_sock *icsk = inet_csk(sk);
	unsigned long tout;

	/* should never be called with mptcp level timer cleared */
	tout = READ_ONCE(mptcp_sk(sk)->timer_ival);
	if (WARN_ON_ONCE(!tout))
		tout = TCP_RTO_MIN;
	sk_reset_timer(sk, &icsk->icsk_retransmit_timer, jiffies + tout);
}
void mptcp_data_acked(struct sock *sk)
{
	mptcp_reset_timer(sk);

	if (!sk_stream_is_writeable(sk) &&
	    schedule_work(&mptcp_sk(sk)->work))
		sock_hold(sk);
}

void mptcp_subflow_eof(struct sock *sk)
{
	struct mptcp_sock *msk = mptcp_sk(sk);

	if (!test_and_set_bit(MPTCP_WORK_EOF, &msk->flags) &&
	    schedule_work(&msk->work))
		sock_hold(sk);
}

static void mptcp_stop_timer(struct sock *sk)
{
	struct inet_connection_sock *icsk = inet_csk(sk);

	sk_stop_timer(sk, &icsk->icsk_retransmit_timer);
	mptcp_sk(sk)->timer_ival = 0;
}
static bool mptcp_ext_cache_refill(struct mptcp_sock *msk)
{
	if (!msk->cached_ext)
		msk->cached_ext = __skb_ext_alloc();

	return !!msk->cached_ext;
}
static struct sock *mptcp_subflow_recv_lookup(const struct mptcp_sock *msk)
{
	struct mptcp_subflow_context *subflow;
	struct sock *sk = (struct sock *)msk;

	sock_owned_by_me(sk);

	mptcp_for_each_subflow(msk, subflow) {
		if (subflow->data_avail)
			return mptcp_subflow_tcp_sock(subflow);
	}

	return NULL;
}
static bool mptcp_skb_can_collapse_to(u64 write_seq,
				      const struct sk_buff *skb,
				      const struct mptcp_ext *mpext)
{
	if (!tcp_skb_can_collapse_to(skb))
		return false;

	/* can collapse only if MPTCP level sequence is in order */
	return mpext && mpext->data_seq + mpext->data_len == write_seq;
}

static bool mptcp_frag_can_collapse_to(const struct mptcp_sock *msk,
				       const struct page_frag *pfrag,
				       const struct mptcp_data_frag *df)
{
	return df && pfrag->page == df->page &&
		df->data_seq + df->data_len == msk->write_seq;
}
static void dfrag_uncharge(struct sock *sk, int len)
{
	sk_mem_uncharge(sk, len);
	sk_wmem_queued_add(sk, -len);
}

static void dfrag_clear(struct sock *sk, struct mptcp_data_frag *dfrag)
{
	int len = dfrag->data_len + dfrag->overhead;

	list_del(&dfrag->list);
	dfrag_uncharge(sk, len);
	put_page(dfrag->page);
}
static void mptcp_clean_una(struct sock *sk)
{
	struct mptcp_sock *msk = mptcp_sk(sk);
	struct mptcp_data_frag *dtmp, *dfrag;
	u64 snd_una = atomic64_read(&msk->snd_una);
	bool cleaned = false;

	list_for_each_entry_safe(dfrag, dtmp, &msk->rtx_queue, list) {
		if (after64(dfrag->data_seq + dfrag->data_len, snd_una))
			break;

		dfrag_clear(sk, dfrag);
		cleaned = true;
	}

	/* partially acked head dfrag: trim the acked prefix */
	dfrag = mptcp_rtx_head(sk);
	if (dfrag && after64(snd_una, dfrag->data_seq)) {
		u64 delta = snd_una - dfrag->data_seq;

		dfrag->data_seq += delta;
		dfrag->offset += delta;
		dfrag->data_len -= delta;

		dfrag_uncharge(sk, delta);
		cleaned = true;
	}

	if (cleaned) {
		sk_mem_reclaim_partial(sk);

		/* Only wake up writers if a subflow is ready */
		if (test_bit(MPTCP_SEND_SPACE, &msk->flags))
			sk_stream_write_space(sk);
	}
}
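/* Example: with a single dfrag covering data_seq 1000..1999 and a
 * cumulative MPTCP-level ack at 1500, the loop above leaves the dfrag in
 * place and the partial-ack branch trims it to 1500..1999, uncharging the
 * 500 acked bytes from the msk write queue accounting.
 */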
/* ensure we get enough memory for the frag hdr, beyond some minimal amount of
 * data
 */
static bool mptcp_page_frag_refill(struct sock *sk, struct page_frag *pfrag)
{
	if (likely(skb_page_frag_refill(32U + sizeof(struct mptcp_data_frag),
					pfrag, sk->sk_allocation)))
		return true;

	sk->sk_prot->enter_memory_pressure(sk);
	sk_stream_moderate_sndbuf(sk);
	return false;
}
static struct mptcp_data_frag *
mptcp_carve_data_frag(const struct mptcp_sock *msk, struct page_frag *pfrag,
		      int orig_offset)
{
	int offset = ALIGN(orig_offset, sizeof(long));
	struct mptcp_data_frag *dfrag;

	dfrag = (struct mptcp_data_frag *)(page_to_virt(pfrag->page) + offset);
	dfrag->data_len = 0;
	dfrag->data_seq = msk->write_seq;
	dfrag->overhead = offset - orig_offset + sizeof(struct mptcp_data_frag);
	dfrag->offset = offset + sizeof(struct mptcp_data_frag);
	dfrag->page = pfrag->page;

	return dfrag;
}
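/* Layout of a fragment carved by mptcp_carve_data_frag(), with the dfrag
 * descriptor placed inline in front of the data it describes:
 *
 *	orig_offset  offset (long-aligned)
 *	    |          |
 *	    v          v
 *	....[ pad ][ struct mptcp_data_frag ][ data ... ]
 *	    <---------- overhead ----------->
 *
 * dfrag->offset points at the start of the data area.
 */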
static int mptcp_sendmsg_frag(struct sock *sk, struct sock *ssk,
			      struct msghdr *msg, struct mptcp_data_frag *dfrag,
			      long *timeo, int *pmss_now,
			      int *ps_goal)
{
	int mss_now, avail_size, size_goal, offset, ret, frag_truesize = 0;
	bool dfrag_collapsed, can_collapse = false;
	struct mptcp_sock *msk = mptcp_sk(sk);
	struct mptcp_ext *mpext = NULL;
	bool retransmission = !!dfrag;
	struct sk_buff *skb, *tail;
	struct page_frag *pfrag;
	struct page *page;
	u64 *write_seq;
	size_t psize;

	/* use the mptcp page cache so that we can easily move the data
	 * from one substream to another, but do per subflow memory accounting.
	 * Note: pfrag is used only when !retransmission, but the compiler
	 * would warn if we didn't init it here.
	 */
	pfrag = sk_page_frag(sk);
	while ((!retransmission && !mptcp_page_frag_refill(ssk, pfrag)) ||
	       !mptcp_ext_cache_refill(msk)) {
		ret = sk_stream_wait_memory(ssk, timeo);
		if (ret)
			return ret;

		/* if sk_stream_wait_memory() sleeps snd_una can change
		 * significantly, refresh the rtx queue
		 */
		mptcp_clean_una(sk);

		if (unlikely(__mptcp_needs_tcp_fallback(msk)))
			return 0;
	}
	if (!retransmission) {
		write_seq = &msk->write_seq;
		page = pfrag->page;
	} else {
		write_seq = &dfrag->data_seq;
		page = dfrag->page;
	}

	/* compute copy limit */
	mss_now = tcp_send_mss(ssk, &size_goal, msg->msg_flags);
	*pmss_now = mss_now;
	*ps_goal = size_goal;
	avail_size = size_goal;
	skb = tcp_write_queue_tail(ssk);
	if (skb) {
		mpext = skb_ext_find(skb, SKB_EXT_MPTCP);

		/* Limit the write to the size available in the
		 * current skb, if any, so that we create at most a new skb.
		 * Explicitly tells TCP internals to avoid collapsing on later
		 * queue management operation, to avoid breaking the ext <->
		 * SSN association set here
		 */
		can_collapse = (size_goal - skb->len > 0) &&
			       mptcp_skb_can_collapse_to(*write_seq, skb, mpext);
		if (!can_collapse)
			TCP_SKB_CB(skb)->eor = 1;
		else
			avail_size = size_goal - skb->len;
	}

	if (!retransmission) {
		/* reuse tail pfrag, if possible, or carve a new one from the
		 * page allocator
		 */
		dfrag = mptcp_rtx_tail(sk);
		offset = pfrag->offset;
		dfrag_collapsed = mptcp_frag_can_collapse_to(msk, pfrag, dfrag);
		if (!dfrag_collapsed) {
			dfrag = mptcp_carve_data_frag(msk, pfrag, offset);
			offset = dfrag->offset;
			frag_truesize = dfrag->overhead;
		}
		psize = min_t(size_t, pfrag->size - offset, avail_size);

		/* Copy to page */
		pr_debug("left=%zu", msg_data_left(msg));
		psize = copy_page_from_iter(pfrag->page, offset,
					    min_t(size_t, msg_data_left(msg),
						  psize),
					    &msg->msg_iter);
		pr_debug("left=%zu", msg_data_left(msg));
		if (!psize)
			return -EINVAL;

		if (!sk_wmem_schedule(sk, psize + dfrag->overhead))
			return -ENOMEM;
	} else {
		offset = dfrag->offset;
		psize = min_t(size_t, dfrag->data_len, avail_size);
	}

	/* tell the TCP stack to delay the push so that we can safely
	 * access the skb after the sendpages call
	 */
	ret = do_tcp_sendpages(ssk, page, offset, psize,
			       msg->msg_flags | MSG_SENDPAGE_NOTLAST);
	if (ret <= 0)
		return ret;

	frag_truesize += ret;
	if (!retransmission) {
		if (unlikely(ret < psize))
			iov_iter_revert(&msg->msg_iter, psize - ret);

		/* send successful, keep track of sent data for mptcp-level
		 * retransmission
		 */
		dfrag->data_len += ret;
		if (!dfrag_collapsed) {
			get_page(dfrag->page);
			list_add_tail(&dfrag->list, &msk->rtx_queue);
			sk_wmem_queued_add(sk, frag_truesize);
		} else {
			sk_wmem_queued_add(sk, ret);
		}

		/* charge data on mptcp rtx queue to the master socket
		 * Note: we charge such data both to sk and ssk
		 */
		sk->sk_forward_alloc -= frag_truesize;
	}

	/* if the tail skb extension is still the cached one, collapsing
	 * really happened. Note: we can't check for 'same skb' as the sk_buff
	 * hdr on tail can be transmitted, freed and re-allocated by the
	 * do_tcp_sendpages() call
	 */
	tail = tcp_write_queue_tail(ssk);
	if (mpext && tail && mpext == skb_ext_find(tail, SKB_EXT_MPTCP)) {
		WARN_ON_ONCE(!can_collapse);
		mpext->data_len += ret;
		goto out;
	}

	skb = tcp_write_queue_tail(ssk);
	mpext = __skb_ext_set(skb, SKB_EXT_MPTCP, msk->cached_ext);
	msk->cached_ext = NULL;

	memset(mpext, 0, sizeof(*mpext));
	mpext->data_seq = *write_seq;
	mpext->subflow_seq = mptcp_subflow_ctx(ssk)->rel_write_seq;
	mpext->data_len = ret;
	mpext->use_map = 1;
	mpext->dsn64 = 1;

	pr_debug("data_seq=%llu subflow_seq=%u data_len=%u dsn64=%d",
		 mpext->data_seq, mpext->subflow_seq, mpext->data_len,
		 mpext->dsn64);

out:
	if (!retransmission)
		pfrag->offset += frag_truesize;
	*write_seq += ret;
	mptcp_subflow_ctx(ssk)->rel_write_seq += ret;

	return ret;
}
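/* Note: mptcp_sendmsg_frag() attaches at most one DSS mapping (the
 * SKB_EXT_MPTCP extension) per tail skb; when consecutive writes are
 * contiguous at the MPTCP level the existing mapping is simply widened
 * (mpext->data_len += ret) instead of consuming a new cached extension.
 */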
static struct sock *mptcp_subflow_get_send(struct mptcp_sock *msk)
{
	struct mptcp_subflow_context *subflow;
	struct sock *backup = NULL;

	sock_owned_by_me((const struct sock *)msk);

	mptcp_for_each_subflow(msk, subflow) {
		struct sock *ssk = mptcp_subflow_tcp_sock(subflow);

		if (!sk_stream_memory_free(ssk)) {
			struct socket *sock = ssk->sk_socket;

			if (sock) {
				clear_bit(MPTCP_SEND_SPACE, &msk->flags);
				smp_mb__after_atomic();

				/* enables sk->write_space() callbacks */
				set_bit(SOCK_NOSPACE, &sock->flags);
			}

			return NULL;
		}

		if (subflow->backup) {
			if (!backup)
				backup = ssk;

			continue;
		}

		return ssk;
	}

	return backup;
}
static void ssk_check_wmem(struct mptcp_sock *msk, struct sock *ssk)
{
	struct socket *sock;

	if (likely(sk_stream_is_writeable(ssk)))
		return;

	sock = READ_ONCE(ssk->sk_socket);
	if (sock) {
		clear_bit(MPTCP_SEND_SPACE, &msk->flags);
		smp_mb__after_atomic();
		/* set NOSPACE only after clearing SEND_SPACE flag */
		set_bit(SOCK_NOSPACE, &sock->flags);
	}
}
static int mptcp_sendmsg(struct sock *sk, struct msghdr *msg, size_t len)
{
	int mss_now = 0, size_goal = 0, ret = 0;
	struct mptcp_sock *msk = mptcp_sk(sk);
	struct socket *ssock;
	size_t copied = 0;
	struct sock *ssk;
	long timeo;

	if (msg->msg_flags & ~(MSG_MORE | MSG_DONTWAIT | MSG_NOSIGNAL))
		return -EOPNOTSUPP;

	lock_sock(sk);

	timeo = sock_sndtimeo(sk, msg->msg_flags & MSG_DONTWAIT);

	if ((1 << sk->sk_state) & ~(TCPF_ESTABLISHED | TCPF_CLOSE_WAIT)) {
		ret = sk_stream_wait_connect(sk, &timeo);
		if (ret)
			goto out;
	}

fallback:
	ssock = __mptcp_tcp_fallback(msk);
	if (unlikely(ssock)) {
		release_sock(sk);
		pr_debug("fallback passthrough");
		ret = sock_sendmsg(ssock, msg);
		return ret >= 0 ? ret + copied : (copied ? copied : ret);
	}

	mptcp_clean_una(sk);

	__mptcp_flush_join_list(msk);
	ssk = mptcp_subflow_get_send(msk);
	while (!sk_stream_memory_free(sk) || !ssk) {
		ret = sk_stream_wait_memory(sk, &timeo);
		if (ret)
			goto out;

		mptcp_clean_una(sk);

		ssk = mptcp_subflow_get_send(msk);
		if (list_empty(&msk->conn_list)) {
			ret = -ENOTCONN;
			goto out;
		}
	}

	pr_debug("conn_list->subflow=%p", ssk);

	lock_sock(ssk);
	while (msg_data_left(msg)) {
		ret = mptcp_sendmsg_frag(sk, ssk, msg, NULL, &timeo, &mss_now,
					 &size_goal);
		if (ret < 0)
			break;
		if (ret == 0 && unlikely(__mptcp_needs_tcp_fallback(msk))) {
			/* Can happen for passive sockets:
			 * 3WHS negotiated MPTCP, but first packet after is
			 * plain TCP (e.g. due to middlebox filtering unknown
			 * options).
			 *
			 * Fall back to TCP.
			 */
			release_sock(ssk);
			goto fallback;
		}

		copied += ret;
	}

	mptcp_set_timeout(sk, ssk);
	if (copied) {
		ret = copied;
		tcp_push(ssk, msg->msg_flags, mss_now, tcp_sk(ssk)->nonagle,
			 size_goal);

		/* start the timer, if it's not pending */
		if (!mptcp_timer_pending(sk))
			mptcp_reset_timer(sk);
	}

	ssk_check_wmem(msk, ssk);
	release_sock(ssk);
out:
	release_sock(sk);
	return ret;
}
static void mptcp_wait_data(struct sock *sk, long *timeo)
{
	DEFINE_WAIT_FUNC(wait, woken_wake_function);
	struct mptcp_sock *msk = mptcp_sk(sk);

	add_wait_queue(sk_sleep(sk), &wait);
	sk_set_bit(SOCKWQ_ASYNC_WAITDATA, sk);

	sk_wait_event(sk, timeo,
		      test_and_clear_bit(MPTCP_DATA_READY, &msk->flags), &wait);

	sk_clear_bit(SOCKWQ_ASYNC_WAITDATA, sk);
	remove_wait_queue(sk_sleep(sk), &wait);
}
static int __mptcp_recvmsg_mskq(struct mptcp_sock *msk,
				struct msghdr *msg,
				size_t len)
{
	struct sock *sk = (struct sock *)msk;
	struct sk_buff *skb;
	int copied = 0;

	while ((skb = skb_peek(&sk->sk_receive_queue)) != NULL) {
		u32 offset = MPTCP_SKB_CB(skb)->offset;
		u32 data_len = skb->len - offset;
		u32 count = min_t(size_t, len - copied, data_len);
		int err;

		err = skb_copy_datagram_msg(skb, offset, msg, count);
		if (unlikely(err < 0)) {
			if (!copied)
				return err;
			break;
		}

		copied += count;

		if (count < data_len) {
			MPTCP_SKB_CB(skb)->offset += count;
			break;
		}

		__skb_unlink(skb, &sk->sk_receive_queue);
		__kfree_skb(skb);

		if (copied >= len)
			break;
	}

	return copied;
}
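/* Note: a partially read skb stays on the msk receive queue with
 * MPTCP_SKB_CB(skb)->offset advanced past the consumed bytes, so the next
 * recvmsg() resumes exactly where this one stopped.
 */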
static bool __mptcp_move_skbs(struct mptcp_sock *msk)
{
	unsigned int moved = 0;
	bool done;

	do {
		struct sock *ssk = mptcp_subflow_recv_lookup(msk);

		if (!ssk)
			break;

		lock_sock(ssk);
		done = __mptcp_move_skbs_from_subflow(msk, ssk, &moved);
		release_sock(ssk);
	} while (!done);

	return moved > 0;
}
static int mptcp_recvmsg(struct sock *sk, struct msghdr *msg, size_t len,
			 int nonblock, int flags, int *addr_len)
{
	struct mptcp_sock *msk = mptcp_sk(sk);
	struct socket *ssock;
	int copied = 0;
	int target;
	long timeo;

	if (msg->msg_flags & ~(MSG_WAITALL | MSG_DONTWAIT))
		return -EOPNOTSUPP;

	lock_sock(sk);
	ssock = __mptcp_tcp_fallback(msk);
	if (unlikely(ssock)) {
fallback:
		release_sock(sk);
		pr_debug("fallback-read subflow=%p",
			 mptcp_subflow_ctx(ssock->sk));
		copied = sock_recvmsg(ssock, msg, flags);
		return copied;
	}

	timeo = sock_rcvtimeo(sk, nonblock);

	len = min_t(size_t, len, INT_MAX);
	target = sock_rcvlowat(sk, flags & MSG_WAITALL, len);
	__mptcp_flush_join_list(msk);

	while (len > (size_t)copied) {
		int bytes_read;

		bytes_read = __mptcp_recvmsg_mskq(msk, msg, len - copied);
		if (unlikely(bytes_read < 0)) {
			if (!copied)
				copied = bytes_read;
			goto out_err;
		}

		copied += bytes_read;

		if (skb_queue_empty(&sk->sk_receive_queue) &&
		    __mptcp_move_skbs(msk))
			continue;

		/* only the master socket status is relevant here. The exit
		 * conditions mirror closely tcp_recvmsg()
		 */
		if (copied >= target)
			break;

		if (copied) {
			if (sk->sk_err ||
			    sk->sk_state == TCP_CLOSE ||
			    (sk->sk_shutdown & RCV_SHUTDOWN) ||
			    !timeo ||
			    signal_pending(current))
				break;
		} else {
			if (sk->sk_err) {
				copied = sock_error(sk);
				break;
			}

			if (sk->sk_shutdown & RCV_SHUTDOWN)
				break;

			if (sk->sk_state == TCP_CLOSE) {
				copied = -ENOTCONN;
				break;
			}

			if (!timeo) {
				copied = -EAGAIN;
				break;
			}

			if (signal_pending(current)) {
				copied = sock_intr_errno(timeo);
				break;
			}
		}

		pr_debug("block timeout %ld", timeo);
		mptcp_wait_data(sk, &timeo);
		ssock = __mptcp_tcp_fallback(msk);
		if (unlikely(ssock))
			goto fallback;
	}

	if (skb_queue_empty(&sk->sk_receive_queue)) {
		/* entire backlog drained, clear DATA_READY. */
		clear_bit(MPTCP_DATA_READY, &msk->flags);

		/* .. race-breaker: ssk might have gotten new data
		 * after last __mptcp_move_skbs() returned false.
		 */
		if (unlikely(__mptcp_move_skbs(msk)))
			set_bit(MPTCP_DATA_READY, &msk->flags);
	} else if (unlikely(!test_bit(MPTCP_DATA_READY, &msk->flags))) {
		/* data to read but mptcp_wait_data() cleared DATA_READY */
		set_bit(MPTCP_DATA_READY, &msk->flags);
	}
out_err:
	release_sock(sk);
	return copied;
}
static void mptcp_retransmit_handler(struct sock *sk)
{
	struct mptcp_sock *msk = mptcp_sk(sk);

	if (atomic64_read(&msk->snd_una) == msk->write_seq) {
		mptcp_stop_timer(sk);
	} else {
		set_bit(MPTCP_WORK_RTX, &msk->flags);
		if (schedule_work(&msk->work))
			sock_hold(sk);
	}
}

static void mptcp_retransmit_timer(struct timer_list *t)
{
	struct inet_connection_sock *icsk = from_timer(icsk, t,
						       icsk_retransmit_timer);
	struct sock *sk = &icsk->icsk_inet.sk;

	bh_lock_sock(sk);
	if (!sock_owned_by_user(sk)) {
		mptcp_retransmit_handler(sk);
	} else {
		/* delegate our work to tcp_release_cb() */
		if (!test_and_set_bit(TCP_WRITE_TIMER_DEFERRED,
				      &sk->sk_tsq_flags))
			sock_hold(sk);
	}
	bh_unlock_sock(sk);
	sock_put(sk);
}
/* Find an idle subflow.  Return NULL if there is unacked data at tcp
 * level.
 *
 * A backup subflow is returned only if that is the only kind available.
 */
static struct sock *mptcp_subflow_get_retrans(const struct mptcp_sock *msk)
{
	struct mptcp_subflow_context *subflow;
	struct sock *backup = NULL;

	sock_owned_by_me((const struct sock *)msk);

	mptcp_for_each_subflow(msk, subflow) {
		struct sock *ssk = mptcp_subflow_tcp_sock(subflow);

		/* still data outstanding at TCP level?  Don't retransmit. */
		if (!tcp_write_queue_empty(ssk))
			return NULL;

		if (subflow->backup) {
			if (!backup)
				backup = ssk;
			continue;
		}

		return ssk;
	}

	return backup;
}
/* subflow sockets can be either outgoing (connect) or incoming
 * (accept).
 *
 * Outgoing subflows use in-kernel sockets.
 * Incoming subflows do not have their own 'struct socket' allocated,
 * so we need to use tcp_close() after detaching them from the mptcp
 * parent socket.
 */
static void __mptcp_close_ssk(struct sock *sk, struct sock *ssk,
			      struct mptcp_subflow_context *subflow,
			      long timeout)
{
	struct socket *sock = READ_ONCE(ssk->sk_socket);

	list_del(&subflow->node);

	if (sock && sock != sk->sk_socket) {
		/* outgoing subflow */
		sock_release(sock);
	} else {
		/* incoming subflow */
		tcp_close(ssk, timeout);
	}
}
static unsigned int mptcp_sync_mss(struct sock *sk, u32 pmtu)
{
	return 0;
}

static void mptcp_check_for_eof(struct mptcp_sock *msk)
{
	struct mptcp_subflow_context *subflow;
	struct sock *sk = (struct sock *)msk;
	int receivers = 0;

	mptcp_for_each_subflow(msk, subflow)
		receivers += !subflow->rx_eof;

	if (!receivers && !(sk->sk_shutdown & RCV_SHUTDOWN)) {
		/* hopefully temporary hack: propagate shutdown status
		 * to msk, when all subflows agree on it
		 */
		sk->sk_shutdown |= RCV_SHUTDOWN;

		smp_mb__before_atomic(); /* SHUTDOWN must be visible first */
		set_bit(MPTCP_DATA_READY, &msk->flags);
		sk->sk_data_ready(sk);
	}
}
static void mptcp_worker(struct work_struct *work)
{
	struct mptcp_sock *msk = container_of(work, struct mptcp_sock, work);
	struct sock *ssk, *sk = &msk->sk.icsk_inet.sk;
	int orig_len, orig_offset, ret, mss_now = 0, size_goal = 0;
	struct mptcp_data_frag *dfrag;
	u64 orig_write_seq;
	size_t copied = 0;
	struct msghdr msg;
	long timeo = 0;

	lock_sock(sk);
	mptcp_clean_una(sk);
	__mptcp_flush_join_list(msk);
	__mptcp_move_skbs(msk);

	if (test_and_clear_bit(MPTCP_WORK_EOF, &msk->flags))
		mptcp_check_for_eof(msk);

	if (!test_and_clear_bit(MPTCP_WORK_RTX, &msk->flags))
		goto unlock;

	dfrag = mptcp_rtx_head(sk);
	if (!dfrag)
		goto unlock;

	ssk = mptcp_subflow_get_retrans(msk);
	if (!ssk)
		goto reset_unlock;

	lock_sock(ssk);

	msg.msg_flags = MSG_DONTWAIT;
	orig_len = dfrag->data_len;
	orig_offset = dfrag->offset;
	orig_write_seq = dfrag->data_seq;
	while (dfrag->data_len > 0) {
		ret = mptcp_sendmsg_frag(sk, ssk, &msg, dfrag, &timeo, &mss_now,
					 &size_goal);
		if (ret < 0)
			break;

		MPTCP_INC_STATS(sock_net(sk), MPTCP_MIB_RETRANSSEGS);
		copied += ret;
		dfrag->data_len -= ret;
		dfrag->offset += ret;
	}
	if (copied)
		tcp_push(ssk, msg.msg_flags, mss_now, tcp_sk(ssk)->nonagle,
			 size_goal);

	dfrag->data_seq = orig_write_seq;
	dfrag->offset = orig_offset;
	dfrag->data_len = orig_len;

	mptcp_set_timeout(sk, ssk);
	release_sock(ssk);

reset_unlock:
	if (!mptcp_timer_pending(sk))
		mptcp_reset_timer(sk);

unlock:
	release_sock(sk);
	sock_put(sk);
}
static int __mptcp_init_sock(struct sock *sk)
{
	struct mptcp_sock *msk = mptcp_sk(sk);

	spin_lock_init(&msk->join_list_lock);

	INIT_LIST_HEAD(&msk->conn_list);
	INIT_LIST_HEAD(&msk->join_list);
	INIT_LIST_HEAD(&msk->rtx_queue);
	__set_bit(MPTCP_SEND_SPACE, &msk->flags);
	INIT_WORK(&msk->work, mptcp_worker);

	msk->first = NULL;
	inet_csk(sk)->icsk_sync_mss = mptcp_sync_mss;

	mptcp_pm_data_init(msk);

	/* re-use the csk retrans timer for MPTCP-level retrans */
	timer_setup(&msk->sk.icsk_retransmit_timer, mptcp_retransmit_timer, 0);

	return 0;
}

static int mptcp_init_sock(struct sock *sk)
{
	struct net *net = sock_net(sk);
	int ret;

	if (!mptcp_is_enabled(net))
		return -ENOPROTOOPT;

	if (unlikely(!net->mib.mptcp_statistics) && !mptcp_mib_alloc(net))
		return -ENOMEM;

	ret = __mptcp_init_sock(sk);
	if (ret)
		return ret;

	sk_sockets_allocated_inc(sk);
	sk->sk_sndbuf = sock_net(sk)->ipv4.sysctl_tcp_wmem[2];

	return 0;
}
static void __mptcp_clear_xmit(struct sock *sk)
{
	struct mptcp_sock *msk = mptcp_sk(sk);
	struct mptcp_data_frag *dtmp, *dfrag;

	sk_stop_timer(sk, &msk->sk.icsk_retransmit_timer);

	list_for_each_entry_safe(dfrag, dtmp, &msk->rtx_queue, list)
		dfrag_clear(sk, dfrag);
}

static void mptcp_cancel_work(struct sock *sk)
{
	struct mptcp_sock *msk = mptcp_sk(sk);

	if (cancel_work_sync(&msk->work))
		sock_put(sk);
}
static void mptcp_subflow_shutdown(struct sock *ssk, int how,
				   bool data_fin_tx_enable, u64 data_fin_tx_seq)
{
	lock_sock(ssk);

	switch (ssk->sk_state) {
	case TCP_LISTEN:
		if (!(how & RCV_SHUTDOWN))
			break;
		/* fall through */
	case TCP_SYN_SENT:
		tcp_disconnect(ssk, O_NONBLOCK);
		break;
	default:
		if (data_fin_tx_enable) {
			struct mptcp_subflow_context *subflow;

			subflow = mptcp_subflow_ctx(ssk);
			subflow->data_fin_tx_seq = data_fin_tx_seq;
			subflow->data_fin_tx_enable = 1;
		}

		ssk->sk_shutdown |= how;
		tcp_shutdown(ssk, how);
		break;
	}

	/* Wake up anyone sleeping in poll. */
	ssk->sk_state_change(ssk);
	release_sock(ssk);
}
/* Called with msk lock held, releases such lock before returning */
static void mptcp_close(struct sock *sk, long timeout)
{
	struct mptcp_subflow_context *subflow, *tmp;
	struct mptcp_sock *msk = mptcp_sk(sk);
	LIST_HEAD(conn_list);
	u64 data_fin_tx_seq;

	mptcp_token_destroy(msk->token);
	inet_sk_state_store(sk, TCP_CLOSE);

	__mptcp_flush_join_list(msk);

	list_splice_init(&msk->conn_list, &conn_list);

	data_fin_tx_seq = msk->write_seq;

	__mptcp_clear_xmit(sk);

	release_sock(sk);

	list_for_each_entry_safe(subflow, tmp, &conn_list, node) {
		struct sock *ssk = mptcp_subflow_tcp_sock(subflow);

		subflow->data_fin_tx_seq = data_fin_tx_seq;
		subflow->data_fin_tx_enable = 1;
		__mptcp_close_ssk(sk, ssk, subflow, timeout);
	}

	mptcp_cancel_work(sk);
	mptcp_pm_close(msk);

	__skb_queue_purge(&sk->sk_receive_queue);

	sk_common_release(sk);
}
static void mptcp_copy_inaddrs(struct sock *msk, const struct sock *ssk)
{
#if IS_ENABLED(CONFIG_MPTCP_IPV6)
	const struct ipv6_pinfo *ssk6 = inet6_sk(ssk);
	struct ipv6_pinfo *msk6 = inet6_sk(msk);

	msk->sk_v6_daddr = ssk->sk_v6_daddr;
	msk->sk_v6_rcv_saddr = ssk->sk_v6_rcv_saddr;

	if (msk6 && ssk6) {
		msk6->saddr = ssk6->saddr;
		msk6->flow_label = ssk6->flow_label;
	}
#endif

	inet_sk(msk)->inet_num = inet_sk(ssk)->inet_num;
	inet_sk(msk)->inet_dport = inet_sk(ssk)->inet_dport;
	inet_sk(msk)->inet_sport = inet_sk(ssk)->inet_sport;
	inet_sk(msk)->inet_daddr = inet_sk(ssk)->inet_daddr;
	inet_sk(msk)->inet_saddr = inet_sk(ssk)->inet_saddr;
	inet_sk(msk)->inet_rcv_saddr = inet_sk(ssk)->inet_rcv_saddr;
}
static int mptcp_disconnect(struct sock *sk, int flags)
{
	/* Should never be called.
	 * inet_stream_connect() calls ->disconnect, but that
	 * refers to the subflow socket, not the mptcp one.
	 */
	WARN_ON_ONCE(1);
	return 0;
}
#if IS_ENABLED(CONFIG_MPTCP_IPV6)
static struct ipv6_pinfo *mptcp_inet6_sk(const struct sock *sk)
{
	unsigned int offset = sizeof(struct mptcp6_sock) - sizeof(struct ipv6_pinfo);

	return (struct ipv6_pinfo *)(((u8 *)sk) + offset);
}
#endif
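/* Note: the computation above replicates what inet6_sk() expects: the
 * ipv6_pinfo area sits at the tail of the socket allocation, so a clone of
 * an IPv6 msk can recompute pinet6 from the mptcp6_sock size, see
 * mptcp_sk_clone() below.
 */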
struct sock *mptcp_sk_clone(const struct sock *sk,
			    const struct mptcp_options_received *mp_opt,
			    struct request_sock *req)
{
	struct mptcp_subflow_request_sock *subflow_req = mptcp_subflow_rsk(req);
	struct sock *nsk = sk_clone_lock(sk, GFP_ATOMIC);
	struct mptcp_sock *msk;
	u64 ack_seq;

	if (!nsk)
		return NULL;

#if IS_ENABLED(CONFIG_MPTCP_IPV6)
	if (nsk->sk_family == AF_INET6)
		inet_sk(nsk)->pinet6 = mptcp_inet6_sk(nsk);
#endif

	__mptcp_init_sock(nsk);

	msk = mptcp_sk(nsk);
	msk->local_key = subflow_req->local_key;
	msk->token = subflow_req->token;
	msk->subflow = NULL;

	if (unlikely(mptcp_token_new_accept(subflow_req->token, nsk))) {
		nsk->sk_state = TCP_CLOSE;
		bh_unlock_sock(nsk);

		/* we can't call into mptcp_close() here - possible BH context
		 * free the sock directly.
		 * sk_clone_lock() sets nsk refcnt to two, hence call sk_free()
		 * too.
		 */
		sk_common_release(nsk);
		sk_free(nsk);
		return NULL;
	}

	msk->write_seq = subflow_req->idsn + 1;
	atomic64_set(&msk->snd_una, msk->write_seq);
	if (mp_opt->mp_capable) {
		msk->can_ack = true;
		msk->remote_key = mp_opt->sndr_key;
		mptcp_crypto_key_sha(msk->remote_key, NULL, &ack_seq);
		ack_seq++;
		msk->ack_seq = ack_seq;
	}

	sock_reset_flag(nsk, SOCK_RCU_FREE);
	/* will be fully established after successful MPC subflow creation */
	inet_sk_state_store(nsk, TCP_SYN_RECV);
	bh_unlock_sock(nsk);

	/* keep a single reference */
	__sock_put(nsk);
	return nsk;
}
static struct sock *mptcp_accept(struct sock *sk, int flags, int *err,
				 bool kern)
{
	struct mptcp_sock *msk = mptcp_sk(sk);
	struct socket *listener;
	struct sock *newsk;

	listener = __mptcp_nmpc_socket(msk);
	if (WARN_ON_ONCE(!listener)) {
		*err = -EINVAL;
		return NULL;
	}

	pr_debug("msk=%p, listener=%p", msk, mptcp_subflow_ctx(listener->sk));
	newsk = inet_csk_accept(listener->sk, flags, err, kern);
	if (!newsk)
		return NULL;

	pr_debug("msk=%p, subflow is mptcp=%d", msk, sk_is_mptcp(newsk));

	if (sk_is_mptcp(newsk)) {
		struct mptcp_subflow_context *subflow;
		struct sock *new_mptcp_sock;
		struct sock *ssk = newsk;

		subflow = mptcp_subflow_ctx(newsk);
		new_mptcp_sock = subflow->conn;

		/* is_mptcp should be false if subflow->conn is missing, see
		 * subflow_syn_recv_sock()
		 */
		if (WARN_ON_ONCE(!new_mptcp_sock)) {
			tcp_sk(newsk)->is_mptcp = 0;
			return newsk;
		}

		/* acquire the 2nd reference for the owning socket */
		sock_hold(new_mptcp_sock);

		local_bh_disable();
		bh_lock_sock(new_mptcp_sock);
		msk = mptcp_sk(new_mptcp_sock);
		msk->first = newsk;

		newsk = new_mptcp_sock;
		mptcp_copy_inaddrs(newsk, ssk);
		list_add(&subflow->node, &msk->conn_list);
		inet_sk_state_store(newsk, TCP_ESTABLISHED);

		bh_unlock_sock(new_mptcp_sock);

		__MPTCP_INC_STATS(sock_net(sk), MPTCP_MIB_MPCAPABLEPASSIVEACK);
		local_bh_enable();
	} else {
		MPTCP_INC_STATS(sock_net(sk),
				MPTCP_MIB_MPCAPABLEPASSIVEFALLBACK);
	}

	return newsk;
}
static void mptcp_destroy(struct sock *sk)
{
	struct mptcp_sock *msk = mptcp_sk(sk);

	if (msk->cached_ext)
		__skb_ext_put(msk->cached_ext);

	sk_sockets_allocated_dec(sk);
}
static int mptcp_setsockopt(struct sock *sk, int level, int optname,
			    char __user *optval, unsigned int optlen)
{
	struct mptcp_sock *msk = mptcp_sk(sk);
	struct socket *ssock;

	pr_debug("msk=%p", msk);

	/* @@ the meaning of setsockopt() when the socket is connected and
	 * there are multiple subflows is not yet defined. It is up to the
	 * MPTCP-level socket to configure the subflows until the subflow
	 * is in TCP fallback, when TCP socket options are passed through
	 * to the one remaining subflow.
	 */
	lock_sock(sk);
	ssock = __mptcp_tcp_fallback(msk);
	release_sock(sk);
	if (ssock)
		return tcp_setsockopt(ssock->sk, level, optname, optval,
				      optlen);

	return -EOPNOTSUPP;
}

static int mptcp_getsockopt(struct sock *sk, int level, int optname,
			    char __user *optval, int __user *option)
{
	struct mptcp_sock *msk = mptcp_sk(sk);
	struct socket *ssock;

	pr_debug("msk=%p", msk);

	/* @@ the meaning of getsockopt() when the socket is connected and
	 * there are multiple subflows is not yet defined. It is up to the
	 * MPTCP-level socket to configure the subflows until the subflow
	 * is in TCP fallback, when socket options are passed through
	 * to the one remaining subflow.
	 */
	lock_sock(sk);
	ssock = __mptcp_tcp_fallback(msk);
	release_sock(sk);
	if (ssock)
		return tcp_getsockopt(ssock->sk, level, optname, optval,
				      option);

	return -EOPNOTSUPP;
}
#define MPTCP_DEFERRED_ALL (TCPF_DELACK_TIMER_DEFERRED | \
			    TCPF_WRITE_TIMER_DEFERRED)

/* this closely mirrors tcp_release_cb(), but handles a different set of
 * events
 */
static void mptcp_release_cb(struct sock *sk)
{
	unsigned long flags, nflags;

	do {
		flags = sk->sk_tsq_flags;
		if (!(flags & MPTCP_DEFERRED_ALL))
			return;
		nflags = flags & ~MPTCP_DEFERRED_ALL;
	} while (cmpxchg(&sk->sk_tsq_flags, flags, nflags) != flags);

	sock_release_ownership(sk);

	if (flags & TCPF_DELACK_TIMER_DEFERRED) {
		struct mptcp_sock *msk = mptcp_sk(sk);
		struct sock *ssk;

		ssk = mptcp_subflow_recv_lookup(msk);
		if (!ssk || !schedule_work(&msk->work))
			__sock_put(sk);
	}

	if (flags & TCPF_WRITE_TIMER_DEFERRED) {
		mptcp_retransmit_handler(sk);
		__sock_put(sk);
	}
}
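/* Deferral contract, mirroring tcp_release_cb(): whoever sets a *_DEFERRED
 * bit also takes a sock reference (see mptcp_data_ready() and
 * mptcp_retransmit_timer()); mptcp_release_cb() then runs the deferred work
 * with the socket lock still held and drops that reference via __sock_put().
 */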
static int mptcp_get_port(struct sock *sk, unsigned short snum)
{
	struct mptcp_sock *msk = mptcp_sk(sk);
	struct socket *ssock;

	ssock = __mptcp_nmpc_socket(msk);
	pr_debug("msk=%p, subflow=%p", msk, ssock);
	if (WARN_ON_ONCE(!ssock))
		return -EINVAL;

	return inet_csk_get_port(ssock->sk, snum);
}
void mptcp_finish_connect(struct sock *ssk)
{
	struct mptcp_subflow_context *subflow;
	struct mptcp_sock *msk;
	struct sock *sk;
	u64 ack_seq;

	subflow = mptcp_subflow_ctx(ssk);
	sk = subflow->conn;
	msk = mptcp_sk(sk);

	if (!subflow->mp_capable) {
		MPTCP_INC_STATS(sock_net(sk),
				MPTCP_MIB_MPCAPABLEACTIVEFALLBACK);
		return;
	}

	pr_debug("msk=%p, token=%u", sk, subflow->token);

	mptcp_crypto_key_sha(subflow->remote_key, NULL, &ack_seq);
	ack_seq++;
	subflow->map_seq = ack_seq;
	subflow->map_subflow_seq = 1;
	subflow->rel_write_seq = 1;

	/* the socket is not connected yet, no msk/subflow ops can access/race
	 * accessing the field below
	 */
	WRITE_ONCE(msk->remote_key, subflow->remote_key);
	WRITE_ONCE(msk->local_key, subflow->local_key);
	WRITE_ONCE(msk->token, subflow->token);
	WRITE_ONCE(msk->write_seq, subflow->idsn + 1);
	WRITE_ONCE(msk->ack_seq, ack_seq);
	WRITE_ONCE(msk->can_ack, 1);
	atomic64_set(&msk->snd_una, msk->write_seq);

	mptcp_pm_new_connection(msk, 0);
}
static void mptcp_sock_graft(struct sock *sk, struct socket *parent)
{
	write_lock_bh(&sk->sk_callback_lock);
	rcu_assign_pointer(sk->sk_wq, &parent->wq);
	sk_set_socket(sk, parent);
	sk->sk_uid = SOCK_INODE(parent)->i_uid;
	write_unlock_bh(&sk->sk_callback_lock);
}
bool mptcp_finish_join(struct sock *sk)
{
	struct mptcp_subflow_context *subflow = mptcp_subflow_ctx(sk);
	struct mptcp_sock *msk = mptcp_sk(subflow->conn);
	struct sock *parent = (void *)msk;
	struct socket *parent_sock;
	bool ret;

	pr_debug("msk=%p, subflow=%p", msk, subflow);

	/* mptcp socket already closing? */
	if (inet_sk_state_load(parent) != TCP_ESTABLISHED)
		return false;

	if (!msk->pm.server_side)
		return true;

	/* passive connection, attach to msk socket */
	parent_sock = READ_ONCE(parent->sk_socket);
	if (parent_sock && !sk->sk_socket)
		mptcp_sock_graft(sk, parent_sock);

	ret = mptcp_pm_allow_new_subflow(msk);
	if (ret) {
		subflow->map_seq = msk->ack_seq;

		/* active connections are already on conn_list */
		spin_lock_bh(&msk->join_list_lock);
		if (!WARN_ON_ONCE(!list_empty(&subflow->node)))
			list_add_tail(&subflow->node, &msk->join_list);
		spin_unlock_bh(&msk->join_list_lock);
	}
	return ret;
}
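/* Note: subflows joined after the initial handshake land on msk->join_list
 * under the join_list_lock spinlock, and are spliced onto conn_list by
 * __mptcp_flush_join_list() the next time the msk lock is held; conn_list
 * itself is protected by the msk socket lock only.
 */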
bool mptcp_sk_is_subflow(const struct sock *sk)
{
	struct mptcp_subflow_context *subflow = mptcp_subflow_ctx(sk);

	return subflow->mp_join == 1;
}

static bool mptcp_memory_free(const struct sock *sk, int wake)
{
	struct mptcp_sock *msk = mptcp_sk(sk);

	return wake ? test_bit(MPTCP_SEND_SPACE, &msk->flags) : true;
}
static struct proto mptcp_prot = {
	.name		= "MPTCP",
	.owner		= THIS_MODULE,
	.init		= mptcp_init_sock,
	.disconnect	= mptcp_disconnect,
	.close		= mptcp_close,
	.accept		= mptcp_accept,
	.setsockopt	= mptcp_setsockopt,
	.getsockopt	= mptcp_getsockopt,
	.shutdown	= tcp_shutdown,
	.destroy	= mptcp_destroy,
	.sendmsg	= mptcp_sendmsg,
	.recvmsg	= mptcp_recvmsg,
	.release_cb	= mptcp_release_cb,
	.hash		= inet_hash,
	.unhash		= inet_unhash,
	.get_port	= mptcp_get_port,
	.sockets_allocated	= &mptcp_sockets_allocated,
	.memory_allocated	= &tcp_memory_allocated,
	.memory_pressure	= &tcp_memory_pressure,
	.stream_memory_free	= mptcp_memory_free,
	.sysctl_wmem_offset	= offsetof(struct net, ipv4.sysctl_tcp_wmem),
	.sysctl_mem	= sysctl_tcp_mem,
	.obj_size	= sizeof(struct mptcp_sock),
	.no_autobind	= true,
};
static int mptcp_bind(struct socket *sock, struct sockaddr *uaddr, int addr_len)
{
	struct mptcp_sock *msk = mptcp_sk(sock->sk);
	struct socket *ssock;
	int err;

	lock_sock(sock->sk);
	ssock = __mptcp_socket_create(msk, MPTCP_SAME_STATE);
	if (IS_ERR(ssock)) {
		err = PTR_ERR(ssock);
		goto unlock;
	}

	err = ssock->ops->bind(ssock, uaddr, addr_len);
	if (!err)
		mptcp_copy_inaddrs(sock->sk, ssock->sk);

unlock:
	release_sock(sock->sk);
	return err;
}
static int mptcp_stream_connect(struct socket *sock, struct sockaddr *uaddr,
				int addr_len, int flags)
{
	struct mptcp_sock *msk = mptcp_sk(sock->sk);
	struct socket *ssock;
	int err;

	lock_sock(sock->sk);
	if (sock->state != SS_UNCONNECTED && msk->subflow) {
		/* pending connection or invalid state, let existing subflow
		 * cope with that
		 */
		ssock = msk->subflow;
		goto do_connect;
	}

	ssock = __mptcp_socket_create(msk, TCP_SYN_SENT);
	if (IS_ERR(ssock)) {
		err = PTR_ERR(ssock);
		goto unlock;
	}

#ifdef CONFIG_TCP_MD5SIG
	/* no MPTCP if MD5SIG is enabled on this socket or we may run out of
	 * TCP option space.
	 */
	if (rcu_access_pointer(tcp_sk(ssock->sk)->md5sig_info))
		mptcp_subflow_ctx(ssock->sk)->request_mptcp = 0;
#endif

do_connect:
	err = ssock->ops->connect(ssock, uaddr, addr_len, flags);
	sock->state = ssock->state;

	/* on successful connect, the msk state will be moved to established by
	 * subflow_finish_connect()
	 */
	if (!err || err == -EINPROGRESS)
		mptcp_copy_inaddrs(sock->sk, ssock->sk);
	else
		inet_sk_state_store(sock->sk, inet_sk_state_load(ssock->sk));

unlock:
	release_sock(sock->sk);
	return err;
}
static int mptcp_v4_getname(struct socket *sock, struct sockaddr *uaddr,
			    int peer)
{
	if (sock->sk->sk_prot == &tcp_prot) {
		/* we are being invoked from __sys_accept4, after
		 * mptcp_accept() has just accepted a non-mp-capable
		 * flow: sk is a tcp_sk, not an mptcp one.
		 *
		 * Hand the socket over to tcp so all further socket ops
		 * bypass mptcp.
		 */
		sock->ops = &inet_stream_ops;
	}

	return inet_getname(sock, uaddr, peer);
}

#if IS_ENABLED(CONFIG_MPTCP_IPV6)
static int mptcp_v6_getname(struct socket *sock, struct sockaddr *uaddr,
			    int peer)
{
	if (sock->sk->sk_prot == &tcpv6_prot) {
		/* we are being invoked from __sys_accept4 after
		 * mptcp_accept() has accepted a non-mp-capable
		 * subflow: sk is a tcp_sk, not mptcp.
		 *
		 * Hand the socket over to tcp so all further
		 * socket ops bypass mptcp.
		 */
		sock->ops = &inet6_stream_ops;
	}

	return inet6_getname(sock, uaddr, peer);
}
#endif
static int mptcp_listen(struct socket *sock, int backlog)
{
	struct mptcp_sock *msk = mptcp_sk(sock->sk);
	struct socket *ssock;
	int err;

	pr_debug("msk=%p", msk);

	lock_sock(sock->sk);
	ssock = __mptcp_socket_create(msk, TCP_LISTEN);
	if (IS_ERR(ssock)) {
		err = PTR_ERR(ssock);
		goto unlock;
	}

	sock_set_flag(sock->sk, SOCK_RCU_FREE);

	err = ssock->ops->listen(ssock, backlog);
	inet_sk_state_store(sock->sk, inet_sk_state_load(ssock->sk));
	if (!err)
		mptcp_copy_inaddrs(sock->sk, ssock->sk);

unlock:
	release_sock(sock->sk);
	return err;
}
static bool is_tcp_proto(const struct proto *p)
{
#if IS_ENABLED(CONFIG_MPTCP_IPV6)
	return p == &tcp_prot || p == &tcpv6_prot;
#else
	return p == &tcp_prot;
#endif
}
static int mptcp_stream_accept(struct socket *sock, struct socket *newsock,
			       int flags, bool kern)
{
	struct mptcp_sock *msk = mptcp_sk(sock->sk);
	struct socket *ssock;
	int err;

	pr_debug("msk=%p", msk);

	lock_sock(sock->sk);
	if (sock->sk->sk_state != TCP_LISTEN)
		goto unlock_fail;

	ssock = __mptcp_nmpc_socket(msk);
	if (!ssock)
		goto unlock_fail;

	sock_hold(ssock->sk);
	release_sock(sock->sk);

	err = ssock->ops->accept(sock, newsock, flags, kern);
	if (err == 0 && !is_tcp_proto(newsock->sk->sk_prot)) {
		struct mptcp_sock *msk = mptcp_sk(newsock->sk);
		struct mptcp_subflow_context *subflow;

		/* set ssk->sk_socket of accept()ed flows to mptcp socket.
		 * This is needed so NOSPACE flag can be set from tcp stack.
		 */
		__mptcp_flush_join_list(msk);
		list_for_each_entry(subflow, &msk->conn_list, node) {
			struct sock *ssk = mptcp_subflow_tcp_sock(subflow);

			if (!ssk->sk_socket)
				mptcp_sock_graft(ssk, newsock);
		}
	}

	sock_put(ssock->sk);
	return err;

unlock_fail:
	release_sock(sock->sk);
	return -EINVAL;
}
static __poll_t mptcp_poll(struct file *file, struct socket *sock,
			   struct poll_table_struct *wait)
{
	struct sock *sk = sock->sk;
	struct mptcp_sock *msk;
	struct socket *ssock;
	__poll_t mask = 0;

	msk = mptcp_sk(sk);
	lock_sock(sk);
	ssock = __mptcp_tcp_fallback(msk);
	if (!ssock)
		ssock = __mptcp_nmpc_socket(msk);
	if (ssock) {
		mask = ssock->ops->poll(file, ssock, wait);
		release_sock(sk);
		return mask;
	}

	release_sock(sk);
	sock_poll_wait(file, sock, wait);
	lock_sock(sk);

	if (test_bit(MPTCP_DATA_READY, &msk->flags))
		mask = EPOLLIN | EPOLLRDNORM;
	if (sk_stream_is_writeable(sk) &&
	    test_bit(MPTCP_SEND_SPACE, &msk->flags))
		mask |= EPOLLOUT | EPOLLWRNORM;
	if (sk->sk_shutdown & RCV_SHUTDOWN)
		mask |= EPOLLIN | EPOLLRDNORM | EPOLLRDHUP;

	release_sock(sk);

	return mask;
}
static int mptcp_shutdown(struct socket *sock, int how)
{
	struct mptcp_sock *msk = mptcp_sk(sock->sk);
	struct mptcp_subflow_context *subflow;
	struct socket *ssock;
	int ret = 0;

	pr_debug("sk=%p, how=%d", msk, how);

	lock_sock(sock->sk);
	ssock = __mptcp_tcp_fallback(msk);
	if (ssock) {
		release_sock(sock->sk);
		return inet_shutdown(ssock, how);
	}

	if (how == SHUT_WR || how == SHUT_RDWR)
		inet_sk_state_store(sock->sk, TCP_FIN_WAIT1);

	how++;

	if ((how & ~SHUTDOWN_MASK) || !how) {
		ret = -EINVAL;
		goto out_unlock;
	}

	if (sock->state == SS_CONNECTING) {
		if ((1 << sock->sk->sk_state) &
		    (TCPF_SYN_SENT | TCPF_SYN_RECV | TCPF_CLOSE))
			sock->state = SS_DISCONNECTING;
		else
			sock->state = SS_CONNECTED;
	}

	__mptcp_flush_join_list(msk);
	mptcp_for_each_subflow(msk, subflow) {
		struct sock *tcp_sk = mptcp_subflow_tcp_sock(subflow);

		mptcp_subflow_shutdown(tcp_sk, how, 1, msk->write_seq);
	}

out_unlock:
	release_sock(sock->sk);

	return ret;
}
static const struct proto_ops mptcp_stream_ops = {
	.family		   = PF_INET,
	.owner		   = THIS_MODULE,
	.release	   = inet_release,
	.bind		   = mptcp_bind,
	.connect	   = mptcp_stream_connect,
	.socketpair	   = sock_no_socketpair,
	.accept		   = mptcp_stream_accept,
	.getname	   = mptcp_v4_getname,
	.poll		   = mptcp_poll,
	.ioctl		   = inet_ioctl,
	.gettstamp	   = sock_gettstamp,
	.listen		   = mptcp_listen,
	.shutdown	   = mptcp_shutdown,
	.setsockopt	   = sock_common_setsockopt,
	.getsockopt	   = sock_common_getsockopt,
	.sendmsg	   = inet_sendmsg,
	.recvmsg	   = inet_recvmsg,
	.mmap		   = sock_no_mmap,
	.sendpage	   = inet_sendpage,
#ifdef CONFIG_COMPAT
	.compat_setsockopt = compat_sock_common_setsockopt,
	.compat_getsockopt = compat_sock_common_getsockopt,
#endif
};
static struct inet_protosw mptcp_protosw = {
	.type		= SOCK_STREAM,
	.protocol	= IPPROTO_MPTCP,
	.prot		= &mptcp_prot,
	.ops		= &mptcp_stream_ops,
	.flags		= INET_PROTOSW_ICSK,
};
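/* Userspace reaches the protosw above through the ordinary stream socket
 * API; a minimal sketch of the userspace side, assuming the uapi headers
 * export IPPROTO_MPTCP:
 *
 *	int fd = socket(AF_INET, SOCK_STREAM, IPPROTO_MPTCP);
 *
 * A kernel without this protosw registered fails with EPROTONOSUPPORT,
 * while a disabled mptcp sysctl yields ENOPROTOOPT, see mptcp_init_sock().
 */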
void mptcp_proto_init(void)
{
	mptcp_prot.h.hashinfo = tcp_prot.h.hashinfo;

	if (percpu_counter_init(&mptcp_sockets_allocated, 0, GFP_KERNEL))
		panic("Failed to allocate MPTCP pcpu counter\n");

	mptcp_subflow_init();
	mptcp_pm_init();

	if (proto_register(&mptcp_prot, 1) != 0)
		panic("Failed to register MPTCP proto.\n");

	inet_register_protosw(&mptcp_protosw);

	BUILD_BUG_ON(sizeof(struct mptcp_skb_cb) > sizeof_field(struct sk_buff, cb));
}
#if IS_ENABLED(CONFIG_MPTCP_IPV6)
static const struct proto_ops mptcp_v6_stream_ops = {
	.family		   = PF_INET6,
	.owner		   = THIS_MODULE,
	.release	   = inet6_release,
	.bind		   = mptcp_bind,
	.connect	   = mptcp_stream_connect,
	.socketpair	   = sock_no_socketpair,
	.accept		   = mptcp_stream_accept,
	.getname	   = mptcp_v6_getname,
	.poll		   = mptcp_poll,
	.ioctl		   = inet6_ioctl,
	.gettstamp	   = sock_gettstamp,
	.listen		   = mptcp_listen,
	.shutdown	   = mptcp_shutdown,
	.setsockopt	   = sock_common_setsockopt,
	.getsockopt	   = sock_common_getsockopt,
	.sendmsg	   = inet6_sendmsg,
	.recvmsg	   = inet6_recvmsg,
	.mmap		   = sock_no_mmap,
	.sendpage	   = inet_sendpage,
#ifdef CONFIG_COMPAT
	.compat_setsockopt = compat_sock_common_setsockopt,
	.compat_getsockopt = compat_sock_common_getsockopt,
#endif
};
static struct proto mptcp_v6_prot;

static void mptcp_v6_destroy(struct sock *sk)
{
	mptcp_destroy(sk);
	inet6_destroy_sock(sk);
}

static struct inet_protosw mptcp_v6_protosw = {
	.type		= SOCK_STREAM,
	.protocol	= IPPROTO_MPTCP,
	.prot		= &mptcp_v6_prot,
	.ops		= &mptcp_v6_stream_ops,
	.flags		= INET_PROTOSW_ICSK,
};
int mptcp_proto_v6_init(void)
{
	int err;

	mptcp_v6_prot = mptcp_prot;
	strcpy(mptcp_v6_prot.name, "MPTCPv6");
	mptcp_v6_prot.slab = NULL;
	mptcp_v6_prot.destroy = mptcp_v6_destroy;
	mptcp_v6_prot.obj_size = sizeof(struct mptcp6_sock);

	err = proto_register(&mptcp_v6_prot, 1);
	if (err)
		return err;

	err = inet6_register_protosw(&mptcp_v6_protosw);
	if (err)
		proto_unregister(&mptcp_v6_prot);

	return err;
}
#endif