// SPDX-License-Identifier: GPL-2.0
/* Multipath TCP
 *
 * Copyright (c) 2017 - 2019, Intel Corporation.
 */

#define pr_fmt(fmt) "MPTCP: " fmt

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/netdevice.h>
#include <crypto/algapi.h>
#include <crypto/sha.h>
#include <net/sock.h>
#include <net/inet_common.h>
#include <net/inet_hashtables.h>
#include <net/protocol.h>
#include <net/tcp.h>
#if IS_ENABLED(CONFIG_MPTCP_IPV6)
#include <net/ip6_route.h>
#endif
#include <net/mptcp.h>
#include "protocol.h"
#include "mib.h"

static void SUBFLOW_REQ_INC_STATS(struct request_sock *req,
                                  enum linux_mptcp_mib_field field)
{
        MPTCP_INC_STATS(sock_net(req_to_sk(req)), field);
}

static void subflow_req_destructor(struct request_sock *req)
{
        struct mptcp_subflow_request_sock *subflow_req = mptcp_subflow_rsk(req);

        pr_debug("subflow_req=%p", subflow_req);

        if (subflow_req->msk)
                sock_put((struct sock *)subflow_req->msk);

        mptcp_token_destroy_request(req);
        tcp_request_sock_ops.destructor(req);
}

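/* Compute an HMAC-SHA256 over the two 32-bit nonces, keyed with key1/key2;
 * callers truncate the 32-byte digest as needed, e.g. to the 64-bit thmac
 * carried in the MP_JOIN SYN-ACK.
 */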
static void subflow_generate_hmac(u64 key1, u64 key2, u32 nonce1, u32 nonce2,
                                  void *hmac)
{
        u8 msg[8];

        put_unaligned_be32(nonce1, &msg[0]);
        put_unaligned_be32(nonce2, &msg[4]);

        mptcp_crypto_hmac_sha(key1, key2, msg, 8, hmac);
}

static bool mptcp_can_accept_new_subflow(const struct mptcp_sock *msk)
{
        return mptcp_is_fully_established((void *)msk) &&
               READ_ONCE(msk->pm.accept_subflow);
}

/* validate received token and create truncated hmac and nonce for SYN-ACK */
static struct mptcp_sock *subflow_token_join_request(struct request_sock *req,
                                                     const struct sk_buff *skb)
{
        struct mptcp_subflow_request_sock *subflow_req = mptcp_subflow_rsk(req);
        u8 hmac[SHA256_DIGEST_SIZE];
        struct mptcp_sock *msk;
        int local_id;

        msk = mptcp_token_get_sock(subflow_req->token);
        if (!msk) {
                SUBFLOW_REQ_INC_STATS(req, MPTCP_MIB_JOINNOTOKEN);
                return NULL;
        }

        local_id = mptcp_pm_get_local_id(msk, (struct sock_common *)req);
        if (local_id < 0) {
                sock_put((struct sock *)msk);
                return NULL;
        }
        subflow_req->local_id = local_id;

        get_random_bytes(&subflow_req->local_nonce, sizeof(u32));

        subflow_generate_hmac(msk->local_key, msk->remote_key,
                              subflow_req->local_nonce,
                              subflow_req->remote_nonce, hmac);

        subflow_req->thmac = get_unaligned_be64(hmac);
        return msk;
}

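/* Parse the MPTCP options carried by the incoming SYN and mark the request
 * as mp_capable or mp_join accordingly; for MP_JOIN requests this also
 * looks up the owning msk via the received token.
 */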
static void subflow_init_req(struct request_sock *req,
                             const struct sock *sk_listener,
                             struct sk_buff *skb)
{
        struct mptcp_subflow_context *listener = mptcp_subflow_ctx(sk_listener);
        struct mptcp_subflow_request_sock *subflow_req = mptcp_subflow_rsk(req);
        struct mptcp_options_received mp_opt;

        pr_debug("subflow_req=%p, listener=%p", subflow_req, listener);

        mptcp_get_options(skb, &mp_opt);

        subflow_req->mp_capable = 0;
        subflow_req->mp_join = 0;
        subflow_req->msk = NULL;
        mptcp_token_init_request(req);

#ifdef CONFIG_TCP_MD5SIG
        /* no MPTCP if MD5SIG is enabled on this socket or we may run out of
         * TCP option space.
         */
        if (rcu_access_pointer(tcp_sk(sk_listener)->md5sig_info))
                return;
#endif

        if (mp_opt.mp_capable) {
                SUBFLOW_REQ_INC_STATS(req, MPTCP_MIB_MPCAPABLEPASSIVE);

                if (mp_opt.mp_join)
                        return;
        } else if (mp_opt.mp_join) {
                SUBFLOW_REQ_INC_STATS(req, MPTCP_MIB_JOINSYNRX);
        }

        if (mp_opt.mp_capable && listener->request_mptcp) {
                int err;

                err = mptcp_token_new_request(req);
                if (err == 0)
                        subflow_req->mp_capable = 1;

                subflow_req->ssn_offset = TCP_SKB_CB(skb)->seq;
        } else if (mp_opt.mp_join && listener->request_mptcp) {
                subflow_req->ssn_offset = TCP_SKB_CB(skb)->seq;
                subflow_req->mp_join = 1;
                subflow_req->backup = mp_opt.backup;
                subflow_req->remote_id = mp_opt.join_id;
                subflow_req->token = mp_opt.token;
                subflow_req->remote_nonce = mp_opt.nonce;
                subflow_req->msk = subflow_token_join_request(req, skb);
                pr_debug("token=%u, remote_nonce=%u msk=%p", subflow_req->token,
                         subflow_req->remote_nonce, subflow_req->msk);
        }
}

static void subflow_v4_init_req(struct request_sock *req,
                                const struct sock *sk_listener,
                                struct sk_buff *skb)
{
        tcp_rsk(req)->is_mptcp = 1;

        tcp_request_sock_ipv4_ops.init_req(req, sk_listener, skb);

        subflow_init_req(req, sk_listener, skb);
}

#if IS_ENABLED(CONFIG_MPTCP_IPV6)
static void subflow_v6_init_req(struct request_sock *req,
                                const struct sock *sk_listener,
                                struct sk_buff *skb)
{
        tcp_rsk(req)->is_mptcp = 1;

        tcp_request_sock_ipv6_ops.init_req(req, sk_listener, skb);

        subflow_init_req(req, sk_listener, skb);
}
#endif

/* validate received truncated hmac and create hmac for third ACK */
static bool subflow_thmac_valid(struct mptcp_subflow_context *subflow)
{
        u8 hmac[SHA256_DIGEST_SIZE];
        u64 thmac;

        subflow_generate_hmac(subflow->remote_key, subflow->local_key,
                              subflow->remote_nonce, subflow->local_nonce,
                              hmac);

        thmac = get_unaligned_be64(hmac);
        pr_debug("subflow=%p, token=%u, thmac=%llu, subflow->thmac=%llu\n",
                 subflow, subflow->token,
                 (unsigned long long)thmac,
                 (unsigned long long)subflow->thmac);

        return thmac == subflow->thmac;
}

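/* Called when the SYN-ACK is received: completes either the MP_CAPABLE or
 * the MP_JOIN handshake on this subflow, falling back to plain TCP or
 * resetting the connection when validation fails.
 */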
static void subflow_finish_connect(struct sock *sk, const struct sk_buff *skb)
{
        struct mptcp_subflow_context *subflow = mptcp_subflow_ctx(sk);
        struct mptcp_options_received mp_opt;
        struct sock *parent = subflow->conn;

        subflow->icsk_af_ops->sk_rx_dst_set(sk, skb);

        if (inet_sk_state_load(parent) == TCP_SYN_SENT) {
                inet_sk_state_store(parent, TCP_ESTABLISHED);
                parent->sk_state_change(parent);
        }

        /* be sure no special action on any packet other than syn-ack */
        if (subflow->conn_finished)
                return;

        subflow->rel_write_seq = 1;
        subflow->conn_finished = 1;
        subflow->ssn_offset = TCP_SKB_CB(skb)->seq;
        pr_debug("subflow=%p synack seq=%x", subflow, subflow->ssn_offset);

        mptcp_get_options(skb, &mp_opt);
        if (subflow->request_mptcp) {
                if (!mp_opt.mp_capable) {
                        MPTCP_INC_STATS(sock_net(sk),
                                        MPTCP_MIB_MPCAPABLEACTIVEFALLBACK);
                        mptcp_do_fallback(sk);
                        pr_fallback(mptcp_sk(subflow->conn));
                        goto fallback;
                }

                subflow->mp_capable = 1;
                subflow->can_ack = 1;
                subflow->remote_key = mp_opt.sndr_key;
                pr_debug("subflow=%p, remote_key=%llu", subflow,
                         subflow->remote_key);
                mptcp_finish_connect(sk);
        } else if (subflow->request_join) {
                u8 hmac[SHA256_DIGEST_SIZE];

                if (!mp_opt.mp_join)
                        goto do_reset;

                subflow->thmac = mp_opt.thmac;
                subflow->remote_nonce = mp_opt.nonce;
                pr_debug("subflow=%p, thmac=%llu, remote_nonce=%u", subflow,
                         subflow->thmac, subflow->remote_nonce);

                if (!subflow_thmac_valid(subflow)) {
                        MPTCP_INC_STATS(sock_net(sk), MPTCP_MIB_JOINACKMAC);
                        goto do_reset;
                }

                subflow_generate_hmac(subflow->local_key, subflow->remote_key,
                                      subflow->local_nonce,
                                      subflow->remote_nonce,
                                      hmac);
                memcpy(subflow->hmac, hmac, MPTCPOPT_HMAC_LEN);

                if (!mptcp_finish_join(sk))
                        goto do_reset;

                subflow->mp_join = 1;
                MPTCP_INC_STATS(sock_net(sk), MPTCP_MIB_JOINSYNACKRX);
        } else if (mptcp_check_fallback(sk)) {
fallback:
                mptcp_rcv_space_init(mptcp_sk(parent), sk);
        }
        return;

do_reset:
        tcp_send_active_reset(sk, GFP_ATOMIC);
        tcp_done(sk);
}

static struct request_sock_ops subflow_request_sock_ops;
static struct tcp_request_sock_ops subflow_request_sock_ipv4_ops;

static int subflow_v4_conn_request(struct sock *sk, struct sk_buff *skb)
{
        struct mptcp_subflow_context *subflow = mptcp_subflow_ctx(sk);

        pr_debug("subflow=%p", subflow);

        /* Never answer to SYNs sent to broadcast or multicast */
        if (skb_rtable(skb)->rt_flags & (RTCF_BROADCAST | RTCF_MULTICAST))
                goto drop;

        return tcp_conn_request(&subflow_request_sock_ops,
                                &subflow_request_sock_ipv4_ops,
                                sk, skb);
drop:
        tcp_listendrop(sk);
        return 0;
}

#if IS_ENABLED(CONFIG_MPTCP_IPV6)
static struct tcp_request_sock_ops subflow_request_sock_ipv6_ops;
static struct inet_connection_sock_af_ops subflow_v6_specific;
static struct inet_connection_sock_af_ops subflow_v6m_specific;

static int subflow_v6_conn_request(struct sock *sk, struct sk_buff *skb)
{
        struct mptcp_subflow_context *subflow = mptcp_subflow_ctx(sk);

        pr_debug("subflow=%p", subflow);

        if (skb->protocol == htons(ETH_P_IP))
                return subflow_v4_conn_request(sk, skb);

        if (!ipv6_unicast_destination(skb))
                goto drop;

        return tcp_conn_request(&subflow_request_sock_ops,
                                &subflow_request_sock_ipv6_ops, sk, skb);

drop:
        tcp_listendrop(sk);
        return 0; /* don't send reset */
}
#endif

/* validate hmac received in third ACK */
static bool subflow_hmac_valid(const struct request_sock *req,
                               const struct mptcp_options_received *mp_opt)
{
        const struct mptcp_subflow_request_sock *subflow_req;
        u8 hmac[SHA256_DIGEST_SIZE];
        struct mptcp_sock *msk;

        subflow_req = mptcp_subflow_rsk(req);
        msk = subflow_req->msk;
        if (!msk)
                return false;

        subflow_generate_hmac(msk->remote_key, msk->local_key,
                              subflow_req->remote_nonce,
                              subflow_req->local_nonce, hmac);

        return !crypto_memneq(hmac, mp_opt->hmac, MPTCPOPT_HMAC_LEN);
}

static void mptcp_sock_destruct(struct sock *sk)
{
        /* if new mptcp socket isn't accepted, it is free'd
         * from the tcp listener sockets request queue, linked
         * from req->sk.  The tcp socket is released.
         * This calls the ULP release function which will
         * also remove the mptcp socket, via
         * sock_put(ctx->conn).
         *
         * Problem is that the mptcp socket will not be in
         * SYN_RECV state and doesn't have SOCK_DEAD flag.
         * Both result in warnings from inet_sock_destruct.
         */

        if (sk->sk_state == TCP_SYN_RECV) {
                sk->sk_state = TCP_CLOSE;
                WARN_ON_ONCE(sk->sk_socket);
                sock_orphan(sk);
        }

        mptcp_token_destroy(mptcp_sk(sk));
        inet_sock_destruct(sk);
}

static void mptcp_force_close(struct sock *sk)
{
        inet_sk_state_store(sk, TCP_CLOSE);
        sk_common_release(sk);
}

static void subflow_ulp_fallback(struct sock *sk,
                                 struct mptcp_subflow_context *old_ctx)
{
        struct inet_connection_sock *icsk = inet_csk(sk);

        mptcp_subflow_tcp_fallback(sk, old_ctx);
        icsk->icsk_ulp_ops = NULL;
        rcu_assign_pointer(icsk->icsk_ulp_data, NULL);
        tcp_sk(sk)->is_mptcp = 0;
}

static void subflow_drop_ctx(struct sock *ssk)
{
        struct mptcp_subflow_context *ctx = mptcp_subflow_ctx(ssk);

        if (!ctx)
                return;

        subflow_ulp_fallback(ssk, ctx);
        if (ctx->conn)
                sock_put(ctx->conn);

        kfree_rcu(ctx, rcu);
}

void mptcp_subflow_fully_established(struct mptcp_subflow_context *subflow,
                                     struct mptcp_options_received *mp_opt)
{
        struct mptcp_sock *msk = mptcp_sk(subflow->conn);

        subflow->remote_key = mp_opt->sndr_key;
        subflow->fully_established = 1;
        subflow->can_ack = 1;
        WRITE_ONCE(msk->fully_established, true);
}

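/* Build the child socket for an incoming connection. For MP_CAPABLE
 * requests a new msk is cloned from the listener; for MP_JOIN requests the
 * subflow is attached to the existing msk. Falling back to plain TCP is
 * allowed for MP_CAPABLE, but fatal (reset) for MP_JOIN.
 */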
static struct sock *subflow_syn_recv_sock(const struct sock *sk,
                                          struct sk_buff *skb,
                                          struct request_sock *req,
                                          struct dst_entry *dst,
                                          struct request_sock *req_unhash,
                                          bool *own_req)
{
        struct mptcp_subflow_context *listener = mptcp_subflow_ctx(sk);
        struct mptcp_subflow_request_sock *subflow_req;
        struct mptcp_options_received mp_opt;
        bool fallback, fallback_is_fatal;
        struct sock *new_msk = NULL;
        struct sock *child;

        pr_debug("listener=%p, req=%p, conn=%p", listener, req, listener->conn);

        /* After child creation we must look for 'mp_capable' even when options
         * are not parsed
         */
        mp_opt.mp_capable = 0;

        /* hopefully temporary handling for MP_JOIN+syncookie */
        subflow_req = mptcp_subflow_rsk(req);
        fallback_is_fatal = tcp_rsk(req)->is_mptcp && subflow_req->mp_join;
        fallback = !tcp_rsk(req)->is_mptcp;
        if (fallback)
                goto create_child;

        /* if the sk is MP_CAPABLE, we try to fetch the client key */
        if (subflow_req->mp_capable) {
                if (TCP_SKB_CB(skb)->seq != subflow_req->ssn_offset + 1) {
                        /* here we can receive and accept an in-window,
                         * out-of-order pkt, which will not carry the MP_CAPABLE
                         * opt even on mptcp enabled paths
                         */
                        goto create_msk;
                }

                mptcp_get_options(skb, &mp_opt);
                if (!mp_opt.mp_capable) {
                        fallback = true;
                        goto create_child;
                }

create_msk:
                new_msk = mptcp_sk_clone(listener->conn, &mp_opt, req);
                if (!new_msk)
                        fallback = true;
        } else if (subflow_req->mp_join) {
                mptcp_get_options(skb, &mp_opt);
                if (!mp_opt.mp_join ||
                    !mptcp_can_accept_new_subflow(subflow_req->msk) ||
                    !subflow_hmac_valid(req, &mp_opt)) {
                        SUBFLOW_REQ_INC_STATS(req, MPTCP_MIB_JOINACKMAC);
                        fallback = true;
                }
        }

create_child:
        child = listener->icsk_af_ops->syn_recv_sock(sk, skb, req, dst,
                                                     req_unhash, own_req);

        if (child && *own_req) {
                struct mptcp_subflow_context *ctx = mptcp_subflow_ctx(child);

                tcp_rsk(req)->drop_req = false;

                /* we need to fallback on ctx allocation failure and on pre-reqs
                 * checking above. In the latter scenario we additionally need
                 * to reset the context to non MPTCP status.
                 */
                if (!ctx || fallback) {
                        if (fallback_is_fatal)
                                goto dispose_child;

                        subflow_drop_ctx(child);
                        goto out;
                }

                if (ctx->mp_capable) {
                        /* this can't race with mptcp_close(), as the msk is
                         * not yet exposed to user-space
                         */
                        inet_sk_state_store((void *)new_msk, TCP_ESTABLISHED);

                        /* new mpc subflow takes ownership of the newly
                         * created mptcp socket
                         */
                        new_msk->sk_destruct = mptcp_sock_destruct;
                        mptcp_pm_new_connection(mptcp_sk(new_msk), 1);
                        mptcp_token_accept(subflow_req, mptcp_sk(new_msk));
                        ctx->conn = new_msk;
                        new_msk = NULL;

                        /* with OoO packets we can reach here without ingress
                         * mpc option
                         */
                        if (mp_opt.mp_capable)
                                mptcp_subflow_fully_established(ctx, &mp_opt);
                } else if (ctx->mp_join) {
                        struct mptcp_sock *owner;

                        owner = subflow_req->msk;
                        if (!owner)
                                goto dispose_child;

                        /* move the msk reference ownership to the subflow */
                        subflow_req->msk = NULL;
                        ctx->conn = (struct sock *)owner;
                        if (!mptcp_finish_join(child))
                                goto dispose_child;

                        SUBFLOW_REQ_INC_STATS(req, MPTCP_MIB_JOINACKRX);
                        tcp_rsk(req)->drop_req = true;
                }
        }

out:
        /* dispose of the left over mptcp master, if any */
        if (unlikely(new_msk))
                mptcp_force_close(new_msk);

        /* check for expected invariant - should never trigger, just help
         * catching earlier subtle bugs
         */
        WARN_ON_ONCE(child && *own_req && tcp_sk(child)->is_mptcp &&
                     (!mptcp_subflow_ctx(child) ||
                      !mptcp_subflow_ctx(child)->conn));
        return child;

dispose_child:
        subflow_drop_ctx(child);
        tcp_rsk(req)->drop_req = true;
        inet_csk_prepare_for_destroy_sock(child);
        tcp_done(child);
        req->rsk_ops->send_reset(sk, skb);

        /* The last child reference will be released by the caller */
        return child;
}

static struct inet_connection_sock_af_ops subflow_specific;

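/* Possible outcomes of DSS mapping validation on the subflow receive queue:
 * a usable mapping (OK), a protocol violation (INVALID), no data queued
 * (EMPTY), a standalone DATA_FIN (DATA_FIN) or a fallback socket needing a
 * synthetic mapping (DUMMY).
 */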
enum mapping_status {
        MAPPING_OK,
        MAPPING_INVALID,
        MAPPING_EMPTY,
        MAPPING_DATA_FIN,
        MAPPING_DUMMY
};

static u64 expand_seq(u64 old_seq, u16 old_data_len, u64 seq)
{
        if ((u32)seq == (u32)old_seq)
                return old_seq;

        /* Assume map covers data not mapped yet. */
        return seq | ((old_seq + old_data_len + 1) & GENMASK_ULL(63, 32));
}

static void warn_bad_map(struct mptcp_subflow_context *subflow, u32 ssn)
{
        WARN_ONCE(1, "Bad mapping: ssn=%d map_seq=%d map_data_len=%d",
                  ssn, subflow->map_subflow_seq, subflow->map_data_len);
}

static bool skb_is_fully_mapped(struct sock *ssk, struct sk_buff *skb)
{
        struct mptcp_subflow_context *subflow = mptcp_subflow_ctx(ssk);
        unsigned int skb_consumed;

        skb_consumed = tcp_sk(ssk)->copied_seq - TCP_SKB_CB(skb)->seq;
        if (WARN_ON_ONCE(skb_consumed >= skb->len))
                return true;

        return skb->len - skb_consumed <= subflow->map_data_len -
                                          mptcp_subflow_get_map_offset(subflow);
}

static bool validate_mapping(struct sock *ssk, struct sk_buff *skb)
{
        struct mptcp_subflow_context *subflow = mptcp_subflow_ctx(ssk);
        u32 ssn = tcp_sk(ssk)->copied_seq - subflow->ssn_offset;

        if (unlikely(before(ssn, subflow->map_subflow_seq))) {
                /* Mapping covers data later in the subflow stream,
                 * currently unsupported.
                 */
                warn_bad_map(subflow, ssn);
                return false;
        }
        if (unlikely(!before(ssn, subflow->map_subflow_seq +
                                  subflow->map_data_len))) {
                /* Mapping covers past subflow data, invalid */
                warn_bad_map(subflow, ssn + skb->len);
                return false;
        }
        return true;
}

static enum mapping_status get_mapping_status(struct sock *ssk,
                                              struct mptcp_sock *msk)
{
        struct mptcp_subflow_context *subflow = mptcp_subflow_ctx(ssk);
        struct mptcp_ext *mpext;
        struct sk_buff *skb;
        u16 data_len;
        u64 map_seq;

        skb = skb_peek(&ssk->sk_receive_queue);
        if (!skb)
                return MAPPING_EMPTY;

        if (mptcp_check_fallback(ssk))
                return MAPPING_DUMMY;

        mpext = mptcp_get_ext(skb);
        if (!mpext || !mpext->use_map) {
                if (!subflow->map_valid && !skb->len) {
                        /* the TCP stack delivers 0 len FIN pkts to the
                         * receive queue, those are the only 0 len pkts
                         * ever expected here, and we can admit no mapping
                         * only for 0 len pkts
                         */
                        if (!(TCP_SKB_CB(skb)->tcp_flags & TCPHDR_FIN))
                                WARN_ONCE(1, "0len seq %d:%d flags %x",
                                          TCP_SKB_CB(skb)->seq,
                                          TCP_SKB_CB(skb)->end_seq,
                                          TCP_SKB_CB(skb)->tcp_flags);
                        sk_eat_skb(ssk, skb);
                        return MAPPING_EMPTY;
                }

                if (!subflow->map_valid)
                        return MAPPING_INVALID;

                goto validate_seq;
        }

        pr_debug("seq=%llu is64=%d ssn=%u data_len=%u data_fin=%d",
                 mpext->data_seq, mpext->dsn64, mpext->subflow_seq,
                 mpext->data_len, mpext->data_fin);

        data_len = mpext->data_len;
        if (data_len == 0) {
                pr_err("Infinite mapping not handled");
                MPTCP_INC_STATS(sock_net(ssk), MPTCP_MIB_INFINITEMAPRX);
                return MAPPING_INVALID;
        }

        if (mpext->data_fin == 1) {
                if (data_len == 1) {
                        mptcp_update_rcv_data_fin(msk, mpext->data_seq);
                        pr_debug("DATA_FIN with no payload seq=%llu", mpext->data_seq);
                        if (subflow->map_valid) {
                                /* A DATA_FIN might arrive in a DSS
                                 * option before the previous mapping
                                 * has been fully consumed. Continue
                                 * handling the existing mapping.
                                 */
                                skb_ext_del(skb, SKB_EXT_MPTCP);
                                return MAPPING_OK;
                        } else {
                                return MAPPING_DATA_FIN;
                        }
                } else {
                        mptcp_update_rcv_data_fin(msk, mpext->data_seq + data_len);
                        pr_debug("DATA_FIN with mapping seq=%llu", mpext->data_seq + data_len);
                }

                /* Adjust for DATA_FIN using 1 byte of sequence space */
                data_len--;
        }

        if (!mpext->dsn64) {
                map_seq = expand_seq(subflow->map_seq, subflow->map_data_len,
                                     mpext->data_seq);
                subflow->use_64bit_ack = 0;
                pr_debug("expanded seq=%llu", subflow->map_seq);
        } else {
                map_seq = mpext->data_seq;
                subflow->use_64bit_ack = 1;
        }

        if (subflow->map_valid) {
                /* Allow replacing only with an identical map */
                if (subflow->map_seq == map_seq &&
                    subflow->map_subflow_seq == mpext->subflow_seq &&
                    subflow->map_data_len == data_len) {
                        skb_ext_del(skb, SKB_EXT_MPTCP);
                        return MAPPING_OK;
                }

                /* If this skb data are fully covered by the current mapping,
                 * the new map would need caching, which is not supported
                 */
                if (skb_is_fully_mapped(ssk, skb)) {
                        MPTCP_INC_STATS(sock_net(ssk), MPTCP_MIB_DSSNOMATCH);
                        return MAPPING_INVALID;
                }

                /* will validate the next map after consuming the current one */
                return MAPPING_OK;
        }

        subflow->map_seq = map_seq;
        subflow->map_subflow_seq = mpext->subflow_seq;
        subflow->map_data_len = data_len;
        subflow->map_valid = 1;
        subflow->mpc_map = mpext->mpc_map;
        pr_debug("new map seq=%llu subflow_seq=%u data_len=%u",
                 subflow->map_seq, subflow->map_subflow_seq,
                 subflow->map_data_len);

validate_seq:
        /* we revalidate valid mapping on new skb, because we must ensure
         * the current skb is completely covered by the available mapping
         */
        if (!validate_mapping(ssk, skb))
                return MAPPING_INVALID;

        skb_ext_del(skb, SKB_EXT_MPTCP);
        return MAPPING_OK;
}

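/* tcp_read_sock() actor that only accounts the flushed bytes: it is used
 * below to drop already mapped data from the subflow receive queue.
 */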
static int subflow_read_actor(read_descriptor_t *desc,
                              struct sk_buff *skb,
                              unsigned int offset, size_t len)
{
        size_t copy_len = min(desc->count, len);

        desc->count -= copy_len;

        pr_debug("flushed %zu bytes, %zu left", copy_len, desc->count);
        return copy_len;
}

static bool subflow_check_data_avail(struct sock *ssk)
{
        struct mptcp_subflow_context *subflow = mptcp_subflow_ctx(ssk);
        enum mapping_status status;
        struct mptcp_sock *msk;
        struct sk_buff *skb;

        pr_debug("msk=%p ssk=%p data_avail=%d skb=%p", subflow->conn, ssk,
                 subflow->data_avail, skb_peek(&ssk->sk_receive_queue));
        if (subflow->data_avail)
                return true;

        msk = mptcp_sk(subflow->conn);
        for (;;) {
                u32 map_remaining;
                size_t delta;
                u64 ack_seq;
                u64 old_ack;

                status = get_mapping_status(ssk, msk);
                pr_debug("msk=%p ssk=%p status=%d", msk, ssk, status);
                if (status == MAPPING_INVALID) {
                        ssk->sk_err = EBADMSG;
                        goto fatal;
                }
                if (status == MAPPING_DUMMY) {
                        __mptcp_do_fallback(msk);
                        skb = skb_peek(&ssk->sk_receive_queue);
                        subflow->map_valid = 1;
                        subflow->map_seq = READ_ONCE(msk->ack_seq);
                        subflow->map_data_len = skb->len;
                        subflow->map_subflow_seq = tcp_sk(ssk)->copied_seq -
                                                   subflow->ssn_offset;
                        return true;
                }

                if (status != MAPPING_OK)
                        return false;

                skb = skb_peek(&ssk->sk_receive_queue);
                if (WARN_ON_ONCE(!skb))
                        return false;

                /* if msk lacks the remote key, this subflow must provide an
                 * MP_CAPABLE-based mapping
                 */
                if (unlikely(!READ_ONCE(msk->can_ack))) {
                        if (!subflow->mpc_map) {
                                ssk->sk_err = EBADMSG;
                                goto fatal;
                        }
                        WRITE_ONCE(msk->remote_key, subflow->remote_key);
                        WRITE_ONCE(msk->ack_seq, subflow->map_seq);
                        WRITE_ONCE(msk->can_ack, true);
                }

                old_ack = READ_ONCE(msk->ack_seq);
                ack_seq = mptcp_subflow_get_mapped_dsn(subflow);
                pr_debug("msk ack_seq=%llx subflow ack_seq=%llx", old_ack,
                         ack_seq);
                if (ack_seq == old_ack)
                        break;

                /* only accept in-sequence mapping. Old values are spurious
                 * retransmission; we can hit "future" values on active backup
                 * subflow switch, we rely on retransmissions to get
                 * in-sequence data.
                 * Concurrent subflows support will require subflow data
                 * buffering
                 */
                map_remaining = subflow->map_data_len -
                                mptcp_subflow_get_map_offset(subflow);
                if (before64(ack_seq, old_ack))
                        delta = min_t(size_t, old_ack - ack_seq, map_remaining);
                else
                        delta = min_t(size_t, ack_seq - old_ack, map_remaining);

                /* discard mapped data */
                pr_debug("discarding %zu bytes, current map len=%d", delta,
                         map_remaining);
                if (delta) {
                        read_descriptor_t desc = {
                                .count = delta,
                        };
                        int ret;

                        ret = tcp_read_sock(ssk, &desc, subflow_read_actor);
                        if (ret < 0) {
                                ssk->sk_err = -ret;
                                goto fatal;
                        }
                        if (ret < delta)
                                return false;
                        if (delta == map_remaining)
                                subflow->map_valid = 0;
                }
        }
        return true;

fatal:
        /* fatal protocol error, close the socket */
        /* This barrier is coupled with smp_rmb() in tcp_poll() */
        smp_wmb();
        ssk->sk_error_report(ssk);
        tcp_set_state(ssk, TCP_CLOSE);
        tcp_send_active_reset(ssk, GFP_ATOMIC);
        return false;
}

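/* Check for in-sequence data at the MPTCP level on this subflow, expiring
 * the current mapping once it has been fully consumed.
 */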
bool mptcp_subflow_data_available(struct sock *sk)
{
        struct mptcp_subflow_context *subflow = mptcp_subflow_ctx(sk);
        struct sk_buff *skb;

        /* check if current mapping is still valid */
        if (subflow->map_valid &&
            mptcp_subflow_get_map_offset(subflow) >= subflow->map_data_len) {
                subflow->map_valid = 0;
                subflow->data_avail = 0;

                pr_debug("Done with mapping: seq=%u data_len=%u",
                         subflow->map_subflow_seq,
                         subflow->map_data_len);
        }

        if (!subflow_check_data_avail(sk)) {
                subflow->data_avail = 0;
                return false;
        }

        skb = skb_peek(&sk->sk_receive_queue);
        subflow->data_avail = skb &&
                before(tcp_sk(sk)->copied_seq, TCP_SKB_CB(skb)->end_seq);
        return subflow->data_avail;
}

/* If ssk has an mptcp parent socket, use the mptcp rcvbuf occupancy,
 * not the tcp one.
 *
 * In mptcp, rwin is about the mptcp-level connection data.
 *
 * Data that is still on the ssk rx queue can thus be ignored:
 * as far as the mptcp peer is concerned, that data is still inflight.
 * The DSS ACK is updated when the skb is moved to the mptcp rx queue.
 */
void mptcp_space(const struct sock *ssk, int *space, int *full_space)
{
        const struct mptcp_subflow_context *subflow = mptcp_subflow_ctx(ssk);
        const struct sock *sk = subflow->conn;

        *space = tcp_space(sk);
        *full_space = tcp_full_space(sk);
}

static void subflow_data_ready(struct sock *sk)
{
        struct mptcp_subflow_context *subflow = mptcp_subflow_ctx(sk);
        u16 state = 1 << inet_sk_state_load(sk);
        struct sock *parent = subflow->conn;
        struct mptcp_sock *msk;

        msk = mptcp_sk(parent);
        if (state & TCPF_LISTEN) {
                set_bit(MPTCP_DATA_READY, &msk->flags);
                parent->sk_data_ready(parent);
                return;
        }

        WARN_ON_ONCE(!__mptcp_check_fallback(msk) && !subflow->mp_capable &&
                     !subflow->mp_join && !(state & TCPF_CLOSE));

        if (mptcp_subflow_data_available(sk))
                mptcp_data_ready(parent, sk);
}

static void subflow_write_space(struct sock *sk)
{
        struct mptcp_subflow_context *subflow = mptcp_subflow_ctx(sk);
        struct sock *parent = subflow->conn;

        sk_stream_write_space(sk);
        if (sk_stream_is_writeable(sk)) {
                set_bit(MPTCP_SEND_SPACE, &mptcp_sk(parent)->flags);
                smp_mb__after_atomic();
                /* set SEND_SPACE before sk_stream_write_space clears NOSPACE */
                sk_stream_write_space(parent);
        }
}

static struct inet_connection_sock_af_ops *
subflow_default_af_ops(struct sock *sk)
{
#if IS_ENABLED(CONFIG_MPTCP_IPV6)
        if (sk->sk_family == AF_INET6)
                return &subflow_v6_specific;
#endif
        return &subflow_specific;
}

#if IS_ENABLED(CONFIG_MPTCP_IPV6)
void mptcpv6_handle_mapped(struct sock *sk, bool mapped)
{
        struct mptcp_subflow_context *subflow = mptcp_subflow_ctx(sk);
        struct inet_connection_sock *icsk = inet_csk(sk);
        struct inet_connection_sock_af_ops *target;

        target = mapped ? &subflow_v6m_specific : subflow_default_af_ops(sk);

        pr_debug("subflow=%p family=%d ops=%p target=%p mapped=%d",
                 subflow, sk->sk_family, icsk->icsk_af_ops, target, mapped);

        if (likely(icsk->icsk_af_ops == target))
                return;

        subflow->icsk_af_ops = icsk->icsk_af_ops;
        icsk->icsk_af_ops = target;
}
#endif

static void mptcp_info2sockaddr(const struct mptcp_addr_info *info,
                                struct sockaddr_storage *addr)
{
        memset(addr, 0, sizeof(*addr));
        addr->ss_family = info->family;
        if (addr->ss_family == AF_INET) {
                struct sockaddr_in *in_addr = (struct sockaddr_in *)addr;

                in_addr->sin_addr = info->addr;
                in_addr->sin_port = info->port;
        }
#if IS_ENABLED(CONFIG_MPTCP_IPV6)
        else if (addr->ss_family == AF_INET6) {
                struct sockaddr_in6 *in6_addr = (struct sockaddr_in6 *)addr;

                in6_addr->sin6_addr = info->addr6;
                in6_addr->sin6_port = info->port;
        }
#endif
}

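/* Create an additional subflow for msk: allocate a kernel TCP socket, bind
 * it to the given local address/interface and start a non-blocking MP_JOIN
 * connect towards the remote address.
 */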
int __mptcp_subflow_connect(struct sock *sk, int ifindex,
                            const struct mptcp_addr_info *loc,
                            const struct mptcp_addr_info *remote)
{
        struct mptcp_sock *msk = mptcp_sk(sk);
        struct mptcp_subflow_context *subflow;
        struct sockaddr_storage addr;
        int local_id = loc->id;
        struct socket *sf;
        struct sock *ssk;
        u32 remote_token;
        int addrlen;
        int err;

        if (!mptcp_is_fully_established(sk))
                return -ENOTCONN;

        err = mptcp_subflow_create_socket(sk, &sf);
        if (err)
                return err;

        ssk = sf->sk;
        subflow = mptcp_subflow_ctx(ssk);
        do {
                get_random_bytes(&subflow->local_nonce, sizeof(u32));
        } while (!subflow->local_nonce);

        if (!local_id) {
                err = mptcp_pm_get_local_id(msk, (struct sock_common *)ssk);
                if (err < 0)
                        goto failed;

                local_id = err;
        }

        subflow->remote_key = msk->remote_key;
        subflow->local_key = msk->local_key;
        subflow->token = msk->token;
        mptcp_info2sockaddr(loc, &addr);

        addrlen = sizeof(struct sockaddr_in);
#if IS_ENABLED(CONFIG_MPTCP_IPV6)
        if (loc->family == AF_INET6)
                addrlen = sizeof(struct sockaddr_in6);
#endif
        ssk->sk_bound_dev_if = ifindex;
        err = kernel_bind(sf, (struct sockaddr *)&addr, addrlen);
        if (err)
                goto failed;

        mptcp_crypto_key_sha(subflow->remote_key, &remote_token, NULL);
        pr_debug("msk=%p remote_token=%u local_id=%d", msk, remote_token,
                 local_id);
        subflow->remote_token = remote_token;
        subflow->local_id = local_id;
        subflow->request_join = 1;
        subflow->request_bkup = 1;
        mptcp_info2sockaddr(remote, &addr);

        err = kernel_connect(sf, (struct sockaddr *)&addr, addrlen, O_NONBLOCK);
        if (err && err != -EINPROGRESS)
                goto failed;

        spin_lock_bh(&msk->join_list_lock);
        list_add_tail(&subflow->node, &msk->join_list);
        spin_unlock_bh(&msk->join_list_lock);

        return err;

failed:
        sock_release(sf);
        return err;
}

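/* Allocate a kernel TCP socket for a new subflow and attach the "mptcp"
 * ULP to it; inode ownership is adjusted so diag/procfs report it under
 * the owning MPTCP socket.
 */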
int mptcp_subflow_create_socket(struct sock *sk, struct socket **new_sock)
{
        struct mptcp_subflow_context *subflow;
        struct net *net = sock_net(sk);
        struct socket *sf;
        int err;

        err = sock_create_kern(net, sk->sk_family, SOCK_STREAM, IPPROTO_TCP,
                               &sf);
        if (err)
                return err;

        lock_sock(sf->sk);

        /* kernel sockets do not by default acquire net ref, but TCP timer
         * needs it.
         */
        sf->sk->sk_net_refcnt = 1;
        get_net(net);
#ifdef CONFIG_PROC_FS
        this_cpu_add(*net->core.sock_inuse, 1);
#endif
        err = tcp_set_ulp(sf->sk, "mptcp");
        release_sock(sf->sk);

        if (err) {
                sock_release(sf);
                return err;
        }

        /* the newly created socket really belongs to the owning MPTCP master
         * socket, even if for additional subflows the allocation is performed
         * by a kernel workqueue. Adjust inode references, so that the
         * procfs/diag interfaces really show this one belonging to the correct
         * owner
         */
        SOCK_INODE(sf)->i_ino = SOCK_INODE(sk->sk_socket)->i_ino;
        SOCK_INODE(sf)->i_uid = SOCK_INODE(sk->sk_socket)->i_uid;
        SOCK_INODE(sf)->i_gid = SOCK_INODE(sk->sk_socket)->i_gid;

        subflow = mptcp_subflow_ctx(sf->sk);
        pr_debug("subflow=%p", subflow);

        *new_sock = sf;
        sock_hold(sk);
        subflow->conn = sk;

        return 0;
}

static struct mptcp_subflow_context *subflow_create_ctx(struct sock *sk,
                                                        gfp_t priority)
{
        struct inet_connection_sock *icsk = inet_csk(sk);
        struct mptcp_subflow_context *ctx;

        ctx = kzalloc(sizeof(*ctx), priority);
        if (!ctx)
                return NULL;

        rcu_assign_pointer(icsk->icsk_ulp_data, ctx);
        INIT_LIST_HEAD(&ctx->node);

        pr_debug("subflow=%p", ctx);

        ctx->tcp_sock = sk;

        return ctx;
}

static void __subflow_state_change(struct sock *sk)
{
        struct socket_wq *wq;

        rcu_read_lock();
        wq = rcu_dereference(sk->sk_wq);
        if (skwq_has_sleeper(wq))
                wake_up_interruptible_all(&wq->wait);
        rcu_read_unlock();
}

static bool subflow_is_done(const struct sock *sk)
{
        return sk->sk_shutdown & RCV_SHUTDOWN || sk->sk_state == TCP_CLOSE;
}

static void subflow_state_change(struct sock *sk)
{
        struct mptcp_subflow_context *subflow = mptcp_subflow_ctx(sk);
        struct sock *parent = subflow->conn;

        __subflow_state_change(sk);

        if (subflow_simultaneous_connect(sk)) {
                mptcp_do_fallback(sk);
                mptcp_rcv_space_init(mptcp_sk(parent), sk);
                pr_fallback(mptcp_sk(parent));
                subflow->conn_finished = 1;
                if (inet_sk_state_load(parent) == TCP_SYN_SENT) {
                        inet_sk_state_store(parent, TCP_ESTABLISHED);
                        parent->sk_state_change(parent);
                }
        }

        /* as recvmsg() does not acquire the subflow socket for ssk selection
         * a fin packet carrying a DSS can be unnoticed if we don't trigger
         * the data available machinery here.
         */
        if (mptcp_subflow_data_available(sk))
                mptcp_data_ready(parent, sk);

        if (__mptcp_check_fallback(mptcp_sk(parent)) &&
            !(parent->sk_shutdown & RCV_SHUTDOWN) &&
            !subflow->rx_eof && subflow_is_done(sk)) {
                subflow->rx_eof = 1;
                mptcp_subflow_eof(parent);
        }
}

static int subflow_ulp_init(struct sock *sk)
{
        struct inet_connection_sock *icsk = inet_csk(sk);
        struct mptcp_subflow_context *ctx;
        struct tcp_sock *tp = tcp_sk(sk);
        int err = 0;

        /* disallow attaching ULP to a socket unless it has been
         * created with sock_create_kern()
         */
        if (!sk->sk_kern_sock) {
                err = -EOPNOTSUPP;
                goto out;
        }

        ctx = subflow_create_ctx(sk, GFP_KERNEL);
        if (!ctx) {
                err = -ENOMEM;
                goto out;
        }

        pr_debug("subflow=%p, family=%d", ctx, sk->sk_family);

        tp->is_mptcp = 1;
        ctx->icsk_af_ops = icsk->icsk_af_ops;
        icsk->icsk_af_ops = subflow_default_af_ops(sk);
        ctx->tcp_data_ready = sk->sk_data_ready;
        ctx->tcp_state_change = sk->sk_state_change;
        ctx->tcp_write_space = sk->sk_write_space;
        sk->sk_data_ready = subflow_data_ready;
        sk->sk_write_space = subflow_write_space;
        sk->sk_state_change = subflow_state_change;
out:
        return err;
}

static void subflow_ulp_release(struct sock *sk)
{
        struct mptcp_subflow_context *ctx = mptcp_subflow_ctx(sk);

        if (!ctx)
                return;

        if (ctx->conn)
                sock_put(ctx->conn);

        kfree_rcu(ctx, rcu);
}

static void subflow_ulp_clone(const struct request_sock *req,
                              struct sock *newsk,
                              const gfp_t priority)
{
        struct mptcp_subflow_request_sock *subflow_req = mptcp_subflow_rsk(req);
        struct mptcp_subflow_context *old_ctx = mptcp_subflow_ctx(newsk);
        struct mptcp_subflow_context *new_ctx;

        if (!tcp_rsk(req)->is_mptcp ||
            (!subflow_req->mp_capable && !subflow_req->mp_join)) {
                subflow_ulp_fallback(newsk, old_ctx);
                return;
        }

        new_ctx = subflow_create_ctx(newsk, priority);
        if (!new_ctx) {
                subflow_ulp_fallback(newsk, old_ctx);
                return;
        }

        new_ctx->conn_finished = 1;
        new_ctx->icsk_af_ops = old_ctx->icsk_af_ops;
        new_ctx->tcp_data_ready = old_ctx->tcp_data_ready;
        new_ctx->tcp_state_change = old_ctx->tcp_state_change;
        new_ctx->tcp_write_space = old_ctx->tcp_write_space;
        new_ctx->rel_write_seq = 1;
        new_ctx->tcp_sock = newsk;

        if (subflow_req->mp_capable) {
                /* see comments in subflow_syn_recv_sock(), MPTCP connection
                 * is fully established only after we receive the remote key
                 */
                new_ctx->mp_capable = 1;
                new_ctx->local_key = subflow_req->local_key;
                new_ctx->token = subflow_req->token;
                new_ctx->ssn_offset = subflow_req->ssn_offset;
                new_ctx->idsn = subflow_req->idsn;
        } else if (subflow_req->mp_join) {
                new_ctx->ssn_offset = subflow_req->ssn_offset;
                new_ctx->mp_join = 1;
                new_ctx->fully_established = 1;
                new_ctx->backup = subflow_req->backup;
                new_ctx->local_id = subflow_req->local_id;
                new_ctx->token = subflow_req->token;
                new_ctx->thmac = subflow_req->thmac;
        }
}

static struct tcp_ulp_ops subflow_ulp_ops __read_mostly = {
        .name		= "mptcp",
        .owner		= THIS_MODULE,
        .init		= subflow_ulp_init,
        .release	= subflow_ulp_release,
        .clone		= subflow_ulp_clone,
};

static int subflow_ops_init(struct request_sock_ops *subflow_ops)
{
        subflow_ops->obj_size = sizeof(struct mptcp_subflow_request_sock);
        subflow_ops->slab_name = "request_sock_subflow";

        subflow_ops->slab = kmem_cache_create(subflow_ops->slab_name,
                                              subflow_ops->obj_size, 0,
                                              SLAB_ACCOUNT |
                                              SLAB_TYPESAFE_BY_RCU,
                                              NULL);
        if (!subflow_ops->slab)
                return -ENOMEM;

        subflow_ops->destructor = subflow_req_destructor;

        return 0;
}

void __init mptcp_subflow_init(void)
{
        subflow_request_sock_ops = tcp_request_sock_ops;
        if (subflow_ops_init(&subflow_request_sock_ops) != 0)
                panic("MPTCP: failed to init subflow request sock ops\n");

        subflow_request_sock_ipv4_ops = tcp_request_sock_ipv4_ops;
        subflow_request_sock_ipv4_ops.init_req = subflow_v4_init_req;

        subflow_specific = ipv4_specific;
        subflow_specific.conn_request = subflow_v4_conn_request;
        subflow_specific.syn_recv_sock = subflow_syn_recv_sock;
        subflow_specific.sk_rx_dst_set = subflow_finish_connect;

#if IS_ENABLED(CONFIG_MPTCP_IPV6)
        subflow_request_sock_ipv6_ops = tcp_request_sock_ipv6_ops;
        subflow_request_sock_ipv6_ops.init_req = subflow_v6_init_req;

        subflow_v6_specific = ipv6_specific;
        subflow_v6_specific.conn_request = subflow_v6_conn_request;
        subflow_v6_specific.syn_recv_sock = subflow_syn_recv_sock;
        subflow_v6_specific.sk_rx_dst_set = subflow_finish_connect;

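        /* the "mapped" ops handle IPv4-mapped-IPv6 subflows: keep the IPv6
         * ops but transmit using the IPv4 routines, see
         * mptcpv6_handle_mapped().
         */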
        subflow_v6m_specific = subflow_v6_specific;
        subflow_v6m_specific.queue_xmit = ipv4_specific.queue_xmit;
        subflow_v6m_specific.send_check = ipv4_specific.send_check;
        subflow_v6m_specific.net_header_len = ipv4_specific.net_header_len;
        subflow_v6m_specific.mtu_reduced = ipv4_specific.mtu_reduced;
        subflow_v6m_specific.net_frag_header_len = 0;
#endif

        mptcp_diag_subflow_init(&subflow_ulp_ops);

        if (tcp_register_ulp(&subflow_ulp_ops) != 0)
                panic("MPTCP: failed to register subflows to ULP\n");
}