// SPDX-License-Identifier: GPL-2.0
/* Multipath TCP
 *
 * Copyright (c) 2017 - 2019, Intel Corporation.
 */

#define pr_fmt(fmt) "MPTCP: " fmt

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/netdevice.h>
#include <crypto/algapi.h>
#include <crypto/sha2.h>
#include <net/sock.h>
#include <net/inet_common.h>
#include <net/inet_hashtables.h>
#include <net/protocol.h>
#include <net/tcp.h>
#if IS_ENABLED(CONFIG_MPTCP_IPV6)
#include <net/ip6_route.h>
#include <net/transp_v6.h>
#endif
#include <net/mptcp.h>
#include <uapi/linux/mptcp.h>
#include "protocol.h"
#include "mib.h"

static void mptcp_subflow_ops_undo_override(struct sock *ssk);

static void SUBFLOW_REQ_INC_STATS(struct request_sock *req,
				  enum linux_mptcp_mib_field field)
{
	MPTCP_INC_STATS(sock_net(req_to_sk(req)), field);
}

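/* tear down the request socket MPTCP state: drop the msk reference taken
 * at MP_JOIN time, if any, release the token and chain up to the plain
 * TCP destructor
 */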
static void subflow_req_destructor(struct request_sock *req)
{
	struct mptcp_subflow_request_sock *subflow_req = mptcp_subflow_rsk(req);

	pr_debug("subflow_req=%p", subflow_req);

	if (subflow_req->msk)
		sock_put((struct sock *)subflow_req->msk);

	mptcp_token_destroy_request(req);
	tcp_request_sock_ops.destructor(req);
}

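/* compute the MP_JOIN HMAC over the two nonces, keyed with the local and
 * remote keys (HMAC-SHA256, per RFC 8684); callers truncate the 32-byte
 * digest as needed
 */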
static void subflow_generate_hmac(u64 key1, u64 key2, u32 nonce1, u32 nonce2,
				  void *hmac)
{
	u8 msg[8];

	put_unaligned_be32(nonce1, &msg[0]);
	put_unaligned_be32(nonce2, &msg[4]);

	mptcp_crypto_hmac_sha(key1, key2, msg, 8, hmac);
}

static bool mptcp_can_accept_new_subflow(const struct mptcp_sock *msk)
{
	return mptcp_is_fully_established((void *)msk) &&
	       READ_ONCE(msk->pm.accept_subflow);
}

/* validate received token and create truncated hmac and nonce for SYN-ACK */
static void subflow_req_create_thmac(struct mptcp_subflow_request_sock *subflow_req)
{
	struct mptcp_sock *msk = subflow_req->msk;
	u8 hmac[SHA256_DIGEST_SIZE];

	get_random_bytes(&subflow_req->local_nonce, sizeof(u32));

	subflow_generate_hmac(msk->local_key, msk->remote_key,
			      subflow_req->local_nonce,
			      subflow_req->remote_nonce, hmac);

	subflow_req->thmac = get_unaligned_be64(hmac);
}

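/* look up the msk matching the token carried by an MP_JOIN SYN and pick a
 * local address id for the new subflow; returns the msk, with a reference
 * held, or NULL if the join can't be accepted
 */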
static struct mptcp_sock *subflow_token_join_request(struct request_sock *req)
{
	struct mptcp_subflow_request_sock *subflow_req = mptcp_subflow_rsk(req);
	struct mptcp_sock *msk;
	int local_id;

	msk = mptcp_token_get_sock(subflow_req->token);
	if (!msk) {
		SUBFLOW_REQ_INC_STATS(req, MPTCP_MIB_JOINNOTOKEN);
		return NULL;
	}

	local_id = mptcp_pm_get_local_id(msk, (struct sock_common *)req);
	if (local_id < 0) {
		sock_put((struct sock *)msk);
		return NULL;
	}
	subflow_req->local_id = local_id;

	return msk;
}

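/* reset the MPTCP-level state of a freshly allocated request socket */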
static void subflow_init_req(struct request_sock *req, const struct sock *sk_listener)
{
	struct mptcp_subflow_request_sock *subflow_req = mptcp_subflow_rsk(req);

	subflow_req->mp_capable = 0;
	subflow_req->mp_join = 0;
	subflow_req->msk = NULL;
	mptcp_token_init_request(req);
}

static bool subflow_use_different_sport(struct mptcp_sock *msk, const struct sock *sk)
{
	return inet_sk(sk)->inet_sport != inet_sk((struct sock *)msk)->inet_sport;
}

/* Init mptcp request socket.
 *
 * Returns an error code if a JOIN has failed and a TCP reset
 * should be sent.
 */
static int subflow_check_req(struct request_sock *req,
			     const struct sock *sk_listener,
			     struct sk_buff *skb)
{
	struct mptcp_subflow_context *listener = mptcp_subflow_ctx(sk_listener);
	struct mptcp_subflow_request_sock *subflow_req = mptcp_subflow_rsk(req);
	struct mptcp_options_received mp_opt;

	pr_debug("subflow_req=%p, listener=%p", subflow_req, listener);

#ifdef CONFIG_TCP_MD5SIG
	/* no MPTCP if MD5SIG is enabled on this socket or we may run out of
	 * TCP option space.
	 */
	if (rcu_access_pointer(tcp_sk(sk_listener)->md5sig_info))
		return -EINVAL;
#endif

	mptcp_get_options(skb, &mp_opt);

	if (mp_opt.mp_capable) {
		SUBFLOW_REQ_INC_STATS(req, MPTCP_MIB_MPCAPABLEPASSIVE);

		if (mp_opt.mp_join)
			return 0;
	} else if (mp_opt.mp_join) {
		SUBFLOW_REQ_INC_STATS(req, MPTCP_MIB_JOINSYNRX);
	}

	if (mp_opt.mp_capable && listener->request_mptcp) {
		int err, retries = 4;

		subflow_req->ssn_offset = TCP_SKB_CB(skb)->seq;
again:
		do {
			get_random_bytes(&subflow_req->local_key, sizeof(subflow_req->local_key));
		} while (subflow_req->local_key == 0);

		if (unlikely(req->syncookie)) {
			mptcp_crypto_key_sha(subflow_req->local_key,
					     &subflow_req->token,
					     &subflow_req->idsn);
			if (mptcp_token_exists(subflow_req->token)) {
				if (retries-- > 0)
					goto again;
			} else {
				subflow_req->mp_capable = 1;
			}
			return 0;
		}

		err = mptcp_token_new_request(req);
		if (err == 0)
			subflow_req->mp_capable = 1;
		else if (retries-- > 0)
			goto again;

	} else if (mp_opt.mp_join && listener->request_mptcp) {
		subflow_req->ssn_offset = TCP_SKB_CB(skb)->seq;
		subflow_req->mp_join = 1;
		subflow_req->backup = mp_opt.backup;
		subflow_req->remote_id = mp_opt.join_id;
		subflow_req->token = mp_opt.token;
		subflow_req->remote_nonce = mp_opt.nonce;
		subflow_req->msk = subflow_token_join_request(req);

		/* Can't fall back to TCP in this case. */
		if (!subflow_req->msk)
			return -EPERM;

		if (subflow_use_different_sport(subflow_req->msk, sk_listener)) {
			pr_debug("syn inet_sport=%d %d",
				 ntohs(inet_sk(sk_listener)->inet_sport),
				 ntohs(inet_sk((struct sock *)subflow_req->msk)->inet_sport));
			if (!mptcp_pm_sport_in_anno_list(subflow_req->msk, sk_listener)) {
				sock_put((struct sock *)subflow_req->msk);
				mptcp_token_destroy_request(req);
				tcp_request_sock_ops.destructor(req);
				subflow_req->msk = NULL;
				subflow_req->mp_join = 0;
				SUBFLOW_REQ_INC_STATS(req, MPTCP_MIB_MISMATCHPORTSYNRX);
				return -EPERM;
			}
			SUBFLOW_REQ_INC_STATS(req, MPTCP_MIB_JOINPORTSYNRX);
		}

		subflow_req_create_thmac(subflow_req);

		if (unlikely(req->syncookie)) {
			if (mptcp_can_accept_new_subflow(subflow_req->msk))
				subflow_init_req_cookie_join_save(subflow_req, skb);
		}

		pr_debug("token=%u, remote_nonce=%u msk=%p", subflow_req->token,
			 subflow_req->remote_nonce, subflow_req->msk);
	}

	return 0;
}

int mptcp_subflow_init_cookie_req(struct request_sock *req,
				  const struct sock *sk_listener,
				  struct sk_buff *skb)
{
	struct mptcp_subflow_context *listener = mptcp_subflow_ctx(sk_listener);
	struct mptcp_subflow_request_sock *subflow_req = mptcp_subflow_rsk(req);
	struct mptcp_options_received mp_opt;
	int err;

	subflow_init_req(req, sk_listener);
	mptcp_get_options(skb, &mp_opt);

	if (mp_opt.mp_capable && mp_opt.mp_join)
		return -EINVAL;

	if (mp_opt.mp_capable && listener->request_mptcp) {
		if (mp_opt.sndr_key == 0)
			return -EINVAL;

		subflow_req->local_key = mp_opt.rcvr_key;
		err = mptcp_token_new_request(req);
		if (err)
			return err;

		subflow_req->mp_capable = 1;
		subflow_req->ssn_offset = TCP_SKB_CB(skb)->seq - 1;
	} else if (mp_opt.mp_join && listener->request_mptcp) {
		if (!mptcp_token_join_cookie_init_state(subflow_req, skb))
			return -EINVAL;

		if (mptcp_can_accept_new_subflow(subflow_req->msk))
			subflow_req->mp_join = 1;

		subflow_req->ssn_offset = TCP_SKB_CB(skb)->seq - 1;
	}

	return 0;
}
EXPORT_SYMBOL_GPL(mptcp_subflow_init_cookie_req);

static struct dst_entry *subflow_v4_route_req(const struct sock *sk,
					      struct sk_buff *skb,
					      struct flowi *fl,
					      struct request_sock *req)
{
	struct dst_entry *dst;
	int err;

	tcp_rsk(req)->is_mptcp = 1;
	subflow_init_req(req, sk);

	dst = tcp_request_sock_ipv4_ops.route_req(sk, skb, fl, req);
	if (!dst)
		return NULL;

	err = subflow_check_req(req, sk, skb);
	if (err == 0)
		return dst;

	dst_release(dst);
	if (!req->syncookie)
		tcp_request_sock_ops.send_reset(sk, skb);
	return NULL;
}

#if IS_ENABLED(CONFIG_MPTCP_IPV6)
static struct dst_entry *subflow_v6_route_req(const struct sock *sk,
					      struct sk_buff *skb,
					      struct flowi *fl,
					      struct request_sock *req)
{
	struct dst_entry *dst;
	int err;

	tcp_rsk(req)->is_mptcp = 1;
	subflow_init_req(req, sk);

	dst = tcp_request_sock_ipv6_ops.route_req(sk, skb, fl, req);
	if (!dst)
		return NULL;

	err = subflow_check_req(req, sk, skb);
	if (err == 0)
		return dst;

	dst_release(dst);
	if (!req->syncookie)
		tcp6_request_sock_ops.send_reset(sk, skb);
	return NULL;
}
#endif

/* validate received truncated hmac and create hmac for third ACK */
static bool subflow_thmac_valid(struct mptcp_subflow_context *subflow)
{
	u8 hmac[SHA256_DIGEST_SIZE];
	u64 thmac;

	subflow_generate_hmac(subflow->remote_key, subflow->local_key,
			      subflow->remote_nonce, subflow->local_nonce,
			      hmac);

	thmac = get_unaligned_be64(hmac);
	pr_debug("subflow=%p, token=%u, thmac=%llu, subflow->thmac=%llu\n",
		 subflow, subflow->token,
		 (unsigned long long)thmac,
		 (unsigned long long)subflow->thmac);

	return thmac == subflow->thmac;
}

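/* send an active reset on the subflow and tear it down, letting the MPTCP
 * worker release the last reference on the parent socket when possible
 */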
void mptcp_subflow_reset(struct sock *ssk)
{
	struct mptcp_subflow_context *subflow = mptcp_subflow_ctx(ssk);
	struct sock *sk = subflow->conn;

	/* must hold: tcp_done() could drop last reference on parent */
	sock_hold(sk);

	tcp_set_state(ssk, TCP_CLOSE);
	tcp_send_active_reset(ssk, GFP_ATOMIC);
	tcp_done(ssk);
	if (!test_and_set_bit(MPTCP_WORK_CLOSE_SUBFLOW, &mptcp_sk(sk)->flags) &&
	    schedule_work(&mptcp_sk(sk)->work))
		return; /* worker will put sk for us */

	sock_put(sk);
}

static bool subflow_use_different_dport(struct mptcp_sock *msk, const struct sock *sk)
{
	return inet_sk(sk)->inet_dport != inet_sk((struct sock *)msk)->inet_dport;
}

static void subflow_finish_connect(struct sock *sk, const struct sk_buff *skb)
{
	struct mptcp_subflow_context *subflow = mptcp_subflow_ctx(sk);
	struct mptcp_options_received mp_opt;
	struct sock *parent = subflow->conn;

	subflow->icsk_af_ops->sk_rx_dst_set(sk, skb);

	if (inet_sk_state_load(parent) == TCP_SYN_SENT) {
		inet_sk_state_store(parent, TCP_ESTABLISHED);
		parent->sk_state_change(parent);
	}

	/* be sure no special action on any packet other than syn-ack */
	if (subflow->conn_finished)
		return;

	mptcp_propagate_sndbuf(parent, sk);
	subflow->rel_write_seq = 1;
	subflow->conn_finished = 1;
	subflow->ssn_offset = TCP_SKB_CB(skb)->seq;
	pr_debug("subflow=%p synack seq=%x", subflow, subflow->ssn_offset);

	mptcp_get_options(skb, &mp_opt);
	if (subflow->request_mptcp) {
		if (!mp_opt.mp_capable) {
			MPTCP_INC_STATS(sock_net(sk),
					MPTCP_MIB_MPCAPABLEACTIVEFALLBACK);
			mptcp_do_fallback(sk);
			pr_fallback(mptcp_sk(subflow->conn));
			goto fallback;
		}

		subflow->mp_capable = 1;
		subflow->can_ack = 1;
		subflow->remote_key = mp_opt.sndr_key;
		pr_debug("subflow=%p, remote_key=%llu", subflow,
			 subflow->remote_key);
		mptcp_finish_connect(sk);
	} else if (subflow->request_join) {
		u8 hmac[SHA256_DIGEST_SIZE];

		if (!mp_opt.mp_join)
			goto do_reset;

		subflow->thmac = mp_opt.thmac;
		subflow->remote_nonce = mp_opt.nonce;
		pr_debug("subflow=%p, thmac=%llu, remote_nonce=%u", subflow,
			 subflow->thmac, subflow->remote_nonce);

		if (!subflow_thmac_valid(subflow)) {
			MPTCP_INC_STATS(sock_net(sk), MPTCP_MIB_JOINACKMAC);
			goto do_reset;
		}

		subflow_generate_hmac(subflow->local_key, subflow->remote_key,
				      subflow->local_nonce,
				      subflow->remote_nonce,
				      hmac);
		memcpy(subflow->hmac, hmac, MPTCPOPT_HMAC_LEN);

		if (!mptcp_finish_join(sk))
			goto do_reset;

		subflow->mp_join = 1;
		MPTCP_INC_STATS(sock_net(sk), MPTCP_MIB_JOINSYNACKRX);

		if (subflow_use_different_dport(mptcp_sk(parent), sk)) {
			pr_debug("synack inet_dport=%d %d",
				 ntohs(inet_sk(sk)->inet_dport),
				 ntohs(inet_sk(parent)->inet_dport));
			MPTCP_INC_STATS(sock_net(sk), MPTCP_MIB_JOINPORTSYNACKRX);
		}
	} else if (mptcp_check_fallback(sk)) {
fallback:
		mptcp_rcv_space_init(mptcp_sk(parent), sk);
	}
	return;

do_reset:
	mptcp_subflow_reset(sk);
}

struct request_sock_ops mptcp_subflow_request_sock_ops;
EXPORT_SYMBOL_GPL(mptcp_subflow_request_sock_ops);
static struct tcp_request_sock_ops subflow_request_sock_ipv4_ops;

static int subflow_v4_conn_request(struct sock *sk, struct sk_buff *skb)
{
	struct mptcp_subflow_context *subflow = mptcp_subflow_ctx(sk);

	pr_debug("subflow=%p", subflow);

	/* Never answer to SYNs sent to broadcast or multicast */
	if (skb_rtable(skb)->rt_flags & (RTCF_BROADCAST | RTCF_MULTICAST))
		goto drop;

	return tcp_conn_request(&mptcp_subflow_request_sock_ops,
				&subflow_request_sock_ipv4_ops,
				sk, skb);
drop:
	tcp_listendrop(sk);
	return 0;
}

#if IS_ENABLED(CONFIG_MPTCP_IPV6)
static struct tcp_request_sock_ops subflow_request_sock_ipv6_ops;
static struct inet_connection_sock_af_ops subflow_v6_specific;
static struct inet_connection_sock_af_ops subflow_v6m_specific;
static struct proto tcpv6_prot_override;

static int subflow_v6_conn_request(struct sock *sk, struct sk_buff *skb)
{
	struct mptcp_subflow_context *subflow = mptcp_subflow_ctx(sk);

	pr_debug("subflow=%p", subflow);

	if (skb->protocol == htons(ETH_P_IP))
		return subflow_v4_conn_request(sk, skb);

	if (!ipv6_unicast_destination(skb))
		goto drop;

	if (ipv6_addr_v4mapped(&ipv6_hdr(skb)->saddr)) {
		__IP6_INC_STATS(sock_net(sk), NULL, IPSTATS_MIB_INHDRERRORS);
		return 0;
	}

	return tcp_conn_request(&mptcp_subflow_request_sock_ops,
				&subflow_request_sock_ipv6_ops, sk, skb);

drop:
	tcp_listendrop(sk);
	return 0; /* don't send reset */
}
#endif

/* validate hmac received in third ACK */
static bool subflow_hmac_valid(const struct request_sock *req,
			       const struct mptcp_options_received *mp_opt)
{
	const struct mptcp_subflow_request_sock *subflow_req;
	u8 hmac[SHA256_DIGEST_SIZE];
	struct mptcp_sock *msk;

	subflow_req = mptcp_subflow_rsk(req);
	msk = subflow_req->msk;
	if (!msk)
		return false;

	subflow_generate_hmac(msk->remote_key, msk->local_key,
			      subflow_req->remote_nonce,
			      subflow_req->local_nonce, hmac);

	return !crypto_memneq(hmac, mp_opt->hmac, MPTCPOPT_HMAC_LEN);
}

static void mptcp_sock_destruct(struct sock *sk)
{
	/* if the new mptcp socket isn't accepted, it is freed
	 * from the tcp listener sockets request queue, linked
	 * from req->sk. The tcp socket is released.
	 * This calls the ULP release function which will
	 * also remove the mptcp socket, via
	 * sock_put(ctx->conn).
	 *
	 * Problem is that the mptcp socket will be in
	 * ESTABLISHED state and will not have the SOCK_DEAD flag.
	 * Both result in warnings from inet_sock_destruct.
	 */
	if (sk->sk_state == TCP_ESTABLISHED) {
		sk->sk_state = TCP_CLOSE;
		WARN_ON_ONCE(sk->sk_socket);
		sock_orphan(sk);
	}

	mptcp_destroy_common(mptcp_sk(sk));
	inet_sock_destruct(sk);
}

static void mptcp_force_close(struct sock *sk)
{
	inet_sk_state_store(sk, TCP_CLOSE);
	sk_common_release(sk);
}

static void subflow_ulp_fallback(struct sock *sk,
				 struct mptcp_subflow_context *old_ctx)
{
	struct inet_connection_sock *icsk = inet_csk(sk);

	mptcp_subflow_tcp_fallback(sk, old_ctx);
	icsk->icsk_ulp_ops = NULL;
	rcu_assign_pointer(icsk->icsk_ulp_data, NULL);
	tcp_sk(sk)->is_mptcp = 0;

	mptcp_subflow_ops_undo_override(sk);
}

static void subflow_drop_ctx(struct sock *ssk)
{
	struct mptcp_subflow_context *ctx = mptcp_subflow_ctx(ssk);

	if (!ctx)
		return;

	subflow_ulp_fallback(ssk, ctx);
	if (ctx->conn)
		sock_put(ctx->conn);

	kfree_rcu(ctx, rcu);
}

void mptcp_subflow_fully_established(struct mptcp_subflow_context *subflow,
				     struct mptcp_options_received *mp_opt)
{
	struct mptcp_sock *msk = mptcp_sk(subflow->conn);

	subflow->remote_key = mp_opt->sndr_key;
	subflow->fully_established = 1;
	subflow->can_ack = 1;
	WRITE_ONCE(msk->fully_established, true);
}

static struct sock *subflow_syn_recv_sock(const struct sock *sk,
					  struct sk_buff *skb,
					  struct request_sock *req,
					  struct dst_entry *dst,
					  struct request_sock *req_unhash,
					  bool *own_req)
{
	struct mptcp_subflow_context *listener = mptcp_subflow_ctx(sk);
	struct mptcp_subflow_request_sock *subflow_req;
	struct mptcp_options_received mp_opt;
	bool fallback, fallback_is_fatal;
	struct sock *new_msk = NULL;
	struct sock *child;

	pr_debug("listener=%p, req=%p, conn=%p", listener, req, listener->conn);

	/* After child creation we must look for 'mp_capable' even when options
	 * are not parsed
	 */
	mp_opt.mp_capable = 0;

	/* hopefully temporary handling for MP_JOIN+syncookie */
	subflow_req = mptcp_subflow_rsk(req);
	fallback_is_fatal = tcp_rsk(req)->is_mptcp && subflow_req->mp_join;
	fallback = !tcp_rsk(req)->is_mptcp;
	if (fallback)
		goto create_child;

	/* if the sk is MP_CAPABLE, we try to fetch the client key */
	if (subflow_req->mp_capable) {
		if (TCP_SKB_CB(skb)->seq != subflow_req->ssn_offset + 1) {
			/* here we can receive and accept an in-window,
			 * out-of-order pkt, which will not carry the MP_CAPABLE
			 * opt even on mptcp enabled paths
			 */
			goto create_msk;
		}

		mptcp_get_options(skb, &mp_opt);
		if (!mp_opt.mp_capable) {
			fallback = true;
			goto create_child;
		}

create_msk:
		new_msk = mptcp_sk_clone(listener->conn, &mp_opt, req);
		if (!new_msk)
			fallback = true;
	} else if (subflow_req->mp_join) {
		mptcp_get_options(skb, &mp_opt);
		if (!mp_opt.mp_join || !subflow_hmac_valid(req, &mp_opt) ||
		    !mptcp_can_accept_new_subflow(subflow_req->msk)) {
			SUBFLOW_REQ_INC_STATS(req, MPTCP_MIB_JOINACKMAC);
			fallback = true;
		}
	}

create_child:
	child = listener->icsk_af_ops->syn_recv_sock(sk, skb, req, dst,
						     req_unhash, own_req);

	if (child && *own_req) {
		struct mptcp_subflow_context *ctx = mptcp_subflow_ctx(child);

		tcp_rsk(req)->drop_req = false;

		/* we need to fallback on ctx allocation failure and on pre-reqs
		 * checking above. In the latter scenario we additionally need
		 * to reset the context to non MPTCP status.
		 */
		if (!ctx || fallback) {
			if (fallback_is_fatal)
				goto dispose_child;

			subflow_drop_ctx(child);
			goto out;
		}

		if (ctx->mp_capable) {
			/* this can't race with mptcp_close(), as the msk is
			 * not yet exposed to user-space
			 */
			inet_sk_state_store((void *)new_msk, TCP_ESTABLISHED);

			/* record the newly created socket as the first msk
			 * subflow, but don't link it yet into conn_list
			 */
			WRITE_ONCE(mptcp_sk(new_msk)->first, child);

			/* new mpc subflow takes ownership of the newly
			 * created mptcp socket
			 */
			new_msk->sk_destruct = mptcp_sock_destruct;
			mptcp_pm_new_connection(mptcp_sk(new_msk), child, 1);
			mptcp_token_accept(subflow_req, mptcp_sk(new_msk));
			ctx->conn = new_msk;
			new_msk = NULL;

			/* with OoO packets we can reach here without ingress
			 * mpc option
			 */
			if (mp_opt.mp_capable)
				mptcp_subflow_fully_established(ctx, &mp_opt);
		} else if (ctx->mp_join) {
			struct mptcp_sock *owner;

			owner = subflow_req->msk;
			if (!owner)
				goto dispose_child;

			/* move the msk reference ownership to the subflow */
			subflow_req->msk = NULL;
			ctx->conn = (struct sock *)owner;

			if (subflow_use_different_sport(owner, sk)) {
				pr_debug("ack inet_sport=%d %d",
					 ntohs(inet_sk(sk)->inet_sport),
					 ntohs(inet_sk((struct sock *)owner)->inet_sport));
				if (!mptcp_pm_sport_in_anno_list(owner, sk)) {
					SUBFLOW_REQ_INC_STATS(req, MPTCP_MIB_MISMATCHPORTACKRX);
					goto dispose_child;
				}
				SUBFLOW_REQ_INC_STATS(req, MPTCP_MIB_JOINPORTACKRX);
			}

			if (!mptcp_finish_join(child))
				goto dispose_child;

			SUBFLOW_REQ_INC_STATS(req, MPTCP_MIB_JOINACKRX);
			tcp_rsk(req)->drop_req = true;
		}
	}

out:
	/* dispose of the left over mptcp master, if any */
	if (unlikely(new_msk))
		mptcp_force_close(new_msk);

	/* check for expected invariant - should never trigger, just help
	 * catching earlier subtle bugs
	 */
	WARN_ON_ONCE(child && *own_req && tcp_sk(child)->is_mptcp &&
		     (!mptcp_subflow_ctx(child) ||
		      !mptcp_subflow_ctx(child)->conn));
	return child;

dispose_child:
	subflow_drop_ctx(child);
	tcp_rsk(req)->drop_req = true;
	inet_csk_prepare_for_destroy_sock(child);
	tcp_done(child);
	req->rsk_ops->send_reset(sk, skb);

	/* The last child reference will be released by the caller */
	return child;
}

static struct inet_connection_sock_af_ops subflow_specific;
static struct proto tcp_prot_override;

enum mapping_status {
	MAPPING_OK,
	MAPPING_INVALID,
	MAPPING_EMPTY,
	MAPPING_DATA_FIN,
	MAPPING_DUMMY
};

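/* expand a 32-bit DSS sequence number to 64 bits, assuming the new map
 * covers data not mapped yet. E.g. with old_seq=0xffffff00 and
 * old_data_len=0x200, a received 32-bit seq of 0x100 lands past the
 * 32-bit wrap and is expanded to 0x1_00000100.
 */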
static u64 expand_seq(u64 old_seq, u16 old_data_len, u64 seq)
{
	if ((u32)seq == (u32)old_seq)
		return old_seq;

	/* Assume map covers data not mapped yet. */
	return seq | ((old_seq + old_data_len + 1) & GENMASK_ULL(63, 32));
}

static void warn_bad_map(struct mptcp_subflow_context *subflow, u32 ssn)
{
	WARN_ONCE(1, "Bad mapping: ssn=%d map_seq=%d map_data_len=%d",
		  ssn, subflow->map_subflow_seq, subflow->map_data_len);
}

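/* true if the not yet consumed part of this skb fits entirely inside the
 * currently programmed mapping
 */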
static bool skb_is_fully_mapped(struct sock *ssk, struct sk_buff *skb)
{
	struct mptcp_subflow_context *subflow = mptcp_subflow_ctx(ssk);
	unsigned int skb_consumed;

	skb_consumed = tcp_sk(ssk)->copied_seq - TCP_SKB_CB(skb)->seq;
	if (WARN_ON_ONCE(skb_consumed >= skb->len))
		return true;

	return skb->len - skb_consumed <= subflow->map_data_len -
					  mptcp_subflow_get_map_offset(subflow);
}

static bool validate_mapping(struct sock *ssk, struct sk_buff *skb)
{
	struct mptcp_subflow_context *subflow = mptcp_subflow_ctx(ssk);
	u32 ssn = tcp_sk(ssk)->copied_seq - subflow->ssn_offset;

	if (unlikely(before(ssn, subflow->map_subflow_seq))) {
		/* Mapping covers data later in the subflow stream,
		 * currently unsupported.
		 */
		warn_bad_map(subflow, ssn);
		return false;
	}
	if (unlikely(!before(ssn, subflow->map_subflow_seq +
				  subflow->map_data_len))) {
		/* Mapping covers only data already consumed on the
		 * subflow stream, invalid
		 */
		warn_bad_map(subflow, ssn + skb->len);
		return false;
	}
	return true;
}

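/* parse the DSS option carried by the skb at the head of the subflow
 * receive queue, if any, and validate or install the MPTCP-level mapping
 * it describes
 */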
static enum mapping_status get_mapping_status(struct sock *ssk,
					      struct mptcp_sock *msk)
{
	struct mptcp_subflow_context *subflow = mptcp_subflow_ctx(ssk);
	struct mptcp_ext *mpext;
	struct sk_buff *skb;
	u16 data_len;
	u64 map_seq;

	skb = skb_peek(&ssk->sk_receive_queue);
	if (!skb)
		return MAPPING_EMPTY;

	if (mptcp_check_fallback(ssk))
		return MAPPING_DUMMY;

	mpext = mptcp_get_ext(skb);
	if (!mpext || !mpext->use_map) {
		if (!subflow->map_valid && !skb->len) {
			/* the TCP stack delivers 0 len FIN pkts to the receive
			 * queue, those are the only 0len pkts ever expected here,
			 * and we can admit no mapping only for 0 len pkts
			 */
			if (!(TCP_SKB_CB(skb)->tcp_flags & TCPHDR_FIN))
				WARN_ONCE(1, "0len seq %d:%d flags %x",
					  TCP_SKB_CB(skb)->seq,
					  TCP_SKB_CB(skb)->end_seq,
					  TCP_SKB_CB(skb)->tcp_flags);
			sk_eat_skb(ssk, skb);
			return MAPPING_EMPTY;
		}

		if (!subflow->map_valid)
			return MAPPING_INVALID;

		goto validate_seq;
	}

	pr_debug("seq=%llu is64=%d ssn=%u data_len=%u data_fin=%d",
		 mpext->data_seq, mpext->dsn64, mpext->subflow_seq,
		 mpext->data_len, mpext->data_fin);

	data_len = mpext->data_len;
	if (!data_len) {
		pr_err("Infinite mapping not handled");
		MPTCP_INC_STATS(sock_net(ssk), MPTCP_MIB_INFINITEMAPRX);
		return MAPPING_INVALID;
	}

	if (mpext->data_fin == 1) {
		if (data_len == 1) {
			bool updated = mptcp_update_rcv_data_fin(msk, mpext->data_seq,
								 mpext->dsn64);
			pr_debug("DATA_FIN with no payload seq=%llu", mpext->data_seq);
			if (subflow->map_valid) {
				/* A DATA_FIN might arrive in a DSS
				 * option before the previous mapping
				 * has been fully consumed. Continue
				 * handling the existing mapping.
				 */
				skb_ext_del(skb, SKB_EXT_MPTCP);
				return MAPPING_OK;
			} else {
				if (updated && schedule_work(&msk->work))
					sock_hold((struct sock *)msk);

				return MAPPING_DATA_FIN;
			}
		} else {
			u64 data_fin_seq = mpext->data_seq + data_len - 1;

			/* If mpext->data_seq is a 32-bit value, data_fin_seq
			 * must also be limited to 32 bits.
			 */
			if (!mpext->dsn64)
				data_fin_seq &= GENMASK_ULL(31, 0);

			mptcp_update_rcv_data_fin(msk, data_fin_seq, mpext->dsn64);
			pr_debug("DATA_FIN with mapping seq=%llu dsn64=%d",
				 data_fin_seq, mpext->dsn64);
		}

		/* Adjust for DATA_FIN using 1 byte of sequence space */
		data_len--;
	}

	if (!mpext->dsn64) {
		map_seq = expand_seq(subflow->map_seq, subflow->map_data_len,
				     mpext->data_seq);
		pr_debug("expanded seq=%llu", subflow->map_seq);
	} else {
		map_seq = mpext->data_seq;
	}
	WRITE_ONCE(mptcp_sk(subflow->conn)->use_64bit_ack, !!mpext->dsn64);

	if (subflow->map_valid) {
		/* Allow replacing only with an identical map */
		if (subflow->map_seq == map_seq &&
		    subflow->map_subflow_seq == mpext->subflow_seq &&
		    subflow->map_data_len == data_len) {
			skb_ext_del(skb, SKB_EXT_MPTCP);
			return MAPPING_OK;
		}

		/* If this skb data are fully covered by the current mapping,
		 * the new map would need caching, which is not supported
		 */
		if (skb_is_fully_mapped(ssk, skb)) {
			MPTCP_INC_STATS(sock_net(ssk), MPTCP_MIB_DSSNOMATCH);
			return MAPPING_INVALID;
		}

		/* will validate the next map after consuming the current one */
		return MAPPING_OK;
	}

	subflow->map_seq = map_seq;
	subflow->map_subflow_seq = mpext->subflow_seq;
	subflow->map_data_len = data_len;
	subflow->map_valid = 1;
	subflow->mpc_map = mpext->mpc_map;
	pr_debug("new map seq=%llu subflow_seq=%u data_len=%u",
		 subflow->map_seq, subflow->map_subflow_seq,
		 subflow->map_data_len);

validate_seq:
	/* we revalidate valid mapping on new skb, because we must ensure
	 * the current skb is completely covered by the available mapping
	 */
	if (!validate_mapping(ssk, skb))
		return MAPPING_INVALID;

	skb_ext_del(skb, SKB_EXT_MPTCP);
	return MAPPING_OK;
}

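/* drop up to @limit bytes of duplicate data from the head of the subflow
 * receive queue, advancing copied_seq as if the data had been read
 */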
static void mptcp_subflow_discard_data(struct sock *ssk, struct sk_buff *skb,
				       u64 limit)
{
	struct mptcp_subflow_context *subflow = mptcp_subflow_ctx(ssk);
	bool fin = TCP_SKB_CB(skb)->tcp_flags & TCPHDR_FIN;
	u32 incr;

	incr = limit >= skb->len ? skb->len + fin : limit;

	pr_debug("discarding=%d len=%d seq=%d", incr, skb->len,
		 subflow->map_subflow_seq);
	MPTCP_INC_STATS(sock_net(ssk), MPTCP_MIB_DUPDATA);
	tcp_sk(ssk)->copied_seq += incr;
	if (!before(tcp_sk(ssk)->copied_seq, TCP_SKB_CB(skb)->end_seq))
		sk_eat_skb(ssk, skb);
	if (mptcp_subflow_get_map_offset(subflow) >= subflow->map_data_len)
		subflow->map_valid = 0;
}

/* sched mptcp worker to remove the subflow if no more data is pending */
static void subflow_sched_work_if_closed(struct mptcp_sock *msk, struct sock *ssk)
{
	struct sock *sk = (struct sock *)msk;

	if (likely(ssk->sk_state != TCP_CLOSE))
		return;

	if (skb_queue_empty(&ssk->sk_receive_queue) &&
	    !test_and_set_bit(MPTCP_WORK_CLOSE_SUBFLOW, &msk->flags)) {
		sock_hold(sk);
		if (!schedule_work(&msk->work))
			sock_put(sk);
	}
}

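/* check whether the data at the head of the subflow receive queue is
 * in-sequence at the MPTCP level and update subflow->data_avail
 * accordingly; handles fallback subflows and fatal mapping errors
 */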
static bool subflow_check_data_avail(struct sock *ssk)
{
	struct mptcp_subflow_context *subflow = mptcp_subflow_ctx(ssk);
	enum mapping_status status;
	struct mptcp_sock *msk;
	struct sk_buff *skb;

	pr_debug("msk=%p ssk=%p data_avail=%d skb=%p", subflow->conn, ssk,
		 subflow->data_avail, skb_peek(&ssk->sk_receive_queue));
	if (!skb_peek(&ssk->sk_receive_queue))
		subflow->data_avail = 0;
	if (subflow->data_avail)
		return true;

	msk = mptcp_sk(subflow->conn);
	for (;;) {
		u64 ack_seq;
		u64 old_ack;

		status = get_mapping_status(ssk, msk);
		pr_debug("msk=%p ssk=%p status=%d", msk, ssk, status);
		if (status == MAPPING_INVALID) {
			ssk->sk_err = EBADMSG;
			goto fatal;
		}
		if (status == MAPPING_DUMMY) {
			__mptcp_do_fallback(msk);
			skb = skb_peek(&ssk->sk_receive_queue);
			subflow->map_valid = 1;
			subflow->map_seq = READ_ONCE(msk->ack_seq);
			subflow->map_data_len = skb->len;
			subflow->map_subflow_seq = tcp_sk(ssk)->copied_seq -
						   subflow->ssn_offset;
			subflow->data_avail = MPTCP_SUBFLOW_DATA_AVAIL;
			return true;
		}

		if (status != MAPPING_OK)
			goto no_data;

		skb = skb_peek(&ssk->sk_receive_queue);
		if (WARN_ON_ONCE(!skb))
			goto no_data;

		/* if msk lacks the remote key, this subflow must provide an
		 * MP_CAPABLE-based mapping
		 */
		if (unlikely(!READ_ONCE(msk->can_ack))) {
			if (!subflow->mpc_map) {
				ssk->sk_err = EBADMSG;
				goto fatal;
			}
			WRITE_ONCE(msk->remote_key, subflow->remote_key);
			WRITE_ONCE(msk->ack_seq, subflow->map_seq);
			WRITE_ONCE(msk->can_ack, true);
		}

		old_ack = READ_ONCE(msk->ack_seq);
		ack_seq = mptcp_subflow_get_mapped_dsn(subflow);
		pr_debug("msk ack_seq=%llx subflow ack_seq=%llx", old_ack,
			 ack_seq);
		if (ack_seq == old_ack) {
			subflow->data_avail = MPTCP_SUBFLOW_DATA_AVAIL;
			break;
		} else if (after64(ack_seq, old_ack)) {
			subflow->data_avail = MPTCP_SUBFLOW_OOO_DATA;
			break;
		}

		/* only accept in-sequence mapping. Old values are spurious
		 * retransmission
		 */
		mptcp_subflow_discard_data(ssk, skb, old_ack - ack_seq);
	}
	return true;

no_data:
	subflow_sched_work_if_closed(msk, ssk);
	return false;
fatal:
	/* fatal protocol error, close the socket */
	/* This barrier is coupled with smp_rmb() in tcp_poll() */
	smp_wmb();
	ssk->sk_error_report(ssk);
	tcp_set_state(ssk, TCP_CLOSE);
	tcp_send_active_reset(ssk, GFP_ATOMIC);
	subflow->data_avail = 0;
	return false;
}

bool mptcp_subflow_data_available(struct sock *sk)
{
	struct mptcp_subflow_context *subflow = mptcp_subflow_ctx(sk);

	/* check if current mapping is still valid */
	if (subflow->map_valid &&
	    mptcp_subflow_get_map_offset(subflow) >= subflow->map_data_len) {
		subflow->map_valid = 0;
		subflow->data_avail = 0;

		pr_debug("Done with mapping: seq=%u data_len=%u",
			 subflow->map_subflow_seq,
			 subflow->map_data_len);
	}

	return subflow_check_data_avail(sk);
}

/* If ssk has an mptcp parent socket, use the mptcp rcvbuf occupancy,
 * not the ssk one.
 *
 * In mptcp, rwin is about the mptcp-level connection data.
 *
 * Data that is still on the ssk rx queue can thus be ignored,
 * as far as the mptcp peer is concerned that data is still inflight.
 * DSS ACK is updated when the skb is moved to the mptcp rx queue.
 */
void mptcp_space(const struct sock *ssk, int *space, int *full_space)
{
	const struct mptcp_subflow_context *subflow = mptcp_subflow_ctx(ssk);
	const struct sock *sk = subflow->conn;

	*space = __mptcp_space(sk);
	*full_space = tcp_full_space(sk);
}

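/* subflow 'data ready' callback: wake the listener on incoming connection
 * requests, otherwise run the MPTCP-level data available machinery
 */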
static void subflow_data_ready(struct sock *sk)
{
	struct mptcp_subflow_context *subflow = mptcp_subflow_ctx(sk);
	u16 state = 1 << inet_sk_state_load(sk);
	struct sock *parent = subflow->conn;
	struct mptcp_sock *msk;

	msk = mptcp_sk(parent);
	if (state & TCPF_LISTEN) {
		/* MPJ subflows are removed from the accept queue before
		 * reaching here, avoid stray wakeups
		 */
		if (reqsk_queue_empty(&inet_csk(sk)->icsk_accept_queue))
			return;

		set_bit(MPTCP_DATA_READY, &msk->flags);
		parent->sk_data_ready(parent);
		return;
	}

	WARN_ON_ONCE(!__mptcp_check_fallback(msk) && !subflow->mp_capable &&
		     !subflow->mp_join && !(state & TCPF_CLOSE));

	if (mptcp_subflow_data_available(sk))
		mptcp_data_ready(parent, sk);
}

static void subflow_write_space(struct sock *ssk)
{
	struct sock *sk = mptcp_subflow_ctx(ssk)->conn;

	mptcp_propagate_sndbuf(sk, ssk);
	mptcp_write_space(sk);
}

void __mptcp_error_report(struct sock *sk)
{
	struct mptcp_subflow_context *subflow;
	struct mptcp_sock *msk = mptcp_sk(sk);

	mptcp_for_each_subflow(msk, subflow) {
		struct sock *ssk = mptcp_subflow_tcp_sock(subflow);
		int err = sock_error(ssk);

		if (!err)
			continue;

		/* only propagate errors on fallen-back sockets or
		 * on MPC connect
		 */
		if (sk->sk_state != TCP_SYN_SENT && !__mptcp_check_fallback(msk))
			continue;

		inet_sk_state_store(sk, inet_sk_state_load(ssk));
		sk->sk_err = -err;

		/* This barrier is coupled with smp_rmb() in mptcp_poll() */
		smp_wmb();
		sk->sk_error_report(sk);
		break;
	}
}

static void subflow_error_report(struct sock *ssk)
{
	struct sock *sk = mptcp_subflow_ctx(ssk)->conn;

	mptcp_data_lock(sk);
	if (!sock_owned_by_user(sk))
		__mptcp_error_report(sk);
	else
		set_bit(MPTCP_ERROR_REPORT, &mptcp_sk(sk)->flags);
	mptcp_data_unlock(sk);
}

static struct inet_connection_sock_af_ops *
subflow_default_af_ops(struct sock *sk)
{
#if IS_ENABLED(CONFIG_MPTCP_IPV6)
	if (sk->sk_family == AF_INET6)
		return &subflow_v6_specific;
#endif
	return &subflow_specific;
}

#if IS_ENABLED(CONFIG_MPTCP_IPV6)
void mptcpv6_handle_mapped(struct sock *sk, bool mapped)
{
	struct mptcp_subflow_context *subflow = mptcp_subflow_ctx(sk);
	struct inet_connection_sock *icsk = inet_csk(sk);
	struct inet_connection_sock_af_ops *target;

	target = mapped ? &subflow_v6m_specific : subflow_default_af_ops(sk);

	pr_debug("subflow=%p family=%d ops=%p target=%p mapped=%d",
		 subflow, sk->sk_family, icsk->icsk_af_ops, target, mapped);

	if (likely(icsk->icsk_af_ops == target))
		return;

	subflow->icsk_af_ops = icsk->icsk_af_ops;
	icsk->icsk_af_ops = target;
}
#endif

void mptcp_info2sockaddr(const struct mptcp_addr_info *info,
			 struct sockaddr_storage *addr,
			 unsigned short family)
{
	memset(addr, 0, sizeof(*addr));
	addr->ss_family = family;
	if (addr->ss_family == AF_INET) {
		struct sockaddr_in *in_addr = (struct sockaddr_in *)addr;

		if (info->family == AF_INET)
			in_addr->sin_addr = info->addr;
#if IS_ENABLED(CONFIG_MPTCP_IPV6)
		else if (ipv6_addr_v4mapped(&info->addr6))
			in_addr->sin_addr.s_addr = info->addr6.s6_addr32[3];
#endif
		in_addr->sin_port = info->port;
	}
#if IS_ENABLED(CONFIG_MPTCP_IPV6)
	else if (addr->ss_family == AF_INET6) {
		struct sockaddr_in6 *in6_addr = (struct sockaddr_in6 *)addr;

		if (info->family == AF_INET)
			ipv6_addr_set_v4mapped(info->addr.s_addr,
					       &in6_addr->sin6_addr);
		else
			in6_addr->sin6_addr = info->addr6;
		in6_addr->sin6_port = info->port;
	}
#endif
}

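/* create, bind and connect a new subflow socket towards @remote and start
 * the MP_JOIN handshake on it; on failure the subflow socket is disposed
 * of and an error is returned
 */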
int __mptcp_subflow_connect(struct sock *sk, const struct mptcp_addr_info *loc,
			    const struct mptcp_addr_info *remote)
{
	struct mptcp_sock *msk = mptcp_sk(sk);
	struct mptcp_subflow_context *subflow;
	struct sockaddr_storage addr;
	int remote_id = remote->id;
	int local_id = loc->id;
	struct socket *sf;
	struct sock *ssk;
	u32 remote_token;
	int addrlen;
	int err;

	if (!mptcp_is_fully_established(sk))
		return -ENOTCONN;

	err = mptcp_subflow_create_socket(sk, &sf);
	if (err)
		return err;

	ssk = sf->sk;
	subflow = mptcp_subflow_ctx(ssk);
	do {
		get_random_bytes(&subflow->local_nonce, sizeof(u32));
	} while (!subflow->local_nonce);

	if (!local_id) {
		err = mptcp_pm_get_local_id(msk, (struct sock_common *)ssk);
		if (err < 0)
			goto failed;

		local_id = err;
	}

	subflow->remote_key = msk->remote_key;
	subflow->local_key = msk->local_key;
	subflow->token = msk->token;
	mptcp_info2sockaddr(loc, &addr, ssk->sk_family);

	addrlen = sizeof(struct sockaddr_in);
#if IS_ENABLED(CONFIG_MPTCP_IPV6)
	if (addr.ss_family == AF_INET6)
		addrlen = sizeof(struct sockaddr_in6);
#endif
	ssk->sk_bound_dev_if = loc->ifindex;
	err = kernel_bind(sf, (struct sockaddr *)&addr, addrlen);
	if (err)
		goto failed;

	mptcp_crypto_key_sha(subflow->remote_key, &remote_token, NULL);
	pr_debug("msk=%p remote_token=%u local_id=%d remote_id=%d", msk,
		 remote_token, local_id, remote_id);
	subflow->remote_token = remote_token;
	subflow->local_id = local_id;
	subflow->remote_id = remote_id;
	subflow->request_join = 1;
	subflow->request_bkup = !!(loc->flags & MPTCP_PM_ADDR_FLAG_BACKUP);
	mptcp_info2sockaddr(remote, &addr, ssk->sk_family);

	mptcp_add_pending_subflow(msk, subflow);
	err = kernel_connect(sf, (struct sockaddr *)&addr, addrlen, O_NONBLOCK);
	if (err && err != -EINPROGRESS)
		goto failed_unlink;

	/* discard the subflow socket */
	mptcp_sock_graft(ssk, sk->sk_socket);
	iput(SOCK_INODE(sf));
	return err;

failed_unlink:
	spin_lock_bh(&msk->join_list_lock);
	list_del(&subflow->node);
	spin_unlock_bh(&msk->join_list_lock);
	sock_put(mptcp_subflow_tcp_sock(subflow));

failed:
	subflow->disposable = 1;
	sock_release(sf);
	return err;
}

static void mptcp_attach_cgroup(struct sock *parent, struct sock *child)
{
#ifdef CONFIG_SOCK_CGROUP_DATA
	struct sock_cgroup_data *parent_skcd = &parent->sk_cgrp_data,
				*child_skcd = &child->sk_cgrp_data;

	/* only the additional subflows created by kworkers have to be modified */
	if (cgroup_id(sock_cgroup_ptr(parent_skcd)) !=
	    cgroup_id(sock_cgroup_ptr(child_skcd))) {
#ifdef CONFIG_MEMCG
		struct mem_cgroup *memcg = parent->sk_memcg;

		mem_cgroup_sk_free(child);
		if (memcg && css_tryget(&memcg->css))
			child->sk_memcg = memcg;
#endif /* CONFIG_MEMCG */

		cgroup_sk_free(child_skcd);
		*child_skcd = *parent_skcd;
		cgroup_sk_clone(child_skcd);
	}
#endif /* CONFIG_SOCK_CGROUP_DATA */
}

static void mptcp_subflow_ops_override(struct sock *ssk)
{
#if IS_ENABLED(CONFIG_MPTCP_IPV6)
	if (ssk->sk_prot == &tcpv6_prot)
		ssk->sk_prot = &tcpv6_prot_override;
	else
#endif
		ssk->sk_prot = &tcp_prot_override;
}

static void mptcp_subflow_ops_undo_override(struct sock *ssk)
{
#if IS_ENABLED(CONFIG_MPTCP_IPV6)
	if (ssk->sk_prot == &tcpv6_prot_override)
		ssk->sk_prot = &tcpv6_prot;
	else
#endif
		ssk->sk_prot = &tcp_prot;
}

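/* create a kernel TCP socket attached to the "mptcp" ULP, to be used as a
 * subflow of @sk; the inode ownership is aligned with the msk one so that
 * procfs/diag show the subflow under the correct user
 */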
int mptcp_subflow_create_socket(struct sock *sk, struct socket **new_sock)
{
	struct mptcp_subflow_context *subflow;
	struct net *net = sock_net(sk);
	struct socket *sf;
	int err;

	/* un-accepted server sockets can reach here - on bad configuration
	 * bail early to avoid greater trouble later
	 */
	if (unlikely(!sk->sk_socket))
		return -EINVAL;

	err = sock_create_kern(net, sk->sk_family, SOCK_STREAM, IPPROTO_TCP,
			       &sf);
	if (err)
		return err;

	lock_sock(sf->sk);

	/* the newly created socket has to be in the same cgroup as its parent */
	mptcp_attach_cgroup(sk, sf->sk);

	/* kernel sockets do not by default acquire net ref, but TCP timer
	 * needs it.
	 */
	sf->sk->sk_net_refcnt = 1;
	get_net(net);
#ifdef CONFIG_PROC_FS
	this_cpu_add(*net->core.sock_inuse, 1);
#endif
	err = tcp_set_ulp(sf->sk, "mptcp");
	release_sock(sf->sk);

	if (err) {
		sock_release(sf);
		return err;
	}

	/* the newly created socket really belongs to the owning MPTCP master
	 * socket, even if for additional subflows the allocation is performed
	 * by a kernel workqueue. Adjust inode references, so that the
	 * procfs/diag interfaces really show this one belonging to the correct
	 * user.
	 */
	SOCK_INODE(sf)->i_ino = SOCK_INODE(sk->sk_socket)->i_ino;
	SOCK_INODE(sf)->i_uid = SOCK_INODE(sk->sk_socket)->i_uid;
	SOCK_INODE(sf)->i_gid = SOCK_INODE(sk->sk_socket)->i_gid;

	subflow = mptcp_subflow_ctx(sf->sk);
	pr_debug("subflow=%p", subflow);

	*new_sock = sf;
	sock_hold(sk);
	subflow->conn = sk;
	mptcp_subflow_ops_override(sf->sk);

	return 0;
}

static struct mptcp_subflow_context *subflow_create_ctx(struct sock *sk,
							gfp_t priority)
{
	struct inet_connection_sock *icsk = inet_csk(sk);
	struct mptcp_subflow_context *ctx;

	ctx = kzalloc(sizeof(*ctx), priority);
	if (!ctx)
		return NULL;

	rcu_assign_pointer(icsk->icsk_ulp_data, ctx);
	INIT_LIST_HEAD(&ctx->node);
	INIT_LIST_HEAD(&ctx->delegated_node);

	pr_debug("subflow=%p", ctx);

	ctx->tcp_sock = sk;

	return ctx;
}

static void __subflow_state_change(struct sock *sk)
{
	struct socket_wq *wq;

	rcu_read_lock();
	wq = rcu_dereference(sk->sk_wq);
	if (skwq_has_sleeper(wq))
		wake_up_interruptible_all(&wq->wait);
	rcu_read_unlock();
}

static bool subflow_is_done(const struct sock *sk)
{
	return sk->sk_shutdown & RCV_SHUTDOWN || sk->sk_state == TCP_CLOSE;
}

static void subflow_state_change(struct sock *sk)
{
	struct mptcp_subflow_context *subflow = mptcp_subflow_ctx(sk);
	struct sock *parent = subflow->conn;

	__subflow_state_change(sk);

	if (subflow_simultaneous_connect(sk)) {
		mptcp_propagate_sndbuf(parent, sk);
		mptcp_do_fallback(sk);
		mptcp_rcv_space_init(mptcp_sk(parent), sk);
		pr_fallback(mptcp_sk(parent));
		subflow->conn_finished = 1;
		if (inet_sk_state_load(parent) == TCP_SYN_SENT) {
			inet_sk_state_store(parent, TCP_ESTABLISHED);
			parent->sk_state_change(parent);
		}
	}

	/* as recvmsg() does not acquire the subflow socket for ssk selection
	 * a fin packet carrying a DSS can be unnoticed if we don't trigger
	 * the data available machinery here.
	 */
	if (mptcp_subflow_data_available(sk))
		mptcp_data_ready(parent, sk);

	subflow_sched_work_if_closed(mptcp_sk(parent), sk);

	if (__mptcp_check_fallback(mptcp_sk(parent)) &&
	    !subflow->rx_eof && subflow_is_done(sk)) {
		subflow->rx_eof = 1;
		mptcp_subflow_eof(parent);
	}
}

static int subflow_ulp_init(struct sock *sk)
{
	struct inet_connection_sock *icsk = inet_csk(sk);
	struct mptcp_subflow_context *ctx;
	struct tcp_sock *tp = tcp_sk(sk);
	int err = 0;

	/* disallow attaching ULP to a socket unless it has been
	 * created with sock_create_kern()
	 */
	if (!sk->sk_kern_sock) {
		err = -EOPNOTSUPP;
		goto out;
	}

	ctx = subflow_create_ctx(sk, GFP_KERNEL);
	if (!ctx) {
		err = -ENOMEM;
		goto out;
	}

	pr_debug("subflow=%p, family=%d", ctx, sk->sk_family);

	tp->is_mptcp = 1;
	ctx->icsk_af_ops = icsk->icsk_af_ops;
	icsk->icsk_af_ops = subflow_default_af_ops(sk);
	ctx->tcp_data_ready = sk->sk_data_ready;
	ctx->tcp_state_change = sk->sk_state_change;
	ctx->tcp_write_space = sk->sk_write_space;
	ctx->tcp_error_report = sk->sk_error_report;
	sk->sk_data_ready = subflow_data_ready;
	sk->sk_write_space = subflow_write_space;
	sk->sk_state_change = subflow_state_change;
	sk->sk_error_report = subflow_error_report;
out:
	return err;
}

static void subflow_ulp_release(struct sock *ssk)
{
	struct mptcp_subflow_context *ctx = mptcp_subflow_ctx(ssk);
	bool release = true;
	struct sock *sk;

	if (!ctx)
		return;

	sk = ctx->conn;
	if (sk) {
		/* if the msk has been orphaned, keep the ctx
		 * alive, will be freed by __mptcp_close_ssk(),
		 * when the subflow is still unaccepted
		 */
		release = ctx->disposable || list_empty(&ctx->node);
		sock_put(sk);
	}

	mptcp_subflow_ops_undo_override(ssk);
	if (release)
		kfree_rcu(ctx, rcu);
}

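/* ULP clone hook, invoked at 3rd-ack time: attach a new subflow context
 * to the child socket, inheriting the request socket state, or fall back
 * to plain TCP if the handshake was not MPTCP
 */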
static void subflow_ulp_clone(const struct request_sock *req,
			      struct sock *newsk,
			      const gfp_t priority)
{
	struct mptcp_subflow_request_sock *subflow_req = mptcp_subflow_rsk(req);
	struct mptcp_subflow_context *old_ctx = mptcp_subflow_ctx(newsk);
	struct mptcp_subflow_context *new_ctx;

	if (!tcp_rsk(req)->is_mptcp ||
	    (!subflow_req->mp_capable && !subflow_req->mp_join)) {
		subflow_ulp_fallback(newsk, old_ctx);
		return;
	}

	new_ctx = subflow_create_ctx(newsk, priority);
	if (!new_ctx) {
		subflow_ulp_fallback(newsk, old_ctx);
		return;
	}

	new_ctx->conn_finished = 1;
	new_ctx->icsk_af_ops = old_ctx->icsk_af_ops;
	new_ctx->tcp_data_ready = old_ctx->tcp_data_ready;
	new_ctx->tcp_state_change = old_ctx->tcp_state_change;
	new_ctx->tcp_write_space = old_ctx->tcp_write_space;
	new_ctx->tcp_error_report = old_ctx->tcp_error_report;
	new_ctx->rel_write_seq = 1;
	new_ctx->tcp_sock = newsk;

	if (subflow_req->mp_capable) {
		/* see comments in subflow_syn_recv_sock(), MPTCP connection
		 * is fully established only after we receive the remote key
		 */
		new_ctx->mp_capable = 1;
		new_ctx->local_key = subflow_req->local_key;
		new_ctx->token = subflow_req->token;
		new_ctx->ssn_offset = subflow_req->ssn_offset;
		new_ctx->idsn = subflow_req->idsn;
	} else if (subflow_req->mp_join) {
		new_ctx->ssn_offset = subflow_req->ssn_offset;
		new_ctx->mp_join = 1;
		new_ctx->fully_established = 1;
		new_ctx->backup = subflow_req->backup;
		new_ctx->local_id = subflow_req->local_id;
		new_ctx->remote_id = subflow_req->remote_id;
		new_ctx->token = subflow_req->token;
		new_ctx->thmac = subflow_req->thmac;
	}
}

static void tcp_release_cb_override(struct sock *ssk)
{
	struct mptcp_subflow_context *subflow = mptcp_subflow_ctx(ssk);

	if (mptcp_subflow_has_delegated_action(subflow))
		mptcp_subflow_process_delegated(ssk);

	tcp_release_cb(ssk);
}

static struct tcp_ulp_ops subflow_ulp_ops __read_mostly = {
	.name		= "mptcp",
	.owner		= THIS_MODULE,
	.init		= subflow_ulp_init,
	.release	= subflow_ulp_release,
	.clone		= subflow_ulp_clone,
};

static int subflow_ops_init(struct request_sock_ops *subflow_ops)
{
	subflow_ops->obj_size = sizeof(struct mptcp_subflow_request_sock);
	subflow_ops->slab_name = "request_sock_subflow";

	subflow_ops->slab = kmem_cache_create(subflow_ops->slab_name,
					      subflow_ops->obj_size, 0,
					      SLAB_ACCOUNT |
					      SLAB_TYPESAFE_BY_RCU,
					      NULL);
	if (!subflow_ops->slab)
		return -ENOMEM;

	subflow_ops->destructor = subflow_req_destructor;

	return 0;
}

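/* wire up the MPTCP-specific request socket ops, af_ops and proto
 * overrides, then register the "mptcp" ULP; runs once at boot
 */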
void __init mptcp_subflow_init(void)
{
	mptcp_subflow_request_sock_ops = tcp_request_sock_ops;
	if (subflow_ops_init(&mptcp_subflow_request_sock_ops) != 0)
		panic("MPTCP: failed to init subflow request sock ops\n");

	subflow_request_sock_ipv4_ops = tcp_request_sock_ipv4_ops;
	subflow_request_sock_ipv4_ops.route_req = subflow_v4_route_req;

	subflow_specific = ipv4_specific;
	subflow_specific.conn_request = subflow_v4_conn_request;
	subflow_specific.syn_recv_sock = subflow_syn_recv_sock;
	subflow_specific.sk_rx_dst_set = subflow_finish_connect;

	tcp_prot_override = tcp_prot;
	tcp_prot_override.release_cb = tcp_release_cb_override;

#if IS_ENABLED(CONFIG_MPTCP_IPV6)
	subflow_request_sock_ipv6_ops = tcp_request_sock_ipv6_ops;
	subflow_request_sock_ipv6_ops.route_req = subflow_v6_route_req;

	subflow_v6_specific = ipv6_specific;
	subflow_v6_specific.conn_request = subflow_v6_conn_request;
	subflow_v6_specific.syn_recv_sock = subflow_syn_recv_sock;
	subflow_v6_specific.sk_rx_dst_set = subflow_finish_connect;

	subflow_v6m_specific = subflow_v6_specific;
	subflow_v6m_specific.queue_xmit = ipv4_specific.queue_xmit;
	subflow_v6m_specific.send_check = ipv4_specific.send_check;
	subflow_v6m_specific.net_header_len = ipv4_specific.net_header_len;
	subflow_v6m_specific.mtu_reduced = ipv4_specific.mtu_reduced;
	subflow_v6m_specific.net_frag_header_len = 0;

	tcpv6_prot_override = tcpv6_prot;
	tcpv6_prot_override.release_cb = tcp_release_cb_override;
#endif

	mptcp_diag_subflow_init(&subflow_ulp_ops);

	if (tcp_register_ulp(&subflow_ulp_ops) != 0)
		panic("MPTCP: failed to register subflows to ULP\n");
}