// SPDX-License-Identifier: GPL-2.0
/* Multipath TCP
 *
 * Copyright (c) 2017 - 2019, Intel Corporation.
 */

#define pr_fmt(fmt) "MPTCP: " fmt

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/netdevice.h>
#include <crypto/algapi.h>
#include <crypto/sha2.h>
#include <net/sock.h>
#include <net/inet_common.h>
#include <net/inet_hashtables.h>
#include <net/protocol.h>
#include <net/tcp.h>
#if IS_ENABLED(CONFIG_MPTCP_IPV6)
#include <net/ip6_route.h>
#include <net/transp_v6.h>
#endif
#include <net/mptcp.h>
#include <uapi/linux/mptcp.h>
#include "protocol.h"
#include "mib.h"

#include <trace/events/mptcp.h>

static void mptcp_subflow_ops_undo_override(struct sock *ssk);

static void SUBFLOW_REQ_INC_STATS(struct request_sock *req,
				  enum linux_mptcp_mib_field field)
{
	MPTCP_INC_STATS(sock_net(req_to_sk(req)), field);
}

static void subflow_req_destructor(struct request_sock *req)
{
	struct mptcp_subflow_request_sock *subflow_req = mptcp_subflow_rsk(req);

	pr_debug("subflow_req=%p", subflow_req);

	if (subflow_req->msk)
		sock_put((struct sock *)subflow_req->msk);

	mptcp_token_destroy_request(req);
	tcp_request_sock_ops.destructor(req);
}

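/* The helper below implements the HMAC scheme of RFC 8684: the message is
 * the two 32-bit nonces concatenated in network byte order (8 bytes), and
 * the key is built from the two 64-bit keys; mptcp_crypto_hmac_sha() then
 * runs HMAC-SHA256 over that message.
 */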
static void subflow_generate_hmac(u64 key1, u64 key2, u32 nonce1, u32 nonce2,
				  void *hmac)
{
	u8 msg[8];

	put_unaligned_be32(nonce1, &msg[0]);
	put_unaligned_be32(nonce2, &msg[4]);

	mptcp_crypto_hmac_sha(key1, key2, msg, 8, hmac);
}

static bool mptcp_can_accept_new_subflow(const struct mptcp_sock *msk)
{
	return mptcp_is_fully_established((void *)msk) &&
	       READ_ONCE(msk->pm.accept_subflow);
}

/* validate received token and create truncated hmac and nonce for SYN-ACK */
static void subflow_req_create_thmac(struct mptcp_subflow_request_sock *subflow_req)
{
	struct mptcp_sock *msk = subflow_req->msk;
	u8 hmac[SHA256_DIGEST_SIZE];

	get_random_bytes(&subflow_req->local_nonce, sizeof(u32));

	subflow_generate_hmac(msk->local_key, msk->remote_key,
			      subflow_req->local_nonce,
			      subflow_req->remote_nonce, hmac);

	subflow_req->thmac = get_unaligned_be64(hmac);
}

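/* Note: per RFC 8684 §3.2 the SYN/ACK echoes HMAC-B = HMAC(Key-B + Key-A,
 * R-B + R-A) truncated to the leftmost 64 bits of the digest, which is what
 * the get_unaligned_be64() in subflow_req_create_thmac() extracts.
 */
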
static struct mptcp_sock *subflow_token_join_request(struct request_sock *req)
{
	struct mptcp_subflow_request_sock *subflow_req = mptcp_subflow_rsk(req);
	struct mptcp_sock *msk;
	int local_id;

	msk = mptcp_token_get_sock(sock_net(req_to_sk(req)), subflow_req->token);
	if (!msk) {
		SUBFLOW_REQ_INC_STATS(req, MPTCP_MIB_JOINNOTOKEN);
		return NULL;
	}

	local_id = mptcp_pm_get_local_id(msk, (struct sock_common *)req);
	if (local_id < 0) {
		sock_put((struct sock *)msk);
		return NULL;
	}
	subflow_req->local_id = local_id;

	return msk;
}

static void subflow_init_req(struct request_sock *req, const struct sock *sk_listener)
{
	struct mptcp_subflow_request_sock *subflow_req = mptcp_subflow_rsk(req);

	subflow_req->mp_capable = 0;
	subflow_req->mp_join = 0;
	subflow_req->csum_reqd = mptcp_is_checksum_enabled(sock_net(sk_listener));
	subflow_req->allow_join_id0 = mptcp_allow_join_id0(sock_net(sk_listener));
	subflow_req->msk = NULL;
	mptcp_token_init_request(req);
}

static bool subflow_use_different_sport(struct mptcp_sock *msk, const struct sock *sk)
{
	return inet_sk(sk)->inet_sport != inet_sk((struct sock *)msk)->inet_sport;
}

static void subflow_add_reset_reason(struct sk_buff *skb, u8 reason)
{
	struct mptcp_ext *mpext = skb_ext_add(skb, SKB_EXT_MPTCP);

	if (mpext) {
		memset(mpext, 0, sizeof(*mpext));
		mpext->reset_reason = reason;
	}
}

/* Init mptcp request socket.
 *
 * Returns an error code if a JOIN has failed and a TCP reset
 * should be sent.
 */
static int subflow_check_req(struct request_sock *req,
			     const struct sock *sk_listener,
			     struct sk_buff *skb)
{
	struct mptcp_subflow_context *listener = mptcp_subflow_ctx(sk_listener);
	struct mptcp_subflow_request_sock *subflow_req = mptcp_subflow_rsk(req);
	struct mptcp_options_received mp_opt;
	bool opt_mp_capable, opt_mp_join;

	pr_debug("subflow_req=%p, listener=%p", subflow_req, listener);

#ifdef CONFIG_TCP_MD5SIG
	/* no MPTCP if MD5SIG is enabled on this socket or we may run out of
	 * TCP option space.
	 */
	if (rcu_access_pointer(tcp_sk(sk_listener)->md5sig_info))
		return -EINVAL;
#endif

	mptcp_get_options(skb, &mp_opt);

	opt_mp_capable = !!(mp_opt.suboptions & OPTIONS_MPTCP_MPC);
	opt_mp_join = !!(mp_opt.suboptions & OPTIONS_MPTCP_MPJ);
	if (opt_mp_capable) {
		SUBFLOW_REQ_INC_STATS(req, MPTCP_MIB_MPCAPABLEPASSIVE);

		if (opt_mp_join)
			return 0;
	} else if (opt_mp_join) {
		SUBFLOW_REQ_INC_STATS(req, MPTCP_MIB_JOINSYNRX);
	}

	if (opt_mp_capable && listener->request_mptcp) {
		int err, retries = MPTCP_TOKEN_MAX_RETRIES;

		subflow_req->ssn_offset = TCP_SKB_CB(skb)->seq;
again:
		do {
			get_random_bytes(&subflow_req->local_key, sizeof(subflow_req->local_key));
		} while (subflow_req->local_key == 0);

		if (unlikely(req->syncookie)) {
			mptcp_crypto_key_sha(subflow_req->local_key,
					     &subflow_req->token,
					     &subflow_req->idsn);
			if (mptcp_token_exists(subflow_req->token)) {
				if (retries-- > 0)
					goto again;
				SUBFLOW_REQ_INC_STATS(req, MPTCP_MIB_TOKENFALLBACKINIT);
			} else {
				subflow_req->mp_capable = 1;
			}
			return 0;
		}

		err = mptcp_token_new_request(req);
		if (err == 0)
			subflow_req->mp_capable = 1;
		else if (retries-- > 0)
			goto again;
		else
			SUBFLOW_REQ_INC_STATS(req, MPTCP_MIB_TOKENFALLBACKINIT);

	} else if (opt_mp_join && listener->request_mptcp) {
		subflow_req->ssn_offset = TCP_SKB_CB(skb)->seq;
		subflow_req->mp_join = 1;
		subflow_req->backup = mp_opt.backup;
		subflow_req->remote_id = mp_opt.join_id;
		subflow_req->token = mp_opt.token;
		subflow_req->remote_nonce = mp_opt.nonce;
		subflow_req->msk = subflow_token_join_request(req);

		/* Can't fall back to TCP in this case. */
		if (!subflow_req->msk) {
			subflow_add_reset_reason(skb, MPTCP_RST_EMPTCP);
			return -EPERM;
		}

		if (subflow_use_different_sport(subflow_req->msk, sk_listener)) {
			pr_debug("syn inet_sport=%d %d",
				 ntohs(inet_sk(sk_listener)->inet_sport),
				 ntohs(inet_sk((struct sock *)subflow_req->msk)->inet_sport));
			if (!mptcp_pm_sport_in_anno_list(subflow_req->msk, sk_listener)) {
				SUBFLOW_REQ_INC_STATS(req, MPTCP_MIB_MISMATCHPORTSYNRX);
				return -EPERM;
			}
			SUBFLOW_REQ_INC_STATS(req, MPTCP_MIB_JOINPORTSYNRX);
		}

		subflow_req_create_thmac(subflow_req);

		if (unlikely(req->syncookie)) {
			if (mptcp_can_accept_new_subflow(subflow_req->msk))
				subflow_init_req_cookie_join_save(subflow_req, skb);
			else
				return -EPERM;
		}

		pr_debug("token=%u, remote_nonce=%u msk=%p", subflow_req->token,
			 subflow_req->remote_nonce, subflow_req->msk);
	}

	return 0;
}

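/* Syncookie counterpart of subflow_check_req(): when the SYN was answered
 * with a cookie no request socket existed at SYN time, so the MPTCP request
 * state is rebuilt here from the options carried by the third ACK.
 */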
int mptcp_subflow_init_cookie_req(struct request_sock *req,
				  const struct sock *sk_listener,
				  struct sk_buff *skb)
{
	struct mptcp_subflow_context *listener = mptcp_subflow_ctx(sk_listener);
	struct mptcp_subflow_request_sock *subflow_req = mptcp_subflow_rsk(req);
	struct mptcp_options_received mp_opt;
	bool opt_mp_capable, opt_mp_join;
	int err;

	subflow_init_req(req, sk_listener);
	mptcp_get_options(skb, &mp_opt);

	opt_mp_capable = !!(mp_opt.suboptions & OPTIONS_MPTCP_MPC);
	opt_mp_join = !!(mp_opt.suboptions & OPTIONS_MPTCP_MPJ);
	if (opt_mp_capable && opt_mp_join)
		return -EINVAL;

	if (opt_mp_capable && listener->request_mptcp) {
		if (mp_opt.sndr_key == 0)
			return -EINVAL;

		subflow_req->local_key = mp_opt.rcvr_key;
		err = mptcp_token_new_request(req);
		if (err)
			return err;

		subflow_req->mp_capable = 1;
		subflow_req->ssn_offset = TCP_SKB_CB(skb)->seq - 1;
	} else if (opt_mp_join && listener->request_mptcp) {
		if (!mptcp_token_join_cookie_init_state(subflow_req, skb))
			return -EINVAL;

		subflow_req->mp_join = 1;
		subflow_req->ssn_offset = TCP_SKB_CB(skb)->seq - 1;
	}

	return 0;
}
EXPORT_SYMBOL_GPL(mptcp_subflow_init_cookie_req);

static struct dst_entry *subflow_v4_route_req(const struct sock *sk,
					      struct sk_buff *skb,
					      struct flowi *fl,
					      struct request_sock *req)
{
	struct dst_entry *dst;
	int err;

	tcp_rsk(req)->is_mptcp = 1;
	subflow_init_req(req, sk);

	dst = tcp_request_sock_ipv4_ops.route_req(sk, skb, fl, req);
	if (!dst)
		return NULL;

	err = subflow_check_req(req, sk, skb);
	if (err == 0)
		return dst;

	dst_release(dst);
	if (!req->syncookie)
		tcp_request_sock_ops.send_reset(sk, skb);
	return NULL;
}

#if IS_ENABLED(CONFIG_MPTCP_IPV6)
static struct dst_entry *subflow_v6_route_req(const struct sock *sk,
					      struct sk_buff *skb,
					      struct flowi *fl,
					      struct request_sock *req)
{
	struct dst_entry *dst;
	int err;

	tcp_rsk(req)->is_mptcp = 1;
	subflow_init_req(req, sk);

	dst = tcp_request_sock_ipv6_ops.route_req(sk, skb, fl, req);
	if (!dst)
		return NULL;

	err = subflow_check_req(req, sk, skb);
	if (err == 0)
		return dst;

	dst_release(dst);
	if (!req->syncookie)
		tcp6_request_sock_ops.send_reset(sk, skb);
	return NULL;
}
#endif

/* validate received truncated hmac and create hmac for third ACK */
static bool subflow_thmac_valid(struct mptcp_subflow_context *subflow)
{
	u8 hmac[SHA256_DIGEST_SIZE];
	u64 thmac;

	subflow_generate_hmac(subflow->remote_key, subflow->local_key,
			      subflow->remote_nonce, subflow->local_nonce,
			      hmac);

	thmac = get_unaligned_be64(hmac);
	pr_debug("subflow=%p, token=%u, thmac=%llu, subflow->thmac=%llu\n",
		 subflow, subflow->token,
		 (unsigned long long)thmac,
		 (unsigned long long)subflow->thmac);

	return thmac == subflow->thmac;
}

void mptcp_subflow_reset(struct sock *ssk)
{
	struct mptcp_subflow_context *subflow = mptcp_subflow_ctx(ssk);
	struct sock *sk = subflow->conn;

	/* must hold: tcp_done() could drop last reference on parent */
	sock_hold(sk);

	tcp_set_state(ssk, TCP_CLOSE);
	tcp_send_active_reset(ssk, GFP_ATOMIC);
	tcp_done(ssk);
	if (!test_and_set_bit(MPTCP_WORK_CLOSE_SUBFLOW, &mptcp_sk(sk)->flags) &&
	    schedule_work(&mptcp_sk(sk)->work))
		return; /* worker will put sk for us */

	sock_put(sk);
}

static bool subflow_use_different_dport(struct mptcp_sock *msk, const struct sock *sk)
{
	return inet_sk(sk)->inet_dport != inet_sk((struct sock *)msk)->inet_dport;
}

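/* Moving the msk to TCP_ESTABLISHED must happen under the msk data lock: if
 * the socket is owned by user context, the update is deferred via the
 * MPTCP_CONNECTED flag and performed later by the release callback.
 */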
void __mptcp_set_connected(struct sock *sk)
{
	if (sk->sk_state == TCP_SYN_SENT) {
		inet_sk_state_store(sk, TCP_ESTABLISHED);
		sk->sk_state_change(sk);
	}
}

static void mptcp_set_connected(struct sock *sk)
{
	mptcp_data_lock(sk);
	if (!sock_owned_by_user(sk))
		__mptcp_set_connected(sk);
	else
		__set_bit(MPTCP_CONNECTED, &mptcp_sk(sk)->cb_flags);
	mptcp_data_unlock(sk);
}

static void subflow_finish_connect(struct sock *sk, const struct sk_buff *skb)
{
	struct mptcp_subflow_context *subflow = mptcp_subflow_ctx(sk);
	struct mptcp_options_received mp_opt;
	struct sock *parent = subflow->conn;

	subflow->icsk_af_ops->sk_rx_dst_set(sk, skb);

	/* be sure no special action on any packet other than syn-ack */
	if (subflow->conn_finished)
		return;

	mptcp_propagate_sndbuf(parent, sk);
	subflow->rel_write_seq = 1;
	subflow->conn_finished = 1;
	subflow->ssn_offset = TCP_SKB_CB(skb)->seq;
	pr_debug("subflow=%p synack seq=%x", subflow, subflow->ssn_offset);

	mptcp_get_options(skb, &mp_opt);
	if (subflow->request_mptcp) {
		if (!(mp_opt.suboptions & OPTIONS_MPTCP_MPC)) {
			MPTCP_INC_STATS(sock_net(sk),
					MPTCP_MIB_MPCAPABLEACTIVEFALLBACK);
			mptcp_do_fallback(sk);
			pr_fallback(mptcp_sk(subflow->conn));
			goto fallback;
		}

		if (mp_opt.suboptions & OPTION_MPTCP_CSUMREQD)
			WRITE_ONCE(mptcp_sk(parent)->csum_enabled, true);
		if (mp_opt.deny_join_id0)
			WRITE_ONCE(mptcp_sk(parent)->pm.remote_deny_join_id0, true);
		subflow->mp_capable = 1;
		subflow->can_ack = 1;
		subflow->remote_key = mp_opt.sndr_key;
		pr_debug("subflow=%p, remote_key=%llu", subflow,
			 subflow->remote_key);
		MPTCP_INC_STATS(sock_net(sk), MPTCP_MIB_MPCAPABLEACTIVEACK);
		mptcp_finish_connect(sk);
		mptcp_set_connected(parent);
	} else if (subflow->request_join) {
		u8 hmac[SHA256_DIGEST_SIZE];

		if (!(mp_opt.suboptions & OPTIONS_MPTCP_MPJ)) {
			subflow->reset_reason = MPTCP_RST_EMPTCP;
			goto do_reset;
		}

		subflow->backup = mp_opt.backup;
		subflow->thmac = mp_opt.thmac;
		subflow->remote_nonce = mp_opt.nonce;
		pr_debug("subflow=%p, thmac=%llu, remote_nonce=%u backup=%d",
			 subflow, subflow->thmac, subflow->remote_nonce,
			 subflow->backup);

		if (!subflow_thmac_valid(subflow)) {
			MPTCP_INC_STATS(sock_net(sk), MPTCP_MIB_JOINACKMAC);
			subflow->reset_reason = MPTCP_RST_EMPTCP;
			goto do_reset;
		}

		if (!mptcp_finish_join(sk))
			goto do_reset;

		subflow_generate_hmac(subflow->local_key, subflow->remote_key,
				      subflow->local_nonce,
				      subflow->remote_nonce,
				      hmac);
		memcpy(subflow->hmac, hmac, MPTCPOPT_HMAC_LEN);

		subflow->mp_join = 1;
		MPTCP_INC_STATS(sock_net(sk), MPTCP_MIB_JOINSYNACKRX);

		if (subflow_use_different_dport(mptcp_sk(parent), sk)) {
			pr_debug("synack inet_dport=%d %d",
				 ntohs(inet_sk(sk)->inet_dport),
				 ntohs(inet_sk(parent)->inet_dport));
			MPTCP_INC_STATS(sock_net(sk), MPTCP_MIB_JOINPORTSYNACKRX);
		}
	} else if (mptcp_check_fallback(sk)) {
fallback:
		mptcp_rcv_space_init(mptcp_sk(parent), sk);
		mptcp_set_connected(parent);
	}
	return;

do_reset:
	subflow->reset_transient = 0;
	mptcp_subflow_reset(sk);
}

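/* The ops structures below start as copies of the plain TCP ones; the
 * conn_request/route_req/syn_recv_sock hooks are swapped for the subflow
 * variants in mptcp_subflow_init().
 */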
struct request_sock_ops mptcp_subflow_request_sock_ops;
EXPORT_SYMBOL_GPL(mptcp_subflow_request_sock_ops);
static struct tcp_request_sock_ops subflow_request_sock_ipv4_ops;

static int subflow_v4_conn_request(struct sock *sk, struct sk_buff *skb)
{
	struct mptcp_subflow_context *subflow = mptcp_subflow_ctx(sk);

	pr_debug("subflow=%p", subflow);

	/* Never answer to SYNs sent to broadcast or multicast */
	if (skb_rtable(skb)->rt_flags & (RTCF_BROADCAST | RTCF_MULTICAST))
		goto drop;

	return tcp_conn_request(&mptcp_subflow_request_sock_ops,
				&subflow_request_sock_ipv4_ops,
				sk, skb);
drop:
	tcp_listendrop(sk);
	return 0;
}

#if IS_ENABLED(CONFIG_MPTCP_IPV6)
static struct tcp_request_sock_ops subflow_request_sock_ipv6_ops;
static struct inet_connection_sock_af_ops subflow_v6_specific;
static struct inet_connection_sock_af_ops subflow_v6m_specific;
static struct proto tcpv6_prot_override;

static int subflow_v6_conn_request(struct sock *sk, struct sk_buff *skb)
{
	struct mptcp_subflow_context *subflow = mptcp_subflow_ctx(sk);

	pr_debug("subflow=%p", subflow);

	if (skb->protocol == htons(ETH_P_IP))
		return subflow_v4_conn_request(sk, skb);

	if (!ipv6_unicast_destination(skb))
		goto drop;

	if (ipv6_addr_v4mapped(&ipv6_hdr(skb)->saddr)) {
		__IP6_INC_STATS(sock_net(sk), NULL, IPSTATS_MIB_INHDRERRORS);
		return 0;
	}

	return tcp_conn_request(&mptcp_subflow_request_sock_ops,
				&subflow_request_sock_ipv6_ops, sk, skb);

drop:
	tcp_listendrop(sk);
	return 0; /* don't send reset */
}
#endif

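/* subflow_hmac_valid() deliberately compares the digests with
 * crypto_memneq() instead of memcmp(), so the check runs in constant time
 * and does not leak HMAC bytes through timing.
 */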
/* validate hmac received in third ACK */
static bool subflow_hmac_valid(const struct request_sock *req,
			       const struct mptcp_options_received *mp_opt)
{
	const struct mptcp_subflow_request_sock *subflow_req;
	u8 hmac[SHA256_DIGEST_SIZE];
	struct mptcp_sock *msk;

	subflow_req = mptcp_subflow_rsk(req);
	msk = subflow_req->msk;
	if (!msk)
		return false;

	subflow_generate_hmac(msk->remote_key, msk->local_key,
			      subflow_req->remote_nonce,
			      subflow_req->local_nonce, hmac);

	return !crypto_memneq(hmac, mp_opt->hmac, MPTCPOPT_HMAC_LEN);
}

static void mptcp_sock_destruct(struct sock *sk)
{
	/* if new mptcp socket isn't accepted, it is freed
	 * from the tcp listener sockets request queue, linked
	 * from req->sk.  The tcp socket is released.
	 * This calls the ULP release function which will
	 * also remove the mptcp socket, via
	 * sock_put(ctx->conn).
	 *
	 * Problem is that the mptcp socket will be in
	 * ESTABLISHED state and will not have the SOCK_DEAD flag.
	 * Both result in warnings from inet_sock_destruct.
	 */
	if ((1 << sk->sk_state) & (TCPF_ESTABLISHED | TCPF_CLOSE_WAIT)) {
		sk->sk_state = TCP_CLOSE;
		WARN_ON_ONCE(sk->sk_socket);
		sock_orphan(sk);
	}

	mptcp_destroy_common(mptcp_sk(sk));
	inet_sock_destruct(sk);
}

static void mptcp_force_close(struct sock *sk)
{
	/* the msk is not yet exposed to user-space */
	inet_sk_state_store(sk, TCP_CLOSE);
	sk_common_release(sk);
}

static void subflow_ulp_fallback(struct sock *sk,
				 struct mptcp_subflow_context *old_ctx)
{
	struct inet_connection_sock *icsk = inet_csk(sk);

	mptcp_subflow_tcp_fallback(sk, old_ctx);
	icsk->icsk_ulp_ops = NULL;
	rcu_assign_pointer(icsk->icsk_ulp_data, NULL);
	tcp_sk(sk)->is_mptcp = 0;

	mptcp_subflow_ops_undo_override(sk);
}

static void subflow_drop_ctx(struct sock *ssk)
{
	struct mptcp_subflow_context *ctx = mptcp_subflow_ctx(ssk);

	if (!ctx)
		return;

	subflow_ulp_fallback(ssk, ctx);
	if (ctx->conn)
		sock_put(ctx->conn);

	kfree_rcu(ctx, rcu);
}

void mptcp_subflow_fully_established(struct mptcp_subflow_context *subflow,
				     struct mptcp_options_received *mp_opt)
{
	struct mptcp_sock *msk = mptcp_sk(subflow->conn);

	subflow->remote_key = mp_opt->sndr_key;
	subflow->fully_established = 1;
	subflow->can_ack = 1;
	WRITE_ONCE(msk->fully_established, true);
}

static struct sock *subflow_syn_recv_sock(const struct sock *sk,
					  struct sk_buff *skb,
					  struct request_sock *req,
					  struct dst_entry *dst,
					  struct request_sock *req_unhash,
					  bool *own_req)
{
	struct mptcp_subflow_context *listener = mptcp_subflow_ctx(sk);
	struct mptcp_subflow_request_sock *subflow_req;
	struct mptcp_options_received mp_opt;
	bool fallback, fallback_is_fatal;
	struct sock *new_msk = NULL;
	struct sock *child;

	pr_debug("listener=%p, req=%p, conn=%p", listener, req, listener->conn);

	/* After child creation we must look for MPC even when options
	 * are not parsed
	 */
	mp_opt.suboptions = 0;

	/* hopefully temporary handling for MP_JOIN+syncookie */
	subflow_req = mptcp_subflow_rsk(req);
	fallback_is_fatal = tcp_rsk(req)->is_mptcp && subflow_req->mp_join;
	fallback = !tcp_rsk(req)->is_mptcp;
	if (fallback)
		goto create_child;

	/* if the sk is MP_CAPABLE, we try to fetch the client key */
	if (subflow_req->mp_capable) {
		/* we can receive and accept an in-window, out-of-order pkt,
		 * which may not carry the MP_CAPABLE opt even on mptcp enabled
		 * paths: always try to extract the peer key, and fallback
		 * for packets missing it.
		 * Even OoO DSS packets coming legitimately after dropped or
		 * reordered MPC will cause fallback, but we don't have other
		 * options.
		 */
		mptcp_get_options(skb, &mp_opt);
		if (!(mp_opt.suboptions & OPTIONS_MPTCP_MPC)) {
			fallback = true;
			goto create_child;
		}

		new_msk = mptcp_sk_clone(listener->conn, &mp_opt, req);
		if (!new_msk)
			fallback = true;
	} else if (subflow_req->mp_join) {
		mptcp_get_options(skb, &mp_opt);
		if (!(mp_opt.suboptions & OPTIONS_MPTCP_MPJ) ||
		    !subflow_hmac_valid(req, &mp_opt) ||
		    !mptcp_can_accept_new_subflow(subflow_req->msk)) {
			SUBFLOW_REQ_INC_STATS(req, MPTCP_MIB_JOINACKMAC);
			fallback = true;
		}
	}

create_child:
	child = listener->icsk_af_ops->syn_recv_sock(sk, skb, req, dst,
						     req_unhash, own_req);

	if (child && *own_req) {
		struct mptcp_subflow_context *ctx = mptcp_subflow_ctx(child);

		tcp_rsk(req)->drop_req = false;

		/* we need to fallback on ctx allocation failure and on pre-reqs
		 * checking above. In the latter scenario we additionally need
		 * to reset the context to non MPTCP status.
		 */
		if (!ctx || fallback) {
			if (fallback_is_fatal) {
				subflow_add_reset_reason(skb, MPTCP_RST_EMPTCP);
				goto dispose_child;
			}

			subflow_drop_ctx(child);
			goto out;
		}

		/* ssk inherits options of listener sk */
		ctx->setsockopt_seq = listener->setsockopt_seq;

		if (ctx->mp_capable) {
			/* this can't race with mptcp_close(), as the msk is
			 * not yet exposed to user-space
			 */
			inet_sk_state_store((void *)new_msk, TCP_ESTABLISHED);

			/* record the newly created socket as the first msk
			 * subflow, but don't link it yet into conn_list
			 */
			WRITE_ONCE(mptcp_sk(new_msk)->first, child);

			/* new mpc subflow takes ownership of the newly
			 * created mptcp socket
			 */
			new_msk->sk_destruct = mptcp_sock_destruct;
			mptcp_sk(new_msk)->setsockopt_seq = ctx->setsockopt_seq;
			mptcp_pm_new_connection(mptcp_sk(new_msk), child, 1);
			mptcp_token_accept(subflow_req, mptcp_sk(new_msk));
			ctx->conn = new_msk;
			new_msk = NULL;

			/* with OoO packets we can reach here without ingress
			 * mpc option
			 */
			if (mp_opt.suboptions & OPTIONS_MPTCP_MPC)
				mptcp_subflow_fully_established(ctx, &mp_opt);
		} else if (ctx->mp_join) {
			struct mptcp_sock *owner;

			owner = subflow_req->msk;
			if (!owner) {
				subflow_add_reset_reason(skb, MPTCP_RST_EPROHIBIT);
				goto dispose_child;
			}

			/* move the msk reference ownership to the subflow */
			subflow_req->msk = NULL;
			ctx->conn = (struct sock *)owner;

			if (subflow_use_different_sport(owner, sk)) {
				pr_debug("ack inet_sport=%d %d",
					 ntohs(inet_sk(sk)->inet_sport),
					 ntohs(inet_sk((struct sock *)owner)->inet_sport));
				if (!mptcp_pm_sport_in_anno_list(owner, sk)) {
					SUBFLOW_REQ_INC_STATS(req, MPTCP_MIB_MISMATCHPORTACKRX);
					goto dispose_child;
				}
				SUBFLOW_REQ_INC_STATS(req, MPTCP_MIB_JOINPORTACKRX);
			}

			if (!mptcp_finish_join(child))
				goto dispose_child;

			SUBFLOW_REQ_INC_STATS(req, MPTCP_MIB_JOINACKRX);
			tcp_rsk(req)->drop_req = true;
		}
	}

out:
	/* dispose of the left over mptcp master, if any */
	if (unlikely(new_msk))
		mptcp_force_close(new_msk);

	/* check for expected invariant - should never trigger, just help
	 * catching earlier subtle bugs
	 */
	WARN_ON_ONCE(child && *own_req && tcp_sk(child)->is_mptcp &&
		     (!mptcp_subflow_ctx(child) ||
		      !mptcp_subflow_ctx(child)->conn));
	return child;

dispose_child:
	subflow_drop_ctx(child);
	tcp_rsk(req)->drop_req = true;
	inet_csk_prepare_for_destroy_sock(child);
	tcp_done(child);
	req->rsk_ops->send_reset(sk, skb);

	/* The last child reference will be released by the caller */
	return child;
}

static struct inet_connection_sock_af_ops subflow_specific;
static struct proto tcp_prot_override;

enum mapping_status {
	MAPPING_OK,
	MAPPING_INVALID,
	MAPPING_EMPTY,
	MAPPING_DATA_FIN,
	MAPPING_DUMMY
};

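/* A DSS mapping announces that the subflow-level byte range
 * [subflow_seq, subflow_seq + data_len) carries the MPTCP data-level range
 * [data_seq, data_seq + data_len). get_mapping_status() classifies the skb
 * at the head of the subflow receive queue against the current mapping:
 */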
static void dbg_bad_map(struct mptcp_subflow_context *subflow, u32 ssn)
{
	pr_debug("Bad mapping: ssn=%d map_seq=%d map_data_len=%d",
		 ssn, subflow->map_subflow_seq, subflow->map_data_len);
}

static bool skb_is_fully_mapped(struct sock *ssk, struct sk_buff *skb)
{
	struct mptcp_subflow_context *subflow = mptcp_subflow_ctx(ssk);
	unsigned int skb_consumed;

	skb_consumed = tcp_sk(ssk)->copied_seq - TCP_SKB_CB(skb)->seq;
	if (WARN_ON_ONCE(skb_consumed >= skb->len))
		return true;

	return skb->len - skb_consumed <= subflow->map_data_len -
					  mptcp_subflow_get_map_offset(subflow);
}

static bool validate_mapping(struct sock *ssk, struct sk_buff *skb)
{
	struct mptcp_subflow_context *subflow = mptcp_subflow_ctx(ssk);
	u32 ssn = tcp_sk(ssk)->copied_seq - subflow->ssn_offset;

	if (unlikely(before(ssn, subflow->map_subflow_seq))) {
		/* Mapping covers data later in the subflow stream,
		 * currently unsupported.
		 */
		dbg_bad_map(subflow, ssn);
		return false;
	}
	if (unlikely(!before(ssn, subflow->map_subflow_seq +
				  subflow->map_data_len))) {
		/* Mapping only covers past subflow data, invalid */
		dbg_bad_map(subflow, ssn);
		return false;
	}
	return true;
}

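/* Example for validate_mapping(): with ssn_offset=1000 and copied_seq=1500,
 * ssn is 500; a mapping with map_subflow_seq=400 and map_data_len=200
 * covers [400, 600) and is accepted, while mappings covering only [0, 400)
 * or starting at 600 are rejected.
 */
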
static enum mapping_status validate_data_csum(struct sock *ssk, struct sk_buff *skb,
					      bool csum_reqd)
{
	struct mptcp_subflow_context *subflow = mptcp_subflow_ctx(ssk);
	u32 offset, seq, delta;
	__sum16 csum;
	int len;

	if (!csum_reqd)
		return MAPPING_OK;

	/* mapping already validated on previous traversal */
	if (subflow->map_csum_len == subflow->map_data_len)
		return MAPPING_OK;

	/* traverse the receive queue, ensuring it contains a full
	 * DSS mapping and accumulating the related csum.
	 * Preserve the accumulated csum across multiple calls, to compute
	 * the csum only once
	 */
	delta = subflow->map_data_len - subflow->map_csum_len;
	for (;;) {
		seq = tcp_sk(ssk)->copied_seq + subflow->map_csum_len;
		offset = seq - TCP_SKB_CB(skb)->seq;

		/* if the current skb has not been accounted yet, csum its contents
		 * up to the amount covered by the current DSS
		 */
		if (offset < skb->len) {
			__wsum csum;

			len = min(skb->len - offset, delta);
			csum = skb_checksum(skb, offset, len, 0);
			subflow->map_data_csum = csum_block_add(subflow->map_data_csum, csum,
								subflow->map_csum_len);

			delta -= len;
			subflow->map_csum_len += len;
		}
		if (delta == 0)
			break;

		if (skb_queue_is_last(&ssk->sk_receive_queue, skb)) {
			/* if this subflow is closed, the partial mapping
			 * will be never completed; flush the pending skbs, so
			 * that subflow_sched_work_if_closed() can kick in
			 */
			if (unlikely(ssk->sk_state == TCP_CLOSE))
				while ((skb = skb_peek(&ssk->sk_receive_queue)))
					sk_eat_skb(ssk, skb);

			/* not enough data to validate the csum */
			return MAPPING_EMPTY;
		}

		/* the DSS mapping for next skbs will be validated later,
		 * when a get_mapping_status call will process such skb
		 */
		skb = skb->next;
	}

	/* note that 'map_data_len' accounts only for the carried data, does
	 * not include the eventual seq increment due to the data fin,
	 * while the pseudo header requires the original DSS data len,
	 * including that
	 */
	csum = __mptcp_make_csum(subflow->map_seq,
				 subflow->map_subflow_seq,
				 subflow->map_data_len + subflow->map_data_fin,
				 subflow->map_data_csum);
	if (unlikely(csum)) {
		MPTCP_INC_STATS(sock_net(ssk), MPTCP_MIB_DATACSUMERR);
		subflow->send_mp_fail = 1;
		MPTCP_INC_STATS(sock_net(ssk), MPTCP_MIB_MPFAILTX);
		return subflow->mp_join ? MAPPING_INVALID : MAPPING_DUMMY;
	}

	return MAPPING_OK;
}

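/* Per RFC 8684 §3.3.1 the DSS checksum covers the payload plus a
 * pseudo-header holding data_seq, subflow_seq and the data-level length; a
 * DATA_FIN consumes one byte of data-level sequence space, hence the
 * map_data_len + map_data_fin in the __mptcp_make_csum() call above.
 */
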
static enum mapping_status get_mapping_status(struct sock *ssk,
					      struct mptcp_sock *msk)
{
	struct mptcp_subflow_context *subflow = mptcp_subflow_ctx(ssk);
	bool csum_reqd = READ_ONCE(msk->csum_enabled);
	struct mptcp_ext *mpext;
	struct sk_buff *skb;
	u16 data_len;
	u64 map_seq;

	skb = skb_peek(&ssk->sk_receive_queue);
	if (!skb)
		return MAPPING_EMPTY;

	if (mptcp_check_fallback(ssk))
		return MAPPING_DUMMY;

	mpext = mptcp_get_ext(skb);
	if (!mpext || !mpext->use_map) {
		if (!subflow->map_valid && !skb->len) {
			/* the TCP stack delivers 0-len FIN pkts to the receive
			 * queue; those are the only 0-len pkts ever expected here,
			 * and we can admit no mapping only for 0-len pkts
			 */
			if (!(TCP_SKB_CB(skb)->tcp_flags & TCPHDR_FIN))
				WARN_ONCE(1, "0len seq %d:%d flags %x",
					  TCP_SKB_CB(skb)->seq,
					  TCP_SKB_CB(skb)->end_seq,
					  TCP_SKB_CB(skb)->tcp_flags);
			sk_eat_skb(ssk, skb);
			return MAPPING_EMPTY;
		}

		if (!subflow->map_valid)
			return MAPPING_INVALID;

		goto validate_seq;
	}

	trace_get_mapping_status(mpext);

	data_len = mpext->data_len;
	if (data_len == 0) {
		MPTCP_INC_STATS(sock_net(ssk), MPTCP_MIB_INFINITEMAPRX);
		return MAPPING_INVALID;
	}

	if (mpext->data_fin == 1) {
		if (data_len == 1) {
			bool updated = mptcp_update_rcv_data_fin(msk, mpext->data_seq,
								 mpext->dsn64);
			pr_debug("DATA_FIN with no payload seq=%llu", mpext->data_seq);
			if (subflow->map_valid) {
				/* A DATA_FIN might arrive in a DSS
				 * option before the previous mapping
				 * has been fully consumed. Continue
				 * handling the existing mapping.
				 */
				skb_ext_del(skb, SKB_EXT_MPTCP);
				return MAPPING_OK;
			} else {
				if (updated && schedule_work(&msk->work))
					sock_hold((struct sock *)msk);

				return MAPPING_DATA_FIN;
			}
		} else {
			u64 data_fin_seq = mpext->data_seq + data_len - 1;

			/* If mpext->data_seq is a 32-bit value, data_fin_seq
			 * must also be limited to 32 bits.
			 */
			if (!mpext->dsn64)
				data_fin_seq &= GENMASK_ULL(31, 0);

			mptcp_update_rcv_data_fin(msk, data_fin_seq, mpext->dsn64);
			pr_debug("DATA_FIN with mapping seq=%llu dsn64=%d",
				 data_fin_seq, mpext->dsn64);
		}

		/* Adjust for DATA_FIN using 1 byte of sequence space */
		data_len--;
	}

	map_seq = mptcp_expand_seq(READ_ONCE(msk->ack_seq), mpext->data_seq, mpext->dsn64);
	WRITE_ONCE(mptcp_sk(subflow->conn)->use_64bit_ack, !!mpext->dsn64);

	if (subflow->map_valid) {
		/* Allow replacing only with an identical map */
		if (subflow->map_seq == map_seq &&
		    subflow->map_subflow_seq == mpext->subflow_seq &&
		    subflow->map_data_len == data_len &&
		    subflow->map_csum_reqd == mpext->csum_reqd) {
			skb_ext_del(skb, SKB_EXT_MPTCP);
			goto validate_csum;
		}

		/* If this skb data are fully covered by the current mapping,
		 * the new map would need caching, which is not supported
		 */
		if (skb_is_fully_mapped(ssk, skb)) {
			MPTCP_INC_STATS(sock_net(ssk), MPTCP_MIB_DSSNOMATCH);
			return MAPPING_INVALID;
		}

		/* will validate the next map after consuming the current one */
		goto validate_csum;
	}

	subflow->map_seq = map_seq;
	subflow->map_subflow_seq = mpext->subflow_seq;
	subflow->map_data_len = data_len;
	subflow->map_valid = 1;
	subflow->map_data_fin = mpext->data_fin;
	subflow->mpc_map = mpext->mpc_map;
	subflow->map_csum_reqd = mpext->csum_reqd;
	subflow->map_csum_len = 0;
	subflow->map_data_csum = csum_unfold(mpext->csum);

	/* Cfr RFC 8684 Section 3.3.0 */
	if (unlikely(subflow->map_csum_reqd != csum_reqd))
		return MAPPING_INVALID;

	pr_debug("new map seq=%llu subflow_seq=%u data_len=%u csum=%d:%u",
		 subflow->map_seq, subflow->map_subflow_seq,
		 subflow->map_data_len, subflow->map_csum_reqd,
		 subflow->map_data_csum);

validate_seq:
	/* we revalidate valid mapping on new skb, because we must ensure
	 * the current skb is completely covered by the available mapping
	 */
	if (!validate_mapping(ssk, skb)) {
		MPTCP_INC_STATS(sock_net(ssk), MPTCP_MIB_DSSTCPMISMATCH);
		return MAPPING_INVALID;
	}

	skb_ext_del(skb, SKB_EXT_MPTCP);

validate_csum:
	return validate_data_csum(ssk, skb, csum_reqd);
}

static void mptcp_subflow_discard_data(struct sock *ssk, struct sk_buff *skb,
				       u64 limit)
{
	struct mptcp_subflow_context *subflow = mptcp_subflow_ctx(ssk);
	bool fin = TCP_SKB_CB(skb)->tcp_flags & TCPHDR_FIN;
	u32 incr;

	incr = limit >= skb->len ? skb->len + fin : limit;

	pr_debug("discarding=%d len=%d seq=%d", incr, skb->len,
		 subflow->map_subflow_seq);
	MPTCP_INC_STATS(sock_net(ssk), MPTCP_MIB_DUPDATA);
	tcp_sk(ssk)->copied_seq += incr;
	if (!before(tcp_sk(ssk)->copied_seq, TCP_SKB_CB(skb)->end_seq))
		sk_eat_skb(ssk, skb);
	if (mptcp_subflow_get_map_offset(subflow) >= subflow->map_data_len)
		subflow->map_valid = 0;
}

/* sched mptcp worker to remove the subflow if no more data is pending */
static void subflow_sched_work_if_closed(struct mptcp_sock *msk, struct sock *ssk)
{
	struct sock *sk = (struct sock *)msk;

	if (likely(ssk->sk_state != TCP_CLOSE))
		return;

	if (skb_queue_empty(&ssk->sk_receive_queue) &&
	    !test_and_set_bit(MPTCP_WORK_CLOSE_SUBFLOW, &msk->flags)) {
		sock_hold(sk);
		if (!schedule_work(&msk->work))
			sock_put(sk);
	}
}

static bool subflow_check_data_avail(struct sock *ssk)
{
	struct mptcp_subflow_context *subflow = mptcp_subflow_ctx(ssk);
	enum mapping_status status;
	struct mptcp_sock *msk;
	struct sk_buff *skb;

	if (!skb_peek(&ssk->sk_receive_queue))
		WRITE_ONCE(subflow->data_avail, 0);
	if (subflow->data_avail)
		return true;

	msk = mptcp_sk(subflow->conn);
	for (;;) {
		u64 ack_seq;
		u64 old_ack;

		status = get_mapping_status(ssk, msk);
		trace_subflow_check_data_avail(status, skb_peek(&ssk->sk_receive_queue));
		if (unlikely(status == MAPPING_INVALID))
			goto fallback;

		if (unlikely(status == MAPPING_DUMMY))
			goto fallback;

		if (status != MAPPING_OK)
			goto no_data;

		skb = skb_peek(&ssk->sk_receive_queue);
		if (WARN_ON_ONCE(!skb))
			goto no_data;

		/* if msk lacks the remote key, this subflow must provide an
		 * MP_CAPABLE-based mapping
		 */
		if (unlikely(!READ_ONCE(msk->can_ack))) {
			if (!subflow->mpc_map)
				goto fallback;
			WRITE_ONCE(msk->remote_key, subflow->remote_key);
			WRITE_ONCE(msk->ack_seq, subflow->map_seq);
			WRITE_ONCE(msk->can_ack, true);
		}

		old_ack = READ_ONCE(msk->ack_seq);
		ack_seq = mptcp_subflow_get_mapped_dsn(subflow);
		pr_debug("msk ack_seq=%llx subflow ack_seq=%llx", old_ack,
			 ack_seq);
		if (unlikely(before64(ack_seq, old_ack))) {
			mptcp_subflow_discard_data(ssk, skb, old_ack - ack_seq);
			continue;
		}

		WRITE_ONCE(subflow->data_avail, MPTCP_SUBFLOW_DATA_AVAIL);
		break;
	}
	return true;

no_data:
	subflow_sched_work_if_closed(msk, ssk);
	return false;

fallback:
	/* RFC 8684 section 3.7. */
	if (subflow->send_mp_fail) {
		if (mptcp_has_another_subflow(ssk)) {
			while ((skb = skb_peek(&ssk->sk_receive_queue)))
				sk_eat_skb(ssk, skb);
		}
		ssk->sk_err = EBADMSG;
		tcp_set_state(ssk, TCP_CLOSE);
		subflow->reset_transient = 0;
		subflow->reset_reason = MPTCP_RST_EMIDDLEBOX;
		tcp_send_active_reset(ssk, GFP_ATOMIC);
		WRITE_ONCE(subflow->data_avail, 0);
		return true;
	}

	if (subflow->mp_join || subflow->fully_established) {
		/* fatal protocol error, close the socket.
		 * subflow_error_report() will introduce the appropriate barriers
		 */
		ssk->sk_err = EBADMSG;
		tcp_set_state(ssk, TCP_CLOSE);
		subflow->reset_transient = 0;
		subflow->reset_reason = MPTCP_RST_EMPTCP;
		tcp_send_active_reset(ssk, GFP_ATOMIC);
		WRITE_ONCE(subflow->data_avail, 0);
		return false;
	}

	__mptcp_do_fallback(msk);
	skb = skb_peek(&ssk->sk_receive_queue);
	subflow->map_valid = 1;
	subflow->map_seq = READ_ONCE(msk->ack_seq);
	subflow->map_data_len = skb->len;
	subflow->map_subflow_seq = tcp_sk(ssk)->copied_seq - subflow->ssn_offset;
	WRITE_ONCE(subflow->data_avail, MPTCP_SUBFLOW_DATA_AVAIL);
	return true;
}

bool mptcp_subflow_data_available(struct sock *sk)
{
	struct mptcp_subflow_context *subflow = mptcp_subflow_ctx(sk);

	/* check if current mapping is still valid */
	if (subflow->map_valid &&
	    mptcp_subflow_get_map_offset(subflow) >= subflow->map_data_len) {
		subflow->map_valid = 0;
		WRITE_ONCE(subflow->data_avail, 0);

		pr_debug("Done with mapping: seq=%u data_len=%u",
			 subflow->map_subflow_seq,
			 subflow->map_data_len);
	}

	return subflow_check_data_avail(sk);
}

/* If ssk has an mptcp parent socket, use the mptcp rcvbuf occupancy,
 * not the ssk one.
 *
 * In mptcp, rwin is about the mptcp-level connection data.
 *
 * Data that is still on the ssk rx queue can thus be ignored,
 * as far as the mptcp peer is concerned that data is still inflight.
 * DSS ACK is updated when the skb is moved to the mptcp rx queue.
 */
void mptcp_space(const struct sock *ssk, int *space, int *full_space)
{
	const struct mptcp_subflow_context *subflow = mptcp_subflow_ctx(ssk);
	const struct sock *sk = subflow->conn;

	*space = __mptcp_space(sk);
	*full_space = tcp_full_space(sk);
}

void __mptcp_error_report(struct sock *sk)
{
	struct mptcp_subflow_context *subflow;
	struct mptcp_sock *msk = mptcp_sk(sk);

	mptcp_for_each_subflow(msk, subflow) {
		struct sock *ssk = mptcp_subflow_tcp_sock(subflow);
		int err = sock_error(ssk);

		if (!err)
			continue;

		/* only propagate errors on fallen-back sockets or
		 * on MPC connect
		 */
		if (sk->sk_state != TCP_SYN_SENT && !__mptcp_check_fallback(msk))
			continue;

		inet_sk_state_store(sk, inet_sk_state_load(ssk));
		sk->sk_err = -err;

		/* This barrier is coupled with smp_rmb() in mptcp_poll() */
		smp_wmb();
		sk_error_report(sk);
		break;
	}
}

static void subflow_error_report(struct sock *ssk)
{
	struct sock *sk = mptcp_subflow_ctx(ssk)->conn;

	mptcp_data_lock(sk);
	if (!sock_owned_by_user(sk))
		__mptcp_error_report(sk);
	else
		__set_bit(MPTCP_ERROR_REPORT, &mptcp_sk(sk)->cb_flags);
	mptcp_data_unlock(sk);
}

static void subflow_data_ready(struct sock *sk)
{
	struct mptcp_subflow_context *subflow = mptcp_subflow_ctx(sk);
	u16 state = 1 << inet_sk_state_load(sk);
	struct sock *parent = subflow->conn;
	struct mptcp_sock *msk;

	msk = mptcp_sk(parent);
	if (state & TCPF_LISTEN) {
		/* MPJ subflows are removed from the accept queue before
		 * reaching here, avoid stray wakeups
		 */
		if (reqsk_queue_empty(&inet_csk(sk)->icsk_accept_queue))
			return;

		parent->sk_data_ready(parent);
		return;
	}

	WARN_ON_ONCE(!__mptcp_check_fallback(msk) && !subflow->mp_capable &&
		     !subflow->mp_join && !(state & TCPF_CLOSE));

	if (mptcp_subflow_data_available(sk))
		mptcp_data_ready(parent, sk);
	else if (unlikely(sk->sk_err))
		subflow_error_report(sk);
}

static void subflow_write_space(struct sock *ssk)
{
	struct sock *sk = mptcp_subflow_ctx(ssk)->conn;

	mptcp_propagate_sndbuf(sk, ssk);
	mptcp_write_space(sk);
}

static struct inet_connection_sock_af_ops *
subflow_default_af_ops(struct sock *sk)
{
#if IS_ENABLED(CONFIG_MPTCP_IPV6)
	if (sk->sk_family == AF_INET6)
		return &subflow_v6_specific;
#endif
	return &subflow_specific;
}

#if IS_ENABLED(CONFIG_MPTCP_IPV6)
void mptcpv6_handle_mapped(struct sock *sk, bool mapped)
{
	struct mptcp_subflow_context *subflow = mptcp_subflow_ctx(sk);
	struct inet_connection_sock *icsk = inet_csk(sk);
	struct inet_connection_sock_af_ops *target;

	target = mapped ? &subflow_v6m_specific : subflow_default_af_ops(sk);

	pr_debug("subflow=%p family=%d ops=%p target=%p mapped=%d",
		 subflow, sk->sk_family, icsk->icsk_af_ops, target, mapped);

	if (likely(icsk->icsk_af_ops == target))
		return;

	subflow->icsk_af_ops = icsk->icsk_af_ops;
	icsk->icsk_af_ops = target;
}
#endif

void mptcp_info2sockaddr(const struct mptcp_addr_info *info,
			 struct sockaddr_storage *addr,
			 unsigned short family)
{
	memset(addr, 0, sizeof(*addr));
	addr->ss_family = family;
	if (addr->ss_family == AF_INET) {
		struct sockaddr_in *in_addr = (struct sockaddr_in *)addr;

		if (info->family == AF_INET)
			in_addr->sin_addr = info->addr;
#if IS_ENABLED(CONFIG_MPTCP_IPV6)
		else if (ipv6_addr_v4mapped(&info->addr6))
			in_addr->sin_addr.s_addr = info->addr6.s6_addr32[3];
#endif
		in_addr->sin_port = info->port;
	}
#if IS_ENABLED(CONFIG_MPTCP_IPV6)
	else if (addr->ss_family == AF_INET6) {
		struct sockaddr_in6 *in6_addr = (struct sockaddr_in6 *)addr;

		if (info->family == AF_INET)
			ipv6_addr_set_v4mapped(info->addr.s_addr,
					       &in6_addr->sin6_addr);
		else
			in6_addr->sin6_addr = info->addr6;
		in6_addr->sin6_port = info->port;
	}
#endif
}

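/* Open a new join subflow towards @remote: create a kernel TCP socket, pick
 * a fresh nonce and local address ID, bind it to @loc and connect with the
 * MP_JOIN token derived from the msk keys below.
 */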
int __mptcp_subflow_connect(struct sock *sk, const struct mptcp_addr_info *loc,
			    const struct mptcp_addr_info *remote)
{
	struct mptcp_sock *msk = mptcp_sk(sk);
	struct mptcp_subflow_context *subflow;
	struct sockaddr_storage addr;
	int remote_id = remote->id;
	int local_id = loc->id;
	struct socket *sf;
	struct sock *ssk;
	u32 remote_token;
	int addrlen;
	int ifindex;
	u8 flags;
	int err;

	if (!mptcp_is_fully_established(sk))
		return -ENOTCONN;

	err = mptcp_subflow_create_socket(sk, &sf);
	if (err)
		return err;

	ssk = sf->sk;
	subflow = mptcp_subflow_ctx(ssk);
	do {
		get_random_bytes(&subflow->local_nonce, sizeof(u32));
	} while (!subflow->local_nonce);

	if (!local_id) {
		err = mptcp_pm_get_local_id(msk, (struct sock_common *)ssk);
		if (err < 0)
			goto failed;

		local_id = err;
	}

	mptcp_pm_get_flags_and_ifindex_by_id(sock_net(sk), local_id,
					     &flags, &ifindex);
	subflow->remote_key = msk->remote_key;
	subflow->local_key = msk->local_key;
	subflow->token = msk->token;
	mptcp_info2sockaddr(loc, &addr, ssk->sk_family);

	addrlen = sizeof(struct sockaddr_in);
#if IS_ENABLED(CONFIG_MPTCP_IPV6)
	if (addr.ss_family == AF_INET6)
		addrlen = sizeof(struct sockaddr_in6);
#endif
	mptcp_sockopt_sync(msk, ssk);

	ssk->sk_bound_dev_if = ifindex;
	err = kernel_bind(sf, (struct sockaddr *)&addr, addrlen);
	if (err)
		goto failed;

	mptcp_crypto_key_sha(subflow->remote_key, &remote_token, NULL);
	pr_debug("msk=%p remote_token=%u local_id=%d remote_id=%d", msk,
		 remote_token, local_id, remote_id);
	subflow->remote_token = remote_token;
	subflow->local_id = local_id;
	subflow->remote_id = remote_id;
	subflow->request_join = 1;
	subflow->request_bkup = !!(flags & MPTCP_PM_ADDR_FLAG_BACKUP);
	mptcp_info2sockaddr(remote, &addr, ssk->sk_family);

	sock_hold(ssk);
	list_add_tail(&subflow->node, &msk->conn_list);
	err = kernel_connect(sf, (struct sockaddr *)&addr, addrlen, O_NONBLOCK);
	if (err && err != -EINPROGRESS)
		goto failed_unlink;

	/* discard the subflow socket */
	mptcp_sock_graft(ssk, sk->sk_socket);
	iput(SOCK_INODE(sf));
	return err;

failed_unlink:
	list_del(&subflow->node);
	sock_put(mptcp_subflow_tcp_sock(subflow));

failed:
	subflow->disposable = 1;
	sock_release(sf);
	return err;
}

static void mptcp_attach_cgroup(struct sock *parent, struct sock *child)
{
#ifdef CONFIG_SOCK_CGROUP_DATA
	struct sock_cgroup_data *parent_skcd = &parent->sk_cgrp_data,
				*child_skcd = &child->sk_cgrp_data;

	/* only the additional subflows created by kworkers have to be modified */
	if (cgroup_id(sock_cgroup_ptr(parent_skcd)) !=
	    cgroup_id(sock_cgroup_ptr(child_skcd))) {
#ifdef CONFIG_MEMCG
		struct mem_cgroup *memcg = parent->sk_memcg;

		mem_cgroup_sk_free(child);
		if (memcg && css_tryget(&memcg->css))
			child->sk_memcg = memcg;
#endif /* CONFIG_MEMCG */

		cgroup_sk_free(child_skcd);
		*child_skcd = *parent_skcd;
		cgroup_sk_clone(child_skcd);
	}
#endif /* CONFIG_SOCK_CGROUP_DATA */
}

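/* The subflow sk_prot is switched to a copy of tcp(v6)_prot whose
 * release_cb additionally runs any pending delegated subflow actions, see
 * tcp_release_cb_override().
 */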
static void mptcp_subflow_ops_override(struct sock *ssk)
{
#if IS_ENABLED(CONFIG_MPTCP_IPV6)
	if (ssk->sk_prot == &tcpv6_prot)
		ssk->sk_prot = &tcpv6_prot_override;
	else
#endif
		ssk->sk_prot = &tcp_prot_override;
}

static void mptcp_subflow_ops_undo_override(struct sock *ssk)
{
#if IS_ENABLED(CONFIG_MPTCP_IPV6)
	if (ssk->sk_prot == &tcpv6_prot_override)
		ssk->sk_prot = &tcpv6_prot;
	else
#endif
		ssk->sk_prot = &tcp_prot;
}

int mptcp_subflow_create_socket(struct sock *sk, struct socket **new_sock)
{
	struct mptcp_subflow_context *subflow;
	struct net *net = sock_net(sk);
	struct socket *sf;
	int err;

	/* un-accepted server sockets can reach here - on bad configuration
	 * bail early to avoid greater trouble later
	 */
	if (unlikely(!sk->sk_socket))
		return -EINVAL;

	err = sock_create_kern(net, sk->sk_family, SOCK_STREAM, IPPROTO_TCP,
			       &sf);
	if (err)
		return err;

	lock_sock(sf->sk);

	/* the newly created socket has to be in the same cgroup as its parent */
	mptcp_attach_cgroup(sk, sf->sk);

	/* kernel sockets do not by default acquire net ref, but TCP timer
	 * needs it.
	 */
	sf->sk->sk_net_refcnt = 1;
	get_net_track(net, &sf->sk->ns_tracker, GFP_KERNEL);
	sock_inuse_add(net, 1);
	err = tcp_set_ulp(sf->sk, "mptcp");
	release_sock(sf->sk);

	if (err) {
		sock_release(sf);
		return err;
	}

	/* the newly created socket really belongs to the owning MPTCP master
	 * socket, even if for additional subflows the allocation is performed
	 * by a kernel workqueue. Adjust inode references, so that the
	 * procfs/diag interfaces really show this one belonging to the correct
	 * owner
	 */
	SOCK_INODE(sf)->i_ino = SOCK_INODE(sk->sk_socket)->i_ino;
	SOCK_INODE(sf)->i_uid = SOCK_INODE(sk->sk_socket)->i_uid;
	SOCK_INODE(sf)->i_gid = SOCK_INODE(sk->sk_socket)->i_gid;

	subflow = mptcp_subflow_ctx(sf->sk);
	pr_debug("subflow=%p", subflow);

	*new_sock = sf;
	sock_hold(sk);
	subflow->conn = sk;
	mptcp_subflow_ops_override(sf->sk);

	return 0;
}

static struct mptcp_subflow_context *subflow_create_ctx(struct sock *sk,
							gfp_t priority)
{
	struct inet_connection_sock *icsk = inet_csk(sk);
	struct mptcp_subflow_context *ctx;

	ctx = kzalloc(sizeof(*ctx), priority);
	if (!ctx)
		return NULL;

	rcu_assign_pointer(icsk->icsk_ulp_data, ctx);
	INIT_LIST_HEAD(&ctx->node);
	INIT_LIST_HEAD(&ctx->delegated_node);

	pr_debug("subflow=%p", ctx);

	ctx->tcp_sock = sk;

	return ctx;
}

static void __subflow_state_change(struct sock *sk)
{
	struct socket_wq *wq;

	rcu_read_lock();
	wq = rcu_dereference(sk->sk_wq);
	if (skwq_has_sleeper(wq))
		wake_up_interruptible_all(&wq->wait);
	rcu_read_unlock();
}

static bool subflow_is_done(const struct sock *sk)
{
	return sk->sk_shutdown & RCV_SHUTDOWN || sk->sk_state == TCP_CLOSE;
}

static void subflow_state_change(struct sock *sk)
{
	struct mptcp_subflow_context *subflow = mptcp_subflow_ctx(sk);
	struct sock *parent = subflow->conn;

	__subflow_state_change(sk);

	if (subflow_simultaneous_connect(sk)) {
		mptcp_propagate_sndbuf(parent, sk);
		mptcp_do_fallback(sk);
		mptcp_rcv_space_init(mptcp_sk(parent), sk);
		pr_fallback(mptcp_sk(parent));
		subflow->conn_finished = 1;
		mptcp_set_connected(parent);
	}

	/* as recvmsg() does not acquire the subflow socket for ssk selection
	 * a fin packet carrying a DSS can be unnoticed if we don't trigger
	 * the data available machinery here.
	 */
	if (mptcp_subflow_data_available(sk))
		mptcp_data_ready(parent, sk);
	else if (unlikely(sk->sk_err))
		subflow_error_report(sk);

	subflow_sched_work_if_closed(mptcp_sk(parent), sk);

	if (__mptcp_check_fallback(mptcp_sk(parent)) &&
	    !subflow->rx_eof && subflow_is_done(sk)) {
		subflow->rx_eof = 1;
		mptcp_subflow_eof(parent);
	}
}

static int subflow_ulp_init(struct sock *sk)
{
	struct inet_connection_sock *icsk = inet_csk(sk);
	struct mptcp_subflow_context *ctx;
	struct tcp_sock *tp = tcp_sk(sk);
	int err = 0;

	/* disallow attaching ULP to a socket unless it has been
	 * created with sock_create_kern()
	 */
	if (!sk->sk_kern_sock) {
		err = -EOPNOTSUPP;
		goto out;
	}

	ctx = subflow_create_ctx(sk, GFP_KERNEL);
	if (!ctx) {
		err = -ENOMEM;
		goto out;
	}

	pr_debug("subflow=%p, family=%d", ctx, sk->sk_family);

	tp->is_mptcp = 1;
	ctx->icsk_af_ops = icsk->icsk_af_ops;
	icsk->icsk_af_ops = subflow_default_af_ops(sk);
	ctx->tcp_data_ready = sk->sk_data_ready;
	ctx->tcp_state_change = sk->sk_state_change;
	ctx->tcp_write_space = sk->sk_write_space;
	ctx->tcp_error_report = sk->sk_error_report;
	sk->sk_data_ready = subflow_data_ready;
	sk->sk_write_space = subflow_write_space;
	sk->sk_state_change = subflow_state_change;
	sk->sk_error_report = subflow_error_report;
out:
	return err;
}

static void subflow_ulp_release(struct sock *ssk)
{
	struct mptcp_subflow_context *ctx = mptcp_subflow_ctx(ssk);
	bool release = true;
	struct sock *sk;

	if (!ctx)
		return;

	sk = ctx->conn;
	if (sk) {
		/* if the msk has been orphaned, keep the ctx
		 * alive, will be freed by __mptcp_close_ssk(),
		 * when the subflow is still unaccepted
		 */
		release = ctx->disposable || list_empty(&ctx->node);
		sock_put(sk);
	}

	mptcp_subflow_ops_undo_override(ssk);
	if (release)
		kfree_rcu(ctx, rcu);
}

static void subflow_ulp_clone(const struct request_sock *req,
			      struct sock *newsk,
			      const gfp_t priority)
{
	struct mptcp_subflow_request_sock *subflow_req = mptcp_subflow_rsk(req);
	struct mptcp_subflow_context *old_ctx = mptcp_subflow_ctx(newsk);
	struct mptcp_subflow_context *new_ctx;

	if (!tcp_rsk(req)->is_mptcp ||
	    (!subflow_req->mp_capable && !subflow_req->mp_join)) {
		subflow_ulp_fallback(newsk, old_ctx);
		return;
	}

	new_ctx = subflow_create_ctx(newsk, priority);
	if (!new_ctx) {
		subflow_ulp_fallback(newsk, old_ctx);
		return;
	}

	new_ctx->conn_finished = 1;
	new_ctx->icsk_af_ops = old_ctx->icsk_af_ops;
	new_ctx->tcp_data_ready = old_ctx->tcp_data_ready;
	new_ctx->tcp_state_change = old_ctx->tcp_state_change;
	new_ctx->tcp_write_space = old_ctx->tcp_write_space;
	new_ctx->tcp_error_report = old_ctx->tcp_error_report;
	new_ctx->rel_write_seq = 1;
	new_ctx->tcp_sock = newsk;

	if (subflow_req->mp_capable) {
		/* see comments in subflow_syn_recv_sock(), MPTCP connection
		 * is fully established only after we receive the remote key
		 */
		new_ctx->mp_capable = 1;
		new_ctx->local_key = subflow_req->local_key;
		new_ctx->token = subflow_req->token;
		new_ctx->ssn_offset = subflow_req->ssn_offset;
		new_ctx->idsn = subflow_req->idsn;
	} else if (subflow_req->mp_join) {
		new_ctx->ssn_offset = subflow_req->ssn_offset;
		new_ctx->mp_join = 1;
		new_ctx->fully_established = 1;
		new_ctx->backup = subflow_req->backup;
		new_ctx->local_id = subflow_req->local_id;
		new_ctx->remote_id = subflow_req->remote_id;
		new_ctx->token = subflow_req->token;
		new_ctx->thmac = subflow_req->thmac;
	}
}

static void tcp_release_cb_override(struct sock *ssk)
{
	struct mptcp_subflow_context *subflow = mptcp_subflow_ctx(ssk);

	if (mptcp_subflow_has_delegated_action(subflow))
		mptcp_subflow_process_delegated(ssk);

	tcp_release_cb(ssk);
}

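/* MPTCP hooks into TCP as a ULP named "mptcp": init attaches a subflow
 * context to a kernel socket, release drops it, and clone propagates the
 * context from the listener to a freshly accepted child.
 */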
static struct tcp_ulp_ops subflow_ulp_ops __read_mostly = {
	.name		= "mptcp",
	.owner		= THIS_MODULE,
	.init		= subflow_ulp_init,
	.release	= subflow_ulp_release,
	.clone		= subflow_ulp_clone,
};

static int subflow_ops_init(struct request_sock_ops *subflow_ops)
{
	subflow_ops->obj_size = sizeof(struct mptcp_subflow_request_sock);
	subflow_ops->slab_name = "request_sock_subflow";

	subflow_ops->slab = kmem_cache_create(subflow_ops->slab_name,
					      subflow_ops->obj_size, 0,
					      SLAB_ACCOUNT |
					      SLAB_TYPESAFE_BY_RCU,
					      NULL);
	if (!subflow_ops->slab)
		return -ENOMEM;

	subflow_ops->destructor = subflow_req_destructor;

	return 0;
}

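/* The request sock slab above is created with SLAB_TYPESAFE_BY_RCU,
 * presumably because request socks may be looked up locklessly under RCU,
 * mirroring what plain TCP does for its request sock cache.
 */
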
void __init mptcp_subflow_init(void)
{
	mptcp_subflow_request_sock_ops = tcp_request_sock_ops;
	if (subflow_ops_init(&mptcp_subflow_request_sock_ops) != 0)
		panic("MPTCP: failed to init subflow request sock ops\n");

	subflow_request_sock_ipv4_ops = tcp_request_sock_ipv4_ops;
	subflow_request_sock_ipv4_ops.route_req = subflow_v4_route_req;

	subflow_specific = ipv4_specific;
	subflow_specific.conn_request = subflow_v4_conn_request;
	subflow_specific.syn_recv_sock = subflow_syn_recv_sock;
	subflow_specific.sk_rx_dst_set = subflow_finish_connect;

	tcp_prot_override = tcp_prot;
	tcp_prot_override.release_cb = tcp_release_cb_override;

#if IS_ENABLED(CONFIG_MPTCP_IPV6)
	subflow_request_sock_ipv6_ops = tcp_request_sock_ipv6_ops;
	subflow_request_sock_ipv6_ops.route_req = subflow_v6_route_req;

	subflow_v6_specific = ipv6_specific;
	subflow_v6_specific.conn_request = subflow_v6_conn_request;
	subflow_v6_specific.syn_recv_sock = subflow_syn_recv_sock;
	subflow_v6_specific.sk_rx_dst_set = subflow_finish_connect;

	subflow_v6m_specific = subflow_v6_specific;
	subflow_v6m_specific.queue_xmit = ipv4_specific.queue_xmit;
	subflow_v6m_specific.send_check = ipv4_specific.send_check;
	subflow_v6m_specific.net_header_len = ipv4_specific.net_header_len;
	subflow_v6m_specific.mtu_reduced = ipv4_specific.mtu_reduced;
	subflow_v6m_specific.net_frag_header_len = 0;

	tcpv6_prot_override = tcpv6_prot;
	tcpv6_prot_override.release_cb = tcp_release_cb_override;
#endif

	mptcp_diag_subflow_init(&subflow_ulp_ops);

	if (tcp_register_ulp(&subflow_ulp_ops) != 0)
		panic("MPTCP: failed to register subflows to ULP\n");
}