// SPDX-License-Identifier: GPL-2.0
/* Multipath TCP
 *
 * Copyright (c) 2017 - 2019, Intel Corporation.
 */

#define pr_fmt(fmt) "MPTCP: " fmt

#include <linux/kernel.h>
#include <crypto/sha2.h>
#include <net/tcp.h>
#include <net/mptcp.h>
#include "protocol.h"
#include "mib.h"

#include <trace/events/mptcp.h>

static bool mptcp_cap_flag_sha256(u8 flags)
{
	return (flags & MPTCP_CAP_FLAG_MASK) == MPTCP_CAP_HMAC_SHA256;
}
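
/* Parsing context: mptcp_get_options() below consumes the TCP option
 * kind and length bytes before calling in, so 'ptr' points at the
 * subtype byte and 'opsize' is the full option length. Every MPTCP
 * suboption carries its 4-bit subtype in the high nibble of that byte
 * (RFC 8684, Section 3). For example, an MP_CAPABLE SYN is just
 * kind(30)/length(4)/subtype+version/flags, i.e.
 * TCPOLEN_MPTCP_MPC_SYN == 4.
 */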

static void mptcp_parse_option(const struct sk_buff *skb,
			       const unsigned char *ptr, int opsize,
			       struct mptcp_options_received *mp_opt)
{
	u8 subtype = *ptr >> 4;
	int expected_opsize;
	u8 version;
	u8 flags;
	u8 i;

	switch (subtype) {
	case MPTCPOPT_MP_CAPABLE:
		/* strict size checking */
		if (!(TCP_SKB_CB(skb)->tcp_flags & TCPHDR_SYN)) {
			if (skb->len > tcp_hdr(skb)->doff << 2)
				expected_opsize = TCPOLEN_MPTCP_MPC_ACK_DATA;
			else
				expected_opsize = TCPOLEN_MPTCP_MPC_ACK;
		} else {
			if (TCP_SKB_CB(skb)->tcp_flags & TCPHDR_ACK)
				expected_opsize = TCPOLEN_MPTCP_MPC_SYNACK;
			else
				expected_opsize = TCPOLEN_MPTCP_MPC_SYN;
		}
		if (opsize != expected_opsize)
			break;

		/* try to be gentle vs future versions on the initial syn */
		version = *ptr++ & MPTCP_VERSION_MASK;
		if (opsize != TCPOLEN_MPTCP_MPC_SYN) {
			if (version != MPTCP_SUPPORTED_VERSION)
				break;
		} else if (version < MPTCP_SUPPORTED_VERSION) {
			break;
		}

		flags = *ptr++;
		if (!mptcp_cap_flag_sha256(flags) ||
		    (flags & MPTCP_CAP_EXTENSIBILITY))
			break;

		/* RFC 6824, Section 3.1:
		 * "For the Checksum Required bit (labeled "A"), if either
		 * host requires the use of checksums, checksums MUST be used.
		 * In other words, the only way for checksums not to be used
		 * is if both hosts in their SYNs set A=0."
		 *
		 * Section 3.3.0:
		 * "If a checksum is not present when its use has been
		 * negotiated, the receiver MUST close the subflow with a RST as
		 * it is considered broken."
		 *
		 * We don't implement DSS checksum - fall back to TCP.
		 */
		if (flags & MPTCP_CAP_CHECKSUM_REQD)
			break;

		mp_opt->mp_capable = 1;
		if (opsize >= TCPOLEN_MPTCP_MPC_SYNACK) {
			mp_opt->sndr_key = get_unaligned_be64(ptr);
			ptr += 8;
		}
		if (opsize >= TCPOLEN_MPTCP_MPC_ACK) {
			mp_opt->rcvr_key = get_unaligned_be64(ptr);
			ptr += 8;
		}
		if (opsize == TCPOLEN_MPTCP_MPC_ACK_DATA) {
			/* Section 3.1.:
			 * "the data parameters in a MP_CAPABLE are semantically
			 * equivalent to those in a DSS option and can be used
			 * interchangeably."
			 */
			mp_opt->dss = 1;
			mp_opt->use_map = 1;
			mp_opt->mpc_map = 1;
			mp_opt->data_len = get_unaligned_be16(ptr);
			ptr += 2;
		}
		pr_debug("MP_CAPABLE version=%x, flags=%x, optlen=%d sndr=%llu, rcvr=%llu len=%d",
			 version, flags, opsize, mp_opt->sndr_key,
			 mp_opt->rcvr_key, mp_opt->data_len);
		break;

	case MPTCPOPT_MP_JOIN:
		mp_opt->mp_join = 1;
		if (opsize == TCPOLEN_MPTCP_MPJ_SYN) {
			mp_opt->backup = *ptr++ & MPTCPOPT_BACKUP;
			mp_opt->join_id = *ptr++;
			mp_opt->token = get_unaligned_be32(ptr);
			ptr += 4;
			mp_opt->nonce = get_unaligned_be32(ptr);
			ptr += 4;
			pr_debug("MP_JOIN bkup=%u, id=%u, token=%u, nonce=%u",
				 mp_opt->backup, mp_opt->join_id,
				 mp_opt->token, mp_opt->nonce);
		} else if (opsize == TCPOLEN_MPTCP_MPJ_SYNACK) {
			mp_opt->backup = *ptr++ & MPTCPOPT_BACKUP;
			mp_opt->join_id = *ptr++;
			mp_opt->thmac = get_unaligned_be64(ptr);
			ptr += 8;
			mp_opt->nonce = get_unaligned_be32(ptr);
			ptr += 4;
			pr_debug("MP_JOIN bkup=%u, id=%u, thmac=%llu, nonce=%u",
				 mp_opt->backup, mp_opt->join_id,
				 mp_opt->thmac, mp_opt->nonce);
		} else if (opsize == TCPOLEN_MPTCP_MPJ_ACK) {
			ptr += 2;
			memcpy(mp_opt->hmac, ptr, MPTCPOPT_HMAC_LEN);
			pr_debug("MP_JOIN hmac");
		} else {
			mp_opt->mp_join = 0;
		}
		break;

	case MPTCPOPT_DSS:
		pr_debug("DSS");
		ptr++;

		/* we must clear 'mpc_map' to be able to detect MP_CAPABLE
		 * map vs DSS map in mptcp_incoming_options(), and reconstruct
		 * map info accordingly
		 */
		mp_opt->mpc_map = 0;
		flags = (*ptr++) & MPTCP_DSS_FLAG_MASK;
		mp_opt->data_fin = (flags & MPTCP_DSS_DATA_FIN) != 0;
		mp_opt->dsn64 = (flags & MPTCP_DSS_DSN64) != 0;
		mp_opt->use_map = (flags & MPTCP_DSS_HAS_MAP) != 0;
		mp_opt->ack64 = (flags & MPTCP_DSS_ACK64) != 0;
		mp_opt->use_ack = (flags & MPTCP_DSS_HAS_ACK);

		pr_debug("data_fin=%d dsn64=%d use_map=%d ack64=%d use_ack=%d",
			 mp_opt->data_fin, mp_opt->dsn64,
			 mp_opt->use_map, mp_opt->ack64,
			 mp_opt->use_ack);

		expected_opsize = TCPOLEN_MPTCP_DSS_BASE;

		if (mp_opt->use_ack) {
			if (mp_opt->ack64)
				expected_opsize += TCPOLEN_MPTCP_DSS_ACK64;
			else
				expected_opsize += TCPOLEN_MPTCP_DSS_ACK32;
		}

		if (mp_opt->use_map) {
			if (mp_opt->dsn64)
				expected_opsize += TCPOLEN_MPTCP_DSS_MAP64;
			else
				expected_opsize += TCPOLEN_MPTCP_DSS_MAP32;
		}

		/* RFC 6824, Section 3.3:
		 * If a checksum is present, but its use had
		 * not been negotiated in the MP_CAPABLE handshake,
		 * the checksum field MUST be ignored.
		 */
		if (opsize != expected_opsize &&
		    opsize != expected_opsize + TCPOLEN_MPTCP_DSS_CHECKSUM)
			break;

		mp_opt->dss = 1;

		if (mp_opt->use_ack) {
			if (mp_opt->ack64) {
				mp_opt->data_ack = get_unaligned_be64(ptr);
				ptr += 8;
			} else {
				mp_opt->data_ack = get_unaligned_be32(ptr);
				ptr += 4;
			}

			pr_debug("data_ack=%llu", mp_opt->data_ack);
		}

		if (mp_opt->use_map) {
			if (mp_opt->dsn64) {
				mp_opt->data_seq = get_unaligned_be64(ptr);
				ptr += 8;
			} else {
				mp_opt->data_seq = get_unaligned_be32(ptr);
				ptr += 4;
			}

			mp_opt->subflow_seq = get_unaligned_be32(ptr);
			ptr += 4;

			mp_opt->data_len = get_unaligned_be16(ptr);
			ptr += 2;

			pr_debug("data_seq=%llu subflow_seq=%u data_len=%u",
				 mp_opt->data_seq, mp_opt->subflow_seq,
				 mp_opt->data_len);
		}

		break;

	case MPTCPOPT_ADD_ADDR:
		mp_opt->echo = (*ptr++) & MPTCP_ADDR_ECHO;
		if (!mp_opt->echo) {
			if (opsize == TCPOLEN_MPTCP_ADD_ADDR ||
			    opsize == TCPOLEN_MPTCP_ADD_ADDR_PORT)
				mp_opt->addr.family = AF_INET;
#if IS_ENABLED(CONFIG_MPTCP_IPV6)
			else if (opsize == TCPOLEN_MPTCP_ADD_ADDR6 ||
				 opsize == TCPOLEN_MPTCP_ADD_ADDR6_PORT)
				mp_opt->addr.family = AF_INET6;
#endif
			else
				break;
		} else {
			if (opsize == TCPOLEN_MPTCP_ADD_ADDR_BASE ||
			    opsize == TCPOLEN_MPTCP_ADD_ADDR_BASE_PORT)
				mp_opt->addr.family = AF_INET;
#if IS_ENABLED(CONFIG_MPTCP_IPV6)
			else if (opsize == TCPOLEN_MPTCP_ADD_ADDR6_BASE ||
				 opsize == TCPOLEN_MPTCP_ADD_ADDR6_BASE_PORT)
				mp_opt->addr.family = AF_INET6;
#endif
			else
				break;
		}

		mp_opt->add_addr = 1;
		mp_opt->addr.id = *ptr++;
		if (mp_opt->addr.family == AF_INET) {
			memcpy((u8 *)&mp_opt->addr.addr.s_addr, (u8 *)ptr, 4);
			ptr += 4;
			if (opsize == TCPOLEN_MPTCP_ADD_ADDR_PORT ||
			    opsize == TCPOLEN_MPTCP_ADD_ADDR_BASE_PORT) {
				mp_opt->addr.port = htons(get_unaligned_be16(ptr));
				ptr += 2;
			}
		}
#if IS_ENABLED(CONFIG_MPTCP_IPV6)
		else {
			memcpy(mp_opt->addr.addr6.s6_addr, (u8 *)ptr, 16);
			ptr += 16;
			if (opsize == TCPOLEN_MPTCP_ADD_ADDR6_PORT ||
			    opsize == TCPOLEN_MPTCP_ADD_ADDR6_BASE_PORT) {
				mp_opt->addr.port = htons(get_unaligned_be16(ptr));
				ptr += 2;
			}
		}
#endif
		if (!mp_opt->echo) {
			mp_opt->ahmac = get_unaligned_be64(ptr);
			ptr += 8;
		}
		pr_debug("ADD_ADDR%s: id=%d, ahmac=%llu, echo=%d, port=%d",
			 (mp_opt->addr.family == AF_INET6) ? "6" : "",
			 mp_opt->addr.id, mp_opt->ahmac, mp_opt->echo, ntohs(mp_opt->addr.port));
		break;

	case MPTCPOPT_RM_ADDR:
		if (opsize < TCPOLEN_MPTCP_RM_ADDR_BASE + 1 ||
		    opsize > TCPOLEN_MPTCP_RM_ADDR_BASE + MPTCP_RM_IDS_MAX)
			break;

		ptr++;

		mp_opt->rm_addr = 1;
		mp_opt->rm_list.nr = opsize - TCPOLEN_MPTCP_RM_ADDR_BASE;
		for (i = 0; i < mp_opt->rm_list.nr; i++)
			mp_opt->rm_list.ids[i] = *ptr++;
		pr_debug("RM_ADDR: rm_list_nr=%d", mp_opt->rm_list.nr);
		break;

	case MPTCPOPT_MP_PRIO:
		if (opsize != TCPOLEN_MPTCP_PRIO)
			break;

		mp_opt->mp_prio = 1;
		mp_opt->backup = *ptr++ & MPTCP_PRIO_BKUP;
		pr_debug("MP_PRIO: prio=%d", mp_opt->backup);
		break;

	case MPTCPOPT_MP_FASTCLOSE:
		if (opsize != TCPOLEN_MPTCP_FASTCLOSE)
			break;

		ptr += 2;
		mp_opt->rcvr_key = get_unaligned_be64(ptr);
		ptr += 8;
		mp_opt->fastclose = 1;
		break;

	case MPTCPOPT_RST:
		if (opsize != TCPOLEN_MPTCP_RST)
			break;

		if (!(TCP_SKB_CB(skb)->tcp_flags & TCPHDR_RST))
			break;
		mp_opt->reset = 1;
		flags = *ptr++;
		mp_opt->reset_transient = flags & MPTCP_RST_TRANSIENT;
		mp_opt->reset_reason = *ptr;
		break;

	default:
		break;
	}
}

void mptcp_get_options(const struct sk_buff *skb,
		       struct mptcp_options_received *mp_opt)
{
	const struct tcphdr *th = tcp_hdr(skb);
	const unsigned char *ptr;
	int length;

	/* initialize option status */
	mp_opt->mp_capable = 0;
	mp_opt->mp_join = 0;
	mp_opt->add_addr = 0;
	mp_opt->ahmac = 0;
	mp_opt->fastclose = 0;
	mp_opt->addr.port = 0;
	mp_opt->rm_addr = 0;
	mp_opt->dss = 0;
	mp_opt->mp_prio = 0;
	mp_opt->reset = 0;

	length = (th->doff * 4) - sizeof(struct tcphdr);
	ptr = (const unsigned char *)(th + 1);

	while (length > 0) {
		int opcode = *ptr++;
		int opsize;

		switch (opcode) {
		case TCPOPT_EOL:
			return;
		case TCPOPT_NOP:	/* Ref: RFC 793 section 3.1 */
			length--;
			continue;
		default:
			opsize = *ptr++;
			if (opsize < 2) /* "silly options" */
				return;
			if (opsize > length)
				return;	/* don't parse partial options */
			if (opcode == TCPOPT_MPTCP)
				mptcp_parse_option(skb, ptr, opsize, mp_opt);
			ptr += opsize - 2;
			length -= opsize;
		}
	}
}

bool mptcp_syn_options(struct sock *sk, const struct sk_buff *skb,
		       unsigned int *size, struct mptcp_out_options *opts)
{
	struct mptcp_subflow_context *subflow = mptcp_subflow_ctx(sk);

	/* we will use snd_isn to detect first pkt [re]transmission
	 * in mptcp_established_options_mp()
	 */
	subflow->snd_isn = TCP_SKB_CB(skb)->end_seq;
	if (subflow->request_mptcp) {
		opts->suboptions = OPTION_MPTCP_MPC_SYN;
		*size = TCPOLEN_MPTCP_MPC_SYN;
		return true;
	} else if (subflow->request_join) {
		pr_debug("remote_token=%u, nonce=%u", subflow->remote_token,
			 subflow->local_nonce);
		opts->suboptions = OPTION_MPTCP_MPJ_SYN;
		opts->join_id = subflow->local_id;
		opts->token = subflow->remote_token;
		opts->nonce = subflow->local_nonce;
		opts->backup = subflow->request_bkup;
		*size = TCPOLEN_MPTCP_MPJ_SYN;
		return true;
	}
	return false;
}

/* MP_JOIN client subflow must wait for the 4th ack before sending any data:
 * TCP can't schedule the delack timer before the subflow is fully established.
 * MPTCP uses the delack timer to do 3rd ack retransmissions
 */
static void schedule_3rdack_retransmission(struct sock *sk)
{
	struct inet_connection_sock *icsk = inet_csk(sk);
	struct tcp_sock *tp = tcp_sk(sk);
	unsigned long timeout;

	/* reschedule with a timeout above RTT, as we must look only for drop */
	if (tp->srtt_us)
		timeout = tp->srtt_us << 1;
	else
		timeout = TCP_TIMEOUT_INIT;

	WARN_ON_ONCE(icsk->icsk_ack.pending & ICSK_ACK_TIMER);
	icsk->icsk_ack.pending |= ICSK_ACK_SCHED | ICSK_ACK_TIMER;
	icsk->icsk_ack.timeout = timeout;
	sk_reset_timer(sk, &icsk->icsk_delack_timer, timeout);
}

static void clear_3rdack_retransmission(struct sock *sk)
{
	struct inet_connection_sock *icsk = inet_csk(sk);

	sk_stop_timer(sk, &icsk->icsk_delack_timer);
	icsk->icsk_ack.timeout = 0;
	icsk->icsk_ack.ato = 0;
	icsk->icsk_ack.pending &= ~(ICSK_ACK_SCHED | ICSK_ACK_TIMER);
}

static bool mptcp_established_options_mp(struct sock *sk, struct sk_buff *skb,
					 bool snd_data_fin_enable,
					 unsigned int *size,
					 unsigned int remaining,
					 struct mptcp_out_options *opts)
{
	struct mptcp_subflow_context *subflow = mptcp_subflow_ctx(sk);
	struct mptcp_ext *mpext;
	unsigned int data_len;

	/* When skb is not available, we better over-estimate the emitted
	 * options len. A full DSS option (28 bytes) is longer than
	 * TCPOLEN_MPTCP_MPC_ACK_DATA(22) or TCPOLEN_MPTCP_MPJ_ACK(24), so
	 * tell the caller to defer the estimate to
	 * mptcp_established_options_dss(), which will reserve enough space.
	 */
	if (!skb)
		return false;

	/* MPC/MPJ needed only on 3rd ack packet, DATA_FIN and TCP shutdown take precedence */
	if (subflow->fully_established || snd_data_fin_enable ||
	    subflow->snd_isn != TCP_SKB_CB(skb)->seq ||
	    sk->sk_state != TCP_ESTABLISHED)
		return false;

	if (subflow->mp_capable) {
		mpext = mptcp_get_ext(skb);
		data_len = mpext ? mpext->data_len : 0;

		/* we will check ext_copy.data_len in mptcp_write_options() to
		 * discriminate between TCPOLEN_MPTCP_MPC_ACK_DATA and
		 * TCPOLEN_MPTCP_MPC_ACK
		 */
		opts->ext_copy.data_len = data_len;
		opts->suboptions = OPTION_MPTCP_MPC_ACK;
		opts->sndr_key = subflow->local_key;
		opts->rcvr_key = subflow->remote_key;

		/* Section 3.1.:
		 * The MP_CAPABLE option is carried on the SYN, SYN/ACK, and ACK
		 * packets that start the first subflow of an MPTCP connection,
		 * as well as the first packet that carries data
		 */
		if (data_len > 0)
			*size = ALIGN(TCPOLEN_MPTCP_MPC_ACK_DATA, 4);
		else
			*size = TCPOLEN_MPTCP_MPC_ACK;

		pr_debug("subflow=%p, local_key=%llu, remote_key=%llu map_len=%d",
			 subflow, subflow->local_key, subflow->remote_key,
			 data_len);

		return true;
	} else if (subflow->mp_join) {
		opts->suboptions = OPTION_MPTCP_MPJ_ACK;
		memcpy(opts->hmac, subflow->hmac, MPTCPOPT_HMAC_LEN);
		*size = TCPOLEN_MPTCP_MPJ_ACK;
		pr_debug("subflow=%p", subflow);

		schedule_3rdack_retransmission(sk);
		return true;
	}
	return false;
}

static void mptcp_write_data_fin(struct mptcp_subflow_context *subflow,
				 struct sk_buff *skb, struct mptcp_ext *ext)
{
	/* The write_seq value has already been incremented, so the actual
	 * sequence number for the DATA_FIN is one less.
	 */
	u64 data_fin_tx_seq = READ_ONCE(mptcp_sk(subflow->conn)->write_seq) - 1;

	if (!ext->use_map || !skb->len) {
		/* RFC6824 requires a DSS mapping with specific values
		 * if DATA_FIN is set but no data payload is mapped
		 */
		ext->data_fin = 1;
		ext->use_map = 1;
		ext->dsn64 = 1;
		ext->data_seq = data_fin_tx_seq;
		ext->subflow_seq = 0;
		ext->data_len = 1;
	} else if (ext->data_seq + ext->data_len == data_fin_tx_seq) {
		/* If there's an existing DSS mapping and it is the
		 * final mapping, DATA_FIN consumes 1 additional byte of
		 * mapping space.
		 */
		ext->data_fin = 1;
		ext->data_len++;
	}
}
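
/* To illustrate the two branches above: a DATA_FIN with no payload mapped
 * gets the degenerate mapping data_seq == write_seq - 1, subflow_seq == 0,
 * data_len == 1, while a DATA_FIN that lands right after the last real
 * mapping simply extends that mapping by one byte.
 */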

static bool mptcp_established_options_dss(struct sock *sk, struct sk_buff *skb,
					  bool snd_data_fin_enable,
					  unsigned int *size,
					  unsigned int remaining,
					  struct mptcp_out_options *opts)
{
	struct mptcp_subflow_context *subflow = mptcp_subflow_ctx(sk);
	struct mptcp_sock *msk = mptcp_sk(subflow->conn);
	unsigned int dss_size = 0;
	struct mptcp_ext *mpext;
	unsigned int ack_size;
	bool ret = false;
	u64 ack_seq;

	mpext = skb ? mptcp_get_ext(skb) : NULL;

	if (!skb || (mpext && mpext->use_map) || snd_data_fin_enable) {
		unsigned int map_size;

		map_size = TCPOLEN_MPTCP_DSS_BASE + TCPOLEN_MPTCP_DSS_MAP64;

		remaining -= map_size;
		dss_size = map_size;
		if (mpext)
			opts->ext_copy = *mpext;

		if (skb && snd_data_fin_enable)
			mptcp_write_data_fin(subflow, skb, &opts->ext_copy);
		ret = true;
	}

	/* passive sockets msk will set the 'can_ack' after accept(), even
	 * if the first subflow may already have the remote key handy
	 */
	opts->ext_copy.use_ack = 0;
	if (!READ_ONCE(msk->can_ack)) {
		*size = ALIGN(dss_size, 4);
		return ret;
	}

	ack_seq = READ_ONCE(msk->ack_seq);
	if (READ_ONCE(msk->use_64bit_ack)) {
		ack_size = TCPOLEN_MPTCP_DSS_ACK64;
		opts->ext_copy.data_ack = ack_seq;
		opts->ext_copy.ack64 = 1;
	} else {
		ack_size = TCPOLEN_MPTCP_DSS_ACK32;
		opts->ext_copy.data_ack32 = (uint32_t)ack_seq;
		opts->ext_copy.ack64 = 0;
	}
	opts->ext_copy.use_ack = 1;
	WRITE_ONCE(msk->old_wspace, __mptcp_space((struct sock *)msk));

	/* Add kind/length/subtype/flag overhead if mapping is not populated */
	if (dss_size == 0)
		ack_size += TCPOLEN_MPTCP_DSS_BASE;

	dss_size += ack_size;

	*size = ALIGN(dss_size, 4);
	return true;
}
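
/* Worst-case DSS sizing, assuming the TCPOLEN_MPTCP_DSS_* values from
 * protocol.h: 4 (base) + 8 (64-bit ack) + 14 (64-bit mapping) = 26 bytes,
 * which ALIGN()s up to the 28 bytes quoted in
 * mptcp_established_options_mp() above.
 */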

static u64 add_addr_generate_hmac(u64 key1, u64 key2,
				  struct mptcp_addr_info *addr)
{
	u16 port = ntohs(addr->port);
	u8 hmac[SHA256_DIGEST_SIZE];
	u8 msg[19];
	int i = 0;

	msg[i++] = addr->id;
	if (addr->family == AF_INET) {
		memcpy(&msg[i], &addr->addr.s_addr, 4);
		i += 4;
	}
#if IS_ENABLED(CONFIG_MPTCP_IPV6)
	else if (addr->family == AF_INET6) {
		memcpy(&msg[i], &addr->addr6.s6_addr, 16);
		i += 16;
	}
#endif
	msg[i++] = port >> 8;
	msg[i++] = port & 0xFF;

	mptcp_crypto_hmac_sha(key1, key2, msg, i, hmac);

	return get_unaligned_be64(&hmac[SHA256_DIGEST_SIZE - sizeof(u64)]);
}
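
/* The HMAC input built above is at most 19 bytes - hence msg[19]: a 1-byte
 * address id, 4 (IPv4) or 16 (IPv6) bytes of address, and a 2-byte port
 * (zero when none was signalled). Only the rightmost 64 bits of the
 * SHA-256 HMAC end up in the ADD_ADDR option.
 */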

static bool mptcp_established_options_add_addr(struct sock *sk, struct sk_buff *skb,
					       unsigned int *size,
					       unsigned int remaining,
					       struct mptcp_out_options *opts)
{
	struct mptcp_subflow_context *subflow = mptcp_subflow_ctx(sk);
	struct mptcp_sock *msk = mptcp_sk(subflow->conn);
	bool drop_other_suboptions = false;
	unsigned int opt_size = *size;
	bool echo;
	bool port;
	int len;

	if ((mptcp_pm_should_add_signal_ipv6(msk) ||
	     mptcp_pm_should_add_signal_port(msk) ||
	     mptcp_pm_should_add_signal_echo(msk)) &&
	    skb && skb_is_tcp_pure_ack(skb)) {
		pr_debug("drop other suboptions");
		opts->suboptions = 0;
		opts->ext_copy.use_ack = 0;
		opts->ext_copy.use_map = 0;
		remaining += opt_size;
		drop_other_suboptions = true;
	}

	if (!mptcp_pm_should_add_signal(msk) ||
	    !(mptcp_pm_add_addr_signal(msk, remaining, &opts->addr, &echo, &port)))
		return false;

	len = mptcp_add_addr_len(opts->addr.family, echo, port);
	if (remaining < len)
		return false;

	*size = len;
	if (drop_other_suboptions)
		*size -= opt_size;
	opts->suboptions |= OPTION_MPTCP_ADD_ADDR;
	if (!echo) {
		opts->ahmac = add_addr_generate_hmac(msk->local_key,
						     msk->remote_key,
						     &opts->addr);
	}
	pr_debug("addr_id=%d, ahmac=%llu, echo=%d, port=%d",
		 opts->addr.id, opts->ahmac, echo, ntohs(opts->addr.port));

	return true;
}

static bool mptcp_established_options_rm_addr(struct sock *sk,
					      unsigned int *size,
					      unsigned int remaining,
					      struct mptcp_out_options *opts)
{
	struct mptcp_subflow_context *subflow = mptcp_subflow_ctx(sk);
	struct mptcp_sock *msk = mptcp_sk(subflow->conn);
	struct mptcp_rm_list rm_list;
	int i, len;

	if (!mptcp_pm_should_rm_signal(msk) ||
	    !(mptcp_pm_rm_addr_signal(msk, remaining, &rm_list)))
		return false;

	len = mptcp_rm_addr_len(&rm_list);
	if (len < 0)
		return false;
	if (remaining < len)
		return false;

	*size = len;
	opts->suboptions |= OPTION_MPTCP_RM_ADDR;
	opts->rm_list = rm_list;

	for (i = 0; i < opts->rm_list.nr; i++)
		pr_debug("rm_list_ids[%d]=%d", i, opts->rm_list.ids[i]);

	return true;
}

static bool mptcp_established_options_mp_prio(struct sock *sk,
					      unsigned int *size,
					      unsigned int remaining,
					      struct mptcp_out_options *opts)
{
	struct mptcp_subflow_context *subflow = mptcp_subflow_ctx(sk);

	if (!subflow->send_mp_prio)
		return false;

	/* account for the trailing 'nop' option */
	if (remaining < TCPOLEN_MPTCP_PRIO_ALIGN)
		return false;

	*size = TCPOLEN_MPTCP_PRIO_ALIGN;
	opts->suboptions |= OPTION_MPTCP_PRIO;
	opts->backup = subflow->request_bkup;

	pr_debug("prio=%d", opts->backup);

	return true;
}

static noinline void mptcp_established_options_rst(struct sock *sk, struct sk_buff *skb,
						   unsigned int *size,
						   unsigned int remaining,
						   struct mptcp_out_options *opts)
{
	const struct mptcp_subflow_context *subflow = mptcp_subflow_ctx(sk);

	if (remaining < TCPOLEN_MPTCP_RST)
		return;

	*size = TCPOLEN_MPTCP_RST;
	opts->suboptions |= OPTION_MPTCP_RST;
	opts->reset_transient = subflow->reset_transient;
	opts->reset_reason = subflow->reset_reason;
}

bool mptcp_established_options(struct sock *sk, struct sk_buff *skb,
			       unsigned int *size, unsigned int remaining,
			       struct mptcp_out_options *opts)
{
	struct mptcp_subflow_context *subflow = mptcp_subflow_ctx(sk);
	struct mptcp_sock *msk = mptcp_sk(subflow->conn);
	unsigned int opt_size = 0;
	bool snd_data_fin;
	bool ret = false;

	opts->suboptions = 0;

	if (unlikely(__mptcp_check_fallback(msk)))
		return false;

	if (unlikely(skb && TCP_SKB_CB(skb)->tcp_flags & TCPHDR_RST)) {
		mptcp_established_options_rst(sk, skb, size, remaining, opts);
		return true;
	}

	snd_data_fin = mptcp_data_fin_enabled(msk);
	if (mptcp_established_options_mp(sk, skb, snd_data_fin, &opt_size, remaining, opts))
		ret = true;
	else if (mptcp_established_options_dss(sk, skb, snd_data_fin, &opt_size, remaining, opts))
		ret = true;

	/* we reserved enough space for the above options, and exceeding the
	 * TCP option space would be fatal
	 */
	if (WARN_ON_ONCE(opt_size > remaining))
		return false;

	*size += opt_size;
	remaining -= opt_size;
	if (mptcp_established_options_add_addr(sk, skb, &opt_size, remaining, opts)) {
		*size += opt_size;
		remaining -= opt_size;
		ret = true;
	} else if (mptcp_established_options_rm_addr(sk, &opt_size, remaining, opts)) {
		*size += opt_size;
		remaining -= opt_size;
		ret = true;
	}

	if (mptcp_established_options_mp_prio(sk, &opt_size, remaining, opts)) {
		*size += opt_size;
		remaining -= opt_size;
		ret = true;
	}

	return ret;
}

bool mptcp_synack_options(const struct request_sock *req, unsigned int *size,
			  struct mptcp_out_options *opts)
{
	struct mptcp_subflow_request_sock *subflow_req = mptcp_subflow_rsk(req);

	if (subflow_req->mp_capable) {
		opts->suboptions = OPTION_MPTCP_MPC_SYNACK;
		opts->sndr_key = subflow_req->local_key;
		*size = TCPOLEN_MPTCP_MPC_SYNACK;
		pr_debug("subflow_req=%p, local_key=%llu",
			 subflow_req, subflow_req->local_key);
		return true;
	} else if (subflow_req->mp_join) {
		opts->suboptions = OPTION_MPTCP_MPJ_SYNACK;
		opts->backup = subflow_req->backup;
		opts->join_id = subflow_req->local_id;
		opts->thmac = subflow_req->thmac;
		opts->nonce = subflow_req->local_nonce;
		pr_debug("req=%p, bkup=%u, id=%u, thmac=%llu, nonce=%u",
			 subflow_req, opts->backup, opts->join_id,
			 opts->thmac, opts->nonce);
		*size = TCPOLEN_MPTCP_MPJ_SYNACK;
		return true;
	}
	return false;
}
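
/* Handshake sizing, per RFC 8684: the MP_CAPABLE SYN/ACK above is 12 bytes
 * (TCPOLEN_MPTCP_MPC_SYNACK) - the 4-byte header plus the listener's 64-bit
 * key - while the client's key only travels on the third ack
 * (TCPOLEN_MPTCP_MPC_ACK == 20, or 22 with the data-level length).
 */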

static bool check_fully_established(struct mptcp_sock *msk, struct sock *ssk,
				    struct mptcp_subflow_context *subflow,
				    struct sk_buff *skb,
				    struct mptcp_options_received *mp_opt)
{
	/* here we can process OoO, in-window pkts, only in-sequence 4th ack
	 * will make the subflow fully established
	 */
	if (likely(subflow->fully_established)) {
		/* on passive sockets, check for 3rd ack retransmission
		 * note that msk is always set by subflow_syn_recv_sock()
		 * for mp_join subflows
		 */
		if (TCP_SKB_CB(skb)->seq == subflow->ssn_offset + 1 &&
		    TCP_SKB_CB(skb)->end_seq == TCP_SKB_CB(skb)->seq &&
		    subflow->mp_join && mp_opt->mp_join &&
		    READ_ONCE(msk->pm.server_side))
			tcp_send_ack(ssk);
		goto fully_established;
	}

	/* we must process OoO packets before the first subflow is fully
	 * established. OoO packets are instead a protocol violation
	 * for MP_JOIN subflows as the peer must not send any data
	 * before receiving the fourth ack - cfr. RFC 8684 section 3.2.
	 */
	if (TCP_SKB_CB(skb)->seq != subflow->ssn_offset + 1) {
		if (subflow->mp_join)
			goto reset;
		return subflow->mp_capable;
	}

	if (mp_opt->dss && mp_opt->use_ack) {
		/* subflows are fully established as soon as we get any
		 * additional ack.
		 */
		subflow->fully_established = 1;
		WRITE_ONCE(msk->fully_established, true);
		goto fully_established;
	}

	if (mp_opt->add_addr) {
		WRITE_ONCE(msk->fully_established, true);
		goto fully_established;
	}

	/* If the first established packet does not contain MP_CAPABLE + data
	 * then fallback to TCP. Fallback scenarios require a reset for
	 * MP_JOIN subflows.
	 */
	if (!mp_opt->mp_capable) {
		if (subflow->mp_join)
			goto reset;
		subflow->mp_capable = 0;
		pr_fallback(msk);
		__mptcp_do_fallback(msk);
		return false;
	}

	if (unlikely(!READ_ONCE(msk->pm.server_side)))
		pr_warn_once("bogus mpc option on established client sk");
	mptcp_subflow_fully_established(subflow, mp_opt);

fully_established:
	/* if the subflow is not already linked into the conn_list, we can't
	 * notify the PM: this subflow is still on the listener queue
	 * and the PM possibly acquiring the subflow lock could race with
	 * the listener close
	 */
	if (likely(subflow->pm_notified) || list_empty(&subflow->node))
		return true;

	subflow->pm_notified = 1;
	if (subflow->mp_join) {
		clear_3rdack_retransmission(ssk);
		mptcp_pm_subflow_established(msk);
	} else {
		mptcp_pm_fully_established(msk, ssk, GFP_ATOMIC);
	}
	return true;

reset:
	mptcp_subflow_reset(ssk);
	return false;
}

static u64 expand_ack(u64 old_ack, u64 cur_ack, bool use_64bit)
{
	u32 old_ack32, cur_ack32;

	if (use_64bit)
		return cur_ack;

	old_ack32 = (u32)old_ack;
	cur_ack32 = (u32)cur_ack;
	cur_ack = (old_ack & GENMASK_ULL(63, 32)) + cur_ack32;
	if (unlikely(before(cur_ack32, old_ack32)))
		return cur_ack + (1LL << 32);

	return cur_ack;
}
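
/* A worked example of the expansion above, with no 32-bit wrap involved:
 * old_ack == 0x0000000100001000 and a 32-bit cur_ack of 0x00002000 keep
 * the old high word and yield 0x0000000100002000. The before() check only
 * fires when the 32-bit ack appears to have moved backward.
 */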

static void ack_update_msk(struct mptcp_sock *msk,
			   struct sock *ssk,
			   struct mptcp_options_received *mp_opt)
{
	u64 new_wnd_end, new_snd_una, snd_nxt = READ_ONCE(msk->snd_nxt);
	struct sock *sk = (struct sock *)msk;
	u64 old_snd_una;

	mptcp_data_lock(sk);

	/* avoid ack expansion on update conflict, to reduce the risk of
	 * wrongly expanding to a future ack sequence number, which is way
	 * more dangerous than missing an ack
	 */
	old_snd_una = msk->snd_una;
	new_snd_una = expand_ack(old_snd_una, mp_opt->data_ack, mp_opt->ack64);

	/* ACK for data not even sent yet? Ignore. */
	if (after64(new_snd_una, snd_nxt))
		new_snd_una = old_snd_una;

	new_wnd_end = new_snd_una + tcp_sk(ssk)->snd_wnd;

	if (after64(new_wnd_end, msk->wnd_end))
		msk->wnd_end = new_wnd_end;

	/* this assumes mptcp_incoming_options() is invoked after tcp_ack() */
	if (after64(msk->wnd_end, READ_ONCE(msk->snd_nxt)))
		__mptcp_check_push(sk, ssk);

	if (after64(new_snd_una, old_snd_una)) {
		msk->snd_una = new_snd_una;
		__mptcp_data_acked(sk);
	}
	mptcp_data_unlock(sk);

	trace_ack_update_msk(mp_opt->data_ack,
			     old_snd_una, new_snd_una,
			     new_wnd_end, msk->wnd_end);
}

bool mptcp_update_rcv_data_fin(struct mptcp_sock *msk, u64 data_fin_seq, bool use_64bit)
{
	/* Skip if DATA_FIN was already received.
	 * If updating simultaneously with the recvmsg loop, values
	 * should match. If they mismatch, the peer is misbehaving and
	 * we will prefer the most recent information.
	 */
	if (READ_ONCE(msk->rcv_data_fin))
		return false;

	WRITE_ONCE(msk->rcv_data_fin_seq,
		   expand_ack(READ_ONCE(msk->ack_seq), data_fin_seq, use_64bit));
	WRITE_ONCE(msk->rcv_data_fin, 1);

	return true;
}

static bool add_addr_hmac_valid(struct mptcp_sock *msk,
				struct mptcp_options_received *mp_opt)
{
	u64 hmac = 0;

	if (mp_opt->echo)
		return true;

	hmac = add_addr_generate_hmac(msk->remote_key,
				      msk->local_key,
				      &mp_opt->addr);

	pr_debug("msk=%p, ahmac=%llu, mp_opt->ahmac=%llu\n",
		 msk, (unsigned long long)hmac,
		 (unsigned long long)mp_opt->ahmac);

	return hmac == mp_opt->ahmac;
}

void mptcp_incoming_options(struct sock *sk, struct sk_buff *skb)
{
	struct mptcp_subflow_context *subflow = mptcp_subflow_ctx(sk);
	struct mptcp_sock *msk = mptcp_sk(subflow->conn);
	struct mptcp_options_received mp_opt;
	struct mptcp_ext *mpext;

	if (__mptcp_check_fallback(msk)) {
		/* Keep it simple and unconditionally trigger send data cleanup and
		 * pending queue spooling. We will need to acquire the data lock
		 * for more accurate checks, and once the lock is acquired, such
		 * helpers are cheap.
		 */
		mptcp_data_lock(subflow->conn);
		if (sk_stream_memory_free(sk))
			__mptcp_check_push(subflow->conn, sk);
		__mptcp_data_acked(subflow->conn);
		mptcp_data_unlock(subflow->conn);
		return;
	}

	mptcp_get_options(skb, &mp_opt);
	if (!check_fully_established(msk, sk, subflow, skb, &mp_opt))
		return;

	if (mp_opt.fastclose &&
	    msk->local_key == mp_opt.rcvr_key) {
		WRITE_ONCE(msk->rcv_fastclose, true);
		mptcp_schedule_work((struct sock *)msk);
	}

	if (mp_opt.add_addr && add_addr_hmac_valid(msk, &mp_opt)) {
		if (!mp_opt.echo) {
			mptcp_pm_add_addr_received(msk, &mp_opt.addr);
			MPTCP_INC_STATS(sock_net(sk), MPTCP_MIB_ADDADDR);
		} else {
			mptcp_pm_add_addr_echoed(msk, &mp_opt.addr);
			mptcp_pm_del_add_timer(msk, &mp_opt.addr, true);
			MPTCP_INC_STATS(sock_net(sk), MPTCP_MIB_ECHOADD);
		}

		if (mp_opt.addr.port)
			MPTCP_INC_STATS(sock_net(sk), MPTCP_MIB_PORTADD);

		mp_opt.add_addr = 0;
	}

	if (mp_opt.rm_addr) {
		mptcp_pm_rm_addr_received(msk, &mp_opt.rm_list);
		mp_opt.rm_addr = 0;
	}

	if (mp_opt.mp_prio) {
		mptcp_pm_mp_prio_received(sk, mp_opt.backup);
		MPTCP_INC_STATS(sock_net(sk), MPTCP_MIB_MPPRIORX);
		mp_opt.mp_prio = 0;
	}

	if (mp_opt.reset) {
		subflow->reset_seen = 1;
		subflow->reset_reason = mp_opt.reset_reason;
		subflow->reset_transient = mp_opt.reset_transient;
	}

	if (!mp_opt.dss)
		return;

	/* we can't wait for recvmsg() to update the ack_seq, otherwise
	 * monodirectional flows will get stuck
	 */
	if (mp_opt.use_ack)
		ack_update_msk(msk, sk, &mp_opt);

	/* Zero-data-length packets are dropped by the caller and not
	 * propagated to the MPTCP layer, so the skb extension does not
	 * need to be allocated or populated. DATA_FIN information, if
	 * present, needs to be updated here before the skb is freed.
	 */
	if (TCP_SKB_CB(skb)->seq == TCP_SKB_CB(skb)->end_seq) {
		if (mp_opt.data_fin && mp_opt.data_len == 1 &&
		    mptcp_update_rcv_data_fin(msk, mp_opt.data_seq, mp_opt.dsn64) &&
		    schedule_work(&msk->work))
			sock_hold(subflow->conn);

		return;
	}

	mpext = skb_ext_add(skb, SKB_EXT_MPTCP);
	if (!mpext)
		return;

	memset(mpext, 0, sizeof(*mpext));

	if (mp_opt.use_map) {
		if (mp_opt.mpc_map) {
			/* this is an MP_CAPABLE carrying MPTCP data
			 * we know this maps the first chunk of data
			 */
			mptcp_crypto_key_sha(subflow->remote_key, NULL,
					     &mpext->data_seq);
			mpext->data_seq++;
			mpext->subflow_seq = 1;
			mpext->dsn64 = 1;
			mpext->mpc_map = 1;
			mpext->data_fin = 0;
		} else {
			mpext->data_seq = mp_opt.data_seq;
			mpext->subflow_seq = mp_opt.subflow_seq;
			mpext->dsn64 = mp_opt.dsn64;
			mpext->data_fin = mp_opt.data_fin;
		}
		mpext->data_len = mp_opt.data_len;
		mpext->use_map = 1;
	}
}

static void mptcp_set_rwin(const struct tcp_sock *tp)
{
	const struct sock *ssk = (const struct sock *)tp;
	const struct mptcp_subflow_context *subflow;
	struct mptcp_sock *msk;
	u64 ack_seq;

	subflow = mptcp_subflow_ctx(ssk);
	msk = mptcp_sk(subflow->conn);

	ack_seq = READ_ONCE(msk->ack_seq) + tp->rcv_wnd;

	if (after64(ack_seq, READ_ONCE(msk->rcv_wnd_sent)))
		WRITE_ONCE(msk->rcv_wnd_sent, ack_seq);
}
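
/* mptcp_set_rwin() keeps rcv_wnd_sent at the most advanced right edge
 * (ack_seq + rcv_wnd) ever advertised on any subflow, so the MPTCP-level
 * window is never seen to shrink just because a different subflow wrote
 * the options.
 */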

void mptcp_write_options(__be32 *ptr, const struct tcp_sock *tp,
			 struct mptcp_out_options *opts)
{
	if ((OPTION_MPTCP_MPC_SYN | OPTION_MPTCP_MPC_SYNACK |
	     OPTION_MPTCP_MPC_ACK) & opts->suboptions) {
		u8 len;

		if (OPTION_MPTCP_MPC_SYN & opts->suboptions)
			len = TCPOLEN_MPTCP_MPC_SYN;
		else if (OPTION_MPTCP_MPC_SYNACK & opts->suboptions)
			len = TCPOLEN_MPTCP_MPC_SYNACK;
		else if (opts->ext_copy.data_len)
			len = TCPOLEN_MPTCP_MPC_ACK_DATA;
		else
			len = TCPOLEN_MPTCP_MPC_ACK;

		*ptr++ = mptcp_option(MPTCPOPT_MP_CAPABLE, len,
				      MPTCP_SUPPORTED_VERSION,
				      MPTCP_CAP_HMAC_SHA256);

		if (!((OPTION_MPTCP_MPC_SYNACK | OPTION_MPTCP_MPC_ACK) &
		    opts->suboptions))
			goto mp_capable_done;

		put_unaligned_be64(opts->sndr_key, ptr);
		ptr += 2;
		if (!((OPTION_MPTCP_MPC_ACK) & opts->suboptions))
			goto mp_capable_done;

		put_unaligned_be64(opts->rcvr_key, ptr);
		ptr += 2;
		if (!opts->ext_copy.data_len)
			goto mp_capable_done;

		put_unaligned_be32(opts->ext_copy.data_len << 16 |
				   TCPOPT_NOP << 8 | TCPOPT_NOP, ptr);
		ptr += 1;
	}

mp_capable_done:
	if (OPTION_MPTCP_ADD_ADDR & opts->suboptions) {
		u8 len = TCPOLEN_MPTCP_ADD_ADDR_BASE;
		u8 echo = MPTCP_ADDR_ECHO;

#if IS_ENABLED(CONFIG_MPTCP_IPV6)
		if (opts->addr.family == AF_INET6)
			len = TCPOLEN_MPTCP_ADD_ADDR6_BASE;
#endif

		if (opts->addr.port)
			len += TCPOLEN_MPTCP_PORT_LEN;

		if (opts->ahmac) {
			len += sizeof(opts->ahmac);
			echo = 0;
		}

		*ptr++ = mptcp_option(MPTCPOPT_ADD_ADDR,
				      len, echo, opts->addr.id);
		if (opts->addr.family == AF_INET) {
			memcpy((u8 *)ptr, (u8 *)&opts->addr.addr.s_addr, 4);
			ptr += 1;
		}
#if IS_ENABLED(CONFIG_MPTCP_IPV6)
		else if (opts->addr.family == AF_INET6) {
			memcpy((u8 *)ptr, opts->addr.addr6.s6_addr, 16);
			ptr += 4;
		}
#endif

		if (!opts->addr.port) {
			if (opts->ahmac) {
				put_unaligned_be64(opts->ahmac, ptr);
				ptr += 2;
			}
		} else {
			u16 port = ntohs(opts->addr.port);

			if (opts->ahmac) {
				u8 *bptr = (u8 *)ptr;

				put_unaligned_be16(port, bptr);
				bptr += 2;
				put_unaligned_be64(opts->ahmac, bptr);
				bptr += 8;
				put_unaligned_be16(TCPOPT_NOP << 8 |
						   TCPOPT_NOP, bptr);

				ptr += 3;
			} else {
				put_unaligned_be32(port << 16 |
						   TCPOPT_NOP << 8 |
						   TCPOPT_NOP, ptr);
				ptr += 1;
			}
		}
	}

	if (OPTION_MPTCP_RM_ADDR & opts->suboptions) {
		u8 i = 1;

		*ptr++ = mptcp_option(MPTCPOPT_RM_ADDR,
				      TCPOLEN_MPTCP_RM_ADDR_BASE + opts->rm_list.nr,
				      0, opts->rm_list.ids[0]);

		while (i < opts->rm_list.nr) {
			u8 id1, id2, id3, id4;

			id1 = opts->rm_list.ids[i];
			id2 = i + 1 < opts->rm_list.nr ? opts->rm_list.ids[i + 1] : TCPOPT_NOP;
			id3 = i + 2 < opts->rm_list.nr ? opts->rm_list.ids[i + 2] : TCPOPT_NOP;
			id4 = i + 3 < opts->rm_list.nr ? opts->rm_list.ids[i + 3] : TCPOPT_NOP;
			put_unaligned_be32(id1 << 24 | id2 << 16 | id3 << 8 | id4, ptr);
			ptr += 1;
			i += 4;
		}
	}

	if (OPTION_MPTCP_PRIO & opts->suboptions) {
		const struct sock *ssk = (const struct sock *)tp;
		struct mptcp_subflow_context *subflow;

		subflow = mptcp_subflow_ctx(ssk);
		subflow->send_mp_prio = 0;

		*ptr++ = mptcp_option(MPTCPOPT_MP_PRIO,
				      TCPOLEN_MPTCP_PRIO,
				      opts->backup, TCPOPT_NOP);
	}

	if (OPTION_MPTCP_MPJ_SYN & opts->suboptions) {
		*ptr++ = mptcp_option(MPTCPOPT_MP_JOIN,
				      TCPOLEN_MPTCP_MPJ_SYN,
				      opts->backup, opts->join_id);
		put_unaligned_be32(opts->token, ptr);
		ptr += 1;
		put_unaligned_be32(opts->nonce, ptr);
		ptr += 1;
	}

	if (OPTION_MPTCP_MPJ_SYNACK & opts->suboptions) {
		*ptr++ = mptcp_option(MPTCPOPT_MP_JOIN,
				      TCPOLEN_MPTCP_MPJ_SYNACK,
				      opts->backup, opts->join_id);
		put_unaligned_be64(opts->thmac, ptr);
		ptr += 2;
		put_unaligned_be32(opts->nonce, ptr);
		ptr += 1;
	}

	if (OPTION_MPTCP_MPJ_ACK & opts->suboptions) {
		*ptr++ = mptcp_option(MPTCPOPT_MP_JOIN,
				      TCPOLEN_MPTCP_MPJ_ACK, 0, 0);
		memcpy(ptr, opts->hmac, MPTCPOPT_HMAC_LEN);
		ptr += 5;
	}

	if (OPTION_MPTCP_RST & opts->suboptions)
		*ptr++ = mptcp_option(MPTCPOPT_RST,
				      TCPOLEN_MPTCP_RST,
				      opts->reset_transient,
				      opts->reset_reason);

	if (opts->ext_copy.use_ack || opts->ext_copy.use_map) {
		struct mptcp_ext *mpext = &opts->ext_copy;
		u8 len = TCPOLEN_MPTCP_DSS_BASE;
		u8 flags = 0;

		if (mpext->use_ack) {
			flags = MPTCP_DSS_HAS_ACK;
			if (mpext->ack64) {
				len += TCPOLEN_MPTCP_DSS_ACK64;
				flags |= MPTCP_DSS_ACK64;
			} else {
				len += TCPOLEN_MPTCP_DSS_ACK32;
			}
		}

		if (mpext->use_map) {
			len += TCPOLEN_MPTCP_DSS_MAP64;

			/* Use only 64-bit mapping flags for now, add
			 * support for optional 32-bit mappings later.
			 */
			flags |= MPTCP_DSS_HAS_MAP | MPTCP_DSS_DSN64;
			if (mpext->data_fin)
				flags |= MPTCP_DSS_DATA_FIN;
		}

		*ptr++ = mptcp_option(MPTCPOPT_DSS, len, 0, flags);

		if (mpext->use_ack) {
			if (mpext->ack64) {
				put_unaligned_be64(mpext->data_ack, ptr);
				ptr += 2;
			} else {
				put_unaligned_be32(mpext->data_ack32, ptr);
				ptr += 1;
			}
		}

		if (mpext->use_map) {
			put_unaligned_be64(mpext->data_seq, ptr);
			ptr += 2;
			put_unaligned_be32(mpext->subflow_seq, ptr);
			ptr += 1;
			put_unaligned_be32(mpext->data_len << 16 |
					   TCPOPT_NOP << 8 | TCPOPT_NOP, ptr);
		}
	}

	if (tp)
		mptcp_set_rwin(tp);
}

__be32 mptcp_get_reset_option(const struct sk_buff *skb)
{
	const struct mptcp_ext *ext = mptcp_get_ext(skb);
	u8 flags, reason;

	if (ext) {
		flags = ext->reset_transient;
		reason = ext->reset_reason;

		return mptcp_option(MPTCPOPT_RST, TCPOLEN_MPTCP_RST,
				    flags, reason);
	}

	return htonl(0u);
}
EXPORT_SYMBOL_GPL(mptcp_get_reset_option);