// SPDX-License-Identifier: GPL-2.0
/* Multipath TCP
 *
 * Copyright (c) 2017 - 2019, Intel Corporation.
 */
#define pr_fmt(fmt) "MPTCP: " fmt

#include <linux/kernel.h>
#include <crypto/sha2.h>
#include <net/tcp.h>
#include <net/mptcp.h>
#include "protocol.h"
#include "mib.h"
#include <trace/events/mptcp.h>

static bool mptcp_cap_flag_sha256(u8 flags)
{
	return (flags & MPTCP_CAP_FLAG_MASK) == MPTCP_CAP_HMAC_SHA256;
}
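
/* Parse a single MPTCP suboption from the TCP option space: decode the
 * subtype carried at @ptr and fill @mp_opt accordingly; malformed or
 * unsupported suboptions are silently ignored.
 */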
static void mptcp_parse_option(const struct sk_buff *skb,
			       const unsigned char *ptr, int opsize,
			       struct mptcp_options_received *mp_opt)
{
	u8 subtype = *ptr >> 4;
	int expected_opsize;
	u8 version, flags, i;

	switch (subtype) {
	case MPTCPOPT_MP_CAPABLE:
		/* strict size checking */
		if (!(TCP_SKB_CB(skb)->tcp_flags & TCPHDR_SYN)) {
			if (skb->len > tcp_hdr(skb)->doff << 2)
				expected_opsize = TCPOLEN_MPTCP_MPC_ACK_DATA;
			else
				expected_opsize = TCPOLEN_MPTCP_MPC_ACK;
		} else {
			if (TCP_SKB_CB(skb)->tcp_flags & TCPHDR_ACK)
				expected_opsize = TCPOLEN_MPTCP_MPC_SYNACK;
			else
				expected_opsize = TCPOLEN_MPTCP_MPC_SYN;
		}
		if (opsize != expected_opsize)
			break;

		/* try to be gentle vs future versions on the initial syn */
		version = *ptr++ & MPTCP_VERSION_MASK;
		if (opsize != TCPOLEN_MPTCP_MPC_SYN) {
			if (version != MPTCP_SUPPORTED_VERSION)
				break;
		} else if (version < MPTCP_SUPPORTED_VERSION) {
			break;
		}

		flags = *ptr++;
		if (!mptcp_cap_flag_sha256(flags) ||
		    (flags & MPTCP_CAP_EXTENSIBILITY))
			break;

		/* RFC 6824, Section 3.1:
		 * "For the Checksum Required bit (labeled "A"), if either
		 * host requires the use of checksums, checksums MUST be used.
		 * In other words, the only way for checksums not to be used
		 * is if both hosts in their SYNs set A=0."
		 *
		 * Section 3.3.0:
		 * "If a checksum is not present when its use has been
		 * negotiated, the receiver MUST close the subflow with a RST as
		 * it is considered broken."
		 *
		 * We don't implement DSS checksum - fall back to TCP.
		 */
		if (flags & MPTCP_CAP_CHECKSUM_REQD)
			break;

		mp_opt->mp_capable = 1;
		if (opsize >= TCPOLEN_MPTCP_MPC_SYNACK) {
			mp_opt->sndr_key = get_unaligned_be64(ptr);
			ptr += 8;
		}
		if (opsize >= TCPOLEN_MPTCP_MPC_ACK) {
			mp_opt->rcvr_key = get_unaligned_be64(ptr);
			ptr += 8;
		}
		if (opsize == TCPOLEN_MPTCP_MPC_ACK_DATA) {
			/* Section 3.1.:
			 * "the data parameters in a MP_CAPABLE are semantically
			 * equivalent to those in a DSS option and can be used
			 * interchangeably."
			 */
			mp_opt->dss = 1;
			mp_opt->use_map = 1;
			mp_opt->mpc_map = 1;
			mp_opt->data_len = get_unaligned_be16(ptr);
			ptr += 2;
		}
		pr_debug("MP_CAPABLE version=%x, flags=%x, optlen=%d sndr=%llu, rcvr=%llu len=%d",
			 version, flags, opsize, mp_opt->sndr_key,
			 mp_opt->rcvr_key, mp_opt->data_len);
		break;

	case MPTCPOPT_MP_JOIN:
		mp_opt->mp_join = 1;
		if (opsize == TCPOLEN_MPTCP_MPJ_SYN) {
			mp_opt->backup = *ptr++ & MPTCPOPT_BACKUP;
			mp_opt->join_id = *ptr++;
			mp_opt->token = get_unaligned_be32(ptr);
			ptr += 4;
			mp_opt->nonce = get_unaligned_be32(ptr);
			ptr += 4;
			pr_debug("MP_JOIN bkup=%u, id=%u, token=%u, nonce=%u",
				 mp_opt->backup, mp_opt->join_id,
				 mp_opt->token, mp_opt->nonce);
		} else if (opsize == TCPOLEN_MPTCP_MPJ_SYNACK) {
			mp_opt->backup = *ptr++ & MPTCPOPT_BACKUP;
			mp_opt->join_id = *ptr++;
			mp_opt->thmac = get_unaligned_be64(ptr);
			ptr += 8;
			mp_opt->nonce = get_unaligned_be32(ptr);
			ptr += 4;
			pr_debug("MP_JOIN bkup=%u, id=%u, thmac=%llu, nonce=%u",
				 mp_opt->backup, mp_opt->join_id,
				 mp_opt->thmac, mp_opt->nonce);
		} else if (opsize == TCPOLEN_MPTCP_MPJ_ACK) {
			/* skip the subtype and reserved bits */
			ptr += 2;
			memcpy(mp_opt->hmac, ptr, MPTCPOPT_HMAC_LEN);
			pr_debug("MP_JOIN hmac");
		} else {
			pr_warn("MP_JOIN bad option size");
			mp_opt->mp_join = 0;
		}
		break;

	case MPTCPOPT_DSS:
		pr_debug("DSS");
		ptr++;

		/* we must clear 'mpc_map' to be able to detect MP_CAPABLE
		 * map vs DSS map in mptcp_incoming_options(), and reconstruct
		 * map info accordingly
		 */
		mp_opt->mpc_map = 0;
		flags = (*ptr++) & MPTCP_DSS_FLAG_MASK;
		mp_opt->data_fin = (flags & MPTCP_DSS_DATA_FIN) != 0;
		mp_opt->dsn64 = (flags & MPTCP_DSS_DSN64) != 0;
		mp_opt->use_map = (flags & MPTCP_DSS_HAS_MAP) != 0;
		mp_opt->ack64 = (flags & MPTCP_DSS_ACK64) != 0;
		mp_opt->use_ack = (flags & MPTCP_DSS_HAS_ACK);

		pr_debug("data_fin=%d dsn64=%d use_map=%d ack64=%d use_ack=%d",
			 mp_opt->data_fin, mp_opt->dsn64,
			 mp_opt->use_map, mp_opt->ack64,
			 mp_opt->use_ack);

		expected_opsize = TCPOLEN_MPTCP_DSS_BASE;

		if (mp_opt->use_ack) {
			if (mp_opt->ack64)
				expected_opsize += TCPOLEN_MPTCP_DSS_ACK64;
			else
				expected_opsize += TCPOLEN_MPTCP_DSS_ACK32;
		}

		if (mp_opt->use_map) {
			if (mp_opt->dsn64)
				expected_opsize += TCPOLEN_MPTCP_DSS_MAP64;
			else
				expected_opsize += TCPOLEN_MPTCP_DSS_MAP32;
		}

		/* RFC 6824, Section 3.3:
		 * If a checksum is present, but its use had
		 * not been negotiated in the MP_CAPABLE handshake,
		 * the checksum field MUST be ignored.
		 */
		if (opsize != expected_opsize &&
		    opsize != expected_opsize + TCPOLEN_MPTCP_DSS_CHECKSUM)
			break;

		mp_opt->dss = 1;

		if (mp_opt->use_ack) {
			if (mp_opt->ack64) {
				mp_opt->data_ack = get_unaligned_be64(ptr);
				ptr += 8;
			} else {
				mp_opt->data_ack = get_unaligned_be32(ptr);
				ptr += 4;
			}

			pr_debug("data_ack=%llu", mp_opt->data_ack);
		}

		if (mp_opt->use_map) {
			if (mp_opt->dsn64) {
				mp_opt->data_seq = get_unaligned_be64(ptr);
				ptr += 8;
			} else {
				mp_opt->data_seq = get_unaligned_be32(ptr);
				ptr += 4;
			}

			mp_opt->subflow_seq = get_unaligned_be32(ptr);
			ptr += 4;

			mp_opt->data_len = get_unaligned_be16(ptr);
			ptr += 2;

			pr_debug("data_seq=%llu subflow_seq=%u data_len=%u",
				 mp_opt->data_seq, mp_opt->subflow_seq,
				 mp_opt->data_len);
		}

		break;

	case MPTCPOPT_ADD_ADDR:
		mp_opt->echo = (*ptr++) & MPTCP_ADDR_ECHO;
		if (!mp_opt->echo) {
			if (opsize == TCPOLEN_MPTCP_ADD_ADDR ||
			    opsize == TCPOLEN_MPTCP_ADD_ADDR_PORT)
				mp_opt->addr.family = AF_INET;
#if IS_ENABLED(CONFIG_MPTCP_IPV6)
			else if (opsize == TCPOLEN_MPTCP_ADD_ADDR6 ||
				 opsize == TCPOLEN_MPTCP_ADD_ADDR6_PORT)
				mp_opt->addr.family = AF_INET6;
#endif
			else
				break;
		} else {
			if (opsize == TCPOLEN_MPTCP_ADD_ADDR_BASE ||
			    opsize == TCPOLEN_MPTCP_ADD_ADDR_BASE_PORT)
				mp_opt->addr.family = AF_INET;
#if IS_ENABLED(CONFIG_MPTCP_IPV6)
			else if (opsize == TCPOLEN_MPTCP_ADD_ADDR6_BASE ||
				 opsize == TCPOLEN_MPTCP_ADD_ADDR6_BASE_PORT)
				mp_opt->addr.family = AF_INET6;
#endif
			else
				break;
		}

		mp_opt->add_addr = 1;
		mp_opt->addr.id = *ptr++;
		if (mp_opt->addr.family == AF_INET) {
			memcpy((u8 *)&mp_opt->addr.addr.s_addr, (u8 *)ptr, 4);
			ptr += 4;
			if (opsize == TCPOLEN_MPTCP_ADD_ADDR_PORT ||
			    opsize == TCPOLEN_MPTCP_ADD_ADDR_BASE_PORT) {
				mp_opt->addr.port = htons(get_unaligned_be16(ptr));
				ptr += 2;
			}
		}
#if IS_ENABLED(CONFIG_MPTCP_IPV6)
		else {
			memcpy(mp_opt->addr.addr6.s6_addr, (u8 *)ptr, 16);
			ptr += 16;
			if (opsize == TCPOLEN_MPTCP_ADD_ADDR6_PORT ||
			    opsize == TCPOLEN_MPTCP_ADD_ADDR6_BASE_PORT) {
				mp_opt->addr.port = htons(get_unaligned_be16(ptr));
				ptr += 2;
			}
		}
#endif
		if (!mp_opt->echo) {
			mp_opt->ahmac = get_unaligned_be64(ptr);
			ptr += 8;
		}
		pr_debug("ADD_ADDR%s: id=%d, ahmac=%llu, echo=%d, port=%d",
			 (mp_opt->addr.family == AF_INET6) ? "6" : "",
			 mp_opt->addr.id, mp_opt->ahmac, mp_opt->echo, ntohs(mp_opt->addr.port));
		break;

	case MPTCPOPT_RM_ADDR:
		if (opsize < TCPOLEN_MPTCP_RM_ADDR_BASE + 1 ||
		    opsize > TCPOLEN_MPTCP_RM_ADDR_BASE + MPTCP_RM_IDS_MAX)
			break;

		ptr++;

		mp_opt->rm_addr = 1;
		mp_opt->rm_list.nr = opsize - TCPOLEN_MPTCP_RM_ADDR_BASE;
		for (i = 0; i < mp_opt->rm_list.nr; i++)
			mp_opt->rm_list.ids[i] = *ptr++;
		pr_debug("RM_ADDR: rm_list_nr=%d", mp_opt->rm_list.nr);
		break;

	case MPTCPOPT_MP_PRIO:
		if (opsize != TCPOLEN_MPTCP_PRIO)
			break;

		mp_opt->mp_prio = 1;
		mp_opt->backup = *ptr++ & MPTCP_PRIO_BKUP;
		pr_debug("MP_PRIO: prio=%d", mp_opt->backup);
		break;

	case MPTCPOPT_MP_FASTCLOSE:
		if (opsize != TCPOLEN_MPTCP_FASTCLOSE)
			break;

		ptr += 2;
		mp_opt->rcvr_key = get_unaligned_be64(ptr);
		ptr += 8;
		mp_opt->fastclose = 1;
		break;

	case MPTCPOPT_RST:
		if (opsize != TCPOLEN_MPTCP_RST)
			break;

		if (!(TCP_SKB_CB(skb)->tcp_flags & TCPHDR_RST))
			break;
		mp_opt->reset = 1;
		flags = *ptr++;
		mp_opt->reset_transient = flags & MPTCP_RST_TRANSIENT;
		mp_opt->reset_reason = *ptr;
		break;

	default:
		break;
	}
}
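
/* Walk the TCP option space of @skb and dispatch every MPTCP option
 * found to mptcp_parse_option(), after resetting @mp_opt to a clean
 * "nothing received" state.
 */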
void mptcp_get_options(const struct sk_buff *skb,
		       struct mptcp_options_received *mp_opt)
{
	const struct tcphdr *th = tcp_hdr(skb);
	const unsigned char *ptr;
	int length;

	/* initialize option status */
	mp_opt->mp_capable = 0;
	mp_opt->mp_join = 0;
	mp_opt->add_addr = 0;
	mp_opt->ahmac = 0;
	mp_opt->fastclose = 0;
	mp_opt->addr.port = 0;
	mp_opt->rm_addr = 0;
	mp_opt->dss = 0;
	mp_opt->mp_prio = 0;
	mp_opt->reset = 0;

	length = (th->doff * 4) - sizeof(struct tcphdr);
	ptr = (const unsigned char *)(th + 1);

	while (length > 0) {
		int opcode = *ptr++;
		int opsize;

		switch (opcode) {
		case TCPOPT_EOL:
			return;
		case TCPOPT_NOP:	/* Ref: RFC 793 section 3.1 */
			length--;
			continue;
		default:
			opsize = *ptr++;
			if (opsize < 2) /* "silly options" */
				return;
			if (opsize > length)
				return;	/* don't parse partial options */
			if (opcode == TCPOPT_MPTCP)
				mptcp_parse_option(skb, ptr, opsize, mp_opt);
			ptr += opsize - 2;
			length -= opsize;
		}
	}
}
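
/* Build the suboption for an outgoing SYN: MP_CAPABLE when a new MPTCP
 * connection is being requested, MP_JOIN when this subflow joins an
 * existing one.
 */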
bool mptcp_syn_options(struct sock *sk, const struct sk_buff *skb,
		       unsigned int *size, struct mptcp_out_options *opts)
{
	struct mptcp_subflow_context *subflow = mptcp_subflow_ctx(sk);

	/* we will use snd_isn to detect first pkt [re]transmission
	 * in mptcp_established_options_mp()
	 */
	subflow->snd_isn = TCP_SKB_CB(skb)->end_seq;
	if (subflow->request_mptcp) {
		opts->suboptions = OPTION_MPTCP_MPC_SYN;
		*size = TCPOLEN_MPTCP_MPC_SYN;
		return true;
	} else if (subflow->request_join) {
		pr_debug("remote_token=%u, nonce=%u", subflow->remote_token,
			 subflow->local_nonce);
		opts->suboptions = OPTION_MPTCP_MPJ_SYN;
		opts->join_id = subflow->local_id;
		opts->token = subflow->remote_token;
		opts->nonce = subflow->local_nonce;
		opts->backup = subflow->request_bkup;
		*size = TCPOLEN_MPTCP_MPJ_SYN;
		return true;
	}
	return false;
}

/* MP_JOIN client subflow must wait for 4th ack before sending any data:
 * TCP can't schedule delack timer before the subflow is fully established.
 * MPTCP uses the delack timer to do 3rd ack retransmissions
 */
static void schedule_3rdack_retransmission(struct sock *sk)
{
	struct inet_connection_sock *icsk = inet_csk(sk);
	struct tcp_sock *tp = tcp_sk(sk);
	unsigned long timeout;

	/* reschedule with a timeout above RTT, as we must look only for drop */
	if (tp->srtt_us)
		timeout = tp->srtt_us << 1;
	else
		timeout = TCP_TIMEOUT_INIT;

	WARN_ON_ONCE(icsk->icsk_ack.pending & ICSK_ACK_TIMER);
	icsk->icsk_ack.pending |= ICSK_ACK_SCHED | ICSK_ACK_TIMER;
	icsk->icsk_ack.timeout = timeout;
	sk_reset_timer(sk, &icsk->icsk_delack_timer, timeout);
}

static void clear_3rdack_retransmission(struct sock *sk)
{
	struct inet_connection_sock *icsk = inet_csk(sk);

	sk_stop_timer(sk, &icsk->icsk_delack_timer);
	icsk->icsk_ack.timeout = 0;
	icsk->icsk_ack.ato = 0;
	icsk->icsk_ack.pending &= ~(ICSK_ACK_SCHED | ICSK_ACK_TIMER);
}

static bool mptcp_established_options_mp(struct sock *sk, struct sk_buff *skb,
					 bool snd_data_fin_enable,
					 unsigned int *size,
					 unsigned int remaining,
					 struct mptcp_out_options *opts)
{
	struct mptcp_subflow_context *subflow = mptcp_subflow_ctx(sk);
	struct mptcp_ext *mpext;
	unsigned int data_len;

	/* When skb is not available, we better over-estimate the emitted
	 * options len. A full DSS option (28 bytes) is longer than
	 * TCPOLEN_MPTCP_MPC_ACK_DATA(22) or TCPOLEN_MPTCP_MPJ_ACK(24), so
	 * tell the caller to defer the estimate to
	 * mptcp_established_options_dss(), which will reserve enough space.
	 */
	if (!skb)
		return false;

	/* MPC/MPJ needed only on 3rd ack packet, DATA_FIN and TCP shutdown take precedence */
	if (subflow->fully_established || snd_data_fin_enable ||
	    subflow->snd_isn != TCP_SKB_CB(skb)->seq ||
	    sk->sk_state != TCP_ESTABLISHED)
		return false;

	if (subflow->mp_capable) {
		mpext = mptcp_get_ext(skb);
		data_len = mpext ? mpext->data_len : 0;

		/* we will check ext_copy.data_len in mptcp_write_options() to
		 * discriminate between TCPOLEN_MPTCP_MPC_ACK_DATA and
		 * TCPOLEN_MPTCP_MPC_ACK
		 */
		opts->ext_copy.data_len = data_len;
		opts->suboptions = OPTION_MPTCP_MPC_ACK;
		opts->sndr_key = subflow->local_key;
		opts->rcvr_key = subflow->remote_key;

		/* Section 3.1.
		 * The MP_CAPABLE option is carried on the SYN, SYN/ACK, and ACK
		 * packets that start the first subflow of an MPTCP connection,
		 * as well as the first packet that carries data
		 */
		if (data_len > 0)
			*size = ALIGN(TCPOLEN_MPTCP_MPC_ACK_DATA, 4);
		else
			*size = TCPOLEN_MPTCP_MPC_ACK;

		pr_debug("subflow=%p, local_key=%llu, remote_key=%llu map_len=%d",
			 subflow, subflow->local_key, subflow->remote_key,
			 data_len);

		return true;
	} else if (subflow->mp_join) {
		opts->suboptions = OPTION_MPTCP_MPJ_ACK;
		memcpy(opts->hmac, subflow->hmac, MPTCPOPT_HMAC_LEN);
		*size = TCPOLEN_MPTCP_MPJ_ACK;
		pr_debug("subflow=%p", subflow);

		schedule_3rdack_retransmission(sk);
		return true;
	}
	return false;
}
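
/* Set up, or extend, the DSS mapping carried by @ext so that it also
 * covers the DATA_FIN sequence number, which consumes one byte of the
 * MPTCP-level sequence space.
 */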
static void mptcp_write_data_fin(struct mptcp_subflow_context *subflow,
				 struct sk_buff *skb, struct mptcp_ext *ext)
{
	/* The write_seq value has already been incremented, so the actual
	 * sequence number for the DATA_FIN is one less.
	 */
	u64 data_fin_tx_seq = READ_ONCE(mptcp_sk(subflow->conn)->write_seq) - 1;

	if (!ext->use_map || !skb->len) {
		/* RFC6824 requires a DSS mapping with specific values
		 * if DATA_FIN is set but no data payload is mapped
		 */
		ext->data_fin = 1;
		ext->use_map = 1;
		ext->dsn64 = 1;
		ext->data_seq = data_fin_tx_seq;
		ext->subflow_seq = 0;
		ext->data_len = 1;
	} else if (ext->data_seq + ext->data_len == data_fin_tx_seq) {
		/* If there's an existing DSS mapping and it is the
		 * final mapping, DATA_FIN consumes 1 additional byte of
		 * mapping space.
		 */
		ext->data_fin = 1;
		ext->data_len++;
	}
}

static bool mptcp_established_options_dss(struct sock *sk, struct sk_buff *skb,
					  bool snd_data_fin_enable,
					  unsigned int *size,
					  unsigned int remaining,
					  struct mptcp_out_options *opts)
{
	struct mptcp_subflow_context *subflow = mptcp_subflow_ctx(sk);
	struct mptcp_sock *msk = mptcp_sk(subflow->conn);
	unsigned int dss_size = 0;
	struct mptcp_ext *mpext;
	unsigned int ack_size;
	bool ret = false;
	u64 ack_seq;

	mpext = skb ? mptcp_get_ext(skb) : NULL;

	if (!skb || (mpext && mpext->use_map) || snd_data_fin_enable) {
		unsigned int map_size;

		map_size = TCPOLEN_MPTCP_DSS_BASE + TCPOLEN_MPTCP_DSS_MAP64;

		remaining -= map_size;
		dss_size = map_size;
		if (mpext)
			opts->ext_copy = *mpext;

		if (skb && snd_data_fin_enable)
			mptcp_write_data_fin(subflow, skb, &opts->ext_copy);
		ret = true;
	}

	/* passive sockets msk will set the 'can_ack' after accept(), even
	 * if the first subflow may already have the remote key handy
	 */
	opts->ext_copy.use_ack = 0;
	if (!READ_ONCE(msk->can_ack)) {
		*size = ALIGN(dss_size, 4);
		return ret;
	}

	ack_seq = READ_ONCE(msk->ack_seq);
	if (READ_ONCE(msk->use_64bit_ack)) {
		ack_size = TCPOLEN_MPTCP_DSS_ACK64;
		opts->ext_copy.data_ack = ack_seq;
		opts->ext_copy.ack64 = 1;
	} else {
		ack_size = TCPOLEN_MPTCP_DSS_ACK32;
		opts->ext_copy.data_ack32 = (uint32_t)ack_seq;
		opts->ext_copy.ack64 = 0;
	}
	opts->ext_copy.use_ack = 1;
	WRITE_ONCE(msk->old_wspace, __mptcp_space((struct sock *)msk));

	/* Add kind/length/subtype/flag overhead if mapping is not populated */
	if (dss_size == 0)
		ack_size += TCPOLEN_MPTCP_DSS_BASE;

	dss_size += ack_size;

	*size = ALIGN(dss_size, 4);
	return true;
}
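
/* Compute the ADD_ADDR HMAC: SHA-256 keyed with both endpoints' keys
 * over the address id, the address itself and the port; only the
 * rightmost 64 bits are carried in the option (RFC 8684, Section 3.4.1).
 */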
static u64 add_addr_generate_hmac(u64 key1, u64 key2,
				  struct mptcp_addr_info *addr)
{
	u16 port = ntohs(addr->port);
	u8 hmac[SHA256_DIGEST_SIZE];
	u8 msg[19];
	int i = 0;

	msg[i++] = addr->id;
	if (addr->family == AF_INET) {
		memcpy(&msg[i], &addr->addr.s_addr, 4);
		i += 4;
	}
#if IS_ENABLED(CONFIG_MPTCP_IPV6)
	else if (addr->family == AF_INET6) {
		memcpy(&msg[i], &addr->addr6.s6_addr, 16);
		i += 16;
	}
#endif
	msg[i++] = port >> 8;
	msg[i++] = port & 0xFF;

	mptcp_crypto_hmac_sha(key1, key2, msg, i, hmac);

	return get_unaligned_be64(&hmac[SHA256_DIGEST_SIZE - sizeof(u64)]);
}

static bool mptcp_established_options_add_addr(struct sock *sk, struct sk_buff *skb,
					       unsigned int *size,
					       unsigned int remaining,
					       struct mptcp_out_options *opts)
{
	struct mptcp_subflow_context *subflow = mptcp_subflow_ctx(sk);
	struct mptcp_sock *msk = mptcp_sk(subflow->conn);
	bool drop_other_suboptions = false;
	unsigned int opt_size = *size;
	bool echo;
	bool port;
	int len;

	if ((mptcp_pm_should_add_signal_ipv6(msk) ||
	     mptcp_pm_should_add_signal_port(msk) ||
	     mptcp_pm_should_add_signal_echo(msk)) &&
	    skb && skb_is_tcp_pure_ack(skb)) {
		pr_debug("drop other suboptions");
		opts->suboptions = 0;
		opts->ext_copy.use_ack = 0;
		opts->ext_copy.use_map = 0;
		remaining += opt_size;
		drop_other_suboptions = true;
	}

	if (!mptcp_pm_should_add_signal(msk) ||
	    !(mptcp_pm_add_addr_signal(msk, remaining, &opts->addr, &echo, &port)))
		return false;

	len = mptcp_add_addr_len(opts->addr.family, echo, port);
	if (remaining < len)
		return false;

	*size = len;
	if (drop_other_suboptions)
		*size -= opt_size;
	opts->suboptions |= OPTION_MPTCP_ADD_ADDR;
	if (!echo) {
		opts->ahmac = add_addr_generate_hmac(msk->local_key,
						     msk->remote_key,
						     &opts->addr);
	}
	pr_debug("addr_id=%d, ahmac=%llu, echo=%d, port=%d",
		 opts->addr.id, opts->ahmac, echo, ntohs(opts->addr.port));

	return true;
}

static bool mptcp_established_options_rm_addr(struct sock *sk,
					      unsigned int *size,
					      unsigned int remaining,
					      struct mptcp_out_options *opts)
{
	struct mptcp_subflow_context *subflow = mptcp_subflow_ctx(sk);
	struct mptcp_sock *msk = mptcp_sk(subflow->conn);
	struct mptcp_rm_list rm_list;
	int i, len;

	if (!mptcp_pm_should_rm_signal(msk) ||
	    !(mptcp_pm_rm_addr_signal(msk, remaining, &rm_list)))
		return false;

	len = mptcp_rm_addr_len(&rm_list);
	if (len < 0)
		return false;
	if (remaining < len)
		return false;

	*size = len;
	opts->suboptions |= OPTION_MPTCP_RM_ADDR;
	opts->rm_list = rm_list;

	for (i = 0; i < opts->rm_list.nr; i++)
		pr_debug("rm_list_ids[%d]=%d", i, opts->rm_list.ids[i]);

	return true;
}

static bool mptcp_established_options_mp_prio(struct sock *sk,
					      unsigned int *size,
					      unsigned int remaining,
					      struct mptcp_out_options *opts)
{
	struct mptcp_subflow_context *subflow = mptcp_subflow_ctx(sk);

	if (!subflow->send_mp_prio)
		return false;

	/* account for the trailing 'nop' option */
	if (remaining < TCPOLEN_MPTCP_PRIO_ALIGN)
		return false;

	*size = TCPOLEN_MPTCP_PRIO_ALIGN;
	opts->suboptions |= OPTION_MPTCP_PRIO;
	opts->backup = subflow->request_bkup;

	pr_debug("prio=%d", opts->backup);

	return true;
}

static noinline void mptcp_established_options_rst(struct sock *sk, struct sk_buff *skb,
						   unsigned int *size,
						   unsigned int remaining,
						   struct mptcp_out_options *opts)
{
	const struct mptcp_subflow_context *subflow = mptcp_subflow_ctx(sk);

	if (remaining < TCPOLEN_MPTCP_RST)
		return;

	*size = TCPOLEN_MPTCP_RST;
	opts->suboptions |= OPTION_MPTCP_RST;
	opts->reset_transient = subflow->reset_transient;
	opts->reset_reason = subflow->reset_reason;
}
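
/* Collect all suboptions for an established subflow, in priority order:
 * MP_RST alone on RST packets, then MPC/MPJ ack or DSS, then ADD_ADDR
 * or RM_ADDR, and finally MP_PRIO, while the option space lasts.
 */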
bool mptcp_established_options(struct sock *sk, struct sk_buff *skb,
			       unsigned int *size, unsigned int remaining,
			       struct mptcp_out_options *opts)
{
	struct mptcp_subflow_context *subflow = mptcp_subflow_ctx(sk);
	struct mptcp_sock *msk = mptcp_sk(subflow->conn);
	unsigned int opt_size = 0;
	bool snd_data_fin;
	bool ret = false;

	opts->suboptions = 0;

	if (unlikely(__mptcp_check_fallback(msk)))
		return false;

	if (unlikely(skb && TCP_SKB_CB(skb)->tcp_flags & TCPHDR_RST)) {
		mptcp_established_options_rst(sk, skb, size, remaining, opts);
		return true;
	}

	snd_data_fin = mptcp_data_fin_enabled(msk);
	if (mptcp_established_options_mp(sk, skb, snd_data_fin, &opt_size, remaining, opts))
		ret = true;
	else if (mptcp_established_options_dss(sk, skb, snd_data_fin, &opt_size, remaining, opts))
		ret = true;

	/* we reserved enough space for the above options, and exceeding the
	 * TCP option space would be fatal
	 */
	if (WARN_ON_ONCE(opt_size > remaining))
		return false;

	*size = opt_size;
	remaining -= opt_size;
	if (mptcp_established_options_add_addr(sk, skb, &opt_size, remaining, opts)) {
		*size += opt_size;
		remaining -= opt_size;
		ret = true;
	} else if (mptcp_established_options_rm_addr(sk, &opt_size, remaining, opts)) {
		*size += opt_size;
		remaining -= opt_size;
		ret = true;
	}

	if (mptcp_established_options_mp_prio(sk, &opt_size, remaining, opts)) {
		*size += opt_size;
		remaining -= opt_size;
		ret = true;
	}

	return ret;
}

bool mptcp_synack_options(const struct request_sock *req, unsigned int *size,
			  struct mptcp_out_options *opts)
{
	struct mptcp_subflow_request_sock *subflow_req = mptcp_subflow_rsk(req);

	if (subflow_req->mp_capable) {
		opts->suboptions = OPTION_MPTCP_MPC_SYNACK;
		opts->sndr_key = subflow_req->local_key;
		*size = TCPOLEN_MPTCP_MPC_SYNACK;
		pr_debug("subflow_req=%p, local_key=%llu",
			 subflow_req, subflow_req->local_key);
		return true;
	} else if (subflow_req->mp_join) {
		opts->suboptions = OPTION_MPTCP_MPJ_SYNACK;
		opts->backup = subflow_req->backup;
		opts->join_id = subflow_req->local_id;
		opts->thmac = subflow_req->thmac;
		opts->nonce = subflow_req->local_nonce;
		pr_debug("req=%p, bkup=%u, id=%u, thmac=%llu, nonce=%u",
			 subflow_req, opts->backup, opts->join_id,
			 opts->thmac, opts->nonce);
		*size = TCPOLEN_MPTCP_MPJ_SYNACK;
		return true;
	}
	return false;
}

static bool check_fully_established(struct mptcp_sock *msk, struct sock *ssk,
				    struct mptcp_subflow_context *subflow,
				    struct sk_buff *skb,
				    struct mptcp_options_received *mp_opt)
{
	/* here we can process OoO, in-window pkts, only in-sequence 4th ack
	 * will make the subflow fully established
	 */
	if (likely(subflow->fully_established)) {
		/* on passive sockets, check for 3rd ack retransmission
		 * note that msk is always set by subflow_syn_recv_sock()
		 * for mp_join subflows
		 */
		if (TCP_SKB_CB(skb)->seq == subflow->ssn_offset + 1 &&
		    TCP_SKB_CB(skb)->end_seq == TCP_SKB_CB(skb)->seq &&
		    subflow->mp_join && mp_opt->mp_join &&
		    READ_ONCE(msk->pm.server_side))
			tcp_send_ack(ssk);
		goto fully_established;
	}

	/* we must process OoO packets before the first subflow is fully
	 * established. OoO packets are instead a protocol violation
	 * for MP_JOIN subflows as the peer must not send any data
	 * before receiving the fourth ack - cfr. RFC 8684 section 3.2.
	 */
	if (TCP_SKB_CB(skb)->seq != subflow->ssn_offset + 1) {
		if (subflow->mp_join)
			goto reset;
		return subflow->mp_capable;
	}

	if (mp_opt->dss && mp_opt->use_ack) {
		/* subflows are fully established as soon as we get any
		 * additional ack.
		 */
		subflow->fully_established = 1;
		WRITE_ONCE(msk->fully_established, true);
		goto fully_established;
	}

	if (mp_opt->add_addr) {
		WRITE_ONCE(msk->fully_established, true);
		goto fully_established;
	}

	/* If the first established packet does not contain MP_CAPABLE + data
	 * then fallback to TCP. Fallback scenarios require a reset for
	 * MP_JOIN subflows.
	 */
	if (!mp_opt->mp_capable) {
		if (subflow->mp_join)
			goto reset;
		subflow->mp_capable = 0;
		pr_fallback(msk);
		__mptcp_do_fallback(msk);
		return false;
	}

	if (unlikely(!READ_ONCE(msk->pm.server_side)))
		pr_warn_once("bogus mpc option on established client sk");
	mptcp_subflow_fully_established(subflow, mp_opt);

fully_established:
	/* if the subflow is not already linked into the conn_list, we can't
	 * notify the PM: this subflow is still on the listener queue
	 * and the PM possibly acquiring the subflow lock could race with
	 * the listener close
	 */
	if (likely(subflow->pm_notified) || list_empty(&subflow->node))
		return true;

	subflow->pm_notified = 1;
	if (subflow->mp_join) {
		clear_3rdack_retransmission(ssk);
		mptcp_pm_subflow_established(msk);
	} else {
		mptcp_pm_fully_established(msk, ssk, GFP_ATOMIC);
	}
	return true;

reset:
	mptcp_subflow_reset(ssk);
	return false;
}
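
/* Expand a 32-bit sequence/ack number received on the wire to the full
 * 64-bit space, using @old_ack for the most significant bits and
 * accounting for 32-bit wrap-around.
 */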
static u64 expand_ack(u64 old_ack, u64 cur_ack, bool use_64bit)
{
	u32 old_ack32, cur_ack32;

	if (use_64bit)
		return cur_ack;

	old_ack32 = (u32)old_ack;
	cur_ack32 = (u32)cur_ack;
	cur_ack = (old_ack & GENMASK_ULL(63, 32)) + cur_ack32;
	if (unlikely(before(cur_ack32, old_ack32)))
		return cur_ack + (1LL << 32);

	return cur_ack;
}

static void ack_update_msk(struct mptcp_sock *msk,
			   struct sock *ssk,
			   struct mptcp_options_received *mp_opt)
{
	u64 new_wnd_end, new_snd_una, snd_nxt = READ_ONCE(msk->snd_nxt);
	struct sock *sk = (struct sock *)msk;
	u64 old_snd_una;

	mptcp_data_lock(sk);

	/* avoid ack expansion on update conflict, to reduce the risk of
	 * wrongly expanding to a future ack sequence number, which is way
	 * more dangerous than missing an ack
	 */
	old_snd_una = msk->snd_una;
	new_snd_una = expand_ack(old_snd_una, mp_opt->data_ack, mp_opt->ack64);

	/* ACK for data not even sent yet? Ignore. */
	if (after64(new_snd_una, snd_nxt))
		new_snd_una = old_snd_una;

	new_wnd_end = new_snd_una + tcp_sk(ssk)->snd_wnd;

	if (after64(new_wnd_end, msk->wnd_end))
		msk->wnd_end = new_wnd_end;

	/* this assumes mptcp_incoming_options() is invoked after tcp_ack() */
	if (after64(msk->wnd_end, READ_ONCE(msk->snd_nxt)))
		__mptcp_check_push(sk, ssk);

	if (after64(new_snd_una, old_snd_una)) {
		msk->snd_una = new_snd_una;
		__mptcp_data_acked(sk);
	}
	mptcp_data_unlock(sk);

	trace_ack_update_msk(mp_opt->data_ack,
			     old_snd_una, new_snd_una,
			     new_wnd_end, msk->wnd_end);
}

bool mptcp_update_rcv_data_fin(struct mptcp_sock *msk, u64 data_fin_seq, bool use_64bit)
{
	/* Skip if DATA_FIN was already received.
	 * If updating simultaneously with the recvmsg loop, values
	 * should match. If they mismatch, the peer is misbehaving and
	 * we will prefer the most recent information.
	 */
	if (READ_ONCE(msk->rcv_data_fin))
		return false;

	WRITE_ONCE(msk->rcv_data_fin_seq,
		   expand_ack(READ_ONCE(msk->ack_seq), data_fin_seq, use_64bit));
	WRITE_ONCE(msk->rcv_data_fin, 1);

	return true;
}

static bool add_addr_hmac_valid(struct mptcp_sock *msk,
				struct mptcp_options_received *mp_opt)
{
	u64 hmac = 0;

	if (mp_opt->echo)
		return true;

	hmac = add_addr_generate_hmac(msk->remote_key,
				      msk->local_key,
				      &mp_opt->addr);

	pr_debug("msk=%p, ahmac=%llu, mp_opt->ahmac=%llu\n",
		 msk, (unsigned long long)hmac,
		 (unsigned long long)mp_opt->ahmac);

	return hmac == mp_opt->ahmac;
}
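
/* Main ingress hook, called by the TCP input path for every skb
 * received on an MPTCP subflow: parse the MPTCP options and propagate
 * their effects to the owning MPTCP socket.
 */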
void mptcp_incoming_options(struct sock *sk, struct sk_buff *skb)
{
	struct mptcp_subflow_context *subflow = mptcp_subflow_ctx(sk);
	struct mptcp_sock *msk = mptcp_sk(subflow->conn);
	struct mptcp_options_received mp_opt;
	struct mptcp_ext *mpext;

	if (__mptcp_check_fallback(msk)) {
		/* Keep it simple and unconditionally trigger send data cleanup and
		 * pending queue spooling. We will need to acquire the data lock
		 * for more accurate checks, and once the lock is acquired, such
		 * helpers are cheap.
		 */
		mptcp_data_lock(subflow->conn);
		if (sk_stream_memory_free(sk))
			__mptcp_check_push(subflow->conn, sk);
		__mptcp_data_acked(subflow->conn);
		mptcp_data_unlock(subflow->conn);
		return;
	}

	mptcp_get_options(skb, &mp_opt);
	if (!check_fully_established(msk, sk, subflow, skb, &mp_opt))
		return;

	if (mp_opt.fastclose &&
	    msk->local_key == mp_opt.rcvr_key) {
		WRITE_ONCE(msk->rcv_fastclose, true);
		mptcp_schedule_work((struct sock *)msk);
	}

	if (mp_opt.add_addr && add_addr_hmac_valid(msk, &mp_opt)) {
		if (!mp_opt.echo) {
			mptcp_pm_add_addr_received(msk, &mp_opt.addr);
			MPTCP_INC_STATS(sock_net(sk), MPTCP_MIB_ADDADDR);
		} else {
			mptcp_pm_add_addr_echoed(msk, &mp_opt.addr);
			mptcp_pm_del_add_timer(msk, &mp_opt.addr);
			MPTCP_INC_STATS(sock_net(sk), MPTCP_MIB_ECHOADD);
		}

		if (mp_opt.addr.port)
			MPTCP_INC_STATS(sock_net(sk), MPTCP_MIB_PORTADD);

		mp_opt.add_addr = 0;
	}

	if (mp_opt.rm_addr) {
		mptcp_pm_rm_addr_received(msk, &mp_opt.rm_list);
		mp_opt.rm_addr = 0;
	}

	if (mp_opt.mp_prio) {
		mptcp_pm_mp_prio_received(sk, mp_opt.backup);
		MPTCP_INC_STATS(sock_net(sk), MPTCP_MIB_MPPRIORX);
		mp_opt.mp_prio = 0;
	}

	if (mp_opt.reset) {
		subflow->reset_seen = 1;
		subflow->reset_reason = mp_opt.reset_reason;
		subflow->reset_transient = mp_opt.reset_transient;
	}

	if (!mp_opt.dss)
		return;

	/* we can't wait for recvmsg() to update the ack_seq, otherwise
	 * mono-directional flows would get stuck
	 */
	if (mp_opt.use_ack)
		ack_update_msk(msk, sk, &mp_opt);

	/* Zero-data-length packets are dropped by the caller and not
	 * propagated to the MPTCP layer, so the skb extension does not
	 * need to be allocated or populated. DATA_FIN information, if
	 * present, needs to be updated here before the skb is freed.
	 */
	if (TCP_SKB_CB(skb)->seq == TCP_SKB_CB(skb)->end_seq) {
		if (mp_opt.data_fin && mp_opt.data_len == 1 &&
		    mptcp_update_rcv_data_fin(msk, mp_opt.data_seq, mp_opt.dsn64) &&
		    schedule_work(&msk->work))
			sock_hold(subflow->conn);

		return;
	}

	mpext = skb_ext_add(skb, SKB_EXT_MPTCP);
	if (!mpext)
		return;

	memset(mpext, 0, sizeof(*mpext));

	if (mp_opt.use_map) {
		if (mp_opt.mpc_map) {
			/* this is an MP_CAPABLE carrying MPTCP data
			 * we know this maps the first chunk of data
			 */
			mptcp_crypto_key_sha(subflow->remote_key, NULL,
					     &mpext->data_seq);
			mpext->data_seq++;
			mpext->subflow_seq = 1;
			mpext->dsn64 = 1;
			mpext->mpc_map = 1;
			mpext->data_fin = 0;
		} else {
			mpext->data_seq = mp_opt.data_seq;
			mpext->subflow_seq = mp_opt.subflow_seq;
			mpext->dsn64 = mp_opt.dsn64;
			mpext->data_fin = mp_opt.data_fin;
		}
		mpext->data_len = mp_opt.data_len;
		mpext->use_map = 1;
	}
}
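
/* Track the right-most edge (ack_seq + window) of the receive window
 * ever announced on behalf of the MPTCP connection.
 */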
static void mptcp_set_rwin(const struct tcp_sock *tp)
{
	const struct sock *ssk = (const struct sock *)tp;
	const struct mptcp_subflow_context *subflow;
	struct mptcp_sock *msk;
	u64 ack_seq;

	subflow = mptcp_subflow_ctx(ssk);
	msk = mptcp_sk(subflow->conn);

	ack_seq = READ_ONCE(msk->ack_seq) + tp->rcv_wnd;

	if (after64(ack_seq, READ_ONCE(msk->rcv_wnd_sent)))
		WRITE_ONCE(msk->rcv_wnd_sent, ack_seq);
}
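
/* Emit the previously computed suboptions into the TCP option space at
 * @ptr; the layout must match the sizes reserved by the option
 * selection helpers above.
 */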
void mptcp_write_options(__be32 *ptr, const struct tcp_sock *tp,
			 struct mptcp_out_options *opts)
{
	if ((OPTION_MPTCP_MPC_SYN | OPTION_MPTCP_MPC_SYNACK |
	     OPTION_MPTCP_MPC_ACK) & opts->suboptions) {
		u8 len;

		if (OPTION_MPTCP_MPC_SYN & opts->suboptions)
			len = TCPOLEN_MPTCP_MPC_SYN;
		else if (OPTION_MPTCP_MPC_SYNACK & opts->suboptions)
			len = TCPOLEN_MPTCP_MPC_SYNACK;
		else if (opts->ext_copy.data_len)
			len = TCPOLEN_MPTCP_MPC_ACK_DATA;
		else
			len = TCPOLEN_MPTCP_MPC_ACK;

		*ptr++ = mptcp_option(MPTCPOPT_MP_CAPABLE, len,
				      MPTCP_SUPPORTED_VERSION,
				      MPTCP_CAP_HMAC_SHA256);

		if (!((OPTION_MPTCP_MPC_SYNACK | OPTION_MPTCP_MPC_ACK) &
		      opts->suboptions))
			goto mp_capable_done;

		put_unaligned_be64(opts->sndr_key, ptr);
		ptr += 2;
		if (!((OPTION_MPTCP_MPC_ACK) & opts->suboptions))
			goto mp_capable_done;

		put_unaligned_be64(opts->rcvr_key, ptr);
		ptr += 2;
		if (!opts->ext_copy.data_len)
			goto mp_capable_done;

		put_unaligned_be32(opts->ext_copy.data_len << 16 |
				   TCPOPT_NOP << 8 | TCPOPT_NOP, ptr);
		ptr += 1;
	}

mp_capable_done:
	if (OPTION_MPTCP_ADD_ADDR & opts->suboptions) {
		u8 len = TCPOLEN_MPTCP_ADD_ADDR_BASE;
		u8 echo = MPTCP_ADDR_ECHO;

#if IS_ENABLED(CONFIG_MPTCP_IPV6)
		if (opts->addr.family == AF_INET6)
			len = TCPOLEN_MPTCP_ADD_ADDR6_BASE;
#endif

		if (opts->addr.port)
			len += TCPOLEN_MPTCP_PORT_LEN;

		if (opts->ahmac) {
			len += sizeof(opts->ahmac);
			echo = 0;
		}

		*ptr++ = mptcp_option(MPTCPOPT_ADD_ADDR,
				      len, echo, opts->addr.id);
		if (opts->addr.family == AF_INET) {
			memcpy((u8 *)ptr, (u8 *)&opts->addr.addr.s_addr, 4);
			ptr += 1;
		}
#if IS_ENABLED(CONFIG_MPTCP_IPV6)
		else if (opts->addr.family == AF_INET6) {
			memcpy((u8 *)ptr, opts->addr.addr6.s6_addr, 16);
			ptr += 4;
		}
#endif

		if (!opts->addr.port) {
			if (opts->ahmac) {
				put_unaligned_be64(opts->ahmac, ptr);
				ptr += 2;
			}
		} else {
			u16 port = ntohs(opts->addr.port);

			if (opts->ahmac) {
				u8 *bptr = (u8 *)ptr;

				put_unaligned_be16(port, bptr);
				bptr += 2;
				put_unaligned_be64(opts->ahmac, bptr);
				bptr += 8;
				put_unaligned_be16(TCPOPT_NOP << 8 |
						   TCPOPT_NOP, bptr);

				ptr += 3;
			} else {
				put_unaligned_be32(port << 16 |
						   TCPOPT_NOP << 8 |
						   TCPOPT_NOP, ptr);
				ptr += 1;
			}
		}
	}

	if (OPTION_MPTCP_RM_ADDR & opts->suboptions) {
		u8 i = 1;

		*ptr++ = mptcp_option(MPTCPOPT_RM_ADDR,
				      TCPOLEN_MPTCP_RM_ADDR_BASE + opts->rm_list.nr,
				      0, opts->rm_list.ids[0]);
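
		/* the first address id rides in the base option; pack the
		 * remaining ids four per 32-bit word, padding the tail with
		 * TCPOPT_NOP
		 */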
		while (i < opts->rm_list.nr) {
			u8 id1, id2, id3, id4;

			id1 = opts->rm_list.ids[i];
			id2 = i + 1 < opts->rm_list.nr ? opts->rm_list.ids[i + 1] : TCPOPT_NOP;
			id3 = i + 2 < opts->rm_list.nr ? opts->rm_list.ids[i + 2] : TCPOPT_NOP;
			id4 = i + 3 < opts->rm_list.nr ? opts->rm_list.ids[i + 3] : TCPOPT_NOP;
			put_unaligned_be32(id1 << 24 | id2 << 16 | id3 << 8 | id4, ptr);
			ptr += 1;
			i += 4;
		}
	}

	if (OPTION_MPTCP_PRIO & opts->suboptions) {
		const struct sock *ssk = (const struct sock *)tp;
		struct mptcp_subflow_context *subflow;

		subflow = mptcp_subflow_ctx(ssk);
		subflow->send_mp_prio = 0;

		*ptr++ = mptcp_option(MPTCPOPT_MP_PRIO,
				      TCPOLEN_MPTCP_PRIO,
				      opts->backup, TCPOPT_NOP);
	}

	if (OPTION_MPTCP_MPJ_SYN & opts->suboptions) {
		*ptr++ = mptcp_option(MPTCPOPT_MP_JOIN,
				      TCPOLEN_MPTCP_MPJ_SYN,
				      opts->backup, opts->join_id);
		put_unaligned_be32(opts->token, ptr);
		ptr += 1;
		put_unaligned_be32(opts->nonce, ptr);
		ptr += 1;
	}

	if (OPTION_MPTCP_MPJ_SYNACK & opts->suboptions) {
		*ptr++ = mptcp_option(MPTCPOPT_MP_JOIN,
				      TCPOLEN_MPTCP_MPJ_SYNACK,
				      opts->backup, opts->join_id);
		put_unaligned_be64(opts->thmac, ptr);
		ptr += 2;
		put_unaligned_be32(opts->nonce, ptr);
		ptr += 1;
	}

	if (OPTION_MPTCP_MPJ_ACK & opts->suboptions) {
		*ptr++ = mptcp_option(MPTCPOPT_MP_JOIN,
				      TCPOLEN_MPTCP_MPJ_ACK, 0, 0);
		memcpy(ptr, opts->hmac, MPTCPOPT_HMAC_LEN);
		ptr += 5;
	}

	if (OPTION_MPTCP_RST & opts->suboptions)
		*ptr++ = mptcp_option(MPTCPOPT_RST,
				      TCPOLEN_MPTCP_RST,
				      opts->reset_transient,
				      opts->reset_reason);

	if (opts->ext_copy.use_ack || opts->ext_copy.use_map) {
		struct mptcp_ext *mpext = &opts->ext_copy;
		u8 len = TCPOLEN_MPTCP_DSS_BASE;
		u8 flags = 0;

		if (mpext->use_ack) {
			flags = MPTCP_DSS_HAS_ACK;
			if (mpext->ack64) {
				len += TCPOLEN_MPTCP_DSS_ACK64;
				flags |= MPTCP_DSS_ACK64;
			} else {
				len += TCPOLEN_MPTCP_DSS_ACK32;
			}
		}

		if (mpext->use_map) {
			len += TCPOLEN_MPTCP_DSS_MAP64;

			/* Use only 64-bit mapping flags for now, add
			 * support for optional 32-bit mappings later.
			 */
			flags |= MPTCP_DSS_HAS_MAP | MPTCP_DSS_DSN64;
			if (mpext->data_fin)
				flags |= MPTCP_DSS_DATA_FIN;
		}

		*ptr++ = mptcp_option(MPTCPOPT_DSS, len, 0, flags);

		if (mpext->use_ack) {
			if (mpext->ack64) {
				put_unaligned_be64(mpext->data_ack, ptr);
				ptr += 2;
			} else {
				put_unaligned_be32(mpext->data_ack32, ptr);
				ptr += 1;
			}
		}

		if (mpext->use_map) {
			put_unaligned_be64(mpext->data_seq, ptr);
			ptr += 2;
			put_unaligned_be32(mpext->subflow_seq, ptr);
			ptr += 1;
			put_unaligned_be32(mpext->data_len << 16 |
					   TCPOPT_NOP << 8 | TCPOPT_NOP, ptr);
		}
	}

	if (tp)
		mptcp_set_rwin(tp);
}

__be32 mptcp_get_reset_option(const struct sk_buff *skb)
{
	const struct mptcp_ext *ext = mptcp_get_ext(skb);
	u8 flags, reason;

	if (ext) {
		flags = ext->reset_transient;
		reason = ext->reset_reason;

		return mptcp_option(MPTCPOPT_RST, TCPOLEN_MPTCP_RST,
				    flags, reason);
	}

	return htonl(0u);
}
EXPORT_SYMBOL_GPL(mptcp_get_reset_option);