1 // SPDX-License-Identifier: GPL-2.0
2 /* Multipath TCP
3  *
4  * Copyright (c) 2017 - 2019, Intel Corporation.
5  */
6
7 #define pr_fmt(fmt) "MPTCP: " fmt
8
9 #include <linux/kernel.h>
10 #include <linux/module.h>
11 #include <linux/netdevice.h>
12 #include <crypto/algapi.h>
13 #include <crypto/sha.h>
14 #include <net/sock.h>
15 #include <net/inet_common.h>
16 #include <net/inet_hashtables.h>
17 #include <net/protocol.h>
18 #include <net/tcp.h>
19 #if IS_ENABLED(CONFIG_MPTCP_IPV6)
20 #include <net/ip6_route.h>
21 #endif
22 #include <net/mptcp.h>
23 #include "protocol.h"
24 #include "mib.h"
25
26 static void SUBFLOW_REQ_INC_STATS(struct request_sock *req,
27                                   enum linux_mptcp_mib_field field)
28 {
29         MPTCP_INC_STATS(sock_net(req_to_sk(req)), field);
30 }
31
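/* set up per-connection MPTCP state (the MPC token, or the MP_JOIN local
 * nonce and local address id) if not done yet, then defer to the regular
 * TCP rebuild_header
 */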
32 static int subflow_rebuild_header(struct sock *sk)
33 {
34         struct mptcp_subflow_context *subflow = mptcp_subflow_ctx(sk);
35         int local_id, err = 0;
36
37         if (subflow->request_mptcp && !subflow->token) {
38                 pr_debug("subflow=%p", sk);
39                 err = mptcp_token_new_connect(sk);
40         } else if (subflow->request_join && !subflow->local_nonce) {
41                 struct mptcp_sock *msk = (struct mptcp_sock *)subflow->conn;
42
43                 pr_debug("subflow=%p", sk);
44
45                 do {
46                         get_random_bytes(&subflow->local_nonce, sizeof(u32));
47                 } while (!subflow->local_nonce);
48
49                 if (subflow->local_id)
50                         goto out;
51
52                 local_id = mptcp_pm_get_local_id(msk, (struct sock_common *)sk);
53                 if (local_id < 0)
54                         return -EINVAL;
55
56                 subflow->local_id = local_id;
57         }
58
59 out:
60         if (err)
61                 return err;
62
63         return subflow->icsk_af_ops->rebuild_header(sk);
64 }
65
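/* release the MPTCP token associated with an MP_CAPABLE request socket
 * before running the plain TCP request destructor
 */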
66 static void subflow_req_destructor(struct request_sock *req)
67 {
68         struct mptcp_subflow_request_sock *subflow_req = mptcp_subflow_rsk(req);
69
70         pr_debug("subflow_req=%p", subflow_req);
71
72         if (subflow_req->mp_capable)
73                 mptcp_token_destroy_request(subflow_req->token);
74         tcp_request_sock_ops.destructor(req);
75 }
76
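/* compute an HMAC over the two nonces using the given key pair; used for the
 * MP_JOIN SYN-ACK (truncated) and third-ACK handshake messages
 */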
77 static void subflow_generate_hmac(u64 key1, u64 key2, u32 nonce1, u32 nonce2,
78                                   void *hmac)
79 {
80         u8 msg[8];
81
82         put_unaligned_be32(nonce1, &msg[0]);
83         put_unaligned_be32(nonce2, &msg[4]);
84
85         mptcp_crypto_hmac_sha(key1, key2, msg, 8, hmac);
86 }
87
88 /* validate received token and create truncated hmac and nonce for SYN-ACK */
89 static bool subflow_token_join_request(struct request_sock *req,
90                                        const struct sk_buff *skb)
91 {
92         struct mptcp_subflow_request_sock *subflow_req = mptcp_subflow_rsk(req);
93         u8 hmac[SHA256_DIGEST_SIZE];
94         struct mptcp_sock *msk;
95         int local_id;
96
97         msk = mptcp_token_get_sock(subflow_req->token);
98         if (!msk) {
99                 SUBFLOW_REQ_INC_STATS(req, MPTCP_MIB_JOINNOTOKEN);
100                 return false;
101         }
102
103         local_id = mptcp_pm_get_local_id(msk, (struct sock_common *)req);
104         if (local_id < 0) {
105                 sock_put((struct sock *)msk);
106                 return false;
107         }
108         subflow_req->local_id = local_id;
109
110         get_random_bytes(&subflow_req->local_nonce, sizeof(u32));
111
112         subflow_generate_hmac(msk->local_key, msk->remote_key,
113                               subflow_req->local_nonce,
114                               subflow_req->remote_nonce, hmac);
115
116         subflow_req->thmac = get_unaligned_be64(hmac);
117
118         sock_put((struct sock *)msk);
119         return true;
120 }
121
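/* parse the MPTCP options carried by the SYN and initialize the request
 * socket for either an MP_CAPABLE or an MP_JOIN subflow
 */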
122 static void subflow_init_req(struct request_sock *req,
123                              const struct sock *sk_listener,
124                              struct sk_buff *skb)
125 {
126         struct mptcp_subflow_context *listener = mptcp_subflow_ctx(sk_listener);
127         struct mptcp_subflow_request_sock *subflow_req = mptcp_subflow_rsk(req);
128         struct mptcp_options_received mp_opt;
129
130         pr_debug("subflow_req=%p, listener=%p", subflow_req, listener);
131
132         mptcp_get_options(skb, &mp_opt);
133
134         subflow_req->mp_capable = 0;
135         subflow_req->mp_join = 0;
136
137 #ifdef CONFIG_TCP_MD5SIG
138         /* no MPTCP if MD5SIG is enabled on this socket, as we may otherwise
139          * run out of TCP option space.
140          */
141         if (rcu_access_pointer(tcp_sk(sk_listener)->md5sig_info))
142                 return;
143 #endif
144
145         if (mp_opt.mp_capable) {
146                 SUBFLOW_REQ_INC_STATS(req, MPTCP_MIB_MPCAPABLEPASSIVE);
147
148                 if (mp_opt.mp_join)
149                         return;
150         } else if (mp_opt.mp_join) {
151                 SUBFLOW_REQ_INC_STATS(req, MPTCP_MIB_JOINSYNRX);
152         }
153
154         if (mp_opt.mp_capable && listener->request_mptcp) {
155                 int err;
156
157                 err = mptcp_token_new_request(req);
158                 if (err == 0)
159                         subflow_req->mp_capable = 1;
160
161                 subflow_req->ssn_offset = TCP_SKB_CB(skb)->seq;
162         } else if (mp_opt.mp_join && listener->request_mptcp) {
163                 subflow_req->ssn_offset = TCP_SKB_CB(skb)->seq;
164                 subflow_req->mp_join = 1;
165                 subflow_req->backup = mp_opt.backup;
166                 subflow_req->remote_id = mp_opt.join_id;
167                 subflow_req->token = mp_opt.token;
168                 subflow_req->remote_nonce = mp_opt.nonce;
169                 pr_debug("token=%u, remote_nonce=%u", subflow_req->token,
170                          subflow_req->remote_nonce);
171                 if (!subflow_token_join_request(req, skb)) {
172                         subflow_req->mp_join = 0;
173                         /* @@ need to trigger RST */
174                 }
175         }
176 }
177
178 static void subflow_v4_init_req(struct request_sock *req,
179                                 const struct sock *sk_listener,
180                                 struct sk_buff *skb)
181 {
182         tcp_rsk(req)->is_mptcp = 1;
183
184         tcp_request_sock_ipv4_ops.init_req(req, sk_listener, skb);
185
186         subflow_init_req(req, sk_listener, skb);
187 }
188
189 #if IS_ENABLED(CONFIG_MPTCP_IPV6)
190 static void subflow_v6_init_req(struct request_sock *req,
191                                 const struct sock *sk_listener,
192                                 struct sk_buff *skb)
193 {
194         tcp_rsk(req)->is_mptcp = 1;
195
196         tcp_request_sock_ipv6_ops.init_req(req, sk_listener, skb);
197
198         subflow_init_req(req, sk_listener, skb);
199 }
200 #endif
201
202 /* validate received truncated hmac and create hmac for third ACK */
203 static bool subflow_thmac_valid(struct mptcp_subflow_context *subflow)
204 {
205         u8 hmac[SHA256_DIGEST_SIZE];
206         u64 thmac;
207
208         subflow_generate_hmac(subflow->remote_key, subflow->local_key,
209                               subflow->remote_nonce, subflow->local_nonce,
210                               hmac);
211
212         thmac = get_unaligned_be64(hmac);
213         pr_debug("subflow=%p, token=%u, thmac=%llu, subflow->thmac=%llu\n",
214                  subflow, subflow->token,
215                  (unsigned long long)thmac,
216                  (unsigned long long)subflow->thmac);
217
218         return thmac == subflow->thmac;
219 }
220
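/* installed as the subflow's sk_rx_dst_set callback: chain to the original
 * callback, then, on the first packet (the SYN-ACK), complete the MP_CAPABLE
 * or MP_JOIN handshake, falling back to plain TCP or resetting the subflow
 * on failure
 */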
221 static void subflow_finish_connect(struct sock *sk, const struct sk_buff *skb)
222 {
223         struct mptcp_subflow_context *subflow = mptcp_subflow_ctx(sk);
224         struct mptcp_options_received mp_opt;
225         struct sock *parent = subflow->conn;
226         struct tcp_sock *tp = tcp_sk(sk);
227
228         subflow->icsk_af_ops->sk_rx_dst_set(sk, skb);
229
230         if (inet_sk_state_load(parent) == TCP_SYN_SENT) {
231                 inet_sk_state_store(parent, TCP_ESTABLISHED);
232                 parent->sk_state_change(parent);
233         }
234
235         /* be sure no special action on any packet other than syn-ack */
236         if (subflow->conn_finished)
237                 return;
238
239         subflow->conn_finished = 1;
240
241         mptcp_get_options(skb, &mp_opt);
242         if (subflow->request_mptcp && mp_opt.mp_capable) {
243                 subflow->mp_capable = 1;
244                 subflow->can_ack = 1;
245                 subflow->remote_key = mp_opt.sndr_key;
246                 pr_debug("subflow=%p, remote_key=%llu", subflow,
247                          subflow->remote_key);
248         } else if (subflow->request_join && mp_opt.mp_join) {
249                 subflow->mp_join = 1;
250                 subflow->thmac = mp_opt.thmac;
251                 subflow->remote_nonce = mp_opt.nonce;
252                 pr_debug("subflow=%p, thmac=%llu, remote_nonce=%u", subflow,
253                          subflow->thmac, subflow->remote_nonce);
254         } else if (subflow->request_mptcp) {
255                 tp->is_mptcp = 0;
256         }
257
258         if (!tp->is_mptcp)
259                 return;
260
261         if (subflow->mp_capable) {
262                 pr_debug("subflow=%p, remote_key=%llu", mptcp_subflow_ctx(sk),
263                          subflow->remote_key);
264                 mptcp_finish_connect(sk);
265
266                 if (skb) {
267                         pr_debug("synack seq=%u", TCP_SKB_CB(skb)->seq);
268                         subflow->ssn_offset = TCP_SKB_CB(skb)->seq;
269                 }
270         } else if (subflow->mp_join) {
271                 u8 hmac[SHA256_DIGEST_SIZE];
272
273                 pr_debug("subflow=%p, thmac=%llu, remote_nonce=%u",
274                          subflow, subflow->thmac,
275                          subflow->remote_nonce);
276                 if (!subflow_thmac_valid(subflow)) {
277                         MPTCP_INC_STATS(sock_net(sk), MPTCP_MIB_JOINACKMAC);
278                         subflow->mp_join = 0;
279                         goto do_reset;
280                 }
281
282                 subflow_generate_hmac(subflow->local_key, subflow->remote_key,
283                                       subflow->local_nonce,
284                                       subflow->remote_nonce,
285                                       hmac);
286
287                 memcpy(subflow->hmac, hmac, MPTCPOPT_HMAC_LEN);
288
289                 if (skb)
290                         subflow->ssn_offset = TCP_SKB_CB(skb)->seq;
291
292                 if (!mptcp_finish_join(sk))
293                         goto do_reset;
294
295                 MPTCP_INC_STATS(sock_net(sk), MPTCP_MIB_JOINSYNACKRX);
296         } else {
297 do_reset:
298                 tcp_send_active_reset(sk, GFP_ATOMIC);
299                 tcp_done(sk);
300         }
301 }
302
303 static struct request_sock_ops subflow_request_sock_ops;
304 static struct tcp_request_sock_ops subflow_request_sock_ipv4_ops;
305
306 static int subflow_v4_conn_request(struct sock *sk, struct sk_buff *skb)
307 {
308         struct mptcp_subflow_context *subflow = mptcp_subflow_ctx(sk);
309
310         pr_debug("subflow=%p", subflow);
311
312         /* Never answer SYNs sent to broadcast or multicast addresses */
313         if (skb_rtable(skb)->rt_flags & (RTCF_BROADCAST | RTCF_MULTICAST))
314                 goto drop;
315
316         return tcp_conn_request(&subflow_request_sock_ops,
317                                 &subflow_request_sock_ipv4_ops,
318                                 sk, skb);
319 drop:
320         tcp_listendrop(sk);
321         return 0;
322 }
323
324 #if IS_ENABLED(CONFIG_MPTCP_IPV6)
325 static struct tcp_request_sock_ops subflow_request_sock_ipv6_ops;
326 static struct inet_connection_sock_af_ops subflow_v6_specific;
327 static struct inet_connection_sock_af_ops subflow_v6m_specific;
328
329 static int subflow_v6_conn_request(struct sock *sk, struct sk_buff *skb)
330 {
331         struct mptcp_subflow_context *subflow = mptcp_subflow_ctx(sk);
332
333         pr_debug("subflow=%p", subflow);
334
335         if (skb->protocol == htons(ETH_P_IP))
336                 return subflow_v4_conn_request(sk, skb);
337
338         if (!ipv6_unicast_destination(skb))
339                 goto drop;
340
341         return tcp_conn_request(&subflow_request_sock_ops,
342                                 &subflow_request_sock_ipv6_ops, sk, skb);
343
344 drop:
345         tcp_listendrop(sk);
346         return 0; /* don't send reset */
347 }
348 #endif
349
350 /* validate hmac received in third ACK */
351 static bool subflow_hmac_valid(const struct request_sock *req,
352                                const struct mptcp_options_received *mp_opt)
353 {
354         const struct mptcp_subflow_request_sock *subflow_req;
355         u8 hmac[SHA256_DIGEST_SIZE];
356         struct mptcp_sock *msk;
357         bool ret;
358
359         subflow_req = mptcp_subflow_rsk(req);
360         msk = mptcp_token_get_sock(subflow_req->token);
361         if (!msk)
362                 return false;
363
364         subflow_generate_hmac(msk->remote_key, msk->local_key,
365                               subflow_req->remote_nonce,
366                               subflow_req->local_nonce, hmac);
367
368         ret = true;
369         if (crypto_memneq(hmac, mp_opt->hmac, MPTCPOPT_HMAC_LEN))
370                 ret = false;
371
372         sock_put((struct sock *)msk);
373         return ret;
374 }
375
376 static void mptcp_sock_destruct(struct sock *sk)
377 {
378         /* if the new mptcp socket isn't accepted, it is freed
379          * from the tcp listener socket's request queue, linked
380          * from req->sk.  The tcp socket is released.
381          * This calls the ULP release function which will
382          * also remove the mptcp socket, via
383          * sock_put(ctx->conn).
384          *
385          * The problem is that the mptcp socket will not be in
386          * the SYN_RECV state and doesn't have the SOCK_DEAD flag set.
387          * Both result in warnings from inet_sock_destruct.
388          */
389
390         if (sk->sk_state == TCP_SYN_RECV) {
391                 sk->sk_state = TCP_CLOSE;
392                 WARN_ON_ONCE(sk->sk_socket);
393                 sock_orphan(sk);
394         }
395
396         inet_sock_destruct(sk);
397 }
398
399 static void mptcp_force_close(struct sock *sk)
400 {
401         inet_sk_state_store(sk, TCP_CLOSE);
402         sk_common_release(sk);
403 }
404
405 static void subflow_ulp_fallback(struct sock *sk,
406                                  struct mptcp_subflow_context *old_ctx)
407 {
408         struct inet_connection_sock *icsk = inet_csk(sk);
409
410         mptcp_subflow_tcp_fallback(sk, old_ctx);
411         icsk->icsk_ulp_ops = NULL;
412         rcu_assign_pointer(icsk->icsk_ulp_data, NULL);
413         tcp_sk(sk)->is_mptcp = 0;
414 }
415
416 static void subflow_drop_ctx(struct sock *ssk)
417 {
418         struct mptcp_subflow_context *ctx = mptcp_subflow_ctx(ssk);
419
420         if (!ctx)
421                 return;
422
423         subflow_ulp_fallback(ssk, ctx);
424         if (ctx->conn)
425                 sock_put(ctx->conn);
426
427         kfree_rcu(ctx, rcu);
428 }
429
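/* create the child socket for an incoming connection: for MP_CAPABLE
 * requests also clone the MPTCP master socket, for MP_JOIN requests
 * validate the third-ACK HMAC and attach the child to the existing msk
 */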
430 static struct sock *subflow_syn_recv_sock(const struct sock *sk,
431                                           struct sk_buff *skb,
432                                           struct request_sock *req,
433                                           struct dst_entry *dst,
434                                           struct request_sock *req_unhash,
435                                           bool *own_req)
436 {
437         struct mptcp_subflow_context *listener = mptcp_subflow_ctx(sk);
438         struct mptcp_subflow_request_sock *subflow_req;
439         struct mptcp_options_received mp_opt;
440         bool fallback_is_fatal = false;
441         struct sock *new_msk = NULL;
442         bool fallback = false;
443         struct sock *child;
444
445         pr_debug("listener=%p, req=%p, conn=%p", listener, req, listener->conn);
446
447         /* we need a valid 'mp_capable' value later, even when the options
448          * are not parsed
449          */
450         mp_opt.mp_capable = 0;
451         if (tcp_rsk(req)->is_mptcp == 0)
452                 goto create_child;
453
454         /* if the sk is MP_CAPABLE, we try to fetch the client key */
455         subflow_req = mptcp_subflow_rsk(req);
456         if (subflow_req->mp_capable) {
457                 if (TCP_SKB_CB(skb)->seq != subflow_req->ssn_offset + 1) {
458                         /* here we can receive and accept an in-window,
459                          * out-of-order packet, which will not carry the
460                          * MP_CAPABLE option even on MPTCP-enabled paths
461                          */
462                         goto create_msk;
463                 }
464
465                 mptcp_get_options(skb, &mp_opt);
466                 if (!mp_opt.mp_capable) {
467                         fallback = true;
468                         goto create_child;
469                 }
470
471 create_msk:
472                 new_msk = mptcp_sk_clone(listener->conn, &mp_opt, req);
473                 if (!new_msk)
474                         fallback = true;
475         } else if (subflow_req->mp_join) {
476                 fallback_is_fatal = true;
477                 mptcp_get_options(skb, &mp_opt);
478                 if (!mp_opt.mp_join ||
479                     !subflow_hmac_valid(req, &mp_opt)) {
480                         SUBFLOW_REQ_INC_STATS(req, MPTCP_MIB_JOINACKMAC);
481                         return NULL;
482                 }
483         }
484
485 create_child:
486         child = listener->icsk_af_ops->syn_recv_sock(sk, skb, req, dst,
487                                                      req_unhash, own_req);
488
489         if (child && *own_req) {
490                 struct mptcp_subflow_context *ctx = mptcp_subflow_ctx(child);
491
492                 tcp_rsk(req)->drop_req = false;
493
494                 /* we need to fall back on ctx allocation failure and on the
495                  * prerequisite checks above. In the latter scenario we
496                  * additionally need to reset the context to non-MPTCP status.
497                  */
498                 if (!ctx || fallback) {
499                         if (fallback_is_fatal)
500                                 goto dispose_child;
501
502                         subflow_drop_ctx(child);
503                         goto out;
504                 }
505
506                 if (ctx->mp_capable) {
507                         /* new mpc subflow takes ownership of the newly
508                          * created mptcp socket
509                          */
510                         new_msk->sk_destruct = mptcp_sock_destruct;
511                         mptcp_pm_new_connection(mptcp_sk(new_msk), 1);
512                         ctx->conn = new_msk;
513                         new_msk = NULL;
514
515                         /* with OoO packets we can reach here without ingress
516                          * mpc option
517                          */
518                         ctx->remote_key = mp_opt.sndr_key;
519                         ctx->fully_established = mp_opt.mp_capable;
520                         ctx->can_ack = mp_opt.mp_capable;
521                 } else if (ctx->mp_join) {
522                         struct mptcp_sock *owner;
523
524                         owner = mptcp_token_get_sock(ctx->token);
525                         if (!owner)
526                                 goto dispose_child;
527
528                         ctx->conn = (struct sock *)owner;
529                         if (!mptcp_finish_join(child))
530                                 goto dispose_child;
531
532                         SUBFLOW_REQ_INC_STATS(req, MPTCP_MIB_JOINACKRX);
533                         tcp_rsk(req)->drop_req = true;
534                 }
535         }
536
537 out:
538         /* dispose of the leftover mptcp master socket, if any */
539         if (unlikely(new_msk))
540                 mptcp_force_close(new_msk);
541
542         /* check for the expected invariant - should never trigger, it just
543          * helps catch subtle bugs earlier
544          */
545         WARN_ON_ONCE(child && *own_req && tcp_sk(child)->is_mptcp &&
546                      (!mptcp_subflow_ctx(child) ||
547                       !mptcp_subflow_ctx(child)->conn));
548         return child;
549
550 dispose_child:
551         subflow_drop_ctx(child);
552         tcp_rsk(req)->drop_req = true;
553         tcp_send_active_reset(child, GFP_ATOMIC);
554         inet_csk_prepare_for_destroy_sock(child);
555         tcp_done(child);
556
557         /* The last child reference will be released by the caller */
558         return child;
559 }
560
561 static struct inet_connection_sock_af_ops subflow_specific;
562
563 enum mapping_status {
564         MAPPING_OK,
565         MAPPING_INVALID,
566         MAPPING_EMPTY,
567         MAPPING_DATA_FIN
568 };
569
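/* expand a 32-bit DSS data sequence number to 64 bits, using the previous
 * mapping as a reference
 */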
570 static u64 expand_seq(u64 old_seq, u16 old_data_len, u64 seq)
571 {
572         if ((u32)seq == (u32)old_seq)
573                 return old_seq;
574
575         /* Assume map covers data not mapped yet. */
576         return seq | ((old_seq + old_data_len + 1) & GENMASK_ULL(63, 32));
577 }
578
579 static void warn_bad_map(struct mptcp_subflow_context *subflow, u32 ssn)
580 {
581         WARN_ONCE(1, "Bad mapping: ssn=%d map_seq=%d map_data_len=%d",
582                   ssn, subflow->map_subflow_seq, subflow->map_data_len);
583 }
584
585 static bool skb_is_fully_mapped(struct sock *ssk, struct sk_buff *skb)
586 {
587         struct mptcp_subflow_context *subflow = mptcp_subflow_ctx(ssk);
588         unsigned int skb_consumed;
589
590         skb_consumed = tcp_sk(ssk)->copied_seq - TCP_SKB_CB(skb)->seq;
591         if (WARN_ON_ONCE(skb_consumed >= skb->len))
592                 return true;
593
594         return skb->len - skb_consumed <= subflow->map_data_len -
595                                           mptcp_subflow_get_map_offset(subflow);
596 }
597
598 static bool validate_mapping(struct sock *ssk, struct sk_buff *skb)
599 {
600         struct mptcp_subflow_context *subflow = mptcp_subflow_ctx(ssk);
601         u32 ssn = tcp_sk(ssk)->copied_seq - subflow->ssn_offset;
602
603         if (unlikely(before(ssn, subflow->map_subflow_seq))) {
604                 /* Mapping covers data later in the subflow stream,
605                  * currently unsupported.
606                  */
607                 warn_bad_map(subflow, ssn);
608                 return false;
609         }
610         if (unlikely(!before(ssn, subflow->map_subflow_seq +
611                                   subflow->map_data_len))) {
612                 /* Mapping only covers past subflow data, invalid */
613                 warn_bad_map(subflow, ssn + skb->len);
614                 return false;
615         }
616         return true;
617 }
618
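/* inspect the skb at the head of the receive queue and extract/validate the
 * DSS mapping carried by its MPTCP extension, if any
 */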
619 static enum mapping_status get_mapping_status(struct sock *ssk)
620 {
621         struct mptcp_subflow_context *subflow = mptcp_subflow_ctx(ssk);
622         struct mptcp_ext *mpext;
623         struct sk_buff *skb;
624         u16 data_len;
625         u64 map_seq;
626
627         skb = skb_peek(&ssk->sk_receive_queue);
628         if (!skb)
629                 return MAPPING_EMPTY;
630
631         mpext = mptcp_get_ext(skb);
632         if (!mpext || !mpext->use_map) {
633                 if (!subflow->map_valid && !skb->len) {
634                         /* the TCP stack delivers 0-len FIN packets to the receive
635                          * queue; those are the only 0-len packets ever expected
636                          * here, and we can admit a missing mapping only for them
637                          */
638                         if (!(TCP_SKB_CB(skb)->tcp_flags & TCPHDR_FIN))
639                                 WARN_ONCE(1, "0len seq %d:%d flags %x",
640                                           TCP_SKB_CB(skb)->seq,
641                                           TCP_SKB_CB(skb)->end_seq,
642                                           TCP_SKB_CB(skb)->tcp_flags);
643                         sk_eat_skb(ssk, skb);
644                         return MAPPING_EMPTY;
645                 }
646
647                 if (!subflow->map_valid)
648                         return MAPPING_INVALID;
649
650                 goto validate_seq;
651         }
652
653         pr_debug("seq=%llu is64=%d ssn=%u data_len=%u data_fin=%d",
654                  mpext->data_seq, mpext->dsn64, mpext->subflow_seq,
655                  mpext->data_len, mpext->data_fin);
656
657         data_len = mpext->data_len;
658         if (data_len == 0) {
659                 pr_err("Infinite mapping not handled");
660                 MPTCP_INC_STATS(sock_net(ssk), MPTCP_MIB_INFINITEMAPRX);
661                 return MAPPING_INVALID;
662         }
663
664         if (mpext->data_fin == 1) {
665                 if (data_len == 1) {
666                         pr_debug("DATA_FIN with no payload");
667                         if (subflow->map_valid) {
668                                 /* A DATA_FIN might arrive in a DSS
669                                  * option before the previous mapping
670                                  * has been fully consumed. Continue
671                                  * handling the existing mapping.
672                                  */
673                                 skb_ext_del(skb, SKB_EXT_MPTCP);
674                                 return MAPPING_OK;
675                         } else {
676                                 return MAPPING_DATA_FIN;
677                         }
678                 }
679
680                 /* Adjust for DATA_FIN using 1 byte of sequence space */
681                 data_len--;
682         }
683
684         if (!mpext->dsn64) {
685                 map_seq = expand_seq(subflow->map_seq, subflow->map_data_len,
686                                      mpext->data_seq);
687                 subflow->use_64bit_ack = 0;
688                 pr_debug("expanded seq=%llu", subflow->map_seq);
689         } else {
690                 map_seq = mpext->data_seq;
691                 subflow->use_64bit_ack = 1;
692         }
693
694         if (subflow->map_valid) {
695                 /* Allow replacing only with an identical map */
696                 if (subflow->map_seq == map_seq &&
697                     subflow->map_subflow_seq == mpext->subflow_seq &&
698                     subflow->map_data_len == data_len) {
699                         skb_ext_del(skb, SKB_EXT_MPTCP);
700                         return MAPPING_OK;
701                 }
702
703                 /* If this skb's data is fully covered by the current mapping,
704                  * the new map would need caching, which is not supported
705                  */
706                 if (skb_is_fully_mapped(ssk, skb)) {
707                         MPTCP_INC_STATS(sock_net(ssk), MPTCP_MIB_DSSNOMATCH);
708                         return MAPPING_INVALID;
709                 }
710
711                 /* will validate the next map after consuming the current one */
712                 return MAPPING_OK;
713         }
714
715         subflow->map_seq = map_seq;
716         subflow->map_subflow_seq = mpext->subflow_seq;
717         subflow->map_data_len = data_len;
718         subflow->map_valid = 1;
719         subflow->mpc_map = mpext->mpc_map;
720         pr_debug("new map seq=%llu subflow_seq=%u data_len=%u",
721                  subflow->map_seq, subflow->map_subflow_seq,
722                  subflow->map_data_len);
723
724 validate_seq:
725         /* we revalidate the existing mapping on each new skb, because we must
726          * ensure the current skb is completely covered by the available mapping
727          */
728         if (!validate_mapping(ssk, skb))
729                 return MAPPING_INVALID;
730
731         skb_ext_del(skb, SKB_EXT_MPTCP);
732         return MAPPING_OK;
733 }
734
735 static int subflow_read_actor(read_descriptor_t *desc,
736                               struct sk_buff *skb,
737                               unsigned int offset, size_t len)
738 {
739         size_t copy_len = min(desc->count, len);
740
741         desc->count -= copy_len;
742
743         pr_debug("flushed %zu bytes, %zu left", copy_len, desc->count);
744         return copy_len;
745 }
746
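/* check whether the subflow has data that is in sequence at the MPTCP level,
 * discarding out-of-sequence mapped data and resetting the subflow on fatal
 * mapping errors
 */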
747 static bool subflow_check_data_avail(struct sock *ssk)
748 {
749         struct mptcp_subflow_context *subflow = mptcp_subflow_ctx(ssk);
750         enum mapping_status status;
751         struct mptcp_sock *msk;
752         struct sk_buff *skb;
753
754         pr_debug("msk=%p ssk=%p data_avail=%d skb=%p", subflow->conn, ssk,
755                  subflow->data_avail, skb_peek(&ssk->sk_receive_queue));
756         if (subflow->data_avail)
757                 return true;
758
759         msk = mptcp_sk(subflow->conn);
760         for (;;) {
761                 u32 map_remaining;
762                 size_t delta;
763                 u64 ack_seq;
764                 u64 old_ack;
765
766                 status = get_mapping_status(ssk);
767                 pr_debug("msk=%p ssk=%p status=%d", msk, ssk, status);
768                 if (status == MAPPING_INVALID) {
769                         ssk->sk_err = EBADMSG;
770                         goto fatal;
771                 }
772
773                 if (status != MAPPING_OK)
774                         return false;
775
776                 skb = skb_peek(&ssk->sk_receive_queue);
777                 if (WARN_ON_ONCE(!skb))
778                         return false;
779
780                 /* if msk lacks the remote key, this subflow must provide an
781                  * MP_CAPABLE-based mapping
782                  */
783                 if (unlikely(!READ_ONCE(msk->can_ack))) {
784                         if (!subflow->mpc_map) {
785                                 ssk->sk_err = EBADMSG;
786                                 goto fatal;
787                         }
788                         WRITE_ONCE(msk->remote_key, subflow->remote_key);
789                         WRITE_ONCE(msk->ack_seq, subflow->map_seq);
790                         WRITE_ONCE(msk->can_ack, true);
791                 }
792
793                 old_ack = READ_ONCE(msk->ack_seq);
794                 ack_seq = mptcp_subflow_get_mapped_dsn(subflow);
795                 pr_debug("msk ack_seq=%llx subflow ack_seq=%llx", old_ack,
796                          ack_seq);
797                 if (ack_seq == old_ack)
798                         break;
799
800                 /* only accept in-sequence mappings. Old values are spurious
801                  * retransmissions; we can hit "future" values on an active
802                  * backup subflow switch, and we rely on retransmissions to get
803                  * in-sequence data.
804                  * Concurrent subflow support will require subflow data
805                  * reordering
806                  */
807                 map_remaining = subflow->map_data_len -
808                                 mptcp_subflow_get_map_offset(subflow);
809                 if (before64(ack_seq, old_ack))
810                         delta = min_t(size_t, old_ack - ack_seq, map_remaining);
811                 else
812                         delta = min_t(size_t, ack_seq - old_ack, map_remaining);
813
814                 /* discard mapped data */
815                 pr_debug("discarding %zu bytes, current map len=%d", delta,
816                          map_remaining);
817                 if (delta) {
818                         read_descriptor_t desc = {
819                                 .count = delta,
820                         };
821                         int ret;
822
823                         ret = tcp_read_sock(ssk, &desc, subflow_read_actor);
824                         if (ret < 0) {
825                                 ssk->sk_err = -ret;
826                                 goto fatal;
827                         }
828                         if (ret < delta)
829                                 return false;
830                         if (delta == map_remaining)
831                                 subflow->map_valid = 0;
832                 }
833         }
834         return true;
835
836 fatal:
837         /* fatal protocol error, close the socket */
838         /* This barrier is coupled with smp_rmb() in tcp_poll() */
839         smp_wmb();
840         ssk->sk_error_report(ssk);
841         tcp_set_state(ssk, TCP_CLOSE);
842         tcp_send_active_reset(ssk, GFP_ATOMIC);
843         return false;
844 }
845
846 bool mptcp_subflow_data_available(struct sock *sk)
847 {
848         struct mptcp_subflow_context *subflow = mptcp_subflow_ctx(sk);
849         struct sk_buff *skb;
850
851         /* check if current mapping is still valid */
852         if (subflow->map_valid &&
853             mptcp_subflow_get_map_offset(subflow) >= subflow->map_data_len) {
854                 subflow->map_valid = 0;
855                 subflow->data_avail = 0;
856
857                 pr_debug("Done with mapping: seq=%u data_len=%u",
858                          subflow->map_subflow_seq,
859                          subflow->map_data_len);
860         }
861
862         if (!subflow_check_data_avail(sk)) {
863                 subflow->data_avail = 0;
864                 return false;
865         }
866
867         skb = skb_peek(&sk->sk_receive_queue);
868         subflow->data_avail = skb &&
869                        before(tcp_sk(sk)->copied_seq, TCP_SKB_CB(skb)->end_seq);
870         return subflow->data_avail;
871 }
872
873 /* If ssk has an mptcp parent socket, use the mptcp rcvbuf occupancy,
874  * not the ssk one.
875  *
876  * In mptcp, rwin is about the mptcp-level connection data.
877  *
878  * Data that is still on the ssk rx queue can thus be ignored,
879  * as far as the mptcp peer is concerned, that data is still in flight.
880  * The DSS ACK is updated when the skb is moved to the mptcp rx queue.
881  */
882 void mptcp_space(const struct sock *ssk, int *space, int *full_space)
883 {
884         const struct mptcp_subflow_context *subflow = mptcp_subflow_ctx(ssk);
885         const struct sock *sk = subflow->conn;
886
887         *space = tcp_space(sk);
888         *full_space = tcp_full_space(sk);
889 }
890
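/* data_ready hook installed on the subflow: plain TCP (fallback) subflows
 * simply wake the parent, MPTCP subflows wake it only when new MPTCP-level
 * data is actually available
 */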
891 static void subflow_data_ready(struct sock *sk)
892 {
893         struct mptcp_subflow_context *subflow = mptcp_subflow_ctx(sk);
894         struct sock *parent = subflow->conn;
895
896         if (!subflow->mp_capable && !subflow->mp_join) {
897                 subflow->tcp_data_ready(sk);
898
899                 parent->sk_data_ready(parent);
900                 return;
901         }
902
903         if (mptcp_subflow_data_available(sk))
904                 mptcp_data_ready(parent, sk);
905 }
906
907 static void subflow_write_space(struct sock *sk)
908 {
909         struct mptcp_subflow_context *subflow = mptcp_subflow_ctx(sk);
910         struct sock *parent = subflow->conn;
911
912         sk_stream_write_space(sk);
913         if (sk_stream_is_writeable(sk)) {
914                 set_bit(MPTCP_SEND_SPACE, &mptcp_sk(parent)->flags);
915                 smp_mb__after_atomic();
916                 /* set SEND_SPACE before sk_stream_write_space clears NOSPACE */
917                 sk_stream_write_space(parent);
918         }
919 }
920
921 static struct inet_connection_sock_af_ops *
922 subflow_default_af_ops(struct sock *sk)
923 {
924 #if IS_ENABLED(CONFIG_MPTCP_IPV6)
925         if (sk->sk_family == AF_INET6)
926                 return &subflow_v6_specific;
927 #endif
928         return &subflow_specific;
929 }
930
931 #if IS_ENABLED(CONFIG_MPTCP_IPV6)
932 void mptcpv6_handle_mapped(struct sock *sk, bool mapped)
933 {
934         struct mptcp_subflow_context *subflow = mptcp_subflow_ctx(sk);
935         struct inet_connection_sock *icsk = inet_csk(sk);
936         struct inet_connection_sock_af_ops *target;
937
938         target = mapped ? &subflow_v6m_specific : subflow_default_af_ops(sk);
939
940         pr_debug("subflow=%p family=%d ops=%p target=%p mapped=%d",
941                  subflow, sk->sk_family, icsk->icsk_af_ops, target, mapped);
942
943         if (likely(icsk->icsk_af_ops == target))
944                 return;
945
946         subflow->icsk_af_ops = icsk->icsk_af_ops;
947         icsk->icsk_af_ops = target;
948 }
949 #endif
950
951 static void mptcp_info2sockaddr(const struct mptcp_addr_info *info,
952                                 struct sockaddr_storage *addr)
953 {
954         memset(addr, 0, sizeof(*addr));
955         addr->ss_family = info->family;
956         if (addr->ss_family == AF_INET) {
957                 struct sockaddr_in *in_addr = (struct sockaddr_in *)addr;
958
959                 in_addr->sin_addr = info->addr;
960                 in_addr->sin_port = info->port;
961         }
962 #if IS_ENABLED(CONFIG_MPTCP_IPV6)
963         else if (addr->ss_family == AF_INET6) {
964                 struct sockaddr_in6 *in6_addr = (struct sockaddr_in6 *)addr;
965
966                 in6_addr->sin6_addr = info->addr6;
967                 in6_addr->sin6_port = info->port;
968         }
969 #endif
970 }
971
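/* create and connect a new outgoing MP_JOIN subflow for the given msk,
 * binding it to the requested local address and interface
 */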
972 int __mptcp_subflow_connect(struct sock *sk, int ifindex,
973                             const struct mptcp_addr_info *loc,
974                             const struct mptcp_addr_info *remote)
975 {
976         struct mptcp_sock *msk = mptcp_sk(sk);
977         struct mptcp_subflow_context *subflow;
978         struct sockaddr_storage addr;
979         struct socket *sf;
980         u32 remote_token;
981         int addrlen;
982         int err;
983
984         if (sk->sk_state != TCP_ESTABLISHED)
985                 return -ENOTCONN;
986
987         err = mptcp_subflow_create_socket(sk, &sf);
988         if (err)
989                 return err;
990
991         subflow = mptcp_subflow_ctx(sf->sk);
992         subflow->remote_key = msk->remote_key;
993         subflow->local_key = msk->local_key;
994         subflow->token = msk->token;
995         mptcp_info2sockaddr(loc, &addr);
996
997         addrlen = sizeof(struct sockaddr_in);
998 #if IS_ENABLED(CONFIG_MPTCP_IPV6)
999         if (loc->family == AF_INET6)
1000                 addrlen = sizeof(struct sockaddr_in6);
1001 #endif
1002         sf->sk->sk_bound_dev_if = ifindex;
1003         err = kernel_bind(sf, (struct sockaddr *)&addr, addrlen);
1004         if (err)
1005                 goto failed;
1006
1007         mptcp_crypto_key_sha(subflow->remote_key, &remote_token, NULL);
1008         pr_debug("msk=%p remote_token=%u", msk, remote_token);
1009         subflow->remote_token = remote_token;
1010         subflow->local_id = loc->id;
1011         subflow->request_join = 1;
1012         subflow->request_bkup = 1;
1013         mptcp_info2sockaddr(remote, &addr);
1014
1015         err = kernel_connect(sf, (struct sockaddr *)&addr, addrlen, O_NONBLOCK);
1016         if (err && err != -EINPROGRESS)
1017                 goto failed;
1018
1019         spin_lock_bh(&msk->join_list_lock);
1020         list_add_tail(&subflow->node, &msk->join_list);
1021         spin_unlock_bh(&msk->join_list_lock);
1022
1023         return err;
1024
1025 failed:
1026         sock_release(sf);
1027         return err;
1028 }
1029
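/* create a kernel TCP socket attached to the "mptcp" ULP and link it to the
 * owning MPTCP socket
 */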
1030 int mptcp_subflow_create_socket(struct sock *sk, struct socket **new_sock)
1031 {
1032         struct mptcp_subflow_context *subflow;
1033         struct net *net = sock_net(sk);
1034         struct socket *sf;
1035         int err;
1036
1037         err = sock_create_kern(net, sk->sk_family, SOCK_STREAM, IPPROTO_TCP,
1038                                &sf);
1039         if (err)
1040                 return err;
1041
1042         lock_sock(sf->sk);
1043
1044         /* kernel sockets do not acquire a net ref by default, but the TCP
1045          * timer needs it.
1046          */
1047         sf->sk->sk_net_refcnt = 1;
1048         get_net(net);
1049 #ifdef CONFIG_PROC_FS
1050         this_cpu_add(*net->core.sock_inuse, 1);
1051 #endif
1052         err = tcp_set_ulp(sf->sk, "mptcp");
1053         release_sock(sf->sk);
1054
1055         if (err)
1056                 return err;
1057
1058         /* the newly created socket really belongs to the owning MPTCP master
1059          * socket, even if for additional subflows the allocation is performed
1060          * by a kernel workqueue. Adjust inode references, so that the
1061          * procfs/diag interfaces really show this one as belonging to the
1062          * correct user.
1063          */
1064         SOCK_INODE(sf)->i_ino = SOCK_INODE(sk->sk_socket)->i_ino;
1065         SOCK_INODE(sf)->i_uid = SOCK_INODE(sk->sk_socket)->i_uid;
1066         SOCK_INODE(sf)->i_gid = SOCK_INODE(sk->sk_socket)->i_gid;
1067
1068         subflow = mptcp_subflow_ctx(sf->sk);
1069         pr_debug("subflow=%p", subflow);
1070
1071         *new_sock = sf;
1072         sock_hold(sk);
1073         subflow->conn = sk;
1074
1075         return 0;
1076 }
1077
1078 static struct mptcp_subflow_context *subflow_create_ctx(struct sock *sk,
1079                                                         gfp_t priority)
1080 {
1081         struct inet_connection_sock *icsk = inet_csk(sk);
1082         struct mptcp_subflow_context *ctx;
1083
1084         ctx = kzalloc(sizeof(*ctx), priority);
1085         if (!ctx)
1086                 return NULL;
1087
1088         rcu_assign_pointer(icsk->icsk_ulp_data, ctx);
1089         INIT_LIST_HEAD(&ctx->node);
1090
1091         pr_debug("subflow=%p", ctx);
1092
1093         ctx->tcp_sock = sk;
1094
1095         return ctx;
1096 }
1097
1098 static void __subflow_state_change(struct sock *sk)
1099 {
1100         struct socket_wq *wq;
1101
1102         rcu_read_lock();
1103         wq = rcu_dereference(sk->sk_wq);
1104         if (skwq_has_sleeper(wq))
1105                 wake_up_interruptible_all(&wq->wait);
1106         rcu_read_unlock();
1107 }
1108
1109 static bool subflow_is_done(const struct sock *sk)
1110 {
1111         return sk->sk_shutdown & RCV_SHUTDOWN || sk->sk_state == TCP_CLOSE;
1112 }
1113
1114 static void subflow_state_change(struct sock *sk)
1115 {
1116         struct mptcp_subflow_context *subflow = mptcp_subflow_ctx(sk);
1117         struct sock *parent = subflow->conn;
1118
1119         __subflow_state_change(sk);
1120
1121         /* as recvmsg() does not acquire the subflow socket for ssk selection,
1122          * a FIN packet carrying a DSS can go unnoticed if we don't trigger
1123          * the data-available machinery here.
1124          */
1125         if (subflow->mp_capable && mptcp_subflow_data_available(sk))
1126                 mptcp_data_ready(parent, sk);
1127
1128         if (!(parent->sk_shutdown & RCV_SHUTDOWN) &&
1129             !subflow->rx_eof && subflow_is_done(sk)) {
1130                 subflow->rx_eof = 1;
1131                 mptcp_subflow_eof(parent);
1132         }
1133 }
1134
1135 static int subflow_ulp_init(struct sock *sk)
1136 {
1137         struct inet_connection_sock *icsk = inet_csk(sk);
1138         struct mptcp_subflow_context *ctx;
1139         struct tcp_sock *tp = tcp_sk(sk);
1140         int err = 0;
1141
1142         /* disallow attaching ULP to a socket unless it has been
1143          * created with sock_create_kern()
1144          */
1145         if (!sk->sk_kern_sock) {
1146                 err = -EOPNOTSUPP;
1147                 goto out;
1148         }
1149
1150         ctx = subflow_create_ctx(sk, GFP_KERNEL);
1151         if (!ctx) {
1152                 err = -ENOMEM;
1153                 goto out;
1154         }
1155
1156         pr_debug("subflow=%p, family=%d", ctx, sk->sk_family);
1157
1158         tp->is_mptcp = 1;
1159         ctx->icsk_af_ops = icsk->icsk_af_ops;
1160         icsk->icsk_af_ops = subflow_default_af_ops(sk);
1161         ctx->tcp_data_ready = sk->sk_data_ready;
1162         ctx->tcp_state_change = sk->sk_state_change;
1163         ctx->tcp_write_space = sk->sk_write_space;
1164         sk->sk_data_ready = subflow_data_ready;
1165         sk->sk_write_space = subflow_write_space;
1166         sk->sk_state_change = subflow_state_change;
1167 out:
1168         return err;
1169 }
1170
1171 static void subflow_ulp_release(struct sock *sk)
1172 {
1173         struct mptcp_subflow_context *ctx = mptcp_subflow_ctx(sk);
1174
1175         if (!ctx)
1176                 return;
1177
1178         if (ctx->conn)
1179                 sock_put(ctx->conn);
1180
1181         kfree_rcu(ctx, rcu);
1182 }
1183
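/* initialize the MPTCP subflow context of a freshly cloned child socket from
 * the request socket data, or fall back to plain TCP if the handshake did
 * not negotiate MPTCP
 */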
1184 static void subflow_ulp_clone(const struct request_sock *req,
1185                               struct sock *newsk,
1186                               const gfp_t priority)
1187 {
1188         struct mptcp_subflow_request_sock *subflow_req = mptcp_subflow_rsk(req);
1189         struct mptcp_subflow_context *old_ctx = mptcp_subflow_ctx(newsk);
1190         struct mptcp_subflow_context *new_ctx;
1191
1192         if (!tcp_rsk(req)->is_mptcp ||
1193             (!subflow_req->mp_capable && !subflow_req->mp_join)) {
1194                 subflow_ulp_fallback(newsk, old_ctx);
1195                 return;
1196         }
1197
1198         new_ctx = subflow_create_ctx(newsk, priority);
1199         if (!new_ctx) {
1200                 subflow_ulp_fallback(newsk, old_ctx);
1201                 return;
1202         }
1203
1204         new_ctx->conn_finished = 1;
1205         new_ctx->icsk_af_ops = old_ctx->icsk_af_ops;
1206         new_ctx->tcp_data_ready = old_ctx->tcp_data_ready;
1207         new_ctx->tcp_state_change = old_ctx->tcp_state_change;
1208         new_ctx->tcp_write_space = old_ctx->tcp_write_space;
1209         new_ctx->rel_write_seq = 1;
1210         new_ctx->tcp_sock = newsk;
1211
1212         if (subflow_req->mp_capable) {
1213                 /* see comments in subflow_syn_recv_sock(): the MPTCP connection
1214                  * is fully established only after we receive the remote key
1215                  */
1216                 new_ctx->mp_capable = 1;
1217                 new_ctx->local_key = subflow_req->local_key;
1218                 new_ctx->token = subflow_req->token;
1219                 new_ctx->ssn_offset = subflow_req->ssn_offset;
1220                 new_ctx->idsn = subflow_req->idsn;
1221         } else if (subflow_req->mp_join) {
1222                 new_ctx->ssn_offset = subflow_req->ssn_offset;
1223                 new_ctx->mp_join = 1;
1224                 new_ctx->fully_established = 1;
1225                 new_ctx->backup = subflow_req->backup;
1226                 new_ctx->local_id = subflow_req->local_id;
1227                 new_ctx->token = subflow_req->token;
1228                 new_ctx->thmac = subflow_req->thmac;
1229         }
1230 }
1231
1232 static struct tcp_ulp_ops subflow_ulp_ops __read_mostly = {
1233         .name           = "mptcp",
1234         .owner          = THIS_MODULE,
1235         .init           = subflow_ulp_init,
1236         .release        = subflow_ulp_release,
1237         .clone          = subflow_ulp_clone,
1238 };
1239
1240 static int subflow_ops_init(struct request_sock_ops *subflow_ops)
1241 {
1242         subflow_ops->obj_size = sizeof(struct mptcp_subflow_request_sock);
1243         subflow_ops->slab_name = "request_sock_subflow";
1244
1245         subflow_ops->slab = kmem_cache_create(subflow_ops->slab_name,
1246                                               subflow_ops->obj_size, 0,
1247                                               SLAB_ACCOUNT |
1248                                               SLAB_TYPESAFE_BY_RCU,
1249                                               NULL);
1250         if (!subflow_ops->slab)
1251                 return -ENOMEM;
1252
1253         subflow_ops->destructor = subflow_req_destructor;
1254
1255         return 0;
1256 }
1257
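/* set up the MPTCP-specific request sock ops and af_ops, and register the
 * "mptcp" ULP
 */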
1258 void mptcp_subflow_init(void)
1259 {
1260         subflow_request_sock_ops = tcp_request_sock_ops;
1261         if (subflow_ops_init(&subflow_request_sock_ops) != 0)
1262                 panic("MPTCP: failed to init subflow request sock ops\n");
1263
1264         subflow_request_sock_ipv4_ops = tcp_request_sock_ipv4_ops;
1265         subflow_request_sock_ipv4_ops.init_req = subflow_v4_init_req;
1266
1267         subflow_specific = ipv4_specific;
1268         subflow_specific.conn_request = subflow_v4_conn_request;
1269         subflow_specific.syn_recv_sock = subflow_syn_recv_sock;
1270         subflow_specific.sk_rx_dst_set = subflow_finish_connect;
1271         subflow_specific.rebuild_header = subflow_rebuild_header;
1272
1273 #if IS_ENABLED(CONFIG_MPTCP_IPV6)
1274         subflow_request_sock_ipv6_ops = tcp_request_sock_ipv6_ops;
1275         subflow_request_sock_ipv6_ops.init_req = subflow_v6_init_req;
1276
1277         subflow_v6_specific = ipv6_specific;
1278         subflow_v6_specific.conn_request = subflow_v6_conn_request;
1279         subflow_v6_specific.syn_recv_sock = subflow_syn_recv_sock;
1280         subflow_v6_specific.sk_rx_dst_set = subflow_finish_connect;
1281         subflow_v6_specific.rebuild_header = subflow_rebuild_header;
1282
1283         subflow_v6m_specific = subflow_v6_specific;
1284         subflow_v6m_specific.queue_xmit = ipv4_specific.queue_xmit;
1285         subflow_v6m_specific.send_check = ipv4_specific.send_check;
1286         subflow_v6m_specific.net_header_len = ipv4_specific.net_header_len;
1287         subflow_v6m_specific.mtu_reduced = ipv4_specific.mtu_reduced;
1288         subflow_v6m_specific.net_frag_header_len = 0;
1289 #endif
1290
1291         mptcp_diag_subflow_init(&subflow_ulp_ops);
1292
1293         if (tcp_register_ulp(&subflow_ulp_ops) != 0)
1294                 panic("MPTCP: failed to register subflows to ULP\n");
1295 }