// SPDX-License-Identifier: GPL-2.0
/* Multipath TCP
 *
 * Copyright (c) 2017 - 2019, Intel Corporation.
 */

#define pr_fmt(fmt) "MPTCP: " fmt

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/netdevice.h>
#include <crypto/algapi.h>
#include <crypto/sha.h>
#include <net/sock.h>
#include <net/inet_common.h>
#include <net/inet_hashtables.h>
#include <net/protocol.h>
#include <net/tcp.h>
#if IS_ENABLED(CONFIG_MPTCP_IPV6)
#include <net/ip6_route.h>
#endif
#include <net/mptcp.h>
#include "protocol.h"
#include "mib.h"

static void SUBFLOW_REQ_INC_STATS(struct request_sock *req,
				  enum linux_mptcp_mib_field field)
{
	MPTCP_INC_STATS(sock_net(req_to_sk(req)), field);
}

static int subflow_rebuild_header(struct sock *sk)
{
	struct mptcp_subflow_context *subflow = mptcp_subflow_ctx(sk);
	int local_id, err = 0;

	if (subflow->request_mptcp && !subflow->token) {
		pr_debug("subflow=%p", sk);
		err = mptcp_token_new_connect(sk);
	} else if (subflow->request_join && !subflow->local_nonce) {
		struct mptcp_sock *msk = (struct mptcp_sock *)subflow->conn;

		pr_debug("subflow=%p", sk);

		do {
			get_random_bytes(&subflow->local_nonce, sizeof(u32));
		} while (!subflow->local_nonce);

		if (subflow->local_id)
			goto out;

		local_id = mptcp_pm_get_local_id(msk, (struct sock_common *)sk);
		if (local_id < 0)
			return -EINVAL;

		subflow->local_id = local_id;
	}

out:
	if (err)
		return err;

	return subflow->icsk_af_ops->rebuild_header(sk);
}

static void subflow_req_destructor(struct request_sock *req)
{
	struct mptcp_subflow_request_sock *subflow_req = mptcp_subflow_rsk(req);

	pr_debug("subflow_req=%p", subflow_req);

	if (subflow_req->msk)
		sock_put((struct sock *)subflow_req->msk);

	if (subflow_req->mp_capable)
		mptcp_token_destroy_request(subflow_req->token);
	tcp_request_sock_ops.destructor(req);
}

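/* Compute the MP_JOIN authentication HMAC: HMAC-SHA256 keyed with the
 * concatenation of key1 and key2, over the two 32-bit nonces in network
 * byte order (cf. RFC 8684, sec. 3.2). Callers use truncations of the
 * 256-bit result: the leftmost 64 bits ("thmac") travel in the SYN-ACK,
 * while the third ACK carries the leftmost MPTCPOPT_HMAC_LEN bytes.
 */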
static void subflow_generate_hmac(u64 key1, u64 key2, u32 nonce1, u32 nonce2,
				  void *hmac)
{
	u8 msg[8];

	put_unaligned_be32(nonce1, &msg[0]);
	put_unaligned_be32(nonce2, &msg[4]);

	mptcp_crypto_hmac_sha(key1, key2, msg, 8, hmac);
}

/* validate received token and create truncated hmac and nonce for SYN-ACK */
static struct mptcp_sock *subflow_token_join_request(struct request_sock *req,
						     const struct sk_buff *skb)
{
	struct mptcp_subflow_request_sock *subflow_req = mptcp_subflow_rsk(req);
	u8 hmac[SHA256_DIGEST_SIZE];
	struct mptcp_sock *msk;
	int local_id;

	msk = mptcp_token_get_sock(subflow_req->token);
	if (!msk) {
		SUBFLOW_REQ_INC_STATS(req, MPTCP_MIB_JOINNOTOKEN);
		return NULL;
	}

	local_id = mptcp_pm_get_local_id(msk, (struct sock_common *)req);
	if (local_id < 0) {
		sock_put((struct sock *)msk);
		return NULL;
	}
	subflow_req->local_id = local_id;

	get_random_bytes(&subflow_req->local_nonce, sizeof(u32));

	subflow_generate_hmac(msk->local_key, msk->remote_key,
			      subflow_req->local_nonce,
			      subflow_req->remote_nonce, hmac);

	subflow_req->thmac = get_unaligned_be64(hmac);
	return msk;
}

static void subflow_init_req(struct request_sock *req,
			     const struct sock *sk_listener,
			     struct sk_buff *skb)
{
	struct mptcp_subflow_context *listener = mptcp_subflow_ctx(sk_listener);
	struct mptcp_subflow_request_sock *subflow_req = mptcp_subflow_rsk(req);
	struct mptcp_options_received mp_opt;

	pr_debug("subflow_req=%p, listener=%p", subflow_req, listener);

	mptcp_get_options(skb, &mp_opt);

	subflow_req->mp_capable = 0;
	subflow_req->mp_join = 0;
	subflow_req->msk = NULL;

#ifdef CONFIG_TCP_MD5SIG
	/* no MPTCP if MD5SIG is enabled on this socket or we may run out of
	 * TCP option space.
	 */
	if (rcu_access_pointer(tcp_sk(sk_listener)->md5sig_info))
		return;
#endif

	if (mp_opt.mp_capable) {
		SUBFLOW_REQ_INC_STATS(req, MPTCP_MIB_MPCAPABLEPASSIVE);

		if (mp_opt.mp_join)
			return;
	} else if (mp_opt.mp_join) {
		SUBFLOW_REQ_INC_STATS(req, MPTCP_MIB_JOINSYNRX);
	}

	if (mp_opt.mp_capable && listener->request_mptcp) {
		int err;

		err = mptcp_token_new_request(req);
		if (err == 0)
			subflow_req->mp_capable = 1;

		subflow_req->ssn_offset = TCP_SKB_CB(skb)->seq;
	} else if (mp_opt.mp_join && listener->request_mptcp) {
		subflow_req->ssn_offset = TCP_SKB_CB(skb)->seq;
		subflow_req->mp_join = 1;
		subflow_req->backup = mp_opt.backup;
		subflow_req->remote_id = mp_opt.join_id;
		subflow_req->token = mp_opt.token;
		subflow_req->remote_nonce = mp_opt.nonce;
		subflow_req->msk = subflow_token_join_request(req, skb);
		pr_debug("token=%u, remote_nonce=%u msk=%p", subflow_req->token,
			 subflow_req->remote_nonce, subflow_req->msk);
	}
}

static void subflow_v4_init_req(struct request_sock *req,
				const struct sock *sk_listener,
				struct sk_buff *skb)
{
	tcp_rsk(req)->is_mptcp = 1;

	tcp_request_sock_ipv4_ops.init_req(req, sk_listener, skb);

	subflow_init_req(req, sk_listener, skb);
}

#if IS_ENABLED(CONFIG_MPTCP_IPV6)
static void subflow_v6_init_req(struct request_sock *req,
				const struct sock *sk_listener,
				struct sk_buff *skb)
{
	tcp_rsk(req)->is_mptcp = 1;

	tcp_request_sock_ipv6_ops.init_req(req, sk_listener, skb);

	subflow_init_req(req, sk_listener, skb);
}
#endif

/* validate received truncated hmac and create hmac for third ACK */
static bool subflow_thmac_valid(struct mptcp_subflow_context *subflow)
{
	u8 hmac[SHA256_DIGEST_SIZE];
	u64 thmac;

	subflow_generate_hmac(subflow->remote_key, subflow->local_key,
			      subflow->remote_nonce, subflow->local_nonce,
			      hmac);

	thmac = get_unaligned_be64(hmac);
	pr_debug("subflow=%p, token=%u, thmac=%llu, subflow->thmac=%llu\n",
		 subflow, subflow->token,
		 (unsigned long long)thmac,
		 (unsigned long long)subflow->thmac);

	return thmac == subflow->thmac;
}

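/* Process the SYN-ACK on the active-opener side. Three outcomes are
 * possible: an MP_CAPABLE reply completes the MPTCP handshake, an
 * MP_JOIN reply must carry a truncated HMAC we can verify (otherwise the
 * subflow is reset), and a plain TCP reply makes the connection fall
 * back to regular TCP.
 */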
static void subflow_finish_connect(struct sock *sk, const struct sk_buff *skb)
{
	struct mptcp_subflow_context *subflow = mptcp_subflow_ctx(sk);
	struct mptcp_options_received mp_opt;
	struct sock *parent = subflow->conn;
	struct tcp_sock *tp = tcp_sk(sk);

	subflow->icsk_af_ops->sk_rx_dst_set(sk, skb);

	if (inet_sk_state_load(parent) == TCP_SYN_SENT) {
		inet_sk_state_store(parent, TCP_ESTABLISHED);
		parent->sk_state_change(parent);
	}

	/* be sure no special action on any packet other than syn-ack */
	if (subflow->conn_finished)
		return;

	subflow->conn_finished = 1;

	mptcp_get_options(skb, &mp_opt);
	if (subflow->request_mptcp && mp_opt.mp_capable) {
		subflow->mp_capable = 1;
		subflow->can_ack = 1;
		subflow->remote_key = mp_opt.sndr_key;
		pr_debug("subflow=%p, remote_key=%llu", subflow,
			 subflow->remote_key);
	} else if (subflow->request_join && mp_opt.mp_join) {
		subflow->mp_join = 1;
		subflow->thmac = mp_opt.thmac;
		subflow->remote_nonce = mp_opt.nonce;
		pr_debug("subflow=%p, thmac=%llu, remote_nonce=%u", subflow,
			 subflow->thmac, subflow->remote_nonce);
	} else if (subflow->request_mptcp) {
		tp->is_mptcp = 0;
	}

	if (!tp->is_mptcp)
		return;

	if (subflow->mp_capable) {
		pr_debug("subflow=%p, remote_key=%llu", mptcp_subflow_ctx(sk),
			 subflow->remote_key);
		mptcp_finish_connect(sk);

		if (skb) {
			pr_debug("synack seq=%u", TCP_SKB_CB(skb)->seq);
			subflow->ssn_offset = TCP_SKB_CB(skb)->seq;
		}
	} else if (subflow->mp_join) {
		u8 hmac[SHA256_DIGEST_SIZE];

		pr_debug("subflow=%p, thmac=%llu, remote_nonce=%u",
			 subflow, subflow->thmac,
			 subflow->remote_nonce);
		if (!subflow_thmac_valid(subflow)) {
			MPTCP_INC_STATS(sock_net(sk), MPTCP_MIB_JOINACKMAC);
			subflow->mp_join = 0;
			goto do_reset;
		}

		subflow_generate_hmac(subflow->local_key, subflow->remote_key,
				      subflow->local_nonce,
				      subflow->remote_nonce,
				      hmac);

		memcpy(subflow->hmac, hmac, MPTCPOPT_HMAC_LEN);

		if (skb)
			subflow->ssn_offset = TCP_SKB_CB(skb)->seq;

		if (!mptcp_finish_join(sk))
			goto do_reset;

		MPTCP_INC_STATS(sock_net(sk), MPTCP_MIB_JOINSYNACKRX);
	} else {
do_reset:
		tcp_send_active_reset(sk, GFP_ATOMIC);
		tcp_done(sk);
	}
}

static struct request_sock_ops subflow_request_sock_ops;
static struct tcp_request_sock_ops subflow_request_sock_ipv4_ops;

static int subflow_v4_conn_request(struct sock *sk, struct sk_buff *skb)
{
	struct mptcp_subflow_context *subflow = mptcp_subflow_ctx(sk);

	pr_debug("subflow=%p", subflow);

	/* Never answer SYNs sent to broadcast or multicast */
	if (skb_rtable(skb)->rt_flags & (RTCF_BROADCAST | RTCF_MULTICAST))
		goto drop;

	return tcp_conn_request(&subflow_request_sock_ops,
				&subflow_request_sock_ipv4_ops,
				sk, skb);
drop:
	tcp_listendrop(sk);
	return 0;
}

#if IS_ENABLED(CONFIG_MPTCP_IPV6)
static struct tcp_request_sock_ops subflow_request_sock_ipv6_ops;
static struct inet_connection_sock_af_ops subflow_v6_specific;
static struct inet_connection_sock_af_ops subflow_v6m_specific;

static int subflow_v6_conn_request(struct sock *sk, struct sk_buff *skb)
{
	struct mptcp_subflow_context *subflow = mptcp_subflow_ctx(sk);

	pr_debug("subflow=%p", subflow);

	if (skb->protocol == htons(ETH_P_IP))
		return subflow_v4_conn_request(sk, skb);

	if (!ipv6_unicast_destination(skb))
		goto drop;

	return tcp_conn_request(&subflow_request_sock_ops,
				&subflow_request_sock_ipv6_ops, sk, skb);

drop:
	tcp_listendrop(sk);
	return 0; /* don't send reset */
}
#endif

/* validate hmac received in third ACK */
static bool subflow_hmac_valid(const struct request_sock *req,
			       const struct mptcp_options_received *mp_opt)
{
	const struct mptcp_subflow_request_sock *subflow_req;
	u8 hmac[SHA256_DIGEST_SIZE];
	struct mptcp_sock *msk;

	subflow_req = mptcp_subflow_rsk(req);
	msk = subflow_req->msk;
	if (!msk)
		return false;

	subflow_generate_hmac(msk->remote_key, msk->local_key,
			      subflow_req->remote_nonce,
			      subflow_req->local_nonce, hmac);

	return !crypto_memneq(hmac, mp_opt->hmac, MPTCPOPT_HMAC_LEN);
}

static void mptcp_sock_destruct(struct sock *sk)
{
	/* if the new mptcp socket isn't accepted, it is freed
	 * from the tcp listener sockets request queue, linked
	 * from req->sk.  The tcp socket is released.
	 * This calls the ULP release function which will
	 * also remove the mptcp socket, via
	 * sock_put(ctx->conn).
	 *
	 * Problem is that the mptcp socket will not be in
	 * SYN_RECV state and doesn't have the SOCK_DEAD flag.
	 * Both result in warnings from inet_sock_destruct.
	 */

	if (sk->sk_state == TCP_SYN_RECV) {
		sk->sk_state = TCP_CLOSE;
		WARN_ON_ONCE(sk->sk_socket);
		sock_orphan(sk);
	}

	mptcp_token_destroy(mptcp_sk(sk)->token);
	inet_sock_destruct(sk);
}

static void mptcp_force_close(struct sock *sk)
{
	inet_sk_state_store(sk, TCP_CLOSE);
	sk_common_release(sk);
}

static void subflow_ulp_fallback(struct sock *sk,
				 struct mptcp_subflow_context *old_ctx)
{
	struct inet_connection_sock *icsk = inet_csk(sk);

	mptcp_subflow_tcp_fallback(sk, old_ctx);
	icsk->icsk_ulp_ops = NULL;
	rcu_assign_pointer(icsk->icsk_ulp_data, NULL);
	tcp_sk(sk)->is_mptcp = 0;
}

static void subflow_drop_ctx(struct sock *ssk)
{
	struct mptcp_subflow_context *ctx = mptcp_subflow_ctx(ssk);

	if (!ctx)
		return;

	subflow_ulp_fallback(ssk, ctx);
	if (ctx->conn)
		sock_put(ctx->conn);

	kfree_rcu(ctx, rcu);
}

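/* Create the child (accepted) socket once the final handshake ACK is
 * received. For MP_CAPABLE flows this also clones the MPTCP master
 * socket; for MP_JOIN flows it verifies the third-ACK HMAC and attaches
 * the subflow to the existing msk.
 */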
static struct sock *subflow_syn_recv_sock(const struct sock *sk,
					  struct sk_buff *skb,
					  struct request_sock *req,
					  struct dst_entry *dst,
					  struct request_sock *req_unhash,
					  bool *own_req)
{
	struct mptcp_subflow_context *listener = mptcp_subflow_ctx(sk);
	struct mptcp_subflow_request_sock *subflow_req;
	struct mptcp_options_received mp_opt;
	bool fallback, fallback_is_fatal;
	struct sock *new_msk = NULL;
	struct sock *child;

	pr_debug("listener=%p, req=%p, conn=%p", listener, req, listener->conn);

	/* After child creation we must look for 'mp_capable' even when options
	 * are not parsed
	 */
	mp_opt.mp_capable = 0;

	/* hopefully temporary handling for MP_JOIN+syncookie */
	subflow_req = mptcp_subflow_rsk(req);
	fallback_is_fatal = subflow_req->mp_join;
	fallback = !tcp_rsk(req)->is_mptcp;
	if (fallback)
		goto create_child;

	/* if the sk is MP_CAPABLE, we try to fetch the client key */
	if (subflow_req->mp_capable) {
		if (TCP_SKB_CB(skb)->seq != subflow_req->ssn_offset + 1) {
			/* here we can receive and accept an in-window,
			 * out-of-order pkt, which will not carry the MP_CAPABLE
			 * opt even on mptcp enabled paths
			 */
			goto create_msk;
		}

		mptcp_get_options(skb, &mp_opt);
		if (!mp_opt.mp_capable) {
			fallback = true;
			goto create_child;
		}

create_msk:
		new_msk = mptcp_sk_clone(listener->conn, &mp_opt, req);
		if (!new_msk)
			fallback = true;
	} else if (subflow_req->mp_join) {
		mptcp_get_options(skb, &mp_opt);
		if (!mp_opt.mp_join ||
		    !subflow_hmac_valid(req, &mp_opt)) {
			SUBFLOW_REQ_INC_STATS(req, MPTCP_MIB_JOINACKMAC);
			fallback = true;
		}
	}

create_child:
	child = listener->icsk_af_ops->syn_recv_sock(sk, skb, req, dst,
						     req_unhash, own_req);

	if (child && *own_req) {
		struct mptcp_subflow_context *ctx = mptcp_subflow_ctx(child);

		tcp_rsk(req)->drop_req = false;

		/* we need to fall back on ctx allocation failure and on the
		 * prerequisite checks above. In the latter scenario we
		 * additionally need to reset the context to non-MPTCP status.
		 */
		if (!ctx || fallback) {
			if (fallback_is_fatal)
				goto dispose_child;

			subflow_drop_ctx(child);
			goto out;
		}

		if (ctx->mp_capable) {
			/* new mpc subflow takes ownership of the newly
			 * created mptcp socket
			 */
			new_msk->sk_destruct = mptcp_sock_destruct;
			mptcp_pm_new_connection(mptcp_sk(new_msk), 1);
			ctx->conn = new_msk;
			new_msk = NULL;

			/* with OoO packets we can reach here without ingress
			 * mpc option
			 */
			ctx->remote_key = mp_opt.sndr_key;
			ctx->fully_established = mp_opt.mp_capable;
			ctx->can_ack = mp_opt.mp_capable;
		} else if (ctx->mp_join) {
			struct mptcp_sock *owner;

			owner = subflow_req->msk;
			if (!owner)
				goto dispose_child;

			/* move the msk reference ownership to the subflow */
			subflow_req->msk = NULL;
			ctx->conn = (struct sock *)owner;
			if (!mptcp_finish_join(child))
				goto dispose_child;

			SUBFLOW_REQ_INC_STATS(req, MPTCP_MIB_JOINACKRX);
			tcp_rsk(req)->drop_req = true;
		}
	}

out:
	/* dispose of the leftover mptcp master, if any */
	if (unlikely(new_msk))
		mptcp_force_close(new_msk);

	/* check for expected invariant - should never trigger, just help
	 * catching earlier subtle bugs
	 */
	WARN_ON_ONCE(child && *own_req && tcp_sk(child)->is_mptcp &&
		     (!mptcp_subflow_ctx(child) ||
		      !mptcp_subflow_ctx(child)->conn));
	return child;

dispose_child:
	subflow_drop_ctx(child);
	tcp_rsk(req)->drop_req = true;
	tcp_send_active_reset(child, GFP_ATOMIC);
	inet_csk_prepare_for_destroy_sock(child);
	tcp_done(child);

	/* The last child reference will be released by the caller */
	return child;
}

static struct inet_connection_sock_af_ops subflow_specific;

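/* DSS mapping status for the skb at the head of the subflow receive
 * queue: MAPPING_OK means the current mapping covers it, MAPPING_INVALID
 * signals a protocol violation (the subflow will be reset),
 * MAPPING_EMPTY means there is no data to process, and MAPPING_DATA_FIN
 * reports a DATA_FIN without payload.
 */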
enum mapping_status {
	MAPPING_OK,
	MAPPING_INVALID,
	MAPPING_EMPTY,
	MAPPING_DATA_FIN
};

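/* Expand a 32-bit data sequence number to 64 bits, assuming the new
 * mapping follows the previous one. Worked example: with
 * old_seq = 0xfffffff0 and old_data_len = 0x20, the old map ends at
 * 0x1_00000010, so an incoming 32-bit seq of 0x00000011 is expanded to
 * 0x00000001_00000011, i.e. the wrap into the next 4GB epoch is kept.
 */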
static u64 expand_seq(u64 old_seq, u16 old_data_len, u64 seq)
{
	if ((u32)seq == (u32)old_seq)
		return old_seq;

	/* Assume map covers data not mapped yet. */
	return seq | ((old_seq + old_data_len + 1) & GENMASK_ULL(63, 32));
}

static void warn_bad_map(struct mptcp_subflow_context *subflow, u32 ssn)
{
	WARN_ONCE(1, "Bad mapping: ssn=%d map_seq=%d map_data_len=%d",
		  ssn, subflow->map_subflow_seq, subflow->map_data_len);
}

static bool skb_is_fully_mapped(struct sock *ssk, struct sk_buff *skb)
{
	struct mptcp_subflow_context *subflow = mptcp_subflow_ctx(ssk);
	unsigned int skb_consumed;

	skb_consumed = tcp_sk(ssk)->copied_seq - TCP_SKB_CB(skb)->seq;
	if (WARN_ON_ONCE(skb_consumed >= skb->len))
		return true;

	return skb->len - skb_consumed <= subflow->map_data_len -
					  mptcp_subflow_get_map_offset(subflow);
}

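/* Check that the relative subflow sequence number of the next byte to
 * read falls inside the current mapping. Example: with
 * map_subflow_seq = 1000 and map_data_len = 100 the mapping covers
 * relative ssn 1000..1099, so ssn 999 (data before the map) and
 * ssn 1100 (map entirely in the past) are both rejected.
 */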
static bool validate_mapping(struct sock *ssk, struct sk_buff *skb)
{
	struct mptcp_subflow_context *subflow = mptcp_subflow_ctx(ssk);
	u32 ssn = tcp_sk(ssk)->copied_seq - subflow->ssn_offset;

	if (unlikely(before(ssn, subflow->map_subflow_seq))) {
		/* Mapping covers data later in the subflow stream,
		 * currently unsupported.
		 */
		warn_bad_map(subflow, ssn);
		return false;
	}
	if (unlikely(!before(ssn, subflow->map_subflow_seq +
				  subflow->map_data_len))) {
		/* Mapping covers only past subflow data, invalid */
		warn_bad_map(subflow, ssn + skb->len);
		return false;
	}
	return true;
}

static enum mapping_status get_mapping_status(struct sock *ssk)
{
	struct mptcp_subflow_context *subflow = mptcp_subflow_ctx(ssk);
	struct mptcp_ext *mpext;
	struct sk_buff *skb;
	u16 data_len;
	u64 map_seq;

	skb = skb_peek(&ssk->sk_receive_queue);
	if (!skb)
		return MAPPING_EMPTY;

	mpext = mptcp_get_ext(skb);
	if (!mpext || !mpext->use_map) {
		if (!subflow->map_valid && !skb->len) {
			/* the TCP stack delivers 0-len FIN pkts to the receive
			 * queue; those are the only 0-len pkts ever expected
			 * here, and a missing mapping is admissible only for
			 * 0-len pkts
			 */
			if (!(TCP_SKB_CB(skb)->tcp_flags & TCPHDR_FIN))
				WARN_ONCE(1, "0len seq %d:%d flags %x",
					  TCP_SKB_CB(skb)->seq,
					  TCP_SKB_CB(skb)->end_seq,
					  TCP_SKB_CB(skb)->tcp_flags);
			sk_eat_skb(ssk, skb);
			return MAPPING_EMPTY;
		}

		if (!subflow->map_valid)
			return MAPPING_INVALID;

		goto validate_seq;
	}

	pr_debug("seq=%llu is64=%d ssn=%u data_len=%u data_fin=%d",
		 mpext->data_seq, mpext->dsn64, mpext->subflow_seq,
		 mpext->data_len, mpext->data_fin);

	data_len = mpext->data_len;
	if (data_len == 0) {
		pr_err("Infinite mapping not handled");
		MPTCP_INC_STATS(sock_net(ssk), MPTCP_MIB_INFINITEMAPRX);
		return MAPPING_INVALID;
	}

	if (mpext->data_fin == 1) {
		if (data_len == 1) {
			pr_debug("DATA_FIN with no payload");
			if (subflow->map_valid) {
				/* A DATA_FIN might arrive in a DSS
				 * option before the previous mapping
				 * has been fully consumed. Continue
				 * handling the existing mapping.
				 */
				skb_ext_del(skb, SKB_EXT_MPTCP);
				return MAPPING_OK;
			} else {
				return MAPPING_DATA_FIN;
			}
		}

		/* Adjust for DATA_FIN using 1 byte of sequence space */
		data_len--;
	}

	if (!mpext->dsn64) {
		map_seq = expand_seq(subflow->map_seq, subflow->map_data_len,
				     mpext->data_seq);
		subflow->use_64bit_ack = 0;
		pr_debug("expanded seq=%llu", map_seq);
	} else {
		map_seq = mpext->data_seq;
		subflow->use_64bit_ack = 1;
	}

	if (subflow->map_valid) {
		/* Allow replacing only with an identical map */
		if (subflow->map_seq == map_seq &&
		    subflow->map_subflow_seq == mpext->subflow_seq &&
		    subflow->map_data_len == data_len) {
			skb_ext_del(skb, SKB_EXT_MPTCP);
			return MAPPING_OK;
		}

		/* If this skb's data is fully covered by the current mapping,
		 * the new map would need caching, which is not supported
		 */
		if (skb_is_fully_mapped(ssk, skb)) {
			MPTCP_INC_STATS(sock_net(ssk), MPTCP_MIB_DSSNOMATCH);
			return MAPPING_INVALID;
		}

		/* will validate the next map after consuming the current one */
		return MAPPING_OK;
	}

	subflow->map_seq = map_seq;
	subflow->map_subflow_seq = mpext->subflow_seq;
	subflow->map_data_len = data_len;
	subflow->map_valid = 1;
	subflow->mpc_map = mpext->mpc_map;
	pr_debug("new map seq=%llu subflow_seq=%u data_len=%u",
		 subflow->map_seq, subflow->map_subflow_seq,
		 subflow->map_data_len);

validate_seq:
	/* we revalidate the existing mapping on each new skb, because we must
	 * ensure the current skb is completely covered by the available mapping
	 */
	if (!validate_mapping(ssk, skb))
		return MAPPING_INVALID;

	skb_ext_del(skb, SKB_EXT_MPTCP);
	return MAPPING_OK;
}

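/* tcp_read_sock() actor that copies nothing: it only accounts for the
 * bytes "read", letting the caller advance copied_seq to skip over data
 * already covered by an old mapping.
 */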
static int subflow_read_actor(read_descriptor_t *desc,
			      struct sk_buff *skb,
			      unsigned int offset, size_t len)
{
	size_t copy_len = min(desc->count, len);

	desc->count -= copy_len;

	pr_debug("flushed %zu bytes, %zu left", copy_len, desc->count);
	return copy_len;
}

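/* Return true when in-sequence, mapped data is ready at the head of the
 * subflow receive queue. Data whose DSN does not line up with the
 * current MPTCP-level ack sequence is discarded along the way; see the
 * in-sequence comment below.
 */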
static bool subflow_check_data_avail(struct sock *ssk)
{
	struct mptcp_subflow_context *subflow = mptcp_subflow_ctx(ssk);
	enum mapping_status status;
	struct mptcp_sock *msk;
	struct sk_buff *skb;

	pr_debug("msk=%p ssk=%p data_avail=%d skb=%p", subflow->conn, ssk,
		 subflow->data_avail, skb_peek(&ssk->sk_receive_queue));
	if (subflow->data_avail)
		return true;

	msk = mptcp_sk(subflow->conn);
	for (;;) {
		u32 map_remaining;
		size_t delta;
		u64 ack_seq;
		u64 old_ack;

		status = get_mapping_status(ssk);
		pr_debug("msk=%p ssk=%p status=%d", msk, ssk, status);
		if (status == MAPPING_INVALID) {
			ssk->sk_err = EBADMSG;
			goto fatal;
		}

		if (status != MAPPING_OK)
			return false;

		skb = skb_peek(&ssk->sk_receive_queue);
		if (WARN_ON_ONCE(!skb))
			return false;

		/* if msk lacks the remote key, this subflow must provide an
		 * MP_CAPABLE-based mapping
		 */
		if (unlikely(!READ_ONCE(msk->can_ack))) {
			if (!subflow->mpc_map) {
				ssk->sk_err = EBADMSG;
				goto fatal;
			}
			WRITE_ONCE(msk->remote_key, subflow->remote_key);
			WRITE_ONCE(msk->ack_seq, subflow->map_seq);
			WRITE_ONCE(msk->can_ack, true);
		}

		old_ack = READ_ONCE(msk->ack_seq);
		ack_seq = mptcp_subflow_get_mapped_dsn(subflow);
		pr_debug("msk ack_seq=%llx subflow ack_seq=%llx", old_ack,
			 ack_seq);
		if (ack_seq == old_ack)
			break;

		/* only accept in-sequence mappings. Old values are spurious
		 * retransmissions; we can hit "future" values on active backup
		 * subflow switch, and we rely on retransmissions to get
		 * in-sequence data.
		 * Concurrent subflows support will require subflow data
		 * reordering
		 */
		map_remaining = subflow->map_data_len -
				mptcp_subflow_get_map_offset(subflow);
		if (before64(ack_seq, old_ack))
			delta = min_t(size_t, old_ack - ack_seq, map_remaining);
		else
			delta = min_t(size_t, ack_seq - old_ack, map_remaining);

		/* discard mapped data */
		pr_debug("discarding %zu bytes, current map len=%d", delta,
			 map_remaining);
		if (delta) {
			read_descriptor_t desc = {
				.count = delta,
			};
			int ret;

			ret = tcp_read_sock(ssk, &desc, subflow_read_actor);
			if (ret < 0) {
				ssk->sk_err = -ret;
				goto fatal;
			}
			if (ret < delta)
				return false;
			if (delta == map_remaining)
				subflow->map_valid = 0;
		}
	}
	return true;

fatal:
	/* fatal protocol error, close the socket */
	/* This barrier is coupled with smp_rmb() in tcp_poll() */
	smp_wmb();
	ssk->sk_error_report(ssk);
	tcp_set_state(ssk, TCP_CLOSE);
	tcp_send_active_reset(ssk, GFP_ATOMIC);
	return false;
}

bool mptcp_subflow_data_available(struct sock *sk)
{
	struct mptcp_subflow_context *subflow = mptcp_subflow_ctx(sk);
	struct sk_buff *skb;

	/* check if current mapping is still valid */
	if (subflow->map_valid &&
	    mptcp_subflow_get_map_offset(subflow) >= subflow->map_data_len) {
		subflow->map_valid = 0;
		subflow->data_avail = 0;

		pr_debug("Done with mapping: seq=%u data_len=%u",
			 subflow->map_subflow_seq,
			 subflow->map_data_len);
	}

	if (!subflow_check_data_avail(sk)) {
		subflow->data_avail = 0;
		return false;
	}

	skb = skb_peek(&sk->sk_receive_queue);
	subflow->data_avail = skb &&
		       before(tcp_sk(sk)->copied_seq, TCP_SKB_CB(skb)->end_seq);
	return subflow->data_avail;
}

/* If ssk has an mptcp parent socket, use the mptcp rcvbuf occupancy,
 * not the ssk one.
 *
 * In mptcp, rwin is about the mptcp-level connection data.
 *
 * Data that is still on the ssk rx queue can thus be ignored:
 * as far as the mptcp peer is concerned, that data is still in flight.
 * The DSS ACK is updated when the skb is moved to the mptcp rx queue.
 */
void mptcp_space(const struct sock *ssk, int *space, int *full_space)
{
	const struct mptcp_subflow_context *subflow = mptcp_subflow_ctx(ssk);
	const struct sock *sk = subflow->conn;

	*space = tcp_space(sk);
	*full_space = tcp_full_space(sk);
}

static void subflow_data_ready(struct sock *sk)
{
	struct mptcp_subflow_context *subflow = mptcp_subflow_ctx(sk);
	struct sock *parent = subflow->conn;

	if (!subflow->mp_capable && !subflow->mp_join) {
		subflow->tcp_data_ready(sk);

		parent->sk_data_ready(parent);
		return;
	}

	if (mptcp_subflow_data_available(sk))
		mptcp_data_ready(parent, sk);
}

static void subflow_write_space(struct sock *sk)
{
	struct mptcp_subflow_context *subflow = mptcp_subflow_ctx(sk);
	struct sock *parent = subflow->conn;

	sk_stream_write_space(sk);
	if (sk_stream_is_writeable(sk)) {
		set_bit(MPTCP_SEND_SPACE, &mptcp_sk(parent)->flags);
		smp_mb__after_atomic();
		/* set SEND_SPACE before sk_stream_write_space clears NOSPACE */
		sk_stream_write_space(parent);
	}
}

static struct inet_connection_sock_af_ops *
subflow_default_af_ops(struct sock *sk)
{
#if IS_ENABLED(CONFIG_MPTCP_IPV6)
	if (sk->sk_family == AF_INET6)
		return &subflow_v6_specific;
#endif
	return &subflow_specific;
}

#if IS_ENABLED(CONFIG_MPTCP_IPV6)
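/* When an IPv6 subflow carries an IPv4-mapped address, switch its af_ops
 * to the mapped variant (IPv4 transmit paths on an IPv6 socket), and
 * restore the default ops when the mapping goes away.
 */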
void mptcpv6_handle_mapped(struct sock *sk, bool mapped)
{
	struct mptcp_subflow_context *subflow = mptcp_subflow_ctx(sk);
	struct inet_connection_sock *icsk = inet_csk(sk);
	struct inet_connection_sock_af_ops *target;

	target = mapped ? &subflow_v6m_specific : subflow_default_af_ops(sk);

	pr_debug("subflow=%p family=%d ops=%p target=%p mapped=%d",
		 subflow, sk->sk_family, icsk->icsk_af_ops, target, mapped);

	if (likely(icsk->icsk_af_ops == target))
		return;

	subflow->icsk_af_ops = icsk->icsk_af_ops;
	icsk->icsk_af_ops = target;
}
#endif

static void mptcp_info2sockaddr(const struct mptcp_addr_info *info,
				struct sockaddr_storage *addr)
{
	memset(addr, 0, sizeof(*addr));
	addr->ss_family = info->family;
	if (addr->ss_family == AF_INET) {
		struct sockaddr_in *in_addr = (struct sockaddr_in *)addr;

		in_addr->sin_addr = info->addr;
		in_addr->sin_port = info->port;
	}
#if IS_ENABLED(CONFIG_MPTCP_IPV6)
	else if (addr->ss_family == AF_INET6) {
		struct sockaddr_in6 *in6_addr = (struct sockaddr_in6 *)addr;

		in6_addr->sin6_addr = info->addr6;
		in6_addr->sin6_port = info->port;
	}
#endif
}

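/* Open an additional MP_JOIN subflow towards the given remote address.
 * Minimal usage sketch (illustrative only, not in-tree code; the
 * addresses and port below are made up): a path manager holding an
 * established msk could do
 *
 *	struct mptcp_addr_info local = { .family = AF_INET, .id = 1 };
 *	struct mptcp_addr_info remote = { .family = AF_INET };
 *
 *	remote.port = htons(8080);
 *	remote.addr.s_addr = htonl(0xc0a80101);		(192.168.1.1)
 *	err = __mptcp_subflow_connect((struct sock *)msk, 0, &local, &remote);
 *
 * A 0 or -EINPROGRESS return means the subflow was queued on the msk
 * join list; the handshake completes asynchronously.
 */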
int __mptcp_subflow_connect(struct sock *sk, int ifindex,
			    const struct mptcp_addr_info *loc,
			    const struct mptcp_addr_info *remote)
{
	struct mptcp_sock *msk = mptcp_sk(sk);
	struct mptcp_subflow_context *subflow;
	struct sockaddr_storage addr;
	struct socket *sf;
	u32 remote_token;
	int addrlen;
	int err;

	if (sk->sk_state != TCP_ESTABLISHED)
		return -ENOTCONN;

	err = mptcp_subflow_create_socket(sk, &sf);
	if (err)
		return err;

	subflow = mptcp_subflow_ctx(sf->sk);
	subflow->remote_key = msk->remote_key;
	subflow->local_key = msk->local_key;
	subflow->token = msk->token;
	mptcp_info2sockaddr(loc, &addr);

	addrlen = sizeof(struct sockaddr_in);
#if IS_ENABLED(CONFIG_MPTCP_IPV6)
	if (loc->family == AF_INET6)
		addrlen = sizeof(struct sockaddr_in6);
#endif
	sf->sk->sk_bound_dev_if = ifindex;
	err = kernel_bind(sf, (struct sockaddr *)&addr, addrlen);
	if (err)
		goto failed;

	mptcp_crypto_key_sha(subflow->remote_key, &remote_token, NULL);
	pr_debug("msk=%p remote_token=%u", msk, remote_token);
	subflow->remote_token = remote_token;
	subflow->local_id = loc->id;
	subflow->request_join = 1;
	subflow->request_bkup = 1;
	mptcp_info2sockaddr(remote, &addr);

	err = kernel_connect(sf, (struct sockaddr *)&addr, addrlen, O_NONBLOCK);
	if (err && err != -EINPROGRESS)
		goto failed;

	spin_lock_bh(&msk->join_list_lock);
	list_add_tail(&subflow->node, &msk->join_list);
	spin_unlock_bh(&msk->join_list_lock);

	return err;

failed:
	sock_release(sf);
	return err;
}

int mptcp_subflow_create_socket(struct sock *sk, struct socket **new_sock)
{
	struct mptcp_subflow_context *subflow;
	struct net *net = sock_net(sk);
	struct socket *sf;
	int err;

	err = sock_create_kern(net, sk->sk_family, SOCK_STREAM, IPPROTO_TCP,
			       &sf);
	if (err)
		return err;

	lock_sock(sf->sk);

	/* kernel sockets do not by default acquire net ref, but TCP timer
	 * needs it.
	 */
	sf->sk->sk_net_refcnt = 1;
	get_net(net);
#ifdef CONFIG_PROC_FS
	this_cpu_add(*net->core.sock_inuse, 1);
#endif
	err = tcp_set_ulp(sf->sk, "mptcp");
	release_sock(sf->sk);

	if (err) {
		sock_release(sf);
		return err;
	}

	/* the newly created socket really belongs to the owning MPTCP master
	 * socket, even if for additional subflows the allocation is performed
	 * by a kernel workqueue. Adjust inode references, so that the
	 * procfs/diag interfaces really show this one belonging to the correct
	 * user.
	 */
	SOCK_INODE(sf)->i_ino = SOCK_INODE(sk->sk_socket)->i_ino;
	SOCK_INODE(sf)->i_uid = SOCK_INODE(sk->sk_socket)->i_uid;
	SOCK_INODE(sf)->i_gid = SOCK_INODE(sk->sk_socket)->i_gid;

	subflow = mptcp_subflow_ctx(sf->sk);
	pr_debug("subflow=%p", subflow);

	*new_sock = sf;
	sock_hold(sk);
	subflow->conn = sk;

	return 0;
}

static struct mptcp_subflow_context *subflow_create_ctx(struct sock *sk,
							gfp_t priority)
{
	struct inet_connection_sock *icsk = inet_csk(sk);
	struct mptcp_subflow_context *ctx;

	ctx = kzalloc(sizeof(*ctx), priority);
	if (!ctx)
		return NULL;

	rcu_assign_pointer(icsk->icsk_ulp_data, ctx);
	INIT_LIST_HEAD(&ctx->node);

	pr_debug("subflow=%p", ctx);

	ctx->tcp_sock = sk;

	return ctx;
}

static void __subflow_state_change(struct sock *sk)
{
	struct socket_wq *wq;

	rcu_read_lock();
	wq = rcu_dereference(sk->sk_wq);
	if (skwq_has_sleeper(wq))
		wake_up_interruptible_all(&wq->wait);
	rcu_read_unlock();
}

static bool subflow_is_done(const struct sock *sk)
{
	return sk->sk_shutdown & RCV_SHUTDOWN || sk->sk_state == TCP_CLOSE;
}

static void subflow_state_change(struct sock *sk)
{
	struct mptcp_subflow_context *subflow = mptcp_subflow_ctx(sk);
	struct sock *parent = subflow->conn;

	__subflow_state_change(sk);

	/* as recvmsg() does not acquire the subflow socket for ssk selection,
	 * a FIN packet carrying a DSS mapping can go unnoticed if we don't
	 * trigger the data available machinery here.
	 */
	if (subflow->mp_capable && mptcp_subflow_data_available(sk))
		mptcp_data_ready(parent, sk);

	if (!(parent->sk_shutdown & RCV_SHUTDOWN) &&
	    !subflow->rx_eof && subflow_is_done(sk)) {
		subflow->rx_eof = 1;
		mptcp_subflow_eof(parent);
	}
}

static int subflow_ulp_init(struct sock *sk)
{
	struct inet_connection_sock *icsk = inet_csk(sk);
	struct mptcp_subflow_context *ctx;
	struct tcp_sock *tp = tcp_sk(sk);
	int err = 0;

	/* disallow attaching ULP to a socket unless it has been
	 * created with sock_create_kern()
	 */
	if (!sk->sk_kern_sock) {
		err = -EOPNOTSUPP;
		goto out;
	}

	ctx = subflow_create_ctx(sk, GFP_KERNEL);
	if (!ctx) {
		err = -ENOMEM;
		goto out;
	}

	pr_debug("subflow=%p, family=%d", ctx, sk->sk_family);

	tp->is_mptcp = 1;
	ctx->icsk_af_ops = icsk->icsk_af_ops;
	icsk->icsk_af_ops = subflow_default_af_ops(sk);
	ctx->tcp_data_ready = sk->sk_data_ready;
	ctx->tcp_state_change = sk->sk_state_change;
	ctx->tcp_write_space = sk->sk_write_space;
	sk->sk_data_ready = subflow_data_ready;
	sk->sk_write_space = subflow_write_space;
	sk->sk_state_change = subflow_state_change;
out:
	return err;
}

static void subflow_ulp_release(struct sock *sk)
{
	struct mptcp_subflow_context *ctx = mptcp_subflow_ctx(sk);

	if (!ctx)
		return;

	if (ctx->conn)
		sock_put(ctx->conn);

	kfree_rcu(ctx, rcu);
}

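/* Clone the listener's subflow context onto the freshly created child
 * socket. Plain TCP requests fall back immediately; MP_CAPABLE and
 * MP_JOIN requests carry over the handshake state gathered on the
 * request socket.
 */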
static void subflow_ulp_clone(const struct request_sock *req,
			      struct sock *newsk,
			      const gfp_t priority)
{
	struct mptcp_subflow_request_sock *subflow_req = mptcp_subflow_rsk(req);
	struct mptcp_subflow_context *old_ctx = mptcp_subflow_ctx(newsk);
	struct mptcp_subflow_context *new_ctx;

	if (!tcp_rsk(req)->is_mptcp ||
	    (!subflow_req->mp_capable && !subflow_req->mp_join)) {
		subflow_ulp_fallback(newsk, old_ctx);
		return;
	}

	new_ctx = subflow_create_ctx(newsk, priority);
	if (!new_ctx) {
		subflow_ulp_fallback(newsk, old_ctx);
		return;
	}

	new_ctx->conn_finished = 1;
	new_ctx->icsk_af_ops = old_ctx->icsk_af_ops;
	new_ctx->tcp_data_ready = old_ctx->tcp_data_ready;
	new_ctx->tcp_state_change = old_ctx->tcp_state_change;
	new_ctx->tcp_write_space = old_ctx->tcp_write_space;
	new_ctx->rel_write_seq = 1;
	new_ctx->tcp_sock = newsk;

	if (subflow_req->mp_capable) {
		/* see comments in subflow_syn_recv_sock(), MPTCP connection
		 * is fully established only after we receive the remote key
		 */
		new_ctx->mp_capable = 1;
		new_ctx->local_key = subflow_req->local_key;
		new_ctx->token = subflow_req->token;
		new_ctx->ssn_offset = subflow_req->ssn_offset;
		new_ctx->idsn = subflow_req->idsn;
	} else if (subflow_req->mp_join) {
		new_ctx->ssn_offset = subflow_req->ssn_offset;
		new_ctx->mp_join = 1;
		new_ctx->fully_established = 1;
		new_ctx->backup = subflow_req->backup;
		new_ctx->local_id = subflow_req->local_id;
		new_ctx->token = subflow_req->token;
		new_ctx->thmac = subflow_req->thmac;
	}
}

static struct tcp_ulp_ops subflow_ulp_ops __read_mostly = {
	.name		= "mptcp",
	.owner		= THIS_MODULE,
	.init		= subflow_ulp_init,
	.release	= subflow_ulp_release,
	.clone		= subflow_ulp_clone,
};

static int subflow_ops_init(struct request_sock_ops *subflow_ops)
{
	subflow_ops->obj_size = sizeof(struct mptcp_subflow_request_sock);
	subflow_ops->slab_name = "request_sock_subflow";

	subflow_ops->slab = kmem_cache_create(subflow_ops->slab_name,
					      subflow_ops->obj_size, 0,
					      SLAB_ACCOUNT |
					      SLAB_TYPESAFE_BY_RCU,
					      NULL);
	if (!subflow_ops->slab)
		return -ENOMEM;

	subflow_ops->destructor = subflow_req_destructor;

	return 0;
}

void mptcp_subflow_init(void)
{
	subflow_request_sock_ops = tcp_request_sock_ops;
	if (subflow_ops_init(&subflow_request_sock_ops) != 0)
		panic("MPTCP: failed to init subflow request sock ops\n");

	subflow_request_sock_ipv4_ops = tcp_request_sock_ipv4_ops;
	subflow_request_sock_ipv4_ops.init_req = subflow_v4_init_req;

	subflow_specific = ipv4_specific;
	subflow_specific.conn_request = subflow_v4_conn_request;
	subflow_specific.syn_recv_sock = subflow_syn_recv_sock;
	subflow_specific.sk_rx_dst_set = subflow_finish_connect;
	subflow_specific.rebuild_header = subflow_rebuild_header;

#if IS_ENABLED(CONFIG_MPTCP_IPV6)
	subflow_request_sock_ipv6_ops = tcp_request_sock_ipv6_ops;
	subflow_request_sock_ipv6_ops.init_req = subflow_v6_init_req;

	subflow_v6_specific = ipv6_specific;
	subflow_v6_specific.conn_request = subflow_v6_conn_request;
	subflow_v6_specific.syn_recv_sock = subflow_syn_recv_sock;
	subflow_v6_specific.sk_rx_dst_set = subflow_finish_connect;
	subflow_v6_specific.rebuild_header = subflow_rebuild_header;

	subflow_v6m_specific = subflow_v6_specific;
	subflow_v6m_specific.queue_xmit = ipv4_specific.queue_xmit;
	subflow_v6m_specific.send_check = ipv4_specific.send_check;
	subflow_v6m_specific.net_header_len = ipv4_specific.net_header_len;
	subflow_v6m_specific.mtu_reduced = ipv4_specific.mtu_reduced;
	subflow_v6m_specific.net_frag_header_len = 0;
#endif

	mptcp_diag_subflow_init(&subflow_ulp_ops);

	if (tcp_register_ulp(&subflow_ulp_ops) != 0)
		panic("MPTCP: failed to register subflows to ULP\n");
}