// SPDX-License-Identifier: GPL-2.0-or-later
/*
 *	TCP over IPv6
 *	Linux INET6 implementation
 *
 *	Authors:
 *	Pedro Roque		<roque@di.fc.ul.pt>
 *
 *	Based on:
 *	linux/net/ipv4/tcp.c
 *	linux/net/ipv4/tcp_input.c
 *	linux/net/ipv4/tcp_output.c
 *
 *	Fixes:
 *	Hideaki YOSHIFUJI	:	sin6_scope_id support
 *	YOSHIFUJI Hideaki @USAGI and:	Support IPV6_V6ONLY socket option, which
 *	Alexey Kuznetsov		allow both IPv4 and IPv6 sockets to bind
 *					a single port at the same time.
 *	YOSHIFUJI Hideaki @USAGI:	convert /proc/net/tcp6 to seq_file.
 */
22 #include <linux/bottom_half.h>
23 #include <linux/module.h>
24 #include <linux/errno.h>
25 #include <linux/types.h>
26 #include <linux/socket.h>
27 #include <linux/sockios.h>
28 #include <linux/net.h>
29 #include <linux/jiffies.h>
31 #include <linux/in6.h>
32 #include <linux/netdevice.h>
33 #include <linux/init.h>
34 #include <linux/jhash.h>
35 #include <linux/ipsec.h>
36 #include <linux/times.h>
37 #include <linux/slab.h>
38 #include <linux/uaccess.h>
39 #include <linux/ipv6.h>
40 #include <linux/icmpv6.h>
41 #include <linux/random.h>
42 #include <linux/indirect_call_wrapper.h>
45 #include <net/ndisc.h>
46 #include <net/inet6_hashtables.h>
47 #include <net/inet6_connection_sock.h>
49 #include <net/transp_v6.h>
50 #include <net/addrconf.h>
51 #include <net/ip6_route.h>
52 #include <net/ip6_checksum.h>
53 #include <net/inet_ecn.h>
54 #include <net/protocol.h>
57 #include <net/dsfield.h>
58 #include <net/timewait_sock.h>
59 #include <net/inet_common.h>
60 #include <net/secure_seq.h>
61 #include <net/hotdata.h>
62 #include <net/busy_poll.h>
64 #include <linux/proc_fs.h>
65 #include <linux/seq_file.h>
67 #include <crypto/hash.h>
68 #include <linux/scatterlist.h>
70 #include <trace/events/tcp.h>
72 static void tcp_v6_send_reset(const struct sock *sk, struct sk_buff *skb);
73 static void tcp_v6_reqsk_send_ack(const struct sock *sk, struct sk_buff *skb,
74 struct request_sock *req);
76 INDIRECT_CALLABLE_SCOPE int tcp_v6_do_rcv(struct sock *sk, struct sk_buff *skb);
78 static const struct inet_connection_sock_af_ops ipv6_mapped;
79 const struct inet_connection_sock_af_ops ipv6_specific;
80 #if defined(CONFIG_TCP_MD5SIG) || defined(CONFIG_TCP_AO)
81 static const struct tcp_sock_af_ops tcp_sock_ipv6_specific;
82 static const struct tcp_sock_af_ops tcp_sock_ipv6_mapped_specific;
/* Helper returning the inet6 address from a given tcp socket.
 * It can be used in TCP stack instead of inet6_sk(sk).
 * This avoids a dereference and allow compiler optimizations.
 * It is a specialized version of inet6_sk_generic().
 */
#define tcp_inet6_sk(sk) (&container_of_const(tcp_sk(sk),	\
					      struct tcp6_sock, tcp)->inet6)
93 static void inet6_sk_rx_dst_set(struct sock *sk, const struct sk_buff *skb)
95 struct dst_entry *dst = skb_dst(skb);
97 if (dst && dst_hold_safe(dst)) {
98 const struct rt6_info *rt = (const struct rt6_info *)dst;
100 rcu_assign_pointer(sk->sk_rx_dst, dst);
101 sk->sk_rx_dst_ifindex = skb->skb_iif;
102 sk->sk_rx_dst_cookie = rt6_get_cookie(rt);
106 static u32 tcp_v6_init_seq(const struct sk_buff *skb)
108 return secure_tcpv6_seq(ipv6_hdr(skb)->daddr.s6_addr32,
109 ipv6_hdr(skb)->saddr.s6_addr32,
111 tcp_hdr(skb)->source);
114 static u32 tcp_v6_init_ts_off(const struct net *net, const struct sk_buff *skb)
116 return secure_tcpv6_ts_off(net, ipv6_hdr(skb)->daddr.s6_addr32,
117 ipv6_hdr(skb)->saddr.s6_addr32);
120 static int tcp_v6_pre_connect(struct sock *sk, struct sockaddr *uaddr,
123 /* This check is replicated from tcp_v6_connect() and intended to
124 * prevent BPF program called below from accessing bytes that are out
125 * of the bound specified by user in addr_len.
127 if (addr_len < SIN6_LEN_RFC2133)
130 sock_owned_by_me(sk);
132 return BPF_CGROUP_RUN_PROG_INET6_CONNECT(sk, uaddr, &addr_len);
135 static int tcp_v6_connect(struct sock *sk, struct sockaddr *uaddr,
138 struct sockaddr_in6 *usin = (struct sockaddr_in6 *) uaddr;
139 struct inet_connection_sock *icsk = inet_csk(sk);
140 struct in6_addr *saddr = NULL, *final_p, final;
141 struct inet_timewait_death_row *tcp_death_row;
142 struct ipv6_pinfo *np = tcp_inet6_sk(sk);
143 struct inet_sock *inet = inet_sk(sk);
144 struct tcp_sock *tp = tcp_sk(sk);
145 struct net *net = sock_net(sk);
146 struct ipv6_txoptions *opt;
147 struct dst_entry *dst;
152 if (addr_len < SIN6_LEN_RFC2133)
155 if (usin->sin6_family != AF_INET6)
156 return -EAFNOSUPPORT;
158 memset(&fl6, 0, sizeof(fl6));
160 if (inet6_test_bit(SNDFLOW, sk)) {
161 fl6.flowlabel = usin->sin6_flowinfo&IPV6_FLOWINFO_MASK;
162 IP6_ECN_flow_init(fl6.flowlabel);
163 if (fl6.flowlabel&IPV6_FLOWLABEL_MASK) {
164 struct ip6_flowlabel *flowlabel;
165 flowlabel = fl6_sock_lookup(sk, fl6.flowlabel);
166 if (IS_ERR(flowlabel))
168 fl6_sock_release(flowlabel);
173 * connect() to INADDR_ANY means loopback (BSD'ism).
176 if (ipv6_addr_any(&usin->sin6_addr)) {
177 if (ipv6_addr_v4mapped(&sk->sk_v6_rcv_saddr))
178 ipv6_addr_set_v4mapped(htonl(INADDR_LOOPBACK),
181 usin->sin6_addr = in6addr_loopback;
184 addr_type = ipv6_addr_type(&usin->sin6_addr);
186 if (addr_type & IPV6_ADDR_MULTICAST)
189 if (addr_type&IPV6_ADDR_LINKLOCAL) {
190 if (addr_len >= sizeof(struct sockaddr_in6) &&
191 usin->sin6_scope_id) {
192 /* If interface is set while binding, indices
195 if (!sk_dev_equal_l3scope(sk, usin->sin6_scope_id))
198 sk->sk_bound_dev_if = usin->sin6_scope_id;
201 /* Connect to link-local address requires an interface */
202 if (!sk->sk_bound_dev_if)
206 if (tp->rx_opt.ts_recent_stamp &&
207 !ipv6_addr_equal(&sk->sk_v6_daddr, &usin->sin6_addr)) {
208 tp->rx_opt.ts_recent = 0;
209 tp->rx_opt.ts_recent_stamp = 0;
210 WRITE_ONCE(tp->write_seq, 0);
213 sk->sk_v6_daddr = usin->sin6_addr;
214 np->flow_label = fl6.flowlabel;
220 if (addr_type & IPV6_ADDR_MAPPED) {
221 u32 exthdrlen = icsk->icsk_ext_hdr_len;
222 struct sockaddr_in sin;
224 if (ipv6_only_sock(sk))
227 sin.sin_family = AF_INET;
228 sin.sin_port = usin->sin6_port;
229 sin.sin_addr.s_addr = usin->sin6_addr.s6_addr32[3];
231 /* Paired with READ_ONCE() in tcp_(get|set)sockopt() */
232 WRITE_ONCE(icsk->icsk_af_ops, &ipv6_mapped);
234 mptcpv6_handle_mapped(sk, true);
235 sk->sk_backlog_rcv = tcp_v4_do_rcv;
236 #if defined(CONFIG_TCP_MD5SIG) || defined(CONFIG_TCP_AO)
237 tp->af_specific = &tcp_sock_ipv6_mapped_specific;
240 err = tcp_v4_connect(sk, (struct sockaddr *)&sin, sizeof(sin));
243 icsk->icsk_ext_hdr_len = exthdrlen;
244 /* Paired with READ_ONCE() in tcp_(get|set)sockopt() */
245 WRITE_ONCE(icsk->icsk_af_ops, &ipv6_specific);
247 mptcpv6_handle_mapped(sk, false);
248 sk->sk_backlog_rcv = tcp_v6_do_rcv;
249 #if defined(CONFIG_TCP_MD5SIG) || defined(CONFIG_TCP_AO)
250 tp->af_specific = &tcp_sock_ipv6_specific;
254 np->saddr = sk->sk_v6_rcv_saddr;
259 if (!ipv6_addr_any(&sk->sk_v6_rcv_saddr))
260 saddr = &sk->sk_v6_rcv_saddr;
262 fl6.flowi6_proto = IPPROTO_TCP;
263 fl6.daddr = sk->sk_v6_daddr;
264 fl6.saddr = saddr ? *saddr : np->saddr;
265 fl6.flowlabel = ip6_make_flowinfo(np->tclass, np->flow_label);
266 fl6.flowi6_oif = sk->sk_bound_dev_if;
267 fl6.flowi6_mark = sk->sk_mark;
268 fl6.fl6_dport = usin->sin6_port;
269 fl6.fl6_sport = inet->inet_sport;
270 fl6.flowi6_uid = sk->sk_uid;
272 opt = rcu_dereference_protected(np->opt, lockdep_sock_is_held(sk));
273 final_p = fl6_update_dst(&fl6, opt, &final);
275 security_sk_classify_flow(sk, flowi6_to_flowi_common(&fl6));
277 dst = ip6_dst_lookup_flow(net, sk, &fl6, final_p);
283 tp->tcp_usec_ts = dst_tcp_usec_ts(dst);
284 tcp_death_row = &sock_net(sk)->ipv4.tcp_death_row;
289 err = inet_bhash2_update_saddr(sk, saddr, AF_INET6);
294 /* set the source address */
296 inet->inet_rcv_saddr = LOOPBACK4_IPV6;
298 sk->sk_gso_type = SKB_GSO_TCPV6;
299 ip6_dst_store(sk, dst, NULL, NULL);
301 icsk->icsk_ext_hdr_len = 0;
303 icsk->icsk_ext_hdr_len = opt->opt_flen +
306 tp->rx_opt.mss_clamp = IPV6_MIN_MTU - sizeof(struct tcphdr) - sizeof(struct ipv6hdr);
308 inet->inet_dport = usin->sin6_port;
310 tcp_set_state(sk, TCP_SYN_SENT);
311 err = inet6_hash_connect(tcp_death_row, sk);
317 if (likely(!tp->repair)) {
319 WRITE_ONCE(tp->write_seq,
320 secure_tcpv6_seq(np->saddr.s6_addr32,
321 sk->sk_v6_daddr.s6_addr32,
324 tp->tsoffset = secure_tcpv6_ts_off(net, np->saddr.s6_addr32,
325 sk->sk_v6_daddr.s6_addr32);
328 if (tcp_fastopen_defer_connect(sk, &err))
333 err = tcp_connect(sk);
340 tcp_set_state(sk, TCP_CLOSE);
341 inet_bhash2_reset_saddr(sk);
343 inet->inet_dport = 0;
344 sk->sk_route_caps = 0;
348 static void tcp_v6_mtu_reduced(struct sock *sk)
350 struct dst_entry *dst;
353 if ((1 << sk->sk_state) & (TCPF_LISTEN | TCPF_CLOSE))
356 mtu = READ_ONCE(tcp_sk(sk)->mtu_info);
358 /* Drop requests trying to increase our current mss.
359 * Check done in __ip6_rt_update_pmtu() is too late.
361 if (tcp_mtu_to_mss(sk, mtu) >= tcp_sk(sk)->mss_cache)
364 dst = inet6_csk_update_pmtu(sk, mtu);
368 if (inet_csk(sk)->icsk_pmtu_cookie > dst_mtu(dst)) {
369 tcp_sync_mss(sk, dst_mtu(dst));
370 tcp_simple_retransmit(sk);
374 static int tcp_v6_err(struct sk_buff *skb, struct inet6_skb_parm *opt,
375 u8 type, u8 code, int offset, __be32 info)
377 const struct ipv6hdr *hdr = (const struct ipv6hdr *)skb->data;
378 const struct tcphdr *th = (struct tcphdr *)(skb->data+offset);
379 struct net *net = dev_net(skb->dev);
380 struct request_sock *fastopen;
381 struct ipv6_pinfo *np;
388 sk = __inet6_lookup_established(net, net->ipv4.tcp_death_row.hashinfo,
389 &hdr->daddr, th->dest,
390 &hdr->saddr, ntohs(th->source),
391 skb->dev->ifindex, inet6_sdif(skb));
394 __ICMP6_INC_STATS(net, __in6_dev_get(skb->dev),
399 if (sk->sk_state == TCP_TIME_WAIT) {
400 /* To increase the counter of ignored icmps for TCP-AO */
401 tcp_ao_ignore_icmp(sk, AF_INET6, type, code);
402 inet_twsk_put(inet_twsk(sk));
405 seq = ntohl(th->seq);
406 fatal = icmpv6_err_convert(type, code, &err);
407 if (sk->sk_state == TCP_NEW_SYN_RECV) {
408 tcp_req_err(sk, seq, fatal);
412 if (tcp_ao_ignore_icmp(sk, AF_INET6, type, code)) {
418 if (sock_owned_by_user(sk) && type != ICMPV6_PKT_TOOBIG)
419 __NET_INC_STATS(net, LINUX_MIB_LOCKDROPPEDICMPS);
421 if (sk->sk_state == TCP_CLOSE)
424 if (static_branch_unlikely(&ip6_min_hopcount)) {
425 /* min_hopcount can be changed concurrently from do_ipv6_setsockopt() */
426 if (ipv6_hdr(skb)->hop_limit < READ_ONCE(tcp_inet6_sk(sk)->min_hopcount)) {
427 __NET_INC_STATS(net, LINUX_MIB_TCPMINTTLDROP);
433 /* XXX (TFO) - tp->snd_una should be ISN (tcp_create_openreq_child() */
434 fastopen = rcu_dereference(tp->fastopen_rsk);
435 snd_una = fastopen ? tcp_rsk(fastopen)->snt_isn : tp->snd_una;
436 if (sk->sk_state != TCP_LISTEN &&
437 !between(seq, snd_una, tp->snd_nxt)) {
438 __NET_INC_STATS(net, LINUX_MIB_OUTOFWINDOWICMPS);
442 np = tcp_inet6_sk(sk);
444 if (type == NDISC_REDIRECT) {
445 if (!sock_owned_by_user(sk)) {
446 struct dst_entry *dst = __sk_dst_check(sk, np->dst_cookie);
449 dst->ops->redirect(dst, sk, skb);
454 if (type == ICMPV6_PKT_TOOBIG) {
455 u32 mtu = ntohl(info);
457 /* We are not interested in TCP_LISTEN and open_requests
458 * (SYN-ACKs send out by Linux are always <576bytes so
459 * they should go through unfragmented).
461 if (sk->sk_state == TCP_LISTEN)
464 if (!ip6_sk_accept_pmtu(sk))
467 if (mtu < IPV6_MIN_MTU)
470 WRITE_ONCE(tp->mtu_info, mtu);
472 if (!sock_owned_by_user(sk))
473 tcp_v6_mtu_reduced(sk);
474 else if (!test_and_set_bit(TCP_MTU_REDUCED_DEFERRED,
481 /* Might be for an request_sock */
482 switch (sk->sk_state) {
485 /* Only in fast or simultaneous open. If a fast open socket is
486 * already accepted it is treated as a connected one below.
488 if (fastopen && !fastopen->sk)
491 ipv6_icmp_error(sk, skb, err, th->dest, ntohl(info), (u8 *)th);
493 if (!sock_owned_by_user(sk)) {
494 WRITE_ONCE(sk->sk_err, err);
495 sk_error_report(sk); /* Wake people up to see the error (see connect in sock.c) */
499 WRITE_ONCE(sk->sk_err_soft, err);
505 /* check if this ICMP message allows revert of backoff.
508 if (!fastopen && type == ICMPV6_DEST_UNREACH &&
509 code == ICMPV6_NOROUTE)
510 tcp_ld_RTO_revert(sk, seq);
513 if (!sock_owned_by_user(sk) && inet6_test_bit(RECVERR6, sk)) {
514 WRITE_ONCE(sk->sk_err, err);
517 WRITE_ONCE(sk->sk_err_soft, err);
526 static int tcp_v6_send_synack(const struct sock *sk, struct dst_entry *dst,
528 struct request_sock *req,
529 struct tcp_fastopen_cookie *foc,
530 enum tcp_synack_type synack_type,
531 struct sk_buff *syn_skb)
533 struct inet_request_sock *ireq = inet_rsk(req);
534 const struct ipv6_pinfo *np = tcp_inet6_sk(sk);
535 struct ipv6_txoptions *opt;
536 struct flowi6 *fl6 = &fl->u.ip6;
541 /* First, grab a route. */
542 if (!dst && (dst = inet6_csk_route_req(sk, fl6, req,
543 IPPROTO_TCP)) == NULL)
546 skb = tcp_make_synack(sk, dst, req, foc, synack_type, syn_skb);
549 __tcp_v6_send_check(skb, &ireq->ir_v6_loc_addr,
550 &ireq->ir_v6_rmt_addr);
552 fl6->daddr = ireq->ir_v6_rmt_addr;
553 if (inet6_test_bit(REPFLOW, sk) && ireq->pktopts)
554 fl6->flowlabel = ip6_flowlabel(ipv6_hdr(ireq->pktopts));
556 tclass = READ_ONCE(sock_net(sk)->ipv4.sysctl_tcp_reflect_tos) ?
557 (tcp_rsk(req)->syn_tos & ~INET_ECN_MASK) |
558 (np->tclass & INET_ECN_MASK) :
561 if (!INET_ECN_is_capable(tclass) &&
562 tcp_bpf_ca_needs_ecn((struct sock *)req))
563 tclass |= INET_ECN_ECT_0;
566 opt = ireq->ipv6_opt;
568 opt = rcu_dereference(np->opt);
569 err = ip6_xmit(sk, skb, fl6, skb->mark ? : READ_ONCE(sk->sk_mark),
570 opt, tclass, READ_ONCE(sk->sk_priority));
572 err = net_xmit_eval(err);
580 static void tcp_v6_reqsk_destructor(struct request_sock *req)
582 kfree(inet_rsk(req)->ipv6_opt);
583 consume_skb(inet_rsk(req)->pktopts);
586 #ifdef CONFIG_TCP_MD5SIG
587 static struct tcp_md5sig_key *tcp_v6_md5_do_lookup(const struct sock *sk,
588 const struct in6_addr *addr,
591 return tcp_md5_do_lookup(sk, l3index,
592 (union tcp_md5_addr *)addr, AF_INET6);
595 static struct tcp_md5sig_key *tcp_v6_md5_lookup(const struct sock *sk,
596 const struct sock *addr_sk)
600 l3index = l3mdev_master_ifindex_by_index(sock_net(sk),
601 addr_sk->sk_bound_dev_if);
602 return tcp_v6_md5_do_lookup(sk, &addr_sk->sk_v6_daddr,
606 static int tcp_v6_parse_md5_keys(struct sock *sk, int optname,
607 sockptr_t optval, int optlen)
609 struct tcp_md5sig cmd;
610 struct sockaddr_in6 *sin6 = (struct sockaddr_in6 *)&cmd.tcpm_addr;
611 union tcp_ao_addr *addr;
617 if (optlen < sizeof(cmd))
620 if (copy_from_sockptr(&cmd, optval, sizeof(cmd)))
623 if (sin6->sin6_family != AF_INET6)
626 flags = cmd.tcpm_flags & TCP_MD5SIG_FLAG_IFINDEX;
627 l3flag = cmd.tcpm_flags & TCP_MD5SIG_FLAG_IFINDEX;
629 if (optname == TCP_MD5SIG_EXT &&
630 cmd.tcpm_flags & TCP_MD5SIG_FLAG_PREFIX) {
631 prefixlen = cmd.tcpm_prefixlen;
632 if (prefixlen > 128 || (ipv6_addr_v4mapped(&sin6->sin6_addr) &&
636 prefixlen = ipv6_addr_v4mapped(&sin6->sin6_addr) ? 32 : 128;
639 if (optname == TCP_MD5SIG_EXT && cmd.tcpm_ifindex &&
640 cmd.tcpm_flags & TCP_MD5SIG_FLAG_IFINDEX) {
641 struct net_device *dev;
644 dev = dev_get_by_index_rcu(sock_net(sk), cmd.tcpm_ifindex);
645 if (dev && netif_is_l3_master(dev))
646 l3index = dev->ifindex;
649 /* ok to reference set/not set outside of rcu;
650 * right now device MUST be an L3 master
652 if (!dev || !l3index)
656 if (!cmd.tcpm_keylen) {
657 if (ipv6_addr_v4mapped(&sin6->sin6_addr))
658 return tcp_md5_do_del(sk, (union tcp_md5_addr *)&sin6->sin6_addr.s6_addr32[3],
661 return tcp_md5_do_del(sk, (union tcp_md5_addr *)&sin6->sin6_addr,
662 AF_INET6, prefixlen, l3index, flags);
665 if (cmd.tcpm_keylen > TCP_MD5SIG_MAXKEYLEN)
668 if (ipv6_addr_v4mapped(&sin6->sin6_addr)) {
669 addr = (union tcp_md5_addr *)&sin6->sin6_addr.s6_addr32[3];
671 /* Don't allow keys for peers that have a matching TCP-AO key.
672 * See the comment in tcp_ao_add_cmd()
674 if (tcp_ao_required(sk, addr, AF_INET,
675 l3flag ? l3index : -1, false))
676 return -EKEYREJECTED;
677 return tcp_md5_do_add(sk, addr,
678 AF_INET, prefixlen, l3index, flags,
679 cmd.tcpm_key, cmd.tcpm_keylen);
682 addr = (union tcp_md5_addr *)&sin6->sin6_addr;
684 /* Don't allow keys for peers that have a matching TCP-AO key.
685 * See the comment in tcp_ao_add_cmd()
687 if (tcp_ao_required(sk, addr, AF_INET6, l3flag ? l3index : -1, false))
688 return -EKEYREJECTED;
690 return tcp_md5_do_add(sk, addr, AF_INET6, prefixlen, l3index, flags,
691 cmd.tcpm_key, cmd.tcpm_keylen);
694 static int tcp_v6_md5_hash_headers(struct tcp_sigpool *hp,
695 const struct in6_addr *daddr,
696 const struct in6_addr *saddr,
697 const struct tcphdr *th, int nbytes)
699 struct tcp6_pseudohdr *bp;
700 struct scatterlist sg;
704 /* 1. TCP pseudo-header (RFC2460) */
707 bp->protocol = cpu_to_be32(IPPROTO_TCP);
708 bp->len = cpu_to_be32(nbytes);
710 _th = (struct tcphdr *)(bp + 1);
711 memcpy(_th, th, sizeof(*th));
714 sg_init_one(&sg, bp, sizeof(*bp) + sizeof(*th));
715 ahash_request_set_crypt(hp->req, &sg, NULL,
716 sizeof(*bp) + sizeof(*th));
717 return crypto_ahash_update(hp->req);
720 static int tcp_v6_md5_hash_hdr(char *md5_hash, const struct tcp_md5sig_key *key,
721 const struct in6_addr *daddr, struct in6_addr *saddr,
722 const struct tcphdr *th)
724 struct tcp_sigpool hp;
726 if (tcp_sigpool_start(tcp_md5_sigpool_id, &hp))
727 goto clear_hash_nostart;
729 if (crypto_ahash_init(hp.req))
731 if (tcp_v6_md5_hash_headers(&hp, daddr, saddr, th, th->doff << 2))
733 if (tcp_md5_hash_key(&hp, key))
735 ahash_request_set_crypt(hp.req, NULL, md5_hash, 0);
736 if (crypto_ahash_final(hp.req))
739 tcp_sigpool_end(&hp);
743 tcp_sigpool_end(&hp);
745 memset(md5_hash, 0, 16);
749 static int tcp_v6_md5_hash_skb(char *md5_hash,
750 const struct tcp_md5sig_key *key,
751 const struct sock *sk,
752 const struct sk_buff *skb)
754 const struct tcphdr *th = tcp_hdr(skb);
755 const struct in6_addr *saddr, *daddr;
756 struct tcp_sigpool hp;
758 if (sk) { /* valid for establish/request sockets */
759 saddr = &sk->sk_v6_rcv_saddr;
760 daddr = &sk->sk_v6_daddr;
762 const struct ipv6hdr *ip6h = ipv6_hdr(skb);
763 saddr = &ip6h->saddr;
764 daddr = &ip6h->daddr;
767 if (tcp_sigpool_start(tcp_md5_sigpool_id, &hp))
768 goto clear_hash_nostart;
770 if (crypto_ahash_init(hp.req))
773 if (tcp_v6_md5_hash_headers(&hp, daddr, saddr, th, skb->len))
775 if (tcp_sigpool_hash_skb_data(&hp, skb, th->doff << 2))
777 if (tcp_md5_hash_key(&hp, key))
779 ahash_request_set_crypt(hp.req, NULL, md5_hash, 0);
780 if (crypto_ahash_final(hp.req))
783 tcp_sigpool_end(&hp);
787 tcp_sigpool_end(&hp);
789 memset(md5_hash, 0, 16);
794 static void tcp_v6_init_req(struct request_sock *req,
795 const struct sock *sk_listener,
798 bool l3_slave = ipv6_l3mdev_skb(TCP_SKB_CB(skb)->header.h6.flags);
799 struct inet_request_sock *ireq = inet_rsk(req);
800 const struct ipv6_pinfo *np = tcp_inet6_sk(sk_listener);
802 ireq->ir_v6_rmt_addr = ipv6_hdr(skb)->saddr;
803 ireq->ir_v6_loc_addr = ipv6_hdr(skb)->daddr;
805 /* So that link locals have meaning */
806 if ((!sk_listener->sk_bound_dev_if || l3_slave) &&
807 ipv6_addr_type(&ireq->ir_v6_rmt_addr) & IPV6_ADDR_LINKLOCAL)
808 ireq->ir_iif = tcp_v6_iif(skb);
810 if (!TCP_SKB_CB(skb)->tcp_tw_isn &&
811 (ipv6_opt_accepted(sk_listener, skb, &TCP_SKB_CB(skb)->header.h6) ||
812 np->rxopt.bits.rxinfo ||
813 np->rxopt.bits.rxoinfo || np->rxopt.bits.rxhlim ||
814 np->rxopt.bits.rxohlim || inet6_test_bit(REPFLOW, sk_listener))) {
815 refcount_inc(&skb->users);
820 static struct dst_entry *tcp_v6_route_req(const struct sock *sk,
823 struct request_sock *req)
825 tcp_v6_init_req(req, sk, skb);
827 if (security_inet_conn_request(sk, skb, req))
830 return inet6_csk_route_req(sk, &fl->u.ip6, req, IPPROTO_TCP);
833 struct request_sock_ops tcp6_request_sock_ops __read_mostly = {
835 .obj_size = sizeof(struct tcp6_request_sock),
836 .rtx_syn_ack = tcp_rtx_synack,
837 .send_ack = tcp_v6_reqsk_send_ack,
838 .destructor = tcp_v6_reqsk_destructor,
839 .send_reset = tcp_v6_send_reset,
840 .syn_ack_timeout = tcp_syn_ack_timeout,
843 const struct tcp_request_sock_ops tcp_request_sock_ipv6_ops = {
844 .mss_clamp = IPV6_MIN_MTU - sizeof(struct tcphdr) -
845 sizeof(struct ipv6hdr),
846 #ifdef CONFIG_TCP_MD5SIG
847 .req_md5_lookup = tcp_v6_md5_lookup,
848 .calc_md5_hash = tcp_v6_md5_hash_skb,
851 .ao_lookup = tcp_v6_ao_lookup_rsk,
852 .ao_calc_key = tcp_v6_ao_calc_key_rsk,
853 .ao_synack_hash = tcp_v6_ao_synack_hash,
855 #ifdef CONFIG_SYN_COOKIES
856 .cookie_init_seq = cookie_v6_init_sequence,
858 .route_req = tcp_v6_route_req,
859 .init_seq = tcp_v6_init_seq,
860 .init_ts_off = tcp_v6_init_ts_off,
861 .send_synack = tcp_v6_send_synack,
864 static void tcp_v6_send_response(const struct sock *sk, struct sk_buff *skb, u32 seq,
865 u32 ack, u32 win, u32 tsval, u32 tsecr,
866 int oif, int rst, u8 tclass, __be32 label,
867 u32 priority, u32 txhash, struct tcp_key *key)
869 const struct tcphdr *th = tcp_hdr(skb);
871 struct sk_buff *buff;
873 struct net *net = sk ? sock_net(sk) : dev_net(skb_dst(skb)->dev);
874 struct sock *ctl_sk = net->ipv6.tcp_sk;
875 unsigned int tot_len = sizeof(struct tcphdr);
876 __be32 mrst = 0, *topt;
877 struct dst_entry *dst;
881 tot_len += TCPOLEN_TSTAMP_ALIGNED;
882 if (tcp_key_is_md5(key))
883 tot_len += TCPOLEN_MD5SIG_ALIGNED;
884 if (tcp_key_is_ao(key))
885 tot_len += tcp_ao_len_aligned(key->ao_key);
888 if (rst && !tcp_key_is_md5(key)) {
889 mrst = mptcp_reset_option(skb);
892 tot_len += sizeof(__be32);
896 buff = alloc_skb(MAX_TCP_HEADER, GFP_ATOMIC);
900 skb_reserve(buff, MAX_TCP_HEADER);
902 t1 = skb_push(buff, tot_len);
903 skb_reset_transport_header(buff);
905 /* Swap the send and the receive. */
906 memset(t1, 0, sizeof(*t1));
907 t1->dest = th->source;
908 t1->source = th->dest;
909 t1->doff = tot_len / 4;
910 t1->seq = htonl(seq);
911 t1->ack_seq = htonl(ack);
912 t1->ack = !rst || !th->ack;
914 t1->window = htons(win);
916 topt = (__be32 *)(t1 + 1);
919 *topt++ = htonl((TCPOPT_NOP << 24) | (TCPOPT_NOP << 16) |
920 (TCPOPT_TIMESTAMP << 8) | TCPOLEN_TIMESTAMP);
921 *topt++ = htonl(tsval);
922 *topt++ = htonl(tsecr);
928 #ifdef CONFIG_TCP_MD5SIG
929 if (tcp_key_is_md5(key)) {
930 *topt++ = htonl((TCPOPT_NOP << 24) | (TCPOPT_NOP << 16) |
931 (TCPOPT_MD5SIG << 8) | TCPOLEN_MD5SIG);
932 tcp_v6_md5_hash_hdr((__u8 *)topt, key->md5_key,
933 &ipv6_hdr(skb)->saddr,
934 &ipv6_hdr(skb)->daddr, t1);
938 if (tcp_key_is_ao(key)) {
939 *topt++ = htonl((TCPOPT_AO << 24) |
940 (tcp_ao_len(key->ao_key) << 16) |
941 (key->ao_key->sndid << 8) |
944 tcp_ao_hash_hdr(AF_INET6, (char *)topt, key->ao_key,
946 (union tcp_ao_addr *)&ipv6_hdr(skb)->saddr,
947 (union tcp_ao_addr *)&ipv6_hdr(skb)->daddr,
952 memset(&fl6, 0, sizeof(fl6));
953 fl6.daddr = ipv6_hdr(skb)->saddr;
954 fl6.saddr = ipv6_hdr(skb)->daddr;
955 fl6.flowlabel = label;
957 buff->ip_summed = CHECKSUM_PARTIAL;
959 __tcp_v6_send_check(buff, &fl6.saddr, &fl6.daddr);
961 fl6.flowi6_proto = IPPROTO_TCP;
962 if (rt6_need_strict(&fl6.daddr) && !oif)
963 fl6.flowi6_oif = tcp_v6_iif(skb);
965 if (!oif && netif_index_is_l3_master(net, skb->skb_iif))
968 fl6.flowi6_oif = oif;
972 if (sk->sk_state == TCP_TIME_WAIT)
973 mark = inet_twsk(sk)->tw_mark;
975 mark = READ_ONCE(sk->sk_mark);
976 skb_set_delivery_time(buff, tcp_transmit_time(sk), true);
979 /* autoflowlabel/skb_get_hash_flowi6 rely on buff->hash */
980 skb_set_hash(buff, txhash, PKT_HASH_TYPE_L4);
982 fl6.flowi6_mark = IP6_REPLY_MARK(net, skb->mark) ?: mark;
983 fl6.fl6_dport = t1->dest;
984 fl6.fl6_sport = t1->source;
985 fl6.flowi6_uid = sock_net_uid(net, sk && sk_fullsock(sk) ? sk : NULL);
986 security_skb_classify_flow(skb, flowi6_to_flowi_common(&fl6));
988 /* Pass a socket to ip6_dst_lookup either it is for RST
989 * Underlying function will use this to retrieve the network
992 if (sk && sk->sk_state != TCP_TIME_WAIT)
993 dst = ip6_dst_lookup_flow(net, sk, &fl6, NULL); /*sk's xfrm_policy can be referred*/
995 dst = ip6_dst_lookup_flow(net, ctl_sk, &fl6, NULL);
997 skb_dst_set(buff, dst);
998 ip6_xmit(ctl_sk, buff, &fl6, fl6.flowi6_mark, NULL,
999 tclass & ~INET_ECN_MASK, priority);
1000 TCP_INC_STATS(net, TCP_MIB_OUTSEGS);
1002 TCP_INC_STATS(net, TCP_MIB_OUTRSTS);
1009 static void tcp_v6_send_reset(const struct sock *sk, struct sk_buff *skb)
1011 const struct tcphdr *th = tcp_hdr(skb);
1012 struct ipv6hdr *ipv6h = ipv6_hdr(skb);
1013 const __u8 *md5_hash_location = NULL;
1014 #if defined(CONFIG_TCP_MD5SIG) || defined(CONFIG_TCP_AO)
1015 bool allocated_traffic_key = false;
1017 const struct tcp_ao_hdr *aoh;
1018 struct tcp_key key = {};
1019 u32 seq = 0, ack_seq = 0;
1025 #ifdef CONFIG_TCP_MD5SIG
1026 unsigned char newhash[16];
1028 struct sock *sk1 = NULL;
1034 /* If sk not NULL, it means we did a successful lookup and incoming
1035 * route had to be correct. prequeue might have dropped our dst.
1037 if (!sk && !ipv6_unicast_destination(skb))
1040 net = sk ? sock_net(sk) : dev_net(skb_dst(skb)->dev);
1041 /* Invalid TCP option size or twice included auth */
1042 if (tcp_parse_auth_options(th, &md5_hash_location, &aoh))
1044 #if defined(CONFIG_TCP_MD5SIG) || defined(CONFIG_TCP_AO)
1047 #ifdef CONFIG_TCP_MD5SIG
1048 if (sk && sk_fullsock(sk)) {
1051 /* sdif set, means packet ingressed via a device
1052 * in an L3 domain and inet_iif is set to it.
1054 l3index = tcp_v6_sdif(skb) ? tcp_v6_iif_l3_slave(skb) : 0;
1055 key.md5_key = tcp_v6_md5_do_lookup(sk, &ipv6h->saddr, l3index);
1057 key.type = TCP_KEY_MD5;
1058 } else if (md5_hash_location) {
1059 int dif = tcp_v6_iif_l3_slave(skb);
1060 int sdif = tcp_v6_sdif(skb);
1064 * active side is lost. Try to find listening socket through
1065 * source port, and then find md5 key through listening socket.
1066 * we are not loose security here:
1067 * Incoming packet is checked with md5 hash with finding key,
1068 * no RST generated if md5 hash doesn't match.
1070 sk1 = inet6_lookup_listener(net, net->ipv4.tcp_death_row.hashinfo,
1071 NULL, 0, &ipv6h->saddr, th->source,
1072 &ipv6h->daddr, ntohs(th->source),
1077 /* sdif set, means packet ingressed via a device
1078 * in an L3 domain and dif is set to it.
1080 l3index = tcp_v6_sdif(skb) ? dif : 0;
1082 key.md5_key = tcp_v6_md5_do_lookup(sk1, &ipv6h->saddr, l3index);
1085 key.type = TCP_KEY_MD5;
1087 genhash = tcp_v6_md5_hash_skb(newhash, key.md5_key, NULL, skb);
1088 if (genhash || memcmp(md5_hash_location, newhash, 16) != 0)
1094 seq = ntohl(th->ack_seq);
1096 ack_seq = ntohl(th->seq) + th->syn + th->fin + skb->len -
1099 #ifdef CONFIG_TCP_AO
1103 l3index = tcp_v6_sdif(skb) ? tcp_v6_iif_l3_slave(skb) : 0;
1104 if (tcp_ao_prepare_reset(sk, skb, aoh, l3index, seq,
1105 &key.ao_key, &key.traffic_key,
1106 &allocated_traffic_key,
1107 &key.rcv_next, &key.sne))
1109 key.type = TCP_KEY_AO;
1114 oif = sk->sk_bound_dev_if;
1115 if (sk_fullsock(sk)) {
1116 trace_tcp_send_reset(sk, skb);
1117 if (inet6_test_bit(REPFLOW, sk))
1118 label = ip6_flowlabel(ipv6h);
1119 priority = READ_ONCE(sk->sk_priority);
1120 txhash = sk->sk_txhash;
1122 if (sk->sk_state == TCP_TIME_WAIT) {
1123 label = cpu_to_be32(inet_twsk(sk)->tw_flowlabel);
1124 priority = inet_twsk(sk)->tw_priority;
1125 txhash = inet_twsk(sk)->tw_txhash;
1128 if (net->ipv6.sysctl.flowlabel_reflect & FLOWLABEL_REFLECT_TCP_RESET)
1129 label = ip6_flowlabel(ipv6h);
1132 tcp_v6_send_response(sk, skb, seq, ack_seq, 0, 0, 0, oif, 1,
1133 ipv6_get_dsfield(ipv6h), label, priority, txhash,
1136 #if defined(CONFIG_TCP_MD5SIG) || defined(CONFIG_TCP_AO)
1138 if (allocated_traffic_key)
1139 kfree(key.traffic_key);
1144 static void tcp_v6_send_ack(const struct sock *sk, struct sk_buff *skb, u32 seq,
1145 u32 ack, u32 win, u32 tsval, u32 tsecr, int oif,
1146 struct tcp_key *key, u8 tclass,
1147 __be32 label, u32 priority, u32 txhash)
1149 tcp_v6_send_response(sk, skb, seq, ack, win, tsval, tsecr, oif, 0,
1150 tclass, label, priority, txhash, key);
1153 static void tcp_v6_timewait_ack(struct sock *sk, struct sk_buff *skb)
1155 struct inet_timewait_sock *tw = inet_twsk(sk);
1156 struct tcp_timewait_sock *tcptw = tcp_twsk(sk);
1157 struct tcp_key key = {};
1158 #ifdef CONFIG_TCP_AO
1159 struct tcp_ao_info *ao_info;
1161 if (static_branch_unlikely(&tcp_ao_needed.key)) {
1163 /* FIXME: the segment to-be-acked is not verified yet */
1164 ao_info = rcu_dereference(tcptw->ao_info);
1166 const struct tcp_ao_hdr *aoh;
1168 /* Invalid TCP option size or twice included auth */
1169 if (tcp_parse_auth_options(tcp_hdr(skb), NULL, &aoh))
1172 key.ao_key = tcp_ao_established_key(ao_info,
1173 aoh->rnext_keyid, -1);
1177 struct tcp_ao_key *rnext_key;
1179 key.traffic_key = snd_other_key(key.ao_key);
1180 /* rcv_next switches to our rcv_next */
1181 rnext_key = READ_ONCE(ao_info->rnext_key);
1182 key.rcv_next = rnext_key->rcvid;
1183 key.sne = READ_ONCE(ao_info->snd_sne);
1184 key.type = TCP_KEY_AO;
1188 #ifdef CONFIG_TCP_MD5SIG
1189 } else if (static_branch_unlikely(&tcp_md5_needed.key)) {
1190 key.md5_key = tcp_twsk_md5_key(tcptw);
1192 key.type = TCP_KEY_MD5;
1196 tcp_v6_send_ack(sk, skb, tcptw->tw_snd_nxt, tcptw->tw_rcv_nxt,
1197 tcptw->tw_rcv_wnd >> tw->tw_rcv_wscale,
1198 tcp_tw_tsval(tcptw),
1199 tcptw->tw_ts_recent, tw->tw_bound_dev_if, &key,
1200 tw->tw_tclass, cpu_to_be32(tw->tw_flowlabel), tw->tw_priority,
1203 #ifdef CONFIG_TCP_AO
1209 static void tcp_v6_reqsk_send_ack(const struct sock *sk, struct sk_buff *skb,
1210 struct request_sock *req)
1212 struct tcp_key key = {};
1214 #ifdef CONFIG_TCP_AO
1215 if (static_branch_unlikely(&tcp_ao_needed.key) &&
1216 tcp_rsk_used_ao(req)) {
1217 const struct in6_addr *addr = &ipv6_hdr(skb)->saddr;
1218 const struct tcp_ao_hdr *aoh;
1221 l3index = tcp_v6_sdif(skb) ? tcp_v6_iif_l3_slave(skb) : 0;
1222 /* Invalid TCP option size or twice included auth */
1223 if (tcp_parse_auth_options(tcp_hdr(skb), NULL, &aoh))
1227 key.ao_key = tcp_ao_do_lookup(sk, l3index,
1228 (union tcp_ao_addr *)addr,
1229 AF_INET6, aoh->rnext_keyid, -1);
1230 if (unlikely(!key.ao_key)) {
1231 /* Send ACK with any matching MKT for the peer */
1232 key.ao_key = tcp_ao_do_lookup(sk, l3index,
1233 (union tcp_ao_addr *)addr,
1235 /* Matching key disappeared (user removed the key?)
1236 * let the handshake timeout.
1239 net_info_ratelimited("TCP-AO key for (%pI6, %d)->(%pI6, %d) suddenly disappeared, won't ACK new connection\n",
1241 ntohs(tcp_hdr(skb)->source),
1242 &ipv6_hdr(skb)->daddr,
1243 ntohs(tcp_hdr(skb)->dest));
1247 key.traffic_key = kmalloc(tcp_ao_digest_size(key.ao_key), GFP_ATOMIC);
1248 if (!key.traffic_key)
1251 key.type = TCP_KEY_AO;
1252 key.rcv_next = aoh->keyid;
1253 tcp_v6_ao_calc_key_rsk(key.ao_key, key.traffic_key, req);
1257 #ifdef CONFIG_TCP_MD5SIG
1258 } else if (static_branch_unlikely(&tcp_md5_needed.key)) {
1259 int l3index = tcp_v6_sdif(skb) ? tcp_v6_iif_l3_slave(skb) : 0;
1261 key.md5_key = tcp_v6_md5_do_lookup(sk, &ipv6_hdr(skb)->saddr,
1264 key.type = TCP_KEY_MD5;
1268 /* sk->sk_state == TCP_LISTEN -> for regular TCP_SYN_RECV
1269 * sk->sk_state == TCP_SYN_RECV -> for Fast Open.
1272 * The window field (SEG.WND) of every outgoing segment, with the
1273 * exception of <SYN> segments, MUST be right-shifted by
1274 * Rcv.Wind.Shift bits:
1276 tcp_v6_send_ack(sk, skb, (sk->sk_state == TCP_LISTEN) ?
1277 tcp_rsk(req)->snt_isn + 1 : tcp_sk(sk)->snd_nxt,
1278 tcp_rsk(req)->rcv_nxt,
1279 req->rsk_rcv_wnd >> inet_rsk(req)->rcv_wscale,
1280 tcp_rsk_tsval(tcp_rsk(req)),
1281 READ_ONCE(req->ts_recent), sk->sk_bound_dev_if,
1282 &key, ipv6_get_dsfield(ipv6_hdr(skb)), 0,
1283 READ_ONCE(sk->sk_priority),
1284 READ_ONCE(tcp_rsk(req)->txhash));
1285 if (tcp_key_is_ao(&key))
1286 kfree(key.traffic_key);
/* Validate a SYN-cookie ACK (non-SYN segment to a listener) and, when
 * valid, create the child socket; returns @sk unchanged otherwise.
 */
static struct sock *tcp_v6_cookie_check(struct sock *sk, struct sk_buff *skb)
{
#ifdef CONFIG_SYN_COOKIES
	const struct tcphdr *th = tcp_hdr(skb);

	if (!th->syn)
		sk = cookie_v6_check(sk, skb);
#endif
	return sk;
}
1301 u16 tcp_v6_get_syncookie(struct sock *sk, struct ipv6hdr *iph,
1302 struct tcphdr *th, u32 *cookie)
1305 #ifdef CONFIG_SYN_COOKIES
1306 mss = tcp_get_syncookie_mss(&tcp6_request_sock_ops,
1307 &tcp_request_sock_ipv6_ops, sk, th);
1309 *cookie = __cookie_v6_init_sequence(iph, th, &mss);
1310 tcp_synq_overflow(sk);
/* Handle an incoming SYN on an IPv6 (or v4-mapped) listener.
 * - ETH_P_IP frames (mapped sockets) are redirected to tcp_v4_conn_request().
 * - Non-unicast destinations are rejected (elided branch).
 * - A v4-mapped *source* address on the IPv6 path is counted as a header
 *   error and dropped.
 * Otherwise delegates to the generic tcp_conn_request() with the IPv6
 * request-sock ops.  Returns 0 ("don't send reset") on the drop path.
 */
1316 static int tcp_v6_conn_request(struct sock *sk, struct sk_buff *skb)
1318 if (skb->protocol == htons(ETH_P_IP))
1319 return tcp_v4_conn_request(sk, skb);
1321 if (!ipv6_unicast_destination(skb))
1324 if (ipv6_addr_v4mapped(&ipv6_hdr(skb)->saddr)) {
1325 __IP6_INC_STATS(sock_net(sk), NULL, IPSTATS_MIB_INHDRERRORS);
1329 return tcp_conn_request(&tcp6_request_sock_ops,
1330 &tcp_request_sock_ipv6_ops, sk, skb);
1334 return 0; /* don't send reset */
/* Restore the IPv6 control-block (IP6CB) from the copy that
 * tcp_v6_fill_cb() stashed in TCP_SKB_CB(skb)->header.h6, so that
 * xfrm6_policy_check()/tcp_v6_fill_cb() can be run again on this skb.
 */
1337 static void tcp_v6_restore_cb(struct sk_buff *skb)
1339 /* We need to move header back to the beginning if xfrm6_policy_check()
1340 * and tcp_v6_fill_cb() are going to be called again.
1341 * ip6_datagram_recv_specific_ctl() also expects IP6CB to be there.
1343 memmove(IP6CB(skb), &TCP_SKB_CB(skb)->header.h6,
1344 sizeof(struct inet6_skb_parm));
/* Create the child (full) socket once the 3-way handshake for @req
 * completes.  Two major paths:
 *  1) v4-mapped (skb->protocol == ETH_P_IP): delegate to
 *     tcp_v4_syn_recv_sock(), then patch the child to use the
 *     ipv6_mapped af_ops and tcp_v4_do_rcv backlog handler.
 *  2) native IPv6: route the request, create the child with
 *     tcp_create_openreq_child(), copy addresses/options from the
 *     request sock, clone IPv6 txoptions, copy MD5/AO keys, inherit the
 *     port and hash the child into the established table.
 * NOTE(review): many interior lines (error labels, returns, braces) are
 * elided in this excerpt; comments below describe only visible code.
 */
1347 static struct sock *tcp_v6_syn_recv_sock(const struct sock *sk, struct sk_buff *skb,
1348 struct request_sock *req,
1349 struct dst_entry *dst,
1350 struct request_sock *req_unhash,
1353 struct inet_request_sock *ireq;
1354 struct ipv6_pinfo *newnp;
1355 const struct ipv6_pinfo *np = tcp_inet6_sk(sk);
1356 struct ipv6_txoptions *opt;
1357 struct inet_sock *newinet;
1358 bool found_dup_sk = false;
1359 struct tcp_sock *newtp;
1361 #ifdef CONFIG_TCP_MD5SIG
1362 struct tcp_md5sig_key *key;
/* --- v4-mapped path: IPv4 SYN arrived on an IPv6 socket --- */
1367 if (skb->protocol == htons(ETH_P_IP)) {
1372 newsk = tcp_v4_syn_recv_sock(sk, skb, req, dst,
1373 req_unhash, own_req);
1378 inet_sk(newsk)->pinet6 = tcp_inet6_sk(newsk);
1380 newnp = tcp_inet6_sk(newsk);
1381 newtp = tcp_sk(newsk);
1383 memcpy(newnp, np, sizeof(struct ipv6_pinfo));
1385 newnp->saddr = newsk->sk_v6_rcv_saddr;
/* Child behaves as IPv4 from now on: mapped af_ops + v4 backlog rcv */
1387 inet_csk(newsk)->icsk_af_ops = &ipv6_mapped;
1388 if (sk_is_mptcp(newsk))
1389 mptcpv6_handle_mapped(newsk, true);
1390 newsk->sk_backlog_rcv = tcp_v4_do_rcv;
1391 #if defined(CONFIG_TCP_MD5SIG) || defined(CONFIG_TCP_AO)
1392 newtp->af_specific = &tcp_sock_ipv6_mapped_specific;
/* memcpy of np above copied listener pointers; clear per-socket lists */
1395 newnp->ipv6_mc_list = NULL;
1396 newnp->ipv6_ac_list = NULL;
1397 newnp->ipv6_fl_list = NULL;
1398 newnp->pktoptions = NULL;
1400 newnp->mcast_oif = inet_iif(skb);
1401 newnp->mcast_hops = ip_hdr(skb)->ttl;
1402 newnp->rcv_flowinfo = 0;
1403 if (inet6_test_bit(REPFLOW, sk))
1404 newnp->flow_label = 0;
1407 * No need to charge this sock to the relevant IPv6 refcnt debug socks count
1408 * here, tcp_create_openreq_child now does this for us, see the comment in
1409 * that function for the gory details. -acme
1412 /* It is tricky place. Until this moment IPv4 tcp
1413 worked with IPv6 icsk.icsk_af_ops.
1416 tcp_sync_mss(newsk, inet_csk(newsk)->icsk_pmtu_cookie);
/* --- native IPv6 path --- */
1421 ireq = inet_rsk(req);
1423 if (sk_acceptq_is_full(sk))
1427 dst = inet6_csk_route_req(sk, &fl6, req, IPPROTO_TCP);
1432 newsk = tcp_create_openreq_child(sk, req, skb);
1437 * No need to charge this sock to the relevant IPv6 refcnt debug socks
1438 * count here, tcp_create_openreq_child now does this for us, see the
1439 * comment in that function for the gory details. -acme
1442 newsk->sk_gso_type = SKB_GSO_TCPV6;
1443 ip6_dst_store(newsk, dst, NULL, NULL);
1444 inet6_sk_rx_dst_set(newsk, skb);
1446 inet_sk(newsk)->pinet6 = tcp_inet6_sk(newsk);
1448 newtp = tcp_sk(newsk);
1449 newinet = inet_sk(newsk);
1450 newnp = tcp_inet6_sk(newsk);
1452 memcpy(newnp, np, sizeof(struct ipv6_pinfo));
/* Addresses and binding come from the request sock */
1454 newsk->sk_v6_daddr = ireq->ir_v6_rmt_addr;
1455 newnp->saddr = ireq->ir_v6_loc_addr;
1456 newsk->sk_v6_rcv_saddr = ireq->ir_v6_loc_addr;
1457 newsk->sk_bound_dev_if = ireq->ir_iif;
1459 /* Now IPv6 options...
1461 First: no IPv4 options.
1463 newinet->inet_opt = NULL;
1464 newnp->ipv6_mc_list = NULL;
1465 newnp->ipv6_ac_list = NULL;
1466 newnp->ipv6_fl_list = NULL;
1469 newnp->rxopt.all = np->rxopt.all;
1471 newnp->pktoptions = NULL;
1473 newnp->mcast_oif = tcp_v6_iif(skb);
1474 newnp->mcast_hops = ipv6_hdr(skb)->hop_limit;
1475 newnp->rcv_flowinfo = ip6_flowinfo(ipv6_hdr(skb));
1476 if (inet6_test_bit(REPFLOW, sk))
1477 newnp->flow_label = ip6_flowlabel(ipv6_hdr(skb));
1479 /* Set ToS of the new socket based upon the value of incoming SYN.
1480 * ECT bits are set later in tcp_init_transfer().
1482 if (READ_ONCE(sock_net(sk)->ipv4.sysctl_tcp_reflect_tos))
1483 newnp->tclass = tcp_rsk(req)->syn_tos & ~INET_ECN_MASK;
1485 /* Clone native IPv6 options from listening socket (if any)
1487 Yes, keeping reference count would be much more clever,
1488 but we make one more one thing there: reattach optmem
1491 opt = ireq->ipv6_opt;
1493 opt = rcu_dereference(np->opt);
1495 opt = ipv6_dup_options(newsk, opt);
1496 RCU_INIT_POINTER(newnp->opt, opt);
1498 inet_csk(newsk)->icsk_ext_hdr_len = 0;
1500 inet_csk(newsk)->icsk_ext_hdr_len = opt->opt_nflen +
/* Congestion control, MSS and receive-MSS initialization for the child */
1503 tcp_ca_openreq_child(newsk, dst);
1505 tcp_sync_mss(newsk, dst_mtu(dst));
1506 newtp->advmss = tcp_mss_clamp(tcp_sk(sk), dst_metric_advmss(dst));
1508 tcp_initialize_rcv_mss(newsk);
/* Placeholder IPv4 addresses for an IPv6-only socket */
1510 newinet->inet_daddr = newinet->inet_saddr = LOOPBACK4_IPV6;
1511 newinet->inet_rcv_saddr = LOOPBACK4_IPV6;
1513 #ifdef CONFIG_TCP_MD5SIG
1514 l3index = l3mdev_master_ifindex_by_index(sock_net(sk), ireq->ir_iif);
/* MD5 and AO are mutually exclusive per connection; copy MD5 key only
 * when the request did not use TCP-AO. */
1516 if (!tcp_rsk_used_ao(req)) {
1517 /* Copy over the MD5 key from the original socket */
1518 key = tcp_v6_md5_do_lookup(sk, &newsk->sk_v6_daddr, l3index);
1520 const union tcp_md5_addr *addr;
1522 addr = (union tcp_md5_addr *)&newsk->sk_v6_daddr;
1523 if (tcp_md5_key_copy(newsk, addr, AF_INET6, 128, l3index, key)) {
1524 inet_csk_prepare_forced_close(newsk);
1531 #ifdef CONFIG_TCP_AO
1532 /* Copy over tcp_ao_info if any */
1533 if (tcp_ao_copy_all_matching(sk, newsk, req, skb, AF_INET6))
1537 if (__inet_inherit_port(sk, newsk) < 0) {
1538 inet_csk_prepare_forced_close(newsk);
1542 *own_req = inet_ehash_nolisten(newsk, req_to_sk(req_unhash),
1545 tcp_move_syn(newtp, req);
1547 /* Clone pktoptions received with SYN, if we own the req */
1548 if (ireq->pktopts) {
1549 newnp->pktoptions = skb_clone_and_charge_r(ireq->pktopts, newsk);
1550 consume_skb(ireq->pktopts);
1551 ireq->pktopts = NULL;
1552 if (newnp->pktoptions)
1553 tcp_v6_restore_cb(newnp->pktoptions);
1556 if (!req_unhash && found_dup_sk) {
1557 /* This code path should only be executed in the
1558 * syncookie case only
1560 bh_unlock_sock(newsk);
/* Overflow/error accounting path (labels elided in this excerpt) */
1569 __NET_INC_STATS(sock_net(sk), LINUX_MIB_LISTENOVERFLOWS);
1577 INDIRECT_CALLABLE_DECLARE(struct dst_entry *ipv4_dst_check(struct dst_entry *,
1579 /* The socket must have it's spinlock held when we get
1580 * here, unless it is a TCP_LISTEN socket.
1582 * We have a potential double-lock case here, so even when
1583 * doing backlog processing we use the BH locking scheme.
1584 * This is because we cannot sleep with the original spinlock
1587 INDIRECT_CALLABLE_SCOPE
/* Main per-socket receive entry for IPv6 TCP segments.  Handles the
 * ESTABLISHED fast path, LISTEN (possibly via syncookie child), and the
 * generic state machine; also latches IPV6_PKTOPTIONS data into
 * np->pktoptions at the end.
 * NOTE(review): interior lines (labels, returns, braces) are elided in
 * this excerpt.
 */
1588 int tcp_v6_do_rcv(struct sock *sk, struct sk_buff *skb)
1590 struct ipv6_pinfo *np = tcp_inet6_sk(sk);
1591 struct sk_buff *opt_skb = NULL;
1592 enum skb_drop_reason reason;
1593 struct tcp_sock *tp;
1595 /* Imagine: socket is IPv6. IPv4 packet arrives,
1596 goes to IPv4 receive handler and backlogged.
1597 From backlog it always goes here. Kerboom...
1598 Fortunately, tcp_rcv_established and rcv_established
1599 handle them correctly, but it is not case with
1600 tcp_v6_hnd_req and tcp_v6_send_reset(). --ANK
1603 if (skb->protocol == htons(ETH_P_IP))
1604 return tcp_v4_do_rcv(sk, skb);
1607 * socket locking is here for SMP purposes as backlog rcv
1608 * is currently called with bh processing disabled.
1611 /* Do Stevens' IPV6_PKTOPTIONS.
1613 Yes, guys, it is the only place in our code, where we
1614 may make it not affecting IPv4.
1615 The rest of code is protocol independent,
1616 and I do not like idea to uglify IPv4.
1618 Actually, all the idea behind IPV6_PKTOPTIONS
1619 looks not very well thought. For now we latch
1620 options, received in the last packet, enqueued
1621 by tcp. Feel free to propose better solution.
1625 opt_skb = skb_clone_and_charge_r(skb, sk);
/* --- ESTABLISHED fast path: validate cached rx dst, then feed the
 * segment straight into tcp_rcv_established() --- */
1627 if (sk->sk_state == TCP_ESTABLISHED) { /* Fast path */
1628 struct dst_entry *dst;
1630 dst = rcu_dereference_protected(sk->sk_rx_dst,
1631 lockdep_sock_is_held(sk));
1633 sock_rps_save_rxhash(sk, skb);
1634 sk_mark_napi_id(sk, skb);
1636 if (sk->sk_rx_dst_ifindex != skb->skb_iif ||
1637 INDIRECT_CALL_1(dst->ops->check, ip6_dst_check,
1638 dst, sk->sk_rx_dst_cookie) == NULL) {
1639 RCU_INIT_POINTER(sk->sk_rx_dst, NULL);
1644 tcp_rcv_established(sk, skb);
1646 goto ipv6_pktoptions;
1650 if (tcp_checksum_complete(skb))
/* --- LISTEN: a syncookie may yield a child socket to process --- */
1653 if (sk->sk_state == TCP_LISTEN) {
1654 struct sock *nsk = tcp_v6_cookie_check(sk, skb);
1658 reason = tcp_child_process(sk, nsk, skb);
1663 __kfree_skb(opt_skb);
1667 sock_rps_save_rxhash(sk, skb);
/* All other states go through the generic TCP state machine */
1669 reason = tcp_rcv_state_process(sk, skb);
1673 goto ipv6_pktoptions;
/* Error paths: reset + drop accounting (labels elided) */
1677 tcp_v6_send_reset(sk, skb);
1680 __kfree_skb(opt_skb);
1681 kfree_skb_reason(skb, reason);
1684 reason = SKB_DROP_REASON_TCP_CSUM;
1685 trace_tcp_bad_csum(skb);
1686 TCP_INC_STATS(sock_net(sk), TCP_MIB_CSUMERRORS);
1687 TCP_INC_STATS(sock_net(sk), TCP_MIB_INERRS);
1692 /* Do you ask, what is it?
1694 1. skb was enqueued by tcp.
1695 2. skb is added to tail of read queue, rather than out of order.
1696 3. socket is not in passive state.
1697 4. Finally, it really contains options, which user wants to receive.
1700 if (TCP_SKB_CB(opt_skb)->end_seq == tp->rcv_nxt &&
1701 !((1 << sk->sk_state) & (TCPF_CLOSE | TCPF_LISTEN))) {
1702 if (np->rxopt.bits.rxinfo || np->rxopt.bits.rxoinfo)
1703 WRITE_ONCE(np->mcast_oif, tcp_v6_iif(opt_skb));
1704 if (np->rxopt.bits.rxhlim || np->rxopt.bits.rxohlim)
1705 WRITE_ONCE(np->mcast_hops,
1706 ipv6_hdr(opt_skb)->hop_limit);
1707 if (np->rxopt.bits.rxflow || np->rxopt.bits.rxtclass)
1708 np->rcv_flowinfo = ip6_flowinfo(ipv6_hdr(opt_skb));
1709 if (inet6_test_bit(REPFLOW, sk))
1710 np->flow_label = ip6_flowlabel(ipv6_hdr(opt_skb));
1711 if (ipv6_opt_accepted(sk, opt_skb, &TCP_SKB_CB(opt_skb)->header.h6)) {
1712 tcp_v6_restore_cb(opt_skb);
/* Swap the latched pktoptions; the old one is freed below */
1713 opt_skb = xchg(&np->pktoptions, opt_skb);
1715 __kfree_skb(opt_skb);
1716 opt_skb = xchg(&np->pktoptions, NULL);
1720 consume_skb(opt_skb);
/* Populate TCP_SKB_CB() from the TCP/IPv6 headers.  Must run after
 * xfrm6_policy_check() because it relocates IP6CB() into the TCP
 * control block (see comment below); tcp_v6_restore_cb() undoes this.
 */
1724 static void tcp_v6_fill_cb(struct sk_buff *skb, const struct ipv6hdr *hdr,
1725 const struct tcphdr *th)
1727 /* This is tricky: we move IP6CB at its correct location into
1728 * TCP_SKB_CB(). It must be done after xfrm6_policy_check(), because
1729 * _decode_session6() uses IP6CB().
1730 * barrier() makes sure compiler won't play aliasing games.
1732 memmove(&TCP_SKB_CB(skb)->header.h6, IP6CB(skb),
1733 sizeof(struct inet6_skb_parm));
1736 TCP_SKB_CB(skb)->seq = ntohl(th->seq);
/* end_seq counts SYN and FIN as one sequence unit each */
1737 TCP_SKB_CB(skb)->end_seq = (TCP_SKB_CB(skb)->seq + th->syn + th->fin +
1738 skb->len - th->doff*4);
1739 TCP_SKB_CB(skb)->ack_seq = ntohl(th->ack_seq);
1740 TCP_SKB_CB(skb)->tcp_flags = tcp_flag_byte(th);
1741 TCP_SKB_CB(skb)->tcp_tw_isn = 0;
1742 TCP_SKB_CB(skb)->ip_dsfield = ipv6_get_dsfield(hdr);
1743 TCP_SKB_CB(skb)->sacked = 0;
1744 TCP_SKB_CB(skb)->has_rxtstamp =
1745 skb->tstamp || skb_hwtstamps(skb)->hwtstamp;
/* Protocol handler for IPPROTO_TCP over IPv6 (registered in tcpv6_init()).
 * Validates the header and checksum, looks up the owning socket, and
 * dispatches: NEW_SYN_RECV request socks, TIME_WAIT socks, listeners and
 * full sockets each take their own path.
 * NOTE(review): goto labels, returns and several braces are elided in
 * this excerpt; comments mark only the visible sections.
 */
1748 INDIRECT_CALLABLE_SCOPE int tcp_v6_rcv(struct sk_buff *skb)
1750 enum skb_drop_reason drop_reason;
1751 int sdif = inet6_sdif(skb);
1752 int dif = inet6_iif(skb);
1753 const struct tcphdr *th;
1754 const struct ipv6hdr *hdr;
1758 struct net *net = dev_net(skb->dev);
1760 drop_reason = SKB_DROP_REASON_NOT_SPECIFIED;
1761 if (skb->pkt_type != PACKET_HOST)
1765 * Count it even if it's bad.
1767 __TCP_INC_STATS(net, TCP_MIB_INSEGS);
/* --- header sanity: pull the fixed header, then doff-sized header --- */
1769 if (!pskb_may_pull(skb, sizeof(struct tcphdr)))
1772 th = (const struct tcphdr *)skb->data;
1774 if (unlikely(th->doff < sizeof(struct tcphdr) / 4)) {
1775 drop_reason = SKB_DROP_REASON_PKT_TOO_SMALL;
1778 if (!pskb_may_pull(skb, th->doff*4))
1781 if (skb_checksum_init(skb, IPPROTO_TCP, ip6_compute_pseudo))
/* re-read after pskb_may_pull: skb->data may have moved */
1784 th = (const struct tcphdr *)skb->data;
1785 hdr = ipv6_hdr(skb);
1788 sk = __inet6_lookup_skb(net->ipv4.tcp_death_row.hashinfo, skb, __tcp_hdrlen(th),
1789 th->source, th->dest, inet6_iif(skb), sdif,
1795 if (sk->sk_state == TCP_TIME_WAIT)
/* --- NEW_SYN_RECV: handshake-completing ACK for a request sock --- */
1798 if (sk->sk_state == TCP_NEW_SYN_RECV) {
1799 struct request_sock *req = inet_reqsk(sk);
1800 bool req_stolen = false;
1803 sk = req->rsk_listener;
1804 if (!xfrm6_policy_check(sk, XFRM_POLICY_IN, skb))
1805 drop_reason = SKB_DROP_REASON_XFRM_POLICY;
1807 drop_reason = tcp_inbound_hash(sk, req, skb,
1808 &hdr->saddr, &hdr->daddr,
1809 AF_INET6, dif, sdif);
1811 sk_drops_add(sk, skb);
1815 if (tcp_checksum_complete(skb)) {
1819 if (unlikely(sk->sk_state != TCP_LISTEN)) {
1820 nsk = reuseport_migrate_sock(sk, req_to_sk(req), skb);
1822 inet_csk_reqsk_queue_drop_and_put(sk, req);
1826 /* reuseport_migrate_sock() has already held one sk_refcnt
1834 if (!tcp_filter(sk, skb)) {
1835 th = (const struct tcphdr *)skb->data;
1836 hdr = ipv6_hdr(skb);
1837 tcp_v6_fill_cb(skb, hdr, th);
1838 nsk = tcp_check_req(sk, skb, req, false, &req_stolen);
1840 drop_reason = SKB_DROP_REASON_SOCKET_FILTER;
1845 /* Another cpu got exclusive access to req
1846 * and created a full blown socket.
1847 * Try to feed this packet to this socket
1848 * instead of discarding it.
1850 tcp_v6_restore_cb(skb);
1854 goto discard_and_relse;
1859 tcp_v6_restore_cb(skb);
1861 drop_reason = tcp_child_process(sk, nsk, skb);
1863 tcp_v6_send_reset(nsk, skb);
1864 goto discard_and_relse;
/* --- full socket: min-hopcount, XFRM and MD5/AO policy checks --- */
1871 if (static_branch_unlikely(&ip6_min_hopcount)) {
1872 /* min_hopcount can be changed concurrently from do_ipv6_setsockopt() */
1873 if (unlikely(hdr->hop_limit < READ_ONCE(tcp_inet6_sk(sk)->min_hopcount))) {
1874 __NET_INC_STATS(net, LINUX_MIB_TCPMINTTLDROP);
1875 drop_reason = SKB_DROP_REASON_TCP_MINTTL;
1876 goto discard_and_relse;
1880 if (!xfrm6_policy_check(sk, XFRM_POLICY_IN, skb)) {
1881 drop_reason = SKB_DROP_REASON_XFRM_POLICY;
1882 goto discard_and_relse;
1885 drop_reason = tcp_inbound_hash(sk, NULL, skb, &hdr->saddr, &hdr->daddr,
1886 AF_INET6, dif, sdif);
1888 goto discard_and_relse;
1892 if (tcp_filter(sk, skb)) {
1893 drop_reason = SKB_DROP_REASON_SOCKET_FILTER;
1894 goto discard_and_relse;
1896 th = (const struct tcphdr *)skb->data;
1897 hdr = ipv6_hdr(skb);
1898 tcp_v6_fill_cb(skb, hdr, th);
1902 if (sk->sk_state == TCP_LISTEN) {
1903 ret = tcp_v6_do_rcv(sk, skb);
1904 goto put_and_return;
1907 sk_incoming_cpu_update(sk);
/* Process directly under the socket lock, or queue to the backlog if
 * the socket is owned by user context */
1909 bh_lock_sock_nested(sk);
1910 tcp_segs_in(tcp_sk(sk), skb);
1912 if (!sock_owned_by_user(sk)) {
1913 ret = tcp_v6_do_rcv(sk, skb);
1915 if (tcp_add_backlog(sk, skb, &drop_reason))
1916 goto discard_and_relse;
1922 return ret ? -1 : 0;
/* --- no matching socket: checksum-check then reset --- */
1925 drop_reason = SKB_DROP_REASON_NO_SOCKET;
1926 if (!xfrm6_policy_check(NULL, XFRM_POLICY_IN, skb))
1929 tcp_v6_fill_cb(skb, hdr, th);
1931 if (tcp_checksum_complete(skb)) {
1933 drop_reason = SKB_DROP_REASON_TCP_CSUM;
1934 trace_tcp_bad_csum(skb);
1935 __TCP_INC_STATS(net, TCP_MIB_CSUMERRORS);
1937 __TCP_INC_STATS(net, TCP_MIB_INERRS);
1939 tcp_v6_send_reset(NULL, skb);
1943 SKB_DR_OR(drop_reason, NOT_SPECIFIED);
1944 kfree_skb_reason(skb, drop_reason);
1948 sk_drops_add(sk, skb);
/* --- TIME_WAIT handling: may re-dispatch a SYN to a listener --- */
1954 if (!xfrm6_policy_check(NULL, XFRM_POLICY_IN, skb)) {
1955 drop_reason = SKB_DROP_REASON_XFRM_POLICY;
1956 inet_twsk_put(inet_twsk(sk));
1960 tcp_v6_fill_cb(skb, hdr, th);
1962 if (tcp_checksum_complete(skb)) {
1963 inet_twsk_put(inet_twsk(sk));
1967 switch (tcp_timewait_state_process(inet_twsk(sk), skb, th)) {
1972 sk2 = inet6_lookup_listener(net, net->ipv4.tcp_death_row.hashinfo,
1973 skb, __tcp_hdrlen(th),
1974 &ipv6_hdr(skb)->saddr, th->source,
1975 &ipv6_hdr(skb)->daddr,
1977 tcp_v6_iif_l3_slave(skb),
1980 struct inet_timewait_sock *tw = inet_twsk(sk);
1981 inet_twsk_deschedule_put(tw);
1983 tcp_v6_restore_cb(skb);
1991 tcp_v6_timewait_ack(sk, skb);
1994 tcp_v6_send_reset(sk, skb);
1995 inet_twsk_deschedule_put(inet_twsk(sk));
1997 case TCP_TW_SUCCESS:
/* Early demux: before full IP processing, try to find an established
 * socket for this segment and attach its cached rx dst to the skb so
 * routing can be skipped.  Best effort — any failed check just returns.
 * NOTE(review): several lines (th assignment, returns) are elided here.
 */
2003 void tcp_v6_early_demux(struct sk_buff *skb)
2005 struct net *net = dev_net(skb->dev);
2006 const struct ipv6hdr *hdr;
2007 const struct tcphdr *th;
2010 if (skb->pkt_type != PACKET_HOST)
2013 if (!pskb_may_pull(skb, skb_transport_offset(skb) + sizeof(struct tcphdr)))
2016 hdr = ipv6_hdr(skb);
2019 if (th->doff < sizeof(struct tcphdr) / 4)
2022 /* Note : We use inet6_iif() here, not tcp_v6_iif() */
2023 sk = __inet6_lookup_established(net, net->ipv4.tcp_death_row.hashinfo,
2024 &hdr->saddr, th->source,
2025 &hdr->daddr, ntohs(th->dest),
2026 inet6_iif(skb), inet6_sdif(skb));
2029 skb->destructor = sock_edemux;
2030 if (sk_fullsock(sk)) {
2031 struct dst_entry *dst = rcu_dereference(sk->sk_rx_dst);
2034 dst = dst_check(dst, sk->sk_rx_dst_cookie);
2036 sk->sk_rx_dst_ifindex == skb->skb_iif)
2037 skb_dst_set_noref(skb, dst);
/* TIME_WAIT sock ops for TCPv6; referenced by tcpv6_prot.twsk_prot. */
2042 static struct timewait_sock_ops tcp6_timewait_sock_ops = {
2043 .twsk_obj_size = sizeof(struct tcp6_timewait_sock),
2044 .twsk_unique = tcp_twsk_unique,
2045 .twsk_destructor = tcp_twsk_destructor,
/* Compute the TCP checksum for an outgoing segment using the socket's
 * IPv6 source/destination addresses for the pseudo-header. */
2048 INDIRECT_CALLABLE_SCOPE void tcp_v6_send_check(struct sock *sk, struct sk_buff *skb)
2050 __tcp_v6_send_check(skb, &sk->sk_v6_rcv_saddr, &sk->sk_v6_daddr);
/* Connection-sock AF ops for native TCP-over-IPv6 sockets
 * (installed by tcp_v6_init_sock()). */
2053 const struct inet_connection_sock_af_ops ipv6_specific = {
2054 .queue_xmit = inet6_csk_xmit,
2055 .send_check = tcp_v6_send_check,
2056 .rebuild_header = inet6_sk_rebuild_header,
2057 .sk_rx_dst_set = inet6_sk_rx_dst_set,
2058 .conn_request = tcp_v6_conn_request,
2059 .syn_recv_sock = tcp_v6_syn_recv_sock,
2060 .net_header_len = sizeof(struct ipv6hdr),
2061 .setsockopt = ipv6_setsockopt,
2062 .getsockopt = ipv6_getsockopt,
2063 .addr2sockaddr = inet6_csk_addr2sockaddr,
2064 .sockaddr_len = sizeof(struct sockaddr_in6),
2065 .mtu_reduced = tcp_v6_mtu_reduced,
2068 #if defined(CONFIG_TCP_MD5SIG) || defined(CONFIG_TCP_AO)
/* MD5/TCP-AO signing ops for native IPv6 sockets. */
2069 static const struct tcp_sock_af_ops tcp_sock_ipv6_specific = {
2070 #ifdef CONFIG_TCP_MD5SIG
2071 .md5_lookup = tcp_v6_md5_lookup,
2072 .calc_md5_hash = tcp_v6_md5_hash_skb,
2073 .md5_parse = tcp_v6_parse_md5_keys,
2075 #ifdef CONFIG_TCP_AO
2076 .ao_lookup = tcp_v6_ao_lookup,
2077 .calc_ao_hash = tcp_v6_ao_hash_skb,
2078 .ao_parse = tcp_v6_parse_ao,
2079 .ao_calc_key_sk = tcp_v6_ao_calc_key_sk,
2085 * TCP over IPv4 via INET6 API
/* AF ops installed on an IPv6 socket that is actually carrying an IPv4
 * (v4-mapped) connection: xmit/header handling are the IPv4 variants,
 * while the sockopt/sockaddr interface stays IPv6. */
2087 static const struct inet_connection_sock_af_ops ipv6_mapped = {
2088 .queue_xmit = ip_queue_xmit,
2089 .send_check = tcp_v4_send_check,
2090 .rebuild_header = inet_sk_rebuild_header,
2091 .sk_rx_dst_set = inet_sk_rx_dst_set,
2092 .conn_request = tcp_v6_conn_request,
2093 .syn_recv_sock = tcp_v6_syn_recv_sock,
2094 .net_header_len = sizeof(struct iphdr),
2095 .setsockopt = ipv6_setsockopt,
2096 .getsockopt = ipv6_getsockopt,
2097 .addr2sockaddr = inet6_csk_addr2sockaddr,
2098 .sockaddr_len = sizeof(struct sockaddr_in6),
2099 .mtu_reduced = tcp_v4_mtu_reduced,
2102 #if defined(CONFIG_TCP_MD5SIG) || defined(CONFIG_TCP_AO)
/* MD5/TCP-AO signing ops for v4-mapped sockets: hashes are computed over
 * IPv4 (v4 hash_skb/calc_key), parsing stays on the v6 entry points. */
2103 static const struct tcp_sock_af_ops tcp_sock_ipv6_mapped_specific = {
2104 #ifdef CONFIG_TCP_MD5SIG
2105 .md5_lookup = tcp_v4_md5_lookup,
2106 .calc_md5_hash = tcp_v4_md5_hash_skb,
2107 .md5_parse = tcp_v6_parse_md5_keys,
2109 #ifdef CONFIG_TCP_AO
2110 .ao_lookup = tcp_v6_ao_lookup,
2111 .calc_ao_hash = tcp_v4_ao_hash_skb,
2112 .ao_parse = tcp_v6_parse_ao,
2113 .ao_calc_key_sk = tcp_v4_ao_calc_key_sk,
2118 /* NOTE: A lot of things set to zero explicitly by call to
2119 * sk_alloc() so need not be done here.
/* Per-socket init for TCPv6 (tcpv6_prot.init): install the IPv6 af_ops
 * and, when MD5/AO is configured, the IPv6 signing ops.
 * NOTE(review): the generic init call and return are elided here. */
2121 static int tcp_v6_init_sock(struct sock *sk)
2123 struct inet_connection_sock *icsk = inet_csk(sk);
2127 icsk->icsk_af_ops = &ipv6_specific;
2129 #if defined(CONFIG_TCP_MD5SIG) || defined(CONFIG_TCP_AO)
2130 tcp_sk(sk)->af_specific = &tcp_sock_ipv6_specific;
2136 #ifdef CONFIG_PROC_FS
2137 /* Proc filesystem TCPv6 sock list dumping. */
/* Format one request sock (pending SYN) as a /proc/net/tcp6 line. */
2138 static void get_openreq6(struct seq_file *seq,
2139 const struct request_sock *req, int i)
2141 long ttd = req->rsk_timer.expires - jiffies;
2142 const struct in6_addr *src = &inet_rsk(req)->ir_v6_loc_addr;
2143 const struct in6_addr *dest = &inet_rsk(req)->ir_v6_rmt_addr;
2149 "%4d: %08X%08X%08X%08X:%04X %08X%08X%08X%08X:%04X "
2150 "%02X %08X:%08X %02X:%08lX %08X %5u %8d %d %d %pK\n",
2152 src->s6_addr32[0], src->s6_addr32[1],
2153 src->s6_addr32[2], src->s6_addr32[3],
2154 inet_rsk(req)->ir_num,
2155 dest->s6_addr32[0], dest->s6_addr32[1],
2156 dest->s6_addr32[2], dest->s6_addr32[3],
2157 ntohs(inet_rsk(req)->ir_rmt_port),
2159 0, 0, /* could print option size, but that is af dependent. */
2160 1, /* timers active (only the expire timer) */
2161 jiffies_to_clock_t(ttd),
2163 from_kuid_munged(seq_user_ns(seq),
2164 sock_i_uid(req->rsk_listener)),
2165 0, /* non standard timer */
2166 0, /* open_requests have no inode */
/* Format one full TCPv6 socket as a /proc/net/tcp6 line: addresses,
 * state, queue sizes, active timer and expiry, uid, rto/ato and
 * congestion info.  Lockless readers — see the rx_queue note below.
 * NOTE(review): some declarations and the seq_printf() call line itself
 * are elided in this excerpt.
 */
2170 static void get_tcp6_sock(struct seq_file *seq, struct sock *sp, int i)
2172 const struct in6_addr *dest, *src;
2175 unsigned long timer_expires;
2176 const struct inet_sock *inet = inet_sk(sp);
2177 const struct tcp_sock *tp = tcp_sk(sp);
2178 const struct inet_connection_sock *icsk = inet_csk(sp);
2179 const struct fastopen_queue *fastopenq = &icsk->icsk_accept_queue.fastopenq;
2183 dest = &sp->sk_v6_daddr;
2184 src = &sp->sk_v6_rcv_saddr;
2185 destp = ntohs(inet->inet_dport);
2186 srcp = ntohs(inet->inet_sport);
/* Pick which timer (retransmit/probe/keepalive) to report */
2188 if (icsk->icsk_pending == ICSK_TIME_RETRANS ||
2189 icsk->icsk_pending == ICSK_TIME_REO_TIMEOUT ||
2190 icsk->icsk_pending == ICSK_TIME_LOSS_PROBE) {
2192 timer_expires = icsk->icsk_timeout;
2193 } else if (icsk->icsk_pending == ICSK_TIME_PROBE0) {
2195 timer_expires = icsk->icsk_timeout;
2196 } else if (timer_pending(&sp->sk_timer)) {
2198 timer_expires = sp->sk_timer.expires;
2201 timer_expires = jiffies;
2204 state = inet_sk_state_load(sp);
2205 if (state == TCP_LISTEN)
2206 rx_queue = READ_ONCE(sp->sk_ack_backlog);
2208 /* Because we don't lock the socket,
2209 * we might find a transient negative value.
2211 rx_queue = max_t(int, READ_ONCE(tp->rcv_nxt) -
2212 READ_ONCE(tp->copied_seq), 0);
2215 "%4d: %08X%08X%08X%08X:%04X %08X%08X%08X%08X:%04X "
2216 "%02X %08X:%08X %02X:%08lX %08X %5u %8d %lu %d %pK %lu %lu %u %u %d\n",
2218 src->s6_addr32[0], src->s6_addr32[1],
2219 src->s6_addr32[2], src->s6_addr32[3], srcp,
2220 dest->s6_addr32[0], dest->s6_addr32[1],
2221 dest->s6_addr32[2], dest->s6_addr32[3], destp,
2223 READ_ONCE(tp->write_seq) - tp->snd_una,
2226 jiffies_delta_to_clock_t(timer_expires - jiffies),
2227 icsk->icsk_retransmits,
2228 from_kuid_munged(seq_user_ns(seq), sock_i_uid(sp)),
2229 icsk->icsk_probes_out,
2231 refcount_read(&sp->sk_refcnt), sp,
2232 jiffies_to_clock_t(icsk->icsk_rto),
2233 jiffies_to_clock_t(icsk->icsk_ack.ato),
2234 (icsk->icsk_ack.quick << 1) | inet_csk_in_pingpong_mode(sp),
2236 state == TCP_LISTEN ?
2237 fastopenq->max_qlen :
2238 (tcp_in_initial_slowstart(tp) ? -1 : tp->snd_ssthresh)
/* Format one TIME_WAIT sock as a /proc/net/tcp6 line; most columns are
 * fixed zeros because a twsk carries no queues, uid or inode. */
2242 static void get_timewait6_sock(struct seq_file *seq,
2243 struct inet_timewait_sock *tw, int i)
2245 long delta = tw->tw_timer.expires - jiffies;
2246 const struct in6_addr *dest, *src;
2249 dest = &tw->tw_v6_daddr;
2250 src = &tw->tw_v6_rcv_saddr;
2251 destp = ntohs(tw->tw_dport);
2252 srcp = ntohs(tw->tw_sport);
2255 "%4d: %08X%08X%08X%08X:%04X %08X%08X%08X%08X:%04X "
2256 "%02X %08X:%08X %02X:%08lX %08X %5d %8d %d %d %pK\n",
2258 src->s6_addr32[0], src->s6_addr32[1],
2259 src->s6_addr32[2], src->s6_addr32[3], srcp,
2260 dest->s6_addr32[0], dest->s6_addr32[1],
2261 dest->s6_addr32[2], dest->s6_addr32[3], destp,
2262 tw->tw_substate, 0, 0,
2263 3, jiffies_delta_to_clock_t(delta), 0, 0, 0, 0,
2264 refcount_read(&tw->tw_refcnt), tw);
/* seq_file .show for /proc/net/tcp6: print the header row for the start
 * token, otherwise dispatch on socket state to the matching formatter
 * (timewait / open request / full sock). */
2267 static int tcp6_seq_show(struct seq_file *seq, void *v)
2269 struct tcp_iter_state *st;
2270 struct sock *sk = v;
2272 if (v == SEQ_START_TOKEN) {
2277 "st tx_queue rx_queue tr tm->when retrnsmt"
2278 " uid timeout inode\n");
2283 if (sk->sk_state == TCP_TIME_WAIT)
2284 get_timewait6_sock(seq, v, st->num);
2285 else if (sk->sk_state == TCP_NEW_SYN_RECV)
2286 get_openreq6(seq, v, st->num);
2288 get_tcp6_sock(seq, v, st->num);
/* seq_file iteration ops for /proc/net/tcp6 (iterators shared with v4). */
2293 static const struct seq_operations tcp6_seq_ops = {
2294 .show = tcp6_seq_show,
2295 .start = tcp_seq_start,
2296 .next = tcp_seq_next,
2297 .stop = tcp_seq_stop,
/* Per-AF seq info; initializer fields are elided in this excerpt. */
2300 static struct tcp_seq_afinfo tcp6_seq_afinfo = {
/* Per-netns: register the /proc/net/tcp6 seq file.
 * NOTE(review): the error/success returns are elided in this excerpt. */
2304 int __net_init tcp6_proc_init(struct net *net)
2306 if (!proc_create_net_data("tcp6", 0444, net->proc_net, &tcp6_seq_ops,
2307 sizeof(struct tcp_iter_state), &tcp6_seq_afinfo))
/* Per-netns teardown: remove /proc/net/tcp6. */
2312 void tcp6_proc_exit(struct net *net)
2314 remove_proc_entry("tcp6", net->proc_net);
/* The TCPv6 protocol definition: glue between the socket layer and the
 * TCP implementation.  Most handlers are shared with IPv4 TCP; only the
 * connect/backlog/init paths and the timewait/request sock ops are
 * IPv6-specific. */
2318 struct proto tcpv6_prot = {
2320 .owner = THIS_MODULE,
2322 .pre_connect = tcp_v6_pre_connect,
2323 .connect = tcp_v6_connect,
2324 .disconnect = tcp_disconnect,
2325 .accept = inet_csk_accept,
2327 .init = tcp_v6_init_sock,
2328 .destroy = tcp_v4_destroy_sock, /* destroy is AF-independent; v4 variant reused */
2329 .shutdown = tcp_shutdown,
2330 .setsockopt = tcp_setsockopt,
2331 .getsockopt = tcp_getsockopt,
2332 .bpf_bypass_getsockopt = tcp_bpf_bypass_getsockopt,
2333 .keepalive = tcp_set_keepalive,
2334 .recvmsg = tcp_recvmsg,
2335 .sendmsg = tcp_sendmsg,
2336 .splice_eof = tcp_splice_eof,
2337 .backlog_rcv = tcp_v6_do_rcv,
2338 .release_cb = tcp_release_cb,
2340 .unhash = inet_unhash,
2341 .get_port = inet_csk_get_port,
2342 .put_port = inet_put_port,
2343 #ifdef CONFIG_BPF_SYSCALL
2344 .psock_update_sk_prot = tcp_bpf_update_proto,
2346 .enter_memory_pressure = tcp_enter_memory_pressure,
2347 .leave_memory_pressure = tcp_leave_memory_pressure,
2348 .stream_memory_free = tcp_stream_memory_free,
2349 .sockets_allocated = &tcp_sockets_allocated,
2351 .memory_allocated = &tcp_memory_allocated,
2352 .per_cpu_fw_alloc = &tcp_memory_per_cpu_fw_alloc,
2354 .memory_pressure = &tcp_memory_pressure,
2355 .orphan_count = &tcp_orphan_count,
2356 .sysctl_mem = sysctl_tcp_mem,
2357 .sysctl_wmem_offset = offsetof(struct net, ipv4.sysctl_tcp_wmem),
2358 .sysctl_rmem_offset = offsetof(struct net, ipv4.sysctl_tcp_rmem),
2359 .max_header = MAX_TCP_HEADER,
2360 .obj_size = sizeof(struct tcp6_sock),
2361 .ipv6_pinfo_offset = offsetof(struct tcp6_sock, inet6),
2362 .slab_flags = SLAB_TYPESAFE_BY_RCU,
2363 .twsk_prot = &tcp6_timewait_sock_ops,
2364 .rsk_prot = &tcp6_request_sock_ops,
2366 .no_autobind = true,
2367 .diag_destroy = tcp_abort,
2369 EXPORT_SYMBOL_GPL(tcpv6_prot);
/* inet6 protocol switch entry: maps SOCK_STREAM/IPPROTO_TCP to
 * tcpv6_prot and the generic inet6 stream ops. */
2372 static struct inet_protosw tcpv6_protosw = {
2373 .type = SOCK_STREAM,
2374 .protocol = IPPROTO_TCP,
2375 .prot = &tcpv6_prot,
2376 .ops = &inet6_stream_ops,
2377 .flags = INET_PROTOSW_PERMANENT |
/* Per-netns init: create the kernel control socket used for sending
 * resets/ACKs on behalf of this namespace. */
2381 static int __net_init tcpv6_net_init(struct net *net)
2383 return inet_ctl_sock_create(&net->ipv6.tcp_sk, PF_INET6,
2384 SOCK_RAW, IPPROTO_TCP, net);
/* Per-netns exit: destroy the control socket created in tcpv6_net_init(). */
2387 static void __net_exit tcpv6_net_exit(struct net *net)
2389 inet_ctl_sock_destroy(net->ipv6.tcp_sk);
/* Batched netns exit: purge IPv6 timewait socks for all dying netns. */
2392 static void __net_exit tcpv6_net_exit_batch(struct list_head *net_exit_list)
2394 tcp_twsk_purge(net_exit_list, AF_INET6);
/* Pernet hooks registered by tcpv6_init(). */
2397 static struct pernet_operations tcpv6_net_ops = {
2398 .init = tcpv6_net_init,
2399 .exit = tcpv6_net_exit,
2400 .exit_batch = tcpv6_net_exit_batch,
/* Module init: register the IPv6 TCP protocol handler, the protosw
 * entry, pernet ops and MPTCPv6, unwinding in reverse order on failure
 * (goto cleanup labels are partially elided in this excerpt). */
2403 int __init tcpv6_init(void)
2407 net_hotdata.tcpv6_protocol = (struct inet6_protocol) {
2408 .handler = tcp_v6_rcv,
2409 .err_handler = tcp_v6_err,
2410 .flags = INET6_PROTO_NOPOLICY | INET6_PROTO_FINAL,
2412 ret = inet6_add_protocol(&net_hotdata.tcpv6_protocol, IPPROTO_TCP);
2416 /* register inet6 protocol */
2417 ret = inet6_register_protosw(&tcpv6_protosw);
2419 goto out_tcpv6_protocol;
2421 ret = register_pernet_subsys(&tcpv6_net_ops);
2423 goto out_tcpv6_protosw;
2425 ret = mptcpv6_init();
2427 goto out_tcpv6_pernet_subsys;
/* Error unwind path, in reverse registration order */
2432 out_tcpv6_pernet_subsys:
2433 unregister_pernet_subsys(&tcpv6_net_ops);
2435 inet6_unregister_protosw(&tcpv6_protosw);
2437 inet6_del_protocol(&net_hotdata.tcpv6_protocol, IPPROTO_TCP);
/* Module exit: unregister everything tcpv6_init() registered, in
 * reverse order. */
2441 void tcpv6_exit(void)
2443 unregister_pernet_subsys(&tcpv6_net_ops);
2444 inet6_unregister_protosw(&tcpv6_protosw);
2445 inet6_del_protocol(&net_hotdata.tcpv6_protocol, IPPROTO_TCP);