3 * Linux INET6 implementation
6 * Pedro Roque <roque@di.fc.ul.pt>
10 * linux/net/ipv4/tcp_input.c
11 * linux/net/ipv4/tcp_output.c
14 * Hideaki YOSHIFUJI : sin6_scope_id support
15 * YOSHIFUJI Hideaki @USAGI and: Support IPV6_V6ONLY socket option, which
16 * Alexey Kuznetsov allow both IPv4 and IPv6 sockets to bind
17 * a single port at the same time.
18 * YOSHIFUJI Hideaki @USAGI: convert /proc/net/tcp6 to seq_file.
20 * This program is free software; you can redistribute it and/or
21 * modify it under the terms of the GNU General Public License
22 * as published by the Free Software Foundation; either version
23 * 2 of the License, or (at your option) any later version.
26 #include <linux/bottom_half.h>
27 #include <linux/module.h>
28 #include <linux/errno.h>
29 #include <linux/types.h>
30 #include <linux/socket.h>
31 #include <linux/sockios.h>
32 #include <linux/net.h>
33 #include <linux/jiffies.h>
35 #include <linux/in6.h>
36 #include <linux/netdevice.h>
37 #include <linux/init.h>
38 #include <linux/jhash.h>
39 #include <linux/ipsec.h>
40 #include <linux/times.h>
41 #include <linux/slab.h>
42 #include <linux/uaccess.h>
43 #include <linux/ipv6.h>
44 #include <linux/icmpv6.h>
45 #include <linux/random.h>
48 #include <net/ndisc.h>
49 #include <net/inet6_hashtables.h>
50 #include <net/inet6_connection_sock.h>
52 #include <net/transp_v6.h>
53 #include <net/addrconf.h>
54 #include <net/ip6_route.h>
55 #include <net/ip6_checksum.h>
56 #include <net/inet_ecn.h>
57 #include <net/protocol.h>
60 #include <net/dsfield.h>
61 #include <net/timewait_sock.h>
62 #include <net/inet_common.h>
63 #include <net/secure_seq.h>
64 #include <net/busy_poll.h>
66 #include <linux/proc_fs.h>
67 #include <linux/seq_file.h>
69 #include <crypto/hash.h>
70 #include <linux/scatterlist.h>
72 #include <trace/events/tcp.h>
74 static void tcp_v6_send_reset(const struct sock *sk, struct sk_buff *skb);
75 static void tcp_v6_reqsk_send_ack(const struct sock *sk, struct sk_buff *skb,
76 struct request_sock *req);
78 static int tcp_v6_do_rcv(struct sock *sk, struct sk_buff *skb);
80 static const struct inet_connection_sock_af_ops ipv6_mapped;
81 static const struct inet_connection_sock_af_ops ipv6_specific;
82 #ifdef CONFIG_TCP_MD5SIG
83 static const struct tcp_sock_af_ops tcp_sock_ipv6_specific;
84 static const struct tcp_sock_af_ops tcp_sock_ipv6_mapped_specific;
86 static struct tcp_md5sig_key *tcp_v6_md5_do_lookup(const struct sock *sk,
87 const struct in6_addr *addr)
93 /* Helper returning the inet6 address from a given tcp socket.
94 * It can be used in TCP stack instead of inet6_sk(sk).
95 * This avoids a dereference and allow compiler optimizations.
/* NOTE(review): excerpt is decimated - the comment terminator, braces and
 * the return statement of this helper are missing from this view.
 * Visible code derives the tcp6_sock from the tcp_sock via container_of().
 */
97 static struct ipv6_pinfo *tcp_inet6_sk(const struct sock *sk)
99 struct tcp6_sock *tcp6 = container_of(tcp_sk(sk), struct tcp6_sock, tcp);
/* Cache the skb's dst on the socket for the RX fast path: take a reference
 * with dst_hold_safe(), then record the arrival ifindex and the IPv6 route
 * cookie so the cached dst can be re-validated later.
 * NOTE(review): excerpt is decimated - the sk->sk_rx_dst assignment and
 * closing braces are missing from this view.
 */
104 static void inet6_sk_rx_dst_set(struct sock *sk, const struct sk_buff *skb)
106 struct dst_entry *dst = skb_dst(skb);
108 if (dst && dst_hold_safe(dst)) {
109 const struct rt6_info *rt = (const struct rt6_info *)dst;
112 inet_sk(sk)->rx_dst_ifindex = skb->skb_iif;
113 tcp_inet6_sk(sk)->rx_dst_cookie = rt6_get_cookie(rt);
/* Derive the initial sequence number for an incoming IPv6 segment from the
 * packet's addresses and source port via secure_tcpv6_seq().
 * NOTE(review): decimated - the destination-port argument line and braces
 * are missing from this view.
 */
117 static u32 tcp_v6_init_seq(const struct sk_buff *skb)
119 return secure_tcpv6_seq(ipv6_hdr(skb)->daddr.s6_addr32,
120 ipv6_hdr(skb)->saddr.s6_addr32,
122 tcp_hdr(skb)->source);
/* Per-connection timestamp offset, keyed on the address pair so peers
 * cannot correlate timestamps across connections.
 */
125 static u32 tcp_v6_init_ts_off(const struct net *net, const struct sk_buff *skb)
127 return secure_tcpv6_ts_off(net, ipv6_hdr(skb)->daddr.s6_addr32,
128 ipv6_hdr(skb)->saddr.s6_addr32);
/* Pre-connect hook: validate the user-supplied address length before the
 * cgroup-BPF INET6_CONNECT program runs, so the program cannot read past
 * the bound the caller provided.
 */
131 static int tcp_v6_pre_connect(struct sock *sk, struct sockaddr *uaddr,
134 /* This check is replicated from tcp_v6_connect() and intended to
135 * prevent BPF program called below from accessing bytes that are out
136 * of the bound specified by user in addr_len.
138 if (addr_len < SIN6_LEN_RFC2133)
141 sock_owned_by_me(sk);
143 return BPF_CGROUP_RUN_PROG_INET6_CONNECT(sk, uaddr);
/* Active open (connect()) for an IPv6 TCP socket.
 * Visible steps: validate sockaddr, latch flow label, map in6addr_any to
 * loopback (BSD'ism), handle link-local scope ids, fall back to
 * tcp_v4_connect() for v4-mapped destinations (swapping in the ipv6_mapped
 * af_ops, and restoring ipv6_specific if IPv4 connect fails), route the
 * flow, bind the local port via inet6_hash_connect(), pick a secure ISN and
 * timestamp offset, then send the SYN with tcp_connect().
 * NOTE(review): excerpt is decimated - error labels (failure/late_failure),
 * several returns and closing braces are missing from this view, so the
 * unwind paths cannot be fully confirmed here.
 */
146 static int tcp_v6_connect(struct sock *sk, struct sockaddr *uaddr,
149 struct sockaddr_in6 *usin = (struct sockaddr_in6 *) uaddr;
150 struct inet_sock *inet = inet_sk(sk);
151 struct inet_connection_sock *icsk = inet_csk(sk);
152 struct ipv6_pinfo *np = tcp_inet6_sk(sk);
153 struct tcp_sock *tp = tcp_sk(sk);
154 struct in6_addr *saddr = NULL, *final_p, final;
155 struct ipv6_txoptions *opt;
157 struct dst_entry *dst;
160 struct inet_timewait_death_row *tcp_death_row = &sock_net(sk)->ipv4.tcp_death_row;
162 if (addr_len < SIN6_LEN_RFC2133)
165 if (usin->sin6_family != AF_INET6)
166 return -EAFNOSUPPORT;
168 memset(&fl6, 0, sizeof(fl6));
171 fl6.flowlabel = usin->sin6_flowinfo&IPV6_FLOWINFO_MASK;
172 IP6_ECN_flow_init(fl6.flowlabel);
173 if (fl6.flowlabel&IPV6_FLOWLABEL_MASK) {
174 struct ip6_flowlabel *flowlabel;
175 flowlabel = fl6_sock_lookup(sk, fl6.flowlabel);
178 fl6_sock_release(flowlabel);
183 * connect() to INADDR_ANY means loopback (BSD'ism).
186 if (ipv6_addr_any(&usin->sin6_addr)) {
187 if (ipv6_addr_v4mapped(&sk->sk_v6_rcv_saddr))
188 ipv6_addr_set_v4mapped(htonl(INADDR_LOOPBACK),
191 usin->sin6_addr = in6addr_loopback;
194 addr_type = ipv6_addr_type(&usin->sin6_addr);
196 if (addr_type & IPV6_ADDR_MULTICAST)
199 if (addr_type&IPV6_ADDR_LINKLOCAL) {
200 if (addr_len >= sizeof(struct sockaddr_in6) &&
201 usin->sin6_scope_id) {
202 /* If interface is set while binding, indices
205 if (!sk_dev_equal_l3scope(sk, usin->sin6_scope_id))
208 sk->sk_bound_dev_if = usin->sin6_scope_id;
211 /* Connect to link-local address requires an interface */
212 if (!sk->sk_bound_dev_if)
/* Reset stale TS state when connecting to a different peer (PAWS). */
216 if (tp->rx_opt.ts_recent_stamp &&
217 !ipv6_addr_equal(&sk->sk_v6_daddr, &usin->sin6_addr)) {
218 tp->rx_opt.ts_recent = 0;
219 tp->rx_opt.ts_recent_stamp = 0;
223 sk->sk_v6_daddr = usin->sin6_addr;
224 np->flow_label = fl6.flowlabel;
/* v4-mapped destination: delegate to IPv4 connect with swapped af_ops. */
230 if (addr_type & IPV6_ADDR_MAPPED) {
231 u32 exthdrlen = icsk->icsk_ext_hdr_len;
232 struct sockaddr_in sin;
234 if (__ipv6_only_sock(sk))
237 sin.sin_family = AF_INET;
238 sin.sin_port = usin->sin6_port;
239 sin.sin_addr.s_addr = usin->sin6_addr.s6_addr32[3];
241 icsk->icsk_af_ops = &ipv6_mapped;
242 sk->sk_backlog_rcv = tcp_v4_do_rcv;
243 #ifdef CONFIG_TCP_MD5SIG
244 tp->af_specific = &tcp_sock_ipv6_mapped_specific;
247 err = tcp_v4_connect(sk, (struct sockaddr *)&sin, sizeof(sin));
/* On v4-connect failure, restore the native IPv6 operations. */
250 icsk->icsk_ext_hdr_len = exthdrlen;
251 icsk->icsk_af_ops = &ipv6_specific;
252 sk->sk_backlog_rcv = tcp_v6_do_rcv;
253 #ifdef CONFIG_TCP_MD5SIG
254 tp->af_specific = &tcp_sock_ipv6_specific;
258 np->saddr = sk->sk_v6_rcv_saddr;
263 if (!ipv6_addr_any(&sk->sk_v6_rcv_saddr))
264 saddr = &sk->sk_v6_rcv_saddr;
/* Build the flow key and route the connection. */
266 fl6.flowi6_proto = IPPROTO_TCP;
267 fl6.daddr = sk->sk_v6_daddr;
268 fl6.saddr = saddr ? *saddr : np->saddr;
269 fl6.flowi6_oif = sk->sk_bound_dev_if;
270 fl6.flowi6_mark = sk->sk_mark;
271 fl6.fl6_dport = usin->sin6_port;
272 fl6.fl6_sport = inet->inet_sport;
273 fl6.flowi6_uid = sk->sk_uid;
275 opt = rcu_dereference_protected(np->opt, lockdep_sock_is_held(sk));
276 final_p = fl6_update_dst(&fl6, opt, &final);
278 security_sk_classify_flow(sk, flowi6_to_flowi(&fl6));
280 dst = ip6_dst_lookup_flow(sk, &fl6, final_p);
288 sk->sk_v6_rcv_saddr = *saddr;
291 /* set the source address */
293 inet->inet_rcv_saddr = LOOPBACK4_IPV6;
295 sk->sk_gso_type = SKB_GSO_TCPV6;
296 ip6_dst_store(sk, dst, NULL, NULL);
298 icsk->icsk_ext_hdr_len = 0;
300 icsk->icsk_ext_hdr_len = opt->opt_flen +
303 tp->rx_opt.mss_clamp = IPV6_MIN_MTU - sizeof(struct tcphdr) - sizeof(struct ipv6hdr);
305 inet->inet_dport = usin->sin6_port;
307 tcp_set_state(sk, TCP_SYN_SENT);
308 err = inet6_hash_connect(tcp_death_row, sk);
/* Secure ISN / timestamp offset, skipped for repaired sockets. */
314 if (likely(!tp->repair)) {
316 tp->write_seq = secure_tcpv6_seq(np->saddr.s6_addr32,
317 sk->sk_v6_daddr.s6_addr32,
320 tp->tsoffset = secure_tcpv6_ts_off(sock_net(sk),
322 sk->sk_v6_daddr.s6_addr32);
325 if (tcp_fastopen_defer_connect(sk, &err))
330 err = tcp_connect(sk);
/* Failure path: tear down connection state (labels not visible here). */
337 tcp_set_state(sk, TCP_CLOSE);
339 inet->inet_dport = 0;
340 sk->sk_route_caps = 0;
/* React to a PMTU decrease: ignore LISTEN/CLOSE sockets, update the cached
 * route's MTU, and if our cached pmtu cookie now exceeds the route MTU,
 * re-sync the MSS and retransmit outstanding data at the smaller size.
 * NOTE(review): decimated - the NULL-dst early return and braces are
 * missing from this view.
 */
344 static void tcp_v6_mtu_reduced(struct sock *sk)
346 struct dst_entry *dst;
348 if ((1 << sk->sk_state) & (TCPF_LISTEN | TCPF_CLOSE))
351 dst = inet6_csk_update_pmtu(sk, tcp_sk(sk)->mtu_info);
355 if (inet_csk(sk)->icsk_pmtu_cookie > dst_mtu(dst)) {
356 tcp_sync_mss(sk, dst_mtu(dst));
357 tcp_simple_retransmit(sk);
/* ICMPv6 error handler for TCP. Looks up the socket for the offending
 * segment, filters time-wait/new-syn-recv states, validates the sequence
 * window and hop limit, then dispatches on the ICMP type: NDISC_REDIRECT
 * updates the route, PKT_TOOBIG triggers PMTU handling (deferred via
 * TCP_MTU_REDUCED_DEFERRED when the socket is owned by user), and other
 * errors are delivered to the socket or latched in sk_err_soft.
 * NOTE(review): decimated - declarations of sk/tp/seq/fatal/err, several
 * labels, bh_lock/unlock and returns are missing from this view.
 */
361 static int tcp_v6_err(struct sk_buff *skb, struct inet6_skb_parm *opt,
362 u8 type, u8 code, int offset, __be32 info)
364 const struct ipv6hdr *hdr = (const struct ipv6hdr *)skb->data;
365 const struct tcphdr *th = (struct tcphdr *)(skb->data+offset);
366 struct net *net = dev_net(skb->dev);
367 struct request_sock *fastopen;
368 struct ipv6_pinfo *np;
375 sk = __inet6_lookup_established(net, &tcp_hashinfo,
376 &hdr->daddr, th->dest,
377 &hdr->saddr, ntohs(th->source),
378 skb->dev->ifindex, inet6_sdif(skb));
381 __ICMP6_INC_STATS(net, __in6_dev_get(skb->dev),
386 if (sk->sk_state == TCP_TIME_WAIT) {
387 inet_twsk_put(inet_twsk(sk));
390 seq = ntohl(th->seq);
391 fatal = icmpv6_err_convert(type, code, &err);
392 if (sk->sk_state == TCP_NEW_SYN_RECV) {
393 tcp_req_err(sk, seq, fatal);
398 if (sock_owned_by_user(sk) && type != ICMPV6_PKT_TOOBIG)
399 __NET_INC_STATS(net, LINUX_MIB_LOCKDROPPEDICMPS);
401 if (sk->sk_state == TCP_CLOSE)
/* Drop ICMP whose hop limit is below the socket's min_hopcount filter. */
404 if (ipv6_hdr(skb)->hop_limit < tcp_inet6_sk(sk)->min_hopcount) {
405 __NET_INC_STATS(net, LINUX_MIB_TCPMINTTLDROP);
410 /* XXX (TFO) - tp->snd_una should be ISN (tcp_create_openreq_child() */
411 fastopen = tp->fastopen_rsk;
412 snd_una = fastopen ? tcp_rsk(fastopen)->snt_isn : tp->snd_una;
413 if (sk->sk_state != TCP_LISTEN &&
414 !between(seq, snd_una, tp->snd_nxt)) {
415 __NET_INC_STATS(net, LINUX_MIB_OUTOFWINDOWICMPS);
419 np = tcp_inet6_sk(sk);
421 if (type == NDISC_REDIRECT) {
422 if (!sock_owned_by_user(sk)) {
423 struct dst_entry *dst = __sk_dst_check(sk, np->dst_cookie);
426 dst->ops->redirect(dst, sk, skb);
431 if (type == ICMPV6_PKT_TOOBIG) {
432 /* We are not interested in TCP_LISTEN and open_requests
433 * (SYN-ACKs send out by Linux are always <576bytes so
434 * they should go through unfragmented).
436 if (sk->sk_state == TCP_LISTEN)
439 if (!ip6_sk_accept_pmtu(sk))
442 tp->mtu_info = ntohl(info);
443 if (!sock_owned_by_user(sk))
444 tcp_v6_mtu_reduced(sk);
445 else if (!test_and_set_bit(TCP_MTU_REDUCED_DEFERRED,
452 /* Might be for an request_sock */
453 switch (sk->sk_state) {
456 /* Only in fast or simultaneous open. If a fast open socket is
457 * is already accepted it is treated as a connected one below.
459 if (fastopen && !fastopen->sk)
462 if (!sock_owned_by_user(sk)) {
464 sk->sk_error_report(sk); /* Wake people up to see the error (see connect in sock.c) */
468 sk->sk_err_soft = err;
472 if (!sock_owned_by_user(sk) && np->recverr) {
474 sk->sk_error_report(sk);
476 sk->sk_err_soft = err;
/* Build and transmit a SYN-ACK for a pending request: route the flow if no
 * dst was supplied, construct the segment with tcp_make_synack(), compute
 * the TCP checksum over the address pair, reflect the client's flow label
 * when np->repflow is set, and emit via ip6_xmit() under RCU-protected
 * tx options.
 * NOTE(review): decimated - err initialization, skb NULL check, rcu
 * lock/unlock and the done/return tail are missing from this view.
 */
485 static int tcp_v6_send_synack(const struct sock *sk, struct dst_entry *dst,
487 struct request_sock *req,
488 struct tcp_fastopen_cookie *foc,
489 enum tcp_synack_type synack_type)
491 struct inet_request_sock *ireq = inet_rsk(req);
492 struct ipv6_pinfo *np = tcp_inet6_sk(sk);
493 struct ipv6_txoptions *opt;
494 struct flowi6 *fl6 = &fl->u.ip6;
498 /* First, grab a route. */
499 if (!dst && (dst = inet6_csk_route_req(sk, fl6, req,
500 IPPROTO_TCP)) == NULL)
503 skb = tcp_make_synack(sk, dst, req, foc, synack_type);
506 __tcp_v6_send_check(skb, &ireq->ir_v6_loc_addr,
507 &ireq->ir_v6_rmt_addr);
509 fl6->daddr = ireq->ir_v6_rmt_addr;
510 if (np->repflow && ireq->pktopts)
511 fl6->flowlabel = ip6_flowlabel(ipv6_hdr(ireq->pktopts));
514 opt = ireq->ipv6_opt;
516 opt = rcu_dereference(np->opt);
517 err = ip6_xmit(sk, skb, fl6, sk->sk_mark, opt, np->tclass);
519 err = net_xmit_eval(err);
/* Release per-request IPv6 resources: the saved tx options and the pktopts
 * skb captured from the SYN.
 */
527 static void tcp_v6_reqsk_destructor(struct request_sock *req)
529 kfree(inet_rsk(req)->ipv6_opt);
530 kfree_skb(inet_rsk(req)->pktopts);
533 #ifdef CONFIG_TCP_MD5SIG
/* Look up the MD5 signature key configured for a given IPv6 peer address. */
534 static struct tcp_md5sig_key *tcp_v6_md5_do_lookup(const struct sock *sk,
535 const struct in6_addr *addr)
537 return tcp_md5_do_lookup(sk, (union tcp_md5_addr *)addr, AF_INET6);
/* af_ops hook: MD5 key lookup keyed on the peer socket's destination
 * address.
 */
540 static struct tcp_md5sig_key *tcp_v6_md5_lookup(const struct sock *sk,
541 const struct sock *addr_sk)
543 return tcp_v6_md5_do_lookup(sk, &addr_sk->sk_v6_daddr);
/* setsockopt(TCP_MD5SIG / TCP_MD5SIG_EXT) handler: copy the user's
 * tcp_md5sig command, validate the family and (for _EXT) the prefix
 * length, then add or delete the key. v4-mapped addresses are stored
 * under AF_INET using the embedded IPv4 address word.
 * NOTE(review): decimated - prefixlen declaration, several -EINVAL
 * returns and closing braces are missing from this view.
 */
546 static int tcp_v6_parse_md5_keys(struct sock *sk, int optname,
547 char __user *optval, int optlen)
549 struct tcp_md5sig cmd;
550 struct sockaddr_in6 *sin6 = (struct sockaddr_in6 *)&cmd.tcpm_addr;
553 if (optlen < sizeof(cmd))
556 if (copy_from_user(&cmd, optval, sizeof(cmd)))
559 if (sin6->sin6_family != AF_INET6)
562 if (optname == TCP_MD5SIG_EXT &&
563 cmd.tcpm_flags & TCP_MD5SIG_FLAG_PREFIX) {
564 prefixlen = cmd.tcpm_prefixlen;
565 if (prefixlen > 128 || (ipv6_addr_v4mapped(&sin6->sin6_addr) &&
/* Default prefix: full-address match (32 bits for v4-mapped). */
569 prefixlen = ipv6_addr_v4mapped(&sin6->sin6_addr) ? 32 : 128;
/* Zero key length means delete the existing key. */
572 if (!cmd.tcpm_keylen) {
573 if (ipv6_addr_v4mapped(&sin6->sin6_addr))
574 return tcp_md5_do_del(sk, (union tcp_md5_addr *)&sin6->sin6_addr.s6_addr32[3],
576 return tcp_md5_do_del(sk, (union tcp_md5_addr *)&sin6->sin6_addr,
577 AF_INET6, prefixlen);
580 if (cmd.tcpm_keylen > TCP_MD5SIG_MAXKEYLEN)
583 if (ipv6_addr_v4mapped(&sin6->sin6_addr))
584 return tcp_md5_do_add(sk, (union tcp_md5_addr *)&sin6->sin6_addr.s6_addr32[3],
585 AF_INET, prefixlen, cmd.tcpm_key,
586 cmd.tcpm_keylen, GFP_KERNEL);
588 return tcp_md5_do_add(sk, (union tcp_md5_addr *)&sin6->sin6_addr,
589 AF_INET6, prefixlen, cmd.tcpm_key,
590 cmd.tcpm_keylen, GFP_KERNEL);
/* Feed the TCPv6 pseudo-header plus a copy of the TCP header (with the
 * checksum field cleared, per RFC 2385) into the per-CPU MD5 hash request.
 * NOTE(review): decimated - the bp assignment from hp->scratch, the
 * address copies into bp, and the _th->check = 0 line are missing from
 * this view.
 */
593 static int tcp_v6_md5_hash_headers(struct tcp_md5sig_pool *hp,
594 const struct in6_addr *daddr,
595 const struct in6_addr *saddr,
596 const struct tcphdr *th, int nbytes)
598 struct tcp6_pseudohdr *bp;
599 struct scatterlist sg;
603 /* 1. TCP pseudo-header (RFC2460) */
606 bp->protocol = cpu_to_be32(IPPROTO_TCP);
607 bp->len = cpu_to_be32(nbytes);
609 _th = (struct tcphdr *)(bp + 1);
610 memcpy(_th, th, sizeof(*th));
613 sg_init_one(&sg, bp, sizeof(*bp) + sizeof(*th));
614 ahash_request_set_crypt(hp->md5_req, &sg, NULL,
615 sizeof(*bp) + sizeof(*th));
616 return crypto_ahash_update(hp->md5_req);
/* Compute the MD5 signature over pseudo-header + TCP header + key only
 * (no payload) - used for RST/ACK segments built by hand. On any crypto
 * failure the output is zeroed so a garbage signature is never emitted.
 * NOTE(review): decimated - clear_hash labels and return statements are
 * partially missing from this view.
 */
619 static int tcp_v6_md5_hash_hdr(char *md5_hash, const struct tcp_md5sig_key *key,
620 const struct in6_addr *daddr, struct in6_addr *saddr,
621 const struct tcphdr *th)
623 struct tcp_md5sig_pool *hp;
624 struct ahash_request *req;
626 hp = tcp_get_md5sig_pool();
628 goto clear_hash_noput;
631 if (crypto_ahash_init(req))
633 if (tcp_v6_md5_hash_headers(hp, daddr, saddr, th, th->doff << 2))
635 if (tcp_md5_hash_key(hp, key))
637 ahash_request_set_crypt(req, NULL, md5_hash, 0);
638 if (crypto_ahash_final(req))
641 tcp_put_md5sig_pool();
645 tcp_put_md5sig_pool();
647 memset(md5_hash, 0, 16);
/* Compute the MD5 signature over an entire skb (pseudo-header, TCP header,
 * payload, then key). Addresses come from the socket when available
 * (established/request sockets), else from the packet's IPv6 header.
 * Zeroes the output on failure, mirroring tcp_v6_md5_hash_hdr().
 * NOTE(review): decimated - clear_hash labels/returns are partially
 * missing from this view.
 */
651 static int tcp_v6_md5_hash_skb(char *md5_hash,
652 const struct tcp_md5sig_key *key,
653 const struct sock *sk,
654 const struct sk_buff *skb)
656 const struct in6_addr *saddr, *daddr;
657 struct tcp_md5sig_pool *hp;
658 struct ahash_request *req;
659 const struct tcphdr *th = tcp_hdr(skb);
661 if (sk) { /* valid for establish/request sockets */
662 saddr = &sk->sk_v6_rcv_saddr;
663 daddr = &sk->sk_v6_daddr;
665 const struct ipv6hdr *ip6h = ipv6_hdr(skb);
666 saddr = &ip6h->saddr;
667 daddr = &ip6h->daddr;
670 hp = tcp_get_md5sig_pool();
672 goto clear_hash_noput;
675 if (crypto_ahash_init(req))
678 if (tcp_v6_md5_hash_headers(hp, daddr, saddr, th, skb->len))
680 if (tcp_md5_hash_skb_data(hp, skb, th->doff << 2))
682 if (tcp_md5_hash_key(hp, key))
684 ahash_request_set_crypt(req, NULL, md5_hash, 0);
685 if (crypto_ahash_final(req))
688 tcp_put_md5sig_pool();
692 tcp_put_md5sig_pool();
694 memset(md5_hash, 0, 16);
/* Validate the MD5 option on an inbound segment against the key configured
 * for the peer. Accepts when neither side uses MD5; rejects (and counts)
 * a missing, unexpected, or mismatching signature.
 * NOTE(review): decimated - genhash/newhash declarations, the true/false
 * returns and the non-MD5SIG fallback are missing from this view.
 */
700 static bool tcp_v6_inbound_md5_hash(const struct sock *sk,
701 const struct sk_buff *skb)
703 #ifdef CONFIG_TCP_MD5SIG
704 const __u8 *hash_location = NULL;
705 struct tcp_md5sig_key *hash_expected;
706 const struct ipv6hdr *ip6h = ipv6_hdr(skb);
707 const struct tcphdr *th = tcp_hdr(skb);
711 hash_expected = tcp_v6_md5_do_lookup(sk, &ip6h->saddr);
712 hash_location = tcp_parse_md5sig_option(th);
714 /* We've parsed the options - do we have a hash? */
715 if (!hash_expected && !hash_location)
718 if (hash_expected && !hash_location) {
719 NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPMD5NOTFOUND);
723 if (!hash_expected && hash_location) {
724 NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPMD5UNEXPECTED);
728 /* check the signature */
729 genhash = tcp_v6_md5_hash_skb(newhash,
733 if (genhash || memcmp(hash_location, newhash, 16) != 0) {
734 NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPMD5FAILURE);
735 net_info_ratelimited("MD5 Hash %s for [%pI6c]:%u->[%pI6c]:%u\n",
736 genhash ? "failed" : "mismatch",
737 &ip6h->saddr, ntohs(th->source),
738 &ip6h->daddr, ntohs(th->dest));
/* Initialize the IPv6 part of a request sock from the incoming SYN:
 * record the address pair, pin the arrival interface for link-local
 * peers (or L3 slave devices), and hold a reference on the SYN skb when
 * any rx option the listener wants would need it later.
 * NOTE(review): decimated - the ireq->pktopts assignment and closing
 * braces are missing from this view.
 */
745 static void tcp_v6_init_req(struct request_sock *req,
746 const struct sock *sk_listener,
749 bool l3_slave = ipv6_l3mdev_skb(TCP_SKB_CB(skb)->header.h6.flags);
750 struct inet_request_sock *ireq = inet_rsk(req);
751 const struct ipv6_pinfo *np = tcp_inet6_sk(sk_listener);
753 ireq->ir_v6_rmt_addr = ipv6_hdr(skb)->saddr;
754 ireq->ir_v6_loc_addr = ipv6_hdr(skb)->daddr;
756 /* So that link locals have meaning */
757 if ((!sk_listener->sk_bound_dev_if || l3_slave) &&
758 ipv6_addr_type(&ireq->ir_v6_rmt_addr) & IPV6_ADDR_LINKLOCAL)
759 ireq->ir_iif = tcp_v6_iif(skb);
761 if (!TCP_SKB_CB(skb)->tcp_tw_isn &&
762 (ipv6_opt_accepted(sk_listener, skb, &TCP_SKB_CB(skb)->header.h6) ||
763 np->rxopt.bits.rxinfo ||
764 np->rxopt.bits.rxoinfo || np->rxopt.bits.rxhlim ||
765 np->rxopt.bits.rxohlim || np->repflow)) {
766 refcount_inc(&skb->users);
/* route_req hook: resolve the route for a request sock's flow. */
771 static struct dst_entry *tcp_v6_route_req(const struct sock *sk,
773 const struct request_sock *req)
775 return inet6_csk_route_req(sk, &fl->u.ip6, req, IPPROTO_TCP);
/* Request-sock operations for TCPv6: sizing plus the SYN-ACK retransmit,
 * ACK, reset and destructor callbacks defined above.
 */
778 struct request_sock_ops tcp6_request_sock_ops __read_mostly = {
780 .obj_size = sizeof(struct tcp6_request_sock),
781 .rtx_syn_ack = tcp_rtx_synack,
782 .send_ack = tcp_v6_reqsk_send_ack,
783 .destructor = tcp_v6_reqsk_destructor,
784 .send_reset = tcp_v6_send_reset,
785 .syn_ack_timeout = tcp_syn_ack_timeout,
/* AF-specific request-sock ops: MSS clamp for the IPv6 minimum MTU, MD5
 * and syncookie hooks (config-dependent), and the init/route/ISN/synack
 * helpers defined in this file.
 */
788 static const struct tcp_request_sock_ops tcp_request_sock_ipv6_ops = {
789 .mss_clamp = IPV6_MIN_MTU - sizeof(struct tcphdr) -
790 sizeof(struct ipv6hdr),
791 #ifdef CONFIG_TCP_MD5SIG
792 .req_md5_lookup = tcp_v6_md5_lookup,
793 .calc_md5_hash = tcp_v6_md5_hash_skb,
795 .init_req = tcp_v6_init_req,
796 #ifdef CONFIG_SYN_COOKIES
797 .cookie_init_seq = cookie_v6_init_sequence,
799 .route_req = tcp_v6_route_req,
800 .init_seq = tcp_v6_init_seq,
801 .init_ts_off = tcp_v6_init_ts_off,
802 .send_synack = tcp_v6_send_synack,
/* Build and send a bare TCP control segment (RST or ACK) in response to
 * an incoming skb, using the per-netns control socket rather than the
 * (possibly absent) connection socket. Allocates a minimal skb, fills a
 * swapped-direction TCP header, appends timestamp and MD5 options as
 * requested, mirrors the flow back to the sender, routes it and transmits
 * with ip6_xmit(). Counts OUTSEGS always and OUTRSTS for resets.
 * NOTE(review): decimated - declarations (t1, topt, fl6, mark), the
 * NULL-buff early return, IS_ERR(dst) handling and kfree_skb fallback are
 * missing from this view.
 */
805 static void tcp_v6_send_response(const struct sock *sk, struct sk_buff *skb, u32 seq,
806 u32 ack, u32 win, u32 tsval, u32 tsecr,
807 int oif, struct tcp_md5sig_key *key, int rst,
808 u8 tclass, __be32 label)
810 const struct tcphdr *th = tcp_hdr(skb);
812 struct sk_buff *buff;
814 struct net *net = sk ? sock_net(sk) : dev_net(skb_dst(skb)->dev);
815 struct sock *ctl_sk = net->ipv6.tcp_sk;
816 unsigned int tot_len = sizeof(struct tcphdr);
817 struct dst_entry *dst;
822 tot_len += TCPOLEN_TSTAMP_ALIGNED;
823 #ifdef CONFIG_TCP_MD5SIG
825 tot_len += TCPOLEN_MD5SIG_ALIGNED;
828 buff = alloc_skb(MAX_HEADER + sizeof(struct ipv6hdr) + tot_len,
833 skb_reserve(buff, MAX_HEADER + sizeof(struct ipv6hdr) + tot_len);
835 t1 = skb_push(buff, tot_len);
836 skb_reset_transport_header(buff);
838 /* Swap the send and the receive. */
839 memset(t1, 0, sizeof(*t1));
840 t1->dest = th->source;
841 t1->source = th->dest;
842 t1->doff = tot_len / 4;
843 t1->seq = htonl(seq);
844 t1->ack_seq = htonl(ack);
845 t1->ack = !rst || !th->ack;
847 t1->window = htons(win);
849 topt = (__be32 *)(t1 + 1);
/* Timestamp option: NOP,NOP,TS per RFC 7323 layout. */
852 *topt++ = htonl((TCPOPT_NOP << 24) | (TCPOPT_NOP << 16) |
853 (TCPOPT_TIMESTAMP << 8) | TCPOLEN_TIMESTAMP);
854 *topt++ = htonl(tsval);
855 *topt++ = htonl(tsecr);
858 #ifdef CONFIG_TCP_MD5SIG
860 *topt++ = htonl((TCPOPT_NOP << 24) | (TCPOPT_NOP << 16) |
861 (TCPOPT_MD5SIG << 8) | TCPOLEN_MD5SIG);
862 tcp_v6_md5_hash_hdr((__u8 *)topt, key,
863 &ipv6_hdr(skb)->saddr,
864 &ipv6_hdr(skb)->daddr, t1);
/* Reflect the flow: reply daddr = incoming saddr and vice versa. */
868 memset(&fl6, 0, sizeof(fl6));
869 fl6.daddr = ipv6_hdr(skb)->saddr;
870 fl6.saddr = ipv6_hdr(skb)->daddr;
871 fl6.flowlabel = label;
873 buff->ip_summed = CHECKSUM_PARTIAL;
876 __tcp_v6_send_check(buff, &fl6.saddr, &fl6.daddr);
878 fl6.flowi6_proto = IPPROTO_TCP;
879 if (rt6_need_strict(&fl6.daddr) && !oif)
880 fl6.flowi6_oif = tcp_v6_iif(skb);
882 if (!oif && netif_index_is_l3_master(net, skb->skb_iif))
885 fl6.flowi6_oif = oif;
889 mark = (sk->sk_state == TCP_TIME_WAIT) ?
890 inet_twsk(sk)->tw_mark : sk->sk_mark;
891 fl6.flowi6_mark = IP6_REPLY_MARK(net, skb->mark) ?: mark;
892 fl6.fl6_dport = t1->dest;
893 fl6.fl6_sport = t1->source;
894 fl6.flowi6_uid = sock_net_uid(net, sk && sk_fullsock(sk) ? sk : NULL);
895 security_skb_classify_flow(skb, flowi6_to_flowi(&fl6));
897 /* Pass a socket to ip6_dst_lookup either it is for RST
898 * Underlying function will use this to retrieve the network
901 dst = ip6_dst_lookup_flow(ctl_sk, &fl6, NULL);
903 skb_dst_set(buff, dst);
904 ip6_xmit(ctl_sk, buff, &fl6, fl6.flowi6_mark, NULL, tclass);
905 TCP_INC_STATS(net, TCP_MIB_OUTSEGS);
907 TCP_INC_STATS(net, TCP_MIB_OUTRSTS);
/* Send a RST in reply to an offending segment (never in reply to a RST).
 * With MD5 enabled, finds the signing key: from the socket when we have
 * one, otherwise (signed segment, no socket) via a listener lookup on the
 * destination address - an unverifiable signed segment gets no RST.
 * Sequence numbers follow RFC 793: echo the ACK as seq when present,
 * else ACK the segment's data.
 * NOTE(review): decimated - oif/genhash declarations, the th->rst early
 * return, rcu_read_lock/unlock and the out label are missing from this
 * view.
 */
914 static void tcp_v6_send_reset(const struct sock *sk, struct sk_buff *skb)
916 const struct tcphdr *th = tcp_hdr(skb);
917 u32 seq = 0, ack_seq = 0;
918 struct tcp_md5sig_key *key = NULL;
919 #ifdef CONFIG_TCP_MD5SIG
920 const __u8 *hash_location = NULL;
921 struct ipv6hdr *ipv6h = ipv6_hdr(skb);
922 unsigned char newhash[16];
924 struct sock *sk1 = NULL;
931 /* If sk not NULL, it means we did a successful lookup and incoming
932 * route had to be correct. prequeue might have dropped our dst.
934 if (!sk && !ipv6_unicast_destination(skb))
937 #ifdef CONFIG_TCP_MD5SIG
939 hash_location = tcp_parse_md5sig_option(th);
940 if (sk && sk_fullsock(sk)) {
941 key = tcp_v6_md5_do_lookup(sk, &ipv6h->saddr);
942 } else if (hash_location) {
944 * active side is lost. Try to find listening socket through
945 * source port, and then find md5 key through listening socket.
946 * we are not loose security here:
947 * Incoming packet is checked with md5 hash with finding key,
948 * no RST generated if md5 hash doesn't match.
950 sk1 = inet6_lookup_listener(dev_net(skb_dst(skb)->dev),
951 &tcp_hashinfo, NULL, 0,
953 th->source, &ipv6h->daddr,
955 tcp_v6_iif_l3_slave(skb),
960 key = tcp_v6_md5_do_lookup(sk1, &ipv6h->saddr);
964 genhash = tcp_v6_md5_hash_skb(newhash, key, NULL, skb);
965 if (genhash || memcmp(hash_location, newhash, 16) != 0)
971 seq = ntohl(th->ack_seq);
973 ack_seq = ntohl(th->seq) + th->syn + th->fin + skb->len -
977 oif = sk->sk_bound_dev_if;
979 trace_tcp_send_reset(sk, skb);
982 tcp_v6_send_response(sk, skb, seq, ack_seq, 0, 0, 0, oif, key, 1, 0, 0);
984 #ifdef CONFIG_TCP_MD5SIG
/* Thin wrapper over tcp_v6_send_response() with rst=0: emit a bare ACK. */
990 static void tcp_v6_send_ack(const struct sock *sk, struct sk_buff *skb, u32 seq,
991 u32 ack, u32 win, u32 tsval, u32 tsecr, int oif,
992 struct tcp_md5sig_key *key, u8 tclass,
995 tcp_v6_send_response(sk, skb, seq, ack, win, tsval, tsecr, oif, key, 0,
/* ACK on behalf of a TIME_WAIT socket, using the state snapshotted into
 * the timewait sock (sequence numbers, scaled window, timestamp offset,
 * MD5 key, tclass and flow label).
 * NOTE(review): decimated - the inet_twsk_put() tail is missing from
 * this view.
 */
999 static void tcp_v6_timewait_ack(struct sock *sk, struct sk_buff *skb)
1001 struct inet_timewait_sock *tw = inet_twsk(sk);
1002 struct tcp_timewait_sock *tcptw = tcp_twsk(sk);
1004 tcp_v6_send_ack(sk, skb, tcptw->tw_snd_nxt, tcptw->tw_rcv_nxt,
1005 tcptw->tw_rcv_wnd >> tw->tw_rcv_wscale,
1006 tcp_time_stamp_raw() + tcptw->tw_ts_offset,
1007 tcptw->tw_ts_recent, tw->tw_bound_dev_if, tcp_twsk_md5_key(tcptw),
1008 tw->tw_tclass, cpu_to_be32(tw->tw_flowlabel));
/* ACK on behalf of a request sock (SYN_RECV / Fast Open), scaling the
 * advertised window per RFC 7323 and choosing seq based on whether the
 * parent is still LISTEN (regular) or SYN_RECV (Fast Open).
 * NOTE(review): decimated - the closing argument lines of the call are
 * missing from this view.
 */
1013 static void tcp_v6_reqsk_send_ack(const struct sock *sk, struct sk_buff *skb,
1014 struct request_sock *req)
1016 /* sk->sk_state == TCP_LISTEN -> for regular TCP_SYN_RECV
1017 * sk->sk_state == TCP_SYN_RECV -> for Fast Open.
1020 * The window field (SEG.WND) of every outgoing segment, with the
1021 * exception of <SYN> segments, MUST be right-shifted by
1022 * Rcv.Wind.Shift bits:
1024 tcp_v6_send_ack(sk, skb, (sk->sk_state == TCP_LISTEN) ?
1025 tcp_rsk(req)->snt_isn + 1 : tcp_sk(sk)->snd_nxt,
1026 tcp_rsk(req)->rcv_nxt,
1027 req->rsk_rcv_wnd >> inet_rsk(req)->rcv_wscale,
1028 tcp_time_stamp_raw() + tcp_rsk(req)->ts_off,
1029 req->ts_recent, sk->sk_bound_dev_if,
1030 tcp_v6_md5_do_lookup(sk, &ipv6_hdr(skb)->saddr),
/* Validate a syncookie ACK (CONFIG_SYN_COOKIES only) and return the child
 * socket it reconstructs; with cookies disabled this is a pass-through.
 * NOTE(review): decimated - the !th->syn guard and return are missing
 * from this view.
 */
1035 static struct sock *tcp_v6_cookie_check(struct sock *sk, struct sk_buff *skb)
1037 #ifdef CONFIG_SYN_COOKIES
1038 const struct tcphdr *th = tcp_hdr(skb);
1041 sk = cookie_v6_check(sk, skb);
/* Handle an incoming SYN: delegate v4-mapped traffic to tcp_v4_conn_request,
 * drop non-unicast destinations, otherwise run the generic tcp_conn_request
 * with the IPv6 request-sock ops. Returns 0 so no reset is sent on drop.
 */
1046 static int tcp_v6_conn_request(struct sock *sk, struct sk_buff *skb)
1048 if (skb->protocol == htons(ETH_P_IP))
1049 return tcp_v4_conn_request(sk, skb);
1051 if (!ipv6_unicast_destination(skb))
1054 return tcp_conn_request(&tcp6_request_sock_ops,
1055 &tcp_request_sock_ipv6_ops, sk, skb);
1059 return 0; /* don't send reset */
/* Undo tcp_v6_fill_cb(): copy the saved inet6_skb_parm back to the front
 * of skb->cb so IP6CB() users see it again.
 */
1062 static void tcp_v6_restore_cb(struct sk_buff *skb)
1064 /* We need to move header back to the beginning if xfrm6_policy_check()
1065 * and tcp_v6_fill_cb() are going to be called again.
1066 * ip6_datagram_recv_specific_ctl() also expects IP6CB to be there.
1068 memmove(IP6CB(skb), &TCP_SKB_CB(skb)->header.h6,
1069 sizeof(struct inet6_skb_parm));
/* Create the child socket when the 3-way handshake completes.
 * Two paths are visible: (1) v4-mapped - delegate to tcp_v4_syn_recv_sock
 * and retrofit the child with the ipv6_mapped ops and rx-option state;
 * (2) native IPv6 - route, tcp_create_openreq_child(), copy addresses and
 * pinfo from the request/listener, duplicate tx options, sync MSS/advmss,
 * copy the peer's MD5 key, inherit the port, hash the child, and clone
 * SYN pktoptions into the child if we own the request.
 * NOTE(review): excerpt is decimated - fl6 declaration, several NULL
 * checks, the overflow/drop labels and the final returns are missing
 * from this view, so the error unwind cannot be fully confirmed here.
 */
1072 static struct sock *tcp_v6_syn_recv_sock(const struct sock *sk, struct sk_buff *skb,
1073 struct request_sock *req,
1074 struct dst_entry *dst,
1075 struct request_sock *req_unhash,
1078 struct inet_request_sock *ireq;
1079 struct ipv6_pinfo *newnp;
1080 const struct ipv6_pinfo *np = tcp_inet6_sk(sk);
1081 struct ipv6_txoptions *opt;
1082 struct inet_sock *newinet;
1083 struct tcp_sock *newtp;
1085 #ifdef CONFIG_TCP_MD5SIG
1086 struct tcp_md5sig_key *key;
1090 if (skb->protocol == htons(ETH_P_IP)) {
1095 newsk = tcp_v4_syn_recv_sock(sk, skb, req, dst,
1096 req_unhash, own_req);
1101 inet_sk(newsk)->pinet6 = tcp_inet6_sk(newsk);
1103 newinet = inet_sk(newsk);
1104 newnp = tcp_inet6_sk(newsk);
1105 newtp = tcp_sk(newsk);
1107 memcpy(newnp, np, sizeof(struct ipv6_pinfo));
1109 newnp->saddr = newsk->sk_v6_rcv_saddr;
1111 inet_csk(newsk)->icsk_af_ops = &ipv6_mapped;
1112 newsk->sk_backlog_rcv = tcp_v4_do_rcv;
1113 #ifdef CONFIG_TCP_MD5SIG
1114 newtp->af_specific = &tcp_sock_ipv6_mapped_specific;
1117 newnp->ipv6_mc_list = NULL;
1118 newnp->ipv6_ac_list = NULL;
1119 newnp->ipv6_fl_list = NULL;
1120 newnp->pktoptions = NULL;
1122 newnp->mcast_oif = tcp_v6_iif(skb);
1123 newnp->mcast_hops = ipv6_hdr(skb)->hop_limit;
1124 newnp->rcv_flowinfo = ip6_flowinfo(ipv6_hdr(skb));
1126 newnp->flow_label = ip6_flowlabel(ipv6_hdr(skb));
1129 * No need to charge this sock to the relevant IPv6 refcnt debug socks count
1130 * here, tcp_create_openreq_child now does this for us, see the comment in
1131 * that function for the gory details. -acme
1134 /* It is tricky place. Until this moment IPv4 tcp
1135 worked with IPv6 icsk.icsk_af_ops.
1138 tcp_sync_mss(newsk, inet_csk(newsk)->icsk_pmtu_cookie);
/* Native IPv6 path starts here. */
1143 ireq = inet_rsk(req);
1145 if (sk_acceptq_is_full(sk))
1149 dst = inet6_csk_route_req(sk, &fl6, req, IPPROTO_TCP);
1154 newsk = tcp_create_openreq_child(sk, req, skb);
1159 * No need to charge this sock to the relevant IPv6 refcnt debug socks
1160 * count here, tcp_create_openreq_child now does this for us, see the
1161 * comment in that function for the gory details. -acme
1164 newsk->sk_gso_type = SKB_GSO_TCPV6;
1165 ip6_dst_store(newsk, dst, NULL, NULL);
1166 inet6_sk_rx_dst_set(newsk, skb);
1168 inet_sk(newsk)->pinet6 = tcp_inet6_sk(newsk);
1170 newtp = tcp_sk(newsk);
1171 newinet = inet_sk(newsk);
1172 newnp = tcp_inet6_sk(newsk);
1174 memcpy(newnp, np, sizeof(struct ipv6_pinfo));
1176 newsk->sk_v6_daddr = ireq->ir_v6_rmt_addr;
1177 newnp->saddr = ireq->ir_v6_loc_addr;
1178 newsk->sk_v6_rcv_saddr = ireq->ir_v6_loc_addr;
1179 newsk->sk_bound_dev_if = ireq->ir_iif;
1181 /* Now IPv6 options...
1183 First: no IPv4 options.
1185 newinet->inet_opt = NULL;
1186 newnp->ipv6_mc_list = NULL;
1187 newnp->ipv6_ac_list = NULL;
1188 newnp->ipv6_fl_list = NULL;
1191 newnp->rxopt.all = np->rxopt.all;
1193 newnp->pktoptions = NULL;
1195 newnp->mcast_oif = tcp_v6_iif(skb);
1196 newnp->mcast_hops = ipv6_hdr(skb)->hop_limit;
1197 newnp->rcv_flowinfo = ip6_flowinfo(ipv6_hdr(skb));
1199 newnp->flow_label = ip6_flowlabel(ipv6_hdr(skb));
1201 /* Clone native IPv6 options from listening socket (if any)
1203 Yes, keeping reference count would be much more clever,
1204 but we make one more one thing there: reattach optmem
1207 opt = ireq->ipv6_opt;
1209 opt = rcu_dereference(np->opt);
1211 opt = ipv6_dup_options(newsk, opt);
1212 RCU_INIT_POINTER(newnp->opt, opt);
1214 inet_csk(newsk)->icsk_ext_hdr_len = 0;
1216 inet_csk(newsk)->icsk_ext_hdr_len = opt->opt_nflen +
1219 tcp_ca_openreq_child(newsk, dst);
1221 tcp_sync_mss(newsk, dst_mtu(dst));
1222 newtp->advmss = tcp_mss_clamp(tcp_sk(sk), dst_metric_advmss(dst));
1224 tcp_initialize_rcv_mss(newsk);
1226 newinet->inet_daddr = newinet->inet_saddr = LOOPBACK4_IPV6;
1227 newinet->inet_rcv_saddr = LOOPBACK4_IPV6;
1229 #ifdef CONFIG_TCP_MD5SIG
1230 /* Copy over the MD5 key from the original socket */
1231 key = tcp_v6_md5_do_lookup(sk, &newsk->sk_v6_daddr);
1233 /* We're using one, so create a matching key
1234 * on the newsk structure. If we fail to get
1235 * memory, then we end up not copying the key
1238 tcp_md5_do_add(newsk, (union tcp_md5_addr *)&newsk->sk_v6_daddr,
1239 AF_INET6, 128, key->key, key->keylen,
1240 sk_gfp_mask(sk, GFP_ATOMIC));
1244 if (__inet_inherit_port(sk, newsk) < 0) {
1245 inet_csk_prepare_forced_close(newsk);
1249 *own_req = inet_ehash_nolisten(newsk, req_to_sk(req_unhash));
1251 tcp_move_syn(newtp, req);
1253 /* Clone pktoptions received with SYN, if we own the req */
1254 if (ireq->pktopts) {
1255 newnp->pktoptions = skb_clone(ireq->pktopts,
1256 sk_gfp_mask(sk, GFP_ATOMIC));
1257 consume_skb(ireq->pktopts);
1258 ireq->pktopts = NULL;
1259 if (newnp->pktoptions) {
1260 tcp_v6_restore_cb(newnp->pktoptions);
1261 skb_set_owner_r(newnp->pktoptions, newsk);
1269 __NET_INC_STATS(sock_net(sk), LINUX_MIB_LISTENOVERFLOWS);
1277 /* The socket must have it's spinlock held when we get
1278 * here, unless it is a TCP_LISTEN socket.
1280 * We have a potential double-lock case here, so even when
1281 * doing backlog processing we use the BH locking scheme.
1282 * This is because we cannot sleep with the original spinlock
1285 static int tcp_v6_do_rcv(struct sock *sk, struct sk_buff *skb)
/* Main receive entry once the socket is resolved: delegates v4-mapped
 * traffic to tcp_v4_do_rcv; otherwise clones the skb for IPV6_PKTOPTIONS
 * latching, runs the ESTABLISHED fast path (with cached-dst validation),
 * the LISTEN/cookie path, or tcp_rcv_state_process, and finally latches
 * rx options (mcast_oif, hop limit, flowinfo/label, pktoptions) from the
 * clone when the segment landed in sequence on a non-passive socket.
 * NOTE(review): decimated - reset/discard/csum_err labels, xfrm policy
 * checks and several returns are missing from this view.
 */
1287 struct ipv6_pinfo *np = tcp_inet6_sk(sk);
1288 struct sk_buff *opt_skb = NULL;
1289 struct tcp_sock *tp;
1291 /* Imagine: socket is IPv6. IPv4 packet arrives,
1292 goes to IPv4 receive handler and backlogged.
1293 From backlog it always goes here. Kerboom...
1294 Fortunately, tcp_rcv_established and rcv_established
1295 handle them correctly, but it is not case with
1296 tcp_v6_hnd_req and tcp_v6_send_reset(). --ANK
1299 if (skb->protocol == htons(ETH_P_IP))
1300 return tcp_v4_do_rcv(sk, skb);
1303 * socket locking is here for SMP purposes as backlog rcv
1304 * is currently called with bh processing disabled.
1307 /* Do Stevens' IPV6_PKTOPTIONS.
1309 Yes, guys, it is the only place in our code, where we
1310 may make it not affecting IPv4.
1311 The rest of code is protocol independent,
1312 and I do not like idea to uglify IPv4.
1314 Actually, all the idea behind IPV6_PKTOPTIONS
1315 looks not very well thought. For now we latch
1316 options, received in the last packet, enqueued
1317 by tcp. Feel free to propose better solution.
1321 opt_skb = skb_clone(skb, sk_gfp_mask(sk, GFP_ATOMIC));
1323 if (sk->sk_state == TCP_ESTABLISHED) { /* Fast path */
1324 struct dst_entry *dst = sk->sk_rx_dst;
1326 sock_rps_save_rxhash(sk, skb);
1327 sk_mark_napi_id(sk, skb);
/* Invalidate the cached rx dst if the ifindex or route cookie changed. */
1329 if (inet_sk(sk)->rx_dst_ifindex != skb->skb_iif ||
1330 dst->ops->check(dst, np->rx_dst_cookie) == NULL) {
1332 sk->sk_rx_dst = NULL;
1336 tcp_rcv_established(sk, skb);
1338 goto ipv6_pktoptions;
1342 if (tcp_checksum_complete(skb))
1345 if (sk->sk_state == TCP_LISTEN) {
1346 struct sock *nsk = tcp_v6_cookie_check(sk, skb);
1352 if (tcp_child_process(sk, nsk, skb))
1355 __kfree_skb(opt_skb);
1359 sock_rps_save_rxhash(sk, skb);
1361 if (tcp_rcv_state_process(sk, skb))
1364 goto ipv6_pktoptions;
1368 tcp_v6_send_reset(sk, skb);
1371 __kfree_skb(opt_skb);
1375 TCP_INC_STATS(sock_net(sk), TCP_MIB_CSUMERRORS);
1376 TCP_INC_STATS(sock_net(sk), TCP_MIB_INERRS);
1381 /* Do you ask, what is it?
1383 1. skb was enqueued by tcp.
1384 2. skb is added to tail of read queue, rather than out of order.
1385 3. socket is not in passive state.
1386 4. Finally, it really contains options, which user wants to receive.
1389 if (TCP_SKB_CB(opt_skb)->end_seq == tp->rcv_nxt &&
1390 !((1 << sk->sk_state) & (TCPF_CLOSE | TCPF_LISTEN))) {
1391 if (np->rxopt.bits.rxinfo || np->rxopt.bits.rxoinfo)
1392 np->mcast_oif = tcp_v6_iif(opt_skb);
1393 if (np->rxopt.bits.rxhlim || np->rxopt.bits.rxohlim)
1394 np->mcast_hops = ipv6_hdr(opt_skb)->hop_limit;
1395 if (np->rxopt.bits.rxflow || np->rxopt.bits.rxtclass)
1396 np->rcv_flowinfo = ip6_flowinfo(ipv6_hdr(opt_skb));
1398 np->flow_label = ip6_flowlabel(ipv6_hdr(opt_skb));
1399 if (ipv6_opt_accepted(sk, opt_skb, &TCP_SKB_CB(opt_skb)->header.h6)) {
1400 skb_set_owner_r(opt_skb, sk);
1401 tcp_v6_restore_cb(opt_skb);
1402 opt_skb = xchg(&np->pktoptions, opt_skb);
1404 __kfree_skb(opt_skb);
1405 opt_skb = xchg(&np->pktoptions, NULL);
/* tcp_v6_fill_cb(): populate TCP_SKB_CB() from the TCP header, relocating
 * the IPv6 control block first.  Must run after xfrm6_policy_check() (see
 * comment below).
 * NOTE(review): the barrier() call this comment refers to sits on a line
 * not visible in this excerpt.
 */
1413 static void tcp_v6_fill_cb(struct sk_buff *skb, const struct ipv6hdr *hdr,
1414 const struct tcphdr *th)
1416 /* This is tricky: we move IP6CB at its correct location into
1417 * TCP_SKB_CB(). It must be done after xfrm6_policy_check(), because
1418 * _decode_session6() uses IP6CB().
1419 * barrier() makes sure compiler won't play aliasing games.
1421 memmove(&TCP_SKB_CB(skb)->header.h6, IP6CB(skb),
1422 sizeof(struct inet6_skb_parm));
1425 TCP_SKB_CB(skb)->seq = ntohl(th->seq);
/* end_seq counts SYN and FIN as one sequence unit each, plus payload. */
1426 TCP_SKB_CB(skb)->end_seq = (TCP_SKB_CB(skb)->seq + th->syn + th->fin +
1427 skb->len - th->doff*4);
1428 TCP_SKB_CB(skb)->ack_seq = ntohl(th->ack_seq);
1429 TCP_SKB_CB(skb)->tcp_flags = tcp_flag_byte(th);
1430 TCP_SKB_CB(skb)->tcp_tw_isn = 0;
1431 TCP_SKB_CB(skb)->ip_dsfield = ipv6_get_dsfield(hdr);
1432 TCP_SKB_CB(skb)->sacked = 0;
/* True if either a software or a hardware receive timestamp is present. */
1433 TCP_SKB_CB(skb)->has_rxtstamp =
1434 skb->tstamp || skb_hwtstamps(skb)->hwtstamp;
/* tcp_v6_rcv(): main IPv6 TCP input handler, registered as
 * tcpv6_protocol.handler.  Validates the header, looks up the owning
 * socket and dispatches to the ESTABLISHED / LISTEN / NEW_SYN_RECV /
 * TIME_WAIT paths.
 * NOTE(review): partial listing -- the no_tcp_socket/discard_it/
 * lookup/process labels, bare_skb handling and several braces are not
 * visible here; code lines kept byte-identical.
 */
1437 static int tcp_v6_rcv(struct sk_buff *skb)
1439 int sdif = inet6_sdif(skb);
1440 const struct tcphdr *th;
1441 const struct ipv6hdr *hdr;
1445 struct net *net = dev_net(skb->dev);
/* Only packets addressed to this host are processed. */
1447 if (skb->pkt_type != PACKET_HOST)
1451 * Count it even if it's bad.
1453 __TCP_INC_STATS(net, TCP_MIB_INSEGS);
1455 if (!pskb_may_pull(skb, sizeof(struct tcphdr)))
1458 th = (const struct tcphdr *)skb->data;
/* doff is in 32-bit words; anything below the minimal header is bogus. */
1460 if (unlikely(th->doff < sizeof(struct tcphdr)/4))
1462 if (!pskb_may_pull(skb, th->doff*4))
1465 if (skb_checksum_init(skb, IPPROTO_TCP, ip6_compute_pseudo))
/* Re-read the pointers: pskb_may_pull() may have reallocated the head. */
1468 th = (const struct tcphdr *)skb->data;
1469 hdr = ipv6_hdr(skb);
1472 sk = __inet6_lookup_skb(&tcp_hashinfo, skb, __tcp_hdrlen(th),
1473 th->source, th->dest, inet6_iif(skb), sdif,
1479 if (sk->sk_state == TCP_TIME_WAIT)
/* Half-open request socket: validate against the listener. */
1482 if (sk->sk_state == TCP_NEW_SYN_RECV) {
1483 struct request_sock *req = inet_reqsk(sk);
1484 bool req_stolen = false;
1487 sk = req->rsk_listener;
1488 if (tcp_v6_inbound_md5_hash(sk, skb)) {
1489 sk_drops_add(sk, skb);
1493 if (tcp_checksum_complete(skb)) {
/* Listener left LISTEN (e.g. closed): drop the pending request. */
1497 if (unlikely(sk->sk_state != TCP_LISTEN)) {
1498 inet_csk_reqsk_queue_drop_and_put(sk, req);
1504 if (!tcp_filter(sk, skb)) {
1505 th = (const struct tcphdr *)skb->data;
1506 hdr = ipv6_hdr(skb);
1507 tcp_v6_fill_cb(skb, hdr, th);
1508 nsk = tcp_check_req(sk, skb, req, false, &req_stolen);
1513 /* Another cpu got exclusive access to req
1514 * and created a full blown socket.
1515 * Try to feed this packet to this socket
1516 * instead of discarding it.
1518 tcp_v6_restore_cb(skb);
1522 goto discard_and_relse;
1526 tcp_v6_restore_cb(skb);
1527 } else if (tcp_child_process(sk, nsk, skb)) {
1528 tcp_v6_send_reset(nsk, skb);
1529 goto discard_and_relse;
/* Full socket path: hop-limit policing, xfrm, MD5, socket filter. */
1535 if (hdr->hop_limit < tcp_inet6_sk(sk)->min_hopcount) {
1536 __NET_INC_STATS(net, LINUX_MIB_TCPMINTTLDROP);
1537 goto discard_and_relse;
1540 if (!xfrm6_policy_check(sk, XFRM_POLICY_IN, skb))
1541 goto discard_and_relse;
1543 if (tcp_v6_inbound_md5_hash(sk, skb))
1544 goto discard_and_relse;
1546 if (tcp_filter(sk, skb))
1547 goto discard_and_relse;
1548 th = (const struct tcphdr *)skb->data;
1549 hdr = ipv6_hdr(skb);
1550 tcp_v6_fill_cb(skb, hdr, th);
1554 if (sk->sk_state == TCP_LISTEN) {
1555 ret = tcp_v6_do_rcv(sk, skb);
1556 goto put_and_return;
1559 sk_incoming_cpu_update(sk);
/* Process directly if the socket is not owned by user context;
 * otherwise queue on the backlog (bounded by tcp_add_backlog()).
 */
1561 bh_lock_sock_nested(sk);
1562 tcp_segs_in(tcp_sk(sk), skb);
1564 if (!sock_owned_by_user(sk)) {
1565 ret = tcp_v6_do_rcv(sk, skb);
1566 } else if (tcp_add_backlog(sk, skb)) {
1567 goto discard_and_relse;
1574 return ret ? -1 : 0;
/* No socket found: policy-check, then reset the sender. */
1577 if (!xfrm6_policy_check(NULL, XFRM_POLICY_IN, skb))
1580 tcp_v6_fill_cb(skb, hdr, th);
1582 if (tcp_checksum_complete(skb)) {
1584 __TCP_INC_STATS(net, TCP_MIB_CSUMERRORS);
1586 __TCP_INC_STATS(net, TCP_MIB_INERRS);
1588 tcp_v6_send_reset(NULL, skb);
1596 sk_drops_add(sk, skb);
/* TIME_WAIT handling. */
1602 if (!xfrm6_policy_check(NULL, XFRM_POLICY_IN, skb)) {
1603 inet_twsk_put(inet_twsk(sk));
1607 tcp_v6_fill_cb(skb, hdr, th);
1609 if (tcp_checksum_complete(skb)) {
1610 inet_twsk_put(inet_twsk(sk));
1614 switch (tcp_timewait_state_process(inet_twsk(sk), skb, th)) {
/* TCP_TW_SYN: a new SYN may legitimately reuse the tuple -- look for
 * a listener and, if found, retire the timewait socket and restart.
 */
1619 sk2 = inet6_lookup_listener(dev_net(skb->dev), &tcp_hashinfo,
1620 skb, __tcp_hdrlen(th),
1621 &ipv6_hdr(skb)->saddr, th->source,
1622 &ipv6_hdr(skb)->daddr,
1624 tcp_v6_iif_l3_slave(skb),
1627 struct inet_timewait_sock *tw = inet_twsk(sk);
1628 inet_twsk_deschedule_put(tw);
1630 tcp_v6_restore_cb(skb);
1638 tcp_v6_timewait_ack(sk, skb);
1641 tcp_v6_send_reset(sk, skb);
1642 inet_twsk_deschedule_put(inet_twsk(sk));
1644 case TCP_TW_SUCCESS:
/* tcp_v6_early_demux(): best-effort early socket/dst lookup, registered
 * as tcpv6_protocol.early_demux.  On a hit it attaches the socket and,
 * if the cached rx dst is still valid for this ifindex, the route --
 * sparing the full lookup later in tcp_v6_rcv().
 * NOTE(review): partial listing; some braces/returns not visible.
 */
1650 static void tcp_v6_early_demux(struct sk_buff *skb)
1652 const struct ipv6hdr *hdr;
1653 const struct tcphdr *th;
1656 if (skb->pkt_type != PACKET_HOST)
1659 if (!pskb_may_pull(skb, skb_transport_offset(skb) + sizeof(struct tcphdr)))
1662 hdr = ipv6_hdr(skb);
1665 if (th->doff < sizeof(struct tcphdr) / 4)
1668 /* Note : We use inet6_iif() here, not tcp_v6_iif() */
/* Established sockets only -- listeners are not demuxed early. */
1669 sk = __inet6_lookup_established(dev_net(skb->dev), &tcp_hashinfo,
1670 &hdr->saddr, th->source,
1671 &hdr->daddr, ntohs(th->dest),
1672 inet6_iif(skb), inet6_sdif(skb));
1675 skb->destructor = sock_edemux;
1676 if (sk_fullsock(sk)) {
1677 struct dst_entry *dst = READ_ONCE(sk->sk_rx_dst);
1680 dst = dst_check(dst, tcp_inet6_sk(sk)->rx_dst_cookie);
1682 inet_sk(sk)->rx_dst_ifindex == skb->skb_iif)
/* noref: dst is still owned by the socket, not the skb. */
1683 skb_dst_set_noref(skb, dst);
/* TIME_WAIT socket operations for TCPv6 (hooked in via tcpv6_prot.twsk_prot). */
1688 static struct timewait_sock_ops tcp6_timewait_sock_ops = {
1689 .twsk_obj_size = sizeof(struct tcp6_timewait_sock),
1690 .twsk_unique = tcp_twsk_unique,
1691 .twsk_destructor = tcp_twsk_destructor,
/* Address-family ops for native IPv6 TCP sockets (icsk->icsk_af_ops). */
1694 static const struct inet_connection_sock_af_ops ipv6_specific = {
1695 .queue_xmit = inet6_csk_xmit,
1696 .send_check = tcp_v6_send_check,
1697 .rebuild_header = inet6_sk_rebuild_header,
1698 .sk_rx_dst_set = inet6_sk_rx_dst_set,
1699 .conn_request = tcp_v6_conn_request,
1700 .syn_recv_sock = tcp_v6_syn_recv_sock,
1701 .net_header_len = sizeof(struct ipv6hdr),
1702 .net_frag_header_len = sizeof(struct frag_hdr),
1703 .setsockopt = ipv6_setsockopt,
1704 .getsockopt = ipv6_getsockopt,
1705 .addr2sockaddr = inet6_csk_addr2sockaddr,
1706 .sockaddr_len = sizeof(struct sockaddr_in6),
1707 #ifdef CONFIG_COMPAT
1708 .compat_setsockopt = compat_ipv6_setsockopt,
1709 .compat_getsockopt = compat_ipv6_getsockopt,
1711 .mtu_reduced = tcp_v6_mtu_reduced,
/* MD5 signature (RFC 2385) ops for native IPv6 sockets. */
1714 #ifdef CONFIG_TCP_MD5SIG
1715 static const struct tcp_sock_af_ops tcp_sock_ipv6_specific = {
1716 .md5_lookup = tcp_v6_md5_lookup,
1717 .calc_md5_hash = tcp_v6_md5_hash_skb,
1718 .md5_parse = tcp_v6_parse_md5_keys,
/* Address-family ops for v4-mapped sockets: an AF_INET6 socket carrying
 * IPv4 traffic.  Transmit/checksum/rebuild use the IPv4 helpers while
 * sockopt/sockaddr handling stays IPv6.
 */
1723 * TCP over IPv4 via INET6 API
1725 static const struct inet_connection_sock_af_ops ipv6_mapped = {
1726 .queue_xmit = ip_queue_xmit,
1727 .send_check = tcp_v4_send_check,
1728 .rebuild_header = inet_sk_rebuild_header,
1729 .sk_rx_dst_set = inet_sk_rx_dst_set,
1730 .conn_request = tcp_v6_conn_request,
1731 .syn_recv_sock = tcp_v6_syn_recv_sock,
1732 .net_header_len = sizeof(struct iphdr),
1733 .setsockopt = ipv6_setsockopt,
1734 .getsockopt = ipv6_getsockopt,
1735 .addr2sockaddr = inet6_csk_addr2sockaddr,
1736 .sockaddr_len = sizeof(struct sockaddr_in6),
1737 #ifdef CONFIG_COMPAT
1738 .compat_setsockopt = compat_ipv6_setsockopt,
1739 .compat_getsockopt = compat_ipv6_getsockopt,
1741 .mtu_reduced = tcp_v4_mtu_reduced,
/* MD5 signature ops for v4-mapped sockets (IPv4 hashing, IPv6 key parse). */
1744 #ifdef CONFIG_TCP_MD5SIG
1745 static const struct tcp_sock_af_ops tcp_sock_ipv6_mapped_specific = {
1746 .md5_lookup = tcp_v4_md5_lookup,
1747 .calc_md5_hash = tcp_v4_md5_hash_skb,
1748 .md5_parse = tcp_v6_parse_md5_keys,
/* tcp_v6_init_sock(): per-socket init hook (tcpv6_prot.init) -- installs
 * the IPv6 af_ops and, with MD5 enabled, the IPv6 MD5 ops.
 * NOTE(review): the generic tcp_init_sock() call and the return statement
 * of the upstream function are not visible in this excerpt.
 */
1752 /* NOTE: A lot of things set to zero explicitly by call to
1753 * sk_alloc() so need not be done here.
1755 static int tcp_v6_init_sock(struct sock *sk)
1757 struct inet_connection_sock *icsk = inet_csk(sk);
1761 icsk->icsk_af_ops = &ipv6_specific;
1763 #ifdef CONFIG_TCP_MD5SIG
1764 tcp_sk(sk)->af_specific = &tcp_sock_ipv6_specific;
/* tcp_v6_destroy_sock(): tcpv6_prot.destroy -- run the shared IPv4 TCP
 * teardown, then release IPv6-specific socket state.
 */
1770 static void tcp_v6_destroy_sock(struct sock *sk)
1772 tcp_v4_destroy_sock(sk);
1773 inet6_destroy_sock(sk);
/* get_openreq6(): emit one /proc/net/tcp6 row for a pending open request
 * (SYN_RECV).  Field layout must stay in lockstep with get_tcp6_sock()
 * and get_timewait6_sock() below.
 * NOTE(review): the seq_printf() call line itself is not visible in this
 * excerpt; the format string and arguments are.
 */
1776 #ifdef CONFIG_PROC_FS
1777 /* Proc filesystem TCPv6 sock list dumping. */
1778 static void get_openreq6(struct seq_file *seq,
1779 const struct request_sock *req, int i)
/* Remaining time until the request-sock timer fires (may go negative
 * transiently; presumably clamped upstream -- confirm against mainline).
 */
1781 long ttd = req->rsk_timer.expires - jiffies;
1782 const struct in6_addr *src = &inet_rsk(req)->ir_v6_loc_addr;
1783 const struct in6_addr *dest = &inet_rsk(req)->ir_v6_rmt_addr;
1789 "%4d: %08X%08X%08X%08X:%04X %08X%08X%08X%08X:%04X "
1790 "%02X %08X:%08X %02X:%08lX %08X %5u %8d %d %d %pK\n",
1792 src->s6_addr32[0], src->s6_addr32[1],
1793 src->s6_addr32[2], src->s6_addr32[3],
1794 inet_rsk(req)->ir_num,
1795 dest->s6_addr32[0], dest->s6_addr32[1],
1796 dest->s6_addr32[2], dest->s6_addr32[3],
1797 ntohs(inet_rsk(req)->ir_rmt_port),
1799 0, 0, /* could print option size, but that is af dependent. */
1800 1, /* timers active (only the expire timer) */
1801 jiffies_to_clock_t(ttd),
/* uid of the listener that owns this request. */
1803 from_kuid_munged(seq_user_ns(seq),
1804 sock_i_uid(req->rsk_listener)),
1805 0, /* non standard timer */
1806 0, /* open_requests have no inode */
/* get_tcp6_sock(): emit one /proc/net/tcp6 row for a full socket.
 * No socket lock is taken -- all reads are racy-but-safe snapshots.
 */
1810 static void get_tcp6_sock(struct seq_file *seq, struct sock *sp, int i)
1812 const struct in6_addr *dest, *src;
1815 unsigned long timer_expires;
1816 const struct inet_sock *inet = inet_sk(sp);
1817 const struct tcp_sock *tp = tcp_sk(sp);
1818 const struct inet_connection_sock *icsk = inet_csk(sp);
1819 const struct fastopen_queue *fastopenq = &icsk->icsk_accept_queue.fastopenq;
1823 dest = &sp->sk_v6_daddr;
1824 src = &sp->sk_v6_rcv_saddr;
1825 destp = ntohs(inet->inet_dport);
1826 srcp = ntohs(inet->inet_sport);
/* Classify the pending timer: retransmit-family, zero-window probe, or
 * keepalive (sk_timer); the "timer active" code printed below is chosen
 * on lines not visible in this excerpt.
 */
1828 if (icsk->icsk_pending == ICSK_TIME_RETRANS ||
1829 icsk->icsk_pending == ICSK_TIME_REO_TIMEOUT ||
1830 icsk->icsk_pending == ICSK_TIME_LOSS_PROBE) {
1832 timer_expires = icsk->icsk_timeout;
1833 } else if (icsk->icsk_pending == ICSK_TIME_PROBE0) {
1835 timer_expires = icsk->icsk_timeout;
1836 } else if (timer_pending(&sp->sk_timer)) {
1838 timer_expires = sp->sk_timer.expires;
1841 timer_expires = jiffies;
1844 state = inet_sk_state_load(sp);
/* Listeners report the accept backlog as their "rx queue". */
1845 if (state == TCP_LISTEN)
1846 rx_queue = sp->sk_ack_backlog;
1848 /* Because we don't lock the socket,
1849 * we might find a transient negative value.
1851 rx_queue = max_t(int, tp->rcv_nxt - tp->copied_seq, 0);
1854 "%4d: %08X%08X%08X%08X:%04X %08X%08X%08X%08X:%04X "
1855 "%02X %08X:%08X %02X:%08lX %08X %5u %8d %lu %d %pK %lu %lu %u %u %d\n",
1857 src->s6_addr32[0], src->s6_addr32[1],
1858 src->s6_addr32[2], src->s6_addr32[3], srcp,
1859 dest->s6_addr32[0], dest->s6_addr32[1],
1860 dest->s6_addr32[2], dest->s6_addr32[3], destp,
1862 tp->write_seq - tp->snd_una,
1865 jiffies_delta_to_clock_t(timer_expires - jiffies),
1866 icsk->icsk_retransmits,
1867 from_kuid_munged(seq_user_ns(seq), sock_i_uid(sp)),
1868 icsk->icsk_probes_out,
1870 refcount_read(&sp->sk_refcnt), sp,
1871 jiffies_to_clock_t(icsk->icsk_rto),
1872 jiffies_to_clock_t(icsk->icsk_ack.ato),
1873 (icsk->icsk_ack.quick << 1) | inet_csk_in_pingpong_mode(sp),
/* Last column: fastopen max_qlen for listeners, otherwise ssthresh
 * (-1 while still in initial slow start).
 */
1875 state == TCP_LISTEN ?
1876 fastopenq->max_qlen :
1877 (tcp_in_initial_slowstart(tp) ? -1 : tp->snd_ssthresh)
/* get_timewait6_sock(): emit one /proc/net/tcp6 row for a TIME_WAIT
 * socket.  Most columns are fixed zeros; timer code 3 denotes the
 * timewait timer.
 */
1881 static void get_timewait6_sock(struct seq_file *seq,
1882 struct inet_timewait_sock *tw, int i)
1884 long delta = tw->tw_timer.expires - jiffies;
1885 const struct in6_addr *dest, *src;
1888 dest = &tw->tw_v6_daddr;
1889 src = &tw->tw_v6_rcv_saddr;
1890 destp = ntohs(tw->tw_dport);
1891 srcp = ntohs(tw->tw_sport);
1894 "%4d: %08X%08X%08X%08X:%04X %08X%08X%08X%08X:%04X "
1895 "%02X %08X:%08X %02X:%08lX %08X %5d %8d %d %d %pK\n",
1897 src->s6_addr32[0], src->s6_addr32[1],
1898 src->s6_addr32[2], src->s6_addr32[3], srcp,
1899 dest->s6_addr32[0], dest->s6_addr32[1],
1900 dest->s6_addr32[2], dest->s6_addr32[3], destp,
1901 tw->tw_substate, 0, 0,
1902 3, jiffies_delta_to_clock_t(delta), 0, 0, 0, 0,
1903 refcount_read(&tw->tw_refcnt), tw);
/* tcp6_seq_show(): seq_file .show callback -- prints the header row for
 * SEQ_START_TOKEN, otherwise dispatches on socket state to the
 * appropriate row formatter above.
 */
1906 static int tcp6_seq_show(struct seq_file *seq, void *v)
1908 struct tcp_iter_state *st;
1909 struct sock *sk = v;
1911 if (v == SEQ_START_TOKEN) {
1916 "st tx_queue rx_queue tr tm->when retrnsmt"
1917 " uid timeout inode\n");
1922 if (sk->sk_state == TCP_TIME_WAIT)
1923 get_timewait6_sock(seq, v, st->num);
1924 else if (sk->sk_state == TCP_NEW_SYN_RECV)
1925 get_openreq6(seq, v, st->num);
1927 get_tcp6_sock(seq, v, st->num);
/* seq_file iteration ops for /proc/net/tcp6 (start/next/stop are the
 * generic TCP iterators shared with IPv4).
 */
1932 static const struct seq_operations tcp6_seq_ops = {
1933 .show = tcp6_seq_show,
1934 .start = tcp_seq_start,
1935 .next = tcp_seq_next,
1936 .stop = tcp_seq_stop,
/* Per-AF iterator state; the member initializers (presumably
 * .family = AF_INET6) are not visible in this excerpt.
 */
1939 static struct tcp_seq_afinfo tcp6_seq_afinfo = {
/* tcp6_proc_init(): create the per-netns /proc/net/tcp6 entry.
 * NOTE(review): return statements of the upstream function are not
 * visible in this excerpt.
 */
1943 int __net_init tcp6_proc_init(struct net *net)
1945 if (!proc_create_net_data("tcp6", 0444, net->proc_net, &tcp6_seq_ops,
1946 sizeof(struct tcp_iter_state), &tcp6_seq_afinfo))
/* tcp6_proc_exit(): remove the per-netns /proc/net/tcp6 entry. */
1951 void tcp6_proc_exit(struct net *net)
1953 remove_proc_entry("tcp6", net->proc_net);
/* tcpv6_prot: the struct proto backing AF_INET6/SOCK_STREAM sockets.
 * Most operations are the generic TCP implementations; v6-specific
 * entries are init/destroy/backlog_rcv.  SLAB_TYPESAFE_BY_RCU allows
 * lockless socket lookup under RCU.
 */
1957 struct proto tcpv6_prot = {
1959 .owner = THIS_MODULE,
1961 .pre_connect = tcp_v6_pre_connect,
1962 .connect = tcp_v6_connect,
1963 .disconnect = tcp_disconnect,
1964 .accept = inet_csk_accept,
1966 .init = tcp_v6_init_sock,
1967 .destroy = tcp_v6_destroy_sock,
1968 .shutdown = tcp_shutdown,
1969 .setsockopt = tcp_setsockopt,
1970 .getsockopt = tcp_getsockopt,
1971 .keepalive = tcp_set_keepalive,
1972 .recvmsg = tcp_recvmsg,
1973 .sendmsg = tcp_sendmsg,
1974 .sendpage = tcp_sendpage,
1975 .backlog_rcv = tcp_v6_do_rcv,
1976 .release_cb = tcp_release_cb,
1978 .unhash = inet_unhash,
1979 .get_port = inet_csk_get_port,
1980 .enter_memory_pressure = tcp_enter_memory_pressure,
1981 .leave_memory_pressure = tcp_leave_memory_pressure,
1982 .stream_memory_free = tcp_stream_memory_free,
1983 .sockets_allocated = &tcp_sockets_allocated,
1984 .memory_allocated = &tcp_memory_allocated,
1985 .memory_pressure = &tcp_memory_pressure,
1986 .orphan_count = &tcp_orphan_count,
1987 .sysctl_mem = sysctl_tcp_mem,
1988 .sysctl_wmem_offset = offsetof(struct net, ipv4.sysctl_tcp_wmem),
1989 .sysctl_rmem_offset = offsetof(struct net, ipv4.sysctl_tcp_rmem),
1990 .max_header = MAX_TCP_HEADER,
1991 .obj_size = sizeof(struct tcp6_sock),
1992 .slab_flags = SLAB_TYPESAFE_BY_RCU,
1993 .twsk_prot = &tcp6_timewait_sock_ops,
1994 .rsk_prot = &tcp6_request_sock_ops,
1995 .h.hashinfo = &tcp_hashinfo,
1996 .no_autobind = true,
1997 #ifdef CONFIG_COMPAT
1998 .compat_setsockopt = compat_tcp_setsockopt,
1999 .compat_getsockopt = compat_tcp_getsockopt,
2001 .diag_destroy = tcp_abort,
/* L4 protocol registration for IPPROTO_TCP over IPv6 (see tcpv6_init()). */
2004 /* thinking of making this const? Don't.
2005 * early_demux can change based on sysctl.
2007 static struct inet6_protocol tcpv6_protocol = {
2008 .early_demux = tcp_v6_early_demux,
2009 .early_demux_handler = tcp_v6_early_demux,
2010 .handler = tcp_v6_rcv,
2011 .err_handler = tcp_v6_err,
2012 .flags = INET6_PROTO_NOPOLICY|INET6_PROTO_FINAL,
/* Socket-switch entry mapping SOCK_STREAM/IPPROTO_TCP on AF_INET6 to
 * tcpv6_prot and the generic inet6 stream ops.
 * NOTE(review): the second flag OR'd after INET_PROTOSW_PERMANENT is on
 * a line not visible in this excerpt.
 */
2015 static struct inet_protosw tcpv6_protosw = {
2016 .type = SOCK_STREAM,
2017 .protocol = IPPROTO_TCP,
2018 .prot = &tcpv6_prot,
2019 .ops = &inet6_stream_ops,
2020 .flags = INET_PROTOSW_PERMANENT |
/* Per-netns setup: create the kernel control socket used for sending
 * resets/ACKs on behalf of this namespace.
 */
2024 static int __net_init tcpv6_net_init(struct net *net)
2026 return inet_ctl_sock_create(&net->ipv6.tcp_sk, PF_INET6,
2027 SOCK_RAW, IPPROTO_TCP, net);
/* Per-netns teardown: destroy the control socket created above. */
2030 static void __net_exit tcpv6_net_exit(struct net *net)
2032 inet_ctl_sock_destroy(net->ipv6.tcp_sk);
/* Batched netns exit: purge IPv6 TIME_WAIT sockets for all namespaces
 * being torn down in one pass.
 */
2035 static void __net_exit tcpv6_net_exit_batch(struct list_head *net_exit_list)
2037 inet_twsk_purge(&tcp_hashinfo, AF_INET6);
2040 static struct pernet_operations tcpv6_net_ops = {
2041 .init = tcpv6_net_init,
2042 .exit = tcpv6_net_exit,
2043 .exit_batch = tcpv6_net_exit_batch,
/* tcpv6_init(): module init -- register the L4 protocol, the protosw
 * entry, and the pernet ops, unwinding in reverse order on failure
 * (goto-cleanup; some labels/returns not visible in this excerpt).
 */
2046 int __init tcpv6_init(void)
2050 ret = inet6_add_protocol(&tcpv6_protocol, IPPROTO_TCP);
2054 /* register inet6 protocol */
2055 ret = inet6_register_protosw(&tcpv6_protosw);
2057 goto out_tcpv6_protocol;
2059 ret = register_pernet_subsys(&tcpv6_net_ops);
2061 goto out_tcpv6_protosw;
/* error unwind labels: */
2066 inet6_unregister_protosw(&tcpv6_protosw);
2068 inet6_del_protocol(&tcpv6_protocol, IPPROTO_TCP);
/* tcpv6_exit(): module exit -- unregister in exact reverse order of
 * tcpv6_init().
 */
2072 void tcpv6_exit(void)
2074 unregister_pernet_subsys(&tcpv6_net_ops);
2075 inet6_unregister_protosw(&tcpv6_protosw);
2076 inet6_del_protocol(&tcpv6_protocol, IPPROTO_TCP);