/*
 * INET		An implementation of the TCP/IP protocol suite for the LINUX
 *		operating system.  INET is implemented using the BSD Socket
 *		interface as the means of communication with the user level.
 *
 *		Implementation of the Transmission Control Protocol (TCP).
 *
 *		IPv4 specific functions
 *
 *		code split from:
 *		linux/ipv4/tcp.c
 *		linux/ipv4/tcp_input.c
 *		linux/ipv4/tcp_output.c
 *
 *		See tcp.c for author information
 *
 *	This program is free software; you can redistribute it and/or
 *	modify it under the terms of the GNU General Public License
 *	as published by the Free Software Foundation; either version
 *	2 of the License, or (at your option) any later version.
 */

/*
 * Changes:
 *		David S. Miller	:	New socket lookup architecture.
 *					This code is dedicated to John Dyson.
 *		David S. Miller :	Change semantics of established hash,
 *					half is devoted to TIME_WAIT sockets
 *					and the rest go in the other half.
 *		Andi Kleen :		Add support for syncookies and fixed
 *					some bugs: ip options weren't passed to
 *					the TCP layer, missed a check for an
 *					ACK bit.
 *		Andi Kleen :		Implemented fast path mtu discovery.
 *					Fixed many serious bugs in the
 *					request_sock handling and moved
 *					most of it into the af independent code.
 *					Added tail drop and some other bugfixes.
 *					Added new listen semantics.
 *		Mike McLagan	:	Routing by source
 *	Juan Jose Ciarlante:		ip_dynaddr bits
 *		Andi Kleen:		various fixes.
 *	Vitaly E. Lavrov	:	Transparent proxy revived after year
 *					coma.
 *	Andi Kleen		:	Fix new listen.
 *	Andi Kleen		:	Fix accept error reporting.
 *	YOSHIFUJI Hideaki @USAGI and:	Support IPV6_V6ONLY socket option, which
 *	Alexey Kuznetsov		allow both IPv4 and IPv6 sockets to bind
 *					a single port at the same time.
 */

#define pr_fmt(fmt) "TCP: " fmt

#include <linux/bottom_half.h>
#include <linux/types.h>
#include <linux/fcntl.h>
#include <linux/module.h>
#include <linux/random.h>
#include <linux/cache.h>
#include <linux/jhash.h>
#include <linux/init.h>
#include <linux/times.h>
#include <linux/slab.h>

#include <net/net_namespace.h>
#include <net/icmp.h>
#include <net/inet_hashtables.h>
#include <net/tcp.h>
#include <net/transp_v6.h>
#include <net/ipv6.h>
#include <net/inet_common.h>
#include <net/timewait_sock.h>
#include <net/xfrm.h>
#include <net/netdma.h>
#include <net/secure_seq.h>
#include <net/tcp_memcontrol.h>

#include <linux/inet.h>
#include <linux/ipv6.h>
#include <linux/stddef.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>

#include <linux/crypto.h>
#include <linux/scatterlist.h>

int sysctl_tcp_tw_reuse __read_mostly;
int sysctl_tcp_low_latency __read_mostly;
EXPORT_SYMBOL(sysctl_tcp_low_latency);
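
/* A note for readers (illustrative, not part of the original source): both
 * knobs above are exposed through the standard sysctl tree, e.g.
 *
 *	sysctl -w net.ipv4.tcp_tw_reuse=1
 *
 * which lets new outgoing connections reuse ports held by TIME-WAIT
 * sockets when it is safe to do so; see tcp_twsk_unique() below for the
 * exact conditions.
 */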

#ifdef CONFIG_TCP_MD5SIG
static int tcp_v4_md5_hash_hdr(char *md5_hash, const struct tcp_md5sig_key *key,
			       __be32 daddr, __be32 saddr, const struct tcphdr *th);
#endif

struct inet_hashinfo tcp_hashinfo;
EXPORT_SYMBOL(tcp_hashinfo);

static inline __u32 tcp_v4_init_sequence(const struct sk_buff *skb)
{
	return secure_tcp_sequence_number(ip_hdr(skb)->daddr,
					  ip_hdr(skb)->saddr,
					  tcp_hdr(skb)->dest,
					  tcp_hdr(skb)->source);
}

int tcp_twsk_unique(struct sock *sk, struct sock *sktw, void *twp)
{
	const struct tcp_timewait_sock *tcptw = tcp_twsk(sktw);
	struct tcp_sock *tp = tcp_sk(sk);

	/* With PAWS, it is safe from the viewpoint
	   of data integrity. Even without PAWS it is safe provided sequence
	   spaces do not overlap i.e. at data rates <= 80Mbit/sec.

	   Actually, the idea is close to VJ's one, only the timestamp cache is
	   held not per host, but per port pair, and the TW bucket is used as
	   state holder.

	   If the TW bucket has already been destroyed we fall back to VJ's
	   scheme and use the initial timestamp retrieved from the peer table.
	 */
	if (tcptw->tw_ts_recent_stamp &&
	    (twp == NULL || (sysctl_tcp_tw_reuse &&
			     get_seconds() - tcptw->tw_ts_recent_stamp > 1))) {
		tp->write_seq = tcptw->tw_snd_nxt + 65535 + 2;
		if (tp->write_seq == 0)
			tp->write_seq = 1;
		tp->rx_opt.ts_recent	   = tcptw->tw_ts_recent;
		tp->rx_opt.ts_recent_stamp = tcptw->tw_ts_recent_stamp;
		sock_hold(sktw);
		return 1;
	}

	return 0;
}
EXPORT_SYMBOL_GPL(tcp_twsk_unique);

/* This will initiate an outgoing connection. */
int tcp_v4_connect(struct sock *sk, struct sockaddr *uaddr, int addr_len)
{
	struct sockaddr_in *usin = (struct sockaddr_in *)uaddr;
	struct inet_sock *inet = inet_sk(sk);
	struct tcp_sock *tp = tcp_sk(sk);
	__be16 orig_sport, orig_dport;
	__be32 daddr, nexthop;
	struct flowi4 *fl4;
	struct rtable *rt;
	int err;
	struct ip_options_rcu *inet_opt;

	if (addr_len < sizeof(struct sockaddr_in))
		return -EINVAL;

	if (usin->sin_family != AF_INET)
		return -EAFNOSUPPORT;

	nexthop = daddr = usin->sin_addr.s_addr;
	inet_opt = rcu_dereference_protected(inet->inet_opt,
					     sock_owned_by_user(sk));
	if (inet_opt && inet_opt->opt.srr) {
		if (!daddr)
			return -EINVAL;
		nexthop = inet_opt->opt.faddr;
	}

	orig_sport = inet->inet_sport;
	orig_dport = usin->sin_port;
	fl4 = &inet->cork.fl.u.ip4;
	rt = ip_route_connect(fl4, nexthop, inet->inet_saddr,
			      RT_CONN_FLAGS(sk), sk->sk_bound_dev_if,
			      IPPROTO_TCP,
			      orig_sport, orig_dport, sk, true);
	if (IS_ERR(rt)) {
		err = PTR_ERR(rt);
		if (err == -ENETUNREACH)
			IP_INC_STATS_BH(sock_net(sk), IPSTATS_MIB_OUTNOROUTES);
		return err;
	}

	if (rt->rt_flags & (RTCF_MULTICAST | RTCF_BROADCAST)) {
		ip_rt_put(rt);
		return -ENETUNREACH;
	}

	if (!inet_opt || !inet_opt->opt.srr)
		daddr = fl4->daddr;

	if (!inet->inet_saddr)
		inet->inet_saddr = fl4->saddr;
	inet->inet_rcv_saddr = inet->inet_saddr;

	if (tp->rx_opt.ts_recent_stamp && inet->inet_daddr != daddr) {
		/* Reset inherited state */
		tp->rx_opt.ts_recent	   = 0;
		tp->rx_opt.ts_recent_stamp = 0;
		if (likely(!tp->repair))
			tp->write_seq	   = 0;
	}

	if (tcp_death_row.sysctl_tw_recycle &&
	    !tp->rx_opt.ts_recent_stamp && fl4->daddr == daddr)
		tcp_fetch_timewait_stamp(sk, &rt->dst);

	inet->inet_dport = usin->sin_port;
	inet->inet_daddr = daddr;

	inet_csk(sk)->icsk_ext_hdr_len = 0;
	if (inet_opt)
		inet_csk(sk)->icsk_ext_hdr_len = inet_opt->opt.optlen;

	tp->rx_opt.mss_clamp = TCP_MSS_DEFAULT;

	/* Socket identity is still unknown (sport may be zero).
	 * However we set state to SYN-SENT and, without releasing the socket
	 * lock, select a source port, enter ourselves into the hash tables and
	 * complete initialization after this.
	 */
	tcp_set_state(sk, TCP_SYN_SENT);
	err = inet_hash_connect(&tcp_death_row, sk);
	if (err)
		goto failure;

	rt = ip_route_newports(fl4, rt, orig_sport, orig_dport,
			       inet->inet_sport, inet->inet_dport, sk);
	if (IS_ERR(rt)) {
		err = PTR_ERR(rt);
		rt = NULL;
		goto failure;
	}
	/* OK, now commit destination to socket.  */
	sk->sk_gso_type = SKB_GSO_TCPV4;
	sk_setup_caps(sk, &rt->dst);

	if (!tp->write_seq && likely(!tp->repair))
		tp->write_seq = secure_tcp_sequence_number(inet->inet_saddr,
							   inet->inet_daddr,
							   inet->inet_sport,
							   usin->sin_port);

	inet->inet_id = tp->write_seq ^ jiffies;

	err = tcp_connect(sk);

	rt = NULL;
	if (err)
		goto failure;

	return 0;

failure:
	/*
	 * This unhashes the socket and releases the local port,
	 * if necessary.
	 */
	tcp_set_state(sk, TCP_CLOSE);
	ip_rt_put(rt);
	sk->sk_route_caps = 0;
	inet->inet_dport = 0;
	return err;
}
EXPORT_SYMBOL(tcp_v4_connect);
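
/* Illustrative user-space sketch (not part of the kernel build): the path
 * above runs when an application performs an active open, e.g.
 *
 *	int fd = socket(AF_INET, SOCK_STREAM, 0);
 *	struct sockaddr_in dst = {
 *		.sin_family = AF_INET,
 *		.sin_port   = htons(80),
 *	};
 *	inet_pton(AF_INET, "192.0.2.1", &dst.sin_addr);
 *	connect(fd, (struct sockaddr *)&dst, sizeof(dst));
 *
 * connect() reaches tcp_v4_connect() through the protocol ops: the flow is
 * routed, a source port is chosen under the socket lock, and tcp_connect()
 * emits the SYN.
 */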

/*
 * This routine reacts to ICMP_FRAG_NEEDED mtu indications as defined in RFC1191.
 * It can be called through tcp_release_cb() if socket was owned by user
 * at the time tcp_v4_err() was called to handle ICMP message.
 */
static void tcp_v4_mtu_reduced(struct sock *sk)
{
	struct dst_entry *dst;
	struct inet_sock *inet = inet_sk(sk);
	u32 mtu = tcp_sk(sk)->mtu_info;

	/* We are not interested in TCP_LISTEN and open_requests
	 * (SYN-ACKs sent out by Linux are always < 576 bytes, so they
	 * should go through unfragmented).
	 */
	if (sk->sk_state == TCP_LISTEN)
		return;

	dst = inet_csk_update_pmtu(sk, mtu);
	if (!dst)
		return;

	/* Something is about to be wrong... Remember the soft error
	 * for the case this connection will not be able to recover.
	 */
	if (mtu < dst_mtu(dst) && ip_dont_fragment(sk, dst))
		sk->sk_err_soft = EMSGSIZE;

	mtu = dst_mtu(dst);

	if (inet->pmtudisc != IP_PMTUDISC_DONT &&
	    inet_csk(sk)->icsk_pmtu_cookie > mtu) {
		tcp_sync_mss(sk, mtu);

		/* Resend the TCP packet because it's
		 * clear that the old packet has been
		 * dropped. This is the new "fast" path mtu
		 * discovery.
		 */
		tcp_simple_retransmit(sk);
	} /* else let the usual retransmit timer handle it */
}

static void do_redirect(struct sk_buff *skb, struct sock *sk)
{
	struct dst_entry *dst = __sk_dst_check(sk, 0);

	if (dst)
		dst->ops->redirect(dst, sk, skb);
}

/*
 * This routine is called by the ICMP module when it gets some
 * sort of error condition.  If err < 0 then the socket should
 * be closed and the error returned to the user.  If err > 0
 * it's just the icmp type << 8 | icmp code.  After adjustment
 * header points to the first 8 bytes of the tcp header.  We need
 * to find the appropriate port.
 *
 * The locking strategy used here is very "optimistic". When
 * someone else accesses the socket the ICMP is just dropped
 * and for some paths there is no check at all.
 * A more general error queue to queue errors for later handling
 * is probably better.
 */

void tcp_v4_err(struct sk_buff *icmp_skb, u32 info)
{
	const struct iphdr *iph = (const struct iphdr *)icmp_skb->data;
	struct tcphdr *th = (struct tcphdr *)(icmp_skb->data + (iph->ihl << 2));
	struct inet_connection_sock *icsk;
	struct tcp_sock *tp;
	struct inet_sock *inet;
	const int type = icmp_hdr(icmp_skb)->type;
	const int code = icmp_hdr(icmp_skb)->code;
	struct sock *sk;
	struct sk_buff *skb;
	struct request_sock *req;
	__u32 seq;
	__u32 remaining;
	int err;
	struct net *net = dev_net(icmp_skb->dev);

	if (icmp_skb->len < (iph->ihl << 2) + 8) {
		ICMP_INC_STATS_BH(net, ICMP_MIB_INERRORS);
		return;
	}

	sk = inet_lookup(net, &tcp_hashinfo, iph->daddr, th->dest,
			iph->saddr, th->source, inet_iif(icmp_skb));
	if (!sk) {
		ICMP_INC_STATS_BH(net, ICMP_MIB_INERRORS);
		return;
	}
	if (sk->sk_state == TCP_TIME_WAIT) {
		inet_twsk_put(inet_twsk(sk));
		return;
	}

	bh_lock_sock(sk);
	/* If too many ICMPs get dropped on busy
	 * servers this needs to be solved differently.
	 * We do take care of PMTU discovery (RFC1191) special case :
	 * we can receive locally generated ICMP messages while socket is held.
	 */
	if (sock_owned_by_user(sk)) {
		if (!(type == ICMP_DEST_UNREACH && code == ICMP_FRAG_NEEDED))
			NET_INC_STATS_BH(net, LINUX_MIB_LOCKDROPPEDICMPS);
	}
	if (sk->sk_state == TCP_CLOSE)
		goto out;

	if (unlikely(iph->ttl < inet_sk(sk)->min_ttl)) {
		NET_INC_STATS_BH(net, LINUX_MIB_TCPMINTTLDROP);
		goto out;
	}

	icsk = inet_csk(sk);
	tp = tcp_sk(sk);
	req = tp->fastopen_rsk;
	seq = ntohl(th->seq);
	if (sk->sk_state != TCP_LISTEN &&
	    !between(seq, tp->snd_una, tp->snd_nxt) &&
	    (req == NULL || seq != tcp_rsk(req)->snt_isn)) {
		/* For a Fast Open socket, allow seq to be snt_isn. */
		NET_INC_STATS_BH(net, LINUX_MIB_OUTOFWINDOWICMPS);
		goto out;
	}

	switch (type) {
	case ICMP_REDIRECT:
		do_redirect(icmp_skb, sk);
		goto out;
	case ICMP_SOURCE_QUENCH:
		/* Just silently ignore these. */
		goto out;
	case ICMP_PARAMETERPROB:
		err = EPROTO;
		break;
	case ICMP_DEST_UNREACH:
		if (code > NR_ICMP_UNREACH)
			goto out;

		if (code == ICMP_FRAG_NEEDED) { /* PMTU discovery (RFC1191) */
			tp->mtu_info = info;
			if (!sock_owned_by_user(sk)) {
				tcp_v4_mtu_reduced(sk);
			} else {
				if (!test_and_set_bit(TCP_MTU_REDUCED_DEFERRED, &tp->tsq_flags))
					sock_hold(sk);
			}
			goto out;
		}

		err = icmp_err_convert[code].errno;
		/* check if icmp_skb allows revert of backoff
		 * (see draft-zimmermann-tcp-lcd) */
		if (code != ICMP_NET_UNREACH && code != ICMP_HOST_UNREACH)
			break;
		if (seq != tp->snd_una || !icsk->icsk_retransmits ||
		    !icsk->icsk_backoff)
			break;

		/* XXX (TFO) - revisit the following logic for TFO */

		if (sock_owned_by_user(sk))
			break;

		icsk->icsk_backoff--;
		inet_csk(sk)->icsk_rto = (tp->srtt ? __tcp_set_rto(tp) :
			TCP_TIMEOUT_INIT) << icsk->icsk_backoff;
		tcp_bound_rto(sk);

		skb = tcp_write_queue_head(sk);
		BUG_ON(!skb);

		remaining = icsk->icsk_rto - min(icsk->icsk_rto,
				tcp_time_stamp - TCP_SKB_CB(skb)->when);

		if (remaining) {
			inet_csk_reset_xmit_timer(sk, ICSK_TIME_RETRANS,
						  remaining, TCP_RTO_MAX);
		} else {
			/* RTO revert clocked out retransmission.
			 * Will retransmit now */
			tcp_retransmit_timer(sk);
		}

		break;
	case ICMP_TIME_EXCEEDED:
		err = EHOSTUNREACH;
		break;
	default:
		goto out;
	}

	/* XXX (TFO) - if it's a TFO socket and has been accepted, rather
	 * than following the TCP_SYN_RECV case and closing the socket,
	 * we ignore the ICMP error and keep trying like a fully established
	 * socket. Is this the right thing to do?
	 */
	if (req && req->sk == NULL)
		goto out;

	switch (sk->sk_state) {
		struct request_sock *req, **prev;
	case TCP_LISTEN:
		if (sock_owned_by_user(sk))
			goto out;

		req = inet_csk_search_req(sk, &prev, th->dest,
					  iph->daddr, iph->saddr);
		if (!req)
			goto out;

		/* ICMPs are not backlogged, hence we cannot get
		   an established socket here.
		 */
		WARN_ON(req->sk);

		if (seq != tcp_rsk(req)->snt_isn) {
			NET_INC_STATS_BH(net, LINUX_MIB_OUTOFWINDOWICMPS);
			goto out;
		}

		/*
		 * Still in SYN_RECV, just remove it silently.
		 * There is no good way to pass the error to the newly
		 * created socket, and POSIX does not want network
		 * errors returned from accept().
		 */
		inet_csk_reqsk_queue_drop(sk, req, prev);
		goto out;

	case TCP_SYN_SENT:
	case TCP_SYN_RECV:  /* Cannot happen.
			       It can, for example, if SYNs crossed,
			       or Fast Open.
			     */
		if (!sock_owned_by_user(sk)) {
			sk->sk_err = err;

			sk->sk_error_report(sk);

			tcp_done(sk);
		} else {
			sk->sk_err_soft = err;
		}
		goto out;
	}

	/* If we've already connected we will keep trying
	 * until we time out, or the user gives up.
	 *
	 * rfc1122 4.2.3.9 allows to consider as hard errors
	 * only PROTO_UNREACH and PORT_UNREACH (well, FRAG_FAILED too,
	 * but it is obsoleted by pmtu discovery).
	 *
	 * Note, that in the modern internet, where routing is unreliable
	 * and in each dark corner broken firewalls sit, sending random
	 * errors ordered by their masters, even these two messages finally
	 * lose their original sense (even Linux sends invalid PORT_UNREACHs)
	 *
	 * Now we are in compliance with RFCs.
	 *							--ANK (980905)
	 */

	inet = inet_sk(sk);
	if (!sock_owned_by_user(sk) && inet->recverr) {
		sk->sk_err = err;
		sk->sk_error_report(sk);
	} else	{ /* Only an error on timeout */
		sk->sk_err_soft = err;
	}

out:
	bh_unlock_sock(sk);
	sock_put(sk);
}

static void __tcp_v4_send_check(struct sk_buff *skb,
				__be32 saddr, __be32 daddr)
{
	struct tcphdr *th = tcp_hdr(skb);

	if (skb->ip_summed == CHECKSUM_PARTIAL) {
		th->check = ~tcp_v4_check(skb->len, saddr, daddr, 0);
		skb->csum_start = skb_transport_header(skb) - skb->head;
		skb->csum_offset = offsetof(struct tcphdr, check);
	} else {
		th->check = tcp_v4_check(skb->len, saddr, daddr,
					 csum_partial(th,
						      th->doff << 2,
						      skb->csum));
	}
}

/* This routine computes an IPv4 TCP checksum. */
void tcp_v4_send_check(struct sock *sk, struct sk_buff *skb)
{
	const struct inet_sock *inet = inet_sk(sk);

	__tcp_v4_send_check(skb, inet->inet_saddr, inet->inet_daddr);
}
EXPORT_SYMBOL(tcp_v4_send_check);
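
/* A note on the two branches above (informal explanation, wording mine):
 * with CHECKSUM_PARTIAL the stack only folds the pseudo-header (source and
 * destination address, protocol, TCP length) into th->check and records,
 * via csum_start/csum_offset, where the device must complete the 16-bit
 * one's-complement sum over the TCP header and payload. Without offload,
 * the full sum is computed in software by csum_partial() over the header
 * plus the accumulated payload checksum, then folded by tcp_v4_check().
 */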

int tcp_v4_gso_send_check(struct sk_buff *skb)
{
	const struct iphdr *iph;
	struct tcphdr *th;

	if (!pskb_may_pull(skb, sizeof(*th)))
		return -EINVAL;

	iph = ip_hdr(skb);
	th = tcp_hdr(skb);

	th->check = 0;
	skb->ip_summed = CHECKSUM_PARTIAL;
	__tcp_v4_send_check(skb, iph->saddr, iph->daddr);
	return 0;
}

/*
 *	This routine will send an RST to the other tcp.
 *
 *	Someone asks: why I NEVER use socket parameters (TOS, TTL etc.)
 *		      for reset.
 *	Answer: if a packet caused RST, it is not for a socket
 *		existing in our system, if it is matched to a socket,
 *		it is just duplicate segment or bug in other side's TCP.
 *		So that we build reply only basing on parameters
 *		arrived with segment.
 *	Exception: precedence violation. We do not implement it in any case.
 */

static void tcp_v4_send_reset(struct sock *sk, struct sk_buff *skb)
{
	const struct tcphdr *th = tcp_hdr(skb);
	struct {
		struct tcphdr th;
#ifdef CONFIG_TCP_MD5SIG
		__be32 opt[(TCPOLEN_MD5SIG_ALIGNED >> 2)];
#endif
	} rep;
	struct ip_reply_arg arg;
#ifdef CONFIG_TCP_MD5SIG
	struct tcp_md5sig_key *key;
	const __u8 *hash_location = NULL;
	unsigned char newhash[16];
	int genhash;
	struct sock *sk1 = NULL;
#endif
	struct net *net;

	/* Never send a reset in response to a reset. */
	if (th->rst)
		return;

	if (skb_rtable(skb)->rt_type != RTN_LOCAL)
		return;

	/* Swap the send and the receive. */
	memset(&rep, 0, sizeof(rep));
	rep.th.dest   = th->source;
	rep.th.source = th->dest;
	rep.th.doff   = sizeof(struct tcphdr) / 4;
	rep.th.rst    = 1;

	if (th->ack) {
		rep.th.seq = th->ack_seq;
	} else {
		rep.th.ack = 1;
		rep.th.ack_seq = htonl(ntohl(th->seq) + th->syn + th->fin +
				       skb->len - (th->doff << 2));
	}

	memset(&arg, 0, sizeof(arg));
	arg.iov[0].iov_base = (unsigned char *)&rep;
	arg.iov[0].iov_len  = sizeof(rep.th);

#ifdef CONFIG_TCP_MD5SIG
	hash_location = tcp_parse_md5sig_option(th);
	if (!sk && hash_location) {
		/*
		 * The active side is lost. Try to find the listening socket
		 * through the source port, and then find the md5 key through
		 * the listening socket. We do not lose security here:
		 * the incoming packet is checked against the md5 hash with
		 * the found key; no RST is generated if the hash doesn't match.
		 */
		sk1 = __inet_lookup_listener(dev_net(skb_dst(skb)->dev),
					     &tcp_hashinfo, ip_hdr(skb)->daddr,
					     ntohs(th->source), inet_iif(skb));
		/* don't send rst if it can't find key */
		if (!sk1)
			return;
		rcu_read_lock();
		key = tcp_md5_do_lookup(sk1, (union tcp_md5_addr *)
					&ip_hdr(skb)->saddr, AF_INET);
		if (!key)
			goto release_sk1;

		genhash = tcp_v4_md5_hash_skb(newhash, key, NULL, NULL, skb);
		if (genhash || memcmp(hash_location, newhash, 16) != 0)
			goto release_sk1;
	} else {
		key = sk ? tcp_md5_do_lookup(sk, (union tcp_md5_addr *)
					     &ip_hdr(skb)->saddr,
					     AF_INET) : NULL;
	}

	if (key) {
		rep.opt[0] = htonl((TCPOPT_NOP << 24) |
				   (TCPOPT_NOP << 16) |
				   (TCPOPT_MD5SIG << 8) |
				   TCPOLEN_MD5SIG);
		/* Update length and the length the header thinks exists */
		arg.iov[0].iov_len += TCPOLEN_MD5SIG_ALIGNED;
		rep.th.doff = arg.iov[0].iov_len / 4;

		tcp_v4_md5_hash_hdr((__u8 *) &rep.opt[1],
				    key, ip_hdr(skb)->saddr,
				    ip_hdr(skb)->daddr, &rep.th);
	}
#endif
	arg.csum = csum_tcpudp_nofold(ip_hdr(skb)->daddr,
				      ip_hdr(skb)->saddr, /* XXX */
				      arg.iov[0].iov_len, IPPROTO_TCP, 0);
	arg.csumoffset = offsetof(struct tcphdr, check) / 2;
	arg.flags = (sk && inet_sk(sk)->transparent) ? IP_REPLY_ARG_NOSRCCHECK : 0;
	/* When the socket is gone, all binding information is lost.
	 * Routing might fail in this case. No choice here: if we choose to
	 * force the input interface, we will misroute in case of an
	 * asymmetric route.
	 */
	if (sk)
		arg.bound_dev_if = sk->sk_bound_dev_if;

	net = dev_net(skb_dst(skb)->dev);
	arg.tos = ip_hdr(skb)->tos;
	ip_send_unicast_reply(net, skb, ip_hdr(skb)->saddr,
			      ip_hdr(skb)->daddr, &arg, arg.iov[0].iov_len);

	TCP_INC_STATS_BH(net, TCP_MIB_OUTSEGS);
	TCP_INC_STATS_BH(net, TCP_MIB_OUTRSTS);

#ifdef CONFIG_TCP_MD5SIG
release_sk1:
	if (sk1) {
		rcu_read_unlock();
		sock_put(sk1);
	}
#endif
}

/* The code below, which sends ACKs in SYN-RECV and TIME-WAIT states
   outside socket context, is ugly, certainly. What can I do?
 */

static void tcp_v4_send_ack(struct sk_buff *skb, u32 seq, u32 ack,
			    u32 win, u32 ts, int oif,
			    struct tcp_md5sig_key *key,
			    int reply_flags, u8 tos)
{
	const struct tcphdr *th = tcp_hdr(skb);
	struct {
		struct tcphdr th;
		__be32 opt[(TCPOLEN_TSTAMP_ALIGNED >> 2)
#ifdef CONFIG_TCP_MD5SIG
			   + (TCPOLEN_MD5SIG_ALIGNED >> 2)
#endif
			];
	} rep;
	struct ip_reply_arg arg;
	struct net *net = dev_net(skb_dst(skb)->dev);

	memset(&rep.th, 0, sizeof(struct tcphdr));
	memset(&arg, 0, sizeof(arg));

	arg.iov[0].iov_base = (unsigned char *)&rep;
	arg.iov[0].iov_len  = sizeof(rep.th);
	if (ts) {
		rep.opt[0] = htonl((TCPOPT_NOP << 24) | (TCPOPT_NOP << 16) |
				   (TCPOPT_TIMESTAMP << 8) |
				   TCPOLEN_TIMESTAMP);
		rep.opt[1] = htonl(tcp_time_stamp);
		rep.opt[2] = htonl(ts);
		arg.iov[0].iov_len += TCPOLEN_TSTAMP_ALIGNED;
	}

	/* Swap the send and the receive. */
	rep.th.dest    = th->source;
	rep.th.source  = th->dest;
	rep.th.doff    = arg.iov[0].iov_len / 4;
	rep.th.seq     = htonl(seq);
	rep.th.ack_seq = htonl(ack);
	rep.th.ack     = 1;
	rep.th.window  = htons(win);

#ifdef CONFIG_TCP_MD5SIG
	if (key) {
		int offset = (ts) ? 3 : 0;

		rep.opt[offset++] = htonl((TCPOPT_NOP << 24) |
					  (TCPOPT_NOP << 16) |
					  (TCPOPT_MD5SIG << 8) |
					  TCPOLEN_MD5SIG);
		arg.iov[0].iov_len += TCPOLEN_MD5SIG_ALIGNED;
		rep.th.doff = arg.iov[0].iov_len/4;

		tcp_v4_md5_hash_hdr((__u8 *) &rep.opt[offset],
				    key, ip_hdr(skb)->saddr,
				    ip_hdr(skb)->daddr, &rep.th);
	}
#endif
	arg.flags = reply_flags;
	arg.csum = csum_tcpudp_nofold(ip_hdr(skb)->daddr,
				      ip_hdr(skb)->saddr, /* XXX */
				      arg.iov[0].iov_len, IPPROTO_TCP, 0);
	arg.csumoffset = offsetof(struct tcphdr, check) / 2;
	if (oif)
		arg.bound_dev_if = oif;
	arg.tos = tos;
	ip_send_unicast_reply(net, skb, ip_hdr(skb)->saddr,
			      ip_hdr(skb)->daddr, &arg, arg.iov[0].iov_len);

	TCP_INC_STATS_BH(net, TCP_MIB_OUTSEGS);
}

static void tcp_v4_timewait_ack(struct sock *sk, struct sk_buff *skb)
{
	struct inet_timewait_sock *tw = inet_twsk(sk);
	struct tcp_timewait_sock *tcptw = tcp_twsk(sk);

	tcp_v4_send_ack(skb, tcptw->tw_snd_nxt, tcptw->tw_rcv_nxt,
			tcptw->tw_rcv_wnd >> tw->tw_rcv_wscale,
			tcptw->tw_ts_recent,
			tw->tw_bound_dev_if,
			tcp_twsk_md5_key(tcptw),
			tw->tw_transparent ? IP_REPLY_ARG_NOSRCCHECK : 0,
			tw->tw_tos
			);

	inet_twsk_put(tw);
}

static void tcp_v4_reqsk_send_ack(struct sock *sk, struct sk_buff *skb,
				  struct request_sock *req)
{
	/* sk->sk_state == TCP_LISTEN -> for regular TCP_SYN_RECV
	 * sk->sk_state == TCP_SYN_RECV -> for Fast Open.
	 */
	tcp_v4_send_ack(skb, (sk->sk_state == TCP_LISTEN) ?
			tcp_rsk(req)->snt_isn + 1 : tcp_sk(sk)->snd_nxt,
			tcp_rsk(req)->rcv_nxt, req->rcv_wnd,
			req->ts_recent,
			0,
			tcp_md5_do_lookup(sk, (union tcp_md5_addr *)&ip_hdr(skb)->daddr,
					  AF_INET),
			inet_rsk(req)->no_srccheck ? IP_REPLY_ARG_NOSRCCHECK : 0,
			ip_hdr(skb)->tos);
}

/*
 *	Send a SYN-ACK after having received a SYN.
 *	This still operates on a request_sock only, not on a big
 *	socket.
 */
static int tcp_v4_send_synack(struct sock *sk, struct dst_entry *dst,
			      struct request_sock *req,
			      struct request_values *rvp,
			      u16 queue_mapping,
			      bool nocache)
{
	const struct inet_request_sock *ireq = inet_rsk(req);
	struct flowi4 fl4;
	int err = -1;
	struct sk_buff *skb;

	/* First, grab a route. */
	if (!dst && (dst = inet_csk_route_req(sk, &fl4, req)) == NULL)
		return -1;

	skb = tcp_make_synack(sk, dst, req, rvp, NULL);

	if (skb) {
		__tcp_v4_send_check(skb, ireq->loc_addr, ireq->rmt_addr);

		skb_set_queue_mapping(skb, queue_mapping);
		err = ip_build_and_send_pkt(skb, sk, ireq->loc_addr,
					    ireq->rmt_addr,
					    ireq->opt);
		err = net_xmit_eval(err);
		if (!tcp_rsk(req)->snt_synack && !err)
			tcp_rsk(req)->snt_synack = tcp_time_stamp;
	}

	return err;
}

static int tcp_v4_rtx_synack(struct sock *sk, struct request_sock *req,
			     struct request_values *rvp)
{
	int res = tcp_v4_send_synack(sk, NULL, req, rvp, 0, false);

	if (!res)
		TCP_INC_STATS_BH(sock_net(sk), TCP_MIB_RETRANSSEGS);
	return res;
}

/*
 *	IPv4 request_sock destructor.
 */
static void tcp_v4_reqsk_destructor(struct request_sock *req)
{
	kfree(inet_rsk(req)->opt);
}

/*
 * Return true if a syncookie should be sent
 */
bool tcp_syn_flood_action(struct sock *sk,
			  const struct sk_buff *skb,
			  const char *proto)
{
	const char *msg = "Dropping request";
	bool want_cookie = false;
	struct listen_sock *lopt;

#ifdef CONFIG_SYN_COOKIES
	if (sysctl_tcp_syncookies) {
		msg = "Sending cookies";
		want_cookie = true;
		NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPREQQFULLDOCOOKIES);
	} else
#endif
		NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPREQQFULLDROP);

	lopt = inet_csk(sk)->icsk_accept_queue.listen_opt;
	if (!lopt->synflood_warned) {
		lopt->synflood_warned = 1;
		pr_info("%s: Possible SYN flooding on port %d. %s. Check SNMP counters.\n",
			proto, ntohs(tcp_hdr(skb)->dest), msg);
	}
	return want_cookie;
}
EXPORT_SYMBOL(tcp_syn_flood_action);
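
/* Whether cookies are sent when the SYN queue overflows is a run-time
 * choice (sketch; this is the standard sysctl path for this knob):
 *
 *	sysctl -w net.ipv4.tcp_syncookies=1
 *
 * With syncookies disabled, requests that overflow the queue are simply
 * dropped and LINUX_MIB_TCPREQQFULLDROP is incremented instead.
 */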

/*
 * Save and compile IPv4 options into the request_sock if needed.
 */
static struct ip_options_rcu *tcp_v4_save_options(struct sk_buff *skb)
{
	const struct ip_options *opt = &(IPCB(skb)->opt);
	struct ip_options_rcu *dopt = NULL;

	if (opt && opt->optlen) {
		int opt_size = sizeof(*dopt) + opt->optlen;

		dopt = kmalloc(opt_size, GFP_ATOMIC);
		if (dopt) {
			if (ip_options_echo(&dopt->opt, skb)) {
				kfree(dopt);
				dopt = NULL;
			}
		}
	}
	return dopt;
}

#ifdef CONFIG_TCP_MD5SIG
/*
 * RFC2385 MD5 checksumming requires a mapping of
 * IP address->MD5 Key.
 * We need to maintain these in the sk structure.
 */

/* Find the Key structure for an address.  */
struct tcp_md5sig_key *tcp_md5_do_lookup(struct sock *sk,
					 const union tcp_md5_addr *addr,
					 int family)
{
	struct tcp_sock *tp = tcp_sk(sk);
	struct tcp_md5sig_key *key;
	struct hlist_node *pos;
	unsigned int size = sizeof(struct in_addr);
	struct tcp_md5sig_info *md5sig;

	/* caller either holds rcu_read_lock() or socket lock */
	md5sig = rcu_dereference_check(tp->md5sig_info,
				       sock_owned_by_user(sk) ||
				       lockdep_is_held(&sk->sk_lock.slock));
	if (!md5sig)
		return NULL;
#if IS_ENABLED(CONFIG_IPV6)
	if (family == AF_INET6)
		size = sizeof(struct in6_addr);
#endif
	hlist_for_each_entry_rcu(key, pos, &md5sig->head, node) {
		if (key->family != family)
			continue;
		if (!memcmp(&key->addr, addr, size))
			return key;
	}
	return NULL;
}
EXPORT_SYMBOL(tcp_md5_do_lookup);

struct tcp_md5sig_key *tcp_v4_md5_lookup(struct sock *sk,
					 struct sock *addr_sk)
{
	union tcp_md5_addr *addr;

	addr = (union tcp_md5_addr *)&inet_sk(addr_sk)->inet_daddr;
	return tcp_md5_do_lookup(sk, addr, AF_INET);
}
EXPORT_SYMBOL(tcp_v4_md5_lookup);

static struct tcp_md5sig_key *tcp_v4_reqsk_md5_lookup(struct sock *sk,
						      struct request_sock *req)
{
	union tcp_md5_addr *addr;

	addr = (union tcp_md5_addr *)&inet_rsk(req)->rmt_addr;
	return tcp_md5_do_lookup(sk, addr, AF_INET);
}

/* This can be called on a newly created socket, from other files */
int tcp_md5_do_add(struct sock *sk, const union tcp_md5_addr *addr,
		   int family, const u8 *newkey, u8 newkeylen, gfp_t gfp)
{
	/* Add Key to the list */
	struct tcp_md5sig_key *key;
	struct tcp_sock *tp = tcp_sk(sk);
	struct tcp_md5sig_info *md5sig;

	key = tcp_md5_do_lookup(sk, addr, family);
	if (key) {
		/* Pre-existing entry - just update that one. */
		memcpy(key->key, newkey, newkeylen);
		key->keylen = newkeylen;
		return 0;
	}

	md5sig = rcu_dereference_protected(tp->md5sig_info,
					   sock_owned_by_user(sk));
	if (!md5sig) {
		md5sig = kmalloc(sizeof(*md5sig), gfp);
		if (!md5sig)
			return -ENOMEM;

		sk_nocaps_add(sk, NETIF_F_GSO_MASK);
		INIT_HLIST_HEAD(&md5sig->head);
		rcu_assign_pointer(tp->md5sig_info, md5sig);
	}

	key = sock_kmalloc(sk, sizeof(*key), gfp);
	if (!key)
		return -ENOMEM;
	if (hlist_empty(&md5sig->head) && !tcp_alloc_md5sig_pool(sk)) {
		sock_kfree_s(sk, key, sizeof(*key));
		return -ENOMEM;
	}

	memcpy(key->key, newkey, newkeylen);
	key->keylen = newkeylen;
	key->family = family;
	memcpy(&key->addr, addr,
	       (family == AF_INET6) ? sizeof(struct in6_addr) :
				      sizeof(struct in_addr));
	hlist_add_head_rcu(&key->node, &md5sig->head);
	return 0;
}
EXPORT_SYMBOL(tcp_md5_do_add);

int tcp_md5_do_del(struct sock *sk, const union tcp_md5_addr *addr, int family)
{
	struct tcp_sock *tp = tcp_sk(sk);
	struct tcp_md5sig_key *key;
	struct tcp_md5sig_info *md5sig;

	key = tcp_md5_do_lookup(sk, addr, family);
	if (!key)
		return -ENOENT;
	hlist_del_rcu(&key->node);
	atomic_sub(sizeof(*key), &sk->sk_omem_alloc);
	kfree_rcu(key, rcu);
	md5sig = rcu_dereference_protected(tp->md5sig_info,
					   sock_owned_by_user(sk));
	if (hlist_empty(&md5sig->head))
		tcp_free_md5sig_pool();
	return 0;
}
EXPORT_SYMBOL(tcp_md5_do_del);

static void tcp_clear_md5_list(struct sock *sk)
{
	struct tcp_sock *tp = tcp_sk(sk);
	struct tcp_md5sig_key *key;
	struct hlist_node *pos, *n;
	struct tcp_md5sig_info *md5sig;

	md5sig = rcu_dereference_protected(tp->md5sig_info, 1);

	if (!hlist_empty(&md5sig->head))
		tcp_free_md5sig_pool();
	hlist_for_each_entry_safe(key, pos, n, &md5sig->head, node) {
		hlist_del_rcu(&key->node);
		atomic_sub(sizeof(*key), &sk->sk_omem_alloc);
		kfree_rcu(key, rcu);
	}
}

static int tcp_v4_parse_md5_keys(struct sock *sk, char __user *optval,
				 int optlen)
{
	struct tcp_md5sig cmd;
	struct sockaddr_in *sin = (struct sockaddr_in *)&cmd.tcpm_addr;

	if (optlen < sizeof(cmd))
		return -EINVAL;

	if (copy_from_user(&cmd, optval, sizeof(cmd)))
		return -EFAULT;

	if (sin->sin_family != AF_INET)
		return -EINVAL;

	if (!cmd.tcpm_key || !cmd.tcpm_keylen)
		return tcp_md5_do_del(sk, (union tcp_md5_addr *)&sin->sin_addr.s_addr,
				      AF_INET);

	if (cmd.tcpm_keylen > TCP_MD5SIG_MAXKEYLEN)
		return -EINVAL;

	return tcp_md5_do_add(sk, (union tcp_md5_addr *)&sin->sin_addr.s_addr,
			      AF_INET, cmd.tcpm_key, cmd.tcpm_keylen,
			      GFP_KERNEL);
}
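
/* User-space sketch (illustrative; error handling elided, fd assumed to be
 * a TCP socket): keys land in tcp_v4_parse_md5_keys() above via the
 * TCP_MD5SIG socket option, keyed by peer address:
 *
 *	#include <netinet/tcp.h>
 *
 *	struct tcp_md5sig md5 = { .tcpm_keylen = 6 };
 *	struct sockaddr_in *peer = (struct sockaddr_in *)&md5.tcpm_addr;
 *	peer->sin_family = AF_INET;
 *	inet_pton(AF_INET, "192.0.2.1", &peer->sin_addr);
 *	memcpy(md5.tcpm_key, "secret", 6);
 *	setsockopt(fd, IPPROTO_TCP, TCP_MD5SIG, &md5, sizeof(md5));
 *
 * Passing tcpm_keylen == 0 deletes the key for that address, per the
 * tcp_md5_do_del() branch above.
 */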

static int tcp_v4_md5_hash_pseudoheader(struct tcp_md5sig_pool *hp,
					__be32 daddr, __be32 saddr, int nbytes)
{
	struct tcp4_pseudohdr *bp;
	struct scatterlist sg;

	bp = &hp->md5_blk.ip4;

	/*
	 * 1. the TCP pseudo-header (in the order: source IP address,
	 * destination IP address, zero-padded protocol number, and
	 * segment length)
	 */
	bp->saddr = saddr;
	bp->daddr = daddr;
	bp->pad = 0;
	bp->protocol = IPPROTO_TCP;
	bp->len = cpu_to_be16(nbytes);

	sg_init_one(&sg, bp, sizeof(*bp));
	return crypto_hash_update(&hp->md5_desc, &sg, sizeof(*bp));
}

static int tcp_v4_md5_hash_hdr(char *md5_hash, const struct tcp_md5sig_key *key,
			       __be32 daddr, __be32 saddr, const struct tcphdr *th)
{
	struct tcp_md5sig_pool *hp;
	struct hash_desc *desc;

	hp = tcp_get_md5sig_pool();
	if (!hp)
		goto clear_hash_noput;
	desc = &hp->md5_desc;

	if (crypto_hash_init(desc))
		goto clear_hash;
	if (tcp_v4_md5_hash_pseudoheader(hp, daddr, saddr, th->doff << 2))
		goto clear_hash;
	if (tcp_md5_hash_header(hp, th))
		goto clear_hash;
	if (tcp_md5_hash_key(hp, key))
		goto clear_hash;
	if (crypto_hash_final(desc, md5_hash))
		goto clear_hash;

	tcp_put_md5sig_pool();
	return 0;

clear_hash:
	tcp_put_md5sig_pool();
clear_hash_noput:
	memset(md5_hash, 0, 16);
	return 1;
}

int tcp_v4_md5_hash_skb(char *md5_hash, struct tcp_md5sig_key *key,
			const struct sock *sk, const struct request_sock *req,
			const struct sk_buff *skb)
{
	struct tcp_md5sig_pool *hp;
	struct hash_desc *desc;
	const struct tcphdr *th = tcp_hdr(skb);
	__be32 saddr, daddr;

	if (sk) {
		saddr = inet_sk(sk)->inet_saddr;
		daddr = inet_sk(sk)->inet_daddr;
	} else if (req) {
		saddr = inet_rsk(req)->loc_addr;
		daddr = inet_rsk(req)->rmt_addr;
	} else {
		const struct iphdr *iph = ip_hdr(skb);
		saddr = iph->saddr;
		daddr = iph->daddr;
	}

	hp = tcp_get_md5sig_pool();
	if (!hp)
		goto clear_hash_noput;
	desc = &hp->md5_desc;

	if (crypto_hash_init(desc))
		goto clear_hash;

	if (tcp_v4_md5_hash_pseudoheader(hp, daddr, saddr, skb->len))
		goto clear_hash;
	if (tcp_md5_hash_header(hp, th))
		goto clear_hash;
	if (tcp_md5_hash_skb_data(hp, skb, th->doff << 2))
		goto clear_hash;
	if (tcp_md5_hash_key(hp, key))
		goto clear_hash;
	if (crypto_hash_final(desc, md5_hash))
		goto clear_hash;

	tcp_put_md5sig_pool();
	return 0;

clear_hash:
	tcp_put_md5sig_pool();
clear_hash_noput:
	memset(md5_hash, 0, 16);
	return 1;
}
EXPORT_SYMBOL(tcp_v4_md5_hash_skb);

static bool tcp_v4_inbound_md5_hash(struct sock *sk, const struct sk_buff *skb)
{
	/*
	 * This gets called for each TCP segment that arrives
	 * so we want to be efficient.
	 * We have 3 drop cases:
	 * o No MD5 hash and one expected.
	 * o MD5 hash and we're not expecting one.
	 * o MD5 hash and it's wrong.
	 */
	const __u8 *hash_location = NULL;
	struct tcp_md5sig_key *hash_expected;
	const struct iphdr *iph = ip_hdr(skb);
	const struct tcphdr *th = tcp_hdr(skb);
	int genhash;
	unsigned char newhash[16];

	hash_expected = tcp_md5_do_lookup(sk, (union tcp_md5_addr *)&iph->saddr,
					  AF_INET);
	hash_location = tcp_parse_md5sig_option(th);

	/* We've parsed the options - do we have a hash? */
	if (!hash_expected && !hash_location)
		return false;

	if (hash_expected && !hash_location) {
		NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPMD5NOTFOUND);
		return true;
	}

	if (!hash_expected && hash_location) {
		NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPMD5UNEXPECTED);
		return true;
	}

	/* Okay, so this is hash_expected and hash_location -
	 * so we need to calculate the checksum.
	 */
	genhash = tcp_v4_md5_hash_skb(newhash,
				      hash_expected,
				      NULL, NULL, skb);

	if (genhash || memcmp(hash_location, newhash, 16) != 0) {
		net_info_ratelimited("MD5 Hash failed for (%pI4, %d)->(%pI4, %d)%s\n",
				     &iph->saddr, ntohs(th->source),
				     &iph->daddr, ntohs(th->dest),
				     genhash ? " tcp_v4_calc_md5_hash failed"
				     : "");
		return true;
	}
	return false;
}

#endif

struct request_sock_ops tcp_request_sock_ops __read_mostly = {
	.family		=	PF_INET,
	.obj_size	=	sizeof(struct tcp_request_sock),
	.rtx_syn_ack	=	tcp_v4_rtx_synack,
	.send_ack	=	tcp_v4_reqsk_send_ack,
	.destructor	=	tcp_v4_reqsk_destructor,
	.send_reset	=	tcp_v4_send_reset,
	.syn_ack_timeout =	tcp_syn_ack_timeout,
};

#ifdef CONFIG_TCP_MD5SIG
static const struct tcp_request_sock_ops tcp_request_sock_ipv4_ops = {
	.md5_lookup	=	tcp_v4_reqsk_md5_lookup,
	.calc_md5_hash	=	tcp_v4_md5_hash_skb,
};
#endif

static bool tcp_fastopen_check(struct sock *sk, struct sk_buff *skb,
			       struct request_sock *req,
			       struct tcp_fastopen_cookie *foc,
			       struct tcp_fastopen_cookie *valid_foc)
{
	bool skip_cookie = false;
	struct fastopen_queue *fastopenq;

	if (likely(!fastopen_cookie_present(foc))) {
		/* See include/net/tcp.h for the meaning of these knobs */
		if ((sysctl_tcp_fastopen & TFO_SERVER_ALWAYS) ||
		    ((sysctl_tcp_fastopen & TFO_SERVER_COOKIE_NOT_REQD) &&
		    (TCP_SKB_CB(skb)->end_seq != TCP_SKB_CB(skb)->seq + 1)))
			skip_cookie = true; /* no cookie to validate */
		else
			return false;
	}
	fastopenq = inet_csk(sk)->icsk_accept_queue.fastopenq;
	/* A FO option is present; bump the counter. */
	NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPFASTOPENPASSIVE);

	/* Make sure the listener has enabled fastopen, and we don't
	 * exceed the max # of pending TFO requests allowed before trying
	 * to validate the cookie, in order to avoid burning CPU cycles
	 * unnecessarily.
	 *
	 * XXX (TFO) - The implication of checking the max_qlen before
	 * processing a cookie request is that clients can't differentiate
	 * between qlen overflow causing Fast Open to be disabled
	 * temporarily vs a server not supporting Fast Open at all.
	 */
	if ((sysctl_tcp_fastopen & TFO_SERVER_ENABLE) == 0 ||
	    fastopenq == NULL || fastopenq->max_qlen == 0)
		return false;

	if (fastopenq->qlen >= fastopenq->max_qlen) {
		struct request_sock *req1;
		spin_lock(&fastopenq->lock);
		req1 = fastopenq->rskq_rst_head;
		if ((req1 == NULL) || time_after(req1->expires, jiffies)) {
			spin_unlock(&fastopenq->lock);
			NET_INC_STATS_BH(sock_net(sk),
					 LINUX_MIB_TCPFASTOPENLISTENOVERFLOW);
			/* Avoid bumping LINUX_MIB_TCPFASTOPENPASSIVEFAIL*/
			foc->len = -1;
			return false;
		}
		fastopenq->rskq_rst_head = req1->dl_next;
		fastopenq->qlen--;
		spin_unlock(&fastopenq->lock);
		reqsk_free(req1);
	}
	if (skip_cookie) {
		tcp_rsk(req)->rcv_nxt = TCP_SKB_CB(skb)->end_seq;
		return true;
	}
	if (foc->len == TCP_FASTOPEN_COOKIE_SIZE) {
		if ((sysctl_tcp_fastopen & TFO_SERVER_COOKIE_NOT_CHKED) == 0) {
			tcp_fastopen_cookie_gen(ip_hdr(skb)->saddr, valid_foc);
			if ((valid_foc->len != TCP_FASTOPEN_COOKIE_SIZE) ||
			    memcmp(&foc->val[0], &valid_foc->val[0],
				   TCP_FASTOPEN_COOKIE_SIZE) != 0)
				return false;
			valid_foc->len = -1;
		}
		/* Acknowledge the data received from the peer. */
		tcp_rsk(req)->rcv_nxt = TCP_SKB_CB(skb)->end_seq;
		return true;
	} else if (foc->len == 0) { /* Client requesting a cookie */
		tcp_fastopen_cookie_gen(ip_hdr(skb)->saddr, valid_foc);
		NET_INC_STATS_BH(sock_net(sk),
				 LINUX_MIB_TCPFASTOPENCOOKIEREQD);
	} else {
		/* Client sent a cookie with wrong size. Treat it
		 * the same as invalid and return a valid one.
		 */
		tcp_fastopen_cookie_gen(ip_hdr(skb)->saddr, valid_foc);
	}
	return false;
}

static int tcp_v4_conn_req_fastopen(struct sock *sk,
				    struct sk_buff *skb,
				    struct sk_buff *skb_synack,
				    struct request_sock *req,
				    struct request_values *rvp)
{
	struct tcp_sock *tp = tcp_sk(sk);
	struct request_sock_queue *queue = &inet_csk(sk)->icsk_accept_queue;
	const struct inet_request_sock *ireq = inet_rsk(req);
	struct sock *child;
	int err;

	req->num_retrans = 0;
	req->num_timeout = 0;
	req->sk = NULL;

	child = inet_csk(sk)->icsk_af_ops->syn_recv_sock(sk, skb, req, NULL);
	if (child == NULL) {
		NET_INC_STATS_BH(sock_net(sk),
				 LINUX_MIB_TCPFASTOPENPASSIVEFAIL);
		kfree_skb(skb_synack);
		return -1;
	}
	err = ip_build_and_send_pkt(skb_synack, sk, ireq->loc_addr,
				    ireq->rmt_addr, ireq->opt);
	err = net_xmit_eval(err);
	if (!err)
		tcp_rsk(req)->snt_synack = tcp_time_stamp;
	/* XXX (TFO) - is it ok to ignore error and continue? */

	spin_lock(&queue->fastopenq->lock);
	queue->fastopenq->qlen++;
	spin_unlock(&queue->fastopenq->lock);

	/* Initialize the child socket. Have to fix some values to take
	 * into account the child is a Fast Open socket and is created
	 * only out of the bits carried in the SYN packet.
	 */
	tp = tcp_sk(child);

	tp->fastopen_rsk = req;
	/* Do a hold on the listener sk so that if the listener is being
	 * closed, the child that has been accepted can live on and still
	 * access listen_lock.
	 */
	sock_hold(sk);
	tcp_rsk(req)->listener = sk;

	/* RFC1323: The window in SYN & SYN/ACK segments is never
	 * scaled. So correct it appropriately.
	 */
	tp->snd_wnd = ntohs(tcp_hdr(skb)->window);

	/* Activate the retrans timer so that SYNACK can be retransmitted.
	 * The request socket is not added to the SYN table of the parent
	 * because it's been added to the accept queue directly.
	 */
	inet_csk_reset_xmit_timer(child, ICSK_TIME_RETRANS,
				  TCP_TIMEOUT_INIT, TCP_RTO_MAX);

	/* Add the child socket directly into the accept queue */
	inet_csk_reqsk_queue_add(sk, req, child);

	/* Now finish processing the fastopen child socket. */
	inet_csk(child)->icsk_af_ops->rebuild_header(child);
	tcp_init_congestion_control(child);
	tcp_mtup_init(child);
	tcp_init_buffer_space(child);
	tcp_init_metrics(child);

	/* Queue the data carried in the SYN packet. We need to first
	 * bump skb's refcnt because the caller will attempt to free it.
	 *
	 * XXX (TFO) - we honor a zero-payload TFO request for now.
	 * (Any reason not to?)
	 */
	if (TCP_SKB_CB(skb)->end_seq == TCP_SKB_CB(skb)->seq + 1) {
		/* Don't queue the skb if there is no payload in SYN.
		 * XXX (TFO) - How about SYN+FIN?
		 */
		tp->rcv_nxt = TCP_SKB_CB(skb)->end_seq;
	} else {
		skb = skb_get(skb);
		skb_dst_drop(skb);
		__skb_pull(skb, tcp_hdr(skb)->doff * 4);
		skb_set_owner_r(skb, child);
		__skb_queue_tail(&child->sk_receive_queue, skb);
		tp->rcv_nxt = TCP_SKB_CB(skb)->end_seq;
		tp->syn_data_acked = 1;
	}
	sk->sk_data_ready(sk, 0);
	bh_unlock_sock(child);
	sock_put(child);
	WARN_ON(req->sk == NULL);
	return 0;
}

int tcp_v4_conn_request(struct sock *sk, struct sk_buff *skb)
{
	struct tcp_extend_values tmp_ext;
	struct tcp_options_received tmp_opt;
	const u8 *hash_location;
	struct request_sock *req;
	struct inet_request_sock *ireq;
	struct tcp_sock *tp = tcp_sk(sk);
	struct dst_entry *dst = NULL;
	__be32 saddr = ip_hdr(skb)->saddr;
	__be32 daddr = ip_hdr(skb)->daddr;
	__u32 isn = TCP_SKB_CB(skb)->when;
	bool want_cookie = false;
	struct flowi4 fl4;
	struct tcp_fastopen_cookie foc = { .len = -1 };
	struct tcp_fastopen_cookie valid_foc = { .len = -1 };
	struct sk_buff *skb_synack;
	int do_fastopen;

	/* Never answer to SYNs sent to broadcast or multicast */
	if (skb_rtable(skb)->rt_flags & (RTCF_BROADCAST | RTCF_MULTICAST))
		goto drop;

	/* TW buckets are converted to open requests without
	 * limitations, they conserve resources and peer is
	 * evidently real one.
	 */
	if (inet_csk_reqsk_queue_is_full(sk) && !isn) {
		want_cookie = tcp_syn_flood_action(sk, skb, "TCP");
		if (!want_cookie)
			goto drop;
	}

	/* Accept backlog is full. If we have already queued enough
	 * of warm entries in syn queue, drop request. It is better than
	 * clogging syn queue with openreqs with exponentially increasing
	 * timeout.
	 */
	if (sk_acceptq_is_full(sk) && inet_csk_reqsk_queue_young(sk) > 1)
		goto drop;

	req = inet_reqsk_alloc(&tcp_request_sock_ops);
	if (!req)
		goto drop;

#ifdef CONFIG_TCP_MD5SIG
	tcp_rsk(req)->af_specific = &tcp_request_sock_ipv4_ops;
#endif

	tcp_clear_options(&tmp_opt);
	tmp_opt.mss_clamp = TCP_MSS_DEFAULT;
	tmp_opt.user_mss  = tp->rx_opt.user_mss;
	tcp_parse_options(skb, &tmp_opt, &hash_location, 0,
			  want_cookie ? NULL : &foc);

	if (tmp_opt.cookie_plus > 0 &&
	    tmp_opt.saw_tstamp &&
	    !tp->rx_opt.cookie_out_never &&
	    (sysctl_tcp_cookie_size > 0 ||
	     (tp->cookie_values != NULL &&
	      tp->cookie_values->cookie_desired > 0))) {
		u8 *c;
		u32 *mess = &tmp_ext.cookie_bakery[COOKIE_DIGEST_WORDS];
		int l = tmp_opt.cookie_plus - TCPOLEN_COOKIE_BASE;

		if (tcp_cookie_generator(&tmp_ext.cookie_bakery[0]) != 0)
			goto drop_and_release;

		/* Secret recipe starts with IP addresses */
		*mess++ ^= (__force u32)daddr;
		*mess++ ^= (__force u32)saddr;

		/* plus variable length Initiator Cookie */
		c = (u8 *)mess;
		while (l-- > 0)
			*c++ ^= *hash_location++;

		want_cookie = false;	/* not our kind of cookie */
		tmp_ext.cookie_out_never = 0; /* false */
		tmp_ext.cookie_plus = tmp_opt.cookie_plus;
	} else if (!tp->rx_opt.cookie_in_always) {
		/* redundant indications, but ensure initialization. */
		tmp_ext.cookie_out_never = 1; /* true */
		tmp_ext.cookie_plus = 0;
	} else {
		goto drop_and_release;
	}
	tmp_ext.cookie_in_always = tp->rx_opt.cookie_in_always;

	if (want_cookie && !tmp_opt.saw_tstamp)
		tcp_clear_options(&tmp_opt);

	tmp_opt.tstamp_ok = tmp_opt.saw_tstamp;
	tcp_openreq_init(req, &tmp_opt, skb);

	ireq = inet_rsk(req);
	ireq->loc_addr = daddr;
	ireq->rmt_addr = saddr;
	ireq->no_srccheck = inet_sk(sk)->transparent;
	ireq->opt = tcp_v4_save_options(skb);

	if (security_inet_conn_request(sk, skb, req))
		goto drop_and_free;

	if (!want_cookie || tmp_opt.tstamp_ok)
		TCP_ECN_create_request(req, skb);

	if (want_cookie) {
		isn = cookie_v4_init_sequence(sk, skb, &req->mss);
		req->cookie_ts = tmp_opt.tstamp_ok;
	} else if (!isn) {
		/* VJ's idea. We save last timestamp seen
		 * from the destination in peer table, when entering
		 * state TIME-WAIT, and check against it before
		 * accepting new connection request.
		 *
		 * If "isn" is not zero, this request hit alive
		 * timewait bucket, so that all the necessary checks
		 * are made in the function processing timewait state.
		 */
		if (tmp_opt.saw_tstamp &&
		    tcp_death_row.sysctl_tw_recycle &&
		    (dst = inet_csk_route_req(sk, &fl4, req)) != NULL &&
		    fl4.daddr == saddr) {
			if (!tcp_peer_is_proven(req, dst, true)) {
				NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_PAWSPASSIVEREJECTED);
				goto drop_and_release;
			}
		}
		/* Kill the following clause, if you dislike this way. */
		else if (!sysctl_tcp_syncookies &&
			 (sysctl_max_syn_backlog - inet_csk_reqsk_queue_len(sk) <
			  (sysctl_max_syn_backlog >> 2)) &&
			 !tcp_peer_is_proven(req, dst, false)) {
			/* Without syncookies the last quarter of the
			 * backlog is filled with destinations
			 * proven to be alive.
			 * It means that we continue to communicate
			 * with destinations already remembered
			 * at the moment of synflood.
			 */
			LIMIT_NETDEBUG(KERN_DEBUG pr_fmt("drop open request from %pI4/%u\n"),
				       &saddr, ntohs(tcp_hdr(skb)->source));
			goto drop_and_release;
		}

		isn = tcp_v4_init_sequence(skb);
	}
	tcp_rsk(req)->snt_isn = isn;

	if (dst == NULL) {
		dst = inet_csk_route_req(sk, &fl4, req);
		if (dst == NULL)
			goto drop_and_free;
	}
	do_fastopen = tcp_fastopen_check(sk, skb, req, &foc, &valid_foc);

	/* We don't call tcp_v4_send_synack() directly because we need
	 * to make sure a child socket can be created successfully before
	 * sending back synack!
	 *
	 * XXX (TFO) - Ideally one would simply call tcp_v4_send_synack()
	 * (or better yet, call tcp_send_synack() in the child context
	 * directly, but will have to fix bunch of other code first)
	 * after syn_recv_sock() except one will need to first fix the
	 * latter to remove its dependency on the current implementation
	 * of tcp_v4_send_synack()->tcp_select_initial_window().
	 */
	skb_synack = tcp_make_synack(sk, dst, req,
	    (struct request_values *)&tmp_ext,
	    fastopen_cookie_present(&valid_foc) ? &valid_foc : NULL);

	if (skb_synack) {
		__tcp_v4_send_check(skb_synack, ireq->loc_addr, ireq->rmt_addr);
		skb_set_queue_mapping(skb_synack, skb_get_queue_mapping(skb));
	} else
		goto drop_and_free;

	if (likely(!do_fastopen)) {
		int err;
		err = ip_build_and_send_pkt(skb_synack, sk, ireq->loc_addr,
		     ireq->rmt_addr, ireq->opt);
		err = net_xmit_eval(err);
		if (err || want_cookie)
			goto drop_and_free;

		tcp_rsk(req)->snt_synack = tcp_time_stamp;
		tcp_rsk(req)->listener = NULL;
		/* Add the request_sock to the SYN table */
		inet_csk_reqsk_queue_hash_add(sk, req, TCP_TIMEOUT_INIT);
		if (fastopen_cookie_present(&foc) && foc.len != 0)
			NET_INC_STATS_BH(sock_net(sk),
					 LINUX_MIB_TCPFASTOPENPASSIVEFAIL);
	} else if (tcp_v4_conn_req_fastopen(sk, skb, skb_synack, req,
	    (struct request_values *)&tmp_ext))
		goto drop_and_free;

	return 0;

drop_and_release:
	dst_release(dst);
drop_and_free:
	reqsk_free(req);
drop:
	return 0;
}
EXPORT_SYMBOL(tcp_v4_conn_request);
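
/* Server-side Fast Open is gated by the TFO_* bits tested in
 * tcp_fastopen_check(); a plausible run-time setup (bit values assumed
 * from include/net/tcp.h: 0x1 enables the client side, 0x2 the server
 * side) would be:
 *
 *	sysctl -w net.ipv4.tcp_fastopen=3
 *
 * together with a listener opting in via the TCP_FASTOPEN socket option
 * before listen(), which sizes its queue of pending Fast Open requests
 * (fastopenq->max_qlen above).
 */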

/*
 * The three way handshake has completed - we got a valid synack -
 * now create the new socket.
 */
struct sock *tcp_v4_syn_recv_sock(struct sock *sk, struct sk_buff *skb,
				  struct request_sock *req,
				  struct dst_entry *dst)
{
	struct inet_request_sock *ireq;
	struct inet_sock *newinet;
	struct tcp_sock *newtp;
	struct sock *newsk;
#ifdef CONFIG_TCP_MD5SIG
	struct tcp_md5sig_key *key;
#endif
	struct ip_options_rcu *inet_opt;

	if (sk_acceptq_is_full(sk))
		goto exit_overflow;

	newsk = tcp_create_openreq_child(sk, req, skb);
	if (!newsk)
		goto exit_nonewsk;

	newsk->sk_gso_type = SKB_GSO_TCPV4;
	inet_sk_rx_dst_set(newsk, skb);

	newtp		      = tcp_sk(newsk);
	newinet		      = inet_sk(newsk);
	ireq		      = inet_rsk(req);
	newinet->inet_daddr   = ireq->rmt_addr;
	newinet->inet_rcv_saddr = ireq->loc_addr;
	newinet->inet_saddr   = ireq->loc_addr;
	inet_opt	      = ireq->opt;
	rcu_assign_pointer(newinet->inet_opt, inet_opt);
	ireq->opt	      = NULL;
	newinet->mc_index     = inet_iif(skb);
	newinet->mc_ttl	      = ip_hdr(skb)->ttl;
	newinet->rcv_tos      = ip_hdr(skb)->tos;
	inet_csk(newsk)->icsk_ext_hdr_len = 0;
	if (inet_opt)
		inet_csk(newsk)->icsk_ext_hdr_len = inet_opt->opt.optlen;
	newinet->inet_id = newtp->write_seq ^ jiffies;

	if (!dst) {
		dst = inet_csk_route_child_sock(sk, newsk, req);
		if (!dst)
			goto put_and_exit;
	} else {
		/* syncookie case : see end of cookie_v4_check() */
	}
	sk_setup_caps(newsk, dst);

	tcp_mtup_init(newsk);
	tcp_sync_mss(newsk, dst_mtu(dst));
	newtp->advmss = dst_metric_advmss(dst);
	if (tcp_sk(sk)->rx_opt.user_mss &&
	    tcp_sk(sk)->rx_opt.user_mss < newtp->advmss)
		newtp->advmss = tcp_sk(sk)->rx_opt.user_mss;

	tcp_initialize_rcv_mss(newsk);
	tcp_synack_rtt_meas(newsk, req);
	newtp->total_retrans = req->num_retrans;

#ifdef CONFIG_TCP_MD5SIG
	/* Copy over the MD5 key from the original socket */
	key = tcp_md5_do_lookup(sk, (union tcp_md5_addr *)&newinet->inet_daddr,
				AF_INET);
	if (key != NULL) {
		/*
		 * We're using one, so create a matching key
		 * on the newsk structure. If we fail to get
		 * memory, then we end up not copying the key
		 * across. Shucks.
		 */
		tcp_md5_do_add(newsk, (union tcp_md5_addr *)&newinet->inet_daddr,
			       AF_INET, key->key, key->keylen, GFP_ATOMIC);
		sk_nocaps_add(newsk, NETIF_F_GSO_MASK);
	}
#endif

	if (__inet_inherit_port(sk, newsk) < 0)
		goto put_and_exit;
	__inet_hash_nolisten(newsk, NULL);

	return newsk;

exit_overflow:
	NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_LISTENOVERFLOWS);
exit_nonewsk:
	dst_release(dst);
exit:
	NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_LISTENDROPS);
	return NULL;
put_and_exit:
	inet_csk_prepare_forced_close(newsk);
	tcp_done(newsk);
	goto exit;
}
EXPORT_SYMBOL(tcp_v4_syn_recv_sock);

static struct sock *tcp_v4_hnd_req(struct sock *sk, struct sk_buff *skb)
{
	struct tcphdr *th = tcp_hdr(skb);
	const struct iphdr *iph = ip_hdr(skb);
	struct sock *nsk;
	struct request_sock **prev;
	/* Find possible connection requests. */
	struct request_sock *req = inet_csk_search_req(sk, &prev, th->source,
						       iph->saddr, iph->daddr);
	if (req)
		return tcp_check_req(sk, skb, req, prev, false);

	nsk = inet_lookup_established(sock_net(sk), &tcp_hashinfo, iph->saddr,
			th->source, iph->daddr, th->dest, inet_iif(skb));

	if (nsk) {
		if (nsk->sk_state != TCP_TIME_WAIT) {
			bh_lock_sock(nsk);
			return nsk;
		}
		inet_twsk_put(inet_twsk(nsk));
		return NULL;
	}

#ifdef CONFIG_SYN_COOKIES
	if (!th->syn)
		sk = cookie_v4_check(sk, skb, &(IPCB(skb)->opt));
#endif
	return sk;
}

static __sum16 tcp_v4_checksum_init(struct sk_buff *skb)
{
	const struct iphdr *iph = ip_hdr(skb);

	if (skb->ip_summed == CHECKSUM_COMPLETE) {
		if (!tcp_v4_check(skb->len, iph->saddr,
				  iph->daddr, skb->csum)) {
			skb->ip_summed = CHECKSUM_UNNECESSARY;
			return 0;
		}
	}

	skb->csum = csum_tcpudp_nofold(iph->saddr, iph->daddr,
				       skb->len, IPPROTO_TCP, 0);

	if (skb->len <= 76) {
		return __skb_checksum_complete(skb);
	}
	return 0;
}

/* The socket must have its spinlock held when we get
 * here.
 *
 * We have a potential double-lock case here, so even when
 * doing backlog processing we use the BH locking scheme.
 * This is because we cannot sleep with the original spinlock
 * held.
 */
int tcp_v4_do_rcv(struct sock *sk, struct sk_buff *skb)
{
	struct sock *rsk;
#ifdef CONFIG_TCP_MD5SIG
	/*
	 * We really want to reject the packet as early as possible
	 * if:
	 *  o We're expecting an MD5'd packet and this is no MD5 tcp option
	 *  o There is an MD5 option and we're not expecting one
	 */
	if (tcp_v4_inbound_md5_hash(sk, skb))
		goto discard;
#endif

	if (sk->sk_state == TCP_ESTABLISHED) { /* Fast path */
		struct dst_entry *dst = sk->sk_rx_dst;

		sock_rps_save_rxhash(sk, skb);
		if (dst) {
			if (inet_sk(sk)->rx_dst_ifindex != skb->skb_iif ||
			    dst->ops->check(dst, 0) == NULL) {
				dst_release(dst);
				sk->sk_rx_dst = NULL;
			}
		}
		if (tcp_rcv_established(sk, skb, tcp_hdr(skb), skb->len)) {
			rsk = sk;
			goto reset;
		}
		return 0;
	}

	if (skb->len < tcp_hdrlen(skb) || tcp_checksum_complete(skb))
		goto csum_err;

	if (sk->sk_state == TCP_LISTEN) {
		struct sock *nsk = tcp_v4_hnd_req(sk, skb);
		if (!nsk)
			goto discard;

		if (nsk != sk) {
			sock_rps_save_rxhash(nsk, skb);
			if (tcp_child_process(sk, nsk, skb)) {
				rsk = nsk;
				goto reset;
			}
			return 0;
		}
	} else
		sock_rps_save_rxhash(sk, skb);

	if (tcp_rcv_state_process(sk, skb, tcp_hdr(skb), skb->len)) {
		rsk = sk;
		goto reset;
	}
	return 0;

reset:
	tcp_v4_send_reset(rsk, skb);
discard:
	kfree_skb(skb);
	/* Be careful here. If this function gets more complicated and
	 * gcc suffers from register pressure on the x86, sk (in %ebx)
	 * might be destroyed here. This current version compiles correctly,
	 * but you have been warned.
	 */
	return 0;

csum_err:
	TCP_INC_STATS_BH(sock_net(sk), TCP_MIB_INERRS);
	goto discard;
}
EXPORT_SYMBOL(tcp_v4_do_rcv);

void tcp_v4_early_demux(struct sk_buff *skb)
{
	const struct iphdr *iph;
	const struct tcphdr *th;
	struct sock *sk;

	if (skb->pkt_type != PACKET_HOST)
		return;

	if (!pskb_may_pull(skb, skb_transport_offset(skb) + sizeof(struct tcphdr)))
		return;

	iph = ip_hdr(skb);
	th = tcp_hdr(skb);

	if (th->doff < sizeof(struct tcphdr) / 4)
		return;

	sk = __inet_lookup_established(dev_net(skb->dev), &tcp_hashinfo,
				       iph->saddr, th->source,
				       iph->daddr, ntohs(th->dest),
				       skb->skb_iif);
	if (sk) {
		skb->sk = sk;
		skb->destructor = sock_edemux;
		if (sk->sk_state != TCP_TIME_WAIT) {
			struct dst_entry *dst = sk->sk_rx_dst;

			if (dst)
				dst = dst_check(dst, 0);
			if (dst &&
			    inet_sk(sk)->rx_dst_ifindex == skb->skb_iif)
				skb_dst_set_noref(skb, dst);
		}
	}
}

/*
 *	From tcp_input.c
 */

int tcp_v4_rcv(struct sk_buff *skb)
{
	const struct iphdr *iph;
	const struct tcphdr *th;
	struct sock *sk;
	int ret;
	struct net *net = dev_net(skb->dev);

	if (skb->pkt_type != PACKET_HOST)
		goto discard_it;

	/* Count it even if it's bad */
	TCP_INC_STATS_BH(net, TCP_MIB_INSEGS);

	if (!pskb_may_pull(skb, sizeof(struct tcphdr)))
		goto discard_it;

	th = tcp_hdr(skb);

	if (th->doff < sizeof(struct tcphdr) / 4)
		goto bad_packet;
	if (!pskb_may_pull(skb, th->doff * 4))
		goto discard_it;

	/* An explanation is required here, I think.
	 * Packet length and doff are validated by header prediction,
	 * provided case of th->doff==0 is eliminated.
	 * So, we defer the checks. */
	if (!skb_csum_unnecessary(skb) && tcp_v4_checksum_init(skb))
		goto bad_packet;

	th = tcp_hdr(skb);
	iph = ip_hdr(skb);
	TCP_SKB_CB(skb)->seq = ntohl(th->seq);
	TCP_SKB_CB(skb)->end_seq = (TCP_SKB_CB(skb)->seq + th->syn + th->fin +
				    skb->len - th->doff * 4);
	TCP_SKB_CB(skb)->ack_seq = ntohl(th->ack_seq);
	TCP_SKB_CB(skb)->when	 = 0;
	TCP_SKB_CB(skb)->ip_dsfield = ipv4_get_dsfield(iph);
	TCP_SKB_CB(skb)->sacked	 = 0;

	sk = __inet_lookup_skb(&tcp_hashinfo, skb, th->source, th->dest);
	if (!sk)
		goto no_tcp_socket;

process:
	if (sk->sk_state == TCP_TIME_WAIT)
		goto do_time_wait;

	if (unlikely(iph->ttl < inet_sk(sk)->min_ttl)) {
		NET_INC_STATS_BH(net, LINUX_MIB_TCPMINTTLDROP);
		goto discard_and_relse;
	}

	if (!xfrm4_policy_check(sk, XFRM_POLICY_IN, skb))
		goto discard_and_relse;
	nf_reset(skb);

	if (sk_filter(sk, skb))
		goto discard_and_relse;

	skb->dev = NULL;

	bh_lock_sock_nested(sk);
	ret = 0;
	if (!sock_owned_by_user(sk)) {
#ifdef CONFIG_NET_DMA
		struct tcp_sock *tp = tcp_sk(sk);
		if (!tp->ucopy.dma_chan && tp->ucopy.pinned_list)
			tp->ucopy.dma_chan = net_dma_find_channel();
		if (tp->ucopy.dma_chan)
			ret = tcp_v4_do_rcv(sk, skb);
		else
#endif
		{
			if (!tcp_prequeue(sk, skb))
				ret = tcp_v4_do_rcv(sk, skb);
		}
	} else if (unlikely(sk_add_backlog(sk, skb,
					   sk->sk_rcvbuf + sk->sk_sndbuf))) {
		bh_unlock_sock(sk);
		NET_INC_STATS_BH(net, LINUX_MIB_TCPBACKLOGDROP);
		goto discard_and_relse;
	}
	bh_unlock_sock(sk);

	sock_put(sk);

	return ret;

no_tcp_socket:
	if (!xfrm4_policy_check(NULL, XFRM_POLICY_IN, skb))
		goto discard_it;

	if (skb->len < (th->doff << 2) || tcp_checksum_complete(skb)) {
bad_packet:
		TCP_INC_STATS_BH(net, TCP_MIB_INERRS);
	} else {
		tcp_v4_send_reset(NULL, skb);
	}

discard_it:
	/* Discard frame. */
	kfree_skb(skb);
	return 0;

discard_and_relse:
	sock_put(sk);
	goto discard_it;

do_time_wait:
	if (!xfrm4_policy_check(NULL, XFRM_POLICY_IN, skb)) {
		inet_twsk_put(inet_twsk(sk));
		goto discard_it;
	}

	if (skb->len < (th->doff << 2) || tcp_checksum_complete(skb)) {
		TCP_INC_STATS_BH(net, TCP_MIB_INERRS);
		inet_twsk_put(inet_twsk(sk));
		goto discard_it;
	}
	switch (tcp_timewait_state_process(inet_twsk(sk), skb, th)) {
	case TCP_TW_SYN: {
		struct sock *sk2 = inet_lookup_listener(dev_net(skb->dev),
							&tcp_hashinfo,
							iph->daddr, th->dest,
							inet_iif(skb));
		if (sk2) {
			inet_twsk_deschedule(inet_twsk(sk), &tcp_death_row);
			inet_twsk_put(inet_twsk(sk));
			sk = sk2;
			goto process;
		}
		/* Fall through to ACK */
	}
	case TCP_TW_ACK:
		tcp_v4_timewait_ack(sk, skb);
		break;
	case TCP_TW_RST:
		goto no_tcp_socket;
	case TCP_TW_SUCCESS:;
	}
	goto discard_it;
}

static struct timewait_sock_ops tcp_timewait_sock_ops = {
	.twsk_obj_size	= sizeof(struct tcp_timewait_sock),
	.twsk_unique	= tcp_twsk_unique,
	.twsk_destructor= tcp_twsk_destructor,
};

void inet_sk_rx_dst_set(struct sock *sk, const struct sk_buff *skb)
{
	struct dst_entry *dst = skb_dst(skb);

	dst_hold(dst);
	sk->sk_rx_dst = dst;
	inet_sk(sk)->rx_dst_ifindex = skb->skb_iif;
}
EXPORT_SYMBOL(inet_sk_rx_dst_set);

const struct inet_connection_sock_af_ops ipv4_specific = {
	.queue_xmit	   = ip_queue_xmit,
	.send_check	   = tcp_v4_send_check,
	.rebuild_header	   = inet_sk_rebuild_header,
	.sk_rx_dst_set	   = inet_sk_rx_dst_set,
	.conn_request	   = tcp_v4_conn_request,
	.syn_recv_sock	   = tcp_v4_syn_recv_sock,
	.net_header_len	   = sizeof(struct iphdr),
	.setsockopt	   = ip_setsockopt,
	.getsockopt	   = ip_getsockopt,
	.addr2sockaddr	   = inet_csk_addr2sockaddr,
	.sockaddr_len	   = sizeof(struct sockaddr_in),
	.bind_conflict	   = inet_csk_bind_conflict,
#ifdef CONFIG_COMPAT
	.compat_setsockopt = compat_ip_setsockopt,
	.compat_getsockopt = compat_ip_getsockopt,
#endif
};
EXPORT_SYMBOL(ipv4_specific);

#ifdef CONFIG_TCP_MD5SIG
static const struct tcp_sock_af_ops tcp_sock_ipv4_specific = {
	.md5_lookup	= tcp_v4_md5_lookup,
	.calc_md5_hash	= tcp_v4_md5_hash_skb,
	.md5_parse	= tcp_v4_parse_md5_keys,
};
#endif

/* NOTE: A lot of things set to zero explicitly by call to
 *       sk_alloc() so need not be done here.
 */
static int tcp_v4_init_sock(struct sock *sk)
{
	struct inet_connection_sock *icsk = inet_csk(sk);

	tcp_init_sock(sk);

	icsk->icsk_af_ops = &ipv4_specific;

#ifdef CONFIG_TCP_MD5SIG
	tcp_sk(sk)->af_specific = &tcp_sock_ipv4_specific;
#endif

	return 0;
}

void tcp_v4_destroy_sock(struct sock *sk)
{
	struct tcp_sock *tp = tcp_sk(sk);

	tcp_clear_xmit_timers(sk);

	tcp_cleanup_congestion_control(sk);

	/* Clean up the write buffer. */
	tcp_write_queue_purge(sk);

	/* Cleans up our, hopefully empty, out_of_order_queue. */
	__skb_queue_purge(&tp->out_of_order_queue);

#ifdef CONFIG_TCP_MD5SIG
	/* Clean up the MD5 key list, if any */
	if (tp->md5sig_info) {
		tcp_clear_md5_list(sk);
		kfree_rcu(tp->md5sig_info, rcu);
		tp->md5sig_info = NULL;
	}
#endif

#ifdef CONFIG_NET_DMA
	/* Cleans up our sk_async_wait_queue */
	__skb_queue_purge(&sk->sk_async_wait_queue);
#endif

	/* Clean prequeue, it must be empty really */
	__skb_queue_purge(&tp->ucopy.prequeue);

	/* Clean up a referenced TCP bind bucket. */
	if (inet_csk(sk)->icsk_bind_hash)
		inet_put_port(sk);

	/* TCP Cookie Transactions */
	if (tp->cookie_values != NULL) {
		kref_put(&tp->cookie_values->kref,
			 tcp_cookie_values_release);
		tp->cookie_values = NULL;
	}
	BUG_ON(tp->fastopen_rsk != NULL);

	/* If socket is aborted during connect operation */
	tcp_free_fastopen_req(tp);

	sk_sockets_allocated_dec(sk);
	sock_release_memcg(sk);
}
EXPORT_SYMBOL(tcp_v4_destroy_sock);

#ifdef CONFIG_PROC_FS
/* Proc filesystem TCP sock list dumping. */

static inline struct inet_timewait_sock *tw_head(struct hlist_nulls_head *head)
{
	return hlist_nulls_empty(head) ? NULL :
		list_entry(head->first, struct inet_timewait_sock, tw_node);
}

static inline struct inet_timewait_sock *tw_next(struct inet_timewait_sock *tw)
{
	return !is_a_nulls(tw->tw_node.next) ?
		hlist_nulls_entry(tw->tw_node.next, typeof(*tw), tw_node) : NULL;
}

/*
 * Get next listener socket follow cur.  If cur is NULL, get first socket
 * starting from bucket given in st->bucket; when st->bucket is zero the
 * very first socket in the hash table is returned.
 */
static void *listening_get_next(struct seq_file *seq, void *cur)
{
	struct inet_connection_sock *icsk;
	struct hlist_nulls_node *node;
	struct sock *sk = cur;
	struct inet_listen_hashbucket *ilb;
	struct tcp_iter_state *st = seq->private;
	struct net *net = seq_file_net(seq);

	if (!sk) {
		ilb = &tcp_hashinfo.listening_hash[st->bucket];
		spin_lock_bh(&ilb->lock);
		sk = sk_nulls_head(&ilb->head);
		st->offset = 0;
		goto get_sk;
	}
	ilb = &tcp_hashinfo.listening_hash[st->bucket];
	++st->num;
	++st->offset;

	if (st->state == TCP_SEQ_STATE_OPENREQ) {
		struct request_sock *req = cur;

		icsk = inet_csk(st->syn_wait_sk);
		req = req->dl_next;
		while (1) {
			while (req) {
				if (req->rsk_ops->family == st->family) {
					cur = req;
					goto out;
				}
				req = req->dl_next;
			}
			if (++st->sbucket >= icsk->icsk_accept_queue.listen_opt->nr_table_entries)
				break;
get_req:
			req = icsk->icsk_accept_queue.listen_opt->syn_table[st->sbucket];
		}
		sk	  = sk_nulls_next(st->syn_wait_sk);
		st->state = TCP_SEQ_STATE_LISTENING;
		read_unlock_bh(&icsk->icsk_accept_queue.syn_wait_lock);
	} else {
		icsk = inet_csk(sk);
		read_lock_bh(&icsk->icsk_accept_queue.syn_wait_lock);
		if (reqsk_queue_len(&icsk->icsk_accept_queue))
			goto start_req;
		read_unlock_bh(&icsk->icsk_accept_queue.syn_wait_lock);
		sk = sk_nulls_next(sk);
	}
get_sk:
	sk_nulls_for_each_from(sk, node) {
		if (!net_eq(sock_net(sk), net))
			continue;
		if (sk->sk_family == st->family) {
			cur = sk;
			goto out;
		}
		icsk = inet_csk(sk);
		read_lock_bh(&icsk->icsk_accept_queue.syn_wait_lock);
		if (reqsk_queue_len(&icsk->icsk_accept_queue)) {
start_req:
			st->uid		= sock_i_uid(sk);
			st->syn_wait_sk = sk;
			st->state	= TCP_SEQ_STATE_OPENREQ;
			st->sbucket	= 0;
			goto get_req;
		}
		read_unlock_bh(&icsk->icsk_accept_queue.syn_wait_lock);
	}
	spin_unlock_bh(&ilb->lock);
	st->offset = 0;
	if (++st->bucket < INET_LHTABLE_SIZE) {
		ilb = &tcp_hashinfo.listening_hash[st->bucket];
		spin_lock_bh(&ilb->lock);
		sk = sk_nulls_head(&ilb->head);
		goto get_sk;
	}
	cur = NULL;
out:
	return cur;
}

static void *listening_get_idx(struct seq_file *seq, loff_t *pos)
{
	struct tcp_iter_state *st = seq->private;
	void *rc;

	st->bucket = 0;
	st->offset = 0;
	rc = listening_get_next(seq, NULL);

	while (rc && *pos) {
		rc = listening_get_next(seq, rc);
		--*pos;
	}
	return rc;
}

2327 static inline bool empty_bucket(struct tcp_iter_state *st)
2329 return hlist_nulls_empty(&tcp_hashinfo.ehash[st->bucket].chain) &&
2330 hlist_nulls_empty(&tcp_hashinfo.ehash[st->bucket].twchain);
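
/*
 * Established sockets and timewait buckets share the same hash table:
 * each ehash bucket carries a ->chain of full sockets and a ->twchain of
 * inet_timewait_socks, both protected by the per-bucket lock returned by
 * inet_ehash_lockp(). empty_bucket() above lets the walkers below skip
 * empty buckets without taking that lock.
 */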
/*
 * Get the first established socket starting from the bucket given in
 * st->bucket. If st->bucket is zero, the very first socket in the hash
 * is returned.
 */
static void *established_get_first(struct seq_file *seq)
{
	struct tcp_iter_state *st = seq->private;
	struct net *net = seq_file_net(seq);
	void *rc = NULL;

	st->offset = 0;
	for (; st->bucket <= tcp_hashinfo.ehash_mask; ++st->bucket) {
		struct sock *sk;
		struct hlist_nulls_node *node;
		struct inet_timewait_sock *tw;
		spinlock_t *lock = inet_ehash_lockp(&tcp_hashinfo, st->bucket);

		/* Lockless fast path for the common case of empty buckets */
		if (empty_bucket(st))
			continue;

		spin_lock_bh(lock);
		sk_nulls_for_each(sk, node, &tcp_hashinfo.ehash[st->bucket].chain) {
			if (sk->sk_family != st->family ||
			    !net_eq(sock_net(sk), net)) {
				continue;
			}
			rc = sk;
			goto out;
		}
		st->state = TCP_SEQ_STATE_TIME_WAIT;
		inet_twsk_for_each(tw, node,
				   &tcp_hashinfo.ehash[st->bucket].twchain) {
			if (tw->tw_family != st->family ||
			    !net_eq(twsk_net(tw), net)) {
				continue;
			}
			rc = tw;
			goto out;
		}
		spin_unlock_bh(lock);
		st->state = TCP_SEQ_STATE_ESTABLISHED;
	}
out:
	return rc;
}

static void *established_get_next(struct seq_file *seq, void *cur)
{
	struct sock *sk = cur;
	struct inet_timewait_sock *tw;
	struct hlist_nulls_node *node;
	struct tcp_iter_state *st = seq->private;
	struct net *net = seq_file_net(seq);

	++st->num;
	++st->offset;

	if (st->state == TCP_SEQ_STATE_TIME_WAIT) {
		tw = cur;
		tw = tw_next(tw);
get_tw:
		while (tw && (tw->tw_family != st->family || !net_eq(twsk_net(tw), net))) {
			tw = tw_next(tw);
		}
		if (tw) {
			cur = tw;
			goto out;
		}
		spin_unlock_bh(inet_ehash_lockp(&tcp_hashinfo, st->bucket));
		st->state = TCP_SEQ_STATE_ESTABLISHED;

		/* Look for the next non-empty bucket */
		st->offset = 0;
		while (++st->bucket <= tcp_hashinfo.ehash_mask &&
				empty_bucket(st))
			;
		if (st->bucket > tcp_hashinfo.ehash_mask)
			return NULL;

		spin_lock_bh(inet_ehash_lockp(&tcp_hashinfo, st->bucket));
		sk = sk_nulls_head(&tcp_hashinfo.ehash[st->bucket].chain);
	} else
		sk = sk_nulls_next(sk);

	sk_nulls_for_each_from(sk, node) {
		if (sk->sk_family == st->family && net_eq(sock_net(sk), net))
			goto found;
	}

	st->state = TCP_SEQ_STATE_TIME_WAIT;
	tw = tw_head(&tcp_hashinfo.ehash[st->bucket].twchain);
	goto get_tw;
found:
	cur = sk;
out:
	return cur;
}

static void *established_get_idx(struct seq_file *seq, loff_t pos)
{
	struct tcp_iter_state *st = seq->private;
	void *rc;

	st->bucket = 0;
	rc = established_get_first(seq);

	while (rc && pos) {
		rc = established_get_next(seq, rc);
		--pos;
	}
	return rc;
}

static void *tcp_get_idx(struct seq_file *seq, loff_t pos)
{
	void *rc;
	struct tcp_iter_state *st = seq->private;

	st->state = TCP_SEQ_STATE_LISTENING;
	rc	  = listening_get_idx(seq, &pos);

	if (!rc) {
		st->state = TCP_SEQ_STATE_ESTABLISHED;
		rc	  = established_get_idx(seq, pos);
	}

	return rc;
}
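
/*
 * seq_file re-invokes ->start() for every read() chunk. Rather than
 * replaying the whole walk from slot 0 each time, tcp_seek_last_pos()
 * resumes from the bucket/offset recorded on the previous pass, and
 * tcp_seq_start() below only falls back to a full rescan when the saved
 * position no longer matches *pos.
 */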
static void *tcp_seek_last_pos(struct seq_file *seq)
{
	struct tcp_iter_state *st = seq->private;
	int offset = st->offset;
	int orig_num = st->num;
	void *rc = NULL;

	switch (st->state) {
	case TCP_SEQ_STATE_OPENREQ:
	case TCP_SEQ_STATE_LISTENING:
		if (st->bucket >= INET_LHTABLE_SIZE)
			break;
		st->state = TCP_SEQ_STATE_LISTENING;
		rc = listening_get_next(seq, NULL);
		while (offset-- && rc)
			rc = listening_get_next(seq, rc);
		if (rc)
			break;
		st->bucket = 0;
		/* Fallthrough */
	case TCP_SEQ_STATE_ESTABLISHED:
	case TCP_SEQ_STATE_TIME_WAIT:
		st->state = TCP_SEQ_STATE_ESTABLISHED;
		if (st->bucket > tcp_hashinfo.ehash_mask)
			break;
		rc = established_get_first(seq);
		while (offset-- && rc)
			rc = established_get_next(seq, rc);
	}

	st->num = orig_num;

	return rc;
}
static void *tcp_seq_start(struct seq_file *seq, loff_t *pos)
{
	struct tcp_iter_state *st = seq->private;
	void *rc;

	if (*pos && *pos == st->last_pos) {
		rc = tcp_seek_last_pos(seq);
		if (rc)
			goto out;
	}

	st->state = TCP_SEQ_STATE_LISTENING;
	st->num = 0;
	st->bucket = 0;
	st->offset = 0;
	rc = *pos ? tcp_get_idx(seq, *pos - 1) : SEQ_START_TOKEN;

out:
	st->last_pos = *pos;
	return rc;
}

static void *tcp_seq_next(struct seq_file *seq, void *v, loff_t *pos)
{
	struct tcp_iter_state *st = seq->private;
	void *rc = NULL;

	if (v == SEQ_START_TOKEN) {
		rc = tcp_get_idx(seq, 0);
		goto out;
	}

	switch (st->state) {
	case TCP_SEQ_STATE_OPENREQ:
	case TCP_SEQ_STATE_LISTENING:
		rc = listening_get_next(seq, v);
		if (!rc) {
			st->state = TCP_SEQ_STATE_ESTABLISHED;
			st->bucket = 0;
			st->offset = 0;
			rc	  = established_get_first(seq);
		}
		break;
	case TCP_SEQ_STATE_ESTABLISHED:
	case TCP_SEQ_STATE_TIME_WAIT:
		rc = established_get_next(seq, v);
		break;
	}
out:
	++*pos;
	st->last_pos = *pos;
	return rc;
}

static void tcp_seq_stop(struct seq_file *seq, void *v)
{
	struct tcp_iter_state *st = seq->private;

	switch (st->state) {
	case TCP_SEQ_STATE_OPENREQ:
		if (v) {
			struct inet_connection_sock *icsk = inet_csk(st->syn_wait_sk);
			read_unlock_bh(&icsk->icsk_accept_queue.syn_wait_lock);
		}
	case TCP_SEQ_STATE_LISTENING:
		if (v != SEQ_START_TOKEN)
			spin_unlock_bh(&tcp_hashinfo.listening_hash[st->bucket].lock);
		break;
	case TCP_SEQ_STATE_TIME_WAIT:
	case TCP_SEQ_STATE_ESTABLISHED:
		if (v)
			spin_unlock_bh(inet_ehash_lockp(&tcp_hashinfo, st->bucket));
		break;
	}
}
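
/*
 * tcp_seq_open() is shared by the IPv4 and IPv6 instances: the per-family
 * details (name, family, show routine) arrive via the tcp_seq_afinfo
 * attached to the proc entry, and only the family is copied into the
 * per-file iterator state here.
 */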
int tcp_seq_open(struct inode *inode, struct file *file)
{
	struct tcp_seq_afinfo *afinfo = PDE(inode)->data;
	struct tcp_iter_state *s;
	int err;

	err = seq_open_net(inode, file, &afinfo->seq_ops,
			   sizeof(struct tcp_iter_state));
	if (err < 0)
		return err;

	s = ((struct seq_file *)file->private_data)->private;
	s->family	= afinfo->family;
	s->last_pos	= 0;
	return 0;
}
EXPORT_SYMBOL(tcp_seq_open);

int tcp_proc_register(struct net *net, struct tcp_seq_afinfo *afinfo)
{
	int rc = 0;
	struct proc_dir_entry *p;

	afinfo->seq_ops.start	= tcp_seq_start;
	afinfo->seq_ops.next	= tcp_seq_next;
	afinfo->seq_ops.stop	= tcp_seq_stop;

	p = proc_create_data(afinfo->name, S_IRUGO, net->proc_net,
			     afinfo->seq_fops, afinfo);
	if (!p)
		rc = -ENOMEM;
	return rc;
}
EXPORT_SYMBOL(tcp_proc_register);

void tcp_proc_unregister(struct net *net, struct tcp_seq_afinfo *afinfo)
{
	proc_net_remove(net, afinfo->name);
}
EXPORT_SYMBOL(tcp_proc_unregister);
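
/*
 * The helpers below print records in the classic /proc/net/tcp layout:
 * addresses and ports are hexadecimal, with IPv4 addresses emitted as raw
 * __be32 values (so they read "backwards" on little-endian hosts). As an
 * illustration, a line for a listener on 127.0.0.1:80 would start roughly
 * like this (remaining fields made up):
 *
 *   0: 0100007F:0050 00000000:0000 0A 00000000:00000000 ...
 */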
static void get_openreq4(const struct sock *sk, const struct request_sock *req,
			 struct seq_file *f, int i, kuid_t uid, int *len)
{
	const struct inet_request_sock *ireq = inet_rsk(req);
	long delta = req->expires - jiffies;

	seq_printf(f, "%4d: %08X:%04X %08X:%04X"
		" %02X %08X:%08X %02X:%08lX %08X %5d %8d %u %d %pK%n",
		i,
		ireq->loc_addr,
		ntohs(inet_sk(sk)->inet_sport),
		ireq->rmt_addr,
		ntohs(ireq->rmt_port),
		TCP_SYN_RECV,
		0, 0, /* could print option size, but that is af dependent. */
		1,    /* timers active (only the expire timer) */
		jiffies_delta_to_clock_t(delta),
		req->num_timeout,
		from_kuid_munged(seq_user_ns(f), uid),
		0,  /* non standard timer */
		0, /* open_requests have no inode */
		atomic_read(&sk->sk_refcnt),
		req,
		len);
}
static void get_tcp4_sock(struct sock *sk, struct seq_file *f, int i, int *len)
{
	int timer_active;
	unsigned long timer_expires;
	const struct tcp_sock *tp = tcp_sk(sk);
	const struct inet_connection_sock *icsk = inet_csk(sk);
	const struct inet_sock *inet = inet_sk(sk);
	struct fastopen_queue *fastopenq = icsk->icsk_accept_queue.fastopenq;
	__be32 dest = inet->inet_daddr;
	__be32 src = inet->inet_rcv_saddr;
	__u16 destp = ntohs(inet->inet_dport);
	__u16 srcp = ntohs(inet->inet_sport);
	int rx_queue;

	if (icsk->icsk_pending == ICSK_TIME_RETRANS) {
		timer_active	= 1;
		timer_expires	= icsk->icsk_timeout;
	} else if (icsk->icsk_pending == ICSK_TIME_PROBE0) {
		timer_active	= 4;
		timer_expires	= icsk->icsk_timeout;
	} else if (timer_pending(&sk->sk_timer)) {
		timer_active	= 2;
		timer_expires	= sk->sk_timer.expires;
	} else {
		timer_active	= 0;
		timer_expires	= jiffies;
	}

	if (sk->sk_state == TCP_LISTEN)
		rx_queue = sk->sk_ack_backlog;
	else
		/*
		 * Because we don't lock the socket, we might find a
		 * transient negative value.
		 */
		rx_queue = max_t(int, tp->rcv_nxt - tp->copied_seq, 0);

	seq_printf(f, "%4d: %08X:%04X %08X:%04X %02X %08X:%08X %02X:%08lX "
			"%08X %5d %8d %lu %d %pK %lu %lu %u %u %d%n",
		i, src, srcp, dest, destp, sk->sk_state,
		tp->write_seq - tp->snd_una,
		rx_queue,
		timer_active,
		jiffies_delta_to_clock_t(timer_expires - jiffies),
		icsk->icsk_retransmits,
		from_kuid_munged(seq_user_ns(f), sock_i_uid(sk)),
		icsk->icsk_probes_out,
		sock_i_ino(sk),
		atomic_read(&sk->sk_refcnt), sk,
		jiffies_to_clock_t(icsk->icsk_rto),
		jiffies_to_clock_t(icsk->icsk_ack.ato),
		(icsk->icsk_ack.quick << 1) | icsk->icsk_ack.pingpong,
		tp->snd_cwnd,
		sk->sk_state == TCP_LISTEN ?
		    (fastopenq ? fastopenq->max_qlen : 0) :
		    (tcp_in_initial_slowstart(tp) ? -1 : tp->snd_ssthresh),
		len);
}
static void get_timewait4_sock(const struct inet_timewait_sock *tw,
			       struct seq_file *f, int i, int *len)
{
	__be32 dest, src;
	__u16 destp, srcp;
	long delta = tw->tw_ttd - jiffies;

	dest  = tw->tw_daddr;
	src   = tw->tw_rcv_saddr;
	destp = ntohs(tw->tw_dport);
	srcp  = ntohs(tw->tw_sport);

	seq_printf(f, "%4d: %08X:%04X %08X:%04X"
		" %02X %08X:%08X %02X:%08lX %08X %5d %8d %d %d %pK%n",
		i, src, srcp, dest, destp, tw->tw_substate, 0, 0,
		3, jiffies_delta_to_clock_t(delta), 0, 0, 0, 0,
		atomic_read(&tw->tw_refcnt), tw, len);
}

#define TMPSZ 150
static int tcp4_seq_show(struct seq_file *seq, void *v)
{
	struct tcp_iter_state *st;
	int len;

	if (v == SEQ_START_TOKEN) {
		seq_printf(seq, "%-*s\n", TMPSZ - 1,
			   "  sl  local_address rem_address   st tx_queue "
			   "rx_queue tr tm->when retrnsmt   uid  timeout "
			   "inode");
		goto out;
	}
	st = seq->private;

	switch (st->state) {
	case TCP_SEQ_STATE_LISTENING:
	case TCP_SEQ_STATE_ESTABLISHED:
		get_tcp4_sock(v, seq, st->num, &len);
		break;
	case TCP_SEQ_STATE_OPENREQ:
		get_openreq4(st->syn_wait_sk, v, seq, st->num, st->uid, &len);
		break;
	case TCP_SEQ_STATE_TIME_WAIT:
		get_timewait4_sock(v, seq, st->num, &len);
		break;
	}
	seq_printf(seq, "%*s\n", TMPSZ - 1 - len, "");
out:
	return 0;
}
static const struct file_operations tcp_afinfo_seq_fops = {
	.owner   = THIS_MODULE,
	.open    = tcp_seq_open,
	.read    = seq_read,
	.llseek  = seq_lseek,
	.release = seq_release_net
};

static struct tcp_seq_afinfo tcp4_seq_afinfo = {
	.name		= "tcp",
	.family		= AF_INET,
	.seq_fops	= &tcp_afinfo_seq_fops,
	.seq_ops	= {
		.show		= tcp4_seq_show,
	},
};

static int __net_init tcp4_proc_init_net(struct net *net)
{
	return tcp_proc_register(net, &tcp4_seq_afinfo);
}

static void __net_exit tcp4_proc_exit_net(struct net *net)
{
	tcp_proc_unregister(net, &tcp4_seq_afinfo);
}

static struct pernet_operations tcp4_net_ops = {
	.init = tcp4_proc_init_net,
	.exit = tcp4_proc_exit_net,
};

int __init tcp4_proc_init(void)
{
	return register_pernet_subsys(&tcp4_net_ops);
}

void tcp4_proc_exit(void)
{
	unregister_pernet_subsys(&tcp4_net_ops);
}
#endif /* CONFIG_PROC_FS */
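
/*
 * GRO receive/complete hooks for IPv4 TCP. tcp4_gro_receive() validates
 * the TCP checksum before handing the skb to the generic tcp_gro_receive()
 * aggregator: a CHECKSUM_COMPLETE skb only needs the pseudo-header folded
 * in, while CHECKSUM_NONE forces a full software checksum over the GRO
 * region. Anything that fails verification is flagged for flush so it
 * bypasses coalescing.
 */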
struct sk_buff **tcp4_gro_receive(struct sk_buff **head, struct sk_buff *skb)
{
	const struct iphdr *iph = skb_gro_network_header(skb);
	__wsum wsum;
	__sum16 sum;

	switch (skb->ip_summed) {
	case CHECKSUM_COMPLETE:
		if (!tcp_v4_check(skb_gro_len(skb), iph->saddr, iph->daddr,
				  skb->csum)) {
			skb->ip_summed = CHECKSUM_UNNECESSARY;
			break;
		}
flush:
		NAPI_GRO_CB(skb)->flush = 1;
		return NULL;

	case CHECKSUM_NONE:
		wsum = csum_tcpudp_nofold(iph->saddr, iph->daddr,
					  skb_gro_len(skb), IPPROTO_TCP, 0);
		sum = csum_fold(skb_checksum(skb,
					     skb_gro_offset(skb),
					     skb_gro_len(skb),
					     wsum));
		if (sum)
			goto flush;

		skb->ip_summed = CHECKSUM_UNNECESSARY;
		break;
	}

	return tcp_gro_receive(head, skb);
}

int tcp4_gro_complete(struct sk_buff *skb)
{
	const struct iphdr *iph = ip_hdr(skb);
	struct tcphdr *th = tcp_hdr(skb);

	th->check = ~tcp_v4_check(skb->len - skb_transport_offset(skb),
				  iph->saddr, iph->daddr, 0);
	skb_shinfo(skb)->gso_type = SKB_GSO_TCPV4;

	return tcp_gro_complete(skb);
}
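
/*
 * tcp_prot is the glue between the generic BSD socket layer and this
 * IPv4 TCP implementation: each entry maps a socket-level operation
 * (connect, sendmsg, setsockopt, ...) to its TCP handler, and the
 * hashinfo/twsk/rsk pointers tie the protocol to its lookup tables and
 * minisock types.
 */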
struct proto tcp_prot = {
	.name			= "TCP",
	.owner			= THIS_MODULE,
	.close			= tcp_close,
	.connect		= tcp_v4_connect,
	.disconnect		= tcp_disconnect,
	.accept			= inet_csk_accept,
	.ioctl			= tcp_ioctl,
	.init			= tcp_v4_init_sock,
	.destroy		= tcp_v4_destroy_sock,
	.shutdown		= tcp_shutdown,
	.setsockopt		= tcp_setsockopt,
	.getsockopt		= tcp_getsockopt,
	.recvmsg		= tcp_recvmsg,
	.sendmsg		= tcp_sendmsg,
	.sendpage		= tcp_sendpage,
	.backlog_rcv		= tcp_v4_do_rcv,
	.release_cb		= tcp_release_cb,
	.mtu_reduced		= tcp_v4_mtu_reduced,
	.hash			= inet_hash,
	.unhash			= inet_unhash,
	.get_port		= inet_csk_get_port,
	.enter_memory_pressure	= tcp_enter_memory_pressure,
	.sockets_allocated	= &tcp_sockets_allocated,
	.orphan_count		= &tcp_orphan_count,
	.memory_allocated	= &tcp_memory_allocated,
	.memory_pressure	= &tcp_memory_pressure,
	.sysctl_wmem		= sysctl_tcp_wmem,
	.sysctl_rmem		= sysctl_tcp_rmem,
	.max_header		= MAX_TCP_HEADER,
	.obj_size		= sizeof(struct tcp_sock),
	.slab_flags		= SLAB_DESTROY_BY_RCU,
	.twsk_prot		= &tcp_timewait_sock_ops,
	.rsk_prot		= &tcp_request_sock_ops,
	.h.hashinfo		= &tcp_hashinfo,
	.no_autobind		= true,
#ifdef CONFIG_COMPAT
	.compat_setsockopt	= compat_tcp_setsockopt,
	.compat_getsockopt	= compat_tcp_getsockopt,
#endif
#ifdef CONFIG_MEMCG_KMEM
	.init_cgroup		= tcp_init_cgroup,
	.destroy_cgroup		= tcp_destroy_cgroup,
	.proto_cgroup		= tcp_proto_cgroup,
#endif
};
EXPORT_SYMBOL(tcp_prot);
static int __net_init tcp_sk_init(struct net *net)
{
	return 0;
}

static void __net_exit tcp_sk_exit(struct net *net)
{
}

static void __net_exit tcp_sk_exit_batch(struct list_head *net_exit_list)
{
	inet_twsk_purge(&tcp_hashinfo, &tcp_death_row, AF_INET);
}

static struct pernet_operations __net_initdata tcp_sk_ops = {
	.init	    = tcp_sk_init,
	.exit	    = tcp_sk_exit,
	.exit_batch = tcp_sk_exit_batch,
};

void __init tcp_v4_init(void)
{
	inet_hashinfo_init(&tcp_hashinfo);
	if (register_pernet_subsys(&tcp_sk_ops))
		panic("Failed to create the TCP control socket.\n");
}