/*
 * INET		An implementation of the TCP/IP protocol suite for the LINUX
 *		operating system.  INET is implemented using the BSD Socket
 *		interface as the means of communication with the user level.
 *
 *		Implementation of the Transmission Control Protocol(TCP).
 *
 *		IPv4 specific functions
 *
 *		code split from:
 *		linux/ipv4/tcp_input.c
 *		linux/ipv4/tcp_output.c
 *
 *		See tcp.c for author information
 *
 *	This program is free software; you can redistribute it and/or
 *	modify it under the terms of the GNU General Public License
 *	as published by the Free Software Foundation; either version
 *	2 of the License, or (at your option) any later version.
 */
/*
 * Changes:
 *		David S. Miller	:	New socket lookup architecture.
 *					This code is dedicated to John Dyson.
 *		David S. Miller :	Change semantics of established hash,
 *					half is devoted to TIME_WAIT sockets
 *					and the rest go in the other half.
 *		Andi Kleen :		Add support for syncookies and fixed
 *					some bugs: ip options weren't passed to
 *					the TCP layer, missed a check for an
 *					ACK bit.
 *		Andi Kleen :		Implemented fast path mtu discovery.
 *					Fixed many serious bugs in the
 *					request_sock handling and moved
 *					most of it into the af independent code.
 *					Added tail drop and some other bugfixes.
 *					Added new listen semantics.
 *		Mike McLagan	:	Routing by source
 *	Juan Jose Ciarlante:		ip_dynaddr bits
 *		Andi Kleen:		various fixes.
 *	Vitaly E. Lavrov	:	Transparent proxy revived after year
 *					coma.
 *	Andi Kleen		:	Fix new listen.
 *	Andi Kleen		:	Fix accept error reporting.
 *	YOSHIFUJI Hideaki @USAGI and:	Support IPV6_V6ONLY socket option, which
 *	Alexey Kuznetsov		allow both IPv4 and IPv6 sockets to bind
 *					a single port at the same time.
 */
#define pr_fmt(fmt) "TCP: " fmt

#include <linux/bottom_half.h>
#include <linux/types.h>
#include <linux/fcntl.h>
#include <linux/module.h>
#include <linux/random.h>
#include <linux/cache.h>
#include <linux/jhash.h>
#include <linux/init.h>
#include <linux/times.h>
#include <linux/slab.h>

#include <net/net_namespace.h>
#include <net/icmp.h>
#include <net/inet_hashtables.h>
#include <net/tcp.h>
#include <net/transp_v6.h>
#include <net/ipv6.h>
#include <net/inet_common.h>
#include <net/timewait_sock.h>
#include <net/xfrm.h>
#include <net/secure_seq.h>
#include <net/busy_poll.h>

#include <linux/inet.h>
#include <linux/ipv6.h>
#include <linux/stddef.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
#include <linux/inetdevice.h>

#include <crypto/hash.h>
#include <linux/scatterlist.h>
#ifdef CONFIG_TCP_MD5SIG
static int tcp_v4_md5_hash_hdr(char *md5_hash, const struct tcp_md5sig_key *key,
			       __be32 daddr, __be32 saddr, const struct tcphdr *th);
#endif

struct inet_hashinfo tcp_hashinfo;
EXPORT_SYMBOL(tcp_hashinfo);
static u32 tcp_v4_init_seq(const struct sk_buff *skb)
{
	return secure_tcp_seq(ip_hdr(skb)->daddr,
			      ip_hdr(skb)->saddr,
			      tcp_hdr(skb)->dest,
			      tcp_hdr(skb)->source);
}

static u32 tcp_v4_init_ts_off(const struct net *net, const struct sk_buff *skb)
{
	return secure_tcp_ts_off(net, ip_hdr(skb)->daddr, ip_hdr(skb)->saddr);
}
int tcp_twsk_unique(struct sock *sk, struct sock *sktw, void *twp)
{
	const struct tcp_timewait_sock *tcptw = tcp_twsk(sktw);
	struct tcp_sock *tp = tcp_sk(sk);

	/* With PAWS, it is safe from the viewpoint
	   of data integrity. Even without PAWS it is safe provided sequence
	   spaces do not overlap i.e. at data rates <= 80Mbit/sec.

	   Actually, the idea is close to VJ's one, only timestamp cache is
	   held not per host, but per port pair and TW bucket is used as state
	   holder.

	   If TW bucket has been already destroyed we fall back to VJ's scheme
	   and use initial timestamp retrieved from peer table.
	 */
	if (tcptw->tw_ts_recent_stamp &&
	    (!twp || (sock_net(sk)->ipv4.sysctl_tcp_tw_reuse &&
		      get_seconds() - tcptw->tw_ts_recent_stamp > 1))) {
		tp->write_seq = tcptw->tw_snd_nxt + 65535 + 2;
		if (tp->write_seq == 0)
			tp->write_seq = 1;
		tp->rx_opt.ts_recent	   = tcptw->tw_ts_recent;
		tp->rx_opt.ts_recent_stamp = tcptw->tw_ts_recent_stamp;
		sock_hold(sktw);
		return 1;
	}

	return 0;
}
EXPORT_SYMBOL_GPL(tcp_twsk_unique);
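
/*
 * Illustrative sketch (not built as part of the kernel): the arithmetic
 * used by the reuse path above.  tw_snd_nxt is advanced past the largest
 * window (65535) the old peer could still accept, plus 2, so segments of
 * the reincarnated connection can never fall inside the old connection's
 * sequence space.  All values below are made up for demonstration.
 */
#if 0
#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint32_t tw_snd_nxt = 0xfffffff0u;	/* example: old SND.NXT near wrap */
	uint32_t write_seq = tw_snd_nxt + 65535 + 2;

	if (write_seq == 0)	/* 0 is reserved: it means "pick a fresh ISN" */
		write_seq = 1;
	printf("new ISN: %u\n", write_seq);
	return 0;
}
#endif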
/* This will initiate an outgoing connection. */
int tcp_v4_connect(struct sock *sk, struct sockaddr *uaddr, int addr_len)
{
	struct sockaddr_in *usin = (struct sockaddr_in *)uaddr;
	struct inet_sock *inet = inet_sk(sk);
	struct tcp_sock *tp = tcp_sk(sk);
	__be16 orig_sport, orig_dport;
	__be32 daddr, nexthop;
	struct flowi4 *fl4;
	struct rtable *rt;
	int err;
	struct ip_options_rcu *inet_opt;
	struct inet_timewait_death_row *tcp_death_row = &sock_net(sk)->ipv4.tcp_death_row;

	if (addr_len < sizeof(struct sockaddr_in))
		return -EINVAL;

	if (usin->sin_family != AF_INET)
		return -EAFNOSUPPORT;

	nexthop = daddr = usin->sin_addr.s_addr;
	inet_opt = rcu_dereference_protected(inet->inet_opt,
					     lockdep_sock_is_held(sk));
	if (inet_opt && inet_opt->opt.srr) {
		if (!daddr)
			return -EINVAL;
		nexthop = inet_opt->opt.faddr;
	}

	orig_sport = inet->inet_sport;
	orig_dport = usin->sin_port;
	fl4 = &inet->cork.fl.u.ip4;
	rt = ip_route_connect(fl4, nexthop, inet->inet_saddr,
			      RT_CONN_FLAGS(sk), sk->sk_bound_dev_if,
			      IPPROTO_TCP,
			      orig_sport, orig_dport, sk);
	if (IS_ERR(rt)) {
		err = PTR_ERR(rt);
		if (err == -ENETUNREACH)
			IP_INC_STATS(sock_net(sk), IPSTATS_MIB_OUTNOROUTES);
		return err;
	}

	if (rt->rt_flags & (RTCF_MULTICAST | RTCF_BROADCAST)) {
		ip_rt_put(rt);
		return -ENETUNREACH;
	}

	if (!inet_opt || !inet_opt->opt.srr)
		daddr = fl4->daddr;

	if (!inet->inet_saddr)
		inet->inet_saddr = fl4->saddr;
	sk_rcv_saddr_set(sk, inet->inet_saddr);

	if (tp->rx_opt.ts_recent_stamp && inet->inet_daddr != daddr) {
		/* Reset inherited state */
		tp->rx_opt.ts_recent	   = 0;
		tp->rx_opt.ts_recent_stamp = 0;
		if (likely(!tp->repair))
			tp->write_seq	   = 0;
	}

	inet->inet_dport = usin->sin_port;
	sk_daddr_set(sk, daddr);

	inet_csk(sk)->icsk_ext_hdr_len = 0;
	if (inet_opt)
		inet_csk(sk)->icsk_ext_hdr_len = inet_opt->opt.optlen;

	tp->rx_opt.mss_clamp = TCP_MSS_DEFAULT;

	/* Socket identity is still unknown (sport may be zero).
	 * However we set state to SYN-SENT and, without releasing the socket
	 * lock, select a source port, enter ourselves into the hash tables and
	 * complete initialization after this.
	 */
	tcp_set_state(sk, TCP_SYN_SENT);
	err = inet_hash_connect(tcp_death_row, sk);
	if (err)
		goto failure;

	sk_set_txhash(sk);

	rt = ip_route_newports(fl4, rt, orig_sport, orig_dport,
			       inet->inet_sport, inet->inet_dport, sk);
	if (IS_ERR(rt)) {
		err = PTR_ERR(rt);
		rt = NULL;
		goto failure;
	}
	/* OK, now commit destination to socket. */
	sk->sk_gso_type = SKB_GSO_TCPV4;
	sk_setup_caps(sk, &rt->dst);
	rt = NULL;

	if (likely(!tp->repair)) {
		if (!tp->write_seq)
			tp->write_seq = secure_tcp_seq(inet->inet_saddr,
						       inet->inet_daddr,
						       inet->inet_sport,
						       usin->sin_port);
		tp->tsoffset = secure_tcp_ts_off(sock_net(sk),
						 inet->inet_saddr,
						 inet->inet_daddr);
	}

	inet->inet_id = tp->write_seq ^ jiffies;

	if (tcp_fastopen_defer_connect(sk, &err))
		return err;
	if (err)
		goto failure;

	err = tcp_connect(sk);

	if (err)
		goto failure;

	return 0;

failure:
	/*
	 * This unhashes the socket and releases the local port,
	 * if necessary.
	 */
	tcp_set_state(sk, TCP_CLOSE);
	ip_rt_put(rt);
	sk->sk_route_caps = 0;
	inet->inet_dport = 0;
	return err;
}
EXPORT_SYMBOL(tcp_v4_connect);
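
/*
 * Illustrative sketch (not built as part of the kernel): tcp_v4_connect()
 * is what ultimately runs when user space calls connect() on an
 * AF_INET/SOCK_STREAM socket.  The address and port below are examples
 * only; a listener is assumed on 127.0.0.1:8080.
 */
#if 0
#include <arpa/inet.h>
#include <netinet/in.h>
#include <stdio.h>
#include <string.h>
#include <sys/socket.h>
#include <unistd.h>

int main(void)
{
	struct sockaddr_in dst;
	int fd = socket(AF_INET, SOCK_STREAM, 0);

	memset(&dst, 0, sizeof(dst));
	dst.sin_family = AF_INET;	/* anything else yields -EAFNOSUPPORT */
	dst.sin_port = htons(8080);
	inet_pton(AF_INET, "127.0.0.1", &dst.sin_addr);

	/* Kernel path: tcp_v4_connect() -> inet_hash_connect() -> tcp_connect() */
	if (connect(fd, (struct sockaddr *)&dst, sizeof(dst)) < 0)
		perror("connect");
	close(fd);
	return 0;
}
#endif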
/*
 * This routine reacts to ICMP_FRAG_NEEDED mtu indications as defined in RFC1191.
 * It can be called through tcp_release_cb() if socket was owned by user
 * at the time tcp_v4_err() was called to handle ICMP message.
 */
void tcp_v4_mtu_reduced(struct sock *sk)
{
	struct inet_sock *inet = inet_sk(sk);
	struct dst_entry *dst;
	u32 mtu;

	if ((1 << sk->sk_state) & (TCPF_LISTEN | TCPF_CLOSE))
		return;
	mtu = tcp_sk(sk)->mtu_info;
	dst = inet_csk_update_pmtu(sk, mtu);
	if (!dst)
		return;

	/* Something is about to be wrong... Remember soft error
	 * in case this connection will not be able to recover.
	 */
	if (mtu < dst_mtu(dst) && ip_dont_fragment(sk, dst))
		sk->sk_err_soft = EMSGSIZE;

	mtu = dst_mtu(dst);

	if (inet->pmtudisc != IP_PMTUDISC_DONT &&
	    ip_sk_accept_pmtu(sk) &&
	    inet_csk(sk)->icsk_pmtu_cookie > mtu) {
		tcp_sync_mss(sk, mtu);

		/* Resend the TCP packet because it's
		 * clear that the old packet has been
		 * dropped. This is the new "fast" path mtu
		 * discovery.
		 */
		tcp_simple_retransmit(sk);
	} /* else let the usual retransmit timer handle it */
}
EXPORT_SYMBOL(tcp_v4_mtu_reduced);
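
/*
 * Illustrative sketch (not built as part of the kernel): path MTU handling
 * is normally transparent, but user space can query the value the kernel
 * has discovered via the IP_MTU socket option on a connected TCP socket.
 * print_path_mtu() is a hypothetical helper name.
 */
#if 0
#include <netinet/in.h>
#include <stdio.h>
#include <sys/socket.h>

static void print_path_mtu(int fd)	/* fd: a connected TCP socket */
{
	int mtu;
	socklen_t len = sizeof(mtu);

	/* Reflects icsk_pmtu_cookie updates made by tcp_v4_mtu_reduced() */
	if (getsockopt(fd, IPPROTO_IP, IP_MTU, &mtu, &len) == 0)
		printf("current path MTU: %d\n", mtu);
}
#endif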
static void do_redirect(struct sk_buff *skb, struct sock *sk)
{
	struct dst_entry *dst = __sk_dst_check(sk, 0);

	if (dst)
		dst->ops->redirect(dst, sk, skb);
}
/* handle ICMP messages on TCP_NEW_SYN_RECV request sockets */
void tcp_req_err(struct sock *sk, u32 seq, bool abort)
{
	struct request_sock *req = inet_reqsk(sk);
	struct net *net = sock_net(sk);

	/* ICMPs are not backlogged, hence we cannot get
	 * an established socket here.
	 */
	if (seq != tcp_rsk(req)->snt_isn) {
		__NET_INC_STATS(net, LINUX_MIB_OUTOFWINDOWICMPS);
	} else if (abort) {
		/*
		 * Still in SYN_RECV, just remove it silently.
		 * There is no good way to pass the error to the newly
		 * created socket, and POSIX does not want network
		 * errors returned from accept().
		 */
		inet_csk_reqsk_queue_drop(req->rsk_listener, req);
		tcp_listendrop(req->rsk_listener);
	}
	reqsk_put(req);
}
EXPORT_SYMBOL(tcp_req_err);
/*
 * This routine is called by the ICMP module when it gets some
 * sort of error condition.  If err < 0 then the socket should
 * be closed and the error returned to the user.  If err > 0
 * it's just the icmp type << 8 | icmp code.  After adjustment
 * header points to the first 8 bytes of the tcp header.  We need
 * to find the appropriate port.
 *
 * The locking strategy used here is very "optimistic". When
 * someone else accesses the socket the ICMP is just dropped
 * and for some paths there is no check at all.
 * A more general error queue to queue errors for later handling
 * is probably better.
 */
void tcp_v4_err(struct sk_buff *icmp_skb, u32 info)
{
	const struct iphdr *iph = (const struct iphdr *)icmp_skb->data;
	struct tcphdr *th = (struct tcphdr *)(icmp_skb->data + (iph->ihl << 2));
	struct inet_connection_sock *icsk;
	struct tcp_sock *tp;
	struct inet_sock *inet;
	const int type = icmp_hdr(icmp_skb)->type;
	const int code = icmp_hdr(icmp_skb)->code;
	struct sock *sk;
	struct sk_buff *skb;
	struct request_sock *fastopen;
	u32 seq, snd_una;
	s32 remaining;
	u32 delta_us;
	int err;
	struct net *net = dev_net(icmp_skb->dev);

	sk = __inet_lookup_established(net, &tcp_hashinfo, iph->daddr,
				       th->dest, iph->saddr, ntohs(th->source),
				       inet_iif(icmp_skb), 0);
	if (!sk) {
		__ICMP_INC_STATS(net, ICMP_MIB_INERRORS);
		return;
	}
	if (sk->sk_state == TCP_TIME_WAIT) {
		inet_twsk_put(inet_twsk(sk));
		return;
	}
	seq = ntohl(th->seq);
	if (sk->sk_state == TCP_NEW_SYN_RECV)
		return tcp_req_err(sk, seq,
				   type == ICMP_PARAMETERPROB ||
				   type == ICMP_TIME_EXCEEDED ||
				   (type == ICMP_DEST_UNREACH &&
				    (code == ICMP_NET_UNREACH ||
				     code == ICMP_HOST_UNREACH)));
	bh_lock_sock(sk);
	/* If too many ICMPs get dropped on busy
	 * servers this needs to be solved differently.
	 * We do take care of PMTU discovery (RFC1191) special case :
	 * we can receive locally generated ICMP messages while socket is held.
	 */
	if (sock_owned_by_user(sk)) {
		if (!(type == ICMP_DEST_UNREACH && code == ICMP_FRAG_NEEDED))
			__NET_INC_STATS(net, LINUX_MIB_LOCKDROPPEDICMPS);
	}
	if (sk->sk_state == TCP_CLOSE)
		goto out;

	if (unlikely(iph->ttl < inet_sk(sk)->min_ttl)) {
		__NET_INC_STATS(net, LINUX_MIB_TCPMINTTLDROP);
		goto out;
	}

	icsk = inet_csk(sk);
	tp = tcp_sk(sk);
	/* XXX (TFO) - tp->snd_una should be ISN (tcp_create_openreq_child() */
	fastopen = tp->fastopen_rsk;
	snd_una = fastopen ? tcp_rsk(fastopen)->snt_isn : tp->snd_una;
	if (sk->sk_state != TCP_LISTEN &&
	    !between(seq, snd_una, tp->snd_nxt)) {
		__NET_INC_STATS(net, LINUX_MIB_OUTOFWINDOWICMPS);
		goto out;
	}

	switch (type) {
	case ICMP_REDIRECT:
		if (!sock_owned_by_user(sk))
			do_redirect(icmp_skb, sk);
		goto out;
	case ICMP_SOURCE_QUENCH:
		/* Just silently ignore these. */
		goto out;
	case ICMP_PARAMETERPROB:
		err = EPROTO;
		break;
	case ICMP_DEST_UNREACH:
		if (code > NR_ICMP_UNREACH)
			goto out;

		if (code == ICMP_FRAG_NEEDED) { /* PMTU discovery (RFC1191) */
			/* We are not interested in TCP_LISTEN and open_requests
			 * (SYN-ACKs sent out by Linux are always <576bytes so
			 * they should go through unfragmented).
			 */
			if (sk->sk_state == TCP_LISTEN)
				goto out;

			tp->mtu_info = info;
			if (!sock_owned_by_user(sk)) {
				tcp_v4_mtu_reduced(sk);
			} else {
				if (!test_and_set_bit(TCP_MTU_REDUCED_DEFERRED, &sk->sk_tsq_flags))
					sock_hold(sk);
			}
			goto out;
		}

		err = icmp_err_convert[code].errno;
		/* check if icmp_skb allows revert of backoff
		 * (see draft-zimmermann-tcp-lcd) */
		if (code != ICMP_NET_UNREACH && code != ICMP_HOST_UNREACH)
			break;
		if (seq != tp->snd_una || !icsk->icsk_retransmits ||
		    !icsk->icsk_backoff || fastopen)
			break;

		if (sock_owned_by_user(sk))
			break;

		icsk->icsk_backoff--;
		icsk->icsk_rto = tp->srtt_us ? __tcp_set_rto(tp) :
					       TCP_TIMEOUT_INIT;
		icsk->icsk_rto = inet_csk_rto_backoff(icsk, TCP_RTO_MAX);

		skb = tcp_rtx_queue_head(sk);
		BUG_ON(!skb);

		tcp_mstamp_refresh(tp);
		delta_us = (u32)(tp->tcp_mstamp - skb->skb_mstamp);
		remaining = icsk->icsk_rto -
			    usecs_to_jiffies(delta_us);

		if (remaining > 0) {
			inet_csk_reset_xmit_timer(sk, ICSK_TIME_RETRANS,
						  remaining, TCP_RTO_MAX);
		} else {
			/* RTO revert clocked out retransmission.
			 * Will retransmit now */
			tcp_retransmit_timer(sk);
		}

		break;
	case ICMP_TIME_EXCEEDED:
		err = EHOSTUNREACH;
		break;
	default:
		goto out;
	}
	switch (sk->sk_state) {
	case TCP_SYN_SENT:
	case TCP_SYN_RECV:
		/* Only in fast or simultaneous open. If a fast open socket
		 * is already accepted it is treated as a connected one below.
		 */
		if (fastopen && !fastopen->sk)
			break;

		if (!sock_owned_by_user(sk)) {
			sk->sk_err = err;

			sk->sk_error_report(sk);

			tcp_done(sk);
		} else {
			sk->sk_err_soft = err;
		}
		goto out;
	}

	/* If we've already connected we will keep trying
	 * until we time out, or the user gives up.
	 *
	 * rfc1122 4.2.3.9 allows to consider as hard errors
	 * only PROTO_UNREACH and PORT_UNREACH (well, FRAG_FAILED too,
	 * but it is obsoleted by pmtu discovery).
	 *
	 * Note, that in modern internet, where routing is unreliable
	 * and in each dark corner broken firewalls sit, sending random
	 * errors ordered by their masters even these two messages finally lose
	 * their original sense (even Linux sends invalid PORT_UNREACHs)
	 *
	 * Now we are in compliance with RFCs.
	 */

	inet = inet_sk(sk);
	if (!sock_owned_by_user(sk) && inet->recverr) {
		sk->sk_err = err;
		sk->sk_error_report(sk);
	} else { /* Only an error on timeout */
		sk->sk_err_soft = err;
	}

out:
	bh_unlock_sock(sk);
	sock_put(sk);
}
void __tcp_v4_send_check(struct sk_buff *skb, __be32 saddr, __be32 daddr)
{
	struct tcphdr *th = tcp_hdr(skb);

	if (skb->ip_summed == CHECKSUM_PARTIAL) {
		th->check = ~tcp_v4_check(skb->len, saddr, daddr, 0);
		skb->csum_start = skb_transport_header(skb) - skb->head;
		skb->csum_offset = offsetof(struct tcphdr, check);
	} else {
		th->check = tcp_v4_check(skb->len, saddr, daddr,
					 csum_partial(th,
						      th->doff << 2,
						      skb->csum));
	}
}

/* This routine computes an IPv4 TCP checksum. */
void tcp_v4_send_check(struct sock *sk, struct sk_buff *skb)
{
	const struct inet_sock *inet = inet_sk(sk);

	__tcp_v4_send_check(skb, inet->inet_saddr, inet->inet_daddr);
}
EXPORT_SYMBOL(tcp_v4_send_check);
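
/*
 * Illustrative sketch (not built as part of the kernel): the same
 * pseudo-header checksum in plain C, RFC 1071 style, useful for checking
 * what tcp_v4_check() produces.  tcp4_checksum() is a hypothetical helper;
 * addresses and length are passed in host byte order, and the checksum
 * field inside the segment is assumed to be zeroed first.
 */
#if 0
#include <stddef.h>
#include <stdint.h>

static uint16_t tcp4_checksum(uint32_t saddr, uint32_t daddr,
			      const uint8_t *seg, size_t len)
{
	uint64_t sum = 0;
	size_t i;

	/* Pseudo-header: saddr, daddr, zero pad, protocol (6), TCP length */
	sum += (saddr >> 16) + (saddr & 0xffff);
	sum += (daddr >> 16) + (daddr & 0xffff);
	sum += 6;		/* IPPROTO_TCP */
	sum += len;

	/* TCP header + payload as big-endian 16-bit words */
	for (i = 0; i + 1 < len; i += 2)
		sum += (seg[i] << 8) | seg[i + 1];
	if (len & 1)		/* odd trailing byte is padded with zero */
		sum += seg[len - 1] << 8;

	while (sum >> 16)	/* fold carries back into the low 16 bits */
		sum = (sum & 0xffff) + (sum >> 16);
	return (uint16_t)~sum;
}
#endif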
/*
 *	This routine will send an RST to the other tcp.
 *
 *	Someone asks: why I NEVER use socket parameters (TOS, TTL etc.)
 *		      for reset.
 *	Answer: if a packet caused RST, it is not for a socket
 *		existing in our system, if it is matched to a socket,
 *		it is just duplicate segment or bug in other side's TCP.
 *		So that we build reply only basing on parameters
 *		arrived with segment.
 *	Exception: precedence violation. We do not implement it in any case.
 */
static void tcp_v4_send_reset(const struct sock *sk, struct sk_buff *skb)
{
	const struct tcphdr *th = tcp_hdr(skb);
	struct {
		struct tcphdr th;
#ifdef CONFIG_TCP_MD5SIG
		__be32 opt[(TCPOLEN_MD5SIG_ALIGNED >> 2)];
#endif
	} rep;
	struct ip_reply_arg arg;
#ifdef CONFIG_TCP_MD5SIG
	struct tcp_md5sig_key *key = NULL;
	const __u8 *hash_location = NULL;
	unsigned char newhash[16];
	int genhash;
	struct sock *sk1 = NULL;
#endif
	struct net *net;

	/* Never send a reset in response to a reset. */
	if (th->rst)
		return;

	/* If sk not NULL, it means we did a successful lookup and incoming
	 * route had to be correct. prequeue might have dropped our dst.
	 */
	if (!sk && skb_rtable(skb)->rt_type != RTN_LOCAL)
		return;

	/* Swap the send and the receive. */
	memset(&rep, 0, sizeof(rep));
	rep.th.dest   = th->source;
	rep.th.source = th->dest;
	rep.th.doff   = sizeof(struct tcphdr) / 4;
	rep.th.rst    = 1;

	if (th->ack) {
		rep.th.seq = th->ack_seq;
	} else {
		rep.th.ack = 1;
		rep.th.ack_seq = htonl(ntohl(th->seq) + th->syn + th->fin +
				       skb->len - (th->doff << 2));
	}

	memset(&arg, 0, sizeof(arg));
	arg.iov[0].iov_base = (unsigned char *)&rep;
	arg.iov[0].iov_len  = sizeof(rep.th);

	net = sk ? sock_net(sk) : dev_net(skb_dst(skb)->dev);
#ifdef CONFIG_TCP_MD5SIG
	rcu_read_lock();
	hash_location = tcp_parse_md5sig_option(th);
	if (sk && sk_fullsock(sk)) {
		key = tcp_md5_do_lookup(sk, (union tcp_md5_addr *)
					&ip_hdr(skb)->saddr, AF_INET);
	} else if (hash_location) {
		/*
		 * active side is lost. Try to find listening socket through
		 * source port, and then find md5 key through listening socket.
		 * we are not losing security here:
		 * Incoming packet is checked with md5 hash with finding key,
		 * no RST generated if md5 hash doesn't match.
		 */
		sk1 = __inet_lookup_listener(net, &tcp_hashinfo, NULL, 0,
					     ip_hdr(skb)->saddr,
					     th->source, ip_hdr(skb)->daddr,
					     ntohs(th->source), inet_iif(skb),
					     inet_sdif(skb));
		/* don't send rst if it can't find key */
		if (!sk1)
			goto out;

		key = tcp_md5_do_lookup(sk1, (union tcp_md5_addr *)
					&ip_hdr(skb)->saddr, AF_INET);
		if (!key)
			goto out;

		genhash = tcp_v4_md5_hash_skb(newhash, key, NULL, skb);
		if (genhash || memcmp(hash_location, newhash, 16) != 0)
			goto out;
	}

	if (key) {
		rep.opt[0] = htonl((TCPOPT_NOP << 24) |
				   (TCPOPT_NOP << 16) |
				   (TCPOPT_MD5SIG << 8) |
				   TCPOLEN_MD5SIG);
		/* Update length and the length the header thinks exists */
		arg.iov[0].iov_len += TCPOLEN_MD5SIG_ALIGNED;
		rep.th.doff = arg.iov[0].iov_len / 4;

		tcp_v4_md5_hash_hdr((__u8 *) &rep.opt[1],
				    key, ip_hdr(skb)->saddr,
				    ip_hdr(skb)->daddr, &rep.th);
	}
#endif
	arg.csum = csum_tcpudp_nofold(ip_hdr(skb)->daddr,
				      ip_hdr(skb)->saddr, /* XXX */
				      arg.iov[0].iov_len, IPPROTO_TCP, 0);
	arg.csumoffset = offsetof(struct tcphdr, check) / 2;
	arg.flags = (sk && inet_sk_transparent(sk)) ? IP_REPLY_ARG_NOSRCCHECK : 0;

	/* When socket is gone, all binding information is lost.
	 * routing might fail in this case. No choice here, if we choose to force
	 * input interface, we will misroute in case of asymmetric route.
	 */
	if (sk)
		arg.bound_dev_if = sk->sk_bound_dev_if;

	BUILD_BUG_ON(offsetof(struct sock, sk_bound_dev_if) !=
		     offsetof(struct inet_timewait_sock, tw_bound_dev_if));

	arg.tos = ip_hdr(skb)->tos;
	arg.uid = sock_net_uid(net, sk && sk_fullsock(sk) ? sk : NULL);
	local_bh_disable();
	ip_send_unicast_reply(*this_cpu_ptr(net->ipv4.tcp_sk),
			      skb, &TCP_SKB_CB(skb)->header.h4.opt,
			      ip_hdr(skb)->saddr, ip_hdr(skb)->daddr,
			      &arg, arg.iov[0].iov_len);

	__TCP_INC_STATS(net, TCP_MIB_OUTSEGS);
	__TCP_INC_STATS(net, TCP_MIB_OUTRSTS);
	local_bh_enable();

#ifdef CONFIG_TCP_MD5SIG
out:
	rcu_read_unlock();
#endif
}
/* The code following below sending ACKs in SYN-RECV and TIME-WAIT states
   outside socket context is ugly, certainly. What can I do?
 */
static void tcp_v4_send_ack(const struct sock *sk,
			    struct sk_buff *skb, u32 seq, u32 ack,
			    u32 win, u32 tsval, u32 tsecr, int oif,
			    struct tcp_md5sig_key *key,
			    int reply_flags, u8 tos)
{
	const struct tcphdr *th = tcp_hdr(skb);
	struct {
		struct tcphdr th;
		__be32 opt[(TCPOLEN_TSTAMP_ALIGNED >> 2)
#ifdef CONFIG_TCP_MD5SIG
			   + (TCPOLEN_MD5SIG_ALIGNED >> 2)
#endif
			];
	} rep;
	struct net *net = sock_net(sk);
	struct ip_reply_arg arg;

	memset(&rep.th, 0, sizeof(struct tcphdr));
	memset(&arg, 0, sizeof(arg));

	arg.iov[0].iov_base = (unsigned char *)&rep;
	arg.iov[0].iov_len  = sizeof(rep.th);
	if (tsecr) {
		rep.opt[0] = htonl((TCPOPT_NOP << 24) | (TCPOPT_NOP << 16) |
				   (TCPOPT_TIMESTAMP << 8) |
				   TCPOLEN_TIMESTAMP);
		rep.opt[1] = htonl(tsval);
		rep.opt[2] = htonl(tsecr);
		arg.iov[0].iov_len += TCPOLEN_TSTAMP_ALIGNED;
	}

	/* Swap the send and the receive. */
	rep.th.dest    = th->source;
	rep.th.source  = th->dest;
	rep.th.doff    = arg.iov[0].iov_len / 4;
	rep.th.seq     = htonl(seq);
	rep.th.ack_seq = htonl(ack);
	rep.th.ack     = 1;
	rep.th.window  = htons(win);

#ifdef CONFIG_TCP_MD5SIG
	if (key) {
		int offset = (tsecr) ? 3 : 0;

		rep.opt[offset++] = htonl((TCPOPT_NOP << 24) |
					  (TCPOPT_NOP << 16) |
					  (TCPOPT_MD5SIG << 8) |
					  TCPOLEN_MD5SIG);
		arg.iov[0].iov_len += TCPOLEN_MD5SIG_ALIGNED;
		rep.th.doff = arg.iov[0].iov_len / 4;

		tcp_v4_md5_hash_hdr((__u8 *) &rep.opt[offset],
				    key, ip_hdr(skb)->saddr,
				    ip_hdr(skb)->daddr, &rep.th);
	}
#endif
	arg.flags = reply_flags;
	arg.csum = csum_tcpudp_nofold(ip_hdr(skb)->daddr,
				      ip_hdr(skb)->saddr, /* XXX */
				      arg.iov[0].iov_len, IPPROTO_TCP, 0);
	arg.csumoffset = offsetof(struct tcphdr, check) / 2;
	if (oif)
		arg.bound_dev_if = oif;
	arg.tos = tos;
	arg.uid = sock_net_uid(net, sk_fullsock(sk) ? sk : NULL);
	local_bh_disable();
	ip_send_unicast_reply(*this_cpu_ptr(net->ipv4.tcp_sk),
			      skb, &TCP_SKB_CB(skb)->header.h4.opt,
			      ip_hdr(skb)->saddr, ip_hdr(skb)->daddr,
			      &arg, arg.iov[0].iov_len);

	__TCP_INC_STATS(net, TCP_MIB_OUTSEGS);
	local_bh_enable();
}
static void tcp_v4_timewait_ack(struct sock *sk, struct sk_buff *skb)
{
	struct inet_timewait_sock *tw = inet_twsk(sk);
	struct tcp_timewait_sock *tcptw = tcp_twsk(sk);

	tcp_v4_send_ack(sk, skb,
			tcptw->tw_snd_nxt, tcptw->tw_rcv_nxt,
			tcptw->tw_rcv_wnd >> tw->tw_rcv_wscale,
			tcp_time_stamp_raw() + tcptw->tw_ts_offset,
			tcptw->tw_ts_recent,
			tw->tw_bound_dev_if,
			tcp_twsk_md5_key(tcptw),
			tw->tw_transparent ? IP_REPLY_ARG_NOSRCCHECK : 0,
			tw->tw_tos
			);

	inet_twsk_put(tw);
}
static void tcp_v4_reqsk_send_ack(const struct sock *sk, struct sk_buff *skb,
				  struct request_sock *req)
{
	/* sk->sk_state == TCP_LISTEN -> for regular TCP_SYN_RECV
	 * sk->sk_state == TCP_SYN_RECV -> for Fast Open.
	 */
	u32 seq = (sk->sk_state == TCP_LISTEN) ? tcp_rsk(req)->snt_isn + 1 :
					     tcp_sk(sk)->snd_nxt;

	/* RFC 7323 2.3
	 * The window field (SEG.WND) of every outgoing segment, with the
	 * exception of <SYN> segments, MUST be right-shifted by
	 * Rcv.Wind.Shift bits:
	 */
	tcp_v4_send_ack(sk, skb, seq,
			tcp_rsk(req)->rcv_nxt,
			req->rsk_rcv_wnd >> inet_rsk(req)->rcv_wscale,
			tcp_time_stamp_raw() + tcp_rsk(req)->ts_off,
			req->ts_recent,
			0,
			tcp_md5_do_lookup(sk, (union tcp_md5_addr *)&ip_hdr(skb)->daddr,
					  AF_INET),
			inet_rsk(req)->no_srccheck ? IP_REPLY_ARG_NOSRCCHECK : 0,
			ip_hdr(skb)->tos);
}
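
/*
 * Illustrative sketch (not built as part of the kernel): the RFC 7323
 * shift quoted above.  With a 256 KiB receive window and
 * Rcv.Wind.Shift = 4, the 16-bit window field carries 262144 >> 4 = 16384,
 * and the peer scales it back up.  Values are made up for demonstration.
 */
#if 0
#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint32_t rcv_wnd = 256 * 1024;		/* real window in bytes */
	uint8_t  wscale  = 4;			/* negotiated Rcv.Wind.Shift */
	uint16_t seg_wnd = rcv_wnd >> wscale;	/* what goes on the wire */

	printf("on-wire window: %u, reconstructed: %u\n",
	       seg_wnd, (uint32_t)seg_wnd << wscale);
	return 0;
}
#endif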
/*
 *	Send a SYN-ACK after having received a SYN.
 *	This still operates on a request_sock only, not on a big
 *	socket.
 */
static int tcp_v4_send_synack(const struct sock *sk, struct dst_entry *dst,
			      struct flowi *fl,
			      struct request_sock *req,
			      struct tcp_fastopen_cookie *foc,
			      enum tcp_synack_type synack_type)
{
	const struct inet_request_sock *ireq = inet_rsk(req);
	struct flowi4 fl4;
	int err = -1;
	struct sk_buff *skb;

	/* First, grab a route. */
	if (!dst && (dst = inet_csk_route_req(sk, &fl4, req)) == NULL)
		return -1;

	skb = tcp_make_synack(sk, dst, req, foc, synack_type);

	if (skb) {
		__tcp_v4_send_check(skb, ireq->ir_loc_addr, ireq->ir_rmt_addr);

		err = ip_build_and_send_pkt(skb, sk, ireq->ir_loc_addr,
					    ireq->ir_rmt_addr,
					    ireq->opt);
		err = net_xmit_eval(err);
	}

	return err;
}
/*
 *	IPv4 request_sock destructor.
 */
static void tcp_v4_reqsk_destructor(struct request_sock *req)
{
	kfree(inet_rsk(req)->opt);
}
#ifdef CONFIG_TCP_MD5SIG
/*
 * RFC2385 MD5 checksumming requires a mapping of
 * IP address->MD5 Key.
 * We need to maintain these in the sk structure.
 */

/* Find the Key structure for an address.  */
struct tcp_md5sig_key *tcp_md5_do_lookup(const struct sock *sk,
					 const union tcp_md5_addr *addr,
					 int family)
{
	const struct tcp_sock *tp = tcp_sk(sk);
	struct tcp_md5sig_key *key;
	const struct tcp_md5sig_info *md5sig;
	__be32 mask;
	struct tcp_md5sig_key *best_match = NULL;
	bool match;

	/* caller either holds rcu_read_lock() or socket lock */
	md5sig = rcu_dereference_check(tp->md5sig_info,
				       lockdep_sock_is_held(sk));
	if (!md5sig)
		return NULL;

	hlist_for_each_entry_rcu(key, &md5sig->head, node) {
		if (key->family != family)
			continue;

		if (family == AF_INET) {
			mask = inet_make_mask(key->prefixlen);
			match = (key->addr.a4.s_addr & mask) ==
				(addr->a4.s_addr & mask);
#if IS_ENABLED(CONFIG_IPV6)
		} else if (family == AF_INET6) {
			match = ipv6_prefix_equal(&key->addr.a6, &addr->a6,
						  key->prefixlen);
#endif
		} else {
			match = false;
		}

		if (match && (!best_match ||
			      key->prefixlen > best_match->prefixlen))
			best_match = key;
	}
	return best_match;
}
EXPORT_SYMBOL(tcp_md5_do_lookup);
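
/*
 * Illustrative sketch (not built as part of the kernel): the lookup above
 * is a longest-prefix match, so a key for 10.0.0.0/8 and one for
 * 10.1.2.0/24 can coexist, and peer 10.1.2.3 picks the /24.  The mask
 * arithmetic in isolation, with example addresses:
 */
#if 0
#include <arpa/inet.h>
#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint32_t peer   = ntohl(inet_addr("10.1.2.3"));
	uint32_t net8   = ntohl(inet_addr("10.0.0.0"));
	uint32_t net24  = ntohl(inet_addr("10.1.2.0"));
	uint32_t mask8  = ~0u << (32 - 8);	/* like inet_make_mask(8) */
	uint32_t mask24 = ~0u << (32 - 24);

	printf("/8 matches:  %d\n", (peer & mask8) == (net8 & mask8));
	printf("/24 matches: %d (longer prefix wins)\n",
	       (peer & mask24) == (net24 & mask24));
	return 0;
}
#endif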
static struct tcp_md5sig_key *tcp_md5_do_lookup_exact(const struct sock *sk,
						      const union tcp_md5_addr *addr,
						      int family, u8 prefixlen)
{
	const struct tcp_sock *tp = tcp_sk(sk);
	struct tcp_md5sig_key *key;
	unsigned int size = sizeof(struct in_addr);
	const struct tcp_md5sig_info *md5sig;

	/* caller either holds rcu_read_lock() or socket lock */
	md5sig = rcu_dereference_check(tp->md5sig_info,
				       lockdep_sock_is_held(sk));
	if (!md5sig)
		return NULL;
#if IS_ENABLED(CONFIG_IPV6)
	if (family == AF_INET6)
		size = sizeof(struct in6_addr);
#endif
	hlist_for_each_entry_rcu(key, &md5sig->head, node) {
		if (key->family != family)
			continue;
		if (!memcmp(&key->addr, addr, size) &&
		    key->prefixlen == prefixlen)
			return key;
	}
	return NULL;
}
struct tcp_md5sig_key *tcp_v4_md5_lookup(const struct sock *sk,
					 const struct sock *addr_sk)
{
	const union tcp_md5_addr *addr;

	addr = (const union tcp_md5_addr *)&addr_sk->sk_daddr;
	return tcp_md5_do_lookup(sk, addr, AF_INET);
}
EXPORT_SYMBOL(tcp_v4_md5_lookup);
/* This can be called on a newly created socket, from other files */
int tcp_md5_do_add(struct sock *sk, const union tcp_md5_addr *addr,
		   int family, u8 prefixlen, const u8 *newkey, u8 newkeylen,
		   gfp_t gfp)
{
	/* Add Key to the list */
	struct tcp_md5sig_key *key;
	struct tcp_sock *tp = tcp_sk(sk);
	struct tcp_md5sig_info *md5sig;

	key = tcp_md5_do_lookup_exact(sk, addr, family, prefixlen);
	if (key) {
		/* Pre-existing entry - just update that one. */
		memcpy(key->key, newkey, newkeylen);
		key->keylen = newkeylen;
		return 0;
	}

	md5sig = rcu_dereference_protected(tp->md5sig_info,
					   lockdep_sock_is_held(sk));
	if (!md5sig) {
		md5sig = kmalloc(sizeof(*md5sig), gfp);
		if (!md5sig)
			return -ENOMEM;

		sk_nocaps_add(sk, NETIF_F_GSO_MASK);
		INIT_HLIST_HEAD(&md5sig->head);
		rcu_assign_pointer(tp->md5sig_info, md5sig);
	}

	key = sock_kmalloc(sk, sizeof(*key), gfp);
	if (!key)
		return -ENOMEM;
	if (!tcp_alloc_md5sig_pool()) {
		sock_kfree_s(sk, key, sizeof(*key));
		return -ENOMEM;
	}

	memcpy(key->key, newkey, newkeylen);
	key->keylen = newkeylen;
	key->family = family;
	key->prefixlen = prefixlen;
	memcpy(&key->addr, addr,
	       (family == AF_INET6) ? sizeof(struct in6_addr) :
				      sizeof(struct in_addr));
	hlist_add_head_rcu(&key->node, &md5sig->head);
	return 0;
}
EXPORT_SYMBOL(tcp_md5_do_add);
int tcp_md5_do_del(struct sock *sk, const union tcp_md5_addr *addr, int family,
		   u8 prefixlen)
{
	struct tcp_md5sig_key *key;

	key = tcp_md5_do_lookup_exact(sk, addr, family, prefixlen);
	if (!key)
		return -ENOENT;
	hlist_del_rcu(&key->node);
	atomic_sub(sizeof(*key), &sk->sk_omem_alloc);
	kfree_rcu(key, rcu);
	return 0;
}
EXPORT_SYMBOL(tcp_md5_do_del);
static void tcp_clear_md5_list(struct sock *sk)
{
	struct tcp_sock *tp = tcp_sk(sk);
	struct tcp_md5sig_key *key;
	struct hlist_node *n;
	struct tcp_md5sig_info *md5sig;

	md5sig = rcu_dereference_protected(tp->md5sig_info, 1);

	hlist_for_each_entry_safe(key, n, &md5sig->head, node) {
		hlist_del_rcu(&key->node);
		atomic_sub(sizeof(*key), &sk->sk_omem_alloc);
		kfree_rcu(key, rcu);
	}
}
static int tcp_v4_parse_md5_keys(struct sock *sk, int optname,
				 char __user *optval, int optlen)
{
	struct tcp_md5sig cmd;
	struct sockaddr_in *sin = (struct sockaddr_in *)&cmd.tcpm_addr;
	u8 prefixlen = 32;

	if (optlen < sizeof(cmd))
		return -EINVAL;

	if (copy_from_user(&cmd, optval, sizeof(cmd)))
		return -EFAULT;

	if (sin->sin_family != AF_INET)
		return -EINVAL;

	if (optname == TCP_MD5SIG_EXT &&
	    cmd.tcpm_flags & TCP_MD5SIG_FLAG_PREFIX) {
		prefixlen = cmd.tcpm_prefixlen;
		if (prefixlen > 32)
			return -EINVAL;
	}

	if (!cmd.tcpm_keylen)
		return tcp_md5_do_del(sk, (union tcp_md5_addr *)&sin->sin_addr.s_addr,
				      AF_INET, prefixlen);

	if (cmd.tcpm_keylen > TCP_MD5SIG_MAXKEYLEN)
		return -EINVAL;

	return tcp_md5_do_add(sk, (union tcp_md5_addr *)&sin->sin_addr.s_addr,
			      AF_INET, prefixlen, cmd.tcpm_key, cmd.tcpm_keylen,
			      GFP_KERNEL);
}
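
/*
 * Illustrative sketch (not built as part of the kernel): installing an
 * RFC 2385 key from user space with the TCP_MD5SIG socket option parsed
 * above.  The peer must configure the same secret; set_md5_key() is a
 * hypothetical helper, and the secret/address are examples.
 */
#if 0
#include <arpa/inet.h>
#include <netinet/in.h>
#include <netinet/tcp.h>
#include <string.h>
#include <sys/socket.h>

static int set_md5_key(int fd, const char *peer_ip, const char *secret)
{
	struct tcp_md5sig md5;
	struct sockaddr_in *sin = (struct sockaddr_in *)&md5.tcpm_addr;

	memset(&md5, 0, sizeof(md5));
	sin->sin_family = AF_INET;
	inet_pton(AF_INET, peer_ip, &sin->sin_addr);
	md5.tcpm_keylen = strlen(secret);	/* <= TCP_MD5SIG_MAXKEYLEN */
	memcpy(md5.tcpm_key, secret, md5.tcpm_keylen);

	/* Lands in tcp_v4_parse_md5_keys() -> tcp_md5_do_add() */
	return setsockopt(fd, IPPROTO_TCP, TCP_MD5SIG, &md5, sizeof(md5));
}
#endif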
static int tcp_v4_md5_hash_headers(struct tcp_md5sig_pool *hp,
				   __be32 daddr, __be32 saddr,
				   const struct tcphdr *th, int nbytes)
{
	struct tcp4_pseudohdr *bp;
	struct scatterlist sg;
	struct tcphdr *_th;

	bp = hp->scratch;
	bp->saddr = saddr;
	bp->daddr = daddr;
	bp->pad = 0;
	bp->protocol = IPPROTO_TCP;
	bp->len = cpu_to_be16(nbytes);

	_th = (struct tcphdr *)(bp + 1);
	memcpy(_th, th, sizeof(*th));
	_th->check = 0;

	sg_init_one(&sg, bp, sizeof(*bp) + sizeof(*th));
	ahash_request_set_crypt(hp->md5_req, &sg, NULL,
				sizeof(*bp) + sizeof(*th));
	return crypto_ahash_update(hp->md5_req);
}
static int tcp_v4_md5_hash_hdr(char *md5_hash, const struct tcp_md5sig_key *key,
			       __be32 daddr, __be32 saddr, const struct tcphdr *th)
{
	struct tcp_md5sig_pool *hp;
	struct ahash_request *req;

	hp = tcp_get_md5sig_pool();
	if (!hp)
		goto clear_hash_noput;
	req = hp->md5_req;

	if (crypto_ahash_init(req))
		goto clear_hash;
	if (tcp_v4_md5_hash_headers(hp, daddr, saddr, th, th->doff << 2))
		goto clear_hash;
	if (tcp_md5_hash_key(hp, key))
		goto clear_hash;
	ahash_request_set_crypt(req, NULL, md5_hash, 0);
	if (crypto_ahash_final(req))
		goto clear_hash;

	tcp_put_md5sig_pool();
	return 0;

clear_hash:
	tcp_put_md5sig_pool();
clear_hash_noput:
	memset(md5_hash, 0, 16);
	return 1;
}
int tcp_v4_md5_hash_skb(char *md5_hash, const struct tcp_md5sig_key *key,
			const struct sock *sk,
			const struct sk_buff *skb)
{
	struct tcp_md5sig_pool *hp;
	struct ahash_request *req;
	const struct tcphdr *th = tcp_hdr(skb);
	__be32 saddr, daddr;

	if (sk) { /* valid for establish/request sockets */
		saddr = sk->sk_rcv_saddr;
		daddr = sk->sk_daddr;
	} else {
		const struct iphdr *iph = ip_hdr(skb);
		saddr = iph->saddr;
		daddr = iph->daddr;
	}

	hp = tcp_get_md5sig_pool();
	if (!hp)
		goto clear_hash_noput;
	req = hp->md5_req;

	if (crypto_ahash_init(req))
		goto clear_hash;

	if (tcp_v4_md5_hash_headers(hp, daddr, saddr, th, skb->len))
		goto clear_hash;
	if (tcp_md5_hash_skb_data(hp, skb, th->doff << 2))
		goto clear_hash;
	if (tcp_md5_hash_key(hp, key))
		goto clear_hash;
	ahash_request_set_crypt(req, NULL, md5_hash, 0);
	if (crypto_ahash_final(req))
		goto clear_hash;

	tcp_put_md5sig_pool();
	return 0;

clear_hash:
	tcp_put_md5sig_pool();
clear_hash_noput:
	memset(md5_hash, 0, 16);
	return 1;
}
EXPORT_SYMBOL(tcp_v4_md5_hash_skb);
#endif

/* Called with rcu_read_lock() */
static bool tcp_v4_inbound_md5_hash(const struct sock *sk,
				    const struct sk_buff *skb)
{
#ifdef CONFIG_TCP_MD5SIG
	/*
	 * This gets called for each TCP segment that arrives
	 * so we want to be efficient.
	 * We have 3 drop cases:
	 * o No MD5 hash and one expected.
	 * o MD5 hash and we're not expecting one.
	 * o MD5 hash and it's wrong.
	 */
	const __u8 *hash_location = NULL;
	struct tcp_md5sig_key *hash_expected;
	const struct iphdr *iph = ip_hdr(skb);
	const struct tcphdr *th = tcp_hdr(skb);
	int genhash;
	unsigned char newhash[16];

	hash_expected = tcp_md5_do_lookup(sk, (union tcp_md5_addr *)&iph->saddr,
					  AF_INET);
	hash_location = tcp_parse_md5sig_option(th);

	/* We've parsed the options - do we have a hash? */
	if (!hash_expected && !hash_location)
		return false;

	if (hash_expected && !hash_location) {
		NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPMD5NOTFOUND);
		return true;
	}

	if (!hash_expected && hash_location) {
		NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPMD5UNEXPECTED);
		return true;
	}

	/* Okay, so this is hash_expected and hash_location -
	 * so we need to calculate the checksum.
	 */
	genhash = tcp_v4_md5_hash_skb(newhash,
				      hash_expected,
				      NULL, skb);

	if (genhash || memcmp(hash_location, newhash, 16) != 0) {
		NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPMD5FAILURE);
		net_info_ratelimited("MD5 Hash failed for (%pI4, %d)->(%pI4, %d)%s\n",
				     &iph->saddr, ntohs(th->source),
				     &iph->daddr, ntohs(th->dest),
				     genhash ? " tcp_v4_calc_md5_hash failed"
					     : "");
		return true;
	}
	return false;
#endif
	return false;
}
static void tcp_v4_init_req(struct request_sock *req,
			    const struct sock *sk_listener,
			    struct sk_buff *skb)
{
	struct inet_request_sock *ireq = inet_rsk(req);

	sk_rcv_saddr_set(req_to_sk(req), ip_hdr(skb)->daddr);
	sk_daddr_set(req_to_sk(req), ip_hdr(skb)->saddr);
	ireq->opt = tcp_v4_save_options(sock_net(sk_listener), skb);
}

static struct dst_entry *tcp_v4_route_req(const struct sock *sk,
					  struct flowi *fl,
					  const struct request_sock *req)
{
	return inet_csk_route_req(sk, &fl->u.ip4, req);
}
struct request_sock_ops tcp_request_sock_ops __read_mostly = {
	.family		=	PF_INET,
	.obj_size	=	sizeof(struct tcp_request_sock),
	.rtx_syn_ack	=	tcp_rtx_synack,
	.send_ack	=	tcp_v4_reqsk_send_ack,
	.destructor	=	tcp_v4_reqsk_destructor,
	.send_reset	=	tcp_v4_send_reset,
	.syn_ack_timeout =	tcp_syn_ack_timeout,
};

static const struct tcp_request_sock_ops tcp_request_sock_ipv4_ops = {
	.mss_clamp	=	TCP_MSS_DEFAULT,
#ifdef CONFIG_TCP_MD5SIG
	.req_md5_lookup	=	tcp_v4_md5_lookup,
	.calc_md5_hash	=	tcp_v4_md5_hash_skb,
#endif
	.init_req	=	tcp_v4_init_req,
#ifdef CONFIG_SYN_COOKIES
	.cookie_init_seq =	cookie_v4_init_sequence,
#endif
	.route_req	=	tcp_v4_route_req,
	.init_seq	=	tcp_v4_init_seq,
	.init_ts_off	=	tcp_v4_init_ts_off,
	.send_synack	=	tcp_v4_send_synack,
};
int tcp_v4_conn_request(struct sock *sk, struct sk_buff *skb)
{
	/* Never answer to SYNs sent to broadcast or multicast */
	if (skb_rtable(skb)->rt_flags & (RTCF_BROADCAST | RTCF_MULTICAST))
		goto drop;

	return tcp_conn_request(&tcp_request_sock_ops,
				&tcp_request_sock_ipv4_ops, sk, skb);

drop:
	tcp_listendrop(sk);
	return 0;
}
EXPORT_SYMBOL(tcp_v4_conn_request);
/*
 * The three way handshake has completed - we got a valid synack -
 * now create the new socket.
 */
struct sock *tcp_v4_syn_recv_sock(const struct sock *sk, struct sk_buff *skb,
				  struct request_sock *req,
				  struct dst_entry *dst,
				  struct request_sock *req_unhash,
				  bool *own_req)
{
	struct inet_request_sock *ireq;
	struct inet_sock *newinet;
	struct tcp_sock *newtp;
	struct sock *newsk;
#ifdef CONFIG_TCP_MD5SIG
	struct tcp_md5sig_key *key;
#endif
	struct ip_options_rcu *inet_opt;

	if (sk_acceptq_is_full(sk))
		goto exit_overflow;

	newsk = tcp_create_openreq_child(sk, req, skb);
	if (!newsk)
		goto exit_nonewsk;

	newsk->sk_gso_type = SKB_GSO_TCPV4;
	inet_sk_rx_dst_set(newsk, skb);

	newtp		      = tcp_sk(newsk);
	newinet		      = inet_sk(newsk);
	ireq		      = inet_rsk(req);
	sk_daddr_set(newsk, ireq->ir_rmt_addr);
	sk_rcv_saddr_set(newsk, ireq->ir_loc_addr);
	newsk->sk_bound_dev_if = ireq->ir_iif;
	newinet->inet_saddr   = ireq->ir_loc_addr;
	inet_opt	      = ireq->opt;
	rcu_assign_pointer(newinet->inet_opt, inet_opt);
	ireq->opt	      = NULL;
	newinet->mc_index     = inet_iif(skb);
	newinet->mc_ttl	      = ip_hdr(skb)->ttl;
	newinet->rcv_tos      = ip_hdr(skb)->tos;
	inet_csk(newsk)->icsk_ext_hdr_len = 0;
	if (inet_opt)
		inet_csk(newsk)->icsk_ext_hdr_len = inet_opt->opt.optlen;
	newinet->inet_id = newtp->write_seq ^ jiffies;

	if (!dst) {
		dst = inet_csk_route_child_sock(sk, newsk, req);
		if (!dst)
			goto put_and_exit;
	} else {
		/* syncookie case : see end of cookie_v4_check() */
	}
	sk_setup_caps(newsk, dst);

	tcp_ca_openreq_child(newsk, dst);

	tcp_sync_mss(newsk, dst_mtu(dst));
	newtp->advmss = tcp_mss_clamp(tcp_sk(sk), dst_metric_advmss(dst));

	tcp_initialize_rcv_mss(newsk);

#ifdef CONFIG_TCP_MD5SIG
	/* Copy over the MD5 key from the original socket */
	key = tcp_md5_do_lookup(sk, (union tcp_md5_addr *)&newinet->inet_daddr,
				AF_INET);
	if (key) {
		/*
		 * We're using one, so create a matching key
		 * on the newsk structure. If we fail to get
		 * memory, then we end up not copying the key
		 * across. Shucks.
		 */
		tcp_md5_do_add(newsk, (union tcp_md5_addr *)&newinet->inet_daddr,
			       AF_INET, 32, key->key, key->keylen, GFP_ATOMIC);
		sk_nocaps_add(newsk, NETIF_F_GSO_MASK);
	}
#endif

	if (__inet_inherit_port(sk, newsk) < 0)
		goto put_and_exit;
	*own_req = inet_ehash_nolisten(newsk, req_to_sk(req_unhash));
	if (*own_req)
		tcp_move_syn(newtp, req);

	return newsk;

exit_overflow:
	NET_INC_STATS(sock_net(sk), LINUX_MIB_LISTENOVERFLOWS);
exit_nonewsk:
	dst_release(dst);
exit:
	tcp_listendrop(sk);
	return NULL;
put_and_exit:
	inet_csk_prepare_forced_close(newsk);
	tcp_done(newsk);
	goto exit;
}
EXPORT_SYMBOL(tcp_v4_syn_recv_sock);
static struct sock *tcp_v4_cookie_check(struct sock *sk, struct sk_buff *skb)
{
#ifdef CONFIG_SYN_COOKIES
	const struct tcphdr *th = tcp_hdr(skb);

	if (!th->syn)
		sk = cookie_v4_check(sk, skb);
#endif
	return sk;
}
/* The socket must have its spinlock held when we get
 * here, unless it is a TCP_LISTEN socket.
 *
 * We have a potential double-lock case here, so even when
 * doing backlog processing we use the BH locking scheme.
 * This is because we cannot sleep with the original spinlock
 * held.
 */
int tcp_v4_do_rcv(struct sock *sk, struct sk_buff *skb)
{
	struct sock *rsk;

	if (sk->sk_state == TCP_ESTABLISHED) { /* Fast path */
		struct dst_entry *dst = sk->sk_rx_dst;

		sock_rps_save_rxhash(sk, skb);
		sk_mark_napi_id(sk, skb);
		if (dst) {
			if (inet_sk(sk)->rx_dst_ifindex != skb->skb_iif ||
			    !dst->ops->check(dst, 0)) {
				dst_release(dst);
				sk->sk_rx_dst = NULL;
			}
		}
		tcp_rcv_established(sk, skb, tcp_hdr(skb));
		return 0;
	}

	if (tcp_checksum_complete(skb))
		goto csum_err;

	if (sk->sk_state == TCP_LISTEN) {
		struct sock *nsk = tcp_v4_cookie_check(sk, skb);

		if (!nsk)
			goto discard;
		if (nsk != sk) {
			if (tcp_child_process(sk, nsk, skb)) {
				rsk = nsk;
				goto reset;
			}
			return 0;
		}
	} else
		sock_rps_save_rxhash(sk, skb);

	if (tcp_rcv_state_process(sk, skb)) {
		rsk = sk;
		goto reset;
	}
	return 0;

reset:
	tcp_v4_send_reset(rsk, skb);
discard:
	kfree_skb(skb);
	/* Be careful here. If this function gets more complicated and
	 * gcc suffers from register pressure on the x86, sk (in %ebx)
	 * might be destroyed here. This current version compiles correctly,
	 * but you have been warned.
	 */
	return 0;

csum_err:
	TCP_INC_STATS(sock_net(sk), TCP_MIB_CSUMERRORS);
	TCP_INC_STATS(sock_net(sk), TCP_MIB_INERRS);
	goto discard;
}
EXPORT_SYMBOL(tcp_v4_do_rcv);
int tcp_v4_early_demux(struct sk_buff *skb)
{
	const struct iphdr *iph;
	const struct tcphdr *th;
	struct sock *sk;

	if (skb->pkt_type != PACKET_HOST)
		return 0;

	if (!pskb_may_pull(skb, skb_transport_offset(skb) + sizeof(struct tcphdr)))
		return 0;

	iph = ip_hdr(skb);
	th = tcp_hdr(skb);

	if (th->doff < sizeof(struct tcphdr) / 4)
		return 0;

	sk = __inet_lookup_established(dev_net(skb->dev), &tcp_hashinfo,
				       iph->saddr, th->source,
				       iph->daddr, ntohs(th->dest),
				       skb->skb_iif, inet_sdif(skb));
	if (sk) {
		skb->sk = sk;
		skb->destructor = sock_edemux;
		if (sk_fullsock(sk)) {
			struct dst_entry *dst = READ_ONCE(sk->sk_rx_dst);

			if (dst)
				dst = dst_check(dst, 0);
			if (dst &&
			    inet_sk(sk)->rx_dst_ifindex == skb->skb_iif)
				skb_dst_set_noref(skb, dst);
		}
	}
	return 0;
}
bool tcp_add_backlog(struct sock *sk, struct sk_buff *skb)
{
	u32 limit = sk->sk_rcvbuf + sk->sk_sndbuf;

	/* Only socket owner can try to collapse/prune rx queues
	 * to reduce memory overhead, so add a little headroom here.
	 * Few sockets backlog are possibly concurrently non empty.
	 */
	limit += 64*1024;

	/* In case all data was pulled from skb frags (in __pskb_pull_tail()),
	 * we can fix skb->truesize to its real value to avoid future drops.
	 * This is valid because skb is not yet charged to the socket.
	 * It has been noticed pure SACK packets were sometimes dropped
	 * (if cooked by drivers without copybreak feature).
	 */
	skb_condense(skb);

	if (unlikely(sk_add_backlog(sk, skb, limit))) {
		bh_unlock_sock(sk);
		__NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPBACKLOGDROP);
		return true;
	}
	return false;
}
EXPORT_SYMBOL(tcp_add_backlog);
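
/*
 * Illustrative sketch (not built as part of the kernel): approximating the
 * limit above from user space.  SO_RCVBUF/SO_SNDBUF as returned by
 * getsockopt() correspond to sk_rcvbuf/sk_sndbuf; the 64 KiB is the fixed
 * headroom added above.  print_backlog_limit() is a hypothetical helper.
 */
#if 0
#include <stdio.h>
#include <sys/socket.h>

static void print_backlog_limit(int fd)
{
	int rcv = 0, snd = 0;
	socklen_t len = sizeof(int);

	getsockopt(fd, SOL_SOCKET, SO_RCVBUF, &rcv, &len);
	len = sizeof(int);
	getsockopt(fd, SOL_SOCKET, SO_SNDBUF, &snd, &len);
	printf("approx backlog limit: %d bytes\n", rcv + snd + 64 * 1024);
}
#endif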
int tcp_filter(struct sock *sk, struct sk_buff *skb)
{
	struct tcphdr *th = (struct tcphdr *)skb->data;
	unsigned int eaten = skb->len;
	int err;

	err = sk_filter_trim_cap(sk, skb, th->doff * 4);
	if (!err) {
		eaten -= skb->len;
		TCP_SKB_CB(skb)->end_seq -= eaten;
	}
	return err;
}
EXPORT_SYMBOL(tcp_filter);
/*
 *	From tcp_input.c
 */

int tcp_v4_rcv(struct sk_buff *skb)
{
	struct net *net = dev_net(skb->dev);
	int sdif = inet_sdif(skb);
	const struct iphdr *iph;
	const struct tcphdr *th;
	bool refcounted;
	struct sock *sk;
	int ret;

	if (skb->pkt_type != PACKET_HOST)
		goto discard_it;

	/* Count it even if it's bad */
	__TCP_INC_STATS(net, TCP_MIB_INSEGS);

	if (!pskb_may_pull(skb, sizeof(struct tcphdr)))
		goto discard_it;

	th = (const struct tcphdr *)skb->data;

	if (unlikely(th->doff < sizeof(struct tcphdr) / 4))
		goto bad_packet;
	if (!pskb_may_pull(skb, th->doff * 4))
		goto discard_it;

	/* An explanation is required here, I think.
	 * Packet length and doff are validated by header prediction,
	 * provided case of th->doff==0 is eliminated.
	 * So, we defer the checks. */

	if (skb_checksum_init(skb, IPPROTO_TCP, inet_compute_pseudo))
		goto csum_error;

	th = (const struct tcphdr *)skb->data;
	iph = ip_hdr(skb);
	/* This is tricky : We move IPCB at its correct location into TCP_SKB_CB()
	 * barrier() makes sure compiler wont play fool^Waliasing games.
	 */
	memmove(&TCP_SKB_CB(skb)->header.h4, IPCB(skb),
		sizeof(struct inet_skb_parm));
	barrier();

	TCP_SKB_CB(skb)->seq = ntohl(th->seq);
	TCP_SKB_CB(skb)->end_seq = (TCP_SKB_CB(skb)->seq + th->syn + th->fin +
				    skb->len - th->doff * 4);
	TCP_SKB_CB(skb)->ack_seq = ntohl(th->ack_seq);
	TCP_SKB_CB(skb)->tcp_flags = tcp_flag_byte(th);
	TCP_SKB_CB(skb)->tcp_tw_isn = 0;
	TCP_SKB_CB(skb)->ip_dsfield = ipv4_get_dsfield(iph);
	TCP_SKB_CB(skb)->sacked	 = 0;
	TCP_SKB_CB(skb)->has_rxtstamp =
			skb->tstamp || skb_hwtstamps(skb)->hwtstamp;
lookup:
	sk = __inet_lookup_skb(&tcp_hashinfo, skb, __tcp_hdrlen(th), th->source,
			       th->dest, sdif, &refcounted);
	if (!sk)
		goto no_tcp_socket;

process:
	if (sk->sk_state == TCP_TIME_WAIT)
		goto do_time_wait;

	if (sk->sk_state == TCP_NEW_SYN_RECV) {
		struct request_sock *req = inet_reqsk(sk);
		struct sock *nsk;

		sk = req->rsk_listener;
		if (unlikely(tcp_v4_inbound_md5_hash(sk, skb))) {
			sk_drops_add(sk, skb);
			reqsk_put(req);
			goto discard_it;
		}
		if (unlikely(sk->sk_state != TCP_LISTEN)) {
			inet_csk_reqsk_queue_drop_and_put(sk, req);
			goto lookup;
		}
		/* We own a reference on the listener, increase it again
		 * as we might lose it too soon.
		 */
		sock_hold(sk);
		refcounted = true;
		nsk = NULL;
		if (!tcp_filter(sk, skb))
			nsk = tcp_check_req(sk, skb, req, false);
		if (!nsk) {
			reqsk_put(req);
			goto discard_and_relse;
		}
		if (nsk == sk) {
			reqsk_put(req);
		} else if (tcp_child_process(sk, nsk, skb)) {
			tcp_v4_send_reset(nsk, skb);
			goto discard_and_relse;
		} else {
			sock_put(sk);
			return 0;
		}
	}
	if (unlikely(iph->ttl < inet_sk(sk)->min_ttl)) {
		__NET_INC_STATS(net, LINUX_MIB_TCPMINTTLDROP);
		goto discard_and_relse;
	}

	if (!xfrm4_policy_check(sk, XFRM_POLICY_IN, skb))
		goto discard_and_relse;

	if (tcp_v4_inbound_md5_hash(sk, skb))
		goto discard_and_relse;

	nf_reset(skb);

	if (tcp_filter(sk, skb))
		goto discard_and_relse;
	th = (const struct tcphdr *)skb->data;
	iph = ip_hdr(skb);

	skb->dev = NULL;

	if (sk->sk_state == TCP_LISTEN) {
		ret = tcp_v4_do_rcv(sk, skb);
		goto put_and_return;
	}

	sk_incoming_cpu_update(sk);

	bh_lock_sock_nested(sk);
	tcp_segs_in(tcp_sk(sk), skb);
	ret = 0;
	if (!sock_owned_by_user(sk)) {
		ret = tcp_v4_do_rcv(sk, skb);
	} else if (tcp_add_backlog(sk, skb)) {
		goto discard_and_relse;
	}
	bh_unlock_sock(sk);

put_and_return:
	if (refcounted)
		sock_put(sk);

	return ret;
no_tcp_socket:
	if (!xfrm4_policy_check(NULL, XFRM_POLICY_IN, skb))
		goto discard_it;

	if (tcp_checksum_complete(skb)) {
csum_error:
		__TCP_INC_STATS(net, TCP_MIB_CSUMERRORS);
bad_packet:
		__TCP_INC_STATS(net, TCP_MIB_INERRS);
	} else {
		tcp_v4_send_reset(NULL, skb);
	}

discard_it:
	/* Discard frame. */
	kfree_skb(skb);
	return 0;

discard_and_relse:
	sk_drops_add(sk, skb);
	if (refcounted)
		sock_put(sk);
	goto discard_it;

do_time_wait:
	if (!xfrm4_policy_check(NULL, XFRM_POLICY_IN, skb)) {
		inet_twsk_put(inet_twsk(sk));
		goto discard_it;
	}

	if (tcp_checksum_complete(skb)) {
		inet_twsk_put(inet_twsk(sk));
		goto csum_error;
	}
	switch (tcp_timewait_state_process(inet_twsk(sk), skb, th)) {
	case TCP_TW_SYN: {
		struct sock *sk2 = inet_lookup_listener(dev_net(skb->dev),
							&tcp_hashinfo, skb,
							__tcp_hdrlen(th),
							iph->saddr, th->source,
							iph->daddr, th->dest,
							inet_iif(skb),
							sdif);
		if (sk2) {
			inet_twsk_deschedule_put(inet_twsk(sk));
			sk = sk2;
			refcounted = false;
			goto process;
		}
	}
		/* to ACK */
		/* fall through */
	case TCP_TW_ACK:
		tcp_v4_timewait_ack(sk, skb);
		break;
	case TCP_TW_RST:
		tcp_v4_send_reset(sk, skb);
		inet_twsk_deschedule_put(inet_twsk(sk));
		goto discard_it;
	case TCP_TW_SUCCESS:;
	}
	goto discard_it;
}
static struct timewait_sock_ops tcp_timewait_sock_ops = {
	.twsk_obj_size	= sizeof(struct tcp_timewait_sock),
	.twsk_unique	= tcp_twsk_unique,
	.twsk_destructor= tcp_twsk_destructor,
};
void inet_sk_rx_dst_set(struct sock *sk, const struct sk_buff *skb)
{
	struct dst_entry *dst = skb_dst(skb);

	if (dst && dst_hold_safe(dst)) {
		sk->sk_rx_dst = dst;
		inet_sk(sk)->rx_dst_ifindex = skb->skb_iif;
	}
}
EXPORT_SYMBOL(inet_sk_rx_dst_set);
const struct inet_connection_sock_af_ops ipv4_specific = {
	.queue_xmit	   = ip_queue_xmit,
	.send_check	   = tcp_v4_send_check,
	.rebuild_header	   = inet_sk_rebuild_header,
	.sk_rx_dst_set	   = inet_sk_rx_dst_set,
	.conn_request	   = tcp_v4_conn_request,
	.syn_recv_sock	   = tcp_v4_syn_recv_sock,
	.net_header_len	   = sizeof(struct iphdr),
	.setsockopt	   = ip_setsockopt,
	.getsockopt	   = ip_getsockopt,
	.addr2sockaddr	   = inet_csk_addr2sockaddr,
	.sockaddr_len	   = sizeof(struct sockaddr_in),
#ifdef CONFIG_COMPAT
	.compat_setsockopt = compat_ip_setsockopt,
	.compat_getsockopt = compat_ip_getsockopt,
#endif
	.mtu_reduced	   = tcp_v4_mtu_reduced,
};
EXPORT_SYMBOL(ipv4_specific);
#ifdef CONFIG_TCP_MD5SIG
static const struct tcp_sock_af_ops tcp_sock_ipv4_specific = {
	.md5_lookup	= tcp_v4_md5_lookup,
	.calc_md5_hash	= tcp_v4_md5_hash_skb,
	.md5_parse	= tcp_v4_parse_md5_keys,
};
#endif
/* NOTE: A lot of things set to zero explicitly by call to
 *       sk_alloc() so need not be done here.
 */
static int tcp_v4_init_sock(struct sock *sk)
{
	struct inet_connection_sock *icsk = inet_csk(sk);

	tcp_init_sock(sk);

	icsk->icsk_af_ops = &ipv4_specific;

#ifdef CONFIG_TCP_MD5SIG
	tcp_sk(sk)->af_specific = &tcp_sock_ipv4_specific;
#endif

	return 0;
}
void tcp_v4_destroy_sock(struct sock *sk)
{
	struct tcp_sock *tp = tcp_sk(sk);

	tcp_clear_xmit_timers(sk);

	tcp_cleanup_congestion_control(sk);

	tcp_cleanup_ulp(sk);

	/* Clean up the write buffer. */
	tcp_write_queue_purge(sk);

	/* Check if we want to disable active TFO */
	tcp_fastopen_active_disable_ofo_check(sk);

	/* Cleans up our, hopefully empty, out_of_order_queue. */
	skb_rbtree_purge(&tp->out_of_order_queue);

#ifdef CONFIG_TCP_MD5SIG
	/* Clean up the MD5 key list, if any */
	if (tp->md5sig_info) {
		tcp_clear_md5_list(sk);
		kfree_rcu(tp->md5sig_info, rcu);
		tp->md5sig_info = NULL;
	}
#endif

	/* Clean up a referenced TCP bind bucket. */
	if (inet_csk(sk)->icsk_bind_hash)
		inet_put_port(sk);

	BUG_ON(tp->fastopen_rsk);

	/* If socket is aborted during connect operation */
	tcp_free_fastopen_req(tp);
	tcp_saved_syn_free(tp);

	sk_sockets_allocated_dec(sk);
}
EXPORT_SYMBOL(tcp_v4_destroy_sock);
#ifdef CONFIG_PROC_FS
/* Proc filesystem TCP sock list dumping. */

/*
 * Get next listener socket follow cur.  If cur is NULL, get first socket
 * starting from bucket given in st->bucket; when st->bucket is zero the
 * very first socket in the hash table is returned.
 */
static void *listening_get_next(struct seq_file *seq, void *cur)
{
	struct tcp_iter_state *st = seq->private;
	struct net *net = seq_file_net(seq);
	struct inet_listen_hashbucket *ilb;
	struct sock *sk = cur;

	if (!sk) {
get_head:
		ilb = &tcp_hashinfo.listening_hash[st->bucket];
		spin_lock(&ilb->lock);
		sk = sk_head(&ilb->head);
		st->offset = 0;
		goto get_sk;
	}
	ilb = &tcp_hashinfo.listening_hash[st->bucket];
	++st->num;
	++st->offset;

	sk = sk_next(sk);
get_sk:
	sk_for_each_from(sk) {
		if (!net_eq(sock_net(sk), net))
			continue;
		if (sk->sk_family == st->family)
			return sk;
	}
	spin_unlock(&ilb->lock);
	st->offset = 0;
	if (++st->bucket < INET_LHTABLE_SIZE)
		goto get_head;
	return NULL;
}
static void *listening_get_idx(struct seq_file *seq, loff_t *pos)
{
	struct tcp_iter_state *st = seq->private;
	void *rc;

	st->bucket = 0;
	st->offset = 0;
	rc = listening_get_next(seq, NULL);

	while (rc && *pos) {
		rc = listening_get_next(seq, rc);
		--*pos;
	}
	return rc;
}
static inline bool empty_bucket(const struct tcp_iter_state *st)
{
	return hlist_nulls_empty(&tcp_hashinfo.ehash[st->bucket].chain);
}

/*
 * Get first established socket starting from bucket given in st->bucket.
 * If st->bucket is zero, the very first socket in the hash is returned.
 */
static void *established_get_first(struct seq_file *seq)
{
	struct tcp_iter_state *st = seq->private;
	struct net *net = seq_file_net(seq);
	void *rc = NULL;

	st->offset = 0;
	for (; st->bucket <= tcp_hashinfo.ehash_mask; ++st->bucket) {
		struct sock *sk;
		struct hlist_nulls_node *node;
		spinlock_t *lock = inet_ehash_lockp(&tcp_hashinfo, st->bucket);

		/* Lockless fast path for the common case of empty buckets */
		if (empty_bucket(st))
			continue;

		spin_lock_bh(lock);
		sk_nulls_for_each(sk, node, &tcp_hashinfo.ehash[st->bucket].chain) {
			if (sk->sk_family != st->family ||
			    !net_eq(sock_net(sk), net)) {
				continue;
			}
			rc = sk;
			goto out;
		}
		spin_unlock_bh(lock);
	}
out:
	return rc;
}
static void *established_get_next(struct seq_file *seq, void *cur)
{
	struct sock *sk = cur;
	struct hlist_nulls_node *node;
	struct tcp_iter_state *st = seq->private;
	struct net *net = seq_file_net(seq);

	++st->num;
	++st->offset;

	sk = sk_nulls_next(sk);

	sk_nulls_for_each_from(sk, node) {
		if (sk->sk_family == st->family && net_eq(sock_net(sk), net))
			return sk;
	}

	spin_unlock_bh(inet_ehash_lockp(&tcp_hashinfo, st->bucket));
	++st->bucket;
	return established_get_first(seq);
}
static void *established_get_idx(struct seq_file *seq, loff_t pos)
{
	struct tcp_iter_state *st = seq->private;
	void *rc;

	st->bucket = 0;
	rc = established_get_first(seq);

	while (rc && pos) {
		rc = established_get_next(seq, rc);
		--pos;
	}
	return rc;
}
static void *tcp_get_idx(struct seq_file *seq, loff_t pos)
{
	void *rc;
	struct tcp_iter_state *st = seq->private;

	st->state = TCP_SEQ_STATE_LISTENING;
	rc	  = listening_get_idx(seq, &pos);

	if (!rc) {
		st->state = TCP_SEQ_STATE_ESTABLISHED;
		rc	  = established_get_idx(seq, pos);
	}

	return rc;
}
static void *tcp_seek_last_pos(struct seq_file *seq)
{
	struct tcp_iter_state *st = seq->private;
	int offset = st->offset;
	int orig_num = st->num;
	void *rc = NULL;

	switch (st->state) {
	case TCP_SEQ_STATE_LISTENING:
		if (st->bucket >= INET_LHTABLE_SIZE)
			break;
		st->state = TCP_SEQ_STATE_LISTENING;
		rc = listening_get_next(seq, NULL);
		while (offset-- && rc)
			rc = listening_get_next(seq, rc);
		if (rc)
			break;
		st->bucket = 0;
		st->state = TCP_SEQ_STATE_ESTABLISHED;
		/* Fallthrough */
	case TCP_SEQ_STATE_ESTABLISHED:
		if (st->bucket > tcp_hashinfo.ehash_mask)
			break;
		rc = established_get_first(seq);
		while (offset-- && rc)
			rc = established_get_next(seq, rc);
	}

	st->num = orig_num;

	return rc;
}
static void *tcp_seq_start(struct seq_file *seq, loff_t *pos)
{
	struct tcp_iter_state *st = seq->private;
	void *rc;

	if (*pos && *pos == st->last_pos) {
		rc = tcp_seek_last_pos(seq);
		if (rc)
			goto out;
	}

	st->state = TCP_SEQ_STATE_LISTENING;
	st->num = 0;
	st->bucket = 0;
	st->offset = 0;
	rc = *pos ? tcp_get_idx(seq, *pos - 1) : SEQ_START_TOKEN;

out:
	st->last_pos = *pos;
	return rc;
}
static void *tcp_seq_next(struct seq_file *seq, void *v, loff_t *pos)
{
	struct tcp_iter_state *st = seq->private;
	void *rc = NULL;

	if (v == SEQ_START_TOKEN) {
		rc = tcp_get_idx(seq, 0);
		goto out;
	}

	switch (st->state) {
	case TCP_SEQ_STATE_LISTENING:
		rc = listening_get_next(seq, v);
		if (!rc) {
			st->state = TCP_SEQ_STATE_ESTABLISHED;
			st->bucket = 0;
			st->offset = 0;
			rc	  = established_get_first(seq);
		}
		break;
	case TCP_SEQ_STATE_ESTABLISHED:
		rc = established_get_next(seq, v);
		break;
	}
out:
	++*pos;
	st->last_pos = *pos;
	return rc;
}
static void tcp_seq_stop(struct seq_file *seq, void *v)
{
	struct tcp_iter_state *st = seq->private;

	switch (st->state) {
	case TCP_SEQ_STATE_LISTENING:
		if (v != SEQ_START_TOKEN)
			spin_unlock(&tcp_hashinfo.listening_hash[st->bucket].lock);
		break;
	case TCP_SEQ_STATE_ESTABLISHED:
		if (v)
			spin_unlock_bh(inet_ehash_lockp(&tcp_hashinfo, st->bucket));
		break;
	}
}
int tcp_seq_open(struct inode *inode, struct file *file)
{
	struct tcp_seq_afinfo *afinfo = PDE_DATA(inode);
	struct tcp_iter_state *s;
	int err;

	err = seq_open_net(inode, file, &afinfo->seq_ops,
			   sizeof(struct tcp_iter_state));
	if (err < 0)
		return err;

	s = ((struct seq_file *)file->private_data)->private;
	s->family		= afinfo->family;
	s->last_pos		= 0;
	return 0;
}
EXPORT_SYMBOL(tcp_seq_open);
int tcp_proc_register(struct net *net, struct tcp_seq_afinfo *afinfo)
{
	int rc = 0;
	struct proc_dir_entry *p;

	afinfo->seq_ops.start		= tcp_seq_start;
	afinfo->seq_ops.next		= tcp_seq_next;
	afinfo->seq_ops.stop		= tcp_seq_stop;

	p = proc_create_data(afinfo->name, S_IRUGO, net->proc_net,
			     afinfo->seq_fops, afinfo);
	if (!p)
		rc = -ENOMEM;
	return rc;
}
EXPORT_SYMBOL(tcp_proc_register);
void tcp_proc_unregister(struct net *net, struct tcp_seq_afinfo *afinfo)
{
	remove_proc_entry(afinfo->name, net->proc_net);
}
EXPORT_SYMBOL(tcp_proc_unregister);
static void get_openreq4(const struct request_sock *req,
			 struct seq_file *f, int i)
{
	const struct inet_request_sock *ireq = inet_rsk(req);
	long delta = req->rsk_timer.expires - jiffies;

	seq_printf(f, "%4d: %08X:%04X %08X:%04X"
		" %02X %08X:%08X %02X:%08lX %08X %5u %8d %u %d %pK",
		i,
		ireq->ir_loc_addr,
		ireq->ir_num,
		ireq->ir_rmt_addr,
		ntohs(ireq->ir_rmt_port),
		TCP_SYN_RECV,
		0, 0, /* could print option size, but that is af dependent. */
		1,    /* timers active (only the expire timer) */
		jiffies_delta_to_clock_t(delta),
		req->num_timeout,
		from_kuid_munged(seq_user_ns(f),
				 sock_i_uid(req->rsk_listener)),
		0,  /* non standard timer */
		0, /* open_requests have no inode */
		0,
		req);
}
static void get_tcp4_sock(struct sock *sk, struct seq_file *f, int i)
{
	int timer_active;
	unsigned long timer_expires;
	const struct tcp_sock *tp = tcp_sk(sk);
	const struct inet_connection_sock *icsk = inet_csk(sk);
	const struct inet_sock *inet = inet_sk(sk);
	const struct fastopen_queue *fastopenq = &icsk->icsk_accept_queue.fastopenq;
	__be32 dest = inet->inet_daddr;
	__be32 src = inet->inet_rcv_saddr;
	__u16 destp = ntohs(inet->inet_dport);
	__u16 srcp = ntohs(inet->inet_sport);
	int rx_queue;
	int state;

	if (icsk->icsk_pending == ICSK_TIME_RETRANS ||
	    icsk->icsk_pending == ICSK_TIME_REO_TIMEOUT ||
	    icsk->icsk_pending == ICSK_TIME_LOSS_PROBE) {
		timer_active	= 1;
		timer_expires	= icsk->icsk_timeout;
	} else if (icsk->icsk_pending == ICSK_TIME_PROBE0) {
		timer_active	= 4;
		timer_expires	= icsk->icsk_timeout;
	} else if (timer_pending(&sk->sk_timer)) {
		timer_active	= 2;
		timer_expires	= sk->sk_timer.expires;
	} else {
		timer_active	= 0;
		timer_expires = jiffies;
	}

	state = sk_state_load(sk);
	if (state == TCP_LISTEN)
		rx_queue = sk->sk_ack_backlog;
	else
		/* Because we don't lock the socket,
		 * we might find a transient negative value.
		 */
		rx_queue = max_t(int, tp->rcv_nxt - tp->copied_seq, 0);

	seq_printf(f, "%4d: %08X:%04X %08X:%04X %02X %08X:%08X %02X:%08lX "
			"%08X %5u %8d %lu %d %pK %lu %lu %u %u %d",
		i, src, srcp, dest, destp, state,
		tp->write_seq - tp->snd_una,
		rx_queue,
		timer_active,
		jiffies_delta_to_clock_t(timer_expires - jiffies),
		icsk->icsk_retransmits,
		from_kuid_munged(seq_user_ns(f), sock_i_uid(sk)),
		icsk->icsk_probes_out,
		sock_i_ino(sk),
		refcount_read(&sk->sk_refcnt), sk,
		jiffies_to_clock_t(icsk->icsk_rto),
		jiffies_to_clock_t(icsk->icsk_ack.ato),
		(icsk->icsk_ack.quick << 1) | icsk->icsk_ack.pingpong,
		tp->snd_cwnd,
		state == TCP_LISTEN ?
		    fastopenq->max_qlen :
		    (tcp_in_initial_slowstart(tp) ? -1 : tp->snd_ssthresh));
}
static void get_timewait4_sock(const struct inet_timewait_sock *tw,
			       struct seq_file *f, int i)
{
	long delta = tw->tw_timer.expires - jiffies;
	__be32 dest, src;
	__u16 destp, srcp;

	dest  = tw->tw_daddr;
	src   = tw->tw_rcv_saddr;
	destp = ntohs(tw->tw_dport);
	srcp  = ntohs(tw->tw_sport);

	seq_printf(f, "%4d: %08X:%04X %08X:%04X"
		" %02X %08X:%08X %02X:%08lX %08X %5d %8d %d %d %pK",
		i, src, srcp, dest, destp, tw->tw_substate, 0, 0,
		3, jiffies_delta_to_clock_t(delta), 0, 0, 0, 0,
		refcount_read(&tw->tw_refcnt), tw);
}

#define TMPSZ 150
static int tcp4_seq_show(struct seq_file *seq, void *v)
{
	struct tcp_iter_state *st;
	struct sock *sk = v;

	seq_setwidth(seq, TMPSZ - 1);
	if (v == SEQ_START_TOKEN) {
		seq_puts(seq, "  sl  local_address rem_address   st tx_queue "
			   "rx_queue tr tm->when retrnsmt   uid  timeout "
			   "inode");
		goto out;
	}
	st = seq->private;

	if (sk->sk_state == TCP_TIME_WAIT)
		get_timewait4_sock(v, seq, st->num);
	else if (sk->sk_state == TCP_NEW_SYN_RECV)
		get_openreq4(v, seq, st->num);
	else
		get_tcp4_sock(v, seq, st->num);
out:
	seq_pad(seq, '\n');
	return 0;
}
static const struct file_operations tcp_afinfo_seq_fops = {
	.owner   = THIS_MODULE,
	.open    = tcp_seq_open,
	.read    = seq_read,
	.llseek  = seq_lseek,
	.release = seq_release_net
};

static struct tcp_seq_afinfo tcp4_seq_afinfo = {
	.name		= "tcp",
	.family		= AF_INET,
	.seq_fops	= &tcp_afinfo_seq_fops,
	.seq_ops	= {
		.show		= tcp4_seq_show,
	},
};
static int __net_init tcp4_proc_init_net(struct net *net)
{
	return tcp_proc_register(net, &tcp4_seq_afinfo);
}

static void __net_exit tcp4_proc_exit_net(struct net *net)
{
	tcp_proc_unregister(net, &tcp4_seq_afinfo);
}

static struct pernet_operations tcp4_net_ops = {
	.init = tcp4_proc_init_net,
	.exit = tcp4_proc_exit_net,
};

int __init tcp4_proc_init(void)
{
	return register_pernet_subsys(&tcp4_net_ops);
}

void tcp4_proc_exit(void)
{
	unregister_pernet_subsys(&tcp4_net_ops);
}
#endif /* CONFIG_PROC_FS */
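
/*
 * Illustrative sketch (not built as part of the kernel): consuming the
 * /proc/net/tcp format emitted by tcp4_seq_show() above.  Addresses and
 * ports are printed as hex (%08X:%04X) in host byte order, so on a
 * little-endian machine "0100007F:1F90" is 127.0.0.1:8080.
 */
#if 0
#include <stdio.h>

int main(void)
{
	char line[256];
	FILE *fp = fopen("/proc/net/tcp", "r");

	if (!fp)
		return 1;
	fgets(line, sizeof(line), fp);		/* skip the header row */
	while (fgets(line, sizeof(line), fp)) {
		unsigned int local, rem, lport, rport, state;

		if (sscanf(line, "%*d: %x:%x %x:%x %x",
			   &local, &lport, &rem, &rport, &state) == 5)
			printf("%08X:%04X -> %08X:%04X state %02X\n",
			       local, lport, rem, rport, state);
	}
	fclose(fp);
	return 0;
}
#endif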
struct proto tcp_prot = {
	.name			= "TCP",
	.owner			= THIS_MODULE,
	.close			= tcp_close,
	.connect		= tcp_v4_connect,
	.disconnect		= tcp_disconnect,
	.accept			= inet_csk_accept,
	.ioctl			= tcp_ioctl,
	.init			= tcp_v4_init_sock,
	.destroy		= tcp_v4_destroy_sock,
	.shutdown		= tcp_shutdown,
	.setsockopt		= tcp_setsockopt,
	.getsockopt		= tcp_getsockopt,
	.keepalive		= tcp_set_keepalive,
	.recvmsg		= tcp_recvmsg,
	.sendmsg		= tcp_sendmsg,
	.sendpage		= tcp_sendpage,
	.backlog_rcv		= tcp_v4_do_rcv,
	.release_cb		= tcp_release_cb,
	.hash			= inet_hash,
	.unhash			= inet_unhash,
	.get_port		= inet_csk_get_port,
	.enter_memory_pressure	= tcp_enter_memory_pressure,
	.leave_memory_pressure	= tcp_leave_memory_pressure,
	.stream_memory_free	= tcp_stream_memory_free,
	.sockets_allocated	= &tcp_sockets_allocated,
	.orphan_count		= &tcp_orphan_count,
	.memory_allocated	= &tcp_memory_allocated,
	.memory_pressure	= &tcp_memory_pressure,
	.sysctl_mem		= sysctl_tcp_mem,
	.sysctl_wmem		= sysctl_tcp_wmem,
	.sysctl_rmem		= sysctl_tcp_rmem,
	.max_header		= MAX_TCP_HEADER,
	.obj_size		= sizeof(struct tcp_sock),
	.slab_flags		= SLAB_TYPESAFE_BY_RCU,
	.twsk_prot		= &tcp_timewait_sock_ops,
	.rsk_prot		= &tcp_request_sock_ops,
	.h.hashinfo		= &tcp_hashinfo,
	.no_autobind		= true,
#ifdef CONFIG_COMPAT
	.compat_setsockopt	= compat_tcp_setsockopt,
	.compat_getsockopt	= compat_tcp_getsockopt,
#endif
	.diag_destroy		= tcp_abort,
};
EXPORT_SYMBOL(tcp_prot);
static void __net_exit tcp_sk_exit(struct net *net)
{
	int cpu;

	for_each_possible_cpu(cpu)
		inet_ctl_sock_destroy(*per_cpu_ptr(net->ipv4.tcp_sk, cpu));
	free_percpu(net->ipv4.tcp_sk);
}
static int __net_init tcp_sk_init(struct net *net)
{
	int res, cpu, cnt;

	net->ipv4.tcp_sk = alloc_percpu(struct sock *);
	if (!net->ipv4.tcp_sk)
		return -ENOMEM;

	for_each_possible_cpu(cpu) {
		struct sock *sk;

		res = inet_ctl_sock_create(&sk, PF_INET, SOCK_RAW,
					   IPPROTO_TCP, net);
		if (res)
			goto fail;
		sock_set_flag(sk, SOCK_USE_WRITE_QUEUE);
		*per_cpu_ptr(net->ipv4.tcp_sk, cpu) = sk;
	}

	net->ipv4.sysctl_tcp_ecn = 2;
	net->ipv4.sysctl_tcp_ecn_fallback = 1;

	net->ipv4.sysctl_tcp_base_mss = TCP_BASE_MSS;
	net->ipv4.sysctl_tcp_probe_threshold = TCP_PROBE_THRESHOLD;
	net->ipv4.sysctl_tcp_probe_interval = TCP_PROBE_INTERVAL;

	net->ipv4.sysctl_tcp_keepalive_time = TCP_KEEPALIVE_TIME;
	net->ipv4.sysctl_tcp_keepalive_probes = TCP_KEEPALIVE_PROBES;
	net->ipv4.sysctl_tcp_keepalive_intvl = TCP_KEEPALIVE_INTVL;

	net->ipv4.sysctl_tcp_syn_retries = TCP_SYN_RETRIES;
	net->ipv4.sysctl_tcp_synack_retries = TCP_SYNACK_RETRIES;
	net->ipv4.sysctl_tcp_syncookies = 1;
	net->ipv4.sysctl_tcp_reordering = TCP_FASTRETRANS_THRESH;
	net->ipv4.sysctl_tcp_retries1 = TCP_RETR1;
	net->ipv4.sysctl_tcp_retries2 = TCP_RETR2;
	net->ipv4.sysctl_tcp_orphan_retries = 0;
	net->ipv4.sysctl_tcp_fin_timeout = TCP_FIN_TIMEOUT;
	net->ipv4.sysctl_tcp_notsent_lowat = UINT_MAX;
	net->ipv4.sysctl_tcp_tw_reuse = 0;

	cnt = tcp_hashinfo.ehash_mask + 1;
	net->ipv4.tcp_death_row.sysctl_max_tw_buckets = (cnt + 1) / 2;
	net->ipv4.tcp_death_row.hashinfo = &tcp_hashinfo;

	net->ipv4.sysctl_max_syn_backlog = max(128, cnt / 256);
	net->ipv4.sysctl_tcp_sack = 1;
	net->ipv4.sysctl_tcp_window_scaling = 1;
	net->ipv4.sysctl_tcp_timestamps = 1;

	net->ipv4.sysctl_tcp_fastopen = TFO_CLIENT_ENABLE;
	spin_lock_init(&net->ipv4.tcp_fastopen_ctx_lock);
	net->ipv4.sysctl_tcp_fastopen_blackhole_timeout = 60 * 60;
	atomic_set(&net->ipv4.tfo_active_disable_times, 0);

	return 0;

fail:
	tcp_sk_exit(net);

	return res;
}
static void __net_exit tcp_sk_exit_batch(struct list_head *net_exit_list)
{
	struct net *net;

	inet_twsk_purge(&tcp_hashinfo, AF_INET);

	list_for_each_entry(net, net_exit_list, exit_list)
		tcp_fastopen_ctx_destroy(net);
}

static struct pernet_operations __net_initdata tcp_sk_ops = {
       .init	   = tcp_sk_init,
       .exit	   = tcp_sk_exit,
       .exit_batch = tcp_sk_exit_batch,
};

void __init tcp_v4_init(void)
{
	if (register_pernet_subsys(&tcp_sk_ops))
		panic("Failed to create the TCP control socket.\n");
}