/*
 * INET		An implementation of the TCP/IP protocol suite for the LINUX
 *		operating system.  INET is implemented using the BSD Socket
 *		interface as the means of communication with the user level.
 *
 *		Implementation of the Transmission Control Protocol (TCP).
 *
 *		IPv4 specific functions
 *
 *		code split from:
 *		linux/ipv4/tcp.c
 *		linux/ipv4/tcp_input.c
 *		linux/ipv4/tcp_output.c
 *
 *		See tcp.c for author information
 *
 *	This program is free software; you can redistribute it and/or
 *	modify it under the terms of the GNU General Public License
 *	as published by the Free Software Foundation; either version
 *	2 of the License, or (at your option) any later version.
 */

/*
 * Changes:
 *		David S. Miller	:	New socket lookup architecture.
 *					This code is dedicated to John Dyson.
 *		David S. Miller :	Change semantics of established hash,
 *					half is devoted to TIME_WAIT sockets
 *					and the rest go in the other half.
 *		Andi Kleen :		Add support for syncookies and fixed
 *					some bugs: ip options weren't passed to
 *					the TCP layer, missed a check for an
 *					ACK bit.
 *		Andi Kleen :		Implemented fast path mtu discovery.
 *					Fixed many serious bugs in the
 *					request_sock handling and moved
 *					most of it into the af independent code.
 *					Added tail drop and some other bugfixes.
 *					Added new listen semantics.
 *		Mike McLagan	:	Routing by source
 *	Juan Jose Ciarlante:		ip_dynaddr bits
 *		Andi Kleen:		various fixes.
 *	Vitaly E. Lavrov	:	Transparent proxy revived after year
 *					coma.
 *	Andi Kleen		:	Fix new listen.
 *	Andi Kleen		:	Fix accept error reporting.
 *	YOSHIFUJI Hideaki @USAGI and:	Support IPV6_V6ONLY socket option, which
 *	Alexey Kuznetsov		allow both IPv4 and IPv6 sockets to bind
 *					a single port at the same time.
 */
#define pr_fmt(fmt) "TCP: " fmt

#include <linux/bottom_half.h>
#include <linux/types.h>
#include <linux/fcntl.h>
#include <linux/module.h>
#include <linux/random.h>
#include <linux/cache.h>
#include <linux/jhash.h>
#include <linux/init.h>
#include <linux/times.h>
#include <linux/slab.h>

#include <net/net_namespace.h>
#include <net/icmp.h>
#include <net/inet_hashtables.h>
#include <net/tcp.h>
#include <net/transp_v6.h>
#include <net/ipv6.h>
#include <net/inet_common.h>
#include <net/timewait_sock.h>
#include <net/xfrm.h>
#include <net/secure_seq.h>
#include <net/busy_poll.h>

#include <linux/inet.h>
#include <linux/ipv6.h>
#include <linux/stddef.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
#include <linux/inetdevice.h>

#include <crypto/hash.h>
#include <linux/scatterlist.h>

#include <trace/events/tcp.h>
#ifdef CONFIG_TCP_MD5SIG
static int tcp_v4_md5_hash_hdr(char *md5_hash, const struct tcp_md5sig_key *key,
			       __be32 daddr, __be32 saddr, const struct tcphdr *th);
#endif

struct inet_hashinfo tcp_hashinfo;
EXPORT_SYMBOL(tcp_hashinfo);
static u32 tcp_v4_init_seq(const struct sk_buff *skb)
{
	return secure_tcp_seq(ip_hdr(skb)->daddr,
			      ip_hdr(skb)->saddr,
			      tcp_hdr(skb)->dest,
			      tcp_hdr(skb)->source);
}

static u32 tcp_v4_init_ts_off(const struct net *net, const struct sk_buff *skb)
{
	return secure_tcp_ts_off(net, ip_hdr(skb)->daddr, ip_hdr(skb)->saddr);
}
int tcp_twsk_unique(struct sock *sk, struct sock *sktw, void *twp)
{
	const struct tcp_timewait_sock *tcptw = tcp_twsk(sktw);
	struct tcp_sock *tp = tcp_sk(sk);

	/* With PAWS, it is safe from the viewpoint
	   of data integrity. Even without PAWS it is safe provided sequence
	   spaces do not overlap i.e. at data rates <= 80Mbit/sec.

	   Actually, the idea is close to VJ's one, only timestamp cache is
	   held not per host, but per port pair and TW bucket is used as state
	   holder.

	   If TW bucket has been already destroyed we fall back to VJ's scheme
	   and use initial timestamp retrieved from peer table.
	 */
	if (tcptw->tw_ts_recent_stamp &&
	    (!twp || (sock_net(sk)->ipv4.sysctl_tcp_tw_reuse &&
			     get_seconds() - tcptw->tw_ts_recent_stamp > 1))) {
		tp->write_seq = tcptw->tw_snd_nxt + 65535 + 2;
		if (tp->write_seq == 0)
			tp->write_seq = 1;
		tp->rx_opt.ts_recent	   = tcptw->tw_ts_recent;
		tp->rx_opt.ts_recent_stamp = tcptw->tw_ts_recent_stamp;
		sock_hold(sktw);
		return 1;
	}

	return 0;
}
EXPORT_SYMBOL_GPL(tcp_twsk_unique);
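
/* Illustrative user-space sketch (not part of this file): the
 * sysctl_tcp_tw_reuse knob consulted above is exposed as
 * /proc/sys/net/ipv4/tcp_tw_reuse; a test harness running as root could
 * flip it like this (a minimal sketch, path and privileges assumed):
 *
 *	#include <fcntl.h>
 *	#include <unistd.h>
 *
 *	static int set_tw_reuse(int on)
 *	{
 *		int fd = open("/proc/sys/net/ipv4/tcp_tw_reuse", O_WRONLY);
 *
 *		if (fd < 0)
 *			return -1;
 *		if (write(fd, on ? "1" : "0", 1) != 1) {
 *			close(fd);
 *			return -1;
 *		}
 *		return close(fd);
 *	}
 */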
/* This will initiate an outgoing connection. */
int tcp_v4_connect(struct sock *sk, struct sockaddr *uaddr, int addr_len)
{
	struct sockaddr_in *usin = (struct sockaddr_in *)uaddr;
	struct inet_sock *inet = inet_sk(sk);
	struct tcp_sock *tp = tcp_sk(sk);
	__be16 orig_sport, orig_dport;
	__be32 daddr, nexthop;
	struct flowi4 *fl4;
	struct rtable *rt;
	int err;
	struct ip_options_rcu *inet_opt;
	struct inet_timewait_death_row *tcp_death_row = &sock_net(sk)->ipv4.tcp_death_row;

	if (addr_len < sizeof(struct sockaddr_in))
		return -EINVAL;

	if (usin->sin_family != AF_INET)
		return -EAFNOSUPPORT;

	nexthop = daddr = usin->sin_addr.s_addr;
	inet_opt = rcu_dereference_protected(inet->inet_opt,
					     lockdep_sock_is_held(sk));
	if (inet_opt && inet_opt->opt.srr) {
		if (!daddr)
			return -EINVAL;
		nexthop = inet_opt->opt.faddr;
	}

	orig_sport = inet->inet_sport;
	orig_dport = usin->sin_port;
	fl4 = &inet->cork.fl.u.ip4;
	rt = ip_route_connect(fl4, nexthop, inet->inet_saddr,
			      RT_CONN_FLAGS(sk), sk->sk_bound_dev_if,
			      IPPROTO_TCP,
			      orig_sport, orig_dport, sk);
	if (IS_ERR(rt)) {
		err = PTR_ERR(rt);
		if (err == -ENETUNREACH)
			IP_INC_STATS(sock_net(sk), IPSTATS_MIB_OUTNOROUTES);
		return err;
	}

	if (rt->rt_flags & (RTCF_MULTICAST | RTCF_BROADCAST)) {
		ip_rt_put(rt);
		return -ENETUNREACH;
	}

	if (!inet_opt || !inet_opt->opt.srr)
		daddr = fl4->daddr;

	if (!inet->inet_saddr)
		inet->inet_saddr = fl4->saddr;
	sk_rcv_saddr_set(sk, inet->inet_saddr);

	if (tp->rx_opt.ts_recent_stamp && inet->inet_daddr != daddr) {
		/* Reset inherited state */
		tp->rx_opt.ts_recent	   = 0;
		tp->rx_opt.ts_recent_stamp = 0;
		if (likely(!tp->repair))
			tp->write_seq	   = 0;
	}

	inet->inet_dport = usin->sin_port;
	sk_daddr_set(sk, daddr);

	inet_csk(sk)->icsk_ext_hdr_len = 0;
	if (inet_opt)
		inet_csk(sk)->icsk_ext_hdr_len = inet_opt->opt.optlen;

	tp->rx_opt.mss_clamp = TCP_MSS_DEFAULT;

	/* Socket identity is still unknown (sport may be zero).
	 * However we set state to SYN-SENT and not releasing socket
	 * lock select source port, enter ourselves into the hash tables and
	 * complete initialization after this.
	 */
	tcp_set_state(sk, TCP_SYN_SENT);
	err = inet_hash_connect(tcp_death_row, sk);
	if (err)
		goto failure;

	sk_set_txhash(sk);

	rt = ip_route_newports(fl4, rt, orig_sport, orig_dport,
			       inet->inet_sport, inet->inet_dport, sk);
	if (IS_ERR(rt)) {
		err = PTR_ERR(rt);
		rt = NULL;
		goto failure;
	}
	/* OK, now commit destination to socket. */
	sk->sk_gso_type = SKB_GSO_TCPV4;
	sk_setup_caps(sk, &rt->dst);
	rt = NULL;

	if (likely(!tp->repair)) {
		if (!tp->write_seq)
			tp->write_seq = secure_tcp_seq(inet->inet_saddr,
						       inet->inet_daddr,
						       inet->inet_sport,
						       usin->sin_port);
		tp->tsoffset = secure_tcp_ts_off(sock_net(sk),
						 inet->inet_saddr,
						 inet->inet_daddr);
	}

	inet->inet_id = tp->write_seq ^ jiffies;

	if (tcp_fastopen_defer_connect(sk, &err))
		return err;
	if (err)
		goto failure;

	err = tcp_connect(sk);
	if (err)
		goto failure;

	return 0;

failure:
	/*
	 * This unhashes the socket and releases the local port,
	 * if necessary.
	 */
	tcp_set_state(sk, TCP_CLOSE);
	ip_rt_put(rt);
	sk->sk_route_caps = 0;
	inet->inet_dport = 0;
	return err;
}
EXPORT_SYMBOL(tcp_v4_connect);
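
/* Illustrative user-space sketch (not part of this file): connect(2) on an
 * unconnected AF_INET stream socket is what ultimately reaches
 * tcp_v4_connect() via tcp_prot.connect; the address and port below are the
 * caller's assumptions:
 *
 *	#include <arpa/inet.h>
 *	#include <string.h>
 *	#include <sys/socket.h>
 *	#include <unistd.h>
 *
 *	static int tcp_client(const char *ip, unsigned short port)
 *	{
 *		struct sockaddr_in sin;
 *		int fd = socket(AF_INET, SOCK_STREAM, 0);
 *
 *		if (fd < 0)
 *			return -1;
 *		memset(&sin, 0, sizeof(sin));
 *		sin.sin_family = AF_INET;
 *		sin.sin_port = htons(port);
 *		if (inet_pton(AF_INET, ip, &sin.sin_addr) != 1 ||
 *		    connect(fd, (struct sockaddr *)&sin, sizeof(sin)) < 0) {
 *			close(fd);
 *			return -1;
 *		}
 *		return fd;	caller is responsible for close()
 *	}
 */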
/*
 * This routine reacts to ICMP_FRAG_NEEDED mtu indications as defined in RFC1191.
 * It can be called through tcp_release_cb() if socket was owned by user
 * at the time tcp_v4_err() was called to handle ICMP message.
 */
void tcp_v4_mtu_reduced(struct sock *sk)
{
	struct inet_sock *inet = inet_sk(sk);
	struct dst_entry *dst;
	u32 mtu;

	if ((1 << sk->sk_state) & (TCPF_LISTEN | TCPF_CLOSE))
		return;
	mtu = tcp_sk(sk)->mtu_info;
	dst = inet_csk_update_pmtu(sk, mtu);
	if (!dst)
		return;

	/* Something is about to be wrong... Remember soft error
	 * for the case, if this connection is not able to recover.
	 */
	if (mtu < dst_mtu(dst) && ip_dont_fragment(sk, dst))
		sk->sk_err_soft = EMSGSIZE;

	mtu = dst_mtu(dst);

	if (inet->pmtudisc != IP_PMTUDISC_DONT &&
	    ip_sk_accept_pmtu(sk) &&
	    inet_csk(sk)->icsk_pmtu_cookie > mtu) {
		tcp_sync_mss(sk, mtu);

		/* Resend the TCP packet because it's
		 * clear that the old packet has been
		 * dropped. This is the new "fast" path mtu
		 * discovery.
		 */
		tcp_simple_retransmit(sk);
	} /* else let the usual retransmit timer handle it */
}
EXPORT_SYMBOL(tcp_v4_mtu_reduced);
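
/* Illustrative user-space sketch (not part of this file): the pmtudisc mode
 * tested above (inet->pmtudisc != IP_PMTUDISC_DONT) is controlled per socket
 * with the IP_MTU_DISCOVER option; opting a socket out of PMTU discovery:
 *
 *	#include <netinet/in.h>
 *	#include <sys/socket.h>
 *
 *	static int disable_pmtud(int fd)
 *	{
 *		int val = IP_PMTUDISC_DONT;
 *
 *		return setsockopt(fd, IPPROTO_IP, IP_MTU_DISCOVER,
 *				  &val, sizeof(val));
 *	}
 */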
static void do_redirect(struct sk_buff *skb, struct sock *sk)
{
	struct dst_entry *dst = __sk_dst_check(sk, 0);

	if (dst)
		dst->ops->redirect(dst, sk, skb);
}
/* handle ICMP messages on TCP_NEW_SYN_RECV request sockets */
void tcp_req_err(struct sock *sk, u32 seq, bool abort)
{
	struct request_sock *req = inet_reqsk(sk);
	struct net *net = sock_net(sk);

	/* ICMPs are not backlogged, hence we cannot get
	 * an established socket here.
	 */
	if (seq != tcp_rsk(req)->snt_isn) {
		__NET_INC_STATS(net, LINUX_MIB_OUTOFWINDOWICMPS);
	} else if (abort) {
		/*
		 * Still in SYN_RECV, just remove it silently.
		 * There is no good way to pass the error to the newly
		 * created socket, and POSIX does not want network
		 * errors returned from accept().
		 */
		inet_csk_reqsk_queue_drop(req->rsk_listener, req);
		tcp_listendrop(req->rsk_listener);
	}
	reqsk_put(req);
}
EXPORT_SYMBOL(tcp_req_err);
/*
 * This routine is called by the ICMP module when it gets some
 * sort of error condition.  If err < 0 then the socket should
 * be closed and the error returned to the user.  If err > 0
 * it's just the icmp type << 8 | icmp code.  After adjustment
 * header points to the first 8 bytes of the tcp header.  We need
 * to find the appropriate port.
 *
 * The locking strategy used here is very "optimistic". When
 * someone else accesses the socket the ICMP is just dropped
 * and for some paths there is no check at all.
 * A more general error queue to queue errors for later handling
 * is probably better.
 */
void tcp_v4_err(struct sk_buff *icmp_skb, u32 info)
{
	const struct iphdr *iph = (const struct iphdr *)icmp_skb->data;
	struct tcphdr *th = (struct tcphdr *)(icmp_skb->data + (iph->ihl << 2));
	struct inet_connection_sock *icsk;
	struct tcp_sock *tp;
	struct inet_sock *inet;
	const int type = icmp_hdr(icmp_skb)->type;
	const int code = icmp_hdr(icmp_skb)->code;
	struct sock *sk;
	struct sk_buff *skb;
	struct request_sock *fastopen;
	u32 seq, snd_una;
	s32 remaining;
	u32 delta_us;
	int err;
	struct net *net = dev_net(icmp_skb->dev);

	sk = __inet_lookup_established(net, &tcp_hashinfo, iph->daddr,
				       th->dest, iph->saddr, ntohs(th->source),
				       inet_iif(icmp_skb), 0);
	if (!sk) {
		__ICMP_INC_STATS(net, ICMP_MIB_INERRORS);
		return;
	}
	if (sk->sk_state == TCP_TIME_WAIT) {
		inet_twsk_put(inet_twsk(sk));
		return;
	}
	seq = ntohl(th->seq);
	if (sk->sk_state == TCP_NEW_SYN_RECV)
		return tcp_req_err(sk, seq,
				   type == ICMP_PARAMETERPROB ||
				   type == ICMP_TIME_EXCEEDED ||
				   (type == ICMP_DEST_UNREACH &&
				    (code == ICMP_NET_UNREACH ||
				     code == ICMP_HOST_UNREACH)));

	bh_lock_sock(sk);
	/* If too many ICMPs get dropped on busy
	 * servers this needs to be solved differently.
	 * We do take care of PMTU discovery (RFC1191) special case :
	 * we can receive locally generated ICMP messages while socket is held.
	 */
	if (sock_owned_by_user(sk)) {
		if (!(type == ICMP_DEST_UNREACH && code == ICMP_FRAG_NEEDED))
			__NET_INC_STATS(net, LINUX_MIB_LOCKDROPPEDICMPS);
	}
	if (sk->sk_state == TCP_CLOSE)
		goto out;

	if (unlikely(iph->ttl < inet_sk(sk)->min_ttl)) {
		__NET_INC_STATS(net, LINUX_MIB_TCPMINTTLDROP);
		goto out;
	}

	icsk = inet_csk(sk);
	tp = tcp_sk(sk);
	/* XXX (TFO) - tp->snd_una should be ISN (tcp_create_openreq_child() */
	fastopen = tp->fastopen_rsk;
	snd_una = fastopen ? tcp_rsk(fastopen)->snt_isn : tp->snd_una;
	if (sk->sk_state != TCP_LISTEN &&
	    !between(seq, snd_una, tp->snd_nxt)) {
		__NET_INC_STATS(net, LINUX_MIB_OUTOFWINDOWICMPS);
		goto out;
	}

	switch (type) {
	case ICMP_REDIRECT:
		if (!sock_owned_by_user(sk))
			do_redirect(icmp_skb, sk);
		goto out;
	case ICMP_SOURCE_QUENCH:
		/* Just silently ignore these. */
		goto out;
	case ICMP_PARAMETERPROB:
		err = EPROTO;
		break;
	case ICMP_DEST_UNREACH:
		if (code > NR_ICMP_UNREACH)
			goto out;

		if (code == ICMP_FRAG_NEEDED) { /* PMTU discovery (RFC1191) */
			/* We are not interested in TCP_LISTEN and open_requests
			 * (SYN-ACKs sent out by Linux are always <576bytes so
			 * they should go through unfragmented).
			 */
			if (sk->sk_state == TCP_LISTEN)
				goto out;

			tp->mtu_info = info;
			if (!sock_owned_by_user(sk)) {
				tcp_v4_mtu_reduced(sk);
			} else {
				if (!test_and_set_bit(TCP_MTU_REDUCED_DEFERRED, &sk->sk_tsq_flags))
					sock_hold(sk);
			}
			goto out;
		}

		err = icmp_err_convert[code].errno;
		/* check if icmp_skb allows revert of backoff
		 * (see draft-zimmermann-tcp-lcd) */
		if (code != ICMP_NET_UNREACH && code != ICMP_HOST_UNREACH)
			break;
		if (seq != tp->snd_una || !icsk->icsk_retransmits ||
		    !icsk->icsk_backoff || fastopen)
			break;

		if (sock_owned_by_user(sk))
			break;

		icsk->icsk_backoff--;
		icsk->icsk_rto = tp->srtt_us ? __tcp_set_rto(tp) :
					       TCP_TIMEOUT_INIT;
		icsk->icsk_rto = inet_csk_rto_backoff(icsk, TCP_RTO_MAX);

		skb = tcp_rtx_queue_head(sk);
		BUG_ON(!skb);

		tcp_mstamp_refresh(tp);
		delta_us = (u32)(tp->tcp_mstamp - skb->skb_mstamp);
		remaining = icsk->icsk_rto -
			    usecs_to_jiffies(delta_us);

		if (remaining > 0) {
			inet_csk_reset_xmit_timer(sk, ICSK_TIME_RETRANS,
						  remaining, TCP_RTO_MAX);
		} else {
			/* RTO revert clocked out retransmission.
			 * Will retransmit now */
			tcp_retransmit_timer(sk);
		}

		break;
	case ICMP_TIME_EXCEEDED:
		err = EHOSTUNREACH;
		break;
	default:
		goto out;
	}

	switch (sk->sk_state) {
	case TCP_SYN_SENT:
	case TCP_SYN_RECV:
		/* Only in fast or simultaneous open. If a fast open socket
		 * is already accepted it is treated as a connected one below.
		 */
		if (fastopen && !fastopen->sk)
			break;

		if (!sock_owned_by_user(sk)) {
			sk->sk_err = err;
			sk->sk_error_report(sk);
			tcp_done(sk);
		} else {
			sk->sk_err_soft = err;
		}
		goto out;
	}

	/* If we've already connected we will keep trying
	 * until we time out, or the user gives up.
	 *
	 * rfc1122 4.2.3.9 allows to consider as hard errors
	 * only PROTO_UNREACH and PORT_UNREACH (well, FRAG_FAILED too,
	 * but it is obsoleted by pmtu discovery).
	 *
	 * Note, that in modern internet, where routing is unreliable
	 * and in each dark corner broken firewalls sit, sending random
	 * errors ordered by their masters even these two messages finally lose
	 * their original sense (even Linux sends invalid PORT_UNREACHs)
	 *
	 * Now we are in compliance with RFCs.
	 */
	inet = inet_sk(sk);
	if (!sock_owned_by_user(sk) && inet->recverr) {
		sk->sk_err = err;
		sk->sk_error_report(sk);
	} else	{ /* Only an error on timeout */
		sk->sk_err_soft = err;
	}

out:
	bh_unlock_sock(sk);
	sock_put(sk);
}
void __tcp_v4_send_check(struct sk_buff *skb, __be32 saddr, __be32 daddr)
{
	struct tcphdr *th = tcp_hdr(skb);

	if (skb->ip_summed == CHECKSUM_PARTIAL) {
		th->check = ~tcp_v4_check(skb->len, saddr, daddr, 0);
		skb->csum_start = skb_transport_header(skb) - skb->head;
		skb->csum_offset = offsetof(struct tcphdr, check);
	} else {
		th->check = tcp_v4_check(skb->len, saddr, daddr,
					 csum_partial(th,
						      th->doff << 2,
						      skb->csum));
	}
}

/* This routine computes an IPv4 TCP checksum. */
void tcp_v4_send_check(struct sock *sk, struct sk_buff *skb)
{
	const struct inet_sock *inet = inet_sk(sk);

	__tcp_v4_send_check(skb, inet->inet_saddr, inet->inet_daddr);
}
EXPORT_SYMBOL(tcp_v4_send_check);
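
/* Illustrative user-space sketch (not part of this file): the ones'
 * complement sum that tcp_v4_check() produces can be reproduced by folding
 * a 32-bit accumulator over the pseudo header (saddr, daddr, zero byte,
 * protocol, TCP length) and then the segment with th->check zeroed:
 *
 *	#include <stddef.h>
 *	#include <stdint.h>
 *
 *	static uint32_t csum_add(const uint8_t *p, size_t len, uint32_t sum)
 *	{
 *		while (len > 1) {
 *			sum += (uint32_t)p[0] << 8 | p[1];
 *			p += 2;
 *			len -= 2;
 *		}
 *		if (len)
 *			sum += (uint32_t)p[0] << 8;
 *		return sum;
 *	}
 *
 *	static uint16_t csum_finish(uint32_t sum)
 *	{
 *		while (sum >> 16)
 *			sum = (sum & 0xffff) + (sum >> 16);
 *		return (uint16_t)~sum;
 *	}
 *
 * csum_finish(csum_add(seg, seg_len, csum_add(pseudo, 12, 0))) then matches
 * what the slow (non CHECKSUM_PARTIAL) path above stores in th->check.
 */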
/*
 *	This routine will send an RST to the other tcp.
 *
 *	Someone asks: why I NEVER use socket parameters (TOS, TTL etc.)
 *		      for reset.
 *	Answer: if a packet caused RST, it is not for a socket
 *		existing in our system, if it is matched to a socket,
 *		it is just duplicate segment or bug in other side's TCP.
 *		So we build the reply based only on the parameters that
 *		arrived with the segment.
 *	Exception: precedence violation. We do not implement it in any case.
 */
static void tcp_v4_send_reset(const struct sock *sk, struct sk_buff *skb)
{
	const struct tcphdr *th = tcp_hdr(skb);
	struct {
		struct tcphdr th;
#ifdef CONFIG_TCP_MD5SIG
		__be32 opt[(TCPOLEN_MD5SIG_ALIGNED >> 2)];
#endif
	} rep;
	struct ip_reply_arg arg;
#ifdef CONFIG_TCP_MD5SIG
	struct tcp_md5sig_key *key = NULL;
	const __u8 *hash_location = NULL;
	unsigned char newhash[16];
	int genhash;
	struct sock *sk1 = NULL;
#endif
	struct net *net;

	/* Never send a reset in response to a reset. */
	if (th->rst)
		return;

	/* If sk not NULL, it means we did a successful lookup and incoming
	 * route had to be correct. prequeue might have dropped our dst.
	 */
	if (!sk && skb_rtable(skb)->rt_type != RTN_LOCAL)
		return;

	/* Swap the send and the receive. */
	memset(&rep, 0, sizeof(rep));
	rep.th.dest   = th->source;
	rep.th.source = th->dest;
	rep.th.doff   = sizeof(struct tcphdr) / 4;
	rep.th.rst    = 1;

	if (th->ack) {
		rep.th.seq = th->ack_seq;
	} else {
		rep.th.ack = 1;
		rep.th.ack_seq = htonl(ntohl(th->seq) + th->syn + th->fin +
				       skb->len - (th->doff << 2));
	}

	memset(&arg, 0, sizeof(arg));
	arg.iov[0].iov_base = (unsigned char *)&rep;
	arg.iov[0].iov_len  = sizeof(rep.th);

	net = sk ? sock_net(sk) : dev_net(skb_dst(skb)->dev);
#ifdef CONFIG_TCP_MD5SIG
	rcu_read_lock();
	hash_location = tcp_parse_md5sig_option(th);
	if (sk && sk_fullsock(sk)) {
		key = tcp_md5_do_lookup(sk, (union tcp_md5_addr *)
					&ip_hdr(skb)->saddr, AF_INET);
	} else if (hash_location) {
		/*
		 * active side is lost. Try to find listening socket through
		 * source port, and then find md5 key through listening socket.
		 * We do not lose security here:
		 * Incoming packet is checked with md5 hash with finding key,
		 * no RST generated if md5 hash doesn't match.
		 */
		sk1 = __inet_lookup_listener(net, &tcp_hashinfo, NULL, 0,
					     ip_hdr(skb)->saddr,
					     th->source, ip_hdr(skb)->daddr,
					     ntohs(th->source), inet_iif(skb),
					     inet_sdif(skb));
		/* don't send rst if it can't find key */
		if (!sk1)
			goto out;

		key = tcp_md5_do_lookup(sk1, (union tcp_md5_addr *)
					&ip_hdr(skb)->saddr, AF_INET);
		if (!key)
			goto out;

		genhash = tcp_v4_md5_hash_skb(newhash, key, NULL, skb);
		if (genhash || memcmp(hash_location, newhash, 16) != 0)
			goto out;
	}

	if (key) {
		rep.opt[0] = htonl((TCPOPT_NOP << 24) |
				   (TCPOPT_NOP << 16) |
				   (TCPOPT_MD5SIG << 8) |
				   TCPOLEN_MD5SIG);
		/* Update length and the length the header thinks exists */
		arg.iov[0].iov_len += TCPOLEN_MD5SIG_ALIGNED;
		rep.th.doff = arg.iov[0].iov_len / 4;

		tcp_v4_md5_hash_hdr((__u8 *) &rep.opt[1],
				    key, ip_hdr(skb)->saddr,
				    ip_hdr(skb)->daddr, &rep.th);
	}
#endif
	arg.csum = csum_tcpudp_nofold(ip_hdr(skb)->daddr,
				      ip_hdr(skb)->saddr, /* XXX */
				      arg.iov[0].iov_len, IPPROTO_TCP, 0);
	arg.csumoffset = offsetof(struct tcphdr, check) / 2;
	arg.flags = (sk && inet_sk_transparent(sk)) ? IP_REPLY_ARG_NOSRCCHECK : 0;

	/* When socket is gone, all binding information is lost.
	 * routing might fail in this case. No choice here, if we choose to force
	 * input interface, we will misroute in case of asymmetric route.
	 */
	if (sk) {
		arg.bound_dev_if = sk->sk_bound_dev_if;
		if (sk_fullsock(sk))
			trace_tcp_send_reset(sk, skb);
	}

	BUILD_BUG_ON(offsetof(struct sock, sk_bound_dev_if) !=
		     offsetof(struct inet_timewait_sock, tw_bound_dev_if));

	arg.tos = ip_hdr(skb)->tos;
	arg.uid = sock_net_uid(net, sk && sk_fullsock(sk) ? sk : NULL);
	local_bh_disable();
	ip_send_unicast_reply(*this_cpu_ptr(net->ipv4.tcp_sk),
			      skb, &TCP_SKB_CB(skb)->header.h4.opt,
			      ip_hdr(skb)->saddr, ip_hdr(skb)->daddr,
			      &arg, arg.iov[0].iov_len);

	__TCP_INC_STATS(net, TCP_MIB_OUTSEGS);
	__TCP_INC_STATS(net, TCP_MIB_OUTRSTS);
	local_bh_enable();

#ifdef CONFIG_TCP_MD5SIG
out:
	rcu_read_unlock();
#endif
}
/* The code following below sending ACKs in SYN-RECV and TIME-WAIT states
   outside socket context is ugly, certainly. What can I do?
 */

static void tcp_v4_send_ack(const struct sock *sk,
			    struct sk_buff *skb, u32 seq, u32 ack,
			    u32 win, u32 tsval, u32 tsecr, int oif,
			    struct tcp_md5sig_key *key,
			    int reply_flags, u8 tos)
{
	const struct tcphdr *th = tcp_hdr(skb);
	struct {
		struct tcphdr th;
		__be32 opt[(TCPOLEN_TSTAMP_ALIGNED >> 2)
#ifdef CONFIG_TCP_MD5SIG
			   + (TCPOLEN_MD5SIG_ALIGNED >> 2)
#endif
			];
	} rep;
	struct net *net = sock_net(sk);
	struct ip_reply_arg arg;

	memset(&rep.th, 0, sizeof(struct tcphdr));
	memset(&arg, 0, sizeof(arg));

	arg.iov[0].iov_base = (unsigned char *)&rep;
	arg.iov[0].iov_len  = sizeof(rep.th);
	if (tsecr) {
		rep.opt[0] = htonl((TCPOPT_NOP << 24) | (TCPOPT_NOP << 16) |
				   (TCPOPT_TIMESTAMP << 8) |
				   TCPOLEN_TIMESTAMP);
		rep.opt[1] = htonl(tsval);
		rep.opt[2] = htonl(tsecr);
		arg.iov[0].iov_len += TCPOLEN_TSTAMP_ALIGNED;
	}

	/* Swap the send and the receive. */
	rep.th.dest    = th->source;
	rep.th.source  = th->dest;
	rep.th.doff    = arg.iov[0].iov_len / 4;
	rep.th.seq     = htonl(seq);
	rep.th.ack_seq = htonl(ack);
	rep.th.ack     = 1;
	rep.th.window  = htons(win);

#ifdef CONFIG_TCP_MD5SIG
	if (key) {
		int offset = (tsecr) ? 3 : 0;

		rep.opt[offset++] = htonl((TCPOPT_NOP << 24) |
					  (TCPOPT_NOP << 16) |
					  (TCPOPT_MD5SIG << 8) |
					  TCPOLEN_MD5SIG);
		arg.iov[0].iov_len += TCPOLEN_MD5SIG_ALIGNED;
		rep.th.doff = arg.iov[0].iov_len/4;

		tcp_v4_md5_hash_hdr((__u8 *) &rep.opt[offset],
				    key, ip_hdr(skb)->saddr,
				    ip_hdr(skb)->daddr, &rep.th);
	}
#endif
	arg.flags = reply_flags;
	arg.csum = csum_tcpudp_nofold(ip_hdr(skb)->daddr,
				      ip_hdr(skb)->saddr, /* XXX */
				      arg.iov[0].iov_len, IPPROTO_TCP, 0);
	arg.csumoffset = offsetof(struct tcphdr, check) / 2;
	if (oif)
		arg.bound_dev_if = oif;
	arg.tos = tos;
	arg.uid = sock_net_uid(net, sk_fullsock(sk) ? sk : NULL);
	local_bh_disable();
	ip_send_unicast_reply(*this_cpu_ptr(net->ipv4.tcp_sk),
			      skb, &TCP_SKB_CB(skb)->header.h4.opt,
			      ip_hdr(skb)->saddr, ip_hdr(skb)->daddr,
			      &arg, arg.iov[0].iov_len);

	__TCP_INC_STATS(net, TCP_MIB_OUTSEGS);
	local_bh_enable();
}
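
/* Illustrative sketch (not part of this file): the rep.opt[0] word built
 * above encodes the RFC 7323 timestamp option, padded with two NOPs so the
 * two 32-bit timestamps that follow stay word-aligned; in user-space terms:
 *
 *	#include <arpa/inet.h>
 *	#include <stdint.h>
 *
 *	enum { TS_NOP = 1, TS_KIND = 8, TS_LEN = 10 };
 *
 *	static uint32_t tstamp_opt_word(void)
 *	{
 *		wire bytes: 0x01 0x01 0x08 0x0a, then tsval and tsecr
 *		return htonl(TS_NOP << 24 | TS_NOP << 16 |
 *			     TS_KIND << 8 | TS_LEN);
 *	}
 */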
static void tcp_v4_timewait_ack(struct sock *sk, struct sk_buff *skb)
{
	struct inet_timewait_sock *tw = inet_twsk(sk);
	struct tcp_timewait_sock *tcptw = tcp_twsk(sk);

	tcp_v4_send_ack(sk, skb,
			tcptw->tw_snd_nxt, tcptw->tw_rcv_nxt,
			tcptw->tw_rcv_wnd >> tw->tw_rcv_wscale,
			tcp_time_stamp_raw() + tcptw->tw_ts_offset,
			tcptw->tw_ts_recent,
			tw->tw_bound_dev_if,
			tcp_twsk_md5_key(tcptw),
			tw->tw_transparent ? IP_REPLY_ARG_NOSRCCHECK : 0,
			tw->tw_tos
			);

	inet_twsk_put(tw);
}
static void tcp_v4_reqsk_send_ack(const struct sock *sk, struct sk_buff *skb,
				  struct request_sock *req)
{
	/* sk->sk_state == TCP_LISTEN -> for regular TCP_SYN_RECV
	 * sk->sk_state == TCP_SYN_RECV -> for Fast Open.
	 */
	u32 seq = (sk->sk_state == TCP_LISTEN) ? tcp_rsk(req)->snt_isn + 1 :
					     tcp_sk(sk)->snd_nxt;

	/* RFC 7323 2.3
	 * The window field (SEG.WND) of every outgoing segment, with the
	 * exception of <SYN> segments, MUST be right-shifted by
	 * Rcv.Wind.Shift bits:
	 */
	tcp_v4_send_ack(sk, skb, seq,
			tcp_rsk(req)->rcv_nxt,
			req->rsk_rcv_wnd >> inet_rsk(req)->rcv_wscale,
			tcp_time_stamp_raw() + tcp_rsk(req)->ts_off,
			req->ts_recent,
			0,
			tcp_md5_do_lookup(sk, (union tcp_md5_addr *)&ip_hdr(skb)->daddr,
					  AF_INET),
			inet_rsk(req)->no_srccheck ? IP_REPLY_ARG_NOSRCCHECK : 0,
			ip_hdr(skb)->tos);
}
/*
 *	Send a SYN-ACK after having received a SYN.
 *	This still operates on a request_sock only, not on a big
 *	socket.
 */
static int tcp_v4_send_synack(const struct sock *sk, struct dst_entry *dst,
			      struct flowi *fl,
			      struct request_sock *req,
			      struct tcp_fastopen_cookie *foc,
			      enum tcp_synack_type synack_type)
{
	const struct inet_request_sock *ireq = inet_rsk(req);
	struct flowi4 fl4;
	int err = -1;
	struct sk_buff *skb;

	/* First, grab a route. */
	if (!dst && (dst = inet_csk_route_req(sk, &fl4, req)) == NULL)
		return -1;

	skb = tcp_make_synack(sk, dst, req, foc, synack_type);

	if (skb) {
		__tcp_v4_send_check(skb, ireq->ir_loc_addr, ireq->ir_rmt_addr);

		rcu_read_lock();
		err = ip_build_and_send_pkt(skb, sk, ireq->ir_loc_addr,
					    ireq->ir_rmt_addr,
					    rcu_dereference(ireq->ireq_opt));
		rcu_read_unlock();
		err = net_xmit_eval(err);
	}

	return err;
}
/*
 *	IPv4 request_sock destructor.
 */
static void tcp_v4_reqsk_destructor(struct request_sock *req)
{
	kfree(rcu_dereference_protected(inet_rsk(req)->ireq_opt, 1));
}
#ifdef CONFIG_TCP_MD5SIG
/*
 * RFC2385 MD5 checksumming requires a mapping of
 * IP address->MD5 Key.
 * We need to maintain these in the sk structure.
 */

/* Find the Key structure for an address.  */
struct tcp_md5sig_key *tcp_md5_do_lookup(const struct sock *sk,
					 const union tcp_md5_addr *addr,
					 int family)
{
	const struct tcp_sock *tp = tcp_sk(sk);
	struct tcp_md5sig_key *key;
	const struct tcp_md5sig_info *md5sig;
	__be32 mask;
	struct tcp_md5sig_key *best_match = NULL;
	bool match;

	/* caller either holds rcu_read_lock() or socket lock */
	md5sig = rcu_dereference_check(tp->md5sig_info,
				       lockdep_sock_is_held(sk));
	if (!md5sig)
		return NULL;

	hlist_for_each_entry_rcu(key, &md5sig->head, node) {
		if (key->family != family)
			continue;

		if (family == AF_INET) {
			mask = inet_make_mask(key->prefixlen);
			match = (key->addr.a4.s_addr & mask) ==
				(addr->a4.s_addr & mask);
#if IS_ENABLED(CONFIG_IPV6)
		} else if (family == AF_INET6) {
			match = ipv6_prefix_equal(&key->addr.a6, &addr->a6,
						  key->prefixlen);
#endif
		} else {
			match = false;
		}

		if (match && (!best_match ||
			      key->prefixlen > best_match->prefixlen))
			best_match = key;
	}
	return best_match;
}
EXPORT_SYMBOL(tcp_md5_do_lookup);
static struct tcp_md5sig_key *tcp_md5_do_lookup_exact(const struct sock *sk,
						      const union tcp_md5_addr *addr,
						      int family, u8 prefixlen)
{
	const struct tcp_sock *tp = tcp_sk(sk);
	struct tcp_md5sig_key *key;
	unsigned int size = sizeof(struct in_addr);
	const struct tcp_md5sig_info *md5sig;

	/* caller either holds rcu_read_lock() or socket lock */
	md5sig = rcu_dereference_check(tp->md5sig_info,
				       lockdep_sock_is_held(sk));
	if (!md5sig)
		return NULL;
#if IS_ENABLED(CONFIG_IPV6)
	if (family == AF_INET6)
		size = sizeof(struct in6_addr);
#endif
	hlist_for_each_entry_rcu(key, &md5sig->head, node) {
		if (key->family != family)
			continue;
		if (!memcmp(&key->addr, addr, size) &&
		    key->prefixlen == prefixlen)
			return key;
	}
	return NULL;
}

struct tcp_md5sig_key *tcp_v4_md5_lookup(const struct sock *sk,
					 const struct sock *addr_sk)
{
	const union tcp_md5_addr *addr;

	addr = (const union tcp_md5_addr *)&addr_sk->sk_daddr;
	return tcp_md5_do_lookup(sk, addr, AF_INET);
}
EXPORT_SYMBOL(tcp_v4_md5_lookup);
/* This can be called on a newly created socket, from other files */
int tcp_md5_do_add(struct sock *sk, const union tcp_md5_addr *addr,
		   int family, u8 prefixlen, const u8 *newkey, u8 newkeylen,
		   gfp_t gfp)
{
	/* Add Key to the list */
	struct tcp_md5sig_key *key;
	struct tcp_sock *tp = tcp_sk(sk);
	struct tcp_md5sig_info *md5sig;

	key = tcp_md5_do_lookup_exact(sk, addr, family, prefixlen);
	if (key) {
		/* Pre-existing entry - just update that one. */
		memcpy(key->key, newkey, newkeylen);
		key->keylen = newkeylen;
		return 0;
	}

	md5sig = rcu_dereference_protected(tp->md5sig_info,
					   lockdep_sock_is_held(sk));
	if (!md5sig) {
		md5sig = kmalloc(sizeof(*md5sig), gfp);
		if (!md5sig)
			return -ENOMEM;

		sk_nocaps_add(sk, NETIF_F_GSO_MASK);
		INIT_HLIST_HEAD(&md5sig->head);
		rcu_assign_pointer(tp->md5sig_info, md5sig);
	}

	key = sock_kmalloc(sk, sizeof(*key), gfp);
	if (!key)
		return -ENOMEM;
	if (!tcp_alloc_md5sig_pool()) {
		sock_kfree_s(sk, key, sizeof(*key));
		return -ENOMEM;
	}

	memcpy(key->key, newkey, newkeylen);
	key->keylen = newkeylen;
	key->family = family;
	key->prefixlen = prefixlen;
	memcpy(&key->addr, addr,
	       (family == AF_INET6) ? sizeof(struct in6_addr) :
				      sizeof(struct in_addr));
	hlist_add_head_rcu(&key->node, &md5sig->head);
	return 0;
}
EXPORT_SYMBOL(tcp_md5_do_add);
int tcp_md5_do_del(struct sock *sk, const union tcp_md5_addr *addr, int family,
		   u8 prefixlen)
{
	struct tcp_md5sig_key *key;

	key = tcp_md5_do_lookup_exact(sk, addr, family, prefixlen);
	if (!key)
		return -ENOENT;
	hlist_del_rcu(&key->node);
	atomic_sub(sizeof(*key), &sk->sk_omem_alloc);
	kfree_rcu(key, rcu);
	return 0;
}
EXPORT_SYMBOL(tcp_md5_do_del);

static void tcp_clear_md5_list(struct sock *sk)
{
	struct tcp_sock *tp = tcp_sk(sk);
	struct tcp_md5sig_key *key;
	struct hlist_node *n;
	struct tcp_md5sig_info *md5sig;

	md5sig = rcu_dereference_protected(tp->md5sig_info, 1);

	hlist_for_each_entry_safe(key, n, &md5sig->head, node) {
		hlist_del_rcu(&key->node);
		atomic_sub(sizeof(*key), &sk->sk_omem_alloc);
		kfree_rcu(key, rcu);
	}
}
static int tcp_v4_parse_md5_keys(struct sock *sk, int optname,
				 char __user *optval, int optlen)
{
	struct tcp_md5sig cmd;
	struct sockaddr_in *sin = (struct sockaddr_in *)&cmd.tcpm_addr;
	u8 prefixlen = 32;

	if (optlen < sizeof(cmd))
		return -EINVAL;

	if (copy_from_user(&cmd, optval, sizeof(cmd)))
		return -EFAULT;

	if (sin->sin_family != AF_INET)
		return -EINVAL;

	if (optname == TCP_MD5SIG_EXT &&
	    cmd.tcpm_flags & TCP_MD5SIG_FLAG_PREFIX) {
		prefixlen = cmd.tcpm_prefixlen;
		if (prefixlen > 32)
			return -EINVAL;
	}

	if (!cmd.tcpm_keylen)
		return tcp_md5_do_del(sk, (union tcp_md5_addr *)&sin->sin_addr.s_addr,
				      AF_INET, prefixlen);

	if (cmd.tcpm_keylen > TCP_MD5SIG_MAXKEYLEN)
		return -EINVAL;

	return tcp_md5_do_add(sk, (union tcp_md5_addr *)&sin->sin_addr.s_addr,
			      AF_INET, prefixlen, cmd.tcpm_key, cmd.tcpm_keylen,
			      GFP_KERNEL);
}
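
/* Illustrative user-space sketch (not part of this file): the option parsed
 * above is installed with setsockopt(TCP_MD5SIG); struct tcp_md5sig is the
 * uapi layout (glibc exposes it via <netinet/tcp.h>), peer and key are the
 * caller's assumptions:
 *
 *	#include <string.h>
 *	#include <sys/socket.h>
 *	#include <netinet/in.h>
 *	#include <netinet/tcp.h>
 *
 *	static int add_md5_key(int fd, const struct sockaddr_in *peer,
 *			       const void *key, int keylen)
 *	{
 *		struct tcp_md5sig md5;
 *
 *		memset(&md5, 0, sizeof(md5));
 *		memcpy(&md5.tcpm_addr, peer, sizeof(*peer));
 *		md5.tcpm_keylen = keylen;
 *		memcpy(md5.tcpm_key, key, keylen);
 *		return setsockopt(fd, IPPROTO_TCP, TCP_MD5SIG,
 *				  &md5, sizeof(md5));
 *	}
 *
 * Passing tcpm_keylen == 0 deletes the key for that peer, mirroring the
 * tcp_md5_do_del() branch above.
 */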
static int tcp_v4_md5_hash_headers(struct tcp_md5sig_pool *hp,
				   __be32 daddr, __be32 saddr,
				   const struct tcphdr *th, int nbytes)
{
	struct tcp4_pseudohdr *bp;
	struct scatterlist sg;
	struct tcphdr *_th;

	bp = hp->scratch;
	bp->saddr = saddr;
	bp->daddr = daddr;
	bp->pad = 0;
	bp->protocol = IPPROTO_TCP;
	bp->len = cpu_to_be16(nbytes);

	_th = (struct tcphdr *)(bp + 1);
	memcpy(_th, th, sizeof(*th));
	_th->check = 0;

	sg_init_one(&sg, bp, sizeof(*bp) + sizeof(*th));
	ahash_request_set_crypt(hp->md5_req, &sg, NULL,
				sizeof(*bp) + sizeof(*th));
	return crypto_ahash_update(hp->md5_req);
}
static int tcp_v4_md5_hash_hdr(char *md5_hash, const struct tcp_md5sig_key *key,
			       __be32 daddr, __be32 saddr, const struct tcphdr *th)
{
	struct tcp_md5sig_pool *hp;
	struct ahash_request *req;

	hp = tcp_get_md5sig_pool();
	if (!hp)
		goto clear_hash_noput;
	req = hp->md5_req;

	if (crypto_ahash_init(req))
		goto clear_hash;
	if (tcp_v4_md5_hash_headers(hp, daddr, saddr, th, th->doff << 2))
		goto clear_hash;
	if (tcp_md5_hash_key(hp, key))
		goto clear_hash;
	ahash_request_set_crypt(req, NULL, md5_hash, 0);
	if (crypto_ahash_final(req))
		goto clear_hash;

	tcp_put_md5sig_pool();
	return 0;

clear_hash:
	tcp_put_md5sig_pool();
clear_hash_noput:
	memset(md5_hash, 0, 16);
	return 1;
}
int tcp_v4_md5_hash_skb(char *md5_hash, const struct tcp_md5sig_key *key,
			const struct sock *sk,
			const struct sk_buff *skb)
{
	struct tcp_md5sig_pool *hp;
	struct ahash_request *req;
	const struct tcphdr *th = tcp_hdr(skb);
	__be32 saddr, daddr;

	if (sk) { /* valid for establish/request sockets */
		saddr = sk->sk_rcv_saddr;
		daddr = sk->sk_daddr;
	} else {
		const struct iphdr *iph = ip_hdr(skb);

		saddr = iph->saddr;
		daddr = iph->daddr;
	}

	hp = tcp_get_md5sig_pool();
	if (!hp)
		goto clear_hash_noput;
	req = hp->md5_req;

	if (crypto_ahash_init(req))
		goto clear_hash;

	if (tcp_v4_md5_hash_headers(hp, daddr, saddr, th, skb->len))
		goto clear_hash;
	if (tcp_md5_hash_skb_data(hp, skb, th->doff << 2))
		goto clear_hash;
	if (tcp_md5_hash_key(hp, key))
		goto clear_hash;
	ahash_request_set_crypt(req, NULL, md5_hash, 0);
	if (crypto_ahash_final(req))
		goto clear_hash;

	tcp_put_md5sig_pool();
	return 0;

clear_hash:
	tcp_put_md5sig_pool();
clear_hash_noput:
	memset(md5_hash, 0, 16);
	return 1;
}
EXPORT_SYMBOL(tcp_v4_md5_hash_skb);

#endif
/* Called with rcu_read_lock() */
static bool tcp_v4_inbound_md5_hash(const struct sock *sk,
				    const struct sk_buff *skb)
{
#ifdef CONFIG_TCP_MD5SIG
	/*
	 * This gets called for each TCP segment that arrives
	 * so we want to be efficient.
	 * We have 3 drop cases:
	 * o No MD5 hash and one expected.
	 * o MD5 hash and we're not expecting one.
	 * o MD5 hash and it's wrong.
	 */
	const __u8 *hash_location = NULL;
	struct tcp_md5sig_key *hash_expected;
	const struct iphdr *iph = ip_hdr(skb);
	const struct tcphdr *th = tcp_hdr(skb);
	int genhash;
	unsigned char newhash[16];

	hash_expected = tcp_md5_do_lookup(sk, (union tcp_md5_addr *)&iph->saddr,
					  AF_INET);
	hash_location = tcp_parse_md5sig_option(th);

	/* We've parsed the options - do we have a hash? */
	if (!hash_expected && !hash_location)
		return false;

	if (hash_expected && !hash_location) {
		NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPMD5NOTFOUND);
		return true;
	}

	if (!hash_expected && hash_location) {
		NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPMD5UNEXPECTED);
		return true;
	}

	/* Okay, so this is hash_expected and hash_location -
	 * so we need to calculate the checksum.
	 */
	genhash = tcp_v4_md5_hash_skb(newhash,
				      hash_expected,
				      NULL, skb);

	if (genhash || memcmp(hash_location, newhash, 16) != 0) {
		NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPMD5FAILURE);
		net_info_ratelimited("MD5 Hash failed for (%pI4, %d)->(%pI4, %d)%s\n",
				     &iph->saddr, ntohs(th->source),
				     &iph->daddr, ntohs(th->dest),
				     genhash ? " tcp_v4_calc_md5_hash failed"
					     : "");
		return true;
	}
	return false;
#endif
	return false;
}
static void tcp_v4_init_req(struct request_sock *req,
			    const struct sock *sk_listener,
			    struct sk_buff *skb)
{
	struct inet_request_sock *ireq = inet_rsk(req);
	struct net *net = sock_net(sk_listener);

	sk_rcv_saddr_set(req_to_sk(req), ip_hdr(skb)->daddr);
	sk_daddr_set(req_to_sk(req), ip_hdr(skb)->saddr);
	RCU_INIT_POINTER(ireq->ireq_opt, tcp_v4_save_options(net, skb));
}

static struct dst_entry *tcp_v4_route_req(const struct sock *sk,
					  struct flowi *fl,
					  const struct request_sock *req)
{
	return inet_csk_route_req(sk, &fl->u.ip4, req);
}
struct request_sock_ops tcp_request_sock_ops __read_mostly = {
	.family		=	PF_INET,
	.obj_size	=	sizeof(struct tcp_request_sock),
	.rtx_syn_ack	=	tcp_rtx_synack,
	.send_ack	=	tcp_v4_reqsk_send_ack,
	.destructor	=	tcp_v4_reqsk_destructor,
	.send_reset	=	tcp_v4_send_reset,
	.syn_ack_timeout =	tcp_syn_ack_timeout,
};

static const struct tcp_request_sock_ops tcp_request_sock_ipv4_ops = {
	.mss_clamp	=	TCP_MSS_DEFAULT,
#ifdef CONFIG_TCP_MD5SIG
	.req_md5_lookup	=	tcp_v4_md5_lookup,
	.calc_md5_hash	=	tcp_v4_md5_hash_skb,
#endif
	.init_req	=	tcp_v4_init_req,
#ifdef CONFIG_SYN_COOKIES
	.cookie_init_seq =	cookie_v4_init_sequence,
#endif
	.route_req	=	tcp_v4_route_req,
	.init_seq	=	tcp_v4_init_seq,
	.init_ts_off	=	tcp_v4_init_ts_off,
	.send_synack	=	tcp_v4_send_synack,
};
int tcp_v4_conn_request(struct sock *sk, struct sk_buff *skb)
{
	/* Never answer to SYNs sent to broadcast or multicast */
	if (skb_rtable(skb)->rt_flags & (RTCF_BROADCAST | RTCF_MULTICAST))
		goto drop;

	return tcp_conn_request(&tcp_request_sock_ops,
				&tcp_request_sock_ipv4_ops, sk, skb);

drop:
	tcp_listendrop(sk);
	return 0;
}
EXPORT_SYMBOL(tcp_v4_conn_request);
/*
 * The three way handshake has completed - we got a valid synack -
 * now create the new socket.
 */
struct sock *tcp_v4_syn_recv_sock(const struct sock *sk, struct sk_buff *skb,
				  struct request_sock *req,
				  struct dst_entry *dst,
				  struct request_sock *req_unhash,
				  bool *own_req)
{
	struct inet_request_sock *ireq;
	struct inet_sock *newinet;
	struct tcp_sock *newtp;
	struct sock *newsk;
#ifdef CONFIG_TCP_MD5SIG
	struct tcp_md5sig_key *key;
#endif
	struct ip_options_rcu *inet_opt;

	if (sk_acceptq_is_full(sk))
		goto exit_overflow;

	newsk = tcp_create_openreq_child(sk, req, skb);
	if (!newsk)
		goto exit_nonewsk;

	newsk->sk_gso_type = SKB_GSO_TCPV4;
	inet_sk_rx_dst_set(newsk, skb);

	newtp		      = tcp_sk(newsk);
	newinet		      = inet_sk(newsk);
	ireq		      = inet_rsk(req);
	sk_daddr_set(newsk, ireq->ir_rmt_addr);
	sk_rcv_saddr_set(newsk, ireq->ir_loc_addr);
	newsk->sk_bound_dev_if = ireq->ir_iif;
	newinet->inet_saddr   = ireq->ir_loc_addr;
	inet_opt	      = rcu_dereference(ireq->ireq_opt);
	RCU_INIT_POINTER(newinet->inet_opt, inet_opt);
	newinet->mc_index     = inet_iif(skb);
	newinet->mc_ttl	      = ip_hdr(skb)->ttl;
	newinet->rcv_tos      = ip_hdr(skb)->tos;
	inet_csk(newsk)->icsk_ext_hdr_len = 0;
	if (inet_opt)
		inet_csk(newsk)->icsk_ext_hdr_len = inet_opt->opt.optlen;
	newinet->inet_id = newtp->write_seq ^ jiffies;

	if (!dst) {
		dst = inet_csk_route_child_sock(sk, newsk, req);
		if (!dst)
			goto put_and_exit;
	} else {
		/* syncookie case : see end of cookie_v4_check() */
	}
	sk_setup_caps(newsk, dst);

	tcp_ca_openreq_child(newsk, dst);

	tcp_sync_mss(newsk, dst_mtu(dst));
	newtp->advmss = tcp_mss_clamp(tcp_sk(sk), dst_metric_advmss(dst));

	tcp_initialize_rcv_mss(newsk);

#ifdef CONFIG_TCP_MD5SIG
	/* Copy over the MD5 key from the original socket */
	key = tcp_md5_do_lookup(sk, (union tcp_md5_addr *)&newinet->inet_daddr,
				AF_INET);
	if (key) {
		/*
		 * We're using one, so create a matching key
		 * on the newsk structure. If we fail to get
		 * memory, then we end up not copying the key
		 * across. Shucks.
		 */
		tcp_md5_do_add(newsk, (union tcp_md5_addr *)&newinet->inet_daddr,
			       AF_INET, 32, key->key, key->keylen, GFP_ATOMIC);
		sk_nocaps_add(newsk, NETIF_F_GSO_MASK);
	}
#endif

	if (__inet_inherit_port(sk, newsk) < 0)
		goto put_and_exit;
	*own_req = inet_ehash_nolisten(newsk, req_to_sk(req_unhash));
	if (likely(*own_req)) {
		tcp_move_syn(newtp, req);
		ireq->ireq_opt = NULL;
	} else {
		newinet->inet_opt = NULL;
	}
	return newsk;

exit_overflow:
	NET_INC_STATS(sock_net(sk), LINUX_MIB_LISTENOVERFLOWS);
exit_nonewsk:
	dst_release(dst);
exit:
	tcp_listendrop(sk);
	return NULL;
put_and_exit:
	newinet->inet_opt = NULL;
	inet_csk_prepare_forced_close(newsk);
	tcp_done(newsk);
	goto exit;
}
EXPORT_SYMBOL(tcp_v4_syn_recv_sock);
static struct sock *tcp_v4_cookie_check(struct sock *sk, struct sk_buff *skb)
{
#ifdef CONFIG_SYN_COOKIES
	const struct tcphdr *th = tcp_hdr(skb);

	if (!th->syn)
		sk = cookie_v4_check(sk, skb);
#endif
	return sk;
}
/* The socket must have its spinlock held when we get
 * here, unless it is a TCP_LISTEN socket.
 *
 * We have a potential double-lock case here, so even when
 * doing backlog processing we use the BH locking scheme.
 * This is because we cannot sleep with the original spinlock
 * held.
 */
int tcp_v4_do_rcv(struct sock *sk, struct sk_buff *skb)
{
	struct sock *rsk;

	if (sk->sk_state == TCP_ESTABLISHED) { /* Fast path */
		struct dst_entry *dst = sk->sk_rx_dst;

		sock_rps_save_rxhash(sk, skb);
		sk_mark_napi_id(sk, skb);
		if (dst) {
			if (inet_sk(sk)->rx_dst_ifindex != skb->skb_iif ||
			    !dst->ops->check(dst, 0)) {
				dst_release(dst);
				sk->sk_rx_dst = NULL;
			}
		}
		tcp_rcv_established(sk, skb, tcp_hdr(skb));
		return 0;
	}

	if (tcp_checksum_complete(skb))
		goto csum_err;

	if (sk->sk_state == TCP_LISTEN) {
		struct sock *nsk = tcp_v4_cookie_check(sk, skb);

		if (!nsk)
			goto discard;
		if (nsk != sk) {
			if (tcp_child_process(sk, nsk, skb)) {
				rsk = nsk;
				goto reset;
			}
			return 0;
		}
	} else
		sock_rps_save_rxhash(sk, skb);

	if (tcp_rcv_state_process(sk, skb)) {
		rsk = sk;
		goto reset;
	}
	return 0;

reset:
	tcp_v4_send_reset(rsk, skb);
discard:
	kfree_skb(skb);
	/* Be careful here. If this function gets more complicated and
	 * gcc suffers from register pressure on the x86, sk (in %ebx)
	 * might be destroyed here. This current version compiles correctly,
	 * but you have been warned.
	 */
	return 0;

csum_err:
	TCP_INC_STATS(sock_net(sk), TCP_MIB_CSUMERRORS);
	TCP_INC_STATS(sock_net(sk), TCP_MIB_INERRS);
	goto discard;
}
EXPORT_SYMBOL(tcp_v4_do_rcv);
int tcp_v4_early_demux(struct sk_buff *skb)
{
	const struct iphdr *iph;
	const struct tcphdr *th;
	struct sock *sk;

	if (skb->pkt_type != PACKET_HOST)
		return 0;

	if (!pskb_may_pull(skb, skb_transport_offset(skb) + sizeof(struct tcphdr)))
		return 0;

	iph = ip_hdr(skb);
	th = tcp_hdr(skb);

	if (th->doff < sizeof(struct tcphdr) / 4)
		return 0;

	sk = __inet_lookup_established(dev_net(skb->dev), &tcp_hashinfo,
				       iph->saddr, th->source,
				       iph->daddr, ntohs(th->dest),
				       skb->skb_iif, inet_sdif(skb));
	if (sk) {
		skb->sk = sk;
		skb->destructor = sock_edemux;
		if (sk_fullsock(sk)) {
			struct dst_entry *dst = READ_ONCE(sk->sk_rx_dst);

			if (dst)
				dst = dst_check(dst, 0);
			if (dst &&
			    inet_sk(sk)->rx_dst_ifindex == skb->skb_iif)
				skb_dst_set_noref(skb, dst);
		}
	}
	return 0;
}
bool tcp_add_backlog(struct sock *sk, struct sk_buff *skb)
{
	u32 limit = sk->sk_rcvbuf + sk->sk_sndbuf;

	/* Only socket owner can try to collapse/prune rx queues
	 * to reduce memory overhead, so add a little headroom here.
	 * Few sockets backlog are possibly concurrently non empty.
	 */
	limit += 64*1024;

	/* In case all data was pulled from skb frags (in __pskb_pull_tail()),
	 * we can fix skb->truesize to its real value to avoid future drops.
	 * This is valid because skb is not yet charged to the socket.
	 * It has been noticed pure SACK packets were sometimes dropped
	 * (if cooked by drivers without copybreak feature).
	 */
	skb_condense(skb);

	if (unlikely(sk_add_backlog(sk, skb, limit))) {
		bh_unlock_sock(sk);
		__NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPBACKLOGDROP);
		return true;
	}
	return false;
}
EXPORT_SYMBOL(tcp_add_backlog);
int tcp_filter(struct sock *sk, struct sk_buff *skb)
{
	struct tcphdr *th = (struct tcphdr *)skb->data;
	unsigned int eaten = skb->len;
	int err;

	err = sk_filter_trim_cap(sk, skb, th->doff * 4);
	if (!err) {
		eaten -= skb->len;
		TCP_SKB_CB(skb)->end_seq -= eaten;
	}
	return err;
}
EXPORT_SYMBOL(tcp_filter);
int tcp_v4_rcv(struct sk_buff *skb)
{
	struct net *net = dev_net(skb->dev);
	int sdif = inet_sdif(skb);
	const struct iphdr *iph;
	const struct tcphdr *th;
	bool refcounted;
	struct sock *sk;
	int ret;

	if (skb->pkt_type != PACKET_HOST)
		goto discard_it;

	/* Count it even if it's bad */
	__TCP_INC_STATS(net, TCP_MIB_INSEGS);

	if (!pskb_may_pull(skb, sizeof(struct tcphdr)))
		goto discard_it;

	th = (const struct tcphdr *)skb->data;
	if (unlikely(th->doff < sizeof(struct tcphdr) / 4))
		goto bad_packet;
	if (!pskb_may_pull(skb, th->doff * 4))
		goto discard_it;

	/* An explanation is required here, I think.
	 * Packet length and doff are validated by header prediction,
	 * provided case of th->doff==0 is eliminated.
	 * So, we defer the checks. */
	if (skb_checksum_init(skb, IPPROTO_TCP, inet_compute_pseudo))
		goto csum_error;

	th = (const struct tcphdr *)skb->data;
	iph = ip_hdr(skb);
	/* This is tricky : We move IPCB at its correct location into TCP_SKB_CB()
	 * barrier() makes sure compiler won't play fool^Waliasing games.
	 */
	memmove(&TCP_SKB_CB(skb)->header.h4, IPCB(skb),
		sizeof(struct inet_skb_parm));
	barrier();

	TCP_SKB_CB(skb)->seq = ntohl(th->seq);
	TCP_SKB_CB(skb)->end_seq = (TCP_SKB_CB(skb)->seq + th->syn + th->fin +
				    skb->len - th->doff * 4);
	TCP_SKB_CB(skb)->ack_seq = ntohl(th->ack_seq);
	TCP_SKB_CB(skb)->tcp_flags = tcp_flag_byte(th);
	TCP_SKB_CB(skb)->tcp_tw_isn = 0;
	TCP_SKB_CB(skb)->ip_dsfield = ipv4_get_dsfield(iph);
	TCP_SKB_CB(skb)->sacked = 0;
	TCP_SKB_CB(skb)->has_rxtstamp =
			skb->tstamp || skb_hwtstamps(skb)->hwtstamp;

lookup:
	sk = __inet_lookup_skb(&tcp_hashinfo, skb, __tcp_hdrlen(th), th->source,
			       th->dest, sdif, &refcounted);
	if (!sk)
		goto no_tcp_socket;

process:
	if (sk->sk_state == TCP_TIME_WAIT)
		goto do_time_wait;

	if (sk->sk_state == TCP_NEW_SYN_RECV) {
		struct request_sock *req = inet_reqsk(sk);
		struct sock *nsk;

		sk = req->rsk_listener;
		if (unlikely(tcp_v4_inbound_md5_hash(sk, skb))) {
			sk_drops_add(sk, skb);
			reqsk_put(req);
			goto discard_it;
		}
		if (unlikely(sk->sk_state != TCP_LISTEN)) {
			inet_csk_reqsk_queue_drop_and_put(sk, req);
			goto lookup;
		}
		/* We own a reference on the listener, increase it again
		 * as we might lose it too soon.
		 */
		sock_hold(sk);
		refcounted = true;
		nsk = NULL;
		if (!tcp_filter(sk, skb))
			nsk = tcp_check_req(sk, skb, req, false);
		if (!nsk) {
			reqsk_put(req);
			goto discard_and_relse;
		}
		if (nsk == sk) {
			reqsk_put(req);
		} else if (tcp_child_process(sk, nsk, skb)) {
			tcp_v4_send_reset(nsk, skb);
			goto discard_and_relse;
		} else {
			sock_put(sk);
			return 0;
		}
	}
	if (unlikely(iph->ttl < inet_sk(sk)->min_ttl)) {
		__NET_INC_STATS(net, LINUX_MIB_TCPMINTTLDROP);
		goto discard_and_relse;
	}

	if (!xfrm4_policy_check(sk, XFRM_POLICY_IN, skb))
		goto discard_and_relse;

	if (tcp_v4_inbound_md5_hash(sk, skb))
		goto discard_and_relse;

	nf_reset(skb);

	if (tcp_filter(sk, skb))
		goto discard_and_relse;
	th = (const struct tcphdr *)skb->data;
	iph = ip_hdr(skb);

	skb->dev = NULL;

	if (sk->sk_state == TCP_LISTEN) {
		ret = tcp_v4_do_rcv(sk, skb);
		goto put_and_return;
	}

	sk_incoming_cpu_update(sk);

	bh_lock_sock_nested(sk);
	tcp_segs_in(tcp_sk(sk), skb);
	ret = 0;
	if (!sock_owned_by_user(sk)) {
		ret = tcp_v4_do_rcv(sk, skb);
	} else if (tcp_add_backlog(sk, skb)) {
		goto discard_and_relse;
	}
	bh_unlock_sock(sk);

put_and_return:
	if (refcounted)
		sock_put(sk);
	return ret;

no_tcp_socket:
	if (!xfrm4_policy_check(NULL, XFRM_POLICY_IN, skb))
		goto discard_it;

	if (tcp_checksum_complete(skb)) {
csum_error:
		__TCP_INC_STATS(net, TCP_MIB_CSUMERRORS);
bad_packet:
		__TCP_INC_STATS(net, TCP_MIB_INERRS);
	} else {
		tcp_v4_send_reset(NULL, skb);
	}

discard_it:
	/* Discard frame. */
	kfree_skb(skb);
	return 0;

discard_and_relse:
	sk_drops_add(sk, skb);
	if (refcounted)
		sock_put(sk);
	goto discard_it;

do_time_wait:
	if (!xfrm4_policy_check(NULL, XFRM_POLICY_IN, skb)) {
		inet_twsk_put(inet_twsk(sk));
		goto discard_it;
	}

	if (tcp_checksum_complete(skb)) {
		inet_twsk_put(inet_twsk(sk));
		goto csum_error;
	}
	switch (tcp_timewait_state_process(inet_twsk(sk), skb, th)) {
	case TCP_TW_SYN: {
		struct sock *sk2 = inet_lookup_listener(dev_net(skb->dev),
							&tcp_hashinfo, skb,
							__tcp_hdrlen(th),
							iph->saddr, th->source,
							iph->daddr, th->dest,
							inet_iif(skb),
							sdif);
		if (sk2) {
			inet_twsk_deschedule_put(inet_twsk(sk));
			sk = sk2;
			refcounted = false;
			goto process;
		}
	}
		/* to ACK */
		/* fall through */
	case TCP_TW_ACK:
		tcp_v4_timewait_ack(sk, skb);
		break;
	case TCP_TW_RST:
		tcp_v4_send_reset(sk, skb);
		inet_twsk_deschedule_put(inet_twsk(sk));
		goto discard_it;
	case TCP_TW_SUCCESS:;
	}
	goto discard_it;
}
static struct timewait_sock_ops tcp_timewait_sock_ops = {
	.twsk_obj_size	= sizeof(struct tcp_timewait_sock),
	.twsk_unique	= tcp_twsk_unique,
	.twsk_destructor= tcp_twsk_destructor,
};

void inet_sk_rx_dst_set(struct sock *sk, const struct sk_buff *skb)
{
	struct dst_entry *dst = skb_dst(skb);

	if (dst && dst_hold_safe(dst)) {
		sk->sk_rx_dst = dst;
		inet_sk(sk)->rx_dst_ifindex = skb->skb_iif;
	}
}
EXPORT_SYMBOL(inet_sk_rx_dst_set);
const struct inet_connection_sock_af_ops ipv4_specific = {
	.queue_xmit	   = ip_queue_xmit,
	.send_check	   = tcp_v4_send_check,
	.rebuild_header	   = inet_sk_rebuild_header,
	.sk_rx_dst_set	   = inet_sk_rx_dst_set,
	.conn_request	   = tcp_v4_conn_request,
	.syn_recv_sock	   = tcp_v4_syn_recv_sock,
	.net_header_len	   = sizeof(struct iphdr),
	.setsockopt	   = ip_setsockopt,
	.getsockopt	   = ip_getsockopt,
	.addr2sockaddr	   = inet_csk_addr2sockaddr,
	.sockaddr_len	   = sizeof(struct sockaddr_in),
#ifdef CONFIG_COMPAT
	.compat_setsockopt = compat_ip_setsockopt,
	.compat_getsockopt = compat_ip_getsockopt,
#endif
	.mtu_reduced	   = tcp_v4_mtu_reduced,
};
EXPORT_SYMBOL(ipv4_specific);
#ifdef CONFIG_TCP_MD5SIG
static const struct tcp_sock_af_ops tcp_sock_ipv4_specific = {
	.md5_lookup	= tcp_v4_md5_lookup,
	.calc_md5_hash	= tcp_v4_md5_hash_skb,
	.md5_parse	= tcp_v4_parse_md5_keys,
};
#endif

/* NOTE: A lot of things set to zero explicitly by call to
 *       sk_alloc() so need not be done here.
 */
static int tcp_v4_init_sock(struct sock *sk)
{
	struct inet_connection_sock *icsk = inet_csk(sk);

	tcp_init_sock(sk);

	icsk->icsk_af_ops = &ipv4_specific;

#ifdef CONFIG_TCP_MD5SIG
	tcp_sk(sk)->af_specific = &tcp_sock_ipv4_specific;
#endif

	return 0;
}
void tcp_v4_destroy_sock(struct sock *sk)
{
	struct tcp_sock *tp = tcp_sk(sk);

	trace_tcp_destroy_sock(sk);

	tcp_clear_xmit_timers(sk);

	tcp_cleanup_congestion_control(sk);

	tcp_cleanup_ulp(sk);

	/* Clean up the write buffer. */
	tcp_write_queue_purge(sk);

	/* Check if we want to disable active TFO */
	tcp_fastopen_active_disable_ofo_check(sk);

	/* Cleans up our, hopefully empty, out_of_order_queue. */
	skb_rbtree_purge(&tp->out_of_order_queue);

#ifdef CONFIG_TCP_MD5SIG
	/* Clean up the MD5 key list, if any */
	if (tp->md5sig_info) {
		tcp_clear_md5_list(sk);
		kfree_rcu(tp->md5sig_info, rcu);
		tp->md5sig_info = NULL;
	}
#endif

	/* Clean up a referenced TCP bind bucket. */
	if (inet_csk(sk)->icsk_bind_hash)
		inet_put_port(sk);

	BUG_ON(tp->fastopen_rsk);

	/* If socket is aborted during connect operation */
	tcp_free_fastopen_req(tp);
	tcp_fastopen_destroy_cipher(sk);
	tcp_saved_syn_free(tp);

	sk_sockets_allocated_dec(sk);
}
EXPORT_SYMBOL(tcp_v4_destroy_sock);
#ifdef CONFIG_PROC_FS
/* Proc filesystem TCP sock list dumping. */

/*
 * Get next listener socket follow cur.  If cur is NULL, get first socket
 * starting from bucket given in st->bucket; when st->bucket is zero the
 * very first socket in the hash table is returned.
 */
static void *listening_get_next(struct seq_file *seq, void *cur)
{
	struct tcp_iter_state *st = seq->private;
	struct net *net = seq_file_net(seq);
	struct inet_listen_hashbucket *ilb;
	struct sock *sk = cur;

	if (!sk) {
get_head:
		ilb = &tcp_hashinfo.listening_hash[st->bucket];
		spin_lock(&ilb->lock);
		sk = sk_head(&ilb->head);
		st->offset = 0;
		goto get_sk;
	}
	ilb = &tcp_hashinfo.listening_hash[st->bucket];
	++st->num;
	++st->offset;

	sk = sk_next(sk);
get_sk:
	sk_for_each_from(sk) {
		if (!net_eq(sock_net(sk), net))
			continue;
		if (sk->sk_family == st->family)
			return sk;
	}
	spin_unlock(&ilb->lock);
	st->offset = 0;
	if (++st->bucket < INET_LHTABLE_SIZE)
		goto get_head;
	return NULL;
}
1957 struct tcp_iter_state *st = seq->private;
1962 rc = listening_get_next(seq, NULL);
1964 while (rc && *pos) {
1965 rc = listening_get_next(seq, rc);
1971 static inline bool empty_bucket(const struct tcp_iter_state *st)
1973 return hlist_nulls_empty(&tcp_hashinfo.ehash[st->bucket].chain);
/*
 * Get first established socket starting from bucket given in st->bucket.
 * If st->bucket is zero, the very first socket in the hash is returned.
 */
static void *established_get_first(struct seq_file *seq)
{
	struct tcp_iter_state *st = seq->private;
	struct net *net = seq_file_net(seq);
	void *rc = NULL;

	st->offset = 0;
	for (; st->bucket <= tcp_hashinfo.ehash_mask; ++st->bucket) {
		struct sock *sk;
		struct hlist_nulls_node *node;
		spinlock_t *lock = inet_ehash_lockp(&tcp_hashinfo, st->bucket);

		/* Lockless fast path for the common case of empty buckets */
		if (empty_bucket(st))
			continue;

		spin_lock_bh(lock);
		sk_nulls_for_each(sk, node, &tcp_hashinfo.ehash[st->bucket].chain) {
			if (sk->sk_family != st->family ||
			    !net_eq(sock_net(sk), net)) {
				continue;
			}
			rc = sk;
			goto out;
		}
		spin_unlock_bh(lock);
	}
out:
	return rc;
}
static void *established_get_next(struct seq_file *seq, void *cur)
{
	struct sock *sk = cur;
	struct hlist_nulls_node *node;
	struct tcp_iter_state *st = seq->private;
	struct net *net = seq_file_net(seq);

	++st->num;
	++st->offset;

	sk = sk_nulls_next(sk);

	sk_nulls_for_each_from(sk, node) {
		if (sk->sk_family == st->family && net_eq(sock_net(sk), net))
			return sk;
	}

	spin_unlock_bh(inet_ehash_lockp(&tcp_hashinfo, st->bucket));
	++st->bucket;
	return established_get_first(seq);
}
static void *established_get_idx(struct seq_file *seq, loff_t pos)
{
	struct tcp_iter_state *st = seq->private;
	void *rc;

	st->bucket = 0;
	rc = established_get_first(seq);

	while (rc && pos) {
		rc = established_get_next(seq, rc);
		--pos;
	}
	return rc;
}

static void *tcp_get_idx(struct seq_file *seq, loff_t pos)
{
	void *rc;
	struct tcp_iter_state *st = seq->private;

	st->state = TCP_SEQ_STATE_LISTENING;
	rc	  = listening_get_idx(seq, &pos);

	if (!rc) {
		st->state = TCP_SEQ_STATE_ESTABLISHED;
		rc	  = established_get_idx(seq, pos);
	}

	return rc;
}
static void *tcp_seek_last_pos(struct seq_file *seq)
{
	struct tcp_iter_state *st = seq->private;
	int offset = st->offset;
	int orig_num = st->num;
	void *rc = NULL;

	switch (st->state) {
	case TCP_SEQ_STATE_LISTENING:
		if (st->bucket >= INET_LHTABLE_SIZE)
			break;
		st->state = TCP_SEQ_STATE_LISTENING;
		rc = listening_get_next(seq, NULL);
		while (offset-- && rc)
			rc = listening_get_next(seq, rc);
		if (rc)
			break;
		st->bucket = 0;
		st->state = TCP_SEQ_STATE_ESTABLISHED;
		/* Fallthrough */
	case TCP_SEQ_STATE_ESTABLISHED:
		if (st->bucket > tcp_hashinfo.ehash_mask)
			break;
		rc = established_get_first(seq);
		while (offset-- && rc)
			rc = established_get_next(seq, rc);
	}

	st->num = orig_num;

	return rc;
}
static void *tcp_seq_start(struct seq_file *seq, loff_t *pos)
{
	struct tcp_iter_state *st = seq->private;
	void *rc;

	if (*pos && *pos == st->last_pos) {
		rc = tcp_seek_last_pos(seq);
		if (rc)
			goto out;
	}

	st->state = TCP_SEQ_STATE_LISTENING;
	st->num = 0;
	st->bucket = 0;
	st->offset = 0;
	rc = *pos ? tcp_get_idx(seq, *pos - 1) : SEQ_START_TOKEN;

out:
	st->last_pos = *pos;
	return rc;
}
static void *tcp_seq_next(struct seq_file *seq, void *v, loff_t *pos)
{
	struct tcp_iter_state *st = seq->private;
	void *rc = NULL;

	if (v == SEQ_START_TOKEN) {
		rc = tcp_get_idx(seq, 0);
		goto out;
	}

	switch (st->state) {
	case TCP_SEQ_STATE_LISTENING:
		rc = listening_get_next(seq, v);
		if (!rc) {
			st->state = TCP_SEQ_STATE_ESTABLISHED;
			st->bucket = 0;
			st->offset = 0;
			rc	  = established_get_first(seq);
		}
		break;
	case TCP_SEQ_STATE_ESTABLISHED:
		rc = established_get_next(seq, v);
		break;
	}
out:
	++*pos;
	st->last_pos = *pos;
	return rc;
}
static void tcp_seq_stop(struct seq_file *seq, void *v)
{
	struct tcp_iter_state *st = seq->private;

	switch (st->state) {
	case TCP_SEQ_STATE_LISTENING:
		if (v != SEQ_START_TOKEN)
			spin_unlock(&tcp_hashinfo.listening_hash[st->bucket].lock);
		break;
	case TCP_SEQ_STATE_ESTABLISHED:
		if (v)
			spin_unlock_bh(inet_ehash_lockp(&tcp_hashinfo, st->bucket));
		break;
	}
}
int tcp_seq_open(struct inode *inode, struct file *file)
{
	struct tcp_seq_afinfo *afinfo = PDE_DATA(inode);
	struct tcp_iter_state *s;
	int err;

	err = seq_open_net(inode, file, &afinfo->seq_ops,
			   sizeof(struct tcp_iter_state));
	if (err < 0)
		return err;

	s = ((struct seq_file *)file->private_data)->private;
	s->family	= afinfo->family;
	s->last_pos	= 0;
	return 0;
}
EXPORT_SYMBOL(tcp_seq_open);
int tcp_proc_register(struct net *net, struct tcp_seq_afinfo *afinfo)
{
	int rc = 0;
	struct proc_dir_entry *p;

	afinfo->seq_ops.start	= tcp_seq_start;
	afinfo->seq_ops.next	= tcp_seq_next;
	afinfo->seq_ops.stop	= tcp_seq_stop;

	p = proc_create_data(afinfo->name, S_IRUGO, net->proc_net,
			     afinfo->seq_fops, afinfo);
	if (!p)
		rc = -ENOMEM;
	return rc;
}
EXPORT_SYMBOL(tcp_proc_register);

void tcp_proc_unregister(struct net *net, struct tcp_seq_afinfo *afinfo)
{
	remove_proc_entry(afinfo->name, net->proc_net);
}
EXPORT_SYMBOL(tcp_proc_unregister);
static void get_openreq4(const struct request_sock *req,
			 struct seq_file *f, int i)
{
	const struct inet_request_sock *ireq = inet_rsk(req);
	long delta = req->rsk_timer.expires - jiffies;

	seq_printf(f, "%4d: %08X:%04X %08X:%04X"
		" %02X %08X:%08X %02X:%08lX %08X %5u %8d %u %d %pK",
		i,
		ireq->ir_loc_addr,
		ireq->ir_num,
		ireq->ir_rmt_addr,
		ntohs(ireq->ir_rmt_port),
		TCP_SYN_RECV,
		0, 0, /* could print option size, but that is af dependent. */
		1,    /* timers active (only the expire timer) */
		jiffies_delta_to_clock_t(delta),
		req->num_timeout,
		from_kuid_munged(seq_user_ns(f),
				 sock_i_uid(req->rsk_listener)),
		0,  /* non standard timer */
		0, /* open_requests have no inode */
		0,
		req);
}
static void get_tcp4_sock(struct sock *sk, struct seq_file *f, int i)
{
	int timer_active;
	unsigned long timer_expires;
	const struct tcp_sock *tp = tcp_sk(sk);
	const struct inet_connection_sock *icsk = inet_csk(sk);
	const struct inet_sock *inet = inet_sk(sk);
	const struct fastopen_queue *fastopenq = &icsk->icsk_accept_queue.fastopenq;
	__be32 dest = inet->inet_daddr;
	__be32 src = inet->inet_rcv_saddr;
	__u16 destp = ntohs(inet->inet_dport);
	__u16 srcp = ntohs(inet->inet_sport);
	int rx_queue;
	int state;

	if (icsk->icsk_pending == ICSK_TIME_RETRANS ||
	    icsk->icsk_pending == ICSK_TIME_REO_TIMEOUT ||
	    icsk->icsk_pending == ICSK_TIME_LOSS_PROBE) {
		timer_active	= 1;
		timer_expires	= icsk->icsk_timeout;
	} else if (icsk->icsk_pending == ICSK_TIME_PROBE0) {
		timer_active	= 4;
		timer_expires	= icsk->icsk_timeout;
	} else if (timer_pending(&sk->sk_timer)) {
		timer_active	= 2;
		timer_expires	= sk->sk_timer.expires;
	} else {
		timer_active	= 0;
		timer_expires	= jiffies;
	}

	state = sk_state_load(sk);
	if (state == TCP_LISTEN)
		rx_queue = sk->sk_ack_backlog;
	else
		/* Because we don't lock the socket,
		 * we might find a transient negative value.
		 */
		rx_queue = max_t(int, tp->rcv_nxt - tp->copied_seq, 0);

	seq_printf(f, "%4d: %08X:%04X %08X:%04X %02X %08X:%08X %02X:%08lX "
			"%08X %5u %8d %lu %d %pK %lu %lu %u %u %d",
		i, src, srcp, dest, destp, state,
		tp->write_seq - tp->snd_una,
		rx_queue,
		timer_active,
		jiffies_delta_to_clock_t(timer_expires - jiffies),
		icsk->icsk_retransmits,
		from_kuid_munged(seq_user_ns(f), sock_i_uid(sk)),
		icsk->icsk_probes_out,
		sock_i_ino(sk),
		refcount_read(&sk->sk_refcnt), sk,
		jiffies_to_clock_t(icsk->icsk_rto),
		jiffies_to_clock_t(icsk->icsk_ack.ato),
		(icsk->icsk_ack.quick << 1) | icsk->icsk_ack.pingpong,
		tp->snd_cwnd,
		state == TCP_LISTEN ?
		    fastopenq->max_qlen :
		    (tcp_in_initial_slowstart(tp) ? -1 : tp->snd_ssthresh));
}
static void get_timewait4_sock(const struct inet_timewait_sock *tw,
			       struct seq_file *f, int i)
{
	long delta = tw->tw_timer.expires - jiffies;
	__be32 dest, src;
	__u16 destp, srcp;

	dest  = tw->tw_daddr;
	src   = tw->tw_rcv_saddr;
	destp = ntohs(tw->tw_dport);
	srcp  = ntohs(tw->tw_sport);

	seq_printf(f, "%4d: %08X:%04X %08X:%04X"
		" %02X %08X:%08X %02X:%08lX %08X %5d %8d %d %d %pK",
		i, src, srcp, dest, destp, tw->tw_substate, 0, 0,
		3, jiffies_delta_to_clock_t(delta), 0, 0, 0, 0,
		refcount_read(&tw->tw_refcnt), tw);
}

#define TMPSZ 150
static int tcp4_seq_show(struct seq_file *seq, void *v)
{
	struct tcp_iter_state *st;
	struct sock *sk = v;

	seq_setwidth(seq, TMPSZ - 1);
	if (v == SEQ_START_TOKEN) {
		seq_puts(seq, "  sl  local_address rem_address   st tx_queue "
			   "rx_queue tr tm->when retrnsmt   uid  timeout "
			   "inode");
		goto out;
	}
	st = seq->private;

	if (sk->sk_state == TCP_TIME_WAIT)
		get_timewait4_sock(v, seq, st->num);
	else if (sk->sk_state == TCP_NEW_SYN_RECV)
		get_openreq4(v, seq, st->num);
	else
		get_tcp4_sock(v, seq, st->num);
out:
	seq_pad(seq, '\n');
	return 0;
}
static const struct file_operations tcp_afinfo_seq_fops = {
	.owner   = THIS_MODULE,
	.open    = tcp_seq_open,
	.read    = seq_read,
	.llseek  = seq_lseek,
	.release = seq_release_net
};

static struct tcp_seq_afinfo tcp4_seq_afinfo = {
	.name		= "tcp",
	.family		= AF_INET,
	.seq_fops	= &tcp_afinfo_seq_fops,
	.seq_ops	= {
		.show		= tcp4_seq_show,
	},
};

static int __net_init tcp4_proc_init_net(struct net *net)
{
	return tcp_proc_register(net, &tcp4_seq_afinfo);
}

static void __net_exit tcp4_proc_exit_net(struct net *net)
{
	tcp_proc_unregister(net, &tcp4_seq_afinfo);
}
static struct pernet_operations tcp4_net_ops = {
	.init = tcp4_proc_init_net,
	.exit = tcp4_proc_exit_net,
};

int __init tcp4_proc_init(void)
{
	return register_pernet_subsys(&tcp4_net_ops);
}

void tcp4_proc_exit(void)
{
	unregister_pernet_subsys(&tcp4_net_ops);
}
#endif /* CONFIG_PROC_FS */
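
/* Illustrative user-space sketch (not part of this file): the seq_file
 * machinery above is what backs /proc/net/tcp; dumping it is as simple as:
 *
 *	#include <stdio.h>
 *
 *	static int dump_proc_net_tcp(void)
 *	{
 *		char line[256];
 *		FILE *f = fopen("/proc/net/tcp", "r");
 *
 *		if (!f)
 *			return -1;
 *		while (fgets(line, sizeof(line), f))
 *			fputs(line, stdout);
 *		return fclose(f);
 *	}
 *
 * Each row is one socket, printed by get_tcp4_sock(), get_timewait4_sock()
 * or get_openreq4() depending on its state; addresses and ports are
 * hex-encoded, as the %08X:%04X format strings above show.
 */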
struct proto tcp_prot = {
	.name			= "TCP",
	.owner			= THIS_MODULE,
	.close			= tcp_close,
	.connect		= tcp_v4_connect,
	.disconnect		= tcp_disconnect,
	.accept			= inet_csk_accept,
	.ioctl			= tcp_ioctl,
	.init			= tcp_v4_init_sock,
	.destroy		= tcp_v4_destroy_sock,
	.shutdown		= tcp_shutdown,
	.setsockopt		= tcp_setsockopt,
	.getsockopt		= tcp_getsockopt,
	.keepalive		= tcp_set_keepalive,
	.recvmsg		= tcp_recvmsg,
	.sendmsg		= tcp_sendmsg,
	.sendpage		= tcp_sendpage,
	.backlog_rcv		= tcp_v4_do_rcv,
	.release_cb		= tcp_release_cb,
	.hash			= inet_hash,
	.unhash			= inet_unhash,
	.get_port		= inet_csk_get_port,
	.enter_memory_pressure	= tcp_enter_memory_pressure,
	.leave_memory_pressure	= tcp_leave_memory_pressure,
	.stream_memory_free	= tcp_stream_memory_free,
	.sockets_allocated	= &tcp_sockets_allocated,
	.orphan_count		= &tcp_orphan_count,
	.memory_allocated	= &tcp_memory_allocated,
	.memory_pressure	= &tcp_memory_pressure,
	.sysctl_mem		= sysctl_tcp_mem,
	.sysctl_wmem		= sysctl_tcp_wmem,
	.sysctl_rmem		= sysctl_tcp_rmem,
	.max_header		= MAX_TCP_HEADER,
	.obj_size		= sizeof(struct tcp_sock),
	.slab_flags		= SLAB_TYPESAFE_BY_RCU,
	.twsk_prot		= &tcp_timewait_sock_ops,
	.rsk_prot		= &tcp_request_sock_ops,
	.h.hashinfo		= &tcp_hashinfo,
	.no_autobind		= true,
#ifdef CONFIG_COMPAT
	.compat_setsockopt	= compat_tcp_setsockopt,
	.compat_getsockopt	= compat_tcp_getsockopt,
#endif
	.diag_destroy		= tcp_abort,
};
EXPORT_SYMBOL(tcp_prot);
static void __net_exit tcp_sk_exit(struct net *net)
{
	int cpu;

	for_each_possible_cpu(cpu)
		inet_ctl_sock_destroy(*per_cpu_ptr(net->ipv4.tcp_sk, cpu));
	free_percpu(net->ipv4.tcp_sk);
}
static int __net_init tcp_sk_init(struct net *net)
{
	int res, cpu, cnt;

	net->ipv4.tcp_sk = alloc_percpu(struct sock *);
	if (!net->ipv4.tcp_sk)
		return -ENOMEM;

	for_each_possible_cpu(cpu) {
		struct sock *sk;

		res = inet_ctl_sock_create(&sk, PF_INET, SOCK_RAW,
					   IPPROTO_TCP, net);
		if (res)
			goto fail;
		sock_set_flag(sk, SOCK_USE_WRITE_QUEUE);
		*per_cpu_ptr(net->ipv4.tcp_sk, cpu) = sk;
	}

	net->ipv4.sysctl_tcp_ecn = 2;
	net->ipv4.sysctl_tcp_ecn_fallback = 1;

	net->ipv4.sysctl_tcp_base_mss = TCP_BASE_MSS;
	net->ipv4.sysctl_tcp_probe_threshold = TCP_PROBE_THRESHOLD;
	net->ipv4.sysctl_tcp_probe_interval = TCP_PROBE_INTERVAL;

	net->ipv4.sysctl_tcp_keepalive_time = TCP_KEEPALIVE_TIME;
	net->ipv4.sysctl_tcp_keepalive_probes = TCP_KEEPALIVE_PROBES;
	net->ipv4.sysctl_tcp_keepalive_intvl = TCP_KEEPALIVE_INTVL;

	net->ipv4.sysctl_tcp_syn_retries = TCP_SYN_RETRIES;
	net->ipv4.sysctl_tcp_synack_retries = TCP_SYNACK_RETRIES;
	net->ipv4.sysctl_tcp_syncookies = 1;
	net->ipv4.sysctl_tcp_reordering = TCP_FASTRETRANS_THRESH;
	net->ipv4.sysctl_tcp_retries1 = TCP_RETR1;
	net->ipv4.sysctl_tcp_retries2 = TCP_RETR2;
	net->ipv4.sysctl_tcp_orphan_retries = 0;
	net->ipv4.sysctl_tcp_fin_timeout = TCP_FIN_TIMEOUT;
	net->ipv4.sysctl_tcp_notsent_lowat = UINT_MAX;
	net->ipv4.sysctl_tcp_tw_reuse = 0;

	cnt = tcp_hashinfo.ehash_mask + 1;
	net->ipv4.tcp_death_row.sysctl_max_tw_buckets = (cnt + 1) / 2;
	net->ipv4.tcp_death_row.hashinfo = &tcp_hashinfo;

	net->ipv4.sysctl_max_syn_backlog = max(128, cnt / 256);
	net->ipv4.sysctl_tcp_sack = 1;
	net->ipv4.sysctl_tcp_window_scaling = 1;
	net->ipv4.sysctl_tcp_timestamps = 1;
	net->ipv4.sysctl_tcp_early_retrans = 3;

	net->ipv4.sysctl_tcp_fastopen = TFO_CLIENT_ENABLE;
	spin_lock_init(&net->ipv4.tcp_fastopen_ctx_lock);
	net->ipv4.sysctl_tcp_fastopen_blackhole_timeout = 60 * 60;
	atomic_set(&net->ipv4.tfo_active_disable_times, 0);

	return 0;

fail:
	tcp_sk_exit(net);

	return res;
}
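
/* Illustrative user-space sketch (not part of this file): the per-netns
 * defaults initialized above are exposed as files under
 * /proc/sys/net/ipv4/; reading one back, with the path as an assumption:
 *
 *	#include <stdio.h>
 *
 *	static long read_sysctl(const char *name)
 *	{
 *		long val = -1;
 *		char path[128];
 *		FILE *f;
 *
 *		snprintf(path, sizeof(path), "/proc/sys/net/ipv4/%s", name);
 *		f = fopen(path, "r");
 *		if (!f)
 *			return -1;
 *		if (fscanf(f, "%ld", &val) != 1)
 *			val = -1;
 *		fclose(f);
 *		return val;
 *	}
 *
 * e.g. read_sysctl("tcp_syncookies") returns the value set just above.
 */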
static void __net_exit tcp_sk_exit_batch(struct list_head *net_exit_list)
{
	struct net *net;

	inet_twsk_purge(&tcp_hashinfo, AF_INET);

	list_for_each_entry(net, net_exit_list, exit_list)
		tcp_fastopen_ctx_destroy(net);
}

static struct pernet_operations __net_initdata tcp_sk_ops = {
	.init	    = tcp_sk_init,
	.exit	    = tcp_sk_exit,
	.exit_batch = tcp_sk_exit_batch,
};
void __init tcp_v4_init(void)
{
	if (register_pernet_subsys(&tcp_sk_ops))
		panic("Failed to create the TCP control socket.\n");
}