/*
 * INET		An implementation of the TCP/IP protocol suite for the LINUX
 *		operating system.  INET is implemented using the BSD Socket
 *		interface as the means of communication with the user level.
 *
 *		Implementation of the Transmission Control Protocol(TCP).
 *
 *		IPv4 specific functions
 *
 *		code split from:
 *		linux/ipv4/tcp_input.c
 *		linux/ipv4/tcp_output.c
 *
 *		See tcp.c for author information
 *
 *	This program is free software; you can redistribute it and/or
 *	modify it under the terms of the GNU General Public License
 *	as published by the Free Software Foundation; either version
 *	2 of the License, or (at your option) any later version.
 */

/*
 * Changes:
 *	David S. Miller		:	New socket lookup architecture.
 *					This code is dedicated to John Dyson.
 *	David S. Miller		:	Change semantics of established hash,
 *					half is devoted to TIME_WAIT sockets
 *					and the rest go in the other half.
 *	Andi Kleen		:	Add support for syncookies and fixed
 *					some bugs: ip options weren't passed to
 *					the TCP layer, missed a check for an
 *					ACK bit.
 *	Andi Kleen		:	Implemented fast path mtu discovery.
 *					Fixed many serious bugs in the
 *					request_sock handling and moved
 *					most of it into the af independent code.
 *					Added tail drop and some other bugfixes.
 *					Added new listen semantics.
 *	Mike McLagan		:	Routing by source
 *	Juan Jose Ciarlante	:	ip_dynaddr bits
 *	Andi Kleen		:	various fixes.
 *	Vitaly E. Lavrov	:	Transparent proxy revived after year
 *					coma.
 *	Andi Kleen		:	Fix new listen.
 *	Andi Kleen		:	Fix accept error reporting.
 *	YOSHIFUJI Hideaki @USAGI and:	Support IPV6_V6ONLY socket option, which
 *	Alexey Kuznetsov		allow both IPv4 and IPv6 sockets to bind
 *					a single port at the same time.
 */
#define pr_fmt(fmt) "TCP: " fmt

#include <linux/bottom_half.h>
#include <linux/types.h>
#include <linux/fcntl.h>
#include <linux/module.h>
#include <linux/random.h>
#include <linux/cache.h>
#include <linux/jhash.h>
#include <linux/init.h>
#include <linux/times.h>
#include <linux/slab.h>

#include <net/net_namespace.h>
#include <net/icmp.h>
#include <net/inet_hashtables.h>
#include <net/tcp.h>
#include <net/transp_v6.h>
#include <net/ipv6.h>
#include <net/inet_common.h>
#include <net/timewait_sock.h>
#include <net/xfrm.h>
#include <net/secure_seq.h>
#include <net/busy_poll.h>

#include <linux/inet.h>
#include <linux/ipv6.h>
#include <linux/stddef.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
#include <linux/inetdevice.h>

#include <crypto/hash.h>
#include <linux/scatterlist.h>

#include <trace/events/tcp.h>
#ifdef CONFIG_TCP_MD5SIG
static int tcp_v4_md5_hash_hdr(char *md5_hash, const struct tcp_md5sig_key *key,
			       __be32 daddr, __be32 saddr, const struct tcphdr *th);
#endif

struct inet_hashinfo tcp_hashinfo;
EXPORT_SYMBOL(tcp_hashinfo);
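/* Note: the two helpers below generate the initial sequence number and the
 * per-connection timestamp offset for incoming connections.  Both are
 * derived from the address/port 4-tuple with a keyed hash (see
 * net/core/secure_seq.c), so they are hard to predict off-path while
 * remaining stable for a given flow.
 */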
static u32 tcp_v4_init_seq(const struct sk_buff *skb)
{
	return secure_tcp_seq(ip_hdr(skb)->daddr,
			      ip_hdr(skb)->saddr,
			      tcp_hdr(skb)->dest,
			      tcp_hdr(skb)->source);
}

static u32 tcp_v4_init_ts_off(const struct net *net, const struct sk_buff *skb)
{
	return secure_tcp_ts_off(net, ip_hdr(skb)->daddr, ip_hdr(skb)->saddr);
}
int tcp_twsk_unique(struct sock *sk, struct sock *sktw, void *twp)
{
	const struct inet_timewait_sock *tw = inet_twsk(sktw);
	const struct tcp_timewait_sock *tcptw = tcp_twsk(sktw);
	struct tcp_sock *tp = tcp_sk(sk);
	int reuse = sock_net(sk)->ipv4.sysctl_tcp_tw_reuse;

	if (reuse == 2) {
		/* Still does not detect *everything* that goes through
		 * lo, since we require a loopback src or dst address
		 * or direct binding to 'lo' interface.
		 */
		bool loopback = false;

		if (tw->tw_bound_dev_if == LOOPBACK_IFINDEX)
			loopback = true;
#if IS_ENABLED(CONFIG_IPV6)
		if (tw->tw_family == AF_INET6) {
			if (ipv6_addr_loopback(&tw->tw_v6_daddr) ||
			    (ipv6_addr_v4mapped(&tw->tw_v6_daddr) &&
			     (tw->tw_v6_daddr.s6_addr[12] == 127)) ||
			    ipv6_addr_loopback(&tw->tw_v6_rcv_saddr) ||
			    (ipv6_addr_v4mapped(&tw->tw_v6_rcv_saddr) &&
			     (tw->tw_v6_rcv_saddr.s6_addr[12] == 127)))
				loopback = true;
		} else
#endif
		{
			if (ipv4_is_loopback(tw->tw_daddr) ||
			    ipv4_is_loopback(tw->tw_rcv_saddr))
				loopback = true;
		}
		if (!loopback)
			reuse = 0;
	}

	/* With PAWS, it is safe from the viewpoint
	   of data integrity. Even without PAWS it is safe provided sequence
	   spaces do not overlap i.e. at data rates <= 80Mbit/sec.

	   Actually, the idea is close to VJ's one, only timestamp cache is
	   held not per host, but per port pair and TW bucket is used as state
	   holder.

	   If TW bucket has been already destroyed we fall back to VJ's scheme
	   and use initial timestamp retrieved from peer table.
	 */
	if (tcptw->tw_ts_recent_stamp &&
	    (!twp || (reuse && get_seconds() - tcptw->tw_ts_recent_stamp > 1))) {
		tp->write_seq = tcptw->tw_snd_nxt + 65535 + 2;
		if (tp->write_seq == 0)
			tp->write_seq = 1;
		tp->rx_opt.ts_recent	   = tcptw->tw_ts_recent;
		tp->rx_opt.ts_recent_stamp = tcptw->tw_ts_recent_stamp;
		sock_hold(sktw);
		return 1;
	}

	return 0;
}
EXPORT_SYMBOL_GPL(tcp_twsk_unique);
static int tcp_v4_pre_connect(struct sock *sk, struct sockaddr *uaddr,
			      int addr_len)
{
	/* This check is replicated from tcp_v4_connect() and intended to
	 * prevent BPF program called below from accessing bytes that are out
	 * of the bound specified by user in addr_len.
	 */
	if (addr_len < sizeof(struct sockaddr_in))
		return -EINVAL;

	sock_owned_by_me(sk);

	return BPF_CGROUP_RUN_PROG_INET4_CONNECT(sk, uaddr);
}
/* This will initiate an outgoing connection. */
int tcp_v4_connect(struct sock *sk, struct sockaddr *uaddr, int addr_len)
{
	struct sockaddr_in *usin = (struct sockaddr_in *)uaddr;
	struct inet_sock *inet = inet_sk(sk);
	struct tcp_sock *tp = tcp_sk(sk);
	__be16 orig_sport, orig_dport;
	__be32 daddr, nexthop;
	struct flowi4 *fl4;
	struct rtable *rt;
	int err;
	struct ip_options_rcu *inet_opt;
	struct inet_timewait_death_row *tcp_death_row = &sock_net(sk)->ipv4.tcp_death_row;

	if (addr_len < sizeof(struct sockaddr_in))
		return -EINVAL;

	if (usin->sin_family != AF_INET)
		return -EAFNOSUPPORT;

	nexthop = daddr = usin->sin_addr.s_addr;
	inet_opt = rcu_dereference_protected(inet->inet_opt,
					     lockdep_sock_is_held(sk));
	if (inet_opt && inet_opt->opt.srr) {
		if (!daddr)
			return -EINVAL;
		nexthop = inet_opt->opt.faddr;
	}

	orig_sport = inet->inet_sport;
	orig_dport = usin->sin_port;
	fl4 = &inet->cork.fl.u.ip4;
	rt = ip_route_connect(fl4, nexthop, inet->inet_saddr,
			      RT_CONN_FLAGS(sk), sk->sk_bound_dev_if,
			      IPPROTO_TCP,
			      orig_sport, orig_dport, sk);
	if (IS_ERR(rt)) {
		err = PTR_ERR(rt);
		if (err == -ENETUNREACH)
			IP_INC_STATS(sock_net(sk), IPSTATS_MIB_OUTNOROUTES);
		return err;
	}

	if (rt->rt_flags & (RTCF_MULTICAST | RTCF_BROADCAST)) {
		ip_rt_put(rt);
		return -ENETUNREACH;
	}

	if (!inet_opt || !inet_opt->opt.srr)
		daddr = fl4->daddr;

	if (!inet->inet_saddr)
		inet->inet_saddr = fl4->saddr;
	sk_rcv_saddr_set(sk, inet->inet_saddr);

	if (tp->rx_opt.ts_recent_stamp && inet->inet_daddr != daddr) {
		/* Reset inherited state */
		tp->rx_opt.ts_recent	   = 0;
		tp->rx_opt.ts_recent_stamp = 0;
		if (likely(!tp->repair))
			tp->write_seq	   = 0;
	}

	inet->inet_dport = usin->sin_port;
	sk_daddr_set(sk, daddr);

	inet_csk(sk)->icsk_ext_hdr_len = 0;
	if (inet_opt)
		inet_csk(sk)->icsk_ext_hdr_len = inet_opt->opt.optlen;

	tp->rx_opt.mss_clamp = TCP_MSS_DEFAULT;

	/* Socket identity is still unknown (sport may be zero).
	 * However we set state to SYN-SENT and, without releasing the socket
	 * lock, select a source port, enter ourselves into the hash tables and
	 * complete initialization after this.
	 */
	tcp_set_state(sk, TCP_SYN_SENT);
	err = inet_hash_connect(tcp_death_row, sk);
	if (err)
		goto failure;

	rt = ip_route_newports(fl4, rt, orig_sport, orig_dport,
			       inet->inet_sport, inet->inet_dport, sk);
	if (IS_ERR(rt)) {
		err = PTR_ERR(rt);
		rt = NULL;
		goto failure;
	}
	/* OK, now commit destination to socket. */
	sk->sk_gso_type = SKB_GSO_TCPV4;
	sk_setup_caps(sk, &rt->dst);
	rt = NULL;

	if (likely(!tp->repair)) {
		if (!tp->write_seq)
			tp->write_seq = secure_tcp_seq(inet->inet_saddr,
						       inet->inet_daddr,
						       inet->inet_sport,
						       usin->sin_port);
		tp->tsoffset = secure_tcp_ts_off(sock_net(sk),
						 inet->inet_saddr,
						 inet->inet_daddr);
	}

	inet->inet_id = tp->write_seq ^ jiffies;

	if (tcp_fastopen_defer_connect(sk, &err))
		return err;
	if (err)
		goto failure;

	err = tcp_connect(sk);
	if (err)
		goto failure;

	return 0;

failure:
	/*
	 * This unhashes the socket and releases the local port,
	 * if necessary.
	 */
	tcp_set_state(sk, TCP_CLOSE);
	ip_rt_put(rt);
	sk->sk_route_caps = 0;
	inet->inet_dport = 0;
	return err;
}
EXPORT_SYMBOL(tcp_v4_connect);
/*
 * This routine reacts to ICMP_FRAG_NEEDED mtu indications as defined in RFC1191.
 * It can be called through tcp_release_cb() if socket was owned by user
 * at the time tcp_v4_err() was called to handle ICMP message.
 */
void tcp_v4_mtu_reduced(struct sock *sk)
{
	struct inet_sock *inet = inet_sk(sk);
	struct dst_entry *dst;
	u32 mtu;

	if ((1 << sk->sk_state) & (TCPF_LISTEN | TCPF_CLOSE))
		return;
	mtu = tcp_sk(sk)->mtu_info;
	dst = inet_csk_update_pmtu(sk, mtu);
	if (!dst)
		return;

	/* Something is about to be wrong... Remember soft error
	 * for the case, if this connection will not be able to recover.
	 */
	if (mtu < dst_mtu(dst) && ip_dont_fragment(sk, dst))
		sk->sk_err_soft = EMSGSIZE;

	mtu = dst_mtu(dst);

	if (inet->pmtudisc != IP_PMTUDISC_DONT &&
	    ip_sk_accept_pmtu(sk) &&
	    inet_csk(sk)->icsk_pmtu_cookie > mtu) {
		tcp_sync_mss(sk, mtu);

		/* Resend the TCP packet because it's
		 * clear that the old packet has been
		 * dropped. This is the new "fast" path mtu
		 * discovery.
		 */
		tcp_simple_retransmit(sk);
	} /* else let the usual retransmit timer handle it */
}
EXPORT_SYMBOL(tcp_v4_mtu_reduced);
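/* Handle an ICMP redirect for a connected socket: revalidate the cached
 * route and let the dst's ->redirect() handler update it.
 */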
static void do_redirect(struct sk_buff *skb, struct sock *sk)
{
	struct dst_entry *dst = __sk_dst_check(sk, 0);

	if (dst)
		dst->ops->redirect(dst, sk, skb);
}

/* handle ICMP messages on TCP_NEW_SYN_RECV request sockets */
void tcp_req_err(struct sock *sk, u32 seq, bool abort)
{
	struct request_sock *req = inet_reqsk(sk);
	struct net *net = sock_net(sk);

	/* ICMPs are not backlogged, hence we cannot get
	 * an established socket here.
	 */
	if (seq != tcp_rsk(req)->snt_isn) {
		__NET_INC_STATS(net, LINUX_MIB_OUTOFWINDOWICMPS);
	} else if (abort) {
		/*
		 * Still in SYN_RECV, just remove it silently.
		 * There is no good way to pass the error to the newly
		 * created socket, and POSIX does not want network
		 * errors returned from accept().
		 */
		inet_csk_reqsk_queue_drop(req->rsk_listener, req);
		tcp_listendrop(req->rsk_listener);
	}
	reqsk_put(req);
}
EXPORT_SYMBOL(tcp_req_err);
/*
 * This routine is called by the ICMP module when it gets some
 * sort of error condition.  If err < 0 then the socket should
 * be closed and the error returned to the user.  If err > 0
 * it's just the icmp type << 8 | icmp code.  After adjustment
 * header points to the first 8 bytes of the tcp header.  We need
 * to find the appropriate port.
 *
 * The locking strategy used here is very "optimistic". When
 * someone else accesses the socket the ICMP is just dropped
 * and for some paths there is no check at all.
 * A more general error queue to queue errors for later handling
 * is probably better.
 */
void tcp_v4_err(struct sk_buff *icmp_skb, u32 info)
{
	const struct iphdr *iph = (const struct iphdr *)icmp_skb->data;
	struct tcphdr *th = (struct tcphdr *)(icmp_skb->data + (iph->ihl << 2));
	struct inet_connection_sock *icsk;
	struct tcp_sock *tp;
	struct inet_sock *inet;
	const int type = icmp_hdr(icmp_skb)->type;
	const int code = icmp_hdr(icmp_skb)->code;
	struct sock *sk;
	struct sk_buff *skb;
	struct request_sock *fastopen;
	u32 seq, snd_una;
	s32 remaining;
	u32 delta_us;
	int err;
	struct net *net = dev_net(icmp_skb->dev);

	sk = __inet_lookup_established(net, &tcp_hashinfo, iph->daddr,
				       th->dest, iph->saddr, ntohs(th->source),
				       inet_iif(icmp_skb), 0);
	if (!sk) {
		__ICMP_INC_STATS(net, ICMP_MIB_INERRORS);
		return;
	}
	if (sk->sk_state == TCP_TIME_WAIT) {
		inet_twsk_put(inet_twsk(sk));
		return;
	}
	seq = ntohl(th->seq);
	if (sk->sk_state == TCP_NEW_SYN_RECV)
		return tcp_req_err(sk, seq,
				   type == ICMP_PARAMETERPROB ||
				   type == ICMP_TIME_EXCEEDED ||
				   (type == ICMP_DEST_UNREACH &&
				    (code == ICMP_NET_UNREACH ||
				     code == ICMP_HOST_UNREACH)));

	bh_lock_sock(sk);
	/* If too many ICMPs get dropped on busy
	 * servers this needs to be solved differently.
	 * We do take care of PMTU discovery (RFC1191) special case :
	 * we can receive locally generated ICMP messages while socket is held.
	 */
	if (sock_owned_by_user(sk)) {
		if (!(type == ICMP_DEST_UNREACH && code == ICMP_FRAG_NEEDED))
			__NET_INC_STATS(net, LINUX_MIB_LOCKDROPPEDICMPS);
	}
	if (sk->sk_state == TCP_CLOSE)
		goto out;

	if (unlikely(iph->ttl < inet_sk(sk)->min_ttl)) {
		__NET_INC_STATS(net, LINUX_MIB_TCPMINTTLDROP);
		goto out;
	}

	icsk = inet_csk(sk);
	tp = tcp_sk(sk);
	/* XXX (TFO) - tp->snd_una should be ISN (tcp_create_openreq_child() */
	fastopen = tp->fastopen_rsk;
	snd_una = fastopen ? tcp_rsk(fastopen)->snt_isn : tp->snd_una;
	if (sk->sk_state != TCP_LISTEN &&
	    !between(seq, snd_una, tp->snd_nxt)) {
		__NET_INC_STATS(net, LINUX_MIB_OUTOFWINDOWICMPS);
		goto out;
	}
	switch (type) {
	case ICMP_REDIRECT:
		if (!sock_owned_by_user(sk))
			do_redirect(icmp_skb, sk);
		goto out;
	case ICMP_SOURCE_QUENCH:
		/* Just silently ignore these. */
		goto out;
	case ICMP_PARAMETERPROB:
		err = EPROTO;
		break;
	case ICMP_DEST_UNREACH:
		if (code > NR_ICMP_UNREACH)
			goto out;

		if (code == ICMP_FRAG_NEEDED) { /* PMTU discovery (RFC1191) */
			/* We are not interested in TCP_LISTEN and open_requests
			 * (SYN-ACKs sent out by Linux are always <576bytes so
			 * they should go through unfragmented).
			 */
			if (sk->sk_state == TCP_LISTEN)
				goto out;

			tp->mtu_info = info;
			if (!sock_owned_by_user(sk)) {
				tcp_v4_mtu_reduced(sk);
			} else {
				if (!test_and_set_bit(TCP_MTU_REDUCED_DEFERRED, &sk->sk_tsq_flags))
					sock_hold(sk);
			}
			goto out;
		}

		err = icmp_err_convert[code].errno;
		/* check if icmp_skb allows revert of backoff
		 * (see draft-zimmermann-tcp-lcd) */
		if (code != ICMP_NET_UNREACH && code != ICMP_HOST_UNREACH)
			break;
		if (seq != tp->snd_una || !icsk->icsk_retransmits ||
		    !icsk->icsk_backoff || fastopen)
			break;

		if (sock_owned_by_user(sk))
			break;

		icsk->icsk_backoff--;
		icsk->icsk_rto = tp->srtt_us ? __tcp_set_rto(tp) :
					       TCP_TIMEOUT_INIT;
		icsk->icsk_rto = inet_csk_rto_backoff(icsk, TCP_RTO_MAX);

		skb = tcp_rtx_queue_head(sk);

		tcp_mstamp_refresh(tp);
		delta_us = (u32)(tp->tcp_mstamp - skb->skb_mstamp);
		remaining = icsk->icsk_rto -
			    usecs_to_jiffies(delta_us);

		if (remaining > 0) {
			inet_csk_reset_xmit_timer(sk, ICSK_TIME_RETRANS,
						  remaining, TCP_RTO_MAX);
		} else {
			/* RTO revert clocked out retransmission.
			 * Will retransmit now */
			tcp_retransmit_timer(sk);
		}

		break;
	case ICMP_TIME_EXCEEDED:
		err = EHOSTUNREACH;
		break;
	default:
		goto out;
	}

	switch (sk->sk_state) {
	case TCP_SYN_SENT:
	case TCP_SYN_RECV:
		/* Only in fast or simultaneous open. If a fast open socket is
		 * already accepted it is treated as a connected one below.
		 */
		if (fastopen && !fastopen->sk)
			break;

		if (!sock_owned_by_user(sk)) {
			sk->sk_err = err;

			sk->sk_error_report(sk);

			tcp_done(sk);
		} else {
			sk->sk_err_soft = err;
		}
		goto out;
	}

	/* If we've already connected we will keep trying
	 * until we time out, or the user gives up.
	 *
	 * rfc1122 4.2.3.9 allows to consider as hard errors
	 * only PROTO_UNREACH and PORT_UNREACH (well, FRAG_FAILED too,
	 * but it is obsoleted by pmtu discovery).
	 *
	 * Note, that in modern internet, where routing is unreliable
	 * and in each dark corner broken firewalls sit, sending random
	 * errors ordered by their masters, even these two messages finally lose
	 * their original sense (even Linux sends invalid PORT_UNREACHs)
	 *
	 * Now we are in compliance with RFCs.
	 */

	inet = inet_sk(sk);
	if (!sock_owned_by_user(sk) && inet->recverr) {
		sk->sk_err = err;
		sk->sk_error_report(sk);
	} else { /* Only an error on timeout */
		sk->sk_err_soft = err;
	}

out:
	bh_unlock_sock(sk);
	sock_put(sk);
}
void __tcp_v4_send_check(struct sk_buff *skb, __be32 saddr, __be32 daddr)
{
	struct tcphdr *th = tcp_hdr(skb);

	th->check = ~tcp_v4_check(skb->len, saddr, daddr, 0);
	skb->csum_start = skb_transport_header(skb) - skb->head;
	skb->csum_offset = offsetof(struct tcphdr, check);
}

/* This routine computes an IPv4 TCP checksum. */
void tcp_v4_send_check(struct sock *sk, struct sk_buff *skb)
{
	const struct inet_sock *inet = inet_sk(sk);

	__tcp_v4_send_check(skb, inet->inet_saddr, inet->inet_daddr);
}
EXPORT_SYMBOL(tcp_v4_send_check);
/*
 *	This routine will send an RST to the other tcp.
 *
 *	Someone asks: why I NEVER use socket parameters (TOS, TTL etc.)
 *		      for reset.
 *	Answer: if a packet caused RST, it is not for a socket
 *		existing in our system, if it is matched to a socket,
 *		it is just duplicate segment or bug in other side's TCP.
 *		So we build the reply based only on parameters
 *		arriving with the segment.
 *	Exception: precedence violation. We do not implement it in any case.
 */

static void tcp_v4_send_reset(const struct sock *sk, struct sk_buff *skb)
{
	const struct tcphdr *th = tcp_hdr(skb);
	struct {
		struct tcphdr th;
#ifdef CONFIG_TCP_MD5SIG
		__be32 opt[(TCPOLEN_MD5SIG_ALIGNED >> 2)];
#endif
	} rep;
	struct ip_reply_arg arg;
#ifdef CONFIG_TCP_MD5SIG
	struct tcp_md5sig_key *key = NULL;
	const __u8 *hash_location = NULL;
	unsigned char newhash[16];
	int genhash;
	struct sock *sk1 = NULL;
#endif
	struct net *net;
	struct sock *ctl_sk;

	/* Never send a reset in response to a reset. */
	if (th->rst)
		return;

	/* If sk not NULL, it means we did a successful lookup and incoming
	 * route had to be correct. prequeue might have dropped our dst.
	 */
	if (!sk && skb_rtable(skb)->rt_type != RTN_LOCAL)
		return;

	/* Swap the send and the receive. */
	memset(&rep, 0, sizeof(rep));
	rep.th.dest   = th->source;
	rep.th.source = th->dest;
	rep.th.doff   = sizeof(struct tcphdr) / 4;
	rep.th.rst    = 1;

	if (th->ack) {
		rep.th.seq = th->ack_seq;
	} else {
		rep.th.ack = 1;
		rep.th.ack_seq = htonl(ntohl(th->seq) + th->syn + th->fin +
				       skb->len - (th->doff << 2));
	}

	memset(&arg, 0, sizeof(arg));
	arg.iov[0].iov_base = (unsigned char *)&rep;
	arg.iov[0].iov_len  = sizeof(rep.th);

	net = sk ? sock_net(sk) : dev_net(skb_dst(skb)->dev);
#ifdef CONFIG_TCP_MD5SIG
	rcu_read_lock();
	hash_location = tcp_parse_md5sig_option(th);
	if (sk && sk_fullsock(sk)) {
		key = tcp_md5_do_lookup(sk, (union tcp_md5_addr *)
					&ip_hdr(skb)->saddr, AF_INET);
	} else if (hash_location) {
		/*
		 * active side is lost. Try to find listening socket through
		 * source port, and then find md5 key through listening socket.
		 * we are not losing security here:
		 * Incoming packet is checked with md5 hash with finding key,
		 * no RST generated if md5 hash doesn't match.
		 */
		sk1 = __inet_lookup_listener(net, &tcp_hashinfo, NULL, 0,
					     ip_hdr(skb)->saddr,
					     th->source, ip_hdr(skb)->daddr,
					     ntohs(th->source), inet_iif(skb),
					     tcp_v4_sdif(skb));
		/* don't send rst if it can't find key */
		if (!sk1)
			goto out;

		key = tcp_md5_do_lookup(sk1, (union tcp_md5_addr *)
					&ip_hdr(skb)->saddr, AF_INET);
		if (!key)
			goto out;

		genhash = tcp_v4_md5_hash_skb(newhash, key, NULL, skb);
		if (genhash || memcmp(hash_location, newhash, 16) != 0)
			goto out;
	}

	if (key) {
		rep.opt[0] = htonl((TCPOPT_NOP << 24) |
				   (TCPOPT_NOP << 16) |
				   (TCPOPT_MD5SIG << 8) |
				   TCPOLEN_MD5SIG);
		/* Update length and the length the header thinks exists */
		arg.iov[0].iov_len += TCPOLEN_MD5SIG_ALIGNED;
		rep.th.doff = arg.iov[0].iov_len / 4;

		tcp_v4_md5_hash_hdr((__u8 *) &rep.opt[1],
				    key, ip_hdr(skb)->saddr,
				    ip_hdr(skb)->daddr, &rep.th);
	}
#endif
	arg.csum = csum_tcpudp_nofold(ip_hdr(skb)->daddr,
				      ip_hdr(skb)->saddr, /* XXX */
				      arg.iov[0].iov_len, IPPROTO_TCP, 0);
	arg.csumoffset = offsetof(struct tcphdr, check) / 2;
	arg.flags = (sk && inet_sk_transparent(sk)) ? IP_REPLY_ARG_NOSRCCHECK : 0;

	/* When socket is gone, all binding information is lost.
	 * routing might fail in this case. No choice here, if we choose to force
	 * input interface, we will misroute in case of asymmetric route.
	 */
	if (sk) {
		arg.bound_dev_if = sk->sk_bound_dev_if;
		if (sk_fullsock(sk))
			trace_tcp_send_reset(sk, skb);
	}

	BUILD_BUG_ON(offsetof(struct sock, sk_bound_dev_if) !=
		     offsetof(struct inet_timewait_sock, tw_bound_dev_if));

	arg.tos = ip_hdr(skb)->tos;
	arg.uid = sock_net_uid(net, sk && sk_fullsock(sk) ? sk : NULL);
	local_bh_disable();
	ctl_sk = *this_cpu_ptr(net->ipv4.tcp_sk);
	if (sk)
		ctl_sk->sk_mark = (sk->sk_state == TCP_TIME_WAIT) ?
				   inet_twsk(sk)->tw_mark : sk->sk_mark;
	ip_send_unicast_reply(ctl_sk,
			      skb, &TCP_SKB_CB(skb)->header.h4.opt,
			      ip_hdr(skb)->saddr, ip_hdr(skb)->daddr,
			      &arg, arg.iov[0].iov_len);

	__TCP_INC_STATS(net, TCP_MIB_OUTSEGS);
	__TCP_INC_STATS(net, TCP_MIB_OUTRSTS);
	local_bh_enable();

#ifdef CONFIG_TCP_MD5SIG
out:
	rcu_read_unlock();
#endif
}
/* The code following below sending ACKs in SYN-RECV and TIME-WAIT states
   outside socket context is ugly, certainly. What can I do?
 */

static void tcp_v4_send_ack(const struct sock *sk,
			    struct sk_buff *skb, u32 seq, u32 ack,
			    u32 win, u32 tsval, u32 tsecr, int oif,
			    struct tcp_md5sig_key *key,
			    int reply_flags, u8 tos)
{
	const struct tcphdr *th = tcp_hdr(skb);
	struct {
		struct tcphdr th;
		__be32 opt[(TCPOLEN_TSTAMP_ALIGNED >> 2)
#ifdef CONFIG_TCP_MD5SIG
			   + (TCPOLEN_MD5SIG_ALIGNED >> 2)
#endif
			];
	} rep;
	struct net *net = sock_net(sk);
	struct ip_reply_arg arg;
	struct sock *ctl_sk;

	memset(&rep.th, 0, sizeof(struct tcphdr));
	memset(&arg, 0, sizeof(arg));

	arg.iov[0].iov_base = (unsigned char *)&rep;
	arg.iov[0].iov_len  = sizeof(rep.th);
	if (tsecr) {
		rep.opt[0] = htonl((TCPOPT_NOP << 24) | (TCPOPT_NOP << 16) |
				   (TCPOPT_TIMESTAMP << 8) |
				   TCPOLEN_TIMESTAMP);
		rep.opt[1] = htonl(tsval);
		rep.opt[2] = htonl(tsecr);
		arg.iov[0].iov_len += TCPOLEN_TSTAMP_ALIGNED;
	}

	/* Swap the send and the receive. */
	rep.th.dest    = th->source;
	rep.th.source  = th->dest;
	rep.th.doff    = arg.iov[0].iov_len / 4;
	rep.th.seq     = htonl(seq);
	rep.th.ack_seq = htonl(ack);
	rep.th.ack     = 1;
	rep.th.window  = htons(win);

#ifdef CONFIG_TCP_MD5SIG
	if (key) {
		int offset = (tsecr) ? 3 : 0;

		rep.opt[offset++] = htonl((TCPOPT_NOP << 24) |
					  (TCPOPT_NOP << 16) |
					  (TCPOPT_MD5SIG << 8) |
					  TCPOLEN_MD5SIG);
		arg.iov[0].iov_len += TCPOLEN_MD5SIG_ALIGNED;
		rep.th.doff = arg.iov[0].iov_len/4;

		tcp_v4_md5_hash_hdr((__u8 *) &rep.opt[offset],
				    key, ip_hdr(skb)->saddr,
				    ip_hdr(skb)->daddr, &rep.th);
	}
#endif
	arg.flags = reply_flags;
	arg.csum = csum_tcpudp_nofold(ip_hdr(skb)->daddr,
				      ip_hdr(skb)->saddr, /* XXX */
				      arg.iov[0].iov_len, IPPROTO_TCP, 0);
	arg.csumoffset = offsetof(struct tcphdr, check) / 2;
	if (oif)
		arg.bound_dev_if = oif;
	arg.tos = tos;
	arg.uid = sock_net_uid(net, sk_fullsock(sk) ? sk : NULL);
	local_bh_disable();
	ctl_sk = *this_cpu_ptr(net->ipv4.tcp_sk);
	if (sk)
		ctl_sk->sk_mark = (sk->sk_state == TCP_TIME_WAIT) ?
				   inet_twsk(sk)->tw_mark : sk->sk_mark;
	ip_send_unicast_reply(ctl_sk,
			      skb, &TCP_SKB_CB(skb)->header.h4.opt,
			      ip_hdr(skb)->saddr, ip_hdr(skb)->daddr,
			      &arg, arg.iov[0].iov_len);

	__TCP_INC_STATS(net, TCP_MIB_OUTSEGS);
	local_bh_enable();
}
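/* ACK on behalf of a TIME-WAIT socket: the sequence numbers, window and
 * timestamp come from the tcp_timewait_sock, since the full tcp_sock no
 * longer exists at this point.
 */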
static void tcp_v4_timewait_ack(struct sock *sk, struct sk_buff *skb)
{
	struct inet_timewait_sock *tw = inet_twsk(sk);
	struct tcp_timewait_sock *tcptw = tcp_twsk(sk);

	tcp_v4_send_ack(sk, skb,
			tcptw->tw_snd_nxt, tcptw->tw_rcv_nxt,
			tcptw->tw_rcv_wnd >> tw->tw_rcv_wscale,
			tcp_time_stamp_raw() + tcptw->tw_ts_offset,
			tcptw->tw_ts_recent,
			tw->tw_bound_dev_if,
			tcp_twsk_md5_key(tcptw),
			tw->tw_transparent ? IP_REPLY_ARG_NOSRCCHECK : 0,
			tw->tw_tos);

	inet_twsk_put(tw);
}
static void tcp_v4_reqsk_send_ack(const struct sock *sk, struct sk_buff *skb,
				  struct request_sock *req)
{
	/* sk->sk_state == TCP_LISTEN -> for regular TCP_SYN_RECV
	 * sk->sk_state == TCP_SYN_RECV -> for Fast Open.
	 */
	u32 seq = (sk->sk_state == TCP_LISTEN) ? tcp_rsk(req)->snt_isn + 1 :
					     tcp_sk(sk)->snd_nxt;

	/* RFC 7323 2.3
	 * The window field (SEG.WND) of every outgoing segment, with the
	 * exception of <SYN> segments, MUST be right-shifted by
	 * Rcv.Wind.Shift bits:
	 */
	tcp_v4_send_ack(sk, skb, seq,
			tcp_rsk(req)->rcv_nxt,
			req->rsk_rcv_wnd >> inet_rsk(req)->rcv_wscale,
			tcp_time_stamp_raw() + tcp_rsk(req)->ts_off,
			req->ts_recent,
			0,
			tcp_md5_do_lookup(sk, (union tcp_md5_addr *)&ip_hdr(skb)->saddr,
					  AF_INET),
			inet_rsk(req)->no_srccheck ? IP_REPLY_ARG_NOSRCCHECK : 0,
			ip_hdr(skb)->tos);
}
/*
 *	Send a SYN-ACK after having received a SYN.
 *	This still operates on a request_sock only, not on a big
 *	socket.
 */
static int tcp_v4_send_synack(const struct sock *sk, struct dst_entry *dst,
			      struct flowi *fl,
			      struct request_sock *req,
			      struct tcp_fastopen_cookie *foc,
			      enum tcp_synack_type synack_type)
{
	const struct inet_request_sock *ireq = inet_rsk(req);
	struct flowi4 fl4;
	int err = -1;
	struct sk_buff *skb;

	/* First, grab a route. */
	if (!dst && (dst = inet_csk_route_req(sk, &fl4, req)) == NULL)
		return -1;

	skb = tcp_make_synack(sk, dst, req, foc, synack_type);

	if (skb) {
		__tcp_v4_send_check(skb, ireq->ir_loc_addr, ireq->ir_rmt_addr);

		err = ip_build_and_send_pkt(skb, sk, ireq->ir_loc_addr,
					    ireq->ir_rmt_addr,
					    ireq_opt_deref(ireq));
		err = net_xmit_eval(err);
	}

	return err;
}
/*
 *	IPv4 request_sock destructor.
 */
static void tcp_v4_reqsk_destructor(struct request_sock *req)
{
	kfree(rcu_dereference_protected(inet_rsk(req)->ireq_opt, 1));
}
#ifdef CONFIG_TCP_MD5SIG
/*
 * RFC2385 MD5 checksumming requires a mapping of
 * IP address->MD5 Key.
 * We need to maintain these in the sk structure.
 */

/* Find the Key structure for an address.  */
struct tcp_md5sig_key *tcp_md5_do_lookup(const struct sock *sk,
					 const union tcp_md5_addr *addr,
					 int family)
{
	const struct tcp_sock *tp = tcp_sk(sk);
	struct tcp_md5sig_key *key;
	const struct tcp_md5sig_info *md5sig;
	__be32 mask;
	struct tcp_md5sig_key *best_match = NULL;
	bool match;

	/* caller either holds rcu_read_lock() or socket lock */
	md5sig = rcu_dereference_check(tp->md5sig_info,
				       lockdep_sock_is_held(sk));
	if (!md5sig)
		return NULL;

	hlist_for_each_entry_rcu(key, &md5sig->head, node) {
		if (key->family != family)
			continue;

		if (family == AF_INET) {
			mask = inet_make_mask(key->prefixlen);
			match = (key->addr.a4.s_addr & mask) ==
				(addr->a4.s_addr & mask);
#if IS_ENABLED(CONFIG_IPV6)
		} else if (family == AF_INET6) {
			match = ipv6_prefix_equal(&key->addr.a6, &addr->a6,
						  key->prefixlen);
#endif
		} else {
			match = false;
		}

		if (match && (!best_match ||
			      key->prefixlen > best_match->prefixlen))
			best_match = key;
	}
	return best_match;
}
EXPORT_SYMBOL(tcp_md5_do_lookup);
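/* Exact-match variant of tcp_md5_do_lookup(): compares both address and
 * prefix length, so keys with different prefix lengths for the same address
 * can coexist.  Used by the add/del paths below.
 */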
static struct tcp_md5sig_key *tcp_md5_do_lookup_exact(const struct sock *sk,
						      const union tcp_md5_addr *addr,
						      int family, u8 prefixlen)
{
	const struct tcp_sock *tp = tcp_sk(sk);
	struct tcp_md5sig_key *key;
	unsigned int size = sizeof(struct in_addr);
	const struct tcp_md5sig_info *md5sig;

	/* caller either holds rcu_read_lock() or socket lock */
	md5sig = rcu_dereference_check(tp->md5sig_info,
				       lockdep_sock_is_held(sk));
	if (!md5sig)
		return NULL;
#if IS_ENABLED(CONFIG_IPV6)
	if (family == AF_INET6)
		size = sizeof(struct in6_addr);
#endif
	hlist_for_each_entry_rcu(key, &md5sig->head, node) {
		if (key->family != family)
			continue;
		if (!memcmp(&key->addr, addr, size) &&
		    key->prefixlen == prefixlen)
			return key;
	}
	return NULL;
}
struct tcp_md5sig_key *tcp_v4_md5_lookup(const struct sock *sk,
					 const struct sock *addr_sk)
{
	const union tcp_md5_addr *addr;

	addr = (const union tcp_md5_addr *)&addr_sk->sk_daddr;
	return tcp_md5_do_lookup(sk, addr, AF_INET);
}
EXPORT_SYMBOL(tcp_v4_md5_lookup);
/* This can be called on a newly created socket, from other files */
int tcp_md5_do_add(struct sock *sk, const union tcp_md5_addr *addr,
		   int family, u8 prefixlen, const u8 *newkey, u8 newkeylen,
		   gfp_t gfp)
{
	/* Add Key to the list */
	struct tcp_md5sig_key *key;
	struct tcp_sock *tp = tcp_sk(sk);
	struct tcp_md5sig_info *md5sig;

	key = tcp_md5_do_lookup_exact(sk, addr, family, prefixlen);
	if (key) {
		/* Pre-existing entry - just update that one. */
		memcpy(key->key, newkey, newkeylen);
		key->keylen = newkeylen;
		return 0;
	}

	md5sig = rcu_dereference_protected(tp->md5sig_info,
					   lockdep_sock_is_held(sk));
	if (!md5sig) {
		md5sig = kmalloc(sizeof(*md5sig), gfp);
		if (!md5sig)
			return -ENOMEM;

		sk_nocaps_add(sk, NETIF_F_GSO_MASK);
		INIT_HLIST_HEAD(&md5sig->head);
		rcu_assign_pointer(tp->md5sig_info, md5sig);
	}

	key = sock_kmalloc(sk, sizeof(*key), gfp);
	if (!key)
		return -ENOMEM;
	if (!tcp_alloc_md5sig_pool()) {
		sock_kfree_s(sk, key, sizeof(*key));
		return -ENOMEM;
	}

	memcpy(key->key, newkey, newkeylen);
	key->keylen = newkeylen;
	key->family = family;
	key->prefixlen = prefixlen;
	memcpy(&key->addr, addr,
	       (family == AF_INET6) ? sizeof(struct in6_addr) :
				      sizeof(struct in_addr));
	hlist_add_head_rcu(&key->node, &md5sig->head);
	return 0;
}
EXPORT_SYMBOL(tcp_md5_do_add);
int tcp_md5_do_del(struct sock *sk, const union tcp_md5_addr *addr, int family,
		   u8 prefixlen)
{
	struct tcp_md5sig_key *key;

	key = tcp_md5_do_lookup_exact(sk, addr, family, prefixlen);
	if (!key)
		return -ENOENT;
	hlist_del_rcu(&key->node);
	atomic_sub(sizeof(*key), &sk->sk_omem_alloc);
	kfree_rcu(key, rcu);
	return 0;
}
EXPORT_SYMBOL(tcp_md5_do_del);
static void tcp_clear_md5_list(struct sock *sk)
{
	struct tcp_sock *tp = tcp_sk(sk);
	struct tcp_md5sig_key *key;
	struct hlist_node *n;
	struct tcp_md5sig_info *md5sig;

	md5sig = rcu_dereference_protected(tp->md5sig_info, 1);

	hlist_for_each_entry_safe(key, n, &md5sig->head, node) {
		hlist_del_rcu(&key->node);
		atomic_sub(sizeof(*key), &sk->sk_omem_alloc);
		kfree_rcu(key, rcu);
	}
}
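/* setsockopt(TCP_MD5SIG / TCP_MD5SIG_EXT) handler: copy the key description
 * from user space, then add the key or (for a zero key length) delete it.
 */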
static int tcp_v4_parse_md5_keys(struct sock *sk, int optname,
				 char __user *optval, int optlen)
{
	struct tcp_md5sig cmd;
	struct sockaddr_in *sin = (struct sockaddr_in *)&cmd.tcpm_addr;
	u8 prefixlen = 32;

	if (optlen < sizeof(cmd))
		return -EINVAL;

	if (copy_from_user(&cmd, optval, sizeof(cmd)))
		return -EFAULT;

	if (sin->sin_family != AF_INET)
		return -EINVAL;

	if (optname == TCP_MD5SIG_EXT &&
	    cmd.tcpm_flags & TCP_MD5SIG_FLAG_PREFIX) {
		prefixlen = cmd.tcpm_prefixlen;
		if (prefixlen > 32)
			return -EINVAL;
	}

	if (!cmd.tcpm_keylen)
		return tcp_md5_do_del(sk, (union tcp_md5_addr *)&sin->sin_addr.s_addr,
				      AF_INET, prefixlen);

	if (cmd.tcpm_keylen > TCP_MD5SIG_MAXKEYLEN)
		return -EINVAL;

	return tcp_md5_do_add(sk, (union tcp_md5_addr *)&sin->sin_addr.s_addr,
			      AF_INET, prefixlen, cmd.tcpm_key, cmd.tcpm_keylen,
			      GFP_KERNEL);
}
static int tcp_v4_md5_hash_headers(struct tcp_md5sig_pool *hp,
				   __be32 daddr, __be32 saddr,
				   const struct tcphdr *th, int nbytes)
{
	struct tcp4_pseudohdr *bp;
	struct scatterlist sg;
	struct tcphdr *_th;

	bp = hp->scratch;
	bp->saddr = saddr;
	bp->daddr = daddr;
	bp->pad = 0;
	bp->protocol = IPPROTO_TCP;
	bp->len = cpu_to_be16(nbytes);

	_th = (struct tcphdr *)(bp + 1);
	memcpy(_th, th, sizeof(*th));
	_th->check = 0;

	sg_init_one(&sg, bp, sizeof(*bp) + sizeof(*th));
	ahash_request_set_crypt(hp->md5_req, &sg, NULL,
				sizeof(*bp) + sizeof(*th));
	return crypto_ahash_update(hp->md5_req);
}
static int tcp_v4_md5_hash_hdr(char *md5_hash, const struct tcp_md5sig_key *key,
			       __be32 daddr, __be32 saddr, const struct tcphdr *th)
{
	struct tcp_md5sig_pool *hp;
	struct ahash_request *req;

	hp = tcp_get_md5sig_pool();
	if (!hp)
		goto clear_hash_noput;
	req = hp->md5_req;

	if (crypto_ahash_init(req))
		goto clear_hash;
	if (tcp_v4_md5_hash_headers(hp, daddr, saddr, th, th->doff << 2))
		goto clear_hash;
	if (tcp_md5_hash_key(hp, key))
		goto clear_hash;
	ahash_request_set_crypt(req, NULL, md5_hash, 0);
	if (crypto_ahash_final(req))
		goto clear_hash;

	tcp_put_md5sig_pool();
	return 0;

clear_hash:
	tcp_put_md5sig_pool();
clear_hash_noput:
	memset(md5_hash, 0, 16);
	return 1;
}
int tcp_v4_md5_hash_skb(char *md5_hash, const struct tcp_md5sig_key *key,
			const struct sock *sk,
			const struct sk_buff *skb)
{
	struct tcp_md5sig_pool *hp;
	struct ahash_request *req;
	const struct tcphdr *th = tcp_hdr(skb);
	__be32 saddr, daddr;

	if (sk) { /* valid for establish/request sockets */
		saddr = sk->sk_rcv_saddr;
		daddr = sk->sk_daddr;
	} else {
		const struct iphdr *iph = ip_hdr(skb);

		saddr = iph->saddr;
		daddr = iph->daddr;
	}

	hp = tcp_get_md5sig_pool();
	if (!hp)
		goto clear_hash_noput;
	req = hp->md5_req;

	if (crypto_ahash_init(req))
		goto clear_hash;

	if (tcp_v4_md5_hash_headers(hp, daddr, saddr, th, skb->len))
		goto clear_hash;
	if (tcp_md5_hash_skb_data(hp, skb, th->doff << 2))
		goto clear_hash;
	if (tcp_md5_hash_key(hp, key))
		goto clear_hash;
	ahash_request_set_crypt(req, NULL, md5_hash, 0);
	if (crypto_ahash_final(req))
		goto clear_hash;

	tcp_put_md5sig_pool();
	return 0;

clear_hash:
	tcp_put_md5sig_pool();
clear_hash_noput:
	memset(md5_hash, 0, 16);
	return 1;
}
EXPORT_SYMBOL(tcp_v4_md5_hash_skb);
#endif

/* Called with rcu_read_lock() */
static bool tcp_v4_inbound_md5_hash(const struct sock *sk,
				    const struct sk_buff *skb)
{
#ifdef CONFIG_TCP_MD5SIG
	/*
	 * This gets called for each TCP segment that arrives
	 * so we want to be efficient.
	 * We have 3 drop cases:
	 * o No MD5 hash and one expected.
	 * o MD5 hash and we're not expecting one.
	 * o MD5 hash and it's wrong.
	 */
	const __u8 *hash_location = NULL;
	struct tcp_md5sig_key *hash_expected;
	const struct iphdr *iph = ip_hdr(skb);
	const struct tcphdr *th = tcp_hdr(skb);
	int genhash;
	unsigned char newhash[16];

	hash_expected = tcp_md5_do_lookup(sk, (union tcp_md5_addr *)&iph->saddr,
					  AF_INET);
	hash_location = tcp_parse_md5sig_option(th);

	/* We've parsed the options - do we have a hash? */
	if (!hash_expected && !hash_location)
		return false;

	if (hash_expected && !hash_location) {
		NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPMD5NOTFOUND);
		return true;
	}

	if (!hash_expected && hash_location) {
		NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPMD5UNEXPECTED);
		return true;
	}

	/* Okay, so this is hash_expected and hash_location -
	 * so we need to calculate the checksum.
	 */
	genhash = tcp_v4_md5_hash_skb(newhash,
				      hash_expected,
				      NULL, skb);

	if (genhash || memcmp(hash_location, newhash, 16) != 0) {
		NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPMD5FAILURE);
		net_info_ratelimited("MD5 Hash failed for (%pI4, %d)->(%pI4, %d)%s\n",
				     &iph->saddr, ntohs(th->source),
				     &iph->daddr, ntohs(th->dest),
				     genhash ? " tcp_v4_calc_md5_hash failed"
					     : "");
		return true;
	}
	return false;
#endif
	return false;
}
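/* Fill in the IPv4-specific parts of a request sock from the incoming SYN:
 * addresses are swapped (the SYN's source is our peer) and any IP options
 * are saved for the eventual SYN-ACK.
 */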
static void tcp_v4_init_req(struct request_sock *req,
			    const struct sock *sk_listener,
			    struct sk_buff *skb)
{
	struct inet_request_sock *ireq = inet_rsk(req);
	struct net *net = sock_net(sk_listener);

	sk_rcv_saddr_set(req_to_sk(req), ip_hdr(skb)->daddr);
	sk_daddr_set(req_to_sk(req), ip_hdr(skb)->saddr);
	RCU_INIT_POINTER(ireq->ireq_opt, tcp_v4_save_options(net, skb));
}
static struct dst_entry *tcp_v4_route_req(const struct sock *sk,
					  struct flowi *fl,
					  const struct request_sock *req)
{
	return inet_csk_route_req(sk, &fl->u.ip4, req);
}
struct request_sock_ops tcp_request_sock_ops __read_mostly = {
	.family		=	PF_INET,
	.obj_size	=	sizeof(struct tcp_request_sock),
	.rtx_syn_ack	=	tcp_rtx_synack,
	.send_ack	=	tcp_v4_reqsk_send_ack,
	.destructor	=	tcp_v4_reqsk_destructor,
	.send_reset	=	tcp_v4_send_reset,
	.syn_ack_timeout =	tcp_syn_ack_timeout,
};
static const struct tcp_request_sock_ops tcp_request_sock_ipv4_ops = {
	.mss_clamp	=	TCP_MSS_DEFAULT,
#ifdef CONFIG_TCP_MD5SIG
	.req_md5_lookup	=	tcp_v4_md5_lookup,
	.calc_md5_hash	=	tcp_v4_md5_hash_skb,
#endif
	.init_req	=	tcp_v4_init_req,
#ifdef CONFIG_SYN_COOKIES
	.cookie_init_seq =	cookie_v4_init_sequence,
#endif
	.route_req	=	tcp_v4_route_req,
	.init_seq	=	tcp_v4_init_seq,
	.init_ts_off	=	tcp_v4_init_ts_off,
	.send_synack	=	tcp_v4_send_synack,
};
int tcp_v4_conn_request(struct sock *sk, struct sk_buff *skb)
{
	/* Never answer to SYNs sent to broadcast or multicast */
	if (skb_rtable(skb)->rt_flags & (RTCF_BROADCAST | RTCF_MULTICAST))
		goto drop;

	return tcp_conn_request(&tcp_request_sock_ops,
				&tcp_request_sock_ipv4_ops, sk, skb);

drop:
	tcp_listendrop(sk);
	return 0;
}
EXPORT_SYMBOL(tcp_v4_conn_request);
/*
 * The three way handshake has completed - we got a valid synack -
 * now create the new socket.
 */
struct sock *tcp_v4_syn_recv_sock(const struct sock *sk, struct sk_buff *skb,
				  struct request_sock *req,
				  struct dst_entry *dst,
				  struct request_sock *req_unhash,
				  bool *own_req)
{
	struct inet_request_sock *ireq;
	struct inet_sock *newinet;
	struct tcp_sock *newtp;
	struct sock *newsk;
#ifdef CONFIG_TCP_MD5SIG
	struct tcp_md5sig_key *key;
#endif
	struct ip_options_rcu *inet_opt;

	if (sk_acceptq_is_full(sk))
		goto exit_overflow;

	newsk = tcp_create_openreq_child(sk, req, skb);
	if (!newsk)
		goto exit_nonewsk;

	newsk->sk_gso_type = SKB_GSO_TCPV4;
	inet_sk_rx_dst_set(newsk, skb);

	newtp		      = tcp_sk(newsk);
	newinet		      = inet_sk(newsk);
	ireq		      = inet_rsk(req);
	sk_daddr_set(newsk, ireq->ir_rmt_addr);
	sk_rcv_saddr_set(newsk, ireq->ir_loc_addr);
	newsk->sk_bound_dev_if = ireq->ir_iif;
	newinet->inet_saddr   = ireq->ir_loc_addr;
	inet_opt	      = rcu_dereference(ireq->ireq_opt);
	RCU_INIT_POINTER(newinet->inet_opt, inet_opt);
	newinet->mc_index     = inet_iif(skb);
	newinet->mc_ttl	      = ip_hdr(skb)->ttl;
	newinet->rcv_tos      = ip_hdr(skb)->tos;
	inet_csk(newsk)->icsk_ext_hdr_len = 0;
	if (inet_opt)
		inet_csk(newsk)->icsk_ext_hdr_len = inet_opt->opt.optlen;
	newinet->inet_id = newtp->write_seq ^ jiffies;

	if (!dst) {
		dst = inet_csk_route_child_sock(sk, newsk, req);
		if (!dst)
			goto put_and_exit;
	} else {
		/* syncookie case : see end of cookie_v4_check() */
	}
	sk_setup_caps(newsk, dst);

	tcp_ca_openreq_child(newsk, dst);

	tcp_sync_mss(newsk, dst_mtu(dst));
	newtp->advmss = tcp_mss_clamp(tcp_sk(sk), dst_metric_advmss(dst));

	tcp_initialize_rcv_mss(newsk);

#ifdef CONFIG_TCP_MD5SIG
	/* Copy over the MD5 key from the original socket */
	key = tcp_md5_do_lookup(sk, (union tcp_md5_addr *)&newinet->inet_daddr,
				AF_INET);
	if (key) {
		/*
		 * We're using one, so create a matching key
		 * on the newsk structure. If we fail to get
		 * memory, then we end up not copying the key
		 * across. Shucks.
		 */
		tcp_md5_do_add(newsk, (union tcp_md5_addr *)&newinet->inet_daddr,
			       AF_INET, 32, key->key, key->keylen, GFP_ATOMIC);
		sk_nocaps_add(newsk, NETIF_F_GSO_MASK);
	}
#endif

	if (__inet_inherit_port(sk, newsk) < 0)
		goto put_and_exit;
	*own_req = inet_ehash_nolisten(newsk, req_to_sk(req_unhash));
	if (likely(*own_req)) {
		tcp_move_syn(newtp, req);
		ireq->ireq_opt = NULL;
	} else {
		newinet->inet_opt = NULL;
	}
	return newsk;

exit_overflow:
	NET_INC_STATS(sock_net(sk), LINUX_MIB_LISTENOVERFLOWS);
exit_nonewsk:
	dst_release(dst);
exit:
	tcp_listendrop(sk);
	return NULL;
put_and_exit:
	newinet->inet_opt = NULL;
	inet_csk_prepare_forced_close(newsk);
	tcp_done(newsk);
	goto exit;
}
EXPORT_SYMBOL(tcp_v4_syn_recv_sock);
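/* SYN-cookie path: when the listener answered with a cookie instead of
 * keeping a request sock, the ACK completing the handshake arrives with no
 * matching request; cookie_v4_check() validates the cookie encoded in the
 * ACK and, if valid, creates the child socket.
 */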
static struct sock *tcp_v4_cookie_check(struct sock *sk, struct sk_buff *skb)
{
#ifdef CONFIG_SYN_COOKIES
	const struct tcphdr *th = tcp_hdr(skb);

	if (!th->syn)
		sk = cookie_v4_check(sk, skb);
#endif
	return sk;
}
/* The socket must have its spinlock held when we get
 * here, unless it is a TCP_LISTEN socket.
 *
 * We have a potential double-lock case here, so even when
 * doing backlog processing we use the BH locking scheme.
 * This is because we cannot sleep with the original spinlock
 * held.
 */
int tcp_v4_do_rcv(struct sock *sk, struct sk_buff *skb)
{
	struct sock *rsk;

	if (sk->sk_state == TCP_ESTABLISHED) { /* Fast path */
		struct dst_entry *dst = sk->sk_rx_dst;

		sock_rps_save_rxhash(sk, skb);
		sk_mark_napi_id(sk, skb);
		if (dst) {
			if (inet_sk(sk)->rx_dst_ifindex != skb->skb_iif ||
			    !dst->ops->check(dst, 0)) {
				dst_release(dst);
				sk->sk_rx_dst = NULL;
			}
		}
		tcp_rcv_established(sk, skb);
		return 0;
	}

	if (tcp_checksum_complete(skb))
		goto csum_err;

	if (sk->sk_state == TCP_LISTEN) {
		struct sock *nsk = tcp_v4_cookie_check(sk, skb);

		if (!nsk)
			goto discard;
		if (nsk != sk) {
			if (tcp_child_process(sk, nsk, skb)) {
				rsk = nsk;
				goto reset;
			}
			return 0;
		}
	} else
		sock_rps_save_rxhash(sk, skb);

	if (tcp_rcv_state_process(sk, skb)) {
		rsk = sk;
		goto reset;
	}
	return 0;

reset:
	tcp_v4_send_reset(rsk, skb);
discard:
	kfree_skb(skb);
	/* Be careful here. If this function gets more complicated and
	 * gcc suffers from register pressure on the x86, sk (in %ebx)
	 * might be destroyed here. This current version compiles correctly,
	 * but you have been warned.
	 */
	return 0;

csum_err:
	TCP_INC_STATS(sock_net(sk), TCP_MIB_CSUMERRORS);
	TCP_INC_STATS(sock_net(sk), TCP_MIB_INERRS);
	goto discard;
}
EXPORT_SYMBOL(tcp_v4_do_rcv);
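/* Early demux: look the established socket up while the packet is still in
 * the IP receive path, so the route cached on the socket can be attached to
 * the skb and the full lookup can be skipped later.
 */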
int tcp_v4_early_demux(struct sk_buff *skb)
{
	const struct iphdr *iph;
	const struct tcphdr *th;
	struct sock *sk;

	if (skb->pkt_type != PACKET_HOST)
		return 0;

	if (!pskb_may_pull(skb, skb_transport_offset(skb) + sizeof(struct tcphdr)))
		return 0;

	iph = ip_hdr(skb);
	th = tcp_hdr(skb);

	if (th->doff < sizeof(struct tcphdr) / 4)
		return 0;

	sk = __inet_lookup_established(dev_net(skb->dev), &tcp_hashinfo,
				       iph->saddr, th->source,
				       iph->daddr, ntohs(th->dest),
				       skb->skb_iif, inet_sdif(skb));
	if (sk) {
		skb->sk = sk;
		skb->destructor = sock_edemux;
		if (sk_fullsock(sk)) {
			struct dst_entry *dst = READ_ONCE(sk->sk_rx_dst);

			if (dst)
				dst = dst_check(dst, 0);
			if (dst &&
			    inet_sk(sk)->rx_dst_ifindex == skb->skb_iif)
				skb_dst_set_noref(skb, dst);
		}
	}
	return 0;
}
bool tcp_add_backlog(struct sock *sk, struct sk_buff *skb)
{
	u32 limit = sk->sk_rcvbuf + sk->sk_sndbuf;

	/* Only socket owner can try to collapse/prune rx queues
	 * to reduce memory overhead, so add a little headroom here.
	 * Only a few socket backlogs can be non-empty concurrently.
	 */
	limit += 64*1024;

	/* In case all data was pulled from skb frags (in __pskb_pull_tail()),
	 * we can fix skb->truesize to its real value to avoid future drops.
	 * This is valid because skb is not yet charged to the socket.
	 * It has been noticed pure SACK packets were sometimes dropped
	 * (if cooked by drivers without copybreak feature).
	 */
	skb_condense(skb);

	if (unlikely(sk_add_backlog(sk, skb, limit))) {
		bh_unlock_sock(sk);
		__NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPBACKLOGDROP);
		return true;
	}
	return false;
}
EXPORT_SYMBOL(tcp_add_backlog);
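/* Run the socket filter on an incoming segment.  The filter may trim payload
 * (but never below the TCP header, which is the cap passed to
 * sk_filter_trim_cap()), so end_seq has to be reduced by the trimmed amount.
 */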
int tcp_filter(struct sock *sk, struct sk_buff *skb)
{
	struct tcphdr *th = (struct tcphdr *)skb->data;
	unsigned int eaten = skb->len;
	int err;

	err = sk_filter_trim_cap(sk, skb, th->doff * 4);
	if (!err) {
		eaten -= skb->len;
		TCP_SKB_CB(skb)->end_seq -= eaten;
	}
	return err;
}
EXPORT_SYMBOL(tcp_filter);
static void tcp_v4_restore_cb(struct sk_buff *skb)
{
	memmove(IPCB(skb), &TCP_SKB_CB(skb)->header.h4,
		sizeof(struct inet_skb_parm));
}

static void tcp_v4_fill_cb(struct sk_buff *skb, const struct iphdr *iph,
			   const struct tcphdr *th)
{
	/* This is tricky : We move IPCB at its correct location into TCP_SKB_CB()
	 * barrier() makes sure compiler won't play fool^Waliasing games.
	 */
	memmove(&TCP_SKB_CB(skb)->header.h4, IPCB(skb),
		sizeof(struct inet_skb_parm));
	barrier();

	TCP_SKB_CB(skb)->seq = ntohl(th->seq);
	TCP_SKB_CB(skb)->end_seq = (TCP_SKB_CB(skb)->seq + th->syn + th->fin +
				    skb->len - th->doff * 4);
	TCP_SKB_CB(skb)->ack_seq = ntohl(th->ack_seq);
	TCP_SKB_CB(skb)->tcp_flags = tcp_flag_byte(th);
	TCP_SKB_CB(skb)->tcp_tw_isn = 0;
	TCP_SKB_CB(skb)->ip_dsfield = ipv4_get_dsfield(iph);
	TCP_SKB_CB(skb)->sacked	 = 0;
	TCP_SKB_CB(skb)->has_rxtstamp =
			skb->tstamp || skb_hwtstamps(skb)->hwtstamp;
}
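/* Main entry point from the IP layer: validate header and checksum, look the
 * owning socket up, and hand the segment to the state machine - directly,
 * through the backlog, or through a request/TIME-WAIT socket.
 */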
int tcp_v4_rcv(struct sk_buff *skb)
{
	struct net *net = dev_net(skb->dev);
	int sdif = inet_sdif(skb);
	const struct iphdr *iph;
	const struct tcphdr *th;
	bool refcounted;
	struct sock *sk;
	int ret;

	if (skb->pkt_type != PACKET_HOST)
		goto discard_it;

	/* Count it even if it's bad */
	__TCP_INC_STATS(net, TCP_MIB_INSEGS);

	if (!pskb_may_pull(skb, sizeof(struct tcphdr)))
		goto discard_it;

	th = (const struct tcphdr *)skb->data;

	if (unlikely(th->doff < sizeof(struct tcphdr) / 4))
		goto bad_packet;
	if (!pskb_may_pull(skb, th->doff * 4))
		goto discard_it;

	/* An explanation is required here, I think.
	 * Packet length and doff are validated by header prediction,
	 * provided case of th->doff==0 is eliminated.
	 * So, we defer the checks. */

	if (skb_checksum_init(skb, IPPROTO_TCP, inet_compute_pseudo))
		goto csum_error;

	th = (const struct tcphdr *)skb->data;
	iph = ip_hdr(skb);
lookup:
	sk = __inet_lookup_skb(&tcp_hashinfo, skb, __tcp_hdrlen(th), th->source,
			       th->dest, sdif, &refcounted);
	if (!sk)
		goto no_tcp_socket;

process:
	if (sk->sk_state == TCP_TIME_WAIT)
		goto do_time_wait;

	if (sk->sk_state == TCP_NEW_SYN_RECV) {
		struct request_sock *req = inet_reqsk(sk);
		bool req_stolen = false;
		struct sock *nsk;

		sk = req->rsk_listener;
		if (unlikely(tcp_v4_inbound_md5_hash(sk, skb))) {
			sk_drops_add(sk, skb);
			reqsk_put(req);
			goto discard_it;
		}
		if (unlikely(sk->sk_state != TCP_LISTEN)) {
			inet_csk_reqsk_queue_drop_and_put(sk, req);
			goto lookup;
		}
		/* We own a reference on the listener, increase it again
		 * as we might lose it too soon.
		 */
		sock_hold(sk);
		refcounted = true;
		nsk = NULL;
		if (!tcp_filter(sk, skb)) {
			th = (const struct tcphdr *)skb->data;
			iph = ip_hdr(skb);
			tcp_v4_fill_cb(skb, iph, th);
			nsk = tcp_check_req(sk, skb, req, false, &req_stolen);
		}
		if (!nsk) {
			reqsk_put(req);
			if (req_stolen) {
				/* Another cpu got exclusive access to req
				 * and created a full blown socket.
				 * Try to feed this packet to this socket
				 * instead of discarding it.
				 */
				tcp_v4_restore_cb(skb);
				sock_put(sk);
				goto lookup;
			}
			goto discard_and_relse;
		}
		if (nsk == sk) {
			reqsk_put(req);
			tcp_v4_restore_cb(skb);
		} else if (tcp_child_process(sk, nsk, skb)) {
			tcp_v4_send_reset(nsk, skb);
			goto discard_and_relse;
		} else {
			sock_put(sk);
			return 0;
		}
	}
	if (unlikely(iph->ttl < inet_sk(sk)->min_ttl)) {
		__NET_INC_STATS(net, LINUX_MIB_TCPMINTTLDROP);
		goto discard_and_relse;
	}

	if (!xfrm4_policy_check(sk, XFRM_POLICY_IN, skb))
		goto discard_and_relse;

	if (tcp_v4_inbound_md5_hash(sk, skb))
		goto discard_and_relse;

	nf_reset(skb);

	if (tcp_filter(sk, skb))
		goto discard_and_relse;
	th = (const struct tcphdr *)skb->data;
	iph = ip_hdr(skb);
	tcp_v4_fill_cb(skb, iph, th);

	skb->dev = NULL;

	if (sk->sk_state == TCP_LISTEN) {
		ret = tcp_v4_do_rcv(sk, skb);
		goto put_and_return;
	}

	sk_incoming_cpu_update(sk);

	bh_lock_sock_nested(sk);
	tcp_segs_in(tcp_sk(sk), skb);
	ret = 0;
	if (!sock_owned_by_user(sk)) {
		ret = tcp_v4_do_rcv(sk, skb);
	} else if (tcp_add_backlog(sk, skb)) {
		goto discard_and_relse;
	}
	bh_unlock_sock(sk);

put_and_return:
	if (refcounted)
		sock_put(sk);

	return ret;

no_tcp_socket:
	if (!xfrm4_policy_check(NULL, XFRM_POLICY_IN, skb))
		goto discard_it;

	tcp_v4_fill_cb(skb, iph, th);

	if (tcp_checksum_complete(skb)) {
csum_error:
		__TCP_INC_STATS(net, TCP_MIB_CSUMERRORS);
bad_packet:
		__TCP_INC_STATS(net, TCP_MIB_INERRS);
	} else {
		tcp_v4_send_reset(NULL, skb);
	}

discard_it:
	/* Discard frame. */
	kfree_skb(skb);
	return 0;

discard_and_relse:
	sk_drops_add(sk, skb);
	if (refcounted)
		sock_put(sk);
	goto discard_it;

do_time_wait:
	if (!xfrm4_policy_check(NULL, XFRM_POLICY_IN, skb)) {
		inet_twsk_put(inet_twsk(sk));
		goto discard_it;
	}

	tcp_v4_fill_cb(skb, iph, th);

	if (tcp_checksum_complete(skb)) {
		inet_twsk_put(inet_twsk(sk));
		goto csum_error;
	}
	switch (tcp_timewait_state_process(inet_twsk(sk), skb, th)) {
	case TCP_TW_SYN: {
		struct sock *sk2 = inet_lookup_listener(dev_net(skb->dev),
							&tcp_hashinfo, skb,
							__tcp_hdrlen(th),
							iph->saddr, th->source,
							iph->daddr, th->dest,
							inet_iif(skb),
							sdif);
		if (sk2) {
			inet_twsk_deschedule_put(inet_twsk(sk));
			sk = sk2;
			tcp_v4_restore_cb(skb);
			refcounted = false;
			goto process;
		}
	}
		/* to ACK */
		/* fall through */
	case TCP_TW_ACK:
		tcp_v4_timewait_ack(sk, skb);
		break;
	case TCP_TW_RST:
		tcp_v4_send_reset(sk, skb);
		inet_twsk_deschedule_put(inet_twsk(sk));
		goto discard_it;
	case TCP_TW_SUCCESS:;
	}
	goto discard_it;
}
static struct timewait_sock_ops tcp_timewait_sock_ops = {
	.twsk_obj_size	= sizeof(struct tcp_timewait_sock),
	.twsk_unique	= tcp_twsk_unique,
	.twsk_destructor = tcp_twsk_destructor,
};
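/* Cache the incoming route on the socket so the established fast path can
 * skip a route lookup; a reference on the dst is only kept if it is still
 * live (dst_hold_safe()).
 */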
void inet_sk_rx_dst_set(struct sock *sk, const struct sk_buff *skb)
{
	struct dst_entry *dst = skb_dst(skb);

	if (dst && dst_hold_safe(dst)) {
		sk->sk_rx_dst = dst;
		inet_sk(sk)->rx_dst_ifindex = skb->skb_iif;
	}
}
EXPORT_SYMBOL(inet_sk_rx_dst_set);
const struct inet_connection_sock_af_ops ipv4_specific = {
	.queue_xmit	   = ip_queue_xmit,
	.send_check	   = tcp_v4_send_check,
	.rebuild_header	   = inet_sk_rebuild_header,
	.sk_rx_dst_set	   = inet_sk_rx_dst_set,
	.conn_request	   = tcp_v4_conn_request,
	.syn_recv_sock	   = tcp_v4_syn_recv_sock,
	.net_header_len	   = sizeof(struct iphdr),
	.setsockopt	   = ip_setsockopt,
	.getsockopt	   = ip_getsockopt,
	.addr2sockaddr	   = inet_csk_addr2sockaddr,
	.sockaddr_len	   = sizeof(struct sockaddr_in),
#ifdef CONFIG_COMPAT
	.compat_setsockopt = compat_ip_setsockopt,
	.compat_getsockopt = compat_ip_getsockopt,
#endif
	.mtu_reduced	   = tcp_v4_mtu_reduced,
};
EXPORT_SYMBOL(ipv4_specific);
#ifdef CONFIG_TCP_MD5SIG
static const struct tcp_sock_af_ops tcp_sock_ipv4_specific = {
	.md5_lookup	= tcp_v4_md5_lookup,
	.calc_md5_hash	= tcp_v4_md5_hash_skb,
	.md5_parse	= tcp_v4_parse_md5_keys,
};
#endif
/* NOTE: A lot of things set to zero explicitly by call to
 *       sk_alloc() so need not be done here.
 */
static int tcp_v4_init_sock(struct sock *sk)
{
	struct inet_connection_sock *icsk = inet_csk(sk);

	tcp_init_sock(sk);

	icsk->icsk_af_ops = &ipv4_specific;

#ifdef CONFIG_TCP_MD5SIG
	tcp_sk(sk)->af_specific = &tcp_sock_ipv4_specific;
#endif

	return 0;
}
void tcp_v4_destroy_sock(struct sock *sk)
{
	struct tcp_sock *tp = tcp_sk(sk);

	trace_tcp_destroy_sock(sk);

	tcp_clear_xmit_timers(sk);

	tcp_cleanup_congestion_control(sk);

	tcp_cleanup_ulp(sk);

	/* Clean up the write buffer. */
	tcp_write_queue_purge(sk);

	/* Check if we want to disable active TFO */
	tcp_fastopen_active_disable_ofo_check(sk);

	/* Cleans up our, hopefully empty, out_of_order_queue. */
	skb_rbtree_purge(&tp->out_of_order_queue);

#ifdef CONFIG_TCP_MD5SIG
	/* Clean up the MD5 key list, if any */
	if (tp->md5sig_info) {
		tcp_clear_md5_list(sk);
		kfree_rcu(rcu_dereference_protected(tp->md5sig_info, 1), rcu);
		tp->md5sig_info = NULL;
	}
#endif

	/* Clean up a referenced TCP bind bucket. */
	if (inet_csk(sk)->icsk_bind_hash)
		inet_put_port(sk);

	BUG_ON(tp->fastopen_rsk);

	/* If socket is aborted during connect operation */
	tcp_free_fastopen_req(tp);
	tcp_fastopen_destroy_cipher(sk);
	tcp_saved_syn_free(tp);

	sk_sockets_allocated_dec(sk);
}
EXPORT_SYMBOL(tcp_v4_destroy_sock);
#ifdef CONFIG_PROC_FS
/* Proc filesystem TCP sock list dumping. */

/*
 * Get next listener socket follow cur.  If cur is NULL, get first socket
 * starting from bucket given in st->bucket; when st->bucket is zero the
 * very first socket in the hash table is returned.
 */
static void *listening_get_next(struct seq_file *seq, void *cur)
{
	struct tcp_seq_afinfo *afinfo = PDE_DATA(file_inode(seq->file));
	struct tcp_iter_state *st = seq->private;
	struct net *net = seq_file_net(seq);
	struct inet_listen_hashbucket *ilb;
	struct sock *sk = cur;

	if (!sk) {
get_head:
		ilb = &tcp_hashinfo.listening_hash[st->bucket];
		spin_lock(&ilb->lock);
		sk = sk_head(&ilb->head);
		st->offset = 0;
		goto get_sk;
	}
	ilb = &tcp_hashinfo.listening_hash[st->bucket];
	++st->num;
	++st->offset;

	sk = sk_next(sk);
get_sk:
	sk_for_each_from(sk) {
		if (!net_eq(sock_net(sk), net))
			continue;
		if (sk->sk_family == afinfo->family)
			return sk;
	}
	spin_unlock(&ilb->lock);
	st->offset = 0;
	if (++st->bucket < INET_LHTABLE_SIZE)
		goto get_head;
	return NULL;
}
static void *listening_get_idx(struct seq_file *seq, loff_t *pos)
{
	struct tcp_iter_state *st = seq->private;
	void *rc;

	st->bucket = 0;
	st->offset = 0;
	rc = listening_get_next(seq, NULL);

	while (rc && *pos) {
		rc = listening_get_next(seq, rc);
		--*pos;
	}
	return rc;
}

static inline bool empty_bucket(const struct tcp_iter_state *st)
{
	return hlist_nulls_empty(&tcp_hashinfo.ehash[st->bucket].chain);
}
/*
 * Get first established socket starting from bucket given in st->bucket.
 * If st->bucket is zero, the very first socket in the hash is returned.
 */
static void *established_get_first(struct seq_file *seq)
{
	struct tcp_seq_afinfo *afinfo = PDE_DATA(file_inode(seq->file));
	struct tcp_iter_state *st = seq->private;
	struct net *net = seq_file_net(seq);
	void *rc = NULL;

	st->offset = 0;
	for (; st->bucket <= tcp_hashinfo.ehash_mask; ++st->bucket) {
		struct sock *sk;
		struct hlist_nulls_node *node;
		spinlock_t *lock = inet_ehash_lockp(&tcp_hashinfo, st->bucket);

		/* Lockless fast path for the common case of empty buckets */
		if (empty_bucket(st))
			continue;

		spin_lock_bh(lock);
		sk_nulls_for_each(sk, node, &tcp_hashinfo.ehash[st->bucket].chain) {
			if (sk->sk_family != afinfo->family ||
			    !net_eq(sock_net(sk), net)) {
				continue;
			}
			rc = sk;
			goto out;
		}
		spin_unlock_bh(lock);
	}
out:
	return rc;
}
static void *established_get_next(struct seq_file *seq, void *cur)
{
	struct tcp_seq_afinfo *afinfo = PDE_DATA(file_inode(seq->file));
	struct sock *sk = cur;
	struct hlist_nulls_node *node;
	struct tcp_iter_state *st = seq->private;
	struct net *net = seq_file_net(seq);

	++st->num;
	++st->offset;

	sk = sk_nulls_next(sk);

	sk_nulls_for_each_from(sk, node) {
		if (sk->sk_family == afinfo->family &&
		    net_eq(sock_net(sk), net))
			return sk;
	}

	spin_unlock_bh(inet_ehash_lockp(&tcp_hashinfo, st->bucket));
	++st->bucket;
	return established_get_first(seq);
}
static void *established_get_idx(struct seq_file *seq, loff_t pos)
{
	struct tcp_iter_state *st = seq->private;
	void *rc;

	st->bucket = 0;
	rc = established_get_first(seq);

	while (rc && pos) {
		rc = established_get_next(seq, rc);
		--pos;
	}
	return rc;
}

static void *tcp_get_idx(struct seq_file *seq, loff_t pos)
{
	void *rc;
	struct tcp_iter_state *st = seq->private;

	st->state = TCP_SEQ_STATE_LISTENING;
	rc	  = listening_get_idx(seq, &pos);

	if (!rc) {
		st->state = TCP_SEQ_STATE_ESTABLISHED;
		rc	  = established_get_idx(seq, pos);
	}

	return rc;
}
static void *tcp_seek_last_pos(struct seq_file *seq)
{
	struct tcp_iter_state *st = seq->private;
	int offset = st->offset;
	int orig_num = st->num;
	void *rc = NULL;

	switch (st->state) {
	case TCP_SEQ_STATE_LISTENING:
		if (st->bucket >= INET_LHTABLE_SIZE)
			break;
		st->state = TCP_SEQ_STATE_LISTENING;
		rc = listening_get_next(seq, NULL);
		while (offset-- && rc)
			rc = listening_get_next(seq, rc);
		if (rc)
			break;
		st->bucket = 0;
		st->state = TCP_SEQ_STATE_ESTABLISHED;
		/* Fallthrough */
	case TCP_SEQ_STATE_ESTABLISHED:
		if (st->bucket > tcp_hashinfo.ehash_mask)
			break;
		rc = established_get_first(seq);
		while (offset-- && rc)
			rc = established_get_next(seq, rc);
	}

	st->num = orig_num;

	return rc;
}
void *tcp_seq_start(struct seq_file *seq, loff_t *pos)
{
	struct tcp_iter_state *st = seq->private;
	void *rc;

	if (*pos && *pos == st->last_pos) {
		rc = tcp_seek_last_pos(seq);
		if (rc)
			goto out;
	}

	st->state = TCP_SEQ_STATE_LISTENING;
	st->num = 0;
	st->bucket = 0;
	st->offset = 0;
	rc = *pos ? tcp_get_idx(seq, *pos - 1) : SEQ_START_TOKEN;

out:
	st->last_pos = *pos;
	return rc;
}
EXPORT_SYMBOL(tcp_seq_start);
void *tcp_seq_next(struct seq_file *seq, void *v, loff_t *pos)
{
	struct tcp_iter_state *st = seq->private;
	void *rc = NULL;

	if (v == SEQ_START_TOKEN) {
		rc = tcp_get_idx(seq, 0);
		goto out;
	}

	switch (st->state) {
	case TCP_SEQ_STATE_LISTENING:
		rc = listening_get_next(seq, v);
		if (!rc) {
			st->state = TCP_SEQ_STATE_ESTABLISHED;
			st->bucket = 0;
			st->offset = 0;
			rc	  = established_get_first(seq);
		}
		break;
	case TCP_SEQ_STATE_ESTABLISHED:
		rc = established_get_next(seq, v);
		break;
	}
out:
	++*pos;
	st->last_pos = *pos;
	return rc;
}
EXPORT_SYMBOL(tcp_seq_next);
void tcp_seq_stop(struct seq_file *seq, void *v)
{
	struct tcp_iter_state *st = seq->private;

	switch (st->state) {
	case TCP_SEQ_STATE_LISTENING:
		if (v != SEQ_START_TOKEN)
			spin_unlock(&tcp_hashinfo.listening_hash[st->bucket].lock);
		break;
	case TCP_SEQ_STATE_ESTABLISHED:
		if (v)
			spin_unlock_bh(inet_ehash_lockp(&tcp_hashinfo, st->bucket));
		break;
	}
}
EXPORT_SYMBOL(tcp_seq_stop);
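/* The helpers below format one /proc/net/tcp row each: get_openreq4() for
 * request sockets, get_tcp4_sock() for full sockets and get_timewait4_sock()
 * for TIME-WAIT sockets.  All three share the same column layout.
 */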
static void get_openreq4(const struct request_sock *req,
			 struct seq_file *f, int i)
{
	const struct inet_request_sock *ireq = inet_rsk(req);
	long delta = req->rsk_timer.expires - jiffies;

	seq_printf(f, "%4d: %08X:%04X %08X:%04X"
		" %02X %08X:%08X %02X:%08lX %08X %5u %8d %u %d %pK",
		i,
		ireq->ir_loc_addr,
		ireq->ir_num,
		ireq->ir_rmt_addr,
		ntohs(ireq->ir_rmt_port),
		TCP_SYN_RECV,
		0, 0, /* could print option size, but that is af dependent. */
		1,    /* timers active (only the expire timer) */
		jiffies_delta_to_clock_t(delta),
		req->num_timeout,
		from_kuid_munged(seq_user_ns(f),
				 sock_i_uid(req->rsk_listener)),
		0,  /* non standard timer */
		0, /* open_requests have no inode */
		0,
		req);
}
static void get_tcp4_sock(struct sock *sk, struct seq_file *f, int i)
{
	int timer_active;
	unsigned long timer_expires;
	const struct tcp_sock *tp = tcp_sk(sk);
	const struct inet_connection_sock *icsk = inet_csk(sk);
	const struct inet_sock *inet = inet_sk(sk);
	const struct fastopen_queue *fastopenq = &icsk->icsk_accept_queue.fastopenq;
	__be32 dest = inet->inet_daddr;
	__be32 src = inet->inet_rcv_saddr;
	__u16 destp = ntohs(inet->inet_dport);
	__u16 srcp = ntohs(inet->inet_sport);
	int rx_queue;
	int state;

	if (icsk->icsk_pending == ICSK_TIME_RETRANS ||
	    icsk->icsk_pending == ICSK_TIME_REO_TIMEOUT ||
	    icsk->icsk_pending == ICSK_TIME_LOSS_PROBE) {
		timer_active	= 1;
		timer_expires	= icsk->icsk_timeout;
	} else if (icsk->icsk_pending == ICSK_TIME_PROBE0) {
		timer_active	= 4;
		timer_expires	= icsk->icsk_timeout;
	} else if (timer_pending(&sk->sk_timer)) {
		timer_active	= 2;
		timer_expires	= sk->sk_timer.expires;
	} else {
		timer_active	= 0;
		timer_expires = jiffies;
	}

	state = inet_sk_state_load(sk);
	if (state == TCP_LISTEN)
		rx_queue = sk->sk_ack_backlog;
	else
		/* Because we don't lock the socket,
		 * we might find a transient negative value.
		 */
		rx_queue = max_t(int, tp->rcv_nxt - tp->copied_seq, 0);

	seq_printf(f, "%4d: %08X:%04X %08X:%04X %02X %08X:%08X %02X:%08lX "
			"%08X %5u %8d %lu %d %pK %lu %lu %u %u %d",
		i, src, srcp, dest, destp, state,
		tp->write_seq - tp->snd_una,
		rx_queue,
		timer_active,
		jiffies_delta_to_clock_t(timer_expires - jiffies),
		icsk->icsk_retransmits,
		from_kuid_munged(seq_user_ns(f), sock_i_uid(sk)),
		icsk->icsk_probes_out,
		sock_i_ino(sk),
		refcount_read(&sk->sk_refcnt), sk,
		jiffies_to_clock_t(icsk->icsk_rto),
		jiffies_to_clock_t(icsk->icsk_ack.ato),
		(icsk->icsk_ack.quick << 1) | icsk->icsk_ack.pingpong,
		tp->snd_cwnd,
		state == TCP_LISTEN ?
		    fastopenq->max_qlen :
		    (tcp_in_initial_slowstart(tp) ? -1 : tp->snd_ssthresh));
}
static void get_timewait4_sock(const struct inet_timewait_sock *tw,
			       struct seq_file *f, int i)
{
	long delta = tw->tw_timer.expires - jiffies;
	__be32 dest, src;
	__u16 destp, srcp;

	dest  = tw->tw_daddr;
	src   = tw->tw_rcv_saddr;
	destp = ntohs(tw->tw_dport);
	srcp  = ntohs(tw->tw_sport);

	seq_printf(f, "%4d: %08X:%04X %08X:%04X"
		" %02X %08X:%08X %02X:%08lX %08X %5d %8d %d %d %pK",
		i, src, srcp, dest, destp, tw->tw_substate, 0, 0,
		3, jiffies_delta_to_clock_t(delta), 0, 0, 0, 0,
		refcount_read(&tw->tw_refcnt), tw);
}
#define TMPSZ 150

static int tcp4_seq_show(struct seq_file *seq, void *v)
{
	struct tcp_iter_state *st;
	struct sock *sk = v;

	seq_setwidth(seq, TMPSZ - 1);
	if (v == SEQ_START_TOKEN) {
		seq_puts(seq, "  sl  local_address rem_address   st tx_queue "
			   "rx_queue tr tm->when retrnsmt   uid  timeout "
			   "inode");
		goto out;
	}
	st = seq->private;

	if (sk->sk_state == TCP_TIME_WAIT)
		get_timewait4_sock(v, seq, st->num);
	else if (sk->sk_state == TCP_NEW_SYN_RECV)
		get_openreq4(v, seq, st->num);
	else
		get_tcp4_sock(v, seq, st->num);
out:
	seq_pad(seq, '\n');
	return 0;
}

static const struct seq_operations tcp4_seq_ops = {
	.show		= tcp4_seq_show,
	.start		= tcp_seq_start,
	.next		= tcp_seq_next,
	.stop		= tcp_seq_stop,
};

static struct tcp_seq_afinfo tcp4_seq_afinfo = {
	.family		= AF_INET,
};

static int __net_init tcp4_proc_init_net(struct net *net)
{
	if (!proc_create_net_data("tcp", 0444, net->proc_net, &tcp4_seq_ops,
			sizeof(struct tcp_iter_state), &tcp4_seq_afinfo))
		return -ENOMEM;
	return 0;
}

static void __net_exit tcp4_proc_exit_net(struct net *net)
{
	remove_proc_entry("tcp", net->proc_net);
}

static struct pernet_operations tcp4_net_ops = {
	.init = tcp4_proc_init_net,
	.exit = tcp4_proc_exit_net,
};

int __init tcp4_proc_init(void)
{
	return register_pernet_subsys(&tcp4_net_ops);
}

void tcp4_proc_exit(void)
{
	unregister_pernet_subsys(&tcp4_net_ops);
}
#endif /* CONFIG_PROC_FS */
struct proto tcp_prot = {
	.name			= "TCP",
	.owner			= THIS_MODULE,
	.close			= tcp_close,
	.pre_connect		= tcp_v4_pre_connect,
	.connect		= tcp_v4_connect,
	.disconnect		= tcp_disconnect,
	.accept			= inet_csk_accept,
	.ioctl			= tcp_ioctl,
	.init			= tcp_v4_init_sock,
	.destroy		= tcp_v4_destroy_sock,
	.shutdown		= tcp_shutdown,
	.setsockopt		= tcp_setsockopt,
	.getsockopt		= tcp_getsockopt,
	.keepalive		= tcp_set_keepalive,
	.recvmsg		= tcp_recvmsg,
	.sendmsg		= tcp_sendmsg,
	.sendpage		= tcp_sendpage,
	.backlog_rcv		= tcp_v4_do_rcv,
	.release_cb		= tcp_release_cb,
	.hash			= inet_hash,
	.unhash			= inet_unhash,
	.get_port		= inet_csk_get_port,
	.enter_memory_pressure	= tcp_enter_memory_pressure,
	.leave_memory_pressure	= tcp_leave_memory_pressure,
	.stream_memory_free	= tcp_stream_memory_free,
	.sockets_allocated	= &tcp_sockets_allocated,
	.orphan_count		= &tcp_orphan_count,
	.memory_allocated	= &tcp_memory_allocated,
	.memory_pressure	= &tcp_memory_pressure,
	.sysctl_mem		= sysctl_tcp_mem,
	.sysctl_wmem_offset	= offsetof(struct net, ipv4.sysctl_tcp_wmem),
	.sysctl_rmem_offset	= offsetof(struct net, ipv4.sysctl_tcp_rmem),
	.max_header		= MAX_TCP_HEADER,
	.obj_size		= sizeof(struct tcp_sock),
	.slab_flags		= SLAB_TYPESAFE_BY_RCU,
	.twsk_prot		= &tcp_timewait_sock_ops,
	.rsk_prot		= &tcp_request_sock_ops,
	.h.hashinfo		= &tcp_hashinfo,
	.no_autobind		= true,
#ifdef CONFIG_COMPAT
	.compat_setsockopt	= compat_tcp_setsockopt,
	.compat_getsockopt	= compat_tcp_getsockopt,
#endif
	.diag_destroy		= tcp_abort,
};
EXPORT_SYMBOL(tcp_prot);
static void __net_exit tcp_sk_exit(struct net *net)
{
	int cpu;

	module_put(net->ipv4.tcp_congestion_control->owner);

	for_each_possible_cpu(cpu)
		inet_ctl_sock_destroy(*per_cpu_ptr(net->ipv4.tcp_sk, cpu));
	free_percpu(net->ipv4.tcp_sk);
}
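/* Per-network-namespace initialisation: create the per-CPU control sockets
 * used to send RSTs and ACKs outside full socket context, and set the
 * namespace-local TCP sysctl defaults.
 */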
static int __net_init tcp_sk_init(struct net *net)
{
	int res, cpu, cnt;

	net->ipv4.tcp_sk = alloc_percpu(struct sock *);
	if (!net->ipv4.tcp_sk)
		return -ENOMEM;

	for_each_possible_cpu(cpu) {
		struct sock *sk;

		res = inet_ctl_sock_create(&sk, PF_INET, SOCK_RAW,
					   IPPROTO_TCP, net);
		if (res)
			goto fail;
		sock_set_flag(sk, SOCK_USE_WRITE_QUEUE);
		*per_cpu_ptr(net->ipv4.tcp_sk, cpu) = sk;
	}

	net->ipv4.sysctl_tcp_ecn = 2;
	net->ipv4.sysctl_tcp_ecn_fallback = 1;

	net->ipv4.sysctl_tcp_base_mss = TCP_BASE_MSS;
	net->ipv4.sysctl_tcp_probe_threshold = TCP_PROBE_THRESHOLD;
	net->ipv4.sysctl_tcp_probe_interval = TCP_PROBE_INTERVAL;

	net->ipv4.sysctl_tcp_keepalive_time = TCP_KEEPALIVE_TIME;
	net->ipv4.sysctl_tcp_keepalive_probes = TCP_KEEPALIVE_PROBES;
	net->ipv4.sysctl_tcp_keepalive_intvl = TCP_KEEPALIVE_INTVL;

	net->ipv4.sysctl_tcp_syn_retries = TCP_SYN_RETRIES;
	net->ipv4.sysctl_tcp_synack_retries = TCP_SYNACK_RETRIES;
	net->ipv4.sysctl_tcp_syncookies = 1;
	net->ipv4.sysctl_tcp_reordering = TCP_FASTRETRANS_THRESH;
	net->ipv4.sysctl_tcp_retries1 = TCP_RETR1;
	net->ipv4.sysctl_tcp_retries2 = TCP_RETR2;
	net->ipv4.sysctl_tcp_orphan_retries = 0;
	net->ipv4.sysctl_tcp_fin_timeout = TCP_FIN_TIMEOUT;
	net->ipv4.sysctl_tcp_notsent_lowat = UINT_MAX;
	net->ipv4.sysctl_tcp_tw_reuse = 2;

	cnt = tcp_hashinfo.ehash_mask + 1;
	net->ipv4.tcp_death_row.sysctl_max_tw_buckets = (cnt + 1) / 2;
	net->ipv4.tcp_death_row.hashinfo = &tcp_hashinfo;

	net->ipv4.sysctl_max_syn_backlog = max(128, cnt / 256);
	net->ipv4.sysctl_tcp_sack = 1;
	net->ipv4.sysctl_tcp_window_scaling = 1;
	net->ipv4.sysctl_tcp_timestamps = 1;
	net->ipv4.sysctl_tcp_early_retrans = 3;
	net->ipv4.sysctl_tcp_recovery = TCP_RACK_LOSS_DETECTION;
	net->ipv4.sysctl_tcp_slow_start_after_idle = 1; /* By default, RFC2861 behavior. */
	net->ipv4.sysctl_tcp_retrans_collapse = 1;
	net->ipv4.sysctl_tcp_max_reordering = 300;
	net->ipv4.sysctl_tcp_dsack = 1;
	net->ipv4.sysctl_tcp_app_win = 31;
	net->ipv4.sysctl_tcp_adv_win_scale = 1;
	net->ipv4.sysctl_tcp_frto = 2;
	net->ipv4.sysctl_tcp_moderate_rcvbuf = 1;
	/* This limits the percentage of the congestion window which we
	 * will allow a single TSO frame to consume.  Building TSO frames
	 * which are too large can cause TCP streams to be bursty.
	 */
	net->ipv4.sysctl_tcp_tso_win_divisor = 3;
	/* Default TSQ limit of four TSO segments */
	net->ipv4.sysctl_tcp_limit_output_bytes = 262144;
	/* rfc5961 challenge ack rate limiting */
	net->ipv4.sysctl_tcp_challenge_ack_limit = 1000;
	net->ipv4.sysctl_tcp_min_tso_segs = 2;
	net->ipv4.sysctl_tcp_min_rtt_wlen = 300;
	net->ipv4.sysctl_tcp_autocorking = 1;
	net->ipv4.sysctl_tcp_invalid_ratelimit = HZ/2;
	net->ipv4.sysctl_tcp_pacing_ss_ratio = 200;
	net->ipv4.sysctl_tcp_pacing_ca_ratio = 120;
	if (net != &init_net) {
		memcpy(net->ipv4.sysctl_tcp_rmem,
		       init_net.ipv4.sysctl_tcp_rmem,
		       sizeof(init_net.ipv4.sysctl_tcp_rmem));
		memcpy(net->ipv4.sysctl_tcp_wmem,
		       init_net.ipv4.sysctl_tcp_wmem,
		       sizeof(init_net.ipv4.sysctl_tcp_wmem));
	}
	net->ipv4.sysctl_tcp_comp_sack_delay_ns = NSEC_PER_MSEC;
	net->ipv4.sysctl_tcp_comp_sack_nr = 44;
	net->ipv4.sysctl_tcp_fastopen = TFO_CLIENT_ENABLE;
	spin_lock_init(&net->ipv4.tcp_fastopen_ctx_lock);
	net->ipv4.sysctl_tcp_fastopen_blackhole_timeout = 60 * 60;
	atomic_set(&net->ipv4.tfo_active_disable_times, 0);

	/* Reno is always built in */
	if (!net_eq(net, &init_net) &&
	    try_module_get(init_net.ipv4.tcp_congestion_control->owner))
		net->ipv4.tcp_congestion_control = init_net.ipv4.tcp_congestion_control;
	else
		net->ipv4.tcp_congestion_control = &tcp_reno;

	return 0;
fail:
	tcp_sk_exit(net);

	return res;
}
static void __net_exit tcp_sk_exit_batch(struct list_head *net_exit_list)
{
	struct net *net;

	inet_twsk_purge(&tcp_hashinfo, AF_INET);

	list_for_each_entry(net, net_exit_list, exit_list)
		tcp_fastopen_ctx_destroy(net);
}

static struct pernet_operations __net_initdata tcp_sk_ops = {
       .init	   = tcp_sk_init,
       .exit	   = tcp_sk_exit,
       .exit_batch = tcp_sk_exit_batch,
};

void __init tcp_v4_init(void)
{
	if (register_pernet_subsys(&tcp_sk_ops))
		panic("Failed to create the TCP control socket.\n");
}