net/netfilter/nf_conntrack_proto_tcp.c
1 // SPDX-License-Identifier: GPL-2.0-only
2 /* (C) 1999-2001 Paul `Rusty' Russell
3  * (C) 2002-2004 Netfilter Core Team <coreteam@netfilter.org>
4  * (C) 2002-2013 Jozsef Kadlecsik <kadlec@netfilter.org>
5  * (C) 2006-2012 Patrick McHardy <kaber@trash.net>
6  */
7
8 #include <linux/types.h>
9 #include <linux/timer.h>
10 #include <linux/module.h>
11 #include <linux/in.h>
12 #include <linux/tcp.h>
13 #include <linux/spinlock.h>
14 #include <linux/skbuff.h>
15 #include <linux/ipv6.h>
16 #include <net/ip6_checksum.h>
17 #include <asm/unaligned.h>
18
19 #include <net/tcp.h>
20
21 #include <linux/netfilter.h>
22 #include <linux/netfilter_ipv4.h>
23 #include <linux/netfilter_ipv6.h>
24 #include <net/netfilter/nf_conntrack.h>
25 #include <net/netfilter/nf_conntrack_l4proto.h>
26 #include <net/netfilter/nf_conntrack_ecache.h>
27 #include <net/netfilter/nf_conntrack_seqadj.h>
28 #include <net/netfilter/nf_conntrack_synproxy.h>
29 #include <net/netfilter/nf_conntrack_timeout.h>
30 #include <net/netfilter/nf_log.h>
31 #include <net/netfilter/ipv4/nf_conntrack_ipv4.h>
32 #include <net/netfilter/ipv6/nf_conntrack_ipv6.h>
33
34   /* FIXME: Examine ipfilter's timeouts and conntrack transitions more
35      closely.  They're more complex. --RR */
36
37 static const char *const tcp_conntrack_names[] = {
38         "NONE",
39         "SYN_SENT",
40         "SYN_RECV",
41         "ESTABLISHED",
42         "FIN_WAIT",
43         "CLOSE_WAIT",
44         "LAST_ACK",
45         "TIME_WAIT",
46         "CLOSE",
47         "SYN_SENT2",
48 };
49
50 enum nf_ct_tcp_action {
51         NFCT_TCP_IGNORE,
52         NFCT_TCP_INVALID,
53         NFCT_TCP_ACCEPT,
54 };
55
56 #define SECS * HZ
57 #define MINS * 60 SECS
58 #define HOURS * 60 MINS
59 #define DAYS * 24 HOURS
60
61 static const unsigned int tcp_timeouts[TCP_CONNTRACK_TIMEOUT_MAX] = {
62         [TCP_CONNTRACK_SYN_SENT]        = 2 MINS,
63         [TCP_CONNTRACK_SYN_RECV]        = 60 SECS,
64         [TCP_CONNTRACK_ESTABLISHED]     = 5 DAYS,
65         [TCP_CONNTRACK_FIN_WAIT]        = 2 MINS,
66         [TCP_CONNTRACK_CLOSE_WAIT]      = 60 SECS,
67         [TCP_CONNTRACK_LAST_ACK]        = 30 SECS,
68         [TCP_CONNTRACK_TIME_WAIT]       = 2 MINS,
69         [TCP_CONNTRACK_CLOSE]           = 10 SECS,
70         [TCP_CONNTRACK_SYN_SENT2]       = 2 MINS,
71 /* RFC1122 says the R2 limit should be at least 100 seconds.
72    Linux uses 15 packets as limit, which corresponds
73    to ~13-30min depending on RTO. */
74         [TCP_CONNTRACK_RETRANS]         = 5 MINS,
75         [TCP_CONNTRACK_UNACK]           = 5 MINS,
76 };
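/* For reference: the SECS/MINS/HOURS/DAYS helpers above splice a multiplier
 * chain after the literal, so [TCP_CONNTRACK_ESTABLISHED] = 5 DAYS expands
 * to 5 * 24 * 60 * 60 * HZ jiffies (432000 seconds).  At runtime these
 * defaults are normally tuned through the per-netns sysctls, e.g.
 * net.netfilter.nf_conntrack_tcp_timeout_established (in seconds).
 */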
77
78 #define sNO TCP_CONNTRACK_NONE
79 #define sSS TCP_CONNTRACK_SYN_SENT
80 #define sSR TCP_CONNTRACK_SYN_RECV
81 #define sES TCP_CONNTRACK_ESTABLISHED
82 #define sFW TCP_CONNTRACK_FIN_WAIT
83 #define sCW TCP_CONNTRACK_CLOSE_WAIT
84 #define sLA TCP_CONNTRACK_LAST_ACK
85 #define sTW TCP_CONNTRACK_TIME_WAIT
86 #define sCL TCP_CONNTRACK_CLOSE
87 #define sS2 TCP_CONNTRACK_SYN_SENT2
88 #define sIV TCP_CONNTRACK_MAX
89 #define sIG TCP_CONNTRACK_IGNORE
90
91 /* What TCP flags are set from RST/SYN/FIN/ACK. */
92 enum tcp_bit_set {
93         TCP_SYN_SET,
94         TCP_SYNACK_SET,
95         TCP_FIN_SET,
96         TCP_ACK_SET,
97         TCP_RST_SET,
98         TCP_NONE_SET,
99 };
100
101 /*
102  * The TCP state transition table needs a few words...
103  *
104  * We are the man in the middle. All the packets go through us
105  * but might get lost in transit to the destination.
106  * It is assumed that the destinations can't receive segments
107  * we haven't seen.
108  *
109  * The checked segment is in window, but our windows are *not*
110  * equivalent to those of the sender/receiver. We always
111  * try to guess the state of the current sender.
112  *
113  * The meaning of the states are:
114  *
115  * NONE:        initial state
116  * SYN_SENT:    SYN-only packet seen
117  * SYN_SENT2:   SYN-only packet seen from reply dir, simultaneous open
118  * SYN_RECV:    SYN-ACK packet seen
119  * ESTABLISHED: ACK packet seen
120  * FIN_WAIT:    FIN packet seen
121  * CLOSE_WAIT:  ACK seen (after FIN)
122  * LAST_ACK:    FIN seen (after FIN)
123  * TIME_WAIT:   last ACK seen
124  * CLOSE:       closed connection (RST)
125  *
126  * Packets marked as IGNORED (sIG):
127  *      packets which may be either valid or invalid
128  *      and to which the receiver may reply with a
129  *      connection-closing RST or a SYN/ACK.
130  *
131  * Packets marked as INVALID (sIV):
132  *      packets we regard as truly invalid.
133  */
134 static const u8 tcp_conntracks[2][6][TCP_CONNTRACK_MAX] = {
135         {
136 /* ORIGINAL */
137 /*           sNO, sSS, sSR, sES, sFW, sCW, sLA, sTW, sCL, sS2   */
138 /*syn*/    { sSS, sSS, sIG, sIG, sIG, sIG, sIG, sSS, sSS, sS2 },
139 /*
140  *      sNO -> sSS      Initialize a new connection
141  *      sSS -> sSS      Retransmitted SYN
142  *      sS2 -> sS2      Late retransmitted SYN
143  *      sSR -> sIG
144  *      sES -> sIG      Error: SYNs in window outside the SYN_SENT state
145  *                      are errors. Receiver will reply with RST
146  *                      and close the connection.
147  *                      Or we are not in sync and hold a dead connection.
148  *      sFW -> sIG
149  *      sCW -> sIG
150  *      sLA -> sIG
151  *      sTW -> sSS      Reopened connection (RFC 1122).
152  *      sCL -> sSS
153  */
154 /*           sNO, sSS, sSR, sES, sFW, sCW, sLA, sTW, sCL, sS2   */
155 /*synack*/ { sIV, sIV, sSR, sIV, sIV, sIV, sIV, sIV, sIV, sSR },
156 /*
157  *      sNO -> sIV      Too late and no reason to do anything
158  *      sSS -> sIV      Client can't send SYN and then SYN/ACK
159  *      sS2 -> sSR      SYN/ACK sent to SYN2 in simultaneous open
160  *      sSR -> sSR      Late retransmitted SYN/ACK in simultaneous open
161  *      sES -> sIV      Invalid SYN/ACK packets sent by the client
162  *      sFW -> sIV
163  *      sCW -> sIV
164  *      sLA -> sIV
165  *      sTW -> sIV
166  *      sCL -> sIV
167  */
168 /*           sNO, sSS, sSR, sES, sFW, sCW, sLA, sTW, sCL, sS2   */
169 /*fin*/    { sIV, sIV, sFW, sFW, sLA, sLA, sLA, sTW, sCL, sIV },
170 /*
171  *      sNO -> sIV      Too late and no reason to do anything...
172  *      sSS -> sIV      Client might not send FIN in this state:
173  *                      we enforce waiting for a SYN/ACK reply first.
174  *      sS2 -> sIV
175  *      sSR -> sFW      Close started.
176  *      sES -> sFW
177  *      sFW -> sLA      FIN seen in both directions, waiting for
178  *                      the last ACK.
179  *                      Might be a retransmitted FIN as well...
180  *      sCW -> sLA
181  *      sLA -> sLA      Retransmitted FIN. Remain in the same state.
182  *      sTW -> sTW
183  *      sCL -> sCL
184  */
185 /*           sNO, sSS, sSR, sES, sFW, sCW, sLA, sTW, sCL, sS2   */
186 /*ack*/    { sES, sIV, sES, sES, sCW, sCW, sTW, sTW, sCL, sIV },
187 /*
188  *      sNO -> sES      Assumed.
189  *      sSS -> sIV      ACK is invalid: we haven't seen a SYN/ACK yet.
190  *      sS2 -> sIV
191  *      sSR -> sES      Established state is reached.
192  *      sES -> sES      :-)
193  *      sFW -> sCW      Normal close request answered by ACK.
194  *      sCW -> sCW
195  *      sLA -> sTW      Last ACK detected (RFC5961 challenged)
196  *      sTW -> sTW      Retransmitted last ACK. Remain in the same state.
197  *      sCL -> sCL
198  */
199 /*           sNO, sSS, sSR, sES, sFW, sCW, sLA, sTW, sCL, sS2   */
200 /*rst*/    { sIV, sCL, sCL, sCL, sCL, sCL, sCL, sCL, sCL, sCL },
201 /*none*/   { sIV, sIV, sIV, sIV, sIV, sIV, sIV, sIV, sIV, sIV }
202         },
203         {
204 /* REPLY */
205 /*           sNO, sSS, sSR, sES, sFW, sCW, sLA, sTW, sCL, sS2   */
206 /*syn*/    { sIV, sS2, sIV, sIV, sIV, sIV, sIV, sSS, sIV, sS2 },
207 /*
208  *      sNO -> sIV      Never reached.
209  *      sSS -> sS2      Simultaneous open
210  *      sS2 -> sS2      Retransmitted simultaneous SYN
211  *      sSR -> sIV      Invalid SYN packets sent by the server
212  *      sES -> sIV
213  *      sFW -> sIV
214  *      sCW -> sIV
215  *      sLA -> sIV
216  *      sTW -> sSS      Reopened connection, but server may have switched role
217  *      sCL -> sIV
218  */
219 /*           sNO, sSS, sSR, sES, sFW, sCW, sLA, sTW, sCL, sS2   */
220 /*synack*/ { sIV, sSR, sIG, sIG, sIG, sIG, sIG, sIG, sIG, sSR },
221 /*
222  *      sSS -> sSR      Standard open.
223  *      sS2 -> sSR      Simultaneous open
224  *      sSR -> sIG      Retransmitted SYN/ACK, ignore it.
225  *      sES -> sIG      Late retransmitted SYN/ACK?
226  *      sFW -> sIG      Might be SYN/ACK answering ignored SYN
227  *      sCW -> sIG
228  *      sLA -> sIG
229  *      sTW -> sIG
230  *      sCL -> sIG
231  */
232 /*           sNO, sSS, sSR, sES, sFW, sCW, sLA, sTW, sCL, sS2   */
233 /*fin*/    { sIV, sIV, sFW, sFW, sLA, sLA, sLA, sTW, sCL, sIV },
234 /*
235  *      sSS -> sIV      Server might not send FIN in this state.
236  *      sS2 -> sIV
237  *      sSR -> sFW      Close started.
238  *      sES -> sFW
239  *      sFW -> sLA      FIN seen in both directions.
240  *      sCW -> sLA
241  *      sLA -> sLA      Retransmitted FIN.
242  *      sTW -> sTW
243  *      sCL -> sCL
244  */
245 /*           sNO, sSS, sSR, sES, sFW, sCW, sLA, sTW, sCL, sS2   */
246 /*ack*/    { sIV, sIG, sSR, sES, sCW, sCW, sTW, sTW, sCL, sIG },
247 /*
248  *      sSS -> sIG      Might be a half-open connection.
249  *      sS2 -> sIG
250  *      sSR -> sSR      Might answer late resent SYN.
251  *      sES -> sES      :-)
252  *      sFW -> sCW      Normal close request answered by ACK.
253  *      sCW -> sCW
254  *      sLA -> sTW      Last ACK detected (RFC5961 challenged)
255  *      sTW -> sTW      Retransmitted last ACK.
256  *      sCL -> sCL
257  */
258 /*           sNO, sSS, sSR, sES, sFW, sCW, sLA, sTW, sCL, sS2   */
259 /*rst*/    { sIV, sCL, sCL, sCL, sCL, sCL, sCL, sCL, sCL, sCL },
260 /*none*/   { sIV, sIV, sIV, sIV, sIV, sIV, sIV, sIV, sIV, sIV }
261         }
262 };
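/* Reading the table: it is indexed as tcp_conntracks[dir][segment type][current state].
 * For example, an ACK seen in the ORIGINAL direction while the connection is in
 * SYN_RECV (sSR) yields sES, i.e. the tracker moves to ESTABLISHED, matching the
 * per-row annotations above.
 */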
263
264 #ifdef CONFIG_NF_CONNTRACK_PROCFS
265 /* Print out the private part of the conntrack. */
266 static void tcp_print_conntrack(struct seq_file *s, struct nf_conn *ct)
267 {
268         if (test_bit(IPS_OFFLOAD_BIT, &ct->status))
269                 return;
270
271         seq_printf(s, "%s ", tcp_conntrack_names[ct->proto.tcp.state]);
272 }
273 #endif
274
275 static unsigned int get_conntrack_index(const struct tcphdr *tcph)
276 {
277         if (tcph->rst) return TCP_RST_SET;
278         else if (tcph->syn) return (tcph->ack ? TCP_SYNACK_SET : TCP_SYN_SET);
279         else if (tcph->fin) return TCP_FIN_SET;
280         else if (tcph->ack) return TCP_ACK_SET;
281         else return TCP_NONE_SET;
282 }
283
284 /* TCP connection tracking based on 'Real Stateful TCP Packet Filtering
285    in IP Filter' by Guido van Rooij.
286
287    http://www.sane.nl/events/sane2000/papers.html
288    http://www.darkart.com/mirrors/www.obfuscation.org/ipf/
289
290    The boundaries and the conditions are changed according to RFC793:
291    the packet must intersect the window (i.e. segments may be
292    after the right or before the left edge) and thus receivers may ACK
293    segments after the right edge of the window.
294
295         td_maxend = max(sack + max(win,1)) seen in reply packets
296         td_maxwin = max(max(win, 1)) + (sack - ack) seen in sent packets
297         td_maxwin += seq + len - sender.td_maxend
298                         if seq + len > sender.td_maxend
299         td_end    = max(seq + len) seen in sent packets
300
301    I.   Upper bound for valid data:     seq <= sender.td_maxend
302    II.  Lower bound for valid data:     seq + len >= sender.td_end - receiver.td_maxwin
303    III. Upper bound for valid (s)ack:   sack <= receiver.td_end
304    IV.  Lower bound for valid (s)ack:   sack >= receiver.td_end - MAXACKWINDOW
305
306    where sack is the highest right edge of a SACK block found in the packet,
307    or ack in the case of a packet without the SACK option.
308
309    The upper bound limit for a valid (s)ack is not ignored -
310    we don't have to deal with fragments.
311 */
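/* A small worked example with made-up numbers: suppose the sender so far has
 * td_end = 2000 and td_maxend = 3000, and the receiver has td_end = 500 and
 * td_maxwin = 1000.  A segment with seq = 1500, len = 100 and ack = 400 then
 * satisfies all four checks:
 *   I.   1500        <= 3000
 *   II.  1500 + 100  >= 2000 - 1000
 *   III. 400         <= 500
 *   IV.  400         >= 500 - MAXACKWINDOW(sender)
 */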
312
313 static inline __u32 segment_seq_plus_len(__u32 seq,
314                                          size_t len,
315                                          unsigned int dataoff,
316                                          const struct tcphdr *tcph)
317 {
318         /* XXX Should I use payload length field in IP/IPv6 header ?
319          * - YK */
320         return (seq + len - dataoff - tcph->doff*4
321                 + (tcph->syn ? 1 : 0) + (tcph->fin ? 1 : 0));
322 }
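/* For instance: for a pure SYN with no payload, skb->len - dataoff equals
 * tcph->doff * 4, so the result is seq + 1 (the SYN itself consumes one
 * sequence number); for a 100-byte data segment without SYN/FIN it is
 * seq + 100.
 */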
323
324 /* Fixme: what about big packets? */
325 #define MAXACKWINCONST                  66000
326 #define MAXACKWINDOW(sender)                                            \
327         ((sender)->td_maxwin > MAXACKWINCONST ? (sender)->td_maxwin     \
328                                               : MAXACKWINCONST)
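/* Note that MAXACKWINDOW(sender) is never smaller than 66000, so the lower
 * bound for a valid (s)ack in check IV always allows more slack than any
 * unscaled 16-bit window (at most 65535).
 */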
329
330 /*
331  * Simplified tcp_parse_options routine from tcp_input.c
332  */
333 static void tcp_options(const struct sk_buff *skb,
334                         unsigned int dataoff,
335                         const struct tcphdr *tcph,
336                         struct ip_ct_tcp_state *state)
337 {
338         unsigned char buff[(15 * 4) - sizeof(struct tcphdr)];
339         const unsigned char *ptr;
340         int length = (tcph->doff*4) - sizeof(struct tcphdr);
341
342         if (!length)
343                 return;
344
345         ptr = skb_header_pointer(skb, dataoff + sizeof(struct tcphdr),
346                                  length, buff);
347         if (!ptr)
348                 return;
349
350         state->td_scale = 0;
351         state->flags &= IP_CT_TCP_FLAG_BE_LIBERAL;
352
353         while (length > 0) {
354                 int opcode=*ptr++;
355                 int opsize;
356
357                 switch (opcode) {
358                 case TCPOPT_EOL:
359                         return;
360                 case TCPOPT_NOP:        /* Ref: RFC 793 section 3.1 */
361                         length--;
362                         continue;
363                 default:
364                         if (length < 2)
365                                 return;
366                         opsize=*ptr++;
367                         if (opsize < 2) /* "silly options" */
368                                 return;
369                         if (opsize > length)
370                                 return; /* don't parse partial options */
371
372                         if (opcode == TCPOPT_SACK_PERM
373                             && opsize == TCPOLEN_SACK_PERM)
374                                 state->flags |= IP_CT_TCP_FLAG_SACK_PERM;
375                         else if (opcode == TCPOPT_WINDOW
376                                  && opsize == TCPOLEN_WINDOW) {
377                                 state->td_scale = *(u_int8_t *)ptr;
378
379                                 if (state->td_scale > TCP_MAX_WSCALE)
380                                         state->td_scale = TCP_MAX_WSCALE;
381
382                                 state->flags |=
383                                         IP_CT_TCP_FLAG_WINDOW_SCALE;
384                         }
385                         ptr += opsize - 2;
386                         length -= opsize;
387                 }
388         }
389 }
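/* Example: for a SYN carrying MSS, SACK-permitted, timestamps, NOP and
 * window scale options, the loop above sets both IP_CT_TCP_FLAG_SACK_PERM
 * and IP_CT_TCP_FLAG_WINDOW_SCALE and records the advertised shift count
 * in td_scale (clamped to TCP_MAX_WSCALE).
 */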
390
391 static void tcp_sack(const struct sk_buff *skb, unsigned int dataoff,
392                      const struct tcphdr *tcph, __u32 *sack)
393 {
394         unsigned char buff[(15 * 4) - sizeof(struct tcphdr)];
395         const unsigned char *ptr;
396         int length = (tcph->doff*4) - sizeof(struct tcphdr);
397         __u32 tmp;
398
399         if (!length)
400                 return;
401
402         ptr = skb_header_pointer(skb, dataoff + sizeof(struct tcphdr),
403                                  length, buff);
404         if (!ptr)
405                 return;
406
407         /* Fast path for timestamp-only option */
408         if (length == TCPOLEN_TSTAMP_ALIGNED
409             && *(__be32 *)ptr == htonl((TCPOPT_NOP << 24)
410                                        | (TCPOPT_NOP << 16)
411                                        | (TCPOPT_TIMESTAMP << 8)
412                                        | TCPOLEN_TIMESTAMP))
413                 return;
414
415         while (length > 0) {
416                 int opcode = *ptr++;
417                 int opsize, i;
418
419                 switch (opcode) {
420                 case TCPOPT_EOL:
421                         return;
422                 case TCPOPT_NOP:        /* Ref: RFC 793 section 3.1 */
423                         length--;
424                         continue;
425                 default:
426                         if (length < 2)
427                                 return;
428                         opsize = *ptr++;
429                         if (opsize < 2) /* "silly options" */
430                                 return;
431                         if (opsize > length)
432                                 return; /* don't parse partial options */
433
434                         if (opcode == TCPOPT_SACK
435                             && opsize >= (TCPOLEN_SACK_BASE
436                                           + TCPOLEN_SACK_PERBLOCK)
437                             && !((opsize - TCPOLEN_SACK_BASE)
438                                  % TCPOLEN_SACK_PERBLOCK)) {
439                                 for (i = 0;
440                                      i < (opsize - TCPOLEN_SACK_BASE);
441                                      i += TCPOLEN_SACK_PERBLOCK) {
442                                         tmp = get_unaligned_be32((__be32 *)(ptr+i)+1);
443
444                                         if (after(tmp, *sack))
445                                                 *sack = tmp;
446                                 }
447                                 return;
448                         }
449                         ptr += opsize - 2;
450                         length -= opsize;
451                 }
452         }
453 }
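/* Example: a SACK option of kind 5 and length 18 carries two blocks of two
 * 32-bit sequence numbers each; the loop above looks only at the right edge
 * of every block ((__be32 *)(ptr + i) + 1) and keeps the highest one seen
 * in *sack.
 */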
454
455 static void tcp_init_sender(struct ip_ct_tcp_state *sender,
456                             struct ip_ct_tcp_state *receiver,
457                             const struct sk_buff *skb,
458                             unsigned int dataoff,
459                             const struct tcphdr *tcph,
460                             u32 end, u32 win)
461 {
462         /* SYN-ACK in reply to a SYN
463          * or SYN from reply direction in simultaneous open.
464          */
465         sender->td_end =
466         sender->td_maxend = end;
467         sender->td_maxwin = (win == 0 ? 1 : win);
468
469         tcp_options(skb, dataoff, tcph, sender);
470         /* RFC 1323:
471          * Both sides must send the Window Scale option
472          * to enable window scaling in either direction.
473          */
474         if (!(sender->flags & IP_CT_TCP_FLAG_WINDOW_SCALE &&
475               receiver->flags & IP_CT_TCP_FLAG_WINDOW_SCALE)) {
476                 sender->td_scale = 0;
477                 receiver->td_scale = 0;
478         }
479 }
480
481 __printf(6, 7)
482 static enum nf_ct_tcp_action nf_tcp_log_invalid(const struct sk_buff *skb,
483                                                 const struct nf_conn *ct,
484                                                 const struct nf_hook_state *state,
485                                                 const struct ip_ct_tcp_state *sender,
486                                                 enum nf_ct_tcp_action ret,
487                                                 const char *fmt, ...)
488 {
489         const struct nf_tcp_net *tn = nf_tcp_pernet(nf_ct_net(ct));
490         struct va_format vaf;
491         va_list args;
492         bool be_liberal;
493
494         be_liberal = sender->flags & IP_CT_TCP_FLAG_BE_LIBERAL || tn->tcp_be_liberal;
495         if (be_liberal)
496                 return NFCT_TCP_ACCEPT;
497
498         va_start(args, fmt);
499         vaf.fmt = fmt;
500         vaf.va = &args;
501         nf_ct_l4proto_log_invalid(skb, ct, state, "%pV", &vaf);
502         va_end(args);
503
504         return ret;
505 }
506
507 static enum nf_ct_tcp_action
508 tcp_in_window(struct nf_conn *ct, enum ip_conntrack_dir dir,
509               unsigned int index, const struct sk_buff *skb,
510               unsigned int dataoff, const struct tcphdr *tcph,
511               const struct nf_hook_state *hook_state)
512 {
513         struct ip_ct_tcp *state = &ct->proto.tcp;
514         struct ip_ct_tcp_state *sender = &state->seen[dir];
515         struct ip_ct_tcp_state *receiver = &state->seen[!dir];
516         __u32 seq, ack, sack, end, win, swin;
517         bool in_recv_win, seq_ok;
518         s32 receiver_offset;
519         u16 win_raw;
520
521         /*
522          * Get the required data from the packet.
523          */
524         seq = ntohl(tcph->seq);
525         ack = sack = ntohl(tcph->ack_seq);
526         win_raw = ntohs(tcph->window);
527         win = win_raw;
528         end = segment_seq_plus_len(seq, skb->len, dataoff, tcph);
529
530         if (receiver->flags & IP_CT_TCP_FLAG_SACK_PERM)
531                 tcp_sack(skb, dataoff, tcph, &sack);
532
533         /* Take into account NAT sequence number mangling */
534         receiver_offset = nf_ct_seq_offset(ct, !dir, ack - 1);
535         ack -= receiver_offset;
536         sack -= receiver_offset;
537
538         if (sender->td_maxwin == 0) {
539                 /*
540                  * Initialize sender data.
541                  */
542                 if (tcph->syn) {
543                         tcp_init_sender(sender, receiver,
544                                         skb, dataoff, tcph,
545                                         end, win);
546                         if (!tcph->ack)
547                                 /* Simultaneous open */
548                                 return NFCT_TCP_ACCEPT;
549                 } else {
550                         /*
551                          * We are in the middle of a connection,
552                          * its history is lost for us.
553                          * Let's try to use the data from the packet.
554                          */
555                         sender->td_end = end;
556                         swin = win << sender->td_scale;
557                         sender->td_maxwin = (swin == 0 ? 1 : swin);
558                         sender->td_maxend = end + sender->td_maxwin;
559                         if (receiver->td_maxwin == 0) {
560                                 /* We haven't seen traffic in the other
561                                  * direction yet but we have to tweak window
562                                  * tracking to pass III and IV until that
563                                  * happens.
564                                  */
565                                 receiver->td_end = receiver->td_maxend = sack;
566                         } else if (sack == receiver->td_end + 1) {
567                                 /* Likely a reply to a keepalive.
568                                  * Needed for III.
569                                  */
570                                 receiver->td_end++;
571                         }
572
573                 }
574         } else if (tcph->syn &&
575                    after(end, sender->td_end) &&
576                    (state->state == TCP_CONNTRACK_SYN_SENT ||
577                     state->state == TCP_CONNTRACK_SYN_RECV)) {
578                 /*
579                  * RFC 793: "if a TCP is reinitialized ... then it need
580                  * not wait at all; it must only be sure to use sequence
581                  * numbers larger than those recently used."
582                  *
583                  * Re-init state for this direction, just like for the first
584                  * syn(-ack) reply, it might differ in seq, ack or tcp options.
585                  */
586                 tcp_init_sender(sender, receiver,
587                                 skb, dataoff, tcph,
588                                 end, win);
589
590                 if (dir == IP_CT_DIR_REPLY && !tcph->ack)
591                         return NFCT_TCP_ACCEPT;
592         }
593
594         if (!(tcph->ack)) {
595                 /*
596                  * If there is no ACK, just pretend it was set and OK.
597                  */
598                 ack = sack = receiver->td_end;
599         } else if (((tcp_flag_word(tcph) & (TCP_FLAG_ACK|TCP_FLAG_RST)) ==
600                     (TCP_FLAG_ACK|TCP_FLAG_RST))
601                    && (ack == 0)) {
602                 /*
603                  * Broken TCP stacks, that set ACK in RST packets as well
604                  * with zero ack value.
605                  */
606                 ack = sack = receiver->td_end;
607         }
608
609         if (tcph->rst && seq == 0 && state->state == TCP_CONNTRACK_SYN_SENT)
610                 /*
611                  * RST sent answering SYN.
612                  */
613                 seq = end = sender->td_end;
614
615         seq_ok = before(seq, sender->td_maxend + 1);
616         if (!seq_ok) {
617                 u32 overshot = end - sender->td_maxend + 1;
618                 bool ack_ok;
619
620                 ack_ok = after(sack, receiver->td_end - MAXACKWINDOW(sender) - 1);
621                 in_recv_win = receiver->td_maxwin &&
622                               after(end, sender->td_end - receiver->td_maxwin - 1);
623
624                 if (in_recv_win &&
625                     ack_ok &&
626                     overshot <= receiver->td_maxwin &&
627                     before(sack, receiver->td_end + 1)) {
628                         /* Work around TCPs that send more bytes than allowed by
629                          * the receive window.
630                          *
631                          * If the (marked as invalid) packet is allowed to pass by
632                          * the ruleset and the peer ACKs this data, then it's possible
633                          * all future packets will trigger 'ACK is over upper bound' check.
634                          *
635                          * Thus, if only the sequence check fails, update td_end so that a
636                          * possible ACK for this data can update the internal state.
637                          */
638                         sender->td_end = end;
639                         sender->flags |= IP_CT_TCP_FLAG_DATA_UNACKNOWLEDGED;
640
641                         return nf_tcp_log_invalid(skb, ct, hook_state, sender, NFCT_TCP_IGNORE,
642                                                   "%u bytes more than expected", overshot);
643                 }
644
645                 return nf_tcp_log_invalid(skb, ct, hook_state, sender, NFCT_TCP_INVALID,
646                                           "SEQ is over upper bound %u (over the window of the receiver)",
647                                           sender->td_maxend + 1);
648         }
649
650         if (!before(sack, receiver->td_end + 1))
651                 return nf_tcp_log_invalid(skb, ct, hook_state, sender, NFCT_TCP_INVALID,
652                                           "ACK is over upper bound %u (ACKed data not seen yet)",
653                                           receiver->td_end + 1);
654
655         /* Is the ending sequence in the receive window (if available)? */
656         in_recv_win = !receiver->td_maxwin ||
657                       after(end, sender->td_end - receiver->td_maxwin - 1);
658         if (!in_recv_win)
659                 return nf_tcp_log_invalid(skb, ct, hook_state, sender, NFCT_TCP_IGNORE,
660                                           "SEQ is under lower bound %u (already ACKed data retransmitted)",
661                                           sender->td_end - receiver->td_maxwin - 1);
662         if (!after(sack, receiver->td_end - MAXACKWINDOW(sender) - 1))
663                 return nf_tcp_log_invalid(skb, ct, hook_state, sender, NFCT_TCP_IGNORE,
664                                           "ignored ACK under lower bound %u (possibly overly delayed)",
665                                           receiver->td_end - MAXACKWINDOW(sender) - 1);
666
667         /* Take into account window scaling (RFC 1323). */
668         if (!tcph->syn)
669                 win <<= sender->td_scale;
670
671         /* Update sender data. */
672         swin = win + (sack - ack);
673         if (sender->td_maxwin < swin)
674                 sender->td_maxwin = swin;
675         if (after(end, sender->td_end)) {
676                 sender->td_end = end;
677                 sender->flags |= IP_CT_TCP_FLAG_DATA_UNACKNOWLEDGED;
678         }
679         if (tcph->ack) {
680                 if (!(sender->flags & IP_CT_TCP_FLAG_MAXACK_SET)) {
681                         sender->td_maxack = ack;
682                         sender->flags |= IP_CT_TCP_FLAG_MAXACK_SET;
683                 } else if (after(ack, sender->td_maxack)) {
684                         sender->td_maxack = ack;
685                 }
686         }
687
688         /* Update receiver data. */
689         if (receiver->td_maxwin != 0 && after(end, sender->td_maxend))
690                 receiver->td_maxwin += end - sender->td_maxend;
691         if (after(sack + win, receiver->td_maxend - 1)) {
692                 receiver->td_maxend = sack + win;
693                 if (win == 0)
694                         receiver->td_maxend++;
695         }
696         if (ack == receiver->td_end)
697                 receiver->flags &= ~IP_CT_TCP_FLAG_DATA_UNACKNOWLEDGED;
698
699         /* Check retransmissions. */
700         if (index == TCP_ACK_SET) {
701                 if (state->last_dir == dir &&
702                     state->last_seq == seq &&
703                     state->last_ack == ack &&
704                     state->last_end == end &&
705                     state->last_win == win_raw) {
706                         state->retrans++;
707                 } else {
708                         state->last_dir = dir;
709                         state->last_seq = seq;
710                         state->last_ack = ack;
711                         state->last_end = end;
712                         state->last_win = win_raw;
713                         state->retrans = 0;
714                 }
715         }
716
717         return NFCT_TCP_ACCEPT;
718 }
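/* The caller in nf_conntrack_tcp_packet() maps these verdicts as follows:
 * NFCT_TCP_ACCEPT continues with normal state and timeout handling,
 * NFCT_TCP_IGNORE accepts the packet without updating the tracked state,
 * and NFCT_TCP_INVALID results in -NF_ACCEPT after nf_tcp_handle_invalid()
 * has had a chance to shrink the conntrack timeout.
 */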
719
720 static void __cold nf_tcp_handle_invalid(struct nf_conn *ct,
721                                          enum ip_conntrack_dir dir,
722                                          int index,
723                                          const struct sk_buff *skb,
724                                          const struct nf_hook_state *hook_state)
725 {
726         const unsigned int *timeouts;
727         const struct nf_tcp_net *tn;
728         unsigned int timeout;
729         u32 expires;
730
731         if (!test_bit(IPS_ASSURED_BIT, &ct->status) ||
732             test_bit(IPS_FIXED_TIMEOUT_BIT, &ct->status))
733                 return;
734
735         /* We don't want to have connections hanging around in ESTABLISHED
736          * state for long time 'just because' conntrack deemed a FIN/RST
737          * out-of-window.
738          *
739          * Shrink the timeout just like when there is unacked data.
740          * This speeds up eviction of 'dead' connections where the
741          * connection and conntracks internal state are out of sync.
742          */
743         switch (index) {
744         case TCP_RST_SET:
745         case TCP_FIN_SET:
746                 break;
747         default:
748                 return;
749         }
750
751         if (ct->proto.tcp.last_dir != dir &&
752             (ct->proto.tcp.last_index == TCP_FIN_SET ||
753              ct->proto.tcp.last_index == TCP_RST_SET)) {
754                 expires = nf_ct_expires(ct);
755                 if (expires < 120 * HZ)
756                         return;
757
758                 tn = nf_tcp_pernet(nf_ct_net(ct));
759                 timeouts = nf_ct_timeout_lookup(ct);
760                 if (!timeouts)
761                         timeouts = tn->timeouts;
762
763                 timeout = READ_ONCE(timeouts[TCP_CONNTRACK_UNACK]);
764                 if (expires > timeout) {
765                         nf_ct_l4proto_log_invalid(skb, ct, hook_state,
766                                           "packet (index %d, dir %d) response for index %d lower timeout to %u",
767                                           index, dir, ct->proto.tcp.last_index, timeout);
768
769                         WRITE_ONCE(ct->timeout, timeout + nfct_time_stamp);
770                 }
771         } else {
772                 ct->proto.tcp.last_index = index;
773                 ct->proto.tcp.last_dir = dir;
774         }
775 }
776
777 /* table of valid flag combinations - PUSH, ECE and CWR are always valid */
778 static const u8 tcp_valid_flags[(TCPHDR_FIN|TCPHDR_SYN|TCPHDR_RST|TCPHDR_ACK|
779                                  TCPHDR_URG) + 1] =
780 {
781         [TCPHDR_SYN]                            = 1,
782         [TCPHDR_SYN|TCPHDR_URG]                 = 1,
783         [TCPHDR_SYN|TCPHDR_ACK]                 = 1,
784         [TCPHDR_RST]                            = 1,
785         [TCPHDR_RST|TCPHDR_ACK]                 = 1,
786         [TCPHDR_FIN|TCPHDR_ACK]                 = 1,
787         [TCPHDR_FIN|TCPHDR_ACK|TCPHDR_URG]      = 1,
788         [TCPHDR_ACK]                            = 1,
789         [TCPHDR_ACK|TCPHDR_URG]                 = 1,
790 };
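/* Any combination not listed above maps to 0: e.g. a bare FIN without ACK,
 * or a SYN|FIN probe, is reported by tcp_error() below as an invalid tcp
 * flag combination and the packet is not tracked.
 */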
791
792 static void tcp_error_log(const struct sk_buff *skb,
793                           const struct nf_hook_state *state,
794                           const char *msg)
795 {
796         nf_l4proto_log_invalid(skb, state, IPPROTO_TCP, "%s", msg);
797 }
798
799 /* Protect conntrack against broken packets. Code taken from ipt_unclean.c.  */
800 static bool tcp_error(const struct tcphdr *th,
801                       struct sk_buff *skb,
802                       unsigned int dataoff,
803                       const struct nf_hook_state *state)
804 {
805         unsigned int tcplen = skb->len - dataoff;
806         u8 tcpflags;
807
808         /* Not whole TCP header or malformed packet */
809         if (th->doff*4 < sizeof(struct tcphdr) || tcplen < th->doff*4) {
810                 tcp_error_log(skb, state, "truncated packet");
811                 return true;
812         }
813
814         /* Checksum invalid? Ignore.
815          * We skip checking packets on the outgoing path
816          * because the checksum is assumed to be correct.
817          */
818         /* FIXME: Source route IP option packets --RR */
819         if (state->net->ct.sysctl_checksum &&
820             state->hook == NF_INET_PRE_ROUTING &&
821             nf_checksum(skb, state->hook, dataoff, IPPROTO_TCP, state->pf)) {
822                 tcp_error_log(skb, state, "bad checksum");
823                 return true;
824         }
825
826         /* Check TCP flags. */
827         tcpflags = (tcp_flag_byte(th) & ~(TCPHDR_ECE|TCPHDR_CWR|TCPHDR_PSH));
828         if (!tcp_valid_flags[tcpflags]) {
829                 tcp_error_log(skb, state, "invalid tcp flag combination");
830                 return true;
831         }
832
833         return false;
834 }
835
836 static noinline bool tcp_new(struct nf_conn *ct, const struct sk_buff *skb,
837                              unsigned int dataoff,
838                              const struct tcphdr *th,
839                              const struct nf_hook_state *state)
840 {
841         enum tcp_conntrack new_state;
842         struct net *net = nf_ct_net(ct);
843         const struct nf_tcp_net *tn = nf_tcp_pernet(net);
844
845         /* Don't need lock here: this conntrack not in circulation yet */
846         new_state = tcp_conntracks[0][get_conntrack_index(th)][TCP_CONNTRACK_NONE];
847
848         /* Invalid: delete conntrack */
849         if (new_state >= TCP_CONNTRACK_MAX) {
850                 tcp_error_log(skb, state, "invalid new");
851                 return false;
852         }
853
854         if (new_state == TCP_CONNTRACK_SYN_SENT) {
855                 memset(&ct->proto.tcp, 0, sizeof(ct->proto.tcp));
856                 /* SYN packet */
857                 ct->proto.tcp.seen[0].td_end =
858                         segment_seq_plus_len(ntohl(th->seq), skb->len,
859                                              dataoff, th);
860                 ct->proto.tcp.seen[0].td_maxwin = ntohs(th->window);
861                 if (ct->proto.tcp.seen[0].td_maxwin == 0)
862                         ct->proto.tcp.seen[0].td_maxwin = 1;
863                 ct->proto.tcp.seen[0].td_maxend =
864                         ct->proto.tcp.seen[0].td_end;
865
866                 tcp_options(skb, dataoff, th, &ct->proto.tcp.seen[0]);
867         } else if (tn->tcp_loose == 0) {
868                 /* Don't try to pick up connections. */
869                 return false;
870         } else {
871                 memset(&ct->proto.tcp, 0, sizeof(ct->proto.tcp));
872                 /*
873                  * We are in the middle of a connection,
874                  * its history is lost for us.
875                  * Let's try to use the data from the packet.
876                  */
877                 ct->proto.tcp.seen[0].td_end =
878                         segment_seq_plus_len(ntohl(th->seq), skb->len,
879                                              dataoff, th);
880                 ct->proto.tcp.seen[0].td_maxwin = ntohs(th->window);
881                 if (ct->proto.tcp.seen[0].td_maxwin == 0)
882                         ct->proto.tcp.seen[0].td_maxwin = 1;
883                 ct->proto.tcp.seen[0].td_maxend =
884                         ct->proto.tcp.seen[0].td_end +
885                         ct->proto.tcp.seen[0].td_maxwin;
886
887                 /* We assume SACK and liberal window checking to handle
888                  * window scaling */
889                 ct->proto.tcp.seen[0].flags =
890                 ct->proto.tcp.seen[1].flags = IP_CT_TCP_FLAG_SACK_PERM |
891                                               IP_CT_TCP_FLAG_BE_LIBERAL;
892         }
893
894         /* tcp_packet will set them */
895         ct->proto.tcp.last_index = TCP_NONE_SET;
896         return true;
897 }
898
899 static bool tcp_can_early_drop(const struct nf_conn *ct)
900 {
901         switch (ct->proto.tcp.state) {
902         case TCP_CONNTRACK_FIN_WAIT:
903         case TCP_CONNTRACK_LAST_ACK:
904         case TCP_CONNTRACK_TIME_WAIT:
905         case TCP_CONNTRACK_CLOSE:
906         case TCP_CONNTRACK_CLOSE_WAIT:
907                 return true;
908         default:
909                 break;
910         }
911
912         return false;
913 }
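/* The states listed above are the "already closing" ones; the conntrack core
 * may use this hint to evict such entries early when the connection tracking
 * table is under pressure.
 */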
914
915 void nf_conntrack_tcp_set_closing(struct nf_conn *ct)
916 {
917         enum tcp_conntrack old_state;
918         const unsigned int *timeouts;
919         u32 timeout;
920
921         if (!nf_ct_is_confirmed(ct))
922                 return;
923
924         spin_lock_bh(&ct->lock);
925         old_state = ct->proto.tcp.state;
926         ct->proto.tcp.state = TCP_CONNTRACK_CLOSE;
927
928         if (old_state == TCP_CONNTRACK_CLOSE ||
929             test_bit(IPS_FIXED_TIMEOUT_BIT, &ct->status)) {
930                 spin_unlock_bh(&ct->lock);
931                 return;
932         }
933
934         timeouts = nf_ct_timeout_lookup(ct);
935         if (!timeouts) {
936                 const struct nf_tcp_net *tn;
937
938                 tn = nf_tcp_pernet(nf_ct_net(ct));
939                 timeouts = tn->timeouts;
940         }
941
942         timeout = timeouts[TCP_CONNTRACK_CLOSE];
943         WRITE_ONCE(ct->timeout, timeout + nfct_time_stamp);
944
945         spin_unlock_bh(&ct->lock);
946
947         nf_conntrack_event_cache(IPCT_PROTOINFO, ct);
948 }
949
950 static void nf_ct_tcp_state_reset(struct ip_ct_tcp_state *state)
951 {
952         state->td_end           = 0;
953         state->td_maxend        = 0;
954         state->td_maxwin        = 0;
955         state->td_maxack        = 0;
956         state->td_scale         = 0;
957         state->flags            &= IP_CT_TCP_FLAG_BE_LIBERAL;
958 }
959
960 /* Returns verdict for packet, or -1 for invalid. */
961 int nf_conntrack_tcp_packet(struct nf_conn *ct,
962                             struct sk_buff *skb,
963                             unsigned int dataoff,
964                             enum ip_conntrack_info ctinfo,
965                             const struct nf_hook_state *state)
966 {
967         struct net *net = nf_ct_net(ct);
968         struct nf_tcp_net *tn = nf_tcp_pernet(net);
969         enum tcp_conntrack new_state, old_state;
970         unsigned int index, *timeouts;
971         enum nf_ct_tcp_action res;
972         enum ip_conntrack_dir dir;
973         const struct tcphdr *th;
974         struct tcphdr _tcph;
975         unsigned long timeout;
976
977         th = skb_header_pointer(skb, dataoff, sizeof(_tcph), &_tcph);
978         if (th == NULL)
979                 return -NF_ACCEPT;
980
981         if (tcp_error(th, skb, dataoff, state))
982                 return -NF_ACCEPT;
983
984         if (!nf_ct_is_confirmed(ct) && !tcp_new(ct, skb, dataoff, th, state))
985                 return -NF_ACCEPT;
986
987         spin_lock_bh(&ct->lock);
988         old_state = ct->proto.tcp.state;
989         dir = CTINFO2DIR(ctinfo);
990         index = get_conntrack_index(th);
991         new_state = tcp_conntracks[dir][index][old_state];
992
993         switch (new_state) {
994         case TCP_CONNTRACK_SYN_SENT:
995                 if (old_state < TCP_CONNTRACK_TIME_WAIT)
996                         break;
997                 /* RFC 1122: "When a connection is closed actively,
998                  * it MUST linger in TIME-WAIT state for a time 2xMSL
999                  * (Maximum Segment Lifetime). However, it MAY accept
1000                  * a new SYN from the remote TCP to reopen the connection
1001                  * directly from TIME-WAIT state, if..."
1002                  * We ignore the conditions because we are in the
1003                  * TIME-WAIT state anyway.
1004                  *
1005                  * Handle aborted connections: we and the server
1006                  * think there is an existing connection but the client
1007                  * aborts it and starts a new one.
1008                  */
1009                 if (((ct->proto.tcp.seen[dir].flags
1010                       | ct->proto.tcp.seen[!dir].flags)
1011                      & IP_CT_TCP_FLAG_CLOSE_INIT)
1012                     || (ct->proto.tcp.last_dir == dir
1013                         && ct->proto.tcp.last_index == TCP_RST_SET)) {
1014                         /* Attempt to reopen a closed/aborted connection.
1015                          * Delete this connection and look up again. */
1016                         spin_unlock_bh(&ct->lock);
1017
1018                         /* Only repeat if we can actually remove the timer.
1019                          * Destruction may already be in progress in process
1020                          * context and we must give it a chance to terminate.
1021                          */
1022                         if (nf_ct_kill(ct))
1023                                 return -NF_REPEAT;
1024                         return NF_DROP;
1025                 }
1026                 fallthrough;
1027         case TCP_CONNTRACK_IGNORE:
1028                 /* Ignored packets:
1029                  *
1030                  * Our connection entry may be out of sync, so ignore
1031                  * packets which may signal the real connection between
1032                  * the client and the server.
1033                  *
1034                  * a) SYN in ORIGINAL
1035                  * b) SYN/ACK in REPLY
1036                  * c) ACK in reply direction after initial SYN in original.
1037                  *
1038                  * If the ignored packet is invalid, the receiver will send
1039                  * a RST we'll catch below.
1040                  */
1041                 if (index == TCP_SYNACK_SET
1042                     && ct->proto.tcp.last_index == TCP_SYN_SET
1043                     && ct->proto.tcp.last_dir != dir
1044                     && ntohl(th->ack_seq) == ct->proto.tcp.last_end) {
1045                         /* b) This SYN/ACK acknowledges a SYN that we earlier
1046                          * ignored as invalid. This means that the client and
1047                          * the server are both in sync, while the firewall is
1048                          * not. We get in sync from the previously annotated
1049                          * values.
1050                          */
1051                         old_state = TCP_CONNTRACK_SYN_SENT;
1052                         new_state = TCP_CONNTRACK_SYN_RECV;
1053                         ct->proto.tcp.seen[ct->proto.tcp.last_dir].td_end =
1054                                 ct->proto.tcp.last_end;
1055                         ct->proto.tcp.seen[ct->proto.tcp.last_dir].td_maxend =
1056                                 ct->proto.tcp.last_end;
1057                         ct->proto.tcp.seen[ct->proto.tcp.last_dir].td_maxwin =
1058                                 ct->proto.tcp.last_win == 0 ?
1059                                         1 : ct->proto.tcp.last_win;
1060                         ct->proto.tcp.seen[ct->proto.tcp.last_dir].td_scale =
1061                                 ct->proto.tcp.last_wscale;
1062                         ct->proto.tcp.last_flags &= ~IP_CT_EXP_CHALLENGE_ACK;
1063                         ct->proto.tcp.seen[ct->proto.tcp.last_dir].flags =
1064                                 ct->proto.tcp.last_flags;
1065                         nf_ct_tcp_state_reset(&ct->proto.tcp.seen[dir]);
1066                         break;
1067                 }
1068                 ct->proto.tcp.last_index = index;
1069                 ct->proto.tcp.last_dir = dir;
1070                 ct->proto.tcp.last_seq = ntohl(th->seq);
1071                 ct->proto.tcp.last_end =
1072                     segment_seq_plus_len(ntohl(th->seq), skb->len, dataoff, th);
1073                 ct->proto.tcp.last_win = ntohs(th->window);
1074
1075                 /* a) This is a SYN in ORIGINAL. The client and the server
1076                  * may be in sync but we are not. In that case, we annotate
1077                  * the TCP options and let the packet go through. If it is a
1078                  * valid SYN packet, the server will reply with a SYN/ACK, and
1079                  * then we'll get in sync. Otherwise, the server potentially
1080                  * responds with a challenge ACK if implementing RFC5961.
1081                  */
1082                 if (index == TCP_SYN_SET && dir == IP_CT_DIR_ORIGINAL) {
1083                         struct ip_ct_tcp_state seen = {};
1084
1085                         ct->proto.tcp.last_flags =
1086                         ct->proto.tcp.last_wscale = 0;
1087                         tcp_options(skb, dataoff, th, &seen);
1088                         if (seen.flags & IP_CT_TCP_FLAG_WINDOW_SCALE) {
1089                                 ct->proto.tcp.last_flags |=
1090                                         IP_CT_TCP_FLAG_WINDOW_SCALE;
1091                                 ct->proto.tcp.last_wscale = seen.td_scale;
1092                         }
1093                         if (seen.flags & IP_CT_TCP_FLAG_SACK_PERM) {
1094                                 ct->proto.tcp.last_flags |=
1095                                         IP_CT_TCP_FLAG_SACK_PERM;
1096                         }
1097                         /* Mark the potential for an RFC5961 challenge ACK;
1098                          * this poses a special problem for the LAST_ACK state,
1099                          * as the ACK is interpreted as ACKing the last FIN.
1100                          */
1101                         if (old_state == TCP_CONNTRACK_LAST_ACK)
1102                                 ct->proto.tcp.last_flags |=
1103                                         IP_CT_EXP_CHALLENGE_ACK;
1104                 }
1105
1106                 /* possible challenge ack reply to syn */
1107                 if (old_state == TCP_CONNTRACK_SYN_SENT &&
1108                     index == TCP_ACK_SET &&
1109                     dir == IP_CT_DIR_REPLY)
1110                         ct->proto.tcp.last_ack = ntohl(th->ack_seq);
1111
1112                 spin_unlock_bh(&ct->lock);
1113                 nf_ct_l4proto_log_invalid(skb, ct, state,
1114                                           "packet (index %d) in dir %d ignored, state %s",
1115                                           index, dir,
1116                                           tcp_conntrack_names[old_state]);
1117                 return NF_ACCEPT;
1118         case TCP_CONNTRACK_MAX:
1119                 /* Special case for SYN proxy: when the SYN to the server or
1120                  * the SYN/ACK from the server is lost, the client may transmit
1121                  * a keep-alive packet while in SYN_SENT state. This needs to
1122                  * be associated with the original conntrack entry in order to
1123                  * generate a new SYN with the correct sequence number.
1124                  */
1125                 if (nfct_synproxy(ct) && old_state == TCP_CONNTRACK_SYN_SENT &&
1126                     index == TCP_ACK_SET && dir == IP_CT_DIR_ORIGINAL &&
1127                     ct->proto.tcp.last_dir == IP_CT_DIR_ORIGINAL &&
1128                     ct->proto.tcp.seen[dir].td_end - 1 == ntohl(th->seq)) {
1129                         pr_debug("nf_ct_tcp: SYN proxy client keep alive\n");
1130                         spin_unlock_bh(&ct->lock);
1131                         return NF_ACCEPT;
1132                 }
1133
1134                 /* Invalid packet */
1135                 spin_unlock_bh(&ct->lock);
1136                 nf_ct_l4proto_log_invalid(skb, ct, state,
1137                                           "packet (index %d) in dir %d invalid, state %s",
1138                                           index, dir,
1139                                           tcp_conntrack_names[old_state]);
1140                 return -NF_ACCEPT;
1141         case TCP_CONNTRACK_TIME_WAIT:
1142                 /* RFC5961 compliance causes stacks to send a "challenge ACK",
1143                  * e.g. in response to spurious SYNs.  Conntrack MUST
1144                  * not believe this ACK is acking the last FIN.
1145                  */
1146                 if (old_state == TCP_CONNTRACK_LAST_ACK &&
1147                     index == TCP_ACK_SET &&
1148                     ct->proto.tcp.last_dir != dir &&
1149                     ct->proto.tcp.last_index == TCP_SYN_SET &&
1150                     (ct->proto.tcp.last_flags & IP_CT_EXP_CHALLENGE_ACK)) {
1151                         /* Detected RFC5961 challenge ACK */
1152                         ct->proto.tcp.last_flags &= ~IP_CT_EXP_CHALLENGE_ACK;
1153                         spin_unlock_bh(&ct->lock);
1154                         nf_ct_l4proto_log_invalid(skb, ct, state, "challenge-ack ignored");
1155                         return NF_ACCEPT; /* Don't change state */
1156                 }
1157                 break;
1158         case TCP_CONNTRACK_SYN_SENT2:
1159                 /* tcp_conntracks table is not smart enough to handle
1160                  * simultaneous open.
1161                  */
1162                 ct->proto.tcp.last_flags |= IP_CT_TCP_SIMULTANEOUS_OPEN;
1163                 break;
1164         case TCP_CONNTRACK_SYN_RECV:
1165                 if (dir == IP_CT_DIR_REPLY && index == TCP_ACK_SET &&
1166                     ct->proto.tcp.last_flags & IP_CT_TCP_SIMULTANEOUS_OPEN)
1167                         new_state = TCP_CONNTRACK_ESTABLISHED;
1168                 break;
1169         case TCP_CONNTRACK_CLOSE:
1170                 if (index != TCP_RST_SET)
1171                         break;
1172
1173                 /* If we are closing, tuple might have been re-used already.
1174                  * last_index, last_ack, and all other ct fields used for
1175                  * sequence/window validation are outdated in that case.
1176                  *
1177                  * As the conntrack can already be expired by GC under pressure,
1178                  * just skip validation checks.
1179                  */
1180                 if (tcp_can_early_drop(ct))
1181                         goto in_window;
1182
1183                 /* td_maxack might be outdated if we let a SYN through earlier */
1184                 if ((ct->proto.tcp.seen[!dir].flags & IP_CT_TCP_FLAG_MAXACK_SET) &&
1185                     ct->proto.tcp.last_index != TCP_SYN_SET) {
1186                         u32 seq = ntohl(th->seq);
1187
1188                         /* If we are not in established state and SEQ=0 this is most
1189                          * likely an answer to a SYN we let go through above (last_index
1190                          * can be updated due to out-of-order ACKs).
1191                          */
1192                         if (seq == 0 && !nf_conntrack_tcp_established(ct))
1193                                 break;
1194
1195                         if (before(seq, ct->proto.tcp.seen[!dir].td_maxack) &&
1196                             !tn->tcp_ignore_invalid_rst) {
1197                                 /* Invalid RST  */
1198                                 spin_unlock_bh(&ct->lock);
1199                                 nf_ct_l4proto_log_invalid(skb, ct, state, "invalid rst");
1200                                 return -NF_ACCEPT;
1201                         }
1202
1203                         if (!nf_conntrack_tcp_established(ct) ||
1204                             seq == ct->proto.tcp.seen[!dir].td_maxack)
1205                                 break;
1206
1207                         /* Check if the RST is part of a train, such as
1208                          *   foo:80 > bar:4379: P, 235946583:235946602(19) ack 42
1209                          *   foo:80 > bar:4379: R, 235946602:235946602(0)  ack 42
1210                          */
1211                         if (ct->proto.tcp.last_index == TCP_ACK_SET &&
1212                             ct->proto.tcp.last_dir == dir &&
1213                             seq == ct->proto.tcp.last_end)
1214                                 break;
1215
1216                         /* ... RST sequence number doesn't match exactly, keep
1217                          * established state to allow a possible challenge ACK.
1218                          */
1219                         new_state = old_state;
1220                 }
1221                 if (((test_bit(IPS_SEEN_REPLY_BIT, &ct->status)
1222                          && ct->proto.tcp.last_index == TCP_SYN_SET)
1223                         || (!test_bit(IPS_ASSURED_BIT, &ct->status)
1224                             && ct->proto.tcp.last_index == TCP_ACK_SET))
1225                     && ntohl(th->ack_seq) == ct->proto.tcp.last_end) {
1226                         /* RST sent to invalid SYN or ACK we had let through
1227                          * at a) and c) above:
1228                          *
1229                          * a) SYN was in window then
1230                          * c) we hold a half-open connection.
1231                          *
1232                          * Delete our connection entry.
1233                          * We skip window checking, because packet might ACK
1234                          * segments we ignored. */
1235                         goto in_window;
1236                 }
1237
1238                 /* Reset in response to a challenge-ack we let through earlier */
1239                 if (old_state == TCP_CONNTRACK_SYN_SENT &&
1240                     ct->proto.tcp.last_index == TCP_ACK_SET &&
1241                     ct->proto.tcp.last_dir == IP_CT_DIR_REPLY &&
1242                     ntohl(th->seq) == ct->proto.tcp.last_ack)
1243                         goto in_window;
1244
1245                 break;
1246         default:
1247                 /* Keep compilers happy. */
1248                 break;
1249         }
1250
1251         res = tcp_in_window(ct, dir, index,
1252                             skb, dataoff, th, state);
1253         switch (res) {
1254         case NFCT_TCP_IGNORE:
1255                 spin_unlock_bh(&ct->lock);
1256                 return NF_ACCEPT;
1257         case NFCT_TCP_INVALID:
1258                 nf_tcp_handle_invalid(ct, dir, index, skb, state);
1259                 spin_unlock_bh(&ct->lock);
1260                 return -NF_ACCEPT;
1261         case NFCT_TCP_ACCEPT:
1262                 break;
1263         }
1264      in_window:
1265         /* From now on we have in-window packets. */
1266         ct->proto.tcp.last_index = index;
1267         ct->proto.tcp.last_dir = dir;
1268
1269         ct->proto.tcp.state = new_state;
1270         if (old_state != new_state
1271             && new_state == TCP_CONNTRACK_FIN_WAIT)
1272                 ct->proto.tcp.seen[dir].flags |= IP_CT_TCP_FLAG_CLOSE_INIT;
1273
1274         timeouts = nf_ct_timeout_lookup(ct);
1275         if (!timeouts)
1276                 timeouts = tn->timeouts;
1277
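        /* Pick the effective timeout for the new state: use the shorter
         * RETRANS timeout once the retransmission limit is hit, the CLOSE
         * timeout for RST segments, the UNACK timeout while data remains
         * unacknowledged, and RETRANS again if the last advertised window
         * was zero; otherwise fall back to the per-state default.
         */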
1278         if (ct->proto.tcp.retrans >= tn->tcp_max_retrans &&
1279             timeouts[new_state] > timeouts[TCP_CONNTRACK_RETRANS])
1280                 timeout = timeouts[TCP_CONNTRACK_RETRANS];
1281         else if (unlikely(index == TCP_RST_SET))
1282                 timeout = timeouts[TCP_CONNTRACK_CLOSE];
1283         else if ((ct->proto.tcp.seen[0].flags | ct->proto.tcp.seen[1].flags) &
1284                  IP_CT_TCP_FLAG_DATA_UNACKNOWLEDGED &&
1285                  timeouts[new_state] > timeouts[TCP_CONNTRACK_UNACK])
1286                 timeout = timeouts[TCP_CONNTRACK_UNACK];
1287         else if (ct->proto.tcp.last_win == 0 &&
1288                  timeouts[new_state] > timeouts[TCP_CONNTRACK_RETRANS])
1289                 timeout = timeouts[TCP_CONNTRACK_RETRANS];
1290         else
1291                 timeout = timeouts[new_state];
1292         spin_unlock_bh(&ct->lock);
1293
1294         if (new_state != old_state)
1295                 nf_conntrack_event_cache(IPCT_PROTOINFO, ct);
1296
1297         if (!test_bit(IPS_SEEN_REPLY_BIT, &ct->status)) {
1298                 /* If the only reply is a RST, we can consider ourselves not to
1299                    have an established connection: this is a fairly common
1300                    problem case, so we can delete the conntrack
1301                    immediately.  --RR */
1302                 if (th->rst) {
1303                         nf_ct_kill_acct(ct, ctinfo, skb);
1304                         return NF_ACCEPT;
1305                 }
1306
1307                 if (index == TCP_SYN_SET && old_state == TCP_CONNTRACK_SYN_SENT) {
1308                         /* Do not renew the timeout on SYN retransmit.
1309                          *
1310                          * Otherwise port reuse by a client or NAT middlebox can keep
1311                          * the entry alive indefinitely (including NAT info).
1312                          */
1313                         return NF_ACCEPT;
1314                 }
1315
1316                 /* ESTABLISHED without SEEN_REPLY, i.e. mid-connection
1317                  * pickup with loose=1. Avoid large ESTABLISHED timeout.
1318                  */
1319                 if (new_state == TCP_CONNTRACK_ESTABLISHED &&
1320                     timeout > timeouts[TCP_CONNTRACK_UNACK])
1321                         timeout = timeouts[TCP_CONNTRACK_UNACK];
1322         } else if (!test_bit(IPS_ASSURED_BIT, &ct->status)
1323                    && (old_state == TCP_CONNTRACK_SYN_RECV
1324                        || old_state == TCP_CONNTRACK_ESTABLISHED)
1325                    && new_state == TCP_CONNTRACK_ESTABLISHED) {
1326                 /* Set ASSURED if we see a valid ACK in ESTABLISHED
1327                    after SYN_RECV or a valid answer for a picked up
1328                    connection. */
1329                 set_bit(IPS_ASSURED_BIT, &ct->status);
1330                 nf_conntrack_event_cache(IPCT_ASSURED, ct);
1331         }
1332         nf_ct_refresh_acct(ct, ctinfo, skb, timeout);
1333
1334         return NF_ACCEPT;
1335 }
1336
1337 #if IS_ENABLED(CONFIG_NF_CT_NETLINK)
1338
1339 #include <linux/netfilter/nfnetlink.h>
1340 #include <linux/netfilter/nfnetlink_conntrack.h>
1341
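/* ctnetlink import/export of the TCP private protocol info (state, window
 * scale factors and per-direction flags), used e.g. for conntrack table
 * dumps and for state synchronization by tools such as conntrackd.
 */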
1342 static int tcp_to_nlattr(struct sk_buff *skb, struct nlattr *nla,
1343                          struct nf_conn *ct, bool destroy)
1344 {
1345         struct nlattr *nest_parms;
1346         struct nf_ct_tcp_flags tmp = {};
1347
1348         spin_lock_bh(&ct->lock);
1349         nest_parms = nla_nest_start(skb, CTA_PROTOINFO_TCP);
1350         if (!nest_parms)
1351                 goto nla_put_failure;
1352
1353         if (nla_put_u8(skb, CTA_PROTOINFO_TCP_STATE, ct->proto.tcp.state))
1354                 goto nla_put_failure;
1355
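        /* On destroy events only the final TCP state is reported; the
         * remaining attributes below are skipped.
         */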
1356         if (destroy)
1357                 goto skip_state;
1358
1359         if (nla_put_u8(skb, CTA_PROTOINFO_TCP_WSCALE_ORIGINAL,
1360                        ct->proto.tcp.seen[0].td_scale) ||
1361             nla_put_u8(skb, CTA_PROTOINFO_TCP_WSCALE_REPLY,
1362                        ct->proto.tcp.seen[1].td_scale))
1363                 goto nla_put_failure;
1364
1365         tmp.flags = ct->proto.tcp.seen[0].flags;
1366         if (nla_put(skb, CTA_PROTOINFO_TCP_FLAGS_ORIGINAL,
1367                     sizeof(struct nf_ct_tcp_flags), &tmp))
1368                 goto nla_put_failure;
1369
1370         tmp.flags = ct->proto.tcp.seen[1].flags;
1371         if (nla_put(skb, CTA_PROTOINFO_TCP_FLAGS_REPLY,
1372                     sizeof(struct nf_ct_tcp_flags), &tmp))
1373                 goto nla_put_failure;
1374 skip_state:
1375         spin_unlock_bh(&ct->lock);
1376         nla_nest_end(skb, nest_parms);
1377
1378         return 0;
1379
1380 nla_put_failure:
1381         spin_unlock_bh(&ct->lock);
1382         return -1;
1383 }
1384
1385 static const struct nla_policy tcp_nla_policy[CTA_PROTOINFO_TCP_MAX+1] = {
1386         [CTA_PROTOINFO_TCP_STATE]           = { .type = NLA_U8 },
1387         [CTA_PROTOINFO_TCP_WSCALE_ORIGINAL] = { .type = NLA_U8 },
1388         [CTA_PROTOINFO_TCP_WSCALE_REPLY]    = { .type = NLA_U8 },
1389         [CTA_PROTOINFO_TCP_FLAGS_ORIGINAL]  = { .len = sizeof(struct nf_ct_tcp_flags) },
1390         [CTA_PROTOINFO_TCP_FLAGS_REPLY]     = { .len = sizeof(struct nf_ct_tcp_flags) },
1391 };
1392
1393 #define TCP_NLATTR_SIZE ( \
1394         NLA_ALIGN(NLA_HDRLEN + 1) + \
1395         NLA_ALIGN(NLA_HDRLEN + 1) + \
1396         NLA_ALIGN(NLA_HDRLEN + sizeof(struct nf_ct_tcp_flags)) + \
1397         NLA_ALIGN(NLA_HDRLEN + sizeof(struct nf_ct_tcp_flags)))
1398
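/* Apply TCP protocol info supplied by userspace over ctnetlink when an
 * entry is created or updated (e.g. by conntrackd injecting synchronized
 * state).
 */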
1399 static int nlattr_to_tcp(struct nlattr *cda[], struct nf_conn *ct)
1400 {
1401         struct nlattr *pattr = cda[CTA_PROTOINFO_TCP];
1402         struct nlattr *tb[CTA_PROTOINFO_TCP_MAX+1];
1403         int err;
1404
1405         /* The update may not contain anything about the private
1406          * protocol info; in that case skip the parsing. */
1407         if (!pattr)
1408                 return 0;
1409
1410         err = nla_parse_nested_deprecated(tb, CTA_PROTOINFO_TCP_MAX, pattr,
1411                                           tcp_nla_policy, NULL);
1412         if (err < 0)
1413                 return err;
1414
1415         if (tb[CTA_PROTOINFO_TCP_STATE] &&
1416             nla_get_u8(tb[CTA_PROTOINFO_TCP_STATE]) >= TCP_CONNTRACK_MAX)
1417                 return -EINVAL;
1418
1419         spin_lock_bh(&ct->lock);
1420         if (tb[CTA_PROTOINFO_TCP_STATE])
1421                 ct->proto.tcp.state = nla_get_u8(tb[CTA_PROTOINFO_TCP_STATE]);
1422
1423         if (tb[CTA_PROTOINFO_TCP_FLAGS_ORIGINAL]) {
1424                 struct nf_ct_tcp_flags *attr =
1425                         nla_data(tb[CTA_PROTOINFO_TCP_FLAGS_ORIGINAL]);
1426                 ct->proto.tcp.seen[0].flags &= ~attr->mask;
1427                 ct->proto.tcp.seen[0].flags |= attr->flags & attr->mask;
1428         }
1429
1430         if (tb[CTA_PROTOINFO_TCP_FLAGS_REPLY]) {
1431                 struct nf_ct_tcp_flags *attr =
1432                         nla_data(tb[CTA_PROTOINFO_TCP_FLAGS_REPLY]);
1433                 ct->proto.tcp.seen[1].flags &= ~attr->mask;
1434                 ct->proto.tcp.seen[1].flags |= attr->flags & attr->mask;
1435         }
1436
1437         if (tb[CTA_PROTOINFO_TCP_WSCALE_ORIGINAL] &&
1438             tb[CTA_PROTOINFO_TCP_WSCALE_REPLY] &&
1439             ct->proto.tcp.seen[0].flags & IP_CT_TCP_FLAG_WINDOW_SCALE &&
1440             ct->proto.tcp.seen[1].flags & IP_CT_TCP_FLAG_WINDOW_SCALE) {
1441                 ct->proto.tcp.seen[0].td_scale =
1442                         nla_get_u8(tb[CTA_PROTOINFO_TCP_WSCALE_ORIGINAL]);
1443                 ct->proto.tcp.seen[1].td_scale =
1444                         nla_get_u8(tb[CTA_PROTOINFO_TCP_WSCALE_REPLY]);
1445         }
1446         spin_unlock_bh(&ct->lock);
1447
1448         return 0;
1449 }
1450
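/* Size of the port-based tuple attributes; the policy length is constant,
 * so it is computed once and cached.
 */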
1451 static unsigned int tcp_nlattr_tuple_size(void)
1452 {
1453         static unsigned int size __read_mostly;
1454
1455         if (!size)
1456                 size = nla_policy_len(nf_ct_port_nla_policy, CTA_PROTO_MAX + 1);
1457
1458         return size;
1459 }
1460 #endif
1461
1462 #ifdef CONFIG_NF_CONNTRACK_TIMEOUT
1463
1464 #include <linux/netfilter/nfnetlink.h>
1465 #include <linux/netfilter/nfnetlink_cttimeout.h>
1466
1467 static int tcp_timeout_nlattr_to_obj(struct nlattr *tb[],
1468                                      struct net *net, void *data)
1469 {
1470         struct nf_tcp_net *tn = nf_tcp_pernet(net);
1471         unsigned int *timeouts = data;
1472         int i;
1473
1474         if (!timeouts)
1475                 timeouts = tn->timeouts;
1476         /* Set default TCP timeouts. */
1477         for (i = 0; i < TCP_CONNTRACK_TIMEOUT_MAX; i++)
1478                 timeouts[i] = tn->timeouts[i];
1479
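        /* The CTA_TIMEOUT_TCP_* attributes carry timeouts in seconds as
         * big-endian 32-bit values; convert them to jiffies here.
         */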
1480         if (tb[CTA_TIMEOUT_TCP_SYN_SENT]) {
1481                 timeouts[TCP_CONNTRACK_SYN_SENT] =
1482                         ntohl(nla_get_be32(tb[CTA_TIMEOUT_TCP_SYN_SENT])) * HZ;
1483         }
1484
1485         if (tb[CTA_TIMEOUT_TCP_SYN_RECV]) {
1486                 timeouts[TCP_CONNTRACK_SYN_RECV] =
1487                         ntohl(nla_get_be32(tb[CTA_TIMEOUT_TCP_SYN_RECV])) * HZ;
1488         }
1489         if (tb[CTA_TIMEOUT_TCP_ESTABLISHED]) {
1490                 timeouts[TCP_CONNTRACK_ESTABLISHED] =
1491                         ntohl(nla_get_be32(tb[CTA_TIMEOUT_TCP_ESTABLISHED])) * HZ;
1492         }
1493         if (tb[CTA_TIMEOUT_TCP_FIN_WAIT]) {
1494                 timeouts[TCP_CONNTRACK_FIN_WAIT] =
1495                         ntohl(nla_get_be32(tb[CTA_TIMEOUT_TCP_FIN_WAIT])) * HZ;
1496         }
1497         if (tb[CTA_TIMEOUT_TCP_CLOSE_WAIT]) {
1498                 timeouts[TCP_CONNTRACK_CLOSE_WAIT] =
1499                         ntohl(nla_get_be32(tb[CTA_TIMEOUT_TCP_CLOSE_WAIT])) * HZ;
1500         }
1501         if (tb[CTA_TIMEOUT_TCP_LAST_ACK]) {
1502                 timeouts[TCP_CONNTRACK_LAST_ACK] =
1503                         ntohl(nla_get_be32(tb[CTA_TIMEOUT_TCP_LAST_ACK])) * HZ;
1504         }
1505         if (tb[CTA_TIMEOUT_TCP_TIME_WAIT]) {
1506                 timeouts[TCP_CONNTRACK_TIME_WAIT] =
1507                         ntohl(nla_get_be32(tb[CTA_TIMEOUT_TCP_TIME_WAIT])) * HZ;
1508         }
1509         if (tb[CTA_TIMEOUT_TCP_CLOSE]) {
1510                 timeouts[TCP_CONNTRACK_CLOSE] =
1511                         ntohl(nla_get_be32(tb[CTA_TIMEOUT_TCP_CLOSE])) * HZ;
1512         }
1513         if (tb[CTA_TIMEOUT_TCP_SYN_SENT2]) {
1514                 timeouts[TCP_CONNTRACK_SYN_SENT2] =
1515                         ntohl(nla_get_be32(tb[CTA_TIMEOUT_TCP_SYN_SENT2])) * HZ;
1516         }
1517         if (tb[CTA_TIMEOUT_TCP_RETRANS]) {
1518                 timeouts[TCP_CONNTRACK_RETRANS] =
1519                         ntohl(nla_get_be32(tb[CTA_TIMEOUT_TCP_RETRANS])) * HZ;
1520         }
1521         if (tb[CTA_TIMEOUT_TCP_UNACK]) {
1522                 timeouts[TCP_CONNTRACK_UNACK] =
1523                         ntohl(nla_get_be32(tb[CTA_TIMEOUT_TCP_UNACK])) * HZ;
1524         }
1525
1526         timeouts[CTA_TIMEOUT_TCP_UNSPEC] = timeouts[CTA_TIMEOUT_TCP_SYN_SENT];
1527         return 0;
1528 }
1529
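/* Export the per-state timeouts back to userspace, converted from jiffies
 * to seconds.
 */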
1530 static int
1531 tcp_timeout_obj_to_nlattr(struct sk_buff *skb, const void *data)
1532 {
1533         const unsigned int *timeouts = data;
1534
1535         if (nla_put_be32(skb, CTA_TIMEOUT_TCP_SYN_SENT,
1536                         htonl(timeouts[TCP_CONNTRACK_SYN_SENT] / HZ)) ||
1537             nla_put_be32(skb, CTA_TIMEOUT_TCP_SYN_RECV,
1538                          htonl(timeouts[TCP_CONNTRACK_SYN_RECV] / HZ)) ||
1539             nla_put_be32(skb, CTA_TIMEOUT_TCP_ESTABLISHED,
1540                          htonl(timeouts[TCP_CONNTRACK_ESTABLISHED] / HZ)) ||
1541             nla_put_be32(skb, CTA_TIMEOUT_TCP_FIN_WAIT,
1542                          htonl(timeouts[TCP_CONNTRACK_FIN_WAIT] / HZ)) ||
1543             nla_put_be32(skb, CTA_TIMEOUT_TCP_CLOSE_WAIT,
1544                          htonl(timeouts[TCP_CONNTRACK_CLOSE_WAIT] / HZ)) ||
1545             nla_put_be32(skb, CTA_TIMEOUT_TCP_LAST_ACK,
1546                          htonl(timeouts[TCP_CONNTRACK_LAST_ACK] / HZ)) ||
1547             nla_put_be32(skb, CTA_TIMEOUT_TCP_TIME_WAIT,
1548                          htonl(timeouts[TCP_CONNTRACK_TIME_WAIT] / HZ)) ||
1549             nla_put_be32(skb, CTA_TIMEOUT_TCP_CLOSE,
1550                          htonl(timeouts[TCP_CONNTRACK_CLOSE] / HZ)) ||
1551             nla_put_be32(skb, CTA_TIMEOUT_TCP_SYN_SENT2,
1552                          htonl(timeouts[TCP_CONNTRACK_SYN_SENT2] / HZ)) ||
1553             nla_put_be32(skb, CTA_TIMEOUT_TCP_RETRANS,
1554                          htonl(timeouts[TCP_CONNTRACK_RETRANS] / HZ)) ||
1555             nla_put_be32(skb, CTA_TIMEOUT_TCP_UNACK,
1556                          htonl(timeouts[TCP_CONNTRACK_UNACK] / HZ)))
1557                 goto nla_put_failure;
1558         return 0;
1559
1560 nla_put_failure:
1561         return -ENOSPC;
1562 }
1563
1564 static const struct nla_policy tcp_timeout_nla_policy[CTA_TIMEOUT_TCP_MAX+1] = {
1565         [CTA_TIMEOUT_TCP_SYN_SENT]      = { .type = NLA_U32 },
1566         [CTA_TIMEOUT_TCP_SYN_RECV]      = { .type = NLA_U32 },
1567         [CTA_TIMEOUT_TCP_ESTABLISHED]   = { .type = NLA_U32 },
1568         [CTA_TIMEOUT_TCP_FIN_WAIT]      = { .type = NLA_U32 },
1569         [CTA_TIMEOUT_TCP_CLOSE_WAIT]    = { .type = NLA_U32 },
1570         [CTA_TIMEOUT_TCP_LAST_ACK]      = { .type = NLA_U32 },
1571         [CTA_TIMEOUT_TCP_TIME_WAIT]     = { .type = NLA_U32 },
1572         [CTA_TIMEOUT_TCP_CLOSE]         = { .type = NLA_U32 },
1573         [CTA_TIMEOUT_TCP_SYN_SENT2]     = { .type = NLA_U32 },
1574         [CTA_TIMEOUT_TCP_RETRANS]       = { .type = NLA_U32 },
1575         [CTA_TIMEOUT_TCP_UNACK]         = { .type = NLA_U32 },
1576 };
1577 #endif /* CONFIG_NF_CONNTRACK_TIMEOUT */
1578
1579 void nf_conntrack_tcp_init_net(struct net *net)
1580 {
1581         struct nf_tcp_net *tn = nf_tcp_pernet(net);
1582         int i;
1583
1584         for (i = 0; i < TCP_CONNTRACK_TIMEOUT_MAX; i++)
1585                 tn->timeouts[i] = tcp_timeouts[i];
1586
1587         /* timeouts[0] is unused; make it the same as SYN_SENT so
1588          * ->timeouts[0] contains the 'new' timeout, like udp or icmp.
1589          */
1590         tn->timeouts[0] = tcp_timeouts[TCP_CONNTRACK_SYN_SENT];
1591
1592         /* If it is set to zero, we disable picking up already established
1593          * connections.
1594          */
1595         tn->tcp_loose = 1;
1596
1597         /* "Be conservative in what you do,
1598          *  be liberal in what you accept from others."
1599          * If it's non-zero, we mark only out-of-window RST segments as INVALID.
1600          */
1601         tn->tcp_be_liberal = 0;
1602
1603         /* If it's non-zero, we turn off the RST sequence number check. */
1604         tn->tcp_ignore_invalid_rst = 0;
1605
1606         /* Max number of retransmitted packets without receiving an (acceptable)
1607          * ACK from the destination. If this number is reached, a shorter timer
1608          * will be started.
1609          */
1610         tn->tcp_max_retrans = 3;
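
        /* Where sysctl support is enabled, these defaults can be overridden at
         * runtime via the net.netfilter.nf_conntrack_tcp_* knobs, e.g.
         * nf_conntrack_tcp_loose, nf_conntrack_tcp_be_liberal,
         * nf_conntrack_tcp_ignore_invalid_rst, nf_conntrack_tcp_max_retrans
         * and the nf_conntrack_tcp_timeout_* entries.
         */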
1611
1612 #if IS_ENABLED(CONFIG_NF_FLOW_TABLE)
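        /* Default timeout used to keep the conntrack entry alive while packets
         * for the flow are handled by the flowtable offload path; typically
         * tunable via the nf_flowtable_tcp_timeout sysctl.
         */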
1613         tn->offload_timeout = 30 * HZ;
1614 #endif
1615 }
1616
1617 const struct nf_conntrack_l4proto nf_conntrack_l4proto_tcp =
1618 {
1619         .l4proto                = IPPROTO_TCP,
1620 #ifdef CONFIG_NF_CONNTRACK_PROCFS
1621         .print_conntrack        = tcp_print_conntrack,
1622 #endif
1623         .can_early_drop         = tcp_can_early_drop,
1624 #if IS_ENABLED(CONFIG_NF_CT_NETLINK)
1625         .to_nlattr              = tcp_to_nlattr,
1626         .from_nlattr            = nlattr_to_tcp,
1627         .tuple_to_nlattr        = nf_ct_port_tuple_to_nlattr,
1628         .nlattr_to_tuple        = nf_ct_port_nlattr_to_tuple,
1629         .nlattr_tuple_size      = tcp_nlattr_tuple_size,
1630         .nlattr_size            = TCP_NLATTR_SIZE,
1631         .nla_policy             = nf_ct_port_nla_policy,
1632 #endif
1633 #ifdef CONFIG_NF_CONNTRACK_TIMEOUT
1634         .ctnl_timeout           = {
1635                 .nlattr_to_obj  = tcp_timeout_nlattr_to_obj,
1636                 .obj_to_nlattr  = tcp_timeout_obj_to_nlattr,
1637                 .nlattr_max     = CTA_TIMEOUT_TCP_MAX,
1638                 .obj_size       = sizeof(unsigned int) *
1639                                         TCP_CONNTRACK_TIMEOUT_MAX,
1640                 .nla_policy     = tcp_timeout_nla_policy,
1641         },
1642 #endif /* CONFIG_NF_CONNTRACK_TIMEOUT */
1643 };