net/netfilter/nf_conntrack_proto_tcp.c (linux-2.6-microblaze.git)
1 // SPDX-License-Identifier: GPL-2.0-only
2 /* (C) 1999-2001 Paul `Rusty' Russell
3  * (C) 2002-2004 Netfilter Core Team <coreteam@netfilter.org>
4  * (C) 2002-2013 Jozsef Kadlecsik <kadlec@netfilter.org>
5  * (C) 2006-2012 Patrick McHardy <kaber@trash.net>
6  */
7
8 #include <linux/types.h>
9 #include <linux/timer.h>
10 #include <linux/module.h>
11 #include <linux/in.h>
12 #include <linux/tcp.h>
13 #include <linux/spinlock.h>
14 #include <linux/skbuff.h>
15 #include <linux/ipv6.h>
16 #include <net/ip6_checksum.h>
17 #include <asm/unaligned.h>
18
19 #include <net/tcp.h>
20
21 #include <linux/netfilter.h>
22 #include <linux/netfilter_ipv4.h>
23 #include <linux/netfilter_ipv6.h>
24 #include <net/netfilter/nf_conntrack.h>
25 #include <net/netfilter/nf_conntrack_l4proto.h>
26 #include <net/netfilter/nf_conntrack_ecache.h>
27 #include <net/netfilter/nf_conntrack_seqadj.h>
28 #include <net/netfilter/nf_conntrack_synproxy.h>
29 #include <net/netfilter/nf_conntrack_timeout.h>
30 #include <net/netfilter/nf_log.h>
31 #include <net/netfilter/ipv4/nf_conntrack_ipv4.h>
32 #include <net/netfilter/ipv6/nf_conntrack_ipv6.h>
33
34   /* FIXME: Examine ipfilter's timeouts and conntrack transitions more
35      closely.  They're more complex. --RR */
36
37 static const char *const tcp_conntrack_names[] = {
38         "NONE",
39         "SYN_SENT",
40         "SYN_RECV",
41         "ESTABLISHED",
42         "FIN_WAIT",
43         "CLOSE_WAIT",
44         "LAST_ACK",
45         "TIME_WAIT",
46         "CLOSE",
47         "SYN_SENT2",
48 };
49
50 enum nf_ct_tcp_action {
51         NFCT_TCP_IGNORE,
52         NFCT_TCP_INVALID,
53         NFCT_TCP_ACCEPT,
54 };
55
56 #define SECS * HZ
57 #define MINS * 60 SECS
58 #define HOURS * 60 MINS
59 #define DAYS * 24 HOURS
60
61 static const unsigned int tcp_timeouts[TCP_CONNTRACK_TIMEOUT_MAX] = {
62         [TCP_CONNTRACK_SYN_SENT]        = 2 MINS,
63         [TCP_CONNTRACK_SYN_RECV]        = 60 SECS,
64         [TCP_CONNTRACK_ESTABLISHED]     = 5 DAYS,
65         [TCP_CONNTRACK_FIN_WAIT]        = 2 MINS,
66         [TCP_CONNTRACK_CLOSE_WAIT]      = 60 SECS,
67         [TCP_CONNTRACK_LAST_ACK]        = 30 SECS,
68         [TCP_CONNTRACK_TIME_WAIT]       = 2 MINS,
69         [TCP_CONNTRACK_CLOSE]           = 10 SECS,
70         [TCP_CONNTRACK_SYN_SENT2]       = 2 MINS,
71 /* RFC1122 says the R2 limit should be at least 100 seconds.
72    Linux uses 15 packets as limit, which corresponds
73    to ~13-30min depending on RTO. */
74         [TCP_CONNTRACK_RETRANS]         = 5 MINS,
75         [TCP_CONNTRACK_UNACK]           = 5 MINS,
76 };
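
/* For illustration: the entries above are plain jiffies values, so something
 * like "5 DAYS" expands through the macros to 5 * 24 * 60 * 60 * HZ
 * (432000 seconds) for the ESTABLISHED state, while "60 SECS" is simply
 * 60 * HZ.
 */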
77
78 #define sNO TCP_CONNTRACK_NONE
79 #define sSS TCP_CONNTRACK_SYN_SENT
80 #define sSR TCP_CONNTRACK_SYN_RECV
81 #define sES TCP_CONNTRACK_ESTABLISHED
82 #define sFW TCP_CONNTRACK_FIN_WAIT
83 #define sCW TCP_CONNTRACK_CLOSE_WAIT
84 #define sLA TCP_CONNTRACK_LAST_ACK
85 #define sTW TCP_CONNTRACK_TIME_WAIT
86 #define sCL TCP_CONNTRACK_CLOSE
87 #define sS2 TCP_CONNTRACK_SYN_SENT2
88 #define sIV TCP_CONNTRACK_MAX
89 #define sIG TCP_CONNTRACK_IGNORE
90
91 /* What TCP flags are set from RST/SYN/FIN/ACK. */
92 enum tcp_bit_set {
93         TCP_SYN_SET,
94         TCP_SYNACK_SET,
95         TCP_FIN_SET,
96         TCP_ACK_SET,
97         TCP_RST_SET,
98         TCP_NONE_SET,
99 };
100
101 /*
102  * The TCP state transition table needs a few words...
103  *
104  * We are the man in the middle. All the packets go through us
105  * but might get lost in transit to the destination.
106  * It is assumed that the destinations can't receive segments
107  * we haven't seen.
108  *
109  * The checked segment is in window, but our windows are *not*
110  * equivalent with the ones of the sender/receiver. We always
111  * try to guess the state of the current sender.
112  *
113  * The meaning of the states are:
114  *
115  * NONE:        initial state
116  * SYN_SENT:    SYN-only packet seen
117  * SYN_SENT2:   SYN-only packet seen from reply dir, simultaneous open
118  * SYN_RECV:    SYN-ACK packet seen
119  * ESTABLISHED: ACK packet seen
120  * FIN_WAIT:    FIN packet seen
121  * CLOSE_WAIT:  ACK seen (after FIN)
122  * LAST_ACK:    FIN seen (after FIN)
123  * TIME_WAIT:   last ACK seen
124  * CLOSE:       closed connection (RST)
125  *
126  * Packets marked as IGNORED (sIG):
127  *      if they may be either invalid or valid
128  *      and the receiver may send back a connection
129  *      closing RST or a SYN/ACK.
130  *
131  * Packets marked as INVALID (sIV):
132  *      if we regard them as truly invalid packets
133  */
134 static const u8 tcp_conntracks[2][6][TCP_CONNTRACK_MAX] = {
135         {
136 /* ORIGINAL */
137 /*           sNO, sSS, sSR, sES, sFW, sCW, sLA, sTW, sCL, sS2   */
138 /*syn*/    { sSS, sSS, sIG, sIG, sIG, sIG, sIG, sSS, sSS, sS2 },
139 /*
140  *      sNO -> sSS      Initialize a new connection
141  *      sSS -> sSS      Retransmitted SYN
142  *      sS2 -> sS2      Late retransmitted SYN
143  *      sSR -> sIG
144  *      sES -> sIG      Error: SYNs in window outside the SYN_SENT state
145  *                      are errors. Receiver will reply with RST
146  *                      and close the connection.
147  *                      Or we are not in sync and hold a dead connection.
148  *      sFW -> sIG
149  *      sCW -> sIG
150  *      sLA -> sIG
151  *      sTW -> sSS      Reopened connection (RFC 1122).
152  *      sCL -> sSS
153  */
154 /*           sNO, sSS, sSR, sES, sFW, sCW, sLA, sTW, sCL, sS2   */
155 /*synack*/ { sIV, sIV, sSR, sIV, sIV, sIV, sIV, sIV, sIV, sSR },
156 /*
157  *      sNO -> sIV      Too late and no reason to do anything
158  *      sSS -> sIV      Client can't send SYN and then SYN/ACK
159  *      sS2 -> sSR      SYN/ACK sent to SYN2 in simultaneous open
160  *      sSR -> sSR      Late retransmitted SYN/ACK in simultaneous open
161  *      sES -> sIV      Invalid SYN/ACK packets sent by the client
162  *      sFW -> sIV
163  *      sCW -> sIV
164  *      sLA -> sIV
165  *      sTW -> sIV
166  *      sCL -> sIV
167  */
168 /*           sNO, sSS, sSR, sES, sFW, sCW, sLA, sTW, sCL, sS2   */
169 /*fin*/    { sIV, sIV, sFW, sFW, sLA, sLA, sLA, sTW, sCL, sIV },
170 /*
171  *      sNO -> sIV      Too late and no reason to do anything...
172  *      sSS -> sIV      Client might not send FIN in this state:
173  *                      we enforce waiting for a SYN/ACK reply first.
174  *      sS2 -> sIV
175  *      sSR -> sFW      Close started.
176  *      sES -> sFW
177  *      sFW -> sLA      FIN seen in both directions, waiting for
178  *                      the last ACK.
179  *                      Might be a retransmitted FIN as well...
180  *      sCW -> sLA
181  *      sLA -> sLA      Retransmitted FIN. Remain in the same state.
182  *      sTW -> sTW
183  *      sCL -> sCL
184  */
185 /*           sNO, sSS, sSR, sES, sFW, sCW, sLA, sTW, sCL, sS2   */
186 /*ack*/    { sES, sIV, sES, sES, sCW, sCW, sTW, sTW, sCL, sIV },
187 /*
188  *      sNO -> sES      Assumed.
189  *      sSS -> sIV      ACK is invalid: we haven't seen a SYN/ACK yet.
190  *      sS2 -> sIV
191  *      sSR -> sES      Established state is reached.
192  *      sES -> sES      :-)
193  *      sFW -> sCW      Normal close request answered by ACK.
194  *      sCW -> sCW
195  *      sLA -> sTW      Last ACK detected (RFC5961 challenged)
196  *      sTW -> sTW      Retransmitted last ACK. Remain in the same state.
197  *      sCL -> sCL
198  */
199 /*           sNO, sSS, sSR, sES, sFW, sCW, sLA, sTW, sCL, sS2   */
200 /*rst*/    { sIV, sCL, sCL, sCL, sCL, sCL, sCL, sCL, sCL, sCL },
201 /*none*/   { sIV, sIV, sIV, sIV, sIV, sIV, sIV, sIV, sIV, sIV }
202         },
203         {
204 /* REPLY */
205 /*           sNO, sSS, sSR, sES, sFW, sCW, sLA, sTW, sCL, sS2   */
206 /*syn*/    { sIV, sS2, sIV, sIV, sIV, sIV, sIV, sSS, sIV, sS2 },
207 /*
208  *      sNO -> sIV      Never reached.
209  *      sSS -> sS2      Simultaneous open
210  *      sS2 -> sS2      Retransmitted simultaneous SYN
211  *      sSR -> sIV      Invalid SYN packets sent by the server
212  *      sES -> sIV
213  *      sFW -> sIV
214  *      sCW -> sIV
215  *      sLA -> sIV
216  *      sTW -> sSS      Reopened connection, but server may have switched role
217  *      sCL -> sIV
218  */
219 /*           sNO, sSS, sSR, sES, sFW, sCW, sLA, sTW, sCL, sS2   */
220 /*synack*/ { sIV, sSR, sIG, sIG, sIG, sIG, sIG, sIG, sIG, sSR },
221 /*
222  *      sSS -> sSR      Standard open.
223  *      sS2 -> sSR      Simultaneous open
224  *      sSR -> sIG      Retransmitted SYN/ACK, ignore it.
225  *      sES -> sIG      Late retransmitted SYN/ACK?
226  *      sFW -> sIG      Might be SYN/ACK answering ignored SYN
227  *      sCW -> sIG
228  *      sLA -> sIG
229  *      sTW -> sIG
230  *      sCL -> sIG
231  */
232 /*           sNO, sSS, sSR, sES, sFW, sCW, sLA, sTW, sCL, sS2   */
233 /*fin*/    { sIV, sIV, sFW, sFW, sLA, sLA, sLA, sTW, sCL, sIV },
234 /*
235  *      sSS -> sIV      Server might not send FIN in this state.
236  *      sS2 -> sIV
237  *      sSR -> sFW      Close started.
238  *      sES -> sFW
239  *      sFW -> sLA      FIN seen in both directions.
240  *      sCW -> sLA
241  *      sLA -> sLA      Retransmitted FIN.
242  *      sTW -> sTW
243  *      sCL -> sCL
244  */
245 /*           sNO, sSS, sSR, sES, sFW, sCW, sLA, sTW, sCL, sS2   */
246 /*ack*/    { sIV, sIG, sSR, sES, sCW, sCW, sTW, sTW, sCL, sIG },
247 /*
248  *      sSS -> sIG      Might be a half-open connection.
249  *      sS2 -> sIG
250  *      sSR -> sSR      Might answer late resent SYN.
251  *      sES -> sES      :-)
252  *      sFW -> sCW      Normal close request answered by ACK.
253  *      sCW -> sCW
254  *      sLA -> sTW      Last ACK detected (RFC5961 challenged)
255  *      sTW -> sTW      Retransmitted last ACK.
256  *      sCL -> sCL
257  */
258 /*           sNO, sSS, sSR, sES, sFW, sCW, sLA, sTW, sCL, sS2   */
259 /*rst*/    { sIV, sCL, sCL, sCL, sCL, sCL, sCL, sCL, sCL, sCL },
260 /*none*/   { sIV, sIV, sIV, sIV, sIV, sIV, sIV, sIV, sIV, sIV }
261         }
262 };
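
/* Example lookups into the table above: a SYN in the ORIGINAL direction while
 * the entry is still in NONE,
 *   tcp_conntracks[IP_CT_DIR_ORIGINAL][TCP_SYN_SET][TCP_CONNTRACK_NONE],
 * yields sSS and moves the entry to SYN_SENT, whereas the same SYN seen while
 * the entry is already ESTABLISHED yields sIG and the packet is only ignored.
 */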
263
264 #ifdef CONFIG_NF_CONNTRACK_PROCFS
265 /* Print out the private part of the conntrack. */
266 static void tcp_print_conntrack(struct seq_file *s, struct nf_conn *ct)
267 {
268         if (test_bit(IPS_OFFLOAD_BIT, &ct->status))
269                 return;
270
271         seq_printf(s, "%s ", tcp_conntrack_names[ct->proto.tcp.state]);
272 }
273 #endif
274
275 static unsigned int get_conntrack_index(const struct tcphdr *tcph)
276 {
277         if (tcph->rst) return TCP_RST_SET;
278         else if (tcph->syn) return (tcph->ack ? TCP_SYNACK_SET : TCP_SYN_SET);
279         else if (tcph->fin) return TCP_FIN_SET;
280         else if (tcph->ack) return TCP_ACK_SET;
281         else return TCP_NONE_SET;
282 }
283
284 /* TCP connection tracking based on 'Real Stateful TCP Packet Filtering
285    in IP Filter' by Guido van Rooij.
286
287    http://www.sane.nl/events/sane2000/papers.html
288    http://www.darkart.com/mirrors/www.obfuscation.org/ipf/
289
290    The boundaries and the conditions are changed according to RFC793:
291    the packet must intersect the window (i.e. segments may be
292    after the right or before the left edge) and thus receivers may ACK
293    segments after the right edge of the window.
294
295         td_maxend = max(sack + max(win,1)) seen in reply packets
296         td_maxwin = max(max(win, 1)) + (sack - ack) seen in sent packets
297         td_maxwin += seq + len - sender.td_maxend
298                         if seq + len > sender.td_maxend
299         td_end    = max(seq + len) seen in sent packets
300
301    I.   Upper bound for valid data:     seq <= sender.td_maxend
302    II.  Lower bound for valid data:     seq + len >= sender.td_end - receiver.td_maxwin
303    III. Upper bound for valid (s)ack:   sack <= receiver.td_end
304    IV.  Lower bound for valid (s)ack:   sack >= receiver.td_end - MAXACKWINDOW
305
306    where sack is the highest right edge of the sack block found in the packet,
307    or ack in the case of a packet without the SACK option.
308
309    The upper bound limit for a valid (s)ack is not ignored -
310    we don't have to deal with fragments.
311 */
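
/* Worked example with arbitrary numbers: suppose the sender has
 * td_end = 1000 and td_maxend = 2000 and the receiver has td_end = 800 and
 * td_maxwin = 500.  A segment with seq = 1500, len = 100 and ack = 700 then
 * satisfies I (1500 <= 2000), II (1600 >= 1000 - 500), III (700 <= 800) and
 * IV (700 >= 800 - MAXACKWINDOW), so it is treated as being in window.
 */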
312
313 static inline __u32 segment_seq_plus_len(__u32 seq,
314                                          size_t len,
315                                          unsigned int dataoff,
316                                          const struct tcphdr *tcph)
317 {
318         /* XXX Should I use payload length field in IP/IPv6 header ?
319          * - YK */
320         return (seq + len - dataoff - tcph->doff*4
321                 + (tcph->syn ? 1 : 0) + (tcph->fin ? 1 : 0));
322 }
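
/* Example: a pure SYN at seq = 100 "ends" at 101, because the SYN itself
 * consumes one sequence number; a segment carrying 500 bytes of payload
 * starting at seq = 200 (no SYN/FIN) ends at 700.
 */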
323
324 /* Fixme: what about big packets? */
325 #define MAXACKWINCONST                  66000
326 #define MAXACKWINDOW(sender)                                            \
327         ((sender)->td_maxwin > MAXACKWINCONST ? (sender)->td_maxwin     \
328                                               : MAXACKWINCONST)
329
330 /*
331  * Simplified tcp_parse_options routine from tcp_input.c
332  */
333 static void tcp_options(const struct sk_buff *skb,
334                         unsigned int dataoff,
335                         const struct tcphdr *tcph,
336                         struct ip_ct_tcp_state *state)
337 {
338         unsigned char buff[(15 * 4) - sizeof(struct tcphdr)];
339         const unsigned char *ptr;
340         int length = (tcph->doff*4) - sizeof(struct tcphdr);
341
342         if (!length)
343                 return;
344
345         ptr = skb_header_pointer(skb, dataoff + sizeof(struct tcphdr),
346                                  length, buff);
347         if (!ptr)
348                 return;
349
350         state->td_scale = 0;
351         state->flags &= IP_CT_TCP_FLAG_BE_LIBERAL;
352
353         while (length > 0) {
354                 int opcode=*ptr++;
355                 int opsize;
356
357                 switch (opcode) {
358                 case TCPOPT_EOL:
359                         return;
360                 case TCPOPT_NOP:        /* Ref: RFC 793 section 3.1 */
361                         length--;
362                         continue;
363                 default:
364                         if (length < 2)
365                                 return;
366                         opsize=*ptr++;
367                         if (opsize < 2) /* "silly options" */
368                                 return;
369                         if (opsize > length)
370                                 return; /* don't parse partial options */
371
372                         if (opcode == TCPOPT_SACK_PERM
373                             && opsize == TCPOLEN_SACK_PERM)
374                                 state->flags |= IP_CT_TCP_FLAG_SACK_PERM;
375                         else if (opcode == TCPOPT_WINDOW
376                                  && opsize == TCPOLEN_WINDOW) {
377                                 state->td_scale = *(u_int8_t *)ptr;
378
379                                 if (state->td_scale > TCP_MAX_WSCALE)
380                                         state->td_scale = TCP_MAX_WSCALE;
381
382                                 state->flags |=
383                                         IP_CT_TCP_FLAG_WINDOW_SCALE;
384                         }
385                         ptr += opsize - 2;
386                         length -= opsize;
387                 }
388         }
389 }
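
/* On the wire a window scale option is the three bytes
 * { TCPOPT_WINDOW, TCPOLEN_WINDOW, shift }, e.g. { 3, 3, 7 } for a scale
 * factor of 2^7; the parser above records the shift in td_scale, capped at
 * TCP_MAX_WSCALE (14), and sets IP_CT_TCP_FLAG_WINDOW_SCALE.
 */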
390
391 static void tcp_sack(const struct sk_buff *skb, unsigned int dataoff,
392                      const struct tcphdr *tcph, __u32 *sack)
393 {
394         unsigned char buff[(15 * 4) - sizeof(struct tcphdr)];
395         const unsigned char *ptr;
396         int length = (tcph->doff*4) - sizeof(struct tcphdr);
397         __u32 tmp;
398
399         if (!length)
400                 return;
401
402         ptr = skb_header_pointer(skb, dataoff + sizeof(struct tcphdr),
403                                  length, buff);
404         if (!ptr)
405                 return;
406
407         /* Fast path for timestamp-only option */
408         if (length == TCPOLEN_TSTAMP_ALIGNED
409             && *(__be32 *)ptr == htonl((TCPOPT_NOP << 24)
410                                        | (TCPOPT_NOP << 16)
411                                        | (TCPOPT_TIMESTAMP << 8)
412                                        | TCPOLEN_TIMESTAMP))
413                 return;
414
415         while (length > 0) {
416                 int opcode = *ptr++;
417                 int opsize, i;
418
419                 switch (opcode) {
420                 case TCPOPT_EOL:
421                         return;
422                 case TCPOPT_NOP:        /* Ref: RFC 793 section 3.1 */
423                         length--;
424                         continue;
425                 default:
426                         if (length < 2)
427                                 return;
428                         opsize = *ptr++;
429                         if (opsize < 2) /* "silly options" */
430                                 return;
431                         if (opsize > length)
432                                 return; /* don't parse partial options */
433
434                         if (opcode == TCPOPT_SACK
435                             && opsize >= (TCPOLEN_SACK_BASE
436                                           + TCPOLEN_SACK_PERBLOCK)
437                             && !((opsize - TCPOLEN_SACK_BASE)
438                                  % TCPOLEN_SACK_PERBLOCK)) {
439                                 for (i = 0;
440                                      i < (opsize - TCPOLEN_SACK_BASE);
441                                      i += TCPOLEN_SACK_PERBLOCK) {
442                                         tmp = get_unaligned_be32((__be32 *)(ptr+i)+1);
443
444                                         if (after(tmp, *sack))
445                                                 *sack = tmp;
446                                 }
447                                 return;
448                         }
449                         ptr += opsize - 2;
450                         length -= opsize;
451                 }
452         }
453 }
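
/* A SACK option with a single block is encoded as
 * { TCPOPT_SACK, TCPOLEN_SACK_BASE + TCPOLEN_SACK_PERBLOCK, left, right },
 * i.e. kind 5, length 10, followed by two 32-bit edges; only the highest
 * right edge matters here, since that is what tcp_in_window() compares
 * against receiver->td_end.
 */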
454
455 static void tcp_init_sender(struct ip_ct_tcp_state *sender,
456                             struct ip_ct_tcp_state *receiver,
457                             const struct sk_buff *skb,
458                             unsigned int dataoff,
459                             const struct tcphdr *tcph,
460                             u32 end, u32 win,
461                             enum ip_conntrack_dir dir)
462 {
463         /* SYN-ACK in reply to a SYN
464          * or SYN from reply direction in simultaneous open.
465          */
466         sender->td_end =
467         sender->td_maxend = end;
468         sender->td_maxwin = (win == 0 ? 1 : win);
469
470         tcp_options(skb, dataoff, tcph, sender);
471         /* RFC 1323:
472          * Both sides must send the Window Scale option
473          * to enable window scaling in either direction.
474          */
475         if (dir == IP_CT_DIR_REPLY &&
476             !(sender->flags & IP_CT_TCP_FLAG_WINDOW_SCALE &&
477               receiver->flags & IP_CT_TCP_FLAG_WINDOW_SCALE)) {
478                 sender->td_scale = 0;
479                 receiver->td_scale = 0;
480         }
481 }
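
/* In practice this means that if, say, only the original direction offered
 * window scaling and the SYN-ACK did not echo the option, both td_scale
 * values are reset to 0 and all windows are tracked unscaled, mirroring what
 * the endpoints themselves will do per RFC 1323.
 */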
482
483 __printf(6, 7)
484 static enum nf_ct_tcp_action nf_tcp_log_invalid(const struct sk_buff *skb,
485                                                 const struct nf_conn *ct,
486                                                 const struct nf_hook_state *state,
487                                                 const struct ip_ct_tcp_state *sender,
488                                                 enum nf_ct_tcp_action ret,
489                                                 const char *fmt, ...)
490 {
491         const struct nf_tcp_net *tn = nf_tcp_pernet(nf_ct_net(ct));
492         struct va_format vaf;
493         va_list args;
494         bool be_liberal;
495
496         be_liberal = sender->flags & IP_CT_TCP_FLAG_BE_LIBERAL || tn->tcp_be_liberal;
497         if (be_liberal)
498                 return NFCT_TCP_ACCEPT;
499
500         va_start(args, fmt);
501         vaf.fmt = fmt;
502         vaf.va = &args;
503         nf_ct_l4proto_log_invalid(skb, ct, state, "%pV", &vaf);
504         va_end(args);
505
506         return ret;
507 }
508
509 static enum nf_ct_tcp_action
510 tcp_in_window(struct nf_conn *ct, enum ip_conntrack_dir dir,
511               unsigned int index, const struct sk_buff *skb,
512               unsigned int dataoff, const struct tcphdr *tcph,
513               const struct nf_hook_state *hook_state)
514 {
515         struct ip_ct_tcp *state = &ct->proto.tcp;
516         struct ip_ct_tcp_state *sender = &state->seen[dir];
517         struct ip_ct_tcp_state *receiver = &state->seen[!dir];
518         __u32 seq, ack, sack, end, win, swin;
519         bool in_recv_win, seq_ok;
520         s32 receiver_offset;
521         u16 win_raw;
522
523         /*
524          * Get the required data from the packet.
525          */
526         seq = ntohl(tcph->seq);
527         ack = sack = ntohl(tcph->ack_seq);
528         win_raw = ntohs(tcph->window);
529         win = win_raw;
530         end = segment_seq_plus_len(seq, skb->len, dataoff, tcph);
531
532         if (receiver->flags & IP_CT_TCP_FLAG_SACK_PERM)
533                 tcp_sack(skb, dataoff, tcph, &sack);
534
535         /* Take into account NAT sequence number mangling */
536         receiver_offset = nf_ct_seq_offset(ct, !dir, ack - 1);
537         ack -= receiver_offset;
538         sack -= receiver_offset;
539
540         if (sender->td_maxwin == 0) {
541                 /*
542                  * Initialize sender data.
543                  */
544                 if (tcph->syn) {
545                         tcp_init_sender(sender, receiver,
546                                         skb, dataoff, tcph,
547                                         end, win, dir);
548                         if (!tcph->ack)
549                                 /* Simultaneous open */
550                                 return NFCT_TCP_ACCEPT;
551                 } else {
552                         /*
553                          * We are in the middle of a connection,
554                          * its history is lost for us.
555                          * Let's try to use the data from the packet.
556                          */
557                         sender->td_end = end;
558                         swin = win << sender->td_scale;
559                         sender->td_maxwin = (swin == 0 ? 1 : swin);
560                         sender->td_maxend = end + sender->td_maxwin;
561                         if (receiver->td_maxwin == 0) {
562                                 /* We haven't seen traffic in the other
563                                  * direction yet but we have to tweak window
564                                  * tracking to pass III and IV until that
565                                  * happens.
566                                  */
567                                 receiver->td_end = receiver->td_maxend = sack;
568                         } else if (sack == receiver->td_end + 1) {
569                                 /* Likely a reply to a keepalive.
570                                  * Needed for III.
571                                  */
572                                 receiver->td_end++;
573                         }
574
575                 }
576         } else if (tcph->syn &&
577                    after(end, sender->td_end) &&
578                    (state->state == TCP_CONNTRACK_SYN_SENT ||
579                     state->state == TCP_CONNTRACK_SYN_RECV)) {
580                 /*
581                  * RFC 793: "if a TCP is reinitialized ... then it need
582                  * not wait at all; it must only be sure to use sequence
583                  * numbers larger than those recently used."
584                  *
585                  * Re-init state for this direction, just like for the first
586                  * syn(-ack) reply, it might differ in seq, ack or tcp options.
587                  */
588                 tcp_init_sender(sender, receiver,
589                                 skb, dataoff, tcph,
590                                 end, win, dir);
591
592                 if (dir == IP_CT_DIR_REPLY && !tcph->ack)
593                         return NFCT_TCP_ACCEPT;
594         }
595
596         if (!(tcph->ack)) {
597                 /*
598                  * If there is no ACK, just pretend it was set and OK.
599                  */
600                 ack = sack = receiver->td_end;
601         } else if (((tcp_flag_word(tcph) & (TCP_FLAG_ACK|TCP_FLAG_RST)) ==
602                     (TCP_FLAG_ACK|TCP_FLAG_RST))
603                    && (ack == 0)) {
604                 /*
605                  * Broken TCP stacks that set ACK in RST packets as well,
606                  * but with a zero ack value.
607                  */
608                 ack = sack = receiver->td_end;
609         }
610
611         if (tcph->rst && seq == 0 && state->state == TCP_CONNTRACK_SYN_SENT)
612                 /*
613                  * RST sent answering SYN.
614                  */
615                 seq = end = sender->td_end;
616
617         seq_ok = before(seq, sender->td_maxend + 1);
618         if (!seq_ok) {
619                 u32 overshot = end - sender->td_maxend + 1;
620                 bool ack_ok;
621
622                 ack_ok = after(sack, receiver->td_end - MAXACKWINDOW(sender) - 1);
623                 in_recv_win = receiver->td_maxwin &&
624                               after(end, sender->td_end - receiver->td_maxwin - 1);
625
626                 if (in_recv_win &&
627                     ack_ok &&
628                     overshot <= receiver->td_maxwin &&
629                     before(sack, receiver->td_end + 1)) {
630                         /* Work around TCPs that send more bytes than allowed by
631                          * the receive window.
632                          *
633                          * If the (marked as invalid) packet is allowed to pass by
634                          * the ruleset and the peer acks this data, then it's possible
635                          * that all future packets will trigger the 'ACK is over upper bound' check.
636                          *
637                          * Thus, if only the sequence check fails, update td_end so that a
638                          * possible ACK for this data can update the internal state.
639                          */
640                         sender->td_end = end;
641                         sender->flags |= IP_CT_TCP_FLAG_DATA_UNACKNOWLEDGED;
642
643                         return nf_tcp_log_invalid(skb, ct, hook_state, sender, NFCT_TCP_IGNORE,
644                                                   "%u bytes more than expected", overshot);
645                 }
646
647                 return nf_tcp_log_invalid(skb, ct, hook_state, sender, NFCT_TCP_INVALID,
648                                           "SEQ is over upper bound %u (over the window of the receiver)",
649                                           sender->td_maxend + 1);
650         }
651
652         if (!before(sack, receiver->td_end + 1))
653                 return nf_tcp_log_invalid(skb, ct, hook_state, sender, NFCT_TCP_INVALID,
654                                           "ACK is over upper bound %u (ACKed data not seen yet)",
655                                           receiver->td_end + 1);
656
657         /* Is the ending sequence in the receive window (if available)? */
658         in_recv_win = !receiver->td_maxwin ||
659                       after(end, sender->td_end - receiver->td_maxwin - 1);
660         if (!in_recv_win)
661                 return nf_tcp_log_invalid(skb, ct, hook_state, sender, NFCT_TCP_IGNORE,
662                                           "SEQ is under lower bound %u (already ACKed data retransmitted)",
663                                           sender->td_end - receiver->td_maxwin - 1);
664         if (!after(sack, receiver->td_end - MAXACKWINDOW(sender) - 1))
665                 return nf_tcp_log_invalid(skb, ct, hook_state, sender, NFCT_TCP_IGNORE,
666                                           "ignored ACK under lower bound %u (possibly overly delayed)",
667                                           receiver->td_end - MAXACKWINDOW(sender) - 1);
668
669         /* Take into account window scaling (RFC 1323). */
670         if (!tcph->syn)
671                 win <<= sender->td_scale;
672
673         /* Update sender data. */
674         swin = win + (sack - ack);
675         if (sender->td_maxwin < swin)
676                 sender->td_maxwin = swin;
677         if (after(end, sender->td_end)) {
678                 sender->td_end = end;
679                 sender->flags |= IP_CT_TCP_FLAG_DATA_UNACKNOWLEDGED;
680         }
681         if (tcph->ack) {
682                 if (!(sender->flags & IP_CT_TCP_FLAG_MAXACK_SET)) {
683                         sender->td_maxack = ack;
684                         sender->flags |= IP_CT_TCP_FLAG_MAXACK_SET;
685                 } else if (after(ack, sender->td_maxack)) {
686                         sender->td_maxack = ack;
687                 }
688         }
689
690         /* Update receiver data. */
691         if (receiver->td_maxwin != 0 && after(end, sender->td_maxend))
692                 receiver->td_maxwin += end - sender->td_maxend;
693         if (after(sack + win, receiver->td_maxend - 1)) {
694                 receiver->td_maxend = sack + win;
695                 if (win == 0)
696                         receiver->td_maxend++;
697         }
698         if (ack == receiver->td_end)
699                 receiver->flags &= ~IP_CT_TCP_FLAG_DATA_UNACKNOWLEDGED;
700
701         /* Check retransmissions. */
702         if (index == TCP_ACK_SET) {
703                 if (state->last_dir == dir &&
704                     state->last_seq == seq &&
705                     state->last_ack == ack &&
706                     state->last_end == end &&
707                     state->last_win == win_raw) {
708                         state->retrans++;
709                 } else {
710                         state->last_dir = dir;
711                         state->last_seq = seq;
712                         state->last_ack = ack;
713                         state->last_end = end;
714                         state->last_win = win_raw;
715                         state->retrans = 0;
716                 }
717         }
718
719         return NFCT_TCP_ACCEPT;
720 }
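
/* Note on the window scaling applied above: with td_scale == 7 an advertised
 * window of 512 corresponds to 512 << 7 == 65536 bytes, and it is this scaled
 * value that feeds td_maxwin and td_maxend; windows in SYN segments are never
 * scaled.
 */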
721
722 static void __cold nf_tcp_handle_invalid(struct nf_conn *ct,
723                                          enum ip_conntrack_dir dir,
724                                          int index,
725                                          const struct sk_buff *skb,
726                                          const struct nf_hook_state *hook_state)
727 {
728         const unsigned int *timeouts;
729         const struct nf_tcp_net *tn;
730         unsigned int timeout;
731         u32 expires;
732
733         if (!test_bit(IPS_ASSURED_BIT, &ct->status) ||
734             test_bit(IPS_FIXED_TIMEOUT_BIT, &ct->status))
735                 return;
736
737         /* We don't want to have connections hanging around in ESTABLISHED
738          * state for a long time 'just because' conntrack deemed a FIN/RST
739          * out-of-window.
740          *
741          * Shrink the timeout just like when there is unacked data.
742          * This speeds up eviction of 'dead' connections where the
743          * connection and conntracks internal state are out of sync.
744          */
745         switch (index) {
746         case TCP_RST_SET:
747         case TCP_FIN_SET:
748                 break;
749         default:
750                 return;
751         }
752
753         if (ct->proto.tcp.last_dir != dir &&
754             (ct->proto.tcp.last_index == TCP_FIN_SET ||
755              ct->proto.tcp.last_index == TCP_RST_SET)) {
756                 expires = nf_ct_expires(ct);
757                 if (expires < 120 * HZ)
758                         return;
759
760                 tn = nf_tcp_pernet(nf_ct_net(ct));
761                 timeouts = nf_ct_timeout_lookup(ct);
762                 if (!timeouts)
763                         timeouts = tn->timeouts;
764
765                 timeout = READ_ONCE(timeouts[TCP_CONNTRACK_UNACK]);
766                 if (expires > timeout) {
767                         nf_ct_l4proto_log_invalid(skb, ct, hook_state,
768                                           "packet (index %d, dir %d) response for index %d lower timeout to %u",
769                                           index, dir, ct->proto.tcp.last_index, timeout);
770
771                         WRITE_ONCE(ct->timeout, timeout + nfct_time_stamp);
772                 }
773         } else {
774                 ct->proto.tcp.last_index = index;
775                 ct->proto.tcp.last_dir = dir;
776         }
777 }
778
779 /* table of valid flag combinations - PUSH, ECE and CWR are always valid */
780 static const u8 tcp_valid_flags[(TCPHDR_FIN|TCPHDR_SYN|TCPHDR_RST|TCPHDR_ACK|
781                                  TCPHDR_URG) + 1] =
782 {
783         [TCPHDR_SYN]                            = 1,
784         [TCPHDR_SYN|TCPHDR_URG]                 = 1,
785         [TCPHDR_SYN|TCPHDR_ACK]                 = 1,
786         [TCPHDR_RST]                            = 1,
787         [TCPHDR_RST|TCPHDR_ACK]                 = 1,
788         [TCPHDR_FIN|TCPHDR_ACK]                 = 1,
789         [TCPHDR_FIN|TCPHDR_ACK|TCPHDR_URG]      = 1,
790         [TCPHDR_ACK]                            = 1,
791         [TCPHDR_ACK|TCPHDR_URG]                 = 1,
792 };
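
/* tcp_error() below masks out PSH, ECE and CWR before indexing this table, so
 * e.g. SYN|ACK|PSH|ECE is reduced to SYN|ACK and accepted, while SYN|FIN or
 * SYN|RST hit a zero entry and the packet is flagged as invalid.
 */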
793
794 static void tcp_error_log(const struct sk_buff *skb,
795                           const struct nf_hook_state *state,
796                           const char *msg)
797 {
798         nf_l4proto_log_invalid(skb, state, IPPROTO_TCP, "%s", msg);
799 }
800
801 /* Protect conntrack against broken packets. Code taken from ipt_unclean.c.  */
802 static bool tcp_error(const struct tcphdr *th,
803                       struct sk_buff *skb,
804                       unsigned int dataoff,
805                       const struct nf_hook_state *state)
806 {
807         unsigned int tcplen = skb->len - dataoff;
808         u8 tcpflags;
809
810         /* Not whole TCP header or malformed packet */
811         if (th->doff*4 < sizeof(struct tcphdr) || tcplen < th->doff*4) {
812                 tcp_error_log(skb, state, "truncated packet");
813                 return true;
814         }
815
816         /* Checksum invalid? Ignore.
817          * We skip checking packets on the outgoing path
818          * because the checksum is assumed to be correct.
819          */
820         /* FIXME: Source route IP option packets --RR */
821         if (state->net->ct.sysctl_checksum &&
822             state->hook == NF_INET_PRE_ROUTING &&
823             nf_checksum(skb, state->hook, dataoff, IPPROTO_TCP, state->pf)) {
824                 tcp_error_log(skb, state, "bad checksum");
825                 return true;
826         }
827
828         /* Check TCP flags. */
829         tcpflags = (tcp_flag_byte(th) & ~(TCPHDR_ECE|TCPHDR_CWR|TCPHDR_PSH));
830         if (!tcp_valid_flags[tcpflags]) {
831                 tcp_error_log(skb, state, "invalid tcp flag combination");
832                 return true;
833         }
834
835         return false;
836 }
837
838 static noinline bool tcp_new(struct nf_conn *ct, const struct sk_buff *skb,
839                              unsigned int dataoff,
840                              const struct tcphdr *th,
841                              const struct nf_hook_state *state)
842 {
843         enum tcp_conntrack new_state;
844         struct net *net = nf_ct_net(ct);
845         const struct nf_tcp_net *tn = nf_tcp_pernet(net);
846
847         /* Don't need lock here: this conntrack is not in circulation yet */
848         new_state = tcp_conntracks[0][get_conntrack_index(th)][TCP_CONNTRACK_NONE];
849
850         /* Invalid: delete conntrack */
851         if (new_state >= TCP_CONNTRACK_MAX) {
852                 tcp_error_log(skb, state, "invalid new");
853                 return false;
854         }
855
856         if (new_state == TCP_CONNTRACK_SYN_SENT) {
857                 memset(&ct->proto.tcp, 0, sizeof(ct->proto.tcp));
858                 /* SYN packet */
859                 ct->proto.tcp.seen[0].td_end =
860                         segment_seq_plus_len(ntohl(th->seq), skb->len,
861                                              dataoff, th);
862                 ct->proto.tcp.seen[0].td_maxwin = ntohs(th->window);
863                 if (ct->proto.tcp.seen[0].td_maxwin == 0)
864                         ct->proto.tcp.seen[0].td_maxwin = 1;
865                 ct->proto.tcp.seen[0].td_maxend =
866                         ct->proto.tcp.seen[0].td_end;
867
868                 tcp_options(skb, dataoff, th, &ct->proto.tcp.seen[0]);
869         } else if (tn->tcp_loose == 0) {
870                 /* Don't try to pick up connections. */
871                 return false;
872         } else {
873                 memset(&ct->proto.tcp, 0, sizeof(ct->proto.tcp));
874                 /*
875                  * We are in the middle of a connection,
876                  * its history is lost for us.
877                  * Let's try to use the data from the packet.
878                  */
879                 ct->proto.tcp.seen[0].td_end =
880                         segment_seq_plus_len(ntohl(th->seq), skb->len,
881                                              dataoff, th);
882                 ct->proto.tcp.seen[0].td_maxwin = ntohs(th->window);
883                 if (ct->proto.tcp.seen[0].td_maxwin == 0)
884                         ct->proto.tcp.seen[0].td_maxwin = 1;
885                 ct->proto.tcp.seen[0].td_maxend =
886                         ct->proto.tcp.seen[0].td_end +
887                         ct->proto.tcp.seen[0].td_maxwin;
888
889                 /* We assume SACK and liberal window checking to handle
890                  * window scaling */
891                 ct->proto.tcp.seen[0].flags =
892                 ct->proto.tcp.seen[1].flags = IP_CT_TCP_FLAG_SACK_PERM |
893                                               IP_CT_TCP_FLAG_BE_LIBERAL;
894         }
895
896         /* tcp_packet will set them */
897         ct->proto.tcp.last_index = TCP_NONE_SET;
898         return true;
899 }
900
901 static bool tcp_can_early_drop(const struct nf_conn *ct)
902 {
903         switch (ct->proto.tcp.state) {
904         case TCP_CONNTRACK_FIN_WAIT:
905         case TCP_CONNTRACK_LAST_ACK:
906         case TCP_CONNTRACK_TIME_WAIT:
907         case TCP_CONNTRACK_CLOSE:
908         case TCP_CONNTRACK_CLOSE_WAIT:
909                 return true;
910         default:
911                 break;
912         }
913
914         return false;
915 }
916
917 void nf_conntrack_tcp_set_closing(struct nf_conn *ct)
918 {
919         enum tcp_conntrack old_state;
920         const unsigned int *timeouts;
921         u32 timeout;
922
923         if (!nf_ct_is_confirmed(ct))
924                 return;
925
926         spin_lock_bh(&ct->lock);
927         old_state = ct->proto.tcp.state;
928         ct->proto.tcp.state = TCP_CONNTRACK_CLOSE;
929
930         if (old_state == TCP_CONNTRACK_CLOSE ||
931             test_bit(IPS_FIXED_TIMEOUT_BIT, &ct->status)) {
932                 spin_unlock_bh(&ct->lock);
933                 return;
934         }
935
936         timeouts = nf_ct_timeout_lookup(ct);
937         if (!timeouts) {
938                 const struct nf_tcp_net *tn;
939
940                 tn = nf_tcp_pernet(nf_ct_net(ct));
941                 timeouts = tn->timeouts;
942         }
943
944         timeout = timeouts[TCP_CONNTRACK_CLOSE];
945         WRITE_ONCE(ct->timeout, timeout + nfct_time_stamp);
946
947         spin_unlock_bh(&ct->lock);
948
949         nf_conntrack_event_cache(IPCT_PROTOINFO, ct);
950 }
951
952 static void nf_ct_tcp_state_reset(struct ip_ct_tcp_state *state)
953 {
954         state->td_end           = 0;
955         state->td_maxend        = 0;
956         state->td_maxwin        = 0;
957         state->td_maxack        = 0;
958         state->td_scale         = 0;
959         state->flags            &= IP_CT_TCP_FLAG_BE_LIBERAL;
960 }
961
962 /* Returns verdict for packet, or -1 for invalid. */
963 int nf_conntrack_tcp_packet(struct nf_conn *ct,
964                             struct sk_buff *skb,
965                             unsigned int dataoff,
966                             enum ip_conntrack_info ctinfo,
967                             const struct nf_hook_state *state)
968 {
969         struct net *net = nf_ct_net(ct);
970         struct nf_tcp_net *tn = nf_tcp_pernet(net);
971         enum tcp_conntrack new_state, old_state;
972         unsigned int index, *timeouts;
973         enum nf_ct_tcp_action res;
974         enum ip_conntrack_dir dir;
975         const struct tcphdr *th;
976         struct tcphdr _tcph;
977         unsigned long timeout;
978
979         th = skb_header_pointer(skb, dataoff, sizeof(_tcph), &_tcph);
980         if (th == NULL)
981                 return -NF_ACCEPT;
982
983         if (tcp_error(th, skb, dataoff, state))
984                 return -NF_ACCEPT;
985
986         if (!nf_ct_is_confirmed(ct) && !tcp_new(ct, skb, dataoff, th, state))
987                 return -NF_ACCEPT;
988
989         spin_lock_bh(&ct->lock);
990         old_state = ct->proto.tcp.state;
991         dir = CTINFO2DIR(ctinfo);
992         index = get_conntrack_index(th);
993         new_state = tcp_conntracks[dir][index][old_state];
994
995         switch (new_state) {
996         case TCP_CONNTRACK_SYN_SENT:
997                 if (old_state < TCP_CONNTRACK_TIME_WAIT)
998                         break;
999                 /* RFC 1122: "When a connection is closed actively,
1000                  * it MUST linger in TIME-WAIT state for a time 2xMSL
1001                  * (Maximum Segment Lifetime). However, it MAY accept
1002                  * a new SYN from the remote TCP to reopen the connection
1003                  * directly from TIME-WAIT state, if..."
1004                  * We ignore the conditions because we are in the
1005                  * TIME-WAIT state anyway.
1006                  *
1007                  * Handle aborted connections: we and the server
1008                  * think there is an existing connection but the client
1009                  * aborts it and starts a new one.
1010                  */
1011                 if (((ct->proto.tcp.seen[dir].flags
1012                       | ct->proto.tcp.seen[!dir].flags)
1013                      & IP_CT_TCP_FLAG_CLOSE_INIT)
1014                     || (ct->proto.tcp.last_dir == dir
1015                         && ct->proto.tcp.last_index == TCP_RST_SET)) {
1016                         /* Attempt to reopen a closed/aborted connection.
1017                          * Delete this connection and look up again. */
1018                         spin_unlock_bh(&ct->lock);
1019
1020                         /* Only repeat if we can actually remove the timer.
1021                          * Destruction may already be in progress in process
1022                          * context and we must give it a chance to terminate.
1023                          */
1024                         if (nf_ct_kill(ct))
1025                                 return -NF_REPEAT;
1026                         return NF_DROP;
1027                 }
1028                 fallthrough;
1029         case TCP_CONNTRACK_IGNORE:
1030                 /* Ignored packets:
1031                  *
1032                  * Our connection entry may be out of sync, so ignore
1033                  * packets which may signal the real connection between
1034                  * the client and the server.
1035                  *
1036                  * a) SYN in ORIGINAL
1037                  * b) SYN/ACK in REPLY
1038                  * c) ACK in reply direction after initial SYN in original.
1039                  *
1040                  * If the ignored packet is invalid, the receiver will send
1041                  * a RST we'll catch below.
1042                  */
1043                 if (index == TCP_SYNACK_SET
1044                     && ct->proto.tcp.last_index == TCP_SYN_SET
1045                     && ct->proto.tcp.last_dir != dir
1046                     && ntohl(th->ack_seq) == ct->proto.tcp.last_end) {
1047                         /* b) This SYN/ACK acknowledges a SYN that we earlier
1048                          * ignored as invalid. This means that the client and
1049                          * the server are both in sync, while the firewall is
1050                          * not. We get in sync from the previously annotated
1051                          * values.
1052                          */
1053                         old_state = TCP_CONNTRACK_SYN_SENT;
1054                         new_state = TCP_CONNTRACK_SYN_RECV;
1055                         ct->proto.tcp.seen[ct->proto.tcp.last_dir].td_end =
1056                                 ct->proto.tcp.last_end;
1057                         ct->proto.tcp.seen[ct->proto.tcp.last_dir].td_maxend =
1058                                 ct->proto.tcp.last_end;
1059                         ct->proto.tcp.seen[ct->proto.tcp.last_dir].td_maxwin =
1060                                 ct->proto.tcp.last_win == 0 ?
1061                                         1 : ct->proto.tcp.last_win;
1062                         ct->proto.tcp.seen[ct->proto.tcp.last_dir].td_scale =
1063                                 ct->proto.tcp.last_wscale;
1064                         ct->proto.tcp.last_flags &= ~IP_CT_EXP_CHALLENGE_ACK;
1065                         ct->proto.tcp.seen[ct->proto.tcp.last_dir].flags =
1066                                 ct->proto.tcp.last_flags;
1067                         nf_ct_tcp_state_reset(&ct->proto.tcp.seen[dir]);
1068                         break;
1069                 }
1070                 ct->proto.tcp.last_index = index;
1071                 ct->proto.tcp.last_dir = dir;
1072                 ct->proto.tcp.last_seq = ntohl(th->seq);
1073                 ct->proto.tcp.last_end =
1074                     segment_seq_plus_len(ntohl(th->seq), skb->len, dataoff, th);
1075                 ct->proto.tcp.last_win = ntohs(th->window);
1076
1077                 /* a) This is a SYN in ORIGINAL. The client and the server
1078                  * may be in sync but we are not. In that case, we annotate
1079                  * the TCP options and let the packet go through. If it is a
1080                  * valid SYN packet, the server will reply with a SYN/ACK, and
1081                  * then we'll get in sync. Otherwise, the server potentially
1082                  * responds with a challenge ACK if implementing RFC5961.
1083                  */
1084                 if (index == TCP_SYN_SET && dir == IP_CT_DIR_ORIGINAL) {
1085                         struct ip_ct_tcp_state seen = {};
1086
1087                         ct->proto.tcp.last_flags =
1088                         ct->proto.tcp.last_wscale = 0;
1089                         tcp_options(skb, dataoff, th, &seen);
1090                         if (seen.flags & IP_CT_TCP_FLAG_WINDOW_SCALE) {
1091                                 ct->proto.tcp.last_flags |=
1092                                         IP_CT_TCP_FLAG_WINDOW_SCALE;
1093                                 ct->proto.tcp.last_wscale = seen.td_scale;
1094                         }
1095                         if (seen.flags & IP_CT_TCP_FLAG_SACK_PERM) {
1096                                 ct->proto.tcp.last_flags |=
1097                                         IP_CT_TCP_FLAG_SACK_PERM;
1098                         }
1099                         /* Mark the potential for an RFC5961 challenge ACK;
1100                          * this poses a special problem for the LAST_ACK state,
1101                          * as the ACK is interpreted as ACKing the last FIN.
1102                          */
1103                         if (old_state == TCP_CONNTRACK_LAST_ACK)
1104                                 ct->proto.tcp.last_flags |=
1105                                         IP_CT_EXP_CHALLENGE_ACK;
1106                 }
1107
1108                 /* possible challenge ack reply to syn */
1109                 if (old_state == TCP_CONNTRACK_SYN_SENT &&
1110                     index == TCP_ACK_SET &&
1111                     dir == IP_CT_DIR_REPLY)
1112                         ct->proto.tcp.last_ack = ntohl(th->ack_seq);
1113
1114                 spin_unlock_bh(&ct->lock);
1115                 nf_ct_l4proto_log_invalid(skb, ct, state,
1116                                           "packet (index %d) in dir %d ignored, state %s",
1117                                           index, dir,
1118                                           tcp_conntrack_names[old_state]);
1119                 return NF_ACCEPT;
1120         case TCP_CONNTRACK_MAX:
1121                 /* Special case for SYN proxy: when the SYN to the server or
1122                  * the SYN/ACK from the server is lost, the client may transmit
1123                  * a keep-alive packet while in SYN_SENT state. This needs to
1124                  * be associated with the original conntrack entry in order to
1125                  * generate a new SYN with the correct sequence number.
1126                  */
1127                 if (nfct_synproxy(ct) && old_state == TCP_CONNTRACK_SYN_SENT &&
1128                     index == TCP_ACK_SET && dir == IP_CT_DIR_ORIGINAL &&
1129                     ct->proto.tcp.last_dir == IP_CT_DIR_ORIGINAL &&
1130                     ct->proto.tcp.seen[dir].td_end - 1 == ntohl(th->seq)) {
1131                         pr_debug("nf_ct_tcp: SYN proxy client keep alive\n");
1132                         spin_unlock_bh(&ct->lock);
1133                         return NF_ACCEPT;
1134                 }
1135
1136                 /* Invalid packet */
1137                 spin_unlock_bh(&ct->lock);
1138                 nf_ct_l4proto_log_invalid(skb, ct, state,
1139                                           "packet (index %d) in dir %d invalid, state %s",
1140                                           index, dir,
1141                                           tcp_conntrack_names[old_state]);
1142                 return -NF_ACCEPT;
1143         case TCP_CONNTRACK_TIME_WAIT:
1144                 /* RFC5961 compliance causes stacks to send a "challenge ACK",
1145                  * e.g. in response to spurious SYNs.  Conntrack MUST
1146                  * not believe this ACK is acking the last FIN.
1147                  */
1148                 if (old_state == TCP_CONNTRACK_LAST_ACK &&
1149                     index == TCP_ACK_SET &&
1150                     ct->proto.tcp.last_dir != dir &&
1151                     ct->proto.tcp.last_index == TCP_SYN_SET &&
1152                     (ct->proto.tcp.last_flags & IP_CT_EXP_CHALLENGE_ACK)) {
1153                         /* Detected RFC5961 challenge ACK */
1154                         ct->proto.tcp.last_flags &= ~IP_CT_EXP_CHALLENGE_ACK;
1155                         spin_unlock_bh(&ct->lock);
1156                         nf_ct_l4proto_log_invalid(skb, ct, state, "challenge-ack ignored");
1157                         return NF_ACCEPT; /* Don't change state */
1158                 }
1159                 break;
1160         case TCP_CONNTRACK_SYN_SENT2:
1161                 /* tcp_conntracks table is not smart enough to handle
1162                  * simultaneous open.
1163                  */
1164                 ct->proto.tcp.last_flags |= IP_CT_TCP_SIMULTANEOUS_OPEN;
1165                 break;
1166         case TCP_CONNTRACK_SYN_RECV:
1167                 if (dir == IP_CT_DIR_REPLY && index == TCP_ACK_SET &&
1168                     ct->proto.tcp.last_flags & IP_CT_TCP_SIMULTANEOUS_OPEN)
1169                         new_state = TCP_CONNTRACK_ESTABLISHED;
1170                 break;
1171         case TCP_CONNTRACK_CLOSE:
1172                 if (index != TCP_RST_SET)
1173                         break;
1174
1175                 /* If we are closing, tuple might have been re-used already.
1176                  * last_index, last_ack, and all other ct fields used for
1177                  * sequence/window validation are outdated in that case.
1178                  *
1179                  * As the conntrack can already be expired by GC under pressure,
1180                  * just skip validation checks.
1181                  */
1182                 if (tcp_can_early_drop(ct))
1183                         goto in_window;
1184
1185                 /* td_maxack might be outdated if we let a SYN through earlier */
1186                 if ((ct->proto.tcp.seen[!dir].flags & IP_CT_TCP_FLAG_MAXACK_SET) &&
1187                     ct->proto.tcp.last_index != TCP_SYN_SET) {
1188                         u32 seq = ntohl(th->seq);
1189
1190                         /* If we are not in established state and SEQ=0 this is most
1191                          * likely an answer to a SYN we let go through above (last_index
1192                          * can be updated due to out-of-order ACKs).
1193                          */
1194                         if (seq == 0 && !nf_conntrack_tcp_established(ct))
1195                                 break;
1196
1197                         if (before(seq, ct->proto.tcp.seen[!dir].td_maxack) &&
1198                             !tn->tcp_ignore_invalid_rst) {
1199                                 /* Invalid RST  */
1200                                 spin_unlock_bh(&ct->lock);
1201                                 nf_ct_l4proto_log_invalid(skb, ct, state, "invalid rst");
1202                                 return -NF_ACCEPT;
1203                         }
1204
1205                         if (!nf_conntrack_tcp_established(ct) ||
1206                             seq == ct->proto.tcp.seen[!dir].td_maxack)
1207                                 break;
1208
1209                         /* Check if the RST is part of a train, such as
1210                          *   foo:80 > bar:4379: P, 235946583:235946602(19) ack 42
1211                          *   foo:80 > bar:4379: R, 235946602:235946602(0)  ack 42
1212                          */
1213                         if (ct->proto.tcp.last_index == TCP_ACK_SET &&
1214                             ct->proto.tcp.last_dir == dir &&
1215                             seq == ct->proto.tcp.last_end)
1216                                 break;
1217
1218                         /* ... RST sequence number doesn't match exactly, keep
1219                          * established state to allow a possible challenge ACK.
1220                          */
1221                         new_state = old_state;
1222                 }
1223                 if (((test_bit(IPS_SEEN_REPLY_BIT, &ct->status)
1224                          && ct->proto.tcp.last_index == TCP_SYN_SET)
1225                         || (!test_bit(IPS_ASSURED_BIT, &ct->status)
1226                             && ct->proto.tcp.last_index == TCP_ACK_SET))
1227                     && ntohl(th->ack_seq) == ct->proto.tcp.last_end) {
1228                         /* RST sent to invalid SYN or ACK we had let through
1229                          * at a) and c) above:
1230                          *
1231                          * a) SYN was in window then
1232                          * c) we hold a half-open connection.
1233                          *
1234                          * Delete our connection entry.
1235                          * We skip window checking, because packet might ACK
1236                          * segments we ignored. */
1237                         goto in_window;
1238                 }
1239
1240                 /* Reset in response to a challenge-ack we let through earlier */
1241                 if (old_state == TCP_CONNTRACK_SYN_SENT &&
1242                     ct->proto.tcp.last_index == TCP_ACK_SET &&
1243                     ct->proto.tcp.last_dir == IP_CT_DIR_REPLY &&
1244                     ntohl(th->seq) == ct->proto.tcp.last_ack)
1245                         goto in_window;
1246
1247                 break;
1248         default:
1249                 /* Keep compilers happy. */
1250                 break;
1251         }
1252
1253         res = tcp_in_window(ct, dir, index,
1254                             skb, dataoff, th, state);
1255         switch (res) {
1256         case NFCT_TCP_IGNORE:
1257                 spin_unlock_bh(&ct->lock);
1258                 return NF_ACCEPT;
1259         case NFCT_TCP_INVALID:
1260                 nf_tcp_handle_invalid(ct, dir, index, skb, state);
1261                 spin_unlock_bh(&ct->lock);
1262                 return -NF_ACCEPT;
1263         case NFCT_TCP_ACCEPT:
1264                 break;
1265         }
1266      in_window:
1267         /* From now on we have got in-window packets */
1268         ct->proto.tcp.last_index = index;
1269         ct->proto.tcp.last_dir = dir;
1270
1271         ct->proto.tcp.state = new_state;
1272         if (old_state != new_state
1273             && new_state == TCP_CONNTRACK_FIN_WAIT)
1274                 ct->proto.tcp.seen[dir].flags |= IP_CT_TCP_FLAG_CLOSE_INIT;
1275
1276         timeouts = nf_ct_timeout_lookup(ct);
1277         if (!timeouts)
1278                 timeouts = tn->timeouts;
1279
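        /* Select the timeout to (re)arm.  Checked in order: retransmission
         * limit exceeded -> RETRANS, RST seen -> CLOSE, unacknowledged
         * data -> UNACK, zero receive window -> RETRANS; the first match
         * wins, otherwise the per-state default is used.  RETRANS and
         * UNACK are only picked when they are shorter than the per-state
         * timeout.
         */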
1280         if (ct->proto.tcp.retrans >= tn->tcp_max_retrans &&
1281             timeouts[new_state] > timeouts[TCP_CONNTRACK_RETRANS])
1282                 timeout = timeouts[TCP_CONNTRACK_RETRANS];
1283         else if (unlikely(index == TCP_RST_SET))
1284                 timeout = timeouts[TCP_CONNTRACK_CLOSE];
1285         else if ((ct->proto.tcp.seen[0].flags | ct->proto.tcp.seen[1].flags) &
1286                  IP_CT_TCP_FLAG_DATA_UNACKNOWLEDGED &&
1287                  timeouts[new_state] > timeouts[TCP_CONNTRACK_UNACK])
1288                 timeout = timeouts[TCP_CONNTRACK_UNACK];
1289         else if (ct->proto.tcp.last_win == 0 &&
1290                  timeouts[new_state] > timeouts[TCP_CONNTRACK_RETRANS])
1291                 timeout = timeouts[TCP_CONNTRACK_RETRANS];
1292         else
1293                 timeout = timeouts[new_state];
1294         spin_unlock_bh(&ct->lock);
1295
1296         if (new_state != old_state)
1297                 nf_conntrack_event_cache(IPCT_PROTOINFO, ct);
1298
1299         if (!test_bit(IPS_SEEN_REPLY_BIT, &ct->status)) {
1300                 /* If the only reply is an RST, we can consider ourselves not to
1301                    have an established connection: this is a fairly common
1302                    problem case, so we can delete the conntrack
1303                    immediately.  --RR */
1304                 if (th->rst) {
1305                         nf_ct_kill_acct(ct, ctinfo, skb);
1306                         return NF_ACCEPT;
1307                 }
1308
1309                 if (index == TCP_SYN_SET && old_state == TCP_CONNTRACK_SYN_SENT) {
1310                         /* Do not renew the timeout on SYN retransmit.
1311                          *
1312                          * Otherwise port reuse by the client or a NAT middlebox can
1313                          * keep the entry alive indefinitely (including NAT info).
1314                          */
1315                         return NF_ACCEPT;
1316                 }
1317
1318                 /* ESTABLISHED without SEEN_REPLY, i.e. mid-connection
1319                  * pickup with loose=1. Avoid large ESTABLISHED timeout.
1320                  */
1321                 if (new_state == TCP_CONNTRACK_ESTABLISHED &&
1322                     timeout > timeouts[TCP_CONNTRACK_UNACK])
1323                         timeout = timeouts[TCP_CONNTRACK_UNACK];
1324         } else if (!test_bit(IPS_ASSURED_BIT, &ct->status)
1325                    && (old_state == TCP_CONNTRACK_SYN_RECV
1326                        || old_state == TCP_CONNTRACK_ESTABLISHED)
1327                    && new_state == TCP_CONNTRACK_ESTABLISHED) {
1328                 /* Set ASSURED if we see valid ack in ESTABLISHED
1329                    after SYN_RECV or a valid answer for a picked up
1330                    connection. */
1331                 set_bit(IPS_ASSURED_BIT, &ct->status);
1332                 nf_conntrack_event_cache(IPCT_ASSURED, ct);
1333         }
1334         nf_ct_refresh_acct(ct, ctinfo, skb, timeout);
1335
1336         return NF_ACCEPT;
1337 }
1338
1339 #if IS_ENABLED(CONFIG_NF_CT_NETLINK)
1340
1341 #include <linux/netfilter/nfnetlink.h>
1342 #include <linux/netfilter/nfnetlink_conntrack.h>
1343
1344 static int tcp_to_nlattr(struct sk_buff *skb, struct nlattr *nla,
1345                          struct nf_conn *ct, bool destroy)
1346 {
1347         struct nlattr *nest_parms;
1348         struct nf_ct_tcp_flags tmp = {};
1349
1350         spin_lock_bh(&ct->lock);
1351         nest_parms = nla_nest_start(skb, CTA_PROTOINFO_TCP);
1352         if (!nest_parms)
1353                 goto nla_put_failure;
1354
1355         if (nla_put_u8(skb, CTA_PROTOINFO_TCP_STATE, ct->proto.tcp.state))
1356                 goto nla_put_failure;
1357
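        /* For destroy events only the final state is of interest; the
         * window scale and flag attributes are skipped.
         */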
1358         if (destroy)
1359                 goto skip_state;
1360
1361         if (nla_put_u8(skb, CTA_PROTOINFO_TCP_WSCALE_ORIGINAL,
1362                        ct->proto.tcp.seen[0].td_scale) ||
1363             nla_put_u8(skb, CTA_PROTOINFO_TCP_WSCALE_REPLY,
1364                        ct->proto.tcp.seen[1].td_scale))
1365                 goto nla_put_failure;
1366
1367         tmp.flags = ct->proto.tcp.seen[0].flags;
1368         if (nla_put(skb, CTA_PROTOINFO_TCP_FLAGS_ORIGINAL,
1369                     sizeof(struct nf_ct_tcp_flags), &tmp))
1370                 goto nla_put_failure;
1371
1372         tmp.flags = ct->proto.tcp.seen[1].flags;
1373         if (nla_put(skb, CTA_PROTOINFO_TCP_FLAGS_REPLY,
1374                     sizeof(struct nf_ct_tcp_flags), &tmp))
1375                 goto nla_put_failure;
1376 skip_state:
1377         spin_unlock_bh(&ct->lock);
1378         nla_nest_end(skb, nest_parms);
1379
1380         return 0;
1381
1382 nla_put_failure:
1383         spin_unlock_bh(&ct->lock);
1384         return -1;
1385 }
1386
1387 static const struct nla_policy tcp_nla_policy[CTA_PROTOINFO_TCP_MAX+1] = {
1388         [CTA_PROTOINFO_TCP_STATE]           = { .type = NLA_U8 },
1389         [CTA_PROTOINFO_TCP_WSCALE_ORIGINAL] = { .type = NLA_U8 },
1390         [CTA_PROTOINFO_TCP_WSCALE_REPLY]    = { .type = NLA_U8 },
1391         [CTA_PROTOINFO_TCP_FLAGS_ORIGINAL]  = { .len = sizeof(struct nf_ct_tcp_flags) },
1392         [CTA_PROTOINFO_TCP_FLAGS_REPLY]     = { .len = sizeof(struct nf_ct_tcp_flags) },
1393 };
1394
1395 #define TCP_NLATTR_SIZE ( \
1396         NLA_ALIGN(NLA_HDRLEN + 1) + \
1397         NLA_ALIGN(NLA_HDRLEN + 1) + \
1398         NLA_ALIGN(NLA_HDRLEN + sizeof(struct nf_ct_tcp_flags)) + \
1399         NLA_ALIGN(NLA_HDRLEN + sizeof(struct nf_ct_tcp_flags)))
1400
1401 static int nlattr_to_tcp(struct nlattr *cda[], struct nf_conn *ct)
1402 {
1403         struct nlattr *pattr = cda[CTA_PROTOINFO_TCP];
1404         struct nlattr *tb[CTA_PROTOINFO_TCP_MAX+1];
1405         int err;
1406
1407         /* an update may not contain anything about the private
1408          * protocol info; in that case skip the parsing */
1409         if (!pattr)
1410                 return 0;
1411
1412         err = nla_parse_nested_deprecated(tb, CTA_PROTOINFO_TCP_MAX, pattr,
1413                                           tcp_nla_policy, NULL);
1414         if (err < 0)
1415                 return err;
1416
1417         if (tb[CTA_PROTOINFO_TCP_STATE] &&
1418             nla_get_u8(tb[CTA_PROTOINFO_TCP_STATE]) >= TCP_CONNTRACK_MAX)
1419                 return -EINVAL;
1420
1421         spin_lock_bh(&ct->lock);
1422         if (tb[CTA_PROTOINFO_TCP_STATE])
1423                 ct->proto.tcp.state = nla_get_u8(tb[CTA_PROTOINFO_TCP_STATE]);
1424
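        /* The nf_ct_tcp_flags mask selects which flag bits the update may
         * change; bits outside the mask keep their current value.
         */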
1425         if (tb[CTA_PROTOINFO_TCP_FLAGS_ORIGINAL]) {
1426                 struct nf_ct_tcp_flags *attr =
1427                         nla_data(tb[CTA_PROTOINFO_TCP_FLAGS_ORIGINAL]);
1428                 ct->proto.tcp.seen[0].flags &= ~attr->mask;
1429                 ct->proto.tcp.seen[0].flags |= attr->flags & attr->mask;
1430         }
1431
1432         if (tb[CTA_PROTOINFO_TCP_FLAGS_REPLY]) {
1433                 struct nf_ct_tcp_flags *attr =
1434                         nla_data(tb[CTA_PROTOINFO_TCP_FLAGS_REPLY]);
1435                 ct->proto.tcp.seen[1].flags &= ~attr->mask;
1436                 ct->proto.tcp.seen[1].flags |= attr->flags & attr->mask;
1437         }
1438
1439         if (tb[CTA_PROTOINFO_TCP_WSCALE_ORIGINAL] &&
1440             tb[CTA_PROTOINFO_TCP_WSCALE_REPLY] &&
1441             ct->proto.tcp.seen[0].flags & IP_CT_TCP_FLAG_WINDOW_SCALE &&
1442             ct->proto.tcp.seen[1].flags & IP_CT_TCP_FLAG_WINDOW_SCALE) {
1443                 ct->proto.tcp.seen[0].td_scale =
1444                         nla_get_u8(tb[CTA_PROTOINFO_TCP_WSCALE_ORIGINAL]);
1445                 ct->proto.tcp.seen[1].td_scale =
1446                         nla_get_u8(tb[CTA_PROTOINFO_TCP_WSCALE_REPLY]);
1447         }
1448         spin_unlock_bh(&ct->lock);
1449
1450         return 0;
1451 }
1452
1453 static unsigned int tcp_nlattr_tuple_size(void)
1454 {
1455         static unsigned int size __read_mostly;
1456
1457         if (!size)
1458                 size = nla_policy_len(nf_ct_port_nla_policy, CTA_PROTO_MAX + 1);
1459
1460         return size;
1461 }
1462 #endif
1463
1464 #ifdef CONFIG_NF_CONNTRACK_TIMEOUT
1465
1466 #include <linux/netfilter/nfnetlink.h>
1467 #include <linux/netfilter/nfnetlink_cttimeout.h>
1468
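/* The CTA_TIMEOUT_TCP_* attributes carry seconds as big-endian u32;
 * convert them to jiffies.  Attributes that are absent keep the
 * per-netns defaults copied at the start of the function.
 */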
1469 static int tcp_timeout_nlattr_to_obj(struct nlattr *tb[],
1470                                      struct net *net, void *data)
1471 {
1472         struct nf_tcp_net *tn = nf_tcp_pernet(net);
1473         unsigned int *timeouts = data;
1474         int i;
1475
1476         if (!timeouts)
1477                 timeouts = tn->timeouts;
1478         /* set default TCP timeouts. */
1479         for (i = 0; i < TCP_CONNTRACK_TIMEOUT_MAX; i++)
1480                 timeouts[i] = tn->timeouts[i];
1481
1482         if (tb[CTA_TIMEOUT_TCP_SYN_SENT]) {
1483                 timeouts[TCP_CONNTRACK_SYN_SENT] =
1484                         ntohl(nla_get_be32(tb[CTA_TIMEOUT_TCP_SYN_SENT])) * HZ;
1485         }
1486
1487         if (tb[CTA_TIMEOUT_TCP_SYN_RECV]) {
1488                 timeouts[TCP_CONNTRACK_SYN_RECV] =
1489                         ntohl(nla_get_be32(tb[CTA_TIMEOUT_TCP_SYN_RECV])) * HZ;
1490         }
1491         if (tb[CTA_TIMEOUT_TCP_ESTABLISHED]) {
1492                 timeouts[TCP_CONNTRACK_ESTABLISHED] =
1493                         ntohl(nla_get_be32(tb[CTA_TIMEOUT_TCP_ESTABLISHED])) * HZ;
1494         }
1495         if (tb[CTA_TIMEOUT_TCP_FIN_WAIT]) {
1496                 timeouts[TCP_CONNTRACK_FIN_WAIT] =
1497                         ntohl(nla_get_be32(tb[CTA_TIMEOUT_TCP_FIN_WAIT])) * HZ;
1498         }
1499         if (tb[CTA_TIMEOUT_TCP_CLOSE_WAIT]) {
1500                 timeouts[TCP_CONNTRACK_CLOSE_WAIT] =
1501                         ntohl(nla_get_be32(tb[CTA_TIMEOUT_TCP_CLOSE_WAIT])) * HZ;
1502         }
1503         if (tb[CTA_TIMEOUT_TCP_LAST_ACK]) {
1504                 timeouts[TCP_CONNTRACK_LAST_ACK] =
1505                         ntohl(nla_get_be32(tb[CTA_TIMEOUT_TCP_LAST_ACK])) * HZ;
1506         }
1507         if (tb[CTA_TIMEOUT_TCP_TIME_WAIT]) {
1508                 timeouts[TCP_CONNTRACK_TIME_WAIT] =
1509                         ntohl(nla_get_be32(tb[CTA_TIMEOUT_TCP_TIME_WAIT])) * HZ;
1510         }
1511         if (tb[CTA_TIMEOUT_TCP_CLOSE]) {
1512                 timeouts[TCP_CONNTRACK_CLOSE] =
1513                         ntohl(nla_get_be32(tb[CTA_TIMEOUT_TCP_CLOSE])) * HZ;
1514         }
1515         if (tb[CTA_TIMEOUT_TCP_SYN_SENT2]) {
1516                 timeouts[TCP_CONNTRACK_SYN_SENT2] =
1517                         ntohl(nla_get_be32(tb[CTA_TIMEOUT_TCP_SYN_SENT2])) * HZ;
1518         }
1519         if (tb[CTA_TIMEOUT_TCP_RETRANS]) {
1520                 timeouts[TCP_CONNTRACK_RETRANS] =
1521                         ntohl(nla_get_be32(tb[CTA_TIMEOUT_TCP_RETRANS])) * HZ;
1522         }
1523         if (tb[CTA_TIMEOUT_TCP_UNACK]) {
1524                 timeouts[TCP_CONNTRACK_UNACK] =
1525                         ntohl(nla_get_be32(tb[CTA_TIMEOUT_TCP_UNACK])) * HZ;
1526         }
1527
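        /* Slot 0 (CTA_TIMEOUT_TCP_UNSPEC) mirrors SYN_SENT so that
         * timeouts[0] holds the 'new' timeout, as for udp or icmp.
         */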
1528         timeouts[CTA_TIMEOUT_TCP_UNSPEC] = timeouts[CTA_TIMEOUT_TCP_SYN_SENT];
1529         return 0;
1530 }
1531
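/* Dump the per-object timeouts back to userspace, converting jiffies to
 * seconds.
 */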
1532 static int
1533 tcp_timeout_obj_to_nlattr(struct sk_buff *skb, const void *data)
1534 {
1535         const unsigned int *timeouts = data;
1536
1537         if (nla_put_be32(skb, CTA_TIMEOUT_TCP_SYN_SENT,
1538                         htonl(timeouts[TCP_CONNTRACK_SYN_SENT] / HZ)) ||
1539             nla_put_be32(skb, CTA_TIMEOUT_TCP_SYN_RECV,
1540                          htonl(timeouts[TCP_CONNTRACK_SYN_RECV] / HZ)) ||
1541             nla_put_be32(skb, CTA_TIMEOUT_TCP_ESTABLISHED,
1542                          htonl(timeouts[TCP_CONNTRACK_ESTABLISHED] / HZ)) ||
1543             nla_put_be32(skb, CTA_TIMEOUT_TCP_FIN_WAIT,
1544                          htonl(timeouts[TCP_CONNTRACK_FIN_WAIT] / HZ)) ||
1545             nla_put_be32(skb, CTA_TIMEOUT_TCP_CLOSE_WAIT,
1546                          htonl(timeouts[TCP_CONNTRACK_CLOSE_WAIT] / HZ)) ||
1547             nla_put_be32(skb, CTA_TIMEOUT_TCP_LAST_ACK,
1548                          htonl(timeouts[TCP_CONNTRACK_LAST_ACK] / HZ)) ||
1549             nla_put_be32(skb, CTA_TIMEOUT_TCP_TIME_WAIT,
1550                          htonl(timeouts[TCP_CONNTRACK_TIME_WAIT] / HZ)) ||
1551             nla_put_be32(skb, CTA_TIMEOUT_TCP_CLOSE,
1552                          htonl(timeouts[TCP_CONNTRACK_CLOSE] / HZ)) ||
1553             nla_put_be32(skb, CTA_TIMEOUT_TCP_SYN_SENT2,
1554                          htonl(timeouts[TCP_CONNTRACK_SYN_SENT2] / HZ)) ||
1555             nla_put_be32(skb, CTA_TIMEOUT_TCP_RETRANS,
1556                          htonl(timeouts[TCP_CONNTRACK_RETRANS] / HZ)) ||
1557             nla_put_be32(skb, CTA_TIMEOUT_TCP_UNACK,
1558                          htonl(timeouts[TCP_CONNTRACK_UNACK] / HZ)))
1559                 goto nla_put_failure;
1560         return 0;
1561
1562 nla_put_failure:
1563         return -ENOSPC;
1564 }
1565
1566 static const struct nla_policy tcp_timeout_nla_policy[CTA_TIMEOUT_TCP_MAX+1] = {
1567         [CTA_TIMEOUT_TCP_SYN_SENT]      = { .type = NLA_U32 },
1568         [CTA_TIMEOUT_TCP_SYN_RECV]      = { .type = NLA_U32 },
1569         [CTA_TIMEOUT_TCP_ESTABLISHED]   = { .type = NLA_U32 },
1570         [CTA_TIMEOUT_TCP_FIN_WAIT]      = { .type = NLA_U32 },
1571         [CTA_TIMEOUT_TCP_CLOSE_WAIT]    = { .type = NLA_U32 },
1572         [CTA_TIMEOUT_TCP_LAST_ACK]      = { .type = NLA_U32 },
1573         [CTA_TIMEOUT_TCP_TIME_WAIT]     = { .type = NLA_U32 },
1574         [CTA_TIMEOUT_TCP_CLOSE]         = { .type = NLA_U32 },
1575         [CTA_TIMEOUT_TCP_SYN_SENT2]     = { .type = NLA_U32 },
1576         [CTA_TIMEOUT_TCP_RETRANS]       = { .type = NLA_U32 },
1577         [CTA_TIMEOUT_TCP_UNACK]         = { .type = NLA_U32 },
1578 };
1579 #endif /* CONFIG_NF_CONNTRACK_TIMEOUT */
1580
1581 void nf_conntrack_tcp_init_net(struct net *net)
1582 {
1583         struct nf_tcp_net *tn = nf_tcp_pernet(net);
1584         int i;
1585
1586         for (i = 0; i < TCP_CONNTRACK_TIMEOUT_MAX; i++)
1587                 tn->timeouts[i] = tcp_timeouts[i];
1588
1589         /* timeouts[0] is unused; make it the same as SYN_SENT so
1590          * ->timeouts[0] contains the 'new' timeout, like udp or icmp.
1591          */
1592         tn->timeouts[0] = tcp_timeouts[TCP_CONNTRACK_SYN_SENT];
1593
1594         /* If tcp_loose is set to zero, we disable picking up already
1595          * established connections (mid-stream pickup).
1596          */
1597         tn->tcp_loose = 1;
1598
1599         /* "Be conservative in what you do,
1600          *  be liberal in what you accept from others."
1601          * If it's non-zero, we mark only out-of-window RST segments as INVALID.
1602          */
1603         tn->tcp_be_liberal = 0;
1604
1605         /* If it's non-zero, the RST sequence number check is turned off */
1606         tn->tcp_ignore_invalid_rst = 0;
1607
1608         /* Maximum number of retransmitted packets without receiving an
1609          * (acceptable) ACK from the destination. Once this number is reached,
1610          * a shorter timer is started.
1611          */
1612         tn->tcp_max_retrans = 3;
1613
1614 #if IS_ENABLED(CONFIG_NF_FLOW_TABLE)
1615         tn->offload_timeout = 30 * HZ;
1616 #endif
1617 }
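/* The per-netns defaults set above (tcp_loose, tcp_be_liberal,
 * tcp_ignore_invalid_rst, tcp_max_retrans and the timeouts array) are
 * exposed as net.netfilter.nf_conntrack_tcp_* sysctls; the sysctl table
 * itself is registered outside this file (nf_conntrack_standalone.c).
 */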
1618
1619 const struct nf_conntrack_l4proto nf_conntrack_l4proto_tcp =
1620 {
1621         .l4proto                = IPPROTO_TCP,
1622 #ifdef CONFIG_NF_CONNTRACK_PROCFS
1623         .print_conntrack        = tcp_print_conntrack,
1624 #endif
1625         .can_early_drop         = tcp_can_early_drop,
1626 #if IS_ENABLED(CONFIG_NF_CT_NETLINK)
1627         .to_nlattr              = tcp_to_nlattr,
1628         .from_nlattr            = nlattr_to_tcp,
1629         .tuple_to_nlattr        = nf_ct_port_tuple_to_nlattr,
1630         .nlattr_to_tuple        = nf_ct_port_nlattr_to_tuple,
1631         .nlattr_tuple_size      = tcp_nlattr_tuple_size,
1632         .nlattr_size            = TCP_NLATTR_SIZE,
1633         .nla_policy             = nf_ct_port_nla_policy,
1634 #endif
1635 #ifdef CONFIG_NF_CONNTRACK_TIMEOUT
1636         .ctnl_timeout           = {
1637                 .nlattr_to_obj  = tcp_timeout_nlattr_to_obj,
1638                 .obj_to_nlattr  = tcp_timeout_obj_to_nlattr,
1639                 .nlattr_max     = CTA_TIMEOUT_TCP_MAX,
1640                 .obj_size       = sizeof(unsigned int) *
1641                                         TCP_CONNTRACK_TIMEOUT_MAX,
1642                 .nla_policy     = tcp_timeout_nla_policy,
1643         },
1644 #endif /* CONFIG_NF_CONNTRACK_TIMEOUT */
1645 };