1 /* (C) 1999-2001 Paul `Rusty' Russell
2 * (C) 2002-2004 Netfilter Core Team <coreteam@netfilter.org>
3 * (C) 2002-2013 Jozsef Kadlecsik <kadlec@blackhole.kfki.hu>
4 * (C) 2006-2012 Patrick McHardy <kaber@trash.net>
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License version 2 as
8 * published by the Free Software Foundation.
11 #include <linux/types.h>
12 #include <linux/timer.h>
13 #include <linux/module.h>
15 #include <linux/tcp.h>
16 #include <linux/spinlock.h>
17 #include <linux/skbuff.h>
18 #include <linux/ipv6.h>
19 #include <net/ip6_checksum.h>
20 #include <asm/unaligned.h>
24 #include <linux/netfilter.h>
25 #include <linux/netfilter_ipv4.h>
26 #include <linux/netfilter_ipv6.h>
27 #include <net/netfilter/nf_conntrack.h>
28 #include <net/netfilter/nf_conntrack_l4proto.h>
29 #include <net/netfilter/nf_conntrack_ecache.h>
30 #include <net/netfilter/nf_conntrack_seqadj.h>
31 #include <net/netfilter/nf_conntrack_synproxy.h>
32 #include <net/netfilter/nf_conntrack_timeout.h>
33 #include <net/netfilter/nf_log.h>
34 #include <net/netfilter/ipv4/nf_conntrack_ipv4.h>
35 #include <net/netfilter/ipv6/nf_conntrack_ipv6.h>
37 /* "Be conservative in what you do,
38 be liberal in what you accept from others."
39 If it's non-zero, we mark only out of window RST segments as INVALID. */
40 static int nf_ct_tcp_be_liberal __read_mostly = 0;
42 /* If it is set to zero, we disable picking up already established
44 static int nf_ct_tcp_loose __read_mostly = 1;
46 /* Max number of the retransmitted packets without receiving an (acceptable)
47 ACK from the destination. If this number is reached, a shorter timer
49 static int nf_ct_tcp_max_retrans __read_mostly = 3;
51 /* FIXME: Examine ipfilter's timeouts and conntrack transitions more
52 closely. They're more complex. --RR */
/* Human-readable names indexed by enum tcp_conntrack state; used by
 * tcp_print_conntrack() and the "invalid packet ignored" log message below.
 * (Array elements are elided in this view.)
 */
54 static const char *const tcp_conntrack_names[] = {
/* Postfix time-unit macros: "2 MINS" expands to "2 * 60 SECS", etc.
 * NOTE(review): they build on a SECS macro that is not visible in this
 * chunk — presumably "* HZ"; confirm against the full file.
 */
68 #define MINS * 60 SECS
69 #define HOURS * 60 MINS
70 #define DAYS * 24 HOURS
/* Per-state default conntrack timeouts, written with the postfix
 * time-unit macros defined above.  Looked up via nf_ct_timeout_lookup()
 * fallback (tn->timeouts) in nf_conntrack_tcp_packet().
 */
72 static const unsigned int tcp_timeouts[TCP_CONNTRACK_TIMEOUT_MAX] = {
73 [TCP_CONNTRACK_SYN_SENT] = 2 MINS,
74 [TCP_CONNTRACK_SYN_RECV] = 60 SECS,
75 [TCP_CONNTRACK_ESTABLISHED] = 5 DAYS,
76 [TCP_CONNTRACK_FIN_WAIT] = 2 MINS,
77 [TCP_CONNTRACK_CLOSE_WAIT] = 60 SECS,
78 [TCP_CONNTRACK_LAST_ACK] = 30 SECS,
79 [TCP_CONNTRACK_TIME_WAIT] = 2 MINS,
80 [TCP_CONNTRACK_CLOSE] = 10 SECS,
81 [TCP_CONNTRACK_SYN_SENT2] = 2 MINS,
82 /* RFC1122 says the R2 limit should be at least 100 seconds.
83 Linux uses 15 packets as limit, which corresponds
84 to ~13-30min depending on RTO. */
85 [TCP_CONNTRACK_RETRANS] = 5 MINS,
86 [TCP_CONNTRACK_UNACK] = 5 MINS,
/* Two-letter state aliases so the tcp_conntracks transition table below
 * stays readable.  sIV (TCP_CONNTRACK_MAX) marks an invalid transition,
 * sIG (TCP_CONNTRACK_IGNORE) marks a packet to be ignored.
 */
89 #define sNO TCP_CONNTRACK_NONE
90 #define sSS TCP_CONNTRACK_SYN_SENT
91 #define sSR TCP_CONNTRACK_SYN_RECV
92 #define sES TCP_CONNTRACK_ESTABLISHED
93 #define sFW TCP_CONNTRACK_FIN_WAIT
94 #define sCW TCP_CONNTRACK_CLOSE_WAIT
95 #define sLA TCP_CONNTRACK_LAST_ACK
96 #define sTW TCP_CONNTRACK_TIME_WAIT
97 #define sCL TCP_CONNTRACK_CLOSE
98 #define sS2 TCP_CONNTRACK_SYN_SENT2
99 #define sIV TCP_CONNTRACK_MAX
100 #define sIG TCP_CONNTRACK_IGNORE
102 /* What TCP flags are set from RST/SYN/FIN/ACK. */
113 * The TCP state transition table needs a few words...
115 * We are the man in the middle. All the packets go through us
116 * but might get lost in transit to the destination.
117 * It is assumed that the destinations can't receive segments
120 * The checked segment is in window, but our windows are *not*
121 * equivalent with the ones of the sender/receiver. We always
122 * try to guess the state of the current sender.
124 * The meaning of the states are:
126 * NONE: initial state
127 * SYN_SENT: SYN-only packet seen
128 * SYN_SENT2: SYN-only packet seen from reply dir, simultaneous open
129 * SYN_RECV: SYN-ACK packet seen
130 * ESTABLISHED: ACK packet seen
131 * FIN_WAIT: FIN packet seen
132 * CLOSE_WAIT: ACK seen (after FIN)
133 * LAST_ACK: FIN seen (after FIN)
134 * TIME_WAIT: last ACK seen
135 * CLOSE: closed connection (RST)
137 * Packets marked as IGNORED (sIG):
138 * if they may be either invalid or valid
139 * and the receiver may send back a connection
140 * closing RST or a SYN/ACK.
142 * Packets marked as INVALID (sIV):
143 * if we regard them as truly invalid packets
/* Transition table: [direction][packet-type index][current state] -> next
 * state.  Direction 0 is ORIGINAL, 1 is REPLY; the packet-type index comes
 * from get_conntrack_index() (SYN/SYNACK/FIN/ACK/RST/NONE).
 */
145 static const u8 tcp_conntracks[2][6][TCP_CONNTRACK_MAX] = {
148 /* sNO, sSS, sSR, sES, sFW, sCW, sLA, sTW, sCL, sS2 */
149 /*syn*/ { sSS, sSS, sIG, sIG, sIG, sIG, sIG, sSS, sSS, sS2 },
151 * sNO -> sSS Initialize a new connection
152 * sSS -> sSS Retransmitted SYN
153 * sS2 -> sS2 Late retransmitted SYN
155 * sES -> sIG Error: SYNs in window outside the SYN_SENT state
156 * are errors. Receiver will reply with RST
157 * and close the connection.
158 * Or we are not in sync and hold a dead connection.
162 * sTW -> sSS Reopened connection (RFC 1122).
165 /* sNO, sSS, sSR, sES, sFW, sCW, sLA, sTW, sCL, sS2 */
166 /*synack*/ { sIV, sIV, sSR, sIV, sIV, sIV, sIV, sIV, sIV, sSR },
168 * sNO -> sIV Too late and no reason to do anything
169 * sSS -> sIV Client can't send SYN and then SYN/ACK
170 * sS2 -> sSR SYN/ACK sent to SYN2 in simultaneous open
171 * sSR -> sSR Late retransmitted SYN/ACK in simultaneous open
172 * sES -> sIV Invalid SYN/ACK packets sent by the client
179 /* sNO, sSS, sSR, sES, sFW, sCW, sLA, sTW, sCL, sS2 */
180 /*fin*/ { sIV, sIV, sFW, sFW, sLA, sLA, sLA, sTW, sCL, sIV },
182 * sNO -> sIV Too late and no reason to do anything...
183 * sSS -> sIV Client might not send FIN in this state:
184 * we enforce waiting for a SYN/ACK reply first.
186 * sSR -> sFW Close started.
188 * sFW -> sLA FIN seen in both directions, waiting for
190 * Might be a retransmitted FIN as well...
192 * sLA -> sLA Retransmitted FIN. Remain in the same state.
196 /* sNO, sSS, sSR, sES, sFW, sCW, sLA, sTW, sCL, sS2 */
197 /*ack*/ { sES, sIV, sES, sES, sCW, sCW, sTW, sTW, sCL, sIV },
199 * sNO -> sES Assumed.
200 * sSS -> sIV ACK is invalid: we haven't seen a SYN/ACK yet.
202 * sSR -> sES Established state is reached.
204 * sFW -> sCW Normal close request answered by ACK.
206 * sLA -> sTW Last ACK detected (RFC5961 challenged)
207 * sTW -> sTW Retransmitted last ACK. Remain in the same state.
210 /* sNO, sSS, sSR, sES, sFW, sCW, sLA, sTW, sCL, sS2 */
211 /*rst*/ { sIV, sCL, sCL, sCL, sCL, sCL, sCL, sCL, sCL, sCL },
212 /*none*/ { sIV, sIV, sIV, sIV, sIV, sIV, sIV, sIV, sIV, sIV }
216 /* sNO, sSS, sSR, sES, sFW, sCW, sLA, sTW, sCL, sS2 */
217 /*syn*/ { sIV, sS2, sIV, sIV, sIV, sIV, sIV, sSS, sIV, sS2 },
219 * sNO -> sIV Never reached.
220 * sSS -> sS2 Simultaneous open
221 * sS2 -> sS2 Retransmitted simultaneous SYN
222 * sSR -> sIV Invalid SYN packets sent by the server
227 * sTW -> sSS Reopened connection, but server may have switched role
230 /* sNO, sSS, sSR, sES, sFW, sCW, sLA, sTW, sCL, sS2 */
231 /*synack*/ { sIV, sSR, sIG, sIG, sIG, sIG, sIG, sIG, sIG, sSR },
233 * sSS -> sSR Standard open.
234 * sS2 -> sSR Simultaneous open
235 * sSR -> sIG Retransmitted SYN/ACK, ignore it.
236 * sES -> sIG Late retransmitted SYN/ACK?
237 * sFW -> sIG Might be SYN/ACK answering ignored SYN
243 /* sNO, sSS, sSR, sES, sFW, sCW, sLA, sTW, sCL, sS2 */
244 /*fin*/ { sIV, sIV, sFW, sFW, sLA, sLA, sLA, sTW, sCL, sIV },
246 * sSS -> sIV Server might not send FIN in this state.
248 * sSR -> sFW Close started.
250 * sFW -> sLA FIN seen in both directions.
252 * sLA -> sLA Retransmitted FIN.
256 /* sNO, sSS, sSR, sES, sFW, sCW, sLA, sTW, sCL, sS2 */
257 /*ack*/ { sIV, sIG, sSR, sES, sCW, sCW, sTW, sTW, sCL, sIG },
259 * sSS -> sIG Might be a half-open connection.
261 * sSR -> sSR Might answer late resent SYN.
263 * sFW -> sCW Normal close request answered by ACK.
265 * sLA -> sTW Last ACK detected (RFC5961 challenged)
266 * sTW -> sTW Retransmitted last ACK.
269 /* sNO, sSS, sSR, sES, sFW, sCW, sLA, sTW, sCL, sS2 */
270 /*rst*/ { sIV, sCL, sCL, sCL, sCL, sCL, sCL, sCL, sCL, sCL },
271 /*none*/ { sIV, sIV, sIV, sIV, sIV, sIV, sIV, sIV, sIV, sIV }
275 #ifdef CONFIG_NF_CONNTRACK_PROCFS
276 /* Print out the private part of the conntrack. */
/* Emits the textual TCP state name into the /proc seq_file entry.
 * NOTE(review): the branch taken for IPS_OFFLOAD flows is elided in this
 * view — presumably it prints an offload marker instead; confirm against
 * the full file.
 */
277 static void tcp_print_conntrack(struct seq_file *s, struct nf_conn *ct)
279 if (test_bit(IPS_OFFLOAD_BIT, &ct->status))
282 seq_printf(s, "%s ", tcp_conntrack_names[ct->proto.tcp.state]);
/* Classify a TCP header into one of the packet-type indices used as the
 * second dimension of the tcp_conntracks table.  Precedence matters:
 * RST first, then SYN (split into SYN vs SYN/ACK), FIN, plain ACK,
 * and finally "none of the above".
 */
286 static unsigned int get_conntrack_index(const struct tcphdr *tcph)
288 if (tcph->rst) return TCP_RST_SET;
289 else if (tcph->syn) return (tcph->ack ? TCP_SYNACK_SET : TCP_SYN_SET);
290 else if (tcph->fin) return TCP_FIN_SET;
291 else if (tcph->ack) return TCP_ACK_SET;
292 else return TCP_NONE_SET;
295 /* TCP connection tracking based on 'Real Stateful TCP Packet Filtering
296 in IP Filter' by Guido van Rooij.
298 http://www.sane.nl/events/sane2000/papers.html
299 http://www.darkart.com/mirrors/www.obfuscation.org/ipf/
301 The boundaries and the conditions are changed according to RFC793:
302 the packet must intersect the window (i.e. segments may be
303 after the right or before the left edge) and thus receivers may ACK
304 segments after the right edge of the window.
306 td_maxend = max(sack + max(win,1)) seen in reply packets
307 td_maxwin = max(max(win, 1)) + (sack - ack) seen in sent packets
308 td_maxwin += seq + len - sender.td_maxend
309 if seq + len > sender.td_maxend
310 td_end = max(seq + len) seen in sent packets
312 I. Upper bound for valid data: seq <= sender.td_maxend
313 II. Lower bound for valid data: seq + len >= sender.td_end - receiver.td_maxwin
314 III. Upper bound for valid (s)ack: sack <= receiver.td_end
315 IV. Lower bound for valid (s)ack: sack >= receiver.td_end - MAXACKWINDOW
317 where sack is the highest right edge of sack block found in the packet
318 or ack in the case of packet without SACK option.
320 The upper bound limit for a valid (s)ack is not ignored -
321 we don't have to deal with fragments.
/* Return the sequence number just past this segment: seq plus the TCP
 * payload length (skb len minus L3 offset minus TCP header), plus one
 * each for SYN and FIN, which occupy a sequence number of their own.
 */
324 static inline __u32 segment_seq_plus_len(__u32 seq,
326 unsigned int dataoff,
327 const struct tcphdr *tcph)
329 /* XXX Should I use payload length field in IP/IPv6 header ?
331 return (seq + len - dataoff - tcph->doff*4
332 + (tcph->syn ? 1 : 0) + (tcph->fin ? 1 : 0));
335 /* Fixme: what about big packets? */
/* Upper bound on how far behind the receiver's td_end an ACK may lag and
 * still be accepted (boundary IV above): the sender's max window, but at
 * least MAXACKWINCONST.
 */
336 #define MAXACKWINCONST 66000
337 #define MAXACKWINDOW(sender) \
338 ((sender)->td_maxwin > MAXACKWINCONST ? (sender)->td_maxwin \
342 * Simplified tcp_parse_options routine from tcp_input.c
/* Walk the TCP option block and record only what window tracking needs:
 * the SACK-permitted flag and the window-scale value (clamped to
 * TCP_MAX_WSCALE).  Results are stored into *state.
 */
344 static void tcp_options(const struct sk_buff *skb,
345 unsigned int dataoff,
346 const struct tcphdr *tcph,
347 struct ip_ct_tcp_state *state)
349 unsigned char buff[(15 * 4) - sizeof(struct tcphdr)];
350 const unsigned char *ptr;
351 int length = (tcph->doff*4) - sizeof(struct tcphdr);
356 ptr = skb_header_pointer(skb, dataoff + sizeof(struct tcphdr),
370 case TCPOPT_NOP: /* Ref: RFC 793 section 3.1 */
377 if (opsize < 2) /* "silly options" */
380 return; /* don't parse partial options */
382 if (opcode == TCPOPT_SACK_PERM
383 && opsize == TCPOLEN_SACK_PERM)
384 state->flags |= IP_CT_TCP_FLAG_SACK_PERM;
385 else if (opcode == TCPOPT_WINDOW
386 && opsize == TCPOLEN_WINDOW) {
387 state->td_scale = *(u_int8_t *)ptr;
389 if (state->td_scale > TCP_MAX_WSCALE)
390 state->td_scale = TCP_MAX_WSCALE;
393 IP_CT_TCP_FLAG_WINDOW_SCALE;
/* Scan the TCP options for SACK blocks and raise *sack to the highest
 * right edge found.  Has a fast path that recognizes the common
 * NOP-NOP-TIMESTAMP layout and skips the full option walk.
 */
401 static void tcp_sack(const struct sk_buff *skb, unsigned int dataoff,
402 const struct tcphdr *tcph, __u32 *sack)
404 unsigned char buff[(15 * 4) - sizeof(struct tcphdr)];
405 const unsigned char *ptr;
406 int length = (tcph->doff*4) - sizeof(struct tcphdr);
412 ptr = skb_header_pointer(skb, dataoff + sizeof(struct tcphdr),
416 /* Fast path for timestamp-only option */
417 if (length == TCPOLEN_TSTAMP_ALIGNED
418 && *(__be32 *)ptr == htonl((TCPOPT_NOP << 24)
420 | (TCPOPT_TIMESTAMP << 8)
421 | TCPOLEN_TIMESTAMP))
431 case TCPOPT_NOP: /* Ref: RFC 793 section 3.1 */
438 if (opsize < 2) /* "silly options" */
441 return; /* don't parse partial options */
443 if (opcode == TCPOPT_SACK
444 && opsize >= (TCPOLEN_SACK_BASE
445 + TCPOLEN_SACK_PERBLOCK)
446 && !((opsize - TCPOLEN_SACK_BASE)
447 % TCPOLEN_SACK_PERBLOCK)) {
449 i < (opsize - TCPOLEN_SACK_BASE);
450 i += TCPOLEN_SACK_PERBLOCK) {
/* +1: second u32 of the block is the right edge */
451 tmp = get_unaligned_be32((__be32 *)(ptr+i)+1);
453 if (after(tmp, *sack))
/* Core window-tracking check (boundaries I-IV documented above the
 * function in the original file): decide whether this segment's
 * seq/ack/sack values fall inside the windows we track per direction,
 * updating sender/receiver state on success.  Returns false for
 * out-of-window packets unless the be_liberal mode applies.
 */
464 static bool tcp_in_window(const struct nf_conn *ct,
465 struct ip_ct_tcp *state,
466 enum ip_conntrack_dir dir,
468 const struct sk_buff *skb,
469 unsigned int dataoff,
470 const struct tcphdr *tcph)
472 struct net *net = nf_ct_net(ct);
473 struct nf_tcp_net *tn = nf_tcp_pernet(net);
474 struct ip_ct_tcp_state *sender = &state->seen[dir];
475 struct ip_ct_tcp_state *receiver = &state->seen[!dir];
476 const struct nf_conntrack_tuple *tuple = &ct->tuplehash[dir].tuple;
477 __u32 seq, ack, sack, end, win, swin;
479 bool res, in_recv_win;
482 * Get the required data from the packet.
484 seq = ntohl(tcph->seq);
485 ack = sack = ntohl(tcph->ack_seq);
486 win = ntohs(tcph->window);
487 end = segment_seq_plus_len(seq, skb->len, dataoff, tcph);
489 if (receiver->flags & IP_CT_TCP_FLAG_SACK_PERM)
490 tcp_sack(skb, dataoff, tcph, &sack);
492 /* Take into account NAT sequence number mangling */
493 receiver_offset = nf_ct_seq_offset(ct, !dir, ack - 1);
494 ack -= receiver_offset;
495 sack -= receiver_offset;
497 pr_debug("tcp_in_window: START\n");
498 pr_debug("tcp_in_window: ");
499 nf_ct_dump_tuple(tuple);
500 pr_debug("seq=%u ack=%u+(%d) sack=%u+(%d) win=%u end=%u\n",
501 seq, ack, receiver_offset, sack, receiver_offset, win, end);
502 pr_debug("tcp_in_window: sender end=%u maxend=%u maxwin=%u scale=%i "
503 "receiver end=%u maxend=%u maxwin=%u scale=%i\n",
504 sender->td_end, sender->td_maxend, sender->td_maxwin,
506 receiver->td_end, receiver->td_maxend, receiver->td_maxwin,
/* td_maxwin == 0 means we have never seen a packet in this direction:
 * initialize the sender side from this packet.
 */
509 if (sender->td_maxwin == 0) {
511 * Initialize sender data.
515 * SYN-ACK in reply to a SYN
516 * or SYN from reply direction in simultaneous open.
519 sender->td_maxend = end;
520 sender->td_maxwin = (win == 0 ? 1 : win);
522 tcp_options(skb, dataoff, tcph, sender);
525 * Both sides must send the Window Scale option
526 * to enable window scaling in either direction.
528 if (!(sender->flags & IP_CT_TCP_FLAG_WINDOW_SCALE
529 && receiver->flags & IP_CT_TCP_FLAG_WINDOW_SCALE))
531 receiver->td_scale = 0;
533 /* Simultaneous open */
537 * We are in the middle of a connection,
538 * its history is lost for us.
539 * Let's try to use the data from the packet.
541 sender->td_end = end;
542 swin = win << sender->td_scale;
543 sender->td_maxwin = (swin == 0 ? 1 : swin);
544 sender->td_maxend = end + sender->td_maxwin;
546 * We haven't seen traffic in the other direction yet
547 * but we have to tweak window tracking to pass III
548 * and IV until that happens.
550 if (receiver->td_maxwin == 0)
551 receiver->td_end = receiver->td_maxend = sack;
553 } else if (((state->state == TCP_CONNTRACK_SYN_SENT
554 && dir == IP_CT_DIR_ORIGINAL)
555 || (state->state == TCP_CONNTRACK_SYN_RECV
556 && dir == IP_CT_DIR_REPLY))
557 && after(end, sender->td_end)) {
559 * RFC 793: "if a TCP is reinitialized ... then it need
560 * not wait at all; it must only be sure to use sequence
561 * numbers larger than those recently used."
564 sender->td_maxend = end;
565 sender->td_maxwin = (win == 0 ? 1 : win);
567 tcp_options(skb, dataoff, tcph, sender);
572 * If there is no ACK, just pretend it was set and OK.
574 ack = sack = receiver->td_end;
575 } else if (((tcp_flag_word(tcph) & (TCP_FLAG_ACK|TCP_FLAG_RST)) ==
576 (TCP_FLAG_ACK|TCP_FLAG_RST))
579 * Broken TCP stacks, that set ACK in RST packets as well
580 * with zero ack value.
582 ack = sack = receiver->td_end;
585 if (tcph->rst && seq == 0 && state->state == TCP_CONNTRACK_SYN_SENT)
587 * RST sent answering SYN.
589 seq = end = sender->td_end;
591 pr_debug("tcp_in_window: ");
592 nf_ct_dump_tuple(tuple);
593 pr_debug("seq=%u ack=%u+(%d) sack=%u+(%d) win=%u end=%u\n",
594 seq, ack, receiver_offset, sack, receiver_offset, win, end);
595 pr_debug("tcp_in_window: sender end=%u maxend=%u maxwin=%u scale=%i "
596 "receiver end=%u maxend=%u maxwin=%u scale=%i\n",
597 sender->td_end, sender->td_maxend, sender->td_maxwin,
599 receiver->td_end, receiver->td_maxend, receiver->td_maxwin,
602 /* Is the ending sequence in the receive window (if available)? */
603 in_recv_win = !receiver->td_maxwin ||
604 after(end, sender->td_end - receiver->td_maxwin - 1);
606 pr_debug("tcp_in_window: I=%i II=%i III=%i IV=%i\n",
607 before(seq, sender->td_maxend + 1),
608 (in_recv_win ? 1 : 0),
609 before(sack, receiver->td_end + 1),
610 after(sack, receiver->td_end - MAXACKWINDOW(sender) - 1));
/* All four boundary conditions (I-IV) hold: the segment is in window. */
612 if (before(seq, sender->td_maxend + 1) &&
614 before(sack, receiver->td_end + 1) &&
615 after(sack, receiver->td_end - MAXACKWINDOW(sender) - 1)) {
617 * Take into account window scaling (RFC 1323).
620 win <<= sender->td_scale;
623 * Update sender data.
625 swin = win + (sack - ack);
626 if (sender->td_maxwin < swin)
627 sender->td_maxwin = swin;
628 if (after(end, sender->td_end)) {
629 sender->td_end = end;
630 sender->flags |= IP_CT_TCP_FLAG_DATA_UNACKNOWLEDGED;
633 if (!(sender->flags & IP_CT_TCP_FLAG_MAXACK_SET)) {
634 sender->td_maxack = ack;
635 sender->flags |= IP_CT_TCP_FLAG_MAXACK_SET;
636 } else if (after(ack, sender->td_maxack))
637 sender->td_maxack = ack;
641 * Update receiver data.
643 if (receiver->td_maxwin != 0 && after(end, sender->td_maxend))
644 receiver->td_maxwin += end - sender->td_maxend;
645 if (after(sack + win, receiver->td_maxend - 1)) {
646 receiver->td_maxend = sack + win;
648 receiver->td_maxend++;
650 if (ack == receiver->td_end)
651 receiver->flags &= ~IP_CT_TCP_FLAG_DATA_UNACKNOWLEDGED;
654 * Check retransmissions.
656 if (index == TCP_ACK_SET) {
657 if (state->last_dir == dir
658 && state->last_seq == seq
659 && state->last_ack == ack
660 && state->last_end == end
661 && state->last_win == win)
664 state->last_dir = dir;
665 state->last_seq = seq;
666 state->last_ack = ack;
667 state->last_end = end;
668 state->last_win = win;
/* Out of window: accept anyway if liberal mode is on, otherwise log
 * which of the four boundaries failed and reject.
 */
675 if (sender->flags & IP_CT_TCP_FLAG_BE_LIBERAL ||
679 nf_ct_l4proto_log_invalid(skb, ct,
681 before(seq, sender->td_maxend + 1) ?
683 before(sack, receiver->td_end + 1) ?
684 after(sack, receiver->td_end - MAXACKWINDOW(sender) - 1) ? "BUG"
685 : "ACK is under the lower bound (possible overly delayed ACK)"
686 : "ACK is over the upper bound (ACKed data not seen yet)"
687 : "SEQ is under the lower bound (already ACKed data retransmitted)"
688 : "SEQ is over the upper bound (over the window of the receiver)");
692 pr_debug("tcp_in_window: res=%u sender end=%u maxend=%u maxwin=%u "
693 "receiver end=%u maxend=%u maxwin=%u\n",
694 res, sender->td_end, sender->td_maxend, sender->td_maxwin,
695 receiver->td_end, receiver->td_maxend, receiver->td_maxwin);
700 /* table of valid flag combinations - PUSH, ECE and CWR are always valid */
/* Indexed by the FIN/SYN/RST/ACK/URG flag byte (PSH/ECE/CWR masked off in
 * tcp_error()); a zero entry means the combination is invalid.
 */
701 static const u8 tcp_valid_flags[(TCPHDR_FIN|TCPHDR_SYN|TCPHDR_RST|TCPHDR_ACK|
705 [TCPHDR_SYN|TCPHDR_URG] = 1,
706 [TCPHDR_SYN|TCPHDR_ACK] = 1,
708 [TCPHDR_RST|TCPHDR_ACK] = 1,
709 [TCPHDR_FIN|TCPHDR_ACK] = 1,
710 [TCPHDR_FIN|TCPHDR_ACK|TCPHDR_URG] = 1,
712 [TCPHDR_ACK|TCPHDR_URG] = 1,
/* Thin wrapper: log an invalid-TCP-packet message against the net
 * namespace and protocol family carried in the hook state.
 */
715 static void tcp_error_log(const struct sk_buff *skb,
716 const struct nf_hook_state *state,
719 nf_l4proto_log_invalid(skb, state->net, state->pf, IPPROTO_TCP, "%s", msg);
722 /* Protect conntrack against broken packets. Code taken from ipt_unclean.c. */
/* Sanity-check a TCP packet before state tracking: header length,
 * checksum (only on PRE_ROUTING when the sysctl enables it), and the
 * flag-combination table above.  Returns true when the packet is bad.
 */
723 static bool tcp_error(const struct tcphdr *th,
725 unsigned int dataoff,
726 const struct nf_hook_state *state)
728 unsigned int tcplen = skb->len - dataoff;
731 /* Not whole TCP header or malformed packet */
732 if (th->doff*4 < sizeof(struct tcphdr) || tcplen < th->doff*4) {
733 tcp_error_log(skb, state, "truncated packet");
737 /* Checksum invalid? Ignore.
738 * We skip checking packets on the outgoing path
739 * because the checksum is assumed to be correct.
741 /* FIXME: Source route IP option packets --RR */
742 if (state->net->ct.sysctl_checksum &&
743 state->hook == NF_INET_PRE_ROUTING &&
744 nf_checksum(skb, state->hook, dataoff, IPPROTO_TCP, state->pf)) {
745 tcp_error_log(skb, state, "bad checksum");
749 /* Check TCP flags. */
750 tcpflags = (tcp_flag_byte(th) & ~(TCPHDR_ECE|TCPHDR_CWR|TCPHDR_PSH));
751 if (!tcp_valid_flags[tcpflags]) {
752 tcp_error_log(skb, state, "invalid tcp flag combination");
/* Initialize per-conntrack TCP state for the first packet of a flow.
 * Returns false (drop the conntrack) when the first packet cannot start
 * a connection, or when mid-stream pickup is disabled (tcp_loose == 0).
 */
759 static noinline bool tcp_new(struct nf_conn *ct, const struct sk_buff *skb,
760 unsigned int dataoff,
761 const struct tcphdr *th)
763 enum tcp_conntrack new_state;
764 struct net *net = nf_ct_net(ct);
765 const struct nf_tcp_net *tn = nf_tcp_pernet(net);
766 const struct ip_ct_tcp_state *sender = &ct->proto.tcp.seen[0];
767 const struct ip_ct_tcp_state *receiver = &ct->proto.tcp.seen[1];
769 /* Don't need lock here: this conntrack not in circulation yet */
770 new_state = tcp_conntracks[0][get_conntrack_index(th)][TCP_CONNTRACK_NONE];
772 /* Invalid: delete conntrack */
773 if (new_state >= TCP_CONNTRACK_MAX) {
774 pr_debug("nf_ct_tcp: invalid new deleting.\n");
778 if (new_state == TCP_CONNTRACK_SYN_SENT) {
/* Normal open: seed window tracking from the initial SYN. */
779 memset(&ct->proto.tcp, 0, sizeof(ct->proto.tcp));
781 ct->proto.tcp.seen[0].td_end =
782 segment_seq_plus_len(ntohl(th->seq), skb->len,
784 ct->proto.tcp.seen[0].td_maxwin = ntohs(th->window);
785 if (ct->proto.tcp.seen[0].td_maxwin == 0)
786 ct->proto.tcp.seen[0].td_maxwin = 1;
787 ct->proto.tcp.seen[0].td_maxend =
788 ct->proto.tcp.seen[0].td_end;
790 tcp_options(skb, dataoff, th, &ct->proto.tcp.seen[0]);
791 } else if (tn->tcp_loose == 0) {
792 /* Don't try to pick up connections. */
795 memset(&ct->proto.tcp, 0, sizeof(ct->proto.tcp));
797 * We are in the middle of a connection,
798 * its history is lost for us.
799 * Let's try to use the data from the packet.
801 ct->proto.tcp.seen[0].td_end =
802 segment_seq_plus_len(ntohl(th->seq), skb->len,
804 ct->proto.tcp.seen[0].td_maxwin = ntohs(th->window);
805 if (ct->proto.tcp.seen[0].td_maxwin == 0)
806 ct->proto.tcp.seen[0].td_maxwin = 1;
807 ct->proto.tcp.seen[0].td_maxend =
808 ct->proto.tcp.seen[0].td_end +
809 ct->proto.tcp.seen[0].td_maxwin;
811 /* We assume SACK and liberal window checking to handle
813 ct->proto.tcp.seen[0].flags =
814 ct->proto.tcp.seen[1].flags = IP_CT_TCP_FLAG_SACK_PERM |
815 IP_CT_TCP_FLAG_BE_LIBERAL;
818 /* tcp_packet will set them */
819 ct->proto.tcp.last_index = TCP_NONE_SET;
821 pr_debug("%s: sender end=%u maxend=%u maxwin=%u scale=%i "
822 "receiver end=%u maxend=%u maxwin=%u scale=%i\n",
824 sender->td_end, sender->td_maxend, sender->td_maxwin,
826 receiver->td_end, receiver->td_maxend, receiver->td_maxwin,
/* True only for a fully established, assured connection — used below to
 * decide how strictly RST sequence numbers must match.
 */
831 static bool nf_conntrack_tcp_established(const struct nf_conn *ct)
833 return ct->proto.tcp.state == TCP_CONNTRACK_ESTABLISHED &&
834 test_bit(IPS_ASSURED_BIT, &ct->status);
837 /* Returns verdict for packet, or -1 for invalid. */
/* Main per-packet entry point: validate the packet (tcp_error), run the
 * tcp_conntracks state machine under ct->lock, apply the special-case
 * handling (reopen, ignore, synproxy keepalive, challenge ACK, RST
 * matching), then the window check, and finally refresh the timeout.
 */
838 int nf_conntrack_tcp_packet(struct nf_conn *ct,
840 unsigned int dataoff,
841 enum ip_conntrack_info ctinfo,
842 const struct nf_hook_state *state)
844 struct net *net = nf_ct_net(ct);
845 struct nf_tcp_net *tn = nf_tcp_pernet(net);
846 struct nf_conntrack_tuple *tuple;
847 enum tcp_conntrack new_state, old_state;
848 unsigned int index, *timeouts;
849 enum ip_conntrack_dir dir;
850 const struct tcphdr *th;
852 unsigned long timeout;
854 th = skb_header_pointer(skb, dataoff, sizeof(_tcph), &_tcph);
858 if (tcp_error(th, skb, dataoff, state))
861 if (!nf_ct_is_confirmed(ct) && !tcp_new(ct, skb, dataoff, th))
864 spin_lock_bh(&ct->lock);
865 old_state = ct->proto.tcp.state;
866 dir = CTINFO2DIR(ctinfo);
867 index = get_conntrack_index(th);
868 new_state = tcp_conntracks[dir][index][old_state];
869 tuple = &ct->tuplehash[dir].tuple;
872 case TCP_CONNTRACK_SYN_SENT:
873 if (old_state < TCP_CONNTRACK_TIME_WAIT)
875 /* RFC 1122: "When a connection is closed actively,
876 * it MUST linger in TIME-WAIT state for a time 2xMSL
877 * (Maximum Segment Lifetime). However, it MAY accept
878 * a new SYN from the remote TCP to reopen the connection
879 * directly from TIME-WAIT state, if..."
880 * We ignore the conditions because we are in the
881 * TIME-WAIT state anyway.
883 * Handle aborted connections: we and the server
884 * think there is an existing connection but the client
885 * aborts it and starts a new one.
887 if (((ct->proto.tcp.seen[dir].flags
888 | ct->proto.tcp.seen[!dir].flags)
889 & IP_CT_TCP_FLAG_CLOSE_INIT)
890 || (ct->proto.tcp.last_dir == dir
891 && ct->proto.tcp.last_index == TCP_RST_SET)) {
892 /* Attempt to reopen a closed/aborted connection.
893 * Delete this connection and look up again. */
894 spin_unlock_bh(&ct->lock);
896 /* Only repeat if we can actually remove the timer.
897 * Destruction may already be in progress in process
898 * context and we must give it a chance to terminate.
905 case TCP_CONNTRACK_IGNORE:
908 * Our connection entry may be out of sync, so ignore
909 * packets which may signal the real connection between
910 * the client and the server.
913 * b) SYN/ACK in REPLY
914 * c) ACK in reply direction after initial SYN in original.
916 * If the ignored packet is invalid, the receiver will send
917 * a RST we'll catch below.
919 if (index == TCP_SYNACK_SET
920 && ct->proto.tcp.last_index == TCP_SYN_SET
921 && ct->proto.tcp.last_dir != dir
922 && ntohl(th->ack_seq) == ct->proto.tcp.last_end) {
923 /* b) This SYN/ACK acknowledges a SYN that we earlier
924 * ignored as invalid. This means that the client and
925 * the server are both in sync, while the firewall is
926 * not. We get in sync from the previously annotated
929 old_state = TCP_CONNTRACK_SYN_SENT;
930 new_state = TCP_CONNTRACK_SYN_RECV;
931 ct->proto.tcp.seen[ct->proto.tcp.last_dir].td_end =
932 ct->proto.tcp.last_end;
933 ct->proto.tcp.seen[ct->proto.tcp.last_dir].td_maxend =
934 ct->proto.tcp.last_end;
935 ct->proto.tcp.seen[ct->proto.tcp.last_dir].td_maxwin =
936 ct->proto.tcp.last_win == 0 ?
937 1 : ct->proto.tcp.last_win;
938 ct->proto.tcp.seen[ct->proto.tcp.last_dir].td_scale =
939 ct->proto.tcp.last_wscale;
940 ct->proto.tcp.last_flags &= ~IP_CT_EXP_CHALLENGE_ACK;
941 ct->proto.tcp.seen[ct->proto.tcp.last_dir].flags =
942 ct->proto.tcp.last_flags;
943 memset(&ct->proto.tcp.seen[dir], 0,
944 sizeof(struct ip_ct_tcp_state));
/* Remember this packet so later packets (SYN/ACK, RST) can be
 * matched against it.
 */
947 ct->proto.tcp.last_index = index;
948 ct->proto.tcp.last_dir = dir;
949 ct->proto.tcp.last_seq = ntohl(th->seq);
950 ct->proto.tcp.last_end =
951 segment_seq_plus_len(ntohl(th->seq), skb->len, dataoff, th);
952 ct->proto.tcp.last_win = ntohs(th->window);
954 /* a) This is a SYN in ORIGINAL. The client and the server
955 * may be in sync but we are not. In that case, we annotate
956 * the TCP options and let the packet go through. If it is a
957 * valid SYN packet, the server will reply with a SYN/ACK, and
958 * then we'll get in sync. Otherwise, the server potentially
959 * responds with a challenge ACK if implementing RFC5961.
961 if (index == TCP_SYN_SET && dir == IP_CT_DIR_ORIGINAL) {
962 struct ip_ct_tcp_state seen = {};
964 ct->proto.tcp.last_flags =
965 ct->proto.tcp.last_wscale = 0;
966 tcp_options(skb, dataoff, th, &seen);
967 if (seen.flags & IP_CT_TCP_FLAG_WINDOW_SCALE) {
968 ct->proto.tcp.last_flags |=
969 IP_CT_TCP_FLAG_WINDOW_SCALE;
970 ct->proto.tcp.last_wscale = seen.td_scale;
972 if (seen.flags & IP_CT_TCP_FLAG_SACK_PERM) {
973 ct->proto.tcp.last_flags |=
974 IP_CT_TCP_FLAG_SACK_PERM;
976 /* Mark the potential for RFC5961 challenge ACK,
977 * this pose a special problem for LAST_ACK state
978 * as ACK is interpreted as ACKing last FIN.
980 if (old_state == TCP_CONNTRACK_LAST_ACK)
981 ct->proto.tcp.last_flags |=
982 IP_CT_EXP_CHALLENGE_ACK;
984 spin_unlock_bh(&ct->lock);
985 nf_ct_l4proto_log_invalid(skb, ct, "invalid packet ignored in "
986 "state %s ", tcp_conntrack_names[old_state]);
988 case TCP_CONNTRACK_MAX:
989 /* Special case for SYN proxy: when the SYN to the server or
990 * the SYN/ACK from the server is lost, the client may transmit
991 * a keep-alive packet while in SYN_SENT state. This needs to
992 * be associated with the original conntrack entry in order to
993 * generate a new SYN with the correct sequence number.
995 if (nfct_synproxy(ct) && old_state == TCP_CONNTRACK_SYN_SENT &&
996 index == TCP_ACK_SET && dir == IP_CT_DIR_ORIGINAL &&
997 ct->proto.tcp.last_dir == IP_CT_DIR_ORIGINAL &&
998 ct->proto.tcp.seen[dir].td_end - 1 == ntohl(th->seq)) {
999 pr_debug("nf_ct_tcp: SYN proxy client keep alive\n");
1000 spin_unlock_bh(&ct->lock);
1004 /* Invalid packet */
1005 pr_debug("nf_ct_tcp: Invalid dir=%i index=%u ostate=%u\n",
1006 dir, get_conntrack_index(th), old_state);
1007 spin_unlock_bh(&ct->lock);
1008 nf_ct_l4proto_log_invalid(skb, ct, "invalid state");
1010 case TCP_CONNTRACK_TIME_WAIT:
1011 /* RFC5961 compliance cause stack to send "challenge-ACK"
1012 * e.g. in response to spurious SYNs. Conntrack MUST
1013 * not believe this ACK is acking last FIN.
1015 if (old_state == TCP_CONNTRACK_LAST_ACK &&
1016 index == TCP_ACK_SET &&
1017 ct->proto.tcp.last_dir != dir &&
1018 ct->proto.tcp.last_index == TCP_SYN_SET &&
1019 (ct->proto.tcp.last_flags & IP_CT_EXP_CHALLENGE_ACK)) {
1020 /* Detected RFC5961 challenge ACK */
1021 ct->proto.tcp.last_flags &= ~IP_CT_EXP_CHALLENGE_ACK;
1022 spin_unlock_bh(&ct->lock);
1023 nf_ct_l4proto_log_invalid(skb, ct, "challenge-ack ignored");
1024 return NF_ACCEPT; /* Don't change state */
1027 case TCP_CONNTRACK_SYN_SENT2:
1028 /* tcp_conntracks table is not smart enough to handle
1029 * simultaneous open.
1031 ct->proto.tcp.last_flags |= IP_CT_TCP_SIMULTANEOUS_OPEN;
1033 case TCP_CONNTRACK_SYN_RECV:
1034 if (dir == IP_CT_DIR_REPLY && index == TCP_ACK_SET &&
1035 ct->proto.tcp.last_flags & IP_CT_TCP_SIMULTANEOUS_OPEN)
1036 new_state = TCP_CONNTRACK_ESTABLISHED;
1038 case TCP_CONNTRACK_CLOSE:
1039 if (index != TCP_RST_SET)
/* RST handling: reject RSTs that point at already-ACKed data, and
 * only close an established/assured connection when the RST sequence
 * number matches exactly or follows a data packet from the same side.
 */
1042 if (ct->proto.tcp.seen[!dir].flags & IP_CT_TCP_FLAG_MAXACK_SET) {
1043 u32 seq = ntohl(th->seq);
1045 if (before(seq, ct->proto.tcp.seen[!dir].td_maxack)) {
1047 spin_unlock_bh(&ct->lock);
1048 nf_ct_l4proto_log_invalid(skb, ct, "invalid rst");
1052 if (!nf_conntrack_tcp_established(ct) ||
1053 seq == ct->proto.tcp.seen[!dir].td_maxack)
1056 /* Check if rst is part of train, such as
1057 * foo:80 > bar:4379: P, 235946583:235946602(19) ack 42
1058 * foo:80 > bar:4379: R, 235946602:235946602(0) ack 42
1060 if (ct->proto.tcp.last_index == TCP_ACK_SET &&
1061 ct->proto.tcp.last_dir == dir &&
1062 seq == ct->proto.tcp.last_end)
1065 /* ... RST sequence number doesn't match exactly, keep
1066 * established state to allow a possible challenge ACK.
1068 new_state = old_state;
1070 if (((test_bit(IPS_SEEN_REPLY_BIT, &ct->status)
1071 && ct->proto.tcp.last_index == TCP_SYN_SET)
1072 || (!test_bit(IPS_ASSURED_BIT, &ct->status)
1073 && ct->proto.tcp.last_index == TCP_ACK_SET))
1074 && ntohl(th->ack_seq) == ct->proto.tcp.last_end) {
1075 /* RST sent to invalid SYN or ACK we had let through
1076 * at a) and c) above:
1078 * a) SYN was in window then
1079 * c) we hold a half-open connection.
1081 * Delete our connection entry.
1082 * We skip window checking, because packet might ACK
1083 * segments we ignored. */
1088 /* Keep compilers happy. */
1092 if (!tcp_in_window(ct, &ct->proto.tcp, dir, index,
1093 skb, dataoff, th)) {
1094 spin_unlock_bh(&ct->lock);
1098 /* From now on we have got in-window packets */
1099 ct->proto.tcp.last_index = index;
1100 ct->proto.tcp.last_dir = dir;
1102 pr_debug("tcp_conntracks: ");
1103 nf_ct_dump_tuple(tuple);
1104 pr_debug("syn=%i ack=%i fin=%i rst=%i old=%i new=%i\n",
1105 (th->syn ? 1 : 0), (th->ack ? 1 : 0),
1106 (th->fin ? 1 : 0), (th->rst ? 1 : 0),
1107 old_state, new_state);
1109 ct->proto.tcp.state = new_state;
1110 if (old_state != new_state
1111 && new_state == TCP_CONNTRACK_FIN_WAIT)
1112 ct->proto.tcp.seen[dir].flags |= IP_CT_TCP_FLAG_CLOSE_INIT;
/* Pick the timeout: per-ct timeout policy if attached, else the
 * per-netns defaults; clamp for retransmissions, RSTs, unacked data
 * and zero-window peers.
 */
1114 timeouts = nf_ct_timeout_lookup(ct);
1116 timeouts = tn->timeouts;
1118 if (ct->proto.tcp.retrans >= tn->tcp_max_retrans &&
1119 timeouts[new_state] > timeouts[TCP_CONNTRACK_RETRANS])
1120 timeout = timeouts[TCP_CONNTRACK_RETRANS];
1121 else if (unlikely(index == TCP_RST_SET))
1122 timeout = timeouts[TCP_CONNTRACK_CLOSE];
1123 else if ((ct->proto.tcp.seen[0].flags | ct->proto.tcp.seen[1].flags) &
1124 IP_CT_TCP_FLAG_DATA_UNACKNOWLEDGED &&
1125 timeouts[new_state] > timeouts[TCP_CONNTRACK_UNACK])
1126 timeout = timeouts[TCP_CONNTRACK_UNACK];
1127 else if (ct->proto.tcp.last_win == 0 &&
1128 timeouts[new_state] > timeouts[TCP_CONNTRACK_RETRANS])
1129 timeout = timeouts[TCP_CONNTRACK_RETRANS];
1131 timeout = timeouts[new_state];
1132 spin_unlock_bh(&ct->lock);
1134 if (new_state != old_state)
1135 nf_conntrack_event_cache(IPCT_PROTOINFO, ct);
1137 if (!test_bit(IPS_SEEN_REPLY_BIT, &ct->status)) {
1138 /* If only reply is a RST, we can consider ourselves not to
1139 have an established connection: this is a fairly common
1140 problem case, so we can delete the conntrack
1141 immediately. --RR */
1143 nf_ct_kill_acct(ct, ctinfo, skb);
1146 /* ESTABLISHED without SEEN_REPLY, i.e. mid-connection
1147 * pickup with loose=1. Avoid large ESTABLISHED timeout.
1149 if (new_state == TCP_CONNTRACK_ESTABLISHED &&
1150 timeout > timeouts[TCP_CONNTRACK_UNACK])
1151 timeout = timeouts[TCP_CONNTRACK_UNACK];
1152 } else if (!test_bit(IPS_ASSURED_BIT, &ct->status)
1153 && (old_state == TCP_CONNTRACK_SYN_RECV
1154 || old_state == TCP_CONNTRACK_ESTABLISHED)
1155 && new_state == TCP_CONNTRACK_ESTABLISHED) {
1156 /* Set ASSURED if we see valid ack in ESTABLISHED
1157 after SYN_RECV or a valid answer for a picked up
1159 set_bit(IPS_ASSURED_BIT, &ct->status);
1160 nf_conntrack_event_cache(IPCT_ASSURED, ct);
1162 nf_ct_refresh_acct(ct, ctinfo, skb, timeout);
1167 static bool tcp_can_early_drop(const struct nf_conn *ct)
1169 switch (ct->proto.tcp.state) {
1170 case TCP_CONNTRACK_FIN_WAIT:
1171 case TCP_CONNTRACK_LAST_ACK:
1172 case TCP_CONNTRACK_TIME_WAIT:
1173 case TCP_CONNTRACK_CLOSE:
1174 case TCP_CONNTRACK_CLOSE_WAIT:
1183 #if IS_ENABLED(CONFIG_NF_CT_NETLINK)
1185 #include <linux/netfilter/nfnetlink.h>
1186 #include <linux/netfilter/nfnetlink_conntrack.h>
1188 static int tcp_to_nlattr(struct sk_buff *skb, struct nlattr *nla,
1191 struct nlattr *nest_parms;
1192 struct nf_ct_tcp_flags tmp = {};
1194 spin_lock_bh(&ct->lock);
1195 nest_parms = nla_nest_start(skb, CTA_PROTOINFO_TCP);
1197 goto nla_put_failure;
1199 if (nla_put_u8(skb, CTA_PROTOINFO_TCP_STATE, ct->proto.tcp.state) ||
1200 nla_put_u8(skb, CTA_PROTOINFO_TCP_WSCALE_ORIGINAL,
1201 ct->proto.tcp.seen[0].td_scale) ||
1202 nla_put_u8(skb, CTA_PROTOINFO_TCP_WSCALE_REPLY,
1203 ct->proto.tcp.seen[1].td_scale))
1204 goto nla_put_failure;
1206 tmp.flags = ct->proto.tcp.seen[0].flags;
1207 if (nla_put(skb, CTA_PROTOINFO_TCP_FLAGS_ORIGINAL,
1208 sizeof(struct nf_ct_tcp_flags), &tmp))
1209 goto nla_put_failure;
1211 tmp.flags = ct->proto.tcp.seen[1].flags;
1212 if (nla_put(skb, CTA_PROTOINFO_TCP_FLAGS_REPLY,
1213 sizeof(struct nf_ct_tcp_flags), &tmp))
1214 goto nla_put_failure;
1215 spin_unlock_bh(&ct->lock);
1217 nla_nest_end(skb, nest_parms);
1222 spin_unlock_bh(&ct->lock);
1226 static const struct nla_policy tcp_nla_policy[CTA_PROTOINFO_TCP_MAX+1] = {
1227 [CTA_PROTOINFO_TCP_STATE] = { .type = NLA_U8 },
1228 [CTA_PROTOINFO_TCP_WSCALE_ORIGINAL] = { .type = NLA_U8 },
1229 [CTA_PROTOINFO_TCP_WSCALE_REPLY] = { .type = NLA_U8 },
1230 [CTA_PROTOINFO_TCP_FLAGS_ORIGINAL] = { .len = sizeof(struct nf_ct_tcp_flags) },
1231 [CTA_PROTOINFO_TCP_FLAGS_REPLY] = { .len = sizeof(struct nf_ct_tcp_flags) },
/* Worst-case payload size reserved for the CTA_PROTOINFO_TCP nest:
 * two one-byte attributes plus the two nf_ct_tcp_flags blobs.
 * NOTE(review): tcp_to_nlattr() emits three u8 attributes (state and
 * both wscale values) but only two are counted here — presumably the
 * surrounding sizing already covers the slack; verify before relying
 * on this as an exact bound.
 */
#define TCP_NLATTR_SIZE ( \
	NLA_ALIGN(NLA_HDRLEN + 1) + \
	NLA_ALIGN(NLA_HDRLEN + 1) + \
	NLA_ALIGN(NLA_HDRLEN + sizeof(struct nf_ct_tcp_flags)) + \
	NLA_ALIGN(NLA_HDRLEN + sizeof(struct nf_ct_tcp_flags)))
1240 static int nlattr_to_tcp(struct nlattr *cda[], struct nf_conn *ct)
1242 struct nlattr *pattr = cda[CTA_PROTOINFO_TCP];
1243 struct nlattr *tb[CTA_PROTOINFO_TCP_MAX+1];
1246 /* updates could not contain anything about the private
1247 * protocol info, in that case skip the parsing */
1251 err = nla_parse_nested_deprecated(tb, CTA_PROTOINFO_TCP_MAX, pattr,
1252 tcp_nla_policy, NULL);
1256 if (tb[CTA_PROTOINFO_TCP_STATE] &&
1257 nla_get_u8(tb[CTA_PROTOINFO_TCP_STATE]) >= TCP_CONNTRACK_MAX)
1260 spin_lock_bh(&ct->lock);
1261 if (tb[CTA_PROTOINFO_TCP_STATE])
1262 ct->proto.tcp.state = nla_get_u8(tb[CTA_PROTOINFO_TCP_STATE]);
1264 if (tb[CTA_PROTOINFO_TCP_FLAGS_ORIGINAL]) {
1265 struct nf_ct_tcp_flags *attr =
1266 nla_data(tb[CTA_PROTOINFO_TCP_FLAGS_ORIGINAL]);
1267 ct->proto.tcp.seen[0].flags &= ~attr->mask;
1268 ct->proto.tcp.seen[0].flags |= attr->flags & attr->mask;
1271 if (tb[CTA_PROTOINFO_TCP_FLAGS_REPLY]) {
1272 struct nf_ct_tcp_flags *attr =
1273 nla_data(tb[CTA_PROTOINFO_TCP_FLAGS_REPLY]);
1274 ct->proto.tcp.seen[1].flags &= ~attr->mask;
1275 ct->proto.tcp.seen[1].flags |= attr->flags & attr->mask;
1278 if (tb[CTA_PROTOINFO_TCP_WSCALE_ORIGINAL] &&
1279 tb[CTA_PROTOINFO_TCP_WSCALE_REPLY] &&
1280 ct->proto.tcp.seen[0].flags & IP_CT_TCP_FLAG_WINDOW_SCALE &&
1281 ct->proto.tcp.seen[1].flags & IP_CT_TCP_FLAG_WINDOW_SCALE) {
1282 ct->proto.tcp.seen[0].td_scale =
1283 nla_get_u8(tb[CTA_PROTOINFO_TCP_WSCALE_ORIGINAL]);
1284 ct->proto.tcp.seen[1].td_scale =
1285 nla_get_u8(tb[CTA_PROTOINFO_TCP_WSCALE_REPLY]);
1287 spin_unlock_bh(&ct->lock);
1292 static unsigned int tcp_nlattr_tuple_size(void)
1294 static unsigned int size __read_mostly;
1297 size = nla_policy_len(nf_ct_port_nla_policy, CTA_PROTO_MAX + 1);
1303 #ifdef CONFIG_NF_CONNTRACK_TIMEOUT
1305 #include <linux/netfilter/nfnetlink.h>
1306 #include <linux/netfilter/nfnetlink_cttimeout.h>
1308 static int tcp_timeout_nlattr_to_obj(struct nlattr *tb[],
1309 struct net *net, void *data)
1311 struct nf_tcp_net *tn = nf_tcp_pernet(net);
1312 unsigned int *timeouts = data;
1316 timeouts = tn->timeouts;
1317 /* set default TCP timeouts. */
1318 for (i=0; i<TCP_CONNTRACK_TIMEOUT_MAX; i++)
1319 timeouts[i] = tn->timeouts[i];
1321 if (tb[CTA_TIMEOUT_TCP_SYN_SENT]) {
1322 timeouts[TCP_CONNTRACK_SYN_SENT] =
1323 ntohl(nla_get_be32(tb[CTA_TIMEOUT_TCP_SYN_SENT]))*HZ;
1326 if (tb[CTA_TIMEOUT_TCP_SYN_RECV]) {
1327 timeouts[TCP_CONNTRACK_SYN_RECV] =
1328 ntohl(nla_get_be32(tb[CTA_TIMEOUT_TCP_SYN_RECV]))*HZ;
1330 if (tb[CTA_TIMEOUT_TCP_ESTABLISHED]) {
1331 timeouts[TCP_CONNTRACK_ESTABLISHED] =
1332 ntohl(nla_get_be32(tb[CTA_TIMEOUT_TCP_ESTABLISHED]))*HZ;
1334 if (tb[CTA_TIMEOUT_TCP_FIN_WAIT]) {
1335 timeouts[TCP_CONNTRACK_FIN_WAIT] =
1336 ntohl(nla_get_be32(tb[CTA_TIMEOUT_TCP_FIN_WAIT]))*HZ;
1338 if (tb[CTA_TIMEOUT_TCP_CLOSE_WAIT]) {
1339 timeouts[TCP_CONNTRACK_CLOSE_WAIT] =
1340 ntohl(nla_get_be32(tb[CTA_TIMEOUT_TCP_CLOSE_WAIT]))*HZ;
1342 if (tb[CTA_TIMEOUT_TCP_LAST_ACK]) {
1343 timeouts[TCP_CONNTRACK_LAST_ACK] =
1344 ntohl(nla_get_be32(tb[CTA_TIMEOUT_TCP_LAST_ACK]))*HZ;
1346 if (tb[CTA_TIMEOUT_TCP_TIME_WAIT]) {
1347 timeouts[TCP_CONNTRACK_TIME_WAIT] =
1348 ntohl(nla_get_be32(tb[CTA_TIMEOUT_TCP_TIME_WAIT]))*HZ;
1350 if (tb[CTA_TIMEOUT_TCP_CLOSE]) {
1351 timeouts[TCP_CONNTRACK_CLOSE] =
1352 ntohl(nla_get_be32(tb[CTA_TIMEOUT_TCP_CLOSE]))*HZ;
1354 if (tb[CTA_TIMEOUT_TCP_SYN_SENT2]) {
1355 timeouts[TCP_CONNTRACK_SYN_SENT2] =
1356 ntohl(nla_get_be32(tb[CTA_TIMEOUT_TCP_SYN_SENT2]))*HZ;
1358 if (tb[CTA_TIMEOUT_TCP_RETRANS]) {
1359 timeouts[TCP_CONNTRACK_RETRANS] =
1360 ntohl(nla_get_be32(tb[CTA_TIMEOUT_TCP_RETRANS]))*HZ;
1362 if (tb[CTA_TIMEOUT_TCP_UNACK]) {
1363 timeouts[TCP_CONNTRACK_UNACK] =
1364 ntohl(nla_get_be32(tb[CTA_TIMEOUT_TCP_UNACK]))*HZ;
1367 timeouts[CTA_TIMEOUT_TCP_UNSPEC] = timeouts[CTA_TIMEOUT_TCP_SYN_SENT];
1372 tcp_timeout_obj_to_nlattr(struct sk_buff *skb, const void *data)
1374 const unsigned int *timeouts = data;
1376 if (nla_put_be32(skb, CTA_TIMEOUT_TCP_SYN_SENT,
1377 htonl(timeouts[TCP_CONNTRACK_SYN_SENT] / HZ)) ||
1378 nla_put_be32(skb, CTA_TIMEOUT_TCP_SYN_RECV,
1379 htonl(timeouts[TCP_CONNTRACK_SYN_RECV] / HZ)) ||
1380 nla_put_be32(skb, CTA_TIMEOUT_TCP_ESTABLISHED,
1381 htonl(timeouts[TCP_CONNTRACK_ESTABLISHED] / HZ)) ||
1382 nla_put_be32(skb, CTA_TIMEOUT_TCP_FIN_WAIT,
1383 htonl(timeouts[TCP_CONNTRACK_FIN_WAIT] / HZ)) ||
1384 nla_put_be32(skb, CTA_TIMEOUT_TCP_CLOSE_WAIT,
1385 htonl(timeouts[TCP_CONNTRACK_CLOSE_WAIT] / HZ)) ||
1386 nla_put_be32(skb, CTA_TIMEOUT_TCP_LAST_ACK,
1387 htonl(timeouts[TCP_CONNTRACK_LAST_ACK] / HZ)) ||
1388 nla_put_be32(skb, CTA_TIMEOUT_TCP_TIME_WAIT,
1389 htonl(timeouts[TCP_CONNTRACK_TIME_WAIT] / HZ)) ||
1390 nla_put_be32(skb, CTA_TIMEOUT_TCP_CLOSE,
1391 htonl(timeouts[TCP_CONNTRACK_CLOSE] / HZ)) ||
1392 nla_put_be32(skb, CTA_TIMEOUT_TCP_SYN_SENT2,
1393 htonl(timeouts[TCP_CONNTRACK_SYN_SENT2] / HZ)) ||
1394 nla_put_be32(skb, CTA_TIMEOUT_TCP_RETRANS,
1395 htonl(timeouts[TCP_CONNTRACK_RETRANS] / HZ)) ||
1396 nla_put_be32(skb, CTA_TIMEOUT_TCP_UNACK,
1397 htonl(timeouts[TCP_CONNTRACK_UNACK] / HZ)))
1398 goto nla_put_failure;
1405 static const struct nla_policy tcp_timeout_nla_policy[CTA_TIMEOUT_TCP_MAX+1] = {
1406 [CTA_TIMEOUT_TCP_SYN_SENT] = { .type = NLA_U32 },
1407 [CTA_TIMEOUT_TCP_SYN_RECV] = { .type = NLA_U32 },
1408 [CTA_TIMEOUT_TCP_ESTABLISHED] = { .type = NLA_U32 },
1409 [CTA_TIMEOUT_TCP_FIN_WAIT] = { .type = NLA_U32 },
1410 [CTA_TIMEOUT_TCP_CLOSE_WAIT] = { .type = NLA_U32 },
1411 [CTA_TIMEOUT_TCP_LAST_ACK] = { .type = NLA_U32 },
1412 [CTA_TIMEOUT_TCP_TIME_WAIT] = { .type = NLA_U32 },
1413 [CTA_TIMEOUT_TCP_CLOSE] = { .type = NLA_U32 },
1414 [CTA_TIMEOUT_TCP_SYN_SENT2] = { .type = NLA_U32 },
1415 [CTA_TIMEOUT_TCP_RETRANS] = { .type = NLA_U32 },
1416 [CTA_TIMEOUT_TCP_UNACK] = { .type = NLA_U32 },
1418 #endif /* CONFIG_NF_CONNTRACK_TIMEOUT */
1420 void nf_conntrack_tcp_init_net(struct net *net)
1422 struct nf_tcp_net *tn = nf_tcp_pernet(net);
1425 for (i = 0; i < TCP_CONNTRACK_TIMEOUT_MAX; i++)
1426 tn->timeouts[i] = tcp_timeouts[i];
1428 /* timeouts[0] is unused, make it same as SYN_SENT so
1429 * ->timeouts[0] contains 'new' timeout, like udp or icmp.
1431 tn->timeouts[0] = tcp_timeouts[TCP_CONNTRACK_SYN_SENT];
1432 tn->tcp_loose = nf_ct_tcp_loose;
1433 tn->tcp_be_liberal = nf_ct_tcp_be_liberal;
1434 tn->tcp_max_retrans = nf_ct_tcp_max_retrans;
1437 const struct nf_conntrack_l4proto nf_conntrack_l4proto_tcp =
1439 .l4proto = IPPROTO_TCP,
1440 #ifdef CONFIG_NF_CONNTRACK_PROCFS
1441 .print_conntrack = tcp_print_conntrack,
1443 .can_early_drop = tcp_can_early_drop,
1444 #if IS_ENABLED(CONFIG_NF_CT_NETLINK)
1445 .to_nlattr = tcp_to_nlattr,
1446 .from_nlattr = nlattr_to_tcp,
1447 .tuple_to_nlattr = nf_ct_port_tuple_to_nlattr,
1448 .nlattr_to_tuple = nf_ct_port_nlattr_to_tuple,
1449 .nlattr_tuple_size = tcp_nlattr_tuple_size,
1450 .nlattr_size = TCP_NLATTR_SIZE,
1451 .nla_policy = nf_ct_port_nla_policy,
1453 #ifdef CONFIG_NF_CONNTRACK_TIMEOUT
1455 .nlattr_to_obj = tcp_timeout_nlattr_to_obj,
1456 .obj_to_nlattr = tcp_timeout_obj_to_nlattr,
1457 .nlattr_max = CTA_TIMEOUT_TCP_MAX,
1458 .obj_size = sizeof(unsigned int) *
1459 TCP_CONNTRACK_TIMEOUT_MAX,
1460 .nla_policy = tcp_timeout_nla_policy,
1462 #endif /* CONFIG_NF_CONNTRACK_TIMEOUT */