// SPDX-License-Identifier: GPL-2.0-only
/*
 * (C) 1999-2001 Paul `Rusty' Russell
 * (C) 2002-2006 Netfilter Core Team <coreteam@netfilter.org>
 * (C) 2011 Patrick McHardy <kaber@trash.net>
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/module.h>
#include <linux/types.h>
#include <linux/timer.h>
#include <linux/skbuff.h>
#include <linux/gfp.h>
#include <net/xfrm.h>
#include <linux/siphash.h>
#include <linux/rtnetlink.h>

#include <net/netfilter/nf_conntrack.h>
#include <net/netfilter/nf_conntrack_core.h>
#include <net/netfilter/nf_conntrack_helper.h>
#include <net/netfilter/nf_conntrack_seqadj.h>
#include <net/netfilter/nf_conntrack_zones.h>
#include <net/netfilter/nf_nat.h>
#include <net/netfilter/nf_nat_helper.h>
#include <uapi/linux/netfilter/nf_nat.h>

#include "nf_internals.h"

static spinlock_t nf_nat_locks[CONNTRACK_LOCKS];

static DEFINE_MUTEX(nf_nat_proto_mutex);
static unsigned int nat_net_id __read_mostly;

static struct hlist_head *nf_nat_bysource __read_mostly;
static unsigned int nf_nat_htable_size __read_mostly;
static siphash_key_t nf_nat_hash_rnd __read_mostly;

struct nf_nat_lookup_hook_priv {
	struct nf_hook_entries __rcu *entries;

	struct rcu_head rcu_head;
};

struct nf_nat_hooks_net {
	struct nf_hook_ops *nat_hook_ops;
	unsigned int users;
};

struct nat_net {
	struct nf_nat_hooks_net nat_proto_net[NFPROTO_NUMPROTO];
};

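/* Editorial note (not in the original source): nat_proto_net[] is the
 * per-netns bookkeeping used by nf_nat_register_fn()/nf_nat_unregister_fn()
 * below - one slot per NFPROTO_* family, counting users and holding the
 * kmemdup'd copy of the base hook ops that all registered NAT backends share.
 */
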
#ifdef CONFIG_XFRM
static void nf_nat_ipv4_decode_session(struct sk_buff *skb,
				       const struct nf_conn *ct,
				       enum ip_conntrack_dir dir,
				       unsigned long statusbit,
				       struct flowi *fl)
{
	const struct nf_conntrack_tuple *t = &ct->tuplehash[dir].tuple;
	struct flowi4 *fl4 = &fl->u.ip4;

	if (ct->status & statusbit) {
		fl4->daddr = t->dst.u3.ip;
		if (t->dst.protonum == IPPROTO_TCP ||
		    t->dst.protonum == IPPROTO_UDP ||
		    t->dst.protonum == IPPROTO_UDPLITE ||
		    t->dst.protonum == IPPROTO_DCCP ||
		    t->dst.protonum == IPPROTO_SCTP)
			fl4->fl4_dport = t->dst.u.all;
	}

	statusbit ^= IPS_NAT_MASK;

	if (ct->status & statusbit) {
		fl4->saddr = t->src.u3.ip;
		if (t->dst.protonum == IPPROTO_TCP ||
		    t->dst.protonum == IPPROTO_UDP ||
		    t->dst.protonum == IPPROTO_UDPLITE ||
		    t->dst.protonum == IPPROTO_DCCP ||
		    t->dst.protonum == IPPROTO_SCTP)
			fl4->fl4_sport = t->src.u.all;
	}
}

static void nf_nat_ipv6_decode_session(struct sk_buff *skb,
				       const struct nf_conn *ct,
				       enum ip_conntrack_dir dir,
				       unsigned long statusbit,
				       struct flowi *fl)
{
#if IS_ENABLED(CONFIG_IPV6)
	const struct nf_conntrack_tuple *t = &ct->tuplehash[dir].tuple;
	struct flowi6 *fl6 = &fl->u.ip6;

	if (ct->status & statusbit) {
		fl6->daddr = t->dst.u3.in6;
		if (t->dst.protonum == IPPROTO_TCP ||
		    t->dst.protonum == IPPROTO_UDP ||
		    t->dst.protonum == IPPROTO_UDPLITE ||
		    t->dst.protonum == IPPROTO_DCCP ||
		    t->dst.protonum == IPPROTO_SCTP)
			fl6->fl6_dport = t->dst.u.all;
	}

	statusbit ^= IPS_NAT_MASK;

	if (ct->status & statusbit) {
		fl6->saddr = t->src.u3.in6;
		if (t->dst.protonum == IPPROTO_TCP ||
		    t->dst.protonum == IPPROTO_UDP ||
		    t->dst.protonum == IPPROTO_UDPLITE ||
		    t->dst.protonum == IPPROTO_DCCP ||
		    t->dst.protonum == IPPROTO_SCTP)
			fl6->fl6_sport = t->src.u.all;
	}
#endif
}

static void __nf_nat_decode_session(struct sk_buff *skb, struct flowi *fl)
{
	const struct nf_conn *ct;
	enum ip_conntrack_info ctinfo;
	enum ip_conntrack_dir dir;
	unsigned long statusbit;
	u8 family;

	ct = nf_ct_get(skb, &ctinfo);
	if (ct == NULL)
		return;

	family = nf_ct_l3num(ct);
	dir = CTINFO2DIR(ctinfo);
	if (dir == IP_CT_DIR_ORIGINAL)
		statusbit = IPS_DST_NAT;
	else
		statusbit = IPS_SRC_NAT;

	switch (family) {
	case NFPROTO_IPV4:
		nf_nat_ipv4_decode_session(skb, ct, dir, statusbit, fl);
		return;
	case NFPROTO_IPV6:
		nf_nat_ipv6_decode_session(skb, ct, dir, statusbit, fl);
		return;
	}
}
#endif /* CONFIG_XFRM */

/* We keep an extra hash for each conntrack, for fast searching. */
static unsigned int
hash_by_src(const struct net *n, const struct nf_conntrack_tuple *tuple)
{
	unsigned int hash;
	struct {
		struct nf_conntrack_man src;
		u32 net_mix;
		u32 protonum;
	} __aligned(SIPHASH_ALIGNMENT) combined;

	get_random_once(&nf_nat_hash_rnd, sizeof(nf_nat_hash_rnd));

	memset(&combined, 0, sizeof(combined));

	/* Original src, to ensure we map it consistently if poss. */
	combined.src = tuple->src;
	combined.net_mix = net_hash_mix(n);
	combined.protonum = tuple->dst.protonum;

	hash = siphash(&combined, sizeof(combined), &nf_nat_hash_rnd);

	return reciprocal_scale(hash, nf_nat_htable_size);
}

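/* Editorial note (not in the original source): the value returned above
 * indexes nf_nat_bysource[]; writers serialize on
 * nf_nat_locks[hash % CONNTRACK_LOCKS] (see nf_nat_setup_info() and
 * __nf_nat_cleanup_conntrack() below), while find_appropriate_src() walks
 * the chain under RCU.
 */
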
/* Is this tuple already taken? (not by us) */
static int
nf_nat_used_tuple(const struct nf_conntrack_tuple *tuple,
		  const struct nf_conn *ignored_conntrack)
{
	/* Conntrack tracking doesn't keep track of outgoing tuples; only
	 * incoming ones.  NAT means they don't have a fixed mapping,
	 * so we invert the tuple and look for the incoming reply.
	 *
	 * We could keep a separate hash if this proves too slow.
	 */
	struct nf_conntrack_tuple reply;

	nf_ct_invert_tuple(&reply, tuple);
	return nf_conntrack_tuple_taken(&reply, ignored_conntrack);
}

static bool nf_nat_inet_in_range(const struct nf_conntrack_tuple *t,
				 const struct nf_nat_range2 *range)
{
	if (t->src.l3num == NFPROTO_IPV4)
		return ntohl(t->src.u3.ip) >= ntohl(range->min_addr.ip) &&
		       ntohl(t->src.u3.ip) <= ntohl(range->max_addr.ip);

	return ipv6_addr_cmp(&t->src.u3.in6, &range->min_addr.in6) >= 0 &&
	       ipv6_addr_cmp(&t->src.u3.in6, &range->max_addr.in6) <= 0;
}

/* Is the manipulable part of the tuple between min and max incl? */
static bool l4proto_in_range(const struct nf_conntrack_tuple *tuple,
			     enum nf_nat_manip_type maniptype,
			     const union nf_conntrack_man_proto *min,
			     const union nf_conntrack_man_proto *max)
{
	__be16 port;

	switch (tuple->dst.protonum) {
	case IPPROTO_ICMP:
	case IPPROTO_ICMPV6:
		return ntohs(tuple->src.u.icmp.id) >= ntohs(min->icmp.id) &&
		       ntohs(tuple->src.u.icmp.id) <= ntohs(max->icmp.id);
	case IPPROTO_GRE: /* all fall through */
	case IPPROTO_TCP:
	case IPPROTO_UDP:
	case IPPROTO_UDPLITE:
	case IPPROTO_DCCP:
	case IPPROTO_SCTP:
		if (maniptype == NF_NAT_MANIP_SRC)
			port = tuple->src.u.all;
		else
			port = tuple->dst.u.all;

		return ntohs(port) >= ntohs(min->all) &&
		       ntohs(port) <= ntohs(max->all);
	default:
		return true;
	}
}

/* If we source map this tuple so reply looks like reply_tuple, will
 * that meet the constraints of range.
 */
static int in_range(const struct nf_conntrack_tuple *tuple,
		    const struct nf_nat_range2 *range)
{
	/* If we are supposed to map IPs, then we must be in the
	 * range specified, otherwise let this drag us onto a new src IP.
	 */
	if (range->flags & NF_NAT_RANGE_MAP_IPS &&
	    !nf_nat_inet_in_range(tuple, range))
		return 0;

	if (!(range->flags & NF_NAT_RANGE_PROTO_SPECIFIED))
		return 1;

	return l4proto_in_range(tuple, NF_NAT_MANIP_SRC,
				&range->min_proto, &range->max_proto);
}

static inline int
same_src(const struct nf_conn *ct,
	 const struct nf_conntrack_tuple *tuple)
{
	const struct nf_conntrack_tuple *t;

	t = &ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple;
	return (t->dst.protonum == tuple->dst.protonum &&
		nf_inet_addr_cmp(&t->src.u3, &tuple->src.u3) &&
		t->src.u.all == tuple->src.u.all);
}

/* Only called for SRC manip */
static int
find_appropriate_src(struct net *net,
		     const struct nf_conntrack_zone *zone,
		     const struct nf_conntrack_tuple *tuple,
		     struct nf_conntrack_tuple *result,
		     const struct nf_nat_range2 *range)
{
	unsigned int h = hash_by_src(net, tuple);
	const struct nf_conn *ct;

	hlist_for_each_entry_rcu(ct, &nf_nat_bysource[h], nat_bysource) {
		if (same_src(ct, tuple) &&
		    net_eq(net, nf_ct_net(ct)) &&
		    nf_ct_zone_equal(ct, zone, IP_CT_DIR_ORIGINAL)) {
			/* Copy source part from reply tuple. */
			nf_ct_invert_tuple(result,
					   &ct->tuplehash[IP_CT_DIR_REPLY].tuple);
			result->dst = tuple->dst;

			if (in_range(result, range))
				return 1;
		}
	}
	return 0;
}

/* For [FUTURE] fragmentation handling, we want the least-used
 * src-ip/dst-ip/proto triple.  Fairness doesn't come into it.  Thus
 * if the range specifies 1.2.3.4 ports 10000-10005 and 1.2.3.5 ports
 * 1-65535, we don't do pro-rata allocation based on ports; we choose
 * the ip with the lowest src-ip/dst-ip/proto usage.
 */
static void
find_best_ips_proto(const struct nf_conntrack_zone *zone,
		    struct nf_conntrack_tuple *tuple,
		    const struct nf_nat_range2 *range,
		    const struct nf_conn *ct,
		    enum nf_nat_manip_type maniptype)
{
	union nf_inet_addr *var_ipp;
	unsigned int i, max;
	/* Host order */
	u32 minip, maxip, j, dist;
	bool full_range;

	/* No IP mapping?  Do nothing. */
	if (!(range->flags & NF_NAT_RANGE_MAP_IPS))
		return;

	if (maniptype == NF_NAT_MANIP_SRC)
		var_ipp = &tuple->src.u3;
	else
		var_ipp = &tuple->dst.u3;

	/* Fast path: only one choice. */
	if (nf_inet_addr_cmp(&range->min_addr, &range->max_addr)) {
		*var_ipp = range->min_addr;
		return;
	}

	if (nf_ct_l3num(ct) == NFPROTO_IPV4)
		max = sizeof(var_ipp->ip) / sizeof(u32) - 1;
	else
		max = sizeof(var_ipp->ip6) / sizeof(u32) - 1;

	/* Hashing source and destination IPs gives a fairly even
	 * spread in practice (if there are a small number of IPs
	 * involved, there usually aren't that many connections
	 * anyway).  The consistency means that servers see the same
	 * client coming from the same IP (some Internet Banking sites
	 * like this), even across reboots.
	 */
	j = jhash2((u32 *)&tuple->src.u3, sizeof(tuple->src.u3) / sizeof(u32),
		   range->flags & NF_NAT_RANGE_PERSISTENT ?
			0 : (__force u32)tuple->dst.u3.all[max] ^ zone->id);

	full_range = false;
	for (i = 0; i <= max; i++) {
		/* If first bytes of the address are at the maximum, use the
		 * distance. Otherwise use the full range.
		 */
		if (!full_range) {
			minip = ntohl((__force __be32)range->min_addr.all[i]);
			maxip = ntohl((__force __be32)range->max_addr.all[i]);
			dist  = maxip - minip + 1;
		} else {
			minip = 0;
			dist  = ~0;
		}

		var_ipp->all[i] = (__force __u32)
			htonl(minip + reciprocal_scale(j, dist));
		if (var_ipp->all[i] != range->max_addr.all[i])
			full_range = true;

		if (!(range->flags & NF_NAT_RANGE_PERSISTENT))
			j ^= (__force u32)tuple->dst.u3.all[i];
	}
}

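/* Illustrative example (editorial, not from the original source): with a
 * range of 10.0.0.1-10.0.0.4 and NF_NAT_RANGE_PERSISTENT set, j depends only
 * on the client's source address, so every connection from that client maps
 * to the same external address, 10.0.0.1 + reciprocal_scale(j, 4).
 */
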
/* Alter the per-proto part of the tuple (depending on maniptype), to
 * give a unique tuple in the given range if possible.
 *
 * Per-protocol part of tuple is initialized to the incoming packet.
 */
static void nf_nat_l4proto_unique_tuple(struct nf_conntrack_tuple *tuple,
					const struct nf_nat_range2 *range,
					enum nf_nat_manip_type maniptype,
					const struct nf_conn *ct)
{
	unsigned int range_size, min, max, i, attempts;
	__be16 *keyptr;
	u16 off;
	static const unsigned int max_attempts = 128;

	switch (tuple->dst.protonum) {
	case IPPROTO_ICMP:
	case IPPROTO_ICMPV6:
		/* id is same for either direction... */
		keyptr = &tuple->src.u.icmp.id;
		if (!(range->flags & NF_NAT_RANGE_PROTO_SPECIFIED)) {
			min = 0;
			range_size = 65536;
		} else {
			min = ntohs(range->min_proto.icmp.id);
			range_size = ntohs(range->max_proto.icmp.id) -
				     ntohs(range->min_proto.icmp.id) + 1;
		}
		goto find_free_id;
#if IS_ENABLED(CONFIG_NF_CT_PROTO_GRE)
	case IPPROTO_GRE:
		/* If there is no master conntrack we are not PPTP,
		 * do not change tuples.
		 */
		if (!ct->master)
			return;

		if (maniptype == NF_NAT_MANIP_SRC)
			keyptr = &tuple->src.u.gre.key;
		else
			keyptr = &tuple->dst.u.gre.key;

		if (!(range->flags & NF_NAT_RANGE_PROTO_SPECIFIED)) {
			min = 1;
			range_size = 65535;
		} else {
			min = ntohs(range->min_proto.gre.key);
			range_size = ntohs(range->max_proto.gre.key) - min + 1;
		}
		goto find_free_id;
#endif
	case IPPROTO_UDP:
	case IPPROTO_UDPLITE:
	case IPPROTO_TCP:
	case IPPROTO_SCTP:
	case IPPROTO_DCCP:
		if (maniptype == NF_NAT_MANIP_SRC)
			keyptr = &tuple->src.u.all;
		else
			keyptr = &tuple->dst.u.all;

		break;
	default:
		return;
	}

	/* If no range specified... */
	if (!(range->flags & NF_NAT_RANGE_PROTO_SPECIFIED)) {
		/* If it's dst rewrite, can't change port */
		if (maniptype == NF_NAT_MANIP_DST)
			return;

		if (ntohs(*keyptr) < 1024) {
			/* Loose convention: >> 512 is credential passing */
			if (ntohs(*keyptr) < 512) {
				min = 1;
				range_size = 511 - min + 1;
			} else {
				min = 600;
				range_size = 1023 - min + 1;
			}
		} else {
			min = 1024;
			range_size = 65535 - 1024 + 1;
		}
	} else {
		min = ntohs(range->min_proto.all);
		max = ntohs(range->max_proto.all);
		if (unlikely(max < min))
			swap(max, min);
		range_size = max - min + 1;
	}

find_free_id:
	if (range->flags & NF_NAT_RANGE_PROTO_OFFSET)
		off = (ntohs(*keyptr) - ntohs(range->base_proto.all));
	else
		off = prandom_u32();

	attempts = range_size;
	if (attempts > max_attempts)
		attempts = max_attempts;

	/* We are in softirq; doing a search of the entire range risks
	 * soft lockup when all tuples are already used.
	 *
	 * If we can't find any free port from first offset, pick a new
	 * one and try again, with ever smaller search window.
	 */
another_round:
	for (i = 0; i < attempts; i++, off++) {
		*keyptr = htons(min + off % range_size);
		if (!nf_nat_used_tuple(tuple, ct))
			return;
	}

	if (attempts >= range_size || attempts < 16)
		return;
	attempts /= 2;
	off = prandom_u32();
	goto another_round;
}

/* Manipulate the tuple into the range given. For NF_INET_POST_ROUTING,
 * we change the source to map into the range. For NF_INET_PRE_ROUTING
 * and NF_INET_LOCAL_OUT, we change the destination to map into the
 * range. It might not be possible to get a unique tuple, but we try.
 * At worst (or if we race), we will end up with a final duplicate in
 * __nf_conntrack_confirm and drop the packet. */
static void
get_unique_tuple(struct nf_conntrack_tuple *tuple,
		 const struct nf_conntrack_tuple *orig_tuple,
		 const struct nf_nat_range2 *range,
		 struct nf_conn *ct,
		 enum nf_nat_manip_type maniptype)
{
	const struct nf_conntrack_zone *zone;
	struct net *net = nf_ct_net(ct);

	zone = nf_ct_zone(ct);

	/* 1) If this srcip/proto/src-proto-part is currently mapped,
	 * and that same mapping gives a unique tuple within the given
	 * range, use that.
	 *
	 * This is only required for source (ie. NAT/masq) mappings.
	 * So far, we don't do local source mappings, so multiple
	 * manips not an issue.
	 */
	if (maniptype == NF_NAT_MANIP_SRC &&
	    !(range->flags & NF_NAT_RANGE_PROTO_RANDOM_ALL)) {
		/* try the original tuple first */
		if (in_range(orig_tuple, range)) {
			if (!nf_nat_used_tuple(orig_tuple, ct)) {
				*tuple = *orig_tuple;
				return;
			}
		} else if (find_appropriate_src(net, zone,
						orig_tuple, tuple, range)) {
			pr_debug("get_unique_tuple: Found current src map\n");
			if (!nf_nat_used_tuple(tuple, ct))
				return;
		}
	}

	/* 2) Select the least-used IP/proto combination in the given range */
	*tuple = *orig_tuple;
	find_best_ips_proto(zone, tuple, range, ct, maniptype);

	/* 3) The per-protocol part of the manip is made to map into
	 * the range to make a unique tuple.
	 */

	/* Only bother mapping if it's not already in range and unique */
	if (!(range->flags & NF_NAT_RANGE_PROTO_RANDOM_ALL)) {
		if (range->flags & NF_NAT_RANGE_PROTO_SPECIFIED) {
			if (!(range->flags & NF_NAT_RANGE_PROTO_OFFSET) &&
			    l4proto_in_range(tuple, maniptype,
					     &range->min_proto,
					     &range->max_proto) &&
			    (range->min_proto.all == range->max_proto.all ||
			     !nf_nat_used_tuple(tuple, ct)))
				return;
		} else if (!nf_nat_used_tuple(tuple, ct)) {
			return;
		}
	}

	/* Last chance: get protocol to try to obtain unique tuple. */
	nf_nat_l4proto_unique_tuple(tuple, range, maniptype, ct);
}

struct nf_conn_nat *nf_ct_nat_ext_add(struct nf_conn *ct)
{
	struct nf_conn_nat *nat = nfct_nat(ct);
	if (nat)
		return nat;

	if (!nf_ct_is_confirmed(ct))
		nat = nf_ct_ext_add(ct, NF_CT_EXT_NAT, GFP_ATOMIC);

	return nat;
}
EXPORT_SYMBOL_GPL(nf_ct_nat_ext_add);

unsigned int
nf_nat_setup_info(struct nf_conn *ct,
		  const struct nf_nat_range2 *range,
		  enum nf_nat_manip_type maniptype)
{
	struct net *net = nf_ct_net(ct);
	struct nf_conntrack_tuple curr_tuple, new_tuple;

	/* Can't setup nat info for confirmed ct. */
	if (nf_ct_is_confirmed(ct))
		return NF_ACCEPT;

	WARN_ON(maniptype != NF_NAT_MANIP_SRC &&
		maniptype != NF_NAT_MANIP_DST);

	if (WARN_ON(nf_nat_initialized(ct, maniptype)))
		return NF_DROP;

	/* What we've got will look like inverse of reply. Normally
	 * this is what is in the conntrack, except for prior
	 * manipulations (future optimization: if num_manips == 0,
	 * orig_tp = ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple)
	 */
	nf_ct_invert_tuple(&curr_tuple,
			   &ct->tuplehash[IP_CT_DIR_REPLY].tuple);

	get_unique_tuple(&new_tuple, &curr_tuple, range, ct, maniptype);

	if (!nf_ct_tuple_equal(&new_tuple, &curr_tuple)) {
		struct nf_conntrack_tuple reply;

		/* Alter conntrack table so will recognize replies. */
		nf_ct_invert_tuple(&reply, &new_tuple);
		nf_conntrack_alter_reply(ct, &reply);

		/* Non-atomic: we own this at the moment. */
		if (maniptype == NF_NAT_MANIP_SRC)
			ct->status |= IPS_SRC_NAT;
		else
			ct->status |= IPS_DST_NAT;

		if (nfct_help(ct) && !nfct_seqadj(ct))
			if (!nfct_seqadj_ext_add(ct))
				return NF_DROP;
	}

	if (maniptype == NF_NAT_MANIP_SRC) {
		unsigned int srchash;
		spinlock_t *lock;

		srchash = hash_by_src(net,
				      &ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple);
		lock = &nf_nat_locks[srchash % CONNTRACK_LOCKS];
		spin_lock_bh(lock);
		hlist_add_head_rcu(&ct->nat_bysource,
				   &nf_nat_bysource[srchash]);
		spin_unlock_bh(lock);
	}

	/* It's done. */
	if (maniptype == NF_NAT_MANIP_DST)
		ct->status |= IPS_DST_NAT_DONE;
	else
		ct->status |= IPS_SRC_NAT_DONE;

	return NF_ACCEPT;
}
EXPORT_SYMBOL(nf_nat_setup_info);

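/* Usage sketch (editorial addition, not part of this file): a SNAT backend
 * typically fills an nf_nat_range2 and calls nf_nat_setup_info() from its
 * nat hook, e.g.
 *
 *	struct nf_nat_range2 range = {
 *		.flags		= NF_NAT_RANGE_MAP_IPS |
 *				  NF_NAT_RANGE_PROTO_SPECIFIED,
 *		.min_addr.ip	= newsrc,
 *		.max_addr.ip	= newsrc,
 *		.min_proto.all	= htons(1024),
 *		.max_proto.all	= htons(65535),
 *	};
 *	return nf_nat_setup_info(ct, &range, NF_NAT_MANIP_SRC);
 *
 * The values above are made up; real callers take them from the ruleset.
 */
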
static unsigned int
__nf_nat_alloc_null_binding(struct nf_conn *ct, enum nf_nat_manip_type manip)
{
	/* Force range to this IP; let proto decide mapping for
	 * per-proto parts (hence not NF_NAT_RANGE_PROTO_SPECIFIED).
	 * Use reply in case it's already been mangled (eg local packet).
	 */
	union nf_inet_addr ip =
		(manip == NF_NAT_MANIP_SRC ?
		ct->tuplehash[IP_CT_DIR_REPLY].tuple.dst.u3 :
		ct->tuplehash[IP_CT_DIR_REPLY].tuple.src.u3);
	struct nf_nat_range2 range = {
		.flags		= NF_NAT_RANGE_MAP_IPS,
		.min_addr	= ip,
		.max_addr	= ip,
	};
	return nf_nat_setup_info(ct, &range, manip);
}

unsigned int
nf_nat_alloc_null_binding(struct nf_conn *ct, unsigned int hooknum)
{
	return __nf_nat_alloc_null_binding(ct, HOOK2MANIP(hooknum));
}
EXPORT_SYMBOL_GPL(nf_nat_alloc_null_binding);

/* Do packet manipulations according to nf_nat_setup_info. */
unsigned int nf_nat_packet(struct nf_conn *ct,
			   enum ip_conntrack_info ctinfo,
			   unsigned int hooknum,
			   struct sk_buff *skb)
{
	enum nf_nat_manip_type mtype = HOOK2MANIP(hooknum);
	enum ip_conntrack_dir dir = CTINFO2DIR(ctinfo);
	unsigned int verdict = NF_ACCEPT;
	unsigned long statusbit;

	if (mtype == NF_NAT_MANIP_SRC)
		statusbit = IPS_SRC_NAT;
	else
		statusbit = IPS_DST_NAT;

	/* Invert if this is reply dir. */
	if (dir == IP_CT_DIR_REPLY)
		statusbit ^= IPS_NAT_MASK;

	/* Non-atomic: these bits don't change. */
	if (ct->status & statusbit)
		verdict = nf_nat_manip_pkt(skb, ct, mtype, dir);

	return verdict;
}
EXPORT_SYMBOL_GPL(nf_nat_packet);

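/* Worked example (editorial addition): on a connection that only has
 * IPS_SRC_NAT set, a reply-direction packet seen at PREROUTING has
 * mtype == NF_NAT_MANIP_DST; the xor above flips statusbit to IPS_SRC_NAT,
 * so the test matches and the reply's destination is rewritten back to the
 * original client address.
 */
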
unsigned int
nf_nat_inet_fn(void *priv, struct sk_buff *skb,
	       const struct nf_hook_state *state)
{
	struct nf_conn *ct;
	enum ip_conntrack_info ctinfo;
	struct nf_conn_nat *nat;
	/* maniptype == SRC for postrouting. */
	enum nf_nat_manip_type maniptype = HOOK2MANIP(state->hook);

	ct = nf_ct_get(skb, &ctinfo);
	/* Can't track?  It's not due to stress, or conntrack would
	 * have dropped it.  Hence it's the user's responsibility to
	 * packet filter it out, or implement conntrack/NAT for that
	 * protocol. 8) --RR
	 */
	if (!ct)
		return NF_ACCEPT;

	nat = nfct_nat(ct);

	switch (ctinfo) {
	case IP_CT_RELATED:
	case IP_CT_RELATED_REPLY:
		/* Only ICMPs can be IP_CT_IS_REPLY.  Fallthrough */
	case IP_CT_NEW:
		/* Seen it before?  This can happen for loopback, retrans,
		 * or local packets.
		 */
		if (!nf_nat_initialized(ct, maniptype)) {
			struct nf_nat_lookup_hook_priv *lpriv = priv;
			struct nf_hook_entries *e = rcu_dereference(lpriv->entries);
			unsigned int ret;
			int i;

			if (!e)
				goto null_bind;

			for (i = 0; i < e->num_hook_entries; i++) {
				ret = e->hooks[i].hook(e->hooks[i].priv, skb,
						       state);
				if (ret != NF_ACCEPT)
					return ret;
				if (nf_nat_initialized(ct, maniptype))
					goto do_nat;
			}
null_bind:
			ret = nf_nat_alloc_null_binding(ct, state->hook);
			if (ret != NF_ACCEPT)
				return ret;
		} else {
			pr_debug("Already setup manip %s for ct %p (status bits 0x%lx)\n",
				 maniptype == NF_NAT_MANIP_SRC ? "SRC" : "DST",
				 ct, ct->status);
			if (nf_nat_oif_changed(state->hook, ctinfo, nat,
					       state->out))
				goto oif_changed;
		}
		break;
	default:
		/* ESTABLISHED */
		WARN_ON(ctinfo != IP_CT_ESTABLISHED &&
			ctinfo != IP_CT_ESTABLISHED_REPLY);
		if (nf_nat_oif_changed(state->hook, ctinfo, nat, state->out))
			goto oif_changed;
	}

do_nat:
	return nf_nat_packet(ct, ctinfo, state->hook, skb);

oif_changed:
	nf_ct_kill_acct(ct, ctinfo, skb);
	return NF_DROP;
}
EXPORT_SYMBOL_GPL(nf_nat_inet_fn);

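/* Editorial summary (not in the original source): for new/related
 * conntracks the per-hook entries list registered via nf_nat_register_fn()
 * (iptables/nftables nat chains) runs first; if none of those backends sets
 * up a mapping, nf_nat_alloc_null_binding() reserves the identity mapping so
 * the reply tuple stays unique. Established traffic goes straight to
 * nf_nat_packet().
 */
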
struct nf_nat_proto_clean {
	u8	l3proto;
	u8	l4proto;
};

/* kill conntracks with affected NAT section */
static int nf_nat_proto_remove(struct nf_conn *i, void *data)
{
	const struct nf_nat_proto_clean *clean = data;

	if ((clean->l3proto && nf_ct_l3num(i) != clean->l3proto) ||
	    (clean->l4proto && nf_ct_protonum(i) != clean->l4proto))
		return 0;

	return i->status & IPS_NAT_MASK ? 1 : 0;
}

static void __nf_nat_cleanup_conntrack(struct nf_conn *ct)
{
	unsigned int h;

	h = hash_by_src(nf_ct_net(ct), &ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple);
	spin_lock_bh(&nf_nat_locks[h % CONNTRACK_LOCKS]);
	hlist_del_rcu(&ct->nat_bysource);
	spin_unlock_bh(&nf_nat_locks[h % CONNTRACK_LOCKS]);
}

static int nf_nat_proto_clean(struct nf_conn *ct, void *data)
{
	if (nf_nat_proto_remove(ct, data))
		return 1;

	/* This module is being removed and conntrack has nat null binding.
	 * Remove it from bysource hash, as the table will be freed soon.
	 *
	 * Else, when the conntrack is destroyed, nf_nat_cleanup_conntrack()
	 * will delete entry from already-freed table.
	 */
	if (test_and_clear_bit(IPS_SRC_NAT_DONE_BIT, &ct->status))
		__nf_nat_cleanup_conntrack(ct);

	/* don't delete conntrack.  Although that would make things a lot
	 * simpler, we'd end up flushing all conntracks on nat rmmod.
	 */
	return 0;
}

/* No one using conntrack by the time this called. */
static void nf_nat_cleanup_conntrack(struct nf_conn *ct)
{
	if (ct->status & IPS_SRC_NAT_DONE)
		__nf_nat_cleanup_conntrack(ct);
}

static struct nf_ct_ext_type nat_extend __read_mostly = {
	.len		= sizeof(struct nf_conn_nat),
	.align		= __alignof__(struct nf_conn_nat),
	.destroy	= nf_nat_cleanup_conntrack,
	.id		= NF_CT_EXT_NAT,
};

#if IS_ENABLED(CONFIG_NF_CT_NETLINK)

#include <linux/netfilter/nfnetlink.h>
#include <linux/netfilter/nfnetlink_conntrack.h>

static const struct nla_policy protonat_nla_policy[CTA_PROTONAT_MAX+1] = {
	[CTA_PROTONAT_PORT_MIN]	= { .type = NLA_U16 },
	[CTA_PROTONAT_PORT_MAX]	= { .type = NLA_U16 },
};

static int nf_nat_l4proto_nlattr_to_range(struct nlattr *tb[],
					   struct nf_nat_range2 *range)
{
	if (tb[CTA_PROTONAT_PORT_MIN]) {
		range->min_proto.all = nla_get_be16(tb[CTA_PROTONAT_PORT_MIN]);
		range->max_proto.all = range->min_proto.all;
		range->flags |= NF_NAT_RANGE_PROTO_SPECIFIED;
	}
	if (tb[CTA_PROTONAT_PORT_MAX]) {
		range->max_proto.all = nla_get_be16(tb[CTA_PROTONAT_PORT_MAX]);
		range->flags |= NF_NAT_RANGE_PROTO_SPECIFIED;
	}
	return 0;
}

static int nfnetlink_parse_nat_proto(struct nlattr *attr,
				     const struct nf_conn *ct,
				     struct nf_nat_range2 *range)
{
	struct nlattr *tb[CTA_PROTONAT_MAX+1];
	int err;

	err = nla_parse_nested_deprecated(tb, CTA_PROTONAT_MAX, attr,
					  protonat_nla_policy, NULL);
	if (err < 0)
		return err;

	return nf_nat_l4proto_nlattr_to_range(tb, range);
}

static const struct nla_policy nat_nla_policy[CTA_NAT_MAX+1] = {
	[CTA_NAT_V4_MINIP]	= { .type = NLA_U32 },
	[CTA_NAT_V4_MAXIP]	= { .type = NLA_U32 },
	[CTA_NAT_V6_MINIP]	= { .len = sizeof(struct in6_addr) },
	[CTA_NAT_V6_MAXIP]	= { .len = sizeof(struct in6_addr) },
	[CTA_NAT_PROTO]		= { .type = NLA_NESTED },
};

static int nf_nat_ipv4_nlattr_to_range(struct nlattr *tb[],
				       struct nf_nat_range2 *range)
{
	if (tb[CTA_NAT_V4_MINIP]) {
		range->min_addr.ip = nla_get_be32(tb[CTA_NAT_V4_MINIP]);
		range->flags |= NF_NAT_RANGE_MAP_IPS;
	}

	if (tb[CTA_NAT_V4_MAXIP])
		range->max_addr.ip = nla_get_be32(tb[CTA_NAT_V4_MAXIP]);
	else
		range->max_addr.ip = range->min_addr.ip;

	return 0;
}

static int nf_nat_ipv6_nlattr_to_range(struct nlattr *tb[],
				       struct nf_nat_range2 *range)
{
	if (tb[CTA_NAT_V6_MINIP]) {
		nla_memcpy(&range->min_addr.ip6, tb[CTA_NAT_V6_MINIP],
			   sizeof(struct in6_addr));
		range->flags |= NF_NAT_RANGE_MAP_IPS;
	}

	if (tb[CTA_NAT_V6_MAXIP])
		nla_memcpy(&range->max_addr.ip6, tb[CTA_NAT_V6_MAXIP],
			   sizeof(struct in6_addr));
	else
		range->max_addr = range->min_addr;

	return 0;
}

static int
nfnetlink_parse_nat(const struct nlattr *nat,
		    const struct nf_conn *ct, struct nf_nat_range2 *range)
{
	struct nlattr *tb[CTA_NAT_MAX+1];
	int err;

	memset(range, 0, sizeof(*range));

	err = nla_parse_nested_deprecated(tb, CTA_NAT_MAX, nat,
					  nat_nla_policy, NULL);
	if (err < 0)
		return err;

	switch (nf_ct_l3num(ct)) {
	case NFPROTO_IPV4:
		err = nf_nat_ipv4_nlattr_to_range(tb, range);
		break;
	case NFPROTO_IPV6:
		err = nf_nat_ipv6_nlattr_to_range(tb, range);
		break;
	default:
		err = -EPROTONOSUPPORT;
		break;
	}

	if (err)
		return err;

	if (!tb[CTA_NAT_PROTO])
		return 0;

	return nfnetlink_parse_nat_proto(tb[CTA_NAT_PROTO], ct, range);
}

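/* Editorial sketch of the attribute nesting consumed above (assuming the
 * usual ctnetlink caller, which passes the nested CTA_NAT_SRC/CTA_NAT_DST
 * attribute):
 *
 *	CTA_NAT_SRC or CTA_NAT_DST
 *	  CTA_NAT_V4_MINIP, CTA_NAT_V4_MAXIP   (or the CTA_NAT_V6_* pair)
 *	  CTA_NAT_PROTO
 *	    CTA_PROTONAT_PORT_MIN
 *	    CTA_PROTONAT_PORT_MAX
 */
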
/* This function is called under rcu_read_lock() */
static int
nfnetlink_parse_nat_setup(struct nf_conn *ct,
			  enum nf_nat_manip_type manip,
			  const struct nlattr *attr)
{
	struct nf_nat_range2 range;
	int err;

	/* Should not happen, restricted to creating new conntracks
	 * via ctnetlink.
	 */
	if (WARN_ON_ONCE(nf_nat_initialized(ct, manip)))
		return -EEXIST;

	/* No NAT information has been passed, allocate the null-binding */
	if (attr == NULL)
		return __nf_nat_alloc_null_binding(ct, manip) == NF_DROP ? -ENOMEM : 0;

	err = nfnetlink_parse_nat(attr, ct, &range);
	if (err < 0)
		return err;

	return nf_nat_setup_info(ct, &range, manip) == NF_DROP ? -ENOMEM : 0;
}
#else
static int
nfnetlink_parse_nat_setup(struct nf_conn *ct,
			  enum nf_nat_manip_type manip,
			  const struct nlattr *attr)
{
	return -EOPNOTSUPP;
}
#endif

static struct nf_ct_helper_expectfn follow_master_nat = {
	.name		= "nat-follow-master",
	.expectfn	= nf_nat_follow_master,
};

int nf_nat_register_fn(struct net *net, u8 pf, const struct nf_hook_ops *ops,
		       const struct nf_hook_ops *orig_nat_ops, unsigned int ops_count)
{
	struct nat_net *nat_net = net_generic(net, nat_net_id);
	struct nf_nat_hooks_net *nat_proto_net;
	struct nf_nat_lookup_hook_priv *priv;
	unsigned int hooknum = ops->hooknum;
	struct nf_hook_ops *nat_ops;
	int i, ret;

	if (WARN_ON_ONCE(pf >= ARRAY_SIZE(nat_net->nat_proto_net)))
		return -EINVAL;

	nat_proto_net = &nat_net->nat_proto_net[pf];

	for (i = 0; i < ops_count; i++) {
		if (orig_nat_ops[i].hooknum == hooknum) {
			hooknum = i;
			break;
		}
	}

	if (WARN_ON_ONCE(i == ops_count))
		return -EINVAL;

	mutex_lock(&nf_nat_proto_mutex);
	if (!nat_proto_net->nat_hook_ops) {
		WARN_ON(nat_proto_net->users != 0);

		nat_ops = kmemdup(orig_nat_ops, sizeof(*orig_nat_ops) * ops_count, GFP_KERNEL);
		if (!nat_ops) {
			mutex_unlock(&nf_nat_proto_mutex);
			return -ENOMEM;
		}

		for (i = 0; i < ops_count; i++) {
			priv = kzalloc(sizeof(*priv), GFP_KERNEL);
			if (priv) {
				nat_ops[i].priv = priv;
				continue;
			}
			mutex_unlock(&nf_nat_proto_mutex);
			while (i)
				kfree(nat_ops[--i].priv);
			kfree(nat_ops);
			return -ENOMEM;
		}

		ret = nf_register_net_hooks(net, nat_ops, ops_count);
		if (ret < 0) {
			mutex_unlock(&nf_nat_proto_mutex);
			for (i = 0; i < ops_count; i++)
				kfree(nat_ops[i].priv);
			kfree(nat_ops);
			return ret;
		}

		nat_proto_net->nat_hook_ops = nat_ops;
	}

	nat_ops = nat_proto_net->nat_hook_ops;
	priv = nat_ops[hooknum].priv;
	if (WARN_ON_ONCE(!priv)) {
		mutex_unlock(&nf_nat_proto_mutex);
		return -EOPNOTSUPP;
	}

	ret = nf_hook_entries_insert_raw(&priv->entries, ops);
	if (ret == 0)
		nat_proto_net->users++;

	mutex_unlock(&nf_nat_proto_mutex);
	return ret;
}

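/* Editorial note (not in the original source): the first registration for a
 * given family duplicates and registers the shared netns hooks, attaching an
 * nf_nat_lookup_hook_priv to each; later callers only append their ops to the
 * matching RCU hook_entries list and bump the users count, so nf_nat_inet_fn()
 * runs every NAT backend registered on that hook.
 */
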
void nf_nat_unregister_fn(struct net *net, u8 pf, const struct nf_hook_ops *ops,
			  unsigned int ops_count)
{
	struct nat_net *nat_net = net_generic(net, nat_net_id);
	struct nf_nat_hooks_net *nat_proto_net;
	struct nf_nat_lookup_hook_priv *priv;
	struct nf_hook_ops *nat_ops;
	int hooknum = ops->hooknum;
	int i;

	if (pf >= ARRAY_SIZE(nat_net->nat_proto_net))
		return;

	nat_proto_net = &nat_net->nat_proto_net[pf];

	mutex_lock(&nf_nat_proto_mutex);
	if (WARN_ON(nat_proto_net->users == 0))
		goto unlock;

	nat_proto_net->users--;

	nat_ops = nat_proto_net->nat_hook_ops;
	for (i = 0; i < ops_count; i++) {
		if (nat_ops[i].hooknum == hooknum) {
			hooknum = i;
			break;
		}
	}
	if (WARN_ON_ONCE(i == ops_count))
		goto unlock;
	priv = nat_ops[hooknum].priv;
	nf_hook_entries_delete_raw(&priv->entries, ops);

	if (nat_proto_net->users == 0) {
		nf_unregister_net_hooks(net, nat_ops, ops_count);

		for (i = 0; i < ops_count; i++) {
			priv = nat_ops[i].priv;
			kfree_rcu(priv, rcu_head);
		}

		nat_proto_net->nat_hook_ops = NULL;
		kfree(nat_ops);
	}
unlock:
	mutex_unlock(&nf_nat_proto_mutex);
}

static struct pernet_operations nat_net_ops = {
	.id = &nat_net_id,
	.size = sizeof(struct nat_net),
};

static struct nf_nat_hook nat_hook = {
	.parse_nat_setup	= nfnetlink_parse_nat_setup,
#ifdef CONFIG_XFRM
	.decode_session		= __nf_nat_decode_session,
#endif
	.manip_pkt		= nf_nat_manip_pkt,
};

static int __init nf_nat_init(void)
{
	int ret, i;

	/* Leave them the same for the moment. */
	nf_nat_htable_size = nf_conntrack_htable_size;
	if (nf_nat_htable_size < CONNTRACK_LOCKS)
		nf_nat_htable_size = CONNTRACK_LOCKS;

	nf_nat_bysource = nf_ct_alloc_hashtable(&nf_nat_htable_size, 0);
	if (!nf_nat_bysource)
		return -ENOMEM;

	ret = nf_ct_extend_register(&nat_extend);
	if (ret < 0) {
		kvfree(nf_nat_bysource);
		pr_err("Unable to register extension\n");
		return ret;
	}

	for (i = 0; i < CONNTRACK_LOCKS; i++)
		spin_lock_init(&nf_nat_locks[i]);

	ret = register_pernet_subsys(&nat_net_ops);
	if (ret < 0) {
		nf_ct_extend_unregister(&nat_extend);
		kvfree(nf_nat_bysource);
		return ret;
	}

	nf_ct_helper_expectfn_register(&follow_master_nat);

	WARN_ON(nf_nat_hook != NULL);
	RCU_INIT_POINTER(nf_nat_hook, &nat_hook);

	return 0;
}

static void __exit nf_nat_cleanup(void)
{
	struct nf_nat_proto_clean clean = {};

	nf_ct_iterate_destroy(nf_nat_proto_clean, &clean);

	nf_ct_extend_unregister(&nat_extend);
	nf_ct_helper_expectfn_unregister(&follow_master_nat);
	RCU_INIT_POINTER(nf_nat_hook, NULL);

	synchronize_net();
	kvfree(nf_nat_bysource);
	unregister_pernet_subsys(&nat_net_ops);
}

MODULE_LICENSE("GPL");

module_init(nf_nat_init);
module_exit(nf_nat_cleanup);