// SPDX-License-Identifier: GPL-2.0-only
/*
 * (C) 1999-2001 Paul `Rusty' Russell
 * (C) 2002-2006 Netfilter Core Team <coreteam@netfilter.org>
 * (C) 2011 Patrick McHardy <kaber@trash.net>
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/module.h>
#include <linux/types.h>
#include <linux/timer.h>
#include <linux/skbuff.h>
#include <linux/gfp.h>
#include <net/xfrm.h>
#include <linux/jhash.h>
#include <linux/rtnetlink.h>

#include <net/netfilter/nf_conntrack.h>
#include <net/netfilter/nf_conntrack_core.h>
#include <net/netfilter/nf_conntrack_helper.h>
#include <net/netfilter/nf_conntrack_seqadj.h>
#include <net/netfilter/nf_conntrack_zones.h>
#include <net/netfilter/nf_nat.h>
#include <net/netfilter/nf_nat_helper.h>
#include <uapi/linux/netfilter/nf_nat.h>

#include "nf_internals.h"

static spinlock_t nf_nat_locks[CONNTRACK_LOCKS];

static DEFINE_MUTEX(nf_nat_proto_mutex);
static unsigned int nat_net_id __read_mostly;

static struct hlist_head *nf_nat_bysource __read_mostly;
static unsigned int nf_nat_htable_size __read_mostly;
static unsigned int nf_nat_hash_rnd __read_mostly;

struct nf_nat_lookup_hook_priv {
	struct nf_hook_entries __rcu *entries;

	struct rcu_head rcu_head;
};

struct nf_nat_hooks_net {
	struct nf_hook_ops *nat_hook_ops;
	unsigned int users;
};

struct nat_net {
	struct nf_nat_hooks_net nat_proto_net[NFPROTO_NUMPROTO];
};

#ifdef CONFIG_XFRM
static void nf_nat_ipv4_decode_session(struct sk_buff *skb,
				       const struct nf_conn *ct,
				       enum ip_conntrack_dir dir,
				       unsigned long statusbit,
				       struct flowi *fl)
{
	const struct nf_conntrack_tuple *t = &ct->tuplehash[dir].tuple;
	struct flowi4 *fl4 = &fl->u.ip4;

	if (ct->status & statusbit) {
		fl4->daddr = t->dst.u3.ip;
		if (t->dst.protonum == IPPROTO_TCP ||
		    t->dst.protonum == IPPROTO_UDP ||
		    t->dst.protonum == IPPROTO_UDPLITE ||
		    t->dst.protonum == IPPROTO_DCCP ||
		    t->dst.protonum == IPPROTO_SCTP)
			fl4->fl4_dport = t->dst.u.all;
	}

	statusbit ^= IPS_NAT_MASK;

	if (ct->status & statusbit) {
		fl4->saddr = t->src.u3.ip;
		if (t->dst.protonum == IPPROTO_TCP ||
		    t->dst.protonum == IPPROTO_UDP ||
		    t->dst.protonum == IPPROTO_UDPLITE ||
		    t->dst.protonum == IPPROTO_DCCP ||
		    t->dst.protonum == IPPROTO_SCTP)
			fl4->fl4_sport = t->src.u.all;
	}
}

static void nf_nat_ipv6_decode_session(struct sk_buff *skb,
				       const struct nf_conn *ct,
				       enum ip_conntrack_dir dir,
				       unsigned long statusbit,
				       struct flowi *fl)
{
#if IS_ENABLED(CONFIG_IPV6)
	const struct nf_conntrack_tuple *t = &ct->tuplehash[dir].tuple;
	struct flowi6 *fl6 = &fl->u.ip6;

	if (ct->status & statusbit) {
		fl6->daddr = t->dst.u3.in6;
		if (t->dst.protonum == IPPROTO_TCP ||
		    t->dst.protonum == IPPROTO_UDP ||
		    t->dst.protonum == IPPROTO_UDPLITE ||
		    t->dst.protonum == IPPROTO_DCCP ||
		    t->dst.protonum == IPPROTO_SCTP)
			fl6->fl6_dport = t->dst.u.all;
	}

	statusbit ^= IPS_NAT_MASK;

	if (ct->status & statusbit) {
		fl6->saddr = t->src.u3.in6;
		if (t->dst.protonum == IPPROTO_TCP ||
		    t->dst.protonum == IPPROTO_UDP ||
		    t->dst.protonum == IPPROTO_UDPLITE ||
		    t->dst.protonum == IPPROTO_DCCP ||
		    t->dst.protonum == IPPROTO_SCTP)
			fl6->fl6_sport = t->src.u.all;
	}
#endif
}

static void __nf_nat_decode_session(struct sk_buff *skb, struct flowi *fl)
{
	const struct nf_conn *ct;
	enum ip_conntrack_info ctinfo;
	enum ip_conntrack_dir dir;
	unsigned long statusbit;
	u8 family;

	ct = nf_ct_get(skb, &ctinfo);
	if (ct == NULL)
		return;

	family = nf_ct_l3num(ct);
	dir = CTINFO2DIR(ctinfo);
	if (dir == IP_CT_DIR_ORIGINAL)
		statusbit = IPS_DST_NAT;
	else
		statusbit = IPS_SRC_NAT;

	switch (family) {
	case NFPROTO_IPV4:
		nf_nat_ipv4_decode_session(skb, ct, dir, statusbit, fl);
		return;
	case NFPROTO_IPV6:
		nf_nat_ipv6_decode_session(skb, ct, dir, statusbit, fl);
		return;
	}
}
#endif /* CONFIG_XFRM */

/* We keep an extra hash for each conntrack, for fast searching. */
static unsigned int
hash_by_src(const struct net *n, const struct nf_conntrack_tuple *tuple)
{
	unsigned int hash;

	get_random_once(&nf_nat_hash_rnd, sizeof(nf_nat_hash_rnd));

	/* Original src, to ensure we map it consistently if possible. */
	hash = jhash2((u32 *)&tuple->src, sizeof(tuple->src) / sizeof(u32),
		      tuple->dst.protonum ^ nf_nat_hash_rnd ^ net_hash_mix(n));

	return reciprocal_scale(hash, nf_nat_htable_size);
}
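
/* Example (illustrative, not part of the original file): reciprocal_scale()
 * maps the 32-bit hash into [0, size) without a modulo, computing
 * (u32)(((u64)hash * size) >> 32). With hash = 0x80000000 and
 * nf_nat_htable_size = 4096 this yields bucket 2048, i.e. roughly
 * hash / (2^32 / size).
 */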

/* Is this tuple already taken? (not by us) */
static int
nf_nat_used_tuple(const struct nf_conntrack_tuple *tuple,
		  const struct nf_conn *ignored_conntrack)
{
	/* Conntrack doesn't keep track of outgoing tuples; only
	 * incoming ones.  NAT means they don't have a fixed mapping,
	 * so we invert the tuple and look for the incoming reply.
	 *
	 * We could keep a separate hash if this proves too slow.
	 */
	struct nf_conntrack_tuple reply;

	nf_ct_invert_tuple(&reply, tuple);
	return nf_conntrack_tuple_taken(&reply, ignored_conntrack);
}
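
/* Example: for a candidate outgoing tuple 10.0.0.1:5000 -> 198.51.100.7:53
 * (UDP), the inverted reply tuple is 198.51.100.7:53 -> 10.0.0.1:5000. If
 * that reply tuple is already claimed by another conntrack entry, this
 * mapping counts as taken and a different source port or address is chosen.
 */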

static bool nf_nat_inet_in_range(const struct nf_conntrack_tuple *t,
				 const struct nf_nat_range2 *range)
{
	if (t->src.l3num == NFPROTO_IPV4)
		return ntohl(t->src.u3.ip) >= ntohl(range->min_addr.ip) &&
		       ntohl(t->src.u3.ip) <= ntohl(range->max_addr.ip);

	return ipv6_addr_cmp(&t->src.u3.in6, &range->min_addr.in6) >= 0 &&
	       ipv6_addr_cmp(&t->src.u3.in6, &range->max_addr.in6) <= 0;
}

/* Is the manipulable part of the tuple between min and max incl? */
static bool l4proto_in_range(const struct nf_conntrack_tuple *tuple,
			     enum nf_nat_manip_type maniptype,
			     const union nf_conntrack_man_proto *min,
			     const union nf_conntrack_man_proto *max)
{
	__be16 port;

	switch (tuple->dst.protonum) {
	case IPPROTO_ICMP:
	case IPPROTO_ICMPV6:
		return ntohs(tuple->src.u.icmp.id) >= ntohs(min->icmp.id) &&
		       ntohs(tuple->src.u.icmp.id) <= ntohs(max->icmp.id);
	case IPPROTO_GRE: /* all fall through */
	case IPPROTO_TCP:
	case IPPROTO_UDP:
	case IPPROTO_UDPLITE:
	case IPPROTO_DCCP:
	case IPPROTO_SCTP:
		if (maniptype == NF_NAT_MANIP_SRC)
			port = tuple->src.u.all;
		else
			port = tuple->dst.u.all;

		return ntohs(port) >= ntohs(min->all) &&
		       ntohs(port) <= ntohs(max->all);
	default:
		return true;
	}
}
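
/* Example: with maniptype == NF_NAT_MANIP_SRC on TCP, a tuple whose source
 * port is 1025 is in range for min->all = htons(1024) and
 * max->all = htons(65535), but not for a 512-1023 range.
 */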

/* If we source-map this tuple so the reply looks like reply_tuple, will
 * that meet the constraints of range?
 */
static int in_range(const struct nf_conntrack_tuple *tuple,
		    const struct nf_nat_range2 *range)
{
	/* If we are supposed to map IPs, then we must be in the
	 * range specified, otherwise let this drag us onto a new src IP.
	 */
	if (range->flags & NF_NAT_RANGE_MAP_IPS &&
	    !nf_nat_inet_in_range(tuple, range))
		return 0;

	if (!(range->flags & NF_NAT_RANGE_PROTO_SPECIFIED))
		return 1;

	return l4proto_in_range(tuple, NF_NAT_MANIP_SRC,
				&range->min_proto, &range->max_proto);
}

static inline int
same_src(const struct nf_conn *ct,
	 const struct nf_conntrack_tuple *tuple)
{
	const struct nf_conntrack_tuple *t;

	t = &ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple;
	return (t->dst.protonum == tuple->dst.protonum &&
		nf_inet_addr_cmp(&t->src.u3, &tuple->src.u3) &&
		t->src.u.all == tuple->src.u.all);
}

/* Only called for SRC manip */
static int
find_appropriate_src(struct net *net,
		     const struct nf_conntrack_zone *zone,
		     const struct nf_conntrack_tuple *tuple,
		     struct nf_conntrack_tuple *result,
		     const struct nf_nat_range2 *range)
{
	unsigned int h = hash_by_src(net, tuple);
	const struct nf_conn *ct;

	hlist_for_each_entry_rcu(ct, &nf_nat_bysource[h], nat_bysource) {
		if (same_src(ct, tuple) &&
		    net_eq(net, nf_ct_net(ct)) &&
		    nf_ct_zone_equal(ct, zone, IP_CT_DIR_ORIGINAL)) {
			/* Copy source part from reply tuple. */
			nf_ct_invert_tuple(result,
					   &ct->tuplehash[IP_CT_DIR_REPLY].tuple);
			result->dst = tuple->dst;

			if (in_range(result, range))
				return 1;
		}
	}
	return 0;
}

/* For [FUTURE] fragmentation handling, we want the least-used
 * src-ip/dst-ip/proto triple.  Fairness doesn't come into it.  Thus
 * if the range specifies 1.2.3.4 ports 10000-10005 and 1.2.3.5 ports
 * 1-65535, we don't do pro-rata allocation based on ports; we choose
 * the ip with the lowest src-ip/dst-ip/proto usage.
 */
static void
find_best_ips_proto(const struct nf_conntrack_zone *zone,
		    struct nf_conntrack_tuple *tuple,
		    const struct nf_nat_range2 *range,
		    const struct nf_conn *ct,
		    enum nf_nat_manip_type maniptype)
{
	union nf_inet_addr *var_ipp;
	unsigned int i, max;
	/* Host order */
	u32 minip, maxip, j, dist;
	bool full_range;

	/* No IP mapping?  Do nothing. */
	if (!(range->flags & NF_NAT_RANGE_MAP_IPS))
		return;

	if (maniptype == NF_NAT_MANIP_SRC)
		var_ipp = &tuple->src.u3;
	else
		var_ipp = &tuple->dst.u3;

	/* Fast path: only one choice. */
	if (nf_inet_addr_cmp(&range->min_addr, &range->max_addr)) {
		*var_ipp = range->min_addr;
		return;
	}

	if (nf_ct_l3num(ct) == NFPROTO_IPV4)
		max = sizeof(var_ipp->ip) / sizeof(u32) - 1;
	else
		max = sizeof(var_ipp->ip6) / sizeof(u32) - 1;

	/* Hashing source and destination IPs gives a fairly even
	 * spread in practice (if there are a small number of IPs
	 * involved, there usually aren't that many connections
	 * anyway).  The consistency means that servers see the same
	 * client coming from the same IP (some Internet Banking sites
	 * like this), even across reboots.
	 */
	j = jhash2((u32 *)&tuple->src.u3, sizeof(tuple->src.u3) / sizeof(u32),
		   range->flags & NF_NAT_RANGE_PERSISTENT ?
			0 : (__force u32)tuple->dst.u3.all[max] ^ zone->id);

	full_range = false;
	for (i = 0; i <= max; i++) {
		/* If first bytes of the address are at the maximum, use the
		 * distance. Otherwise use the full range.
		 */
		if (!full_range) {
			minip = ntohl((__force __be32)range->min_addr.all[i]);
			maxip = ntohl((__force __be32)range->max_addr.all[i]);
			dist  = maxip - minip + 1;
		} else {
			minip = 0;
			dist  = ~0;
		}

		var_ipp->all[i] = (__force __u32)
			htonl(minip + reciprocal_scale(j, dist));
		if (var_ipp->all[i] != range->max_addr.all[i])
			full_range = true;

		if (!(range->flags & NF_NAT_RANGE_PERSISTENT))
			j ^= (__force u32)tuple->dst.u3.all[i];
	}
}
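
/* Example: with an SNAT range of 10.0.0.1-10.0.0.8, a given client address
 * always hashes to the same address in the range. If NF_NAT_RANGE_PERSISTENT
 * is set, the destination address and zone id are left out of the hash, so
 * the chosen address is also stable across different destinations.
 */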

/* Alter the per-proto part of the tuple (depending on maniptype), to
 * give a unique tuple in the given range if possible.
 *
 * Per-protocol part of tuple is initialized to the incoming packet.
 */
static void nf_nat_l4proto_unique_tuple(struct nf_conntrack_tuple *tuple,
					const struct nf_nat_range2 *range,
					enum nf_nat_manip_type maniptype,
					const struct nf_conn *ct)
{
	unsigned int range_size, min, max, i, attempts;
	__be16 *keyptr;
	u16 off;
	static const unsigned int max_attempts = 128;

	switch (tuple->dst.protonum) {
	case IPPROTO_ICMP:
	case IPPROTO_ICMPV6:
		/* id is same for either direction... */
		keyptr = &tuple->src.u.icmp.id;
		if (!(range->flags & NF_NAT_RANGE_PROTO_SPECIFIED)) {
			min = 0;
			range_size = 65536;
		} else {
			min = ntohs(range->min_proto.icmp.id);
			range_size = ntohs(range->max_proto.icmp.id) -
				     ntohs(range->min_proto.icmp.id) + 1;
		}
		goto find_free_id;
#if IS_ENABLED(CONFIG_NF_CT_PROTO_GRE)
	case IPPROTO_GRE:
		/* If there is no master conntrack we are not PPTP,
		 * do not change tuples
		 */
		if (!ct->master)
			return;

		if (maniptype == NF_NAT_MANIP_SRC)
			keyptr = &tuple->src.u.gre.key;
		else
			keyptr = &tuple->dst.u.gre.key;

		if (!(range->flags & NF_NAT_RANGE_PROTO_SPECIFIED)) {
			min = 1;
			range_size = 65535;
		} else {
			min = ntohs(range->min_proto.gre.key);
			range_size = ntohs(range->max_proto.gre.key) - min + 1;
		}
		goto find_free_id;
#endif
	case IPPROTO_UDP:
	case IPPROTO_UDPLITE:
	case IPPROTO_TCP:
	case IPPROTO_SCTP:
	case IPPROTO_DCCP:
		if (maniptype == NF_NAT_MANIP_SRC)
			keyptr = &tuple->src.u.all;
		else
			keyptr = &tuple->dst.u.all;
		break;
	default:
		return;
	}

	/* If no range specified... */
	if (!(range->flags & NF_NAT_RANGE_PROTO_SPECIFIED)) {
		/* If it's dst rewrite, can't change port */
		if (maniptype == NF_NAT_MANIP_DST)
			return;

		if (ntohs(*keyptr) < 1024) {
			/* Loose convention: >> 512 is credential passing */
			if (ntohs(*keyptr) < 512) {
				min = 1;
				range_size = 511 - min + 1;
			} else {
				min = 600;
				range_size = 1023 - min + 1;
			}
		} else {
			min = 1024;
			range_size = 65535 - 1024 + 1;
		}
	} else {
		min = ntohs(range->min_proto.all);
		max = ntohs(range->max_proto.all);
		if (unlikely(max < min))
			swap(max, min);
		range_size = max - min + 1;
	}

find_free_id:
	if (range->flags & NF_NAT_RANGE_PROTO_OFFSET)
		off = (ntohs(*keyptr) - ntohs(range->base_proto.all));
	else
		off = prandom_u32();

	attempts = range_size;
	if (attempts > max_attempts)
		attempts = max_attempts;

	/* We are in softirq; doing a search of the entire range risks
	 * soft lockup when all tuples are already used.
	 *
	 * If we can't find any free port from first offset, pick a new
	 * one and try again, with ever smaller search window.
	 */
another_round:
	for (i = 0; i < attempts; i++, off++) {
		*keyptr = htons(min + off % range_size);
		if (!nf_nat_used_tuple(tuple, ct))
			return;
	}

	if (attempts >= range_size || attempts < 16)
		return;
	attempts /= 2;
	off = prandom_u32();
	goto another_round;
}
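
/* Example: an unprivileged TCP source port (>= 1024) with no explicit range
 * gets min = 1024 and range_size = 64512. The search starts at a random
 * offset, probes at most 128 candidate ports per round and halves the
 * window between rounds instead of scanning all 64512 ports in softirq
 * context.
 */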

/* Manipulate the tuple into the range given. For NF_INET_POST_ROUTING,
 * we change the source to map into the range. For NF_INET_PRE_ROUTING
 * and NF_INET_LOCAL_OUT, we change the destination to map into the
 * range. It might not be possible to get a unique tuple, but we try.
 * At worst (or if we race), we will end up with a final duplicate in
 * __nf_conntrack_confirm and drop the packet. */
static void
get_unique_tuple(struct nf_conntrack_tuple *tuple,
		 const struct nf_conntrack_tuple *orig_tuple,
		 const struct nf_nat_range2 *range,
		 struct nf_conn *ct,
		 enum nf_nat_manip_type maniptype)
{
	const struct nf_conntrack_zone *zone;
	struct net *net = nf_ct_net(ct);

	zone = nf_ct_zone(ct);

	/* 1) If this srcip/proto/src-proto-part is currently mapped,
	 * and that same mapping gives a unique tuple within the given
	 * range, use that.
	 *
	 * This is only required for source (ie. NAT/masq) mappings.
	 * So far, we don't do local source mappings, so multiple
	 * manips not an issue.
	 */
	if (maniptype == NF_NAT_MANIP_SRC &&
	    !(range->flags & NF_NAT_RANGE_PROTO_RANDOM_ALL)) {
		/* try the original tuple first */
		if (in_range(orig_tuple, range)) {
			if (!nf_nat_used_tuple(orig_tuple, ct)) {
				*tuple = *orig_tuple;
				return;
			}
		} else if (find_appropriate_src(net, zone,
						orig_tuple, tuple, range)) {
			pr_debug("get_unique_tuple: Found current src map\n");
			if (!nf_nat_used_tuple(tuple, ct))
				return;
		}
	}

	/* 2) Select the least-used IP/proto combination in the given range */
	*tuple = *orig_tuple;
	find_best_ips_proto(zone, tuple, range, ct, maniptype);

	/* 3) The per-protocol part of the manip is made to map into
	 * the range to make a unique tuple.
	 */

	/* Only bother mapping if it's not already in range and unique */
	if (!(range->flags & NF_NAT_RANGE_PROTO_RANDOM_ALL)) {
		if (range->flags & NF_NAT_RANGE_PROTO_SPECIFIED) {
			if (!(range->flags & NF_NAT_RANGE_PROTO_OFFSET) &&
			    l4proto_in_range(tuple, maniptype,
					     &range->min_proto,
					     &range->max_proto) &&
			    (range->min_proto.all == range->max_proto.all ||
			     !nf_nat_used_tuple(tuple, ct)))
				return;
		} else if (!nf_nat_used_tuple(tuple, ct)) {
			return;
		}
	}

	/* Last chance: get protocol to try to obtain unique tuple. */
	nf_nat_l4proto_unique_tuple(tuple, range, maniptype, ct);
}
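
/* Example: masquerading 192.168.1.5:5000 behind 198.51.100.2 keeps source
 * port 5000 as long as the tuple 198.51.100.2:5000 -> dst is still free;
 * only when it is already taken does nf_nat_l4proto_unique_tuple() have to
 * move the port.
 */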

struct nf_conn_nat *nf_ct_nat_ext_add(struct nf_conn *ct)
{
	struct nf_conn_nat *nat = nfct_nat(ct);

	if (nat)
		return nat;

	if (!nf_ct_is_confirmed(ct))
		nat = nf_ct_ext_add(ct, NF_CT_EXT_NAT, GFP_ATOMIC);

	return nat;
}
EXPORT_SYMBOL_GPL(nf_ct_nat_ext_add);

unsigned int
nf_nat_setup_info(struct nf_conn *ct,
		  const struct nf_nat_range2 *range,
		  enum nf_nat_manip_type maniptype)
{
	struct net *net = nf_ct_net(ct);
	struct nf_conntrack_tuple curr_tuple, new_tuple;

	/* Can't setup nat info for confirmed ct. */
	if (nf_ct_is_confirmed(ct))
		return NF_ACCEPT;

	WARN_ON(maniptype != NF_NAT_MANIP_SRC &&
		maniptype != NF_NAT_MANIP_DST);

	if (WARN_ON(nf_nat_initialized(ct, maniptype)))
		return NF_DROP;

	/* What we've got will look like inverse of reply. Normally
	 * this is what is in the conntrack, except for prior
	 * manipulations (future optimization: if num_manips == 0,
	 * orig_tp = ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple)
	 */
	nf_ct_invert_tuple(&curr_tuple,
			   &ct->tuplehash[IP_CT_DIR_REPLY].tuple);

	get_unique_tuple(&new_tuple, &curr_tuple, range, ct, maniptype);

	if (!nf_ct_tuple_equal(&new_tuple, &curr_tuple)) {
		struct nf_conntrack_tuple reply;

		/* Alter conntrack table so it will recognize replies. */
		nf_ct_invert_tuple(&reply, &new_tuple);
		nf_conntrack_alter_reply(ct, &reply);

		/* Non-atomic: we own this at the moment. */
		if (maniptype == NF_NAT_MANIP_SRC)
			ct->status |= IPS_SRC_NAT;
		else
			ct->status |= IPS_DST_NAT;

		if (nfct_help(ct) && !nfct_seqadj(ct))
			if (!nfct_seqadj_ext_add(ct))
				return NF_DROP;
	}

	if (maniptype == NF_NAT_MANIP_SRC) {
		unsigned int srchash;
		spinlock_t *lock;

		srchash = hash_by_src(net,
				      &ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple);
		lock = &nf_nat_locks[srchash % CONNTRACK_LOCKS];
		spin_lock_bh(lock);
		hlist_add_head_rcu(&ct->nat_bysource,
				   &nf_nat_bysource[srchash]);
		spin_unlock_bh(lock);
	}

	/* It's done. */
	if (maniptype == NF_NAT_MANIP_DST)
		ct->status |= IPS_DST_NAT_DONE;
	else
		ct->status |= IPS_SRC_NAT_DONE;

	return NF_ACCEPT;
}
EXPORT_SYMBOL(nf_nat_setup_info);
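
/* Usage sketch (illustrative, modelled on the masquerade target; not part
 * of this file): a source binding to a single address "newsrc" is set up
 * on an unconfirmed conntrack like so:
 *
 *	struct nf_nat_range2 range = {
 *		.flags		= NF_NAT_RANGE_MAP_IPS,
 *		.min_addr.ip	= newsrc,
 *		.max_addr.ip	= newsrc,
 *	};
 *	return nf_nat_setup_info(ct, &range, NF_NAT_MANIP_SRC);
 */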

static unsigned int
__nf_nat_alloc_null_binding(struct nf_conn *ct, enum nf_nat_manip_type manip)
{
	/* Force range to this IP; let proto decide mapping for
	 * per-proto parts (hence not IP_NAT_RANGE_PROTO_SPECIFIED).
	 * Use reply in case it's already been mangled (eg local packet).
	 */
	union nf_inet_addr ip =
		(manip == NF_NAT_MANIP_SRC ?
		ct->tuplehash[IP_CT_DIR_REPLY].tuple.dst.u3 :
		ct->tuplehash[IP_CT_DIR_REPLY].tuple.src.u3);
	struct nf_nat_range2 range = {
		.flags		= NF_NAT_RANGE_MAP_IPS,
		.min_addr	= ip,
		.max_addr	= ip,
	};
	return nf_nat_setup_info(ct, &range, manip);
}

unsigned int
nf_nat_alloc_null_binding(struct nf_conn *ct, unsigned int hooknum)
{
	return __nf_nat_alloc_null_binding(ct, HOOK2MANIP(hooknum));
}
EXPORT_SYMBOL_GPL(nf_nat_alloc_null_binding);
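
/* A null binding leaves both addresses as they are but still runs the
 * uniqueness logic and marks the manip as initialized, so a flow that
 * matched no NAT rule reserves its tuple against later SNAT/DNAT mappings.
 */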

/* Do packet manipulations according to nf_nat_setup_info. */
unsigned int nf_nat_packet(struct nf_conn *ct,
			   enum ip_conntrack_info ctinfo,
			   unsigned int hooknum,
			   struct sk_buff *skb)
{
	enum nf_nat_manip_type mtype = HOOK2MANIP(hooknum);
	enum ip_conntrack_dir dir = CTINFO2DIR(ctinfo);
	unsigned int verdict = NF_ACCEPT;
	unsigned long statusbit;

	if (mtype == NF_NAT_MANIP_SRC)
		statusbit = IPS_SRC_NAT;
	else
		statusbit = IPS_DST_NAT;

	/* Invert if this is reply dir. */
	if (dir == IP_CT_DIR_REPLY)
		statusbit ^= IPS_NAT_MASK;

	/* Non-atomic: these bits don't change. */
	if (ct->status & statusbit)
		verdict = nf_nat_manip_pkt(skb, ct, mtype, dir);

	return verdict;
}
EXPORT_SYMBOL_GPL(nf_nat_packet);

unsigned int
nf_nat_inet_fn(void *priv, struct sk_buff *skb,
	       const struct nf_hook_state *state)
{
	struct nf_conn *ct;
	enum ip_conntrack_info ctinfo;
	struct nf_conn_nat *nat;
	/* maniptype == SRC for postrouting. */
	enum nf_nat_manip_type maniptype = HOOK2MANIP(state->hook);

	ct = nf_ct_get(skb, &ctinfo);
	/* Can't track?  It's not due to stress, or conntrack would
	 * have dropped it.  Hence it's the user's responsibility to
	 * packet filter it out, or implement conntrack/NAT for that
	 * protocol. 8) --RR
	 */
	if (!ct)
		return NF_ACCEPT;

	nat = nfct_nat(ct);

	switch (ctinfo) {
	case IP_CT_RELATED:
	case IP_CT_RELATED_REPLY:
		/* Only ICMPs can be IP_CT_IS_REPLY.  Fallthrough */
	case IP_CT_NEW:
		/* Seen it before?  This can happen for loopback, retrans,
		 * or local packets.
		 */
		if (!nf_nat_initialized(ct, maniptype)) {
			struct nf_nat_lookup_hook_priv *lpriv = priv;
			struct nf_hook_entries *e = rcu_dereference(lpriv->entries);
			unsigned int ret;
			int i;

			if (!e)
				goto null_bind;

			for (i = 0; i < e->num_hook_entries; i++) {
				ret = e->hooks[i].hook(e->hooks[i].priv, skb,
						       state);
				if (ret != NF_ACCEPT)
					return ret;
				if (nf_nat_initialized(ct, maniptype))
					goto do_nat;
			}
null_bind:
			ret = nf_nat_alloc_null_binding(ct, state->hook);
			if (ret != NF_ACCEPT)
				return ret;
		} else {
			pr_debug("Already setup manip %s for ct %p (status bits 0x%lx)\n",
				 maniptype == NF_NAT_MANIP_SRC ? "SRC" : "DST",
				 ct, ct->status);
			if (nf_nat_oif_changed(state->hook, ctinfo, nat,
					       state->out))
				goto oif_changed;
		}
		break;
	default:
		/* ESTABLISHED */
		WARN_ON(ctinfo != IP_CT_ESTABLISHED &&
			ctinfo != IP_CT_ESTABLISHED_REPLY);
		if (nf_nat_oif_changed(state->hook, ctinfo, nat, state->out))
			goto oif_changed;
	}

do_nat:
	return nf_nat_packet(ct, ctinfo, state->hook, skb);

oif_changed:
	nf_ct_kill_acct(ct, ctinfo, skb);
	return NF_DROP;
}
EXPORT_SYMBOL_GPL(nf_nat_inet_fn);
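
/* Usage sketch (illustrative; the real per-family hooks also do
 * protocol-specific fixups such as ICMP error translation first):
 *
 *	static unsigned int nf_nat_ipv4_fn(void *priv, struct sk_buff *skb,
 *					   const struct nf_hook_state *state)
 *	{
 *		return nf_nat_inet_fn(priv, skb, state);
 *	}
 */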

struct nf_nat_proto_clean {
	u8	l3proto;
	u8	l4proto;
};

/* kill conntracks with affected NAT section */
static int nf_nat_proto_remove(struct nf_conn *i, void *data)
{
	const struct nf_nat_proto_clean *clean = data;

	if ((clean->l3proto && nf_ct_l3num(i) != clean->l3proto) ||
	    (clean->l4proto && nf_ct_protonum(i) != clean->l4proto))
		return 0;

	return i->status & IPS_NAT_MASK ? 1 : 0;
}

static void __nf_nat_cleanup_conntrack(struct nf_conn *ct)
{
	unsigned int h;

	h = hash_by_src(nf_ct_net(ct), &ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple);
	spin_lock_bh(&nf_nat_locks[h % CONNTRACK_LOCKS]);
	hlist_del_rcu(&ct->nat_bysource);
	spin_unlock_bh(&nf_nat_locks[h % CONNTRACK_LOCKS]);
}

static int nf_nat_proto_clean(struct nf_conn *ct, void *data)
{
	if (nf_nat_proto_remove(ct, data))
		return 1;

	/* This module is being removed and conntrack has nat null binding.
	 * Remove it from bysource hash, as the table will be freed soon.
	 *
	 * Else, when the conntrack is destroyed, nf_nat_cleanup_conntrack()
	 * will delete the entry from the already-freed table.
	 */
	if (test_and_clear_bit(IPS_SRC_NAT_DONE_BIT, &ct->status))
		__nf_nat_cleanup_conntrack(ct);

	/* don't delete conntrack.  Although that would make things a lot
	 * simpler, we'd end up flushing all conntracks on nat rmmod.
	 */
	return 0;
}

/* No one is using the conntrack by the time this is called. */
static void nf_nat_cleanup_conntrack(struct nf_conn *ct)
{
	if (ct->status & IPS_SRC_NAT_DONE)
		__nf_nat_cleanup_conntrack(ct);
}

static struct nf_ct_ext_type nat_extend __read_mostly = {
	.len		= sizeof(struct nf_conn_nat),
	.align		= __alignof__(struct nf_conn_nat),
	.destroy	= nf_nat_cleanup_conntrack,
	.id		= NF_CT_EXT_NAT,
};

#if IS_ENABLED(CONFIG_NF_CT_NETLINK)

#include <linux/netfilter/nfnetlink.h>
#include <linux/netfilter/nfnetlink_conntrack.h>

static const struct nla_policy protonat_nla_policy[CTA_PROTONAT_MAX+1] = {
	[CTA_PROTONAT_PORT_MIN]	= { .type = NLA_U16 },
	[CTA_PROTONAT_PORT_MAX]	= { .type = NLA_U16 },
};

static int nf_nat_l4proto_nlattr_to_range(struct nlattr *tb[],
					  struct nf_nat_range2 *range)
{
	if (tb[CTA_PROTONAT_PORT_MIN]) {
		range->min_proto.all = nla_get_be16(tb[CTA_PROTONAT_PORT_MIN]);
		range->max_proto.all = range->min_proto.all;
		range->flags |= NF_NAT_RANGE_PROTO_SPECIFIED;
	}
	if (tb[CTA_PROTONAT_PORT_MAX]) {
		range->max_proto.all = nla_get_be16(tb[CTA_PROTONAT_PORT_MAX]);
		range->flags |= NF_NAT_RANGE_PROTO_SPECIFIED;
	}
	return 0;
}

static int nfnetlink_parse_nat_proto(struct nlattr *attr,
				     const struct nf_conn *ct,
				     struct nf_nat_range2 *range)
{
	struct nlattr *tb[CTA_PROTONAT_MAX+1];
	int err;

	err = nla_parse_nested_deprecated(tb, CTA_PROTONAT_MAX, attr,
					  protonat_nla_policy, NULL);
	if (err < 0)
		return err;

	return nf_nat_l4proto_nlattr_to_range(tb, range);
}

static const struct nla_policy nat_nla_policy[CTA_NAT_MAX+1] = {
	[CTA_NAT_V4_MINIP]	= { .type = NLA_U32 },
	[CTA_NAT_V4_MAXIP]	= { .type = NLA_U32 },
	[CTA_NAT_V6_MINIP]	= { .len = sizeof(struct in6_addr) },
	[CTA_NAT_V6_MAXIP]	= { .len = sizeof(struct in6_addr) },
	[CTA_NAT_PROTO]		= { .type = NLA_NESTED },
};

static int nf_nat_ipv4_nlattr_to_range(struct nlattr *tb[],
				       struct nf_nat_range2 *range)
{
	if (tb[CTA_NAT_V4_MINIP]) {
		range->min_addr.ip = nla_get_be32(tb[CTA_NAT_V4_MINIP]);
		range->flags |= NF_NAT_RANGE_MAP_IPS;
	}

	if (tb[CTA_NAT_V4_MAXIP])
		range->max_addr.ip = nla_get_be32(tb[CTA_NAT_V4_MAXIP]);
	else
		range->max_addr.ip = range->min_addr.ip;

	return 0;
}

static int nf_nat_ipv6_nlattr_to_range(struct nlattr *tb[],
				       struct nf_nat_range2 *range)
{
	if (tb[CTA_NAT_V6_MINIP]) {
		nla_memcpy(&range->min_addr.ip6, tb[CTA_NAT_V6_MINIP],
			   sizeof(struct in6_addr));
		range->flags |= NF_NAT_RANGE_MAP_IPS;
	}

	if (tb[CTA_NAT_V6_MAXIP])
		nla_memcpy(&range->max_addr.ip6, tb[CTA_NAT_V6_MAXIP],
			   sizeof(struct in6_addr));
	else
		range->max_addr = range->min_addr;

	return 0;
}

static int
nfnetlink_parse_nat(const struct nlattr *nat,
		    const struct nf_conn *ct, struct nf_nat_range2 *range)
{
	struct nlattr *tb[CTA_NAT_MAX+1];
	int err;

	memset(range, 0, sizeof(*range));

	err = nla_parse_nested_deprecated(tb, CTA_NAT_MAX, nat,
					  nat_nla_policy, NULL);
	if (err < 0)
		return err;

	switch (nf_ct_l3num(ct)) {
	case NFPROTO_IPV4:
		err = nf_nat_ipv4_nlattr_to_range(tb, range);
		break;
	case NFPROTO_IPV6:
		err = nf_nat_ipv6_nlattr_to_range(tb, range);
		break;
	default:
		err = -EPROTONOSUPPORT;
		break;
	}

	if (err)
		return err;

	if (!tb[CTA_NAT_PROTO])
		return 0;

	return nfnetlink_parse_nat_proto(tb[CTA_NAT_PROTO], ct, range);
}

/* This function is called under rcu_read_lock() */
static int
nfnetlink_parse_nat_setup(struct nf_conn *ct,
			  enum nf_nat_manip_type manip,
			  const struct nlattr *attr)
{
	struct nf_nat_range2 range;
	int err;

	/* Should not happen, restricted to creating new conntracks
	 * via ctnetlink.
	 */
	if (WARN_ON_ONCE(nf_nat_initialized(ct, manip)))
		return -EEXIST;

	/* No NAT information has been passed, allocate the null-binding */
	if (attr == NULL)
		return __nf_nat_alloc_null_binding(ct, manip) == NF_DROP ? -ENOMEM : 0;

	err = nfnetlink_parse_nat(attr, ct, &range);
	if (err < 0)
		return err;

	return nf_nat_setup_info(ct, &range, manip) == NF_DROP ? -ENOMEM : 0;
}
#else
static int
nfnetlink_parse_nat_setup(struct nf_conn *ct,
			  enum nf_nat_manip_type manip,
			  const struct nlattr *attr)
{
	return -EOPNOTSUPP;
}
#endif

static struct nf_ct_helper_expectfn follow_master_nat = {
	.name		= "nat-follow-master",
	.expectfn	= nf_nat_follow_master,
};

int nf_nat_register_fn(struct net *net, u8 pf, const struct nf_hook_ops *ops,
		       const struct nf_hook_ops *orig_nat_ops, unsigned int ops_count)
{
	struct nat_net *nat_net = net_generic(net, nat_net_id);
	struct nf_nat_hooks_net *nat_proto_net;
	struct nf_nat_lookup_hook_priv *priv;
	unsigned int hooknum = ops->hooknum;
	struct nf_hook_ops *nat_ops;
	int i, ret;

	if (WARN_ON_ONCE(pf >= ARRAY_SIZE(nat_net->nat_proto_net)))
		return -EINVAL;

	nat_proto_net = &nat_net->nat_proto_net[pf];

	for (i = 0; i < ops_count; i++) {
		if (orig_nat_ops[i].hooknum == hooknum) {
			hooknum = i;
			break;
		}
	}

	if (WARN_ON_ONCE(i == ops_count))
		return -EINVAL;

	mutex_lock(&nf_nat_proto_mutex);
	if (!nat_proto_net->nat_hook_ops) {
		WARN_ON(nat_proto_net->users != 0);

		nat_ops = kmemdup(orig_nat_ops, sizeof(*orig_nat_ops) * ops_count, GFP_KERNEL);
		if (!nat_ops) {
			mutex_unlock(&nf_nat_proto_mutex);
			return -ENOMEM;
		}

		for (i = 0; i < ops_count; i++) {
			priv = kzalloc(sizeof(*priv), GFP_KERNEL);
			if (priv) {
				nat_ops[i].priv = priv;
				continue;
			}
			mutex_unlock(&nf_nat_proto_mutex);
			while (i)
				kfree(nat_ops[--i].priv);
			kfree(nat_ops);
			return -ENOMEM;
		}

		ret = nf_register_net_hooks(net, nat_ops, ops_count);
		if (ret < 0) {
			mutex_unlock(&nf_nat_proto_mutex);
			for (i = 0; i < ops_count; i++)
				kfree(nat_ops[i].priv);
			kfree(nat_ops);
			return ret;
		}

		nat_proto_net->nat_hook_ops = nat_ops;
	}

	nat_ops = nat_proto_net->nat_hook_ops;
	priv = nat_ops[hooknum].priv;
	if (WARN_ON_ONCE(!priv)) {
		mutex_unlock(&nf_nat_proto_mutex);
		return -EOPNOTSUPP;
	}

	ret = nf_hook_entries_insert_raw(&priv->entries, ops);
	if (ret == 0)
		nat_proto_net->users++;

	mutex_unlock(&nf_nat_proto_mutex);
	return ret;
}
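
/* Usage sketch (illustrative, cf. the IPv4 backend): a backend registers an
 * ops entry against its family's shared template array:
 *
 *	ret = nf_nat_register_fn(net, ops->pf, ops, nf_nat_ipv4_ops,
 *				 ARRAY_SIZE(nf_nat_ipv4_ops));
 */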

void nf_nat_unregister_fn(struct net *net, u8 pf, const struct nf_hook_ops *ops,
			  unsigned int ops_count)
{
	struct nat_net *nat_net = net_generic(net, nat_net_id);
	struct nf_nat_hooks_net *nat_proto_net;
	struct nf_nat_lookup_hook_priv *priv;
	struct nf_hook_ops *nat_ops;
	int hooknum = ops->hooknum;
	int i;

	if (pf >= ARRAY_SIZE(nat_net->nat_proto_net))
		return;

	nat_proto_net = &nat_net->nat_proto_net[pf];

	mutex_lock(&nf_nat_proto_mutex);
	if (WARN_ON(nat_proto_net->users == 0))
		goto unlock;

	nat_proto_net->users--;

	nat_ops = nat_proto_net->nat_hook_ops;
	for (i = 0; i < ops_count; i++) {
		if (nat_ops[i].hooknum == hooknum) {
			hooknum = i;
			break;
		}
	}
	if (WARN_ON_ONCE(i == ops_count))
		goto unlock;
	priv = nat_ops[hooknum].priv;
	nf_hook_entries_delete_raw(&priv->entries, ops);

	if (nat_proto_net->users == 0) {
		nf_unregister_net_hooks(net, nat_ops, ops_count);

		for (i = 0; i < ops_count; i++) {
			priv = nat_ops[i].priv;
			kfree_rcu(priv, rcu_head);
		}

		nat_proto_net->nat_hook_ops = NULL;
		kfree(nat_ops);
	}
unlock:
	mutex_unlock(&nf_nat_proto_mutex);
}

static struct pernet_operations nat_net_ops = {
	.id = &nat_net_id,
	.size = sizeof(struct nat_net),
};

static struct nf_nat_hook nat_hook = {
	.parse_nat_setup	= nfnetlink_parse_nat_setup,
#ifdef CONFIG_XFRM
	.decode_session		= __nf_nat_decode_session,
#endif
	.manip_pkt		= nf_nat_manip_pkt,
};

static int __init nf_nat_init(void)
{
	int ret, i;

	/* Leave them the same for the moment. */
	nf_nat_htable_size = nf_conntrack_htable_size;
	if (nf_nat_htable_size < CONNTRACK_LOCKS)
		nf_nat_htable_size = CONNTRACK_LOCKS;

	nf_nat_bysource = nf_ct_alloc_hashtable(&nf_nat_htable_size, 0);
	if (!nf_nat_bysource)
		return -ENOMEM;

	ret = nf_ct_extend_register(&nat_extend);
	if (ret < 0) {
		kvfree(nf_nat_bysource);
		pr_err("Unable to register extension\n");
		return ret;
	}

	for (i = 0; i < CONNTRACK_LOCKS; i++)
		spin_lock_init(&nf_nat_locks[i]);

	ret = register_pernet_subsys(&nat_net_ops);
	if (ret < 0) {
		nf_ct_extend_unregister(&nat_extend);
		kvfree(nf_nat_bysource);
		return ret;
	}

	nf_ct_helper_expectfn_register(&follow_master_nat);

	WARN_ON(nf_nat_hook != NULL);
	RCU_INIT_POINTER(nf_nat_hook, &nat_hook);

	return 0;
}

static void __exit nf_nat_cleanup(void)
{
	struct nf_nat_proto_clean clean = {};

	nf_ct_iterate_destroy(nf_nat_proto_clean, &clean);

	nf_ct_extend_unregister(&nat_extend);
	nf_ct_helper_expectfn_unregister(&follow_master_nat);
	RCU_INIT_POINTER(nf_nat_hook, NULL);

	synchronize_net();
	kvfree(nf_nat_bysource);
	unregister_pernet_subsys(&nat_net_ops);
}

MODULE_LICENSE("GPL");

module_init(nf_nat_init);
module_exit(nf_nat_cleanup);