/*
 * Packet matching code.
 *
 * Copyright (C) 1999 Paul `Rusty' Russell & Michael J. Neuling
 * Copyright (C) 2000-2005 Netfilter Core Team <coreteam@netfilter.org>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/capability.h>
#include <linux/in.h>
#include <linux/skbuff.h>
#include <linux/kmod.h>
#include <linux/vmalloc.h>
#include <linux/netdevice.h>
#include <linux/module.h>
#include <linux/poison.h>
#include <linux/icmpv6.h>
#include <net/ipv6.h>
#include <net/compat.h>
#include <asm/uaccess.h>
#include <linux/mutex.h>
#include <linux/proc_fs.h>
#include <linux/err.h>
#include <linux/cpumask.h>

#include <linux/netfilter_ipv6/ip6_tables.h>
#include <linux/netfilter/x_tables.h>
#include <net/netfilter/nf_log.h>
#include "../../netfilter/xt_repldata.h"
34 MODULE_LICENSE("GPL");
35 MODULE_AUTHOR("Netfilter Core Team <coreteam@netfilter.org>");
36 MODULE_DESCRIPTION("IPv6 packet filter");
/*#define DEBUG_IP_FIREWALL*/
/*#define DEBUG_ALLOW_ALL*/ /* Useful for remote debugging */
/*#define DEBUG_IP_FIREWALL_USER*/

/* dprintf()/duprintf(): debug printks, compiled out unless the
 * corresponding DEBUG_* macro above is uncommented. */
#ifdef DEBUG_IP_FIREWALL
#define dprintf(format, args...) pr_info(format , ## args)
#else
#define dprintf(format, args...)
#endif

#ifdef DEBUG_IP_FIREWALL_USER
#define duprintf(format, args...) pr_info(format , ## args)
#else
#define duprintf(format, args...)
#endif

/* Sanity assertions, active only with CONFIG_NETFILTER_DEBUG. */
#ifdef CONFIG_NETFILTER_DEBUG
#define IP_NF_ASSERT(x)	WARN_ON(!(x))
#else
#define IP_NF_ASSERT(x)
#endif

/* All the better to debug you with... */
66 void *ip6t_alloc_initial_table(const struct xt_table *info)
68 return xt_alloc_initial_table(ip6t, IP6T);
70 EXPORT_SYMBOL_GPL(ip6t_alloc_initial_table);
/*
   We keep a set of rules for each CPU, so we can avoid write-locking
   them in the softirq when updating the counters and therefore
   only need to read-lock in the softirq; doing a write_lock_bh() in user
   context stops packets coming through and allows user context to read
   the counters or update the rules.

   Hence the start of any table is given by get_table() below.  */
81 /* Returns whether matches rule or not. */
82 /* Performance critical - called for every packet */
84 ip6_packet_match(const struct sk_buff *skb,
87 const struct ip6t_ip6 *ip6info,
88 unsigned int *protoff,
89 int *fragoff, bool *hotdrop)
92 const struct ipv6hdr *ipv6 = ipv6_hdr(skb);
94 #define FWINV(bool, invflg) ((bool) ^ !!(ip6info->invflags & (invflg)))
96 if (FWINV(ipv6_masked_addr_cmp(&ipv6->saddr, &ip6info->smsk,
97 &ip6info->src), IP6T_INV_SRCIP) ||
98 FWINV(ipv6_masked_addr_cmp(&ipv6->daddr, &ip6info->dmsk,
99 &ip6info->dst), IP6T_INV_DSTIP)) {
100 dprintf("Source or dest mismatch.\n");
102 dprintf("SRC: %u. Mask: %u. Target: %u.%s\n", ip->saddr,
103 ipinfo->smsk.s_addr, ipinfo->src.s_addr,
104 ipinfo->invflags & IP6T_INV_SRCIP ? " (INV)" : "");
105 dprintf("DST: %u. Mask: %u. Target: %u.%s\n", ip->daddr,
106 ipinfo->dmsk.s_addr, ipinfo->dst.s_addr,
107 ipinfo->invflags & IP6T_INV_DSTIP ? " (INV)" : "");*/
111 ret = ifname_compare_aligned(indev, ip6info->iniface, ip6info->iniface_mask);
113 if (FWINV(ret != 0, IP6T_INV_VIA_IN)) {
114 dprintf("VIA in mismatch (%s vs %s).%s\n",
115 indev, ip6info->iniface,
116 ip6info->invflags&IP6T_INV_VIA_IN ?" (INV)":"");
120 ret = ifname_compare_aligned(outdev, ip6info->outiface, ip6info->outiface_mask);
122 if (FWINV(ret != 0, IP6T_INV_VIA_OUT)) {
123 dprintf("VIA out mismatch (%s vs %s).%s\n",
124 outdev, ip6info->outiface,
125 ip6info->invflags&IP6T_INV_VIA_OUT ?" (INV)":"");
129 /* ... might want to do something with class and flowlabel here ... */
131 /* look for the desired protocol header */
132 if((ip6info->flags & IP6T_F_PROTO)) {
134 unsigned short _frag_off;
136 protohdr = ipv6_find_hdr(skb, protoff, -1, &_frag_off, NULL);
142 *fragoff = _frag_off;
144 dprintf("Packet protocol %hi ?= %s%hi.\n",
146 ip6info->invflags & IP6T_INV_PROTO ? "!":"",
149 if (ip6info->proto == protohdr) {
150 if(ip6info->invflags & IP6T_INV_PROTO) {
156 /* We need match for the '-p all', too! */
157 if ((ip6info->proto != 0) &&
158 !(ip6info->invflags & IP6T_INV_PROTO))
164 /* should be ip6 safe */
166 ip6_checkentry(const struct ip6t_ip6 *ipv6)
168 if (ipv6->flags & ~IP6T_F_MASK) {
169 duprintf("Unknown flag bits set: %08X\n",
170 ipv6->flags & ~IP6T_F_MASK);
173 if (ipv6->invflags & ~IP6T_INV_MASK) {
174 duprintf("Unknown invflag bits set: %08X\n",
175 ipv6->invflags & ~IP6T_INV_MASK);
182 ip6t_error(struct sk_buff *skb, const struct xt_action_param *par)
184 net_info_ratelimited("error: `%s'\n", (const char *)par->targinfo);
/* Return the entry at byte offset 'offset' from the table base. */
static inline struct ip6t_entry *
get_entry(const void *base, unsigned int offset)
{
	return (struct ip6t_entry *)(base + offset);
}
195 /* All zeroes == unconditional rule. */
196 /* Mildly perf critical (only if packet tracing is on) */
197 static inline bool unconditional(const struct ip6t_ip6 *ipv6)
199 static const struct ip6t_ip6 uncond;
201 return memcmp(ipv6, &uncond, sizeof(uncond)) == 0;
/* const-qualified wrapper around ip6t_get_target(). */
static inline const struct xt_entry_target *
ip6t_get_target_c(const struct ip6t_entry *e)
{
	return ip6t_get_target((struct ip6t_entry *)e);
}
210 #if IS_ENABLED(CONFIG_NETFILTER_XT_TARGET_TRACE)
211 /* This cries for unification! */
212 static const char *const hooknames[] = {
213 [NF_INET_PRE_ROUTING] = "PREROUTING",
214 [NF_INET_LOCAL_IN] = "INPUT",
215 [NF_INET_FORWARD] = "FORWARD",
216 [NF_INET_LOCAL_OUT] = "OUTPUT",
217 [NF_INET_POST_ROUTING] = "POSTROUTING",
220 enum nf_ip_trace_comments {
221 NF_IP6_TRACE_COMMENT_RULE,
222 NF_IP6_TRACE_COMMENT_RETURN,
223 NF_IP6_TRACE_COMMENT_POLICY,
226 static const char *const comments[] = {
227 [NF_IP6_TRACE_COMMENT_RULE] = "rule",
228 [NF_IP6_TRACE_COMMENT_RETURN] = "return",
229 [NF_IP6_TRACE_COMMENT_POLICY] = "policy",
232 static struct nf_loginfo trace_loginfo = {
233 .type = NF_LOG_TYPE_LOG,
237 .logflags = NF_LOG_MASK,
242 /* Mildly perf critical (only if packet tracing is on) */
244 get_chainname_rulenum(const struct ip6t_entry *s, const struct ip6t_entry *e,
245 const char *hookname, const char **chainname,
246 const char **comment, unsigned int *rulenum)
248 const struct xt_standard_target *t = (void *)ip6t_get_target_c(s);
250 if (strcmp(t->target.u.kernel.target->name, XT_ERROR_TARGET) == 0) {
251 /* Head of user chain: ERROR target with chainname */
252 *chainname = t->target.data;
257 if (s->target_offset == sizeof(struct ip6t_entry) &&
258 strcmp(t->target.u.kernel.target->name,
259 XT_STANDARD_TARGET) == 0 &&
261 unconditional(&s->ipv6)) {
262 /* Tail of chains: STANDARD target (return/policy) */
263 *comment = *chainname == hookname
264 ? comments[NF_IP6_TRACE_COMMENT_POLICY]
265 : comments[NF_IP6_TRACE_COMMENT_RETURN];
274 static void trace_packet(const struct sk_buff *skb,
276 const struct net_device *in,
277 const struct net_device *out,
278 const char *tablename,
279 const struct xt_table_info *private,
280 const struct ip6t_entry *e)
282 const void *table_base;
283 const struct ip6t_entry *root;
284 const char *hookname, *chainname, *comment;
285 const struct ip6t_entry *iter;
286 unsigned int rulenum = 0;
287 struct net *net = dev_net(in ? in : out);
289 table_base = private->entries[smp_processor_id()];
290 root = get_entry(table_base, private->hook_entry[hook]);
292 hookname = chainname = hooknames[hook];
293 comment = comments[NF_IP6_TRACE_COMMENT_RULE];
295 xt_entry_foreach(iter, root, private->size - private->hook_entry[hook])
296 if (get_chainname_rulenum(iter, e, hookname,
297 &chainname, &comment, &rulenum) != 0)
300 nf_log_packet(net, AF_INET6, hook, skb, in, out, &trace_loginfo,
301 "TRACE: %s:%s:%s:%u ",
302 tablename, chainname, comment, rulenum);
306 static inline __pure struct ip6t_entry *
307 ip6t_next_entry(const struct ip6t_entry *entry)
309 return (void *)entry + entry->next_offset;
312 /* Returns one of the generic firewall policies, like NF_ACCEPT. */
314 ip6t_do_table(struct sk_buff *skb,
316 const struct net_device *in,
317 const struct net_device *out,
318 struct xt_table *table)
320 static const char nulldevname[IFNAMSIZ] __attribute__((aligned(sizeof(long))));
321 /* Initializing verdict to NF_DROP keeps gcc happy. */
322 unsigned int verdict = NF_DROP;
323 const char *indev, *outdev;
324 const void *table_base;
325 struct ip6t_entry *e, **jumpstack;
326 unsigned int *stackptr, origptr, cpu;
327 const struct xt_table_info *private;
328 struct xt_action_param acpar;
332 indev = in ? in->name : nulldevname;
333 outdev = out ? out->name : nulldevname;
334 /* We handle fragments by dealing with the first fragment as
335 * if it was a normal packet. All other fragments are treated
336 * normally, except that they will NEVER match rules that ask
337 * things we don't know, ie. tcp syn flag or ports). If the
338 * rule is also a fragment-specific rule, non-fragments won't
340 acpar.hotdrop = false;
343 acpar.family = NFPROTO_IPV6;
344 acpar.hooknum = hook;
346 IP_NF_ASSERT(table->valid_hooks & (1 << hook));
349 addend = xt_write_recseq_begin();
350 private = table->private;
351 cpu = smp_processor_id();
352 table_base = private->entries[cpu];
353 jumpstack = (struct ip6t_entry **)private->jumpstack[cpu];
354 stackptr = per_cpu_ptr(private->stackptr, cpu);
357 e = get_entry(table_base, private->hook_entry[hook]);
360 const struct xt_entry_target *t;
361 const struct xt_entry_match *ematch;
365 if (!ip6_packet_match(skb, indev, outdev, &e->ipv6,
366 &acpar.thoff, &acpar.fragoff, &acpar.hotdrop)) {
368 e = ip6t_next_entry(e);
372 xt_ematch_foreach(ematch, e) {
373 acpar.match = ematch->u.kernel.match;
374 acpar.matchinfo = ematch->data;
375 if (!acpar.match->match(skb, &acpar))
379 ADD_COUNTER(e->counters, skb->len, 1);
381 t = ip6t_get_target_c(e);
382 IP_NF_ASSERT(t->u.kernel.target);
384 #if IS_ENABLED(CONFIG_NETFILTER_XT_TARGET_TRACE)
385 /* The packet is traced: log it */
386 if (unlikely(skb->nf_trace))
387 trace_packet(skb, hook, in, out,
388 table->name, private, e);
390 /* Standard target? */
391 if (!t->u.kernel.target->target) {
394 v = ((struct xt_standard_target *)t)->verdict;
396 /* Pop from stack? */
397 if (v != XT_RETURN) {
398 verdict = (unsigned int)(-v) - 1;
401 if (*stackptr <= origptr)
402 e = get_entry(table_base,
403 private->underflow[hook]);
405 e = ip6t_next_entry(jumpstack[--*stackptr]);
408 if (table_base + v != ip6t_next_entry(e) &&
409 !(e->ipv6.flags & IP6T_F_GOTO)) {
410 if (*stackptr >= private->stacksize) {
414 jumpstack[(*stackptr)++] = e;
417 e = get_entry(table_base, v);
421 acpar.target = t->u.kernel.target;
422 acpar.targinfo = t->data;
424 verdict = t->u.kernel.target->target(skb, &acpar);
425 if (verdict == XT_CONTINUE)
426 e = ip6t_next_entry(e);
430 } while (!acpar.hotdrop);
434 xt_write_recseq_end(addend);
437 #ifdef DEBUG_ALLOW_ALL
446 /* Figures out from what hook each rule can be called: returns 0 if
447 there are loops. Puts hook bitmask in comefrom. */
449 mark_source_chains(const struct xt_table_info *newinfo,
450 unsigned int valid_hooks, void *entry0)
454 /* No recursion; use packet counter to save back ptrs (reset
455 to 0 as we leave), and comefrom to save source hook bitmask */
456 for (hook = 0; hook < NF_INET_NUMHOOKS; hook++) {
457 unsigned int pos = newinfo->hook_entry[hook];
458 struct ip6t_entry *e = (struct ip6t_entry *)(entry0 + pos);
460 if (!(valid_hooks & (1 << hook)))
463 /* Set initial back pointer. */
464 e->counters.pcnt = pos;
467 const struct xt_standard_target *t
468 = (void *)ip6t_get_target_c(e);
469 int visited = e->comefrom & (1 << hook);
471 if (e->comefrom & (1 << NF_INET_NUMHOOKS)) {
472 pr_err("iptables: loop hook %u pos %u %08X.\n",
473 hook, pos, e->comefrom);
476 e->comefrom |= ((1 << hook) | (1 << NF_INET_NUMHOOKS));
478 /* Unconditional return/END. */
479 if ((e->target_offset == sizeof(struct ip6t_entry) &&
480 (strcmp(t->target.u.user.name,
481 XT_STANDARD_TARGET) == 0) &&
483 unconditional(&e->ipv6)) || visited) {
484 unsigned int oldpos, size;
486 if ((strcmp(t->target.u.user.name,
487 XT_STANDARD_TARGET) == 0) &&
488 t->verdict < -NF_MAX_VERDICT - 1) {
489 duprintf("mark_source_chains: bad "
490 "negative verdict (%i)\n",
495 /* Return: backtrack through the last
498 e->comefrom ^= (1<<NF_INET_NUMHOOKS);
499 #ifdef DEBUG_IP_FIREWALL_USER
501 & (1 << NF_INET_NUMHOOKS)) {
502 duprintf("Back unset "
509 pos = e->counters.pcnt;
510 e->counters.pcnt = 0;
512 /* We're at the start. */
516 e = (struct ip6t_entry *)
518 } while (oldpos == pos + e->next_offset);
521 size = e->next_offset;
522 e = (struct ip6t_entry *)
523 (entry0 + pos + size);
524 e->counters.pcnt = pos;
527 int newpos = t->verdict;
529 if (strcmp(t->target.u.user.name,
530 XT_STANDARD_TARGET) == 0 &&
532 if (newpos > newinfo->size -
533 sizeof(struct ip6t_entry)) {
534 duprintf("mark_source_chains: "
535 "bad verdict (%i)\n",
539 /* This a jump; chase it. */
540 duprintf("Jump rule %u -> %u\n",
543 /* ... this is a fallthru */
544 newpos = pos + e->next_offset;
546 e = (struct ip6t_entry *)
548 e->counters.pcnt = pos;
553 duprintf("Finished chain %u\n", hook);
558 static void cleanup_match(struct xt_entry_match *m, struct net *net)
560 struct xt_mtdtor_param par;
563 par.match = m->u.kernel.match;
564 par.matchinfo = m->data;
565 par.family = NFPROTO_IPV6;
566 if (par.match->destroy != NULL)
567 par.match->destroy(&par);
568 module_put(par.match->me);
572 check_entry(const struct ip6t_entry *e, const char *name)
574 const struct xt_entry_target *t;
576 if (!ip6_checkentry(&e->ipv6)) {
577 duprintf("ip_tables: ip check failed %p %s.\n", e, name);
581 if (e->target_offset + sizeof(struct xt_entry_target) >
585 t = ip6t_get_target_c(e);
586 if (e->target_offset + t->u.target_size > e->next_offset)
592 static int check_match(struct xt_entry_match *m, struct xt_mtchk_param *par)
594 const struct ip6t_ip6 *ipv6 = par->entryinfo;
597 par->match = m->u.kernel.match;
598 par->matchinfo = m->data;
600 ret = xt_check_match(par, m->u.match_size - sizeof(*m),
601 ipv6->proto, ipv6->invflags & IP6T_INV_PROTO);
603 duprintf("ip_tables: check failed for `%s'.\n",
611 find_check_match(struct xt_entry_match *m, struct xt_mtchk_param *par)
613 struct xt_match *match;
616 match = xt_request_find_match(NFPROTO_IPV6, m->u.user.name,
619 duprintf("find_check_match: `%s' not found\n", m->u.user.name);
620 return PTR_ERR(match);
622 m->u.kernel.match = match;
624 ret = check_match(m, par);
630 module_put(m->u.kernel.match->me);
634 static int check_target(struct ip6t_entry *e, struct net *net, const char *name)
636 struct xt_entry_target *t = ip6t_get_target(e);
637 struct xt_tgchk_param par = {
641 .target = t->u.kernel.target,
643 .hook_mask = e->comefrom,
644 .family = NFPROTO_IPV6,
648 t = ip6t_get_target(e);
649 ret = xt_check_target(&par, t->u.target_size - sizeof(*t),
650 e->ipv6.proto, e->ipv6.invflags & IP6T_INV_PROTO);
652 duprintf("ip_tables: check failed for `%s'.\n",
653 t->u.kernel.target->name);
660 find_check_entry(struct ip6t_entry *e, struct net *net, const char *name,
663 struct xt_entry_target *t;
664 struct xt_target *target;
667 struct xt_mtchk_param mtpar;
668 struct xt_entry_match *ematch;
670 ret = check_entry(e, name);
677 mtpar.entryinfo = &e->ipv6;
678 mtpar.hook_mask = e->comefrom;
679 mtpar.family = NFPROTO_IPV6;
680 xt_ematch_foreach(ematch, e) {
681 ret = find_check_match(ematch, &mtpar);
683 goto cleanup_matches;
687 t = ip6t_get_target(e);
688 target = xt_request_find_target(NFPROTO_IPV6, t->u.user.name,
690 if (IS_ERR(target)) {
691 duprintf("find_check_entry: `%s' not found\n", t->u.user.name);
692 ret = PTR_ERR(target);
693 goto cleanup_matches;
695 t->u.kernel.target = target;
697 ret = check_target(e, net, name);
702 module_put(t->u.kernel.target->me);
704 xt_ematch_foreach(ematch, e) {
707 cleanup_match(ematch, net);
712 static bool check_underflow(const struct ip6t_entry *e)
714 const struct xt_entry_target *t;
715 unsigned int verdict;
717 if (!unconditional(&e->ipv6))
719 t = ip6t_get_target_c(e);
720 if (strcmp(t->u.user.name, XT_STANDARD_TARGET) != 0)
722 verdict = ((struct xt_standard_target *)t)->verdict;
723 verdict = -verdict - 1;
724 return verdict == NF_DROP || verdict == NF_ACCEPT;
728 check_entry_size_and_hooks(struct ip6t_entry *e,
729 struct xt_table_info *newinfo,
730 const unsigned char *base,
731 const unsigned char *limit,
732 const unsigned int *hook_entries,
733 const unsigned int *underflows,
734 unsigned int valid_hooks)
738 if ((unsigned long)e % __alignof__(struct ip6t_entry) != 0 ||
739 (unsigned char *)e + sizeof(struct ip6t_entry) >= limit) {
740 duprintf("Bad offset %p\n", e);
745 < sizeof(struct ip6t_entry) + sizeof(struct xt_entry_target)) {
746 duprintf("checking: element %p size %u\n",
751 /* Check hooks & underflows */
752 for (h = 0; h < NF_INET_NUMHOOKS; h++) {
753 if (!(valid_hooks & (1 << h)))
755 if ((unsigned char *)e - base == hook_entries[h])
756 newinfo->hook_entry[h] = hook_entries[h];
757 if ((unsigned char *)e - base == underflows[h]) {
758 if (!check_underflow(e)) {
759 pr_err("Underflows must be unconditional and "
760 "use the STANDARD target with "
764 newinfo->underflow[h] = underflows[h];
768 /* Clear counters and comefrom */
769 e->counters = ((struct xt_counters) { 0, 0 });
774 static void cleanup_entry(struct ip6t_entry *e, struct net *net)
776 struct xt_tgdtor_param par;
777 struct xt_entry_target *t;
778 struct xt_entry_match *ematch;
780 /* Cleanup all matches */
781 xt_ematch_foreach(ematch, e)
782 cleanup_match(ematch, net);
783 t = ip6t_get_target(e);
786 par.target = t->u.kernel.target;
787 par.targinfo = t->data;
788 par.family = NFPROTO_IPV6;
789 if (par.target->destroy != NULL)
790 par.target->destroy(&par);
791 module_put(par.target->me);
794 /* Checks and translates the user-supplied table segment (held in
797 translate_table(struct net *net, struct xt_table_info *newinfo, void *entry0,
798 const struct ip6t_replace *repl)
800 struct ip6t_entry *iter;
804 newinfo->size = repl->size;
805 newinfo->number = repl->num_entries;
807 /* Init all hooks to impossible value. */
808 for (i = 0; i < NF_INET_NUMHOOKS; i++) {
809 newinfo->hook_entry[i] = 0xFFFFFFFF;
810 newinfo->underflow[i] = 0xFFFFFFFF;
813 duprintf("translate_table: size %u\n", newinfo->size);
815 /* Walk through entries, checking offsets. */
816 xt_entry_foreach(iter, entry0, newinfo->size) {
817 ret = check_entry_size_and_hooks(iter, newinfo, entry0,
825 if (strcmp(ip6t_get_target(iter)->u.user.name,
826 XT_ERROR_TARGET) == 0)
827 ++newinfo->stacksize;
830 if (i != repl->num_entries) {
831 duprintf("translate_table: %u not %u entries\n",
832 i, repl->num_entries);
836 /* Check hooks all assigned */
837 for (i = 0; i < NF_INET_NUMHOOKS; i++) {
838 /* Only hooks which are valid */
839 if (!(repl->valid_hooks & (1 << i)))
841 if (newinfo->hook_entry[i] == 0xFFFFFFFF) {
842 duprintf("Invalid hook entry %u %u\n",
843 i, repl->hook_entry[i]);
846 if (newinfo->underflow[i] == 0xFFFFFFFF) {
847 duprintf("Invalid underflow %u %u\n",
848 i, repl->underflow[i]);
853 if (!mark_source_chains(newinfo, repl->valid_hooks, entry0))
856 /* Finally, each sanity check must pass */
858 xt_entry_foreach(iter, entry0, newinfo->size) {
859 ret = find_check_entry(iter, net, repl->name, repl->size);
866 xt_entry_foreach(iter, entry0, newinfo->size) {
869 cleanup_entry(iter, net);
874 /* And one copy for every other CPU */
875 for_each_possible_cpu(i) {
876 if (newinfo->entries[i] && newinfo->entries[i] != entry0)
877 memcpy(newinfo->entries[i], entry0, newinfo->size);
884 get_counters(const struct xt_table_info *t,
885 struct xt_counters counters[])
887 struct ip6t_entry *iter;
891 for_each_possible_cpu(cpu) {
892 seqcount_t *s = &per_cpu(xt_recseq, cpu);
895 xt_entry_foreach(iter, t->entries[cpu], t->size) {
900 start = read_seqcount_begin(s);
901 bcnt = iter->counters.bcnt;
902 pcnt = iter->counters.pcnt;
903 } while (read_seqcount_retry(s, start));
905 ADD_COUNTER(counters[i], bcnt, pcnt);
911 static struct xt_counters *alloc_counters(const struct xt_table *table)
913 unsigned int countersize;
914 struct xt_counters *counters;
915 const struct xt_table_info *private = table->private;
917 /* We need atomic snapshot of counters: rest doesn't change
918 (other than comefrom, which userspace doesn't care
920 countersize = sizeof(struct xt_counters) * private->number;
921 counters = vzalloc(countersize);
923 if (counters == NULL)
924 return ERR_PTR(-ENOMEM);
926 get_counters(private, counters);
932 copy_entries_to_user(unsigned int total_size,
933 const struct xt_table *table,
934 void __user *userptr)
936 unsigned int off, num;
937 const struct ip6t_entry *e;
938 struct xt_counters *counters;
939 const struct xt_table_info *private = table->private;
941 const void *loc_cpu_entry;
943 counters = alloc_counters(table);
944 if (IS_ERR(counters))
945 return PTR_ERR(counters);
947 /* choose the copy that is on our node/cpu, ...
948 * This choice is lazy (because current thread is
949 * allowed to migrate to another cpu)
951 loc_cpu_entry = private->entries[raw_smp_processor_id()];
952 if (copy_to_user(userptr, loc_cpu_entry, total_size) != 0) {
957 /* FIXME: use iterator macros --RR */
958 /* ... then go back and fix counters and names */
959 for (off = 0, num = 0; off < total_size; off += e->next_offset, num++){
961 const struct xt_entry_match *m;
962 const struct xt_entry_target *t;
964 e = (struct ip6t_entry *)(loc_cpu_entry + off);
965 if (copy_to_user(userptr + off
966 + offsetof(struct ip6t_entry, counters),
968 sizeof(counters[num])) != 0) {
973 for (i = sizeof(struct ip6t_entry);
974 i < e->target_offset;
975 i += m->u.match_size) {
978 if (copy_to_user(userptr + off + i
979 + offsetof(struct xt_entry_match,
981 m->u.kernel.match->name,
982 strlen(m->u.kernel.match->name)+1)
989 t = ip6t_get_target_c(e);
990 if (copy_to_user(userptr + off + e->target_offset
991 + offsetof(struct xt_entry_target,
993 t->u.kernel.target->name,
994 strlen(t->u.kernel.target->name)+1) != 0) {
1005 #ifdef CONFIG_COMPAT
1006 static void compat_standard_from_user(void *dst, const void *src)
1008 int v = *(compat_int_t *)src;
1011 v += xt_compat_calc_jump(AF_INET6, v);
1012 memcpy(dst, &v, sizeof(v));
1015 static int compat_standard_to_user(void __user *dst, const void *src)
1017 compat_int_t cv = *(int *)src;
1020 cv -= xt_compat_calc_jump(AF_INET6, cv);
1021 return copy_to_user(dst, &cv, sizeof(cv)) ? -EFAULT : 0;
1024 static int compat_calc_entry(const struct ip6t_entry *e,
1025 const struct xt_table_info *info,
1026 const void *base, struct xt_table_info *newinfo)
1028 const struct xt_entry_match *ematch;
1029 const struct xt_entry_target *t;
1030 unsigned int entry_offset;
1033 off = sizeof(struct ip6t_entry) - sizeof(struct compat_ip6t_entry);
1034 entry_offset = (void *)e - base;
1035 xt_ematch_foreach(ematch, e)
1036 off += xt_compat_match_offset(ematch->u.kernel.match);
1037 t = ip6t_get_target_c(e);
1038 off += xt_compat_target_offset(t->u.kernel.target);
1039 newinfo->size -= off;
1040 ret = xt_compat_add_offset(AF_INET6, entry_offset, off);
1044 for (i = 0; i < NF_INET_NUMHOOKS; i++) {
1045 if (info->hook_entry[i] &&
1046 (e < (struct ip6t_entry *)(base + info->hook_entry[i])))
1047 newinfo->hook_entry[i] -= off;
1048 if (info->underflow[i] &&
1049 (e < (struct ip6t_entry *)(base + info->underflow[i])))
1050 newinfo->underflow[i] -= off;
1055 static int compat_table_info(const struct xt_table_info *info,
1056 struct xt_table_info *newinfo)
1058 struct ip6t_entry *iter;
1059 void *loc_cpu_entry;
1062 if (!newinfo || !info)
1065 /* we dont care about newinfo->entries[] */
1066 memcpy(newinfo, info, offsetof(struct xt_table_info, entries));
1067 newinfo->initial_entries = 0;
1068 loc_cpu_entry = info->entries[raw_smp_processor_id()];
1069 xt_compat_init_offsets(AF_INET6, info->number);
1070 xt_entry_foreach(iter, loc_cpu_entry, info->size) {
1071 ret = compat_calc_entry(iter, info, loc_cpu_entry, newinfo);
1079 static int get_info(struct net *net, void __user *user,
1080 const int *len, int compat)
1082 char name[XT_TABLE_MAXNAMELEN];
1086 if (*len != sizeof(struct ip6t_getinfo)) {
1087 duprintf("length %u != %zu\n", *len,
1088 sizeof(struct ip6t_getinfo));
1092 if (copy_from_user(name, user, sizeof(name)) != 0)
1095 name[XT_TABLE_MAXNAMELEN-1] = '\0';
1096 #ifdef CONFIG_COMPAT
1098 xt_compat_lock(AF_INET6);
1100 t = try_then_request_module(xt_find_table_lock(net, AF_INET6, name),
1101 "ip6table_%s", name);
1102 if (!IS_ERR_OR_NULL(t)) {
1103 struct ip6t_getinfo info;
1104 const struct xt_table_info *private = t->private;
1105 #ifdef CONFIG_COMPAT
1106 struct xt_table_info tmp;
1109 ret = compat_table_info(private, &tmp);
1110 xt_compat_flush_offsets(AF_INET6);
1114 memset(&info, 0, sizeof(info));
1115 info.valid_hooks = t->valid_hooks;
1116 memcpy(info.hook_entry, private->hook_entry,
1117 sizeof(info.hook_entry));
1118 memcpy(info.underflow, private->underflow,
1119 sizeof(info.underflow));
1120 info.num_entries = private->number;
1121 info.size = private->size;
1122 strcpy(info.name, name);
1124 if (copy_to_user(user, &info, *len) != 0)
1132 ret = t ? PTR_ERR(t) : -ENOENT;
1133 #ifdef CONFIG_COMPAT
1135 xt_compat_unlock(AF_INET6);
1141 get_entries(struct net *net, struct ip6t_get_entries __user *uptr,
1145 struct ip6t_get_entries get;
1148 if (*len < sizeof(get)) {
1149 duprintf("get_entries: %u < %zu\n", *len, sizeof(get));
1152 if (copy_from_user(&get, uptr, sizeof(get)) != 0)
1154 if (*len != sizeof(struct ip6t_get_entries) + get.size) {
1155 duprintf("get_entries: %u != %zu\n",
1156 *len, sizeof(get) + get.size);
1160 t = xt_find_table_lock(net, AF_INET6, get.name);
1161 if (!IS_ERR_OR_NULL(t)) {
1162 struct xt_table_info *private = t->private;
1163 duprintf("t->private->number = %u\n", private->number);
1164 if (get.size == private->size)
1165 ret = copy_entries_to_user(private->size,
1166 t, uptr->entrytable);
1168 duprintf("get_entries: I've got %u not %u!\n",
1169 private->size, get.size);
1175 ret = t ? PTR_ERR(t) : -ENOENT;
1181 __do_replace(struct net *net, const char *name, unsigned int valid_hooks,
1182 struct xt_table_info *newinfo, unsigned int num_counters,
1183 void __user *counters_ptr)
1187 struct xt_table_info *oldinfo;
1188 struct xt_counters *counters;
1189 const void *loc_cpu_old_entry;
1190 struct ip6t_entry *iter;
1193 counters = vzalloc(num_counters * sizeof(struct xt_counters));
1199 t = try_then_request_module(xt_find_table_lock(net, AF_INET6, name),
1200 "ip6table_%s", name);
1201 if (IS_ERR_OR_NULL(t)) {
1202 ret = t ? PTR_ERR(t) : -ENOENT;
1203 goto free_newinfo_counters_untrans;
1207 if (valid_hooks != t->valid_hooks) {
1208 duprintf("Valid hook crap: %08X vs %08X\n",
1209 valid_hooks, t->valid_hooks);
1214 oldinfo = xt_replace_table(t, num_counters, newinfo, &ret);
1218 /* Update module usage count based on number of rules */
1219 duprintf("do_replace: oldnum=%u, initnum=%u, newnum=%u\n",
1220 oldinfo->number, oldinfo->initial_entries, newinfo->number);
1221 if ((oldinfo->number > oldinfo->initial_entries) ||
1222 (newinfo->number <= oldinfo->initial_entries))
1224 if ((oldinfo->number > oldinfo->initial_entries) &&
1225 (newinfo->number <= oldinfo->initial_entries))
1228 /* Get the old counters, and synchronize with replace */
1229 get_counters(oldinfo, counters);
1231 /* Decrease module usage counts and free resource */
1232 loc_cpu_old_entry = oldinfo->entries[raw_smp_processor_id()];
1233 xt_entry_foreach(iter, loc_cpu_old_entry, oldinfo->size)
1234 cleanup_entry(iter, net);
1236 xt_free_table_info(oldinfo);
1237 if (copy_to_user(counters_ptr, counters,
1238 sizeof(struct xt_counters) * num_counters) != 0)
1247 free_newinfo_counters_untrans:
1254 do_replace(struct net *net, const void __user *user, unsigned int len)
1257 struct ip6t_replace tmp;
1258 struct xt_table_info *newinfo;
1259 void *loc_cpu_entry;
1260 struct ip6t_entry *iter;
1262 if (copy_from_user(&tmp, user, sizeof(tmp)) != 0)
1265 /* overflow check */
1266 if (tmp.num_counters >= INT_MAX / sizeof(struct xt_counters))
1268 tmp.name[sizeof(tmp.name)-1] = 0;
1270 newinfo = xt_alloc_table_info(tmp.size);
1274 /* choose the copy that is on our node/cpu */
1275 loc_cpu_entry = newinfo->entries[raw_smp_processor_id()];
1276 if (copy_from_user(loc_cpu_entry, user + sizeof(tmp),
1282 ret = translate_table(net, newinfo, loc_cpu_entry, &tmp);
1286 duprintf("ip_tables: Translated table\n");
1288 ret = __do_replace(net, tmp.name, tmp.valid_hooks, newinfo,
1289 tmp.num_counters, tmp.counters);
1291 goto free_newinfo_untrans;
1294 free_newinfo_untrans:
1295 xt_entry_foreach(iter, loc_cpu_entry, newinfo->size)
1296 cleanup_entry(iter, net);
1298 xt_free_table_info(newinfo);
1303 do_add_counters(struct net *net, const void __user *user, unsigned int len,
1306 unsigned int i, curcpu;
1307 struct xt_counters_info tmp;
1308 struct xt_counters *paddc;
1309 unsigned int num_counters;
1314 const struct xt_table_info *private;
1316 const void *loc_cpu_entry;
1317 struct ip6t_entry *iter;
1318 unsigned int addend;
1319 #ifdef CONFIG_COMPAT
1320 struct compat_xt_counters_info compat_tmp;
1324 size = sizeof(struct compat_xt_counters_info);
1329 size = sizeof(struct xt_counters_info);
1332 if (copy_from_user(ptmp, user, size) != 0)
1335 #ifdef CONFIG_COMPAT
1337 num_counters = compat_tmp.num_counters;
1338 name = compat_tmp.name;
1342 num_counters = tmp.num_counters;
1346 if (len != size + num_counters * sizeof(struct xt_counters))
1349 paddc = vmalloc(len - size);
1353 if (copy_from_user(paddc, user + size, len - size) != 0) {
1358 t = xt_find_table_lock(net, AF_INET6, name);
1359 if (IS_ERR_OR_NULL(t)) {
1360 ret = t ? PTR_ERR(t) : -ENOENT;
1366 private = t->private;
1367 if (private->number != num_counters) {
1369 goto unlock_up_free;
1373 /* Choose the copy that is on our node */
1374 curcpu = smp_processor_id();
1375 addend = xt_write_recseq_begin();
1376 loc_cpu_entry = private->entries[curcpu];
1377 xt_entry_foreach(iter, loc_cpu_entry, private->size) {
1378 ADD_COUNTER(iter->counters, paddc[i].bcnt, paddc[i].pcnt);
1381 xt_write_recseq_end(addend);
1393 #ifdef CONFIG_COMPAT
1394 struct compat_ip6t_replace {
1395 char name[XT_TABLE_MAXNAMELEN];
1399 u32 hook_entry[NF_INET_NUMHOOKS];
1400 u32 underflow[NF_INET_NUMHOOKS];
1402 compat_uptr_t counters; /* struct xt_counters * */
1403 struct compat_ip6t_entry entries[0];
1407 compat_copy_entry_to_user(struct ip6t_entry *e, void __user **dstptr,
1408 unsigned int *size, struct xt_counters *counters,
1411 struct xt_entry_target *t;
1412 struct compat_ip6t_entry __user *ce;
1413 u_int16_t target_offset, next_offset;
1414 compat_uint_t origsize;
1415 const struct xt_entry_match *ematch;
1419 ce = (struct compat_ip6t_entry __user *)*dstptr;
1420 if (copy_to_user(ce, e, sizeof(struct ip6t_entry)) != 0 ||
1421 copy_to_user(&ce->counters, &counters[i],
1422 sizeof(counters[i])) != 0)
1425 *dstptr += sizeof(struct compat_ip6t_entry);
1426 *size -= sizeof(struct ip6t_entry) - sizeof(struct compat_ip6t_entry);
1428 xt_ematch_foreach(ematch, e) {
1429 ret = xt_compat_match_to_user(ematch, dstptr, size);
1433 target_offset = e->target_offset - (origsize - *size);
1434 t = ip6t_get_target(e);
1435 ret = xt_compat_target_to_user(t, dstptr, size);
1438 next_offset = e->next_offset - (origsize - *size);
1439 if (put_user(target_offset, &ce->target_offset) != 0 ||
1440 put_user(next_offset, &ce->next_offset) != 0)
1446 compat_find_calc_match(struct xt_entry_match *m,
1448 const struct ip6t_ip6 *ipv6,
1449 unsigned int hookmask,
1452 struct xt_match *match;
1454 match = xt_request_find_match(NFPROTO_IPV6, m->u.user.name,
1455 m->u.user.revision);
1456 if (IS_ERR(match)) {
1457 duprintf("compat_check_calc_match: `%s' not found\n",
1459 return PTR_ERR(match);
1461 m->u.kernel.match = match;
1462 *size += xt_compat_match_offset(match);
/*
 * Release the module references taken while a compat entry was being
 * checked (matches first, then the target).  Counterpart of the refs
 * grabbed in compat_find_calc_match()/check_compat_entry_size_and_hooks().
 */
1466 static void compat_release_entry(struct compat_ip6t_entry *e)
1468 struct xt_entry_target *t;
1469 struct xt_entry_match *ematch;
1471 /* Cleanup all matches */
1472 xt_ematch_foreach(ematch, e)
1473 module_put(ematch->u.kernel.match->me);
1474 t = compat_ip6t_get_target(e);
1475 module_put(t->u.kernel.target->me);
/*
 * First-pass validation of one compat entry: alignment, bounds against
 * the blob limit, minimum size, then resolution of all matches and the
 * target so the native<->compat size delta ("off") can be registered
 * with xt_compat_add_offset() for later offset translation.  Hook
 * entry points and underflows whose offsets land on this entry are
 * recorded in newinfo.  On failure, module refs taken so far are
 * dropped via the release_matches unwind path.
 *
 * NOTE(review): later upstream hardening also validates that
 * e->next_offset fits within the remaining blob — confirm this tree
 * carries those x_tables fixes.
 */
1479 check_compat_entry_size_and_hooks(struct compat_ip6t_entry *e,
1480 struct xt_table_info *newinfo,
1482 const unsigned char *base,
1483 const unsigned char *limit,
1484 const unsigned int *hook_entries,
1485 const unsigned int *underflows,
1488 struct xt_entry_match *ematch;
1489 struct xt_entry_target *t;
1490 struct xt_target *target;
1491 unsigned int entry_offset;
1495 duprintf("check_compat_entry_size_and_hooks %p\n", e);
/* misaligned or out-of-bounds entries mean a corrupt/malicious blob */
1496 if ((unsigned long)e % __alignof__(struct compat_ip6t_entry) != 0 ||
1497 (unsigned char *)e + sizeof(struct compat_ip6t_entry) >= limit) {
1498 duprintf("Bad offset %p, limit = %p\n", e, limit);
/* every entry must at least hold its header plus a target header */
1502 if (e->next_offset < sizeof(struct compat_ip6t_entry) +
1503 sizeof(struct compat_xt_entry_target)) {
1504 duprintf("checking: element %p size %u\n",
1509 /* For purposes of check_entry casting the compat entry is fine */
1510 ret = check_entry((struct ip6t_entry *)e, name);
/* base delta: native entry header is larger than the compat one */
1514 off = sizeof(struct ip6t_entry) - sizeof(struct compat_ip6t_entry);
1515 entry_offset = (void *)e - (void *)base;
1517 xt_ematch_foreach(ematch, e) {
1518 ret = compat_find_calc_match(ematch, name,
1519 &e->ipv6, e->comefrom, &off);
1521 goto release_matches;
1525 t = compat_ip6t_get_target(e);
1526 target = xt_request_find_target(NFPROTO_IPV6, t->u.user.name,
1527 t->u.user.revision);
1528 if (IS_ERR(target)) {
1529 duprintf("check_compat_entry_size_and_hooks: `%s' not found\n",
1531 ret = PTR_ERR(target);
1532 goto release_matches;
1534 t->u.kernel.target = target;
1536 off += xt_compat_target_offset(target);
/* remember this entry's cumulative delta for offset translation later */
1538 ret = xt_compat_add_offset(AF_INET6, entry_offset, off);
1542 /* Check hooks & underflows */
1543 for (h = 0; h < NF_INET_NUMHOOKS; h++) {
1544 if ((unsigned char *)e - base == hook_entries[h])
1545 newinfo->hook_entry[h] = hook_entries[h];
1546 if ((unsigned char *)e - base == underflows[h])
1547 newinfo->underflow[h] = underflows[h];
1550 /* Clear counters and comefrom */
1551 memset(&e->counters, 0, sizeof(e->counters));
/* error unwind: drop the target ref, then every match ref taken above */
1556 module_put(t->u.kernel.target->me);
1558 xt_ematch_foreach(ematch, e) {
1561 module_put(ematch->u.kernel.match->me);
/*
 * Expand one validated compat entry into native layout at *dstptr.
 * Matches and the target are converted via their compat_from_user
 * handlers; the entry's internal offsets grow by the accumulated
 * native/compat delta, and hook entry/underflow offsets recorded in
 * newinfo are shifted for entries located after this one.
 */
1567 compat_copy_entry_from_user(struct compat_ip6t_entry *e, void **dstptr,
1568 unsigned int *size, const char *name,
1569 struct xt_table_info *newinfo, unsigned char *base)
1571 struct xt_entry_target *t;
1572 struct ip6t_entry *de;
1573 unsigned int origsize;
1575 struct xt_entry_match *ematch;
1579 de = (struct ip6t_entry *)*dstptr;
1580 memcpy(de, e, sizeof(struct ip6t_entry));
1581 memcpy(&de->counters, &e->counters, sizeof(e->counters));
1583 *dstptr += sizeof(struct ip6t_entry);
/* native entry is larger, so *size grows by the header difference */
1584 *size += sizeof(struct ip6t_entry) - sizeof(struct compat_ip6t_entry);
1586 xt_ematch_foreach(ematch, e) {
1587 ret = xt_compat_match_from_user(ematch, dstptr, size);
/* rewrite intra-entry offsets for the growth accumulated so far */
1591 de->target_offset = e->target_offset - (origsize - *size);
1592 t = compat_ip6t_get_target(e);
1593 xt_compat_target_from_user(t, dstptr, size);
1595 de->next_offset = e->next_offset - (origsize - *size);
/* shift hook offsets that point past this entry in the new blob */
1596 for (h = 0; h < NF_INET_NUMHOOKS; h++) {
1597 if ((unsigned char *)de - base < newinfo->hook_entry[h])
1598 newinfo->hook_entry[h] -= origsize - *size;
1599 if ((unsigned char *)de - base < newinfo->underflow[h])
1600 newinfo->underflow[h] -= origsize - *size;
/*
 * Second-pass check of an already-translated (native layout) entry:
 * run ->checkentry for every match and for the target.  On failure,
 * matches that were successfully checked are cleaned up again.
 */
1605 static int compat_check_entry(struct ip6t_entry *e, struct net *net,
1610 struct xt_mtchk_param mtpar;
1611 struct xt_entry_match *ematch;
1616 mtpar.entryinfo = &e->ipv6;
1617 mtpar.hook_mask = e->comefrom;
1618 mtpar.family = NFPROTO_IPV6;
1619 xt_ematch_foreach(ematch, e) {
1620 ret = check_match(ematch, &mtpar);
1622 goto cleanup_matches;
1626 ret = check_target(e, net, name);
1628 goto cleanup_matches;
/* unwind: destroy only the matches whose check already succeeded */
1632 xt_ematch_foreach(ematch, e) {
1635 cleanup_match(ematch, net);
/*
 * Translate a complete compat ruleset blob into native layout:
 *  1) under xt_compat_lock(AF_INET6), walk all compat entries checking
 *     sizes/hooks and registering per-entry offset deltas;
 *  2) allocate a native xt_table_info and copy/convert each entry into
 *     the current CPU's entry area (duplicated to all CPUs at the end);
 *  3) mark source chains and run the full compat_check_entry() pass.
 * On success *pinfo/*pentry0 are replaced with the new table and the
 * old buffers are freed here; on failure everything allocated locally
 * is unwound and module references are released.
 */
1641 translate_compat_table(struct net *net,
1643 unsigned int valid_hooks,
1644 struct xt_table_info **pinfo,
1646 unsigned int total_size,
1647 unsigned int number,
1648 unsigned int *hook_entries,
1649 unsigned int *underflows)
1652 struct xt_table_info *newinfo, *info;
1653 void *pos, *entry0, *entry1;
1654 struct compat_ip6t_entry *iter0;
1655 struct ip6t_entry *iter1;
1662 info->number = number;
1664 /* Init all hooks to impossible value. */
1665 for (i = 0; i < NF_INET_NUMHOOKS; i++) {
1666 info->hook_entry[i] = 0xFFFFFFFF;
1667 info->underflow[i] = 0xFFFFFFFF;
1670 duprintf("translate_compat_table: size %u\n", info->size);
1672 xt_compat_lock(AF_INET6);
1673 xt_compat_init_offsets(AF_INET6, number);
1674 /* Walk through entries, checking offsets. */
1675 xt_entry_foreach(iter0, entry0, total_size) {
1676 ret = check_compat_entry_size_and_hooks(iter0, info, &size,
1678 entry0 + total_size,
/* the blob must contain exactly the advertised number of entries */
1689 duprintf("translate_compat_table: %u not %u entries\n",
1694 /* Check hooks all assigned */
1695 for (i = 0; i < NF_INET_NUMHOOKS; i++) {
1696 /* Only hooks which are valid */
1697 if (!(valid_hooks & (1 << i)))
1699 if (info->hook_entry[i] == 0xFFFFFFFF) {
1700 duprintf("Invalid hook entry %u %u\n",
1701 i, hook_entries[i]);
1704 if (info->underflow[i] == 0xFFFFFFFF) {
1705 duprintf("Invalid underflow %u %u\n",
1712 newinfo = xt_alloc_table_info(size);
1716 newinfo->number = number;
1717 for (i = 0; i < NF_INET_NUMHOOKS; i++) {
1718 newinfo->hook_entry[i] = info->hook_entry[i];
1719 newinfo->underflow[i] = info->underflow[i];
/* convert into the current CPU's copy; other CPUs get a memcpy below */
1721 entry1 = newinfo->entries[raw_smp_processor_id()];
1724 xt_entry_foreach(iter0, entry0, total_size) {
1725 ret = compat_copy_entry_from_user(iter0, &pos, &size,
1726 name, newinfo, entry1);
/* compat offset bookkeeping is no longer needed once conversion is done */
1730 xt_compat_flush_offsets(AF_INET6);
1731 xt_compat_unlock(AF_INET6);
1736 if (!mark_source_chains(newinfo, valid_hooks, entry1))
1740 xt_entry_foreach(iter1, entry1, newinfo->size) {
1741 ret = compat_check_entry(iter1, net, name);
1745 if (strcmp(ip6t_get_target(iter1)->u.user.name,
1746 XT_ERROR_TARGET) == 0)
1747 ++newinfo->stacksize;
1751 * The first i matches need cleanup_entry (calls ->destroy)
1752 * because they had called ->check already. The other j-i
1753 * entries need only release.
1757 xt_entry_foreach(iter0, entry0, newinfo->size) {
1762 compat_release_entry(iter0);
1764 xt_entry_foreach(iter1, entry1, newinfo->size) {
1767 cleanup_entry(iter1, net);
1769 xt_free_table_info(newinfo);
1773 /* And one copy for every other CPU */
1774 for_each_possible_cpu(i)
1775 if (newinfo->entries[i] && newinfo->entries[i] != entry1)
1776 memcpy(newinfo->entries[i], entry1, newinfo->size);
1780 xt_free_table_info(info);
/* error unwind: free the half-built table and release compat refs */
1784 xt_free_table_info(newinfo);
1786 xt_entry_foreach(iter0, entry0, total_size) {
1789 compat_release_entry(iter0);
1793 xt_compat_flush_offsets(AF_INET6);
1794 xt_compat_unlock(AF_INET6);
/*
 * IP6T_SO_SET_REPLACE handler for 32-bit userspace: copy in the compat
 * replace header and ruleset blob, translate to native layout, then
 * hand off to the common __do_replace().  If __do_replace() fails, the
 * already-translated entries are cleaned up here.
 */
1799 compat_do_replace(struct net *net, void __user *user, unsigned int len)
1802 struct compat_ip6t_replace tmp;
1803 struct xt_table_info *newinfo;
1804 void *loc_cpu_entry;
1805 struct ip6t_entry *iter;
1807 if (copy_from_user(&tmp, user, sizeof(tmp)) != 0)
1810 /* overflow check */
1811 if (tmp.size >= INT_MAX / num_possible_cpus())
1813 if (tmp.num_counters >= INT_MAX / sizeof(struct xt_counters))
/* userspace name is not guaranteed NUL-terminated; force it */
1815 tmp.name[sizeof(tmp.name)-1] = 0;
1817 newinfo = xt_alloc_table_info(tmp.size);
1821 /* choose the copy that is on our node/cpu */
1822 loc_cpu_entry = newinfo->entries[raw_smp_processor_id()];
1823 if (copy_from_user(loc_cpu_entry, user + sizeof(tmp),
1829 ret = translate_compat_table(net, tmp.name, tmp.valid_hooks,
1830 &newinfo, &loc_cpu_entry, tmp.size,
1831 tmp.num_entries, tmp.hook_entry,
1836 duprintf("compat_do_replace: Translated table\n");
1838 ret = __do_replace(net, tmp.name, tmp.valid_hooks, newinfo,
1839 tmp.num_counters, compat_ptr(tmp.counters));
1841 goto free_newinfo_untrans;
1844 free_newinfo_untrans:
1845 xt_entry_foreach(iter, loc_cpu_entry, newinfo->size)
1846 cleanup_entry(iter, net);
1848 xt_free_table_info(newinfo);
/*
 * setsockopt dispatcher for compat (32-bit) callers.  Requires
 * CAP_NET_ADMIN in the socket's network-namespace user ns.
 */
1853 compat_do_ip6t_set_ctl(struct sock *sk, int cmd, void __user *user,
1858 if (!ns_capable(sock_net(sk)->user_ns, CAP_NET_ADMIN))
1862 case IP6T_SO_SET_REPLACE:
1863 ret = compat_do_replace(sock_net(sk), user, len);
/* counters are laid out identically; only the "compat" flag differs */
1866 case IP6T_SO_SET_ADD_COUNTERS:
1867 ret = do_add_counters(sock_net(sk), user, len, 1);
1871 duprintf("do_ip6t_set_ctl: unknown request %i\n", cmd);
/* 32-bit userspace layout for IP6T_SO_GET_ENTRIES requests/replies. */
1878 struct compat_ip6t_get_entries {
1879 char name[XT_TABLE_MAXNAMELEN];
/* old-style flexible array member: entries are appended to the header */
1881 struct compat_ip6t_entry entrytable[0];
/*
 * Stream every entry of the active table to userspace in compat
 * layout, paired with a counter snapshot from alloc_counters().
 */
1885 compat_copy_entries_to_user(unsigned int total_size, struct xt_table *table,
1886 void __user *userptr)
1888 struct xt_counters *counters;
1889 const struct xt_table_info *private = table->private;
1893 const void *loc_cpu_entry;
1895 struct ip6t_entry *iter;
1897 counters = alloc_counters(table);
1898 if (IS_ERR(counters))
1899 return PTR_ERR(counters);
1901 /* choose the copy that is on our node/cpu, ...
1902 * This choice is lazy (because current thread is
1903 * allowed to migrate to another cpu)
1905 loc_cpu_entry = private->entries[raw_smp_processor_id()];
/* i indexes the counter snapshot in step with the entry walk */
1908 xt_entry_foreach(iter, loc_cpu_entry, total_size) {
1909 ret = compat_copy_entry_to_user(iter, &pos,
1910 &size, counters, i++);
/*
 * IP6T_SO_GET_ENTRIES for compat callers: validate the request length
 * against the table's compat-translated size (compat_table_info), then
 * dump the entries in compat layout.  The table lookup and dump run
 * under xt_compat_lock(AF_INET6).
 */
1920 compat_get_entries(struct net *net, struct compat_ip6t_get_entries __user *uptr,
1924 struct compat_ip6t_get_entries get;
1927 if (*len < sizeof(get)) {
1928 duprintf("compat_get_entries: %u < %zu\n", *len, sizeof(get));
1932 if (copy_from_user(&get, uptr, sizeof(get)) != 0)
1935 if (*len != sizeof(struct compat_ip6t_get_entries) + get.size) {
1936 duprintf("compat_get_entries: %u != %zu\n",
1937 *len, sizeof(get) + get.size);
1941 xt_compat_lock(AF_INET6);
1942 t = xt_find_table_lock(net, AF_INET6, get.name);
1943 if (!IS_ERR_OR_NULL(t)) {
1944 const struct xt_table_info *private = t->private;
1945 struct xt_table_info info;
1946 duprintf("t->private->number = %u\n", private->number);
1947 ret = compat_table_info(private, &info);
/* only dump if the caller's buffer matches the compat-translated size */
1948 if (!ret && get.size == info.size) {
1949 ret = compat_copy_entries_to_user(private->size,
1950 t, uptr->entrytable);
1952 duprintf("compat_get_entries: I've got %u not %u!\n",
1953 private->size, get.size);
1956 xt_compat_flush_offsets(AF_INET6);
1960 ret = t ? PTR_ERR(t) : -ENOENT;
1962 xt_compat_unlock(AF_INET6);
/* forward declaration: the native handler serves the non-compat cases */
1966 static int do_ip6t_get_ctl(struct sock *, int, void __user *, int *);
/*
 * getsockopt dispatcher for compat callers: INFO and ENTRIES need
 * layout translation; everything else falls through to the native
 * do_ip6t_get_ctl().  Requires CAP_NET_ADMIN.
 */
1969 compat_do_ip6t_get_ctl(struct sock *sk, int cmd, void __user *user, int *len)
1973 if (!ns_capable(sock_net(sk)->user_ns, CAP_NET_ADMIN))
1977 case IP6T_SO_GET_INFO:
1978 ret = get_info(sock_net(sk), user, len, 1);
1980 case IP6T_SO_GET_ENTRIES:
1981 ret = compat_get_entries(sock_net(sk), user, len);
1984 ret = do_ip6t_get_ctl(sk, cmd, user, len);
/*
 * Native setsockopt dispatcher for IP6T_SO_SET_* commands.  Requires
 * CAP_NET_ADMIN in the socket's network-namespace user ns.
 */
1991 do_ip6t_set_ctl(struct sock *sk, int cmd, void __user *user, unsigned int len)
1995 if (!ns_capable(sock_net(sk)->user_ns, CAP_NET_ADMIN))
1999 case IP6T_SO_SET_REPLACE:
2000 ret = do_replace(sock_net(sk), user, len);
2003 case IP6T_SO_SET_ADD_COUNTERS:
2004 ret = do_add_counters(sock_net(sk), user, len, 0);
2008 duprintf("do_ip6t_set_ctl: unknown request %i\n", cmd);
/*
 * Native getsockopt dispatcher: table info, entries dump, and
 * match/target revision queries (the latter may autoload an
 * "ip6t_<name>" module via try_then_request_module()).
 */
2016 do_ip6t_get_ctl(struct sock *sk, int cmd, void __user *user, int *len)
2020 if (!ns_capable(sock_net(sk)->user_ns, CAP_NET_ADMIN))
2024 case IP6T_SO_GET_INFO:
2025 ret = get_info(sock_net(sk), user, len, 0);
2028 case IP6T_SO_GET_ENTRIES:
2029 ret = get_entries(sock_net(sk), user, len);
2032 case IP6T_SO_GET_REVISION_MATCH:
2033 case IP6T_SO_GET_REVISION_TARGET: {
2034 struct xt_get_revision rev;
2037 if (*len != sizeof(rev)) {
2041 if (copy_from_user(&rev, user, sizeof(rev)) != 0) {
/* userspace name is not guaranteed NUL-terminated; force it */
2045 rev.name[sizeof(rev.name)-1] = 0;
2047 if (cmd == IP6T_SO_GET_REVISION_TARGET)
2052 try_then_request_module(xt_find_revision(AF_INET6, rev.name,
2055 "ip6t_%s", rev.name);
2060 duprintf("do_ip6t_get_ctl: unknown request %i\n", cmd);
/*
 * Create and register an ip6_tables table from a template ruleset.
 * The initial entries are copied into a fresh per-CPU table area and
 * run through translate_table() for validation before registration
 * with the xt core.  Returns the registered table or an ERR_PTR; the
 * new table info is freed on any failure path.
 */
2067 struct xt_table *ip6t_register_table(struct net *net,
2068 const struct xt_table *table,
2069 const struct ip6t_replace *repl)
2072 struct xt_table_info *newinfo;
2073 struct xt_table_info bootstrap = {0};
2074 void *loc_cpu_entry;
2075 struct xt_table *new_table;
2077 newinfo = xt_alloc_table_info(repl->size);
2083 /* choose the copy on our node/cpu, but dont care about preemption */
2084 loc_cpu_entry = newinfo->entries[raw_smp_processor_id()];
2085 memcpy(loc_cpu_entry, repl->entries, repl->size);
2087 ret = translate_table(net, newinfo, loc_cpu_entry, repl);
2091 new_table = xt_register_table(net, table, &bootstrap, newinfo);
2092 if (IS_ERR(new_table)) {
2093 ret = PTR_ERR(new_table);
2099 xt_free_table_info(newinfo);
2101 return ERR_PTR(ret);
/*
 * Tear down a registered table: unregister it from the xt core, run
 * cleanup_entry() on every rule to release match/target module refs,
 * drop the table owner's module ref if user rules were loaded beyond
 * the built-in ones, and free the table memory.
 */
2104 void ip6t_unregister_table(struct net *net, struct xt_table *table)
2106 struct xt_table_info *private;
2107 void *loc_cpu_entry;
2108 struct module *table_owner = table->me;
2109 struct ip6t_entry *iter;
2111 private = xt_unregister_table(table);
2113 /* Decrease module usage counts and free resources */
2114 loc_cpu_entry = private->entries[raw_smp_processor_id()];
2115 xt_entry_foreach(iter, loc_cpu_entry, private->size)
2116 cleanup_entry(iter, net);
/* only user-added rules pin the owner module; built-ins don't */
2117 if (private->number > private->initial_entries)
2118 module_put(table_owner);
2119 xt_free_table_info(private);
2122 /* Returns 1 if the type and code is matched by the range, 0 otherwise */
2124 icmp6_type_code_match(u_int8_t test_type, u_int8_t min_code, u_int8_t max_code,
2125 u_int8_t type, u_int8_t code,
/* exact type match, inclusive code range [min_code, max_code] */
2128 return (type == test_type && code >= min_code && code <= max_code)
/*
 * Match function for the built-in "icmp6" match: never matches
 * fragments, hotdrops packets whose ICMPv6 header is truncated or
 * unreadable, and compares type/code against the configured range,
 * honouring the IP6T_ICMP_INV invert flag.
 */
2133 icmp6_match(const struct sk_buff *skb, struct xt_action_param *par)
2135 const struct icmp6hdr *ic;
2136 struct icmp6hdr _icmph;
2137 const struct ip6t_icmp *icmpinfo = par->matchinfo;
2139 /* Must not be a fragment. */
2140 if (par->fragoff != 0)
2143 ic = skb_header_pointer(skb, par->thoff, sizeof(_icmph), &_icmph);
2145 /* We've been asked to examine this packet, and we
2146 * can't. Hence, no choice but to drop.
2148 duprintf("Dropping evil ICMP tinygram.\n");
2149 par->hotdrop = true;
2153 return icmp6_type_code_match(icmpinfo->type,
2156 ic->icmp6_type, ic->icmp6_code,
2157 !!(icmpinfo->invflags&IP6T_ICMP_INV));
2160 /* Called when user tries to insert an entry of this type. */
2161 static int icmp6_checkentry(const struct xt_mtchk_param *par)
2163 const struct ip6t_icmp *icmpinfo = par->matchinfo;
2165 /* Must specify no unknown invflags */
/* any bit outside IP6T_ICMP_INV is rejected with -EINVAL */
2166 return (icmpinfo->invflags & ~IP6T_ICMP_INV) ? -EINVAL : 0;
2169 /* The built-in targets: standard (NULL) and error. */
2170 static struct xt_target ip6t_builtin_tg[] __read_mostly = {
2172 .name = XT_STANDARD_TARGET,
/* standard target payload is just the verdict, stored as an int */
2173 .targetsize = sizeof(int),
2174 .family = NFPROTO_IPV6,
2175 #ifdef CONFIG_COMPAT
/* verdict is translated between 32- and 64-bit int representations */
2176 .compatsize = sizeof(compat_int_t),
2177 .compat_from_user = compat_standard_from_user,
2178 .compat_to_user = compat_standard_to_user,
2182 .name = XT_ERROR_TARGET,
2183 .target = ip6t_error,
2184 .targetsize = XT_FUNCTION_MAXNAMELEN,
2185 .family = NFPROTO_IPV6,
/*
 * sockopt registration glue: routes the IP6T_SO_SET_*/IP6T_SO_GET_*
 * ranges (and their 32-bit compat variants) to the handlers above.
 */
2189 static struct nf_sockopt_ops ip6t_sockopts = {
2191 .set_optmin = IP6T_BASE_CTL,
2192 .set_optmax = IP6T_SO_SET_MAX+1,
2193 .set = do_ip6t_set_ctl,
2194 #ifdef CONFIG_COMPAT
2195 .compat_set = compat_do_ip6t_set_ctl,
2197 .get_optmin = IP6T_BASE_CTL,
2198 .get_optmax = IP6T_SO_GET_MAX+1,
2199 .get = do_ip6t_get_ctl,
2200 #ifdef CONFIG_COMPAT
2201 .compat_get = compat_do_ip6t_get_ctl,
2203 .owner = THIS_MODULE,
/* Built-in "icmp6" match, restricted to IPPROTO_ICMPV6 packets. */
2206 static struct xt_match ip6t_builtin_mt[] __read_mostly = {
2209 .match = icmp6_match,
2210 .matchsize = sizeof(struct ip6t_icmp),
2211 .checkentry = icmp6_checkentry,
2212 .proto = IPPROTO_ICMPV6,
2213 .family = NFPROTO_IPV6,
/* Per-netns init: set up the xt proc/state for NFPROTO_IPV6. */
2217 static int __net_init ip6_tables_net_init(struct net *net)
2219 return xt_proto_init(net, NFPROTO_IPV6);
/* Per-netns teardown: release the xt state created in net_init. */
2222 static void __net_exit ip6_tables_net_exit(struct net *net)
2224 xt_proto_fini(net, NFPROTO_IPV6);
/* pernet hooks so every network namespace gets its own IPv6 xt state */
2227 static struct pernet_operations ip6_tables_net_ops = {
2228 .init = ip6_tables_net_init,
2229 .exit = ip6_tables_net_exit,
/*
 * Module init: register pernet state, then the built-in targets and
 * matches, then the sockopt interface.  The error labels unwind each
 * step in reverse order.
 */
2232 static int __init ip6_tables_init(void)
2236 ret = register_pernet_subsys(&ip6_tables_net_ops);
2240 /* No one else will be downing sem now, so we won't sleep */
2241 ret = xt_register_targets(ip6t_builtin_tg, ARRAY_SIZE(ip6t_builtin_tg));
2244 ret = xt_register_matches(ip6t_builtin_mt, ARRAY_SIZE(ip6t_builtin_mt));
2248 /* Register setsockopt */
2249 ret = nf_register_sockopt(&ip6t_sockopts);
2253 pr_info("(C) 2000-2006 Netfilter Core Team\n");
/* error unwind: undo registrations in reverse order */
2257 xt_unregister_matches(ip6t_builtin_mt, ARRAY_SIZE(ip6t_builtin_mt));
2259 xt_unregister_targets(ip6t_builtin_tg, ARRAY_SIZE(ip6t_builtin_tg));
2261 unregister_pernet_subsys(&ip6_tables_net_ops);
/* Module exit: unregister everything in reverse order of init. */
2266 static void __exit ip6_tables_fini(void)
2268 nf_unregister_sockopt(&ip6t_sockopts);
2270 xt_unregister_matches(ip6t_builtin_mt, ARRAY_SIZE(ip6t_builtin_mt));
2271 xt_unregister_targets(ip6t_builtin_tg, ARRAY_SIZE(ip6t_builtin_tg));
2272 unregister_pernet_subsys(&ip6_tables_net_ops);
/* public API for the per-table modules (presumably ip6table_filter
 * and friends — the users are outside this file) */
2275 EXPORT_SYMBOL(ip6t_register_table);
2276 EXPORT_SYMBOL(ip6t_unregister_table);
2277 EXPORT_SYMBOL(ip6t_do_table);
2279 module_init(ip6_tables_init);
2280 module_exit(ip6_tables_fini);