/*
 * Packet matching code for ARP packets.
 *
 * Based heavily, if not almost entirely, upon ip_tables.c framework.
 *
 * Some ARP specific bits are:
 *
 * Copyright (C) 2002 David S. Miller (davem@redhat.com)
 * Copyright (C) 2006-2009 Patrick McHardy <kaber@trash.net>
 *
 */
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/kernel.h>
#include <linux/skbuff.h>
#include <linux/netdevice.h>
#include <linux/capability.h>
#include <linux/if_arp.h>
#include <linux/kmod.h>
#include <linux/vmalloc.h>
#include <linux/proc_fs.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/mutex.h>
#include <linux/err.h>
#include <net/compat.h>
#include <net/sock.h>
#include <linux/uaccess.h>

#include <linux/netfilter/x_tables.h>
#include <linux/netfilter_arp/arp_tables.h>
#include "../../netfilter/xt_repldata.h"
MODULE_LICENSE("GPL");
MODULE_AUTHOR("David S. Miller <davem@redhat.com>");
MODULE_DESCRIPTION("arptables core");
void *arpt_alloc_initial_table(const struct xt_table *info)
{
	return xt_alloc_initial_table(arpt, ARPT);
}
EXPORT_SYMBOL_GPL(arpt_alloc_initial_table);
static inline int arp_devaddr_compare(const struct arpt_devaddr_info *ap,
				      const char *hdr_addr, int len)
{
	int i, ret;

	if (len > ARPT_DEV_ADDR_LEN_MAX)
		len = ARPT_DEV_ADDR_LEN_MAX;

	ret = 0;
	for (i = 0; i < len; i++)
		ret |= (hdr_addr[i] ^ ap->addr[i]) & ap->mask[i];

	return ret != 0;
}
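
/* Note on arp_devaddr_compare(): with an all-ff mask this is an exact
 * hardware-address comparison, while an all-zero mask matches any
 * address; the XOR/AND accumulation avoids a branch per byte.
 */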
/*
 * Unfortunately, _b and _mask are not aligned to an int (or long int).
 * Some arches don't care, unrolling the loop is a win on them.
 * For other arches, we only have a 16bit alignment.
 */
static unsigned long ifname_compare(const char *_a, const char *_b, const char *_mask)
{
#ifdef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
	unsigned long ret = ifname_compare_aligned(_a, _b, _mask);
#else
	unsigned long ret = 0;
	const u16 *a = (const u16 *)_a;
	const u16 *b = (const u16 *)_b;
	const u16 *mask = (const u16 *)_mask;
	unsigned int i;

	for (i = 0; i < IFNAMSIZ/sizeof(u16); i++)
		ret |= (a[i] ^ b[i]) & mask[i];
#endif
	return ret;
}
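
/* Note on ifname_compare(): the same masked-XOR idiom as above, applied
 * word-wise across the full IFNAMSIZ buffer, so an interface-name match
 * is decided without any per-character branching.
 */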
/* Returns whether packet matches rule or not. */
static inline int arp_packet_match(const struct arphdr *arphdr,
				   struct net_device *dev,
				   const char *indev,
				   const char *outdev,
				   const struct arpt_arp *arpinfo)
{
	const char *arpptr = (char *)(arphdr + 1);
	const char *src_devaddr, *tgt_devaddr;
	__be32 src_ipaddr, tgt_ipaddr;
	long ret;

	if (NF_INVF(arpinfo, ARPT_INV_ARPOP,
		    (arphdr->ar_op & arpinfo->arpop_mask) != arpinfo->arpop))
		return 0;

	if (NF_INVF(arpinfo, ARPT_INV_ARPHRD,
		    (arphdr->ar_hrd & arpinfo->arhrd_mask) != arpinfo->arhrd))
		return 0;

	if (NF_INVF(arpinfo, ARPT_INV_ARPPRO,
		    (arphdr->ar_pro & arpinfo->arpro_mask) != arpinfo->arpro))
		return 0;

	if (NF_INVF(arpinfo, ARPT_INV_ARPHLN,
		    (arphdr->ar_hln & arpinfo->arhln_mask) != arpinfo->arhln))
		return 0;
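
	/* The ARP payload following struct arphdr is laid out as
	 * sender hw address, sender IP, target hw address, target IP,
	 * with each hw address being dev->addr_len bytes wide.
	 */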
	src_devaddr = arpptr;
	arpptr += dev->addr_len;
	memcpy(&src_ipaddr, arpptr, sizeof(u32));
	arpptr += sizeof(u32);
	tgt_devaddr = arpptr;
	arpptr += dev->addr_len;
	memcpy(&tgt_ipaddr, arpptr, sizeof(u32));

	if (NF_INVF(arpinfo, ARPT_INV_SRCDEVADDR,
		    arp_devaddr_compare(&arpinfo->src_devaddr, src_devaddr,
					dev->addr_len)) ||
	    NF_INVF(arpinfo, ARPT_INV_TGTDEVADDR,
		    arp_devaddr_compare(&arpinfo->tgt_devaddr, tgt_devaddr,
					dev->addr_len)))
		return 0;

	if (NF_INVF(arpinfo, ARPT_INV_SRCIP,
		    (src_ipaddr & arpinfo->smsk.s_addr) != arpinfo->src.s_addr) ||
	    NF_INVF(arpinfo, ARPT_INV_TGTIP,
		    (tgt_ipaddr & arpinfo->tmsk.s_addr) != arpinfo->tgt.s_addr))
		return 0;

	/* Look for ifname matches. */
	ret = ifname_compare(indev, arpinfo->iniface, arpinfo->iniface_mask);

	if (NF_INVF(arpinfo, ARPT_INV_VIA_IN, ret != 0))
		return 0;

	ret = ifname_compare(outdev, arpinfo->outiface, arpinfo->outiface_mask);

	if (NF_INVF(arpinfo, ARPT_INV_VIA_OUT, ret != 0))
		return 0;

	return 1;
}
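
/* Note on NF_INVF(): each check above compares a masked header field and
 * then flips the result if the corresponding ARPT_INV_* bit is set in
 * arpinfo->invflags, which is how negated ("!") rules are implemented.
 */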
static inline int arp_checkentry(const struct arpt_arp *arp)
{
	if (arp->flags & ~ARPT_F_MASK)
		return 0;
	if (arp->invflags & ~ARPT_INV_MASK)
		return 0;

	return 1;
}
static unsigned int
arpt_error(struct sk_buff *skb, const struct xt_action_param *par)
{
	net_err_ratelimited("arp_tables: error: '%s'\n",
			    (const char *)par->targinfo);

	return NF_DROP;
}
static inline const struct xt_entry_target *
arpt_get_target_c(const struct arpt_entry *e)
{
	return arpt_get_target((struct arpt_entry *)e);
}

static inline struct arpt_entry *
get_entry(const void *base, unsigned int offset)
{
	return (struct arpt_entry *)(base + offset);
}

static inline
struct arpt_entry *arpt_next_entry(const struct arpt_entry *entry)
{
	return (void *)entry + entry->next_offset;
}
unsigned int arpt_do_table(struct sk_buff *skb,
			   const struct nf_hook_state *state,
			   struct xt_table *table)
{
	unsigned int hook = state->hook;
	static const char nulldevname[IFNAMSIZ] __attribute__((aligned(sizeof(long))));
	unsigned int verdict = NF_DROP;
	const struct arphdr *arp;
	struct arpt_entry *e, **jumpstack;
	const char *indev, *outdev;
	const void *table_base;
	unsigned int cpu, stackidx = 0;
	const struct xt_table_info *private;
	struct xt_action_param acpar;
	unsigned int addend;

	if (!pskb_may_pull(skb, arp_hdr_len(skb->dev)))
		return NF_DROP;

	indev = state->in ? state->in->name : nulldevname;
	outdev = state->out ? state->out->name : nulldevname;

	local_bh_disable();
	addend = xt_write_recseq_begin();
	private = READ_ONCE(table->private); /* Address dependency. */
	cpu     = smp_processor_id();
	table_base = private->entries;
	jumpstack  = (struct arpt_entry **)private->jumpstack[cpu];

	/* No TEE support for arptables, so no need to switch to alternate
	 * stack. All targets that reenter must return absolute verdicts.
	 */
	e = get_entry(table_base, private->hook_entry[hook]);
	acpar.state   = state;
	acpar.hotdrop = false;

	arp = arp_hdr(skb);
	do {
		const struct xt_entry_target *t;
		struct xt_counters *counter;

		if (!arp_packet_match(arp, skb->dev, indev, outdev, &e->arp)) {
			e = arpt_next_entry(e);
			continue;
		}

		counter = xt_get_this_cpu_counter(&e->counters);
		ADD_COUNTER(*counter, arp_hdr_len(skb->dev), 1);

		t = arpt_get_target_c(e);

		/* Standard target? */
		if (!t->u.kernel.target->target) {
			int v;

			v = ((struct xt_standard_target *)t)->verdict;
			if (v < 0) {
				/* Pop from stack? */
				if (v != XT_RETURN) {
					verdict = (unsigned int)(-v) - 1;
					break;
				}
				if (stackidx == 0) {
					e = get_entry(table_base,
						      private->underflow[hook]);
				} else {
					e = jumpstack[--stackidx];
					e = arpt_next_entry(e);
				}
				continue;
			}
			if (table_base + v
			    != arpt_next_entry(e)) {
				if (unlikely(stackidx >= private->stacksize)) {
					verdict = NF_DROP;
					break;
				}
				jumpstack[stackidx++] = e;
			}

			e = get_entry(table_base, v);
			continue;
		}

		acpar.target   = t->u.kernel.target;
		acpar.targinfo = t->data;
		verdict = t->u.kernel.target->target(skb, &acpar);

		if (verdict == XT_CONTINUE) {
			/* Target might have changed stuff. */
			arp = arp_hdr(skb);
			e = arpt_next_entry(e);
		} else {
			/* Verdict */
			break;
		}
	} while (!acpar.hotdrop);
	xt_write_recseq_end(addend);
	local_bh_enable();

	if (acpar.hotdrop)
		return NF_DROP;
	else
		return verdict;
}
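
/* Note on the traversal above: standard verdicts are stored negated and
 * offset by one, so a rule carrying -1 means NF_DROP and -2 means
 * NF_ACCEPT, while XT_RETURN pops the jumpstack. A non-negative verdict
 * is a byte offset into the table, i.e. a jump to another rule; the
 * current position is pushed on the jumpstack only when the jump does
 * not simply fall through to the next entry.
 */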
/* All zeroes == unconditional rule. */
static inline bool unconditional(const struct arpt_entry *e)
{
	static const struct arpt_arp uncond;

	return e->target_offset == sizeof(struct arpt_entry) &&
	       memcmp(&e->arp, &uncond, sizeof(uncond)) == 0;
}
/* Figures out from what hook each rule can be called: returns 0 if
 * there are loops. Puts hook bitmask in comefrom.
 */
static int mark_source_chains(const struct xt_table_info *newinfo,
			      unsigned int valid_hooks, void *entry0,
			      unsigned int *offsets)
{
	unsigned int hook;

	/* No recursion; use packet counter to save back ptrs (reset
	 * to 0 as we leave), and comefrom to save source hook bitmask.
	 */
	for (hook = 0; hook < NF_ARP_NUMHOOKS; hook++) {
		unsigned int pos = newinfo->hook_entry[hook];
		struct arpt_entry *e = entry0 + pos;

		if (!(valid_hooks & (1 << hook)))
			continue;

		/* Set initial back pointer. */
		e->counters.pcnt = pos;

		for (;;) {
			const struct xt_standard_target *t
				= (void *)arpt_get_target_c(e);
			int visited = e->comefrom & (1 << hook);

			if (e->comefrom & (1 << NF_ARP_NUMHOOKS))
				return 0;

			e->comefrom
				|= ((1 << hook) | (1 << NF_ARP_NUMHOOKS));

			/* Unconditional return/END. */
			if ((unconditional(e) &&
			     (strcmp(t->target.u.user.name,
				     XT_STANDARD_TARGET) == 0) &&
			     t->verdict < 0) || visited) {
				unsigned int oldpos, size;

				/* Return: backtrack through the last
				 * big jump.
				 */
				do {
					e->comefrom ^= (1<<NF_ARP_NUMHOOKS);
					oldpos = pos;
					pos = e->counters.pcnt;
					e->counters.pcnt = 0;

					/* We're at the start. */
					if (pos == oldpos)
						goto next;

					e = entry0 + pos;
				} while (oldpos == pos + e->next_offset);

				/* Move along one */
				size = e->next_offset;
				e = entry0 + pos + size;
				if (pos + size >= newinfo->size)
					return 0;
				e->counters.pcnt = pos;
				pos += size;
			} else {
				int newpos = t->verdict;

				if (strcmp(t->target.u.user.name,
					   XT_STANDARD_TARGET) == 0 &&
				    newpos >= 0) {
					/* This a jump; chase it. */
					if (!xt_find_jump_offset(offsets, newpos,
								 newinfo->number))
						return 0;
				} else {
					/* ... this is a fallthru */
					newpos = pos + e->next_offset;
					if (newpos >= newinfo->size)
						return 0;
				}
				e = entry0 + newpos;
				e->counters.pcnt = pos;
				pos = newpos;
			}
		}
next:		;
	}
	return 1;
}
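
/* Note on mark_source_chains(): the depth-first walk borrows
 * e->counters.pcnt as a back pointer (restored to 0 on the way out) and
 * uses bit (1 << NF_ARP_NUMHOOKS) in e->comefrom as an "on the current
 * path" marker, so revisiting an entry that still has that bit set
 * means a loop and makes the function return 0 (mapped to -ELOOP by the
 * caller).
 */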
static inline int check_target(struct arpt_entry *e, const char *name)
{
	struct xt_entry_target *t = arpt_get_target(e);
	struct xt_tgchk_param par = {
		.table     = name,
		.entryinfo = e,
		.target    = t->u.kernel.target,
		.targinfo  = t->data,
		.hook_mask = e->comefrom,
		.family    = NFPROTO_ARP,
	};

	return xt_check_target(&par, t->u.target_size - sizeof(*t), 0, false);
}
static inline int
find_check_entry(struct arpt_entry *e, const char *name, unsigned int size,
		 struct xt_percpu_counter_alloc_state *alloc_state)
{
	struct xt_entry_target *t;
	struct xt_target *target;
	int ret;

	if (!xt_percpu_counter_alloc(alloc_state, &e->counters))
		return -ENOMEM;

	t = arpt_get_target(e);
	target = xt_request_find_target(NFPROTO_ARP, t->u.user.name,
					t->u.user.revision);
	if (IS_ERR(target)) {
		ret = PTR_ERR(target);
		goto out;
	}
	t->u.kernel.target = target;

	ret = check_target(e, name);
	if (ret)
		goto err;
	return 0;
err:
	module_put(t->u.kernel.target->me);
out:
	xt_percpu_counter_free(&e->counters);
	return ret;
}
static bool check_underflow(const struct arpt_entry *e)
{
	const struct xt_entry_target *t;
	unsigned int verdict;

	if (!unconditional(e))
		return false;
	t = arpt_get_target_c(e);
	if (strcmp(t->u.user.name, XT_STANDARD_TARGET) != 0)
		return false;
	verdict = ((struct xt_standard_target *)t)->verdict;
	verdict = -verdict - 1;
	return verdict == NF_DROP || verdict == NF_ACCEPT;
}
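
/* Example: a valid base-chain policy entry is an unconditional standard
 * target whose stored verdict decodes (via -v - 1) to NF_ACCEPT or
 * NF_DROP; a stored value of -2 therefore means an ACCEPT policy.
 */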
static inline int check_entry_size_and_hooks(struct arpt_entry *e,
					     struct xt_table_info *newinfo,
					     const unsigned char *base,
					     const unsigned char *limit,
					     const unsigned int *hook_entries,
					     const unsigned int *underflows,
					     unsigned int valid_hooks)
{
	unsigned int h;
	int err;

	if ((unsigned long)e % __alignof__(struct arpt_entry) != 0 ||
	    (unsigned char *)e + sizeof(struct arpt_entry) >= limit ||
	    (unsigned char *)e + e->next_offset > limit)
		return -EINVAL;

	if (e->next_offset
	    < sizeof(struct arpt_entry) + sizeof(struct xt_entry_target))
		return -EINVAL;

	if (!arp_checkentry(&e->arp))
		return -EINVAL;

	err = xt_check_entry_offsets(e, e->elems, e->target_offset,
				     e->next_offset);
	if (err)
		return err;

	/* Check hooks & underflows */
	for (h = 0; h < NF_ARP_NUMHOOKS; h++) {
		if (!(valid_hooks & (1 << h)))
			continue;
		if ((unsigned char *)e - base == hook_entries[h])
			newinfo->hook_entry[h] = hook_entries[h];
		if ((unsigned char *)e - base == underflows[h]) {
			if (!check_underflow(e))
				return -EINVAL;

			newinfo->underflow[h] = underflows[h];
		}
	}

	/* Clear counters and comefrom */
	e->counters = ((struct xt_counters) { 0, 0 });
	e->comefrom = 0;
	return 0;
}
static inline void cleanup_entry(struct arpt_entry *e)
{
	struct xt_tgdtor_param par;
	struct xt_entry_target *t;

	t = arpt_get_target(e);
	par.target   = t->u.kernel.target;
	par.targinfo = t->data;
	par.family   = NFPROTO_ARP;
	if (par.target->destroy != NULL)
		par.target->destroy(&par);
	module_put(par.target->me);
	xt_percpu_counter_free(&e->counters);
}
/* Checks and translates the user-supplied table segment (held in
 * newinfo).
 */
static int translate_table(struct xt_table_info *newinfo, void *entry0,
			   const struct arpt_replace *repl)
{
	struct xt_percpu_counter_alloc_state alloc_state = { 0 };
	struct arpt_entry *iter;
	unsigned int *offsets;
	unsigned int i;
	int ret = 0;

	newinfo->size = repl->size;
	newinfo->number = repl->num_entries;

	/* Init all hooks to impossible value. */
	for (i = 0; i < NF_ARP_NUMHOOKS; i++) {
		newinfo->hook_entry[i] = 0xFFFFFFFF;
		newinfo->underflow[i] = 0xFFFFFFFF;
	}

	offsets = xt_alloc_entry_offsets(newinfo->number);
	if (!offsets)
		return -ENOMEM;
	i = 0;

	/* Walk through entries, checking offsets. */
	xt_entry_foreach(iter, entry0, newinfo->size) {
		ret = check_entry_size_and_hooks(iter, newinfo, entry0,
						 entry0 + repl->size,
						 repl->hook_entry,
						 repl->underflow,
						 repl->valid_hooks);
		if (ret != 0)
			goto out_free;
		if (i < repl->num_entries)
			offsets[i] = (void *)iter - entry0;
		++i;
		if (strcmp(arpt_get_target(iter)->u.user.name,
			   XT_ERROR_TARGET) == 0)
			++newinfo->stacksize;
	}

	ret = -EINVAL;
	if (i != repl->num_entries)
		goto out_free;

	ret = xt_check_table_hooks(newinfo, repl->valid_hooks);
	if (ret)
		goto out_free;

	if (!mark_source_chains(newinfo, repl->valid_hooks, entry0, offsets)) {
		ret = -ELOOP;
		goto out_free;
	}
	kvfree(offsets);

	/* Finally, each sanity check must pass */
	i = 0;
	xt_entry_foreach(iter, entry0, newinfo->size) {
		ret = find_check_entry(iter, repl->name, repl->size,
				       &alloc_state);
		if (ret != 0)
			break;
		++i;
	}

	if (ret != 0) {
		xt_entry_foreach(iter, entry0, newinfo->size) {
			if (i-- == 0)
				break;
			cleanup_entry(iter);
		}
	}
	return ret;

 out_free:
	kvfree(offsets);
	return ret;
}
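
/* Note on translate_table(): validation is two-pass. The first walk
 * checks entry bounds, records per-entry offsets for jump validation
 * and sizes the jumpstack from the number of XT_ERROR_TARGET markers
 * (each user-defined chain begins with one); only after
 * mark_source_chains() proves the ruleset loop-free does the second
 * walk run the per-target checkentry hooks, unwinding on failure.
 */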
static void get_counters(const struct xt_table_info *t,
			 struct xt_counters counters[])
{
	struct arpt_entry *iter;
	unsigned int cpu;
	unsigned int i;

	for_each_possible_cpu(cpu) {
		seqcount_t *s = &per_cpu(xt_recseq, cpu);

		i = 0;
		xt_entry_foreach(iter, t->entries, t->size) {
			struct xt_counters *tmp;
			u64 bcnt, pcnt;
			unsigned int start;

			tmp = xt_get_per_cpu_counter(&iter->counters, cpu);
			do {
				start = read_seqcount_begin(s);
				bcnt = tmp->bcnt;
				pcnt = tmp->pcnt;
			} while (read_seqcount_retry(s, start));

			ADD_COUNTER(counters[i], bcnt, pcnt);
			++i;
			cond_resched();
		}
	}
}
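
/* Note on get_counters(): the read_seqcount loop pairs with the
 * xt_write_recseq_begin()/end() sections in arpt_do_table() and
 * do_add_counters(), giving a tear-free snapshot of the 64-bit
 * byte/packet counters even on 32-bit machines.
 */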
static void get_old_counters(const struct xt_table_info *t,
			     struct xt_counters counters[])
{
	struct arpt_entry *iter;
	unsigned int cpu, i;

	for_each_possible_cpu(cpu) {
		i = 0;
		xt_entry_foreach(iter, t->entries, t->size) {
			struct xt_counters *tmp;

			tmp = xt_get_per_cpu_counter(&iter->counters, cpu);
			ADD_COUNTER(counters[i], tmp->bcnt, tmp->pcnt);
			++i;
		}
		cond_resched();
	}
}
static struct xt_counters *alloc_counters(const struct xt_table *table)
{
	unsigned int countersize;
	struct xt_counters *counters;
	const struct xt_table_info *private = table->private;

	/* We need atomic snapshot of counters: rest doesn't change
	 * (other than comefrom, which userspace doesn't care
	 * about).
	 */
	countersize = sizeof(struct xt_counters) * private->number;
	counters = vzalloc(countersize);

	if (counters == NULL)
		return ERR_PTR(-ENOMEM);

	get_counters(private, counters);

	return counters;
}
static int copy_entries_to_user(unsigned int total_size,
				const struct xt_table *table,
				void __user *userptr)
{
	unsigned int off, num;
	const struct arpt_entry *e;
	struct xt_counters *counters;
	struct xt_table_info *private = table->private;
	int ret = 0;
	void *loc_cpu_entry;

	counters = alloc_counters(table);
	if (IS_ERR(counters))
		return PTR_ERR(counters);

	loc_cpu_entry = private->entries;

	/* FIXME: use iterator macros --RR */
	/* ... then go back and fix counters and names */
	for (off = 0, num = 0; off < total_size; off += e->next_offset, num++) {
		const struct xt_entry_target *t;

		e = loc_cpu_entry + off;
		if (copy_to_user(userptr + off, e, sizeof(*e))) {
			ret = -EFAULT;
			goto free_counters;
		}
		if (copy_to_user(userptr + off
				 + offsetof(struct arpt_entry, counters),
				 &counters[num],
				 sizeof(counters[num])) != 0) {
			ret = -EFAULT;
			goto free_counters;
		}

		t = arpt_get_target_c(e);
		if (xt_target_to_user(t, userptr + off + e->target_offset)) {
			ret = -EFAULT;
			goto free_counters;
		}
	}

 free_counters:
	vfree(counters);
	return ret;
}
#ifdef CONFIG_COMPAT
static void compat_standard_from_user(void *dst, const void *src)
{
	int v = *(compat_int_t *)src;

	if (v > 0)
		v += xt_compat_calc_jump(NFPROTO_ARP, v);
	memcpy(dst, &v, sizeof(v));
}

static int compat_standard_to_user(void __user *dst, const void *src)
{
	compat_int_t cv = *(int *)src;

	if (cv > 0)
		cv -= xt_compat_calc_jump(NFPROTO_ARP, cv);
	return copy_to_user(dst, &cv, sizeof(cv)) ? -EFAULT : 0;
}
static int compat_calc_entry(const struct arpt_entry *e,
			     const struct xt_table_info *info,
			     const void *base, struct xt_table_info *newinfo)
{
	const struct xt_entry_target *t;
	unsigned int entry_offset;
	int off, i, ret;

	off = sizeof(struct arpt_entry) - sizeof(struct compat_arpt_entry);
	entry_offset = (void *)e - base;

	t = arpt_get_target_c(e);
	off += xt_compat_target_offset(t->u.kernel.target);
	newinfo->size -= off;
	ret = xt_compat_add_offset(NFPROTO_ARP, entry_offset, off);
	if (ret)
		return ret;

	for (i = 0; i < NF_ARP_NUMHOOKS; i++) {
		if (info->hook_entry[i] &&
		    (e < (struct arpt_entry *)(base + info->hook_entry[i])))
			newinfo->hook_entry[i] -= off;
		if (info->underflow[i] &&
		    (e < (struct arpt_entry *)(base + info->underflow[i])))
			newinfo->underflow[i] -= off;
	}
	return 0;
}
static int compat_table_info(const struct xt_table_info *info,
			     struct xt_table_info *newinfo)
{
	struct arpt_entry *iter;
	const void *loc_cpu_entry;
	int ret;

	if (!newinfo || !info)
		return -EINVAL;

	/* we don't care about newinfo->entries */
	memcpy(newinfo, info, offsetof(struct xt_table_info, entries));
	newinfo->initial_entries = 0;
	loc_cpu_entry = info->entries;
	xt_compat_init_offsets(NFPROTO_ARP, info->number);
	xt_entry_foreach(iter, loc_cpu_entry, info->size) {
		ret = compat_calc_entry(iter, info, loc_cpu_entry, newinfo);
		if (ret != 0)
			return ret;
	}
	return 0;
}
#endif
static int get_info(struct net *net, void __user *user,
		    const int *len, int compat)
{
	char name[XT_TABLE_MAXNAMELEN];
	struct xt_table *t;
	int ret;

	if (*len != sizeof(struct arpt_getinfo))
		return -EINVAL;

	if (copy_from_user(name, user, sizeof(name)) != 0)
		return -EFAULT;

	name[XT_TABLE_MAXNAMELEN-1] = '\0';
#ifdef CONFIG_COMPAT
	if (compat)
		xt_compat_lock(NFPROTO_ARP);
#endif
	t = xt_request_find_table_lock(net, NFPROTO_ARP, name);
	if (!IS_ERR(t)) {
		struct arpt_getinfo info;
		const struct xt_table_info *private = t->private;
#ifdef CONFIG_COMPAT
		struct xt_table_info tmp;

		if (compat) {
			ret = compat_table_info(private, &tmp);
			xt_compat_flush_offsets(NFPROTO_ARP);
			private = &tmp;
		}
#endif
		memset(&info, 0, sizeof(info));
		info.valid_hooks = t->valid_hooks;
		memcpy(info.hook_entry, private->hook_entry,
		       sizeof(info.hook_entry));
		memcpy(info.underflow, private->underflow,
		       sizeof(info.underflow));
		info.num_entries = private->number;
		info.size = private->size;
		strcpy(info.name, name);

		if (copy_to_user(user, &info, *len) != 0)
			ret = -EFAULT;
		else
			ret = 0;
		xt_table_unlock(t);
		module_put(t->me);
	} else
		ret = PTR_ERR(t);
#ifdef CONFIG_COMPAT
	if (compat)
		xt_compat_unlock(NFPROTO_ARP);
#endif
	return ret;
}
static int get_entries(struct net *net, struct arpt_get_entries __user *uptr,
		       const int *len)
{
	int ret;
	struct arpt_get_entries get;
	struct xt_table *t;

	if (*len < sizeof(get))
		return -EINVAL;
	if (copy_from_user(&get, uptr, sizeof(get)) != 0)
		return -EFAULT;
	if (*len != sizeof(struct arpt_get_entries) + get.size)
		return -EINVAL;

	get.name[sizeof(get.name) - 1] = '\0';

	t = xt_find_table_lock(net, NFPROTO_ARP, get.name);
	if (!IS_ERR(t)) {
		const struct xt_table_info *private = t->private;

		if (get.size == private->size)
			ret = copy_entries_to_user(private->size,
						   t, uptr->entrytable);
		else
			ret = -EAGAIN;

		module_put(t->me);
		xt_table_unlock(t);
	} else
		ret = PTR_ERR(t);

	return ret;
}
static int __do_replace(struct net *net, const char *name,
			unsigned int valid_hooks,
			struct xt_table_info *newinfo,
			unsigned int num_counters,
			void __user *counters_ptr)
{
	int ret;
	struct xt_table *t;
	struct xt_table_info *oldinfo;
	struct xt_counters *counters;
	void *loc_cpu_old_entry;
	struct arpt_entry *iter;

	ret = 0;
	counters = vzalloc(num_counters * sizeof(struct xt_counters));
	if (!counters) {
		ret = -ENOMEM;
		goto out;
	}

	t = xt_request_find_table_lock(net, NFPROTO_ARP, name);
	if (IS_ERR(t)) {
		ret = PTR_ERR(t);
		goto free_newinfo_counters_untrans;
	}

	/* You lied! */
	if (valid_hooks != t->valid_hooks) {
		ret = -EINVAL;
		goto put_module;
	}

	oldinfo = xt_replace_table(t, num_counters, newinfo, &ret);
	if (!oldinfo)
		goto put_module;

	/* Update module usage count based on number of rules */
	if ((oldinfo->number > oldinfo->initial_entries) ||
	    (newinfo->number <= oldinfo->initial_entries))
		module_put(t->me);
	if ((oldinfo->number > oldinfo->initial_entries) &&
	    (newinfo->number <= oldinfo->initial_entries))
		module_put(t->me);

	xt_table_unlock(t);

	get_old_counters(oldinfo, counters);

	/* Decrease module usage counts and free resource */
	loc_cpu_old_entry = oldinfo->entries;
	xt_entry_foreach(iter, loc_cpu_old_entry, oldinfo->size)
		cleanup_entry(iter);

	xt_free_table_info(oldinfo);
	if (copy_to_user(counters_ptr, counters,
			 sizeof(struct xt_counters) * num_counters) != 0) {
		/* Silent error, can't fail, new table is already in place */
		net_warn_ratelimited("arptables: counters copy to user failed while replacing table\n");
	}
	vfree(counters);
	return ret;

 put_module:
	module_put(t->me);
	xt_table_unlock(t);
 free_newinfo_counters_untrans:
	vfree(counters);
 out:
	return ret;
}
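
/* Note on __do_replace(): xt_replace_table() publishes the new ruleset
 * first and lets concurrent lookups drain, so the old entries can then
 * be counted, cleaned up and freed without stalling packet processing;
 * a failed counter copy to userspace is only logged because the new
 * table is already live at that point.
 */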
static int do_replace(struct net *net, const void __user *user,
		      unsigned int len)
{
	int ret;
	struct arpt_replace tmp;
	struct xt_table_info *newinfo;
	void *loc_cpu_entry;
	struct arpt_entry *iter;

	if (copy_from_user(&tmp, user, sizeof(tmp)) != 0)
		return -EFAULT;

	/* overflow check */
	if (tmp.num_counters >= INT_MAX / sizeof(struct xt_counters))
		return -ENOMEM;
	if (tmp.num_counters == 0)
		return -EINVAL;

	tmp.name[sizeof(tmp.name)-1] = 0;

	newinfo = xt_alloc_table_info(tmp.size);
	if (!newinfo)
		return -ENOMEM;

	loc_cpu_entry = newinfo->entries;
	if (copy_from_user(loc_cpu_entry, user + sizeof(tmp),
			   tmp.size) != 0) {
		ret = -EFAULT;
		goto free_newinfo;
	}

	ret = translate_table(newinfo, loc_cpu_entry, &tmp);
	if (ret != 0)
		goto free_newinfo;

	ret = __do_replace(net, tmp.name, tmp.valid_hooks, newinfo,
			   tmp.num_counters, tmp.counters);
	if (ret)
		goto free_newinfo_untrans;
	return 0;

 free_newinfo_untrans:
	xt_entry_foreach(iter, loc_cpu_entry, newinfo->size)
		cleanup_entry(iter);
 free_newinfo:
	xt_free_table_info(newinfo);
	return ret;
}
static int do_add_counters(struct net *net, const void __user *user,
			   unsigned int len, int compat)
{
	unsigned int i;
	struct xt_counters_info tmp;
	struct xt_counters *paddc;
	struct xt_table *t;
	const struct xt_table_info *private;
	int ret = 0;
	struct arpt_entry *iter;
	unsigned int addend;

	paddc = xt_copy_counters_from_user(user, len, &tmp, compat);
	if (IS_ERR(paddc))
		return PTR_ERR(paddc);

	t = xt_find_table_lock(net, NFPROTO_ARP, tmp.name);
	if (IS_ERR(t)) {
		ret = PTR_ERR(t);
		goto free;
	}

	local_bh_disable();
	private = t->private;
	if (private->number != tmp.num_counters) {
		ret = -EINVAL;
		goto unlock_up_free;
	}

	i = 0;

	addend = xt_write_recseq_begin();
	xt_entry_foreach(iter, private->entries, private->size) {
		struct xt_counters *tmp;

		tmp = xt_get_this_cpu_counter(&iter->counters);
		ADD_COUNTER(*tmp, paddc[i].bcnt, paddc[i].pcnt);
		++i;
	}
	xt_write_recseq_end(addend);
 unlock_up_free:
	local_bh_enable();
	xt_table_unlock(t);
	module_put(t->me);
 free:
	vfree(paddc);

	return ret;
}
#ifdef CONFIG_COMPAT
struct compat_arpt_replace {
	char				name[XT_TABLE_MAXNAMELEN];
	u32				valid_hooks;
	u32				num_entries;
	u32				size;
	u32				hook_entry[NF_ARP_NUMHOOKS];
	u32				underflow[NF_ARP_NUMHOOKS];
	u32				num_counters;
	compat_uptr_t			counters;
	struct compat_arpt_entry	entries[0];
};
static inline void compat_release_entry(struct compat_arpt_entry *e)
{
	struct xt_entry_target *t;

	t = compat_arpt_get_target(e);
	module_put(t->u.kernel.target->me);
}
static inline int
check_compat_entry_size_and_hooks(struct compat_arpt_entry *e,
				  struct xt_table_info *newinfo,
				  unsigned int *size,
				  const unsigned char *base,
				  const unsigned char *limit)
{
	struct xt_entry_target *t;
	struct xt_target *target;
	unsigned int entry_offset;
	int ret, off;

	if ((unsigned long)e % __alignof__(struct compat_arpt_entry) != 0 ||
	    (unsigned char *)e + sizeof(struct compat_arpt_entry) >= limit ||
	    (unsigned char *)e + e->next_offset > limit)
		return -EINVAL;

	if (e->next_offset < sizeof(struct compat_arpt_entry) +
			     sizeof(struct compat_xt_entry_target))
		return -EINVAL;

	if (!arp_checkentry(&e->arp))
		return -EINVAL;

	ret = xt_compat_check_entry_offsets(e, e->elems, e->target_offset,
					    e->next_offset);
	if (ret)
		return ret;

	off = sizeof(struct arpt_entry) - sizeof(struct compat_arpt_entry);
	entry_offset = (void *)e - (void *)base;

	t = compat_arpt_get_target(e);
	target = xt_request_find_target(NFPROTO_ARP, t->u.user.name,
					t->u.user.revision);
	if (IS_ERR(target)) {
		ret = PTR_ERR(target);
		goto out;
	}
	t->u.kernel.target = target;

	off += xt_compat_target_offset(target);
	*size += off;
	ret = xt_compat_add_offset(NFPROTO_ARP, entry_offset, off);
	if (ret)
		goto release_target;

	return 0;

release_target:
	module_put(t->u.kernel.target->me);
out:
	return ret;
}
static void
compat_copy_entry_from_user(struct compat_arpt_entry *e, void **dstptr,
			    unsigned int *size,
			    struct xt_table_info *newinfo, unsigned char *base)
{
	struct xt_entry_target *t;
	struct arpt_entry *de;
	unsigned int origsize;
	int h;

	origsize = *size;
	de = *dstptr;
	memcpy(de, e, sizeof(struct arpt_entry));
	memcpy(&de->counters, &e->counters, sizeof(e->counters));

	*dstptr += sizeof(struct arpt_entry);
	*size += sizeof(struct arpt_entry) - sizeof(struct compat_arpt_entry);

	de->target_offset = e->target_offset - (origsize - *size);
	t = compat_arpt_get_target(e);
	xt_compat_target_from_user(t, dstptr, size);

	de->next_offset = e->next_offset - (origsize - *size);
	for (h = 0; h < NF_ARP_NUMHOOKS; h++) {
		if ((unsigned char *)de - base < newinfo->hook_entry[h])
			newinfo->hook_entry[h] -= origsize - *size;
		if ((unsigned char *)de - base < newinfo->underflow[h])
			newinfo->underflow[h] -= origsize - *size;
	}
}
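
/* Note on compat translation: struct arpt_entry is larger than its
 * compat counterpart, so every converted entry grows the blob, and all
 * hook entry points and underflows recorded beyond the current position
 * must be rebased by the accumulated size difference (origsize - *size).
 */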
static int translate_compat_table(struct xt_table_info **pinfo,
				  void **pentry0,
				  const struct compat_arpt_replace *compatr)
{
	unsigned int i, j;
	struct xt_table_info *newinfo, *info;
	void *pos, *entry0, *entry1;
	struct compat_arpt_entry *iter0;
	struct arpt_replace repl;
	unsigned int size;
	int ret;

	info = *pinfo;
	entry0 = *pentry0;
	size = compatr->size;
	info->number = compatr->num_entries;

	j = 0;
	xt_compat_lock(NFPROTO_ARP);
	xt_compat_init_offsets(NFPROTO_ARP, compatr->num_entries);
	/* Walk through entries, checking offsets. */
	xt_entry_foreach(iter0, entry0, compatr->size) {
		ret = check_compat_entry_size_and_hooks(iter0, info, &size,
							entry0,
							entry0 + compatr->size);
		if (ret != 0)
			goto out_unlock;
		++j;
	}

	ret = -EINVAL;
	if (j != compatr->num_entries)
		goto out_unlock;

	ret = -ENOMEM;
	newinfo = xt_alloc_table_info(size);
	if (!newinfo)
		goto out_unlock;

	newinfo->number = compatr->num_entries;
	for (i = 0; i < NF_ARP_NUMHOOKS; i++) {
		newinfo->hook_entry[i] = compatr->hook_entry[i];
		newinfo->underflow[i] = compatr->underflow[i];
	}
	entry1 = newinfo->entries;
	pos = entry1;
	size = compatr->size;
	xt_entry_foreach(iter0, entry0, compatr->size)
		compat_copy_entry_from_user(iter0, &pos, &size,
					    newinfo, entry1);

	/* all module references in entry0 are now gone */

	xt_compat_flush_offsets(NFPROTO_ARP);
	xt_compat_unlock(NFPROTO_ARP);

	memcpy(&repl, compatr, sizeof(*compatr));

	for (i = 0; i < NF_ARP_NUMHOOKS; i++) {
		repl.hook_entry[i] = newinfo->hook_entry[i];
		repl.underflow[i] = newinfo->underflow[i];
	}

	repl.num_counters = 0;
	repl.counters = NULL;
	repl.size = newinfo->size;
	ret = translate_table(newinfo, entry1, &repl);
	if (ret)
		goto free_newinfo;

	*pinfo = newinfo;
	*pentry0 = entry1;
	xt_free_table_info(info);
	return 0;

free_newinfo:
	xt_free_table_info(newinfo);
	return ret;
out_unlock:
	xt_compat_flush_offsets(NFPROTO_ARP);
	xt_compat_unlock(NFPROTO_ARP);
	xt_entry_foreach(iter0, entry0, compatr->size) {
		if (j-- == 0)
			break;
		compat_release_entry(iter0);
	}
	return ret;
}
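
/* Note on translate_compat_table(): the first walk runs under
 * xt_compat_lock() to validate each 32-bit entry and accumulate the
 * native size; the entries are then converted into a freshly allocated
 * native table, which is handed to the regular translate_table() so
 * the full native validation applies to compat requests as well.
 */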
static int compat_do_replace(struct net *net, void __user *user,
			     unsigned int len)
{
	int ret;
	struct compat_arpt_replace tmp;
	struct xt_table_info *newinfo;
	void *loc_cpu_entry;
	struct arpt_entry *iter;

	if (copy_from_user(&tmp, user, sizeof(tmp)) != 0)
		return -EFAULT;

	/* overflow check */
	if (tmp.num_counters >= INT_MAX / sizeof(struct xt_counters))
		return -ENOMEM;
	if (tmp.num_counters == 0)
		return -EINVAL;

	tmp.name[sizeof(tmp.name)-1] = 0;

	newinfo = xt_alloc_table_info(tmp.size);
	if (!newinfo)
		return -ENOMEM;

	loc_cpu_entry = newinfo->entries;
	if (copy_from_user(loc_cpu_entry, user + sizeof(tmp), tmp.size) != 0) {
		ret = -EFAULT;
		goto free_newinfo;
	}

	ret = translate_compat_table(&newinfo, &loc_cpu_entry, &tmp);
	if (ret != 0)
		goto free_newinfo;

	ret = __do_replace(net, tmp.name, tmp.valid_hooks, newinfo,
			   tmp.num_counters, compat_ptr(tmp.counters));
	if (ret)
		goto free_newinfo_untrans;
	return 0;

 free_newinfo_untrans:
	xt_entry_foreach(iter, loc_cpu_entry, newinfo->size)
		cleanup_entry(iter);
 free_newinfo:
	xt_free_table_info(newinfo);
	return ret;
}
static int compat_do_arpt_set_ctl(struct sock *sk, int cmd, void __user *user,
				  unsigned int len)
{
	int ret;

	if (!ns_capable(sock_net(sk)->user_ns, CAP_NET_ADMIN))
		return -EPERM;

	switch (cmd) {
	case ARPT_SO_SET_REPLACE:
		ret = compat_do_replace(sock_net(sk), user, len);
		break;
	case ARPT_SO_SET_ADD_COUNTERS:
		ret = do_add_counters(sock_net(sk), user, len, 1);
		break;
	default:
		ret = -EINVAL;
	}
	return ret;
}
static int compat_copy_entry_to_user(struct arpt_entry *e, void __user **dstptr,
				     compat_uint_t *size,
				     struct xt_counters *counters,
				     unsigned int i)
{
	struct xt_entry_target *t;
	struct compat_arpt_entry __user *ce;
	u_int16_t target_offset, next_offset;
	compat_uint_t origsize;
	int ret;

	origsize = *size;
	ce = *dstptr;
	if (copy_to_user(ce, e, sizeof(struct arpt_entry)) != 0 ||
	    copy_to_user(&ce->counters, &counters[i],
			 sizeof(counters[i])) != 0)
		return -EFAULT;

	*dstptr += sizeof(struct compat_arpt_entry);
	*size -= sizeof(struct arpt_entry) - sizeof(struct compat_arpt_entry);

	target_offset = e->target_offset - (origsize - *size);

	t = arpt_get_target(e);
	ret = xt_compat_target_to_user(t, dstptr, size);
	if (ret)
		return ret;
	next_offset = e->next_offset - (origsize - *size);
	if (put_user(target_offset, &ce->target_offset) != 0 ||
	    put_user(next_offset, &ce->next_offset) != 0)
		return -EFAULT;
	return 0;
}
static int compat_copy_entries_to_user(unsigned int total_size,
				       struct xt_table *table,
				       void __user *userptr)
{
	struct xt_counters *counters;
	const struct xt_table_info *private = table->private;
	void __user *pos;
	unsigned int size;
	int ret = 0;
	unsigned int i = 0;
	struct arpt_entry *iter;

	counters = alloc_counters(table);
	if (IS_ERR(counters))
		return PTR_ERR(counters);

	pos = userptr;
	size = total_size;
	xt_entry_foreach(iter, private->entries, total_size) {
		ret = compat_copy_entry_to_user(iter, &pos,
						&size, counters, i++);
		if (ret != 0)
			break;
	}
	vfree(counters);
	return ret;
}
struct compat_arpt_get_entries {
	char name[XT_TABLE_MAXNAMELEN];
	compat_uint_t size;
	struct compat_arpt_entry entrytable[0];
};
static int compat_get_entries(struct net *net,
			      struct compat_arpt_get_entries __user *uptr,
			      int *len)
{
	int ret;
	struct compat_arpt_get_entries get;
	struct xt_table *t;

	if (*len < sizeof(get))
		return -EINVAL;
	if (copy_from_user(&get, uptr, sizeof(get)) != 0)
		return -EFAULT;
	if (*len != sizeof(struct compat_arpt_get_entries) + get.size)
		return -EINVAL;

	get.name[sizeof(get.name) - 1] = '\0';

	xt_compat_lock(NFPROTO_ARP);
	t = xt_find_table_lock(net, NFPROTO_ARP, get.name);
	if (!IS_ERR(t)) {
		const struct xt_table_info *private = t->private;
		struct xt_table_info info;

		ret = compat_table_info(private, &info);
		if (!ret && get.size == info.size) {
			ret = compat_copy_entries_to_user(private->size,
							  t, uptr->entrytable);
		} else if (!ret)
			ret = -EAGAIN;

		xt_compat_flush_offsets(NFPROTO_ARP);
		module_put(t->me);
		xt_table_unlock(t);
	} else
		ret = PTR_ERR(t);

	xt_compat_unlock(NFPROTO_ARP);
	return ret;
}
static int do_arpt_get_ctl(struct sock *, int, void __user *, int *);

static int compat_do_arpt_get_ctl(struct sock *sk, int cmd, void __user *user,
				  int *len)
{
	int ret;

	if (!ns_capable(sock_net(sk)->user_ns, CAP_NET_ADMIN))
		return -EPERM;

	switch (cmd) {
	case ARPT_SO_GET_INFO:
		ret = get_info(sock_net(sk), user, len, 1);
		break;
	case ARPT_SO_GET_ENTRIES:
		ret = compat_get_entries(sock_net(sk), user, len);
		break;
	default:
		ret = do_arpt_get_ctl(sk, cmd, user, len);
	}
	return ret;
}
#endif
static int do_arpt_set_ctl(struct sock *sk, int cmd, void __user *user, unsigned int len)
{
	int ret;

	if (!ns_capable(sock_net(sk)->user_ns, CAP_NET_ADMIN))
		return -EPERM;

	switch (cmd) {
	case ARPT_SO_SET_REPLACE:
		ret = do_replace(sock_net(sk), user, len);
		break;
	case ARPT_SO_SET_ADD_COUNTERS:
		ret = do_add_counters(sock_net(sk), user, len, 0);
		break;
	default:
		ret = -EINVAL;
	}
	return ret;
}
static int do_arpt_get_ctl(struct sock *sk, int cmd, void __user *user, int *len)
{
	int ret;

	if (!ns_capable(sock_net(sk)->user_ns, CAP_NET_ADMIN))
		return -EPERM;

	switch (cmd) {
	case ARPT_SO_GET_INFO:
		ret = get_info(sock_net(sk), user, len, 0);
		break;

	case ARPT_SO_GET_ENTRIES:
		ret = get_entries(sock_net(sk), user, len);
		break;

	case ARPT_SO_GET_REVISION_TARGET: {
		struct xt_get_revision rev;

		if (*len != sizeof(rev)) {
			ret = -EINVAL;
			break;
		}
		if (copy_from_user(&rev, user, sizeof(rev)) != 0) {
			ret = -EFAULT;
			break;
		}
		rev.name[sizeof(rev.name)-1] = 0;

		try_then_request_module(xt_find_revision(NFPROTO_ARP, rev.name,
							 rev.revision, 1, &ret),
					"arpt_%s", rev.name);
		break;
	}

	default:
		ret = -EINVAL;
	}

	return ret;
}
static void __arpt_unregister_table(struct xt_table *table)
{
	struct xt_table_info *private;
	void *loc_cpu_entry;
	struct module *table_owner = table->me;
	struct arpt_entry *iter;

	private = xt_unregister_table(table);

	/* Decrease module usage counts and free resources */
	loc_cpu_entry = private->entries;
	xt_entry_foreach(iter, loc_cpu_entry, private->size)
		cleanup_entry(iter);
	if (private->number > private->initial_entries)
		module_put(table_owner);
	xt_free_table_info(private);
}
int arpt_register_table(struct net *net,
			const struct xt_table *table,
			const struct arpt_replace *repl,
			const struct nf_hook_ops *ops,
			struct xt_table **res)
{
	int ret;
	struct xt_table_info *newinfo;
	struct xt_table_info bootstrap = {0};
	void *loc_cpu_entry;
	struct xt_table *new_table;

	newinfo = xt_alloc_table_info(repl->size);
	if (!newinfo)
		return -ENOMEM;

	loc_cpu_entry = newinfo->entries;
	memcpy(loc_cpu_entry, repl->entries, repl->size);

	ret = translate_table(newinfo, loc_cpu_entry, repl);
	if (ret != 0)
		goto out_free;

	new_table = xt_register_table(net, table, &bootstrap, newinfo);
	if (IS_ERR(new_table)) {
		ret = PTR_ERR(new_table);
		goto out_free;
	}

	/* set res now, will see skbs right after nf_register_net_hooks */
	WRITE_ONCE(*res, new_table);

	ret = nf_register_net_hooks(net, ops, hweight32(table->valid_hooks));
	if (ret != 0) {
		__arpt_unregister_table(new_table);
		*res = NULL;
	}

	return ret;

out_free:
	xt_free_table_info(newinfo);
	return ret;
}
void arpt_unregister_table(struct net *net, struct xt_table *table,
			   const struct nf_hook_ops *ops)
{
	nf_unregister_net_hooks(net, ops, hweight32(table->valid_hooks));
	__arpt_unregister_table(table);
}
/* The built-in targets: standard (NULL) and error. */
static struct xt_target arpt_builtin_tg[] __read_mostly = {
	{
		.name             = XT_STANDARD_TARGET,
		.targetsize       = sizeof(int),
		.family           = NFPROTO_ARP,
#ifdef CONFIG_COMPAT
		.compatsize       = sizeof(compat_int_t),
		.compat_from_user = compat_standard_from_user,
		.compat_to_user   = compat_standard_to_user,
#endif
	},
	{
		.name             = XT_ERROR_TARGET,
		.target           = arpt_error,
		.targetsize       = XT_FUNCTION_MAXNAMELEN,
		.family           = NFPROTO_ARP,
	},
};
static struct nf_sockopt_ops arpt_sockopts = {
	.pf		= PF_INET,
	.set_optmin	= ARPT_BASE_CTL,
	.set_optmax	= ARPT_SO_SET_MAX+1,
	.set		= do_arpt_set_ctl,
#ifdef CONFIG_COMPAT
	.compat_set	= compat_do_arpt_set_ctl,
#endif
	.get_optmin	= ARPT_BASE_CTL,
	.get_optmax	= ARPT_SO_GET_MAX+1,
	.get		= do_arpt_get_ctl,
#ifdef CONFIG_COMPAT
	.compat_get	= compat_do_arpt_get_ctl,
#endif
	.owner		= THIS_MODULE,
};
static int __net_init arp_tables_net_init(struct net *net)
{
	return xt_proto_init(net, NFPROTO_ARP);
}

static void __net_exit arp_tables_net_exit(struct net *net)
{
	xt_proto_fini(net, NFPROTO_ARP);
}

static struct pernet_operations arp_tables_net_ops = {
	.init = arp_tables_net_init,
	.exit = arp_tables_net_exit,
};
static int __init arp_tables_init(void)
{
	int ret;

	ret = register_pernet_subsys(&arp_tables_net_ops);
	if (ret < 0)
		goto err1;

	/* No one else will be downing sem now, so we won't sleep */
	ret = xt_register_targets(arpt_builtin_tg, ARRAY_SIZE(arpt_builtin_tg));
	if (ret < 0)
		goto err2;

	/* Register setsockopt */
	ret = nf_register_sockopt(&arpt_sockopts);
	if (ret < 0)
		goto err3;

	return 0;

err3:
	xt_unregister_targets(arpt_builtin_tg, ARRAY_SIZE(arpt_builtin_tg));
err2:
	unregister_pernet_subsys(&arp_tables_net_ops);
err1:
	return ret;
}
static void __exit arp_tables_fini(void)
{
	nf_unregister_sockopt(&arpt_sockopts);
	xt_unregister_targets(arpt_builtin_tg, ARRAY_SIZE(arpt_builtin_tg));
	unregister_pernet_subsys(&arp_tables_net_ops);
}

EXPORT_SYMBOL(arpt_register_table);
EXPORT_SYMBOL(arpt_unregister_table);
EXPORT_SYMBOL(arpt_do_table);

module_init(arp_tables_init);
module_exit(arp_tables_fini);