1 // SPDX-License-Identifier: GPL-2.0-only
2 /* Copyright (C) 2000-2002 Joakim Axelsson <gozem@linux.nu>
3 * Patrick Schaaf <bof@bof.de>
4 * Copyright (C) 2003-2013 Jozsef Kadlecsik <kadlec@netfilter.org>
7 /* Kernel module for IP set management */
9 #include <linux/init.h>
10 #include <linux/module.h>
11 #include <linux/moduleparam.h>
13 #include <linux/skbuff.h>
14 #include <linux/spinlock.h>
15 #include <linux/rculist.h>
16 #include <net/netlink.h>
17 #include <net/net_namespace.h>
18 #include <net/netns/generic.h>
20 #include <linux/netfilter.h>
21 #include <linux/netfilter/x_tables.h>
22 #include <linux/netfilter/nfnetlink.h>
23 #include <linux/netfilter/ipset/ip_set.h>
static LIST_HEAD(ip_set_type_list);		/* all registered set types */
static DEFINE_MUTEX(ip_set_type_mutex);		/* protects ip_set_type_list */
static DEFINE_RWLOCK(ip_set_ref_lock);		/* protects the set refs */

/* NOTE(review): the next four declarations appear to be members of the
 * per-netns state structure (struct ip_set_net) whose opening line is not
 * visible in this excerpt (see ip_set_pernet() below) -- confirm against
 * the full file before treating them as file-scope globals.
 */
struct ip_set * __rcu *ip_set_list;	/* all individual sets */
ip_set_id_t ip_set_max;			/* max number of sets */
bool is_deleted;			/* deleted by ip_set_net_exit */
bool is_destroyed;			/* all sets are destroyed */

/* pernet subsystem id used with net_generic() to find our per-net state */
static unsigned int ip_set_net_id __read_mostly;
/* Look up this namespace's ipset state registered under ip_set_net_id. */
static struct ip_set_net *ip_set_pernet(struct net *net)
	/* NOTE(review): function braces elided in this excerpt */
	return net_generic(net, ip_set_net_id);

/* Bounded set-name equality: names are at most IPSET_MAXNAMELEN bytes */
#define STRNCMP(a, b)	(strncmp(a, b, IPSET_MAXNAMELEN) == 0)

static unsigned int max_sets;

module_param(max_sets, int, 0600);
MODULE_PARM_DESC(max_sets, "maximal number of sets");
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Jozsef Kadlecsik <kadlec@netfilter.org>");
MODULE_DESCRIPTION("core IP set support");
MODULE_ALIAS_NFNL_SUBSYS(NFNL_SUBSYS_IPSET);
/* When the nfnl mutex or ip_set_ref_lock is held: */
#define ip_set_dereference(p)		\
	rcu_dereference_protected(p,	\
		lockdep_nfnl_is_held(NFNL_SUBSYS_IPSET) || \
		lockdep_is_held(&ip_set_ref_lock))
/* Slot 'id' of the per-net set array, under nfnl/ip_set_ref_lock */
#define ip_set(inst, id)		\
	ip_set_dereference((inst)->ip_set_list)[id]
/* Raw slot access for the netlink dump path (holds its own ref_netlink) */
#define ip_set_ref_netlink(inst,id)	\
	rcu_dereference_raw((inst)->ip_set_list)[id]
/* The set types are implemented in modules and registered set types
 * can be found in ip_set_type_list. Adding/deleting types is
 * serialized by ip_set_type_mutex.
 */

/* NOTE(review): the 'static void' lines and braces of the two helpers
 * below are elided in this excerpt.
 */
ip_set_type_lock(void)
	mutex_lock(&ip_set_type_mutex);

ip_set_type_unlock(void)
	mutex_unlock(&ip_set_type_mutex);
/* Register and deregister settype */

/* Find a registered type matching name, family (or NFPROTO_UNSPEC) and a
 * revision inside the type's supported [revision_min, revision_max] range.
 * The RCU list walk is also legal under ip_set_type_mutex (lockdep cond).
 */
static struct ip_set_type *
find_set_type(const char *name, u8 family, u8 revision)
	struct ip_set_type *type;

	list_for_each_entry_rcu(type, &ip_set_type_list, list,
				lockdep_is_held(&ip_set_type_mutex))
		if (STRNCMP(type->name, name) &&
		    (type->family == family ||
		     type->family == NFPROTO_UNSPEC) &&
		    revision >= type->revision_min &&
		    revision <= type->revision_max)
	/* NOTE(review): the match/not-found return statements are elided
	 * in this excerpt.
	 */
/* Unlock, try to load a set type module and lock again */
/* NOTE(review): return type line, braces and the failure 'return' are
 * elided in this excerpt. The nfnl mutex must be dropped around
 * request_module() to avoid deadlocking against module init paths.
 */
load_settype(const char *name)
	nfnl_unlock(NFNL_SUBSYS_IPSET);
	pr_debug("try to load ip_set_%s\n", name);
	if (request_module("ip_set_%s", name) < 0) {
		pr_warn("Can't find ip_set type %s\n", name);
		nfnl_lock(NFNL_SUBSYS_IPSET);
	nfnl_lock(NFNL_SUBSYS_IPSET);
/* Find a set type and reference it */
#define find_set_type_get(name, family, revision, found)	\
	__find_set_type_get(name, family, revision, found, false)

/* Look up a type and take a module reference on it. On a miss with
 * retry==false, recurse once with retry==true after trying to autoload
 * the type module via load_settype().
 * NOTE(review): several lines (return type, braces, err declaration,
 * found/not-found branches) are elided in this excerpt.
 */
__find_set_type_get(const char *name, u8 family, u8 revision,
		    struct ip_set_type **found, bool retry)
	struct ip_set_type *type;

	if (retry && !load_settype(name))
		return -IPSET_ERR_FIND_TYPE;

	*found = find_set_type(name, family, revision);
		/* -EFAULT if the module is being unloaded concurrently */
		err = !try_module_get((*found)->me) ? -EFAULT : 0;

	/* Make sure the type is already loaded
	 * but we don't support the revision
	 */
	list_for_each_entry_rcu(type, &ip_set_type_list, list)
		if (STRNCMP(type->name, name)) {
			err = -IPSET_ERR_FIND_TYPE;

	return retry ? -IPSET_ERR_FIND_TYPE :
	       __find_set_type_get(name, family, revision, found, true);
/* Find a given set type by name and family.
 * If we succeeded, the supported minimal and maximum revisions are
 * filled out.
 */
#define find_set_type_minmax(name, family, min, max)	\
	__find_set_type_minmax(name, family, min, max, false)

/* Scan all registered types with the given name/family and report the
 * overall [min, max] revision range; autoload the module once via the
 * retry recursion, like __find_set_type_get() above.
 * NOTE(review): return type, braces and the found/not-found handling
 * lines are elided in this excerpt.
 */
__find_set_type_minmax(const char *name, u8 family, u8 *min, u8 *max,
	struct ip_set_type *type;

	if (retry && !load_settype(name))
		return -IPSET_ERR_FIND_TYPE;

	/* start with an empty range so any match widens it */
	*min = 255; *max = 0;

	list_for_each_entry_rcu(type, &ip_set_type_list, list)
		if (STRNCMP(type->name, name) &&
		    (type->family == family ||
		     type->family == NFPROTO_UNSPEC)) {
			if (type->revision_min < *min)
				*min = type->revision_min;
			if (type->revision_max > *max)
				*max = type->revision_max;

	return retry ? -IPSET_ERR_FIND_TYPE :
	       __find_set_type_minmax(name, family, min, max, true);

/* Human-readable protocol family name for log messages */
#define family_name(f)	((f) == NFPROTO_IPV4 ? "inet" : \
			 (f) == NFPROTO_IPV6 ? "inet6" : "any")
/* Register a set type structure. The type is identified by
 * the unique triple of name, family and revision.
 */
/* NOTE(review): return type, braces, 'ret' handling and the
 * ip_set_type_lock() calls preceding the list operations are partly
 * elided in this excerpt.
 */
ip_set_type_register(struct ip_set_type *type)
	/* Reject modules built against a different ipset protocol version */
	if (type->protocol != IPSET_PROTOCOL) {
		pr_warn("ip_set type %s, family %s, revision %u:%u uses wrong protocol version %u (want %u)\n",
			type->name, family_name(type->family),
			type->revision_min, type->revision_max,
			type->protocol, IPSET_PROTOCOL);

	/* Refuse duplicate (name, family, revision_min) registrations */
	if (find_set_type(type->name, type->family, type->revision_min)) {
		pr_warn("ip_set type %s, family %s with revision min %u already registered!\n",
			type->name, family_name(type->family),
		ip_set_type_unlock();
	list_add_rcu(&type->list, &ip_set_type_list);
	pr_debug("type %s, family %s, revision %u:%u registered.\n",
		 type->name, family_name(type->family),
		 type->revision_min, type->revision_max);
	ip_set_type_unlock();
EXPORT_SYMBOL_GPL(ip_set_type_register);

/* Unregister a set type. There's a small race with ip_set_create */
/* NOTE(review): return type, braces and the lock-taking lines are
 * elided in this excerpt.
 */
ip_set_type_unregister(struct ip_set_type *type)
	if (!find_set_type(type->name, type->family, type->revision_min)) {
		pr_warn("ip_set type %s, family %s with revision min %u not registered\n",
			type->name, family_name(type->family),
		ip_set_type_unlock();
	list_del_rcu(&type->list);
	pr_debug("type %s, family %s with revision min %u unregistered.\n",
		 type->name, family_name(type->family), type->revision_min);
	ip_set_type_unlock();
EXPORT_SYMBOL_GPL(ip_set_type_unregister);
/* Utility functions */

/* Allocate zeroed, memcg-accounted storage; may fall back to vmalloc */
ip_set_alloc(size_t size)
	return kvzalloc(size, GFP_KERNEL_ACCOUNT);
EXPORT_SYMBOL_GPL(ip_set_alloc);

/* Free storage obtained from ip_set_alloc() (kvfree handles both kinds) */
ip_set_free(void *members)
	pr_debug("%p: free with %s\n", members,
		 is_vmalloc_addr(members) ? "vfree" : "kfree");
	/* NOTE(review): the kvfree() call is elided in this excerpt */
EXPORT_SYMBOL_GPL(ip_set_free);

/* True if the attribute carries the NLA_F_NESTED flag */
flag_nested(const struct nlattr *nla)
	return nla->nla_type & NLA_F_NESTED;

static const struct nla_policy ipaddr_policy[IPSET_ATTR_IPADDR_MAX + 1] = {
	[IPSET_ATTR_IPADDR_IPV4]	= { .type = NLA_U32 },
	[IPSET_ATTR_IPADDR_IPV6]	= { .type = NLA_BINARY,
					    .len = sizeof(struct in6_addr) },
/* Parse a nested IPSET_ATTR_IP attribute into an IPv4 address (network
 * byte order). Returns 0 or -IPSET_ERR_PROTOCOL on malformed input.
 * NOTE(review): return type, braces and 'return 0;' are elided here.
 */
ip_set_get_ipaddr4(struct nlattr *nla, __be32 *ipaddr)
	struct nlattr *tb[IPSET_ATTR_IPADDR_MAX + 1];

	if (unlikely(!flag_nested(nla)))
		return -IPSET_ERR_PROTOCOL;
	if (nla_parse_nested(tb, IPSET_ATTR_IPADDR_MAX, nla,
			     ipaddr_policy, NULL))
		return -IPSET_ERR_PROTOCOL;
	if (unlikely(!ip_set_attr_netorder(tb, IPSET_ATTR_IPADDR_IPV4)))
		return -IPSET_ERR_PROTOCOL;

	*ipaddr = nla_get_be32(tb[IPSET_ATTR_IPADDR_IPV4]);
EXPORT_SYMBOL_GPL(ip_set_get_ipaddr4);

/* IPv6 counterpart of the above: copies a struct in6_addr out of the
 * nested attribute. Same elisions as above apply.
 */
ip_set_get_ipaddr6(struct nlattr *nla, union nf_inet_addr *ipaddr)
	struct nlattr *tb[IPSET_ATTR_IPADDR_MAX + 1];

	if (unlikely(!flag_nested(nla)))
		return -IPSET_ERR_PROTOCOL;

	if (nla_parse_nested(tb, IPSET_ATTR_IPADDR_MAX, nla,
			     ipaddr_policy, NULL))
		return -IPSET_ERR_PROTOCOL;
	if (unlikely(!ip_set_attr_netorder(tb, IPSET_ATTR_IPADDR_IPV6)))
		return -IPSET_ERR_PROTOCOL;

	memcpy(ipaddr, nla_data(tb[IPSET_ATTR_IPADDR_IPV6]),
	       sizeof(struct in6_addr));
EXPORT_SYMBOL_GPL(ip_set_get_ipaddr6);
/* Convert a stored timeout (jiffies deadline) back to seconds for
 * userspace. NOTE(review): return type, braces and the PERMANENT-case
 * return are elided in this excerpt.
 */
ip_set_timeout_get(const unsigned long *timeout)
	if (*timeout == IPSET_ELEM_PERMANENT)

	t = jiffies_to_msecs(*timeout - jiffies)/MSEC_PER_SEC;
	/* Zero value in userspace means no timeout */
	return t == 0 ? 1 : t;

/* Extract the comment string from a netlink attribute.
 * NOTE(review): body elided in this excerpt.
 */
ip_set_comment_uget(struct nlattr *tb)

/* Called from uadd only, protected by the set spinlock.
 * The kadt functions don't use the comment extensions in any way.
 */
/* Replace the element's comment with a copy of ext->comment, keeping
 * set->ext_size in sync. An existing comment is accounted out first
 * (its kfree_rcu is elided in this excerpt), then the new one is
 * truncated to IPSET_MAX_COMMENT_SIZE, copied and published via RCU.
 */
ip_set_init_comment(struct ip_set *set, struct ip_set_comment *comment,
		    const struct ip_set_ext *ext)
	struct ip_set_comment_rcu *c = rcu_dereference_protected(comment->c, 1);
	size_t len = ext->comment ? strlen(ext->comment) : 0;

		set->ext_size -= sizeof(*c) + strlen(c->str) + 1;
		rcu_assign_pointer(comment->c, NULL);

	if (unlikely(len > IPSET_MAX_COMMENT_SIZE))
		len = IPSET_MAX_COMMENT_SIZE;
	c = kmalloc(sizeof(*c) + len + 1, GFP_ATOMIC);
	strlcpy(c->str, ext->comment, len + 1);
	set->ext_size += sizeof(*c) + strlen(c->str) + 1;
	rcu_assign_pointer(comment->c, c);
EXPORT_SYMBOL_GPL(ip_set_init_comment);
/* Used only when dumping a set, protected by rcu_read_lock() */
/* Emit the element's comment as IPSET_ATTR_COMMENT; the NULL-comment
 * early return is elided in this excerpt.
 */
ip_set_put_comment(struct sk_buff *skb, const struct ip_set_comment *comment)
	struct ip_set_comment_rcu *c = rcu_dereference(comment->c);

	return nla_put_string(skb, IPSET_ATTR_COMMENT, c->str);

/* Called from uadd/udel, flush or the garbage collectors protected
 * by the set spinlock.
 * Called when the set is destroyed and when there can't be any user
 * of the set data anymore.
 */
/* Drop an element's comment and its ext_size accounting (the kfree_rcu
 * call is elided in this excerpt).
 */
ip_set_comment_free(struct ip_set *set, void *ptr)
	struct ip_set_comment *comment = ptr;
	struct ip_set_comment_rcu *c;

	c = rcu_dereference_protected(comment->c, 1);

	set->ext_size -= sizeof(*c) + strlen(c->str) + 1;
	rcu_assign_pointer(comment->c, NULL);
typedef void (*destroyer)(struct ip_set *, void *);
/* ipset data extension types, in size order */

/* Table describing each optional per-element extension: its type bit,
 * the userspace create flag that enables it, storage size/alignment,
 * and an optional destructor (only the comment extension needs one).
 */
const struct ip_set_ext_type ip_set_extensions[] = {
	[IPSET_EXT_ID_COUNTER] = {
		.type	= IPSET_EXT_COUNTER,
		.flag	= IPSET_FLAG_WITH_COUNTERS,
		.len	= sizeof(struct ip_set_counter),
		.align	= __alignof__(struct ip_set_counter),
	[IPSET_EXT_ID_TIMEOUT] = {
		.type	= IPSET_EXT_TIMEOUT,
		.len	= sizeof(unsigned long),
		.align	= __alignof__(unsigned long),
	[IPSET_EXT_ID_SKBINFO] = {
		.type	= IPSET_EXT_SKBINFO,
		.flag	= IPSET_FLAG_WITH_SKBINFO,
		.len	= sizeof(struct ip_set_skbinfo),
		.align	= __alignof__(struct ip_set_skbinfo),
	[IPSET_EXT_ID_COMMENT] = {
		.type	 = IPSET_EXT_COMMENT | IPSET_EXT_DESTROY,
		.flag	 = IPSET_FLAG_WITH_COMMENT,
		.len	 = sizeof(struct ip_set_comment),
		.align	 = __alignof__(struct ip_set_comment),
		.destroy = ip_set_comment_free,
EXPORT_SYMBOL_GPL(ip_set_extensions);

/* Is extension 'id' requested? Flagged extensions are selected via the
 * cadt flags; the timeout extension via the IPSET_ATTR_TIMEOUT attr.
 */
add_extension(enum ip_set_ext_id id, u32 flags, struct nlattr *tb[])
	return ip_set_extensions[id].flag ?
		(flags & ip_set_extensions[id].flag) :
		!!tb[IPSET_ATTR_TIMEOUT];
/* Compute the full element length including all requested extensions,
 * recording each extension's offset in set->offset[] and its type bit in
 * set->extensions. 'len' is the base (type-specific) element size.
 * NOTE(review): return type, braces and the declarations of cadt_flags
 * and 'align' (with their initial values) are elided in this excerpt.
 */
ip_set_elem_len(struct ip_set *set, struct nlattr *tb[], size_t len,
	enum ip_set_ext_id id;

	if (tb[IPSET_ATTR_CADT_FLAGS])
		cadt_flags = ip_set_get_h32(tb[IPSET_ATTR_CADT_FLAGS]);
	if (cadt_flags & IPSET_FLAG_WITH_FORCEADD)
		set->flags |= IPSET_CREATE_FLAG_FORCEADD;

	for (id = 0; id < IPSET_EXT_ID_MAX; id++) {
		if (!add_extension(id, cadt_flags, tb))
		/* track the largest alignment for the final padding */
		if (align < ip_set_extensions[id].align)
			align = ip_set_extensions[id].align;
		len = ALIGN(len, ip_set_extensions[id].align);
		set->offset[id] = len;
		set->extensions |= ip_set_extensions[id].type;
		len += ip_set_extensions[id].len;
	return ALIGN(len, align);
EXPORT_SYMBOL_GPL(ip_set_elem_len);
/* Parse the extension-related netlink attributes of an uadd/udel request
 * into 'ext', rejecting extensions the set was not created with.
 * NOTE(review): return type, braces, the fullmark declaration and the
 * final 'return 0;' are elided in this excerpt.
 */
ip_set_get_extensions(struct ip_set *set, struct nlattr *tb[],
		      struct ip_set_ext *ext)
	/* All extension attributes must be in network byte order */
	if (unlikely(!ip_set_optattr_netorder(tb, IPSET_ATTR_TIMEOUT) ||
		     !ip_set_optattr_netorder(tb, IPSET_ATTR_PACKETS) ||
		     !ip_set_optattr_netorder(tb, IPSET_ATTR_BYTES) ||
		     !ip_set_optattr_netorder(tb, IPSET_ATTR_SKBMARK) ||
		     !ip_set_optattr_netorder(tb, IPSET_ATTR_SKBPRIO) ||
		     !ip_set_optattr_netorder(tb, IPSET_ATTR_SKBQUEUE)))
		return -IPSET_ERR_PROTOCOL;

	if (tb[IPSET_ATTR_TIMEOUT]) {
		if (!SET_WITH_TIMEOUT(set))
			return -IPSET_ERR_TIMEOUT;
		ext->timeout = ip_set_timeout_uget(tb[IPSET_ATTR_TIMEOUT]);
	if (tb[IPSET_ATTR_BYTES] || tb[IPSET_ATTR_PACKETS]) {
		if (!SET_WITH_COUNTER(set))
			return -IPSET_ERR_COUNTER;
		if (tb[IPSET_ATTR_BYTES])
			ext->bytes = be64_to_cpu(nla_get_be64(
						 tb[IPSET_ATTR_BYTES]));
		if (tb[IPSET_ATTR_PACKETS])
			ext->packets = be64_to_cpu(nla_get_be64(
						   tb[IPSET_ATTR_PACKETS]));
	if (tb[IPSET_ATTR_COMMENT]) {
		if (!SET_WITH_COMMENT(set))
			return -IPSET_ERR_COMMENT;
		ext->comment = ip_set_comment_uget(tb[IPSET_ATTR_COMMENT]);
	if (tb[IPSET_ATTR_SKBMARK]) {
		if (!SET_WITH_SKBINFO(set))
			return -IPSET_ERR_SKBINFO;
		/* u64 attribute packs mark in the high and mask in the low
		 * 32 bits
		 */
		fullmark = be64_to_cpu(nla_get_be64(tb[IPSET_ATTR_SKBMARK]));
		ext->skbinfo.skbmark = fullmark >> 32;
		ext->skbinfo.skbmarkmask = fullmark & 0xffffffff;
	if (tb[IPSET_ATTR_SKBPRIO]) {
		if (!SET_WITH_SKBINFO(set))
			return -IPSET_ERR_SKBINFO;
		ext->skbinfo.skbprio =
			be32_to_cpu(nla_get_be32(tb[IPSET_ATTR_SKBPRIO]));
	if (tb[IPSET_ATTR_SKBQUEUE]) {
		if (!SET_WITH_SKBINFO(set))
			return -IPSET_ERR_SKBINFO;
		ext->skbinfo.skbqueue =
			be16_to_cpu(nla_get_be16(tb[IPSET_ATTR_SKBQUEUE]));
EXPORT_SYMBOL_GPL(ip_set_get_extensions);
/* Atomic readers of the per-element byte/packet counters.
 * NOTE(review): return type lines and braces are elided in this excerpt.
 */
ip_set_get_bytes(const struct ip_set_counter *counter)
	return (u64)atomic64_read(&(counter)->bytes);

ip_set_get_packets(const struct ip_set_counter *counter)
	return (u64)atomic64_read(&(counter)->packets);

/* Dump both counters as big-endian u64 netlink attributes (the padding
 * attribute arguments are elided in this excerpt).
 */
ip_set_put_counter(struct sk_buff *skb, const struct ip_set_counter *counter)
	return nla_put_net64(skb, IPSET_ATTR_BYTES,
			     cpu_to_be64(ip_set_get_bytes(counter)),
	       nla_put_net64(skb, IPSET_ATTR_PACKETS,
			     cpu_to_be64(ip_set_get_packets(counter)),

/* Dump skbinfo extension data; each field is emitted only if nonzero. */
ip_set_put_skbinfo(struct sk_buff *skb, const struct ip_set_skbinfo *skbinfo)
	/* Send nonzero parameters only */
	return ((skbinfo->skbmark || skbinfo->skbmarkmask) &&
		nla_put_net64(skb, IPSET_ATTR_SKBMARK,
			      cpu_to_be64((u64)skbinfo->skbmark << 32 |
					  skbinfo->skbmarkmask),
		nla_put_net32(skb, IPSET_ATTR_SKBPRIO,
			      cpu_to_be32(skbinfo->skbprio))) ||
	       (skbinfo->skbqueue &&
		nla_put_net16(skb, IPSET_ATTR_SKBQUEUE,
			      cpu_to_be16(skbinfo->skbqueue)));
/* Dump all extensions of element 'e' into the netlink message. 'active'
 * selects whether the remaining or the original timeout is reported.
 * NOTE(review): return type, braces and the nla_put_failure handling
 * are elided in this excerpt.
 */
ip_set_put_extensions(struct sk_buff *skb, const struct ip_set *set,
		      const void *e, bool active)
	if (SET_WITH_TIMEOUT(set)) {
		unsigned long *timeout = ext_timeout(e, set);

		if (nla_put_net32(skb, IPSET_ATTR_TIMEOUT,
			htonl(active ? ip_set_timeout_get(timeout)
	if (SET_WITH_COUNTER(set) &&
	    ip_set_put_counter(skb, ext_counter(e, set)))
	if (SET_WITH_COMMENT(set) &&
	    ip_set_put_comment(skb, ext_comment(e, set)))
	if (SET_WITH_SKBINFO(set) &&
	    ip_set_put_skbinfo(skb, ext_skbinfo(e, set)))
EXPORT_SYMBOL_GPL(ip_set_put_extensions);
/* Compare a counter value against a userspace-supplied match using the
 * requested operator. NOTE(review): the 'switch (op)' line, braces and
 * per-case returns for NONE/default are elided in this excerpt.
 */
ip_set_match_counter(u64 counter, u64 match, u8 op)
	case IPSET_COUNTER_NONE:
	case IPSET_COUNTER_EQ:
		return counter == match;
	case IPSET_COUNTER_NE:
		return counter != match;
	case IPSET_COUNTER_LT:
		return counter < match;
	case IPSET_COUNTER_GT:
		return counter > match;

/* Atomic counter increments (braces/return types elided in excerpt) */
ip_set_add_bytes(u64 bytes, struct ip_set_counter *counter)
	atomic64_add((long long)bytes, &(counter)->bytes);

ip_set_add_packets(u64 packets, struct ip_set_counter *counter)
	atomic64_add((long long)packets, &(counter)->packets);

/* Account a matching packet into the element's counters, unless the
 * caller asked to skip the update or ext carries no packet data.
 */
ip_set_update_counter(struct ip_set_counter *counter,
		      const struct ip_set_ext *ext, u32 flags)
	if (ext->packets != ULLONG_MAX &&
	    !(flags & IPSET_FLAG_SKIP_COUNTER_UPDATE)) {
		ip_set_add_bytes(ext->bytes, counter);
		ip_set_add_packets(ext->packets, counter);

/* Copy the element's skbinfo data into the match extension result. */
ip_set_get_skbinfo(struct ip_set_skbinfo *skbinfo,
		   const struct ip_set_ext *ext,
		   struct ip_set_ext *mext, u32 flags)
	mext->skbinfo = *skbinfo;
/* Decide whether element 'data' matches, taking the timeout, counter
 * and skbinfo extensions into account; updates counters as a side effect.
 * NOTE(review): return type, braces and the return statements for the
 * expired/no-match/match outcomes are elided in this excerpt.
 */
ip_set_match_extensions(struct ip_set *set, const struct ip_set_ext *ext,
			struct ip_set_ext *mext, u32 flags, void *data)
	if (SET_WITH_TIMEOUT(set) &&
	    ip_set_timeout_expired(ext_timeout(data, set)))
	if (SET_WITH_COUNTER(set)) {
		struct ip_set_counter *counter = ext_counter(data, set);

		ip_set_update_counter(counter, ext, flags);

		if (flags & IPSET_FLAG_MATCH_COUNTERS &&
		    !(ip_set_match_counter(ip_set_get_packets(counter),
				mext->packets, mext->packets_op) &&
		      ip_set_match_counter(ip_set_get_bytes(counter),
				mext->bytes, mext->bytes_op)))
	if (SET_WITH_SKBINFO(set))
		ip_set_get_skbinfo(ext_skbinfo(data, set),
EXPORT_SYMBOL_GPL(ip_set_match_extensions);
/* Creating/destroying/renaming/swapping affect the existence and
 * the properties of a set. All of these can be executed from userspace
 * only and serialized by the nfnl mutex indirectly from nfnetlink.
 *
 * Sets are identified by their index in ip_set_list and the index
 * is used by the external references (set/SET netfilter modules).
 *
 * The set behind an index may change by swapping only, from userspace.
 */

/* Take/drop references under ip_set_ref_lock; the actual ref++/ref--
 * statements are elided in this excerpt.
 */
__ip_set_get(struct ip_set *set)
	write_lock_bh(&ip_set_ref_lock);
	write_unlock_bh(&ip_set_ref_lock);

__ip_set_put(struct ip_set *set)
	write_lock_bh(&ip_set_ref_lock);
	BUG_ON(set->ref == 0);
	write_unlock_bh(&ip_set_ref_lock);

/* set->ref can be swapped out by ip_set_swap, netlink events (like dump) need
 * a separate reference counter
 */
__ip_set_put_netlink(struct ip_set *set)
	write_lock_bh(&ip_set_ref_lock);
	BUG_ON(set->ref_netlink == 0);
	write_unlock_bh(&ip_set_ref_lock);
/* Add, del and test set entries from kernel.
 *
 * The set behind the index must exist and must be referenced
 * so it can't be destroyed (or changed) under our foot.
 */
/* RCU lookup of the set at 'index' in this namespace (the surrounding
 * rcu_read_lock/unlock and return are elided in this excerpt).
 */
static struct ip_set *
ip_set_rcu_get(struct net *net, ip_set_id_t index)
	struct ip_set_net *inst = ip_set_pernet(net);

	/* ip_set_list itself needs to be protected */
	set = rcu_dereference(inst->ip_set_list)[index];

/* Set types managing their own region locks skip the per-set spinlock */
ip_set_lock(struct ip_set *set)
	if (!set->variant->region_lock)
		spin_lock_bh(&set->lock);

ip_set_unlock(struct ip_set *set)
	if (!set->variant->region_lock)
		spin_unlock_bh(&set->lock);
/* Test the packet against set 'index' from a netfilter match/target.
 * Returns >0 on match, 0 on no-match (errors are folded into 0 below).
 * NOTE(review): return type, braces, the rcu_read_lock_bh() and several
 * early-return/ret-assignment lines are elided in this excerpt.
 */
ip_set_test(ip_set_id_t index, const struct sk_buff *skb,
	    const struct xt_action_param *par, struct ip_set_adt_opt *opt)
	struct ip_set *set = ip_set_rcu_get(xt_net(par), index);

	pr_debug("set %s, index %u\n", set->name, index);

	/* Dimension/family of the match must be compatible with the set */
	if (opt->dim < set->type->dimension ||
	    !(opt->family == set->family || set->family == NFPROTO_UNSPEC))

	ret = set->variant->kadt(set, skb, par, IPSET_TEST, opt);
	rcu_read_unlock_bh();

	if (ret == -EAGAIN) {
		/* Type requests element to be completed */
		pr_debug("element must be completed, ADD is triggered\n");
		set->variant->kadt(set, skb, par, IPSET_ADD, opt);

	/* --return-nomatch: invert matched element */
	if ((opt->cmdflags & IPSET_FLAG_RETURN_NOMATCH) &&
	    (set->type->features & IPSET_TYPE_NOMATCH) &&
	    (ret > 0 || ret == -ENOTEMPTY))

	/* Convert error codes to nomatch */
	return (ret < 0 ? 0 : ret);
EXPORT_SYMBOL_GPL(ip_set_test);

/* Add the packet to set 'index' from kernel context; same locking and
 * elision caveats as ip_set_test() above.
 */
ip_set_add(ip_set_id_t index, const struct sk_buff *skb,
	   const struct xt_action_param *par, struct ip_set_adt_opt *opt)
	struct ip_set *set = ip_set_rcu_get(xt_net(par), index);

	pr_debug("set %s, index %u\n", set->name, index);

	if (opt->dim < set->type->dimension ||
	    !(opt->family == set->family || set->family == NFPROTO_UNSPEC))
		return -IPSET_ERR_TYPE_MISMATCH;

	ret = set->variant->kadt(set, skb, par, IPSET_ADD, opt);
EXPORT_SYMBOL_GPL(ip_set_add);

/* Delete the packet's entry from set 'index'; mirror of ip_set_add(). */
ip_set_del(ip_set_id_t index, const struct sk_buff *skb,
	   const struct xt_action_param *par, struct ip_set_adt_opt *opt)
	struct ip_set *set = ip_set_rcu_get(xt_net(par), index);

	pr_debug("set %s, index %u\n", set->name, index);

	if (opt->dim < set->type->dimension ||
	    !(opt->family == set->family || set->family == NFPROTO_UNSPEC))
		return -IPSET_ERR_TYPE_MISMATCH;

	ret = set->variant->kadt(set, skb, par, IPSET_DEL, opt);
EXPORT_SYMBOL_GPL(ip_set_del);
/* Find set by name, reference it once. The reference makes sure the
 * thing pointed to, does not go away under our feet.
 */
/* Linear scan of the per-net array under RCU; on a name match the set is
 * referenced via __ip_set_get(). NOTE(review): return type, braces, the
 * rcu_read_lock() and the match/return lines are elided in this excerpt.
 */
ip_set_get_byname(struct net *net, const char *name, struct ip_set **set)
	ip_set_id_t i, index = IPSET_INVALID_ID;
	struct ip_set_net *inst = ip_set_pernet(net);

	for (i = 0; i < inst->ip_set_max; i++) {
		s = rcu_dereference(inst->ip_set_list)[i];
		if (s && STRNCMP(s->name, name)) {
EXPORT_SYMBOL_GPL(ip_set_get_byname);

/* If the given set pointer points to a valid set, decrement
 * reference count by 1. The caller shall not assume the index
 * to be valid, after calling this function.
 */
/* Internal helper: drop one reference on the set at 'index' (the
 * __ip_set_put() call is elided in this excerpt).
 */
__ip_set_put_byindex(struct ip_set_net *inst, ip_set_id_t index)
	set = rcu_dereference(inst->ip_set_list)[index];

/* Public wrapper resolving the per-net instance first */
ip_set_put_byindex(struct net *net, ip_set_id_t index)
	struct ip_set_net *inst = ip_set_pernet(net);

	__ip_set_put_byindex(inst, index);
EXPORT_SYMBOL_GPL(ip_set_put_byindex);

/* Get the name of a set behind a set index.
 * Set itself is protected by RCU, but its name isn't: to protect against
 * renaming, grab ip_set_ref_lock as reader (see ip_set_rename()) and copy the
 * name.
 */
ip_set_name_byindex(struct net *net, ip_set_id_t index, char *name)
	struct ip_set *set = ip_set_rcu_get(net, index);

	read_lock_bh(&ip_set_ref_lock);
	strncpy(name, set->name, IPSET_MAXNAMELEN);
	read_unlock_bh(&ip_set_ref_lock);
EXPORT_SYMBOL_GPL(ip_set_name_byindex);
/* Routines to call by external subsystems, which do not
 * call nfnl_lock for us.
 */

/* Find set by index, reference it once. The reference makes sure the
 * thing pointed to, does not go away under our feet.
 *
 * The nfnl mutex is used in the function.
 */
/* Returns the validated index or IPSET_INVALID_ID. NOTE(review): return
 * type, braces and the reference-taking branch are elided here.
 */
ip_set_nfnl_get_byindex(struct net *net, ip_set_id_t index)
	struct ip_set_net *inst = ip_set_pernet(net);

	if (index >= inst->ip_set_max)
		return IPSET_INVALID_ID;

	nfnl_lock(NFNL_SUBSYS_IPSET);
	set = ip_set(inst, index);
		index = IPSET_INVALID_ID;
	nfnl_unlock(NFNL_SUBSYS_IPSET);
EXPORT_SYMBOL_GPL(ip_set_nfnl_get_byindex);

/* If the given set pointer points to a valid set, decrement
 * reference count by 1. The caller shall not assume the index
 * to be valid, after calling this function.
 *
 * The nfnl mutex is used in the function.
 */
ip_set_nfnl_put(struct net *net, ip_set_id_t index)
	struct ip_set_net *inst = ip_set_pernet(net);

	nfnl_lock(NFNL_SUBSYS_IPSET);
	/* Skip if the pernet state was already torn down */
	if (!inst->is_deleted) { /* already deleted from ip_set_net_exit() */
		set = ip_set(inst, index);
	nfnl_unlock(NFNL_SUBSYS_IPSET);
EXPORT_SYMBOL_GPL(ip_set_nfnl_put);
/* Communication protocol with userspace over netlink.
 *
 * The commands are serialized by the nfnl mutex.
 */

/* Caller must ensure tb[IPSET_ATTR_PROTOCOL] is present (see users) */
static inline u8 protocol(const struct nlattr * const tb[])
	return nla_get_u8(tb[IPSET_ATTR_PROTOCOL]);

/* True if the request's protocol attr is absent or not an exact match */
protocol_failed(const struct nlattr * const tb[])
	return !tb[IPSET_ATTR_PROTOCOL] || protocol(tb) != IPSET_PROTOCOL;

/* Looser check: accept any protocol >= the minimum we support */
protocol_min_failed(const struct nlattr * const tb[])
	return !tb[IPSET_ATTR_PROTOCOL] || protocol(tb) < IPSET_PROTOCOL_MIN;

/* NLM_F_EXCL absent means "ignore already-exists" semantics */
flag_exist(const struct nlmsghdr *nlh)
	return nlh->nlmsg_flags & NLM_F_EXCL ? 0 : IPSET_FLAG_EXIST;

/* Begin an nfnetlink reply/notification message of the given command.
 * NOTE(review): braces, the NULL check on nlmsg_put() and the trailing
 * res_id/return lines are elided in this excerpt.
 */
static struct nlmsghdr *
start_msg(struct sk_buff *skb, u32 portid, u32 seq, unsigned int flags,
	struct nlmsghdr *nlh;
	struct nfgenmsg *nfmsg;

	nlh = nlmsg_put(skb, portid, seq, nfnl_msg_type(NFNL_SUBSYS_IPSET, cmd),
			sizeof(*nfmsg), flags);

	nfmsg = nlmsg_data(nlh);
	nfmsg->nfgen_family = NFPROTO_IPV4;
	nfmsg->version = NFNETLINK_V0;
static const struct nla_policy ip_set_create_policy[IPSET_ATTR_CMD_MAX + 1] = {
	[IPSET_ATTR_PROTOCOL]	= { .type = NLA_U8 },
	[IPSET_ATTR_SETNAME]	= { .type = NLA_NUL_STRING,
				    .len = IPSET_MAXNAMELEN - 1 },
	[IPSET_ATTR_TYPENAME]	= { .type = NLA_NUL_STRING,
				    .len = IPSET_MAXNAMELEN - 1},
	[IPSET_ATTR_REVISION]	= { .type = NLA_U8 },
	[IPSET_ATTR_FAMILY]	= { .type = NLA_U8 },
	[IPSET_ATTR_DATA]	= { .type = NLA_NESTED },

/* Locate a set by name; on success *id is its slot index, otherwise
 * *id stays IPSET_INVALID_ID and NULL is returned. Caller must hold the
 * nfnl mutex or ip_set_ref_lock (required by the ip_set() accessor).
 * NOTE(review): braces and the loop-exit lines are elided here.
 */
static struct ip_set *
find_set_and_id(struct ip_set_net *inst, const char *name, ip_set_id_t *id)
	struct ip_set *set = NULL;

	*id = IPSET_INVALID_ID;
	for (i = 0; i < inst->ip_set_max; i++) {
		set = ip_set(inst, i);
		if (set && STRNCMP(set->name, name)) {
	return (*id == IPSET_INVALID_ID ? NULL : set);

/* Convenience wrapper discarding the slot index */
static inline struct ip_set *
find_set(struct ip_set_net *inst, const char *name)
	return find_set_and_id(inst, name, &id);
/* Find the first free slot for a new set named 'name'. Returns
 * -IPSET_ERR_MAX_SETS if the array is full; a clash with an existing
 * name is reported through *set (the -EEXIST branch is elided here).
 * NOTE(review): return type, braces and several branch lines are elided
 * in this excerpt.
 */
find_free_id(struct ip_set_net *inst, const char *name, ip_set_id_t *index,
	     struct ip_set **set)
	*index = IPSET_INVALID_ID;
	for (i = 0; i < inst->ip_set_max; i++) {
		s = ip_set(inst, i);
			/* remember the first empty slot only */
			if (*index == IPSET_INVALID_ID)
		} else if (STRNCMP(name, s->name)) {
	if (*index == IPSET_INVALID_ID)
		/* No free slot remained */
		return -IPSET_ERR_MAX_SETS;

/* IPSET_CMD_NONE: deliberate no-op command handler */
static int ip_set_none(struct net *net, struct sock *ctnl, struct sk_buff *skb,
		       const struct nlmsghdr *nlh,
		       const struct nlattr * const attr[],
		       struct netlink_ext_ack *extack)
/* IPSET_CMD_CREATE handler: allocate and register a new set.
 * NOTE(review): multiple lines (error labels, ret checks, kfree paths,
 * closing braces) are elided in this excerpt; the visible control flow
 * is annotated below.
 */
static int ip_set_create(struct net *net, struct sock *ctnl,
			 struct sk_buff *skb, const struct nlmsghdr *nlh,
			 const struct nlattr * const attr[],
			 struct netlink_ext_ack *extack)
	struct ip_set_net *inst = ip_set_pernet(net);
	struct ip_set *set, *clash = NULL;
	ip_set_id_t index = IPSET_INVALID_ID;
	struct nlattr *tb[IPSET_ATTR_CREATE_MAX + 1] = {};
	const char *name, *typename;
	u8 family, revision;
	u32 flags = flag_exist(nlh);

	/* Mandatory attributes; DATA, if present, must be nested */
	if (unlikely(protocol_min_failed(attr) ||
		     !attr[IPSET_ATTR_SETNAME] ||
		     !attr[IPSET_ATTR_TYPENAME] ||
		     !attr[IPSET_ATTR_REVISION] ||
		     !attr[IPSET_ATTR_FAMILY] ||
		     (attr[IPSET_ATTR_DATA] &&
		      !flag_nested(attr[IPSET_ATTR_DATA]))))
		return -IPSET_ERR_PROTOCOL;

	name = nla_data(attr[IPSET_ATTR_SETNAME]);
	typename = nla_data(attr[IPSET_ATTR_TYPENAME]);
	family = nla_get_u8(attr[IPSET_ATTR_FAMILY]);
	revision = nla_get_u8(attr[IPSET_ATTR_REVISION]);
	pr_debug("setname: %s, typename: %s, family: %s, revision: %u\n",
		 name, typename, family_name(family), revision);

	/* First, and without any locks, allocate and initialize
	 * a normal base set structure.
	 */
	set = kzalloc(sizeof(*set), GFP_KERNEL);
	spin_lock_init(&set->lock);
	strlcpy(set->name, name, IPSET_MAXNAMELEN);
	set->family = family;
	set->revision = revision;

	/* Next, check that we know the type, and take
	 * a reference on the type, to make sure it stays available
	 * while constructing our new set.
	 *
	 * After referencing the type, we try to create the type
	 * specific part of the set without holding any locks.
	 */
	ret = find_set_type_get(typename, family, revision, &set->type);

	/* Without holding any locks, create private part. */
	if (attr[IPSET_ATTR_DATA] &&
	    nla_parse_nested(tb, IPSET_ATTR_CREATE_MAX, attr[IPSET_ATTR_DATA],
			     set->type->create_policy, NULL)) {
		ret = -IPSET_ERR_PROTOCOL;

	ret = set->type->create(net, set, tb, flags);

	/* BTW, ret==0 here. */

	/* Here, we have a valid, constructed set and we are protected
	 * by the nfnl mutex. Find the first free index in ip_set_list
	 * and check clashing.
	 */
	ret = find_free_id(inst, set->name, &index, &clash);
	if (ret == -EEXIST) {
		/* If this is the same set and requested, ignore error */
		if ((flags & IPSET_FLAG_EXIST) &&
		    STRNCMP(set->type->name, clash->type->name) &&
		    set->type->family == clash->type->family &&
		    set->type->revision_min == clash->type->revision_min &&
		    set->type->revision_max == clash->type->revision_max &&
		    set->variant->same_set(set, clash))
	} else if (ret == -IPSET_ERR_MAX_SETS) {
		/* Array full: grow ip_set_list by IP_SET_INC slots */
		struct ip_set **list, **tmp;
		ip_set_id_t i = inst->ip_set_max + IP_SET_INC;

		/* guard against ip_set_id_t overflow */
		if (i < inst->ip_set_max || i == IPSET_INVALID_ID)

		list = kvcalloc(i, sizeof(struct ip_set *), GFP_KERNEL);
		/* nfnl mutex is held, both lists are valid */
		tmp = ip_set_dereference(inst->ip_set_list);
		memcpy(list, tmp, sizeof(struct ip_set *) * inst->ip_set_max);
		rcu_assign_pointer(inst->ip_set_list, list);
		/* Make sure all current packets have passed through */
		index = inst->ip_set_max;
		inst->ip_set_max = i;

	/* Finally! Add our shiny new set to the list, and be done. */
	pr_debug("create: '%s' created with index %u!\n", set->name, index);
	ip_set(inst, index) = set;

	/* error unwinding: destroy the private part, drop the type ref */
	set->variant->destroy(set);
	module_put(set->type->me);
static const struct nla_policy
ip_set_setname_policy[IPSET_ATTR_CMD_MAX + 1] = {
	[IPSET_ATTR_PROTOCOL]	= { .type = NLA_U8 },
	[IPSET_ATTR_SETNAME]	= { .type = NLA_NUL_STRING,
				    .len = IPSET_MAXNAMELEN - 1 },

/* Tear down a set: type-specific destroy, then drop the type module ref.
 * NOTE(review): return type, braces and the kfree(set) are elided here.
 */
ip_set_destroy_set(struct ip_set *set)
	pr_debug("set: %s\n", set->name);

	/* Must call it without holding any lock */
	set->variant->destroy(set);
	module_put(set->type->me);
/* IPSET_CMD_DESTROY handler: destroy one named set, or all sets when no
 * SETNAME attribute is given. Busy (referenced) sets are refused.
 * NOTE(review): several lines (variable declarations, 'goto out' labels,
 * closing braces, final return) are elided in this excerpt.
 */
static int ip_set_destroy(struct net *net, struct sock *ctnl,
			  struct sk_buff *skb, const struct nlmsghdr *nlh,
			  const struct nlattr * const attr[],
			  struct netlink_ext_ack *extack)
	struct ip_set_net *inst = ip_set_pernet(net);

	if (unlikely(protocol_min_failed(attr)))
		return -IPSET_ERR_PROTOCOL;

	/* Must wait for flush to be really finished in list:set */

	/* Commands are serialized and references are
	 * protected by the ip_set_ref_lock.
	 * External systems (i.e. xt_set) must call
	 * ip_set_put|get_nfnl_* functions, that way we
	 * can safely check references here.
	 *
	 * list:set timer can only decrement the reference
	 * counter, so if it's already zero, we can proceed
	 * without holding the lock.
	 */
	read_lock_bh(&ip_set_ref_lock);
	if (!attr[IPSET_ATTR_SETNAME]) {
		/* destroy-all: first verify nothing is referenced */
		for (i = 0; i < inst->ip_set_max; i++) {
			s = ip_set(inst, i);
			if (s && (s->ref || s->ref_netlink)) {
				ret = -IPSET_ERR_BUSY;
		inst->is_destroyed = true;
		read_unlock_bh(&ip_set_ref_lock);
		for (i = 0; i < inst->ip_set_max; i++) {
			s = ip_set(inst, i);
				ip_set(inst, i) = NULL;
				ip_set_destroy_set(s);
		/* Modified by ip_set_destroy() only, which is serialized */
		inst->is_destroyed = false;
		/* single-set destroy path */
		s = find_set_and_id(inst, nla_data(attr[IPSET_ATTR_SETNAME]),
		} else if (s->ref || s->ref_netlink) {
			ret = -IPSET_ERR_BUSY;
		ip_set(inst, i) = NULL;
		read_unlock_bh(&ip_set_ref_lock);

		ip_set_destroy_set(s);
	/* error path: drop the read lock before returning */
	read_unlock_bh(&ip_set_ref_lock);
/* Empty one set under its lock (the ip_set_lock/unlock pair around the
 * flush call is partly elided in this excerpt).
 */
ip_set_flush_set(struct ip_set *set)
	pr_debug("set: %s\n", set->name);

	set->variant->flush(set);

/* IPSET_CMD_FLUSH handler: flush one named set, or every set when no
 * SETNAME attribute is present. NOTE(review): declarations, the
 * not-found return and closing braces are elided here.
 */
static int ip_set_flush(struct net *net, struct sock *ctnl, struct sk_buff *skb,
			const struct nlmsghdr *nlh,
			const struct nlattr * const attr[],
			struct netlink_ext_ack *extack)
	struct ip_set_net *inst = ip_set_pernet(net);

	if (unlikely(protocol_min_failed(attr)))
		return -IPSET_ERR_PROTOCOL;

	if (!attr[IPSET_ATTR_SETNAME]) {
		for (i = 0; i < inst->ip_set_max; i++) {
			s = ip_set(inst, i);
				ip_set_flush_set(s);
		s = find_set(inst, nla_data(attr[IPSET_ATTR_SETNAME]));

		ip_set_flush_set(s);

static const struct nla_policy
ip_set_setname2_policy[IPSET_ATTR_CMD_MAX + 1] = {
	[IPSET_ATTR_PROTOCOL]	= { .type = NLA_U8 },
	[IPSET_ATTR_SETNAME]	= { .type = NLA_NUL_STRING,
				    .len = IPSET_MAXNAMELEN - 1 },
	[IPSET_ATTR_SETNAME2]	= { .type = NLA_NUL_STRING,
				    .len = IPSET_MAXNAMELEN - 1 },
/* IPSET_CMD_RENAME handler: rename a set that has no kernel or netlink
 * references; the new name must not clash with an existing set. The
 * write lock on ip_set_ref_lock is what makes ip_set_name_byindex()'s
 * read-side copy safe. NOTE(review): declarations, 'goto out' lines and
 * the final return are elided in this excerpt.
 */
static int ip_set_rename(struct net *net, struct sock *ctnl,
			 struct sk_buff *skb, const struct nlmsghdr *nlh,
			 const struct nlattr * const attr[],
			 struct netlink_ext_ack *extack)
	struct ip_set_net *inst = ip_set_pernet(net);
	struct ip_set *set, *s;

	if (unlikely(protocol_min_failed(attr) ||
		     !attr[IPSET_ATTR_SETNAME] ||
		     !attr[IPSET_ATTR_SETNAME2]))
		return -IPSET_ERR_PROTOCOL;

	set = find_set(inst, nla_data(attr[IPSET_ATTR_SETNAME]));

	write_lock_bh(&ip_set_ref_lock);
	if (set->ref != 0 || set->ref_netlink != 0) {
		ret = -IPSET_ERR_REFERENCED;

	name2 = nla_data(attr[IPSET_ATTR_SETNAME2]);
	for (i = 0; i < inst->ip_set_max; i++) {
		s = ip_set(inst, i);
		if (s && STRNCMP(s->name, name2)) {
			ret = -IPSET_ERR_EXIST_SETNAME2;
	strncpy(set->name, name2, IPSET_MAXNAMELEN);

	write_unlock_bh(&ip_set_ref_lock);
1356 /* Swap two sets so that name/index points to the other.
1357 * References and set names are also swapped.
1359 * The commands are serialized by the nfnl mutex and references are
1360 * protected by the ip_set_ref_lock. The kernel interfaces
1361 * do not hold the mutex but the pointer settings are atomic
1362 * so the ip_set_list always contains valid pointers to the sets.
1365 static int ip_set_swap(struct net *net, struct sock *ctnl, struct sk_buff *skb,
1366 const struct nlmsghdr *nlh,
1367 const struct nlattr * const attr[],
1368 struct netlink_ext_ack *extack)
1370 struct ip_set_net *inst = ip_set_pernet(net);
1371 struct ip_set *from, *to;
1372 ip_set_id_t from_id, to_id;
1373 char from_name[IPSET_MAXNAMELEN];
1375 if (unlikely(protocol_min_failed(attr) ||
1376 !attr[IPSET_ATTR_SETNAME] ||
1377 !attr[IPSET_ATTR_SETNAME2]))
1378 return -IPSET_ERR_PROTOCOL;
1380 from = find_set_and_id(inst, nla_data(attr[IPSET_ATTR_SETNAME]),
1385 to = find_set_and_id(inst, nla_data(attr[IPSET_ATTR_SETNAME2]),
1388 return -IPSET_ERR_EXIST_SETNAME2;
1390 /* Features must not change.
1391 * Not an artificial restriction anymore, as we must prevent
1392 * possible loops created by swapping in setlist type of sets.
1394 if (!(from->type->features == to->type->features &&
1395 from->family == to->family))
1396 return -IPSET_ERR_TYPE_MISMATCH;
/* A netlink dump in flight pins a set via ref_netlink; refuse the
 * swap in that case rather than pulling the set out from under it.
 */
1398 write_lock_bh(&ip_set_ref_lock);
1400 if (from->ref_netlink || to->ref_netlink) {
1401 write_unlock_bh(&ip_set_ref_lock);
/* Exchange names through a temporary, then swap the kernel refcounts
 * and the list slots, all under the ref lock.
 */
1405 strncpy(from_name, from->name, IPSET_MAXNAMELEN);
1406 strncpy(from->name, to->name, IPSET_MAXNAMELEN);
1407 strncpy(to->name, from_name, IPSET_MAXNAMELEN);
1409 swap(from->ref, to->ref);
1410 ip_set(inst, from_id) = to;
1411 ip_set(inst, to_id) = from;
1412 write_unlock_bh(&ip_set_ref_lock);
1417 /* List/save set data */
/* cb->args[IPSET_CB_DUMP] packs the dump type into the low 16 bits
 * and the dump flags into the high 16 bits.
 */
1424 #define DUMP_TYPE(arg) (((u32)(arg)) & 0x0000FFFF)
1425 #define DUMP_FLAGS(arg) (((u32)(arg)) >> 16)
/* Emit the common per-set option attributes into a LIST/SAVE message:
 * the timeout (when the set has one) and the cadt feature flags
 * (counters, comment, skbinfo, forceadd).  Returns nonzero if the
 * skb ran out of room for an attribute.
 */
1428 ip_set_put_flags(struct sk_buff *skb, struct ip_set *set)
1432 if (SET_WITH_TIMEOUT(set))
1433 if (unlikely(nla_put_net32(skb, IPSET_ATTR_TIMEOUT,
1434 htonl(set->timeout))))
1436 if (SET_WITH_COUNTER(set))
1437 cadt_flags |= IPSET_FLAG_WITH_COUNTERS;
1438 if (SET_WITH_COMMENT(set))
1439 cadt_flags |= IPSET_FLAG_WITH_COMMENT;
1440 if (SET_WITH_SKBINFO(set))
1441 cadt_flags |= IPSET_FLAG_WITH_SKBINFO;
1442 if (SET_WITH_FORCEADD(set))
1443 cadt_flags |= IPSET_FLAG_WITH_FORCEADD;
1447 return nla_put_net32(skb, IPSET_ATTR_CADT_FLAGS, htonl(cadt_flags));
1449 EXPORT_SYMBOL_GPL(ip_set_put_flags);
/* netlink dump .done callback.  If a dump was still in progress
 * (args[IPSET_CB_ARG0] nonzero), tell the variant the userspace
 * reference is gone and drop the netlink refcount taken when the
 * dump of that set started.
 */
1452 ip_set_dump_done(struct netlink_callback *cb)
1454 if (cb->args[IPSET_CB_ARG0]) {
1455 struct ip_set_net *inst =
1456 (struct ip_set_net *)cb->args[IPSET_CB_NET];
1457 ip_set_id_t index = (ip_set_id_t)cb->args[IPSET_CB_INDEX];
1458 struct ip_set *set = ip_set_ref_netlink(inst, index);
1460 if (set->variant->uref)
1461 set->variant->uref(set, cb, false);
1462 pr_debug("release set %s\n", set->name);
1463 __ip_set_put_netlink(set);
/* Debug helper: pr_debug the type and length of every attribute in
 * an nfnetlink message.
 */
1469 dump_attrs(struct nlmsghdr *nlh)
1471 const struct nlattr *attr;
1474 pr_debug("dump nlmsg\n");
1475 nlmsg_for_each_attr(attr, nlh, sizeof(struct nfgenmsg), rem) {
1476 pr_debug("type: %u, len %u\n", nla_type(attr), attr->nla_len);
/* Attribute policy for the LIST (dump) command: optional set name
 * plus optional dump flags.
 */
1480 static const struct nla_policy
1481 ip_set_dump_policy[IPSET_ATTR_CMD_MAX + 1] = {
1482 [IPSET_ATTR_PROTOCOL] = { .type = NLA_U8 },
1483 [IPSET_ATTR_SETNAME] = { .type = NLA_NUL_STRING,
1484 .len = IPSET_MAXNAMELEN - 1 },
1485 [IPSET_ATTR_FLAGS] = { .type = NLA_U32 },
/* netlink dump .start callback.  Re-parses the original request from
 * cb->skb, resolves an optional set name to DUMP_ONE plus its index
 * (otherwise DUMP_ALL), and stashes protocol, pernet instance and the
 * packed dump type/flags into cb->args for ip_set_dump_do().
 */
1489 ip_set_dump_start(struct netlink_callback *cb)
1491 struct nlmsghdr *nlh = nlmsg_hdr(cb->skb);
1492 int min_len = nlmsg_total_size(sizeof(struct nfgenmsg));
1493 struct nlattr *cda[IPSET_ATTR_CMD_MAX + 1];
/* Attributes start right after the nfgenmsg header. */
1494 struct nlattr *attr = (void *)nlh + min_len;
1495 struct sk_buff *skb = cb->skb;
1496 struct ip_set_net *inst = ip_set_pernet(sock_net(skb->sk));
1500 ret = nla_parse(cda, IPSET_ATTR_CMD_MAX, attr,
1501 nlh->nlmsg_len - min_len,
1502 ip_set_dump_policy, NULL);
1506 cb->args[IPSET_CB_PROTO] = nla_get_u8(cda[IPSET_ATTR_PROTOCOL]);
1507 if (cda[IPSET_ATTR_SETNAME]) {
1511 set = find_set_and_id(inst, nla_data(cda[IPSET_ATTR_SETNAME]),
1517 dump_type = DUMP_ONE;
1518 cb->args[IPSET_CB_INDEX] = index;
1520 dump_type = DUMP_ALL;
1523 if (cda[IPSET_ATTR_FLAGS]) {
1524 u32 f = ip_set_get_h32(cda[IPSET_ATTR_FLAGS]);
/* Flags live in the high 16 bits; see DUMP_FLAGS(). */
1526 dump_type |= (f << 16);
1528 cb->args[IPSET_CB_NET] = (unsigned long)inst;
1529 cb->args[IPSET_CB_DUMP] = dump_type;
1534 /* We have to create and send the error message manually :-( */
1535 if (nlh->nlmsg_flags & NLM_F_ACK) {
1536 netlink_ack(cb->skb, nlh, ret, NULL);
/* netlink dump .dump callback, called repeatedly until it returns 0.
 * Walks the per-netns set array and serializes each set's header and
 * entries.  DUMP_ALL is a two-pass walk: non-IPSET_DUMP_LAST sets
 * first, then a DUMP_LAST pass for list-type sets (see the "sorted"
 * comment below).  cb->args[] carry all state between invocations:
 * current index, packed dump type/flags, and whether a set's dump is
 * mid-flight (IPSET_CB_ARG0).
 */
1542 ip_set_dump_do(struct sk_buff *skb, struct netlink_callback *cb)
1544 ip_set_id_t index = IPSET_INVALID_ID, max;
1545 struct ip_set *set = NULL;
1546 struct nlmsghdr *nlh = NULL;
1547 unsigned int flags = NETLINK_CB(cb->skb).portid ? NLM_F_MULTI : 0;
1548 struct ip_set_net *inst = ip_set_pernet(sock_net(skb->sk));
1549 u32 dump_type, dump_flags;
1553 if (!cb->args[IPSET_CB_DUMP])
1556 if (cb->args[IPSET_CB_INDEX] >= inst->ip_set_max)
1559 dump_type = DUMP_TYPE(cb->args[IPSET_CB_DUMP]);
1560 dump_flags = DUMP_FLAGS(cb->args[IPSET_CB_DUMP]);
/* DUMP_ONE visits exactly the requested slot; otherwise all slots. */
1561 max = dump_type == DUMP_ONE ? cb->args[IPSET_CB_INDEX] + 1
1564 pr_debug("dump type, flag: %u %u index: %ld\n",
1565 dump_type, dump_flags, cb->args[IPSET_CB_INDEX]);
1566 for (; cb->args[IPSET_CB_INDEX] < max; cb->args[IPSET_CB_INDEX]++) {
1567 index = (ip_set_id_t)cb->args[IPSET_CB_INDEX];
/* The ref lock keeps the slot and is_destroyed stable while we
 * decide whether to reference this set.
 */
1568 write_lock_bh(&ip_set_ref_lock);
1569 set = ip_set(inst, index);
1570 is_destroyed = inst->is_destroyed;
1571 if (!set || is_destroyed) {
1572 write_unlock_bh(&ip_set_ref_lock);
1573 if (dump_type == DUMP_ONE) {
1578 /* All sets are just being destroyed */
1584 /* When dumping all sets, we must dump "sorted"
1585 * so that lists (unions of sets) are dumped last.
1587 if (dump_type != DUMP_ONE &&
1588 ((dump_type == DUMP_ALL) ==
1589 !!(set->type->features & IPSET_DUMP_LAST))) {
1590 write_unlock_bh(&ip_set_ref_lock);
1593 pr_debug("List set: %s\n", set->name);
1594 if (!cb->args[IPSET_CB_ARG0]) {
1595 /* Start listing: make sure set won't be destroyed */
1596 pr_debug("reference set\n");
1599 write_unlock_bh(&ip_set_ref_lock);
1600 nlh = start_msg(skb, NETLINK_CB(cb->skb).portid,
1601 cb->nlh->nlmsg_seq, flags,
1605 goto release_refcount;
1607 if (nla_put_u8(skb, IPSET_ATTR_PROTOCOL,
1608 cb->args[IPSET_CB_PROTO]) ||
1609 nla_put_string(skb, IPSET_ATTR_SETNAME, set->name))
1610 goto nla_put_failure;
1611 if (dump_flags & IPSET_FLAG_LIST_SETNAME)
1613 switch (cb->args[IPSET_CB_ARG0]) {
1615 /* Core header data */
1616 if (nla_put_string(skb, IPSET_ATTR_TYPENAME,
1618 nla_put_u8(skb, IPSET_ATTR_FAMILY,
1620 nla_put_u8(skb, IPSET_ATTR_REVISION,
1622 goto nla_put_failure;
/* The set index attribute exists only from protocol 7 on. */
1623 if (cb->args[IPSET_CB_PROTO] > IPSET_PROTOCOL_MIN &&
1624 nla_put_net16(skb, IPSET_ATTR_INDEX, htons(index)))
1625 goto nla_put_failure;
1626 ret = set->variant->head(set, skb);
1628 goto release_refcount;
1629 if (dump_flags & IPSET_FLAG_LIST_HEADER)
1631 if (set->variant->uref)
1632 set->variant->uref(set, cb, true);
/* Fallthrough into listing the entries themselves. */
1635 ret = set->variant->list(set, skb, cb);
1636 if (!cb->args[IPSET_CB_ARG0])
1637 /* Set is done, proceed with next one */
1639 goto release_refcount;
1642 /* If we dump all sets, continue with dumping last ones */
1643 if (dump_type == DUMP_ALL) {
1644 dump_type = DUMP_LAST;
1645 cb->args[IPSET_CB_DUMP] = dump_type | (dump_flags << 16);
1646 cb->args[IPSET_CB_INDEX] = 0;
1647 if (set && set->variant->uref)
1648 set->variant->uref(set, cb, false);
1656 if (dump_type == DUMP_ONE)
1657 cb->args[IPSET_CB_INDEX] = IPSET_INVALID_ID;
1659 cb->args[IPSET_CB_INDEX]++;
1661 /* If there was an error or set is done, release set */
1662 if (ret || !cb->args[IPSET_CB_ARG0]) {
1663 set = ip_set_ref_netlink(inst, index);
1664 if (set->variant->uref)
1665 set->variant->uref(set, cb, false);
1666 pr_debug("release set %s\n", set->name);
1667 __ip_set_put_netlink(set);
1668 cb->args[IPSET_CB_ARG0] = 0;
1672 nlmsg_end(skb, nlh);
1673 pr_debug("nlmsg_len: %u\n", nlh->nlmsg_len);
/* Positive skb->len tells netlink to call us again. */
1677 return ret < 0 ? ret : skb->len;
/* LIST/SAVE command entry point: validate the protocol and hand the
 * request to the netlink dump machinery with our start/dump/done
 * callbacks.
 */
1680 static int ip_set_dump(struct net *net, struct sock *ctnl, struct sk_buff *skb,
1681 const struct nlmsghdr *nlh,
1682 const struct nlattr * const attr[],
1683 struct netlink_ext_ack *extack)
1685 if (unlikely(protocol_min_failed(attr)))
1686 return -IPSET_ERR_PROTOCOL;
1689 struct netlink_dump_control c = {
1690 .start = ip_set_dump_start,
1691 .dump = ip_set_dump_do,
1692 .done = ip_set_dump_done,
1694 return netlink_dump_start(ctnl, skb, nlh, &c);
1698 /* Add, del and test */
/* Attribute policy for ADD/DEL/TEST: a single nested DATA attribute
 * or (for batched restore) a nested ADT list of DATA attributes.
 */
1700 static const struct nla_policy ip_set_adt_policy[IPSET_ATTR_CMD_MAX + 1] = {
1701 [IPSET_ATTR_PROTOCOL] = { .type = NLA_U8 },
1702 [IPSET_ATTR_SETNAME] = { .type = NLA_NUL_STRING,
1703 .len = IPSET_MAXNAMELEN - 1 },
1704 [IPSET_ATTR_LINENO] = { .type = NLA_U32 },
1705 [IPSET_ATTR_DATA] = { .type = NLA_NESTED },
1706 [IPSET_ATTR_ADT] = { .type = NLA_NESTED },
/* Perform one add/del operation on a set.  Retries the uadt op after
 * a successful resize whenever the variant reports -EAGAIN.  With
 * IPSET_FLAG_EXIST, -IPSET_ERR_EXIST is not treated as an error.
 * In restore/batch mode (use_lineno) a failure is reported back to
 * userspace manually as an NLMSG_ERROR carrying the failing line
 * number, and netlink's automatic ACK is suppressed.
 */
1710 call_ad(struct sock *ctnl, struct sk_buff *skb, struct ip_set *set,
1711 struct nlattr *tb[], enum ipset_adt adt,
1712 u32 flags, bool use_lineno)
1716 bool eexist = flags & IPSET_FLAG_EXIST, retried = false;
1720 ret = set->variant->uadt(set, tb, adt, &lineno, flags, retried);
/* Loop: resize on -EAGAIN and retry the operation once more. */
1723 } while (ret == -EAGAIN &&
1724 set->variant->resize &&
1725 (ret = set->variant->resize(set, retried)) == 0);
1727 if (!ret || (ret == -IPSET_ERR_EXIST && eexist))
1729 if (lineno && use_lineno) {
1730 /* Error in restore/batch mode: send back lineno */
1731 struct nlmsghdr *rep, *nlh = nlmsg_hdr(skb);
1732 struct sk_buff *skb2;
1733 struct nlmsgerr *errmsg;
/* min() caps the sum so the size_t payload cannot wrap. */
1734 size_t payload = min(SIZE_MAX,
1735 sizeof(*errmsg) + nlmsg_len(nlh));
1736 int min_len = nlmsg_total_size(sizeof(struct nfgenmsg));
1737 struct nlattr *cda[IPSET_ATTR_CMD_MAX + 1];
1738 struct nlattr *cmdattr;
1741 skb2 = nlmsg_new(payload, GFP_KERNEL);
/* Build the error reply by hand: echo the original message and
 * patch the LINENO attribute inside the copy.
 */
1744 rep = __nlmsg_put(skb2, NETLINK_CB(skb).portid,
1745 nlh->nlmsg_seq, NLMSG_ERROR, payload, 0);
1746 errmsg = nlmsg_data(rep);
1747 errmsg->error = ret;
1748 memcpy(&errmsg->msg, nlh, nlh->nlmsg_len);
1749 cmdattr = (void *)&errmsg->msg + min_len;
1751 ret = nla_parse(cda, IPSET_ATTR_CMD_MAX, cmdattr,
1752 nlh->nlmsg_len - min_len, ip_set_adt_policy,
1759 errline = nla_data(cda[IPSET_ATTR_LINENO]);
1763 netlink_unicast(ctnl, skb2, NETLINK_CB(skb).portid,
1765 /* Signal netlink not to send its ACK/errmsg. */
/* Common ADD/DEL handler.  The request carries either exactly one
 * nested DATA attribute (single operation) or a nested ADT list of
 * DATA attributes with a LINENO (batched restore); the XOR in the
 * protocol check enforces exactly one of the two forms.
 */
1772 static int ip_set_ad(struct net *net, struct sock *ctnl,
1773 struct sk_buff *skb,
1775 const struct nlmsghdr *nlh,
1776 const struct nlattr * const attr[],
1777 struct netlink_ext_ack *extack)
1779 struct ip_set_net *inst = ip_set_pernet(net);
1781 struct nlattr *tb[IPSET_ATTR_ADT_MAX + 1] = {};
1782 const struct nlattr *nla;
1783 u32 flags = flag_exist(nlh);
1787 if (unlikely(protocol_min_failed(attr) ||
1788 !attr[IPSET_ATTR_SETNAME] ||
1789 !((attr[IPSET_ATTR_DATA] != NULL) ^
1790 (attr[IPSET_ATTR_ADT] != NULL)) ||
1791 (attr[IPSET_ATTR_DATA] &&
1792 !flag_nested(attr[IPSET_ATTR_DATA])) ||
1793 (attr[IPSET_ATTR_ADT] &&
1794 (!flag_nested(attr[IPSET_ATTR_ADT]) ||
1795 !attr[IPSET_ATTR_LINENO]))))
1796 return -IPSET_ERR_PROTOCOL;
1798 set = find_set(inst, nla_data(attr[IPSET_ATTR_SETNAME]));
1802 use_lineno = !!attr[IPSET_ATTR_LINENO];
/* Single-element form: parse the DATA nest with the set type's own
 * policy and perform one operation.
 */
1803 if (attr[IPSET_ATTR_DATA]) {
1804 if (nla_parse_nested(tb, IPSET_ATTR_ADT_MAX,
1805 attr[IPSET_ATTR_DATA],
1806 set->type->adt_policy, NULL))
1807 return -IPSET_ERR_PROTOCOL;
1808 ret = call_ad(ctnl, skb, set, tb, adt, flags,
/* Batch form: iterate the ADT nest, one DATA element per call. */
1813 nla_for_each_nested(nla, attr[IPSET_ATTR_ADT], nla_rem) {
1814 if (nla_type(nla) != IPSET_ATTR_DATA ||
1815 !flag_nested(nla) ||
1816 nla_parse_nested(tb, IPSET_ATTR_ADT_MAX, nla,
1817 set->type->adt_policy, NULL))
1818 return -IPSET_ERR_PROTOCOL;
1819 ret = call_ad(ctnl, skb, set, tb, adt,
/* ADD command: thin wrapper forwarding to ip_set_ad() with IPSET_ADD. */
1828 static int ip_set_uadd(struct net *net, struct sock *ctnl,
1829 struct sk_buff *skb, const struct nlmsghdr *nlh,
1830 const struct nlattr * const attr[],
1831 struct netlink_ext_ack *extack)
1833 return ip_set_ad(net, ctnl, skb,
1834 IPSET_ADD, nlh, attr, extack);
/* DEL command: thin wrapper forwarding to ip_set_ad() with IPSET_DEL. */
1837 static int ip_set_udel(struct net *net, struct sock *ctnl,
1838 struct sk_buff *skb, const struct nlmsghdr *nlh,
1839 const struct nlattr * const attr[],
1840 struct netlink_ext_ack *extack)
1842 return ip_set_ad(net, ctnl, skb,
1843 IPSET_DEL, nlh, attr, extack);
/* TEST command: check whether a single element is in the set.
 * Returns 0 when the element matches, -IPSET_ERR_EXIST when it does
 * not (a positive uadt result means "found").
 */
1846 static int ip_set_utest(struct net *net, struct sock *ctnl, struct sk_buff *skb,
1847 const struct nlmsghdr *nlh,
1848 const struct nlattr * const attr[],
1849 struct netlink_ext_ack *extack)
1851 struct ip_set_net *inst = ip_set_pernet(net);
1853 struct nlattr *tb[IPSET_ATTR_ADT_MAX + 1] = {};
1857 if (unlikely(protocol_min_failed(attr) ||
1858 !attr[IPSET_ATTR_SETNAME] ||
1859 !attr[IPSET_ATTR_DATA] ||
1860 !flag_nested(attr[IPSET_ATTR_DATA])))
1861 return -IPSET_ERR_PROTOCOL;
1863 set = find_set(inst, nla_data(attr[IPSET_ATTR_SETNAME]));
1867 if (nla_parse_nested(tb, IPSET_ATTR_ADT_MAX, attr[IPSET_ATTR_DATA],
1868 set->type->adt_policy, NULL))
1869 return -IPSET_ERR_PROTOCOL;
/* Test under RCU BH read side; the uadt op only reads the set. */
1872 ret = set->variant->uadt(set, tb, IPSET_TEST, &lineno, 0, 0);
1873 rcu_read_unlock_bh();
1874 /* Userspace can't trigger element to be re-added */
1878 return ret > 0 ? 0 : -IPSET_ERR_EXIST;
1881 /* Get headed data of a set */
/* HEADER command: reply with a set's name, type name, family and
 * revision in a freshly allocated unicast message.
 */
1883 static int ip_set_header(struct net *net, struct sock *ctnl,
1884 struct sk_buff *skb, const struct nlmsghdr *nlh,
1885 const struct nlattr * const attr[],
1886 struct netlink_ext_ack *extack)
1888 struct ip_set_net *inst = ip_set_pernet(net);
1889 const struct ip_set *set;
1890 struct sk_buff *skb2;
1891 struct nlmsghdr *nlh2;
1894 if (unlikely(protocol_min_failed(attr) ||
1895 !attr[IPSET_ATTR_SETNAME]))
1896 return -IPSET_ERR_PROTOCOL;
1898 set = find_set(inst, nla_data(attr[IPSET_ATTR_SETNAME]));
1902 skb2 = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
1906 nlh2 = start_msg(skb2, NETLINK_CB(skb).portid, nlh->nlmsg_seq, 0,
1910 if (nla_put_u8(skb2, IPSET_ATTR_PROTOCOL, protocol(attr)) ||
1911 nla_put_string(skb2, IPSET_ATTR_SETNAME, set->name) ||
1912 nla_put_string(skb2, IPSET_ATTR_TYPENAME, set->type->name) ||
1913 nla_put_u8(skb2, IPSET_ATTR_FAMILY, set->family) ||
1914 nla_put_u8(skb2, IPSET_ATTR_REVISION, set->revision))
1915 goto nla_put_failure;
1916 nlmsg_end(skb2, nlh2);
1918 ret = netlink_unicast(ctnl, skb2, NETLINK_CB(skb).portid, MSG_DONTWAIT);
/* Attribute didn't fit: unwind the partially built message. */
1925 nlmsg_cancel(skb2, nlh2);
/* Attribute policy for the TYPE command: type name plus family. */
1933 static const struct nla_policy ip_set_type_policy[IPSET_ATTR_CMD_MAX + 1] = {
1934 [IPSET_ATTR_PROTOCOL] = { .type = NLA_U8 },
1935 [IPSET_ATTR_TYPENAME] = { .type = NLA_NUL_STRING,
1936 .len = IPSET_MAXNAMELEN - 1 },
1937 [IPSET_ATTR_FAMILY] = { .type = NLA_U8 },
/* TYPE command: report the minimum and maximum supported revisions of
 * a set type for a given address family, in a unicast reply.
 */
1940 static int ip_set_type(struct net *net, struct sock *ctnl, struct sk_buff *skb,
1941 const struct nlmsghdr *nlh,
1942 const struct nlattr * const attr[],
1943 struct netlink_ext_ack *extack)
1945 struct sk_buff *skb2;
1946 struct nlmsghdr *nlh2;
1947 u8 family, min, max;
1948 const char *typename;
1951 if (unlikely(protocol_min_failed(attr) ||
1952 !attr[IPSET_ATTR_TYPENAME] ||
1953 !attr[IPSET_ATTR_FAMILY]))
1954 return -IPSET_ERR_PROTOCOL;
1956 family = nla_get_u8(attr[IPSET_ATTR_FAMILY]);
1957 typename = nla_data(attr[IPSET_ATTR_TYPENAME]);
1958 ret = find_set_type_minmax(typename, family, &min, &max);
1962 skb2 = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
1966 nlh2 = start_msg(skb2, NETLINK_CB(skb).portid, nlh->nlmsg_seq, 0,
1970 if (nla_put_u8(skb2, IPSET_ATTR_PROTOCOL, protocol(attr)) ||
1971 nla_put_string(skb2, IPSET_ATTR_TYPENAME, typename) ||
1972 nla_put_u8(skb2, IPSET_ATTR_FAMILY, family) ||
1973 nla_put_u8(skb2, IPSET_ATTR_REVISION, max) ||
1974 nla_put_u8(skb2, IPSET_ATTR_REVISION_MIN, min))
1975 goto nla_put_failure;
1976 nlmsg_end(skb2, nlh2);
1978 pr_debug("Send TYPE, nlmsg_len: %u\n", nlh2->nlmsg_len);
1979 ret = netlink_unicast(ctnl, skb2, NETLINK_CB(skb).portid, MSG_DONTWAIT);
/* Attribute didn't fit: unwind the partially built message. */
1986 nlmsg_cancel(skb2, nlh2);
1992 /* Get protocol version */
/* Attribute policy for the PROTOCOL command. */
1994 static const struct nla_policy
1995 ip_set_protocol_policy[IPSET_ATTR_CMD_MAX + 1] = {
1996 [IPSET_ATTR_PROTOCOL] = { .type = NLA_U8 },
/* PROTOCOL command: report the supported protocol version range
 * (IPSET_PROTOCOL and IPSET_PROTOCOL_MIN) to userspace.
 */
1999 static int ip_set_protocol(struct net *net, struct sock *ctnl,
2000 struct sk_buff *skb, const struct nlmsghdr *nlh,
2001 const struct nlattr * const attr[],
2002 struct netlink_ext_ack *extack)
2004 struct sk_buff *skb2;
2005 struct nlmsghdr *nlh2;
2008 if (unlikely(!attr[IPSET_ATTR_PROTOCOL]))
2009 return -IPSET_ERR_PROTOCOL;
2011 skb2 = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
2015 nlh2 = start_msg(skb2, NETLINK_CB(skb).portid, nlh->nlmsg_seq, 0,
2016 IPSET_CMD_PROTOCOL);
2019 if (nla_put_u8(skb2, IPSET_ATTR_PROTOCOL, IPSET_PROTOCOL))
2020 goto nla_put_failure;
2021 if (nla_put_u8(skb2, IPSET_ATTR_PROTOCOL_MIN, IPSET_PROTOCOL_MIN))
2022 goto nla_put_failure;
2023 nlmsg_end(skb2, nlh2);
2025 ret = netlink_unicast(ctnl, skb2, NETLINK_CB(skb).portid, MSG_DONTWAIT);
/* Attribute didn't fit: unwind the partially built message. */
2032 nlmsg_cancel(skb2, nlh2);
2038 /* Get set by name or index, from userspace */
/* GET_BYNAME command: resolve a set name to its family and index and
 * return them in a unicast reply.
 */
2040 static int ip_set_byname(struct net *net, struct sock *ctnl,
2041 struct sk_buff *skb, const struct nlmsghdr *nlh,
2042 const struct nlattr * const attr[],
2043 struct netlink_ext_ack *extack)
2045 struct ip_set_net *inst = ip_set_pernet(net);
2046 struct sk_buff *skb2;
2047 struct nlmsghdr *nlh2;
2048 ip_set_id_t id = IPSET_INVALID_ID;
2049 const struct ip_set *set;
2052 if (unlikely(protocol_failed(attr) ||
2053 !attr[IPSET_ATTR_SETNAME]))
2054 return -IPSET_ERR_PROTOCOL;
2056 set = find_set_and_id(inst, nla_data(attr[IPSET_ATTR_SETNAME]), &id);
2057 if (id == IPSET_INVALID_ID)
2060 skb2 = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
2064 nlh2 = start_msg(skb2, NETLINK_CB(skb).portid, nlh->nlmsg_seq, 0,
2065 IPSET_CMD_GET_BYNAME);
2068 if (nla_put_u8(skb2, IPSET_ATTR_PROTOCOL, protocol(attr)) ||
2069 nla_put_u8(skb2, IPSET_ATTR_FAMILY, set->family) ||
2070 nla_put_net16(skb2, IPSET_ATTR_INDEX, htons(id)))
2071 goto nla_put_failure;
2072 nlmsg_end(skb2, nlh2);
2074 ret = netlink_unicast(ctnl, skb2, NETLINK_CB(skb).portid, MSG_DONTWAIT);
/* Attribute didn't fit: unwind the partially built message. */
2081 nlmsg_cancel(skb2, nlh2);
/* Attribute policy for the GET_BYINDEX command. */
2087 static const struct nla_policy ip_set_index_policy[IPSET_ATTR_CMD_MAX + 1] = {
2088 [IPSET_ATTR_PROTOCOL] = { .type = NLA_U8 },
2089 [IPSET_ATTR_INDEX] = { .type = NLA_U16 },
/* GET_BYINDEX command: resolve a set index to its name and return the
 * name in a unicast reply.  Out-of-range indices are rejected before
 * touching the set array.
 */
2092 static int ip_set_byindex(struct net *net, struct sock *ctnl,
2093 struct sk_buff *skb, const struct nlmsghdr *nlh,
2094 const struct nlattr * const attr[],
2095 struct netlink_ext_ack *extack)
2097 struct ip_set_net *inst = ip_set_pernet(net);
2098 struct sk_buff *skb2;
2099 struct nlmsghdr *nlh2;
2100 ip_set_id_t id = IPSET_INVALID_ID;
2101 const struct ip_set *set;
2104 if (unlikely(protocol_failed(attr) ||
2105 !attr[IPSET_ATTR_INDEX]))
2106 return -IPSET_ERR_PROTOCOL;
2108 id = ip_set_get_h16(attr[IPSET_ATTR_INDEX]);
2109 if (id >= inst->ip_set_max)
2111 set = ip_set(inst, id);
2115 skb2 = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
2119 nlh2 = start_msg(skb2, NETLINK_CB(skb).portid, nlh->nlmsg_seq, 0,
2120 IPSET_CMD_GET_BYINDEX);
2123 if (nla_put_u8(skb2, IPSET_ATTR_PROTOCOL, protocol(attr)) ||
2124 nla_put_string(skb2, IPSET_ATTR_SETNAME, set->name))
2125 goto nla_put_failure;
2126 nlmsg_end(skb2, nlh2);
2128 ret = netlink_unicast(ctnl, skb2, NETLINK_CB(skb).portid, MSG_DONTWAIT);
/* Attribute didn't fit: unwind the partially built message. */
2135 nlmsg_cancel(skb2, nlh2);
/* nfnetlink command dispatch table: one entry per IPSET_CMD_*,
 * pairing each handler with its attribute-parsing policy.
 * Note SAVE uses the same handler as LIST but the plain
 * setname policy (no FLAGS attribute).
 */
2141 static const struct nfnl_callback ip_set_netlink_subsys_cb[IPSET_MSG_MAX] = {
2142 [IPSET_CMD_NONE] = {
2143 .call = ip_set_none,
2144 .attr_count = IPSET_ATTR_CMD_MAX,
2146 [IPSET_CMD_CREATE] = {
2147 .call = ip_set_create,
2148 .attr_count = IPSET_ATTR_CMD_MAX,
2149 .policy = ip_set_create_policy,
2151 [IPSET_CMD_DESTROY] = {
2152 .call = ip_set_destroy,
2153 .attr_count = IPSET_ATTR_CMD_MAX,
2154 .policy = ip_set_setname_policy,
2156 [IPSET_CMD_FLUSH] = {
2157 .call = ip_set_flush,
2158 .attr_count = IPSET_ATTR_CMD_MAX,
2159 .policy = ip_set_setname_policy,
2161 [IPSET_CMD_RENAME] = {
2162 .call = ip_set_rename,
2163 .attr_count = IPSET_ATTR_CMD_MAX,
2164 .policy = ip_set_setname2_policy,
2166 [IPSET_CMD_SWAP] = {
2167 .call = ip_set_swap,
2168 .attr_count = IPSET_ATTR_CMD_MAX,
2169 .policy = ip_set_setname2_policy,
2171 [IPSET_CMD_LIST] = {
2172 .call = ip_set_dump,
2173 .attr_count = IPSET_ATTR_CMD_MAX,
2174 .policy = ip_set_dump_policy,
2176 [IPSET_CMD_SAVE] = {
2177 .call = ip_set_dump,
2178 .attr_count = IPSET_ATTR_CMD_MAX,
2179 .policy = ip_set_setname_policy,
2182 .call = ip_set_uadd,
2183 .attr_count = IPSET_ATTR_CMD_MAX,
2184 .policy = ip_set_adt_policy,
2187 .call = ip_set_udel,
2188 .attr_count = IPSET_ATTR_CMD_MAX,
2189 .policy = ip_set_adt_policy,
2191 [IPSET_CMD_TEST] = {
2192 .call = ip_set_utest,
2193 .attr_count = IPSET_ATTR_CMD_MAX,
2194 .policy = ip_set_adt_policy,
2196 [IPSET_CMD_HEADER] = {
2197 .call = ip_set_header,
2198 .attr_count = IPSET_ATTR_CMD_MAX,
2199 .policy = ip_set_setname_policy,
2201 [IPSET_CMD_TYPE] = {
2202 .call = ip_set_type,
2203 .attr_count = IPSET_ATTR_CMD_MAX,
2204 .policy = ip_set_type_policy,
2206 [IPSET_CMD_PROTOCOL] = {
2207 .call = ip_set_protocol,
2208 .attr_count = IPSET_ATTR_CMD_MAX,
2209 .policy = ip_set_protocol_policy,
2211 [IPSET_CMD_GET_BYNAME] = {
2212 .call = ip_set_byname,
2213 .attr_count = IPSET_ATTR_CMD_MAX,
2214 .policy = ip_set_setname_policy,
2216 [IPSET_CMD_GET_BYINDEX] = {
2217 .call = ip_set_byindex,
2218 .attr_count = IPSET_ATTR_CMD_MAX,
2219 .policy = ip_set_index_policy,
/* nfnetlink subsystem descriptor registered in ip_set_init(). */
2223 static struct nfnetlink_subsystem ip_set_netlink_subsys __read_mostly = {
2225 .subsys_id = NFNL_SUBSYS_IPSET,
2226 .cb_count = IPSET_MSG_MAX,
2227 .cb = ip_set_netlink_subsys_cb,
2230 /* Interface to iptables/ip6tables */
/* getsockopt(SO_IP_SET) backend used by the iptables/ip6tables "set"
 * match and SET target.  Requires CAP_NET_ADMIN in the socket's user
 * namespace.  The user buffer is copied in, dispatched on the leading
 * op code, and (for the query ops) copied back out.  Name/index
 * lookups are serialized against set changes by the nfnl mutex.
 */
2233 ip_set_sockfn_get(struct sock *sk, int optval, void __user *user, int *len)
2237 int copylen = *len, ret = 0;
2238 struct net *net = sock_net(sk);
2239 struct ip_set_net *inst = ip_set_pernet(net);
2241 if (!ns_capable(net->user_ns, CAP_NET_ADMIN))
2243 if (optval != SO_IP_SET)
2245 if (*len < sizeof(unsigned int))
2248 data = vmalloc(*len);
2251 if (copy_from_user(data, user, *len) != 0) {
2257 if (*op < IP_SET_OP_VERSION) {
2258 /* Check the version at the beginning of operations */
2259 struct ip_set_req_version *req_version = data;
2261 if (*len < sizeof(struct ip_set_req_version)) {
2266 if (req_version->version < IPSET_PROTOCOL_MIN) {
2273 case IP_SET_OP_VERSION: {
2274 struct ip_set_req_version *req_version = data;
2276 if (*len != sizeof(struct ip_set_req_version)) {
2281 req_version->version = IPSET_PROTOCOL;
2282 if (copy_to_user(user, req_version,
2283 sizeof(struct ip_set_req_version)))
2287 case IP_SET_OP_GET_BYNAME: {
2288 struct ip_set_req_get_set *req_get = data;
2291 if (*len != sizeof(struct ip_set_req_get_set)) {
/* Force NUL termination of the userspace-supplied name. */
2295 req_get->set.name[IPSET_MAXNAMELEN - 1] = '\0';
2296 nfnl_lock(NFNL_SUBSYS_IPSET);
2297 find_set_and_id(inst, req_get->set.name, &id);
2298 req_get->set.index = id;
2299 nfnl_unlock(NFNL_SUBSYS_IPSET);
2302 case IP_SET_OP_GET_FNAME: {
2303 struct ip_set_req_get_set_family *req_get = data;
2306 if (*len != sizeof(struct ip_set_req_get_set_family)) {
2310 req_get->set.name[IPSET_MAXNAMELEN - 1] = '\0';
2311 nfnl_lock(NFNL_SUBSYS_IPSET);
2312 find_set_and_id(inst, req_get->set.name, &id);
2313 req_get->set.index = id;
2314 if (id != IPSET_INVALID_ID)
2315 req_get->family = ip_set(inst, id)->family;
2316 nfnl_unlock(NFNL_SUBSYS_IPSET);
2319 case IP_SET_OP_GET_BYINDEX: {
2320 struct ip_set_req_get_set *req_get = data;
2323 if (*len != sizeof(struct ip_set_req_get_set) ||
2324 req_get->set.index >= inst->ip_set_max) {
2328 nfnl_lock(NFNL_SUBSYS_IPSET);
2329 set = ip_set(inst, req_get->set.index);
/* Empty slot yields an empty name rather than an error. */
2330 ret = strscpy(req_get->set.name, set ? set->name : "",
2332 nfnl_unlock(NFNL_SUBSYS_IPSET);
2340 } /* end of switch(op) */
2343 if (copy_to_user(user, data, copylen))
/* Sockopt registration: a single getsockopt value, SO_IP_SET. */
2353 static struct nf_sockopt_ops so_set __read_mostly = {
2355 .get_optmin = SO_IP_SET,
2356 .get_optmax = SO_IP_SET + 1,
2357 .get = ip_set_sockfn_get,
2358 .owner = THIS_MODULE,
/* Per-netns init: allocate the zeroed set-pointer array.  ip_set_max
 * comes from the max_sets module parameter (falling back to
 * CONFIG_IP_SET_MAX) and is clamped below IPSET_INVALID_ID, which is
 * reserved as the "no such set" sentinel.
 */
2361 static int __net_init
2362 ip_set_net_init(struct net *net)
2364 struct ip_set_net *inst = ip_set_pernet(net);
2365 struct ip_set **list;
2367 inst->ip_set_max = max_sets ? max_sets : CONFIG_IP_SET_MAX;
2368 if (inst->ip_set_max >= IPSET_INVALID_ID)
2369 inst->ip_set_max = IPSET_INVALID_ID - 1;
2371 list = kvcalloc(inst->ip_set_max, sizeof(struct ip_set *), GFP_KERNEL);
2374 inst->is_deleted = false;
2375 inst->is_destroyed = false;
2376 rcu_assign_pointer(inst->ip_set_list, list);
/* Per-netns teardown: mark the instance deleted, then destroy every
 * remaining set and free the pointer array.  Set destruction runs
 * under the nfnl mutex to serialize against netlink commands.
 */
2380 static void __net_exit
2381 ip_set_net_exit(struct net *net)
2383 struct ip_set_net *inst = ip_set_pernet(net);
2385 struct ip_set *set = NULL;
2388 inst->is_deleted = true; /* flag for ip_set_nfnl_put */
2390 nfnl_lock(NFNL_SUBSYS_IPSET);
2391 for (i = 0; i < inst->ip_set_max; i++) {
2392 set = ip_set(inst, i);
2394 ip_set(inst, i) = NULL;
2395 ip_set_destroy_set(set);
2398 nfnl_unlock(NFNL_SUBSYS_IPSET);
2399 kvfree(rcu_dereference_protected(inst->ip_set_list, 1));
/* Pernet registration: one struct ip_set_net per network namespace. */
2402 static struct pernet_operations ip_set_net_ops = {
2403 .init = ip_set_net_init,
2404 .exit = ip_set_net_exit,
2405 .id = &ip_set_net_id,
2406 .size = sizeof(struct ip_set_net),
/* Module init: register in order pernet subsys -> nfnetlink subsystem
 * -> sockopt interface, unwinding the earlier registrations whenever
 * a later one fails.
 */
2412 int ret = register_pernet_subsys(&ip_set_net_ops);
2415 pr_err("ip_set: cannot register pernet_subsys.\n");
2419 ret = nfnetlink_subsys_register(&ip_set_netlink_subsys);
2421 pr_err("ip_set: cannot register with nfnetlink.\n");
2422 unregister_pernet_subsys(&ip_set_net_ops);
2426 ret = nf_register_sockopt(&so_set);
2428 pr_err("SO_SET registry failed: %d\n", ret);
2429 nfnetlink_subsys_unregister(&ip_set_netlink_subsys);
2430 unregister_pernet_subsys(&ip_set_net_ops);
/* Module exit: unregister in reverse order of ip_set_init(). */
2440 nf_unregister_sockopt(&so_set);
2441 nfnetlink_subsys_unregister(&ip_set_netlink_subsys);
2443 unregister_pernet_subsys(&ip_set_net_ops);
2444 pr_debug("these are the famous last words\n");
/* Module entry/exit hooks and the protocol-stamped description. */
2447 module_init(ip_set_init);
2448 module_exit(ip_set_fini);
2450 MODULE_DESCRIPTION("ip_set: protocol " __stringify(IPSET_PROTOCOL));