/* SPDX-License-Identifier: GPL-2.0-only */
/* Copyright (C) 2013 Jozsef Kadlecsik <kadlec@netfilter.org> */

#ifndef _IP_SET_HASH_GEN_H
#define _IP_SET_HASH_GEN_H

#include <linux/rcupdate.h>
#include <linux/jhash.h>
#include <linux/types.h>
#include <linux/netfilter/nfnetlink.h>
#include <linux/netfilter/ipset/ip_set.h>

#define __ipset_dereference(p)		\
	rcu_dereference_protected(p, 1)
#define ipset_dereference_nfnl(p)	\
	rcu_dereference_protected(p,	\
		lockdep_nfnl_is_held(NFNL_SUBSYS_IPSET))
#define ipset_dereference_set(p, set)	\
	rcu_dereference_protected(p,	\
		lockdep_nfnl_is_held(NFNL_SUBSYS_IPSET) || \
		lockdep_is_held(&(set)->lock))
#define ipset_dereference_bh_nfnl(p)	\
	rcu_dereference_bh_check(p,	\
		lockdep_nfnl_is_held(NFNL_SUBSYS_IPSET))

/* Hashing which uses arrays to resolve clashes. The hash table is resized
 * (doubled) when searching becomes too long.
 * Internally jhash is used with the assumption that the size of the
 * stored data is a multiple of sizeof(u32).
 *
 * Readers and resizing
 *
 * Resizing can be triggered by userspace command only, and those
 * are serialized by the nfnl mutex. During resizing the set is
 * read-locked, so the only possible concurrent operations are
 * the kernel side readers. Those must be protected by proper RCU locking.
 */

/* Number of elements to store in an initial array block */
#define AHASH_INIT_SIZE		2
/* Max number of elements to store in an array block */
#define AHASH_MAX_SIZE		(6 * AHASH_INIT_SIZE)
/* Max number of elements in the array block when tuned */
#define AHASH_MAX_TUNED		64

#define AHASH_MAX(h)		((h)->bucketsize)

/* The max number of elements can be tuned */
#ifdef IP_SET_HASH_WITH_MULTI
static u8
tune_bucketsize(u8 curr, u32 multi)
{
	u32 n;

	if (multi < curr)
		return curr;

	n = curr + AHASH_INIT_SIZE;
	/* Currently, when listing, one hash bucket must fit into a message.
	 * Therefore we have a hard limit here.
	 */
	return n > curr && n <= AHASH_MAX_TUNED ? n : curr;
}

#define TUNE_BUCKETSIZE(h, multi)	\
	((h)->bucketsize = tune_bucketsize((h)->bucketsize, multi))
#else
#define TUNE_BUCKETSIZE(h, multi)
#endif
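
/* Tuning illustration (editor's sketch): starting from the default
 * bucketsize of AHASH_MAX_SIZE (12), a full bucket whose lookups keep
 * reporting multiple possible matches grows in AHASH_INIT_SIZE (2)
 * steps: 12 -> 14 -> ... -> AHASH_MAX_TUNED (64), the hard limit that
 * keeps one bucket listable in a single netlink message.
 */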

/* A hash bucket */
struct hbucket {
	struct rcu_head rcu;	/* for call_rcu */
	/* Which positions are used in the array */
	DECLARE_BITMAP(used, AHASH_MAX_TUNED);
	u8 size;		/* size of the array */
	u8 pos;			/* position of the first free entry */
	unsigned char value[]	/* the array of the values */
		__aligned(__alignof__(u64));
};
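
/* Layout example (editor's illustration): for a set with dsize == 4,
 * e.g. a plain IPv4 hash:ip element without extensions, a fresh bucket
 * has size == AHASH_INIT_SIZE == 2, pos == 0, and value[] provides two
 * 4-byte slots whose occupancy is tracked by the used bitmap.
 */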

/* Region size for locking == 2^HTABLE_REGION_BITS */
#define HTABLE_REGION_BITS	10
#define ahash_numof_locks(htable_bits)		\
	((htable_bits) < HTABLE_REGION_BITS ? 1	\
		: jhash_size((htable_bits) - HTABLE_REGION_BITS))
#define ahash_sizeof_regions(htable_bits)	\
	(ahash_numof_locks(htable_bits) * sizeof(struct ip_set_region))
#define ahash_region(n, htable_bits)		\
	((n) % ahash_numof_locks(htable_bits))
#define ahash_bucket_start(h, htable_bits)	\
	((htable_bits) < HTABLE_REGION_BITS ? 0	\
		: (h) * jhash_size(HTABLE_REGION_BITS))
#define ahash_bucket_end(h, htable_bits)	\
	((htable_bits) < HTABLE_REGION_BITS ? jhash_size(htable_bits)	\
		: ((h) + 1) * jhash_size(HTABLE_REGION_BITS))
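
/* Worked example (editor's sketch): a table with htable_bits == 12 has
 * 2^12 buckets and ahash_numof_locks() == 2^(12 - 10) == 4 region
 * locks; region r spans the buckets [ahash_bucket_start(r, 12),
 * ahash_bucket_end(r, 12)), e.g. region 1 covers buckets 1024..2047.
 */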

struct htable_gc {
	struct delayed_work dwork;
	struct ip_set *set;	/* Set the gc belongs to */
	u32 region;		/* Last gc run position */
};

/* The hash table: the table size stored here in order to make resizing easy */
struct htable {
	atomic_t ref;		/* References for resizing */
	atomic_t uref;		/* References for dumping and gc */
	u8 htable_bits;		/* size of hash table == 2^htable_bits */
	u32 maxelem;		/* Maxelem per region */
	struct ip_set_region *hregion;	/* Region locks and ext sizes */
	struct hbucket __rcu *bucket[]; /* hashtable buckets */
};

#define hbucket(h, i)		((h)->bucket[i])
#define ext_size(n, dsize)	\
	(sizeof(struct hbucket) + (n) * (dsize))

#ifndef IPSET_NET_COUNT
#define IPSET_NET_COUNT		1
#endif

/* Book-keeping of the prefixes added to the set */
struct net_prefixes {
	u32 nets[IPSET_NET_COUNT]; /* number of elements for this cidr */
	u8 cidr[IPSET_NET_COUNT];  /* the cidr value */
};

/* Compute the hash table size */
static size_t
htable_size(u8 hbits)
{
	size_t hsize;

	/* We must fit both into u32 in jhash and size_t */
	if (hbits > 31)
		return 0;
	hsize = jhash_size(hbits);
	if ((((size_t)-1) - sizeof(struct htable)) / sizeof(struct hbucket *)
	    < hsize)
		return 0;

	return hsize * sizeof(struct hbucket *) + sizeof(struct htable);
}
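
/* E.g. (editor's illustration): on a 64-bit machine hbits == 10 gives
 * 1024 * sizeof(struct hbucket *) + sizeof(struct htable), i.e. about
 * 8 KiB for the bucket pointer array plus the table header.
 */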

/* Compute htable_bits from the user input parameter hashsize */
static u8
htable_bits(u32 hashsize)
{
	/* Assume that hashsize == 2^htable_bits */
	u8 bits = fls(hashsize - 1);

	if (jhash_size(bits) != hashsize)
		/* Round up to the first 2^n value */
		bits = fls(hashsize);

	return bits;
}
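
/* E.g. (editor's illustration): hashsize 1024 yields htable_bits == 10,
 * while a non-power-of-two hashsize such as 1500 is rounded up to 2048,
 * i.e. htable_bits == 11.
 */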

#ifdef IP_SET_HASH_WITH_NETS
#if IPSET_NET_COUNT > 1
#define __CIDR(cidr, i)		(cidr[i])
#else
#define __CIDR(cidr, i)		(cidr)
#endif

/* cidr + 1 is stored in net_prefixes to support /0 */
#define NCIDR_PUT(cidr)		((cidr) + 1)
#define NCIDR_GET(cidr)		((cidr) - 1)

#ifdef IP_SET_HASH_WITH_NETS_PACKED
/* When cidr is packed with nomatch, cidr - 1 is stored in the data entry */
#define DCIDR_PUT(cidr)		((cidr) - 1)
#define DCIDR_GET(cidr, i)	(__CIDR(cidr, i) + 1)
#else
#define DCIDR_PUT(cidr)		(cidr)
#define DCIDR_GET(cidr, i)	__CIDR(cidr, i)
#endif

#define INIT_CIDR(cidr, host_mask)	\
	DCIDR_PUT(((cidr) ? NCIDR_GET(cidr) : host_mask))

#ifdef IP_SET_HASH_WITH_NET0
/* cidr from 0 to HOST_MASK value and c = cidr + 1 */
#define NLEN			(HOST_MASK + 1)
#define CIDR_POS(c)		((c) - 1)
#else
/* cidr from 1 to HOST_MASK value and c = cidr + 1 */
#define NLEN			HOST_MASK
#define CIDR_POS(c)		((c) - 2)
#endif

#else
#define NLEN			0
#endif /* IP_SET_HASH_WITH_NETS */
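
/* Encoding example (editor's sketch): a /24 prefix is stored in the
 * net_prefixes book-keeping as NCIDR_PUT(24) == 25, so that value 1 can
 * represent /0; CIDR_POS() maps the stored value back to the slot index
 * of the nets[] array.
 */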

#define SET_ELEM_EXPIRED(set, d)	\
	(SET_WITH_TIMEOUT(set) &&	\
	 ip_set_timeout_expired(ext_timeout(d, set)))

#endif /* _IP_SET_HASH_GEN_H */

#ifndef MTYPE
#error "MTYPE is not defined!"
#endif

#ifndef HTYPE
#error "HTYPE is not defined!"
#endif

#ifndef HOST_MASK
#error "HOST_MASK is not defined!"
#endif
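
/* What follows is a template, instantiated by the set type modules.
 * A minimal sketch of an instantiation, modelled on
 * net/netfilter/ipset/ip_set_hash_ip.c:
 *
 *	#define HTYPE		hash_ip
 *	#define IP_SET_HASH_WITH_NETMASK
 *
 *	struct hash_ip4_elem {
 *		__be32 ip;
 *	};
 *
 *	#define MTYPE		hash_ip4
 *	#define HOST_MASK	32
 *	#include "ip_set_hash_gen.h"
 */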

/* Family dependent templates */
#undef ahash_data
#undef mtype_data_equal
#undef mtype_do_data_match
#undef mtype_data_set_flags
#undef mtype_data_reset_elem
#undef mtype_data_reset_flags
#undef mtype_data_netmask
#undef mtype_data_list
#undef mtype_data_next
#undef mtype_elem

#undef mtype_ahash_destroy
#undef mtype_ext_cleanup
#undef mtype_add_cidr
#undef mtype_del_cidr
#undef mtype_ahash_memsize
#undef mtype_flush
#undef mtype_destroy
#undef mtype_same_set
#undef mtype_kadt
#undef mtype_uadt

#undef mtype_add
#undef mtype_del
#undef mtype_test_cidrs
#undef mtype_test
#undef mtype_uref
#undef mtype_resize
#undef mtype_ext_size
#undef mtype_resize_ad
#undef mtype_head
#undef mtype_list
#undef mtype_gc_do
#undef mtype_gc
#undef mtype_gc_init
#undef mtype_variant
#undef mtype_data_match

#undef htype
#undef HKEY

#define mtype_data_equal	IPSET_TOKEN(MTYPE, _data_equal)
#ifdef IP_SET_HASH_WITH_NETS
#define mtype_do_data_match	IPSET_TOKEN(MTYPE, _do_data_match)
#else
#define mtype_do_data_match(d)	1
#endif
#define mtype_data_set_flags	IPSET_TOKEN(MTYPE, _data_set_flags)
#define mtype_data_reset_elem	IPSET_TOKEN(MTYPE, _data_reset_elem)
#define mtype_data_reset_flags	IPSET_TOKEN(MTYPE, _data_reset_flags)
#define mtype_data_netmask	IPSET_TOKEN(MTYPE, _data_netmask)
#define mtype_data_list		IPSET_TOKEN(MTYPE, _data_list)
#define mtype_data_next		IPSET_TOKEN(MTYPE, _data_next)
#define mtype_elem		IPSET_TOKEN(MTYPE, _elem)

#define mtype_ahash_destroy	IPSET_TOKEN(MTYPE, _ahash_destroy)
#define mtype_ext_cleanup	IPSET_TOKEN(MTYPE, _ext_cleanup)
#define mtype_add_cidr		IPSET_TOKEN(MTYPE, _add_cidr)
#define mtype_del_cidr		IPSET_TOKEN(MTYPE, _del_cidr)
#define mtype_ahash_memsize	IPSET_TOKEN(MTYPE, _ahash_memsize)
#define mtype_flush		IPSET_TOKEN(MTYPE, _flush)
#define mtype_destroy		IPSET_TOKEN(MTYPE, _destroy)
#define mtype_same_set		IPSET_TOKEN(MTYPE, _same_set)
#define mtype_kadt		IPSET_TOKEN(MTYPE, _kadt)
#define mtype_uadt		IPSET_TOKEN(MTYPE, _uadt)

#define mtype_add		IPSET_TOKEN(MTYPE, _add)
#define mtype_del		IPSET_TOKEN(MTYPE, _del)
#define mtype_test_cidrs	IPSET_TOKEN(MTYPE, _test_cidrs)
#define mtype_test		IPSET_TOKEN(MTYPE, _test)
#define mtype_uref		IPSET_TOKEN(MTYPE, _uref)
#define mtype_resize		IPSET_TOKEN(MTYPE, _resize)
#define mtype_ext_size		IPSET_TOKEN(MTYPE, _ext_size)
#define mtype_resize_ad		IPSET_TOKEN(MTYPE, _resize_ad)
#define mtype_head		IPSET_TOKEN(MTYPE, _head)
#define mtype_list		IPSET_TOKEN(MTYPE, _list)
#define mtype_gc_do		IPSET_TOKEN(MTYPE, _gc_do)
#define mtype_gc		IPSET_TOKEN(MTYPE, _gc)
#define mtype_gc_init		IPSET_TOKEN(MTYPE, _gc_init)
#define mtype_variant		IPSET_TOKEN(MTYPE, _variant)
#define mtype_data_match	IPSET_TOKEN(MTYPE, _data_match)

#ifndef HKEY_DATALEN
#define HKEY_DATALEN		sizeof(struct mtype_elem)
#endif

#define htype			MTYPE

#define HKEY(data, initval, htable_bits)			\
({								\
	const u32 *__k = (const u32 *)data;			\
	u32 __l = HKEY_DATALEN / sizeof(u32);			\
								\
	BUILD_BUG_ON(HKEY_DATALEN % sizeof(u32) != 0);		\
								\
	jhash2(__k, __l, initval) & jhash_mask(htable_bits);	\
})
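
/* E.g. (editor's illustration): for an IPv4 hash:ip element the data
 * is a single __be32, so __l == 1 and HKEY() reduces to jhash2() over
 * one 32-bit word, masked down to the current table size.
 */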

/* The generic hash structure */
struct htype {
	struct htable __rcu *table; /* the hash table */
	struct htable_gc gc;	/* gc workqueue */
	u32 maxelem;		/* max elements in the hash */
	u32 initval;		/* random jhash init value */
#ifdef IP_SET_HASH_WITH_MARKMASK
	u32 markmask;		/* markmask value for mark mask to store */
#endif
	u8 bucketsize;		/* max elements in an array block */
#ifdef IP_SET_HASH_WITH_NETMASK
	u8 netmask;		/* netmask value for subnets to store */
#endif
	struct list_head ad;	/* Resize add|del backlist */
	struct mtype_elem next; /* temporary storage for uadd */
#ifdef IP_SET_HASH_WITH_NETS
	struct net_prefixes nets[NLEN]; /* book-keeping of prefixes */
#endif
};

/* ADD|DEL entries saved during resize */
struct mtype_resize_ad {
	struct list_head list;
	enum ipset_adt ad;	/* ADD|DEL element */
	struct mtype_elem d;	/* Element value */
	struct ip_set_ext ext;	/* Extensions for ADD */
	struct ip_set_ext mext;	/* Target extensions for ADD */
	u32 flags;		/* Flags for ADD */
};

#ifdef IP_SET_HASH_WITH_NETS
/* Network cidr size book keeping when the hash stores different
 * sized networks. cidr == real cidr + 1 to support /0.
 */
static void
mtype_add_cidr(struct ip_set *set, struct htype *h, u8 cidr, u8 n)
{
	int i, j;

	spin_lock_bh(&set->lock);
	/* Add in increasing prefix order, so larger cidr first */
	for (i = 0, j = -1; i < NLEN && h->nets[i].cidr[n]; i++) {
		if (j != -1) {
			continue;
		} else if (h->nets[i].cidr[n] < cidr) {
			j = i;
		} else if (h->nets[i].cidr[n] == cidr) {
			h->nets[CIDR_POS(cidr)].nets[n]++;
			goto unlock;
		}
	}
	if (j != -1) {
		for (; i > j; i--)
			h->nets[i].cidr[n] = h->nets[i - 1].cidr[n];
	}
	h->nets[i].cidr[n] = cidr;
	h->nets[CIDR_POS(cidr)].nets[n] = 1;
unlock:
	spin_unlock_bh(&set->lock);
}
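
/* Book-keeping example (editor's sketch): after adding a /24 and then
 * a /16 to an empty hash:net-style set, the cidr[] values are kept in
 * decreasing prefix-length order (/24 before /16) and the matching
 * nets[] counters are 1 each; mtype_test_cidrs() below relies on this
 * ordering to probe the more specific prefixes first.
 */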

static void
mtype_del_cidr(struct ip_set *set, struct htype *h, u8 cidr, u8 n)
{
	u8 i, j, net_end = NLEN - 1;

	spin_lock_bh(&set->lock);
	for (i = 0; i < NLEN; i++) {
		if (h->nets[i].cidr[n] != cidr)
			continue;
		h->nets[CIDR_POS(cidr)].nets[n]--;
		if (h->nets[CIDR_POS(cidr)].nets[n] > 0)
			goto unlock;
		for (j = i; j < net_end && h->nets[j].cidr[n]; j++)
			h->nets[j].cidr[n] = h->nets[j + 1].cidr[n];
		h->nets[j].cidr[n] = 0;
		goto unlock;
	}
unlock:
	spin_unlock_bh(&set->lock);
}
#endif

/* Calculate the actual memory size of the set data */
static size_t
mtype_ahash_memsize(const struct htype *h, const struct htable *t)
{
	return sizeof(*h) + sizeof(*t) + ahash_sizeof_regions(t->htable_bits);
}

/* Get the ith element from the array block n */
#define ahash_data(n, i, dsize)	\
	((struct mtype_elem *)((n)->value + ((i) * (dsize))))

static void
mtype_ext_cleanup(struct ip_set *set, struct hbucket *n)
{
	int i;

	for (i = 0; i < n->pos; i++)
		if (test_bit(i, n->used))
			ip_set_ext_destroy(set, ahash_data(n, i, set->dsize));
}

/* Flush a hash type of set: destroy all elements */
static void
mtype_flush(struct ip_set *set)
{
	struct htype *h = set->data;
	struct htable *t;
	struct hbucket *n;
	u32 r, i;

	t = ipset_dereference_nfnl(h->table);
	for (r = 0; r < ahash_numof_locks(t->htable_bits); r++) {
		spin_lock_bh(&t->hregion[r].lock);
		for (i = ahash_bucket_start(r, t->htable_bits);
		     i < ahash_bucket_end(r, t->htable_bits); i++) {
			n = __ipset_dereference(hbucket(t, i));
			if (!n)
				continue;
			if (set->extensions & IPSET_EXT_DESTROY)
				mtype_ext_cleanup(set, n);
			/* FIXME: use slab cache */
			rcu_assign_pointer(hbucket(t, i), NULL);
			kfree_rcu(n, rcu);
		}
		t->hregion[r].ext_size = 0;
		t->hregion[r].elements = 0;
		spin_unlock_bh(&t->hregion[r].lock);
	}
#ifdef IP_SET_HASH_WITH_NETS
	memset(h->nets, 0, sizeof(h->nets));
#endif
}

/* Destroy the hashtable part of the set */
static void
mtype_ahash_destroy(struct ip_set *set, struct htable *t, bool ext_destroy)
{
	struct hbucket *n;
	u32 i;

	for (i = 0; i < jhash_size(t->htable_bits); i++) {
		n = __ipset_dereference(hbucket(t, i));
		if (!n)
			continue;
		if (set->extensions & IPSET_EXT_DESTROY && ext_destroy)
			mtype_ext_cleanup(set, n);
		/* FIXME: use slab cache */
		kfree(n);
	}

	ip_set_free(t->hregion);
	ip_set_free(t);
}

/* Destroy a hash type of set */
static void
mtype_destroy(struct ip_set *set)
{
	struct htype *h = set->data;
	struct list_head *l, *lt;

	if (SET_WITH_TIMEOUT(set))
		cancel_delayed_work_sync(&h->gc.dwork);

	mtype_ahash_destroy(set, ipset_dereference_nfnl(h->table), true);
	list_for_each_safe(l, lt, &h->ad) {
		list_del(l);
		kfree(l);
	}

	kfree(h);

	set->data = NULL;
}

static bool
mtype_same_set(const struct ip_set *a, const struct ip_set *b)
{
	const struct htype *x = a->data;
	const struct htype *y = b->data;

	/* Resizing changes htable_bits, so we ignore it */
	return x->maxelem == y->maxelem &&
	       a->timeout == b->timeout &&
#ifdef IP_SET_HASH_WITH_NETMASK
	       x->netmask == y->netmask &&
#endif
#ifdef IP_SET_HASH_WITH_MARKMASK
	       x->markmask == y->markmask &&
#endif
	       a->extensions == b->extensions;
}

static void
mtype_gc_do(struct ip_set *set, struct htype *h, struct htable *t, u32 r)
{
	struct hbucket *n, *tmp;
	struct mtype_elem *data;
	u32 i, j, d;
	size_t dsize = set->dsize;
#ifdef IP_SET_HASH_WITH_NETS
	u8 k;
#endif
	u8 htable_bits = t->htable_bits;

	spin_lock_bh(&t->hregion[r].lock);
	for (i = ahash_bucket_start(r, htable_bits);
	     i < ahash_bucket_end(r, htable_bits); i++) {
		n = __ipset_dereference(hbucket(t, i));
		if (!n)
			continue;
		for (j = 0, d = 0; j < n->pos; j++) {
			if (!test_bit(j, n->used)) {
				d++;
				continue;
			}
			data = ahash_data(n, j, dsize);
			if (!ip_set_timeout_expired(ext_timeout(data, set)))
				continue;
			pr_debug("expired %u/%u\n", i, j);
			clear_bit(j, n->used);
			smp_mb__after_atomic();
#ifdef IP_SET_HASH_WITH_NETS
			for (k = 0; k < IPSET_NET_COUNT; k++)
				mtype_del_cidr(set, h,
					NCIDR_PUT(DCIDR_GET(data->cidr, k)),
					k);
#endif
			t->hregion[r].elements--;
			ip_set_ext_destroy(set, data);
			d++;
		}
		if (d >= AHASH_INIT_SIZE) {
			if (d >= n->size) {
				/* No live entry left: drop the bucket */
				t->hregion[r].ext_size -=
					ext_size(n->size, dsize);
				rcu_assign_pointer(hbucket(t, i), NULL);
				kfree_rcu(n, rcu);
				continue;
			}
			tmp = kzalloc(sizeof(*tmp) +
				      (n->size - AHASH_INIT_SIZE) * dsize,
				      GFP_ATOMIC);
			if (!tmp)
				/* Still try to delete expired elements. */
				continue;
			tmp->size = n->size - AHASH_INIT_SIZE;
			for (j = 0, d = 0; j < n->pos; j++) {
				if (!test_bit(j, n->used))
					continue;
				data = ahash_data(n, j, dsize);
				memcpy(tmp->value + d * dsize,
				       data, dsize);
				set_bit(d, tmp->used);
				d++;
			}
			tmp->pos = d;
			t->hregion[r].ext_size -=
				ext_size(AHASH_INIT_SIZE, dsize);
			rcu_assign_pointer(hbucket(t, i), tmp);
			kfree_rcu(n, rcu);
		}
	}
	spin_unlock_bh(&t->hregion[r].lock);
}

static void
mtype_gc(struct work_struct *work)
{
	struct htable_gc *gc;
	struct ip_set *set;
	struct htype *h;
	struct htable *t;
	u32 r, numof_locks;
	unsigned int next_run;

	gc = container_of(work, struct htable_gc, dwork.work);
	set = gc->set;
	h = set->data;

	spin_lock_bh(&set->lock);
	t = ipset_dereference_set(h->table, set);
	atomic_inc(&t->uref);
	numof_locks = ahash_numof_locks(t->htable_bits);
	r = gc->region++;
	if (r >= numof_locks) {
		r = gc->region = 0;
	}
	next_run = (IPSET_GC_PERIOD(set->timeout) * HZ) / numof_locks;
	if (next_run < HZ/10)
		next_run = HZ/10;
	spin_unlock_bh(&set->lock);

	mtype_gc_do(set, h, t, r);

	if (atomic_dec_and_test(&t->uref) && atomic_read(&t->ref)) {
		pr_debug("Table destroy after resize by expire: %p\n", t);
		mtype_ahash_destroy(set, t, false);
	}

	queue_delayed_work(system_power_efficient_wq, &gc->dwork, next_run);
}

static void
mtype_gc_init(struct htable_gc *gc)
{
	INIT_DEFERRABLE_WORK(&gc->dwork, mtype_gc);
	queue_delayed_work(system_power_efficient_wq, &gc->dwork, HZ);
}

static int
mtype_add(struct ip_set *set, void *value, const struct ip_set_ext *ext,
	  struct ip_set_ext *mext, u32 flags);
static int
mtype_del(struct ip_set *set, void *value, const struct ip_set_ext *ext,
	  struct ip_set_ext *mext, u32 flags);

/* Resize a hash: create a new hash table with doubling the hashsize
 * and inserting the elements to it. Repeat until we succeed or
 * fail due to memory pressure.
 */
static int
mtype_resize(struct ip_set *set, bool retried)
{
	struct htype *h = set->data;
	struct htable *t, *orig;
	u8 htable_bits;
	size_t dsize = set->dsize;
#ifdef IP_SET_HASH_WITH_NETS
	u8 flags;
	struct mtype_elem *tmp;
#endif
	struct mtype_elem *data;
	struct mtype_elem *d;
	struct hbucket *n, *m;
	struct list_head *l, *lt;
	struct mtype_resize_ad *x;
	u32 i, j, r, nr, key;
	int ret;

#ifdef IP_SET_HASH_WITH_NETS
	tmp = kmalloc(dsize, GFP_KERNEL);
	if (!tmp)
		return -ENOMEM;
#endif
	orig = ipset_dereference_bh_nfnl(h->table);
	htable_bits = orig->htable_bits;

retry:
	ret = 0;
	htable_bits++;
	if (!htable_bits) {
		/* In case we have plenty of memory :-) */
		pr_warn("Cannot increase the hashsize of set %s further\n",
			set->name);
		ret = -IPSET_ERR_HASH_FULL;
		goto out;
	}
	t = ip_set_alloc(htable_size(htable_bits));
	if (!t) {
		ret = -ENOMEM;
		goto out;
	}
	t->hregion = ip_set_alloc(ahash_sizeof_regions(htable_bits));
	if (!t->hregion) {
		ip_set_free(t);
		ret = -ENOMEM;
		goto out;
	}
	t->htable_bits = htable_bits;
	t->maxelem = h->maxelem / ahash_numof_locks(htable_bits);
	for (i = 0; i < ahash_numof_locks(htable_bits); i++)
		spin_lock_init(&t->hregion[i].lock);

	/* There can't be another parallel resizing,
	 * but dumping, gc, kernel side add/del are possible
	 */
	orig = ipset_dereference_bh_nfnl(h->table);
	atomic_set(&orig->ref, 1);
	atomic_inc(&orig->uref);
	pr_debug("attempt to resize set %s from %u to %u, t %p\n",
		 set->name, orig->htable_bits, htable_bits, orig);
	for (r = 0; r < ahash_numof_locks(orig->htable_bits); r++) {
		/* Expire may replace a hbucket with another one */
		rcu_read_lock_bh();
		for (i = ahash_bucket_start(r, orig->htable_bits);
		     i < ahash_bucket_end(r, orig->htable_bits); i++) {
			n = __ipset_dereference(hbucket(orig, i));
			if (!n)
				continue;
			for (j = 0; j < n->pos; j++) {
				if (!test_bit(j, n->used))
					continue;
				data = ahash_data(n, j, dsize);
				if (SET_ELEM_EXPIRED(set, data))
					continue;
#ifdef IP_SET_HASH_WITH_NETS
				/* We have readers running parallel with us,
				 * so the live data cannot be modified.
				 */
				flags = 0;
				memcpy(tmp, data, dsize);
				data = tmp;
				mtype_data_reset_flags(data, &flags);
#endif
				key = HKEY(data, h->initval, htable_bits);
				m = __ipset_dereference(hbucket(t, key));
				nr = ahash_region(key, htable_bits);
				if (!m) {
					m = kzalloc(sizeof(*m) +
					    AHASH_INIT_SIZE * dsize,
					    GFP_ATOMIC);
					if (!m) {
						ret = -ENOMEM;
						goto cleanup;
					}
					m->size = AHASH_INIT_SIZE;
					t->hregion[nr].ext_size +=
						ext_size(AHASH_INIT_SIZE,
							 dsize);
					RCU_INIT_POINTER(hbucket(t, key), m);
				} else if (m->pos >= m->size) {
					struct hbucket *ht;

					if (m->size >= AHASH_MAX(h)) {
						ret = -EAGAIN;
					} else {
						ht = kzalloc(sizeof(*ht) +
						(m->size + AHASH_INIT_SIZE)
						* dsize,
						GFP_ATOMIC);
						if (!ht)
							ret = -ENOMEM;
					}
					if (ret < 0)
						goto cleanup;
					memcpy(ht, m, sizeof(struct hbucket) +
					       m->size * dsize);
					ht->size = m->size + AHASH_INIT_SIZE;
					t->hregion[nr].ext_size +=
						ext_size(AHASH_INIT_SIZE,
							 dsize);
					kfree(m);
					m = ht;
					RCU_INIT_POINTER(hbucket(t, key), ht);
				}
				d = ahash_data(m, m->pos, dsize);
				memcpy(d, data, dsize);
				set_bit(m->pos++, m->used);
				t->hregion[nr].elements++;
#ifdef IP_SET_HASH_WITH_NETS
				mtype_data_reset_flags(d, &flags);
#endif
			}
		}
		rcu_read_unlock_bh();
	}

	/* There can't be any other writer. */
	rcu_assign_pointer(h->table, t);

	/* Give time to other readers of the set */
	synchronize_rcu();

	pr_debug("set %s resized from %u (%p) to %u (%p)\n", set->name,
		 orig->htable_bits, orig, t->htable_bits, t);

	/* Add/delete elements processed by the SET target during resize.
	 * Kernel-side add cannot trigger a resize and userspace actions
	 * are serialized by the mutex.
	 */
	list_for_each_safe(l, lt, &h->ad) {
		x = list_entry(l, struct mtype_resize_ad, list);
		if (x->ad == IPSET_ADD) {
			mtype_add(set, &x->d, &x->ext, &x->mext, x->flags);
		} else {
			mtype_del(set, &x->d, NULL, NULL, 0);
		}
		list_del(l);
		kfree(l);
	}

	/* If there's nobody else using the table, destroy it */
	if (atomic_dec_and_test(&orig->uref)) {
		pr_debug("Table destroy by resize %p\n", orig);
		mtype_ahash_destroy(set, orig, false);
	}

out:
#ifdef IP_SET_HASH_WITH_NETS
	kfree(tmp);
#endif
	return ret;

cleanup:
	rcu_read_unlock_bh();
	atomic_set(&orig->ref, 0);
	atomic_dec(&orig->uref);
	mtype_ahash_destroy(set, t, false);
	if (ret == -EAGAIN)
		goto retry;
	goto out;
}
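
/* Editor's note: resizing is driven from the core. When mtype_add()
 * returns -EAGAIN (bucket full and no longer tunable), ip_set_core
 * invokes the resize operation of the variant, which points at
 * mtype_resize() via mtype_variant below, and then retries the
 * operation (the "retried" argument).
 */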

/* Get the current number of elements and ext_size in the set */
static void
mtype_ext_size(struct ip_set *set, u32 *elements, size_t *ext_size)
{
	struct htype *h = set->data;
	const struct htable *t;
	u32 i, j, r;
	struct hbucket *n;
	struct mtype_elem *data;

	t = rcu_dereference_bh(h->table);
	for (r = 0; r < ahash_numof_locks(t->htable_bits); r++) {
		for (i = ahash_bucket_start(r, t->htable_bits);
		     i < ahash_bucket_end(r, t->htable_bits); i++) {
			n = rcu_dereference_bh(hbucket(t, i));
			if (!n)
				continue;
			for (j = 0; j < n->pos; j++) {
				if (!test_bit(j, n->used))
					continue;
				data = ahash_data(n, j, set->dsize);
				if (!SET_ELEM_EXPIRED(set, data))
					(*elements)++;
			}
		}
		*ext_size += t->hregion[r].ext_size;
	}
}

/* Add an element to a hash and update the internal counters when succeeded,
 * otherwise report the proper error code.
 */
static int
mtype_add(struct ip_set *set, void *value, const struct ip_set_ext *ext,
	  struct ip_set_ext *mext, u32 flags)
{
	struct htype *h = set->data;
	struct htable *t;
	const struct mtype_elem *d = value;
	struct mtype_elem *data;
	struct hbucket *n, *old = ERR_PTR(-ENOENT);
	int i, j = -1, ret;
	bool flag_exist = flags & IPSET_FLAG_EXIST;
	bool deleted = false, forceadd = false, reuse = false;
	u32 r, key, multi = 0, elements, maxelem;

	rcu_read_lock_bh();
	t = rcu_dereference_bh(h->table);
	key = HKEY(value, h->initval, t->htable_bits);
	r = ahash_region(key, t->htable_bits);
	atomic_inc(&t->uref);
	elements = t->hregion[r].elements;
	maxelem = t->maxelem;
	if (elements >= maxelem) {
		u32 e;
		if (SET_WITH_TIMEOUT(set)) {
			rcu_read_unlock_bh();
			mtype_gc_do(set, h, t, r);
			rcu_read_lock_bh();
		}
		maxelem = h->maxelem;
		elements = 0;
		for (e = 0; e < ahash_numof_locks(t->htable_bits); e++)
			elements += t->hregion[e].elements;
		if (elements >= maxelem && SET_WITH_FORCEADD(set))
			forceadd = true;
	}
	rcu_read_unlock_bh();

	spin_lock_bh(&t->hregion[r].lock);
	n = rcu_dereference_bh(hbucket(t, key));
	if (!n) {
		if (forceadd || elements >= maxelem)
			goto set_full;
		old = NULL;
		n = kzalloc(sizeof(*n) + AHASH_INIT_SIZE * set->dsize,
			    GFP_ATOMIC);
		if (!n) {
			ret = -ENOMEM;
			goto unlock;
		}
		n->size = AHASH_INIT_SIZE;
		t->hregion[r].ext_size +=
			ext_size(AHASH_INIT_SIZE, set->dsize);
		goto copy_elem;
	}
	for (i = 0; i < n->pos; i++) {
		if (!test_bit(i, n->used)) {
			/* Reuse first deleted entry */
			if (j == -1) {
				deleted = reuse = true;
				j = i;
			}
			continue;
		}
		data = ahash_data(n, i, set->dsize);
		if (mtype_data_equal(data, d, &multi)) {
			if (flag_exist || SET_ELEM_EXPIRED(set, data)) {
				/* Just the extensions could be overwritten */
				j = i;
				goto overwrite_extensions;
			}
			ret = -IPSET_ERR_EXIST;
			goto unlock;
		}
		/* Reuse first timed out entry */
		if (SET_ELEM_EXPIRED(set, data) && j == -1) {
			j = i;
			reuse = true;
		}
	}
	if (reuse || forceadd) {
		if (j == -1)
			j = 0;
		data = ahash_data(n, j, set->dsize);
		if (!deleted) {
#ifdef IP_SET_HASH_WITH_NETS
			for (i = 0; i < IPSET_NET_COUNT; i++)
				mtype_del_cidr(set, h,
					NCIDR_PUT(DCIDR_GET(data->cidr, i)),
					i);
#endif
			ip_set_ext_destroy(set, data);
			t->hregion[r].elements--;
		}
		goto copy_data;
	}
	if (elements >= maxelem)
		goto set_full;
	/* Create a new slot */
	if (n->pos >= n->size) {
		TUNE_BUCKETSIZE(h, multi);
		if (n->size >= AHASH_MAX(h)) {
			/* Trigger rehashing */
			mtype_data_next(&h->next, d);
			ret = -EAGAIN;
			goto resize;
		}
		old = n;
		n = kzalloc(sizeof(*n) +
			    (old->size + AHASH_INIT_SIZE) * set->dsize,
			    GFP_ATOMIC);
		if (!n) {
			ret = -ENOMEM;
			goto unlock;
		}
		memcpy(n, old, sizeof(struct hbucket) +
		       old->size * set->dsize);
		n->size = old->size + AHASH_INIT_SIZE;
		t->hregion[r].ext_size +=
			ext_size(AHASH_INIT_SIZE, set->dsize);
	}

copy_elem:
	j = n->pos++;
	data = ahash_data(n, j, set->dsize);
copy_data:
	t->hregion[r].elements++;
#ifdef IP_SET_HASH_WITH_NETS
	for (i = 0; i < IPSET_NET_COUNT; i++)
		mtype_add_cidr(set, h, NCIDR_PUT(DCIDR_GET(d->cidr, i)), i);
#endif
	memcpy(data, d, sizeof(struct mtype_elem));
overwrite_extensions:
#ifdef IP_SET_HASH_WITH_NETS
	mtype_data_set_flags(data, flags);
#endif
	if (SET_WITH_COUNTER(set))
		ip_set_init_counter(ext_counter(data, set), ext);
	if (SET_WITH_COMMENT(set))
		ip_set_init_comment(set, ext_comment(data, set), ext);
	if (SET_WITH_SKBINFO(set))
		ip_set_init_skbinfo(ext_skbinfo(data, set), ext);
	/* Must come last for the case when timed out entry is reused */
	if (SET_WITH_TIMEOUT(set))
		ip_set_timeout_set(ext_timeout(data, set), ext->timeout);
	smp_mb__before_atomic();
	set_bit(j, n->used);
	if (old != ERR_PTR(-ENOENT)) {
		rcu_assign_pointer(hbucket(t, key), n);
		if (old)
			kfree_rcu(old, rcu);
	}
	ret = 0;

resize:
	spin_unlock_bh(&t->hregion[r].lock);
	if (atomic_read(&t->ref) && ext->target) {
		/* Resize is in process and kernel side add, save values */
		struct mtype_resize_ad *x;

		x = kzalloc(sizeof(struct mtype_resize_ad), GFP_ATOMIC);
		if (x) {
			x->ad = IPSET_ADD;
			memcpy(&x->d, value, sizeof(struct mtype_elem));
			memcpy(&x->ext, ext, sizeof(struct ip_set_ext));
			memcpy(&x->mext, mext, sizeof(struct ip_set_ext));
			x->flags = flags;
			spin_lock_bh(&set->lock);
			list_add_tail(&x->list, &h->ad);
			spin_unlock_bh(&set->lock);
		}
	}
	goto out;

set_full:
	if (net_ratelimit())
		pr_warn("Set %s is full, maxelem %u reached\n",
			set->name, maxelem);
	ret = -IPSET_ERR_HASH_FULL;
unlock:
	spin_unlock_bh(&t->hregion[r].lock);
out:
	if (atomic_dec_and_test(&t->uref) && atomic_read(&t->ref)) {
		pr_debug("Table destroy after resize by add: %p\n", t);
		mtype_ahash_destroy(set, t, false);
	}
	return ret;
}

/* Delete an element from the hash and free up space if possible.
 */
static int
mtype_del(struct ip_set *set, void *value, const struct ip_set_ext *ext,
	  struct ip_set_ext *mext, u32 flags)
{
	struct htype *h = set->data;
	struct htable *t;
	const struct mtype_elem *d = value;
	struct mtype_elem *data;
	struct hbucket *n;
	struct mtype_resize_ad *x = NULL;
	int i, j, k, r, ret = -IPSET_ERR_EXIST;
	u32 key, multi = 0;
	size_t dsize = set->dsize;

	/* Userspace add and resize is excluded by the mutex.
	 * Kernel-space add does not trigger a resize.
	 */
	rcu_read_lock_bh();
	t = rcu_dereference_bh(h->table);
	key = HKEY(value, h->initval, t->htable_bits);
	r = ahash_region(key, t->htable_bits);
	atomic_inc(&t->uref);
	rcu_read_unlock_bh();

	spin_lock_bh(&t->hregion[r].lock);
	n = rcu_dereference_bh(hbucket(t, key));
	if (!n)
		goto out;
	for (i = 0, k = 0; i < n->pos; i++) {
		if (!test_bit(i, n->used)) {
			k++;
			continue;
		}
		data = ahash_data(n, i, dsize);
		if (!mtype_data_equal(data, d, &multi))
			continue;
		if (SET_ELEM_EXPIRED(set, data))
			goto out;

		ret = 0;
		clear_bit(i, n->used);
		smp_mb__after_atomic();
		if (i + 1 == n->pos)
			n->pos--;
		t->hregion[r].elements--;
#ifdef IP_SET_HASH_WITH_NETS
		for (j = 0; j < IPSET_NET_COUNT; j++)
			mtype_del_cidr(set, h,
				       NCIDR_PUT(DCIDR_GET(d->cidr, j)), j);
#endif
		ip_set_ext_destroy(set, data);

		if (atomic_read(&t->ref) && ext->target) {
			/* Resize is in process and kernel side del,
			 * save values
			 */
			x = kzalloc(sizeof(struct mtype_resize_ad),
				    GFP_ATOMIC);
			if (x) {
				x->ad = IPSET_DEL;
				memcpy(&x->d, value,
				       sizeof(struct mtype_elem));
				x->flags = flags;
			}
		}
		for (; i < n->pos; i++) {
			if (!test_bit(i, n->used))
				k++;
		}
		if (n->pos == 0 && k == 0) {
			t->hregion[r].ext_size -= ext_size(n->size, dsize);
			rcu_assign_pointer(hbucket(t, key), NULL);
			kfree_rcu(n, rcu);
		} else if (k >= AHASH_INIT_SIZE) {
			struct hbucket *tmp = kzalloc(sizeof(*tmp) +
					(n->size - AHASH_INIT_SIZE) * dsize,
					GFP_ATOMIC);
			if (!tmp)
				goto out;
			tmp->size = n->size - AHASH_INIT_SIZE;
			for (j = 0, k = 0; j < n->pos; j++) {
				if (!test_bit(j, n->used))
					continue;
				data = ahash_data(n, j, dsize);
				memcpy(tmp->value + k * dsize, data, dsize);
				set_bit(k, tmp->used);
				k++;
			}
			tmp->pos = k;
			t->hregion[r].ext_size -=
				ext_size(AHASH_INIT_SIZE, dsize);
			rcu_assign_pointer(hbucket(t, key), tmp);
			kfree_rcu(n, rcu);
		}
		goto out;
	}

out:
	spin_unlock_bh(&t->hregion[r].lock);
	if (x) {
		spin_lock_bh(&set->lock);
		list_add(&x->list, &h->ad);
		spin_unlock_bh(&set->lock);
	}
	if (atomic_dec_and_test(&t->uref) && atomic_read(&t->ref)) {
		pr_debug("Table destroy after resize by del: %p\n", t);
		mtype_ahash_destroy(set, t, false);
	}
	return ret;
}

static int
mtype_data_match(struct mtype_elem *data, const struct ip_set_ext *ext,
		 struct ip_set_ext *mext, struct ip_set *set, u32 flags)
{
	if (!ip_set_match_extensions(set, ext, mext, flags, data))
		return 0;
	/* nomatch entries return -ENOTEMPTY */
	return mtype_do_data_match(data);
}

#ifdef IP_SET_HASH_WITH_NETS
/* Special test function which takes into account the different network
 * sizes added to the set
 */
static int
mtype_test_cidrs(struct ip_set *set, struct mtype_elem *d,
		 const struct ip_set_ext *ext,
		 struct ip_set_ext *mext, u32 flags)
{
	struct htype *h = set->data;
	struct htable *t = rcu_dereference_bh(h->table);
	struct hbucket *n;
	struct mtype_elem *data;
#if IPSET_NET_COUNT == 2
	struct mtype_elem orig = *d;
	int ret, i, j = 0, k;
#else
	int ret, i, j = 0;
#endif
	u32 key, multi = 0;

	pr_debug("test by nets\n");
	for (; j < NLEN && h->nets[j].cidr[0] && !multi; j++) {
#if IPSET_NET_COUNT == 2
		mtype_data_reset_elem(d, &orig);
		mtype_data_netmask(d, NCIDR_GET(h->nets[j].cidr[0]), false);
		for (k = 0; k < NLEN && h->nets[k].cidr[1] && !multi;
		     k++) {
			mtype_data_netmask(d, NCIDR_GET(h->nets[k].cidr[1]),
					   true);
#else
		mtype_data_netmask(d, NCIDR_GET(h->nets[j].cidr[0]));
#endif
		key = HKEY(d, h->initval, t->htable_bits);
		n = rcu_dereference_bh(hbucket(t, key));
		if (!n)
			continue;
		for (i = 0; i < n->pos; i++) {
			if (!test_bit(i, n->used))
				continue;
			data = ahash_data(n, i, set->dsize);
			if (!mtype_data_equal(data, d, &multi))
				continue;
			ret = mtype_data_match(data, ext, mext, set, flags);
			if (ret != 0)
				return ret;
#ifdef IP_SET_HASH_WITH_MULTI
			/* No match, reset multiple match flag */
			multi = 0;
#endif
		}
#if IPSET_NET_COUNT == 2
		}
#endif
	}
	return 0;
}
#endif

/* Test whether the element is added to the set */
static int
mtype_test(struct ip_set *set, void *value, const struct ip_set_ext *ext,
	   struct ip_set_ext *mext, u32 flags)
{
	struct htype *h = set->data;
	struct htable *t;
	struct mtype_elem *d = value;
	struct hbucket *n;
	struct mtype_elem *data;
	int i, ret = 0;
	u32 key, multi = 0;

	rcu_read_lock_bh();
	t = rcu_dereference_bh(h->table);
#ifdef IP_SET_HASH_WITH_NETS
	/* If we test an IP address and not a network address,
	 * try all possible network sizes
	 */
	for (i = 0; i < IPSET_NET_COUNT; i++)
		if (DCIDR_GET(d->cidr, i) != HOST_MASK)
			break;
	if (i == IPSET_NET_COUNT) {
		ret = mtype_test_cidrs(set, d, ext, mext, flags);
		goto out;
	}
#endif

	key = HKEY(d, h->initval, t->htable_bits);
	n = rcu_dereference_bh(hbucket(t, key));
	if (!n) {
		ret = 0;
		goto out;
	}
	for (i = 0; i < n->pos; i++) {
		if (!test_bit(i, n->used))
			continue;
		data = ahash_data(n, i, set->dsize);
		if (!mtype_data_equal(data, d, &multi))
			continue;
		ret = mtype_data_match(data, ext, mext, set, flags);
		if (ret != 0)
			goto out;
	}
out:
	rcu_read_unlock_bh();
	return ret;
}

/* Reply to a HEADER request: fill out the header part of the set */
static int
mtype_head(struct ip_set *set, struct sk_buff *skb)
{
	struct htype *h = set->data;
	const struct htable *t;
	struct nlattr *nested;
	size_t memsize;
	u32 elements = 0;
	size_t ext_size = 0;
	u8 htable_bits;

	rcu_read_lock_bh();
	t = rcu_dereference_bh(h->table);
	mtype_ext_size(set, &elements, &ext_size);
	memsize = mtype_ahash_memsize(h, t) + ext_size + set->ext_size;
	htable_bits = t->htable_bits;
	rcu_read_unlock_bh();

	nested = nla_nest_start(skb, IPSET_ATTR_DATA);
	if (!nested)
		goto nla_put_failure;
	if (nla_put_net32(skb, IPSET_ATTR_HASHSIZE,
			  htonl(jhash_size(htable_bits))) ||
	    nla_put_net32(skb, IPSET_ATTR_MAXELEM, htonl(h->maxelem)))
		goto nla_put_failure;
#ifdef IP_SET_HASH_WITH_NETMASK
	if (h->netmask != HOST_MASK &&
	    nla_put_u8(skb, IPSET_ATTR_NETMASK, h->netmask))
		goto nla_put_failure;
#endif
#ifdef IP_SET_HASH_WITH_MARKMASK
	if (nla_put_u32(skb, IPSET_ATTR_MARKMASK, h->markmask))
		goto nla_put_failure;
#endif
	if (set->flags & IPSET_CREATE_FLAG_BUCKETSIZE) {
		if (nla_put_u8(skb, IPSET_ATTR_BUCKETSIZE, h->bucketsize) ||
		    nla_put_net32(skb, IPSET_ATTR_INITVAL, htonl(h->initval)))
			goto nla_put_failure;
	}
	if (nla_put_net32(skb, IPSET_ATTR_REFERENCES, htonl(set->ref)) ||
	    nla_put_net32(skb, IPSET_ATTR_MEMSIZE, htonl(memsize)) ||
	    nla_put_net32(skb, IPSET_ATTR_ELEMENTS, htonl(elements)))
		goto nla_put_failure;
	if (unlikely(ip_set_put_flags(skb, set)))
		goto nla_put_failure;
	nla_nest_end(skb, nested);

	return 0;
nla_put_failure:
	return -EMSGSIZE;
}

/* Make it possible to run dumping in parallel with resizing */
static void
mtype_uref(struct ip_set *set, struct netlink_callback *cb, bool start)
{
	struct htype *h = set->data;
	struct htable *t;

	if (start) {
		rcu_read_lock_bh();
		t = ipset_dereference_bh_nfnl(h->table);
		atomic_inc(&t->uref);
		cb->args[IPSET_CB_PRIVATE] = (unsigned long)t;
		rcu_read_unlock_bh();
	} else if (cb->args[IPSET_CB_PRIVATE]) {
		t = (struct htable *)cb->args[IPSET_CB_PRIVATE];
		if (atomic_dec_and_test(&t->uref) && atomic_read(&t->ref)) {
			pr_debug("Table destroy after resize by dump: %p\n",
				 t);
			mtype_ahash_destroy(set, t, false);
		}
		cb->args[IPSET_CB_PRIVATE] = 0;
	}
}

/* Reply to a LIST/SAVE request: dump the elements of the specified set */
static int
mtype_list(const struct ip_set *set,
	   struct sk_buff *skb, struct netlink_callback *cb)
{
	const struct htable *t;
	struct nlattr *atd, *nested;
	const struct hbucket *n;
	const struct mtype_elem *e;
	u32 first = cb->args[IPSET_CB_ARG0];
	/* We assume that one hash bucket fits into one page */
	void *incomplete;
	int i, ret = 0;

	atd = nla_nest_start(skb, IPSET_ATTR_ADT);
	if (!atd)
		return -EMSGSIZE;

	pr_debug("list hash set %s\n", set->name);
	t = (const struct htable *)cb->args[IPSET_CB_PRIVATE];
	/* Expire may replace a hbucket with another one */
	rcu_read_lock();
	for (; cb->args[IPSET_CB_ARG0] < jhash_size(t->htable_bits);
	     cb->args[IPSET_CB_ARG0]++) {
		cond_resched_rcu();
		incomplete = skb_tail_pointer(skb);
		n = rcu_dereference(hbucket(t, cb->args[IPSET_CB_ARG0]));
		pr_debug("cb->arg bucket: %lu, t %p n %p\n",
			 cb->args[IPSET_CB_ARG0], t, n);
		if (!n)
			continue;
		for (i = 0; i < n->pos; i++) {
			if (!test_bit(i, n->used))
				continue;
			e = ahash_data(n, i, set->dsize);
			if (SET_ELEM_EXPIRED(set, e))
				continue;
			pr_debug("list hash %lu hbucket %p i %u, data %p\n",
				 cb->args[IPSET_CB_ARG0], n, i, e);
			nested = nla_nest_start(skb, IPSET_ATTR_DATA);
			if (!nested) {
				if (cb->args[IPSET_CB_ARG0] == first) {
					nla_nest_cancel(skb, atd);
					ret = -EMSGSIZE;
					goto out;
				}
				goto nla_put_failure;
			}
			if (mtype_data_list(skb, e))
				goto nla_put_failure;
			if (ip_set_put_extensions(skb, set, e, true))
				goto nla_put_failure;
			nla_nest_end(skb, nested);
		}
	}
	nla_nest_end(skb, atd);
	/* Set listing finished */
	cb->args[IPSET_CB_ARG0] = 0;

	goto out;

nla_put_failure:
	nlmsg_trim(skb, incomplete);
	if (unlikely(first == cb->args[IPSET_CB_ARG0])) {
		pr_warn("Can't list set %s: one bucket does not fit into a message. Please report it!\n",
			set->name);
		cb->args[IPSET_CB_ARG0] = 0;
		ret = -EMSGSIZE;
	} else {
		nla_nest_end(skb, atd);
	}
out:
	rcu_read_unlock();
	return ret;
}

static int
IPSET_TOKEN(MTYPE, _kadt)(struct ip_set *set, const struct sk_buff *skb,
			  const struct xt_action_param *par,
			  enum ipset_adt adt, struct ip_set_adt_opt *opt);

static int
IPSET_TOKEN(MTYPE, _uadt)(struct ip_set *set, struct nlattr *tb[],
			  enum ipset_adt adt, u32 *lineno, u32 flags,
			  bool retried);

static const struct ip_set_type_variant mtype_variant = {
	.kadt	= mtype_kadt,
	.uadt	= mtype_uadt,
	.adt	= {
		[IPSET_ADD] = mtype_add,
		[IPSET_DEL] = mtype_del,
		[IPSET_TEST] = mtype_test,
	},
	.destroy = mtype_destroy,
	.flush	= mtype_flush,
	.head	= mtype_head,
	.list	= mtype_list,
	.uref	= mtype_uref,
	.resize	= mtype_resize,
	.same_set = mtype_same_set,
	.region_lock = true,
};

#ifdef IP_SET_EMIT_CREATE
static int
IPSET_TOKEN(HTYPE, _create)(struct net *net, struct ip_set *set,
			    struct nlattr *tb[], u32 flags)
{
	u32 hashsize = IPSET_DEFAULT_HASHSIZE, maxelem = IPSET_DEFAULT_MAXELEM;
#ifdef IP_SET_HASH_WITH_MARKMASK
	u32 markmask;
#endif
	u8 hbits;
#ifdef IP_SET_HASH_WITH_NETMASK
	u8 netmask;
#endif
	size_t hsize;
	struct htype *h;
	struct htable *t;
	u32 i;

	pr_debug("Create set %s with family %s\n",
		 set->name, set->family == NFPROTO_IPV4 ? "inet" : "inet6");

#ifdef IP_SET_PROTO_UNDEF
	if (set->family != NFPROTO_UNSPEC)
		return -IPSET_ERR_INVALID_FAMILY;
#else
	if (!(set->family == NFPROTO_IPV4 || set->family == NFPROTO_IPV6))
		return -IPSET_ERR_INVALID_FAMILY;
#endif

	if (unlikely(!ip_set_optattr_netorder(tb, IPSET_ATTR_HASHSIZE) ||
		     !ip_set_optattr_netorder(tb, IPSET_ATTR_MAXELEM) ||
		     !ip_set_optattr_netorder(tb, IPSET_ATTR_TIMEOUT) ||
		     !ip_set_optattr_netorder(tb, IPSET_ATTR_CADT_FLAGS)))
		return -IPSET_ERR_PROTOCOL;

#ifdef IP_SET_HASH_WITH_MARKMASK
	/* Separated condition in order to avoid directive in argument list */
	if (unlikely(!ip_set_optattr_netorder(tb, IPSET_ATTR_MARKMASK)))
		return -IPSET_ERR_PROTOCOL;

	markmask = 0xffffffff;
	if (tb[IPSET_ATTR_MARKMASK]) {
		markmask = ntohl(nla_get_be32(tb[IPSET_ATTR_MARKMASK]));
		if (markmask == 0)
			return -IPSET_ERR_INVALID_MARKMASK;
	}
#endif

#ifdef IP_SET_HASH_WITH_NETMASK
	netmask = set->family == NFPROTO_IPV4 ? 32 : 128;
	if (tb[IPSET_ATTR_NETMASK]) {
		netmask = nla_get_u8(tb[IPSET_ATTR_NETMASK]);

		if ((set->family == NFPROTO_IPV4 && netmask > 32) ||
		    (set->family == NFPROTO_IPV6 && netmask > 128) ||
		    netmask == 0)
			return -IPSET_ERR_INVALID_NETMASK;
	}
#endif

	if (tb[IPSET_ATTR_HASHSIZE]) {
		hashsize = ip_set_get_h32(tb[IPSET_ATTR_HASHSIZE]);
		if (hashsize < IPSET_MIMINAL_HASHSIZE)
			hashsize = IPSET_MIMINAL_HASHSIZE;
	}

	if (tb[IPSET_ATTR_MAXELEM])
		maxelem = ip_set_get_h32(tb[IPSET_ATTR_MAXELEM]);

	hsize = sizeof(*h);
	h = kzalloc(hsize, GFP_KERNEL);
	if (!h)
		return -ENOMEM;

	hbits = htable_bits(hashsize);
	hsize = htable_size(hbits);
	if (hsize == 0) {
		kfree(h);
		return -ENOMEM;
	}
	t = ip_set_alloc(hsize);
	if (!t) {
		kfree(h);
		return -ENOMEM;
	}
	t->hregion = ip_set_alloc(ahash_sizeof_regions(hbits));
	if (!t->hregion) {
		ip_set_free(t);
		kfree(h);
		return -ENOMEM;
	}
	h->gc.set = set;
	for (i = 0; i < ahash_numof_locks(hbits); i++)
		spin_lock_init(&t->hregion[i].lock);
	h->maxelem = maxelem;
#ifdef IP_SET_HASH_WITH_NETMASK
	h->netmask = netmask;
#endif
#ifdef IP_SET_HASH_WITH_MARKMASK
	h->markmask = markmask;
#endif
	if (tb[IPSET_ATTR_INITVAL])
		h->initval = ntohl(nla_get_be32(tb[IPSET_ATTR_INITVAL]));
	else
		get_random_bytes(&h->initval, sizeof(h->initval));
	h->bucketsize = AHASH_MAX_SIZE;
	if (tb[IPSET_ATTR_BUCKETSIZE]) {
		h->bucketsize = nla_get_u8(tb[IPSET_ATTR_BUCKETSIZE]);
		if (h->bucketsize < AHASH_INIT_SIZE)
			h->bucketsize = AHASH_INIT_SIZE;
		else if (h->bucketsize > AHASH_MAX_SIZE)
			h->bucketsize = AHASH_MAX_SIZE;
		else if (h->bucketsize % 2)
			h->bucketsize += 1;
	}
	t->htable_bits = hbits;
	t->maxelem = h->maxelem / ahash_numof_locks(hbits);
	RCU_INIT_POINTER(h->table, t);

	INIT_LIST_HEAD(&h->ad);
	set->data = h;
#ifndef IP_SET_PROTO_UNDEF
	if (set->family == NFPROTO_IPV4) {
#endif
		set->variant = &IPSET_TOKEN(HTYPE, 4_variant);
		set->dsize = ip_set_elem_len(set, tb,
			sizeof(struct IPSET_TOKEN(HTYPE, 4_elem)),
			__alignof__(struct IPSET_TOKEN(HTYPE, 4_elem)));
#ifndef IP_SET_PROTO_UNDEF
	} else {
		set->variant = &IPSET_TOKEN(HTYPE, 6_variant);
		set->dsize = ip_set_elem_len(set, tb,
			sizeof(struct IPSET_TOKEN(HTYPE, 6_elem)),
			__alignof__(struct IPSET_TOKEN(HTYPE, 6_elem)));
	}
#endif
	set->timeout = IPSET_NO_TIMEOUT;
	if (tb[IPSET_ATTR_TIMEOUT]) {
		set->timeout = ip_set_timeout_uget(tb[IPSET_ATTR_TIMEOUT]);
#ifndef IP_SET_PROTO_UNDEF
		if (set->family == NFPROTO_IPV4)
#endif
			IPSET_TOKEN(HTYPE, 4_gc_init)(&h->gc);
#ifndef IP_SET_PROTO_UNDEF
		else
			IPSET_TOKEN(HTYPE, 6_gc_init)(&h->gc);
#endif
	}
	pr_debug("create %s hashsize %u (%u) maxelem %u: %p(%p)\n",
		 set->name, jhash_size(t->htable_bits),
		 t->htable_bits, h->maxelem, set->data, t);

	return 0;
}
#endif /* IP_SET_EMIT_CREATE */
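
/* Userspace usage sketch (editor's note): a type built from this
 * template is exercised with the usual ipset commands, e.g.
 *
 *	ipset create foo hash:ip hashsize 1024 maxelem 65536 timeout 300
 *	ipset add foo 192.168.1.1
 *	ipset test foo 192.168.1.1
 *
 * which end up in the _create(), mtype_uadt()/mtype_add() and
 * mtype_test() paths above.
 */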