// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (c) 2008-2009 Patrick McHardy <kaber@trash.net>
 *
 * Development of this code funded by Astaro AG (http://www.astaro.com/)
 */

#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/list.h>
#include <linux/rbtree.h>
#include <linux/netlink.h>
#include <linux/netfilter.h>
#include <linux/netfilter/nf_tables.h>
#include <net/netfilter/nf_tables_core.h>

struct nft_rbtree {
	struct rb_root		root;
	rwlock_t		lock;
	seqcount_rwlock_t	count;
	struct delayed_work	gc_work;
};

struct nft_rbtree_elem {
	struct rb_node		node;
	struct nft_set_ext	ext;
};

static bool nft_rbtree_interval_end(const struct nft_rbtree_elem *rbe)
{
	return nft_set_ext_exists(&rbe->ext, NFT_SET_EXT_FLAGS) &&
	       (*nft_set_ext_flags(&rbe->ext) & NFT_SET_ELEM_INTERVAL_END);
}

static bool nft_rbtree_interval_start(const struct nft_rbtree_elem *rbe)
{
	return !nft_rbtree_interval_end(rbe);
}

static bool nft_rbtree_equal(const struct nft_set *set, const void *this,
			     const struct nft_rbtree_elem *interval)
{
	return memcmp(this, nft_set_ext_key(&interval->ext), set->klen) == 0;
}

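/* Lockless lookup path: the tree is traversed under RCU. @seq is the
 * seqcount sampled by the caller; if an update slips in, the walk is
 * aborted (read_seqcount_retry()) and the caller falls back to the
 * locked path.
 */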
static bool __nft_rbtree_lookup(const struct net *net, const struct nft_set *set,
				const u32 *key, const struct nft_set_ext **ext,
				unsigned int seq)
{
	struct nft_rbtree *priv = nft_set_priv(set);
	const struct nft_rbtree_elem *rbe, *interval = NULL;
	u8 genmask = nft_genmask_cur(net);
	const struct rb_node *parent;
	const void *this;
	int d;

	parent = rcu_dereference_raw(priv->root.rb_node);
	while (parent != NULL) {
		if (read_seqcount_retry(&priv->count, seq))
			return false;

		rbe = rb_entry(parent, struct nft_rbtree_elem, node);

		this = nft_set_ext_key(&rbe->ext);
		d = memcmp(this, key, set->klen);
		if (d < 0) {
			parent = rcu_dereference_raw(parent->rb_left);
			if (interval &&
			    nft_rbtree_equal(set, this, interval) &&
			    nft_rbtree_interval_end(rbe) &&
			    nft_rbtree_interval_start(interval))
				continue;
			interval = rbe;
		} else if (d > 0)
			parent = rcu_dereference_raw(parent->rb_right);
		else {
			if (!nft_set_elem_active(&rbe->ext, genmask)) {
				parent = rcu_dereference_raw(parent->rb_left);
				continue;
			}

			if (nft_set_elem_expired(&rbe->ext))
				return false;

			if (nft_rbtree_interval_end(rbe)) {
				if (nft_set_is_anonymous(set))
					return false;
				parent = rcu_dereference_raw(parent->rb_left);
				interval = NULL;
				continue;
			}

			*ext = &rbe->ext;
			return true;
		}
	}

	if (set->flags & NFT_SET_INTERVAL && interval != NULL &&
	    nft_set_elem_active(&interval->ext, genmask) &&
	    !nft_set_elem_expired(&interval->ext) &&
	    nft_rbtree_interval_start(interval)) {
		*ext = &interval->ext;
		return true;
	}

	return false;
}

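/* Try the lockless walk first; only if it failed and the seqcount
 * changed in the meantime, repeat the lookup under the read lock to
 * get a reliable result.
 */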
INDIRECT_CALLABLE_SCOPE
bool nft_rbtree_lookup(const struct net *net, const struct nft_set *set,
		       const u32 *key, const struct nft_set_ext **ext)
{
	struct nft_rbtree *priv = nft_set_priv(set);
	unsigned int seq = read_seqcount_begin(&priv->count);
	bool ret;

	ret = __nft_rbtree_lookup(net, set, key, ext, seq);
	if (ret || !read_seqcount_retry(&priv->count, seq))
		return ret;

	read_lock_bh(&priv->lock);
	seq = read_seqcount_begin(&priv->count);
	ret = __nft_rbtree_lookup(net, set, key, ext, seq);
	read_unlock_bh(&priv->lock);

	return ret;
}

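/* Like __nft_rbtree_lookup(), but returns the element itself and
 * honours the NFT_SET_ELEM_INTERVAL_END flag in @flags so that either
 * boundary of an interval can be retrieved.
 */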
static bool __nft_rbtree_get(const struct net *net, const struct nft_set *set,
			     const u32 *key, struct nft_rbtree_elem **elem,
			     unsigned int seq, unsigned int flags, u8 genmask)
{
	struct nft_rbtree_elem *rbe, *interval = NULL;
	struct nft_rbtree *priv = nft_set_priv(set);
	const struct rb_node *parent;
	const void *this;
	int d;

	parent = rcu_dereference_raw(priv->root.rb_node);
	while (parent != NULL) {
		if (read_seqcount_retry(&priv->count, seq))
			return false;

		rbe = rb_entry(parent, struct nft_rbtree_elem, node);

		this = nft_set_ext_key(&rbe->ext);
		d = memcmp(this, key, set->klen);
		if (d < 0) {
			parent = rcu_dereference_raw(parent->rb_left);
			if (!(flags & NFT_SET_ELEM_INTERVAL_END))
				interval = rbe;
		} else if (d > 0) {
			parent = rcu_dereference_raw(parent->rb_right);
			if (flags & NFT_SET_ELEM_INTERVAL_END)
				interval = rbe;
		} else {
			if (!nft_set_elem_active(&rbe->ext, genmask)) {
				parent = rcu_dereference_raw(parent->rb_left);
				continue;
			}

			if (nft_set_elem_expired(&rbe->ext))
				return false;

			if (!nft_set_ext_exists(&rbe->ext, NFT_SET_EXT_FLAGS) ||
			    (*nft_set_ext_flags(&rbe->ext) & NFT_SET_ELEM_INTERVAL_END) ==
			    (flags & NFT_SET_ELEM_INTERVAL_END)) {
				*elem = rbe;
				return true;
			}

			if (nft_rbtree_interval_end(rbe))
				interval = NULL;

			parent = rcu_dereference_raw(parent->rb_left);
		}
	}

	if (set->flags & NFT_SET_INTERVAL && interval != NULL &&
	    nft_set_elem_active(&interval->ext, genmask) &&
	    !nft_set_elem_expired(&interval->ext) &&
	    ((!nft_rbtree_interval_end(interval) &&
	      !(flags & NFT_SET_ELEM_INTERVAL_END)) ||
	     (nft_rbtree_interval_end(interval) &&
	      (flags & NFT_SET_ELEM_INTERVAL_END)))) {
		*elem = interval;
		return true;
	}

	return false;
}

static void *nft_rbtree_get(const struct net *net, const struct nft_set *set,
			    const struct nft_set_elem *elem, unsigned int flags)
{
	struct nft_rbtree *priv = nft_set_priv(set);
	unsigned int seq = read_seqcount_begin(&priv->count);
	struct nft_rbtree_elem *rbe = ERR_PTR(-ENOENT);
	const u32 *key = (const u32 *)&elem->key.val;
	u8 genmask = nft_genmask_cur(net);
	bool ret;

	ret = __nft_rbtree_get(net, set, key, &rbe, seq, flags, genmask);
	if (ret || !read_seqcount_retry(&priv->count, seq))
		return rbe;

	read_lock_bh(&priv->lock);
	seq = read_seqcount_begin(&priv->count);
	ret = __nft_rbtree_get(net, set, key, &rbe, seq, flags, genmask);
	if (!ret)
		rbe = ERR_PTR(-ENOENT);
	read_unlock_bh(&priv->lock);

	return rbe;
}

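/* Called with priv->lock held for writing and a write_seqcount
 * section open, see nft_rbtree_insert().
 */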
static int __nft_rbtree_insert(const struct net *net, const struct nft_set *set,
			       struct nft_rbtree_elem *new,
			       struct nft_set_ext **ext)
{
	bool overlap = false, dup_end_left = false, dup_end_right = false;
	struct nft_rbtree *priv = nft_set_priv(set);
	u8 genmask = nft_genmask_next(net);
	struct nft_rbtree_elem *rbe;
	struct rb_node *parent, **p;
	int d;

	/* Detect overlaps as we descend the tree. Set the flag in these cases:
	 *
	 * a1. _ _ __>|  ?_ _ __|  (insert end before existing end)
	 * a2. _ _ ___|  ?_ _ _>|  (insert end after existing end)
	 * a3. _ _ ___? >|_ _ __|  (insert start before existing end)
	 *
	 * and clear it later on, as we eventually reach the points indicated by
	 * '?' above, in the cases described below. We'll always meet these
	 * later, locally, due to tree ordering, and overlaps for the intervals
	 * that are the closest together are always evaluated last.
	 *
	 * b1. _ _ __>|  !_ _ __|  (insert end before existing start)
	 * b2. _ _ ___|  !_ _ _>|  (insert end after existing start)
	 * b3. _ _ ___! >|_ _ __|  (insert start after existing end, as a leaf)
	 *            '--' no nodes falling in this range
	 * b4.          >|_ _   !  (insert start before existing start)
	 *
	 * Case a3. resolves to b3.:
	 * - if the inserted start element is the leftmost, because the '0'
	 *   element in the tree serves as end element
	 * - otherwise, if an existing end is found immediately to the left. If
	 *   there are existing nodes in between, we need to further descend the
	 *   tree before we can conclude the new start isn't causing an overlap
	 *
	 * or to b4., which, preceded by a3., means we already traversed one or
	 * more existing intervals entirely, from the right.
	 *
	 * For a new, rightmost pair of elements, we'll hit cases b3. and b2.,
	 * in that order.
	 *
	 * The flag is also cleared in two special cases:
	 *
	 * b5. |__ _ _!|<_ _ _   (insert start right before existing end)
	 * b6. |__ _ >|!__ _ _   (insert end right after existing start)
	 *
	 * which always happen as last step and imply that no further
	 * overlapping is possible.
	 *
	 * Another special case comes from the fact that start elements matching
	 * an already existing start element are allowed: insertion is not
	 * performed but we return -EEXIST in that case, and the error will be
	 * cleared by the caller if NLM_F_EXCL is not present in the request.
	 * This way, request for insertion of an exact overlap isn't reported as
	 * error to userspace if not desired.
	 *
	 * However, if the existing start matches a pre-existing start, but the
	 * end element doesn't match the corresponding pre-existing end element,
	 * we need to report a partial overlap. This is a local condition that
	 * can be noticed without need for a tracking flag, by checking for a
	 * local duplicated end for a corresponding start, from left and right,
	 * separately.
	 */

	parent = NULL;
	p = &priv->root.rb_node;
	while (*p != NULL) {
		parent = *p;
		rbe = rb_entry(parent, struct nft_rbtree_elem, node);
		d = memcmp(nft_set_ext_key(&rbe->ext),
			   nft_set_ext_key(&new->ext),
			   set->klen);
		if (d < 0) {
			p = &parent->rb_left;

			if (nft_rbtree_interval_start(new)) {
				if (nft_rbtree_interval_end(rbe) &&
				    nft_set_elem_active(&rbe->ext, genmask) &&
				    !nft_set_elem_expired(&rbe->ext) && !*p)
					overlap = false;
			} else {
				if (dup_end_left && !*p)
					return -ENOTEMPTY;

				overlap = nft_rbtree_interval_end(rbe) &&
					  nft_set_elem_active(&rbe->ext,
							      genmask) &&
					  !nft_set_elem_expired(&rbe->ext);

				if (overlap) {
					dup_end_right = true;
					continue;
				}
			}
		} else if (d > 0) {
			p = &parent->rb_right;

			if (nft_rbtree_interval_end(new)) {
				if (dup_end_right && !*p)
					return -ENOTEMPTY;

				overlap = nft_rbtree_interval_end(rbe) &&
					  nft_set_elem_active(&rbe->ext,
							      genmask) &&
					  !nft_set_elem_expired(&rbe->ext);

				if (overlap) {
					dup_end_left = true;
					continue;
				}
			} else if (nft_set_elem_active(&rbe->ext, genmask) &&
				   !nft_set_elem_expired(&rbe->ext)) {
				overlap = nft_rbtree_interval_end(rbe);
			}
		} else {
			if (nft_rbtree_interval_end(rbe) &&
			    nft_rbtree_interval_start(new)) {
				p = &parent->rb_left;

				if (nft_set_elem_active(&rbe->ext, genmask) &&
				    !nft_set_elem_expired(&rbe->ext))
					overlap = false;
			} else if (nft_rbtree_interval_start(rbe) &&
				   nft_rbtree_interval_end(new)) {
				p = &parent->rb_right;

				if (nft_set_elem_active(&rbe->ext, genmask) &&
				    !nft_set_elem_expired(&rbe->ext))
					overlap = false;
			} else if (nft_set_elem_active(&rbe->ext, genmask) &&
				   !nft_set_elem_expired(&rbe->ext)) {
				*ext = &rbe->ext;
				return -EEXIST;
			} else {
				p = &parent->rb_left;
			}
		}

		dup_end_left = dup_end_right = false;
	}

	if (overlap)
		return -ENOTEMPTY;

	rb_link_node_rcu(&new->node, parent, p);
	rb_insert_color(&new->node, &priv->root);
	return 0;
}

static int nft_rbtree_insert(const struct net *net, const struct nft_set *set,
			     const struct nft_set_elem *elem,
			     struct nft_set_ext **ext)
{
	struct nft_rbtree *priv = nft_set_priv(set);
	struct nft_rbtree_elem *rbe = elem->priv;
	int err;

	write_lock_bh(&priv->lock);
	write_seqcount_begin(&priv->count);
	err = __nft_rbtree_insert(net, set, rbe, ext);
	write_seqcount_end(&priv->count);
	write_unlock_bh(&priv->lock);

	return err;
}

static void nft_rbtree_remove(const struct net *net,
			      const struct nft_set *set,
			      const struct nft_set_elem *elem)
{
	struct nft_rbtree *priv = nft_set_priv(set);
	struct nft_rbtree_elem *rbe = elem->priv;

	write_lock_bh(&priv->lock);
	write_seqcount_begin(&priv->count);
	rb_erase(&rbe->node, &priv->root);
	write_seqcount_end(&priv->count);
	write_unlock_bh(&priv->lock);
}

static void nft_rbtree_activate(const struct net *net,
				const struct nft_set *set,
				const struct nft_set_elem *elem)
{
	struct nft_rbtree_elem *rbe = elem->priv;

	nft_set_elem_change_active(net, set, &rbe->ext);
	nft_set_elem_clear_busy(&rbe->ext);
}

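/* Deactivate an element for the next generation, unless the GC
 * worker already marked it busy. Returns true if the element was
 * flushed.
 */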
static bool nft_rbtree_flush(const struct net *net,
			     const struct nft_set *set, void *priv)
{
	struct nft_rbtree_elem *rbe = priv;

	if (!nft_set_elem_mark_busy(&rbe->ext) ||
	    !nft_is_active(net, &rbe->ext)) {
		nft_set_elem_change_active(net, set, &rbe->ext);
		return true;
	}
	return false;
}

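/* Look up the tree node matching @elem in both key and interval role
 * (start vs. end) and deactivate it for the next generation; the node
 * itself is removed later via ->remove().
 */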
static void *nft_rbtree_deactivate(const struct net *net,
				   const struct nft_set *set,
				   const struct nft_set_elem *elem)
{
	const struct nft_rbtree *priv = nft_set_priv(set);
	const struct rb_node *parent = priv->root.rb_node;
	struct nft_rbtree_elem *rbe, *this = elem->priv;
	u8 genmask = nft_genmask_next(net);
	int d;

	while (parent != NULL) {
		rbe = rb_entry(parent, struct nft_rbtree_elem, node);

		d = memcmp(nft_set_ext_key(&rbe->ext), &elem->key.val,
			   set->klen);
		if (d < 0)
			parent = parent->rb_left;
		else if (d > 0)
			parent = parent->rb_right;
		else {
			if (nft_rbtree_interval_end(rbe) &&
			    nft_rbtree_interval_start(this)) {
				parent = parent->rb_left;
				continue;
			} else if (nft_rbtree_interval_start(rbe) &&
				   nft_rbtree_interval_end(this)) {
				parent = parent->rb_right;
				continue;
			} else if (!nft_set_elem_active(&rbe->ext, genmask)) {
				parent = parent->rb_left;
				continue;
			}
			nft_rbtree_flush(net, set, rbe);
			return rbe;
		}
	}
	return NULL;
}

static void nft_rbtree_walk(const struct nft_ctx *ctx,
			    struct nft_set *set,
			    struct nft_set_iter *iter)
{
	struct nft_rbtree *priv = nft_set_priv(set);
	struct nft_rbtree_elem *rbe;
	struct nft_set_elem elem;
	struct rb_node *node;

	read_lock_bh(&priv->lock);
	for (node = rb_first(&priv->root); node != NULL; node = rb_next(node)) {
		rbe = rb_entry(node, struct nft_rbtree_elem, node);

		if (iter->count < iter->skip)
			goto cont;
		if (nft_set_elem_expired(&rbe->ext))
			goto cont;
		if (!nft_set_elem_active(&rbe->ext, iter->genmask))
			goto cont;

		elem.priv = rbe;

		iter->err = iter->fn(ctx, set, iter, &elem);
		if (iter->err < 0) {
			read_unlock_bh(&priv->lock);
			return;
		}
cont:
		iter->count++;
	}
	read_unlock_bh(&priv->lock);
}

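/* Garbage collection worker: walk the whole tree under the write lock
 * and queue expired, non-busy elements for destruction in batches. An
 * expired start element is collected together with the end element of
 * the same interval, so lookups never see a dangling boundary.
 */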
static void nft_rbtree_gc(struct work_struct *work)
{
	struct nft_rbtree_elem *rbe, *rbe_end = NULL, *rbe_prev = NULL;
	struct nft_set_gc_batch *gcb = NULL;
	struct nft_rbtree *priv;
	struct rb_node *node;
	struct nft_set *set;

	priv = container_of(work, struct nft_rbtree, gc_work.work);
	set  = nft_set_container_of(priv);

	write_lock_bh(&priv->lock);
	write_seqcount_begin(&priv->count);
	for (node = rb_first(&priv->root); node != NULL; node = rb_next(node)) {
		rbe = rb_entry(node, struct nft_rbtree_elem, node);

		if (nft_rbtree_interval_end(rbe)) {
			rbe_end = rbe;
			continue;
		}
		if (!nft_set_elem_expired(&rbe->ext))
			continue;
		if (nft_set_elem_mark_busy(&rbe->ext))
			continue;

		if (rbe_prev) {
			rb_erase(&rbe_prev->node, &priv->root);
			rbe_prev = NULL;
		}
		gcb = nft_set_gc_batch_check(set, gcb, GFP_ATOMIC);
		if (!gcb)
			break;

		atomic_dec(&set->nelems);
		nft_set_gc_batch_add(gcb, rbe);
		rbe_prev = rbe;

		if (rbe_end) {
			atomic_dec(&set->nelems);
			nft_set_gc_batch_add(gcb, rbe_end);
			rb_erase(&rbe_end->node, &priv->root);
			rbe_end = NULL;
		}
		node = rb_next(node);
		if (!node)
			break;
	}
	if (rbe_prev)
		rb_erase(&rbe_prev->node, &priv->root);
	write_seqcount_end(&priv->count);
	write_unlock_bh(&priv->lock);

	rbe = nft_set_catchall_gc(set);
	if (rbe) {
		gcb = nft_set_gc_batch_check(set, gcb, GFP_ATOMIC);
		if (gcb)
			nft_set_gc_batch_add(gcb, rbe);
	}
	nft_set_gc_batch_complete(gcb);

	queue_delayed_work(system_power_efficient_wq, &priv->gc_work,
			   nft_set_gc_interval(set));
}

static u64 nft_rbtree_privsize(const struct nlattr * const nla[],
			       const struct nft_set_desc *desc)
{
	return sizeof(struct nft_rbtree);
}

static int nft_rbtree_init(const struct nft_set *set,
			   const struct nft_set_desc *desc,
			   const struct nlattr * const nla[])
{
	struct nft_rbtree *priv = nft_set_priv(set);

	rwlock_init(&priv->lock);
	seqcount_rwlock_init(&priv->count, &priv->lock);
	priv->root = RB_ROOT;

	INIT_DEFERRABLE_WORK(&priv->gc_work, nft_rbtree_gc);
	if (set->flags & NFT_SET_TIMEOUT)
		queue_delayed_work(system_power_efficient_wq, &priv->gc_work,
				   nft_set_gc_interval(set));

	return 0;
}

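/* Tear down the set: stop the GC worker, wait for pending RCU
 * callbacks, then release every remaining element.
 */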
static void nft_rbtree_destroy(const struct nft_set *set)
{
	struct nft_rbtree *priv = nft_set_priv(set);
	struct nft_rbtree_elem *rbe;
	struct rb_node *node;

	cancel_delayed_work_sync(&priv->gc_work);
	rcu_barrier();
	while ((node = priv->root.rb_node) != NULL) {
		rb_erase(node, &priv->root);
		rbe = rb_entry(node, struct nft_rbtree_elem, node);
		nft_set_elem_destroy(set, rbe, true);
	}
}

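/* A balanced tree gives O(log n) lookups and one node per element:
 * O(n) memory. Concatenated (multi-field) keys are not supported.
 */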
static bool nft_rbtree_estimate(const struct nft_set_desc *desc, u32 features,
				struct nft_set_estimate *est)
{
	if (desc->field_count > 1)
		return false;

	if (desc->size)
		est->size = sizeof(struct nft_rbtree) +
			    desc->size * sizeof(struct nft_rbtree_elem);
	else
		est->size = ~0;

	est->lookup = NFT_SET_CLASS_O_LOG_N;
	est->space  = NFT_SET_CLASS_O_N;

	return true;
}

const struct nft_set_type nft_set_rbtree_type = {
	.features	= NFT_SET_INTERVAL | NFT_SET_MAP | NFT_SET_OBJECT | NFT_SET_TIMEOUT,
	.ops		= {
		.privsize	= nft_rbtree_privsize,
		.elemsize	= offsetof(struct nft_rbtree_elem, ext),
		.estimate	= nft_rbtree_estimate,
		.init		= nft_rbtree_init,
		.destroy	= nft_rbtree_destroy,
		.insert		= nft_rbtree_insert,
		.remove		= nft_rbtree_remove,
		.deactivate	= nft_rbtree_deactivate,
		.flush		= nft_rbtree_flush,
		.activate	= nft_rbtree_activate,
		.lookup		= nft_rbtree_lookup,
		.walk		= nft_rbtree_walk,
		.get		= nft_rbtree_get,
	},
};