// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (c) 2008-2009 Patrick McHardy <kaber@trash.net>
 *
 * Development of this code funded by Astaro AG (http://www.astaro.com/)
 */

#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/list.h>
#include <linux/rbtree.h>
#include <linux/netlink.h>
#include <linux/netfilter.h>
#include <linux/netfilter/nf_tables.h>
#include <net/netfilter/nf_tables_core.h>

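/* Per-set private data. Lookups from the packet path walk the tree
 * locklessly under RCU and validate against @count; if a writer raced with
 * the walk, the lookup is retried under @lock. Writers (insert, removal and
 * the garbage collector) serialize on @lock and bump @count around every
 * tree modification.
 */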
struct nft_rbtree {
        struct rb_root          root;
        rwlock_t                lock;
        seqcount_rwlock_t       count;
        struct delayed_work     gc_work;
};

struct nft_rbtree_elem {
        struct rb_node          node;
        struct nft_set_ext      ext;
};

static bool nft_rbtree_interval_end(const struct nft_rbtree_elem *rbe)
{
        return nft_set_ext_exists(&rbe->ext, NFT_SET_EXT_FLAGS) &&
               (*nft_set_ext_flags(&rbe->ext) & NFT_SET_ELEM_INTERVAL_END);
}

static bool nft_rbtree_interval_start(const struct nft_rbtree_elem *rbe)
{
        return !nft_rbtree_interval_end(rbe);
}

static bool nft_rbtree_equal(const struct nft_set *set, const void *this,
                             const struct nft_rbtree_elem *interval)
{
        return memcmp(this, nft_set_ext_key(&interval->ext), set->klen) == 0;
}

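/* Lockless lookup of @key. While descending, remember the closest start
 * element seen to the left of the search path ("interval"): on an exact
 * match the element itself is returned, otherwise the key falls within the
 * remembered interval if one exists. Returns false as soon as @seq shows
 * that a writer raced with us, so the caller can retry under the read lock.
 */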
static bool __nft_rbtree_lookup(const struct net *net, const struct nft_set *set,
                                const u32 *key, const struct nft_set_ext **ext,
                                unsigned int seq)
{
        struct nft_rbtree *priv = nft_set_priv(set);
        const struct nft_rbtree_elem *rbe, *interval = NULL;
        u8 genmask = nft_genmask_cur(net);
        const struct rb_node *parent;
        const void *this;
        int d;

        parent = rcu_dereference_raw(priv->root.rb_node);
        while (parent != NULL) {
                if (read_seqcount_retry(&priv->count, seq))
                        return false;

                rbe = rb_entry(parent, struct nft_rbtree_elem, node);

                this = nft_set_ext_key(&rbe->ext);
                d = memcmp(this, key, set->klen);
                if (d < 0) {
                        parent = rcu_dereference_raw(parent->rb_left);
                        if (interval &&
                            nft_rbtree_equal(set, this, interval) &&
                            nft_rbtree_interval_end(rbe) &&
                            nft_rbtree_interval_start(interval))
                                continue;
                        interval = rbe;
                } else if (d > 0)
                        parent = rcu_dereference_raw(parent->rb_right);
                else {
                        if (!nft_set_elem_active(&rbe->ext, genmask)) {
                                parent = rcu_dereference_raw(parent->rb_left);
                                continue;
                        }

                        if (nft_set_elem_expired(&rbe->ext))
                                return false;

                        if (nft_rbtree_interval_end(rbe)) {
                                if (nft_set_is_anonymous(set))
                                        return false;
                                parent = rcu_dereference_raw(parent->rb_left);
                                interval = NULL;
                                continue;
                        }

                        *ext = &rbe->ext;
                        return true;
                }
        }

        if (set->flags & NFT_SET_INTERVAL && interval != NULL &&
            nft_set_elem_active(&interval->ext, genmask) &&
            !nft_set_elem_expired(&interval->ext) &&
            nft_rbtree_interval_start(interval)) {
                *ext = &interval->ext;
                return true;
        }

        return false;
}

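/* Fast path: try a lockless lookup first; only if the seqcount indicates a
 * concurrent update, retry once under the read lock, where the tree is
 * guaranteed to be stable.
 */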
static bool nft_rbtree_lookup(const struct net *net, const struct nft_set *set,
                              const u32 *key, const struct nft_set_ext **ext)
{
        struct nft_rbtree *priv = nft_set_priv(set);
        unsigned int seq = read_seqcount_begin(&priv->count);
        bool ret;

        ret = __nft_rbtree_lookup(net, set, key, ext, seq);
        if (ret || !read_seqcount_retry(&priv->count, seq))
                return ret;

        read_lock_bh(&priv->lock);
        seq = read_seqcount_begin(&priv->count);
        ret = __nft_rbtree_lookup(net, set, key, ext, seq);
        read_unlock_bh(&priv->lock);

        return ret;
}

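/* Control-plane counterpart of __nft_rbtree_lookup(): locates the element
 * matching @key on the start/end side selected by @flags
 * (NFT_SET_ELEM_INTERVAL_END), so either half of an interval can be
 * retrieved on its own.
 */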
static bool __nft_rbtree_get(const struct net *net, const struct nft_set *set,
                             const u32 *key, struct nft_rbtree_elem **elem,
                             unsigned int seq, unsigned int flags, u8 genmask)
{
        struct nft_rbtree_elem *rbe, *interval = NULL;
        struct nft_rbtree *priv = nft_set_priv(set);
        const struct rb_node *parent;
        const void *this;
        int d;

        parent = rcu_dereference_raw(priv->root.rb_node);
        while (parent != NULL) {
                if (read_seqcount_retry(&priv->count, seq))
                        return false;

                rbe = rb_entry(parent, struct nft_rbtree_elem, node);

                this = nft_set_ext_key(&rbe->ext);
                d = memcmp(this, key, set->klen);
                if (d < 0) {
                        parent = rcu_dereference_raw(parent->rb_left);
                        if (!(flags & NFT_SET_ELEM_INTERVAL_END))
                                interval = rbe;
                } else if (d > 0) {
                        parent = rcu_dereference_raw(parent->rb_right);
                        if (flags & NFT_SET_ELEM_INTERVAL_END)
                                interval = rbe;
                } else {
                        if (!nft_set_elem_active(&rbe->ext, genmask)) {
                                parent = rcu_dereference_raw(parent->rb_left);
                                continue;
                        }

                        if (nft_set_elem_expired(&rbe->ext))
                                return false;

                        if (!nft_set_ext_exists(&rbe->ext, NFT_SET_EXT_FLAGS) ||
                            (*nft_set_ext_flags(&rbe->ext) & NFT_SET_ELEM_INTERVAL_END) ==
                            (flags & NFT_SET_ELEM_INTERVAL_END)) {
                                *elem = rbe;
                                return true;
                        }

                        if (nft_rbtree_interval_end(rbe))
                                interval = NULL;

                        parent = rcu_dereference_raw(parent->rb_left);
                }
        }

        if (set->flags & NFT_SET_INTERVAL && interval != NULL &&
            nft_set_elem_active(&interval->ext, genmask) &&
            !nft_set_elem_expired(&interval->ext) &&
            ((!nft_rbtree_interval_end(interval) &&
              !(flags & NFT_SET_ELEM_INTERVAL_END)) ||
             (nft_rbtree_interval_end(interval) &&
              (flags & NFT_SET_ELEM_INTERVAL_END)))) {
                *elem = interval;
                return true;
        }

        return false;
}

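/* Same lockless-then-locked retry scheme as nft_rbtree_lookup(), but
 * returns the element itself, or ERR_PTR(-ENOENT), for the netlink GET
 * path.
 */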
static void *nft_rbtree_get(const struct net *net, const struct nft_set *set,
                            const struct nft_set_elem *elem, unsigned int flags)
{
        struct nft_rbtree *priv = nft_set_priv(set);
        unsigned int seq = read_seqcount_begin(&priv->count);
        struct nft_rbtree_elem *rbe = ERR_PTR(-ENOENT);
        const u32 *key = (const u32 *)&elem->key.val;
        u8 genmask = nft_genmask_cur(net);
        bool ret;

        ret = __nft_rbtree_get(net, set, key, &rbe, seq, flags, genmask);
        if (ret || !read_seqcount_retry(&priv->count, seq))
                return rbe;

        read_lock_bh(&priv->lock);
        seq = read_seqcount_begin(&priv->count);
        ret = __nft_rbtree_get(net, set, key, &rbe, seq, flags, genmask);
        if (!ret)
                rbe = ERR_PTR(-ENOENT);
        read_unlock_bh(&priv->lock);

        return rbe;
}

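/* Insert @new into the tree, rejecting partial overlaps with existing
 * intervals with -ENOTEMPTY and exact duplicates with -EEXIST; the latter
 * is cleared by the caller unless NLM_F_EXCL was requested. Runs with the
 * write lock held, inside the seqcount write section.
 */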
static int __nft_rbtree_insert(const struct net *net, const struct nft_set *set,
                               struct nft_rbtree_elem *new,
                               struct nft_set_ext **ext)
{
        bool overlap = false, dup_end_left = false, dup_end_right = false;
        struct nft_rbtree *priv = nft_set_priv(set);
        u8 genmask = nft_genmask_next(net);
        struct nft_rbtree_elem *rbe;
        struct rb_node *parent, **p;
        int d;

        /* Detect overlaps as we descend the tree. Set the flag in these cases:
         *
         * a1. _ _ __>|  ?_ _ __|  (insert end before existing end)
         * a2. _ _ ___|  ?_ _ _>|  (insert end after existing end)
         * a3. _ _ ___? >|_ _ __|  (insert start before existing end)
         *
         * and clear it later on, as we eventually reach the points indicated by
         * '?' above, in the cases described below. We'll always meet these
         * later, locally, due to tree ordering, and overlaps for the intervals
         * that are the closest together are always evaluated last.
         *
         * b1. _ _ __>|  !_ _ __|  (insert end before existing start)
         * b2. _ _ ___|  !_ _ _>|  (insert end after existing start)
         * b3. _ _ ___! >|_ _ __|  (insert start after existing end, as a leaf)
         *            '--' no nodes falling in this range
         * b4.          >|_ _   !  (insert start before existing start)
         *
         * Case a3. resolves to b3.:
         * - if the inserted start element is the leftmost, because the '0'
         *   element in the tree serves as end element
         * - otherwise, if an existing end is found immediately to the left. If
         *   there are existing nodes in between, we need to further descend the
         *   tree before we can conclude the new start isn't causing an overlap
         *
         * or to b4., which, preceded by a3., means we already traversed one or
         * more existing intervals entirely, from the right.
         *
         * For a new, rightmost pair of elements, we'll hit cases b3. and b2.,
         * in that order.
         *
         * The flag is also cleared in two special cases:
         *
         * b5. |__ _ _!|<_ _ _   (insert start right before existing end)
         * b6. |__ _ >|!__ _ _   (insert end right after existing start)
         *
         * which always happen as last step and imply that no further
         * overlapping is possible.
         *
         * Another special case comes from the fact that start elements matching
         * an already existing start element are allowed: insertion is not
         * performed but we return -EEXIST in that case, and the error will be
         * cleared by the caller if NLM_F_EXCL is not present in the request.
         * This way, request for insertion of an exact overlap isn't reported as
         * error to userspace if not desired.
         *
         * However, if the existing start matches a pre-existing start, but the
         * end element doesn't match the corresponding pre-existing end element,
         * we need to report a partial overlap. This is a local condition that
         * can be noticed without need for a tracking flag, by checking for a
         * local duplicated end for a corresponding start, from left and right,
         * separately.
         */

        parent = NULL;
        p = &priv->root.rb_node;
        while (*p != NULL) {
                parent = *p;
                rbe = rb_entry(parent, struct nft_rbtree_elem, node);
                d = memcmp(nft_set_ext_key(&rbe->ext),
                           nft_set_ext_key(&new->ext),
                           set->klen);
                if (d < 0) {
                        p = &parent->rb_left;

                        if (nft_rbtree_interval_start(new)) {
                                if (nft_rbtree_interval_end(rbe) &&
                                    nft_set_elem_active(&rbe->ext, genmask) &&
                                    !nft_set_elem_expired(&rbe->ext) && !*p)
                                        overlap = false;
                        } else {
                                if (dup_end_left && !*p)
                                        return -ENOTEMPTY;

                                overlap = nft_rbtree_interval_end(rbe) &&
                                          nft_set_elem_active(&rbe->ext,
                                                              genmask) &&
                                          !nft_set_elem_expired(&rbe->ext);

                                if (overlap) {
                                        dup_end_right = true;
                                        continue;
                                }
                        }
                } else if (d > 0) {
                        p = &parent->rb_right;

                        if (nft_rbtree_interval_end(new)) {
                                if (dup_end_right && !*p)
                                        return -ENOTEMPTY;

                                overlap = nft_rbtree_interval_end(rbe) &&
                                          nft_set_elem_active(&rbe->ext,
                                                              genmask) &&
                                          !nft_set_elem_expired(&rbe->ext);

                                if (overlap) {
                                        dup_end_left = true;
                                        continue;
                                }
                        } else if (nft_set_elem_active(&rbe->ext, genmask) &&
                                   !nft_set_elem_expired(&rbe->ext)) {
                                overlap = nft_rbtree_interval_end(rbe);
                        }
                } else {
                        if (nft_rbtree_interval_end(rbe) &&
                            nft_rbtree_interval_start(new)) {
                                p = &parent->rb_left;

                                if (nft_set_elem_active(&rbe->ext, genmask) &&
                                    !nft_set_elem_expired(&rbe->ext))
                                        overlap = false;
                        } else if (nft_rbtree_interval_start(rbe) &&
                                   nft_rbtree_interval_end(new)) {
                                p = &parent->rb_right;

                                if (nft_set_elem_active(&rbe->ext, genmask) &&
                                    !nft_set_elem_expired(&rbe->ext))
                                        overlap = false;
                        } else if (nft_set_elem_active(&rbe->ext, genmask) &&
                                   !nft_set_elem_expired(&rbe->ext)) {
                                *ext = &rbe->ext;
                                return -EEXIST;
                        } else {
                                p = &parent->rb_left;
                        }
                }

                dup_end_left = dup_end_right = false;
        }

        if (overlap)
                return -ENOTEMPTY;

        rb_link_node_rcu(&new->node, parent, p);
        rb_insert_color(&new->node, &priv->root);
        return 0;
}

static int nft_rbtree_insert(const struct net *net, const struct nft_set *set,
                             const struct nft_set_elem *elem,
                             struct nft_set_ext **ext)
{
        struct nft_rbtree *priv = nft_set_priv(set);
        struct nft_rbtree_elem *rbe = elem->priv;
        int err;

        write_lock_bh(&priv->lock);
        write_seqcount_begin(&priv->count);
        err = __nft_rbtree_insert(net, set, rbe, ext);
        write_seqcount_end(&priv->count);
        write_unlock_bh(&priv->lock);

        return err;
}

static void nft_rbtree_remove(const struct net *net,
                              const struct nft_set *set,
                              const struct nft_set_elem *elem)
{
        struct nft_rbtree *priv = nft_set_priv(set);
        struct nft_rbtree_elem *rbe = elem->priv;

        write_lock_bh(&priv->lock);
        write_seqcount_begin(&priv->count);
        rb_erase(&rbe->node, &priv->root);
        write_seqcount_end(&priv->count);
        write_unlock_bh(&priv->lock);
}

static void nft_rbtree_activate(const struct net *net,
                                const struct nft_set *set,
                                const struct nft_set_elem *elem)
{
        struct nft_rbtree_elem *rbe = elem->priv;

        nft_set_elem_change_active(net, set, &rbe->ext);
        nft_set_elem_clear_busy(&rbe->ext);
}

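/* Mark an element inactive in the next generation, unless another ongoing
 * transaction already holds it busy; returns true if the element was
 * deactivated.
 */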
static bool nft_rbtree_flush(const struct net *net,
                             const struct nft_set *set, void *priv)
{
        struct nft_rbtree_elem *rbe = priv;

        if (!nft_set_elem_mark_busy(&rbe->ext) ||
            !nft_is_active(net, &rbe->ext)) {
                nft_set_elem_change_active(net, set, &rbe->ext);
                return true;
        }
        return false;
}

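/* Find the tree element matching @elem, including its start/end side, and
 * deactivate it; returns the element for the commit path, or NULL if no
 * active match exists in the next generation.
 */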
static void *nft_rbtree_deactivate(const struct net *net,
                                   const struct nft_set *set,
                                   const struct nft_set_elem *elem)
{
        const struct nft_rbtree *priv = nft_set_priv(set);
        const struct rb_node *parent = priv->root.rb_node;
        struct nft_rbtree_elem *rbe, *this = elem->priv;
        u8 genmask = nft_genmask_next(net);
        int d;

        while (parent != NULL) {
                rbe = rb_entry(parent, struct nft_rbtree_elem, node);

                d = memcmp(nft_set_ext_key(&rbe->ext), &elem->key.val,
                           set->klen);
                if (d < 0)
                        parent = parent->rb_left;
                else if (d > 0)
                        parent = parent->rb_right;
                else {
                        if (nft_rbtree_interval_end(rbe) &&
                            nft_rbtree_interval_start(this)) {
                                parent = parent->rb_left;
                                continue;
                        } else if (nft_rbtree_interval_start(rbe) &&
                                   nft_rbtree_interval_end(this)) {
                                parent = parent->rb_right;
                                continue;
                        } else if (!nft_set_elem_active(&rbe->ext, genmask)) {
                                parent = parent->rb_left;
                                continue;
                        }
                        nft_rbtree_flush(net, set, rbe);
                        return rbe;
                }
        }
        return NULL;
}

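/* Iterate over all elements in key order under the read lock, calling
 * iter->fn for every active, unexpired element; used by dump and flush.
 */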
static void nft_rbtree_walk(const struct nft_ctx *ctx,
                            struct nft_set *set,
                            struct nft_set_iter *iter)
{
        struct nft_rbtree *priv = nft_set_priv(set);
        struct nft_rbtree_elem *rbe;
        struct nft_set_elem elem;
        struct rb_node *node;

        read_lock_bh(&priv->lock);
        for (node = rb_first(&priv->root); node != NULL; node = rb_next(node)) {
                rbe = rb_entry(node, struct nft_rbtree_elem, node);

                if (iter->count < iter->skip)
                        goto cont;
                if (nft_set_elem_expired(&rbe->ext))
                        goto cont;
                if (!nft_set_elem_active(&rbe->ext, iter->genmask))
                        goto cont;

                elem.priv = rbe;

                iter->err = iter->fn(ctx, set, iter, &elem);
                if (iter->err < 0) {
                        read_unlock_bh(&priv->lock);
                        return;
                }
cont:
                iter->count++;
        }
        read_unlock_bh(&priv->lock);
}

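/* Periodic garbage collection for sets with timeouts: expired start
 * elements are collected together with their matching end elements and
 * handed to the gc batch infrastructure, which frees them after an RCU
 * grace period. Elements marked busy by a pending transaction are skipped.
 */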
static void nft_rbtree_gc(struct work_struct *work)
{
        struct nft_rbtree_elem *rbe, *rbe_end = NULL, *rbe_prev = NULL;
        struct nft_set_gc_batch *gcb = NULL;
        struct nft_rbtree *priv;
        struct rb_node *node;
        struct nft_set *set;

        priv = container_of(work, struct nft_rbtree, gc_work.work);
        set  = nft_set_container_of(priv);

        write_lock_bh(&priv->lock);
        write_seqcount_begin(&priv->count);
        for (node = rb_first(&priv->root); node != NULL; node = rb_next(node)) {
                rbe = rb_entry(node, struct nft_rbtree_elem, node);

                if (nft_rbtree_interval_end(rbe)) {
                        rbe_end = rbe;
                        continue;
                }
                if (!nft_set_elem_expired(&rbe->ext))
                        continue;
                if (nft_set_elem_mark_busy(&rbe->ext))
                        continue;

                if (rbe_prev) {
                        rb_erase(&rbe_prev->node, &priv->root);
                        rbe_prev = NULL;
                }
                gcb = nft_set_gc_batch_check(set, gcb, GFP_ATOMIC);
                if (!gcb)
                        break;

                atomic_dec(&set->nelems);
                nft_set_gc_batch_add(gcb, rbe);
                rbe_prev = rbe;

                if (rbe_end) {
                        atomic_dec(&set->nelems);
                        nft_set_gc_batch_add(gcb, rbe_end);
                        rb_erase(&rbe_end->node, &priv->root);
                        rbe_end = NULL;
                }
                node = rb_next(node);
                if (!node)
                        break;
        }
        if (rbe_prev)
                rb_erase(&rbe_prev->node, &priv->root);
        write_seqcount_end(&priv->count);
        write_unlock_bh(&priv->lock);

        nft_set_gc_batch_complete(gcb);

        queue_delayed_work(system_power_efficient_wq, &priv->gc_work,
                           nft_set_gc_interval(set));
}

static u64 nft_rbtree_privsize(const struct nlattr * const nla[],
                               const struct nft_set_desc *desc)
{
        return sizeof(struct nft_rbtree);
}

static int nft_rbtree_init(const struct nft_set *set,
                           const struct nft_set_desc *desc,
                           const struct nlattr * const nla[])
{
        struct nft_rbtree *priv = nft_set_priv(set);

        rwlock_init(&priv->lock);
        seqcount_rwlock_init(&priv->count, &priv->lock);
        priv->root = RB_ROOT;

        INIT_DEFERRABLE_WORK(&priv->gc_work, nft_rbtree_gc);
        if (set->flags & NFT_SET_TIMEOUT)
                queue_delayed_work(system_power_efficient_wq, &priv->gc_work,
                                   nft_set_gc_interval(set));

        return 0;
}

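/* Tear down the set: stop the garbage collector, then release all elements.
 * The set is no longer reachable at this point, so no locking is needed.
 */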
static void nft_rbtree_destroy(const struct nft_set *set)
{
        struct nft_rbtree *priv = nft_set_priv(set);
        struct nft_rbtree_elem *rbe;
        struct rb_node *node;

        cancel_delayed_work_sync(&priv->gc_work);
        rcu_barrier();
        while ((node = priv->root.rb_node) != NULL) {
                rb_erase(node, &priv->root);
                rbe = rb_entry(node, struct nft_rbtree_elem, node);
                nft_set_elem_destroy(set, rbe, true);
        }
}

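/* Advertise this backend's cost model to the set core: O(log n) lookups at
 * the price of O(n) memory, one rbtree node per start/end element.
 */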
static bool nft_rbtree_estimate(const struct nft_set_desc *desc, u32 features,
                                struct nft_set_estimate *est)
{
        if (desc->field_count > 1)
                return false;

        if (desc->size)
                est->size = sizeof(struct nft_rbtree) +
                            desc->size * sizeof(struct nft_rbtree_elem);
        else
                est->size = ~0;

        est->lookup = NFT_SET_CLASS_O_LOG_N;
        est->space  = NFT_SET_CLASS_O_N;

        return true;
}

const struct nft_set_type nft_set_rbtree_type = {
        .features       = NFT_SET_INTERVAL | NFT_SET_MAP | NFT_SET_OBJECT | NFT_SET_TIMEOUT,
        .ops            = {
                .privsize       = nft_rbtree_privsize,
                .elemsize       = offsetof(struct nft_rbtree_elem, ext),
                .estimate       = nft_rbtree_estimate,
                .init           = nft_rbtree_init,
                .destroy        = nft_rbtree_destroy,
                .insert         = nft_rbtree_insert,
                .remove         = nft_rbtree_remove,
                .deactivate     = nft_rbtree_deactivate,
                .flush          = nft_rbtree_flush,
                .activate       = nft_rbtree_activate,
                .lookup         = nft_rbtree_lookup,
                .walk           = nft_rbtree_walk,
                .get            = nft_rbtree_get,
        },
};