/*
 * Copyright (C) 2001 Momchil Velikov
 * Portions Copyright (C) 2001 Christoph Hellwig
 * Copyright (C) 2005 SGI, Christoph Lameter
 * Copyright (C) 2006 Nick Piggin
 * Copyright (C) 2012 Konstantin Khlebnikov
 * Copyright (C) 2016 Intel, Matthew Wilcox
 * Copyright (C) 2016 Intel, Ross Zwisler
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License as
 * published by the Free Software Foundation; either version 2, or (at
 * your option) any later version.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
 */
#include <linux/bitmap.h>
#include <linux/bitops.h>
#include <linux/bug.h>
#include <linux/cpu.h>
#include <linux/errno.h>
#include <linux/export.h>
#include <linux/idr.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/kmemleak.h>
#include <linux/percpu.h>
#include <linux/preempt.h>		/* in_interrupt() */
#include <linux/radix-tree.h>
#include <linux/rcupdate.h>
#include <linux/slab.h>
#include <linux/string.h>
#include <linux/xarray.h>
/*
 * Radix tree node cache.
 */
struct kmem_cache *radix_tree_node_cachep;
/*
 * The radix tree is variable-height, so an insert operation not only has
 * to build the branch to its corresponding item, it also has to build the
 * branch to existing items if the size has to be increased (by
 * radix_tree_extend).
 *
 * The worst case is a zero height tree with just a single item at index 0,
 * and then inserting an item at index ULONG_MAX. This requires 2 new branches
 * of RADIX_TREE_MAX_PATH size to be created, with only the root node shared.
 * Hence:
 */
#define RADIX_TREE_PRELOAD_SIZE (RADIX_TREE_MAX_PATH * 2 - 1)
/*
 * The IDR does not have to be as high as the radix tree since it uses
 * signed integers, not unsigned longs.
 */
#define IDR_INDEX_BITS		(8 /* CHAR_BIT */ * sizeof(int) - 1)
#define IDR_MAX_PATH		(DIV_ROUND_UP(IDR_INDEX_BITS, \
						RADIX_TREE_MAP_SHIFT))
#define IDR_PRELOAD_SIZE	(IDR_MAX_PATH * 2 - 1)
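/*
 * Worked example (illustrative): with RADIX_TREE_MAP_SHIFT == 6 and 64-bit
 * longs, RADIX_TREE_MAX_PATH is DIV_ROUND_UP(64, 6) == 11, so
 * RADIX_TREE_PRELOAD_SIZE is 21 nodes per CPU.  IDR_INDEX_BITS is 31, so
 * IDR_MAX_PATH is DIV_ROUND_UP(31, 6) == 6 and IDR_PRELOAD_SIZE is 11.
 */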
/*
 * The IDA is even shorter since it uses a bitmap at the last level.
 */
#define IDA_INDEX_BITS	(8 * sizeof(int) - 1 - ilog2(IDA_BITMAP_BITS))
#define IDA_MAX_PATH		(DIV_ROUND_UP(IDA_INDEX_BITS, \
						RADIX_TREE_MAP_SHIFT))
#define IDA_PRELOAD_SIZE	(IDA_MAX_PATH * 2 - 1)
/*
 * Per-cpu pool of preloaded nodes
 */
struct radix_tree_preload {
	unsigned nr;
	/* nodes->parent points to next preallocated node */
	struct radix_tree_node *nodes;
};
static DEFINE_PER_CPU(struct radix_tree_preload, radix_tree_preloads) = { 0, };
static inline struct radix_tree_node *entry_to_node(void *ptr)
{
	return (void *)((unsigned long)ptr & ~RADIX_TREE_INTERNAL_NODE);
}

static inline void *node_to_entry(void *ptr)
{
	return (void *)((unsigned long)ptr | RADIX_TREE_INTERNAL_NODE);
}

#define RADIX_TREE_RETRY	XA_RETRY_ENTRY
static inline unsigned long
get_slot_offset(const struct radix_tree_node *parent, void __rcu **slot)
{
	return parent ? slot - parent->slots : 0;
}
static unsigned int radix_tree_descend(const struct radix_tree_node *parent,
			struct radix_tree_node **nodep, unsigned long index)
{
	unsigned int offset = (index >> parent->shift) & RADIX_TREE_MAP_MASK;
	void __rcu **entry = rcu_dereference_raw(parent->slots[offset]);

	if (xa_is_sibling(entry)) {
		offset = xa_to_sibling(entry);
		entry = rcu_dereference_raw(parent->slots[offset]);
	}

	*nodep = (void *)entry;
	return offset;
}
static inline gfp_t root_gfp_mask(const struct radix_tree_root *root)
{
	return root->xa_flags & (__GFP_BITS_MASK & ~GFP_ZONEMASK);
}
static inline void tag_set(struct radix_tree_node *node, unsigned int tag,
		int offset)
{
	__set_bit(offset, node->tags[tag]);
}

static inline void tag_clear(struct radix_tree_node *node, unsigned int tag,
		int offset)
{
	__clear_bit(offset, node->tags[tag]);
}

static inline int tag_get(const struct radix_tree_node *node, unsigned int tag,
		int offset)
{
	return test_bit(offset, node->tags[tag]);
}
static inline void root_tag_set(struct radix_tree_root *root, unsigned tag)
{
	root->xa_flags |= (__force gfp_t)(1 << (tag + ROOT_TAG_SHIFT));
}

static inline void root_tag_clear(struct radix_tree_root *root, unsigned tag)
{
	root->xa_flags &= (__force gfp_t)~(1 << (tag + ROOT_TAG_SHIFT));
}

static inline void root_tag_clear_all(struct radix_tree_root *root)
{
	root->xa_flags &= (__force gfp_t)((1 << ROOT_TAG_SHIFT) - 1);
}

static inline int root_tag_get(const struct radix_tree_root *root, unsigned tag)
{
	return (__force int)root->xa_flags & (1 << (tag + ROOT_TAG_SHIFT));
}

static inline unsigned root_tags_get(const struct radix_tree_root *root)
{
	return (__force unsigned)root->xa_flags >> ROOT_TAG_SHIFT;
}

static inline bool is_idr(const struct radix_tree_root *root)
{
	return !!(root->xa_flags & ROOT_IS_IDR);
}
/*
 * Returns 1 if any slot in the node has this tag set.
 * Otherwise returns 0.
 */
static inline int any_tag_set(const struct radix_tree_node *node,
							unsigned int tag)
{
	unsigned idx;
	for (idx = 0; idx < RADIX_TREE_TAG_LONGS; idx++) {
		if (node->tags[tag][idx])
			return 1;
	}
	return 0;
}
static inline void all_tag_set(struct radix_tree_node *node, unsigned int tag)
{
	bitmap_fill(node->tags[tag], RADIX_TREE_MAP_SIZE);
}
/**
 * radix_tree_find_next_bit - find the next set bit in a memory region
 *
 * @node: the node whose tag bitmap is searched
 * @tag: the tag index to search
 * @offset: the bitnumber to start searching at
 *
 * Unrollable variant of find_next_bit() for constant size arrays.
 * Tail bits starting from size to roundup(size, BITS_PER_LONG) must be zero.
 * Returns next bit offset, or size if nothing found.
 */
static __always_inline unsigned long
radix_tree_find_next_bit(struct radix_tree_node *node, unsigned int tag,
			 unsigned long offset)
{
	const unsigned long *addr = node->tags[tag];

	if (offset < RADIX_TREE_MAP_SIZE) {
		unsigned long tmp;

		addr += offset / BITS_PER_LONG;
		tmp = *addr >> (offset % BITS_PER_LONG);
		if (tmp)
			return __ffs(tmp) + offset;
		offset = (offset + BITS_PER_LONG) & ~(BITS_PER_LONG - 1);
		while (offset < RADIX_TREE_MAP_SIZE) {
			tmp = *++addr;
			if (tmp)
				return __ffs(tmp) + offset;
			offset += BITS_PER_LONG;
		}
	}
	return RADIX_TREE_MAP_SIZE;
}
static unsigned int iter_offset(const struct radix_tree_iter *iter)
{
	return (iter->index >> iter_shift(iter)) & RADIX_TREE_MAP_MASK;
}
/*
 * The maximum index which can be stored in a radix tree
 */
static inline unsigned long shift_maxindex(unsigned int shift)
{
	return (RADIX_TREE_MAP_SIZE << shift) - 1;
}

static inline unsigned long node_maxindex(const struct radix_tree_node *node)
{
	return shift_maxindex(node->shift);
}
static unsigned long next_index(unsigned long index,
				const struct radix_tree_node *node,
				unsigned long offset)
{
	return (index & ~node_maxindex(node)) + (offset << node->shift);
}
/*
 * This assumes that the caller has performed appropriate preallocation, and
 * that the caller has pinned this thread of control to the current CPU.
 */
static struct radix_tree_node *
radix_tree_node_alloc(gfp_t gfp_mask, struct radix_tree_node *parent,
			struct radix_tree_root *root,
			unsigned int shift, unsigned int offset,
			unsigned int count, unsigned int nr_values)
{
	struct radix_tree_node *ret = NULL;

	/*
	 * Preload code isn't irq safe and it doesn't make sense to use
	 * preloading during an interrupt anyway as all the allocations have
	 * to be atomic. So just do normal allocation when in interrupt.
	 */
	if (!gfpflags_allow_blocking(gfp_mask) && !in_interrupt()) {
		struct radix_tree_preload *rtp;

		/*
		 * Even if the caller has preloaded, try to allocate from the
		 * cache first for the new node to get accounted to the memory
		 * cgroup.
		 */
		ret = kmem_cache_alloc(radix_tree_node_cachep,
				       gfp_mask | __GFP_NOWARN);
		if (ret)
			goto out;

		/*
		 * Provided the caller has preloaded here, we will always
		 * succeed in getting a node here (and never reach
		 * kmem_cache_alloc)
		 */
		rtp = this_cpu_ptr(&radix_tree_preloads);
		if (rtp->nr) {
			ret = rtp->nodes;
			rtp->nodes = ret->parent;
			rtp->nr--;
		}
		/*
		 * Update the allocation stack trace as this is more useful
		 * for debugging.
		 */
		kmemleak_update_trace(ret);
		goto out;
	}
	ret = kmem_cache_alloc(radix_tree_node_cachep, gfp_mask);
out:
	BUG_ON(radix_tree_is_internal_node(ret));
	if (ret) {
		ret->shift = shift;
		ret->offset = offset;
		ret->count = count;
		ret->nr_values = nr_values;
		ret->parent = parent;
		ret->array = root;
	}
	return ret;
}
void radix_tree_node_rcu_free(struct rcu_head *head)
{
	struct radix_tree_node *node =
			container_of(head, struct radix_tree_node, rcu_head);

	/*
	 * Must only free zeroed nodes into the slab.  We can be left with
	 * non-NULL entries by radix_tree_free_nodes, so clear the entries
	 * and tags here.
	 */
	memset(node->slots, 0, sizeof(node->slots));
	memset(node->tags, 0, sizeof(node->tags));
	INIT_LIST_HEAD(&node->private_list);

	kmem_cache_free(radix_tree_node_cachep, node);
}

static inline void
radix_tree_node_free(struct radix_tree_node *node)
{
	call_rcu(&node->rcu_head, radix_tree_node_rcu_free);
}
/*
 * Load up this CPU's radix_tree_node buffer with sufficient objects to
 * ensure that the addition of a single element in the tree cannot fail.  On
 * success, return zero, with preemption disabled.  On error, return -ENOMEM
 * with preemption not disabled.
 *
 * To make use of this facility, the radix tree must be initialised without
 * __GFP_DIRECT_RECLAIM being passed to INIT_RADIX_TREE().
 */
static __must_check int __radix_tree_preload(gfp_t gfp_mask, unsigned nr)
{
	struct radix_tree_preload *rtp;
	struct radix_tree_node *node;
	int ret = -ENOMEM;

	/*
	 * Nodes preloaded by one cgroup can be used by another cgroup, so
	 * they should never be accounted to any particular memory cgroup.
	 */
	gfp_mask &= ~__GFP_ACCOUNT;

	preempt_disable();
	rtp = this_cpu_ptr(&radix_tree_preloads);
	while (rtp->nr < nr) {
		preempt_enable();
		node = kmem_cache_alloc(radix_tree_node_cachep, gfp_mask);
		if (node == NULL)
			goto out;
		preempt_disable();
		rtp = this_cpu_ptr(&radix_tree_preloads);
		if (rtp->nr < nr) {
			node->parent = rtp->nodes;
			rtp->nodes = node;
			rtp->nr++;
		} else {
			kmem_cache_free(radix_tree_node_cachep, node);
		}
	}
	ret = 0;
out:
	return ret;
}
/*
 * Load up this CPU's radix_tree_node buffer with sufficient objects to
 * ensure that the addition of a single element in the tree cannot fail.  On
 * success, return zero, with preemption disabled.  On error, return -ENOMEM
 * with preemption not disabled.
 *
 * To make use of this facility, the radix tree must be initialised without
 * __GFP_DIRECT_RECLAIM being passed to INIT_RADIX_TREE().
 */
int radix_tree_preload(gfp_t gfp_mask)
{
	/* Warn on non-sensical use... */
	WARN_ON_ONCE(!gfpflags_allow_blocking(gfp_mask));
	return __radix_tree_preload(gfp_mask, RADIX_TREE_PRELOAD_SIZE);
}
EXPORT_SYMBOL(radix_tree_preload);
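/*
 * Example (illustrative): the usual preload pattern.  Here my_tree and
 * my_lock stand for a caller-owned tree and lock; radix_tree_preload_end()
 * re-enables preemption:
 *
 *	if (radix_tree_preload(GFP_KERNEL))
 *		return -ENOMEM;
 *	spin_lock(&my_lock);
 *	error = radix_tree_insert(&my_tree, index, item);
 *	spin_unlock(&my_lock);
 *	radix_tree_preload_end();
 */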
/*
 * The same as above function, except we don't guarantee preloading happens.
 * We do it, if we decide it helps. On success, return zero with preemption
 * disabled. On error, return -ENOMEM with preemption not disabled.
 */
int radix_tree_maybe_preload(gfp_t gfp_mask)
{
	if (gfpflags_allow_blocking(gfp_mask))
		return __radix_tree_preload(gfp_mask, RADIX_TREE_PRELOAD_SIZE);
	/* Preloading doesn't help anything with this gfp mask, skip it */
	preempt_disable();
	return 0;
}
EXPORT_SYMBOL(radix_tree_maybe_preload);
static unsigned radix_tree_load_root(const struct radix_tree_root *root,
		struct radix_tree_node **nodep, unsigned long *maxindex)
{
	struct radix_tree_node *node = rcu_dereference_raw(root->xa_head);

	*nodep = node;

	if (likely(radix_tree_is_internal_node(node))) {
		node = entry_to_node(node);
		*maxindex = node_maxindex(node);
		return node->shift + RADIX_TREE_MAP_SHIFT;
	}

	*maxindex = 0;
	return 0;
}
/*
 * Extend a radix tree so it can store key @index.
 */
static int radix_tree_extend(struct radix_tree_root *root, gfp_t gfp,
				unsigned long index, unsigned int shift)
{
	void *entry;
	unsigned int maxshift;
	int tag;

	/* Figure out what the shift should be.  */
	maxshift = shift;
	while (index > shift_maxindex(maxshift))
		maxshift += RADIX_TREE_MAP_SHIFT;

	entry = rcu_dereference_raw(root->xa_head);
	if (!entry && (!is_idr(root) || root_tag_get(root, IDR_FREE)))
		goto out;

	do {
		struct radix_tree_node *node = radix_tree_node_alloc(gfp, NULL,
							root, shift, 0, 1, 0);
		if (!node)
			return -ENOMEM;

		if (is_idr(root)) {
			all_tag_set(node, IDR_FREE);
			if (!root_tag_get(root, IDR_FREE)) {
				tag_clear(node, IDR_FREE, 0);
				root_tag_set(root, IDR_FREE);
			}
		} else {
			/* Propagate the aggregated tag info to the new child */
			for (tag = 0; tag < RADIX_TREE_MAX_TAGS; tag++) {
				if (root_tag_get(root, tag))
					tag_set(node, tag, 0);
			}
		}

		BUG_ON(shift > BITS_PER_LONG);
		if (radix_tree_is_internal_node(entry)) {
			entry_to_node(entry)->parent = node;
		} else if (xa_is_value(entry)) {
			/* Moving a value entry root->xa_head to a node */
			node->nr_values = 1;
		}
		/*
		 * entry was already in the radix tree, so we do not need
		 * rcu_assign_pointer here
		 */
		node->slots[0] = (void __rcu *)entry;
		entry = node_to_entry(node);
		rcu_assign_pointer(root->xa_head, entry);
		shift += RADIX_TREE_MAP_SHIFT;
	} while (shift <= maxshift);
out:
	return maxshift + RADIX_TREE_MAP_SHIFT;
}
/**
 *	radix_tree_shrink    -    shrink radix tree to minimum height
 *	@root:		radix tree root
 */
static inline bool radix_tree_shrink(struct radix_tree_root *root)
{
	bool shrunk = false;

	for (;;) {
		struct radix_tree_node *node = rcu_dereference_raw(root->xa_head);
		struct radix_tree_node *child;

		if (!radix_tree_is_internal_node(node))
			break;
		node = entry_to_node(node);

		/*
		 * If the candidate node has more than one child, or its child
		 * is not at the leftmost slot, or the child is a multiorder
		 * entry, we cannot shrink.
		 */
		if (node->count != 1)
			break;
		child = rcu_dereference_raw(node->slots[0]);
		if (!child)
			break;
		if (!radix_tree_is_internal_node(child) && node->shift)
			break;

		/*
		 * For an IDR, we must not shrink entry 0 into the root in
		 * case somebody calls idr_replace() with a pointer that
		 * appears to be an internal entry
		 */
		if (!node->shift && is_idr(root))
			break;

		if (radix_tree_is_internal_node(child))
			entry_to_node(child)->parent = NULL;

		/*
		 * We don't need rcu_assign_pointer(), since we are simply
		 * moving the node from one part of the tree to another: if it
		 * was safe to dereference the old pointer to it
		 * (node->slots[0]), it will be safe to dereference the new
		 * one (root->xa_head) as far as dependent read barriers go.
		 */
		root->xa_head = (void __rcu *)child;
		if (is_idr(root) && !tag_get(node, IDR_FREE, 0))
			root_tag_clear(root, IDR_FREE);

		/*
		 * We have a dilemma here. The node's slot[0] must not be
		 * NULLed in case there are concurrent lookups expecting to
		 * find the item. However if this was a bottom-level node,
		 * then it may be subject to the slot pointer being visible
		 * to callers dereferencing it. If item corresponding to
		 * slot[0] is subsequently deleted, these callers would expect
		 * their slot to become empty sooner or later.
		 *
		 * For example, lockless pagecache will look up a slot, deref
		 * the page pointer, and if the page has 0 refcount it means it
		 * was concurrently deleted from pagecache so try the deref
		 * again. Fortunately there is already a requirement for logic
		 * to retry the entire slot lookup -- the indirect pointer
		 * problem (replacing direct root node with an indirect pointer
		 * also results in a stale slot). So tag the slot as indirect
		 * to force callers to retry.
		 */
		node->count = 0;
		if (!radix_tree_is_internal_node(child)) {
			node->slots[0] = (void __rcu *)RADIX_TREE_RETRY;
		}

		WARN_ON_ONCE(!list_empty(&node->private_list));
		radix_tree_node_free(node);
		shrunk = true;
	}

	return shrunk;
}
static bool delete_node(struct radix_tree_root *root,
			struct radix_tree_node *node)
{
	bool deleted = false;

	do {
		struct radix_tree_node *parent;

		if (node->count) {
			if (node_to_entry(node) ==
					rcu_dereference_raw(root->xa_head))
				deleted |= radix_tree_shrink(root);
			return deleted;
		}

		parent = node->parent;
		if (parent) {
			parent->slots[node->offset] = NULL;
			parent->count--;
		} else {
			/*
			 * Shouldn't the tags already have all been cleared
			 * by the caller?
			 */
			if (!is_idr(root))
				root_tag_clear_all(root);
			root->xa_head = NULL;
		}

		WARN_ON_ONCE(!list_empty(&node->private_list));
		radix_tree_node_free(node);
		deleted = true;

		node = parent;
	} while (node);

	return deleted;
}
/**
 *	__radix_tree_create	-	create a slot in a radix tree
 *	@root:		radix tree root
 *	@index:		index key
 *	@order:		index occupies 2^order aligned slots
 *	@nodep:		returns node
 *	@slotp:		returns slot
 *
 *	Create, if necessary, and return the node and slot for an item
 *	at position @index in the radix tree @root.
 *
 *	Until there is more than one item in the tree, no nodes are
 *	allocated and @root->xa_head is used as a direct slot instead of
 *	pointing to a node, in which case *@nodep will be NULL.
 *
 *	Returns -ENOMEM, or 0 for success.
 */
static int __radix_tree_create(struct radix_tree_root *root,
		unsigned long index, unsigned order,
		struct radix_tree_node **nodep, void __rcu ***slotp)
{
	struct radix_tree_node *node = NULL, *child;
	void __rcu **slot = (void __rcu **)&root->xa_head;
	unsigned long maxindex;
	unsigned int shift, offset = 0;
	unsigned long max = index | ((1UL << order) - 1);
	gfp_t gfp = root_gfp_mask(root);

	shift = radix_tree_load_root(root, &child, &maxindex);

	/* Make sure the tree is high enough.  */
	if (order > 0 && max == ((1UL << order) - 1))
		max = ULONG_MAX;
	if (max > maxindex) {
		int error = radix_tree_extend(root, gfp, max, shift);
		if (error < 0)
			return error;
		shift = error;
		child = rcu_dereference_raw(root->xa_head);
	}

	while (shift > order) {
		shift -= RADIX_TREE_MAP_SHIFT;
		if (child == NULL) {
			/* Have to add a child node.  */
			child = radix_tree_node_alloc(gfp, node, root, shift,
							offset, 0, 0);
			if (!child)
				return -ENOMEM;
			rcu_assign_pointer(*slot, node_to_entry(child));
			if (node)
				node->count++;
		} else if (!radix_tree_is_internal_node(child))
			break;

		/* Go a level down */
		node = entry_to_node(child);
		offset = radix_tree_descend(node, &child, index);
		slot = &node->slots[offset];
	}

	if (nodep)
		*nodep = node;
	if (slotp)
		*slotp = slot;
	return 0;
}
/*
 * Free any nodes below this node.  The tree is presumed to not need
 * shrinking, and any user data in the tree is presumed to not need a
 * destructor called on it.  If we need to add a destructor, we can
 * add that functionality later.  Note that we may not clear tags or
 * slots from the tree as an RCU walker may still have a pointer into
 * this subtree.  We could replace the entries with RADIX_TREE_RETRY,
 * but we'll still have to clear those in rcu_free.
 */
static void radix_tree_free_nodes(struct radix_tree_node *node)
{
	unsigned offset = 0;
	struct radix_tree_node *child = entry_to_node(node);

	for (;;) {
		void *entry = rcu_dereference_raw(child->slots[offset]);
		if (xa_is_node(entry) && child->shift) {
			child = entry_to_node(entry);
			offset = 0;
			continue;
		}
		offset++;
		while (offset == RADIX_TREE_MAP_SIZE) {
			struct radix_tree_node *old = child;
			offset = child->offset + 1;
			child = child->parent;
			WARN_ON_ONCE(!list_empty(&old->private_list));
			radix_tree_node_free(old);
			if (old == entry_to_node(node))
				return;
		}
	}
}
#ifdef CONFIG_RADIX_TREE_MULTIORDER
static inline int insert_entries(struct radix_tree_node *node,
		void __rcu **slot, void *item, unsigned order, bool replace)
{
	void *sibling;
	unsigned i, n, tag, offset, tags = 0;

	if (node) {
		if (order > node->shift)
			n = 1 << (order - node->shift);
		else
			n = 1;
		offset = get_slot_offset(node, slot);
	} else {
		n = 1;
		offset = 0;
	}

	if (n > 1) {
		offset = offset & ~(n - 1);
		slot = &node->slots[offset];
	}
	sibling = xa_mk_sibling(offset);

	for (i = 0; i < n; i++) {
		if (slot[i]) {
			if (replace) {
				node->count--;
				for (tag = 0; tag < RADIX_TREE_MAX_TAGS; tag++)
					if (tag_get(node, tag, offset + i))
						tags |= 1 << tag;
			} else
				return -EEXIST;
		}
	}

	for (i = 0; i < n; i++) {
		struct radix_tree_node *old = rcu_dereference_raw(slot[i]);
		if (i) {
			rcu_assign_pointer(slot[i], sibling);
			for (tag = 0; tag < RADIX_TREE_MAX_TAGS; tag++)
				if (tags & (1 << tag))
					tag_clear(node, tag, offset + i);
		} else {
			rcu_assign_pointer(slot[i], item);
			for (tag = 0; tag < RADIX_TREE_MAX_TAGS; tag++)
				if (tags & (1 << tag))
					tag_set(node, tag, offset);
		}
		if (xa_is_node(old))
			radix_tree_free_nodes(old);
		if (xa_is_value(old))
			node->nr_values--;
	}
	if (node) {
		node->count += n;
		if (xa_is_value(item))
			node->nr_values += n;
	}
	return n;
}
#else
static inline int insert_entries(struct radix_tree_node *node,
		void __rcu **slot, void *item, unsigned order, bool replace)
{
	if (*slot)
		return -EEXIST;
	rcu_assign_pointer(*slot, item);
	if (node) {
		node->count++;
		if (xa_is_value(item))
			node->nr_values++;
	}
	return 1;
}
#endif
/**
 *	__radix_tree_insert    -    insert into a radix tree
 *	@root:		radix tree root
 *	@index:		index key
 *	@order:		key covers the 2^order indices around index
 *	@item:		item to insert
 *
 *	Insert an item into the radix tree at position @index.
 */
int __radix_tree_insert(struct radix_tree_root *root, unsigned long index,
			unsigned order, void *item)
{
	struct radix_tree_node *node;
	void __rcu **slot;
	int error;

	BUG_ON(radix_tree_is_internal_node(item));

	error = __radix_tree_create(root, index, order, &node, &slot);
	if (error)
		return error;

	error = insert_entries(node, slot, item, order, false);
	if (error < 0)
		return error;

	if (node) {
		unsigned offset = get_slot_offset(node, slot);
		BUG_ON(tag_get(node, 0, offset));
		BUG_ON(tag_get(node, 1, offset));
		BUG_ON(tag_get(node, 2, offset));
	} else {
		BUG_ON(root_tags_get(root));
	}

	return 0;
}
EXPORT_SYMBOL(__radix_tree_insert);
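/*
 * Example (illustrative): single-order insertion through the
 * radix_tree_insert() wrapper, with my_tree a caller-owned tree;
 * -EEXIST reports that the slot was already occupied:
 *
 *	error = radix_tree_insert(&my_tree, index, item);
 *	if (error == -EEXIST)
 *		item = radix_tree_lookup(&my_tree, index);
 */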
/**
 *	__radix_tree_lookup	-	lookup an item in a radix tree
 *	@root:		radix tree root
 *	@index:		index key
 *	@nodep:		returns node
 *	@slotp:		returns slot
 *
 *	Lookup and return the item at position @index in the radix
 *	tree @root.
 *
 *	Until there is more than one item in the tree, no nodes are
 *	allocated and @root->xa_head is used as a direct slot instead of
 *	pointing to a node, in which case *@nodep will be NULL.
 */
void *__radix_tree_lookup(const struct radix_tree_root *root,
			  unsigned long index, struct radix_tree_node **nodep,
			  void __rcu ***slotp)
{
	struct radix_tree_node *node, *parent;
	unsigned long maxindex;
	void __rcu **slot;

 restart:
	parent = NULL;
	slot = (void __rcu **)&root->xa_head;
	radix_tree_load_root(root, &node, &maxindex);
	if (index > maxindex)
		return NULL;

	while (radix_tree_is_internal_node(node)) {
		unsigned offset;

		if (node == RADIX_TREE_RETRY)
			goto restart;
		parent = entry_to_node(node);
		offset = radix_tree_descend(parent, &node, index);
		slot = parent->slots + offset;
		if (parent->shift == 0)
			break;
	}

	if (nodep)
		*nodep = parent;
	if (slotp)
		*slotp = slot;
	return node;
}
/**
 *	radix_tree_lookup_slot    -    lookup a slot in a radix tree
 *	@root:		radix tree root
 *	@index:		index key
 *
 *	Returns:  the slot corresponding to the position @index in the
 *	radix tree @root. This is useful for update-if-exists operations.
 *
 *	This function can be called under rcu_read_lock iff the slot is not
 *	modified by radix_tree_replace_slot, otherwise it must be called
 *	exclusive from other writers. Any dereference of the slot must be done
 *	using radix_tree_deref_slot.
 */
void __rcu **radix_tree_lookup_slot(const struct radix_tree_root *root,
				unsigned long index)
{
	void __rcu **slot;

	if (!__radix_tree_lookup(root, index, NULL, &slot))
		return NULL;
	return slot;
}
EXPORT_SYMBOL(radix_tree_lookup_slot);
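/*
 * Example (illustrative): update-if-exists under the caller's tree lock,
 * with my_tree and new_item supplied by the caller:
 *
 *	void __rcu **slot = radix_tree_lookup_slot(&my_tree, index);
 *	if (slot)
 *		radix_tree_replace_slot(&my_tree, slot, new_item);
 */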
/**
 *	radix_tree_lookup    -    perform lookup operation on a radix tree
 *	@root:		radix tree root
 *	@index:		index key
 *
 *	Lookup the item at the position @index in the radix tree @root.
 *
 *	This function can be called under rcu_read_lock, however the caller
 *	must manage lifetimes of leaf nodes (eg. RCU may also be used to free
 *	them safely). No RCU barriers are required to access or modify the
 *	returned item, however.
 */
void *radix_tree_lookup(const struct radix_tree_root *root, unsigned long index)
{
	return __radix_tree_lookup(root, index, NULL, NULL);
}
EXPORT_SYMBOL(radix_tree_lookup);
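/*
 * Example (illustrative): an RCU-protected lookup.  The caller must pin the
 * returned item before dropping the read lock; my_get_ref() stands for the
 * caller's own reference-counting helper:
 *
 *	rcu_read_lock();
 *	item = radix_tree_lookup(&my_tree, index);
 *	if (item && !my_get_ref(item))
 *		item = NULL;
 *	rcu_read_unlock();
 */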
static inline void replace_sibling_entries(struct radix_tree_node *node,
				void __rcu **slot, int count, int values)
{
#ifdef CONFIG_RADIX_TREE_MULTIORDER
	unsigned offset = get_slot_offset(node, slot);
	void *ptr = xa_mk_sibling(offset);

	while (++offset < RADIX_TREE_MAP_SIZE) {
		if (rcu_dereference_raw(node->slots[offset]) != ptr)
			break;
		if (count < 0) {
			node->slots[offset] = NULL;
			node->count--;
		}
		node->nr_values += values;
	}
#endif
}
static void replace_slot(void __rcu **slot, void *item,
		struct radix_tree_node *node, int count, int values)
{
	if (node && (count || values)) {
		node->count += count;
		node->nr_values += values;
		replace_sibling_entries(node, slot, count, values);
	}

	rcu_assign_pointer(*slot, item);
}
static bool node_tag_get(const struct radix_tree_root *root,
				const struct radix_tree_node *node,
				unsigned int tag, unsigned int offset)
{
	if (node)
		return tag_get(node, tag, offset);
	return root_tag_get(root, tag);
}
/*
 * IDR users want to be able to store NULL in the tree, so if the slot isn't
 * free, don't adjust the count, even if it's transitioning between NULL and
 * non-NULL.  For the IDA, we mark slots as being IDR_FREE while they still
 * have empty bits, but it only stores NULL in slots when they're being
 * deleted.
 */
static int calculate_count(struct radix_tree_root *root,
				struct radix_tree_node *node, void __rcu **slot,
				void *item, void *old)
{
	if (is_idr(root)) {
		unsigned offset = get_slot_offset(node, slot);
		bool free = node_tag_get(root, node, IDR_FREE, offset);
		if (!free)
			return 0;
		if (!old)
			return 1;
	}
	return !!item - !!old;
}
/**
 * __radix_tree_replace		- replace item in a slot
 * @root:		radix tree root
 * @node:		pointer to tree node
 * @slot:		pointer to slot in @node
 * @item:		new item to store in the slot.
 *
 * For use with __radix_tree_lookup().  Caller must hold tree write locked
 * across slot lookup and replacement.
 */
void __radix_tree_replace(struct radix_tree_root *root,
			  struct radix_tree_node *node,
			  void __rcu **slot, void *item)
{
	void *old = rcu_dereference_raw(*slot);
	int values = !!xa_is_value(item) - !!xa_is_value(old);
	int count = calculate_count(root, node, slot, item, old);

	/*
	 * This function supports replacing value entries and
	 * deleting entries, but that needs accounting against the
	 * node unless the slot is root->xa_head.
	 */
	WARN_ON_ONCE(!node && (slot != (void __rcu **)&root->xa_head) &&
			(count || values));
	replace_slot(slot, item, node, count, values);

	if (!node)
		return;

	delete_node(root, node);
}
/**
 * radix_tree_replace_slot	- replace item in a slot
 * @root:	radix tree root
 * @slot:	pointer to slot
 * @item:	new item to store in the slot.
 *
 * For use with radix_tree_lookup_slot() and
 * radix_tree_gang_lookup_tag_slot().  Caller must hold tree write locked
 * across slot lookup and replacement.
 *
 * NOTE: This cannot be used to switch between non-entries (empty slots),
 * regular entries, and value entries, as that requires accounting
 * inside the radix tree node.  When switching from one type of entry to
 * another or deleting, use __radix_tree_lookup() and __radix_tree_replace()
 * or radix_tree_iter_replace().
 */
void radix_tree_replace_slot(struct radix_tree_root *root,
			     void __rcu **slot, void *item)
{
	__radix_tree_replace(root, NULL, slot, item);
}
EXPORT_SYMBOL(radix_tree_replace_slot);
/**
 * radix_tree_iter_replace - replace item in a slot
 * @root:	radix tree root
 * @iter:	iterator state
 * @slot:	pointer to slot
 * @item:	new item to store in the slot.
 *
 * For use with radix_tree_for_each_slot().
 * Caller must hold tree write locked.
 */
void radix_tree_iter_replace(struct radix_tree_root *root,
				const struct radix_tree_iter *iter,
				void __rcu **slot, void *item)
{
	__radix_tree_replace(root, iter->node, slot, item);
}
static void node_tag_set(struct radix_tree_root *root,
				struct radix_tree_node *node,
				unsigned int tag, unsigned int offset)
{
	while (node) {
		if (tag_get(node, tag, offset))
			return;
		tag_set(node, tag, offset);
		offset = node->offset;
		node = node->parent;
	}

	if (!root_tag_get(root, tag))
		root_tag_set(root, tag);
}
/**
 *	radix_tree_tag_set - set a tag on a radix tree node
 *	@root:		radix tree root
 *	@index:		index key
 *	@tag:		tag index
 *
 *	Set the search tag (which must be < RADIX_TREE_MAX_TAGS)
 *	corresponding to @index in the radix tree, from
 *	the root all the way down to the leaf node.
 *
 *	Returns the address of the tagged item.  Setting a tag on a not-present
 *	item is a bug.
 */
void *radix_tree_tag_set(struct radix_tree_root *root,
			unsigned long index, unsigned int tag)
{
	struct radix_tree_node *node, *parent;
	unsigned long maxindex;

	radix_tree_load_root(root, &node, &maxindex);
	BUG_ON(index > maxindex);

	while (radix_tree_is_internal_node(node)) {
		unsigned offset;

		parent = entry_to_node(node);
		offset = radix_tree_descend(parent, &node, index);
		BUG_ON(!node);

		if (!tag_get(parent, tag, offset))
			tag_set(parent, tag, offset);
	}

	/* set the root's tag bit */
	if (!root_tag_get(root, tag))
		root_tag_set(root, tag);

	return node;
}
EXPORT_SYMBOL(radix_tree_tag_set);
/**
 * radix_tree_iter_tag_set - set a tag on the current iterator entry
 * @root:	radix tree root
 * @iter:	iterator state
 * @tag:	tag to set
 */
void radix_tree_iter_tag_set(struct radix_tree_root *root,
			const struct radix_tree_iter *iter, unsigned int tag)
{
	node_tag_set(root, iter->node, tag, iter_offset(iter));
}
static void node_tag_clear(struct radix_tree_root *root,
				struct radix_tree_node *node,
				unsigned int tag, unsigned int offset)
{
	while (node) {
		if (!tag_get(node, tag, offset))
			return;
		tag_clear(node, tag, offset);
		if (any_tag_set(node, tag))
			return;

		offset = node->offset;
		node = node->parent;
	}

	/* clear the root's tag bit */
	if (root_tag_get(root, tag))
		root_tag_clear(root, tag);
}
/**
 *	radix_tree_tag_clear - clear a tag on a radix tree node
 *	@root:		radix tree root
 *	@index:		index key
 *	@tag:		tag index
 *
 *	Clear the search tag (which must be < RADIX_TREE_MAX_TAGS)
 *	corresponding to @index in the radix tree.  If this causes
 *	the leaf node to have no tags set then clear the tag in the
 *	next-to-leaf node, etc.
 *
 *	Returns the address of the tagged item on success, else NULL.  ie:
 *	has the same return value and semantics as radix_tree_lookup().
 */
void *radix_tree_tag_clear(struct radix_tree_root *root,
			unsigned long index, unsigned int tag)
{
	struct radix_tree_node *node, *parent;
	unsigned long maxindex;
	int uninitialized_var(offset);

	radix_tree_load_root(root, &node, &maxindex);
	if (index > maxindex)
		return NULL;

	parent = NULL;

	while (radix_tree_is_internal_node(node)) {
		parent = entry_to_node(node);
		offset = radix_tree_descend(parent, &node, index);
	}

	if (node)
		node_tag_clear(root, parent, tag, offset);

	return node;
}
EXPORT_SYMBOL(radix_tree_tag_clear);
/**
 * radix_tree_iter_tag_clear - clear a tag on the current iterator entry
 * @root:	radix tree root
 * @iter:	iterator state
 * @tag:	tag to clear
 */
void radix_tree_iter_tag_clear(struct radix_tree_root *root,
			const struct radix_tree_iter *iter, unsigned int tag)
{
	node_tag_clear(root, iter->node, tag, iter_offset(iter));
}
/**
 * radix_tree_tag_get - get a tag on a radix tree node
 * @root:		radix tree root
 * @index:		index key
 * @tag:		tag index (< RADIX_TREE_MAX_TAGS)
 *
 * Return values:
 *
 *  0: tag not present or not set
 *  1: tag set
 *
 * Note that the return value of this function may not be relied on, even if
 * the RCU lock is held, unless tag modification and node deletion are excluded
 * from concurrency.
 */
int radix_tree_tag_get(const struct radix_tree_root *root,
			unsigned long index, unsigned int tag)
{
	struct radix_tree_node *node, *parent;
	unsigned long maxindex;

	if (!root_tag_get(root, tag))
		return 0;

	radix_tree_load_root(root, &node, &maxindex);
	if (index > maxindex)
		return 0;

	while (radix_tree_is_internal_node(node)) {
		unsigned offset;

		parent = entry_to_node(node);
		offset = radix_tree_descend(parent, &node, index);

		if (!tag_get(parent, tag, offset))
			return 0;
		if (node == RADIX_TREE_RETRY)
			break;
	}

	return 1;
}
EXPORT_SYMBOL(radix_tree_tag_get);
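/*
 * Example (illustrative): the tag operations above compose as follows,
 * with my_tree and MY_TAG (a tag index < RADIX_TREE_MAX_TAGS) supplied
 * by the caller:
 *
 *	radix_tree_tag_set(&my_tree, index, MY_TAG);
 *	if (radix_tree_tag_get(&my_tree, index, MY_TAG))
 *		radix_tree_tag_clear(&my_tree, index, MY_TAG);
 */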
static inline void __set_iter_shift(struct radix_tree_iter *iter,
					unsigned int shift)
{
#ifdef CONFIG_RADIX_TREE_MULTIORDER
	iter->shift = shift;
#endif
}
/* Construct iter->tags bit-mask from node->tags[tag] array */
static void set_iter_tags(struct radix_tree_iter *iter,
				struct radix_tree_node *node, unsigned offset,
				unsigned tag)
{
	unsigned tag_long = offset / BITS_PER_LONG;
	unsigned tag_bit  = offset % BITS_PER_LONG;

	if (!node) {
		iter->tags = 1;
		return;
	}

	iter->tags = node->tags[tag][tag_long] >> tag_bit;

	/* This never happens if RADIX_TREE_TAG_LONGS == 1 */
	if (tag_long < RADIX_TREE_TAG_LONGS - 1) {
		/* Pick tags from next element */
		if (tag_bit)
			iter->tags |= node->tags[tag][tag_long + 1] <<
						(BITS_PER_LONG - tag_bit);
		/* Clip chunk size, here only BITS_PER_LONG tags */
		iter->next_index = __radix_tree_iter_add(iter, BITS_PER_LONG);
	}
}
#ifdef CONFIG_RADIX_TREE_MULTIORDER
static void __rcu **skip_siblings(struct radix_tree_node **nodep,
			void __rcu **slot, struct radix_tree_iter *iter)
{
	while (iter->index < iter->next_index) {
		*nodep = rcu_dereference_raw(*slot);
		if (*nodep && !xa_is_sibling(*nodep))
			return slot;
		slot++;
		iter->index = __radix_tree_iter_add(iter, 1);
		iter->tags >>= 1;
	}

	*nodep = NULL;
	return NULL;
}
void __rcu **__radix_tree_next_slot(void __rcu **slot,
				struct radix_tree_iter *iter, unsigned flags)
{
	unsigned tag = flags & RADIX_TREE_ITER_TAG_MASK;
	struct radix_tree_node *node;

	slot = skip_siblings(&node, slot, iter);

	while (radix_tree_is_internal_node(node)) {
		unsigned offset;
		unsigned long next_index;

		if (node == RADIX_TREE_RETRY)
			return slot;
		node = entry_to_node(node);
		iter->node = node;
		iter->shift = node->shift;

		if (flags & RADIX_TREE_ITER_TAGGED) {
			offset = radix_tree_find_next_bit(node, tag, 0);
			if (offset == RADIX_TREE_MAP_SIZE)
				return NULL;
			slot = &node->slots[offset];
			iter->index = __radix_tree_iter_add(iter, offset);
			set_iter_tags(iter, node, offset, tag);
			node = rcu_dereference_raw(*slot);
		} else {
			offset = 0;
			slot = &node->slots[0];
			for (;;) {
				node = rcu_dereference_raw(*slot);
				if (node)
					break;
				slot++;
				offset++;
				if (offset == RADIX_TREE_MAP_SIZE)
					return NULL;
			}
			iter->index = __radix_tree_iter_add(iter, offset);
		}
		if ((flags & RADIX_TREE_ITER_CONTIG) && (offset > 0))
			goto none;
		next_index = (iter->index | shift_maxindex(iter->shift)) + 1;
		if (next_index < iter->next_index)
			iter->next_index = next_index;
	}

	return slot;
 none:
	iter->next_index = 0;
	return NULL;
}
EXPORT_SYMBOL(__radix_tree_next_slot);
#else
static void __rcu **skip_siblings(struct radix_tree_node **nodep,
			void __rcu **slot, struct radix_tree_iter *iter)
{
	return slot;
}
#endif
void __rcu **radix_tree_iter_resume(void __rcu **slot,
					struct radix_tree_iter *iter)
{
	struct radix_tree_node *node;

	slot++;
	iter->index = __radix_tree_iter_add(iter, 1);
	skip_siblings(&node, slot, iter);
	iter->next_index = iter->index;
	iter->tags = 0;
	return NULL;
}
EXPORT_SYMBOL(radix_tree_iter_resume);
/**
 * radix_tree_next_chunk - find next chunk of slots for iteration
 *
 * @root:	radix tree root
 * @iter:	iterator state
 * @flags:	RADIX_TREE_ITER_* flags and tag index
 * Returns:	pointer to chunk first slot, or NULL if iteration is over
 */
void __rcu **radix_tree_next_chunk(const struct radix_tree_root *root,
			     struct radix_tree_iter *iter, unsigned flags)
{
	unsigned tag = flags & RADIX_TREE_ITER_TAG_MASK;
	struct radix_tree_node *node, *child;
	unsigned long index, offset, maxindex;

	if ((flags & RADIX_TREE_ITER_TAGGED) && !root_tag_get(root, tag))
		return NULL;

	/*
	 * Catch next_index overflow after ~0UL.  iter->index never overflows
	 * during iterating; it can be zero only at the beginning.
	 * And we cannot overflow iter->next_index in a single step,
	 * because RADIX_TREE_MAP_SHIFT < BITS_PER_LONG.
	 *
	 * This condition is also used by radix_tree_next_slot() to stop
	 * contiguous iterating, and forbid switching to the next chunk.
	 */
	index = iter->next_index;
	if (!index && iter->index)
		return NULL;

 restart:
	radix_tree_load_root(root, &child, &maxindex);
	if (index > maxindex)
		return NULL;
	if (!child)
		return NULL;

	if (!radix_tree_is_internal_node(child)) {
		/* Single-slot tree */
		iter->index = index;
		iter->next_index = maxindex + 1;
		iter->tags = 1;
		iter->node = NULL;
		__set_iter_shift(iter, 0);
		return (void __rcu **)&root->xa_head;
	}

	do {
		node = entry_to_node(child);
		offset = radix_tree_descend(node, &child, index);

		if ((flags & RADIX_TREE_ITER_TAGGED) ?
				!tag_get(node, tag, offset) : !child) {
			/* Hole detected */
			if (flags & RADIX_TREE_ITER_CONTIG)
				return NULL;

			if (flags & RADIX_TREE_ITER_TAGGED)
				offset = radix_tree_find_next_bit(node, tag,
						offset + 1);
			else
				while (++offset < RADIX_TREE_MAP_SIZE) {
					void *slot = rcu_dereference_raw(
							node->slots[offset]);
					if (xa_is_sibling(slot))
						continue;
					if (slot)
						break;
				}
			index &= ~node_maxindex(node);
			index += offset << node->shift;
			/* Overflow after ~0UL */
			if (!index)
				return NULL;
			if (offset == RADIX_TREE_MAP_SIZE)
				goto restart;
			child = rcu_dereference_raw(node->slots[offset]);
		}

		if (!child)
			goto restart;
		if (child == RADIX_TREE_RETRY)
			break;
	} while (node->shift && radix_tree_is_internal_node(child));

	/* Update the iterator state */
	iter->index = (index & ~node_maxindex(node)) | (offset << node->shift);
	iter->next_index = (index | node_maxindex(node)) + 1;
	iter->node = node;
	__set_iter_shift(iter, node->shift);

	if (flags & RADIX_TREE_ITER_TAGGED)
		set_iter_tags(iter, node, offset, tag);

	return node->slots + offset;
}
EXPORT_SYMBOL(radix_tree_next_chunk);
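/*
 * Example (illustrative): chunked iteration is normally driven through the
 * radix_tree_for_each_slot() helper rather than by calling
 * radix_tree_next_chunk() directly; do_something() is a caller-supplied
 * stand-in:
 *
 *	struct radix_tree_iter iter;
 *	void __rcu **slot;
 *
 *	radix_tree_for_each_slot(slot, &my_tree, &iter, 0) {
 *		void *entry = radix_tree_deref_slot(slot);
 *		if (radix_tree_deref_retry(entry)) {
 *			slot = radix_tree_iter_retry(&iter);
 *			continue;
 *		}
 *		do_something(entry, iter.index);
 *	}
 */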
/**
 *	radix_tree_gang_lookup - perform multiple lookup on a radix tree
 *	@root:		radix tree root
 *	@results:	where the results of the lookup are placed
 *	@first_index:	start the lookup from this key
 *	@max_items:	place up to this many items at *results
 *
 *	Performs an index-ascending scan of the tree for present items.  Places
 *	them at *@results and returns the number of items which were placed at
 *	*@results.
 *
 *	The implementation is naive.
 *
 *	Like radix_tree_lookup, radix_tree_gang_lookup may be called under
 *	rcu_read_lock. In this case, rather than the returned results being
 *	an atomic snapshot of the tree at a single point in time, the
 *	semantics of an RCU protected gang lookup are as though multiple
 *	radix_tree_lookups have been issued in individual locks, and results
 *	stored in 'results'.
 */
unsigned int
radix_tree_gang_lookup(const struct radix_tree_root *root, void **results,
			unsigned long first_index, unsigned int max_items)
{
	struct radix_tree_iter iter;
	void __rcu **slot;
	unsigned int ret = 0;

	if (unlikely(!max_items))
		return 0;

	radix_tree_for_each_slot(slot, root, &iter, first_index) {
		results[ret] = rcu_dereference_raw(*slot);
		if (!results[ret])
			continue;
		if (radix_tree_is_internal_node(results[ret])) {
			slot = radix_tree_iter_retry(&iter);
			continue;
		}
		if (++ret == max_items)
			break;
	}

	return ret;
}
EXPORT_SYMBOL(radix_tree_gang_lookup);
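/*
 * Example (illustrative): batched lookup into a fixed-size buffer, with
 * process() standing in for the caller's per-item work:
 *
 *	void *batch[16];
 *	unsigned int i, n;
 *
 *	n = radix_tree_gang_lookup(&my_tree, batch, first, ARRAY_SIZE(batch));
 *	for (i = 0; i < n; i++)
 *		process(batch[i]);
 */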
/**
 *	radix_tree_gang_lookup_tag - perform multiple lookup on a radix tree
 *				     based on a tag
 *	@root:		radix tree root
 *	@results:	where the results of the lookup are placed
 *	@first_index:	start the lookup from this key
 *	@max_items:	place up to this many items at *results
 *	@tag:		the tag index (< RADIX_TREE_MAX_TAGS)
 *
 *	Performs an index-ascending scan of the tree for present items which
 *	have the tag indexed by @tag set.  Places the items at *@results and
 *	returns the number of items which were placed at *@results.
 */
unsigned int
radix_tree_gang_lookup_tag(const struct radix_tree_root *root, void **results,
		unsigned long first_index, unsigned int max_items,
		unsigned int tag)
{
	struct radix_tree_iter iter;
	void __rcu **slot;
	unsigned int ret = 0;

	if (unlikely(!max_items))
		return 0;

	radix_tree_for_each_tagged(slot, root, &iter, first_index, tag) {
		results[ret] = rcu_dereference_raw(*slot);
		if (!results[ret])
			continue;
		if (radix_tree_is_internal_node(results[ret])) {
			slot = radix_tree_iter_retry(&iter);
			continue;
		}
		if (++ret == max_items)
			break;
	}

	return ret;
}
EXPORT_SYMBOL(radix_tree_gang_lookup_tag);
/**
 *	radix_tree_gang_lookup_tag_slot - perform multiple slot lookup on a
 *					  radix tree based on a tag
 *	@root:		radix tree root
 *	@results:	where the results of the lookup are placed
 *	@first_index:	start the lookup from this key
 *	@max_items:	place up to this many items at *results
 *	@tag:		the tag index (< RADIX_TREE_MAX_TAGS)
 *
 *	Performs an index-ascending scan of the tree for present items which
 *	have the tag indexed by @tag set.  Places the slots at *@results and
 *	returns the number of slots which were placed at *@results.
 */
unsigned int
radix_tree_gang_lookup_tag_slot(const struct radix_tree_root *root,
		void __rcu ***results, unsigned long first_index,
		unsigned int max_items, unsigned int tag)
{
	struct radix_tree_iter iter;
	void __rcu **slot;
	unsigned int ret = 0;

	if (unlikely(!max_items))
		return 0;

	radix_tree_for_each_tagged(slot, root, &iter, first_index, tag) {
		results[ret] = slot;
		if (++ret == max_items)
			break;
	}

	return ret;
}
EXPORT_SYMBOL(radix_tree_gang_lookup_tag_slot);
static bool __radix_tree_delete(struct radix_tree_root *root,
				struct radix_tree_node *node, void __rcu **slot)
{
	void *old = rcu_dereference_raw(*slot);
	int values = xa_is_value(old) ? -1 : 0;
	unsigned offset = get_slot_offset(node, slot);
	int tag;

	if (is_idr(root))
		node_tag_set(root, node, IDR_FREE, offset);
	else
		for (tag = 0; tag < RADIX_TREE_MAX_TAGS; tag++)
			node_tag_clear(root, node, tag, offset);

	replace_slot(slot, NULL, node, -1, values);
	return node && delete_node(root, node);
}
/**
 * radix_tree_iter_delete - delete the entry at this iterator position
 * @root: radix tree root
 * @iter: iterator state
 * @slot: pointer to slot
 *
 * Delete the entry at the position currently pointed to by the iterator.
 * This may result in the current node being freed; if it is, the iterator
 * is advanced so that it will not reference the freed memory.  This
 * function may be called without any locking if there are no other threads
 * which can access this tree.
 */
void radix_tree_iter_delete(struct radix_tree_root *root,
				struct radix_tree_iter *iter, void __rcu **slot)
{
	if (__radix_tree_delete(root, iter->node, slot))
		iter->index = iter->next_index;
}
EXPORT_SYMBOL(radix_tree_iter_delete);
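/*
 * Example (illustrative): emptying a tree safely while iterating, using
 * the iterator-aware deletion above:
 *
 *	radix_tree_for_each_slot(slot, &my_tree, &iter, 0)
 *		radix_tree_iter_delete(&my_tree, &iter, slot);
 */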
/**
 * radix_tree_delete_item - delete an item from a radix tree
 * @root: radix tree root
 * @index: index key
 * @item: expected item
 *
 * Remove @item at @index from the radix tree rooted at @root.
 *
 * Return: the deleted entry, or %NULL if it was not present
 * or the entry at the given @index was not @item.
 */
void *radix_tree_delete_item(struct radix_tree_root *root,
			     unsigned long index, void *item)
{
	struct radix_tree_node *node = NULL;
	void __rcu **slot = NULL;
	void *entry;

	entry = __radix_tree_lookup(root, index, &node, &slot);
	if (!slot)
		return NULL;
	if (!entry && (!is_idr(root) || node_tag_get(root, node, IDR_FREE,
						get_slot_offset(node, slot))))
		return NULL;

	if (item && entry != item)
		return NULL;

	__radix_tree_delete(root, node, slot);

	return entry;
}
EXPORT_SYMBOL(radix_tree_delete_item);
/**
 * radix_tree_delete - delete an entry from a radix tree
 * @root: radix tree root
 * @index: index key
 *
 * Remove the entry at @index from the radix tree rooted at @root.
 *
 * Return: The deleted entry, or %NULL if it was not present.
 */
void *radix_tree_delete(struct radix_tree_root *root, unsigned long index)
{
	return radix_tree_delete_item(root, index, NULL);
}
EXPORT_SYMBOL(radix_tree_delete);
void radix_tree_clear_tags(struct radix_tree_root *root,
			   struct radix_tree_node *node,
			   void __rcu **slot)
{
	if (node) {
		unsigned int tag, offset = get_slot_offset(node, slot);
		for (tag = 0; tag < RADIX_TREE_MAX_TAGS; tag++)
			node_tag_clear(root, node, tag, offset);
	} else {
		root_tag_clear_all(root);
	}
}
/**
 *	radix_tree_tagged - test whether any items in the tree are tagged
 *	@root:		radix tree root
 *	@tag:		tag to test
 */
int radix_tree_tagged(const struct radix_tree_root *root, unsigned int tag)
{
	return root_tag_get(root, tag);
}
EXPORT_SYMBOL(radix_tree_tagged);
/**
 * idr_preload - preload for idr_alloc()
 * @gfp_mask: allocation mask to use for preloading
 *
 * Preallocate memory to use for the next call to idr_alloc().  This function
 * returns with preemption disabled.  It will be enabled by idr_preload_end().
 */
void idr_preload(gfp_t gfp_mask)
{
	if (__radix_tree_preload(gfp_mask, IDR_PRELOAD_SIZE))
		preempt_disable();
}
EXPORT_SYMBOL(idr_preload);
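/*
 * Example (illustrative): the idr_preload()/idr_preload_end() pair brackets
 * an atomic allocation under the caller's lock:
 *
 *	idr_preload(GFP_KERNEL);
 *	spin_lock(&my_lock);
 *	id = idr_alloc(&my_idr, ptr, 0, 0, GFP_NOWAIT);
 *	spin_unlock(&my_lock);
 *	idr_preload_end();
 */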
void __rcu **idr_get_free(struct radix_tree_root *root,
			      struct radix_tree_iter *iter, gfp_t gfp,
			      unsigned long max)
{
	struct radix_tree_node *node = NULL, *child;
	void __rcu **slot = (void __rcu **)&root->xa_head;
	unsigned long maxindex, start = iter->next_index;
	unsigned int shift, offset = 0;

 grow:
	shift = radix_tree_load_root(root, &child, &maxindex);
	if (!radix_tree_tagged(root, IDR_FREE))
		start = max(start, maxindex + 1);
	if (start > max)
		return ERR_PTR(-ENOSPC);

	if (start > maxindex) {
		int error = radix_tree_extend(root, gfp, start, shift);
		if (error < 0)
			return ERR_PTR(error);
		shift = error;
		child = rcu_dereference_raw(root->xa_head);
	}
	if (start == 0 && shift == 0)
		shift = RADIX_TREE_MAP_SHIFT;

	while (shift) {
		shift -= RADIX_TREE_MAP_SHIFT;
		if (child == NULL) {
			/* Have to add a child node.  */
			child = radix_tree_node_alloc(gfp, node, root, shift,
							offset, 0, 0);
			if (!child)
				return ERR_PTR(-ENOMEM);
			all_tag_set(child, IDR_FREE);
			rcu_assign_pointer(*slot, node_to_entry(child));
			if (node)
				node->count++;
		} else if (!radix_tree_is_internal_node(child))
			break;

		node = entry_to_node(child);
		offset = radix_tree_descend(node, &child, start);
		if (!tag_get(node, IDR_FREE, offset)) {
			offset = radix_tree_find_next_bit(node, IDR_FREE,
							offset + 1);
			start = next_index(start, node, offset);
			if (start > max)
				return ERR_PTR(-ENOSPC);
			while (offset == RADIX_TREE_MAP_SIZE) {
				offset = node->offset + 1;
				node = node->parent;
				if (!node)
					goto grow;
				shift = node->shift;
			}
			child = rcu_dereference_raw(node->slots[offset]);
		}
		slot = &node->slots[offset];
	}

	iter->index = start;
	if (node)
		iter->next_index = 1 + min(max, (start | node_maxindex(node)));
	else
		iter->next_index = 1;
	iter->node = node;
	__set_iter_shift(iter, shift);
	set_iter_tags(iter, node, offset, IDR_FREE);

	return slot;
}
/**
 * idr_destroy - release all internal memory from an IDR
 * @idr: idr handle
 *
 * After this function is called, the IDR is empty, and may be reused or
 * the data structure containing it may be freed.
 *
 * A typical clean-up sequence for objects stored in an idr tree will use
 * idr_for_each() to free all objects, if necessary, then idr_destroy() to
 * free the memory used to keep track of those objects.
 */
void idr_destroy(struct idr *idr)
{
	struct radix_tree_node *node = rcu_dereference_raw(idr->idr_rt.xa_head);
	if (radix_tree_is_internal_node(node))
		radix_tree_free_nodes(node);
	idr->idr_rt.xa_head = NULL;
	root_tag_set(&idr->idr_rt, IDR_FREE);
}
EXPORT_SYMBOL(idr_destroy);
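/*
 * Example (illustrative): the clean-up sequence described above, where
 * free_fn() is a caller-supplied callback that frees one object:
 *
 *	idr_for_each(&my_idr, free_fn, NULL);
 *	idr_destroy(&my_idr);
 */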
static void
radix_tree_node_ctor(void *arg)
{
	struct radix_tree_node *node = arg;

	memset(node, 0, sizeof(*node));
	INIT_LIST_HEAD(&node->private_list);
}
static int radix_tree_cpu_dead(unsigned int cpu)
{
	struct radix_tree_preload *rtp;
	struct radix_tree_node *node;

	/* Free per-cpu pool of preloaded nodes */
	rtp = &per_cpu(radix_tree_preloads, cpu);
	while (rtp->nr) {
		node = rtp->nodes;
		rtp->nodes = node->parent;
		kmem_cache_free(radix_tree_node_cachep, node);
		rtp->nr--;
	}
	return 0;
}
void __init radix_tree_init(void)
{
	int ret;

	BUILD_BUG_ON(RADIX_TREE_MAX_TAGS + __GFP_BITS_SHIFT > 32);
	BUILD_BUG_ON(ROOT_IS_IDR & ~GFP_ZONEMASK);
	BUILD_BUG_ON(XA_CHUNK_SIZE > 255);
	radix_tree_node_cachep = kmem_cache_create("radix_tree_node",
			sizeof(struct radix_tree_node), 0,
			SLAB_PANIC | SLAB_RECLAIM_ACCOUNT,
			radix_tree_node_ctor);
	ret = cpuhp_setup_state_nocalls(CPUHP_RADIX_DEAD, "lib/radix:dead",
					NULL, radix_tree_cpu_dead);
	WARN_ON(ret < 0);
}