/* SPDX-License-Identifier: GPL-2.0-or-later */
/*
 * Copyright (C) 2001 Momchil Velikov
 * Portions Copyright (C) 2001 Christoph Hellwig
 * Copyright (C) 2006 Nick Piggin
 * Copyright (C) 2012 Konstantin Khlebnikov
 */
#ifndef _LINUX_RADIX_TREE_H
#define _LINUX_RADIX_TREE_H

#include <linux/bitops.h>
#include <linux/kernel.h>
#include <linux/list.h>
#include <linux/preempt.h>
#include <linux/rcupdate.h>
#include <linux/spinlock.h>
#include <linux/types.h>
#include <linux/xarray.h>
#include <linux/local_lock.h>

/* Keep unconverted code working */
#define radix_tree_root		xarray
#define radix_tree_node		xa_node

struct radix_tree_preload {
	local_lock_t lock;
	unsigned nr;
	/* nodes->parent points to next preallocated node */
	struct radix_tree_node *nodes;
};
DECLARE_PER_CPU(struct radix_tree_preload, radix_tree_preloads);

/*
 * The bottom two bits of the slot determine how the remaining bits in the
 * slot are interpreted:
 *
 * 00 - data pointer
 * 10 - internal entry
 * x1 - value entry (for tagged pointer users)
 *
 * The internal entry may be a pointer to the next level in the tree, a
 * sibling entry, or an indicator that the entry in this slot has been moved
 * to another location in the tree and the lookup should be restarted.  While
 * NULL fits the 'data pointer' pattern, it means that there is no entry in
 * the tree for this index (no matter what level of the tree it is found at).
 * This means that storing a NULL entry in the tree is the same as deleting
 * the entry from the tree.
 */
#define RADIX_TREE_ENTRY_MASK		3UL
#define RADIX_TREE_INTERNAL_NODE	2UL

static inline bool radix_tree_is_internal_node(void *ptr)
{
	return ((unsigned long)ptr & RADIX_TREE_ENTRY_MASK) ==
				RADIX_TREE_INTERNAL_NODE;
}

/*** radix-tree API starts here ***/

#define RADIX_TREE_MAP_SHIFT	XA_CHUNK_SHIFT
#define RADIX_TREE_MAP_SIZE	(1UL << RADIX_TREE_MAP_SHIFT)
#define RADIX_TREE_MAP_MASK	(RADIX_TREE_MAP_SIZE-1)

#define RADIX_TREE_MAX_TAGS	XA_MAX_MARKS
#define RADIX_TREE_TAG_LONGS	XA_MARK_LONGS

#define RADIX_TREE_INDEX_BITS  (8 /* CHAR_BIT */ * sizeof(unsigned long))
#define RADIX_TREE_MAX_PATH (DIV_ROUND_UP(RADIX_TREE_INDEX_BITS, \
					  RADIX_TREE_MAP_SHIFT))

/* The IDR tag is stored in the low bits of xa_flags */
#define ROOT_IS_IDR	((__force gfp_t)4)
/* The top bits of xa_flags are used to store the root tags */
#define ROOT_TAG_SHIFT	(__GFP_BITS_SHIFT)

#define RADIX_TREE_INIT(name, mask)	XARRAY_INIT(name, mask)

#define RADIX_TREE(name, mask) \
	struct radix_tree_root name = RADIX_TREE_INIT(name, mask)

#define INIT_RADIX_TREE(root, mask) xa_init_flags(root, mask)

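/*
 * Example (illustrative sketch, not part of this header): declaring a
 * statically initialised tree, and initialising one embedded in another
 * structure at runtime.  The names "my_tree", "my_object" and
 * my_object_init() are hypothetical.
 *
 *	static RADIX_TREE(my_tree, GFP_KERNEL);
 *
 *	struct my_object {
 *		struct radix_tree_root entries;
 *		spinlock_t lock;
 *	};
 *
 *	static void my_object_init(struct my_object *obj)
 *	{
 *		INIT_RADIX_TREE(&obj->entries, GFP_KERNEL);
 *		spin_lock_init(&obj->lock);
 *	}
 */
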
static inline bool radix_tree_empty(const struct radix_tree_root *root)
{
	return root->xa_head == NULL;
}

/**
 * struct radix_tree_iter - radix tree iterator state
 *
 * @index:	index of current slot
 * @next_index:	one beyond the last index for this chunk
 * @tags:	bit-mask for tag-iterating
 * @node:	node that contains current slot
 *
 * This radix tree iterator works in terms of "chunks" of slots.  A chunk is
 * a subinterval of slots contained within one radix tree leaf node.  It is
 * described by a pointer to its first slot and a struct radix_tree_iter
 * which holds the chunk's position in the tree and its size.  For tagged
 * iteration radix_tree_iter also holds the slots' bit-mask for one chosen
 * radix tree tag.
 */
struct radix_tree_iter {
	unsigned long	index;
	unsigned long	next_index;
	unsigned long	tags;
	struct radix_tree_node *node;
};

/*
 * Radix-tree synchronization
 *
 * The radix-tree API requires that users provide all synchronization (with
 * specific exceptions, noted below).
 *
 * Synchronization of access to the data items being stored in the tree, and
 * management of their lifetimes must be completely managed by API users.
 *
 * For API usage, in general,
 * - any function _modifying_ the tree or tags (inserting or deleting
 *   items, setting or clearing tags) must exclude other modifications, and
 *   exclude any functions reading the tree.
 * - any function _reading_ the tree or tags (looking up items or tags,
 *   gang lookups) must exclude modifications to the tree, but may occur
 *   concurrently with other readers.
 *
 * The notable exceptions to this rule are the following functions:
 * __radix_tree_lookup
 * radix_tree_lookup
 * radix_tree_lookup_slot
 * radix_tree_tag_get
 * radix_tree_gang_lookup
 * radix_tree_gang_lookup_tag
 * radix_tree_gang_lookup_tag_slot
 * radix_tree_tagged
 *
 * The first 8 functions are able to be called locklessly, using RCU. The
 * caller must ensure calls to these functions are made within rcu_read_lock()
 * regions. Other readers (lock-free or otherwise) and modifications may be
 * running concurrently.
 *
 * It is still required that the caller manage the synchronization and
 * lifetimes of the items. So if RCU lock-free lookups are used, typically
 * this would mean that the items have their own locks, or are amenable to
 * lock-free access; and that the items are freed by RCU (or only freed after
 * having been deleted from the radix tree *and* a synchronize_rcu() grace
 * period).
 *
 * (Note, rcu_assign_pointer and rcu_dereference are not needed to control
 * access to data items when inserting into or looking up from the radix tree)
 *
 * Note that the value returned by radix_tree_tag_get() may not be relied upon
 * if only the RCU read lock is held. Functions to set/clear tags and to
 * delete nodes running concurrently with it may affect its result such that
 * two consecutive reads in the same locked section may return different
 * values. If reliability is required, modification functions must also be
 * excluded from concurrency.
 *
 * radix_tree_tagged is able to be called without locking or RCU.
 */
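/*
 * Example (illustrative sketch): a lockless lookup obeying the rules above.
 * "my_tree" is hypothetical, its items are refcounted and freed by RCU, and
 * my_item_tryget() is a hypothetical helper that fails once the refcount has
 * dropped to zero.
 *
 *	struct my_item *my_find(unsigned long index)
 *	{
 *		struct my_item *item;
 *
 *		rcu_read_lock();
 *		item = radix_tree_lookup(&my_tree, index);
 *		if (item && !my_item_tryget(item))
 *			item = NULL;
 *		rcu_read_unlock();
 *		return item;
 *	}
 */
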
/**
 * radix_tree_deref_slot - dereference a slot
 * @slot: slot pointer, returned by radix_tree_lookup_slot
 *
 * For use with radix_tree_lookup_slot().  Caller must hold tree at least read
 * locked across slot lookup and dereference. Not required if write lock is
 * held (ie. items cannot be concurrently inserted).
 *
 * radix_tree_deref_retry must be used to confirm validity of the pointer if
 * only the read lock is held.
 *
 * Return: entry stored in that slot.
 */
static inline void *radix_tree_deref_slot(void __rcu **slot)
{
	return rcu_dereference(*slot);
}

/**
 * radix_tree_deref_slot_protected - dereference a slot with tree lock held
 * @slot: slot pointer, returned by radix_tree_lookup_slot
 * @treelock: the spinlock protecting the tree
 *
 * Similar to radix_tree_deref_slot.  The caller does not hold the RCU read
 * lock but it must hold the tree lock to prevent parallel updates.
 *
 * Return: entry stored in that slot.
 */
static inline void *radix_tree_deref_slot_protected(void __rcu **slot,
							spinlock_t *treelock)
{
	return rcu_dereference_protected(*slot, lockdep_is_held(treelock));
}

/**
 * radix_tree_deref_retry - check radix_tree_deref_slot
 * @arg: pointer returned by radix_tree_deref_slot
 * Returns: 0 if retry is not required, otherwise retry is required
 *
 * radix_tree_deref_retry must be used with radix_tree_deref_slot.
 */
static inline int radix_tree_deref_retry(void *arg)
{
	return unlikely(radix_tree_is_internal_node(arg));
}

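/*
 * Example (illustrative sketch): the usual pairing of radix_tree_deref_slot()
 * with radix_tree_deref_retry() under the RCU read lock, in the style of the
 * old page cache lookup loops.  "my_tree" is hypothetical.
 *
 *	void *my_lookup(unsigned long index)
 *	{
 *		void __rcu **slot;
 *		void *item;
 *
 *		rcu_read_lock();
 *	repeat:
 *		slot = radix_tree_lookup_slot(&my_tree, index);
 *		item = slot ? radix_tree_deref_slot(slot) : NULL;
 *		if (radix_tree_deref_retry(item))
 *			goto repeat;
 *		rcu_read_unlock();
 *		return item;
 *	}
 */
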
/**
 * radix_tree_exception - radix_tree_deref_slot returned either exception?
 * @arg: value returned by radix_tree_deref_slot
 * Returns: 0 if well-aligned pointer, non-0 if either kind of exception.
 */
static inline int radix_tree_exception(void *arg)
{
	return unlikely((unsigned long)arg & RADIX_TREE_ENTRY_MASK);
}

int radix_tree_insert(struct radix_tree_root *, unsigned long index,
			void *);
void *__radix_tree_lookup(const struct radix_tree_root *, unsigned long index,
			  struct radix_tree_node **nodep, void __rcu ***slotp);
void *radix_tree_lookup(const struct radix_tree_root *, unsigned long);
void __rcu **radix_tree_lookup_slot(const struct radix_tree_root *,
					unsigned long index);
void __radix_tree_replace(struct radix_tree_root *, struct radix_tree_node *,
			  void __rcu **slot, void *entry);
void radix_tree_iter_replace(struct radix_tree_root *,
		const struct radix_tree_iter *, void __rcu **slot, void *entry);
void radix_tree_replace_slot(struct radix_tree_root *,
			     void __rcu **slot, void *entry);
void radix_tree_iter_delete(struct radix_tree_root *,
			struct radix_tree_iter *iter, void __rcu **slot);
void *radix_tree_delete_item(struct radix_tree_root *, unsigned long, void *);
void *radix_tree_delete(struct radix_tree_root *, unsigned long);
unsigned int radix_tree_gang_lookup(const struct radix_tree_root *,
			void **results, unsigned long first_index,
			unsigned int max_items);
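/*
 * Example (illustrative sketch): a gang lookup of up to 16 items starting at
 * index 0, performed under a hypothetical external lock "my_lock" so that no
 * modification can run concurrently; process() is hypothetical too.
 *
 *	void *results[16];
 *	unsigned int i, nr;
 *
 *	spin_lock(&my_lock);
 *	nr = radix_tree_gang_lookup(&my_tree, results, 0, ARRAY_SIZE(results));
 *	for (i = 0; i < nr; i++)
 *		process(results[i]);
 *	spin_unlock(&my_lock);
 */
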
int radix_tree_preload(gfp_t gfp_mask);
int radix_tree_maybe_preload(gfp_t gfp_mask);
void radix_tree_init(void);
void *radix_tree_tag_set(struct radix_tree_root *,
			unsigned long index, unsigned int tag);
void *radix_tree_tag_clear(struct radix_tree_root *,
			unsigned long index, unsigned int tag);
int radix_tree_tag_get(const struct radix_tree_root *,
			unsigned long index, unsigned int tag);
void radix_tree_iter_tag_clear(struct radix_tree_root *,
		const struct radix_tree_iter *iter, unsigned int tag);
unsigned int radix_tree_gang_lookup_tag(const struct radix_tree_root *,
		void **results, unsigned long first_index,
		unsigned int max_items, unsigned int tag);
unsigned int radix_tree_gang_lookup_tag_slot(const struct radix_tree_root *,
		void __rcu ***results, unsigned long first_index,
		unsigned int max_items, unsigned int tag);
int radix_tree_tagged(const struct radix_tree_root *, unsigned int tag);

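/*
 * Example (illustrative sketch): tagging an entry, roughly as the page cache
 * used PAGECACHE_TAG_DIRTY.  MY_TAG_DIRTY, "my_tree", "my_lock" and
 * schedule_my_writeback() are hypothetical; tag values must be less than
 * RADIX_TREE_MAX_TAGS.
 *
 *	#define MY_TAG_DIRTY	0
 *
 *	spin_lock(&my_lock);
 *	radix_tree_tag_set(&my_tree, index, MY_TAG_DIRTY);
 *	spin_unlock(&my_lock);
 *
 * The root-level check can then be made without locking or RCU:
 *
 *	if (radix_tree_tagged(&my_tree, MY_TAG_DIRTY))
 *		schedule_my_writeback();
 */
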
static inline void radix_tree_preload_end(void)
{
	local_unlock(&radix_tree_preloads.lock);
}

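/*
 * Example (illustrative sketch): the usual preload pattern.  Node memory is
 * preallocated while sleeping is still allowed, and the insertion itself then
 * runs under a spinlock.  On success radix_tree_preload() returns with the
 * per-CPU preload lock held, so the matching radix_tree_preload_end() must
 * follow; on failure it does not.  "my_tree", "my_lock" and "item" are
 * hypothetical.
 *
 *	int my_insert(unsigned long index, void *item)
 *	{
 *		int err = radix_tree_preload(GFP_KERNEL);
 *
 *		if (err)
 *			return err;
 *		spin_lock(&my_lock);
 *		err = radix_tree_insert(&my_tree, index, item);
 *		spin_unlock(&my_lock);
 *		radix_tree_preload_end();
 *		return err;
 *	}
 */
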
void __rcu **idr_get_free(struct radix_tree_root *root,
			      struct radix_tree_iter *iter, gfp_t gfp,
			      unsigned long max);

enum {
	RADIX_TREE_ITER_TAG_MASK = 0x0f,	/* tag index in lower nybble */
	RADIX_TREE_ITER_TAGGED   = 0x10,	/* lookup tagged slots */
	RADIX_TREE_ITER_CONTIG   = 0x20,	/* stop at first hole */
};

/**
 * radix_tree_iter_init - initialize radix tree iterator
 *
 * @iter:	pointer to iterator state
 * @start:	iteration starting index
 * Returns:	NULL
 */
static __always_inline void __rcu **
radix_tree_iter_init(struct radix_tree_iter *iter, unsigned long start)
{
	/*
	 * Leave iter->tags uninitialized. radix_tree_next_chunk() will fill it
	 * in the case of a successful tagged chunk lookup.  If the lookup was
	 * unsuccessful or non-tagged then nobody cares about ->tags.
	 *
	 * Set index to zero to bypass next_index overflow protection.
	 * See the comment in radix_tree_next_chunk() for details.
	 */
	iter->index = 0;
	iter->next_index = start;
	return NULL;
}

/**
 * radix_tree_next_chunk - find next chunk of slots for iteration
 *
 * @root:	radix tree root
 * @iter:	iterator state
 * @flags:	RADIX_TREE_ITER_* flags and tag index
 * Returns:	pointer to chunk first slot, or NULL if there are no more left
 *
 * This function looks up the next chunk in the radix tree starting from
 * @iter->next_index.  It returns a pointer to the chunk's first slot.
 * It also fills @iter with data about the chunk: its position in the tree
 * (index), its end (next_index), and a bit mask for tagged iterating (tags).
 */
void __rcu **radix_tree_next_chunk(const struct radix_tree_root *,
			     struct radix_tree_iter *iter, unsigned flags);

/**
 * radix_tree_iter_lookup - look up an index in the radix tree
 * @root: radix tree root
 * @iter: iterator state
 * @index: key to look up
 *
 * If @index is present in the radix tree, this function returns the slot
 * containing it and updates @iter to describe the entry.  If @index is not
 * present, it returns NULL.
 */
static inline void __rcu **
radix_tree_iter_lookup(const struct radix_tree_root *root,
			struct radix_tree_iter *iter, unsigned long index)
{
	radix_tree_iter_init(iter, index);
	return radix_tree_next_chunk(root, iter, RADIX_TREE_ITER_CONTIG);
}

/**
 * radix_tree_iter_retry - retry this chunk of the iteration
 * @iter:	iterator state
 *
 * If we iterate over a tree protected only by the RCU lock, a race
 * against deletion or creation may result in seeing a slot for which
 * radix_tree_deref_retry() returns true.  If so, call this function
 * and continue the iteration.
 */
static inline __must_check
void __rcu **radix_tree_iter_retry(struct radix_tree_iter *iter)
{
	iter->next_index = iter->index;
	iter->tags = 0;
	return NULL;
}

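/*
 * Example (illustrative sketch): retrying a moved slot during an RCU-only
 * iteration; "my_tree" and process() are hypothetical.
 *
 *	void __rcu **slot;
 *	struct radix_tree_iter iter;
 *
 *	rcu_read_lock();
 *	radix_tree_for_each_slot(slot, &my_tree, &iter, 0) {
 *		void *item = radix_tree_deref_slot(slot);
 *
 *		if (radix_tree_deref_retry(item)) {
 *			slot = radix_tree_iter_retry(&iter);
 *			continue;
 *		}
 *		process(item);
 *	}
 *	rcu_read_unlock();
 */
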
static inline unsigned long
__radix_tree_iter_add(struct radix_tree_iter *iter, unsigned long slots)
{
	return iter->index + slots;
}

/**
 * radix_tree_iter_resume - resume iterating when the chunk may be invalid
 * @slot:	pointer to current slot
 * @iter:	iterator state
 * Returns:	New slot pointer
 *
 * If the iterator needs to release then reacquire a lock, the chunk may
 * have been invalidated by an insertion or deletion.  Call this function
 * before releasing the lock to continue the iteration from the next index.
 */
void __rcu **__must_check radix_tree_iter_resume(void __rcu **slot,
					struct radix_tree_iter *iter);

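/*
 * Example (illustrative sketch): dropping and reacquiring a lock in the
 * middle of an iteration.  The slot pointer is refreshed with
 * radix_tree_iter_resume() before the lock is released, since insertions or
 * deletions made while the lock is dropped may invalidate the chunk.
 * "my_tree", "my_lock" and process() are hypothetical.
 *
 *	spin_lock(&my_lock);
 *	radix_tree_for_each_slot(slot, &my_tree, &iter, 0) {
 *		process(radix_tree_deref_slot_protected(slot, &my_lock));
 *		if (need_resched()) {
 *			slot = radix_tree_iter_resume(slot, &iter);
 *			spin_unlock(&my_lock);
 *			cond_resched();
 *			spin_lock(&my_lock);
 *		}
 *	}
 *	spin_unlock(&my_lock);
 */
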
/**
 * radix_tree_chunk_size - get current chunk size
 *
 * @iter:	pointer to radix tree iterator
 * Returns:	current chunk size
 */
static __always_inline long
radix_tree_chunk_size(struct radix_tree_iter *iter)
{
	return iter->next_index - iter->index;
}

/**
 * radix_tree_next_slot - find next slot in chunk
 *
 * @slot:	pointer to current slot
 * @iter:	pointer to iterator state
 * @flags:	RADIX_TREE_ITER_*, should be constant
 * Returns:	pointer to next slot, or NULL if there are no more left
 *
 * This function updates @iter->index in the case of a successful lookup.
 * For tagged lookup it also eats @iter->tags.
 *
 * There are several cases where 'slot' can be passed in as NULL to this
 * function.  These cases result from the use of radix_tree_iter_resume() or
 * radix_tree_iter_retry().  In these cases we don't end up dereferencing
 * 'slot' because either:
 * a) we are doing tagged iteration and iter->tags has been set to 0, or
 * b) we are doing non-tagged iteration, and iter->index and iter->next_index
 *    have been set up so that radix_tree_chunk_size() returns 1 or 0.
 */
static __always_inline void __rcu **radix_tree_next_slot(void __rcu **slot,
				struct radix_tree_iter *iter, unsigned flags)
{
	if (flags & RADIX_TREE_ITER_TAGGED) {
		iter->tags >>= 1;
		if (unlikely(!iter->tags))
			return NULL;
		if (likely(iter->tags & 1ul)) {
			iter->index = __radix_tree_iter_add(iter, 1);
			slot++;
			goto found;
		}
		if (!(flags & RADIX_TREE_ITER_CONTIG)) {
			unsigned offset = __ffs(iter->tags);

			iter->tags >>= offset++;
			iter->index = __radix_tree_iter_add(iter, offset);
			slot += offset;
			goto found;
		}
	} else {
		long count = radix_tree_chunk_size(iter);

		while (--count > 0) {
			slot++;
			iter->index = __radix_tree_iter_add(iter, 1);

			if (likely(*slot))
				goto found;
			if (flags & RADIX_TREE_ITER_CONTIG) {
				/* forbid switching to the next chunk */
				iter->next_index = 0;
				break;
			}
		}
	}
	return NULL;

 found:
	return slot;
}

/**
 * radix_tree_for_each_slot - iterate over non-empty slots
 *
 * @slot:	the void** variable for pointer to slot
 * @root:	the struct radix_tree_root pointer
 * @iter:	the struct radix_tree_iter pointer
 * @start:	iteration starting index
 *
 * @slot points to radix tree slot, @iter->index contains its index.
 */
#define radix_tree_for_each_slot(slot, root, iter, start)		\
	for (slot = radix_tree_iter_init(iter, start) ;			\
	     slot || (slot = radix_tree_next_chunk(root, iter, 0)) ;	\
	     slot = radix_tree_next_slot(slot, iter, 0))

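/*
 * Example (illustrative sketch): walking every non-empty slot under a
 * hypothetical external lock "my_lock".
 *
 *	struct radix_tree_iter iter;
 *	void __rcu **slot;
 *
 *	spin_lock(&my_lock);
 *	radix_tree_for_each_slot(slot, &my_tree, &iter, 0)
 *		pr_info("index %lu -> %p\n", iter.index,
 *			radix_tree_deref_slot_protected(slot, &my_lock));
 *	spin_unlock(&my_lock);
 */
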
/**
 * radix_tree_for_each_tagged - iterate over tagged slots
 *
 * @slot:	the void** variable for pointer to slot
 * @root:	the struct radix_tree_root pointer
 * @iter:	the struct radix_tree_iter pointer
 * @start:	iteration starting index
 * @tag:	tag index
 *
 * @slot points to radix tree slot, @iter->index contains its index.
 */
#define radix_tree_for_each_tagged(slot, root, iter, start, tag)	\
	for (slot = radix_tree_iter_init(iter, start) ;			\
	     slot || (slot = radix_tree_next_chunk(root, iter,		\
			      RADIX_TREE_ITER_TAGGED | tag)) ;		\
	     slot = radix_tree_next_slot(slot, iter,			\
				RADIX_TREE_ITER_TAGGED | tag))

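/*
 * Example (illustrative sketch): visiting only tagged slots and clearing the
 * tag as we go.  MY_TAG_DIRTY, "my_tree" and "my_lock" are hypothetical, as
 * in the earlier tag example.
 *
 *	spin_lock(&my_lock);
 *	radix_tree_for_each_tagged(slot, &my_tree, &iter, 0, MY_TAG_DIRTY)
 *		radix_tree_iter_tag_clear(&my_tree, &iter, MY_TAG_DIRTY);
 *	spin_unlock(&my_lock);
 */
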
#endif /* _LINUX_RADIX_TREE_H */