1 // SPDX-License-Identifier: GPL-2.0+
3 * Maple Tree implementation
4 * Copyright (c) 2018-2022 Oracle Corporation
5 * Authors: Liam R. Howlett <Liam.Howlett@oracle.com>
6 * Matthew Wilcox <willy@infradead.org>
10 * DOC: Interesting implementation details of the Maple Tree
12 * Each node type has a number of slots for entries and a number of slots for
13 * pivots. In the case of dense nodes, the pivots are implied by the position
14 * and are simply the slot index + the minimum of the node.
16 * In regular B-Tree terms, pivots are called keys. The term pivot is used to
17 * indicate that the tree is specifying ranges. Pivots may appear in the
18 * subtree with an entry attached to the value whereas keys are unique to a
19 * specific position of a B-tree. Pivot values are inclusive of the slot with
20 * the same index.
23 * The following illustrates the layout of a range64 node's slots and pivots.
26 * Slots -> | 0 | 1 | 2 | ... | 12 | 13 | 14 | 15 |
28 * │ │ │ │ │ │ │ │ └─ Implied maximum
29 * │ │ │ │ │ │ │ └─ Pivot 14
30 * │ │ │ │ │ │ └─ Pivot 13
31 * │ │ │ │ │ └─ Pivot 12
39 * Internal (non-leaf) nodes contain pointers to other nodes.
40 * Leaf nodes contain entries.
42 * The location of interest is often referred to as an offset. All offsets have
43 * a slot, but the last offset has an implied pivot from the node above (or
44 * ULONG_MAX for the root node).
46 * Ranges complicate certain write activities. When modifying any of
47 * the B-tree variants, it is known that one entry will either be added or
48 * deleted. When modifying the Maple Tree, one store operation may overwrite
49 * the entire data set, or one half of the tree, or the middle half of the tree.
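 *
 * For example (an editorial sketch, not part of the original text): a
 * single ranged store can rewrite portions of several existing entries
 * in one operation:
 *
 *	mtree_store_range(mt, 0, 9, p1, GFP_KERNEL);
 *	mtree_store_range(mt, 10, 19, p2, GFP_KERNEL);
 *	mtree_store_range(mt, 5, 14, p3, GFP_KERNEL);
 *
 * After the third call the tree holds [0, 4] = p1, [5, 14] = p3 and
 * [15, 19] = p2; one store truncated both pre-existing ranges.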
54 #include <linux/maple_tree.h>
55 #include <linux/xarray.h>
56 #include <linux/types.h>
57 #include <linux/export.h>
58 #include <linux/slab.h>
59 #include <linux/limits.h>
60 #include <asm/barrier.h>
62 #define CREATE_TRACE_POINTS
63 #include <trace/events/maple_tree.h>
65 #define MA_ROOT_PARENT 1
69 * * MA_STATE_BULK - Bulk insert mode
70 * * MA_STATE_REBALANCE - Indicate a rebalance during bulk insert
71 * * MA_STATE_PREALLOC - Preallocated nodes, WARN_ON allocation
73 #define MA_STATE_BULK 1
74 #define MA_STATE_REBALANCE 2
75 #define MA_STATE_PREALLOC 4
77 #define ma_parent_ptr(x) ((struct maple_pnode *)(x))
78 #define ma_mnode_ptr(x) ((struct maple_node *)(x))
79 #define ma_enode_ptr(x) ((struct maple_enode *)(x))
80 static struct kmem_cache *maple_node_cache;
82 #ifdef CONFIG_DEBUG_MAPLE_TREE
83 static const unsigned long mt_max[] = {
84 [maple_dense] = MAPLE_NODE_SLOTS,
85 [maple_leaf_64] = ULONG_MAX,
86 [maple_range_64] = ULONG_MAX,
87 [maple_arange_64] = ULONG_MAX,
89 #define mt_node_max(x) mt_max[mte_node_type(x)]
92 static const unsigned char mt_slots[] = {
93 [maple_dense] = MAPLE_NODE_SLOTS,
94 [maple_leaf_64] = MAPLE_RANGE64_SLOTS,
95 [maple_range_64] = MAPLE_RANGE64_SLOTS,
96 [maple_arange_64] = MAPLE_ARANGE64_SLOTS,
98 #define mt_slot_count(x) mt_slots[mte_node_type(x)]
100 static const unsigned char mt_pivots[] = {
102 [maple_leaf_64] = MAPLE_RANGE64_SLOTS - 1,
103 [maple_range_64] = MAPLE_RANGE64_SLOTS - 1,
104 [maple_arange_64] = MAPLE_ARANGE64_SLOTS - 1,
106 #define mt_pivot_count(x) mt_pivots[mte_node_type(x)]
108 static const unsigned char mt_min_slots[] = {
109 [maple_dense] = MAPLE_NODE_SLOTS / 2,
110 [maple_leaf_64] = (MAPLE_RANGE64_SLOTS / 2) - 2,
111 [maple_range_64] = (MAPLE_RANGE64_SLOTS / 2) - 2,
112 [maple_arange_64] = (MAPLE_ARANGE64_SLOTS / 2) - 1,
114 #define mt_min_slot_count(x) mt_min_slots[mte_node_type(x)]
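
/*
 * Sizing sketch (editorial note): on a typical 64-bit configuration,
 * MAPLE_RANGE64_SLOTS is 16, so a range64 node holds up to 16 entries
 * bounded by 15 pivots plus the implied maximum from the parent, and
 * mt_min_slots[] is the fill floor that rebalancing maintains, e.g.
 * (16 / 2) - 2 = 6 entries for a leaf_64 node.
 */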
116 #define MAPLE_BIG_NODE_SLOTS (MAPLE_RANGE64_SLOTS * 2 + 2)
117 #define MAPLE_BIG_NODE_GAPS (MAPLE_ARANGE64_SLOTS * 2 + 1)
119 struct maple_big_node {
120 struct maple_pnode *parent;
121 unsigned long pivot[MAPLE_BIG_NODE_SLOTS - 1];
123 struct maple_enode *slot[MAPLE_BIG_NODE_SLOTS];
125 unsigned long padding[MAPLE_BIG_NODE_GAPS];
126 unsigned long gap[MAPLE_BIG_NODE_GAPS];
130 enum maple_type type;
134 * The maple_subtree_state is used to build a tree to replace a segment of an
135 * existing tree in a more atomic way. Any walkers of the older tree will hit a
136 * dead node and restart on updates.
138 struct maple_subtree_state {
139 struct ma_state *orig_l; /* Original left side of subtree */
140 struct ma_state *orig_r; /* Original right side of subtree */
141 struct ma_state *l; /* New left side of subtree */
142 struct ma_state *m; /* New middle of subtree (rare) */
143 struct ma_state *r; /* New right side of subtree */
144 struct ma_topiary *free; /* nodes to be freed */
145 struct ma_topiary *destroy; /* Nodes to be destroyed (walked and freed) */
146 struct maple_big_node *bn;
150 static inline struct maple_node *mt_alloc_one(gfp_t gfp)
152 return kmem_cache_alloc(maple_node_cache, gfp | __GFP_ZERO);
155 static inline int mt_alloc_bulk(gfp_t gfp, size_t size, void **nodes)
157 return kmem_cache_alloc_bulk(maple_node_cache, gfp | __GFP_ZERO, size,
161 static inline void mt_free_bulk(size_t size, void __rcu **nodes)
163 kmem_cache_free_bulk(maple_node_cache, size, (void **)nodes);
166 static void mt_free_rcu(struct rcu_head *head)
168 struct maple_node *node = container_of(head, struct maple_node, rcu);
170 kmem_cache_free(maple_node_cache, node);
174 * ma_free_rcu() - Use rcu callback to free a maple node
175 * @node: The node to free
177 * The maple tree uses the parent pointer to indicate this node is no longer in
178 * use and will be freed.
180 static void ma_free_rcu(struct maple_node *node)
182 node->parent = ma_parent_ptr(node);
183 call_rcu(&node->rcu, mt_free_rcu);
187 static void mas_set_height(struct ma_state *mas)
189 unsigned int new_flags = mas->tree->ma_flags;
191 new_flags &= ~MT_FLAGS_HEIGHT_MASK;
192 BUG_ON(mas->depth > MAPLE_HEIGHT_MAX);
193 new_flags |= mas->depth << MT_FLAGS_HEIGHT_OFFSET;
194 mas->tree->ma_flags = new_flags;
197 static unsigned int mas_mt_height(struct ma_state *mas)
199 return mt_height(mas->tree);
202 static inline enum maple_type mte_node_type(const struct maple_enode *entry)
204 return ((unsigned long)entry >> MAPLE_NODE_TYPE_SHIFT) &
205 MAPLE_NODE_TYPE_MASK;
208 static inline bool ma_is_dense(const enum maple_type type)
210 return type < maple_leaf_64;
213 static inline bool ma_is_leaf(const enum maple_type type)
215 return type < maple_range_64;
218 static inline bool mte_is_leaf(const struct maple_enode *entry)
220 return ma_is_leaf(mte_node_type(entry));
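
/*
 * Editorial sketch: the predicates above rely on the declaration order of
 * enum maple_type (maple_dense < maple_leaf_64 < maple_range_64 <
 * maple_arange_64), so a single comparison classifies a node. A
 * hypothetical helper (not part of this file) could dispatch the same way:
 *
 *	static const char *mt_type_name(enum maple_type type)
 *	{
 *		if (ma_is_dense(type))
 *			return "dense";
 *		if (ma_is_leaf(type))
 *			return "leaf";
 *		return "internal";
 *	}
 */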
224 * We also reserve values with the bottom two bits set to '10' which are
225 * below 4096.
227 static inline bool mt_is_reserved(const void *entry)
229 return ((unsigned long)entry < MAPLE_RESERVED_RANGE) &&
230 xa_is_internal(entry);
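
/*
 * Worked example (editorial): XA_ZERO_ENTRY is xa_mk_internal(257), i.e.
 * (257 << 2) | 2 == 1030, an internal entry below MAPLE_RESERVED_RANGE,
 * so mt_is_reserved() returns true for it; ordinary kernel pointers and
 * xa_mk_value() entries are storable.
 */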
233 static inline void mas_set_err(struct ma_state *mas, long err)
235 mas->node = MA_ERROR(err);
238 static inline bool mas_is_ptr(struct ma_state *mas)
240 return mas->node == MAS_ROOT;
243 static inline bool mas_is_start(struct ma_state *mas)
245 return mas->node == MAS_START;
248 bool mas_is_err(struct ma_state *mas)
250 return xa_is_err(mas->node);
253 static inline bool mas_searchable(struct ma_state *mas)
255 if (mas_is_none(mas))
264 static inline struct maple_node *mte_to_node(const struct maple_enode *entry)
266 return (struct maple_node *)((unsigned long)entry & ~MAPLE_NODE_MASK);
270 * mte_to_mat() - Convert a maple encoded node to a maple topiary node.
271 * @entry: The maple encoded node
273 * Return: a maple topiary pointer
275 static inline struct maple_topiary *mte_to_mat(const struct maple_enode *entry)
277 return (struct maple_topiary *)
278 ((unsigned long)entry & ~MAPLE_NODE_MASK);
282 * mas_mn() - Get the maple state node.
283 * @mas: The maple state
285 * Return: the maple node (not encoded - bare pointer).
287 static inline struct maple_node *mas_mn(const struct ma_state *mas)
289 return mte_to_node(mas->node);
293 * mte_set_node_dead() - Set a maple encoded node as dead.
294 * @mn: The maple encoded node.
296 static inline void mte_set_node_dead(struct maple_enode *mn)
298 mte_to_node(mn)->parent = ma_parent_ptr(mte_to_node(mn));
299 smp_wmb(); /* Needed for RCU */
302 /* Bit 1 indicates the root is a node */
303 #define MAPLE_ROOT_NODE 0x02
304 /* maple_type stored bit 3-6 */
305 #define MAPLE_ENODE_TYPE_SHIFT 0x03
306 /* Bit 2 means a NULL somewhere below */
307 #define MAPLE_ENODE_NULL 0x04
309 static inline struct maple_enode *mt_mk_node(const struct maple_node *node,
310 enum maple_type type)
312 return (void *)((unsigned long)node |
313 (type << MAPLE_ENODE_TYPE_SHIFT) | MAPLE_ENODE_NULL);
316 static inline void *mte_mk_root(const struct maple_enode *node)
318 return (void *)((unsigned long)node | MAPLE_ROOT_NODE);
321 static inline void *mte_safe_root(const struct maple_enode *node)
323 return (void *)((unsigned long)node & ~MAPLE_ROOT_NODE);
326 static inline void *mte_set_full(const struct maple_enode *node)
328 return (void *)((unsigned long)node & ~MAPLE_ENODE_NULL);
331 static inline void *mte_clear_full(const struct maple_enode *node)
333 return (void *)((unsigned long)node | MAPLE_ENODE_NULL);
336 static inline bool mte_has_null(const struct maple_enode *node)
338 return (unsigned long)node & MAPLE_ENODE_NULL;
341 static inline bool ma_is_root(struct maple_node *node)
343 return ((unsigned long)node->parent & MA_ROOT_PARENT);
346 static inline bool mte_is_root(const struct maple_enode *node)
348 return ma_is_root(mte_to_node(node));
351 static inline bool mas_is_root_limits(const struct ma_state *mas)
353 return !mas->min && mas->max == ULONG_MAX;
356 static inline bool mt_is_alloc(struct maple_tree *mt)
358 return (mt->ma_flags & MT_FLAGS_ALLOC_RANGE);
363 * Excluding root, the parent pointer is 256B aligned like all other tree nodes.
364 * When storing 32 or 64 bit values, the offset can fit into 5 bits. The 16
365 * bit values need an extra bit to store the offset. This extra bit comes from
366 * a reuse of the last bit in the node type. This is possible by using bit 1 to
367 * indicate if bit 2 is part of the type or the slot.
371 * 0b?00 = 16 bit nodes
372 * 0b010 = 32 bit nodes
373 * 0b110 = 64 bit nodes
375 * Slot size and alignment
377 * 0b?00 : 16 bit values, type in 0-1, slot in 2-7
378 * 0b010 : 32 bit values, type in 0-2, slot in 3-7
379 * 0b110 : 64 bit values, type in 0-2, slot in 3-7
382 #define MAPLE_PARENT_ROOT 0x01
384 #define MAPLE_PARENT_SLOT_SHIFT 0x03
385 #define MAPLE_PARENT_SLOT_MASK 0xF8
387 #define MAPLE_PARENT_16B_SLOT_SHIFT 0x02
388 #define MAPLE_PARENT_16B_SLOT_MASK 0xFC
390 #define MAPLE_PARENT_RANGE64 0x06
391 #define MAPLE_PARENT_RANGE32 0x04
392 #define MAPLE_PARENT_NOT_RANGE16 0x02
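
/*
 * Decoding sketch (editorial; example_parent_slot() is not part of this
 * file): unpacking the slot from a parent pointer value follows the
 * masks above. Bit 1 set means a 32/64 bit parent with the slot starting
 * at bit 3; otherwise a 16 bit parent stores the slot from bit 2.
 *
 *	static unsigned char example_parent_slot(unsigned long p)
 *	{
 *		if (p & MAPLE_PARENT_NOT_RANGE16)
 *			return (p & MAPLE_PARENT_SLOT_MASK) >>
 *			       MAPLE_PARENT_SLOT_SHIFT;
 *		return (p & MAPLE_PARENT_16B_SLOT_MASK) >>
 *		       MAPLE_PARENT_16B_SLOT_SHIFT;
 *	}
 */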
395 * mte_parent_shift() - Get the parent shift for the slot storage.
396 * @parent: The parent pointer cast as an unsigned long
397 * Return: The shift into that pointer to the start of the slot
399 static inline unsigned long mte_parent_shift(unsigned long parent)
401 /* Note bit 1 == 0 means 16B */
402 if (likely(parent & MAPLE_PARENT_NOT_RANGE16))
403 return MAPLE_PARENT_SLOT_SHIFT;
405 return MAPLE_PARENT_16B_SLOT_SHIFT;
409 * mte_parent_slot_mask() - Get the slot mask for the parent.
410 * @parent: The parent pointer cast as an unsigned long.
411 * Return: The slot mask for that parent.
413 static inline unsigned long mte_parent_slot_mask(unsigned long parent)
415 /* Note bit 1 == 0 means 16B */
416 if (likely(parent & MAPLE_PARENT_NOT_RANGE16))
417 return MAPLE_PARENT_SLOT_MASK;
419 return MAPLE_PARENT_16B_SLOT_MASK;
423 * mte_parent_enum() - Return the maple_type of the parent from the stored
425 * @p_enode: The encoded parent pointer, cast as a maple_enode
426 * @mt: The maple tree
427 * Return: The node->parent maple_type
430 enum maple_type mte_parent_enum(struct maple_enode *p_enode,
431 struct maple_tree *mt)
433 unsigned long p_type;
435 p_type = (unsigned long)p_enode;
436 if (p_type & MAPLE_PARENT_ROOT)
437 return 0; /* Validated in the caller. */
439 p_type &= MAPLE_NODE_MASK;
440 p_type = p_type & ~(MAPLE_PARENT_ROOT | mte_parent_slot_mask(p_type));
443 case MAPLE_PARENT_RANGE64: /* or MAPLE_PARENT_ARANGE64 */
445 return maple_arange_64;
446 return maple_range_64;
453 enum maple_type mas_parent_enum(struct ma_state *mas, struct maple_enode *enode)
455 return mte_parent_enum(ma_enode_ptr(mte_to_node(enode)->parent), mas->tree);
459 * mte_set_parent() - Set the parent node and encode the slot
460 * @enode: The encoded maple node.
461 * @parent: The encoded maple node that is the parent of @enode.
462 * @slot: The slot that @enode resides in @parent.
464 * Slot number is encoded in the enode->parent bits 3-7 or 2-7, depending on the
468 void mte_set_parent(struct maple_enode *enode, const struct maple_enode *parent,
471 unsigned long val = (unsigned long) parent;
474 enum maple_type p_type = mte_node_type(parent);
476 BUG_ON(p_type == maple_dense);
477 BUG_ON(p_type == maple_leaf_64);
481 case maple_arange_64:
482 shift = MAPLE_PARENT_SLOT_SHIFT;
483 type = MAPLE_PARENT_RANGE64;
492 val &= ~MAPLE_NODE_MASK; /* Clear all node metadata in parent */
493 val |= (slot << shift) | type;
494 mte_to_node(enode)->parent = ma_parent_ptr(val);
498 * mte_parent_slot() - get the parent slot of @enode.
499 * @enode: The encoded maple node.
501 * Return: The slot in the parent node where @enode resides.
503 static inline unsigned int mte_parent_slot(const struct maple_enode *enode)
505 unsigned long val = (unsigned long) mte_to_node(enode)->parent;
512 * Okay to use MAPLE_PARENT_16B_SLOT_MASK as the last bit will be lost
513 * by shift if the parent shift is MAPLE_PARENT_SLOT_SHIFT
515 return (val & MAPLE_PARENT_16B_SLOT_MASK) >> mte_parent_shift(val);
519 * mte_parent() - Get the parent of @enode.
520 * @enode: The encoded maple node.
522 * Return: The parent maple node.
524 static inline struct maple_node *mte_parent(const struct maple_enode *enode)
526 return (void *)((unsigned long)
527 (mte_to_node(enode)->parent) & ~MAPLE_NODE_MASK);
531 * ma_dead_node() - check if the @node is dead.
532 * @node: The maple node
534 * Return: true if dead, false otherwise.
536 static inline bool ma_dead_node(const struct maple_node *node)
538 struct maple_node *parent = (void *)((unsigned long)
539 node->parent & ~MAPLE_NODE_MASK);
541 return (parent == node);
544 * mte_dead_node() - check if the @enode is dead.
545 * @enode: The encoded maple node
547 * Return: true if dead, false otherwise.
549 static inline bool mte_dead_node(const struct maple_enode *enode)
551 struct maple_node *parent, *node;
553 node = mte_to_node(enode);
554 parent = mte_parent(enode);
555 return (parent == node);
559 * mas_allocated() - Get the number of nodes allocated in a maple state.
560 * @mas: The maple state
562 * The ma_state alloc member is overloaded to hold a pointer to the first
563 * allocated node or to the number of requested nodes to allocate. If bit 0 is
564 * set, then the alloc contains the number of requested nodes. If there is an
565 * allocated node, then the total allocated nodes is in that node.
567 * Return: The total number of nodes allocated
569 static inline unsigned long mas_allocated(const struct ma_state *mas)
571 if (!mas->alloc || ((unsigned long)mas->alloc & 0x1))
574 return mas->alloc->total;
578 * mas_set_alloc_req() - Set the requested number of allocations.
579 * @mas: the maple state
580 * @count: the number of allocations.
582 * The requested number of allocations is either in the first allocated node,
583 * located in @mas->alloc->request_count, or directly in @mas->alloc if there is
584 * no allocated node. Set the request either in the node or do the necessary
585 * encoding to store in @mas->alloc directly.
587 static inline void mas_set_alloc_req(struct ma_state *mas, unsigned long count)
589 if (!mas->alloc || ((unsigned long)mas->alloc & 0x1)) {
593 mas->alloc = (struct maple_alloc *)(((count) << 1U) | 1U);
597 mas->alloc->request_count = count;
601 * mas_alloc_req() - get the requested number of allocations.
602 * @mas: The maple state
604 * The alloc count is either stored directly in @mas, or in
605 * @mas->alloc->request_count if there is at least one node allocated. Decode
606 * the request count if it's stored directly in @mas->alloc.
608 * Return: The allocation request count.
610 static inline unsigned int mas_alloc_req(const struct ma_state *mas)
612 if ((unsigned long)mas->alloc & 0x1)
613 return (unsigned long)(mas->alloc) >> 1;
615 return mas->alloc->request_count;
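
/*
 * Encoding sketch (editorial): with nothing allocated, a request for
 * three nodes is stored directly in mas->alloc as (3 << 1) | 1 == 0x7;
 * mas_alloc_req() recovers 3 by shifting right, while mas_allocated()
 * sees bit 0 set and reports zero allocated nodes.
 */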
620 * ma_pivots() - Get a pointer to the maple node pivots.
621 * @node: the maple node
622 * @type: the node type
624 * Return: A pointer to the maple node pivots
626 static inline unsigned long *ma_pivots(struct maple_node *node,
627 enum maple_type type)
630 case maple_arange_64:
631 return node->ma64.pivot;
634 return node->mr64.pivot;
642 * ma_gaps() - Get a pointer to the maple node gaps.
643 * @node: the maple node
644 * @type: the node type
646 * Return: A pointer to the maple node gaps
648 static inline unsigned long *ma_gaps(struct maple_node *node,
649 enum maple_type type)
652 case maple_arange_64:
653 return node->ma64.gap;
663 * mte_pivot() - Get the pivot at @piv of the maple encoded node.
664 * @mn: The maple encoded node.
667 * Return: the pivot at @piv of @mn.
669 static inline unsigned long mte_pivot(const struct maple_enode *mn,
672 struct maple_node *node = mte_to_node(mn);
674 if (piv >= mt_pivots[mte_node_type(mn)]) {
678 switch (mte_node_type(mn)) {
679 case maple_arange_64:
680 return node->ma64.pivot[piv];
683 return node->mr64.pivot[piv];
691 * mas_safe_pivot() - get the pivot at @piv or mas->max.
692 * @mas: The maple state
693 * @pivots: The pointer to the maple node pivots
694 * @piv: The pivot to fetch
695 * @type: The maple node type
697 * Return: The pivot at @piv within the limit of the @pivots array, @mas->max
700 static inline unsigned long
701 mas_safe_pivot(const struct ma_state *mas, unsigned long *pivots,
702 unsigned char piv, enum maple_type type)
704 if (piv >= mt_pivots[type])
711 * mas_safe_min() - Return the minimum for a given offset.
712 * @mas: The maple state
713 * @pivots: The pointer to the maple node pivots
714 * @offset: The offset into the pivot array
716 * Return: The minimum range value that is contained in @offset.
718 static inline unsigned long
719 mas_safe_min(struct ma_state *mas, unsigned long *pivots, unsigned char offset)
722 return pivots[offset - 1] + 1;
728 * mas_logical_pivot() - Get the logical pivot of a given offset.
729 * @mas: The maple state
730 * @pivots: The pointer to the maple node pivots
731 * @offset: The offset into the pivot array
732 * @type: The maple node type
734 * When there is no value at a pivot (beyond the end of the data), then the
735 * pivot is actually @mas->max.
737 * Return: the logical pivot of a given @offset.
739 static inline unsigned long
740 mas_logical_pivot(struct ma_state *mas, unsigned long *pivots,
741 unsigned char offset, enum maple_type type)
743 unsigned long lpiv = mas_safe_pivot(mas, pivots, offset, type);
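
/*
 * Usage sketch (editorial; example_range_of() is hypothetical): together,
 * mas_safe_min() and mas_safe_pivot() recover the [min, max] range that a
 * given offset covers in the current node.
 *
 *	static void example_range_of(struct ma_state *mas,
 *			unsigned long *pivots, unsigned char offset,
 *			enum maple_type type,
 *			unsigned long *min, unsigned long *max)
 *	{
 *		*min = mas_safe_min(mas, pivots, offset);
 *		*max = mas_safe_pivot(mas, pivots, offset, type);
 *	}
 */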
755 * mte_set_pivot() - Set a pivot to a value in an encoded maple node.
756 * @mn: The encoded maple node
757 * @piv: The pivot offset
758 * @val: The value of the pivot
760 static inline void mte_set_pivot(struct maple_enode *mn, unsigned char piv,
763 struct maple_node *node = mte_to_node(mn);
764 enum maple_type type = mte_node_type(mn);
766 BUG_ON(piv >= mt_pivots[type]);
771 node->mr64.pivot[piv] = val;
773 case maple_arange_64:
774 node->ma64.pivot[piv] = val;
783 * ma_slots() - Get a pointer to the maple node slots.
784 * @mn: The maple node
785 * @mt: The maple node type
787 * Return: A pointer to the maple node slots
789 static inline void __rcu **ma_slots(struct maple_node *mn, enum maple_type mt)
793 case maple_arange_64:
794 return mn->ma64.slot;
797 return mn->mr64.slot;
803 static inline bool mt_locked(const struct maple_tree *mt)
805 return mt_external_lock(mt) ? mt_lock_is_held(mt) :
806 lockdep_is_held(&mt->ma_lock);
809 static inline void *mt_slot(const struct maple_tree *mt,
810 void __rcu **slots, unsigned char offset)
812 return rcu_dereference_check(slots[offset], mt_locked(mt));
816 * mas_slot_locked() - Get the slot value when holding the maple tree lock.
817 * @mas: The maple state
818 * @slots: The pointer to the slots
819 * @offset: The offset into the slots array to fetch
821 * Return: The entry stored in @slots at the @offset.
823 static inline void *mas_slot_locked(struct ma_state *mas, void __rcu **slots,
824 unsigned char offset)
826 return rcu_dereference_protected(slots[offset], mt_locked(mas->tree));
830 * mas_slot() - Get the slot value when not holding the maple tree lock.
831 * @mas: The maple state
832 * @slots: The pointer to the slots
833 * @offset: The offset into the slots array to fetch
835 * Return: The entry stored in @slots at the @offset
837 static inline void *mas_slot(struct ma_state *mas, void __rcu **slots,
838 unsigned char offset)
840 return mt_slot(mas->tree, slots, offset);
844 * mas_root() - Get the maple tree root.
845 * @mas: The maple state.
847 * Return: The pointer to the root of the tree
849 static inline void *mas_root(struct ma_state *mas)
851 return rcu_dereference_check(mas->tree->ma_root, mt_locked(mas->tree));
854 static inline void *mt_root_locked(struct maple_tree *mt)
856 return rcu_dereference_protected(mt->ma_root, mt_locked(mt));
860 * mas_root_locked() - Get the maple tree root when holding the maple tree lock.
861 * @mas: The maple state.
863 * Return: The pointer to the root of the tree
865 static inline void *mas_root_locked(struct ma_state *mas)
867 return mt_root_locked(mas->tree);
870 static inline struct maple_metadata *ma_meta(struct maple_node *mn,
874 case maple_arange_64:
875 return &mn->ma64.meta;
877 return &mn->mr64.meta;
882 * ma_set_meta() - Set the metadata information of a node.
883 * @mn: The maple node
884 * @mt: The maple node type
885 * @offset: The offset of the highest sub-gap in this node.
886 * @end: The end of the data in this node.
888 static inline void ma_set_meta(struct maple_node *mn, enum maple_type mt,
889 unsigned char offset, unsigned char end)
891 struct maple_metadata *meta = ma_meta(mn, mt);
898 * ma_meta_end() - Get the data end of a node from the metadata
899 * @mn: The maple node
900 * @mt: The maple node type
902 static inline unsigned char ma_meta_end(struct maple_node *mn,
905 struct maple_metadata *meta = ma_meta(mn, mt);
911 * ma_meta_gap() - Get the largest gap location of a node from the metadata
912 * @mn: The maple node
913 * @mt: The maple node type
915 static inline unsigned char ma_meta_gap(struct maple_node *mn,
918 BUG_ON(mt != maple_arange_64);
920 return mn->ma64.meta.gap;
924 * ma_set_meta_gap() - Set the largest gap location in a nodes metadata
925 * @mn: The maple node
926 * @mt: The maple node type
927 * @offset: The location of the largest gap.
929 static inline void ma_set_meta_gap(struct maple_node *mn, enum maple_type mt,
930 unsigned char offset)
933 struct maple_metadata *meta = ma_meta(mn, mt);
939 * mat_add() - Add a @dead_enode to the ma_topiary of a list of dead nodes.
940 * @mat: the ma_topiary, a linked list of dead nodes.
941 * @dead_enode: the node to be marked as dead and added to the tail of the list
943 * Add the @dead_enode to the linked list in @mat.
945 static inline void mat_add(struct ma_topiary *mat,
946 struct maple_enode *dead_enode)
948 mte_set_node_dead(dead_enode);
949 mte_to_mat(dead_enode)->next = NULL;
951 mat->tail = mat->head = dead_enode;
955 mte_to_mat(mat->tail)->next = dead_enode;
956 mat->tail = dead_enode;
959 static void mte_destroy_walk(struct maple_enode *, struct maple_tree *);
960 static inline void mas_free(struct ma_state *mas, struct maple_enode *used);
963 * mas_mat_free() - Free all nodes in a dead list.
964 * @mas: the maple state
965 * @mat: the ma_topiary linked list of dead nodes to free.
967 * Free walk a dead list.
969 static void mas_mat_free(struct ma_state *mas, struct ma_topiary *mat)
971 struct maple_enode *next;
974 next = mte_to_mat(mat->head)->next;
975 mas_free(mas, mat->head);
981 * mas_mat_destroy() - Free all nodes and subtrees in a dead list.
982 * @mas: the maple state
983 * @mat: the ma_topiary linked list of dead nodes to free.
985 * Destroy walk a dead list.
987 static void mas_mat_destroy(struct ma_state *mas, struct ma_topiary *mat)
989 struct maple_enode *next;
992 next = mte_to_mat(mat->head)->next;
993 mte_destroy_walk(mat->head, mat->mtree);
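
/*
 * Lifecycle sketch (editorial; assumes the MA_TOPIARY() initializer used
 * elsewhere in this file): dead nodes are queued on a topiary list while
 * the replacement subtree is published, then released in bulk once no
 * walker can reach them.
 *
 *	MA_TOPIARY(mat, mas->tree);
 *
 *	mat_add(&mat, old_enode_a);
 *	mat_add(&mat, old_enode_b);
 *	mas_mat_free(mas, &mat);
 */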
998 * mas_descend() - Descend into the slot stored in the ma_state.
999 * @mas: the maple state.
1001 * Note: Not RCU safe, only use in write side or debug code.
1003 static inline void mas_descend(struct ma_state *mas)
1005 enum maple_type type;
1006 unsigned long *pivots;
1007 struct maple_node *node;
1011 type = mte_node_type(mas->node);
1012 pivots = ma_pivots(node, type);
1013 slots = ma_slots(node, type);
1016 mas->min = pivots[mas->offset - 1] + 1;
1017 mas->max = mas_safe_pivot(mas, pivots, mas->offset, type);
1018 mas->node = mas_slot(mas, slots, mas->offset);
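
/*
 * Walk sketch (editorial; example_walk_to_leaf() is hypothetical and
 * assumes the caller selects mas->offset at each level): repeated
 * descent terminates at a leaf.
 *
 *	static void example_walk_to_leaf(struct ma_state *mas)
 *	{
 *		while (!mte_is_leaf(mas->node))
 *			mas_descend(mas);
 *	}
 */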
1022 * mte_set_gap() - Set a maple node gap.
1023 * @mn: The encoded maple node
1024 * @gap: The offset of the gap to set
1025 * @val: The gap value
1027 static inline void mte_set_gap(const struct maple_enode *mn,
1028 unsigned char gap, unsigned long val)
1030 switch (mte_node_type(mn)) {
1033 case maple_arange_64:
1034 mte_to_node(mn)->ma64.gap[gap] = val;
1040 * mas_ascend() - Walk up a level of the tree.
1041 * @mas: The maple state
1043 * Sets the @mas->max and @mas->min to the correct values when walking up. This
1044 * may cause several levels of walking up to find the correct min and max.
1045 * May find a dead node which will cause a premature return.
1046 * Return: 1 on dead node, 0 otherwise
1048 static int mas_ascend(struct ma_state *mas)
1050 struct maple_enode *p_enode; /* parent enode. */
1051 struct maple_enode *a_enode; /* ancestor enode. */
1052 struct maple_node *a_node; /* ancestor node. */
1053 struct maple_node *p_node; /* parent node. */
1054 unsigned char a_slot;
1055 enum maple_type a_type;
1056 unsigned long min, max;
1057 unsigned long *pivots;
1058 unsigned char offset;
1059 bool set_max = false, set_min = false;
1061 a_node = mas_mn(mas);
1062 if (ma_is_root(a_node)) {
1067 p_node = mte_parent(mas->node);
1068 if (unlikely(a_node == p_node))
1070 a_type = mas_parent_enum(mas, mas->node);
1071 offset = mte_parent_slot(mas->node);
1072 a_enode = mt_mk_node(p_node, a_type);
1074 /* Check to make sure all parent information is still accurate */
1075 if (p_node != mte_parent(mas->node))
1078 mas->node = a_enode;
1079 mas->offset = offset;
1081 if (mte_is_root(a_enode)) {
1082 mas->max = ULONG_MAX;
1091 a_type = mas_parent_enum(mas, p_enode);
1092 a_node = mte_parent(p_enode);
1093 a_slot = mte_parent_slot(p_enode);
1094 pivots = ma_pivots(a_node, a_type);
1095 a_enode = mt_mk_node(a_node, a_type);
1097 if (!set_min && a_slot) {
1099 min = pivots[a_slot - 1] + 1;
1102 if (!set_max && a_slot < mt_pivots[a_type]) {
1104 max = pivots[a_slot];
1107 if (unlikely(ma_dead_node(a_node)))
1110 if (unlikely(ma_is_root(a_node)))
1113 } while (!set_min || !set_max);
1121 * mas_pop_node() - Get a previously allocated maple node from the maple state.
1122 * @mas: The maple state
1124 * Return: A pointer to a maple node.
1126 static inline struct maple_node *mas_pop_node(struct ma_state *mas)
1128 struct maple_alloc *ret, *node = mas->alloc;
1129 unsigned long total = mas_allocated(mas);
1131 /* nothing or a request pending. */
1132 if (unlikely(!total))
1136 /* single allocation in this ma_state */
1142 if (!node->node_count) {
1143 /* Single allocation in this node. */
1144 mas->alloc = node->slot[0];
1145 node->slot[0] = NULL;
1146 mas->alloc->total = node->total - 1;
1152 ret = node->slot[node->node_count];
1153 node->slot[node->node_count--] = NULL;
1158 ret->node_count = 0;
1159 if (ret->request_count) {
1160 mas_set_alloc_req(mas, ret->request_count + 1);
1161 ret->request_count = 0;
1163 return (struct maple_node *)ret;
1167 * mas_push_node() - Push a node back on the maple state allocation.
1168 * @mas: The maple state
1169 * @used: The used maple node
1171 * Stores the maple node back into @mas->alloc for reuse. Updates allocated and
1172 * requested node count as necessary.
1174 static inline void mas_push_node(struct ma_state *mas, struct maple_node *used)
1176 struct maple_alloc *reuse = (struct maple_alloc *)used;
1177 struct maple_alloc *head = mas->alloc;
1178 unsigned long count;
1179 unsigned int requested = mas_alloc_req(mas);
1181 memset(reuse, 0, sizeof(*reuse));
1182 count = mas_allocated(mas);
1184 if (count && (head->node_count < MAPLE_ALLOC_SLOTS - 1)) {
1187 head->slot[head->node_count] = reuse;
1193 if ((head) && !((unsigned long)head & 0x1)) {
1194 head->request_count = 0;
1195 reuse->slot[0] = head;
1196 reuse->total += head->total;
1202 mas_set_alloc_req(mas, requested - 1);
1206 * mas_alloc_nodes() - Allocate nodes into a maple state
1207 * @mas: The maple state
1208 * @gfp: The GFP Flags
1210 static inline void mas_alloc_nodes(struct ma_state *mas, gfp_t gfp)
1212 struct maple_alloc *node;
1213 unsigned long allocated = mas_allocated(mas);
1214 unsigned long success = allocated;
1215 unsigned int requested = mas_alloc_req(mas);
1217 void **slots = NULL;
1218 unsigned int max_req = 0;
1223 mas_set_alloc_req(mas, 0);
1224 if (mas->mas_flags & MA_STATE_PREALLOC) {
1227 WARN_ON(!allocated);
1230 if (!allocated || mas->alloc->node_count == MAPLE_ALLOC_SLOTS - 1) {
1231 node = (struct maple_alloc *)mt_alloc_one(gfp);
1236 node->slot[0] = mas->alloc;
1245 max_req = MAPLE_ALLOC_SLOTS;
1246 if (node->slot[0]) {
1247 unsigned int offset = node->node_count + 1;
1249 slots = (void **)&node->slot[offset];
1252 slots = (void **)&node->slot;
1255 max_req = min(requested, max_req);
1256 count = mt_alloc_bulk(gfp, max_req, slots);
1260 node->node_count += count;
1262 if (slots == (void **)&node->slot)
1266 node = node->slot[0];
1269 mas->alloc->total = success;
1273 /* Clean up potential freed allocations on bulk failure */
1274 memset(slots, 0, max_req * sizeof(unsigned long));
1276 mas_set_alloc_req(mas, requested);
1277 if (mas->alloc && !(((unsigned long)mas->alloc & 0x1)))
1278 mas->alloc->total = success;
1279 mas_set_err(mas, -ENOMEM);
1285 * mas_free() - Free an encoded maple node
1286 * @mas: The maple state
1287 * @used: The encoded maple node to free.
1289 * Uses rcu free if necessary, pushes @used back on the maple state allocations
1292 static inline void mas_free(struct ma_state *mas, struct maple_enode *used)
1294 struct maple_node *tmp = mte_to_node(used);
1296 if (mt_in_rcu(mas->tree))
1299 mas_push_node(mas, tmp);
1303 * mas_node_count_gfp() - Check if enough nodes are allocated and request more
1304 * if there are not enough nodes.
1305 * @mas: The maple state
1306 * @count: The number of nodes needed
1307 * @gfp: the gfp flags
1309 static void mas_node_count_gfp(struct ma_state *mas, int count, gfp_t gfp)
1311 unsigned long allocated = mas_allocated(mas);
1313 if (allocated < count) {
1314 mas_set_alloc_req(mas, count - allocated);
1315 mas_alloc_nodes(mas, gfp);
1320 * mas_node_count() - Check if enough nodes are allocated and request more if
1321 * there are not enough nodes.
1322 * @mas: The maple state
1323 * @count: The number of nodes needed
1325 * Note: Uses GFP_NOWAIT | __GFP_NOWARN for gfp flags.
1327 static void mas_node_count(struct ma_state *mas, int count)
1329 return mas_node_count_gfp(mas, count, GFP_NOWAIT | __GFP_NOWARN);
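
/*
 * Reservation sketch (editorial; example_reserve() is hypothetical):
 * write paths reserve nodes up front and surface -ENOMEM through the
 * maple state, mirroring how callers of mas_node_count() handle errors.
 *
 *	static int example_reserve(struct ma_state *mas, int count)
 *	{
 *		mas_node_count_gfp(mas, count, GFP_KERNEL);
 *		if (mas_is_err(mas))
 *			return xa_err(mas->node);
 *		return 0;
 *	}
 */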
1333 * mas_start() - Sets up maple state for operations.
1334 * @mas: The maple state.
1336 * If mas->node == MAS_START, then set the min, max, depth, and offset to
1340 * - If mas->node is an error or not MAS_START, return NULL.
1341 * - If it's an empty tree: NULL & mas->node == MAS_NONE
1342 * - If it's a single entry: The entry & mas->node == MAS_ROOT
1343 * - If it's a tree: NULL & mas->node == safe root node.
1345 static inline struct maple_enode *mas_start(struct ma_state *mas)
1347 if (likely(mas_is_start(mas))) {
1348 struct maple_enode *root;
1350 mas->node = MAS_NONE;
1352 mas->max = ULONG_MAX;
1356 root = mas_root(mas);
1357 /* Tree with nodes */
1358 if (likely(xa_is_node(root))) {
1360 mas->node = mte_safe_root(root);
1365 if (unlikely(!root)) {
1366 mas->offset = MAPLE_NODE_SLOTS;
1370 /* Single entry tree */
1371 mas->node = MAS_ROOT;
1372 mas->offset = MAPLE_NODE_SLOTS;
1374 /* Single entry tree. */
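
/*
 * Caller sketch (editorial; example_first_entry() is hypothetical):
 * dispatching on the state that mas_start() leaves behind.
 *
 *	static void *example_first_entry(struct ma_state *mas)
 *	{
 *		void *entry = mas_start(mas);
 *
 *		if (mas_is_none(mas))
 *			return NULL;
 *		if (mas_is_ptr(mas))
 *			return entry;
 *		return NULL;
 *	}
 *
 * mas_is_none() covers the empty tree, mas_is_ptr() the single-entry
 * tree (the entry at index 0); otherwise mas->node is the safe root and
 * a normal walk follows.
 */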
1385 * ma_data_end() - Find the end of the data in a node.
1386 * @node: The maple node
1387 * @type: The maple node type
1388 * @pivots: The array of pivots in the node
1389 * @max: The maximum value in the node
1391 * Uses metadata to find the end of the data when possible.
1392 * Return: The zero indexed last slot with data (may be null).
1394 static inline unsigned char ma_data_end(struct maple_node *node,
1395 enum maple_type type,
1396 unsigned long *pivots,
1399 unsigned char offset;
1401 if (type == maple_arange_64)
1402 return ma_meta_end(node, type);
1404 offset = mt_pivots[type] - 1;
1405 if (likely(!pivots[offset]))
1406 return ma_meta_end(node, type);
1408 if (likely(pivots[offset] == max))
1411 return mt_pivots[type];
1415 * mas_data_end() - Find the end of the data (slot).
1416 * @mas: the maple state
1418 * This method is optimized to check the metadata of a node if the node type
1419 * supports data end metadata.
1421 * Return: The zero indexed last slot with data (may be null).
1423 static inline unsigned char mas_data_end(struct ma_state *mas)
1425 enum maple_type type;
1426 struct maple_node *node;
1427 unsigned char offset;
1428 unsigned long *pivots;
1430 type = mte_node_type(mas->node);
1432 if (type == maple_arange_64)
1433 return ma_meta_end(node, type);
1435 pivots = ma_pivots(node, type);
1436 offset = mt_pivots[type] - 1;
1437 if (likely(!pivots[offset]))
1438 return ma_meta_end(node, type);
1440 if (likely(pivots[offset] == mas->max))
1443 return mt_pivots[type];
1447 * mas_leaf_max_gap() - Returns the largest gap in a leaf node
1448 * @mas: the maple state
1450 * Return: The maximum gap in the leaf.
1452 static unsigned long mas_leaf_max_gap(struct ma_state *mas)
1455 unsigned long pstart, gap, max_gap;
1456 struct maple_node *mn;
1457 unsigned long *pivots;
1460 unsigned char max_piv;
1462 mt = mte_node_type(mas->node);
1464 slots = ma_slots(mn, mt);
1466 if (unlikely(ma_is_dense(mt))) {
1468 for (i = 0; i < mt_slots[mt]; i++) {
1483 * Checking the first implied pivot optimizes the loop below, and slot 1 may
1484 * be skipped if there is a gap in slot 0.
1486 pivots = ma_pivots(mn, mt);
1487 if (likely(!slots[0])) {
1488 max_gap = pivots[0] - mas->min + 1;
1494 /* reduce max_piv as the special case is checked before the loop */
1495 max_piv = ma_data_end(mn, mt, pivots, mas->max) - 1;
1497 * Check the end implied pivot, which can only be a gap on the right most
1498 * node.
1500 if (unlikely(mas->max == ULONG_MAX) && !slots[max_piv + 1]) {
1501 gap = ULONG_MAX - pivots[max_piv];
1506 for (; i <= max_piv; i++) {
1507 /* data == no gap. */
1508 if (likely(slots[i]))
1511 pstart = pivots[i - 1];
1512 gap = pivots[i] - pstart;
1516 /* There cannot be two gaps in a row. */
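
/*
 * Worked example (editorial): in a leaf with min = 1, pivots {10, 20, 50}
 * and slots {p, NULL, q}, slot 1 is empty, so the loop computes the gap
 * as pivots[1] - pivots[0] = 10, i.e. the vacant range [11, 20].
 */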
1523 * ma_max_gap() - Get the maximum gap in a maple node (non-leaf)
1524 * @node: The maple node
1525 * @gaps: The pointer to the gaps
1526 * @mt: The maple node type
1527 * @off: Pointer to store the offset location of the gap.
1529 * Uses the metadata data end to scan backwards across set gaps.
1531 * Return: The maximum gap value
1533 static inline unsigned long
1534 ma_max_gap(struct maple_node *node, unsigned long *gaps, enum maple_type mt,
1537 unsigned char offset, i;
1538 unsigned long max_gap = 0;
1540 i = offset = ma_meta_end(node, mt);
1542 if (gaps[i] > max_gap) {
1553 * mas_max_gap() - find the largest gap in a non-leaf node and set the slot.
1554 * @mas: The maple state.
1556 * If the metadata gap is set to MAPLE_ARANGE64_META_MAX, there is no gap.
1558 * Return: The gap value.
1560 static inline unsigned long mas_max_gap(struct ma_state *mas)
1562 unsigned long *gaps;
1563 unsigned char offset;
1565 struct maple_node *node;
1567 mt = mte_node_type(mas->node);
1569 return mas_leaf_max_gap(mas);
1572 offset = ma_meta_gap(node, mt);
1573 if (offset == MAPLE_ARANGE64_META_MAX)
1576 gaps = ma_gaps(node, mt);
1577 return gaps[offset];
1581 * mas_parent_gap() - Set the parent gap and any gaps above, as needed
1582 * @mas: The maple state
1583 * @offset: The gap offset in the parent to set
1584 * @new: The new gap value.
1586 * Set the parent gap then continue to set the gap upwards, using the metadata
1587 * of the parent to see if it is necessary to check the node above.
1589 static inline void mas_parent_gap(struct ma_state *mas, unsigned char offset,
1592 unsigned long meta_gap = 0;
1593 struct maple_node *pnode;
1594 struct maple_enode *penode;
1595 unsigned long *pgaps;
1596 unsigned char meta_offset;
1597 enum maple_type pmt;
1599 pnode = mte_parent(mas->node);
1600 pmt = mas_parent_enum(mas, mas->node);
1601 penode = mt_mk_node(pnode, pmt);
1602 pgaps = ma_gaps(pnode, pmt);
1605 meta_offset = ma_meta_gap(pnode, pmt);
1606 if (meta_offset == MAPLE_ARANGE64_META_MAX)
1609 meta_gap = pgaps[meta_offset];
1611 pgaps[offset] = new;
1613 if (meta_gap == new)
1616 if (offset != meta_offset) {
1620 ma_set_meta_gap(pnode, pmt, offset);
1621 } else if (new < meta_gap) {
1623 new = ma_max_gap(pnode, pgaps, pmt, &meta_offset);
1624 ma_set_meta_gap(pnode, pmt, meta_offset);
1627 if (ma_is_root(pnode))
1630 /* Go to the parent node. */
1631 pnode = mte_parent(penode);
1632 pmt = mas_parent_enum(mas, penode);
1633 pgaps = ma_gaps(pnode, pmt);
1634 offset = mte_parent_slot(penode);
1635 penode = mt_mk_node(pnode, pmt);
1640 * mas_update_gap() - Update a nodes gaps and propagate up if necessary.
1641 * @mas: the maple state.
1643 static inline void mas_update_gap(struct ma_state *mas)
1645 unsigned char pslot;
1646 unsigned long p_gap;
1647 unsigned long max_gap;
1649 if (!mt_is_alloc(mas->tree))
1652 if (mte_is_root(mas->node))
1655 max_gap = mas_max_gap(mas);
1657 pslot = mte_parent_slot(mas->node);
1658 p_gap = ma_gaps(mte_parent(mas->node),
1659 mas_parent_enum(mas, mas->node))[pslot];
1661 if (p_gap != max_gap)
1662 mas_parent_gap(mas, pslot, max_gap);
1666 * mas_adopt_children() - Set the parent pointer of all nodes in @parent to
1667 * @parent with the slot encoded.
1668 * @mas: the maple state (for the tree)
1669 * @parent: the maple encoded node containing the children.
1671 static inline void mas_adopt_children(struct ma_state *mas,
1672 struct maple_enode *parent)
1674 enum maple_type type = mte_node_type(parent);
1675 struct maple_node *node = mas_mn(mas);
1676 void __rcu **slots = ma_slots(node, type);
1677 unsigned long *pivots = ma_pivots(node, type);
1678 struct maple_enode *child;
1679 unsigned char offset;
1681 offset = ma_data_end(node, type, pivots, mas->max);
1683 child = mas_slot_locked(mas, slots, offset);
1684 mte_set_parent(child, parent, offset);
1689 * mas_replace() - Replace a maple node in the tree with mas->node. Uses the
1690 * parent encoding to locate the maple node in the tree.
1691 * @mas: the ma_state to use for operations.
1692 * @advanced: boolean to adopt the child nodes and free the old node (false) or
1693 * leave the node (true) and handle the adoption and free elsewhere.
1695 static inline void mas_replace(struct ma_state *mas, bool advanced)
1696 __must_hold(mas->tree->lock)
1698 struct maple_node *mn = mas_mn(mas);
1699 struct maple_enode *old_enode;
1700 unsigned char offset = 0;
1701 void __rcu **slots = NULL;
1703 if (ma_is_root(mn)) {
1704 old_enode = mas_root_locked(mas);
1706 offset = mte_parent_slot(mas->node);
1707 slots = ma_slots(mte_parent(mas->node),
1708 mas_parent_enum(mas, mas->node));
1709 old_enode = mas_slot_locked(mas, slots, offset);
1712 if (!advanced && !mte_is_leaf(mas->node))
1713 mas_adopt_children(mas, mas->node);
1715 if (mte_is_root(mas->node)) {
1716 mn->parent = ma_parent_ptr(
1717 ((unsigned long)mas->tree | MA_ROOT_PARENT));
1718 rcu_assign_pointer(mas->tree->ma_root, mte_mk_root(mas->node));
1719 mas_set_height(mas);
1721 rcu_assign_pointer(slots[offset], mas->node);
1725 mas_free(mas, old_enode);
1729 * mas_new_child() - Find the new child of a node.
1730 * @mas: the maple state
1731 * @child: the maple state to store the child.
1733 static inline bool mas_new_child(struct ma_state *mas, struct ma_state *child)
1734 __must_hold(mas->tree->lock)
1737 unsigned char offset;
1739 unsigned long *pivots;
1740 struct maple_enode *entry;
1741 struct maple_node *node;
1744 mt = mte_node_type(mas->node);
1746 slots = ma_slots(node, mt);
1747 pivots = ma_pivots(node, mt);
1748 end = ma_data_end(node, mt, pivots, mas->max);
1749 for (offset = mas->offset; offset <= end; offset++) {
1750 entry = mas_slot_locked(mas, slots, offset);
1751 if (mte_parent(entry) == node) {
1753 mas->offset = offset + 1;
1754 child->offset = offset;
1764 * mab_shift_right() - Shift the data in mab right. Note, does not clean out the
1765 * old data or set b_node->b_end.
1766 * @b_node: the maple_big_node
1767 * @shift: the shift count
1769 static inline void mab_shift_right(struct maple_big_node *b_node,
1770 unsigned char shift)
1772 unsigned long size = b_node->b_end * sizeof(unsigned long);
1774 memmove(b_node->pivot + shift, b_node->pivot, size);
1775 memmove(b_node->slot + shift, b_node->slot, size);
1776 if (b_node->type == maple_arange_64)
1777 memmove(b_node->gap + shift, b_node->gap, size);
1781 * mab_middle_node() - Check if a middle node is needed (unlikely)
1782 * @b_node: the maple_big_node that contains the data.
1784 * @split: the potential split location
1785 * @slot_count: the size that can be stored in a single node being considered.
1787 * Return: true if a middle node is required.
1789 static inline bool mab_middle_node(struct maple_big_node *b_node, int split,
1790 unsigned char slot_count)
1792 unsigned char size = b_node->b_end;
1794 if (size >= 2 * slot_count)
1797 if (!b_node->slot[split] && (size >= 2 * slot_count - 1))
1804 * mab_no_null_split() - ensure the split doesn't fall on a NULL
1805 * @b_node: the maple_big_node with the data
1806 * @split: the suggested split location
1807 * @slot_count: the number of slots in the node being considered.
1809 * Return: the split location.
1811 static inline int mab_no_null_split(struct maple_big_node *b_node,
1812 unsigned char split, unsigned char slot_count)
1814 if (!b_node->slot[split]) {
1816 * If the split is less than the max slot && the right side will
1817 * still be sufficient, then increment the split on NULL.
1819 if ((split < slot_count - 1) &&
1820 (b_node->b_end - split) > (mt_min_slots[b_node->type]))
1829 * mab_calc_split() - Calculate the split location and if there needs to be two
1831 * @bn: The maple_big_node with the data
1832 * @mid_split: The second split, if required. 0 otherwise.
1834 * Return: The first split location. The middle split is set in @mid_split.
1836 static inline int mab_calc_split(struct ma_state *mas,
1837 struct maple_big_node *bn, unsigned char *mid_split, unsigned long min)
1839 unsigned char b_end = bn->b_end;
1840 int split = b_end / 2; /* Assume equal split. */
1841 unsigned char slot_min, slot_count = mt_slots[bn->type];
1844 * To support gap tracking, all NULL entries are kept together and a node cannot
1845 * end on a NULL entry, with the exception of the left-most leaf. The
1846 * limitation means that the split of a node must be checked for this condition
1847 * and be able to put more data in one direction or the other.
1849 if (unlikely((mas->mas_flags & MA_STATE_BULK))) {
1851 split = b_end - mt_min_slots[bn->type];
1853 if (!ma_is_leaf(bn->type))
1856 mas->mas_flags |= MA_STATE_REBALANCE;
1857 if (!bn->slot[split])
1863 * Although extremely rare, it is possible to enter what is known as the 3-way
1864 * split scenario. The 3-way split comes about by means of a store of a range
1865 * that overwrites the end and beginning of two full nodes. The result is a set
1866 * of entries that cannot be stored in 2 nodes. Sometimes, these two nodes can
1867 * also be located in different parent nodes which are also full. This can
1868 * carry upwards all the way to the root in the worst case.
1870 if (unlikely(mab_middle_node(bn, split, slot_count))) {
1872 *mid_split = split * 2;
1874 slot_min = mt_min_slots[bn->type];
1878 * Avoid having a range less than the slot count unless it
1879 * causes one node to be deficient.
1880 * NOTE: mt_min_slots is 1 based, b_end and split are zero based.
1882 while (((bn->pivot[split] - min) < slot_count - 1) &&
1883 (split < slot_count - 1) && (b_end - split > slot_min))
1887 /* Avoid ending a node on a NULL entry */
1888 split = mab_no_null_split(bn, split, slot_count);
1892 *mid_split = mab_no_null_split(bn, *mid_split, slot_count);
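
/*
 * Worked example (editorial): with slot_count = 16 and b_end = 30,
 * mab_middle_node() is false (30 < 2 * 16), so the data splits into two
 * nodes around split = 15; only when the data cannot fit in two nodes
 * (b_end >= 32, or 31 with a NULL at the split) is a third, middle node
 * used, with the data split in thirds and mid_split = 2 * split.
 */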
1898 * mas_mab_cp() - Copy data from a maple state inclusively to a maple_big_node
1899 * and set @b_node->b_end to the next free slot.
1900 * @mas: The maple state
1901 * @mas_start: The starting slot to copy
1902 * @mas_end: The end slot to copy (inclusively)
1903 * @b_node: The maple_big_node to place the data
1904 * @mab_start: The starting location in maple_big_node to store the data.
1906 static inline void mas_mab_cp(struct ma_state *mas, unsigned char mas_start,
1907 unsigned char mas_end, struct maple_big_node *b_node,
1908 unsigned char mab_start)
1911 struct maple_node *node;
1913 unsigned long *pivots, *gaps;
1914 int i = mas_start, j = mab_start;
1915 unsigned char piv_end;
1918 mt = mte_node_type(mas->node);
1919 pivots = ma_pivots(node, mt);
1921 b_node->pivot[j] = pivots[i++];
1922 if (unlikely(i > mas_end))
1927 piv_end = min(mas_end, mt_pivots[mt]);
1928 for (; i < piv_end; i++, j++) {
1929 b_node->pivot[j] = pivots[i];
1930 if (unlikely(!b_node->pivot[j]))
1933 if (unlikely(mas->max == b_node->pivot[j]))
1937 if (likely(i <= mas_end))
1938 b_node->pivot[j] = mas_safe_pivot(mas, pivots, i, mt);
1941 b_node->b_end = ++j;
1943 slots = ma_slots(node, mt);
1944 memcpy(b_node->slot + mab_start, slots + mas_start, sizeof(void *) * j);
1945 if (!ma_is_leaf(mt) && mt_is_alloc(mas->tree)) {
1946 gaps = ma_gaps(node, mt);
1947 memcpy(b_node->gap + mab_start, gaps + mas_start,
1948 sizeof(unsigned long) * j);
1953 * mas_leaf_set_meta() - Set the metadata of a leaf if possible.
1954 * @mas: The maple state
1955 * @node: The maple node
1956 * @pivots: pointer to the maple node pivots
1957 * @mt: The maple type
1958 * @end: The assumed end
1960 * Note, end may be incremented within this function but not modified at the
1961 * source. This is fine since the metadata is the last thing to be stored in a
1962 * node during a write.
1964 static inline void mas_leaf_set_meta(struct ma_state *mas,
1965 struct maple_node *node, unsigned long *pivots,
1966 enum maple_type mt, unsigned char end)
1968 /* There is no room for metadata already */
1969 if (mt_pivots[mt] <= end)
1972 if (pivots[end] && pivots[end] < mas->max)
1975 if (end < mt_slots[mt] - 1)
1976 ma_set_meta(node, mt, 0, end);
1980 * mab_mas_cp() - Copy data from maple_big_node to a maple encoded node.
1981 * @b_node: the maple_big_node that has the data
1982 * @mab_start: the start location in @b_node.
1983 * @mab_end: The end location in @b_node (inclusively)
1984 * @mas: The maple state with the maple encoded node.
1986 static inline void mab_mas_cp(struct maple_big_node *b_node,
1987 unsigned char mab_start, unsigned char mab_end,
1988 struct ma_state *mas, bool new_max)
1991 enum maple_type mt = mte_node_type(mas->node);
1992 struct maple_node *node = mte_to_node(mas->node);
1993 void __rcu **slots = ma_slots(node, mt);
1994 unsigned long *pivots = ma_pivots(node, mt);
1995 unsigned long *gaps = NULL;
1998 if (mab_end - mab_start > mt_pivots[mt])
2001 if (!pivots[mt_pivots[mt] - 1])
2002 slots[mt_pivots[mt]] = NULL;
2006 pivots[j++] = b_node->pivot[i++];
2007 } while (i <= mab_end && likely(b_node->pivot[i]));
2009 memcpy(slots, b_node->slot + mab_start,
2010 sizeof(void *) * (i - mab_start));
2013 mas->max = b_node->pivot[i - 1];
2016 if (likely(!ma_is_leaf(mt) && mt_is_alloc(mas->tree))) {
2017 unsigned long max_gap = 0;
2018 unsigned char offset = 15;
2020 gaps = ma_gaps(node, mt);
2022 gaps[--j] = b_node->gap[--i];
2023 if (gaps[j] > max_gap) {
2029 ma_set_meta(node, mt, offset, end);
2031 mas_leaf_set_meta(mas, node, pivots, mt, end);
2036 * mas_descend_adopt() - Descend through a sub-tree and adopt children.
2037 * @mas: the maple state with the maple encoded node of the sub-tree.
2039 * Descend through a sub-tree and adopt children who do not have the correct
2040 * parents set. Follow the parents which have the correct parents as they are
2041 * the new entries which need to be followed to find other incorrectly set
2042 * parents.
2044 static inline void mas_descend_adopt(struct ma_state *mas)
2046 struct ma_state list[3], next[3];
2050 * At each level there may be up to 3 correct parent pointers which indicate
2051 * the new nodes which need to be walked to find any new nodes at a lower level.
2054 for (i = 0; i < 3; i++) {
2061 while (!mte_is_leaf(list[0].node)) {
2063 for (i = 0; i < 3; i++) {
2064 if (mas_is_none(&list[i]))
2067 if (i && list[i-1].node == list[i].node)
2070 while ((n < 3) && (mas_new_child(&list[i], &next[n])))
2073 mas_adopt_children(&list[i], list[i].node);
2077 next[n++].node = MAS_NONE;
2079 /* descend by setting the list to the children */
2080 for (i = 0; i < 3; i++)
2086 * mas_bulk_rebalance() - Rebalance the end of a tree after a bulk insert.
2087 * @mas: The maple state
2088 * @end: The maple node end
2089 * @mt: The maple node type
2091 static inline void mas_bulk_rebalance(struct ma_state *mas, unsigned char end,
2094 if (!(mas->mas_flags & MA_STATE_BULK))
2097 if (mte_is_root(mas->node))
2100 if (end > mt_min_slots[mt]) {
2101 mas->mas_flags &= ~MA_STATE_REBALANCE;
2107 * mas_store_b_node() - Store an @entry into the b_node while also copying the
2108 * data from a maple encoded node.
2109 * @wr_mas: the maple write state
2110 * @b_node: the maple_big_node to fill with data
2111 * @offset_end: the offset to end copying
2113 * On return, @b_node->b_end is set to the actual end of the stored data.
2115 static inline void mas_store_b_node(struct ma_wr_state *wr_mas,
2116 struct maple_big_node *b_node, unsigned char offset_end)
2119 unsigned char b_end;
2120 /* Possible underflow of piv will wrap back to 0 before use. */
2122 struct ma_state *mas = wr_mas->mas;
2124 b_node->type = wr_mas->type;
2128 /* Copy start data up to insert. */
2129 mas_mab_cp(mas, 0, slot - 1, b_node, 0);
2130 b_end = b_node->b_end;
2131 piv = b_node->pivot[b_end - 1];
2135 if (piv + 1 < mas->index) {
2136 /* Handle range starting after old range */
2137 b_node->slot[b_end] = wr_mas->content;
2138 if (!wr_mas->content)
2139 b_node->gap[b_end] = mas->index - 1 - piv;
2140 b_node->pivot[b_end++] = mas->index - 1;
2143 /* Store the new entry. */
2144 mas->offset = b_end;
2145 b_node->slot[b_end] = wr_mas->entry;
2146 b_node->pivot[b_end] = mas->last;
2149 if (mas->last >= mas->max)
2152 /* Handle new range ending before old range ends */
2153 piv = mas_logical_pivot(mas, wr_mas->pivots, offset_end, wr_mas->type);
2154 if (piv > mas->last) {
2155 if (piv == ULONG_MAX)
2156 mas_bulk_rebalance(mas, b_node->b_end, wr_mas->type);
2158 if (offset_end != slot)
2159 wr_mas->content = mas_slot_locked(mas, wr_mas->slots,
2162 b_node->slot[++b_end] = wr_mas->content;
2163 if (!wr_mas->content)
2164 b_node->gap[b_end] = piv - mas->last + 1;
2165 b_node->pivot[b_end] = piv;
2168 slot = offset_end + 1;
2169 if (slot > wr_mas->node_end)
2172 /* Copy end data to the end of the node. */
2173 mas_mab_cp(mas, slot, wr_mas->node_end + 1, b_node, ++b_end);
2178 b_node->b_end = b_end;
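
/*
 * Worked example (editorial): storing "new" over the middle of an
 * existing range [10, 50] = old, with mas->index = 20 and mas->last = 29,
 * emits up to three big-node entries: [10, 19] = old, [20, 29] = new and
 * [30, 50] = old. This is how a single store can simultaneously split
 * and overwrite neighbouring ranges.
 */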
2182 * mas_prev_sibling() - Find the previous node with the same parent.
2183 * @mas: the maple state
2185 * Return: True if there is a previous sibling, false otherwise.
2187 static inline bool mas_prev_sibling(struct ma_state *mas)
2189 unsigned int p_slot = mte_parent_slot(mas->node);
2191 if (mte_is_root(mas->node))
2198 mas->offset = p_slot - 1;
2204 * mas_next_sibling() - Find the next node with the same parent.
2205 * @mas: the maple state
2207 * Return: true if there is a next sibling, false otherwise.
2209 static inline bool mas_next_sibling(struct ma_state *mas)
2211 MA_STATE(parent, mas->tree, mas->index, mas->last);
2213 if (mte_is_root(mas->node))
2217 mas_ascend(&parent);
2218 parent.offset = mte_parent_slot(mas->node) + 1;
2219 if (parent.offset > mas_data_end(&parent))
2228 * mte_node_or_none() - Return the encoded node or MAS_NONE.
2229 * @enode: The encoded maple node.
2231 * Shorthand to avoid setting %NULLs in the tree or maple_subtree_state.
2233 * Return: @enode or MAS_NONE
2235 static inline struct maple_enode *mte_node_or_none(struct maple_enode *enode)
2240 return ma_enode_ptr(MAS_NONE);
2244 * mas_wr_node_walk() - Find the correct offset for the index in the @mas.
2245 * @wr_mas: The maple write state
2247 * Uses mas_slot_locked() and does not need to worry about dead nodes.
2249 static inline void mas_wr_node_walk(struct ma_wr_state *wr_mas)
2251 struct ma_state *mas = wr_mas->mas;
2252 unsigned char count;
2253 unsigned char offset;
2254 unsigned long index, min, max;
2256 if (unlikely(ma_is_dense(wr_mas->type))) {
2257 wr_mas->r_max = wr_mas->r_min = mas->index;
2258 mas->offset = mas->index = mas->min;
2262 wr_mas->node = mas_mn(wr_mas->mas);
2263 wr_mas->pivots = ma_pivots(wr_mas->node, wr_mas->type);
2264 count = wr_mas->node_end = ma_data_end(wr_mas->node, wr_mas->type,
2265 wr_mas->pivots, mas->max);
2266 offset = mas->offset;
2267 min = mas_safe_min(mas, wr_mas->pivots, offset);
2268 if (unlikely(offset == count))
2271 max = wr_mas->pivots[offset];
2273 if (unlikely(index <= max))
2276 if (unlikely(!max && offset))
2280 while (++offset < count) {
2281 max = wr_mas->pivots[offset];
2284 else if (unlikely(!max))
2293 wr_mas->r_max = max;
2294 wr_mas->r_min = min;
2295 wr_mas->offset_end = mas->offset = offset;
2299 * mas_topiary_range() - Add a range of slots to the topiary.
2300 * @mas: The maple state
2301 * @destroy: The topiary to add the slots (usually destroy)
2302 * @start: The starting slot inclusively
2303 * @end: The end slot inclusively
2305 static inline void mas_topiary_range(struct ma_state *mas,
2306 struct ma_topiary *destroy, unsigned char start, unsigned char end)
2309 unsigned char offset;
2311 MT_BUG_ON(mas->tree, mte_is_leaf(mas->node));
2312 slots = ma_slots(mas_mn(mas), mte_node_type(mas->node));
2313 for (offset = start; offset <= end; offset++) {
2314 struct maple_enode *enode = mas_slot_locked(mas, slots, offset);
2316 if (mte_dead_node(enode))
2319 mat_add(destroy, enode);
2324 * mast_topiary() - Add the portions of the tree to the removal list; either to
2325 * be freed or discarded (destroy walk).
2326 * @mast: The maple_subtree_state.
2328 static inline void mast_topiary(struct maple_subtree_state *mast)
2330 MA_WR_STATE(wr_mas, mast->orig_l, NULL);
2331 unsigned char r_start, r_end;
2332 unsigned char l_start, l_end;
2333 void __rcu **l_slots, **r_slots;
2335 wr_mas.type = mte_node_type(mast->orig_l->node);
2336 mast->orig_l->index = mast->orig_l->last;
2337 mas_wr_node_walk(&wr_mas);
2338 l_start = mast->orig_l->offset + 1;
2339 l_end = mas_data_end(mast->orig_l);
2341 r_end = mast->orig_r->offset;
2346 l_slots = ma_slots(mas_mn(mast->orig_l),
2347 mte_node_type(mast->orig_l->node));
2349 r_slots = ma_slots(mas_mn(mast->orig_r),
2350 mte_node_type(mast->orig_r->node));
2352 if ((l_start < l_end) &&
2353 mte_dead_node(mas_slot_locked(mast->orig_l, l_slots, l_start))) {
2357 if (mte_dead_node(mas_slot_locked(mast->orig_r, r_slots, r_end))) {
2362 if ((l_start > r_end) && (mast->orig_l->node == mast->orig_r->node))
2365 /* At the node where left and right sides meet, add the parts between */
2366 if (mast->orig_l->node == mast->orig_r->node) {
2367 return mas_topiary_range(mast->orig_l, mast->destroy,
2371 /* mast->orig_r is different and consumed. */
2372 if (mte_is_leaf(mast->orig_r->node))
2375 if (mte_dead_node(mas_slot_locked(mast->orig_l, l_slots, l_end)))
2379 if (l_start <= l_end)
2380 mas_topiary_range(mast->orig_l, mast->destroy, l_start, l_end);
2382 if (mte_dead_node(mas_slot_locked(mast->orig_r, r_slots, r_start)))
2385 if (r_start <= r_end)
2386 mas_topiary_range(mast->orig_r, mast->destroy, 0, r_end);
2390 * mast_rebalance_next() - Rebalance against the next node
2391 * @mast: The maple subtree state
2394 static inline void mast_rebalance_next(struct maple_subtree_state *mast)
2396 unsigned char b_end = mast->bn->b_end;
2398 mas_mab_cp(mast->orig_r, 0, mt_slot_count(mast->orig_r->node),
2400 mast->orig_r->last = mast->orig_r->max;
2404 * mast_rebalance_prev() - Rebalance against the previous node
2405 * @mast: The maple subtree state
2408 static inline void mast_rebalance_prev(struct maple_subtree_state *mast)
2410 unsigned char end = mas_data_end(mast->orig_l) + 1;
2411 unsigned char b_end = mast->bn->b_end;
2413 mab_shift_right(mast->bn, end);
2414 mas_mab_cp(mast->orig_l, 0, end - 1, mast->bn, 0);
2415 mast->l->min = mast->orig_l->min;
2416 mast->orig_l->index = mast->orig_l->min;
2417 mast->bn->b_end = end + b_end;
2418 mast->l->offset += end;
2422 * mast_spanning_rebalance() - Rebalance nodes with the nearest neighbour,
2423 * favouring the node to the right. Check the right then the left node at each
2424 * level upwards until the root is reached; free and destroy nodes as needed.
2425 * Data is copied into @mast->bn. See the illustrative sketch after this function.
2426 * @mast: The maple_subtree_state.
2429 bool mast_spanning_rebalance(struct maple_subtree_state *mast)
2431 struct ma_state r_tmp = *mast->orig_r;
2432 struct ma_state l_tmp = *mast->orig_l;
2433 struct maple_enode *ancestor = NULL;
2434 unsigned char start, end;
2435 unsigned char depth = 0;
2440 mas_ascend(mast->orig_r);
2441 mas_ascend(mast->orig_l);
2444 (mast->orig_r->node == mast->orig_l->node)) {
2445 ancestor = mast->orig_r->node;
2446 end = mast->orig_r->offset - 1;
2447 start = mast->orig_l->offset + 1;
2450 if (mast->orig_r->offset < mas_data_end(mast->orig_r)) {
2452 ancestor = mast->orig_r->node;
2456 mast->orig_r->offset++;
2458 mas_descend(mast->orig_r);
2459 mast->orig_r->offset = 0;
2463 mast_rebalance_next(mast);
2465 unsigned char l_off = 0;
2466 struct maple_enode *child = r_tmp.node;
2469 if (ancestor == r_tmp.node)
2475 if (l_off < r_tmp.offset)
2476 mas_topiary_range(&r_tmp, mast->destroy,
2477 l_off, r_tmp.offset);
2479 if (l_tmp.node != child)
2480 mat_add(mast->free, child);
2482 } while (r_tmp.node != ancestor);
2484 *mast->orig_l = l_tmp;
2487 } else if (mast->orig_l->offset != 0) {
2489 ancestor = mast->orig_l->node;
2490 end = mas_data_end(mast->orig_l);
2493 mast->orig_l->offset--;
2495 mas_descend(mast->orig_l);
2496 mast->orig_l->offset =
2497 mas_data_end(mast->orig_l);
2501 mast_rebalance_prev(mast);
2503 unsigned char r_off;
2504 struct maple_enode *child = l_tmp.node;
2507 if (ancestor == l_tmp.node)
2510 r_off = mas_data_end(&l_tmp);
2512 if (l_tmp.offset < r_off)
2515 if (l_tmp.offset < r_off)
2516 mas_topiary_range(&l_tmp, mast->destroy,
2517 l_tmp.offset, r_off);
2519 if (r_tmp.node != child)
2520 mat_add(mast->free, child);
2522 } while (l_tmp.node != ancestor);
2524 *mast->orig_r = r_tmp;
2527 } while (!mte_is_root(mast->orig_r->node));
2529 *mast->orig_r = r_tmp;
2530 *mast->orig_l = l_tmp;
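/*
 * Illustrative sketch (editorial, not part of the kernel source): one way to
 * exercise the neighbour rebalancing above is a store that overwrites most of
 * a populated tree, leaving the remnant nodes insufficient. All identifiers
 * are public maple tree API; the values are arbitrary.
 *
 *	DEFINE_MTREE(mt);
 *	unsigned long i;
 *
 *	for (i = 0; i < 200; i++)
 *		mtree_store_range(&mt, i * 10, i * 10 + 5,
 *				  xa_mk_value(i), GFP_KERNEL);
 *	mtree_store_range(&mt, 3, 1995, xa_mk_value(0), GFP_KERNEL);
 */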
2535 * mast_ascend_free() - Add current original maple state nodes to the free list
2537 * @mast: the maple subtree state.
2539 * Ascend the original left and right sides and add the previous nodes to the
2540 * free list. Set the slots to point to the correct location in the new nodes.
2543 mast_ascend_free(struct maple_subtree_state *mast)
2545 MA_WR_STATE(wr_mas, mast->orig_r, NULL);
2546 struct maple_enode *left = mast->orig_l->node;
2547 struct maple_enode *right = mast->orig_r->node;
2549 mas_ascend(mast->orig_l);
2550 mas_ascend(mast->orig_r);
2551 mat_add(mast->free, left);
2554 mat_add(mast->free, right);
2556 mast->orig_r->offset = 0;
2557 mast->orig_r->index = mast->r->max;
2558 /* last should be larger than or equal to index */
2559 if (mast->orig_r->last < mast->orig_r->index)
2560 mast->orig_r->last = mast->orig_r->index;
2562 * The node may not contain the value so set the slot to ensure all
2563 * of the node's contents are freed or destroyed.
2565 wr_mas.type = mte_node_type(mast->orig_r->node);
2566 mas_wr_node_walk(&wr_mas);
2567 /* Set up the left side of things */
2568 mast->orig_l->offset = 0;
2569 mast->orig_l->index = mast->l->min;
2570 wr_mas.mas = mast->orig_l;
2571 wr_mas.type = mte_node_type(mast->orig_l->node);
2572 mas_wr_node_walk(&wr_mas);
2574 mast->bn->type = wr_mas.type;
2578 * mas_new_ma_node() - Create and return a new maple node. Helper function.
2579 * @mas: the maple state with the allocations.
2580 * @b_node: the maple_big_node with the type encoding.
2582 * Use the node type from the maple_big_node to allocate a new node from the
2583 * ma_state. This function exists mainly for code readability.
2585 * Return: A new maple encoded node
2587 static inline struct maple_enode
2588 *mas_new_ma_node(struct ma_state *mas, struct maple_big_node *b_node)
2590 return mt_mk_node(ma_mnode_ptr(mas_pop_node(mas)), b_node->type);
2594 * mas_mab_to_node() - Set up right and middle nodes
2596 * @mas: the maple state that contains the allocations.
2597 * @b_node: the node which contains the data.
2598 * @left: The pointer which will have the left node
2599 * @right: The pointer which may have the right node
2600 * @middle: the pointer which may have the middle node (rare)
2601 * @mid_split: the split location for the middle node
2603 * Return: the split location of the left node.
2605 static inline unsigned char mas_mab_to_node(struct ma_state *mas,
2606 struct maple_big_node *b_node, struct maple_enode **left,
2607 struct maple_enode **right, struct maple_enode **middle,
2608 unsigned char *mid_split, unsigned long min)
2610 unsigned char split = 0;
2611 unsigned char slot_count = mt_slots[b_node->type];
2613 *left = mas_new_ma_node(mas, b_node);
2618 if (b_node->b_end < slot_count) {
2619 split = b_node->b_end;
2621 split = mab_calc_split(mas, b_node, mid_split, min);
2622 *right = mas_new_ma_node(mas, b_node);
2626 *middle = mas_new_ma_node(mas, b_node);
2633 * mab_set_b_end() - Add entry to b_node at b_node->b_end and increment the end
2635 * @b_node: the big node to add the entry to
2636 * @mas: the maple state to get the pivot (mas->max)
2637 * @entry: the entry to add; if NULL, nothing happens
2639 static inline void mab_set_b_end(struct maple_big_node *b_node,
2640 struct ma_state *mas,
2646 b_node->slot[b_node->b_end] = entry;
2647 if (mt_is_alloc(mas->tree))
2648 b_node->gap[b_node->b_end] = mas_max_gap(mas);
2649 b_node->pivot[b_node->b_end++] = mas->max;
2653 * mas_set_split_parent() - combine_then_separate helper function. Sets the parent
2654 * of @mas->node to either @left or @right, depending on @slot and @split.
2656 * @mas: the maple state with the node that needs a parent
2657 * @left: possible parent 1
2658 * @right: possible parent 2
2659 * @slot: the slot at which mas->node was placed
2660 * @split: the split location between @left and @right
2662 static inline void mas_set_split_parent(struct ma_state *mas,
2663 struct maple_enode *left,
2664 struct maple_enode *right,
2665 unsigned char *slot, unsigned char split)
2667 if (mas_is_none(mas))
2670 if ((*slot) <= split)
2671 mte_set_parent(mas->node, left, *slot);
2673 mte_set_parent(mas->node, right, (*slot) - split - 1);
2679 * mte_mid_split_check() - Check if the next node passes the mid-split
2680 * @l: Pointer to the left encoded maple node.
2681 * @r: Pointer to the right encoded maple node.
2682 * @right: The encoded maple node to the right of the split.
2684 * @split: The split location.
2685 * @mid_split: The middle split.
2687 static inline void mte_mid_split_check(struct maple_enode **l,
2688 struct maple_enode **r,
2689 struct maple_enode *right,
2691 unsigned char *split,
2692 unsigned char mid_split)
2697 if (slot < mid_split)
2706 * mast_set_split_parents() - Helper function to set three nodes parents. Slot
2707 * is taken from @mast->l.
2708 * @mast: the maple subtree state
2709 * @left: the left node
2710 * @middle: the middle node
2711 * @right: the right node
2712 * @split: the split location
2713 static inline void mast_set_split_parents(struct maple_subtree_state *mast,
2714 struct maple_enode *left,
2715 struct maple_enode *middle,
2716 struct maple_enode *right,
2717 unsigned char split,
2718 unsigned char mid_split)
2721 struct maple_enode *l = left;
2722 struct maple_enode *r = right;
2724 if (mas_is_none(mast->l))
2730 slot = mast->l->offset;
2732 mte_mid_split_check(&l, &r, right, slot, &split, mid_split);
2733 mas_set_split_parent(mast->l, l, r, &slot, split);
2735 mte_mid_split_check(&l, &r, right, slot, &split, mid_split);
2736 mas_set_split_parent(mast->m, l, r, &slot, split);
2738 mte_mid_split_check(&l, &r, right, slot, &split, mid_split);
2739 mas_set_split_parent(mast->r, l, r, &slot, split);
2743 * mas_wmb_replace() - Write memory barrier and replace
2744 * @mas: The maple state
2745 * @free: the maple topiary list of nodes to free
2746 * @destroy: The maple topiary list of nodes to destroy (walk and free)
2748 * Updates gap as necessary.
2750 static inline void mas_wmb_replace(struct ma_state *mas,
2751 struct ma_topiary *free,
2752 struct ma_topiary *destroy)
2754 /* All nodes must see old data as dead prior to replacing that data */
2755 smp_wmb(); /* Needed for RCU */
2757 /* Insert the new data in the tree */
2758 mas_replace(mas, true);
2760 if (!mte_is_leaf(mas->node))
2761 mas_descend_adopt(mas);
2763 mas_mat_free(mas, free);
2766 mas_mat_destroy(mas, destroy);
2768 if (mte_is_leaf(mas->node))
2771 mas_update_gap(mas);
2775 * mast_new_root() - Set a new tree root during subtree creation
2776 * @mast: The maple subtree state
2777 * @mas: The maple state
2779 static inline void mast_new_root(struct maple_subtree_state *mast,
2780 struct ma_state *mas)
2782 mas_mn(mast->l)->parent =
2783 ma_parent_ptr(((unsigned long)mas->tree | MA_ROOT_PARENT));
2784 if (!mte_dead_node(mast->orig_l->node) &&
2785 !mte_is_root(mast->orig_l->node)) {
2787 mast_ascend_free(mast);
2789 } while (!mte_is_root(mast->orig_l->node));
2791 if ((mast->orig_l->node != mas->node) &&
2792 (mast->l->depth > mas_mt_height(mas))) {
2793 mat_add(mast->free, mas->node);
2798 * mast_cp_to_nodes() - Copy data out to nodes.
2799 * @mast: The maple subtree state
2800 * @left: The left encoded maple node
2801 * @middle: The middle encoded maple node
2802 * @right: The right encoded maple node
2803 * @split: The location to split between left and (middle ? middle : right)
2804 * @mid_split: The location to split between middle and right.
2806 static inline void mast_cp_to_nodes(struct maple_subtree_state *mast,
2807 struct maple_enode *left, struct maple_enode *middle,
2808 struct maple_enode *right, unsigned char split, unsigned char mid_split)
2810 bool new_lmax = true;
2812 mast->l->node = mte_node_or_none(left);
2813 mast->m->node = mte_node_or_none(middle);
2814 mast->r->node = mte_node_or_none(right);
2816 mast->l->min = mast->orig_l->min;
2817 if (split == mast->bn->b_end) {
2818 mast->l->max = mast->orig_r->max;
2822 mab_mas_cp(mast->bn, 0, split, mast->l, new_lmax);
2825 mab_mas_cp(mast->bn, 1 + split, mid_split, mast->m, true);
2826 mast->m->min = mast->bn->pivot[split] + 1;
2830 mast->r->max = mast->orig_r->max;
2832 mab_mas_cp(mast->bn, 1 + split, mast->bn->b_end, mast->r, false);
2833 mast->r->min = mast->bn->pivot[split] + 1;
2838 * mast_combine_cp_left() - Copy in the original left side of the tree into the
2839 * combined data set in the maple subtree state big node.
2840 * @mast: The maple subtree state
2842 static inline void mast_combine_cp_left(struct maple_subtree_state *mast)
2844 unsigned char l_slot = mast->orig_l->offset;
2849 mas_mab_cp(mast->orig_l, 0, l_slot - 1, mast->bn, 0);
2853 * mast_combine_cp_right() - Copy in the original right side of the tree into the
2854 * combined data set in the maple subtree state big node.
2855 * @mast: The maple subtree state
2857 static inline void mast_combine_cp_right(struct maple_subtree_state *mast)
2859 if (mast->bn->pivot[mast->bn->b_end - 1] >= mast->orig_r->max)
2862 mas_mab_cp(mast->orig_r, mast->orig_r->offset + 1,
2863 mt_slot_count(mast->orig_r->node), mast->bn,
2865 mast->orig_r->last = mast->orig_r->max;
2869 * mast_sufficient() - Check if the maple subtree state has enough data in the big
2870 * node to create at least one sufficient node
2871 * @mast: the maple subtree state
2873 static inline bool mast_sufficient(struct maple_subtree_state *mast)
2875 if (mast->bn->b_end > mt_min_slot_count(mast->orig_l->node))
2882 * mast_overflow() - Check if there is too much data in the subtree state for a
2883 * single node.
2884 * @mast: The maple subtree state
2886 static inline bool mast_overflow(struct maple_subtree_state *mast)
2888 if (mast->bn->b_end >= mt_slot_count(mast->orig_l->node))
2894 static inline void *mtree_range_walk(struct ma_state *mas)
2896 unsigned long *pivots;
2897 unsigned char offset;
2898 struct maple_node *node;
2899 struct maple_enode *next, *last;
2900 enum maple_type type;
2903 unsigned long max, min;
2904 unsigned long prev_max, prev_min;
2912 node = mte_to_node(next);
2913 type = mte_node_type(next);
2914 pivots = ma_pivots(node, type);
2915 end = ma_data_end(node, type, pivots, max);
2916 if (unlikely(ma_dead_node(node)))
2919 if (pivots[offset] >= mas->index) {
2922 max = pivots[offset];
2928 } while ((offset < end) && (pivots[offset] < mas->index));
2931 min = pivots[offset - 1] + 1;
2933 if (likely(offset < end && pivots[offset]))
2934 max = pivots[offset];
2937 slots = ma_slots(node, type);
2938 next = mt_slot(mas->tree, slots, offset);
2939 if (unlikely(ma_dead_node(node)))
2941 } while (!ma_is_leaf(type));
2943 mas->offset = offset;
2946 mas->min = prev_min;
2947 mas->max = prev_max;
2949 return (void *) next;
2957 * mas_spanning_rebalance() - Rebalance across two nodes which may not be peers.
2958 * @mas: The starting maple state
2959 * @mast: The maple_subtree_state, keeps track of 4 maple states.
2960 * @count: The estimated count of iterations needed.
2962 * Follow the tree upwards from @l_mas and @r_mas for @count, or until the root
2963 * is hit. First @b_node is split into two entries which are inserted into the
2964 * next iteration of the loop. @b_node is returned populated with the final
2965 * iteration. @mas is used to obtain allocations. orig_l_mas keeps track of the
2966 * nodes that will remain active by using orig_l_mas->index and orig_l_mas->last
2967 * to account for what has been copied into the new sub-tree. The update of
2968 * orig_l_mas->last is used in mas_consume to find the slots that will need to
2969 * be either freed or destroyed. orig_l_mas->depth keeps track of the height of
2970 * the new sub-tree in case the sub-tree becomes the full tree.
2972 * Return: the number of elements in b_node during the last loop.
2974 static int mas_spanning_rebalance(struct ma_state *mas,
2975 struct maple_subtree_state *mast, unsigned char count)
2977 unsigned char split, mid_split;
2978 unsigned char slot = 0;
2979 struct maple_enode *left = NULL, *middle = NULL, *right = NULL;
2981 MA_STATE(l_mas, mas->tree, mas->index, mas->index);
2982 MA_STATE(r_mas, mas->tree, mas->index, mas->last);
2983 MA_STATE(m_mas, mas->tree, mas->index, mas->index);
2984 MA_TOPIARY(free, mas->tree);
2985 MA_TOPIARY(destroy, mas->tree);
2988 * The tree needs to be rebalanced and leaves need to be kept at the same level.
2989 * Rebalancing is done by use of the ``struct ma_topiary``.
2995 mast->destroy = &destroy;
2996 l_mas.node = r_mas.node = m_mas.node = MAS_NONE;
2997 if (!(mast->orig_l->min && mast->orig_r->max == ULONG_MAX) &&
2998 unlikely(mast->bn->b_end <= mt_min_slots[mast->bn->type]))
2999 mast_spanning_rebalance(mast);
3001 mast->orig_l->depth = 0;
3004 * Each level of the tree is examined and balanced, pushing data to the left or
3005 * right, or rebalancing against left or right nodes is employed to avoid
3006 * rippling up the tree to limit the amount of churn. Once a new sub-section of
3007 * the tree is created, there may be a mix of new and old nodes. The old nodes
3008 * will have the incorrect parent pointers and currently be in two trees: the
3009 * original tree and the partially new tree. To remedy the parent pointers in
3010 * the old tree, the new data is swapped into the active tree and a walk down
3011 * the tree is performed and the parent pointers are updated.
3012 * See mas_descend_adopt() for more information.
3016 mast->bn->type = mte_node_type(mast->orig_l->node);
3017 split = mas_mab_to_node(mas, mast->bn, &left, &right, &middle,
3018 &mid_split, mast->orig_l->min);
3019 mast_set_split_parents(mast, left, middle, right, split,
3021 mast_cp_to_nodes(mast, left, middle, right, split, mid_split);
3024 * Copy data from next level in the tree to mast->bn from next
3027 memset(mast->bn, 0, sizeof(struct maple_big_node));
3028 mast->bn->type = mte_node_type(left);
3029 mast->orig_l->depth++;
3031 /* Root already stored in l->node. */
3032 if (mas_is_root_limits(mast->l))
3035 mast_ascend_free(mast);
3036 mast_combine_cp_left(mast);
3037 l_mas.offset = mast->bn->b_end;
3038 mab_set_b_end(mast->bn, &l_mas, left);
3039 mab_set_b_end(mast->bn, &m_mas, middle);
3040 mab_set_b_end(mast->bn, &r_mas, right);
3042 /* Copy anything necessary out of the right node. */
3043 mast_combine_cp_right(mast);
3045 mast->orig_l->last = mast->orig_l->max;
3047 if (mast_sufficient(mast))
3050 if (mast_overflow(mast))
3053 /* May be a new root stored in mast->bn */
3054 if (mas_is_root_limits(mast->orig_l))
3057 mast_spanning_rebalance(mast);
3059 /* Rebalancing from other nodes may require another loop. */
3064 l_mas.node = mt_mk_node(ma_mnode_ptr(mas_pop_node(mas)),
3065 mte_node_type(mast->orig_l->node));
3066 mast->orig_l->depth++;
3067 mab_mas_cp(mast->bn, 0, mt_slots[mast->bn->type] - 1, &l_mas, true);
3068 mte_set_parent(left, l_mas.node, slot);
3070 mte_set_parent(middle, l_mas.node, ++slot);
3073 mte_set_parent(right, l_mas.node, ++slot);
3075 if (mas_is_root_limits(mast->l)) {
3077 mast_new_root(mast, mas);
3079 mas_mn(&l_mas)->parent = mas_mn(mast->orig_l)->parent;
3082 if (!mte_dead_node(mast->orig_l->node))
3083 mat_add(&free, mast->orig_l->node);
3085 mas->depth = mast->orig_l->depth;
3086 *mast->orig_l = l_mas;
3087 mte_set_node_dead(mas->node);
3089 /* Set up mas for insertion. */
3090 mast->orig_l->depth = mas->depth;
3091 mast->orig_l->alloc = mas->alloc;
3092 *mas = *mast->orig_l;
3093 mas_wmb_replace(mas, &free, &destroy);
3094 mtree_range_walk(mas);
3095 return mast->bn->b_end;
3099 * mas_rebalance() - Rebalance a given node.
3100 * @mas: The maple state
3101 * @b_node: The big maple node.
3103 * Rebalance two nodes into a single node or two new nodes that are sufficient.
3104 * Continue upwards until the tree is sufficient.
3106 * Return: the number of elements in b_node during the last loop.
3108 static inline int mas_rebalance(struct ma_state *mas,
3109 struct maple_big_node *b_node)
3111 char empty_count = mas_mt_height(mas);
3112 struct maple_subtree_state mast;
3113 unsigned char shift, b_end = ++b_node->b_end;
3115 MA_STATE(l_mas, mas->tree, mas->index, mas->last);
3116 MA_STATE(r_mas, mas->tree, mas->index, mas->last);
3118 trace_ma_op(__func__, mas);
3121 * Rebalancing occurs if a node is insufficient. Data is rebalanced
3122 * against the node to the right if it exists, otherwise the node to the
3123 * left of this node is rebalanced against this node. If rebalancing
3124 * causes just one node to be produced instead of two, then the parent
3125 * is also examined and rebalanced if it is insufficient. Every level
3126 * tries to combine the data in the same way. If one node contains the
3127 * entire range of the tree, then that node is used as a new root node.
3129 mas_node_count(mas, 1 + empty_count * 3);
3130 if (mas_is_err(mas))
3133 mast.orig_l = &l_mas;
3134 mast.orig_r = &r_mas;
3136 mast.bn->type = mte_node_type(mas->node);
3138 l_mas = r_mas = *mas;
3140 if (mas_next_sibling(&r_mas)) {
3141 mas_mab_cp(&r_mas, 0, mt_slot_count(r_mas.node), b_node, b_end);
3142 r_mas.last = r_mas.index = r_mas.max;
3144 mas_prev_sibling(&l_mas);
3145 shift = mas_data_end(&l_mas) + 1;
3146 mab_shift_right(b_node, shift);
3147 mas->offset += shift;
3148 mas_mab_cp(&l_mas, 0, shift - 1, b_node, 0);
3149 b_node->b_end = shift + b_end;
3150 l_mas.index = l_mas.last = l_mas.min;
3153 return mas_spanning_rebalance(mas, &mast, empty_count);
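/*
 * Illustrative sketch (editorial, not part of the kernel source): node
 * insufficiency is typically the result of deletions, since erased slots
 * merge with neighbouring NULLs. Public API; the counts are arbitrary.
 *
 *	DEFINE_MTREE(mt);
 *	unsigned long i;
 *
 *	for (i = 0; i < 100; i++)
 *		mtree_store(&mt, i, xa_mk_value(i), GFP_KERNEL);
 *	for (i = 0; i < 90; i++)
 *		mtree_erase(&mt, i);
 */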
3157 * mas_destroy_rebalance() - Rebalance the left-most node while destroying the
3158 * maple tree.
3159 * @mas: The maple state
3160 * @end: The end of the left-most node.
3162 * During a mass-insert event (such as forking), it may be necessary to
3163 * rebalance the left-most node when it is not sufficient.
3165 static inline void mas_destroy_rebalance(struct ma_state *mas, unsigned char end)
3167 enum maple_type mt = mte_node_type(mas->node);
3168 struct maple_node reuse, *newnode, *parent, *new_left, *left, *node;
3169 struct maple_enode *eparent;
3170 unsigned char offset, tmp, split = mt_slots[mt] / 2;
3171 void __rcu **l_slots, **slots;
3172 unsigned long *l_pivs, *pivs, gap;
3173 bool in_rcu = mt_in_rcu(mas->tree);
3175 MA_STATE(l_mas, mas->tree, mas->index, mas->last);
3178 mas_prev_sibling(&l_mas);
3182 /* Allocate for both left and right as well as parent. */
3183 mas_node_count(mas, 3);
3184 if (mas_is_err(mas))
3187 newnode = mas_pop_node(mas);
3193 newnode->parent = node->parent;
3194 slots = ma_slots(newnode, mt);
3195 pivs = ma_pivots(newnode, mt);
3196 left = mas_mn(&l_mas);
3197 l_slots = ma_slots(left, mt);
3198 l_pivs = ma_pivots(left, mt);
3199 if (!l_slots[split])
3201 tmp = mas_data_end(&l_mas) - split;
3203 memcpy(slots, l_slots + split + 1, sizeof(void *) * tmp);
3204 memcpy(pivs, l_pivs + split + 1, sizeof(unsigned long) * tmp);
3205 pivs[tmp] = l_mas.max;
3206 memcpy(slots + tmp, ma_slots(node, mt), sizeof(void *) * end);
3207 memcpy(pivs + tmp, ma_pivots(node, mt), sizeof(unsigned long) * end);
3209 l_mas.max = l_pivs[split];
3210 mas->min = l_mas.max + 1;
3211 eparent = mt_mk_node(mte_parent(l_mas.node),
3212 mas_parent_enum(&l_mas, l_mas.node));
3215 unsigned char max_p = mt_pivots[mt];
3216 unsigned char max_s = mt_slots[mt];
3219 memset(pivs + tmp, 0,
3220 sizeof(unsigned long) * (max_p - tmp));
3222 if (tmp < mt_slots[mt])
3223 memset(slots + tmp, 0, sizeof(void *) * (max_s - tmp));
3225 memcpy(node, newnode, sizeof(struct maple_node));
3226 ma_set_meta(node, mt, 0, tmp - 1);
3227 mte_set_pivot(eparent, mte_parent_slot(l_mas.node),
3230 /* Remove data from l_pivs. */
3232 memset(l_pivs + tmp, 0, sizeof(unsigned long) * (max_p - tmp));
3233 memset(l_slots + tmp, 0, sizeof(void *) * (max_s - tmp));
3234 ma_set_meta(left, mt, 0, split);
3239 /* RCU requires replacing l_mas, mas, and the parent. */
3240 mas->node = mt_mk_node(newnode, mt);
3241 ma_set_meta(newnode, mt, 0, tmp);
3243 new_left = mas_pop_node(mas);
3244 new_left->parent = left->parent;
3245 mt = mte_node_type(l_mas.node);
3246 slots = ma_slots(new_left, mt);
3247 pivs = ma_pivots(new_left, mt);
3248 memcpy(slots, l_slots, sizeof(void *) * split);
3249 memcpy(pivs, l_pivs, sizeof(unsigned long) * split);
3250 ma_set_meta(new_left, mt, 0, split);
3251 l_mas.node = mt_mk_node(new_left, mt);
3253 /* replace parent. */
3254 offset = mte_parent_slot(mas->node);
3255 mt = mas_parent_enum(&l_mas, l_mas.node);
3256 parent = mas_pop_node(mas);
3257 slots = ma_slots(parent, mt);
3258 pivs = ma_pivots(parent, mt);
3259 memcpy(parent, mte_to_node(eparent), sizeof(struct maple_node));
3260 rcu_assign_pointer(slots[offset], mas->node);
3261 rcu_assign_pointer(slots[offset - 1], l_mas.node);
3262 pivs[offset - 1] = l_mas.max;
3263 eparent = mt_mk_node(parent, mt);
3265 gap = mas_leaf_max_gap(mas);
3266 mte_set_gap(eparent, mte_parent_slot(mas->node), gap);
3267 gap = mas_leaf_max_gap(&l_mas);
3268 mte_set_gap(eparent, mte_parent_slot(l_mas.node), gap);
3272 mas_replace(mas, false);
3274 mas_update_gap(mas);
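/*
 * Illustrative sketch (editorial, not part of the kernel source): the
 * rebalance above is reached from mas_destroy() after a bulk insert that was
 * set up with mas_expected_entries(). Public API; the counts are arbitrary.
 *
 *	MA_STATE(mas, &mt, 0, 0);
 *	unsigned long i;
 *
 *	mas_lock(&mas);
 *	mas_expected_entries(&mas, 100);
 *	for (i = 0; i < 100; i++) {
 *		mas_set_range(&mas, i * 10, i * 10 + 9);
 *		mas_store_gfp(&mas, xa_mk_value(i), GFP_KERNEL);
 *	}
 *	mas_destroy(&mas);
 *	mas_unlock(&mas);
 */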
3278 * mas_split_final_node() - Split the final node in a subtree operation.
3279 * @mast: the maple subtree state
3280 * @mas: The maple state
3281 * @height: The height of the tree in case it's a new root.
3283 static inline bool mas_split_final_node(struct maple_subtree_state *mast,
3284 struct ma_state *mas, int height)
3286 struct maple_enode *ancestor;
3288 if (mte_is_root(mas->node)) {
3289 if (mt_is_alloc(mas->tree))
3290 mast->bn->type = maple_arange_64;
3292 mast->bn->type = maple_range_64;
3293 mas->depth = height;
3296 * Only a single node is used here; it could be the root.
3297 * The big node data should just fit in a single node.
3299 ancestor = mas_new_ma_node(mas, mast->bn);
3300 mte_set_parent(mast->l->node, ancestor, mast->l->offset);
3301 mte_set_parent(mast->r->node, ancestor, mast->r->offset);
3302 mte_to_node(ancestor)->parent = mas_mn(mas)->parent;
3304 mast->l->node = ancestor;
3305 mab_mas_cp(mast->bn, 0, mt_slots[mast->bn->type] - 1, mast->l, true);
3306 mas->offset = mast->bn->b_end - 1;
3311 * mast_fill_bnode() - Copy data into the big node in the subtree state
3312 * @mast: The maple subtree state
3313 * @mas: the maple state
3314 * @skip: The number of entries to skip when inserting new nodes.
3316 static inline void mast_fill_bnode(struct maple_subtree_state *mast,
3317 struct ma_state *mas,
3321 struct maple_enode *old = mas->node;
3322 unsigned char split;
3324 memset(mast->bn->gap, 0, sizeof(unsigned long) * ARRAY_SIZE(mast->bn->gap));
3325 memset(mast->bn->slot, 0, sizeof(unsigned long) * ARRAY_SIZE(mast->bn->slot));
3326 memset(mast->bn->pivot, 0, sizeof(unsigned long) * ARRAY_SIZE(mast->bn->pivot));
3327 mast->bn->b_end = 0;
3329 if (mte_is_root(mas->node)) {
3333 mat_add(mast->free, old);
3334 mas->offset = mte_parent_slot(mas->node);
3337 if (cp && mast->l->offset)
3338 mas_mab_cp(mas, 0, mast->l->offset - 1, mast->bn, 0);
3340 split = mast->bn->b_end;
3341 mab_set_b_end(mast->bn, mast->l, mast->l->node);
3342 mast->r->offset = mast->bn->b_end;
3343 mab_set_b_end(mast->bn, mast->r, mast->r->node);
3344 if (mast->bn->pivot[mast->bn->b_end - 1] == mas->max)
3348 mas_mab_cp(mas, split + skip, mt_slot_count(mas->node) - 1,
3349 mast->bn, mast->bn->b_end);
3352 mast->bn->type = mte_node_type(mas->node);
3356 * mast_split_data() - Split the data in the subtree state big node into regular
3357 * nodes.
3358 * @mast: The maple subtree state
3359 * @mas: The maple state
3360 * @split: The location to split the big node
3362 static inline void mast_split_data(struct maple_subtree_state *mast,
3363 struct ma_state *mas, unsigned char split)
3365 unsigned char p_slot;
3367 mab_mas_cp(mast->bn, 0, split, mast->l, true);
3368 mte_set_pivot(mast->r->node, 0, mast->r->max);
3369 mab_mas_cp(mast->bn, split + 1, mast->bn->b_end, mast->r, false);
3370 mast->l->offset = mte_parent_slot(mas->node);
3371 mast->l->max = mast->bn->pivot[split];
3372 mast->r->min = mast->l->max + 1;
3373 if (mte_is_leaf(mas->node))
3376 p_slot = mast->orig_l->offset;
3377 mas_set_split_parent(mast->orig_l, mast->l->node, mast->r->node,
3379 mas_set_split_parent(mast->orig_r, mast->l->node, mast->r->node,
3384 * mas_push_data() - Instead of splitting a node, it is beneficial to push the
3385 * data to the right or left node if there is room.
3386 * @mas: The maple state
3387 * @height: The current height of the maple state
3388 * @mast: The maple subtree state
3389 * @left: Push left or not.
3391 * Keeping the height of the tree low means faster lookups.
3393 * Return: True if pushed, false otherwise.
3395 static inline bool mas_push_data(struct ma_state *mas, int height,
3396 struct maple_subtree_state *mast, bool left)
3398 unsigned char slot_total = mast->bn->b_end;
3399 unsigned char end, space, split;
3401 MA_STATE(tmp_mas, mas->tree, mas->index, mas->last);
3403 tmp_mas.depth = mast->l->depth;
3405 if (left && !mas_prev_sibling(&tmp_mas))
3407 else if (!left && !mas_next_sibling(&tmp_mas))
3410 end = mas_data_end(&tmp_mas);
3412 space = 2 * mt_slot_count(mas->node) - 2;
3413 /* -2 instead of -1 to ensure there isn't a triple split */
3414 if (ma_is_leaf(mast->bn->type))
3417 if (mas->max == ULONG_MAX)
3420 if (slot_total >= space)
3423 /* Get the data; Fill mast->bn */
3426 mab_shift_right(mast->bn, end + 1);
3427 mas_mab_cp(&tmp_mas, 0, end, mast->bn, 0);
3428 mast->bn->b_end = slot_total + 1;
3430 mas_mab_cp(&tmp_mas, 0, end, mast->bn, mast->bn->b_end);
3433 /* Configure mast for splitting of mast->bn */
3434 split = mt_slots[mast->bn->type] - 2;
3436 /* Switch mas to prev node */
3437 mat_add(mast->free, mas->node);
3439 /* Start using mast->l for the left side. */
3440 tmp_mas.node = mast->l->node;
3443 mat_add(mast->free, tmp_mas.node);
3444 tmp_mas.node = mast->r->node;
3446 split = slot_total - split;
3448 split = mab_no_null_split(mast->bn, split, mt_slots[mast->bn->type]);
3449 /* Update parent slot for split calculation. */
3451 mast->orig_l->offset += end + 1;
3453 mast_split_data(mast, mas, split);
3454 mast_fill_bnode(mast, mas, 2);
3455 mas_split_final_node(mast, mas, height + 1);
3460 * mas_split() - Split data that is too big for one node into two.
3461 * @mas: The maple state
3462 * @b_node: The maple big node
3463 * Return: 1 on success, 0 on failure.
3465 static int mas_split(struct ma_state *mas, struct maple_big_node *b_node)
3468 struct maple_subtree_state mast;
3470 unsigned char mid_split, split = 0;
3473 * Splitting is handled differently from any other B-tree; the Maple
3474 * Tree splits upwards. Splitting up means that the split operation
3475 * occurs when the walk of the tree hits the leaves and not on the way
3476 * down. The reason for splitting up is that it is impossible to know
3477 * how much space will be needed until the leaf is (or leaves are)
3478 * reached. Since overwriting data is allowed and a range could
3479 * overwrite more than one range or result in changing one entry into 3
3480 * entries, it is impossible to know if a split is required until the
3483 * Splitting is a balancing act between keeping allocations to a minimum
3484 * and avoiding a 'jitter' event where a tree is expanded to make room
3485 * for an entry followed by a contraction when the entry is removed. To
3486 * accomplish the balance, there are empty slots remaining in both left
3487 * and right nodes after a split.
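 *
 * Illustrative sketch (editorial, not part of the kernel source): assuming
 * MAPLE_RANGE64_SLOTS is 16, a 17th disjoint range no longer fits in one
 * leaf, so the final store below arrives here (or is pushed into a sibling
 * that has room). Public API:
 *
 *	for (i = 0; i <= 16; i++)
 *		mtree_store_range(&mt, i * 10, i * 10 + 9,
 *				  xa_mk_value(i), GFP_KERNEL);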
3489 MA_STATE(l_mas, mas->tree, mas->index, mas->last);
3490 MA_STATE(r_mas, mas->tree, mas->index, mas->last);
3491 MA_STATE(prev_l_mas, mas->tree, mas->index, mas->last);
3492 MA_STATE(prev_r_mas, mas->tree, mas->index, mas->last);
3493 MA_TOPIARY(mat, mas->tree);
3495 trace_ma_op(__func__, mas);
3496 mas->depth = mas_mt_height(mas);
3497 /* Allocation failures will happen early. */
3498 mas_node_count(mas, 1 + mas->depth * 2);
3499 if (mas_is_err(mas))
3504 mast.orig_l = &prev_l_mas;
3505 mast.orig_r = &prev_r_mas;
3509 while (height++ <= mas->depth) {
3510 if (mt_slots[b_node->type] > b_node->b_end) {
3511 mas_split_final_node(&mast, mas, height);
3515 l_mas = r_mas = *mas;
3516 l_mas.node = mas_new_ma_node(mas, b_node);
3517 r_mas.node = mas_new_ma_node(mas, b_node);
3519 * Another way that 'jitter' is avoided is to terminate a split up early if the
3520 * left or right node has space to spare. This is referred to as "pushing left"
3521 * or "pushing right" and is similar to the B* tree, except the nodes left or
3522 * right can rarely be reused due to RCU, but the ripple upwards is halted,
3523 * which is a significant saving.
3525 /* Try to push left. */
3526 if (mas_push_data(mas, height, &mast, true))
3529 /* Try to push right. */
3530 if (mas_push_data(mas, height, &mast, false))
3533 split = mab_calc_split(mas, b_node, &mid_split, prev_l_mas.min);
3534 mast_split_data(&mast, mas, split);
3536 * Usually correct, mab_mas_cp in the above call overwrites
3539 mast.r->max = mas->max;
3540 mast_fill_bnode(&mast, mas, 1);
3541 prev_l_mas = *mast.l;
3542 prev_r_mas = *mast.r;
3545 /* Set the original node as dead */
3546 mat_add(mast.free, mas->node);
3547 mas->node = l_mas.node;
3548 mas_wmb_replace(mas, mast.free, NULL);
3549 mtree_range_walk(mas);
3554 * mas_reuse_node() - Reuse the node to store the data.
3555 * @wr_mas: The maple write state
3556 * @bn: The maple big node
3557 * @end: The end of the data.
3559 * Will always return false in RCU mode.
3561 * Return: True if node was reused, false otherwise.
3563 static inline bool mas_reuse_node(struct ma_wr_state *wr_mas,
3564 struct maple_big_node *bn, unsigned char end)
3566 /* Need to be rcu safe. */
3567 if (mt_in_rcu(wr_mas->mas->tree))
3570 if (end > bn->b_end) {
3571 int clear = mt_slots[wr_mas->type] - bn->b_end;
3573 memset(wr_mas->slots + bn->b_end, 0, sizeof(void *) * clear--);
3574 memset(wr_mas->pivots + bn->b_end, 0, sizeof(unsigned long) * clear);
3576 mab_mas_cp(bn, 0, bn->b_end, wr_mas->mas, false);
3581 * mas_commit_b_node() - Commit the big node into the tree.
3582 * @wr_mas: The maple write state
3583 * @b_node: The maple big node
3584 * @end: The end of the data.
3586 static inline int mas_commit_b_node(struct ma_wr_state *wr_mas,
3587 struct maple_big_node *b_node, unsigned char end)
3589 struct maple_node *node;
3590 unsigned char b_end = b_node->b_end;
3591 enum maple_type b_type = b_node->type;
3593 if ((b_end < mt_min_slots[b_type]) &&
3594 (!mte_is_root(wr_mas->mas->node)) &&
3595 (mas_mt_height(wr_mas->mas) > 1))
3596 return mas_rebalance(wr_mas->mas, b_node);
3598 if (b_end >= mt_slots[b_type])
3599 return mas_split(wr_mas->mas, b_node);
3601 if (mas_reuse_node(wr_mas, b_node, end))
3604 mas_node_count(wr_mas->mas, 1);
3605 if (mas_is_err(wr_mas->mas))
3608 node = mas_pop_node(wr_mas->mas);
3609 node->parent = mas_mn(wr_mas->mas)->parent;
3610 wr_mas->mas->node = mt_mk_node(node, b_type);
3611 mab_mas_cp(b_node, 0, b_end, wr_mas->mas, false);
3612 mas_replace(wr_mas->mas, false);
3614 mas_update_gap(wr_mas->mas);
3619 * mas_root_expand() - Expand a root to a node
3620 * @mas: The maple state
3621 * @entry: The entry to store into the tree
3623 static inline int mas_root_expand(struct ma_state *mas, void *entry)
3625 void *contents = mas_root_locked(mas);
3626 enum maple_type type = maple_leaf_64;
3627 struct maple_node *node;
3629 unsigned long *pivots;
3632 mas_node_count(mas, 1);
3633 if (unlikely(mas_is_err(mas)))
3636 node = mas_pop_node(mas);
3637 pivots = ma_pivots(node, type);
3638 slots = ma_slots(node, type);
3639 node->parent = ma_parent_ptr(
3640 ((unsigned long)mas->tree | MA_ROOT_PARENT));
3641 mas->node = mt_mk_node(node, type);
3645 rcu_assign_pointer(slots[slot], contents);
3646 if (likely(mas->index > 1))
3649 pivots[slot++] = mas->index - 1;
3652 rcu_assign_pointer(slots[slot], entry);
3654 pivots[slot] = mas->last;
3655 if (mas->last != ULONG_MAX)
3658 mas_set_height(mas);
3660 /* swap the new root into the tree */
3661 rcu_assign_pointer(mas->tree->ma_root, mte_mk_root(mas->node));
3662 ma_set_meta(node, maple_leaf_64, 0, slot);
3666 static inline void mas_store_root(struct ma_state *mas, void *entry)
3668 if (likely((mas->last != 0) || (mas->index != 0)))
3669 mas_root_expand(mas, entry);
3670 else if (((unsigned long) (entry) & 3) == 2)
3671 mas_root_expand(mas, entry);
3673 rcu_assign_pointer(mas->tree->ma_root, entry);
3674 mas->node = MAS_START;
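/*
 * Illustrative sketch (editorial, not part of the kernel source): how the
 * root cases above map onto stores; p is any pointer that is a valid maple
 * tree entry.
 *
 *	mtree_store_range(&mt, 0, 0, p, GFP_KERNEL);
 *		single entry at index 0: kept directly in ma_root
 *	mtree_store_range(&mt, 0, 5, p, GFP_KERNEL);
 *		any other range: the root is expanded into a leaf node
 */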
3679 * mas_is_span_wr() - Check if the write needs to be treated as a write that
3680 * spans the node.
3681 * @wr_mas: The maple write state
3686 * Spanning writes are writes that start in one node and end in another OR if
3687 * the write of a %NULL will cause the node to end with a %NULL.
3689 * Return: True if this is a spanning write, false otherwise.
3691 static bool mas_is_span_wr(struct ma_wr_state *wr_mas)
3694 unsigned long last = wr_mas->mas->last;
3695 unsigned long piv = wr_mas->r_max;
3696 enum maple_type type = wr_mas->type;
3697 void *entry = wr_mas->entry;
3699 /* Contained in this pivot */
3703 max = wr_mas->mas->max;
3704 if (unlikely(ma_is_leaf(type))) {
3705 /* Fits in the node, but may span slots. */
3709 /* Writes to the end of the node but not null. */
3710 if ((last == max) && entry)
3714 * Writing ULONG_MAX is not a spanning write regardless of the
3715 * value being written as long as the range fits in the node.
3717 if ((last == ULONG_MAX) && (last == max))
3719 } else if (piv == last) {
3723 /* Detect spanning store wr walk */
3724 if (last == ULONG_MAX)
3728 trace_ma_write(__func__, wr_mas->mas, piv, entry);
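/*
 * Illustrative sketch (editorial, not part of the kernel source): in a tree
 * large enough that ranges 0-9, 10-19, ... 90-99 sit in different leaves,
 * the store below starts in one node and ends in another, so it is detected
 * here as a spanning write:
 *
 *	mtree_store_range(&mt, 5, 24, xa_mk_value(1), GFP_KERNEL);
 *
 * A %NULL written up to a node's maximum also spans, so that it can be
 * merged with a possible %NULL at the start of the next node.
 */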
3733 static inline void mas_wr_walk_descend(struct ma_wr_state *wr_mas)
3735 wr_mas->type = mte_node_type(wr_mas->mas->node);
3736 mas_wr_node_walk(wr_mas);
3737 wr_mas->slots = ma_slots(wr_mas->node, wr_mas->type);
3740 static inline void mas_wr_walk_traverse(struct ma_wr_state *wr_mas)
3742 wr_mas->mas->max = wr_mas->r_max;
3743 wr_mas->mas->min = wr_mas->r_min;
3744 wr_mas->mas->node = wr_mas->content;
3745 wr_mas->mas->offset = 0;
3746 wr_mas->mas->depth++;
3749 * mas_wr_walk() - Walk the tree for a write.
3750 * @wr_mas: The maple write state
3752 * Uses mas_slot_locked() and does not need to worry about dead nodes.
3754 * Return: True if it's contained in a node, false on spanning write.
3756 static bool mas_wr_walk(struct ma_wr_state *wr_mas)
3758 struct ma_state *mas = wr_mas->mas;
3761 mas_wr_walk_descend(wr_mas);
3762 if (unlikely(mas_is_span_wr(wr_mas)))
3765 wr_mas->content = mas_slot_locked(mas, wr_mas->slots,
3767 if (ma_is_leaf(wr_mas->type))
3770 mas_wr_walk_traverse(wr_mas);
3776 static bool mas_wr_walk_index(struct ma_wr_state *wr_mas)
3778 struct ma_state *mas = wr_mas->mas;
3781 mas_wr_walk_descend(wr_mas);
3782 wr_mas->content = mas_slot_locked(mas, wr_mas->slots,
3784 if (ma_is_leaf(wr_mas->type))
3786 mas_wr_walk_traverse(wr_mas);
3792 * mas_extend_spanning_null() - Extend a store of a %NULL to include surrounding %NULLs.
3793 * @l_wr_mas: The left maple write state
3794 * @r_wr_mas: The right maple write state
3796 static inline void mas_extend_spanning_null(struct ma_wr_state *l_wr_mas,
3797 struct ma_wr_state *r_wr_mas)
3799 struct ma_state *r_mas = r_wr_mas->mas;
3800 struct ma_state *l_mas = l_wr_mas->mas;
3801 unsigned char l_slot;
3803 l_slot = l_mas->offset;
3804 if (!l_wr_mas->content)
3805 l_mas->index = l_wr_mas->r_min;
3807 if ((l_mas->index == l_wr_mas->r_min) &&
3809 !mas_slot_locked(l_mas, l_wr_mas->slots, l_slot - 1))) {
3811 l_mas->index = l_wr_mas->pivots[l_slot - 2] + 1;
3813 l_mas->index = l_mas->min;
3815 l_mas->offset = l_slot - 1;
3818 if (!r_wr_mas->content) {
3819 if (r_mas->last < r_wr_mas->r_max)
3820 r_mas->last = r_wr_mas->r_max;
3822 } else if ((r_mas->last == r_wr_mas->r_max) &&
3823 (r_mas->last < r_mas->max) &&
3824 !mas_slot_locked(r_mas, r_wr_mas->slots, r_mas->offset + 1)) {
3825 r_mas->last = mas_safe_pivot(r_mas, r_wr_mas->pivots,
3826 r_wr_mas->type, r_mas->offset + 1);
3831 static inline void *mas_state_walk(struct ma_state *mas)
3835 entry = mas_start(mas);
3836 if (mas_is_none(mas))
3839 if (mas_is_ptr(mas))
3842 return mtree_range_walk(mas);
3846 * mtree_lookup_walk() - Internal quick lookup that does not keep the maple
3847 * state up to date.
3849 * @mas: The maple state.
3851 * Note: Leaves @mas in an undesirable state.
3852 * Return: The entry for @mas->index or %NULL on dead node.
3854 static inline void *mtree_lookup_walk(struct ma_state *mas)
3856 unsigned long *pivots;
3857 unsigned char offset;
3858 struct maple_node *node;
3859 struct maple_enode *next;
3860 enum maple_type type;
3869 node = mte_to_node(next);
3870 type = mte_node_type(next);
3871 pivots = ma_pivots(node, type);
3872 end = ma_data_end(node, type, pivots, max);
3873 if (unlikely(ma_dead_node(node)))
3876 if (pivots[offset] >= mas->index)
3881 } while ((offset < end) && (pivots[offset] < mas->index));
3883 if (likely(offset < end))
3884 max = pivots[offset];
3887 slots = ma_slots(node, type);
3888 next = mt_slot(mas->tree, slots, offset);
3889 if (unlikely(ma_dead_node(node)))
3891 } while (!ma_is_leaf(type));
3893 return (void *) next;
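/*
 * Illustrative sketch (editorial, not part of the kernel source): this walk
 * backs the public lookup path. Values are arbitrary.
 *
 *	mtree_store_range(&mt, 10, 19, xa_mk_value(1), GFP_KERNEL);
 *	entry = mtree_load(&mt, 15);	returns xa_mk_value(1)
 *	entry = mtree_load(&mt, 25);	returns NULL
 */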
3901 * mas_new_root() - Create a new root node that only contains the entry passed
3903 * @mas: The maple state
3904 * @entry: The entry to store.
3906 * Only valid when @mas->index == 0 and @mas->last == ULONG_MAX.
3908 * Return: 0 on error, 1 on success.
3910 static inline int mas_new_root(struct ma_state *mas, void *entry)
3912 struct maple_enode *root = mas_root_locked(mas);
3913 enum maple_type type = maple_leaf_64;
3914 struct maple_node *node;
3916 unsigned long *pivots;
3918 if (!entry && !mas->index && mas->last == ULONG_MAX) {
3920 mas_set_height(mas);
3921 rcu_assign_pointer(mas->tree->ma_root, entry);
3922 mas->node = MAS_START;
3926 mas_node_count(mas, 1);
3927 if (mas_is_err(mas))
3930 node = mas_pop_node(mas);
3931 pivots = ma_pivots(node, type);
3932 slots = ma_slots(node, type);
3933 node->parent = ma_parent_ptr(
3934 ((unsigned long)mas->tree | MA_ROOT_PARENT));
3935 mas->node = mt_mk_node(node, type);
3936 rcu_assign_pointer(slots[0], entry);
3937 pivots[0] = mas->last;
3939 mas_set_height(mas);
3940 rcu_assign_pointer(mas->tree->ma_root, mte_mk_root(mas->node));
3943 if (xa_is_node(root))
3944 mte_destroy_walk(root, mas->tree);
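/*
 * Illustrative sketch (editorial, not part of the kernel source): a store
 * over the full range collapses whatever tree exists into a new root:
 *
 *	mtree_store_range(&mt, 0, ULONG_MAX, xa_mk_value(1), GFP_KERNEL);
 *
 * Storing %NULL over the same range empties the tree instead.
 */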
3949 * mas_wr_spanning_store() - Create a subtree with the store operation completed
3950 * and new nodes where necessary, then place the sub-tree in the actual tree.
3951 * Note that @mas is expected to point to the node which caused the store to
3952 * span.
3953 * @wr_mas: The maple write state
3955 * Return: 0 on error, positive on success.
3957 static inline int mas_wr_spanning_store(struct ma_wr_state *wr_mas)
3959 struct maple_subtree_state mast;
3960 struct maple_big_node b_node;
3961 struct ma_state *mas;
3962 unsigned char height;
3964 /* Left and Right side of spanning store */
3965 MA_STATE(l_mas, NULL, 0, 0);
3966 MA_STATE(r_mas, NULL, 0, 0);
3968 MA_WR_STATE(r_wr_mas, &r_mas, wr_mas->entry);
3969 MA_WR_STATE(l_wr_mas, &l_mas, wr_mas->entry);
3972 * A store operation that spans multiple nodes is called a spanning
3973 * store and is handled early in the store call stack by the function
3974 * mas_is_span_wr(). When a spanning store is identified, the maple
3975 * state is duplicated. The first maple state walks the left tree path
3976 * to ``index``, the duplicate walks the right tree path to ``last``.
3977 * The data in the two nodes are combined into a single node, two nodes,
3978 * or possibly three nodes (see the 3-way split above). A ``NULL``
3979 * written to the last entry of a node is considered a spanning store as
3980 * a rebalance is required for the operation to complete and an overflow
3981 * of data may happen.
3984 trace_ma_op(__func__, mas);
3986 if (unlikely(!mas->index && mas->last == ULONG_MAX))
3987 return mas_new_root(mas, wr_mas->entry);
3989 * Node rebalancing may occur due to this store, so there may be three new
3990 * entries per level plus a new root.
3992 height = mas_mt_height(mas);
3993 mas_node_count(mas, 1 + height * 3);
3994 if (mas_is_err(mas))
3998 * Set up right side. Need to get to the next offset after the spanning
3999 * store to ensure it's not NULL and to combine both the next node and
4000 * the node with the start together.
4003 /* Avoid overflow, walk to next slot in the tree. */
4007 r_mas.index = r_mas.last;
4008 mas_wr_walk_index(&r_wr_mas);
4009 r_mas.last = r_mas.index = mas->last;
4011 /* Set up left side. */
4013 mas_wr_walk_index(&l_wr_mas);
4015 if (!wr_mas->entry) {
4016 mas_extend_spanning_null(&l_wr_mas, &r_wr_mas);
4017 mas->offset = l_mas.offset;
4018 mas->index = l_mas.index;
4019 mas->last = l_mas.last = r_mas.last;
4022 /* expanding NULLs may make this cover the entire range */
4023 if (!l_mas.index && r_mas.last == ULONG_MAX) {
4024 mas_set_range(mas, 0, ULONG_MAX);
4025 return mas_new_root(mas, wr_mas->entry);
4028 memset(&b_node, 0, sizeof(struct maple_big_node));
4029 /* Copy l_mas and store the value in b_node. */
4030 mas_store_b_node(&l_wr_mas, &b_node, l_wr_mas.node_end);
4031 /* Copy r_mas into b_node. */
4032 if (r_mas.offset <= r_wr_mas.node_end)
4033 mas_mab_cp(&r_mas, r_mas.offset, r_wr_mas.node_end,
4034 &b_node, b_node.b_end + 1);
4038 /* Stop spanning searches by searching for just index. */
4039 l_mas.index = l_mas.last = mas->index;
4042 mast.orig_l = &l_mas;
4043 mast.orig_r = &r_mas;
4044 /* Combine l_mas and r_mas and split them up evenly again. */
4045 return mas_spanning_rebalance(mas, &mast, height + 1);
4049 * mas_wr_node_store() - Attempt to store the value in a node
4050 * @wr_mas: The maple write state
4052 * Attempts to reuse the node, but may allocate.
4054 * Return: True if stored, false otherwise
4056 static inline bool mas_wr_node_store(struct ma_wr_state *wr_mas)
4058 struct ma_state *mas = wr_mas->mas;
4059 void __rcu **dst_slots;
4060 unsigned long *dst_pivots;
4061 unsigned char dst_offset;
4062 unsigned char new_end = wr_mas->node_end;
4063 unsigned char offset;
4064 unsigned char node_slots = mt_slots[wr_mas->type];
4065 struct maple_node reuse, *newnode;
4066 unsigned char copy_size, max_piv = mt_pivots[wr_mas->type];
4067 bool in_rcu = mt_in_rcu(mas->tree);
4069 offset = mas->offset;
4070 if (mas->last == wr_mas->r_max) {
4071 /* runs right to the end of the node */
4072 if (mas->last == mas->max)
4074 /* don't copy this offset */
4075 wr_mas->offset_end++;
4076 } else if (mas->last < wr_mas->r_max) {
4077 /* new range ends in this range */
4078 if (unlikely(wr_mas->r_max == ULONG_MAX))
4079 mas_bulk_rebalance(mas, wr_mas->node_end, wr_mas->type);
4083 if (wr_mas->end_piv == mas->last)
4084 wr_mas->offset_end++;
4086 new_end -= wr_mas->offset_end - offset - 1;
4089 /* new range starts within a range */
4090 if (wr_mas->r_min < mas->index)
4093 /* Not enough room */
4094 if (new_end >= node_slots)
4097 /* Not enough data. */
4098 if (!mte_is_root(mas->node) && (new_end <= mt_min_slots[wr_mas->type]) &&
4099 !(mas->mas_flags & MA_STATE_BULK))
4104 mas_node_count(mas, 1);
4105 if (mas_is_err(mas))
4108 newnode = mas_pop_node(mas);
4110 memset(&reuse, 0, sizeof(struct maple_node));
4114 newnode->parent = mas_mn(mas)->parent;
4115 dst_pivots = ma_pivots(newnode, wr_mas->type);
4116 dst_slots = ma_slots(newnode, wr_mas->type);
4117 /* Copy from start to insert point */
4118 memcpy(dst_pivots, wr_mas->pivots, sizeof(unsigned long) * (offset + 1));
4119 memcpy(dst_slots, wr_mas->slots, sizeof(void *) * (offset + 1));
4120 dst_offset = offset;
4122 /* Handle insert of new range starting after old range */
4123 if (wr_mas->r_min < mas->index) {
4125 rcu_assign_pointer(dst_slots[dst_offset], wr_mas->content);
4126 dst_pivots[dst_offset++] = mas->index - 1;
4129 /* Store the new entry and range end. */
4130 if (dst_offset < max_piv)
4131 dst_pivots[dst_offset] = mas->last;
4132 mas->offset = dst_offset;
4133 rcu_assign_pointer(dst_slots[dst_offset], wr_mas->entry);
4136 * this range wrote to the end of the node or it overwrote the rest of
4139 if (wr_mas->offset_end > wr_mas->node_end || mas->last >= mas->max) {
4140 new_end = dst_offset;
4145 /* Copy to the end of node if necessary. */
4146 copy_size = wr_mas->node_end - wr_mas->offset_end + 1;
4147 memcpy(dst_slots + dst_offset, wr_mas->slots + wr_mas->offset_end,
4148 sizeof(void *) * copy_size);
4149 if (dst_offset < max_piv) {
4150 if (copy_size > max_piv - dst_offset)
4151 copy_size = max_piv - dst_offset;
4153 memcpy(dst_pivots + dst_offset,
4154 wr_mas->pivots + wr_mas->offset_end,
4155 sizeof(unsigned long) * copy_size);
4158 if ((wr_mas->node_end == node_slots - 1) && (new_end < node_slots - 1))
4159 dst_pivots[new_end] = mas->max;
4162 mas_leaf_set_meta(mas, newnode, dst_pivots, maple_leaf_64, new_end);
4164 mas->node = mt_mk_node(newnode, wr_mas->type);
4165 mas_replace(mas, false);
4167 memcpy(wr_mas->node, newnode, sizeof(struct maple_node));
4169 trace_ma_write(__func__, mas, 0, wr_mas->entry);
4170 mas_update_gap(mas);
4175 * mas_wr_slot_store() - Attempt to store a value in a slot.
4176 * @wr_mas: the maple write state
4178 * Return: True if stored, false otherwise
4180 static inline bool mas_wr_slot_store(struct ma_wr_state *wr_mas)
4182 struct ma_state *mas = wr_mas->mas;
4183 unsigned long lmax; /* Logical max. */
4184 unsigned char offset = mas->offset;
4186 if ((wr_mas->r_max > mas->last) && ((wr_mas->r_min != mas->index) ||
4187 (offset != wr_mas->node_end)))
4190 if (offset == wr_mas->node_end - 1)
4193 lmax = wr_mas->pivots[offset + 1];
4195 /* going to overwrite too many slots. */
4196 if (lmax < mas->last)
4199 if (wr_mas->r_min == mas->index) {
4200 /* overwriting two or more ranges with one. */
4201 if (lmax == mas->last)
4204 /* Overwriting all of offset and a portion of offset + 1. */
4205 rcu_assign_pointer(wr_mas->slots[offset], wr_mas->entry);
4206 wr_mas->pivots[offset] = mas->last;
4210 /* Doesn't end on the next range end. */
4211 if (lmax != mas->last)
4214 /* Overwriting a portion of offset and all of offset + 1 */
4215 if ((offset + 1 < mt_pivots[wr_mas->type]) &&
4216 (wr_mas->entry || wr_mas->pivots[offset + 1]))
4217 wr_mas->pivots[offset + 1] = mas->last;
4219 rcu_assign_pointer(wr_mas->slots[offset + 1], wr_mas->entry);
4220 wr_mas->pivots[offset] = mas->index - 1;
4221 mas->offset++; /* Keep mas accurate. */
4224 trace_ma_write(__func__, mas, 0, wr_mas->entry);
4225 mas_update_gap(mas);
4229 static inline void mas_wr_end_piv(struct ma_wr_state *wr_mas)
4231 while ((wr_mas->mas->last > wr_mas->end_piv) &&
4232 (wr_mas->offset_end < wr_mas->node_end))
4233 wr_mas->end_piv = wr_mas->pivots[++wr_mas->offset_end];
4235 if (wr_mas->mas->last > wr_mas->end_piv)
4236 wr_mas->end_piv = wr_mas->mas->max;
4239 static inline void mas_wr_extend_null(struct ma_wr_state *wr_mas)
4241 struct ma_state *mas = wr_mas->mas;
4243 if (mas->last < wr_mas->end_piv && !wr_mas->slots[wr_mas->offset_end])
4244 mas->last = wr_mas->end_piv;
4246 /* Check next slot(s) if we are overwriting the end */
4247 if ((mas->last == wr_mas->end_piv) &&
4248 (wr_mas->node_end != wr_mas->offset_end) &&
4249 !wr_mas->slots[wr_mas->offset_end + 1]) {
4250 wr_mas->offset_end++;
4251 if (wr_mas->offset_end == wr_mas->node_end)
4252 mas->last = mas->max;
4254 mas->last = wr_mas->pivots[wr_mas->offset_end];
4255 wr_mas->end_piv = mas->last;
4258 if (!wr_mas->content) {
4259 /* If this one is null, the next and prev are not */
4260 mas->index = wr_mas->r_min;
4262 /* Check prev slot if we are overwriting the start */
4263 if (mas->index == wr_mas->r_min && mas->offset &&
4264 !wr_mas->slots[mas->offset - 1]) {
4266 wr_mas->r_min = mas->index =
4267 mas_safe_min(mas, wr_mas->pivots, mas->offset);
4268 wr_mas->r_max = wr_mas->pivots[mas->offset];
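/*
 * Illustrative sketch (editorial, not part of the kernel source): writing a
 * %NULL next to existing %NULL ranges widens the write so that the NULLs
 * coalesce into a single range. Public API; values are arbitrary.
 *
 *	mtree_store_range(&mt, 0, 9, xa_mk_value(0), GFP_KERNEL);
 *	mtree_store_range(&mt, 20, 29, xa_mk_value(2), GFP_KERNEL);
 *	mtree_erase(&mt, 5);
 *
 * The erase stores %NULL over 0-9, which is extended to cover 0-19 because
 * 10-19 was already %NULL.
 */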
4273 static inline bool mas_wr_append(struct ma_wr_state *wr_mas)
4275 unsigned char end = wr_mas->node_end;
4276 unsigned char new_end = end + 1;
4277 struct ma_state *mas = wr_mas->mas;
4278 unsigned char node_pivots = mt_pivots[wr_mas->type];
4280 if ((mas->index != wr_mas->r_min) && (mas->last == wr_mas->r_max)) {
4281 if (new_end < node_pivots)
4282 wr_mas->pivots[new_end] = wr_mas->pivots[end];
4284 if (new_end < node_pivots)
4285 ma_set_meta(wr_mas->node, maple_leaf_64, 0, new_end);
4287 rcu_assign_pointer(wr_mas->slots[new_end], wr_mas->entry);
4288 mas->offset = new_end;
4289 wr_mas->pivots[end] = mas->index - 1;
4294 if ((mas->index == wr_mas->r_min) && (mas->last < wr_mas->r_max)) {
4295 if (new_end < node_pivots)
4296 wr_mas->pivots[new_end] = wr_mas->pivots[end];
4298 rcu_assign_pointer(wr_mas->slots[new_end], wr_mas->content);
4299 if (new_end < node_pivots)
4300 ma_set_meta(wr_mas->node, maple_leaf_64, 0, new_end);
4302 wr_mas->pivots[end] = mas->last;
4303 rcu_assign_pointer(wr_mas->slots[end], wr_mas->entry);
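/*
 * Illustrative sketch (editorial, not part of the kernel source): appending
 * is the common case when ranges arrive in increasing order, as each store
 * lands at the end of the last leaf. Public API.
 *
 *	for (i = 0; i < 50; i++)
 *		mtree_store_range(&mt, i * 10, i * 10 + 9,
 *				  xa_mk_value(i), GFP_KERNEL);
 */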
4311 * mas_wr_bnode() - Slow path for a modification.
4312 * @wr_mas: The write maple state
4314 * This is where split and rebalance end up.
4316 static void mas_wr_bnode(struct ma_wr_state *wr_mas)
4318 struct maple_big_node b_node;
4320 trace_ma_write(__func__, wr_mas->mas, 0, wr_mas->entry);
4321 memset(&b_node, 0, sizeof(struct maple_big_node));
4322 mas_store_b_node(wr_mas, &b_node, wr_mas->offset_end);
4323 mas_commit_b_node(wr_mas, &b_node, wr_mas->node_end);
4326 static inline void mas_wr_modify(struct ma_wr_state *wr_mas)
4328 unsigned char node_slots;
4329 unsigned char node_size;
4330 struct ma_state *mas = wr_mas->mas;
4332 /* Direct replacement */
4333 if (wr_mas->r_min == mas->index && wr_mas->r_max == mas->last) {
4334 rcu_assign_pointer(wr_mas->slots[mas->offset], wr_mas->entry);
4335 if (!!wr_mas->entry ^ !!wr_mas->content)
4336 mas_update_gap(mas);
4340 /* Attempt to append */
4341 node_slots = mt_slots[wr_mas->type];
4342 node_size = wr_mas->node_end - wr_mas->offset_end + mas->offset + 2;
4343 if (mas->max == ULONG_MAX)
4346 /* slot and node store will not fit, go to the slow path */
4347 if (unlikely(node_size >= node_slots))
4350 if (wr_mas->entry && (wr_mas->node_end < node_slots - 1) &&
4351 (mas->offset == wr_mas->node_end) && mas_wr_append(wr_mas)) {
4352 if (!wr_mas->content || !wr_mas->entry)
4353 mas_update_gap(mas);
4357 if ((wr_mas->offset_end - mas->offset <= 1) && mas_wr_slot_store(wr_mas))
4359 else if (mas_wr_node_store(wr_mas))
4362 if (mas_is_err(mas))
4366 mas_wr_bnode(wr_mas);
4370 * mas_wr_store_entry() - Internal call to store a value
4371 * @wr_mas: The maple write state
4374 * Return: The contents that were stored at the index.
4376 static inline void *mas_wr_store_entry(struct ma_wr_state *wr_mas)
4378 struct ma_state *mas = wr_mas->mas;
4380 wr_mas->content = mas_start(mas);
4381 if (mas_is_none(mas) || mas_is_ptr(mas)) {
4382 mas_store_root(mas, wr_mas->entry);
4383 return wr_mas->content;
4386 if (unlikely(!mas_wr_walk(wr_mas))) {
4387 mas_wr_spanning_store(wr_mas);
4388 return wr_mas->content;
4391 /* At this point, we are at the leaf node that needs to be altered. */
4392 wr_mas->end_piv = wr_mas->r_max;
4393 mas_wr_end_piv(wr_mas);
4396 mas_wr_extend_null(wr_mas);
4398 /* New root for a single pointer */
4399 if (unlikely(!mas->index && mas->last == ULONG_MAX)) {
4400 mas_new_root(mas, wr_mas->entry);
4401 return wr_mas->content;
4404 mas_wr_modify(wr_mas);
4405 return wr_mas->content;
4409 * mas_insert() - Internal call to insert a value
4410 * @mas: The maple state
4411 * @entry: The entry to store
4413 * Return: %NULL if stored, otherwise the contents that already exist at the
4414 * requested index. The maple state needs to be checked for error conditions.
4416 static inline void *mas_insert(struct ma_state *mas, void *entry)
4418 MA_WR_STATE(wr_mas, mas, entry);
4421 * Inserting a new range inserts either 0, 1, or 2 pivots within the
4422 * tree. If the insert fits exactly into an existing gap with a value
4423 * of NULL, then the slot only needs to be written with the new value.
4424 * If the range being inserted is adjacent to another range, then only a
4425 * single pivot needs to be inserted (as well as writing the entry). If
4426 * the new range is within a gap but does not touch any other ranges,
4427 * then two pivots need to be inserted: the start - 1, and the end. As
4428 * usual, the entry must be written. Most operations require a new node
4429 * to be allocated and replace an existing node to ensure RCU safety,
4430 * when in RCU mode. The exception to requiring a newly allocated node
4431 * is when inserting at the end of a node (appending). When done carefully,
4432 * appending can reuse the node in place. See the usage sketch after this function.
4434 wr_mas.content = mas_start(mas);
4438 if (mas_is_none(mas) || mas_is_ptr(mas)) {
4439 mas_store_root(mas, entry);
4443 /* spanning writes always overwrite something */
4444 if (!mas_wr_walk(&wr_mas))
4447 /* At this point, we are at the leaf node that needs to be altered. */
4448 wr_mas.offset_end = mas->offset;
4449 wr_mas.end_piv = wr_mas.r_max;
4451 if (wr_mas.content || (mas->last > wr_mas.r_max))
4457 mas_wr_modify(&wr_mas);
4458 return wr_mas.content;
4461 mas_set_err(mas, -EEXIST);
4462 return wr_mas.content;
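/*
 * Usage sketch (editorial, not part of the kernel source): the public insert
 * interface fails rather than overwriting. Values are arbitrary.
 *
 *	ret = mtree_insert_range(&mt, 10, 19, xa_mk_value(1), GFP_KERNEL);
 *		returns 0: the range was empty
 *	ret = mtree_insert_range(&mt, 15, 25, xa_mk_value(2), GFP_KERNEL);
 *		returns -EEXIST: 15-19 already has an entry
 */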
4467 * mas_prev_node() - Find the prev non-null entry at the same level in the
4468 * tree. The prev value will be mas->node[mas->offset] or MAS_NONE.
4469 * @mas: The maple state
4470 * @min: The lower limit to search
4473 * Return: 1 if the node is dead, 0 otherwise.
4475 static inline int mas_prev_node(struct ma_state *mas, unsigned long min)
4480 struct maple_node *node;
4481 struct maple_enode *enode;
4482 unsigned long *pivots;
4484 if (mas_is_none(mas))
4490 if (ma_is_root(node))
4494 if (unlikely(mas_ascend(mas)))
4496 offset = mas->offset;
4501 mt = mte_node_type(mas->node);
4503 slots = ma_slots(node, mt);
4504 pivots = ma_pivots(node, mt);
4505 mas->max = pivots[offset];
4507 mas->min = pivots[offset - 1] + 1;
4508 if (unlikely(ma_dead_node(node)))
4516 enode = mas_slot(mas, slots, offset);
4517 if (unlikely(ma_dead_node(node)))
4521 mt = mte_node_type(mas->node);
4523 slots = ma_slots(node, mt);
4524 pivots = ma_pivots(node, mt);
4525 offset = ma_data_end(node, mt, pivots, mas->max);
4527 mas->min = pivots[offset - 1] + 1;
4529 if (offset < mt_pivots[mt])
4530 mas->max = pivots[offset];
4536 mas->node = mas_slot(mas, slots, offset);
4537 if (unlikely(ma_dead_node(node)))
4540 mas->offset = mas_data_end(mas);
4541 if (unlikely(mte_dead_node(mas->node)))
4547 mas->offset = offset;
4549 mas->min = pivots[offset - 1] + 1;
4551 if (unlikely(ma_dead_node(node)))
4554 mas->node = MAS_NONE;
4559 * mas_next_node() - Get the next node at the same level in the tree.
4560 * @mas: The maple state
 * @node: The maple node behind @mas->node
4561	 * @max: The maximum pivot value to check.
4563 * The next value will be mas->node[mas->offset] or MAS_NONE.
4564 * Return: 1 on dead node, 0 otherwise.
4566 static inline int mas_next_node(struct ma_state *mas, struct maple_node *node,
4569 unsigned long min, pivot;
4570 unsigned long *pivots;
4571 struct maple_enode *enode;
4573 unsigned char offset;
4577 if (mas->max >= max)
4582 if (ma_is_root(node))
4589 if (unlikely(mas_ascend(mas)))
4592 offset = mas->offset;
4595 mt = mte_node_type(mas->node);
4596 pivots = ma_pivots(node, mt);
4597 } while (unlikely(offset == ma_data_end(node, mt, pivots, mas->max)));
4599 slots = ma_slots(node, mt);
4600 pivot = mas_safe_pivot(mas, pivots, ++offset, mt);
4601 while (unlikely(level > 1)) {
4602 /* Descend, if necessary */
4603 enode = mas_slot(mas, slots, offset);
4604 if (unlikely(ma_dead_node(node)))
4610 mt = mte_node_type(mas->node);
4611 slots = ma_slots(node, mt);
4612 pivots = ma_pivots(node, mt);
4617 enode = mas_slot(mas, slots, offset);
4618 if (unlikely(ma_dead_node(node)))
4627 if (unlikely(ma_dead_node(node)))
4630 mas->node = MAS_NONE;
4635 * mas_next_nentry() - Get the next node entry
4636 * @mas: The maple state
 * @node: The maple node being examined
4637	 * @max: The maximum value to check
4638	 * @type: The maple node type of @node
4640 * Sets @mas->offset to the offset of the next node entry, @mas->last to the
4641 * pivot of the entry.
4643 * Return: The next entry, %NULL otherwise
4645 static inline void *mas_next_nentry(struct ma_state *mas,
4646 struct maple_node *node, unsigned long max, enum maple_type type)
4648 unsigned char count;
4649 unsigned long pivot;
4650 unsigned long *pivots;
4654 if (mas->last == mas->max) {
4655 mas->index = mas->max;
4659 pivots = ma_pivots(node, type);
4660 slots = ma_slots(node, type);
4661 mas->index = mas_safe_min(mas, pivots, mas->offset);
4662 if (ma_dead_node(node))
4665 if (mas->index > max)
4668 count = ma_data_end(node, type, pivots, mas->max);
4669 if (mas->offset > count)
4672 while (mas->offset < count) {
4673 pivot = pivots[mas->offset];
4674 entry = mas_slot(mas, slots, mas->offset);
4675 if (ma_dead_node(node))
4684 mas->index = pivot + 1;
4688 if (mas->index > mas->max) {
4689 mas->index = mas->last;
4693 pivot = mas_safe_pivot(mas, pivots, mas->offset, type);
4694 entry = mas_slot(mas, slots, mas->offset);
4695 if (ma_dead_node(node))
4709 static inline void mas_rewalk(struct ma_state *mas, unsigned long index)
4713 mas_set(mas, index);
4714 mas_state_walk(mas);
4715 if (mas_is_start(mas))
4723 * mas_next_entry() - Internal function to get the next entry.
4724 * @mas: The maple state
4725 * @limit: The maximum range start.
4727	 * Set @mas->node to the next entry and @mas->index to the beginning
4728	 * value of the entry's range. Does not check beyond @limit.
4729 * Sets @mas->index and @mas->last to the limit if it is hit.
4730 * Restarts on dead nodes.
4732 * Return: the next entry or %NULL.
4734 static inline void *mas_next_entry(struct ma_state *mas, unsigned long limit)
4737 struct maple_enode *prev_node;
4738 struct maple_node *node;
4739 unsigned char offset;
4745 offset = mas->offset;
4746 prev_node = mas->node;
4748 mt = mte_node_type(mas->node);
4750 if (unlikely(mas->offset >= mt_slots[mt])) {
4751 mas->offset = mt_slots[mt] - 1;
4755 while (!mas_is_none(mas)) {
4756 entry = mas_next_nentry(mas, node, limit, mt);
4757 if (unlikely(ma_dead_node(node))) {
4758 mas_rewalk(mas, last);
4765 if (unlikely((mas->index > limit)))
4769 prev_node = mas->node;
4770 offset = mas->offset;
4771 if (unlikely(mas_next_node(mas, node, limit))) {
4772 mas_rewalk(mas, last);
4777 mt = mte_node_type(mas->node);
4780 mas->index = mas->last = limit;
4781 mas->offset = offset;
4782 mas->node = prev_node;
4787 * mas_prev_nentry() - Get the previous node entry.
4788 * @mas: The maple state.
4789	 * @limit: The lower limit to check for a value.
 * @index: The original index; used to rewalk if a dead node is encountered.
4791 * Return: the entry, %NULL otherwise.
4793 static inline void *mas_prev_nentry(struct ma_state *mas, unsigned long limit,
4794 unsigned long index)
4796 unsigned long pivot, min;
4797 unsigned char offset;
4798 struct maple_node *mn;
4800 unsigned long *pivots;
4809 mt = mte_node_type(mas->node);
4810 offset = mas->offset - 1;
4811 if (offset >= mt_slots[mt])
4812 offset = mt_slots[mt] - 1;
4814 slots = ma_slots(mn, mt);
4815 pivots = ma_pivots(mn, mt);
4816 if (offset == mt_pivots[mt])
4819 pivot = pivots[offset];
4821 if (unlikely(ma_dead_node(mn))) {
4822 mas_rewalk(mas, index);
4826 while (offset && ((!mas_slot(mas, slots, offset) && pivot >= limit) ||
4828 pivot = pivots[--offset];
4830 min = mas_safe_min(mas, pivots, offset);
4831 entry = mas_slot(mas, slots, offset);
4832 if (unlikely(ma_dead_node(mn))) {
4833 mas_rewalk(mas, index);
4837 if (likely(entry)) {
4838 mas->offset = offset;
4845 static inline void *mas_prev_entry(struct ma_state *mas, unsigned long min)
4850 while (likely(!mas_is_none(mas))) {
4851 entry = mas_prev_nentry(mas, min, mas->index);
4852 if (unlikely(mas->last < min))
4858 if (unlikely(mas_prev_node(mas, min))) {
4859 mas_rewalk(mas, mas->index);
4868 mas->index = mas->last = min;
4873 * mas_rev_awalk() - Internal function. Reverse allocation walk. Find the
4874 * highest gap address of a given size in a given node and descend.
4875 * @mas: The maple state
4876 * @size: The needed size.
4878 * Return: True if found in a leaf, false otherwise.
4881 static bool mas_rev_awalk(struct ma_state *mas, unsigned long size)
4883 enum maple_type type = mte_node_type(mas->node);
4884 struct maple_node *node = mas_mn(mas);
4885 unsigned long *pivots, *gaps;
4887 unsigned long gap = 0;
4888 unsigned long max, min, index;
4889 unsigned char offset;
4891 if (unlikely(mas_is_err(mas)))
4894 if (ma_is_dense(type)) {
4896 mas->offset = (unsigned char)(mas->index - mas->min);
4900 pivots = ma_pivots(node, type);
4901 slots = ma_slots(node, type);
4902 gaps = ma_gaps(node, type);
4903 offset = mas->offset;
4904 min = mas_safe_min(mas, pivots, offset);
4905 /* Skip out of bounds. */
4906 while (mas->last < min)
4907 min = mas_safe_min(mas, pivots, --offset);
4909 max = mas_safe_pivot(mas, pivots, offset, type);
4911 while (index <= max) {
4915 else if (!mas_slot(mas, slots, offset))
4916 gap = max - min + 1;
4919 if ((size <= gap) && (size <= mas->last - min + 1))
4923 /* Skip the next slot, it cannot be a gap. */
4928 max = pivots[offset];
4929 min = mas_safe_min(mas, pivots, offset);
4939 min = mas_safe_min(mas, pivots, offset);
4942 if (unlikely(index > max)) {
4943 mas_set_err(mas, -EBUSY);
4947 if (unlikely(ma_is_leaf(type))) {
4948 mas->offset = offset;
4950 mas->max = min + gap - 1;
4954 /* descend, only happens under lock. */
4955 mas->node = mas_slot(mas, slots, offset);
4958 mas->offset = mas_data_end(mas);
4962 if (mte_is_root(mas->node))
4963 mas_set_err(mas, -EBUSY);
4968 static inline bool mas_anode_descend(struct ma_state *mas, unsigned long size)
4970 enum maple_type type = mte_node_type(mas->node);
4971 unsigned long pivot, min, gap = 0;
4972 unsigned char offset;
4973 unsigned long *gaps;
4974 unsigned long *pivots = ma_pivots(mas_mn(mas), type);
4975 void __rcu **slots = ma_slots(mas_mn(mas), type);
4978 if (ma_is_dense(type)) {
4979 mas->offset = (unsigned char)(mas->index - mas->min);
4983 gaps = ma_gaps(mte_to_node(mas->node), type);
4984 offset = mas->offset;
4985 min = mas_safe_min(mas, pivots, offset);
4986 for (; offset < mt_slots[type]; offset++) {
4987 pivot = mas_safe_pivot(mas, pivots, offset, type);
4988 if (offset && !pivot)
4991 /* Not within lower bounds */
4992 if (mas->index > pivot)
4997 else if (!mas_slot(mas, slots, offset))
4998 gap = min(pivot, mas->last) - max(mas->index, min) + 1;
5003 if (ma_is_leaf(type)) {
5007 if (mas->index <= pivot) {
5008 mas->node = mas_slot(mas, slots, offset);
5017 if (mas->last <= pivot) {
5018 mas_set_err(mas, -EBUSY);
5023 if (mte_is_root(mas->node))
5026 mas->offset = offset;
5031 * mas_walk() - Search for @mas->index in the tree.
5032 * @mas: The maple state.
5034 * mas->index and mas->last will be set to the range if there is a value. If
5035 * mas->node is MAS_NONE, reset to MAS_START.
5037 * Return: the entry at the location or %NULL.
5039 void *mas_walk(struct ma_state *mas)
5044 entry = mas_state_walk(mas);
5045 if (mas_is_start(mas))
5048 if (mas_is_ptr(mas)) {
5053 mas->last = ULONG_MAX;
5058 if (mas_is_none(mas)) {
5060 mas->last = ULONG_MAX;
5065 EXPORT_SYMBOL_GPL(mas_walk);
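/*
 * Example (illustrative sketch, not part of the original source): walking to
 * an index under RCU; "tree" is an assumed caller-owned maple tree.
 *
 *	MA_STATE(mas, &tree, 12, 12);
 *	void *entry;
 *
 *	rcu_read_lock();
 *	entry = mas_walk(&mas);
 *	// on success, mas.index and mas.last span the range containing 12
 *	rcu_read_unlock();
 */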
5067 static inline bool mas_rewind_node(struct ma_state *mas)
5072 if (mte_is_root(mas->node)) {
5082 mas->offset = --slot;
5087 * mas_skip_node() - Internal function. Skip over a node.
5088 * @mas: The maple state.
5090 * Return: true if there is another node, false otherwise.
5092 static inline bool mas_skip_node(struct ma_state *mas)
5094 unsigned char slot, slot_count;
5095 unsigned long *pivots;
5098 mt = mte_node_type(mas->node);
5099 slot_count = mt_slots[mt] - 1;
5101 if (mte_is_root(mas->node)) {
5103 if (slot > slot_count) {
5104 mas_set_err(mas, -EBUSY);
5110 mt = mte_node_type(mas->node);
5111 slot_count = mt_slots[mt] - 1;
5113 } while (slot > slot_count);
5115 mas->offset = ++slot;
5116 pivots = ma_pivots(mas_mn(mas), mt);
5118 mas->min = pivots[slot - 1] + 1;
5120 if (slot <= slot_count)
5121 mas->max = pivots[slot];
5127	 * mas_awalk() - Allocation walk. Search from low address to high for a gap of @size.
5129 * @mas: The maple state
5130 * @size: The size of the gap required
5132 * Search between @mas->index and @mas->last for a gap of @size.
5134 static inline void mas_awalk(struct ma_state *mas, unsigned long size)
5136 struct maple_enode *last = NULL;
5139 * There are 4 options:
5140 * go to child (descend)
5141 * go back to parent (ascend)
5142 * no gap found. (return, slot == MAPLE_NODE_SLOTS)
5143 * found the gap. (return, slot != MAPLE_NODE_SLOTS)
5145 while (!mas_is_err(mas) && !mas_anode_descend(mas, size)) {
5146 if (last == mas->node)
5154 * mas_fill_gap() - Fill a located gap with @entry.
5155 * @mas: The maple state
5156 * @entry: The value to store
5157 * @slot: The offset into the node to store the @entry
5158 * @size: The size of the entry
5159 * @index: The start location
5161 static inline void mas_fill_gap(struct ma_state *mas, void *entry,
5162 unsigned char slot, unsigned long size, unsigned long *index)
5164 MA_WR_STATE(wr_mas, mas, entry);
5165 unsigned char pslot = mte_parent_slot(mas->node);
5166 struct maple_enode *mn = mas->node;
5167 unsigned long *pivots;
5168 enum maple_type ptype;
5170 * mas->index is the start address for the search
5171 * which may no longer be needed.
5172 * mas->last is the end address for the search
5175 *index = mas->index;
5176 mas->last = mas->index + size - 1;
5179	 * It is possible that using the current mas->max and mas->min to
5180	 * calculate the index and last would cause an issue in the gap
5181	 * calculation, so fix the ma_state from the parent pivots here.
5184 ptype = mte_node_type(mas->node);
5185 pivots = ma_pivots(mas_mn(mas), ptype);
5186 mas->max = mas_safe_pivot(mas, pivots, pslot, ptype);
5187 mas->min = mas_safe_min(mas, pivots, pslot);
5190 mas_wr_store_entry(&wr_mas);
5194 * mas_sparse_area() - Internal function. Return upper or lower limit when
5195 * searching for a gap in an empty tree.
5196 * @mas: The maple state
5197 * @min: the minimum range
5198 * @max: The maximum range
5199 * @size: The size of the gap
5200 * @fwd: Searching forward or back
5202 static inline void mas_sparse_area(struct ma_state *mas, unsigned long min,
5203 unsigned long max, unsigned long size, bool fwd)
5205 unsigned long start = 0;
5207 if (!unlikely(mas_is_none(mas)))
5216 mas->last = start + size - 1;
5224 * mas_empty_area() - Get the lowest address within the range that is
5225 * sufficient for the size requested.
5226 * @mas: The maple state
5227 * @min: The lowest value of the range
5228 * @max: The highest value of the range
5229 * @size: The size needed
5231 int mas_empty_area(struct ma_state *mas, unsigned long min,
5232 unsigned long max, unsigned long size)
5234 unsigned char offset;
5235 unsigned long *pivots;
5238 if (mas_is_start(mas))
5240 else if (mas->offset >= 2)
5242 else if (!mas_skip_node(mas))
5246 if (mas_is_none(mas) || mas_is_ptr(mas)) {
5247 mas_sparse_area(mas, min, max, size, true);
5251 /* The start of the window can only be within these values */
5254 mas_awalk(mas, size);
5256 if (unlikely(mas_is_err(mas)))
5257 return xa_err(mas->node);
5259 offset = mas->offset;
5260 if (unlikely(offset == MAPLE_NODE_SLOTS))
5263 mt = mte_node_type(mas->node);
5264 pivots = ma_pivots(mas_mn(mas), mt);
5266 mas->min = pivots[offset - 1] + 1;
5268 if (offset < mt_pivots[mt])
5269 mas->max = pivots[offset];
5271 if (mas->index < mas->min)
5272 mas->index = mas->min;
5274 mas->last = mas->index + size - 1;
5277 EXPORT_SYMBOL_GPL(mas_empty_area);
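/*
 * Example (illustrative sketch): locating the lowest gap of 16 indices,
 * typically in a tree created with MT_FLAGS_ALLOC_RANGE; "tree" is assumed.
 *
 *	MA_STATE(mas, &tree, 0, 0);
 *
 *	mas_lock(&mas);
 *	if (!mas_empty_area(&mas, 0, ULONG_MAX, 16)) {
 *		// mas.index to mas.last now describe a free range of 16
 *	}
 *	mas_unlock(&mas);
 */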
5280 * mas_empty_area_rev() - Get the highest address within the range that is
5281 * sufficient for the size requested.
5282 * @mas: The maple state
5283 * @min: The lowest value of the range
5284 * @max: The highest value of the range
5285 * @size: The size needed
5287 int mas_empty_area_rev(struct ma_state *mas, unsigned long min,
5288 unsigned long max, unsigned long size)
5290 struct maple_enode *last = mas->node;
5292 if (mas_is_start(mas)) {
5294 mas->offset = mas_data_end(mas);
5295 } else if (mas->offset >= 2) {
5297 } else if (!mas_rewind_node(mas)) {
5302 if (mas_is_none(mas) || mas_is_ptr(mas)) {
5303 mas_sparse_area(mas, min, max, size, false);
5307 /* The start of the window can only be within these values. */
5311 while (!mas_rev_awalk(mas, size)) {
5312 if (last == mas->node) {
5313 if (!mas_rewind_node(mas))
5320 if (mas_is_err(mas))
5321 return xa_err(mas->node);
5323 if (unlikely(mas->offset == MAPLE_NODE_SLOTS))
5327 * mas_rev_awalk() has set mas->min and mas->max to the gap values. If
5328 * the maximum is outside the window we are searching, then use the last
5329 * location in the search.
5330	 * mas->max and mas->min are the range of the gap.
5331 * mas->index and mas->last are currently set to the search range.
5334 /* Trim the upper limit to the max. */
5335 if (mas->max <= mas->last)
5336 mas->last = mas->max;
5338 mas->index = mas->last - size + 1;
5341 EXPORT_SYMBOL_GPL(mas_empty_area_rev);
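/*
 * Example (illustrative sketch): the reverse search returns the highest
 * fitting gap instead of the lowest; "tree" is assumed as above.
 *
 *	MA_STATE(mas, &tree, 0, 0);
 *
 *	mas_lock(&mas);
 *	if (!mas_empty_area_rev(&mas, 0, ULONG_MAX, 16)) {
 *		// mas.index to mas.last is the highest free range of 16
 *	}
 *	mas_unlock(&mas);
 */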
5343 static inline int mas_alloc(struct ma_state *mas, void *entry,
5344 unsigned long size, unsigned long *index)
5349 if (mas_is_none(mas) || mas_is_ptr(mas)) {
5350 mas_root_expand(mas, entry);
5351 if (mas_is_err(mas))
5352 return xa_err(mas->node);
5355 return mte_pivot(mas->node, 0);
5356 return mte_pivot(mas->node, 1);
5359 /* Must be walking a tree. */
5360 mas_awalk(mas, size);
5361 if (mas_is_err(mas))
5362 return xa_err(mas->node);
5364 if (mas->offset == MAPLE_NODE_SLOTS)
5368 * At this point, mas->node points to the right node and we have an
5369 * offset that has a sufficient gap.
5373 min = mte_pivot(mas->node, mas->offset - 1) + 1;
5375 if (mas->index < min)
5378 mas_fill_gap(mas, entry, mas->offset, size, index);
5385 static inline int mas_rev_alloc(struct ma_state *mas, unsigned long min,
5386 unsigned long max, void *entry,
5387 unsigned long size, unsigned long *index)
5391 ret = mas_empty_area_rev(mas, min, max, size);
5395 if (mas_is_err(mas))
5396 return xa_err(mas->node);
5398 if (mas->offset == MAPLE_NODE_SLOTS)
5401 mas_fill_gap(mas, entry, mas->offset, size, index);
5409 * mas_dead_leaves() - Mark all leaves of a node as dead.
5410 * @mas: The maple state
5411 * @slots: Pointer to the slot array
5413 * Must hold the write lock.
5415 * Return: The number of leaves marked as dead.
5418 unsigned char mas_dead_leaves(struct ma_state *mas, void __rcu **slots)
5420 struct maple_node *node;
5421 enum maple_type type;
5425 for (offset = 0; offset < mt_slot_count(mas->node); offset++) {
5426 entry = mas_slot_locked(mas, slots, offset);
5427 type = mte_node_type(entry);
5428 node = mte_to_node(entry);
5429 /* Use both node and type to catch LE & BE metadata */
5433 mte_set_node_dead(entry);
5434 smp_wmb(); /* Needed for RCU */
5436 rcu_assign_pointer(slots[offset], node);
5442 static void __rcu **mas_dead_walk(struct ma_state *mas, unsigned char offset)
5444 struct maple_node *node, *next;
5445 void __rcu **slots = NULL;
5449 mas->node = ma_enode_ptr(next);
5451 slots = ma_slots(node, node->type);
5452 next = mas_slot_locked(mas, slots, offset);
5454 } while (!ma_is_leaf(next->type));
5459 static void mt_free_walk(struct rcu_head *head)
5462 struct maple_node *node, *start;
5463 struct maple_tree mt;
5464 unsigned char offset;
5465 enum maple_type type;
5466 MA_STATE(mas, &mt, 0, 0);
5468 node = container_of(head, struct maple_node, rcu);
5470 if (ma_is_leaf(node->type))
5473 mt_init_flags(&mt, node->ma_flags);
5476 mas.node = mt_mk_node(node, node->type);
5477 slots = mas_dead_walk(&mas, 0);
5478 node = mas_mn(&mas);
5480 mt_free_bulk(node->slot_len, slots);
5481 offset = node->parent_slot + 1;
5482 mas.node = node->piv_parent;
5483 if (mas_mn(&mas) == node)
5484 goto start_slots_free;
5486 type = mte_node_type(mas.node);
5487 slots = ma_slots(mte_to_node(mas.node), type);
5488 if ((offset < mt_slots[type]) && (slots[offset]))
5489 slots = mas_dead_walk(&mas, offset);
5491 node = mas_mn(&mas);
5492 } while ((node != start) || (node->slot_len < offset));
5494 slots = ma_slots(node, node->type);
5495 mt_free_bulk(node->slot_len, slots);
5500 mt_free_rcu(&node->rcu);
5503 static inline void __rcu **mas_destroy_descend(struct ma_state *mas,
5504 struct maple_enode *prev, unsigned char offset)
5506 struct maple_node *node;
5507 struct maple_enode *next = mas->node;
5508 void __rcu **slots = NULL;
5513 slots = ma_slots(node, mte_node_type(mas->node));
5514 next = mas_slot_locked(mas, slots, 0);
5515 if ((mte_dead_node(next)))
5516 next = mas_slot_locked(mas, slots, 1);
5518 mte_set_node_dead(mas->node);
5519 node->type = mte_node_type(mas->node);
5520 node->piv_parent = prev;
5521 node->parent_slot = offset;
5524 } while (!mte_is_leaf(next));
5529 static void mt_destroy_walk(struct maple_enode *enode, unsigned char ma_flags,
5533 struct maple_node *node = mte_to_node(enode);
5534 struct maple_enode *start;
5535 struct maple_tree mt;
5537 MA_STATE(mas, &mt, 0, 0);
5539 if (mte_is_leaf(enode))
5542 mt_init_flags(&mt, ma_flags);
5545 mas.node = start = enode;
5546 slots = mas_destroy_descend(&mas, start, 0);
5547 node = mas_mn(&mas);
5549 enum maple_type type;
5550 unsigned char offset;
5551 struct maple_enode *parent, *tmp;
5553 node->slot_len = mas_dead_leaves(&mas, slots);
5555 mt_free_bulk(node->slot_len, slots);
5556 offset = node->parent_slot + 1;
5557 mas.node = node->piv_parent;
5558 if (mas_mn(&mas) == node)
5559 goto start_slots_free;
5561 type = mte_node_type(mas.node);
5562 slots = ma_slots(mte_to_node(mas.node), type);
5563 if (offset >= mt_slots[type])
5566 tmp = mas_slot_locked(&mas, slots, offset);
5567 if (mte_node_type(tmp) && mte_to_node(tmp)) {
5570 slots = mas_destroy_descend(&mas, parent, offset);
5573 node = mas_mn(&mas);
5574 } while (start != mas.node);
5576 node = mas_mn(&mas);
5577 node->slot_len = mas_dead_leaves(&mas, slots);
5579 mt_free_bulk(node->slot_len, slots);
5586 mt_free_rcu(&node->rcu);
5590 * mte_destroy_walk() - Free a tree or sub-tree.
5591	 * @enode: the encoded maple node (maple_enode) to start
5592	 * @mt: the tree to free - needed for node types.
5594 * Must hold the write lock.
5596 static inline void mte_destroy_walk(struct maple_enode *enode,
5597 struct maple_tree *mt)
5599 struct maple_node *node = mte_to_node(enode);
5601 if (mt_in_rcu(mt)) {
5602 mt_destroy_walk(enode, mt->ma_flags, false);
5603 call_rcu(&node->rcu, mt_free_walk);
5605 mt_destroy_walk(enode, mt->ma_flags, true);
5609 static void mas_wr_store_setup(struct ma_wr_state *wr_mas)
5611 if (!mas_is_start(wr_mas->mas)) {
5612 if (mas_is_none(wr_mas->mas)) {
5613 mas_reset(wr_mas->mas);
5615 wr_mas->r_max = wr_mas->mas->max;
5616 wr_mas->type = mte_node_type(wr_mas->mas->node);
5617 if (mas_is_span_wr(wr_mas))
5618 mas_reset(wr_mas->mas);
5627 * mas_store() - Store an @entry.
5628 * @mas: The maple state.
5629 * @entry: The entry to store.
5631	 * The @mas->index and @mas->last are used to set the range for the @entry.
5632 * Note: The @mas should have pre-allocated entries to ensure there is memory to
5633 * store the entry. Please see mas_expected_entries()/mas_destroy() for more details.
5635 * Return: the first entry between mas->index and mas->last or %NULL.
5637 void *mas_store(struct ma_state *mas, void *entry)
5639 MA_WR_STATE(wr_mas, mas, entry);
5641 trace_ma_write(__func__, mas, 0, entry);
5642 #ifdef CONFIG_DEBUG_MAPLE_TREE
5643 if (mas->index > mas->last)
5644 pr_err("Error %lu > %lu %p\n", mas->index, mas->last, entry);
5645 MT_BUG_ON(mas->tree, mas->index > mas->last);
5646 if (mas->index > mas->last) {
5647 mas_set_err(mas, -EINVAL);
5654 * Storing is the same operation as insert with the added caveat that it
5655 * can overwrite entries. Although this seems simple enough, one may
5656 * want to examine what happens if a single store operation was to
5657 * overwrite multiple entries within a self-balancing B-Tree.
5659 mas_wr_store_setup(&wr_mas);
5660 mas_wr_store_entry(&wr_mas);
5661 return wr_mas.content;
5663 EXPORT_SYMBOL_GPL(mas_store);
5666 * mas_store_gfp() - Store a value into the tree.
5667 * @mas: The maple state
5668 * @entry: The entry to store
5669 * @gfp: The GFP_FLAGS to use for allocations if necessary.
5671	 * Return: 0 on success, -EINVAL on invalid request, -ENOMEM if memory could not be allocated.
5674 int mas_store_gfp(struct ma_state *mas, void *entry, gfp_t gfp)
5676 MA_WR_STATE(wr_mas, mas, entry);
5678 mas_wr_store_setup(&wr_mas);
5679 trace_ma_write(__func__, mas, 0, entry);
5681 mas_wr_store_entry(&wr_mas);
5682 if (unlikely(mas_nomem(mas, gfp)))
5685 if (unlikely(mas_is_err(mas)))
5686 return xa_err(mas->node);
5690 EXPORT_SYMBOL_GPL(mas_store_gfp);
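/*
 * Example (illustrative sketch): a range store that allocates internally;
 * "tree" is an assumed caller-owned maple tree.
 *
 *	MA_STATE(mas, &tree, 5, 10);
 *	int ret;
 *
 *	mas_lock(&mas);
 *	ret = mas_store_gfp(&mas, xa_mk_value(0x1234), GFP_KERNEL);
 *	mas_unlock(&mas);
 *	// ret is 0 on success, or a negative errno
 */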
5693 * mas_store_prealloc() - Store a value into the tree using memory
5694 * preallocated in the maple state.
5695 * @mas: The maple state
5696 * @entry: The entry to store.
5698 void mas_store_prealloc(struct ma_state *mas, void *entry)
5700 MA_WR_STATE(wr_mas, mas, entry);
5702 mas_wr_store_setup(&wr_mas);
5703 trace_ma_write(__func__, mas, 0, entry);
5704 mas_wr_store_entry(&wr_mas);
5705 BUG_ON(mas_is_err(mas));
5708 EXPORT_SYMBOL_GPL(mas_store_prealloc);
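/*
 * Example (illustrative sketch): pairing mas_preallocate() with
 * mas_store_prealloc() so that the store itself cannot fail; "tree" and
 * "ptr" are assumptions.
 *
 *	MA_STATE(mas, &tree, 100, 199);
 *
 *	if (mas_preallocate(&mas, ptr, GFP_KERNEL))
 *		return -ENOMEM;
 *	mas_lock(&mas);
 *	mas_store_prealloc(&mas, ptr);
 *	mas_unlock(&mas);
 */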
5711 * mas_preallocate() - Preallocate enough nodes for a store operation
5712 * @mas: The maple state
5713 * @entry: The entry that will be stored
5714 * @gfp: The GFP_FLAGS to use for allocations.
5716 * Return: 0 on success, -ENOMEM if memory could not be allocated.
5718 int mas_preallocate(struct ma_state *mas, void *entry, gfp_t gfp)
5722 mas_node_count_gfp(mas, 1 + mas_mt_height(mas) * 3, gfp);
5723 mas->mas_flags |= MA_STATE_PREALLOC;
5724 if (likely(!mas_is_err(mas)))
5727 mas_set_alloc_req(mas, 0);
5728 ret = xa_err(mas->node);
5736 * mas_destroy() - destroy a maple state.
5737 * @mas: The maple state
5739 * Upon completion, check the left-most node and rebalance against the node to
5740	 * the right if necessary. Frees any allocated nodes associated with this maple state.
5743 void mas_destroy(struct ma_state *mas)
5745 struct maple_alloc *node;
5748 * When using mas_for_each() to insert an expected number of elements,
5749 * it is possible that the number inserted is less than the expected
5750 * number. To fix an invalid final node, a check is performed here to
5751 * rebalance the previous node with the final node.
5753 if (mas->mas_flags & MA_STATE_REBALANCE) {
5756 if (mas_is_start(mas))
5759 mtree_range_walk(mas);
5760 end = mas_data_end(mas) + 1;
5761 if (end < mt_min_slot_count(mas->node) - 1)
5762 mas_destroy_rebalance(mas, end);
5764 mas->mas_flags &= ~MA_STATE_REBALANCE;
5766 mas->mas_flags &= ~(MA_STATE_BULK|MA_STATE_PREALLOC);
5768 while (mas->alloc && !((unsigned long)mas->alloc & 0x1)) {
5770 mas->alloc = node->slot[0];
5771 if (node->node_count > 0)
5772 mt_free_bulk(node->node_count,
5773 (void __rcu **)&node->slot[1]);
5774 kmem_cache_free(maple_node_cache, node);
5778 EXPORT_SYMBOL_GPL(mas_destroy);
5781 * mas_expected_entries() - Set the expected number of entries that will be inserted.
5782 * @mas: The maple state
5783 * @nr_entries: The number of expected entries.
5785 * This will attempt to pre-allocate enough nodes to store the expected number
5786 * of entries. The allocations will occur using the bulk allocator interface
5787 * for speed. Please call mas_destroy() on the @mas after inserting the entries
5788 * to ensure any unused nodes are freed.
5790 * Return: 0 on success, -ENOMEM if memory could not be allocated.
5792 int mas_expected_entries(struct ma_state *mas, unsigned long nr_entries)
5794 int nonleaf_cap = MAPLE_ARANGE64_SLOTS - 2;
5795 struct maple_enode *enode = mas->node;
5800 * Sometimes it is necessary to duplicate a tree to a new tree, such as
5801 * forking a process and duplicating the VMAs from one tree to a new
5802 * tree. When such a situation arises, it is known that the new tree is
5803 * not going to be used until the entire tree is populated. For
5804 * performance reasons, it is best to use a bulk load with RCU disabled.
5805 * This allows for optimistic splitting that favours the left and reuse
5806 * of nodes during the operation.
5809 /* Optimize splitting for bulk insert in-order */
5810 mas->mas_flags |= MA_STATE_BULK;
5813 * Avoid overflow, assume a gap between each entry and a trailing null.
5814 * If this is wrong, it just means allocation can happen during
5815 * insertion of entries.
5817 nr_nodes = max(nr_entries, nr_entries * 2 + 1);
5818 if (!mt_is_alloc(mas->tree))
5819 nonleaf_cap = MAPLE_RANGE64_SLOTS - 2;
5821 /* Leaves; reduce slots to keep space for expansion */
5822 nr_nodes = DIV_ROUND_UP(nr_nodes, MAPLE_RANGE64_SLOTS - 2);
5823 /* Internal nodes */
5824 nr_nodes += DIV_ROUND_UP(nr_nodes, nonleaf_cap);
5825 /* Add working room for split (2 nodes) + new parents */
5826 mas_node_count(mas, nr_nodes + 3);
5828 /* Detect if allocations run out */
5829 mas->mas_flags |= MA_STATE_PREALLOC;
5831 if (!mas_is_err(mas))
5834 ret = xa_err(mas->node);
5840 EXPORT_SYMBOL_GPL(mas_expected_entries);
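/*
 * Example (illustrative sketch): bulk-loading in-order entries, such as when
 * duplicating a tree; nr, keys[], vals[] and "tree" are assumptions.
 *
 *	MA_STATE(mas, &tree, 0, 0);
 *	unsigned long i;
 *
 *	mas_lock(&mas);
 *	if (mas_expected_entries(&mas, nr))
 *		goto nomem;
 *	for (i = 0; i < nr; i++) {
 *		mas_set_range(&mas, keys[i], keys[i]);
 *		mas_store(&mas, vals[i]);
 *	}
 *	mas_destroy(&mas);	// rebalance the tail, free unused nodes
 *	mas_unlock(&mas);
 */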
5843 * mas_next() - Get the next entry.
5844 * @mas: The maple state
5845 * @max: The maximum index to check.
5847 * Returns the next entry after @mas->index.
5848 * Must hold rcu_read_lock or the write lock.
5849 * Can return the zero entry.
5851 * Return: The next entry or %NULL
5853 void *mas_next(struct ma_state *mas, unsigned long max)
5855 if (mas_is_none(mas) || mas_is_paused(mas))
5856 mas->node = MAS_START;
5858 if (mas_is_start(mas))
5859 mas_walk(mas); /* Retries on dead nodes handled by mas_walk */
5861 if (mas_is_ptr(mas)) {
5864 mas->last = ULONG_MAX;
5869 if (mas->last == ULONG_MAX)
5872 /* Retries on dead nodes handled by mas_next_entry */
5873 return mas_next_entry(mas, max);
5875 EXPORT_SYMBOL_GPL(mas_next);
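/*
 * Example (illustrative sketch): stepping through adjacent entries under
 * RCU; "tree" is assumed.
 *
 *	MA_STATE(mas, &tree, 0, 0);
 *	void *entry;
 *
 *	rcu_read_lock();
 *	entry = mas_next(&mas, ULONG_MAX);	// first entry above index 0
 *	entry = mas_next(&mas, ULONG_MAX);	// the entry after that
 *	rcu_read_unlock();
 */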
5878 * mt_next() - get the next value in the maple tree
5879 * @mt: The maple tree
5880 * @index: The start index
5881 * @max: The maximum index to check
5883 * Return: The entry at @index or higher, or %NULL if nothing is found.
5885 void *mt_next(struct maple_tree *mt, unsigned long index, unsigned long max)
5888 MA_STATE(mas, mt, index, index);
5891 entry = mas_next(&mas, max);
5895 EXPORT_SYMBOL_GPL(mt_next);
5898 * mas_prev() - Get the previous entry
5899 * @mas: The maple state
5900 * @min: The minimum value to check.
5902 * Must hold rcu_read_lock or the write lock.
5903	 * Will reset mas to MAS_START if the node is MAS_NONE. Will stop on not searchable nodes.
5906 * Return: the previous value or %NULL.
5908 void *mas_prev(struct ma_state *mas, unsigned long min)
5911 /* Nothing comes before 0 */
5916 if (unlikely(mas_is_ptr(mas)))
5919 if (mas_is_none(mas) || mas_is_paused(mas))
5920 mas->node = MAS_START;
5922 if (mas_is_start(mas)) {
5928 if (mas_is_ptr(mas)) {
5934 mas->index = mas->last = 0;
5935 return mas_root_locked(mas);
5937 return mas_prev_entry(mas, min);
5939 EXPORT_SYMBOL_GPL(mas_prev);
5942 * mt_prev() - get the previous value in the maple tree
5943 * @mt: The maple tree
5944 * @index: The start index
5945 * @min: The minimum index to check
5947 * Return: The entry at @index or lower, or %NULL if nothing is found.
5949 void *mt_prev(struct maple_tree *mt, unsigned long index, unsigned long min)
5952 MA_STATE(mas, mt, index, index);
5955 entry = mas_prev(&mas, min);
5959 EXPORT_SYMBOL_GPL(mt_prev);
5962 * mas_pause() - Pause a mas_find/mas_for_each to drop the lock.
5963 * @mas: The maple state to pause
5965 * Some users need to pause a walk and drop the lock they're holding in
5966 * order to yield to a higher priority thread or carry out an operation
5967 * on an entry. Those users should call this function before they drop
5968 * the lock. It resets the @mas to be suitable for the next iteration
5969 * of the loop after the user has reacquired the lock. If most entries
5970 * found during a walk require you to call mas_pause(), the mt_for_each()
5971 * iterator may be more appropriate.
5974 void mas_pause(struct ma_state *mas)
5976 mas->node = MAS_PAUSE;
5978 EXPORT_SYMBOL_GPL(mas_pause);
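/*
 * Example (illustrative sketch): yielding mid-iteration; "tree" and the
 * need_break() predicate are assumptions.
 *
 *	MA_STATE(mas, &tree, 0, 0);
 *	void *entry;
 *
 *	rcu_read_lock();
 *	mas_for_each(&mas, entry, ULONG_MAX) {
 *		if (need_break()) {
 *			mas_pause(&mas);
 *			rcu_read_unlock();
 *			cond_resched();
 *			rcu_read_lock();
 *		}
 *	}
 *	rcu_read_unlock();
 */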
5981 * mas_find() - On the first call, find the entry at or after mas->index up to
5982 * %max. Otherwise, find the entry after mas->index.
5983 * @mas: The maple state
5984 * @max: The maximum value to check.
5986 * Must hold rcu_read_lock or the write lock.
5987 * If an entry exists, last and index are updated accordingly.
5988 * May set @mas->node to MAS_NONE.
5990 * Return: The entry or %NULL.
5992 void *mas_find(struct ma_state *mas, unsigned long max)
5994 if (unlikely(mas_is_paused(mas))) {
5995 if (unlikely(mas->last == ULONG_MAX)) {
5996 mas->node = MAS_NONE;
5999 mas->node = MAS_START;
6000 mas->index = ++mas->last;
6003 if (unlikely(mas_is_start(mas))) {
6004 /* First run or continue */
6007 if (mas->index > max)
6010 entry = mas_walk(mas);
6015 if (unlikely(!mas_searchable(mas)))
6018 /* Retries on dead nodes handled by mas_next_entry */
6019 return mas_next_entry(mas, max);
6021 EXPORT_SYMBOL_GPL(mas_find);
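/*
 * Example (illustrative sketch): mas_find() is the engine behind the
 * mas_for_each() iterator; an equivalent manual loop ("tree" assumed):
 *
 *	MA_STATE(mas, &tree, 0, 0);
 *	void *entry;
 *
 *	rcu_read_lock();
 *	while ((entry = mas_find(&mas, ULONG_MAX)) != NULL)
 *		pr_debug("[%lu, %lu] -> %p\n", mas.index, mas.last, entry);
 *	rcu_read_unlock();
 */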
6024	 * mas_find_rev() - On the first call, find the first non-null entry at or below
6025 * mas->index down to %min. Otherwise find the first non-null entry below
6026 * mas->index down to %min.
6027 * @mas: The maple state
6028 * @min: The minimum value to check.
6030 * Must hold rcu_read_lock or the write lock.
6031 * If an entry exists, last and index are updated accordingly.
6032 * May set @mas->node to MAS_NONE.
6034 * Return: The entry or %NULL.
6036 void *mas_find_rev(struct ma_state *mas, unsigned long min)
6038 if (unlikely(mas_is_paused(mas))) {
6039 if (unlikely(mas->last == ULONG_MAX)) {
6040 mas->node = MAS_NONE;
6043 mas->node = MAS_START;
6044 mas->last = --mas->index;
6047 if (unlikely(mas_is_start(mas))) {
6048 /* First run or continue */
6051 if (mas->index < min)
6054 entry = mas_walk(mas);
6059 if (unlikely(!mas_searchable(mas)))
6062 if (mas->index < min)
6065 /* Retries on dead nodes handled by mas_prev_entry */
6066 return mas_prev_entry(mas, min);
6068 EXPORT_SYMBOL_GPL(mas_find_rev);
6071	 * mas_erase() - Find the range in which index resides and erase the entire range.
6073 * @mas: The maple state
6075 * Must hold the write lock.
6076 * Searches for @mas->index, sets @mas->index and @mas->last to the range and
6077 * erases that range.
6079 * Return: the entry that was erased or %NULL, @mas->index and @mas->last are updated.
6081 void *mas_erase(struct ma_state *mas)
6084 MA_WR_STATE(wr_mas, mas, NULL);
6086 if (mas_is_none(mas) || mas_is_paused(mas))
6087 mas->node = MAS_START;
6089 /* Retry unnecessary when holding the write lock. */
6090 entry = mas_state_walk(mas);
6095 /* Must reset to ensure spanning writes of last slot are detected */
6097 mas_wr_store_setup(&wr_mas);
6098 mas_wr_store_entry(&wr_mas);
6099 if (mas_nomem(mas, GFP_KERNEL))
6104 EXPORT_SYMBOL_GPL(mas_erase);
6107	 * mas_nomem() - Check if there was an error allocating and do the allocation
6108	 * if necessary. If there are allocations, then free them.
6109 * @mas: The maple state
6110 * @gfp: The GFP_FLAGS to use for allocations
6111 * Return: true on allocation, false otherwise.
6113 bool mas_nomem(struct ma_state *mas, gfp_t gfp)
6114 __must_hold(mas->tree->lock)
6116 if (likely(mas->node != MA_ERROR(-ENOMEM))) {
6121 if (gfpflags_allow_blocking(gfp) && !mt_external_lock(mas->tree)) {
6122 mtree_unlock(mas->tree);
6123 mas_alloc_nodes(mas, gfp);
6124 mtree_lock(mas->tree);
6126 mas_alloc_nodes(mas, gfp);
6129 if (!mas_allocated(mas))
6132 mas->node = MAS_START;
6136 void __init maple_tree_init(void)
6138 maple_node_cache = kmem_cache_create("maple_node",
6139 sizeof(struct maple_node), sizeof(struct maple_node),
6144 * mtree_load() - Load a value stored in a maple tree
6145 * @mt: The maple tree
6146 * @index: The index to load
6148 * Return: the entry or %NULL
6150 void *mtree_load(struct maple_tree *mt, unsigned long index)
6152 MA_STATE(mas, mt, index, index);
6155 trace_ma_read(__func__, &mas);
6158 entry = mas_start(&mas);
6159 if (unlikely(mas_is_none(&mas)))
6162 if (unlikely(mas_is_ptr(&mas))) {
6169 entry = mtree_lookup_walk(&mas);
6170 if (!entry && unlikely(mas_is_start(&mas)))
6174 if (xa_is_zero(entry))
6179 EXPORT_SYMBOL(mtree_load);
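/*
 * Example (illustrative sketch): the simplest read; "tree" is assumed to
 * have been populated elsewhere.
 *
 *	void *entry = mtree_load(&tree, 42);
 *	// entry is NULL if nothing is stored at index 42
 */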
6182 * mtree_store_range() - Store an entry at a given range.
6183 * @mt: The maple tree
6184 * @index: The start of the range
6185 * @last: The end of the range
6186 * @entry: The entry to store
6187 * @gfp: The GFP_FLAGS to use for allocations
6189	 * Return: 0 on success, -EINVAL on invalid request, -ENOMEM if memory could not be allocated.
6192 int mtree_store_range(struct maple_tree *mt, unsigned long index,
6193 unsigned long last, void *entry, gfp_t gfp)
6195 MA_STATE(mas, mt, index, last);
6196 MA_WR_STATE(wr_mas, &mas, entry);
6198 trace_ma_write(__func__, &mas, 0, entry);
6199 if (WARN_ON_ONCE(xa_is_advanced(entry)))
6207 mas_wr_store_entry(&wr_mas);
6208 if (mas_nomem(&mas, gfp))
6212 if (mas_is_err(&mas))
6213 return xa_err(mas.node);
6217 EXPORT_SYMBOL(mtree_store_range);
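/*
 * Example (illustrative sketch): range stores overwrite any overlap, so a
 * later store can split an earlier one; "tree" is assumed empty at start.
 *
 *	DEFINE_MTREE(tree);
 *
 *	mtree_store_range(&tree, 16, 31, xa_mk_value(1), GFP_KERNEL);
 *	mtree_store_range(&tree, 20, 23, xa_mk_value(2), GFP_KERNEL);
 *	// the tree now holds 16-19 -> 1, 20-23 -> 2, 24-31 -> 1
 */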
6220 * mtree_store() - Store an entry at a given index.
6221 * @mt: The maple tree
6222 * @index: The index to store the value
6223 * @entry: The entry to store
6224 * @gfp: The GFP_FLAGS to use for allocations
6226	 * Return: 0 on success, -EINVAL on invalid request, -ENOMEM if memory could not be allocated.
6229 int mtree_store(struct maple_tree *mt, unsigned long index, void *entry,
6232 return mtree_store_range(mt, index, index, entry, gfp);
6234 EXPORT_SYMBOL(mtree_store);
6237	 * mtree_insert_range() - Insert an entry at a given range if there is no value.
6238 * @mt: The maple tree
6239 * @first: The start of the range
6240 * @last: The end of the range
6241 * @entry: The entry to store
6242 * @gfp: The GFP_FLAGS to use for allocations.
6244	 * Return: 0 on success, -EEXIST if the range is occupied, -EINVAL on invalid
6245	 * request, -ENOMEM if memory could not be allocated.
6247 int mtree_insert_range(struct maple_tree *mt, unsigned long first,
6248 unsigned long last, void *entry, gfp_t gfp)
6250 MA_STATE(ms, mt, first, last);
6252 if (WARN_ON_ONCE(xa_is_advanced(entry)))
6260 mas_insert(&ms, entry);
6261 if (mas_nomem(&ms, gfp))
6265 if (mas_is_err(&ms))
6266 return xa_err(ms.node);
6270 EXPORT_SYMBOL(mtree_insert_range);
6273	 * mtree_insert() - Insert an entry at a given index if there is no value.
6274 * @mt: The maple tree
6275 * @index : The index to store the value
6276 * @entry: The entry to store
6277	 * @gfp: The GFP_FLAGS to use for allocations.
6279	 * Return: 0 on success, -EEXIST if the range is occupied, -EINVAL on invalid
6280 * request, -ENOMEM if memory could not be allocated.
6282 int mtree_insert(struct maple_tree *mt, unsigned long index, void *entry,
6285 return mtree_insert_range(mt, index, index, entry, gfp);
6287 EXPORT_SYMBOL(mtree_insert);
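/*
 * Example (illustrative sketch): unlike store, insert refuses to overwrite;
 * "tree" is assumed.
 *
 *	mtree_insert(&tree, 7, xa_mk_value(1), GFP_KERNEL);	// returns 0
 *	mtree_insert(&tree, 7, xa_mk_value(2), GFP_KERNEL);	// returns -EEXIST
 */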
6289 int mtree_alloc_range(struct maple_tree *mt, unsigned long *startp,
6290 void *entry, unsigned long size, unsigned long min,
6291 unsigned long max, gfp_t gfp)
6295 MA_STATE(mas, mt, min, max - size);
6296 if (!mt_is_alloc(mt))
6299 if (WARN_ON_ONCE(mt_is_reserved(entry)))
6315 mas.last = max - size;
6316 ret = mas_alloc(&mas, entry, size, startp);
6317 if (mas_nomem(&mas, gfp))
6323 EXPORT_SYMBOL(mtree_alloc_range);
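/*
 * Example (illustrative sketch): reserving a 0x1000-index range from an
 * allocation tree (MT_FLAGS_ALLOC_RANGE); "tree" and "ptr" are assumptions.
 *
 *	unsigned long start;
 *	int ret;
 *
 *	ret = mtree_alloc_range(&tree, &start, ptr, 0x1000, 0, ULONG_MAX,
 *				GFP_KERNEL);
 *	// on success (ret == 0), "start" holds the base of the new range
 */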
6325 int mtree_alloc_rrange(struct maple_tree *mt, unsigned long *startp,
6326 void *entry, unsigned long size, unsigned long min,
6327 unsigned long max, gfp_t gfp)
6331 MA_STATE(mas, mt, min, max - size);
6332 if (!mt_is_alloc(mt))
6335 if (WARN_ON_ONCE(mt_is_reserved(entry)))
6349 ret = mas_rev_alloc(&mas, min, max, entry, size, startp);
6350 if (mas_nomem(&mas, gfp))
6356 EXPORT_SYMBOL(mtree_alloc_rrange);
6359 * mtree_erase() - Find an index and erase the entire range.
6360 * @mt: The maple tree
6361 * @index: The index to erase
6363 * Erasing is the same as a walk to an entry then a store of a NULL to that
6364 * ENTIRE range. In fact, it is implemented as such using the advanced API.
6366 * Return: The entry stored at the @index or %NULL
6368 void *mtree_erase(struct maple_tree *mt, unsigned long index)
6372 MA_STATE(mas, mt, index, index);
6373 trace_ma_op(__func__, &mas);
6376 entry = mas_erase(&mas);
6381 EXPORT_SYMBOL(mtree_erase);
6384 * __mt_destroy() - Walk and free all nodes of a locked maple tree.
6385 * @mt: The maple tree
6387 * Note: Does not handle locking.
6389 void __mt_destroy(struct maple_tree *mt)
6391 void *root = mt_root_locked(mt);
6393 rcu_assign_pointer(mt->ma_root, NULL);
6394 if (xa_is_node(root))
6395 mte_destroy_walk(root, mt);
6399 EXPORT_SYMBOL_GPL(__mt_destroy);
6402 * mtree_destroy() - Destroy a maple tree
6403 * @mt: The maple tree
6405 * Frees all resources used by the tree. Handles locking.
6407 void mtree_destroy(struct maple_tree *mt)
6413 EXPORT_SYMBOL(mtree_destroy);
6416 * mt_find() - Search from the start up until an entry is found.
6417 * @mt: The maple tree
6418 * @index: Pointer which contains the start location of the search
6419 * @max: The maximum value to check
6421 * Handles locking. @index will be incremented to one beyond the range.
6423 * Return: The entry at or after the @index or %NULL
6425 void *mt_find(struct maple_tree *mt, unsigned long *index, unsigned long max)
6427 MA_STATE(mas, mt, *index, *index);
6429 #ifdef CONFIG_DEBUG_MAPLE_TREE
6430 unsigned long copy = *index;
6433 trace_ma_read(__func__, &mas);
6440 entry = mas_state_walk(&mas);
6441 if (mas_is_start(&mas))
6444 if (unlikely(xa_is_zero(entry)))
6450 while (mas_searchable(&mas) && (mas.index < max)) {
6451 entry = mas_next_entry(&mas, max);
6452 if (likely(entry && !xa_is_zero(entry)))
6456 if (unlikely(xa_is_zero(entry)))
6460 if (likely(entry)) {
6461 *index = mas.last + 1;
6462 #ifdef CONFIG_DEBUG_MAPLE_TREE
6463 if ((*index) && (*index) <= copy)
6464 pr_err("index not increased! %lx <= %lx\n",
6466 MT_BUG_ON(mt, (*index) && ((*index) <= copy));
6472 EXPORT_SYMBOL(mt_find);
6475 * mt_find_after() - Search from the start up until an entry is found.
6476 * @mt: The maple tree
6477 * @index: Pointer which contains the start location of the search
6478 * @max: The maximum value to check
6480 * Handles locking, detects wrapping on index == 0
6482 * Return: The entry at or after the @index or %NULL
6484 void *mt_find_after(struct maple_tree *mt, unsigned long *index,
6490 return mt_find(mt, index, max);
6492 EXPORT_SYMBOL(mt_find_after);
6494 #ifdef CONFIG_DEBUG_MAPLE_TREE
6495 atomic_t maple_tree_tests_run;
6496 EXPORT_SYMBOL_GPL(maple_tree_tests_run);
6497 atomic_t maple_tree_tests_passed;
6498 EXPORT_SYMBOL_GPL(maple_tree_tests_passed);
6501 extern void kmem_cache_set_non_kernel(struct kmem_cache *, unsigned int);
6502 void mt_set_non_kernel(unsigned int val)
6504 kmem_cache_set_non_kernel(maple_node_cache, val);
6507 extern unsigned long kmem_cache_get_alloc(struct kmem_cache *);
6508 unsigned long mt_get_alloc_size(void)
6510 return kmem_cache_get_alloc(maple_node_cache);
6513 extern void kmem_cache_zero_nr_tallocated(struct kmem_cache *);
6514 void mt_zero_nr_tallocated(void)
6516 kmem_cache_zero_nr_tallocated(maple_node_cache);
6519 extern unsigned int kmem_cache_nr_tallocated(struct kmem_cache *);
6520 unsigned int mt_nr_tallocated(void)
6522 return kmem_cache_nr_tallocated(maple_node_cache);
6525 extern unsigned int kmem_cache_nr_allocated(struct kmem_cache *);
6526 unsigned int mt_nr_allocated(void)
6528 return kmem_cache_nr_allocated(maple_node_cache);
6532 * mas_dead_node() - Check if the maple state is pointing to a dead node.
6533 * @mas: The maple state
6534 * @index: The index to restore in @mas.
6536 * Used in test code.
6537 * Return: 1 if @mas has been reset to MAS_START, 0 otherwise.
6539 static inline int mas_dead_node(struct ma_state *mas, unsigned long index)
6541 if (unlikely(!mas_searchable(mas) || mas_is_start(mas)))
6544 if (likely(!mte_dead_node(mas->node)))
6547 mas_rewalk(mas, index);
6551 void mt_cache_shrink(void)
6556 * mt_cache_shrink() - For testing, don't use this.
6558 * Certain testcases can trigger an OOM when combined with other memory
6559 * debugging configuration options. This function is used to reduce the
6560	 * possibility of an out of memory event due to kmem_cache objects remaining
6561 * around for longer than usual.
6563 void mt_cache_shrink(void)
6565 kmem_cache_shrink(maple_node_cache);
6568 EXPORT_SYMBOL_GPL(mt_cache_shrink);
6570 #endif /* not defined __KERNEL__ */
6572 * mas_get_slot() - Get the entry in the maple state node stored at @offset.
6573 * @mas: The maple state
6574 * @offset: The offset into the slot array to fetch.
6576 * Return: The entry stored at @offset.
6578 static inline struct maple_enode *mas_get_slot(struct ma_state *mas,
6579 unsigned char offset)
6581 return mas_slot(mas, ma_slots(mas_mn(mas), mte_node_type(mas->node)),
6587	 * mas_first_entry() - Go to the first leaf and find the first entry.
6588	 * @mas: the maple state.
 * @mn: the maple node to start from.
6589	 * @limit: the maximum index to check.
 * @mt: the node type of @mn.
6592	 * Sets mas->offset to the offset of the entry, mas->index to the range minimum.
6594 * Return: The first entry or MAS_NONE.
6596 static inline void *mas_first_entry(struct ma_state *mas, struct maple_node *mn,
6597 unsigned long limit, enum maple_type mt)
6601 unsigned long *pivots;
6605 mas->index = mas->min;
6606 if (mas->index > limit)
6611 while (likely(!ma_is_leaf(mt))) {
6612 MT_BUG_ON(mas->tree, mte_dead_node(mas->node));
6613 slots = ma_slots(mn, mt);
6614 pivots = ma_pivots(mn, mt);
6616 entry = mas_slot(mas, slots, 0);
6617 if (unlikely(ma_dead_node(mn)))
6621 mt = mte_node_type(mas->node);
6623 MT_BUG_ON(mas->tree, mte_dead_node(mas->node));
6626 slots = ma_slots(mn, mt);
6627 entry = mas_slot(mas, slots, 0);
6628 if (unlikely(ma_dead_node(mn)))
6631 /* Slot 0 or 1 must be set */
6632 if (mas->index > limit)
6638 pivots = ma_pivots(mn, mt);
6639 mas->index = pivots[0] + 1;
6641 entry = mas_slot(mas, slots, 1);
6642 if (unlikely(ma_dead_node(mn)))
6645 if (mas->index > limit)
6652 if (likely(!ma_dead_node(mn)))
6653 mas->node = MAS_NONE;
6657 /* Depth first search, post-order */
6658 static void mas_dfs_postorder(struct ma_state *mas, unsigned long max)
6661 struct maple_enode *p = MAS_NONE, *mn = mas->node;
6662 unsigned long p_min, p_max;
6664 mas_next_node(mas, mas_mn(mas), max);
6665 if (!mas_is_none(mas))
6668 if (mte_is_root(mn))
6673 while (mas->node != MAS_NONE) {
6677 mas_prev_node(mas, 0);
6688 /* Tree validations */
6689 static void mt_dump_node(const struct maple_tree *mt, void *entry,
6690 unsigned long min, unsigned long max, unsigned int depth);
6691 static void mt_dump_range(unsigned long min, unsigned long max,
6694 static const char spaces[] = " ";
6697 pr_info("%.*s%lu: ", depth * 2, spaces, min);
6699 pr_info("%.*s%lu-%lu: ", depth * 2, spaces, min, max);
6702 static void mt_dump_entry(void *entry, unsigned long min, unsigned long max,
6705 mt_dump_range(min, max, depth);
6707 if (xa_is_value(entry))
6708 pr_cont("value %ld (0x%lx) [%p]\n", xa_to_value(entry),
6709 xa_to_value(entry), entry);
6710 else if (xa_is_zero(entry))
6711 pr_cont("zero (%ld)\n", xa_to_internal(entry));
6712 else if (mt_is_reserved(entry))
6713 pr_cont("UNKNOWN ENTRY (%p)\n", entry);
6715 pr_cont("%p\n", entry);
6718 static void mt_dump_range64(const struct maple_tree *mt, void *entry,
6719 unsigned long min, unsigned long max, unsigned int depth)
6721 struct maple_range_64 *node = &mte_to_node(entry)->mr64;
6722 bool leaf = mte_is_leaf(entry);
6723 unsigned long first = min;
6726 pr_cont(" contents: ");
6727 for (i = 0; i < MAPLE_RANGE64_SLOTS - 1; i++)
6728 pr_cont("%p %lu ", node->slot[i], node->pivot[i]);
6729 pr_cont("%p\n", node->slot[i]);
6730 for (i = 0; i < MAPLE_RANGE64_SLOTS; i++) {
6731 unsigned long last = max;
6733 if (i < (MAPLE_RANGE64_SLOTS - 1))
6734 last = node->pivot[i];
6735 else if (!node->slot[i] && max != mt_max[mte_node_type(entry)])
6737 if (last == 0 && i > 0)
6740 mt_dump_entry(mt_slot(mt, node->slot, i),
6741 first, last, depth + 1);
6742 else if (node->slot[i])
6743 mt_dump_node(mt, mt_slot(mt, node->slot, i),
6744 first, last, depth + 1);
6749 pr_err("node %p last (%lu) > max (%lu) at pivot %d!\n",
6750 node, last, max, i);
6757 static void mt_dump_arange64(const struct maple_tree *mt, void *entry,
6758 unsigned long min, unsigned long max, unsigned int depth)
6760 struct maple_arange_64 *node = &mte_to_node(entry)->ma64;
6761 bool leaf = mte_is_leaf(entry);
6762 unsigned long first = min;
6765 pr_cont(" contents: ");
6766 for (i = 0; i < MAPLE_ARANGE64_SLOTS; i++)
6767 pr_cont("%lu ", node->gap[i]);
6768 pr_cont("| %02X %02X| ", node->meta.end, node->meta.gap);
6769 for (i = 0; i < MAPLE_ARANGE64_SLOTS - 1; i++)
6770 pr_cont("%p %lu ", node->slot[i], node->pivot[i]);
6771 pr_cont("%p\n", node->slot[i]);
6772 for (i = 0; i < MAPLE_ARANGE64_SLOTS; i++) {
6773 unsigned long last = max;
6775 if (i < (MAPLE_ARANGE64_SLOTS - 1))
6776 last = node->pivot[i];
6777 else if (!node->slot[i])
6779 if (last == 0 && i > 0)
6782 mt_dump_entry(mt_slot(mt, node->slot, i),
6783 first, last, depth + 1);
6784 else if (node->slot[i])
6785 mt_dump_node(mt, mt_slot(mt, node->slot, i),
6786 first, last, depth + 1);
6791 pr_err("node %p last (%lu) > max (%lu) at pivot %d!\n",
6792 node, last, max, i);
6799 static void mt_dump_node(const struct maple_tree *mt, void *entry,
6800 unsigned long min, unsigned long max, unsigned int depth)
6802 struct maple_node *node = mte_to_node(entry);
6803 unsigned int type = mte_node_type(entry);
6806 mt_dump_range(min, max, depth);
6808 pr_cont("node %p depth %d type %d parent %p", node, depth, type,
6809 node ? node->parent : NULL);
6813 for (i = 0; i < MAPLE_NODE_SLOTS; i++) {
6815 pr_cont("OUT OF RANGE: ");
6816 mt_dump_entry(mt_slot(mt, node->slot, i),
6817 min + i, min + i, depth);
6821 case maple_range_64:
6822 mt_dump_range64(mt, entry, min, max, depth);
6824 case maple_arange_64:
6825 mt_dump_arange64(mt, entry, min, max, depth);
6829 pr_cont(" UNKNOWN TYPE\n");
6833 void mt_dump(const struct maple_tree *mt)
6835 void *entry = rcu_dereference_check(mt->ma_root, mt_locked(mt));
6837 pr_info("maple_tree(%p) flags %X, height %u root %p\n",
6838 mt, mt->ma_flags, mt_height(mt), entry);
6839 if (!xa_is_node(entry))
6840 mt_dump_entry(entry, 0, 0, 0);
6842 mt_dump_node(mt, entry, 0, mt_max[mte_node_type(entry)], 0);
6844 EXPORT_SYMBOL_GPL(mt_dump);
6847 * Calculate the maximum gap in a node and check if that's what is reported in
6848 * the parent (unless root).
6850 static void mas_validate_gaps(struct ma_state *mas)
6852 struct maple_enode *mte = mas->node;
6853 struct maple_node *p_mn;
6854 unsigned long gap = 0, max_gap = 0;
6855 unsigned long p_end, p_start = mas->min;
6856 unsigned char p_slot;
6857 unsigned long *gaps = NULL;
6858 unsigned long *pivots = ma_pivots(mte_to_node(mte), mte_node_type(mte));
6861 if (ma_is_dense(mte_node_type(mte))) {
6862 for (i = 0; i < mt_slot_count(mte); i++) {
6863 if (mas_get_slot(mas, i)) {
6874 gaps = ma_gaps(mte_to_node(mte), mte_node_type(mte));
6875 for (i = 0; i < mt_slot_count(mte); i++) {
6876 p_end = mas_logical_pivot(mas, pivots, i, mte_node_type(mte));
6879 if (mas_get_slot(mas, i)) {
6884 gap += p_end - p_start + 1;
6886 void *entry = mas_get_slot(mas, i);
6890 if (gap != p_end - p_start + 1) {
6891 pr_err("%p[%u] -> %p %lu != %lu - %lu + 1\n",
6893 mas_get_slot(mas, i), gap,
6897 MT_BUG_ON(mas->tree,
6898 gap != p_end - p_start + 1);
6901 if (gap > p_end - p_start + 1) {
6902 pr_err("%p[%u] %lu >= %lu - %lu + 1 (%lu)\n",
6903 mas_mn(mas), i, gap, p_end, p_start,
6904 p_end - p_start + 1);
6905 MT_BUG_ON(mas->tree,
6906 gap > p_end - p_start + 1);
6914 p_start = p_end + 1;
6915 if (p_end >= mas->max)
6920 if (mte_is_root(mte))
6923 p_slot = mte_parent_slot(mas->node);
6924 p_mn = mte_parent(mte);
6925 MT_BUG_ON(mas->tree, max_gap > mas->max);
6926 if (ma_gaps(p_mn, mas_parent_enum(mas, mte))[p_slot] != max_gap) {
6927 pr_err("gap %p[%u] != %lu\n", p_mn, p_slot, max_gap);
6931 MT_BUG_ON(mas->tree,
6932 ma_gaps(p_mn, mas_parent_enum(mas, mte))[p_slot] != max_gap);
6935 static void mas_validate_parent_slot(struct ma_state *mas)
6937 struct maple_node *parent;
6938 struct maple_enode *node;
6939 enum maple_type p_type = mas_parent_enum(mas, mas->node);
6940 unsigned char p_slot = mte_parent_slot(mas->node);
6944 if (mte_is_root(mas->node))
6947 parent = mte_parent(mas->node);
6948 slots = ma_slots(parent, p_type);
6949 MT_BUG_ON(mas->tree, mas_mn(mas) == parent);
6951 /* Check prev/next parent slot for duplicate node entry */
6953 for (i = 0; i < mt_slots[p_type]; i++) {
6954 node = mas_slot(mas, slots, i);
6956 if (node != mas->node)
6957 pr_err("parent %p[%u] does not have %p\n",
6958 parent, i, mas_mn(mas));
6959 MT_BUG_ON(mas->tree, node != mas->node);
6960 } else if (node == mas->node) {
6961 pr_err("Invalid child %p at parent %p[%u] p_slot %u\n",
6962 mas_mn(mas), parent, i, p_slot);
6963 MT_BUG_ON(mas->tree, node == mas->node);
6968 static void mas_validate_child_slot(struct ma_state *mas)
6970 enum maple_type type = mte_node_type(mas->node);
6971 void __rcu **slots = ma_slots(mte_to_node(mas->node), type);
6972 unsigned long *pivots = ma_pivots(mte_to_node(mas->node), type);
6973 struct maple_enode *child;
6976 if (mte_is_leaf(mas->node))
6979 for (i = 0; i < mt_slots[type]; i++) {
6980 child = mas_slot(mas, slots, i);
6981 if (!pivots[i] || pivots[i] == mas->max)
6987 if (mte_parent_slot(child) != i) {
6988 pr_err("Slot error at %p[%u]: child %p has pslot %u\n",
6989 mas_mn(mas), i, mte_to_node(child),
6990 mte_parent_slot(child));
6991 MT_BUG_ON(mas->tree, 1);
6994 if (mte_parent(child) != mte_to_node(mas->node)) {
6995 pr_err("child %p has parent %p not %p\n",
6996 mte_to_node(child), mte_parent(child),
6997 mte_to_node(mas->node));
6998 MT_BUG_ON(mas->tree, 1);
7004 * Validate all pivots are within mas->min and mas->max.
7006 static void mas_validate_limits(struct ma_state *mas)
7009 unsigned long prev_piv = 0;
7010 enum maple_type type = mte_node_type(mas->node);
7011 void __rcu **slots = ma_slots(mte_to_node(mas->node), type);
7012 unsigned long *pivots = ma_pivots(mas_mn(mas), type);
7014 /* all limits are fine here. */
7015 if (mte_is_root(mas->node))
7018 for (i = 0; i < mt_slots[type]; i++) {
7021 piv = mas_safe_pivot(mas, pivots, i, type);
7023 if (!piv && (i != 0))
7026 if (!mte_is_leaf(mas->node)) {
7027 void *entry = mas_slot(mas, slots, i);
7030 pr_err("%p[%u] cannot be null\n",
7033 MT_BUG_ON(mas->tree, !entry);
7036 if (prev_piv > piv) {
7037 pr_err("%p[%u] piv %lu < prev_piv %lu\n",
7038 mas_mn(mas), i, piv, prev_piv);
7039 MT_BUG_ON(mas->tree, piv < prev_piv);
7042 if (piv < mas->min) {
7043 pr_err("%p[%u] %lu < %lu\n", mas_mn(mas), i,
7045 MT_BUG_ON(mas->tree, piv < mas->min);
7047 if (piv > mas->max) {
7048 pr_err("%p[%u] %lu > %lu\n", mas_mn(mas), i,
7050 MT_BUG_ON(mas->tree, piv > mas->max);
7053 if (piv == mas->max)
7056 for (i += 1; i < mt_slots[type]; i++) {
7057 void *entry = mas_slot(mas, slots, i);
7059 if (entry && (i != mt_slots[type] - 1)) {
7060 pr_err("%p[%u] should not have entry %p\n", mas_mn(mas),
7062 MT_BUG_ON(mas->tree, entry != NULL);
7065 if (i < mt_pivots[type]) {
7066 unsigned long piv = pivots[i];
7071 pr_err("%p[%u] should not have piv %lu\n",
7072 mas_mn(mas), i, piv);
7073 MT_BUG_ON(mas->tree, i < mt_pivots[type] - 1);
7078 static void mt_validate_nulls(struct maple_tree *mt)
7080 void *entry, *last = (void *)1;
7081 unsigned char offset = 0;
7083 MA_STATE(mas, mt, 0, 0);
7086 if (mas_is_none(&mas) || (mas.node == MAS_ROOT))
7089 while (!mte_is_leaf(mas.node))
7092 slots = ma_slots(mte_to_node(mas.node), mte_node_type(mas.node));
7094 entry = mas_slot(&mas, slots, offset);
7095 if (!last && !entry) {
7096 pr_err("Sequential nulls end at %p[%u]\n",
7097 mas_mn(&mas), offset);
7099 MT_BUG_ON(mt, !last && !entry);
7101 if (offset == mas_data_end(&mas)) {
7102 mas_next_node(&mas, mas_mn(&mas), ULONG_MAX);
7103 if (mas_is_none(&mas))
7106 slots = ma_slots(mte_to_node(mas.node),
7107 mte_node_type(mas.node));
7112 } while (!mas_is_none(&mas));
7116 * validate a maple tree by checking:
7117 * 1. The limits (pivots are within mas->min to mas->max)
7118 * 2. The gap is correctly set in the parents
7120 void mt_validate(struct maple_tree *mt)
7124 MA_STATE(mas, mt, 0, 0);
7127 if (!mas_searchable(&mas))
7130 mas_first_entry(&mas, mas_mn(&mas), ULONG_MAX, mte_node_type(mas.node));
7131 while (!mas_is_none(&mas)) {
7132 MT_BUG_ON(mas.tree, mte_dead_node(mas.node));
7133 if (!mte_is_root(mas.node)) {
7134 end = mas_data_end(&mas);
7135 if ((end < mt_min_slot_count(mas.node)) &&
7136 (mas.max != ULONG_MAX)) {
7137 pr_err("Invalid size %u of %p\n", end,
7139 MT_BUG_ON(mas.tree, 1);
7143 mas_validate_parent_slot(&mas);
7144 mas_validate_child_slot(&mas);
7145 mas_validate_limits(&mas);
7146 if (mt_is_alloc(mt))
7147 mas_validate_gaps(&mas);
7148 mas_dfs_postorder(&mas, ULONG_MAX);
7150 mt_validate_nulls(mt);
7155 EXPORT_SYMBOL_GPL(mt_validate);
7157 #endif /* CONFIG_DEBUG_MAPLE_TREE */