// SPDX-License-Identifier: GPL-2.0+
 * Maple Tree implementation
 * Copyright (c) 2018-2022 Oracle Corporation
 * Authors: Liam R. Howlett <Liam.Howlett@oracle.com>
 *	    Matthew Wilcox <willy@infradead.org>
 * DOC: Interesting implementation details of the Maple Tree
 * Each node type has a number of slots for entries and a number of slots for
 * pivots.  In the case of dense nodes, the pivots are implied by the position
 * and are simply the slot index + the minimum of the node.
 * In regular B-Tree terms, pivots are called keys.  The term pivot is used to
 * indicate that the tree is specifying ranges.  Pivots may appear in the
 * subtree with an entry attached to the value, whereas keys are unique to a
 * specific position of a B-tree.  Pivot values are inclusive of the slot with
 * the same index.
 * The following illustrates the layout of a range64 node's slots and pivots.
 *  Slots -> | 0 | 1 | 2 | ... | 12 | 13 | 14 | 15 |
 *           │   │   │   │      │    │    │    │   └─ Implied maximum
 *           │   │   │   │      │    │    │    └─ Pivot 14
 *           │   │   │   │      │    │    └─ Pivot 13
 *           │   │   │   │      │    └─ Pivot 12
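 *
 * As an illustrative example (not taken from a real tree dump): a range64
 * leaf with min = 0 and max = 100 holding pivots {10, 20} and slots
 * {A, B, C} returns A for indexes [0, 10], B for [11, 20] and C for
 * [21, 100], the last range ending at the implied maximum.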
 * Internal (non-leaf) nodes contain pointers to other nodes.
 * Leaf nodes contain entries.
 * The location of interest is often referred to as an offset.  All offsets
 * have a slot, but the last offset has an implied pivot from the node above
 * (or ULONG_MAX for the root node).
 * Ranges complicate certain write activities.  When modifying any of
 * the B-tree variants, it is known that one entry will either be added or
 * deleted.  When modifying the Maple Tree, one store operation may overwrite
 * the entire data set, or one half of the tree, or the middle half of the tree.
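 *
 * For example (illustrative): storing the range [10, 90] into a tree whose
 * only entry covers [0, 100] splits that single entry into up to three
 * ([0, 9], [10, 90], [91, 100]), while storing [0, ULONG_MAX] would
 * overwrite the entire data set with one entry.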
#include <linux/maple_tree.h>
#include <linux/xarray.h>
#include <linux/types.h>
#include <linux/export.h>
#include <linux/slab.h>
#include <linux/limits.h>
#include <asm/barrier.h>
#define CREATE_TRACE_POINTS
#include <trace/events/maple_tree.h>
#define MA_ROOT_PARENT 1
 * * MA_STATE_BULK		- Bulk insert mode
 * * MA_STATE_REBALANCE		- Indicate a rebalance during bulk insert
 * * MA_STATE_PREALLOC		- Preallocated nodes, WARN_ON allocation
#define MA_STATE_BULK		1
#define MA_STATE_REBALANCE	2
#define MA_STATE_PREALLOC	4
#define ma_parent_ptr(x) ((struct maple_pnode *)(x))
#define ma_mnode_ptr(x) ((struct maple_node *)(x))
#define ma_enode_ptr(x) ((struct maple_enode *)(x))
static struct kmem_cache *maple_node_cache;
#ifdef CONFIG_DEBUG_MAPLE_TREE
static const unsigned long mt_max[] = {
	[maple_dense]		= MAPLE_NODE_SLOTS,
	[maple_leaf_64]		= ULONG_MAX,
	[maple_range_64]	= ULONG_MAX,
	[maple_arange_64]	= ULONG_MAX,
#define mt_node_max(x) mt_max[mte_node_type(x)]
static const unsigned char mt_slots[] = {
	[maple_dense]		= MAPLE_NODE_SLOTS,
	[maple_leaf_64]		= MAPLE_RANGE64_SLOTS,
	[maple_range_64]	= MAPLE_RANGE64_SLOTS,
	[maple_arange_64]	= MAPLE_ARANGE64_SLOTS,
#define mt_slot_count(x) mt_slots[mte_node_type(x)]
static const unsigned char mt_pivots[] = {
	[maple_leaf_64]		= MAPLE_RANGE64_SLOTS - 1,
	[maple_range_64]	= MAPLE_RANGE64_SLOTS - 1,
	[maple_arange_64]	= MAPLE_ARANGE64_SLOTS - 1,
#define mt_pivot_count(x) mt_pivots[mte_node_type(x)]
static const unsigned char mt_min_slots[] = {
	[maple_dense]		= MAPLE_NODE_SLOTS / 2,
	[maple_leaf_64]		= (MAPLE_RANGE64_SLOTS / 2) - 2,
	[maple_range_64]	= (MAPLE_RANGE64_SLOTS / 2) - 2,
	[maple_arange_64]	= (MAPLE_ARANGE64_SLOTS / 2) - 1,
#define mt_min_slot_count(x) mt_min_slots[mte_node_type(x)]
#define MAPLE_BIG_NODE_SLOTS	(MAPLE_RANGE64_SLOTS * 2 + 2)
#define MAPLE_BIG_NODE_GAPS	(MAPLE_ARANGE64_SLOTS * 2 + 1)
struct maple_big_node {
	struct maple_pnode *parent;
	unsigned long pivot[MAPLE_BIG_NODE_SLOTS - 1];
	struct maple_enode *slot[MAPLE_BIG_NODE_SLOTS];
	unsigned long padding[MAPLE_BIG_NODE_GAPS];
	unsigned long gap[MAPLE_BIG_NODE_GAPS];
	enum maple_type type;
 * The maple_subtree_state is used to build a tree to replace a segment of an
 * existing tree in a more atomic way.  Any walkers of the older tree will hit a
 * dead node and restart on updates.
struct maple_subtree_state {
	struct ma_state *orig_l;	/* Original left side of subtree */
	struct ma_state *orig_r;	/* Original right side of subtree */
	struct ma_state *l;		/* New left side of subtree */
	struct ma_state *m;		/* New middle of subtree (rare) */
	struct ma_state *r;		/* New right side of subtree */
	struct ma_topiary *free;	/* nodes to be freed */
	struct ma_topiary *destroy;	/* Nodes to be destroyed (walked and freed) */
	struct maple_big_node *bn;
#ifdef CONFIG_KASAN_STACK
/* Prevent mas_wr_bnode() from exceeding the stack frame limit */
#define noinline_for_kasan noinline_for_stack
#define noinline_for_kasan inline
static inline struct maple_node *mt_alloc_one(gfp_t gfp)
	return kmem_cache_alloc(maple_node_cache, gfp);
static inline int mt_alloc_bulk(gfp_t gfp, size_t size, void **nodes)
	return kmem_cache_alloc_bulk(maple_node_cache, gfp, size, nodes);
static inline void mt_free_bulk(size_t size, void __rcu **nodes)
	kmem_cache_free_bulk(maple_node_cache, size, (void **)nodes);
static void mt_free_rcu(struct rcu_head *head)
	struct maple_node *node = container_of(head, struct maple_node, rcu);
	kmem_cache_free(maple_node_cache, node);
 * ma_free_rcu() - Use rcu callback to free a maple node
 * @node: The node to free
 * The maple tree uses the parent pointer to indicate this node is no longer in
 * use and will be freed.
static void ma_free_rcu(struct maple_node *node)
	WARN_ON(node->parent != ma_parent_ptr(node));
	call_rcu(&node->rcu, mt_free_rcu);
static void mas_set_height(struct ma_state *mas)
	unsigned int new_flags = mas->tree->ma_flags;
	new_flags &= ~MT_FLAGS_HEIGHT_MASK;
	BUG_ON(mas->depth > MAPLE_HEIGHT_MAX);
	new_flags |= mas->depth << MT_FLAGS_HEIGHT_OFFSET;
	mas->tree->ma_flags = new_flags;
static unsigned int mas_mt_height(struct ma_state *mas)
	return mt_height(mas->tree);
static inline enum maple_type mte_node_type(const struct maple_enode *entry)
	return ((unsigned long)entry >> MAPLE_NODE_TYPE_SHIFT) &
		MAPLE_NODE_TYPE_MASK;
static inline bool ma_is_dense(const enum maple_type type)
	return type < maple_leaf_64;
static inline bool ma_is_leaf(const enum maple_type type)
	return type < maple_range_64;
static inline bool mte_is_leaf(const struct maple_enode *entry)
	return ma_is_leaf(mte_node_type(entry));
 * We also reserve values with the bottom two bits set to '10' which are
 * below 4096 (i.e. within MAPLE_RESERVED_RANGE).
static inline bool mt_is_reserved(const void *entry)
	return ((unsigned long)entry < MAPLE_RESERVED_RANGE) &&
		xa_is_internal(entry);
static inline void mas_set_err(struct ma_state *mas, long err)
	mas->node = MA_ERROR(err);
static inline bool mas_is_ptr(struct ma_state *mas)
	return mas->node == MAS_ROOT;
static inline bool mas_is_start(struct ma_state *mas)
	return mas->node == MAS_START;
bool mas_is_err(struct ma_state *mas)
	return xa_is_err(mas->node);
static inline bool mas_searchable(struct ma_state *mas)
	if (mas_is_none(mas))
static inline struct maple_node *mte_to_node(const struct maple_enode *entry)
	return (struct maple_node *)((unsigned long)entry & ~MAPLE_NODE_MASK);
 * mte_to_mat() - Convert a maple encoded node to a maple topiary node.
 * @entry: The maple encoded node
 * Return: a maple topiary pointer
static inline struct maple_topiary *mte_to_mat(const struct maple_enode *entry)
	return (struct maple_topiary *)
		((unsigned long)entry & ~MAPLE_NODE_MASK);
 * mas_mn() - Get the maple state node.
 * @mas: The maple state
 * Return: the maple node (not encoded - bare pointer).
static inline struct maple_node *mas_mn(const struct ma_state *mas)
	return mte_to_node(mas->node);
 * mte_set_node_dead() - Set a maple encoded node as dead.
 * @mn: The maple encoded node.
static inline void mte_set_node_dead(struct maple_enode *mn)
	mte_to_node(mn)->parent = ma_parent_ptr(mte_to_node(mn));
	smp_wmb(); /* Needed for RCU */
/* Bit 1 indicates the root is a node */
#define MAPLE_ROOT_NODE			0x02
/* maple_type stored bit 3-6 */
#define MAPLE_ENODE_TYPE_SHIFT		0x03
/* Bit 2 means a NULL somewhere below */
#define MAPLE_ENODE_NULL		0x04
static inline struct maple_enode *mt_mk_node(const struct maple_node *node,
		enum maple_type type)
	return (void *)((unsigned long)node |
			(type << MAPLE_ENODE_TYPE_SHIFT) | MAPLE_ENODE_NULL);
static inline void *mte_mk_root(const struct maple_enode *node)
	return (void *)((unsigned long)node | MAPLE_ROOT_NODE);
static inline void *mte_safe_root(const struct maple_enode *node)
	return (void *)((unsigned long)node & ~MAPLE_ROOT_NODE);
static inline void *mte_set_full(const struct maple_enode *node)
	return (void *)((unsigned long)node & ~MAPLE_ENODE_NULL);
static inline void *mte_clear_full(const struct maple_enode *node)
	return (void *)((unsigned long)node | MAPLE_ENODE_NULL);
static inline bool mte_has_null(const struct maple_enode *node)
	return (unsigned long)node & MAPLE_ENODE_NULL;
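/*
 * Encoding round-trip sketch (illustrative only; this helper is not part of
 * the implementation): an encoded node is the 256B-aligned node pointer with
 * the type placed at MAPLE_ENODE_TYPE_SHIFT and MAPLE_ENODE_NULL set, so
 * both pieces can be recovered from the low bits.
 */
static inline void mte_encoding_sketch(struct maple_node *mn)
{
	struct maple_enode *enode = mt_mk_node(mn, maple_range_64);

	WARN_ON(mte_node_type(enode) != maple_range_64);	/* type bits */
	WARN_ON(mte_to_node(enode) != mn);			/* pointer bits */
}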
static inline bool ma_is_root(struct maple_node *node)
	return ((unsigned long)node->parent & MA_ROOT_PARENT);
static inline bool mte_is_root(const struct maple_enode *node)
	return ma_is_root(mte_to_node(node));
static inline bool mas_is_root_limits(const struct ma_state *mas)
	return !mas->min && mas->max == ULONG_MAX;
static inline bool mt_is_alloc(struct maple_tree *mt)
	return (mt->ma_flags & MT_FLAGS_ALLOC_RANGE);
 * Excluding root, the parent pointer is 256B aligned like all other tree nodes.
 * When storing 32 or 64 bit values, the offset can fit into 5 bits.  The 16
 * bit values need an extra bit to store the offset.  This extra bit comes from
 * a reuse of the last bit in the node type.  This is possible by using bit 1 to
 * indicate if bit 2 is part of the type or the slot.
 * Node types:
 *  0b?00 = 16 bit nodes
 *  0b010 = 32 bit nodes
 *  0b110 = 64 bit nodes
 * Slot size and alignment:
 *  0b?00 : 16 bit values, type in 0-1, slot in 2-7
 *  0b010 : 32 bit values, type in 0-2, slot in 3-7
 *  0b110 : 64 bit values, type in 0-2, slot in 3-7
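 *
 * Decoding example (illustrative): a parent word ending in 0b110 is a
 * 64 bit node, so its slot is read from bits 3-7; a word with bit 1 clear
 * (0b?00) is a 16 bit node and the slot starts at bit 2 instead.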
#define MAPLE_PARENT_ROOT		0x01
#define MAPLE_PARENT_SLOT_SHIFT	0x03
#define MAPLE_PARENT_SLOT_MASK	0xF8
#define MAPLE_PARENT_16B_SLOT_SHIFT	0x02
#define MAPLE_PARENT_16B_SLOT_MASK	0xFC
#define MAPLE_PARENT_RANGE64		0x06
#define MAPLE_PARENT_RANGE32		0x04
#define MAPLE_PARENT_NOT_RANGE16	0x02
 * mte_parent_shift() - Get the parent shift for the slot storage.
 * @parent: The parent pointer cast as an unsigned long
 * Return: The shift into that pointer to the start of the slot
static inline unsigned long mte_parent_shift(unsigned long parent)
	/* Note bit 1 == 0 means 16B */
	if (likely(parent & MAPLE_PARENT_NOT_RANGE16))
		return MAPLE_PARENT_SLOT_SHIFT;
	return MAPLE_PARENT_16B_SLOT_SHIFT;
 * mte_parent_slot_mask() - Get the slot mask for the parent.
 * @parent: The parent pointer cast as an unsigned long.
 * Return: The slot mask for that parent.
static inline unsigned long mte_parent_slot_mask(unsigned long parent)
	/* Note bit 1 == 0 means 16B */
	if (likely(parent & MAPLE_PARENT_NOT_RANGE16))
		return MAPLE_PARENT_SLOT_MASK;
	return MAPLE_PARENT_16B_SLOT_MASK;
 * mas_parent_type() - Return the maple_type of the parent from the stored
 * parent type.
 * @mas: The maple state
 * @enode: The maple_enode to extract the parent's enum from
 * Return: The node->parent maple_type
enum maple_type mas_parent_type(struct ma_state *mas, struct maple_enode *enode)
	unsigned long p_type;
	p_type = (unsigned long)mte_to_node(enode)->parent;
	if (WARN_ON(p_type & MAPLE_PARENT_ROOT))
	p_type &= MAPLE_NODE_MASK;
	p_type &= ~mte_parent_slot_mask(p_type);
	case MAPLE_PARENT_RANGE64: /* or MAPLE_PARENT_ARANGE64 */
		if (mt_is_alloc(mas->tree))
			return maple_arange_64;
		return maple_range_64;
 * mte_set_parent() - Set the parent node and encode the slot
 * @enode: The encoded maple node.
 * @parent: The encoded maple node that is the parent of @enode.
 * @slot: The slot in @parent where @enode resides.
 * The slot number is encoded in enode->parent bit 3-6 or 2-6, depending on the
 * parent type.
void mte_set_parent(struct maple_enode *enode, const struct maple_enode *parent,
	unsigned long val = (unsigned long)parent;
	enum maple_type p_type = mte_node_type(parent);
	BUG_ON(p_type == maple_dense);
	BUG_ON(p_type == maple_leaf_64);
	case maple_arange_64:
		shift = MAPLE_PARENT_SLOT_SHIFT;
		type = MAPLE_PARENT_RANGE64;
	val &= ~MAPLE_NODE_MASK; /* Clear all node metadata in parent */
	val |= (slot << shift) | type;
	mte_to_node(enode)->parent = ma_parent_ptr(val);
 * mte_parent_slot() - get the parent slot of @enode.
 * @enode: The encoded maple node.
 * Return: The slot in the parent node where @enode resides.
static inline unsigned int mte_parent_slot(const struct maple_enode *enode)
	unsigned long val = (unsigned long)mte_to_node(enode)->parent;
	if (val & MA_ROOT_PARENT)
	/*
	 * Okay to use MAPLE_PARENT_16B_SLOT_MASK as the last bit will be lost
	 * by shift if the parent shift is MAPLE_PARENT_SLOT_SHIFT
	 */
	return (val & MAPLE_PARENT_16B_SLOT_MASK) >> mte_parent_shift(val);
 * mte_parent() - Get the parent of @node.
 * @node: The encoded maple node.
 * Return: The parent maple node.
static inline struct maple_node *mte_parent(const struct maple_enode *enode)
	return (void *)((unsigned long)
			(mte_to_node(enode)->parent) & ~MAPLE_NODE_MASK);
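/*
 * Round-trip sketch (illustrative; not used by the implementation): after
 * mte_set_parent(), both the slot and the bare parent node are recoverable
 * from the child's parent word.  @parent is assumed to be an internal
 * (range) node, since dense and leaf parents are rejected by the BUG_ONs
 * above.
 */
static inline void mte_parent_sketch(struct maple_enode *child,
		const struct maple_enode *parent, unsigned char slot)
{
	mte_set_parent(child, parent, slot);
	WARN_ON(mte_parent_slot(child) != slot);		/* slot bits */
	WARN_ON(mte_parent(child) != mte_to_node(parent));	/* pointer */
}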
 * ma_dead_node() - check if the @node is dead.
 * @node: The maple node
 * Return: true if dead, false otherwise.
static inline bool ma_dead_node(const struct maple_node *node)
	struct maple_node *parent;
	/* Do not reorder reads from the node prior to the parent check */
	parent = (void *)((unsigned long) node->parent & ~MAPLE_NODE_MASK);
	return (parent == node);
 * mte_dead_node() - check if the @enode is dead.
 * @enode: The encoded maple node
 * Return: true if dead, false otherwise.
static inline bool mte_dead_node(const struct maple_enode *enode)
	struct maple_node *parent, *node;
	node = mte_to_node(enode);
	/* Do not reorder reads from the node prior to the parent check */
	parent = mte_parent(enode);
	return (parent == node);
 * mas_allocated() - Get the number of nodes allocated in a maple state.
 * @mas: The maple state
 * The ma_state alloc member is overloaded to hold a pointer to the first
 * allocated node or to the number of requested nodes to allocate.  If bit 0 is
 * set, then the alloc contains the number of requested nodes.  If there is an
 * allocated node, then the total number of allocated nodes is in that node.
 * Return: The total number of nodes allocated
static inline unsigned long mas_allocated(const struct ma_state *mas)
	if (!mas->alloc || ((unsigned long)mas->alloc & 0x1))
	return mas->alloc->total;
 * mas_set_alloc_req() - Set the requested number of allocations.
 * @mas: the maple state
 * @count: the number of allocations.
 * The requested number of allocations is either in the first allocated node,
 * located in @mas->alloc->request_count, or directly in @mas->alloc if there is
 * no allocated node.  Set the request either in the node or do the necessary
 * encoding to store in @mas->alloc directly.
static inline void mas_set_alloc_req(struct ma_state *mas, unsigned long count)
	if (!mas->alloc || ((unsigned long)mas->alloc & 0x1)) {
		mas->alloc = (struct maple_alloc *)(((count) << 1U) | 1U);
	mas->alloc->request_count = count;
 * mas_alloc_req() - get the requested number of allocations.
 * @mas: The maple state
 * The alloc count is either stored directly in @mas, or in
 * @mas->alloc->request_count if there is at least one node allocated.  Decode
 * the request count if it's stored directly in @mas->alloc.
 * Return: The allocation request count.
static inline unsigned int mas_alloc_req(const struct ma_state *mas)
	if ((unsigned long)mas->alloc & 0x1)
		return (unsigned long)(mas->alloc) >> 1;
	return mas->alloc->request_count;
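/*
 * Encoding sketch (illustrative): with nothing allocated, a request for
 * three nodes is stored directly in mas->alloc as (3 << 1) | 1, so
 * mas_allocated() reads 0 while mas_alloc_req() decodes 3 again.
 */
static inline void mas_alloc_encoding_sketch(struct ma_state *mas)
{
	mas->alloc = NULL;		/* no allocated nodes */
	mas_set_alloc_req(mas, 3);	/* stored as (3 << 1) | 1 */
	WARN_ON(mas_allocated(mas) != 0);
	WARN_ON(mas_alloc_req(mas) != 3);
}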
 * ma_pivots() - Get a pointer to the maple node pivots.
 * @node: the maple node
 * @type: the node type
 * In the event of a dead node, this array may be %NULL
 * Return: A pointer to the maple node pivots
static inline unsigned long *ma_pivots(struct maple_node *node,
		enum maple_type type)
	case maple_arange_64:
		return node->ma64.pivot;
		return node->mr64.pivot;
 * ma_gaps() - Get a pointer to the maple node gaps.
 * @node: the maple node
 * @type: the node type
 * Return: A pointer to the maple node gaps
static inline unsigned long *ma_gaps(struct maple_node *node,
		enum maple_type type)
	case maple_arange_64:
		return node->ma64.gap;
 * mte_pivot() - Get the pivot at @piv of the maple encoded node.
 * @mn: The maple encoded node.
 * @piv: The pivot offset.
 * Return: the pivot at @piv of @mn.
static inline unsigned long mte_pivot(const struct maple_enode *mn,
	struct maple_node *node = mte_to_node(mn);
	enum maple_type type = mte_node_type(mn);
	if (piv >= mt_pivots[type]) {
	case maple_arange_64:
		return node->ma64.pivot[piv];
		return node->mr64.pivot[piv];
 * mas_safe_pivot() - get the pivot at @piv or mas->max.
 * @mas: The maple state
 * @pivots: The pointer to the maple node pivots
 * @piv: The pivot to fetch
 * @type: The maple node type
 * Return: The pivot at @piv within the limit of the @pivots array, @mas->max
 * otherwise.
static inline unsigned long
mas_safe_pivot(const struct ma_state *mas, unsigned long *pivots,
	       unsigned char piv, enum maple_type type)
	if (piv >= mt_pivots[type])
		return mas->max;
 * mas_safe_min() - Return the minimum for a given offset.
 * @mas: The maple state
 * @pivots: The pointer to the maple node pivots
 * @offset: The offset into the pivot array
 * Return: The minimum range value that is contained in @offset.
static inline unsigned long
mas_safe_min(struct ma_state *mas, unsigned long *pivots, unsigned char offset)
	if (likely(offset))
		return pivots[offset - 1] + 1;
	return mas->min;
 * mas_logical_pivot() - Get the logical pivot of a given offset.
 * @mas: The maple state
 * @pivots: The pointer to the maple node pivots
 * @offset: The offset into the pivot array
 * @type: The maple node type
 * When there is no value at a pivot (beyond the end of the data), then the
 * pivot is actually @mas->max.
 * Return: the logical pivot of a given @offset.
static inline unsigned long
mas_logical_pivot(struct ma_state *mas, unsigned long *pivots,
		  unsigned char offset, enum maple_type type)
	unsigned long lpiv = mas_safe_pivot(mas, pivots, offset, type);
 * mte_set_pivot() - Set a pivot to a value in an encoded maple node.
 * @mn: The encoded maple node
 * @piv: The pivot offset
 * @val: The value of the pivot
static inline void mte_set_pivot(struct maple_enode *mn, unsigned char piv,
	struct maple_node *node = mte_to_node(mn);
	enum maple_type type = mte_node_type(mn);
	BUG_ON(piv >= mt_pivots[type]);
		node->mr64.pivot[piv] = val;
	case maple_arange_64:
		node->ma64.pivot[piv] = val;
 * ma_slots() - Get a pointer to the maple node slots.
 * @mn: The maple node
 * @mt: The maple node type
 * Return: A pointer to the maple node slots
static inline void __rcu **ma_slots(struct maple_node *mn, enum maple_type mt)
	case maple_arange_64:
		return mn->ma64.slot;
		return mn->mr64.slot;
static inline bool mt_locked(const struct maple_tree *mt)
	return mt_external_lock(mt) ? mt_lock_is_held(mt) :
		lockdep_is_held(&mt->ma_lock);
static inline void *mt_slot(const struct maple_tree *mt,
		void __rcu **slots, unsigned char offset)
	return rcu_dereference_check(slots[offset], mt_locked(mt));
static inline void *mt_slot_locked(struct maple_tree *mt, void __rcu **slots,
		unsigned char offset)
	return rcu_dereference_protected(slots[offset], mt_locked(mt));
 * mas_slot_locked() - Get the slot value when holding the maple tree lock.
 * @mas: The maple state
 * @slots: The pointer to the slots
 * @offset: The offset into the slots array to fetch
 * Return: The entry stored in @slots at the @offset.
static inline void *mas_slot_locked(struct ma_state *mas, void __rcu **slots,
		unsigned char offset)
	return mt_slot_locked(mas->tree, slots, offset);
 * mas_slot() - Get the slot value when not holding the maple tree lock.
 * @mas: The maple state
 * @slots: The pointer to the slots
 * @offset: The offset into the slots array to fetch
 * Return: The entry stored in @slots at the @offset
static inline void *mas_slot(struct ma_state *mas, void __rcu **slots,
		unsigned char offset)
	return mt_slot(mas->tree, slots, offset);
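/*
 * Reader sketch (illustrative): an RCU reader fetches a slot with
 * mas_slot(); a writer holding the tree lock would use mas_slot_locked()
 * instead.  Hypothetical helper, not part of the implementation.
 */
static inline void *mas_first_slot_sketch(struct ma_state *mas)
{
	struct maple_node *node = mas_mn(mas);
	enum maple_type type = mte_node_type(mas->node);
	void __rcu **slots = ma_slots(node, type);

	return mas_slot(mas, slots, 0);	/* RCU-checked dereference */
}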
 * mas_root() - Get the maple tree root.
 * @mas: The maple state.
 * Return: The pointer to the root of the tree
static inline void *mas_root(struct ma_state *mas)
	return rcu_dereference_check(mas->tree->ma_root, mt_locked(mas->tree));
static inline void *mt_root_locked(struct maple_tree *mt)
	return rcu_dereference_protected(mt->ma_root, mt_locked(mt));
 * mas_root_locked() - Get the maple tree root when holding the maple tree lock.
 * @mas: The maple state.
 * Return: The pointer to the root of the tree
static inline void *mas_root_locked(struct ma_state *mas)
	return mt_root_locked(mas->tree);
static inline struct maple_metadata *ma_meta(struct maple_node *mn,
	case maple_arange_64:
		return &mn->ma64.meta;
		return &mn->mr64.meta;
 * ma_set_meta() - Set the metadata information of a node.
 * @mn: The maple node
 * @mt: The maple node type
 * @offset: The offset of the highest sub-gap in this node.
 * @end: The end of the data in this node.
static inline void ma_set_meta(struct maple_node *mn, enum maple_type mt,
		unsigned char offset, unsigned char end)
	struct maple_metadata *meta = ma_meta(mn, mt);
 * mt_clear_meta() - clear the metadata information of a node, if it exists
 * @mt: The maple tree
 * @mn: The maple node
 * @type: The maple node type
static inline void mt_clear_meta(struct maple_tree *mt, struct maple_node *mn,
		enum maple_type type)
	struct maple_metadata *meta;
	unsigned long *pivots;
		pivots = mn->mr64.pivot;
		if (unlikely(pivots[MAPLE_RANGE64_SLOTS - 2])) {
			slots = mn->mr64.slot;
			next = mt_slot_locked(mt, slots,
					      MAPLE_RANGE64_SLOTS - 1);
			if (unlikely((mte_to_node(next) &&
				      mte_node_type(next))))
				return; /* no metadata, could be node */
	case maple_arange_64:
		meta = ma_meta(mn, type);
 * ma_meta_end() - Get the data end of a node from the metadata
 * @mn: The maple node
 * @mt: The maple node type
static inline unsigned char ma_meta_end(struct maple_node *mn,
	struct maple_metadata *meta = ma_meta(mn, mt);
 * ma_meta_gap() - Get the largest gap location of a node from the metadata
 * @mn: The maple node
 * @mt: The maple node type
static inline unsigned char ma_meta_gap(struct maple_node *mn,
	BUG_ON(mt != maple_arange_64);
	return mn->ma64.meta.gap;
 * ma_set_meta_gap() - Set the largest gap location in a node's metadata
 * @mn: The maple node
 * @mt: The maple node type
 * @offset: The location of the largest gap.
static inline void ma_set_meta_gap(struct maple_node *mn, enum maple_type mt,
		unsigned char offset)
	struct maple_metadata *meta = ma_meta(mn, mt);
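/*
 * Metadata round-trip sketch (illustrative; assumes the setters and getters
 * above store and return the same meta fields): record the largest-gap
 * offset and the data end in an arange_64 node, then read both back.
 */
static inline void ma_meta_sketch(struct maple_node *mn)
{
	ma_set_meta(mn, maple_arange_64, 2, 7);	/* gap at slot 2, end = 7 */
	WARN_ON(ma_meta_end(mn, maple_arange_64) != 7);
	WARN_ON(ma_meta_gap(mn, maple_arange_64) != 2);
}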
 * mat_add() - Add a @dead_enode to the ma_topiary of a list of dead nodes.
 * @mat: the ma_topiary, a linked list of dead nodes.
 * @dead_enode: the node to be marked as dead and added to the tail of the list
 * Add the @dead_enode to the linked list in @mat.
static inline void mat_add(struct ma_topiary *mat,
		struct maple_enode *dead_enode)
	mte_set_node_dead(dead_enode);
	mte_to_mat(dead_enode)->next = NULL;
		mat->tail = mat->head = dead_enode;
	mte_to_mat(mat->tail)->next = dead_enode;
	mat->tail = dead_enode;
static void mte_destroy_walk(struct maple_enode *, struct maple_tree *);
static inline void mas_free(struct ma_state *mas, struct maple_enode *used);
 * mas_mat_free() - Free all nodes in a dead list.
 * @mas: the maple state
 * @mat: the ma_topiary linked list of dead nodes to free.
 * Free walk a dead list.
static void mas_mat_free(struct ma_state *mas, struct ma_topiary *mat)
	struct maple_enode *next;
		next = mte_to_mat(mat->head)->next;
		mas_free(mas, mat->head);
 * mas_mat_destroy() - Free all nodes and subtrees in a dead list.
 * @mas: the maple state
 * @mat: the ma_topiary linked list of dead nodes to free.
 * Destroy walk a dead list.
static void mas_mat_destroy(struct ma_state *mas, struct ma_topiary *mat)
	struct maple_enode *next;
		next = mte_to_mat(mat->head)->next;
		mte_destroy_walk(mat->head, mat->mtree);
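/*
 * Usage sketch (hypothetical; assumes the ma_topiary fields referenced
 * above): queue a dead node on a topiary list during a rewrite, then free
 * the whole list once the replacement nodes are published.
 */
static inline void mas_topiary_usage_sketch(struct ma_state *mas,
		struct maple_enode *dead)
{
	struct ma_topiary mat = { .head = NULL, .tail = NULL,
				  .mtree = mas->tree };

	mat_add(&mat, dead);		/* marks @dead dead and links it */
	mas_mat_free(mas, &mat);	/* frees each node via RCU or reuse */
}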
 * mas_descend() - Descend into the slot stored in the ma_state.
 * @mas: the maple state.
 * Note: Not RCU safe, only use in write side or debug code.
static inline void mas_descend(struct ma_state *mas)
	enum maple_type type;
	unsigned long *pivots;
	struct maple_node *node;
	type = mte_node_type(mas->node);
	pivots = ma_pivots(node, type);
	slots = ma_slots(node, type);
	if (mas->offset)
		mas->min = pivots[mas->offset - 1] + 1;
	mas->max = mas_safe_pivot(mas, pivots, mas->offset, type);
	mas->node = mas_slot(mas, slots, mas->offset);
 * mte_set_gap() - Set a maple node gap.
 * @mn: The encoded maple node
 * @gap: The offset of the gap to set
 * @val: The gap value
static inline void mte_set_gap(const struct maple_enode *mn,
		unsigned char gap, unsigned long val)
	switch (mte_node_type(mn)) {
	case maple_arange_64:
		mte_to_node(mn)->ma64.gap[gap] = val;
 * mas_ascend() - Walk up a level of the tree.
 * @mas: The maple state
 * Sets the @mas->max and @mas->min to the correct values when walking up.  This
 * may cause several levels of walking up to find the correct min and max.
 * May find a dead node which will cause a premature return.
 * Return: 1 on dead node, 0 otherwise
static int mas_ascend(struct ma_state *mas)
	struct maple_enode *p_enode;	/* parent enode. */
	struct maple_enode *a_enode;	/* ancestor enode. */
	struct maple_node *a_node;	/* ancestor node. */
	struct maple_node *p_node;	/* parent node. */
	unsigned char a_slot;
	enum maple_type a_type;
	unsigned long min, max;
	unsigned long *pivots;
	bool set_max = false, set_min = false;
	a_node = mas_mn(mas);
	if (ma_is_root(a_node)) {
	p_node = mte_parent(mas->node);
	if (unlikely(a_node == p_node))
	a_type = mas_parent_type(mas, mas->node);
	mas->offset = mte_parent_slot(mas->node);
	a_enode = mt_mk_node(p_node, a_type);
	/* Check to make sure all parent information is still accurate */
	if (p_node != mte_parent(mas->node))
	mas->node = a_enode;
	if (mte_is_root(a_enode)) {
		mas->max = ULONG_MAX;
	if (mas->max == ULONG_MAX)
		a_type = mas_parent_type(mas, p_enode);
		a_node = mte_parent(p_enode);
		a_slot = mte_parent_slot(p_enode);
		a_enode = mt_mk_node(a_node, a_type);
		pivots = ma_pivots(a_node, a_type);
		if (unlikely(ma_dead_node(a_node)))
		if (!set_min && a_slot) {
			min = pivots[a_slot - 1] + 1;
		if (!set_max && a_slot < mt_pivots[a_type]) {
			max = pivots[a_slot];
		if (unlikely(ma_dead_node(a_node)))
		if (unlikely(ma_is_root(a_node)))
	} while (!set_min || !set_max);
 * mas_pop_node() - Get a previously allocated maple node from the maple state.
 * @mas: The maple state
 * Return: A pointer to a maple node.
static inline struct maple_node *mas_pop_node(struct ma_state *mas)
	struct maple_alloc *ret, *node = mas->alloc;
	unsigned long total = mas_allocated(mas);
	unsigned int req = mas_alloc_req(mas);
	/* nothing or a request pending. */
	if (WARN_ON(!total))
	/* single allocation in this ma_state */
	if (node->node_count == 1) {
		/* Single allocation in this node. */
		mas->alloc = node->slot[0];
		mas->alloc->total = node->total - 1;
	ret = node->slot[--node->node_count];
	node->slot[node->node_count] = NULL;
	mas_set_alloc_req(mas, req);
	memset(ret, 0, sizeof(*ret));
	return (struct maple_node *)ret;
 * mas_push_node() - Push a node back on the maple state allocation.
 * @mas: The maple state
 * @used: The used maple node
 * Stores the maple node back into @mas->alloc for reuse.  Updates allocated and
 * requested node count as necessary.
static inline void mas_push_node(struct ma_state *mas, struct maple_node *used)
	struct maple_alloc *reuse = (struct maple_alloc *)used;
	struct maple_alloc *head = mas->alloc;
	unsigned long count;
	unsigned int requested = mas_alloc_req(mas);
	count = mas_allocated(mas);
	reuse->request_count = 0;
	reuse->node_count = 0;
	if (count && (head->node_count < MAPLE_ALLOC_SLOTS)) {
		head->slot[head->node_count++] = reuse;
	if ((head) && !((unsigned long)head & 0x1)) {
		reuse->slot[0] = head;
		reuse->node_count = 1;
		reuse->total += head->total;
	mas_set_alloc_req(mas, requested - 1);
 * mas_alloc_nodes() - Allocate nodes into a maple state
 * @mas: The maple state
 * @gfp: The GFP Flags
static inline void mas_alloc_nodes(struct ma_state *mas, gfp_t gfp)
	struct maple_alloc *node;
	unsigned long allocated = mas_allocated(mas);
	unsigned int requested = mas_alloc_req(mas);
	void **slots = NULL;
	unsigned int max_req = 0;
	mas_set_alloc_req(mas, 0);
	if (mas->mas_flags & MA_STATE_PREALLOC) {
		WARN_ON(!allocated);
	if (!allocated || mas->alloc->node_count == MAPLE_ALLOC_SLOTS) {
		node = (struct maple_alloc *)mt_alloc_one(gfp);
			node->slot[0] = mas->alloc;
			node->node_count = 1;
			node->node_count = 0;
		node->total = ++allocated;
	node->request_count = 0;
		max_req = MAPLE_ALLOC_SLOTS - node->node_count;
		slots = (void **)&node->slot[node->node_count];
		max_req = min(requested, max_req);
		count = mt_alloc_bulk(gfp, max_req, slots);
		if (node->node_count == 0) {
			node->slot[0]->node_count = 0;
			node->slot[0]->request_count = 0;
		node->node_count += count;
		node = node->slot[0];
	mas->alloc->total = allocated;
	/* Clean up potential freed allocations on bulk failure */
	memset(slots, 0, max_req * sizeof(unsigned long));
	mas_set_alloc_req(mas, requested);
	if (mas->alloc && !(((unsigned long)mas->alloc & 0x1)))
		mas->alloc->total = allocated;
	mas_set_err(mas, -ENOMEM);
 * mas_free() - Free an encoded maple node
 * @mas: The maple state
 * @used: The encoded maple node to free.
 * Uses rcu free if necessary, pushes @used back on the maple state allocations
 * if necessary.
static inline void mas_free(struct ma_state *mas, struct maple_enode *used)
	struct maple_node *tmp = mte_to_node(used);
	if (mt_in_rcu(mas->tree))
		ma_free_rcu(tmp);
	else
		mas_push_node(mas, tmp);
 * mas_node_count_gfp() - Check if enough nodes are allocated and request more
 * if there are not enough nodes.
 * @mas: The maple state
 * @count: The number of nodes needed
 * @gfp: the gfp flags
static void mas_node_count_gfp(struct ma_state *mas, int count, gfp_t gfp)
	unsigned long allocated = mas_allocated(mas);
	if (allocated < count) {
		mas_set_alloc_req(mas, count - allocated);
		mas_alloc_nodes(mas, gfp);
 * mas_node_count() - Check if enough nodes are allocated and request more if
 * there are not enough nodes.
 * @mas: The maple state
 * @count: The number of nodes needed
 * Note: Uses GFP_NOWAIT | __GFP_NOWARN for gfp flags.
static void mas_node_count(struct ma_state *mas, int count)
	return mas_node_count_gfp(mas, count, GFP_NOWAIT | __GFP_NOWARN);
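/*
 * Caller sketch (hypothetical helper, not part of the implementation):
 * reserve a node before a write, then pop it when needed.  On allocation
 * failure -ENOMEM is set in the maple state and must be checked with
 * mas_is_err().
 */
static inline struct maple_node *mas_reserve_one_sketch(struct ma_state *mas)
{
	mas_node_count(mas, 1);		/* request one node, GFP_NOWAIT */
	if (mas_is_err(mas))
		return NULL;		/* allocation failed */

	return mas_pop_node(mas);	/* take a reserved node */
}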
 * mas_start() - Sets up maple state for operations.
 * @mas: The maple state.
 * If mas->node == MAS_START, then set the min, max and depth to
 * defaults.
 * Return:
 * - If mas->node is an error or not MAS_START, return NULL.
 * - If it's an empty tree: NULL & mas->node == MAS_NONE
 * - If it's a single entry: The entry & mas->node == MAS_ROOT
 * - If it's a tree: NULL & mas->node == safe root node.
static inline struct maple_enode *mas_start(struct ma_state *mas)
	if (likely(mas_is_start(mas))) {
		struct maple_enode *root;
		mas->max = ULONG_MAX;
		root = mas_root(mas);
		/* Tree with nodes */
		if (likely(xa_is_node(root))) {
			mas->node = mte_safe_root(root);
			if (mte_dead_node(mas->node))
		if (unlikely(!root)) {
			mas->node = MAS_NONE;
			mas->offset = MAPLE_NODE_SLOTS;
		/* Single entry tree */
		mas->node = MAS_ROOT;
		mas->offset = MAPLE_NODE_SLOTS;
		/* Single entry tree. */
 * ma_data_end() - Find the end of the data in a node.
 * @node: The maple node
 * @type: The maple node type
 * @pivots: The array of pivots in the node
 * @max: The maximum value in the node
 * Uses metadata to find the end of the data when possible.
 * Return: The zero indexed last slot with data (may be null).
static inline unsigned char ma_data_end(struct maple_node *node,
		enum maple_type type,
		unsigned long *pivots,
	unsigned char offset;
	if (type == maple_arange_64)
		return ma_meta_end(node, type);
	offset = mt_pivots[type] - 1;
	if (likely(!pivots[offset]))
		return ma_meta_end(node, type);
	if (likely(pivots[offset] == max))
		return offset;
	return mt_pivots[type];
 * mas_data_end() - Find the end of the data (slot).
 * @mas: the maple state
 * This method is optimized to check the metadata of a node if the node type
 * supports data end metadata.
 * Return: The zero indexed last slot with data (may be null).
static inline unsigned char mas_data_end(struct ma_state *mas)
	enum maple_type type;
	struct maple_node *node;
	unsigned char offset;
	unsigned long *pivots;
	type = mte_node_type(mas->node);
	if (type == maple_arange_64)
		return ma_meta_end(node, type);
	pivots = ma_pivots(node, type);
	if (unlikely(ma_dead_node(node)))
	offset = mt_pivots[type] - 1;
	if (likely(!pivots[offset]))
		return ma_meta_end(node, type);
	if (likely(pivots[offset] == mas->max))
		return offset;
	return mt_pivots[type];
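/*
 * Iteration sketch (illustrative; not used by the implementation): count
 * the non-NULL entries of the current node, using mas_data_end() as the
 * inclusive last offset.
 */
static inline unsigned char mas_count_entries_sketch(struct ma_state *mas)
{
	void __rcu **slots = ma_slots(mas_mn(mas), mte_node_type(mas->node));
	unsigned char end = mas_data_end(mas);
	unsigned char offset, count = 0;

	for (offset = 0; offset <= end; offset++)
		if (mas_slot(mas, slots, offset))
			count++;	/* this slot holds an entry */

	return count;
}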
 * mas_leaf_max_gap() - Returns the largest gap in a leaf node
 * @mas: the maple state
 * Return: The maximum gap in the leaf.
static unsigned long mas_leaf_max_gap(struct ma_state *mas)
	unsigned long pstart, gap, max_gap;
	struct maple_node *mn;
	unsigned long *pivots;
	unsigned char max_piv;
	mt = mte_node_type(mas->node);
	slots = ma_slots(mn, mt);
	if (unlikely(ma_is_dense(mt))) {
		for (i = 0; i < mt_slots[mt]; i++) {
	/*
	 * Checking the first implied pivot optimizes the loop below, and slot 1
	 * may be skipped if there is a gap in slot 0.
	 */
	pivots = ma_pivots(mn, mt);
	if (likely(!slots[0])) {
		max_gap = pivots[0] - mas->min + 1;
	/* reduce max_piv as the special case is checked before the loop */
	max_piv = ma_data_end(mn, mt, pivots, mas->max) - 1;
	/*
	 * Check the end implied pivot, which can only be a gap on the
	 * right-most node.
	 */
	if (unlikely(mas->max == ULONG_MAX) && !slots[max_piv + 1]) {
		gap = ULONG_MAX - pivots[max_piv];
	for (; i <= max_piv; i++) {
		/* data == no gap. */
		if (likely(slots[i]))
		pstart = pivots[i - 1];
		gap = pivots[i] - pstart;
	/* There cannot be two gaps in a row. */
 * ma_max_gap() - Get the maximum gap in a maple node (non-leaf)
 * @node: The maple node
 * @gaps: The pointer to the gaps
 * @mt: The maple node type
 * @off: Pointer to store the offset location of the gap.
 * Uses the data end from the metadata to scan backwards across set gaps.
 * Return: The maximum gap value
static inline unsigned long
ma_max_gap(struct maple_node *node, unsigned long *gaps, enum maple_type mt,
	unsigned char offset, i;
	unsigned long max_gap = 0;
	i = offset = ma_meta_end(node, mt);
		if (gaps[i] > max_gap) {
 * mas_max_gap() - find the largest gap in a non-leaf node and set the slot.
 * @mas: The maple state.
 * If the metadata gap is set to MAPLE_ARANGE64_META_MAX, there is no gap.
 * Return: The gap value.
static inline unsigned long mas_max_gap(struct ma_state *mas)
	unsigned long *gaps;
	unsigned char offset;
	struct maple_node *node;
	mt = mte_node_type(mas->node);
	if (ma_is_leaf(mt))
		return mas_leaf_max_gap(mas);
	offset = ma_meta_gap(node, mt);
	if (offset == MAPLE_ARANGE64_META_MAX)
	gaps = ma_gaps(node, mt);
	return gaps[offset];
 * mas_parent_gap() - Set the parent gap and any gaps above, as needed
 * @mas: The maple state
 * @offset: The gap offset in the parent to set
 * @new: The new gap value.
 * Set the parent gap then continue to set the gap upwards, using the metadata
 * of the parent to see if it is necessary to check the node above.
static inline void mas_parent_gap(struct ma_state *mas, unsigned char offset,
	unsigned long meta_gap = 0;
	struct maple_node *pnode;
	struct maple_enode *penode;
	unsigned long *pgaps;
	unsigned char meta_offset;
	enum maple_type pmt;
	pnode = mte_parent(mas->node);
	pmt = mas_parent_type(mas, mas->node);
	penode = mt_mk_node(pnode, pmt);
	pgaps = ma_gaps(pnode, pmt);
	meta_offset = ma_meta_gap(pnode, pmt);
	if (meta_offset == MAPLE_ARANGE64_META_MAX)
	meta_gap = pgaps[meta_offset];
	pgaps[offset] = new;
	if (meta_gap == new)
	if (offset != meta_offset) {
		ma_set_meta_gap(pnode, pmt, offset);
	} else if (new < meta_gap) {
		new = ma_max_gap(pnode, pgaps, pmt, &meta_offset);
		ma_set_meta_gap(pnode, pmt, meta_offset);
	if (ma_is_root(pnode))
	/* Go to the parent node. */
	pnode = mte_parent(penode);
	pmt = mas_parent_type(mas, penode);
	pgaps = ma_gaps(pnode, pmt);
	offset = mte_parent_slot(penode);
	penode = mt_mk_node(pnode, pmt);
 * mas_update_gap() - Update a node's gaps and propagate up if necessary.
 * @mas: the maple state.
static inline void mas_update_gap(struct ma_state *mas)
	unsigned char pslot;
	unsigned long p_gap;
	unsigned long max_gap;
	if (!mt_is_alloc(mas->tree))
	if (mte_is_root(mas->node))
	max_gap = mas_max_gap(mas);
	pslot = mte_parent_slot(mas->node);
	p_gap = ma_gaps(mte_parent(mas->node),
			mas_parent_type(mas, mas->node))[pslot];
	if (p_gap != max_gap)
		mas_parent_gap(mas, pslot, max_gap);
 * mas_adopt_children() - Set the parent pointer of all nodes in @parent to
 * @parent with the slot encoded.
 * @mas: the maple state (for the tree)
 * @parent: the maple encoded node containing the children.
static inline void mas_adopt_children(struct ma_state *mas,
		struct maple_enode *parent)
	enum maple_type type = mte_node_type(parent);
	struct maple_node *node = mas_mn(mas);
	void __rcu **slots = ma_slots(node, type);
	unsigned long *pivots = ma_pivots(node, type);
	struct maple_enode *child;
	unsigned char offset;
	offset = ma_data_end(node, type, pivots, mas->max);
		child = mas_slot_locked(mas, slots, offset);
		mte_set_parent(child, parent, offset);
 * mas_replace() - Replace a maple node in the tree with mas->node.  Uses the
 * parent encoding to locate the maple node in the tree.
 * @mas: the ma_state to use for operations.
 * @advanced: boolean to adopt the child nodes and free the old node (false) or
 * leave the node (true) and handle the adoption and free elsewhere.
static inline void mas_replace(struct ma_state *mas, bool advanced)
	__must_hold(mas->tree->lock)
	struct maple_node *mn = mas_mn(mas);
	struct maple_enode *old_enode;
	unsigned char offset = 0;
	void __rcu **slots = NULL;
	if (ma_is_root(mn)) {
		old_enode = mas_root_locked(mas);
		offset = mte_parent_slot(mas->node);
		slots = ma_slots(mte_parent(mas->node),
				 mas_parent_type(mas, mas->node));
		old_enode = mas_slot_locked(mas, slots, offset);
	if (!advanced && !mte_is_leaf(mas->node))
		mas_adopt_children(mas, mas->node);
	if (mte_is_root(mas->node)) {
		mn->parent = ma_parent_ptr(
			((unsigned long)mas->tree | MA_ROOT_PARENT));
		rcu_assign_pointer(mas->tree->ma_root, mte_mk_root(mas->node));
		mas_set_height(mas);
		rcu_assign_pointer(slots[offset], mas->node);
	mte_set_node_dead(old_enode);
	mas_free(mas, old_enode);
 * mas_new_child() - Find the new child of a node.
 * @mas: the maple state
 * @child: the maple state to store the child.
static inline bool mas_new_child(struct ma_state *mas, struct ma_state *child)
	__must_hold(mas->tree->lock)
	unsigned char offset;
	unsigned long *pivots;
	struct maple_enode *entry;
	struct maple_node *node;
	mt = mte_node_type(mas->node);
	slots = ma_slots(node, mt);
	pivots = ma_pivots(node, mt);
	end = ma_data_end(node, mt, pivots, mas->max);
	for (offset = mas->offset; offset <= end; offset++) {
		entry = mas_slot_locked(mas, slots, offset);
		if (mte_parent(entry) == node) {
			mas->offset = offset + 1;
			child->offset = offset;
 * mab_shift_right() - Shift the data in mab right.  Note, does not clean out the
 * old data or set b_node->b_end.
 * @b_node: the maple_big_node
 * @shift: the shift count
static inline void mab_shift_right(struct maple_big_node *b_node,
		unsigned char shift)
	unsigned long size = b_node->b_end * sizeof(unsigned long);
	memmove(b_node->pivot + shift, b_node->pivot, size);
	memmove(b_node->slot + shift, b_node->slot, size);
	if (b_node->type == maple_arange_64)
		memmove(b_node->gap + shift, b_node->gap, size);
 * mab_middle_node() - Check if a middle node is needed (unlikely)
 * @b_node: the maple_big_node that contains the data.
 * @split: the potential split location
 * @slot_count: the size that can be stored in a single node being considered.
 * Return: true if a middle node is required.
static inline bool mab_middle_node(struct maple_big_node *b_node, int split,
		unsigned char slot_count)
	unsigned char size = b_node->b_end;
	if (size >= 2 * slot_count)
	if (!b_node->slot[split] && (size >= 2 * slot_count - 1))
 * mab_no_null_split() - ensure the split doesn't fall on a NULL
 * @b_node: the maple_big_node with the data
 * @split: the suggested split location
 * @slot_count: the number of slots in the node being considered.
 * Return: the split location.
static inline int mab_no_null_split(struct maple_big_node *b_node,
		unsigned char split, unsigned char slot_count)
	if (!b_node->slot[split]) {
		/*
		 * If the split is less than the max slot && the right side will
		 * still be sufficient, then increment the split on NULL.
		 */
		if ((split < slot_count - 1) &&
		    (b_node->b_end - split) > (mt_min_slots[b_node->type]))
 * mab_calc_split() - Calculate the split location and if there needs to be two
 * splits.
 * @bn: The maple_big_node with the data
 * @mid_split: The second split, if required.  0 otherwise.
 * Return: The first split location.  The middle split is set in @mid_split.
static inline int mab_calc_split(struct ma_state *mas,
		struct maple_big_node *bn, unsigned char *mid_split, unsigned long min)
	unsigned char b_end = bn->b_end;
	int split = b_end / 2; /* Assume equal split. */
	unsigned char slot_min, slot_count = mt_slots[bn->type];
	/*
	 * To support gap tracking, all NULL entries are kept together and a node cannot
	 * end on a NULL entry, with the exception of the left-most leaf.  The
	 * limitation means that the split of a node must be checked for this condition
	 * and be able to put more data in one direction or the other.
	 */
	if (unlikely((mas->mas_flags & MA_STATE_BULK))) {
		split = b_end - mt_min_slots[bn->type];
		if (!ma_is_leaf(bn->type))
		mas->mas_flags |= MA_STATE_REBALANCE;
		if (!bn->slot[split])
	/*
	 * Although extremely rare, it is possible to enter what is known as the 3-way
	 * split scenario.  The 3-way split comes about by means of a store of a range
	 * that overwrites the end and beginning of two full nodes.  The result is a set
	 * of entries that cannot be stored in 2 nodes.  Sometimes, these two nodes can
	 * also be located in different parent nodes which are also full.  This can
	 * carry upwards all the way to the root in the worst case.
	 */
	if (unlikely(mab_middle_node(bn, split, slot_count))) {
		*mid_split = split * 2;
		slot_min = mt_min_slots[bn->type];
		/*
		 * Avoid having a range less than the slot count unless it
		 * causes one node to be deficient.
		 * NOTE: mt_min_slots is 1 based, b_end and split are zero based.
		 */
		while ((split < slot_count - 1) &&
		       ((bn->pivot[split] - min) < slot_count - 1) &&
		       (b_end - split > slot_min))
	/* Avoid ending a node on a NULL entry */
	split = mab_no_null_split(bn, split, slot_count);
	if (unlikely(*mid_split))
		*mid_split = mab_no_null_split(bn, *mid_split, slot_count);
 * mas_mab_cp() - Copy data from a maple state inclusively to a maple_big_node
 * and set @b_node->b_end to the next free slot.
 * @mas: The maple state
 * @mas_start: The starting slot to copy
 * @mas_end: The end slot to copy (inclusively)
 * @b_node: The maple_big_node to place the data
 * @mab_start: The starting location in maple_big_node to store the data.
static inline void mas_mab_cp(struct ma_state *mas, unsigned char mas_start,
		unsigned char mas_end, struct maple_big_node *b_node,
		unsigned char mab_start)
	struct maple_node *node;
	unsigned long *pivots, *gaps;
	int i = mas_start, j = mab_start;
	unsigned char piv_end;
	mt = mte_node_type(mas->node);
	pivots = ma_pivots(node, mt);
		b_node->pivot[j] = pivots[i++];
		if (unlikely(i > mas_end))
	piv_end = min(mas_end, mt_pivots[mt]);
	for (; i < piv_end; i++, j++) {
		b_node->pivot[j] = pivots[i];
		if (unlikely(!b_node->pivot[j]))
		if (unlikely(mas->max == b_node->pivot[j]))
	if (likely(i <= mas_end))
		b_node->pivot[j] = mas_safe_pivot(mas, pivots, i, mt);
	b_node->b_end = ++j;
	slots = ma_slots(node, mt);
	memcpy(b_node->slot + mab_start, slots + mas_start, sizeof(void *) * j);
	if (!ma_is_leaf(mt) && mt_is_alloc(mas->tree)) {
		gaps = ma_gaps(node, mt);
		memcpy(b_node->gap + mab_start, gaps + mas_start,
		       sizeof(unsigned long) * j);
 * mas_leaf_set_meta() - Set the metadata of a leaf if possible.
 * @mas: The maple state
 * @node: The maple node
 * @pivots: pointer to the maple node pivots
 * @mt: The maple type
 * @end: The assumed end
 * Note, end may be incremented within this function but not modified at the
 * source.  This is fine since the metadata is the last thing to be stored in a
 * node during a write.
static inline void mas_leaf_set_meta(struct ma_state *mas,
		struct maple_node *node, unsigned long *pivots,
		enum maple_type mt, unsigned char end)
	/* There is no room for metadata already */
	if (mt_pivots[mt] <= end)
	if (pivots[end] && pivots[end] < mas->max)
	if (end < mt_slots[mt] - 1)
		ma_set_meta(node, mt, 0, end);
 * mab_mas_cp() - Copy data from maple_big_node to a maple encoded node.
 * @b_node: the maple_big_node that has the data
 * @mab_start: the start location in @b_node.
 * @mab_end: The end location in @b_node (inclusively)
 * @mas: The maple state with the maple encoded node.
static inline void mab_mas_cp(struct maple_big_node *b_node,
		unsigned char mab_start, unsigned char mab_end,
		struct ma_state *mas, bool new_max)
	enum maple_type mt = mte_node_type(mas->node);
	struct maple_node *node = mte_to_node(mas->node);
	void __rcu **slots = ma_slots(node, mt);
	unsigned long *pivots = ma_pivots(node, mt);
	unsigned long *gaps = NULL;
	if (mab_end - mab_start > mt_pivots[mt])
	if (!pivots[mt_pivots[mt] - 1])
		slots[mt_pivots[mt]] = NULL;
		pivots[j++] = b_node->pivot[i++];
	} while (i <= mab_end && likely(b_node->pivot[i]));
	memcpy(slots, b_node->slot + mab_start,
	       sizeof(void *) * (i - mab_start));
	mas->max = b_node->pivot[i - 1];
	if (likely(!ma_is_leaf(mt) && mt_is_alloc(mas->tree))) {
		unsigned long max_gap = 0;
		unsigned char offset = 15;
		gaps = ma_gaps(node, mt);
			gaps[--j] = b_node->gap[--i];
			if (gaps[j] > max_gap) {
		ma_set_meta(node, mt, offset, end);
		mas_leaf_set_meta(mas, node, pivots, mt, end);
 * mas_descend_adopt() - Descend through a sub-tree and adopt children.
 * @mas: the maple state with the maple encoded node of the sub-tree.
 * Descend through a sub-tree and adopt children who do not have the correct
 * parents set.  Follow the parents which have the correct parents as they are
 * the new entries which need to be followed to find other incorrectly set
 * parents.
static inline void mas_descend_adopt(struct ma_state *mas)
	struct ma_state list[3], next[3];
	/*
	 * At each level there may be up to 3 correct parent pointers which indicate
	 * the new nodes which need to be walked to find any new nodes at a lower level.
	 */
	for (i = 0; i < 3; i++) {
	while (!mte_is_leaf(list[0].node)) {
		for (i = 0; i < 3; i++) {
			if (mas_is_none(&list[i]))
			if (i && list[i-1].node == list[i].node)
			while ((n < 3) && (mas_new_child(&list[i], &next[n])))
			mas_adopt_children(&list[i], list[i].node);
		next[n++].node = MAS_NONE;
		/* descend by setting the list to the children */
		for (i = 0; i < 3; i++)
 * mas_bulk_rebalance() - Rebalance the end of a tree after a bulk insert.
 * @mas: The maple state
 * @end: The maple node end
 * @mt: The maple node type
static inline void mas_bulk_rebalance(struct ma_state *mas, unsigned char end,
	if (!(mas->mas_flags & MA_STATE_BULK))
	if (mte_is_root(mas->node))
	if (end > mt_min_slots[mt]) {
		mas->mas_flags &= ~MA_STATE_REBALANCE;
 * mas_store_b_node() - Store an @entry into the b_node while also copying the
 * data from a maple encoded node.
 * @wr_mas: the maple write state
 * @b_node: the maple_big_node to fill with data
 * @offset_end: the offset to end copying
 * The actual end of the data stored in @b_node is set in @b_node->b_end.
static noinline_for_kasan void mas_store_b_node(struct ma_wr_state *wr_mas,
		struct maple_big_node *b_node, unsigned char offset_end)
	unsigned char b_end;
	/* Possible underflow of piv will wrap back to 0 before use. */
	unsigned long piv;
	struct ma_state *mas = wr_mas->mas;
	b_node->type = wr_mas->type;
		/* Copy start data up to insert. */
		mas_mab_cp(mas, 0, slot - 1, b_node, 0);
		b_end = b_node->b_end;
		piv = b_node->pivot[b_end - 1];
	if (piv + 1 < mas->index) {
		/* Handle range starting after old range */
		b_node->slot[b_end] = wr_mas->content;
		if (!wr_mas->content)
			b_node->gap[b_end] = mas->index - 1 - piv;
		b_node->pivot[b_end++] = mas->index - 1;
	/* Store the new entry. */
	mas->offset = b_end;
	b_node->slot[b_end] = wr_mas->entry;
	b_node->pivot[b_end] = mas->last;
	if (mas->last >= mas->max)
	/* Handle new range ending before old range ends */
	piv = mas_logical_pivot(mas, wr_mas->pivots, offset_end, wr_mas->type);
	if (piv > mas->last) {
		if (piv == ULONG_MAX)
			mas_bulk_rebalance(mas, b_node->b_end, wr_mas->type);
		if (offset_end != slot)
			wr_mas->content = mas_slot_locked(mas, wr_mas->slots,
		b_node->slot[++b_end] = wr_mas->content;
		if (!wr_mas->content)
			b_node->gap[b_end] = piv - mas->last + 1;
		b_node->pivot[b_end] = piv;
	slot = offset_end + 1;
	if (slot > wr_mas->node_end)
	/* Copy end data to the end of the node. */
	mas_mab_cp(mas, slot, wr_mas->node_end + 1, b_node, ++b_end);
	b_node->b_end = b_end;
 * mas_prev_sibling() - Find the previous node with the same parent.
 * @mas: the maple state
 * Return: True if there is a previous sibling, false otherwise.
static inline bool mas_prev_sibling(struct ma_state *mas)
	unsigned int p_slot = mte_parent_slot(mas->node);
	if (mte_is_root(mas->node))
	mas->offset = p_slot - 1;
 * mas_next_sibling() - Find the next node with the same parent.
 * @mas: the maple state
 * Return: true if there is a next sibling, false otherwise.
static inline bool mas_next_sibling(struct ma_state *mas)
	MA_STATE(parent, mas->tree, mas->index, mas->last);
	if (mte_is_root(mas->node))
	mas_ascend(&parent);
	parent.offset = mte_parent_slot(mas->node) + 1;
	if (parent.offset > mas_data_end(&parent))
 * mte_node_or_none() - Return the encoded node or MAS_NONE.
 * @enode: The encoded maple node.
 * Shorthand to avoid setting %NULLs in the tree or maple_subtree_state.
 * Return: @enode or MAS_NONE
static inline struct maple_enode *mte_node_or_none(struct maple_enode *enode)
	return ma_enode_ptr(MAS_NONE);
 * mas_wr_node_walk() - Find the correct offset for the index in the @mas.
 * @wr_mas: The maple write state
 * Uses mas_slot_locked() and does not need to worry about dead nodes.
static inline void mas_wr_node_walk(struct ma_wr_state *wr_mas)
	struct ma_state *mas = wr_mas->mas;
	unsigned char count, offset;
	if (unlikely(ma_is_dense(wr_mas->type))) {
		wr_mas->r_max = wr_mas->r_min = mas->index;
		mas->offset = mas->index = mas->min;
	wr_mas->node = mas_mn(wr_mas->mas);
	wr_mas->pivots = ma_pivots(wr_mas->node, wr_mas->type);
	count = wr_mas->node_end = ma_data_end(wr_mas->node, wr_mas->type,
					       wr_mas->pivots, mas->max);
	offset = mas->offset;
	while (offset < count && mas->index > wr_mas->pivots[offset])
		offset++;
	wr_mas->r_max = offset < count ? wr_mas->pivots[offset] : mas->max;
	wr_mas->r_min = mas_safe_min(mas, wr_mas->pivots, offset);
	wr_mas->offset_end = mas->offset = offset;
 * mas_topiary_range() - Add a range of slots to the topiary.
 * @mas: The maple state
 * @destroy: The topiary to add the slots (usually destroy)
 * @start: The starting slot, inclusive
 * @end: The end slot, inclusive
static inline void mas_topiary_range(struct ma_state *mas,
		struct ma_topiary *destroy, unsigned char start, unsigned char end)
	unsigned char offset;
	MT_BUG_ON(mas->tree, mte_is_leaf(mas->node));
	slots = ma_slots(mas_mn(mas), mte_node_type(mas->node));
	for (offset = start; offset <= end; offset++) {
		struct maple_enode *enode = mas_slot_locked(mas, slots, offset);
		if (mte_dead_node(enode))
		mat_add(destroy, enode);
 * mast_topiary() - Add the portions of the tree to the removal list; either to
 * be freed or discarded (destroy walk).
 * @mast: The maple_subtree_state.
static inline void mast_topiary(struct maple_subtree_state *mast)
	MA_WR_STATE(wr_mas, mast->orig_l, NULL);
	unsigned char r_start, r_end;
	unsigned char l_start, l_end;
	void __rcu **l_slots, **r_slots;
	wr_mas.type = mte_node_type(mast->orig_l->node);
	mast->orig_l->index = mast->orig_l->last;
	mas_wr_node_walk(&wr_mas);
	l_start = mast->orig_l->offset + 1;
	l_end = mas_data_end(mast->orig_l);
	r_end = mast->orig_r->offset;
	l_slots = ma_slots(mas_mn(mast->orig_l),
			   mte_node_type(mast->orig_l->node));
	r_slots = ma_slots(mas_mn(mast->orig_r),
			   mte_node_type(mast->orig_r->node));
	if ((l_start < l_end) &&
	    mte_dead_node(mas_slot_locked(mast->orig_l, l_slots, l_start))) {
	if (mte_dead_node(mas_slot_locked(mast->orig_r, r_slots, r_end))) {
	if ((l_start > r_end) && (mast->orig_l->node == mast->orig_r->node))
	/* At the node where left and right sides meet, add the parts between */
	if (mast->orig_l->node == mast->orig_r->node) {
		return mas_topiary_range(mast->orig_l, mast->destroy,
	/* mast->orig_r is different and consumed. */
	if (mte_is_leaf(mast->orig_r->node))
	if (mte_dead_node(mas_slot_locked(mast->orig_l, l_slots, l_end)))
	if (l_start <= l_end)
		mas_topiary_range(mast->orig_l, mast->destroy, l_start, l_end);
	if (mte_dead_node(mas_slot_locked(mast->orig_r, r_slots, r_start)))
	if (r_start <= r_end)
		mas_topiary_range(mast->orig_r, mast->destroy, 0, r_end);
2427 * mast_rebalance_next() - Rebalance against the next node
2428 * @mast: The maple subtree state
2431 static inline void mast_rebalance_next(struct maple_subtree_state *mast)
2433 unsigned char b_end = mast->bn->b_end;
2435 mas_mab_cp(mast->orig_r, 0, mt_slot_count(mast->orig_r->node),
2437 mast->orig_r->last = mast->orig_r->max;
2441 * mast_rebalance_prev() - Rebalance against the previous node
2442 * @mast: The maple subtree state
2445 static inline void mast_rebalance_prev(struct maple_subtree_state *mast)
2447 unsigned char end = mas_data_end(mast->orig_l) + 1;
2448 unsigned char b_end = mast->bn->b_end;
2450 mab_shift_right(mast->bn, end);
2451 mas_mab_cp(mast->orig_l, 0, end - 1, mast->bn, 0);
2452 mast->l->min = mast->orig_l->min;
2453 mast->orig_l->index = mast->orig_l->min;
2454 mast->bn->b_end = end + b_end;
2455 mast->l->offset += end;
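/*
 * For example, with hypothetical counts: if the previous node holds entries
 * at offsets 0-4, end is 5.  The big node's b_end entries are shifted right
 * by 5, the previous node's five entries are copied into offsets 0-4, b_end
 * grows by 5, and mast->l->offset moves right by the same amount so it still
 * refers to the same logical position.
 */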
2459 * mast_spanning_rebalance() - Rebalance nodes with the nearest neighbour,
2460 * favouring the node to the right.  Check the nodes to the right then the left
2461 * at each level upwards until the root is reached.  Free and destroy as needed.
2462 * Data is copied into the @mast->bn.
2463 * @mast: The maple_subtree_state.
2466 bool mast_spanning_rebalance(struct maple_subtree_state *mast)
2468 struct ma_state r_tmp = *mast->orig_r;
2469 struct ma_state l_tmp = *mast->orig_l;
2470 struct maple_enode *ancestor = NULL;
2471 unsigned char start, end;
2472 unsigned char depth = 0;
2477 mas_ascend(mast->orig_r);
2478 mas_ascend(mast->orig_l);
2481 (mast->orig_r->node == mast->orig_l->node)) {
2482 ancestor = mast->orig_r->node;
2483 end = mast->orig_r->offset - 1;
2484 start = mast->orig_l->offset + 1;
2487 if (mast->orig_r->offset < mas_data_end(mast->orig_r)) {
2489 ancestor = mast->orig_r->node;
2493 mast->orig_r->offset++;
2495 mas_descend(mast->orig_r);
2496 mast->orig_r->offset = 0;
2500 mast_rebalance_next(mast);
2502 unsigned char l_off = 0;
2503 struct maple_enode *child = r_tmp.node;
2506 if (ancestor == r_tmp.node)
2512 if (l_off < r_tmp.offset)
2513 mas_topiary_range(&r_tmp, mast->destroy,
2514 l_off, r_tmp.offset);
2516 if (l_tmp.node != child)
2517 mat_add(mast->free, child);
2519 } while (r_tmp.node != ancestor);
2521 *mast->orig_l = l_tmp;
2524 } else if (mast->orig_l->offset != 0) {
2526 ancestor = mast->orig_l->node;
2527 end = mas_data_end(mast->orig_l);
2530 mast->orig_l->offset--;
2532 mas_descend(mast->orig_l);
2533 mast->orig_l->offset =
2534 mas_data_end(mast->orig_l);
2538 mast_rebalance_prev(mast);
2540 unsigned char r_off;
2541 struct maple_enode *child = l_tmp.node;
2544 if (ancestor == l_tmp.node)
2547 r_off = mas_data_end(&l_tmp);
2549 if (l_tmp.offset < r_off)
2552 if (l_tmp.offset < r_off)
2553 mas_topiary_range(&l_tmp, mast->destroy,
2554 l_tmp.offset, r_off);
2556 if (r_tmp.node != child)
2557 mat_add(mast->free, child);
2559 } while (l_tmp.node != ancestor);
2561 *mast->orig_r = r_tmp;
2564 } while (!mte_is_root(mast->orig_r->node));
2566 *mast->orig_r = r_tmp;
2567 *mast->orig_l = l_tmp;
2572 * mast_ascend_free() - Add current original maple state nodes to the free list
2574 * @mast: the maple subtree state.
2576 * Ascend the original left and right sides and add the previous nodes to the
2577 * free list. Set the slots to point to the correct location in the new nodes.
2580 mast_ascend_free(struct maple_subtree_state *mast)
2582 MA_WR_STATE(wr_mas, mast->orig_r, NULL);
2583 struct maple_enode *left = mast->orig_l->node;
2584 struct maple_enode *right = mast->orig_r->node;
2586 mas_ascend(mast->orig_l);
2587 mas_ascend(mast->orig_r);
2588 mat_add(mast->free, left);
2591 mat_add(mast->free, right);
2593 mast->orig_r->offset = 0;
2594 mast->orig_r->index = mast->r->max;
2595 /* last should be larger than or equal to index */
2596 if (mast->orig_r->last < mast->orig_r->index)
2597 mast->orig_r->last = mast->orig_r->index;
2599 * The node may not contain the value so set the slot to ensure all
2600 * of the node's contents are freed or destroyed.
2602 wr_mas.type = mte_node_type(mast->orig_r->node);
2603 mas_wr_node_walk(&wr_mas);
2604 /* Set up the left side of things */
2605 mast->orig_l->offset = 0;
2606 mast->orig_l->index = mast->l->min;
2607 wr_mas.mas = mast->orig_l;
2608 wr_mas.type = mte_node_type(mast->orig_l->node);
2609 mas_wr_node_walk(&wr_mas);
2611 mast->bn->type = wr_mas.type;
2615 * mas_new_ma_node() - Create and return a new maple node. Helper function.
2616 * @mas: the maple state with the allocations.
2617 * @b_node: the maple_big_node with the type encoding.
2619 * Use the node type from the maple_big_node to allocate a new node from the
2620 * ma_state. This function exists mainly for code readability.
2622 * Return: A new maple encoded node
2624 static inline struct maple_enode
2625 *mas_new_ma_node(struct ma_state *mas, struct maple_big_node *b_node)
2627 return mt_mk_node(ma_mnode_ptr(mas_pop_node(mas)), b_node->type);
2631 * mas_mab_to_node() - Set up right and middle nodes
2633 * @mas: the maple state that contains the allocations.
2634 * @b_node: the node which contains the data.
2635 * @left: The pointer which will have the left node
2636 * @right: The pointer which may have the right node
2637 * @middle: the pointer which may have the middle node (rare)
2638 * @mid_split: the split location for the middle node
2640 * Return: the split location of the left node.
2642 static inline unsigned char mas_mab_to_node(struct ma_state *mas,
2643 struct maple_big_node *b_node, struct maple_enode **left,
2644 struct maple_enode **right, struct maple_enode **middle,
2645 unsigned char *mid_split, unsigned long min)
2647 unsigned char split = 0;
2648 unsigned char slot_count = mt_slots[b_node->type];
2650 *left = mas_new_ma_node(mas, b_node);
2655 if (b_node->b_end < slot_count) {
2656 split = b_node->b_end;
2658 split = mab_calc_split(mas, b_node, mid_split, min);
2659 *right = mas_new_ma_node(mas, b_node);
2663 *middle = mas_new_ma_node(mas, b_node);
2670 * mab_set_b_end() - Add an entry to @b_node at @b_node->b_end and increment the end
2672 * @b_node: the big node to add the entry to
2673 * @mas: the maple state to get the pivot (mas->max)
2674 * @entry: the entry to add; if NULL, nothing happens.
2676 static inline void mab_set_b_end(struct maple_big_node *b_node,
2677 struct ma_state *mas,
2683 b_node->slot[b_node->b_end] = entry;
2684 if (mt_is_alloc(mas->tree))
2685 b_node->gap[b_node->b_end] = mas_max_gap(mas);
2686 b_node->pivot[b_node->b_end++] = mas->max;
2690 * mas_set_split_parent() - combine_then_separate helper function. Sets the parent
2691 * of @mas->node to either @left or @right, depending on @slot and @split
2693 * @mas: the maple state with the node that needs a parent
2694 * @left: possible parent 1
2695 * @right: possible parent 2
2696 * @slot: the slot in which mas->node was placed
2697 * @split: the split location between @left and @right
2699 static inline void mas_set_split_parent(struct ma_state *mas,
2700 struct maple_enode *left,
2701 struct maple_enode *right,
2702 unsigned char *slot, unsigned char split)
2704 if (mas_is_none(mas))
2707 if ((*slot) <= split)
2708 mte_set_parent(mas->node, left, *slot);
2710 mte_set_parent(mas->node, right, (*slot) - split - 1);
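/*
 * For example, with split = 7: a child recorded at slot 5 is parented to
 * @left at offset 5, while a child at slot 9 is parented to @right at
 * offset 9 - 7 - 1 = 1, the second slot of the right parent.
 */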
2716 * mte_mid_split_check() - Check if the next node passes the mid-split
2717 * @l: Pointer to the left encoded maple node.
2718 * @r: Pointer to the right encoded maple node.
2719 * @right: The encoded maple node to the right of the mid-split.
2721 * @split: The split location.
2722 * @mid_split: The middle split.
2724 static inline void mte_mid_split_check(struct maple_enode **l,
2725 struct maple_enode **r,
2726 struct maple_enode *right,
2728 unsigned char *split,
2729 unsigned char mid_split)
2734 if (slot < mid_split)
2743 * mast_set_split_parents() - Helper function to set three nodes' parents.  Slot
2744 * is taken from @mast->l.
2745 * @mast: the maple subtree state
2746 * @left: the left node
 * @middle: the middle node
2747 * @right: the right node
2748 * @split: the split location.
 * @mid_split: the middle split location.
2750 static inline void mast_set_split_parents(struct maple_subtree_state *mast,
2751 struct maple_enode *left,
2752 struct maple_enode *middle,
2753 struct maple_enode *right,
2754 unsigned char split,
2755 unsigned char mid_split)
2758 struct maple_enode *l = left;
2759 struct maple_enode *r = right;
2761 if (mas_is_none(mast->l))
2767 slot = mast->l->offset;
2769 mte_mid_split_check(&l, &r, right, slot, &split, mid_split);
2770 mas_set_split_parent(mast->l, l, r, &slot, split);
2772 mte_mid_split_check(&l, &r, right, slot, &split, mid_split);
2773 mas_set_split_parent(mast->m, l, r, &slot, split);
2775 mte_mid_split_check(&l, &r, right, slot, &split, mid_split);
2776 mas_set_split_parent(mast->r, l, r, &slot, split);
2780 * mas_wmb_replace() - Write memory barrier and replace
2781 * @mas: The maple state
2782 * @free: the maple topiary list of nodes to free
2783 * @destroy: The maple topiary list of nodes to destroy (walk and free)
2785 * Updates gap as necessary.
2787 static inline void mas_wmb_replace(struct ma_state *mas,
2788 struct ma_topiary *free,
2789 struct ma_topiary *destroy)
2791 /* All nodes must see old data as dead prior to replacing that data */
2792 smp_wmb(); /* Needed for RCU */
2794 /* Insert the new data in the tree */
2795 mas_replace(mas, true);
2797 if (!mte_is_leaf(mas->node))
2798 mas_descend_adopt(mas);
2800 mas_mat_free(mas, free);
2803 mas_mat_destroy(mas, destroy);
2805 if (mte_is_leaf(mas->node))
2808 mas_update_gap(mas);
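/*
 * The write barrier above pairs with readers re-checking for dead nodes
 * after every slot dereference; a racing reader follows the pattern used by
 * the walk functions later in this file (sketch, label illustrative):
 *
 *	next = mt_slot(mas->tree, slots, offset);
 *	if (unlikely(ma_dead_node(node)))
 *		goto retry;
 */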
2812 * mast_new_root() - Set a new tree root during subtree creation
2813 * @mast: The maple subtree state
2814 * @mas: The maple state
2816 static inline void mast_new_root(struct maple_subtree_state *mast,
2817 struct ma_state *mas)
2819 mas_mn(mast->l)->parent =
2820 ma_parent_ptr(((unsigned long)mas->tree | MA_ROOT_PARENT));
2821 if (!mte_dead_node(mast->orig_l->node) &&
2822 !mte_is_root(mast->orig_l->node)) {
2824 mast_ascend_free(mast);
2826 } while (!mte_is_root(mast->orig_l->node));
2828 if ((mast->orig_l->node != mas->node) &&
2829 (mast->l->depth > mas_mt_height(mas))) {
2830 mat_add(mast->free, mas->node);
2835 * mast_cp_to_nodes() - Copy data out to nodes.
2836 * @mast: The maple subtree state
2837 * @left: The left encoded maple node
2838 * @middle: The middle encoded maple node
2839 * @right: The right encoded maple node
2840 * @split: The location to split between left and (middle ? middle : right)
2841 * @mid_split: The location to split between middle and right.
2843 static inline void mast_cp_to_nodes(struct maple_subtree_state *mast,
2844 struct maple_enode *left, struct maple_enode *middle,
2845 struct maple_enode *right, unsigned char split, unsigned char mid_split)
2847 bool new_lmax = true;
2849 mast->l->node = mte_node_or_none(left);
2850 mast->m->node = mte_node_or_none(middle);
2851 mast->r->node = mte_node_or_none(right);
2853 mast->l->min = mast->orig_l->min;
2854 if (split == mast->bn->b_end) {
2855 mast->l->max = mast->orig_r->max;
2859 mab_mas_cp(mast->bn, 0, split, mast->l, new_lmax);
2862 mab_mas_cp(mast->bn, 1 + split, mid_split, mast->m, true);
2863 mast->m->min = mast->bn->pivot[split] + 1;
2867 mast->r->max = mast->orig_r->max;
2869 mab_mas_cp(mast->bn, 1 + split, mast->bn->b_end, mast->r, false);
2870 mast->r->min = mast->bn->pivot[split] + 1;
2875 * mast_combine_cp_left() - Copy in the original left side of the tree into the
2876 * combined data set in the maple subtree state big node.
2877 * @mast: The maple subtree state
2879 static inline void mast_combine_cp_left(struct maple_subtree_state *mast)
2881 unsigned char l_slot = mast->orig_l->offset;
2886 mas_mab_cp(mast->orig_l, 0, l_slot - 1, mast->bn, 0);
2890 * mast_combine_cp_right() - Copy in the original right side of the tree into the
2891 * combined data set in the maple subtree state big node.
2892 * @mast: The maple subtree state
2894 static inline void mast_combine_cp_right(struct maple_subtree_state *mast)
2896 if (mast->bn->pivot[mast->bn->b_end - 1] >= mast->orig_r->max)
2899 mas_mab_cp(mast->orig_r, mast->orig_r->offset + 1,
2900 mt_slot_count(mast->orig_r->node), mast->bn,
2902 mast->orig_r->last = mast->orig_r->max;
2906 * mast_sufficient() - Check if the maple subtree state has enough data in the big
2907 * node to create at least one sufficient node
2908 * @mast: the maple subtree state
2910 static inline bool mast_sufficient(struct maple_subtree_state *mast)
2912 if (mast->bn->b_end > mt_min_slot_count(mast->orig_l->node))
2919 * mast_overflow() - Check if there is too much data in the subtree state for a single node.
2921 * @mast: The maple subtree state
2923 static inline bool mast_overflow(struct maple_subtree_state *mast)
2925 if (mast->bn->b_end >= mt_slot_count(mast->orig_l->node))
2931 static inline void *mtree_range_walk(struct ma_state *mas)
2933 unsigned long *pivots;
2934 unsigned char offset;
2935 struct maple_node *node;
2936 struct maple_enode *next, *last;
2937 enum maple_type type;
2940 unsigned long max, min;
2941 unsigned long prev_max, prev_min;
2949 node = mte_to_node(next);
2950 type = mte_node_type(next);
2951 pivots = ma_pivots(node, type);
2952 end = ma_data_end(node, type, pivots, max);
2953 if (unlikely(ma_dead_node(node)))
2956 if (pivots[offset] >= mas->index) {
2959 max = pivots[offset];
2965 } while ((offset < end) && (pivots[offset] < mas->index));
2968 min = pivots[offset - 1] + 1;
2970 if (likely(offset < end && pivots[offset]))
2971 max = pivots[offset];
2974 slots = ma_slots(node, type);
2975 next = mt_slot(mas->tree, slots, offset);
2976 if (unlikely(ma_dead_node(node)))
2978 } while (!ma_is_leaf(type));
2980 mas->offset = offset;
2983 mas->min = prev_min;
2984 mas->max = prev_max;
2986 return (void *)next;
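/*
 * Usage sketch, assuming the range [10, 20] was stored in @mt earlier: a
 * walk from MAS_START resolves through this function, returning the entry
 * and updating the state to the containing range:
 *
 *	MA_STATE(mas, mt, 15, 15);
 *
 *	rcu_read_lock();
 *	entry = mas_walk(&mas);	// entry for [10, 20]
 *	rcu_read_unlock();
 */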
2994 * mas_spanning_rebalance() - Rebalance across two nodes which may not be peers.
2995 * @mas: The starting maple state
2996 * @mast: The maple_subtree_state, keeps track of 4 maple states.
2997 * @count: The estimated count of iterations needed.
2999 * Follow the tree upwards from @mast->orig_l and @mast->orig_r for @count, or until the root
3000 * is hit. First @mast->bn is split into two entries which are inserted into the
3001 * next iteration of the loop. @mast->bn is returned populated with the final
3002 * iteration. @mas is used to obtain allocations. orig_l_mas keeps track of the
3003 * nodes that will remain active by using orig_l_mas->index and orig_l_mas->last
3004 * to account for what has been copied into the new sub-tree. The update of
3005 * orig_l_mas->last is used in mas_consume to find the slots that will need to
3006 * be either freed or destroyed. orig_l_mas->depth keeps track of the height of
3007 * the new sub-tree in case the sub-tree becomes the full tree.
3009 * Return: the number of elements in b_node during the last loop.
3011 static int mas_spanning_rebalance(struct ma_state *mas,
3012 struct maple_subtree_state *mast, unsigned char count)
3014 unsigned char split, mid_split;
3015 unsigned char slot = 0;
3016 struct maple_enode *left = NULL, *middle = NULL, *right = NULL;
3018 MA_STATE(l_mas, mas->tree, mas->index, mas->index);
3019 MA_STATE(r_mas, mas->tree, mas->index, mas->last);
3020 MA_STATE(m_mas, mas->tree, mas->index, mas->index);
3021 MA_TOPIARY(free, mas->tree);
3022 MA_TOPIARY(destroy, mas->tree);
3025 * The tree needs to be rebalanced and leaves need to be kept at the same level.
3026 * Rebalancing is done by use of the ``struct ma_topiary``.
3032 mast->destroy = &destroy;
3033 l_mas.node = r_mas.node = m_mas.node = MAS_NONE;
3035 /* If this is not the root and the data is insufficient, rebalance into neighbouring nodes. */
3036 if (((mast->orig_l->min != 0) || (mast->orig_r->max != ULONG_MAX)) &&
3037 unlikely(mast->bn->b_end <= mt_min_slots[mast->bn->type]))
3038 mast_spanning_rebalance(mast);
3040 mast->orig_l->depth = 0;
3043 * Each level of the tree is examined and balanced, pushing data to the left or
3044 * right, or rebalancing against left or right nodes is employed to avoid
3045 * rippling up the tree to limit the amount of churn. Once a new sub-section of
3046 * the tree is created, there may be a mix of new and old nodes. The old nodes
3047 * will have the incorrect parent pointers and currently be in two trees: the
3048 * original tree and the partially new tree. To remedy the parent pointers in
3049 * the old tree, the new data is swapped into the active tree and a walk down
3050 * the tree is performed and the parent pointers are updated.
3051 * See mas_descend_adopt() for more information.
3055 mast->bn->type = mte_node_type(mast->orig_l->node);
3056 split = mas_mab_to_node(mas, mast->bn, &left, &right, &middle,
3057 &mid_split, mast->orig_l->min);
3058 mast_set_split_parents(mast, left, middle, right, split,
3060 mast_cp_to_nodes(mast, left, middle, right, split, mid_split);
3063 * Copy data from next level in the tree to mast->bn from next
3066 memset(mast->bn, 0, sizeof(struct maple_big_node));
3067 mast->bn->type = mte_node_type(left);
3068 mast->orig_l->depth++;
3070 /* Root already stored in l->node. */
3071 if (mas_is_root_limits(mast->l))
3074 mast_ascend_free(mast);
3075 mast_combine_cp_left(mast);
3076 l_mas.offset = mast->bn->b_end;
3077 mab_set_b_end(mast->bn, &l_mas, left);
3078 mab_set_b_end(mast->bn, &m_mas, middle);
3079 mab_set_b_end(mast->bn, &r_mas, right);
3081 /* Copy anything necessary out of the right node. */
3082 mast_combine_cp_right(mast);
3084 mast->orig_l->last = mast->orig_l->max;
3086 if (mast_sufficient(mast))
3089 if (mast_overflow(mast))
3092 /* May be a new root stored in mast->bn */
3093 if (mas_is_root_limits(mast->orig_l))
3096 mast_spanning_rebalance(mast);
3098 /* rebalancing from other nodes may require another loop. */
3103 l_mas.node = mt_mk_node(ma_mnode_ptr(mas_pop_node(mas)),
3104 mte_node_type(mast->orig_l->node));
3105 mast->orig_l->depth++;
3106 mab_mas_cp(mast->bn, 0, mt_slots[mast->bn->type] - 1, &l_mas, true);
3107 mte_set_parent(left, l_mas.node, slot);
3109 mte_set_parent(middle, l_mas.node, ++slot);
3112 mte_set_parent(right, l_mas.node, ++slot);
3114 if (mas_is_root_limits(mast->l)) {
3116 mast_new_root(mast, mas);
3118 mas_mn(&l_mas)->parent = mas_mn(mast->orig_l)->parent;
3121 if (!mte_dead_node(mast->orig_l->node))
3122 mat_add(&free, mast->orig_l->node);
3124 mas->depth = mast->orig_l->depth;
3125 *mast->orig_l = l_mas;
3126 mte_set_node_dead(mas->node);
3128 /* Set up mas for insertion. */
3129 mast->orig_l->depth = mas->depth;
3130 mast->orig_l->alloc = mas->alloc;
3131 *mas = *mast->orig_l;
3132 mas_wmb_replace(mas, &free, &destroy);
3133 mtree_range_walk(mas);
3134 return mast->bn->b_end;
3138 * mas_rebalance() - Rebalance a given node.
3139 * @mas: The maple state
3140 * @b_node: The big maple node.
3142 * Rebalance two nodes into a single node or two new nodes that are sufficient.
3143 * Continue upwards until the tree is sufficient.
3145 * Return: the number of elements in b_node during the last loop.
3147 static inline int mas_rebalance(struct ma_state *mas,
3148 struct maple_big_node *b_node)
3150 char empty_count = mas_mt_height(mas);
3151 struct maple_subtree_state mast;
3152 unsigned char shift, b_end = ++b_node->b_end;
3154 MA_STATE(l_mas, mas->tree, mas->index, mas->last);
3155 MA_STATE(r_mas, mas->tree, mas->index, mas->last);
3157 trace_ma_op(__func__, mas);
3160 * Rebalancing occurs if a node is insufficient. Data is rebalanced
3161 * against the node to the right if it exists, otherwise the node to the
3162 * left of this node is rebalanced against this node. If rebalancing
3163 * causes just one node to be produced instead of two, then the parent
3164 * is also examined and rebalanced if it is insufficient. Every level
3165 * tries to combine the data in the same way. If one node contains the
3166 * entire range of the tree, then that node is used as a new root node.
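 *
 * For example, with hypothetical counts: a leaf that has dropped below the
 * minimum occupancy is combined with its right sibling into the big node.
 * If the combined data still fits in one node, a single node replaces both
 * and the parent is examined next; otherwise the data is re-split into two
 * sufficient nodes and the rebalance stops.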
3168 mas_node_count(mas, 1 + empty_count * 3);
3169 if (mas_is_err(mas))
3172 mast.orig_l = &l_mas;
3173 mast.orig_r = &r_mas;
3175 mast.bn->type = mte_node_type(mas->node);
3177 l_mas = r_mas = *mas;
3179 if (mas_next_sibling(&r_mas)) {
3180 mas_mab_cp(&r_mas, 0, mt_slot_count(r_mas.node), b_node, b_end);
3181 r_mas.last = r_mas.index = r_mas.max;
3183 mas_prev_sibling(&l_mas);
3184 shift = mas_data_end(&l_mas) + 1;
3185 mab_shift_right(b_node, shift);
3186 mas->offset += shift;
3187 mas_mab_cp(&l_mas, 0, shift - 1, b_node, 0);
3188 b_node->b_end = shift + b_end;
3189 l_mas.index = l_mas.last = l_mas.min;
3192 return mas_spanning_rebalance(mas, &mast, empty_count);
3196 * mas_destroy_rebalance() - Rebalance the left-most node while destroying the maple tree.
3198 * @mas: The maple state
3199 * @end: The end of the left-most node.
3201 * During a mass-insert event (such as forking), it may be necessary to
3202 * rebalance the left-most node when it is not sufficient.
3204 static inline void mas_destroy_rebalance(struct ma_state *mas, unsigned char end)
3206 enum maple_type mt = mte_node_type(mas->node);
3207 struct maple_node reuse, *newnode, *parent, *new_left, *left, *node;
3208 struct maple_enode *eparent;
3209 unsigned char offset, tmp, split = mt_slots[mt] / 2;
3210 void __rcu **l_slots, **slots;
3211 unsigned long *l_pivs, *pivs, gap;
3212 bool in_rcu = mt_in_rcu(mas->tree);
3214 MA_STATE(l_mas, mas->tree, mas->index, mas->last);
3217 mas_prev_sibling(&l_mas);
3221 /* Allocate for both left and right as well as parent. */
3222 mas_node_count(mas, 3);
3223 if (mas_is_err(mas))
3226 newnode = mas_pop_node(mas);
3232 newnode->parent = node->parent;
3233 slots = ma_slots(newnode, mt);
3234 pivs = ma_pivots(newnode, mt);
3235 left = mas_mn(&l_mas);
3236 l_slots = ma_slots(left, mt);
3237 l_pivs = ma_pivots(left, mt);
3238 if (!l_slots[split])
3240 tmp = mas_data_end(&l_mas) - split;
3242 memcpy(slots, l_slots + split + 1, sizeof(void *) * tmp);
3243 memcpy(pivs, l_pivs + split + 1, sizeof(unsigned long) * tmp);
3244 pivs[tmp] = l_mas.max;
3245 memcpy(slots + tmp, ma_slots(node, mt), sizeof(void *) * end);
3246 memcpy(pivs + tmp, ma_pivots(node, mt), sizeof(unsigned long) * end);
3248 l_mas.max = l_pivs[split];
3249 mas->min = l_mas.max + 1;
3250 eparent = mt_mk_node(mte_parent(l_mas.node),
3251 mas_parent_type(&l_mas, l_mas.node));
3254 unsigned char max_p = mt_pivots[mt];
3255 unsigned char max_s = mt_slots[mt];
3258 memset(pivs + tmp, 0,
3259 sizeof(unsigned long) * (max_p - tmp));
3261 if (tmp < mt_slots[mt])
3262 memset(slots + tmp, 0, sizeof(void *) * (max_s - tmp));
3264 memcpy(node, newnode, sizeof(struct maple_node));
3265 ma_set_meta(node, mt, 0, tmp - 1);
3266 mte_set_pivot(eparent, mte_parent_slot(l_mas.node),
3269 /* Remove data from l_pivs. */
3271 memset(l_pivs + tmp, 0, sizeof(unsigned long) * (max_p - tmp));
3272 memset(l_slots + tmp, 0, sizeof(void *) * (max_s - tmp));
3273 ma_set_meta(left, mt, 0, split);
3278 /* RCU requires replacing l_mas, mas, and the parent. */
3279 mas->node = mt_mk_node(newnode, mt);
3280 ma_set_meta(newnode, mt, 0, tmp);
3282 new_left = mas_pop_node(mas);
3283 new_left->parent = left->parent;
3284 mt = mte_node_type(l_mas.node);
3285 slots = ma_slots(new_left, mt);
3286 pivs = ma_pivots(new_left, mt);
3287 memcpy(slots, l_slots, sizeof(void *) * split);
3288 memcpy(pivs, l_pivs, sizeof(unsigned long) * split);
3289 ma_set_meta(new_left, mt, 0, split);
3290 l_mas.node = mt_mk_node(new_left, mt);
3292 /* replace parent. */
3293 offset = mte_parent_slot(mas->node);
3294 mt = mas_parent_type(&l_mas, l_mas.node);
3295 parent = mas_pop_node(mas);
3296 slots = ma_slots(parent, mt);
3297 pivs = ma_pivots(parent, mt);
3298 memcpy(parent, mte_to_node(eparent), sizeof(struct maple_node));
3299 rcu_assign_pointer(slots[offset], mas->node);
3300 rcu_assign_pointer(slots[offset - 1], l_mas.node);
3301 pivs[offset - 1] = l_mas.max;
3302 eparent = mt_mk_node(parent, mt);
3304 gap = mas_leaf_max_gap(mas);
3305 mte_set_gap(eparent, mte_parent_slot(mas->node), gap);
3306 gap = mas_leaf_max_gap(&l_mas);
3307 mte_set_gap(eparent, mte_parent_slot(l_mas.node), gap);
3311 mas_replace(mas, false);
3313 mas_update_gap(mas);
3317 * mas_split_final_node() - Split the final node in a subtree operation.
3318 * @mast: the maple subtree state
3319 * @mas: The maple state
3320 * @height: The height of the tree in case it's a new root.
3322 static inline bool mas_split_final_node(struct maple_subtree_state *mast,
3323 struct ma_state *mas, int height)
3325 struct maple_enode *ancestor;
3327 if (mte_is_root(mas->node)) {
3328 if (mt_is_alloc(mas->tree))
3329 mast->bn->type = maple_arange_64;
3331 mast->bn->type = maple_range_64;
3332 mas->depth = height;
3335 * Only a single node is used here, which could be the root.
3336 * The big node data should just fit in a single node.
3338 ancestor = mas_new_ma_node(mas, mast->bn);
3339 mte_set_parent(mast->l->node, ancestor, mast->l->offset);
3340 mte_set_parent(mast->r->node, ancestor, mast->r->offset);
3341 mte_to_node(ancestor)->parent = mas_mn(mas)->parent;
3343 mast->l->node = ancestor;
3344 mab_mas_cp(mast->bn, 0, mt_slots[mast->bn->type] - 1, mast->l, true);
3345 mas->offset = mast->bn->b_end - 1;
3350 * mast_fill_bnode() - Copy data into the big node in the subtree state
3351 * @mast: The maple subtree state
3352 * @mas: the maple state
3353 * @skip: The number of entries to skip when inserting the new nodes.
3355 static inline void mast_fill_bnode(struct maple_subtree_state *mast,
3356 struct ma_state *mas,
3360 struct maple_enode *old = mas->node;
3361 unsigned char split;
3363 memset(mast->bn->gap, 0, sizeof(unsigned long) * ARRAY_SIZE(mast->bn->gap));
3364 memset(mast->bn->slot, 0, sizeof(unsigned long) * ARRAY_SIZE(mast->bn->slot));
3365 memset(mast->bn->pivot, 0, sizeof(unsigned long) * ARRAY_SIZE(mast->bn->pivot));
3366 mast->bn->b_end = 0;
3368 if (mte_is_root(mas->node)) {
3372 mat_add(mast->free, old);
3373 mas->offset = mte_parent_slot(mas->node);
3376 if (cp && mast->l->offset)
3377 mas_mab_cp(mas, 0, mast->l->offset - 1, mast->bn, 0);
3379 split = mast->bn->b_end;
3380 mab_set_b_end(mast->bn, mast->l, mast->l->node);
3381 mast->r->offset = mast->bn->b_end;
3382 mab_set_b_end(mast->bn, mast->r, mast->r->node);
3383 if (mast->bn->pivot[mast->bn->b_end - 1] == mas->max)
3387 mas_mab_cp(mas, split + skip, mt_slot_count(mas->node) - 1,
3388 mast->bn, mast->bn->b_end);
3391 mast->bn->type = mte_node_type(mas->node);
3395 * mast_split_data() - Split the data in the subtree state big node into regular nodes.
3397 * @mast: The maple subtree state
3398 * @mas: The maple state
3399 * @split: The location to split the big node
3401 static inline void mast_split_data(struct maple_subtree_state *mast,
3402 struct ma_state *mas, unsigned char split)
3404 unsigned char p_slot;
3406 mab_mas_cp(mast->bn, 0, split, mast->l, true);
3407 mte_set_pivot(mast->r->node, 0, mast->r->max);
3408 mab_mas_cp(mast->bn, split + 1, mast->bn->b_end, mast->r, false);
3409 mast->l->offset = mte_parent_slot(mas->node);
3410 mast->l->max = mast->bn->pivot[split];
3411 mast->r->min = mast->l->max + 1;
3412 if (mte_is_leaf(mas->node))
3415 p_slot = mast->orig_l->offset;
3416 mas_set_split_parent(mast->orig_l, mast->l->node, mast->r->node,
3418 mas_set_split_parent(mast->orig_r, mast->l->node, mast->r->node,
3423 * mas_push_data() - Instead of splitting a node, it is beneficial to push the
3424 * data to the right or left node if there is room.
3425 * @mas: The maple state
3426 * @height: The current height of the maple state
3427 * @mast: The maple subtree state
3428 * @left: Push left or not.
3430 * Keeping the height of the tree low means faster lookups.
3432 * Return: True if pushed, false otherwise.
3434 static inline bool mas_push_data(struct ma_state *mas, int height,
3435 struct maple_subtree_state *mast, bool left)
3437 unsigned char slot_total = mast->bn->b_end;
3438 unsigned char end, space, split;
3440 MA_STATE(tmp_mas, mas->tree, mas->index, mas->last);
3442 tmp_mas.depth = mast->l->depth;
3444 if (left && !mas_prev_sibling(&tmp_mas))
3446 else if (!left && !mas_next_sibling(&tmp_mas))
3449 end = mas_data_end(&tmp_mas);
3451 space = 2 * mt_slot_count(mas->node) - 2;
3452 /* -2 instead of -1 to ensure there isn't a triple split */
3453 if (ma_is_leaf(mast->bn->type))
3456 if (mas->max == ULONG_MAX)
3459 if (slot_total >= space)
3462 /* Get the data; Fill mast->bn */
3465 mab_shift_right(mast->bn, end + 1);
3466 mas_mab_cp(&tmp_mas, 0, end, mast->bn, 0);
3467 mast->bn->b_end = slot_total + 1;
3469 mas_mab_cp(&tmp_mas, 0, end, mast->bn, mast->bn->b_end);
3472 /* Configure mast for splitting of mast->bn */
3473 split = mt_slots[mast->bn->type] - 2;
3475 /* Switch mas to prev node */
3476 mat_add(mast->free, mas->node);
3478 /* Start using mast->l for the left side. */
3479 tmp_mas.node = mast->l->node;
3482 mat_add(mast->free, tmp_mas.node);
3483 tmp_mas.node = mast->r->node;
3485 split = slot_total - split;
3487 split = mab_no_null_split(mast->bn, split, mt_slots[mast->bn->type]);
3488 /* Update parent slot for split calculation. */
3490 mast->orig_l->offset += end + 1;
3492 mast_split_data(mast, mas, split);
3493 mast_fill_bnode(mast, mas, 2);
3494 mas_split_final_node(mast, mas, height + 1);
3499 * mas_split() - Split data that is too big for one node into two.
3500 * @mas: The maple state
3501 * @b_node: The maple big node
3502 * Return: 1 on success, 0 on failure.
3504 static int mas_split(struct ma_state *mas, struct maple_big_node *b_node)
3506 struct maple_subtree_state mast;
3508 unsigned char mid_split, split = 0;
3511 * Splitting is handled differently from any other B-tree; the Maple
3512 * Tree splits upwards. Splitting up means that the split operation
3513 * occurs when the walk of the tree hits the leaves and not on the way
3514 * down. The reason for splitting up is that it is impossible to know
3515 * how much space will be needed until the leaf is (or leaves are)
3516 * reached. Since overwriting data is allowed and a range could
3517 * overwrite more than one range or result in changing one entry into 3
3518 * entries, it is impossible to know if a split is required until the data is examined.
3521 * Splitting is a balancing act between keeping allocations to a minimum
3522 * and avoiding a 'jitter' event where a tree is expanded to make room
3523 * for an entry followed by a contraction when the entry is removed. To
3524 * accomplish the balance, there are empty slots remaining in both left
3525 * and right nodes after a split.
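 *
 * For example, on a 64-bit kernel a full leaf of 16 slots splits into two
 * nodes of roughly 8 entries each, so several subsequent stores into either
 * half can proceed without another split, and an immediate removal does not
 * force a contraction.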
3527 MA_STATE(l_mas, mas->tree, mas->index, mas->last);
3528 MA_STATE(r_mas, mas->tree, mas->index, mas->last);
3529 MA_STATE(prev_l_mas, mas->tree, mas->index, mas->last);
3530 MA_STATE(prev_r_mas, mas->tree, mas->index, mas->last);
3531 MA_TOPIARY(mat, mas->tree);
3533 trace_ma_op(__func__, mas);
3534 mas->depth = mas_mt_height(mas);
3535 /* Allocation failures will happen early. */
3536 mas_node_count(mas, 1 + mas->depth * 2);
3537 if (mas_is_err(mas))
3542 mast.orig_l = &prev_l_mas;
3543 mast.orig_r = &prev_r_mas;
3547 while (height++ <= mas->depth) {
3548 if (mt_slots[b_node->type] > b_node->b_end) {
3549 mas_split_final_node(&mast, mas, height);
3553 l_mas = r_mas = *mas;
3554 l_mas.node = mas_new_ma_node(mas, b_node);
3555 r_mas.node = mas_new_ma_node(mas, b_node);
3557 * Another way that 'jitter' is avoided is to terminate a split up early if the
3558 * left or right node has space to spare. This is referred to as "pushing left"
3559 * or "pushing right" and is similar to the B* tree, except the nodes left or
3560 * right can rarely be reused due to RCU, but the ripple upwards is halted which
3561 * is a significant savings.
3563 /* Try to push left. */
3564 if (mas_push_data(mas, height, &mast, true))
3567 /* Try to push right. */
3568 if (mas_push_data(mas, height, &mast, false))
3571 split = mab_calc_split(mas, b_node, &mid_split, prev_l_mas.min);
3572 mast_split_data(&mast, mas, split);
3574 * Usually correct, mab_mas_cp in the above call overwrites
3577 mast.r->max = mas->max;
3578 mast_fill_bnode(&mast, mas, 1);
3579 prev_l_mas = *mast.l;
3580 prev_r_mas = *mast.r;
3583 /* Set the original node as dead */
3584 mat_add(mast.free, mas->node);
3585 mas->node = l_mas.node;
3586 mas_wmb_replace(mas, mast.free, NULL);
3587 mtree_range_walk(mas);
3592 * mas_reuse_node() - Reuse the node to store the data.
3593 * @wr_mas: The maple write state
3594 * @bn: The maple big node
3595 * @end: The end of the data.
3597 * Will always return false in RCU mode.
3599 * Return: True if node was reused, false otherwise.
3601 static inline bool mas_reuse_node(struct ma_wr_state *wr_mas,
3602 struct maple_big_node *bn, unsigned char end)
3604 /* Needs to be RCU safe. */
3605 if (mt_in_rcu(wr_mas->mas->tree))
3608 if (end > bn->b_end) {
3609 int clear = mt_slots[wr_mas->type] - bn->b_end;
3611 memset(wr_mas->slots + bn->b_end, 0, sizeof(void *) * clear--);
3612 memset(wr_mas->pivots + bn->b_end, 0, sizeof(unsigned long) * clear);
3614 mab_mas_cp(bn, 0, bn->b_end, wr_mas->mas, false);
3619 * mas_commit_b_node() - Commit the big node into the tree.
3620 * @wr_mas: The maple write state
3621 * @b_node: The maple big node
3622 * @end: The end of the data.
3624 static noinline_for_kasan int mas_commit_b_node(struct ma_wr_state *wr_mas,
3625 struct maple_big_node *b_node, unsigned char end)
3627 struct maple_node *node;
3628 unsigned char b_end = b_node->b_end;
3629 enum maple_type b_type = b_node->type;
3631 if ((b_end < mt_min_slots[b_type]) &&
3632 (!mte_is_root(wr_mas->mas->node)) &&
3633 (mas_mt_height(wr_mas->mas) > 1))
3634 return mas_rebalance(wr_mas->mas, b_node);
3636 if (b_end >= mt_slots[b_type])
3637 return mas_split(wr_mas->mas, b_node);
3639 if (mas_reuse_node(wr_mas, b_node, end))
3642 mas_node_count(wr_mas->mas, 1);
3643 if (mas_is_err(wr_mas->mas))
3646 node = mas_pop_node(wr_mas->mas);
3647 node->parent = mas_mn(wr_mas->mas)->parent;
3648 wr_mas->mas->node = mt_mk_node(node, b_type);
3649 mab_mas_cp(b_node, 0, b_end, wr_mas->mas, false);
3650 mas_replace(wr_mas->mas, false);
3652 mas_update_gap(wr_mas->mas);
3657 * mas_root_expand() - Expand a root to a node
3658 * @mas: The maple state
3659 * @entry: The entry to store into the tree
3661 static inline int mas_root_expand(struct ma_state *mas, void *entry)
3663 void *contents = mas_root_locked(mas);
3664 enum maple_type type = maple_leaf_64;
3665 struct maple_node *node;
3667 unsigned long *pivots;
3670 mas_node_count(mas, 1);
3671 if (unlikely(mas_is_err(mas)))
3674 node = mas_pop_node(mas);
3675 pivots = ma_pivots(node, type);
3676 slots = ma_slots(node, type);
3677 node->parent = ma_parent_ptr(
3678 ((unsigned long)mas->tree | MA_ROOT_PARENT));
3679 mas->node = mt_mk_node(node, type);
3683 rcu_assign_pointer(slots[slot], contents);
3684 if (likely(mas->index > 1))
3687 pivots[slot++] = mas->index - 1;
3690 rcu_assign_pointer(slots[slot], entry);
3692 pivots[slot] = mas->last;
3693 if (mas->last != ULONG_MAX)
3696 mas_set_height(mas);
3697 ma_set_meta(node, maple_leaf_64, 0, slot);
3698 /* swap the new root into the tree */
3699 rcu_assign_pointer(mas->tree->ma_root, mte_mk_root(mas->node));
3703 static inline void mas_store_root(struct ma_state *mas, void *entry)
3705 if (likely((mas->last != 0) || (mas->index != 0)))
3706 mas_root_expand(mas, entry);
3707 else if (((unsigned long) (entry) & 3) == 2)
3708 mas_root_expand(mas, entry);
3710 rcu_assign_pointer(mas->tree->ma_root, entry);
3711 mas->node = MAS_START;
3716 * mas_is_span_wr() - Check if the write needs to be treated as a write that spans the node.
3718 * @wr_mas: The maple write state
3723 * Spanning writes are writes that start in one node and end in another OR if
3724 * the write of a %NULL will cause the node to end with a %NULL.
3726 * Return: True if this is a spanning write, false otherwise.
3728 static bool mas_is_span_wr(struct ma_wr_state *wr_mas)
3731 unsigned long last = wr_mas->mas->last;
3732 unsigned long piv = wr_mas->r_max;
3733 enum maple_type type = wr_mas->type;
3734 void *entry = wr_mas->entry;
3736 /* Contained in this pivot */
3740 max = wr_mas->mas->max;
3741 if (unlikely(ma_is_leaf(type))) {
3742 /* Fits in the node, but may span slots. */
3746 /* Writes to the end of the node but not null. */
3747 if ((last == max) && entry)
3751 * Writing ULONG_MAX is not a spanning write regardless of the
3752 * value being written as long as the range fits in the node.
3754 if ((last == ULONG_MAX) && (last == max))
3756 } else if (piv == last) {
3760 /* Detect spanning store wr walk */
3761 if (last == ULONG_MAX)
3765 trace_ma_write(__func__, wr_mas->mas, piv, entry);
3770 static inline void mas_wr_walk_descend(struct ma_wr_state *wr_mas)
3772 wr_mas->type = mte_node_type(wr_mas->mas->node);
3773 mas_wr_node_walk(wr_mas);
3774 wr_mas->slots = ma_slots(wr_mas->node, wr_mas->type);
3777 static inline void mas_wr_walk_traverse(struct ma_wr_state *wr_mas)
3779 wr_mas->mas->max = wr_mas->r_max;
3780 wr_mas->mas->min = wr_mas->r_min;
3781 wr_mas->mas->node = wr_mas->content;
3782 wr_mas->mas->offset = 0;
3783 wr_mas->mas->depth++;
3786 * mas_wr_walk() - Walk the tree for a write.
3787 * @wr_mas: The maple write state
3789 * Uses mas_slot_locked() and does not need to worry about dead nodes.
3791 * Return: True if it's contained in a node, false on spanning write.
3793 static bool mas_wr_walk(struct ma_wr_state *wr_mas)
3795 struct ma_state *mas = wr_mas->mas;
3798 mas_wr_walk_descend(wr_mas);
3799 if (unlikely(mas_is_span_wr(wr_mas)))
3802 wr_mas->content = mas_slot_locked(mas, wr_mas->slots,
3804 if (ma_is_leaf(wr_mas->type))
3807 mas_wr_walk_traverse(wr_mas);
3813 static bool mas_wr_walk_index(struct ma_wr_state *wr_mas)
3815 struct ma_state *mas = wr_mas->mas;
3818 mas_wr_walk_descend(wr_mas);
3819 wr_mas->content = mas_slot_locked(mas, wr_mas->slots,
3821 if (ma_is_leaf(wr_mas->type))
3823 mas_wr_walk_traverse(wr_mas);
3829 * mas_extend_spanning_null() - Extend a store of a %NULL to include surrounding %NULLs.
3830 * @l_wr_mas: The left maple write state
3831 * @r_wr_mas: The right maple write state
3833 static inline void mas_extend_spanning_null(struct ma_wr_state *l_wr_mas,
3834 struct ma_wr_state *r_wr_mas)
3836 struct ma_state *r_mas = r_wr_mas->mas;
3837 struct ma_state *l_mas = l_wr_mas->mas;
3838 unsigned char l_slot;
3840 l_slot = l_mas->offset;
3841 if (!l_wr_mas->content)
3842 l_mas->index = l_wr_mas->r_min;
3844 if ((l_mas->index == l_wr_mas->r_min) &&
3846 !mas_slot_locked(l_mas, l_wr_mas->slots, l_slot - 1))) {
3848 l_mas->index = l_wr_mas->pivots[l_slot - 2] + 1;
3850 l_mas->index = l_mas->min;
3852 l_mas->offset = l_slot - 1;
3855 if (!r_wr_mas->content) {
3856 if (r_mas->last < r_wr_mas->r_max)
3857 r_mas->last = r_wr_mas->r_max;
3859 } else if ((r_mas->last == r_wr_mas->r_max) &&
3860 (r_mas->last < r_mas->max) &&
3861 !mas_slot_locked(r_mas, r_wr_mas->slots, r_mas->offset + 1)) {
3862 r_mas->last = mas_safe_pivot(r_mas, r_wr_mas->pivots,
3863 r_wr_mas->type, r_mas->offset + 1);
3868 static inline void *mas_state_walk(struct ma_state *mas)
3872 entry = mas_start(mas);
3873 if (mas_is_none(mas))
3876 if (mas_is_ptr(mas))
3879 return mtree_range_walk(mas);
3883 * mtree_lookup_walk() - Internal quick lookup that does not keep the maple state up to date.
3886 * @mas: The maple state.
3888 * Note: Leaves mas in undesirable state.
3889 * Return: The entry for @mas->index or %NULL on dead node.
3891 static inline void *mtree_lookup_walk(struct ma_state *mas)
3893 unsigned long *pivots;
3894 unsigned char offset;
3895 struct maple_node *node;
3896 struct maple_enode *next;
3897 enum maple_type type;
3906 node = mte_to_node(next);
3907 type = mte_node_type(next);
3908 pivots = ma_pivots(node, type);
3909 end = ma_data_end(node, type, pivots, max);
3910 if (unlikely(ma_dead_node(node)))
3913 if (pivots[offset] >= mas->index) {
3914 max = pivots[offset];
3917 } while (++offset < end);
3919 slots = ma_slots(node, type);
3920 next = mt_slot(mas->tree, slots, offset);
3921 if (unlikely(ma_dead_node(node)))
3923 } while (!ma_is_leaf(type));
3925 return (void *)next;
3933 * mas_new_root() - Create a new root node that only contains the entry passed
3935 * @mas: The maple state
3936 * @entry: The entry to store.
3938 * Only valid when mas->index == 0 and mas->last == ULONG_MAX.
3940 * Return: 0 on error, 1 on success.
3942 static inline int mas_new_root(struct ma_state *mas, void *entry)
3944 struct maple_enode *root = mas_root_locked(mas);
3945 enum maple_type type = maple_leaf_64;
3946 struct maple_node *node;
3948 unsigned long *pivots;
3950 if (!entry && !mas->index && mas->last == ULONG_MAX) {
3952 mas_set_height(mas);
3953 rcu_assign_pointer(mas->tree->ma_root, entry);
3954 mas->node = MAS_START;
3958 mas_node_count(mas, 1);
3959 if (mas_is_err(mas))
3962 node = mas_pop_node(mas);
3963 pivots = ma_pivots(node, type);
3964 slots = ma_slots(node, type);
3965 node->parent = ma_parent_ptr(
3966 ((unsigned long)mas->tree | MA_ROOT_PARENT));
3967 mas->node = mt_mk_node(node, type);
3968 rcu_assign_pointer(slots[0], entry);
3969 pivots[0] = mas->last;
3971 mas_set_height(mas);
3972 rcu_assign_pointer(mas->tree->ma_root, mte_mk_root(mas->node));
3975 if (xa_is_node(root))
3976 mte_destroy_walk(root, mas->tree);
3981 * mas_wr_spanning_store() - Create a subtree with the store operation completed
3982 * and new nodes where necessary, then place the sub-tree in the actual tree.
3983 * Note that mas is expected to point to the node which caused the store to span.
3985 * @wr_mas: The maple write state
3987 * Return: 0 on error, positive on success.
3989 static inline int mas_wr_spanning_store(struct ma_wr_state *wr_mas)
3991 struct maple_subtree_state mast;
3992 struct maple_big_node b_node;
3993 struct ma_state *mas;
3994 unsigned char height;
3996 /* Left and Right side of spanning store */
3997 MA_STATE(l_mas, NULL, 0, 0);
3998 MA_STATE(r_mas, NULL, 0, 0);
4000 MA_WR_STATE(r_wr_mas, &r_mas, wr_mas->entry);
4001 MA_WR_STATE(l_wr_mas, &l_mas, wr_mas->entry);
4004 * A store operation that spans multiple nodes is called a spanning
4005 * store and is handled early in the store call stack by the function
4006 * mas_is_span_wr(). When a spanning store is identified, the maple
4007 * state is duplicated. The first maple state walks the left tree path
4008 * to ``index``, the duplicate walks the right tree path to ``last``.
4009 * The data in the two nodes are combined into a single node, two nodes,
4010 * or possibly three nodes (see the 3-way split above). A ``NULL``
4011 * written to the last entry of a node is considered a spanning store as
4012 * a rebalance is required for the operation to complete and an overflow
4013 * of data may happen.
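 *
 * For example, with a hypothetical layout of [0, 10] and [11, 20] stored in
 * two adjacent leaves, a call such as
 *
 *	mtree_store_range(mt, 5, 15, ptr, GFP_KERNEL);
 *
 * starts in the left leaf and ends in the right one, so it is handled here:
 * both paths are walked, combined, and re-split.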
4016 trace_ma_op(__func__, mas);
4018 if (unlikely(!mas->index && mas->last == ULONG_MAX))
4019 return mas_new_root(mas, wr_mas->entry);
4021 * Node rebalancing may occur due to this store, so there may be three new
4022 * entries per level plus a new root.
4024 height = mas_mt_height(mas);
4025 mas_node_count(mas, 1 + height * 3);
4026 if (mas_is_err(mas))
4030 * Set up right side. Need to get to the next offset after the spanning
4031 * store to ensure it's not NULL and to combine both the next node and
4032 * the node with the start together.
4035 /* Avoid overflow, walk to next slot in the tree. */
4039 r_mas.index = r_mas.last;
4040 mas_wr_walk_index(&r_wr_mas);
4041 r_mas.last = r_mas.index = mas->last;
4043 /* Set up left side. */
4045 mas_wr_walk_index(&l_wr_mas);
4047 if (!wr_mas->entry) {
4048 mas_extend_spanning_null(&l_wr_mas, &r_wr_mas);
4049 mas->offset = l_mas.offset;
4050 mas->index = l_mas.index;
4051 mas->last = l_mas.last = r_mas.last;
4054 /* expanding NULLs may make this cover the entire range */
4055 if (!l_mas.index && r_mas.last == ULONG_MAX) {
4056 mas_set_range(mas, 0, ULONG_MAX);
4057 return mas_new_root(mas, wr_mas->entry);
4060 memset(&b_node, 0, sizeof(struct maple_big_node));
4061 /* Copy l_mas and store the value in b_node. */
4062 mas_store_b_node(&l_wr_mas, &b_node, l_wr_mas.node_end);
4063 /* Copy r_mas into b_node. */
4064 if (r_mas.offset <= r_wr_mas.node_end)
4065 mas_mab_cp(&r_mas, r_mas.offset, r_wr_mas.node_end,
4066 &b_node, b_node.b_end + 1);
4070 /* Stop spanning searches by searching for just index. */
4071 l_mas.index = l_mas.last = mas->index;
4074 mast.orig_l = &l_mas;
4075 mast.orig_r = &r_mas;
4076 /* Combine l_mas and r_mas and split them up evenly again. */
4077 return mas_spanning_rebalance(mas, &mast, height + 1);
4081 * mas_wr_node_store() - Attempt to store the value in a node
4082 * @wr_mas: The maple write state
4084 * Attempts to reuse the node, but may allocate.
4086 * Return: True if stored, false otherwise
4088 static inline bool mas_wr_node_store(struct ma_wr_state *wr_mas)
4090 struct ma_state *mas = wr_mas->mas;
4091 void __rcu **dst_slots;
4092 unsigned long *dst_pivots;
4093 unsigned char dst_offset;
4094 unsigned char new_end = wr_mas->node_end;
4095 unsigned char offset;
4096 unsigned char node_slots = mt_slots[wr_mas->type];
4097 struct maple_node reuse, *newnode;
4098 unsigned char copy_size, max_piv = mt_pivots[wr_mas->type];
4099 bool in_rcu = mt_in_rcu(mas->tree);
4101 offset = mas->offset;
4102 if (mas->last == wr_mas->r_max) {
4103 /* runs right to the end of the node */
4104 if (mas->last == mas->max)
4106 /* don't copy this offset */
4107 wr_mas->offset_end++;
4108 } else if (mas->last < wr_mas->r_max) {
4109 /* new range ends in this range */
4110 if (unlikely(wr_mas->r_max == ULONG_MAX))
4111 mas_bulk_rebalance(mas, wr_mas->node_end, wr_mas->type);
4115 if (wr_mas->end_piv == mas->last)
4116 wr_mas->offset_end++;
4118 new_end -= wr_mas->offset_end - offset - 1;
4121 /* new range starts within a range */
4122 if (wr_mas->r_min < mas->index)
4125 /* Not enough room */
4126 if (new_end >= node_slots)
4129 /* Not enough data. */
4130 if (!mte_is_root(mas->node) && (new_end <= mt_min_slots[wr_mas->type]) &&
4131 !(mas->mas_flags & MA_STATE_BULK))
4136 mas_node_count(mas, 1);
4137 if (mas_is_err(mas))
4140 newnode = mas_pop_node(mas);
4142 memset(&reuse, 0, sizeof(struct maple_node));
4146 newnode->parent = mas_mn(mas)->parent;
4147 dst_pivots = ma_pivots(newnode, wr_mas->type);
4148 dst_slots = ma_slots(newnode, wr_mas->type);
4149 /* Copy from start to insert point */
4150 memcpy(dst_pivots, wr_mas->pivots, sizeof(unsigned long) * (offset + 1));
4151 memcpy(dst_slots, wr_mas->slots, sizeof(void *) * (offset + 1));
4152 dst_offset = offset;
4154 /* Handle insert of new range starting after old range */
4155 if (wr_mas->r_min < mas->index) {
4157 rcu_assign_pointer(dst_slots[dst_offset], wr_mas->content);
4158 dst_pivots[dst_offset++] = mas->index - 1;
4161 /* Store the new entry and range end. */
4162 if (dst_offset < max_piv)
4163 dst_pivots[dst_offset] = mas->last;
4164 mas->offset = dst_offset;
4165 rcu_assign_pointer(dst_slots[dst_offset], wr_mas->entry);
4168 * this range wrote to the end of the node or it overwrote the rest of
4171 if (wr_mas->offset_end > wr_mas->node_end || mas->last >= mas->max) {
4172 new_end = dst_offset;
4177 /* Copy to the end of node if necessary. */
4178 copy_size = wr_mas->node_end - wr_mas->offset_end + 1;
4179 memcpy(dst_slots + dst_offset, wr_mas->slots + wr_mas->offset_end,
4180 sizeof(void *) * copy_size);
4181 if (dst_offset < max_piv) {
4182 if (copy_size > max_piv - dst_offset)
4183 copy_size = max_piv - dst_offset;
4185 memcpy(dst_pivots + dst_offset,
4186 wr_mas->pivots + wr_mas->offset_end,
4187 sizeof(unsigned long) * copy_size);
4190 if ((wr_mas->node_end == node_slots - 1) && (new_end < node_slots - 1))
4191 dst_pivots[new_end] = mas->max;
4194 mas_leaf_set_meta(mas, newnode, dst_pivots, maple_leaf_64, new_end);
4196 mte_set_node_dead(mas->node);
4197 mas->node = mt_mk_node(newnode, wr_mas->type);
4198 mas_replace(mas, false);
4200 memcpy(wr_mas->node, newnode, sizeof(struct maple_node));
4202 trace_ma_write(__func__, mas, 0, wr_mas->entry);
4203 mas_update_gap(mas);
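/*
 * A worked example of the node store above, with hypothetical contents: a
 * leaf holding [1, 5]A, [6, 10]B, [11, 15]C receiving a store of [6, 12]X is
 * rebuilt in a new node as [1, 5]A, [6, 12]X, [13, 15]C: B is overwritten,
 * one pivot (10) disappears, and C's range shrinks from the left.
 */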
4208 * mas_wr_slot_store() - Attempt to store a value in a slot.
4209 * @wr_mas: the maple write state
4211 * Return: True if stored, false otherwise
4213 static inline bool mas_wr_slot_store(struct ma_wr_state *wr_mas)
4215 struct ma_state *mas = wr_mas->mas;
4216 unsigned long lmax; /* Logical max. */
4217 unsigned char offset = mas->offset;
4219 if ((wr_mas->r_max > mas->last) && ((wr_mas->r_min != mas->index) ||
4220 (offset != wr_mas->node_end)))
4223 if (offset == wr_mas->node_end - 1)
4226 lmax = wr_mas->pivots[offset + 1];
4228 /* going to overwrite too many slots. */
4229 if (lmax < mas->last)
4232 if (wr_mas->r_min == mas->index) {
4233 /* overwriting two or more ranges with one. */
4234 if (lmax == mas->last)
4237 /* Overwriting all of offset and a portion of offset + 1. */
4238 rcu_assign_pointer(wr_mas->slots[offset], wr_mas->entry);
4239 wr_mas->pivots[offset] = mas->last;
4243 /* Doesn't end on the next range end. */
4244 if (lmax != mas->last)
4247 /* Overwriting a portion of offset and all of offset + 1 */
4248 if ((offset + 1 < mt_pivots[wr_mas->type]) &&
4249 (wr_mas->entry || wr_mas->pivots[offset + 1]))
4250 wr_mas->pivots[offset + 1] = mas->last;
4252 rcu_assign_pointer(wr_mas->slots[offset + 1], wr_mas->entry);
4253 wr_mas->pivots[offset] = mas->index - 1;
4254 mas->offset++; /* Keep mas accurate. */
4257 trace_ma_write(__func__, mas, 0, wr_mas->entry);
4258 mas_update_gap(mas);
4262 static inline void mas_wr_end_piv(struct ma_wr_state *wr_mas)
4264 while ((wr_mas->offset_end < wr_mas->node_end) &&
4265 (wr_mas->mas->last > wr_mas->pivots[wr_mas->offset_end]))
4266 wr_mas->offset_end++;
4268 if (wr_mas->offset_end < wr_mas->node_end)
4269 wr_mas->end_piv = wr_mas->pivots[wr_mas->offset_end];
4271 wr_mas->end_piv = wr_mas->mas->max;
4274 static inline void mas_wr_extend_null(struct ma_wr_state *wr_mas)
4276 struct ma_state *mas = wr_mas->mas;
4278 if (mas->last < wr_mas->end_piv && !wr_mas->slots[wr_mas->offset_end])
4279 mas->last = wr_mas->end_piv;
4281 /* Check next slot(s) if we are overwriting the end */
4282 if ((mas->last == wr_mas->end_piv) &&
4283 (wr_mas->node_end != wr_mas->offset_end) &&
4284 !wr_mas->slots[wr_mas->offset_end + 1]) {
4285 wr_mas->offset_end++;
4286 if (wr_mas->offset_end == wr_mas->node_end)
4287 mas->last = mas->max;
4289 mas->last = wr_mas->pivots[wr_mas->offset_end];
4290 wr_mas->end_piv = mas->last;
4293 if (!wr_mas->content) {
4294 /* If this one is null, the next and prev are not */
4295 mas->index = wr_mas->r_min;
4297 /* Check prev slot if we are overwriting the start */
4298 if (mas->index == wr_mas->r_min && mas->offset &&
4299 !wr_mas->slots[mas->offset - 1]) {
4301 wr_mas->r_min = mas->index =
4302 mas_safe_min(mas, wr_mas->pivots, mas->offset);
4303 wr_mas->r_max = wr_mas->pivots[mas->offset];
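/*
 * Extension example, with hypothetical contents: when [11, 20] already holds
 * a NULL and a NULL is stored over [5, 10], the write is widened to [5, 20]
 * so that adjacent NULL ranges never sit side by side in a leaf.
 */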
4308 static inline bool mas_wr_append(struct ma_wr_state *wr_mas)
4310 unsigned char end = wr_mas->node_end;
4311 unsigned char new_end = end + 1;
4312 struct ma_state *mas = wr_mas->mas;
4313 unsigned char node_pivots = mt_pivots[wr_mas->type];
4315 if ((mas->index != wr_mas->r_min) && (mas->last == wr_mas->r_max)) {
4316 if (new_end < node_pivots)
4317 wr_mas->pivots[new_end] = wr_mas->pivots[end];
4319 if (new_end < node_pivots)
4320 ma_set_meta(wr_mas->node, maple_leaf_64, 0, new_end);
4322 rcu_assign_pointer(wr_mas->slots[new_end], wr_mas->entry);
4323 mas->offset = new_end;
4324 wr_mas->pivots[end] = mas->index - 1;
4329 if ((mas->index == wr_mas->r_min) && (mas->last < wr_mas->r_max)) {
4330 if (new_end < node_pivots)
4331 wr_mas->pivots[new_end] = wr_mas->pivots[end];
4333 rcu_assign_pointer(wr_mas->slots[new_end], wr_mas->content);
4334 if (new_end < node_pivots)
4335 ma_set_meta(wr_mas->node, maple_leaf_64, 0, new_end);
4337 wr_mas->pivots[end] = mas->last;
4338 rcu_assign_pointer(wr_mas->slots[end], wr_mas->entry);
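/*
 * Append example, with hypothetical contents: if the last slot of a leaf
 * covers [50, 100] with a NULL and the range [80, 100] is stored, the old
 * pivot is trimmed to 79 and the new entry lands in a fresh end slot.  The
 * node is modified in place with no allocation, which is what makes
 * appending cheap.
 */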
4346 * mas_wr_bnode() - Slow path for a modification.
4347 * @wr_mas: The write maple state
4349 * This is where split and rebalance operations end up.
4351 static void mas_wr_bnode(struct ma_wr_state *wr_mas)
4353 struct maple_big_node b_node;
4355 trace_ma_write(__func__, wr_mas->mas, 0, wr_mas->entry);
4356 memset(&b_node, 0, sizeof(struct maple_big_node));
4357 mas_store_b_node(wr_mas, &b_node, wr_mas->offset_end);
4358 mas_commit_b_node(wr_mas, &b_node, wr_mas->node_end);
4361 static inline void mas_wr_modify(struct ma_wr_state *wr_mas)
4363 unsigned char node_slots;
4364 unsigned char node_size;
4365 struct ma_state *mas = wr_mas->mas;
4367 /* Direct replacement */
4368 if (wr_mas->r_min == mas->index && wr_mas->r_max == mas->last) {
4369 rcu_assign_pointer(wr_mas->slots[mas->offset], wr_mas->entry);
4370 if (!!wr_mas->entry ^ !!wr_mas->content)
4371 mas_update_gap(mas);
4375 /* Attempt to append */
4376 node_slots = mt_slots[wr_mas->type];
4377 node_size = wr_mas->node_end - wr_mas->offset_end + mas->offset + 2;
4378 if (mas->max == ULONG_MAX)
4381 /* slot and node store will not fit, go to the slow path */
4382 if (unlikely(node_size >= node_slots))
4385 if (wr_mas->entry && (wr_mas->node_end < node_slots - 1) &&
4386 (mas->offset == wr_mas->node_end) && mas_wr_append(wr_mas)) {
4387 if (!wr_mas->content || !wr_mas->entry)
4388 mas_update_gap(mas);
4392 if ((wr_mas->offset_end - mas->offset <= 1) && mas_wr_slot_store(wr_mas))
4394 else if (mas_wr_node_store(wr_mas))
4397 if (mas_is_err(mas))
4401 mas_wr_bnode(wr_mas);
4405 * mas_wr_store_entry() - Internal call to store a value
4406 * @wr_mas: The maple write state
4409 * Return: The contents that were stored at the index.
4411 static inline void *mas_wr_store_entry(struct ma_wr_state *wr_mas)
4413 struct ma_state *mas = wr_mas->mas;
4415 wr_mas->content = mas_start(mas);
4416 if (mas_is_none(mas) || mas_is_ptr(mas)) {
4417 mas_store_root(mas, wr_mas->entry);
4418 return wr_mas->content;
4421 if (unlikely(!mas_wr_walk(wr_mas))) {
4422 mas_wr_spanning_store(wr_mas);
4423 return wr_mas->content;
4426 /* At this point, we are at the leaf node that needs to be altered. */
4427 mas_wr_end_piv(wr_mas);
4430 mas_wr_extend_null(wr_mas);
4432 /* New root for a single pointer */
4433 if (unlikely(!mas->index && mas->last == ULONG_MAX)) {
4434 mas_new_root(mas, wr_mas->entry);
4435 return wr_mas->content;
4438 mas_wr_modify(wr_mas);
4439 return wr_mas->content;
4443 * mas_insert() - Internal call to insert a value
4444 * @mas: The maple state
4445 * @entry: The entry to store
4447 * Return: %NULL on success, or the contents that already exist at the requested
4448 * index otherwise. The maple state needs to be checked for error conditions.
4450 static inline void *mas_insert(struct ma_state *mas, void *entry)
4452 MA_WR_STATE(wr_mas, mas, entry);
4455 * Inserting a new range inserts either 0, 1, or 2 pivots within the
4456 * tree. If the insert fits exactly into an existing gap with a value
4457 * of NULL, then the slot only needs to be written with the new value.
4458 * If the range being inserted is adjacent to another range, then only a
4459 * single pivot needs to be inserted (as well as writing the entry). If
4460 * the new range is within a gap but does not touch any other ranges,
4461 * then two pivots need to be inserted: the start - 1, and the end. As
4462 * usual, the entry must be written. When in RCU mode, most operations
4463 * require a new node to be allocated to replace an existing node, to
4464 * ensure RCU safety. The exception to requiring a newly allocated node
4465 * is when inserting at the end of a node (appending). When done
4466 * carefully, appending can reuse the node in place.
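 *
 * For example, with hypothetical ranges: inserting [5, 10] into a NULL gap
 * that is exactly [5, 10] only writes the slot (0 pivots); inserting [5, 10]
 * when an existing range ends at 4 and the NULL gap runs to 10 adds one
 * pivot (10); inserting [5, 10] into the middle of a NULL gap [0, 20] adds
 * two pivots (4 and 10).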
4468 wr_mas.content = mas_start(mas);
4472 if (mas_is_none(mas) || mas_is_ptr(mas)) {
4473 mas_store_root(mas, entry);
4477 /* spanning writes always overwrite something */
4478 if (!mas_wr_walk(&wr_mas))
4481 /* At this point, we are at the leaf node that needs to be altered. */
4482 wr_mas.offset_end = mas->offset;
4483 wr_mas.end_piv = wr_mas.r_max;
4485 if (wr_mas.content || (mas->last > wr_mas.r_max))
4491 mas_wr_modify(&wr_mas);
4492 return wr_mas.content;
4495 mas_set_err(mas, -EEXIST);
4496 return wr_mas.content;
4501 * mas_prev_node() - Find the previous non-null entry at the same level in
4502 * the tree.
4503 * @mas: The maple state
4504 * @min: The lower limit to search
4506 * The prev node value will be mas->node[mas->offset] or MAS_NONE.
4507 * Return: 1 if the node is dead, 0 otherwise.
4509 static inline int mas_prev_node(struct ma_state *mas, unsigned long min)
4514 struct maple_node *node;
4515 struct maple_enode *enode;
4516 unsigned long *pivots;
4518 if (mas_is_none(mas))
4524 if (ma_is_root(node))
4528 if (unlikely(mas_ascend(mas)))
4530 offset = mas->offset;
4535 mt = mte_node_type(mas->node);
4537 slots = ma_slots(node, mt);
4538 pivots = ma_pivots(node, mt);
4539 if (unlikely(ma_dead_node(node)))
4542 mas->max = pivots[offset];
4544 mas->min = pivots[offset - 1] + 1;
4545 if (unlikely(ma_dead_node(node)))
4553 enode = mas_slot(mas, slots, offset);
4554 if (unlikely(ma_dead_node(node)))
4558 mt = mte_node_type(mas->node);
4560 slots = ma_slots(node, mt);
4561 pivots = ma_pivots(node, mt);
4562 offset = ma_data_end(node, mt, pivots, mas->max);
4563 if (unlikely(ma_dead_node(node)))
4567 mas->min = pivots[offset - 1] + 1;
4569 if (offset < mt_pivots[mt])
4570 mas->max = pivots[offset];
4576 mas->node = mas_slot(mas, slots, offset);
4577 if (unlikely(ma_dead_node(node)))
4580 mas->offset = mas_data_end(mas);
4581 if (unlikely(mte_dead_node(mas->node)))
4587 mas->offset = offset;
4589 mas->min = pivots[offset - 1] + 1;
4591 if (unlikely(ma_dead_node(node)))
4594 mas->node = MAS_NONE;
4599 * mas_next_node() - Get the next node at the same level in the tree.
4600 * @mas: The maple state
4601 * @node: The maple node
4602 * @max: The maximum pivot value to check.
4603 * The next value will be mas->node[mas->offset] or MAS_NONE.
4604 * Return: 1 on dead node, 0 otherwise.
4606 static inline int mas_next_node(struct ma_state *mas, struct maple_node *node,
4609 unsigned long min, pivot;
4610 unsigned long *pivots;
4611 struct maple_enode *enode;
4613 unsigned char offset;
4614 unsigned char node_end;
4618 if (mas->max >= max)
4623 if (ma_is_root(node))
4630 if (unlikely(mas_ascend(mas)))
4633 offset = mas->offset;
4636 mt = mte_node_type(mas->node);
4637 pivots = ma_pivots(node, mt);
4638 node_end = ma_data_end(node, mt, pivots, mas->max);
4639 if (unlikely(ma_dead_node(node)))
4642 } while (unlikely(offset == node_end));
4644 slots = ma_slots(node, mt);
4645 pivot = mas_safe_pivot(mas, pivots, ++offset, mt);
4646 while (unlikely(level > 1)) {
4647 /* Descend, if necessary */
4648 enode = mas_slot(mas, slots, offset);
4649 if (unlikely(ma_dead_node(node)))
4655 mt = mte_node_type(mas->node);
4656 slots = ma_slots(node, mt);
4657 pivots = ma_pivots(node, mt);
4658 if (unlikely(ma_dead_node(node)))
4665 enode = mas_slot(mas, slots, offset);
4666 if (unlikely(ma_dead_node(node)))
4675 if (unlikely(ma_dead_node(node)))
4678 mas->node = MAS_NONE;
4683 * mas_next_nentry() - Get the next node entry
4684 * @mas: The maple state
4685 * @max: The maximum value to check
4686 * @node: The maple node
4687 * @type: The maple node type
4688 * Sets @mas->offset to the offset of the next node entry, @mas->last to the
4689 * pivot of the entry.
4691 * Return: The next entry, %NULL otherwise
4693 static inline void *mas_next_nentry(struct ma_state *mas,
4694 struct maple_node *node, unsigned long max, enum maple_type type)
4696 unsigned char count;
4697 unsigned long pivot;
4698 unsigned long *pivots;
4702 if (mas->last == mas->max) {
4703 mas->index = mas->max;
4707 slots = ma_slots(node, type);
4708 pivots = ma_pivots(node, type);
4709 count = ma_data_end(node, type, pivots, mas->max);
4710 if (unlikely(ma_dead_node(node)))
4713 mas->index = mas_safe_min(mas, pivots, mas->offset);
4714 if (unlikely(ma_dead_node(node)))
4717 if (mas->index > max)
4720 if (mas->offset > count)
4723 while (mas->offset < count) {
4724 pivot = pivots[mas->offset];
4725 entry = mas_slot(mas, slots, mas->offset);
4726 if (ma_dead_node(node))
4735 mas->index = pivot + 1;
4739 if (mas->index > mas->max) {
4740 mas->index = mas->last;
4744 pivot = mas_safe_pivot(mas, pivots, mas->offset, type);
4745 entry = mas_slot(mas, slots, mas->offset);
4746 if (ma_dead_node(node))
4760 static inline void mas_rewalk(struct ma_state *mas, unsigned long index)
4763 mas_set(mas, index);
4764 mas_state_walk(mas);
4765 if (mas_is_start(mas))
4770 * mas_next_entry() - Internal function to get the next entry.
4771 * @mas: The maple state
4772 * @limit: The maximum range start.
4774 * Sets @mas->node to the next entry and @mas->index to the beginning
4775 * value of the entry's range. Does not check beyond @limit.
4776 * Sets @mas->index and @mas->last to the limit if it is hit.
4777 * Restarts on dead nodes.
4779 * Return: the next entry or %NULL.
4781 static inline void *mas_next_entry(struct ma_state *mas, unsigned long limit)
4784 struct maple_enode *prev_node;
4785 struct maple_node *node;
4786 unsigned char offset;
4790 if (mas->index > limit) {
4791 mas->index = mas->last = limit;
4797 offset = mas->offset;
4798 prev_node = mas->node;
4800 mt = mte_node_type(mas->node);
4802 if (unlikely(mas->offset >= mt_slots[mt])) {
4803 mas->offset = mt_slots[mt] - 1;
4807 while (!mas_is_none(mas)) {
4808 entry = mas_next_nentry(mas, node, limit, mt);
4809 if (unlikely(ma_dead_node(node))) {
4810 mas_rewalk(mas, last);
4817 if (unlikely((mas->index > limit)))
4821 prev_node = mas->node;
4822 offset = mas->offset;
4823 if (unlikely(mas_next_node(mas, node, limit))) {
4824 mas_rewalk(mas, last);
4829 mt = mte_node_type(mas->node);
4832 mas->index = mas->last = limit;
4833 mas->offset = offset;
4834 mas->node = prev_node;
4839 * mas_prev_nentry() - Get the previous node entry.
4840 * @mas: The maple state.
4841 * @limit: The lower limit to check for a value.
4842 * @index: The index to re-walk from if a dead node is encountered.
4843 * Return: the entry, %NULL otherwise.
4845 static inline void *mas_prev_nentry(struct ma_state *mas, unsigned long limit,
4846 unsigned long index)
4848 unsigned long pivot, min;
4849 unsigned char offset;
4850 struct maple_node *mn;
4852 unsigned long *pivots;
4861 mt = mte_node_type(mas->node);
4862 offset = mas->offset - 1;
4863 if (offset >= mt_slots[mt])
4864 offset = mt_slots[mt] - 1;
4866 slots = ma_slots(mn, mt);
4867 pivots = ma_pivots(mn, mt);
4868 if (unlikely(ma_dead_node(mn))) {
4869 mas_rewalk(mas, index);
4873 if (offset == mt_pivots[mt])
4876 pivot = pivots[offset];
4878 if (unlikely(ma_dead_node(mn))) {
4879 mas_rewalk(mas, index);
4883 while (offset && ((!mas_slot(mas, slots, offset) && pivot >= limit) ||
4885 pivot = pivots[--offset];
4887 min = mas_safe_min(mas, pivots, offset);
4888 entry = mas_slot(mas, slots, offset);
4889 if (unlikely(ma_dead_node(mn))) {
4890 mas_rewalk(mas, index);
4894 if (likely(entry)) {
4895 mas->offset = offset;
4902 static inline void *mas_prev_entry(struct ma_state *mas, unsigned long min)
4906 if (mas->index < min) {
4907 mas->index = mas->last = min;
4908 mas->node = MAS_NONE;
4912 while (likely(!mas_is_none(mas))) {
4913 entry = mas_prev_nentry(mas, min, mas->index);
4914 if (unlikely(mas->last < min))
4920 if (unlikely(mas_prev_node(mas, min))) {
4921 mas_rewalk(mas, mas->index);
4930 mas->index = mas->last = min;
4935 * mas_rev_awalk() - Internal function. Reverse allocation walk. Find the
4936 * highest gap address of a given size in a given node and descend.
4937 * @mas: The maple state
4938 * @size: The needed size.
4939 * @gap_min/@gap_max: Pointers to store the bounds of the found gap.
4940 * Return: True if found in a leaf, false otherwise.
4943 static bool mas_rev_awalk(struct ma_state *mas, unsigned long size,
4944 unsigned long *gap_min, unsigned long *gap_max)
4946 enum maple_type type = mte_node_type(mas->node);
4947 struct maple_node *node = mas_mn(mas);
4948 unsigned long *pivots, *gaps;
4950 unsigned long gap = 0;
4951 unsigned long max, min;
4952 unsigned char offset;
4954 if (unlikely(mas_is_err(mas)))
4957 if (ma_is_dense(type)) {
4959 mas->offset = (unsigned char)(mas->index - mas->min);
4963 pivots = ma_pivots(node, type);
4964 slots = ma_slots(node, type);
4965 gaps = ma_gaps(node, type);
4966 offset = mas->offset;
4967 min = mas_safe_min(mas, pivots, offset);
4968 /* Skip out of bounds. */
4969 while (mas->last < min)
4970 min = mas_safe_min(mas, pivots, --offset);
4972 max = mas_safe_pivot(mas, pivots, offset, type);
4973 while (mas->index <= max) {
4977 else if (!mas_slot(mas, slots, offset))
4978 gap = max - min + 1;
4981 if ((size <= gap) && (size <= mas->last - min + 1))
4985 /* Skip the next slot, it cannot be a gap. */
4990 max = pivots[offset];
4991 min = mas_safe_min(mas, pivots, offset);
5001 min = mas_safe_min(mas, pivots, offset);
5004 if (unlikely((mas->index > max) || (size - 1 > max - mas->index)))
5007 if (unlikely(ma_is_leaf(type))) {
5008 mas->offset = offset;
5010 *gap_max = min + gap - 1;
5014 /* descend, only happens under lock. */
5015 mas->node = mas_slot(mas, slots, offset);
5018 mas->offset = mas_data_end(mas);
5022 if (!mte_is_root(mas->node))
5026 mas_set_err(mas, -EBUSY);
5030 static inline bool mas_anode_descend(struct ma_state *mas, unsigned long size)
5032 enum maple_type type = mte_node_type(mas->node);
5033 unsigned long pivot, min, gap = 0;
5034 unsigned char offset, data_end;
5035 unsigned long *gaps, *pivots;
5037 struct maple_node *node;
5040 if (ma_is_dense(type)) {
5041 mas->offset = (unsigned char)(mas->index - mas->min);
5046 pivots = ma_pivots(node, type);
5047 slots = ma_slots(node, type);
5048 gaps = ma_gaps(node, type);
5049 offset = mas->offset;
5050 min = mas_safe_min(mas, pivots, offset);
5051 data_end = ma_data_end(node, type, pivots, mas->max);
5052 for (; offset <= data_end; offset++) {
5053 pivot = mas_logical_pivot(mas, pivots, offset, type);
5055 /* Not within lower bounds */
5056 if (mas->index > pivot)
5061 else if (!mas_slot(mas, slots, offset))
5062 gap = min(pivot, mas->last) - max(mas->index, min) + 1;
5067 if (ma_is_leaf(type)) {
5071 if (mas->index <= pivot) {
5072 mas->node = mas_slot(mas, slots, offset);
5081 if (mas->last <= pivot) {
5082 mas_set_err(mas, -EBUSY);
5087 if (mte_is_root(mas->node))
5090 mas->offset = offset;
5095 * mas_walk() - Search for @mas->index in the tree.
5096 * @mas: The maple state.
5098 * mas->index and mas->last will be set to the range of the entry, if one
5099 * exists. If mas->node is MAS_NONE, it is reset to MAS_START.
5101 * Return: the entry at the location or %NULL.
5103 void *mas_walk(struct ma_state *mas)
5108 entry = mas_state_walk(mas);
5109 if (mas_is_start(mas))
5112 if (mas_is_ptr(mas)) {
5117 mas->last = ULONG_MAX;
5122 if (mas_is_none(mas)) {
5124 mas->last = ULONG_MAX;
5129 EXPORT_SYMBOL_GPL(mas_walk);
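/*
 * Minimal usage sketch for mas_walk(); purely illustrative and not part
 * of this file. The caller holds rcu_read_lock(), and on return
 * mas.index/mas.last span the range of the entry (if any).
 */
static void *__maybe_unused mas_walk_example(struct maple_tree *mt,
		unsigned long index)
{
	MA_STATE(mas, mt, index, index);
	void *entry;

	rcu_read_lock();
	entry = mas_walk(&mas);
	rcu_read_unlock();

	return entry;
}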
5131 static inline bool mas_rewind_node(struct ma_state *mas)
5136 if (mte_is_root(mas->node)) {
5146 mas->offset = --slot;
5151 * mas_skip_node() - Internal function. Skip over a node.
5152 * @mas: The maple state.
5154 * Return: true if there is another node, false otherwise.
5156 static inline bool mas_skip_node(struct ma_state *mas)
5158 if (mas_is_err(mas))
5162 if (mte_is_root(mas->node)) {
5163 if (mas->offset >= mas_data_end(mas)) {
5164 mas_set_err(mas, -EBUSY);
5170 } while (mas->offset >= mas_data_end(mas));
5177 * mas_awalk() - Allocation walk. Search from low address to high, for a gap
5178 * of @size.
5179 * @mas: The maple state
5180 * @size: The size of the gap required
5182 * Search between @mas->index and @mas->last for a gap of @size.
5184 static inline void mas_awalk(struct ma_state *mas, unsigned long size)
5186 struct maple_enode *last = NULL;
5189 * There are 4 options:
5190 * go to child (descend)
5191 * go back to parent (ascend)
5192 * no gap found. (return, slot == MAPLE_NODE_SLOTS)
5193 * found the gap. (return, slot != MAPLE_NODE_SLOTS)
5195 while (!mas_is_err(mas) && !mas_anode_descend(mas, size)) {
5196 if (last == mas->node)
5204 * mas_fill_gap() - Fill a located gap with @entry.
5205 * @mas: The maple state
5206 * @entry: The value to store
5207 * @slot: The offset into the node to store the @entry
5208 * @size: The size of the entry
5209 * @index: Pointer to store the start of the written range
5211 static inline void mas_fill_gap(struct ma_state *mas, void *entry,
5212 unsigned char slot, unsigned long size, unsigned long *index)
5214 MA_WR_STATE(wr_mas, mas, entry);
5215 unsigned char pslot = mte_parent_slot(mas->node);
5216 struct maple_enode *mn = mas->node;
5217 unsigned long *pivots;
5218 enum maple_type ptype;
5220 * mas->index is the start address for the search
5221 * which may no longer be needed.
5222 * mas->last is the end address for the search
5225 *index = mas->index;
5226 mas->last = mas->index + size - 1;
5229 * It is possible that using mas->max and mas->min to correctly
5230 * calculate the index and last will cause an issue in the gap
5231 * calculation, so fix the ma_state here
5234 ptype = mte_node_type(mas->node);
5235 pivots = ma_pivots(mas_mn(mas), ptype);
5236 mas->max = mas_safe_pivot(mas, pivots, pslot, ptype);
5237 mas->min = mas_safe_min(mas, pivots, pslot);
5240 mas_wr_store_entry(&wr_mas);
5244 * mas_sparse_area() - Internal function. Return upper or lower limit when
5245 * searching for a gap in an empty tree.
5246 * @mas: The maple state
5247 * @min: The minimum of the range
5248 * @max: The maximum of the range
5249 * @size: The size of the gap
5250 * @fwd: Searching forward or back
5252 static inline int mas_sparse_area(struct ma_state *mas, unsigned long min,
5253 unsigned long max, unsigned long size, bool fwd)
5255 if (!unlikely(mas_is_none(mas)) && min == 0) {
5258 * At this time, min is increased, we need to recheck whether
5259 * the size is satisfied.
5261 if (min > max || max - min + 1 < size)
5268 mas->last = min + size - 1;
5271 mas->index = max - size + 1;
5277 * mas_empty_area() - Get the lowest address within the range that is
5278 * sufficient for the size requested.
5279 * @mas: The maple state
5280 * @min: The lowest value of the range
5281 * @max: The highest value of the range
5282 * @size: The size needed
5283 * Return: 0 on success, otherwise a negative error.
5284 int mas_empty_area(struct ma_state *mas, unsigned long min,
5285 unsigned long max, unsigned long size)
5287 unsigned char offset;
5288 unsigned long *pivots;
5294 if (mas_is_start(mas))
5296 else if (mas->offset >= 2)
5298 else if (!mas_skip_node(mas))
5302 if (mas_is_none(mas) || mas_is_ptr(mas))
5303 return mas_sparse_area(mas, min, max, size, true);
5305 /* The start of the window can only be within these values */
5308 mas_awalk(mas, size);
5310 if (unlikely(mas_is_err(mas)))
5311 return xa_err(mas->node);
5313 offset = mas->offset;
5314 if (unlikely(offset == MAPLE_NODE_SLOTS))
5317 mt = mte_node_type(mas->node);
5318 pivots = ma_pivots(mas_mn(mas), mt);
5319 min = mas_safe_min(mas, pivots, offset);
5320 if (mas->index < min)
5322 mas->last = mas->index + size - 1;
5325 EXPORT_SYMBOL_GPL(mas_empty_area);
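/*
 * Sketch of a forward gap search: find the lowest 16-slot gap and claim
 * it. Assumes an MT_FLAGS_ALLOC_RANGE tree; the function name and the
 * stored entry are hypothetical.
 */
static int __maybe_unused mas_gap_example(struct maple_tree *mt, void *entry)
{
	MA_STATE(mas, mt, 0, 0);
	int ret;

	mas_lock(&mas);
	ret = mas_empty_area(&mas, 0, ULONG_MAX, 16);
	if (!ret)	/* mas.index/mas.last now span the free range */
		ret = mas_store_gfp(&mas, entry, GFP_KERNEL);
	mas_unlock(&mas);

	return ret;
}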
5328 * mas_empty_area_rev() - Get the highest address within the range that is
5329 * sufficient for the size requested.
5330 * @mas: The maple state
5331 * @min: The lowest value of the range
5332 * @max: The highest value of the range
5333 * @size: The size needed
5334 * Return: 0 on success, otherwise a negative error.
5335 int mas_empty_area_rev(struct ma_state *mas, unsigned long min,
5336 unsigned long max, unsigned long size)
5338 struct maple_enode *last = mas->node;
5343 if (mas_is_start(mas)) {
5345 mas->offset = mas_data_end(mas);
5346 } else if (mas->offset >= 2) {
5348 } else if (!mas_rewind_node(mas)) {
5353 if (mas_is_none(mas) || mas_is_ptr(mas))
5354 return mas_sparse_area(mas, min, max, size, false);
5356 /* The start of the window can only be within these values. */
5360 while (!mas_rev_awalk(mas, size, &min, &max)) {
5361 if (last == mas->node) {
5362 if (!mas_rewind_node(mas))
5369 if (mas_is_err(mas))
5370 return xa_err(mas->node);
5372 if (unlikely(mas->offset == MAPLE_NODE_SLOTS))
5375 /* Trim the upper limit to the max. */
5376 if (max <= mas->last)
5379 mas->index = mas->last - size + 1;
5382 EXPORT_SYMBOL_GPL(mas_empty_area_rev);
5384 static inline int mas_alloc(struct ma_state *mas, void *entry,
5385 unsigned long size, unsigned long *index)
5390 if (mas_is_none(mas) || mas_is_ptr(mas)) {
5391 mas_root_expand(mas, entry);
5392 if (mas_is_err(mas))
5393 return xa_err(mas->node);
5396 return mte_pivot(mas->node, 0);
5397 return mte_pivot(mas->node, 1);
5400 /* Must be walking a tree. */
5401 mas_awalk(mas, size);
5402 if (mas_is_err(mas))
5403 return xa_err(mas->node);
5405 if (mas->offset == MAPLE_NODE_SLOTS)
5409 * At this point, mas->node points to the right node and we have an
5410 * offset that has a sufficient gap.
5414 min = mte_pivot(mas->node, mas->offset - 1) + 1;
5416 if (mas->index < min)
5419 mas_fill_gap(mas, entry, mas->offset, size, index);
5426 static inline int mas_rev_alloc(struct ma_state *mas, unsigned long min,
5427 unsigned long max, void *entry,
5428 unsigned long size, unsigned long *index)
5432 ret = mas_empty_area_rev(mas, min, max, size);
5436 if (mas_is_err(mas))
5437 return xa_err(mas->node);
5439 if (mas->offset == MAPLE_NODE_SLOTS)
5442 mas_fill_gap(mas, entry, mas->offset, size, index);
5450 * mte_dead_leaves() - Mark all leaves of a node as dead.
5451 * @enode: The encoded maple node
5452 * @mt: The maple tree
5453 * @slots: Pointer to the slot array
5455 * Must hold the write lock.
5457 * Return: The number of leaves marked as dead.
5460 unsigned char mte_dead_leaves(struct maple_enode *enode, struct maple_tree *mt,
5463 struct maple_node *node;
5464 enum maple_type type;
5468 for (offset = 0; offset < mt_slot_count(enode); offset++) {
5469 entry = mt_slot(mt, slots, offset);
5470 type = mte_node_type(entry);
5471 node = mte_to_node(entry);
5472 /* Use both node and type to catch LE & BE metadata */
5476 mte_set_node_dead(entry);
5478 rcu_assign_pointer(slots[offset], node);
5485 * mte_dead_walk() - Walk down a dead tree to just before the leaves
5486 * @enode: The maple encoded node
5487 * @offset: The starting offset
5489 * Note: This can only be used from the RCU callback context.
5491 static void __rcu **mte_dead_walk(struct maple_enode **enode, unsigned char offset)
5493 struct maple_node *node, *next;
5494 void __rcu **slots = NULL;
5496 next = mte_to_node(*enode);
5498 *enode = ma_enode_ptr(next);
5499 node = mte_to_node(*enode);
5500 slots = ma_slots(node, node->type);
5501 next = rcu_dereference_protected(slots[offset],
5502 lock_is_held(&rcu_callback_map));
5504 } while (!ma_is_leaf(next->type));
5510 * mt_free_walk() - Walk & free a tree in the RCU callback context
5511 * @head: The RCU head that's within the node.
5513 * Note: This can only be used from the RCU callback context.
5515 static void mt_free_walk(struct rcu_head *head)
5518 struct maple_node *node, *start;
5519 struct maple_enode *enode;
5520 unsigned char offset;
5521 enum maple_type type;
5523 node = container_of(head, struct maple_node, rcu);
5525 if (ma_is_leaf(node->type))
5529 enode = mt_mk_node(node, node->type);
5530 slots = mte_dead_walk(&enode, 0);
5531 node = mte_to_node(enode);
5533 mt_free_bulk(node->slot_len, slots);
5534 offset = node->parent_slot + 1;
5535 enode = node->piv_parent;
5536 if (mte_to_node(enode) == node)
5539 type = mte_node_type(enode);
5540 slots = ma_slots(mte_to_node(enode), type);
5541 if ((offset < mt_slots[type]) &&
5542 rcu_dereference_protected(slots[offset],
5543 lock_is_held(&rcu_callback_map)))
5544 slots = mte_dead_walk(&enode, offset);
5545 node = mte_to_node(enode);
5546 } while ((node != start) || (node->slot_len < offset));
5548 slots = ma_slots(node, node->type);
5549 mt_free_bulk(node->slot_len, slots);
5552 mt_free_rcu(&node->rcu);
5555 static inline void __rcu **mte_destroy_descend(struct maple_enode **enode,
5556 struct maple_tree *mt, struct maple_enode *prev, unsigned char offset)
5558 struct maple_node *node;
5559 struct maple_enode *next = *enode;
5560 void __rcu **slots = NULL;
5561 enum maple_type type;
5562 unsigned char next_offset = 0;
5566 node = mte_to_node(*enode);
5567 type = mte_node_type(*enode);
5568 slots = ma_slots(node, type);
5569 next = mt_slot_locked(mt, slots, next_offset);
5570 if ((mte_dead_node(next)))
5571 next = mt_slot_locked(mt, slots, ++next_offset);
5573 mte_set_node_dead(*enode);
5575 node->piv_parent = prev;
5576 node->parent_slot = offset;
5577 offset = next_offset;
5580 } while (!mte_is_leaf(next));
5585 static void mt_destroy_walk(struct maple_enode *enode, struct maple_tree *mt,
5589 struct maple_node *node = mte_to_node(enode);
5590 struct maple_enode *start;
5592 if (mte_is_leaf(enode)) {
5593 node->type = mte_node_type(enode);
5598 slots = mte_destroy_descend(&enode, mt, start, 0);
5599 node = mte_to_node(enode); /* Updated in the above call. */
5601 enum maple_type type;
5602 unsigned char offset;
5603 struct maple_enode *parent, *tmp;
5605 node->slot_len = mte_dead_leaves(enode, mt, slots);
5607 mt_free_bulk(node->slot_len, slots);
5608 offset = node->parent_slot + 1;
5609 enode = node->piv_parent;
5610 if (mte_to_node(enode) == node)
5613 type = mte_node_type(enode);
5614 slots = ma_slots(mte_to_node(enode), type);
5615 if (offset >= mt_slots[type])
5618 tmp = mt_slot_locked(mt, slots, offset);
5619 if (mte_node_type(tmp) && mte_to_node(tmp)) {
5622 slots = mte_destroy_descend(&enode, mt, parent, offset);
5625 node = mte_to_node(enode);
5626 } while (start != enode);
5628 node = mte_to_node(enode);
5629 node->slot_len = mte_dead_leaves(enode, mt, slots);
5631 mt_free_bulk(node->slot_len, slots);
5635 mt_free_rcu(&node->rcu);
5637 mt_clear_meta(mt, node, node->type);
5641 * mte_destroy_walk() - Free a tree or sub-tree.
5642 * @enode: the encoded maple node (maple_enode) to start
5643 * @mt: the tree to free - needed for node types.
5645 * Must hold the write lock.
5647 static inline void mte_destroy_walk(struct maple_enode *enode,
5648 struct maple_tree *mt)
5650 struct maple_node *node = mte_to_node(enode);
5652 if (mt_in_rcu(mt)) {
5653 mt_destroy_walk(enode, mt, false);
5654 call_rcu(&node->rcu, mt_free_walk);
5656 mt_destroy_walk(enode, mt, true);
5660 static void mas_wr_store_setup(struct ma_wr_state *wr_mas)
5662 if (unlikely(mas_is_paused(wr_mas->mas)))
5663 mas_reset(wr_mas->mas);
5665 if (!mas_is_start(wr_mas->mas)) {
5666 if (mas_is_none(wr_mas->mas)) {
5667 mas_reset(wr_mas->mas);
5669 wr_mas->r_max = wr_mas->mas->max;
5670 wr_mas->type = mte_node_type(wr_mas->mas->node);
5671 if (mas_is_span_wr(wr_mas))
5672 mas_reset(wr_mas->mas);
5680 * mas_store() - Store an @entry.
5681 * @mas: The maple state.
5682 * @entry: The entry to store.
5684 * The @mas->index and @mas->last are used to set the range for the @entry.
5685 * Note: The @mas should have pre-allocated entries to ensure there is memory to
5686 * store the entry. Please see mas_expected_entries()/mas_destroy() for more details.
5688 * Return: the first entry between mas->index and mas->last or %NULL.
5690 void *mas_store(struct ma_state *mas, void *entry)
5692 MA_WR_STATE(wr_mas, mas, entry);
5694 trace_ma_write(__func__, mas, 0, entry);
5695 #ifdef CONFIG_DEBUG_MAPLE_TREE
5696 if (mas->index > mas->last)
5697 pr_err("Error %lX > %lX %p\n", mas->index, mas->last, entry);
5698 MT_BUG_ON(mas->tree, mas->index > mas->last);
5699 if (mas->index > mas->last) {
5700 mas_set_err(mas, -EINVAL);
5707 * Storing is the same operation as insert with the added caveat that it
5708 * can overwrite entries. Although this seems simple enough, one may
5709 * want to examine what happens if a single store operation was to
5710 * overwrite multiple entries within a self-balancing B-Tree.
5712 mas_wr_store_setup(&wr_mas);
5713 mas_wr_store_entry(&wr_mas);
5714 return wr_mas.content;
5716 EXPORT_SYMBOL_GPL(mas_store);
5719 * mas_store_gfp() - Store a value into the tree.
5720 * @mas: The maple state
5721 * @entry: The entry to store
5722 * @gfp: The GFP_FLAGS to use for allocations if necessary.
5724 * Return: 0 on success, -EINVAL on invalid request, -ENOMEM if memory could
5725 * not be allocated.
5727 int mas_store_gfp(struct ma_state *mas, void *entry, gfp_t gfp)
5729 MA_WR_STATE(wr_mas, mas, entry);
5731 mas_wr_store_setup(&wr_mas);
5732 trace_ma_write(__func__, mas, 0, entry);
5734 mas_wr_store_entry(&wr_mas);
5735 if (unlikely(mas_nomem(mas, gfp)))
5738 if (unlikely(mas_is_err(mas)))
5739 return xa_err(mas->node);
5743 EXPORT_SYMBOL_GPL(mas_store_gfp);
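/*
 * Sketch of the advanced store API: write one range under the internal
 * tree lock, letting mas_store_gfp() retry allocations. The range and
 * value are hypothetical.
 */
static int __maybe_unused mas_store_example(struct maple_tree *mt)
{
	MA_STATE(mas, mt, 40, 60);	/* operate on the range [40, 60] */
	int ret;

	mas_lock(&mas);
	ret = mas_store_gfp(&mas, xa_mk_value(42), GFP_KERNEL);
	mas_unlock(&mas);

	return ret;
}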
5746 * mas_store_prealloc() - Store a value into the tree using memory
5747 * preallocated in the maple state.
5748 * @mas: The maple state
5749 * @entry: The entry to store.
5751 void mas_store_prealloc(struct ma_state *mas, void *entry)
5753 MA_WR_STATE(wr_mas, mas, entry);
5755 mas_wr_store_setup(&wr_mas);
5756 trace_ma_write(__func__, mas, 0, entry);
5757 mas_wr_store_entry(&wr_mas);
5758 BUG_ON(mas_is_err(mas));
5761 EXPORT_SYMBOL_GPL(mas_store_prealloc);
5764 * mas_preallocate() - Preallocate enough nodes for a store operation
5765 * @mas: The maple state
5766 * @gfp: The GFP_FLAGS to use for allocations.
5768 * Return: 0 on success, -ENOMEM if memory could not be allocated.
5770 int mas_preallocate(struct ma_state *mas, gfp_t gfp)
5774 mas_node_count_gfp(mas, 1 + mas_mt_height(mas) * 3, gfp);
5775 mas->mas_flags |= MA_STATE_PREALLOC;
5776 if (likely(!mas_is_err(mas)))
5779 mas_set_alloc_req(mas, 0);
5780 ret = xa_err(mas->node);
5786 EXPORT_SYMBOL_GPL(mas_preallocate);
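/*
 * Sketch of the preallocation pattern: allocate nodes up front so the
 * store itself cannot fail. Names and the range are illustrative only.
 */
static int __maybe_unused mas_prealloc_example(struct maple_tree *mt,
		void *entry)
{
	MA_STATE(mas, mt, 100, 199);
	int ret;

	ret = mas_preallocate(&mas, GFP_KERNEL);	/* may sleep */
	if (ret)
		return ret;

	mas_lock(&mas);
	mas_store_prealloc(&mas, entry);	/* releases unused nodes */
	mas_unlock(&mas);

	return 0;
}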
5789 * mas_destroy() - destroy a maple state.
5790 * @mas: The maple state
5792 * Upon completion, check the left-most node and rebalance against the node to
5793 * the right if necessary. Frees any allocated nodes associated with this
5794 * maple state.
5796 void mas_destroy(struct ma_state *mas)
5798 struct maple_alloc *node;
5799 unsigned long total;
5802 * When using mas_for_each() to insert an expected number of elements,
5803 * it is possible that the number inserted is less than the expected
5804 * number. To fix an invalid final node, a check is performed here to
5805 * rebalance the previous node with the final node.
5807 if (mas->mas_flags & MA_STATE_REBALANCE) {
5810 if (mas_is_start(mas))
5813 mtree_range_walk(mas);
5814 end = mas_data_end(mas) + 1;
5815 if (end < mt_min_slot_count(mas->node) - 1)
5816 mas_destroy_rebalance(mas, end);
5818 mas->mas_flags &= ~MA_STATE_REBALANCE;
5820 mas->mas_flags &= ~(MA_STATE_BULK|MA_STATE_PREALLOC);
5822 total = mas_allocated(mas);
5825 mas->alloc = node->slot[0];
5826 if (node->node_count > 1) {
5827 size_t count = node->node_count - 1;
5829 mt_free_bulk(count, (void __rcu **)&node->slot[1]);
5832 kmem_cache_free(maple_node_cache, node);
5838 EXPORT_SYMBOL_GPL(mas_destroy);
5841 * mas_expected_entries() - Set the expected number of entries that will be inserted.
5842 * @mas: The maple state
5843 * @nr_entries: The number of expected entries.
5845 * This will attempt to pre-allocate enough nodes to store the expected number
5846 * of entries. The allocations will occur using the bulk allocator interface
5847 * for speed. Please call mas_destroy() on the @mas after inserting the entries
5848 * to ensure any unused nodes are freed.
5850 * Return: 0 on success, -ENOMEM if memory could not be allocated.
5852 int mas_expected_entries(struct ma_state *mas, unsigned long nr_entries)
5854 int nonleaf_cap = MAPLE_ARANGE64_SLOTS - 2;
5855 struct maple_enode *enode = mas->node;
5860 * Sometimes it is necessary to duplicate a tree to a new tree, such as
5861 * forking a process and duplicating the VMAs from one tree to a new
5862 * tree. When such a situation arises, it is known that the new tree is
5863 * not going to be used until the entire tree is populated. For
5864 * performance reasons, it is best to use a bulk load with RCU disabled.
5865 * This allows for optimistic splitting that favours the left and reuse
5866 * of nodes during the operation.
5869 /* Optimize splitting for bulk insert in-order */
5870 mas->mas_flags |= MA_STATE_BULK;
5873 * Avoid overflow, assume a gap between each entry and a trailing null.
5874 * If this is wrong, it just means allocation can happen during
5875 * insertion of entries.
5877 nr_nodes = max(nr_entries, nr_entries * 2 + 1);
5878 if (!mt_is_alloc(mas->tree))
5879 nonleaf_cap = MAPLE_RANGE64_SLOTS - 2;
5881 /* Leaves; reduce slots to keep space for expansion */
5882 nr_nodes = DIV_ROUND_UP(nr_nodes, MAPLE_RANGE64_SLOTS - 2);
5883 /* Internal nodes */
5884 nr_nodes += DIV_ROUND_UP(nr_nodes, nonleaf_cap);
5885 /* Add working room for split (2 nodes) + new parents */
5886 mas_node_count(mas, nr_nodes + 3);
5888 /* Detect if allocations run out */
5889 mas->mas_flags |= MA_STATE_PREALLOC;
5891 if (!mas_is_err(mas))
5894 ret = xa_err(mas->node);
5900 EXPORT_SYMBOL_GPL(mas_expected_entries);
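/*
 * Bulk-load sketch for the pattern described above: reserve nodes for a
 * known number of in-order insertions, then let mas_destroy() rebalance
 * the final node and free leftovers. The source array, spacing, and
 * count are hypothetical.
 */
static int __maybe_unused mt_bulk_load_example(struct maple_tree *mt,
		void **src, unsigned long nr)
{
	MA_STATE(mas, mt, 0, 0);
	unsigned long i;
	int ret;

	mas_lock(&mas);
	ret = mas_expected_entries(&mas, nr);
	if (ret)
		goto done;

	for (i = 0; i < nr; i++) {
		mas_set_range(&mas, i * 10, i * 10 + 9);
		mas_store(&mas, src[i]);
	}
done:
	mas_destroy(&mas);
	mas_unlock(&mas);

	return ret;
}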
5903 * mas_next() - Get the next entry.
5904 * @mas: The maple state
5905 * @max: The maximum index to check.
5907 * Returns the next entry after @mas->index.
5908 * Must hold rcu_read_lock or the write lock.
5909 * Can return the zero entry.
5911 * Return: The next entry or %NULL
5913 void *mas_next(struct ma_state *mas, unsigned long max)
5915 if (mas_is_none(mas) || mas_is_paused(mas))
5916 mas->node = MAS_START;
5918 if (mas_is_start(mas))
5919 mas_walk(mas); /* Retries on dead nodes handled by mas_walk */
5921 if (mas_is_ptr(mas)) {
5924 mas->last = ULONG_MAX;
5929 if (mas->last == ULONG_MAX)
5932 /* Retries on dead nodes handled by mas_next_entry */
5933 return mas_next_entry(mas, max);
5935 EXPORT_SYMBOL_GPL(mas_next);
5938 * mt_next() - get the next value in the maple tree
5939 * @mt: The maple tree
5940 * @index: The start index
5941 * @max: The maximum index to check
5943 * Return: The entry at @index or higher, or %NULL if nothing is found.
5945 void *mt_next(struct maple_tree *mt, unsigned long index, unsigned long max)
5948 MA_STATE(mas, mt, index, index);
5951 entry = mas_next(&mas, max);
5955 EXPORT_SYMBOL_GPL(mt_next);
5958 * mas_prev() - Get the previous entry
5959 * @mas: The maple state
5960 * @min: The minimum value to check.
5962 * Must hold rcu_read_lock or the write lock.
5963 * Will reset mas to MAS_START if the node is MAS_NONE. Will stop on nodes
5964 * that are not searchable.
5966 * Return: the previous value or %NULL.
5968 void *mas_prev(struct ma_state *mas, unsigned long min)
5971 /* Nothing comes before 0 */
5973 mas->node = MAS_NONE;
5977 if (unlikely(mas_is_ptr(mas)))
5980 if (mas_is_none(mas) || mas_is_paused(mas))
5981 mas->node = MAS_START;
5983 if (mas_is_start(mas)) {
5989 if (mas_is_ptr(mas)) {
5995 mas->index = mas->last = 0;
5996 return mas_root_locked(mas);
5998 return mas_prev_entry(mas, min);
6000 EXPORT_SYMBOL_GPL(mas_prev);
6003 * mt_prev() - get the previous value in the maple tree
6004 * @mt: The maple tree
6005 * @index: The start index
6006 * @min: The minimum index to check
6008 * Return: The entry at @index or lower, or %NULL if nothing is found.
6010 void *mt_prev(struct maple_tree *mt, unsigned long index, unsigned long min)
6013 MA_STATE(mas, mt, index, index);
6016 entry = mas_prev(&mas, min);
6020 EXPORT_SYMBOL_GPL(mt_prev);
6023 * mas_pause() - Pause a mas_find/mas_for_each to drop the lock.
6024 * @mas: The maple state to pause
6026 * Some users need to pause a walk and drop the lock they're holding in
6027 * order to yield to a higher priority thread or carry out an operation
6028 * on an entry. Those users should call this function before they drop
6029 * the lock. It resets the @mas to be suitable for the next iteration
6030 * of the loop after the user has reacquired the lock. If most entries
6031 * found during a walk require you to call mas_pause(), the mt_for_each()
6032 * iterator may be more appropriate.
6035 void mas_pause(struct ma_state *mas)
6037 mas->node = MAS_PAUSE;
6039 EXPORT_SYMBOL_GPL(mas_pause);
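/*
 * Sketch of the pause pattern described above: drop the RCU read lock
 * mid-iteration and resume afterwards. The rescheduling policy here is
 * illustrative.
 */
static void __maybe_unused mas_pause_example(struct maple_tree *mt)
{
	MA_STATE(mas, mt, 0, 0);
	void *entry;

	rcu_read_lock();
	mas_for_each(&mas, entry, ULONG_MAX) {
		/* ... process entry ... */
		if (need_resched()) {
			mas_pause(&mas);
			rcu_read_unlock();
			cond_resched();
			rcu_read_lock();
		}
	}
	rcu_read_unlock();
}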
6042 * mas_find() - On the first call, find the entry at or after mas->index up to
6043 * %max. Otherwise, find the entry after mas->index.
6044 * @mas: The maple state
6045 * @max: The maximum value to check.
6047 * Must hold rcu_read_lock or the write lock.
6048 * If an entry exists, last and index are updated accordingly.
6049 * May set @mas->node to MAS_NONE.
6051 * Return: The entry or %NULL.
6053 void *mas_find(struct ma_state *mas, unsigned long max)
6055 if (unlikely(mas_is_paused(mas))) {
6056 if (unlikely(mas->last == ULONG_MAX)) {
6057 mas->node = MAS_NONE;
6060 mas->node = MAS_START;
6061 mas->index = ++mas->last;
6064 if (unlikely(mas_is_none(mas)))
6065 mas->node = MAS_START;
6067 if (unlikely(mas_is_start(mas))) {
6068 /* First run or continue */
6071 if (mas->index > max)
6074 entry = mas_walk(mas);
6079 if (unlikely(!mas_searchable(mas)))
6082 /* Retries on dead nodes handled by mas_next_entry */
6083 return mas_next_entry(mas, max);
6085 EXPORT_SYMBOL_GPL(mas_find);
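/*
 * Sketch: the open-coded equivalent of mas_for_each(), counting entries
 * in [0, max]. Illustrative only.
 */
static unsigned long __maybe_unused mas_find_example(struct maple_tree *mt,
		unsigned long max)
{
	MA_STATE(mas, mt, 0, 0);
	unsigned long count = 0;
	void *entry;

	rcu_read_lock();
	while ((entry = mas_find(&mas, max)) != NULL)
		count++;	/* mas.index/mas.last span each entry */
	rcu_read_unlock();

	return count;
}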
6088 * mas_find_rev() - On the first call, find the first non-null entry at or below
6089 * mas->index down to %min. Otherwise find the first non-null entry below
6090 * mas->index down to %min.
6091 * @mas: The maple state
6092 * @min: The minimum value to check.
6094 * Must hold rcu_read_lock or the write lock.
6095 * If an entry exists, last and index are updated accordingly.
6096 * May set @mas->node to MAS_NONE.
6098 * Return: The entry or %NULL.
6100 void *mas_find_rev(struct ma_state *mas, unsigned long min)
6102 if (unlikely(mas_is_paused(mas))) {
6103 if (unlikely(mas->last == ULONG_MAX)) {
6104 mas->node = MAS_NONE;
6107 mas->node = MAS_START;
6108 mas->last = --mas->index;
6111 if (unlikely(mas_is_start(mas))) {
6112 /* First run or continue */
6115 if (mas->index < min)
6118 entry = mas_walk(mas);
6123 if (unlikely(!mas_searchable(mas)))
6126 if (mas->index < min)
6129 /* Retries on dead nodes handled by mas_prev_entry */
6130 return mas_prev_entry(mas, min);
6132 EXPORT_SYMBOL_GPL(mas_find_rev);
6135 * mas_erase() - Find the range in which index resides and erase the
6136 * entire range.
6137 * @mas: The maple state
6139 * Must hold the write lock.
6140 * Searches for @mas->index, sets @mas->index and @mas->last to the range and
6141 * erases that range.
6143 * Return: the entry that was erased or %NULL, @mas->index and @mas->last are updated.
6145 void *mas_erase(struct ma_state *mas)
6148 MA_WR_STATE(wr_mas, mas, NULL);
6150 if (mas_is_none(mas) || mas_is_paused(mas))
6151 mas->node = MAS_START;
6153 /* Retry unnecessary when holding the write lock. */
6154 entry = mas_state_walk(mas);
6159 /* Must reset to ensure spanning writes of last slot are detected */
6161 mas_wr_store_setup(&wr_mas);
6162 mas_wr_store_entry(&wr_mas);
6163 if (mas_nomem(mas, GFP_KERNEL))
6168 EXPORT_SYMBOL_GPL(mas_erase);
6171 * mas_nomem() - Check if there was an error allocating and do the allocation
6172 * if necessary. If there are allocations, then free them.
6173 * @mas: The maple state
6174 * @gfp: The GFP_FLAGS to use for allocations
6175 * Return: true on allocation, false otherwise.
6177 bool mas_nomem(struct ma_state *mas, gfp_t gfp)
6178 __must_hold(mas->tree->lock)
6180 if (likely(mas->node != MA_ERROR(-ENOMEM))) {
6185 if (gfpflags_allow_blocking(gfp) && !mt_external_lock(mas->tree)) {
6186 mtree_unlock(mas->tree);
6187 mas_alloc_nodes(mas, gfp);
6188 mtree_lock(mas->tree);
6190 mas_alloc_nodes(mas, gfp);
6193 if (!mas_allocated(mas))
6196 mas->node = MAS_START;
6200 void __init maple_tree_init(void)
6202 maple_node_cache = kmem_cache_create("maple_node",
6203 sizeof(struct maple_node), sizeof(struct maple_node),
6208 * mtree_load() - Load a value stored in a maple tree
6209 * @mt: The maple tree
6210 * @index: The index to load
6212 * Return: the entry or %NULL
6214 void *mtree_load(struct maple_tree *mt, unsigned long index)
6216 MA_STATE(mas, mt, index, index);
6219 trace_ma_read(__func__, &mas);
6222 entry = mas_start(&mas);
6223 if (unlikely(mas_is_none(&mas)))
6226 if (unlikely(mas_is_ptr(&mas))) {
6233 entry = mtree_lookup_walk(&mas);
6234 if (!entry && unlikely(mas_is_start(&mas)))
6238 if (xa_is_zero(entry))
6243 EXPORT_SYMBOL(mtree_load);
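/*
 * Minimal normal-API sketch: store a value entry, then load it back.
 * The static tree and the index/value are hypothetical.
 */
static DEFINE_MTREE(example_mt);

static int __maybe_unused mtree_load_example(void)
{
	int ret;

	ret = mtree_store(&example_mt, 5, xa_mk_value(0xcafe), GFP_KERNEL);
	if (ret)
		return ret;

	/* mtree_load() handles its own RCU locking. */
	return xa_to_value(mtree_load(&example_mt, 5)) == 0xcafe ? 0 : -EINVAL;
}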
6246 * mtree_store_range() - Store an entry at a given range.
6247 * @mt: The maple tree
6248 * @index: The start of the range
6249 * @last: The end of the range
6250 * @entry: The entry to store
6251 * @gfp: The GFP_FLAGS to use for allocations
6253 * Return: 0 on success, -EINVAL on invalid request, -ENOMEM if memory could
6254 * not be allocated.
6256 int mtree_store_range(struct maple_tree *mt, unsigned long index,
6257 unsigned long last, void *entry, gfp_t gfp)
6259 MA_STATE(mas, mt, index, last);
6260 MA_WR_STATE(wr_mas, &mas, entry);
6262 trace_ma_write(__func__, &mas, 0, entry);
6263 if (WARN_ON_ONCE(xa_is_advanced(entry)))
6271 mas_wr_store_entry(&wr_mas);
6272 if (mas_nomem(&mas, gfp))
6276 if (mas_is_err(&mas))
6277 return xa_err(mas.node);
6281 EXPORT_SYMBOL(mtree_store_range);
6284 * mtree_store() - Store an entry at a given index.
6285 * @mt: The maple tree
6286 * @index: The index to store the value
6287 * @entry: The entry to store
6288 * @gfp: The GFP_FLAGS to use for allocations
6290 * Return: 0 on success, -EINVAL on invalid request, -ENOMEM if memory could
6291 * not be allocated.
6293 int mtree_store(struct maple_tree *mt, unsigned long index, void *entry,
6296 return mtree_store_range(mt, index, index, entry, gfp);
6298 EXPORT_SYMBOL(mtree_store);
6301 * mtree_insert_range() - Insert an entry at a given range if there is no value.
6302 * @mt: The maple tree
6303 * @first: The start of the range
6304 * @last: The end of the range
6305 * @entry: The entry to store
6306 * @gfp: The GFP_FLAGS to use for allocations.
6308 * Return: 0 on success, -EEXIST if the range is occupied, -EINVAL on invalid
6309 * request, -ENOMEM if memory could not be allocated.
6311 int mtree_insert_range(struct maple_tree *mt, unsigned long first,
6312 unsigned long last, void *entry, gfp_t gfp)
6314 MA_STATE(ms, mt, first, last);
6316 if (WARN_ON_ONCE(xa_is_advanced(entry)))
6324 mas_insert(&ms, entry);
6325 if (mas_nomem(&ms, gfp))
6329 if (mas_is_err(&ms))
6330 return xa_err(ms.node);
6334 EXPORT_SYMBOL(mtree_insert_range);
6337 * mtree_insert() - Insert an entry at a given index if there is no value.
6338 * @mt: The maple tree
6339 * @index: The index to store the value
6340 * @entry: The entry to store
6341 * @gfp: The GFP_FLAGS to use for allocations.
6343 * Return: 0 on success, -EEXIST if the range is occupied, -EINVAL on invalid
6344 * request, -ENOMEM if memory could not be allocated.
6346 int mtree_insert(struct maple_tree *mt, unsigned long index, void *entry,
6349 return mtree_insert_range(mt, index, index, entry, gfp);
6351 EXPORT_SYMBOL(mtree_insert);
6353 int mtree_alloc_range(struct maple_tree *mt, unsigned long *startp,
6354 void *entry, unsigned long size, unsigned long min,
6355 unsigned long max, gfp_t gfp)
6359 MA_STATE(mas, mt, min, max - size);
6360 if (!mt_is_alloc(mt))
6363 if (WARN_ON_ONCE(mt_is_reserved(entry)))
6379 mas.last = max - size;
6380 ret = mas_alloc(&mas, entry, size, startp);
6381 if (mas_nomem(&mas, gfp))
6387 EXPORT_SYMBOL(mtree_alloc_range);
6389 int mtree_alloc_rrange(struct maple_tree *mt, unsigned long *startp,
6390 void *entry, unsigned long size, unsigned long min,
6391 unsigned long max, gfp_t gfp)
6395 MA_STATE(mas, mt, min, max - size);
6396 if (!mt_is_alloc(mt))
6399 if (WARN_ON_ONCE(mt_is_reserved(entry)))
6413 ret = mas_rev_alloc(&mas, min, max, entry, size, startp);
6414 if (mas_nomem(&mas, gfp))
6420 EXPORT_SYMBOL(mtree_alloc_rrange);
6423 * mtree_erase() - Find an index and erase the entire range.
6424 * @mt: The maple tree
6425 * @index: The index to erase
6427 * Erasing is the same as a walk to an entry then a store of a NULL to that
6428 * ENTIRE range. In fact, it is implemented as such using the advanced API.
6430 * Return: The entry stored at the @index or %NULL
6432 void *mtree_erase(struct maple_tree *mt, unsigned long index)
6436 MA_STATE(mas, mt, index, index);
6437 trace_ma_op(__func__, &mas);
6440 entry = mas_erase(&mas);
6445 EXPORT_SYMBOL(mtree_erase);
6448 * __mt_destroy() - Walk and free all nodes of a locked maple tree.
6449 * @mt: The maple tree
6451 * Note: Does not handle locking.
6453 void __mt_destroy(struct maple_tree *mt)
6455 void *root = mt_root_locked(mt);
6457 rcu_assign_pointer(mt->ma_root, NULL);
6458 if (xa_is_node(root))
6459 mte_destroy_walk(root, mt);
6463 EXPORT_SYMBOL_GPL(__mt_destroy);
6466 * mtree_destroy() - Destroy a maple tree
6467 * @mt: The maple tree
6469 * Frees all resources used by the tree. Handles locking.
6471 void mtree_destroy(struct maple_tree *mt)
6477 EXPORT_SYMBOL(mtree_destroy);
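/*
 * Teardown sketch: erase one range, then free every node in the tree.
 * The index is hypothetical; after mtree_destroy() the tree is empty
 * and may be reused.
 */
static void __maybe_unused mtree_teardown_example(struct maple_tree *mt)
{
	void *old;

	old = mtree_erase(mt, 5);	/* NULLs the entire range containing 5 */
	(void)old;

	mtree_destroy(mt);		/* handles its own locking */
}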
6480 * mt_find() - Search from the start up until an entry is found.
6481 * @mt: The maple tree
6482 * @index: Pointer which contains the start location of the search
6483 * @max: The maximum value to check
6485 * Handles locking. @index will be incremented to one beyond the range.
6487 * Return: The entry at or after the @index or %NULL
6489 void *mt_find(struct maple_tree *mt, unsigned long *index, unsigned long max)
6491 MA_STATE(mas, mt, *index, *index);
6493 #ifdef CONFIG_DEBUG_MAPLE_TREE
6494 unsigned long copy = *index;
6497 trace_ma_read(__func__, &mas);
6504 entry = mas_state_walk(&mas);
6505 if (mas_is_start(&mas))
6508 if (unlikely(xa_is_zero(entry)))
6514 while (mas_searchable(&mas) && (mas.index < max)) {
6515 entry = mas_next_entry(&mas, max);
6516 if (likely(entry && !xa_is_zero(entry)))
6520 if (unlikely(xa_is_zero(entry)))
6524 if (likely(entry)) {
6525 *index = mas.last + 1;
6526 #ifdef CONFIG_DEBUG_MAPLE_TREE
6527 if ((*index) && (*index) <= copy)
6528 pr_err("index not increased! %lx <= %lx\n",
6530 MT_BUG_ON(mt, (*index) && ((*index) <= copy));
6536 EXPORT_SYMBOL(mt_find);
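/*
 * Iteration sketch matching the mt_for_each() helper: walk all entries
 * with mt_find()/mt_find_after(). Illustrative only.
 */
static void __maybe_unused mt_find_example(struct maple_tree *mt)
{
	unsigned long index = 0;
	void *entry;

	for (entry = mt_find(mt, &index, ULONG_MAX); entry;
	     entry = mt_find_after(mt, &index, ULONG_MAX)) {
		/* @index now points one past the range of @entry. */
		pr_debug("entry %p, next search at %lu\n", entry, index);
	}
}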
6539 * mt_find_after() - Search from the start up until an entry is found.
6540 * @mt: The maple tree
6541 * @index: Pointer which contains the start location of the search
6542 * @max: The maximum value to check
6544 * Handles locking, detects wrapping on index == 0
6546 * Return: The entry at or after the @index or %NULL
6548 void *mt_find_after(struct maple_tree *mt, unsigned long *index,
6554 return mt_find(mt, index, max);
6556 EXPORT_SYMBOL(mt_find_after);
6558 #ifdef CONFIG_DEBUG_MAPLE_TREE
6559 atomic_t maple_tree_tests_run;
6560 EXPORT_SYMBOL_GPL(maple_tree_tests_run);
6561 atomic_t maple_tree_tests_passed;
6562 EXPORT_SYMBOL_GPL(maple_tree_tests_passed);
6565 extern void kmem_cache_set_non_kernel(struct kmem_cache *, unsigned int);
6566 void mt_set_non_kernel(unsigned int val)
6568 kmem_cache_set_non_kernel(maple_node_cache, val);
6571 extern unsigned long kmem_cache_get_alloc(struct kmem_cache *);
6572 unsigned long mt_get_alloc_size(void)
6574 return kmem_cache_get_alloc(maple_node_cache);
6577 extern void kmem_cache_zero_nr_tallocated(struct kmem_cache *);
6578 void mt_zero_nr_tallocated(void)
6580 kmem_cache_zero_nr_tallocated(maple_node_cache);
6583 extern unsigned int kmem_cache_nr_tallocated(struct kmem_cache *);
6584 unsigned int mt_nr_tallocated(void)
6586 return kmem_cache_nr_tallocated(maple_node_cache);
6589 extern unsigned int kmem_cache_nr_allocated(struct kmem_cache *);
6590 unsigned int mt_nr_allocated(void)
6592 return kmem_cache_nr_allocated(maple_node_cache);
6596 * mas_dead_node() - Check if the maple state is pointing to a dead node.
6597 * @mas: The maple state
6598 * @index: The index to restore in @mas.
6600 * Used in test code.
6601 * Return: 1 if @mas has been reset to MAS_START, 0 otherwise.
6603 static inline int mas_dead_node(struct ma_state *mas, unsigned long index)
6605 if (unlikely(!mas_searchable(mas) || mas_is_start(mas)))
6608 if (likely(!mte_dead_node(mas->node)))
6611 mas_rewalk(mas, index);
6615 void mt_cache_shrink(void)
6620 * mt_cache_shrink() - For testing, don't use this.
6622 * Certain testcases can trigger an OOM when combined with other memory
6623 * debugging configuration options. This function is used to reduce the
6624 * possibility of an out of memory event due to kmem_cache objects remaining
6625 * around for longer than usual.
6627 void mt_cache_shrink(void)
6629 kmem_cache_shrink(maple_node_cache);
6632 EXPORT_SYMBOL_GPL(mt_cache_shrink);
6634 #endif /* not defined __KERNEL__ */
6636 * mas_get_slot() - Get the entry in the maple state node stored at @offset.
6637 * @mas: The maple state
6638 * @offset: The offset into the slot array to fetch.
6640 * Return: The entry stored at @offset.
6642 static inline struct maple_enode *mas_get_slot(struct ma_state *mas,
6643 unsigned char offset)
6645 return mas_slot(mas, ma_slots(mas_mn(mas), mte_node_type(mas->node)),
6651 * mas_first_entry() - Go to the first leaf and find the first entry.
6652 * @mas: the maple state.
6653 * @limit: the maximum index to check.
6654 * @mn: The node to start from.
6655 * @mt: The node type of @mn.
6656 * Sets mas->offset to the offset of the entry, mas->index to the range minimum.
6658 * Return: The first entry or MAS_NONE.
6660 static inline void *mas_first_entry(struct ma_state *mas, struct maple_node *mn,
6661 unsigned long limit, enum maple_type mt)
6665 unsigned long *pivots;
6669 mas->index = mas->min;
6670 if (mas->index > limit)
6675 while (likely(!ma_is_leaf(mt))) {
6676 MT_BUG_ON(mas->tree, mte_dead_node(mas->node));
6677 slots = ma_slots(mn, mt);
6678 entry = mas_slot(mas, slots, 0);
6679 pivots = ma_pivots(mn, mt);
6680 if (unlikely(ma_dead_node(mn)))
6685 mt = mte_node_type(mas->node);
6687 MT_BUG_ON(mas->tree, mte_dead_node(mas->node));
6690 slots = ma_slots(mn, mt);
6691 entry = mas_slot(mas, slots, 0);
6692 if (unlikely(ma_dead_node(mn)))
6695 /* Slot 0 or 1 must be set */
6696 if (mas->index > limit)
6703 entry = mas_slot(mas, slots, 1);
6704 pivots = ma_pivots(mn, mt);
6705 if (unlikely(ma_dead_node(mn)))
6708 mas->index = pivots[0] + 1;
6709 if (mas->index > limit)
6716 if (likely(!ma_dead_node(mn)))
6717 mas->node = MAS_NONE;
6721 /* Depth first search, post-order */
6722 static void mas_dfs_postorder(struct ma_state *mas, unsigned long max)
6725 struct maple_enode *p = MAS_NONE, *mn = mas->node;
6726 unsigned long p_min, p_max;
6728 mas_next_node(mas, mas_mn(mas), max);
6729 if (!mas_is_none(mas))
6732 if (mte_is_root(mn))
6741 mas_prev_node(mas, 0);
6742 } while (!mas_is_none(mas));
6749 /* Tree validations */
6750 static void mt_dump_node(const struct maple_tree *mt, void *entry,
6751 unsigned long min, unsigned long max, unsigned int depth,
6752 enum mt_dump_format format);
6753 static void mt_dump_range(unsigned long min, unsigned long max,
6754 unsigned int depth, enum mt_dump_format format)
6756 static const char spaces[] = " ";
6761 pr_info("%.*s%lx: ", depth * 2, spaces, min);
6763 pr_info("%.*s%lx-%lx: ", depth * 2, spaces, min, max);
6768 pr_info("%.*s%lu: ", depth * 2, spaces, min);
6770 pr_info("%.*s%lu-%lu: ", depth * 2, spaces, min, max);
6774 static void mt_dump_entry(void *entry, unsigned long min, unsigned long max,
6775 unsigned int depth, enum mt_dump_format format)
6777 mt_dump_range(min, max, depth, format);
6779 if (xa_is_value(entry))
6780 pr_cont("value %ld (0x%lx) [%p]\n", xa_to_value(entry),
6781 xa_to_value(entry), entry);
6782 else if (xa_is_zero(entry))
6783 pr_cont("zero (%ld)\n", xa_to_internal(entry));
6784 else if (mt_is_reserved(entry))
6785 pr_cont("UNKNOWN ENTRY (%p)\n", entry);
6787 pr_cont("%p\n", entry);
6790 static void mt_dump_range64(const struct maple_tree *mt, void *entry,
6791 unsigned long min, unsigned long max, unsigned int depth,
6792 enum mt_dump_format format)
6794 struct maple_range_64 *node = &mte_to_node(entry)->mr64;
6795 bool leaf = mte_is_leaf(entry);
6796 unsigned long first = min;
6799 pr_cont(" contents: ");
6800 for (i = 0; i < MAPLE_RANGE64_SLOTS - 1; i++) {
6803 pr_cont("%p %lX ", node->slot[i], node->pivot[i]);
6807 pr_cont("%p %lu ", node->slot[i], node->pivot[i]);
6810 pr_cont("%p\n", node->slot[i]);
6811 for (i = 0; i < MAPLE_RANGE64_SLOTS; i++) {
6812 unsigned long last = max;
6814 if (i < (MAPLE_RANGE64_SLOTS - 1))
6815 last = node->pivot[i];
6816 else if (!node->slot[i] && max != mt_node_max(entry))
6818 if (last == 0 && i > 0)
6821 mt_dump_entry(mt_slot(mt, node->slot, i),
6822 first, last, depth + 1, format);
6823 else if (node->slot[i])
6824 mt_dump_node(mt, mt_slot(mt, node->slot, i),
6825 first, last, depth + 1, format);
6832 pr_err("node %p last (%lx) > max (%lx) at pivot %d!\n",
6833 node, last, max, i);
6837 pr_err("node %p last (%lu) > max (%lu) at pivot %d!\n",
6838 node, last, max, i);
6845 static void mt_dump_arange64(const struct maple_tree *mt, void *entry,
6846 unsigned long min, unsigned long max, unsigned int depth,
6847 enum mt_dump_format format)
6849 struct maple_arange_64 *node = &mte_to_node(entry)->ma64;
6850 bool leaf = mte_is_leaf(entry);
6851 unsigned long first = min;
6854 pr_cont(" contents: ");
6855 for (i = 0; i < MAPLE_ARANGE64_SLOTS; i++)
6856 pr_cont("%lu ", node->gap[i]);
6857 pr_cont("| %02X %02X| ", node->meta.end, node->meta.gap);
6858 for (i = 0; i < MAPLE_ARANGE64_SLOTS - 1; i++)
6859 pr_cont("%p %lu ", node->slot[i], node->pivot[i]);
6860 pr_cont("%p\n", node->slot[i]);
6861 for (i = 0; i < MAPLE_ARANGE64_SLOTS; i++) {
6862 unsigned long last = max;
6864 if (i < (MAPLE_ARANGE64_SLOTS - 1))
6865 last = node->pivot[i];
6866 else if (!node->slot[i])
6868 if (last == 0 && i > 0)
6871 mt_dump_entry(mt_slot(mt, node->slot, i),
6872 first, last, depth + 1, format);
6873 else if (node->slot[i])
6874 mt_dump_node(mt, mt_slot(mt, node->slot, i),
6875 first, last, depth + 1, format);
6880 pr_err("node %p last (%lu) > max (%lu) at pivot %d!\n",
6881 node, last, max, i);
6888 static void mt_dump_node(const struct maple_tree *mt, void *entry,
6889 unsigned long min, unsigned long max, unsigned int depth,
6890 enum mt_dump_format format)
6892 struct maple_node *node = mte_to_node(entry);
6893 unsigned int type = mte_node_type(entry);
6896 mt_dump_range(min, max, depth, format);
6898 pr_cont("node %p depth %d type %d parent %p", node, depth, type,
6899 node ? node->parent : NULL);
6903 for (i = 0; i < MAPLE_NODE_SLOTS; i++) {
6905 pr_cont("OUT OF RANGE: ");
6906 mt_dump_entry(mt_slot(mt, node->slot, i),
6907 min + i, min + i, depth, format);
6911 case maple_range_64:
6912 mt_dump_range64(mt, entry, min, max, depth, format);
6914 case maple_arange_64:
6915 mt_dump_arange64(mt, entry, min, max, depth, format);
6919 pr_cont(" UNKNOWN TYPE\n");
6923 void mt_dump(const struct maple_tree *mt, enum mt_dump_format format)
6925 void *entry = rcu_dereference_check(mt->ma_root, mt_locked(mt));
6927 pr_info("maple_tree(%p) flags %X, height %u root %p\n",
6928 mt, mt->ma_flags, mt_height(mt), entry);
6929 if (!xa_is_node(entry))
6930 mt_dump_entry(entry, 0, 0, 0, format);
6932 mt_dump_node(mt, entry, 0, mt_node_max(entry), 0, format);
6934 EXPORT_SYMBOL_GPL(mt_dump);
6937 * Calculate the maximum gap in a node and check if that's what is reported in
6938 * the parent (unless root).
6940 static void mas_validate_gaps(struct ma_state *mas)
6942 struct maple_enode *mte = mas->node;
6943 struct maple_node *p_mn;
6944 unsigned long gap = 0, max_gap = 0;
6945 unsigned long p_end, p_start = mas->min;
6946 unsigned char p_slot;
6947 unsigned long *gaps = NULL;
6948 unsigned long *pivots = ma_pivots(mte_to_node(mte), mte_node_type(mte));
6951 if (ma_is_dense(mte_node_type(mte))) {
6952 for (i = 0; i < mt_slot_count(mte); i++) {
6953 if (mas_get_slot(mas, i)) {
6964 gaps = ma_gaps(mte_to_node(mte), mte_node_type(mte));
6965 for (i = 0; i < mt_slot_count(mte); i++) {
6966 p_end = mas_logical_pivot(mas, pivots, i, mte_node_type(mte));
6969 if (mas_get_slot(mas, i)) {
6974 gap += p_end - p_start + 1;
6976 void *entry = mas_get_slot(mas, i);
6980 if (gap != p_end - p_start + 1) {
6981 pr_err("%p[%u] -> %p %lu != %lu - %lu + 1\n",
6983 mas_get_slot(mas, i), gap,
6985 mt_dump(mas->tree, mt_dump_hex);
6987 MT_BUG_ON(mas->tree,
6988 gap != p_end - p_start + 1);
6991 if (gap > p_end - p_start + 1) {
6992 pr_err("%p[%u] %lu >= %lu - %lu + 1 (%lu)\n",
6993 mas_mn(mas), i, gap, p_end, p_start,
6994 p_end - p_start + 1);
6995 MT_BUG_ON(mas->tree,
6996 gap > p_end - p_start + 1);
7004 p_start = p_end + 1;
7005 if (p_end >= mas->max)
7010 if (mte_is_root(mte))
7013 p_slot = mte_parent_slot(mas->node);
7014 p_mn = mte_parent(mte);
7015 MT_BUG_ON(mas->tree, max_gap > mas->max);
7016 if (ma_gaps(p_mn, mas_parent_type(mas, mte))[p_slot] != max_gap) {
7017 pr_err("gap %p[%u] != %lu\n", p_mn, p_slot, max_gap);
7018 mt_dump(mas->tree, mt_dump_hex);
7021 MT_BUG_ON(mas->tree,
7022 ma_gaps(p_mn, mas_parent_type(mas, mte))[p_slot] != max_gap);
7025 static void mas_validate_parent_slot(struct ma_state *mas)
7027 struct maple_node *parent;
7028 struct maple_enode *node;
7029 enum maple_type p_type;
7030 unsigned char p_slot;
7034 if (mte_is_root(mas->node))
7037 p_slot = mte_parent_slot(mas->node);
7038 p_type = mas_parent_type(mas, mas->node);
7039 parent = mte_parent(mas->node);
7040 slots = ma_slots(parent, p_type);
7041 MT_BUG_ON(mas->tree, mas_mn(mas) == parent);
7043 /* Check prev/next parent slot for duplicate node entry */
7045 for (i = 0; i < mt_slots[p_type]; i++) {
7046 node = mas_slot(mas, slots, i);
7048 if (node != mas->node)
7049 pr_err("parent %p[%u] does not have %p\n",
7050 parent, i, mas_mn(mas));
7051 MT_BUG_ON(mas->tree, node != mas->node);
7052 } else if (node == mas->node) {
7053 pr_err("Invalid child %p at parent %p[%u] p_slot %u\n",
7054 mas_mn(mas), parent, i, p_slot);
7055 MT_BUG_ON(mas->tree, node == mas->node);
7060 static void mas_validate_child_slot(struct ma_state *mas)
7062 enum maple_type type = mte_node_type(mas->node);
7063 void __rcu **slots = ma_slots(mte_to_node(mas->node), type);
7064 unsigned long *pivots = ma_pivots(mte_to_node(mas->node), type);
7065 struct maple_enode *child;
7068 if (mte_is_leaf(mas->node))
7071 for (i = 0; i < mt_slots[type]; i++) {
7072 child = mas_slot(mas, slots, i);
7073 if (!pivots[i] || pivots[i] == mas->max)
7079 if (mte_parent_slot(child) != i) {
7080 pr_err("Slot error at %p[%u]: child %p has pslot %u\n",
7081 mas_mn(mas), i, mte_to_node(child),
7082 mte_parent_slot(child));
7083 MT_BUG_ON(mas->tree, 1);
7086 if (mte_parent(child) != mte_to_node(mas->node)) {
7087 pr_err("child %p has parent %p not %p\n",
7088 mte_to_node(child), mte_parent(child),
7089 mte_to_node(mas->node));
7090 MT_BUG_ON(mas->tree, 1);
7096 * Validate all pivots are within mas->min and mas->max.
7098 static void mas_validate_limits(struct ma_state *mas)
7101 unsigned long prev_piv = 0;
7102 enum maple_type type = mte_node_type(mas->node);
7103 void __rcu **slots = ma_slots(mte_to_node(mas->node), type);
7104 unsigned long *pivots = ma_pivots(mas_mn(mas), type);
7106 /* all limits are fine here. */
7107 if (mte_is_root(mas->node))
7110 for (i = 0; i < mt_slots[type]; i++) {
7113 piv = mas_safe_pivot(mas, pivots, i, type);
7115 if (!piv && (i != 0))
7118 if (!mte_is_leaf(mas->node)) {
7119 void *entry = mas_slot(mas, slots, i);
7122 pr_err("%p[%u] cannot be null\n",
7125 MT_BUG_ON(mas->tree, !entry);
7128 if (prev_piv > piv) {
7129 pr_err("%p[%u] piv %lu < prev_piv %lu\n",
7130 mas_mn(mas), i, piv, prev_piv);
7131 MT_BUG_ON(mas->tree, piv < prev_piv);
7134 if (piv < mas->min) {
7135 pr_err("%p[%u] %lu < %lu\n", mas_mn(mas), i,
7137 MT_BUG_ON(mas->tree, piv < mas->min);
7139 if (piv > mas->max) {
7140 pr_err("%p[%u] %lu > %lu\n", mas_mn(mas), i,
7142 MT_BUG_ON(mas->tree, piv > mas->max);
7145 if (piv == mas->max)
7148 for (i += 1; i < mt_slots[type]; i++) {
7149 void *entry = mas_slot(mas, slots, i);
7151 if (entry && (i != mt_slots[type] - 1)) {
7152 pr_err("%p[%u] should not have entry %p\n", mas_mn(mas),
7154 MT_BUG_ON(mas->tree, entry != NULL);
7157 if (i < mt_pivots[type]) {
7158 unsigned long piv = pivots[i];
7163 pr_err("%p[%u] should not have piv %lu\n",
7164 mas_mn(mas), i, piv);
7165 MT_BUG_ON(mas->tree, i < mt_pivots[type] - 1);
7170 static void mt_validate_nulls(struct maple_tree *mt)
7172 void *entry, *last = (void *)1;
7173 unsigned char offset = 0;
7175 MA_STATE(mas, mt, 0, 0);
7178 if (mas_is_none(&mas) || (mas.node == MAS_ROOT))
7181 while (!mte_is_leaf(mas.node))
7184 slots = ma_slots(mte_to_node(mas.node), mte_node_type(mas.node));
7186 entry = mas_slot(&mas, slots, offset);
7187 if (!last && !entry) {
7188 pr_err("Sequential nulls end at %p[%u]\n",
7189 mas_mn(&mas), offset);
7191 MT_BUG_ON(mt, !last && !entry);
7193 if (offset == mas_data_end(&mas)) {
7194 mas_next_node(&mas, mas_mn(&mas), ULONG_MAX);
7195 if (mas_is_none(&mas))
7198 slots = ma_slots(mte_to_node(mas.node),
7199 mte_node_type(mas.node));
7204 } while (!mas_is_none(&mas));
7208 * Validate a maple tree by checking:
7209 * 1. The limits (pivots are within mas->min to mas->max)
7210 * 2. The gap is correctly set in the parents
7212 void mt_validate(struct maple_tree *mt)
7216 MA_STATE(mas, mt, 0, 0);
7219 if (!mas_searchable(&mas))
7222 mas_first_entry(&mas, mas_mn(&mas), ULONG_MAX, mte_node_type(mas.node));
7223 while (!mas_is_none(&mas)) {
7224 MT_BUG_ON(mas.tree, mte_dead_node(mas.node));
7225 if (!mte_is_root(mas.node)) {
7226 end = mas_data_end(&mas);
7227 if ((end < mt_min_slot_count(mas.node)) &&
7228 (mas.max != ULONG_MAX)) {
7229 pr_err("Invalid size %u of %p\n", end,
7231 MT_BUG_ON(mas.tree, 1);
7235 mas_validate_parent_slot(&mas);
7236 mas_validate_child_slot(&mas);
7237 mas_validate_limits(&mas);
7238 if (mt_is_alloc(mt))
7239 mas_validate_gaps(&mas);
7240 mas_dfs_postorder(&mas, ULONG_MAX);
7242 mt_validate_nulls(mt);
7247 EXPORT_SYMBOL_GPL(mt_validate);
7249 #endif /* CONFIG_DEBUG_MAPLE_TREE */