/**************************************************************************
 *
 * Copyright 2006 Tungsten Graphics, Inc., Bismarck, ND., USA.
 * Copyright 2016 Intel Corporation
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 **************************************************************************/

/*
 * Generic simple memory manager implementation. Intended to be used as a base
 * class implementation for more advanced memory managers.
 *
 * Note that the algorithm used is quite simple and there might be substantial
 * performance gains if a smarter free list is implemented. Currently it is
 * just an unordered stack of free regions. This could easily be improved by
 * using an RB-tree instead, at least if we expect heavy fragmentation.
 *
 * Aligned allocations can also see improvement.
 *
 * Authors:
 * Thomas Hellström <thomas-at-tungstengraphics-dot-com>
 */

#include <linux/export.h>
#include <linux/interval_tree_generic.h>
#include <linux/seq_file.h>
#include <linux/sched/signal.h>
#include <linux/slab.h>
#include <linux/stacktrace.h>

#include <drm/drm_mm.h>

/**
 * DOC: Overview
 *
 * drm_mm provides a simple range allocator. The drivers are free to use the
 * resource allocator from the linux core if it suits them, the upside of drm_mm
 * is that it's in the DRM core, which means that it's easier to extend for
 * some of the crazier special purpose needs of GPUs.
 *
 * The main data struct is &drm_mm, allocations are tracked in &drm_mm_node.
 * Drivers are free to embed either of them into their own suitable
 * datastructures. drm_mm itself will not do any memory allocations of its own,
 * so if drivers choose not to embed nodes they need to still allocate them
 * themselves.
 *
 * The range allocator also supports reservation of preallocated blocks. This is
 * useful for taking over initial mode setting configurations from the firmware,
 * where an object needs to be created which exactly matches the firmware's
 * scanout target. As long as the range is still free it can be inserted anytime
 * after the allocator is initialized, which helps with avoiding looped
 * dependencies in the driver load sequence.
 *
 * drm_mm maintains a stack of most recently freed holes, which of all
 * simplistic datastructures seems to be a fairly decent approach to clustering
 * allocations and avoiding too much fragmentation. This means free space
 * searches are O(num_holes). Given all the fancy features drm_mm supports,
 * something better would be fairly complex, and since gfx thrashing is a
 * fairly steep cliff, this is not a real concern. Removing a node again is
 * O(1).
 *
 * drm_mm supports a few features: Alignment and range restrictions can be
 * supplied. Furthermore every &drm_mm_node has a color value (which is just an
 * opaque unsigned long) which in conjunction with a driver callback can be used
 * to implement sophisticated placement restrictions. The i915 DRM driver uses
 * this to implement guard pages between incompatible caching domains in the
 * graphics TT.
 *
 * Two behaviors are supported for searching and allocating: bottom-up and
 * top-down. The default is bottom-up. Top-down allocation can be used if the
 * memory area has different restrictions, or just to reduce fragmentation.
 *
 * Finally iteration helpers to walk all nodes and all holes are provided, as
 * are some basic allocator dumpers for debugging.
 *
 * Note that this range allocator is not thread-safe, drivers need to protect
 * modifications with their own locking. The idea behind this is that for a full
 * memory manager additional data needs to be protected anyway, hence internal
 * locking would be fully redundant.
 */
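
/*
 * Example: the typical embedding pattern, as a rough sketch. The names
 * foo_device, foo->mm_lock and VRAM_SIZE are invented for illustration;
 * only the drm_mm calls are real API. Note the driver-side lock, since
 * drm_mm itself does no locking.
 *
 *	struct foo_device {
 *		struct mutex mm_lock;
 *		struct drm_mm mm;		// manages [0, VRAM_SIZE)
 *	};
 *
 *	drm_mm_init(&foo->mm, 0, VRAM_SIZE);
 *
 *	// Allocate: @node is a zeroed &drm_mm_node embedded in a buffer object.
 *	mutex_lock(&foo->mm_lock);
 *	err = drm_mm_insert_node_in_range(&foo->mm, node, size, 0, 0,
 *					  0, U64_MAX, DRM_MM_INSERT_BEST);
 *	mutex_unlock(&foo->mm_lock);
 *
 *	// Free: afterwards the node can be reused for another insertion.
 *	mutex_lock(&foo->mm_lock);
 *	drm_mm_remove_node(node);
 *	mutex_unlock(&foo->mm_lock);
 */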

#ifdef CONFIG_DRM_DEBUG_MM
#include <linux/stackdepot.h>

#define STACKDEPTH 32
#define BUFSZ 4096

static noinline void save_stack(struct drm_mm_node *node)
{
	unsigned long entries[STACKDEPTH];
	unsigned int n;

	n = stack_trace_save(entries, ARRAY_SIZE(entries), 1);

	/* May be called under spinlock, so avoid sleeping */
	node->stack = stack_depot_save(entries, n, GFP_NOWAIT);
}

static void show_leaks(struct drm_mm *mm)
{
	struct drm_mm_node *node;
	unsigned long *entries;
	unsigned int nr_entries;
	char *buf;

	buf = kmalloc(BUFSZ, GFP_KERNEL);
	if (!buf)
		return;

	list_for_each_entry(node, drm_mm_nodes(mm), node_list) {
		if (!node->stack) {
			DRM_ERROR("node [%08llx + %08llx]: unknown owner\n",
				  node->start, node->size);
			continue;
		}

		nr_entries = stack_depot_fetch(node->stack, &entries);
		stack_trace_snprint(buf, BUFSZ, entries, nr_entries, 0);
		DRM_ERROR("node [%08llx + %08llx]: inserted at\n%s",
			  node->start, node->size, buf);
	}

	kfree(buf);
}

#undef STACKDEPTH
#undef BUFSZ
#else
static void save_stack(struct drm_mm_node *node) { }
static void show_leaks(struct drm_mm *mm) { }
#endif

#define START(node) ((node)->start)
#define LAST(node)  ((node)->start + (node)->size - 1)

INTERVAL_TREE_DEFINE(struct drm_mm_node, rb,
		     u64, __subtree_last,
		     START, LAST, static inline, drm_mm_interval_tree)

struct drm_mm_node *
__drm_mm_interval_first(const struct drm_mm *mm, u64 start, u64 last)
{
	return drm_mm_interval_tree_iter_first((struct rb_root_cached *)&mm->interval_tree,
					       start, last) ?: (struct drm_mm_node *)&mm->head_node;
}
EXPORT_SYMBOL(__drm_mm_interval_first);

static void drm_mm_interval_tree_add_node(struct drm_mm_node *hole_node,
					  struct drm_mm_node *node)
{
	struct drm_mm *mm = hole_node->mm;
	struct rb_node **link, *rb;
	struct drm_mm_node *parent;
	bool leftmost;

	node->__subtree_last = LAST(node);

	if (drm_mm_node_allocated(hole_node)) {
		rb = &hole_node->rb;
		while (rb) {
			parent = rb_entry(rb, struct drm_mm_node, rb);
			if (parent->__subtree_last >= node->__subtree_last)
				break;

			parent->__subtree_last = node->__subtree_last;
			rb = rb_parent(rb);
		}

		rb = &hole_node->rb;
		link = &hole_node->rb.rb_right;
		leftmost = false;
	} else {
		rb = NULL;
		link = &mm->interval_tree.rb_root.rb_node;
		leftmost = true;
	}

	while (*link) {
		rb = *link;
		parent = rb_entry(rb, struct drm_mm_node, rb);
		if (parent->__subtree_last < node->__subtree_last)
			parent->__subtree_last = node->__subtree_last;
		if (node->start < parent->start) {
			link = &parent->rb.rb_left;
		} else {
			link = &parent->rb.rb_right;
			leftmost = false;
		}
	}

	rb_link_node(&node->rb, rb, link);
	rb_insert_augmented_cached(&node->rb, &mm->interval_tree, leftmost,
				   &drm_mm_interval_tree_augment);
}

#define RB_INSERT(root, member, expr) do { \
	struct rb_node **link = &root.rb_node, *rb = NULL; \
	u64 x = expr(node); \
	while (*link) { \
		rb = *link; \
		if (x < expr(rb_entry(rb, struct drm_mm_node, member))) \
			link = &rb->rb_left; \
		else \
			link = &rb->rb_right; \
	} \
	rb_link_node(&node->member, rb, link); \
	rb_insert_color(&node->member, &root); \
} while (0)

#define HOLE_SIZE(NODE) ((NODE)->hole_size)
#define HOLE_ADDR(NODE) (__drm_mm_hole_node_start(NODE))

static u64 rb_to_hole_size(struct rb_node *rb)
{
	return rb_entry(rb, struct drm_mm_node, rb_hole_size)->hole_size;
}

static void insert_hole_size(struct rb_root_cached *root,
			     struct drm_mm_node *node)
{
	struct rb_node **link = &root->rb_root.rb_node, *rb = NULL;
	u64 x = node->hole_size;
	bool first = true;

	while (*link) {
		rb = *link;
		if (x > rb_to_hole_size(rb)) {
			link = &rb->rb_left;
		} else {
			link = &rb->rb_right;
			first = false;
		}
	}

	rb_link_node(&node->rb_hole_size, rb, link);
	rb_insert_color_cached(&node->rb_hole_size, root, first);
}

static void add_hole(struct drm_mm_node *node)
{
	struct drm_mm *mm = node->mm;

	node->hole_size =
		__drm_mm_hole_node_end(node) - __drm_mm_hole_node_start(node);
	DRM_MM_BUG_ON(!drm_mm_hole_follows(node));

	insert_hole_size(&mm->holes_size, node);
	RB_INSERT(mm->holes_addr, rb_hole_addr, HOLE_ADDR);

	list_add(&node->hole_stack, &mm->hole_stack);
}

static void rm_hole(struct drm_mm_node *node)
{
	DRM_MM_BUG_ON(!drm_mm_hole_follows(node));

	list_del(&node->hole_stack);
	rb_erase_cached(&node->rb_hole_size, &node->mm->holes_size);
	rb_erase(&node->rb_hole_addr, &node->mm->holes_addr);
	node->hole_size = 0;

	DRM_MM_BUG_ON(drm_mm_hole_follows(node));
}

static inline struct drm_mm_node *rb_hole_size_to_node(struct rb_node *rb)
{
	return rb_entry_safe(rb, struct drm_mm_node, rb_hole_size);
}

static inline struct drm_mm_node *rb_hole_addr_to_node(struct rb_node *rb)
{
	return rb_entry_safe(rb, struct drm_mm_node, rb_hole_addr);
}

static inline u64 rb_hole_size(struct rb_node *rb)
{
	return rb_entry(rb, struct drm_mm_node, rb_hole_size)->hole_size;
}

static struct drm_mm_node *best_hole(struct drm_mm *mm, u64 size)
{
	struct rb_node *rb = mm->holes_size.rb_root.rb_node;
	struct drm_mm_node *best = NULL;

	do {
		struct drm_mm_node *node =
			rb_entry(rb, struct drm_mm_node, rb_hole_size);

		if (size <= node->hole_size) {
			best = node;
			rb = rb->rb_right;
		} else {
			rb = rb->rb_left;
		}
	} while (rb);

	return best;
}

static struct drm_mm_node *find_hole(struct drm_mm *mm, u64 addr)
{
	struct rb_node *rb = mm->holes_addr.rb_node;
	struct drm_mm_node *node = NULL;

	while (rb) {
		u64 hole_start;

		node = rb_hole_addr_to_node(rb);
		hole_start = __drm_mm_hole_node_start(node);

		if (addr < hole_start)
			rb = node->rb_hole_addr.rb_left;
		else if (addr > hole_start + node->hole_size)
			rb = node->rb_hole_addr.rb_right;
		else
			break;
	}

	return node;
}

static struct drm_mm_node *
first_hole(struct drm_mm *mm,
	   u64 start, u64 end, u64 size,
	   enum drm_mm_insert_mode mode)
{
	switch (mode) {
	default:
	case DRM_MM_INSERT_BEST:
		return best_hole(mm, size);

	case DRM_MM_INSERT_LOW:
		return find_hole(mm, start);

	case DRM_MM_INSERT_HIGH:
		return find_hole(mm, end);

	case DRM_MM_INSERT_EVICT:
		return list_first_entry_or_null(&mm->hole_stack,
						struct drm_mm_node,
						hole_stack);
	}
}

static struct drm_mm_node *
next_hole(struct drm_mm *mm,
	  struct drm_mm_node *node,
	  enum drm_mm_insert_mode mode)
{
	/* Searching is slow; check if we ran out of time/patience */
	cond_resched();
	if (fatal_signal_pending(current))
		return NULL;

	switch (mode) {
	default:
	case DRM_MM_INSERT_BEST:
		return rb_hole_size_to_node(rb_prev(&node->rb_hole_size));

	case DRM_MM_INSERT_LOW:
		return rb_hole_addr_to_node(rb_next(&node->rb_hole_addr));

	case DRM_MM_INSERT_HIGH:
		return rb_hole_addr_to_node(rb_prev(&node->rb_hole_addr));

	case DRM_MM_INSERT_EVICT:
		node = list_next_entry(node, hole_stack);
		return &node->hole_stack == &mm->hole_stack ? NULL : node;
	}
}

/**
 * drm_mm_reserve_node - insert a pre-initialized node
 * @mm: drm_mm allocator to insert @node into
 * @node: drm_mm_node to insert
 *
 * This function inserts an already set-up &drm_mm_node into the allocator,
 * meaning that start, size and color must be set by the caller. All other
 * fields must be cleared to 0. This is useful to initialize the allocator with
 * preallocated objects which must be set-up before the range allocator can be
 * set-up, e.g. when taking over a firmware framebuffer.
 *
 * Returns:
 * 0 on success, -ENOSPC if there's no hole where @node is.
 */
int drm_mm_reserve_node(struct drm_mm *mm, struct drm_mm_node *node)
{
	struct drm_mm_node *hole;
	u64 hole_start, hole_end;
	u64 adj_start, adj_end;
	u64 end;

	end = node->start + node->size;
	if (unlikely(end <= node->start))
		return -ENOSPC;

	/* Find the relevant hole to add our node to */
	hole = find_hole(mm, node->start);
	if (!hole)
		return -ENOSPC;

	adj_start = hole_start = __drm_mm_hole_node_start(hole);
	adj_end = hole_end = hole_start + hole->hole_size;

	if (mm->color_adjust)
		mm->color_adjust(hole, node->color, &adj_start, &adj_end);

	if (adj_start > node->start || adj_end < end)
		return -ENOSPC;

	node->mm = mm;

	__set_bit(DRM_MM_NODE_ALLOCATED_BIT, &node->flags);
	list_add(&node->node_list, &hole->node_list);
	drm_mm_interval_tree_add_node(hole, node);
	node->hole_size = 0;

	rm_hole(hole);
	if (node->start > hole_start)
		add_hole(hole);
	if (end < hole_end)
		add_hole(node);

	save_stack(node);
	return 0;
}
EXPORT_SYMBOL(drm_mm_reserve_node);
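
/*
 * Example: taking over the firmware framebuffer at driver load. A sketch
 * only; fb_base/fb_size and the foo naming are invented, and error handling
 * is trimmed.
 *
 *	struct drm_mm_node *node;
 *
 *	node = kzalloc(sizeof(*node), GFP_KERNEL);	// unused fields must be 0
 *	if (!node)
 *		return -ENOMEM;
 *
 *	node->start = fb_base;	// exact range the firmware scans out from
 *	node->size = fb_size;
 *
 *	err = drm_mm_reserve_node(&foo->mm, node);	// -ENOSPC if already in use
 */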

static u64 rb_to_hole_size_or_zero(struct rb_node *rb)
{
	return rb ? rb_to_hole_size(rb) : 0;
}

/**
 * drm_mm_insert_node_in_range - ranged search for space and insert @node
 * @mm: drm_mm to allocate from
 * @node: preallocated node to insert
 * @size: size of the allocation
 * @alignment: alignment of the allocation
 * @color: opaque tag value to use for this node
 * @range_start: start of the allowed range for this node
 * @range_end: end of the allowed range for this node
 * @mode: fine-tune the allocation search and placement
 *
 * The preallocated @node must be cleared to 0.
 *
 * Returns:
 * 0 on success, -ENOSPC if there's no suitable hole.
 */
int drm_mm_insert_node_in_range(struct drm_mm * const mm,
				struct drm_mm_node * const node,
				u64 size, u64 alignment,
				unsigned long color,
				u64 range_start, u64 range_end,
				enum drm_mm_insert_mode mode)
{
	struct drm_mm_node *hole;
	u64 remainder_mask;
	bool once;

	DRM_MM_BUG_ON(range_start > range_end);

	if (unlikely(size == 0 || range_end - range_start < size))
		return -ENOSPC;

	if (rb_to_hole_size_or_zero(rb_first_cached(&mm->holes_size)) < size)
		return -ENOSPC;

	if (alignment <= 1)
		alignment = 0;

	once = mode & DRM_MM_INSERT_ONCE;
	mode &= ~DRM_MM_INSERT_ONCE;

	remainder_mask = is_power_of_2(alignment) ? alignment - 1 : 0;
	for (hole = first_hole(mm, range_start, range_end, size, mode);
	     hole;
	     hole = once ? NULL : next_hole(mm, hole, mode)) {
		u64 hole_start = __drm_mm_hole_node_start(hole);
		u64 hole_end = hole_start + hole->hole_size;
		u64 adj_start, adj_end;
		u64 col_start, col_end;

		if (mode == DRM_MM_INSERT_LOW && hole_start >= range_end)
			break;

		if (mode == DRM_MM_INSERT_HIGH && hole_end <= range_start)
			break;

		col_start = hole_start;
		col_end = hole_end;
		if (mm->color_adjust)
			mm->color_adjust(hole, color, &col_start, &col_end);

		adj_start = max(col_start, range_start);
		adj_end = min(col_end, range_end);

		if (adj_end <= adj_start || adj_end - adj_start < size)
			continue;

		if (mode == DRM_MM_INSERT_HIGH)
			adj_start = adj_end - size;

		if (alignment) {
			u64 rem;

			if (likely(remainder_mask))
				rem = adj_start & remainder_mask;
			else
				div64_u64_rem(adj_start, alignment, &rem);
			if (rem) {
				adj_start -= rem;
				if (mode != DRM_MM_INSERT_HIGH)
					adj_start += alignment;

				if (adj_start < max(col_start, range_start) ||
				    min(col_end, range_end) - adj_start < size)
					continue;

				if (adj_end <= adj_start ||
				    adj_end - adj_start < size)
					continue;
			}
		}

		node->mm = mm;
		node->size = size;
		node->start = adj_start;
		node->color = color;
		node->hole_size = 0;

		__set_bit(DRM_MM_NODE_ALLOCATED_BIT, &node->flags);
		list_add(&node->node_list, &hole->node_list);
		drm_mm_interval_tree_add_node(hole, node);

		rm_hole(hole);
		if (adj_start > hole_start)
			add_hole(hole);
		if (adj_start + size < hole_end)
			add_hole(node);

		save_stack(node);
		return 0;
	}

	return signal_pending(current) ? -ERESTARTSYS : -ENOSPC;
}
EXPORT_SYMBOL(drm_mm_insert_node_in_range);
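
/*
 * Example: a top-down, range-restricted allocation. Illustrative values
 * only. Top-down placement keeps the bottom of the address space free for
 * allocations with tighter restrictions (e.g. hardware that can only
 * address the first 4 GiB).
 *
 *	err = drm_mm_insert_node_in_range(&foo->mm, node, size,
 *					  PAGE_SIZE,	// page-aligned
 *					  0,		// no color
 *					  0, SZ_4G,	// stay below 4 GiB
 *					  DRM_MM_INSERT_HIGH);
 */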

static inline bool drm_mm_node_scanned_block(const struct drm_mm_node *node)
{
	return test_bit(DRM_MM_NODE_SCANNED_BIT, &node->flags);
}

/**
 * drm_mm_remove_node - Remove a memory node from the allocator.
 * @node: drm_mm_node to remove
 *
 * This just removes a node from its drm_mm allocator. The node does not need to
 * be cleared again before it can be re-inserted into this or any other drm_mm
 * allocator. It is a bug to call this function on an unallocated node.
 */
void drm_mm_remove_node(struct drm_mm_node *node)
{
	struct drm_mm *mm = node->mm;
	struct drm_mm_node *prev_node;

	DRM_MM_BUG_ON(!drm_mm_node_allocated(node));
	DRM_MM_BUG_ON(drm_mm_node_scanned_block(node));

	prev_node = list_prev_entry(node, node_list);

	if (drm_mm_hole_follows(node))
		rm_hole(node);

	drm_mm_interval_tree_remove(node, &mm->interval_tree);
	list_del(&node->node_list);

	if (drm_mm_hole_follows(prev_node))
		rm_hole(prev_node);
	add_hole(prev_node);

	clear_bit_unlock(DRM_MM_NODE_ALLOCATED_BIT, &node->flags);
}
EXPORT_SYMBOL(drm_mm_remove_node);

/**
 * drm_mm_replace_node - move an allocation from @old to @new
 * @old: drm_mm_node to remove from the allocator
 * @new: drm_mm_node which should inherit @old's allocation
 *
 * This is useful for when drivers embed the drm_mm_node structure and hence
 * can't move allocations by reassigning pointers. It's a combination of remove
 * and insert with the guarantee that the allocation start will match.
 */
void drm_mm_replace_node(struct drm_mm_node *old, struct drm_mm_node *new)
{
	struct drm_mm *mm = old->mm;

	DRM_MM_BUG_ON(!drm_mm_node_allocated(old));

	*new = *old;

	__set_bit(DRM_MM_NODE_ALLOCATED_BIT, &new->flags);
	list_replace(&old->node_list, &new->node_list);
	rb_replace_node_cached(&old->rb, &new->rb, &mm->interval_tree);

	if (drm_mm_hole_follows(old)) {
		list_replace(&old->hole_stack, &new->hole_stack);
		rb_replace_node_cached(&old->rb_hole_size,
				       &new->rb_hole_size,
				       &mm->holes_size);
		rb_replace_node(&old->rb_hole_addr,
				&new->rb_hole_addr,
				&mm->holes_addr);
	}

	clear_bit_unlock(DRM_MM_NODE_ALLOCATED_BIT, &old->flags);
}
EXPORT_SYMBOL(drm_mm_replace_node);
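
/*
 * Example: migrating an allocation into a node embedded in a new object,
 * e.g. while swapping the backing object of a buffer. A sketch only; the
 * old_bo/new_bo objects are invented.
 *
 *	drm_mm_replace_node(&old_bo->vram_node, &new_bo->vram_node);
 *	// old_bo->vram_node is now unallocated; new_bo->vram_node owns the
 *	// exact same range, including its start address.
 */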

/**
 * DOC: lru scan roster
 *
 * Very often GPUs need to have contiguous allocations for a given object. When
 * evicting objects to make space for a new one it is therefore not the most
 * efficient to simply select all objects from the tail of an LRU until
 * there's a suitable hole: Especially for big objects or nodes that
 * otherwise have special allocation constraints there's a good chance we evict
 * lots of (smaller) objects unnecessarily.
 *
 * The DRM range allocator supports this use-case through the scanning
 * interfaces. First a scan operation needs to be initialized with
 * drm_mm_scan_init() or drm_mm_scan_init_with_range(). The driver adds
 * objects to the roster, probably by walking an LRU list, but this can be
 * freely implemented. Eviction candidates are added using
 * drm_mm_scan_add_block() until a suitable hole is found or there are no
 * further evictable objects. Eviction roster metadata is tracked in &struct
 * drm_mm_scan.
 *
 * The driver must walk through all objects again in exactly the reverse
 * order to restore the allocator state. Note that while the allocator is used
 * in the scan mode no other operation is allowed.
 *
 * Finally the driver evicts all objects selected (drm_mm_scan_remove_block()
 * reported true) in the scan, and any overlapping nodes after color adjustment
 * (drm_mm_scan_color_evict()). Adding and removing an object is O(1), and
 * since freeing a node is also O(1) the overall complexity is
 * O(scanned_objects). So like the free stack which needs to be walked before a
 * scan operation even begins this is linear in the number of objects. It
 * doesn't seem to hurt too badly. A minimal eviction loop is sketched below.
 */
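
/*
 * Example: a minimal eviction loop, in the spirit of what i915 does. A
 * sketch only; foo_obj, the LRU list, foo_evict() and node_to_obj() are
 * invented, and error handling is trimmed. Blocks are backed out in
 * exactly the reverse order they were added, which the list_add()/
 * list_for_each_entry_safe() pairing below guarantees.
 *
 *	struct drm_mm_scan scan;
 *	struct foo_obj *obj, *next;
 *	struct drm_mm_node *node;
 *	LIST_HEAD(eviction_list);
 *
 *	drm_mm_scan_init(&scan, mm, size, alignment, color,
 *			 DRM_MM_INSERT_EVICT);
 *	list_for_each_entry(obj, &foo->lru, lru_link) {
 *		list_add(&obj->evict_link, &eviction_list);
 *		if (drm_mm_scan_add_block(&scan, &obj->node))
 *			break;
 *	}
 *
 *	// Walk back in reverse order; keep only blocks that must go.
 *	list_for_each_entry_safe(obj, next, &eviction_list, evict_link) {
 *		if (!drm_mm_scan_remove_block(&scan, &obj->node))
 *			list_del(&obj->evict_link);
 *	}
 *
 *	// Evict the survivors, then any color-adjust stragglers.
 *	list_for_each_entry_safe(obj, next, &eviction_list, evict_link)
 *		foo_evict(obj);
 *	while ((node = drm_mm_scan_color_evict(&scan)))
 *		foo_evict(node_to_obj(node));
 */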

/**
 * drm_mm_scan_init_with_range - initialize range-restricted lru scanning
 * @scan: scan state
 * @mm: drm_mm to scan
 * @size: size of the allocation
 * @alignment: alignment of the allocation
 * @color: opaque tag value to use for the allocation
 * @start: start of the allowed range for the allocation
 * @end: end of the allowed range for the allocation
 * @mode: fine-tune the allocation search and placement
 *
 * This simply sets up the scanning routines with the parameters for the desired
 * hole.
 *
 * Warning:
 * As long as the scan list is non-empty, no other operations than
 * adding/removing nodes to/from the scan list are allowed.
 */
void drm_mm_scan_init_with_range(struct drm_mm_scan *scan,
				 struct drm_mm *mm,
				 u64 size,
				 u64 alignment,
				 unsigned long color,
				 u64 start,
				 u64 end,
				 enum drm_mm_insert_mode mode)
{
	DRM_MM_BUG_ON(start >= end);
	DRM_MM_BUG_ON(!size || size > end - start);
	DRM_MM_BUG_ON(mm->scan_active);

	scan->mm = mm;

	if (alignment <= 1)
		alignment = 0;

	scan->color = color;
	scan->alignment = alignment;
	scan->remainder_mask = is_power_of_2(alignment) ? alignment - 1 : 0;
	scan->size = size;
	scan->mode = mode;

	DRM_MM_BUG_ON(end <= start);
	scan->range_start = start;
	scan->range_end = end;

	scan->hit_start = U64_MAX;
	scan->hit_end = 0;
}
EXPORT_SYMBOL(drm_mm_scan_init_with_range);

/**
 * drm_mm_scan_add_block - add a node to the scan list
 * @scan: the active drm_mm scanner
 * @node: drm_mm_node to add
 *
 * Add a node to the scan list that might be freed to make space for the desired
 * hole.
 *
 * Returns:
 * True if a hole has been found, false otherwise.
 */
bool drm_mm_scan_add_block(struct drm_mm_scan *scan,
			   struct drm_mm_node *node)
{
	struct drm_mm *mm = scan->mm;
	struct drm_mm_node *hole;
	u64 hole_start, hole_end;
	u64 col_start, col_end;
	u64 adj_start, adj_end;

	DRM_MM_BUG_ON(node->mm != mm);
	DRM_MM_BUG_ON(!drm_mm_node_allocated(node));
	DRM_MM_BUG_ON(drm_mm_node_scanned_block(node));
	__set_bit(DRM_MM_NODE_SCANNED_BIT, &node->flags);
	mm->scan_active++;

	/* Remove this block from the node_list so that we enlarge the hole
	 * (distance between the end of our previous node and the start of
	 * our next), without poisoning the link so that we can restore it
	 * later in drm_mm_scan_remove_block().
	 */
	hole = list_prev_entry(node, node_list);
	DRM_MM_BUG_ON(list_next_entry(hole, node_list) != node);
	__list_del_entry(&node->node_list);

	hole_start = __drm_mm_hole_node_start(hole);
	hole_end = __drm_mm_hole_node_end(hole);

	col_start = hole_start;
	col_end = hole_end;
	if (mm->color_adjust)
		mm->color_adjust(hole, scan->color, &col_start, &col_end);

	adj_start = max(col_start, scan->range_start);
	adj_end = min(col_end, scan->range_end);
	if (adj_end <= adj_start || adj_end - adj_start < scan->size)
		return false;

	if (scan->mode == DRM_MM_INSERT_HIGH)
		adj_start = adj_end - scan->size;

	if (scan->alignment) {
		u64 rem;

		if (likely(scan->remainder_mask))
			rem = adj_start & scan->remainder_mask;
		else
			div64_u64_rem(adj_start, scan->alignment, &rem);
		if (rem) {
			adj_start -= rem;
			if (scan->mode != DRM_MM_INSERT_HIGH)
				adj_start += scan->alignment;
			if (adj_start < max(col_start, scan->range_start) ||
			    min(col_end, scan->range_end) - adj_start < scan->size)
				return false;

			if (adj_end <= adj_start ||
			    adj_end - adj_start < scan->size)
				return false;
		}
	}

	scan->hit_start = adj_start;
	scan->hit_end = adj_start + scan->size;

	DRM_MM_BUG_ON(scan->hit_start >= scan->hit_end);
	DRM_MM_BUG_ON(scan->hit_start < hole_start);
	DRM_MM_BUG_ON(scan->hit_end > hole_end);

	return true;
}
EXPORT_SYMBOL(drm_mm_scan_add_block);

/**
 * drm_mm_scan_remove_block - remove a node from the scan list
 * @scan: the active drm_mm scanner
 * @node: drm_mm_node to remove
 *
 * Nodes **must** be removed in exactly the reverse order from the scan list as
 * they have been added (e.g. using list_add() as they are added and then
 * list_for_each() over that eviction list to remove), otherwise the internal
 * state of the memory manager will be corrupted.
 *
 * When the scan list is empty, the selected memory nodes can be freed. An
 * immediately following drm_mm_insert_node_in_range() using DRM_MM_INSERT_EVICT
 * will then return the just freed block (because it's at the top of the
 * hole_stack list).
 *
 * Returns:
 * True if this block should be evicted, false otherwise. Will always
 * return false when no hole has been found.
 */
bool drm_mm_scan_remove_block(struct drm_mm_scan *scan,
			      struct drm_mm_node *node)
{
	struct drm_mm_node *prev_node;

	DRM_MM_BUG_ON(node->mm != scan->mm);
	DRM_MM_BUG_ON(!drm_mm_node_scanned_block(node));
	__clear_bit(DRM_MM_NODE_SCANNED_BIT, &node->flags);

	DRM_MM_BUG_ON(!node->mm->scan_active);
	node->mm->scan_active--;

	/* During drm_mm_scan_add_block() we decoupled this node leaving
	 * its pointers intact. Now that the caller is walking back along
	 * the eviction list we can restore this block into its rightful
	 * place on the full node_list. To confirm that the caller is walking
	 * backwards correctly we check that prev_node->next == node->next,
	 * i.e. both believe the same node should be on the other side of the
	 * hole.
	 */
	prev_node = list_prev_entry(node, node_list);
	DRM_MM_BUG_ON(list_next_entry(prev_node, node_list) !=
		      list_next_entry(node, node_list));
	list_add(&node->node_list, &prev_node->node_list);

	return (node->start + node->size > scan->hit_start &&
		node->start < scan->hit_end);
}
EXPORT_SYMBOL(drm_mm_scan_remove_block);

/**
 * drm_mm_scan_color_evict - evict overlapping nodes on either side of hole
 * @scan: drm_mm scan with target hole
 *
 * After completing an eviction scan and removing the selected nodes, we may
 * need to remove a few more nodes from either side of the target hole if
 * mm.color_adjust is being used.
 *
 * Returns:
 * A node to evict, or NULL if there are no overlapping nodes.
 */
struct drm_mm_node *drm_mm_scan_color_evict(struct drm_mm_scan *scan)
{
	struct drm_mm *mm = scan->mm;
	struct drm_mm_node *hole;
	u64 hole_start, hole_end;

	DRM_MM_BUG_ON(list_empty(&mm->hole_stack));

	if (!mm->color_adjust)
		return NULL;

	/*
	 * The hole found during scanning should ideally be the first element
	 * in the hole_stack list, but due to side-effects in the driver it
	 * may not be.
	 */
	list_for_each_entry(hole, &mm->hole_stack, hole_stack) {
		hole_start = __drm_mm_hole_node_start(hole);
		hole_end = hole_start + hole->hole_size;

		if (hole_start <= scan->hit_start &&
		    hole_end >= scan->hit_end)
			break;
	}

	/* We should only be called after we found the hole previously */
	DRM_MM_BUG_ON(&hole->hole_stack == &mm->hole_stack);
	if (unlikely(&hole->hole_stack == &mm->hole_stack))
		return NULL;

	DRM_MM_BUG_ON(hole_start > scan->hit_start);
	DRM_MM_BUG_ON(hole_end < scan->hit_end);

	mm->color_adjust(hole, scan->color, &hole_start, &hole_end);
	if (hole_start > scan->hit_start)
		return hole;
	if (hole_end < scan->hit_end)
		return list_next_entry(hole, node_list);

	return NULL;
}
EXPORT_SYMBOL(drm_mm_scan_color_evict);

/**
 * drm_mm_init - initialize a drm-mm allocator
 * @mm: the drm_mm structure to initialize
 * @start: start of the range managed by @mm
 * @size: size of the range managed by @mm
 *
 * Note that @mm must be cleared to 0 before calling this function.
 */
void drm_mm_init(struct drm_mm *mm, u64 start, u64 size)
{
	DRM_MM_BUG_ON(start + size <= start);

	mm->color_adjust = NULL;

	INIT_LIST_HEAD(&mm->hole_stack);
	mm->interval_tree = RB_ROOT_CACHED;
	mm->holes_size = RB_ROOT_CACHED;
	mm->holes_addr = RB_ROOT;

	/* Clever trick to avoid a special case in the free hole tracking. */
	INIT_LIST_HEAD(&mm->head_node.node_list);
	mm->head_node.flags = 0;
	mm->head_node.mm = mm;
	mm->head_node.start = start + size;
	mm->head_node.size = -size;
	add_hole(&mm->head_node);

	mm->scan_active = 0;
}
EXPORT_SYMBOL(drm_mm_init);

/**
 * drm_mm_takedown - clean up a drm_mm allocator
 * @mm: drm_mm allocator to clean up
 *
 * Note that it is a bug to call this function on an allocator which is not
 * clean.
 */
void drm_mm_takedown(struct drm_mm *mm)
{
	if (WARN(!drm_mm_clean(mm),
		 "Memory manager not clean during takedown.\n"))
		show_leaks(mm);
}
EXPORT_SYMBOL(drm_mm_takedown);

static u64 drm_mm_dump_hole(struct drm_printer *p, const struct drm_mm_node *entry)
{
	u64 start, size;

	size = entry->hole_size;
	if (size) {
		start = drm_mm_hole_node_start(entry);
		drm_printf(p, "%#018llx-%#018llx: %llu: free\n",
			   start, start + size, size);
	}

	return size;
}

/**
 * drm_mm_print - print allocator state
 * @mm: drm_mm allocator to print
 * @p: DRM printer to use
 */
void drm_mm_print(const struct drm_mm *mm, struct drm_printer *p)
{
	const struct drm_mm_node *entry;
	u64 total_used = 0, total_free = 0, total = 0;

	total_free += drm_mm_dump_hole(p, &mm->head_node);

	drm_mm_for_each_node(entry, mm) {
		drm_printf(p, "%#018llx-%#018llx: %llu: used\n", entry->start,
			   entry->start + entry->size, entry->size);
		total_used += entry->size;
		total_free += drm_mm_dump_hole(p, entry);
	}
	total = total_free + total_used;

	drm_printf(p, "total: %llu, used %llu free %llu\n", total,
		   total_used, total_free);
}
EXPORT_SYMBOL(drm_mm_print);
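
/*
 * Example: exposing the dump through debugfs. A sketch; foo_mm_show() and
 * the foo device are invented, while drm_seq_file_printer() is the real
 * helper from <drm/drm_print.h>.
 *
 *	static int foo_mm_show(struct seq_file *m, void *data)
 *	{
 *		struct foo_device *foo = m->private;
 *		struct drm_printer p = drm_seq_file_printer(m);
 *
 *		mutex_lock(&foo->mm_lock);
 *		drm_mm_print(&foo->mm, &p);
 *		mutex_unlock(&foo->mm_lock);
 *
 *		return 0;
 *	}
 */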