1 /**************************************************************************
3 * Copyright 2006 Tungsten Graphics, Inc., Bismarck, ND., USA.
4 * Copyright 2016 Intel Corporation
7 * Permission is hereby granted, free of charge, to any person obtaining a
8 * copy of this software and associated documentation files (the
9 * "Software"), to deal in the Software without restriction, including
10 * without limitation the rights to use, copy, modify, merge, publish,
11 * distribute, sub license, and/or sell copies of the Software, and to
12 * permit persons to whom the Software is furnished to do so, subject to
13 * the following conditions:
15 * The above copyright notice and this permission notice (including the
16 * next paragraph) shall be included in all copies or substantial portions
19 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
20 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
21 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
22 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
23 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
24 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
25 * USE OR OTHER DEALINGS IN THE SOFTWARE.
28 **************************************************************************/
31 * Generic simple memory manager implementation. Intended to be used as a base
32 * class implementation for more advanced memory managers.
34 * Note that the algorithm used is quite simple and there might be substantial
35 * performance gains if a smarter free list is implemented. Currently it is
36 * just an unordered stack of free regions. This could easily be improved if
37 * an RB-tree were used instead, at least if we expect heavy fragmentation.
39 * Aligned allocations can also see improvement.
42 * Thomas Hellström <thomas-at-tungstengraphics-dot-com>
45 #include <linux/export.h>
46 #include <linux/interval_tree_generic.h>
47 #include <linux/seq_file.h>
48 #include <linux/slab.h>
49 #include <linux/stacktrace.h>
51 #include <drm/drm_mm.h>
56 * drm_mm provides a simple range allocator. The drivers are free to use the
57 * resource allocator from the linux core if it suits them; the upside of drm_mm
58 * is that it's in the DRM core, which means that it's easier to extend for
59 * some of the crazier special-purpose needs of GPUs.
61 * The main data struct is &drm_mm, allocations are tracked in &drm_mm_node.
62 * Drivers are free to embed either of them into their own suitable
63 * datastructures. drm_mm itself will not do any memory allocations of its own,
64 * so if drivers choose not to embed nodes they still need to allocate them themselves.
67 * The range allocator also supports reservation of preallocated blocks. This is
68 * useful for taking over initial mode setting configurations from the firmware,
69 * where an object needs to be created which exactly matches the firmware's
70 * scanout target. As long as the range is still free it can be inserted anytime
71 * after the allocator is initialized, which helps with avoiding looped
72 * dependencies in the driver load sequence.
74 * drm_mm maintains a stack of most recently freed holes, which of all
75 * simplistic datastructures seems to be a fairly decent approach to clustering
76 * allocations and avoiding too much fragmentation. This means free space
77 * searches are O(num_holes). Given all the fancy features drm_mm supports,
78 * something better would be fairly complex, and since gfx thrashing is a fairly
79 * steep cliff it is not a real concern. Removing a node again is O(1).
81 * drm_mm supports a few features: Alignment and range restrictions can be
82 * supplied. Furthermore every &drm_mm_node has a color value (which is just an
83 * opaque unsigned long) which in conjunction with a driver callback can be used
84 * to implement sophisticated placement restrictions. The i915 DRM driver uses
85 * this to implement guard pages between incompatible caching domains in the graphics TT.
88 * Two behaviors are supported for searching and allocating: bottom-up and
89 * top-down. The default is bottom-up. Top-down allocation can be used if the
90 * memory area has different restrictions, or just to reduce fragmentation.
92 * Finally iteration helpers to walk all nodes and all holes are provided as are
93 * some basic allocator dumpers for debugging.
95 * Note that this range allocator is not thread-safe; drivers need to protect
96 * modifications with their own locking. The idea behind this is that for a full
97 * memory manager additional data needs to be protected anyway, hence internal
98 * locking would be fully redundant.
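 *
 * Example (an illustrative sketch only, not part of the original
 * documentation): a driver embedding a &drm_mm for a hypothetical 256 MiB
 * address space and wrapping it with its own mutex to provide the required
 * external locking. The my_vram structure and the my_vram_* helpers are
 * made-up names for this sketch:
 *
 *	struct my_vram {
 *		struct drm_mm mm;
 *		struct mutex lock;	// drm_mm needs external locking
 *	};
 *
 *	static void my_vram_init(struct my_vram *vram)
 *	{
 *		mutex_init(&vram->lock);
 *		drm_mm_init(&vram->mm, 0, SZ_256M);
 *	}
 *
 *	static int my_vram_alloc(struct my_vram *vram,
 *				 struct drm_mm_node *node, u64 size)
 *	{
 *		int err;
 *
 *		memset(node, 0, sizeof(*node));	// nodes must start zeroed
 *		mutex_lock(&vram->lock);
 *		err = drm_mm_insert_node(&vram->mm, node, size);
 *		mutex_unlock(&vram->lock);
 *		return err;
 *	}
 *
 *	static void my_vram_free(struct my_vram *vram, struct drm_mm_node *node)
 *	{
 *		mutex_lock(&vram->lock);
 *		drm_mm_remove_node(node);
 *		mutex_unlock(&vram->lock);
 *	}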
101 #ifdef CONFIG_DRM_DEBUG_MM
102 #include <linux/stackdepot.h>
104 #define STACKDEPTH 32
107 static noinline void save_stack(struct drm_mm_node *node)
109 unsigned long entries[STACKDEPTH];
112 n = stack_trace_save(entries, ARRAY_SIZE(entries), 1);
114 /* May be called under spinlock, so avoid sleeping */
115 node->stack = stack_depot_save(entries, n, GFP_NOWAIT);
118 static void show_leaks(struct drm_mm *mm)
120 struct drm_mm_node *node;
121 unsigned long *entries;
122 unsigned int nr_entries;
125 buf = kmalloc(BUFSZ, GFP_KERNEL);
129 list_for_each_entry(node, drm_mm_nodes(mm), node_list) {
131 DRM_ERROR("node [%08llx + %08llx]: unknown owner\n",
132 node->start, node->size);
136 nr_entries = stack_depot_fetch(node->stack, &entries);
137 stack_trace_snprint(buf, BUFSZ, entries, nr_entries, 0);
138 DRM_ERROR("node [%08llx + %08llx]: inserted at\n%s",
139 node->start, node->size, buf);
148 static void save_stack(struct drm_mm_node *node) { }
149 static void show_leaks(struct drm_mm *mm) { }
152 #define START(node) ((node)->start)
153 #define LAST(node) ((node)->start + (node)->size - 1)
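/*
 * The INTERVAL_TREE_DEFINE() invocation below generates the static helpers
 * drm_mm_interval_tree_{insert,remove,iter_first,iter_next}() for an augmented
 * rbtree keyed on the [START(node), LAST(node)] interval, with
 * node->__subtree_last caching the largest interval end in each subtree.
 */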
155 INTERVAL_TREE_DEFINE(struct drm_mm_node, rb,
157 START, LAST, static inline, drm_mm_interval_tree)
160 __drm_mm_interval_first(const struct drm_mm *mm, u64 start, u64 last)
162 return drm_mm_interval_tree_iter_first((struct rb_root_cached *)&mm->interval_tree,
163 start, last) ?: (struct drm_mm_node *)&mm->head_node;
165 EXPORT_SYMBOL(__drm_mm_interval_first);
167 static void drm_mm_interval_tree_add_node(struct drm_mm_node *hole_node,
168 struct drm_mm_node *node)
170 struct drm_mm *mm = hole_node->mm;
171 struct rb_node **link, *rb;
172 struct drm_mm_node *parent;
175 node->__subtree_last = LAST(node);
177 if (drm_mm_node_allocated(hole_node)) {
180 parent = rb_entry(rb, struct drm_mm_node, rb);
181 if (parent->__subtree_last >= node->__subtree_last)
184 parent->__subtree_last = node->__subtree_last;
189 link = &hole_node->rb.rb_right;
193 link = &mm->interval_tree.rb_root.rb_node;
199 parent = rb_entry(rb, struct drm_mm_node, rb);
200 if (parent->__subtree_last < node->__subtree_last)
201 parent->__subtree_last = node->__subtree_last;
202 if (node->start < parent->start) {
203 link = &parent->rb.rb_left;
205 link = &parent->rb.rb_right;
210 rb_link_node(&node->rb, rb, link);
211 rb_insert_augmented_cached(&node->rb, &mm->interval_tree, leftmost,
212 &drm_mm_interval_tree_augment);
215 #define HOLE_SIZE(NODE) ((NODE)->hole_size)
216 #define HOLE_ADDR(NODE) (__drm_mm_hole_node_start(NODE))
218 static u64 rb_to_hole_size(struct rb_node *rb)
220 return rb_entry(rb, struct drm_mm_node, rb_hole_size)->hole_size;
223 static void insert_hole_size(struct rb_root_cached *root,
224 struct drm_mm_node *node)
226 struct rb_node **link = &root->rb_root.rb_node, *rb = NULL;
227 u64 x = node->hole_size;
232 if (x > rb_to_hole_size(rb)) {
235 link = &rb->rb_right;
240 rb_link_node(&node->rb_hole_size, rb, link);
241 rb_insert_color_cached(&node->rb_hole_size, root, first);
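/*
 * The RB_DECLARE_CALLBACKS_MAX() invocation below generates the
 * augment_callbacks that keep node->subtree_max_hole equal to the largest
 * HOLE_SIZE() in each rb_hole_addr subtree, so the address-ordered walks can
 * skip subtrees that cannot possibly satisfy a request.
 */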
244 RB_DECLARE_CALLBACKS_MAX(static, augment_callbacks,
245 struct drm_mm_node, rb_hole_addr,
246 u64, subtree_max_hole, HOLE_SIZE)
248 static void insert_hole_addr(struct rb_root *root, struct drm_mm_node *node)
250 struct rb_node **link = &root->rb_node, *rb_parent = NULL;
251 u64 start = HOLE_ADDR(node), subtree_max_hole = node->subtree_max_hole;
252 struct drm_mm_node *parent;
256 parent = rb_entry(rb_parent, struct drm_mm_node, rb_hole_addr);
257 if (parent->subtree_max_hole < subtree_max_hole)
258 parent->subtree_max_hole = subtree_max_hole;
259 if (start < HOLE_ADDR(parent))
260 link = &parent->rb_hole_addr.rb_left;
262 link = &parent->rb_hole_addr.rb_right;
265 rb_link_node(&node->rb_hole_addr, rb_parent, link);
266 rb_insert_augmented(&node->rb_hole_addr, root, &augment_callbacks);
269 static void add_hole(struct drm_mm_node *node)
271 struct drm_mm *mm = node->mm;
274 __drm_mm_hole_node_end(node) - __drm_mm_hole_node_start(node);
275 node->subtree_max_hole = node->hole_size;
276 DRM_MM_BUG_ON(!drm_mm_hole_follows(node));
278 insert_hole_size(&mm->holes_size, node);
279 insert_hole_addr(&mm->holes_addr, node);
281 list_add(&node->hole_stack, &mm->hole_stack);
284 static void rm_hole(struct drm_mm_node *node)
286 DRM_MM_BUG_ON(!drm_mm_hole_follows(node));
288 list_del(&node->hole_stack);
289 rb_erase_cached(&node->rb_hole_size, &node->mm->holes_size);
290 rb_erase_augmented(&node->rb_hole_addr, &node->mm->holes_addr,
293 node->subtree_max_hole = 0;
295 DRM_MM_BUG_ON(drm_mm_hole_follows(node));
298 static inline struct drm_mm_node *rb_hole_size_to_node(struct rb_node *rb)
300 return rb_entry_safe(rb, struct drm_mm_node, rb_hole_size);
303 static inline struct drm_mm_node *rb_hole_addr_to_node(struct rb_node *rb)
305 return rb_entry_safe(rb, struct drm_mm_node, rb_hole_addr);
308 static inline u64 rb_hole_size(struct rb_node *rb)
310 return rb_entry(rb, struct drm_mm_node, rb_hole_size)->hole_size;
313 static struct drm_mm_node *best_hole(struct drm_mm *mm, u64 size)
315 struct rb_node *rb = mm->holes_size.rb_root.rb_node;
316 struct drm_mm_node *best = NULL;
319 struct drm_mm_node *node =
320 rb_entry(rb, struct drm_mm_node, rb_hole_size);
322 if (size <= node->hole_size) {
333 static struct drm_mm_node *find_hole(struct drm_mm *mm, u64 addr)
335 struct rb_node *rb = mm->holes_addr.rb_node;
336 struct drm_mm_node *node = NULL;
341 node = rb_hole_addr_to_node(rb);
342 hole_start = __drm_mm_hole_node_start(node);
344 if (addr < hole_start)
345 rb = node->rb_hole_addr.rb_left;
346 else if (addr > hole_start + node->hole_size)
347 rb = node->rb_hole_addr.rb_right;
355 static struct drm_mm_node *
356 first_hole(struct drm_mm *mm,
357 u64 start, u64 end, u64 size,
358 enum drm_mm_insert_mode mode)
362 case DRM_MM_INSERT_BEST:
363 return best_hole(mm, size);
365 case DRM_MM_INSERT_LOW:
366 return find_hole(mm, start);
368 case DRM_MM_INSERT_HIGH:
369 return find_hole(mm, end);
371 case DRM_MM_INSERT_EVICT:
372 return list_first_entry_or_null(&mm->hole_stack,
379 * next_hole_high_addr - returns next hole for a DRM_MM_INSERT_HIGH mode request
380 * @entry: previously selected drm_mm_node
381 * @size: size of the hole needed for the request
383 * This function will verify whether the left subtree of @entry has a hole big enough
384 * to fit the requested size. If so, it will return the previous node of @entry;
385 * otherwise it will return the parent node of @entry.
387 * It will also skip the complete left subtree if the subtree_max_hole of that
388 * subtree is the same as the subtree_max_hole of @entry.
391 * The previous node of @entry if the left subtree of @entry can serve the
392 * request, otherwise the parent node of @entry.
394 static struct drm_mm_node *
395 next_hole_high_addr(struct drm_mm_node *entry, u64 size)
397 struct rb_node *rb_node, *left_rb_node, *parent_rb_node;
398 struct drm_mm_node *left_node;
403 rb_node = &entry->rb_hole_addr;
404 if (rb_node->rb_left) {
405 left_rb_node = rb_node->rb_left;
406 parent_rb_node = rb_parent(rb_node);
407 left_node = rb_entry(left_rb_node,
408 struct drm_mm_node, rb_hole_addr);
409 if ((left_node->subtree_max_hole < size ||
410 entry->size == entry->subtree_max_hole) &&
411 parent_rb_node && parent_rb_node->rb_left != rb_node)
412 return rb_hole_addr_to_node(parent_rb_node);
415 return rb_hole_addr_to_node(rb_prev(rb_node));
419 * next_hole_low_addr - returns next hole for a DRM_MM_INSERT_LOW mode request
420 * @entry: previously selected drm_mm_node
421 * @size: size of the hole needed for the request
423 * This function will verify whether the right subtree of @entry has a hole big enough
424 * to fit the requested size. If so, it will return the next node of @entry;
425 * otherwise it will return the parent node of @entry.
427 * It will also skip the complete right subtree if the subtree_max_hole of that
428 * subtree is the same as the subtree_max_hole of @entry.
431 * The next node of @entry if the right subtree of @entry can serve the
432 * request, otherwise the parent node of @entry.
434 static struct drm_mm_node *
435 next_hole_low_addr(struct drm_mm_node *entry, u64 size)
437 struct rb_node *rb_node, *right_rb_node, *parent_rb_node;
438 struct drm_mm_node *right_node;
443 rb_node = &entry->rb_hole_addr;
444 if (rb_node->rb_right) {
445 right_rb_node = rb_node->rb_right;
446 parent_rb_node = rb_parent(rb_node);
447 right_node = rb_entry(right_rb_node,
448 struct drm_mm_node, rb_hole_addr);
449 if ((right_node->subtree_max_hole < size ||
450 entry->size == entry->subtree_max_hole) &&
451 parent_rb_node && parent_rb_node->rb_right != rb_node)
452 return rb_hole_addr_to_node(parent_rb_node);
455 return rb_hole_addr_to_node(rb_next(rb_node));
458 static struct drm_mm_node *
459 next_hole(struct drm_mm *mm,
460 struct drm_mm_node *node,
462 enum drm_mm_insert_mode mode)
466 case DRM_MM_INSERT_BEST:
467 return rb_hole_size_to_node(rb_prev(&node->rb_hole_size));
469 case DRM_MM_INSERT_LOW:
470 return next_hole_low_addr(node, size);
472 case DRM_MM_INSERT_HIGH:
473 return next_hole_high_addr(node, size);
475 case DRM_MM_INSERT_EVICT:
476 node = list_next_entry(node, hole_stack);
477 return &node->hole_stack == &mm->hole_stack ? NULL : node;
482 * drm_mm_reserve_node - insert a pre-initialized node
483 * @mm: drm_mm allocator to insert @node into
484 * @node: drm_mm_node to insert
486 * This function inserts an already set-up &drm_mm_node into the allocator,
487 * meaning that start, size and color must be set by the caller. All other
488 * fields must be cleared to 0. This is useful to initialize the allocator with
489 * preallocated objects which must be set up before the range allocator can be
490 * set up, e.g. when taking over a firmware framebuffer.
493 * 0 on success, -ENOSPC if there's no hole where @node is.
495 int drm_mm_reserve_node(struct drm_mm *mm, struct drm_mm_node *node)
497 struct drm_mm_node *hole;
498 u64 hole_start, hole_end;
499 u64 adj_start, adj_end;
502 end = node->start + node->size;
503 if (unlikely(end <= node->start))
506 /* Find the relevant hole to add our node to */
507 hole = find_hole(mm, node->start);
511 adj_start = hole_start = __drm_mm_hole_node_start(hole);
512 adj_end = hole_end = hole_start + hole->hole_size;
514 if (mm->color_adjust)
515 mm->color_adjust(hole, node->color, &adj_start, &adj_end);
517 if (adj_start > node->start || adj_end < end)
522 __set_bit(DRM_MM_NODE_ALLOCATED_BIT, &node->flags);
523 list_add(&node->node_list, &hole->node_list);
524 drm_mm_interval_tree_add_node(hole, node);
528 if (node->start > hole_start)
536 EXPORT_SYMBOL(drm_mm_reserve_node);
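/*
 * Example (illustrative sketch): reserving the range already scanned out by
 * the firmware at driver load. my_reserve_fw_fb and the fb_base/fb_size
 * values taken from the inherited display configuration are made-up names
 * for this sketch:
 *
 *	static int my_reserve_fw_fb(struct drm_mm *mm, struct drm_mm_node *node,
 *				    u64 fb_base, u64 fb_size)
 *	{
 *		memset(node, 0, sizeof(*node));	// all other fields must be 0
 *		node->start = fb_base;
 *		node->size = fb_size;
 *
 *		return drm_mm_reserve_node(mm, node);	// -ENOSPC if range is taken
 *	}
 */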
538 static u64 rb_to_hole_size_or_zero(struct rb_node *rb)
540 return rb ? rb_to_hole_size(rb) : 0;
544 * drm_mm_insert_node_in_range - ranged search for space and insert @node
545 * @mm: drm_mm to allocate from
546 * @node: preallocated node to insert
547 * @size: size of the allocation
548 * @alignment: alignment of the allocation
549 * @color: opaque tag value to use for this node
550 * @range_start: start of the allowed range for this node
551 * @range_end: end of the allowed range for this node
552 * @mode: fine-tune the allocation search and placement
554 * The preallocated @node must be cleared to 0.
557 * 0 on success, -ENOSPC if there's no suitable hole.
559 int drm_mm_insert_node_in_range(struct drm_mm * const mm,
560 struct drm_mm_node * const node,
561 u64 size, u64 alignment,
563 u64 range_start, u64 range_end,
564 enum drm_mm_insert_mode mode)
566 struct drm_mm_node *hole;
570 DRM_MM_BUG_ON(range_start > range_end);
572 if (unlikely(size == 0 || range_end - range_start < size))
575 if (rb_to_hole_size_or_zero(rb_first_cached(&mm->holes_size)) < size)
581 once = mode & DRM_MM_INSERT_ONCE;
582 mode &= ~DRM_MM_INSERT_ONCE;
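/*
 * For power-of-two alignments the remainder can be computed with a cheap
 * mask below; otherwise the search falls back to div64_u64_rem().
 */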
584 remainder_mask = is_power_of_2(alignment) ? alignment - 1 : 0;
585 for (hole = first_hole(mm, range_start, range_end, size, mode);
587 hole = once ? NULL : next_hole(mm, hole, size, mode)) {
588 u64 hole_start = __drm_mm_hole_node_start(hole);
589 u64 hole_end = hole_start + hole->hole_size;
590 u64 adj_start, adj_end;
591 u64 col_start, col_end;
593 if (mode == DRM_MM_INSERT_LOW && hole_start >= range_end)
596 if (mode == DRM_MM_INSERT_HIGH && hole_end <= range_start)
599 col_start = hole_start;
601 if (mm->color_adjust)
602 mm->color_adjust(hole, color, &col_start, &col_end);
604 adj_start = max(col_start, range_start);
605 adj_end = min(col_end, range_end);
607 if (adj_end <= adj_start || adj_end - adj_start < size)
610 if (mode == DRM_MM_INSERT_HIGH)
611 adj_start = adj_end - size;
616 if (likely(remainder_mask))
617 rem = adj_start & remainder_mask;
619 div64_u64_rem(adj_start, alignment, &rem);
622 if (mode != DRM_MM_INSERT_HIGH)
623 adj_start += alignment;
625 if (adj_start < max(col_start, range_start) ||
626 min(col_end, range_end) - adj_start < size)
629 if (adj_end <= adj_start ||
630 adj_end - adj_start < size)
637 node->start = adj_start;
641 __set_bit(DRM_MM_NODE_ALLOCATED_BIT, &node->flags);
642 list_add(&node->node_list, &hole->node_list);
643 drm_mm_interval_tree_add_node(hole, node);
646 if (adj_start > hole_start)
648 if (adj_start + size < hole_end)
657 EXPORT_SYMBOL(drm_mm_insert_node_in_range);
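/*
 * Example (illustrative sketch): a top-down, 64 KiB aligned allocation with
 * color 0, restricted to the upper half of a hypothetical 1 GiB managed
 * range. my_alloc_high is a made-up helper; the SZ_* constants come from
 * <linux/sizes.h>:
 *
 *	static int my_alloc_high(struct drm_mm *mm, struct drm_mm_node *node,
 *				 u64 size)
 *	{
 *		memset(node, 0, sizeof(*node));
 *		return drm_mm_insert_node_in_range(mm, node, size, SZ_64K, 0,
 *						   SZ_512M, SZ_1G,
 *						   DRM_MM_INSERT_HIGH);
 *	}
 */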
659 static inline bool drm_mm_node_scanned_block(const struct drm_mm_node *node)
661 return test_bit(DRM_MM_NODE_SCANNED_BIT, &node->flags);
665 * drm_mm_remove_node - Remove a memory node from the allocator.
666 * @node: drm_mm_node to remove
668 * This just removes a node from its drm_mm allocator. The node does not need to
669 * be cleared again before it can be re-inserted into this or any other drm_mm
670 * allocator. It is a bug to call this function on an unallocated node.
672 void drm_mm_remove_node(struct drm_mm_node *node)
674 struct drm_mm *mm = node->mm;
675 struct drm_mm_node *prev_node;
677 DRM_MM_BUG_ON(!drm_mm_node_allocated(node));
678 DRM_MM_BUG_ON(drm_mm_node_scanned_block(node));
680 prev_node = list_prev_entry(node, node_list);
682 if (drm_mm_hole_follows(node))
685 drm_mm_interval_tree_remove(node, &mm->interval_tree);
686 list_del(&node->node_list);
688 if (drm_mm_hole_follows(prev_node))
692 clear_bit_unlock(DRM_MM_NODE_ALLOCATED_BIT, &node->flags);
694 EXPORT_SYMBOL(drm_mm_remove_node);
697 * drm_mm_replace_node - move an allocation from @old to @new
698 * @old: drm_mm_node to remove from the allocator
699 * @new: drm_mm_node which should inherit @old's allocation
701 * This is useful when drivers embed the drm_mm_node structure and hence
702 * can't move allocations by reassigning pointers. It's a combination of remove
703 * and insert with the guarantee that the allocation start will match.
705 void drm_mm_replace_node(struct drm_mm_node *old, struct drm_mm_node *new)
707 struct drm_mm *mm = old->mm;
709 DRM_MM_BUG_ON(!drm_mm_node_allocated(old));
713 __set_bit(DRM_MM_NODE_ALLOCATED_BIT, &new->flags);
714 list_replace(&old->node_list, &new->node_list);
715 rb_replace_node_cached(&old->rb, &new->rb, &mm->interval_tree);
717 if (drm_mm_hole_follows(old)) {
718 list_replace(&old->hole_stack, &new->hole_stack);
719 rb_replace_node_cached(&old->rb_hole_size,
722 rb_replace_node(&old->rb_hole_addr,
727 clear_bit_unlock(DRM_MM_NODE_ALLOCATED_BIT, &old->flags);
729 EXPORT_SYMBOL(drm_mm_replace_node);
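/*
 * Example (illustrative sketch): handing an existing allocation over to a new
 * embedding object. struct my_buffer, its "vma" member and the helper name
 * are made-up for this sketch:
 *
 *	static void my_buffer_swap_vma(struct my_buffer *old_buf,
 *				       struct my_buffer *new_buf)
 *	{
 *		drm_mm_replace_node(&old_buf->vma, &new_buf->vma);
 *		// new_buf->vma now owns the range, old_buf->vma is unallocated
 *	}
 */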
732 * DOC: lru scan roster
734 * Very often GPUs need to have contiguous allocations for a given object. When
735 * evicting objects to make space for a new one it is therefore not very
736 * efficient to simply select all objects from the tail of an LRU
737 * until there's a suitable hole: Especially for big objects or nodes that
738 * otherwise have special allocation constraints there's a good chance we evict
739 * lots of (smaller) objects unnecessarily.
741 * The DRM range allocator supports this use-case through the scanning
742 * interfaces. First a scan operation needs to be initialized with
743 * drm_mm_scan_init() or drm_mm_scan_init_with_range(). The driver adds
744 * objects to the roster, probably by walking an LRU list, but this can be
745 * freely implemented. Eviction candidates are added using
746 * drm_mm_scan_add_block() until a suitable hole is found or there are no
747 * further evictable objects. Eviction roster metadata is tracked in &struct drm_mm_scan.
750 * The driver must walk through all objects again in exactly the reverse
751 * order to restore the allocator state. Note that while the allocator is used
752 * in the scan mode no other operation is allowed.
754 * Finally the driver evicts all objects selected (drm_mm_scan_remove_block()
755 * reported true) in the scan, and any overlapping nodes after color adjustment
756 * (drm_mm_scan_color_evict()). Adding and removing an object is O(1), and
757 * since freeing a node is also O(1) the overall complexity is
758 * O(scanned_objects). So like the free stack, which needs to be walked before a
759 * scan operation even begins, this is linear in the number of objects. It
760 * doesn't seem to hurt too badly.
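 *
 * Example (an illustrative sketch of the flow described above): assuming a
 * hypothetical my_obj that embeds its drm_mm_node as "node", sits on an LRU
 * list via "lru_link", and a made-up my_evict() helper that unbinds an object
 * and frees its node. Only the drm_mm_scan_* calls are real API:
 *
 *	static int my_evict_for(struct drm_mm *mm, struct list_head *lru,
 *				u64 size)
 *	{
 *		struct drm_mm_scan scan;
 *		struct my_obj *obj, *next;
 *		struct drm_mm_node *node;
 *		LIST_HEAD(evict_list);
 *		bool found = false;
 *
 *		drm_mm_scan_init(&scan, mm, size, 0, 0, DRM_MM_INSERT_EVICT);
 *
 *		// Build the roster from the LRU until a big enough hole appears.
 *		list_for_each_entry(obj, lru, lru_link) {
 *			list_add(&obj->evict_link, &evict_list);
 *			if (drm_mm_scan_add_block(&scan, &obj->node)) {
 *				found = true;
 *				break;
 *			}
 *		}
 *
 *		// Walk back in reverse order of addition, keeping only the
 *		// blocks that actually overlap the hole that was found.
 *		list_for_each_entry_safe(obj, next, &evict_list, evict_link) {
 *			if (!drm_mm_scan_remove_block(&scan, &obj->node))
 *				list_del(&obj->evict_link);
 *		}
 *
 *		if (!found)
 *			return -ENOSPC;
 *
 *		// Evict the selected objects, then any extra overlap required
 *		// by color_adjust.
 *		list_for_each_entry_safe(obj, next, &evict_list, evict_link)
 *			my_evict(obj);
 *		while ((node = drm_mm_scan_color_evict(&scan)))
 *			my_evict(container_of(node, struct my_obj, node));
 *
 *		return 0;
 *	}
 *
 * The caller can then retry its allocation, typically with
 * DRM_MM_INSERT_EVICT so the just-freed hole is picked up first.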
764 * drm_mm_scan_init_with_range - initialize range-restricted lru scanning
766 * @mm: drm_mm to scan
767 * @size: size of the allocation
768 * @alignment: alignment of the allocation
769 * @color: opaque tag value to use for the allocation
770 * @start: start of the allowed range for the allocation
771 * @end: end of the allowed range for the allocation
772 * @mode: fine-tune the allocation search and placement
774 * This simply sets up the scanning routines with the parameters for the desired hole.
778 * As long as the scan list is non-empty, no other operations than
779 * adding/removing nodes to/from the scan list are allowed.
781 void drm_mm_scan_init_with_range(struct drm_mm_scan *scan,
788 enum drm_mm_insert_mode mode)
790 DRM_MM_BUG_ON(start >= end);
791 DRM_MM_BUG_ON(!size || size > end - start);
792 DRM_MM_BUG_ON(mm->scan_active);
800 scan->alignment = alignment;
801 scan->remainder_mask = is_power_of_2(alignment) ? alignment - 1 : 0;
805 DRM_MM_BUG_ON(end <= start);
806 scan->range_start = start;
807 scan->range_end = end;
809 scan->hit_start = U64_MAX;
812 EXPORT_SYMBOL(drm_mm_scan_init_with_range);
815 * drm_mm_scan_add_block - add a node to the scan list
816 * @scan: the active drm_mm scanner
817 * @node: drm_mm_node to add
819 * Add a node to the scan list that might be freed to make space for the desired hole.
823 * True if a hole has been found, false otherwise.
825 bool drm_mm_scan_add_block(struct drm_mm_scan *scan,
826 struct drm_mm_node *node)
828 struct drm_mm *mm = scan->mm;
829 struct drm_mm_node *hole;
830 u64 hole_start, hole_end;
831 u64 col_start, col_end;
832 u64 adj_start, adj_end;
834 DRM_MM_BUG_ON(node->mm != mm);
835 DRM_MM_BUG_ON(!drm_mm_node_allocated(node));
836 DRM_MM_BUG_ON(drm_mm_node_scanned_block(node));
837 __set_bit(DRM_MM_NODE_SCANNED_BIT, &node->flags);
840 /* Remove this block from the node_list so that we enlarge the hole
841 * (distance between the end of our previous node and the start of
842 * our next), without poisoning the link so that we can restore it
843 * later in drm_mm_scan_remove_block().
845 hole = list_prev_entry(node, node_list);
846 DRM_MM_BUG_ON(list_next_entry(hole, node_list) != node);
847 __list_del_entry(&node->node_list);
849 hole_start = __drm_mm_hole_node_start(hole);
850 hole_end = __drm_mm_hole_node_end(hole);
852 col_start = hole_start;
854 if (mm->color_adjust)
855 mm->color_adjust(hole, scan->color, &col_start, &col_end);
857 adj_start = max(col_start, scan->range_start);
858 adj_end = min(col_end, scan->range_end);
859 if (adj_end <= adj_start || adj_end - adj_start < scan->size)
862 if (scan->mode == DRM_MM_INSERT_HIGH)
863 adj_start = adj_end - scan->size;
865 if (scan->alignment) {
868 if (likely(scan->remainder_mask))
869 rem = adj_start & scan->remainder_mask;
871 div64_u64_rem(adj_start, scan->alignment, &rem);
874 if (scan->mode != DRM_MM_INSERT_HIGH)
875 adj_start += scan->alignment;
876 if (adj_start < max(col_start, scan->range_start) ||
877 min(col_end, scan->range_end) - adj_start < scan->size)
880 if (adj_end <= adj_start ||
881 adj_end - adj_start < scan->size)
886 scan->hit_start = adj_start;
887 scan->hit_end = adj_start + scan->size;
889 DRM_MM_BUG_ON(scan->hit_start >= scan->hit_end);
890 DRM_MM_BUG_ON(scan->hit_start < hole_start);
891 DRM_MM_BUG_ON(scan->hit_end > hole_end);
895 EXPORT_SYMBOL(drm_mm_scan_add_block);
898 * drm_mm_scan_remove_block - remove a node from the scan list
899 * @scan: the active drm_mm scanner
900 * @node: drm_mm_node to remove
902 * Nodes **must** be removed in exactly the reverse order from the scan list as
903 * they have been added (e.g. using list_add() as they are added and then
904 * list_for_each() over that eviction list to remove), otherwise the internal
905 * state of the memory manager will be corrupted.
907 * When the scan list is empty, the selected memory nodes can be freed. An
908 * immediately following drm_mm_insert_node_in_range() using the
909 * DRM_MM_INSERT_EVICT mode will then return
910 * the just freed block (because it's at the top of the hole_stack list).
913 * True if this block should be evicted, false otherwise. Will always
914 * return false when no hole has been found.
916 bool drm_mm_scan_remove_block(struct drm_mm_scan *scan,
917 struct drm_mm_node *node)
919 struct drm_mm_node *prev_node;
921 DRM_MM_BUG_ON(node->mm != scan->mm);
922 DRM_MM_BUG_ON(!drm_mm_node_scanned_block(node));
923 __clear_bit(DRM_MM_NODE_SCANNED_BIT, &node->flags);
925 DRM_MM_BUG_ON(!node->mm->scan_active);
926 node->mm->scan_active--;
928 /* During drm_mm_scan_add_block() we decoupled this node leaving
929 * its pointers intact. Now that the caller is walking back along
930 * the eviction list we can restore this block into its rightful
931 * place on the full node_list. To confirm that the caller is walking
932 * backwards correctly we check that prev_node->next == node->next,
933 * i.e. both believe the same node should be on the other side of the
936 prev_node = list_prev_entry(node, node_list);
937 DRM_MM_BUG_ON(list_next_entry(prev_node, node_list) !=
938 list_next_entry(node, node_list));
939 list_add(&node->node_list, &prev_node->node_list);
941 return (node->start + node->size > scan->hit_start &&
942 node->start < scan->hit_end);
944 EXPORT_SYMBOL(drm_mm_scan_remove_block);
947 * drm_mm_scan_color_evict - evict overlapping nodes on either side of hole
948 * @scan: drm_mm scan with target hole
950 * After completing an eviction scan and removing the selected nodes, we may
951 * need to remove a few more nodes from either side of the target hole if
952 * mm.color_adjust is being used.
955 * A node to evict, or NULL if there are no overlapping nodes.
957 struct drm_mm_node *drm_mm_scan_color_evict(struct drm_mm_scan *scan)
959 struct drm_mm *mm = scan->mm;
960 struct drm_mm_node *hole;
961 u64 hole_start, hole_end;
963 DRM_MM_BUG_ON(list_empty(&mm->hole_stack));
965 if (!mm->color_adjust)
969 * The hole found during scanning should ideally be the first element
970 * in the hole_stack list, but due to side-effects in the driver it may not be.
973 list_for_each_entry(hole, &mm->hole_stack, hole_stack) {
974 hole_start = __drm_mm_hole_node_start(hole);
975 hole_end = hole_start + hole->hole_size;
977 if (hole_start <= scan->hit_start &&
978 hole_end >= scan->hit_end)
982 /* We should only be called after we found the hole previously */
983 DRM_MM_BUG_ON(&hole->hole_stack == &mm->hole_stack);
984 if (unlikely(&hole->hole_stack == &mm->hole_stack))
987 DRM_MM_BUG_ON(hole_start > scan->hit_start);
988 DRM_MM_BUG_ON(hole_end < scan->hit_end);
990 mm->color_adjust(hole, scan->color, &hole_start, &hole_end);
991 if (hole_start > scan->hit_start)
993 if (hole_end < scan->hit_end)
994 return list_next_entry(hole, node_list);
998 EXPORT_SYMBOL(drm_mm_scan_color_evict);
1001 * drm_mm_init - initialize a drm-mm allocator
1002 * @mm: the drm_mm structure to initialize
1003 * @start: start of the range managed by @mm
1004 * @size: size of the range managed by @mm
1006 * Note that @mm must be cleared to 0 before calling this function.
1008 void drm_mm_init(struct drm_mm *mm, u64 start, u64 size)
1010 DRM_MM_BUG_ON(start + size <= start);
1012 mm->color_adjust = NULL;
1014 INIT_LIST_HEAD(&mm->hole_stack);
1015 mm->interval_tree = RB_ROOT_CACHED;
1016 mm->holes_size = RB_ROOT_CACHED;
1017 mm->holes_addr = RB_ROOT;
1019 /* Clever trick to avoid a special case in the free hole tracking. */
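/*
 * The head node sits just past the managed range: it starts at start + size
 * with a (wrapping) size of -size, so the hole that follows it covers exactly
 * [start, start + size) and every real node always has a list predecessor.
 */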
1020 INIT_LIST_HEAD(&mm->head_node.node_list);
1021 mm->head_node.flags = 0;
1022 mm->head_node.mm = mm;
1023 mm->head_node.start = start + size;
1024 mm->head_node.size = -size;
1025 add_hole(&mm->head_node);
1027 mm->scan_active = 0;
1029 EXPORT_SYMBOL(drm_mm_init);
1032 * drm_mm_takedown - clean up a drm_mm allocator
1033 * @mm: drm_mm allocator to clean up
1035 * Note that it is a bug to call this function on an allocator which is not clean.
1038 void drm_mm_takedown(struct drm_mm *mm)
1040 if (WARN(!drm_mm_clean(mm),
1041 "Memory manager not clean during takedown.\n"))
1044 EXPORT_SYMBOL(drm_mm_takedown);
1046 static u64 drm_mm_dump_hole(struct drm_printer *p, const struct drm_mm_node *entry)
1050 size = entry->hole_size;
1052 start = drm_mm_hole_node_start(entry);
1053 drm_printf(p, "%#018llx-%#018llx: %llu: free\n",
1054 start, start + size, size);
1060 * drm_mm_print - print allocator state
1061 * @mm: drm_mm allocator to print
1062 * @p: DRM printer to use
1064 void drm_mm_print(const struct drm_mm *mm, struct drm_printer *p)
1066 const struct drm_mm_node *entry;
1067 u64 total_used = 0, total_free = 0, total = 0;
1069 total_free += drm_mm_dump_hole(p, &mm->head_node);
1071 drm_mm_for_each_node(entry, mm) {
1072 drm_printf(p, "%#018llx-%#018llx: %llu: used\n", entry->start,
1073 entry->start + entry->size, entry->size);
1074 total_used += entry->size;
1075 total_free += drm_mm_dump_hole(p, entry);
1077 total = total_free + total_used;
1079 drm_printf(p, "total: %llu, used %llu free %llu\n", total,
1080 total_used, total_free);
1082 EXPORT_SYMBOL(drm_mm_print);
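/*
 * Example (illustrative sketch): dumping the allocator from a debugfs show
 * callback, assuming a hypothetical my_vram wrapper (a drm_mm plus a mutex)
 * reachable from the seq_file private pointer:
 *
 *	static int my_mm_show(struct seq_file *m, void *data)
 *	{
 *		struct my_vram *vram = m->private;
 *		struct drm_printer p = drm_seq_file_printer(m);
 *
 *		mutex_lock(&vram->lock);
 *		drm_mm_print(&vram->mm, &p);
 *		mutex_unlock(&vram->lock);
 *		return 0;
 *	}
 */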