// SPDX-License-Identifier: MIT
/*
 * Copyright © 2019 Intel Corporation
 */

#include <linux/kmemleak.h>
#include <linux/slab.h>

#include "i915_buddy.h"

#include "i915_gem.h"
#include "i915_globals.h"
#include "i915_utils.h"

static struct i915_global_block {
        struct i915_global base;
        struct kmem_cache *slab_blocks;
} global;

static void i915_global_buddy_shrink(void)
{
        kmem_cache_shrink(global.slab_blocks);
}

static void i915_global_buddy_exit(void)
{
        kmem_cache_destroy(global.slab_blocks);
}

static struct i915_global_block global = { {
        .shrink = i915_global_buddy_shrink,
        .exit = i915_global_buddy_exit,
} };

int __init i915_global_buddy_init(void)
{
        global.slab_blocks = KMEM_CACHE(i915_buddy_block, SLAB_HWCACHE_ALIGN);
        if (!global.slab_blocks)
                return -ENOMEM;

        return 0;
}

static struct i915_buddy_block *i915_block_alloc(struct i915_buddy_block *parent,
                                                 unsigned int order,
                                                 u64 offset)
{
        struct i915_buddy_block *block;

        block = kmem_cache_zalloc(global.slab_blocks, GFP_KERNEL);
        if (!block)
                return NULL;

        block->header = offset;
        block->header |= order;
        block->parent = parent;

        return block;
}

static void i915_block_free(struct i915_buddy_block *block)
{
        kmem_cache_free(global.slab_blocks, block);
}

static void mark_allocated(struct i915_buddy_block *block)
{
        block->header &= ~I915_BUDDY_HEADER_STATE;
        block->header |= I915_BUDDY_ALLOCATED;

        list_del(&block->link);
}

static void mark_free(struct i915_buddy_mm *mm,
                      struct i915_buddy_block *block)
{
        block->header &= ~I915_BUDDY_HEADER_STATE;
        block->header |= I915_BUDDY_FREE;

        list_add(&block->link,
                 &mm->free_list[i915_buddy_block_order(block)]);
}

static void mark_split(struct i915_buddy_block *block)
{
        block->header &= ~I915_BUDDY_HEADER_STATE;
        block->header |= I915_BUDDY_SPLIT;

        list_del(&block->link);
}

int i915_buddy_init(struct i915_buddy_mm *mm, u64 size, u64 chunk_size)
{
        unsigned int i;
        u64 offset;

        if (size < chunk_size)
                return -EINVAL;

        if (chunk_size < PAGE_SIZE)
                return -EINVAL;

        if (!is_power_of_2(chunk_size))
                return -EINVAL;

        size = round_down(size, chunk_size);

        mm->size = size;
        mm->chunk_size = chunk_size;
        mm->max_order = ilog2(size) - ilog2(chunk_size);

        GEM_BUG_ON(mm->max_order > I915_BUDDY_MAX_ORDER);

        mm->free_list = kmalloc_array(mm->max_order + 1,
                                      sizeof(struct list_head),
                                      GFP_KERNEL);
        if (!mm->free_list)
                return -ENOMEM;

        for (i = 0; i <= mm->max_order; ++i)
                INIT_LIST_HEAD(&mm->free_list[i]);

        mm->n_roots = hweight64(size);

        mm->roots = kmalloc_array(mm->n_roots,
                                  sizeof(struct i915_buddy_block *),
                                  GFP_KERNEL);
        if (!mm->roots)
                goto out_free_list;

        offset = 0;
        i = 0;

        /*
         * Split into power-of-two blocks, in case we are given a size that is
         * not itself a power-of-two.
         */
        do {
                struct i915_buddy_block *root;
                unsigned int order;
                u64 root_size;

                root_size = rounddown_pow_of_two(size);
                order = ilog2(root_size) - ilog2(chunk_size);

                root = i915_block_alloc(NULL, order, offset);
                if (!root)
                        goto out_free_roots;

                mark_free(mm, root);

                GEM_BUG_ON(i > mm->max_order);
                GEM_BUG_ON(i915_buddy_block_size(mm, root) < chunk_size);

                mm->roots[i] = root;

                offset += root_size;
                size -= root_size;
                i++;
        } while (size);

        return 0;

out_free_roots:
        while (i--)
                i915_block_free(mm->roots[i]);
        kfree(mm->roots);
out_free_list:
        kfree(mm->free_list);
        return -ENOMEM;
}
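
/*
 * Worked example for the root-splitting loop in i915_buddy_init() above
 * (editorial illustration, not part of the original source): assuming a 4K
 * chunk_size and size = 20K, hweight64(20K) yields two roots, carved
 * largest-first:
 *
 *      root[0]: rounddown_pow_of_two(20K) = 16K -> order 2, offset 0
 *      root[1]: rounddown_pow_of_two(4K)  = 4K  -> order 0, offset 16K
 *
 * The 4K/20K figures are arbitrary; any power-of-two chunk_size of at least
 * PAGE_SIZE behaves the same way.
 */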

void i915_buddy_fini(struct i915_buddy_mm *mm)
{
        int i;

        for (i = 0; i < mm->n_roots; ++i) {
                GEM_WARN_ON(!i915_buddy_block_is_free(mm->roots[i]));
                i915_block_free(mm->roots[i]);
        }

        kfree(mm->roots);
        kfree(mm->free_list);
}

static int split_block(struct i915_buddy_mm *mm,
                       struct i915_buddy_block *block)
{
        unsigned int block_order = i915_buddy_block_order(block) - 1;
        u64 offset = i915_buddy_block_offset(block);

        GEM_BUG_ON(!i915_buddy_block_is_free(block));
        GEM_BUG_ON(!i915_buddy_block_order(block));

        block->left = i915_block_alloc(block, block_order, offset);
        if (!block->left)
                return -ENOMEM;

        block->right = i915_block_alloc(block, block_order,
                                        offset + (mm->chunk_size << block_order));
        if (!block->right) {
                i915_block_free(block->left);
                return -ENOMEM;
        }

        mark_free(mm, block->left);
        mark_free(mm, block->right);

        mark_split(block);

        return 0;
}
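
/*
 * Worked example for split_block() above (editorial illustration, not part of
 * the original source): splitting a free order-3 block at offset 0, assuming
 * a 4K chunk_size, yields two free order-2 buddies of 16K each:
 *
 *      left:  offset 0,                   size = 4K << 2 = 16K
 *      right: offset 0 + (4K << 2) = 16K, size = 16K
 *
 * The parent is marked split and drops off the free lists, so only leaf
 * blocks are ever handed out or kept on a free list.
 */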

static struct i915_buddy_block *
get_buddy(struct i915_buddy_block *block)
{
        struct i915_buddy_block *parent;

        parent = block->parent;
        if (!parent)
                return NULL;

        if (parent->left == block)
                return parent->right;

        return parent->left;
}

static void __i915_buddy_free(struct i915_buddy_mm *mm,
                              struct i915_buddy_block *block)
{
        struct i915_buddy_block *parent;

        while ((parent = block->parent)) {
                struct i915_buddy_block *buddy;

                buddy = get_buddy(block);

                if (!i915_buddy_block_is_free(buddy))
                        break;

                list_del(&buddy->link);

                i915_block_free(block);
                i915_block_free(buddy);

                block = parent;
        }

        mark_free(mm, block);
}

void i915_buddy_free(struct i915_buddy_mm *mm,
                     struct i915_buddy_block *block)
{
        GEM_BUG_ON(!i915_buddy_block_is_allocated(block));
        __i915_buddy_free(mm, block);
}

void i915_buddy_free_list(struct i915_buddy_mm *mm, struct list_head *objects)
{
        struct i915_buddy_block *block, *on;

        list_for_each_entry_safe(block, on, objects, link)
                i915_buddy_free(mm, block);
        INIT_LIST_HEAD(objects);
}

/*
 * Allocate power-of-two block. The order value here translates to:
 *
 *   0 = 2^0 * mm->chunk_size
 *   1 = 2^1 * mm->chunk_size
 *   2 = 2^2 * mm->chunk_size
 *   ...
 */
struct i915_buddy_block *
i915_buddy_alloc(struct i915_buddy_mm *mm, unsigned int order)
{
        struct i915_buddy_block *block = NULL;
        unsigned int i;
        int err;

        for (i = order; i <= mm->max_order; ++i) {
                block = list_first_entry_or_null(&mm->free_list[i],
                                                 struct i915_buddy_block,
                                                 link);
                if (block)
                        break;
        }

        if (!block)
                return ERR_PTR(-ENOSPC);

        GEM_BUG_ON(!i915_buddy_block_is_free(block));

        while (i != order) {
                err = split_block(mm, block);
                if (unlikely(err))
                        goto out_free;

                /* Go low */
                block = block->left;
                i--;
        }

        mark_allocated(block);
        kmemleak_update_trace(block);
        return block;

out_free:
        __i915_buddy_free(mm, block);
        return ERR_PTR(err);
}
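
/*
 * Usage sketch for i915_buddy_alloc() above (editorial illustration, not part
 * of the driver): to get a contiguous 2M block from an mm initialised with a
 * 4K chunk_size, derive the order from the size and check for ERR_PTR():
 *
 *      unsigned int order = ilog2(SZ_2M) - ilog2(mm->chunk_size);
 *      struct i915_buddy_block *block = i915_buddy_alloc(mm, order);
 *
 *      if (IS_ERR(block))
 *              return PTR_ERR(block);
 *      ...
 *      i915_buddy_free(mm, block);
 *
 * The 4K/2M sizes are assumptions for the example; order 9 only holds for
 * that particular chunk_size.
 */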

static inline bool overlaps(u64 s1, u64 e1, u64 s2, u64 e2)
{
        return s1 <= e2 && e1 >= s2;
}

static inline bool contains(u64 s1, u64 e1, u64 s2, u64 e2)
{
        return s1 <= s2 && e1 >= e2;
}

/*
 * Allocate range. Note that it's safe to chain together multiple alloc_ranges
 * with the same blocks list.
 *
 * Intended for pre-allocating portions of the address space, for example to
 * reserve a block for the initial framebuffer or similar, hence the expectation
 * here is that i915_buddy_alloc() is still the main vehicle for
 * allocations, so if that's not the case then the drm_mm range allocator is
 * probably a much better fit, and so you should probably go use that instead.
 */
int i915_buddy_alloc_range(struct i915_buddy_mm *mm,
                           struct list_head *blocks,
                           u64 start, u64 size)
{
        struct i915_buddy_block *block;
        struct i915_buddy_block *buddy;
        LIST_HEAD(allocated);
        LIST_HEAD(dfs);
        u64 end;
        int err;
        int i;

        if (size < mm->chunk_size)
                return -EINVAL;

        if (!IS_ALIGNED(size | start, mm->chunk_size))
                return -EINVAL;

        if (range_overflows(start, size, mm->size))
                return -EINVAL;

        for (i = 0; i < mm->n_roots; ++i)
                list_add_tail(&mm->roots[i]->tmp_link, &dfs);

        end = start + size - 1;

        do {
                u64 block_start;
                u64 block_end;

                block = list_first_entry_or_null(&dfs,
                                                 struct i915_buddy_block,
                                                 tmp_link);
                if (!block)
                        break;

                list_del(&block->tmp_link);

                block_start = i915_buddy_block_offset(block);
                block_end = block_start + i915_buddy_block_size(mm, block) - 1;

                if (!overlaps(start, end, block_start, block_end))
                        continue;

                if (i915_buddy_block_is_allocated(block)) {
                        err = -ENOSPC;
                        goto err_free;
                }

                if (contains(start, end, block_start, block_end)) {
                        if (!i915_buddy_block_is_free(block)) {
                                err = -ENOSPC;
                                goto err_free;
                        }

                        mark_allocated(block);
                        list_add_tail(&block->link, &allocated);
                        continue;
                }

                if (!i915_buddy_block_is_split(block)) {
                        err = split_block(mm, block);
                        if (unlikely(err))
                                goto err_undo;
                }

                list_add(&block->right->tmp_link, &dfs);
                list_add(&block->left->tmp_link, &dfs);
        } while (1);

        list_splice_tail(&allocated, blocks);
        return 0;

err_undo:
        /*
         * We really don't want to leave around a bunch of split blocks, since
         * bigger is better, so make sure we merge everything back before we
         * free the allocated blocks.
         */
        buddy = get_buddy(block);
        if (buddy &&
            (i915_buddy_block_is_free(block) &&
             i915_buddy_block_is_free(buddy)))
                __i915_buddy_free(mm, block);

err_free:
        i915_buddy_free_list(mm, &allocated);
        return err;
}
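
/*
 * Usage sketch for i915_buddy_alloc_range() above (editorial illustration,
 * not part of the driver): reserving an already-claimed range, e.g. memory
 * scanned out by the initial framebuffer, and releasing it again later. The
 * fb_start/fb_size names are placeholders, not real driver symbols:
 *
 *      LIST_HEAD(reserved);
 *      int err;
 *
 *      err = i915_buddy_alloc_range(mm, &reserved, fb_start, fb_size);
 *      if (err)
 *              return err;
 *      ...
 *      i915_buddy_free_list(mm, &reserved);
 */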

#if IS_ENABLED(CONFIG_DRM_I915_SELFTEST)
#include "selftests/i915_buddy.c"
#endif