1 // SPDX-License-Identifier: MIT
3 * Copyright © 2019 Intel Corporation
6 #include <linux/prime_numbers.h>
8 #include "../i915_selftest.h"
9 #include "i915_random.h"
11 #define SZ_8G (1ULL << 33)
/*
 * __igt_dump_block - dump one buddy block's bookkeeping fields via pr_err.
 *
 * Prints header, state, order, offset, size, whether the block is a root
 * (no parent) and — presumably via a trailing bool parameter not visible in
 * this extract — whether this dump is for a buddy block.
 *
 * NOTE(review): this extract is missing interior lines (the original line
 * numbering jumps); the final printf argument(s) and closing brace are not
 * visible here — confirm against the full file.
 */
13 static void __igt_dump_block(struct i915_buddy_mm *mm,
14 struct i915_buddy_block *block,
17 pr_err("block info: header=%llx, state=%u, order=%d, offset=%llx size=%llx root=%s buddy=%s\n",
19 i915_buddy_block_state(block),
20 i915_buddy_block_order(block),
21 i915_buddy_block_offset(block),
22 i915_buddy_block_size(mm, block),
/* root=%s: a block with no parent is a root of the buddy tree */
23 yesno(!block->parent),
/*
 * igt_dump_block - dump a block and (if present) its buddy.
 *
 * Dumps @block first, then looks up its buddy and dumps that too; the
 * elided lines presumably guard the second dump on @buddy being non-NULL
 * (root blocks have no buddy) — TODO confirm against the full file.
 */
27 static void igt_dump_block(struct i915_buddy_mm *mm,
28 struct i915_buddy_block *block)
30 struct i915_buddy_block *buddy;
/* false/true distinguishes the primary block from its buddy in the dump */
32 __igt_dump_block(mm, block, false);
34 buddy = get_buddy(block);
36 __igt_dump_block(mm, buddy, true);
/*
 * igt_check_block - sanity-check a single buddy block's invariants.
 *
 * Verifies, in order:
 *  - the block state is one of ALLOCATED/FREE/SPLIT;
 *  - the block size is >= chunk_size, a power of two, and chunk-aligned;
 *  - the block offset is aligned to both chunk_size and its own size;
 *  - a non-root block has a buddy, and that buddy mirrors it (offset is
 *    offset ^ size, same size) and is not simultaneously FREE with it
 *    (two free buddies should have been merged by the allocator).
 *
 * Returns 0 on success; the elided lines presumably set a negative errno
 * (e.g. -EINVAL) on each failure — TODO confirm against the full file.
 *
 * NOTE(review): interior lines are elided in this extract (original
 * numbering jumps), including the declarations of block_size/offset/err.
 */
39 static int igt_check_block(struct i915_buddy_mm *mm,
40 struct i915_buddy_block *block)
42 struct i915_buddy_block *buddy;
43 unsigned int block_state;
48 block_state = i915_buddy_block_state(block);
/* State must be one of the three valid buddy states */
50 if (block_state != I915_BUDDY_ALLOCATED &&
51 block_state != I915_BUDDY_FREE &&
52 block_state != I915_BUDDY_SPLIT) {
53 pr_err("block state mismatch\n");
57 block_size = i915_buddy_block_size(mm, block);
58 offset = i915_buddy_block_offset(block);
/* Size invariants: >= chunk, pot, chunk-aligned */
60 if (block_size < mm->chunk_size) {
61 pr_err("block size smaller than min size\n");
65 if (!is_power_of_2(block_size)) {
66 pr_err("block size not power of two\n");
70 if (!IS_ALIGNED(block_size, mm->chunk_size)) {
71 pr_err("block size not aligned to min size\n");
/* Offset invariants: aligned to chunk and naturally aligned to own size */
75 if (!IS_ALIGNED(offset, mm->chunk_size)) {
76 pr_err("block offset not aligned to min size\n");
80 if (!IS_ALIGNED(offset, block_size)) {
81 pr_err("block offset not aligned to block size\n");
85 buddy = get_buddy(block);
/* Only root blocks (no parent) may legitimately lack a buddy */
87 if (!buddy && block->parent) {
88 pr_err("buddy has gone fishing\n");
/* A buddy pair differs only in the bit selecting which half: offset ^ size */
93 if (i915_buddy_block_offset(buddy) != (offset ^ block_size)) {
94 pr_err("buddy has wrong offset\n");
98 if (i915_buddy_block_size(mm, buddy) != block_size) {
99 pr_err("buddy size mismatch\n");
/* Two free buddies should have been coalesced into the parent */
103 if (i915_buddy_block_state(buddy) == block_state &&
104 block_state == I915_BUDDY_FREE) {
105 pr_err("block and its buddy are free\n");
/*
 * igt_check_blocks - validate a list of allocated blocks.
 *
 * Runs igt_check_block() on each entry, checks every block is in the
 * ALLOCATED state, optionally (is_contiguous — parameter elided from this
 * extract, TODO confirm) checks consecutive blocks tile with no gaps, and
 * finally checks the summed size equals @expected_size. On failure the
 * elided tail jumps to dump labels that print the offending block (and its
 * predecessor) before returning a negative errno — presumably -EINVAL.
 *
 * NOTE(review): interior lines are elided (original numbering jumps);
 * the declarations of total/err/offset and the goto labels are partly
 * invisible here.
 */
113 static int igt_check_blocks(struct i915_buddy_mm *mm,
114 struct list_head *blocks,
118 struct i915_buddy_block *block;
119 struct i915_buddy_block *prev;
127 list_for_each_entry(block, blocks, link) {
128 err = igt_check_block(mm, block);
130 if (!i915_buddy_block_is_allocated(block)) {
/* NOTE(review): comma operator after pr_err() — legal C but almost
 * certainly meant to be a semicolon; verify against upstream. */
131 pr_err("block not allocated\n"),
/* Contiguity: each block must start exactly where the previous ended */
135 if (is_contiguous && prev) {
140 prev_offset = i915_buddy_block_offset(prev);
141 prev_block_size = i915_buddy_block_size(mm, prev);
142 offset = i915_buddy_block_offset(block);
144 if (offset != (prev_offset + prev_block_size)) {
145 pr_err("block offset mismatch\n");
153 total += i915_buddy_block_size(mm, block);
/* The list must account for exactly the expected number of bytes */
158 if (total != expected_size) {
159 pr_err("size mismatch, expected=%llx, found=%llx\n",
160 expected_size, total);
/* Error paths: dump the previous and/or offending block for diagnosis */
167 pr_err("prev block, dump:\n");
168 igt_dump_block(mm, prev);
172 pr_err("bad block, dump:\n");
173 igt_dump_block(mm, block);
/*
 * igt_check_mm - validate the buddy mm is in its pristine, fully-free state.
 *
 * Checks:
 *  - n_roots is non-zero and equals the popcount of mm->size (one root per
 *    set bit, i.e. per power-of-two chunk of the address space);
 *  - every root passes igt_check_block(), is FREE, and sits at the head of
 *    its order's free_list;
 *  - roots tile contiguously, in what appears to be descending order with
 *    the first root at max_order (the elided lines around L88 presumably
 *    only enforce this for the first root — TODO confirm);
 *  - the summed root sizes equal mm->size.
 *
 * Returns 0 when the mm is intact; the elided failure paths presumably set
 * a negative errno and dump the offending root(s).
 *
 * NOTE(review): interior lines are elided (original numbering jumps);
 * declarations of total/prev/err/order/offset are partly invisible.
 */
179 static int igt_check_mm(struct i915_buddy_mm *mm)
181 struct i915_buddy_block *root;
182 struct i915_buddy_block *prev;
188 pr_err("n_roots is zero\n");
/* One root per set bit of the total size */
192 if (mm->n_roots != hweight64(mm->size)) {
193 pr_err("n_roots mismatch, n_roots=%u, expected=%lu\n",
194 mm->n_roots, hweight64(mm->size));
202 for (i = 0; i < mm->n_roots; ++i) {
203 struct i915_buddy_block *block;
208 pr_err("root(%u) is NULL\n", i);
213 err = igt_check_block(mm, root);
/* In a pristine mm every root must be free */
215 if (!i915_buddy_block_is_free(root)) {
216 pr_err("root not free\n");
220 order = i915_buddy_block_order(root);
/* Presumably only checked for i == 0 (guard elided) — TODO confirm */
223 if (order != mm->max_order) {
224 pr_err("max order root missing\n");
/* Roots must tile contiguously after their predecessor */
234 prev_offset = i915_buddy_block_offset(prev);
235 prev_block_size = i915_buddy_block_size(mm, prev);
236 offset = i915_buddy_block_offset(root);
238 if (offset != (prev_offset + prev_block_size)) {
239 pr_err("root offset mismatch\n");
/* A free root should be findable on its order's free list */
244 block = list_first_entry_or_null(&mm->free_list[order],
245 struct i915_buddy_block,
248 pr_err("root mismatch at order=%u\n", order);
256 total += i915_buddy_block_size(mm, root);
/* All roots together must cover the entire managed range */
260 if (total != mm->size) {
261 pr_err("expected mm size=%llx, found=%llx\n", mm->size,
/* Error paths: dump previous and/or offending root */
269 pr_err("prev root(%u), dump:\n", i - 1);
270 igt_dump_block(mm, prev);
274 pr_err("bad root(%u), dump:\n", i);
275 igt_dump_block(mm, root);
/*
 * igt_mm_config - pick a pseudo-random mm size and chunk size for a test.
 *
 * Draws a size below SZ_8G and a chunk size that is a power of two of at
 * least 4K (BIT_ULL(12)) but no larger than the size, then rounds the size
 * down to a chunk multiple (clamped to at least one chunk). The elided
 * tail presumably stores s/ms through *size / *chunk_size — TODO confirm.
 */
281 static void igt_mm_config(u64 *size, u64 *chunk_size)
283 I915_RND_STATE(prng);
286 /* Nothing fancy, just try to get an interesting bit pattern */
288 prandom_seed_state(&prng, i915_selftest.random_seed);
/* s: random size within the 8G window */
290 s = i915_prandom_u64_state(&prng) & (SZ_8G - 1);
/* ms: power-of-two chunk in [4K, s] (exponent random in [12, ilog2(s)]) */
291 ms = BIT_ULL(12 + (prandom_u32_state(&prng) % ilog2(s >> 12)));
/* round s down to a multiple of ms, but never below one chunk */
292 s = max(s & -ms, ms);
/*
 * igt_buddy_alloc_smoke - randomized fill/free smoke test.
 *
 * Initialises a randomly-sized mm (igt_mm_config), then for each max_order
 * in a random permutation of [0, mm.max_order]: verifies the mm is pristine,
 * repeatedly allocates (at orders derived from max_order in elided lines —
 * TODO confirm) until the whole mm is consumed or -ENOMEM/timeout hits,
 * validates the allocated list (non-contiguous check), frees everything,
 * and verifies the mm returned to pristine. The elided tail presumably
 * frees the order[] array and returns err.
 *
 * NOTE(review): interior lines are elided (original numbering jumps);
 * the inner do/while head, err checks and cleanup labels are invisible.
 */
298 static int igt_buddy_alloc_smoke(void *arg)
300 struct i915_buddy_mm mm;
301 IGT_TIMEOUT(end_time);
302 I915_RND_STATE(prng);
308 igt_mm_config(&mm_size, &chunk_size);
310 pr_info("buddy_init with size=%llx, chunk_size=%llx\n", mm_size, chunk_size);
312 err = i915_buddy_init(&mm, mm_size, chunk_size);
314 pr_err("buddy_init failed(%d)\n", err);
/* Random permutation of all orders to vary the fill starting point */
318 order = i915_random_order(mm.max_order + 1, &prng);
322 for (i = 0; i <= mm.max_order; ++i) {
323 struct i915_buddy_block *block;
324 int max_order = order[i];
325 bool timeout = false;
/* mm must be back to pristine before each pass */
330 err = igt_check_mm(&mm);
332 pr_err("pre-mm check failed, abort\n");
336 pr_info("filling from max_order=%u\n", max_order);
343 block = i915_buddy_alloc(&mm, order);
345 err = PTR_ERR(block);
/* -ENOMEM is the expected way to discover the mm is full */
346 if (err == -ENOMEM) {
347 pr_info("buddy_alloc hit -ENOMEM with order=%d\n",
355 pr_err("buddy_alloc with order=%d failed(%d)\n",
362 list_add_tail(&block->link, &blocks);
364 if (i915_buddy_block_order(block) != order) {
365 pr_err("buddy_alloc order mismatch\n");
370 total += i915_buddy_block_size(&mm, block);
/* Bail out of long fills rather than hang the selftest */
372 if (__igt_timeout(end_time, NULL)) {
376 } while (total < mm.size);
/* false: blocks were allocated at mixed orders, no contiguity expected */
379 err = igt_check_blocks(&mm, &blocks, total, false);
381 i915_buddy_free_list(&mm, &blocks);
384 err = igt_check_mm(&mm);
386 pr_err("post-mm check failed\n");
400 i915_buddy_fini(&mm);
/*
 * igt_buddy_alloc_pessimistic - worst-case fragmentation test.
 *
 * Creates a power-of-two mm (PAGE_SIZE << 16) and allocates one block of
 * every order 0..max_order-1, which leaves exactly one page free; grabs
 * that final page, proves the mm is full at every order, then frees in
 * increasing size order — each free should coalesce just enough to allow a
 * realloc at the next order up — and finally proves the whole mm can be
 * reallocated as a single max_order block.
 *
 * NOTE(review): interior lines are elided (original numbering jumps);
 * the IS_ERR checks around each alloc, err/goto handling, and the
 * relationship between the list_for_each loop and `order` (presumably
 * incremented per iteration) are not visible — confirm against the full
 * file.
 */
407 static int igt_buddy_alloc_pessimistic(void *arg)
408 const unsigned int max_order = 16;
409 struct i915_buddy_block *block, *bn;
410 struct i915_buddy_mm mm;
415 * Create a pot-sized mm, then allocate one of each possible
416 * order within. This should leave the mm with exactly one
420 err = i915_buddy_init(&mm, PAGE_SIZE << max_order, PAGE_SIZE);
422 pr_err("buddy_init failed(%d)\n", err);
425 GEM_BUG_ON(mm.max_order != max_order);
/* One allocation per order: 2^0 + ... + 2^(n-1) pages = all but one */
427 for (order = 0; order < max_order; order++) {
428 block = i915_buddy_alloc(&mm, order);
430 pr_info("buddy_alloc hit -ENOMEM with order=%d\n",
432 err = PTR_ERR(block);
436 list_add_tail(&block->link, &blocks);
439 /* And now the last remaining block available */
440 block = i915_buddy_alloc(&mm, 0);
442 pr_info("buddy_alloc hit -ENOMEM on final alloc\n");
443 err = PTR_ERR(block);
446 list_add_tail(&block->link, &blocks);
448 /* Should be completely full! */
449 for (order = max_order; order--; ) {
450 block = i915_buddy_alloc(&mm, order);
/* Any success here means the mm leaked space */
451 if (!IS_ERR(block)) {
452 pr_info("buddy_alloc unexpectedly succeeded at order %d, it should be full!",
454 list_add_tail(&block->link, &blocks);
/* Drop the final order-0 page so coalescing can begin */
460 block = list_last_entry(&blocks, typeof(*block), link);
461 list_del(&block->link);
462 i915_buddy_free(&mm, block);
464 /* As we free in increasing size, we make available larger blocks */
466 list_for_each_entry_safe(block, bn, &blocks, link) {
467 list_del(&block->link);
468 i915_buddy_free(&mm, block);
/* Freeing order N should allow an immediate realloc at order N(+1?) —
 * the order bookkeeping here is elided, confirm against full file */
470 block = i915_buddy_alloc(&mm, order);
472 pr_info("buddy_alloc (realloc) hit -ENOMEM with order=%d\n",
474 err = PTR_ERR(block);
477 i915_buddy_free(&mm, block);
481 /* To confirm, now the whole mm should be available */
482 block = i915_buddy_alloc(&mm, max_order);
484 pr_info("buddy_alloc (realloc) hit -ENOMEM with order=%d\n",
486 err = PTR_ERR(block);
489 i915_buddy_free(&mm, block);
492 i915_buddy_free_list(&mm, &blocks);
493 i915_buddy_fini(&mm);
/*
 * igt_buddy_alloc_optimistic - best-case exact-fit test.
 *
 * Sizes the mm as PAGE_SIZE * (2^(max_order+1) - 1), i.e. exactly one
 * block of every order 0..max_order, allocates each of them once, and then
 * verifies the mm is completely full by expecting a final order-0 alloc to
 * fail. The elided lines presumably carry the IS_ERR checks, err/goto
 * cleanup and the return — TODO confirm against the full file.
 */
499 static int igt_buddy_alloc_optimistic(void *arg)
500 const int max_order = 16;
501 struct i915_buddy_block *block;
502 struct i915_buddy_mm mm;
507 * Create a mm with one block of each order available, and
508 * try to allocate them all.
/* (2^(max_order+1) - 1) pages: bit i set for every order i */
511 err = i915_buddy_init(&mm,
512 PAGE_SIZE * ((1 << (max_order + 1)) - 1),
515 pr_err("buddy_init failed(%d)\n", err);
518 GEM_BUG_ON(mm.max_order != max_order);
520 for (order = 0; order <= max_order; order++) {
521 block = i915_buddy_alloc(&mm, order);
523 pr_info("buddy_alloc hit -ENOMEM with order=%d\n",
525 err = PTR_ERR(block);
529 list_add_tail(&block->link, &blocks);
532 /* Should be completely full! */
533 block = i915_buddy_alloc(&mm, 0);
534 if (!IS_ERR(block)) {
535 pr_info("buddy_alloc unexpectedly succeeded, it should be full!");
536 list_add_tail(&block->link, &blocks);
542 i915_buddy_free_list(&mm, &blocks);
543 i915_buddy_fini(&mm);
/*
 * igt_buddy_alloc_pathological - drive the mm to 50% fragmentation.
 *
 * Repeatedly fills all orders below `top`, keeps the final order-0 page of
 * each pass on a separate `holes` list, verifies no block of order `top`
 * can be allocated, then frees the largest allocated block and whittles
 * down again. After releasing the holes, verifies that nothing larger than
 * chunk_size can be allocated — the mm is maximally fragmented with every
 * other page pinned.
 *
 * NOTE(review): interior lines are elided (original numbering jumps);
 * IS_ERR checks, err/goto cleanup, and the loop that frees the largest
 * block are partly invisible — confirm against the full file.
 */
549 static int igt_buddy_alloc_pathological(void *arg)
550 const int max_order = 16;
551 struct i915_buddy_block *block;
552 struct i915_buddy_mm mm;
558 * Create a pot-sized mm, then allocate one of each possible
559 * order within. This should leave the mm with exactly one
560 * page left. Free the largest block, then whittle down again.
561 * Eventually we will have a fully 50% fragmented mm.
564 err = i915_buddy_init(&mm, PAGE_SIZE << max_order, PAGE_SIZE);
566 pr_err("buddy_init failed(%d)\n", err);
569 GEM_BUG_ON(mm.max_order != max_order);
571 for (top = max_order; top; top--) {
572 /* Make room by freeing the largest allocated block */
573 block = list_first_entry_or_null(&blocks, typeof(*block), link);
575 list_del(&block->link);
576 i915_buddy_free(&mm, block);
/* Fill every order strictly below top */
579 for (order = top; order--; ) {
580 block = i915_buddy_alloc(&mm, order);
582 pr_info("buddy_alloc hit -ENOMEM with order=%d, top=%d\n",
584 err = PTR_ERR(block);
587 list_add_tail(&block->link, &blocks);
590 /* There should be one final page for this sub-allocation */
591 block = i915_buddy_alloc(&mm, 0);
593 pr_info("buddy_alloc hit -ENOMEM for hole\n");
594 err = PTR_ERR(block);
/* Keep the hole pages aside; they pin fragmentation until released */
597 list_add_tail(&block->link, &holes);
599 block = i915_buddy_alloc(&mm, top);
600 if (!IS_ERR(block)) {
601 pr_info("buddy_alloc unexpectedly succeeded at top-order %d/%d, it should be full!",
603 list_add_tail(&block->link, &blocks);
609 i915_buddy_free_list(&mm, &holes);
611 /* Nothing larger than blocks of chunk_size now available */
612 for (order = 1; order <= max_order; order++) {
613 block = i915_buddy_alloc(&mm, order);
614 if (!IS_ERR(block)) {
615 pr_info("buddy_alloc unexpectedly succeeded at order %d, it should be full!",
617 list_add_tail(&block->link, &blocks);
/* holes may have been emptied already; splice is harmless either way */
624 list_splice_tail(&holes, &blocks);
625 i915_buddy_free_list(&mm, &blocks);
626 i915_buddy_fini(&mm);
/*
 * igt_buddy_alloc_range - exercise i915_buddy_alloc_range() across the mm.
 *
 * Walks the whole randomly-sized mm with prime-numbered page counts,
 * allocating each [offset, offset+size) range in turn, checking the
 * returned blocks start at the requested offset and tile contiguously to
 * the requested size (igt_check_blocks with is_contiguous=true). The
 * elided lines presumably advance offset/rem per iteration and break when
 * rem is exhausted; cleanup frees everything and re-verifies the mm.
 *
 * NOTE(review): interior lines are elided (original numbering jumps);
 * the declarations of blocks/tmp/offset/rem and most err/goto handling
 * are invisible — confirm against the full file.
 */
630 static int igt_buddy_alloc_range(void *arg)
632 struct i915_buddy_mm mm;
633 unsigned long page_num;
641 igt_mm_config(&size, &chunk_size);
643 pr_info("buddy_init with size=%llx, chunk_size=%llx\n", size, chunk_size);
645 err = i915_buddy_init(&mm, size, chunk_size);
647 pr_err("buddy_init failed(%d)\n", err);
651 err = igt_check_mm(&mm);
653 pr_err("pre-mm check failed, abort, abort, abort!\n");
/* Prime page counts give awkward, non-power-of-two range sizes */
660 for_each_prime_number_from(page_num, 1, ULONG_MAX - 1) {
661 struct i915_buddy_block *block;
/* Clamp the request to whatever remains of the mm */
664 size = min(page_num * mm.chunk_size, rem);
666 err = i915_buddy_alloc_range(&mm, &tmp, offset, size);
668 if (err == -ENOMEM) {
669 pr_info("alloc_range hit -ENOMEM with size=%llx\n",
672 pr_err("alloc_range with offset=%llx, size=%llx failed(%d)\n",
679 block = list_first_entry_or_null(&tmp,
680 struct i915_buddy_block,
683 pr_err("alloc_range has no blocks\n");
/* First block of the returned list must start at the requested offset */
688 if (i915_buddy_block_offset(block) != offset) {
689 pr_err("alloc_range start offset mismatch, found=%llx, expected=%llx\n",
690 i915_buddy_block_offset(block), offset);
/* true: a range allocation must be contiguous */
695 err = igt_check_blocks(&mm, &tmp, size, true);
697 list_splice_tail(&tmp, &blocks);
714 i915_buddy_free_list(&mm, &blocks);
717 err = igt_check_mm(&mm);
719 pr_err("post-mm check failed\n");
723 i915_buddy_fini(&mm);
/*
 * i915_buddy_mock_selftests - entry point registering the buddy selftests.
 *
 * Runs the subtests against a mock (no hardware) mm via i915_subtests();
 * the NULL argument means no shared fixture is passed to the tests. The
 * array terminator/closing brace is elided from this extract.
 */
728 int i915_buddy_mock_selftests(void)
730 static const struct i915_subtest tests[] = {
731 SUBTEST(igt_buddy_alloc_pessimistic),
732 SUBTEST(igt_buddy_alloc_optimistic),
733 SUBTEST(igt_buddy_alloc_pathological),
734 SUBTEST(igt_buddy_alloc_smoke),
735 SUBTEST(igt_buddy_alloc_range),
738 return i915_subtests(tests, NULL);