/*
 * Copyright © 2017 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 */

#include "../i915_selftest.h"

#include <linux/prime_numbers.h>

#include "mock_drm.h"
#include "i915_random.h"

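/*
 * Selftests for huge GTT page support (4K, 64K and 2M page sizes): the mock
 * tests below exercise the driver's page-size tracking against fake backing
 * store, while the live tests write through real huge pages allocated from
 * the internal and gemfs backends.
 */
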
static const unsigned int page_sizes[] = {
	I915_GTT_PAGE_SIZE_2M,
	I915_GTT_PAGE_SIZE_64K,
	I915_GTT_PAGE_SIZE_4K,
};

static unsigned int get_largest_page_size(struct drm_i915_private *i915,
					  u64 rem)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(page_sizes); ++i) {
		unsigned int page_size = page_sizes[i];

		if (HAS_PAGE_SIZES(i915, page_size) && rem >= page_size)
			return page_size;
	}

	return 0;
}

static void huge_pages_free_pages(struct sg_table *st)
{
	struct scatterlist *sg;

	for (sg = st->sgl; sg; sg = __sg_next(sg)) {
		if (sg_page(sg))
			__free_pages(sg_page(sg), get_order(sg->length));
	}

	sg_free_table(st);
	kfree(st);
}

static int get_huge_pages(struct drm_i915_gem_object *obj)
{
#define GFP (GFP_KERNEL | __GFP_NOWARN | __GFP_NORETRY)
	unsigned int page_mask = obj->mm.page_mask;
	struct sg_table *st;
	struct scatterlist *sg;
	unsigned int sg_page_sizes;
	u64 rem;

	st = kmalloc(sizeof(*st), GFP);
	if (!st)
		return -ENOMEM;

	if (sg_alloc_table(st, obj->base.size >> PAGE_SHIFT, GFP)) {
		kfree(st);
		return -ENOMEM;
	}

	rem = obj->base.size;
	sg = st->sgl;
	st->nents = 0;
	sg_page_sizes = 0;

	/*
	 * Our goal here is simple: greedily fill the object from largest to
	 * smallest page-size, while ensuring that we use *every* page-size
	 * as per the given page-mask.
	 */
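	/*
	 * For example, a 2M + 64K + 4K object with page_mask = 2M | 64K | 4K
	 * is filled with exactly one chunk of each size: the inner loop below
	 * stops allocating a given size once the remainder is reserved for
	 * the smaller bits still set in the mask.
	 */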
	do {
		unsigned int bit = ilog2(page_mask);
		unsigned int page_size = BIT(bit);
		int order = get_order(page_size);

		do {
			struct page *page;

			GEM_BUG_ON(order >= MAX_ORDER);
			page = alloc_pages(GFP | __GFP_ZERO, order);
			if (!page)
				goto err;

			sg_set_page(sg, page, page_size, 0);
			sg_page_sizes |= page_size;
			st->nents++;

			rem -= page_size;
			if (!rem) {
				sg_mark_end(sg);
				break;
			}

			sg = __sg_next(sg);
		} while ((rem - ((page_size-1) & page_mask)) >= page_size);

		page_mask &= (page_size-1);
	} while (page_mask);

	if (i915_gem_gtt_prepare_pages(obj, st))
		goto err;

	obj->mm.madv = I915_MADV_DONTNEED;

	GEM_BUG_ON(sg_page_sizes != obj->mm.page_mask);
	__i915_gem_object_set_pages(obj, st, sg_page_sizes);

	return 0;

err:
	sg_set_page(sg, NULL, 0, 0);
	sg_mark_end(sg);
	huge_pages_free_pages(st);

	return -ENOMEM;
}

static void put_huge_pages(struct drm_i915_gem_object *obj,
			   struct sg_table *pages)
{
	i915_gem_gtt_finish_pages(obj, pages);
	huge_pages_free_pages(pages);

	obj->mm.dirty = false;
	obj->mm.madv = I915_MADV_WILLNEED;
}

static const struct drm_i915_gem_object_ops huge_page_ops = {
	.flags = I915_GEM_OBJECT_HAS_STRUCT_PAGE |
		 I915_GEM_OBJECT_IS_SHRINKABLE,
	.get_pages = get_huge_pages,
	.put_pages = put_huge_pages,
};

static struct drm_i915_gem_object *
huge_pages_object(struct drm_i915_private *i915,
		  u64 size,
		  unsigned int page_mask)
{
	struct drm_i915_gem_object *obj;

	GEM_BUG_ON(!size);
	GEM_BUG_ON(!IS_ALIGNED(size, BIT(__ffs(page_mask))));

	if (size >> PAGE_SHIFT > INT_MAX)
		return ERR_PTR(-E2BIG);

	if (overflows_type(size, obj->base.size))
		return ERR_PTR(-E2BIG);

	obj = i915_gem_object_alloc();
	if (!obj)
		return ERR_PTR(-ENOMEM);

	drm_gem_private_object_init(&i915->drm, &obj->base, size);
	i915_gem_object_init(obj, &huge_page_ops);

	obj->write_domain = I915_GEM_DOMAIN_CPU;
	obj->read_domains = I915_GEM_DOMAIN_CPU;
	obj->cache_level = I915_CACHE_NONE;

	obj->mm.page_mask = page_mask;

	return obj;
}

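/*
 * The "fake" objects used by the mock tests below have no real backing
 * pages: get_pages only fills in the dma fields of the sg table, with
 * sg_dma_address() set to the chunk's page size so the dma address carries
 * exactly the alignment under test.
 */
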
static int fake_get_huge_pages(struct drm_i915_gem_object *obj)
{
	struct drm_i915_private *i915 = to_i915(obj->base.dev);
	const u64 max_len = rounddown_pow_of_two(UINT_MAX);
	struct sg_table *st;
	struct scatterlist *sg;
	unsigned int sg_page_sizes;
	u64 rem;

	st = kmalloc(sizeof(*st), GFP);
	if (!st)
		return -ENOMEM;

	if (sg_alloc_table(st, obj->base.size >> PAGE_SHIFT, GFP)) {
		kfree(st);
		return -ENOMEM;
	}

	/* Use optimal page sized chunks to fill in the sg table */
	rem = obj->base.size;
	sg = st->sgl;
	st->nents = 0;
	sg_page_sizes = 0;
	do {
		unsigned int page_size = get_largest_page_size(i915, rem);
		unsigned int len = min(page_size * div_u64(rem, page_size),
				       max_len);

		GEM_BUG_ON(!page_size);

		sg->offset = 0;
		sg->length = len;
		sg_dma_len(sg) = len;
		sg_dma_address(sg) = page_size;

		sg_page_sizes |= len;

		st->nents++;

		rem -= len;
		if (!rem) {
			sg_mark_end(sg);
			break;
		}

		sg = sg_next(sg);
	} while (1);

	i915_sg_trim(st);

	obj->mm.madv = I915_MADV_DONTNEED;

	__i915_gem_object_set_pages(obj, st, sg_page_sizes);

	return 0;
}

static int fake_get_huge_pages_single(struct drm_i915_gem_object *obj)
{
	struct drm_i915_private *i915 = to_i915(obj->base.dev);
	struct sg_table *st;
	struct scatterlist *sg;
	unsigned int page_size;

	st = kmalloc(sizeof(*st), GFP);
	if (!st)
		return -ENOMEM;

	if (sg_alloc_table(st, 1, GFP)) {
		kfree(st);
		return -ENOMEM;
	}

	sg = st->sgl;
	st->nents = 1;

	page_size = get_largest_page_size(i915, obj->base.size);
	GEM_BUG_ON(!page_size);

	sg->offset = 0;
	sg->length = obj->base.size;
	sg_dma_len(sg) = obj->base.size;
	sg_dma_address(sg) = page_size;

	obj->mm.madv = I915_MADV_DONTNEED;

	__i915_gem_object_set_pages(obj, st, sg->length);

	return 0;
#undef GFP
}

static void fake_free_huge_pages(struct drm_i915_gem_object *obj,
				 struct sg_table *pages)
{
	sg_free_table(pages);
	kfree(pages);
}

static void fake_put_huge_pages(struct drm_i915_gem_object *obj,
				struct sg_table *pages)
{
	fake_free_huge_pages(obj, pages);
	obj->mm.dirty = false;
	obj->mm.madv = I915_MADV_WILLNEED;
}

static const struct drm_i915_gem_object_ops fake_ops = {
	.flags = I915_GEM_OBJECT_IS_SHRINKABLE,
	.get_pages = fake_get_huge_pages,
	.put_pages = fake_put_huge_pages,
};

static const struct drm_i915_gem_object_ops fake_ops_single = {
	.flags = I915_GEM_OBJECT_IS_SHRINKABLE,
	.get_pages = fake_get_huge_pages_single,
	.put_pages = fake_put_huge_pages,
};

static struct drm_i915_gem_object *
fake_huge_pages_object(struct drm_i915_private *i915, u64 size, bool single)
{
	struct drm_i915_gem_object *obj;

	GEM_BUG_ON(!size);
	GEM_BUG_ON(!IS_ALIGNED(size, I915_GTT_PAGE_SIZE));

	if (size >> PAGE_SHIFT > UINT_MAX)
		return ERR_PTR(-E2BIG);

	if (overflows_type(size, obj->base.size))
		return ERR_PTR(-E2BIG);

	obj = i915_gem_object_alloc();
	if (!obj)
		return ERR_PTR(-ENOMEM);

	drm_gem_private_object_init(&i915->drm, &obj->base, size);

	if (single)
		i915_gem_object_init(obj, &fake_ops_single);
	else
		i915_gem_object_init(obj, &fake_ops);

	obj->write_domain = I915_GEM_DOMAIN_CPU;
	obj->read_domains = I915_GEM_DOMAIN_CPU;
	obj->cache_level = I915_CACHE_NONE;

	return obj;
}

static int igt_check_page_sizes(struct i915_vma *vma)
{
	struct drm_i915_private *i915 = vma->vm->i915;
	unsigned int supported = INTEL_INFO(i915)->page_sizes;
	struct drm_i915_gem_object *obj = vma->obj;
	int err = 0;

	if (!HAS_PAGE_SIZES(i915, vma->page_sizes.sg)) {
		pr_err("unsupported page_sizes.sg=%u, supported=%u\n",
		       vma->page_sizes.sg & ~supported, supported);
		err = -EINVAL;
	}

	if (!HAS_PAGE_SIZES(i915, vma->page_sizes.gtt)) {
		pr_err("unsupported page_sizes.gtt=%u, supported=%u\n",
		       vma->page_sizes.gtt & ~supported, supported);
		err = -EINVAL;
	}

	if (vma->page_sizes.phys != obj->mm.page_sizes.phys) {
		pr_err("vma->page_sizes.phys(%u) != obj->mm.page_sizes.phys(%u)\n",
		       vma->page_sizes.phys, obj->mm.page_sizes.phys);
		err = -EINVAL;
	}

	if (vma->page_sizes.sg != obj->mm.page_sizes.sg) {
		pr_err("vma->page_sizes.sg(%u) != obj->mm.page_sizes.sg(%u)\n",
		       vma->page_sizes.sg, obj->mm.page_sizes.sg);
		err = -EINVAL;
	}

	if (obj->mm.page_sizes.gtt) {
		pr_err("obj->page_sizes.gtt(%u) should never be set\n",
		       obj->mm.page_sizes.gtt);
		err = -EINVAL;
	}

	return err;
}

static int igt_mock_exhaust_device_supported_pages(void *arg)
{
	struct i915_hw_ppgtt *ppgtt = arg;
	struct drm_i915_private *i915 = ppgtt->vm.i915;
	unsigned int saved_mask = INTEL_INFO(i915)->page_sizes;
	struct drm_i915_gem_object *obj;
	struct i915_vma *vma;
	int i, j, single;
	int err;

	/*
	 * Sanity check creating objects with every valid page support
	 * combination for our mock device.
	 */

	for (i = 1; i < BIT(ARRAY_SIZE(page_sizes)); i++) {
		unsigned int combination = 0;

		for (j = 0; j < ARRAY_SIZE(page_sizes); j++) {
			if (i & BIT(j))
				combination |= page_sizes[j];
		}

		mkwrite_device_info(i915)->page_sizes = combination;

		for (single = 0; single <= 1; ++single) {
			obj = fake_huge_pages_object(i915, combination, !!single);
			if (IS_ERR(obj)) {
				err = PTR_ERR(obj);
				goto out_device;
			}

			if (obj->base.size != combination) {
				pr_err("obj->base.size=%zu, expected=%u\n",
				       obj->base.size, combination);
				err = -EINVAL;
				goto out_put;
			}

			vma = i915_vma_instance(obj, &ppgtt->vm, NULL);
			if (IS_ERR(vma)) {
				err = PTR_ERR(vma);
				goto out_put;
			}

			err = i915_vma_pin(vma, 0, 0, PIN_USER);
			if (err)
				goto out_close;

			err = igt_check_page_sizes(vma);

			if (vma->page_sizes.sg != combination) {
				pr_err("page_sizes.sg=%u, expected=%u\n",
				       vma->page_sizes.sg, combination);
				err = -EINVAL;
			}

			i915_vma_unpin(vma);
			i915_vma_close(vma);

			i915_gem_object_put(obj);

			if (err)
				goto out_device;
		}
	}

	goto out_device;

out_close:
	i915_vma_close(vma);
out_put:
	i915_gem_object_put(obj);
out_device:
	mkwrite_device_info(i915)->page_sizes = saved_mask;

	return err;
}

static int igt_mock_ppgtt_misaligned_dma(void *arg)
{
	struct i915_hw_ppgtt *ppgtt = arg;
	struct drm_i915_private *i915 = ppgtt->vm.i915;
	unsigned long supported = INTEL_INFO(i915)->page_sizes;
	struct drm_i915_gem_object *obj;
	int bit;
	int err;

	/*
	 * Sanity check dma misalignment for huge pages -- the dma addresses we
	 * insert into the paging structures need to always respect the page
	 * size alignment.
	 */

	bit = ilog2(I915_GTT_PAGE_SIZE_64K);

	for_each_set_bit_from(bit, &supported,
			      ilog2(I915_GTT_MAX_PAGE_SIZE) + 1) {
		IGT_TIMEOUT(end_time);
		unsigned int page_size = BIT(bit);
		unsigned int flags = PIN_USER | PIN_OFFSET_FIXED;
		unsigned int offset;
		unsigned int size =
			round_up(page_size, I915_GTT_PAGE_SIZE_2M) << 1;
		struct i915_vma *vma;

		obj = fake_huge_pages_object(i915, size, true);
		if (IS_ERR(obj))
			return PTR_ERR(obj);

		if (obj->base.size != size) {
			pr_err("obj->base.size=%zu, expected=%u\n",
			       obj->base.size, size);
			err = -EINVAL;
			goto out_put;
		}

		err = i915_gem_object_pin_pages(obj);
		if (err)
			goto out_put;

		/* Force the page size for this object */
		obj->mm.page_sizes.sg = page_size;

		vma = i915_vma_instance(obj, &ppgtt->vm, NULL);
		if (IS_ERR(vma)) {
			err = PTR_ERR(vma);
			goto out_unpin;
		}

		err = i915_vma_pin(vma, 0, 0, flags);
		if (err) {
			i915_vma_close(vma);
			goto out_unpin;
		}

		err = igt_check_page_sizes(vma);

		if (vma->page_sizes.gtt != page_size) {
			pr_err("page_sizes.gtt=%u, expected %u\n",
			       vma->page_sizes.gtt, page_size);
			err = -EINVAL;
		}

		i915_vma_unpin(vma);

		if (err) {
			i915_vma_close(vma);
			goto out_unpin;
		}

		/*
		 * Try all the other valid offsets until the next
		 * boundary -- should always fall back to using 4K
		 * pages.
		 */
		for (offset = 4096; offset < page_size; offset += 4096) {
			err = i915_vma_unbind(vma);
			if (err) {
				i915_vma_close(vma);
				goto out_unpin;
			}

			err = i915_vma_pin(vma, 0, 0, flags | offset);
			if (err) {
				i915_vma_close(vma);
				goto out_unpin;
			}

			err = igt_check_page_sizes(vma);

			if (vma->page_sizes.gtt != I915_GTT_PAGE_SIZE_4K) {
				pr_err("page_sizes.gtt=%u, expected %llu\n",
				       vma->page_sizes.gtt, I915_GTT_PAGE_SIZE_4K);
				err = -EINVAL;
			}

			i915_vma_unpin(vma);

			if (err) {
				i915_vma_close(vma);
				goto out_unpin;
			}

			if (igt_timeout(end_time,
					"%s timed out at offset %x with page-size %x\n",
					__func__, offset, page_size))
				break;
		}

		i915_vma_close(vma);

		i915_gem_object_unpin_pages(obj);
		__i915_gem_object_put_pages(obj, I915_MM_NORMAL);
		i915_gem_object_put(obj);
	}

	return 0;

out_unpin:
	i915_gem_object_unpin_pages(obj);
out_put:
	i915_gem_object_put(obj);

	return err;
}

static void close_object_list(struct list_head *objects,
			      struct i915_hw_ppgtt *ppgtt)
{
	struct drm_i915_gem_object *obj, *on;

	list_for_each_entry_safe(obj, on, objects, st_link) {
		struct i915_vma *vma;

		vma = i915_vma_instance(obj, &ppgtt->vm, NULL);
		if (!IS_ERR(vma))
			i915_vma_close(vma);

		list_del(&obj->st_link);
		i915_gem_object_unpin_pages(obj);
		__i915_gem_object_put_pages(obj, I915_MM_NORMAL);
		i915_gem_object_put(obj);
	}
}

static int igt_mock_ppgtt_huge_fill(void *arg)
{
	struct i915_hw_ppgtt *ppgtt = arg;
	struct drm_i915_private *i915 = ppgtt->vm.i915;
	unsigned long max_pages = ppgtt->vm.total >> PAGE_SHIFT;
	unsigned long page_num;
	bool single = false;
	LIST_HEAD(objects);
	IGT_TIMEOUT(end_time);
	int err = -ENODEV;

	for_each_prime_number_from(page_num, 1, max_pages) {
		struct drm_i915_gem_object *obj;
		u64 size = page_num << PAGE_SHIFT;
		struct i915_vma *vma;
		unsigned int expected_gtt = 0;
		int i;

		obj = fake_huge_pages_object(i915, size, single);
		if (IS_ERR(obj)) {
			err = PTR_ERR(obj);
			break;
		}

		if (obj->base.size != size) {
			pr_err("obj->base.size=%zd, expected=%llu\n",
			       obj->base.size, size);
			i915_gem_object_put(obj);
			err = -EINVAL;
			break;
		}

		err = i915_gem_object_pin_pages(obj);
		if (err) {
			i915_gem_object_put(obj);
			break;
		}

		list_add(&obj->st_link, &objects);

		vma = i915_vma_instance(obj, &ppgtt->vm, NULL);
		if (IS_ERR(vma)) {
			err = PTR_ERR(vma);
			break;
		}

		err = i915_vma_pin(vma, 0, 0, PIN_USER);
		if (err)
			break;

		err = igt_check_page_sizes(vma);
		if (err) {
			i915_vma_unpin(vma);
			break;
		}

		/*
		 * Figure out the expected gtt page size knowing that we go from
		 * largest to smallest page size sg chunks, and that we align to
		 * the largest page size.
		 */
		for (i = 0; i < ARRAY_SIZE(page_sizes); ++i) {
			unsigned int page_size = page_sizes[i];

			if (HAS_PAGE_SIZES(i915, page_size) &&
			    size >= page_size) {
				expected_gtt |= page_size;
				size &= page_size-1;
			}
		}

		GEM_BUG_ON(!expected_gtt);
		GEM_BUG_ON(size);

		if (expected_gtt & I915_GTT_PAGE_SIZE_4K)
			expected_gtt &= ~I915_GTT_PAGE_SIZE_64K;

		i915_vma_unpin(vma);

		if (vma->page_sizes.sg & I915_GTT_PAGE_SIZE_64K) {
			if (!IS_ALIGNED(vma->node.start,
					I915_GTT_PAGE_SIZE_2M)) {
				pr_err("node.start(%llx) not aligned to 2M\n",
				       vma->node.start);
				err = -EINVAL;
				break;
			}

			if (!IS_ALIGNED(vma->node.size,
					I915_GTT_PAGE_SIZE_2M)) {
				pr_err("node.size(%llx) not aligned to 2M\n",
				       vma->node.size);
				err = -EINVAL;
				break;
			}
		}

		if (vma->page_sizes.gtt != expected_gtt) {
			pr_err("gtt=%u, expected=%u, size=%zd, single=%s\n",
			       vma->page_sizes.gtt, expected_gtt,
			       obj->base.size, yesno(!!single));
			err = -EINVAL;
			break;
		}

		if (igt_timeout(end_time,
				"%s timed out at size %zd\n",
				__func__, obj->base.size))
			break;

		single = !single;
	}

	close_object_list(&objects, ppgtt);

	if (err == -ENOMEM || err == -ENOSPC)
		err = 0;

	return err;
}

static int igt_mock_ppgtt_64K(void *arg)
{
	struct i915_hw_ppgtt *ppgtt = arg;
	struct drm_i915_private *i915 = ppgtt->vm.i915;
	struct drm_i915_gem_object *obj;
	const struct object_info {
		unsigned int size;
		unsigned int gtt;
		unsigned int offset;
	} objects[] = {
		/* Cases with forced padding/alignment */
		{
			.size = SZ_64K,
			.gtt = I915_GTT_PAGE_SIZE_64K,
			.offset = 0,
		},
		{
			.size = SZ_64K + SZ_4K,
			.gtt = I915_GTT_PAGE_SIZE_4K,
			.offset = 0,
		},
		{
			.size = SZ_64K - SZ_4K,
			.gtt = I915_GTT_PAGE_SIZE_4K,
			.offset = 0,
		},
		{
			.size = SZ_2M,
			.gtt = I915_GTT_PAGE_SIZE_64K,
			.offset = 0,
		},
		{
			.size = SZ_2M - SZ_4K,
			.gtt = I915_GTT_PAGE_SIZE_4K,
			.offset = 0,
		},
		{
			.size = SZ_2M + SZ_4K,
			.gtt = I915_GTT_PAGE_SIZE_64K | I915_GTT_PAGE_SIZE_4K,
			.offset = 0,
		},
		{
			.size = SZ_2M + SZ_64K,
			.gtt = I915_GTT_PAGE_SIZE_64K,
			.offset = 0,
		},
		{
			.size = SZ_2M - SZ_64K,
			.gtt = I915_GTT_PAGE_SIZE_64K,
			.offset = 0,
		},
		/* Try without any forced padding/alignment */
		{
			.size = SZ_64K,
			.offset = SZ_2M,
			.gtt = I915_GTT_PAGE_SIZE_4K,
		},
		{
			.size = SZ_128K,
			.offset = SZ_2M - SZ_64K,
			.gtt = I915_GTT_PAGE_SIZE_4K,
		},
	};
	struct i915_vma *vma;
	int i, single;
	int err;

	/*
	 * Sanity check some of the trickiness with 64K pages -- either we can
	 * safely mark the whole page-table (2M block) as 64K, or we have to
	 * always fall back to 4K.
	 */
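	/*
	 * For example, a SZ_2M + SZ_4K object (with 2M pages disabled below)
	 * keeps 64K entries for the fully populated first 2M block but needs
	 * 4K entries for the trailing chunk, giving 64K | 4K; while a SZ_64K
	 * object pinned at a fixed offset is not padded out to cover a whole
	 * page-table, so it must fall back to 4K entries entirely.
	 */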

	if (!HAS_PAGE_SIZES(i915, I915_GTT_PAGE_SIZE_64K))
		return 0;

	for (i = 0; i < ARRAY_SIZE(objects); ++i) {
		unsigned int size = objects[i].size;
		unsigned int expected_gtt = objects[i].gtt;
		unsigned int offset = objects[i].offset;
		unsigned int flags = PIN_USER;

		for (single = 0; single <= 1; single++) {
			obj = fake_huge_pages_object(i915, size, !!single);
			if (IS_ERR(obj))
				return PTR_ERR(obj);

			err = i915_gem_object_pin_pages(obj);
			if (err)
				goto out_object_put;

			/*
			 * Disable 2M pages -- we only want to use 64K/4K pages
			 * for this test.
			 */
			obj->mm.page_sizes.sg &= ~I915_GTT_PAGE_SIZE_2M;

			vma = i915_vma_instance(obj, &ppgtt->vm, NULL);
			if (IS_ERR(vma)) {
				err = PTR_ERR(vma);
				goto out_object_unpin;
			}

			if (offset)
				flags |= PIN_OFFSET_FIXED | offset;

			err = i915_vma_pin(vma, 0, 0, flags);
			if (err)
				goto out_vma_close;

			err = igt_check_page_sizes(vma);
			if (err)
				goto out_vma_unpin;

			if (!offset && vma->page_sizes.sg & I915_GTT_PAGE_SIZE_64K) {
				if (!IS_ALIGNED(vma->node.start,
						I915_GTT_PAGE_SIZE_2M)) {
					pr_err("node.start(%llx) not aligned to 2M\n",
					       vma->node.start);
					err = -EINVAL;
					goto out_vma_unpin;
				}

				if (!IS_ALIGNED(vma->node.size,
						I915_GTT_PAGE_SIZE_2M)) {
					pr_err("node.size(%llx) not aligned to 2M\n",
					       vma->node.size);
					err = -EINVAL;
					goto out_vma_unpin;
				}
			}

			if (vma->page_sizes.gtt != expected_gtt) {
				pr_err("gtt=%u, expected=%u, i=%d, single=%s\n",
				       vma->page_sizes.gtt, expected_gtt, i,
				       yesno(!!single));
				err = -EINVAL;
				goto out_vma_unpin;
			}

			i915_vma_unpin(vma);
			i915_vma_close(vma);

			i915_gem_object_unpin_pages(obj);
			__i915_gem_object_put_pages(obj, I915_MM_NORMAL);
			i915_gem_object_put(obj);
		}
	}

	return 0;

out_vma_unpin:
	i915_vma_unpin(vma);
out_vma_close:
	i915_vma_close(vma);
out_object_unpin:
	i915_gem_object_unpin_pages(obj);
out_object_put:
	i915_gem_object_put(obj);

	return err;
}

static struct i915_vma *
gpu_write_dw(struct i915_vma *vma, u64 offset, u32 val)
{
	struct drm_i915_private *i915 = vma->vm->i915;
	const int gen = INTEL_GEN(i915);
	unsigned int count = vma->size >> PAGE_SHIFT;
	struct drm_i915_gem_object *obj;
	struct i915_vma *batch;
	unsigned int size;
	u32 *cmd;
	int n;
	int err;

	size = (1 + 4 * count) * sizeof(u32);
	size = round_up(size, PAGE_SIZE);
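	/*
	 * Each per-page write emitted below is at most a 4-dword
	 * MI_STORE_DWORD_IMM packet, and the batch ends with a single
	 * MI_BATCH_BUFFER_END dword, hence the (1 + 4 * count) sizing above.
	 */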
	obj = i915_gem_object_create_internal(i915, size);
	if (IS_ERR(obj))
		return ERR_CAST(obj);

	cmd = i915_gem_object_pin_map(obj, I915_MAP_WC);
	if (IS_ERR(cmd)) {
		err = PTR_ERR(cmd);
		goto err;
	}

	offset += vma->node.start;

	for (n = 0; n < count; n++) {
		if (gen >= 8) {
			*cmd++ = MI_STORE_DWORD_IMM_GEN4;
			*cmd++ = lower_32_bits(offset);
			*cmd++ = upper_32_bits(offset);
			*cmd++ = val;
		} else if (gen >= 4) {
			*cmd++ = MI_STORE_DWORD_IMM_GEN4 |
				 (gen < 6 ? MI_USE_GGTT : 0);
			*cmd++ = 0;
			*cmd++ = offset;
			*cmd++ = val;
		} else {
			*cmd++ = MI_STORE_DWORD_IMM | MI_MEM_VIRTUAL;
			*cmd++ = offset;
			*cmd++ = val;
		}

		offset += PAGE_SIZE;
	}

	*cmd = MI_BATCH_BUFFER_END;
	i915_gem_chipset_flush(i915);

	i915_gem_object_unpin_map(obj);

	batch = i915_vma_instance(obj, vma->vm, NULL);
	if (IS_ERR(batch)) {
		err = PTR_ERR(batch);
		goto err;
	}

	err = i915_vma_pin(batch, 0, 0, PIN_USER);
	if (err)
		goto err;

	return batch;

err:
	i915_gem_object_put(obj);

	return ERR_PTR(err);
}

static int gpu_write(struct i915_vma *vma,
		     struct i915_gem_context *ctx,
		     struct intel_engine_cs *engine,
		     u32 dword,
		     u32 value)
{
	struct i915_request *rq;
	struct i915_vma *batch;
	int err;

	GEM_BUG_ON(!intel_engine_can_store_dword(engine));

	err = i915_gem_object_set_to_gtt_domain(vma->obj, true);
	if (err)
		return err;

	batch = gpu_write_dw(vma, dword * sizeof(u32), value);
	if (IS_ERR(batch))
		return PTR_ERR(batch);

	rq = i915_request_alloc(engine, ctx);
	if (IS_ERR(rq)) {
		err = PTR_ERR(rq);
		goto err_batch;
	}

	err = i915_vma_move_to_active(batch, rq, 0);
	if (err)
		goto err_request;

	i915_gem_object_set_active_reference(batch->obj);

	err = i915_vma_move_to_active(vma, rq, EXEC_OBJECT_WRITE);
	if (err)
		goto err_request;

	err = engine->emit_bb_start(rq,
				    batch->node.start, batch->node.size,
				    0);
err_request:
	if (err)
		i915_request_skip(rq, err);
	i915_request_add(rq);
err_batch:
	i915_vma_unpin(batch);
	i915_vma_close(batch);

	return err;
}

static int cpu_check(struct drm_i915_gem_object *obj, u32 dword, u32 val)
{
	unsigned int needs_flush;
	unsigned long n;
	int err;

	err = i915_gem_obj_prepare_shmem_read(obj, &needs_flush);
	if (err)
		return err;

	for (n = 0; n < obj->base.size >> PAGE_SHIFT; ++n) {
		u32 *ptr = kmap_atomic(i915_gem_object_get_page(obj, n));

		if (needs_flush & CLFLUSH_BEFORE)
			drm_clflush_virt_range(ptr, PAGE_SIZE);

		if (ptr[dword] != val) {
			pr_err("n=%lu ptr[%u]=%u, val=%u\n",
			       n, dword, ptr[dword], val);
			kunmap_atomic(ptr);
			err = -EINVAL;
			break;
		}

		kunmap_atomic(ptr);
	}

	i915_gem_obj_finish_shmem_access(obj);

	return err;
}

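/*
 * Bind the object at a fixed offset, then write a dword with the GPU and
 * read it back with the CPU -- this checks both the page sizes reported for
 * the binding and that the write actually landed in the backing store.
 */
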
static int __igt_write_huge(struct i915_gem_context *ctx,
			    struct intel_engine_cs *engine,
			    struct drm_i915_gem_object *obj,
			    u64 size, u64 offset,
			    u32 dword, u32 val)
{
	struct drm_i915_private *i915 = to_i915(obj->base.dev);
	struct i915_address_space *vm =
		ctx->ppgtt ? &ctx->ppgtt->vm : &i915->ggtt.vm;
	unsigned int flags = PIN_USER | PIN_OFFSET_FIXED;
	struct i915_vma *vma;
	int err;

	vma = i915_vma_instance(obj, vm, NULL);
	if (IS_ERR(vma))
		return PTR_ERR(vma);

	err = i915_vma_unbind(vma);
	if (err)
		goto out_vma_close;

	err = i915_vma_pin(vma, size, 0, flags | offset);
	if (err) {
		/*
		 * The ggtt may have some pages reserved so
		 * refrain from erroring out.
		 */
		if (err == -ENOSPC && i915_is_ggtt(vm))
			err = 0;

		goto out_vma_close;
	}

	err = igt_check_page_sizes(vma);
	if (err)
		goto out_vma_unpin;

	err = gpu_write(vma, ctx, engine, dword, val);
	if (err) {
		pr_err("gpu-write failed at offset=%llx\n", offset);
		goto out_vma_unpin;
	}

	err = cpu_check(obj, dword, val);
	if (err) {
		pr_err("cpu-check failed at offset=%llx\n", offset);
		goto out_vma_unpin;
	}

out_vma_unpin:
	i915_vma_unpin(vma);
out_vma_close:
	i915_vma_destroy(vma);

	return err;
}

static int igt_write_huge(struct i915_gem_context *ctx,
			  struct drm_i915_gem_object *obj)
{
	struct drm_i915_private *i915 = to_i915(obj->base.dev);
	struct i915_address_space *vm =
		ctx->ppgtt ? &ctx->ppgtt->vm : &i915->ggtt.vm;
	static struct intel_engine_cs *engines[I915_NUM_ENGINES];
	struct intel_engine_cs *engine;
	I915_RND_STATE(prng);
	IGT_TIMEOUT(end_time);
	unsigned int max_page_size;
	unsigned int id;
	u64 max;
	u64 num;
	u64 size;
	int *order;
	int i, n;
	int err = 0;

	GEM_BUG_ON(!i915_gem_object_has_pinned_pages(obj));

	size = obj->base.size;
	if (obj->mm.page_sizes.sg & I915_GTT_PAGE_SIZE_64K)
		size = round_up(size, I915_GTT_PAGE_SIZE_2M);

	max_page_size = rounddown_pow_of_two(obj->mm.page_sizes.sg);
	max = div_u64((vm->total - size), max_page_size);

	n = 0;
	for_each_engine(engine, i915, id) {
		if (!intel_engine_can_store_dword(engine)) {
			pr_info("store-dword-imm not supported on engine=%u\n",
				id);
			continue;
		}
		engines[n++] = engine;
	}

	if (!n)
		return 0;

	/*
	 * To keep things interesting when alternating between engines in our
	 * randomized order, let's also make feeding to the same engine a few
	 * times in succession a possibility by enlarging the permutation array.
	 */
	order = i915_random_order(n * I915_NUM_ENGINES, &prng);
	if (!order)
		return -ENOMEM;

	/*
	 * Try various offsets in an ascending/descending fashion until we
	 * timeout -- we want to avoid issues hidden by effectively always using
	 * offset = 0.
	 */
	i = 0;
	for_each_prime_number_from(num, 0, max) {
		u64 offset_low = num * max_page_size;
		u64 offset_high = (max - num) * max_page_size;
		u32 dword = offset_in_page(num) / 4;

		engine = engines[order[i] % n];
		i = (i + 1) % (n * I915_NUM_ENGINES);

		/*
		 * In order to utilize 64K pages we need to both pad the vma
		 * size and ensure the vma offset is at the start of the pt
		 * boundary; however, to improve coverage we opt for testing
		 * both aligned and unaligned offsets.
		 */
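		/*
		 * Hence offset_low is rounded down to a 2M (page-table)
		 * boundary below, while offset_high is left as-is.
		 */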
		if (obj->mm.page_sizes.sg & I915_GTT_PAGE_SIZE_64K)
			offset_low = round_down(offset_low,
						I915_GTT_PAGE_SIZE_2M);

		err = __igt_write_huge(ctx, engine, obj, size, offset_low,
				       dword, num + 1);
		if (err)
			break;

		err = __igt_write_huge(ctx, engine, obj, size, offset_high,
				       dword, num + 1);
		if (err)
			break;

		if (igt_timeout(end_time,
				"%s timed out on engine=%u, offset_low=%llx offset_high=%llx, max_page_size=%x\n",
				__func__, engine->id, offset_low, offset_high,
				max_page_size))
			break;
	}

	kfree(order);

	return err;
}

static int igt_ppgtt_exhaust_huge(void *arg)
{
	struct i915_gem_context *ctx = arg;
	struct drm_i915_private *i915 = ctx->i915;
	unsigned long supported = INTEL_INFO(i915)->page_sizes;
	static unsigned int pages[ARRAY_SIZE(page_sizes)];
	struct drm_i915_gem_object *obj;
	unsigned int size_mask;
	unsigned int page_mask;
	int n, i;
	int err = -ENODEV;

	if (supported == I915_GTT_PAGE_SIZE_4K)
		return 0;

	/*
	 * Sanity check creating objects with a varying mix of page sizes --
	 * ensuring that our writes land in the right place.
	 */

	n = 0;
	for_each_set_bit(i, &supported, ilog2(I915_GTT_MAX_PAGE_SIZE) + 1)
		pages[n++] = BIT(i);

	for (size_mask = 2; size_mask < BIT(n); size_mask++) {
		unsigned int size = 0;

		for (i = 0; i < n; i++) {
			if (size_mask & BIT(i))
				size |= pages[i];
		}

		/*
		 * For our page mask we want to enumerate all the page-size
		 * combinations which will fit into our chosen object size.
		 */
		for (page_mask = 2; page_mask <= size_mask; page_mask++) {
			unsigned int page_sizes = 0;

			for (i = 0; i < n; i++) {
				if (page_mask & BIT(i))
					page_sizes |= pages[i];
			}

			/*
			 * Ensure that we can actually fill the given object
			 * with our chosen page mask.
			 */
			if (!IS_ALIGNED(size, BIT(__ffs(page_sizes))))
				continue;

			obj = huge_pages_object(i915, size, page_sizes);
			if (IS_ERR(obj)) {
				err = PTR_ERR(obj);
				goto out_device;
			}

			err = i915_gem_object_pin_pages(obj);
			if (err) {
				i915_gem_object_put(obj);

				if (err == -ENOMEM) {
					pr_info("unable to get pages, size=%u, pages=%u\n",
						size, page_sizes);
					err = 0;
					break;
				}

				pr_err("pin_pages failed, size=%u, pages=%u\n",
				       size, page_sizes);

				goto out_device;
			}

			/* Force the page-size for the gtt insertion */
			obj->mm.page_sizes.sg = page_sizes;

			err = igt_write_huge(ctx, obj);
			if (err) {
				pr_err("exhaust write-huge failed with size=%u\n",
				       size);
				goto out_unpin;
			}

			i915_gem_object_unpin_pages(obj);
			__i915_gem_object_put_pages(obj, I915_MM_NORMAL);
			i915_gem_object_put(obj);
		}
	}

	goto out_device;

out_unpin:
	i915_gem_object_unpin_pages(obj);
	i915_gem_object_put(obj);

out_device:
	mkwrite_device_info(i915)->page_sizes = supported;

	return err;
}

static int igt_ppgtt_internal_huge(void *arg)
{
	struct i915_gem_context *ctx = arg;
	struct drm_i915_private *i915 = ctx->i915;
	struct drm_i915_gem_object *obj;
	static const unsigned int sizes[] = {
		SZ_64K,
		SZ_128K,
		SZ_256K,
		SZ_512K,
		SZ_1M,
		SZ_2M,
	};
	int i;
	int err;

	/*
	 * Sanity check that the HW uses huge pages correctly through our
	 * internal backing store -- ensure that our writes land in the right
	 * place.
	 */

	for (i = 0; i < ARRAY_SIZE(sizes); ++i) {
		unsigned int size = sizes[i];

		obj = i915_gem_object_create_internal(i915, size);
		if (IS_ERR(obj))
			return PTR_ERR(obj);

		err = i915_gem_object_pin_pages(obj);
		if (err)
			goto out_put;

		if (obj->mm.page_sizes.phys < I915_GTT_PAGE_SIZE_64K) {
			pr_info("internal unable to allocate huge-page(s) with size=%u\n",
				size);
			goto out_unpin;
		}

		err = igt_write_huge(ctx, obj);
		if (err) {
			pr_err("internal write-huge failed with size=%u\n",
			       size);
			goto out_unpin;
		}

		i915_gem_object_unpin_pages(obj);
		__i915_gem_object_put_pages(obj, I915_MM_NORMAL);
		i915_gem_object_put(obj);
	}

	return 0;

out_unpin:
	i915_gem_object_unpin_pages(obj);
out_put:
	i915_gem_object_put(obj);

	return err;
}

static inline bool igt_can_allocate_thp(struct drm_i915_private *i915)
{
	return i915->mm.gemfs && has_transparent_hugepage();
}

static int igt_ppgtt_gemfs_huge(void *arg)
{
	struct i915_gem_context *ctx = arg;
	struct drm_i915_private *i915 = ctx->i915;
	struct drm_i915_gem_object *obj;
	static const unsigned int sizes[] = {
		SZ_2M,
		SZ_4M,
		SZ_8M,
		SZ_16M,
		SZ_32M,
	};
	int i;
	int err;

	/*
	 * Sanity check that the HW uses huge pages correctly through gemfs --
	 * ensure that our writes land in the right place.
	 */

	if (!igt_can_allocate_thp(i915)) {
		pr_info("missing THP support, skipping\n");
		return 0;
	}

	for (i = 0; i < ARRAY_SIZE(sizes); ++i) {
		unsigned int size = sizes[i];

		obj = i915_gem_object_create(i915, size);
		if (IS_ERR(obj))
			return PTR_ERR(obj);

		err = i915_gem_object_pin_pages(obj);
		if (err)
			goto out_put;

		if (obj->mm.page_sizes.phys < I915_GTT_PAGE_SIZE_2M) {
			pr_info("finishing test early, gemfs unable to allocate huge-page(s) with size=%u\n",
				size);
			goto out_unpin;
		}

		err = igt_write_huge(ctx, obj);
		if (err) {
			pr_err("gemfs write-huge failed with size=%u\n",
			       size);
			goto out_unpin;
		}

		i915_gem_object_unpin_pages(obj);
		__i915_gem_object_put_pages(obj, I915_MM_NORMAL);
		i915_gem_object_put(obj);
	}

	return 0;

out_unpin:
	i915_gem_object_unpin_pages(obj);
out_put:
	i915_gem_object_put(obj);

	return err;
}

static int igt_ppgtt_pin_update(void *arg)
{
	struct i915_gem_context *ctx = arg;
	struct drm_i915_private *dev_priv = ctx->i915;
	unsigned long supported = INTEL_INFO(dev_priv)->page_sizes;
	struct i915_hw_ppgtt *ppgtt = ctx->ppgtt;
	struct drm_i915_gem_object *obj;
	struct i915_vma *vma;
	unsigned int flags = PIN_USER | PIN_OFFSET_FIXED;
	int first, last;
	int err;

	/*
	 * Make sure there's no funny business when doing a PIN_UPDATE -- in the
	 * past we had a subtle issue where we could incorrectly do multiple
	 * alloc va ranges on the same object when doing a PIN_UPDATE, which
	 * resulted in some pretty nasty bugs, though only when using
	 * huge-gtt-pages.
	 */

	if (!ppgtt || !i915_vm_is_4lvl(&ppgtt->vm)) {
		pr_info("48b PPGTT not supported, skipping\n");
		return 0;
	}

	first = ilog2(I915_GTT_PAGE_SIZE_64K);
	last = ilog2(I915_GTT_PAGE_SIZE_2M);

	for_each_set_bit_from(first, &supported, last + 1) {
		unsigned int page_size = BIT(first);

		obj = i915_gem_object_create_internal(dev_priv, page_size);
		if (IS_ERR(obj))
			return PTR_ERR(obj);

		vma = i915_vma_instance(obj, &ppgtt->vm, NULL);
		if (IS_ERR(vma)) {
			err = PTR_ERR(vma);
			goto out_put;
		}

		err = i915_vma_pin(vma, SZ_2M, 0, flags);
		if (err)
			goto out_close;

		if (vma->page_sizes.sg < page_size) {
			pr_info("Unable to allocate page-size %x, finishing test early\n",
				page_size);
			goto out_unpin;
		}

		err = igt_check_page_sizes(vma);
		if (err)
			goto out_unpin;

		if (vma->page_sizes.gtt != page_size) {
			dma_addr_t addr = i915_gem_object_get_dma_address(obj, 0);

			/*
			 * The only valid reason for this to ever fail would be
			 * if the dma-mapper screwed us over when we did the
			 * dma_map_sg(), since it has the final say over the dma
			 * address.
			 */
			if (IS_ALIGNED(addr, page_size)) {
				pr_err("page_sizes.gtt=%u, expected=%u\n",
				       vma->page_sizes.gtt, page_size);
				err = -EINVAL;
			} else {
				pr_info("dma address misaligned, finishing test early\n");
			}

			goto out_unpin;
		}

		err = i915_vma_bind(vma, I915_CACHE_NONE, PIN_UPDATE);
		if (err)
			goto out_unpin;

		i915_vma_unpin(vma);
		i915_vma_close(vma);

		i915_gem_object_put(obj);
	}

	obj = i915_gem_object_create_internal(dev_priv, PAGE_SIZE);
	if (IS_ERR(obj))
		return PTR_ERR(obj);

	vma = i915_vma_instance(obj, &ppgtt->vm, NULL);
	if (IS_ERR(vma)) {
		err = PTR_ERR(vma);
		goto out_put;
	}

	err = i915_vma_pin(vma, 0, 0, flags);
	if (err)
		goto out_close;

	/*
	 * Make sure we don't end up with something like where the pde is still
	 * pointing to the 2M page, and the pt we just filled-in is dangling --
	 * we can check this by writing to the first page, where it would then
	 * land in the now stale 2M page.
	 */

	err = gpu_write(vma, ctx, dev_priv->engine[RCS0], 0, 0xdeadbeaf);
	if (err)
		goto out_unpin;

	err = cpu_check(obj, 0, 0xdeadbeaf);

out_unpin:
	i915_vma_unpin(vma);
out_close:
	i915_vma_close(vma);
out_put:
	i915_gem_object_put(obj);

	return err;
}

static int igt_tmpfs_fallback(void *arg)
{
	struct i915_gem_context *ctx = arg;
	struct drm_i915_private *i915 = ctx->i915;
	struct vfsmount *gemfs = i915->mm.gemfs;
	struct i915_address_space *vm =
		ctx->ppgtt ? &ctx->ppgtt->vm : &i915->ggtt.vm;
	struct drm_i915_gem_object *obj;
	struct i915_vma *vma;
	u32 *vaddr;
	int err = 0;

	/*
	 * Make sure that we don't burst into a ball of flames upon falling back
	 * to tmpfs, which we rely on in the off-chance that we encounter a
	 * failure when setting up gemfs.
	 */

	i915->mm.gemfs = NULL;

	obj = i915_gem_object_create(i915, PAGE_SIZE);
	if (IS_ERR(obj)) {
		err = PTR_ERR(obj);
		goto out_restore;
	}

	vaddr = i915_gem_object_pin_map(obj, I915_MAP_WB);
	if (IS_ERR(vaddr)) {
		err = PTR_ERR(vaddr);
		goto out_put;
	}
	*vaddr = 0xdeadbeaf;

	__i915_gem_object_flush_map(obj, 0, 64);
	i915_gem_object_unpin_map(obj);

	vma = i915_vma_instance(obj, vm, NULL);
	if (IS_ERR(vma)) {
		err = PTR_ERR(vma);
		goto out_put;
	}

	err = i915_vma_pin(vma, 0, 0, PIN_USER);
	if (err)
		goto out_close;

	err = igt_check_page_sizes(vma);

	i915_vma_unpin(vma);
out_close:
	i915_vma_close(vma);
out_put:
	i915_gem_object_put(obj);
out_restore:
	i915->mm.gemfs = gemfs;

	return err;
}

static int igt_shrink_thp(void *arg)
{
	struct i915_gem_context *ctx = arg;
	struct drm_i915_private *i915 = ctx->i915;
	struct i915_address_space *vm =
		ctx->ppgtt ? &ctx->ppgtt->vm : &i915->ggtt.vm;
	struct drm_i915_gem_object *obj;
	struct i915_vma *vma;
	unsigned int flags = PIN_USER;
	int err;

	/*
	 * Sanity check shrinking huge-paged object -- make sure nothing blows
	 * up.
	 */

	if (!igt_can_allocate_thp(i915)) {
		pr_info("missing THP support, skipping\n");
		return 0;
	}

	obj = i915_gem_object_create(i915, SZ_2M);
	if (IS_ERR(obj))
		return PTR_ERR(obj);

	vma = i915_vma_instance(obj, vm, NULL);
	if (IS_ERR(vma)) {
		err = PTR_ERR(vma);
		goto out_put;
	}

	err = i915_vma_pin(vma, 0, 0, flags);
	if (err)
		goto out_close;

	if (obj->mm.page_sizes.phys < I915_GTT_PAGE_SIZE_2M) {
		pr_info("failed to allocate THP, finishing test early\n");
		goto out_unpin;
	}

	err = igt_check_page_sizes(vma);
	if (err)
		goto out_unpin;

	err = gpu_write(vma, ctx, i915->engine[RCS0], 0, 0xdeadbeaf);
	if (err)
		goto out_unpin;

	i915_vma_unpin(vma);

	/*
	 * Now that the pages are *unpinned* shrink-all should invoke
	 * shmem to truncate our pages.
	 */
	i915_gem_shrink_all(i915);
	if (i915_gem_object_has_pages(obj)) {
		pr_err("shrink-all didn't truncate the pages\n");
		err = -EINVAL;
		goto out_close;
	}

	if (obj->mm.page_sizes.sg || obj->mm.page_sizes.phys) {
		pr_err("residual page-size bits left\n");
		err = -EINVAL;
		goto out_close;
	}

	err = i915_vma_pin(vma, 0, 0, flags);
	if (err)
		goto out_close;

	err = cpu_check(obj, 0, 0xdeadbeaf);

out_unpin:
	i915_vma_unpin(vma);
out_close:
	i915_vma_close(vma);
out_put:
	i915_gem_object_put(obj);

	return err;
}

int i915_gem_huge_page_mock_selftests(void)
{
	static const struct i915_subtest tests[] = {
		SUBTEST(igt_mock_exhaust_device_supported_pages),
		SUBTEST(igt_mock_ppgtt_misaligned_dma),
		SUBTEST(igt_mock_ppgtt_huge_fill),
		SUBTEST(igt_mock_ppgtt_64K),
	};
	struct drm_i915_private *dev_priv;
	struct i915_hw_ppgtt *ppgtt;
	int err;

	dev_priv = mock_gem_device();
	if (!dev_priv)
		return -ENOMEM;

	/* Pretend to be a device which supports the 48b PPGTT */
	mkwrite_device_info(dev_priv)->ppgtt_type = INTEL_PPGTT_FULL;
	mkwrite_device_info(dev_priv)->ppgtt_size = 48;

	mutex_lock(&dev_priv->drm.struct_mutex);
	ppgtt = i915_ppgtt_create(dev_priv);
	if (IS_ERR(ppgtt)) {
		err = PTR_ERR(ppgtt);
		goto out_unlock;
	}

	if (!i915_vm_is_4lvl(&ppgtt->vm)) {
		pr_err("failed to create 48b PPGTT\n");
		err = -EINVAL;
		goto out_close;
	}

	/* If we ever hit this, it's time to mock the 64K scratch */
	if (!i915_vm_has_scratch_64K(&ppgtt->vm)) {
		pr_err("PPGTT missing 64K scratch page\n");
		err = -EINVAL;
		goto out_close;
	}

	err = i915_subtests(tests, ppgtt);

out_close:
	i915_ppgtt_put(ppgtt);

out_unlock:
	mutex_unlock(&dev_priv->drm.struct_mutex);
	drm_dev_put(&dev_priv->drm);

	return err;
}

int i915_gem_huge_page_live_selftests(struct drm_i915_private *dev_priv)
{
	static const struct i915_subtest tests[] = {
		SUBTEST(igt_shrink_thp),
		SUBTEST(igt_ppgtt_pin_update),
		SUBTEST(igt_tmpfs_fallback),
		SUBTEST(igt_ppgtt_exhaust_huge),
		SUBTEST(igt_ppgtt_gemfs_huge),
		SUBTEST(igt_ppgtt_internal_huge),
	};
	struct drm_file *file;
	struct i915_gem_context *ctx;
	intel_wakeref_t wakeref;
	int err;

	if (!HAS_PPGTT(dev_priv)) {
		pr_info("PPGTT not supported, skipping live-selftests\n");
		return 0;
	}

	if (i915_terminally_wedged(dev_priv))
		return 0;

	file = mock_file(dev_priv);
	if (IS_ERR(file))
		return PTR_ERR(file);

	mutex_lock(&dev_priv->drm.struct_mutex);
	wakeref = intel_runtime_pm_get(dev_priv);

	ctx = live_context(dev_priv, file);
	if (IS_ERR(ctx)) {
		err = PTR_ERR(ctx);
		goto out_unlock;
	}

	if (ctx->ppgtt)
		ctx->ppgtt->vm.scrub_64K = true;

	err = i915_subtests(tests, ctx);

out_unlock:
	intel_runtime_pm_put(dev_priv, wakeref);
	mutex_unlock(&dev_priv->drm.struct_mutex);

	mock_file_free(dev_priv, file);

	return err;
}