2 * Copyright © 2017 Intel Corporation
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
11 * The above copyright notice and this permission notice (including the next
12 * paragraph) shall be included in all copies or substantial portions of the
15 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
18 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
20 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
25 #include <linux/prime_numbers.h>
27 #include "gt/intel_reset.h"
28 #include "i915_selftest.h"
30 #include "i915_random.h"
31 #include "igt_flush_test.h"
32 #include "igt_gem_utils.h"
33 #include "igt_live_test.h"
34 #include "igt_reset.h"
35 #include "igt_spinner.h"
38 #include "mock_gem_device.h"
39 #include "huge_gem_object.h"
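/* Number of u32 slots in a page; each context fills one dword slot in every page of the test object. */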
41 #define DW_PER_PAGE (PAGE_SIZE / sizeof(u32))
43 static int live_nop_switch(void *arg)
45 const unsigned int nctx = 1024;
46 struct drm_i915_private *i915 = arg;
47 struct intel_engine_cs *engine;
48 struct i915_gem_context **ctx;
49 enum intel_engine_id id;
50 intel_wakeref_t wakeref;
51 struct igt_live_test t;
52 struct drm_file *file;
57 * Create as many contexts as we can feasibly get away with
58 * and check we can switch between them rapidly.
60 * Serves as a very simple stress test for submission and HW switching
64 if (!DRIVER_CAPS(i915)->has_logical_contexts)
67 file = mock_file(i915);
71 mutex_lock(&i915->drm.struct_mutex);
72 wakeref = intel_runtime_pm_get(i915);
74 ctx = kcalloc(nctx, sizeof(*ctx), GFP_KERNEL);
80 for (n = 0; n < nctx; n++) {
81 ctx[n] = live_context(i915, file);
83 err = PTR_ERR(ctx[n]);
88 for_each_engine(engine, i915, id) {
89 struct i915_request *rq;
90 unsigned long end_time, prime;
91 ktime_t times[2] = {};
93 times[0] = ktime_get_raw();
94 for (n = 0; n < nctx; n++) {
95 rq = igt_request_alloc(ctx[n], engine);
100 i915_request_add(rq);
102 if (i915_request_wait(rq,
105 pr_err("Failed to populate %d contexts\n", nctx);
106 i915_gem_set_wedged(i915);
111 times[1] = ktime_get_raw();
113 pr_info("Populated %d contexts on %s in %lluns\n",
114 nctx, engine->name, ktime_to_ns(times[1] - times[0]));
116 err = igt_live_test_begin(&t, i915, __func__, engine->name);
120 end_time = jiffies + i915_selftest.timeout_jiffies;
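/*
 * Measure switch latency over increasing prime-numbered batches of
 * requests, round-robining across the contexts, until the selftest
 * timeout expires.
 */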
121 for_each_prime_number_from(prime, 2, 8192) {
122 times[1] = ktime_get_raw();
124 for (n = 0; n < prime; n++) {
125 rq = igt_request_alloc(ctx[n % nctx], engine);
132 * This space is left intentionally blank.
134 * We do not actually want to perform any
135 * action with this request; we just want
136 * to measure the latency in allocation
137 * and submission of our breadcrumbs -
138 * ensuring that the bare request is sufficient
139 * for the system to work (i.e. proper HEAD
140 * tracking of the rings, interrupt handling,
141 * etc). It also gives us the lowest bounds
145 i915_request_add(rq);
147 if (i915_request_wait(rq,
150 pr_err("Switching between %lu contexts timed out\n",
152 i915_gem_set_wedged(i915);
156 times[1] = ktime_sub(ktime_get_raw(), times[1]);
160 if (__igt_timeout(end_time, NULL))
164 err = igt_live_test_end(&t);
168 pr_info("Switch latencies on %s: 1 = %lluns, %lu = %lluns\n",
170 ktime_to_ns(times[0]),
171 prime - 1, div64_u64(ktime_to_ns(times[1]), prime - 1));
175 intel_runtime_pm_put(i915, wakeref);
176 mutex_unlock(&i915->drm.struct_mutex);
177 mock_file_free(i915, file);
181 static struct i915_vma *
182 gpu_fill_dw(struct i915_vma *vma, u64 offset, unsigned long count, u32 value)
184 struct drm_i915_gem_object *obj;
185 const int gen = INTEL_GEN(vma->vm->i915);
186 unsigned long n, size;
190 size = (4 * count + 1) * sizeof(u32);
191 size = round_up(size, PAGE_SIZE);
192 obj = i915_gem_object_create_internal(vma->vm->i915, size);
194 return ERR_CAST(obj);
196 cmd = i915_gem_object_pin_map(obj, I915_MAP_WB);
202 GEM_BUG_ON(offset + (count - 1) * PAGE_SIZE > vma->node.size);
203 offset += vma->node.start;
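/*
 * Emit one MI_STORE_DWORD_IMM per page, writing value at offset and
 * stepping the target address by PAGE_SIZE; the exact encoding below
 * differs between gen8+, gen4-7 and earlier parts.
 */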
205 for (n = 0; n < count; n++) {
207 *cmd++ = MI_STORE_DWORD_IMM_GEN4;
208 *cmd++ = lower_32_bits(offset);
209 *cmd++ = upper_32_bits(offset);
211 } else if (gen >= 4) {
212 *cmd++ = MI_STORE_DWORD_IMM_GEN4 |
213 (gen < 6 ? MI_USE_GGTT : 0);
218 *cmd++ = MI_STORE_DWORD_IMM | MI_MEM_VIRTUAL;
224 *cmd = MI_BATCH_BUFFER_END;
225 i915_gem_object_flush_map(obj);
226 i915_gem_object_unpin_map(obj);
228 err = i915_gem_object_set_to_gtt_domain(obj, false);
232 vma = i915_vma_instance(obj, vma->vm, NULL);
238 err = i915_vma_pin(vma, 0, 0, PIN_USER);
245 i915_gem_object_put(obj);
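/*
 * The huge test object backs a large fake (GTT) size with only a small
 * pool of real physical pages; see the comment in gpu_fill() below.
 */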
249 static unsigned long real_page_count(struct drm_i915_gem_object *obj)
251 return huge_gem_object_phys_size(obj) >> PAGE_SHIFT;
254 static unsigned long fake_page_count(struct drm_i915_gem_object *obj)
256 return huge_gem_object_dma_size(obj) >> PAGE_SHIFT;
259 static int gpu_fill(struct drm_i915_gem_object *obj,
260 struct i915_gem_context *ctx,
261 struct intel_engine_cs *engine,
264 struct drm_i915_private *i915 = to_i915(obj->base.dev);
265 struct i915_address_space *vm =
266 ctx->ppgtt ? &ctx->ppgtt->vm : &i915->ggtt.vm;
267 struct i915_request *rq;
268 struct i915_vma *vma;
269 struct i915_vma *batch;
273 GEM_BUG_ON(obj->base.size > vm->total);
274 GEM_BUG_ON(!intel_engine_can_store_dword(engine));
276 vma = i915_vma_instance(obj, vm, NULL);
280 err = i915_gem_object_set_to_gtt_domain(obj, false);
284 err = i915_vma_pin(vma, 0, 0, PIN_HIGH | PIN_USER);
288 /* Within the GTT the huge object maps every page onto
289 * its 1024 real pages (using phys_pfn = dma_pfn % 1024).
290 * We set the nth dword within the page using the nth
291 * mapping via the GTT - this should exercise the GTT mapping
292 * whilst checking that each context provides a unique view
295 batch = gpu_fill_dw(vma,
296 (dw * real_page_count(obj)) << PAGE_SHIFT |
298 real_page_count(obj),
301 err = PTR_ERR(batch);
305 rq = igt_request_alloc(ctx, engine);
312 if (INTEL_GEN(vm->i915) <= 5)
313 flags |= I915_DISPATCH_SECURE;
315 err = engine->emit_bb_start(rq,
316 batch->node.start, batch->node.size,
321 err = i915_vma_move_to_active(batch, rq, 0);
325 err = i915_vma_move_to_active(vma, rq, EXEC_OBJECT_WRITE);
329 i915_gem_object_set_active_reference(batch->obj);
330 i915_vma_unpin(batch);
331 i915_vma_close(batch);
335 i915_request_add(rq);
340 i915_request_skip(rq, err);
342 i915_request_add(rq);
344 i915_vma_unpin(batch);
351 static int cpu_fill(struct drm_i915_gem_object *obj, u32 value)
353 const bool has_llc = HAS_LLC(to_i915(obj->base.dev));
354 unsigned int n, m, need_flush;
357 err = i915_gem_obj_prepare_shmem_write(obj, &need_flush);
361 for (n = 0; n < real_page_count(obj); n++) {
364 map = kmap_atomic(i915_gem_object_get_page(obj, n));
365 for (m = 0; m < DW_PER_PAGE; m++)
368 drm_clflush_virt_range(map, PAGE_SIZE);
372 i915_gem_obj_finish_shmem_access(obj);
373 obj->read_domains = I915_GEM_DOMAIN_GTT | I915_GEM_DOMAIN_CPU;
374 obj->write_domain = 0;
378 static noinline int cpu_check(struct drm_i915_gem_object *obj,
379 unsigned int idx, unsigned int max)
381 unsigned int n, m, needs_flush;
384 err = i915_gem_obj_prepare_shmem_read(obj, &needs_flush);
388 for (n = 0; n < real_page_count(obj); n++) {
391 map = kmap_atomic(i915_gem_object_get_page(obj, n));
392 if (needs_flush & CLFLUSH_BEFORE)
393 drm_clflush_virt_range(map, PAGE_SIZE);
395 for (m = 0; m < max; m++) {
397 pr_err("%pS: Invalid value at object %d page %d/%ld, offset %d/%d: found %x expected %x\n",
398 __builtin_return_address(0), idx,
399 n, real_page_count(obj), m, max,
406 for (; m < DW_PER_PAGE; m++) {
407 if (map[m] != STACK_MAGIC) {
408 pr_err("%pS: Invalid value at object %d page %d, offset %d: found %x expected %x (uninitialised)\n",
409 __builtin_return_address(0), idx, n, m,
410 map[m], STACK_MAGIC);
422 i915_gem_obj_finish_shmem_access(obj);
426 static int file_add_object(struct drm_file *file,
427 struct drm_i915_gem_object *obj)
431 GEM_BUG_ON(obj->base.handle_count);
433 /* tie the object to the drm_file for easy reaping */
434 err = idr_alloc(&file->object_idr, &obj->base, 1, 0, GFP_KERNEL);
438 i915_gem_object_get(obj);
439 obj->base.handle_count++;
443 static struct drm_i915_gem_object *
444 create_test_object(struct i915_gem_context *ctx,
445 struct drm_file *file,
446 struct list_head *objects)
448 struct drm_i915_gem_object *obj;
449 struct i915_address_space *vm =
450 ctx->ppgtt ? &ctx->ppgtt->vm : &ctx->i915->ggtt.vm;
454 size = min(vm->total / 2, 1024ull * DW_PER_PAGE * PAGE_SIZE);
455 size = round_down(size, DW_PER_PAGE * PAGE_SIZE);
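/*
 * Back the large fake size with only DW_PER_PAGE real pages; the size is
 * capped at half the vm and rounded to a whole number of dword slots.
 */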
457 obj = huge_gem_object(ctx->i915, DW_PER_PAGE * PAGE_SIZE, size);
461 err = file_add_object(file, obj);
462 i915_gem_object_put(obj);
466 err = cpu_fill(obj, STACK_MAGIC);
468 pr_err("Failed to fill object with cpu, err=%d\n",
473 list_add_tail(&obj->st_link, objects);
477 static unsigned long max_dwords(struct drm_i915_gem_object *obj)
479 unsigned long npages = fake_page_count(obj);
481 GEM_BUG_ON(!IS_ALIGNED(npages, DW_PER_PAGE));
482 return npages / DW_PER_PAGE;
485 static int igt_ctx_exec(void *arg)
487 struct drm_i915_private *i915 = arg;
488 struct intel_engine_cs *engine;
489 enum intel_engine_id id;
493 * Create a few different contexts (with different mm) and write
494 * through each ctx/mm using the GPU, making sure those writes end
495 * up in the expected pages of our obj.
498 if (!DRIVER_CAPS(i915)->has_logical_contexts)
501 for_each_engine(engine, i915, id) {
502 struct drm_i915_gem_object *obj = NULL;
503 unsigned long ncontexts, ndwords, dw;
504 struct igt_live_test t;
505 struct drm_file *file;
506 IGT_TIMEOUT(end_time);
509 if (!intel_engine_can_store_dword(engine))
512 if (!engine->context_size)
513 continue; /* No logical context support in HW */
515 file = mock_file(i915);
517 return PTR_ERR(file);
519 mutex_lock(&i915->drm.struct_mutex);
521 err = igt_live_test_begin(&t, i915, __func__, engine->name);
528 while (!time_after(jiffies, end_time)) {
529 struct i915_gem_context *ctx;
530 intel_wakeref_t wakeref;
532 ctx = live_context(i915, file);
539 obj = create_test_object(ctx, file, &objects);
546 with_intel_runtime_pm(i915, wakeref)
547 err = gpu_fill(obj, ctx, engine, dw);
549 pr_err("Failed to fill dword %lu [%lu/%lu] with gpu (%s) in ctx %u [full-ppgtt? %s], err=%d\n",
550 ndwords, dw, max_dwords(obj),
551 engine->name, ctx->hw_id,
552 yesno(!!ctx->ppgtt), err);
556 if (++dw == max_dwords(obj)) {
565 pr_info("Submitted %lu contexts to %s, filling %lu dwords\n",
566 ncontexts, engine->name, ndwords);
569 list_for_each_entry(obj, &objects, st_link) {
571 min_t(unsigned int, ndwords - dw, max_dwords(obj));
573 err = cpu_check(obj, ncontexts++, rem);
581 if (igt_live_test_end(&t))
583 mutex_unlock(&i915->drm.struct_mutex);
585 mock_file_free(i915, file);
593 static int igt_shared_ctx_exec(void *arg)
595 struct drm_i915_private *i915 = arg;
596 struct i915_gem_context *parent;
597 struct intel_engine_cs *engine;
598 enum intel_engine_id id;
599 struct igt_live_test t;
600 struct drm_file *file;
604 * Create a few different contexts with the same mm and write
605 * through each ctx using the GPU, making sure those writes end
606 * up in the expected pages of our obj.
608 if (!DRIVER_CAPS(i915)->has_logical_contexts)
611 file = mock_file(i915);
613 return PTR_ERR(file);
615 mutex_lock(&i915->drm.struct_mutex);
617 parent = live_context(i915, file);
618 if (IS_ERR(parent)) {
619 err = PTR_ERR(parent);
623 if (!parent->ppgtt) { /* not full-ppgtt; nothing to share */
628 err = igt_live_test_begin(&t, i915, __func__, "");
632 for_each_engine(engine, i915, id) {
633 unsigned long ncontexts, ndwords, dw;
634 struct drm_i915_gem_object *obj = NULL;
635 IGT_TIMEOUT(end_time);
638 if (!intel_engine_can_store_dword(engine))
644 while (!time_after(jiffies, end_time)) {
645 struct i915_gem_context *ctx;
646 intel_wakeref_t wakeref;
648 ctx = kernel_context(i915);
654 __assign_ppgtt(ctx, parent->ppgtt);
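/* Share the parent's ppgtt so every context writes through the same address space. */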
657 obj = create_test_object(parent, file, &objects);
660 kernel_context_close(ctx);
666 with_intel_runtime_pm(i915, wakeref)
667 err = gpu_fill(obj, ctx, engine, dw);
669 pr_err("Failed to fill dword %lu [%lu/%lu] with gpu (%s) in ctx %u [full-ppgtt? %s], err=%d\n",
670 ndwords, dw, max_dwords(obj),
671 engine->name, ctx->hw_id,
672 yesno(!!ctx->ppgtt), err);
673 kernel_context_close(ctx);
677 if (++dw == max_dwords(obj)) {
685 kernel_context_close(ctx);
687 pr_info("Submitted %lu contexts to %s, filling %lu dwords\n",
688 ncontexts, engine->name, ndwords);
691 list_for_each_entry(obj, &objects, st_link) {
693 min_t(unsigned int, ndwords - dw, max_dwords(obj));
695 err = cpu_check(obj, ncontexts++, rem);
703 if (igt_live_test_end(&t))
706 mutex_unlock(&i915->drm.struct_mutex);
708 mock_file_free(i915, file);
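/*
 * Build a small batch that reads GEN8_R_PWR_CLK_STATE with
 * MI_STORE_REGISTER_MEM and writes it to the start of the supplied vma
 * (gen8+ only).
 */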
712 static struct i915_vma *rpcs_query_batch(struct i915_vma *vma)
714 struct drm_i915_gem_object *obj;
718 if (INTEL_GEN(vma->vm->i915) < 8)
719 return ERR_PTR(-EINVAL);
721 obj = i915_gem_object_create_internal(vma->vm->i915, PAGE_SIZE);
723 return ERR_CAST(obj);
725 cmd = i915_gem_object_pin_map(obj, I915_MAP_WB);
731 *cmd++ = MI_STORE_REGISTER_MEM_GEN8;
732 *cmd++ = i915_mmio_reg_offset(GEN8_R_PWR_CLK_STATE);
733 *cmd++ = lower_32_bits(vma->node.start);
734 *cmd++ = upper_32_bits(vma->node.start);
735 *cmd = MI_BATCH_BUFFER_END;
737 __i915_gem_object_flush_map(obj, 0, 64);
738 i915_gem_object_unpin_map(obj);
740 vma = i915_vma_instance(obj, vma->vm, NULL);
746 err = i915_vma_pin(vma, 0, 0, PIN_USER);
753 i915_gem_object_put(obj);
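/*
 * Pin the object into the context's ppgtt, submit the RPCS query batch on
 * the given intel_context and return the request for the caller to wait on.
 */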
758 emit_rpcs_query(struct drm_i915_gem_object *obj,
759 struct intel_context *ce,
760 struct i915_request **rq_out)
762 struct i915_request *rq;
763 struct i915_vma *batch;
764 struct i915_vma *vma;
767 GEM_BUG_ON(!intel_engine_can_store_dword(ce->engine));
769 vma = i915_vma_instance(obj, &ce->gem_context->ppgtt->vm, NULL);
773 err = i915_gem_object_set_to_gtt_domain(obj, false);
777 err = i915_vma_pin(vma, 0, 0, PIN_USER);
781 batch = rpcs_query_batch(vma);
783 err = PTR_ERR(batch);
787 rq = i915_request_create(ce);
793 err = rq->engine->emit_bb_start(rq,
794 batch->node.start, batch->node.size,
799 err = i915_vma_move_to_active(batch, rq, 0);
803 err = i915_vma_move_to_active(vma, rq, EXEC_OBJECT_WRITE);
807 i915_gem_object_set_active_reference(batch->obj);
808 i915_vma_unpin(batch);
809 i915_vma_close(batch);
813 *rq_out = i915_request_get(rq);
815 i915_request_add(rq);
820 i915_request_skip(rq, err);
822 i915_request_add(rq);
824 i915_vma_unpin(batch);
831 #define TEST_IDLE BIT(0)
832 #define TEST_BUSY BIT(1)
833 #define TEST_RESET BIT(2)
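/*
 * TEST_BUSY keeps a spinner running on the context while the SSEU is
 * reconfigured, TEST_RESET additionally resets the engine mid-test, and
 * TEST_IDLE waits for the GPU to idle before re-reading the RPCS value.
 */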
836 __sseu_prepare(struct drm_i915_private *i915,
839 struct intel_context *ce,
840 struct igt_spinner **spin)
842 struct i915_request *rq;
846 if (!(flags & (TEST_BUSY | TEST_RESET)))
849 *spin = kzalloc(sizeof(**spin), GFP_KERNEL);
853 ret = igt_spinner_init(*spin, i915);
857 rq = igt_spinner_create_request(*spin,
866 i915_request_add(rq);
868 if (!igt_wait_for_spinner(*spin, rq)) {
869 pr_err("%s: Spinner failed to start!\n", name);
877 igt_spinner_end(*spin);
879 igt_spinner_fini(*spin);
881 kfree(fetch_and_zero(spin));
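/*
 * Submit an RPCS query, wait for it to complete (ending any spinner
 * first) and decode the slice-count field from the value the GPU wrote.
 */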
886 __read_slice_count(struct drm_i915_private *i915,
887 struct intel_context *ce,
888 struct drm_i915_gem_object *obj,
889 struct igt_spinner *spin,
892 struct i915_request *rq = NULL;
898 ret = emit_rpcs_query(obj, ce, &rq);
903 igt_spinner_end(spin);
905 ret = i915_request_wait(rq, I915_WAIT_LOCKED, MAX_SCHEDULE_TIMEOUT);
906 i915_request_put(rq);
910 buf = i915_gem_object_pin_map(obj, I915_MAP_WB);
916 if (INTEL_GEN(i915) >= 11) {
917 s_mask = GEN11_RPCS_S_CNT_MASK;
918 s_shift = GEN11_RPCS_S_CNT_SHIFT;
920 s_mask = GEN8_RPCS_S_CNT_MASK;
921 s_shift = GEN8_RPCS_S_CNT_SHIFT;
925 cnt = (val & s_mask) >> s_shift;
928 i915_gem_object_unpin_map(obj);
934 __check_rpcs(const char *name, u32 rpcs, int slices, unsigned int expected,
935 const char *prefix, const char *suffix)
937 if (slices == expected)
941 pr_err("%s: %s read slice count failed with %d%s\n",
942 name, prefix, slices, suffix);
946 pr_err("%s: %s slice count %d is not %u%s\n",
947 name, prefix, slices, expected, suffix);
949 pr_info("RPCS=0x%x; %u%sx%u%s\n",
951 (rpcs & GEN8_RPCS_S_CNT_ENABLE) ? "*" : "",
952 (rpcs & GEN8_RPCS_SS_CNT_MASK) >> GEN8_RPCS_SS_CNT_SHIFT,
953 (rpcs & GEN8_RPCS_SS_CNT_ENABLE) ? "*" : "");
959 __sseu_finish(struct drm_i915_private *i915,
962 struct intel_context *ce,
963 struct drm_i915_gem_object *obj,
964 unsigned int expected,
965 struct igt_spinner *spin)
967 unsigned int slices = hweight32(ce->engine->sseu.slice_mask);
971 if (flags & TEST_RESET) {
972 ret = i915_reset_engine(ce->engine, "sseu");
977 ret = __read_slice_count(i915, ce, obj,
978 flags & TEST_RESET ? NULL : spin, &rpcs);
979 ret = __check_rpcs(name, rpcs, ret, expected, "Context", "!");
983 ret = __read_slice_count(i915, ce->engine->kernel_context, obj,
985 ret = __check_rpcs(name, rpcs, ret, slices, "Kernel context", "!");
989 igt_spinner_end(spin);
991 if ((flags & TEST_IDLE) && ret == 0) {
992 ret = i915_gem_wait_for_idle(i915,
994 MAX_SCHEDULE_TIMEOUT);
998 ret = __read_slice_count(i915, ce, obj, NULL, &rpcs);
999 ret = __check_rpcs(name, rpcs, ret, expected,
1000 "Context", " after idle!");
1007 __sseu_test(struct drm_i915_private *i915,
1010 struct intel_context *ce,
1011 struct drm_i915_gem_object *obj,
1012 struct intel_sseu sseu)
1014 struct igt_spinner *spin = NULL;
1017 ret = __sseu_prepare(i915, name, flags, ce, &spin);
1021 ret = __intel_context_reconfigure_sseu(ce, sseu);
1025 ret = __sseu_finish(i915, name, flags, ce, obj,
1026 hweight32(sseu.slice_mask), spin);
1030 igt_spinner_end(spin);
1031 igt_spinner_fini(spin);
1038 __igt_ctx_sseu(struct drm_i915_private *i915,
1042 struct intel_engine_cs *engine = i915->engine[RCS0];
1043 struct intel_sseu default_sseu = engine->sseu;
1044 struct drm_i915_gem_object *obj;
1045 struct i915_gem_context *ctx;
1046 struct intel_context *ce;
1047 struct intel_sseu pg_sseu;
1048 intel_wakeref_t wakeref;
1049 struct drm_file *file;
1052 if (INTEL_GEN(i915) < 9)
1055 if (!RUNTIME_INFO(i915)->sseu.has_slice_pg)
1058 if (hweight32(default_sseu.slice_mask) < 2)
1062 * Gen11 VME friendly power-gated configuration with half enabled
1065 pg_sseu = default_sseu;
1066 pg_sseu.slice_mask = 1;
1067 pg_sseu.subslice_mask =
1068 ~(~0 << (hweight32(default_sseu.subslice_mask) / 2));
1070 pr_info("SSEU subtest '%s', flags=%x, def_slices=%u, pg_slices=%u\n",
1071 name, flags, hweight32(default_sseu.slice_mask),
1072 hweight32(pg_sseu.slice_mask));
1074 file = mock_file(i915);
1076 return PTR_ERR(file);
1078 if (flags & TEST_RESET)
1079 igt_global_reset_lock(i915);
1081 mutex_lock(&i915->drm.struct_mutex);
1083 ctx = live_context(i915, file);
1088 i915_gem_context_clear_bannable(ctx); /* to reset and beyond! */
1090 obj = i915_gem_object_create_internal(i915, PAGE_SIZE);
1096 wakeref = intel_runtime_pm_get(i915);
1098 ce = i915_gem_context_get_engine(ctx, RCS0);
1104 ret = intel_context_pin(ce);
1108 /* First set the default mask. */
1109 ret = __sseu_test(i915, name, flags, ce, obj, default_sseu);
1113 /* Then set a power-gated configuration. */
1114 ret = __sseu_test(i915, name, flags, ce, obj, pg_sseu);
1118 /* Back to defaults. */
1119 ret = __sseu_test(i915, name, flags, ce, obj, default_sseu);
1123 /* One last power-gated configuration for the road. */
1124 ret = __sseu_test(i915, name, flags, ce, obj, pg_sseu);
1129 if (igt_flush_test(i915, I915_WAIT_LOCKED))
1132 intel_context_unpin(ce);
1134 intel_context_put(ce);
1136 intel_runtime_pm_put(i915, wakeref);
1137 i915_gem_object_put(obj);
1140 mutex_unlock(&i915->drm.struct_mutex);
1142 if (flags & TEST_RESET)
1143 igt_global_reset_unlock(i915);
1145 mock_file_free(i915, file);
1148 pr_err("%s: Failed with %d!\n", name, ret);
1153 static int igt_ctx_sseu(void *arg)
1158 } *phase, phases[] = {
1159 { .name = "basic", .flags = 0 },
1160 { .name = "idle", .flags = TEST_IDLE },
1161 { .name = "busy", .flags = TEST_BUSY },
1162 { .name = "busy-reset", .flags = TEST_BUSY | TEST_RESET },
1163 { .name = "busy-idle", .flags = TEST_BUSY | TEST_IDLE },
1164 { .name = "reset-idle", .flags = TEST_RESET | TEST_IDLE },
1169 for (i = 0, phase = phases; ret == 0 && i < ARRAY_SIZE(phases);
1171 ret = __igt_ctx_sseu(arg, phase->name, phase->flags);
1176 static int igt_ctx_readonly(void *arg)
1178 struct drm_i915_private *i915 = arg;
1179 struct drm_i915_gem_object *obj = NULL;
1180 struct i915_gem_context *ctx;
1181 struct i915_hw_ppgtt *ppgtt;
1182 unsigned long idx, ndwords, dw;
1183 struct igt_live_test t;
1184 struct drm_file *file;
1185 I915_RND_STATE(prng);
1186 IGT_TIMEOUT(end_time);
1191 * Create a few read-only objects (with the occasional writable object)
1192 * and try to write into these objects, checking that the GPU discards
1193 * any write to a read-only object.
1196 file = mock_file(i915);
1198 return PTR_ERR(file);
1200 mutex_lock(&i915->drm.struct_mutex);
1202 err = igt_live_test_begin(&t, i915, __func__, "");
1206 ctx = live_context(i915, file);
1212 ppgtt = ctx->ppgtt ?: i915->mm.aliasing_ppgtt;
1213 if (!ppgtt || !ppgtt->vm.has_read_only) {
1220 while (!time_after(jiffies, end_time)) {
1221 struct intel_engine_cs *engine;
1224 for_each_engine(engine, i915, id) {
1225 intel_wakeref_t wakeref;
1227 if (!intel_engine_can_store_dword(engine))
1231 obj = create_test_object(ctx, file, &objects);
1237 if (prandom_u32_state(&prng) & 1)
1238 i915_gem_object_set_readonly(obj);
1242 with_intel_runtime_pm(i915, wakeref)
1243 err = gpu_fill(obj, ctx, engine, dw);
1245 pr_err("Failed to fill dword %lu [%lu/%lu] with gpu (%s) in ctx %u [full-ppgtt? %s], err=%d\n",
1246 ndwords, dw, max_dwords(obj),
1247 engine->name, ctx->hw_id,
1248 yesno(!!ctx->ppgtt), err);
1252 if (++dw == max_dwords(obj)) {
1259 pr_info("Submitted %lu dwords (across %u engines)\n",
1260 ndwords, RUNTIME_INFO(i915)->num_engines);
1264 list_for_each_entry(obj, &objects, st_link) {
1266 min_t(unsigned int, ndwords - dw, max_dwords(obj));
1267 unsigned int num_writes;
1270 if (i915_gem_object_is_readonly(obj))
1273 err = cpu_check(obj, idx++, num_writes);
1281 if (igt_live_test_end(&t))
1283 mutex_unlock(&i915->drm.struct_mutex);
1285 mock_file_free(i915, file);
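/*
 * Check that the chosen scratch offset does not overlap any node already
 * allocated in the context's ppgtt.
 */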
1289 static int check_scratch(struct i915_gem_context *ctx, u64 offset)
1291 struct drm_mm_node *node =
1292 __drm_mm_interval_first(&ctx->ppgtt->vm.mm,
1293 offset, offset + sizeof(u32) - 1);
1294 if (!node || node->start > offset)
1297 GEM_BUG_ON(offset >= node->start + node->size);
1299 pr_err("Target offset 0x%08x_%08x overlaps with a node in the mm!\n",
1300 upper_32_bits(offset), lower_32_bits(offset));
1304 static int write_to_scratch(struct i915_gem_context *ctx,
1305 struct intel_engine_cs *engine,
1306 u64 offset, u32 value)
1308 struct drm_i915_private *i915 = ctx->i915;
1309 struct drm_i915_gem_object *obj;
1310 struct i915_request *rq;
1311 struct i915_vma *vma;
1315 GEM_BUG_ON(offset < I915_GTT_PAGE_SIZE);
1317 obj = i915_gem_object_create_internal(i915, PAGE_SIZE);
1319 return PTR_ERR(obj);
1321 cmd = i915_gem_object_pin_map(obj, I915_MAP_WB);
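/* Single MI_STORE_DWORD_IMM: write value into this context's ppgtt at offset. */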
1327 *cmd++ = MI_STORE_DWORD_IMM_GEN4;
1328 if (INTEL_GEN(i915) >= 8) {
1329 *cmd++ = lower_32_bits(offset);
1330 *cmd++ = upper_32_bits(offset);
1336 *cmd = MI_BATCH_BUFFER_END;
1337 __i915_gem_object_flush_map(obj, 0, 64);
1338 i915_gem_object_unpin_map(obj);
1340 vma = i915_vma_instance(obj, &ctx->ppgtt->vm, NULL);
1346 err = i915_vma_pin(vma, 0, 0, PIN_USER | PIN_OFFSET_FIXED);
1350 err = check_scratch(ctx, offset);
1354 rq = igt_request_alloc(ctx, engine);
1360 err = engine->emit_bb_start(rq, vma->node.start, vma->node.size, 0);
1364 err = i915_vma_move_to_active(vma, rq, 0);
1368 i915_gem_object_set_active_reference(obj);
1369 i915_vma_unpin(vma);
1370 i915_vma_close(vma);
1372 i915_request_add(rq);
1377 i915_request_skip(rq, err);
1379 i915_request_add(rq);
1381 i915_vma_unpin(vma);
1383 i915_gem_object_put(obj);
1387 static int read_from_scratch(struct i915_gem_context *ctx,
1388 struct intel_engine_cs *engine,
1389 u64 offset, u32 *value)
1391 struct drm_i915_private *i915 = ctx->i915;
1392 struct drm_i915_gem_object *obj;
1393 const u32 RCS_GPR0 = 0x2600; /* not all engines have their own GPR! */
1394 const u32 result = 0x100;
1395 struct i915_request *rq;
1396 struct i915_vma *vma;
1400 GEM_BUG_ON(offset < I915_GTT_PAGE_SIZE);
1402 obj = i915_gem_object_create_internal(i915, PAGE_SIZE);
1404 return PTR_ERR(obj);
1406 cmd = i915_gem_object_pin_map(obj, I915_MAP_WB);
1412 memset(cmd, POISON_INUSE, PAGE_SIZE);
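/*
 * Load the scratch dword at offset into a GPR with MI_LOAD_REGISTER_MEM,
 * then store that register back into this object at result so the CPU
 * can read the value once the request completes.
 */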
1413 if (INTEL_GEN(i915) >= 8) {
1414 *cmd++ = MI_LOAD_REGISTER_MEM_GEN8;
1416 *cmd++ = lower_32_bits(offset);
1417 *cmd++ = upper_32_bits(offset);
1418 *cmd++ = MI_STORE_REGISTER_MEM_GEN8;
1423 *cmd++ = MI_LOAD_REGISTER_MEM;
1426 *cmd++ = MI_STORE_REGISTER_MEM;
1430 *cmd = MI_BATCH_BUFFER_END;
1432 i915_gem_object_flush_map(obj);
1433 i915_gem_object_unpin_map(obj);
1435 vma = i915_vma_instance(obj, &ctx->ppgtt->vm, NULL);
1441 err = i915_vma_pin(vma, 0, 0, PIN_USER | PIN_OFFSET_FIXED);
1445 err = check_scratch(ctx, offset);
1449 rq = igt_request_alloc(ctx, engine);
1455 err = engine->emit_bb_start(rq, vma->node.start, vma->node.size, 0);
1459 err = i915_vma_move_to_active(vma, rq, EXEC_OBJECT_WRITE);
1463 i915_vma_unpin(vma);
1464 i915_vma_close(vma);
1466 i915_request_add(rq);
1468 err = i915_gem_object_set_to_cpu_domain(obj, false);
1472 cmd = i915_gem_object_pin_map(obj, I915_MAP_WB);
1478 *value = cmd[result / sizeof(*cmd)];
1479 i915_gem_object_unpin_map(obj);
1480 i915_gem_object_put(obj);
1485 i915_request_skip(rq, err);
1487 i915_request_add(rq);
1489 i915_vma_unpin(vma);
1491 i915_gem_object_put(obj);
1495 static int igt_vm_isolation(void *arg)
1497 struct drm_i915_private *i915 = arg;
1498 struct i915_gem_context *ctx_a, *ctx_b;
1499 struct intel_engine_cs *engine;
1500 intel_wakeref_t wakeref;
1501 struct igt_live_test t;
1502 struct drm_file *file;
1503 I915_RND_STATE(prng);
1504 unsigned long count;
1509 if (INTEL_GEN(i915) < 7)
1513 * The simple goal here is that a write into one context is not
1514 * observed in a second (separate page tables and scratch).
1517 file = mock_file(i915);
1519 return PTR_ERR(file);
1521 mutex_lock(&i915->drm.struct_mutex);
1523 err = igt_live_test_begin(&t, i915, __func__, "");
1527 ctx_a = live_context(i915, file);
1528 if (IS_ERR(ctx_a)) {
1529 err = PTR_ERR(ctx_a);
1533 ctx_b = live_context(i915, file);
1534 if (IS_ERR(ctx_b)) {
1535 err = PTR_ERR(ctx_b);
1539 /* We can only test vm isolation if the vms are distinct */
1540 if (ctx_a->ppgtt == ctx_b->ppgtt)
1543 vm_total = ctx_a->ppgtt->vm.total;
1544 GEM_BUG_ON(ctx_b->ppgtt->vm.total != vm_total);
1545 vm_total -= I915_GTT_PAGE_SIZE;
1547 wakeref = intel_runtime_pm_get(i915);
1550 for_each_engine(engine, i915, id) {
1551 IGT_TIMEOUT(end_time);
1552 unsigned long this = 0;
1554 if (!intel_engine_can_store_dword(engine))
1557 while (!__igt_timeout(end_time, NULL)) {
1558 u32 value = 0xc5c5c5c5;
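/*
 * Pick a random dword-aligned offset within the vm, skipping the first
 * GTT page which both scratch helpers refuse to touch.
 */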
1561 div64_u64_rem(i915_prandom_u64_state(&prng),
1563 offset &= -sizeof(u32);
1564 offset += I915_GTT_PAGE_SIZE;
1566 err = write_to_scratch(ctx_a, engine,
1567 offset, 0xdeadbeef);
1569 err = read_from_scratch(ctx_b, engine,
1575 pr_err("%s: Read %08x from scratch (offset 0x%08x_%08x), after %lu reads!\n",
1576 engine->name, value,
1577 upper_32_bits(offset),
1578 lower_32_bits(offset),
1588 pr_info("Checked %lu scratch offsets across %d engines\n",
1589 count, RUNTIME_INFO(i915)->num_engines);
1592 intel_runtime_pm_put(i915, wakeref);
1594 if (igt_live_test_end(&t))
1596 mutex_unlock(&i915->drm.struct_mutex);
1598 mock_file_free(i915, file);
1602 static __maybe_unused const char *
1603 __engine_name(struct drm_i915_private *i915, intel_engine_mask_t engines)
1605 struct intel_engine_cs *engine;
1606 intel_engine_mask_t tmp;
1608 if (engines == ALL_ENGINES)
1611 for_each_engine_masked(engine, i915, engines, tmp)
1612 return engine->name;
1617 static void mock_barrier_task(void *data)
1619 unsigned int *counter = data;
1624 static int mock_context_barrier(void *arg)
1627 #define pr_fmt(x) "context_barrier_task():" # x
1628 struct drm_i915_private *i915 = arg;
1629 struct i915_gem_context *ctx;
1630 struct i915_request *rq;
1631 unsigned int counter;
1635 * The context barrier provides us with a callback after it emits
1636 * a request; useful for retiring old state after loading new.
1639 mutex_lock(&i915->drm.struct_mutex);
1641 ctx = mock_context(i915, "mock");
1648 err = context_barrier_task(ctx, 0,
1649 NULL, mock_barrier_task, &counter);
1651 pr_err("Failed at line %d, err=%d\n", __LINE__, err);
1655 pr_err("Did not retire immediately with 0 engines\n");
1661 err = context_barrier_task(ctx, ALL_ENGINES,
1662 NULL, mock_barrier_task, &counter);
1664 pr_err("Failed at line %d, err=%d\n", __LINE__, err);
1668 pr_err("Did not retire immediately for all unused engines\n");
1673 rq = igt_request_alloc(ctx, i915->engine[RCS0]);
1675 pr_err("Request allocation failed!\n");
1678 i915_request_add(rq);
1681 context_barrier_inject_fault = BIT(RCS0);
1682 err = context_barrier_task(ctx, ALL_ENGINES,
1683 NULL, mock_barrier_task, &counter);
1684 context_barrier_inject_fault = 0;
1688 pr_err("Did not hit fault injection!\n");
1690 pr_err("Invoked callback on error!\n");
1697 err = context_barrier_task(ctx, ALL_ENGINES,
1698 NULL, mock_barrier_task, &counter);
1700 pr_err("Failed at line %d, err=%d\n", __LINE__, err);
1703 mock_device_flush(i915);
1705 pr_err("Did not retire on each active engine\n");
1711 mock_context_close(ctx);
1713 mutex_unlock(&i915->drm.struct_mutex);
1719 int i915_gem_context_mock_selftests(void)
1721 static const struct i915_subtest tests[] = {
1722 SUBTEST(mock_context_barrier),
1724 struct drm_i915_private *i915;
1727 i915 = mock_gem_device();
1731 err = i915_subtests(tests, i915);
1733 drm_dev_put(&i915->drm);
1737 int i915_gem_context_live_selftests(struct drm_i915_private *dev_priv)
1739 static const struct i915_subtest tests[] = {
1740 SUBTEST(live_nop_switch),
1741 SUBTEST(igt_ctx_exec),
1742 SUBTEST(igt_ctx_readonly),
1743 SUBTEST(igt_ctx_sseu),
1744 SUBTEST(igt_shared_ctx_exec),
1745 SUBTEST(igt_vm_isolation),
1748 if (i915_terminally_wedged(dev_priv))
1751 return i915_subtests(tests, dev_priv);