/*
 * Copyright © 2017 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */
#include <linux/prime_numbers.h>

#include "../i915_selftest.h"
#include "i915_random.h"
#include "igt_flush_test.h"

#include "mock_gem_device.h"
#include "huge_gem_object.h"

#define DW_PER_PAGE (PAGE_SIZE / sizeof(u32))
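/*
 * Simple bookkeeping for a live test: begin_live_test() snapshots the global
 * and per-engine GPU reset counts (and clears the missed-interrupt mask), so
 * that end_live_test() can report any resets or missed interrupts that
 * happened while the test ran.
 */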
struct live_test {
	struct drm_i915_private *i915;
	const char *func;
	const char *name;

	unsigned int reset_global;
	unsigned int reset_engine[I915_NUM_ENGINES];
};
static int begin_live_test(struct live_test *t,
			   struct drm_i915_private *i915,
			   const char *func,
			   const char *name)
{
	struct intel_engine_cs *engine;
	enum intel_engine_id id;
	int err;

	t->i915 = i915;
	t->func = func;
	t->name = name;

	err = i915_gem_wait_for_idle(i915,
				     I915_WAIT_LOCKED,
				     MAX_SCHEDULE_TIMEOUT);
	if (err) {
		pr_err("%s(%s): failed to idle before, with err=%d!",
		       func, name, err);
		return err;
	}

	i915->gpu_error.missed_irq_rings = 0;
	t->reset_global = i915_reset_count(&i915->gpu_error);

	for_each_engine(engine, i915, id)
		t->reset_engine[id] =
			i915_reset_engine_count(&i915->gpu_error, engine);

	return 0;
}
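/*
 * Flush any residual work and compare the reset and missed-interrupt state
 * against the snapshot taken in begin_live_test(); any change means the GPU
 * misbehaved during the test and the test is failed with -EIO.
 */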
static int end_live_test(struct live_test *t)
{
	struct drm_i915_private *i915 = t->i915;
	struct intel_engine_cs *engine;
	enum intel_engine_id id;

	if (igt_flush_test(i915, I915_WAIT_LOCKED))
		return -EIO;

	if (t->reset_global != i915_reset_count(&i915->gpu_error)) {
		pr_err("%s(%s): GPU was reset %d times!\n",
		       t->func, t->name,
		       i915_reset_count(&i915->gpu_error) - t->reset_global);
		return -EIO;
	}

	for_each_engine(engine, i915, id) {
		if (t->reset_engine[id] ==
		    i915_reset_engine_count(&i915->gpu_error, engine))
			continue;

		pr_err("%s(%s): engine '%s' was reset %d times!\n",
		       t->func, t->name, engine->name,
		       i915_reset_engine_count(&i915->gpu_error, engine) -
		       t->reset_engine[id]);
		return -EIO;
	}

	if (i915->gpu_error.missed_irq_rings) {
		pr_err("%s(%s): Missed interrupts on engines %lx\n",
		       t->func, t->name, i915->gpu_error.missed_irq_rings);
		return -EIO;
	}

	return 0;
}
static int live_nop_switch(void *arg)
{
	const unsigned int nctx = 1024;
	struct drm_i915_private *i915 = arg;
	struct intel_engine_cs *engine;
	struct i915_gem_context **ctx;
	enum intel_engine_id id;
	struct drm_file *file;
	struct live_test t;
	unsigned long n;
	int err = -ENODEV;

	/*
	 * Create as many contexts as we can feasibly get away with
	 * and check we can switch between them rapidly.
	 *
	 * Serves as a very simple stress test for submission and HW switching
	 * between contexts.
	 */

	if (!DRIVER_CAPS(i915)->has_logical_contexts)
		return 0;

	file = mock_file(i915);
	if (IS_ERR(file))
		return PTR_ERR(file);

	mutex_lock(&i915->drm.struct_mutex);
	intel_runtime_pm_get(i915);

	ctx = kcalloc(nctx, sizeof(*ctx), GFP_KERNEL);
	if (!ctx) {
		err = -ENOMEM;
		goto out_unlock;
	}

	for (n = 0; n < nctx; n++) {
		ctx[n] = i915_gem_create_context(i915, file->driver_priv);
		if (IS_ERR(ctx[n])) {
			err = PTR_ERR(ctx[n]);
			goto out_unlock;
		}
	}

	for_each_engine(engine, i915, id) {
		struct i915_request *rq;
		unsigned long end_time, prime;
		ktime_t times[2] = {};

		times[0] = ktime_get_raw();
		for (n = 0; n < nctx; n++) {
			rq = i915_request_alloc(engine, ctx[n]);
			if (IS_ERR(rq)) {
				err = PTR_ERR(rq);
				goto out_unlock;
			}
			i915_request_add(rq);
		}
		if (i915_request_wait(rq,
				      I915_WAIT_LOCKED,
				      HZ / 5) < 0) {
			pr_err("Failed to populate %d contexts\n", nctx);
			i915_gem_set_wedged(i915);
			err = -EIO;
			goto out_unlock;
		}

		times[1] = ktime_get_raw();

		pr_info("Populated %d contexts on %s in %lluns\n",
			nctx, engine->name, ktime_to_ns(times[1] - times[0]));

		err = begin_live_test(&t, i915, __func__, engine->name);
		if (err)
			goto out_unlock;

		end_time = jiffies + i915_selftest.timeout_jiffies;
		for_each_prime_number_from(prime, 2, 8192) {
			times[1] = ktime_get_raw();

			for (n = 0; n < prime; n++) {
				rq = i915_request_alloc(engine, ctx[n % nctx]);
				if (IS_ERR(rq)) {
					err = PTR_ERR(rq);
					goto out_unlock;
				}

				/*
				 * This space is left intentionally blank.
				 *
				 * We do not actually want to perform any
				 * action with this request, we just want
				 * to measure the latency in allocation
				 * and submission of our breadcrumbs -
				 * ensuring that the bare request is sufficient
				 * for the system to work (i.e. proper HEAD
				 * tracking of the rings, interrupt handling,
				 * etc). It also gives us the lowest bounds
				 * of overhead.
				 */

				i915_request_add(rq);
			}
			if (i915_request_wait(rq,
					      I915_WAIT_LOCKED,
					      HZ / 5) < 0) {
				pr_err("Switching between %ld contexts timed out\n",
				       prime);
				i915_gem_set_wedged(i915);
				break;
			}

			times[1] = ktime_sub(ktime_get_raw(), times[1]);
			if (prime == 2)
				times[0] = times[1];

			if (__igt_timeout(end_time, NULL))
				break;
		}

		err = end_live_test(&t);
		if (err)
			goto out_unlock;

		pr_info("Switch latencies on %s: 1 = %lluns, %lu = %lluns\n",
			engine->name,
			ktime_to_ns(times[0]),
			prime - 1, div64_u64(ktime_to_ns(times[1]), prime - 1));
	}

out_unlock:
	intel_runtime_pm_put(i915);
	mutex_unlock(&i915->drm.struct_mutex);
	mock_file_free(i915, file);
	return err;
}
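/*
 * Build a batch that emits one MI_STORE_DWORD_IMM per page: the exact
 * command encoding differs per generation (gen8+ takes a 64b address,
 * gen4-7 a 32b address, older gens need MI_MEM_VIRTUAL), so the batch is
 * assembled by hand before being pinned into the target vm.
 */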
static struct i915_vma *
gpu_fill_dw(struct i915_vma *vma, u64 offset, unsigned long count, u32 value)
{
	struct drm_i915_gem_object *obj;
	const int gen = INTEL_GEN(vma->vm->i915);
	unsigned long n, size;
	u32 *cmd;
	int err;

	size = (4 * count + 1) * sizeof(u32);
	size = round_up(size, PAGE_SIZE);
	obj = i915_gem_object_create_internal(vma->vm->i915, size);
	if (IS_ERR(obj))
		return ERR_CAST(obj);

	cmd = i915_gem_object_pin_map(obj, I915_MAP_WB);
	if (IS_ERR(cmd)) {
		err = PTR_ERR(cmd);
		goto err;
	}

	GEM_BUG_ON(offset + (count - 1) * PAGE_SIZE > vma->node.size);
	offset += vma->node.start;

	for (n = 0; n < count; n++) {
		if (gen >= 8) {
			*cmd++ = MI_STORE_DWORD_IMM_GEN4;
			*cmd++ = lower_32_bits(offset);
			*cmd++ = upper_32_bits(offset);
			*cmd++ = value;
		} else if (gen >= 4) {
			*cmd++ = MI_STORE_DWORD_IMM_GEN4 |
				(gen < 6 ? MI_USE_GGTT : 0);
			*cmd++ = 0;
			*cmd++ = offset;
			*cmd++ = value;
		} else {
			*cmd++ = MI_STORE_DWORD_IMM | MI_MEM_VIRTUAL;
			*cmd++ = offset;
			*cmd++ = value;
		}
		offset += PAGE_SIZE;
	}
	*cmd = MI_BATCH_BUFFER_END;
	i915_gem_object_unpin_map(obj);

	err = i915_gem_object_set_to_gtt_domain(obj, false);
	if (err)
		goto err;

	vma = i915_vma_instance(obj, vma->vm, NULL);
	if (IS_ERR(vma)) {
		err = PTR_ERR(vma);
		goto err;
	}

	err = i915_vma_pin(vma, 0, 0, PIN_USER);
	if (err)
		goto err;

	return vma;

err:
	i915_gem_object_put(obj);
	return ERR_PTR(err);
}
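/*
 * The huge_gem_object backing store reuses a small set of physical pages to
 * present a much larger GTT footprint: real_page_count() is the number of
 * distinct physical pages, fake_page_count() the number of GTT pages they
 * are mapped onto.
 */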
static unsigned long real_page_count(struct drm_i915_gem_object *obj)
{
	return huge_gem_object_phys_size(obj) >> PAGE_SHIFT;
}

static unsigned long fake_page_count(struct drm_i915_gem_object *obj)
{
	return huge_gem_object_dma_size(obj) >> PAGE_SHIFT;
}
static int gpu_fill(struct drm_i915_gem_object *obj,
		    struct i915_gem_context *ctx,
		    struct intel_engine_cs *engine,
		    unsigned int dw)
{
	struct drm_i915_private *i915 = to_i915(obj->base.dev);
	struct i915_address_space *vm =
		ctx->ppgtt ? &ctx->ppgtt->vm : &i915->ggtt.vm;
	struct i915_request *rq;
	struct i915_vma *vma;
	struct i915_vma *batch;
	unsigned int flags;
	int err;

	GEM_BUG_ON(obj->base.size > vm->total);
	GEM_BUG_ON(!intel_engine_can_store_dword(engine));

	vma = i915_vma_instance(obj, vm, NULL);
	if (IS_ERR(vma))
		return PTR_ERR(vma);

	err = i915_gem_object_set_to_gtt_domain(obj, false);
	if (err)
		return err;

	err = i915_vma_pin(vma, 0, 0, PIN_HIGH | PIN_USER);
	if (err)
		return err;

	/* Within the GTT the huge object maps every page onto
	 * its 1024 real pages (using phys_pfn = dma_pfn % 1024).
	 * We set the nth dword within the page using the nth
	 * mapping via the GTT - this should exercise the GTT mapping
	 * whilst checking that each context provides a unique view
	 * into the object.
	 */
	batch = gpu_fill_dw(vma,
			    (dw * real_page_count(obj)) << PAGE_SHIFT |
			    (dw * sizeof(u32)),
			    real_page_count(obj),
			    dw | 1);
	if (IS_ERR(batch)) {
		err = PTR_ERR(batch);
		goto err_vma;
	}

	rq = i915_request_alloc(engine, ctx);
	if (IS_ERR(rq)) {
		err = PTR_ERR(rq);
		goto err_batch;
	}

	flags = 0;
	if (INTEL_GEN(vm->i915) <= 5)
		flags |= I915_DISPATCH_SECURE;

	err = engine->emit_bb_start(rq,
				    batch->node.start, batch->node.size,
				    flags);
	if (err)
		goto err_request;

	err = i915_vma_move_to_active(batch, rq, 0);
	if (err)
		goto skip_request;

	err = i915_vma_move_to_active(vma, rq, EXEC_OBJECT_WRITE);
	if (err)
		goto skip_request;

	i915_gem_object_set_active_reference(batch->obj);
	i915_vma_unpin(batch);
	i915_vma_close(batch);

	i915_vma_unpin(vma);
	i915_request_add(rq);
	return 0;

skip_request:
	i915_request_skip(rq, err);
err_request:
	i915_request_add(rq);
err_batch:
	i915_vma_unpin(batch);
err_vma:
	i915_vma_unpin(vma);
	return err;
}
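/*
 * Seed every dword of every backing page with a known value using the CPU,
 * flushing manually when the platform lacks LLC so the GPU sees the writes.
 */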
static int cpu_fill(struct drm_i915_gem_object *obj, u32 value)
{
	const bool has_llc = HAS_LLC(to_i915(obj->base.dev));
	unsigned int n, m, need_flush;
	int err;

	err = i915_gem_obj_prepare_shmem_write(obj, &need_flush);
	if (err)
		return err;

	for (n = 0; n < real_page_count(obj); n++) {
		u32 *map;

		map = kmap_atomic(i915_gem_object_get_page(obj, n));
		for (m = 0; m < DW_PER_PAGE; m++)
			map[m] = value;
		if (!has_llc)
			drm_clflush_virt_range(map, PAGE_SIZE);
		kunmap_atomic(map);
	}

	i915_gem_obj_finish_shmem_access(obj);
	obj->read_domains = I915_GEM_DOMAIN_GTT | I915_GEM_DOMAIN_CPU;
	obj->write_domain = 0;
	return 0;
}
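/*
 * Verify the object contents from the CPU: the first 'max' dwords of each
 * page must have been overwritten by the GPU fills, while the remainder
 * must still hold the STACK_MAGIC value written by cpu_fill().
 */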
static int cpu_check(struct drm_i915_gem_object *obj, unsigned int max)
{
	unsigned int n, m, needs_flush;
	int err;

	err = i915_gem_obj_prepare_shmem_read(obj, &needs_flush);
	if (err)
		return err;

	for (n = 0; n < real_page_count(obj); n++) {
		u32 *map;

		map = kmap_atomic(i915_gem_object_get_page(obj, n));
		if (needs_flush & CLFLUSH_BEFORE)
			drm_clflush_virt_range(map, PAGE_SIZE);

		for (m = 0; m < max; m++) {
			if (map[m] != (m | 1)) {
				pr_err("Invalid value at page %d, offset %d: found %x expected %x\n",
				       n, m, map[m], m | 1);
				err = -EINVAL;
				goto out_unmap;
			}
		}

		for (; m < DW_PER_PAGE; m++) {
			if (map[m] != STACK_MAGIC) {
				pr_err("Invalid value at page %d, offset %d: found %x expected %x\n",
				       n, m, map[m], STACK_MAGIC);
				err = -EINVAL;
				goto out_unmap;
			}
		}

out_unmap:
		kunmap_atomic(map);
		if (err)
			break;
	}

	i915_gem_obj_finish_shmem_access(obj);
	return err;
}
static int file_add_object(struct drm_file *file,
			   struct drm_i915_gem_object *obj)
{
	int err;

	GEM_BUG_ON(obj->base.handle_count);

	/* tie the object to the drm_file for easy reaping */
	err = idr_alloc(&file->object_idr, &obj->base, 1, 0, GFP_KERNEL);
	if (err < 0)
		return err;

	i915_gem_object_get(obj);
	obj->base.handle_count++;
	return 0;
}
static struct drm_i915_gem_object *
create_test_object(struct i915_gem_context *ctx,
		   struct drm_file *file,
		   struct list_head *objects)
{
	struct drm_i915_gem_object *obj;
	struct i915_address_space *vm =
		ctx->ppgtt ? &ctx->ppgtt->vm : &ctx->i915->ggtt.vm;
	u64 size;
	int err;

	size = min(vm->total / 2, 1024ull * DW_PER_PAGE * PAGE_SIZE);
	size = round_down(size, DW_PER_PAGE * PAGE_SIZE);

	obj = huge_gem_object(ctx->i915, DW_PER_PAGE * PAGE_SIZE, size);
	if (IS_ERR(obj))
		return obj;

	err = file_add_object(file, obj);
	i915_gem_object_put(obj);
	if (err)
		return ERR_PTR(err);

	err = cpu_fill(obj, STACK_MAGIC);
	if (err) {
		pr_err("Failed to fill object with cpu, err=%d\n",
		       err);
		return ERR_PTR(err);
	}

	list_add_tail(&obj->st_link, objects);
	return obj;
}
static unsigned long max_dwords(struct drm_i915_gem_object *obj)
{
	unsigned long npages = fake_page_count(obj);

	GEM_BUG_ON(!IS_ALIGNED(npages, DW_PER_PAGE));
	return npages / DW_PER_PAGE;
}
static int igt_ctx_exec(void *arg)
{
	struct drm_i915_private *i915 = arg;
	struct drm_i915_gem_object *obj = NULL;
	unsigned long ncontexts, ndwords, dw;
	struct drm_file *file;
	IGT_TIMEOUT(end_time);
	LIST_HEAD(objects);
	struct live_test t;
	int err = -ENODEV;

	/*
	 * Create a few different contexts (with different mm) and write
	 * through each ctx/mm using the GPU making sure those writes end
	 * up in the expected pages of our obj.
	 */

	if (!DRIVER_CAPS(i915)->has_logical_contexts)
		return 0;

	file = mock_file(i915);
	if (IS_ERR(file))
		return PTR_ERR(file);

	mutex_lock(&i915->drm.struct_mutex);

	err = begin_live_test(&t, i915, __func__, "");
	if (err)
		goto out_unlock;

	ncontexts = 0;
	ndwords = 0;
	dw = 0;
	while (!time_after(jiffies, end_time)) {
		struct intel_engine_cs *engine;
		struct i915_gem_context *ctx;
		unsigned int id;

		ctx = i915_gem_create_context(i915, file->driver_priv);
		if (IS_ERR(ctx)) {
			err = PTR_ERR(ctx);
			goto out_unlock;
		}

		for_each_engine(engine, i915, id) {
			if (!engine->context_size)
				continue; /* No logical context support in HW */

			if (!intel_engine_can_store_dword(engine))
				continue;

			if (!obj) {
				obj = create_test_object(ctx, file, &objects);
				if (IS_ERR(obj)) {
					err = PTR_ERR(obj);
					goto out_unlock;
				}
			}

			intel_runtime_pm_get(i915);
			err = gpu_fill(obj, ctx, engine, dw);
			intel_runtime_pm_put(i915);
			if (err) {
				pr_err("Failed to fill dword %lu [%lu/%lu] with gpu (%s) in ctx %u [full-ppgtt? %s], err=%d\n",
				       ndwords, dw, max_dwords(obj),
				       engine->name, ctx->hw_id,
				       yesno(!!ctx->ppgtt), err);
				goto out_unlock;
			}

			if (++dw == max_dwords(obj)) {
				obj = NULL;
				dw = 0;
			}
			ndwords++;
		}
		ncontexts++;
	}
	pr_info("Submitted %lu contexts (across %u engines), filling %lu dwords\n",
		ncontexts, INTEL_INFO(i915)->num_rings, ndwords);

	dw = 0;
	list_for_each_entry(obj, &objects, st_link) {
		unsigned int rem =
			min_t(unsigned int, ndwords - dw, max_dwords(obj));

		err = cpu_check(obj, rem);
		if (err)
			break;

		dw += rem;
	}

out_unlock:
	if (end_live_test(&t))
		err = -EIO;
	mutex_unlock(&i915->drm.struct_mutex);

	mock_file_free(i915, file);
	return err;
}
static int igt_ctx_readonly(void *arg)
{
	struct drm_i915_private *i915 = arg;
	struct drm_i915_gem_object *obj = NULL;
	struct i915_gem_context *ctx;
	struct i915_hw_ppgtt *ppgtt;
	unsigned long ndwords, dw;
	struct drm_file *file;
	I915_RND_STATE(prng);
	IGT_TIMEOUT(end_time);
	LIST_HEAD(objects);
	struct live_test t;
	int err = -ENODEV;

	/*
	 * Create a few read-only objects (with the occasional writable object)
	 * and try to write into these objects checking that the GPU discards
	 * any write to a read-only object.
	 */

	file = mock_file(i915);
	if (IS_ERR(file))
		return PTR_ERR(file);

	mutex_lock(&i915->drm.struct_mutex);

	err = begin_live_test(&t, i915, __func__, "");
	if (err)
		goto out_unlock;

	ctx = i915_gem_create_context(i915, file->driver_priv);
	if (IS_ERR(ctx)) {
		err = PTR_ERR(ctx);
		goto out_unlock;
	}

	ppgtt = ctx->ppgtt ?: i915->mm.aliasing_ppgtt;
	if (!ppgtt || !ppgtt->vm.has_read_only) {
		err = 0;
		goto out_unlock;
	}

	ndwords = 0;
	dw = 0;
	while (!time_after(jiffies, end_time)) {
		struct intel_engine_cs *engine;
		unsigned int id;

		for_each_engine(engine, i915, id) {
			if (!intel_engine_can_store_dword(engine))
				continue;

			if (!obj) {
				obj = create_test_object(ctx, file, &objects);
				if (IS_ERR(obj)) {
					err = PTR_ERR(obj);
					goto out_unlock;
				}

				if (prandom_u32_state(&prng) & 1)
					i915_gem_object_set_readonly(obj);
			}

			intel_runtime_pm_get(i915);
			err = gpu_fill(obj, ctx, engine, dw);
			intel_runtime_pm_put(i915);
			if (err) {
				pr_err("Failed to fill dword %lu [%lu/%lu] with gpu (%s) in ctx %u [full-ppgtt? %s], err=%d\n",
				       ndwords, dw, max_dwords(obj),
				       engine->name, ctx->hw_id,
				       yesno(!!ctx->ppgtt), err);
				goto out_unlock;
			}

			if (++dw == max_dwords(obj)) {
				obj = NULL;
				dw = 0;
			}
			ndwords++;
		}
	}
	pr_info("Submitted %lu dwords (across %u engines)\n",
		ndwords, INTEL_INFO(i915)->num_rings);

	dw = 0;
	list_for_each_entry(obj, &objects, st_link) {
		unsigned int rem =
			min_t(unsigned int, ndwords - dw, max_dwords(obj));
		unsigned int num_writes;

		num_writes = rem;
		if (i915_gem_object_is_readonly(obj))
			num_writes = 0;

		err = cpu_check(obj, num_writes);
		if (err)
			break;

		dw += rem;
	}

out_unlock:
	if (end_live_test(&t))
		err = -EIO;
	mutex_unlock(&i915->drm.struct_mutex);

	mock_file_free(i915, file);
	return err;
}
static int check_scratch(struct i915_gem_context *ctx, u64 offset)
{
	struct drm_mm_node *node =
		__drm_mm_interval_first(&ctx->ppgtt->vm.mm,
					offset, offset + sizeof(u32) - 1);
	if (!node || node->start > offset)
		return 0;

	GEM_BUG_ON(offset >= node->start + node->size);

	pr_err("Target offset 0x%08x_%08x overlaps with a node in the mm!\n",
	       upper_32_bits(offset), lower_32_bits(offset));
	return -EINVAL;
}
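/*
 * Emit a single MI_STORE_DWORD_IMM from ctx into its own ppgtt at the chosen
 * scratch offset; used below to plant a value in one context's address space.
 */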
static int write_to_scratch(struct i915_gem_context *ctx,
			    struct intel_engine_cs *engine,
			    u64 offset, u32 value)
{
	struct drm_i915_private *i915 = ctx->i915;
	struct drm_i915_gem_object *obj;
	struct i915_request *rq;
	struct i915_vma *vma;
	u32 *cmd;
	int err;

	GEM_BUG_ON(offset < I915_GTT_PAGE_SIZE);

	obj = i915_gem_object_create_internal(i915, PAGE_SIZE);
	if (IS_ERR(obj))
		return PTR_ERR(obj);

	cmd = i915_gem_object_pin_map(obj, I915_MAP_WB);
	if (IS_ERR(cmd)) {
		err = PTR_ERR(cmd);
		goto err;
	}

	*cmd++ = MI_STORE_DWORD_IMM_GEN4;
	if (INTEL_GEN(i915) >= 8) {
		*cmd++ = lower_32_bits(offset);
		*cmd++ = upper_32_bits(offset);
	} else {
		*cmd++ = 0;
		*cmd++ = offset;
	}
	*cmd++ = value;
	*cmd = MI_BATCH_BUFFER_END;
	i915_gem_object_unpin_map(obj);

	err = i915_gem_object_set_to_gtt_domain(obj, false);
	if (err)
		goto err;

	vma = i915_vma_instance(obj, &ctx->ppgtt->vm, NULL);
	if (IS_ERR(vma)) {
		err = PTR_ERR(vma);
		goto err;
	}

	err = i915_vma_pin(vma, 0, 0, PIN_USER | PIN_OFFSET_FIXED);
	if (err)
		goto err;

	err = check_scratch(ctx, offset);
	if (err)
		goto err_unpin;

	rq = i915_request_alloc(engine, ctx);
	if (IS_ERR(rq)) {
		err = PTR_ERR(rq);
		goto err_unpin;
	}

	err = engine->emit_bb_start(rq, vma->node.start, vma->node.size, 0);
	if (err)
		goto err_request;

	err = i915_vma_move_to_active(vma, rq, 0);
	if (err)
		goto skip_request;

	i915_gem_object_set_active_reference(obj);
	i915_vma_unpin(vma);
	i915_vma_close(vma);

	i915_request_add(rq);
	return 0;

skip_request:
	i915_request_skip(rq, err);
err_request:
	i915_request_add(rq);
err_unpin:
	i915_vma_unpin(vma);
err:
	i915_gem_object_put(obj);
	return err;
}
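/*
 * Read a dword back from the scratch offset of ctx's ppgtt by loading it
 * into a general purpose register (MI_LOAD_REGISTER_MEM) and then storing
 * that register into our result object (MI_STORE_REGISTER_MEM), which is
 * finally inspected with the CPU.
 */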
static int read_from_scratch(struct i915_gem_context *ctx,
			     struct intel_engine_cs *engine,
			     u64 offset, u32 *value)
{
	struct drm_i915_private *i915 = ctx->i915;
	struct drm_i915_gem_object *obj;
	const u32 RCS_GPR0 = 0x2600; /* not all engines have their own GPR! */
	const u32 result = 0x100;
	struct i915_request *rq;
	struct i915_vma *vma;
	u32 *cmd;
	int err;

	GEM_BUG_ON(offset < I915_GTT_PAGE_SIZE);

	obj = i915_gem_object_create_internal(i915, PAGE_SIZE);
	if (IS_ERR(obj))
		return PTR_ERR(obj);

	cmd = i915_gem_object_pin_map(obj, I915_MAP_WB);
	if (IS_ERR(cmd)) {
		err = PTR_ERR(cmd);
		goto err;
	}

	memset(cmd, POISON_INUSE, PAGE_SIZE);
	if (INTEL_GEN(i915) >= 8) {
		*cmd++ = MI_LOAD_REGISTER_MEM_GEN8;
		*cmd++ = RCS_GPR0;
		*cmd++ = lower_32_bits(offset);
		*cmd++ = upper_32_bits(offset);
		*cmd++ = MI_STORE_REGISTER_MEM_GEN8;
		*cmd++ = RCS_GPR0;
		*cmd++ = result;
		*cmd++ = 0;
	} else {
		*cmd++ = MI_LOAD_REGISTER_MEM;
		*cmd++ = RCS_GPR0;
		*cmd++ = offset;
		*cmd++ = MI_STORE_REGISTER_MEM;
		*cmd++ = RCS_GPR0;
		*cmd++ = result;
	}
	*cmd = MI_BATCH_BUFFER_END;
	i915_gem_object_unpin_map(obj);

	err = i915_gem_object_set_to_gtt_domain(obj, false);
	if (err)
		goto err;

	vma = i915_vma_instance(obj, &ctx->ppgtt->vm, NULL);
	if (IS_ERR(vma)) {
		err = PTR_ERR(vma);
		goto err;
	}

	err = i915_vma_pin(vma, 0, 0, PIN_USER | PIN_OFFSET_FIXED);
	if (err)
		goto err;

	err = check_scratch(ctx, offset);
	if (err)
		goto err_unpin;

	rq = i915_request_alloc(engine, ctx);
	if (IS_ERR(rq)) {
		err = PTR_ERR(rq);
		goto err_unpin;
	}

	err = engine->emit_bb_start(rq, vma->node.start, vma->node.size, 0);
	if (err)
		goto err_request;

	err = i915_vma_move_to_active(vma, rq, EXEC_OBJECT_WRITE);
	if (err)
		goto skip_request;

	i915_vma_unpin(vma);
	i915_vma_close(vma);

	i915_request_add(rq);

	err = i915_gem_object_set_to_cpu_domain(obj, false);
	if (err)
		goto err;

	cmd = i915_gem_object_pin_map(obj, I915_MAP_WB);
	if (IS_ERR(cmd)) {
		err = PTR_ERR(cmd);
		goto err;
	}

	*value = cmd[result / sizeof(*cmd)];
	i915_gem_object_unpin_map(obj);
	i915_gem_object_put(obj);
	return 0;

skip_request:
	i915_request_skip(rq, err);
err_request:
	i915_request_add(rq);
err_unpin:
	i915_vma_unpin(vma);
err:
	i915_gem_object_put(obj);
	return err;
}
static int igt_vm_isolation(void *arg)
{
	struct drm_i915_private *i915 = arg;
	struct i915_gem_context *ctx_a, *ctx_b;
	struct intel_engine_cs *engine;
	struct drm_file *file;
	I915_RND_STATE(prng);
	unsigned long count;
	struct live_test t;
	unsigned int id;
	u64 vm_total;
	int err;

	if (INTEL_GEN(i915) < 7)
		return 0;

	/*
	 * The simple goal here is that a write into one context is not
	 * observed in a second (separate page tables and scratch).
	 */

	file = mock_file(i915);
	if (IS_ERR(file))
		return PTR_ERR(file);

	mutex_lock(&i915->drm.struct_mutex);

	err = begin_live_test(&t, i915, __func__, "");
	if (err)
		goto out_unlock;

	ctx_a = i915_gem_create_context(i915, file->driver_priv);
	if (IS_ERR(ctx_a)) {
		err = PTR_ERR(ctx_a);
		goto out_unlock;
	}

	ctx_b = i915_gem_create_context(i915, file->driver_priv);
	if (IS_ERR(ctx_b)) {
		err = PTR_ERR(ctx_b);
		goto out_unlock;
	}

	/* We can only test VM isolation if the VMs are distinct. */
	if (ctx_a->ppgtt == ctx_b->ppgtt)
		goto out_unlock;

	vm_total = ctx_a->ppgtt->vm.total;
	GEM_BUG_ON(ctx_b->ppgtt->vm.total != vm_total);
	vm_total -= I915_GTT_PAGE_SIZE;

	intel_runtime_pm_get(i915);

	count = 0;
	for_each_engine(engine, i915, id) {
		IGT_TIMEOUT(end_time);
		unsigned long this = 0;

		if (!intel_engine_can_store_dword(engine))
			continue;

		while (!__igt_timeout(end_time, NULL)) {
			u32 value = 0xc5c5c5c5;
			u64 offset;

			div64_u64_rem(i915_prandom_u64_state(&prng),
				      vm_total, &offset);
			offset &= -sizeof(u32);
			offset += I915_GTT_PAGE_SIZE;

			err = write_to_scratch(ctx_a, engine,
					       offset, 0xdeadbeef);
			if (err == 0)
				err = read_from_scratch(ctx_b, engine,
							offset, &value);
			if (err)
				goto out_rpm;

			if (value) {
				pr_err("%s: Read %08x from scratch (offset 0x%08x_%08x), after %lu reads!\n",
				       engine->name, value,
				       upper_32_bits(offset),
				       lower_32_bits(offset),
				       this);
				err = -EINVAL;
				goto out_rpm;
			}

			this++;
		}
		count += this;
	}
	pr_info("Checked %lu scratch offsets across %d engines\n",
		count, INTEL_INFO(i915)->num_rings);

out_rpm:
	intel_runtime_pm_put(i915);
out_unlock:
	if (end_live_test(&t))
		err = -EIO;
	mutex_unlock(&i915->drm.struct_mutex);

	mock_file_free(i915, file);
	return err;
}
static __maybe_unused const char *
__engine_name(struct drm_i915_private *i915, unsigned int engines)
{
	struct intel_engine_cs *engine;
	unsigned int tmp;

	if (engines == ALL_ENGINES)
		return "all";

	for_each_engine_masked(engine, i915, engines, tmp)
		return engine->name;

	return "none";
}
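/*
 * Submit a simple request on each selected engine to force it out of the
 * kernel context, then ask for a switch back to the kernel context and
 * verify that every engine both idles there and that a second switch does
 * not generate any further requests.
 */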
static int __igt_switch_to_kernel_context(struct drm_i915_private *i915,
					  struct i915_gem_context *ctx,
					  unsigned int engines)
{
	struct intel_engine_cs *engine;
	unsigned int tmp;
	int err;

	GEM_TRACE("Testing %s\n", __engine_name(i915, engines));
	for_each_engine_masked(engine, i915, engines, tmp) {
		struct i915_request *rq;

		rq = i915_request_alloc(engine, ctx);
		if (IS_ERR(rq))
			return PTR_ERR(rq);

		i915_request_add(rq);
	}

	err = i915_gem_switch_to_kernel_context(i915);
	if (err)
		return err;

	for_each_engine_masked(engine, i915, engines, tmp) {
		if (!engine_has_kernel_context_barrier(engine)) {
			pr_err("kernel context not last on engine %s!\n",
			       engine->name);
			return -EINVAL;
		}
	}

	err = i915_gem_wait_for_idle(i915,
				     I915_WAIT_LOCKED,
				     MAX_SCHEDULE_TIMEOUT);
	if (err)
		return err;

	GEM_BUG_ON(i915->gt.active_requests);
	for_each_engine_masked(engine, i915, engines, tmp) {
		if (engine->last_retired_context->gem_context != i915->kernel_context) {
			pr_err("engine %s not idling in kernel context!\n",
			       engine->name);
			return -EINVAL;
		}
	}

	err = i915_gem_switch_to_kernel_context(i915);
	if (err)
		return err;

	if (i915->gt.active_requests) {
		pr_err("switch-to-kernel-context emitted %d requests even though it should already be idling in the kernel context\n",
		       i915->gt.active_requests);
		return -EINVAL;
	}

	for_each_engine_masked(engine, i915, engines, tmp) {
		if (!intel_engine_has_kernel_context(engine)) {
			pr_err("kernel context not last on engine %s!\n",
			       engine->name);
			return -EINVAL;
		}
	}

	return 0;
}
static int igt_switch_to_kernel_context(void *arg)
{
	struct drm_i915_private *i915 = arg;
	struct intel_engine_cs *engine;
	struct i915_gem_context *ctx;
	enum intel_engine_id id;
	int err;

	/*
	 * A core premise of switching to the kernel context is that
	 * if an engine is already idling in the kernel context, we
	 * do not emit another request and wake it up. The other premise
	 * is that we do indeed end up idling in the kernel context.
	 */

	mutex_lock(&i915->drm.struct_mutex);
	intel_runtime_pm_get(i915);

	ctx = kernel_context(i915);
	if (IS_ERR(ctx)) {
		mutex_unlock(&i915->drm.struct_mutex);
		return PTR_ERR(ctx);
	}

	/* First check idling each individual engine */
	for_each_engine(engine, i915, id) {
		err = __igt_switch_to_kernel_context(i915, ctx, BIT(id));
		if (err)
			goto out_unlock;
	}

	err = __igt_switch_to_kernel_context(i915, ctx, ALL_ENGINES);
	if (err)
		goto out_unlock;

out_unlock:
	GEM_TRACE_DUMP_ON(err);
	if (igt_flush_test(i915, I915_WAIT_LOCKED))
		err = -EIO;

	intel_runtime_pm_put(i915);
	mutex_unlock(&i915->drm.struct_mutex);

	kernel_context_close(ctx);
	return err;
}
int i915_gem_context_mock_selftests(void)
{
	static const struct i915_subtest tests[] = {
		SUBTEST(igt_switch_to_kernel_context),
	};
	struct drm_i915_private *i915;
	int err;

	i915 = mock_gem_device();
	if (!i915)
		return -ENOMEM;

	err = i915_subtests(tests, i915);

	drm_dev_put(&i915->drm);
	return err;
}
int i915_gem_context_live_selftests(struct drm_i915_private *dev_priv)
{
	static const struct i915_subtest tests[] = {
		SUBTEST(igt_switch_to_kernel_context),
		SUBTEST(live_nop_switch),
		SUBTEST(igt_ctx_exec),
		SUBTEST(igt_ctx_readonly),
		SUBTEST(igt_vm_isolation),
	};

	if (i915_terminally_wedged(&dev_priv->gpu_error))
		return 0;

	return i915_subtests(tests, dev_priv);
}