/*
 * Copyright © 2016 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 */
#include <linux/prime_numbers.h>
#include <linux/pm_qos.h>
#include <linux/sort.h>

#include "gem/i915_gem_pm.h"
#include "gem/selftests/mock_context.h"

#include "gt/intel_engine_heartbeat.h"
#include "gt/intel_engine_pm.h"
#include "gt/intel_engine_user.h"
#include "gt/intel_gt.h"
#include "gt/intel_gt_clock_utils.h"
#include "gt/intel_gt_requests.h"
#include "gt/selftest_engine_heartbeat.h"

#include "i915_random.h"
#include "i915_selftest.h"
#include "igt_flush_test.h"
#include "igt_live_test.h"
#include "igt_spinner.h"
#include "lib_sw_fence.h"

#include "mock_drm.h"
#include "mock_gem_device.h"
50 static unsigned int num_uabi_engines(struct drm_i915_private *i915)
52 struct intel_engine_cs *engine;
56 for_each_uabi_engine(engine, i915)
62 static struct intel_engine_cs *rcs0(struct drm_i915_private *i915)
64 return intel_engine_lookup_user(i915, I915_ENGINE_CLASS_RENDER, 0);
67 static int igt_add_request(void *arg)
69 struct drm_i915_private *i915 = arg;
70 struct i915_request *request;
72 /* Basic preliminary test to create a request and let it loose! */
74 request = mock_request(rcs0(i915)->kernel_context, HZ / 10);
78 i915_request_add(request);
83 static int igt_wait_request(void *arg)
85 const long T = HZ / 4;
86 struct drm_i915_private *i915 = arg;
87 struct i915_request *request;
90 /* Submit a request, then wait upon it */
92 request = mock_request(rcs0(i915)->kernel_context, T);
96 i915_request_get(request);
98 if (i915_request_wait(request, 0, 0) != -ETIME) {
99 pr_err("request wait (busy query) succeeded (expected timeout before submit!)\n");
103 if (i915_request_wait(request, 0, T) != -ETIME) {
104 pr_err("request wait succeeded (expected timeout before submit!)\n");
108 if (i915_request_completed(request)) {
109 pr_err("request completed before submit!!\n");
113 i915_request_add(request);
115 if (i915_request_wait(request, 0, 0) != -ETIME) {
116 pr_err("request wait (busy query) succeeded (expected timeout after submit!)\n");
120 if (i915_request_completed(request)) {
121 pr_err("request completed immediately!\n");
125 if (i915_request_wait(request, 0, T / 2) != -ETIME) {
126 pr_err("request wait succeeded (expected timeout!)\n");
130 if (i915_request_wait(request, 0, T) == -ETIME) {
131 pr_err("request wait timed out!\n");
135 if (!i915_request_completed(request)) {
136 pr_err("request not complete after waiting!\n");
140 if (i915_request_wait(request, 0, T) == -ETIME) {
141 pr_err("request wait timed out when already complete!\n");
147 i915_request_put(request);
148 mock_device_flush(i915);
152 static int igt_fence_wait(void *arg)
154 const long T = HZ / 4;
155 struct drm_i915_private *i915 = arg;
156 struct i915_request *request;
159 /* Submit a request, treat it as a fence and wait upon it */
161 request = mock_request(rcs0(i915)->kernel_context, T);
165 if (dma_fence_wait_timeout(&request->fence, false, T) != -ETIME) {
166 pr_err("fence wait success before submit (expected timeout)!\n");
170 i915_request_add(request);
172 if (dma_fence_is_signaled(&request->fence)) {
173 pr_err("fence signaled immediately!\n");
177 if (dma_fence_wait_timeout(&request->fence, false, T / 2) != -ETIME) {
178 pr_err("fence wait success after submit (expected timeout)!\n");
182 if (dma_fence_wait_timeout(&request->fence, false, T) <= 0) {
183 pr_err("fence wait timed out (expected success)!\n");
187 if (!dma_fence_is_signaled(&request->fence)) {
188 pr_err("fence unsignaled after waiting!\n");
192 if (dma_fence_wait_timeout(&request->fence, false, T) <= 0) {
193 pr_err("fence wait timed out when complete (expected success)!\n");
199 mock_device_flush(i915);
203 static int igt_request_rewind(void *arg)
205 struct drm_i915_private *i915 = arg;
206 struct i915_request *request, *vip;
207 struct i915_gem_context *ctx[2];
208 struct intel_context *ce;
211 ctx[0] = mock_context(i915, "A");
213 ce = i915_gem_context_get_engine(ctx[0], RCS0);
214 GEM_BUG_ON(IS_ERR(ce));
215 request = mock_request(ce, 2 * HZ);
216 intel_context_put(ce);
222 i915_request_get(request);
223 i915_request_add(request);
225 ctx[1] = mock_context(i915, "B");
227 ce = i915_gem_context_get_engine(ctx[1], RCS0);
228 GEM_BUG_ON(IS_ERR(ce));
229 vip = mock_request(ce, 0);
230 intel_context_put(ce);
236 /* Simulate preemption by manual reordering */
237 if (!mock_cancel_request(request)) {
238 pr_err("failed to cancel request (already executed)!\n");
239 i915_request_add(vip);
242 i915_request_get(vip);
243 i915_request_add(vip);
245 request->engine->submit_request(request);
249 if (i915_request_wait(vip, 0, HZ) == -ETIME) {
250 pr_err("timed out waiting for high priority request\n");
254 if (i915_request_completed(request)) {
255 pr_err("low priority request already completed\n");
261 i915_request_put(vip);
263 mock_context_close(ctx[1]);
264 i915_request_put(request);
266 mock_context_close(ctx[0]);
267 mock_device_flush(i915);
272 struct intel_engine_cs *engine;
273 struct i915_gem_context **contexts;
274 atomic_long_t num_waits, num_fences;
275 int ncontexts, max_batch;
276 struct i915_request *(*request_alloc)(struct intel_context *ce);
279 static struct i915_request *
280 __mock_request_alloc(struct intel_context *ce)
282 return mock_request(ce, 0);
285 static struct i915_request *
286 __live_request_alloc(struct intel_context *ce)
288 return intel_context_create_request(ce);
291 static int __igt_breadcrumbs_smoketest(void *arg)
293 struct smoketest *t = arg;
294 const unsigned int max_batch = min(t->ncontexts, t->max_batch) - 1;
295 const unsigned int total = 4 * t->ncontexts + 1;
296 unsigned int num_waits = 0, num_fences = 0;
297 struct i915_request **requests;
298 I915_RND_STATE(prng);
303 * A very simple test to catch the most egregious of list handling bugs.
305 * At its heart, we simply create oodles of requests running across
306 * multiple kthreads and enable signaling on them, for the sole purpose
307 * of stressing our breadcrumb handling. The only inspection we do is
308 * that the fences were marked as signaled.
311 requests = kcalloc(total, sizeof(*requests), GFP_KERNEL);
315 order = i915_random_order(total, &prng);
321 while (!kthread_should_stop()) {
322 struct i915_sw_fence *submit, *wait;
323 unsigned int n, count;
325 submit = heap_fence_create(GFP_KERNEL);
331 wait = heap_fence_create(GFP_KERNEL);
333 i915_sw_fence_commit(submit);
334 heap_fence_put(submit);
339 i915_random_reorder(order, total, &prng);
340 count = 1 + i915_prandom_u32_max_state(max_batch, &prng);
342 for (n = 0; n < count; n++) {
343 struct i915_gem_context *ctx =
344 t->contexts[order[n] % t->ncontexts];
345 struct i915_request *rq;
346 struct intel_context *ce;
348 ce = i915_gem_context_get_engine(ctx, t->engine->legacy_idx);
349 GEM_BUG_ON(IS_ERR(ce));
350 rq = t->request_alloc(ce);
351 intel_context_put(ce);
358 err = i915_sw_fence_await_sw_fence_gfp(&rq->submit,
362 requests[n] = i915_request_get(rq);
363 i915_request_add(rq);
366 err = i915_sw_fence_await_dma_fence(wait,
372 i915_request_put(rq);
378 i915_sw_fence_commit(submit);
379 i915_sw_fence_commit(wait);
381 if (!wait_event_timeout(wait->wait,
382 i915_sw_fence_done(wait),
384 struct i915_request *rq = requests[count - 1];
386 pr_err("waiting for %d/%d fences (last %llx:%lld) on %s timed out!\n",
387 atomic_read(&wait->pending), count,
388 rq->fence.context, rq->fence.seqno,
392 intel_gt_set_wedged(t->engine->gt);
393 GEM_BUG_ON(!i915_request_completed(rq));
394 i915_sw_fence_wait(wait);
398 for (n = 0; n < count; n++) {
399 struct i915_request *rq = requests[n];
401 if (!test_bit(DMA_FENCE_FLAG_SIGNALED_BIT,
403 pr_err("%llu:%llu was not signaled!\n",
404 rq->fence.context, rq->fence.seqno);
408 i915_request_put(rq);
411 heap_fence_put(wait);
412 heap_fence_put(submit);
423 atomic_long_add(num_fences, &t->num_fences);
424 atomic_long_add(num_waits, &t->num_waits);
432 static int mock_breadcrumbs_smoketest(void *arg)
434 struct drm_i915_private *i915 = arg;
435 struct smoketest t = {
436 .engine = rcs0(i915),
439 .request_alloc = __mock_request_alloc
441 unsigned int ncpus = num_online_cpus();
442 struct task_struct **threads;
447 * Smoketest our breadcrumb/signal handling for requests across multiple
448 * threads. A very simple test to only catch the most egregious of bugs.
449 * See __igt_breadcrumbs_smoketest();
452 threads = kcalloc(ncpus, sizeof(*threads), GFP_KERNEL);
456 t.contexts = kcalloc(t.ncontexts, sizeof(*t.contexts), GFP_KERNEL);
462 for (n = 0; n < t.ncontexts; n++) {
463 t.contexts[n] = mock_context(t.engine->i915, "mock");
464 if (!t.contexts[n]) {
470 for (n = 0; n < ncpus; n++) {
471 threads[n] = kthread_run(__igt_breadcrumbs_smoketest,
473 if (IS_ERR(threads[n])) {
474 ret = PTR_ERR(threads[n]);
479 get_task_struct(threads[n]);
482 yield(); /* start all threads before we begin */
483 msleep(jiffies_to_msecs(i915_selftest.timeout_jiffies));
485 for (n = 0; n < ncpus; n++) {
488 err = kthread_stop(threads[n]);
492 put_task_struct(threads[n]);
494 pr_info("Completed %lu waits for %lu fence across %d cpus\n",
495 atomic_long_read(&t.num_waits),
496 atomic_long_read(&t.num_fences),
500 for (n = 0; n < t.ncontexts; n++) {
503 mock_context_close(t.contexts[n]);
int i915_request_mock_selftests(void)
{
	static const struct i915_subtest tests[] = {
		SUBTEST(igt_add_request),
		SUBTEST(igt_wait_request),
		SUBTEST(igt_fence_wait),
		SUBTEST(igt_request_rewind),
		SUBTEST(mock_breadcrumbs_smoketest),
	};
	struct drm_i915_private *i915;
	intel_wakeref_t wakeref;
	int err = 0;

	i915 = mock_gem_device();
	if (!i915)
		return -ENOMEM;

	with_intel_runtime_pm(&i915->runtime_pm, wakeref)
		err = i915_subtests(tests, i915);

	mock_destroy_device(i915);

	return err;
}
536 static int live_nop_request(void *arg)
538 struct drm_i915_private *i915 = arg;
539 struct intel_engine_cs *engine;
540 struct igt_live_test t;
544 * Submit various sized batches of empty requests, to each engine
545 * (individually), and wait for the batch to complete. We can check
546 * the overhead of submitting requests to the hardware.
549 for_each_uabi_engine(engine, i915) {
550 unsigned long n, prime;
551 IGT_TIMEOUT(end_time);
552 ktime_t times[2] = {};
554 err = igt_live_test_begin(&t, i915, __func__, engine->name);
558 intel_engine_pm_get(engine);
559 for_each_prime_number_from(prime, 1, 8192) {
560 struct i915_request *request = NULL;
562 times[1] = ktime_get_raw();
564 for (n = 0; n < prime; n++) {
565 i915_request_put(request);
566 request = i915_request_create(engine->kernel_context);
568 return PTR_ERR(request);
571 * This space is left intentionally blank.
573 * We do not actually want to perform any
574 * action with this request, we just want
575 * to measure the latency in allocation
576 * and submission of our breadcrumbs -
577 * ensuring that the bare request is sufficient
578 * for the system to work (i.e. proper HEAD
579 * tracking of the rings, interrupt handling,
580 * etc). It also gives us the lowest bounds
584 i915_request_get(request);
585 i915_request_add(request);
587 i915_request_wait(request, 0, MAX_SCHEDULE_TIMEOUT);
588 i915_request_put(request);
590 times[1] = ktime_sub(ktime_get_raw(), times[1]);
594 if (__igt_timeout(end_time, NULL))
597 intel_engine_pm_put(engine);
599 err = igt_live_test_end(&t);
603 pr_info("Request latencies on %s: 1 = %lluns, %lu = %lluns\n",
605 ktime_to_ns(times[0]),
606 prime, div64_u64(ktime_to_ns(times[1]), prime));
612 static int __cancel_inactive(struct intel_engine_cs *engine)
614 struct intel_context *ce;
615 struct igt_spinner spin;
616 struct i915_request *rq;
619 if (igt_spinner_init(&spin, engine->gt))
622 ce = intel_context_create(engine);
628 rq = igt_spinner_create_request(&spin, ce, MI_ARB_CHECK);
634 pr_debug("%s: Cancelling inactive request\n", engine->name);
635 i915_request_cancel(rq, -EINTR);
636 i915_request_get(rq);
637 i915_request_add(rq);
639 if (i915_request_wait(rq, 0, HZ / 5) < 0) {
640 struct drm_printer p = drm_info_printer(engine->i915->drm.dev);
642 pr_err("%s: Failed to cancel inactive request\n", engine->name);
643 intel_engine_dump(engine, &p, "%s\n", engine->name);
648 if (rq->fence.error != -EINTR) {
649 pr_err("%s: fence not cancelled (%u)\n",
650 engine->name, rq->fence.error);
655 i915_request_put(rq);
657 intel_context_put(ce);
659 igt_spinner_fini(&spin);
661 pr_err("%s: %s error %d\n", __func__, engine->name, err);
665 static int __cancel_active(struct intel_engine_cs *engine)
667 struct intel_context *ce;
668 struct igt_spinner spin;
669 struct i915_request *rq;
672 if (igt_spinner_init(&spin, engine->gt))
675 ce = intel_context_create(engine);
681 rq = igt_spinner_create_request(&spin, ce, MI_ARB_CHECK);
687 pr_debug("%s: Cancelling active request\n", engine->name);
688 i915_request_get(rq);
689 i915_request_add(rq);
690 if (!igt_wait_for_spinner(&spin, rq)) {
691 struct drm_printer p = drm_info_printer(engine->i915->drm.dev);
693 pr_err("Failed to start spinner on %s\n", engine->name);
694 intel_engine_dump(engine, &p, "%s\n", engine->name);
698 i915_request_cancel(rq, -EINTR);
700 if (i915_request_wait(rq, 0, HZ / 5) < 0) {
701 struct drm_printer p = drm_info_printer(engine->i915->drm.dev);
703 pr_err("%s: Failed to cancel active request\n", engine->name);
704 intel_engine_dump(engine, &p, "%s\n", engine->name);
709 if (rq->fence.error != -EINTR) {
710 pr_err("%s: fence not cancelled (%u)\n",
711 engine->name, rq->fence.error);
716 i915_request_put(rq);
718 intel_context_put(ce);
720 igt_spinner_fini(&spin);
722 pr_err("%s: %s error %d\n", __func__, engine->name, err);
726 static int __cancel_completed(struct intel_engine_cs *engine)
728 struct intel_context *ce;
729 struct igt_spinner spin;
730 struct i915_request *rq;
733 if (igt_spinner_init(&spin, engine->gt))
736 ce = intel_context_create(engine);
742 rq = igt_spinner_create_request(&spin, ce, MI_ARB_CHECK);
747 igt_spinner_end(&spin);
748 i915_request_get(rq);
749 i915_request_add(rq);
751 if (i915_request_wait(rq, 0, HZ / 5) < 0) {
756 pr_debug("%s: Cancelling completed request\n", engine->name);
757 i915_request_cancel(rq, -EINTR);
758 if (rq->fence.error) {
759 pr_err("%s: fence not cancelled (%u)\n",
760 engine->name, rq->fence.error);
765 i915_request_put(rq);
767 intel_context_put(ce);
769 igt_spinner_fini(&spin);
771 pr_err("%s: %s error %d\n", __func__, engine->name, err);
775 static int live_cancel_request(void *arg)
777 struct drm_i915_private *i915 = arg;
778 struct intel_engine_cs *engine;
781 * Check cancellation of requests. We expect to be able to immediately
782 * cancel active requests, even if they are currently on the GPU.
785 for_each_uabi_engine(engine, i915) {
786 struct igt_live_test t;
789 if (!intel_engine_has_preemption(engine))
792 err = igt_live_test_begin(&t, i915, __func__, engine->name);
796 err = __cancel_inactive(engine);
798 err = __cancel_active(engine);
800 err = __cancel_completed(engine);
802 err2 = igt_live_test_end(&t);
812 static struct i915_vma *empty_batch(struct drm_i915_private *i915)
814 struct drm_i915_gem_object *obj;
815 struct i915_vma *vma;
819 obj = i915_gem_object_create_internal(i915, PAGE_SIZE);
821 return ERR_CAST(obj);
823 cmd = i915_gem_object_pin_map_unlocked(obj, I915_MAP_WB);
829 *cmd = MI_BATCH_BUFFER_END;
831 __i915_gem_object_flush_map(obj, 0, 64);
832 i915_gem_object_unpin_map(obj);
834 intel_gt_chipset_flush(&i915->gt);
836 vma = i915_vma_instance(obj, &i915->ggtt.vm, NULL);
842 err = i915_vma_pin(vma, 0, 0, PIN_USER | PIN_GLOBAL);
/* Force the wait now to avoid including it in the benchmark */
847 err = i915_vma_sync(vma);
856 i915_gem_object_put(obj);
860 static struct i915_request *
861 empty_request(struct intel_engine_cs *engine,
862 struct i915_vma *batch)
864 struct i915_request *request;
867 request = i915_request_create(engine->kernel_context);
871 err = engine->emit_bb_start(request,
874 I915_DISPATCH_SECURE);
878 i915_request_get(request);
880 i915_request_add(request);
881 return err ? ERR_PTR(err) : request;
884 static int live_empty_request(void *arg)
886 struct drm_i915_private *i915 = arg;
887 struct intel_engine_cs *engine;
888 struct igt_live_test t;
889 struct i915_vma *batch;
893 * Submit various sized batches of empty requests, to each engine
894 * (individually), and wait for the batch to complete. We can check
895 * the overhead of submitting requests to the hardware.
898 batch = empty_batch(i915);
900 return PTR_ERR(batch);
902 for_each_uabi_engine(engine, i915) {
903 IGT_TIMEOUT(end_time);
904 struct i915_request *request;
905 unsigned long n, prime;
906 ktime_t times[2] = {};
908 err = igt_live_test_begin(&t, i915, __func__, engine->name);
912 intel_engine_pm_get(engine);
914 /* Warmup / preload */
915 request = empty_request(engine, batch);
916 if (IS_ERR(request)) {
917 err = PTR_ERR(request);
918 intel_engine_pm_put(engine);
921 i915_request_wait(request, 0, MAX_SCHEDULE_TIMEOUT);
923 for_each_prime_number_from(prime, 1, 8192) {
924 times[1] = ktime_get_raw();
926 for (n = 0; n < prime; n++) {
927 i915_request_put(request);
928 request = empty_request(engine, batch);
929 if (IS_ERR(request)) {
930 err = PTR_ERR(request);
931 intel_engine_pm_put(engine);
935 i915_request_wait(request, 0, MAX_SCHEDULE_TIMEOUT);
937 times[1] = ktime_sub(ktime_get_raw(), times[1]);
941 if (__igt_timeout(end_time, NULL))
944 i915_request_put(request);
945 intel_engine_pm_put(engine);
947 err = igt_live_test_end(&t);
951 pr_info("Batch latencies on %s: 1 = %lluns, %lu = %lluns\n",
953 ktime_to_ns(times[0]),
954 prime, div64_u64(ktime_to_ns(times[1]), prime));
958 i915_vma_unpin(batch);
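
/*
 * The "recursive" batch jumps back to its own start with
 * MI_BATCH_BUFFER_START, so once submitted it keeps the engine busy
 * indefinitely until recursive_batch_resolve() rewrites its first
 * dword to MI_BATCH_BUFFER_END.
 */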
963 static struct i915_vma *recursive_batch(struct drm_i915_private *i915)
965 struct drm_i915_gem_object *obj;
966 const int gen = INTEL_GEN(i915);
967 struct i915_vma *vma;
971 obj = i915_gem_object_create_internal(i915, PAGE_SIZE);
973 return ERR_CAST(obj);
975 vma = i915_vma_instance(obj, i915->gt.vm, NULL);
981 err = i915_vma_pin(vma, 0, 0, PIN_USER);
985 cmd = i915_gem_object_pin_map_unlocked(obj, I915_MAP_WC);
992 *cmd++ = MI_BATCH_BUFFER_START | 1 << 8 | 1;
993 *cmd++ = lower_32_bits(vma->node.start);
994 *cmd++ = upper_32_bits(vma->node.start);
995 } else if (gen >= 6) {
996 *cmd++ = MI_BATCH_BUFFER_START | 1 << 8;
997 *cmd++ = lower_32_bits(vma->node.start);
999 *cmd++ = MI_BATCH_BUFFER_START | MI_BATCH_GTT;
1000 *cmd++ = lower_32_bits(vma->node.start);
1002 *cmd++ = MI_BATCH_BUFFER_END; /* terminate early in case of error */
1004 __i915_gem_object_flush_map(obj, 0, 64);
1005 i915_gem_object_unpin_map(obj);
1007 intel_gt_chipset_flush(&i915->gt);
1012 i915_gem_object_put(obj);
1013 return ERR_PTR(err);
1016 static int recursive_batch_resolve(struct i915_vma *batch)
1020 cmd = i915_gem_object_pin_map_unlocked(batch->obj, I915_MAP_WC);
1022 return PTR_ERR(cmd);
1024 *cmd = MI_BATCH_BUFFER_END;
1026 __i915_gem_object_flush_map(batch->obj, 0, sizeof(*cmd));
1027 i915_gem_object_unpin_map(batch->obj);
1029 intel_gt_chipset_flush(batch->vm->gt);
1034 static int live_all_engines(void *arg)
1036 struct drm_i915_private *i915 = arg;
1037 const unsigned int nengines = num_uabi_engines(i915);
1038 struct intel_engine_cs *engine;
1039 struct i915_request **request;
1040 struct igt_live_test t;
1041 struct i915_vma *batch;
1046 * Check we can submit requests to all engines simultaneously. We
1047 * send a recursive batch to each engine - checking that we don't
1048 * block doing so, and that they don't complete too soon.
1051 request = kcalloc(nengines, sizeof(*request), GFP_KERNEL);
1055 err = igt_live_test_begin(&t, i915, __func__, "");
1059 batch = recursive_batch(i915);
1060 if (IS_ERR(batch)) {
1061 err = PTR_ERR(batch);
1062 pr_err("%s: Unable to create batch, err=%d\n", __func__, err);
1066 i915_vma_lock(batch);
1069 for_each_uabi_engine(engine, i915) {
1070 request[idx] = intel_engine_create_kernel_request(engine);
1071 if (IS_ERR(request[idx])) {
1072 err = PTR_ERR(request[idx]);
1073 pr_err("%s: Request allocation failed with err=%d\n",
1078 err = i915_request_await_object(request[idx], batch->obj, 0);
1080 err = i915_vma_move_to_active(batch, request[idx], 0);
1083 err = engine->emit_bb_start(request[idx],
1088 request[idx]->batch = batch;
1090 i915_request_get(request[idx]);
1091 i915_request_add(request[idx]);
1095 i915_vma_unlock(batch);
1098 for_each_uabi_engine(engine, i915) {
1099 if (i915_request_completed(request[idx])) {
1100 pr_err("%s(%s): request completed too early!\n",
1101 __func__, engine->name);
1108 err = recursive_batch_resolve(batch);
1110 pr_err("%s: failed to resolve batch, err=%d\n", __func__, err);
1115 for_each_uabi_engine(engine, i915) {
1118 timeout = i915_request_wait(request[idx], 0,
1119 MAX_SCHEDULE_TIMEOUT);
1122 pr_err("%s: error waiting for request on %s, err=%d\n",
1123 __func__, engine->name, err);
1127 GEM_BUG_ON(!i915_request_completed(request[idx]));
1128 i915_request_put(request[idx]);
1129 request[idx] = NULL;
1133 err = igt_live_test_end(&t);
1137 for_each_uabi_engine(engine, i915) {
1139 i915_request_put(request[idx]);
1142 i915_vma_unpin(batch);
1143 i915_vma_put(batch);
1149 static int live_sequential_engines(void *arg)
1151 struct drm_i915_private *i915 = arg;
1152 const unsigned int nengines = num_uabi_engines(i915);
1153 struct i915_request **request;
1154 struct i915_request *prev = NULL;
1155 struct intel_engine_cs *engine;
1156 struct igt_live_test t;
1161 * Check we can submit requests to all engines sequentially, such
1162 * that each successive request waits for the earlier ones. This
1163 * tests that we don't execute requests out of order, even though
1164 * they are running on independent engines.
1167 request = kcalloc(nengines, sizeof(*request), GFP_KERNEL);
1171 err = igt_live_test_begin(&t, i915, __func__, "");
1176 for_each_uabi_engine(engine, i915) {
1177 struct i915_vma *batch;
1179 batch = recursive_batch(i915);
1180 if (IS_ERR(batch)) {
1181 err = PTR_ERR(batch);
1182 pr_err("%s: Unable to create batch for %s, err=%d\n",
1183 __func__, engine->name, err);
1187 i915_vma_lock(batch);
1188 request[idx] = intel_engine_create_kernel_request(engine);
1189 if (IS_ERR(request[idx])) {
1190 err = PTR_ERR(request[idx]);
1191 pr_err("%s: Request allocation failed for %s with err=%d\n",
1192 __func__, engine->name, err);
1197 err = i915_request_await_dma_fence(request[idx],
1200 i915_request_add(request[idx]);
1201 pr_err("%s: Request await failed for %s with err=%d\n",
1202 __func__, engine->name, err);
1207 err = i915_request_await_object(request[idx],
1210 err = i915_vma_move_to_active(batch, request[idx], 0);
1213 err = engine->emit_bb_start(request[idx],
1218 request[idx]->batch = batch;
1220 i915_request_get(request[idx]);
1221 i915_request_add(request[idx]);
1223 prev = request[idx];
1227 i915_vma_unlock(batch);
1233 for_each_uabi_engine(engine, i915) {
1236 if (i915_request_completed(request[idx])) {
1237 pr_err("%s(%s): request completed too early!\n",
1238 __func__, engine->name);
1243 err = recursive_batch_resolve(request[idx]->batch);
1245 pr_err("%s: failed to resolve batch, err=%d\n",
1250 timeout = i915_request_wait(request[idx], 0,
1251 MAX_SCHEDULE_TIMEOUT);
1254 pr_err("%s: error waiting for request on %s, err=%d\n",
1255 __func__, engine->name, err);
1259 GEM_BUG_ON(!i915_request_completed(request[idx]));
1263 err = igt_live_test_end(&t);
1267 for_each_uabi_engine(engine, i915) {
1273 cmd = i915_gem_object_pin_map_unlocked(request[idx]->batch->obj,
1276 *cmd = MI_BATCH_BUFFER_END;
1278 __i915_gem_object_flush_map(request[idx]->batch->obj,
1280 i915_gem_object_unpin_map(request[idx]->batch->obj);
1282 intel_gt_chipset_flush(engine->gt);
1285 i915_vma_put(request[idx]->batch);
1286 i915_request_put(request[idx]);
1294 static int __live_parallel_engine1(void *arg)
1296 struct intel_engine_cs *engine = arg;
1297 IGT_TIMEOUT(end_time);
1298 unsigned long count;
1302 intel_engine_pm_get(engine);
1304 struct i915_request *rq;
1306 rq = i915_request_create(engine->kernel_context);
1312 i915_request_get(rq);
1313 i915_request_add(rq);
1316 if (i915_request_wait(rq, 0, HZ / 5) < 0)
1318 i915_request_put(rq);
1323 } while (!__igt_timeout(end_time, NULL));
1324 intel_engine_pm_put(engine);
1326 pr_info("%s: %lu request + sync\n", engine->name, count);
1330 static int __live_parallel_engineN(void *arg)
1332 struct intel_engine_cs *engine = arg;
1333 IGT_TIMEOUT(end_time);
1334 unsigned long count;
1338 intel_engine_pm_get(engine);
1340 struct i915_request *rq;
1342 rq = i915_request_create(engine->kernel_context);
1348 i915_request_add(rq);
1350 } while (!__igt_timeout(end_time, NULL));
1351 intel_engine_pm_put(engine);
1353 pr_info("%s: %lu requests\n", engine->name, count);
1357 static bool wake_all(struct drm_i915_private *i915)
1359 if (atomic_dec_and_test(&i915->selftest.counter)) {
1360 wake_up_var(&i915->selftest.counter);
1367 static int wait_for_all(struct drm_i915_private *i915)
1372 if (wait_var_event_timeout(&i915->selftest.counter,
1373 !atomic_read(&i915->selftest.counter),
1374 i915_selftest.timeout_jiffies))
1380 static int __live_parallel_spin(void *arg)
1382 struct intel_engine_cs *engine = arg;
1383 struct igt_spinner spin;
1384 struct i915_request *rq;
1388 * Create a spinner running for eternity on each engine. If a second
1389 * spinner is incorrectly placed on the same engine, it will not be
1390 * able to start in time.
1393 if (igt_spinner_init(&spin, engine->gt)) {
1394 wake_all(engine->i915);
1398 intel_engine_pm_get(engine);
1399 rq = igt_spinner_create_request(&spin,
1400 engine->kernel_context,
1401 MI_NOOP); /* no preemption */
1402 intel_engine_pm_put(engine);
1407 wake_all(engine->i915);
1411 i915_request_get(rq);
1412 i915_request_add(rq);
1413 if (igt_wait_for_spinner(&spin, rq)) {
1414 /* Occupy this engine for the whole test */
1415 err = wait_for_all(engine->i915);
1417 pr_err("Failed to start spinner on %s\n", engine->name);
1420 igt_spinner_end(&spin);
1422 if (err == 0 && i915_request_wait(rq, 0, HZ / 5) < 0)
1424 i915_request_put(rq);
1427 igt_spinner_fini(&spin);
1431 static int live_parallel_engines(void *arg)
1433 struct drm_i915_private *i915 = arg;
1434 static int (* const func[])(void *arg) = {
1435 __live_parallel_engine1,
1436 __live_parallel_engineN,
1437 __live_parallel_spin,
1440 const unsigned int nengines = num_uabi_engines(i915);
1441 struct intel_engine_cs *engine;
1442 int (* const *fn)(void *arg);
1443 struct task_struct **tsk;
1447 * Check we can submit requests to all engines concurrently. This
1448 * tests that we load up the system maximally.
1451 tsk = kcalloc(nengines, sizeof(*tsk), GFP_KERNEL);
1455 for (fn = func; !err && *fn; fn++) {
1456 char name[KSYM_NAME_LEN];
1457 struct igt_live_test t;
1460 snprintf(name, sizeof(name), "%ps", *fn);
1461 err = igt_live_test_begin(&t, i915, __func__, name);
1465 atomic_set(&i915->selftest.counter, nengines);
1468 for_each_uabi_engine(engine, i915) {
1469 tsk[idx] = kthread_run(*fn, engine,
1472 if (IS_ERR(tsk[idx])) {
1473 err = PTR_ERR(tsk[idx]);
1476 get_task_struct(tsk[idx++]);
1479 yield(); /* start all threads before we kthread_stop() */
1482 for_each_uabi_engine(engine, i915) {
1485 if (IS_ERR(tsk[idx]))
1488 status = kthread_stop(tsk[idx]);
1492 put_task_struct(tsk[idx++]);
1495 if (igt_live_test_end(&t))
1504 max_batches(struct i915_gem_context *ctx, struct intel_engine_cs *engine)
1506 struct i915_request *rq;
1510 * Before execlists, all contexts share the same ringbuffer. With
1511 * execlists, each context/engine has a separate ringbuffer and
1512 * for the purposes of this test, inexhaustible.
1514 * For the global ringbuffer though, we have to be very careful
1515 * that we do not wrap while preventing the execution of requests
* with an unsignaled fence.
1518 if (HAS_EXECLISTS(ctx->i915))
1521 rq = igt_request_alloc(ctx, engine);
1527 ret = rq->ring->size - rq->reserved_space;
1528 i915_request_add(rq);
1530 sz = rq->ring->emit - rq->head;
1532 sz += rq->ring->size;
1534 ret /= 2; /* leave half spare, in case of emergency! */
1540 static int live_breadcrumbs_smoketest(void *arg)
1542 struct drm_i915_private *i915 = arg;
1543 const unsigned int nengines = num_uabi_engines(i915);
1544 const unsigned int ncpus = num_online_cpus();
1545 unsigned long num_waits, num_fences;
1546 struct intel_engine_cs *engine;
1547 struct task_struct **threads;
1548 struct igt_live_test live;
1549 intel_wakeref_t wakeref;
1550 struct smoketest *smoke;
1551 unsigned int n, idx;
1556 * Smoketest our breadcrumb/signal handling for requests across multiple
1557 * threads. A very simple test to only catch the most egregious of bugs.
1558 * See __igt_breadcrumbs_smoketest();
1560 * On real hardware this time.
1563 wakeref = intel_runtime_pm_get(&i915->runtime_pm);
1565 file = mock_file(i915);
1567 ret = PTR_ERR(file);
1571 smoke = kcalloc(nengines, sizeof(*smoke), GFP_KERNEL);
1577 threads = kcalloc(ncpus * nengines, sizeof(*threads), GFP_KERNEL);
1583 smoke[0].request_alloc = __live_request_alloc;
1584 smoke[0].ncontexts = 64;
1585 smoke[0].contexts = kcalloc(smoke[0].ncontexts,
1586 sizeof(*smoke[0].contexts),
1588 if (!smoke[0].contexts) {
1593 for (n = 0; n < smoke[0].ncontexts; n++) {
1594 smoke[0].contexts[n] = live_context(i915, file);
1595 if (IS_ERR(smoke[0].contexts[n])) {
1596 ret = PTR_ERR(smoke[0].contexts[n]);
1601 ret = igt_live_test_begin(&live, i915, __func__, "");
1606 for_each_uabi_engine(engine, i915) {
1607 smoke[idx] = smoke[0];
1608 smoke[idx].engine = engine;
1609 smoke[idx].max_batch =
1610 max_batches(smoke[0].contexts[0], engine);
1611 if (smoke[idx].max_batch < 0) {
1612 ret = smoke[idx].max_batch;
1615 /* One ring interleaved between requests from all cpus */
1616 smoke[idx].max_batch /= num_online_cpus() + 1;
1617 pr_debug("Limiting batches to %d requests on %s\n",
1618 smoke[idx].max_batch, engine->name);
1620 for (n = 0; n < ncpus; n++) {
1621 struct task_struct *tsk;
1623 tsk = kthread_run(__igt_breadcrumbs_smoketest,
1624 &smoke[idx], "igt/%d.%d", idx, n);
1630 get_task_struct(tsk);
1631 threads[idx * ncpus + n] = tsk;
1637 yield(); /* start all threads before we begin */
1638 msleep(jiffies_to_msecs(i915_selftest.timeout_jiffies));
1644 for_each_uabi_engine(engine, i915) {
1645 for (n = 0; n < ncpus; n++) {
1646 struct task_struct *tsk = threads[idx * ncpus + n];
1652 err = kthread_stop(tsk);
1653 if (err < 0 && !ret)
1656 put_task_struct(tsk);
1659 num_waits += atomic_long_read(&smoke[idx].num_waits);
1660 num_fences += atomic_long_read(&smoke[idx].num_fences);
1663 pr_info("Completed %lu waits for %lu fences across %d engines and %d cpus\n",
1664 num_waits, num_fences, idx, ncpus);
1666 ret = igt_live_test_end(&live) ?: ret;
1668 kfree(smoke[0].contexts);
1676 intel_runtime_pm_put(&i915->runtime_pm, wakeref);
int i915_request_live_selftests(struct drm_i915_private *i915)
{
	static const struct i915_subtest tests[] = {
		SUBTEST(live_nop_request),
		SUBTEST(live_all_engines),
		SUBTEST(live_sequential_engines),
		SUBTEST(live_parallel_engines),
		SUBTEST(live_empty_request),
		SUBTEST(live_cancel_request),
		SUBTEST(live_breadcrumbs_smoketest),
	};

	if (intel_gt_is_wedged(&i915->gt))
		return 0;

	return i915_subtests(tests, i915);
}
1699 static int switch_to_kernel_sync(struct intel_context *ce, int err)
1701 struct i915_request *rq;
1702 struct dma_fence *fence;
1704 rq = intel_engine_create_kernel_request(ce->engine);
1708 fence = i915_active_fence_get(&ce->timeline->last_request);
1710 i915_request_await_dma_fence(rq, fence);
1711 dma_fence_put(fence);
1714 rq = i915_request_get(rq);
1715 i915_request_add(rq);
1716 if (i915_request_wait(rq, 0, HZ / 2) < 0 && !err)
1718 i915_request_put(rq);
1720 while (!err && !intel_engine_is_idle(ce->engine))
1721 intel_engine_flush_submission(ce->engine);
1727 struct intel_engine_cs *engine;
1728 unsigned long count;
1734 struct perf_series {
1735 struct drm_i915_private *i915;
1736 unsigned int nengines;
1737 struct intel_context *ce[];
1740 static int cmp_u32(const void *A, const void *B)
1742 const u32 *a = A, *b = B;
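
/*
 * trifilter() reduces TF_COUNT timing samples to a single value: the
 * samples are sorted and a weighted sum around the median is taken,
 * discarding the extreme outliers. The result is in fixed point,
 * scaled by 1 << TF_BIAS, which cycles_to_ns() and the reporting
 * callers (cycles >> TF_BIAS) undo.
 */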
1747 static u32 trifilter(u32 *a)
1752 sort(a, TF_COUNT, sizeof(*a), cmp_u32, NULL);
1754 sum = mul_u32_u32(a[2], 2);
1758 GEM_BUG_ON(sum > U32_MAX);
1763 static u64 cycles_to_ns(struct intel_engine_cs *engine, u32 cycles)
1765 u64 ns = intel_gt_clock_interval_to_ns(engine->gt, cycles);
1767 return DIV_ROUND_CLOSEST(ns, 1 << TF_BIAS);
1770 static u32 *emit_timestamp_store(u32 *cs, struct intel_context *ce, u32 offset)
1772 *cs++ = MI_STORE_REGISTER_MEM_GEN8 | MI_USE_GGTT;
1773 *cs++ = i915_mmio_reg_offset(RING_TIMESTAMP((ce->engine->mmio_base)));
1780 static u32 *emit_store_dw(u32 *cs, u32 offset, u32 value)
1782 *cs++ = MI_STORE_DWORD_IMM_GEN4 | MI_USE_GGTT;
1790 static u32 *emit_semaphore_poll(u32 *cs, u32 mode, u32 value, u32 offset)
1792 *cs++ = MI_SEMAPHORE_WAIT |
1793 MI_SEMAPHORE_GLOBAL_GTT |
1803 static u32 *emit_semaphore_poll_until(u32 *cs, u32 offset, u32 value)
1805 return emit_semaphore_poll(cs, MI_SEMAPHORE_SAD_EQ_SDD, value, offset);
1808 static void semaphore_set(u32 *sema, u32 value)
1810 WRITE_ONCE(*sema, value);
1811 wmb(); /* flush the update to the cache, and beyond */
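
/*
 * hwsp_scratch() carves a small scratch area out of the engine's
 * status page (HWSP), and hwsp_offset() converts a CPU pointer into
 * that area into the GGTT offset the GPU uses, so the CPU and GPU can
 * exchange semaphore values and timestamps through the same dwords.
 */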
1814 static u32 *hwsp_scratch(const struct intel_context *ce)
1816 return memset32(ce->engine->status_page.addr + 1000, 0, 21);
1819 static u32 hwsp_offset(const struct intel_context *ce, u32 *dw)
1821 return (i915_ggtt_offset(ce->engine->status_page.vma) +
1822 offset_in_page(dw));
1825 static int measure_semaphore_response(struct intel_context *ce)
1827 u32 *sema = hwsp_scratch(ce);
1828 const u32 offset = hwsp_offset(ce, sema);
1829 u32 elapsed[TF_COUNT], cycles;
1830 struct i915_request *rq;
1836 * Measure how many cycles it takes for the HW to detect the change
1837 * in a semaphore value.
1839 * A: read CS_TIMESTAMP from CPU
1841 * B: read CS_TIMESTAMP on GPU
1843 * Semaphore latency: B - A
1846 semaphore_set(sema, -1);
1848 rq = i915_request_create(ce);
1852 cs = intel_ring_begin(rq, 4 + 12 * ARRAY_SIZE(elapsed));
1854 i915_request_add(rq);
1859 cs = emit_store_dw(cs, offset, 0);
1860 for (i = 1; i <= ARRAY_SIZE(elapsed); i++) {
1861 cs = emit_semaphore_poll_until(cs, offset, i);
1862 cs = emit_timestamp_store(cs, ce, offset + i * sizeof(u32));
1863 cs = emit_store_dw(cs, offset, 0);
1866 intel_ring_advance(rq, cs);
1867 i915_request_add(rq);
1869 if (wait_for(READ_ONCE(*sema) == 0, 50)) {
1874 for (i = 1; i <= ARRAY_SIZE(elapsed); i++) {
1876 cycles = ENGINE_READ_FW(ce->engine, RING_TIMESTAMP);
1877 semaphore_set(sema, i);
1880 if (wait_for(READ_ONCE(*sema) == 0, 50)) {
1885 elapsed[i - 1] = sema[i] - cycles;
1888 cycles = trifilter(elapsed);
1889 pr_info("%s: semaphore response %d cycles, %lluns\n",
1890 ce->engine->name, cycles >> TF_BIAS,
1891 cycles_to_ns(ce->engine, cycles));
1893 return intel_gt_wait_for_idle(ce->engine->gt, HZ);
1896 intel_gt_set_wedged(ce->engine->gt);
1900 static int measure_idle_dispatch(struct intel_context *ce)
1902 u32 *sema = hwsp_scratch(ce);
1903 const u32 offset = hwsp_offset(ce, sema);
1904 u32 elapsed[TF_COUNT], cycles;
1910 * Measure how long it takes for us to submit a request while the
1911 * engine is idle, but is resting in our context.
1913 * A: read CS_TIMESTAMP from CPU
1915 * B: read CS_TIMESTAMP on GPU
1917 * Submission latency: B - A
1920 for (i = 0; i < ARRAY_SIZE(elapsed); i++) {
1921 struct i915_request *rq;
1923 err = intel_gt_wait_for_idle(ce->engine->gt, HZ / 2);
1927 rq = i915_request_create(ce);
1933 cs = intel_ring_begin(rq, 4);
1935 i915_request_add(rq);
1940 cs = emit_timestamp_store(cs, ce, offset + i * sizeof(u32));
1942 intel_ring_advance(rq, cs);
1946 elapsed[i] = ENGINE_READ_FW(ce->engine, RING_TIMESTAMP);
1947 i915_request_add(rq);
1952 err = intel_gt_wait_for_idle(ce->engine->gt, HZ / 2);
1956 for (i = 0; i < ARRAY_SIZE(elapsed); i++)
1957 elapsed[i] = sema[i] - elapsed[i];
1959 cycles = trifilter(elapsed);
1960 pr_info("%s: idle dispatch latency %d cycles, %lluns\n",
1961 ce->engine->name, cycles >> TF_BIAS,
1962 cycles_to_ns(ce->engine, cycles));
1964 return intel_gt_wait_for_idle(ce->engine->gt, HZ);
1967 intel_gt_set_wedged(ce->engine->gt);
1971 static int measure_busy_dispatch(struct intel_context *ce)
1973 u32 *sema = hwsp_scratch(ce);
1974 const u32 offset = hwsp_offset(ce, sema);
1975 u32 elapsed[TF_COUNT + 1], cycles;
1981 * Measure how long it takes for us to submit a request while the
1982 * engine is busy, polling on a semaphore in our context. With
1983 * direct submission, this will include the cost of a lite restore.
1985 * A: read CS_TIMESTAMP from CPU
1987 * B: read CS_TIMESTAMP on GPU
1989 * Submission latency: B - A
1992 for (i = 1; i <= ARRAY_SIZE(elapsed); i++) {
1993 struct i915_request *rq;
1995 rq = i915_request_create(ce);
2001 cs = intel_ring_begin(rq, 12);
2003 i915_request_add(rq);
2008 cs = emit_store_dw(cs, offset + i * sizeof(u32), -1);
2009 cs = emit_semaphore_poll_until(cs, offset, i);
2010 cs = emit_timestamp_store(cs, ce, offset + i * sizeof(u32));
2012 intel_ring_advance(rq, cs);
2014 if (i > 1 && wait_for(READ_ONCE(sema[i - 1]), 500)) {
2021 elapsed[i - 1] = ENGINE_READ_FW(ce->engine, RING_TIMESTAMP);
2022 i915_request_add(rq);
2024 semaphore_set(sema, i - 1);
2028 wait_for(READ_ONCE(sema[i - 1]), 500);
2029 semaphore_set(sema, i - 1);
2031 for (i = 1; i <= TF_COUNT; i++) {
2032 GEM_BUG_ON(sema[i] == -1);
2033 elapsed[i - 1] = sema[i] - elapsed[i];
2036 cycles = trifilter(elapsed);
2037 pr_info("%s: busy dispatch latency %d cycles, %lluns\n",
2038 ce->engine->name, cycles >> TF_BIAS,
2039 cycles_to_ns(ce->engine, cycles));
2041 return intel_gt_wait_for_idle(ce->engine->gt, HZ);
2044 intel_gt_set_wedged(ce->engine->gt);
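
/*
 * plug() submits a request on the kernel context that spins on a
 * MI_SEMAPHORE_WAIT at the given status-page offset, keeping the
 * engine occupied until semaphore_set() releases it.
 */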
2048 static int plug(struct intel_engine_cs *engine, u32 *sema, u32 mode, int value)
2051 i915_ggtt_offset(engine->status_page.vma) +
2052 offset_in_page(sema);
2053 struct i915_request *rq;
2056 rq = i915_request_create(engine->kernel_context);
2060 cs = intel_ring_begin(rq, 4);
2062 i915_request_add(rq);
2066 cs = emit_semaphore_poll(cs, mode, value, offset);
2068 intel_ring_advance(rq, cs);
2069 i915_request_add(rq);
2074 static int measure_inter_request(struct intel_context *ce)
2076 u32 *sema = hwsp_scratch(ce);
2077 const u32 offset = hwsp_offset(ce, sema);
2078 u32 elapsed[TF_COUNT + 1], cycles;
2079 struct i915_sw_fence *submit;
2083 * Measure how long it takes to advance from one request into the
2084 * next. Between each request we flush the GPU caches to memory,
2085 * update the breadcrumbs, and then invalidate those caches.
2086 * We queue up all the requests to be submitted in one batch so
2087 * it should be one set of contiguous measurements.
2089 * A: read CS_TIMESTAMP on GPU
2091 * B: read CS_TIMESTAMP on GPU
2093 * Request latency: B - A
2096 err = plug(ce->engine, sema, MI_SEMAPHORE_SAD_NEQ_SDD, 0);
2100 submit = heap_fence_create(GFP_KERNEL);
2102 semaphore_set(sema, 1);
2106 intel_engine_flush_submission(ce->engine);
2107 for (i = 1; i <= ARRAY_SIZE(elapsed); i++) {
2108 struct i915_request *rq;
2111 rq = i915_request_create(ce);
2117 err = i915_sw_fence_await_sw_fence_gfp(&rq->submit,
2121 i915_request_add(rq);
2125 cs = intel_ring_begin(rq, 4);
2127 i915_request_add(rq);
2132 cs = emit_timestamp_store(cs, ce, offset + i * sizeof(u32));
2134 intel_ring_advance(rq, cs);
2135 i915_request_add(rq);
2137 i915_sw_fence_commit(submit);
2138 intel_engine_flush_submission(ce->engine);
2139 heap_fence_put(submit);
2141 semaphore_set(sema, 1);
2142 err = intel_gt_wait_for_idle(ce->engine->gt, HZ / 2);
2146 for (i = 1; i <= TF_COUNT; i++)
2147 elapsed[i - 1] = sema[i + 1] - sema[i];
2149 cycles = trifilter(elapsed);
2150 pr_info("%s: inter-request latency %d cycles, %lluns\n",
2151 ce->engine->name, cycles >> TF_BIAS,
2152 cycles_to_ns(ce->engine, cycles));
2154 return intel_gt_wait_for_idle(ce->engine->gt, HZ);
2157 i915_sw_fence_commit(submit);
2158 heap_fence_put(submit);
2159 semaphore_set(sema, 1);
2161 intel_gt_set_wedged(ce->engine->gt);
2165 static int measure_context_switch(struct intel_context *ce)
2167 u32 *sema = hwsp_scratch(ce);
2168 const u32 offset = hwsp_offset(ce, sema);
2169 struct i915_request *fence = NULL;
2170 u32 elapsed[TF_COUNT + 1], cycles;
2175 * Measure how long it takes to advance from one request in one
2176 * context to a request in another context. This allows us to
2177 * measure how long the context save/restore take, along with all
2178 * the inter-context setup we require.
2180 * A: read CS_TIMESTAMP on GPU
2182 * B: read CS_TIMESTAMP on GPU
2184 * Context switch latency: B - A
2187 err = plug(ce->engine, sema, MI_SEMAPHORE_SAD_NEQ_SDD, 0);
2191 for (i = 1; i <= ARRAY_SIZE(elapsed); i++) {
2192 struct intel_context *arr[] = {
2193 ce, ce->engine->kernel_context
2195 u32 addr = offset + ARRAY_SIZE(arr) * i * sizeof(u32);
2197 for (j = 0; j < ARRAY_SIZE(arr); j++) {
2198 struct i915_request *rq;
2200 rq = i915_request_create(arr[j]);
2207 err = i915_request_await_dma_fence(rq,
2210 i915_request_add(rq);
2215 cs = intel_ring_begin(rq, 4);
2217 i915_request_add(rq);
2222 cs = emit_timestamp_store(cs, ce, addr);
2223 addr += sizeof(u32);
2225 intel_ring_advance(rq, cs);
2227 i915_request_put(fence);
2228 fence = i915_request_get(rq);
2230 i915_request_add(rq);
2233 i915_request_put(fence);
2234 intel_engine_flush_submission(ce->engine);
2236 semaphore_set(sema, 1);
2237 err = intel_gt_wait_for_idle(ce->engine->gt, HZ / 2);
2241 for (i = 1; i <= TF_COUNT; i++)
2242 elapsed[i - 1] = sema[2 * i + 2] - sema[2 * i + 1];
2244 cycles = trifilter(elapsed);
2245 pr_info("%s: context switch latency %d cycles, %lluns\n",
2246 ce->engine->name, cycles >> TF_BIAS,
2247 cycles_to_ns(ce->engine, cycles));
2249 return intel_gt_wait_for_idle(ce->engine->gt, HZ);
2252 i915_request_put(fence);
2253 semaphore_set(sema, 1);
2255 intel_gt_set_wedged(ce->engine->gt);
2259 static int measure_preemption(struct intel_context *ce)
2261 u32 *sema = hwsp_scratch(ce);
2262 const u32 offset = hwsp_offset(ce, sema);
2263 u32 elapsed[TF_COUNT], cycles;
2269 * We measure two latencies while triggering preemption. The first
2270 * latency is how long it takes for us to submit a preempting request.
* The second latency is how long it takes for us to return from the
2272 * preemption back to the original context.
2274 * A: read CS_TIMESTAMP from CPU
2276 * B: read CS_TIMESTAMP on GPU (in preempting context)
2278 * C: read CS_TIMESTAMP on GPU (in original context)
2280 * Preemption dispatch latency: B - A
2281 * Preemption switch latency: C - B
2284 if (!intel_engine_has_preemption(ce->engine))
2287 for (i = 1; i <= ARRAY_SIZE(elapsed); i++) {
2288 u32 addr = offset + 2 * i * sizeof(u32);
2289 struct i915_request *rq;
2291 rq = i915_request_create(ce);
2297 cs = intel_ring_begin(rq, 12);
2299 i915_request_add(rq);
2304 cs = emit_store_dw(cs, addr, -1);
2305 cs = emit_semaphore_poll_until(cs, offset, i);
2306 cs = emit_timestamp_store(cs, ce, addr + sizeof(u32));
2308 intel_ring_advance(rq, cs);
2309 i915_request_add(rq);
2311 if (wait_for(READ_ONCE(sema[2 * i]) == -1, 500)) {
2316 rq = i915_request_create(ce->engine->kernel_context);
2322 cs = intel_ring_begin(rq, 8);
2324 i915_request_add(rq);
2329 cs = emit_timestamp_store(cs, ce, addr);
2330 cs = emit_store_dw(cs, offset, i);
2332 intel_ring_advance(rq, cs);
2333 rq->sched.attr.priority = I915_PRIORITY_BARRIER;
2335 elapsed[i - 1] = ENGINE_READ_FW(ce->engine, RING_TIMESTAMP);
2336 i915_request_add(rq);
2339 if (wait_for(READ_ONCE(sema[2 * i - 2]) != -1, 500)) {
2344 for (i = 1; i <= TF_COUNT; i++)
2345 elapsed[i - 1] = sema[2 * i + 0] - elapsed[i - 1];
2347 cycles = trifilter(elapsed);
2348 pr_info("%s: preemption dispatch latency %d cycles, %lluns\n",
2349 ce->engine->name, cycles >> TF_BIAS,
2350 cycles_to_ns(ce->engine, cycles));
2352 for (i = 1; i <= TF_COUNT; i++)
2353 elapsed[i - 1] = sema[2 * i + 1] - sema[2 * i + 0];
2355 cycles = trifilter(elapsed);
2356 pr_info("%s: preemption switch latency %d cycles, %lluns\n",
2357 ce->engine->name, cycles >> TF_BIAS,
2358 cycles_to_ns(ce->engine, cycles));
2360 return intel_gt_wait_for_idle(ce->engine->gt, HZ);
2363 intel_gt_set_wedged(ce->engine->gt);
2368 struct dma_fence_cb base;
2372 static void signal_cb(struct dma_fence *fence, struct dma_fence_cb *cb)
2374 struct signal_cb *s = container_of(cb, typeof(*s), base);
2376 smp_store_mb(s->seen, true); /* be safe, be strong */
2379 static int measure_completion(struct intel_context *ce)
2381 u32 *sema = hwsp_scratch(ce);
2382 const u32 offset = hwsp_offset(ce, sema);
2383 u32 elapsed[TF_COUNT], cycles;
* Measure how long it takes for the signal (interrupt) sent by the
* GPU to be processed by the CPU.
2392 * A: read CS_TIMESTAMP on GPU
2394 * B: read CS_TIMESTAMP from CPU
2396 * Completion latency: B - A
2399 for (i = 1; i <= ARRAY_SIZE(elapsed); i++) {
2400 struct signal_cb cb = { .seen = false };
2401 struct i915_request *rq;
2403 rq = i915_request_create(ce);
2409 cs = intel_ring_begin(rq, 12);
2411 i915_request_add(rq);
2416 cs = emit_store_dw(cs, offset + i * sizeof(u32), -1);
2417 cs = emit_semaphore_poll_until(cs, offset, i);
2418 cs = emit_timestamp_store(cs, ce, offset + i * sizeof(u32));
2420 intel_ring_advance(rq, cs);
2422 dma_fence_add_callback(&rq->fence, &cb.base, signal_cb);
2423 i915_request_add(rq);
2425 intel_engine_flush_submission(ce->engine);
2426 if (wait_for(READ_ONCE(sema[i]) == -1, 50)) {
2432 semaphore_set(sema, i);
2433 while (!READ_ONCE(cb.seen))
2436 elapsed[i - 1] = ENGINE_READ_FW(ce->engine, RING_TIMESTAMP);
2440 err = intel_gt_wait_for_idle(ce->engine->gt, HZ / 2);
2444 for (i = 0; i < ARRAY_SIZE(elapsed); i++) {
2445 GEM_BUG_ON(sema[i + 1] == -1);
2446 elapsed[i] = elapsed[i] - sema[i + 1];
2449 cycles = trifilter(elapsed);
2450 pr_info("%s: completion latency %d cycles, %lluns\n",
2451 ce->engine->name, cycles >> TF_BIAS,
2452 cycles_to_ns(ce->engine, cycles));
2454 return intel_gt_wait_for_idle(ce->engine->gt, HZ);
2457 intel_gt_set_wedged(ce->engine->gt);
static void rps_pin(struct intel_gt *gt)
{
	/* Pin the frequency to max */
	atomic_inc(&gt->rps.num_waiters);
	intel_uncore_forcewake_get(gt->uncore, FORCEWAKE_ALL);

	mutex_lock(&gt->rps.lock);
	intel_rps_set(&gt->rps, gt->rps.max_freq);
	mutex_unlock(&gt->rps.lock);
}

static void rps_unpin(struct intel_gt *gt)
{
	intel_uncore_forcewake_put(gt->uncore, FORCEWAKE_ALL);
	atomic_dec(&gt->rps.num_waiters);
}
2478 static int perf_request_latency(void *arg)
2480 struct drm_i915_private *i915 = arg;
2481 struct intel_engine_cs *engine;
2482 struct pm_qos_request qos;
2485 if (INTEL_GEN(i915) < 8) /* per-engine CS timestamp, semaphores */
2488 cpu_latency_qos_add_request(&qos, 0); /* disable cstates */
2490 for_each_uabi_engine(engine, i915) {
2491 struct intel_context *ce;
2493 ce = intel_context_create(engine);
2499 err = intel_context_pin(ce);
2501 intel_context_put(ce);
2505 st_engine_heartbeat_disable(engine);
2506 rps_pin(engine->gt);
2509 err = measure_semaphore_response(ce);
2511 err = measure_idle_dispatch(ce);
2513 err = measure_busy_dispatch(ce);
2515 err = measure_inter_request(ce);
2517 err = measure_context_switch(ce);
2519 err = measure_preemption(ce);
2521 err = measure_completion(ce);
2523 rps_unpin(engine->gt);
2524 st_engine_heartbeat_enable(engine);
2526 intel_context_unpin(ce);
2527 intel_context_put(ce);
2533 if (igt_flush_test(i915))
2536 cpu_latency_qos_remove_request(&qos);
2540 static int s_sync0(void *arg)
2542 struct perf_series *ps = arg;
2543 IGT_TIMEOUT(end_time);
2544 unsigned int idx = 0;
2547 GEM_BUG_ON(!ps->nengines);
2549 struct i915_request *rq;
2551 rq = i915_request_create(ps->ce[idx]);
2557 i915_request_get(rq);
2558 i915_request_add(rq);
2560 if (i915_request_wait(rq, 0, HZ / 5) < 0)
2562 i915_request_put(rq);
2566 if (++idx == ps->nengines)
2568 } while (!__igt_timeout(end_time, NULL));
2573 static int s_sync1(void *arg)
2575 struct perf_series *ps = arg;
2576 struct i915_request *prev = NULL;
2577 IGT_TIMEOUT(end_time);
2578 unsigned int idx = 0;
2581 GEM_BUG_ON(!ps->nengines);
2583 struct i915_request *rq;
2585 rq = i915_request_create(ps->ce[idx]);
2591 i915_request_get(rq);
2592 i915_request_add(rq);
2594 if (prev && i915_request_wait(prev, 0, HZ / 5) < 0)
2596 i915_request_put(prev);
2601 if (++idx == ps->nengines)
2603 } while (!__igt_timeout(end_time, NULL));
2604 i915_request_put(prev);
2609 static int s_many(void *arg)
2611 struct perf_series *ps = arg;
2612 IGT_TIMEOUT(end_time);
2613 unsigned int idx = 0;
2615 GEM_BUG_ON(!ps->nengines);
2617 struct i915_request *rq;
2619 rq = i915_request_create(ps->ce[idx]);
2623 i915_request_add(rq);
2625 if (++idx == ps->nengines)
2627 } while (!__igt_timeout(end_time, NULL));
2632 static int perf_series_engines(void *arg)
2634 struct drm_i915_private *i915 = arg;
2635 static int (* const func[])(void *arg) = {
2641 const unsigned int nengines = num_uabi_engines(i915);
2642 struct intel_engine_cs *engine;
2643 int (* const *fn)(void *arg);
2644 struct pm_qos_request qos;
2645 struct perf_stats *stats;
2646 struct perf_series *ps;
2650 stats = kcalloc(nengines, sizeof(*stats), GFP_KERNEL);
2654 ps = kzalloc(struct_size(ps, ce, nengines), GFP_KERNEL);
2660 cpu_latency_qos_add_request(&qos, 0); /* disable cstates */
2663 ps->nengines = nengines;
2666 for_each_uabi_engine(engine, i915) {
2667 struct intel_context *ce;
2669 ce = intel_context_create(engine);
2675 err = intel_context_pin(ce);
2677 intel_context_put(ce);
2683 GEM_BUG_ON(idx != ps->nengines);
2685 for (fn = func; *fn && !err; fn++) {
2686 char name[KSYM_NAME_LEN];
2687 struct igt_live_test t;
2689 snprintf(name, sizeof(name), "%ps", *fn);
2690 err = igt_live_test_begin(&t, i915, __func__, name);
2694 for (idx = 0; idx < nengines; idx++) {
2695 struct perf_stats *p =
2696 memset(&stats[idx], 0, sizeof(stats[idx]));
2697 struct intel_context *ce = ps->ce[idx];
2699 p->engine = ps->ce[idx]->engine;
2700 intel_engine_pm_get(p->engine);
2702 if (intel_engine_supports_stats(p->engine))
2703 p->busy = intel_engine_get_busy_time(p->engine,
2706 p->time = ktime_get();
2707 p->runtime = -intel_context_get_total_runtime_ns(ce);
2711 if (igt_live_test_end(&t))
2714 for (idx = 0; idx < nengines; idx++) {
2715 struct perf_stats *p = &stats[idx];
2716 struct intel_context *ce = ps->ce[idx];
2717 int integer, decimal;
2721 p->busy = ktime_sub(intel_engine_get_busy_time(p->engine,
2726 p->time = ktime_sub(now, p->time);
2728 err = switch_to_kernel_sync(ce, err);
2729 p->runtime += intel_context_get_total_runtime_ns(ce);
2730 intel_engine_pm_put(p->engine);
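		/*
		 * Express the sampled busy time as a percentage of the
		 * elapsed wall time, with two decimal places
		 * (integer.decimal).
		 */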
2732 busy = 100 * ktime_to_ns(p->busy);
2733 dt = ktime_to_ns(p->time);
2735 integer = div64_u64(busy, dt);
2736 busy -= integer * dt;
2737 decimal = div64_u64(100 * busy, dt);
2743 pr_info("%s %5s: { seqno:%d, busy:%d.%02d%%, runtime:%lldms, walltime:%lldms }\n",
2744 name, p->engine->name, ce->timeline->seqno,
2746 div_u64(p->runtime, 1000 * 1000),
2747 div_u64(ktime_to_ns(p->time), 1000 * 1000));
2752 for (idx = 0; idx < nengines; idx++) {
2753 if (IS_ERR_OR_NULL(ps->ce[idx]))
2756 intel_context_unpin(ps->ce[idx]);
2757 intel_context_put(ps->ce[idx]);
2761 cpu_latency_qos_remove_request(&qos);
2766 static int p_sync0(void *arg)
2768 struct perf_stats *p = arg;
2769 struct intel_engine_cs *engine = p->engine;
2770 struct intel_context *ce;
2771 IGT_TIMEOUT(end_time);
2772 unsigned long count;
2776 ce = intel_context_create(engine);
2780 err = intel_context_pin(ce);
2782 intel_context_put(ce);
2786 if (intel_engine_supports_stats(engine)) {
2787 p->busy = intel_engine_get_busy_time(engine, &p->time);
2790 p->time = ktime_get();
2796 struct i915_request *rq;
2798 rq = i915_request_create(ce);
2804 i915_request_get(rq);
2805 i915_request_add(rq);
2808 if (i915_request_wait(rq, 0, HZ / 5) < 0)
2810 i915_request_put(rq);
2815 } while (!__igt_timeout(end_time, NULL));
2820 p->busy = ktime_sub(intel_engine_get_busy_time(engine, &now),
2822 p->time = ktime_sub(now, p->time);
2824 p->time = ktime_sub(ktime_get(), p->time);
2827 err = switch_to_kernel_sync(ce, err);
2828 p->runtime = intel_context_get_total_runtime_ns(ce);
2831 intel_context_unpin(ce);
2832 intel_context_put(ce);
2836 static int p_sync1(void *arg)
2838 struct perf_stats *p = arg;
2839 struct intel_engine_cs *engine = p->engine;
2840 struct i915_request *prev = NULL;
2841 struct intel_context *ce;
2842 IGT_TIMEOUT(end_time);
2843 unsigned long count;
2847 ce = intel_context_create(engine);
2851 err = intel_context_pin(ce);
2853 intel_context_put(ce);
2857 if (intel_engine_supports_stats(engine)) {
2858 p->busy = intel_engine_get_busy_time(engine, &p->time);
2861 p->time = ktime_get();
2867 struct i915_request *rq;
2869 rq = i915_request_create(ce);
2875 i915_request_get(rq);
2876 i915_request_add(rq);
2879 if (prev && i915_request_wait(prev, 0, HZ / 5) < 0)
2881 i915_request_put(prev);
2887 } while (!__igt_timeout(end_time, NULL));
2888 i915_request_put(prev);
2893 p->busy = ktime_sub(intel_engine_get_busy_time(engine, &now),
2895 p->time = ktime_sub(now, p->time);
2897 p->time = ktime_sub(ktime_get(), p->time);
2900 err = switch_to_kernel_sync(ce, err);
2901 p->runtime = intel_context_get_total_runtime_ns(ce);
2904 intel_context_unpin(ce);
2905 intel_context_put(ce);
2909 static int p_many(void *arg)
2911 struct perf_stats *p = arg;
2912 struct intel_engine_cs *engine = p->engine;
2913 struct intel_context *ce;
2914 IGT_TIMEOUT(end_time);
2915 unsigned long count;
2919 ce = intel_context_create(engine);
2923 err = intel_context_pin(ce);
2925 intel_context_put(ce);
2929 if (intel_engine_supports_stats(engine)) {
2930 p->busy = intel_engine_get_busy_time(engine, &p->time);
2933 p->time = ktime_get();
2939 struct i915_request *rq;
2941 rq = i915_request_create(ce);
2947 i915_request_add(rq);
2949 } while (!__igt_timeout(end_time, NULL));
2954 p->busy = ktime_sub(intel_engine_get_busy_time(engine, &now),
2956 p->time = ktime_sub(now, p->time);
2958 p->time = ktime_sub(ktime_get(), p->time);
2961 err = switch_to_kernel_sync(ce, err);
2962 p->runtime = intel_context_get_total_runtime_ns(ce);
2965 intel_context_unpin(ce);
2966 intel_context_put(ce);
2970 static int perf_parallel_engines(void *arg)
2972 struct drm_i915_private *i915 = arg;
2973 static int (* const func[])(void *arg) = {
2979 const unsigned int nengines = num_uabi_engines(i915);
2980 struct intel_engine_cs *engine;
2981 int (* const *fn)(void *arg);
2982 struct pm_qos_request qos;
2984 struct perf_stats p;
2985 struct task_struct *tsk;
2989 engines = kcalloc(nengines, sizeof(*engines), GFP_KERNEL);
2993 cpu_latency_qos_add_request(&qos, 0);
2995 for (fn = func; *fn; fn++) {
2996 char name[KSYM_NAME_LEN];
2997 struct igt_live_test t;
3000 snprintf(name, sizeof(name), "%ps", *fn);
3001 err = igt_live_test_begin(&t, i915, __func__, name);
3005 atomic_set(&i915->selftest.counter, nengines);
3008 for_each_uabi_engine(engine, i915) {
3009 intel_engine_pm_get(engine);
3011 memset(&engines[idx].p, 0, sizeof(engines[idx].p));
3012 engines[idx].p.engine = engine;
3014 engines[idx].tsk = kthread_run(*fn, &engines[idx].p,
3015 "igt:%s", engine->name);
3016 if (IS_ERR(engines[idx].tsk)) {
3017 err = PTR_ERR(engines[idx].tsk);
3018 intel_engine_pm_put(engine);
3021 get_task_struct(engines[idx++].tsk);
3024 yield(); /* start all threads before we kthread_stop() */
3027 for_each_uabi_engine(engine, i915) {
3030 if (IS_ERR(engines[idx].tsk))
3033 status = kthread_stop(engines[idx].tsk);
3037 intel_engine_pm_put(engine);
3038 put_task_struct(engines[idx++].tsk);
3041 if (igt_live_test_end(&t))
3047 for_each_uabi_engine(engine, i915) {
3048 struct perf_stats *p = &engines[idx].p;
3049 u64 busy = 100 * ktime_to_ns(p->busy);
3050 u64 dt = ktime_to_ns(p->time);
3051 int integer, decimal;
3054 integer = div64_u64(busy, dt);
3055 busy -= integer * dt;
3056 decimal = div64_u64(100 * busy, dt);
3062 GEM_BUG_ON(engine != p->engine);
3063 pr_info("%s %5s: { count:%lu, busy:%d.%02d%%, runtime:%lldms, walltime:%lldms }\n",
3064 name, engine->name, p->count, integer, decimal,
3065 div_u64(p->runtime, 1000 * 1000),
3066 div_u64(ktime_to_ns(p->time), 1000 * 1000));
3071 cpu_latency_qos_remove_request(&qos);
int i915_request_perf_selftests(struct drm_i915_private *i915)
{
	static const struct i915_subtest tests[] = {
		SUBTEST(perf_request_latency),
		SUBTEST(perf_series_engines),
		SUBTEST(perf_parallel_engines),
	};

	if (intel_gt_is_wedged(&i915->gt))
		return 0;

	return i915_subtests(tests, i915);
}