/*
 * SPDX-License-Identifier: MIT
 *
 * Copyright © 2017-2018 Intel Corporation
 */

#include <linux/prime_numbers.h>

#include "intel_context.h"
#include "intel_engine_heartbeat.h"
#include "intel_engine_pm.h"
#include "intel_gt.h"
#include "intel_gt_requests.h"
#include "intel_ring.h"

#include "../selftests/i915_random.h"
#include "../i915_selftest.h"

#include "../selftests/igt_flush_test.h"
#include "../selftests/mock_gem_device.h"
#include "selftests/mock_timeline.h"
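
/*
 * These selftests exercise struct intel_timeline: the mock tests run on
 * the CPU alone (HWSP slot allocation and the seqno sync map), while the
 * live tests emit real requests that write breadcrumbs through the GGTT.
 */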
static struct page *hwsp_page(struct intel_timeline *tl)
{
	struct drm_i915_gem_object *obj = tl->hwsp_ggtt->obj;

	GEM_BUG_ON(!i915_gem_object_has_pinned_pages(obj));
	return sg_page(obj->mm.pages->sgl);
}
static unsigned long hwsp_cacheline(struct intel_timeline *tl)
{
	unsigned long address = (unsigned long)page_address(hwsp_page(tl));

	return (address + tl->hwsp_offset) / CACHELINE_BYTES;
}
#define CACHELINES_PER_PAGE (PAGE_SIZE / CACHELINE_BYTES)
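
/*
 * State for the mock HWSP tests: a radix tree keyed by cacheline detects
 * aliasing between concurrently allocated timelines, while the fixed-size
 * history array bounds each timeline's lifetime so that freed cachelines
 * are pushed back through the allocator.
 */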
struct mock_hwsp_freelist {
	struct intel_gt *gt;
	struct radix_tree_root cachelines;
	struct intel_timeline **history;
	unsigned long count, max;
	struct rnd_state prng;
};

enum {
	SHUFFLE = BIT(0),
};
static void __mock_hwsp_record(struct mock_hwsp_freelist *state,
			       unsigned int idx,
			       struct intel_timeline *tl)
{
	tl = xchg(&state->history[idx], tl);
	if (tl) {
		radix_tree_delete(&state->cachelines, hwsp_cacheline(tl));
		intel_timeline_put(tl);
	}
}
static int __mock_hwsp_timeline(struct mock_hwsp_freelist *state,
				unsigned int count,
				unsigned int flags)
{
	struct intel_timeline *tl;
	unsigned int idx;

	while (count--) {
		unsigned long cacheline;
		int err;

		tl = intel_timeline_create(state->gt, NULL);
		if (IS_ERR(tl))
			return PTR_ERR(tl);

		cacheline = hwsp_cacheline(tl);
		err = radix_tree_insert(&state->cachelines, cacheline, tl);
		if (err) {
			if (err == -EEXIST) {
				pr_err("HWSP cacheline %lu already used; duplicate allocation!\n",
				       cacheline);
			}
			intel_timeline_put(tl);
			return err;
		}

		idx = state->count++ % state->max;
		__mock_hwsp_record(state, idx, tl);
	}

	if (flags & SHUFFLE)
		i915_prandom_shuffle(state->history,
				     sizeof(*state->history),
				     min(state->count, state->max),
				     &state->prng);

	count = i915_prandom_u32_max_state(min(state->count, state->max),
					   &state->prng);
	while (count--) {
		idx = --state->count % state->max;
		__mock_hwsp_record(state, idx, NULL);
	}

	return 0;
}
static int mock_hwsp_freelist(void *arg)
{
	struct mock_hwsp_freelist state;
	struct drm_i915_private *i915;
	const struct {
		const char *name;
		unsigned int flags;
	} phases[] = {
		{ "linear", 0 },
		{ "shuffled", SHUFFLE },
		{ },
	}, *p;
	unsigned int na;
	int err = 0;

	i915 = mock_gem_device();
	if (!i915)
		return -ENOMEM;

	INIT_RADIX_TREE(&state.cachelines, GFP_KERNEL);
	state.prng = I915_RND_STATE_INITIALIZER(i915_selftest.random_seed);

	state.gt = &i915->gt;

	/*
	 * Create a bunch of timelines and check that their HWSP do not overlap.
	 * Free some, and try again.
	 */

	state.max = PAGE_SIZE / sizeof(*state.history);
	state.count = 0;
	state.history = kcalloc(state.max, sizeof(*state.history), GFP_KERNEL);
	if (!state.history) {
		err = -ENOMEM;
		goto err_put;
	}

	for (p = phases; p->name; p++) {
		pr_debug("%s(%s)\n", __func__, p->name);
		for_each_prime_number_from(na, 1, 2 * CACHELINES_PER_PAGE) {
			err = __mock_hwsp_timeline(&state, na, p->flags);
			if (err)
				goto out;
		}
	}

out:
	for (na = 0; na < state.max; na++)
		__mock_hwsp_record(&state, na, NULL);
	kfree(state.history);
err_put:
	drm_dev_put(&i915->drm);
	return err;
}
struct __igt_sync {
	const char *name;
	u32 seqno;
	bool expected;
	bool set;
};

static int __igt_sync(struct intel_timeline *tl,
		      u64 ctx,
		      const struct __igt_sync *p,
		      const char *name)
{
	int ret;

	if (__intel_timeline_sync_is_later(tl, ctx, p->seqno) != p->expected) {
		pr_err("%s: %s(ctx=%llu, seqno=%u) expected passed %s but failed\n",
		       name, p->name, ctx, p->seqno, yesno(p->expected));
		return -EINVAL;
	}

	if (p->set) {
		ret = __intel_timeline_sync_set(tl, ctx, p->seqno);
		if (ret)
			return ret;
	}

	return 0;
}
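
/*
 * Each entry in the pass[] table below is { name, seqno, expected, set }:
 * first check that __intel_timeline_sync_is_later(ctx, seqno) returns
 * 'expected', then if 'set' record the seqno, so the sequence as a whole
 * walks the u32 seqno space across its wrap point.
 */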
static int igt_sync(void *arg)
{
	const struct __igt_sync pass[] = {
		{ "unset", 0, false, false },
		{ "new", 0, false, true },
		{ "0a", 0, true, true },
		{ "1a", 1, false, true },
		{ "1b", 1, true, true },
		{ "0b", 0, true, false },
		{ "2a", 2, false, true },
		{ "4", 4, false, true },
		{ "INT_MAX", INT_MAX, false, true },
		{ "INT_MAX-1", INT_MAX-1, true, false },
		{ "INT_MAX+1", (u32)INT_MAX+1, false, true },
		{ "INT_MAX", INT_MAX, true, false },
		{ "UINT_MAX", UINT_MAX, false, true },
		{ "wrap", 0, false, true },
		{ "unwrap", UINT_MAX, true, false },
		{},
	}, *p;
	struct intel_timeline tl;
	int order, offset;
	int ret = -ENODEV;

	mock_timeline_init(&tl, 0);
	for (p = pass; p->name; p++) {
		for (order = 1; order < 64; order++) {
			for (offset = -1; offset <= (order > 1); offset++) {
				u64 ctx = BIT_ULL(order) + offset;

				ret = __igt_sync(&tl, ctx, p, "1");
				if (ret)
					goto out;
			}
		}
	}
	mock_timeline_fini(&tl);

	mock_timeline_init(&tl, 0);
	for (order = 1; order < 64; order++) {
		for (offset = -1; offset <= (order > 1); offset++) {
			u64 ctx = BIT_ULL(order) + offset;

			for (p = pass; p->name; p++) {
				ret = __igt_sync(&tl, ctx, p, "2");
				if (ret)
					goto out;
			}
		}
	}

out:
	mock_timeline_fini(&tl);
	return ret;
}
static unsigned int random_engine(struct rnd_state *rnd)
{
	return i915_prandom_u32_max_state(I915_NUM_ENGINES, rnd);
}
static int bench_sync(void *arg)
{
	struct rnd_state prng;
	struct intel_timeline tl;
	unsigned long end_time, count;
	u64 prng32_1M;
	ktime_t kt;
	int order, last_order;

	mock_timeline_init(&tl, 0);

	/* Lookups from cache are very fast and so the random number generation
	 * and the loop itself becomes a significant factor in the per-iteration
	 * timings. We try to compensate the results by measuring the overhead
	 * of the prng and subtract it from the reported results.
	 */
	prandom_seed_state(&prng, i915_selftest.random_seed);
	count = 0;
	kt = ktime_get();
	end_time = jiffies + HZ/10;
	do {
		u32 x;

		/* Make sure the compiler doesn't optimise away the prng call */
		WRITE_ONCE(x, prandom_u32_state(&prng));

		count++;
	} while (!time_after(jiffies, end_time));
	kt = ktime_sub(ktime_get(), kt);
	pr_debug("%s: %lu random evaluations, %lluns/prng\n",
		 __func__, count, (long long)div64_ul(ktime_to_ns(kt), count));
	prng32_1M = div64_ul(ktime_to_ns(kt) << 20, count);
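
	/*
	 * Note: prng32_1M holds the per-call prng cost in fixed point
	 * (ns << 20). Each benchmark below that makes N prng calls per
	 * iteration subtracts (count * prng32_1M * N) >> 20, i.e.
	 * count * N * ns-per-call, so only the sync-map cost is reported.
	 */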
	/* Benchmark (only) setting random context ids */
	prandom_seed_state(&prng, i915_selftest.random_seed);
	count = 0;
	kt = ktime_get();
	end_time = jiffies + HZ/10;
	do {
		u64 id = i915_prandom_u64_state(&prng);

		__intel_timeline_sync_set(&tl, id, 0);
		count++;
	} while (!time_after(jiffies, end_time));
	kt = ktime_sub(ktime_get(), kt);
	kt = ktime_sub_ns(kt, (count * prng32_1M * 2) >> 20);
	pr_info("%s: %lu random insertions, %lluns/insert\n",
		__func__, count, (long long)div64_ul(ktime_to_ns(kt), count));

	/* Benchmark looking up the exact same context ids as we just set */
	prandom_seed_state(&prng, i915_selftest.random_seed);
	end_time = count;
	kt = ktime_get();
	while (end_time--) {
		u64 id = i915_prandom_u64_state(&prng);

		if (!__intel_timeline_sync_is_later(&tl, id, 0)) {
			mock_timeline_fini(&tl);
			pr_err("Lookup of %llu failed\n", id);
			return -EINVAL;
		}
	}
	kt = ktime_sub(ktime_get(), kt);
	kt = ktime_sub_ns(kt, (count * prng32_1M * 2) >> 20);
	pr_info("%s: %lu random lookups, %lluns/lookup\n",
		__func__, count, (long long)div64_ul(ktime_to_ns(kt), count));

	mock_timeline_fini(&tl);
	cond_resched();
	mock_timeline_init(&tl, 0);

	/* Benchmark setting the first N (in order) contexts */
	count = 0;
	kt = ktime_get();
	end_time = jiffies + HZ/10;
	do {
		__intel_timeline_sync_set(&tl, count++, 0);
	} while (!time_after(jiffies, end_time));
	kt = ktime_sub(ktime_get(), kt);
	pr_info("%s: %lu in-order insertions, %lluns/insert\n",
		__func__, count, (long long)div64_ul(ktime_to_ns(kt), count));

	/* Benchmark looking up the exact same context ids as we just set */
	end_time = count;
	kt = ktime_get();
	while (end_time--) {
		if (!__intel_timeline_sync_is_later(&tl, end_time, 0)) {
			pr_err("Lookup of %lu failed\n", end_time);
			mock_timeline_fini(&tl);
			return -EINVAL;
		}
	}
	kt = ktime_sub(ktime_get(), kt);
	pr_info("%s: %lu in-order lookups, %lluns/lookup\n",
		__func__, count, (long long)div64_ul(ktime_to_ns(kt), count));

	mock_timeline_fini(&tl);
	cond_resched();
	mock_timeline_init(&tl, 0);

	/* Benchmark searching for a random context id and maybe changing it */
	prandom_seed_state(&prng, i915_selftest.random_seed);
	count = 0;
	kt = ktime_get();
	end_time = jiffies + HZ/10;
	do {
		u32 id = random_engine(&prng);
		u32 seqno = prandom_u32_state(&prng);

		if (!__intel_timeline_sync_is_later(&tl, id, seqno))
			__intel_timeline_sync_set(&tl, id, seqno);

		count++;
	} while (!time_after(jiffies, end_time));
	kt = ktime_sub(ktime_get(), kt);
	kt = ktime_sub_ns(kt, (count * prng32_1M * 2) >> 20);
	pr_info("%s: %lu repeated insert/lookups, %lluns/op\n",
		__func__, count, (long long)div64_ul(ktime_to_ns(kt), count));
	mock_timeline_fini(&tl);
	cond_resched();
	/* Benchmark searching for a known context id and changing the seqno */
	for (last_order = 1, order = 1; order < 32;
	     ({ int tmp = last_order; last_order = order; order += tmp; })) {
		unsigned int mask = BIT(order) - 1;

		mock_timeline_init(&tl, 0);

		count = 0;
		kt = ktime_get();
		end_time = jiffies + HZ/10;
		do {
			/* Without assuming too many details of the underlying
			 * implementation, try to identify its phase-changes
			 * (and avoid them).
			 */
			u64 id = (u64)(count & mask) << order;

			__intel_timeline_sync_is_later(&tl, id, 0);
			__intel_timeline_sync_set(&tl, id, 0);

			count++;
		} while (!time_after(jiffies, end_time));
		kt = ktime_sub(ktime_get(), kt);
		pr_info("%s: %lu cyclic/%d insert/lookups, %lluns/op\n",
			__func__, count, order,
			(long long)div64_ul(ktime_to_ns(kt), count));
		mock_timeline_fini(&tl);
		cond_resched();
	}

	return 0;
}
int intel_timeline_mock_selftests(void)
{
	static const struct i915_subtest tests[] = {
		SUBTEST(mock_hwsp_freelist),
		SUBTEST(igt_sync),
		SUBTEST(bench_sync),
	};

	return i915_subtests(tests, NULL);
}
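
/*
 * A minimal usage sketch (assuming the standard i915 selftest knobs): the
 * mock subtests above run at module load without touching hardware, e.g.
 *	modprobe i915 mock_selftests=-1
 * and report results through the i915_selftest framework.
 */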
static int emit_ggtt_store_dw(struct i915_request *rq, u32 addr, u32 value)
{
	u32 *cs;

	cs = intel_ring_begin(rq, 4);
	if (IS_ERR(cs))
		return PTR_ERR(cs);

	if (INTEL_GEN(rq->i915) >= 8) {
		*cs++ = MI_STORE_DWORD_IMM_GEN4 | MI_USE_GGTT;
		*cs++ = addr;
		*cs++ = 0;
		*cs++ = value;
	} else if (INTEL_GEN(rq->i915) >= 4) {
		*cs++ = MI_STORE_DWORD_IMM_GEN4 | MI_USE_GGTT;
		*cs++ = 0;
		*cs++ = addr;
		*cs++ = value;
	} else {
		*cs++ = MI_STORE_DWORD_IMM | MI_MEM_VIRTUAL;
		*cs++ = addr;
		*cs++ = value;
		*cs++ = MI_NOOP;
	}

	intel_ring_advance(rq, cs);

	return 0;
}
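
/*
 * tl_write() below pins the timeline so its HWSP is resident in the GGTT,
 * then uses the engine's kernel context to emit a store of 'value' into
 * the timeline's breadcrumb slot, returning the request for the caller to
 * wait upon.
 */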
static struct i915_request *
tl_write(struct intel_timeline *tl, struct intel_engine_cs *engine, u32 value)
{
	struct i915_request *rq;
	int err;

	err = intel_timeline_pin(tl);
	if (err) {
		rq = ERR_PTR(err);
		goto out;
	}

	rq = intel_engine_create_kernel_request(engine);
	if (IS_ERR(rq))
		goto out_unpin;

	i915_request_get(rq);

	err = emit_ggtt_store_dw(rq, tl->hwsp_offset, value);
	i915_request_add(rq);
	if (err) {
		i915_request_put(rq);
		rq = ERR_PTR(err);
	}

out_unpin:
	intel_timeline_unpin(tl);
out:
	if (IS_ERR(rq))
		pr_err("Failed to write to timeline!\n");
	return rq;
}
static struct intel_timeline *
checked_intel_timeline_create(struct intel_gt *gt)
{
	struct intel_timeline *tl;

	tl = intel_timeline_create(gt, NULL);
	if (IS_ERR(tl))
		return tl;

	if (*tl->hwsp_seqno != tl->seqno) {
		pr_err("Timeline created with incorrect breadcrumb, found %x, expected %x\n",
		       *tl->hwsp_seqno, tl->seqno);
		intel_timeline_put(tl);
		return ERR_PTR(-EINVAL);
	}

	return tl;
}
static int live_hwsp_engine(void *arg)
{
#define NUM_TIMELINES 4096
	struct intel_gt *gt = arg;
	struct intel_timeline **timelines;
	struct intel_engine_cs *engine;
	enum intel_engine_id id;
	unsigned long count, n;
	int err = 0;

	/*
	 * Create a bunch of timelines and check we can write
	 * independently to each of their breadcrumb slots.
	 */

	timelines = kvmalloc_array(NUM_TIMELINES * I915_NUM_ENGINES,
				   sizeof(*timelines),
				   GFP_KERNEL);
	if (!timelines)
		return -ENOMEM;

	count = 0;
	for_each_engine(engine, gt, id) {
		if (!intel_engine_can_store_dword(engine))
			continue;

		intel_engine_pm_get(engine);

		for (n = 0; n < NUM_TIMELINES; n++) {
			struct intel_timeline *tl;
			struct i915_request *rq;

			tl = checked_intel_timeline_create(gt);
			if (IS_ERR(tl)) {
				err = PTR_ERR(tl);
				break;
			}

			rq = tl_write(tl, engine, count);
			if (IS_ERR(rq)) {
				intel_timeline_put(tl);
				err = PTR_ERR(rq);
				break;
			}

			timelines[count++] = tl;
			i915_request_put(rq);
		}

		intel_engine_pm_put(engine);
		if (err)
			break;
	}

	if (igt_flush_test(gt->i915))
		err = -EIO;

	for (n = 0; n < count; n++) {
		struct intel_timeline *tl = timelines[n];

		if (!err && *tl->hwsp_seqno != n) {
			pr_err("Invalid seqno stored in timeline %lu, found 0x%x\n",
			       n, *tl->hwsp_seqno);
			err = -EINVAL;
		}
		intel_timeline_put(tl);
	}

	kvfree(timelines);
	return err;
#undef NUM_TIMELINES
}
static int live_hwsp_alternate(void *arg)
{
#define NUM_TIMELINES 4096
	struct intel_gt *gt = arg;
	struct intel_timeline **timelines;
	struct intel_engine_cs *engine;
	enum intel_engine_id id;
	unsigned long count, n;
	int err = 0;

	/*
	 * Create a bunch of timelines and check we can write
	 * independently to each of their breadcrumb slots with adjacent
	 * engines.
	 */

	timelines = kvmalloc_array(NUM_TIMELINES * I915_NUM_ENGINES,
				   sizeof(*timelines),
				   GFP_KERNEL);
	if (!timelines)
		return -ENOMEM;

	count = 0;
	for (n = 0; n < NUM_TIMELINES; n++) {
		for_each_engine(engine, gt, id) {
			struct intel_timeline *tl;
			struct i915_request *rq;

			if (!intel_engine_can_store_dword(engine))
				continue;

			tl = checked_intel_timeline_create(gt);
			if (IS_ERR(tl)) {
				err = PTR_ERR(tl);
				goto out;
			}

			intel_engine_pm_get(engine);
			rq = tl_write(tl, engine, count);
			intel_engine_pm_put(engine);
			if (IS_ERR(rq)) {
				intel_timeline_put(tl);
				err = PTR_ERR(rq);
				goto out;
			}

			timelines[count++] = tl;
			i915_request_put(rq);
		}
	}

out:
	if (igt_flush_test(gt->i915))
		err = -EIO;

	for (n = 0; n < count; n++) {
		struct intel_timeline *tl = timelines[n];

		if (!err && *tl->hwsp_seqno != n) {
			pr_err("Invalid seqno stored in timeline %lu, found 0x%x\n",
			       n, *tl->hwsp_seqno);
			err = -EINVAL;
		}
		intel_timeline_put(tl);
	}

	kvfree(timelines);
	return err;
#undef NUM_TIMELINES
}
static int live_hwsp_wrap(void *arg)
{
	struct intel_gt *gt = arg;
	struct intel_engine_cs *engine;
	struct intel_timeline *tl;
	enum intel_engine_id id;
	int err = 0;

	/*
	 * Across a seqno wrap, we need to keep the old cacheline alive for
	 * foreign GPU references.
	 */

	tl = intel_timeline_create(gt, NULL);
	if (IS_ERR(tl))
		return PTR_ERR(tl);

	if (!tl->has_initial_breadcrumb || !tl->hwsp_cacheline)
		goto out_free;

	err = intel_timeline_pin(tl);
	if (err)
		goto out_free;

	for_each_engine(engine, gt, id) {
		const u32 *hwsp_seqno[2];
		struct i915_request *rq;
		u32 seqno[2];

		if (!intel_engine_can_store_dword(engine))
			continue;

		rq = intel_engine_create_kernel_request(engine);
		if (IS_ERR(rq)) {
			err = PTR_ERR(rq);
			goto out;
		}

		tl->seqno = -4u;

		mutex_lock_nested(&tl->mutex, SINGLE_DEPTH_NESTING);
		err = intel_timeline_get_seqno(tl, rq, &seqno[0]);
		mutex_unlock(&tl->mutex);
		if (err) {
			i915_request_add(rq);
			goto out;
		}
		pr_debug("seqno[0]:%08x, hwsp_offset:%08x\n",
			 seqno[0], tl->hwsp_offset);

		err = emit_ggtt_store_dw(rq, tl->hwsp_offset, seqno[0]);
		if (err) {
			i915_request_add(rq);
			goto out;
		}
		hwsp_seqno[0] = tl->hwsp_seqno;

		mutex_lock_nested(&tl->mutex, SINGLE_DEPTH_NESTING);
		err = intel_timeline_get_seqno(tl, rq, &seqno[1]);
		mutex_unlock(&tl->mutex);
		if (err) {
			i915_request_add(rq);
			goto out;
		}
		pr_debug("seqno[1]:%08x, hwsp_offset:%08x\n",
			 seqno[1], tl->hwsp_offset);

		err = emit_ggtt_store_dw(rq, tl->hwsp_offset, seqno[1]);
		if (err) {
			i915_request_add(rq);
			goto out;
		}
		hwsp_seqno[1] = tl->hwsp_seqno;

		/* With wrap should come a new hwsp */
		GEM_BUG_ON(seqno[1] >= seqno[0]);
		GEM_BUG_ON(hwsp_seqno[0] == hwsp_seqno[1]);

		i915_request_add(rq);

		if (i915_request_wait(rq, 0, HZ / 5) < 0) {
			pr_err("Wait for timeline writes timed out!\n");
			err = -EIO;
			goto out;
		}

		if (*hwsp_seqno[0] != seqno[0] || *hwsp_seqno[1] != seqno[1]) {
			pr_err("Bad timeline values: found (%x, %x), expected (%x, %x)\n",
			       *hwsp_seqno[0], *hwsp_seqno[1],
			       seqno[0], seqno[1]);
			err = -EINVAL;
			goto out;
		}

		intel_gt_retire_requests(gt); /* recycle HWSP */
	}

out:
	if (igt_flush_test(gt->i915))
		err = -EIO;

	intel_timeline_unpin(tl);
out_free:
	intel_timeline_put(tl);
	return err;
}
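
/*
 * The heartbeat emits background kernel requests and would advance the
 * kernel context's timeline behind our backs, so the rollover tests park
 * it (and hold an explicit pm wakeref) for the duration.
 */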
static void engine_heartbeat_disable(struct intel_engine_cs *engine,
				     unsigned long *saved)
{
	*saved = engine->props.heartbeat_interval_ms;
	engine->props.heartbeat_interval_ms = 0;

	intel_engine_pm_get(engine);
	intel_engine_park_heartbeat(engine);
}

static void engine_heartbeat_enable(struct intel_engine_cs *engine,
				    unsigned long saved)
{
	intel_engine_pm_put(engine);

	engine->props.heartbeat_interval_ms = saved;
}
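
/*
 * The rollover tests call timeline_rollback(), whose definition is not
 * visible in this extract; a minimal sketch is provided here, assuming it
 * mirrors timeline_advance() in intel_timeline.c, where each request
 * consumes one seqno plus one more for the initial breadcrumb.
 */
static void timeline_rollback(struct intel_timeline *tl)
{
	tl->seqno -= 1 + tl->has_initial_breadcrumb;
}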
static int live_hwsp_rollover_kernel(void *arg)
{
	struct intel_gt *gt = arg;
	struct intel_engine_cs *engine;
	enum intel_engine_id id;
	int err = 0;

	/*
	 * Run the host for long enough, and even the kernel context will
	 * see a seqno rollover.
	 */

	for_each_engine(engine, gt, id) {
		struct intel_context *ce = engine->kernel_context;
		struct intel_timeline *tl = ce->timeline;
		struct i915_request *rq[3] = {};
		unsigned long heartbeat;
		int i;

		engine_heartbeat_disable(engine, &heartbeat);
		if (intel_gt_wait_for_idle(gt, HZ / 2)) {
			err = -EIO;
			goto out;
		}

		GEM_BUG_ON(i915_active_fence_isset(&tl->last_request));
		tl->seqno = 0;
		timeline_rollback(tl);
		timeline_rollback(tl);
		WRITE_ONCE(*(u32 *)tl->hwsp_seqno, tl->seqno);

		for (i = 0; i < ARRAY_SIZE(rq); i++) {
			struct i915_request *this;

			this = i915_request_create(ce);
			if (IS_ERR(this)) {
				err = PTR_ERR(this);
				goto out;
			}

			pr_debug("%s: create fence.seqno:%d\n",
				 engine->name,
				 lower_32_bits(this->fence.seqno));

			GEM_BUG_ON(rcu_access_pointer(this->timeline) != tl);

			rq[i] = i915_request_get(this);
			i915_request_add(this);
		}

		/* We expected a wrap! */
		GEM_BUG_ON(rq[2]->fence.seqno > rq[0]->fence.seqno);

		if (i915_request_wait(rq[2], 0, HZ / 5) < 0) {
			pr_err("Wait for timeline wrap timed out!\n");
			err = -EIO;
			goto out;
		}

		for (i = 0; i < ARRAY_SIZE(rq); i++) {
			if (!i915_request_completed(rq[i])) {
				pr_err("Pre-wrap request not completed!\n");
				err = -EINVAL;
				goto out;
			}
		}

out:
		for (i = 0; i < ARRAY_SIZE(rq); i++)
			i915_request_put(rq[i]);
		engine_heartbeat_enable(engine, heartbeat);
		if (err)
			break;
	}

	if (igt_flush_test(gt->i915))
		err = -EIO;

	return err;
}
static int live_hwsp_rollover_user(void *arg)
{
	struct intel_gt *gt = arg;
	struct intel_engine_cs *engine;
	enum intel_engine_id id;
	int err = 0;

	/*
	 * Simulate a long running user context, and force the seqno wrap
	 * on the user's timeline.
	 */

	for_each_engine(engine, gt, id) {
		struct i915_request *rq[3] = {};
		struct intel_timeline *tl;
		struct intel_context *ce;
		int i;

		ce = intel_context_create(engine);
		if (IS_ERR(ce))
			return PTR_ERR(ce);

		err = intel_context_alloc_state(ce);
		if (err)
			goto out;

		tl = ce->timeline;
		if (!tl->has_initial_breadcrumb || !tl->hwsp_cacheline)
			goto out;

		timeline_rollback(tl);
		timeline_rollback(tl);
		WRITE_ONCE(*(u32 *)tl->hwsp_seqno, tl->seqno);

		for (i = 0; i < ARRAY_SIZE(rq); i++) {
			struct i915_request *this;

			this = intel_context_create_request(ce);
			if (IS_ERR(this)) {
				err = PTR_ERR(this);
				goto out;
			}

			pr_debug("%s: create fence.seqno:%d\n",
				 engine->name,
				 lower_32_bits(this->fence.seqno));

			GEM_BUG_ON(rcu_access_pointer(this->timeline) != tl);

			rq[i] = i915_request_get(this);
			i915_request_add(this);
		}

		/* We expected a wrap! */
		GEM_BUG_ON(rq[2]->fence.seqno > rq[0]->fence.seqno);

		if (i915_request_wait(rq[2], 0, HZ / 5) < 0) {
			pr_err("Wait for timeline wrap timed out!\n");
			err = -EIO;
			goto out;
		}

		for (i = 0; i < ARRAY_SIZE(rq); i++) {
			if (!i915_request_completed(rq[i])) {
				pr_err("Pre-wrap request not completed!\n");
				err = -EINVAL;
				goto out;
			}
		}

out:
		for (i = 0; i < ARRAY_SIZE(rq); i++)
			i915_request_put(rq[i]);
		intel_context_put(ce);
		if (err)
			break;
	}

	if (igt_flush_test(gt->i915))
		err = -EIO;

	return err;
}
static int live_hwsp_recycle(void *arg)
{
	struct intel_gt *gt = arg;
	struct intel_engine_cs *engine;
	enum intel_engine_id id;
	unsigned long count;
	int err = 0;

	/*
	 * Check seqno writes into one timeline at a time. We expect to
	 * recycle the breadcrumb slot between iterations and neither
	 * want to confuse ourselves or the GPU.
	 */

	count = 0;
	for_each_engine(engine, gt, id) {
		IGT_TIMEOUT(end_time);

		if (!intel_engine_can_store_dword(engine))
			continue;

		intel_engine_pm_get(engine);

		do {
			struct intel_timeline *tl;
			struct i915_request *rq;

			tl = checked_intel_timeline_create(gt);
			if (IS_ERR(tl)) {
				err = PTR_ERR(tl);
				break;
			}

			rq = tl_write(tl, engine, count);
			if (IS_ERR(rq)) {
				intel_timeline_put(tl);
				err = PTR_ERR(rq);
				break;
			}

			if (i915_request_wait(rq, 0, HZ / 5) < 0) {
				pr_err("Wait for timeline writes timed out!\n");
				i915_request_put(rq);
				intel_timeline_put(tl);
				err = -EIO;
				break;
			}

			if (*tl->hwsp_seqno != count) {
				pr_err("Invalid seqno stored in timeline %lu, found 0x%x\n",
				       count, *tl->hwsp_seqno);
				err = -EINVAL;
			}

			i915_request_put(rq);
			intel_timeline_put(tl);
			count++;

			if (err)
				break;
		} while (!__igt_timeout(end_time, NULL));

		intel_engine_pm_put(engine);
		if (err)
			break;
	}

	return err;
}
int intel_timeline_live_selftests(struct drm_i915_private *i915)
{
	static const struct i915_subtest tests[] = {
		SUBTEST(live_hwsp_recycle),
		SUBTEST(live_hwsp_engine),
		SUBTEST(live_hwsp_alternate),
		SUBTEST(live_hwsp_wrap),
		SUBTEST(live_hwsp_rollover_kernel),
		SUBTEST(live_hwsp_rollover_user),
	};

	if (intel_gt_is_wedged(&i915->gt))
		return 0;

	return intel_gt_live_subtests(tests, &i915->gt);
}
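
/*
 * As with the mock tests, a usage sketch (assuming the standard knobs):
 *	modprobe i915 live_selftests=-1
 * runs the live subtests above against real hardware; a wedged GT skips
 * them entirely, as checked before dispatch.
 */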