// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright © 2019 Intel Corporation
 */

#include "i915_selftest.h"
#include "intel_engine_heartbeat.h"
#include "intel_engine_pm.h"
#include "intel_gt.h"

#include "gem/selftests/mock_context.h"
#include "selftests/igt_flush_test.h"
#include "selftests/mock_drm.h"

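/*
 * Submit a request and wait for it to complete, holding the request's
 * timeline mutex throughout. On success, everything up to and including
 * the request is retired before returning.
 */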
static int request_sync(struct i915_request *rq)
{
	struct intel_timeline *tl = i915_request_timeline(rq);
	long timeout;
	int err = 0;

	intel_timeline_get(tl);
	i915_request_get(rq);

	/* Opencode i915_request_add() so we can keep the timeline locked. */
	__i915_request_commit(rq);
	rq->sched.attr.priority = I915_PRIORITY_BARRIER;
	__i915_request_queue_bh(rq);

	timeout = i915_request_wait(rq, 0, HZ / 10);
	if (timeout < 0)
		err = timeout;
	else
		i915_request_retire_upto(rq);

	lockdep_unpin_lock(&tl->mutex, rq->cookie);
	mutex_unlock(&tl->mutex);

	i915_request_put(rq);
	intel_timeline_put(tl);

	return err;
}

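/*
 * Drain the timeline of @ce: wait upon and retire each outstanding
 * request in turn, then wait for any idle-barriers still executing on
 * a remote CPU before the caller inspects ce->active.
 */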
static int context_sync(struct intel_context *ce)
{
	struct intel_timeline *tl = ce->timeline;
	int err = 0;

	mutex_lock(&tl->mutex);
	do {
		struct i915_request *rq;
		long timeout;

		if (list_empty(&tl->requests))
			break;

		rq = list_last_entry(&tl->requests, typeof(*rq), link);
		i915_request_get(rq);

		timeout = i915_request_wait(rq, 0, HZ / 10);
		if (timeout < 0)
			err = timeout;
		else
			i915_request_retire_upto(rq);

		i915_request_put(rq);
	} while (!err);
	mutex_unlock(&tl->mutex);

	/* Wait for all barriers to complete (remote CPU) before we check */
	i915_active_unlock_wait(&ce->active);
	return err;
}

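/*
 * Poison the page beyond the claimed context size, run requests through
 * the context, and then check whether the HW wrote past the end of its
 * context image into our red-zone.
 */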
static int __live_context_size(struct intel_engine_cs *engine)
{
	struct intel_context *ce;
	struct i915_request *rq;
	void *vaddr;
	int err;

	ce = intel_context_create(engine);
	if (IS_ERR(ce))
		return PTR_ERR(ce);

	err = intel_context_pin(ce);
	if (err)
		goto err;

	vaddr = i915_gem_object_pin_map_unlocked(ce->state->obj,
						 i915_coherent_map_type(engine->i915,
									ce->state->obj, false));
	if (IS_ERR(vaddr)) {
		err = PTR_ERR(vaddr);
		intel_context_unpin(ce);
		goto err;
	}

	/*
	 * Note that execlists also applies a redzone which it checks on
	 * context unpin when debugging. We are using the same location
	 * and same poison value so that our checks overlap. Despite the
	 * redundancy, we want to keep this little selftest so that we
	 * get coverage of any and all submission backends, and we can
	 * always extend this test to ensure we trick the HW into a
	 * compromising position wrt the various sections that need
	 * to be written into the context state.
	 *
	 * TLDR; this overlaps with the execlists redzone.
	 */
	vaddr += engine->context_size - I915_GTT_PAGE_SIZE;
	memset(vaddr, POISON_INUSE, I915_GTT_PAGE_SIZE);

	rq = intel_context_create_request(ce);
	intel_context_unpin(ce);
	if (IS_ERR(rq)) {
		err = PTR_ERR(rq);
		goto err_unpin;
	}

	err = request_sync(rq);
	if (err)
		goto err_unpin;

	/* Force the context switch */
	rq = intel_engine_create_kernel_request(engine);
	if (IS_ERR(rq)) {
		err = PTR_ERR(rq);
		goto err_unpin;
	}

	err = request_sync(rq);
	if (err)
		goto err_unpin;

	if (memchr_inv(vaddr, POISON_INUSE, I915_GTT_PAGE_SIZE)) {
		pr_err("%s context overwrote trailing red-zone!\n", engine->name);
		err = -EINVAL;
	}

err_unpin:
	i915_gem_object_unpin_map(ce->state->obj);
err:
	intel_context_put(ce);
	return err;
}

static int live_context_size(void *arg)
{
	struct intel_gt *gt = arg;
	struct intel_engine_cs *engine;
	enum intel_engine_id id;
	int err = 0;

	/*
	 * Check that our context sizes are correct by seeing if the
	 * HW tries to write past the end of one.
	 */

	for_each_engine(engine, gt, id) {
		struct file *saved;

		if (!engine->context_size)
			continue;

		intel_engine_pm_get(engine);

		/*
		 * Hide the old default state -- we lie about the context size
		 * and get confused when the default state is smaller than
		 * expected. For our do-nothing request, inheriting the
		 * active state is sufficient; we are only checking that we
		 * don't use more than we planned.
		 */
		saved = fetch_and_zero(&engine->default_state);

		/* Overlaps with the execlists redzone */
		engine->context_size += I915_GTT_PAGE_SIZE;

		err = __live_context_size(engine);

		engine->context_size -= I915_GTT_PAGE_SIZE;
		engine->default_state = saved;

		intel_engine_pm_put(engine);

		if (err)
			break;
	}

	return err;
}

static int __live_active_context(struct intel_engine_cs *engine)
{
	unsigned long saved_heartbeat;
	struct intel_context *ce;
	int pass;
	int err = 0;

	/*
	 * We keep active contexts alive until after a subsequent context
	 * switch, as the final write from the context-save will be after
	 * we retire the final request. We track when we unpin the context,
	 * under the presumption that the final pin is from the last request,
	 * and instead of immediately unpinning the context, we add a task
	 * to unpin the context from the next idle-barrier.
	 *
	 * This test makes sure that the context is kept alive until a
	 * subsequent idle-barrier (emitted when the engine wakeref hits 0
	 * with no more outstanding requests).
	 *
	 * In GuC submission mode we don't use idle barriers; instead we
	 * get a message from the GuC to signal that it is safe to unpin
	 * the context from memory.
	 */
	if (intel_engine_uses_guc(engine))
		return 0;

	if (intel_engine_pm_is_awake(engine)) {
		pr_err("%s is awake before starting %s!\n",
		       engine->name, __func__);
		return -EINVAL;
	}

	ce = intel_context_create(engine);
	if (IS_ERR(ce))
		return PTR_ERR(ce);

	saved_heartbeat = engine->props.heartbeat_interval_ms;
	engine->props.heartbeat_interval_ms = 0;

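	/*
	 * Run the pin/submit/unpin cycle a few times so that, after the
	 * first pass, we re-pin a context that the previous idle-barrier
	 * is still keeping alive.
	 */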
	for (pass = 0; pass <= 2; pass++) {
		struct i915_request *rq;

		intel_engine_pm_get(engine);

		rq = intel_context_create_request(ce);
		if (IS_ERR(rq)) {
			err = PTR_ERR(rq);
			goto out_engine;
		}

		err = request_sync(rq);
		if (err)
			goto out_engine;

		/* Context will be kept active until after an idle-barrier. */
		if (i915_active_is_idle(&ce->active)) {
			pr_err("context is not active; expected idle-barrier (%s pass %d)\n",
			       engine->name, pass);
			err = -EINVAL;
			goto out_engine;
		}

		if (!intel_engine_pm_is_awake(engine)) {
			pr_err("%s is asleep before idle-barrier\n",
			       engine->name);
			err = -EINVAL;
			goto out_engine;
		}

out_engine:
		intel_engine_pm_put(engine);
		if (err)
			goto err;
	}

	/* Now make sure our idle-barriers are flushed */
	err = intel_engine_flush_barriers(engine);
	if (err)
		goto err;

	/* Wait for the barrier and in the process wait for engine to park */
	err = context_sync(engine->kernel_context);
	if (err)
		goto err;

	if (!i915_active_is_idle(&ce->active)) {
		pr_err("context is still active!\n");
		err = -EINVAL;
	}

	intel_engine_pm_flush(engine);

	if (intel_engine_pm_is_awake(engine)) {
		struct drm_printer p = drm_debug_printer(__func__);

		intel_engine_dump(engine, &p,
				  "%s is still awake:%d after idle-barriers\n",
				  engine->name,
				  atomic_read(&engine->wakeref.count));
		err = -EINVAL;
	}

err:
	engine->props.heartbeat_interval_ms = saved_heartbeat;
	intel_context_put(ce);
	return err;
}

static int live_active_context(void *arg)
{
	struct intel_gt *gt = arg;
	struct intel_engine_cs *engine;
	enum intel_engine_id id;
	int err = 0;

	for_each_engine(engine, gt, id) {
		err = __live_active_context(engine);
		if (err)
			break;

		err = igt_flush_test(gt->i915);
		if (err)
			break;
	}

	return err;
}

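/*
 * Submit a request on @ce that has been prepared to operate on the
 * context image of @remote, so that the request is also tracked in
 * remote->active.
 */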
static int __remote_sync(struct intel_context *ce, struct intel_context *remote)
{
	struct i915_request *rq;
	int err;

	err = intel_context_pin(remote);
	if (err)
		return err;

	rq = intel_context_create_request(ce);
	if (IS_ERR(rq)) {
		err = PTR_ERR(rq);
		goto unpin;
	}

	err = intel_context_prepare_remote_request(remote, rq);
	if (err) {
		i915_request_add(rq);
		goto unpin;
	}

	err = request_sync(rq);

unpin:
	intel_context_unpin(remote);
	return err;
}

static int __live_remote_context(struct intel_engine_cs *engine)
{
	struct intel_context *local, *remote;
	unsigned long saved_heartbeat;
	int pass;
	int err = 0;

	/*
	 * Check that our idle barriers do not interfere with normal
	 * activity tracking. In particular, check that operating
	 * on the context image remotely (intel_context_prepare_remote_request),
	 * which inserts foreign fences into intel_context.active, does not
	 * clobber the idle-barrier.
	 *
	 * In GuC submission mode we don't use idle barriers.
	 */
	if (intel_engine_uses_guc(engine))
		return 0;

	if (intel_engine_pm_is_awake(engine)) {
		pr_err("%s is awake before starting %s!\n",
		       engine->name, __func__);
		return -EINVAL;
	}

	remote = intel_context_create(engine);
	if (IS_ERR(remote))
		return PTR_ERR(remote);

	local = intel_context_create(engine);
	if (IS_ERR(local)) {
		err = PTR_ERR(local);
		goto err_remote;
	}

	saved_heartbeat = engine->props.heartbeat_interval_ms;
	engine->props.heartbeat_interval_ms = 0;
	intel_engine_pm_get(engine);

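	/*
	 * Each pass pokes the remote context twice, once from an ordinary
	 * user context and once from the kernel context, and then checks
	 * that the idle-barrier still keeps the remote context active.
	 */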
	for (pass = 0; pass <= 2; pass++) {
		err = __remote_sync(local, remote);
		if (err)
			break;

		err = __remote_sync(engine->kernel_context, remote);
		if (err)
			break;

		if (i915_active_is_idle(&remote->active)) {
			pr_err("remote context is not active; expected idle-barrier (%s pass %d)\n",
			       engine->name, pass);
			err = -EINVAL;
			break;
		}
	}

	intel_engine_pm_put(engine);
	engine->props.heartbeat_interval_ms = saved_heartbeat;

	intel_context_put(local);
err_remote:
	intel_context_put(remote);
	return err;
}

static int live_remote_context(void *arg)
{
	struct intel_gt *gt = arg;
	struct intel_engine_cs *engine;
	enum intel_engine_id id;
	int err = 0;

	for_each_engine(engine, gt, id) {
		err = __live_remote_context(engine);
		if (err)
			break;

		err = igt_flush_test(gt->i915);
		if (err)
			break;
	}

	return err;
}

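/*
 * Entry point for the live selftests; requires real hardware and is a
 * no-op if the GT is already wedged.
 */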
int intel_context_live_selftests(struct drm_i915_private *i915)
{
	static const struct i915_subtest tests[] = {
		SUBTEST(live_context_size),
		SUBTEST(live_active_context),
		SUBTEST(live_remote_context),
	};
	struct intel_gt *gt = &i915->gt;

	if (intel_gt_is_wedged(gt))
		return 0;

	return intel_gt_live_subtests(tests, gt);
}