drivers/gpu/drm/i915/selftests/intel_lrc.c
/*
 * SPDX-License-Identifier: MIT
 *
 * Copyright © 2018 Intel Corporation
 */

#include "../i915_selftest.h"
#include "igt_flush_test.h"

#include "mock_context.h"

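/*
 * A "spinner" is a self-referencing batch that loops on the GPU until
 * terminated from the CPU, paired with a heartbeat page (hws) that the
 * batch writes its seqno into so the test can tell it has started.
 */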
struct spinner {
        struct drm_i915_private *i915;
        struct drm_i915_gem_object *hws;
        struct drm_i915_gem_object *obj;
        u32 *batch;
        void *seqno;
};

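/*
 * Allocate and map the spinner's two backing objects. The heartbeat
 * page is always mapped write-back as the CPU polls it while the GPU
 * spins; the batch itself uses write-combining on non-LLC platforms.
 */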
static int spinner_init(struct spinner *spin, struct drm_i915_private *i915)
{
        unsigned int mode;
        void *vaddr;
        int err;

        GEM_BUG_ON(INTEL_GEN(i915) < 8);

        memset(spin, 0, sizeof(*spin));
        spin->i915 = i915;

        spin->hws = i915_gem_object_create_internal(i915, PAGE_SIZE);
        if (IS_ERR(spin->hws)) {
                err = PTR_ERR(spin->hws);
                goto err;
        }

        spin->obj = i915_gem_object_create_internal(i915, PAGE_SIZE);
        if (IS_ERR(spin->obj)) {
                err = PTR_ERR(spin->obj);
                goto err_hws;
        }

        i915_gem_object_set_cache_level(spin->hws, I915_CACHE_LLC);
        vaddr = i915_gem_object_pin_map(spin->hws, I915_MAP_WB);
        if (IS_ERR(vaddr)) {
                err = PTR_ERR(vaddr);
                goto err_obj;
        }
        spin->seqno = memset(vaddr, 0xff, PAGE_SIZE);

        mode = HAS_LLC(i915) ? I915_MAP_WB : I915_MAP_WC;
        vaddr = i915_gem_object_pin_map(spin->obj, mode);
        if (IS_ERR(vaddr)) {
                err = PTR_ERR(vaddr);
                goto err_unpin_hws;
        }
        spin->batch = vaddr;

        return 0;

err_unpin_hws:
        i915_gem_object_unpin_map(spin->hws);
err_obj:
        i915_gem_object_put(spin->obj);
err_hws:
        i915_gem_object_put(spin->hws);
err:
        return err;
}

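/*
 * Each fence context is assigned its own dword within the shared
 * heartbeat page, wrapping at the page boundary.
 */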
static unsigned int seqno_offset(u64 fence)
{
        return offset_in_page(sizeof(u32) * fence);
}

static u64 hws_address(const struct i915_vma *hws,
                       const struct i915_request *rq)
{
        return hws->node.start + seqno_offset(rq->fence.context);
}

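/*
 * Bind the spinner's objects into the request's ppgtt and write the
 * recursive batch: report our seqno to the heartbeat page, execute the
 * caller's arbitration command (MI_ARB_CHECK to insert a preemption
 * point, or MI_NOOP to omit one), then branch back to the start of the
 * batch so that it spins until terminated.
 */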
static int emit_recurse_batch(struct spinner *spin,
                              struct i915_request *rq,
                              u32 arbitration_command)
{
        struct i915_address_space *vm = &rq->gem_context->ppgtt->vm;
        struct i915_vma *hws, *vma;
        u32 *batch;
        int err;

        vma = i915_vma_instance(spin->obj, vm, NULL);
        if (IS_ERR(vma))
                return PTR_ERR(vma);

        hws = i915_vma_instance(spin->hws, vm, NULL);
        if (IS_ERR(hws))
                return PTR_ERR(hws);

        err = i915_vma_pin(vma, 0, 0, PIN_USER);
        if (err)
                return err;

        err = i915_vma_pin(hws, 0, 0, PIN_USER);
        if (err)
                goto unpin_vma;

        err = i915_vma_move_to_active(vma, rq, 0);
        if (err)
                goto unpin_hws;

        if (!i915_gem_object_has_active_reference(vma->obj)) {
                i915_gem_object_get(vma->obj);
                i915_gem_object_set_active_reference(vma->obj);
        }

        err = i915_vma_move_to_active(hws, rq, 0);
        if (err)
                goto unpin_hws;

        if (!i915_gem_object_has_active_reference(hws->obj)) {
                i915_gem_object_get(hws->obj);
                i915_gem_object_set_active_reference(hws->obj);
        }

        batch = spin->batch;

        /* Announce that the spinner is running: store our seqno to the hws */
        *batch++ = MI_STORE_DWORD_IMM_GEN4;
        *batch++ = lower_32_bits(hws_address(hws, rq));
        *batch++ = upper_32_bits(hws_address(hws, rq));
        *batch++ = rq->fence.seqno;

        /* Either MI_ARB_CHECK (a preemption point) or MI_NOOP */
        *batch++ = arbitration_command;

        /* Jump back to the start of this batch, spinning until interrupted */
        *batch++ = MI_BATCH_BUFFER_START | 1 << 8 | 1;
        *batch++ = lower_32_bits(vma->node.start);
        *batch++ = upper_32_bits(vma->node.start);
        *batch++ = MI_BATCH_BUFFER_END; /* not reached */

        i915_gem_chipset_flush(spin->i915);

        err = rq->engine->emit_bb_start(rq, vma->node.start, PAGE_SIZE, 0);

unpin_hws:
        i915_vma_unpin(hws);
unpin_vma:
        i915_vma_unpin(vma);
        return err;
}

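/*
 * Construct a request on @engine for @ctx carrying the spinning batch.
 * If emitting the batch fails, the request is still added so that it
 * is retired cleanly before the error is propagated.
 */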
static struct i915_request *
spinner_create_request(struct spinner *spin,
                       struct i915_gem_context *ctx,
                       struct intel_engine_cs *engine,
                       u32 arbitration_command)
{
        struct i915_request *rq;
        int err;

        rq = i915_request_alloc(engine, ctx);
        if (IS_ERR(rq))
                return rq;

        err = emit_recurse_batch(spin, rq, arbitration_command);
        if (err) {
                i915_request_add(rq);
                return ERR_PTR(err);
        }

        return rq;
}

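/* Sample the seqno the spinning batch last wrote to its heartbeat page. */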
static u32 hws_seqno(const struct spinner *spin, const struct i915_request *rq)
{
        u32 *seqno = spin->seqno + seqno_offset(rq->fence.context);

        return READ_ONCE(*seqno);
}

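/*
 * Terminate the spinner by overwriting the first instruction of the
 * loop with MI_BATCH_BUFFER_END and flushing the write to memory.
 */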
static void spinner_end(struct spinner *spin)
{
        *spin->batch = MI_BATCH_BUFFER_END;
        i915_gem_chipset_flush(spin->i915);
}

static void spinner_fini(struct spinner *spin)
{
        spinner_end(spin);

        i915_gem_object_unpin_map(spin->obj);
        i915_gem_object_put(spin->obj);

        i915_gem_object_unpin_map(spin->hws);
        i915_gem_object_put(spin->hws);
}

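/*
 * Wait briefly for the request to be submitted to hardware (i.e. to be
 * assigned a global seqno), then for its batch to report itself via the
 * heartbeat page: first a 10us busy-wait, falling back to a sleeping
 * wait of up to a second.
 */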
static bool wait_for_spinner(struct spinner *spin, struct i915_request *rq)
{
        if (!wait_event_timeout(rq->execute,
                                READ_ONCE(rq->global_seqno),
                                msecs_to_jiffies(10)))
                return false;

        return !(wait_for_us(i915_seqno_passed(hws_seqno(spin, rq),
                                               rq->fence.seqno),
                             10) &&
                 wait_for(i915_seqno_passed(hws_seqno(spin, rq),
                                            rq->fence.seqno),
                          1000));
}

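/*
 * Smoke test: on each engine, start a spinner, confirm it is executing,
 * then terminate it. Failure to start is treated as a hung GPU and
 * wedges the device.
 */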
static int live_sanitycheck(void *arg)
{
        struct drm_i915_private *i915 = arg;
        struct intel_engine_cs *engine;
        struct i915_gem_context *ctx;
        enum intel_engine_id id;
        struct spinner spin;
        int err = -ENOMEM;

        if (!HAS_LOGICAL_RING_CONTEXTS(i915))
                return 0;

        mutex_lock(&i915->drm.struct_mutex);

        if (spinner_init(&spin, i915))
                goto err_unlock;

        ctx = kernel_context(i915);
        if (!ctx)
                goto err_spin;

        for_each_engine(engine, i915, id) {
                struct i915_request *rq;

                rq = spinner_create_request(&spin, ctx, engine, MI_NOOP);
                if (IS_ERR(rq)) {
                        err = PTR_ERR(rq);
                        goto err_ctx;
                }

                i915_request_add(rq);
                if (!wait_for_spinner(&spin, rq)) {
                        GEM_TRACE("spinner failed to start\n");
                        GEM_TRACE_DUMP();
                        i915_gem_set_wedged(i915);
                        err = -EIO;
                        goto err_ctx;
                }

                spinner_end(&spin);
                if (igt_flush_test(i915, I915_WAIT_LOCKED)) {
                        err = -EIO;
                        goto err_ctx;
                }
        }

        err = 0;
err_ctx:
        kernel_context_close(ctx);
err_spin:
        spinner_fini(&spin);
err_unlock:
        igt_flush_test(i915, I915_WAIT_LOCKED);
        mutex_unlock(&i915->drm.struct_mutex);
        return err;
}

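/*
 * Start a spinner from a minimum-priority context, then submit a second
 * spinner from a maximum-priority context and verify that the scheduler
 * preempts the spinning low-priority batch on every engine.
 */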
static int live_preempt(void *arg)
{
        struct drm_i915_private *i915 = arg;
        struct i915_gem_context *ctx_hi, *ctx_lo;
        struct spinner spin_hi, spin_lo;
        struct intel_engine_cs *engine;
        enum intel_engine_id id;
        int err = -ENOMEM;

        if (!HAS_LOGICAL_RING_PREEMPTION(i915))
                return 0;

        mutex_lock(&i915->drm.struct_mutex);

        if (spinner_init(&spin_hi, i915))
                goto err_unlock;

        if (spinner_init(&spin_lo, i915))
                goto err_spin_hi;

        ctx_hi = kernel_context(i915);
        if (!ctx_hi)
                goto err_spin_lo;
        ctx_hi->sched.priority = I915_CONTEXT_MAX_USER_PRIORITY;

        ctx_lo = kernel_context(i915);
        if (!ctx_lo)
                goto err_ctx_hi;
        ctx_lo->sched.priority = I915_CONTEXT_MIN_USER_PRIORITY;

        for_each_engine(engine, i915, id) {
                struct i915_request *rq;

                rq = spinner_create_request(&spin_lo, ctx_lo, engine,
                                            MI_ARB_CHECK);
                if (IS_ERR(rq)) {
                        err = PTR_ERR(rq);
                        goto err_ctx_lo;
                }

                i915_request_add(rq);
                if (!wait_for_spinner(&spin_lo, rq)) {
                        GEM_TRACE("lo spinner failed to start\n");
                        GEM_TRACE_DUMP();
                        i915_gem_set_wedged(i915);
                        err = -EIO;
                        goto err_ctx_lo;
                }

                rq = spinner_create_request(&spin_hi, ctx_hi, engine,
                                            MI_ARB_CHECK);
                if (IS_ERR(rq)) {
                        spinner_end(&spin_lo);
                        err = PTR_ERR(rq);
                        goto err_ctx_lo;
                }

                i915_request_add(rq);
                if (!wait_for_spinner(&spin_hi, rq)) {
                        GEM_TRACE("hi spinner failed to start\n");
                        GEM_TRACE_DUMP();
                        i915_gem_set_wedged(i915);
                        err = -EIO;
                        goto err_ctx_lo;
                }

                spinner_end(&spin_hi);
                spinner_end(&spin_lo);
                if (igt_flush_test(i915, I915_WAIT_LOCKED)) {
                        err = -EIO;
                        goto err_ctx_lo;
                }
        }

        err = 0;
err_ctx_lo:
        kernel_context_close(ctx_lo);
err_ctx_hi:
        kernel_context_close(ctx_hi);
err_spin_lo:
        spinner_fini(&spin_lo);
err_spin_hi:
        spinner_fini(&spin_hi);
err_unlock:
        igt_flush_test(i915, I915_WAIT_LOCKED);
        mutex_unlock(&i915->drm.struct_mutex);
        return err;
}

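/*
 * As live_preempt, but both contexts are created at default priority.
 * Only after checking that the second request does not overtake the
 * first do we raise its priority via engine->schedule() and expect the
 * late preemption to take effect.
 */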
static int live_late_preempt(void *arg)
{
        struct drm_i915_private *i915 = arg;
        struct i915_gem_context *ctx_hi, *ctx_lo;
        struct spinner spin_hi, spin_lo;
        struct intel_engine_cs *engine;
        struct i915_sched_attr attr = {};
        enum intel_engine_id id;
        int err = -ENOMEM;

        if (!HAS_LOGICAL_RING_PREEMPTION(i915))
                return 0;

        mutex_lock(&i915->drm.struct_mutex);

        if (spinner_init(&spin_hi, i915))
                goto err_unlock;

        if (spinner_init(&spin_lo, i915))
                goto err_spin_hi;

        ctx_hi = kernel_context(i915);
        if (!ctx_hi)
                goto err_spin_lo;

        ctx_lo = kernel_context(i915);
        if (!ctx_lo)
                goto err_ctx_hi;

        for_each_engine(engine, i915, id) {
                struct i915_request *rq;

                rq = spinner_create_request(&spin_lo, ctx_lo, engine,
                                            MI_ARB_CHECK);
                if (IS_ERR(rq)) {
                        err = PTR_ERR(rq);
                        goto err_ctx_lo;
                }

                i915_request_add(rq);
                if (!wait_for_spinner(&spin_lo, rq)) {
                        pr_err("First context failed to start\n");
                        goto err_wedged;
                }

                rq = spinner_create_request(&spin_hi, ctx_hi, engine, MI_NOOP);
                if (IS_ERR(rq)) {
                        spinner_end(&spin_lo);
                        err = PTR_ERR(rq);
                        goto err_ctx_lo;
                }

                i915_request_add(rq);
                if (wait_for_spinner(&spin_hi, rq)) {
                        pr_err("Second context overtook first?\n");
                        goto err_wedged;
                }

                attr.priority = I915_PRIORITY_MAX;
                engine->schedule(rq, &attr);

                if (!wait_for_spinner(&spin_hi, rq)) {
                        pr_err("High priority context failed to preempt the low priority context\n");
                        GEM_TRACE_DUMP();
                        goto err_wedged;
                }

                spinner_end(&spin_hi);
                spinner_end(&spin_lo);
                if (igt_flush_test(i915, I915_WAIT_LOCKED)) {
                        err = -EIO;
                        goto err_ctx_lo;
                }
        }

        err = 0;
err_ctx_lo:
        kernel_context_close(ctx_lo);
err_ctx_hi:
        kernel_context_close(ctx_hi);
err_spin_lo:
        spinner_fini(&spin_lo);
err_spin_hi:
        spinner_fini(&spin_hi);
err_unlock:
        igt_flush_test(i915, I915_WAIT_LOCKED);
        mutex_unlock(&i915->drm.struct_mutex);
        return err;

err_wedged:
        spinner_end(&spin_hi);
        spinner_end(&spin_lo);
        i915_gem_set_wedged(i915);
        err = -EIO;
        goto err_ctx_lo;
}

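/*
 * Exercise recovery from a hung preemption: with a hang injected at the
 * preemption point, wait for the injection to be reported, reset the
 * engine, and verify that the high-priority spinner then runs.
 * Requires per-engine reset support.
 */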
static int live_preempt_hang(void *arg)
{
        struct drm_i915_private *i915 = arg;
        struct i915_gem_context *ctx_hi, *ctx_lo;
        struct spinner spin_hi, spin_lo;
        struct intel_engine_cs *engine;
        enum intel_engine_id id;
        int err = -ENOMEM;

        if (!HAS_LOGICAL_RING_PREEMPTION(i915))
                return 0;

        if (!intel_has_reset_engine(i915))
                return 0;

        mutex_lock(&i915->drm.struct_mutex);

        if (spinner_init(&spin_hi, i915))
                goto err_unlock;

        if (spinner_init(&spin_lo, i915))
                goto err_spin_hi;

        ctx_hi = kernel_context(i915);
        if (!ctx_hi)
                goto err_spin_lo;
        ctx_hi->sched.priority = I915_CONTEXT_MAX_USER_PRIORITY;

        ctx_lo = kernel_context(i915);
        if (!ctx_lo)
                goto err_ctx_hi;
        ctx_lo->sched.priority = I915_CONTEXT_MIN_USER_PRIORITY;

        for_each_engine(engine, i915, id) {
                struct i915_request *rq;

                if (!intel_engine_has_preemption(engine))
                        continue;

                rq = spinner_create_request(&spin_lo, ctx_lo, engine,
                                            MI_ARB_CHECK);
                if (IS_ERR(rq)) {
                        err = PTR_ERR(rq);
                        goto err_ctx_lo;
                }

                i915_request_add(rq);
                if (!wait_for_spinner(&spin_lo, rq)) {
                        GEM_TRACE("lo spinner failed to start\n");
                        GEM_TRACE_DUMP();
                        i915_gem_set_wedged(i915);
                        err = -EIO;
                        goto err_ctx_lo;
                }

                rq = spinner_create_request(&spin_hi, ctx_hi, engine,
                                            MI_ARB_CHECK);
                if (IS_ERR(rq)) {
                        spinner_end(&spin_lo);
                        err = PTR_ERR(rq);
                        goto err_ctx_lo;
                }

                /* Arm the hang injection before submitting the preempting request */
                init_completion(&engine->execlists.preempt_hang.completion);
                engine->execlists.preempt_hang.inject_hang = true;

                i915_request_add(rq);

                if (!wait_for_completion_timeout(&engine->execlists.preempt_hang.completion,
                                                 HZ / 10)) {
                        pr_err("Preemption did not occur within timeout!\n");
                        GEM_TRACE_DUMP();
                        i915_gem_set_wedged(i915);
                        err = -EIO;
                        goto err_ctx_lo;
                }

                set_bit(I915_RESET_ENGINE + id, &i915->gpu_error.flags);
                i915_reset_engine(engine, NULL);
                clear_bit(I915_RESET_ENGINE + id, &i915->gpu_error.flags);

                engine->execlists.preempt_hang.inject_hang = false;

                if (!wait_for_spinner(&spin_hi, rq)) {
                        GEM_TRACE("hi spinner failed to start\n");
                        GEM_TRACE_DUMP();
                        i915_gem_set_wedged(i915);
                        err = -EIO;
                        goto err_ctx_lo;
                }

                spinner_end(&spin_hi);
                spinner_end(&spin_lo);
                if (igt_flush_test(i915, I915_WAIT_LOCKED)) {
                        err = -EIO;
                        goto err_ctx_lo;
                }
        }

        err = 0;
err_ctx_lo:
        kernel_context_close(ctx_lo);
err_ctx_hi:
        kernel_context_close(ctx_hi);
err_spin_lo:
        spinner_fini(&spin_lo);
err_spin_hi:
        spinner_fini(&spin_hi);
err_unlock:
        igt_flush_test(i915, I915_WAIT_LOCKED);
        mutex_unlock(&i915->drm.struct_mutex);
        return err;
}

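/*
 * Selftest entry point: these live tests only apply to execlists
 * hardware and are skipped if the device is already terminally wedged.
 */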
int intel_execlists_live_selftests(struct drm_i915_private *i915)
{
        static const struct i915_subtest tests[] = {
                SUBTEST(live_sanitycheck),
                SUBTEST(live_preempt),
                SUBTEST(live_late_preempt),
                SUBTEST(live_preempt_hang),
        };

        if (!HAS_EXECLISTS(i915))
                return 0;

        if (i915_terminally_wedged(&i915->gpu_error))
                return 0;

        return i915_subtests(tests, i915);
}