/*
 * SPDX-License-Identifier: MIT
 *
 * Copyright © 2018 Intel Corporation
 */

#include "gem/i915_gem_pm.h"
#include "gt/intel_engine_user.h"
#include "gt/intel_gt.h"
#include "i915_selftest.h"
#include "intel_reset.h"

#include "selftests/igt_flush_test.h"
#include "selftests/igt_reset.h"
#include "selftests/igt_spinner.h"
#include "selftests/mock_drm.h"

#include "gem/selftests/igt_gem_utils.h"
#include "gem/selftests/mock_context.h"

static const struct wo_register {
        enum intel_platform platform;
        u32 reg;
} wo_registers[] = {
        { INTEL_GEMINILAKE, 0x731c }
};

struct wa_lists {
        struct i915_wa_list gt_wa_list;
        struct {
                struct i915_wa_list wa_list;
                struct i915_wa_list ctx_wa_list;
        } engine[I915_NUM_ENGINES];
};

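/*
 * Submit the request and synchronously wait for it to complete, holding
 * our own reference so the request cannot be retired under us. Failure
 * to complete within the short (HZ / 5) timeout is treated as -EIO.
 */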
static int request_add_sync(struct i915_request *rq, int err)
{
        i915_request_get(rq);
        i915_request_add(rq);
        if (i915_request_wait(rq, 0, HZ / 5) < 0)
                err = -EIO;
        i915_request_put(rq);

        return err;
}

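/*
 * Submit a spinner request and wait until we can confirm the spinner
 * has actually started executing, or return -ETIMEDOUT if it never does.
 */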
static int request_add_spin(struct i915_request *rq, struct igt_spinner *spin)
{
        int err = 0;

        i915_request_get(rq);
        i915_request_add(rq);
        if (spin && !igt_wait_for_spinner(spin, rq))
                err = -ETIMEDOUT;
        i915_request_put(rq);

        return err;
}

static void
reference_lists_init(struct intel_gt *gt, struct wa_lists *lists)
{
        struct intel_engine_cs *engine;
        enum intel_engine_id id;

        memset(lists, 0, sizeof(*lists));

        wa_init_start(&lists->gt_wa_list, "GT_REF", "global");
        gt_init_workarounds(gt->i915, &lists->gt_wa_list);
        wa_init_finish(&lists->gt_wa_list);

        for_each_engine(engine, gt, id) {
                struct i915_wa_list *wal = &lists->engine[id].wa_list;

                wa_init_start(wal, "REF", engine->name);
                engine_init_workarounds(engine, wal);
                wa_init_finish(wal);

                __intel_engine_init_ctx_wa(engine,
                                           &lists->engine[id].ctx_wa_list,
                                           "CTX_REF");
        }
}

static void
reference_lists_fini(struct intel_gt *gt, struct wa_lists *lists)
{
        struct intel_engine_cs *engine;
        enum intel_engine_id id;

        for_each_engine(engine, gt, id)
                intel_wa_list_free(&lists->engine[id].wa_list);

        intel_wa_list_free(&lists->gt_wa_list);
}

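/*
 * Emit a batch of SRM (store-register-to-memory) commands that copy every
 * RING_FORCE_TO_NONPRIV slot of the engine into a scratch object, so the
 * whitelist contents can later be inspected from the CPU. The scratch
 * pages are pre-filled with a poison value (0xc5) to make missing
 * writes obvious.
 */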
static struct drm_i915_gem_object *
read_nonprivs(struct intel_context *ce)
{
        struct intel_engine_cs *engine = ce->engine;
        const u32 base = engine->mmio_base;
        struct drm_i915_gem_object *result;
        struct i915_request *rq;
        struct i915_vma *vma;
        u32 srm, *cs;
        int err;
        int i;

        result = i915_gem_object_create_internal(engine->i915, PAGE_SIZE);
        if (IS_ERR(result))
                return result;

        i915_gem_object_set_cache_coherency(result, I915_CACHE_LLC);

        cs = i915_gem_object_pin_map(result, I915_MAP_WB);
        if (IS_ERR(cs)) {
                err = PTR_ERR(cs);
                goto err_obj;
        }
        memset(cs, 0xc5, PAGE_SIZE);
        i915_gem_object_flush_map(result);
        i915_gem_object_unpin_map(result);

        vma = i915_vma_instance(result, &engine->gt->ggtt->vm, NULL);
        if (IS_ERR(vma)) {
                err = PTR_ERR(vma);
                goto err_obj;
        }

        err = i915_vma_pin(vma, 0, 0, PIN_GLOBAL);
        if (err)
                goto err_obj;

        rq = intel_context_create_request(ce);
        if (IS_ERR(rq)) {
                err = PTR_ERR(rq);
                goto err_pin;
        }

        i915_vma_lock(vma);
        err = i915_request_await_object(rq, vma->obj, true);
        if (err == 0)
                err = i915_vma_move_to_active(vma, rq, EXEC_OBJECT_WRITE);
        i915_vma_unlock(vma);
        if (err)
                goto err_req;

        srm = MI_STORE_REGISTER_MEM | MI_SRM_LRM_GLOBAL_GTT;
        if (INTEL_GEN(engine->i915) >= 8)
                srm++;

        cs = intel_ring_begin(rq, 4 * RING_MAX_NONPRIV_SLOTS);
        if (IS_ERR(cs)) {
                err = PTR_ERR(cs);
                goto err_req;
        }

        for (i = 0; i < RING_MAX_NONPRIV_SLOTS; i++) {
                *cs++ = srm;
                *cs++ = i915_mmio_reg_offset(RING_FORCE_TO_NONPRIV(base, i));
                *cs++ = i915_ggtt_offset(vma) + sizeof(u32) * i;
                *cs++ = 0;
        }
        intel_ring_advance(rq, cs);

        i915_request_add(rq);
        i915_vma_unpin(vma);

        return result;

err_req:
        i915_request_add(rq);
err_pin:
        i915_vma_unpin(vma);
err_obj:
        i915_gem_object_put(result);
        return ERR_PTR(err);
}

static u32
get_whitelist_reg(const struct intel_engine_cs *engine, unsigned int i)
{
        i915_reg_t reg = i < engine->whitelist.count ?
                         engine->whitelist.list[i].reg :
                         RING_NOPID(engine->mmio_base);

        return i915_mmio_reg_offset(reg);
}

static void
print_results(const struct intel_engine_cs *engine, const u32 *results)
{
        unsigned int i;

        for (i = 0; i < RING_MAX_NONPRIV_SLOTS; i++) {
                u32 expected = get_whitelist_reg(engine, i);
                u32 actual = results[i];

                pr_info("RING_NONPRIV[%d]: expected 0x%08x, found 0x%08x\n",
                        i, expected, actual);
        }
}

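/*
 * Read back the RING_NONPRIV slots on this context and compare them
 * against the expected whitelist; unused slots are expected to point at
 * the harmless RING_NOPID register. The wedge timer is a safety net in
 * case the read-back request never completes.
 */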
static int check_whitelist(struct intel_context *ce)
{
        struct intel_engine_cs *engine = ce->engine;
        struct drm_i915_gem_object *results;
        struct intel_wedge_me wedge;
        u32 *vaddr;
        int err;
        int i;

        results = read_nonprivs(ce);
        if (IS_ERR(results))
                return PTR_ERR(results);

        err = 0;
        i915_gem_object_lock(results, NULL);
        intel_wedge_on_timeout(&wedge, engine->gt, HZ / 5) /* safety net! */
                err = i915_gem_object_set_to_cpu_domain(results, false);
        i915_gem_object_unlock(results);
        if (intel_gt_is_wedged(engine->gt))
                err = -EIO;
        if (err)
                goto out_put;

        vaddr = i915_gem_object_pin_map(results, I915_MAP_WB);
        if (IS_ERR(vaddr)) {
                err = PTR_ERR(vaddr);
                goto out_put;
        }

        for (i = 0; i < RING_MAX_NONPRIV_SLOTS; i++) {
                u32 expected = get_whitelist_reg(engine, i);
                u32 actual = vaddr[i];

                if (expected != actual) {
                        print_results(engine, vaddr);
                        pr_err("Invalid RING_NONPRIV[%d], expected 0x%08x, found 0x%08x\n",
                               i, expected, actual);

                        err = -EINVAL;
                        break;
                }
        }

        i915_gem_object_unpin_map(results);
out_put:
        i915_gem_object_put(results);
        return err;
}

static int do_device_reset(struct intel_engine_cs *engine)
{
        intel_gt_reset(engine->gt, engine->mask, "live_workarounds");
        return 0;
}

static int do_engine_reset(struct intel_engine_cs *engine)
{
        return intel_engine_reset(engine, "live_workarounds");
}

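/*
 * Start a spinner on a throwaway context, so that the subsequent reset
 * is performed while a different (scratch) context is active on the
 * engine.
 */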
static int
switch_to_scratch_context(struct intel_engine_cs *engine,
                          struct igt_spinner *spin)
{
        struct intel_context *ce;
        struct i915_request *rq;
        int err = 0;

        ce = intel_context_create(engine);
        if (IS_ERR(ce))
                return PTR_ERR(ce);

        rq = igt_spinner_create_request(spin, ce, MI_NOOP);
        intel_context_put(ce);

        if (IS_ERR(rq)) {
                spin = NULL;
                err = PTR_ERR(rq);
                goto err;
        }

        err = request_add_spin(rq, spin);
err:
        if (err && spin)
                igt_spinner_end(spin);

        return err;
}

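/*
 * Core of the reset tests: verify the whitelist before a reset, then
 * across the reset in the same context, and finally once more in a
 * freshly created context afterwards.
 */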
static int check_whitelist_across_reset(struct intel_engine_cs *engine,
                                        int (*reset)(struct intel_engine_cs *),
                                        const char *name)
{
        struct intel_context *ce, *tmp;
        struct igt_spinner spin;
        intel_wakeref_t wakeref;
        int err;

        pr_info("Checking %d whitelisted registers on %s (RING_NONPRIV) [%s]\n",
                engine->whitelist.count, engine->name, name);

        ce = intel_context_create(engine);
        if (IS_ERR(ce))
                return PTR_ERR(ce);

        err = igt_spinner_init(&spin, engine->gt);
        if (err)
                goto out_ctx;

        err = check_whitelist(ce);
        if (err) {
                pr_err("Invalid whitelist *before* %s reset!\n", name);
                goto out_spin;
        }

        err = switch_to_scratch_context(engine, &spin);
        if (err)
                goto out_spin;

        with_intel_runtime_pm(engine->uncore->rpm, wakeref)
                err = reset(engine);

        igt_spinner_end(&spin);

        if (err) {
                pr_err("%s reset failed\n", name);
                goto out_spin;
        }

        err = check_whitelist(ce);
        if (err) {
                pr_err("Whitelist not preserved in context across %s reset!\n",
                       name);
                goto out_spin;
        }

        tmp = intel_context_create(engine);
        if (IS_ERR(tmp)) {
                err = PTR_ERR(tmp);
                goto out_spin;
        }
        intel_context_put(ce);
        ce = tmp;

        err = check_whitelist(ce);
        if (err) {
                pr_err("Invalid whitelist *after* %s reset in fresh context!\n",
                       name);
                goto out_spin;
        }

out_spin:
        igt_spinner_fini(&spin);
out_ctx:
        intel_context_put(ce);
        return err;
}

static struct i915_vma *create_batch(struct i915_address_space *vm)
{
        struct drm_i915_gem_object *obj;
        struct i915_vma *vma;
        int err;

        obj = i915_gem_object_create_internal(vm->i915, 16 * PAGE_SIZE);
        if (IS_ERR(obj))
                return ERR_CAST(obj);

        vma = i915_vma_instance(obj, vm, NULL);
        if (IS_ERR(vma)) {
                err = PTR_ERR(vma);
                goto err_obj;
        }

        err = i915_vma_pin(vma, 0, 0, PIN_USER);
        if (err)
                goto err_obj;

        return vma;

err_obj:
        i915_gem_object_put(obj);
        return ERR_PTR(err);
}

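/*
 * Model the effect of writing @new to a register whose writable bits are
 * given by @rsvd. The special rsvd value 0x0000ffff denotes a masked
 * register, where the upper 16 bits of the write select which of the
 * lower 16 bits are actually updated.
 */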
static u32 reg_write(u32 old, u32 new, u32 rsvd)
{
        if (rsvd == 0x0000ffff) {
                old &= ~(new >> 16);
                old |= new & (new >> 16);
        } else {
                old &= ~rsvd;
                old |= new & rsvd;
        }

        return old;
}

static bool wo_register(struct intel_engine_cs *engine, u32 reg)
{
        enum intel_platform platform = INTEL_INFO(engine->i915)->platform;
        int i;

        if ((reg & RING_FORCE_TO_NONPRIV_ACCESS_MASK) ==
             RING_FORCE_TO_NONPRIV_ACCESS_WR)
                return true;

        for (i = 0; i < ARRAY_SIZE(wo_registers); i++) {
                if (wo_registers[i].platform == platform &&
                    wo_registers[i].reg == reg)
                        return true;
        }

        return false;
}

static bool timestamp(const struct intel_engine_cs *engine, u32 reg)
{
        reg = (reg - engine->mmio_base) & ~RING_FORCE_TO_NONPRIV_ACCESS_MASK;
        switch (reg) {
        case 0x358:
        case 0x35c:
        case 0x3a8:
                return true;

        default:
                return false;
        }
}

static bool ro_register(u32 reg)
{
        if ((reg & RING_FORCE_TO_NONPRIV_ACCESS_MASK) ==
             RING_FORCE_TO_NONPRIV_ACCESS_RD)
                return true;

        return false;
}

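/* Count the whitelist slots that grant write access, not just read. */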
static int whitelist_writable_count(struct intel_engine_cs *engine)
{
        int count = engine->whitelist.count;
        int i;

        for (i = 0; i < engine->whitelist.count; i++) {
                u32 reg = i915_mmio_reg_offset(engine->whitelist.list[i].reg);

                if (ro_register(reg))
                        count--;
        }

        return count;
}

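/*
 * For every whitelisted register, write an assortment of bit patterns
 * from an unprivileged batch and read each result back via SRM. The
 * value read back after the final all-ones write is used to detect
 * write masking, and every read-back is then checked against what
 * reg_write() predicts.
 */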
static int check_dirty_whitelist(struct intel_context *ce)
{
        const u32 values[] = {
                0x00000000,
                0x01010101,
                0x10100101,
                0x03030303,
                0x30300303,
                0x05050505,
                0x50500505,
                0x0f0f0f0f,
                0xf00ff00f,
                0x10101010,
                0xf0f01010,
                0x30303030,
                0xa0a03030,
                0x50505050,
                0xc0c05050,
                0xf0f0f0f0,
                0x11111111,
                0x33333333,
                0x55555555,
                0x0000ffff,
                0x00ff00ff,
                0xff0000ff,
                0xffff00ff,
                0xffffffff,
        };
        struct intel_engine_cs *engine = ce->engine;
        struct i915_vma *scratch;
        struct i915_vma *batch;
        int err = 0, i, v, sz;
        u32 *cs, *results;

        sz = (2 * ARRAY_SIZE(values) + 1) * sizeof(u32);
        scratch = __vm_create_scratch_for_read(ce->vm, sz);
        if (IS_ERR(scratch))
                return PTR_ERR(scratch);

        batch = create_batch(ce->vm);
        if (IS_ERR(batch)) {
                err = PTR_ERR(batch);
                goto out_scratch;
        }

        for (i = 0; i < engine->whitelist.count; i++) {
                u32 reg = i915_mmio_reg_offset(engine->whitelist.list[i].reg);
                u64 addr = scratch->node.start;
                struct i915_request *rq;
                u32 srm, lrm, rsvd;
                u32 expect;
                int idx;
                bool ro_reg;

                if (wo_register(engine, reg))
                        continue;

                if (timestamp(engine, reg))
                        continue; /* timestamps are expected to autoincrement */

                ro_reg = ro_register(reg);

                /* Clear non priv flags */
                reg &= RING_FORCE_TO_NONPRIV_ADDRESS_MASK;

                srm = MI_STORE_REGISTER_MEM;
                lrm = MI_LOAD_REGISTER_MEM;
                if (INTEL_GEN(engine->i915) >= 8)
                        lrm++, srm++;

                pr_debug("%s: Writing garbage to %x\n",
                         engine->name, reg);

                cs = i915_gem_object_pin_map(batch->obj, I915_MAP_WC);
                if (IS_ERR(cs)) {
                        err = PTR_ERR(cs);
                        goto out_batch;
                }

                /* SRM original */
                *cs++ = srm;
                *cs++ = reg;
                *cs++ = lower_32_bits(addr);
                *cs++ = upper_32_bits(addr);

                idx = 1;
                for (v = 0; v < ARRAY_SIZE(values); v++) {
                        /* LRI garbage */
                        *cs++ = MI_LOAD_REGISTER_IMM(1);
                        *cs++ = reg;
                        *cs++ = values[v];

                        /* SRM result */
                        *cs++ = srm;
                        *cs++ = reg;
                        *cs++ = lower_32_bits(addr + sizeof(u32) * idx);
                        *cs++ = upper_32_bits(addr + sizeof(u32) * idx);
                        idx++;
                }
                for (v = 0; v < ARRAY_SIZE(values); v++) {
                        /* LRI garbage */
                        *cs++ = MI_LOAD_REGISTER_IMM(1);
                        *cs++ = reg;
                        *cs++ = ~values[v];

                        /* SRM result */
                        *cs++ = srm;
                        *cs++ = reg;
                        *cs++ = lower_32_bits(addr + sizeof(u32) * idx);
                        *cs++ = upper_32_bits(addr + sizeof(u32) * idx);
                        idx++;
                }
                GEM_BUG_ON(idx * sizeof(u32) > scratch->size);

                /* LRM original -- don't leave garbage in the context! */
                *cs++ = lrm;
                *cs++ = reg;
                *cs++ = lower_32_bits(addr);
                *cs++ = upper_32_bits(addr);

                *cs++ = MI_BATCH_BUFFER_END;

                i915_gem_object_flush_map(batch->obj);
                i915_gem_object_unpin_map(batch->obj);
                intel_gt_chipset_flush(engine->gt);

                rq = intel_context_create_request(ce);
                if (IS_ERR(rq)) {
                        err = PTR_ERR(rq);
                        goto out_batch;
                }

                if (engine->emit_init_breadcrumb) { /* Be nice if we hang */
                        err = engine->emit_init_breadcrumb(rq);
                        if (err)
                                goto err_request;
                }

                i915_vma_lock(batch);
                err = i915_request_await_object(rq, batch->obj, false);
                if (err == 0)
                        err = i915_vma_move_to_active(batch, rq, 0);
                i915_vma_unlock(batch);
                if (err)
                        goto err_request;

                i915_vma_lock(scratch);
                err = i915_request_await_object(rq, scratch->obj, true);
                if (err == 0)
                        err = i915_vma_move_to_active(scratch, rq,
                                                      EXEC_OBJECT_WRITE);
                i915_vma_unlock(scratch);
                if (err)
                        goto err_request;

                err = engine->emit_bb_start(rq,
                                            batch->node.start, PAGE_SIZE,
                                            0);
                if (err)
                        goto err_request;

err_request:
                err = request_add_sync(rq, err);
                if (err) {
                        pr_err("%s: Futzing %x timed out; cancelling test\n",
                               engine->name, reg);
                        intel_gt_set_wedged(engine->gt);
                        goto out_batch;
                }

                results = i915_gem_object_pin_map(scratch->obj, I915_MAP_WB);
                if (IS_ERR(results)) {
                        err = PTR_ERR(results);
                        goto out_batch;
                }

                GEM_BUG_ON(values[ARRAY_SIZE(values) - 1] != 0xffffffff);
                if (!ro_reg) {
                        /* detect write masking */
                        rsvd = results[ARRAY_SIZE(values)];
                        if (!rsvd) {
                                pr_err("%s: Unable to write to whitelisted register %x\n",
                                       engine->name, reg);
                                err = -EINVAL;
                                goto out_unpin;
                        }
                } else {
                        rsvd = 0;
                }

                expect = results[0];
                idx = 1;
                for (v = 0; v < ARRAY_SIZE(values); v++) {
                        if (ro_reg)
                                expect = results[0];
                        else
                                expect = reg_write(expect, values[v], rsvd);

                        if (results[idx] != expect)
                                err++;
                        idx++;
                }
                for (v = 0; v < ARRAY_SIZE(values); v++) {
                        if (ro_reg)
                                expect = results[0];
                        else
                                expect = reg_write(expect, ~values[v], rsvd);

                        if (results[idx] != expect)
                                err++;
                        idx++;
                }
                if (err) {
                        pr_err("%s: %d mismatches between the values written to whitelisted register [%x] and the values read back!\n",
                               engine->name, err, reg);

                        if (ro_reg)
                                pr_info("%s: Whitelisted read-only register: %x, original value %08x\n",
                                        engine->name, reg, results[0]);
                        else
                                pr_info("%s: Whitelisted register: %x, original value %08x, rsvd %08x\n",
                                        engine->name, reg, results[0], rsvd);

                        expect = results[0];
                        idx = 1;
                        for (v = 0; v < ARRAY_SIZE(values); v++) {
                                u32 w = values[v];

                                if (ro_reg)
                                        expect = results[0];
                                else
                                        expect = reg_write(expect, w, rsvd);
                                pr_info("Wrote %08x, read %08x, expect %08x\n",
                                        w, results[idx], expect);
                                idx++;
                        }
                        for (v = 0; v < ARRAY_SIZE(values); v++) {
                                u32 w = ~values[v];

                                if (ro_reg)
                                        expect = results[0];
                                else
                                        expect = reg_write(expect, w, rsvd);
                                pr_info("Wrote %08x, read %08x, expect %08x\n",
                                        w, results[idx], expect);
                                idx++;
                        }

                        err = -EINVAL;
                }
out_unpin:
                i915_gem_object_unpin_map(scratch->obj);
                if (err)
                        break;
        }

        if (igt_flush_test(engine->i915))
                err = -EIO;
out_batch:
        i915_vma_unpin_and_release(&batch, 0);
out_scratch:
        i915_vma_unpin_and_release(&scratch, 0);
        return err;
}

static int live_dirty_whitelist(void *arg)
{
        struct intel_gt *gt = arg;
        struct intel_engine_cs *engine;
        enum intel_engine_id id;

        /* Can the user write to the whitelisted registers? */

        if (INTEL_GEN(gt->i915) < 7) /* minimum requirement for LRI, SRM, LRM */
                return 0;

        for_each_engine(engine, gt, id) {
                struct intel_context *ce;
                int err;

                if (engine->whitelist.count == 0)
                        continue;

                ce = intel_context_create(engine);
                if (IS_ERR(ce))
                        return PTR_ERR(ce);

                err = check_dirty_whitelist(ce);
                intel_context_put(ce);
                if (err)
                        return err;
        }

        return 0;
}

static int live_reset_whitelist(void *arg)
{
        struct intel_gt *gt = arg;
        struct intel_engine_cs *engine;
        enum intel_engine_id id;
        int err = 0;

        /* If we reset the gpu, we should not lose the RING_NONPRIV */
        igt_global_reset_lock(gt);

        for_each_engine(engine, gt, id) {
                if (engine->whitelist.count == 0)
                        continue;

                if (intel_has_reset_engine(gt)) {
                        err = check_whitelist_across_reset(engine,
                                                           do_engine_reset,
                                                           "engine");
                        if (err)
                                goto out;
                }

                if (intel_has_gpu_reset(gt)) {
                        err = check_whitelist_across_reset(engine,
                                                           do_device_reset,
                                                           "device");
                        if (err)
                                goto out;
                }
        }

out:
        igt_global_reset_unlock(gt);
        return err;
}

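/*
 * Copy the current value of every whitelisted register into @results,
 * using SRM commands emitted on the context's ring.
 */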
static int read_whitelisted_registers(struct intel_context *ce,
                                      struct i915_vma *results)
{
        struct intel_engine_cs *engine = ce->engine;
        struct i915_request *rq;
        int i, err = 0;
        u32 srm, *cs;

        rq = intel_context_create_request(ce);
        if (IS_ERR(rq))
                return PTR_ERR(rq);

        i915_vma_lock(results);
        err = i915_request_await_object(rq, results->obj, true);
        if (err == 0)
                err = i915_vma_move_to_active(results, rq, EXEC_OBJECT_WRITE);
        i915_vma_unlock(results);
        if (err)
                goto err_req;

        srm = MI_STORE_REGISTER_MEM;
        if (INTEL_GEN(engine->i915) >= 8)
                srm++;

        cs = intel_ring_begin(rq, 4 * engine->whitelist.count);
        if (IS_ERR(cs)) {
                err = PTR_ERR(cs);
                goto err_req;
        }

        for (i = 0; i < engine->whitelist.count; i++) {
                u64 offset = results->node.start + sizeof(u32) * i;
                u32 reg = i915_mmio_reg_offset(engine->whitelist.list[i].reg);

                /* Clear non priv flags */
                reg &= RING_FORCE_TO_NONPRIV_ADDRESS_MASK;

                *cs++ = srm;
                *cs++ = reg;
                *cs++ = lower_32_bits(offset);
                *cs++ = upper_32_bits(offset);
        }
        intel_ring_advance(rq, cs);

err_req:
        return request_add_sync(rq, err);
}

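/*
 * From an unprivileged "user" batch, write ~0 to every writable
 * whitelisted register, deliberately dirtying the context state.
 */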
static int scrub_whitelisted_registers(struct intel_context *ce)
{
        struct intel_engine_cs *engine = ce->engine;
        struct i915_request *rq;
        struct i915_vma *batch;
        int i, err = 0;
        u32 *cs;

        batch = create_batch(ce->vm);
        if (IS_ERR(batch))
                return PTR_ERR(batch);

        cs = i915_gem_object_pin_map(batch->obj, I915_MAP_WC);
        if (IS_ERR(cs)) {
                err = PTR_ERR(cs);
                goto err_batch;
        }

        *cs++ = MI_LOAD_REGISTER_IMM(whitelist_writable_count(engine));
        for (i = 0; i < engine->whitelist.count; i++) {
                u32 reg = i915_mmio_reg_offset(engine->whitelist.list[i].reg);

                if (ro_register(reg))
                        continue;

                /* Clear non priv flags */
                reg &= RING_FORCE_TO_NONPRIV_ADDRESS_MASK;

                *cs++ = reg;
                *cs++ = 0xffffffff;
        }
        *cs++ = MI_BATCH_BUFFER_END;

        i915_gem_object_flush_map(batch->obj);
        intel_gt_chipset_flush(engine->gt);

        rq = intel_context_create_request(ce);
        if (IS_ERR(rq)) {
                err = PTR_ERR(rq);
                goto err_unpin;
        }

        if (engine->emit_init_breadcrumb) { /* Be nice if we hang */
                err = engine->emit_init_breadcrumb(rq);
                if (err)
                        goto err_request;
        }

        i915_vma_lock(batch);
        err = i915_request_await_object(rq, batch->obj, false);
        if (err == 0)
                err = i915_vma_move_to_active(batch, rq, 0);
        i915_vma_unlock(batch);
        if (err)
                goto err_request;

        /* Perform the writes from an unprivileged "user" batch */
        err = engine->emit_bb_start(rq, batch->node.start, 0, 0);

err_request:
        err = request_add_sync(rq, err);

err_unpin:
        i915_gem_object_unpin_map(batch->obj);
err_batch:
        i915_vma_unpin_and_release(&batch, 0);
        return err;
}

struct regmask {
        i915_reg_t reg;
        unsigned long gen_mask;
};

static bool find_reg(struct drm_i915_private *i915,
                     i915_reg_t reg,
                     const struct regmask *tbl,
                     unsigned long count)
{
        u32 offset = i915_mmio_reg_offset(reg);

        while (count--) {
                if (INTEL_INFO(i915)->gen_mask & tbl->gen_mask &&
                    i915_mmio_reg_offset(tbl->reg) == offset)
                        return true;
                tbl++;
        }

        return false;
}

static bool pardon_reg(struct drm_i915_private *i915, i915_reg_t reg)
{
        /* Alas, we must pardon some whitelists. Mistakes already made */
        static const struct regmask pardon[] = {
                { GEN9_CTX_PREEMPT_REG, INTEL_GEN_MASK(9, 9) },
                { GEN8_L3SQCREG4, INTEL_GEN_MASK(9, 9) },
        };

        return find_reg(i915, reg, pardon, ARRAY_SIZE(pardon));
}

static bool result_eq(struct intel_engine_cs *engine,
                      u32 a, u32 b, i915_reg_t reg)
{
        if (a != b && !pardon_reg(engine->i915, reg)) {
                pr_err("Whitelisted register 0x%4x not context saved: A=%08x, B=%08x\n",
                       i915_mmio_reg_offset(reg), a, b);
                return false;
        }

        return true;
}

static bool writeonly_reg(struct drm_i915_private *i915, i915_reg_t reg)
{
        /* Some registers do not seem to behave; our writes cannot be read back */
        static const struct regmask wo[] = {
                { GEN9_SLICE_COMMON_ECO_CHICKEN1, INTEL_GEN_MASK(9, 9) },
        };

        return find_reg(i915, reg, wo, ARRAY_SIZE(wo));
}

static bool result_neq(struct intel_engine_cs *engine,
                       u32 a, u32 b, i915_reg_t reg)
{
        if (a == b && !writeonly_reg(engine->i915, reg)) {
                pr_err("Whitelist register 0x%4x:%08x was unwritable\n",
                       i915_mmio_reg_offset(reg), a);
                return false;
        }

        return true;
}

static int
check_whitelisted_registers(struct intel_engine_cs *engine,
                            struct i915_vma *A,
                            struct i915_vma *B,
                            bool (*fn)(struct intel_engine_cs *engine,
                                       u32 a, u32 b,
                                       i915_reg_t reg))
{
        u32 *a, *b;
        int i, err;

        a = i915_gem_object_pin_map(A->obj, I915_MAP_WB);
        if (IS_ERR(a))
                return PTR_ERR(a);

        b = i915_gem_object_pin_map(B->obj, I915_MAP_WB);
        if (IS_ERR(b)) {
                err = PTR_ERR(b);
                goto err_a;
        }

        err = 0;
        for (i = 0; i < engine->whitelist.count; i++) {
                const struct i915_wa *wa = &engine->whitelist.list[i];

                if (i915_mmio_reg_offset(wa->reg) &
                    RING_FORCE_TO_NONPRIV_ACCESS_RD)
                        continue;

                if (!fn(engine, a[i], b[i], wa->reg))
                        err = -EINVAL;
        }

        i915_gem_object_unpin_map(B->obj);
err_a:
        i915_gem_object_unpin_map(A->obj);
        return err;
}

static int live_isolated_whitelist(void *arg)
{
        struct intel_gt *gt = arg;
        struct {
                struct i915_vma *scratch[2];
        } client[2] = {};
        struct intel_engine_cs *engine;
        enum intel_engine_id id;
        int i, err = 0;

        /*
         * Check that a write into a whitelisted register works, but is
         * invisible to a second context.
         */

        if (!intel_engines_has_context_isolation(gt->i915))
                return 0;

        for (i = 0; i < ARRAY_SIZE(client); i++) {
                client[i].scratch[0] =
                        __vm_create_scratch_for_read(gt->vm, 4096);
                if (IS_ERR(client[i].scratch[0])) {
                        err = PTR_ERR(client[i].scratch[0]);
                        goto err;
                }

                client[i].scratch[1] =
                        __vm_create_scratch_for_read(gt->vm, 4096);
                if (IS_ERR(client[i].scratch[1])) {
                        err = PTR_ERR(client[i].scratch[1]);
                        i915_vma_unpin_and_release(&client[i].scratch[0], 0);
                        goto err;
                }
        }

        for_each_engine(engine, gt, id) {
                struct intel_context *ce[2];

                if (!engine->kernel_context->vm)
                        continue;

                if (!whitelist_writable_count(engine))
                        continue;

                ce[0] = intel_context_create(engine);
                if (IS_ERR(ce[0])) {
                        err = PTR_ERR(ce[0]);
                        break;
                }
                ce[1] = intel_context_create(engine);
                if (IS_ERR(ce[1])) {
                        err = PTR_ERR(ce[1]);
                        intel_context_put(ce[0]);
                        break;
                }

                /* Read default values */
                err = read_whitelisted_registers(ce[0], client[0].scratch[0]);
                if (err)
                        goto err_ce;

                /* Try to overwrite registers (should only affect ctx0) */
                err = scrub_whitelisted_registers(ce[0]);
                if (err)
                        goto err_ce;

                /* Read values from ctx1; we expect these to be defaults */
                err = read_whitelisted_registers(ce[1], client[1].scratch[0]);
                if (err)
                        goto err_ce;

                /* Verify that both reads return the same default values */
                err = check_whitelisted_registers(engine,
                                                  client[0].scratch[0],
                                                  client[1].scratch[0],
                                                  result_eq);
                if (err)
                        goto err_ce;

                /* Read back the updated values in ctx0 */
                err = read_whitelisted_registers(ce[0], client[0].scratch[1]);
                if (err)
                        goto err_ce;

                /* User should be granted privilege to overwrite regs */
                err = check_whitelisted_registers(engine,
                                                  client[0].scratch[0],
                                                  client[0].scratch[1],
                                                  result_neq);
err_ce:
                intel_context_put(ce[1]);
                intel_context_put(ce[0]);
                if (err)
                        break;
        }

err:
        for (i = 0; i < ARRAY_SIZE(client); i++) {
                i915_vma_unpin_and_release(&client[i].scratch[1], 0);
                i915_vma_unpin_and_release(&client[i].scratch[0], 0);
        }

        if (igt_flush_test(gt->i915))
                err = -EIO;

        return err;
}

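/*
 * Check that the GT, engine and context workaround lists still hold the
 * values recorded in the reference lists at the start of the test.
 */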
static bool
verify_wa_lists(struct intel_gt *gt, struct wa_lists *lists,
                const char *str)
{
        struct intel_engine_cs *engine;
        enum intel_engine_id id;
        bool ok = true;

        ok &= wa_list_verify(gt->uncore, &lists->gt_wa_list, str);

        for_each_engine(engine, gt, id) {
                struct intel_context *ce;

                ce = intel_context_create(engine);
                if (IS_ERR(ce))
                        return false;

                ok &= engine_wa_list_verify(ce,
                                            &lists->engine[id].wa_list,
                                            str) == 0;

                ok &= engine_wa_list_verify(ce,
                                            &lists->engine[id].ctx_wa_list,
                                            str) == 0;

                intel_context_put(ce);
        }

        return ok;
}

static int
live_gpu_reset_workarounds(void *arg)
{
        struct intel_gt *gt = arg;
        intel_wakeref_t wakeref;
        struct wa_lists lists;
        bool ok;

        if (!intel_has_gpu_reset(gt))
                return 0;

        pr_info("Verifying after GPU reset...\n");

        igt_global_reset_lock(gt);
        wakeref = intel_runtime_pm_get(gt->uncore->rpm);

        reference_lists_init(gt, &lists);

        ok = verify_wa_lists(gt, &lists, "before reset");
        if (!ok)
                goto out;

        intel_gt_reset(gt, ALL_ENGINES, "live_workarounds");

        ok = verify_wa_lists(gt, &lists, "after reset");

out:
        reference_lists_fini(gt, &lists);
        intel_runtime_pm_put(gt->uncore->rpm, wakeref);
        igt_global_reset_unlock(gt);

        return ok ? 0 : -ESRCH;
}

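/*
 * Verify that the workarounds survive a per-engine reset, both when the
 * engine is reset while idle and while busy running a spinner.
 */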
static int
live_engine_reset_workarounds(void *arg)
{
        struct intel_gt *gt = arg;
        struct intel_engine_cs *engine;
        enum intel_engine_id id;
        struct intel_context *ce;
        struct igt_spinner spin;
        struct i915_request *rq;
        intel_wakeref_t wakeref;
        struct wa_lists lists;
        int ret = 0;

        if (!intel_has_reset_engine(gt))
                return 0;

        igt_global_reset_lock(gt);
        wakeref = intel_runtime_pm_get(gt->uncore->rpm);

        reference_lists_init(gt, &lists);

        for_each_engine(engine, gt, id) {
                bool ok;

                pr_info("Verifying after %s reset...\n", engine->name);
                ce = intel_context_create(engine);
                if (IS_ERR(ce)) {
                        ret = PTR_ERR(ce);
                        break;
                }

                ok = verify_wa_lists(gt, &lists, "before reset");
                if (!ok) {
                        ret = -ESRCH;
                        goto err;
                }

                intel_engine_reset(engine, "live_workarounds:idle");

                ok = verify_wa_lists(gt, &lists, "after idle reset");
                if (!ok) {
                        ret = -ESRCH;
                        goto err;
                }

                ret = igt_spinner_init(&spin, engine->gt);
                if (ret)
                        goto err;

                rq = igt_spinner_create_request(&spin, ce, MI_NOOP);
                if (IS_ERR(rq)) {
                        ret = PTR_ERR(rq);
                        igt_spinner_fini(&spin);
                        goto err;
                }

                ret = request_add_spin(rq, &spin);
                if (ret) {
                        pr_err("Spinner failed to start\n");
                        igt_spinner_fini(&spin);
                        goto err;
                }

                intel_engine_reset(engine, "live_workarounds:active");

                igt_spinner_end(&spin);
                igt_spinner_fini(&spin);

                ok = verify_wa_lists(gt, &lists, "after busy reset");
                if (!ok) {
                        ret = -ESRCH;
                        goto err;
                }

err:
                intel_context_put(ce);
                if (ret)
                        break;
        }

        reference_lists_fini(gt, &lists);
        intel_runtime_pm_put(gt->uncore->rpm, wakeref);
        igt_global_reset_unlock(gt);

        igt_flush_test(gt->i915);

        return ret;
}

int intel_workarounds_live_selftests(struct drm_i915_private *i915)
{
        static const struct i915_subtest tests[] = {
                SUBTEST(live_dirty_whitelist),
                SUBTEST(live_reset_whitelist),
                SUBTEST(live_isolated_whitelist),
                SUBTEST(live_gpu_reset_workarounds),
                SUBTEST(live_engine_reset_workarounds),
        };

        if (intel_gt_is_wedged(&i915->gt))
                return 0;

        return intel_gt_live_subtests(tests, &i915->gt);
}