// SPDX-License-Identifier: MIT
/*
 * Copyright © 2018 Intel Corporation
 */

#include "gem/i915_gem_pm.h"
#include "gt/intel_engine_user.h"
#include "gt/intel_gt.h"
#include "i915_selftest.h"
#include "intel_reset.h"

#include "selftests/igt_flush_test.h"
#include "selftests/igt_reset.h"
#include "selftests/igt_spinner.h"
#include "selftests/mock_drm.h"

#include "gem/selftests/igt_gem_utils.h"
#include "gem/selftests/mock_context.h"

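/*
 * Registers that are known to be write-only on a given platform: writes
 * to these cannot be verified by reading the value back.
 */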
static const struct wo_register {
        enum intel_platform platform;
        u32 reg;
} wo_registers[] = {
        { INTEL_GEMINILAKE, 0x731c }
};

struct wa_lists {
        struct i915_wa_list gt_wa_list;
        struct {
                struct i915_wa_list wa_list;
                struct i915_wa_list ctx_wa_list;
        } engine[I915_NUM_ENGINES];
};

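/*
 * Submit the request and synchronously wait (up to 200ms) for it to
 * complete. Any error already accumulated in @err is preserved; a
 * timeout is reported as -EIO.
 */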
static int request_add_sync(struct i915_request *rq, int err)
{
        i915_request_get(rq);
        i915_request_add(rq);
        if (i915_request_wait(rq, 0, HZ / 5) < 0)
                err = -EIO;
        i915_request_put(rq);

        return err;
}

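/*
 * Submit the request and wait for the attached spinner to start
 * executing on the GPU, returning -ETIMEDOUT if it does not.
 */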
static int request_add_spin(struct i915_request *rq, struct igt_spinner *spin)
{
        int err = 0;

        i915_request_get(rq);
        i915_request_add(rq);
        if (spin && !igt_wait_for_spinner(spin, rq))
                err = -ETIMEDOUT;
        i915_request_put(rq);

        return err;
}

static void
reference_lists_init(struct intel_gt *gt, struct wa_lists *lists)
{
        struct intel_engine_cs *engine;
        enum intel_engine_id id;

        memset(lists, 0, sizeof(*lists));

        wa_init_start(&lists->gt_wa_list, "GT_REF", "global");
        gt_init_workarounds(gt->i915, &lists->gt_wa_list);
        wa_init_finish(&lists->gt_wa_list);

        for_each_engine(engine, gt, id) {
                struct i915_wa_list *wal = &lists->engine[id].wa_list;

                wa_init_start(wal, "REF", engine->name);
                engine_init_workarounds(engine, wal);
                wa_init_finish(wal);

                __intel_engine_init_ctx_wa(engine,
                                           &lists->engine[id].ctx_wa_list,
                                           "CTX_REF");
        }
}

static void
reference_lists_fini(struct intel_gt *gt, struct wa_lists *lists)
{
        struct intel_engine_cs *engine;
        enum intel_engine_id id;

        for_each_engine(engine, gt, id)
                intel_wa_list_free(&lists->engine[id].wa_list);

        intel_wa_list_free(&lists->gt_wa_list);
}

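/*
 * Emit one SRM (store register to memory) per RING_FORCE_TO_NONPRIV
 * slot, copying the current contents of each slot into a scratch
 * object that is returned for the caller to inspect.
 */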
static struct drm_i915_gem_object *
read_nonprivs(struct intel_context *ce)
{
        struct intel_engine_cs *engine = ce->engine;
        const u32 base = engine->mmio_base;
        struct drm_i915_gem_object *result;
        struct i915_request *rq;
        struct i915_vma *vma;
        u32 srm, *cs;
        int err;
        int i;

        result = i915_gem_object_create_internal(engine->i915, PAGE_SIZE);
        if (IS_ERR(result))
                return result;

        i915_gem_object_set_cache_coherency(result, I915_CACHE_LLC);

        cs = i915_gem_object_pin_map_unlocked(result, I915_MAP_WB);
        if (IS_ERR(cs)) {
                err = PTR_ERR(cs);
                goto err_obj;
        }
        memset(cs, 0xc5, PAGE_SIZE);
        i915_gem_object_flush_map(result);
        i915_gem_object_unpin_map(result);

        vma = i915_vma_instance(result, &engine->gt->ggtt->vm, NULL);
        if (IS_ERR(vma)) {
                err = PTR_ERR(vma);
                goto err_obj;
        }

        err = i915_vma_pin(vma, 0, 0, PIN_GLOBAL);
        if (err)
                goto err_obj;

        rq = intel_context_create_request(ce);
        if (IS_ERR(rq)) {
                err = PTR_ERR(rq);
                goto err_pin;
        }

        i915_vma_lock(vma);
        err = i915_request_await_object(rq, vma->obj, true);
        if (err == 0)
                err = i915_vma_move_to_active(vma, rq, EXEC_OBJECT_WRITE);
        i915_vma_unlock(vma);
        if (err)
                goto err_req;

        srm = MI_STORE_REGISTER_MEM | MI_SRM_LRM_GLOBAL_GTT;
        if (INTEL_GEN(engine->i915) >= 8)
                srm++;

        cs = intel_ring_begin(rq, 4 * RING_MAX_NONPRIV_SLOTS);
        if (IS_ERR(cs)) {
                err = PTR_ERR(cs);
                goto err_req;
        }

        for (i = 0; i < RING_MAX_NONPRIV_SLOTS; i++) {
                *cs++ = srm;
                *cs++ = i915_mmio_reg_offset(RING_FORCE_TO_NONPRIV(base, i));
                *cs++ = i915_ggtt_offset(vma) + sizeof(u32) * i;
                *cs++ = 0;
        }
        intel_ring_advance(rq, cs);

        i915_request_add(rq);
        i915_vma_unpin(vma);

        return result;

err_req:
        i915_request_add(rq);
err_pin:
        i915_vma_unpin(vma);
err_obj:
        i915_gem_object_put(result);
        return ERR_PTR(err);
}

static u32
get_whitelist_reg(const struct intel_engine_cs *engine, unsigned int i)
{
        i915_reg_t reg = i < engine->whitelist.count ?
                         engine->whitelist.list[i].reg :
                         RING_NOPID(engine->mmio_base);

        return i915_mmio_reg_offset(reg);
}

static void
print_results(const struct intel_engine_cs *engine, const u32 *results)
{
        unsigned int i;

        for (i = 0; i < RING_MAX_NONPRIV_SLOTS; i++) {
                u32 expected = get_whitelist_reg(engine, i);
                u32 actual = results[i];

                pr_info("RING_NONPRIV[%d]: expected 0x%08x, found 0x%08x\n",
                        i, expected, actual);
        }
}

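/*
 * Verify the RING_FORCE_TO_NONPRIV slots as seen from @ce: every slot
 * in use must hold its whitelisted register offset, and every unused
 * slot must point at the harmless RING_NOPID register.
 */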
static int check_whitelist(struct intel_context *ce)
{
        struct intel_engine_cs *engine = ce->engine;
        struct drm_i915_gem_object *results;
        struct intel_wedge_me wedge;
        u32 *vaddr;
        int err;
        int i;

        results = read_nonprivs(ce);
        if (IS_ERR(results))
                return PTR_ERR(results);

        err = 0;
        i915_gem_object_lock(results, NULL);
        intel_wedge_on_timeout(&wedge, engine->gt, HZ / 5) /* safety net! */
                err = i915_gem_object_set_to_cpu_domain(results, false);

        if (intel_gt_is_wedged(engine->gt))
                err = -EIO;
        if (err)
                goto out_put;

        vaddr = i915_gem_object_pin_map(results, I915_MAP_WB);
        if (IS_ERR(vaddr)) {
                err = PTR_ERR(vaddr);
                goto out_put;
        }

        for (i = 0; i < RING_MAX_NONPRIV_SLOTS; i++) {
                u32 expected = get_whitelist_reg(engine, i);
                u32 actual = vaddr[i];

                if (expected != actual) {
                        print_results(engine, vaddr);
                        pr_err("Invalid RING_NONPRIV[%d], expected 0x%08x, found 0x%08x\n",
                               i, expected, actual);

                        err = -EINVAL;
                        break;
                }
        }

        i915_gem_object_unpin_map(results);
out_put:
        i915_gem_object_unlock(results);
        i915_gem_object_put(results);
        return err;
}

static int do_device_reset(struct intel_engine_cs *engine)
{
        intel_gt_reset(engine->gt, engine->mask, "live_workarounds");
        return 0;
}

static int do_engine_reset(struct intel_engine_cs *engine)
{
        return intel_engine_reset(engine, "live_workarounds");
}

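/*
 * Fire up a spinner in a freshly created context so that the engine is
 * busy executing another context when the reset is performed.
 */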
static int
switch_to_scratch_context(struct intel_engine_cs *engine,
                          struct igt_spinner *spin)
{
        struct intel_context *ce;
        struct i915_request *rq;
        int err = 0;

        ce = intel_context_create(engine);
        if (IS_ERR(ce))
                return PTR_ERR(ce);

        rq = igt_spinner_create_request(spin, ce, MI_NOOP);
        intel_context_put(ce);

        if (IS_ERR(rq)) {
                spin = NULL;
                err = PTR_ERR(rq);
                goto err;
        }

        err = request_add_spin(rq, spin);
err:
        if (err && spin)
                igt_spinner_end(spin);

        return err;
}

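/*
 * Check that the whitelist (RING_NONPRIV) survives a reset: it must be
 * valid before the reset, preserved in the original context afterwards,
 * and reapplied to a context created after the reset.
 */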
static int check_whitelist_across_reset(struct intel_engine_cs *engine,
                                        int (*reset)(struct intel_engine_cs *),
                                        const char *name)
{
        struct intel_context *ce, *tmp;
        struct igt_spinner spin;
        intel_wakeref_t wakeref;
        int err;

        pr_info("Checking %d whitelisted registers on %s (RING_NONPRIV) [%s]\n",
                engine->whitelist.count, engine->name, name);

        ce = intel_context_create(engine);
        if (IS_ERR(ce))
                return PTR_ERR(ce);

        err = igt_spinner_init(&spin, engine->gt);
        if (err)
                goto out_ctx;

        err = check_whitelist(ce);
        if (err) {
                pr_err("Invalid whitelist *before* %s reset!\n", name);
                goto out_spin;
        }

        err = switch_to_scratch_context(engine, &spin);
        if (err)
                goto out_spin;

        with_intel_runtime_pm(engine->uncore->rpm, wakeref)
                err = reset(engine);

        igt_spinner_end(&spin);

        if (err) {
                pr_err("%s reset failed\n", name);
                goto out_spin;
        }

        err = check_whitelist(ce);
        if (err) {
                pr_err("Whitelist not preserved in context across %s reset!\n",
                       name);
                goto out_spin;
        }

        tmp = intel_context_create(engine);
        if (IS_ERR(tmp)) {
                err = PTR_ERR(tmp);
                goto out_spin;
        }
        intel_context_put(ce);
        ce = tmp;

        err = check_whitelist(ce);
        if (err) {
                pr_err("Invalid whitelist *after* %s reset in fresh context!\n",
                       name);
                goto out_spin;
        }

out_spin:
        igt_spinner_fini(&spin);
out_ctx:
        intel_context_put(ce);
        return err;
}

static struct i915_vma *create_batch(struct i915_address_space *vm)
{
        struct drm_i915_gem_object *obj;
        struct i915_vma *vma;
        int err;

        obj = i915_gem_object_create_internal(vm->i915, 16 * PAGE_SIZE);
        if (IS_ERR(obj))
                return ERR_CAST(obj);

        vma = i915_vma_instance(obj, vm, NULL);
        if (IS_ERR(vma)) {
                err = PTR_ERR(vma);
                goto err_obj;
        }

        err = i915_vma_pin(vma, 0, 0, PIN_USER);
        if (err)
                goto err_obj;

        return vma;

err_obj:
        i915_gem_object_put(obj);
        return ERR_PTR(err);
}

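/*
 * Model the effect of writing @new to a register: if the detected
 * write mask is 0x0000ffff, the register uses the masked-write
 * convention where the high 16 bits of the value select which of the
 * low 16 bits to update; otherwise only the bits set in @rsvd are
 * writable.
 */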
static u32 reg_write(u32 old, u32 new, u32 rsvd)
{
        if (rsvd == 0x0000ffff) {
                old &= ~(new >> 16);
                old |= new & (new >> 16);
        } else {
                old &= ~rsvd;
                old |= new & rsvd;
        }

        return old;
}

static bool wo_register(struct intel_engine_cs *engine, u32 reg)
{
        enum intel_platform platform = INTEL_INFO(engine->i915)->platform;
        int i;

        if ((reg & RING_FORCE_TO_NONPRIV_ACCESS_MASK) ==
             RING_FORCE_TO_NONPRIV_ACCESS_WR)
                return true;

        for (i = 0; i < ARRAY_SIZE(wo_registers); i++) {
                if (wo_registers[i].platform == platform &&
                    wo_registers[i].reg == reg)
                        return true;
        }

        return false;
}

static bool timestamp(const struct intel_engine_cs *engine, u32 reg)
{
        reg = (reg - engine->mmio_base) & ~RING_FORCE_TO_NONPRIV_ACCESS_MASK;
        switch (reg) {
        case 0x358:
        case 0x35c:
        case 0x3a8:
                return true;

        default:
                return false;
        }
}

static bool ro_register(u32 reg)
{
        if ((reg & RING_FORCE_TO_NONPRIV_ACCESS_MASK) ==
             RING_FORCE_TO_NONPRIV_ACCESS_RD)
                return true;

        return false;
}

static int whitelist_writable_count(struct intel_engine_cs *engine)
{
        int count = engine->whitelist.count;
        int i;

        for (i = 0; i < engine->whitelist.count; i++) {
                u32 reg = i915_mmio_reg_offset(engine->whitelist.list[i].reg);

                if (ro_register(reg))
                        count--;
        }

        return count;
}

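/*
 * For each whitelisted register on the engine, write a series of
 * garbage values from an unprivileged batch (LRI), read every result
 * back (SRM) and compare it against the value predicted by the
 * detected write mask. The original value is reloaded (LRM) at the
 * end so no garbage is left behind in the context image.
 */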
static int check_dirty_whitelist(struct intel_context *ce)
{
        const u32 values[] = {
                0x00000000,
                0x01010101,
                0x10100101,
                0x03030303,
                0x30300303,
                0x05050505,
                0x50500505,
                0x0f0f0f0f,
                0xf00ff00f,
                0x10101010,
                0xf0f01010,
                0x30303030,
                0xa0a03030,
                0x50505050,
                0xc0c05050,
                0xf0f0f0f0,
                0x11111111,
                0x33333333,
                0x55555555,
                0x0000ffff,
                0x00ff00ff,
                0xff0000ff,
                0xffff00ff,
                0xffffffff,
        };
        struct intel_engine_cs *engine = ce->engine;
        struct i915_vma *scratch;
        struct i915_vma *batch;
        int err = 0, i, v, sz;
        u32 *cs, *results;

        sz = (2 * ARRAY_SIZE(values) + 1) * sizeof(u32);
        scratch = __vm_create_scratch_for_read_pinned(ce->vm, sz);
        if (IS_ERR(scratch))
                return PTR_ERR(scratch);

        batch = create_batch(ce->vm);
        if (IS_ERR(batch)) {
                err = PTR_ERR(batch);
                goto out_scratch;
        }

        for (i = 0; i < engine->whitelist.count; i++) {
                u32 reg = i915_mmio_reg_offset(engine->whitelist.list[i].reg);
                struct i915_gem_ww_ctx ww;
                u64 addr = scratch->node.start;
                struct i915_request *rq;
                u32 srm, lrm, rsvd;
                u32 expect;
                int idx;
                bool ro_reg;

                if (wo_register(engine, reg))
                        continue;

                if (timestamp(engine, reg))
                        continue; /* timestamps are expected to autoincrement */

                ro_reg = ro_register(reg);

                i915_gem_ww_ctx_init(&ww, false);
retry:
                cs = NULL;
                err = i915_gem_object_lock(scratch->obj, &ww);
                if (!err)
                        err = i915_gem_object_lock(batch->obj, &ww);
                if (!err)
                        err = intel_context_pin_ww(ce, &ww);
                if (err)
                        goto out;

                cs = i915_gem_object_pin_map(batch->obj, I915_MAP_WC);
                if (IS_ERR(cs)) {
                        err = PTR_ERR(cs);
                        goto out_ctx;
                }

                results = i915_gem_object_pin_map(scratch->obj, I915_MAP_WB);
                if (IS_ERR(results)) {
                        err = PTR_ERR(results);
                        goto out_unmap_batch;
                }

                /* Clear non priv flags */
                reg &= RING_FORCE_TO_NONPRIV_ADDRESS_MASK;

                srm = MI_STORE_REGISTER_MEM;
                lrm = MI_LOAD_REGISTER_MEM;
                if (INTEL_GEN(engine->i915) >= 8)
                        lrm++, srm++;

                pr_debug("%s: Writing garbage to %x\n",
                         engine->name, reg);

                /* SRM original */
                *cs++ = srm;
                *cs++ = reg;
                *cs++ = lower_32_bits(addr);
                *cs++ = upper_32_bits(addr);

                idx = 1;
                for (v = 0; v < ARRAY_SIZE(values); v++) {
                        /* LRI garbage */
                        *cs++ = MI_LOAD_REGISTER_IMM(1);
                        *cs++ = reg;
                        *cs++ = values[v];

                        /* SRM result */
                        *cs++ = srm;
                        *cs++ = reg;
                        *cs++ = lower_32_bits(addr + sizeof(u32) * idx);
                        *cs++ = upper_32_bits(addr + sizeof(u32) * idx);
                        idx++;
                }
                for (v = 0; v < ARRAY_SIZE(values); v++) {
                        /* LRI garbage */
                        *cs++ = MI_LOAD_REGISTER_IMM(1);
                        *cs++ = reg;
                        *cs++ = ~values[v];

                        /* SRM result */
                        *cs++ = srm;
                        *cs++ = reg;
                        *cs++ = lower_32_bits(addr + sizeof(u32) * idx);
                        *cs++ = upper_32_bits(addr + sizeof(u32) * idx);
                        idx++;
                }
                GEM_BUG_ON(idx * sizeof(u32) > scratch->size);

                /* LRM original -- don't leave garbage in the context! */
                *cs++ = lrm;
                *cs++ = reg;
                *cs++ = lower_32_bits(addr);
                *cs++ = upper_32_bits(addr);

                *cs++ = MI_BATCH_BUFFER_END;

                i915_gem_object_flush_map(batch->obj);
                i915_gem_object_unpin_map(batch->obj);
                intel_gt_chipset_flush(engine->gt);
                cs = NULL;

                rq = i915_request_create(ce);
                if (IS_ERR(rq)) {
                        err = PTR_ERR(rq);
                        goto out_unmap_scratch;
                }

                if (engine->emit_init_breadcrumb) { /* Be nice if we hang */
                        err = engine->emit_init_breadcrumb(rq);
                        if (err)
                                goto err_request;
                }

                err = i915_request_await_object(rq, batch->obj, false);
                if (err == 0)
                        err = i915_vma_move_to_active(batch, rq, 0);
                if (err)
                        goto err_request;

                err = i915_request_await_object(rq, scratch->obj, true);
                if (err == 0)
                        err = i915_vma_move_to_active(scratch, rq,
                                                      EXEC_OBJECT_WRITE);
                if (err)
                        goto err_request;

                err = engine->emit_bb_start(rq,
                                            batch->node.start, PAGE_SIZE,
                                            0);
                if (err)
                        goto err_request;

err_request:
                err = request_add_sync(rq, err);
                if (err) {
                        pr_err("%s: Futzing %x timed out; cancelling test\n",
                               engine->name, reg);
                        intel_gt_set_wedged(engine->gt);
                        goto out_unmap_scratch;
                }

                GEM_BUG_ON(values[ARRAY_SIZE(values) - 1] != 0xffffffff);
                if (!ro_reg) {
                        /* detect write masking */
                        rsvd = results[ARRAY_SIZE(values)];
                        if (!rsvd) {
                                pr_err("%s: Unable to write to whitelisted register %x\n",
                                       engine->name, reg);
                                err = -EINVAL;
                                goto out_unmap_scratch;
                        }
                } else {
                        rsvd = 0;
                }

                expect = results[0];
                idx = 1;
                for (v = 0; v < ARRAY_SIZE(values); v++) {
                        if (ro_reg)
                                expect = results[0];
                        else
                                expect = reg_write(expect, values[v], rsvd);

                        if (results[idx] != expect)
                                err++;
                        idx++;
                }
                for (v = 0; v < ARRAY_SIZE(values); v++) {
                        if (ro_reg)
                                expect = results[0];
                        else
                                expect = reg_write(expect, ~values[v], rsvd);

                        if (results[idx] != expect)
                                err++;
                        idx++;
                }
                if (err) {
                        pr_err("%s: %d mismatches between values written to whitelisted register [%x] and values read back!\n",
                               engine->name, err, reg);

                        if (ro_reg)
                                pr_info("%s: Whitelisted read-only register: %x, original value %08x\n",
                                        engine->name, reg, results[0]);
                        else
                                pr_info("%s: Whitelisted register: %x, original value %08x, rsvd %08x\n",
                                        engine->name, reg, results[0], rsvd);

                        expect = results[0];
                        idx = 1;
                        for (v = 0; v < ARRAY_SIZE(values); v++) {
                                u32 w = values[v];

                                if (ro_reg)
                                        expect = results[0];
                                else
                                        expect = reg_write(expect, w, rsvd);
                                pr_info("Wrote %08x, read %08x, expect %08x\n",
                                        w, results[idx], expect);
                                idx++;
                        }
                        for (v = 0; v < ARRAY_SIZE(values); v++) {
                                u32 w = ~values[v];

                                if (ro_reg)
                                        expect = results[0];
                                else
                                        expect = reg_write(expect, w, rsvd);
                                pr_info("Wrote %08x, read %08x, expect %08x\n",
                                        w, results[idx], expect);
                                idx++;
                        }

                        err = -EINVAL;
                }
out_unmap_scratch:
                i915_gem_object_unpin_map(scratch->obj);
out_unmap_batch:
                if (cs)
                        i915_gem_object_unpin_map(batch->obj);
out_ctx:
                intel_context_unpin(ce);
out:
                if (err == -EDEADLK) {
                        err = i915_gem_ww_ctx_backoff(&ww);
                        if (!err)
                                goto retry;
                }
                i915_gem_ww_ctx_fini(&ww);
                if (err)
                        break;
        }

        if (igt_flush_test(engine->i915))
                err = -EIO;

        i915_vma_unpin_and_release(&batch, 0);
out_scratch:
        i915_vma_unpin_and_release(&scratch, 0);
        return err;
}


static int live_dirty_whitelist(void *arg)
{
        struct intel_gt *gt = arg;
        struct intel_engine_cs *engine;
        enum intel_engine_id id;

        /* Can the user write to the whitelisted registers? */

        if (INTEL_GEN(gt->i915) < 7) /* minimum requirement for LRI, SRM, LRM */
                return 0;

        for_each_engine(engine, gt, id) {
                struct intel_context *ce;
                int err;

                if (engine->whitelist.count == 0)
                        continue;

                ce = intel_context_create(engine);
                if (IS_ERR(ce))
                        return PTR_ERR(ce);

                err = check_dirty_whitelist(ce);
                intel_context_put(ce);
                if (err)
                        return err;
        }

        return 0;
}

static int live_reset_whitelist(void *arg)
{
        struct intel_gt *gt = arg;
        struct intel_engine_cs *engine;
        enum intel_engine_id id;
        int err = 0;

        /* If we reset the gpu, we should not lose the RING_NONPRIV */
        igt_global_reset_lock(gt);

        for_each_engine(engine, gt, id) {
                if (engine->whitelist.count == 0)
                        continue;

                if (intel_has_reset_engine(gt)) {
                        err = check_whitelist_across_reset(engine,
                                                           do_engine_reset,
                                                           "engine");
                        if (err)
                                goto out;
                }

                if (intel_has_gpu_reset(gt)) {
                        err = check_whitelist_across_reset(engine,
                                                           do_device_reset,
                                                           "device");
                        if (err)
                                goto out;
                }
        }

out:
        igt_global_reset_unlock(gt);
        return err;
}

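/*
 * Use SRM to copy the current value of every whitelisted register
 * into @results, one dword per slot.
 */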
static int read_whitelisted_registers(struct intel_context *ce,
                                      struct i915_vma *results)
{
        struct intel_engine_cs *engine = ce->engine;
        struct i915_request *rq;
        int i, err = 0;
        u32 srm, *cs;

        rq = intel_context_create_request(ce);
        if (IS_ERR(rq))
                return PTR_ERR(rq);

        i915_vma_lock(results);
        err = i915_request_await_object(rq, results->obj, true);
        if (err == 0)
                err = i915_vma_move_to_active(results, rq, EXEC_OBJECT_WRITE);
        i915_vma_unlock(results);
        if (err)
                goto err_req;

        srm = MI_STORE_REGISTER_MEM;
        if (INTEL_GEN(engine->i915) >= 8)
                srm++;

        cs = intel_ring_begin(rq, 4 * engine->whitelist.count);
        if (IS_ERR(cs)) {
                err = PTR_ERR(cs);
                goto err_req;
        }

        for (i = 0; i < engine->whitelist.count; i++) {
                u64 offset = results->node.start + sizeof(u32) * i;
                u32 reg = i915_mmio_reg_offset(engine->whitelist.list[i].reg);

                /* Clear non priv flags */
                reg &= RING_FORCE_TO_NONPRIV_ADDRESS_MASK;

                *cs++ = srm;
                *cs++ = reg;
                *cs++ = lower_32_bits(offset);
                *cs++ = upper_32_bits(offset);
        }
        intel_ring_advance(rq, cs);

err_req:
        return request_add_sync(rq, err);
}

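/*
 * From an unprivileged user batch, write ~0 to every writable
 * whitelisted register, dirtying the state of the context.
 */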
static int scrub_whitelisted_registers(struct intel_context *ce)
{
        struct intel_engine_cs *engine = ce->engine;
        struct i915_request *rq;
        struct i915_vma *batch;
        int i, err = 0;
        u32 *cs;

        batch = create_batch(ce->vm);
        if (IS_ERR(batch))
                return PTR_ERR(batch);

        cs = i915_gem_object_pin_map_unlocked(batch->obj, I915_MAP_WC);
        if (IS_ERR(cs)) {
                err = PTR_ERR(cs);
                goto err_batch;
        }

        *cs++ = MI_LOAD_REGISTER_IMM(whitelist_writable_count(engine));
        for (i = 0; i < engine->whitelist.count; i++) {
                u32 reg = i915_mmio_reg_offset(engine->whitelist.list[i].reg);

                if (ro_register(reg))
                        continue;

                /* Clear non priv flags */
                reg &= RING_FORCE_TO_NONPRIV_ADDRESS_MASK;

                *cs++ = reg;
                *cs++ = 0xffffffff;
        }
        *cs++ = MI_BATCH_BUFFER_END;

        i915_gem_object_flush_map(batch->obj);
        intel_gt_chipset_flush(engine->gt);

        rq = intel_context_create_request(ce);
        if (IS_ERR(rq)) {
                err = PTR_ERR(rq);
                goto err_unpin;
        }

        if (engine->emit_init_breadcrumb) { /* Be nice if we hang */
                err = engine->emit_init_breadcrumb(rq);
                if (err)
                        goto err_request;
        }

        i915_vma_lock(batch);
        err = i915_request_await_object(rq, batch->obj, false);
        if (err == 0)
                err = i915_vma_move_to_active(batch, rq, 0);
        i915_vma_unlock(batch);
        if (err)
                goto err_request;

        /* Perform the writes from an unprivileged "user" batch */
        err = engine->emit_bb_start(rq, batch->node.start, 0, 0);

err_request:
        err = request_add_sync(rq, err);

err_unpin:
        i915_gem_object_unpin_map(batch->obj);
err_batch:
        i915_vma_unpin_and_release(&batch, 0);
        return err;
}

struct regmask {
        i915_reg_t reg;
        unsigned long gen_mask;
};

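/*
 * Look up @reg in a table of (register, gen_mask) pairs, matching only
 * the entries that apply to the running platform's generation.
 */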
static bool find_reg(struct drm_i915_private *i915,
                     i915_reg_t reg,
                     const struct regmask *tbl,
                     unsigned long count)
{
        u32 offset = i915_mmio_reg_offset(reg);

        while (count--) {
                if (INTEL_INFO(i915)->gen_mask & tbl->gen_mask &&
                    i915_mmio_reg_offset(tbl->reg) == offset)
                        return true;
                tbl++;
        }

        return false;
}

static bool pardon_reg(struct drm_i915_private *i915, i915_reg_t reg)
{
        /* Alas, we must pardon some whitelists. Mistakes already made */
        static const struct regmask pardon[] = {
                { GEN9_CTX_PREEMPT_REG, INTEL_GEN_MASK(9, 9) },
                { GEN8_L3SQCREG4, INTEL_GEN_MASK(9, 9) },
        };

        return find_reg(i915, reg, pardon, ARRAY_SIZE(pardon));
}

static bool result_eq(struct intel_engine_cs *engine,
                      u32 a, u32 b, i915_reg_t reg)
{
        if (a != b && !pardon_reg(engine->i915, reg)) {
                pr_err("Whitelisted register 0x%4x not context saved: A=%08x, B=%08x\n",
                       i915_mmio_reg_offset(reg), a, b);
                return false;
        }

        return true;
}

static bool writeonly_reg(struct drm_i915_private *i915, i915_reg_t reg)
{
        /* Some registers do not seem to behave, and our writes cannot be read back */
        static const struct regmask wo[] = {
                { GEN9_SLICE_COMMON_ECO_CHICKEN1, INTEL_GEN_MASK(9, 9) },
        };

        return find_reg(i915, reg, wo, ARRAY_SIZE(wo));
}

static bool result_neq(struct intel_engine_cs *engine,
                       u32 a, u32 b, i915_reg_t reg)
{
        if (a == b && !writeonly_reg(engine->i915, reg)) {
                pr_err("Whitelist register 0x%4x:%08x was unwritable\n",
                       i915_mmio_reg_offset(reg), a);
                return false;
        }

        return true;
}

static int
check_whitelisted_registers(struct intel_engine_cs *engine,
                            struct i915_vma *A,
                            struct i915_vma *B,
                            bool (*fn)(struct intel_engine_cs *engine,
                                       u32 a, u32 b,
                                       i915_reg_t reg))
{
        u32 *a, *b;
        int i, err;

        a = i915_gem_object_pin_map_unlocked(A->obj, I915_MAP_WB);
        if (IS_ERR(a))
                return PTR_ERR(a);

        b = i915_gem_object_pin_map_unlocked(B->obj, I915_MAP_WB);
        if (IS_ERR(b)) {
                err = PTR_ERR(b);
                goto err_a;
        }

        err = 0;
        for (i = 0; i < engine->whitelist.count; i++) {
                const struct i915_wa *wa = &engine->whitelist.list[i];

                if (i915_mmio_reg_offset(wa->reg) &
                    RING_FORCE_TO_NONPRIV_ACCESS_RD)
                        continue;

                if (!fn(engine, a[i], b[i], wa->reg))
                        err = -EINVAL;
        }

        i915_gem_object_unpin_map(B->obj);
err_a:
        i915_gem_object_unpin_map(A->obj);
        return err;
}

static int live_isolated_whitelist(void *arg)
{
        struct intel_gt *gt = arg;
        struct {
                struct i915_vma *scratch[2];
        } client[2] = {};
        struct intel_engine_cs *engine;
        enum intel_engine_id id;
        int i, err = 0;

        /*
         * Check that a write into a whitelist register works, but
         * is invisible to a second context.
         */

        if (!intel_engines_has_context_isolation(gt->i915))
                return 0;

        for (i = 0; i < ARRAY_SIZE(client); i++) {
                client[i].scratch[0] =
                        __vm_create_scratch_for_read_pinned(gt->vm, 4096);
                if (IS_ERR(client[i].scratch[0])) {
                        err = PTR_ERR(client[i].scratch[0]);
                        goto err;
                }

                client[i].scratch[1] =
                        __vm_create_scratch_for_read_pinned(gt->vm, 4096);
                if (IS_ERR(client[i].scratch[1])) {
                        err = PTR_ERR(client[i].scratch[1]);
                        i915_vma_unpin_and_release(&client[i].scratch[0], 0);
                        goto err;
                }
        }

        for_each_engine(engine, gt, id) {
                struct intel_context *ce[2];

                if (!engine->kernel_context->vm)
                        continue;

                if (!whitelist_writable_count(engine))
                        continue;

                ce[0] = intel_context_create(engine);
                if (IS_ERR(ce[0])) {
                        err = PTR_ERR(ce[0]);
                        break;
                }
                ce[1] = intel_context_create(engine);
                if (IS_ERR(ce[1])) {
                        err = PTR_ERR(ce[1]);
                        intel_context_put(ce[0]);
                        break;
                }

                /* Read default values */
                err = read_whitelisted_registers(ce[0], client[0].scratch[0]);
                if (err)
                        goto err_ce;

                /* Try to overwrite registers (should only affect ctx0) */
                err = scrub_whitelisted_registers(ce[0]);
                if (err)
                        goto err_ce;

                /* Read values from ctx1, we expect these to be defaults */
                err = read_whitelisted_registers(ce[1], client[1].scratch[0]);
                if (err)
                        goto err_ce;

                /* Verify that both reads return the same default values */
                err = check_whitelisted_registers(engine,
                                                  client[0].scratch[0],
                                                  client[1].scratch[0],
                                                  result_eq);
                if (err)
                        goto err_ce;

                /* Read back the updated values in ctx0 */
                err = read_whitelisted_registers(ce[0], client[0].scratch[1]);
                if (err)
                        goto err_ce;

                /* User should be granted privilege to overwrite regs */
                err = check_whitelisted_registers(engine,
                                                  client[0].scratch[0],
                                                  client[0].scratch[1],
                                                  result_neq);
err_ce:
                intel_context_put(ce[1]);
                intel_context_put(ce[0]);
                if (err)
                        break;
        }

err:
        for (i = 0; i < ARRAY_SIZE(client); i++) {
                i915_vma_unpin_and_release(&client[i].scratch[1], 0);
                i915_vma_unpin_and_release(&client[i].scratch[0], 0);
        }

        if (igt_flush_test(gt->i915))
                err = -EIO;

        return err;
}

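/*
 * Check that the reference GT, engine and per-context workaround lists
 * captured by reference_lists_init() still hold on the hardware.
 */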
static bool
verify_wa_lists(struct intel_gt *gt, struct wa_lists *lists,
                const char *str)
{
        struct intel_engine_cs *engine;
        enum intel_engine_id id;
        bool ok = true;

        ok &= wa_list_verify(gt->uncore, &lists->gt_wa_list, str);

        for_each_engine(engine, gt, id) {
                struct intel_context *ce;

                ce = intel_context_create(engine);
                if (IS_ERR(ce))
                        return false;

                ok &= engine_wa_list_verify(ce,
                                            &lists->engine[id].wa_list,
                                            str) == 0;

                ok &= engine_wa_list_verify(ce,
                                            &lists->engine[id].ctx_wa_list,
                                            str) == 0;

                intel_context_put(ce);
        }

        return ok;
}

static int
live_gpu_reset_workarounds(void *arg)
{
        struct intel_gt *gt = arg;
        intel_wakeref_t wakeref;
        struct wa_lists lists;
        bool ok;

        if (!intel_has_gpu_reset(gt))
                return 0;

        pr_info("Verifying after GPU reset...\n");

        igt_global_reset_lock(gt);
        wakeref = intel_runtime_pm_get(gt->uncore->rpm);

        reference_lists_init(gt, &lists);

        ok = verify_wa_lists(gt, &lists, "before reset");
        if (!ok)
                goto out;

        intel_gt_reset(gt, ALL_ENGINES, "live_workarounds");

        ok = verify_wa_lists(gt, &lists, "after reset");

out:
        reference_lists_fini(gt, &lists);
        intel_runtime_pm_put(gt->uncore->rpm, wakeref);
        igt_global_reset_unlock(gt);

        return ok ? 0 : -ESRCH;
}

static int
live_engine_reset_workarounds(void *arg)
{
        struct intel_gt *gt = arg;
        struct intel_engine_cs *engine;
        enum intel_engine_id id;
        struct intel_context *ce;
        struct igt_spinner spin;
        struct i915_request *rq;
        intel_wakeref_t wakeref;
        struct wa_lists lists;
        int ret = 0;

        if (!intel_has_reset_engine(gt))
                return 0;

        igt_global_reset_lock(gt);
        wakeref = intel_runtime_pm_get(gt->uncore->rpm);

        reference_lists_init(gt, &lists);

        for_each_engine(engine, gt, id) {
                bool ok;

                pr_info("Verifying after %s reset...\n", engine->name);
                ce = intel_context_create(engine);
                if (IS_ERR(ce)) {
                        ret = PTR_ERR(ce);
                        break;
                }

                ok = verify_wa_lists(gt, &lists, "before reset");
                if (!ok) {
                        ret = -ESRCH;
                        goto err;
                }

                ret = intel_engine_reset(engine, "live_workarounds:idle");
                if (ret) {
                        pr_err("%s: Reset failed while idle\n", engine->name);
                        goto err;
                }

                ok = verify_wa_lists(gt, &lists, "after idle reset");
                if (!ok) {
                        ret = -ESRCH;
                        goto err;
                }

                ret = igt_spinner_init(&spin, engine->gt);
                if (ret)
                        goto err;

                rq = igt_spinner_create_request(&spin, ce, MI_NOOP);
                if (IS_ERR(rq)) {
                        ret = PTR_ERR(rq);
                        igt_spinner_fini(&spin);
                        goto err;
                }

                ret = request_add_spin(rq, &spin);
                if (ret) {
                        pr_err("%s: Spinner failed to start\n", engine->name);
                        igt_spinner_fini(&spin);
                        goto err;
                }

                ret = intel_engine_reset(engine, "live_workarounds:active");
                if (ret) {
                        pr_err("%s: Reset failed on an active spinner\n",
                               engine->name);
                        igt_spinner_fini(&spin);
                        goto err;
                }

                igt_spinner_end(&spin);
                igt_spinner_fini(&spin);

                ok = verify_wa_lists(gt, &lists, "after busy reset");
                if (!ok) {
                        ret = -ESRCH;
                        goto err;
                }

err:
                intel_context_put(ce);
                if (ret)
                        break;
        }

        reference_lists_fini(gt, &lists);
        intel_runtime_pm_put(gt->uncore->rpm, wakeref);
        igt_global_reset_unlock(gt);

        igt_flush_test(gt->i915);

        return ret;
}

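/*
 * Entry point for the live workaround selftests; skipped entirely if
 * the GPU is already terminally wedged.
 */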
int intel_workarounds_live_selftests(struct drm_i915_private *i915)
{
        static const struct i915_subtest tests[] = {
                SUBTEST(live_dirty_whitelist),
                SUBTEST(live_reset_whitelist),
                SUBTEST(live_isolated_whitelist),
                SUBTEST(live_gpu_reset_workarounds),
                SUBTEST(live_engine_reset_workarounds),
        };

        if (intel_gt_is_wedged(&i915->gt))
                return 0;

        return intel_gt_live_subtests(tests, &i915->gt);
}