Merge tag 'drm-intel-gt-next-2021-04-06' of git://anongit.freedesktop.org/drm/drm...

diff --git a/drivers/gpu/drm/i915/gt/selftest_workarounds.c b/drivers/gpu/drm/i915/gt/selftest_workarounds.c
index 2070b91..1985048 100644
--- a/drivers/gpu/drm/i915/gt/selftest_workarounds.c
+++ b/drivers/gpu/drm/i915/gt/selftest_workarounds.c
@@ -1,6 +1,5 @@
+// SPDX-License-Identifier: MIT
 /*
- * SPDX-License-Identifier: MIT
- *
  * Copyright © 2018 Intel Corporation
  */
 
@@ -112,7 +111,7 @@ read_nonprivs(struct intel_context *ce)
 
        i915_gem_object_set_cache_coherency(result, I915_CACHE_LLC);
 
-       cs = i915_gem_object_pin_map(result, I915_MAP_WB);
+       cs = i915_gem_object_pin_map_unlocked(result, I915_MAP_WB);
        if (IS_ERR(cs)) {
                err = PTR_ERR(cs);
                goto err_obj;
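
i915_gem_object_pin_map() now requires the object's dma-resv lock to be held,
so call sites with no surrounding ww transaction switch to the _unlocked
wrapper, which takes and drops the single-object lock around the pin. A
minimal sketch of the wrapper, assuming the helper this series adds to
i915_gem_pages.c:

    void *i915_gem_object_pin_map_unlocked(struct drm_i915_gem_object *obj,
                                           enum i915_map_type type)
    {
            void *ret;

            i915_gem_object_lock(obj, NULL);        /* NULL: no ww context */
            ret = i915_gem_object_pin_map(obj, type);
            i915_gem_object_unlock(obj);

            return ret;
    }
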
@@ -218,7 +217,7 @@ static int check_whitelist(struct intel_context *ce)
        i915_gem_object_lock(results, NULL);
        intel_wedge_on_timeout(&wedge, engine->gt, HZ / 5) /* safety net! */
                err = i915_gem_object_set_to_cpu_domain(results, false);
-       i915_gem_object_unlock(results);
+
        if (intel_gt_is_wedged(engine->gt))
                err = -EIO;
        if (err)
@@ -246,6 +245,7 @@ static int check_whitelist(struct intel_context *ce)
 
        i915_gem_object_unpin_map(results);
 out_put:
+       i915_gem_object_unlock(results);
        i915_gem_object_put(results);
        return err;
 }
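
With pin_map() running under the object lock, check_whitelist() now keeps
`results` locked from the domain flush through the whole readback, and only
unlocks on the common exit path once the map has been unpinned. A condensed,
hypothetical view of the resulting lock scope (read_results() is illustrative,
not from the patch):

    static int read_results(struct drm_i915_gem_object *results)
    {
            u32 *vaddr;
            int err = 0;

            i915_gem_object_lock(results, NULL);

            vaddr = i915_gem_object_pin_map(results, I915_MAP_WB); /* lock held */
            if (IS_ERR(vaddr)) {
                    err = PTR_ERR(vaddr);
                    goto out_unlock;
            }

            /* ... compare vaddr[] against the whitelisted registers ... */

            i915_gem_object_unpin_map(results);
    out_unlock:
            i915_gem_object_unlock(results);        /* only after unpin_map */
            i915_gem_object_put(results);
            return err;
    }
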
@@ -490,7 +490,7 @@ static int check_dirty_whitelist(struct intel_context *ce)
        u32 *cs, *results;
 
        sz = (2 * ARRAY_SIZE(values) + 1) * sizeof(u32);
-       scratch = __vm_create_scratch_for_read(ce->vm, sz);
+       scratch = __vm_create_scratch_for_read_pinned(ce->vm, sz);
        if (IS_ERR(scratch))
                return PTR_ERR(scratch);
 
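
The scratch vma is now created and pinned up front, before the per-register
ww transactions below, so the vma pin itself does not take part in every
lock/backoff cycle. A rough sketch of the _pinned helper, assuming the
intel_gtt.c implementation added alongside this series:

    struct i915_vma *
    __vm_create_scratch_for_read_pinned(struct i915_address_space *vm,
                                        unsigned long size)
    {
            struct i915_vma *vma;
            int err;

            vma = __vm_create_scratch_for_read(vm, size);
            if (IS_ERR(vma))
                    return vma;

            err = i915_vma_pin(vma, 0, 0,
                               i915_vma_is_ggtt(vma) ? PIN_GLOBAL : PIN_USER);
            if (err) {
                    i915_vma_put(vma);
                    return ERR_PTR(err);
            }

            return vma;
    }
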
@@ -502,6 +502,7 @@ static int check_dirty_whitelist(struct intel_context *ce)
 
        for (i = 0; i < engine->whitelist.count; i++) {
                u32 reg = i915_mmio_reg_offset(engine->whitelist.list[i].reg);
+               struct i915_gem_ww_ctx ww;
                u64 addr = scratch->node.start;
                struct i915_request *rq;
                u32 srm, lrm, rsvd;
@@ -517,6 +518,29 @@ static int check_dirty_whitelist(struct intel_context *ce)
 
                ro_reg = ro_register(reg);
 
+               i915_gem_ww_ctx_init(&ww, false);
+retry:
+               cs = NULL;
+               err = i915_gem_object_lock(scratch->obj, &ww);
+               if (!err)
+                       err = i915_gem_object_lock(batch->obj, &ww);
+               if (!err)
+                       err = intel_context_pin_ww(ce, &ww);
+               if (err)
+                       goto out;
+
+               cs = i915_gem_object_pin_map(batch->obj, I915_MAP_WC);
+               if (IS_ERR(cs)) {
+                       err = PTR_ERR(cs);
+                       goto out_ctx;
+               }
+
+               results = i915_gem_object_pin_map(scratch->obj, I915_MAP_WB);
+               if (IS_ERR(results)) {
+                       err = PTR_ERR(results);
+                       goto out_unmap_batch;
+               }
+
                /* Clear non priv flags */
                reg &= RING_FORCE_TO_NONPRIV_ADDRESS_MASK;
 
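
This is the acquire phase of a ww (wait/wound) transaction: the scratch and
batch dma-resv locks plus the context pin are all taken against a single
i915_gem_ww_ctx, and both CPU maps are created while those locks are held,
because the locked flavour of pin_map() now expects the lock. Any of the lock
calls may fail with -EDEADLK, which the backoff block at the bottom of the
loop turns into a retry. A hypothetical helper showing the calling convention
for one object inside an open transaction:

    /* Illustrative only: map an object within an existing ww transaction. */
    static void *map_in_ww(struct i915_gem_ww_ctx *ww,
                           struct drm_i915_gem_object *obj,
                           enum i915_map_type type)
    {
            int err;

            err = i915_gem_object_lock(obj, ww);    /* may return -EDEADLK */
            if (err)
                    return ERR_PTR(err);

            return i915_gem_object_pin_map(obj, type); /* lock held, as required */
    }
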
@@ -528,12 +552,6 @@ static int check_dirty_whitelist(struct intel_context *ce)
                pr_debug("%s: Writing garbage to %x\n",
                         engine->name, reg);
 
-               cs = i915_gem_object_pin_map(batch->obj, I915_MAP_WC);
-               if (IS_ERR(cs)) {
-                       err = PTR_ERR(cs);
-                       goto out_batch;
-               }
-
                /* SRM original */
                *cs++ = srm;
                *cs++ = reg;
@@ -580,11 +598,12 @@ static int check_dirty_whitelist(struct intel_context *ce)
                i915_gem_object_flush_map(batch->obj);
                i915_gem_object_unpin_map(batch->obj);
                intel_gt_chipset_flush(engine->gt);
+               cs = NULL;
 
-               rq = intel_context_create_request(ce);
+               rq = i915_request_create(ce);
                if (IS_ERR(rq)) {
                        err = PTR_ERR(rq);
-                       goto out_batch;
+                       goto out_unmap_scratch;
                }
 
                if (engine->emit_init_breadcrumb) { /* Be nice if we hang */
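
Two details here: `cs = NULL` after the batch map is flushed and unpinned lets
the shared unwind path (out_unmap_batch below) tell whether the batch is still
mapped, and the request constructor changes because
intel_context_create_request() opens its own ww transaction to pin the
context, which would nest inside the one this loop already holds. With the
context pinned explicitly via intel_context_pin_ww(), the plain
i915_request_create(), which expects an already-pinned context, is correct.
For reference, a rough sketch of the wrapper being dropped (lockdep hand-off
details omitted):

    struct i915_request *intel_context_create_request(struct intel_context *ce)
    {
            struct i915_gem_ww_ctx ww;
            struct i915_request *rq;
            int err;

            i915_gem_ww_ctx_init(&ww, true);
    retry:
            err = intel_context_pin_ww(ce, &ww);
            if (!err) {
                    rq = i915_request_create(ce);
                    intel_context_unpin(ce);
            } else if (err == -EDEADLK) {
                    err = i915_gem_ww_ctx_backoff(&ww);
                    if (!err)
                            goto retry;
                    rq = ERR_PTR(err);
            } else {
                    rq = ERR_PTR(err);
            }

            i915_gem_ww_ctx_fini(&ww);
            return rq;
    }
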
@@ -593,20 +612,16 @@ static int check_dirty_whitelist(struct intel_context *ce)
                                goto err_request;
                }
 
-               i915_vma_lock(batch);
                err = i915_request_await_object(rq, batch->obj, false);
                if (err == 0)
                        err = i915_vma_move_to_active(batch, rq, 0);
-               i915_vma_unlock(batch);
                if (err)
                        goto err_request;
 
-               i915_vma_lock(scratch);
                err = i915_request_await_object(rq, scratch->obj, true);
                if (err == 0)
                        err = i915_vma_move_to_active(scratch, rq,
                                                      EXEC_OBJECT_WRITE);
-               i915_vma_unlock(scratch);
                if (err)
                        goto err_request;
 
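
The explicit i915_vma_lock()/i915_vma_unlock() pairs disappear because the ww
transaction above already holds the backing objects' reservation locks; taking
them again here would recursively lock the same dma_resv. At the time of this
series the vma lock is only a thin wrapper (sketch from i915_vma.h):

    static inline void i915_vma_lock(struct i915_vma *vma)
    {
            dma_resv_lock(vma->resv, NULL);
    }

    static inline void i915_vma_unlock(struct i915_vma *vma)
    {
            dma_resv_unlock(vma->resv);
    }
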
@@ -622,13 +637,7 @@ err_request:
                        pr_err("%s: Futzing %x timedout; cancelling test\n",
                               engine->name, reg);
                        intel_gt_set_wedged(engine->gt);
-                       goto out_batch;
-               }
-
-               results = i915_gem_object_pin_map(scratch->obj, I915_MAP_WB);
-               if (IS_ERR(results)) {
-                       err = PTR_ERR(results);
-                       goto out_batch;
+                       goto out_unmap_scratch;
                }
 
                GEM_BUG_ON(values[ARRAY_SIZE(values) - 1] != 0xffffffff);
@@ -639,7 +648,7 @@ err_request:
                                pr_err("%s: Unable to write to whitelisted register %x\n",
                                       engine->name, reg);
                                err = -EINVAL;
-                               goto out_unpin;
+                               goto out_unmap_scratch;
                        }
                } else {
                        rsvd = 0;
@@ -705,15 +714,27 @@ err_request:
 
                        err = -EINVAL;
                }
-out_unpin:
+out_unmap_scratch:
                i915_gem_object_unpin_map(scratch->obj);
+out_unmap_batch:
+               if (cs)
+                       i915_gem_object_unpin_map(batch->obj);
+out_ctx:
+               intel_context_unpin(ce);
+out:
+               if (err == -EDEADLK) {
+                       err = i915_gem_ww_ctx_backoff(&ww);
+                       if (!err)
+                               goto retry;
+               }
+               i915_gem_ww_ctx_fini(&ww);
                if (err)
                        break;
        }
 
        if (igt_flush_test(engine->i915))
                err = -EIO;
-out_batch:
+
        i915_vma_unpin_and_release(&batch, 0);
 out_scratch:
        i915_vma_unpin_and_release(&scratch, 0);
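
This is the release/backoff half of the ww idiom: on -EDEADLK the context
drops every lock it holds, sleeps waiting for the contended lock
(i915_gem_ww_ctx_backoff()), and then retries the whole acquire phase from the
retry label; any other outcome falls through to i915_gem_ww_ctx_fini(), which
unwinds whatever is still locked. The complete shape of the idiom, reduced to
a minimal sketch over two objects:

    /* Minimal sketch of the i915 ww locking idiom used above. */
    static int use_two_objects(struct drm_i915_gem_object *a,
                               struct drm_i915_gem_object *b)
    {
            struct i915_gem_ww_ctx ww;
            int err;

            i915_gem_ww_ctx_init(&ww, false);       /* false: uninterruptible */
    retry:
            err = i915_gem_object_lock(a, &ww);
            if (!err)
                    err = i915_gem_object_lock(b, &ww);
            if (!err) {
                    /* ... both locks held: pin maps, emit requests, ... */
            }
            if (err == -EDEADLK) {
                    /* Lost the race: wait for the contended lock, restart. */
                    err = i915_gem_ww_ctx_backoff(&ww);
                    if (!err)
                            goto retry;
            }
            i915_gem_ww_ctx_fini(&ww);              /* drops remaining locks */
            return err;
    }
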
@@ -847,7 +868,7 @@ static int scrub_whitelisted_registers(struct intel_context *ce)
        if (IS_ERR(batch))
                return PTR_ERR(batch);
 
-       cs = i915_gem_object_pin_map(batch->obj, I915_MAP_WC);
+       cs = i915_gem_object_pin_map_unlocked(batch->obj, I915_MAP_WC);
        if (IS_ERR(cs)) {
                err = PTR_ERR(cs);
                goto err_batch;
@@ -982,11 +1003,11 @@ check_whitelisted_registers(struct intel_engine_cs *engine,
        u32 *a, *b;
        int i, err;
 
-       a = i915_gem_object_pin_map(A->obj, I915_MAP_WB);
+       a = i915_gem_object_pin_map_unlocked(A->obj, I915_MAP_WB);
        if (IS_ERR(a))
                return PTR_ERR(a);
 
-       b = i915_gem_object_pin_map(B->obj, I915_MAP_WB);
+       b = i915_gem_object_pin_map_unlocked(B->obj, I915_MAP_WB);
        if (IS_ERR(b)) {
                err = PTR_ERR(b);
                goto err_a;
@@ -1030,14 +1051,14 @@ static int live_isolated_whitelist(void *arg)
 
        for (i = 0; i < ARRAY_SIZE(client); i++) {
                client[i].scratch[0] =
-                       __vm_create_scratch_for_read(gt->vm, 4096);
+                       __vm_create_scratch_for_read_pinned(gt->vm, 4096);
                if (IS_ERR(client[i].scratch[0])) {
                        err = PTR_ERR(client[i].scratch[0]);
                        goto err;
                }
 
                client[i].scratch[1] =
-                       __vm_create_scratch_for_read(gt->vm, 4096);
+                       __vm_create_scratch_for_read_pinned(gt->vm, 4096);
                if (IS_ERR(client[i].scratch[1])) {
                        err = PTR_ERR(client[i].scratch[1]);
                        i915_vma_unpin_and_release(&client[i].scratch[0], 0);
@@ -1220,7 +1241,11 @@ live_engine_reset_workarounds(void *arg)
                        goto err;
                }
 
-               intel_engine_reset(engine, "live_workarounds:idle");
+               ret = intel_engine_reset(engine, "live_workarounds:idle");
+               if (ret) {
+                       pr_err("%s: Reset failed while idle\n", engine->name);
+                       goto err;
+               }
 
                ok = verify_wa_lists(gt, &lists, "after idle reset");
                if (!ok) {
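
intel_engine_reset() reports failure through its return value (for example
when per-engine reset is unsupported, or the reset request itself fails);
previously the selftest ignored it and went on to verify the workaround lists
on an engine that may never have been reset. Both call sites now propagate the
error. The declaration, from intel_reset.h:

    /* Returns 0 on success or a negative errno. */
    int intel_engine_reset(struct intel_engine_cs *engine, const char *reason);
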
@@ -1241,12 +1266,18 @@ live_engine_reset_workarounds(void *arg)
 
                ret = request_add_spin(rq, &spin);
                if (ret) {
-                       pr_err("Spinner failed to start\n");
+                       pr_err("%s: Spinner failed to start\n", engine->name);
                        igt_spinner_fini(&spin);
                        goto err;
                }
 
-               intel_engine_reset(engine, "live_workarounds:active");
+               ret = intel_engine_reset(engine, "live_workarounds:active");
+               if (ret) {
+                       pr_err("%s: Reset failed on an active spinner\n",
+                              engine->name);
+                       igt_spinner_fini(&spin);
+                       goto err;
+               }
 
                igt_spinner_end(&spin);
                igt_spinner_fini(&spin);