drm/i915/selftest: use igt_vma_move_to_active_unlocked if possible
author		Andrzej Hajda <andrzej.hajda@intel.com>
		Tue, 13 Dec 2022 12:19:51 +0000 (13:19 +0100)
committer	Andi Shyti <andi.shyti@linux.intel.com>
		Mon, 9 Jan 2023 13:23:52 +0000 (14:23 +0100)
The new igt_vma_move_to_active_unlocked() helper replaces the common i915_vma_lock()/i915_vma_move_to_active()/i915_vma_unlock() sequence that the selftests open-code.
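For reference, a minimal sketch of what the helper is assumed to wrap, reconstructed from the open-coded sequence removed below (the actual definition lives in the selftests' igt_gem_utils.h header; its exact signature and internals may differ):

static inline int __must_check
igt_vma_move_to_active_unlocked(struct i915_vma *vma, struct i915_request *rq,
				unsigned int flags)
{
	int err;

	/* Take the object's dma-resv lock, publish the vma on the request, drop the lock. */
	i915_vma_lock(vma);
	err = i915_vma_move_to_active(vma, rq, flags);
	i915_vma_unlock(vma);

	return err;
}

With the helper, callers that previously open-coded the three calls collapse to a single line, as in the hunks below.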

Signed-off-by: Andrzej Hajda <andrzej.hajda@intel.com>
Reviewed-by: Tvrtko Ursulin <tvrtko.ursulin@intel.com>
Reviewed-by: Andi Shyti <andi.shyti@linux.intel.com>
Signed-off-by: Andi Shyti <andi.shyti@linux.intel.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20221213121951.1515023-2-andrzej.hajda@intel.com
drivers/gpu/drm/i915/gem/selftests/i915_gem_context.c
drivers/gpu/drm/i915/gem/selftests/igt_gem_utils.c
drivers/gpu/drm/i915/gt/selftest_execlists.c
drivers/gpu/drm/i915/gt/selftest_lrc.c
drivers/gpu/drm/i915/gt/selftest_mocs.c
drivers/gpu/drm/i915/gt/selftest_workarounds.c

diff --git a/drivers/gpu/drm/i915/gem/selftests/i915_gem_context.c b/drivers/gpu/drm/i915/gem/selftests/i915_gem_context.c
index 414ee2c..a81fa6a 100644
--- a/drivers/gpu/drm/i915/gem/selftests/i915_gem_context.c
+++ b/drivers/gpu/drm/i915/gem/selftests/i915_gem_context.c
@@ -1551,9 +1551,7 @@ static int write_to_scratch(struct i915_gem_context *ctx,
                goto err_unpin;
        }
 
-       i915_vma_lock(vma);
-       err = i915_vma_move_to_active(vma, rq, 0);
-       i915_vma_unlock(vma);
+       err = igt_vma_move_to_active_unlocked(vma, rq, 0);
        if (err)
                goto skip_request;
 
@@ -1686,9 +1684,7 @@ static int read_from_scratch(struct i915_gem_context *ctx,
                goto err_unpin;
        }
 
-       i915_vma_lock(vma);
-       err = i915_vma_move_to_active(vma, rq, EXEC_OBJECT_WRITE);
-       i915_vma_unlock(vma);
+       err = igt_vma_move_to_active_unlocked(vma, rq, EXEC_OBJECT_WRITE);
        if (err)
                goto skip_request;
 
diff --git a/drivers/gpu/drm/i915/gem/selftests/igt_gem_utils.c b/drivers/gpu/drm/i915/gem/selftests/igt_gem_utils.c
index c147038..20a232a 100644
--- a/drivers/gpu/drm/i915/gem/selftests/igt_gem_utils.c
+++ b/drivers/gpu/drm/i915/gem/selftests/igt_gem_utils.c
@@ -130,15 +130,11 @@ int igt_gpu_fill_dw(struct intel_context *ce,
                goto err_batch;
        }
 
-       i915_vma_lock(batch);
-       err = i915_vma_move_to_active(batch, rq, 0);
-       i915_vma_unlock(batch);
+       err = igt_vma_move_to_active_unlocked(batch, rq, 0);
        if (err)
                goto skip_request;
 
-       i915_vma_lock(vma);
-       err = i915_vma_move_to_active(vma, rq, EXEC_OBJECT_WRITE);
-       i915_vma_unlock(vma);
+       err = igt_vma_move_to_active_unlocked(vma, rq, EXEC_OBJECT_WRITE);
        if (err)
                goto skip_request;
 
diff --git a/drivers/gpu/drm/i915/gt/selftest_execlists.c b/drivers/gpu/drm/i915/gt/selftest_execlists.c
index a619057..736b89a 100644
--- a/drivers/gpu/drm/i915/gt/selftest_execlists.c
+++ b/drivers/gpu/drm/i915/gt/selftest_execlists.c
@@ -2763,13 +2763,11 @@ static int create_gang(struct intel_engine_cs *engine,
        rq->batch = i915_vma_get(vma);
        i915_request_get(rq);
 
-       i915_vma_lock(vma);
-       err = i915_vma_move_to_active(vma, rq, 0);
+       err = igt_vma_move_to_active_unlocked(vma, rq, 0);
        if (!err)
                err = rq->engine->emit_bb_start(rq,
                                                i915_vma_offset(vma),
                                                PAGE_SIZE, 0);
-       i915_vma_unlock(vma);
        i915_request_add(rq);
        if (err)
                goto err_rq;
@@ -3177,9 +3175,7 @@ create_gpr_client(struct intel_engine_cs *engine,
                goto out_batch;
        }
 
-       i915_vma_lock(vma);
-       err = i915_vma_move_to_active(vma, rq, 0);
-       i915_vma_unlock(vma);
+       err = igt_vma_move_to_active_unlocked(vma, rq, 0);
 
        i915_vma_lock(batch);
        if (!err)
@@ -3514,13 +3510,11 @@ static int smoke_submit(struct preempt_smoke *smoke,
        }
 
        if (vma) {
-               i915_vma_lock(vma);
-               err = i915_vma_move_to_active(vma, rq, 0);
+               err = igt_vma_move_to_active_unlocked(vma, rq, 0);
                if (!err)
                        err = rq->engine->emit_bb_start(rq,
                                                        i915_vma_offset(vma),
                                                        PAGE_SIZE, 0);
-               i915_vma_unlock(vma);
        }
 
        i915_request_add(rq);
diff --git a/drivers/gpu/drm/i915/gt/selftest_lrc.c b/drivers/gpu/drm/i915/gt/selftest_lrc.c
index a61ae9d..a78a3d2 100644
--- a/drivers/gpu/drm/i915/gt/selftest_lrc.c
+++ b/drivers/gpu/drm/i915/gt/selftest_lrc.c
@@ -599,9 +599,7 @@ __gpr_read(struct intel_context *ce, struct i915_vma *scratch, u32 *slot)
                *cs++ = 0;
        }
 
-       i915_vma_lock(scratch);
-       err = i915_vma_move_to_active(scratch, rq, EXEC_OBJECT_WRITE);
-       i915_vma_unlock(scratch);
+       err = igt_vma_move_to_active_unlocked(scratch, rq, EXEC_OBJECT_WRITE);
 
        i915_request_get(rq);
        i915_request_add(rq);
diff --git a/drivers/gpu/drm/i915/gt/selftest_mocs.c b/drivers/gpu/drm/i915/gt/selftest_mocs.c
index f27cc28..ca009a6 100644
--- a/drivers/gpu/drm/i915/gt/selftest_mocs.c
+++ b/drivers/gpu/drm/i915/gt/selftest_mocs.c
@@ -228,9 +228,7 @@ static int check_mocs_engine(struct live_mocs *arg,
        if (IS_ERR(rq))
                return PTR_ERR(rq);
 
-       i915_vma_lock(vma);
-       err = i915_vma_move_to_active(vma, rq, EXEC_OBJECT_WRITE);
-       i915_vma_unlock(vma);
+       err = igt_vma_move_to_active_unlocked(vma, rq, EXEC_OBJECT_WRITE);
 
        /* Read the mocs tables back using SRM */
        offset = i915_ggtt_offset(vma);
diff --git a/drivers/gpu/drm/i915/gt/selftest_workarounds.c b/drivers/gpu/drm/i915/gt/selftest_workarounds.c
index 9c54497..14a8b25 100644
--- a/drivers/gpu/drm/i915/gt/selftest_workarounds.c
+++ b/drivers/gpu/drm/i915/gt/selftest_workarounds.c
@@ -138,9 +138,7 @@ read_nonprivs(struct intel_context *ce)
                goto err_pin;
        }
 
-       i915_vma_lock(vma);
-       err = i915_vma_move_to_active(vma, rq, EXEC_OBJECT_WRITE);
-       i915_vma_unlock(vma);
+       err = igt_vma_move_to_active_unlocked(vma, rq, EXEC_OBJECT_WRITE);
        if (err)
                goto err_req;
 
@@ -853,9 +851,7 @@ static int read_whitelisted_registers(struct intel_context *ce,
        if (IS_ERR(rq))
                return PTR_ERR(rq);
 
-       i915_vma_lock(results);
-       err = i915_vma_move_to_active(results, rq, EXEC_OBJECT_WRITE);
-       i915_vma_unlock(results);
+       err = igt_vma_move_to_active_unlocked(results, rq, EXEC_OBJECT_WRITE);
        if (err)
                goto err_req;
 
@@ -935,9 +931,7 @@ static int scrub_whitelisted_registers(struct intel_context *ce)
                        goto err_request;
        }
 
-       i915_vma_lock(batch);
-       err = i915_vma_move_to_active(batch, rq, 0);
-       i915_vma_unlock(batch);
+       err = igt_vma_move_to_active_unlocked(batch, rq, 0);
        if (err)
                goto err_request;