drm/xe: fix submissions without vm
Author: Daniele Ceraolo Spurio <daniele.ceraolospurio@intel.com>
Tue, 22 Aug 2023 17:33:32 +0000 (10:33 -0700)
Committer: Rodrigo Vivi <rodrigo.vivi@intel.com>
Thu, 21 Dec 2023 16:40:27 +0000 (11:40 -0500)
Kernel queues can submit privileged batches directly in GGTT, so they
don't always need a vm. The submission front-end already supports
creating and submitting jobs without a vm, but some parts of the
back-end assume the vm is always there. Fix this by handling a lack of
vm in the back-end as well.

v2: s/XE_BUG_ON/XE_WARN_ON, s/engine/exec_queue

Signed-off-by: Daniele Ceraolo Spurio <daniele.ceraolospurio@intel.com>
Cc: Matthew Brost <matthew.brost@intel.com>
Reviewed-by: Matthew Brost <matthew.brost@intel.com>
Link: https://lore.kernel.org/r/20230822173334.1664332-2-daniele.ceraolospurio@intel.com
Signed-off-by: Rodrigo Vivi <rodrigo.vivi@intel.com>
drivers/gpu/drm/xe/xe_guc_submit.c
drivers/gpu/drm/xe/xe_ring_ops.c
drivers/gpu/drm/xe/xe_sched_job.c

index 55c7b13..87f2972 100644 (file)
@@ -1136,7 +1136,7 @@ static int guc_exec_queue_init(struct xe_exec_queue *q)
        ge->q = q;
        init_waitqueue_head(&ge->suspend_wait);
 
-       timeout = xe_vm_no_dma_fences(q->vm) ? MAX_SCHEDULE_TIMEOUT :
+       timeout = (q->vm && xe_vm_no_dma_fences(q->vm)) ? MAX_SCHEDULE_TIMEOUT :
                  q->hwe->eclass->sched_props.job_timeout_ms;
        err = xe_sched_init(&ge->sched, &drm_sched_ops, &xe_sched_ops, NULL,
                             q->lrc[0].ring.size / MAX_JOB_SIZE_BYTES,
index 9e23293..2b4127e 100644 (file)
@@ -213,7 +213,7 @@ static void __emit_job_gen12_simple(struct xe_sched_job *job, struct xe_lrc *lrc
        u32 ppgtt_flag = get_ppgtt_flag(job);
        struct xe_vm *vm = job->q->vm;
 
-       if (vm->batch_invalidate_tlb) {
+       if (vm && vm->batch_invalidate_tlb) {
                dw[i++] = preparser_disable(true);
                i = emit_flush_imm_ggtt(xe_lrc_start_seqno_ggtt_addr(lrc),
                                        seqno, true, dw, i);
@@ -273,13 +273,13 @@ static void __emit_job_gen12_video(struct xe_sched_job *job, struct xe_lrc *lrc,
                        i = emit_aux_table_inv(gt, VE0_AUX_INV, dw, i);
        }
 
-       if (vm->batch_invalidate_tlb)
+       if (vm && vm->batch_invalidate_tlb)
                i = emit_flush_imm_ggtt(xe_lrc_start_seqno_ggtt_addr(lrc),
                                        seqno, true, dw, i);
 
        dw[i++] = preparser_disable(false);
 
-       if (!vm->batch_invalidate_tlb)
+       if (!vm || !vm->batch_invalidate_tlb)
                i = emit_store_imm_ggtt(xe_lrc_start_seqno_ggtt_addr(lrc),
                                        seqno, dw, i);
 
@@ -318,7 +318,7 @@ static void __emit_job_gen12_render_compute(struct xe_sched_job *job,
                mask_flags = PIPE_CONTROL_3D_ENGINE_FLAGS;
 
        /* See __xe_pt_bind_vma() for a discussion on TLB invalidations. */
-       i = emit_pipe_invalidate(mask_flags, vm->batch_invalidate_tlb, dw, i);
+       i = emit_pipe_invalidate(mask_flags, vm && vm->batch_invalidate_tlb, dw, i);
 
        /* hsdes: 1809175790 */
        if (has_aux_ccs(xe))
index de2851d..0479d05 100644 (file)
@@ -87,6 +87,9 @@ struct xe_sched_job *xe_sched_job_create(struct xe_exec_queue *q,
        int i, j;
        u32 width;
 
+       /* only a kernel context can submit a vm-less job */
+       XE_WARN_ON(!q->vm && !(q->flags & EXEC_QUEUE_FLAG_KERNEL));
+
        /* Migration and kernel engines have their own locking */
        if (!(q->flags & (EXEC_QUEUE_FLAG_KERNEL | EXEC_QUEUE_FLAG_VM |
                          EXEC_QUEUE_FLAG_WA))) {