drm/xe: Streamline exec queue freeing path
author Niranjana Vishwanathapura <niranjana.vishwanathapura@intel.com>
Tue, 19 Mar 2024 17:59:46 +0000 (10:59 -0700)
committer Matthew Brost <matthew.brost@intel.com>
Wed, 20 Mar 2024 05:36:30 +0000 (22:36 -0700)
Ensure exec queue freeing happens in one place, namely
__xe_exec_queue_free(), which also releases the q->vm reference. Set
q->vm before handling extensions, as they can potentially reference it.

Signed-off-by: Niranjana Vishwanathapura <niranjana.vishwanathapura@intel.com>
Reviewed-by: Matthew Brost <matthew.brost@intel.com>
Signed-off-by: Matthew Brost <matthew.brost@intel.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20240319175947.15890-1-niranjana.vishwanathapura@intel.com
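
For reference, the snippet below is a minimal, self-contained sketch of the
pattern this patch establishes, using hypothetical stand-in types rather than
the real xe structs: the vm reference is taken before user extensions run, and
every error path unwinds through a single free helper that drops that
reference, as __xe_exec_queue_free() does in the driver. It is illustrative
only, not the driver's actual code.

/*
 * Hypothetical stand-in types; vm_get()/vm_put() model xe_vm_get()/xe_vm_put().
 */
#include <stdlib.h>

struct vm {
	int refcount;
};

struct exec_queue {
	struct vm *vm;
};

static struct vm *vm_get(struct vm *vm)
{
	vm->refcount++;		/* models xe_vm_get() */
	return vm;
}

static void vm_put(struct vm *vm)
{
	vm->refcount--;		/* models xe_vm_put() */
}

/* Single place where a queue is freed, mirroring __xe_exec_queue_free(). */
static void exec_queue_free(struct exec_queue *q)
{
	if (q->vm)
		vm_put(q->vm);
	free(q);
}

/* Mirrors __xe_exec_queue_alloc(): the vm reference is taken before extensions run. */
static struct exec_queue *exec_queue_alloc(struct vm *vm,
					   int (*extensions)(struct exec_queue *))
{
	struct exec_queue *q = calloc(1, sizeof(*q));

	if (!q)
		return NULL;

	if (vm)
		q->vm = vm_get(vm);

	/* Extensions may look at q->vm; on failure the helper drops the reference. */
	if (extensions && extensions(q)) {
		exec_queue_free(q);
		return NULL;
	}

	return q;
}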
drivers/gpu/drm/xe/xe_exec_queue.c

index 2016c1a..a1b3c71 100644
@@ -33,6 +33,13 @@ enum xe_exec_queue_sched_prop {
 static int exec_queue_user_extensions(struct xe_device *xe, struct xe_exec_queue *q,
                                      u64 extensions, int ext_number);
 
+static void __xe_exec_queue_free(struct xe_exec_queue *q)
+{
+       if (q->vm)
+               xe_vm_put(q->vm);
+       kfree(q);
+}
+
 static struct xe_exec_queue *__xe_exec_queue_alloc(struct xe_device *xe,
                                                   struct xe_vm *vm,
                                                   u32 logical_mask,
@@ -74,6 +81,9 @@ static struct xe_exec_queue *__xe_exec_queue_alloc(struct xe_device *xe,
        else
                q->sched_props.priority = XE_EXEC_QUEUE_PRIORITY_NORMAL;
 
+       if (vm)
+               q->vm = xe_vm_get(vm);
+
        if (extensions) {
                /*
                 * may set q->usm, must come before xe_lrc_init(),
@@ -81,14 +91,11 @@ static struct xe_exec_queue *__xe_exec_queue_alloc(struct xe_device *xe,
                 */
                err = exec_queue_user_extensions(xe, q, extensions, 0);
                if (err) {
-                       kfree(q);
+                       __xe_exec_queue_free(q);
                        return ERR_PTR(err);
                }
        }
 
-       if (vm)
-               q->vm = xe_vm_get(vm);
-
        if (xe_exec_queue_is_parallel(q)) {
                q->parallel.composite_fence_ctx = dma_fence_context_alloc(1);
                q->parallel.composite_fence_seqno = XE_FENCE_INITIAL_SEQNO;
@@ -97,13 +104,6 @@ static struct xe_exec_queue *__xe_exec_queue_alloc(struct xe_device *xe,
        return q;
 }
 
-static void __xe_exec_queue_free(struct xe_exec_queue *q)
-{
-       if (q->vm)
-               xe_vm_put(q->vm);
-       kfree(q);
-}
-
 static int __xe_exec_queue_init(struct xe_exec_queue *q)
 {
        struct xe_device *xe = gt_to_xe(q->gt);