drm/xe/exec_queue: Rename xe_exec_queue::compute to xe_exec_queue::lr
author Francois Dugast <francois.dugast@intel.com>
Thu, 13 Jun 2024 17:03:48 +0000 (19:03 +0200)
committer Rodrigo Vivi <rodrigo.vivi@intel.com>
Mon, 17 Jun 2024 20:46:52 +0000 (16:46 -0400)
The properties of this struct are used in long-running contexts, so
make that clear by renaming it to lr, in alignment with the rest
of the code.

Cc: Matthew Brost <matthew.brost@intel.com>
Signed-off-by: Francois Dugast <francois.dugast@intel.com>
Reviewed-by: Matthew Brost <matthew.brost@intel.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20240613170348.723245-1-francois.dugast@intel.com
Signed-off-by: Rodrigo Vivi <rodrigo.vivi@intel.com>
drivers/gpu/drm/xe/xe_exec_queue.c
drivers/gpu/drm/xe/xe_exec_queue_types.h
drivers/gpu/drm/xe/xe_preempt_fence.c
drivers/gpu/drm/xe/xe_vm.c

drivers/gpu/drm/xe/xe_exec_queue.c
index cf45df0..0ba3783 100644
@@ -67,7 +67,7 @@ static struct xe_exec_queue *__xe_exec_queue_alloc(struct xe_device *xe,
        q->fence_irq = &gt->fence_irq[hwe->class];
        q->ring_ops = gt->ring_ops[hwe->class];
        q->ops = gt->exec_queue_ops;
-       INIT_LIST_HEAD(&q->compute.link);
+       INIT_LIST_HEAD(&q->lr.link);
        INIT_LIST_HEAD(&q->multi_gt_link);
 
        q->sched_props.timeslice_us = hwe->eclass->sched_props.timeslice_us;
@@ -633,8 +633,8 @@ int xe_exec_queue_create_ioctl(struct drm_device *dev, void *data,
                        return PTR_ERR(q);
 
                if (xe_vm_in_preempt_fence_mode(vm)) {
-                       q->compute.context = dma_fence_context_alloc(1);
-                       spin_lock_init(&q->compute.lock);
+                       q->lr.context = dma_fence_context_alloc(1);
+                       spin_lock_init(&q->lr.lock);
 
                        err = xe_vm_add_compute_exec_queue(vm, q);
                        if (XE_IOCTL_DBG(xe, err))
drivers/gpu/drm/xe/xe_exec_queue_types.h
index f0c5f82..201588e 100644
@@ -113,19 +113,19 @@ struct xe_exec_queue {
                enum xe_exec_queue_priority priority;
        } sched_props;
 
-       /** @compute: compute exec queue state */
+       /** @lr: long-running exec queue state */
        struct {
-               /** @compute.pfence: preemption fence */
+               /** @lr.pfence: preemption fence */
                struct dma_fence *pfence;
-               /** @compute.context: preemption fence context */
+               /** @lr.context: preemption fence context */
                u64 context;
-               /** @compute.seqno: preemption fence seqno */
+               /** @lr.seqno: preemption fence seqno */
                u32 seqno;
-               /** @compute.link: link into VM's list of exec queues */
+               /** @lr.link: link into VM's list of exec queues */
                struct list_head link;
-               /** @compute.lock: preemption fences lock */
+               /** @lr.lock: preemption fences lock */
                spinlock_t lock;
-       } compute;
+       } lr;
 
        /** @ops: submission backend exec queue operations */
        const struct xe_exec_queue_ops *ops;
drivers/gpu/drm/xe/xe_preempt_fence.c
index 5b243b7..e8b8ae5 100644
@@ -129,7 +129,7 @@ xe_preempt_fence_arm(struct xe_preempt_fence *pfence, struct xe_exec_queue *q,
        list_del_init(&pfence->link);
        pfence->q = xe_exec_queue_get(q);
        dma_fence_init(&pfence->base, &preempt_fence_ops,
-                     &q->compute.lock, context, seqno);
+                     &q->lr.lock, context, seqno);
 
        return &pfence->base;
 }
drivers/gpu/drm/xe/xe_vm.c
index ffda487..61d4d95 100644
@@ -83,10 +83,10 @@ static bool preempt_fences_waiting(struct xe_vm *vm)
        lockdep_assert_held(&vm->lock);
        xe_vm_assert_held(vm);
 
-       list_for_each_entry(q, &vm->preempt.exec_queues, compute.link) {
-               if (!q->compute.pfence ||
+       list_for_each_entry(q, &vm->preempt.exec_queues, lr.link) {
+               if (!q->lr.pfence ||
                    test_bit(DMA_FENCE_FLAG_ENABLE_SIGNAL_BIT,
-                            &q->compute.pfence->flags)) {
+                            &q->lr.pfence->flags)) {
                        return true;
                }
        }
@@ -129,14 +129,14 @@ static int wait_for_existing_preempt_fences(struct xe_vm *vm)
 
        xe_vm_assert_held(vm);
 
-       list_for_each_entry(q, &vm->preempt.exec_queues, compute.link) {
-               if (q->compute.pfence) {
-                       long timeout = dma_fence_wait(q->compute.pfence, false);
+       list_for_each_entry(q, &vm->preempt.exec_queues, lr.link) {
+               if (q->lr.pfence) {
+                       long timeout = dma_fence_wait(q->lr.pfence, false);
 
                        if (timeout < 0)
                                return -ETIME;
-                       dma_fence_put(q->compute.pfence);
-                       q->compute.pfence = NULL;
+                       dma_fence_put(q->lr.pfence);
+                       q->lr.pfence = NULL;
                }
        }
 
@@ -148,7 +148,7 @@ static bool xe_vm_is_idle(struct xe_vm *vm)
        struct xe_exec_queue *q;
 
        xe_vm_assert_held(vm);
-       list_for_each_entry(q, &vm->preempt.exec_queues, compute.link) {
+       list_for_each_entry(q, &vm->preempt.exec_queues, lr.link) {
                if (!xe_exec_queue_is_idle(q))
                        return false;
        }
@@ -161,17 +161,17 @@ static void arm_preempt_fences(struct xe_vm *vm, struct list_head *list)
        struct list_head *link;
        struct xe_exec_queue *q;
 
-       list_for_each_entry(q, &vm->preempt.exec_queues, compute.link) {
+       list_for_each_entry(q, &vm->preempt.exec_queues, lr.link) {
                struct dma_fence *fence;
 
                link = list->next;
                xe_assert(vm->xe, link != list);
 
                fence = xe_preempt_fence_arm(to_preempt_fence_from_link(link),
-                                            q, q->compute.context,
-                                            ++q->compute.seqno);
-               dma_fence_put(q->compute.pfence);
-               q->compute.pfence = fence;
+                                            q, q->lr.context,
+                                            ++q->lr.seqno);
+               dma_fence_put(q->lr.pfence);
+               q->lr.pfence = fence;
        }
 }
 
@@ -191,10 +191,10 @@ static int add_preempt_fences(struct xe_vm *vm, struct xe_bo *bo)
        if (err)
                goto out_unlock;
 
-       list_for_each_entry(q, &vm->preempt.exec_queues, compute.link)
-               if (q->compute.pfence) {
+       list_for_each_entry(q, &vm->preempt.exec_queues, lr.link)
+               if (q->lr.pfence) {
                        dma_resv_add_fence(bo->ttm.base.resv,
-                                          q->compute.pfence,
+                                          q->lr.pfence,
                                           DMA_RESV_USAGE_BOOKKEEP);
                }
 
@@ -211,10 +211,10 @@ static void resume_and_reinstall_preempt_fences(struct xe_vm *vm,
        lockdep_assert_held(&vm->lock);
        xe_vm_assert_held(vm);
 
-       list_for_each_entry(q, &vm->preempt.exec_queues, compute.link) {
+       list_for_each_entry(q, &vm->preempt.exec_queues, lr.link) {
                q->ops->resume(q);
 
-               drm_gpuvm_resv_add_fence(&vm->gpuvm, exec, q->compute.pfence,
+               drm_gpuvm_resv_add_fence(&vm->gpuvm, exec, q->lr.pfence,
                                         DMA_RESV_USAGE_BOOKKEEP, DMA_RESV_USAGE_BOOKKEEP);
        }
 }
@@ -238,16 +238,16 @@ int xe_vm_add_compute_exec_queue(struct xe_vm *vm, struct xe_exec_queue *q)
        if (err)
                goto out_up_write;
 
-       pfence = xe_preempt_fence_create(q, q->compute.context,
-                                        ++q->compute.seqno);
+       pfence = xe_preempt_fence_create(q, q->lr.context,
+                                        ++q->lr.seqno);
        if (!pfence) {
                err = -ENOMEM;
                goto out_fini;
        }
 
-       list_add(&q->compute.link, &vm->preempt.exec_queues);
+       list_add(&q->lr.link, &vm->preempt.exec_queues);
        ++vm->preempt.num_exec_queues;
-       q->compute.pfence = pfence;
+       q->lr.pfence = pfence;
 
        down_read(&vm->userptr.notifier_lock);
 
@@ -284,12 +284,12 @@ void xe_vm_remove_compute_exec_queue(struct xe_vm *vm, struct xe_exec_queue *q)
                return;
 
        down_write(&vm->lock);
-       list_del(&q->compute.link);
+       list_del(&q->lr.link);
        --vm->preempt.num_exec_queues;
-       if (q->compute.pfence) {
-               dma_fence_enable_sw_signaling(q->compute.pfence);
-               dma_fence_put(q->compute.pfence);
-               q->compute.pfence = NULL;
+       if (q->lr.pfence) {
+               dma_fence_enable_sw_signaling(q->lr.pfence);
+               dma_fence_put(q->lr.pfence);
+               q->lr.pfence = NULL;
        }
        up_write(&vm->lock);
 }
@@ -327,7 +327,7 @@ static void xe_vm_kill(struct xe_vm *vm, bool unlocked)
        vm->flags |= XE_VM_FLAG_BANNED;
        trace_xe_vm_kill(vm);
 
-       list_for_each_entry(q, &vm->preempt.exec_queues, compute.link)
+       list_for_each_entry(q, &vm->preempt.exec_queues, lr.link)
                q->ops->kill(q);
 
        if (unlocked)
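
A minimal standalone C sketch (not the kernel code above): it mirrors the renamed "lr" sub-struct to show how grouping the long-running state under one member reads at call sites such as preempt_fences_waiting(). The names toy_fence and toy_exec_queue, and the array-based loop, are assumptions made up for illustration; the real driver uses struct dma_fence and a list_head walked with list_for_each_entry().

/*
 * Simplified, userspace-buildable sketch of the renamed "lr" sub-struct.
 * Field names follow the patch; everything else is illustrative only.
 */
#include <stdbool.h>
#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

struct toy_fence {
	bool signaled;
};

struct toy_exec_queue {
	/* long-running exec queue state, grouped under "lr" as in the patch */
	struct {
		struct toy_fence *pfence;	/* preemption fence */
		uint64_t context;		/* preemption fence context */
		uint32_t seqno;			/* preemption fence seqno */
	} lr;
};

/* Returns true if any queue still has an unsignaled preemption fence. */
static bool preempt_fences_waiting(struct toy_exec_queue *queues, size_t n)
{
	for (size_t i = 0; i < n; i++) {
		if (queues[i].lr.pfence && !queues[i].lr.pfence->signaled)
			return true;
	}
	return false;
}

int main(void)
{
	struct toy_fence f = { .signaled = false };
	struct toy_exec_queue queues[2] = {
		{ .lr = { .pfence = &f, .context = 1, .seqno = 1 } },
		{ .lr = { .pfence = NULL } },
	};

	printf("waiting: %d\n", preempt_fences_waiting(queues, 2));
	f.signaled = true;
	printf("waiting: %d\n", preempt_fences_waiting(queues, 2));
	return 0;
}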