Revert "drm/xe/vm: drop vm->destroy_work"
author     Matthew Auld <matthew.auld@intel.com>
           Tue, 23 Apr 2024 07:47:22 +0000 (08:47 +0100)
committer  Matthew Auld <matthew.auld@intel.com>
           Thu, 25 Apr 2024 15:52:34 +0000 (16:52 +0100)
This reverts commit 5b259c0d1d3caa6efc66c2b856840e68993f814e.

Cleanup here is good, however we need to be able to flush a worker during
vm destruction, which might involve sleeping, so bring back the worker.

Signed-off-by: Matthew Auld <matthew.auld@intel.com>
Cc: Matthew Brost <matthew.brost@intel.com>
Reviewed-by: Matthew Brost <matthew.brost@intel.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20240423074721.119633-3-matthew.auld@intel.com
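
For context, what this revert restores is the usual "defer the final free to a
workqueue" idiom: the last reference can be dropped from dma_fence signaling
(irq/atomic) context, so the drm_gpuvm free callback only queues work and the
worker performs the teardown that may sleep. Below is a minimal sketch of the
idiom; vm_destroy_work_func and the workqueue calls match the diff that
follows, while struct example_vm and example_vm_free() are illustrative
stand-ins with simplified bodies, not the actual xe teardown:

	#include <linux/workqueue.h>
	#include <linux/slab.h>

	struct example_vm {
		struct work_struct destroy_work;
		/* ... page tables, lists, locks ... */
	};

	/* Runs in process context on system_unbound_wq, so it may sleep. */
	static void vm_destroy_work_func(struct work_struct *w)
	{
		struct example_vm *vm =
			container_of(w, struct example_vm, destroy_work);

		/* Sleepable teardown: take locks, flush workers, free memory. */
		kfree(vm);
	}

	/* May be called from dma_fence signaling (irq/atomic) context. */
	static void example_vm_free(struct example_vm *vm)
	{
		/* Context may not allow sleeping; punt to a worker. */
		queue_work(system_unbound_wq, &vm->destroy_work);
	}

	/* At creation: INIT_WORK(&vm->destroy_work, vm_destroy_work_func); */

system_unbound_wq is used rather than the per-CPU system_wq because the
teardown may sleep for a while and has no CPU affinity requirement.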
drivers/gpu/drm/xe/xe_vm.c
drivers/gpu/drm/xe/xe_vm_types.h

diff --git a/drivers/gpu/drm/xe/xe_vm.c b/drivers/gpu/drm/xe/xe_vm.c
index 7ae2b03..633485c 100644
--- a/drivers/gpu/drm/xe/xe_vm.c
+++ b/drivers/gpu/drm/xe/xe_vm.c
@@ -1173,6 +1173,8 @@ static const struct xe_pt_ops xelp_pt_ops = {
        .pde_encode_bo = xelp_pde_encode_bo,
 };
 
+static void vm_destroy_work_func(struct work_struct *w);
+
 /**
  * xe_vm_create_scratch() - Setup a scratch memory pagetable tree for the
  * given tile and vm.
@@ -1252,6 +1254,8 @@ struct xe_vm *xe_vm_create(struct xe_device *xe, u32 flags)
        init_rwsem(&vm->userptr.notifier_lock);
        spin_lock_init(&vm->userptr.invalidated_lock);
 
+       INIT_WORK(&vm->destroy_work, vm_destroy_work_func);
+
        INIT_LIST_HEAD(&vm->preempt.exec_queues);
        vm->preempt.min_run_period_ms = 10;     /* FIXME: Wire up to uAPI */
 
@@ -1489,9 +1493,10 @@ void xe_vm_close_and_put(struct xe_vm *vm)
        xe_vm_put(vm);
 }
 
-static void xe_vm_free(struct drm_gpuvm *gpuvm)
+static void vm_destroy_work_func(struct work_struct *w)
 {
-       struct xe_vm *vm = container_of(gpuvm, struct xe_vm, gpuvm);
+       struct xe_vm *vm =
+               container_of(w, struct xe_vm, destroy_work);
        struct xe_device *xe = vm->xe;
        struct xe_tile *tile;
        u8 id;
@@ -1511,6 +1516,14 @@ static void xe_vm_free(struct drm_gpuvm *gpuvm)
        kfree(vm);
 }
 
+static void xe_vm_free(struct drm_gpuvm *gpuvm)
+{
+       struct xe_vm *vm = container_of(gpuvm, struct xe_vm, gpuvm);
+
+       /* To destroy the VM we need to be able to sleep */
+       queue_work(system_unbound_wq, &vm->destroy_work);
+}
+
 struct xe_vm *xe_vm_lookup(struct xe_file *xef, u32 id)
 {
        struct xe_vm *vm;
diff --git a/drivers/gpu/drm/xe/xe_vm_types.h b/drivers/gpu/drm/xe/xe_vm_types.h
index 72a1006..0447c79 100644
--- a/drivers/gpu/drm/xe/xe_vm_types.h
+++ b/drivers/gpu/drm/xe/xe_vm_types.h
@@ -177,6 +177,13 @@ struct xe_vm {
         */
        struct list_head rebind_list;
 
+       /**
+        * @destroy_work: worker to destroy VM, needed as a dma_fence signaling
+        * from an irq context can be last put and the destroy needs to be able
+        * to sleep.
+        */
+       struct work_struct destroy_work;
+
        /**
         * @rftree: range fence tree to track updates to page table structure.
         * Used to implement conflict tracking between independent bind engines.
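
The @destroy_work kernel-doc above names the constraint: the final xe_vm_put()
may come from a dma_fence callback, i.e. from signaling (irq/atomic) context,
where the destroy path must not sleep. A hypothetical illustration of such a
last-put path (this callback is not part of the patch; vm_fence_cb and
vm_fence_signaled are made-up names):

	#include <linux/dma-fence.h>
	#include <linux/slab.h>

	struct vm_fence_cb {
		struct dma_fence_cb cb;
		struct xe_vm *vm;
	};

	static void vm_fence_signaled(struct dma_fence *fence,
				      struct dma_fence_cb *cb)
	{
		struct vm_fence_cb *vcb = container_of(cb, struct vm_fence_cb, cb);

		/* Atomic context: this put must not tear the VM down inline. */
		xe_vm_put(vcb->vm);
		kfree(vcb);
	}

With the revert applied, such a put only queues destroy_work, and the sleeping
teardown runs later from the workqueue.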