drm/xe/vm: Fix ASID XA usage
diff --git a/drivers/gpu/drm/xe/xe_vm.c b/drivers/gpu/drm/xe/xe_vm.c
index 3132114..7c0ae43 100644
--- a/drivers/gpu/drm/xe/xe_vm.c
+++ b/drivers/gpu/drm/xe/xe_vm.c
@@ -592,7 +592,7 @@ static void preempt_rebind_work_func(struct work_struct *w)
        unsigned int fence_count = 0;
        LIST_HEAD(preempt_fences);
        ktime_t end = 0;
-       int err;
+       int err = 0;
        long wait;
        int __maybe_unused tries = 0;
 
@@ -608,22 +608,6 @@ static void preempt_rebind_work_func(struct work_struct *w)
        }
 
 retry:
-       if (vm->async_ops.error)
-               goto out_unlock_outer;
-
-       /*
-        * Extreme corner where we exit a VM error state with a munmap style VM
-        * unbind inflight which requires a rebind. In this case the rebind
-        * needs to install some fences into the dma-resv slots. The worker to
-        * do this queued, let that worker make progress by dropping vm->lock
-        * and trying this again.
-        */
-       if (vm->async_ops.munmap_rebind_inflight) {
-               up_write(&vm->lock);
-               flush_work(&vm->async_ops.work);
-               goto retry;
-       }
-
        if (xe_vm_userptr_check_repin(vm)) {
                err = xe_vm_userptr_pin(vm);
                if (err)
@@ -1245,7 +1229,8 @@ static u64 pde_encode_pat_index(struct xe_device *xe, u16 pat_index)
        return pte;
 }
 
-static u64 pte_encode_pat_index(struct xe_device *xe, u16 pat_index)
+static u64 pte_encode_pat_index(struct xe_device *xe, u16 pat_index,
+                               u32 pt_level)
 {
        u64 pte = 0;
 
@@ -1255,8 +1240,12 @@ static u64 pte_encode_pat_index(struct xe_device *xe, u16 pat_index)
        if (pat_index & BIT(1))
                pte |= XE_PPGTT_PTE_PAT1;
 
-       if (pat_index & BIT(2))
-               pte |= XE_PPGTT_PTE_PAT2;
+       if (pat_index & BIT(2)) {
+               if (pt_level)
+                       pte |= XE_PPGTT_PDE_PDPE_PAT2;
+               else
+                       pte |= XE_PPGTT_PTE_PAT2;
+       }
 
        if (pat_index & BIT(3))
                pte |= XELPG_PPGTT_PTE_PAT3;
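
/*
 * Minimal illustrative sketch, not part of the patch: the change above
 * threads the page-table level into the PAT encoder because PAT[2] sits in
 * a different bit for directory (PDE/PDPE) entries than for leaf PTEs.  The
 * EX_* bit positions below are assumptions chosen for illustration only,
 * not the hardware-defined XE_PPGTT_* values.
 */
#include <linux/bits.h>
#include <linux/types.h>

#define EX_PTE_PAT0		BIT_ULL(3)	/* assumed positions */
#define EX_PTE_PAT1		BIT_ULL(4)
#define EX_PTE_PAT2		BIT_ULL(7)
#define EX_PDE_PDPE_PAT2	BIT_ULL(12)
#define EX_PTE_PAT3		BIT_ULL(62)

static u64 ex_encode_pat_index(u16 pat_index, u32 pt_level)
{
	u64 pte = 0;

	if (pat_index & BIT(0))
		pte |= EX_PTE_PAT0;
	if (pat_index & BIT(1))
		pte |= EX_PTE_PAT1;
	if (pat_index & BIT(2))		/* PAT[2] moves for non-leaf levels */
		pte |= pt_level ? EX_PDE_PDPE_PAT2 : EX_PTE_PAT2;
	if (pat_index & BIT(3))
		pte |= EX_PTE_PAT3;

	return pte;
}
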
@@ -1300,7 +1289,7 @@ static u64 xelp_pte_encode_bo(struct xe_bo *bo, u64 bo_offset,
 
        pte = xe_bo_addr(bo, bo_offset, XE_PAGE_SIZE);
        pte |= XE_PAGE_PRESENT | XE_PAGE_RW;
-       pte |= pte_encode_pat_index(xe, pat_index);
+       pte |= pte_encode_pat_index(xe, pat_index, pt_level);
        pte |= pte_encode_ps(pt_level);
 
        if (xe_bo_is_vram(bo) || xe_bo_is_stolen_devmem(bo))
@@ -1319,7 +1308,7 @@ static u64 xelp_pte_encode_vma(u64 pte, struct xe_vma *vma,
        if (likely(!xe_vma_read_only(vma)))
                pte |= XE_PAGE_RW;
 
-       pte |= pte_encode_pat_index(xe, pat_index);
+       pte |= pte_encode_pat_index(xe, pat_index, pt_level);
        pte |= pte_encode_ps(pt_level);
 
        if (unlikely(xe_vma_is_null(vma)))
@@ -1339,7 +1328,7 @@ static u64 xelp_pte_encode_addr(struct xe_device *xe, u64 addr,
 
        pte = addr;
        pte |= XE_PAGE_PRESENT | XE_PAGE_RW;
-       pte |= pte_encode_pat_index(xe, pat_index);
+       pte |= pte_encode_pat_index(xe, pat_index, pt_level);
        pte |= pte_encode_ps(pt_level);
 
        if (devmem)
@@ -1357,7 +1346,6 @@ static const struct xe_pt_ops xelp_pt_ops = {
        .pde_encode_bo = xelp_pde_encode_bo,
 };
 
-static void xe_vma_op_work_func(struct work_struct *w);
 static void vm_destroy_work_func(struct work_struct *w);
 
 struct xe_vm *xe_vm_create(struct xe_device *xe, u32 flags)
@@ -1390,10 +1378,6 @@ struct xe_vm *xe_vm_create(struct xe_device *xe, u32 flags)
        INIT_LIST_HEAD(&vm->notifier.rebind_list);
        spin_lock_init(&vm->notifier.list_lock);
 
-       INIT_LIST_HEAD(&vm->async_ops.pending);
-       INIT_WORK(&vm->async_ops.work, xe_vma_op_work_func);
-       spin_lock_init(&vm->async_ops.lock);
-
        INIT_WORK(&vm->destroy_work, vm_destroy_work_func);
 
        INIT_LIST_HEAD(&vm->preempt.exec_queues);
@@ -1458,11 +1442,6 @@ struct xe_vm *xe_vm_create(struct xe_device *xe, u32 flags)
                vm->batch_invalidate_tlb = false;
        }
 
-       if (flags & XE_VM_FLAG_ASYNC_BIND_OPS) {
-               vm->async_ops.fence.context = dma_fence_context_alloc(1);
-               vm->flags |= XE_VM_FLAG_ASYNC_BIND_OPS;
-       }
-
        /* Fill pt_root after allocating scratch tables */
        for_each_tile(tile, xe, id) {
                if (!vm->pt_root[id])
@@ -1478,6 +1457,9 @@ struct xe_vm *xe_vm_create(struct xe_device *xe, u32 flags)
                        struct xe_gt *gt = tile->primary_gt;
                        struct xe_vm *migrate_vm;
                        struct xe_exec_queue *q;
+                       u32 create_flags = EXEC_QUEUE_FLAG_VM |
+                               ((flags & XE_VM_FLAG_ASYNC_DEFAULT) ?
+                               EXEC_QUEUE_FLAG_VM_ASYNC : 0);
 
                        if (!vm->pt_root[id])
                                continue;
@@ -1485,7 +1467,7 @@ struct xe_vm *xe_vm_create(struct xe_device *xe, u32 flags)
                        migrate_vm = xe_migrate_get_vm(tile->migrate);
                        q = xe_exec_queue_create_class(xe, gt, migrate_vm,
                                                       XE_ENGINE_CLASS_COPY,
-                                                      EXEC_QUEUE_FLAG_VM);
+                                                      create_flags);
                        xe_vm_put(migrate_vm);
                        if (IS_ERR(q)) {
                                err = PTR_ERR(q);
@@ -1525,12 +1507,6 @@ err_no_resv:
        return ERR_PTR(err);
 }
 
-static void flush_async_ops(struct xe_vm *vm)
-{
-       queue_work(system_unbound_wq, &vm->async_ops.work);
-       flush_work(&vm->async_ops.work);
-}
-
 static void xe_vm_close(struct xe_vm *vm)
 {
        down_write(&vm->lock);
@@ -1550,10 +1526,16 @@ void xe_vm_close_and_put(struct xe_vm *vm)
        xe_assert(xe, !vm->preempt.num_exec_queues);
 
        xe_vm_close(vm);
-       flush_async_ops(vm);
        if (xe_vm_in_compute_mode(vm))
                flush_work(&vm->preempt.rebind_work);
 
+       down_write(&vm->lock);
+       for_each_tile(tile, xe, id) {
+               if (vm->q[id])
+                       xe_exec_queue_last_fence_put(vm->q[id], vm);
+       }
+       up_write(&vm->lock);
+
        for_each_tile(tile, xe, id) {
                if (vm->q[id]) {
                        xe_exec_queue_kill(vm->q[id]);
@@ -1695,16 +1677,23 @@ u64 xe_vm_pdp4_descriptor(struct xe_vm *vm, struct xe_tile *tile)
                                         tile_to_xe(tile)->pat.idx[XE_CACHE_WB]);
 }
 
+static struct xe_exec_queue *
+to_wait_exec_queue(struct xe_vm *vm, struct xe_exec_queue *q)
+{
+       return q ? q : vm->q[0];
+}
+
 static struct dma_fence *
 xe_vm_unbind_vma(struct xe_vma *vma, struct xe_exec_queue *q,
                 struct xe_sync_entry *syncs, u32 num_syncs,
                 bool first_op, bool last_op)
 {
+       struct xe_vm *vm = xe_vma_vm(vma);
+       struct xe_exec_queue *wait_exec_queue = to_wait_exec_queue(vm, q);
        struct xe_tile *tile;
        struct dma_fence *fence = NULL;
        struct dma_fence **fences = NULL;
        struct dma_fence_array *cf = NULL;
-       struct xe_vm *vm = xe_vma_vm(vma);
        int cur_fence = 0, i;
        int number_tiles = hweight8(vma->tile_present);
        int err;
@@ -1751,20 +1740,19 @@ next:
                }
        }
 
+       fence = cf ? &cf->base : !fence ?
+               xe_exec_queue_last_fence_get(wait_exec_queue, vm) : fence;
        if (last_op) {
                for (i = 0; i < num_syncs; i++)
-                       xe_sync_entry_signal(&syncs[i], NULL,
-                                            cf ? &cf->base : fence);
+                       xe_sync_entry_signal(&syncs[i], NULL, fence);
        }
 
-       return cf ? &cf->base : !fence ? dma_fence_get_stub() : fence;
+       return fence;
 
 err_fences:
        if (fences) {
-               while (cur_fence) {
-                       /* FIXME: Rewind the previous binds? */
+               while (cur_fence)
                        dma_fence_put(fences[--cur_fence]);
-               }
                kfree(fences);
        }
 
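
/*
 * Minimal illustrative sketch, not part of the patch: background for the
 * err_fences cleanup above.  When several tiles are unbound, the per-tile
 * fences are merged with dma_fence_array_create(), which on success takes
 * ownership of the fence array and the references it holds; on failure the
 * caller still owns them and must put each fence collected so far.  The
 * ex_* names are placeholders, not driver functions.
 */
#include <linux/dma-fence-array.h>
#include <linux/err.h>
#include <linux/slab.h>

static struct dma_fence *ex_merge_tile_fences(struct dma_fence **fences,
					      int num, u64 context, u32 seqno)
{
	struct dma_fence_array *cf;

	cf = dma_fence_array_create(num, fences, context, seqno, false);
	if (!cf) {
		/* Creation failed: drop the references we still own. */
		while (num)
			dma_fence_put(fences[--num]);
		kfree(fences);
		return ERR_PTR(-ENOMEM);
	}

	return &cf->base;	/* the array now owns @fences */
}
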
@@ -1838,102 +1826,27 @@ next:
 
 err_fences:
        if (fences) {
-               while (cur_fence) {
-                       /* FIXME: Rewind the previous binds? */
+               while (cur_fence)
                        dma_fence_put(fences[--cur_fence]);
-               }
                kfree(fences);
        }
 
        return ERR_PTR(err);
 }
 
-struct async_op_fence {
-       struct dma_fence fence;
-       struct dma_fence *wait_fence;
-       struct dma_fence_cb cb;
-       struct xe_vm *vm;
-       wait_queue_head_t wq;
-       bool started;
-};
-
-static const char *async_op_fence_get_driver_name(struct dma_fence *dma_fence)
-{
-       return "xe";
-}
-
-static const char *
-async_op_fence_get_timeline_name(struct dma_fence *dma_fence)
+static bool xe_vm_sync_mode(struct xe_vm *vm, struct xe_exec_queue *q)
 {
-       return "async_op_fence";
-}
-
-static const struct dma_fence_ops async_op_fence_ops = {
-       .get_driver_name = async_op_fence_get_driver_name,
-       .get_timeline_name = async_op_fence_get_timeline_name,
-};
-
-static void async_op_fence_cb(struct dma_fence *fence, struct dma_fence_cb *cb)
-{
-       struct async_op_fence *afence =
-               container_of(cb, struct async_op_fence, cb);
-
-       afence->fence.error = afence->wait_fence->error;
-       dma_fence_signal(&afence->fence);
-       xe_vm_put(afence->vm);
-       dma_fence_put(afence->wait_fence);
-       dma_fence_put(&afence->fence);
-}
-
-static void add_async_op_fence_cb(struct xe_vm *vm,
-                                 struct dma_fence *fence,
-                                 struct async_op_fence *afence)
-{
-       int ret;
-
-       if (!xe_vm_no_dma_fences(vm)) {
-               afence->started = true;
-               smp_wmb();
-               wake_up_all(&afence->wq);
-       }
-
-       afence->wait_fence = dma_fence_get(fence);
-       afence->vm = xe_vm_get(vm);
-       dma_fence_get(&afence->fence);
-       ret = dma_fence_add_callback(fence, &afence->cb, async_op_fence_cb);
-       if (ret == -ENOENT) {
-               afence->fence.error = afence->wait_fence->error;
-               dma_fence_signal(&afence->fence);
-       }
-       if (ret) {
-               xe_vm_put(vm);
-               dma_fence_put(afence->wait_fence);
-               dma_fence_put(&afence->fence);
-       }
-       XE_WARN_ON(ret && ret != -ENOENT);
-}
-
-int xe_vm_async_fence_wait_start(struct dma_fence *fence)
-{
-       if (fence->ops == &async_op_fence_ops) {
-               struct async_op_fence *afence =
-                       container_of(fence, struct async_op_fence, fence);
-
-               xe_assert(afence->vm->xe, !xe_vm_no_dma_fences(afence->vm));
-
-               smp_rmb();
-               return wait_event_interruptible(afence->wq, afence->started);
-       }
-
-       return 0;
+       return q ? !(q->flags & EXEC_QUEUE_FLAG_VM_ASYNC) :
+               !(vm->flags & XE_VM_FLAG_ASYNC_DEFAULT);
 }
 
 static int __xe_vm_bind(struct xe_vm *vm, struct xe_vma *vma,
                        struct xe_exec_queue *q, struct xe_sync_entry *syncs,
-                       u32 num_syncs, struct async_op_fence *afence,
-                       bool immediate, bool first_op, bool last_op)
+                       u32 num_syncs, bool immediate, bool first_op,
+                       bool last_op)
 {
        struct dma_fence *fence;
+       struct xe_exec_queue *wait_exec_queue = to_wait_exec_queue(vm, q);
 
        xe_vm_assert_held(vm);
 
@@ -1947,23 +1860,26 @@ static int __xe_vm_bind(struct xe_vm *vm, struct xe_vma *vma,
 
                xe_assert(vm->xe, xe_vm_in_fault_mode(vm));
 
-               fence = dma_fence_get_stub();
+               fence = xe_exec_queue_last_fence_get(wait_exec_queue, vm);
                if (last_op) {
                        for (i = 0; i < num_syncs; i++)
                                xe_sync_entry_signal(&syncs[i], NULL, fence);
                }
        }
-       if (afence)
-               add_async_op_fence_cb(vm, fence, afence);
 
+       if (last_op)
+               xe_exec_queue_last_fence_set(wait_exec_queue, vm, fence);
+       if (last_op && xe_vm_sync_mode(vm, q))
+               dma_fence_wait(fence, true);
        dma_fence_put(fence);
+
        return 0;
 }
 
 static int xe_vm_bind(struct xe_vm *vm, struct xe_vma *vma, struct xe_exec_queue *q,
                      struct xe_bo *bo, struct xe_sync_entry *syncs,
-                     u32 num_syncs, struct async_op_fence *afence,
-                     bool immediate, bool first_op, bool last_op)
+                     u32 num_syncs, bool immediate, bool first_op,
+                     bool last_op)
 {
        int err;
 
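
/*
 * Minimal illustrative sketch, not part of the patch: the pattern the bind
 * path now follows.  Each bind queue remembers the fence of its most recent
 * operation, later operations and VM teardown look that fence up instead of
 * going through a dedicated async_op_fence, and a synchronous bind simply
 * waits on it before returning.  The ex_* helpers are stand-ins for the
 * xe_exec_queue_last_fence_*() interface, whose full definition is not part
 * of this file.
 */
#include <linux/dma-fence.h>

struct ex_bind_queue {
	struct dma_fence *last_fence;	/* reference held by the queue */
};

static void ex_last_fence_set(struct ex_bind_queue *q, struct dma_fence *fence)
{
	dma_fence_put(q->last_fence);		/* drop the previous fence */
	q->last_fence = dma_fence_get(fence);
}

static struct dma_fence *ex_last_fence_get(struct ex_bind_queue *q)
{
	/* Always hand back a reference; fall back to the signalled stub. */
	return q->last_fence ? dma_fence_get(q->last_fence) :
		dma_fence_get_stub();
}

static void ex_finish_bind(struct ex_bind_queue *q, struct dma_fence *fence,
			   bool sync)
{
	ex_last_fence_set(q, fence);
	if (sync)	/* synchronous bind: block until the fence signals */
		dma_fence_wait(fence, true);
	dma_fence_put(fence);	/* caller's reference, as in __xe_vm_bind() */
}
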
@@ -1976,16 +1892,16 @@ static int xe_vm_bind(struct xe_vm *vm, struct xe_vma *vma, struct xe_exec_queue
                        return err;
        }
 
-       return __xe_vm_bind(vm, vma, q, syncs, num_syncs, afence, immediate,
-                           first_op, last_op);
+       return __xe_vm_bind(vm, vma, q, syncs, num_syncs, immediate, first_op,
+                           last_op);
 }
 
 static int xe_vm_unbind(struct xe_vm *vm, struct xe_vma *vma,
                        struct xe_exec_queue *q, struct xe_sync_entry *syncs,
-                       u32 num_syncs, struct async_op_fence *afence,
-                       bool first_op, bool last_op)
+                       u32 num_syncs, bool first_op, bool last_op)
 {
        struct dma_fence *fence;
+       struct xe_exec_queue *wait_exec_queue = to_wait_exec_queue(vm, q);
 
        xe_vm_assert_held(vm);
        xe_bo_assert_held(xe_vma_bo(vma));
@@ -1993,19 +1909,21 @@ static int xe_vm_unbind(struct xe_vm *vm, struct xe_vma *vma,
        fence = xe_vm_unbind_vma(vma, q, syncs, num_syncs, first_op, last_op);
        if (IS_ERR(fence))
                return PTR_ERR(fence);
-       if (afence)
-               add_async_op_fence_cb(vm, fence, afence);
 
        xe_vma_destroy(vma, fence);
+       if (last_op)
+               xe_exec_queue_last_fence_set(wait_exec_queue, vm, fence);
+       if (last_op && xe_vm_sync_mode(vm, q))
+               dma_fence_wait(fence, true);
        dma_fence_put(fence);
 
        return 0;
 }
 
-#define ALL_DRM_XE_VM_CREATE_FLAGS (DRM_XE_VM_CREATE_SCRATCH_PAGE | \
-                                   DRM_XE_VM_CREATE_COMPUTE_MODE | \
-                                   DRM_XE_VM_CREATE_ASYNC_BIND_OPS | \
-                                   DRM_XE_VM_CREATE_FAULT_MODE)
+#define ALL_DRM_XE_VM_CREATE_FLAGS (DRM_XE_VM_CREATE_FLAG_SCRATCH_PAGE | \
+                                   DRM_XE_VM_CREATE_FLAG_COMPUTE_MODE | \
+                                   DRM_XE_VM_CREATE_FLAG_ASYNC_DEFAULT | \
+                                   DRM_XE_VM_CREATE_FLAG_FAULT_MODE)
 
 int xe_vm_create_ioctl(struct drm_device *dev, void *data,
                       struct drm_file *file)
@@ -2023,9 +1941,9 @@ int xe_vm_create_ioctl(struct drm_device *dev, void *data,
                return -EINVAL;
 
        if (XE_WA(xe_root_mmio_gt(xe), 14016763929))
-               args->flags |= DRM_XE_VM_CREATE_SCRATCH_PAGE;
+               args->flags |= DRM_XE_VM_CREATE_FLAG_SCRATCH_PAGE;
 
-       if (XE_IOCTL_DBG(xe, args->flags & DRM_XE_VM_CREATE_FAULT_MODE &&
+       if (XE_IOCTL_DBG(xe, args->flags & DRM_XE_VM_CREATE_FLAG_FAULT_MODE &&
                         !xe->info.supports_usm))
                return -EINVAL;
 
@@ -2035,29 +1953,32 @@ int xe_vm_create_ioctl(struct drm_device *dev, void *data,
        if (XE_IOCTL_DBG(xe, args->flags & ~ALL_DRM_XE_VM_CREATE_FLAGS))
                return -EINVAL;
 
-       if (XE_IOCTL_DBG(xe, args->flags & DRM_XE_VM_CREATE_SCRATCH_PAGE &&
-                        args->flags & DRM_XE_VM_CREATE_FAULT_MODE))
+       if (XE_IOCTL_DBG(xe, args->flags & DRM_XE_VM_CREATE_FLAG_SCRATCH_PAGE &&
+                        args->flags & DRM_XE_VM_CREATE_FLAG_FAULT_MODE))
                return -EINVAL;
 
-       if (XE_IOCTL_DBG(xe, args->flags & DRM_XE_VM_CREATE_COMPUTE_MODE &&
-                        args->flags & DRM_XE_VM_CREATE_FAULT_MODE))
+       if (XE_IOCTL_DBG(xe, args->flags & DRM_XE_VM_CREATE_FLAG_COMPUTE_MODE &&
+                        args->flags & DRM_XE_VM_CREATE_FLAG_FAULT_MODE))
                return -EINVAL;
 
-       if (XE_IOCTL_DBG(xe, args->flags & DRM_XE_VM_CREATE_FAULT_MODE &&
+       if (XE_IOCTL_DBG(xe, args->flags & DRM_XE_VM_CREATE_FLAG_FAULT_MODE &&
                         xe_device_in_non_fault_mode(xe)))
                return -EINVAL;
 
-       if (XE_IOCTL_DBG(xe, !(args->flags & DRM_XE_VM_CREATE_FAULT_MODE) &&
+       if (XE_IOCTL_DBG(xe, !(args->flags & DRM_XE_VM_CREATE_FLAG_FAULT_MODE) &&
                         xe_device_in_fault_mode(xe)))
                return -EINVAL;
 
-       if (args->flags & DRM_XE_VM_CREATE_SCRATCH_PAGE)
+       if (XE_IOCTL_DBG(xe, args->extensions))
+               return -EINVAL;
+
+       if (args->flags & DRM_XE_VM_CREATE_FLAG_SCRATCH_PAGE)
                flags |= XE_VM_FLAG_SCRATCH_PAGE;
-       if (args->flags & DRM_XE_VM_CREATE_COMPUTE_MODE)
+       if (args->flags & DRM_XE_VM_CREATE_FLAG_COMPUTE_MODE)
                flags |= XE_VM_FLAG_COMPUTE_MODE;
-       if (args->flags & DRM_XE_VM_CREATE_ASYNC_BIND_OPS)
-               flags |= XE_VM_FLAG_ASYNC_BIND_OPS;
-       if (args->flags & DRM_XE_VM_CREATE_FAULT_MODE)
+       if (args->flags & DRM_XE_VM_CREATE_FLAG_ASYNC_DEFAULT)
+               flags |= XE_VM_FLAG_ASYNC_DEFAULT;
+       if (args->flags & DRM_XE_VM_CREATE_FLAG_FAULT_MODE)
                flags |= XE_VM_FLAG_FAULT_MODE;
 
        vm = xe_vm_create(xe, flags);
@@ -2075,13 +1996,14 @@ int xe_vm_create_ioctl(struct drm_device *dev, void *data,
        if (xe->info.has_asid) {
                mutex_lock(&xe->usm.lock);
                err = xa_alloc_cyclic(&xe->usm.asid_to_vm, &asid, vm,
-                                     XA_LIMIT(0, XE_MAX_ASID - 1),
+                                     XA_LIMIT(1, XE_MAX_ASID - 1),
                                      &xe->usm.next_asid, GFP_KERNEL);
                mutex_unlock(&xe->usm.lock);
-               if (err) {
+               if (err < 0) {
                        xe_vm_close_and_put(vm);
                        return err;
                }
+               err = 0;
                vm->usm.asid = asid;
        }
 
@@ -2139,9 +2061,9 @@ static const u32 region_to_mem_type[] = {
 static int xe_vm_prefetch(struct xe_vm *vm, struct xe_vma *vma,
                          struct xe_exec_queue *q, u32 region,
                          struct xe_sync_entry *syncs, u32 num_syncs,
-                         struct async_op_fence *afence, bool first_op,
-                         bool last_op)
+                         bool first_op, bool last_op)
 {
+       struct xe_exec_queue *wait_exec_queue = to_wait_exec_queue(vm, q);
        int err;
 
        xe_assert(vm->xe, region <= ARRAY_SIZE(region_to_mem_type));
@@ -2154,65 +2076,22 @@ static int xe_vm_prefetch(struct xe_vm *vm, struct xe_vma *vma,
 
        if (vma->tile_mask != (vma->tile_present & ~vma->usm.tile_invalidated)) {
                return xe_vm_bind(vm, vma, q, xe_vma_bo(vma), syncs, num_syncs,
-                                 afence, true, first_op, last_op);
+                                 true, first_op, last_op);
        } else {
                int i;
 
                /* Nothing to do, signal fences now */
                if (last_op) {
-                       for (i = 0; i < num_syncs; i++)
-                               xe_sync_entry_signal(&syncs[i], NULL,
-                                                    dma_fence_get_stub());
-               }
-               if (afence)
-                       dma_fence_signal(&afence->fence);
-               return 0;
-       }
-}
+                       for (i = 0; i < num_syncs; i++) {
+                               struct dma_fence *fence =
+                                       xe_exec_queue_last_fence_get(wait_exec_queue, vm);
 
-static void vm_set_async_error(struct xe_vm *vm, int err)
-{
-       lockdep_assert_held(&vm->lock);
-       vm->async_ops.error = err;
-}
-
-static int vm_bind_ioctl_lookup_vma(struct xe_vm *vm, struct xe_bo *bo,
-                                   u64 addr, u64 range, u32 op, u32 flags)
-{
-       struct xe_device *xe = vm->xe;
-       struct xe_vma *vma;
-       bool async = !!(flags & XE_VM_BIND_FLAG_ASYNC);
-
-       lockdep_assert_held(&vm->lock);
+                               xe_sync_entry_signal(&syncs[i], NULL, fence);
+                       }
+               }
 
-       switch (op) {
-       case XE_VM_BIND_OP_MAP:
-       case XE_VM_BIND_OP_MAP_USERPTR:
-               vma = xe_vm_find_overlapping_vma(vm, addr, range);
-               if (XE_IOCTL_DBG(xe, vma && !async))
-                       return -EBUSY;
-               break;
-       case XE_VM_BIND_OP_UNMAP:
-       case XE_VM_BIND_OP_PREFETCH:
-               vma = xe_vm_find_overlapping_vma(vm, addr, range);
-               if (XE_IOCTL_DBG(xe, !vma))
-                       /* Not an actual error, IOCTL cleans up returns and 0 */
-                       return -ENODATA;
-               if (XE_IOCTL_DBG(xe, (xe_vma_start(vma) != addr ||
-                                     xe_vma_end(vma) != addr + range) && !async))
-                       return -EINVAL;
-               break;
-       case XE_VM_BIND_OP_UNMAP_ALL:
-               if (XE_IOCTL_DBG(xe, list_empty(&bo->ttm.base.gpuva.list)))
-                       /* Not an actual error, IOCTL cleans up returns and 0 */
-                       return -ENODATA;
-               break;
-       default:
-               drm_warn(&xe->drm, "NOT POSSIBLE");
-               return -EINVAL;
+               return 0;
        }
-
-       return 0;
 }
 
 static void prep_vma_destroy(struct xe_vm *vm, struct xe_vma *vma,
@@ -2282,7 +2161,8 @@ static void print_op(struct xe_device *xe, struct drm_gpuva_op *op)
 static struct drm_gpuva_ops *
 vm_bind_ioctl_ops_create(struct xe_vm *vm, struct xe_bo *bo,
                         u64 bo_offset_or_userptr, u64 addr, u64 range,
-                        u32 operation, u32 flags, u8 tile_mask, u32 region)
+                        u32 operation, u32 flags, u8 tile_mask,
+                        u32 prefetch_region)
 {
        struct drm_gem_object *obj = bo ? &bo->ttm.base : NULL;
        struct drm_gpuva_ops *ops;
@@ -2299,48 +2179,18 @@ vm_bind_ioctl_ops_create(struct xe_vm *vm, struct xe_bo *bo,
               (ULL)bo_offset_or_userptr);
 
        switch (operation) {
-       case XE_VM_BIND_OP_MAP:
-       case XE_VM_BIND_OP_MAP_USERPTR:
+       case DRM_XE_VM_BIND_OP_MAP:
+       case DRM_XE_VM_BIND_OP_MAP_USERPTR:
                ops = drm_gpuvm_sm_map_ops_create(&vm->gpuvm, addr, range,
                                                  obj, bo_offset_or_userptr);
-               if (IS_ERR(ops))
-                       return ops;
-
-               drm_gpuva_for_each_op(__op, ops) {
-                       struct xe_vma_op *op = gpuva_op_to_vma_op(__op);
-
-                       op->tile_mask = tile_mask;
-                       op->map.immediate =
-                               flags & XE_VM_BIND_FLAG_IMMEDIATE;
-                       op->map.read_only =
-                               flags & XE_VM_BIND_FLAG_READONLY;
-                       op->map.is_null = flags & XE_VM_BIND_FLAG_NULL;
-               }
                break;
-       case XE_VM_BIND_OP_UNMAP:
+       case DRM_XE_VM_BIND_OP_UNMAP:
                ops = drm_gpuvm_sm_unmap_ops_create(&vm->gpuvm, addr, range);
-               if (IS_ERR(ops))
-                       return ops;
-
-               drm_gpuva_for_each_op(__op, ops) {
-                       struct xe_vma_op *op = gpuva_op_to_vma_op(__op);
-
-                       op->tile_mask = tile_mask;
-               }
                break;
-       case XE_VM_BIND_OP_PREFETCH:
+       case DRM_XE_VM_BIND_OP_PREFETCH:
                ops = drm_gpuvm_prefetch_ops_create(&vm->gpuvm, addr, range);
-               if (IS_ERR(ops))
-                       return ops;
-
-               drm_gpuva_for_each_op(__op, ops) {
-                       struct xe_vma_op *op = gpuva_op_to_vma_op(__op);
-
-                       op->tile_mask = tile_mask;
-                       op->prefetch.region = region;
-               }
                break;
-       case XE_VM_BIND_OP_UNMAP_ALL:
+       case DRM_XE_VM_BIND_OP_UNMAP_ALL:
                xe_assert(vm->xe, bo);
 
                err = xe_bo_lock(bo, true);
@@ -2354,19 +2204,13 @@ vm_bind_ioctl_ops_create(struct xe_vm *vm, struct xe_bo *bo,
                ops = drm_gpuvm_bo_unmap_ops_create(vm_bo);
                drm_gpuvm_bo_put(vm_bo);
                xe_bo_unlock(bo);
-               if (IS_ERR(ops))
-                       return ops;
-
-               drm_gpuva_for_each_op(__op, ops) {
-                       struct xe_vma_op *op = gpuva_op_to_vma_op(__op);
-
-                       op->tile_mask = tile_mask;
-               }
                break;
        default:
                drm_warn(&vm->xe->drm, "NOT POSSIBLE");
                ops = ERR_PTR(-EINVAL);
        }
+       if (IS_ERR(ops))
+               return ops;
 
 #ifdef TEST_VM_ASYNC_OPS_ERROR
        if (operation & FORCE_ASYNC_OP_ERROR) {
@@ -2377,9 +2221,22 @@ vm_bind_ioctl_ops_create(struct xe_vm *vm, struct xe_bo *bo,
        }
 #endif
 
-       if (!IS_ERR(ops))
-               drm_gpuva_for_each_op(__op, ops)
-                       print_op(vm->xe, __op);
+       drm_gpuva_for_each_op(__op, ops) {
+               struct xe_vma_op *op = gpuva_op_to_vma_op(__op);
+
+               op->tile_mask = tile_mask;
+               if (__op->op == DRM_GPUVA_OP_MAP) {
+                       op->map.immediate =
+                               flags & DRM_XE_VM_BIND_FLAG_IMMEDIATE;
+                       op->map.read_only =
+                               flags & DRM_XE_VM_BIND_FLAG_READONLY;
+                       op->map.is_null = flags & DRM_XE_VM_BIND_FLAG_NULL;
+               } else if (__op->op == DRM_GPUVA_OP_PREFETCH) {
+                       op->prefetch.region = prefetch_region;
+               }
+
+               print_op(vm->xe, __op);
+       }
 
        return ops;
 }
@@ -2462,6 +2319,10 @@ static int xe_vma_op_commit(struct xe_vm *vm, struct xe_vma_op *op)
                        op->flags |= XE_VMA_OP_COMMITTED;
                break;
        case DRM_GPUVA_OP_REMAP:
+       {
+               u8 tile_present =
+                       gpuva_to_vma(op->base.remap.unmap->va)->tile_present;
+
                prep_vma_destroy(vm, gpuva_to_vma(op->base.remap.unmap->va),
                                 true);
                op->flags |= XE_VMA_OP_COMMITTED;
@@ -2470,15 +2331,21 @@ static int xe_vma_op_commit(struct xe_vm *vm, struct xe_vma_op *op)
                        err |= xe_vm_insert_vma(vm, op->remap.prev);
                        if (!err)
                                op->flags |= XE_VMA_OP_PREV_COMMITTED;
-                       if (!err && op->remap.skip_prev)
+                       if (!err && op->remap.skip_prev) {
+                               op->remap.prev->tile_present =
+                                       tile_present;
                                op->remap.prev = NULL;
+                       }
                }
                if (op->remap.next) {
                        err |= xe_vm_insert_vma(vm, op->remap.next);
                        if (!err)
                                op->flags |= XE_VMA_OP_NEXT_COMMITTED;
-                       if (!err && op->remap.skip_next)
+                       if (!err && op->remap.skip_next) {
+                               op->remap.next->tile_present =
+                                       tile_present;
                                op->remap.next = NULL;
+                       }
                }
 
                /* Adjust for partial unbind after removin VMA from VM */
@@ -2487,6 +2354,7 @@ static int xe_vma_op_commit(struct xe_vm *vm, struct xe_vma_op *op)
                        op->base.remap.unmap->va->va.range = op->remap.range;
                }
                break;
+       }
        case DRM_GPUVA_OP_UNMAP:
                prep_vma_destroy(vm, gpuva_to_vma(op->base.unmap.va), true);
                op->flags |= XE_VMA_OP_COMMITTED;
@@ -2509,37 +2377,15 @@ static int vm_bind_ioctl_ops_parse(struct xe_vm *vm, struct xe_exec_queue *q,
                                   bool async)
 {
        struct xe_vma_op *last_op = NULL;
-       struct async_op_fence *fence = NULL;
        struct drm_gpuva_op *__op;
        int err = 0;
 
        lockdep_assert_held_write(&vm->lock);
 
-       if (last && num_syncs && async) {
-               u64 seqno;
-
-               fence = kmalloc(sizeof(*fence), GFP_KERNEL);
-               if (!fence)
-                       return -ENOMEM;
-
-               seqno = q ? ++q->bind.fence_seqno : ++vm->async_ops.fence.seqno;
-               dma_fence_init(&fence->fence, &async_op_fence_ops,
-                              &vm->async_ops.lock, q ? q->bind.fence_ctx :
-                              vm->async_ops.fence.context, seqno);
-
-               if (!xe_vm_no_dma_fences(vm)) {
-                       fence->vm = vm;
-                       fence->started = false;
-                       init_waitqueue_head(&fence->wq);
-               }
-       }
-
        drm_gpuva_for_each_op(__op, ops) {
                struct xe_vma_op *op = gpuva_op_to_vma_op(__op);
                bool first = list_empty(ops_list);
 
-               xe_assert(vm->xe, first || async);
-
                INIT_LIST_HEAD(&op->link);
                list_add_tail(&op->link, ops_list);
 
@@ -2559,10 +2405,8 @@ static int vm_bind_ioctl_ops_parse(struct xe_vm *vm, struct xe_exec_queue *q,
                        vma = new_vma(vm, &op->base.map,
                                      op->tile_mask, op->map.read_only,
                                      op->map.is_null);
-                       if (IS_ERR(vma)) {
-                               err = PTR_ERR(vma);
-                               goto free_fence;
-                       }
+                       if (IS_ERR(vma))
+                               return PTR_ERR(vma);
 
                        op->map.vma = vma;
                        break;
@@ -2587,10 +2431,8 @@ static int vm_bind_ioctl_ops_parse(struct xe_vm *vm, struct xe_exec_queue *q,
                                vma = new_vma(vm, op->base.remap.prev,
                                              op->tile_mask, read_only,
                                              is_null);
-                               if (IS_ERR(vma)) {
-                                       err = PTR_ERR(vma);
-                                       goto free_fence;
-                               }
+                               if (IS_ERR(vma))
+                                       return PTR_ERR(vma);
 
                                op->remap.prev = vma;
 
@@ -2623,10 +2465,8 @@ static int vm_bind_ioctl_ops_parse(struct xe_vm *vm, struct xe_exec_queue *q,
                                vma = new_vma(vm, op->base.remap.next,
                                              op->tile_mask, read_only,
                                              is_null);
-                               if (IS_ERR(vma)) {
-                                       err = PTR_ERR(vma);
-                                       goto free_fence;
-                               }
+                               if (IS_ERR(vma))
+                                       return PTR_ERR(vma);
 
                                op->remap.next = vma;
 
@@ -2658,27 +2498,23 @@ static int vm_bind_ioctl_ops_parse(struct xe_vm *vm, struct xe_exec_queue *q,
 
                err = xe_vma_op_commit(vm, op);
                if (err)
-                       goto free_fence;
+                       return err;
        }
 
        /* FIXME: Unhandled corner case */
        XE_WARN_ON(!last_op && last && !list_empty(ops_list));
 
        if (!last_op)
-               goto free_fence;
+               return 0;
+
        last_op->ops = ops;
        if (last) {
                last_op->flags |= XE_VMA_OP_LAST;
                last_op->num_syncs = num_syncs;
                last_op->syncs = syncs;
-               last_op->fence = fence;
        }
 
        return 0;
-
-free_fence:
-       kfree(fence);
-       return err;
 }
 
 static int op_execute(struct drm_exec *exec, struct xe_vm *vm,
@@ -2698,7 +2534,7 @@ static int op_execute(struct drm_exec *exec, struct xe_vm *vm,
        switch (op->base.op) {
        case DRM_GPUVA_OP_MAP:
                err = xe_vm_bind(vm, vma, op->q, xe_vma_bo(vma),
-                                op->syncs, op->num_syncs, op->fence,
+                                op->syncs, op->num_syncs,
                                 op->map.immediate || !xe_vm_in_fault_mode(vm),
                                 op->flags & XE_VMA_OP_FIRST,
                                 op->flags & XE_VMA_OP_LAST);
@@ -2709,16 +2545,13 @@ static int op_execute(struct drm_exec *exec, struct xe_vm *vm,
                bool next = !!op->remap.next;
 
                if (!op->remap.unmap_done) {
-                       if (prev || next) {
-                               vm->async_ops.munmap_rebind_inflight = true;
+                       if (prev || next)
                                vma->gpuva.flags |= XE_VMA_FIRST_REBIND;
-                       }
                        err = xe_vm_unbind(vm, vma, op->q, op->syncs,
                                           op->num_syncs,
-                                          !prev && !next ? op->fence : NULL,
                                           op->flags & XE_VMA_OP_FIRST,
-                                          op->flags & XE_VMA_OP_LAST && !prev &&
-                                          !next);
+                                          op->flags & XE_VMA_OP_LAST &&
+                                          !prev && !next);
                        if (err)
                                break;
                        op->remap.unmap_done = true;
@@ -2728,8 +2561,7 @@ static int op_execute(struct drm_exec *exec, struct xe_vm *vm,
                        op->remap.prev->gpuva.flags |= XE_VMA_LAST_REBIND;
                        err = xe_vm_bind(vm, op->remap.prev, op->q,
                                         xe_vma_bo(op->remap.prev), op->syncs,
-                                        op->num_syncs,
-                                        !next ? op->fence : NULL, true, false,
+                                        op->num_syncs, true, false,
                                         op->flags & XE_VMA_OP_LAST && !next);
                        op->remap.prev->gpuva.flags &= ~XE_VMA_LAST_REBIND;
                        if (err)
@@ -2742,26 +2574,24 @@ static int op_execute(struct drm_exec *exec, struct xe_vm *vm,
                        err = xe_vm_bind(vm, op->remap.next, op->q,
                                         xe_vma_bo(op->remap.next),
                                         op->syncs, op->num_syncs,
-                                        op->fence, true, false,
+                                        true, false,
                                         op->flags & XE_VMA_OP_LAST);
                        op->remap.next->gpuva.flags &= ~XE_VMA_LAST_REBIND;
                        if (err)
                                break;
                        op->remap.next = NULL;
                }
-               vm->async_ops.munmap_rebind_inflight = false;
 
                break;
        }
        case DRM_GPUVA_OP_UNMAP:
                err = xe_vm_unbind(vm, vma, op->q, op->syncs,
-                                  op->num_syncs, op->fence,
-                                  op->flags & XE_VMA_OP_FIRST,
+                                  op->num_syncs, op->flags & XE_VMA_OP_FIRST,
                                   op->flags & XE_VMA_OP_LAST);
                break;
        case DRM_GPUVA_OP_PREFETCH:
                err = xe_vm_prefetch(vm, vma, op->q, op->prefetch.region,
-                                    op->syncs, op->num_syncs, op->fence,
+                                    op->syncs, op->num_syncs,
                                     op->flags & XE_VMA_OP_FIRST,
                                     op->flags & XE_VMA_OP_LAST);
                break;
@@ -2860,14 +2690,9 @@ static void xe_vma_op_cleanup(struct xe_vm *vm, struct xe_vma_op *op)
                kfree(op->syncs);
                if (op->q)
                        xe_exec_queue_put(op->q);
-               if (op->fence)
-                       dma_fence_put(&op->fence->fence);
        }
-       if (!list_empty(&op->link)) {
-               spin_lock_irq(&vm->async_ops.lock);
+       if (!list_empty(&op->link))
                list_del(&op->link);
-               spin_unlock_irq(&vm->async_ops.lock);
-       }
        if (op->ops)
                drm_gpuva_ops_free(&vm->gpuvm, op->ops);
        if (last)
@@ -2929,129 +2754,6 @@ static void xe_vma_op_unwind(struct xe_vm *vm, struct xe_vma_op *op,
        }
 }
 
-static struct xe_vma_op *next_vma_op(struct xe_vm *vm)
-{
-       return list_first_entry_or_null(&vm->async_ops.pending,
-                                       struct xe_vma_op, link);
-}
-
-static void xe_vma_op_work_func(struct work_struct *w)
-{
-       struct xe_vm *vm = container_of(w, struct xe_vm, async_ops.work);
-
-       for (;;) {
-               struct xe_vma_op *op;
-               int err;
-
-               if (vm->async_ops.error && !xe_vm_is_closed(vm))
-                       break;
-
-               spin_lock_irq(&vm->async_ops.lock);
-               op = next_vma_op(vm);
-               spin_unlock_irq(&vm->async_ops.lock);
-
-               if (!op)
-                       break;
-
-               if (!xe_vm_is_closed(vm)) {
-                       down_write(&vm->lock);
-                       err = xe_vma_op_execute(vm, op);
-                       if (err) {
-                               drm_warn(&vm->xe->drm,
-                                        "Async VM op(%d) failed with %d",
-                                        op->base.op, err);
-                               vm_set_async_error(vm, err);
-                               up_write(&vm->lock);
-
-                               break;
-                       }
-                       up_write(&vm->lock);
-               } else {
-                       struct xe_vma *vma;
-
-                       switch (op->base.op) {
-                       case DRM_GPUVA_OP_REMAP:
-                               vma = gpuva_to_vma(op->base.remap.unmap->va);
-                               trace_xe_vma_flush(vma);
-
-                               down_write(&vm->lock);
-                               xe_vma_destroy_unlocked(vma);
-                               up_write(&vm->lock);
-                               break;
-                       case DRM_GPUVA_OP_UNMAP:
-                               vma = gpuva_to_vma(op->base.unmap.va);
-                               trace_xe_vma_flush(vma);
-
-                               down_write(&vm->lock);
-                               xe_vma_destroy_unlocked(vma);
-                               up_write(&vm->lock);
-                               break;
-                       default:
-                               /* Nothing to do */
-                               break;
-                       }
-
-                       if (op->fence && !test_bit(DMA_FENCE_FLAG_SIGNALED_BIT,
-                                                  &op->fence->fence.flags)) {
-                               if (!xe_vm_no_dma_fences(vm)) {
-                                       op->fence->started = true;
-                                       wake_up_all(&op->fence->wq);
-                               }
-                               dma_fence_signal(&op->fence->fence);
-                       }
-               }
-
-               xe_vma_op_cleanup(vm, op);
-       }
-}
-
-static int vm_bind_ioctl_ops_execute(struct xe_vm *vm,
-                                    struct list_head *ops_list, bool async)
-{
-       struct xe_vma_op *op, *last_op, *next;
-       int err;
-
-       lockdep_assert_held_write(&vm->lock);
-
-       last_op = list_last_entry(ops_list, struct xe_vma_op, link);
-
-       if (!async) {
-               err = xe_vma_op_execute(vm, last_op);
-               if (err)
-                       goto unwind;
-               xe_vma_op_cleanup(vm, last_op);
-       } else {
-               int i;
-               bool installed = false;
-
-               for (i = 0; i < last_op->num_syncs; i++)
-                       installed |= xe_sync_entry_signal(&last_op->syncs[i],
-                                                         NULL,
-                                                         &last_op->fence->fence);
-               if (!installed && last_op->fence)
-                       dma_fence_signal(&last_op->fence->fence);
-
-               spin_lock_irq(&vm->async_ops.lock);
-               list_splice_tail(ops_list, &vm->async_ops.pending);
-               spin_unlock_irq(&vm->async_ops.lock);
-
-               if (!vm->async_ops.error)
-                       queue_work(system_unbound_wq, &vm->async_ops.work);
-       }
-
-       return 0;
-
-unwind:
-       list_for_each_entry_reverse(op, ops_list, link)
-               xe_vma_op_unwind(vm, op, op->flags & XE_VMA_OP_COMMITTED,
-                                op->flags & XE_VMA_OP_PREV_COMMITTED,
-                                op->flags & XE_VMA_OP_NEXT_COMMITTED);
-       list_for_each_entry_safe(op, next, ops_list, link)
-               xe_vma_op_cleanup(vm, op);
-
-       return err;
-}
-
 static void vm_bind_ioctl_ops_unwind(struct xe_vm *vm,
                                     struct drm_gpuva_ops **ops,
                                     int num_ops_list)
@@ -3078,15 +2780,41 @@ static void vm_bind_ioctl_ops_unwind(struct xe_vm *vm,
        }
 }
 
+static int vm_bind_ioctl_ops_execute(struct xe_vm *vm,
+                                    struct list_head *ops_list)
+{
+       struct xe_vma_op *op, *next;
+       int err;
+
+       lockdep_assert_held_write(&vm->lock);
+
+       list_for_each_entry_safe(op, next, ops_list, link) {
+               err = xe_vma_op_execute(vm, op);
+               if (err) {
+                       drm_warn(&vm->xe->drm, "VM op(%d) failed with %d",
+                                op->base.op, err);
+                       /*
+                        * FIXME: Killing VM rather than proper error handling
+                        */
+                       xe_vm_kill(vm);
+                       return -ENOSPC;
+               }
+               xe_vma_op_cleanup(vm, op);
+       }
+
+       return 0;
+}
+
 #ifdef TEST_VM_ASYNC_OPS_ERROR
 #define SUPPORTED_FLAGS        \
-       (FORCE_ASYNC_OP_ERROR | XE_VM_BIND_FLAG_ASYNC | \
-        XE_VM_BIND_FLAG_READONLY | XE_VM_BIND_FLAG_IMMEDIATE | \
-        XE_VM_BIND_FLAG_NULL | 0xffff)
+       (FORCE_ASYNC_OP_ERROR | DRM_XE_VM_BIND_FLAG_ASYNC | \
+        DRM_XE_VM_BIND_FLAG_READONLY | DRM_XE_VM_BIND_FLAG_IMMEDIATE | \
+        DRM_XE_VM_BIND_FLAG_NULL | 0xffff)
 #else
 #define SUPPORTED_FLAGS        \
-       (XE_VM_BIND_FLAG_ASYNC | XE_VM_BIND_FLAG_READONLY | \
-        XE_VM_BIND_FLAG_IMMEDIATE | XE_VM_BIND_FLAG_NULL | 0xffff)
+       (DRM_XE_VM_BIND_FLAG_ASYNC | DRM_XE_VM_BIND_FLAG_READONLY | \
+        DRM_XE_VM_BIND_FLAG_IMMEDIATE | DRM_XE_VM_BIND_FLAG_NULL | \
+        0xffff)
 #endif
 #define XE_64K_PAGE_MASK 0xffffull
 
@@ -3132,55 +2860,46 @@ static int vm_bind_ioctl_check_args(struct xe_device *xe,
                u32 flags = (*bind_ops)[i].flags;
                u32 obj = (*bind_ops)[i].obj;
                u64 obj_offset = (*bind_ops)[i].obj_offset;
-               u32 region = (*bind_ops)[i].region;
-               bool is_null = flags & XE_VM_BIND_FLAG_NULL;
+               u32 prefetch_region = (*bind_ops)[i].prefetch_mem_region_instance;
+               bool is_null = flags & DRM_XE_VM_BIND_FLAG_NULL;
 
                if (i == 0) {
-                       *async = !!(flags & XE_VM_BIND_FLAG_ASYNC);
-               } else if (XE_IOCTL_DBG(xe, !*async) ||
-                          XE_IOCTL_DBG(xe, !(flags & XE_VM_BIND_FLAG_ASYNC)) ||
-                          XE_IOCTL_DBG(xe, op == XE_VM_BIND_OP_RESTART)) {
-                       err = -EINVAL;
-                       goto free_bind_ops;
-               }
-
-               if (XE_IOCTL_DBG(xe, !*async &&
-                                op == XE_VM_BIND_OP_UNMAP_ALL)) {
-                       err = -EINVAL;
-                       goto free_bind_ops;
-               }
-
-               if (XE_IOCTL_DBG(xe, !*async &&
-                                op == XE_VM_BIND_OP_PREFETCH)) {
+                       *async = !!(flags & DRM_XE_VM_BIND_FLAG_ASYNC);
+                       if (XE_IOCTL_DBG(xe, !*async && args->num_syncs)) {
+                               err = -EINVAL;
+                               goto free_bind_ops;
+                       }
+               } else if (XE_IOCTL_DBG(xe, *async !=
+                                       !!(flags & DRM_XE_VM_BIND_FLAG_ASYNC))) {
                        err = -EINVAL;
                        goto free_bind_ops;
                }
 
-               if (XE_IOCTL_DBG(xe, op > XE_VM_BIND_OP_PREFETCH) ||
+               if (XE_IOCTL_DBG(xe, op > DRM_XE_VM_BIND_OP_PREFETCH) ||
                    XE_IOCTL_DBG(xe, flags & ~SUPPORTED_FLAGS) ||
                    XE_IOCTL_DBG(xe, obj && is_null) ||
                    XE_IOCTL_DBG(xe, obj_offset && is_null) ||
-                   XE_IOCTL_DBG(xe, op != XE_VM_BIND_OP_MAP &&
+                   XE_IOCTL_DBG(xe, op != DRM_XE_VM_BIND_OP_MAP &&
                                 is_null) ||
                    XE_IOCTL_DBG(xe, !obj &&
-                                op == XE_VM_BIND_OP_MAP &&
+                                op == DRM_XE_VM_BIND_OP_MAP &&
                                 !is_null) ||
                    XE_IOCTL_DBG(xe, !obj &&
-                                op == XE_VM_BIND_OP_UNMAP_ALL) ||
+                                op == DRM_XE_VM_BIND_OP_UNMAP_ALL) ||
                    XE_IOCTL_DBG(xe, addr &&
-                                op == XE_VM_BIND_OP_UNMAP_ALL) ||
+                                op == DRM_XE_VM_BIND_OP_UNMAP_ALL) ||
                    XE_IOCTL_DBG(xe, range &&
-                                op == XE_VM_BIND_OP_UNMAP_ALL) ||
+                                op == DRM_XE_VM_BIND_OP_UNMAP_ALL) ||
                    XE_IOCTL_DBG(xe, obj &&
-                                op == XE_VM_BIND_OP_MAP_USERPTR) ||
+                                op == DRM_XE_VM_BIND_OP_MAP_USERPTR) ||
                    XE_IOCTL_DBG(xe, obj &&
-                                op == XE_VM_BIND_OP_PREFETCH) ||
-                   XE_IOCTL_DBG(xe, region &&
-                                op != XE_VM_BIND_OP_PREFETCH) ||
-                   XE_IOCTL_DBG(xe, !(BIT(region) &
+                                op == DRM_XE_VM_BIND_OP_PREFETCH) ||
+                   XE_IOCTL_DBG(xe, prefetch_region &&
+                                op != DRM_XE_VM_BIND_OP_PREFETCH) ||
+                   XE_IOCTL_DBG(xe, !(BIT(prefetch_region) &
                                       xe->info.mem_region_mask)) ||
                    XE_IOCTL_DBG(xe, obj &&
-                                op == XE_VM_BIND_OP_UNMAP)) {
+                                op == DRM_XE_VM_BIND_OP_UNMAP)) {
                        err = -EINVAL;
                        goto free_bind_ops;
                }
@@ -3188,9 +2907,8 @@ static int vm_bind_ioctl_check_args(struct xe_device *xe,
                if (XE_IOCTL_DBG(xe, obj_offset & ~PAGE_MASK) ||
                    XE_IOCTL_DBG(xe, addr & ~PAGE_MASK) ||
                    XE_IOCTL_DBG(xe, range & ~PAGE_MASK) ||
-                   XE_IOCTL_DBG(xe, !range && op !=
-                                XE_VM_BIND_OP_RESTART &&
-                                op != XE_VM_BIND_OP_UNMAP_ALL)) {
+                   XE_IOCTL_DBG(xe, !range &&
+                                op != DRM_XE_VM_BIND_OP_UNMAP_ALL)) {
                        err = -EINVAL;
                        goto free_bind_ops;
                }
@@ -3237,6 +2955,12 @@ int xe_vm_bind_ioctl(struct drm_device *dev, void *data, struct drm_file *file)
                        err = -EINVAL;
                        goto put_exec_queue;
                }
+
+               if (XE_IOCTL_DBG(xe, async !=
+                                !!(q->flags & EXEC_QUEUE_FLAG_VM_ASYNC))) {
+                       err = -EINVAL;
+                       goto put_exec_queue;
+               }
        }
 
        vm = xe_vm_lookup(xef, args->vm_id);
@@ -3245,6 +2969,14 @@ int xe_vm_bind_ioctl(struct drm_device *dev, void *data, struct drm_file *file)
                goto put_exec_queue;
        }
 
+       if (!args->exec_queue_id) {
+               if (XE_IOCTL_DBG(xe, async !=
+                                !!(vm->flags & XE_VM_FLAG_ASYNC_DEFAULT))) {
+                       err = -EINVAL;
+                       goto put_vm;
+               }
+       }
+
        err = down_write_killable(&vm->lock);
        if (err)
                goto put_vm;
@@ -3254,34 +2986,6 @@ int xe_vm_bind_ioctl(struct drm_device *dev, void *data, struct drm_file *file)
                goto release_vm_lock;
        }
 
-       if (bind_ops[0].op == XE_VM_BIND_OP_RESTART) {
-               if (XE_IOCTL_DBG(xe, !(vm->flags & XE_VM_FLAG_ASYNC_BIND_OPS)))
-                       err = -EOPNOTSUPP;
-               if (XE_IOCTL_DBG(xe, !err && args->num_syncs))
-                       err = EINVAL;
-               if (XE_IOCTL_DBG(xe, !err && !vm->async_ops.error))
-                       err = -EPROTO;
-
-               if (!err) {
-                       trace_xe_vm_restart(vm);
-                       vm_set_async_error(vm, 0);
-
-                       queue_work(system_unbound_wq, &vm->async_ops.work);
-
-                       /* Rebinds may have been blocked, give worker a kick */
-                       if (xe_vm_in_compute_mode(vm))
-                               xe_vm_queue_rebind_worker(vm);
-               }
-
-               goto release_vm_lock;
-       }
-
-       if (XE_IOCTL_DBG(xe, !vm->async_ops.error &&
-                        async != !!(vm->flags & XE_VM_FLAG_ASYNC_BIND_OPS))) {
-               err = -EOPNOTSUPP;
-               goto release_vm_lock;
-       }
-
        for (i = 0; i < args->num_binds; ++i) {
                u64 range = bind_ops[i].range;
                u64 addr = bind_ops[i].addr;
@@ -3367,18 +3071,6 @@ int xe_vm_bind_ioctl(struct drm_device *dev, void *data, struct drm_file *file)
                        goto free_syncs;
        }
 
-       /* Do some error checking first to make the unwind easier */
-       for (i = 0; i < args->num_binds; ++i) {
-               u64 range = bind_ops[i].range;
-               u64 addr = bind_ops[i].addr;
-               u32 op = bind_ops[i].op;
-               u32 flags = bind_ops[i].flags;
-
-               err = vm_bind_ioctl_lookup_vma(vm, bos[i], addr, range, op, flags);
-               if (err)
-                       goto free_syncs;
-       }
-
        for (i = 0; i < args->num_binds; ++i) {
                u64 range = bind_ops[i].range;
                u64 addr = bind_ops[i].addr;
@@ -3386,11 +3078,11 @@ int xe_vm_bind_ioctl(struct drm_device *dev, void *data, struct drm_file *file)
                u32 flags = bind_ops[i].flags;
                u64 obj_offset = bind_ops[i].obj_offset;
                u8 tile_mask = bind_ops[i].tile_mask;
-               u32 region = bind_ops[i].region;
+               u32 prefetch_region = bind_ops[i].prefetch_mem_region_instance;
 
                ops[i] = vm_bind_ioctl_ops_create(vm, bos[i], obj_offset,
                                                  addr, range, op, flags,
-                                                 tile_mask, region);
+                                                 tile_mask, prefetch_region);
                if (IS_ERR(ops[i])) {
                        err = PTR_ERR(ops[i]);
                        ops[i] = NULL;
@@ -3411,10 +3103,19 @@ int xe_vm_bind_ioctl(struct drm_device *dev, void *data, struct drm_file *file)
                goto unwind_ops;
        }
 
-       err = vm_bind_ioctl_ops_execute(vm, &ops_list, async);
+       xe_vm_get(vm);
+       if (q)
+               xe_exec_queue_get(q);
+
+       err = vm_bind_ioctl_ops_execute(vm, &ops_list);
+
        up_write(&vm->lock);
 
-       for (i = 0; i < args->num_binds; ++i)
+       if (q)
+               xe_exec_queue_put(q);
+       xe_vm_put(vm);
+
+       for (i = 0; bos && i < args->num_binds; ++i)
                xe_bo_put(bos[i]);
 
        kfree(bos);
@@ -3427,8 +3128,12 @@ int xe_vm_bind_ioctl(struct drm_device *dev, void *data, struct drm_file *file)
 unwind_ops:
        vm_bind_ioctl_ops_unwind(vm, ops, args->num_binds);
 free_syncs:
-       for (i = 0; err == -ENODATA && i < num_syncs; i++)
-               xe_sync_entry_signal(&syncs[i], NULL, dma_fence_get_stub());
+       for (i = 0; err == -ENODATA && i < num_syncs; i++) {
+               struct dma_fence *fence =
+                       xe_exec_queue_last_fence_get(to_wait_exec_queue(vm, q), vm);
+
+               xe_sync_entry_signal(&syncs[i], NULL, fence);
+       }
        while (num_syncs--)
                xe_sync_entry_cleanup(&syncs[num_syncs]);