drm/xe/vm: Fix ASID XA usage
linux-2.6-microblaze.git: drivers/gpu/drm/xe/xe_vm.c
index 89df50f..7c0ae43 100644
@@ -1229,7 +1229,8 @@ static u64 pde_encode_pat_index(struct xe_device *xe, u16 pat_index)
        return pte;
 }
 
-static u64 pte_encode_pat_index(struct xe_device *xe, u16 pat_index)
+static u64 pte_encode_pat_index(struct xe_device *xe, u16 pat_index,
+                               u32 pt_level)
 {
        u64 pte = 0;
 
@@ -1239,8 +1240,12 @@ static u64 pte_encode_pat_index(struct xe_device *xe, u16 pat_index)
        if (pat_index & BIT(1))
                pte |= XE_PPGTT_PTE_PAT1;
 
-       if (pat_index & BIT(2))
-               pte |= XE_PPGTT_PTE_PAT2;
+       if (pat_index & BIT(2)) {
+               if (pt_level)
+                       pte |= XE_PPGTT_PDE_PDPE_PAT2;
+               else
+                       pte |= XE_PPGTT_PTE_PAT2;
+       }
 
        if (pat_index & BIT(3))
                pte |= XELPG_PPGTT_PTE_PAT3;
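For clarity, the reason pte_encode_pat_index() now takes pt_level: leaf PTEs carry bit 2 of the PAT index in XE_PPGTT_PTE_PAT2, while page-directory entries (pt_level > 0, i.e. PDE/PDPE) carry it in XE_PPGTT_PDE_PDPE_PAT2. The same selection, restated as a small helper purely for illustration (not part of the patch):

static u64 pat2_bit_for_level(u32 pt_level)
{
	/* Directory entries place PAT[2] in a different PTE bit than
	 * leaf entries, which is why pt_level is now threaded through
	 * the encode helpers above.
	 */
	return pt_level ? XE_PPGTT_PDE_PDPE_PAT2 : XE_PPGTT_PTE_PAT2;
}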
@@ -1284,7 +1289,7 @@ static u64 xelp_pte_encode_bo(struct xe_bo *bo, u64 bo_offset,
 
        pte = xe_bo_addr(bo, bo_offset, XE_PAGE_SIZE);
        pte |= XE_PAGE_PRESENT | XE_PAGE_RW;
-       pte |= pte_encode_pat_index(xe, pat_index);
+       pte |= pte_encode_pat_index(xe, pat_index, pt_level);
        pte |= pte_encode_ps(pt_level);
 
        if (xe_bo_is_vram(bo) || xe_bo_is_stolen_devmem(bo))
@@ -1303,7 +1308,7 @@ static u64 xelp_pte_encode_vma(u64 pte, struct xe_vma *vma,
        if (likely(!xe_vma_read_only(vma)))
                pte |= XE_PAGE_RW;
 
-       pte |= pte_encode_pat_index(xe, pat_index);
+       pte |= pte_encode_pat_index(xe, pat_index, pt_level);
        pte |= pte_encode_ps(pt_level);
 
        if (unlikely(xe_vma_is_null(vma)))
@@ -1323,7 +1328,7 @@ static u64 xelp_pte_encode_addr(struct xe_device *xe, u64 addr,
 
        pte = addr;
        pte |= XE_PAGE_PRESENT | XE_PAGE_RW;
-       pte |= pte_encode_pat_index(xe, pat_index);
+       pte |= pte_encode_pat_index(xe, pat_index, pt_level);
        pte |= pte_encode_ps(pt_level);
 
        if (devmem)
@@ -1524,6 +1529,13 @@ void xe_vm_close_and_put(struct xe_vm *vm)
        if (xe_vm_in_compute_mode(vm))
                flush_work(&vm->preempt.rebind_work);
 
+       down_write(&vm->lock);
+       for_each_tile(tile, xe, id) {
+               if (vm->q[id])
+                       xe_exec_queue_last_fence_put(vm->q[id], vm);
+       }
+       up_write(&vm->lock);
+
        for_each_tile(tile, xe, id) {
                if (vm->q[id]) {
                        xe_exec_queue_kill(vm->q[id]);
@@ -1665,16 +1677,23 @@ u64 xe_vm_pdp4_descriptor(struct xe_vm *vm, struct xe_tile *tile)
                                         tile_to_xe(tile)->pat.idx[XE_CACHE_WB]);
 }
 
+static struct xe_exec_queue *
+to_wait_exec_queue(struct xe_vm *vm, struct xe_exec_queue *q)
+{
+       return q ? q : vm->q[0];
+}
+
 static struct dma_fence *
 xe_vm_unbind_vma(struct xe_vma *vma, struct xe_exec_queue *q,
                 struct xe_sync_entry *syncs, u32 num_syncs,
                 bool first_op, bool last_op)
 {
+       struct xe_vm *vm = xe_vma_vm(vma);
+       struct xe_exec_queue *wait_exec_queue = to_wait_exec_queue(vm, q);
        struct xe_tile *tile;
        struct dma_fence *fence = NULL;
        struct dma_fence **fences = NULL;
        struct dma_fence_array *cf = NULL;
-       struct xe_vm *vm = xe_vma_vm(vma);
        int cur_fence = 0, i;
        int number_tiles = hweight8(vma->tile_present);
        int err;
@@ -1721,13 +1740,14 @@ next:
                }
        }
 
+       fence = cf ? &cf->base : !fence ?
+               xe_exec_queue_last_fence_get(wait_exec_queue, vm) : fence;
        if (last_op) {
                for (i = 0; i < num_syncs; i++)
-                       xe_sync_entry_signal(&syncs[i], NULL,
-                                            cf ? &cf->base : fence);
+                       xe_sync_entry_signal(&syncs[i], NULL, fence);
        }
 
-       return cf ? &cf->base : !fence ? dma_fence_get_stub() : fence;
+       return fence;
 
 err_fences:
        if (fences) {
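The fence selection added in this hunk packs three cases into one nested conditional; unrolled, the same logic reads (restatement only, no behavioural change):

	if (cf)			/* multiple tiles: return the composite fence */
		fence = &cf->base;
	else if (!fence)	/* nothing unbound: reuse the queue's last fence */
		fence = xe_exec_queue_last_fence_get(wait_exec_queue, vm);
	/* else: keep the single per-tile unbind fence already in 'fence' */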
@@ -1826,6 +1846,7 @@ static int __xe_vm_bind(struct xe_vm *vm, struct xe_vma *vma,
                        bool last_op)
 {
        struct dma_fence *fence;
+       struct xe_exec_queue *wait_exec_queue = to_wait_exec_queue(vm, q);
 
        xe_vm_assert_held(vm);
 
@@ -1839,13 +1860,15 @@ static int __xe_vm_bind(struct xe_vm *vm, struct xe_vma *vma,
 
                xe_assert(vm->xe, xe_vm_in_fault_mode(vm));
 
-               fence = dma_fence_get_stub();
+               fence = xe_exec_queue_last_fence_get(wait_exec_queue, vm);
                if (last_op) {
                        for (i = 0; i < num_syncs; i++)
                                xe_sync_entry_signal(&syncs[i], NULL, fence);
                }
        }
 
+       if (last_op)
+               xe_exec_queue_last_fence_set(wait_exec_queue, vm, fence);
        if (last_op && xe_vm_sync_mode(vm, q))
                dma_fence_wait(fence, true);
        dma_fence_put(fence);
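The last-fence hunks in __xe_vm_bind() (and the matching ones in xe_vm_unbind_vma() and xe_vm_prefetch()) replace dma_fence_get_stub() with per-exec-queue fence tracking: when an op needs no GPU work, the queue's previous fence is reused to signal the user syncs, and whatever fence the op produced is then recorded as the queue's last fence so later ops and waits stay ordered. A rough sketch of that pattern, using only helpers visible in this diff; submit_bind_job() is a hypothetical stand-in for xe_vm_bind_vma()/xe_vm_unbind_vma(), and reference-counting details are simplified:

static int bind_op_sketch(struct xe_vm *vm, struct xe_exec_queue *q,
			  struct xe_sync_entry *syncs, u32 num_syncs,
			  bool no_gpu_work, bool last_op)
{
	struct xe_exec_queue *wait_exec_queue = to_wait_exec_queue(vm, q);
	struct dma_fence *fence;
	u32 i;

	if (no_gpu_work)
		/* Reuse the previous op's fence instead of the stub fence. */
		fence = xe_exec_queue_last_fence_get(wait_exec_queue, vm);
	else
		fence = submit_bind_job(vm, q);		/* hypothetical */

	if (last_op) {
		for (i = 0; i < num_syncs; i++)
			xe_sync_entry_signal(&syncs[i], NULL, fence);
		/* Later ops on this queue now order against this fence. */
		xe_exec_queue_last_fence_set(wait_exec_queue, vm, fence);
	}

	dma_fence_put(fence);
	return 0;
}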
@@ -1878,6 +1901,7 @@ static int xe_vm_unbind(struct xe_vm *vm, struct xe_vma *vma,
                        u32 num_syncs, bool first_op, bool last_op)
 {
        struct dma_fence *fence;
+       struct xe_exec_queue *wait_exec_queue = to_wait_exec_queue(vm, q);
 
        xe_vm_assert_held(vm);
        xe_bo_assert_held(xe_vma_bo(vma));
@@ -1887,6 +1911,8 @@ static int xe_vm_unbind(struct xe_vm *vm, struct xe_vma *vma,
                return PTR_ERR(fence);
 
        xe_vma_destroy(vma, fence);
+       if (last_op)
+               xe_exec_queue_last_fence_set(wait_exec_queue, vm, fence);
        if (last_op && xe_vm_sync_mode(vm, q))
                dma_fence_wait(fence, true);
        dma_fence_put(fence);
@@ -1894,10 +1920,10 @@ static int xe_vm_unbind(struct xe_vm *vm, struct xe_vma *vma,
        return 0;
 }
 
-#define ALL_DRM_XE_VM_CREATE_FLAGS (DRM_XE_VM_CREATE_SCRATCH_PAGE | \
-                                   DRM_XE_VM_CREATE_COMPUTE_MODE | \
-                                   DRM_XE_VM_CREATE_ASYNC_DEFAULT | \
-                                   DRM_XE_VM_CREATE_FAULT_MODE)
+#define ALL_DRM_XE_VM_CREATE_FLAGS (DRM_XE_VM_CREATE_FLAG_SCRATCH_PAGE | \
+                                   DRM_XE_VM_CREATE_FLAG_COMPUTE_MODE | \
+                                   DRM_XE_VM_CREATE_FLAG_ASYNC_DEFAULT | \
+                                   DRM_XE_VM_CREATE_FLAG_FAULT_MODE)
 
 int xe_vm_create_ioctl(struct drm_device *dev, void *data,
                       struct drm_file *file)
@@ -1915,9 +1941,9 @@ int xe_vm_create_ioctl(struct drm_device *dev, void *data,
                return -EINVAL;
 
        if (XE_WA(xe_root_mmio_gt(xe), 14016763929))
-               args->flags |= DRM_XE_VM_CREATE_SCRATCH_PAGE;
+               args->flags |= DRM_XE_VM_CREATE_FLAG_SCRATCH_PAGE;
 
-       if (XE_IOCTL_DBG(xe, args->flags & DRM_XE_VM_CREATE_FAULT_MODE &&
+       if (XE_IOCTL_DBG(xe, args->flags & DRM_XE_VM_CREATE_FLAG_FAULT_MODE &&
                         !xe->info.supports_usm))
                return -EINVAL;
 
@@ -1927,32 +1953,32 @@ int xe_vm_create_ioctl(struct drm_device *dev, void *data,
        if (XE_IOCTL_DBG(xe, args->flags & ~ALL_DRM_XE_VM_CREATE_FLAGS))
                return -EINVAL;
 
-       if (XE_IOCTL_DBG(xe, args->flags & DRM_XE_VM_CREATE_SCRATCH_PAGE &&
-                        args->flags & DRM_XE_VM_CREATE_FAULT_MODE))
+       if (XE_IOCTL_DBG(xe, args->flags & DRM_XE_VM_CREATE_FLAG_SCRATCH_PAGE &&
+                        args->flags & DRM_XE_VM_CREATE_FLAG_FAULT_MODE))
                return -EINVAL;
 
-       if (XE_IOCTL_DBG(xe, args->flags & DRM_XE_VM_CREATE_COMPUTE_MODE &&
-                        args->flags & DRM_XE_VM_CREATE_FAULT_MODE))
+       if (XE_IOCTL_DBG(xe, args->flags & DRM_XE_VM_CREATE_FLAG_COMPUTE_MODE &&
+                        args->flags & DRM_XE_VM_CREATE_FLAG_FAULT_MODE))
                return -EINVAL;
 
-       if (XE_IOCTL_DBG(xe, args->flags & DRM_XE_VM_CREATE_FAULT_MODE &&
+       if (XE_IOCTL_DBG(xe, args->flags & DRM_XE_VM_CREATE_FLAG_FAULT_MODE &&
                         xe_device_in_non_fault_mode(xe)))
                return -EINVAL;
 
-       if (XE_IOCTL_DBG(xe, !(args->flags & DRM_XE_VM_CREATE_FAULT_MODE) &&
+       if (XE_IOCTL_DBG(xe, !(args->flags & DRM_XE_VM_CREATE_FLAG_FAULT_MODE) &&
                         xe_device_in_fault_mode(xe)))
                return -EINVAL;
 
        if (XE_IOCTL_DBG(xe, args->extensions))
                return -EINVAL;
 
-       if (args->flags & DRM_XE_VM_CREATE_SCRATCH_PAGE)
+       if (args->flags & DRM_XE_VM_CREATE_FLAG_SCRATCH_PAGE)
                flags |= XE_VM_FLAG_SCRATCH_PAGE;
-       if (args->flags & DRM_XE_VM_CREATE_COMPUTE_MODE)
+       if (args->flags & DRM_XE_VM_CREATE_FLAG_COMPUTE_MODE)
                flags |= XE_VM_FLAG_COMPUTE_MODE;
-       if (args->flags & DRM_XE_VM_CREATE_ASYNC_DEFAULT)
+       if (args->flags & DRM_XE_VM_CREATE_FLAG_ASYNC_DEFAULT)
                flags |= XE_VM_FLAG_ASYNC_DEFAULT;
-       if (args->flags & DRM_XE_VM_CREATE_FAULT_MODE)
+       if (args->flags & DRM_XE_VM_CREATE_FLAG_FAULT_MODE)
                flags |= XE_VM_FLAG_FAULT_MODE;
 
        vm = xe_vm_create(xe, flags);
@@ -1970,13 +1996,14 @@ int xe_vm_create_ioctl(struct drm_device *dev, void *data,
        if (xe->info.has_asid) {
                mutex_lock(&xe->usm.lock);
                err = xa_alloc_cyclic(&xe->usm.asid_to_vm, &asid, vm,
-                                     XA_LIMIT(0, XE_MAX_ASID - 1),
+                                     XA_LIMIT(1, XE_MAX_ASID - 1),
                                      &xe->usm.next_asid, GFP_KERNEL);
                mutex_unlock(&xe->usm.lock);
-               if (err) {
+               if (err < 0) {
                        xe_vm_close_and_put(vm);
                        return err;
                }
+               err = 0;
                vm->usm.asid = asid;
        }
 
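This is the ASID fix named in the subject: xa_alloc_cyclic() returns 1 (not 0) when the cyclic counter wraps, which is still a successful allocation, so the old "if (err)" check tore down a valid VM on wrap; the limit also now starts at 1, keeping ASID 0 unused. A minimal, generic sketch of the corrected return-value handling (hypothetical names; the driver uses xe->usm.asid_to_vm, xe->usm.next_asid and XE_MAX_ASID):

#include <linux/xarray.h>

static DEFINE_XARRAY_ALLOC1(example_ids);	/* IDs allocated from 1 */
static u32 example_next;

static int example_alloc_id(void *entry, u32 *id)
{
	int ret;

	ret = xa_alloc_cyclic(&example_ids, id, entry,
			      XA_LIMIT(1, U32_MAX - 1),
			      &example_next, GFP_KERNEL);
	if (ret < 0)		/* -ENOMEM / -EBUSY are real errors */
		return ret;

	return 0;		/* ret == 1 (wrapped) is still success */
}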
@@ -2036,6 +2063,7 @@ static int xe_vm_prefetch(struct xe_vm *vm, struct xe_vma *vma,
                          struct xe_sync_entry *syncs, u32 num_syncs,
                          bool first_op, bool last_op)
 {
+       struct xe_exec_queue *wait_exec_queue = to_wait_exec_queue(vm, q);
        int err;
 
        xe_assert(vm->xe, region <= ARRAY_SIZE(region_to_mem_type));
@@ -2054,9 +2082,12 @@ static int xe_vm_prefetch(struct xe_vm *vm, struct xe_vma *vma,
 
                /* Nothing to do, signal fences now */
                if (last_op) {
-                       for (i = 0; i < num_syncs; i++)
-                               xe_sync_entry_signal(&syncs[i], NULL,
-                                                    dma_fence_get_stub());
+                       for (i = 0; i < num_syncs; i++) {
+                               struct dma_fence *fence =
+                                       xe_exec_queue_last_fence_get(wait_exec_queue, vm);
+
+                               xe_sync_entry_signal(&syncs[i], NULL, fence);
+                       }
                }
 
                return 0;
@@ -2130,7 +2161,8 @@ static void print_op(struct xe_device *xe, struct drm_gpuva_op *op)
 static struct drm_gpuva_ops *
 vm_bind_ioctl_ops_create(struct xe_vm *vm, struct xe_bo *bo,
                         u64 bo_offset_or_userptr, u64 addr, u64 range,
-                        u32 operation, u32 flags, u8 tile_mask, u32 region)
+                        u32 operation, u32 flags, u8 tile_mask,
+                        u32 prefetch_region)
 {
        struct drm_gem_object *obj = bo ? &bo->ttm.base : NULL;
        struct drm_gpuva_ops *ops;
@@ -2147,48 +2179,18 @@ vm_bind_ioctl_ops_create(struct xe_vm *vm, struct xe_bo *bo,
               (ULL)bo_offset_or_userptr);
 
        switch (operation) {
-       case XE_VM_BIND_OP_MAP:
-       case XE_VM_BIND_OP_MAP_USERPTR:
+       case DRM_XE_VM_BIND_OP_MAP:
+       case DRM_XE_VM_BIND_OP_MAP_USERPTR:
                ops = drm_gpuvm_sm_map_ops_create(&vm->gpuvm, addr, range,
                                                  obj, bo_offset_or_userptr);
-               if (IS_ERR(ops))
-                       return ops;
-
-               drm_gpuva_for_each_op(__op, ops) {
-                       struct xe_vma_op *op = gpuva_op_to_vma_op(__op);
-
-                       op->tile_mask = tile_mask;
-                       op->map.immediate =
-                               flags & XE_VM_BIND_FLAG_IMMEDIATE;
-                       op->map.read_only =
-                               flags & XE_VM_BIND_FLAG_READONLY;
-                       op->map.is_null = flags & XE_VM_BIND_FLAG_NULL;
-               }
                break;
-       case XE_VM_BIND_OP_UNMAP:
+       case DRM_XE_VM_BIND_OP_UNMAP:
                ops = drm_gpuvm_sm_unmap_ops_create(&vm->gpuvm, addr, range);
-               if (IS_ERR(ops))
-                       return ops;
-
-               drm_gpuva_for_each_op(__op, ops) {
-                       struct xe_vma_op *op = gpuva_op_to_vma_op(__op);
-
-                       op->tile_mask = tile_mask;
-               }
                break;
-       case XE_VM_BIND_OP_PREFETCH:
+       case DRM_XE_VM_BIND_OP_PREFETCH:
                ops = drm_gpuvm_prefetch_ops_create(&vm->gpuvm, addr, range);
-               if (IS_ERR(ops))
-                       return ops;
-
-               drm_gpuva_for_each_op(__op, ops) {
-                       struct xe_vma_op *op = gpuva_op_to_vma_op(__op);
-
-                       op->tile_mask = tile_mask;
-                       op->prefetch.region = region;
-               }
                break;
-       case XE_VM_BIND_OP_UNMAP_ALL:
+       case DRM_XE_VM_BIND_OP_UNMAP_ALL:
                xe_assert(vm->xe, bo);
 
                err = xe_bo_lock(bo, true);
@@ -2202,19 +2204,13 @@ vm_bind_ioctl_ops_create(struct xe_vm *vm, struct xe_bo *bo,
                ops = drm_gpuvm_bo_unmap_ops_create(vm_bo);
                drm_gpuvm_bo_put(vm_bo);
                xe_bo_unlock(bo);
-               if (IS_ERR(ops))
-                       return ops;
-
-               drm_gpuva_for_each_op(__op, ops) {
-                       struct xe_vma_op *op = gpuva_op_to_vma_op(__op);
-
-                       op->tile_mask = tile_mask;
-               }
                break;
        default:
                drm_warn(&vm->xe->drm, "NOT POSSIBLE");
                ops = ERR_PTR(-EINVAL);
        }
+       if (IS_ERR(ops))
+               return ops;
 
 #ifdef TEST_VM_ASYNC_OPS_ERROR
        if (operation & FORCE_ASYNC_OP_ERROR) {
@@ -2225,9 +2221,22 @@ vm_bind_ioctl_ops_create(struct xe_vm *vm, struct xe_bo *bo,
        }
 #endif
 
-       if (!IS_ERR(ops))
-               drm_gpuva_for_each_op(__op, ops)
-                       print_op(vm->xe, __op);
+       drm_gpuva_for_each_op(__op, ops) {
+               struct xe_vma_op *op = gpuva_op_to_vma_op(__op);
+
+               op->tile_mask = tile_mask;
+               if (__op->op == DRM_GPUVA_OP_MAP) {
+                       op->map.immediate =
+                               flags & DRM_XE_VM_BIND_FLAG_IMMEDIATE;
+                       op->map.read_only =
+                               flags & DRM_XE_VM_BIND_FLAG_READONLY;
+                       op->map.is_null = flags & DRM_XE_VM_BIND_FLAG_NULL;
+               } else if (__op->op == DRM_GPUVA_OP_PREFETCH) {
+                       op->prefetch.region = prefetch_region;
+               }
+
+               print_op(vm->xe, __op);
+       }
 
        return ops;
 }
@@ -2310,6 +2319,10 @@ static int xe_vma_op_commit(struct xe_vm *vm, struct xe_vma_op *op)
                        op->flags |= XE_VMA_OP_COMMITTED;
                break;
        case DRM_GPUVA_OP_REMAP:
+       {
+               u8 tile_present =
+                       gpuva_to_vma(op->base.remap.unmap->va)->tile_present;
+
                prep_vma_destroy(vm, gpuva_to_vma(op->base.remap.unmap->va),
                                 true);
                op->flags |= XE_VMA_OP_COMMITTED;
@@ -2318,15 +2331,21 @@ static int xe_vma_op_commit(struct xe_vm *vm, struct xe_vma_op *op)
                        err |= xe_vm_insert_vma(vm, op->remap.prev);
                        if (!err)
                                op->flags |= XE_VMA_OP_PREV_COMMITTED;
-                       if (!err && op->remap.skip_prev)
+                       if (!err && op->remap.skip_prev) {
+                               op->remap.prev->tile_present =
+                                       tile_present;
                                op->remap.prev = NULL;
+                       }
                }
                if (op->remap.next) {
                        err |= xe_vm_insert_vma(vm, op->remap.next);
                        if (!err)
                                op->flags |= XE_VMA_OP_NEXT_COMMITTED;
-                       if (!err && op->remap.skip_next)
+                       if (!err && op->remap.skip_next) {
+                               op->remap.next->tile_present =
+                                       tile_present;
                                op->remap.next = NULL;
+                       }
                }
 
                /* Adjust for partial unbind after removing VMA from VM */
@@ -2335,6 +2354,7 @@ static int xe_vma_op_commit(struct xe_vm *vm, struct xe_vma_op *op)
                        op->base.remap.unmap->va->va.range = op->remap.range;
                }
                break;
+       }
        case DRM_GPUVA_OP_UNMAP:
                prep_vma_destroy(vm, gpuva_to_vma(op->base.unmap.va), true);
                op->flags |= XE_VMA_OP_COMMITTED;
@@ -2787,13 +2807,13 @@ static int vm_bind_ioctl_ops_execute(struct xe_vm *vm,
 
 #ifdef TEST_VM_ASYNC_OPS_ERROR
 #define SUPPORTED_FLAGS        \
-       (FORCE_ASYNC_OP_ERROR | XE_VM_BIND_FLAG_ASYNC | \
-        XE_VM_BIND_FLAG_READONLY | XE_VM_BIND_FLAG_IMMEDIATE | \
-        XE_VM_BIND_FLAG_NULL | 0xffff)
+       (FORCE_ASYNC_OP_ERROR | DRM_XE_VM_BIND_FLAG_ASYNC | \
+        DRM_XE_VM_BIND_FLAG_READONLY | DRM_XE_VM_BIND_FLAG_IMMEDIATE | \
+        DRM_XE_VM_BIND_FLAG_NULL | 0xffff)
 #else
 #define SUPPORTED_FLAGS        \
-       (XE_VM_BIND_FLAG_ASYNC | XE_VM_BIND_FLAG_READONLY | \
-        XE_VM_BIND_FLAG_IMMEDIATE | XE_VM_BIND_FLAG_NULL | \
+       (DRM_XE_VM_BIND_FLAG_ASYNC | DRM_XE_VM_BIND_FLAG_READONLY | \
+        DRM_XE_VM_BIND_FLAG_IMMEDIATE | DRM_XE_VM_BIND_FLAG_NULL | \
         0xffff)
 #endif
 #define XE_64K_PAGE_MASK 0xffffull
@@ -2840,46 +2860,46 @@ static int vm_bind_ioctl_check_args(struct xe_device *xe,
                u32 flags = (*bind_ops)[i].flags;
                u32 obj = (*bind_ops)[i].obj;
                u64 obj_offset = (*bind_ops)[i].obj_offset;
-               u32 region = (*bind_ops)[i].region;
-               bool is_null = flags & XE_VM_BIND_FLAG_NULL;
+               u32 prefetch_region = (*bind_ops)[i].prefetch_mem_region_instance;
+               bool is_null = flags & DRM_XE_VM_BIND_FLAG_NULL;
 
                if (i == 0) {
-                       *async = !!(flags & XE_VM_BIND_FLAG_ASYNC);
+                       *async = !!(flags & DRM_XE_VM_BIND_FLAG_ASYNC);
                        if (XE_IOCTL_DBG(xe, !*async && args->num_syncs)) {
                                err = -EINVAL;
                                goto free_bind_ops;
                        }
                } else if (XE_IOCTL_DBG(xe, *async !=
-                                       !!(flags & XE_VM_BIND_FLAG_ASYNC))) {
+                                       !!(flags & DRM_XE_VM_BIND_FLAG_ASYNC))) {
                        err = -EINVAL;
                        goto free_bind_ops;
                }
 
-               if (XE_IOCTL_DBG(xe, op > XE_VM_BIND_OP_PREFETCH) ||
+               if (XE_IOCTL_DBG(xe, op > DRM_XE_VM_BIND_OP_PREFETCH) ||
                    XE_IOCTL_DBG(xe, flags & ~SUPPORTED_FLAGS) ||
                    XE_IOCTL_DBG(xe, obj && is_null) ||
                    XE_IOCTL_DBG(xe, obj_offset && is_null) ||
-                   XE_IOCTL_DBG(xe, op != XE_VM_BIND_OP_MAP &&
+                   XE_IOCTL_DBG(xe, op != DRM_XE_VM_BIND_OP_MAP &&
                                 is_null) ||
                    XE_IOCTL_DBG(xe, !obj &&
-                                op == XE_VM_BIND_OP_MAP &&
+                                op == DRM_XE_VM_BIND_OP_MAP &&
                                 !is_null) ||
                    XE_IOCTL_DBG(xe, !obj &&
-                                op == XE_VM_BIND_OP_UNMAP_ALL) ||
+                                op == DRM_XE_VM_BIND_OP_UNMAP_ALL) ||
                    XE_IOCTL_DBG(xe, addr &&
-                                op == XE_VM_BIND_OP_UNMAP_ALL) ||
+                                op == DRM_XE_VM_BIND_OP_UNMAP_ALL) ||
                    XE_IOCTL_DBG(xe, range &&
-                                op == XE_VM_BIND_OP_UNMAP_ALL) ||
+                                op == DRM_XE_VM_BIND_OP_UNMAP_ALL) ||
                    XE_IOCTL_DBG(xe, obj &&
-                                op == XE_VM_BIND_OP_MAP_USERPTR) ||
+                                op == DRM_XE_VM_BIND_OP_MAP_USERPTR) ||
                    XE_IOCTL_DBG(xe, obj &&
-                                op == XE_VM_BIND_OP_PREFETCH) ||
-                   XE_IOCTL_DBG(xe, region &&
-                                op != XE_VM_BIND_OP_PREFETCH) ||
-                   XE_IOCTL_DBG(xe, !(BIT(region) &
+                                op == DRM_XE_VM_BIND_OP_PREFETCH) ||
+                   XE_IOCTL_DBG(xe, prefetch_region &&
+                                op != DRM_XE_VM_BIND_OP_PREFETCH) ||
+                   XE_IOCTL_DBG(xe, !(BIT(prefetch_region) &
                                       xe->info.mem_region_mask)) ||
                    XE_IOCTL_DBG(xe, obj &&
-                                op == XE_VM_BIND_OP_UNMAP)) {
+                                op == DRM_XE_VM_BIND_OP_UNMAP)) {
                        err = -EINVAL;
                        goto free_bind_ops;
                }
@@ -2888,7 +2908,7 @@ static int vm_bind_ioctl_check_args(struct xe_device *xe,
                    XE_IOCTL_DBG(xe, addr & ~PAGE_MASK) ||
                    XE_IOCTL_DBG(xe, range & ~PAGE_MASK) ||
                    XE_IOCTL_DBG(xe, !range &&
-                                op != XE_VM_BIND_OP_UNMAP_ALL)) {
+                                op != DRM_XE_VM_BIND_OP_UNMAP_ALL)) {
                        err = -EINVAL;
                        goto free_bind_ops;
                }
@@ -3058,11 +3078,11 @@ int xe_vm_bind_ioctl(struct drm_device *dev, void *data, struct drm_file *file)
                u32 flags = bind_ops[i].flags;
                u64 obj_offset = bind_ops[i].obj_offset;
                u8 tile_mask = bind_ops[i].tile_mask;
-               u32 region = bind_ops[i].region;
+               u32 prefetch_region = bind_ops[i].prefetch_mem_region_instance;
 
                ops[i] = vm_bind_ioctl_ops_create(vm, bos[i], obj_offset,
                                                  addr, range, op, flags,
-                                                 tile_mask, region);
+                                                 tile_mask, prefetch_region);
                if (IS_ERR(ops[i])) {
                        err = PTR_ERR(ops[i]);
                        ops[i] = NULL;
@@ -3108,8 +3128,12 @@ int xe_vm_bind_ioctl(struct drm_device *dev, void *data, struct drm_file *file)
 unwind_ops:
        vm_bind_ioctl_ops_unwind(vm, ops, args->num_binds);
 free_syncs:
-       for (i = 0; err == -ENODATA && i < num_syncs; i++)
-               xe_sync_entry_signal(&syncs[i], NULL, dma_fence_get_stub());
+       for (i = 0; err == -ENODATA && i < num_syncs; i++) {
+               struct dma_fence *fence =
+                       xe_exec_queue_last_fence_get(to_wait_exec_queue(vm, q), vm);
+
+               xe_sync_entry_signal(&syncs[i], NULL, fence);
+       }
        while (num_syncs--)
                xe_sync_entry_cleanup(&syncs[num_syncs]);