drm/xe: Add helpers to hide struct xe_vma internals
drivers/gpu/drm/xe/xe_vm.c
1 // SPDX-License-Identifier: MIT
2 /*
3  * Copyright © 2021 Intel Corporation
4  */
5
6 #include "xe_vm.h"
7
8 #include <linux/dma-fence-array.h>
9
10 #include <drm/drm_print.h>
11 #include <drm/ttm/ttm_execbuf_util.h>
12 #include <drm/ttm/ttm_tt.h>
13 #include <drm/xe_drm.h>
14 #include <linux/delay.h>
15 #include <linux/kthread.h>
16 #include <linux/mm.h>
17 #include <linux/swap.h>
18
19 #include "xe_bo.h"
20 #include "xe_device.h"
21 #include "xe_engine.h"
22 #include "xe_gt.h"
23 #include "xe_gt_pagefault.h"
24 #include "xe_gt_tlb_invalidation.h"
25 #include "xe_migrate.h"
26 #include "xe_pm.h"
27 #include "xe_preempt_fence.h"
28 #include "xe_pt.h"
29 #include "xe_res_cursor.h"
30 #include "xe_sync.h"
31 #include "xe_trace.h"
32
33 #define TEST_VM_ASYNC_OPS_ERROR
34
35 /**
36  * xe_vma_userptr_check_repin() - Advisory check for repin needed
37  * @vma: The userptr vma
38  *
39  * Check if the userptr vma has been invalidated since the last successful
40  * repin. The check is advisory only and the function can be called
41  * without the vm->userptr.notifier_lock held. There is no guarantee that the
42  * vma userptr will remain valid after a lockless check, so typically
43  * the call needs to be followed by a proper check under the notifier_lock.
44  *
45  * Return: 0 if userptr vma is valid, -EAGAIN otherwise; repin recommended.
46  */
47 int xe_vma_userptr_check_repin(struct xe_vma *vma)
48 {
49         return mmu_interval_check_retry(&vma->userptr.notifier,
50                                         vma->userptr.notifier_seq) ?
51                 -EAGAIN : 0;
52 }
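
/*
 * Illustrative sketch of the intended calling pattern (not lifted from a
 * specific caller): do the advisory check locklessly and repin if it
 * indicates so; the authoritative re-check is then done under
 * vm->userptr.notifier_lock (see __xe_vm_userptr_needs_repin()):
 *
 *	if (xe_vma_userptr_check_repin(vma) == -EAGAIN)
 *		err = xe_vma_userptr_pin_pages(vma);
 */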
53
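/**
 * xe_vma_userptr_pin_pages() - Pin and map the pages backing a userptr vma
 * @vma: The userptr vma
 *
 * Grabs the pages currently backing the userptr range with
 * get_user_pages_fast(), builds and DMA-maps an sg table for them, and
 * records the mmu notifier sequence number so that later invalidations can
 * be detected with xe_vma_userptr_check_repin(). Retries internally if the
 * range is invalidated while pinning. Requires vm->lock.
 *
 * Return: 0 on success, negative error code on failure.
 */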
54 int xe_vma_userptr_pin_pages(struct xe_vma *vma)
55 {
56         struct xe_vm *vm = xe_vma_vm(vma);
57         struct xe_device *xe = vm->xe;
58         const unsigned long num_pages = xe_vma_size(vma) >> PAGE_SHIFT;
59         struct page **pages;
60         bool in_kthread = !current->mm;
61         unsigned long notifier_seq;
62         int pinned, ret, i;
63         bool read_only = xe_vma_read_only(vma);
64
65         lockdep_assert_held(&vm->lock);
66         XE_BUG_ON(!xe_vma_is_userptr(vma));
67 retry:
68         if (vma->destroyed)
69                 return 0;
70
71         notifier_seq = mmu_interval_read_begin(&vma->userptr.notifier);
72         if (notifier_seq == vma->userptr.notifier_seq)
73                 return 0;
74
75         pages = kvmalloc_array(num_pages, sizeof(*pages), GFP_KERNEL);
76         if (!pages)
77                 return -ENOMEM;
78
79         if (vma->userptr.sg) {
80                 dma_unmap_sgtable(xe->drm.dev,
81                                   vma->userptr.sg,
82                                   read_only ? DMA_TO_DEVICE :
83                                   DMA_BIDIRECTIONAL, 0);
84                 sg_free_table(vma->userptr.sg);
85                 vma->userptr.sg = NULL;
86         }
87
88         pinned = ret = 0;
89         if (in_kthread) {
90                 if (!mmget_not_zero(vma->userptr.notifier.mm)) {
91                         ret = -EFAULT;
92                         goto mm_closed;
93                 }
94                 kthread_use_mm(vma->userptr.notifier.mm);
95         }
96
97         while (pinned < num_pages) {
98                 ret = get_user_pages_fast(xe_vma_userptr(vma) +
99                                           pinned * PAGE_SIZE,
100                                           num_pages - pinned,
101                                           read_only ? 0 : FOLL_WRITE,
102                                           &pages[pinned]);
103                 if (ret < 0) {
104                         if (in_kthread)
105                                 ret = 0;
106                         break;
107                 }
108
109                 pinned += ret;
110                 ret = 0;
111         }
112
113         if (in_kthread) {
114                 kthread_unuse_mm(vma->userptr.notifier.mm);
115                 mmput(vma->userptr.notifier.mm);
116         }
117 mm_closed:
118         if (ret)
119                 goto out;
120
121         ret = sg_alloc_table_from_pages_segment(&vma->userptr.sgt, pages,
122                                                 pinned, 0,
123                                                 (u64)pinned << PAGE_SHIFT,
124                                                 xe_sg_segment_size(xe->drm.dev),
125                                                 GFP_KERNEL);
126         if (ret) {
127                 vma->userptr.sg = NULL;
128                 goto out;
129         }
130         vma->userptr.sg = &vma->userptr.sgt;
131
132         ret = dma_map_sgtable(xe->drm.dev, vma->userptr.sg,
133                               read_only ? DMA_TO_DEVICE :
134                               DMA_BIDIRECTIONAL,
135                               DMA_ATTR_SKIP_CPU_SYNC |
136                               DMA_ATTR_NO_KERNEL_MAPPING);
137         if (ret) {
138                 sg_free_table(vma->userptr.sg);
139                 vma->userptr.sg = NULL;
140                 goto out;
141         }
142
143         for (i = 0; i < pinned; ++i) {
144                 if (!read_only) {
145                         lock_page(pages[i]);
146                         set_page_dirty(pages[i]);
147                         unlock_page(pages[i]);
148                 }
149
150                 mark_page_accessed(pages[i]);
151         }
152
153 out:
154         release_pages(pages, pinned);
155         kvfree(pages);
156
157         if (!(ret < 0)) {
158                 vma->userptr.notifier_seq = notifier_seq;
159                 if (xe_vma_userptr_check_repin(vma) == -EAGAIN)
160                         goto retry;
161         }
162
163         return ret < 0 ? ret : 0;
164 }
165
166 static bool preempt_fences_waiting(struct xe_vm *vm)
167 {
168         struct xe_engine *e;
169
170         lockdep_assert_held(&vm->lock);
171         xe_vm_assert_held(vm);
172
173         list_for_each_entry(e, &vm->preempt.engines, compute.link) {
174                 if (!e->compute.pfence || (e->compute.pfence &&
175                     test_bit(DMA_FENCE_FLAG_ENABLE_SIGNAL_BIT,
176                              &e->compute.pfence->flags))) {
177                         return true;
178                 }
179         }
180
181         return false;
182 }
183
184 static void free_preempt_fences(struct list_head *list)
185 {
186         struct list_head *link, *next;
187
188         list_for_each_safe(link, next, list)
189                 xe_preempt_fence_free(to_preempt_fence_from_link(link));
190 }
191
192 static int alloc_preempt_fences(struct xe_vm *vm, struct list_head *list,
193                                 unsigned int *count)
194 {
195         lockdep_assert_held(&vm->lock);
196         xe_vm_assert_held(vm);
197
198         if (*count >= vm->preempt.num_engines)
199                 return 0;
200
201         for (; *count < vm->preempt.num_engines; ++(*count)) {
202                 struct xe_preempt_fence *pfence = xe_preempt_fence_alloc();
203
204                 if (IS_ERR(pfence))
205                         return PTR_ERR(pfence);
206
207                 list_move_tail(xe_preempt_fence_link(pfence), list);
208         }
209
210         return 0;
211 }
212
213 static int wait_for_existing_preempt_fences(struct xe_vm *vm)
214 {
215         struct xe_engine *e;
216
217         xe_vm_assert_held(vm);
218
219         list_for_each_entry(e, &vm->preempt.engines, compute.link) {
220                 if (e->compute.pfence) {
221                         long timeout = dma_fence_wait(e->compute.pfence, false);
222
223                         if (timeout < 0)
224                                 return -ETIME;
225                         dma_fence_put(e->compute.pfence);
226                         e->compute.pfence = NULL;
227                 }
228         }
229
230         return 0;
231 }
232
233 static bool xe_vm_is_idle(struct xe_vm *vm)
234 {
235         struct xe_engine *e;
236
237         xe_vm_assert_held(vm);
238         list_for_each_entry(e, &vm->preempt.engines, compute.link) {
239                 if (!xe_engine_is_idle(e))
240                         return false;
241         }
242
243         return true;
244 }
245
246 static void arm_preempt_fences(struct xe_vm *vm, struct list_head *list)
247 {
248         struct list_head *link;
249         struct xe_engine *e;
250
251         list_for_each_entry(e, &vm->preempt.engines, compute.link) {
252                 struct dma_fence *fence;
253
254                 link = list->next;
255                 XE_BUG_ON(link == list);
256
257                 fence = xe_preempt_fence_arm(to_preempt_fence_from_link(link),
258                                              e, e->compute.context,
259                                              ++e->compute.seqno);
260                 dma_fence_put(e->compute.pfence);
261                 e->compute.pfence = fence;
262         }
263 }
264
265 static int add_preempt_fences(struct xe_vm *vm, struct xe_bo *bo)
266 {
267         struct xe_engine *e;
268         struct ww_acquire_ctx ww;
269         int err;
270
271         err = xe_bo_lock(bo, &ww, vm->preempt.num_engines, true);
272         if (err)
273                 return err;
274
275         list_for_each_entry(e, &vm->preempt.engines, compute.link)
276                 if (e->compute.pfence) {
277                         dma_resv_add_fence(bo->ttm.base.resv,
278                                            e->compute.pfence,
279                                            DMA_RESV_USAGE_BOOKKEEP);
280                 }
281
282         xe_bo_unlock(bo, &ww);
283         return 0;
284 }
285
286 /**
287  * xe_vm_fence_all_extobjs() - Add a fence to vm's external objects' resv
288  * @vm: The vm.
289  * @fence: The fence to add.
290  * @usage: The resv usage for the fence.
291  *
292  * Loops over all of the vm's external object bindings and adds a @fence
293  * with the given @usage to all of the external object's reservation
294  * objects.
295  */
296 void xe_vm_fence_all_extobjs(struct xe_vm *vm, struct dma_fence *fence,
297                              enum dma_resv_usage usage)
298 {
299         struct xe_vma *vma;
300
301         list_for_each_entry(vma, &vm->extobj.list, extobj.link)
302                 dma_resv_add_fence(xe_vma_bo(vma)->ttm.base.resv, fence, usage);
303 }
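
/*
 * Illustrative usage sketch: callers typically pair this with adding the same
 * fence to the vm's own reservation object, e.g. when installing a preempt
 * fence:
 *
 *	dma_resv_add_fence(&vm->resv, pfence, DMA_RESV_USAGE_BOOKKEEP);
 *	xe_vm_fence_all_extobjs(vm, pfence, DMA_RESV_USAGE_BOOKKEEP);
 *
 * All external object dma-resvs must be held, e.g. via xe_vm_lock_dma_resv().
 */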
304
305 static void resume_and_reinstall_preempt_fences(struct xe_vm *vm)
306 {
307         struct xe_engine *e;
308
309         lockdep_assert_held(&vm->lock);
310         xe_vm_assert_held(vm);
311
312         list_for_each_entry(e, &vm->preempt.engines, compute.link) {
313                 e->ops->resume(e);
314
315                 dma_resv_add_fence(&vm->resv, e->compute.pfence,
316                                    DMA_RESV_USAGE_BOOKKEEP);
317                 xe_vm_fence_all_extobjs(vm, e->compute.pfence,
318                                         DMA_RESV_USAGE_BOOKKEEP);
319         }
320 }
321
322 int xe_vm_add_compute_engine(struct xe_vm *vm, struct xe_engine *e)
323 {
324         struct ttm_validate_buffer tv_onstack[XE_ONSTACK_TV];
325         struct ttm_validate_buffer *tv;
326         struct ww_acquire_ctx ww;
327         struct list_head objs;
328         struct dma_fence *pfence;
329         int err;
330         bool wait;
331
332         XE_BUG_ON(!xe_vm_in_compute_mode(vm));
333
334         down_write(&vm->lock);
335
336         err = xe_vm_lock_dma_resv(vm, &ww, tv_onstack, &tv, &objs, true, 1);
337         if (err)
338                 goto out_unlock_outer;
339
340         pfence = xe_preempt_fence_create(e, e->compute.context,
341                                          ++e->compute.seqno);
342         if (!pfence) {
343                 err = -ENOMEM;
344                 goto out_unlock;
345         }
346
347         list_add(&e->compute.link, &vm->preempt.engines);
348         ++vm->preempt.num_engines;
349         e->compute.pfence = pfence;
350
351         down_read(&vm->userptr.notifier_lock);
352
353         dma_resv_add_fence(&vm->resv, pfence,
354                            DMA_RESV_USAGE_BOOKKEEP);
355
356         xe_vm_fence_all_extobjs(vm, pfence, DMA_RESV_USAGE_BOOKKEEP);
357
358         /*
359          * Check to see if a preemption on the VM or a userptr invalidation
360          * is in flight; if so, trigger this preempt fence to sync state with
361          * the other preempt fences on the VM.
362          */
363         wait = __xe_vm_userptr_needs_repin(vm) || preempt_fences_waiting(vm);
364         if (wait)
365                 dma_fence_enable_sw_signaling(pfence);
366
367         up_read(&vm->userptr.notifier_lock);
368
369 out_unlock:
370         xe_vm_unlock_dma_resv(vm, tv_onstack, tv, &ww, &objs);
371 out_unlock_outer:
372         up_write(&vm->lock);
373
374         return err;
375 }
376
377 /**
378  * __xe_vm_userptr_needs_repin() - Check whether the VM does have userptrs
379  * that need repinning.
380  * @vm: The VM.
381  *
382  * This function checks for whether the VM has userptrs that need repinning,
383  * and provides a release-type barrier on the userptr.notifier_lock after
384  * checking.
385  *
386  * Return: 0 if there are no userptrs needing repinning, -EAGAIN if there are.
387  */
388 int __xe_vm_userptr_needs_repin(struct xe_vm *vm)
389 {
390         lockdep_assert_held_read(&vm->userptr.notifier_lock);
391
392         return (list_empty(&vm->userptr.repin_list) &&
393                 list_empty(&vm->userptr.invalidated)) ? 0 : -EAGAIN;
394 }
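
/*
 * Illustrative usage sketch: the result is only meaningful while holding the
 * notifier lock, typically in read mode:
 *
 *	down_read(&vm->userptr.notifier_lock);
 *	if (__xe_vm_userptr_needs_repin(vm)) {
 *		up_read(&vm->userptr.notifier_lock);
 *		err = -EAGAIN;
 *		goto out_repin;
 *	}
 *	up_read(&vm->userptr.notifier_lock);
 *
 * where the -EAGAIN path would drop the outer locks, call xe_vm_userptr_pin()
 * and retry (out_repin is a hypothetical label).
 */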
395
396 /**
397  * xe_vm_lock_dma_resv() - Lock the vm dma_resv object and the dma_resv
398  * objects of the vm's external buffer objects.
399  * @vm: The vm.
400  * @ww: Pointer to a struct ww_acquire_ctx locking context.
401  * @tv_onstack: Array size XE_ONSTACK_TV of storage for the struct
402  * ttm_validate_buffers used for locking.
403  * @tv: Pointer to a pointer that on output contains the actual storage used.
404  * @objs: List head for the buffer objects locked.
405  * @intr: Whether to lock interruptible.
406  * @num_shared: Number of dma-fence slots to reserve in the locked objects.
407  *
408  * Locks the vm dma-resv objects and all the dma-resv objects of the
409  * buffer objects on the vm external object list. The TTM utilities require
410  * a list of struct ttm_validate_buffers pointing to the actual buffer
411  * objects to lock. Storage for those struct ttm_validate_buffers should
412  * be provided in @tv_onstack, and is typically reserved on the stack
413  * of the caller. If the size of @tv_onstack isn't sufficient, then
414  * storage will be allocated internally using kvmalloc().
415  *
416  * The function performs deadlock handling internally, and after a
417  * successful return the ww locking transaction should be considered
418  * sealed.
419  *
420  * Return: 0 on success, Negative error code on error. In particular if
421  * @intr is set to true, -EINTR or -ERESTARTSYS may be returned. In case
422  * of error, any locking performed has been reverted.
423  */
424 int xe_vm_lock_dma_resv(struct xe_vm *vm, struct ww_acquire_ctx *ww,
425                         struct ttm_validate_buffer *tv_onstack,
426                         struct ttm_validate_buffer **tv,
427                         struct list_head *objs,
428                         bool intr,
429                         unsigned int num_shared)
430 {
431         struct ttm_validate_buffer *tv_vm, *tv_bo;
432         struct xe_vma *vma, *next;
433         LIST_HEAD(dups);
434         int err;
435
436         lockdep_assert_held(&vm->lock);
437
438         if (vm->extobj.entries < XE_ONSTACK_TV) {
439                 tv_vm = tv_onstack;
440         } else {
441                 tv_vm = kvmalloc_array(vm->extobj.entries + 1, sizeof(*tv_vm),
442                                        GFP_KERNEL);
443                 if (!tv_vm)
444                         return -ENOMEM;
445         }
446         tv_bo = tv_vm + 1;
447
448         INIT_LIST_HEAD(objs);
449         list_for_each_entry(vma, &vm->extobj.list, extobj.link) {
450                 tv_bo->num_shared = num_shared;
451                 tv_bo->bo = &xe_vma_bo(vma)->ttm;
452
453                 list_add_tail(&tv_bo->head, objs);
454                 tv_bo++;
455         }
456         tv_vm->num_shared = num_shared;
457         tv_vm->bo = xe_vm_ttm_bo(vm);
458         list_add_tail(&tv_vm->head, objs);
459         err = ttm_eu_reserve_buffers(ww, objs, intr, &dups);
460         if (err)
461                 goto out_err;
462
463         spin_lock(&vm->notifier.list_lock);
464         list_for_each_entry_safe(vma, next, &vm->notifier.rebind_list,
465                                  notifier.rebind_link) {
466                 xe_bo_assert_held(xe_vma_bo(vma));
467
468                 list_del_init(&vma->notifier.rebind_link);
469                 if (vma->tile_present && !vma->destroyed)
470                         list_move_tail(&vma->rebind_link, &vm->rebind_list);
471         }
472         spin_unlock(&vm->notifier.list_lock);
473
474         *tv = tv_vm;
475         return 0;
476
477 out_err:
478         if (tv_vm != tv_onstack)
479                 kvfree(tv_vm);
480
481         return err;
482 }
483
484 /**
485  * xe_vm_unlock_dma_resv() - Unlock reservation objects locked by
486  * xe_vm_lock_dma_resv()
487  * @vm: The vm.
488  * @tv_onstack: The @tv_onstack array given to xe_vm_lock_dma_resv().
489  * @tv: The value of *@tv given by xe_vm_lock_dma_resv().
490  * @ww: The ww_acquire_context used for locking.
491  * @objs: The list returned from xe_vm_lock_dma_resv().
492  *
493  * Unlocks the reservation objects and frees any memory allocated by
494  * xe_vm_lock_dma_resv().
495  */
496 void xe_vm_unlock_dma_resv(struct xe_vm *vm,
497                            struct ttm_validate_buffer *tv_onstack,
498                            struct ttm_validate_buffer *tv,
499                            struct ww_acquire_ctx *ww,
500                            struct list_head *objs)
501 {
502         /*
503          * Nothing should've been able to enter the list while we were locked,
504          * since we've held the dma-resvs of all the vm's external objects,
505          * and holding the dma_resv of an object is required for list
506          * addition, and we shouldn't add ourselves.
507          */
508         XE_WARN_ON(!list_empty(&vm->notifier.rebind_list));
509
510         ttm_eu_backoff_reservation(ww, objs);
511         if (tv && tv != tv_onstack)
512                 kvfree(tv);
513 }
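
/*
 * Illustrative usage sketch of the lock/unlock pair (mirrors the rebind
 * worker below):
 *
 *	struct ttm_validate_buffer tv_onstack[XE_ONSTACK_TV];
 *	struct ttm_validate_buffer *tv;
 *	struct ww_acquire_ctx ww;
 *	struct list_head objs;
 *	int err;
 *
 *	err = xe_vm_lock_dma_resv(vm, &ww, tv_onstack, &tv, &objs, true, 1);
 *	if (err)
 *		return err;
 *
 *	...validate bos and install fences here...
 *
 *	xe_vm_unlock_dma_resv(vm, tv_onstack, tv, &ww, &objs);
 */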
514
515 #define XE_VM_REBIND_RETRY_TIMEOUT_MS 1000
516
517 static void xe_vm_kill(struct xe_vm *vm)
518 {
519         struct ww_acquire_ctx ww;
520         struct xe_engine *e;
521
522         lockdep_assert_held(&vm->lock);
523
524         xe_vm_lock(vm, &ww, 0, false);
525         vm->flags |= XE_VM_FLAG_BANNED;
526         trace_xe_vm_kill(vm);
527
528         list_for_each_entry(e, &vm->preempt.engines, compute.link)
529                 e->ops->kill(e);
530         xe_vm_unlock(vm, &ww);
531
532         /* TODO: Inform user the VM is banned */
533 }
534
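/*
 * Rebind worker for compute-mode vms: repins any invalidated userptrs, locks
 * the vm and external-object reservations, waits out the existing preempt
 * fences, validates evicted bos and rebinds them, waits for in-flight
 * munmap-style unbinds and finally arms and reinstalls fresh preempt fences.
 * -EAGAIN and (for a bounded time) -ENOMEM trigger a retry; any other error
 * bans the vm.
 */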
535 static void preempt_rebind_work_func(struct work_struct *w)
536 {
537         struct xe_vm *vm = container_of(w, struct xe_vm, preempt.rebind_work);
538         struct xe_vma *vma;
539         struct ttm_validate_buffer tv_onstack[XE_ONSTACK_TV];
540         struct ttm_validate_buffer *tv;
541         struct ww_acquire_ctx ww;
542         struct list_head objs;
543         struct dma_fence *rebind_fence;
544         unsigned int fence_count = 0;
545         LIST_HEAD(preempt_fences);
546         ktime_t end = 0;
547         int err;
548         long wait;
549         int __maybe_unused tries = 0;
550
551         XE_BUG_ON(!xe_vm_in_compute_mode(vm));
552         trace_xe_vm_rebind_worker_enter(vm);
553
554         down_write(&vm->lock);
555
556         if (xe_vm_is_closed_or_banned(vm)) {
557                 up_write(&vm->lock);
558                 trace_xe_vm_rebind_worker_exit(vm);
559                 return;
560         }
561
562 retry:
563         if (vm->async_ops.error)
564                 goto out_unlock_outer;
565
566         /*
567          * Extreme corner where we exit a VM error state with a munmap style VM
568          * unbind inflight which requires a rebind. In this case the rebind
569          * needs to install some fences into the dma-resv slots. The worker to
570          * do this is already queued; let that worker make progress by
571          * dropping vm->lock and trying this again.
572          */
573         if (vm->async_ops.munmap_rebind_inflight) {
574                 up_write(&vm->lock);
575                 flush_work(&vm->async_ops.work);
576                 goto retry;
577         }
578
579         if (xe_vm_userptr_check_repin(vm)) {
580                 err = xe_vm_userptr_pin(vm);
581                 if (err)
582                         goto out_unlock_outer;
583         }
584
585         err = xe_vm_lock_dma_resv(vm, &ww, tv_onstack, &tv, &objs,
586                                   false, vm->preempt.num_engines);
587         if (err)
588                 goto out_unlock_outer;
589
590         if (xe_vm_is_idle(vm)) {
591                 vm->preempt.rebind_deactivated = true;
592                 goto out_unlock;
593         }
594
595         /* Fresh preempt fences already installed. Everything is running. */
596         if (!preempt_fences_waiting(vm))
597                 goto out_unlock;
598
599         /*
600          * This makes sure vm is completely suspended and also balances
601          * xe_engine suspend- and resume; we resume *all* vm engines below.
602          */
603         err = wait_for_existing_preempt_fences(vm);
604         if (err)
605                 goto out_unlock;
606
607         err = alloc_preempt_fences(vm, &preempt_fences, &fence_count);
608         if (err)
609                 goto out_unlock;
610
611         list_for_each_entry(vma, &vm->rebind_list, rebind_link) {
612                 if (xe_vma_has_no_bo(vma) || vma->destroyed)
613                         continue;
614
615                 err = xe_bo_validate(xe_vma_bo(vma), vm, false);
616                 if (err)
617                         goto out_unlock;
618         }
619
620         rebind_fence = xe_vm_rebind(vm, true);
621         if (IS_ERR(rebind_fence)) {
622                 err = PTR_ERR(rebind_fence);
623                 goto out_unlock;
624         }
625
626         if (rebind_fence) {
627                 dma_fence_wait(rebind_fence, false);
628                 dma_fence_put(rebind_fence);
629         }
630
631         /* Wait on munmap style VM unbinds */
632         wait = dma_resv_wait_timeout(&vm->resv,
633                                      DMA_RESV_USAGE_KERNEL,
634                                      false, MAX_SCHEDULE_TIMEOUT);
635         if (wait <= 0) {
636                 err = -ETIME;
637                 goto out_unlock;
638         }
639
640 #define retry_required(__tries, __vm) \
641         (IS_ENABLED(CONFIG_DRM_XE_USERPTR_INVAL_INJECT) ? \
642         (!(__tries)++ || __xe_vm_userptr_needs_repin(__vm)) : \
643         __xe_vm_userptr_needs_repin(__vm))
644
645         down_read(&vm->userptr.notifier_lock);
646         if (retry_required(tries, vm)) {
647                 up_read(&vm->userptr.notifier_lock);
648                 err = -EAGAIN;
649                 goto out_unlock;
650         }
651
652 #undef retry_required
653
654         spin_lock(&vm->xe->ttm.lru_lock);
655         ttm_lru_bulk_move_tail(&vm->lru_bulk_move);
656         spin_unlock(&vm->xe->ttm.lru_lock);
657
658         /* Point of no return. */
659         arm_preempt_fences(vm, &preempt_fences);
660         resume_and_reinstall_preempt_fences(vm);
661         up_read(&vm->userptr.notifier_lock);
662
663 out_unlock:
664         xe_vm_unlock_dma_resv(vm, tv_onstack, tv, &ww, &objs);
665 out_unlock_outer:
666         if (err == -EAGAIN) {
667                 trace_xe_vm_rebind_worker_retry(vm);
668                 goto retry;
669         }
670
671         /*
672          * With multiple active VMs, under memory pressure, it is possible that
673          * ttm_bo_validate() runs into -EDEADLK and in such a case returns -ENOMEM.
674          * Until ttm properly handles locking in such scenarios, the best thing the
675          * driver can do is retry with a timeout. Killing the VM or putting it
676          * in error state after timeout or other error scenarios is still TBD.
677          */
678         if (err == -ENOMEM) {
679                 ktime_t cur = ktime_get();
680
681                 end = end ? : ktime_add_ms(cur, XE_VM_REBIND_RETRY_TIMEOUT_MS);
682                 if (ktime_before(cur, end)) {
683                         msleep(20);
684                         trace_xe_vm_rebind_worker_retry(vm);
685                         goto retry;
686                 }
687         }
688         if (err) {
689                 drm_warn(&vm->xe->drm, "VM worker error: %d\n", err);
690                 xe_vm_kill(vm);
691         }
692         up_write(&vm->lock);
693
694         free_preempt_fences(&preempt_fences);
695
696         trace_xe_vm_rebind_worker_exit(vm);
697 }
698
699 struct async_op_fence;
700 static int __xe_vm_bind(struct xe_vm *vm, struct xe_vma *vma,
701                         struct xe_engine *e, struct xe_sync_entry *syncs,
702                         u32 num_syncs, struct async_op_fence *afence);
703
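/*
 * MMU interval notifier callback for userptr vmas: bumps the notifier
 * sequence number under the write-side notifier lock, queues the vma for
 * repin/rebind (outside of fault mode) if it is currently bound and not being
 * destroyed, then forces the preempt fences to signal (turning them into
 * schedule disables) and waits for all bookkeep fences on the vm. In fault
 * mode the GPU mappings are invalidated here as well.
 */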
704 static bool vma_userptr_invalidate(struct mmu_interval_notifier *mni,
705                                    const struct mmu_notifier_range *range,
706                                    unsigned long cur_seq)
707 {
708         struct xe_vma *vma = container_of(mni, struct xe_vma, userptr.notifier);
709         struct xe_vm *vm = xe_vma_vm(vma);
710         struct dma_resv_iter cursor;
711         struct dma_fence *fence;
712         long err;
713
714         XE_BUG_ON(!xe_vma_is_userptr(vma));
715         trace_xe_vma_userptr_invalidate(vma);
716
717         if (!mmu_notifier_range_blockable(range))
718                 return false;
719
720         down_write(&vm->userptr.notifier_lock);
721         mmu_interval_set_seq(mni, cur_seq);
722
723         /* No need to stop gpu access if the userptr is not yet bound. */
724         if (!vma->userptr.initial_bind) {
725                 up_write(&vm->userptr.notifier_lock);
726                 return true;
727         }
728
729         /*
730          * Tell exec and rebind worker they need to repin and rebind this
731          * userptr.
732          */
733         if (!xe_vm_in_fault_mode(vm) && !vma->destroyed && vma->tile_present) {
734                 spin_lock(&vm->userptr.invalidated_lock);
735                 list_move_tail(&vma->userptr.invalidate_link,
736                                &vm->userptr.invalidated);
737                 spin_unlock(&vm->userptr.invalidated_lock);
738         }
739
740         up_write(&vm->userptr.notifier_lock);
741
742         /*
743          * Preempt fences turn into schedule disables, pipeline these.
744          * Note that even in fault mode, we need to wait for binds and
745          * unbinds to complete, and those are attached as BOOKKEEP fences
746          * to the vm.
747          */
748         dma_resv_iter_begin(&cursor, &vm->resv,
749                             DMA_RESV_USAGE_BOOKKEEP);
750         dma_resv_for_each_fence_unlocked(&cursor, fence)
751                 dma_fence_enable_sw_signaling(fence);
752         dma_resv_iter_end(&cursor);
753
754         err = dma_resv_wait_timeout(&vm->resv,
755                                     DMA_RESV_USAGE_BOOKKEEP,
756                                     false, MAX_SCHEDULE_TIMEOUT);
757         XE_WARN_ON(err <= 0);
758
759         if (xe_vm_in_fault_mode(vm)) {
760                 err = xe_vm_invalidate_vma(vma);
761                 XE_WARN_ON(err);
762         }
763
764         trace_xe_vma_userptr_invalidate_complete(vma);
765
766         return true;
767 }
768
769 static const struct mmu_interval_notifier_ops vma_userptr_notifier_ops = {
770         .invalidate = vma_userptr_invalidate,
771 };
772
773 int xe_vm_userptr_pin(struct xe_vm *vm)
774 {
775         struct xe_vma *vma, *next;
776         int err = 0;
777         LIST_HEAD(tmp_evict);
778
779         lockdep_assert_held_write(&vm->lock);
780
781         /* Collect invalidated userptrs */
782         spin_lock(&vm->userptr.invalidated_lock);
783         list_for_each_entry_safe(vma, next, &vm->userptr.invalidated,
784                                  userptr.invalidate_link) {
785                 list_del_init(&vma->userptr.invalidate_link);
786                 list_move_tail(&vma->userptr_link, &vm->userptr.repin_list);
787         }
788         spin_unlock(&vm->userptr.invalidated_lock);
789
790         /* Pin and move to temporary list */
791         list_for_each_entry_safe(vma, next, &vm->userptr.repin_list, userptr_link) {
792                 err = xe_vma_userptr_pin_pages(vma);
793                 if (err < 0)
794                         goto out_err;
795
796                 list_move_tail(&vma->userptr_link, &tmp_evict);
797         }
798
799         /* Take lock and move to rebind_list for rebinding. */
800         err = dma_resv_lock_interruptible(&vm->resv, NULL);
801         if (err)
802                 goto out_err;
803
804         list_for_each_entry_safe(vma, next, &tmp_evict, userptr_link) {
805                 list_del_init(&vma->userptr_link);
806                 list_move_tail(&vma->rebind_link, &vm->rebind_list);
807         }
808
809         dma_resv_unlock(&vm->resv);
810
811         return 0;
812
813 out_err:
814         list_splice_tail(&tmp_evict, &vm->userptr.repin_list);
815
816         return err;
817 }
818
819 /**
820  * xe_vm_userptr_check_repin() - Check whether the VM might have userptrs
821  * that need repinning.
822  * @vm: The VM.
823  *
824  * This function does an advisory check for whether the VM has userptrs that
825  * need repinning.
826  *
827  * Return: 0 if there are no indications of userptrs needing repinning,
828  * -EAGAIN if there are.
829  */
830 int xe_vm_userptr_check_repin(struct xe_vm *vm)
831 {
832         return (list_empty_careful(&vm->userptr.repin_list) &&
833                 list_empty_careful(&vm->userptr.invalidated)) ? 0 : -EAGAIN;
834 }
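
/*
 * Illustrative usage sketch (as in the rebind worker above): use the advisory
 * check to decide whether pinning is worth attempting, then confirm under the
 * notifier lock before committing:
 *
 *	if (xe_vm_userptr_check_repin(vm)) {
 *		err = xe_vm_userptr_pin(vm);
 *		if (err)
 *			goto out;
 *	}
 */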
835
836 static struct dma_fence *
837 xe_vm_bind_vma(struct xe_vma *vma, struct xe_engine *e,
838                struct xe_sync_entry *syncs, u32 num_syncs);
839
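/**
 * xe_vm_rebind() - Rebind all vmas on the vm's rebind list
 * @vm: The vm.
 * @rebind_worker: Whether the call is made from the preempt rebind worker.
 *
 * Walks vm->rebind_list and rebinds each vma, emptying the list. For
 * compute-mode (no-dma-fence) vms the rebind is deferred to the rebind worker
 * unless @rebind_worker is set. Requires vm->lock and the vm reservation
 * object to be held.
 *
 * Return: The fence of the last rebind, NULL if there was nothing to rebind
 * (or rebinding was deferred), or an ERR_PTR() on failure.
 */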
840 struct dma_fence *xe_vm_rebind(struct xe_vm *vm, bool rebind_worker)
841 {
842         struct dma_fence *fence = NULL;
843         struct xe_vma *vma, *next;
844
845         lockdep_assert_held(&vm->lock);
846         if (xe_vm_no_dma_fences(vm) && !rebind_worker)
847                 return NULL;
848
849         xe_vm_assert_held(vm);
850         list_for_each_entry_safe(vma, next, &vm->rebind_list, rebind_link) {
851                 XE_WARN_ON(!vma->tile_present);
852
853                 list_del_init(&vma->rebind_link);
854                 dma_fence_put(fence);
855                 if (rebind_worker)
856                         trace_xe_vma_rebind_worker(vma);
857                 else
858                         trace_xe_vma_rebind_exec(vma);
859                 fence = xe_vm_bind_vma(vma, NULL, NULL, 0);
860                 if (IS_ERR(fence))
861                         return fence;
862         }
863
864         return fence;
865 }
866
867 static struct xe_vma *xe_vma_create(struct xe_vm *vm,
868                                     struct xe_bo *bo,
869                                     u64 bo_offset_or_userptr,
870                                     u64 start, u64 end,
871                                     bool read_only,
872                                     bool is_null,
873                                     u64 tile_mask)
874 {
875         struct xe_vma *vma;
876         struct xe_tile *tile;
877         u8 id;
878
879         XE_BUG_ON(start >= end);
880         XE_BUG_ON(end >= vm->size);
881
882         vma = kzalloc(sizeof(*vma), GFP_KERNEL);
883         if (!vma) {
884                 vma = ERR_PTR(-ENOMEM);
885                 return vma;
886         }
887
888         INIT_LIST_HEAD(&vma->rebind_link);
889         INIT_LIST_HEAD(&vma->unbind_link);
890         INIT_LIST_HEAD(&vma->userptr_link);
891         INIT_LIST_HEAD(&vma->userptr.invalidate_link);
892         INIT_LIST_HEAD(&vma->notifier.rebind_link);
893         INIT_LIST_HEAD(&vma->extobj.link);
894
895         vma->vm = vm;
896         vma->start = start;
897         vma->end = end;
898         vma->pte_flags = 0;
899         if (read_only)
900                 vma->pte_flags |= XE_PTE_FLAG_READ_ONLY;
901         if (is_null)
902                 vma->pte_flags |= XE_PTE_FLAG_NULL;
903
904         if (tile_mask) {
905                 vma->tile_mask = tile_mask;
906         } else {
907                 for_each_tile(tile, vm->xe, id)
908                         vma->tile_mask |= 0x1 << id;
909         }
910
911         if (vm->xe->info.platform == XE_PVC)
912                 vma->use_atomic_access_pte_bit = true;
913
914         if (bo) {
915                 xe_bo_assert_held(bo);
916                 vma->bo_offset = bo_offset_or_userptr;
917                 vma->bo = xe_bo_get(bo);
918                 list_add_tail(&vma->bo_link, &bo->vmas);
919         } else /* userptr or null */ {
920                 if (!is_null) {
921                         u64 size = end - start + 1;
922                         int err;
923
924                         vma->userptr.ptr = bo_offset_or_userptr;
925
926                         err = mmu_interval_notifier_insert(&vma->userptr.notifier,
927                                                            current->mm,
928                                                            xe_vma_userptr(vma), size,
929                                                            &vma_userptr_notifier_ops);
930                         if (err) {
931                                 kfree(vma);
932                                 vma = ERR_PTR(err);
933                                 return vma;
934                         }
935
936                         vma->userptr.notifier_seq = LONG_MAX;
937                 }
938
939                 xe_vm_get(vm);
940         }
941
942         return vma;
943 }
944
945 static bool vm_remove_extobj(struct xe_vma *vma)
946 {
947         if (!list_empty(&vma->extobj.link)) {
948                 xe_vma_vm(vma)->extobj.entries--;
949                 list_del_init(&vma->extobj.link);
950                 return true;
951         }
952         return false;
953 }
954
955 static void xe_vma_destroy_late(struct xe_vma *vma)
956 {
957         struct xe_vm *vm = xe_vma_vm(vma);
958         struct xe_device *xe = vm->xe;
959         bool read_only = xe_vma_read_only(vma);
960
961         if (xe_vma_is_userptr(vma)) {
962                 if (vma->userptr.sg) {
963                         dma_unmap_sgtable(xe->drm.dev,
964                                           vma->userptr.sg,
965                                           read_only ? DMA_TO_DEVICE :
966                                           DMA_BIDIRECTIONAL, 0);
967                         sg_free_table(vma->userptr.sg);
968                         vma->userptr.sg = NULL;
969                 }
970
971                 /*
972                  * Since userptr pages are not pinned, we can't remove
973                  * the notifier until we're sure the GPU is not accessing
974                  * them anymore
975                  */
976                 mmu_interval_notifier_remove(&vma->userptr.notifier);
977                 xe_vm_put(vm);
978         } else if (xe_vma_is_null(vma)) {
979                 xe_vm_put(vm);
980         } else {
981                 xe_bo_put(xe_vma_bo(vma));
982         }
983
984         kfree(vma);
985 }
986
987 static void vma_destroy_work_func(struct work_struct *w)
988 {
989         struct xe_vma *vma =
990                 container_of(w, struct xe_vma, destroy_work);
991
992         xe_vma_destroy_late(vma);
993 }
994
995 static struct xe_vma *
996 bo_has_vm_references_locked(struct xe_bo *bo, struct xe_vm *vm,
997                             struct xe_vma *ignore)
998 {
999         struct xe_vma *vma;
1000
1001         list_for_each_entry(vma, &bo->vmas, bo_link) {
1002                 if (vma != ignore && xe_vma_vm(vma) == vm)
1003                         return vma;
1004         }
1005
1006         return NULL;
1007 }
1008
1009 static bool bo_has_vm_references(struct xe_bo *bo, struct xe_vm *vm,
1010                                  struct xe_vma *ignore)
1011 {
1012         struct ww_acquire_ctx ww;
1013         bool ret;
1014
1015         xe_bo_lock(bo, &ww, 0, false);
1016         ret = !!bo_has_vm_references_locked(bo, vm, ignore);
1017         xe_bo_unlock(bo, &ww);
1018
1019         return ret;
1020 }
1021
1022 static void __vm_insert_extobj(struct xe_vm *vm, struct xe_vma *vma)
1023 {
1024         list_add(&vma->extobj.link, &vm->extobj.list);
1025         vm->extobj.entries++;
1026 }
1027
1028 static void vm_insert_extobj(struct xe_vm *vm, struct xe_vma *vma)
1029 {
1030         struct xe_bo *bo = xe_vma_bo(vma);
1031
1032         lockdep_assert_held_write(&vm->lock);
1033
1034         if (bo_has_vm_references(bo, vm, vma))
1035                 return;
1036
1037         __vm_insert_extobj(vm, vma);
1038 }
1039
1040 static void vma_destroy_cb(struct dma_fence *fence,
1041                            struct dma_fence_cb *cb)
1042 {
1043         struct xe_vma *vma = container_of(cb, struct xe_vma, destroy_cb);
1044
1045         INIT_WORK(&vma->destroy_work, vma_destroy_work_func);
1046         queue_work(system_unbound_wq, &vma->destroy_work);
1047 }
1048
1049 static void xe_vma_destroy(struct xe_vma *vma, struct dma_fence *fence)
1050 {
1051         struct xe_vm *vm = xe_vma_vm(vma);
1052
1053         lockdep_assert_held_write(&vm->lock);
1054         XE_BUG_ON(!list_empty(&vma->unbind_link));
1055
1056         if (xe_vma_is_userptr(vma)) {
1057                 XE_WARN_ON(!vma->destroyed);
1058                 spin_lock(&vm->userptr.invalidated_lock);
1059                 list_del_init(&vma->userptr.invalidate_link);
1060                 spin_unlock(&vm->userptr.invalidated_lock);
1061                 list_del(&vma->userptr_link);
1062         } else if (!xe_vma_is_null(vma)) {
1063                 xe_bo_assert_held(xe_vma_bo(vma));
1064                 list_del(&vma->bo_link);
1065
1066                 spin_lock(&vm->notifier.list_lock);
1067                 list_del(&vma->notifier.rebind_link);
1068                 spin_unlock(&vm->notifier.list_lock);
1069
1070                 if (!xe_vma_bo(vma)->vm && vm_remove_extobj(vma)) {
1071                         struct xe_vma *other;
1072
1073                         other = bo_has_vm_references_locked(xe_vma_bo(vma), vm, NULL);
1074
1075                         if (other)
1076                                 __vm_insert_extobj(vm, other);
1077                 }
1078         }
1079
1080         xe_vm_assert_held(vm);
1081         if (!list_empty(&vma->rebind_link))
1082                 list_del(&vma->rebind_link);
1083
1084         if (fence) {
1085                 int ret = dma_fence_add_callback(fence, &vma->destroy_cb,
1086                                                  vma_destroy_cb);
1087
1088                 if (ret) {
1089                         XE_WARN_ON(ret != -ENOENT);
1090                         xe_vma_destroy_late(vma);
1091                 }
1092         } else {
1093                 xe_vma_destroy_late(vma);
1094         }
1095 }
1096
1097 static void xe_vma_destroy_unlocked(struct xe_vma *vma)
1098 {
1099         struct ttm_validate_buffer tv[2];
1100         struct ww_acquire_ctx ww;
1101         struct xe_bo *bo = xe_vma_bo(vma);
1102         LIST_HEAD(objs);
1103         LIST_HEAD(dups);
1104         int err;
1105
1106         memset(tv, 0, sizeof(tv));
1107         tv[0].bo = xe_vm_ttm_bo(xe_vma_vm(vma));
1108         list_add(&tv[0].head, &objs);
1109
1110         if (bo) {
1111                 tv[1].bo = &xe_bo_get(bo)->ttm;
1112                 list_add(&tv[1].head, &objs);
1113         }
1114         err = ttm_eu_reserve_buffers(&ww, &objs, false, &dups);
1115         XE_WARN_ON(err);
1116
1117         xe_vma_destroy(vma, NULL);
1118
1119         ttm_eu_backoff_reservation(&ww, &objs);
1120         if (bo)
1121                 xe_bo_put(bo);
1122 }
1123
1124 static struct xe_vma *to_xe_vma(const struct rb_node *node)
1125 {
1126         BUILD_BUG_ON(offsetof(struct xe_vma, vm_node) != 0);
1127         return (struct xe_vma *)node;
1128 }
1129
1130 static int xe_vma_cmp(struct xe_vma *a, struct xe_vma *b)
1131 {
1132         if (xe_vma_end(a) - 1 < xe_vma_start(b)) {
1133                 return -1;
1134         } else if (xe_vma_end(b) - 1 < xe_vma_start(a)) {
1135                 return 1;
1136         } else {
1137                 return 0;
1138         }
1139 }
1140
1141 static bool xe_vma_less_cb(struct rb_node *a, const struct rb_node *b)
1142 {
1143         return xe_vma_cmp(to_xe_vma(a), to_xe_vma(b)) < 0;
1144 }
1145
1146 int xe_vma_cmp_vma_cb(const void *key, const struct rb_node *node)
1147 {
1148         struct xe_vma *cmp = to_xe_vma(node);
1149         struct xe_vma *own = (struct xe_vma *)key;
1150
1151         if (xe_vma_start(own) > xe_vma_end(cmp) - 1)
1152                 return 1;
1153
1154         if (xe_vma_end(own) - 1 < xe_vma_start(cmp))
1155                 return -1;
1156
1157         return 0;
1158 }
1159
1160 struct xe_vma *
1161 xe_vm_find_overlapping_vma(struct xe_vm *vm, struct xe_vma *vma)
1162 {
1163         struct rb_node *node;
1164
1165         lockdep_assert_held(&vm->lock);
1166
1167         if (xe_vm_is_closed_or_banned(vm))
1168                 return NULL;
1169
1170         XE_BUG_ON(xe_vma_end(vma) > vm->size);
1171
1172         node = rb_find(vma, &vm->vmas, xe_vma_cmp_vma_cb);
1173
1174         return node ? to_xe_vma(node) : NULL;
1175 }
1176
1177 static void xe_vm_insert_vma(struct xe_vm *vm, struct xe_vma *vma)
1178 {
1179         XE_BUG_ON(xe_vma_vm(vma) != vm);
1180         lockdep_assert_held(&vm->lock);
1181
1182         rb_add(&vma->vm_node, &vm->vmas, xe_vma_less_cb);
1183 }
1184
1185 static void xe_vm_remove_vma(struct xe_vm *vm, struct xe_vma *vma)
1186 {
1187         XE_BUG_ON(xe_vma_vm(vma) != vm);
1188         lockdep_assert_held(&vm->lock);
1189
1190         rb_erase(&vma->vm_node, &vm->vmas);
1191         if (vm->usm.last_fault_vma == vma)
1192                 vm->usm.last_fault_vma = NULL;
1193 }
1194
1195 static void async_op_work_func(struct work_struct *w);
1196 static void vm_destroy_work_func(struct work_struct *w);
1197
1198 struct xe_vm *xe_vm_create(struct xe_device *xe, u32 flags)
1199 {
1200         struct xe_vm *vm;
1201         int err, i = 0, number_tiles = 0;
1202         struct xe_tile *tile;
1203         u8 id;
1204
1205         vm = kzalloc(sizeof(*vm), GFP_KERNEL);
1206         if (!vm)
1207                 return ERR_PTR(-ENOMEM);
1208
1209         vm->xe = xe;
1210         kref_init(&vm->refcount);
1211         dma_resv_init(&vm->resv);
1212
1213         vm->size = 1ull << xe_pt_shift(xe->info.vm_max_level + 1);
1214
1215         vm->vmas = RB_ROOT;
1216         vm->flags = flags;
1217
1218         init_rwsem(&vm->lock);
1219
1220         INIT_LIST_HEAD(&vm->rebind_list);
1221
1222         INIT_LIST_HEAD(&vm->userptr.repin_list);
1223         INIT_LIST_HEAD(&vm->userptr.invalidated);
1224         init_rwsem(&vm->userptr.notifier_lock);
1225         spin_lock_init(&vm->userptr.invalidated_lock);
1226
1227         INIT_LIST_HEAD(&vm->notifier.rebind_list);
1228         spin_lock_init(&vm->notifier.list_lock);
1229
1230         INIT_LIST_HEAD(&vm->async_ops.pending);
1231         INIT_WORK(&vm->async_ops.work, async_op_work_func);
1232         spin_lock_init(&vm->async_ops.lock);
1233
1234         INIT_WORK(&vm->destroy_work, vm_destroy_work_func);
1235
1236         INIT_LIST_HEAD(&vm->preempt.engines);
1237         vm->preempt.min_run_period_ms = 10;     /* FIXME: Wire up to uAPI */
1238
1239         INIT_LIST_HEAD(&vm->extobj.list);
1240
1241         if (!(flags & XE_VM_FLAG_MIGRATION)) {
1242                 /* We need to immediately exit from any D3 state */
1243                 xe_pm_runtime_get(xe);
1244                 xe_device_mem_access_get(xe);
1245         }
1246
1247         err = dma_resv_lock_interruptible(&vm->resv, NULL);
1248         if (err)
1249                 goto err_put;
1250
1251         if (IS_DGFX(xe) && xe->info.vram_flags & XE_VRAM_FLAGS_NEED64K)
1252                 vm->flags |= XE_VM_FLAGS_64K;
1253
1254         for_each_tile(tile, xe, id) {
1255                 if (flags & XE_VM_FLAG_MIGRATION &&
1256                     tile->id != XE_VM_FLAG_GT_ID(flags))
1257                         continue;
1258
1259                 vm->pt_root[id] = xe_pt_create(vm, tile, xe->info.vm_max_level);
1260                 if (IS_ERR(vm->pt_root[id])) {
1261                         err = PTR_ERR(vm->pt_root[id]);
1262                         vm->pt_root[id] = NULL;
1263                         goto err_destroy_root;
1264                 }
1265         }
1266
1267         if (flags & XE_VM_FLAG_SCRATCH_PAGE) {
1268                 for_each_tile(tile, xe, id) {
1269                         if (!vm->pt_root[id])
1270                                 continue;
1271
1272                         err = xe_pt_create_scratch(xe, tile, vm);
1273                         if (err)
1274                                 goto err_scratch_pt;
1275                 }
1276                 vm->batch_invalidate_tlb = true;
1277         }
1278
1279         if (flags & XE_VM_FLAG_COMPUTE_MODE) {
1280                 INIT_WORK(&vm->preempt.rebind_work, preempt_rebind_work_func);
1281                 vm->flags |= XE_VM_FLAG_COMPUTE_MODE;
1282                 vm->batch_invalidate_tlb = false;
1283         }
1284
1285         if (flags & XE_VM_FLAG_ASYNC_BIND_OPS) {
1286                 vm->async_ops.fence.context = dma_fence_context_alloc(1);
1287                 vm->flags |= XE_VM_FLAG_ASYNC_BIND_OPS;
1288         }
1289
1290         /* Fill pt_root after allocating scratch tables */
1291         for_each_tile(tile, xe, id) {
1292                 if (!vm->pt_root[id])
1293                         continue;
1294
1295                 xe_pt_populate_empty(tile, vm, vm->pt_root[id]);
1296         }
1297         dma_resv_unlock(&vm->resv);
1298
1299         /* Kernel migration VM shouldn't have a circular loop. */
1300         if (!(flags & XE_VM_FLAG_MIGRATION)) {
1301                 for_each_tile(tile, xe, id) {
1302                         struct xe_gt *gt = tile->primary_gt;
1303                         struct xe_vm *migrate_vm;
1304                         struct xe_engine *eng;
1305
1306                         if (!vm->pt_root[id])
1307                                 continue;
1308
1309                         migrate_vm = xe_migrate_get_vm(tile->migrate);
1310                         eng = xe_engine_create_class(xe, gt, migrate_vm,
1311                                                      XE_ENGINE_CLASS_COPY,
1312                                                      ENGINE_FLAG_VM);
1313                         xe_vm_put(migrate_vm);
1314                         if (IS_ERR(eng)) {
1315                                 xe_vm_close_and_put(vm);
1316                                 return ERR_CAST(eng);
1317                         }
1318                         vm->eng[id] = eng;
1319                         number_tiles++;
1320                 }
1321         }
1322
1323         if (number_tiles > 1)
1324                 vm->composite_fence_ctx = dma_fence_context_alloc(1);
1325
1326         mutex_lock(&xe->usm.lock);
1327         if (flags & XE_VM_FLAG_FAULT_MODE)
1328                 xe->usm.num_vm_in_fault_mode++;
1329         else if (!(flags & XE_VM_FLAG_MIGRATION))
1330                 xe->usm.num_vm_in_non_fault_mode++;
1331         mutex_unlock(&xe->usm.lock);
1332
1333         trace_xe_vm_create(vm);
1334
1335         return vm;
1336
1337 err_scratch_pt:
1338         for_each_tile(tile, xe, id) {
1339                 if (!vm->pt_root[id])
1340                         continue;
1341
1342                 i = vm->pt_root[id]->level;
1343                 while (i)
1344                         if (vm->scratch_pt[id][--i])
1345                                 xe_pt_destroy(vm->scratch_pt[id][i],
1346                                               vm->flags, NULL);
1347                 xe_bo_unpin(vm->scratch_bo[id]);
1348                 xe_bo_put(vm->scratch_bo[id]);
1349         }
1350 err_destroy_root:
1351         for_each_tile(tile, xe, id) {
1352                 if (vm->pt_root[id])
1353                         xe_pt_destroy(vm->pt_root[id], vm->flags, NULL);
1354         }
1355         dma_resv_unlock(&vm->resv);
1356 err_put:
1357         dma_resv_fini(&vm->resv);
1358         kfree(vm);
1359         if (!(flags & XE_VM_FLAG_MIGRATION)) {
1360                 xe_device_mem_access_put(xe);
1361                 xe_pm_runtime_put(xe);
1362         }
1363         return ERR_PTR(err);
1364 }
1365
1366 static void flush_async_ops(struct xe_vm *vm)
1367 {
1368         queue_work(system_unbound_wq, &vm->async_ops.work);
1369         flush_work(&vm->async_ops.work);
1370 }
1371
1372 static void vm_error_capture(struct xe_vm *vm, int err,
1373                              u32 op, u64 addr, u64 size)
1374 {
1375         struct drm_xe_vm_bind_op_error_capture capture;
1376         u64 __user *address =
1377                 u64_to_user_ptr(vm->async_ops.error_capture.addr);
1378         bool in_kthread = !current->mm;
1379
1380         capture.error = err;
1381         capture.op = op;
1382         capture.addr = addr;
1383         capture.size = size;
1384
1385         if (in_kthread) {
1386                 if (!mmget_not_zero(vm->async_ops.error_capture.mm))
1387                         goto mm_closed;
1388                 kthread_use_mm(vm->async_ops.error_capture.mm);
1389         }
1390
1391         if (copy_to_user(address, &capture, sizeof(capture)))
1392                 XE_WARN_ON("Copy to user failed");
1393
1394         if (in_kthread) {
1395                 kthread_unuse_mm(vm->async_ops.error_capture.mm);
1396                 mmput(vm->async_ops.error_capture.mm);
1397         }
1398
1399 mm_closed:
1400         wake_up_all(&vm->async_ops.error_capture.wq);
1401 }
1402
1403 static void xe_vm_close(struct xe_vm *vm)
1404 {
1405         down_write(&vm->lock);
1406         vm->size = 0;
1407         up_write(&vm->lock);
1408 }
1409
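/*
 * Tears down a vm on behalf of the creator's reference: marks the vm closed,
 * flushes pending async bind ops and the rebind worker, kills and releases
 * the vm's per-tile engines, destroys all vmas and scratch page tables, and
 * finally drops the vm reference.
 */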
1410 void xe_vm_close_and_put(struct xe_vm *vm)
1411 {
1412         struct rb_root contested = RB_ROOT;
1413         struct ww_acquire_ctx ww;
1414         struct xe_device *xe = vm->xe;
1415         struct xe_tile *tile;
1416         u8 id;
1417
1418         XE_BUG_ON(vm->preempt.num_engines);
1419
1420         xe_vm_close(vm);
1421
1422         flush_async_ops(vm);
1423         if (xe_vm_in_compute_mode(vm))
1424                 flush_work(&vm->preempt.rebind_work);
1425
1426         for_each_tile(tile, xe, id) {
1427                 if (vm->eng[id]) {
1428                         xe_engine_kill(vm->eng[id]);
1429                         xe_engine_put(vm->eng[id]);
1430                         vm->eng[id] = NULL;
1431                 }
1432         }
1433
1434         down_write(&vm->lock);
1435         xe_vm_lock(vm, &ww, 0, false);
1436         while (vm->vmas.rb_node) {
1437                 struct xe_vma *vma = to_xe_vma(vm->vmas.rb_node);
1438
1439                 if (xe_vma_has_no_bo(vma)) {
1440                         down_read(&vm->userptr.notifier_lock);
1441                         vma->destroyed = true;
1442                         up_read(&vm->userptr.notifier_lock);
1443                 }
1444
1445                 rb_erase(&vma->vm_node, &vm->vmas);
1446
1447                 /* easy case, remove from VMA? */
1448                 if (xe_vma_has_no_bo(vma) || xe_vma_bo(vma)->vm) {
1449                         xe_vma_destroy(vma, NULL);
1450                         continue;
1451                 }
1452
1453                 rb_add(&vma->vm_node, &contested, xe_vma_less_cb);
1454         }
1455
1456         /*
1457          * All vm operations will add shared fences to resv.
1458          * The only exception is eviction for a shared object,
1459          * but even so, the unbind when evicted would still
1460          * install a fence to resv. Hence it's safe to
1461          * destroy the pagetables immediately.
1462          */
1463         for_each_tile(tile, xe, id) {
1464                 if (vm->scratch_bo[id]) {
1465                         u32 i;
1466
1467                         xe_bo_unpin(vm->scratch_bo[id]);
1468                         xe_bo_put(vm->scratch_bo[id]);
1469                         for (i = 0; i < vm->pt_root[id]->level; i++)
1470                                 xe_pt_destroy(vm->scratch_pt[id][i], vm->flags,
1471                                               NULL);
1472                 }
1473         }
1474         xe_vm_unlock(vm, &ww);
1475
1476         if (contested.rb_node) {
1477
1478                 /*
1479                  * VM is now dead, cannot re-add nodes to vm->vmas if it's NULL.
1480                  * Since we hold a refcount to the bo, we can remove and free
1481                  * the members safely without locking.
1482                  */
1483                 while (contested.rb_node) {
1484                         struct xe_vma *vma = to_xe_vma(contested.rb_node);
1485
1486                         rb_erase(&vma->vm_node, &contested);
1487                         xe_vma_destroy_unlocked(vma);
1488                 }
1489         }
1490
1491         if (vm->async_ops.error_capture.addr)
1492                 wake_up_all(&vm->async_ops.error_capture.wq);
1493
1494         XE_WARN_ON(!list_empty(&vm->extobj.list));
1495         up_write(&vm->lock);
1496
1497         mutex_lock(&xe->usm.lock);
1498         if (vm->flags & XE_VM_FLAG_FAULT_MODE)
1499                 xe->usm.num_vm_in_fault_mode--;
1500         else if (!(vm->flags & XE_VM_FLAG_MIGRATION))
1501                 xe->usm.num_vm_in_non_fault_mode--;
1502         mutex_unlock(&xe->usm.lock);
1503
1504         xe_vm_put(vm);
1505 }
1506
1507 static void vm_destroy_work_func(struct work_struct *w)
1508 {
1509         struct xe_vm *vm =
1510                 container_of(w, struct xe_vm, destroy_work);
1511         struct ww_acquire_ctx ww;
1512         struct xe_device *xe = vm->xe;
1513         struct xe_tile *tile;
1514         u8 id;
1515         void *lookup;
1516
1517         /* xe_vm_close_and_put was not called? */
1518         XE_WARN_ON(vm->size);
1519
1520         if (!(vm->flags & XE_VM_FLAG_MIGRATION)) {
1521                 xe_device_mem_access_put(xe);
1522                 xe_pm_runtime_put(xe);
1523
1524                 if (xe->info.has_asid) {
1525                         mutex_lock(&xe->usm.lock);
1526                         lookup = xa_erase(&xe->usm.asid_to_vm, vm->usm.asid);
1527                         XE_WARN_ON(lookup != vm);
1528                         mutex_unlock(&xe->usm.lock);
1529                 }
1530         }
1531
1532         /*
1533          * XXX: We delay destroying the PT root until the VM is freed as the PT root
1534          * is needed for xe_vm_lock to work. If we remove that dependency this
1535          * can be moved to xe_vm_close_and_put.
1536          */
1537         xe_vm_lock(vm, &ww, 0, false);
1538         for_each_tile(tile, xe, id) {
1539                 if (vm->pt_root[id]) {
1540                         xe_pt_destroy(vm->pt_root[id], vm->flags, NULL);
1541                         vm->pt_root[id] = NULL;
1542                 }
1543         }
1544         xe_vm_unlock(vm, &ww);
1545
1546         trace_xe_vm_free(vm);
1547         dma_fence_put(vm->rebind_fence);
1548         dma_resv_fini(&vm->resv);
1549         kfree(vm);
1550 }
1551
1552 void xe_vm_free(struct kref *ref)
1553 {
1554         struct xe_vm *vm = container_of(ref, struct xe_vm, refcount);
1555
1556         /* To destroy the VM we need to be able to sleep */
1557         queue_work(system_unbound_wq, &vm->destroy_work);
1558 }
1559
1560 struct xe_vm *xe_vm_lookup(struct xe_file *xef, u32 id)
1561 {
1562         struct xe_vm *vm;
1563
1564         mutex_lock(&xef->vm.lock);
1565         vm = xa_load(&xef->vm.xa, id);
1566         if (vm)
1567                 xe_vm_get(vm);
1568         mutex_unlock(&xef->vm.lock);
1569
1570         return vm;
1571 }
1572
1573 u64 xe_vm_pdp4_descriptor(struct xe_vm *vm, struct xe_tile *tile)
1574 {
1575         return xe_pde_encode(vm->pt_root[tile->id]->bo, 0,
1576                              XE_CACHE_WB);
1577 }
1578
1579 static struct dma_fence *
1580 xe_vm_unbind_vma(struct xe_vma *vma, struct xe_engine *e,
1581                  struct xe_sync_entry *syncs, u32 num_syncs)
1582 {
1583         struct xe_tile *tile;
1584         struct dma_fence *fence = NULL;
1585         struct dma_fence **fences = NULL;
1586         struct dma_fence_array *cf = NULL;
1587         struct xe_vm *vm = xe_vma_vm(vma);
1588         int cur_fence = 0, i;
1589         int number_tiles = hweight_long(vma->tile_present);
1590         int err;
1591         u8 id;
1592
1593         trace_xe_vma_unbind(vma);
1594
1595         if (number_tiles > 1) {
1596                 fences = kmalloc_array(number_tiles, sizeof(*fences),
1597                                        GFP_KERNEL);
1598                 if (!fences)
1599                         return ERR_PTR(-ENOMEM);
1600         }
1601
1602         for_each_tile(tile, vm->xe, id) {
1603                 if (!(vma->tile_present & BIT(id)))
1604                         goto next;
1605
1606                 fence = __xe_pt_unbind_vma(tile, vma, e, syncs, num_syncs);
1607                 if (IS_ERR(fence)) {
1608                         err = PTR_ERR(fence);
1609                         goto err_fences;
1610                 }
1611
1612                 if (fences)
1613                         fences[cur_fence++] = fence;
1614
1615 next:
1616                 if (e && vm->pt_root[id] && !list_empty(&e->multi_gt_list))
1617                         e = list_next_entry(e, multi_gt_list);
1618         }
1619
1620         if (fences) {
1621                 cf = dma_fence_array_create(number_tiles, fences,
1622                                             vm->composite_fence_ctx,
1623                                             vm->composite_fence_seqno++,
1624                                             false);
1625                 if (!cf) {
1626                         --vm->composite_fence_seqno;
1627                         err = -ENOMEM;
1628                         goto err_fences;
1629                 }
1630         }
1631
1632         for (i = 0; i < num_syncs; i++)
1633                 xe_sync_entry_signal(&syncs[i], NULL, cf ? &cf->base : fence);
1634
1635         return cf ? &cf->base : !fence ? dma_fence_get_stub() : fence;
1636
1637 err_fences:
1638         if (fences) {
1639                 while (cur_fence) {
1640                         /* FIXME: Rewind the previous binds? */
1641                         dma_fence_put(fences[--cur_fence]);
1642                 }
1643                 kfree(fences);
1644         }
1645
1646         return ERR_PTR(err);
1647 }
1648
1649 static struct dma_fence *
1650 xe_vm_bind_vma(struct xe_vma *vma, struct xe_engine *e,
1651                struct xe_sync_entry *syncs, u32 num_syncs)
1652 {
1653         struct xe_tile *tile;
1654         struct dma_fence *fence;
1655         struct dma_fence **fences = NULL;
1656         struct dma_fence_array *cf = NULL;
1657         struct xe_vm *vm = xe_vma_vm(vma);
1658         int cur_fence = 0, i;
1659         int number_tiles = hweight_long(vma->tile_mask);
1660         int err;
1661         u8 id;
1662
1663         trace_xe_vma_bind(vma);
1664
1665         if (number_tiles > 1) {
1666                 fences = kmalloc_array(number_tiles, sizeof(*fences),
1667                                        GFP_KERNEL);
1668                 if (!fences)
1669                         return ERR_PTR(-ENOMEM);
1670         }
1671
1672         for_each_tile(tile, vm->xe, id) {
1673                 if (!(vma->tile_mask & BIT(id)))
1674                         goto next;
1675
1676                 fence = __xe_pt_bind_vma(tile, vma, e, syncs, num_syncs,
1677                                          vma->tile_present & BIT(id));
1678                 if (IS_ERR(fence)) {
1679                         err = PTR_ERR(fence);
1680                         goto err_fences;
1681                 }
1682
1683                 if (fences)
1684                         fences[cur_fence++] = fence;
1685
1686 next:
1687                 if (e && vm->pt_root[id] && !list_empty(&e->multi_gt_list))
1688                         e = list_next_entry(e, multi_gt_list);
1689         }
1690
1691         if (fences) {
1692                 cf = dma_fence_array_create(number_tiles, fences,
1693                                             vm->composite_fence_ctx,
1694                                             vm->composite_fence_seqno++,
1695                                             false);
1696                 if (!cf) {
1697                         --vm->composite_fence_seqno;
1698                         err = -ENOMEM;
1699                         goto err_fences;
1700                 }
1701         }
1702
1703         for (i = 0; i < num_syncs; i++)
1704                 xe_sync_entry_signal(&syncs[i], NULL, cf ? &cf->base : fence);
1705
1706         return cf ? &cf->base : fence;
1707
1708 err_fences:
1709         if (fences) {
1710                 while (cur_fence) {
1711                         /* FIXME: Rewind the previous binds? */
1712                         dma_fence_put(fences[--cur_fence]);
1713                 }
1714                 kfree(fences);
1715         }
1716
1717         return ERR_PTR(err);
1718 }
1719
1720 struct async_op_fence {
1721         struct dma_fence fence;
1722         struct dma_fence *wait_fence;
1723         struct dma_fence_cb cb;
1724         struct xe_vm *vm;
1725         wait_queue_head_t wq;
1726         bool started;
1727 };
1728
1729 static const char *async_op_fence_get_driver_name(struct dma_fence *dma_fence)
1730 {
1731         return "xe";
1732 }
1733
1734 static const char *
1735 async_op_fence_get_timeline_name(struct dma_fence *dma_fence)
1736 {
1737         return "async_op_fence";
1738 }
1739
1740 static const struct dma_fence_ops async_op_fence_ops = {
1741         .get_driver_name = async_op_fence_get_driver_name,
1742         .get_timeline_name = async_op_fence_get_timeline_name,
1743 };
1744
1745 static void async_op_fence_cb(struct dma_fence *fence, struct dma_fence_cb *cb)
1746 {
1747         struct async_op_fence *afence =
1748                 container_of(cb, struct async_op_fence, cb);
1749
1750         afence->fence.error = afence->wait_fence->error;
1751         dma_fence_signal(&afence->fence);
1752         xe_vm_put(afence->vm);
1753         dma_fence_put(afence->wait_fence);
1754         dma_fence_put(&afence->fence);
1755 }
1756
1757 static void add_async_op_fence_cb(struct xe_vm *vm,
1758                                   struct dma_fence *fence,
1759                                   struct async_op_fence *afence)
1760 {
1761         int ret;
1762
1763         if (!xe_vm_no_dma_fences(vm)) {
1764                 afence->started = true;
1765                 smp_wmb();
1766                 wake_up_all(&afence->wq);
1767         }
1768
1769         afence->wait_fence = dma_fence_get(fence);
1770         afence->vm = xe_vm_get(vm);
1771         dma_fence_get(&afence->fence);
1772         ret = dma_fence_add_callback(fence, &afence->cb, async_op_fence_cb);
1773         if (ret == -ENOENT) {
1774                 afence->fence.error = afence->wait_fence->error;
1775                 dma_fence_signal(&afence->fence);
1776         }
1777         if (ret) {
1778                 xe_vm_put(vm);
1779                 dma_fence_put(afence->wait_fence);
1780                 dma_fence_put(&afence->fence);
1781         }
1782         XE_WARN_ON(ret && ret != -ENOENT);
1783 }
1784
1785 int xe_vm_async_fence_wait_start(struct dma_fence *fence)
1786 {
1787         if (fence->ops == &async_op_fence_ops) {
1788                 struct async_op_fence *afence =
1789                         container_of(fence, struct async_op_fence, fence);
1790
1791                 XE_BUG_ON(xe_vm_no_dma_fences(afence->vm));
1792
1793                 smp_rmb();
1794                 return wait_event_interruptible(afence->wq, afence->started);
1795         }
1796
1797         return 0;
1798 }
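/*
 * Illustrative caller sketch (assumption; the real call sites live elsewhere
 * in the driver): given an arbitrary dma_fence that may or may not be an
 * async_op_fence, wait until the bind op backing it has actually been picked
 * up by the async worker:
 *
 *      err = xe_vm_async_fence_wait_start(fence);
 *      if (err)
 *              return err;     // interrupted by a signal
 *
 * Fences that are not async_op_fences return 0 immediately.
 */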
1799
1800 static int __xe_vm_bind(struct xe_vm *vm, struct xe_vma *vma,
1801                         struct xe_engine *e, struct xe_sync_entry *syncs,
1802                         u32 num_syncs, struct async_op_fence *afence)
1803 {
1804         struct dma_fence *fence;
1805
1806         xe_vm_assert_held(vm);
1807
1808         fence = xe_vm_bind_vma(vma, e, syncs, num_syncs);
1809         if (IS_ERR(fence))
1810                 return PTR_ERR(fence);
1811         if (afence)
1812                 add_async_op_fence_cb(vm, fence, afence);
1813
1814         dma_fence_put(fence);
1815         return 0;
1816 }
1817
1818 static int xe_vm_bind(struct xe_vm *vm, struct xe_vma *vma, struct xe_engine *e,
1819                       struct xe_bo *bo, struct xe_sync_entry *syncs,
1820                       u32 num_syncs, struct async_op_fence *afence)
1821 {
1822         int err;
1823
1824         xe_vm_assert_held(vm);
1825         xe_bo_assert_held(bo);
1826
1827         if (bo) {
1828                 err = xe_bo_validate(bo, vm, true);
1829                 if (err)
1830                         return err;
1831         }
1832
1833         return __xe_vm_bind(vm, vma, e, syncs, num_syncs, afence);
1834 }
1835
1836 static int xe_vm_unbind(struct xe_vm *vm, struct xe_vma *vma,
1837                         struct xe_engine *e, struct xe_sync_entry *syncs,
1838                         u32 num_syncs, struct async_op_fence *afence)
1839 {
1840         struct dma_fence *fence;
1841
1842         xe_vm_assert_held(vm);
1843         xe_bo_assert_held(xe_vma_bo(vma));
1844
1845         fence = xe_vm_unbind_vma(vma, e, syncs, num_syncs);
1846         if (IS_ERR(fence))
1847                 return PTR_ERR(fence);
1848         if (afence)
1849                 add_async_op_fence_cb(vm, fence, afence);
1850
1851         xe_vma_destroy(vma, fence);
1852         dma_fence_put(fence);
1853
1854         return 0;
1855 }
1856
1857 static int vm_set_error_capture_address(struct xe_device *xe, struct xe_vm *vm,
1858                                         u64 value)
1859 {
1860         if (XE_IOCTL_ERR(xe, !value))
1861                 return -EINVAL;
1862
1863         if (XE_IOCTL_ERR(xe, !(vm->flags & XE_VM_FLAG_ASYNC_BIND_OPS)))
1864                 return -EOPNOTSUPP;
1865
1866         if (XE_IOCTL_ERR(xe, vm->async_ops.error_capture.addr))
1867                 return -EOPNOTSUPP;
1868
1869         vm->async_ops.error_capture.mm = current->mm;
1870         vm->async_ops.error_capture.addr = value;
1871         init_waitqueue_head(&vm->async_ops.error_capture.wq);
1872
1873         return 0;
1874 }
1875
1876 typedef int (*xe_vm_set_property_fn)(struct xe_device *xe, struct xe_vm *vm,
1877                                      u64 value);
1878
1879 static const xe_vm_set_property_fn vm_set_property_funcs[] = {
1880         [XE_VM_PROPERTY_BIND_OP_ERROR_CAPTURE_ADDRESS] =
1881                 vm_set_error_capture_address,
1882 };
1883
1884 static int vm_user_ext_set_property(struct xe_device *xe, struct xe_vm *vm,
1885                                     u64 extension)
1886 {
1887         u64 __user *address = u64_to_user_ptr(extension);
1888         struct drm_xe_ext_vm_set_property ext;
1889         int err;
1890
1891         err = __copy_from_user(&ext, address, sizeof(ext));
1892         if (XE_IOCTL_ERR(xe, err))
1893                 return -EFAULT;
1894
1895         if (XE_IOCTL_ERR(xe, ext.property >=
1896                          ARRAY_SIZE(vm_set_property_funcs)) ||
1897             XE_IOCTL_ERR(xe, ext.pad) ||
1898             XE_IOCTL_ERR(xe, ext.reserved[0] || ext.reserved[1]))
1899                 return -EINVAL;
1900
1901         return vm_set_property_funcs[ext.property](xe, vm, ext.value);
1902 }
1903
1904 typedef int (*xe_vm_user_extension_fn)(struct xe_device *xe, struct xe_vm *vm,
1905                                        u64 extension);
1906
1907 static const xe_vm_user_extension_fn vm_user_extension_funcs[] = {
1908         [XE_VM_EXTENSION_SET_PROPERTY] = vm_user_ext_set_property,
1909 };
1910
1911 #define MAX_USER_EXTENSIONS     16
1912 static int vm_user_extensions(struct xe_device *xe, struct xe_vm *vm,
1913                               u64 extensions, int ext_number)
1914 {
1915         u64 __user *address = u64_to_user_ptr(extensions);
1916         struct xe_user_extension ext;
1917         int err;
1918
1919         if (XE_IOCTL_ERR(xe, ext_number >= MAX_USER_EXTENSIONS))
1920                 return -E2BIG;
1921
1922         err = __copy_from_user(&ext, address, sizeof(ext));
1923         if (XE_IOCTL_ERR(xe, err))
1924                 return -EFAULT;
1925
1926         if (XE_IOCTL_ERR(xe, ext.pad) ||
1927             XE_IOCTL_ERR(xe, ext.name >=
1928                          ARRAY_SIZE(vm_user_extension_funcs)))
1929                 return -EINVAL;
1930
1931         err = vm_user_extension_funcs[ext.name](xe, vm, extensions);
1932         if (XE_IOCTL_ERR(xe, err))
1933                 return err;
1934
1935         if (ext.next_extension)
1936                 return vm_user_extensions(xe, vm, ext.next_extension,
1937                                           ++ext_number);
1938
1939         return 0;
1940 }
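/*
 * Illustrative userspace-side sketch of an extension chain (struct layout
 * beyond the fields dereferenced above, and capture_addr, are assumptions):
 *
 *      struct drm_xe_ext_vm_set_property ext = {
 *              .base.name = XE_VM_EXTENSION_SET_PROPERTY,
 *              .base.next_extension = 0,       // end of the chain
 *              .property = XE_VM_PROPERTY_BIND_OP_ERROR_CAPTURE_ADDRESS,
 *              .value = (uintptr_t)capture_addr,
 *      };
 *
 * and drm_xe_vm_create.extensions then points at &ext. Chains longer than
 * MAX_USER_EXTENSIONS are rejected with -E2BIG by the depth check above.
 */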
1941
1942 #define ALL_DRM_XE_VM_CREATE_FLAGS (DRM_XE_VM_CREATE_SCRATCH_PAGE | \
1943                                     DRM_XE_VM_CREATE_COMPUTE_MODE | \
1944                                     DRM_XE_VM_CREATE_ASYNC_BIND_OPS | \
1945                                     DRM_XE_VM_CREATE_FAULT_MODE)
1946
1947 int xe_vm_create_ioctl(struct drm_device *dev, void *data,
1948                        struct drm_file *file)
1949 {
1950         struct xe_device *xe = to_xe_device(dev);
1951         struct xe_file *xef = to_xe_file(file);
1952         struct drm_xe_vm_create *args = data;
1953         struct xe_vm *vm;
1954         u32 id, asid;
1955         int err;
1956         u32 flags = 0;
1957
1958         if (XE_IOCTL_ERR(xe, args->reserved[0] || args->reserved[1]))
1959                 return -EINVAL;
1960
1961         if (XE_IOCTL_ERR(xe, args->flags & ~ALL_DRM_XE_VM_CREATE_FLAGS))
1962                 return -EINVAL;
1963
1964         if (XE_IOCTL_ERR(xe, args->flags & DRM_XE_VM_CREATE_SCRATCH_PAGE &&
1965                          args->flags & DRM_XE_VM_CREATE_FAULT_MODE))
1966                 return -EINVAL;
1967
1968         if (XE_IOCTL_ERR(xe, args->flags & DRM_XE_VM_CREATE_COMPUTE_MODE &&
1969                          args->flags & DRM_XE_VM_CREATE_FAULT_MODE))
1970                 return -EINVAL;
1971
1972         if (XE_IOCTL_ERR(xe, args->flags & DRM_XE_VM_CREATE_FAULT_MODE &&
1973                          xe_device_in_non_fault_mode(xe)))
1974                 return -EINVAL;
1975
1976         if (XE_IOCTL_ERR(xe, !(args->flags & DRM_XE_VM_CREATE_FAULT_MODE) &&
1977                          xe_device_in_fault_mode(xe)))
1978                 return -EINVAL;
1979
1980         if (XE_IOCTL_ERR(xe, args->flags & DRM_XE_VM_CREATE_FAULT_MODE &&
1981                          !xe->info.supports_usm))
1982                 return -EINVAL;
1983
1984         if (args->flags & DRM_XE_VM_CREATE_SCRATCH_PAGE)
1985                 flags |= XE_VM_FLAG_SCRATCH_PAGE;
1986         if (args->flags & DRM_XE_VM_CREATE_COMPUTE_MODE)
1987                 flags |= XE_VM_FLAG_COMPUTE_MODE;
1988         if (args->flags & DRM_XE_VM_CREATE_ASYNC_BIND_OPS)
1989                 flags |= XE_VM_FLAG_ASYNC_BIND_OPS;
1990         if (args->flags & DRM_XE_VM_CREATE_FAULT_MODE)
1991                 flags |= XE_VM_FLAG_FAULT_MODE;
1992
1993         vm = xe_vm_create(xe, flags);
1994         if (IS_ERR(vm))
1995                 return PTR_ERR(vm);
1996
1997         if (args->extensions) {
1998                 err = vm_user_extensions(xe, vm, args->extensions, 0);
1999                 if (XE_IOCTL_ERR(xe, err)) {
2000                         xe_vm_close_and_put(vm);
2001                         return err;
2002                 }
2003         }
2004
2005         mutex_lock(&xef->vm.lock);
2006         err = xa_alloc(&xef->vm.xa, &id, vm, xa_limit_32b, GFP_KERNEL);
2007         mutex_unlock(&xef->vm.lock);
2008         if (err) {
2009                 xe_vm_close_and_put(vm);
2010                 return err;
2011         }
2012
2013         if (xe->info.has_asid) {
2014                 mutex_lock(&xe->usm.lock);
2015                 err = xa_alloc_cyclic(&xe->usm.asid_to_vm, &asid, vm,
2016                                       XA_LIMIT(0, XE_MAX_ASID - 1),
2017                                       &xe->usm.next_asid, GFP_KERNEL);
2018                 mutex_unlock(&xe->usm.lock);
2019                 if (err) {
2020                         xe_vm_close_and_put(vm);
2021                         return err;
2022                 }
2023                 vm->usm.asid = asid;
2024         }
2025
2026         args->vm_id = id;
2027
2028 #if IS_ENABLED(CONFIG_DRM_XE_DEBUG_MEM)
2029         /* Warning: Security issue - never enable by default */
2030         args->reserved[0] = xe_bo_main_addr(vm->pt_root[0]->bo, XE_PAGE_SIZE);
2031 #endif
2032
2033         return 0;
2034 }
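/*
 * Illustrative userspace-side sketch (the ioctl request macro name is an
 * assumption):
 *
 *      struct drm_xe_vm_create create = {
 *              .flags = DRM_XE_VM_CREATE_ASYNC_BIND_OPS,
 *      };
 *
 *      if (!ioctl(fd, DRM_IOCTL_XE_VM_CREATE, &create))
 *              vm_id = create.vm_id;   // handle for later bind/destroy ioctls
 */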
2035
2036 int xe_vm_destroy_ioctl(struct drm_device *dev, void *data,
2037                         struct drm_file *file)
2038 {
2039         struct xe_device *xe = to_xe_device(dev);
2040         struct xe_file *xef = to_xe_file(file);
2041         struct drm_xe_vm_destroy *args = data;
2042         struct xe_vm *vm;
2043         int err = 0;
2044
2045         if (XE_IOCTL_ERR(xe, args->pad) ||
2046             XE_IOCTL_ERR(xe, args->reserved[0] || args->reserved[1]))
2047                 return -EINVAL;
2048
2049         mutex_lock(&xef->vm.lock);
2050         vm = xa_load(&xef->vm.xa, args->vm_id);
2051         if (XE_IOCTL_ERR(xe, !vm))
2052                 err = -ENOENT;
2053         else if (XE_IOCTL_ERR(xe, vm->preempt.num_engines))
2054                 err = -EBUSY;
2055         else
2056                 xa_erase(&xef->vm.xa, args->vm_id);
2057         mutex_unlock(&xef->vm.lock);
2058
2059         if (!err)
2060                 xe_vm_close_and_put(vm);
2061
2062         return err;
2063 }
2064
2065 static const u32 region_to_mem_type[] = {
2066         XE_PL_TT,
2067         XE_PL_VRAM0,
2068         XE_PL_VRAM1,
2069 };
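/*
 * Descriptive note (added for clarity): the prefetch "region" argument from
 * userspace indexes this table, i.e. 0 selects system/TT memory while 1 and 2
 * select the VRAM instance of the respective tile. A region at or beyond
 * ARRAY_SIZE(region_to_mem_type) is a programming error (see the BUG_ON in
 * xe_vm_prefetch()).
 */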
2070
2071 static int xe_vm_prefetch(struct xe_vm *vm, struct xe_vma *vma,
2072                           struct xe_engine *e, u32 region,
2073                           struct xe_sync_entry *syncs, u32 num_syncs,
2074                           struct async_op_fence *afence)
2075 {
2076         int err;
2077
2078         XE_BUG_ON(region >= ARRAY_SIZE(region_to_mem_type));
2079
2080         if (!xe_vma_has_no_bo(vma)) {
2081                 err = xe_bo_migrate(xe_vma_bo(vma), region_to_mem_type[region]);
2082                 if (err)
2083                         return err;
2084         }
2085
2086         if (vma->tile_mask != (vma->tile_present & ~vma->usm.tile_invalidated)) {
2087                 return xe_vm_bind(vm, vma, e, xe_vma_bo(vma), syncs, num_syncs,
2088                                   afence);
2089         } else {
2090                 int i;
2091
2092                 /* Nothing to do, signal fences now */
2093                 for (i = 0; i < num_syncs; i++)
2094                         xe_sync_entry_signal(&syncs[i], NULL,
2095                                              dma_fence_get_stub());
2096                 if (afence)
2097                         dma_fence_signal(&afence->fence);
2098                 return 0;
2099         }
2100 }
2101
2102 #define VM_BIND_OP(op)  ((op) & 0xffff)
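/*
 * Illustrative note: a bind op value packs the opcode in the low 16 bits with
 * XE_VM_BIND_FLAG_* modifiers above them, e.g. (sketch):
 *
 *      u32 op = XE_VM_BIND_OP_MAP | XE_VM_BIND_FLAG_ASYNC;
 *
 *      VM_BIND_OP(op);         // == XE_VM_BIND_OP_MAP, flags masked off
 */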
2103
2104 static int __vm_bind_ioctl(struct xe_vm *vm, struct xe_vma *vma,
2105                            struct xe_engine *e, struct xe_bo *bo, u32 op,
2106                            u32 region, struct xe_sync_entry *syncs,
2107                            u32 num_syncs, struct async_op_fence *afence)
2108 {
2109         switch (VM_BIND_OP(op)) {
2110         case XE_VM_BIND_OP_MAP:
2111                 return xe_vm_bind(vm, vma, e, bo, syncs, num_syncs, afence);
2112         case XE_VM_BIND_OP_UNMAP:
2113         case XE_VM_BIND_OP_UNMAP_ALL:
2114                 return xe_vm_unbind(vm, vma, e, syncs, num_syncs, afence);
2115         case XE_VM_BIND_OP_MAP_USERPTR:
2116                 return xe_vm_bind(vm, vma, e, NULL, syncs, num_syncs, afence);
2117         case XE_VM_BIND_OP_PREFETCH:
2118                 return xe_vm_prefetch(vm, vma, e, region, syncs, num_syncs,
2119                                       afence);
2121         default:
2122                 XE_BUG_ON("NOT POSSIBLE");
2123                 return -EINVAL;
2124         }
2125 }
2126
2127 struct ttm_buffer_object *xe_vm_ttm_bo(struct xe_vm *vm)
2128 {
2129         int idx = vm->flags & XE_VM_FLAG_MIGRATION ?
2130                 XE_VM_FLAG_GT_ID(vm->flags) : 0;
2131
2132         /* Safe to use index 0 as all BOs in the VM share a single dma-resv lock */
2133         return &vm->pt_root[idx]->bo->ttm;
2134 }
2135
2136 static void xe_vm_tv_populate(struct xe_vm *vm, struct ttm_validate_buffer *tv)
2137 {
2138         tv->num_shared = 1;
2139         tv->bo = xe_vm_ttm_bo(vm);
2140 }
2141
2142 static bool is_map_op(u32 op)
2143 {
2144         return VM_BIND_OP(op) == XE_VM_BIND_OP_MAP ||
2145                 VM_BIND_OP(op) == XE_VM_BIND_OP_MAP_USERPTR;
2146 }
2147
2148 static bool is_unmap_op(u32 op)
2149 {
2150         return VM_BIND_OP(op) == XE_VM_BIND_OP_UNMAP ||
2151                 VM_BIND_OP(op) == XE_VM_BIND_OP_UNMAP_ALL;
2152 }
2153
2154 static int vm_bind_ioctl(struct xe_vm *vm, struct xe_vma *vma,
2155                          struct xe_engine *e, struct xe_bo *bo,
2156                          struct drm_xe_vm_bind_op *bind_op,
2157                          struct xe_sync_entry *syncs, u32 num_syncs,
2158                          struct async_op_fence *afence)
2159 {
2160         LIST_HEAD(objs);
2161         LIST_HEAD(dups);
2162         struct ttm_validate_buffer tv_bo, tv_vm;
2163         struct ww_acquire_ctx ww;
2164         struct xe_bo *vbo;
2165         int err, i;
2166
2167         lockdep_assert_held(&vm->lock);
2168         XE_BUG_ON(!list_empty(&vma->unbind_link));
2169
2170         /* Binds deferred to faults, signal fences now */
2171         if (xe_vm_in_fault_mode(vm) && is_map_op(bind_op->op) &&
2172             !(bind_op->op & XE_VM_BIND_FLAG_IMMEDIATE)) {
2173                 for (i = 0; i < num_syncs; i++)
2174                         xe_sync_entry_signal(&syncs[i], NULL,
2175                                              dma_fence_get_stub());
2176                 if (afence)
2177                         dma_fence_signal(&afence->fence);
2178                 return 0;
2179         }
2180
2181         xe_vm_tv_populate(vm, &tv_vm);
2182         list_add_tail(&tv_vm.head, &objs);
2183         vbo = xe_vma_bo(vma);
2184         if (vbo) {
2185                 /*
2186                  * An unbind can drop the last reference to the BO, but the
2187                  * BO is still needed for ttm_eu_backoff_reservation, so
2188                  * take a reference here.
2189                  */
2190                 xe_bo_get(vbo);
2191
2192                 if (!vbo->vm) {
2193                         tv_bo.bo = &vbo->ttm;
2194                         tv_bo.num_shared = 1;
2195                         list_add(&tv_bo.head, &objs);
2196                 }
2197         }
2198
2199 again:
2200         err = ttm_eu_reserve_buffers(&ww, &objs, true, &dups);
2201         if (!err) {
2202                 err = __vm_bind_ioctl(vm, vma, e, bo,
2203                                       bind_op->op, bind_op->region, syncs,
2204                                       num_syncs, afence);
2205                 ttm_eu_backoff_reservation(&ww, &objs);
2206                 if (err == -EAGAIN && xe_vma_is_userptr(vma)) {
2207                         lockdep_assert_held_write(&vm->lock);
2208                         err = xe_vma_userptr_pin_pages(vma);
2209                         if (!err)
2210                                 goto again;
2211                 }
2212         }
2213         xe_bo_put(vbo);
2214
2215         return err;
2216 }
2217
2218 struct async_op {
2219         struct xe_vma *vma;
2220         struct xe_engine *engine;
2221         struct xe_bo *bo;
2222         struct drm_xe_vm_bind_op bind_op;
2223         struct xe_sync_entry *syncs;
2224         u32 num_syncs;
2225         struct list_head link;
2226         struct async_op_fence *fence;
2227 };
2228
2229 static void async_op_cleanup(struct xe_vm *vm, struct async_op *op)
2230 {
2231         while (op->num_syncs--)
2232                 xe_sync_entry_cleanup(&op->syncs[op->num_syncs]);
2233         kfree(op->syncs);
2234         xe_bo_put(op->bo);
2235         if (op->engine)
2236                 xe_engine_put(op->engine);
2237         xe_vm_put(vm);
2238         if (op->fence)
2239                 dma_fence_put(&op->fence->fence);
2240         kfree(op);
2241 }
2242
2243 static struct async_op *next_async_op(struct xe_vm *vm)
2244 {
2245         return list_first_entry_or_null(&vm->async_ops.pending,
2246                                         struct async_op, link);
2247 }
2248
2249 static void vm_set_async_error(struct xe_vm *vm, int err)
2250 {
2251         lockdep_assert_held(&vm->lock);
2252         vm->async_ops.error = err;
2253 }
2254
2255 static void async_op_work_func(struct work_struct *w)
2256 {
2257         struct xe_vm *vm = container_of(w, struct xe_vm, async_ops.work);
2258
2259         for (;;) {
2260                 struct async_op *op;
2261                 int err;
2262
2263                 if (vm->async_ops.error && !xe_vm_is_closed(vm))
2264                         break;
2265
2266                 spin_lock_irq(&vm->async_ops.lock);
2267                 op = next_async_op(vm);
2268                 if (op)
2269                         list_del_init(&op->link);
2270                 spin_unlock_irq(&vm->async_ops.lock);
2271
2272                 if (!op)
2273                         break;
2274
2275                 if (!xe_vm_is_closed(vm)) {
2276                         bool first, last;
2277
2278                         down_write(&vm->lock);
2279 again:
2280                         first = op->vma->first_munmap_rebind;
2281                         last = op->vma->last_munmap_rebind;
2282 #ifdef TEST_VM_ASYNC_OPS_ERROR
2283 #define FORCE_ASYNC_OP_ERROR    BIT(31)
2284                         if (!(op->bind_op.op & FORCE_ASYNC_OP_ERROR)) {
2285                                 err = vm_bind_ioctl(vm, op->vma, op->engine,
2286                                                     op->bo, &op->bind_op,
2287                                                     op->syncs, op->num_syncs,
2288                                                     op->fence);
2289                         } else {
2290                                 err = -ENOMEM;
2291                                 op->bind_op.op &= ~FORCE_ASYNC_OP_ERROR;
2292                         }
2293 #else
2294                         err = vm_bind_ioctl(vm, op->vma, op->engine, op->bo,
2295                                             &op->bind_op, op->syncs,
2296                                             op->num_syncs, op->fence);
2297 #endif
2298                         /*
2299                          * In order for the fencing to work (stall behind
2300                          * existing jobs / prevent new jobs from running), all
2301                          * the dma-resv slots need to be programmed in a batch
2302                          * relative to execs / the rebind worker. The vm->lock
2303                          * ensures this.
2304                          */
2305                         if (!err && ((first && VM_BIND_OP(op->bind_op.op) ==
2306                                       XE_VM_BIND_OP_UNMAP) ||
2307                                      vm->async_ops.munmap_rebind_inflight)) {
2308                                 if (last) {
2309                                         op->vma->last_munmap_rebind = false;
2310                                         vm->async_ops.munmap_rebind_inflight =
2311                                                 false;
2312                                 } else {
2313                                         vm->async_ops.munmap_rebind_inflight =
2314                                                 true;
2315
2316                                         async_op_cleanup(vm, op);
2317
2318                                         spin_lock_irq(&vm->async_ops.lock);
2319                                         op = next_async_op(vm);
2320                                         XE_BUG_ON(!op);
2321                                         list_del_init(&op->link);
2322                                         spin_unlock_irq(&vm->async_ops.lock);
2323
2324                                         goto again;
2325                                 }
2326                         }
2327                         if (err) {
2328                                 trace_xe_vma_fail(op->vma);
2329                                 drm_warn(&vm->xe->drm, "Async VM op(%d) failed with %d",
2330                                          VM_BIND_OP(op->bind_op.op),
2331                                          err);
2332
2333                                 spin_lock_irq(&vm->async_ops.lock);
2334                                 list_add(&op->link, &vm->async_ops.pending);
2335                                 spin_unlock_irq(&vm->async_ops.lock);
2336
2337                                 vm_set_async_error(vm, err);
2338                                 up_write(&vm->lock);
2339
2340                                 if (vm->async_ops.error_capture.addr)
2341                                         vm_error_capture(vm, err,
2342                                                          op->bind_op.op,
2343                                                          op->bind_op.addr,
2344                                                          op->bind_op.range);
2345                                 break;
2346                         }
2347                         up_write(&vm->lock);
2348                 } else {
2349                         trace_xe_vma_flush(op->vma);
2350
2351                         if (is_unmap_op(op->bind_op.op)) {
2352                                 down_write(&vm->lock);
2353                                 xe_vma_destroy_unlocked(op->vma);
2354                                 up_write(&vm->lock);
2355                         }
2356
2357                         if (op->fence && !test_bit(DMA_FENCE_FLAG_SIGNALED_BIT,
2358                                                    &op->fence->fence.flags)) {
2359                                 if (!xe_vm_no_dma_fences(vm)) {
2360                                         op->fence->started = true;
2361                                         smp_wmb();
2362                                         wake_up_all(&op->fence->wq);
2363                                 }
2364                                 dma_fence_signal(&op->fence->fence);
2365                         }
2366                 }
2367
2368                 async_op_cleanup(vm, op);
2369         }
2370 }
2371
2372 static int __vm_bind_ioctl_async(struct xe_vm *vm, struct xe_vma *vma,
2373                                  struct xe_engine *e, struct xe_bo *bo,
2374                                  struct drm_xe_vm_bind_op *bind_op,
2375                                  struct xe_sync_entry *syncs, u32 num_syncs)
2376 {
2377         struct async_op *op;
2378         bool installed = false;
2379         u64 seqno;
2380         int i;
2381
2382         lockdep_assert_held(&vm->lock);
2383
2384         op = kmalloc(sizeof(*op), GFP_KERNEL);
2385         if (!op)
2386                 return -ENOMEM;
2388
2389         if (num_syncs) {
2390                 op->fence = kmalloc(sizeof(*op->fence), GFP_KERNEL);
2391                 if (!op->fence) {
2392                         kfree(op);
2393                         return -ENOMEM;
2394                 }
2395
2396                 seqno = e ? ++e->bind.fence_seqno : ++vm->async_ops.fence.seqno;
2397                 dma_fence_init(&op->fence->fence, &async_op_fence_ops,
2398                                &vm->async_ops.lock, e ? e->bind.fence_ctx :
2399                                vm->async_ops.fence.context, seqno);
2400
2401                 if (!xe_vm_no_dma_fences(vm)) {
2402                         op->fence->vm = vm;
2403                         op->fence->started = false;
2404                         init_waitqueue_head(&op->fence->wq);
2405                 }
2406         } else {
2407                 op->fence = NULL;
2408         }
2409         op->vma = vma;
2410         op->engine = e;
2411         op->bo = bo;
2412         op->bind_op = *bind_op;
2413         op->syncs = syncs;
2414         op->num_syncs = num_syncs;
2415         INIT_LIST_HEAD(&op->link);
2416
2417         for (i = 0; i < num_syncs; i++)
2418                 installed |= xe_sync_entry_signal(&syncs[i], NULL,
2419                                                   &op->fence->fence);
2420
2421         if (!installed && op->fence)
2422                 dma_fence_signal(&op->fence->fence);
2423
2424         spin_lock_irq(&vm->async_ops.lock);
2425         list_add_tail(&op->link, &vm->async_ops.pending);
2426         spin_unlock_irq(&vm->async_ops.lock);
2427
2428         if (!vm->async_ops.error)
2429                 queue_work(system_unbound_wq, &vm->async_ops.work);
2430
2431         return 0;
2432 }
2433
2434 static int vm_bind_ioctl_async(struct xe_vm *vm, struct xe_vma *vma,
2435                                struct xe_engine *e, struct xe_bo *bo,
2436                                struct drm_xe_vm_bind_op *bind_op,
2437                                struct xe_sync_entry *syncs, u32 num_syncs)
2438 {
2439         struct xe_vma *__vma, *next;
2440         struct list_head rebind_list;
2441         struct xe_sync_entry *in_syncs = NULL, *out_syncs = NULL;
2442         u32 num_in_syncs = 0, num_out_syncs = 0;
2443         bool first = true, last;
2444         int err;
2445         int i;
2446
2447         lockdep_assert_held(&vm->lock);
2448
2449         /* Not a linked list of unbinds + rebinds, easy */
2450         if (list_empty(&vma->unbind_link))
2451                 return __vm_bind_ioctl_async(vm, vma, e, bo, bind_op,
2452                                              syncs, num_syncs);
2453
2454         /*
2455          * Linked list of unbinds + rebinds: decompose the syncs into 'in / out',
2456          * passing the 'in' syncs to the first operation and the 'out' syncs to the
2457          * last. The reference counting is also a little tricky: increment the VM /
2458          * bind engine ref count on all but the last operation, and increment the
2459          * BO's ref count on each rebind.
2460          */
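        /*
         * For example (illustrative only): with syncs = { wait_a, signal_b },
         * wait_a lands in in_syncs and gates the first unbind of the chain,
         * while signal_b lands in out_syncs and is only signalled by the final
         * rebind, so userspace observes the whole munmap-style sequence as a
         * single operation.
         */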
2461
2462         XE_BUG_ON(VM_BIND_OP(bind_op->op) != XE_VM_BIND_OP_UNMAP &&
2463                   VM_BIND_OP(bind_op->op) != XE_VM_BIND_OP_UNMAP_ALL &&
2464                   VM_BIND_OP(bind_op->op) != XE_VM_BIND_OP_PREFETCH);
2465
2466         /* Decompose syncs */
2467         if (num_syncs) {
2468                 in_syncs = kmalloc(sizeof(*in_syncs) * num_syncs, GFP_KERNEL);
2469                 out_syncs = kmalloc(sizeof(*out_syncs) * num_syncs, GFP_KERNEL);
2470                 if (!in_syncs || !out_syncs) {
2471                         err = -ENOMEM;
2472                         goto out_error;
2473                 }
2474
2475                 for (i = 0; i < num_syncs; ++i) {
2476                         bool signal = syncs[i].flags & DRM_XE_SYNC_SIGNAL;
2477
2478                         if (signal)
2479                                 out_syncs[num_out_syncs++] = syncs[i];
2480                         else
2481                                 in_syncs[num_in_syncs++] = syncs[i];
2482                 }
2483         }
2484
2485         /* Do unbinds + move rebinds to new list */
2486         INIT_LIST_HEAD(&rebind_list);
2487         list_for_each_entry_safe(__vma, next, &vma->unbind_link, unbind_link) {
2488                 if (__vma->destroyed ||
2489                     VM_BIND_OP(bind_op->op) == XE_VM_BIND_OP_PREFETCH) {
2490                         list_del_init(&__vma->unbind_link);
2491                         xe_bo_get(bo);
2492                         err = __vm_bind_ioctl_async(xe_vm_get(vm), __vma,
2493                                                     e ? xe_engine_get(e) : NULL,
2494                                                     bo, bind_op, first ?
2495                                                     in_syncs : NULL,
2496                                                     first ? num_in_syncs : 0);
2497                         if (err) {
2498                                 xe_bo_put(bo);
2499                                 xe_vm_put(vm);
2500                                 if (e)
2501                                         xe_engine_put(e);
2502                                 goto out_error;
2503                         }
2504                         in_syncs = NULL;
2505                         first = false;
2506                 } else {
2507                         list_move_tail(&__vma->unbind_link, &rebind_list);
2508                 }
2509         }
2510         last = list_empty(&rebind_list);
2511         if (!last) {
2512                 xe_vm_get(vm);
2513                 if (e)
2514                         xe_engine_get(e);
2515         }
2516         err = __vm_bind_ioctl_async(vm, vma, e,
2517                                     bo, bind_op,
2518                                     first ? in_syncs :
2519                                     last ? out_syncs : NULL,
2520                                     first ? num_in_syncs :
2521                                     last ? num_out_syncs : 0);
2522         if (err) {
2523                 if (!last) {
2524                         xe_vm_put(vm);
2525                         if (e)
2526                                 xe_engine_put(e);
2527                 }
2528                 goto out_error;
2529         }
2530         in_syncs = NULL;
2531
2532         /* Do rebinds */
2533         list_for_each_entry_safe(__vma, next, &rebind_list, unbind_link) {
2534                 list_del_init(&__vma->unbind_link);
2535                 last = list_empty(&rebind_list);
2536
2537                 if (xe_vma_is_userptr(__vma)) {
2538                         bind_op->op = XE_VM_BIND_FLAG_ASYNC |
2539                                 XE_VM_BIND_OP_MAP_USERPTR;
2540                 } else {
2541                         bind_op->op = XE_VM_BIND_FLAG_ASYNC |
2542                                 XE_VM_BIND_OP_MAP;
2543                         xe_bo_get(xe_vma_bo(__vma));
2544                 }
2545
2546                 if (!last) {
2547                         xe_vm_get(vm);
2548                         if (e)
2549                                 xe_engine_get(e);
2550                 }
2551
2552                 err = __vm_bind_ioctl_async(vm, __vma, e,
2553                                             xe_vma_bo(__vma), bind_op, last ?
2554                                             out_syncs : NULL,
2555                                             last ? num_out_syncs : 0);
2556                 if (err) {
2557                         if (!last) {
2558                                 xe_vm_put(vm);
2559                                 if (e)
2560                                         xe_engine_put(e);
2561                         }
2562                         goto out_error;
2563                 }
2564         }
2565
2566         kfree(syncs);
2567         return 0;
2568
2569 out_error:
2570         kfree(in_syncs);
2571         kfree(out_syncs);
2572         kfree(syncs);
2573
2574         return err;
2575 }
2576
2577 static int __vm_bind_ioctl_lookup_vma(struct xe_vm *vm, struct xe_bo *bo,
2578                                       u64 addr, u64 range, u32 op)
2579 {
2580         struct xe_device *xe = vm->xe;
2581         struct xe_vma *vma, lookup;
2582         bool async = !!(op & XE_VM_BIND_FLAG_ASYNC);
2583
2584         lockdep_assert_held(&vm->lock);
2585
2586         lookup.start = addr;
2587         lookup.end = addr + range - 1;
2588
2589         switch (VM_BIND_OP(op)) {
2590         case XE_VM_BIND_OP_MAP:
2591         case XE_VM_BIND_OP_MAP_USERPTR:
2592                 vma = xe_vm_find_overlapping_vma(vm, &lookup);
2593                 if (XE_IOCTL_ERR(xe, vma))
2594                         return -EBUSY;
2595                 break;
2596         case XE_VM_BIND_OP_UNMAP:
2597         case XE_VM_BIND_OP_PREFETCH:
2598                 vma = xe_vm_find_overlapping_vma(vm, &lookup);
2599                 if (XE_IOCTL_ERR(xe, !vma) ||
2600                     XE_IOCTL_ERR(xe, (xe_vma_start(vma) != addr ||
2601                                  xe_vma_end(vma) != addr + range) && !async))
2602                         return -EINVAL;
2603                 break;
2604         case XE_VM_BIND_OP_UNMAP_ALL:
2605                 break;
2606         default:
2607                 XE_BUG_ON("NOT POSSIBLE");
2608                 return -EINVAL;
2609         }
2610
2611         return 0;
2612 }
2613
2614 static void prep_vma_destroy(struct xe_vm *vm, struct xe_vma *vma)
2615 {
2616         down_read(&vm->userptr.notifier_lock);
2617         vma->destroyed = true;
2618         up_read(&vm->userptr.notifier_lock);
2619         xe_vm_remove_vma(vm, vma);
2620 }
2621
2622 static int prep_replacement_vma(struct xe_vm *vm, struct xe_vma *vma)
2623 {
2624         int err;
2625
2626         if (xe_vma_bo(vma) && !xe_vma_bo(vma)->vm) {
2627                 vm_insert_extobj(vm, vma);
2628                 err = add_preempt_fences(vm, xe_vma_bo(vma));
2629                 if (err)
2630                         return err;
2631         }
2632
2633         return 0;
2634 }
2635
2636 /*
2637  * Find all overlapping VMAs in the lookup range and add them to a list in the
2638  * returned VMA; all of the VMAs found will be unbound. Also possibly add two
2639  * new VMAs that need to be bound if the first / last VMAs are not fully
2640  * unbound. This is akin to how munmap works.
2641  */
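/*
 * Worked example (illustrative addresses): if an existing VMA covers
 * [0x100000, 0x2fffff] and the lookup range unbinds [0x180000, 0x27ffff], the
 * old VMA is torn down and two replacement VMAs are created and bound:
 * new_first covering [0x100000, 0x17ffff] and new_last covering
 * [0x280000, 0x2fffff], mirroring how munmap() splits a mapping.
 */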
2642 static struct xe_vma *vm_unbind_lookup_vmas(struct xe_vm *vm,
2643                                             struct xe_vma *lookup)
2644 {
2645         struct xe_vma *vma = xe_vm_find_overlapping_vma(vm, lookup);
2646         struct rb_node *node;
2647         struct xe_vma *first = vma, *last = vma, *new_first = NULL,
2648                       *new_last = NULL, *__vma, *next;
2649         int err = 0;
2650         bool first_munmap_rebind = false;
2651
2652         lockdep_assert_held(&vm->lock);
2653         XE_BUG_ON(!vma);
2654
2655         node = &vma->vm_node;
2656         while ((node = rb_next(node))) {
2657                 if (!xe_vma_cmp_vma_cb(lookup, node)) {
2658                         __vma = to_xe_vma(node);
2659                         list_add_tail(&__vma->unbind_link, &vma->unbind_link);
2660                         last = __vma;
2661                 } else {
2662                         break;
2663                 }
2664         }
2665
2666         node = &vma->vm_node;
2667         while ((node = rb_prev(node))) {
2668                 if (!xe_vma_cmp_vma_cb(lookup, node)) {
2669                         __vma = to_xe_vma(node);
2670                         list_add(&__vma->unbind_link, &vma->unbind_link);
2671                         first = __vma;
2672                 } else {
2673                         break;
2674                 }
2675         }
2676
2677         if (xe_vma_start(first) != xe_vma_start(lookup)) {
2678                 struct ww_acquire_ctx ww;
2679
2680                 if (xe_vma_bo(first))
2681                         err = xe_bo_lock(xe_vma_bo(first), &ww, 0, true);
2682                 if (err)
2683                         goto unwind;
2684                 new_first = xe_vma_create(xe_vma_vm(first), xe_vma_bo(first),
2685                                           xe_vma_bo(first) ?
2686                                           xe_vma_bo_offset(first) :
2687                                           xe_vma_userptr(first),
2688                                           xe_vma_start(first),
2689                                           xe_vma_start(lookup) - 1,
2690                                           xe_vma_read_only(first),
2691                                           (first->pte_flags &
2692                                            XE_PTE_FLAG_NULL),
2693                                           first->tile_mask);
2694                 if (xe_vma_bo(first))
2695                         xe_bo_unlock(xe_vma_bo(first), &ww);
2696                 if (!new_first) {
2697                         err = -ENOMEM;
2698                         goto unwind;
2699                 }
2700                 if (xe_vma_is_userptr(first)) {
2701                         err = xe_vma_userptr_pin_pages(new_first);
2702                         if (err)
2703                                 goto unwind;
2704                 }
2705                 err = prep_replacement_vma(vm, new_first);
2706                 if (err)
2707                         goto unwind;
2708         }
2709
2710         if (xe_vma_end(last) != xe_vma_end(lookup)) {
2711                 struct ww_acquire_ctx ww;
2712                 u64 chunk = xe_vma_end(lookup) - xe_vma_start(last);
2713
2714                 if (xe_vma_bo(last))
2715                         err = xe_bo_lock(xe_vma_bo(last), &ww, 0, true);
2716                 if (err)
2717                         goto unwind;
2718                 new_last = xe_vma_create(xe_vma_vm(last), xe_vma_bo(last),
2719                                          xe_vma_bo(last) ?
2720                                          xe_vma_bo_offset(last) + chunk :
2721                                          xe_vma_userptr(last) + chunk,
2722                                          xe_vma_start(last) + chunk,
2723                                          xe_vma_end(last) - 1,
2724                                          xe_vma_read_only(last),
2725                                          (last->pte_flags & XE_PTE_FLAG_NULL),
2726                                          last->tile_mask);
2727                 if (xe_vma_bo(last))
2728                         xe_bo_unlock(xe_vma_bo(last), &ww);
2729                 if (!new_last) {
2730                         err = -ENOMEM;
2731                         goto unwind;
2732                 }
2733                 if (xe_vma_is_userptr(last)) {
2734                         err = xe_vma_userptr_pin_pages(new_last);
2735                         if (err)
2736                                 goto unwind;
2737                 }
2738                 err = prep_replacement_vma(vm, new_last);
2739                 if (err)
2740                         goto unwind;
2741         }
2742
2743         prep_vma_destroy(vm, vma);
2744         if (list_empty(&vma->unbind_link) && (new_first || new_last))
2745                 vma->first_munmap_rebind = true;
2746         list_for_each_entry(__vma, &vma->unbind_link, unbind_link) {
2747                 if ((new_first || new_last) && !first_munmap_rebind) {
2748                         __vma->first_munmap_rebind = true;
2749                         first_munmap_rebind = true;
2750                 }
2751                 prep_vma_destroy(vm, __vma);
2752         }
2753         if (new_first) {
2754                 xe_vm_insert_vma(vm, new_first);
2755                 list_add_tail(&new_first->unbind_link, &vma->unbind_link);
2756                 if (!new_last)
2757                         new_first->last_munmap_rebind = true;
2758         }
2759         if (new_last) {
2760                 xe_vm_insert_vma(vm, new_last);
2761                 list_add_tail(&new_last->unbind_link, &vma->unbind_link);
2762                 new_last->last_munmap_rebind = true;
2763         }
2764
2765         return vma;
2766
2767 unwind:
2768         list_for_each_entry_safe(__vma, next, &vma->unbind_link, unbind_link)
2769                 list_del_init(&__vma->unbind_link);
2770         if (new_last) {
2771                 prep_vma_destroy(vm, new_last);
2772                 xe_vma_destroy_unlocked(new_last);
2773         }
2774         if (new_first) {
2775                 prep_vma_destroy(vm, new_first);
2776                 xe_vma_destroy_unlocked(new_first);
2777         }
2778
2779         return ERR_PTR(err);
2780 }
2781
2782 /*
2783  * Similar to vm_unbind_lookup_vmas(), find all VMAs in the lookup range to prefetch.
2784  */
2785 static struct xe_vma *vm_prefetch_lookup_vmas(struct xe_vm *vm,
2786                                               struct xe_vma *lookup,
2787                                               u32 region)
2788 {
2789         struct xe_vma *vma = xe_vm_find_overlapping_vma(vm, lookup), *__vma,
2790                       *next;
2791         struct rb_node *node;
2792
2793         if (!xe_vma_has_no_bo(vma)) {
2794                 if (!xe_bo_can_migrate(xe_vma_bo(vma), region_to_mem_type[region]))
2795                         return ERR_PTR(-EINVAL);
2796         }
2797
2798         node = &vma->vm_node;
2799         while ((node = rb_next(node))) {
2800                 if (!xe_vma_cmp_vma_cb(lookup, node)) {
2801                         __vma = to_xe_vma(node);
2802                         if (!xe_vma_has_no_bo(__vma)) {
2803                                 if (!xe_bo_can_migrate(xe_vma_bo(__vma), region_to_mem_type[region]))
2804                                         goto flush_list;
2805                         }
2806                         list_add_tail(&__vma->unbind_link, &vma->unbind_link);
2807                 } else {
2808                         break;
2809                 }
2810         }
2811
2812         node = &vma->vm_node;
2813         while ((node = rb_prev(node))) {
2814                 if (!xe_vma_cmp_vma_cb(lookup, node)) {
2815                         __vma = to_xe_vma(node);
2816                         if (!xe_vma_has_no_bo(__vma)) {
2817                                 if (!xe_bo_can_migrate(xe_vma_bo(__vma), region_to_mem_type[region]))
2818                                         goto flush_list;
2819                         }
2820                         list_add(&__vma->unbind_link, &vma->unbind_link);
2821                 } else {
2822                         break;
2823                 }
2824         }
2825
2826         return vma;
2827
2828 flush_list:
2829         list_for_each_entry_safe(__vma, next, &vma->unbind_link,
2830                                  unbind_link)
2831                 list_del_init(&__vma->unbind_link);
2832
2833         return ERR_PTR(-EINVAL);
2834 }
2835
2836 static struct xe_vma *vm_unbind_all_lookup_vmas(struct xe_vm *vm,
2837                                                 struct xe_bo *bo)
2838 {
2839         struct xe_vma *first = NULL, *vma;
2840
2841         lockdep_assert_held(&vm->lock);
2842         xe_bo_assert_held(bo);
2843
2844         list_for_each_entry(vma, &bo->vmas, bo_link) {
2845                 if (xe_vma_vm(vma) != vm)
2846                         continue;
2847
2848                 prep_vma_destroy(vm, vma);
2849                 if (!first)
2850                         first = vma;
2851                 else
2852                         list_add_tail(&vma->unbind_link, &first->unbind_link);
2853         }
2854
2855         return first;
2856 }
2857
2858 static struct xe_vma *vm_bind_ioctl_lookup_vma(struct xe_vm *vm,
2859                                                struct xe_bo *bo,
2860                                                u64 bo_offset_or_userptr,
2861                                                u64 addr, u64 range, u32 op,
2862                                                u64 tile_mask, u32 region)
2863 {
2864         struct ww_acquire_ctx ww;
2865         struct xe_vma *vma, lookup;
2866         int err;
2867
2868         lockdep_assert_held(&vm->lock);
2869
2870         lookup.start = addr;
2871         lookup.end = addr + range - 1;
2872
2873         switch (VM_BIND_OP(op)) {
2874         case XE_VM_BIND_OP_MAP:
2875                 if (bo) {
2876                         err = xe_bo_lock(bo, &ww, 0, true);
2877                         if (err)
2878                                 return ERR_PTR(err);
2879                 }
2880                 vma = xe_vma_create(vm, bo, bo_offset_or_userptr, addr,
2881                                     addr + range - 1,
2882                                     op & XE_VM_BIND_FLAG_READONLY,
2883                                     op & XE_VM_BIND_FLAG_NULL,
2884                                     tile_mask);
2885                 if (bo)
2886                         xe_bo_unlock(bo, &ww);
2887                 if (!vma)
2888                         return ERR_PTR(-ENOMEM);
2889
2890                 xe_vm_insert_vma(vm, vma);
2891                 if (bo && !bo->vm) {
2892                         vm_insert_extobj(vm, vma);
2893                         err = add_preempt_fences(vm, bo);
2894                         if (err) {
2895                                 prep_vma_destroy(vm, vma);
2896                                 xe_vma_destroy_unlocked(vma);
2897
2898                                 return ERR_PTR(err);
2899                         }
2900                 }
2901                 break;
2902         case XE_VM_BIND_OP_UNMAP:
2903                 vma = vm_unbind_lookup_vmas(vm, &lookup);
2904                 break;
2905         case XE_VM_BIND_OP_PREFETCH:
2906                 vma = vm_prefetch_lookup_vmas(vm, &lookup, region);
2907                 break;
2908         case XE_VM_BIND_OP_UNMAP_ALL:
2909                 XE_BUG_ON(!bo);
2910
2911                 err = xe_bo_lock(bo, &ww, 0, true);
2912                 if (err)
2913                         return ERR_PTR(err);
2914                 vma = vm_unbind_all_lookup_vmas(vm, bo);
2915                 if (!vma)
2916                         vma = ERR_PTR(-EINVAL);
2917                 xe_bo_unlock(bo, &ww);
2918                 break;
2919         case XE_VM_BIND_OP_MAP_USERPTR:
2920                 XE_BUG_ON(bo);
2921
2922                 vma = xe_vma_create(vm, NULL, bo_offset_or_userptr, addr,
2923                                     addr + range - 1,
2924                                     op & XE_VM_BIND_FLAG_READONLY,
2925                                     op & XE_VM_BIND_FLAG_NULL,
2926                                     tile_mask);
2927                 if (!vma)
2928                         return ERR_PTR(-ENOMEM);
2929
2930                 err = xe_vma_userptr_pin_pages(vma);
2931                 if (err) {
2932                         prep_vma_destroy(vm, vma);
2933                         xe_vma_destroy_unlocked(vma);
2934
2935                         return ERR_PTR(err);
2936                 } else {
2937                         xe_vm_insert_vma(vm, vma);
2938                 }
2939                 break;
2940         default:
2941                 XE_BUG_ON("NOT POSSIBLE");
2942                 vma = ERR_PTR(-EINVAL);
2943         }
2944
2945         return vma;
2946 }
2947
2948 #ifdef TEST_VM_ASYNC_OPS_ERROR
2949 #define SUPPORTED_FLAGS \
2950         (FORCE_ASYNC_OP_ERROR | XE_VM_BIND_FLAG_ASYNC | \
2951          XE_VM_BIND_FLAG_READONLY | XE_VM_BIND_FLAG_IMMEDIATE | \
2952          XE_VM_BIND_FLAG_NULL | 0xffff)
2953 #else
2954 #define SUPPORTED_FLAGS \
2955         (XE_VM_BIND_FLAG_ASYNC | XE_VM_BIND_FLAG_READONLY | \
2956          XE_VM_BIND_FLAG_IMMEDIATE | XE_VM_BIND_FLAG_NULL | 0xffff)
2957 #endif
2958 #define XE_64K_PAGE_MASK 0xffffull
2959
2960 #define MAX_BINDS       512     /* FIXME: Picking random upper limit */
2961
2962 static int vm_bind_ioctl_check_args(struct xe_device *xe,
2963                                     struct drm_xe_vm_bind *args,
2964                                     struct drm_xe_vm_bind_op **bind_ops,
2965                                     bool *async)
2966 {
2967         int err;
2968         int i;
2969
2970         if (XE_IOCTL_ERR(xe, args->extensions) ||
2971             XE_IOCTL_ERR(xe, args->pad || args->pad2) ||
2972             XE_IOCTL_ERR(xe, args->reserved[0] || args->reserved[1]) ||
2973             XE_IOCTL_ERR(xe, !args->num_binds) ||
2974             XE_IOCTL_ERR(xe, args->num_binds > MAX_BINDS))
2975                 return -EINVAL;
2976
2977         if (args->num_binds > 1) {
2978                 u64 __user *bind_user =
2979                         u64_to_user_ptr(args->vector_of_binds);
2980
2981                 *bind_ops = kmalloc(sizeof(struct drm_xe_vm_bind_op) *
2982                                     args->num_binds, GFP_KERNEL);
2983                 if (!*bind_ops)
2984                         return -ENOMEM;
2985
2986                 err = copy_from_user(*bind_ops, bind_user,
2987                                      sizeof(struct drm_xe_vm_bind_op) *
2988                                      args->num_binds);
2989                 if (XE_IOCTL_ERR(xe, err)) {
2990                         err = -EFAULT;
2991                         goto free_bind_ops;
2992                 }
2993         } else {
2994                 *bind_ops = &args->bind;
2995         }
2996
2997         for (i = 0; i < args->num_binds; ++i) {
2998                 u64 range = (*bind_ops)[i].range;
2999                 u64 addr = (*bind_ops)[i].addr;
3000                 u32 op = (*bind_ops)[i].op;
3001                 u32 obj = (*bind_ops)[i].obj;
3002                 u64 obj_offset = (*bind_ops)[i].obj_offset;
3003                 u32 region = (*bind_ops)[i].region;
3004                 bool is_null = op & XE_VM_BIND_FLAG_NULL;
3005
3006                 if (XE_IOCTL_ERR(xe, (*bind_ops)[i].pad) ||
3007                     XE_IOCTL_ERR(xe, (*bind_ops)[i].reserved[0] ||
3008                                      (*bind_ops)[i].reserved[1])) {
3009                         err = -EINVAL;
3010                         goto free_bind_ops;
3011                 }
3012
3013                 if (i == 0) {
3014                         *async = !!(op & XE_VM_BIND_FLAG_ASYNC);
3015                 } else if (XE_IOCTL_ERR(xe, !*async) ||
3016                            XE_IOCTL_ERR(xe, !(op & XE_VM_BIND_FLAG_ASYNC)) ||
3017                            XE_IOCTL_ERR(xe, VM_BIND_OP(op) ==
3018                                         XE_VM_BIND_OP_RESTART)) {
3019                         err = -EINVAL;
3020                         goto free_bind_ops;
3021                 }
3022
3023                 if (XE_IOCTL_ERR(xe, !*async &&
3024                                  VM_BIND_OP(op) == XE_VM_BIND_OP_UNMAP_ALL)) {
3025                         err = -EINVAL;
3026                         goto free_bind_ops;
3027                 }
3028
3029                 if (XE_IOCTL_ERR(xe, !*async &&
3030                                  VM_BIND_OP(op) == XE_VM_BIND_OP_PREFETCH)) {
3031                         err = -EINVAL;
3032                         goto free_bind_ops;
3033                 }
3034
3035                 if (XE_IOCTL_ERR(xe, VM_BIND_OP(op) >
3036                                  XE_VM_BIND_OP_PREFETCH) ||
3037                     XE_IOCTL_ERR(xe, op & ~SUPPORTED_FLAGS) ||
3038                     XE_IOCTL_ERR(xe, obj && is_null) ||
3039                     XE_IOCTL_ERR(xe, obj_offset && is_null) ||
3040                     XE_IOCTL_ERR(xe, VM_BIND_OP(op) != XE_VM_BIND_OP_MAP &&
3041                                  is_null) ||
3042                     XE_IOCTL_ERR(xe, !obj &&
3043                                  VM_BIND_OP(op) == XE_VM_BIND_OP_MAP &&
3044                                  !is_null) ||
3045                     XE_IOCTL_ERR(xe, !obj &&
3046                                  VM_BIND_OP(op) == XE_VM_BIND_OP_UNMAP_ALL) ||
3047                     XE_IOCTL_ERR(xe, addr &&
3048                                  VM_BIND_OP(op) == XE_VM_BIND_OP_UNMAP_ALL) ||
3049                     XE_IOCTL_ERR(xe, range &&
3050                                  VM_BIND_OP(op) == XE_VM_BIND_OP_UNMAP_ALL) ||
3051                     XE_IOCTL_ERR(xe, obj &&
3052                                  VM_BIND_OP(op) == XE_VM_BIND_OP_MAP_USERPTR) ||
3053                     XE_IOCTL_ERR(xe, obj &&
3054                                  VM_BIND_OP(op) == XE_VM_BIND_OP_PREFETCH) ||
3055                     XE_IOCTL_ERR(xe, region &&
3056                                  VM_BIND_OP(op) != XE_VM_BIND_OP_PREFETCH) ||
3057                     XE_IOCTL_ERR(xe, !(BIT(region) &
3058                                        xe->info.mem_region_mask)) ||
3059                     XE_IOCTL_ERR(xe, obj &&
3060                                  VM_BIND_OP(op) == XE_VM_BIND_OP_UNMAP)) {
3061                         err = -EINVAL;
3062                         goto free_bind_ops;
3063                 }
3064
3065                 if (XE_IOCTL_ERR(xe, obj_offset & ~PAGE_MASK) ||
3066                     XE_IOCTL_ERR(xe, addr & ~PAGE_MASK) ||
3067                     XE_IOCTL_ERR(xe, range & ~PAGE_MASK) ||
3068                     XE_IOCTL_ERR(xe, !range && VM_BIND_OP(op) !=
3069                                  XE_VM_BIND_OP_RESTART &&
3070                                  VM_BIND_OP(op) != XE_VM_BIND_OP_UNMAP_ALL)) {
3071                         err = -EINVAL;
3072                         goto free_bind_ops;
3073                 }
3074         }
3075
3076         return 0;
3077
3078 free_bind_ops:
3079         if (args->num_binds > 1)
3080                 kfree(*bind_ops);
3081         return err;
3082 }
3083
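/**
 * xe_vm_bind_ioctl() - Entry point for the VM bind ioctl
 * @dev: DRM device
 * @data: The ioctl argument, a struct drm_xe_vm_bind
 * @file: DRM file private
 *
 * Validates the bind arguments, resolves the engine, VM, GEM objects and
 * sync entries, looks up or creates a VMA for each bind op and then either
 * executes the binds synchronously or hands them to the async bind worker.
 *
 * Return: 0 on success, negative error code on failure.
 */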
3084 int xe_vm_bind_ioctl(struct drm_device *dev, void *data, struct drm_file *file)
3085 {
3086         struct xe_device *xe = to_xe_device(dev);
3087         struct xe_file *xef = to_xe_file(file);
3088         struct drm_xe_vm_bind *args = data;
3089         struct drm_xe_sync __user *syncs_user;
3090         struct xe_bo **bos = NULL;
3091         struct xe_vma **vmas = NULL;
3092         struct xe_vm *vm;
3093         struct xe_engine *e = NULL;
3094         u32 num_syncs;
3095         struct xe_sync_entry *syncs = NULL;
3096         struct drm_xe_vm_bind_op *bind_ops;
3097         bool async;
3098         int err;
3099         int i, j = 0;
3100
3101         err = vm_bind_ioctl_check_args(xe, args, &bind_ops, &async);
3102         if (err)
3103                 return err;
3104
3105         if (args->engine_id) {
3106                 e = xe_engine_lookup(xef, args->engine_id);
3107                 if (XE_IOCTL_ERR(xe, !e)) {
3108                         err = -ENOENT;
3109                         goto free_objs;
3110                 }
3111
3112                 if (XE_IOCTL_ERR(xe, !(e->flags & ENGINE_FLAG_VM))) {
3113                         err = -EINVAL;
3114                         goto put_engine;
3115                 }
3116         }
3117
3118         vm = xe_vm_lookup(xef, args->vm_id);
3119         if (XE_IOCTL_ERR(xe, !vm)) {
3120                 err = -EINVAL;
3121                 goto put_engine;
3122         }
3123
3124         err = down_write_killable(&vm->lock);
3125         if (err)
3126                 goto put_vm;
3127
3128         if (XE_IOCTL_ERR(xe, xe_vm_is_closed_or_banned(vm))) {
3129                 err = -ENOENT;
3130                 goto release_vm_lock;
3131         }
3132
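	/*
	 * A restart op clears a previously reported async bind error and
	 * re-queues the async bind worker; it carries no binds or syncs of
	 * its own.
	 */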
3133         if (VM_BIND_OP(bind_ops[0].op) == XE_VM_BIND_OP_RESTART) {
3134                 if (XE_IOCTL_ERR(xe, !(vm->flags & XE_VM_FLAG_ASYNC_BIND_OPS)))
3135                         err = -EOPNOTSUPP;
3136                 if (XE_IOCTL_ERR(xe, !err && args->num_syncs))
3137                         err = -EINVAL;
3138                 if (XE_IOCTL_ERR(xe, !err && !vm->async_ops.error))
3139                         err = -EPROTO;
3140
3141                 if (!err) {
3142                         trace_xe_vm_restart(vm);
3143                         vm_set_async_error(vm, 0);
3144
3145                         queue_work(system_unbound_wq, &vm->async_ops.work);
3146
3147                         /* Rebinds may have been blocked, give worker a kick */
3148                         if (xe_vm_in_compute_mode(vm))
3149                                 xe_vm_queue_rebind_worker(vm);
3150                 }
3151
3152                 goto release_vm_lock;
3153         }
3154
3155         if (XE_IOCTL_ERR(xe, !vm->async_ops.error &&
3156                          async != !!(vm->flags & XE_VM_FLAG_ASYNC_BIND_OPS))) {
3157                 err = -EOPNOTSUPP;
3158                 goto release_vm_lock;
3159         }
3160
3161         for (i = 0; i < args->num_binds; ++i) {
3162                 u64 range = bind_ops[i].range;
3163                 u64 addr = bind_ops[i].addr;
3164
3165                 if (XE_IOCTL_ERR(xe, range > vm->size) ||
3166                     XE_IOCTL_ERR(xe, addr > vm->size - range)) {
3167                         err = -EINVAL;
3168                         goto release_vm_lock;
3169                 }
3170
3171                 if (bind_ops[i].tile_mask) {
3172                         u64 valid_tiles = BIT(xe->info.tile_count) - 1;
3173
3174                         if (XE_IOCTL_ERR(xe, bind_ops[i].tile_mask &
3175                                          ~valid_tiles)) {
3176                                 err = -EINVAL;
3177                                 goto release_vm_lock;
3178                         }
3179                 }
3180         }
3181
3182         bos = kzalloc(sizeof(*bos) * args->num_binds, GFP_KERNEL);
3183         if (!bos) {
3184                 err = -ENOMEM;
3185                 goto release_vm_lock;
3186         }
3187
3188         vmas = kzalloc(sizeof(*vmas) * args->num_binds, GFP_KERNEL);
3189         if (!vmas) {
3190                 err = -ENOMEM;
3191                 goto release_vm_lock;
3192         }
3193
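	/* Look up the GEM objects referenced by the binds and validate ranges */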
3194         for (i = 0; i < args->num_binds; ++i) {
3195                 struct drm_gem_object *gem_obj;
3196                 u64 range = bind_ops[i].range;
3197                 u64 addr = bind_ops[i].addr;
3198                 u32 obj = bind_ops[i].obj;
3199                 u64 obj_offset = bind_ops[i].obj_offset;
3200
3201                 if (!obj)
3202                         continue;
3203
3204                 gem_obj = drm_gem_object_lookup(file, obj);
3205                 if (XE_IOCTL_ERR(xe, !gem_obj)) {
3206                         err = -ENOENT;
3207                         goto put_obj;
3208                 }
3209                 bos[i] = gem_to_xe_bo(gem_obj);
3210
3211                 if (XE_IOCTL_ERR(xe, range > bos[i]->size) ||
3212                     XE_IOCTL_ERR(xe, obj_offset >
3213                                  bos[i]->size - range)) {
3214                         err = -EINVAL;
3215                         goto put_obj;
3216                 }
3217
3218                 if (bos[i]->flags & XE_BO_INTERNAL_64K) {
3219                         if (XE_IOCTL_ERR(xe, obj_offset &
3220                                          XE_64K_PAGE_MASK) ||
3221                             XE_IOCTL_ERR(xe, addr & XE_64K_PAGE_MASK) ||
3222                             XE_IOCTL_ERR(xe, range & XE_64K_PAGE_MASK)) {
3223                                 err = -EINVAL;
3224                                 goto put_obj;
3225                         }
3226                 }
3227         }
3228
3229         if (args->num_syncs) {
3230                 syncs = kcalloc(args->num_syncs, sizeof(*syncs), GFP_KERNEL);
3231                 if (!syncs) {
3232                         err = -ENOMEM;
3233                         goto put_obj;
3234                 }
3235         }
3236
3237         syncs_user = u64_to_user_ptr(args->syncs);
3238         for (num_syncs = 0; num_syncs < args->num_syncs; num_syncs++) {
3239                 err = xe_sync_entry_parse(xe, xef, &syncs[num_syncs],
3240                                           &syncs_user[num_syncs], false,
3241                                           xe_vm_in_fault_mode(vm));
3242                 if (err)
3243                         goto free_syncs;
3244         }
3245
3246         /* Do some error checking first to make the unwind easier */
3247         for (i = 0; i < args->num_binds; ++i) {
3248                 u64 range = bind_ops[i].range;
3249                 u64 addr = bind_ops[i].addr;
3250                 u32 op = bind_ops[i].op;
3251
3252                 err = __vm_bind_ioctl_lookup_vma(vm, bos[i], addr, range, op);
3253                 if (err)
3254                         goto free_syncs;
3255         }
3256
3257         for (i = 0; i < args->num_binds; ++i) {
3258                 u64 range = bind_ops[i].range;
3259                 u64 addr = bind_ops[i].addr;
3260                 u32 op = bind_ops[i].op;
3261                 u64 obj_offset = bind_ops[i].obj_offset;
3262                 u64 tile_mask = bind_ops[i].tile_mask;
3263                 u32 region = bind_ops[i].region;
3264
3265                 vmas[i] = vm_bind_ioctl_lookup_vma(vm, bos[i], obj_offset,
3266                                                    addr, range, op, tile_mask,
3267                                                    region);
3268                 if (IS_ERR(vmas[i])) {
3269                         err = PTR_ERR(vmas[i]);
3270                         vmas[i] = NULL;
3271                         goto destroy_vmas;
3272                 }
3273         }
3274
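	/*
	 * Dispatch the binds. A single bind gets all syncs; with multiple
	 * binds the in-syncs gate the first bind and the out-syncs are
	 * signalled by the last one (split below).
	 */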
3275         for (j = 0; j < args->num_binds; ++j) {
3276                 struct xe_sync_entry *__syncs;
3277                 u32 __num_syncs = 0;
3278                 bool first_or_last = j == 0 || j == args->num_binds - 1;
3279
3280                 if (args->num_binds == 1) {
3281                         __num_syncs = num_syncs;
3282                         __syncs = syncs;
3283                 } else if (first_or_last && num_syncs) {
3284                         bool first = j == 0;
3285
3286                         __syncs = kmalloc(sizeof(*__syncs) * num_syncs,
3287                                           GFP_KERNEL);
3288                         if (!__syncs) {
3289                                 err = -ENOMEM;
3290                                 break;
3291                         }
3292
3293                         /* in-syncs on first bind, out-syncs on last bind */
3294                         for (i = 0; i < num_syncs; ++i) {
3295                                 bool signal = syncs[i].flags &
3296                                         DRM_XE_SYNC_SIGNAL;
3297
3298                                 if ((first && !signal) || (!first && signal))
3299                                         __syncs[__num_syncs++] = syncs[i];
3300                         }
3301                 } else {
3302                         __num_syncs = 0;
3303                         __syncs = NULL;
3304                 }
3305
3306                 if (async) {
3307                         bool last = j == args->num_binds - 1;
3308
3309                         /*
3310                          * The async worker drops one set of VM/engine refs per bind;
3311                          * one set was taken at lookup above, so take extras for all
3311                          * but the last bind.
3312                          */
3313                         if (!last) {
3314                                 if (e)
3315                                         xe_engine_get(e);
3316                                 xe_vm_get(vm);
3317                         }
3318
3319                         err = vm_bind_ioctl_async(vm, vmas[j], e, bos[j],
3320                                                   bind_ops + j, __syncs,
3321                                                   __num_syncs);
3322                         if (err && !last) {
3323                                 if (e)
3324                                         xe_engine_put(e);
3325                                 xe_vm_put(vm);
3326                         }
3327                         if (err)
3328                                 break;
3329                 } else {
3330                         XE_BUG_ON(j != 0);      /* Not supported */
3331                         err = vm_bind_ioctl(vm, vmas[j], e, bos[j],
3332                                             bind_ops + j, __syncs,
3333                                             __num_syncs, NULL);
3334                         break;  /* Needed so cleanup loops work */
3335                 }
3336         }
3337
3338         /* Most of the cleanup is owned by the async bind worker */
3339         if (async && !err) {
3340                 up_write(&vm->lock);
3341                 if (args->num_binds > 1)
3342                         kfree(syncs);
3343                 goto free_objs;
3344         }
3345
3346 destroy_vmas:
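	/*
	 * Error unwind: destroy VMAs that the bind path did not consume,
	 * including any the lookup helpers chained on the unbind_link lists.
	 */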
3347         for (i = j; err && i < args->num_binds; ++i) {
3348                 u32 op = bind_ops[i].op;
3349                 struct xe_vma *vma, *next;
3350
3351                 if (!vmas[i])
3352                         break;
3353
3354                 list_for_each_entry_safe(vma, next, &vmas[i]->unbind_link,
3355                                          unbind_link) {
3356                         list_del_init(&vma->unbind_link);
3357                         if (!vma->destroyed) {
3358                                 prep_vma_destroy(vm, vma);
3359                                 xe_vma_destroy_unlocked(vma);
3360                         }
3361                 }
3362
3363                 switch (VM_BIND_OP(op)) {
3364                 case XE_VM_BIND_OP_MAP:
3365                         prep_vma_destroy(vm, vmas[i]);
3366                         xe_vma_destroy_unlocked(vmas[i]);
3367                         break;
3368                 case XE_VM_BIND_OP_MAP_USERPTR:
3369                         prep_vma_destroy(vm, vmas[i]);
3370                         xe_vma_destroy_unlocked(vmas[i]);
3371                         break;
3372                 }
3373         }
3374 free_syncs:
3375         while (num_syncs--) {
3376                 if (async && j &&
3377                     !(syncs[num_syncs].flags & DRM_XE_SYNC_SIGNAL))
3378                         continue;       /* Still in async worker */
3379                 xe_sync_entry_cleanup(&syncs[num_syncs]);
3380         }
3381
3382         kfree(syncs);
3383 put_obj:
3384         for (i = j; i < args->num_binds; ++i)
3385                 xe_bo_put(bos[i]);
3386 release_vm_lock:
3387         up_write(&vm->lock);
3388 put_vm:
3389         xe_vm_put(vm);
3390 put_engine:
3391         if (e)
3392                 xe_engine_put(e);
3393 free_objs:
3394         kfree(bos);
3395         kfree(vmas);
3396         if (args->num_binds > 1)
3397                 kfree(bind_ops);
3398         return err;
3399 }
3400
3401 /*
3402  * XXX: Using the TTM wrappers for now, likely can call into dma-resv code
3403  * directly to optimize. Also this likely should be an inline function.
3404  */
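/**
 * xe_vm_lock() - Lock the VM's dma-resv object
 * @vm: The VM to lock
 * @ww: Acquire context, initialized by this call
 * @num_resv: Number of fence slots to reserve on the VM's dma-resv
 * @intr: Whether to wait interruptibly
 *
 * A minimal usage sketch:
 *
 *	struct ww_acquire_ctx ww;
 *	int err;
 *
 *	err = xe_vm_lock(vm, &ww, 1, true);
 *	if (err)
 *		return err;
 *	... operate under the VM's dma-resv ...
 *	xe_vm_unlock(vm, &ww);
 *
 * Return: 0 on success, negative error code on failure.
 */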
3405 int xe_vm_lock(struct xe_vm *vm, struct ww_acquire_ctx *ww,
3406                int num_resv, bool intr)
3407 {
3408         struct ttm_validate_buffer tv_vm;
3409         LIST_HEAD(objs);
3410         LIST_HEAD(dups);
3411
3412         XE_BUG_ON(!ww);
3413
3414         tv_vm.num_shared = num_resv;
3415         tv_vm.bo = xe_vm_ttm_bo(vm);
3416         list_add_tail(&tv_vm.head, &objs);
3417
3418         return ttm_eu_reserve_buffers(ww, &objs, intr, &dups);
3419 }
3420
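/**
 * xe_vm_unlock() - Unlock the VM's dma-resv object
 * @vm: The VM to unlock
 * @ww: The acquire context used by the matching xe_vm_lock()
 */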
3421 void xe_vm_unlock(struct xe_vm *vm, struct ww_acquire_ctx *ww)
3422 {
3423         dma_resv_unlock(&vm->resv);
3424         ww_acquire_fini(ww);
3425 }
3426
3427 /**
3428  * xe_vm_invalidate_vma() - Invalidate GPU mappings for VMA without a lock
3429  * @vma: VMA to invalidate
3430  *
3431  * Walks the page-table leaves mapping this VMA, zeroes the entries owned by
3432  * the VMA, issues TLB invalidations and blocks until those invalidations
3433  * have completed.
3434  *
3435  * Return: 0 for success, negative error code otherwise.
3436  */
3437 int xe_vm_invalidate_vma(struct xe_vma *vma)
3438 {
3439         struct xe_device *xe = xe_vma_vm(vma)->xe;
3440         struct xe_tile *tile;
3441         u32 tile_needs_invalidate = 0;
3442         int seqno[XE_MAX_TILES_PER_DEVICE];
3443         u8 id;
3444         int ret;
3445
3446         XE_BUG_ON(!xe_vm_in_fault_mode(xe_vma_vm(vma)));
3447         XE_WARN_ON(xe_vma_is_null(vma));
3448         trace_xe_vma_usm_invalidate(vma);
3449
3450         /* Check that we don't race with page-table updates */
3451         if (IS_ENABLED(CONFIG_PROVE_LOCKING)) {
3452                 if (xe_vma_is_userptr(vma)) {
3453                         WARN_ON_ONCE(!mmu_interval_check_retry
3454                                      (&vma->userptr.notifier,
3455                                       vma->userptr.notifier_seq));
3456                         WARN_ON_ONCE(!dma_resv_test_signaled(&xe_vma_vm(vma)->resv,
3457                                                              DMA_RESV_USAGE_BOOKKEEP));
3458
3459                 } else {
3460                         xe_bo_assert_held(xe_vma_bo(vma));
3461                 }
3462         }
3463
3464         for_each_tile(tile, xe, id) {
3465                 if (xe_pt_zap_ptes(tile, vma)) {
3466                         tile_needs_invalidate |= BIT(id);
3467                         xe_device_wmb(xe);
3468                         /*
3469                          * FIXME: We potentially need to invalidate multiple
3470                          * GTs within the tile
3471                          */
3472                         seqno[id] = xe_gt_tlb_invalidation_vma(tile->primary_gt, NULL, vma);
3473                         if (seqno[id] < 0)
3474                                 return seqno[id];
3475                 }
3476         }
3477
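	/* Wait for the invalidations issued above to complete */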
3478         for_each_tile(tile, xe, id) {
3479                 if (tile_needs_invalidate & BIT(id)) {
3480                         ret = xe_gt_tlb_invalidation_wait(tile->primary_gt, seqno[id]);
3481                         if (ret < 0)
3482                                 return ret;
3483                 }
3484         }
3485
3486         vma->usm.tile_invalidated = vma->tile_mask;
3487
3488         return 0;
3489 }
3490
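/**
 * xe_analyze_vm() - Dump the VM's mappings for debug / error capture
 * @p: The drm_printer to emit the dump to
 * @vm: The VM to dump
 * @gt_id: The GT whose page-table root is reported
 *
 * Prints the page-table root address and, for each VMA, its address range,
 * size, first backing address and placement (NULL, userptr, VRAM or SYS).
 *
 * Return: 0 on completion.
 */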
3491 int xe_analyze_vm(struct drm_printer *p, struct xe_vm *vm, int gt_id)
3492 {
3493         struct rb_node *node;
3494         bool is_vram;
3495         u64 addr;
3496
3497         if (!down_read_trylock(&vm->lock)) {
3498                 drm_printf(p, " Failed to acquire VM lock to dump capture\n");
3499                 return 0;
3500         }
3501         if (vm->pt_root[gt_id]) {
3502                 addr = xe_bo_addr(vm->pt_root[gt_id]->bo, 0, XE_PAGE_SIZE,
3503                                   &is_vram);
3504                 drm_printf(p, " VM root: A:0x%llx %s\n", addr, is_vram ? "VRAM" : "SYS");
3505         }
3506
3507         for (node = rb_first(&vm->vmas); node; node = rb_next(node)) {
3508                 struct xe_vma *vma = to_xe_vma(node);
3509                 bool is_userptr = xe_vma_is_userptr(vma);
3510                 bool is_null = xe_vma_is_null(vma);
3511
3512                 if (is_null) {
3513                         addr = 0;
3514                 } else if (is_userptr) {
3515                         struct xe_res_cursor cur;
3516
3517                         if (vma->userptr.sg) {
3518                                 xe_res_first_sg(vma->userptr.sg, 0, XE_PAGE_SIZE,
3519                                                 &cur);
3520                                 addr = xe_res_dma(&cur);
3521                         } else {
3522                                 addr = 0;
3523                         }
3524                 } else {
3525                         addr = __xe_bo_addr(xe_vma_bo(vma), 0, XE_PAGE_SIZE, &is_vram);
3526                 }
3527                 drm_printf(p, " [%016llx-%016llx] S:0x%016llx A:%016llx %s\n",
3528                            xe_vma_start(vma), xe_vma_end(vma) - 1,
3529                            xe_vma_size(vma),
3530                            addr, is_null ? "NULL" : is_userptr ? "USR" :
3531                            is_vram ? "VRAM" : "SYS");
3532         }
3533         up_read(&vm->lock);
3534
3535         return 0;
3536 }