drm/xe: Signal out-syncs on VM binds if no operations
drivers/gpu/drm/xe/xe_vm.c
1 // SPDX-License-Identifier: MIT
2 /*
3  * Copyright © 2021 Intel Corporation
4  */
5
6 #include "xe_vm.h"
7
8 #include <linux/dma-fence-array.h>
9
10 #include <drm/drm_print.h>
11 #include <drm/ttm/ttm_execbuf_util.h>
12 #include <drm/ttm/ttm_tt.h>
13 #include <drm/xe_drm.h>
14 #include <linux/delay.h>
15 #include <linux/kthread.h>
16 #include <linux/mm.h>
17 #include <linux/swap.h>
18
19 #include "xe_bo.h"
20 #include "xe_device.h"
21 #include "xe_engine.h"
22 #include "xe_gt.h"
23 #include "xe_gt_pagefault.h"
24 #include "xe_gt_tlb_invalidation.h"
25 #include "xe_migrate.h"
26 #include "xe_pm.h"
27 #include "xe_preempt_fence.h"
28 #include "xe_pt.h"
29 #include "xe_res_cursor.h"
30 #include "xe_sync.h"
31 #include "xe_trace.h"
32
33 #define TEST_VM_ASYNC_OPS_ERROR
34
35 /**
36  * xe_vma_userptr_check_repin() - Advisory check for repin needed
37  * @vma: The userptr vma
38  *
39  * Check if the userptr vma has been invalidated since last successful
40  * repin. The check is advisory only and the function can be called
41  * without the vm->userptr.notifier_lock held. There is no guarantee that the
42  * vma userptr will remain valid after a lockless check, so typically
43  * the call needs to be followed by a proper check under the notifier_lock.
44  *
45  * Return: 0 if userptr vma is valid, -EAGAIN otherwise; repin recommended.
46  */
47 int xe_vma_userptr_check_repin(struct xe_vma *vma)
48 {
49         return mmu_interval_check_retry(&vma->userptr.notifier,
50                                         vma->userptr.notifier_seq) ?
51                 -EAGAIN : 0;
52 }
53
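/**
 * xe_vma_userptr_pin_pages() - Pin the current backing pages of a userptr vma
 * @vma: The userptr vma.
 *
 * Pins the pages backing the userptr range with get_user_pages_fast(),
 * builds an sg table for them, DMA-maps it and then drops the page
 * references again (the DMA mapping, not the pin, is what is kept around).
 * If the mmu interval notifier reports that the range was invalidated while
 * pinning, the whole operation is retried. Must be called with vm->lock held.
 *
 * Return: 0 on success or when there is nothing to do (vma destroyed or
 * pages still valid), negative error code on failure.
 */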
54 int xe_vma_userptr_pin_pages(struct xe_vma *vma)
55 {
56         struct xe_vm *vm = xe_vma_vm(vma);
57         struct xe_device *xe = vm->xe;
58         const unsigned long num_pages = xe_vma_size(vma) >> PAGE_SHIFT;
59         struct page **pages;
60         bool in_kthread = !current->mm;
61         unsigned long notifier_seq;
62         int pinned, ret, i;
63         bool read_only = xe_vma_read_only(vma);
64
65         lockdep_assert_held(&vm->lock);
66         XE_BUG_ON(!xe_vma_is_userptr(vma));
67 retry:
68         if (vma->gpuva.flags & XE_VMA_DESTROYED)
69                 return 0;
70
71         notifier_seq = mmu_interval_read_begin(&vma->userptr.notifier);
72         if (notifier_seq == vma->userptr.notifier_seq)
73                 return 0;
74
75         pages = kvmalloc_array(num_pages, sizeof(*pages), GFP_KERNEL);
76         if (!pages)
77                 return -ENOMEM;
78
79         if (vma->userptr.sg) {
80                 dma_unmap_sgtable(xe->drm.dev,
81                                   vma->userptr.sg,
82                                   read_only ? DMA_TO_DEVICE :
83                                   DMA_BIDIRECTIONAL, 0);
84                 sg_free_table(vma->userptr.sg);
85                 vma->userptr.sg = NULL;
86         }
87
88         pinned = ret = 0;
89         if (in_kthread) {
90                 if (!mmget_not_zero(vma->userptr.notifier.mm)) {
91                         ret = -EFAULT;
92                         goto mm_closed;
93                 }
94                 kthread_use_mm(vma->userptr.notifier.mm);
95         }
96
97         while (pinned < num_pages) {
98                 ret = get_user_pages_fast(xe_vma_userptr(vma) +
99                                           pinned * PAGE_SIZE,
100                                           num_pages - pinned,
101                                           read_only ? 0 : FOLL_WRITE,
102                                           &pages[pinned]);
103                 if (ret < 0) {
104                         if (in_kthread)
105                                 ret = 0;
106                         break;
107                 }
108
109                 pinned += ret;
110                 ret = 0;
111         }
112
113         if (in_kthread) {
114                 kthread_unuse_mm(vma->userptr.notifier.mm);
115                 mmput(vma->userptr.notifier.mm);
116         }
117 mm_closed:
118         if (ret)
119                 goto out;
120
121         ret = sg_alloc_table_from_pages_segment(&vma->userptr.sgt, pages,
122                                                 pinned, 0,
123                                                 (u64)pinned << PAGE_SHIFT,
124                                                 xe_sg_segment_size(xe->drm.dev),
125                                                 GFP_KERNEL);
126         if (ret) {
127                 vma->userptr.sg = NULL;
128                 goto out;
129         }
130         vma->userptr.sg = &vma->userptr.sgt;
131
132         ret = dma_map_sgtable(xe->drm.dev, vma->userptr.sg,
133                               read_only ? DMA_TO_DEVICE :
134                               DMA_BIDIRECTIONAL,
135                               DMA_ATTR_SKIP_CPU_SYNC |
136                               DMA_ATTR_NO_KERNEL_MAPPING);
137         if (ret) {
138                 sg_free_table(vma->userptr.sg);
139                 vma->userptr.sg = NULL;
140                 goto out;
141         }
142
143         for (i = 0; i < pinned; ++i) {
144                 if (!read_only) {
145                         lock_page(pages[i]);
146                         set_page_dirty(pages[i]);
147                         unlock_page(pages[i]);
148                 }
149
150                 mark_page_accessed(pages[i]);
151         }
152
153 out:
154         release_pages(pages, pinned);
155         kvfree(pages);
156
157         if (!(ret < 0)) {
158                 vma->userptr.notifier_seq = notifier_seq;
159                 if (xe_vma_userptr_check_repin(vma) == -EAGAIN)
160                         goto retry;
161         }
162
163         return ret < 0 ? ret : 0;
164 }
165
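/*
 * Returns true if any compute engine on the VM either has no preempt fence
 * installed or has one whose signaling has already been enabled, i.e. a
 * preemption is pending or in flight.
 */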
166 static bool preempt_fences_waiting(struct xe_vm *vm)
167 {
168         struct xe_engine *e;
169
170         lockdep_assert_held(&vm->lock);
171         xe_vm_assert_held(vm);
172
173         list_for_each_entry(e, &vm->preempt.engines, compute.link) {
174                 if (!e->compute.pfence ||
175                     test_bit(DMA_FENCE_FLAG_ENABLE_SIGNAL_BIT,
176                              &e->compute.pfence->flags)) {
177                         return true;
178                 }
179         }
180
181         return false;
182 }
183
184 static void free_preempt_fences(struct list_head *list)
185 {
186         struct list_head *link, *next;
187
188         list_for_each_safe(link, next, list)
189                 xe_preempt_fence_free(to_preempt_fence_from_link(link));
190 }
191
192 static int alloc_preempt_fences(struct xe_vm *vm, struct list_head *list,
193                                 unsigned int *count)
194 {
195         lockdep_assert_held(&vm->lock);
196         xe_vm_assert_held(vm);
197
198         if (*count >= vm->preempt.num_engines)
199                 return 0;
200
201         for (; *count < vm->preempt.num_engines; ++(*count)) {
202                 struct xe_preempt_fence *pfence = xe_preempt_fence_alloc();
203
204                 if (IS_ERR(pfence))
205                         return PTR_ERR(pfence);
206
207                 list_move_tail(xe_preempt_fence_link(pfence), list);
208         }
209
210         return 0;
211 }
212
213 static int wait_for_existing_preempt_fences(struct xe_vm *vm)
214 {
215         struct xe_engine *e;
216
217         xe_vm_assert_held(vm);
218
219         list_for_each_entry(e, &vm->preempt.engines, compute.link) {
220                 if (e->compute.pfence) {
221                         long timeout = dma_fence_wait(e->compute.pfence, false);
222
223                         if (timeout < 0)
224                                 return -ETIME;
225                         dma_fence_put(e->compute.pfence);
226                         e->compute.pfence = NULL;
227                 }
228         }
229
230         return 0;
231 }
232
233 static bool xe_vm_is_idle(struct xe_vm *vm)
234 {
235         struct xe_engine *e;
236
237         xe_vm_assert_held(vm);
238         list_for_each_entry(e, &vm->preempt.engines, compute.link) {
239                 if (!xe_engine_is_idle(e))
240                         return false;
241         }
242
243         return true;
244 }
245
246 static void arm_preempt_fences(struct xe_vm *vm, struct list_head *list)
247 {
248         struct list_head *link;
249         struct xe_engine *e;
250
251         list_for_each_entry(e, &vm->preempt.engines, compute.link) {
252                 struct dma_fence *fence;
253
254                 link = list->next;
255                 XE_BUG_ON(link == list);
256
257                 fence = xe_preempt_fence_arm(to_preempt_fence_from_link(link),
258                                              e, e->compute.context,
259                                              ++e->compute.seqno);
260                 dma_fence_put(e->compute.pfence);
261                 e->compute.pfence = fence;
262         }
263 }
264
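/*
 * Lock @bo and add the currently installed preempt fence of each compute
 * engine on the VM to the BO's dma-resv with DMA_RESV_USAGE_BOOKKEEP usage.
 */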
265 static int add_preempt_fences(struct xe_vm *vm, struct xe_bo *bo)
266 {
267         struct xe_engine *e;
268         struct ww_acquire_ctx ww;
269         int err;
270
271         err = xe_bo_lock(bo, &ww, vm->preempt.num_engines, true);
272         if (err)
273                 return err;
274
275         list_for_each_entry(e, &vm->preempt.engines, compute.link)
276                 if (e->compute.pfence) {
277                         dma_resv_add_fence(bo->ttm.base.resv,
278                                            e->compute.pfence,
279                                            DMA_RESV_USAGE_BOOKKEEP);
280                 }
281
282         xe_bo_unlock(bo, &ww);
283         return 0;
284 }
285
286 /**
287  * xe_vm_fence_all_extobjs() - Add a fence to vm's external objects' resv
288  * @vm: The vm.
289  * @fence: The fence to add.
290  * @usage: The resv usage for the fence.
291  *
292  * Loops over all of the vm's external object bindings and adds a @fence
293  * with the given @usage to all of the external objects' reservation
294  * objects.
295  */
296 void xe_vm_fence_all_extobjs(struct xe_vm *vm, struct dma_fence *fence,
297                              enum dma_resv_usage usage)
298 {
299         struct xe_vma *vma;
300
301         list_for_each_entry(vma, &vm->extobj.list, extobj.link)
302                 dma_resv_add_fence(xe_vma_bo(vma)->ttm.base.resv, fence, usage);
303 }
304
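/*
 * Resume all compute engines on the VM and re-add their (newly armed)
 * preempt fences to the VM's dma-resv and to every external BO bound to
 * the VM, all with DMA_RESV_USAGE_BOOKKEEP usage.
 */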
305 static void resume_and_reinstall_preempt_fences(struct xe_vm *vm)
306 {
307         struct xe_engine *e;
308
309         lockdep_assert_held(&vm->lock);
310         xe_vm_assert_held(vm);
311
312         list_for_each_entry(e, &vm->preempt.engines, compute.link) {
313                 e->ops->resume(e);
314
315                 dma_resv_add_fence(xe_vm_resv(vm), e->compute.pfence,
316                                    DMA_RESV_USAGE_BOOKKEEP);
317                 xe_vm_fence_all_extobjs(vm, e->compute.pfence,
318                                         DMA_RESV_USAGE_BOOKKEEP);
319         }
320 }
321
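/**
 * xe_vm_add_compute_engine() - Add an engine to the VM's compute engine list
 * @vm: The VM, must be in compute mode.
 * @e: The engine to add.
 *
 * Creates a preempt fence for @e, adds the engine to vm->preempt.engines and
 * installs the fence in the VM's dma-resv as well as in all external BOs
 * bound to the VM. If a preemption or userptr invalidation is already in
 * flight, signaling of the new fence is enabled immediately so that it syncs
 * up with the other preempt fences on the VM.
 *
 * Return: 0 on success, negative error code on failure.
 */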
322 int xe_vm_add_compute_engine(struct xe_vm *vm, struct xe_engine *e)
323 {
324         struct ttm_validate_buffer tv_onstack[XE_ONSTACK_TV];
325         struct ttm_validate_buffer *tv;
326         struct ww_acquire_ctx ww;
327         struct list_head objs;
328         struct dma_fence *pfence;
329         int err;
330         bool wait;
331
332         XE_BUG_ON(!xe_vm_in_compute_mode(vm));
333
334         down_write(&vm->lock);
335
336         err = xe_vm_lock_dma_resv(vm, &ww, tv_onstack, &tv, &objs, true, 1);
337         if (err)
338                 goto out_unlock_outer;
339
340         pfence = xe_preempt_fence_create(e, e->compute.context,
341                                          ++e->compute.seqno);
342         if (!pfence) {
343                 err = -ENOMEM;
344                 goto out_unlock;
345         }
346
347         list_add(&e->compute.link, &vm->preempt.engines);
348         ++vm->preempt.num_engines;
349         e->compute.pfence = pfence;
350
351         down_read(&vm->userptr.notifier_lock);
352
353         dma_resv_add_fence(xe_vm_resv(vm), pfence,
354                            DMA_RESV_USAGE_BOOKKEEP);
355
356         xe_vm_fence_all_extobjs(vm, pfence, DMA_RESV_USAGE_BOOKKEEP);
357
358         /*
359          * Check whether a preemption on the VM or a userptr invalidation is
360          * in flight; if so, trigger this preempt fence to sync state with the
361          * other preempt fences on the VM.
362          */
363         wait = __xe_vm_userptr_needs_repin(vm) || preempt_fences_waiting(vm);
364         if (wait)
365                 dma_fence_enable_sw_signaling(pfence);
366
367         up_read(&vm->userptr.notifier_lock);
368
369 out_unlock:
370         xe_vm_unlock_dma_resv(vm, tv_onstack, tv, &ww, &objs);
371 out_unlock_outer:
372         up_write(&vm->lock);
373
374         return err;
375 }
376
377 /**
378  * __xe_vm_userptr_needs_repin() - Check whether the VM has userptrs
379  * that need repinning.
380  * @vm: The VM.
381  *
382  * This function checks for whether the VM has userptrs that need repinning,
383  * and provides a release-type barrier on the userptr.notifier_lock after
384  * checking.
385  *
386  * Return: 0 if there are no userptrs needing repinning, -EAGAIN if there are.
387  */
388 int __xe_vm_userptr_needs_repin(struct xe_vm *vm)
389 {
390         lockdep_assert_held_read(&vm->userptr.notifier_lock);
391
392         return (list_empty(&vm->userptr.repin_list) &&
393                 list_empty(&vm->userptr.invalidated)) ? 0 : -EAGAIN;
394 }
395
396 /**
397  * xe_vm_lock_dma_resv() - Lock the vm dma_resv object and the dma_resv
398  * objects of the vm's external buffer objects.
399  * @vm: The vm.
400  * @ww: Pointer to a struct ww_acquire_ctx locking context.
401  * @tv_onstack: Array size XE_ONSTACK_TV of storage for the struct
402  * ttm_validate_buffers used for locking.
403  * @tv: Pointer to a pointer that on output contains the actual storage used.
404  * @objs: List head for the buffer objects locked.
405  * @intr: Whether to lock interruptible.
406  * @num_shared: Number of dma-fence slots to reserve in the locked objects.
407  *
408  * Locks the vm dma-resv objects and all the dma-resv objects of the
409  * buffer objects on the vm external object list. The TTM utilities require
410  * a list of struct ttm_validate_buffers pointing to the actual buffer
411  * objects to lock. Storage for those struct ttm_validate_buffers should
412  * be provided in @tv_onstack, and is typically reserved on the stack
413  * of the caller. If the size of @tv_onstack isn't sufficient, then
414  * storage will be allocated internally using kvmalloc().
415  *
416  * The function performs deadlock handling internally, and after a
417  * successful return the ww locking transaction should be considered
418  * sealed.
419  *
420  * Return: 0 on success, Negative error code on error. In particular if
421  * @intr is set to true, -EINTR or -ERESTARTSYS may be returned. In case
422  * of error, any locking performed has been reverted.
423  */
424 int xe_vm_lock_dma_resv(struct xe_vm *vm, struct ww_acquire_ctx *ww,
425                         struct ttm_validate_buffer *tv_onstack,
426                         struct ttm_validate_buffer **tv,
427                         struct list_head *objs,
428                         bool intr,
429                         unsigned int num_shared)
430 {
431         struct ttm_validate_buffer *tv_vm, *tv_bo;
432         struct xe_vma *vma, *next;
433         LIST_HEAD(dups);
434         int err;
435
436         lockdep_assert_held(&vm->lock);
437
438         if (vm->extobj.entries < XE_ONSTACK_TV) {
439                 tv_vm = tv_onstack;
440         } else {
441                 tv_vm = kvmalloc_array(vm->extobj.entries + 1, sizeof(*tv_vm),
442                                        GFP_KERNEL);
443                 if (!tv_vm)
444                         return -ENOMEM;
445         }
446         tv_bo = tv_vm + 1;
447
448         INIT_LIST_HEAD(objs);
449         list_for_each_entry(vma, &vm->extobj.list, extobj.link) {
450                 tv_bo->num_shared = num_shared;
451                 tv_bo->bo = &xe_vma_bo(vma)->ttm;
452
453                 list_add_tail(&tv_bo->head, objs);
454                 tv_bo++;
455         }
456         tv_vm->num_shared = num_shared;
457         tv_vm->bo = xe_vm_ttm_bo(vm);
458         list_add_tail(&tv_vm->head, objs);
459         err = ttm_eu_reserve_buffers(ww, objs, intr, &dups);
460         if (err)
461                 goto out_err;
462
463         spin_lock(&vm->notifier.list_lock);
464         list_for_each_entry_safe(vma, next, &vm->notifier.rebind_list,
465                                  notifier.rebind_link) {
466                 xe_bo_assert_held(xe_vma_bo(vma));
467
468                 list_del_init(&vma->notifier.rebind_link);
469                 if (vma->tile_present && !(vma->gpuva.flags & XE_VMA_DESTROYED))
470                         list_move_tail(&vma->combined_links.rebind,
471                                        &vm->rebind_list);
472         }
473         spin_unlock(&vm->notifier.list_lock);
474
475         *tv = tv_vm;
476         return 0;
477
478 out_err:
479         if (tv_vm != tv_onstack)
480                 kvfree(tv_vm);
481
482         return err;
483 }
484
485 /**
486  * xe_vm_unlock_dma_resv() - Unlock reservation objects locked by
487  * xe_vm_lock_dma_resv()
488  * @vm: The vm.
489  * @tv_onstack: The @tv_onstack array given to xe_vm_lock_dma_resv().
490  * @tv: The value of *@tv given by xe_vm_lock_dma_resv().
491  * @ww: The ww_acquire_context used for locking.
492  * @objs: The list returned from xe_vm_lock_dma_resv().
493  *
494  * Unlocks the reservation objects and frees any memory allocated by
495  * xe_vm_lock_dma_resv().
496  */
497 void xe_vm_unlock_dma_resv(struct xe_vm *vm,
498                            struct ttm_validate_buffer *tv_onstack,
499                            struct ttm_validate_buffer *tv,
500                            struct ww_acquire_ctx *ww,
501                            struct list_head *objs)
502 {
503         /*
504          * Nothing should've been able to enter the list while we were locked,
505          * since we've held the dma-resvs of all the vm's external objects,
506          * and holding the dma_resv of an object is required for list
507          * addition, and we shouldn't add ourselves.
508          */
509         XE_WARN_ON(!list_empty(&vm->notifier.rebind_list));
510
511         ttm_eu_backoff_reservation(ww, objs);
512         if (tv && tv != tv_onstack)
513                 kvfree(tv);
514 }
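/*
 * Illustrative sketch of the xe_vm_lock_dma_resv() / xe_vm_unlock_dma_resv()
 * usage pattern; see xe_vm_add_compute_engine() above and
 * preempt_rebind_work_func() below for the real callers:
 *
 *	struct ttm_validate_buffer tv_onstack[XE_ONSTACK_TV];
 *	struct ttm_validate_buffer *tv;
 *	struct ww_acquire_ctx ww;
 *	struct list_head objs;
 *	int err;
 *
 *	err = xe_vm_lock_dma_resv(vm, &ww, tv_onstack, &tv, &objs, true, 1);
 *	if (err)
 *		return err;
 *
 *	... operate with the VM resv and all external BO resvs held ...
 *
 *	xe_vm_unlock_dma_resv(vm, tv_onstack, tv, &ww, &objs);
 */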
515
516 #define XE_VM_REBIND_RETRY_TIMEOUT_MS 1000
517
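/*
 * Mark the VM as banned and kill all compute engines attached to it. Called
 * with vm->lock held when an unrecoverable error is hit, e.g. by the rebind
 * worker below.
 */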
518 static void xe_vm_kill(struct xe_vm *vm)
519 {
520         struct ww_acquire_ctx ww;
521         struct xe_engine *e;
522
523         lockdep_assert_held(&vm->lock);
524
525         xe_vm_lock(vm, &ww, 0, false);
526         vm->flags |= XE_VM_FLAG_BANNED;
527         trace_xe_vm_kill(vm);
528
529         list_for_each_entry(e, &vm->preempt.engines, compute.link)
530                 e->ops->kill(e);
531         xe_vm_unlock(vm, &ww);
532
533         /* TODO: Inform user the VM is banned */
534 }
535
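/*
 * Rebind worker for compute-mode VMs. Repins invalidated userptrs,
 * revalidates evicted BOs, rebinds everything on the rebind list, waits for
 * in-flight unbinds, and finally re-arms and reinstalls the preempt fences
 * before resuming the compute engines. Retries on -EAGAIN and on -ENOMEM
 * (up to XE_VM_REBIND_RETRY_TIMEOUT_MS), and kills the VM on any other error.
 */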
536 static void preempt_rebind_work_func(struct work_struct *w)
537 {
538         struct xe_vm *vm = container_of(w, struct xe_vm, preempt.rebind_work);
539         struct xe_vma *vma;
540         struct ttm_validate_buffer tv_onstack[XE_ONSTACK_TV];
541         struct ttm_validate_buffer *tv;
542         struct ww_acquire_ctx ww;
543         struct list_head objs;
544         struct dma_fence *rebind_fence;
545         unsigned int fence_count = 0;
546         LIST_HEAD(preempt_fences);
547         ktime_t end = 0;
548         int err;
549         long wait;
550         int __maybe_unused tries = 0;
551
552         XE_BUG_ON(!xe_vm_in_compute_mode(vm));
553         trace_xe_vm_rebind_worker_enter(vm);
554
555         down_write(&vm->lock);
556
557         if (xe_vm_is_closed_or_banned(vm)) {
558                 up_write(&vm->lock);
559                 trace_xe_vm_rebind_worker_exit(vm);
560                 return;
561         }
562
563 retry:
564         if (vm->async_ops.error)
565                 goto out_unlock_outer;
566
567         /*
568          * Extreme corner where we exit a VM error state with a munmap style VM
569          * unbind inflight which requires a rebind. In this case the rebind
570          * needs to install some fences into the dma-resv slots. The worker to
571          * do this queued, let that worker make progress by dropping vm->lock
572          * and trying this again.
573          */
574         if (vm->async_ops.munmap_rebind_inflight) {
575                 up_write(&vm->lock);
576                 flush_work(&vm->async_ops.work);
577                 goto retry;
578         }
579
580         if (xe_vm_userptr_check_repin(vm)) {
581                 err = xe_vm_userptr_pin(vm);
582                 if (err)
583                         goto out_unlock_outer;
584         }
585
586         err = xe_vm_lock_dma_resv(vm, &ww, tv_onstack, &tv, &objs,
587                                   false, vm->preempt.num_engines);
588         if (err)
589                 goto out_unlock_outer;
590
591         if (xe_vm_is_idle(vm)) {
592                 vm->preempt.rebind_deactivated = true;
593                 goto out_unlock;
594         }
595
596         /* Fresh preempt fences already installed. Everything is running. */
597         if (!preempt_fences_waiting(vm))
598                 goto out_unlock;
599
600         /*
601          * This makes sure vm is completely suspended and also balances
602          * xe_engine suspend- and resume; we resume *all* vm engines below.
603          */
604         err = wait_for_existing_preempt_fences(vm);
605         if (err)
606                 goto out_unlock;
607
608         err = alloc_preempt_fences(vm, &preempt_fences, &fence_count);
609         if (err)
610                 goto out_unlock;
611
612         list_for_each_entry(vma, &vm->rebind_list, combined_links.rebind) {
613                 if (xe_vma_has_no_bo(vma) ||
614                     vma->gpuva.flags & XE_VMA_DESTROYED)
615                         continue;
616
617                 err = xe_bo_validate(xe_vma_bo(vma), vm, false);
618                 if (err)
619                         goto out_unlock;
620         }
621
622         rebind_fence = xe_vm_rebind(vm, true);
623         if (IS_ERR(rebind_fence)) {
624                 err = PTR_ERR(rebind_fence);
625                 goto out_unlock;
626         }
627
628         if (rebind_fence) {
629                 dma_fence_wait(rebind_fence, false);
630                 dma_fence_put(rebind_fence);
631         }
632
633         /* Wait on munmap style VM unbinds */
634         wait = dma_resv_wait_timeout(xe_vm_resv(vm),
635                                      DMA_RESV_USAGE_KERNEL,
636                                      false, MAX_SCHEDULE_TIMEOUT);
637         if (wait <= 0) {
638                 err = -ETIME;
639                 goto out_unlock;
640         }
641
642 #define retry_required(__tries, __vm) \
643         (IS_ENABLED(CONFIG_DRM_XE_USERPTR_INVAL_INJECT) ? \
644         (!(__tries)++ || __xe_vm_userptr_needs_repin(__vm)) : \
645         __xe_vm_userptr_needs_repin(__vm))
646
647         down_read(&vm->userptr.notifier_lock);
648         if (retry_required(tries, vm)) {
649                 up_read(&vm->userptr.notifier_lock);
650                 err = -EAGAIN;
651                 goto out_unlock;
652         }
653
654 #undef retry_required
655
656         spin_lock(&vm->xe->ttm.lru_lock);
657         ttm_lru_bulk_move_tail(&vm->lru_bulk_move);
658         spin_unlock(&vm->xe->ttm.lru_lock);
659
660         /* Point of no return. */
661         arm_preempt_fences(vm, &preempt_fences);
662         resume_and_reinstall_preempt_fences(vm);
663         up_read(&vm->userptr.notifier_lock);
664
665 out_unlock:
666         xe_vm_unlock_dma_resv(vm, tv_onstack, tv, &ww, &objs);
667 out_unlock_outer:
668         if (err == -EAGAIN) {
669                 trace_xe_vm_rebind_worker_retry(vm);
670                 goto retry;
671         }
672
673         /*
674          * With multiple active VMs, under memory pressure, it is possible that
675          * ttm_bo_validate() runs into -EDEADLK and in such a case returns -ENOMEM.
676          * Until TTM properly handles locking in such scenarios, the best thing the
677          * driver can do is retry with a timeout. Killing the VM or putting it
678          * in error state after timeout or other error scenarios is still TBD.
679          */
680         if (err == -ENOMEM) {
681                 ktime_t cur = ktime_get();
682
683                 end = end ? : ktime_add_ms(cur, XE_VM_REBIND_RETRY_TIMEOUT_MS);
684                 if (ktime_before(cur, end)) {
685                         msleep(20);
686                         trace_xe_vm_rebind_worker_retry(vm);
687                         goto retry;
688                 }
689         }
690         if (err) {
691                 drm_warn(&vm->xe->drm, "VM worker error: %d\n", err);
692                 xe_vm_kill(vm);
693         }
694         up_write(&vm->lock);
695
696         free_preempt_fences(&preempt_fences);
697
698         trace_xe_vm_rebind_worker_exit(vm);
699 }
700
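/*
 * MMU interval notifier callback for userptr vmas. Bumps the notifier
 * sequence number, moves the vma to the userptr.invalidated list (unless the
 * VM is in fault mode or the vma is being destroyed / not yet bound), enables
 * signaling on all fences in the VM's dma-resv and waits for them, and in
 * fault mode also invalidates the GPU mappings directly.
 */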
701 static bool vma_userptr_invalidate(struct mmu_interval_notifier *mni,
702                                    const struct mmu_notifier_range *range,
703                                    unsigned long cur_seq)
704 {
705         struct xe_vma *vma = container_of(mni, struct xe_vma, userptr.notifier);
706         struct xe_vm *vm = xe_vma_vm(vma);
707         struct dma_resv_iter cursor;
708         struct dma_fence *fence;
709         long err;
710
711         XE_BUG_ON(!xe_vma_is_userptr(vma));
712         trace_xe_vma_userptr_invalidate(vma);
713
714         if (!mmu_notifier_range_blockable(range))
715                 return false;
716
717         down_write(&vm->userptr.notifier_lock);
718         mmu_interval_set_seq(mni, cur_seq);
719
720         /* No need to stop gpu access if the userptr is not yet bound. */
721         if (!vma->userptr.initial_bind) {
722                 up_write(&vm->userptr.notifier_lock);
723                 return true;
724         }
725
726         /*
727          * Tell exec and rebind worker they need to repin and rebind this
728          * userptr.
729          */
730         if (!xe_vm_in_fault_mode(vm) &&
731             !(vma->gpuva.flags & XE_VMA_DESTROYED) && vma->tile_present) {
732                 spin_lock(&vm->userptr.invalidated_lock);
733                 list_move_tail(&vma->userptr.invalidate_link,
734                                &vm->userptr.invalidated);
735                 spin_unlock(&vm->userptr.invalidated_lock);
736         }
737
738         up_write(&vm->userptr.notifier_lock);
739
740         /*
741          * Preempt fences turn into schedule disables, pipeline these.
742          * Note that even in fault mode, we need to wait for binds and
743          * unbinds to complete, and those are attached as BOOKKEEP fences
744          * to the vm.
745          */
746         dma_resv_iter_begin(&cursor, xe_vm_resv(vm),
747                             DMA_RESV_USAGE_BOOKKEEP);
748         dma_resv_for_each_fence_unlocked(&cursor, fence)
749                 dma_fence_enable_sw_signaling(fence);
750         dma_resv_iter_end(&cursor);
751
752         err = dma_resv_wait_timeout(xe_vm_resv(vm),
753                                     DMA_RESV_USAGE_BOOKKEEP,
754                                     false, MAX_SCHEDULE_TIMEOUT);
755         XE_WARN_ON(err <= 0);
756
757         if (xe_vm_in_fault_mode(vm)) {
758                 err = xe_vm_invalidate_vma(vma);
759                 XE_WARN_ON(err);
760         }
761
762         trace_xe_vma_userptr_invalidate_complete(vma);
763
764         return true;
765 }
766
767 static const struct mmu_interval_notifier_ops vma_userptr_notifier_ops = {
768         .invalidate = vma_userptr_invalidate,
769 };
770
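/**
 * xe_vm_userptr_pin() - Pin all invalidated userptrs of a VM
 * @vm: The VM.
 *
 * Moves all invalidated userptr vmas to the repin list, repins their pages
 * and, on success, puts them on the VM's rebind list so the next rebind
 * picks them up. Must be called with vm->lock held for writing.
 *
 * Return: 0 on success, negative error code on failure.
 */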
771 int xe_vm_userptr_pin(struct xe_vm *vm)
772 {
773         struct xe_vma *vma, *next;
774         int err = 0;
775         LIST_HEAD(tmp_evict);
776
777         lockdep_assert_held_write(&vm->lock);
778
779         /* Collect invalidated userptrs */
780         spin_lock(&vm->userptr.invalidated_lock);
781         list_for_each_entry_safe(vma, next, &vm->userptr.invalidated,
782                                  userptr.invalidate_link) {
783                 list_del_init(&vma->userptr.invalidate_link);
784                 if (list_empty(&vma->combined_links.userptr))
785                         list_move_tail(&vma->combined_links.userptr,
786                                        &vm->userptr.repin_list);
787         }
788         spin_unlock(&vm->userptr.invalidated_lock);
789
790         /* Pin and move to temporary list */
791         list_for_each_entry_safe(vma, next, &vm->userptr.repin_list,
792                                  combined_links.userptr) {
793                 err = xe_vma_userptr_pin_pages(vma);
794                 if (err < 0)
795                         goto out_err;
796
797                 list_move_tail(&vma->combined_links.userptr, &tmp_evict);
798         }
799
800         /* Take lock and move to rebind_list for rebinding. */
801         err = dma_resv_lock_interruptible(xe_vm_resv(vm), NULL);
802         if (err)
803                 goto out_err;
804
805         list_for_each_entry_safe(vma, next, &tmp_evict, combined_links.userptr)
806                 list_move_tail(&vma->combined_links.rebind, &vm->rebind_list);
807
808         dma_resv_unlock(xe_vm_resv(vm));
809
810         return 0;
811
812 out_err:
813         list_splice_tail(&tmp_evict, &vm->userptr.repin_list);
814
815         return err;
816 }
817
818 /**
819  * xe_vm_userptr_check_repin() - Check whether the VM might have userptrs
820  * that need repinning.
821  * @vm: The VM.
822  *
823  * This function does an advisory check for whether the VM has userptrs that
824  * need repinning.
825  *
826  * Return: 0 if there are no indications of userptrs needing repinning,
827  * -EAGAIN if there are.
828  */
829 int xe_vm_userptr_check_repin(struct xe_vm *vm)
830 {
831         return (list_empty_careful(&vm->userptr.repin_list) &&
832                 list_empty_careful(&vm->userptr.invalidated)) ? 0 : -EAGAIN;
833 }
834
835 static struct dma_fence *
836 xe_vm_bind_vma(struct xe_vma *vma, struct xe_engine *e,
837                struct xe_sync_entry *syncs, u32 num_syncs,
838                bool first_op, bool last_op);
839
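/**
 * xe_vm_rebind() - Rebind all vmas on the VM's rebind list
 * @vm: The VM.
 * @rebind_worker: Whether the caller is the compute-mode rebind worker.
 *
 * Rebinds every vma on vm->rebind_list. For VMs that don't use dma-fences
 * this is a no-op unless called from the rebind worker.
 *
 * Return: The fence of the last rebind, NULL if there was nothing to rebind,
 * or an error pointer on failure.
 */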
840 struct dma_fence *xe_vm_rebind(struct xe_vm *vm, bool rebind_worker)
841 {
842         struct dma_fence *fence = NULL;
843         struct xe_vma *vma, *next;
844
845         lockdep_assert_held(&vm->lock);
846         if (xe_vm_no_dma_fences(vm) && !rebind_worker)
847                 return NULL;
848
849         xe_vm_assert_held(vm);
850         list_for_each_entry_safe(vma, next, &vm->rebind_list,
851                                  combined_links.rebind) {
852                 XE_WARN_ON(!vma->tile_present);
853
854                 list_del_init(&vma->combined_links.rebind);
855                 dma_fence_put(fence);
856                 if (rebind_worker)
857                         trace_xe_vma_rebind_worker(vma);
858                 else
859                         trace_xe_vma_rebind_exec(vma);
860                 fence = xe_vm_bind_vma(vma, NULL, NULL, 0, false, false);
861                 if (IS_ERR(fence))
862                         return fence;
863         }
864
865         return fence;
866 }
867
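/*
 * Allocate and initialize a vma for the given range. Exactly one backing type
 * is set up: a BO mapping, a userptr (no bo, !is_null) or a null/sparse
 * mapping. For userptrs an mmu interval notifier is registered; for BOs the
 * vma is linked to the gem object via its drm_gpuvm_bo. Returns an ERR_PTR
 * on failure.
 */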
868 static struct xe_vma *xe_vma_create(struct xe_vm *vm,
869                                     struct xe_bo *bo,
870                                     u64 bo_offset_or_userptr,
871                                     u64 start, u64 end,
872                                     bool read_only,
873                                     bool is_null,
874                                     u8 tile_mask)
875 {
876         struct xe_vma *vma;
877         struct xe_tile *tile;
878         u8 id;
879
880         XE_BUG_ON(start >= end);
881         XE_BUG_ON(end >= vm->size);
882
883         if (!bo && !is_null)    /* userptr */
884                 vma = kzalloc(sizeof(*vma), GFP_KERNEL);
885         else
886                 vma = kzalloc(sizeof(*vma) - sizeof(struct xe_userptr),
887                               GFP_KERNEL);
888         if (!vma) {
889                 vma = ERR_PTR(-ENOMEM);
890                 return vma;
891         }
892
893         INIT_LIST_HEAD(&vma->combined_links.rebind);
894         INIT_LIST_HEAD(&vma->notifier.rebind_link);
895         INIT_LIST_HEAD(&vma->extobj.link);
896
897         INIT_LIST_HEAD(&vma->gpuva.gem.entry);
898         vma->gpuva.vm = &vm->gpuvm;
899         vma->gpuva.va.addr = start;
900         vma->gpuva.va.range = end - start + 1;
901         if (read_only)
902                 vma->gpuva.flags |= XE_VMA_READ_ONLY;
903         if (is_null)
904                 vma->gpuva.flags |= DRM_GPUVA_SPARSE;
905
906         if (tile_mask) {
907                 vma->tile_mask = tile_mask;
908         } else {
909                 for_each_tile(tile, vm->xe, id)
910                         vma->tile_mask |= 0x1 << id;
911         }
912
913         if (vm->xe->info.platform == XE_PVC)
914                 vma->gpuva.flags |= XE_VMA_ATOMIC_PTE_BIT;
915
916         if (bo) {
917                 struct drm_gpuvm_bo *vm_bo;
918
919                 xe_bo_assert_held(bo);
920
921                 vm_bo = drm_gpuvm_bo_obtain(vma->gpuva.vm, &bo->ttm.base);
922                 if (IS_ERR(vm_bo)) {
923                         kfree(vma);
924                         return ERR_CAST(vm_bo);
925                 }
926
927                 drm_gem_object_get(&bo->ttm.base);
928                 vma->gpuva.gem.obj = &bo->ttm.base;
929                 vma->gpuva.gem.offset = bo_offset_or_userptr;
930                 drm_gpuva_link(&vma->gpuva, vm_bo);
931                 drm_gpuvm_bo_put(vm_bo);
932         } else /* userptr or null */ {
933                 if (!is_null) {
934                         u64 size = end - start + 1;
935                         int err;
936
937                         INIT_LIST_HEAD(&vma->userptr.invalidate_link);
938                         vma->gpuva.gem.offset = bo_offset_or_userptr;
939
940                         err = mmu_interval_notifier_insert(&vma->userptr.notifier,
941                                                            current->mm,
942                                                            xe_vma_userptr(vma), size,
943                                                            &vma_userptr_notifier_ops);
944                         if (err) {
945                                 kfree(vma);
946                                 vma = ERR_PTR(err);
947                                 return vma;
948                         }
949
950                         vma->userptr.notifier_seq = LONG_MAX;
951                 }
952
953                 xe_vm_get(vm);
954         }
955
956         return vma;
957 }
958
959 static bool vm_remove_extobj(struct xe_vma *vma)
960 {
961         if (!list_empty(&vma->extobj.link)) {
962                 xe_vma_vm(vma)->extobj.entries--;
963                 list_del_init(&vma->extobj.link);
964                 return true;
965         }
966         return false;
967 }
968
969 static void xe_vma_destroy_late(struct xe_vma *vma)
970 {
971         struct xe_vm *vm = xe_vma_vm(vma);
972         struct xe_device *xe = vm->xe;
973         bool read_only = xe_vma_read_only(vma);
974
975         if (xe_vma_is_userptr(vma)) {
976                 if (vma->userptr.sg) {
977                         dma_unmap_sgtable(xe->drm.dev,
978                                           vma->userptr.sg,
979                                           read_only ? DMA_TO_DEVICE :
980                                           DMA_BIDIRECTIONAL, 0);
981                         sg_free_table(vma->userptr.sg);
982                         vma->userptr.sg = NULL;
983                 }
984
985                 /*
986                  * Since userptr pages are not pinned, we can't remove
987                  * the notifier until we're sure the GPU is not accessing
988                  * them anymore.
989                  */
990                 mmu_interval_notifier_remove(&vma->userptr.notifier);
991                 xe_vm_put(vm);
992         } else if (xe_vma_is_null(vma)) {
993                 xe_vm_put(vm);
994         } else {
995                 xe_bo_put(xe_vma_bo(vma));
996         }
997
998         kfree(vma);
999 }
1000
1001 static void vma_destroy_work_func(struct work_struct *w)
1002 {
1003         struct xe_vma *vma =
1004                 container_of(w, struct xe_vma, destroy_work);
1005
1006         xe_vma_destroy_late(vma);
1007 }
1008
1009 static struct xe_vma *
1010 bo_has_vm_references_locked(struct xe_bo *bo, struct xe_vm *vm,
1011                             struct xe_vma *ignore)
1012 {
1013         struct drm_gpuvm_bo *vm_bo;
1014         struct drm_gpuva *va;
1015         struct drm_gem_object *obj = &bo->ttm.base;
1016
1017         xe_bo_assert_held(bo);
1018
1019         drm_gem_for_each_gpuvm_bo(vm_bo, obj) {
1020                 drm_gpuvm_bo_for_each_va(va, vm_bo) {
1021                         struct xe_vma *vma = gpuva_to_vma(va);
1022
1023                         if (vma != ignore && xe_vma_vm(vma) == vm)
1024                                 return vma;
1025                 }
1026         }
1027
1028         return NULL;
1029 }
1030
1031 static bool bo_has_vm_references(struct xe_bo *bo, struct xe_vm *vm,
1032                                  struct xe_vma *ignore)
1033 {
1034         struct ww_acquire_ctx ww;
1035         bool ret;
1036
1037         xe_bo_lock(bo, &ww, 0, false);
1038         ret = !!bo_has_vm_references_locked(bo, vm, ignore);
1039         xe_bo_unlock(bo, &ww);
1040
1041         return ret;
1042 }
1043
1044 static void __vm_insert_extobj(struct xe_vm *vm, struct xe_vma *vma)
1045 {
1046         lockdep_assert_held_write(&vm->lock);
1047
1048         list_add(&vma->extobj.link, &vm->extobj.list);
1049         vm->extobj.entries++;
1050 }
1051
1052 static void vm_insert_extobj(struct xe_vm *vm, struct xe_vma *vma)
1053 {
1054         struct xe_bo *bo = xe_vma_bo(vma);
1055
1056         lockdep_assert_held_write(&vm->lock);
1057
1058         if (bo_has_vm_references(bo, vm, vma))
1059                 return;
1060
1061         __vm_insert_extobj(vm, vma);
1062 }
1063
1064 static void vma_destroy_cb(struct dma_fence *fence,
1065                            struct dma_fence_cb *cb)
1066 {
1067         struct xe_vma *vma = container_of(cb, struct xe_vma, destroy_cb);
1068
1069         INIT_WORK(&vma->destroy_work, vma_destroy_work_func);
1070         queue_work(system_unbound_wq, &vma->destroy_work);
1071 }
1072
1073 static void xe_vma_destroy(struct xe_vma *vma, struct dma_fence *fence)
1074 {
1075         struct xe_vm *vm = xe_vma_vm(vma);
1076
1077         lockdep_assert_held_write(&vm->lock);
1078         XE_BUG_ON(!list_empty(&vma->combined_links.destroy));
1079
1080         if (xe_vma_is_userptr(vma)) {
1081                 XE_WARN_ON(!(vma->gpuva.flags & XE_VMA_DESTROYED));
1082
1083                 spin_lock(&vm->userptr.invalidated_lock);
1084                 list_del(&vma->userptr.invalidate_link);
1085                 spin_unlock(&vm->userptr.invalidated_lock);
1086         } else if (!xe_vma_is_null(vma)) {
1087                 xe_bo_assert_held(xe_vma_bo(vma));
1088
1089                 spin_lock(&vm->notifier.list_lock);
1090                 list_del(&vma->notifier.rebind_link);
1091                 spin_unlock(&vm->notifier.list_lock);
1092
1093                 drm_gpuva_unlink(&vma->gpuva);
1094
1095                 if (!xe_vma_bo(vma)->vm && vm_remove_extobj(vma)) {
1096                         struct xe_vma *other;
1097
1098                         other = bo_has_vm_references_locked(xe_vma_bo(vma), vm, NULL);
1099
1100                         if (other)
1101                                 __vm_insert_extobj(vm, other);
1102                 }
1103         }
1104
1105         xe_vm_assert_held(vm);
1106         if (fence) {
1107                 int ret = dma_fence_add_callback(fence, &vma->destroy_cb,
1108                                                  vma_destroy_cb);
1109
1110                 if (ret) {
1111                         XE_WARN_ON(ret != -ENOENT);
1112                         xe_vma_destroy_late(vma);
1113                 }
1114         } else {
1115                 xe_vma_destroy_late(vma);
1116         }
1117 }
1118
1119 static void xe_vma_destroy_unlocked(struct xe_vma *vma)
1120 {
1121         struct ttm_validate_buffer tv[2];
1122         struct ww_acquire_ctx ww;
1123         struct xe_bo *bo = xe_vma_bo(vma);
1124         LIST_HEAD(objs);
1125         LIST_HEAD(dups);
1126         int err;
1127
1128         memset(tv, 0, sizeof(tv));
1129         tv[0].bo = xe_vm_ttm_bo(xe_vma_vm(vma));
1130         list_add(&tv[0].head, &objs);
1131
1132         if (bo) {
1133                 tv[1].bo = &xe_bo_get(bo)->ttm;
1134                 list_add(&tv[1].head, &objs);
1135         }
1136         err = ttm_eu_reserve_buffers(&ww, &objs, false, &dups);
1137         XE_WARN_ON(err);
1138
1139         xe_vma_destroy(vma, NULL);
1140
1141         ttm_eu_backoff_reservation(&ww, &objs);
1142         if (bo)
1143                 xe_bo_put(bo);
1144 }
1145
1146 struct xe_vma *
1147 xe_vm_find_overlapping_vma(struct xe_vm *vm, u64 start, u64 range)
1148 {
1149         struct drm_gpuva *gpuva;
1150
1151         lockdep_assert_held(&vm->lock);
1152
1153         if (xe_vm_is_closed_or_banned(vm))
1154                 return NULL;
1155
1156         XE_BUG_ON(start + range > vm->size);
1157
1158         gpuva = drm_gpuva_find_first(&vm->gpuvm, start, range);
1159
1160         return gpuva ? gpuva_to_vma(gpuva) : NULL;
1161 }
1162
1163 static int xe_vm_insert_vma(struct xe_vm *vm, struct xe_vma *vma)
1164 {
1165         int err;
1166
1167         XE_BUG_ON(xe_vma_vm(vma) != vm);
1168         lockdep_assert_held(&vm->lock);
1169
1170         err = drm_gpuva_insert(&vm->gpuvm, &vma->gpuva);
1171         XE_WARN_ON(err);        /* Shouldn't be possible */
1172
1173         return err;
1174 }
1175
1176 static void xe_vm_remove_vma(struct xe_vm *vm, struct xe_vma *vma)
1177 {
1178         XE_BUG_ON(xe_vma_vm(vma) != vm);
1179         lockdep_assert_held(&vm->lock);
1180
1181         drm_gpuva_remove(&vma->gpuva);
1182         if (vm->usm.last_fault_vma == vma)
1183                 vm->usm.last_fault_vma = NULL;
1184 }
1185
1186 static struct drm_gpuva_op *xe_vm_op_alloc(void)
1187 {
1188         struct xe_vma_op *op;
1189
1190         op = kzalloc(sizeof(*op), GFP_KERNEL);
1191
1192         if (unlikely(!op))
1193                 return NULL;
1194
1195         return &op->base;
1196 }
1197
1198 static void xe_vm_free(struct drm_gpuvm *gpuvm);
1199
1200 static struct drm_gpuvm_ops gpuvm_ops = {
1201         .op_alloc = xe_vm_op_alloc,
1202         .vm_free = xe_vm_free,
1203 };
1204
1205 static void xe_vma_op_work_func(struct work_struct *w);
1206 static void vm_destroy_work_func(struct work_struct *w);
1207
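/**
 * xe_vm_create() - Allocate and initialize a VM
 * @xe: The xe device.
 * @flags: XE_VM_FLAG_* creation flags.
 *
 * Sets up the GPUVM, the per-tile page-table roots (and scratch tables if
 * requested), the async bind state and, unless this is a migration VM, a
 * per-tile engine used for bind operations.
 *
 * Return: Pointer to the new VM or an ERR_PTR on failure.
 */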
1208 struct xe_vm *xe_vm_create(struct xe_device *xe, u32 flags)
1209 {
1210         struct drm_gem_object *vm_resv_obj;
1211         struct xe_vm *vm;
1212         int err, number_tiles = 0;
1213         struct xe_tile *tile;
1214         u8 id;
1215
1216         vm = kzalloc(sizeof(*vm), GFP_KERNEL);
1217         if (!vm)
1218                 return ERR_PTR(-ENOMEM);
1219
1220         vm->xe = xe;
1221
1222         vm->size = 1ull << xe_pt_shift(xe->info.vm_max_level + 1);
1223
1224         vm->flags = flags;
1225
1226         init_rwsem(&vm->lock);
1227
1228         INIT_LIST_HEAD(&vm->rebind_list);
1229
1230         INIT_LIST_HEAD(&vm->userptr.repin_list);
1231         INIT_LIST_HEAD(&vm->userptr.invalidated);
1232         init_rwsem(&vm->userptr.notifier_lock);
1233         spin_lock_init(&vm->userptr.invalidated_lock);
1234
1235         INIT_LIST_HEAD(&vm->notifier.rebind_list);
1236         spin_lock_init(&vm->notifier.list_lock);
1237
1238         INIT_LIST_HEAD(&vm->async_ops.pending);
1239         INIT_WORK(&vm->async_ops.work, xe_vma_op_work_func);
1240         spin_lock_init(&vm->async_ops.lock);
1241
1242         INIT_WORK(&vm->destroy_work, vm_destroy_work_func);
1243
1244         INIT_LIST_HEAD(&vm->preempt.engines);
1245         vm->preempt.min_run_period_ms = 10;     /* FIXME: Wire up to uAPI */
1246
1247         for_each_tile(tile, xe, id)
1248                 xe_range_fence_tree_init(&vm->rftree[id]);
1249
1250         INIT_LIST_HEAD(&vm->extobj.list);
1251
1252         if (!(flags & XE_VM_FLAG_MIGRATION))
1253                 xe_device_mem_access_get(xe);
1254
1255         vm_resv_obj = drm_gpuvm_resv_object_alloc(&xe->drm);
1256         if (!vm_resv_obj) {
1257                 err = -ENOMEM;
1258                 goto err_no_resv;
1259         }
1260
1261         drm_gpuvm_init(&vm->gpuvm, "Xe VM", 0, &xe->drm, vm_resv_obj,
1262                        0, vm->size, 0, 0, &gpuvm_ops);
1263
1264         drm_gem_object_put(vm_resv_obj);
1265
1266         err = dma_resv_lock_interruptible(xe_vm_resv(vm), NULL);
1267         if (err)
1268                 goto err_close;
1269
1270         if (IS_DGFX(xe) && xe->info.vram_flags & XE_VRAM_FLAGS_NEED64K)
1271                 vm->flags |= XE_VM_FLAG_64K;
1272
1273         for_each_tile(tile, xe, id) {
1274                 if (flags & XE_VM_FLAG_MIGRATION &&
1275                     tile->id != XE_VM_FLAG_TILE_ID(flags))
1276                         continue;
1277
1278                 vm->pt_root[id] = xe_pt_create(vm, tile, xe->info.vm_max_level);
1279                 if (IS_ERR(vm->pt_root[id])) {
1280                         err = PTR_ERR(vm->pt_root[id]);
1281                         vm->pt_root[id] = NULL;
1282                         goto err_unlock_close;
1283                 }
1284         }
1285
1286         if (flags & XE_VM_FLAG_SCRATCH_PAGE) {
1287                 for_each_tile(tile, xe, id) {
1288                         if (!vm->pt_root[id])
1289                                 continue;
1290
1291                         err = xe_pt_create_scratch(xe, tile, vm);
1292                         if (err)
1293                                 goto err_unlock_close;
1294                 }
1295                 vm->batch_invalidate_tlb = true;
1296         }
1297
1298         if (flags & XE_VM_FLAG_COMPUTE_MODE) {
1299                 INIT_WORK(&vm->preempt.rebind_work, preempt_rebind_work_func);
1300                 vm->flags |= XE_VM_FLAG_COMPUTE_MODE;
1301                 vm->batch_invalidate_tlb = false;
1302         }
1303
1304         if (flags & XE_VM_FLAG_ASYNC_BIND_OPS) {
1305                 vm->async_ops.fence.context = dma_fence_context_alloc(1);
1306                 vm->flags |= XE_VM_FLAG_ASYNC_BIND_OPS;
1307         }
1308
1309         /* Fill pt_root after allocating scratch tables */
1310         for_each_tile(tile, xe, id) {
1311                 if (!vm->pt_root[id])
1312                         continue;
1313
1314                 xe_pt_populate_empty(tile, vm, vm->pt_root[id]);
1315         }
1316         dma_resv_unlock(xe_vm_resv(vm));
1317
1318         /* Kernel migration VM shouldn't have a circular loop.. */
1319         if (!(flags & XE_VM_FLAG_MIGRATION)) {
1320                 for_each_tile(tile, xe, id) {
1321                         struct xe_gt *gt = tile->primary_gt;
1322                         struct xe_vm *migrate_vm;
1323                         struct xe_engine *eng;
1324
1325                         if (!vm->pt_root[id])
1326                                 continue;
1327
1328                         migrate_vm = xe_migrate_get_vm(tile->migrate);
1329                         eng = xe_engine_create_class(xe, gt, migrate_vm,
1330                                                      XE_ENGINE_CLASS_COPY,
1331                                                      ENGINE_FLAG_VM);
1332                         xe_vm_put(migrate_vm);
1333                         if (IS_ERR(eng)) {
1334                                 err = PTR_ERR(eng);
1335                                 goto err_close;
1336                         }
1337                         vm->eng[id] = eng;
1338                         number_tiles++;
1339                 }
1340         }
1341
1342         if (number_tiles > 1)
1343                 vm->composite_fence_ctx = dma_fence_context_alloc(1);
1344
1345         mutex_lock(&xe->usm.lock);
1346         if (flags & XE_VM_FLAG_FAULT_MODE)
1347                 xe->usm.num_vm_in_fault_mode++;
1348         else if (!(flags & XE_VM_FLAG_MIGRATION))
1349                 xe->usm.num_vm_in_non_fault_mode++;
1350         mutex_unlock(&xe->usm.lock);
1351
1352         trace_xe_vm_create(vm);
1353
1354         return vm;
1355
1356 err_unlock_close:
1357         dma_resv_unlock(xe_vm_resv(vm));
1358 err_close:
1359         xe_vm_close_and_put(vm);
1360         return ERR_PTR(err);
1361
1362 err_no_resv:
1363         for_each_tile(tile, xe, id)
1364                 xe_range_fence_tree_fini(&vm->rftree[id]);
1365         kfree(vm);
1366         if (!(flags & XE_VM_FLAG_MIGRATION))
1367                 xe_device_mem_access_put(xe);
1368         return ERR_PTR(err);
1369 }
1370
1371 static void flush_async_ops(struct xe_vm *vm)
1372 {
1373         queue_work(system_unbound_wq, &vm->async_ops.work);
1374         flush_work(&vm->async_ops.work);
1375 }
1376
1377 static void vm_error_capture(struct xe_vm *vm, int err,
1378                              u32 op, u64 addr, u64 size)
1379 {
1380         struct drm_xe_vm_bind_op_error_capture capture;
1381         u64 __user *address =
1382                 u64_to_user_ptr(vm->async_ops.error_capture.addr);
1383         bool in_kthread = !current->mm;
1384
1385         capture.error = err;
1386         capture.op = op;
1387         capture.addr = addr;
1388         capture.size = size;
1389
1390         if (in_kthread) {
1391                 if (!mmget_not_zero(vm->async_ops.error_capture.mm))
1392                         goto mm_closed;
1393                 kthread_use_mm(vm->async_ops.error_capture.mm);
1394         }
1395
1396         if (copy_to_user(address, &capture, sizeof(capture)))
1397                 XE_WARN_ON("Copy to user failed");
1398
1399         if (in_kthread) {
1400                 kthread_unuse_mm(vm->async_ops.error_capture.mm);
1401                 mmput(vm->async_ops.error_capture.mm);
1402         }
1403
1404 mm_closed:
1405         wake_up_all(&vm->async_ops.error_capture.wq);
1406 }
1407
1408 static void xe_vm_close(struct xe_vm *vm)
1409 {
1410         down_write(&vm->lock);
1411         vm->size = 0;
1412         up_write(&vm->lock);
1413 }
1414
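/**
 * xe_vm_close_and_put() - Close a VM and drop the creation reference
 * @vm: The VM.
 *
 * Closes the VM to new operations, flushes async bind work and the rebind
 * worker, kills and releases the per-tile bind engines, destroys all vmas
 * and page tables, and finally drops the reference. The actual freeing
 * happens from xe_vm_free() / vm_destroy_work_func() once the last
 * reference is gone.
 */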
1415 void xe_vm_close_and_put(struct xe_vm *vm)
1416 {
1417         LIST_HEAD(contested);
1418         struct ww_acquire_ctx ww;
1419         struct xe_device *xe = vm->xe;
1420         struct xe_tile *tile;
1421         struct xe_vma *vma, *next_vma;
1422         struct drm_gpuva *gpuva, *next;
1423         u8 id;
1424
1425         XE_BUG_ON(vm->preempt.num_engines);
1426
1427         xe_vm_close(vm);
1428         flush_async_ops(vm);
1429         if (xe_vm_in_compute_mode(vm))
1430                 flush_work(&vm->preempt.rebind_work);
1431
1432         for_each_tile(tile, xe, id) {
1433                 if (vm->eng[id]) {
1434                         xe_engine_kill(vm->eng[id]);
1435                         xe_engine_put(vm->eng[id]);
1436                         vm->eng[id] = NULL;
1437                 }
1438         }
1439
1440         down_write(&vm->lock);
1441         xe_vm_lock(vm, &ww, 0, false);
1442         drm_gpuvm_for_each_va_safe(gpuva, next, &vm->gpuvm) {
1443                 vma = gpuva_to_vma(gpuva);
1444
1445                 if (xe_vma_has_no_bo(vma)) {
1446                         down_read(&vm->userptr.notifier_lock);
1447                         vma->gpuva.flags |= XE_VMA_DESTROYED;
1448                         up_read(&vm->userptr.notifier_lock);
1449                 }
1450
1451                 xe_vm_remove_vma(vm, vma);
1452
1453                 /* easy case, remove from VMA? */
1454                 if (xe_vma_has_no_bo(vma) || xe_vma_bo(vma)->vm) {
1455                         list_del_init(&vma->combined_links.rebind);
1456                         xe_vma_destroy(vma, NULL);
1457                         continue;
1458                 }
1459
1460                 list_move_tail(&vma->combined_links.destroy, &contested);
1461         }
1462
1463         /*
1464          * All vm operations will add shared fences to resv.
1465          * The only exception is eviction for a shared object,
1466          * but even so, the unbind when evicted would still
1467          * install a fence to resv. Hence it's safe to
1468          * destroy the pagetables immediately.
1469          */
1470         for_each_tile(tile, xe, id) {
1471                 if (vm->scratch_bo[id]) {
1472                         u32 i;
1473
1474                         xe_bo_unpin(vm->scratch_bo[id]);
1475                         xe_bo_put(vm->scratch_bo[id]);
1476                         for (i = 0; i < vm->pt_root[id]->level; i++)
1477                                 xe_pt_destroy(vm->scratch_pt[id][i], vm->flags,
1478                                               NULL);
1479                 }
1480                 if (vm->pt_root[id]) {
1481                         xe_pt_destroy(vm->pt_root[id], vm->flags, NULL);
1482                         vm->pt_root[id] = NULL;
1483                 }
1484         }
1485         xe_vm_unlock(vm, &ww);
1486
1487         /*
1488          * VM is now dead, cannot re-add nodes to vm->vmas if it's NULL
1489          * Since we hold a refcount to the bo, we can remove and free
1490          * the members safely without locking.
1491          */
1492         list_for_each_entry_safe(vma, next_vma, &contested,
1493                                  combined_links.destroy) {
1494                 list_del_init(&vma->combined_links.destroy);
1495                 xe_vma_destroy_unlocked(vma);
1496         }
1497
1498         if (vm->async_ops.error_capture.addr)
1499                 wake_up_all(&vm->async_ops.error_capture.wq);
1500
1501         XE_WARN_ON(!list_empty(&vm->extobj.list));
1502         up_write(&vm->lock);
1503
1504         mutex_lock(&xe->usm.lock);
1505         if (vm->flags & XE_VM_FLAG_FAULT_MODE)
1506                 xe->usm.num_vm_in_fault_mode--;
1507         else if (!(vm->flags & XE_VM_FLAG_MIGRATION))
1508                 xe->usm.num_vm_in_non_fault_mode--;
1509         mutex_unlock(&xe->usm.lock);
1510
1511         for_each_tile(tile, xe, id)
1512                 xe_range_fence_tree_fini(&vm->rftree[id]);
1513
1514         xe_vm_put(vm);
1515 }
1516
1517 static void vm_destroy_work_func(struct work_struct *w)
1518 {
1519         struct xe_vm *vm =
1520                 container_of(w, struct xe_vm, destroy_work);
1521         struct xe_device *xe = vm->xe;
1522         struct xe_tile *tile;
1523         u8 id;
1524         void *lookup;
1525
1526         /* xe_vm_close_and_put was not called? */
1527         XE_WARN_ON(vm->size);
1528
1529         if (!(vm->flags & XE_VM_FLAG_MIGRATION)) {
1530                 xe_device_mem_access_put(xe);
1531
1532                 if (xe->info.has_asid) {
1533                         mutex_lock(&xe->usm.lock);
1534                         lookup = xa_erase(&xe->usm.asid_to_vm, vm->usm.asid);
1535                         XE_WARN_ON(lookup != vm);
1536                         mutex_unlock(&xe->usm.lock);
1537                 }
1538         }
1539
1540         for_each_tile(tile, xe, id)
1541                 XE_WARN_ON(vm->pt_root[id]);
1542
1543         trace_xe_vm_free(vm);
1544         dma_fence_put(vm->rebind_fence);
1545         kfree(vm);
1546 }
1547
1548 static void xe_vm_free(struct drm_gpuvm *gpuvm)
1549 {
1550         struct xe_vm *vm = container_of(gpuvm, struct xe_vm, gpuvm);
1551
1552         /* To destroy the VM we need to be able to sleep */
1553         queue_work(system_unbound_wq, &vm->destroy_work);
1554 }
1555
1556 struct xe_vm *xe_vm_lookup(struct xe_file *xef, u32 id)
1557 {
1558         struct xe_vm *vm;
1559
1560         mutex_lock(&xef->vm.lock);
1561         vm = xa_load(&xef->vm.xa, id);
1562         if (vm)
1563                 xe_vm_get(vm);
1564         mutex_unlock(&xef->vm.lock);
1565
1566         return vm;
1567 }
1568
1569 u64 xe_vm_pdp4_descriptor(struct xe_vm *vm, struct xe_tile *tile)
1570 {
1571         return xe_pde_encode(vm->pt_root[tile->id]->bo, 0,
1572                              XE_CACHE_WB);
1573 }
1574
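/*
 * Unbind a VMA from every tile it is currently present on. With more than one
 * tile involved, the per-tile unbind fences are wrapped in a dma_fence_array
 * so out-syncs observe a single composite fence. In-syncs are only handed to
 * the first operation and out-syncs are only signaled on the last one, per
 * the caller's first_op/last_op bookkeeping. If the VMA is present on no
 * tile, a signaled stub fence is returned so out-syncs still fire.
 */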
1575 static struct dma_fence *
1576 xe_vm_unbind_vma(struct xe_vma *vma, struct xe_engine *e,
1577                  struct xe_sync_entry *syncs, u32 num_syncs,
1578                  bool first_op, bool last_op)
1579 {
1580         struct xe_tile *tile;
1581         struct dma_fence *fence = NULL;
1582         struct dma_fence **fences = NULL;
1583         struct dma_fence_array *cf = NULL;
1584         struct xe_vm *vm = xe_vma_vm(vma);
1585         int cur_fence = 0, i;
1586         int number_tiles = hweight8(vma->tile_present);
1587         int err;
1588         u8 id;
1589
1590         trace_xe_vma_unbind(vma);
1591
1592         if (number_tiles > 1) {
1593                 fences = kmalloc_array(number_tiles, sizeof(*fences),
1594                                        GFP_KERNEL);
1595                 if (!fences)
1596                         return ERR_PTR(-ENOMEM);
1597         }
1598
1599         for_each_tile(tile, vm->xe, id) {
1600                 if (!(vma->tile_present & BIT(id)))
1601                         goto next;
1602
1603                 fence = __xe_pt_unbind_vma(tile, vma, e, first_op ? syncs : NULL,
1604                                            first_op ? num_syncs : 0);
1605                 if (IS_ERR(fence)) {
1606                         err = PTR_ERR(fence);
1607                         goto err_fences;
1608                 }
1609
1610                 if (fences)
1611                         fences[cur_fence++] = fence;
1612
1613 next:
1614                 if (e && vm->pt_root[id] && !list_empty(&e->multi_gt_list))
1615                         e = list_next_entry(e, multi_gt_list);
1616         }
1617
1618         if (fences) {
1619                 cf = dma_fence_array_create(number_tiles, fences,
1620                                             vm->composite_fence_ctx,
1621                                             vm->composite_fence_seqno++,
1622                                             false);
1623                 if (!cf) {
1624                         --vm->composite_fence_seqno;
1625                         err = -ENOMEM;
1626                         goto err_fences;
1627                 }
1628         }
1629
1630         if (last_op) {
1631                 for (i = 0; i < num_syncs; i++)
1632                         xe_sync_entry_signal(&syncs[i], NULL,
1633                                              cf ? &cf->base : fence);
1634         }
1635
1636         return cf ? &cf->base : !fence ? dma_fence_get_stub() : fence;
1637
1638 err_fences:
1639         if (fences) {
1640                 while (cur_fence) {
1641                         /* FIXME: Rewind the previous unbinds? */
1642                         dma_fence_put(fences[--cur_fence]);
1643                 }
1644                 kfree(fences);
1645         }
1646
1647         return ERR_PTR(err);
1648 }
1649
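/*
 * (Re)bind a VMA on every tile in its tile_mask, using the supplied engine or
 * falling back to the VM's per-tile default engine. __xe_pt_bind_vma() is
 * passed vma->tile_present & BIT(id) so it can treat an already-present tile
 * as a rebind. Fence handling mirrors xe_vm_unbind_vma(): a composite fence
 * for multi-tile binds, in-syncs on the first operation, out-syncs signaled
 * on the last.
 */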
1650 static struct dma_fence *
1651 xe_vm_bind_vma(struct xe_vma *vma, struct xe_engine *e,
1652                struct xe_sync_entry *syncs, u32 num_syncs,
1653                bool first_op, bool last_op)
1654 {
1655         struct xe_tile *tile;
1656         struct dma_fence *fence;
1657         struct dma_fence **fences = NULL;
1658         struct dma_fence_array *cf = NULL;
1659         struct xe_vm *vm = xe_vma_vm(vma);
1660         int cur_fence = 0, i;
1661         int number_tiles = hweight8(vma->tile_mask);
1662         int err;
1663         u8 id;
1664
1665         trace_xe_vma_bind(vma);
1666
1667         if (number_tiles > 1) {
1668                 fences = kmalloc_array(number_tiles, sizeof(*fences),
1669                                        GFP_KERNEL);
1670                 if (!fences)
1671                         return ERR_PTR(-ENOMEM);
1672         }
1673
1674         for_each_tile(tile, vm->xe, id) {
1675                 if (!(vma->tile_mask & BIT(id)))
1676                         goto next;
1677
1678                 fence = __xe_pt_bind_vma(tile, vma, e ? e : vm->eng[id],
1679                                          first_op ? syncs : NULL,
1680                                          first_op ? num_syncs : 0,
1681                                          vma->tile_present & BIT(id));
1682                 if (IS_ERR(fence)) {
1683                         err = PTR_ERR(fence);
1684                         goto err_fences;
1685                 }
1686
1687                 if (fences)
1688                         fences[cur_fence++] = fence;
1689
1690 next:
1691                 if (e && vm->pt_root[id] && !list_empty(&e->multi_gt_list))
1692                         e = list_next_entry(e, multi_gt_list);
1693         }
1694
1695         if (fences) {
1696                 cf = dma_fence_array_create(number_tiles, fences,
1697                                             vm->composite_fence_ctx,
1698                                             vm->composite_fence_seqno++,
1699                                             false);
1700                 if (!cf) {
1701                         --vm->composite_fence_seqno;
1702                         err = -ENOMEM;
1703                         goto err_fences;
1704                 }
1705         }
1706
1707         if (last_op) {
1708                 for (i = 0; i < num_syncs; i++)
1709                         xe_sync_entry_signal(&syncs[i], NULL,
1710                                              cf ? &cf->base : fence);
1711         }
1712
1713         return cf ? &cf->base : fence;
1714
1715 err_fences:
1716         if (fences) {
1717                 while (cur_fence) {
1718                         /* FIXME: Rewind the previous binds? */
1719                         dma_fence_put(fences[--cur_fence]);
1720                 }
1721                 kfree(fences);
1722         }
1723
1724         return ERR_PTR(err);
1725 }
1726
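/*
 * Fence published to user out-syncs for asynchronous bind operations: it is
 * signaled from async_op_fence_cb() once the underlying bind/unbind fence
 * signals, propagating that fence's error. For VMs that use dma-fences,
 * xe_vm_async_fence_wait_start() can additionally wait until the async
 * worker has actually started the operation.
 */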
1727 struct async_op_fence {
1728         struct dma_fence fence;
1729         struct dma_fence *wait_fence;
1730         struct dma_fence_cb cb;
1731         struct xe_vm *vm;
1732         wait_queue_head_t wq;
1733         bool started;
1734 };
1735
1736 static const char *async_op_fence_get_driver_name(struct dma_fence *dma_fence)
1737 {
1738         return "xe";
1739 }
1740
1741 static const char *
1742 async_op_fence_get_timeline_name(struct dma_fence *dma_fence)
1743 {
1744         return "async_op_fence";
1745 }
1746
1747 static const struct dma_fence_ops async_op_fence_ops = {
1748         .get_driver_name = async_op_fence_get_driver_name,
1749         .get_timeline_name = async_op_fence_get_timeline_name,
1750 };
1751
1752 static void async_op_fence_cb(struct dma_fence *fence, struct dma_fence_cb *cb)
1753 {
1754         struct async_op_fence *afence =
1755                 container_of(cb, struct async_op_fence, cb);
1756
1757         afence->fence.error = afence->wait_fence->error;
1758         dma_fence_signal(&afence->fence);
1759         xe_vm_put(afence->vm);
1760         dma_fence_put(afence->wait_fence);
1761         dma_fence_put(&afence->fence);
1762 }
1763
1764 static void add_async_op_fence_cb(struct xe_vm *vm,
1765                                   struct dma_fence *fence,
1766                                   struct async_op_fence *afence)
1767 {
1768         int ret;
1769
1770         if (!xe_vm_no_dma_fences(vm)) {
1771                 afence->started = true;
1772                 smp_wmb();
1773                 wake_up_all(&afence->wq);
1774         }
1775
1776         afence->wait_fence = dma_fence_get(fence);
1777         afence->vm = xe_vm_get(vm);
1778         dma_fence_get(&afence->fence);
1779         ret = dma_fence_add_callback(fence, &afence->cb, async_op_fence_cb);
1780         if (ret == -ENOENT) {
1781                 afence->fence.error = afence->wait_fence->error;
1782                 dma_fence_signal(&afence->fence);
1783         }
1784         if (ret) {
1785                 xe_vm_put(vm);
1786                 dma_fence_put(afence->wait_fence);
1787                 dma_fence_put(&afence->fence);
1788         }
1789         XE_WARN_ON(ret && ret != -ENOENT);
1790 }
1791
1792 int xe_vm_async_fence_wait_start(struct dma_fence *fence)
1793 {
1794         if (fence->ops == &async_op_fence_ops) {
1795                 struct async_op_fence *afence =
1796                         container_of(fence, struct async_op_fence, fence);
1797
1798                 XE_BUG_ON(xe_vm_no_dma_fences(afence->vm));
1799
1800                 smp_rmb();
1801                 return wait_event_interruptible(afence->wq, afence->started);
1802         }
1803
1804         return 0;
1805 }
1806
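/*
 * Common bind helper: either performs the bind immediately via
 * xe_vm_bind_vma(), or, for fault-mode VMs binding without the IMMEDIATE
 * flag, skips the bind entirely and just signals the out-syncs with a stub
 * fence (the range is expected to be bound later on demand). Either way an
 * optional async_op_fence is hooked up to the resulting fence.
 */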
1807 static int __xe_vm_bind(struct xe_vm *vm, struct xe_vma *vma,
1808                         struct xe_engine *e, struct xe_sync_entry *syncs,
1809                         u32 num_syncs, struct async_op_fence *afence,
1810                         bool immediate, bool first_op, bool last_op)
1811 {
1812         struct dma_fence *fence;
1813
1814         xe_vm_assert_held(vm);
1815
1816         if (immediate) {
1817                 fence = xe_vm_bind_vma(vma, e, syncs, num_syncs, first_op,
1818                                        last_op);
1819                 if (IS_ERR(fence))
1820                         return PTR_ERR(fence);
1821         } else {
1822                 int i;
1823
1824                 XE_BUG_ON(!xe_vm_in_fault_mode(vm));
1825
1826                 fence = dma_fence_get_stub();
1827                 if (last_op) {
1828                         for (i = 0; i < num_syncs; i++)
1829                                 xe_sync_entry_signal(&syncs[i], NULL, fence);
1830                 }
1831         }
1832         if (afence)
1833                 add_async_op_fence_cb(vm, fence, afence);
1834
1835         dma_fence_put(fence);
1836         return 0;
1837 }
1838
1839 static int xe_vm_bind(struct xe_vm *vm, struct xe_vma *vma, struct xe_engine *e,
1840                       struct xe_bo *bo, struct xe_sync_entry *syncs,
1841                       u32 num_syncs, struct async_op_fence *afence,
1842                       bool immediate, bool first_op, bool last_op)
1843 {
1844         int err;
1845
1846         xe_vm_assert_held(vm);
1847         xe_bo_assert_held(bo);
1848
1849         if (bo && immediate) {
1850                 err = xe_bo_validate(bo, vm, true);
1851                 if (err)
1852                         return err;
1853         }
1854
1855         return __xe_vm_bind(vm, vma, e, syncs, num_syncs, afence, immediate,
1856                             first_op, last_op);
1857 }
1858
1859 static int xe_vm_unbind(struct xe_vm *vm, struct xe_vma *vma,
1860                         struct xe_engine *e, struct xe_sync_entry *syncs,
1861                         u32 num_syncs, struct async_op_fence *afence,
1862                         bool first_op, bool last_op)
1863 {
1864         struct dma_fence *fence;
1865
1866         xe_vm_assert_held(vm);
1867         xe_bo_assert_held(xe_vma_bo(vma));
1868
1869         fence = xe_vm_unbind_vma(vma, e, syncs, num_syncs, first_op, last_op);
1870         if (IS_ERR(fence))
1871                 return PTR_ERR(fence);
1872         if (afence)
1873                 add_async_op_fence_cb(vm, fence, afence);
1874
1875         xe_vma_destroy(vma, fence);
1876         dma_fence_put(fence);
1877
1878         return 0;
1879 }
1880
1881 static int vm_set_error_capture_address(struct xe_device *xe, struct xe_vm *vm,
1882                                         u64 value)
1883 {
1884         if (XE_IOCTL_DBG(xe, !value))
1885                 return -EINVAL;
1886
1887         if (XE_IOCTL_DBG(xe, !(vm->flags & XE_VM_FLAG_ASYNC_BIND_OPS)))
1888                 return -EOPNOTSUPP;
1889
1890         if (XE_IOCTL_DBG(xe, vm->async_ops.error_capture.addr))
1891                 return -EOPNOTSUPP;
1892
1893         vm->async_ops.error_capture.mm = current->mm;
1894         vm->async_ops.error_capture.addr = value;
1895         init_waitqueue_head(&vm->async_ops.error_capture.wq);
1896
1897         return 0;
1898 }
1899
1900 typedef int (*xe_vm_set_property_fn)(struct xe_device *xe, struct xe_vm *vm,
1901                                      u64 value);
1902
1903 static const xe_vm_set_property_fn vm_set_property_funcs[] = {
1904         [XE_VM_PROPERTY_BIND_OP_ERROR_CAPTURE_ADDRESS] =
1905                 vm_set_error_capture_address,
1906 };
1907
1908 static int vm_user_ext_set_property(struct xe_device *xe, struct xe_vm *vm,
1909                                     u64 extension)
1910 {
1911         u64 __user *address = u64_to_user_ptr(extension);
1912         struct drm_xe_ext_vm_set_property ext;
1913         int err;
1914
1915         err = copy_from_user(&ext, address, sizeof(ext));
1916         if (XE_IOCTL_DBG(xe, err))
1917                 return -EFAULT;
1918
1919         if (XE_IOCTL_DBG(xe, ext.property >=
1920                          ARRAY_SIZE(vm_set_property_funcs)) ||
1921             XE_IOCTL_DBG(xe, ext.pad) ||
1922             XE_IOCTL_DBG(xe, ext.reserved[0] || ext.reserved[1]))
1923                 return -EINVAL;
1924
1925         return vm_set_property_funcs[ext.property](xe, vm, ext.value);
1926 }
1927
1928 typedef int (*xe_vm_user_extension_fn)(struct xe_device *xe, struct xe_vm *vm,
1929                                        u64 extension);
1930
1931 static const xe_vm_user_extension_fn vm_user_extension_funcs[] = {
1932         [XE_VM_EXTENSION_SET_PROPERTY] = vm_user_ext_set_property,
1933 };
1934
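/*
 * VM-create extensions form a singly linked chain of struct xe_user_extension
 * headers. vm_user_extensions() below walks that chain recursively,
 * dispatching on ext.name and capping the walk at MAX_USER_EXTENSIONS
 * entries. A userspace caller might chain a single "set property" extension
 * roughly as follows (illustrative sketch only, not taken from this file;
 * the exact struct layouts and the .base member name come from xe_drm.h):
 *
 *	struct drm_xe_ext_vm_set_property ext = {
 *		.base.name = XE_VM_EXTENSION_SET_PROPERTY,
 *		.property = XE_VM_PROPERTY_BIND_OP_ERROR_CAPTURE_ADDRESS,
 *		.value = (uintptr_t)error_capture_buf,	/* hypothetical buffer */
 *	};
 *	struct drm_xe_vm_create create = {
 *		.flags = DRM_XE_VM_CREATE_ASYNC_BIND_OPS,
 *		.extensions = (uintptr_t)&ext,
 *	};
 *	ioctl(fd, DRM_IOCTL_XE_VM_CREATE, &create);
 */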
1935 #define MAX_USER_EXTENSIONS     16
1936 static int vm_user_extensions(struct xe_device *xe, struct xe_vm *vm,
1937                               u64 extensions, int ext_number)
1938 {
1939         u64 __user *address = u64_to_user_ptr(extensions);
1940         struct xe_user_extension ext;
1941         int err;
1942
1943         if (XE_IOCTL_DBG(xe, ext_number >= MAX_USER_EXTENSIONS))
1944                 return -E2BIG;
1945
1946         err = copy_from_user(&ext, address, sizeof(ext));
1947         if (XE_IOCTL_DBG(xe, err))
1948                 return -EFAULT;
1949
1950         if (XE_IOCTL_DBG(xe, ext.pad) ||
1951             XE_IOCTL_DBG(xe, ext.name >=
1952                          ARRAY_SIZE(vm_user_extension_funcs)))
1953                 return -EINVAL;
1954
1955         err = vm_user_extension_funcs[ext.name](xe, vm, extensions);
1956         if (XE_IOCTL_DBG(xe, err))
1957                 return err;
1958
1959         if (ext.next_extension)
1960                 return vm_user_extensions(xe, vm, ext.next_extension,
1961                                           ++ext_number);
1962
1963         return 0;
1964 }
1965
1966 #define ALL_DRM_XE_VM_CREATE_FLAGS (DRM_XE_VM_CREATE_SCRATCH_PAGE | \
1967                                     DRM_XE_VM_CREATE_COMPUTE_MODE | \
1968                                     DRM_XE_VM_CREATE_ASYNC_BIND_OPS | \
1969                                     DRM_XE_VM_CREATE_FAULT_MODE)
1970
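/*
 * VM creation: validate the flag combinations (scratch pages and compute mode
 * are each mutually exclusive with fault mode; fault mode requires USM
 * support and cannot coexist with non-fault-mode VMs on the same device, and
 * vice versa), create the VM, process any extensions, and publish the handle
 * in the per-file xarray. An ASID is also allocated on platforms that have
 * one.
 */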
1971 int xe_vm_create_ioctl(struct drm_device *dev, void *data,
1972                        struct drm_file *file)
1973 {
1974         struct xe_device *xe = to_xe_device(dev);
1975         struct xe_file *xef = to_xe_file(file);
1976         struct drm_xe_vm_create *args = data;
1977         struct xe_vm *vm;
1978         u32 id, asid;
1979         int err;
1980         u32 flags = 0;
1981
1982         if (XE_IOCTL_DBG(xe, args->reserved[0] || args->reserved[1]))
1983                 return -EINVAL;
1984
1985         if (XE_IOCTL_DBG(xe, args->flags & ~ALL_DRM_XE_VM_CREATE_FLAGS))
1986                 return -EINVAL;
1987
1988         if (XE_IOCTL_DBG(xe, args->flags & DRM_XE_VM_CREATE_SCRATCH_PAGE &&
1989                          args->flags & DRM_XE_VM_CREATE_FAULT_MODE))
1990                 return -EINVAL;
1991
1992         if (XE_IOCTL_DBG(xe, args->flags & DRM_XE_VM_CREATE_COMPUTE_MODE &&
1993                          args->flags & DRM_XE_VM_CREATE_FAULT_MODE))
1994                 return -EINVAL;
1995
1996         if (XE_IOCTL_DBG(xe, args->flags & DRM_XE_VM_CREATE_FAULT_MODE &&
1997                          xe_device_in_non_fault_mode(xe)))
1998                 return -EINVAL;
1999
2000         if (XE_IOCTL_DBG(xe, !(args->flags & DRM_XE_VM_CREATE_FAULT_MODE) &&
2001                          xe_device_in_fault_mode(xe)))
2002                 return -EINVAL;
2003
2004         if (XE_IOCTL_DBG(xe, args->flags & DRM_XE_VM_CREATE_FAULT_MODE &&
2005                          !xe->info.supports_usm))
2006                 return -EINVAL;
2007
2008         if (args->flags & DRM_XE_VM_CREATE_SCRATCH_PAGE)
2009                 flags |= XE_VM_FLAG_SCRATCH_PAGE;
2010         if (args->flags & DRM_XE_VM_CREATE_COMPUTE_MODE)
2011                 flags |= XE_VM_FLAG_COMPUTE_MODE;
2012         if (args->flags & DRM_XE_VM_CREATE_ASYNC_BIND_OPS)
2013                 flags |= XE_VM_FLAG_ASYNC_BIND_OPS;
2014         if (args->flags & DRM_XE_VM_CREATE_FAULT_MODE)
2015                 flags |= XE_VM_FLAG_FAULT_MODE;
2016
2017         vm = xe_vm_create(xe, flags);
2018         if (IS_ERR(vm))
2019                 return PTR_ERR(vm);
2020
2021         if (args->extensions) {
2022                 err = vm_user_extensions(xe, vm, args->extensions, 0);
2023                 if (XE_IOCTL_DBG(xe, err)) {
2024                         xe_vm_close_and_put(vm);
2025                         return err;
2026                 }
2027         }
2028
2029         mutex_lock(&xef->vm.lock);
2030         err = xa_alloc(&xef->vm.xa, &id, vm, xa_limit_32b, GFP_KERNEL);
2031         mutex_unlock(&xef->vm.lock);
2032         if (err) {
2033                 xe_vm_close_and_put(vm);
2034                 return err;
2035         }
2036
2037         if (xe->info.has_asid) {
2038                 mutex_lock(&xe->usm.lock);
2039                 err = xa_alloc_cyclic(&xe->usm.asid_to_vm, &asid, vm,
2040                                       XA_LIMIT(0, XE_MAX_ASID - 1),
2041                                       &xe->usm.next_asid, GFP_KERNEL);
2042                 mutex_unlock(&xe->usm.lock);
2043                 if (err) {
2044                         xe_vm_close_and_put(vm);
2045                         return err;
2046                 }
2047                 vm->usm.asid = asid;
2048         }
2049
2050         args->vm_id = id;
2051
2052 #if IS_ENABLED(CONFIG_DRM_XE_DEBUG_MEM)
2053         /* Warning: Security issue - never enable by default */
2054         args->reserved[0] = xe_bo_main_addr(vm->pt_root[0]->bo, XE_PAGE_SIZE);
2055 #endif
2056
2057         return 0;
2058 }
2059
2060 int xe_vm_destroy_ioctl(struct drm_device *dev, void *data,
2061                         struct drm_file *file)
2062 {
2063         struct xe_device *xe = to_xe_device(dev);
2064         struct xe_file *xef = to_xe_file(file);
2065         struct drm_xe_vm_destroy *args = data;
2066         struct xe_vm *vm;
2067         int err = 0;
2068
2069         if (XE_IOCTL_DBG(xe, args->pad) ||
2070             XE_IOCTL_DBG(xe, args->reserved[0] || args->reserved[1]))
2071                 return -EINVAL;
2072
2073         mutex_lock(&xef->vm.lock);
2074         vm = xa_load(&xef->vm.xa, args->vm_id);
2075         if (XE_IOCTL_DBG(xe, !vm))
2076                 err = -ENOENT;
2077         else if (XE_IOCTL_DBG(xe, vm->preempt.num_engines))
2078                 err = -EBUSY;
2079         else
2080                 xa_erase(&xef->vm.xa, args->vm_id);
2081         mutex_unlock(&xef->vm.lock);
2082
2083         if (!err)
2084                 xe_vm_close_and_put(vm);
2085
2086         return err;
2087 }
2088
2089 static const u32 region_to_mem_type[] = {
2090         XE_PL_TT,
2091         XE_PL_VRAM0,
2092         XE_PL_VRAM1,
2093 };
2094
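/*
 * Prefetch: migrate the backing BO (if any) to the requested memory region,
 * then rebind the VMA unless it is already bound and valid on every tile in
 * its tile_mask. If there is nothing to rebind, the out-syncs and the async
 * fence are signaled immediately.
 */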
2095 static int xe_vm_prefetch(struct xe_vm *vm, struct xe_vma *vma,
2096                           struct xe_engine *e, u32 region,
2097                           struct xe_sync_entry *syncs, u32 num_syncs,
2098                           struct async_op_fence *afence, bool first_op,
2099                           bool last_op)
2100 {
2101         int err;
2102
2103         XE_BUG_ON(region >= ARRAY_SIZE(region_to_mem_type));
2104
2105         if (!xe_vma_has_no_bo(vma)) {
2106                 err = xe_bo_migrate(xe_vma_bo(vma), region_to_mem_type[region]);
2107                 if (err)
2108                         return err;
2109         }
2110
2111         if (vma->tile_mask != (vma->tile_present & ~vma->usm.tile_invalidated)) {
2112                 return xe_vm_bind(vm, vma, e, xe_vma_bo(vma), syncs, num_syncs,
2113                                   afence, true, first_op, last_op);
2114         } else {
2115                 int i;
2116
2117                 /* Nothing to do, signal fences now */
2118                 if (last_op) {
2119                         for (i = 0; i < num_syncs; i++)
2120                                 xe_sync_entry_signal(&syncs[i], NULL,
2121                                                      dma_fence_get_stub());
2122                 }
2123                 if (afence)
2124                         dma_fence_signal(&afence->fence);
2125                 return 0;
2126         }
2127 }
2128
2129 #define VM_BIND_OP(op)  (op & 0xffff)
2130
2131 struct ttm_buffer_object *xe_vm_ttm_bo(struct xe_vm *vm)
2132 {
2133         int idx = vm->flags & XE_VM_FLAG_MIGRATION ?
2134                 XE_VM_FLAG_TILE_ID(vm->flags) : 0;
2135
2136         /* Safe to use index 0 as all BOs in the VM share a single dma-resv lock */
2137         return &vm->pt_root[idx]->bo->ttm;
2138 }
2139
2140 static void xe_vm_tv_populate(struct xe_vm *vm, struct ttm_validate_buffer *tv)
2141 {
2142         tv->num_shared = 1;
2143         tv->bo = xe_vm_ttm_bo(vm);
2144 }
2145
2146 static void vm_set_async_error(struct xe_vm *vm, int err)
2147 {
2148         lockdep_assert_held(&vm->lock);
2149         vm->async_ops.error = err;
2150 }
2151
2152 static int vm_bind_ioctl_lookup_vma(struct xe_vm *vm, struct xe_bo *bo,
2153                                     u64 addr, u64 range, u32 op)
2154 {
2155         struct xe_device *xe = vm->xe;
2156         struct xe_vma *vma;
2157         bool async = !!(op & XE_VM_BIND_FLAG_ASYNC);
2158
2159         lockdep_assert_held(&vm->lock);
2160
2161         switch (VM_BIND_OP(op)) {
2162         case XE_VM_BIND_OP_MAP:
2163         case XE_VM_BIND_OP_MAP_USERPTR:
2164                 vma = xe_vm_find_overlapping_vma(vm, addr, range);
2165                 if (XE_IOCTL_DBG(xe, vma && !async))
2166                         return -EBUSY;
2167                 break;
2168         case XE_VM_BIND_OP_UNMAP:
2169         case XE_VM_BIND_OP_PREFETCH:
2170                 vma = xe_vm_find_overlapping_vma(vm, addr, range);
2171                 if (XE_IOCTL_DBG(xe, !vma))
2172                         /* Not an actual error, the IOCTL cleans up and returns 0 */
2173                         return -ENODATA;
2174                 if (XE_IOCTL_DBG(xe, (xe_vma_start(vma) != addr ||
2175                                       xe_vma_end(vma) != addr + range) && !async))
2176                         return -EINVAL;
2177                 break;
2178         case XE_VM_BIND_OP_UNMAP_ALL:
2179                 if (XE_IOCTL_DBG(xe, list_empty(&bo->ttm.base.gpuva.list)))
2180                         /* Not an actual error, the IOCTL cleans up and returns 0 */
2181                         return -ENODATA;
2182                 break;
2183         default:
2184                 XE_BUG_ON("NOT POSSIBLE");
2185                 return -EINVAL;
2186         }
2187
2188         return 0;
2189 }
2190
2191 static void prep_vma_destroy(struct xe_vm *vm, struct xe_vma *vma,
2192                              bool post_commit)
2193 {
2194         down_read(&vm->userptr.notifier_lock);
2195         vma->gpuva.flags |= XE_VMA_DESTROYED;
2196         up_read(&vm->userptr.notifier_lock);
2197         if (post_commit)
2198                 xe_vm_remove_vma(vm, vma);
2199 }
2200
2201 #undef ULL
2202 #define ULL     unsigned long long
2203
2204 #if IS_ENABLED(CONFIG_DRM_XE_DEBUG_VM)
2205 static void print_op(struct xe_device *xe, struct drm_gpuva_op *op)
2206 {
2207         struct xe_vma *vma;
2208
2209         switch (op->op) {
2210         case DRM_GPUVA_OP_MAP:
2211                 vm_dbg(&xe->drm, "MAP: addr=0x%016llx, range=0x%016llx",
2212                        (ULL)op->map.va.addr, (ULL)op->map.va.range);
2213                 break;
2214         case DRM_GPUVA_OP_REMAP:
2215                 vma = gpuva_to_vma(op->remap.unmap->va);
2216                 vm_dbg(&xe->drm, "REMAP:UNMAP: addr=0x%016llx, range=0x%016llx, keep=%d",
2217                        (ULL)xe_vma_start(vma), (ULL)xe_vma_size(vma),
2218                        op->remap.unmap->keep ? 1 : 0);
2219                 if (op->remap.prev)
2220                         vm_dbg(&xe->drm,
2221                                "REMAP:PREV: addr=0x%016llx, range=0x%016llx",
2222                                (ULL)op->remap.prev->va.addr,
2223                                (ULL)op->remap.prev->va.range);
2224                 if (op->remap.next)
2225                         vm_dbg(&xe->drm,
2226                                "REMAP:NEXT: addr=0x%016llx, range=0x%016llx",
2227                                (ULL)op->remap.next->va.addr,
2228                                (ULL)op->remap.next->va.range);
2229                 break;
2230         case DRM_GPUVA_OP_UNMAP:
2231                 vma = gpuva_to_vma(op->unmap.va);
2232                 vm_dbg(&xe->drm, "UNMAP: addr=0x%016llx, range=0x%016llx, keep=%d",
2233                        (ULL)xe_vma_start(vma), (ULL)xe_vma_size(vma),
2234                        op->unmap.keep ? 1 : 0);
2235                 break;
2236         case DRM_GPUVA_OP_PREFETCH:
2237                 vma = gpuva_to_vma(op->prefetch.va);
2238                 vm_dbg(&xe->drm, "PREFETCH: addr=0x%016llx, range=0x%016llx",
2239                        (ULL)xe_vma_start(vma), (ULL)xe_vma_size(vma));
2240                 break;
2241         default:
2242                 XE_BUG_ON("NOT POSSIBLE");
2243         }
2244 }
2245 #else
2246 static void print_op(struct xe_device *xe, struct drm_gpuva_op *op)
2247 {
2248 }
2249 #endif
2250
2251 /*
2252  * Create an operations list from the IOCTL arguments and set up the operation
2253  * fields so the parse and commit steps are decoupled from them. This step can fail.
2254  */
2255 static struct drm_gpuva_ops *
2256 vm_bind_ioctl_ops_create(struct xe_vm *vm, struct xe_bo *bo,
2257                          u64 bo_offset_or_userptr, u64 addr, u64 range,
2258                          u32 operation, u8 tile_mask, u32 region)
2259 {
2260         struct drm_gem_object *obj = bo ? &bo->ttm.base : NULL;
2261         struct ww_acquire_ctx ww;
2262         struct drm_gpuva_ops *ops;
2263         struct drm_gpuva_op *__op;
2264         struct xe_vma_op *op;
2265         struct drm_gpuvm_bo *vm_bo;
2266         int err;
2267
2268         lockdep_assert_held_write(&vm->lock);
2269
2270         vm_dbg(&vm->xe->drm,
2271                "op=%d, addr=0x%016llx, range=0x%016llx, bo_offset_or_userptr=0x%016llx",
2272                VM_BIND_OP(operation), (ULL)addr, (ULL)range,
2273                (ULL)bo_offset_or_userptr);
2274
2275         switch (VM_BIND_OP(operation)) {
2276         case XE_VM_BIND_OP_MAP:
2277         case XE_VM_BIND_OP_MAP_USERPTR:
2278                 ops = drm_gpuvm_sm_map_ops_create(&vm->gpuvm, addr, range,
2279                                                   obj, bo_offset_or_userptr);
2280                 if (IS_ERR(ops))
2281                         return ops;
2282
2283                 drm_gpuva_for_each_op(__op, ops) {
2284                         struct xe_vma_op *op = gpuva_op_to_vma_op(__op);
2285
2286                         op->tile_mask = tile_mask;
2287                         op->map.immediate =
2288                                 operation & XE_VM_BIND_FLAG_IMMEDIATE;
2289                         op->map.read_only =
2290                                 operation & XE_VM_BIND_FLAG_READONLY;
2291                         op->map.is_null = operation & XE_VM_BIND_FLAG_NULL;
2292                 }
2293                 break;
2294         case XE_VM_BIND_OP_UNMAP:
2295                 ops = drm_gpuvm_sm_unmap_ops_create(&vm->gpuvm, addr, range);
2296                 if (IS_ERR(ops))
2297                         return ops;
2298
2299                 drm_gpuva_for_each_op(__op, ops) {
2300                         struct xe_vma_op *op = gpuva_op_to_vma_op(__op);
2301
2302                         op->tile_mask = tile_mask;
2303                 }
2304                 break;
2305         case XE_VM_BIND_OP_PREFETCH:
2306                 ops = drm_gpuvm_prefetch_ops_create(&vm->gpuvm, addr, range);
2307                 if (IS_ERR(ops))
2308                         return ops;
2309
2310                 drm_gpuva_for_each_op(__op, ops) {
2311                         struct xe_vma_op *op = gpuva_op_to_vma_op(__op);
2312
2313                         op->tile_mask = tile_mask;
2314                         op->prefetch.region = region;
2315                 }
2316                 break;
2317         case XE_VM_BIND_OP_UNMAP_ALL:
2318                 XE_BUG_ON(!bo);
2319
2320                 err = xe_bo_lock(bo, &ww, 0, true);
2321                 if (err)
2322                         return ERR_PTR(err);
2323
2324                 vm_bo = drm_gpuvm_bo_find(&vm->gpuvm, obj);
2325                 if (!vm_bo) {
2326                         /* No mappings for this BO: unlock and report nothing to do */
                                xe_bo_unlock(bo, &ww);
                                return ERR_PTR(-ENODATA);
                        }
2327
2328                 ops = drm_gpuvm_bo_unmap_ops_create(vm_bo);
2329                 drm_gpuvm_bo_put(vm_bo);
2330                 xe_bo_unlock(bo, &ww);
2331                 if (IS_ERR(ops))
2332                         return ops;
2333
2334                 drm_gpuva_for_each_op(__op, ops) {
2335                         struct xe_vma_op *op = gpuva_op_to_vma_op(__op);
2336
2337                         op->tile_mask = tile_mask;
2338                 }
2339                 break;
2340         default:
2341                 XE_BUG_ON("NOT POSSIBLE");
2342                 ops = ERR_PTR(-EINVAL);
2343         }
2344
2345 #ifdef TEST_VM_ASYNC_OPS_ERROR
2346         if (operation & FORCE_ASYNC_OP_ERROR) {
2347                 op = list_first_entry_or_null(&ops->list, struct xe_vma_op,
2348                                               base.entry);
2349                 if (op)
2350                         op->inject_error = true;
2351         }
2352 #endif
2353
2354         if (!IS_ERR(ops))
2355                 drm_gpuva_for_each_op(__op, ops)
2356                         print_op(vm->xe, __op);
2357
2358         return ops;
2359 }
2360
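/*
 * Allocate a VMA for a MAP (or REMAP prev/next) operation. BO-backed VMAs are
 * created under the BO lock; userptr VMAs get their pages pinned up front;
 * BOs not private to this VM are added to the VM's external-object list and
 * have the VM's preempt fences attached. Any failure destroys the
 * half-constructed VMA before returning the error.
 */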
2361 static struct xe_vma *new_vma(struct xe_vm *vm, struct drm_gpuva_op_map *op,
2362                               u8 tile_mask, bool read_only, bool is_null)
2363 {
2364         struct xe_bo *bo = op->gem.obj ? gem_to_xe_bo(op->gem.obj) : NULL;
2365         struct xe_vma *vma;
2366         struct ww_acquire_ctx ww;
2367         int err;
2368
2369         lockdep_assert_held_write(&vm->lock);
2370
2371         if (bo) {
2372                 err = xe_bo_lock(bo, &ww, 0, true);
2373                 if (err)
2374                         return ERR_PTR(err);
2375         }
2376         vma = xe_vma_create(vm, bo, op->gem.offset,
2377                             op->va.addr, op->va.addr +
2378                             op->va.range - 1, read_only, is_null,
2379                             tile_mask);
2380         if (bo)
2381                 xe_bo_unlock(bo, &ww);
2382
2383         if (xe_vma_is_userptr(vma)) {
2384                 err = xe_vma_userptr_pin_pages(vma);
2385                 if (err) {
2386                         prep_vma_destroy(vm, vma, false);
2387                         xe_vma_destroy_unlocked(vma);
2388                         return ERR_PTR(err);
2389                 }
2390         } else if (!xe_vma_has_no_bo(vma) && !bo->vm) {
2391                 vm_insert_extobj(vm, vma);
2392                 err = add_preempt_fences(vm, bo);
2393                 if (err) {
2394                         prep_vma_destroy(vm, vma, false);
2395                         xe_vma_destroy_unlocked(vma);
2396                         return ERR_PTR(err);
2397                 }
2398         }
2399
2400         return vma;
2401 }
2402
2403 static u64 xe_vma_max_pte_size(struct xe_vma *vma)
2404 {
2405         if (vma->gpuva.flags & XE_VMA_PTE_1G)
2406                 return SZ_1G;
2407         else if (vma->gpuva.flags & XE_VMA_PTE_2M)
2408                 return SZ_2M;
2409
2410         return SZ_4K;
2411 }
2412
2413 /*
2414  * Parse the operations list and create any resources needed for the operations
2415  * prior to fully committing to them. This setup can fail.
2416  */
2417 static int vm_bind_ioctl_ops_parse(struct xe_vm *vm, struct xe_engine *e,
2418                                    struct drm_gpuva_ops **ops, int num_ops_list,
2419                                    struct xe_sync_entry *syncs, u32 num_syncs,
2420                                    struct list_head *ops_list, bool async)
2421 {
2422         struct xe_vma_op *last_op = NULL;
2423         struct list_head *async_list = NULL;
2424         struct async_op_fence *fence = NULL;
2425         int err, i;
2426
2427         lockdep_assert_held_write(&vm->lock);
2428         XE_BUG_ON(num_ops_list > 1 && !async);
2429
2430         if (num_syncs && async) {
2431                 u64 seqno;
2432
2433                 fence = kmalloc(sizeof(*fence), GFP_KERNEL);
2434                 if (!fence)
2435                         return -ENOMEM;
2436
2437                 seqno = e ? ++e->bind.fence_seqno : ++vm->async_ops.fence.seqno;
2438                 dma_fence_init(&fence->fence, &async_op_fence_ops,
2439                                &vm->async_ops.lock, e ? e->bind.fence_ctx :
2440                                vm->async_ops.fence.context, seqno);
2441
2442                 if (!xe_vm_no_dma_fences(vm)) {
2443                         fence->vm = vm;
2444                         fence->started = false;
2445                         init_waitqueue_head(&fence->wq);
2446                 }
2447         }
2448
2449         for (i = 0; i < num_ops_list; ++i) {
2450                 struct drm_gpuva_ops *__ops = ops[i];
2451                 struct drm_gpuva_op *__op;
2452
2453                 drm_gpuva_for_each_op(__op, __ops) {
2454                         struct xe_vma_op *op = gpuva_op_to_vma_op(__op);
2455                         bool first = !async_list;
2456
2457                         XE_BUG_ON(!first && !async);
2458
2459                         INIT_LIST_HEAD(&op->link);
2460                         if (first)
2461                                 async_list = ops_list;
2462                         list_add_tail(&op->link, async_list);
2463
2464                         if (first) {
2465                                 op->flags |= XE_VMA_OP_FIRST;
2466                                 op->num_syncs = num_syncs;
2467                                 op->syncs = syncs;
2468                         }
2469
2470                         op->engine = e;
2471
2472                         switch (op->base.op) {
2473                         case DRM_GPUVA_OP_MAP:
2474                         {
2475                                 struct xe_vma *vma;
2476
2477                                 vma = new_vma(vm, &op->base.map,
2478                                               op->tile_mask, op->map.read_only,
2479                                               op->map.is_null);
2480                                 if (IS_ERR(vma)) {
2481                                         err = PTR_ERR(vma);
2482                                         goto free_fence;
2483                                 }
2484
2485                                 op->map.vma = vma;
2486                                 break;
2487                         }
2488                         case DRM_GPUVA_OP_REMAP:
2489                         {
2490                                 struct xe_vma *old =
2491                                         gpuva_to_vma(op->base.remap.unmap->va);
2492
2493                                 op->remap.start = xe_vma_start(old);
2494                                 op->remap.range = xe_vma_size(old);
2495
2496                                 if (op->base.remap.prev) {
2497                                         struct xe_vma *vma;
2498                                         bool read_only =
2499                                                 op->base.remap.unmap->va->flags &
2500                                                 XE_VMA_READ_ONLY;
2501                                         bool is_null =
2502                                                 op->base.remap.unmap->va->flags &
2503                                                 DRM_GPUVA_SPARSE;
2504
2505                                         vma = new_vma(vm, op->base.remap.prev,
2506                                                       op->tile_mask, read_only,
2507                                                       is_null);
2508                                         if (IS_ERR(vma)) {
2509                                                 err = PTR_ERR(vma);
2510                                                 goto free_fence;
2511                                         }
2512
2513                                         op->remap.prev = vma;
2514
2515                                         /*
2516                                          * Userptr creates a new SG mapping so
2517                                          * we must also rebind.
2518                                          */
2519                                         op->remap.skip_prev = !xe_vma_is_userptr(old) &&
2520                                                 IS_ALIGNED(xe_vma_end(vma),
2521                                                            xe_vma_max_pte_size(old));
2522                                         if (op->remap.skip_prev) {
2523                                                 op->remap.range -=
2524                                                         xe_vma_end(vma) -
2525                                                         xe_vma_start(old);
2526                                                 op->remap.start = xe_vma_end(vma);
2527                                         }
2528                                 }
2529
2530                                 if (op->base.remap.next) {
2531                                         struct xe_vma *vma;
2532                                         bool read_only =
2533                                                 op->base.remap.unmap->va->flags &
2534                                                 XE_VMA_READ_ONLY;
2535
2536                                         bool is_null =
2537                                                 op->base.remap.unmap->va->flags &
2538                                                 DRM_GPUVA_SPARSE;
2539
2540                                         vma = new_vma(vm, op->base.remap.next,
2541                                                       op->tile_mask, read_only,
2542                                                       is_null);
2543                                         if (IS_ERR(vma)) {
2544                                                 err = PTR_ERR(vma);
2545                                                 goto free_fence;
2546                                         }
2547
2548                                         op->remap.next = vma;
2549
2550                                         /*
2551                                          * Userptr creates a new SG mapping so
2552                                          * we must also rebind.
2553                                          */
2554                                         op->remap.skip_next = !xe_vma_is_userptr(old) &&
2555                                                 IS_ALIGNED(xe_vma_start(vma),
2556                                                            xe_vma_max_pte_size(old));
2557                                         if (op->remap.skip_next)
2558                                                 op->remap.range -=
2559                                                         xe_vma_end(old) -
2560                                                         xe_vma_start(vma);
2561                                 }
2562                                 break;
2563                         }
2564                         case DRM_GPUVA_OP_UNMAP:
2565                         case DRM_GPUVA_OP_PREFETCH:
2566                                 /* Nothing to do */
2567                                 break;
2568                         default:
2569                                 XE_BUG_ON("NOT POSSIBLE");
2570                         }
2571
2572                         last_op = op;
2573                 }
2574
2575                 last_op->ops = __ops;
2576         }
2577
2578         if (!last_op)
2579                 return -ENODATA;
2580
2581         last_op->flags |= XE_VMA_OP_LAST;
2582         last_op->num_syncs = num_syncs;
2583         last_op->syncs = syncs;
2584         last_op->fence = fence;
2585
2586         return 0;
2587
2588 free_fence:
2589         kfree(fence);
2590         return err;
2591 }
2592
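/*
 * Commit step: update the GPUVA state to reflect the operation (insert new
 * VMAs, mark unmapped VMAs destroyed and remove them) without touching the
 * hardware yet. For REMAPs, the unmap start/range computed during parsing
 * (already shrunk for skippable prev/next portions) is written back into the
 * GPUVA so the later partial unbind covers the correct range.
 */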
2593 static int xe_vma_op_commit(struct xe_vm *vm, struct xe_vma_op *op)
2594 {
2595         int err = 0;
2596
2597         lockdep_assert_held_write(&vm->lock);
2598
2599         switch (op->base.op) {
2600         case DRM_GPUVA_OP_MAP:
2601                 err |= xe_vm_insert_vma(vm, op->map.vma);
2602                 break;
2603         case DRM_GPUVA_OP_REMAP:
2604                 prep_vma_destroy(vm, gpuva_to_vma(op->base.remap.unmap->va),
2605                                  true);
2606
2607                 if (op->remap.prev) {
2608                         err |= xe_vm_insert_vma(vm, op->remap.prev);
2609                         if (!err && op->remap.skip_prev)
2610                                 op->remap.prev = NULL;
2611                 }
2612                 if (op->remap.next) {
2613                         err |= xe_vm_insert_vma(vm, op->remap.next);
2614                         if (!err && op->remap.skip_next)
2615                                 op->remap.next = NULL;
2616                 }
2617
2618                 /* Adjust for partial unbind after removing the VMA from the VM */
2619                 if (!err) {
2620                         op->base.remap.unmap->va->va.addr = op->remap.start;
2621                         op->base.remap.unmap->va->va.range = op->remap.range;
2622                 }
2623                 break;
2624         case DRM_GPUVA_OP_UNMAP:
2625                 prep_vma_destroy(vm, gpuva_to_vma(op->base.unmap.va), true);
2626                 break;
2627         case DRM_GPUVA_OP_PREFETCH:
2628                 /* Nothing to do */
2629                 break;
2630         default:
2631                 XE_BUG_ON("NOT POSSIBLE");
2632         }
2633
2634         op->flags |= XE_VMA_OP_COMMITTED;
2635         return err;
2636 }
2637
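/*
 * Execute a single operation against the hardware. The VM's page-table BO,
 * plus the VMA's BO when it is not VM-private, are reserved with
 * ttm_eu_reserve_buffers() around the bind/unbind/prefetch call. If a userptr
 * VMA's pages were invalidated the operation returns -EAGAIN; the pages are
 * then re-pinned and the whole reserve/execute sequence is retried.
 */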
2638 static int __xe_vma_op_execute(struct xe_vm *vm, struct xe_vma *vma,
2639                                struct xe_vma_op *op)
2640 {
2641         LIST_HEAD(objs);
2642         LIST_HEAD(dups);
2643         struct ttm_validate_buffer tv_bo, tv_vm;
2644         struct ww_acquire_ctx ww;
2645         struct xe_bo *vbo;
2646         int err;
2647
2648         lockdep_assert_held_write(&vm->lock);
2649
2650         xe_vm_tv_populate(vm, &tv_vm);
2651         list_add_tail(&tv_vm.head, &objs);
2652         vbo = xe_vma_bo(vma);
2653         if (vbo) {
2654                 /*
2655                  * An unbind can drop the last reference to the BO and
2656                  * the BO is needed for ttm_eu_backoff_reservation so
2657                  * take a reference here.
2658                  */
2659                 xe_bo_get(vbo);
2660
2661                 if (!vbo->vm) {
2662                         tv_bo.bo = &vbo->ttm;
2663                         tv_bo.num_shared = 1;
2664                         list_add(&tv_bo.head, &objs);
2665                 }
2666         }
2667
2668 again:
2669         err = ttm_eu_reserve_buffers(&ww, &objs, true, &dups);
2670         if (err) {
2671                 xe_bo_put(vbo);
2672                 return err;
2673         }
2674
2675         xe_vm_assert_held(vm);
2676         xe_bo_assert_held(xe_vma_bo(vma));
2677
2678         switch (op->base.op) {
2679         case DRM_GPUVA_OP_MAP:
2680                 err = xe_vm_bind(vm, vma, op->engine, xe_vma_bo(vma),
2681                                  op->syncs, op->num_syncs, op->fence,
2682                                  op->map.immediate || !xe_vm_in_fault_mode(vm),
2683                                  op->flags & XE_VMA_OP_FIRST,
2684                                  op->flags & XE_VMA_OP_LAST);
2685                 break;
2686         case DRM_GPUVA_OP_REMAP:
2687         {
2688                 bool prev = !!op->remap.prev;
2689                 bool next = !!op->remap.next;
2690
2691                 if (!op->remap.unmap_done) {
2692                         if (prev || next) {
2693                                 vm->async_ops.munmap_rebind_inflight = true;
2694                                 vma->gpuva.flags |= XE_VMA_FIRST_REBIND;
2695                         }
2696                         err = xe_vm_unbind(vm, vma, op->engine, op->syncs,
2697                                            op->num_syncs,
2698                                            !prev && !next ? op->fence : NULL,
2699                                            op->flags & XE_VMA_OP_FIRST,
2700                                            op->flags & XE_VMA_OP_LAST && !prev &&
2701                                            !next);
2702                         if (err)
2703                                 break;
2704                         op->remap.unmap_done = true;
2705                 }
2706
2707                 if (prev) {
2708                         op->remap.prev->gpuva.flags |= XE_VMA_LAST_REBIND;
2709                         err = xe_vm_bind(vm, op->remap.prev, op->engine,
2710                                          xe_vma_bo(op->remap.prev), op->syncs,
2711                                          op->num_syncs,
2712                                          !next ? op->fence : NULL, true, false,
2713                                          op->flags & XE_VMA_OP_LAST && !next);
2714                         op->remap.prev->gpuva.flags &= ~XE_VMA_LAST_REBIND;
2715                         if (err)
2716                                 break;
2717                         op->remap.prev = NULL;
2718                 }
2719
2720                 if (next) {
2721                         op->remap.next->gpuva.flags |= XE_VMA_LAST_REBIND;
2722                         err = xe_vm_bind(vm, op->remap.next, op->engine,
2723                                          xe_vma_bo(op->remap.next),
2724                                          op->syncs, op->num_syncs,
2725                                          op->fence, true, false,
2726                                          op->flags & XE_VMA_OP_LAST);
2727                         op->remap.next->gpuva.flags &= ~XE_VMA_LAST_REBIND;
2728                         if (err)
2729                                 break;
2730                         op->remap.next = NULL;
2731                 }
2732                 vm->async_ops.munmap_rebind_inflight = false;
2733
2734                 break;
2735         }
2736         case DRM_GPUVA_OP_UNMAP:
2737                 err = xe_vm_unbind(vm, vma, op->engine, op->syncs,
2738                                    op->num_syncs, op->fence,
2739                                    op->flags & XE_VMA_OP_FIRST,
2740                                    op->flags & XE_VMA_OP_LAST);
2741                 break;
2742         case DRM_GPUVA_OP_PREFETCH:
2743                 err = xe_vm_prefetch(vm, vma, op->engine, op->prefetch.region,
2744                                      op->syncs, op->num_syncs, op->fence,
2745                                      op->flags & XE_VMA_OP_FIRST,
2746                                      op->flags & XE_VMA_OP_LAST);
2747                 break;
2748         default:
2749                 XE_BUG_ON("NOT POSSIBLE");
2750         }
2751
2752         ttm_eu_backoff_reservation(&ww, &objs);
2753         if (err == -EAGAIN && xe_vma_is_userptr(vma)) {
2754                 lockdep_assert_held_write(&vm->lock);
2755                 err = xe_vma_userptr_pin_pages(vma);
2756                 if (!err)
2757                         goto again;
2758         }
2759         xe_bo_put(vbo);
2760
2761         if (err)
2762                 trace_xe_vma_fail(vma);
2763
2764         return err;
2765 }
2766
2767 static int xe_vma_op_execute(struct xe_vm *vm, struct xe_vma_op *op)
2768 {
2769         int ret = 0;
2770
2771         lockdep_assert_held_write(&vm->lock);
2772
2773 #ifdef TEST_VM_ASYNC_OPS_ERROR
2774         if (op->inject_error) {
2775                 op->inject_error = false;
2776                 return -ENOMEM;
2777         }
2778 #endif
2779
2780         switch (op->base.op) {
2781         case DRM_GPUVA_OP_MAP:
2782                 ret = __xe_vma_op_execute(vm, op->map.vma, op);
2783                 break;
2784         case DRM_GPUVA_OP_REMAP:
2785         {
2786                 struct xe_vma *vma;
2787
2788                 if (!op->remap.unmap_done)
2789                         vma = gpuva_to_vma(op->base.remap.unmap->va);
2790                 else if (op->remap.prev)
2791                         vma = op->remap.prev;
2792                 else
2793                         vma = op->remap.next;
2794
2795                 ret = __xe_vma_op_execute(vm, vma, op);
2796                 break;
2797         }
2798         case DRM_GPUVA_OP_UNMAP:
2799                 ret = __xe_vma_op_execute(vm, gpuva_to_vma(op->base.unmap.va),
2800                                           op);
2801                 break;
2802         case DRM_GPUVA_OP_PREFETCH:
2803                 ret = __xe_vma_op_execute(vm,
2804                                           gpuva_to_vma(op->base.prefetch.va),
2805                                           op);
2806                 break;
2807         default:
2808                 XE_BUG_ON("NOT POSSIBLE");
2809         }
2810
2811         return ret;
2812 }
2813
2814 static void xe_vma_op_cleanup(struct xe_vm *vm, struct xe_vma_op *op)
2815 {
2816         bool last = op->flags & XE_VMA_OP_LAST;
2817
2818         if (last) {
2819                 while (op->num_syncs--)
2820                         xe_sync_entry_cleanup(&op->syncs[op->num_syncs]);
2821                 kfree(op->syncs);
2822                 if (op->engine)
2823                         xe_engine_put(op->engine);
2824                 if (op->fence)
2825                         dma_fence_put(&op->fence->fence);
2826         }
2827         if (!list_empty(&op->link)) {
2828                 spin_lock_irq(&vm->async_ops.lock);
2829                 list_del(&op->link);
2830                 spin_unlock_irq(&vm->async_ops.lock);
2831         }
2832         if (op->ops)
2833                 drm_gpuva_ops_free(&vm->gpuvm, op->ops);
2834         if (last)
2835                 xe_vm_put(vm);
2836 }
2837
2838 static void xe_vma_op_unwind(struct xe_vm *vm, struct xe_vma_op *op,
2839                              bool post_commit)
2840 {
2841         lockdep_assert_held_write(&vm->lock);
2842
2843         switch (op->base.op) {
2844         case DRM_GPUVA_OP_MAP:
2845                 if (op->map.vma) {
2846                         prep_vma_destroy(vm, op->map.vma, post_commit);
2847                         xe_vma_destroy_unlocked(op->map.vma);
2848                 }
2849                 break;
2850         case DRM_GPUVA_OP_UNMAP:
2851         {
2852                 struct xe_vma *vma = gpuva_to_vma(op->base.unmap.va);
2853
2854                 down_read(&vm->userptr.notifier_lock);
2855                 vma->gpuva.flags &= ~XE_VMA_DESTROYED;
2856                 up_read(&vm->userptr.notifier_lock);
2857                 if (post_commit)
2858                         xe_vm_insert_vma(vm, vma);
2859                 break;
2860         }
2861         case DRM_GPUVA_OP_REMAP:
2862         {
2863                 struct xe_vma *vma = gpuva_to_vma(op->base.remap.unmap->va);
2864
2865                 if (op->remap.prev) {
2866                         prep_vma_destroy(vm, op->remap.prev, post_commit);
2867                         xe_vma_destroy_unlocked(op->remap.prev);
2868                 }
2869                 if (op->remap.next) {
2870                         prep_vma_destroy(vm, op->remap.next, post_commit);
2871                         xe_vma_destroy_unlocked(op->remap.next);
2872                 }
2873                 down_read(&vm->userptr.notifier_lock);
2874                 vma->gpuva.flags &= ~XE_VMA_DESTROYED;
2875                 up_read(&vm->userptr.notifier_lock);
2876                 if (post_commit)
2877                         xe_vm_insert_vma(vm, vma);
2878                 break;
2879         }
2880         case DRM_GPUVA_OP_PREFETCH:
2881                 /* Nothing to do */
2882                 break;
2883         default:
2884                 XE_BUG_ON("NOT POSSIBLE");
2885         }
2886 }
2887
2888 static struct xe_vma_op *next_vma_op(struct xe_vm *vm)
2889 {
2890         return list_first_entry_or_null(&vm->async_ops.pending,
2891                                         struct xe_vma_op, link);
2892 }
2893
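/*
 * Async worker that drains vm->async_ops.pending. Each operation runs under
 * the VM lock; on failure the VM is put into an error state, any error
 * capture waiter is notified, and processing stops until the error is dealt
 * with. Once the VM has been closed, remaining operations are not executed:
 * their VMAs are destroyed directly and pending async fences are signaled so
 * waiters are not left hanging.
 */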
2894 static void xe_vma_op_work_func(struct work_struct *w)
2895 {
2896         struct xe_vm *vm = container_of(w, struct xe_vm, async_ops.work);
2897
2898         for (;;) {
2899                 struct xe_vma_op *op;
2900                 int err;
2901
2902                 if (vm->async_ops.error && !xe_vm_is_closed(vm))
2903                         break;
2904
2905                 spin_lock_irq(&vm->async_ops.lock);
2906                 op = next_vma_op(vm);
2907                 spin_unlock_irq(&vm->async_ops.lock);
2908
2909                 if (!op)
2910                         break;
2911
2912                 if (!xe_vm_is_closed(vm)) {
2913                         down_write(&vm->lock);
2914                         err = xe_vma_op_execute(vm, op);
2915                         if (err) {
2916                                 drm_warn(&vm->xe->drm,
2917                                          "Async VM op(%d) failed with %d",
2918                                          op->base.op, err);
2919                                 vm_set_async_error(vm, err);
2920                                 up_write(&vm->lock);
2921
2922                                 if (vm->async_ops.error_capture.addr)
2923                                         vm_error_capture(vm, err, 0, 0, 0);
2924                                 break;
2925                         }
2926                         up_write(&vm->lock);
2927                 } else {
2928                         struct xe_vma *vma;
2929
2930                         switch (op->base.op) {
2931                         case DRM_GPUVA_OP_REMAP:
2932                                 vma = gpuva_to_vma(op->base.remap.unmap->va);
2933                                 trace_xe_vma_flush(vma);
2934
2935                                 down_write(&vm->lock);
2936                                 xe_vma_destroy_unlocked(vma);
2937                                 up_write(&vm->lock);
2938                                 break;
2939                         case DRM_GPUVA_OP_UNMAP:
2940                                 vma = gpuva_to_vma(op->base.unmap.va);
2941                                 trace_xe_vma_flush(vma);
2942
2943                                 down_write(&vm->lock);
2944                                 xe_vma_destroy_unlocked(vma);
2945                                 up_write(&vm->lock);
2946                                 break;
2947                         default:
2948                                 /* Nothing to do */
2949                                 break;
2950                         }
2951
2952                         if (op->fence && !test_bit(DMA_FENCE_FLAG_SIGNALED_BIT,
2953                                                    &op->fence->fence.flags)) {
2954                                 if (!xe_vm_no_dma_fences(vm)) {
2955                                         op->fence->started = true;
2956                                         wake_up_all(&op->fence->wq);
2957                                 }
2958                                 dma_fence_signal(&op->fence->fence);
2959                         }
2960                 }
2961
2962                 xe_vma_op_cleanup(vm, op);
2963         }
2964 }
2965
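/*
 * Commit the parsed operations to VM state. A synchronous bind executes the
 * final operation immediately; an asynchronous bind installs the bind fence
 * in the user out-syncs (signalling it directly if nothing consumed it),
 * splices the list onto async_ops.pending and kicks the worker unless the VM
 * is already in the async error state.
 */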
2966 static int vm_bind_ioctl_ops_commit(struct xe_vm *vm,
2967                                     struct list_head *ops_list, bool async)
2968 {
2969         struct xe_vma_op *op, *last_op, *next;
2970         int err;
2971
2972         lockdep_assert_held_write(&vm->lock);
2973
2974         list_for_each_entry(op, ops_list, link) {
2975                 last_op = op;
2976                 err = xe_vma_op_commit(vm, op);
2977                 if (err)
2978                         goto unwind;
2979         }
2980
2981         if (!async) {
2982                 err = xe_vma_op_execute(vm, last_op);
2983                 if (err)
2984                         goto unwind;
2985                 xe_vma_op_cleanup(vm, last_op);
2986         } else {
2987                 int i;
2988                 bool installed = false;
2989
2990                 for (i = 0; i < last_op->num_syncs; i++)
2991                         installed |= xe_sync_entry_signal(&last_op->syncs[i],
2992                                                           NULL,
2993                                                           &last_op->fence->fence);
2994                 if (!installed && last_op->fence)
2995                         dma_fence_signal(&last_op->fence->fence);
2996
2997                 spin_lock_irq(&vm->async_ops.lock);
2998                 list_splice_tail(ops_list, &vm->async_ops.pending);
2999                 spin_unlock_irq(&vm->async_ops.lock);
3000
3001                 if (!vm->async_ops.error)
3002                         queue_work(system_unbound_wq, &vm->async_ops.work);
3003         }
3004
3005         return 0;
3006
3007 unwind:
3008         list_for_each_entry_reverse(op, ops_list, link)
3009                 xe_vma_op_unwind(vm, op, op->flags & XE_VMA_OP_COMMITTED);
3010         list_for_each_entry_safe(op, next, ops_list, link)
3011                 xe_vma_op_cleanup(vm, op);
3012
3013         return err;
3014 }
3015
3016 /*
3017  * Unwind the operations list; called after a failure of
3018  * vm_bind_ioctl_ops_create or vm_bind_ioctl_ops_parse.
3019  */
3020 static void vm_bind_ioctl_ops_unwind(struct xe_vm *vm,
3021                                      struct drm_gpuva_ops **ops,
3022                                      int num_ops_list)
3023 {
3024         int i;
3025
3026         for (i = 0; i < num_ops_list; ++i) {
3027                 struct drm_gpuva_ops *__ops = ops[i];
3028                 struct drm_gpuva_op *__op;
3029
3030                 if (!__ops)
3031                         continue;
3032
3033                 drm_gpuva_for_each_op(__op, __ops) {
3034                         struct xe_vma_op *op = gpuva_op_to_vma_op(__op);
3035
3036                         xe_vma_op_unwind(vm, op, false);
3037                 }
3038         }
3039 }
3040
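/*
 * With TEST_VM_ASYNC_OPS_ERROR defined, FORCE_ASYNC_OP_ERROR is accepted as a
 * bind flag so the async error and RESTART handling can be exercised.
 */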
3041 #ifdef TEST_VM_ASYNC_OPS_ERROR
3042 #define SUPPORTED_FLAGS \
3043         (FORCE_ASYNC_OP_ERROR | XE_VM_BIND_FLAG_ASYNC | \
3044          XE_VM_BIND_FLAG_READONLY | XE_VM_BIND_FLAG_IMMEDIATE | \
3045          XE_VM_BIND_FLAG_NULL | 0xffff)
3046 #else
3047 #define SUPPORTED_FLAGS \
3048         (XE_VM_BIND_FLAG_ASYNC | XE_VM_BIND_FLAG_READONLY | \
3049          XE_VM_BIND_FLAG_IMMEDIATE | XE_VM_BIND_FLAG_NULL | 0xffff)
3050 #endif
3051 #define XE_64K_PAGE_MASK 0xffffull
3052
3053 #define MAX_BINDS       512     /* FIXME: Arbitrarily chosen upper limit */
3054
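/*
 * Validate the VM bind arguments from userspace. For num_binds > 1 the
 * bind-op array is copied from the user vector (and must be kfree'd by the
 * caller); otherwise *bind_ops points at the single op embedded in @args.
 * *async reports whether the first op requested asynchronous binds.
 */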
3055 static int vm_bind_ioctl_check_args(struct xe_device *xe,
3056                                     struct drm_xe_vm_bind *args,
3057                                     struct drm_xe_vm_bind_op **bind_ops,
3058                                     bool *async)
3059 {
3060         int err;
3061         int i;
3062
3063         if (XE_IOCTL_DBG(xe, args->extensions) ||
3064             XE_IOCTL_DBG(xe, !args->num_binds) ||
3065             XE_IOCTL_DBG(xe, args->num_binds > MAX_BINDS))
3066                 return -EINVAL;
3067
3068         if (args->num_binds > 1) {
3069                 u64 __user *bind_user =
3070                         u64_to_user_ptr(args->vector_of_binds);
3071
3072                 *bind_ops = kmalloc(sizeof(struct drm_xe_vm_bind_op) *
3073                                     args->num_binds, GFP_KERNEL);
3074                 if (!*bind_ops)
3075                         return -ENOMEM;
3076
3077                 err = __copy_from_user(*bind_ops, bind_user,
3078                                        sizeof(struct drm_xe_vm_bind_op) *
3079                                        args->num_binds);
3080                 if (XE_IOCTL_DBG(xe, err)) {
3081                         err = -EFAULT;
3082                         goto free_bind_ops;
3083                 }
3084         } else {
3085                 *bind_ops = &args->bind;
3086         }
3087
3088         for (i = 0; i < args->num_binds; ++i) {
3089                 u64 range = (*bind_ops)[i].range;
3090                 u64 addr = (*bind_ops)[i].addr;
3091                 u32 op = (*bind_ops)[i].op;
3092                 u32 obj = (*bind_ops)[i].obj;
3093                 u64 obj_offset = (*bind_ops)[i].obj_offset;
3094                 u32 region = (*bind_ops)[i].region;
3095                 bool is_null = op & XE_VM_BIND_FLAG_NULL;
3096
3097                 if (i == 0) {
3098                         *async = !!(op & XE_VM_BIND_FLAG_ASYNC);
3099                 } else if (XE_IOCTL_DBG(xe, !*async) ||
3100                            XE_IOCTL_DBG(xe, !(op & XE_VM_BIND_FLAG_ASYNC)) ||
3101                            XE_IOCTL_DBG(xe, VM_BIND_OP(op) ==
3102                                         XE_VM_BIND_OP_RESTART)) {
3103                         err = -EINVAL;
3104                         goto free_bind_ops;
3105                 }
3106
3107                 if (XE_IOCTL_DBG(xe, !*async &&
3108                                  VM_BIND_OP(op) == XE_VM_BIND_OP_UNMAP_ALL)) {
3109                         err = -EINVAL;
3110                         goto free_bind_ops;
3111                 }
3112
3113                 if (XE_IOCTL_DBG(xe, !*async &&
3114                                  VM_BIND_OP(op) == XE_VM_BIND_OP_PREFETCH)) {
3115                         err = -EINVAL;
3116                         goto free_bind_ops;
3117                 }
3118
3119                 if (XE_IOCTL_DBG(xe, VM_BIND_OP(op) >
3120                                  XE_VM_BIND_OP_PREFETCH) ||
3121                     XE_IOCTL_DBG(xe, op & ~SUPPORTED_FLAGS) ||
3122                     XE_IOCTL_DBG(xe, obj && is_null) ||
3123                     XE_IOCTL_DBG(xe, obj_offset && is_null) ||
3124                     XE_IOCTL_DBG(xe, VM_BIND_OP(op) != XE_VM_BIND_OP_MAP &&
3125                                  is_null) ||
3126                     XE_IOCTL_DBG(xe, !obj &&
3127                                  VM_BIND_OP(op) == XE_VM_BIND_OP_MAP &&
3128                                  !is_null) ||
3129                     XE_IOCTL_DBG(xe, !obj &&
3130                                  VM_BIND_OP(op) == XE_VM_BIND_OP_UNMAP_ALL) ||
3131                     XE_IOCTL_DBG(xe, addr &&
3132                                  VM_BIND_OP(op) == XE_VM_BIND_OP_UNMAP_ALL) ||
3133                     XE_IOCTL_DBG(xe, range &&
3134                                  VM_BIND_OP(op) == XE_VM_BIND_OP_UNMAP_ALL) ||
3135                     XE_IOCTL_DBG(xe, obj &&
3136                                  VM_BIND_OP(op) == XE_VM_BIND_OP_MAP_USERPTR) ||
3137                     XE_IOCTL_DBG(xe, obj &&
3138                                  VM_BIND_OP(op) == XE_VM_BIND_OP_PREFETCH) ||
3139                     XE_IOCTL_DBG(xe, region &&
3140                                  VM_BIND_OP(op) != XE_VM_BIND_OP_PREFETCH) ||
3141                     XE_IOCTL_DBG(xe, !(BIT(region) &
3142                                        xe->info.mem_region_mask)) ||
3143                     XE_IOCTL_DBG(xe, obj &&
3144                                  VM_BIND_OP(op) == XE_VM_BIND_OP_UNMAP)) {
3145                         err = -EINVAL;
3146                         goto free_bind_ops;
3147                 }
3148
3149                 if (XE_IOCTL_DBG(xe, obj_offset & ~PAGE_MASK) ||
3150                     XE_IOCTL_DBG(xe, addr & ~PAGE_MASK) ||
3151                     XE_IOCTL_DBG(xe, range & ~PAGE_MASK) ||
3152                     XE_IOCTL_DBG(xe, !range && VM_BIND_OP(op) !=
3153                                  XE_VM_BIND_OP_RESTART &&
3154                                  VM_BIND_OP(op) != XE_VM_BIND_OP_UNMAP_ALL)) {
3155                         err = -EINVAL;
3156                         goto free_bind_ops;
3157                 }
3158         }
3159
3160         return 0;
3161
3162 free_bind_ops:
3163         if (args->num_binds > 1)
3164                 kfree(*bind_ops);
3165         return err;
3166 }
3167
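/*
 * VM_BIND ioctl handler: validates the arguments, looks up the engine, VM,
 * BOs and syncs, builds a GPUVA operation list for each bind op, and either
 * executes the operations synchronously or queues them for the async worker.
 * A RESTART op instead re-kicks the worker after a previous async error.
 */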
3168 int xe_vm_bind_ioctl(struct drm_device *dev, void *data, struct drm_file *file)
3169 {
3170         struct xe_device *xe = to_xe_device(dev);
3171         struct xe_file *xef = to_xe_file(file);
3172         struct drm_xe_vm_bind *args = data;
3173         struct drm_xe_sync __user *syncs_user;
3174         struct xe_bo **bos = NULL;
3175         struct drm_gpuva_ops **ops = NULL;
3176         struct xe_vm *vm;
3177         struct xe_engine *e = NULL;
3178         u32 num_syncs;
3179         struct xe_sync_entry *syncs = NULL;
3180         struct drm_xe_vm_bind_op *bind_ops;
3181         LIST_HEAD(ops_list);
3182         bool async;
3183         int err;
3184         int i;
3185
3186         err = vm_bind_ioctl_check_args(xe, args, &bind_ops, &async);
3187         if (err)
3188                 return err;
3189
3190         if (args->engine_id) {
3191                 e = xe_engine_lookup(xef, args->engine_id);
3192                 if (XE_IOCTL_DBG(xe, !e)) {
3193                         err = -ENOENT;
3194                         goto free_objs;
3195                 }
3196
3197                 if (XE_IOCTL_DBG(xe, !(e->flags & ENGINE_FLAG_VM))) {
3198                         err = -EINVAL;
3199                         goto put_engine;
3200                 }
3201         }
3202
3203         vm = xe_vm_lookup(xef, args->vm_id);
3204         if (XE_IOCTL_DBG(xe, !vm)) {
3205                 err = -EINVAL;
3206                 goto put_engine;
3207         }
3208
3209         err = down_write_killable(&vm->lock);
3210         if (err)
3211                 goto put_vm;
3212
3213         if (XE_IOCTL_DBG(xe, xe_vm_is_closed_or_banned(vm))) {
3214                 err = -ENOENT;
3215                 goto release_vm_lock;
3216         }
3217
3218         if (VM_BIND_OP(bind_ops[0].op) == XE_VM_BIND_OP_RESTART) {
3219                 if (XE_IOCTL_DBG(xe, !(vm->flags & XE_VM_FLAG_ASYNC_BIND_OPS)))
3220                         err = -EOPNOTSUPP;
3221                 if (XE_IOCTL_DBG(xe, !err && args->num_syncs))
3222                         err = -EINVAL;
3223                 if (XE_IOCTL_DBG(xe, !err && !vm->async_ops.error))
3224                         err = -EPROTO;
3225
3226                 if (!err) {
3227                         trace_xe_vm_restart(vm);
3228                         vm_set_async_error(vm, 0);
3229
3230                         queue_work(system_unbound_wq, &vm->async_ops.work);
3231
3232                         /* Rebinds may have been blocked, give worker a kick */
3233                         if (xe_vm_in_compute_mode(vm))
3234                                 xe_vm_queue_rebind_worker(vm);
3235                 }
3236
3237                 goto release_vm_lock;
3238         }
3239
3240         if (XE_IOCTL_DBG(xe, !vm->async_ops.error &&
3241                          async != !!(vm->flags & XE_VM_FLAG_ASYNC_BIND_OPS))) {
3242                 err = -EOPNOTSUPP;
3243                 goto release_vm_lock;
3244         }
3245
3246         for (i = 0; i < args->num_binds; ++i) {
3247                 u64 range = bind_ops[i].range;
3248                 u64 addr = bind_ops[i].addr;
3249
3250                 if (XE_IOCTL_DBG(xe, range > vm->size) ||
3251                     XE_IOCTL_DBG(xe, addr > vm->size - range)) {
3252                         err = -EINVAL;
3253                         goto release_vm_lock;
3254                 }
3255
3256                 if (bind_ops[i].tile_mask) {
3257                         u64 valid_tiles = BIT(xe->info.tile_count) - 1;
3258
3259                         if (XE_IOCTL_DBG(xe, bind_ops[i].tile_mask &
3260                                          ~valid_tiles)) {
3261                                 err = -EINVAL;
3262                                 goto release_vm_lock;
3263                         }
3264                 }
3265         }
3266
3267         bos = kzalloc(sizeof(*bos) * args->num_binds, GFP_KERNEL);
3268         if (!bos) {
3269                 err = -ENOMEM;
3270                 goto release_vm_lock;
3271         }
3272
3273         ops = kzalloc(sizeof(*ops) * args->num_binds, GFP_KERNEL);
3274         if (!ops) {
3275                 err = -ENOMEM;
3276                 goto release_vm_lock;
3277         }
3278
3279         for (i = 0; i < args->num_binds; ++i) {
3280                 struct drm_gem_object *gem_obj;
3281                 u64 range = bind_ops[i].range;
3282                 u64 addr = bind_ops[i].addr;
3283                 u32 obj = bind_ops[i].obj;
3284                 u64 obj_offset = bind_ops[i].obj_offset;
3285
3286                 if (!obj)
3287                         continue;
3288
3289                 gem_obj = drm_gem_object_lookup(file, obj);
3290                 if (XE_IOCTL_DBG(xe, !gem_obj)) {
3291                         err = -ENOENT;
3292                         goto put_obj;
3293                 }
3294                 bos[i] = gem_to_xe_bo(gem_obj);
3295
3296                 if (XE_IOCTL_DBG(xe, range > bos[i]->size) ||
3297                     XE_IOCTL_DBG(xe, obj_offset >
3298                                  bos[i]->size - range)) {
3299                         err = -EINVAL;
3300                         goto put_obj;
3301                 }
3302
3303                 if (bos[i]->flags & XE_BO_INTERNAL_64K) {
3304                         if (XE_IOCTL_DBG(xe, obj_offset &
3305                                          XE_64K_PAGE_MASK) ||
3306                             XE_IOCTL_DBG(xe, addr & XE_64K_PAGE_MASK) ||
3307                             XE_IOCTL_DBG(xe, range & XE_64K_PAGE_MASK)) {
3308                                 err = -EINVAL;
3309                                 goto put_obj;
3310                         }
3311                 }
3312         }
3313
3314         if (args->num_syncs) {
3315                 syncs = kcalloc(args->num_syncs, sizeof(*syncs), GFP_KERNEL);
3316                 if (!syncs) {
3317                         err = -ENOMEM;
3318                         goto put_obj;
3319                 }
3320         }
3321
3322         syncs_user = u64_to_user_ptr(args->syncs);
3323         for (num_syncs = 0; num_syncs < args->num_syncs; num_syncs++) {
3324                 err = xe_sync_entry_parse(xe, xef, &syncs[num_syncs],
3325                                           &syncs_user[num_syncs], false,
3326                                           xe_vm_no_dma_fences(vm));
3327                 if (err)
3328                         goto free_syncs;
3329         }
3330
3331         /* Do some error checking first to make the unwind easier */
3332         for (i = 0; i < args->num_binds; ++i) {
3333                 u64 range = bind_ops[i].range;
3334                 u64 addr = bind_ops[i].addr;
3335                 u32 op = bind_ops[i].op;
3336
3337                 err = vm_bind_ioctl_lookup_vma(vm, bos[i], addr, range, op);
3338                 if (err)
3339                         goto free_syncs;
3340         }
3341
3342         for (i = 0; i < args->num_binds; ++i) {
3343                 u64 range = bind_ops[i].range;
3344                 u64 addr = bind_ops[i].addr;
3345                 u32 op = bind_ops[i].op;
3346                 u64 obj_offset = bind_ops[i].obj_offset;
3347                 u8 tile_mask = bind_ops[i].tile_mask;
3348                 u32 region = bind_ops[i].region;
3349
3350                 ops[i] = vm_bind_ioctl_ops_create(vm, bos[i], obj_offset,
3351                                                   addr, range, op, tile_mask,
3352                                                   region);
3353                 if (IS_ERR(ops[i])) {
3354                         err = PTR_ERR(ops[i]);
3355                         ops[i] = NULL;
3356                         goto unwind_ops;
3357                 }
3358         }
3359
3360         err = vm_bind_ioctl_ops_parse(vm, e, ops, args->num_binds,
3361                                       syncs, num_syncs, &ops_list, async);
3362         if (err)
3363                 goto unwind_ops;
3364
3365         err = vm_bind_ioctl_ops_commit(vm, &ops_list, async);
3366         up_write(&vm->lock);
3367
3368         for (i = 0; i < args->num_binds; ++i)
3369                 xe_bo_put(bos[i]);
3370
3371         kfree(bos);
3372         kfree(ops);
3373         if (args->num_binds > 1)
3374                 kfree(bind_ops);
3375
3376         return err;
3377
3378 unwind_ops:
3379         vm_bind_ioctl_ops_unwind(vm, ops, args->num_binds);
3380 free_syncs:
3381         for (i = 0; err == -ENODATA && i < num_syncs; i++)
3382                 xe_sync_entry_signal(&syncs[i], NULL, dma_fence_get_stub());
3383         while (num_syncs--)
3384                 xe_sync_entry_cleanup(&syncs[num_syncs]);
3385
3386         kfree(syncs);
3387 put_obj:
3388         for (i = 0; i < args->num_binds; ++i)
3389                 xe_bo_put(bos[i]);
3390 release_vm_lock:
3391         up_write(&vm->lock);
3392 put_vm:
3393         xe_vm_put(vm);
3394 put_engine:
3395         if (e)
3396                 xe_engine_put(e);
3397 free_objs:
3398         kfree(bos);
3399         kfree(ops);
3400         if (args->num_binds > 1)
3401                 kfree(bind_ops);
3402         return err == -ENODATA ? 0 : err;
3403 }
3404
3405 /*
3406  * XXX: Using the TTM wrappers for now, likely can call into dma-resv code
3407  * directly to optimize. Also this likely should be an inline function.
3408  */
3409 int xe_vm_lock(struct xe_vm *vm, struct ww_acquire_ctx *ww,
3410                int num_resv, bool intr)
3411 {
3412         struct ttm_validate_buffer tv_vm;
3413         LIST_HEAD(objs);
3414         LIST_HEAD(dups);
3415
3416         XE_BUG_ON(!ww);
3417
3418         tv_vm.num_shared = num_resv;
3419         tv_vm.bo = xe_vm_ttm_bo(vm);
3420         list_add_tail(&tv_vm.head, &objs);
3421
3422         return ttm_eu_reserve_buffers(ww, &objs, intr, &dups);
3423 }
3424
3425 void xe_vm_unlock(struct xe_vm *vm, struct ww_acquire_ctx *ww)
3426 {
3427         dma_resv_unlock(xe_vm_resv(vm));
3428         ww_acquire_fini(ww);
3429 }
3430
3431 /**
3432  * xe_vm_invalidate_vma - invalidate GPU mappings for VMA without a lock
3433  * @vma: VMA to invalidate
3434  *
3435  * Walks the page-table leaves for this VMA, zeroing the entries it owns,
3436  * invalidates the TLBs, and blocks until the TLB invalidation is
3437  * complete.
3438  *
3439  * Returns 0 for success, negative error code otherwise.
3440  */
3441 int xe_vm_invalidate_vma(struct xe_vma *vma)
3442 {
3443         struct xe_device *xe = xe_vma_vm(vma)->xe;
3444         struct xe_tile *tile;
3445         u32 tile_needs_invalidate = 0;
3446         int seqno[XE_MAX_TILES_PER_DEVICE];
3447         u8 id;
3448         int ret;
3449
3450         XE_BUG_ON(!xe_vm_in_fault_mode(xe_vma_vm(vma)));
3451         XE_WARN_ON(xe_vma_is_null(vma));
3452         trace_xe_vma_usm_invalidate(vma);
3453
3454         /* Check that we don't race with page-table updates */
3455         if (IS_ENABLED(CONFIG_PROVE_LOCKING)) {
3456                 if (xe_vma_is_userptr(vma)) {
3457                         WARN_ON_ONCE(!mmu_interval_check_retry
3458                                      (&vma->userptr.notifier,
3459                                       vma->userptr.notifier_seq));
3460                         WARN_ON_ONCE(!dma_resv_test_signaled(xe_vm_resv(xe_vma_vm(vma)),
3461                                                              DMA_RESV_USAGE_BOOKKEEP));
3462
3463                 } else {
3464                         xe_bo_assert_held(xe_vma_bo(vma));
3465                 }
3466         }
3467
3468         for_each_tile(tile, xe, id) {
3469                 if (xe_pt_zap_ptes(tile, vma)) {
3470                         tile_needs_invalidate |= BIT(id);
3471                         xe_device_wmb(xe);
3472                         /*
3473                          * FIXME: We potentially need to invalidate multiple
3474                          * GTs within the tile
3475                          */
3476                         seqno[id] = xe_gt_tlb_invalidation_vma(tile->primary_gt, NULL, vma);
3477                         if (seqno[id] < 0)
3478                                 return seqno[id];
3479                 }
3480         }
3481
3482         for_each_tile(tile, xe, id) {
3483                 if (tile_needs_invalidate & BIT(id)) {
3484                         ret = xe_gt_tlb_invalidation_wait(tile->primary_gt, seqno[id]);
3485                         if (ret < 0)
3486                                 return ret;
3487                 }
3488         }
3489
3490         vma->usm.tile_invalidated = vma->tile_mask;
3491
3492         return 0;
3493 }
3494
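/*
 * Debug helper: prints the VM's page-table root address for @gt_id and every
 * GPU VA range with its size and backing (NULL, userptr, VRAM or SYS) to @p.
 */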
3495 int xe_analyze_vm(struct drm_printer *p, struct xe_vm *vm, int gt_id)
3496 {
3497         struct drm_gpuva *gpuva;
3498         bool is_vram;
3499         uint64_t addr;
3500
3501         if (!down_read_trylock(&vm->lock)) {
3502                 drm_printf(p, " Failed to acquire VM lock to dump capture\n");
3503                 return 0;
3504         }
3505         if (vm->pt_root[gt_id]) {
3506                 addr = xe_bo_addr(vm->pt_root[gt_id]->bo, 0, XE_PAGE_SIZE,
3507                                   &is_vram);
3508                 drm_printf(p, " VM root: A:0x%llx %s\n", addr, is_vram ? "VRAM" : "SYS");
3509         }
3510
3511         drm_gpuvm_for_each_va(gpuva, &vm->gpuvm) {
3512                 struct xe_vma *vma = gpuva_to_vma(gpuva);
3513                 bool is_userptr = xe_vma_is_userptr(vma);
3514                 bool is_null = xe_vma_is_null(vma);
3515
3516                 if (is_null) {
3517                         addr = 0;
3518                 } else if (is_userptr) {
3519                         struct xe_res_cursor cur;
3520
3521                         if (vma->userptr.sg) {
3522                                 xe_res_first_sg(vma->userptr.sg, 0, XE_PAGE_SIZE,
3523                                                 &cur);
3524                                 addr = xe_res_dma(&cur);
3525                         } else {
3526                                 addr = 0;
3527                         }
3528                 } else {
3529                         addr = __xe_bo_addr(xe_vma_bo(vma), 0, XE_PAGE_SIZE, &is_vram);
3530                 }
3531                 drm_printf(p, " [%016llx-%016llx] S:0x%016llx A:%016llx %s\n",
3532                            xe_vma_start(vma), xe_vma_end(vma) - 1,
3533                            xe_vma_size(vma),
3534                            addr, is_null ? "NULL" : is_userptr ? "USR" :
3535                            is_vram ? "VRAM" : "SYS");
3536         }
3537         up_read(&vm->lock);
3538
3539         return 0;
3540 }