drm/xe: Memory allocations are tile-based, not GT-based
drivers/gpu/drm/xe/xe_vm.c
1 // SPDX-License-Identifier: MIT
2 /*
3  * Copyright © 2021 Intel Corporation
4  */
5
6 #include "xe_vm.h"
7
8 #include <linux/dma-fence-array.h>
9
10 #include <drm/ttm/ttm_execbuf_util.h>
11 #include <drm/ttm/ttm_tt.h>
12 #include <drm/xe_drm.h>
13 #include <linux/delay.h>
14 #include <linux/kthread.h>
15 #include <linux/mm.h>
16 #include <linux/swap.h>
17
18 #include "xe_bo.h"
19 #include "xe_device.h"
20 #include "xe_engine.h"
21 #include "xe_gt.h"
22 #include "xe_gt_pagefault.h"
23 #include "xe_gt_tlb_invalidation.h"
24 #include "xe_migrate.h"
25 #include "xe_pm.h"
26 #include "xe_preempt_fence.h"
27 #include "xe_pt.h"
28 #include "xe_res_cursor.h"
29 #include "xe_sync.h"
30 #include "xe_trace.h"
31
32 #define TEST_VM_ASYNC_OPS_ERROR
33
34 /**
35  * xe_vma_userptr_check_repin() - Advisory check for repin needed
36  * @vma: The userptr vma
37  *
38  * Check if the userptr vma has been invalidated since last successful
39  * repin. The check is advisory only and the function can be called
40  * without the vm->userptr.notifier_lock held. There is no guarantee that the
41  * vma userptr will remain valid after a lockless check, so typically
42  * the call needs to be followed by a proper check under the notifier_lock.
43  *
44  * Return: 0 if userptr vma is valid, -EAGAIN otherwise; repin recommended.
45  */
46 int xe_vma_userptr_check_repin(struct xe_vma *vma)
47 {
48         return mmu_interval_check_retry(&vma->userptr.notifier,
49                                         vma->userptr.notifier_seq) ?
50                 -EAGAIN : 0;
51 }
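
/*
 * Illustrative usage sketch (editor addition, not part of the original file;
 * the surrounding control flow is hypothetical): a caller holding vm->lock
 * typically repins on the advisory check and then re-checks under the
 * notifier_lock before committing GPU page-table updates, roughly:
 *
 *	lockdep_assert_held(&vm->lock);
 *
 *	if (xe_vma_userptr_check_repin(vma) == -EAGAIN) {
 *		err = xe_vma_userptr_pin_pages(vma);
 *		if (err < 0)
 *			return err;
 *	}
 *
 *	down_read(&vm->userptr.notifier_lock);
 *	if (xe_vma_userptr_check_repin(vma) == -EAGAIN) {
 *		up_read(&vm->userptr.notifier_lock);
 *		return -EAGAIN;		// caller retries the repin
 *	}
 *	// ... install bindings while the notifier_lock is held ...
 *	up_read(&vm->userptr.notifier_lock);
 */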
52
53 int xe_vma_userptr_pin_pages(struct xe_vma *vma)
54 {
55         struct xe_vm *vm = vma->vm;
56         struct xe_device *xe = vm->xe;
57         const unsigned long num_pages =
58                 (vma->end - vma->start + 1) >> PAGE_SHIFT;
59         struct page **pages;
60         bool in_kthread = !current->mm;
61         unsigned long notifier_seq;
62         int pinned, ret, i;
63         bool read_only = vma->pte_flags & XE_PTE_READ_ONLY;
64
65         lockdep_assert_held(&vm->lock);
66         XE_BUG_ON(!xe_vma_is_userptr(vma));
67 retry:
68         if (vma->destroyed)
69                 return 0;
70
71         notifier_seq = mmu_interval_read_begin(&vma->userptr.notifier);
72         if (notifier_seq == vma->userptr.notifier_seq)
73                 return 0;
74
75         pages = kvmalloc_array(num_pages, sizeof(*pages), GFP_KERNEL);
76         if (!pages)
77                 return -ENOMEM;
78
79         if (vma->userptr.sg) {
80                 dma_unmap_sgtable(xe->drm.dev,
81                                   vma->userptr.sg,
82                                   read_only ? DMA_TO_DEVICE :
83                                   DMA_BIDIRECTIONAL, 0);
84                 sg_free_table(vma->userptr.sg);
85                 vma->userptr.sg = NULL;
86         }
87
88         pinned = ret = 0;
89         if (in_kthread) {
90                 if (!mmget_not_zero(vma->userptr.notifier.mm)) {
91                         ret = -EFAULT;
92                         goto mm_closed;
93                 }
94                 kthread_use_mm(vma->userptr.notifier.mm);
95         }
96
97         while (pinned < num_pages) {
98                 ret = get_user_pages_fast(vma->userptr.ptr + pinned * PAGE_SIZE,
99                                           num_pages - pinned,
100                                           read_only ? 0 : FOLL_WRITE,
101                                           &pages[pinned]);
102                 if (ret < 0) {
103                         if (in_kthread)
104                                 ret = 0;
105                         break;
106                 }
107
108                 pinned += ret;
109                 ret = 0;
110         }
111
112         if (in_kthread) {
113                 kthread_unuse_mm(vma->userptr.notifier.mm);
114                 mmput(vma->userptr.notifier.mm);
115         }
116 mm_closed:
117         if (ret)
118                 goto out;
119
120         ret = sg_alloc_table_from_pages_segment(&vma->userptr.sgt, pages,
121                                                 pinned, 0,
122                                                 (u64)pinned << PAGE_SHIFT,
123                                                 xe_sg_segment_size(xe->drm.dev),
124                                                 GFP_KERNEL);
125         if (ret) {
126                 vma->userptr.sg = NULL;
127                 goto out;
128         }
129         vma->userptr.sg = &vma->userptr.sgt;
130
131         ret = dma_map_sgtable(xe->drm.dev, vma->userptr.sg,
132                               read_only ? DMA_TO_DEVICE :
133                               DMA_BIDIRECTIONAL,
134                               DMA_ATTR_SKIP_CPU_SYNC |
135                               DMA_ATTR_NO_KERNEL_MAPPING);
136         if (ret) {
137                 sg_free_table(vma->userptr.sg);
138                 vma->userptr.sg = NULL;
139                 goto out;
140         }
141
142         for (i = 0; i < pinned; ++i) {
143                 if (!read_only) {
144                         lock_page(pages[i]);
145                         set_page_dirty(pages[i]);
146                         unlock_page(pages[i]);
147                 }
148
149                 mark_page_accessed(pages[i]);
150         }
151
152 out:
153         release_pages(pages, pinned);
154         kvfree(pages);
155
156         if (!(ret < 0)) {
157                 vma->userptr.notifier_seq = notifier_seq;
158                 if (xe_vma_userptr_check_repin(vma) == -EAGAIN)
159                         goto retry;
160         }
161
162         return ret < 0 ? ret : 0;
163 }
164
165 static bool preempt_fences_waiting(struct xe_vm *vm)
166 {
167         struct xe_engine *e;
168
169         lockdep_assert_held(&vm->lock);
170         xe_vm_assert_held(vm);
171
172         list_for_each_entry(e, &vm->preempt.engines, compute.link) {
173                 if (!e->compute.pfence ||
174                     test_bit(DMA_FENCE_FLAG_ENABLE_SIGNAL_BIT,
175                              &e->compute.pfence->flags)) {
176                         return true;
177                 }
178         }
179
180         return false;
181 }
182
183 static void free_preempt_fences(struct list_head *list)
184 {
185         struct list_head *link, *next;
186
187         list_for_each_safe(link, next, list)
188                 xe_preempt_fence_free(to_preempt_fence_from_link(link));
189 }
190
191 static int alloc_preempt_fences(struct xe_vm *vm, struct list_head *list,
192                                 unsigned int *count)
193 {
194         lockdep_assert_held(&vm->lock);
195         xe_vm_assert_held(vm);
196
197         if (*count >= vm->preempt.num_engines)
198                 return 0;
199
200         for (; *count < vm->preempt.num_engines; ++(*count)) {
201                 struct xe_preempt_fence *pfence = xe_preempt_fence_alloc();
202
203                 if (IS_ERR(pfence))
204                         return PTR_ERR(pfence);
205
206                 list_move_tail(xe_preempt_fence_link(pfence), list);
207         }
208
209         return 0;
210 }
211
212 static int wait_for_existing_preempt_fences(struct xe_vm *vm)
213 {
214         struct xe_engine *e;
215
216         xe_vm_assert_held(vm);
217
218         list_for_each_entry(e, &vm->preempt.engines, compute.link) {
219                 if (e->compute.pfence) {
220                         long timeout = dma_fence_wait(e->compute.pfence, false);
221
222                         if (timeout < 0)
223                                 return -ETIME;
224                         dma_fence_put(e->compute.pfence);
225                         e->compute.pfence = NULL;
226                 }
227         }
228
229         return 0;
230 }
231
232 static bool xe_vm_is_idle(struct xe_vm *vm)
233 {
234         struct xe_engine *e;
235
236         xe_vm_assert_held(vm);
237         list_for_each_entry(e, &vm->preempt.engines, compute.link) {
238                 if (!xe_engine_is_idle(e))
239                         return false;
240         }
241
242         return true;
243 }
244
245 static void arm_preempt_fences(struct xe_vm *vm, struct list_head *list)
246 {
247         struct list_head *link;
248         struct xe_engine *e;
249
250         list_for_each_entry(e, &vm->preempt.engines, compute.link) {
251                 struct dma_fence *fence;
252
253                 link = list->next;
254                 XE_BUG_ON(link == list);
255
256                 fence = xe_preempt_fence_arm(to_preempt_fence_from_link(link),
257                                              e, e->compute.context,
258                                              ++e->compute.seqno);
259                 dma_fence_put(e->compute.pfence);
260                 e->compute.pfence = fence;
261         }
262 }
263
264 static int add_preempt_fences(struct xe_vm *vm, struct xe_bo *bo)
265 {
266         struct xe_engine *e;
267         struct ww_acquire_ctx ww;
268         int err;
269
270         err = xe_bo_lock(bo, &ww, vm->preempt.num_engines, true);
271         if (err)
272                 return err;
273
274         list_for_each_entry(e, &vm->preempt.engines, compute.link)
275                 if (e->compute.pfence) {
276                         dma_resv_add_fence(bo->ttm.base.resv,
277                                            e->compute.pfence,
278                                            DMA_RESV_USAGE_BOOKKEEP);
279                 }
280
281         xe_bo_unlock(bo, &ww);
282         return 0;
283 }
284
285 /**
286  * xe_vm_fence_all_extobjs() - Add a fence to vm's external objects' resv
287  * @vm: The vm.
288  * @fence: The fence to add.
289  * @usage: The resv usage for the fence.
290  *
291  * Loops over all of the vm's external object bindings and adds a @fence
292  * with the given @usage to all of the external object's reservation
293  * objects.
294  */
295 void xe_vm_fence_all_extobjs(struct xe_vm *vm, struct dma_fence *fence,
296                              enum dma_resv_usage usage)
297 {
298         struct xe_vma *vma;
299
300         list_for_each_entry(vma, &vm->extobj.list, extobj.link)
301                 dma_resv_add_fence(vma->bo->ttm.base.resv, fence, usage);
302 }
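
/*
 * Illustrative sketch (editor addition, not part of the original file):
 * xe_vm_add_compute_engine() below uses this helper to make a new preempt
 * fence visible everywhere the VM's memory can be accessed. With the vm's
 * dma-resv and the external objects' dma-resvs locked, the fence is added to
 * the vm resv and then to every external object:
 *
 *	xe_vm_assert_held(vm);
 *
 *	dma_resv_add_fence(&vm->resv, pfence, DMA_RESV_USAGE_BOOKKEEP);
 *	xe_vm_fence_all_extobjs(vm, pfence, DMA_RESV_USAGE_BOOKKEEP);
 */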
303
304 static void resume_and_reinstall_preempt_fences(struct xe_vm *vm)
305 {
306         struct xe_engine *e;
307
308         lockdep_assert_held(&vm->lock);
309         xe_vm_assert_held(vm);
310
311         list_for_each_entry(e, &vm->preempt.engines, compute.link) {
312                 e->ops->resume(e);
313
314                 dma_resv_add_fence(&vm->resv, e->compute.pfence,
315                                    DMA_RESV_USAGE_BOOKKEEP);
316                 xe_vm_fence_all_extobjs(vm, e->compute.pfence,
317                                         DMA_RESV_USAGE_BOOKKEEP);
318         }
319 }
320
321 int xe_vm_add_compute_engine(struct xe_vm *vm, struct xe_engine *e)
322 {
323         struct ttm_validate_buffer tv_onstack[XE_ONSTACK_TV];
324         struct ttm_validate_buffer *tv;
325         struct ww_acquire_ctx ww;
326         struct list_head objs;
327         struct dma_fence *pfence;
328         int err;
329         bool wait;
330
331         XE_BUG_ON(!xe_vm_in_compute_mode(vm));
332
333         down_write(&vm->lock);
334
335         err = xe_vm_lock_dma_resv(vm, &ww, tv_onstack, &tv, &objs, true, 1);
336         if (err)
337                 goto out_unlock_outer;
338
339         pfence = xe_preempt_fence_create(e, e->compute.context,
340                                          ++e->compute.seqno);
341         if (!pfence) {
342                 err = -ENOMEM;
343                 goto out_unlock;
344         }
345
346         list_add(&e->compute.link, &vm->preempt.engines);
347         ++vm->preempt.num_engines;
348         e->compute.pfence = pfence;
349
350         down_read(&vm->userptr.notifier_lock);
351
352         dma_resv_add_fence(&vm->resv, pfence,
353                            DMA_RESV_USAGE_BOOKKEEP);
354
355         xe_vm_fence_all_extobjs(vm, pfence, DMA_RESV_USAGE_BOOKKEEP);
356
357         /*
358          * Check to see if a preemption on VM is in flight or userptr
359          * invalidation, if so trigger this preempt fence to sync state with
360          * other preempt fences on the VM.
361          */
362         wait = __xe_vm_userptr_needs_repin(vm) || preempt_fences_waiting(vm);
363         if (wait)
364                 dma_fence_enable_sw_signaling(pfence);
365
366         up_read(&vm->userptr.notifier_lock);
367
368 out_unlock:
369         xe_vm_unlock_dma_resv(vm, tv_onstack, tv, &ww, &objs);
370 out_unlock_outer:
371         up_write(&vm->lock);
372
373         return err;
374 }
375
376 /**
377  * __xe_vm_userptr_needs_repin() - Check whether the VM does have userptrs
378  * that need repinning.
379  * @vm: The VM.
380  *
381  * This function checks for whether the VM has userptrs that need repinning,
382  * and provides a release-type barrier on the userptr.notifier_lock after
383  * checking.
384  *
385  * Return: 0 if there are no userptrs needing repinning, -EAGAIN if there are.
386  */
387 int __xe_vm_userptr_needs_repin(struct xe_vm *vm)
388 {
389         lockdep_assert_held_read(&vm->userptr.notifier_lock);
390
391         return (list_empty(&vm->userptr.repin_list) &&
392                 list_empty(&vm->userptr.invalidated)) ? 0 : -EAGAIN;
393 }
394
395 /**
396  * xe_vm_lock_dma_resv() - Lock the vm dma_resv object and the dma_resv
397  * objects of the vm's external buffer objects.
398  * @vm: The vm.
399  * @ww: Pointer to a struct ww_acquire_ctx locking context.
400  * @tv_onstack: Array size XE_ONSTACK_TV of storage for the struct
401  * ttm_validate_buffers used for locking.
402  * @tv: Pointer to a pointer that on output contains the actual storage used.
403  * @objs: List head for the buffer objects locked.
404  * @intr: Whether to lock interruptible.
405  * @num_shared: Number of dma-fence slots to reserve in the locked objects.
406  *
407  * Locks the vm dma-resv objects and all the dma-resv objects of the
408  * buffer objects on the vm external object list. The TTM utilities require
409  * a list of struct ttm_validate_buffers pointing to the actual buffer
410  * objects to lock. Storage for those struct ttm_validate_buffers should
411  * be provided in @tv_onstack, and is typically reserved on the stack
412  * of the caller. If the size of @tv_onstack isn't sufficient, then
413  * storage will be allocated internally using kvmalloc().
414  *
415  * The function performs deadlock handling internally, and after a
416  * successful return the ww locking transaction should be considered
417  * sealed.
418  *
419  * Return: 0 on success, Negative error code on error. In particular if
420  * @intr is set to true, -EINTR or -ERESTARTSYS may be returned. In case
421  * of error, any locking performed has been reverted.
422  */
423 int xe_vm_lock_dma_resv(struct xe_vm *vm, struct ww_acquire_ctx *ww,
424                         struct ttm_validate_buffer *tv_onstack,
425                         struct ttm_validate_buffer **tv,
426                         struct list_head *objs,
427                         bool intr,
428                         unsigned int num_shared)
429 {
430         struct ttm_validate_buffer *tv_vm, *tv_bo;
431         struct xe_vma *vma, *next;
432         LIST_HEAD(dups);
433         int err;
434
435         lockdep_assert_held(&vm->lock);
436
437         if (vm->extobj.entries < XE_ONSTACK_TV) {
438                 tv_vm = tv_onstack;
439         } else {
440                 tv_vm = kvmalloc_array(vm->extobj.entries + 1, sizeof(*tv_vm),
441                                        GFP_KERNEL);
442                 if (!tv_vm)
443                         return -ENOMEM;
444         }
445         tv_bo = tv_vm + 1;
446
447         INIT_LIST_HEAD(objs);
448         list_for_each_entry(vma, &vm->extobj.list, extobj.link) {
449                 tv_bo->num_shared = num_shared;
450                 tv_bo->bo = &vma->bo->ttm;
451
452                 list_add_tail(&tv_bo->head, objs);
453                 tv_bo++;
454         }
455         tv_vm->num_shared = num_shared;
456         tv_vm->bo = xe_vm_ttm_bo(vm);
457         list_add_tail(&tv_vm->head, objs);
458         err = ttm_eu_reserve_buffers(ww, objs, intr, &dups);
459         if (err)
460                 goto out_err;
461
462         spin_lock(&vm->notifier.list_lock);
463         list_for_each_entry_safe(vma, next, &vm->notifier.rebind_list,
464                                  notifier.rebind_link) {
465                 xe_bo_assert_held(vma->bo);
466
467                 list_del_init(&vma->notifier.rebind_link);
468                 if (vma->tile_present && !vma->destroyed)
469                         list_move_tail(&vma->rebind_link, &vm->rebind_list);
470         }
471         spin_unlock(&vm->notifier.list_lock);
472
473         *tv = tv_vm;
474         return 0;
475
476 out_err:
477         if (tv_vm != tv_onstack)
478                 kvfree(tv_vm);
479
480         return err;
481 }
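
/*
 * Illustrative lock/unlock pairing (editor addition, not part of the original
 * file), following the pattern used by xe_vm_add_compute_engine() above and
 * preempt_rebind_work_func() below; the interruptible flag and number of
 * shared slots are example values:
 *
 *	struct ttm_validate_buffer tv_onstack[XE_ONSTACK_TV];
 *	struct ttm_validate_buffer *tv;
 *	struct ww_acquire_ctx ww;
 *	struct list_head objs;
 *	int err;
 *
 *	err = xe_vm_lock_dma_resv(vm, &ww, tv_onstack, &tv, &objs, true, 1);
 *	if (err)
 *		return err;
 *
 *	// ... validate / rebind with all dma-resvs held ...
 *
 *	xe_vm_unlock_dma_resv(vm, tv_onstack, tv, &ww, &objs);
 */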
482
483 /**
484  * xe_vm_unlock_dma_resv() - Unlock reservation objects locked by
485  * xe_vm_lock_dma_resv()
486  * @vm: The vm.
487  * @tv_onstack: The @tv_onstack array given to xe_vm_lock_dma_resv().
488  * @tv: The value of *@tv given by xe_vm_lock_dma_resv().
489  * @ww: The ww_acquire_context used for locking.
490  * @objs: The list returned from xe_vm_lock_dma_resv().
491  *
492  * Unlocks the reservation objects and frees any memory allocated by
493  * xe_vm_lock_dma_resv().
494  */
495 void xe_vm_unlock_dma_resv(struct xe_vm *vm,
496                            struct ttm_validate_buffer *tv_onstack,
497                            struct ttm_validate_buffer *tv,
498                            struct ww_acquire_ctx *ww,
499                            struct list_head *objs)
500 {
501         /*
502          * Nothing should've been able to enter the list while we were locked,
503          * since we've held the dma-resvs of all the vm's external objects,
504          * and holding the dma_resv of an object is required for list
505          * addition, and we shouldn't add ourselves.
506          */
507         XE_WARN_ON(!list_empty(&vm->notifier.rebind_list));
508
509         ttm_eu_backoff_reservation(ww, objs);
510         if (tv && tv != tv_onstack)
511                 kvfree(tv);
512 }
513
514 #define XE_VM_REBIND_RETRY_TIMEOUT_MS 1000
515
516 static void preempt_rebind_work_func(struct work_struct *w)
517 {
518         struct xe_vm *vm = container_of(w, struct xe_vm, preempt.rebind_work);
519         struct xe_vma *vma;
520         struct ttm_validate_buffer tv_onstack[XE_ONSTACK_TV];
521         struct ttm_validate_buffer *tv;
522         struct ww_acquire_ctx ww;
523         struct list_head objs;
524         struct dma_fence *rebind_fence;
525         unsigned int fence_count = 0;
526         LIST_HEAD(preempt_fences);
527         ktime_t end = 0;
528         int err;
529         long wait;
530         int __maybe_unused tries = 0;
531
532         XE_BUG_ON(!xe_vm_in_compute_mode(vm));
533         trace_xe_vm_rebind_worker_enter(vm);
534
535         if (xe_vm_is_closed(vm)) {
536                 trace_xe_vm_rebind_worker_exit(vm);
537                 return;
538         }
539
540         down_write(&vm->lock);
541
542 retry:
543         if (vm->async_ops.error)
544                 goto out_unlock_outer;
545
546         /*
547          * Extreme corner where we exit a VM error state with a munmap style VM
548          * unbind inflight which requires a rebind. In this case the rebind
549          * needs to install some fences into the dma-resv slots. The worker to
550  * do this has already been queued; let that worker make progress by
551  * dropping vm->lock and trying this again.
552          */
553         if (vm->async_ops.munmap_rebind_inflight) {
554                 up_write(&vm->lock);
555                 flush_work(&vm->async_ops.work);
556                 goto retry;
557         }
558
559         if (xe_vm_userptr_check_repin(vm)) {
560                 err = xe_vm_userptr_pin(vm);
561                 if (err)
562                         goto out_unlock_outer;
563         }
564
565         err = xe_vm_lock_dma_resv(vm, &ww, tv_onstack, &tv, &objs,
566                                   false, vm->preempt.num_engines);
567         if (err)
568                 goto out_unlock_outer;
569
570         if (xe_vm_is_idle(vm)) {
571                 vm->preempt.rebind_deactivated = true;
572                 goto out_unlock;
573         }
574
575         /* Fresh preempt fences already installed. Everything is running. */
576         if (!preempt_fences_waiting(vm))
577                 goto out_unlock;
578
579         /*
580          * This makes sure vm is completely suspended and also balances
581          * xe_engine suspend- and resume; we resume *all* vm engines below.
582          */
583         err = wait_for_existing_preempt_fences(vm);
584         if (err)
585                 goto out_unlock;
586
587         err = alloc_preempt_fences(vm, &preempt_fences, &fence_count);
588         if (err)
589                 goto out_unlock;
590
591         list_for_each_entry(vma, &vm->rebind_list, rebind_link) {
592                 if (xe_vma_is_userptr(vma) || vma->destroyed)
593                         continue;
594
595                 err = xe_bo_validate(vma->bo, vm, false);
596                 if (err)
597                         goto out_unlock;
598         }
599
600         rebind_fence = xe_vm_rebind(vm, true);
601         if (IS_ERR(rebind_fence)) {
602                 err = PTR_ERR(rebind_fence);
603                 goto out_unlock;
604         }
605
606         if (rebind_fence) {
607                 dma_fence_wait(rebind_fence, false);
608                 dma_fence_put(rebind_fence);
609         }
610
611         /* Wait on munmap style VM unbinds */
612         wait = dma_resv_wait_timeout(&vm->resv,
613                                      DMA_RESV_USAGE_KERNEL,
614                                      false, MAX_SCHEDULE_TIMEOUT);
615         if (wait <= 0) {
616                 err = -ETIME;
617                 goto out_unlock;
618         }
619
620 #define retry_required(__tries, __vm) \
621         (IS_ENABLED(CONFIG_DRM_XE_USERPTR_INVAL_INJECT) ? \
622         (!(__tries)++ || __xe_vm_userptr_needs_repin(__vm)) : \
623         __xe_vm_userptr_needs_repin(__vm))
624
625         down_read(&vm->userptr.notifier_lock);
626         if (retry_required(tries, vm)) {
627                 up_read(&vm->userptr.notifier_lock);
628                 err = -EAGAIN;
629                 goto out_unlock;
630         }
631
632 #undef retry_required
633
634         /* Point of no return. */
635         arm_preempt_fences(vm, &preempt_fences);
636         resume_and_reinstall_preempt_fences(vm);
637         up_read(&vm->userptr.notifier_lock);
638
639 out_unlock:
640         xe_vm_unlock_dma_resv(vm, tv_onstack, tv, &ww, &objs);
641 out_unlock_outer:
642         if (err == -EAGAIN) {
643                 trace_xe_vm_rebind_worker_retry(vm);
644                 goto retry;
645         }
646
647         /*
648          * With multiple active VMs, under memory pressure, it is possible that
649          * ttm_bo_validate() runs into -EDEADLK and in such a case returns -ENOMEM.
650          * Until ttm properly handles locking in such scenarios, the best thing the
651          * driver can do is retry with a timeout. Killing the VM or putting it
652          * in error state after timeout or other error scenarios is still TBD.
653          */
654         if (err == -ENOMEM) {
655                 ktime_t cur = ktime_get();
656
657                 end = end ? : ktime_add_ms(cur, XE_VM_REBIND_RETRY_TIMEOUT_MS);
658                 if (ktime_before(cur, end)) {
659                         msleep(20);
660                         trace_xe_vm_rebind_worker_retry(vm);
661                         goto retry;
662                 }
663         }
664         up_write(&vm->lock);
665
666         free_preempt_fences(&preempt_fences);
667
668         XE_WARN_ON(err < 0);    /* TODO: Kill VM or put in error state */
669         trace_xe_vm_rebind_worker_exit(vm);
670 }
671
672 struct async_op_fence;
673 static int __xe_vm_bind(struct xe_vm *vm, struct xe_vma *vma,
674                         struct xe_engine *e, struct xe_sync_entry *syncs,
675                         u32 num_syncs, struct async_op_fence *afence);
676
677 static bool vma_userptr_invalidate(struct mmu_interval_notifier *mni,
678                                    const struct mmu_notifier_range *range,
679                                    unsigned long cur_seq)
680 {
681         struct xe_vma *vma = container_of(mni, struct xe_vma, userptr.notifier);
682         struct xe_vm *vm = vma->vm;
683         struct dma_resv_iter cursor;
684         struct dma_fence *fence;
685         long err;
686
687         XE_BUG_ON(!xe_vma_is_userptr(vma));
688         trace_xe_vma_userptr_invalidate(vma);
689
690         if (!mmu_notifier_range_blockable(range))
691                 return false;
692
693         down_write(&vm->userptr.notifier_lock);
694         mmu_interval_set_seq(mni, cur_seq);
695
696         /* No need to stop gpu access if the userptr is not yet bound. */
697         if (!vma->userptr.initial_bind) {
698                 up_write(&vm->userptr.notifier_lock);
699                 return true;
700         }
701
702         /*
703          * Tell exec and rebind worker they need to repin and rebind this
704          * userptr.
705          */
706         if (!xe_vm_in_fault_mode(vm) && !vma->destroyed && vma->tile_present) {
707                 spin_lock(&vm->userptr.invalidated_lock);
708                 list_move_tail(&vma->userptr.invalidate_link,
709                                &vm->userptr.invalidated);
710                 spin_unlock(&vm->userptr.invalidated_lock);
711         }
712
713         up_write(&vm->userptr.notifier_lock);
714
715         /*
716          * Preempt fences turn into schedule disables, pipeline these.
717          * Note that even in fault mode, we need to wait for binds and
718          * unbinds to complete, and those are attached as BOOKKEEP fences
719          * to the vm.
720          */
721         dma_resv_iter_begin(&cursor, &vm->resv,
722                             DMA_RESV_USAGE_BOOKKEEP);
723         dma_resv_for_each_fence_unlocked(&cursor, fence)
724                 dma_fence_enable_sw_signaling(fence);
725         dma_resv_iter_end(&cursor);
726
727         err = dma_resv_wait_timeout(&vm->resv,
728                                     DMA_RESV_USAGE_BOOKKEEP,
729                                     false, MAX_SCHEDULE_TIMEOUT);
730         XE_WARN_ON(err <= 0);
731
732         if (xe_vm_in_fault_mode(vm)) {
733                 err = xe_vm_invalidate_vma(vma);
734                 XE_WARN_ON(err);
735         }
736
737         trace_xe_vma_userptr_invalidate_complete(vma);
738
739         return true;
740 }
741
742 static const struct mmu_interval_notifier_ops vma_userptr_notifier_ops = {
743         .invalidate = vma_userptr_invalidate,
744 };
745
746 int xe_vm_userptr_pin(struct xe_vm *vm)
747 {
748         struct xe_vma *vma, *next;
749         int err = 0;
750         LIST_HEAD(tmp_evict);
751
752         lockdep_assert_held_write(&vm->lock);
753
754         /* Collect invalidated userptrs */
755         spin_lock(&vm->userptr.invalidated_lock);
756         list_for_each_entry_safe(vma, next, &vm->userptr.invalidated,
757                                  userptr.invalidate_link) {
758                 list_del_init(&vma->userptr.invalidate_link);
759                 list_move_tail(&vma->userptr_link, &vm->userptr.repin_list);
760         }
761         spin_unlock(&vm->userptr.invalidated_lock);
762
763         /* Pin and move to temporary list */
764         list_for_each_entry_safe(vma, next, &vm->userptr.repin_list, userptr_link) {
765                 err = xe_vma_userptr_pin_pages(vma);
766                 if (err < 0)
767                         goto out_err;
768
769                 list_move_tail(&vma->userptr_link, &tmp_evict);
770         }
771
772         /* Take lock and move to rebind_list for rebinding. */
773         err = dma_resv_lock_interruptible(&vm->resv, NULL);
774         if (err)
775                 goto out_err;
776
777         list_for_each_entry_safe(vma, next, &tmp_evict, userptr_link) {
778                 list_del_init(&vma->userptr_link);
779                 list_move_tail(&vma->rebind_link, &vm->rebind_list);
780         }
781
782         dma_resv_unlock(&vm->resv);
783
784         return 0;
785
786 out_err:
787         list_splice_tail(&tmp_evict, &vm->userptr.repin_list);
788
789         return err;
790 }
791
792 /**
793  * xe_vm_userptr_check_repin() - Check whether the VM might have userptrs
794  * that need repinning.
795  * @vm: The VM.
796  *
797  * This function does an advisory check for whether the VM has userptrs that
798  * need repinning.
799  *
800  * Return: 0 if there are no indications of userptrs needing repinning,
801  * -EAGAIN if there are.
802  */
803 int xe_vm_userptr_check_repin(struct xe_vm *vm)
804 {
805         return (list_empty_careful(&vm->userptr.repin_list) &&
806                 list_empty_careful(&vm->userptr.invalidated)) ? 0 : -EAGAIN;
807 }
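
/*
 * Illustrative sketch (editor addition, not part of the original file): the
 * rebind worker (preempt_rebind_work_func() above) combines the advisory and
 * authoritative checks roughly as follows:
 *
 *	if (xe_vm_userptr_check_repin(vm)) {
 *		err = xe_vm_userptr_pin(vm);	// with vm->lock held for write
 *		if (err)
 *			goto out;
 *	}
 *
 *	// ... lock dma-resvs, validate, rebind ...
 *
 *	down_read(&vm->userptr.notifier_lock);
 *	if (__xe_vm_userptr_needs_repin(vm)) {
 *		up_read(&vm->userptr.notifier_lock);
 *		goto retry;	// something was invalidated again
 *	}
 *	// point of no return: arm and reinstall preempt fences
 *	up_read(&vm->userptr.notifier_lock);
 */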
808
809 static struct dma_fence *
810 xe_vm_bind_vma(struct xe_vma *vma, struct xe_engine *e,
811                struct xe_sync_entry *syncs, u32 num_syncs);
812
813 struct dma_fence *xe_vm_rebind(struct xe_vm *vm, bool rebind_worker)
814 {
815         struct dma_fence *fence = NULL;
816         struct xe_vma *vma, *next;
817
818         lockdep_assert_held(&vm->lock);
819         if (xe_vm_no_dma_fences(vm) && !rebind_worker)
820                 return NULL;
821
822         xe_vm_assert_held(vm);
823         list_for_each_entry_safe(vma, next, &vm->rebind_list, rebind_link) {
824                 XE_WARN_ON(!vma->tile_present);
825
826                 list_del_init(&vma->rebind_link);
827                 dma_fence_put(fence);
828                 if (rebind_worker)
829                         trace_xe_vma_rebind_worker(vma);
830                 else
831                         trace_xe_vma_rebind_exec(vma);
832                 fence = xe_vm_bind_vma(vma, NULL, NULL, 0);
833                 if (IS_ERR(fence))
834                         return fence;
835         }
836
837         return fence;
838 }
839
840 static struct xe_vma *xe_vma_create(struct xe_vm *vm,
841                                     struct xe_bo *bo,
842                                     u64 bo_offset_or_userptr,
843                                     u64 start, u64 end,
844                                     bool read_only,
845                                     u64 tile_mask)
846 {
847         struct xe_vma *vma;
848         struct xe_tile *tile;
849         u8 id;
850
851         XE_BUG_ON(start >= end);
852         XE_BUG_ON(end >= vm->size);
853
854         vma = kzalloc(sizeof(*vma), GFP_KERNEL);
855         if (!vma) {
856                 vma = ERR_PTR(-ENOMEM);
857                 return vma;
858         }
859
860         INIT_LIST_HEAD(&vma->rebind_link);
861         INIT_LIST_HEAD(&vma->unbind_link);
862         INIT_LIST_HEAD(&vma->userptr_link);
863         INIT_LIST_HEAD(&vma->userptr.invalidate_link);
864         INIT_LIST_HEAD(&vma->notifier.rebind_link);
865         INIT_LIST_HEAD(&vma->extobj.link);
866
867         vma->vm = vm;
868         vma->start = start;
869         vma->end = end;
870         if (read_only)
871                 vma->pte_flags = XE_PTE_READ_ONLY;
872
873         if (tile_mask) {
874                 vma->tile_mask = tile_mask;
875         } else {
876                 for_each_tile(tile, vm->xe, id)
877                         vma->tile_mask |= 0x1 << id;
878         }
879
880         if (vm->xe->info.platform == XE_PVC)
881                 vma->use_atomic_access_pte_bit = true;
882
883         if (bo) {
884                 xe_bo_assert_held(bo);
885                 vma->bo_offset = bo_offset_or_userptr;
886                 vma->bo = xe_bo_get(bo);
887                 list_add_tail(&vma->bo_link, &bo->vmas);
888         } else /* userptr */ {
889                 u64 size = end - start + 1;
890                 int err;
891
892                 vma->userptr.ptr = bo_offset_or_userptr;
893
894                 err = mmu_interval_notifier_insert(&vma->userptr.notifier,
895                                                    current->mm,
896                                                    vma->userptr.ptr, size,
897                                                    &vma_userptr_notifier_ops);
898                 if (err) {
899                         kfree(vma);
900                         vma = ERR_PTR(err);
901                         return vma;
902                 }
903
904                 vma->userptr.notifier_seq = LONG_MAX;
905                 xe_vm_get(vm);
906         }
907
908         return vma;
909 }
910
911 static bool vm_remove_extobj(struct xe_vma *vma)
912 {
913         if (!list_empty(&vma->extobj.link)) {
914                 vma->vm->extobj.entries--;
915                 list_del_init(&vma->extobj.link);
916                 return true;
917         }
918         return false;
919 }
920
921 static void xe_vma_destroy_late(struct xe_vma *vma)
922 {
923         struct xe_vm *vm = vma->vm;
924         struct xe_device *xe = vm->xe;
925         bool read_only = vma->pte_flags & XE_PTE_READ_ONLY;
926
927         if (xe_vma_is_userptr(vma)) {
928                 if (vma->userptr.sg) {
929                         dma_unmap_sgtable(xe->drm.dev,
930                                           vma->userptr.sg,
931                                           read_only ? DMA_TO_DEVICE :
932                                           DMA_BIDIRECTIONAL, 0);
933                         sg_free_table(vma->userptr.sg);
934                         vma->userptr.sg = NULL;
935                 }
936
937                 /*
938                  * Since userptr pages are not pinned, we can't remove
939                  * the notifier until we're sure the GPU is not accessing
940                  * them anymore
941                  */
942                 mmu_interval_notifier_remove(&vma->userptr.notifier);
943                 xe_vm_put(vm);
944         } else {
945                 xe_bo_put(vma->bo);
946         }
947
948         kfree(vma);
949 }
950
951 static void vma_destroy_work_func(struct work_struct *w)
952 {
953         struct xe_vma *vma =
954                 container_of(w, struct xe_vma, destroy_work);
955
956         xe_vma_destroy_late(vma);
957 }
958
959 static struct xe_vma *
960 bo_has_vm_references_locked(struct xe_bo *bo, struct xe_vm *vm,
961                             struct xe_vma *ignore)
962 {
963         struct xe_vma *vma;
964
965         list_for_each_entry(vma, &bo->vmas, bo_link) {
966                 if (vma != ignore && vma->vm == vm)
967                         return vma;
968         }
969
970         return NULL;
971 }
972
973 static bool bo_has_vm_references(struct xe_bo *bo, struct xe_vm *vm,
974                                  struct xe_vma *ignore)
975 {
976         struct ww_acquire_ctx ww;
977         bool ret;
978
979         xe_bo_lock(bo, &ww, 0, false);
980         ret = !!bo_has_vm_references_locked(bo, vm, ignore);
981         xe_bo_unlock(bo, &ww);
982
983         return ret;
984 }
985
986 static void __vm_insert_extobj(struct xe_vm *vm, struct xe_vma *vma)
987 {
988         list_add(&vma->extobj.link, &vm->extobj.list);
989         vm->extobj.entries++;
990 }
991
992 static void vm_insert_extobj(struct xe_vm *vm, struct xe_vma *vma)
993 {
994         struct xe_bo *bo = vma->bo;
995
996         lockdep_assert_held_write(&vm->lock);
997
998         if (bo_has_vm_references(bo, vm, vma))
999                 return;
1000
1001         __vm_insert_extobj(vm, vma);
1002 }
1003
1004 static void vma_destroy_cb(struct dma_fence *fence,
1005                            struct dma_fence_cb *cb)
1006 {
1007         struct xe_vma *vma = container_of(cb, struct xe_vma, destroy_cb);
1008
1009         INIT_WORK(&vma->destroy_work, vma_destroy_work_func);
1010         queue_work(system_unbound_wq, &vma->destroy_work);
1011 }
1012
1013 static void xe_vma_destroy(struct xe_vma *vma, struct dma_fence *fence)
1014 {
1015         struct xe_vm *vm = vma->vm;
1016
1017         lockdep_assert_held_write(&vm->lock);
1018         XE_BUG_ON(!list_empty(&vma->unbind_link));
1019
1020         if (xe_vma_is_userptr(vma)) {
1021                 XE_WARN_ON(!vma->destroyed);
1022                 spin_lock(&vm->userptr.invalidated_lock);
1023                 list_del_init(&vma->userptr.invalidate_link);
1024                 spin_unlock(&vm->userptr.invalidated_lock);
1025                 list_del(&vma->userptr_link);
1026         } else {
1027                 xe_bo_assert_held(vma->bo);
1028                 list_del(&vma->bo_link);
1029
1030                 spin_lock(&vm->notifier.list_lock);
1031                 list_del(&vma->notifier.rebind_link);
1032                 spin_unlock(&vm->notifier.list_lock);
1033
1034                 if (!vma->bo->vm && vm_remove_extobj(vma)) {
1035                         struct xe_vma *other;
1036
1037                         other = bo_has_vm_references_locked(vma->bo, vm, NULL);
1038
1039                         if (other)
1040                                 __vm_insert_extobj(vm, other);
1041                 }
1042         }
1043
1044         xe_vm_assert_held(vm);
1045         if (!list_empty(&vma->rebind_link))
1046                 list_del(&vma->rebind_link);
1047
1048         if (fence) {
1049                 int ret = dma_fence_add_callback(fence, &vma->destroy_cb,
1050                                                  vma_destroy_cb);
1051
1052                 if (ret) {
1053                         XE_WARN_ON(ret != -ENOENT);
1054                         xe_vma_destroy_late(vma);
1055                 }
1056         } else {
1057                 xe_vma_destroy_late(vma);
1058         }
1059 }
1060
1061 static void xe_vma_destroy_unlocked(struct xe_vma *vma)
1062 {
1063         struct ttm_validate_buffer tv[2];
1064         struct ww_acquire_ctx ww;
1065         struct xe_bo *bo = vma->bo;
1066         LIST_HEAD(objs);
1067         LIST_HEAD(dups);
1068         int err;
1069
1070         memset(tv, 0, sizeof(tv));
1071         tv[0].bo = xe_vm_ttm_bo(vma->vm);
1072         list_add(&tv[0].head, &objs);
1073
1074         if (bo) {
1075                 tv[1].bo = &xe_bo_get(bo)->ttm;
1076                 list_add(&tv[1].head, &objs);
1077         }
1078         err = ttm_eu_reserve_buffers(&ww, &objs, false, &dups);
1079         XE_WARN_ON(err);
1080
1081         xe_vma_destroy(vma, NULL);
1082
1083         ttm_eu_backoff_reservation(&ww, &objs);
1084         if (bo)
1085                 xe_bo_put(bo);
1086 }
1087
1088 static struct xe_vma *to_xe_vma(const struct rb_node *node)
1089 {
1090         BUILD_BUG_ON(offsetof(struct xe_vma, vm_node) != 0);
1091         return (struct xe_vma *)node;
1092 }
1093
1094 static int xe_vma_cmp(const struct xe_vma *a, const struct xe_vma *b)
1095 {
1096         if (a->end < b->start) {
1097                 return -1;
1098         } else if (b->end < a->start) {
1099                 return 1;
1100         } else {
1101                 return 0;
1102         }
1103 }
1104
1105 static bool xe_vma_less_cb(struct rb_node *a, const struct rb_node *b)
1106 {
1107         return xe_vma_cmp(to_xe_vma(a), to_xe_vma(b)) < 0;
1108 }
1109
1110 int xe_vma_cmp_vma_cb(const void *key, const struct rb_node *node)
1111 {
1112         struct xe_vma *cmp = to_xe_vma(node);
1113         const struct xe_vma *own = key;
1114
1115         if (own->start > cmp->end)
1116                 return 1;
1117
1118         if (own->end < cmp->start)
1119                 return -1;
1120
1121         return 0;
1122 }
1123
1124 struct xe_vma *
1125 xe_vm_find_overlapping_vma(struct xe_vm *vm, const struct xe_vma *vma)
1126 {
1127         struct rb_node *node;
1128
1129         if (xe_vm_is_closed(vm))
1130                 return NULL;
1131
1132         XE_BUG_ON(vma->end >= vm->size);
1133         lockdep_assert_held(&vm->lock);
1134
1135         node = rb_find(vma, &vm->vmas, xe_vma_cmp_vma_cb);
1136
1137         return node ? to_xe_vma(node) : NULL;
1138 }
1139
1140 static void xe_vm_insert_vma(struct xe_vm *vm, struct xe_vma *vma)
1141 {
1142         XE_BUG_ON(vma->vm != vm);
1143         lockdep_assert_held(&vm->lock);
1144
1145         rb_add(&vma->vm_node, &vm->vmas, xe_vma_less_cb);
1146 }
1147
1148 static void xe_vm_remove_vma(struct xe_vm *vm, struct xe_vma *vma)
1149 {
1150         XE_BUG_ON(vma->vm != vm);
1151         lockdep_assert_held(&vm->lock);
1152
1153         rb_erase(&vma->vm_node, &vm->vmas);
1154         if (vm->usm.last_fault_vma == vma)
1155                 vm->usm.last_fault_vma = NULL;
1156 }
1157
1158 static void async_op_work_func(struct work_struct *w);
1159 static void vm_destroy_work_func(struct work_struct *w);
1160
1161 struct xe_vm *xe_vm_create(struct xe_device *xe, u32 flags)
1162 {
1163         struct xe_vm *vm;
1164         int err, i = 0, number_tiles = 0;
1165         struct xe_tile *tile;
1166         u8 id;
1167
1168         vm = kzalloc(sizeof(*vm), GFP_KERNEL);
1169         if (!vm)
1170                 return ERR_PTR(-ENOMEM);
1171
1172         vm->xe = xe;
1173         kref_init(&vm->refcount);
1174         dma_resv_init(&vm->resv);
1175
1176         vm->size = 1ull << xe_pt_shift(xe->info.vm_max_level + 1);
1177
1178         vm->vmas = RB_ROOT;
1179         vm->flags = flags;
1180
1181         init_rwsem(&vm->lock);
1182
1183         INIT_LIST_HEAD(&vm->rebind_list);
1184
1185         INIT_LIST_HEAD(&vm->userptr.repin_list);
1186         INIT_LIST_HEAD(&vm->userptr.invalidated);
1187         init_rwsem(&vm->userptr.notifier_lock);
1188         spin_lock_init(&vm->userptr.invalidated_lock);
1189
1190         INIT_LIST_HEAD(&vm->notifier.rebind_list);
1191         spin_lock_init(&vm->notifier.list_lock);
1192
1193         INIT_LIST_HEAD(&vm->async_ops.pending);
1194         INIT_WORK(&vm->async_ops.work, async_op_work_func);
1195         spin_lock_init(&vm->async_ops.lock);
1196
1197         INIT_WORK(&vm->destroy_work, vm_destroy_work_func);
1198
1199         INIT_LIST_HEAD(&vm->preempt.engines);
1200         vm->preempt.min_run_period_ms = 10;     /* FIXME: Wire up to uAPI */
1201
1202         INIT_LIST_HEAD(&vm->extobj.list);
1203
1204         if (!(flags & XE_VM_FLAG_MIGRATION)) {
1205                 /* We need to immediately exit from any D3 state */
1206                 xe_pm_runtime_get(xe);
1207                 xe_device_mem_access_get(xe);
1208         }
1209
1210         err = dma_resv_lock_interruptible(&vm->resv, NULL);
1211         if (err)
1212                 goto err_put;
1213
1214         if (IS_DGFX(xe) && xe->info.vram_flags & XE_VRAM_FLAGS_NEED64K)
1215                 vm->flags |= XE_VM_FLAGS_64K;
1216
1217         for_each_tile(tile, xe, id) {
1218                 if (flags & XE_VM_FLAG_MIGRATION &&
1219                     tile->id != XE_VM_FLAG_GT_ID(flags))
1220                         continue;
1221
1222                 vm->pt_root[id] = xe_pt_create(vm, tile, xe->info.vm_max_level);
1223                 if (IS_ERR(vm->pt_root[id])) {
1224                         err = PTR_ERR(vm->pt_root[id]);
1225                         vm->pt_root[id] = NULL;
1226                         goto err_destroy_root;
1227                 }
1228         }
1229
1230         if (flags & XE_VM_FLAG_SCRATCH_PAGE) {
1231                 for_each_tile(tile, xe, id) {
1232                         if (!vm->pt_root[id])
1233                                 continue;
1234
1235                         err = xe_pt_create_scratch(xe, tile, vm);
1236                         if (err)
1237                                 goto err_scratch_pt;
1238                 }
1239         }
1240
1241         if (flags & DRM_XE_VM_CREATE_COMPUTE_MODE) {
1242                 INIT_WORK(&vm->preempt.rebind_work, preempt_rebind_work_func);
1243                 vm->flags |= XE_VM_FLAG_COMPUTE_MODE;
1244         }
1245
1246         if (flags & DRM_XE_VM_CREATE_ASYNC_BIND_OPS) {
1247                 vm->async_ops.fence.context = dma_fence_context_alloc(1);
1248                 vm->flags |= XE_VM_FLAG_ASYNC_BIND_OPS;
1249         }
1250
1251         /* Fill pt_root after allocating scratch tables */
1252         for_each_tile(tile, xe, id) {
1253                 if (!vm->pt_root[id])
1254                         continue;
1255
1256                 xe_pt_populate_empty(tile, vm, vm->pt_root[id]);
1257         }
1258         dma_resv_unlock(&vm->resv);
1259
1260         /* Kernel migration VM shouldn't have a circular loop.. */
1261         if (!(flags & XE_VM_FLAG_MIGRATION)) {
1262                 for_each_tile(tile, xe, id) {
1263                         struct xe_gt *gt = &tile->primary_gt;
1264                         struct xe_vm *migrate_vm;
1265                         struct xe_engine *eng;
1266
1267                         if (!vm->pt_root[id])
1268                                 continue;
1269
1270                         migrate_vm = xe_migrate_get_vm(gt->migrate);
1271                         eng = xe_engine_create_class(xe, gt, migrate_vm,
1272                                                      XE_ENGINE_CLASS_COPY,
1273                                                      ENGINE_FLAG_VM);
1274                         xe_vm_put(migrate_vm);
1275                         if (IS_ERR(eng)) {
1276                                 xe_vm_close_and_put(vm);
1277                                 return ERR_CAST(eng);
1278                         }
1279                         vm->eng[id] = eng;
1280                         number_tiles++;
1281                 }
1282         }
1283
1284         if (number_tiles > 1)
1285                 vm->composite_fence_ctx = dma_fence_context_alloc(1);
1286
1287         mutex_lock(&xe->usm.lock);
1288         if (flags & XE_VM_FLAG_FAULT_MODE)
1289                 xe->usm.num_vm_in_fault_mode++;
1290         else if (!(flags & XE_VM_FLAG_MIGRATION))
1291                 xe->usm.num_vm_in_non_fault_mode++;
1292         mutex_unlock(&xe->usm.lock);
1293
1294         trace_xe_vm_create(vm);
1295
1296         return vm;
1297
1298 err_scratch_pt:
1299         for_each_tile(tile, xe, id) {
1300                 if (!vm->pt_root[id])
1301                         continue;
1302
1303                 i = vm->pt_root[id]->level;
1304                 while (i)
1305                         if (vm->scratch_pt[id][--i])
1306                                 xe_pt_destroy(vm->scratch_pt[id][i],
1307                                               vm->flags, NULL);
1308                 xe_bo_unpin(vm->scratch_bo[id]);
1309                 xe_bo_put(vm->scratch_bo[id]);
1310         }
1311 err_destroy_root:
1312         for_each_tile(tile, xe, id) {
1313                 if (vm->pt_root[id])
1314                         xe_pt_destroy(vm->pt_root[id], vm->flags, NULL);
1315         }
1316         dma_resv_unlock(&vm->resv);
1317 err_put:
1318         dma_resv_fini(&vm->resv);
1319         kfree(vm);
1320         if (!(flags & XE_VM_FLAG_MIGRATION)) {
1321                 xe_device_mem_access_put(xe);
1322                 xe_pm_runtime_put(xe);
1323         }
1324         return ERR_PTR(err);
1325 }
1326
1327 static void flush_async_ops(struct xe_vm *vm)
1328 {
1329         queue_work(system_unbound_wq, &vm->async_ops.work);
1330         flush_work(&vm->async_ops.work);
1331 }
1332
1333 static void vm_error_capture(struct xe_vm *vm, int err,
1334                              u32 op, u64 addr, u64 size)
1335 {
1336         struct drm_xe_vm_bind_op_error_capture capture;
1337         u64 __user *address =
1338                 u64_to_user_ptr(vm->async_ops.error_capture.addr);
1339         bool in_kthread = !current->mm;
1340
1341         capture.error = err;
1342         capture.op = op;
1343         capture.addr = addr;
1344         capture.size = size;
1345
1346         if (in_kthread) {
1347                 if (!mmget_not_zero(vm->async_ops.error_capture.mm))
1348                         goto mm_closed;
1349                 kthread_use_mm(vm->async_ops.error_capture.mm);
1350         }
1351
1352         if (copy_to_user(address, &capture, sizeof(capture)))
1353                 XE_WARN_ON("Copy to user failed");
1354
1355         if (in_kthread) {
1356                 kthread_unuse_mm(vm->async_ops.error_capture.mm);
1357                 mmput(vm->async_ops.error_capture.mm);
1358         }
1359
1360 mm_closed:
1361         wake_up_all(&vm->async_ops.error_capture.wq);
1362 }
1363
1364 void xe_vm_close_and_put(struct xe_vm *vm)
1365 {
1366         struct rb_root contested = RB_ROOT;
1367         struct ww_acquire_ctx ww;
1368         struct xe_device *xe = vm->xe;
1369         struct xe_tile *tile;
1370         u8 id;
1371
1372         XE_BUG_ON(vm->preempt.num_engines);
1373
1374         vm->size = 0;
1375         smp_mb();
1376         flush_async_ops(vm);
1377         if (xe_vm_in_compute_mode(vm))
1378                 flush_work(&vm->preempt.rebind_work);
1379
1380         for_each_tile(tile, xe, id) {
1381                 if (vm->eng[id]) {
1382                         xe_engine_kill(vm->eng[id]);
1383                         xe_engine_put(vm->eng[id]);
1384                         vm->eng[id] = NULL;
1385                 }
1386         }
1387
1388         down_write(&vm->lock);
1389         xe_vm_lock(vm, &ww, 0, false);
1390         while (vm->vmas.rb_node) {
1391                 struct xe_vma *vma = to_xe_vma(vm->vmas.rb_node);
1392
1393                 if (xe_vma_is_userptr(vma)) {
1394                         down_read(&vm->userptr.notifier_lock);
1395                         vma->destroyed = true;
1396                         up_read(&vm->userptr.notifier_lock);
1397                 }
1398
1399                 rb_erase(&vma->vm_node, &vm->vmas);
1400
1401                 /* easy case, remove from VMA? */
1402                 if (xe_vma_is_userptr(vma) || vma->bo->vm) {
1403                         xe_vma_destroy(vma, NULL);
1404                         continue;
1405                 }
1406
1407                 rb_add(&vma->vm_node, &contested, xe_vma_less_cb);
1408         }
1409
1410         /*
1411          * All vm operations will add shared fences to resv.
1412          * The only exception is eviction for a shared object,
1413          * but even so, the unbind when evicted would still
1414          * install a fence to resv. Hence it's safe to
1415          * destroy the pagetables immediately.
1416          */
1417         for_each_tile(tile, xe, id) {
1418                 if (vm->scratch_bo[id]) {
1419                         u32 i;
1420
1421                         xe_bo_unpin(vm->scratch_bo[id]);
1422                         xe_bo_put(vm->scratch_bo[id]);
1423                         for (i = 0; i < vm->pt_root[id]->level; i++)
1424                                 xe_pt_destroy(vm->scratch_pt[id][i], vm->flags,
1425                                               NULL);
1426                 }
1427         }
1428         xe_vm_unlock(vm, &ww);
1429
1430         if (contested.rb_node) {
1431
1432                 /*
1433                  * VM is now dead, cannot re-add nodes to vm->vmas if it's NULL
1434                  * Since we hold a refcount to the bo, we can remove and free
1435                  * the members safely without locking.
1436                  */
1437                 while (contested.rb_node) {
1438                         struct xe_vma *vma = to_xe_vma(contested.rb_node);
1439
1440                         rb_erase(&vma->vm_node, &contested);
1441                         xe_vma_destroy_unlocked(vma);
1442                 }
1443         }
1444
1445         if (vm->async_ops.error_capture.addr)
1446                 wake_up_all(&vm->async_ops.error_capture.wq);
1447
1448         XE_WARN_ON(!list_empty(&vm->extobj.list));
1449         up_write(&vm->lock);
1450
1451         mutex_lock(&xe->usm.lock);
1452         if (vm->flags & XE_VM_FLAG_FAULT_MODE)
1453                 xe->usm.num_vm_in_fault_mode--;
1454         else if (!(vm->flags & XE_VM_FLAG_MIGRATION))
1455                 xe->usm.num_vm_in_non_fault_mode--;
1456         mutex_unlock(&xe->usm.lock);
1457
1458         xe_vm_put(vm);
1459 }
1460
1461 static void vm_destroy_work_func(struct work_struct *w)
1462 {
1463         struct xe_vm *vm =
1464                 container_of(w, struct xe_vm, destroy_work);
1465         struct ww_acquire_ctx ww;
1466         struct xe_device *xe = vm->xe;
1467         struct xe_tile *tile;
1468         u8 id;
1469         void *lookup;
1470
1471         /* xe_vm_close_and_put was not called? */
1472         XE_WARN_ON(vm->size);
1473
1474         if (!(vm->flags & XE_VM_FLAG_MIGRATION)) {
1475                 xe_device_mem_access_put(xe);
1476                 xe_pm_runtime_put(xe);
1477
1478                 if (xe->info.has_asid) {
1479                         mutex_lock(&xe->usm.lock);
1480                         lookup = xa_erase(&xe->usm.asid_to_vm, vm->usm.asid);
1481                         XE_WARN_ON(lookup != vm);
1482                         mutex_unlock(&xe->usm.lock);
1483                 }
1484         }
1485
1486         /*
1487          * XXX: We delay destroying the PT root until the VM is freed as PT root
1488          * is needed for xe_vm_lock to work. If we remove that dependency this
1489          * can be moved to xe_vm_close_and_put.
1490          */
1491         xe_vm_lock(vm, &ww, 0, false);
1492         for_each_tile(tile, xe, id) {
1493                 if (vm->pt_root[id]) {
1494                         xe_pt_destroy(vm->pt_root[id], vm->flags, NULL);
1495                         vm->pt_root[id] = NULL;
1496                 }
1497         }
1498         xe_vm_unlock(vm, &ww);
1499
1500         trace_xe_vm_free(vm);
1501         dma_fence_put(vm->rebind_fence);
1502         dma_resv_fini(&vm->resv);
1503         kfree(vm);
1504 }
1505
1506 void xe_vm_free(struct kref *ref)
1507 {
1508         struct xe_vm *vm = container_of(ref, struct xe_vm, refcount);
1509
1510         /* To destroy the VM we need to be able to sleep */
1511         queue_work(system_unbound_wq, &vm->destroy_work);
1512 }
1513
1514 struct xe_vm *xe_vm_lookup(struct xe_file *xef, u32 id)
1515 {
1516         struct xe_vm *vm;
1517
1518         mutex_lock(&xef->vm.lock);
1519         vm = xa_load(&xef->vm.xa, id);
1520         mutex_unlock(&xef->vm.lock);
1521
1522         if (vm)
1523                 xe_vm_get(vm);
1524
1525         return vm;
1526 }
1527
1528 u64 xe_vm_pdp4_descriptor(struct xe_vm *vm, struct xe_tile *tile)
1529 {
1530         return gen8_pde_encode(vm->pt_root[tile->id]->bo, 0,
1531                                XE_CACHE_WB);
1532 }
1533
1534 static struct dma_fence *
1535 xe_vm_unbind_vma(struct xe_vma *vma, struct xe_engine *e,
1536                  struct xe_sync_entry *syncs, u32 num_syncs)
1537 {
1538         struct xe_tile *tile;
1539         struct dma_fence *fence = NULL;
1540         struct dma_fence **fences = NULL;
1541         struct dma_fence_array *cf = NULL;
1542         struct xe_vm *vm = vma->vm;
1543         int cur_fence = 0, i;
1544         int number_tiles = hweight_long(vma->tile_present);
1545         int err;
1546         u8 id;
1547
1548         trace_xe_vma_unbind(vma);
1549
1550         if (number_tiles > 1) {
1551                 fences = kmalloc_array(number_tiles, sizeof(*fences),
1552                                        GFP_KERNEL);
1553                 if (!fences)
1554                         return ERR_PTR(-ENOMEM);
1555         }
1556
1557         for_each_tile(tile, vm->xe, id) {
1558                 if (!(vma->tile_present & BIT(id)))
1559                         goto next;
1560
1561                 fence = __xe_pt_unbind_vma(tile, vma, e, syncs, num_syncs);
1562                 if (IS_ERR(fence)) {
1563                         err = PTR_ERR(fence);
1564                         goto err_fences;
1565                 }
1566
1567                 if (fences)
1568                         fences[cur_fence++] = fence;
1569
1570 next:
1571                 if (e && vm->pt_root[id] && !list_empty(&e->multi_gt_list))
1572                         e = list_next_entry(e, multi_gt_list);
1573         }
1574
1575         if (fences) {
1576                 cf = dma_fence_array_create(number_tiles, fences,
1577                                             vm->composite_fence_ctx,
1578                                             vm->composite_fence_seqno++,
1579                                             false);
1580                 if (!cf) {
1581                         --vm->composite_fence_seqno;
1582                         err = -ENOMEM;
1583                         goto err_fences;
1584                 }
1585         }
1586
1587         for (i = 0; i < num_syncs; i++)
1588                 xe_sync_entry_signal(&syncs[i], NULL, cf ? &cf->base : fence);
1589
1590         return cf ? &cf->base : !fence ? dma_fence_get_stub() : fence;
1591
1592 err_fences:
1593         if (fences) {
1594                 while (cur_fence) {
1595                         /* FIXME: Rewind the previous binds? */
1596                         dma_fence_put(fences[--cur_fence]);
1597                 }
1598                 kfree(fences);
1599         }
1600
1601         return ERR_PTR(err);
1602 }
1603
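/*
 * xe_vm_bind_vma() - Program the GPU page tables for @vma on every tile in
 * its tile_mask, telling the PT code whether the tile already has a binding
 * so it can be treated as a rebind. As with unbind, multi-tile results are
 * collapsed into one composite fence.
 */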
1604 static struct dma_fence *
1605 xe_vm_bind_vma(struct xe_vma *vma, struct xe_engine *e,
1606                struct xe_sync_entry *syncs, u32 num_syncs)
1607 {
1608         struct xe_tile *tile;
1609         struct dma_fence *fence;
1610         struct dma_fence **fences = NULL;
1611         struct dma_fence_array *cf = NULL;
1612         struct xe_vm *vm = vma->vm;
1613         int cur_fence = 0, i;
1614         int number_tiles = hweight_long(vma->tile_mask);
1615         int err;
1616         u8 id;
1617
1618         trace_xe_vma_bind(vma);
1619
1620         if (number_tiles > 1) {
1621                 fences = kmalloc_array(number_tiles, sizeof(*fences),
1622                                        GFP_KERNEL);
1623                 if (!fences)
1624                         return ERR_PTR(-ENOMEM);
1625         }
1626
1627         for_each_tile(tile, vm->xe, id) {
1628                 if (!(vma->tile_mask & BIT(id)))
1629                         goto next;
1630
1631                 fence = __xe_pt_bind_vma(tile, vma, e, syncs, num_syncs,
1632                                          vma->tile_present & BIT(id));
1633                 if (IS_ERR(fence)) {
1634                         err = PTR_ERR(fence);
1635                         goto err_fences;
1636                 }
1637
1638                 if (fences)
1639                         fences[cur_fence++] = fence;
1640
1641 next:
1642                 if (e && vm->pt_root[id] && !list_empty(&e->multi_gt_list))
1643                         e = list_next_entry(e, multi_gt_list);
1644         }
1645
1646         if (fences) {
1647                 cf = dma_fence_array_create(number_tiles, fences,
1648                                             vm->composite_fence_ctx,
1649                                             vm->composite_fence_seqno++,
1650                                             false);
1651                 if (!cf) {
1652                         --vm->composite_fence_seqno;
1653                         err = -ENOMEM;
1654                         goto err_fences;
1655                 }
1656         }
1657
1658         for (i = 0; i < num_syncs; i++)
1659                 xe_sync_entry_signal(&syncs[i], NULL, cf ? &cf->base : fence);
1660
1661         return cf ? &cf->base : fence;
1662
1663 err_fences:
1664         if (fences) {
1665                 while (cur_fence) {
1666                         /* FIXME: Rewind the previous binds? */
1667                         dma_fence_put(fences[--cur_fence]);
1668                 }
1669                 kfree(fences);
1670         }
1671
1672         return ERR_PTR(err);
1673 }
1674
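/*
 * An async_op_fence is the user-visible fence for an asynchronous VM bind
 * operation. It is signalled from async_op_fence_cb() once the underlying
 * bind / unbind fence (wait_fence) signals, propagating any error, and it
 * holds a VM reference while the callback is armed.
 */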
1675 struct async_op_fence {
1676         struct dma_fence fence;
1677         struct dma_fence *wait_fence;
1678         struct dma_fence_cb cb;
1679         struct xe_vm *vm;
1680         wait_queue_head_t wq;
1681         bool started;
1682 };
1683
1684 static const char *async_op_fence_get_driver_name(struct dma_fence *dma_fence)
1685 {
1686         return "xe";
1687 }
1688
1689 static const char *
1690 async_op_fence_get_timeline_name(struct dma_fence *dma_fence)
1691 {
1692         return "async_op_fence";
1693 }
1694
1695 static const struct dma_fence_ops async_op_fence_ops = {
1696         .get_driver_name = async_op_fence_get_driver_name,
1697         .get_timeline_name = async_op_fence_get_timeline_name,
1698 };
1699
1700 static void async_op_fence_cb(struct dma_fence *fence, struct dma_fence_cb *cb)
1701 {
1702         struct async_op_fence *afence =
1703                 container_of(cb, struct async_op_fence, cb);
1704
1705         afence->fence.error = afence->wait_fence->error;
1706         dma_fence_signal(&afence->fence);
1707         xe_vm_put(afence->vm);
1708         dma_fence_put(afence->wait_fence);
1709         dma_fence_put(&afence->fence);
1710 }
1711
1712 static void add_async_op_fence_cb(struct xe_vm *vm,
1713                                   struct dma_fence *fence,
1714                                   struct async_op_fence *afence)
1715 {
1716         int ret;
1717
1718         if (!xe_vm_no_dma_fences(vm)) {
1719                 afence->started = true;
1720                 smp_wmb();
1721                 wake_up_all(&afence->wq);
1722         }
1723
1724         afence->wait_fence = dma_fence_get(fence);
1725         afence->vm = xe_vm_get(vm);
1726         dma_fence_get(&afence->fence);
1727         ret = dma_fence_add_callback(fence, &afence->cb, async_op_fence_cb);
1728         if (ret == -ENOENT) {
1729                 afence->fence.error = afence->wait_fence->error;
1730                 dma_fence_signal(&afence->fence);
1731         }
1732         if (ret) {
1733                 xe_vm_put(vm);
1734                 dma_fence_put(afence->wait_fence);
1735                 dma_fence_put(&afence->fence);
1736         }
1737         XE_WARN_ON(ret && ret != -ENOENT);
1738 }
1739
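/*
 * Wait until the async bind op backing @fence has been issued, i.e. the
 * async worker has run the bind and armed the completion callback. Fences
 * not created by this file return immediately; the wait is interruptible.
 */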
1740 int xe_vm_async_fence_wait_start(struct dma_fence *fence)
1741 {
1742         if (fence->ops == &async_op_fence_ops) {
1743                 struct async_op_fence *afence =
1744                         container_of(fence, struct async_op_fence, fence);
1745
1746                 XE_BUG_ON(xe_vm_no_dma_fences(afence->vm));
1747
1748                 smp_rmb();
1749                 return wait_event_interruptible(afence->wq, afence->started);
1750         }
1751
1752         return 0;
1753 }
1754
1755 static int __xe_vm_bind(struct xe_vm *vm, struct xe_vma *vma,
1756                         struct xe_engine *e, struct xe_sync_entry *syncs,
1757                         u32 num_syncs, struct async_op_fence *afence)
1758 {
1759         struct dma_fence *fence;
1760
1761         xe_vm_assert_held(vm);
1762
1763         fence = xe_vm_bind_vma(vma, e, syncs, num_syncs);
1764         if (IS_ERR(fence))
1765                 return PTR_ERR(fence);
1766         if (afence)
1767                 add_async_op_fence_cb(vm, fence, afence);
1768
1769         dma_fence_put(fence);
1770         return 0;
1771 }
1772
1773 static int xe_vm_bind(struct xe_vm *vm, struct xe_vma *vma, struct xe_engine *e,
1774                       struct xe_bo *bo, struct xe_sync_entry *syncs,
1775                       u32 num_syncs, struct async_op_fence *afence)
1776 {
1777         int err;
1778
1779         xe_vm_assert_held(vm);
1780         xe_bo_assert_held(bo);
1781
1782         if (bo) {
1783                 err = xe_bo_validate(bo, vm, true);
1784                 if (err)
1785                         return err;
1786         }
1787
1788         return __xe_vm_bind(vm, vma, e, syncs, num_syncs, afence);
1789 }
1790
1791 static int xe_vm_unbind(struct xe_vm *vm, struct xe_vma *vma,
1792                         struct xe_engine *e, struct xe_sync_entry *syncs,
1793                         u32 num_syncs, struct async_op_fence *afence)
1794 {
1795         struct dma_fence *fence;
1796
1797         xe_vm_assert_held(vm);
1798         xe_bo_assert_held(vma->bo);
1799
1800         fence = xe_vm_unbind_vma(vma, e, syncs, num_syncs);
1801         if (IS_ERR(fence))
1802                 return PTR_ERR(fence);
1803         if (afence)
1804                 add_async_op_fence_cb(vm, fence, afence);
1805
1806         xe_vma_destroy(vma, fence);
1807         dma_fence_put(fence);
1808
1809         return 0;
1810 }
1811
1812 static int vm_set_error_capture_address(struct xe_device *xe, struct xe_vm *vm,
1813                                         u64 value)
1814 {
1815         if (XE_IOCTL_ERR(xe, !value))
1816                 return -EINVAL;
1817
1818         if (XE_IOCTL_ERR(xe, !(vm->flags & XE_VM_FLAG_ASYNC_BIND_OPS)))
1819                 return -ENOTSUPP;
1820
1821         if (XE_IOCTL_ERR(xe, vm->async_ops.error_capture.addr))
1822                 return -ENOTSUPP;
1823
1824         vm->async_ops.error_capture.mm = current->mm;
1825         vm->async_ops.error_capture.addr = value;
1826         init_waitqueue_head(&vm->async_ops.error_capture.wq);
1827
1828         return 0;
1829 }
1830
1831 typedef int (*xe_vm_set_property_fn)(struct xe_device *xe, struct xe_vm *vm,
1832                                      u64 value);
1833
1834 static const xe_vm_set_property_fn vm_set_property_funcs[] = {
1835         [XE_VM_PROPERTY_BIND_OP_ERROR_CAPTURE_ADDRESS] =
1836                 vm_set_error_capture_address,
1837 };
1838
1839 static int vm_user_ext_set_property(struct xe_device *xe, struct xe_vm *vm,
1840                                     u64 extension)
1841 {
1842         u64 __user *address = u64_to_user_ptr(extension);
1843         struct drm_xe_ext_vm_set_property ext;
1844         int err;
1845
1846         err = __copy_from_user(&ext, address, sizeof(ext));
1847         if (XE_IOCTL_ERR(xe, err))
1848                 return -EFAULT;
1849
1850         if (XE_IOCTL_ERR(xe, ext.property >=
1851                          ARRAY_SIZE(vm_set_property_funcs)) ||
1852             XE_IOCTL_ERR(xe, ext.pad) ||
1853             XE_IOCTL_ERR(xe, ext.reserved[0] || ext.reserved[1]))
1854                 return -EINVAL;
1855
1856         return vm_set_property_funcs[ext.property](xe, vm, ext.value);
1857 }
1858
1859 typedef int (*xe_vm_user_extension_fn)(struct xe_device *xe, struct xe_vm *vm,
1860                                        u64 extension);
1861
1862 static const xe_vm_set_property_fn vm_user_extension_funcs[] = {
1863         [XE_VM_EXTENSION_SET_PROPERTY] = vm_user_ext_set_property,
1864 };
1865
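/*
 * Extension chains are walked recursively via ext.next_extension; cap the
 * depth so a malformed chain is rejected with -E2BIG rather than recursing
 * without bound.
 */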
1866 #define MAX_USER_EXTENSIONS     16
1867 static int vm_user_extensions(struct xe_device *xe, struct xe_vm *vm,
1868                               u64 extensions, int ext_number)
1869 {
1870         u64 __user *address = u64_to_user_ptr(extensions);
1871         struct xe_user_extension ext;
1872         int err;
1873
1874         if (XE_IOCTL_ERR(xe, ext_number >= MAX_USER_EXTENSIONS))
1875                 return -E2BIG;
1876
1877         err = __copy_from_user(&ext, address, sizeof(ext));
1878         if (XE_IOCTL_ERR(xe, err))
1879                 return -EFAULT;
1880
1881         if (XE_IOCTL_ERR(xe, ext.pad) ||
1882             XE_IOCTL_ERR(xe, ext.name >=
1883                          ARRAY_SIZE(vm_user_extension_funcs)))
1884                 return -EINVAL;
1885
1886         err = vm_user_extension_funcs[ext.name](xe, vm, extensions);
1887         if (XE_IOCTL_ERR(xe, err))
1888                 return err;
1889
1890         if (ext.next_extension)
1891                 return vm_user_extensions(xe, vm, ext.next_extension,
1892                                           ++ext_number);
1893
1894         return 0;
1895 }
1896
1897 #define ALL_DRM_XE_VM_CREATE_FLAGS (DRM_XE_VM_CREATE_SCRATCH_PAGE | \
1898                                     DRM_XE_VM_CREATE_COMPUTE_MODE | \
1899                                     DRM_XE_VM_CREATE_ASYNC_BIND_OPS | \
1900                                     DRM_XE_VM_CREATE_FAULT_MODE)
1901
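/*
 * Minimal userspace sketch (ioctl wrapper name assumed from xe_drm.h,
 * error handling omitted):
 *
 *	struct drm_xe_vm_create create = {
 *		.flags = DRM_XE_VM_CREATE_SCRATCH_PAGE,
 *	};
 *
 *	ioctl(fd, DRM_IOCTL_XE_VM_CREATE, &create);
 *
 * On success create.vm_id names the new VM for subsequent bind / destroy
 * ioctls.
 */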
1902 int xe_vm_create_ioctl(struct drm_device *dev, void *data,
1903                        struct drm_file *file)
1904 {
1905         struct xe_device *xe = to_xe_device(dev);
1906         struct xe_file *xef = to_xe_file(file);
1907         struct drm_xe_vm_create *args = data;
1908         struct xe_vm *vm;
1909         u32 id, asid;
1910         int err;
1911         u32 flags = 0;
1912
1913         if (XE_IOCTL_ERR(xe, args->reserved[0] || args->reserved[1]))
1914                 return -EINVAL;
1915
1916         if (XE_IOCTL_ERR(xe, args->flags & ~ALL_DRM_XE_VM_CREATE_FLAGS))
1917                 return -EINVAL;
1918
1919         if (XE_IOCTL_ERR(xe, args->flags & DRM_XE_VM_CREATE_SCRATCH_PAGE &&
1920                          args->flags & DRM_XE_VM_CREATE_FAULT_MODE))
1921                 return -EINVAL;
1922
1923         if (XE_IOCTL_ERR(xe, args->flags & DRM_XE_VM_CREATE_COMPUTE_MODE &&
1924                          args->flags & DRM_XE_VM_CREATE_FAULT_MODE))
1925                 return -EINVAL;
1926
1927         if (XE_IOCTL_ERR(xe, args->flags & DRM_XE_VM_CREATE_FAULT_MODE &&
1928                          xe_device_in_non_fault_mode(xe)))
1929                 return -EINVAL;
1930
1931         if (XE_IOCTL_ERR(xe, !(args->flags & DRM_XE_VM_CREATE_FAULT_MODE) &&
1932                          xe_device_in_fault_mode(xe)))
1933                 return -EINVAL;
1934
1935         if (XE_IOCTL_ERR(xe, args->flags & DRM_XE_VM_CREATE_FAULT_MODE &&
1936                          !xe->info.supports_usm))
1937                 return -EINVAL;
1938
1939         if (args->flags & DRM_XE_VM_CREATE_SCRATCH_PAGE)
1940                 flags |= XE_VM_FLAG_SCRATCH_PAGE;
1941         if (args->flags & DRM_XE_VM_CREATE_COMPUTE_MODE)
1942                 flags |= XE_VM_FLAG_COMPUTE_MODE;
1943         if (args->flags & DRM_XE_VM_CREATE_ASYNC_BIND_OPS)
1944                 flags |= XE_VM_FLAG_ASYNC_BIND_OPS;
1945         if (args->flags & DRM_XE_VM_CREATE_FAULT_MODE)
1946                 flags |= XE_VM_FLAG_FAULT_MODE;
1947
1948         vm = xe_vm_create(xe, flags);
1949         if (IS_ERR(vm))
1950                 return PTR_ERR(vm);
1951
1952         if (args->extensions) {
1953                 err = vm_user_extensions(xe, vm, args->extensions, 0);
1954                 if (XE_IOCTL_ERR(xe, err)) {
1955                         xe_vm_close_and_put(vm);
1956                         return err;
1957                 }
1958         }
1959
1960         mutex_lock(&xef->vm.lock);
1961         err = xa_alloc(&xef->vm.xa, &id, vm, xa_limit_32b, GFP_KERNEL);
1962         mutex_unlock(&xef->vm.lock);
1963         if (err) {
1964                 xe_vm_close_and_put(vm);
1965                 return err;
1966         }
1967
1968         if (xe->info.has_asid) {
1969                 mutex_lock(&xe->usm.lock);
1970                 err = xa_alloc_cyclic(&xe->usm.asid_to_vm, &asid, vm,
1971                                       XA_LIMIT(0, XE_MAX_ASID - 1),
1972                                       &xe->usm.next_asid, GFP_KERNEL);
1973                 mutex_unlock(&xe->usm.lock);
1974                 if (err) {
1975                         xe_vm_close_and_put(vm);
1976                         return err;
1977                 }
1978                 vm->usm.asid = asid;
1979         }
1980
1981         args->vm_id = id;
1982
1983 #if IS_ENABLED(CONFIG_DRM_XE_DEBUG_MEM)
1984         /* Warning: Security issue - never enable by default */
1985         args->reserved[0] = xe_bo_main_addr(vm->pt_root[0]->bo, XE_PAGE_SIZE);
1986 #endif
1987
1988         return 0;
1989 }
1990
1991 int xe_vm_destroy_ioctl(struct drm_device *dev, void *data,
1992                         struct drm_file *file)
1993 {
1994         struct xe_device *xe = to_xe_device(dev);
1995         struct xe_file *xef = to_xe_file(file);
1996         struct drm_xe_vm_destroy *args = data;
1997         struct xe_vm *vm;
1998
1999         if (XE_IOCTL_ERR(xe, args->pad) ||
2000             XE_IOCTL_ERR(xe, args->reserved[0] || args->reserved[1]))
2001                 return -EINVAL;
2002
2003         vm = xe_vm_lookup(xef, args->vm_id);
2004         if (XE_IOCTL_ERR(xe, !vm))
2005                 return -ENOENT;
2006         xe_vm_put(vm);
2007
2008         /* FIXME: Extend this check to non-compute mode VMs */
2009         if (XE_IOCTL_ERR(xe, vm->preempt.num_engines))
2010                 return -EBUSY;
2011
2012         mutex_lock(&xef->vm.lock);
2013         xa_erase(&xef->vm.xa, args->vm_id);
2014         mutex_unlock(&xef->vm.lock);
2015
2016         xe_vm_close_and_put(vm);
2017
2018         return 0;
2019 }
2020
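/*
 * Memory regions a prefetch can target: index 0 is GPU-accessible system
 * memory (TT), indices 1 and 2 are the two possible VRAM instances.
 */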
2021 static const u32 region_to_mem_type[] = {
2022         XE_PL_TT,
2023         XE_PL_VRAM0,
2024         XE_PL_VRAM1,
2025 };
2026
2027 static int xe_vm_prefetch(struct xe_vm *vm, struct xe_vma *vma,
2028                           struct xe_engine *e, u32 region,
2029                           struct xe_sync_entry *syncs, u32 num_syncs,
2030                           struct async_op_fence *afence)
2031 {
2032         int err;
2033
2034         XE_BUG_ON(region >= ARRAY_SIZE(region_to_mem_type));
2035
2036         if (!xe_vma_is_userptr(vma)) {
2037                 err = xe_bo_migrate(vma->bo, region_to_mem_type[region]);
2038                 if (err)
2039                         return err;
2040         }
2041
2042         if (vma->tile_mask != (vma->tile_present & ~vma->usm.tile_invalidated)) {
2043                 return xe_vm_bind(vm, vma, e, vma->bo, syncs, num_syncs,
2044                                   afence);
2045         } else {
2046                 int i;
2047
2048                 /* Nothing to do, signal fences now */
2049                 for (i = 0; i < num_syncs; i++)
2050                         xe_sync_entry_signal(&syncs[i], NULL,
2051                                              dma_fence_get_stub());
2052                 if (afence)
2053                         dma_fence_signal(&afence->fence);
2054                 return 0;
2055         }
2056 }
2057
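/*
 * The low 16 bits of a bind op encode the opcode; the upper bits carry the
 * XE_VM_BIND_FLAG_* modifiers (see SUPPORTED_FLAGS below).
 */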
2058 #define VM_BIND_OP(op)  (op & 0xffff)
2059
2060 static int __vm_bind_ioctl(struct xe_vm *vm, struct xe_vma *vma,
2061                            struct xe_engine *e, struct xe_bo *bo, u32 op,
2062                            u32 region, struct xe_sync_entry *syncs,
2063                            u32 num_syncs, struct async_op_fence *afence)
2064 {
2065         switch (VM_BIND_OP(op)) {
2066         case XE_VM_BIND_OP_MAP:
2067                 return xe_vm_bind(vm, vma, e, bo, syncs, num_syncs, afence);
2068         case XE_VM_BIND_OP_UNMAP:
2069         case XE_VM_BIND_OP_UNMAP_ALL:
2070                 return xe_vm_unbind(vm, vma, e, syncs, num_syncs, afence);
2071         case XE_VM_BIND_OP_MAP_USERPTR:
2072                 return xe_vm_bind(vm, vma, e, NULL, syncs, num_syncs, afence);
2073         case XE_VM_BIND_OP_PREFETCH:
2074                 return xe_vm_prefetch(vm, vma, e, region, syncs, num_syncs,
2075                                       afence);
2076                 break;
2077         default:
2078                 XE_BUG_ON("NOT POSSIBLE");
2079                 return -EINVAL;
2080         }
2081 }
2082
2083 struct ttm_buffer_object *xe_vm_ttm_bo(struct xe_vm *vm)
2084 {
2085         int idx = vm->flags & XE_VM_FLAG_MIGRATION ?
2086                 XE_VM_FLAG_GT_ID(vm->flags) : 0;
2087
2088         /* Safe to use index 0 as all BOs in the VM share a single dma-resv lock */
2089         return &vm->pt_root[idx]->bo->ttm;
2090 }
2091
2092 static void xe_vm_tv_populate(struct xe_vm *vm, struct ttm_validate_buffer *tv)
2093 {
2094         tv->num_shared = 1;
2095         tv->bo = xe_vm_ttm_bo(vm);
2096 }
2097
2098 static bool is_map_op(u32 op)
2099 {
2100         return VM_BIND_OP(op) == XE_VM_BIND_OP_MAP ||
2101                 VM_BIND_OP(op) == XE_VM_BIND_OP_MAP_USERPTR;
2102 }
2103
2104 static bool is_unmap_op(u32 op)
2105 {
2106         return VM_BIND_OP(op) == XE_VM_BIND_OP_UNMAP ||
2107                 VM_BIND_OP(op) == XE_VM_BIND_OP_UNMAP_ALL;
2108 }
2109
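/*
 * Execute a single bind op synchronously: reserve the VM (and the VMA's BO,
 * if any) with ttm_eu_reserve_buffers, run the op, and retry after re-pinning
 * userptr pages if the PT code returns -EAGAIN.
 */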
2110 static int vm_bind_ioctl(struct xe_vm *vm, struct xe_vma *vma,
2111                          struct xe_engine *e, struct xe_bo *bo,
2112                          struct drm_xe_vm_bind_op *bind_op,
2113                          struct xe_sync_entry *syncs, u32 num_syncs,
2114                          struct async_op_fence *afence)
2115 {
2116         LIST_HEAD(objs);
2117         LIST_HEAD(dups);
2118         struct ttm_validate_buffer tv_bo, tv_vm;
2119         struct ww_acquire_ctx ww;
2120         struct xe_bo *vbo;
2121         int err, i;
2122
2123         lockdep_assert_held(&vm->lock);
2124         XE_BUG_ON(!list_empty(&vma->unbind_link));
2125
2126         /* Binds deferred to faults, signal fences now */
2127         if (xe_vm_in_fault_mode(vm) && is_map_op(bind_op->op) &&
2128             !(bind_op->op & XE_VM_BIND_FLAG_IMMEDIATE)) {
2129                 for (i = 0; i < num_syncs; i++)
2130                         xe_sync_entry_signal(&syncs[i], NULL,
2131                                              dma_fence_get_stub());
2132                 if (afence)
2133                         dma_fence_signal(&afence->fence);
2134                 return 0;
2135         }
2136
2137         xe_vm_tv_populate(vm, &tv_vm);
2138         list_add_tail(&tv_vm.head, &objs);
2139         vbo = vma->bo;
2140         if (vbo) {
2141                 /*
2142                  * An unbind can drop the last reference to the BO and
2143                  * the BO is needed for ttm_eu_backoff_reservation so
2144                  * take a reference here.
2145                  */
2146                 xe_bo_get(vbo);
2147
2148                 tv_bo.bo = &vbo->ttm;
2149                 tv_bo.num_shared = 1;
2150                 list_add(&tv_bo.head, &objs);
2151         }
2152
2153 again:
2154         err = ttm_eu_reserve_buffers(&ww, &objs, true, &dups);
2155         if (!err) {
2156                 err = __vm_bind_ioctl(vm, vma, e, bo,
2157                                       bind_op->op, bind_op->region, syncs,
2158                                       num_syncs, afence);
2159                 ttm_eu_backoff_reservation(&ww, &objs);
2160                 if (err == -EAGAIN && xe_vma_is_userptr(vma)) {
2161                         lockdep_assert_held_write(&vm->lock);
2162                         err = xe_vma_userptr_pin_pages(vma);
2163                         if (!err)
2164                                 goto again;
2165                 }
2166         }
2167         xe_bo_put(vbo);
2168
2169         return err;
2170 }
2171
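/*
 * A queued asynchronous bind operation: everything needed to run the bind
 * later from the VM's async worker, plus the optional async_op_fence that
 * was installed in the userspace syncs.
 */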
2172 struct async_op {
2173         struct xe_vma *vma;
2174         struct xe_engine *engine;
2175         struct xe_bo *bo;
2176         struct drm_xe_vm_bind_op bind_op;
2177         struct xe_sync_entry *syncs;
2178         u32 num_syncs;
2179         struct list_head link;
2180         struct async_op_fence *fence;
2181 };
2182
2183 static void async_op_cleanup(struct xe_vm *vm, struct async_op *op)
2184 {
2185         while (op->num_syncs--)
2186                 xe_sync_entry_cleanup(&op->syncs[op->num_syncs]);
2187         kfree(op->syncs);
2188         xe_bo_put(op->bo);
2189         if (op->engine)
2190                 xe_engine_put(op->engine);
2191         xe_vm_put(vm);
2192         if (op->fence)
2193                 dma_fence_put(&op->fence->fence);
2194         kfree(op);
2195 }
2196
2197 static struct async_op *next_async_op(struct xe_vm *vm)
2198 {
2199         return list_first_entry_or_null(&vm->async_ops.pending,
2200                                         struct async_op, link);
2201 }
2202
2203 static void vm_set_async_error(struct xe_vm *vm, int err)
2204 {
2205         lockdep_assert_held(&vm->lock);
2206         vm->async_ops.error = err;
2207 }
2208
2209 static void async_op_work_func(struct work_struct *w)
2210 {
2211         struct xe_vm *vm = container_of(w, struct xe_vm, async_ops.work);
2212
2213         for (;;) {
2214                 struct async_op *op;
2215                 int err;
2216
2217                 if (vm->async_ops.error && !xe_vm_is_closed(vm))
2218                         break;
2219
2220                 spin_lock_irq(&vm->async_ops.lock);
2221                 op = next_async_op(vm);
2222                 if (op)
2223                         list_del_init(&op->link);
2224                 spin_unlock_irq(&vm->async_ops.lock);
2225
2226                 if (!op)
2227                         break;
2228
2229                 if (!xe_vm_is_closed(vm)) {
2230                         bool first, last;
2231
2232                         down_write(&vm->lock);
2233 again:
2234                         first = op->vma->first_munmap_rebind;
2235                         last = op->vma->last_munmap_rebind;
2236 #ifdef TEST_VM_ASYNC_OPS_ERROR
2237 #define FORCE_ASYNC_OP_ERROR    BIT(31)
2238                         if (!(op->bind_op.op & FORCE_ASYNC_OP_ERROR)) {
2239                                 err = vm_bind_ioctl(vm, op->vma, op->engine,
2240                                                     op->bo, &op->bind_op,
2241                                                     op->syncs, op->num_syncs,
2242                                                     op->fence);
2243                         } else {
2244                                 err = -ENOMEM;
2245                                 op->bind_op.op &= ~FORCE_ASYNC_OP_ERROR;
2246                         }
2247 #else
2248                         err = vm_bind_ioctl(vm, op->vma, op->engine, op->bo,
2249                                             &op->bind_op, op->syncs,
2250                                             op->num_syncs, op->fence);
2251 #endif
2252                         /*
2253                          * In order for the fencing to work (stall behind
2254                          * existing jobs / prevent new jobs from running) all
2255                          * the dma-resv slots need to be programmed in a batch
2256                          * relative to execs / the rebind worker. The vm->lock
2257                          * ensures this.
2258                          */
2259                         if (!err && ((first && VM_BIND_OP(op->bind_op.op) ==
2260                                       XE_VM_BIND_OP_UNMAP) ||
2261                                      vm->async_ops.munmap_rebind_inflight)) {
2262                                 if (last) {
2263                                         op->vma->last_munmap_rebind = false;
2264                                         vm->async_ops.munmap_rebind_inflight =
2265                                                 false;
2266                                 } else {
2267                                         vm->async_ops.munmap_rebind_inflight =
2268                                                 true;
2269
2270                                         async_op_cleanup(vm, op);
2271
2272                                         spin_lock_irq(&vm->async_ops.lock);
2273                                         op = next_async_op(vm);
2274                                         XE_BUG_ON(!op);
2275                                         list_del_init(&op->link);
2276                                         spin_unlock_irq(&vm->async_ops.lock);
2277
2278                                         goto again;
2279                                 }
2280                         }
2281                         if (err) {
2282                                 trace_xe_vma_fail(op->vma);
2283                                 drm_warn(&vm->xe->drm, "Async VM op(%d) failed with %d",
2284                                          VM_BIND_OP(op->bind_op.op),
2285                                          err);
2286
2287                                 spin_lock_irq(&vm->async_ops.lock);
2288                                 list_add(&op->link, &vm->async_ops.pending);
2289                                 spin_unlock_irq(&vm->async_ops.lock);
2290
2291                                 vm_set_async_error(vm, err);
2292                                 up_write(&vm->lock);
2293
2294                                 if (vm->async_ops.error_capture.addr)
2295                                         vm_error_capture(vm, err,
2296                                                          op->bind_op.op,
2297                                                          op->bind_op.addr,
2298                                                          op->bind_op.range);
2299                                 break;
2300                         }
2301                         up_write(&vm->lock);
2302                 } else {
2303                         trace_xe_vma_flush(op->vma);
2304
2305                         if (is_unmap_op(op->bind_op.op)) {
2306                                 down_write(&vm->lock);
2307                                 xe_vma_destroy_unlocked(op->vma);
2308                                 up_write(&vm->lock);
2309                         }
2310
2311                         if (op->fence && !test_bit(DMA_FENCE_FLAG_SIGNALED_BIT,
2312                                                    &op->fence->fence.flags)) {
2313                                 if (!xe_vm_no_dma_fences(vm)) {
2314                                         op->fence->started = true;
2315                                         smp_wmb();
2316                                         wake_up_all(&op->fence->wq);
2317                                 }
2318                                 dma_fence_signal(&op->fence->fence);
2319                         }
2320                 }
2321
2322                 async_op_cleanup(vm, op);
2323         }
2324 }
2325
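/*
 * Queue a single bind op on the VM's async pending list. If any syncs were
 * supplied an async_op_fence is created and installed in them so userspace
 * can wait on the op; the worker is only kicked while the VM is not in an
 * error state.
 */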
2326 static int __vm_bind_ioctl_async(struct xe_vm *vm, struct xe_vma *vma,
2327                                  struct xe_engine *e, struct xe_bo *bo,
2328                                  struct drm_xe_vm_bind_op *bind_op,
2329                                  struct xe_sync_entry *syncs, u32 num_syncs)
2330 {
2331         struct async_op *op;
2332         bool installed = false;
2333         u64 seqno;
2334         int i;
2335
2336         lockdep_assert_held(&vm->lock);
2337
2338         op = kmalloc(sizeof(*op), GFP_KERNEL);
2339         if (!op) {
2340                 return -ENOMEM;
2341         }
2342
2343         if (num_syncs) {
2344                 op->fence = kmalloc(sizeof(*op->fence), GFP_KERNEL);
2345                 if (!op->fence) {
2346                         kfree(op);
2347                         return -ENOMEM;
2348                 }
2349
2350                 seqno = e ? ++e->bind.fence_seqno : ++vm->async_ops.fence.seqno;
2351                 dma_fence_init(&op->fence->fence, &async_op_fence_ops,
2352                                &vm->async_ops.lock, e ? e->bind.fence_ctx :
2353                                vm->async_ops.fence.context, seqno);
2354
2355                 if (!xe_vm_no_dma_fences(vm)) {
2356                         op->fence->vm = vm;
2357                         op->fence->started = false;
2358                         init_waitqueue_head(&op->fence->wq);
2359                 }
2360         } else {
2361                 op->fence = NULL;
2362         }
2363         op->vma = vma;
2364         op->engine = e;
2365         op->bo = bo;
2366         op->bind_op = *bind_op;
2367         op->syncs = syncs;
2368         op->num_syncs = num_syncs;
2369         INIT_LIST_HEAD(&op->link);
2370
2371         for (i = 0; i < num_syncs; i++)
2372                 installed |= xe_sync_entry_signal(&syncs[i], NULL,
2373                                                   &op->fence->fence);
2374
2375         if (!installed && op->fence)
2376                 dma_fence_signal(&op->fence->fence);
2377
2378         spin_lock_irq(&vm->async_ops.lock);
2379         list_add_tail(&op->link, &vm->async_ops.pending);
2380         spin_unlock_irq(&vm->async_ops.lock);
2381
2382         if (!vm->async_ops.error)
2383                 queue_work(system_unbound_wq, &vm->async_ops.work);
2384
2385         return 0;
2386 }
2387
2388 static int vm_bind_ioctl_async(struct xe_vm *vm, struct xe_vma *vma,
2389                                struct xe_engine *e, struct xe_bo *bo,
2390                                struct drm_xe_vm_bind_op *bind_op,
2391                                struct xe_sync_entry *syncs, u32 num_syncs)
2392 {
2393         struct xe_vma *__vma, *next;
2394         struct list_head rebind_list;
2395         struct xe_sync_entry *in_syncs = NULL, *out_syncs = NULL;
2396         u32 num_in_syncs = 0, num_out_syncs = 0;
2397         bool first = true, last;
2398         int err;
2399         int i;
2400
2401         lockdep_assert_held(&vm->lock);
2402
2403         /* Not a linked list of unbinds + rebinds, easy */
2404         if (list_empty(&vma->unbind_link))
2405                 return __vm_bind_ioctl_async(vm, vma, e, bo, bind_op,
2406                                              syncs, num_syncs);
2407
2408         /*
2409          * Linked list of unbinds + rebinds: decompose the syncs into 'in / out',
2410          * passing the 'in' syncs to the first operation and the 'out' syncs to
2411          * the last. The reference counting is also a little tricky: increment the
2412          * VM / bind engine ref count on all but the last operation and increment
2413          * the BO ref count on each rebind.
2414          */
2415
2416         XE_BUG_ON(VM_BIND_OP(bind_op->op) != XE_VM_BIND_OP_UNMAP &&
2417                   VM_BIND_OP(bind_op->op) != XE_VM_BIND_OP_UNMAP_ALL &&
2418                   VM_BIND_OP(bind_op->op) != XE_VM_BIND_OP_PREFETCH);
2419
2420         /* Decompose syncs */
2421         if (num_syncs) {
2422                 in_syncs = kmalloc(sizeof(*in_syncs) * num_syncs, GFP_KERNEL);
2423                 out_syncs = kmalloc(sizeof(*out_syncs) * num_syncs, GFP_KERNEL);
2424                 if (!in_syncs || !out_syncs) {
2425                         err = -ENOMEM;
2426                         goto out_error;
2427                 }
2428
2429                 for (i = 0; i < num_syncs; ++i) {
2430                         bool signal = syncs[i].flags & DRM_XE_SYNC_SIGNAL;
2431
2432                         if (signal)
2433                                 out_syncs[num_out_syncs++] = syncs[i];
2434                         else
2435                                 in_syncs[num_in_syncs++] = syncs[i];
2436                 }
2437         }
2438
2439         /* Do unbinds + move rebinds to new list */
2440         INIT_LIST_HEAD(&rebind_list);
2441         list_for_each_entry_safe(__vma, next, &vma->unbind_link, unbind_link) {
2442                 if (__vma->destroyed ||
2443                     VM_BIND_OP(bind_op->op) == XE_VM_BIND_OP_PREFETCH) {
2444                         list_del_init(&__vma->unbind_link);
2445                         xe_bo_get(bo);
2446                         err = __vm_bind_ioctl_async(xe_vm_get(vm), __vma,
2447                                                     e ? xe_engine_get(e) : NULL,
2448                                                     bo, bind_op, first ?
2449                                                     in_syncs : NULL,
2450                                                     first ? num_in_syncs : 0);
2451                         if (err) {
2452                                 xe_bo_put(bo);
2453                                 xe_vm_put(vm);
2454                                 if (e)
2455                                         xe_engine_put(e);
2456                                 goto out_error;
2457                         }
2458                         in_syncs = NULL;
2459                         first = false;
2460                 } else {
2461                         list_move_tail(&__vma->unbind_link, &rebind_list);
2462                 }
2463         }
2464         last = list_empty(&rebind_list);
2465         if (!last) {
2466                 xe_vm_get(vm);
2467                 if (e)
2468                         xe_engine_get(e);
2469         }
2470         err = __vm_bind_ioctl_async(vm, vma, e,
2471                                     bo, bind_op,
2472                                     first ? in_syncs :
2473                                     last ? out_syncs : NULL,
2474                                     first ? num_in_syncs :
2475                                     last ? num_out_syncs : 0);
2476         if (err) {
2477                 if (!last) {
2478                         xe_vm_put(vm);
2479                         if (e)
2480                                 xe_engine_put(e);
2481                 }
2482                 goto out_error;
2483         }
2484         in_syncs = NULL;
2485
2486         /* Do rebinds */
2487         list_for_each_entry_safe(__vma, next, &rebind_list, unbind_link) {
2488                 list_del_init(&__vma->unbind_link);
2489                 last = list_empty(&rebind_list);
2490
2491                 if (xe_vma_is_userptr(__vma)) {
2492                         bind_op->op = XE_VM_BIND_FLAG_ASYNC |
2493                                 XE_VM_BIND_OP_MAP_USERPTR;
2494                 } else {
2495                         bind_op->op = XE_VM_BIND_FLAG_ASYNC |
2496                                 XE_VM_BIND_OP_MAP;
2497                         xe_bo_get(__vma->bo);
2498                 }
2499
2500                 if (!last) {
2501                         xe_vm_get(vm);
2502                         if (e)
2503                                 xe_engine_get(e);
2504                 }
2505
2506                 err = __vm_bind_ioctl_async(vm, __vma, e,
2507                                             __vma->bo, bind_op, last ?
2508                                             out_syncs : NULL,
2509                                             last ? num_out_syncs : 0);
2510                 if (err) {
2511                         if (!last) {
2512                                 xe_vm_put(vm);
2513                                 if (e)
2514                                         xe_engine_put(e);
2515                         }
2516                         goto out_error;
2517                 }
2518         }
2519
2520         kfree(syncs);
2521         return 0;
2522
2523 out_error:
2524         kfree(in_syncs);
2525         kfree(out_syncs);
2526         kfree(syncs);
2527
2528         return err;
2529 }
2530
2531 static int __vm_bind_ioctl_lookup_vma(struct xe_vm *vm, struct xe_bo *bo,
2532                                       u64 addr, u64 range, u32 op)
2533 {
2534         struct xe_device *xe = vm->xe;
2535         struct xe_vma *vma, lookup;
2536         bool async = !!(op & XE_VM_BIND_FLAG_ASYNC);
2537
2538         lockdep_assert_held(&vm->lock);
2539
2540         lookup.start = addr;
2541         lookup.end = addr + range - 1;
2542
2543         switch (VM_BIND_OP(op)) {
2544         case XE_VM_BIND_OP_MAP:
2545         case XE_VM_BIND_OP_MAP_USERPTR:
2546                 vma = xe_vm_find_overlapping_vma(vm, &lookup);
2547                 if (XE_IOCTL_ERR(xe, vma))
2548                         return -EBUSY;
2549                 break;
2550         case XE_VM_BIND_OP_UNMAP:
2551         case XE_VM_BIND_OP_PREFETCH:
2552                 vma = xe_vm_find_overlapping_vma(vm, &lookup);
2553                 if (XE_IOCTL_ERR(xe, !vma) ||
2554                     XE_IOCTL_ERR(xe, (vma->start != addr ||
2555                                  vma->end != addr + range - 1) && !async))
2556                         return -EINVAL;
2557                 break;
2558         case XE_VM_BIND_OP_UNMAP_ALL:
2559                 break;
2560         default:
2561                 XE_BUG_ON("NOT POSSIBLE");
2562                 return -EINVAL;
2563         }
2564
2565         return 0;
2566 }
2567
2568 static void prep_vma_destroy(struct xe_vm *vm, struct xe_vma *vma)
2569 {
2570         down_read(&vm->userptr.notifier_lock);
2571         vma->destroyed = true;
2572         up_read(&vm->userptr.notifier_lock);
2573         xe_vm_remove_vma(vm, vma);
2574 }
2575
2576 static int prep_replacement_vma(struct xe_vm *vm, struct xe_vma *vma)
2577 {
2578         int err;
2579
2580         if (vma->bo && !vma->bo->vm) {
2581                 vm_insert_extobj(vm, vma);
2582                 err = add_preempt_fences(vm, vma->bo);
2583                 if (err)
2584                         return err;
2585         }
2586
2587         return 0;
2588 }
2589
2590 /*
2591  * Find all overlapping VMAs in the lookup range and add them to a list in the
2592  * returned VMA; all of the VMAs found will be unbound. Also possibly add 2 new
2593  * VMAs that need to be bound if the first / last VMAs are not fully unbound.
2594  * This is akin to how munmap works.
2595  */
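/*
 * For example (addresses purely illustrative): unbinding [0x2000, 0x3000)
 * from an existing VMA covering [0x1000, 0x4000) queues the whole VMA for
 * unbind and creates two replacement VMAs, [0x1000, 0x2000) and
 * [0x3000, 0x4000), which are rebound as part of the same operation.
 */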
2596 static struct xe_vma *vm_unbind_lookup_vmas(struct xe_vm *vm,
2597                                             struct xe_vma *lookup)
2598 {
2599         struct xe_vma *vma = xe_vm_find_overlapping_vma(vm, lookup);
2600         struct rb_node *node;
2601         struct xe_vma *first = vma, *last = vma, *new_first = NULL,
2602                       *new_last = NULL, *__vma, *next;
2603         int err = 0;
2604         bool first_munmap_rebind = false;
2605
2606         lockdep_assert_held(&vm->lock);
2607         XE_BUG_ON(!vma);
2608
2609         node = &vma->vm_node;
2610         while ((node = rb_next(node))) {
2611                 if (!xe_vma_cmp_vma_cb(lookup, node)) {
2612                         __vma = to_xe_vma(node);
2613                         list_add_tail(&__vma->unbind_link, &vma->unbind_link);
2614                         last = __vma;
2615                 } else {
2616                         break;
2617                 }
2618         }
2619
2620         node = &vma->vm_node;
2621         while ((node = rb_prev(node))) {
2622                 if (!xe_vma_cmp_vma_cb(lookup, node)) {
2623                         __vma = to_xe_vma(node);
2624                         list_add(&__vma->unbind_link, &vma->unbind_link);
2625                         first = __vma;
2626                 } else {
2627                         break;
2628                 }
2629         }
2630
2631         if (first->start != lookup->start) {
2632                 struct ww_acquire_ctx ww;
2633
2634                 if (first->bo)
2635                         err = xe_bo_lock(first->bo, &ww, 0, true);
2636                 if (err)
2637                         goto unwind;
2638                 new_first = xe_vma_create(first->vm, first->bo,
2639                                           first->bo ? first->bo_offset :
2640                                           first->userptr.ptr,
2641                                           first->start,
2642                                           lookup->start - 1,
2643                                           (first->pte_flags & XE_PTE_READ_ONLY),
2644                                           first->tile_mask);
2645                 if (first->bo)
2646                         xe_bo_unlock(first->bo, &ww);
2647                 if (!new_first) {
2648                         err = -ENOMEM;
2649                         goto unwind;
2650                 }
2651                 if (!first->bo) {
2652                         err = xe_vma_userptr_pin_pages(new_first);
2653                         if (err)
2654                                 goto unwind;
2655                 }
2656                 err = prep_replacement_vma(vm, new_first);
2657                 if (err)
2658                         goto unwind;
2659         }
2660
2661         if (last->end != lookup->end) {
2662                 struct ww_acquire_ctx ww;
2663                 u64 chunk = lookup->end + 1 - last->start;
2664
2665                 if (last->bo)
2666                         err = xe_bo_lock(last->bo, &ww, 0, true);
2667                 if (err)
2668                         goto unwind;
2669                 new_last = xe_vma_create(last->vm, last->bo,
2670                                          last->bo ? last->bo_offset + chunk :
2671                                          last->userptr.ptr + chunk,
2672                                          last->start + chunk,
2673                                          last->end,
2674                                          (last->pte_flags & XE_PTE_READ_ONLY),
2675                                          last->tile_mask);
2676                 if (last->bo)
2677                         xe_bo_unlock(last->bo, &ww);
2678                 if (!new_last) {
2679                         err = -ENOMEM;
2680                         goto unwind;
2681                 }
2682                 if (!last->bo) {
2683                         err = xe_vma_userptr_pin_pages(new_last);
2684                         if (err)
2685                                 goto unwind;
2686                 }
2687                 err = prep_replacement_vma(vm, new_last);
2688                 if (err)
2689                         goto unwind;
2690         }
2691
2692         prep_vma_destroy(vm, vma);
2693         if (list_empty(&vma->unbind_link) && (new_first || new_last))
2694                 vma->first_munmap_rebind = true;
2695         list_for_each_entry(__vma, &vma->unbind_link, unbind_link) {
2696                 if ((new_first || new_last) && !first_munmap_rebind) {
2697                         __vma->first_munmap_rebind = true;
2698                         first_munmap_rebind = true;
2699                 }
2700                 prep_vma_destroy(vm, __vma);
2701         }
2702         if (new_first) {
2703                 xe_vm_insert_vma(vm, new_first);
2704                 list_add_tail(&new_first->unbind_link, &vma->unbind_link);
2705                 if (!new_last)
2706                         new_first->last_munmap_rebind = true;
2707         }
2708         if (new_last) {
2709                 xe_vm_insert_vma(vm, new_last);
2710                 list_add_tail(&new_last->unbind_link, &vma->unbind_link);
2711                 new_last->last_munmap_rebind = true;
2712         }
2713
2714         return vma;
2715
2716 unwind:
2717         list_for_each_entry_safe(__vma, next, &vma->unbind_link, unbind_link)
2718                 list_del_init(&__vma->unbind_link);
2719         if (new_last) {
2720                 prep_vma_destroy(vm, new_last);
2721                 xe_vma_destroy_unlocked(new_last);
2722         }
2723         if (new_first) {
2724                 prep_vma_destroy(vm, new_first);
2725                 xe_vma_destroy_unlocked(new_first);
2726         }
2727
2728         return ERR_PTR(err);
2729 }
2730
2731 /*
2732  * Similar to vm_unbind_lookup_vmas, find all VMAs in lookup range to prefetch
2733  */
2734 static struct xe_vma *vm_prefetch_lookup_vmas(struct xe_vm *vm,
2735                                               struct xe_vma *lookup,
2736                                               u32 region)
2737 {
2738         struct xe_vma *vma = xe_vm_find_overlapping_vma(vm, lookup), *__vma,
2739                       *next;
2740         struct rb_node *node;
2741
2742         if (!xe_vma_is_userptr(vma)) {
2743                 if (!xe_bo_can_migrate(vma->bo, region_to_mem_type[region]))
2744                         return ERR_PTR(-EINVAL);
2745         }
2746
2747         node = &vma->vm_node;
2748         while ((node = rb_next(node))) {
2749                 if (!xe_vma_cmp_vma_cb(lookup, node)) {
2750                         __vma = to_xe_vma(node);
2751                         if (!xe_vma_is_userptr(__vma)) {
2752                                 if (!xe_bo_can_migrate(__vma->bo, region_to_mem_type[region]))
2753                                         goto flush_list;
2754                         }
2755                         list_add_tail(&__vma->unbind_link, &vma->unbind_link);
2756                 } else {
2757                         break;
2758                 }
2759         }
2760
2761         node = &vma->vm_node;
2762         while ((node = rb_prev(node))) {
2763                 if (!xe_vma_cmp_vma_cb(lookup, node)) {
2764                         __vma = to_xe_vma(node);
2765                         if (!xe_vma_is_userptr(__vma)) {
2766                                 if (!xe_bo_can_migrate(__vma->bo, region_to_mem_type[region]))
2767                                         goto flush_list;
2768                         }
2769                         list_add(&__vma->unbind_link, &vma->unbind_link);
2770                 } else {
2771                         break;
2772                 }
2773         }
2774
2775         return vma;
2776
2777 flush_list:
2778         list_for_each_entry_safe(__vma, next, &vma->unbind_link,
2779                                  unbind_link)
2780                 list_del_init(&__vma->unbind_link);
2781
2782         return ERR_PTR(-EINVAL);
2783 }
2784
2785 static struct xe_vma *vm_unbind_all_lookup_vmas(struct xe_vm *vm,
2786                                                 struct xe_bo *bo)
2787 {
2788         struct xe_vma *first = NULL, *vma;
2789
2790         lockdep_assert_held(&vm->lock);
2791         xe_bo_assert_held(bo);
2792
2793         list_for_each_entry(vma, &bo->vmas, bo_link) {
2794                 if (vma->vm != vm)
2795                         continue;
2796
2797                 prep_vma_destroy(vm, vma);
2798                 if (!first)
2799                         first = vma;
2800                 else
2801                         list_add_tail(&vma->unbind_link, &first->unbind_link);
2802         }
2803
2804         return first;
2805 }
2806
2807 static struct xe_vma *vm_bind_ioctl_lookup_vma(struct xe_vm *vm,
2808                                                struct xe_bo *bo,
2809                                                u64 bo_offset_or_userptr,
2810                                                u64 addr, u64 range, u32 op,
2811                                                u64 tile_mask, u32 region)
2812 {
2813         struct ww_acquire_ctx ww;
2814         struct xe_vma *vma, lookup;
2815         int err;
2816
2817         lockdep_assert_held(&vm->lock);
2818
2819         lookup.start = addr;
2820         lookup.end = addr + range - 1;
2821
2822         switch (VM_BIND_OP(op)) {
2823         case XE_VM_BIND_OP_MAP:
2824                 XE_BUG_ON(!bo);
2825
2826                 err = xe_bo_lock(bo, &ww, 0, true);
2827                 if (err)
2828                         return ERR_PTR(err);
2829                 vma = xe_vma_create(vm, bo, bo_offset_or_userptr, addr,
2830                                     addr + range - 1,
2831                                     op & XE_VM_BIND_FLAG_READONLY,
2832                                     tile_mask);
2833                 xe_bo_unlock(bo, &ww);
2834                 if (!vma)
2835                         return ERR_PTR(-ENOMEM);
2836
2837                 xe_vm_insert_vma(vm, vma);
2838                 if (!bo->vm) {
2839                         vm_insert_extobj(vm, vma);
2840                         err = add_preempt_fences(vm, bo);
2841                         if (err) {
2842                                 prep_vma_destroy(vm, vma);
2843                                 xe_vma_destroy_unlocked(vma);
2844
2845                                 return ERR_PTR(err);
2846                         }
2847                 }
2848                 break;
2849         case XE_VM_BIND_OP_UNMAP:
2850                 vma = vm_unbind_lookup_vmas(vm, &lookup);
2851                 break;
2852         case XE_VM_BIND_OP_PREFETCH:
2853                 vma = vm_prefetch_lookup_vmas(vm, &lookup, region);
2854                 break;
2855         case XE_VM_BIND_OP_UNMAP_ALL:
2856                 XE_BUG_ON(!bo);
2857
2858                 err = xe_bo_lock(bo, &ww, 0, true);
2859                 if (err)
2860                         return ERR_PTR(err);
2861                 vma = vm_unbind_all_lookup_vmas(vm, bo);
2862                 if (!vma)
2863                         vma = ERR_PTR(-EINVAL);
2864                 xe_bo_unlock(bo, &ww);
2865                 break;
2866         case XE_VM_BIND_OP_MAP_USERPTR:
2867                 XE_BUG_ON(bo);
2868
2869                 vma = xe_vma_create(vm, NULL, bo_offset_or_userptr, addr,
2870                                     addr + range - 1,
2871                                     op & XE_VM_BIND_FLAG_READONLY,
2872                                     tile_mask);
2873                 if (!vma)
2874                         return ERR_PTR(-ENOMEM);
2875
2876                 err = xe_vma_userptr_pin_pages(vma);
2877                 if (err) {
2878                         prep_vma_destroy(vm, vma);
2879                         xe_vma_destroy_unlocked(vma);
2880
2881                         return ERR_PTR(err);
2882                 } else {
2883                         xe_vm_insert_vma(vm, vma);
2884                 }
2885                 break;
2886         default:
2887                 XE_BUG_ON("NOT POSSIBLE");
2888                 vma = ERR_PTR(-EINVAL);
2889         }
2890
2891         return vma;
2892 }
2893
2894 #ifdef TEST_VM_ASYNC_OPS_ERROR
2895 #define SUPPORTED_FLAGS \
2896         (FORCE_ASYNC_OP_ERROR | XE_VM_BIND_FLAG_ASYNC | \
2897          XE_VM_BIND_FLAG_READONLY | XE_VM_BIND_FLAG_IMMEDIATE | 0xffff)
2898 #else
2899 #define SUPPORTED_FLAGS \
2900         (XE_VM_BIND_FLAG_ASYNC | XE_VM_BIND_FLAG_READONLY | \
2901          XE_VM_BIND_FLAG_IMMEDIATE | 0xffff)
2902 #endif
2903 #define XE_64K_PAGE_MASK 0xffffull
2904
2905 #define MAX_BINDS       512     /* FIXME: Picking an arbitrary upper limit */
2906
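/*
 * Validate the user-supplied bind arguments before touching any VM state.
 * For vectored binds the op array is copied from userspace here; on error it
 * is freed again via the free_bind_ops path.
 */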
2907 static int vm_bind_ioctl_check_args(struct xe_device *xe,
2908                                     struct drm_xe_vm_bind *args,
2909                                     struct drm_xe_vm_bind_op **bind_ops,
2910                                     bool *async)
2911 {
2912         int err;
2913         int i;
2914
2915         if (XE_IOCTL_ERR(xe, args->extensions) ||
2916             XE_IOCTL_ERR(xe, args->pad || args->pad2) ||
2917             XE_IOCTL_ERR(xe, args->reserved[0] || args->reserved[1]) ||
2918             XE_IOCTL_ERR(xe, !args->num_binds) ||
2919             XE_IOCTL_ERR(xe, args->num_binds > MAX_BINDS))
2920                 return -EINVAL;
2921
2922         if (args->num_binds > 1) {
2923                 u64 __user *bind_user =
2924                         u64_to_user_ptr(args->vector_of_binds);
2925
2926                 *bind_ops = kmalloc(sizeof(struct drm_xe_vm_bind_op) *
2927                                     args->num_binds, GFP_KERNEL);
2928                 if (!*bind_ops)
2929                         return -ENOMEM;
2930
2931                 err = __copy_from_user(*bind_ops, bind_user,
2932                                        sizeof(struct drm_xe_vm_bind_op) *
2933                                        args->num_binds);
2934                 if (XE_IOCTL_ERR(xe, err)) {
2935                         err = -EFAULT;
2936                         goto free_bind_ops;
2937                 }
2938         } else {
2939                 *bind_ops = &args->bind;
2940         }
2941
2942         for (i = 0; i < args->num_binds; ++i) {
2943                 u64 range = (*bind_ops)[i].range;
2944                 u64 addr = (*bind_ops)[i].addr;
2945                 u32 op = (*bind_ops)[i].op;
2946                 u32 obj = (*bind_ops)[i].obj;
2947                 u64 obj_offset = (*bind_ops)[i].obj_offset;
2948                 u32 region = (*bind_ops)[i].region;
2949
2950                 if (XE_IOCTL_ERR(xe, (*bind_ops)[i].pad) ||
2951                     XE_IOCTL_ERR(xe, (*bind_ops)[i].reserved[0] ||
2952                                      (*bind_ops)[i].reserved[1])) {
2953                         err = -EINVAL;
2954                         goto free_bind_ops;
2955                 }
2956
2957                 if (i == 0) {
2958                         *async = !!(op & XE_VM_BIND_FLAG_ASYNC);
2959                 } else if (XE_IOCTL_ERR(xe, !*async) ||
2960                            XE_IOCTL_ERR(xe, !(op & XE_VM_BIND_FLAG_ASYNC)) ||
2961                            XE_IOCTL_ERR(xe, VM_BIND_OP(op) ==
2962                                         XE_VM_BIND_OP_RESTART)) {
2963                         err = -EINVAL;
2964                         goto free_bind_ops;
2965                 }
2966
2967                 if (XE_IOCTL_ERR(xe, !*async &&
2968                                  VM_BIND_OP(op) == XE_VM_BIND_OP_UNMAP_ALL)) {
2969                         err = -EINVAL;
2970                         goto free_bind_ops;
2971                 }
2972
2973                 if (XE_IOCTL_ERR(xe, !*async &&
2974                                  VM_BIND_OP(op) == XE_VM_BIND_OP_PREFETCH)) {
2975                         err = -EINVAL;
2976                         goto free_bind_ops;
2977                 }
2978
2979                 if (XE_IOCTL_ERR(xe, VM_BIND_OP(op) >
2980                                  XE_VM_BIND_OP_PREFETCH) ||
2981                     XE_IOCTL_ERR(xe, op & ~SUPPORTED_FLAGS) ||
2982                     XE_IOCTL_ERR(xe, !obj &&
2983                                  VM_BIND_OP(op) == XE_VM_BIND_OP_MAP) ||
2984                     XE_IOCTL_ERR(xe, !obj &&
2985                                  VM_BIND_OP(op) == XE_VM_BIND_OP_UNMAP_ALL) ||
2986                     XE_IOCTL_ERR(xe, addr &&
2987                                  VM_BIND_OP(op) == XE_VM_BIND_OP_UNMAP_ALL) ||
2988                     XE_IOCTL_ERR(xe, range &&
2989                                  VM_BIND_OP(op) == XE_VM_BIND_OP_UNMAP_ALL) ||
2990                     XE_IOCTL_ERR(xe, obj &&
2991                                  VM_BIND_OP(op) == XE_VM_BIND_OP_MAP_USERPTR) ||
2992                     XE_IOCTL_ERR(xe, obj &&
2993                                  VM_BIND_OP(op) == XE_VM_BIND_OP_PREFETCH) ||
2994                     XE_IOCTL_ERR(xe, region &&
2995                                  VM_BIND_OP(op) != XE_VM_BIND_OP_PREFETCH) ||
2996                     XE_IOCTL_ERR(xe, !(BIT(region) &
2997                                        xe->info.mem_region_mask)) ||
2998                     XE_IOCTL_ERR(xe, obj &&
2999                                  VM_BIND_OP(op) == XE_VM_BIND_OP_UNMAP)) {
3000                         err = -EINVAL;
3001                         goto free_bind_ops;
3002                 }
3003
3004                 if (XE_IOCTL_ERR(xe, obj_offset & ~PAGE_MASK) ||
3005                     XE_IOCTL_ERR(xe, addr & ~PAGE_MASK) ||
3006                     XE_IOCTL_ERR(xe, range & ~PAGE_MASK) ||
3007                     XE_IOCTL_ERR(xe, !range && VM_BIND_OP(op) !=
3008                                  XE_VM_BIND_OP_RESTART &&
3009                                  VM_BIND_OP(op) != XE_VM_BIND_OP_UNMAP_ALL)) {
3010                         err = -EINVAL;
3011                         goto free_bind_ops;
3012                 }
3013         }
3014
3015         return 0;
3016
3017 free_bind_ops:
3018         if (args->num_binds > 1)
3019                 kfree(*bind_ops);
3020         return err;
3021 }
3022
3023 int xe_vm_bind_ioctl(struct drm_device *dev, void *data, struct drm_file *file)
3024 {
3025         struct xe_device *xe = to_xe_device(dev);
3026         struct xe_file *xef = to_xe_file(file);
3027         struct drm_xe_vm_bind *args = data;
3028         struct drm_xe_sync __user *syncs_user;
3029         struct xe_bo **bos = NULL;
3030         struct xe_vma **vmas = NULL;
3031         struct xe_vm *vm;
3032         struct xe_engine *e = NULL;
3033         u32 num_syncs;
3034         struct xe_sync_entry *syncs = NULL;
3035         struct drm_xe_vm_bind_op *bind_ops;
3036         bool async;
3037         int err;
3038         int i, j = 0;
3039
3040         err = vm_bind_ioctl_check_args(xe, args, &bind_ops, &async);
3041         if (err)
3042                 return err;
3043
3044         vm = xe_vm_lookup(xef, args->vm_id);
3045         if (XE_IOCTL_ERR(xe, !vm)) {
3046                 err = -EINVAL;
3047                 goto free_objs;
3048         }
3049
3050         if (XE_IOCTL_ERR(xe, xe_vm_is_closed(vm))) {
3051                 DRM_ERROR("VM closed while we were looking it up?\n");
3052                 err = -ENOENT;
3053                 goto put_vm;
3054         }
3055
3056         if (args->engine_id) {
3057                 e = xe_engine_lookup(xef, args->engine_id);
3058                 if (XE_IOCTL_ERR(xe, !e)) {
3059                         err = -ENOENT;
3060                         goto put_vm;
3061                 }
3062                 if (XE_IOCTL_ERR(xe, !(e->flags & ENGINE_FLAG_VM))) {
3063                         err = -EINVAL;
3064                         goto put_engine;
3065                 }
3066         }
3067
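             /*
              * RESTART clears a pending async bind error and re-queues the
              * async worker; it carries no syncs and is only valid for VMs
              * created with async bind ops that actually have an error set.
              */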
3068         if (VM_BIND_OP(bind_ops[0].op) == XE_VM_BIND_OP_RESTART) {
3069                 if (XE_IOCTL_ERR(xe, !(vm->flags & XE_VM_FLAG_ASYNC_BIND_OPS)))
3070                         err = -ENOTSUPP;
3071                 if (XE_IOCTL_ERR(xe, !err && args->num_syncs))
3072                         err = -EINVAL;
3073                 if (XE_IOCTL_ERR(xe, !err && !vm->async_ops.error))
3074                         err = -EPROTO;
3075
3076                 if (!err) {
3077                         down_write(&vm->lock);
3078                         trace_xe_vm_restart(vm);
3079                         vm_set_async_error(vm, 0);
3080                         up_write(&vm->lock);
3081
3082                         queue_work(system_unbound_wq, &vm->async_ops.work);
3083
3084                         /* Rebinds may have been blocked, give worker a kick */
3085                         if (xe_vm_in_compute_mode(vm))
3086                                 queue_work(vm->xe->ordered_wq,
3087                                            &vm->preempt.rebind_work);
3088                 }
3089
3090                 goto put_engine;
3091         }
3092
3093         if (XE_IOCTL_ERR(xe, !vm->async_ops.error &&
3094                          async != !!(vm->flags & XE_VM_FLAG_ASYNC_BIND_OPS))) {
3095                 err = -ENOTSUPP;
3096                 goto put_engine;
3097         }
3098
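             /*
              * Validate each bind against the VM: the range must fit below
              * vm->size without wrapping, and tile_mask may only name tiles
              * that exist on this device.
              */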
3099         for (i = 0; i < args->num_binds; ++i) {
3100                 u64 range = bind_ops[i].range;
3101                 u64 addr = bind_ops[i].addr;
3102
3103                 if (XE_IOCTL_ERR(xe, range > vm->size) ||
3104                     XE_IOCTL_ERR(xe, addr > vm->size - range)) {
3105                         err = -EINVAL;
3106                         goto put_engine;
3107                 }
3108
3109                 if (bind_ops[i].tile_mask) {
3110                         u64 valid_tiles = BIT(xe->info.tile_count) - 1;
3111
3112                         if (XE_IOCTL_ERR(xe, bind_ops[i].tile_mask &
3113                                          ~valid_tiles)) {
3114                                 err = -EINVAL;
3115                                 goto put_engine;
3116                         }
3117                 }
3118         }
3119
3120         bos = kcalloc(args->num_binds, sizeof(*bos), GFP_KERNEL);
3121         if (!bos) {
3122                 err = -ENOMEM;
3123                 goto put_engine;
3124         }
3125
3126         vmas = kcalloc(args->num_binds, sizeof(*vmas), GFP_KERNEL);
3127         if (!vmas) {
3128                 err = -ENOMEM;
3129                 goto put_engine;
3130         }
3131
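             /*
              * Look up and take a reference on each GEM object, checking that
              * the bound range fits inside the BO and, for 64K-page BOs, that
              * offset, address and range are 64K aligned.
              */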
3132         for (i = 0; i < args->num_binds; ++i) {
3133                 struct drm_gem_object *gem_obj;
3134                 u64 range = bind_ops[i].range;
3135                 u64 addr = bind_ops[i].addr;
3136                 u32 obj = bind_ops[i].obj;
3137                 u64 obj_offset = bind_ops[i].obj_offset;
3138
3139                 if (!obj)
3140                         continue;
3141
3142                 gem_obj = drm_gem_object_lookup(file, obj);
3143                 if (XE_IOCTL_ERR(xe, !gem_obj)) {
3144                         err = -ENOENT;
3145                         goto put_obj;
3146                 }
3147                 bos[i] = gem_to_xe_bo(gem_obj);
3148
3149                 if (XE_IOCTL_ERR(xe, range > bos[i]->size) ||
3150                     XE_IOCTL_ERR(xe, obj_offset >
3151                                  bos[i]->size - range)) {
3152                         err = -EINVAL;
3153                         goto put_obj;
3154                 }
3155
3156                 if (bos[i]->flags & XE_BO_INTERNAL_64K) {
3157                         if (XE_IOCTL_ERR(xe, obj_offset &
3158                                          XE_64K_PAGE_MASK) ||
3159                             XE_IOCTL_ERR(xe, addr & XE_64K_PAGE_MASK) ||
3160                             XE_IOCTL_ERR(xe, range & XE_64K_PAGE_MASK)) {
3161                                 err = -EINVAL;
3162                                 goto put_obj;
3163                         }
3164                 }
3165         }
3166
3167         if (args->num_syncs) {
3168                 syncs = kcalloc(args->num_syncs, sizeof(*syncs), GFP_KERNEL);
3169                 if (!syncs) {
3170                         err = -ENOMEM;
3171                         goto put_obj;
3172                 }
3173         }
3174
3175         syncs_user = u64_to_user_ptr(args->syncs);
3176         for (num_syncs = 0; num_syncs < args->num_syncs; num_syncs++) {
3177                 err = xe_sync_entry_parse(xe, xef, &syncs[num_syncs],
3178                                           &syncs_user[num_syncs], false,
3179                                           xe_vm_in_fault_mode(vm));
3180                 if (err)
3181                         goto free_syncs;
3182         }
3183
3184         err = down_write_killable(&vm->lock);
3185         if (err)
3186                 goto free_syncs;
3187
3188         /* Do some error checking first to make the unwind easier */
3189         for (i = 0; i < args->num_binds; ++i) {
3190                 u64 range = bind_ops[i].range;
3191                 u64 addr = bind_ops[i].addr;
3192                 u32 op = bind_ops[i].op;
3193
3194                 err = __vm_bind_ioctl_lookup_vma(vm, bos[i], addr, range, op);
3195                 if (err)
3196                         goto release_vm_lock;
3197         }
3198
3199         for (i = 0; i < args->num_binds; ++i) {
3200                 u64 range = bind_ops[i].range;
3201                 u64 addr = bind_ops[i].addr;
3202                 u32 op = bind_ops[i].op;
3203                 u64 obj_offset = bind_ops[i].obj_offset;
3204                 u64 tile_mask = bind_ops[i].tile_mask;
3205                 u32 region = bind_ops[i].region;
3206
3207                 vmas[i] = vm_bind_ioctl_lookup_vma(vm, bos[i], obj_offset,
3208                                                    addr, range, op, tile_mask,
3209                                                    region);
3210                 if (IS_ERR(vmas[i])) {
3211                         err = PTR_ERR(vmas[i]);
3212                         vmas[i] = NULL;
3213                         goto destroy_vmas;
3214                 }
3215         }
3216
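             /*
              * When several binds share one sync array, the wait (in) syncs
              * are attached to the first bind and the signal (out) syncs to
              * the last, so the series orders as a single operation.
              */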
3217         for (j = 0; j < args->num_binds; ++j) {
3218                 struct xe_sync_entry *__syncs;
3219                 u32 __num_syncs = 0;
3220                 bool first_or_last = j == 0 || j == args->num_binds - 1;
3221
3222                 if (args->num_binds == 1) {
3223                         __num_syncs = num_syncs;
3224                         __syncs = syncs;
3225                 } else if (first_or_last && num_syncs) {
3226                         bool first = j == 0;
3227
3228                         __syncs = kmalloc_array(num_syncs, sizeof(*__syncs),
3229                                                 GFP_KERNEL);
3230                         if (!__syncs) {
3231                                 err = -ENOMEM;
3232                                 break;
3233                         }
3234
3235                         /* in-syncs on first bind, out-syncs on last bind */
3236                         for (i = 0; i < num_syncs; ++i) {
3237                                 bool signal = syncs[i].flags &
3238                                         DRM_XE_SYNC_SIGNAL;
3239
3240                                 if ((first && !signal) || (!first && signal))
3241                                         __syncs[__num_syncs++] = syncs[i];
3242                         }
3243                 } else {
3244                         __num_syncs = 0;
3245                         __syncs = NULL;
3246                 }
3247
3248                 if (async) {
3249                         bool last = j == args->num_binds - 1;
3250
3251                         /*
3252                          * Each pass of the async worker drops a set of refs.
3253                          * One set was taken above; all but the last need more.
3254                          */
3255                         if (!last) {
3256                                 if (e)
3257                                         xe_engine_get(e);
3258                                 xe_vm_get(vm);
3259                         }
3260
3261                         err = vm_bind_ioctl_async(vm, vmas[j], e, bos[j],
3262                                                   bind_ops + j, __syncs,
3263                                                   __num_syncs);
3264                         if (err && !last) {
3265                                 if (e)
3266                                         xe_engine_put(e);
3267                                 xe_vm_put(vm);
3268                         }
3269                         if (err)
3270                                 break;
3271                 } else {
3272                         XE_BUG_ON(j != 0);      /* Not supported */
3273                         err = vm_bind_ioctl(vm, vmas[j], e, bos[j],
3274                                             bind_ops + j, __syncs,
3275                                             __num_syncs, NULL);
3276                         break;  /* Needed so cleanup loops work */
3277                 }
3278         }
3279
3280         /* Most of the cleanup is owned by the async bind worker */
3281         if (async && !err) {
3282                 up_write(&vm->lock);
3283                 if (args->num_binds > 1)
3284                         kfree(syncs);
3285                 goto free_objs;
3286         }
3287
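             /*
              * Unwind binds that never reached the async worker: tear down any
              * VMAs chained on the unbind list and destroy the VMAs created by
              * MAP / MAP_USERPTR ops.
              */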
3288 destroy_vmas:
3289         for (i = j; err && i < args->num_binds; ++i) {
3290                 u32 op = bind_ops[i].op;
3291                 struct xe_vma *vma, *next;
3292
3293                 if (!vmas[i])
3294                         break;
3295
3296                 list_for_each_entry_safe(vma, next, &vmas[i]->unbind_link,
3297                                          unbind_link) {
3298                         list_del_init(&vma->unbind_link);
3299                         if (!vma->destroyed) {
3300                                 prep_vma_destroy(vm, vma);
3301                                 xe_vma_destroy_unlocked(vma);
3302                         }
3303                 }
3304
3305                 switch (VM_BIND_OP(op)) {
3306                 case XE_VM_BIND_OP_MAP:
3307                         prep_vma_destroy(vm, vmas[i]);
3308                         xe_vma_destroy_unlocked(vmas[i]);
3309                         break;
3310                 case XE_VM_BIND_OP_MAP_USERPTR:
3311                         prep_vma_destroy(vm, vmas[i]);
3312                         xe_vma_destroy_unlocked(vmas[i]);
3313                         break;
3314                 }
3315         }
3316 release_vm_lock:
3317         up_write(&vm->lock);
3318 free_syncs:
3319         while (num_syncs--) {
3320                 if (async && j &&
3321                     !(syncs[num_syncs].flags & DRM_XE_SYNC_SIGNAL))
3322                         continue;       /* Still in async worker */
3323                 xe_sync_entry_cleanup(&syncs[num_syncs]);
3324         }
3325
3326         kfree(syncs);
3327 put_obj:
3328         for (i = j; i < args->num_binds; ++i)
3329                 xe_bo_put(bos[i]);
3330 put_engine:
3331         if (e)
3332                 xe_engine_put(e);
3333 put_vm:
3334         xe_vm_put(vm);
3335 free_objs:
3336         kfree(bos);
3337         kfree(vmas);
3338         if (args->num_binds > 1)
3339                 kfree(bind_ops);
3340         return err;
3341 }
3342
3343 /*
3344  * XXX: Using the TTM wrappers for now, likely can call into dma-resv code
3345  * directly to optimize. Also this likely should be an inline function.
3346  */
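     /*
      * A direct variant would presumably amount to dma_resv_lock(&vm->resv,
      * NULL) (or its interruptible flavour) plus a
      * dma_resv_reserve_fences(&vm->resv, num_resv) call, dropping the
      * ttm_validate_buffer / ww_acquire_ctx bookkeeping below; kept on the
      * TTM helpers for now as noted above.
      */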
3347 int xe_vm_lock(struct xe_vm *vm, struct ww_acquire_ctx *ww,
3348                int num_resv, bool intr)
3349 {
3350         struct ttm_validate_buffer tv_vm;
3351         LIST_HEAD(objs);
3352         LIST_HEAD(dups);
3353
3354         XE_BUG_ON(!ww);
3355
3356         tv_vm.num_shared = num_resv;
3357         tv_vm.bo = xe_vm_ttm_bo(vm);
3358         list_add_tail(&tv_vm.head, &objs);
3359
3360         return ttm_eu_reserve_buffers(ww, &objs, intr, &dups);
3361 }
3362
3363 void xe_vm_unlock(struct xe_vm *vm, struct ww_acquire_ctx *ww)
3364 {
3365         dma_resv_unlock(&vm->resv);
3366         ww_acquire_fini(ww);
3367 }
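
     /*
      * Illustrative locking pattern for the two helpers above (not taken from
      * the driver):
      *
      *     struct ww_acquire_ctx ww;
      *     int err;
      *
      *     err = xe_vm_lock(vm, &ww, 0, true);
      *     if (err)
      *             return err;
      *     ... operate on state protected by vm->resv ...
      *     xe_vm_unlock(vm, &ww);
      */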
3368
3369 /**
3370  * xe_vm_invalidate_vma - invalidate GPU mappings for VMA without a lock
3371  * @vma: VMA to invalidate
3372  *
3373  * Walks the page-table leaves backing this VMA, zeroing the entries it
3374  * owns, issues a TLB invalidation on every tile that had a mapping, and
3375  * blocks until all invalidations have completed.
3376  *
3377  * Return: 0 on success, negative error code otherwise.
3378  */
3379 int xe_vm_invalidate_vma(struct xe_vma *vma)
3380 {
3381         struct xe_device *xe = vma->vm->xe;
3382         struct xe_tile *tile;
3383         u32 tile_needs_invalidate = 0;
3384         int seqno[XE_MAX_TILES_PER_DEVICE];
3385         u8 id;
3386         int ret;
3387
3388         XE_BUG_ON(!xe_vm_in_fault_mode(vma->vm));
3389         trace_xe_vma_usm_invalidate(vma);
3390
3391         /* Check that we don't race with page-table updates */
3392         if (IS_ENABLED(CONFIG_PROVE_LOCKING)) {
3393                 if (xe_vma_is_userptr(vma)) {
3394                         WARN_ON_ONCE(!mmu_interval_check_retry
3395                                      (&vma->userptr.notifier,
3396                                       vma->userptr.notifier_seq));
3397                         WARN_ON_ONCE(!dma_resv_test_signaled(&vma->vm->resv,
3398                                                              DMA_RESV_USAGE_BOOKKEEP));
3399
3400                 } else {
3401                         xe_bo_assert_held(vma->bo);
3402                 }
3403         }
3404
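             /*
              * First pass: zap the PTEs on every tile that maps this VMA and
              * kick off a TLB invalidation per tile. Second pass: wait for all
              * of the issued invalidations so they can proceed in parallel.
              */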
3405         for_each_tile(tile, xe, id) {
3406                 if (xe_pt_zap_ptes(tile, vma)) {
3407                         tile_needs_invalidate |= BIT(id);
3408                         xe_device_wmb(xe);
3409                         /*
3410                          * FIXME: We potentially need to invalidate multiple
3411                          * GTs within the tile
3412                          */
3413                         seqno[id] = xe_gt_tlb_invalidation_vma(&tile->primary_gt, NULL, vma);
3414                         if (seqno[id] < 0)
3415                                 return seqno[id];
3416                 }
3417         }
3418
3419         for_each_tile(tile, xe, id) {
3420                 if (tile_needs_invalidate & BIT(id)) {
3421                         ret = xe_gt_tlb_invalidation_wait(&tile->primary_gt, seqno[id]);
3422                         if (ret < 0)
3423                                 return ret;
3424                 }
3425         }
3426
3427         vma->usm.tile_invalidated = vma->tile_mask;
3428
3429         return 0;
3430 }
3431
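     /*
      * Debug helper: dumps the VM's page-table root and the backing address
      * of every VMA (userptr, VRAM or system memory) for the given GT.
      */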
3432 int xe_analyze_vm(struct drm_printer *p, struct xe_vm *vm, int gt_id)
3433 {
3434         struct rb_node *node;
3435         bool is_vram;
3436         u64 addr;
3437
3438         if (!down_read_trylock(&vm->lock)) {
3439                 drm_printf(p, " Failed to acquire VM lock to dump capture\n");
3440                 return 0;
3441         }
3442         if (vm->pt_root[gt_id]) {
3443                 addr = xe_bo_addr(vm->pt_root[gt_id]->bo, 0, XE_PAGE_SIZE,
3444                                   &is_vram);
3445                 drm_printf(p, " VM root: A:0x%llx %s\n", addr, is_vram ? "VRAM" : "SYS");
3446         }
3447
3448         for (node = rb_first(&vm->vmas); node; node = rb_next(node)) {
3449                 struct xe_vma *vma = to_xe_vma(node);
3450                 bool is_userptr = xe_vma_is_userptr(vma);
3451
3452                 if (is_userptr) {
3453                         struct xe_res_cursor cur;
3454
3455                         xe_res_first_sg(vma->userptr.sg, 0, XE_PAGE_SIZE,
3456                                         &cur);
3457                         addr = xe_res_dma(&cur);
3458                 } else {
3459                         addr = __xe_bo_addr(vma->bo, 0, XE_PAGE_SIZE, &is_vram);
3460                 }
3461                 drm_printf(p, " [%016llx-%016llx] S:0x%016llx A:%016llx %s\n",
3462                            vma->start, vma->end, vma->end - vma->start + 1ull,
3463                            addr, is_userptr ? "USR" : is_vram ? "VRAM" : "SYS");
3464         }
3465         up_read(&vm->lock);
3466
3467         return 0;
3468 }