drm/xe: Use GuC to do GGTT invalidations for the GuC firmware
[linux-2.6-microblaze.git] drivers/gpu/drm/xe/xe_vm.c
1 // SPDX-License-Identifier: MIT
2 /*
3  * Copyright © 2021 Intel Corporation
4  */
5
6 #include "xe_vm.h"
7
8 #include <linux/dma-fence-array.h>
9
10 #include <drm/ttm/ttm_execbuf_util.h>
11 #include <drm/ttm/ttm_tt.h>
12 #include <drm/xe_drm.h>
13 #include <linux/kthread.h>
14 #include <linux/mm.h>
15 #include <linux/swap.h>
16
17 #include "xe_bo.h"
18 #include "xe_device.h"
19 #include "xe_engine.h"
20 #include "xe_gt.h"
21 #include "xe_gt_pagefault.h"
22 #include "xe_gt_tlb_invalidation.h"
23 #include "xe_migrate.h"
24 #include "xe_pm.h"
25 #include "xe_preempt_fence.h"
26 #include "xe_pt.h"
27 #include "xe_res_cursor.h"
28 #include "xe_trace.h"
29 #include "xe_sync.h"
30
31 #define TEST_VM_ASYNC_OPS_ERROR
32
33 /**
34  * xe_vma_userptr_check_repin() - Advisory check for repin needed
35  * @vma: The userptr vma
36  *
37  * Check if the userptr vma has been invalidated since last successful
38  * repin. The check is advisory only and the function can be called
39  * without the vm->userptr.notifier_lock held. There is no guarantee that the
40  * vma userptr will remain valid after a lockless check, so typically
41  * the call needs to be followed by a proper check under the notifier_lock.
42  *
43  * Return: 0 if userptr vma is valid, -EAGAIN otherwise; repin recommended.
44  */
45 int xe_vma_userptr_check_repin(struct xe_vma *vma)
46 {
47         return mmu_interval_check_retry(&vma->userptr.notifier,
48                                         vma->userptr.notifier_seq) ?
49                 -EAGAIN : 0;
50 }
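/*
 * A minimal usage sketch for the advisory check above (illustrative only,
 * modelled on the rebind-worker flow later in this file; vma is a userptr
 * vma, vm == vma->vm, err is an int and error handling is elided). The
 * lockless check is only a hint; the authoritative check is made under the
 * notifier lock:
 *
 *	if (xe_vma_userptr_check_repin(vma) == -EAGAIN) {
 *		err = xe_vma_userptr_pin_pages(vma);
 *		if (err < 0)
 *			return err;
 *	}
 *
 *	down_read(&vm->userptr.notifier_lock);
 *	err = __xe_vm_userptr_needs_repin(vm);
 *	up_read(&vm->userptr.notifier_lock);
 */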
51
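/**
 * xe_vma_userptr_pin_pages() - Fault in and DMA-map the pages of a userptr vma
 * @vma: The userptr vma.
 *
 * Faults in the CPU pages backing the userptr range with
 * get_user_pages_fast(), builds and DMA-maps an sg table for them and records
 * the current mmu notifier sequence number, retrying if the range was
 * invalidated concurrently. The page references are dropped again before
 * returning; coherency relies on the mmu interval notifier rather than on a
 * long-term pin. Must be called with vm->lock held.
 *
 * Return: 0 on success, negative error code on failure.
 */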
52 int xe_vma_userptr_pin_pages(struct xe_vma *vma)
53 {
54         struct xe_vm *vm = vma->vm;
55         struct xe_device *xe = vm->xe;
56         const unsigned long num_pages =
57                 (vma->end - vma->start + 1) >> PAGE_SHIFT;
58         struct page **pages;
59         bool in_kthread = !current->mm;
60         unsigned long notifier_seq;
61         int pinned, ret, i;
62         bool read_only = vma->pte_flags & PTE_READ_ONLY;
63
64         lockdep_assert_held(&vm->lock);
65         XE_BUG_ON(!xe_vma_is_userptr(vma));
66 retry:
67         if (vma->destroyed)
68                 return 0;
69
70         notifier_seq = mmu_interval_read_begin(&vma->userptr.notifier);
71         if (notifier_seq == vma->userptr.notifier_seq)
72                 return 0;
73
74         pages = kvmalloc_array(num_pages, sizeof(*pages), GFP_KERNEL);
75         if (!pages)
76                 return -ENOMEM;
77
78         if (vma->userptr.sg) {
79                 dma_unmap_sgtable(xe->drm.dev,
80                                   vma->userptr.sg,
81                                   read_only ? DMA_TO_DEVICE :
82                                   DMA_BIDIRECTIONAL, 0);
83                 sg_free_table(vma->userptr.sg);
84                 vma->userptr.sg = NULL;
85         }
86
87         pinned = ret = 0;
88         if (in_kthread) {
89                 if (!mmget_not_zero(vma->userptr.notifier.mm)) {
90                         ret = -EFAULT;
91                         goto mm_closed;
92                 }
93                 kthread_use_mm(vma->userptr.notifier.mm);
94         }
95
96         while (pinned < num_pages) {
97                 ret = get_user_pages_fast(vma->userptr.ptr + pinned * PAGE_SIZE,
98                                           num_pages - pinned,
99                                           read_only ? 0 : FOLL_WRITE,
100                                           &pages[pinned]);
101                 if (ret < 0) {
102                         if (in_kthread)
103                                 ret = 0;
104                         break;
105                 }
106
107                 pinned += ret;
108                 ret = 0;
109         }
110
111         if (in_kthread) {
112                 kthread_unuse_mm(vma->userptr.notifier.mm);
113                 mmput(vma->userptr.notifier.mm);
114         }
115 mm_closed:
116         if (ret)
117                 goto out;
118
119         ret = sg_alloc_table_from_pages(&vma->userptr.sgt, pages, pinned,
120                                         0, (u64)pinned << PAGE_SHIFT,
121                                         GFP_KERNEL);
122         if (ret) {
123                 vma->userptr.sg = NULL;
124                 goto out;
125         }
126         vma->userptr.sg = &vma->userptr.sgt;
127
128         ret = dma_map_sgtable(xe->drm.dev, vma->userptr.sg,
129                               read_only ? DMA_TO_DEVICE :
130                               DMA_BIDIRECTIONAL,
131                               DMA_ATTR_SKIP_CPU_SYNC |
132                               DMA_ATTR_NO_KERNEL_MAPPING);
133         if (ret) {
134                 sg_free_table(vma->userptr.sg);
135                 vma->userptr.sg = NULL;
136                 goto out;
137         }
138
139         for (i = 0; i < pinned; ++i) {
140                 if (!read_only) {
141                         lock_page(pages[i]);
142                         set_page_dirty(pages[i]);
143                         unlock_page(pages[i]);
144                 }
145
146                 mark_page_accessed(pages[i]);
147         }
148
149 out:
150         release_pages(pages, pinned);
151         kvfree(pages);
152
153         if (!(ret < 0)) {
154                 vma->userptr.notifier_seq = notifier_seq;
155                 if (xe_vma_userptr_check_repin(vma) == -EAGAIN)
156                         goto retry;
157         }
158
159         return ret < 0 ? ret : 0;
160 }
161
162 static bool preempt_fences_waiting(struct xe_vm *vm)
163 {
164         struct xe_engine *e;
165
166         lockdep_assert_held(&vm->lock);
167         xe_vm_assert_held(vm);
168
169         list_for_each_entry(e, &vm->preempt.engines, compute.link) {
170                 if (!e->compute.pfence ||
171                     test_bit(DMA_FENCE_FLAG_ENABLE_SIGNAL_BIT,
172                              &e->compute.pfence->flags)) {
173                         return true;
174                 }
175         }
176
177         return false;
178 }
179
180 static void free_preempt_fences(struct list_head *list)
181 {
182         struct list_head *link, *next;
183
184         list_for_each_safe(link, next, list)
185                 xe_preempt_fence_free(to_preempt_fence_from_link(link));
186 }
187
188 static int alloc_preempt_fences(struct xe_vm *vm, struct list_head *list,
189                                 unsigned int *count)
190 {
191         lockdep_assert_held(&vm->lock);
192         xe_vm_assert_held(vm);
193
194         if (*count >= vm->preempt.num_engines)
195                 return 0;
196
197         for (; *count < vm->preempt.num_engines; ++(*count)) {
198                 struct xe_preempt_fence *pfence = xe_preempt_fence_alloc();
199
200                 if (IS_ERR(pfence))
201                         return PTR_ERR(pfence);
202
203                 list_move_tail(xe_preempt_fence_link(pfence), list);
204         }
205
206         return 0;
207 }
208
209 static int wait_for_existing_preempt_fences(struct xe_vm *vm)
210 {
211         struct xe_engine *e;
212
213         xe_vm_assert_held(vm);
214
215         list_for_each_entry(e, &vm->preempt.engines, compute.link) {
216                 if (e->compute.pfence) {
217                         long timeout = dma_fence_wait(e->compute.pfence, false);
218
219                         if (timeout < 0)
220                                 return -ETIME;
221                         dma_fence_put(e->compute.pfence);
222                         e->compute.pfence = NULL;
223                 }
224         }
225
226         return 0;
227 }
228
229 static void arm_preempt_fences(struct xe_vm *vm, struct list_head *list)
230 {
231         struct list_head *link;
232         struct xe_engine *e;
233
234         list_for_each_entry(e, &vm->preempt.engines, compute.link) {
235                 struct dma_fence *fence;
236
237                 link = list->next;
238                 XE_BUG_ON(link == list);
239
240                 fence = xe_preempt_fence_arm(to_preempt_fence_from_link(link),
241                                              e, e->compute.context,
242                                              ++e->compute.seqno);
243                 dma_fence_put(e->compute.pfence);
244                 e->compute.pfence = fence;
245         }
246 }
247
248 static int add_preempt_fences(struct xe_vm *vm, struct xe_bo *bo)
249 {
250         struct xe_engine *e;
251         struct ww_acquire_ctx ww;
252         int err;
253
254         err = xe_bo_lock(bo, &ww, vm->preempt.num_engines, true);
255         if (err)
256                 return err;
257
258         list_for_each_entry(e, &vm->preempt.engines, compute.link)
259                 if (e->compute.pfence) {
260                         dma_resv_add_fence(bo->ttm.base.resv,
261                                            e->compute.pfence,
262                                            DMA_RESV_USAGE_BOOKKEEP);
263                 }
264
265         xe_bo_unlock(bo, &ww);
266         return 0;
267 }
268
269 /**
270  * xe_vm_fence_all_extobjs() - Add a fence to vm's external objects' resv
271  * @vm: The vm.
272  * @fence: The fence to add.
273  * @usage: The resv usage for the fence.
274  *
275  * Loops over all of the vm's external object bindings and adds a @fence
276  * with the given @usage to each external object's reservation
277  * object.
278  */
279 void xe_vm_fence_all_extobjs(struct xe_vm *vm, struct dma_fence *fence,
280                              enum dma_resv_usage usage)
281 {
282         struct xe_vma *vma;
283
284         list_for_each_entry(vma, &vm->extobj.list, extobj.link)
285                 dma_resv_add_fence(vma->bo->ttm.base.resv, fence, usage);
286 }
287
288 static void resume_and_reinstall_preempt_fences(struct xe_vm *vm)
289 {
290         struct xe_engine *e;
291
292         lockdep_assert_held(&vm->lock);
293         xe_vm_assert_held(vm);
294
295         list_for_each_entry(e, &vm->preempt.engines, compute.link) {
296                 e->ops->resume(e);
297
298                 dma_resv_add_fence(&vm->resv, e->compute.pfence,
299                                    DMA_RESV_USAGE_BOOKKEEP);
300                 xe_vm_fence_all_extobjs(vm, e->compute.pfence,
301                                         DMA_RESV_USAGE_BOOKKEEP);
302         }
303 }
304
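/**
 * xe_vm_add_compute_engine() - Add an engine to the vm's compute engine list
 * @vm: The vm, which must be in compute mode.
 * @e: The engine to add.
 *
 * Creates a preempt fence for @e and installs it in the reservation objects
 * of the vm and of all its external buffer objects. If a preemption or a
 * userptr invalidation is already in flight, signaling is enabled on the new
 * fence so that it syncs with the other preempt fences on the vm.
 *
 * Return: 0 on success, negative error code on failure.
 */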
305 int xe_vm_add_compute_engine(struct xe_vm *vm, struct xe_engine *e)
306 {
307         struct ttm_validate_buffer tv_onstack[XE_ONSTACK_TV];
308         struct ttm_validate_buffer *tv;
309         struct ww_acquire_ctx ww;
310         struct list_head objs;
311         struct dma_fence *pfence;
312         int err;
313         bool wait;
314
315         XE_BUG_ON(!xe_vm_in_compute_mode(vm));
316
317         down_write(&vm->lock);
318
319         err = xe_vm_lock_dma_resv(vm, &ww, tv_onstack, &tv, &objs, true, 1);
320         if (err)
321                 goto out_unlock_outer;
322
323         pfence = xe_preempt_fence_create(e, e->compute.context,
324                                          ++e->compute.seqno);
325         if (!pfence) {
326                 err = -ENOMEM;
327                 goto out_unlock;
328         }
329
330         list_add(&e->compute.link, &vm->preempt.engines);
331         ++vm->preempt.num_engines;
332         e->compute.pfence = pfence;
333
334         down_read(&vm->userptr.notifier_lock);
335
336         dma_resv_add_fence(&vm->resv, pfence,
337                            DMA_RESV_USAGE_BOOKKEEP);
338
339         xe_vm_fence_all_extobjs(vm, pfence, DMA_RESV_USAGE_BOOKKEEP);
340
341         /*
342          * Check whether a preemption on the VM or a userptr invalidation
343          * is in flight; if so, trigger this preempt fence to sync state
344          * with the other preempt fences on the VM.
345          */
346         wait = __xe_vm_userptr_needs_repin(vm) || preempt_fences_waiting(vm);
347         if (wait)
348                 dma_fence_enable_sw_signaling(pfence);
349
350         up_read(&vm->userptr.notifier_lock);
351
352 out_unlock:
353         xe_vm_unlock_dma_resv(vm, tv_onstack, tv, &ww, &objs);
354 out_unlock_outer:
355         up_write(&vm->lock);
356
357         return err;
358 }
359
360 /**
361  * __xe_vm_userptr_needs_repin() - Check whether the VM does have userptrs
362  * that need repinning.
363  * @vm: The VM.
364  *
365  * This function checks whether the VM has userptrs that need repinning,
366  * and provides a release-type barrier on the userptr.notifier_lock after
367  * checking.
368  *
369  * Return: 0 if there are no userptrs needing repinning, -EAGAIN if there are.
370  */
371 int __xe_vm_userptr_needs_repin(struct xe_vm *vm)
372 {
373         lockdep_assert_held_read(&vm->userptr.notifier_lock);
374
375         return (list_empty(&vm->userptr.repin_list) &&
376                 list_empty(&vm->userptr.invalidated)) ? 0 : -EAGAIN;
377 }
378
379 /**
380  * xe_vm_lock_dma_resv() - Lock the vm dma_resv object and the dma_resv
381  * objects of the vm's external buffer objects.
382  * @vm: The vm.
383  * @ww: Pointer to a struct ww_acquire_ctx locking context.
384  * @tv_onstack: Array size XE_ONSTACK_TV of storage for the struct
385  * ttm_validate_buffers used for locking.
386  * @tv: Pointer to a pointer that on output contains the actual storage used.
387  * @objs: List head for the buffer objects locked.
388  * @intr: Whether to lock interruptible.
389  * @num_shared: Number of dma-fence slots to reserve in the locked objects.
390  *
391  * Locks the vm dma-resv objects and all the dma-resv objects of the
392  * buffer objects on the vm external object list. The TTM utilities require
393  * a list of struct ttm_validate_buffers pointing to the actual buffer
394  * objects to lock. Storage for those struct ttm_validate_buffers should
395  * be provided in @tv_onstack, and is typically reserved on the stack
396  * of the caller. If the size of @tv_onstack isn't sufficient, then
397  * storage will be allocated internally using kvmalloc().
398  *
399  * The function performs deadlock handling internally, and after a
400  * successful return the ww locking transaction should be considered
401  * sealed.
402  *
403  * Return: 0 on success, Negative error code on error. In particular if
404  * @intr is set to true, -EINTR or -ERESTARTSYS may be returned. In case
405  * of error, any locking performed has been reverted.
406  */
407 int xe_vm_lock_dma_resv(struct xe_vm *vm, struct ww_acquire_ctx *ww,
408                         struct ttm_validate_buffer *tv_onstack,
409                         struct ttm_validate_buffer **tv,
410                         struct list_head *objs,
411                         bool intr,
412                         unsigned int num_shared)
413 {
414         struct ttm_validate_buffer *tv_vm, *tv_bo;
415         struct xe_vma *vma, *next;
416         LIST_HEAD(dups);
417         int err;
418
419         lockdep_assert_held(&vm->lock);
420
421         if (vm->extobj.entries < XE_ONSTACK_TV) {
422                 tv_vm = tv_onstack;
423         } else {
424                 tv_vm = kvmalloc_array(vm->extobj.entries + 1, sizeof(*tv_vm),
425                                        GFP_KERNEL);
426                 if (!tv_vm)
427                         return -ENOMEM;
428         }
429         tv_bo = tv_vm + 1;
430
431         INIT_LIST_HEAD(objs);
432         list_for_each_entry(vma, &vm->extobj.list, extobj.link) {
433                 tv_bo->num_shared = num_shared;
434                 tv_bo->bo = &vma->bo->ttm;
435
436                 list_add_tail(&tv_bo->head, objs);
437                 tv_bo++;
438         }
439         tv_vm->num_shared = num_shared;
440         tv_vm->bo = xe_vm_ttm_bo(vm);
441         list_add_tail(&tv_vm->head, objs);
442         err = ttm_eu_reserve_buffers(ww, objs, intr, &dups);
443         if (err)
444                 goto out_err;
445
446         spin_lock(&vm->notifier.list_lock);
447         list_for_each_entry_safe(vma, next, &vm->notifier.rebind_list,
448                                  notifier.rebind_link) {
449                 xe_bo_assert_held(vma->bo);
450
451                 list_del_init(&vma->notifier.rebind_link);
452                 if (vma->gt_present && !vma->destroyed)
453                         list_move_tail(&vma->rebind_link, &vm->rebind_list);
454         }
455         spin_unlock(&vm->notifier.list_lock);
456
457         *tv = tv_vm;
458         return 0;
459
460 out_err:
461         if (tv_vm != tv_onstack)
462                 kvfree(tv_vm);
463
464         return err;
465 }
466
467 /**
468  * xe_vm_unlock_dma_resv() - Unlock reservation objects locked by
469  * xe_vm_lock_dma_resv()
470  * @vm: The vm.
471  * @tv_onstack: The @tv_onstack array given to xe_vm_lock_dma_resv().
472  * @tv: The value of *@tv given by xe_vm_lock_dma_resv().
473  * @ww: The ww_acquire_context used for locking.
474  * @objs: The list returned from xe_vm_lock_dma_resv().
475  *
476  * Unlocks the reservation objects and frees any memory allocated by
477  * xe_vm_lock_dma_resv().
478  */
479 void xe_vm_unlock_dma_resv(struct xe_vm *vm,
480                            struct ttm_validate_buffer *tv_onstack,
481                            struct ttm_validate_buffer *tv,
482                            struct ww_acquire_ctx *ww,
483                            struct list_head *objs)
484 {
485         /*
486          * Nothing should've been able to enter the list while we were locked,
487          * since we've held the dma-resvs of all the vm's external objects,
488          * and holding the dma_resv of an object is required for list
489          * addition, and we shouldn't add ourselves.
490          */
491         XE_WARN_ON(!list_empty(&vm->notifier.rebind_list));
492
493         ttm_eu_backoff_reservation(ww, objs);
494         if (tv && tv != tv_onstack)
495                 kvfree(tv);
496 }
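/*
 * A minimal usage sketch for xe_vm_lock_dma_resv() / xe_vm_unlock_dma_resv()
 * (illustrative only, modelled on the callers below; the actual work on the
 * locked objects and the error handling are elided):
 *
 *	struct ttm_validate_buffer tv_onstack[XE_ONSTACK_TV];
 *	struct ttm_validate_buffer *tv;
 *	struct ww_acquire_ctx ww;
 *	struct list_head objs;
 *	int err;
 *
 *	down_write(&vm->lock);
 *	err = xe_vm_lock_dma_resv(vm, &ww, tv_onstack, &tv, &objs, true, 1);
 *	if (!err) {
 *		... operate on the vm and its external BOs ...
 *		xe_vm_unlock_dma_resv(vm, tv_onstack, tv, &ww, &objs);
 *	}
 *	up_write(&vm->lock);
 */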
497
498 static void preempt_rebind_work_func(struct work_struct *w)
499 {
500         struct xe_vm *vm = container_of(w, struct xe_vm, preempt.rebind_work);
501         struct xe_vma *vma;
502         struct ttm_validate_buffer tv_onstack[XE_ONSTACK_TV];
503         struct ttm_validate_buffer *tv;
504         struct ww_acquire_ctx ww;
505         struct list_head objs;
506         struct dma_fence *rebind_fence;
507         unsigned int fence_count = 0;
508         LIST_HEAD(preempt_fences);
509         int err;
510         long wait;
511         int __maybe_unused tries = 0;
512
513         XE_BUG_ON(!xe_vm_in_compute_mode(vm));
514         trace_xe_vm_rebind_worker_enter(vm);
515
516         if (xe_vm_is_closed(vm)) {
517                 trace_xe_vm_rebind_worker_exit(vm);
518                 return;
519         }
520
521         down_write(&vm->lock);
522
523 retry:
524         if (vm->async_ops.error)
525                 goto out_unlock_outer;
526
527         /*
528          * Extreme corner where we exit a VM error state with a munmap style VM
529          * unbind in flight which requires a rebind. In this case the rebind
530          * needs to install some fences into the dma-resv slots. The worker to
531          * do this is queued; let that worker make progress by dropping vm->lock
532          * and trying this again.
533          */
534         if (vm->async_ops.munmap_rebind_inflight) {
535                 up_write(&vm->lock);
536                 flush_work(&vm->async_ops.work);
537                 goto retry;
538         }
539
540         if (xe_vm_userptr_check_repin(vm)) {
541                 err = xe_vm_userptr_pin(vm);
542                 if (err)
543                         goto out_unlock_outer;
544         }
545
546         err = xe_vm_lock_dma_resv(vm, &ww, tv_onstack, &tv, &objs,
547                                   false, vm->preempt.num_engines);
548         if (err)
549                 goto out_unlock_outer;
550
551         /* Fresh preempt fences already installed. Everything is running. */
552         if (!preempt_fences_waiting(vm))
553                 goto out_unlock;
554
555         /*
556          * This makes sure the vm is completely suspended and also balances
557          * xe_engine suspend and resume; we resume *all* vm engines below.
558          */
559         err = wait_for_existing_preempt_fences(vm);
560         if (err)
561                 goto out_unlock;
562
563         err = alloc_preempt_fences(vm, &preempt_fences, &fence_count);
564         if (err)
565                 goto out_unlock;
566
567         list_for_each_entry(vma, &vm->rebind_list, rebind_link) {
568                 if (xe_vma_is_userptr(vma) || vma->destroyed)
569                         continue;
570
571                 err = xe_bo_validate(vma->bo, vm, false);
572                 if (err)
573                         goto out_unlock;
574         }
575
576         rebind_fence = xe_vm_rebind(vm, true);
577         if (IS_ERR(rebind_fence)) {
578                 err = PTR_ERR(rebind_fence);
579                 goto out_unlock;
580         }
581
582         if (rebind_fence) {
583                 dma_fence_wait(rebind_fence, false);
584                 dma_fence_put(rebind_fence);
585         }
586
587         /* Wait on munmap style VM unbinds */
588         wait = dma_resv_wait_timeout(&vm->resv,
589                                      DMA_RESV_USAGE_KERNEL,
590                                      false, MAX_SCHEDULE_TIMEOUT);
591         if (wait <= 0) {
592                 err = -ETIME;
593                 goto out_unlock;
594         }
595
596 #define retry_required(__tries, __vm) \
597         (IS_ENABLED(CONFIG_DRM_XE_USERPTR_INVAL_INJECT) ? \
598         (!(__tries)++ || __xe_vm_userptr_needs_repin(__vm)) : \
599         __xe_vm_userptr_needs_repin(__vm))
600
601         down_read(&vm->userptr.notifier_lock);
602         if (retry_required(tries, vm)) {
603                 up_read(&vm->userptr.notifier_lock);
604                 err = -EAGAIN;
605                 goto out_unlock;
606         }
607
608 #undef retry_required
609
610         /* Point of no return. */
611         arm_preempt_fences(vm, &preempt_fences);
612         resume_and_reinstall_preempt_fences(vm);
613         up_read(&vm->userptr.notifier_lock);
614
615 out_unlock:
616         xe_vm_unlock_dma_resv(vm, tv_onstack, tv, &ww, &objs);
617 out_unlock_outer:
618         if (err == -EAGAIN) {
619                 trace_xe_vm_rebind_worker_retry(vm);
620                 goto retry;
621         }
622         up_write(&vm->lock);
623
624         free_preempt_fences(&preempt_fences);
625
626         XE_WARN_ON(err < 0);    /* TODO: Kill VM or put in error state */
627         trace_xe_vm_rebind_worker_exit(vm);
628 }
629
630 struct async_op_fence;
631 static int __xe_vm_bind(struct xe_vm *vm, struct xe_vma *vma,
632                         struct xe_engine *e, struct xe_sync_entry *syncs,
633                         u32 num_syncs, struct async_op_fence *afence);
634
635 static bool vma_userptr_invalidate(struct mmu_interval_notifier *mni,
636                                    const struct mmu_notifier_range *range,
637                                    unsigned long cur_seq)
638 {
639         struct xe_vma *vma = container_of(mni, struct xe_vma, userptr.notifier);
640         struct xe_vm *vm = vma->vm;
641         struct dma_resv_iter cursor;
642         struct dma_fence *fence;
643         long err;
644
645         XE_BUG_ON(!xe_vma_is_userptr(vma));
646         trace_xe_vma_userptr_invalidate(vma);
647
648         if (!mmu_notifier_range_blockable(range))
649                 return false;
650
651         down_write(&vm->userptr.notifier_lock);
652         mmu_interval_set_seq(mni, cur_seq);
653
654         /* No need to stop gpu access if the userptr is not yet bound. */
655         if (!vma->userptr.initial_bind) {
656                 up_write(&vm->userptr.notifier_lock);
657                 return true;
658         }
659
660         /*
661          * Tell exec and rebind worker they need to repin and rebind this
662          * userptr.
663          */
664         if (!xe_vm_in_fault_mode(vm) && !vma->destroyed && vma->gt_present) {
665                 spin_lock(&vm->userptr.invalidated_lock);
666                 list_move_tail(&vma->userptr.invalidate_link,
667                                &vm->userptr.invalidated);
668                 spin_unlock(&vm->userptr.invalidated_lock);
669         }
670
671         up_write(&vm->userptr.notifier_lock);
672
673         /*
674          * Preempt fences turn into schedule disables, pipeline these.
675          * Note that even in fault mode, we need to wait for binds and
676          * unbinds to complete, and those are attached as BOOKKEEP fences
677          * to the vm.
678          */
679         dma_resv_iter_begin(&cursor, &vm->resv,
680                             DMA_RESV_USAGE_BOOKKEEP);
681         dma_resv_for_each_fence_unlocked(&cursor, fence)
682                 dma_fence_enable_sw_signaling(fence);
683         dma_resv_iter_end(&cursor);
684
685         err = dma_resv_wait_timeout(&vm->resv,
686                                     DMA_RESV_USAGE_BOOKKEEP,
687                                     false, MAX_SCHEDULE_TIMEOUT);
688         XE_WARN_ON(err <= 0);
689
690         if (xe_vm_in_fault_mode(vm)) {
691                 err = xe_vm_invalidate_vma(vma);
692                 XE_WARN_ON(err);
693         }
694
695         trace_xe_vma_userptr_invalidate_complete(vma);
696
697         return true;
698 }
699
700 static const struct mmu_interval_notifier_ops vma_userptr_notifier_ops = {
701         .invalidate = vma_userptr_invalidate,
702 };
703
704 int xe_vm_userptr_pin(struct xe_vm *vm)
705 {
706         struct xe_vma *vma, *next;
707         int err = 0;
708         LIST_HEAD(tmp_evict);
709
710         lockdep_assert_held_write(&vm->lock);
711
712         /* Collect invalidated userptrs */
713         spin_lock(&vm->userptr.invalidated_lock);
714         list_for_each_entry_safe(vma, next, &vm->userptr.invalidated,
715                                  userptr.invalidate_link) {
716                 list_del_init(&vma->userptr.invalidate_link);
717                 list_move_tail(&vma->userptr_link, &vm->userptr.repin_list);
718         }
719         spin_unlock(&vm->userptr.invalidated_lock);
720
721         /* Pin and move to temporary list */
722         list_for_each_entry_safe(vma, next, &vm->userptr.repin_list, userptr_link) {
723                 err = xe_vma_userptr_pin_pages(vma);
724                 if (err < 0)
725                         goto out_err;
726
727                 list_move_tail(&vma->userptr_link, &tmp_evict);
728         }
729
730         /* Take lock and move to rebind_list for rebinding. */
731         err = dma_resv_lock_interruptible(&vm->resv, NULL);
732         if (err)
733                 goto out_err;
734
735         list_for_each_entry_safe(vma, next, &tmp_evict, userptr_link) {
736                 list_del_init(&vma->userptr_link);
737                 list_move_tail(&vma->rebind_link, &vm->rebind_list);
738         }
739
740         dma_resv_unlock(&vm->resv);
741
742         return 0;
743
744 out_err:
745         list_splice_tail(&tmp_evict, &vm->userptr.repin_list);
746
747         return err;
748 }
749
750 /**
751  * xe_vm_userptr_check_repin() - Check whether the VM might have userptrs
752  * that need repinning.
753  * @vm: The VM.
754  *
755  * This function does an advisory check for whether the VM has userptrs that
756  * need repinning.
757  *
758  * Return: 0 if there are no indications of userptrs needing repinning,
759  * -EAGAIN if there are.
760  */
761 int xe_vm_userptr_check_repin(struct xe_vm *vm)
762 {
763         return (list_empty_careful(&vm->userptr.repin_list) &&
764                 list_empty_careful(&vm->userptr.invalidated)) ? 0 : -EAGAIN;
765 }
766
767 static struct dma_fence *
768 xe_vm_bind_vma(struct xe_vma *vma, struct xe_engine *e,
769                struct xe_sync_entry *syncs, u32 num_syncs);
770
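/**
 * xe_vm_rebind() - Rebind all vmas on the vm's rebind list
 * @vm: The vm.
 * @rebind_worker: Whether the call comes from the preempt rebind worker.
 *
 * Walks vm->rebind_list and rebinds each vma with no syncs attached. Called
 * with vm->lock held and the vm's reservation objects locked.
 *
 * Return: The fence of the last rebind, NULL if there was nothing to rebind
 * (or the vm uses no dma-fences and this is not the rebind worker), or an
 * ERR_PTR on failure.
 */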
771 struct dma_fence *xe_vm_rebind(struct xe_vm *vm, bool rebind_worker)
772 {
773         struct dma_fence *fence = NULL;
774         struct xe_vma *vma, *next;
775
776         lockdep_assert_held(&vm->lock);
777         if (xe_vm_no_dma_fences(vm) && !rebind_worker)
778                 return NULL;
779
780         xe_vm_assert_held(vm);
781         list_for_each_entry_safe(vma, next, &vm->rebind_list, rebind_link) {
782                 XE_WARN_ON(!vma->gt_present);
783
784                 list_del_init(&vma->rebind_link);
785                 dma_fence_put(fence);
786                 if (rebind_worker)
787                         trace_xe_vma_rebind_worker(vma);
788                 else
789                         trace_xe_vma_rebind_exec(vma);
790                 fence = xe_vm_bind_vma(vma, NULL, NULL, 0);
791                 if (IS_ERR(fence))
792                         return fence;
793         }
794
795         return fence;
796 }
797
798 static struct xe_vma *xe_vma_create(struct xe_vm *vm,
799                                     struct xe_bo *bo,
800                                     u64 bo_offset_or_userptr,
801                                     u64 start, u64 end,
802                                     bool read_only,
803                                     u64 gt_mask)
804 {
805         struct xe_vma *vma;
806         struct xe_gt *gt;
807         u8 id;
808
809         XE_BUG_ON(start >= end);
810         XE_BUG_ON(end >= vm->size);
811
812         vma = kzalloc(sizeof(*vma), GFP_KERNEL);
813         if (!vma) {
814                 vma = ERR_PTR(-ENOMEM);
815                 return vma;
816         }
817
818         INIT_LIST_HEAD(&vma->rebind_link);
819         INIT_LIST_HEAD(&vma->unbind_link);
820         INIT_LIST_HEAD(&vma->userptr_link);
821         INIT_LIST_HEAD(&vma->userptr.invalidate_link);
822         INIT_LIST_HEAD(&vma->notifier.rebind_link);
823         INIT_LIST_HEAD(&vma->extobj.link);
824
825         vma->vm = vm;
826         vma->start = start;
827         vma->end = end;
828         if (read_only)
829                 vma->pte_flags = PTE_READ_ONLY;
830
831         if (gt_mask) {
832                 vma->gt_mask = gt_mask;
833         } else {
834                 for_each_gt(gt, vm->xe, id)
835                         if (!xe_gt_is_media_type(gt))
836                                 vma->gt_mask |= 0x1 << id;
837         }
838
839         if (vm->xe->info.platform == XE_PVC)
840                 vma->use_atomic_access_pte_bit = true;
841
842         if (bo) {
843                 xe_bo_assert_held(bo);
844                 vma->bo_offset = bo_offset_or_userptr;
845                 vma->bo = xe_bo_get(bo);
846                 list_add_tail(&vma->bo_link, &bo->vmas);
847         } else /* userptr */ {
848                 u64 size = end - start + 1;
849                 int err;
850
851                 vma->userptr.ptr = bo_offset_or_userptr;
852
853                 err = mmu_interval_notifier_insert(&vma->userptr.notifier,
854                                                    current->mm,
855                                                    vma->userptr.ptr, size,
856                                                    &vma_userptr_notifier_ops);
857                 if (err) {
858                         kfree(vma);
859                         vma = ERR_PTR(err);
860                         return vma;
861                 }
862
863                 vma->userptr.notifier_seq = LONG_MAX;
864                 xe_vm_get(vm);
865         }
866
867         return vma;
868 }
869
870 static bool vm_remove_extobj(struct xe_vma *vma)
871 {
872         if (!list_empty(&vma->extobj.link)) {
873                 vma->vm->extobj.entries--;
874                 list_del_init(&vma->extobj.link);
875                 return true;
876         }
877         return false;
878 }
879
880 static void xe_vma_destroy_late(struct xe_vma *vma)
881 {
882         struct xe_vm *vm = vma->vm;
883         struct xe_device *xe = vm->xe;
884         bool read_only = vma->pte_flags & PTE_READ_ONLY;
885
886         if (xe_vma_is_userptr(vma)) {
887                 if (vma->userptr.sg) {
888                         dma_unmap_sgtable(xe->drm.dev,
889                                           vma->userptr.sg,
890                                           read_only ? DMA_TO_DEVICE :
891                                           DMA_BIDIRECTIONAL, 0);
892                         sg_free_table(vma->userptr.sg);
893                         vma->userptr.sg = NULL;
894                 }
895
896                 /*
897                  * Since userptr pages are not pinned, we can't remove
898                  * the notifier until we're sure the GPU is not accessing
899                  * them anymore.
900                  */
901                 mmu_interval_notifier_remove(&vma->userptr.notifier);
902                 xe_vm_put(vm);
903         } else {
904                 xe_bo_put(vma->bo);
905         }
906
907         kfree(vma);
908 }
909
910 static void vma_destroy_work_func(struct work_struct *w)
911 {
912         struct xe_vma *vma =
913                 container_of(w, struct xe_vma, destroy_work);
914
915         xe_vma_destroy_late(vma);
916 }
917
918 static struct xe_vma *
919 bo_has_vm_references_locked(struct xe_bo *bo, struct xe_vm *vm,
920                             struct xe_vma *ignore)
921 {
922         struct xe_vma *vma;
923
924         list_for_each_entry(vma, &bo->vmas, bo_link) {
925                 if (vma != ignore && vma->vm == vm && !vma->destroyed)
926                         return vma;
927         }
928
929         return NULL;
930 }
931
932 static bool bo_has_vm_references(struct xe_bo *bo, struct xe_vm *vm,
933                                  struct xe_vma *ignore)
934 {
935         struct ww_acquire_ctx ww;
936         bool ret;
937
938         xe_bo_lock(bo, &ww, 0, false);
939         ret = !!bo_has_vm_references_locked(bo, vm, ignore);
940         xe_bo_unlock(bo, &ww);
941
942         return ret;
943 }
944
945 static void __vm_insert_extobj(struct xe_vm *vm, struct xe_vma *vma)
946 {
947         list_add(&vma->extobj.link, &vm->extobj.list);
948         vm->extobj.entries++;
949 }
950
951 static void vm_insert_extobj(struct xe_vm *vm, struct xe_vma *vma)
952 {
953         struct xe_bo *bo = vma->bo;
954
955         lockdep_assert_held_write(&vm->lock);
956
957         if (bo_has_vm_references(bo, vm, vma))
958                 return;
959
960         __vm_insert_extobj(vm, vma);
961 }
962
963 static void vma_destroy_cb(struct dma_fence *fence,
964                            struct dma_fence_cb *cb)
965 {
966         struct xe_vma *vma = container_of(cb, struct xe_vma, destroy_cb);
967
968         INIT_WORK(&vma->destroy_work, vma_destroy_work_func);
969         queue_work(system_unbound_wq, &vma->destroy_work);
970 }
971
972 static void xe_vma_destroy(struct xe_vma *vma, struct dma_fence *fence)
973 {
974         struct xe_vm *vm = vma->vm;
975
976         lockdep_assert_held_write(&vm->lock);
977         XE_BUG_ON(!list_empty(&vma->unbind_link));
978
979         if (xe_vma_is_userptr(vma)) {
980                 XE_WARN_ON(!vma->destroyed);
981                 spin_lock(&vm->userptr.invalidated_lock);
982                 list_del_init(&vma->userptr.invalidate_link);
983                 spin_unlock(&vm->userptr.invalidated_lock);
984                 list_del(&vma->userptr_link);
985         } else {
986                 xe_bo_assert_held(vma->bo);
987                 list_del(&vma->bo_link);
988
989                 spin_lock(&vm->notifier.list_lock);
990                 list_del(&vma->notifier.rebind_link);
991                 spin_unlock(&vm->notifier.list_lock);
992
993                 if (!vma->bo->vm && vm_remove_extobj(vma)) {
994                         struct xe_vma *other;
995
996                         other = bo_has_vm_references_locked(vma->bo, vm, NULL);
997
998                         if (other)
999                                 __vm_insert_extobj(vm, other);
1000                 }
1001         }
1002
1003         xe_vm_assert_held(vm);
1004         if (!list_empty(&vma->rebind_link))
1005                 list_del(&vma->rebind_link);
1006
1007         if (fence) {
1008                 int ret = dma_fence_add_callback(fence, &vma->destroy_cb,
1009                                                  vma_destroy_cb);
1010
1011                 if (ret) {
1012                         XE_WARN_ON(ret != -ENOENT);
1013                         xe_vma_destroy_late(vma);
1014                 }
1015         } else {
1016                 xe_vma_destroy_late(vma);
1017         }
1018 }
1019
1020 static void xe_vma_destroy_unlocked(struct xe_vma *vma)
1021 {
1022         struct ttm_validate_buffer tv[2];
1023         struct ww_acquire_ctx ww;
1024         struct xe_bo *bo = vma->bo;
1025         LIST_HEAD(objs);
1026         LIST_HEAD(dups);
1027         int err;
1028
1029         memset(tv, 0, sizeof(tv));
1030         tv[0].bo = xe_vm_ttm_bo(vma->vm);
1031         list_add(&tv[0].head, &objs);
1032
1033         if (bo) {
1034                 tv[1].bo = &xe_bo_get(bo)->ttm;
1035                 list_add(&tv[1].head, &objs);
1036         }
1037         err = ttm_eu_reserve_buffers(&ww, &objs, false, &dups);
1038         XE_WARN_ON(err);
1039
1040         xe_vma_destroy(vma, NULL);
1041
1042         ttm_eu_backoff_reservation(&ww, &objs);
1043         if (bo)
1044                 xe_bo_put(bo);
1045 }
1046
1047 static struct xe_vma *to_xe_vma(const struct rb_node *node)
1048 {
1049         BUILD_BUG_ON(offsetof(struct xe_vma, vm_node) != 0);
1050         return (struct xe_vma *)node;
1051 }
1052
1053 static int xe_vma_cmp(const struct xe_vma *a, const struct xe_vma *b)
1054 {
1055         if (a->end < b->start) {
1056                 return -1;
1057         } else if (b->end < a->start) {
1058                 return 1;
1059         } else {
1060                 return 0;
1061         }
1062 }
1063
1064 static bool xe_vma_less_cb(struct rb_node *a, const struct rb_node *b)
1065 {
1066         return xe_vma_cmp(to_xe_vma(a), to_xe_vma(b)) < 0;
1067 }
1068
1069 int xe_vma_cmp_vma_cb(const void *key, const struct rb_node *node)
1070 {
1071         struct xe_vma *cmp = to_xe_vma(node);
1072         const struct xe_vma *own = key;
1073
1074         if (own->start > cmp->end)
1075                 return 1;
1076
1077         if (own->end < cmp->start)
1078                 return -1;
1079
1080         return 0;
1081 }
1082
1083 struct xe_vma *
1084 xe_vm_find_overlapping_vma(struct xe_vm *vm, const struct xe_vma *vma)
1085 {
1086         struct rb_node *node;
1087
1088         if (xe_vm_is_closed(vm))
1089                 return NULL;
1090
1091         XE_BUG_ON(vma->end >= vm->size);
1092         lockdep_assert_held(&vm->lock);
1093
1094         node = rb_find(vma, &vm->vmas, xe_vma_cmp_vma_cb);
1095
1096         return node ? to_xe_vma(node) : NULL;
1097 }
1098
1099 static void xe_vm_insert_vma(struct xe_vm *vm, struct xe_vma *vma)
1100 {
1101         XE_BUG_ON(vma->vm != vm);
1102         lockdep_assert_held(&vm->lock);
1103
1104         rb_add(&vma->vm_node, &vm->vmas, xe_vma_less_cb);
1105 }
1106
1107 static void xe_vm_remove_vma(struct xe_vm *vm, struct xe_vma *vma)
1108 {
1109         XE_BUG_ON(vma->vm != vm);
1110         lockdep_assert_held(&vm->lock);
1111
1112         rb_erase(&vma->vm_node, &vm->vmas);
1113         if (vm->usm.last_fault_vma == vma)
1114                 vm->usm.last_fault_vma = NULL;
1115 }
1116
1117 static void async_op_work_func(struct work_struct *w);
1118 static void vm_destroy_work_func(struct work_struct *w);
1119
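/**
 * xe_vm_create() - Create and initialize a vm
 * @xe: The xe device.
 * @flags: XE_VM_FLAG_* / DRM_XE_VM_CREATE_* flags selecting the vm mode.
 *
 * Allocates the vm, creates a page-table root for every non-media GT (plus
 * scratch page tables when XE_VM_FLAG_SCRATCH_PAGE is set) and, for
 * non-migration vms, an ENGINE_FLAG_VM copy engine per GT.
 *
 * Return: Pointer to the new vm on success, ERR_PTR on failure.
 */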
1120 struct xe_vm *xe_vm_create(struct xe_device *xe, u32 flags)
1121 {
1122         struct xe_vm *vm;
1123         int err, i = 0, number_gts = 0;
1124         struct xe_gt *gt;
1125         u8 id;
1126
1127         vm = kzalloc(sizeof(*vm), GFP_KERNEL);
1128         if (!vm)
1129                 return ERR_PTR(-ENOMEM);
1130
1131         vm->xe = xe;
1132         kref_init(&vm->refcount);
1133         dma_resv_init(&vm->resv);
1134
1135         vm->size = 1ull << xe_pt_shift(xe->info.vm_max_level + 1);
1136
1137         vm->vmas = RB_ROOT;
1138         vm->flags = flags;
1139
1140         init_rwsem(&vm->lock);
1141
1142         INIT_LIST_HEAD(&vm->rebind_list);
1143
1144         INIT_LIST_HEAD(&vm->userptr.repin_list);
1145         INIT_LIST_HEAD(&vm->userptr.invalidated);
1146         init_rwsem(&vm->userptr.notifier_lock);
1147         spin_lock_init(&vm->userptr.invalidated_lock);
1148
1149         INIT_LIST_HEAD(&vm->notifier.rebind_list);
1150         spin_lock_init(&vm->notifier.list_lock);
1151
1152         INIT_LIST_HEAD(&vm->async_ops.pending);
1153         INIT_WORK(&vm->async_ops.work, async_op_work_func);
1154         spin_lock_init(&vm->async_ops.lock);
1155
1156         INIT_WORK(&vm->destroy_work, vm_destroy_work_func);
1157
1158         INIT_LIST_HEAD(&vm->preempt.engines);
1159         vm->preempt.min_run_period_ms = 10;     /* FIXME: Wire up to uAPI */
1160
1161         INIT_LIST_HEAD(&vm->extobj.list);
1162
1163         if (!(flags & XE_VM_FLAG_MIGRATION)) {
1164                 /* We need to immediately exit from any D3 state */
1165                 xe_pm_runtime_get(xe);
1166                 xe_device_mem_access_get(xe);
1167         }
1168
1169         err = dma_resv_lock_interruptible(&vm->resv, NULL);
1170         if (err)
1171                 goto err_put;
1172
1173         if (IS_DGFX(xe) && xe->info.vram_flags & XE_VRAM_FLAGS_NEED64K)
1174                 vm->flags |= XE_VM_FLAGS_64K;
1175
1176         for_each_gt(gt, xe, id) {
1177                 if (xe_gt_is_media_type(gt))
1178                         continue;
1179
1180                 if (flags & XE_VM_FLAG_MIGRATION &&
1181                     gt->info.id != XE_VM_FLAG_GT_ID(flags))
1182                         continue;
1183
1184                 vm->pt_root[id] = xe_pt_create(vm, gt, xe->info.vm_max_level);
1185                 if (IS_ERR(vm->pt_root[id])) {
1186                         err = PTR_ERR(vm->pt_root[id]);
1187                         vm->pt_root[id] = NULL;
1188                         goto err_destroy_root;
1189                 }
1190         }
1191
1192         if (flags & XE_VM_FLAG_SCRATCH_PAGE) {
1193                 for_each_gt(gt, xe, id) {
1194                         if (!vm->pt_root[id])
1195                                 continue;
1196
1197                         err = xe_pt_create_scratch(xe, gt, vm);
1198                         if (err)
1199                                 goto err_scratch_pt;
1200                 }
1201         }
1202
1203         if (flags & DRM_XE_VM_CREATE_COMPUTE_MODE) {
1204                 INIT_WORK(&vm->preempt.rebind_work, preempt_rebind_work_func);
1205                 vm->flags |= XE_VM_FLAG_COMPUTE_MODE;
1206         }
1207
1208         if (flags & DRM_XE_VM_CREATE_ASYNC_BIND_OPS) {
1209                 vm->async_ops.fence.context = dma_fence_context_alloc(1);
1210                 vm->flags |= XE_VM_FLAG_ASYNC_BIND_OPS;
1211         }
1212
1213         /* Fill pt_root after allocating scratch tables */
1214         for_each_gt(gt, xe, id) {
1215                 if (!vm->pt_root[id])
1216                         continue;
1217
1218                 xe_pt_populate_empty(gt, vm, vm->pt_root[id]);
1219         }
1220         dma_resv_unlock(&vm->resv);
1221
1222         /* Kernel migration VM shouldn't have a circular loop. */
1223         if (!(flags & XE_VM_FLAG_MIGRATION)) {
1224                 for_each_gt(gt, xe, id) {
1225                         struct xe_vm *migrate_vm;
1226                         struct xe_engine *eng;
1227
1228                         if (!vm->pt_root[id])
1229                                 continue;
1230
1231                         migrate_vm = xe_migrate_get_vm(gt->migrate);
1232                         eng = xe_engine_create_class(xe, gt, migrate_vm,
1233                                                      XE_ENGINE_CLASS_COPY,
1234                                                      ENGINE_FLAG_VM);
1235                         xe_vm_put(migrate_vm);
1236                         if (IS_ERR(eng)) {
1237                                 xe_vm_close_and_put(vm);
1238                                 return ERR_CAST(eng);
1239                         }
1240                         vm->eng[id] = eng;
1241                         number_gts++;
1242                 }
1243         }
1244
1245         if (number_gts > 1)
1246                 vm->composite_fence_ctx = dma_fence_context_alloc(1);
1247
1248         mutex_lock(&xe->usm.lock);
1249         if (flags & XE_VM_FLAG_FAULT_MODE)
1250                 xe->usm.num_vm_in_fault_mode++;
1251         else if (!(flags & XE_VM_FLAG_MIGRATION))
1252                 xe->usm.num_vm_in_non_fault_mode++;
1253         mutex_unlock(&xe->usm.lock);
1254
1255         trace_xe_vm_create(vm);
1256
1257         return vm;
1258
1259 err_scratch_pt:
1260         for_each_gt(gt, xe, id) {
1261                 if (!vm->pt_root[id])
1262                         continue;
1263
1264                 i = vm->pt_root[id]->level;
1265                 while (i)
1266                         if (vm->scratch_pt[id][--i])
1267                                 xe_pt_destroy(vm->scratch_pt[id][i],
1268                                               vm->flags, NULL);
1269                 xe_bo_unpin(vm->scratch_bo[id]);
1270                 xe_bo_put(vm->scratch_bo[id]);
1271         }
1272 err_destroy_root:
1273         for_each_gt(gt, xe, id) {
1274                 if (vm->pt_root[id])
1275                         xe_pt_destroy(vm->pt_root[id], vm->flags, NULL);
1276         }
1277         dma_resv_unlock(&vm->resv);
1278 err_put:
1279         dma_resv_fini(&vm->resv);
1280         kfree(vm);
1281         if (!(flags & XE_VM_FLAG_MIGRATION)) {
1282                 xe_device_mem_access_put(xe);
1283                 xe_pm_runtime_put(xe);
1284         }
1285         return ERR_PTR(err);
1286 }
1287
1288 static void flush_async_ops(struct xe_vm *vm)
1289 {
1290         queue_work(system_unbound_wq, &vm->async_ops.work);
1291         flush_work(&vm->async_ops.work);
1292 }
1293
1294 static void vm_error_capture(struct xe_vm *vm, int err,
1295                              u32 op, u64 addr, u64 size)
1296 {
1297         struct drm_xe_vm_bind_op_error_capture capture;
1298         u64 __user *address =
1299                 u64_to_user_ptr(vm->async_ops.error_capture.addr);
1300         bool in_kthread = !current->mm;
1301
1302         capture.error = err;
1303         capture.op = op;
1304         capture.addr = addr;
1305         capture.size = size;
1306
1307         if (in_kthread) {
1308                 if (!mmget_not_zero(vm->async_ops.error_capture.mm))
1309                         goto mm_closed;
1310                 kthread_use_mm(vm->async_ops.error_capture.mm);
1311         }
1312
1313         if (copy_to_user(address, &capture, sizeof(capture)))
1314                 XE_WARN_ON("Copy to user failed");
1315
1316         if (in_kthread) {
1317                 kthread_unuse_mm(vm->async_ops.error_capture.mm);
1318                 mmput(vm->async_ops.error_capture.mm);
1319         }
1320
1321 mm_closed:
1322         wake_up_all(&vm->async_ops.error_capture.wq);
1323 }
1324
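/**
 * xe_vm_close_and_put() - Close a vm and drop the creation reference
 * @vm: The vm to close. No compute engines may still be attached.
 *
 * Flushes pending async bind ops and the compute-mode rebind worker, kills
 * and releases the vm's per-GT engines, destroys all vmas and scratch page
 * tables and finally drops the reference taken at creation time. The
 * remaining teardown runs from vm_destroy_work_func() once the refcount
 * reaches zero.
 */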
1325 void xe_vm_close_and_put(struct xe_vm *vm)
1326 {
1327         struct rb_root contested = RB_ROOT;
1328         struct ww_acquire_ctx ww;
1329         struct xe_device *xe = vm->xe;
1330         struct xe_gt *gt;
1331         u8 id;
1332
1333         XE_BUG_ON(vm->preempt.num_engines);
1334
1335         vm->size = 0;
1336         smp_mb();
1337         flush_async_ops(vm);
1338         if (xe_vm_in_compute_mode(vm))
1339                 flush_work(&vm->preempt.rebind_work);
1340
1341         for_each_gt(gt, xe, id) {
1342                 if (vm->eng[id]) {
1343                         xe_engine_kill(vm->eng[id]);
1344                         xe_engine_put(vm->eng[id]);
1345                         vm->eng[id] = NULL;
1346                 }
1347         }
1348
1349         down_write(&vm->lock);
1350         xe_vm_lock(vm, &ww, 0, false);
1351         while (vm->vmas.rb_node) {
1352                 struct xe_vma *vma = to_xe_vma(vm->vmas.rb_node);
1353
1354                 if (xe_vma_is_userptr(vma)) {
1355                         down_read(&vm->userptr.notifier_lock);
1356                         vma->destroyed = true;
1357                         up_read(&vm->userptr.notifier_lock);
1358                 }
1359
1360                 rb_erase(&vma->vm_node, &vm->vmas);
1361
1362                 /* easy case, remove from VMA? */
1363                 if (xe_vma_is_userptr(vma) || vma->bo->vm) {
1364                         xe_vma_destroy(vma, NULL);
1365                         continue;
1366                 }
1367
1368                 rb_add(&vma->vm_node, &contested, xe_vma_less_cb);
1369         }
1370
1371         /*
1372          * All vm operations will add shared fences to resv.
1373          * The only exception is eviction for a shared object,
1374          * but even so, the unbind when evicted would still
1375          * install a fence to resv. Hence it's safe to
1376          * destroy the pagetables immediately.
1377          */
1378         for_each_gt(gt, xe, id) {
1379                 if (vm->scratch_bo[id]) {
1380                         u32 i;
1381
1382                         xe_bo_unpin(vm->scratch_bo[id]);
1383                         xe_bo_put(vm->scratch_bo[id]);
1384                         for (i = 0; i < vm->pt_root[id]->level; i++)
1385                                 xe_pt_destroy(vm->scratch_pt[id][i], vm->flags,
1386                                               NULL);
1387                 }
1388         }
1389         xe_vm_unlock(vm, &ww);
1390
1391         if (contested.rb_node) {
1392
1393                 /*
1394          * VM is now dead, cannot re-add nodes to vm->vmas if it's NULL.
1395                  * Since we hold a refcount to the bo, we can remove and free
1396                  * the members safely without locking.
1397                  */
1398                 while (contested.rb_node) {
1399                         struct xe_vma *vma = to_xe_vma(contested.rb_node);
1400
1401                         rb_erase(&vma->vm_node, &contested);
1402                         xe_vma_destroy_unlocked(vma);
1403                 }
1404         }
1405
1406         if (vm->async_ops.error_capture.addr)
1407                 wake_up_all(&vm->async_ops.error_capture.wq);
1408
1409         XE_WARN_ON(!list_empty(&vm->extobj.list));
1410         up_write(&vm->lock);
1411
1412         xe_vm_put(vm);
1413 }
1414
1415 static void vm_destroy_work_func(struct work_struct *w)
1416 {
1417         struct xe_vm *vm =
1418                 container_of(w, struct xe_vm, destroy_work);
1419         struct ww_acquire_ctx ww;
1420         struct xe_device *xe = vm->xe;
1421         struct xe_gt *gt;
1422         u8 id;
1423         void *lookup;
1424
1425         /* xe_vm_close_and_put was not called? */
1426         XE_WARN_ON(vm->size);
1427
1428         if (!(vm->flags & XE_VM_FLAG_MIGRATION)) {
1429                 xe_device_mem_access_put(xe);
1430                 xe_pm_runtime_put(xe);
1431
1432                 if (xe->info.supports_usm) {
1433                         mutex_lock(&xe->usm.lock);
1434                         lookup = xa_erase(&xe->usm.asid_to_vm, vm->usm.asid);
1435                         XE_WARN_ON(lookup != vm);
1436                         mutex_unlock(&xe->usm.lock);
1437                 }
1438         }
1439
1440         /*
1441          * XXX: We delay destroying the PT root until the VM is freed as PT root
1442          * is needed for xe_vm_lock to work. If we remove that dependency this
1443          * can be moved to xe_vm_close_and_put.
1444          */
1445         xe_vm_lock(vm, &ww, 0, false);
1446         for_each_gt(gt, xe, id) {
1447                 if (vm->pt_root[id]) {
1448                         xe_pt_destroy(vm->pt_root[id], vm->flags, NULL);
1449                         vm->pt_root[id] = NULL;
1450                 }
1451         }
1452         xe_vm_unlock(vm, &ww);
1453
1454         mutex_lock(&xe->usm.lock);
1455         if (vm->flags & XE_VM_FLAG_FAULT_MODE)
1456                 xe->usm.num_vm_in_fault_mode--;
1457         else if (!(vm->flags & XE_VM_FLAG_MIGRATION))
1458                 xe->usm.num_vm_in_non_fault_mode--;
1459         mutex_unlock(&xe->usm.lock);
1460
1461         trace_xe_vm_free(vm);
1462         dma_fence_put(vm->rebind_fence);
1463         dma_resv_fini(&vm->resv);
1464         kfree(vm);
1465
1466 }
1467
1468 void xe_vm_free(struct kref *ref)
1469 {
1470         struct xe_vm *vm = container_of(ref, struct xe_vm, refcount);
1471
1472         /* To destroy the VM we need to be able to sleep */
1473         queue_work(system_unbound_wq, &vm->destroy_work);
1474 }
1475
1476 struct xe_vm *xe_vm_lookup(struct xe_file *xef, u32 id)
1477 {
1478         struct xe_vm *vm;
1479
1480         mutex_lock(&xef->vm.lock);
1481         vm = xa_load(&xef->vm.xa, id);
1482         mutex_unlock(&xef->vm.lock);
1483
1484         if (vm)
1485                 xe_vm_get(vm);
1486
1487         return vm;
1488 }
1489
1490 u64 xe_vm_pdp4_descriptor(struct xe_vm *vm, struct xe_gt *full_gt)
1491 {
1492         XE_BUG_ON(xe_gt_is_media_type(full_gt));
1493
1494         return gen8_pde_encode(vm->pt_root[full_gt->info.id]->bo, 0,
1495                                XE_CACHE_WB);
1496 }
1497
1498 static struct dma_fence *
1499 xe_vm_unbind_vma(struct xe_vma *vma, struct xe_engine *e,
1500                  struct xe_sync_entry *syncs, u32 num_syncs)
1501 {
1502         struct xe_gt *gt;
1503         struct dma_fence *fence = NULL;
1504         struct dma_fence **fences = NULL;
1505         struct dma_fence_array *cf = NULL;
1506         struct xe_vm *vm = vma->vm;
1507         int cur_fence = 0, i;
1508         int number_gts = hweight_long(vma->gt_present);
1509         int err;
1510         u8 id;
1511
1512         trace_xe_vma_unbind(vma);
1513
1514         if (number_gts > 1) {
1515                 fences = kmalloc_array(number_gts, sizeof(*fences),
1516                                        GFP_KERNEL);
1517                 if (!fences)
1518                         return ERR_PTR(-ENOMEM);
1519         }
1520
1521         for_each_gt(gt, vm->xe, id) {
1522                 if (!(vma->gt_present & BIT(id)))
1523                         goto next;
1524
1525                 XE_BUG_ON(xe_gt_is_media_type(gt));
1526
1527                 fence = __xe_pt_unbind_vma(gt, vma, e, syncs, num_syncs);
1528                 if (IS_ERR(fence)) {
1529                         err = PTR_ERR(fence);
1530                         goto err_fences;
1531                 }
1532
1533                 if (fences)
1534                         fences[cur_fence++] = fence;
1535
1536 next:
1537                 if (e && vm->pt_root[id] && !list_empty(&e->multi_gt_list))
1538                         e = list_next_entry(e, multi_gt_list);
1539         }
1540
1541         if (fences) {
1542                 cf = dma_fence_array_create(number_gts, fences,
1543                                             vm->composite_fence_ctx,
1544                                             vm->composite_fence_seqno++,
1545                                             false);
1546                 if (!cf) {
1547                         --vm->composite_fence_seqno;
1548                         err = -ENOMEM;
1549                         goto err_fences;
1550                 }
1551         }
1552
1553         for (i = 0; i < num_syncs; i++)
1554                 xe_sync_entry_signal(&syncs[i], NULL, cf ? &cf->base : fence);
1555
1556         return cf ? &cf->base : !fence ? dma_fence_get_stub() : fence;
1557
1558 err_fences:
1559         if (fences) {
1560                 while (cur_fence) {
1561                         /* FIXME: Rewind the previous unbinds? */
1562                         dma_fence_put(fences[--cur_fence]);
1563                 }
1564                 kfree(fences);
1565         }
1566
1567         return ERR_PTR(err);
1568 }
1569
1570 static struct dma_fence *
1571 xe_vm_bind_vma(struct xe_vma *vma, struct xe_engine *e,
1572                struct xe_sync_entry *syncs, u32 num_syncs)
1573 {
1574         struct xe_gt *gt;
1575         struct dma_fence *fence;
1576         struct dma_fence **fences = NULL;
1577         struct dma_fence_array *cf = NULL;
1578         struct xe_vm *vm = vma->vm;
1579         int cur_fence = 0, i;
1580         int number_gts = hweight_long(vma->gt_mask);
1581         int err;
1582         u8 id;
1583
1584         trace_xe_vma_bind(vma);
1585
1586         if (number_gts > 1) {
1587                 fences = kmalloc_array(number_gts, sizeof(*fences),
1588                                        GFP_KERNEL);
1589                 if (!fences)
1590                         return ERR_PTR(-ENOMEM);
1591         }
1592
1593         for_each_gt(gt, vm->xe, id) {
1594                 if (!(vma->gt_mask & BIT(id)))
1595                         goto next;
1596
1597                 XE_BUG_ON(xe_gt_is_media_type(gt));
1598                 fence = __xe_pt_bind_vma(gt, vma, e, syncs, num_syncs,
1599                                          vma->gt_present & BIT(id));
1600                 if (IS_ERR(fence)) {
1601                         err = PTR_ERR(fence);
1602                         goto err_fences;
1603                 }
1604
1605                 if (fences)
1606                         fences[cur_fence++] = fence;
1607
1608 next:
1609                 if (e && vm->pt_root[id] && !list_empty(&e->multi_gt_list))
1610                         e = list_next_entry(e, multi_gt_list);
1611         }
1612
1613         if (fences) {
1614                 cf = dma_fence_array_create(number_gts, fences,
1615                                             vm->composite_fence_ctx,
1616                                             vm->composite_fence_seqno++,
1617                                             false);
1618                 if (!cf) {
1619                         --vm->composite_fence_seqno;
1620                         err = -ENOMEM;
1621                         goto err_fences;
1622                 }
1623         }
1624
1625         for (i = 0; i < num_syncs; i++)
1626                 xe_sync_entry_signal(&syncs[i], NULL, cf ? &cf->base : fence);
1627
1628         return cf ? &cf->base : fence;
1629
1630 err_fences:
1631         if (fences) {
1632                 while (cur_fence) {
1633                         /* FIXME: Rewind the previous binds? */
1634                         dma_fence_put(fences[--cur_fence]);
1635                 }
1636                 kfree(fences);
1637         }
1638
1639         return ERR_PTR(err);
1640 }
1641
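/*
 * Fence handed out to user syncs for an async bind operation. It is signaled
 * from the callback of the underlying bind / unbind fence and, on VMs that
 * use dma-fences, also tracks whether the operation has been started by the
 * async worker so xe_vm_async_fence_wait_start() can block on it.
 */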
1642 struct async_op_fence {
1643         struct dma_fence fence;
1644         struct dma_fence *wait_fence;
1645         struct dma_fence_cb cb;
1646         struct xe_vm *vm;
1647         wait_queue_head_t wq;
1648         bool started;
1649 };
1650
1651 static const char *async_op_fence_get_driver_name(struct dma_fence *dma_fence)
1652 {
1653         return "xe";
1654 }
1655
1656 static const char *
1657 async_op_fence_get_timeline_name(struct dma_fence *dma_fence)
1658 {
1659         return "async_op_fence";
1660 }
1661
1662 static const struct dma_fence_ops async_op_fence_ops = {
1663         .get_driver_name = async_op_fence_get_driver_name,
1664         .get_timeline_name = async_op_fence_get_timeline_name,
1665 };
1666
1667 static void async_op_fence_cb(struct dma_fence *fence, struct dma_fence_cb *cb)
1668 {
1669         struct async_op_fence *afence =
1670                 container_of(cb, struct async_op_fence, cb);
1671
1672         afence->fence.error = afence->wait_fence->error;
1673         dma_fence_signal(&afence->fence);
1674         xe_vm_put(afence->vm);
1675         dma_fence_put(afence->wait_fence);
1676         dma_fence_put(&afence->fence);
1677 }
1678
1679 static void add_async_op_fence_cb(struct xe_vm *vm,
1680                                   struct dma_fence *fence,
1681                                   struct async_op_fence *afence)
1682 {
1683         int ret;
1684
1685         if (!xe_vm_no_dma_fences(vm)) {
1686                 afence->started = true;
1687                 smp_wmb();
1688                 wake_up_all(&afence->wq);
1689         }
1690
1691         afence->wait_fence = dma_fence_get(fence);
1692         afence->vm = xe_vm_get(vm);
1693         dma_fence_get(&afence->fence);
1694         ret = dma_fence_add_callback(fence, &afence->cb, async_op_fence_cb);
1695         if (ret == -ENOENT) {
1696                 afence->fence.error = afence->wait_fence->error;
1697                 dma_fence_signal(&afence->fence);
1698         }
1699         if (ret) {
1700                 xe_vm_put(vm);
1701                 dma_fence_put(afence->wait_fence);
1702                 dma_fence_put(&afence->fence);
1703         }
1704         XE_WARN_ON(ret && ret != -ENOENT);
1705 }
1706
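/**
 * xe_vm_async_fence_wait_start() - Wait for an async bind operation to start
 * @fence: The fence to wait on
 *
 * If @fence belongs to an async bind operation on a VM using dma-fences, wait
 * until the async worker has picked the operation up; any other fence returns
 * immediately.
 *
 * Return: 0 on success, -ERESTARTSYS if the wait was interrupted.
 */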
1707 int xe_vm_async_fence_wait_start(struct dma_fence *fence)
1708 {
1709         if (fence->ops == &async_op_fence_ops) {
1710                 struct async_op_fence *afence =
1711                         container_of(fence, struct async_op_fence, fence);
1712
1713                 XE_BUG_ON(xe_vm_no_dma_fences(afence->vm));
1714
1715                 smp_rmb();
1716                 return wait_event_interruptible(afence->wq, afence->started);
1717         }
1718
1719         return 0;
1720 }
1721
1722 static int __xe_vm_bind(struct xe_vm *vm, struct xe_vma *vma,
1723                         struct xe_engine *e, struct xe_sync_entry *syncs,
1724                         u32 num_syncs, struct async_op_fence *afence)
1725 {
1726         struct dma_fence *fence;
1727
1728         xe_vm_assert_held(vm);
1729
1730         fence = xe_vm_bind_vma(vma, e, syncs, num_syncs);
1731         if (IS_ERR(fence))
1732                 return PTR_ERR(fence);
1733         if (afence)
1734                 add_async_op_fence_cb(vm, fence, afence);
1735
1736         dma_fence_put(fence);
1737         return 0;
1738 }
1739
1740 static int xe_vm_bind(struct xe_vm *vm, struct xe_vma *vma, struct xe_engine *e,
1741                       struct xe_bo *bo, struct xe_sync_entry *syncs,
1742                       u32 num_syncs, struct async_op_fence *afence)
1743 {
1744         int err;
1745
1746         xe_vm_assert_held(vm);
1747         xe_bo_assert_held(bo);
1748
1749         if (bo) {
1750                 err = xe_bo_validate(bo, vm, true);
1751                 if (err)
1752                         return err;
1753         }
1754
1755         return __xe_vm_bind(vm, vma, e, syncs, num_syncs, afence);
1756 }
1757
1758 static int xe_vm_unbind(struct xe_vm *vm, struct xe_vma *vma,
1759                         struct xe_engine *e, struct xe_sync_entry *syncs,
1760                         u32 num_syncs, struct async_op_fence *afence)
1761 {
1762         struct dma_fence *fence;
1763
1764         xe_vm_assert_held(vm);
1765         xe_bo_assert_held(vma->bo);
1766
1767         fence = xe_vm_unbind_vma(vma, e, syncs, num_syncs);
1768         if (IS_ERR(fence))
1769                 return PTR_ERR(fence);
1770         if (afence)
1771                 add_async_op_fence_cb(vm, fence, afence);
1772
1773         xe_vma_destroy(vma, fence);
1774         dma_fence_put(fence);
1775
1776         return 0;
1777 }
1778
1779 static int vm_set_error_capture_address(struct xe_device *xe, struct xe_vm *vm,
1780                                         u64 value)
1781 {
1782         if (XE_IOCTL_ERR(xe, !value))
1783                 return -EINVAL;
1784
1785         if (XE_IOCTL_ERR(xe, !(vm->flags & XE_VM_FLAG_ASYNC_BIND_OPS)))
1786                 return -ENOTSUPP;
1787
1788         if (XE_IOCTL_ERR(xe, vm->async_ops.error_capture.addr))
1789                 return -ENOTSUPP;
1790
1791         vm->async_ops.error_capture.mm = current->mm;
1792         vm->async_ops.error_capture.addr = value;
1793         init_waitqueue_head(&vm->async_ops.error_capture.wq);
1794
1795         return 0;
1796 }
1797
1798 typedef int (*xe_vm_set_property_fn)(struct xe_device *xe, struct xe_vm *vm,
1799                                      u64 value);
1800
1801 static const xe_vm_set_property_fn vm_set_property_funcs[] = {
1802         [XE_VM_PROPERTY_BIND_OP_ERROR_CAPTURE_ADDRESS] =
1803                 vm_set_error_capture_address,
1804 };
1805
1806 static int vm_user_ext_set_property(struct xe_device *xe, struct xe_vm *vm,
1807                                     u64 extension)
1808 {
1809         u64 __user *address = u64_to_user_ptr(extension);
1810         struct drm_xe_ext_vm_set_property ext;
1811         int err;
1812
1813         err = copy_from_user(&ext, address, sizeof(ext));
1814         if (XE_IOCTL_ERR(xe, err))
1815                 return -EFAULT;
1816
1817         if (XE_IOCTL_ERR(xe, ext.property >=
1818                          ARRAY_SIZE(vm_set_property_funcs)))
1819                 return -EINVAL;
1820
1821         return vm_set_property_funcs[ext.property](xe, vm, ext.value);
1822 }
1823
1824 typedef int (*xe_vm_user_extension_fn)(struct xe_device *xe, struct xe_vm *vm,
1825                                        u64 extension);
1826
1827 static const xe_vm_user_extension_fn vm_user_extension_funcs[] = {
1828         [XE_VM_EXTENSION_SET_PROPERTY] = vm_user_ext_set_property,
1829 };
1830
1831 #define MAX_USER_EXTENSIONS     16
1832 static int vm_user_extensions(struct xe_device *xe, struct xe_vm *vm,
1833                               u64 extensions, int ext_number)
1834 {
1835         u64 __user *address = u64_to_user_ptr(extensions);
1836         struct xe_user_extension ext;
1837         int err;
1838
1839         if (XE_IOCTL_ERR(xe, ext_number >= MAX_USER_EXTENSIONS))
1840                 return -E2BIG;
1841
1842         err = copy_from_user(&ext, address, sizeof(ext));
1843         if (XE_IOCTL_ERR(xe, err))
1844                 return -EFAULT;
1845
1846         if (XE_IOCTL_ERR(xe, ext.name >=
1847                          ARRAY_SIZE(vm_user_extension_funcs)))
1848                 return -EINVAL;
1849
1850         err = vm_user_extension_funcs[ext.name](xe, vm, extensions);
1851         if (XE_IOCTL_ERR(xe, err))
1852                 return err;
1853
1854         if (ext.next_extension)
1855                 return vm_user_extensions(xe, vm, ext.next_extension,
1856                                           ++ext_number);
1857
1858         return 0;
1859 }
1860
1861 #define ALL_DRM_XE_VM_CREATE_FLAGS (DRM_XE_VM_CREATE_SCRATCH_PAGE | \
1862                                     DRM_XE_VM_CREATE_COMPUTE_MODE | \
1863                                     DRM_XE_VM_CREATE_ASYNC_BIND_OPS | \
1864                                     DRM_XE_VM_CREATE_FAULT_MODE)
1865
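/**
 * xe_vm_create_ioctl() - Create a VM
 * @dev: DRM device
 * @data: Pointer to struct drm_xe_vm_create
 * @file: DRM file
 *
 * Validates the creation flags, creates the VM, processes any user
 * extensions, registers the VM in the file's xarray and, on devices with USM
 * support, allocates an ASID for it.
 *
 * Return: 0 on success, negative error code on failure.
 */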
1866 int xe_vm_create_ioctl(struct drm_device *dev, void *data,
1867                        struct drm_file *file)
1868 {
1869         struct xe_device *xe = to_xe_device(dev);
1870         struct xe_file *xef = to_xe_file(file);
1871         struct drm_xe_vm_create *args = data;
1872         struct xe_vm *vm;
1873         u32 id, asid;
1874         int err;
1875         u32 flags = 0;
1876
1877         if (XE_IOCTL_ERR(xe, args->flags & ~ALL_DRM_XE_VM_CREATE_FLAGS))
1878                 return -EINVAL;
1879
1880         if (XE_IOCTL_ERR(xe, args->flags & DRM_XE_VM_CREATE_SCRATCH_PAGE &&
1881                          args->flags & DRM_XE_VM_CREATE_FAULT_MODE))
1882                 return -EINVAL;
1883
1884         if (XE_IOCTL_ERR(xe, args->flags & DRM_XE_VM_CREATE_COMPUTE_MODE &&
1885                          args->flags & DRM_XE_VM_CREATE_FAULT_MODE))
1886                 return -EINVAL;
1887
1888         if (XE_IOCTL_ERR(xe, args->flags & DRM_XE_VM_CREATE_FAULT_MODE &&
1889                          xe_device_in_non_fault_mode(xe)))
1890                 return -EINVAL;
1891
1892         if (XE_IOCTL_ERR(xe, !(args->flags & DRM_XE_VM_CREATE_FAULT_MODE) &&
1893                          xe_device_in_fault_mode(xe)))
1894                 return -EINVAL;
1895
1896         if (XE_IOCTL_ERR(xe, args->flags & DRM_XE_VM_CREATE_FAULT_MODE &&
1897                          !xe->info.supports_usm))
1898                 return -EINVAL;
1899
1900         if (args->flags & DRM_XE_VM_CREATE_SCRATCH_PAGE)
1901                 flags |= XE_VM_FLAG_SCRATCH_PAGE;
1902         if (args->flags & DRM_XE_VM_CREATE_COMPUTE_MODE)
1903                 flags |= XE_VM_FLAG_COMPUTE_MODE;
1904         if (args->flags & DRM_XE_VM_CREATE_ASYNC_BIND_OPS)
1905                 flags |= XE_VM_FLAG_ASYNC_BIND_OPS;
1906         if (args->flags & DRM_XE_VM_CREATE_FAULT_MODE)
1907                 flags |= XE_VM_FLAG_FAULT_MODE;
1908
1909         vm = xe_vm_create(xe, flags);
1910         if (IS_ERR(vm))
1911                 return PTR_ERR(vm);
1912
1913         if (args->extensions) {
1914                 err = vm_user_extensions(xe, vm, args->extensions, 0);
1915                 if (XE_IOCTL_ERR(xe, err)) {
1916                         xe_vm_close_and_put(vm);
1917                         return err;
1918                 }
1919         }
1920
1921         mutex_lock(&xef->vm.lock);
1922         err = xa_alloc(&xef->vm.xa, &id, vm, xa_limit_32b, GFP_KERNEL);
1923         mutex_unlock(&xef->vm.lock);
1924         if (err) {
1925                 xe_vm_close_and_put(vm);
1926                 return err;
1927         }
1928
1929         if (xe->info.supports_usm) {
1930                 mutex_lock(&xe->usm.lock);
1931                 err = xa_alloc_cyclic(&xe->usm.asid_to_vm, &asid, vm,
1932                                       XA_LIMIT(0, XE_MAX_ASID - 1),
1933                                       &xe->usm.next_asid, GFP_KERNEL);
1934                 mutex_unlock(&xe->usm.lock);
1935                 if (err) {
1936                         xe_vm_close_and_put(vm);
1937                         return err;
1938                 }
1939                 vm->usm.asid = asid;
1940         }
1941
1942         args->vm_id = id;
1943
1944 #if IS_ENABLED(CONFIG_DRM_XE_DEBUG_MEM)
1945         /* Warning: Security issue - never enable by default */
1946         args->reserved[0] = xe_bo_main_addr(vm->pt_root[0]->bo, GEN8_PAGE_SIZE);
1947 #endif
1948
1949         return 0;
1950 }
1951
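/**
 * xe_vm_destroy_ioctl() - Destroy a VM
 * @dev: DRM device
 * @data: Pointer to struct drm_xe_vm_destroy
 * @file: DRM file
 *
 * Looks up the VM by id, refuses to destroy it while compute engines are
 * still attached, removes it from the file's xarray and drops the file's
 * reference via xe_vm_close_and_put().
 *
 * Return: 0 on success, negative error code on failure.
 */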
1952 int xe_vm_destroy_ioctl(struct drm_device *dev, void *data,
1953                         struct drm_file *file)
1954 {
1955         struct xe_device *xe = to_xe_device(dev);
1956         struct xe_file *xef = to_xe_file(file);
1957         struct drm_xe_vm_destroy *args = data;
1958         struct xe_vm *vm;
1959
1960         if (XE_IOCTL_ERR(xe, args->pad))
1961                 return -EINVAL;
1962
1963         vm = xe_vm_lookup(xef, args->vm_id);
1964         if (XE_IOCTL_ERR(xe, !vm))
1965                 return -ENOENT;
1966         xe_vm_put(vm);
1967
1968         /* FIXME: Extend this check to non-compute mode VMs */
1969         if (XE_IOCTL_ERR(xe, vm->preempt.num_engines))
1970                 return -EBUSY;
1971
1972         mutex_lock(&xef->vm.lock);
1973         xa_erase(&xef->vm.xa, args->vm_id);
1974         mutex_unlock(&xef->vm.lock);
1975
1976         xe_vm_close_and_put(vm);
1977
1978         return 0;
1979 }
1980
1981 static const u32 region_to_mem_type[] = {
1982         XE_PL_TT,
1983         XE_PL_VRAM0,
1984         XE_PL_VRAM1,
1985 };
1986
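/*
 * Prefetch a VMA's backing store into the requested memory region and, if any
 * GT in the VMA's gt_mask is missing or has been invalidated, issue a
 * (re)bind. Otherwise there is nothing to do and the syncs are signaled
 * immediately with a stub fence.
 */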
1987 static int xe_vm_prefetch(struct xe_vm *vm, struct xe_vma *vma,
1988                           struct xe_engine *e, u32 region,
1989                           struct xe_sync_entry *syncs, u32 num_syncs,
1990                           struct async_op_fence *afence)
1991 {
1992         int err;
1993
1994         XE_BUG_ON(region >= ARRAY_SIZE(region_to_mem_type));
1995
1996         if (!xe_vma_is_userptr(vma)) {
1997                 err = xe_bo_migrate(vma->bo, region_to_mem_type[region]);
1998                 if (err)
1999                         return err;
2000         }
2001
2002         if (vma->gt_mask != (vma->gt_present & ~vma->usm.gt_invalidated)) {
2003                 return xe_vm_bind(vm, vma, e, vma->bo, syncs, num_syncs,
2004                                   afence);
2005         } else {
2006                 int i;
2007
2008                 /* Nothing to do, signal fences now */
2009                 for (i = 0; i < num_syncs; i++)
2010                         xe_sync_entry_signal(&syncs[i], NULL,
2011                                              dma_fence_get_stub());
2012                 if (afence)
2013                         dma_fence_signal(&afence->fence);
2014                 return 0;
2015         }
2016 }
2017
2018 #define VM_BIND_OP(op)  (op & 0xffff)
2019
2020 static int __vm_bind_ioctl(struct xe_vm *vm, struct xe_vma *vma,
2021                            struct xe_engine *e, struct xe_bo *bo, u32 op,
2022                            u32 region, struct xe_sync_entry *syncs,
2023                            u32 num_syncs, struct async_op_fence *afence)
2024 {
2025         switch (VM_BIND_OP(op)) {
2026         case XE_VM_BIND_OP_MAP:
2027                 return xe_vm_bind(vm, vma, e, bo, syncs, num_syncs, afence);
2028         case XE_VM_BIND_OP_UNMAP:
2029         case XE_VM_BIND_OP_UNMAP_ALL:
2030                 return xe_vm_unbind(vm, vma, e, syncs, num_syncs, afence);
2031         case XE_VM_BIND_OP_MAP_USERPTR:
2032                 return xe_vm_bind(vm, vma, e, NULL, syncs, num_syncs, afence);
2033         case XE_VM_BIND_OP_PREFETCH:
2034                 return xe_vm_prefetch(vm, vma, e, region, syncs, num_syncs,
2035                                       afence);
2036                 break;
2037         default:
2038                 XE_BUG_ON("NOT POSSIBLE");
2039                 return -EINVAL;
2040         }
2041 }
2042
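/**
 * xe_vm_ttm_bo() - TTM buffer object backing a VM's page tables
 * @vm: The VM
 *
 * Return: The TTM BO of the VM's root page table (the migration GT's root for
 * migration VMs, index 0 otherwise); its dma-resv is shared by all BOs in the
 * VM.
 */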
2043 struct ttm_buffer_object *xe_vm_ttm_bo(struct xe_vm *vm)
2044 {
2045         int idx = vm->flags & XE_VM_FLAG_MIGRATION ?
2046                 XE_VM_FLAG_GT_ID(vm->flags) : 0;
2047
2048         /* Safe to use index 0 as all BOs in the VM share a single dma-resv lock */
2049         return &vm->pt_root[idx]->bo->ttm;
2050 }
2051
2052 static void xe_vm_tv_populate(struct xe_vm *vm, struct ttm_validate_buffer *tv)
2053 {
2054         tv->num_shared = 1;
2055         tv->bo = xe_vm_ttm_bo(vm);
2056 }
2057
2058 static bool is_map_op(u32 op)
2059 {
2060         return VM_BIND_OP(op) == XE_VM_BIND_OP_MAP ||
2061                 VM_BIND_OP(op) == XE_VM_BIND_OP_MAP_USERPTR;
2062 }
2063
2064 static bool is_unmap_op(u32 op)
2065 {
2066         return VM_BIND_OP(op) == XE_VM_BIND_OP_UNMAP ||
2067                 VM_BIND_OP(op) == XE_VM_BIND_OP_UNMAP_ALL;
2068 }
2069
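/*
 * Execute a single bind operation with the VM (and the VMA's BO, if any)
 * reserved. A userptr VMA whose pages were invalidated mid-operation is
 * repinned and the operation retried.
 */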
2070 static int vm_bind_ioctl(struct xe_vm *vm, struct xe_vma *vma,
2071                          struct xe_engine *e, struct xe_bo *bo,
2072                          struct drm_xe_vm_bind_op *bind_op,
2073                          struct xe_sync_entry *syncs, u32 num_syncs,
2074                          struct async_op_fence *afence)
2075 {
2076         LIST_HEAD(objs);
2077         LIST_HEAD(dups);
2078         struct ttm_validate_buffer tv_bo, tv_vm;
2079         struct ww_acquire_ctx ww;
2080         struct xe_bo *vbo;
2081         int err, i;
2082
2083         lockdep_assert_held(&vm->lock);
2084         XE_BUG_ON(!list_empty(&vma->unbind_link));
2085
2086         /* Binds deferred to faults, signal fences now */
2087         if (xe_vm_in_fault_mode(vm) && is_map_op(bind_op->op) &&
2088             !(bind_op->op & XE_VM_BIND_FLAG_IMMEDIATE)) {
2089                 for (i = 0; i < num_syncs; i++)
2090                         xe_sync_entry_signal(&syncs[i], NULL,
2091                                              dma_fence_get_stub());
2092                 if (afence)
2093                         dma_fence_signal(&afence->fence);
2094                 return 0;
2095         }
2096
2097         xe_vm_tv_populate(vm, &tv_vm);
2098         list_add_tail(&tv_vm.head, &objs);
2099         vbo = vma->bo;
2100         if (vbo) {
2101                 /*
2102                  * An unbind can drop the last reference to the BO, but
2103                  * the BO is still needed for ttm_eu_backoff_reservation,
2104                  * so take a reference here.
2105                  */
2106                 xe_bo_get(vbo);
2107
2108                 tv_bo.bo = &vbo->ttm;
2109                 tv_bo.num_shared = 1;
2110                 list_add(&tv_bo.head, &objs);
2111         }
2112
2113 again:
2114         err = ttm_eu_reserve_buffers(&ww, &objs, true, &dups);
2115         if (!err) {
2116                 err = __vm_bind_ioctl(vm, vma, e, bo,
2117                                       bind_op->op, bind_op->region, syncs,
2118                                       num_syncs, afence);
2119                 ttm_eu_backoff_reservation(&ww, &objs);
2120                 if (err == -EAGAIN && xe_vma_is_userptr(vma)) {
2121                         lockdep_assert_held_write(&vm->lock);
2122                         err = xe_vma_userptr_pin_pages(vma);
2123                         if (!err)
2124                                 goto again;
2125                 }
2126         }
2127         xe_bo_put(vbo);
2128
2129         return err;
2130 }
2131
2132 struct async_op {
2133         struct xe_vma *vma;
2134         struct xe_engine *engine;
2135         struct xe_bo *bo;
2136         struct drm_xe_vm_bind_op bind_op;
2137         struct xe_sync_entry *syncs;
2138         u32 num_syncs;
2139         struct list_head link;
2140         struct async_op_fence *fence;
2141 };
2142
2143 static void async_op_cleanup(struct xe_vm *vm, struct async_op *op)
2144 {
2145         while (op->num_syncs--)
2146                 xe_sync_entry_cleanup(&op->syncs[op->num_syncs]);
2147         kfree(op->syncs);
2148         xe_bo_put(op->bo);
2149         if (op->engine)
2150                 xe_engine_put(op->engine);
2151         xe_vm_put(vm);
2152         if (op->fence)
2153                 dma_fence_put(&op->fence->fence);
2154         kfree(op);
2155 }
2156
2157 static struct async_op *next_async_op(struct xe_vm *vm)
2158 {
2159         return list_first_entry_or_null(&vm->async_ops.pending,
2160                                         struct async_op, link);
2161 }
2162
2163 static void vm_set_async_error(struct xe_vm *vm, int err)
2164 {
2165         lockdep_assert_held(&vm->lock);
2166         vm->async_ops.error = err;
2167 }
2168
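/*
 * Worker that drains the VM's list of pending async bind operations. It stops
 * on the first failure, putting the failed op back at the head of the list
 * and recording the error until userspace issues a RESTART; on a closed VM it
 * simply flushes the queue, destroying VMAs for unmap ops and signaling any
 * remaining fences.
 */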
2169 static void async_op_work_func(struct work_struct *w)
2170 {
2171         struct xe_vm *vm = container_of(w, struct xe_vm, async_ops.work);
2172
2173         for (;;) {
2174                 struct async_op *op;
2175                 int err;
2176
2177                 if (vm->async_ops.error && !xe_vm_is_closed(vm))
2178                         break;
2179
2180                 spin_lock_irq(&vm->async_ops.lock);
2181                 op = next_async_op(vm);
2182                 if (op)
2183                         list_del_init(&op->link);
2184                 spin_unlock_irq(&vm->async_ops.lock);
2185
2186                 if (!op)
2187                         break;
2188
2189                 if (!xe_vm_is_closed(vm)) {
2190                         bool first, last;
2191
2192                         down_write(&vm->lock);
2193 again:
2194                         first = op->vma->first_munmap_rebind;
2195                         last = op->vma->last_munmap_rebind;
2196 #ifdef TEST_VM_ASYNC_OPS_ERROR
2197 #define FORCE_ASYNC_OP_ERROR    BIT(31)
2198                         if (!(op->bind_op.op & FORCE_ASYNC_OP_ERROR)) {
2199                                 err = vm_bind_ioctl(vm, op->vma, op->engine,
2200                                                     op->bo, &op->bind_op,
2201                                                     op->syncs, op->num_syncs,
2202                                                     op->fence);
2203                         } else {
2204                                 err = -ENOMEM;
2205                                 op->bind_op.op &= ~FORCE_ASYNC_OP_ERROR;
2206                         }
2207 #else
2208                         err = vm_bind_ioctl(vm, op->vma, op->engine, op->bo,
2209                                             &op->bind_op, op->syncs,
2210                                             op->num_syncs, op->fence);
2211 #endif
2212                         /*
2213                          * In order for the fencing to work (stall behind
2214                          * existing jobs / prevent new jobs from running) all
2215                          * the dma-resv slots need to be programmed in a batch
2216                          * relative to execs / the rebind worker. The vm->lock
2217                          * ensures this.
2218                          */
2219                         if (!err && ((first && VM_BIND_OP(op->bind_op.op) ==
2220                                       XE_VM_BIND_OP_UNMAP) ||
2221                                      vm->async_ops.munmap_rebind_inflight)) {
2222                                 if (last) {
2223                                         op->vma->last_munmap_rebind = false;
2224                                         vm->async_ops.munmap_rebind_inflight =
2225                                                 false;
2226                                 } else {
2227                                         vm->async_ops.munmap_rebind_inflight =
2228                                                 true;
2229
2230                                         async_op_cleanup(vm, op);
2231
2232                                         spin_lock_irq(&vm->async_ops.lock);
2233                                         op = next_async_op(vm);
2234                                         XE_BUG_ON(!op);
2235                                         list_del_init(&op->link);
2236                                         spin_unlock_irq(&vm->async_ops.lock);
2237
2238                                         goto again;
2239                                 }
2240                         }
2241                         if (err) {
2242                                 trace_xe_vma_fail(op->vma);
2243                                 drm_warn(&vm->xe->drm, "Async VM op(%d) failed with %d",
2244                                          VM_BIND_OP(op->bind_op.op),
2245                                          err);
2246
2247                                 spin_lock_irq(&vm->async_ops.lock);
2248                                 list_add(&op->link, &vm->async_ops.pending);
2249                                 spin_unlock_irq(&vm->async_ops.lock);
2250
2251                                 vm_set_async_error(vm, err);
2252                                 up_write(&vm->lock);
2253
2254                                 if (vm->async_ops.error_capture.addr)
2255                                         vm_error_capture(vm, err,
2256                                                          op->bind_op.op,
2257                                                          op->bind_op.addr,
2258                                                          op->bind_op.range);
2259                                 break;
2260                         }
2261                         up_write(&vm->lock);
2262                 } else {
2263                         trace_xe_vma_flush(op->vma);
2264
2265                         if (is_unmap_op(op->bind_op.op)) {
2266                                 down_write(&vm->lock);
2267                                 xe_vma_destroy_unlocked(op->vma);
2268                                 up_write(&vm->lock);
2269                         }
2270
2271                         if (op->fence && !test_bit(DMA_FENCE_FLAG_SIGNALED_BIT,
2272                                                    &op->fence->fence.flags)) {
2273                                 if (!xe_vm_no_dma_fences(vm)) {
2274                                         op->fence->started = true;
2275                                         smp_wmb();
2276                                         wake_up_all(&op->fence->wq);
2277                                 }
2278                                 dma_fence_signal(&op->fence->fence);
2279                         }
2280                 }
2281
2282                 async_op_cleanup(vm, op);
2283         }
2284 }
2285
2286 static int __vm_bind_ioctl_async(struct xe_vm *vm, struct xe_vma *vma,
2287                                  struct xe_engine *e, struct xe_bo *bo,
2288                                  struct drm_xe_vm_bind_op *bind_op,
2289                                  struct xe_sync_entry *syncs, u32 num_syncs)
2290 {
2291         struct async_op *op;
2292         bool installed = false;
2293         u64 seqno;
2294         int i;
2295
2296         lockdep_assert_held(&vm->lock);
2297
2298         op = kmalloc(sizeof(*op), GFP_KERNEL);
2299         if (!op)
2300                 return -ENOMEM;
2302
2303         if (num_syncs) {
2304                 op->fence = kmalloc(sizeof(*op->fence), GFP_KERNEL);
2305                 if (!op->fence) {
2306                         kfree(op);
2307                         return -ENOMEM;
2308                 }
2309
2310                 seqno = e ? ++e->bind.fence_seqno : ++vm->async_ops.fence.seqno;
2311                 dma_fence_init(&op->fence->fence, &async_op_fence_ops,
2312                                &vm->async_ops.lock, e ? e->bind.fence_ctx :
2313                                vm->async_ops.fence.context, seqno);
2314
2315                 if (!xe_vm_no_dma_fences(vm)) {
2316                         op->fence->vm = vm;
2317                         op->fence->started = false;
2318                         init_waitqueue_head(&op->fence->wq);
2319                 }
2320         } else {
2321                 op->fence = NULL;
2322         }
2323         op->vma = vma;
2324         op->engine = e;
2325         op->bo = bo;
2326         op->bind_op = *bind_op;
2327         op->syncs = syncs;
2328         op->num_syncs = num_syncs;
2329         INIT_LIST_HEAD(&op->link);
2330
2331         for (i = 0; i < num_syncs; i++)
2332                 installed |= xe_sync_entry_signal(&syncs[i], NULL,
2333                                                   &op->fence->fence);
2334
2335         if (!installed && op->fence)
2336                 dma_fence_signal(&op->fence->fence);
2337
2338         spin_lock_irq(&vm->async_ops.lock);
2339         list_add_tail(&op->link, &vm->async_ops.pending);
2340         spin_unlock_irq(&vm->async_ops.lock);
2341
2342         if (!vm->async_ops.error)
2343                 queue_work(system_unbound_wq, &vm->async_ops.work);
2344
2345         return 0;
2346 }
2347
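/*
 * Queue one or more async bind operations: a single op in the common case, or
 * the whole decomposed unbind + rebind chain produced by a munmap-style
 * unbind, an unmap-all or a prefetch.
 */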
2348 static int vm_bind_ioctl_async(struct xe_vm *vm, struct xe_vma *vma,
2349                                struct xe_engine *e, struct xe_bo *bo,
2350                                struct drm_xe_vm_bind_op *bind_op,
2351                                struct xe_sync_entry *syncs, u32 num_syncs)
2352 {
2353         struct xe_vma *__vma, *next;
2354         struct list_head rebind_list;
2355         struct xe_sync_entry *in_syncs = NULL, *out_syncs = NULL;
2356         u32 num_in_syncs = 0, num_out_syncs = 0;
2357         bool first = true, last;
2358         int err;
2359         int i;
2360
2361         lockdep_assert_held(&vm->lock);
2362
2363         /* Not a linked list of unbinds + rebinds, easy */
2364         if (list_empty(&vma->unbind_link))
2365                 return __vm_bind_ioctl_async(vm, vma, e, bo, bind_op,
2366                                              syncs, num_syncs);
2367
2368         /*
2369          * Linked list of unbinds + rebinds: decompose the syncs into 'in / out',
2370          * passing the 'in' syncs to the first operation and the 'out' syncs to
2371          * the last. The reference counting is also a little tricky: increment the
2372          * VM / bind engine ref count on all but the last operation, and increment
2373          * the BO's ref count on each rebind.
2374          */
2375
2376         XE_BUG_ON(VM_BIND_OP(bind_op->op) != XE_VM_BIND_OP_UNMAP &&
2377                   VM_BIND_OP(bind_op->op) != XE_VM_BIND_OP_UNMAP_ALL &&
2378                   VM_BIND_OP(bind_op->op) != XE_VM_BIND_OP_PREFETCH);
2379
2380         /* Decompose syncs */
2381         if (num_syncs) {
2382                 in_syncs = kmalloc(sizeof(*in_syncs) * num_syncs, GFP_KERNEL);
2383                 out_syncs = kmalloc(sizeof(*out_syncs) * num_syncs, GFP_KERNEL);
2384                 if (!in_syncs || !out_syncs) {
2385                         err = -ENOMEM;
2386                         goto out_error;
2387                 }
2388
2389                 for (i = 0; i < num_syncs; ++i) {
2390                         bool signal = syncs[i].flags & DRM_XE_SYNC_SIGNAL;
2391
2392                         if (signal)
2393                                 out_syncs[num_out_syncs++] = syncs[i];
2394                         else
2395                                 in_syncs[num_in_syncs++] = syncs[i];
2396                 }
2397         }
2398
2399         /* Do unbinds + move rebinds to new list */
2400         INIT_LIST_HEAD(&rebind_list);
2401         list_for_each_entry_safe(__vma, next, &vma->unbind_link, unbind_link) {
2402                 if (__vma->destroyed ||
2403                     VM_BIND_OP(bind_op->op) == XE_VM_BIND_OP_PREFETCH) {
2404                         list_del_init(&__vma->unbind_link);
2405                         xe_bo_get(bo);
2406                         err = __vm_bind_ioctl_async(xe_vm_get(vm), __vma,
2407                                                     e ? xe_engine_get(e) : NULL,
2408                                                     bo, bind_op, first ?
2409                                                     in_syncs : NULL,
2410                                                     first ? num_in_syncs : 0);
2411                         if (err) {
2412                                 xe_bo_put(bo);
2413                                 xe_vm_put(vm);
2414                                 if (e)
2415                                         xe_engine_put(e);
2416                                 goto out_error;
2417                         }
2418                         in_syncs = NULL;
2419                         first = false;
2420                 } else {
2421                         list_move_tail(&__vma->unbind_link, &rebind_list);
2422                 }
2423         }
2424         last = list_empty(&rebind_list);
2425         if (!last) {
2426                 xe_vm_get(vm);
2427                 if (e)
2428                         xe_engine_get(e);
2429         }
2430         err = __vm_bind_ioctl_async(vm, vma, e,
2431                                     bo, bind_op,
2432                                     first ? in_syncs :
2433                                     last ? out_syncs : NULL,
2434                                     first ? num_in_syncs :
2435                                     last ? num_out_syncs : 0);
2436         if (err) {
2437                 if (!last) {
2438                         xe_vm_put(vm);
2439                         if (e)
2440                                 xe_engine_put(e);
2441                 }
2442                 goto out_error;
2443         }
2444         in_syncs = NULL;
2445
2446         /* Do rebinds */
2447         list_for_each_entry_safe(__vma, next, &rebind_list, unbind_link) {
2448                 list_del_init(&__vma->unbind_link);
2449                 last = list_empty(&rebind_list);
2450
2451                 if (xe_vma_is_userptr(__vma)) {
2452                         bind_op->op = XE_VM_BIND_FLAG_ASYNC |
2453                                 XE_VM_BIND_OP_MAP_USERPTR;
2454                 } else {
2455                         bind_op->op = XE_VM_BIND_FLAG_ASYNC |
2456                                 XE_VM_BIND_OP_MAP;
2457                         xe_bo_get(__vma->bo);
2458                 }
2459
2460                 if (!last) {
2461                         xe_vm_get(vm);
2462                         if (e)
2463                                 xe_engine_get(e);
2464                 }
2465
2466                 err = __vm_bind_ioctl_async(vm, __vma, e,
2467                                             __vma->bo, bind_op, last ?
2468                                             out_syncs : NULL,
2469                                             last ? num_out_syncs : 0);
2470                 if (err) {
2471                         if (!last) {
2472                                 xe_vm_put(vm);
2473                                 if (e)
2474                                         xe_engine_put(e);
2475                         }
2476                         goto out_error;
2477                 }
2478         }
2479
2480         kfree(syncs);
2481         return 0;
2482
2483 out_error:
2484         kfree(in_syncs);
2485         kfree(out_syncs);
2486         kfree(syncs);
2487
2488         return err;
2489 }
2490
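/*
 * Early address-range validation for a bind op: MAP / MAP_USERPTR must not
 * overlap an existing VMA, while UNMAP / PREFETCH must hit an existing VMA
 * which, for synchronous operations, has to match the range exactly.
 */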
2491 static int __vm_bind_ioctl_lookup_vma(struct xe_vm *vm, struct xe_bo *bo,
2492                                       u64 addr, u64 range, u32 op)
2493 {
2494         struct xe_device *xe = vm->xe;
2495         struct xe_vma *vma, lookup;
2496         bool async = !!(op & XE_VM_BIND_FLAG_ASYNC);
2497
2498         lockdep_assert_held(&vm->lock);
2499
2500         lookup.start = addr;
2501         lookup.end = addr + range - 1;
2502
2503         switch (VM_BIND_OP(op)) {
2504         case XE_VM_BIND_OP_MAP:
2505         case XE_VM_BIND_OP_MAP_USERPTR:
2506                 vma = xe_vm_find_overlapping_vma(vm, &lookup);
2507                 if (XE_IOCTL_ERR(xe, vma))
2508                         return -EBUSY;
2509                 break;
2510         case XE_VM_BIND_OP_UNMAP:
2511         case XE_VM_BIND_OP_PREFETCH:
2512                 vma = xe_vm_find_overlapping_vma(vm, &lookup);
2513                 if (XE_IOCTL_ERR(xe, !vma) ||
2514                     XE_IOCTL_ERR(xe, (vma->start != addr ||
2515                                  vma->end != addr + range - 1) && !async))
2516                         return -EINVAL;
2517                 break;
2518         case XE_VM_BIND_OP_UNMAP_ALL:
2519                 break;
2520         default:
2521                 XE_BUG_ON("NOT POSSIBLE");
2522                 return -EINVAL;
2523         }
2524
2525         return 0;
2526 }
2527
2528 static void prep_vma_destroy(struct xe_vm *vm, struct xe_vma *vma)
2529 {
2530         down_read(&vm->userptr.notifier_lock);
2531         vma->destroyed = true;
2532         up_read(&vm->userptr.notifier_lock);
2533         xe_vm_remove_vma(vm, vma);
2534 }
2535
2536 static int prep_replacement_vma(struct xe_vm *vm, struct xe_vma *vma)
2537 {
2538         int err;
2539
2540         if (vma->bo && !vma->bo->vm) {
2541                 vm_insert_extobj(vm, vma);
2542                 err = add_preempt_fences(vm, vma->bo);
2543                 if (err)
2544                         return err;
2545         }
2546
2547         return 0;
2548 }
2549
2550 /*
2551  * Find all overlapping VMAs in lookup range and add to a list in the returned
2552  * VMA; all of the VMAs found will be unbound. Also possibly add 2 new VMAs that
2553  * need to be bound if first / last VMAs are not fully unbound. This is akin to
2554  * how munmap works.
2555  */
2556 static struct xe_vma *vm_unbind_lookup_vmas(struct xe_vm *vm,
2557                                             struct xe_vma *lookup)
2558 {
2559         struct xe_vma *vma = xe_vm_find_overlapping_vma(vm, lookup);
2560         struct rb_node *node;
2561         struct xe_vma *first = vma, *last = vma, *new_first = NULL,
2562                       *new_last = NULL, *__vma, *next;
2563         int err = 0;
2564         bool first_munmap_rebind = false;
2565
2566         lockdep_assert_held(&vm->lock);
2567         XE_BUG_ON(!vma);
2568
2569         node = &vma->vm_node;
2570         while ((node = rb_next(node))) {
2571                 if (!xe_vma_cmp_vma_cb(lookup, node)) {
2572                         __vma = to_xe_vma(node);
2573                         list_add_tail(&__vma->unbind_link, &vma->unbind_link);
2574                         last = __vma;
2575                 } else {
2576                         break;
2577                 }
2578         }
2579
2580         node = &vma->vm_node;
2581         while ((node = rb_prev(node))) {
2582                 if (!xe_vma_cmp_vma_cb(lookup, node)) {
2583                         __vma = to_xe_vma(node);
2584                         list_add(&__vma->unbind_link, &vma->unbind_link);
2585                         first = __vma;
2586                 } else {
2587                         break;
2588                 }
2589         }
2590
2591         if (first->start != lookup->start) {
2592                 struct ww_acquire_ctx ww;
2593
2594                 if (first->bo)
2595                         err = xe_bo_lock(first->bo, &ww, 0, true);
2596                 if (err)
2597                         goto unwind;
2598                 new_first = xe_vma_create(first->vm, first->bo,
2599                                           first->bo ? first->bo_offset :
2600                                           first->userptr.ptr,
2601                                           first->start,
2602                                           lookup->start - 1,
2603                                           (first->pte_flags & PTE_READ_ONLY),
2604                                           first->gt_mask);
2605                 if (first->bo)
2606                         xe_bo_unlock(first->bo, &ww);
2607                 if (!new_first) {
2608                         err = -ENOMEM;
2609                         goto unwind;
2610                 }
2611                 if (!first->bo) {
2612                         err = xe_vma_userptr_pin_pages(new_first);
2613                         if (err)
2614                                 goto unwind;
2615                 }
2616                 err = prep_replacement_vma(vm, new_first);
2617                 if (err)
2618                         goto unwind;
2619         }
2620
2621         if (last->end != lookup->end) {
2622                 struct ww_acquire_ctx ww;
2623                 u64 chunk = lookup->end + 1 - last->start;
2624
2625                 if (last->bo)
2626                         err = xe_bo_lock(last->bo, &ww, 0, true);
2627                 if (err)
2628                         goto unwind;
2629                 new_last = xe_vma_create(last->vm, last->bo,
2630                                          last->bo ? last->bo_offset + chunk :
2631                                          last->userptr.ptr + chunk,
2632                                          last->start + chunk,
2633                                          last->end,
2634                                          (last->pte_flags & PTE_READ_ONLY),
2635                                          last->gt_mask);
2636                 if (last->bo)
2637                         xe_bo_unlock(last->bo, &ww);
2638                 if (!new_last) {
2639                         err = -ENOMEM;
2640                         goto unwind;
2641                 }
2642                 if (!last->bo) {
2643                         err = xe_vma_userptr_pin_pages(new_last);
2644                         if (err)
2645                                 goto unwind;
2646                 }
2647                 err = prep_replacement_vma(vm, new_last);
2648                 if (err)
2649                         goto unwind;
2650         }
2651
2652         prep_vma_destroy(vm, vma);
2653         if (list_empty(&vma->unbind_link) && (new_first || new_last))
2654                 vma->first_munmap_rebind = true;
2655         list_for_each_entry(__vma, &vma->unbind_link, unbind_link) {
2656                 if ((new_first || new_last) && !first_munmap_rebind) {
2657                         __vma->first_munmap_rebind = true;
2658                         first_munmap_rebind = true;
2659                 }
2660                 prep_vma_destroy(vm, __vma);
2661         }
2662         if (new_first) {
2663                 xe_vm_insert_vma(vm, new_first);
2664                 list_add_tail(&new_first->unbind_link, &vma->unbind_link);
2665                 if (!new_last)
2666                         new_first->last_munmap_rebind = true;
2667         }
2668         if (new_last) {
2669                 xe_vm_insert_vma(vm, new_last);
2670                 list_add_tail(&new_last->unbind_link, &vma->unbind_link);
2671                 new_last->last_munmap_rebind = true;
2672         }
2673
2674         return vma;
2675
2676 unwind:
2677         list_for_each_entry_safe(__vma, next, &vma->unbind_link, unbind_link)
2678                 list_del_init(&__vma->unbind_link);
2679         if (new_last) {
2680                 prep_vma_destroy(vm, new_last);
2681                 xe_vma_destroy_unlocked(new_last);
2682         }
2683         if (new_first) {
2684                 prep_vma_destroy(vm, new_first);
2685                 xe_vma_destroy_unlocked(new_first);
2686         }
2687
2688         return ERR_PTR(err);
2689 }
2690
2691 /*
2692  * Similar to vm_unbind_lookup_vmas, find all VMAs in lookup range to prefetch
2693  */
2694 static struct xe_vma *vm_prefetch_lookup_vmas(struct xe_vm *vm,
2695                                               struct xe_vma *lookup,
2696                                               u32 region)
2697 {
2698         struct xe_vma *vma = xe_vm_find_overlapping_vma(vm, lookup), *__vma,
2699                       *next;
2700         struct rb_node *node;
2701
2702         if (!xe_vma_is_userptr(vma)) {
2703                 if (!xe_bo_can_migrate(vma->bo, region_to_mem_type[region]))
2704                         return ERR_PTR(-EINVAL);
2705         }
2706
2707         node = &vma->vm_node;
2708         while ((node = rb_next(node))) {
2709                 if (!xe_vma_cmp_vma_cb(lookup, node)) {
2710                         __vma = to_xe_vma(node);
2711                         if (!xe_vma_is_userptr(__vma)) {
2712                                 if (!xe_bo_can_migrate(__vma->bo, region_to_mem_type[region]))
2713                                         goto flush_list;
2714                         }
2715                         list_add_tail(&__vma->unbind_link, &vma->unbind_link);
2716                 } else {
2717                         break;
2718                 }
2719         }
2720
2721         node = &vma->vm_node;
2722         while ((node = rb_prev(node))) {
2723                 if (!xe_vma_cmp_vma_cb(lookup, node)) {
2724                         __vma = to_xe_vma(node);
2725                         if (!xe_vma_is_userptr(__vma)) {
2726                                 if (!xe_bo_can_migrate(__vma->bo, region_to_mem_type[region]))
2727                                         goto flush_list;
2728                         }
2729                         list_add(&__vma->unbind_link, &vma->unbind_link);
2730                 } else {
2731                         break;
2732                 }
2733         }
2734
2735         return vma;
2736
2737 flush_list:
2738         list_for_each_entry_safe(__vma, next, &vma->unbind_link,
2739                                  unbind_link)
2740                 list_del_init(&__vma->unbind_link);
2741
2742         return ERR_PTR(-EINVAL);
2743 }
2744
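/*
 * Gather every VMA of @vm that maps @bo into a single unbind list, marking
 * each for destruction and removing it from the VM, and return the first one
 * as the list head for the subsequent unbind-all.
 */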
2745 static struct xe_vma *vm_unbind_all_lookup_vmas(struct xe_vm *vm,
2746                                                 struct xe_bo *bo)
2747 {
2748         struct xe_vma *first = NULL, *vma;
2749
2750         lockdep_assert_held(&vm->lock);
2751         xe_bo_assert_held(bo);
2752
2753         list_for_each_entry(vma, &bo->vmas, bo_link) {
2754                 if (vma->vm != vm)
2755                         continue;
2756
2757                 prep_vma_destroy(vm, vma);
2758                 if (!first)
2759                         first = vma;
2760                 else
2761                         list_add_tail(&vma->unbind_link, &first->unbind_link);
2762         }
2763
2764         return first;
2765 }
2766
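/*
 * Create or look up the VMA(s) an operation acts on: a new VMA for MAP /
 * MAP_USERPTR, the (possibly decomposed) overlapping VMAs for UNMAP and
 * PREFETCH, and all of a BO's VMAs for UNMAP_ALL.
 */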
2767 static struct xe_vma *vm_bind_ioctl_lookup_vma(struct xe_vm *vm,
2768                                                struct xe_bo *bo,
2769                                                u64 bo_offset_or_userptr,
2770                                                u64 addr, u64 range, u32 op,
2771                                                u64 gt_mask, u32 region)
2772 {
2773         struct ww_acquire_ctx ww;
2774         struct xe_vma *vma, lookup;
2775         int err;
2776
2777         lockdep_assert_held(&vm->lock);
2778
2779         lookup.start = addr;
2780         lookup.end = addr + range - 1;
2781
2782         switch (VM_BIND_OP(op)) {
2783         case XE_VM_BIND_OP_MAP:
2784                 XE_BUG_ON(!bo);
2785
2786                 err = xe_bo_lock(bo, &ww, 0, true);
2787                 if (err)
2788                         return ERR_PTR(err);
2789                 vma = xe_vma_create(vm, bo, bo_offset_or_userptr, addr,
2790                                     addr + range - 1,
2791                                     op & XE_VM_BIND_FLAG_READONLY,
2792                                     gt_mask);
2793                 xe_bo_unlock(bo, &ww);
2794                 if (!vma)
2795                         return ERR_PTR(-ENOMEM);
2796
2797                 xe_vm_insert_vma(vm, vma);
2798                 if (!bo->vm) {
2799                         vm_insert_extobj(vm, vma);
2800                         err = add_preempt_fences(vm, bo);
2801                         if (err) {
2802                                 prep_vma_destroy(vm, vma);
2803                                 xe_vma_destroy_unlocked(vma);
2804
2805                                 return ERR_PTR(err);
2806                         }
2807                 }
2808                 break;
2809         case XE_VM_BIND_OP_UNMAP:
2810                 vma = vm_unbind_lookup_vmas(vm, &lookup);
2811                 break;
2812         case XE_VM_BIND_OP_PREFETCH:
2813                 vma = vm_prefetch_lookup_vmas(vm, &lookup, region);
2814                 break;
2815         case XE_VM_BIND_OP_UNMAP_ALL:
2816                 XE_BUG_ON(!bo);
2817
2818                 err = xe_bo_lock(bo, &ww, 0, true);
2819                 if (err)
2820                         return ERR_PTR(err);
2821                 vma = vm_unbind_all_lookup_vmas(vm, bo);
2822                 if (!vma)
2823                         vma = ERR_PTR(-EINVAL);
2824                 xe_bo_unlock(bo, &ww);
2825                 break;
2826         case XE_VM_BIND_OP_MAP_USERPTR:
2827                 XE_BUG_ON(bo);
2828
2829                 vma = xe_vma_create(vm, NULL, bo_offset_or_userptr, addr,
2830                                     addr + range - 1,
2831                                     op & XE_VM_BIND_FLAG_READONLY,
2832                                     gt_mask);
2833                 if (!vma)
2834                         return ERR_PTR(-ENOMEM);
2835
2836                 err = xe_vma_userptr_pin_pages(vma);
2837                 if (err) {
2838                         xe_vma_destroy(vma, NULL);
2839
2840                         return ERR_PTR(err);
2841                 } else {
2842                         xe_vm_insert_vma(vm, vma);
2843                 }
2844                 break;
2845         default:
2846                 XE_BUG_ON("NOT POSSIBLE");
2847                 vma = ERR_PTR(-EINVAL);
2848         }
2849
2850         return vma;
2851 }
2852
2853 #ifdef TEST_VM_ASYNC_OPS_ERROR
2854 #define SUPPORTED_FLAGS \
2855         (FORCE_ASYNC_OP_ERROR | XE_VM_BIND_FLAG_ASYNC | \
2856          XE_VM_BIND_FLAG_READONLY | XE_VM_BIND_FLAG_IMMEDIATE | 0xffff)
2857 #else
2858 #define SUPPORTED_FLAGS \
2859         (XE_VM_BIND_FLAG_ASYNC | XE_VM_BIND_FLAG_READONLY | \
2860          XE_VM_BIND_FLAG_IMMEDIATE | 0xffff)
2861 #endif
2862 #define XE_64K_PAGE_MASK 0xffffull
2863
2864 #define MAX_BINDS       512     /* FIXME: Picking random upper limit */
2865
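/*
 * Copy in and sanity-check the array of bind operations from userspace. All
 * operations in a single ioctl must agree on async vs. sync, and each op's
 * flags, object, address, range and region are validated against the
 * operation type before any work is queued.
 */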
2866 static int vm_bind_ioctl_check_args(struct xe_device *xe,
2867                                     struct drm_xe_vm_bind *args,
2868                                     struct drm_xe_vm_bind_op **bind_ops,
2869                                     bool *async)
2870 {
2871         int err;
2872         int i;
2873
2874         if (XE_IOCTL_ERR(xe, args->extensions) ||
2875             XE_IOCTL_ERR(xe, !args->num_binds) ||
2876             XE_IOCTL_ERR(xe, args->num_binds > MAX_BINDS))
2877                 return -EINVAL;
2878
2879         if (args->num_binds > 1) {
2880                 u64 __user *bind_user =
2881                         u64_to_user_ptr(args->vector_of_binds);
2882
2883                 *bind_ops = kmalloc(sizeof(struct drm_xe_vm_bind_op) *
2884                                     args->num_binds, GFP_KERNEL);
2885                 if (!*bind_ops)
2886                         return -ENOMEM;
2887
2888                 err = copy_from_user(*bind_ops, bind_user,
2889                                        sizeof(struct drm_xe_vm_bind_op) *
2890                                        args->num_binds);
2891                 if (XE_IOCTL_ERR(xe, err)) {
2892                         err = -EFAULT;
2893                         goto free_bind_ops;
2894                 }
2895         } else {
2896                 *bind_ops = &args->bind;
2897         }
2898
2899         for (i = 0; i < args->num_binds; ++i) {
2900                 u64 range = (*bind_ops)[i].range;
2901                 u64 addr = (*bind_ops)[i].addr;
2902                 u32 op = (*bind_ops)[i].op;
2903                 u32 obj = (*bind_ops)[i].obj;
2904                 u64 obj_offset = (*bind_ops)[i].obj_offset;
2905                 u32 region = (*bind_ops)[i].region;
2906
2907                 if (i == 0) {
2908                         *async = !!(op & XE_VM_BIND_FLAG_ASYNC);
2909                 } else if (XE_IOCTL_ERR(xe, !*async) ||
2910                            XE_IOCTL_ERR(xe, !(op & XE_VM_BIND_FLAG_ASYNC)) ||
2911                            XE_IOCTL_ERR(xe, VM_BIND_OP(op) ==
2912                                         XE_VM_BIND_OP_RESTART)) {
2913                         err = -EINVAL;
2914                         goto free_bind_ops;
2915                 }
2916
2917                 if (XE_IOCTL_ERR(xe, !*async &&
2918                                  VM_BIND_OP(op) == XE_VM_BIND_OP_UNMAP_ALL)) {
2919                         err = -EINVAL;
2920                         goto free_bind_ops;
2921                 }
2922
2923                 if (XE_IOCTL_ERR(xe, !*async &&
2924                                  VM_BIND_OP(op) == XE_VM_BIND_OP_PREFETCH)) {
2925                         err = -EINVAL;
2926                         goto free_bind_ops;
2927                 }
2928
2929                 if (XE_IOCTL_ERR(xe, VM_BIND_OP(op) >
2930                                  XE_VM_BIND_OP_PREFETCH) ||
2931                     XE_IOCTL_ERR(xe, op & ~SUPPORTED_FLAGS) ||
2932                     XE_IOCTL_ERR(xe, !obj &&
2933                                  VM_BIND_OP(op) == XE_VM_BIND_OP_MAP) ||
2934                     XE_IOCTL_ERR(xe, !obj &&
2935                                  VM_BIND_OP(op) == XE_VM_BIND_OP_UNMAP_ALL) ||
2936                     XE_IOCTL_ERR(xe, addr &&
2937                                  VM_BIND_OP(op) == XE_VM_BIND_OP_UNMAP_ALL) ||
2938                     XE_IOCTL_ERR(xe, range &&
2939                                  VM_BIND_OP(op) == XE_VM_BIND_OP_UNMAP_ALL) ||
2940                     XE_IOCTL_ERR(xe, obj &&
2941                                  VM_BIND_OP(op) == XE_VM_BIND_OP_MAP_USERPTR) ||
2942                     XE_IOCTL_ERR(xe, obj &&
2943                                  VM_BIND_OP(op) == XE_VM_BIND_OP_PREFETCH) ||
2944                     XE_IOCTL_ERR(xe, region &&
2945                                  VM_BIND_OP(op) != XE_VM_BIND_OP_PREFETCH) ||
2946                     XE_IOCTL_ERR(xe, !(BIT(region) &
2947                                        xe->info.mem_region_mask)) ||
2948                     XE_IOCTL_ERR(xe, obj &&
2949                                  VM_BIND_OP(op) == XE_VM_BIND_OP_UNMAP)) {
2950                         err = -EINVAL;
2951                         goto free_bind_ops;
2952                 }
2953
2954                 if (XE_IOCTL_ERR(xe, obj_offset & ~PAGE_MASK) ||
2955                     XE_IOCTL_ERR(xe, addr & ~PAGE_MASK) ||
2956                     XE_IOCTL_ERR(xe, range & ~PAGE_MASK) ||
2957                     XE_IOCTL_ERR(xe, !range && VM_BIND_OP(op) !=
2958                                  XE_VM_BIND_OP_RESTART &&
2959                                  VM_BIND_OP(op) != XE_VM_BIND_OP_UNMAP_ALL)) {
2960                         err = -EINVAL;
2961                         goto free_bind_ops;
2962                 }
2963         }
2964
2965         return 0;
2966
2967 free_bind_ops:
2968         if (args->num_binds > 1)
2969                 kfree(*bind_ops);
2970         return err;
2971 }
2972
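/**
 * xe_vm_bind_ioctl() - Bind or unbind addresses in a VM
 * @dev: DRM device
 * @data: Pointer to struct drm_xe_vm_bind
 * @file: DRM file
 *
 * Validates the bind operations, looks up the VM and the optional bind
 * engine, and then executes the operations either synchronously or via the
 * VM's async worker. A RESTART op re-kicks the async worker after a previous
 * bind error has been handled.
 *
 * Return: 0 on success, negative error code on failure.
 */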
2973 int xe_vm_bind_ioctl(struct drm_device *dev, void *data, struct drm_file *file)
2974 {
2975         struct xe_device *xe = to_xe_device(dev);
2976         struct xe_file *xef = to_xe_file(file);
2977         struct drm_xe_vm_bind *args = data;
2978         struct drm_xe_sync __user *syncs_user;
2979         struct xe_bo **bos = NULL;
2980         struct xe_vma **vmas = NULL;
2981         struct xe_vm *vm;
2982         struct xe_engine *e = NULL;
2983         u32 num_syncs;
2984         struct xe_sync_entry *syncs = NULL;
2985         struct drm_xe_vm_bind_op *bind_ops;
2986         bool async;
2987         int err;
2988         int i, j = 0;
2989
2990         err = vm_bind_ioctl_check_args(xe, args, &bind_ops, &async);
2991         if (err)
2992                 return err;
2993
2994         vm = xe_vm_lookup(xef, args->vm_id);
2995         if (XE_IOCTL_ERR(xe, !vm)) {
2996                 err = -EINVAL;
2997                 goto free_objs;
2998         }
2999
3000         if (XE_IOCTL_ERR(xe, xe_vm_is_closed(vm))) {
3001                 DRM_ERROR("VM closed while we were looking it up?\n");
3002                 err = -ENOENT;
3003                 goto put_vm;
3004         }
3005
3006         if (args->engine_id) {
3007                 e = xe_engine_lookup(xef, args->engine_id);
3008                 if (XE_IOCTL_ERR(xe, !e)) {
3009                         err = -ENOENT;
3010                         goto put_vm;
3011                 }
3012                 if (XE_IOCTL_ERR(xe, !(e->flags & ENGINE_FLAG_VM))) {
3013                         err = -EINVAL;
3014                         goto put_engine;
3015                 }
3016         }
3017
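             /*
              * A RESTART op only resumes the async bind worker after a prior
              * bind error: it requires async binds to be enabled, carries no
              * syncs of its own, and is only valid while an error is pending.
              */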
3018         if (VM_BIND_OP(bind_ops[0].op) == XE_VM_BIND_OP_RESTART) {
3019                 if (XE_IOCTL_ERR(xe, !(vm->flags & XE_VM_FLAG_ASYNC_BIND_OPS)))
3020                         err = -ENOTSUPP;
3021                 if (XE_IOCTL_ERR(xe, !err && args->num_syncs))
3022                         err = EINVAL;
3023                         err = -EINVAL;
3024                         err = -EPROTO;
3025
3026                 if (!err) {
3027                         down_write(&vm->lock);
3028                         trace_xe_vm_restart(vm);
3029                         vm_set_async_error(vm, 0);
3030                         up_write(&vm->lock);
3031
3032                         queue_work(system_unbound_wq, &vm->async_ops.work);
3033
3034                         /* Rebinds may have been blocked, give worker a kick */
3035                         if (xe_vm_in_compute_mode(vm))
3036                                 queue_work(vm->xe->ordered_wq,
3037                                            &vm->preempt.rebind_work);
3038                 }
3039
3040                 goto put_engine;
3041         }
3042
3043         if (XE_IOCTL_ERR(xe, !vm->async_ops.error &&
3044                          async != !!(vm->flags & XE_VM_FLAG_ASYNC_BIND_OPS))) {
3045                 err = -ENOTSUPP;
3046                 goto put_engine;
3047         }
3048
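             /* Validate each bind's address range and gt_mask against the VM. */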
3049         for (i = 0; i < args->num_binds; ++i) {
3050                 u64 range = bind_ops[i].range;
3051                 u64 addr = bind_ops[i].addr;
3052
3053                 if (XE_IOCTL_ERR(xe, range > vm->size) ||
3054                     XE_IOCTL_ERR(xe, addr > vm->size - range)) {
3055                         err = -EINVAL;
3056                         goto put_engine;
3057                 }
3058
3059                 if (bind_ops[i].gt_mask) {
3060                         u64 valid_gts = BIT(xe->info.tile_count) - 1;
3061
3062                         if (XE_IOCTL_ERR(xe, bind_ops[i].gt_mask &
3063                                          ~valid_gts)) {
3064                                 err = -EINVAL;
3065                                 goto put_engine;
3066                         }
3067                 }
3068         }
3069
3070         bos = kcalloc(args->num_binds, sizeof(*bos), GFP_KERNEL);
3071         if (!bos) {
3072                 err = -ENOMEM;
3073                 goto put_engine;
3074         }
3075
3076         vmas = kcalloc(args->num_binds, sizeof(*vmas), GFP_KERNEL);
3077         if (!vmas) {
3078                 err = -ENOMEM;
3079                 goto put_engine;
3080         }
3081
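             /*
              * Look up the backing GEM objects and check that the bound range
              * fits inside the BO; BOs using 64K pages additionally need
              * 64K-aligned offset, address and range.
              */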
3082         for (i = 0; i < args->num_binds; ++i) {
3083                 struct drm_gem_object *gem_obj;
3084                 u64 range = bind_ops[i].range;
3085                 u64 addr = bind_ops[i].addr;
3086                 u32 obj = bind_ops[i].obj;
3087                 u64 obj_offset = bind_ops[i].obj_offset;
3088
3089                 if (!obj)
3090                         continue;
3091
3092                 gem_obj = drm_gem_object_lookup(file, obj);
3093                 if (XE_IOCTL_ERR(xe, !gem_obj)) {
3094                         err = -ENOENT;
3095                         goto put_obj;
3096                 }
3097                 bos[i] = gem_to_xe_bo(gem_obj);
3098
3099                 if (XE_IOCTL_ERR(xe, range > bos[i]->size) ||
3100                     XE_IOCTL_ERR(xe, obj_offset >
3101                                  bos[i]->size - range)) {
3102                         err = -EINVAL;
3103                         goto put_obj;
3104                 }
3105
3106                 if (bos[i]->flags & XE_BO_INTERNAL_64K) {
3107                         if (XE_IOCTL_ERR(xe, obj_offset &
3108                                          XE_64K_PAGE_MASK) ||
3109                             XE_IOCTL_ERR(xe, addr & XE_64K_PAGE_MASK) ||
3110                             XE_IOCTL_ERR(xe, range & XE_64K_PAGE_MASK)) {
3111                                 err = -EINVAL;
3112                                 goto put_obj;
3113                         }
3114                 }
3115         }
3116
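             /* Copy in and parse the user-supplied sync entries. */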
3117         if (args->num_syncs) {
3118                 syncs = kcalloc(args->num_syncs, sizeof(*syncs), GFP_KERNEL);
3119                 if (!syncs) {
3120                         err = -ENOMEM;
3121                         goto put_obj;
3122                 }
3123         }
3124
3125         syncs_user = u64_to_user_ptr(args->syncs);
3126         for (num_syncs = 0; num_syncs < args->num_syncs; num_syncs++) {
3127                 err = xe_sync_entry_parse(xe, xef, &syncs[num_syncs],
3128                                           &syncs_user[num_syncs], false,
3129                                           xe_vm_no_dma_fences(vm));
3130                 if (err)
3131                         goto free_syncs;
3132         }
3133
3134         err = down_write_killable(&vm->lock);
3135         if (err)
3136                 goto free_syncs;
3137
3138         /* Do some error checking first to make the unwind easier */
3139         for (i = 0; i < args->num_binds; ++i) {
3140                 u64 range = bind_ops[i].range;
3141                 u64 addr = bind_ops[i].addr;
3142                 u32 op = bind_ops[i].op;
3143
3144                 err = __vm_bind_ioctl_lookup_vma(vm, bos[i], addr, range, op);
3145                 if (err)
3146                         goto release_vm_lock;
3147         }
3148
3149         for (i = 0; i < args->num_binds; ++i) {
3150                 u64 range = bind_ops[i].range;
3151                 u64 addr = bind_ops[i].addr;
3152                 u32 op = bind_ops[i].op;
3153                 u64 obj_offset = bind_ops[i].obj_offset;
3154                 u64 gt_mask = bind_ops[i].gt_mask;
3155                 u32 region = bind_ops[i].region;
3156
3157                 vmas[i] = vm_bind_ioctl_lookup_vma(vm, bos[i], obj_offset,
3158                                                    addr, range, op, gt_mask,
3159                                                    region);
3160                 if (IS_ERR(vmas[i])) {
3161                         err = PTR_ERR(vmas[i]);
3162                         vmas[i] = NULL;
3163                         goto destroy_vmas;
3164                 }
3165         }
3166
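             /*
              * Submit the binds. For a multi-bind request the in-syncs are
              * attached to the first bind and the out-syncs to the last one;
              * intermediate binds get no syncs.
              */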
3167         for (j = 0; j < args->num_binds; ++j) {
3168                 struct xe_sync_entry *__syncs;
3169                 u32 __num_syncs = 0;
3170                 bool first_or_last = j == 0 || j == args->num_binds - 1;
3171
3172                 if (args->num_binds == 1) {
3173                         __num_syncs = num_syncs;
3174                         __syncs = syncs;
3175                 } else if (first_or_last && num_syncs) {
3176                         bool first = j == 0;
3177
3178                         __syncs = kmalloc_array(num_syncs, sizeof(*__syncs),
3179                                                 GFP_KERNEL);
3180                         if (!__syncs) {
3181                                 err = -ENOMEM;
3182                                 break;
3183                         }
3184
3185                         /* in-syncs on first bind, out-syncs on last bind */
3186                         for (i = 0; i < num_syncs; ++i) {
3187                                 bool signal = syncs[i].flags &
3188                                         DRM_XE_SYNC_SIGNAL;
3189
3190                                 if ((first && !signal) || (!first && signal))
3191                                         __syncs[__num_syncs++] = syncs[i];
3192                         }
3193                 } else {
3194                         __num_syncs = 0;
3195                         __syncs = NULL;
3196                 }
3197
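                     /*
                      * Async binds are queued to the VM's bind worker; the
                      * synchronous path runs inline and only supports a
                      * single bind op.
                      */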
3198                 if (async) {
3199                         bool last = j == args->num_binds - 1;
3200
3201                         /*
3202                          * Each pass of the async worker drops a ref; take an
3203                          * extra ref for all but the last bind, which uses the
3204                          * set of refs taken above.
3205                          */
3205                         if (!last) {
3206                                 if (e)
3207                                         xe_engine_get(e);
3208                                 xe_vm_get(vm);
3209                         }
3210
3211                         err = vm_bind_ioctl_async(vm, vmas[j], e, bos[j],
3212                                                   bind_ops + j, __syncs,
3213                                                   __num_syncs);
3214                         if (err && !last) {
3215                                 if (e)
3216                                         xe_engine_put(e);
3217                                 xe_vm_put(vm);
3218                         }
3219                         if (err)
3220                                 break;
3221                 } else {
3222                         XE_BUG_ON(j != 0);      /* Not supported */
3223                         err = vm_bind_ioctl(vm, vmas[j], e, bos[j],
3224                                             bind_ops + j, __syncs,
3225                                             __num_syncs, NULL);
3226                         break;  /* Needed so cleanup loops work */
3227                 }
3228         }
3229
3230         /* Most of the cleanup is owned by the async bind worker */
3231         if (async && !err) {
3232                 up_write(&vm->lock);
3233                 if (args->num_binds > 1)
3234                         kfree(syncs);
3235                 goto free_objs;
3236         }
3237
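             /*
              * Unwind for binds that were never handed off to the async worker:
              * destroy the VMAs created above, including any new ones linked on
              * the unbind_link lists.
              */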
3238 destroy_vmas:
3239         for (i = j; err && i < args->num_binds; ++i) {
3240                 u32 op = bind_ops[i].op;
3241                 struct xe_vma *vma, *next;
3242
3243                 if (!vmas[i])
3244                         break;
3245
3246                 list_for_each_entry_safe(vma, next, &vmas[i]->unbind_link,
3247                                          unbind_link) {
3248                         list_del_init(&vma->unbind_link);
3249                         if (!vma->destroyed) {
3250                                 prep_vma_destroy(vm, vma);
3251                                 xe_vma_destroy_unlocked(vma);
3252                         }
3253                 }
3254
3255                 switch (VM_BIND_OP(op)) {
3256                 case XE_VM_BIND_OP_MAP:
3257                         prep_vma_destroy(vm, vmas[i]);
3258                         xe_vma_destroy_unlocked(vmas[i]);
3259                         break;
3260                 case XE_VM_BIND_OP_MAP_USERPTR:
3261                         prep_vma_destroy(vm, vmas[i]);
3262                         xe_vma_destroy_unlocked(vmas[i]);
3263                         break;
3264                 }
3265         }
3266 release_vm_lock:
3267         up_write(&vm->lock);
3268 free_syncs:
3269         while (num_syncs--) {
3270                 if (async && j &&
3271                     !(syncs[num_syncs].flags & DRM_XE_SYNC_SIGNAL))
3272                         continue;       /* Still in async worker */
3273                 xe_sync_entry_cleanup(&syncs[num_syncs]);
3274         }
3275
3276         kfree(syncs);
3277 put_obj:
3278         for (i = j; i < args->num_binds; ++i)
3279                 xe_bo_put(bos[i]);
3280 put_engine:
3281         if (e)
3282                 xe_engine_put(e);
3283 put_vm:
3284         xe_vm_put(vm);
3285 free_objs:
3286         kfree(bos);
3287         kfree(vmas);
3288         if (args->num_binds > 1)
3289                 kfree(bind_ops);
3290         return err;
3291 }
3292
3293 /*
3294  * XXX: Using the TTM wrappers for now, likely can call into dma-resv code
3295  * directly to optimize. Also this likely should be an inline function.
3296  */
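/*
 * Rough sketch only (not wired up), assuming the VM's single reservation
 * object is all that needs locking; a direct dma-resv version could look
 * something like:
 *
 *	int err;
 *
 *	ww_acquire_init(ww, &reservation_ww_class);
 *	err = intr ? dma_resv_lock_interruptible(&vm->resv, ww) :
 *		     dma_resv_lock(&vm->resv, ww);
 *	if (!err)
 *		err = dma_resv_reserve_fences(&vm->resv, num_resv);
 *	if (err)
 *		ww_acquire_fini(ww);
 *	return err;
 */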
3297 int xe_vm_lock(struct xe_vm *vm, struct ww_acquire_ctx *ww,
3298                int num_resv, bool intr)
3299 {
3300         struct ttm_validate_buffer tv_vm;
3301         LIST_HEAD(objs);
3302         LIST_HEAD(dups);
3303
3304         XE_BUG_ON(!ww);
3305
3306         tv_vm.num_shared = num_resv;
3307         tv_vm.bo = xe_vm_ttm_bo(vm);
3308         list_add_tail(&tv_vm.head, &objs);
3309
3310         return ttm_eu_reserve_buffers(ww, &objs, intr, &dups);
3311 }
3312
3313 void xe_vm_unlock(struct xe_vm *vm, struct ww_acquire_ctx *ww)
3314 {
3315         dma_resv_unlock(&vm->resv);
3316         ww_acquire_fini(ww);
3317 }
3318
3319 /**
3320  * xe_vm_invalidate_vma - invalidate GPU mappings for VMA without a lock
3321  * @vma: VMA to invalidate
3322  *
3323  * Walks the page-table leaves, zeroing the entries owned by this VMA,
3324  * issues a TLB invalidation on each affected GT, and blocks until all
3325  * invalidations have completed.
3326  *
3327  * Returns 0 for success, negative error code otherwise.
3328  */
3329 int xe_vm_invalidate_vma(struct xe_vma *vma)
3330 {
3331         struct xe_device *xe = vma->vm->xe;
3332         struct xe_gt *gt;
3333         u32 gt_needs_invalidate = 0;
3334         int seqno[XE_MAX_GT];
3335         u8 id;
3336         int ret;
3337
3338         XE_BUG_ON(!xe_vm_in_fault_mode(vma->vm));
3339         trace_xe_vma_usm_invalidate(vma);
3340
3341         /* Check that we don't race with page-table updates */
3342         if (IS_ENABLED(CONFIG_PROVE_LOCKING)) {
3343                 if (xe_vma_is_userptr(vma)) {
3344                         WARN_ON_ONCE(!mmu_interval_check_retry
3345                                      (&vma->userptr.notifier,
3346                                       vma->userptr.notifier_seq));
3347                         WARN_ON_ONCE(!dma_resv_test_signaled(&vma->vm->resv,
3348                                                              DMA_RESV_USAGE_BOOKKEEP));
3349
3350                 } else {
3351                         xe_bo_assert_held(vma->bo);
3352                 }
3353         }
3354
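             /*
              * Zap the PTEs on every GT that maps this VMA and kick off a TLB
              * invalidation there, then wait for all of them to complete.
              */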
3355         for_each_gt(gt, xe, id) {
3356                 if (xe_pt_zap_ptes(gt, vma)) {
3357                         gt_needs_invalidate |= BIT(id);
3358                         xe_device_wmb(xe);
3359                         seqno[id] = xe_gt_tlb_invalidation_vma(gt, NULL, vma);
3360                         if (seqno[id] < 0)
3361                                 return seqno[id];
3362                 }
3363         }
3364
3365         for_each_gt(gt, xe, id) {
3366                 if (gt_needs_invalidate & BIT(id)) {
3367                         ret = xe_gt_tlb_invalidation_wait(gt, seqno[id]);
3368                         if (ret < 0)
3369                                 return ret;
3370                 }
3371         }
3372
3373         vma->usm.gt_invalidated = vma->gt_mask;
3374
3375         return 0;
3376 }
3377
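/*
 * Dump the VM's page-table root and every VMA mapping for the simple error
 * capture; stubbed out when CONFIG_DRM_XE_SIMPLE_ERROR_CAPTURE is disabled.
 */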
3378 #if IS_ENABLED(CONFIG_DRM_XE_SIMPLE_ERROR_CAPTURE)
3379 int xe_analyze_vm(struct drm_printer *p, struct xe_vm *vm, int gt_id)
3380 {
3381         struct rb_node *node;
3382         bool is_lmem;
3383         uint64_t addr;
3384
3385         if (!down_read_trylock(&vm->lock)) {
3386                 drm_printf(p, " Failed to acquire VM lock to dump capture\n");
3387                 return 0;
3388         }
3389         if (vm->pt_root[gt_id]) {
3390                 addr = xe_bo_addr(vm->pt_root[gt_id]->bo, 0, GEN8_PAGE_SIZE, &is_lmem);
3391                 drm_printf(p, " VM root: A:0x%llx %s\n", addr, is_lmem ? "LMEM" : "SYS");
3392         }
3393
3394         for (node = rb_first(&vm->vmas); node; node = rb_next(node)) {
3395                 struct xe_vma *vma = to_xe_vma(node);
3396                 bool is_userptr = xe_vma_is_userptr(vma);
3397
3398                 if (is_userptr) {
3399                         struct xe_res_cursor cur;
3400
3401                         xe_res_first_sg(vma->userptr.sg, 0, GEN8_PAGE_SIZE, &cur);
3402                         addr = xe_res_dma(&cur);
3403                 } else {
3404                         addr = xe_bo_addr(vma->bo, 0, GEN8_PAGE_SIZE, &is_lmem);
3405                 }
3406                 drm_printf(p, " [%016llx-%016llx] S:0x%016llx A:%016llx %s\n",
3407                            vma->start, vma->end, vma->end - vma->start + 1ull,
3408                            addr, is_userptr ? "USR" : is_lmem ? "VRAM" : "SYS");
3409         }
3410         up_read(&vm->lock);
3411
3412         return 0;
3413 }
3414 #else
3415 int xe_analyze_vm(struct drm_printer *p, struct xe_vm *vm, int gt_id)
3416 {
3417         return 0;
3418 }
3419 #endif