drm/xe: Handle -EDEADLK case in preempt worker
drivers/gpu/drm/xe/xe_vm.c
1 // SPDX-License-Identifier: MIT
2 /*
3  * Copyright © 2021 Intel Corporation
4  */
5
6 #include "xe_vm.h"
7
8 #include <linux/dma-fence-array.h>
9
10 #include <drm/ttm/ttm_execbuf_util.h>
11 #include <drm/ttm/ttm_tt.h>
12 #include <drm/xe_drm.h>
13 #include <linux/delay.h>
14 #include <linux/kthread.h>
15 #include <linux/mm.h>
16 #include <linux/swap.h>
17
18 #include "xe_bo.h"
19 #include "xe_device.h"
20 #include "xe_engine.h"
21 #include "xe_gt.h"
22 #include "xe_gt_pagefault.h"
23 #include "xe_gt_tlb_invalidation.h"
24 #include "xe_migrate.h"
25 #include "xe_pm.h"
26 #include "xe_preempt_fence.h"
27 #include "xe_pt.h"
28 #include "xe_res_cursor.h"
29 #include "xe_sync.h"
30 #include "xe_trace.h"
31
32 #define TEST_VM_ASYNC_OPS_ERROR
33
34 /**
35  * xe_vma_userptr_check_repin() - Advisory check for repin needed
36  * @vma: The userptr vma
37  *
38  * Check if the userptr vma has been invalidated since last successful
39  * repin. The check is advisory only and the function can be called
40  * without the vm->userptr.notifier_lock held. There is no guarantee that the
41  * vma userptr will remain valid after a lockless check, so typically
42  * the call needs to be followed by a proper check under the notifier_lock.
43  *
44  * Return: 0 if userptr vma is valid, -EAGAIN otherwise; repin recommended.
45  */
46 int xe_vma_userptr_check_repin(struct xe_vma *vma)
47 {
48         return mmu_interval_check_retry(&vma->userptr.notifier,
49                                         vma->userptr.notifier_seq) ?
50                 -EAGAIN : 0;
51 }
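
/*
 * A minimal sketch of the intended calling pattern (hypothetical caller;
 * assumes vm->lock is already held):
 *
 *      if (xe_vma_userptr_check_repin(vma) == -EAGAIN)
 *              err = xe_vma_userptr_pin_pages(vma);
 *
 * The lockless check is only a hint that a repin is needed; any decision that
 * depends on the pages staying valid still has to be re-checked under
 * vm->userptr.notifier_lock, as done in preempt_rebind_work_func() below.
 */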
52
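/*
 * Pin the pages backing a userptr VMA and (re)build its DMA-mapped sg table.
 * Called with vm->lock held; retries internally if the MMU notifier
 * invalidates the range while we are pinning. Returns 0 on success or a
 * negative error code.
 */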
53 int xe_vma_userptr_pin_pages(struct xe_vma *vma)
54 {
55         struct xe_vm *vm = vma->vm;
56         struct xe_device *xe = vm->xe;
57         const unsigned long num_pages =
58                 (vma->end - vma->start + 1) >> PAGE_SHIFT;
59         struct page **pages;
60         bool in_kthread = !current->mm;
61         unsigned long notifier_seq;
62         int pinned, ret, i;
63         bool read_only = vma->pte_flags & XE_PTE_READ_ONLY;
64
65         lockdep_assert_held(&vm->lock);
66         XE_BUG_ON(!xe_vma_is_userptr(vma));
67 retry:
68         if (vma->destroyed)
69                 return 0;
70
71         notifier_seq = mmu_interval_read_begin(&vma->userptr.notifier);
72         if (notifier_seq == vma->userptr.notifier_seq)
73                 return 0;
74
75         pages = kvmalloc_array(num_pages, sizeof(*pages), GFP_KERNEL);
76         if (!pages)
77                 return -ENOMEM;
78
79         if (vma->userptr.sg) {
80                 dma_unmap_sgtable(xe->drm.dev,
81                                   vma->userptr.sg,
82                                   read_only ? DMA_TO_DEVICE :
83                                   DMA_BIDIRECTIONAL, 0);
84                 sg_free_table(vma->userptr.sg);
85                 vma->userptr.sg = NULL;
86         }
87
88         pinned = ret = 0;
89         if (in_kthread) {
90                 if (!mmget_not_zero(vma->userptr.notifier.mm)) {
91                         ret = -EFAULT;
92                         goto mm_closed;
93                 }
94                 kthread_use_mm(vma->userptr.notifier.mm);
95         }
96
97         while (pinned < num_pages) {
98                 ret = get_user_pages_fast(vma->userptr.ptr + pinned * PAGE_SIZE,
99                                           num_pages - pinned,
100                                           read_only ? 0 : FOLL_WRITE,
101                                           &pages[pinned]);
102                 if (ret < 0) {
103                         if (in_kthread)
104                                 ret = 0;
105                         break;
106                 }
107
108                 pinned += ret;
109                 ret = 0;
110         }
111
112         if (in_kthread) {
113                 kthread_unuse_mm(vma->userptr.notifier.mm);
114                 mmput(vma->userptr.notifier.mm);
115         }
116 mm_closed:
117         if (ret)
118                 goto out;
119
120         ret = sg_alloc_table_from_pages(&vma->userptr.sgt, pages, pinned,
121                                         0, (u64)pinned << PAGE_SHIFT,
122                                         GFP_KERNEL);
123         if (ret) {
124                 vma->userptr.sg = NULL;
125                 goto out;
126         }
127         vma->userptr.sg = &vma->userptr.sgt;
128
129         ret = dma_map_sgtable(xe->drm.dev, vma->userptr.sg,
130                               read_only ? DMA_TO_DEVICE :
131                               DMA_BIDIRECTIONAL,
132                               DMA_ATTR_SKIP_CPU_SYNC |
133                               DMA_ATTR_NO_KERNEL_MAPPING);
134         if (ret) {
135                 sg_free_table(vma->userptr.sg);
136                 vma->userptr.sg = NULL;
137                 goto out;
138         }
139
140         for (i = 0; i < pinned; ++i) {
141                 if (!read_only) {
142                         lock_page(pages[i]);
143                         set_page_dirty(pages[i]);
144                         unlock_page(pages[i]);
145                 }
146
147                 mark_page_accessed(pages[i]);
148         }
149
150 out:
151         release_pages(pages, pinned);
152         kvfree(pages);
153
154         if (!(ret < 0)) {
155                 vma->userptr.notifier_seq = notifier_seq;
156                 if (xe_vma_userptr_check_repin(vma) == -EAGAIN)
157                         goto retry;
158         }
159
160         return ret < 0 ? ret : 0;
161 }
162
163 static bool preempt_fences_waiting(struct xe_vm *vm)
164 {
165         struct xe_engine *e;
166
167         lockdep_assert_held(&vm->lock);
168         xe_vm_assert_held(vm);
169
170         list_for_each_entry(e, &vm->preempt.engines, compute.link) {
171                 if (!e->compute.pfence || (e->compute.pfence &&
172                     test_bit(DMA_FENCE_FLAG_ENABLE_SIGNAL_BIT,
173                              &e->compute.pfence->flags))) {
174                         return true;
175                 }
176         }
177
178         return false;
179 }
180
181 static void free_preempt_fences(struct list_head *list)
182 {
183         struct list_head *link, *next;
184
185         list_for_each_safe(link, next, list)
186                 xe_preempt_fence_free(to_preempt_fence_from_link(link));
187 }
188
189 static int alloc_preempt_fences(struct xe_vm *vm, struct list_head *list,
190                                 unsigned int *count)
191 {
192         lockdep_assert_held(&vm->lock);
193         xe_vm_assert_held(vm);
194
195         if (*count >= vm->preempt.num_engines)
196                 return 0;
197
198         for (; *count < vm->preempt.num_engines; ++(*count)) {
199                 struct xe_preempt_fence *pfence = xe_preempt_fence_alloc();
200
201                 if (IS_ERR(pfence))
202                         return PTR_ERR(pfence);
203
204                 list_move_tail(xe_preempt_fence_link(pfence), list);
205         }
206
207         return 0;
208 }
209
210 static int wait_for_existing_preempt_fences(struct xe_vm *vm)
211 {
212         struct xe_engine *e;
213
214         xe_vm_assert_held(vm);
215
216         list_for_each_entry(e, &vm->preempt.engines, compute.link) {
217                 if (e->compute.pfence) {
218                         long timeout = dma_fence_wait(e->compute.pfence, false);
219
220                         if (timeout < 0)
221                                 return -ETIME;
222                         dma_fence_put(e->compute.pfence);
223                         e->compute.pfence = NULL;
224                 }
225         }
226
227         return 0;
228 }
229
230 static bool xe_vm_is_idle(struct xe_vm *vm)
231 {
232         struct xe_engine *e;
233
234         xe_vm_assert_held(vm);
235         list_for_each_entry(e, &vm->preempt.engines, compute.link) {
236                 if (!xe_engine_is_idle(e))
237                         return false;
238         }
239
240         return true;
241 }
242
243 static void arm_preempt_fences(struct xe_vm *vm, struct list_head *list)
244 {
245         struct list_head *link;
246         struct xe_engine *e;
247
248         list_for_each_entry(e, &vm->preempt.engines, compute.link) {
249                 struct dma_fence *fence;
250
251                 link = list->next;
252                 XE_BUG_ON(link == list);
253
254                 fence = xe_preempt_fence_arm(to_preempt_fence_from_link(link),
255                                              e, e->compute.context,
256                                              ++e->compute.seqno);
257                 dma_fence_put(e->compute.pfence);
258                 e->compute.pfence = fence;
259         }
260 }
261
262 static int add_preempt_fences(struct xe_vm *vm, struct xe_bo *bo)
263 {
264         struct xe_engine *e;
265         struct ww_acquire_ctx ww;
266         int err;
267
268         err = xe_bo_lock(bo, &ww, vm->preempt.num_engines, true);
269         if (err)
270                 return err;
271
272         list_for_each_entry(e, &vm->preempt.engines, compute.link)
273                 if (e->compute.pfence) {
274                         dma_resv_add_fence(bo->ttm.base.resv,
275                                            e->compute.pfence,
276                                            DMA_RESV_USAGE_BOOKKEEP);
277                 }
278
279         xe_bo_unlock(bo, &ww);
280         return 0;
281 }
282
283 /**
284  * xe_vm_fence_all_extobjs() - Add a fence to vm's external objects' resv
285  * @vm: The vm.
286  * @fence: The fence to add.
287  * @usage: The resv usage for the fence.
288  *
289  * Loops over all of the vm's external object bindings and adds a @fence
290  * with the given @usage to all of the external object's reservation
291  * objects.
292  */
293 void xe_vm_fence_all_extobjs(struct xe_vm *vm, struct dma_fence *fence,
294                              enum dma_resv_usage usage)
295 {
296         struct xe_vma *vma;
297
298         list_for_each_entry(vma, &vm->extobj.list, extobj.link)
299                 dma_resv_add_fence(vma->bo->ttm.base.resv, fence, usage);
300 }
301
302 static void resume_and_reinstall_preempt_fences(struct xe_vm *vm)
303 {
304         struct xe_engine *e;
305
306         lockdep_assert_held(&vm->lock);
307         xe_vm_assert_held(vm);
308
309         list_for_each_entry(e, &vm->preempt.engines, compute.link) {
310                 e->ops->resume(e);
311
312                 dma_resv_add_fence(&vm->resv, e->compute.pfence,
313                                    DMA_RESV_USAGE_BOOKKEEP);
314                 xe_vm_fence_all_extobjs(vm, e->compute.pfence,
315                                         DMA_RESV_USAGE_BOOKKEEP);
316         }
317 }
318
319 int xe_vm_add_compute_engine(struct xe_vm *vm, struct xe_engine *e)
320 {
321         struct ttm_validate_buffer tv_onstack[XE_ONSTACK_TV];
322         struct ttm_validate_buffer *tv;
323         struct ww_acquire_ctx ww;
324         struct list_head objs;
325         struct dma_fence *pfence;
326         int err;
327         bool wait;
328
329         XE_BUG_ON(!xe_vm_in_compute_mode(vm));
330
331         down_write(&vm->lock);
332
333         err = xe_vm_lock_dma_resv(vm, &ww, tv_onstack, &tv, &objs, true, 1);
334         if (err)
335                 goto out_unlock_outer;
336
337         pfence = xe_preempt_fence_create(e, e->compute.context,
338                                          ++e->compute.seqno);
339         if (!pfence) {
340                 err = -ENOMEM;
341                 goto out_unlock;
342         }
343
344         list_add(&e->compute.link, &vm->preempt.engines);
345         ++vm->preempt.num_engines;
346         e->compute.pfence = pfence;
347
348         down_read(&vm->userptr.notifier_lock);
349
350         dma_resv_add_fence(&vm->resv, pfence,
351                            DMA_RESV_USAGE_BOOKKEEP);
352
353         xe_vm_fence_all_extobjs(vm, pfence, DMA_RESV_USAGE_BOOKKEEP);
354
355         /*
356          * Check whether a preemption on the VM or a userptr invalidation is
357          * in flight; if so, trigger this preempt fence to sync state with the
358          * other preempt fences on the VM.
359          */
360         wait = __xe_vm_userptr_needs_repin(vm) || preempt_fences_waiting(vm);
361         if (wait)
362                 dma_fence_enable_sw_signaling(pfence);
363
364         up_read(&vm->userptr.notifier_lock);
365
366 out_unlock:
367         xe_vm_unlock_dma_resv(vm, tv_onstack, tv, &ww, &objs);
368 out_unlock_outer:
369         up_write(&vm->lock);
370
371         return err;
372 }
373
374 /**
375  * __xe_vm_userptr_needs_repin() - Check whether the VM does have userptrs
376  * that need repinning.
377  * @vm: The VM.
378  *
379  * This function checks for whether the VM has userptrs that need repinning,
380  * and provides a release-type barrier on the userptr.notifier_lock after
381  * checking.
382  *
383  * Return: 0 if there are no userptrs needing repinning, -EAGAIN if there are.
384  */
385 int __xe_vm_userptr_needs_repin(struct xe_vm *vm)
386 {
387         lockdep_assert_held_read(&vm->userptr.notifier_lock);
388
389         return (list_empty(&vm->userptr.repin_list) &&
390                 list_empty(&vm->userptr.invalidated)) ? 0 : -EAGAIN;
391 }
392
393 /**
394  * xe_vm_lock_dma_resv() - Lock the vm dma_resv object and the dma_resv
395  * objects of the vm's external buffer objects.
396  * @vm: The vm.
397  * @ww: Pointer to a struct ww_acquire_ctx locking context.
398  * @tv_onstack: Array size XE_ONSTACK_TV of storage for the struct
399  * ttm_validate_buffers used for locking.
400  * @tv: Pointer to a pointer that on output contains the actual storage used.
401  * @objs: List head for the buffer objects locked.
402  * @intr: Whether to lock interruptible.
403  * @num_shared: Number of dma-fence slots to reserve in the locked objects.
404  *
405  * Locks the vm dma-resv objects and all the dma-resv objects of the
406  * buffer objects on the vm external object list. The TTM utilities require
407  * a list of struct ttm_validate_buffers pointing to the actual buffer
408  * objects to lock. Storage for those struct ttm_validate_buffers should
409  * be provided in @tv_onstack, and is typically reserved on the stack
410  * of the caller. If the size of @tv_onstack isn't sufficient, then
411  * storage will be allocated internally using kvmalloc().
412  *
413  * The function performs deadlock handling internally, and after a
414  * successful return the ww locking transaction should be considered
415  * sealed.
416  *
417  * Return: 0 on success, Negative error code on error. In particular if
418  * @intr is set to true, -EINTR or -ERESTARTSYS may be returned. In case
419  * of error, any locking performed has been reverted.
420  */
421 int xe_vm_lock_dma_resv(struct xe_vm *vm, struct ww_acquire_ctx *ww,
422                         struct ttm_validate_buffer *tv_onstack,
423                         struct ttm_validate_buffer **tv,
424                         struct list_head *objs,
425                         bool intr,
426                         unsigned int num_shared)
427 {
428         struct ttm_validate_buffer *tv_vm, *tv_bo;
429         struct xe_vma *vma, *next;
430         LIST_HEAD(dups);
431         int err;
432
433         lockdep_assert_held(&vm->lock);
434
435         if (vm->extobj.entries < XE_ONSTACK_TV) {
436                 tv_vm = tv_onstack;
437         } else {
438                 tv_vm = kvmalloc_array(vm->extobj.entries + 1, sizeof(*tv_vm),
439                                        GFP_KERNEL);
440                 if (!tv_vm)
441                         return -ENOMEM;
442         }
443         tv_bo = tv_vm + 1;
444
445         INIT_LIST_HEAD(objs);
446         list_for_each_entry(vma, &vm->extobj.list, extobj.link) {
447                 tv_bo->num_shared = num_shared;
448                 tv_bo->bo = &vma->bo->ttm;
449
450                 list_add_tail(&tv_bo->head, objs);
451                 tv_bo++;
452         }
453         tv_vm->num_shared = num_shared;
454         tv_vm->bo = xe_vm_ttm_bo(vm);
455         list_add_tail(&tv_vm->head, objs);
456         err = ttm_eu_reserve_buffers(ww, objs, intr, &dups);
457         if (err)
458                 goto out_err;
459
460         spin_lock(&vm->notifier.list_lock);
461         list_for_each_entry_safe(vma, next, &vm->notifier.rebind_list,
462                                  notifier.rebind_link) {
463                 xe_bo_assert_held(vma->bo);
464
465                 list_del_init(&vma->notifier.rebind_link);
466                 if (vma->gt_present && !vma->destroyed)
467                         list_move_tail(&vma->rebind_link, &vm->rebind_list);
468         }
469         spin_unlock(&vm->notifier.list_lock);
470
471         *tv = tv_vm;
472         return 0;
473
474 out_err:
475         if (tv_vm != tv_onstack)
476                 kvfree(tv_vm);
477
478         return err;
479 }
480
481 /**
482  * xe_vm_unlock_dma_resv() - Unlock reservation objects locked by
483  * xe_vm_lock_dma_resv()
484  * @vm: The vm.
485  * @tv_onstack: The @tv_onstack array given to xe_vm_lock_dma_resv().
486  * @tv: The value of *@tv given by xe_vm_lock_dma_resv().
487  * @ww: The ww_acquire_context used for locking.
488  * @objs: The list returned from xe_vm_lock_dma_resv().
489  *
490  * Unlocks the reservation objects and frees any memory allocated by
491  * xe_vm_lock_dma_resv().
492  */
493 void xe_vm_unlock_dma_resv(struct xe_vm *vm,
494                            struct ttm_validate_buffer *tv_onstack,
495                            struct ttm_validate_buffer *tv,
496                            struct ww_acquire_ctx *ww,
497                            struct list_head *objs)
498 {
499         /*
500          * Nothing should've been able to enter the list while we were locked,
501          * since we've held the dma-resvs of all the vm's external objects,
502          * and holding the dma_resv of an object is required for list
503          * addition, and we shouldn't add ourselves.
504          */
505         XE_WARN_ON(!list_empty(&vm->notifier.rebind_list));
506
507         ttm_eu_backoff_reservation(ww, objs);
508         if (tv && tv != tv_onstack)
509                 kvfree(tv);
510 }
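
/*
 * A minimal usage sketch of the lock/unlock pair (hypothetical caller; the
 * real callers are xe_vm_add_compute_engine() above and
 * preempt_rebind_work_func() below):
 *
 *      struct ttm_validate_buffer tv_onstack[XE_ONSTACK_TV];
 *      struct ttm_validate_buffer *tv;
 *      struct ww_acquire_ctx ww;
 *      struct list_head objs;
 *      int err;
 *
 *      err = xe_vm_lock_dma_resv(vm, &ww, tv_onstack, &tv, &objs, true, 1);
 *      if (err)
 *              return err;
 *      ... validate / bind with the vm and external-object resvs held ...
 *      xe_vm_unlock_dma_resv(vm, tv_onstack, tv, &ww, &objs);
 */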
511
512 #define XE_VM_REBIND_RETRY_TIMEOUT_MS 1000
513
514 static void preempt_rebind_work_func(struct work_struct *w)
515 {
516         struct xe_vm *vm = container_of(w, struct xe_vm, preempt.rebind_work);
517         struct xe_vma *vma;
518         struct ttm_validate_buffer tv_onstack[XE_ONSTACK_TV];
519         struct ttm_validate_buffer *tv;
520         struct ww_acquire_ctx ww;
521         struct list_head objs;
522         struct dma_fence *rebind_fence;
523         unsigned int fence_count = 0;
524         LIST_HEAD(preempt_fences);
525         ktime_t end = 0;
526         int err;
527         long wait;
528         int __maybe_unused tries = 0;
529
530         XE_BUG_ON(!xe_vm_in_compute_mode(vm));
531         trace_xe_vm_rebind_worker_enter(vm);
532
533         if (xe_vm_is_closed(vm)) {
534                 trace_xe_vm_rebind_worker_exit(vm);
535                 return;
536         }
537
538         down_write(&vm->lock);
539
540 retry:
541         if (vm->async_ops.error)
542                 goto out_unlock_outer;
543
544         /*
545          * Extreme corner where we exit a VM error state with a munmap style VM
546          * unbind inflight which requires a rebind. In this case the rebind
547          * needs to install some fences into the dma-resv slots. The worker that
548          * does this is already queued; let it make progress by dropping vm->lock
549          * and trying this again.
550          */
551         if (vm->async_ops.munmap_rebind_inflight) {
552                 up_write(&vm->lock);
553                 flush_work(&vm->async_ops.work);
554                 goto retry;
555         }
556
557         if (xe_vm_userptr_check_repin(vm)) {
558                 err = xe_vm_userptr_pin(vm);
559                 if (err)
560                         goto out_unlock_outer;
561         }
562
563         err = xe_vm_lock_dma_resv(vm, &ww, tv_onstack, &tv, &objs,
564                                   false, vm->preempt.num_engines);
565         if (err)
566                 goto out_unlock_outer;
567
568         if (xe_vm_is_idle(vm)) {
569                 vm->preempt.rebind_deactivated = true;
570                 goto out_unlock;
571         }
572
573         /* Fresh preempt fences already installed. Everything is running. */
574         if (!preempt_fences_waiting(vm))
575                 goto out_unlock;
576
577         /*
578          * This makes sure vm is completely suspended and also balances
579          * xe_engine suspend- and resume; we resume *all* vm engines below.
580          */
581         err = wait_for_existing_preempt_fences(vm);
582         if (err)
583                 goto out_unlock;
584
585         err = alloc_preempt_fences(vm, &preempt_fences, &fence_count);
586         if (err)
587                 goto out_unlock;
588
589         list_for_each_entry(vma, &vm->rebind_list, rebind_link) {
590                 if (xe_vma_is_userptr(vma) || vma->destroyed)
591                         continue;
592
593                 err = xe_bo_validate(vma->bo, vm, false);
594                 if (err)
595                         goto out_unlock;
596         }
597
598         rebind_fence = xe_vm_rebind(vm, true);
599         if (IS_ERR(rebind_fence)) {
600                 err = PTR_ERR(rebind_fence);
601                 goto out_unlock;
602         }
603
604         if (rebind_fence) {
605                 dma_fence_wait(rebind_fence, false);
606                 dma_fence_put(rebind_fence);
607         }
608
609         /* Wait on munmap style VM unbinds */
610         wait = dma_resv_wait_timeout(&vm->resv,
611                                      DMA_RESV_USAGE_KERNEL,
612                                      false, MAX_SCHEDULE_TIMEOUT);
613         if (wait <= 0) {
614                 err = -ETIME;
615                 goto out_unlock;
616         }
617
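/*
 * With CONFIG_DRM_XE_USERPTR_INVAL_INJECT enabled, the first pass through
 * this worker always reports a retry so that the -EAGAIN path is exercised
 * even without a real userptr invalidation racing with us.
 */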
618 #define retry_required(__tries, __vm) \
619         (IS_ENABLED(CONFIG_DRM_XE_USERPTR_INVAL_INJECT) ? \
620         (!(__tries)++ || __xe_vm_userptr_needs_repin(__vm)) : \
621         __xe_vm_userptr_needs_repin(__vm))
622
623         down_read(&vm->userptr.notifier_lock);
624         if (retry_required(tries, vm)) {
625                 up_read(&vm->userptr.notifier_lock);
626                 err = -EAGAIN;
627                 goto out_unlock;
628         }
629
630 #undef retry_required
631
632         /* Point of no return. */
633         arm_preempt_fences(vm, &preempt_fences);
634         resume_and_reinstall_preempt_fences(vm);
635         up_read(&vm->userptr.notifier_lock);
636
637 out_unlock:
638         xe_vm_unlock_dma_resv(vm, tv_onstack, tv, &ww, &objs);
639 out_unlock_outer:
640         if (err == -EAGAIN) {
641                 trace_xe_vm_rebind_worker_retry(vm);
642                 goto retry;
643         }
644
645         /*
646          * With multiple active VMs, under memory pressure, it is possible that
647          * ttm_bo_validate() runs into -EDEADLK, in which case it returns -ENOMEM.
648          * Until TTM properly handles locking in such scenarios, the best the
649          * driver can do is retry with a timeout. Killing the VM or putting it
650          * in an error state after a timeout or other error scenarios is still TBD.
651          */
652         if (err == -ENOMEM) {
653                 ktime_t cur = ktime_get();
654
655                 end = end ? : ktime_add_ms(cur, XE_VM_REBIND_RETRY_TIMEOUT_MS);
656                 if (ktime_before(cur, end)) {
657                         msleep(20);
658                         trace_xe_vm_rebind_worker_retry(vm);
659                         goto retry;
660                 }
661         }
662         up_write(&vm->lock);
663
664         free_preempt_fences(&preempt_fences);
665
666         XE_WARN_ON(err < 0);    /* TODO: Kill VM or put in error state */
667         trace_xe_vm_rebind_worker_exit(vm);
668 }
669
670 struct async_op_fence;
671 static int __xe_vm_bind(struct xe_vm *vm, struct xe_vma *vma,
672                         struct xe_engine *e, struct xe_sync_entry *syncs,
673                         u32 num_syncs, struct async_op_fence *afence);
674
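/*
 * MMU interval notifier callback for userptr VMAs: bump the notifier
 * sequence, queue the VMA for repin/rebind (or invalidate it directly in
 * fault mode) and wait for in-flight GPU access to drain via the BOOKKEEP
 * fences attached to the vm.
 */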
675 static bool vma_userptr_invalidate(struct mmu_interval_notifier *mni,
676                                    const struct mmu_notifier_range *range,
677                                    unsigned long cur_seq)
678 {
679         struct xe_vma *vma = container_of(mni, struct xe_vma, userptr.notifier);
680         struct xe_vm *vm = vma->vm;
681         struct dma_resv_iter cursor;
682         struct dma_fence *fence;
683         long err;
684
685         XE_BUG_ON(!xe_vma_is_userptr(vma));
686         trace_xe_vma_userptr_invalidate(vma);
687
688         if (!mmu_notifier_range_blockable(range))
689                 return false;
690
691         down_write(&vm->userptr.notifier_lock);
692         mmu_interval_set_seq(mni, cur_seq);
693
694         /* No need to stop gpu access if the userptr is not yet bound. */
695         if (!vma->userptr.initial_bind) {
696                 up_write(&vm->userptr.notifier_lock);
697                 return true;
698         }
699
700         /*
701          * Tell exec and rebind worker they need to repin and rebind this
702          * userptr.
703          */
704         if (!xe_vm_in_fault_mode(vm) && !vma->destroyed && vma->gt_present) {
705                 spin_lock(&vm->userptr.invalidated_lock);
706                 list_move_tail(&vma->userptr.invalidate_link,
707                                &vm->userptr.invalidated);
708                 spin_unlock(&vm->userptr.invalidated_lock);
709         }
710
711         up_write(&vm->userptr.notifier_lock);
712
713         /*
714          * Preempt fences turn into schedule disables, pipeline these.
715          * Note that even in fault mode, we need to wait for binds and
716          * unbinds to complete, and those are attached as BOOKKEEP fences
717          * to the vm.
718          */
719         dma_resv_iter_begin(&cursor, &vm->resv,
720                             DMA_RESV_USAGE_BOOKKEEP);
721         dma_resv_for_each_fence_unlocked(&cursor, fence)
722                 dma_fence_enable_sw_signaling(fence);
723         dma_resv_iter_end(&cursor);
724
725         err = dma_resv_wait_timeout(&vm->resv,
726                                     DMA_RESV_USAGE_BOOKKEEP,
727                                     false, MAX_SCHEDULE_TIMEOUT);
728         XE_WARN_ON(err <= 0);
729
730         if (xe_vm_in_fault_mode(vm)) {
731                 err = xe_vm_invalidate_vma(vma);
732                 XE_WARN_ON(err);
733         }
734
735         trace_xe_vma_userptr_invalidate_complete(vma);
736
737         return true;
738 }
739
740 static const struct mmu_interval_notifier_ops vma_userptr_notifier_ops = {
741         .invalidate = vma_userptr_invalidate,
742 };
743
744 int xe_vm_userptr_pin(struct xe_vm *vm)
745 {
746         struct xe_vma *vma, *next;
747         int err = 0;
748         LIST_HEAD(tmp_evict);
749
750         lockdep_assert_held_write(&vm->lock);
751
752         /* Collect invalidated userptrs */
753         spin_lock(&vm->userptr.invalidated_lock);
754         list_for_each_entry_safe(vma, next, &vm->userptr.invalidated,
755                                  userptr.invalidate_link) {
756                 list_del_init(&vma->userptr.invalidate_link);
757                 list_move_tail(&vma->userptr_link, &vm->userptr.repin_list);
758         }
759         spin_unlock(&vm->userptr.invalidated_lock);
760
761         /* Pin and move to temporary list */
762         list_for_each_entry_safe(vma, next, &vm->userptr.repin_list, userptr_link) {
763                 err = xe_vma_userptr_pin_pages(vma);
764                 if (err < 0)
765                         goto out_err;
766
767                 list_move_tail(&vma->userptr_link, &tmp_evict);
768         }
769
770         /* Take lock and move to rebind_list for rebinding. */
771         err = dma_resv_lock_interruptible(&vm->resv, NULL);
772         if (err)
773                 goto out_err;
774
775         list_for_each_entry_safe(vma, next, &tmp_evict, userptr_link) {
776                 list_del_init(&vma->userptr_link);
777                 list_move_tail(&vma->rebind_link, &vm->rebind_list);
778         }
779
780         dma_resv_unlock(&vm->resv);
781
782         return 0;
783
784 out_err:
785         list_splice_tail(&tmp_evict, &vm->userptr.repin_list);
786
787         return err;
788 }
789
790 /**
791  * xe_vm_userptr_check_repin() - Check whether the VM might have userptrs
792  * that need repinning.
793  * @vm: The VM.
794  *
795  * This function does an advisory check for whether the VM has userptrs that
796  * need repinning.
797  *
798  * Return: 0 if there are no indications of userptrs needing repinning,
799  * -EAGAIN if there are.
800  */
801 int xe_vm_userptr_check_repin(struct xe_vm *vm)
802 {
803         return (list_empty_careful(&vm->userptr.repin_list) &&
804                 list_empty_careful(&vm->userptr.invalidated)) ? 0 : -EAGAIN;
805 }
806
807 static struct dma_fence *
808 xe_vm_bind_vma(struct xe_vma *vma, struct xe_engine *e,
809                struct xe_sync_entry *syncs, u32 num_syncs);
810
811 struct dma_fence *xe_vm_rebind(struct xe_vm *vm, bool rebind_worker)
812 {
813         struct dma_fence *fence = NULL;
814         struct xe_vma *vma, *next;
815
816         lockdep_assert_held(&vm->lock);
817         if (xe_vm_no_dma_fences(vm) && !rebind_worker)
818                 return NULL;
819
820         xe_vm_assert_held(vm);
821         list_for_each_entry_safe(vma, next, &vm->rebind_list, rebind_link) {
822                 XE_WARN_ON(!vma->gt_present);
823
824                 list_del_init(&vma->rebind_link);
825                 dma_fence_put(fence);
826                 if (rebind_worker)
827                         trace_xe_vma_rebind_worker(vma);
828                 else
829                         trace_xe_vma_rebind_exec(vma);
830                 fence = xe_vm_bind_vma(vma, NULL, NULL, 0);
831                 if (IS_ERR(fence))
832                         return fence;
833         }
834
835         return fence;
836 }
837
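/*
 * Allocate and initialise a VMA covering [start, end]. With a BO the VMA is
 * linked onto the BO's vma list; without one it is a userptr mapping and an
 * MMU interval notifier is registered on the current process' mm. Returns the
 * new VMA or an ERR_PTR() on failure.
 */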
838 static struct xe_vma *xe_vma_create(struct xe_vm *vm,
839                                     struct xe_bo *bo,
840                                     u64 bo_offset_or_userptr,
841                                     u64 start, u64 end,
842                                     bool read_only,
843                                     u64 gt_mask)
844 {
845         struct xe_vma *vma;
846         struct xe_gt *gt;
847         u8 id;
848
849         XE_BUG_ON(start >= end);
850         XE_BUG_ON(end >= vm->size);
851
852         vma = kzalloc(sizeof(*vma), GFP_KERNEL);
853         if (!vma) {
854                 vma = ERR_PTR(-ENOMEM);
855                 return vma;
856         }
857
858         INIT_LIST_HEAD(&vma->rebind_link);
859         INIT_LIST_HEAD(&vma->unbind_link);
860         INIT_LIST_HEAD(&vma->userptr_link);
861         INIT_LIST_HEAD(&vma->userptr.invalidate_link);
862         INIT_LIST_HEAD(&vma->notifier.rebind_link);
863         INIT_LIST_HEAD(&vma->extobj.link);
864
865         vma->vm = vm;
866         vma->start = start;
867         vma->end = end;
868         if (read_only)
869                 vma->pte_flags = XE_PTE_READ_ONLY;
870
871         if (gt_mask) {
872                 vma->gt_mask = gt_mask;
873         } else {
874                 for_each_gt(gt, vm->xe, id)
875                         if (!xe_gt_is_media_type(gt))
876                                 vma->gt_mask |= 0x1 << id;
877         }
878
879         if (vm->xe->info.platform == XE_PVC)
880                 vma->use_atomic_access_pte_bit = true;
881
882         if (bo) {
883                 xe_bo_assert_held(bo);
884                 vma->bo_offset = bo_offset_or_userptr;
885                 vma->bo = xe_bo_get(bo);
886                 list_add_tail(&vma->bo_link, &bo->vmas);
887         } else /* userptr */ {
888                 u64 size = end - start + 1;
889                 int err;
890
891                 vma->userptr.ptr = bo_offset_or_userptr;
892
893                 err = mmu_interval_notifier_insert(&vma->userptr.notifier,
894                                                    current->mm,
895                                                    vma->userptr.ptr, size,
896                                                    &vma_userptr_notifier_ops);
897                 if (err) {
898                         kfree(vma);
899                         vma = ERR_PTR(err);
900                         return vma;
901                 }
902
903                 vma->userptr.notifier_seq = LONG_MAX;
904                 xe_vm_get(vm);
905         }
906
907         return vma;
908 }
909
910 static bool vm_remove_extobj(struct xe_vma *vma)
911 {
912         if (!list_empty(&vma->extobj.link)) {
913                 vma->vm->extobj.entries--;
914                 list_del_init(&vma->extobj.link);
915                 return true;
916         }
917         return false;
918 }
919
920 static void xe_vma_destroy_late(struct xe_vma *vma)
921 {
922         struct xe_vm *vm = vma->vm;
923         struct xe_device *xe = vm->xe;
924         bool read_only = vma->pte_flags & XE_PTE_READ_ONLY;
925
926         if (xe_vma_is_userptr(vma)) {
927                 if (vma->userptr.sg) {
928                         dma_unmap_sgtable(xe->drm.dev,
929                                           vma->userptr.sg,
930                                           read_only ? DMA_TO_DEVICE :
931                                           DMA_BIDIRECTIONAL, 0);
932                         sg_free_table(vma->userptr.sg);
933                         vma->userptr.sg = NULL;
934                 }
935
936                 /*
937                  * Since userptr pages are not pinned, we can't remove
938                  * the notifier until we're sure the GPU is not accessing
939                  * them anymore.
940                  */
941                 mmu_interval_notifier_remove(&vma->userptr.notifier);
942                 xe_vm_put(vm);
943         } else {
944                 xe_bo_put(vma->bo);
945         }
946
947         kfree(vma);
948 }
949
950 static void vma_destroy_work_func(struct work_struct *w)
951 {
952         struct xe_vma *vma =
953                 container_of(w, struct xe_vma, destroy_work);
954
955         xe_vma_destroy_late(vma);
956 }
957
958 static struct xe_vma *
959 bo_has_vm_references_locked(struct xe_bo *bo, struct xe_vm *vm,
960                             struct xe_vma *ignore)
961 {
962         struct xe_vma *vma;
963
964         list_for_each_entry(vma, &bo->vmas, bo_link) {
965                 if (vma != ignore && vma->vm == vm && !vma->destroyed)
966                         return vma;
967         }
968
969         return NULL;
970 }
971
972 static bool bo_has_vm_references(struct xe_bo *bo, struct xe_vm *vm,
973                                  struct xe_vma *ignore)
974 {
975         struct ww_acquire_ctx ww;
976         bool ret;
977
978         xe_bo_lock(bo, &ww, 0, false);
979         ret = !!bo_has_vm_references_locked(bo, vm, ignore);
980         xe_bo_unlock(bo, &ww);
981
982         return ret;
983 }
984
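/*
 * An external (not vm-private) BO is tracked on vm->extobj.list through
 * exactly one of its VMAs: vm_insert_extobj() only adds the first VMA for a
 * given BO, and xe_vma_destroy() promotes another VMA for the same BO if the
 * tracked one goes away.
 */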
985 static void __vm_insert_extobj(struct xe_vm *vm, struct xe_vma *vma)
986 {
987         list_add(&vma->extobj.link, &vm->extobj.list);
988         vm->extobj.entries++;
989 }
990
991 static void vm_insert_extobj(struct xe_vm *vm, struct xe_vma *vma)
992 {
993         struct xe_bo *bo = vma->bo;
994
995         lockdep_assert_held_write(&vm->lock);
996
997         if (bo_has_vm_references(bo, vm, vma))
998                 return;
999
1000         __vm_insert_extobj(vm, vma);
1001 }
1002
1003 static void vma_destroy_cb(struct dma_fence *fence,
1004                            struct dma_fence_cb *cb)
1005 {
1006         struct xe_vma *vma = container_of(cb, struct xe_vma, destroy_cb);
1007
1008         INIT_WORK(&vma->destroy_work, vma_destroy_work_func);
1009         queue_work(system_unbound_wq, &vma->destroy_work);
1010 }
1011
1012 static void xe_vma_destroy(struct xe_vma *vma, struct dma_fence *fence)
1013 {
1014         struct xe_vm *vm = vma->vm;
1015
1016         lockdep_assert_held_write(&vm->lock);
1017         XE_BUG_ON(!list_empty(&vma->unbind_link));
1018
1019         if (xe_vma_is_userptr(vma)) {
1020                 XE_WARN_ON(!vma->destroyed);
1021                 spin_lock(&vm->userptr.invalidated_lock);
1022                 list_del_init(&vma->userptr.invalidate_link);
1023                 spin_unlock(&vm->userptr.invalidated_lock);
1024                 list_del(&vma->userptr_link);
1025         } else {
1026                 xe_bo_assert_held(vma->bo);
1027                 list_del(&vma->bo_link);
1028
1029                 spin_lock(&vm->notifier.list_lock);
1030                 list_del(&vma->notifier.rebind_link);
1031                 spin_unlock(&vm->notifier.list_lock);
1032
1033                 if (!vma->bo->vm && vm_remove_extobj(vma)) {
1034                         struct xe_vma *other;
1035
1036                         other = bo_has_vm_references_locked(vma->bo, vm, NULL);
1037
1038                         if (other)
1039                                 __vm_insert_extobj(vm, other);
1040                 }
1041         }
1042
1043         xe_vm_assert_held(vm);
1044         if (!list_empty(&vma->rebind_link))
1045                 list_del(&vma->rebind_link);
1046
1047         if (fence) {
1048                 int ret = dma_fence_add_callback(fence, &vma->destroy_cb,
1049                                                  vma_destroy_cb);
1050
1051                 if (ret) {
1052                         XE_WARN_ON(ret != -ENOENT);
1053                         xe_vma_destroy_late(vma);
1054                 }
1055         } else {
1056                 xe_vma_destroy_late(vma);
1057         }
1058 }
1059
1060 static void xe_vma_destroy_unlocked(struct xe_vma *vma)
1061 {
1062         struct ttm_validate_buffer tv[2];
1063         struct ww_acquire_ctx ww;
1064         struct xe_bo *bo = vma->bo;
1065         LIST_HEAD(objs);
1066         LIST_HEAD(dups);
1067         int err;
1068
1069         memset(tv, 0, sizeof(tv));
1070         tv[0].bo = xe_vm_ttm_bo(vma->vm);
1071         list_add(&tv[0].head, &objs);
1072
1073         if (bo) {
1074                 tv[1].bo = &xe_bo_get(bo)->ttm;
1075                 list_add(&tv[1].head, &objs);
1076         }
1077         err = ttm_eu_reserve_buffers(&ww, &objs, false, &dups);
1078         XE_WARN_ON(err);
1079
1080         xe_vma_destroy(vma, NULL);
1081
1082         ttm_eu_backoff_reservation(&ww, &objs);
1083         if (bo)
1084                 xe_bo_put(bo);
1085 }
1086
1087 static struct xe_vma *to_xe_vma(const struct rb_node *node)
1088 {
1089         BUILD_BUG_ON(offsetof(struct xe_vma, vm_node) != 0);
1090         return (struct xe_vma *)node;
1091 }
1092
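/*
 * VMAs in vm->vmas are kept in an rb-tree ordered by address range. Ranges
 * never overlap, and any overlap compares as equal, which is what lets
 * rb_find() in xe_vm_find_overlapping_vma() look a VMA up by any range that
 * intersects it.
 */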
1093 static int xe_vma_cmp(const struct xe_vma *a, const struct xe_vma *b)
1094 {
1095         if (a->end < b->start) {
1096                 return -1;
1097         } else if (b->end < a->start) {
1098                 return 1;
1099         } else {
1100                 return 0;
1101         }
1102 }
1103
1104 static bool xe_vma_less_cb(struct rb_node *a, const struct rb_node *b)
1105 {
1106         return xe_vma_cmp(to_xe_vma(a), to_xe_vma(b)) < 0;
1107 }
1108
1109 int xe_vma_cmp_vma_cb(const void *key, const struct rb_node *node)
1110 {
1111         struct xe_vma *cmp = to_xe_vma(node);
1112         const struct xe_vma *own = key;
1113
1114         if (own->start > cmp->end)
1115                 return 1;
1116
1117         if (own->end < cmp->start)
1118                 return -1;
1119
1120         return 0;
1121 }
1122
1123 struct xe_vma *
1124 xe_vm_find_overlapping_vma(struct xe_vm *vm, const struct xe_vma *vma)
1125 {
1126         struct rb_node *node;
1127
1128         if (xe_vm_is_closed(vm))
1129                 return NULL;
1130
1131         XE_BUG_ON(vma->end >= vm->size);
1132         lockdep_assert_held(&vm->lock);
1133
1134         node = rb_find(vma, &vm->vmas, xe_vma_cmp_vma_cb);
1135
1136         return node ? to_xe_vma(node) : NULL;
1137 }
1138
1139 static void xe_vm_insert_vma(struct xe_vm *vm, struct xe_vma *vma)
1140 {
1141         XE_BUG_ON(vma->vm != vm);
1142         lockdep_assert_held(&vm->lock);
1143
1144         rb_add(&vma->vm_node, &vm->vmas, xe_vma_less_cb);
1145 }
1146
1147 static void xe_vm_remove_vma(struct xe_vm *vm, struct xe_vma *vma)
1148 {
1149         XE_BUG_ON(vma->vm != vm);
1150         lockdep_assert_held(&vm->lock);
1151
1152         rb_erase(&vma->vm_node, &vm->vmas);
1153         if (vm->usm.last_fault_vma == vma)
1154                 vm->usm.last_fault_vma = NULL;
1155 }
1156
1157 static void async_op_work_func(struct work_struct *w);
1158 static void vm_destroy_work_func(struct work_struct *w);
1159
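/*
 * Create a VM: initialise the reservation object, locks and lists, allocate a
 * page-table root (and optional scratch tables) per non-media GT and, unless
 * this is the migration VM itself, create a per-GT copy engine used for
 * binds.
 */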
1160 struct xe_vm *xe_vm_create(struct xe_device *xe, u32 flags)
1161 {
1162         struct xe_vm *vm;
1163         int err, i = 0, number_gts = 0;
1164         struct xe_gt *gt;
1165         u8 id;
1166
1167         vm = kzalloc(sizeof(*vm), GFP_KERNEL);
1168         if (!vm)
1169                 return ERR_PTR(-ENOMEM);
1170
1171         vm->xe = xe;
1172         kref_init(&vm->refcount);
1173         dma_resv_init(&vm->resv);
1174
1175         vm->size = 1ull << xe_pt_shift(xe->info.vm_max_level + 1);
1176
1177         vm->vmas = RB_ROOT;
1178         vm->flags = flags;
1179
1180         init_rwsem(&vm->lock);
1181
1182         INIT_LIST_HEAD(&vm->rebind_list);
1183
1184         INIT_LIST_HEAD(&vm->userptr.repin_list);
1185         INIT_LIST_HEAD(&vm->userptr.invalidated);
1186         init_rwsem(&vm->userptr.notifier_lock);
1187         spin_lock_init(&vm->userptr.invalidated_lock);
1188
1189         INIT_LIST_HEAD(&vm->notifier.rebind_list);
1190         spin_lock_init(&vm->notifier.list_lock);
1191
1192         INIT_LIST_HEAD(&vm->async_ops.pending);
1193         INIT_WORK(&vm->async_ops.work, async_op_work_func);
1194         spin_lock_init(&vm->async_ops.lock);
1195
1196         INIT_WORK(&vm->destroy_work, vm_destroy_work_func);
1197
1198         INIT_LIST_HEAD(&vm->preempt.engines);
1199         vm->preempt.min_run_period_ms = 10;     /* FIXME: Wire up to uAPI */
1200
1201         INIT_LIST_HEAD(&vm->extobj.list);
1202
1203         if (!(flags & XE_VM_FLAG_MIGRATION)) {
1204                 /* We need to immediately exit from any D3 state */
1205                 xe_pm_runtime_get(xe);
1206                 xe_device_mem_access_get(xe);
1207         }
1208
1209         err = dma_resv_lock_interruptible(&vm->resv, NULL);
1210         if (err)
1211                 goto err_put;
1212
1213         if (IS_DGFX(xe) && xe->info.vram_flags & XE_VRAM_FLAGS_NEED64K)
1214                 vm->flags |= XE_VM_FLAGS_64K;
1215
1216         for_each_gt(gt, xe, id) {
1217                 if (xe_gt_is_media_type(gt))
1218                         continue;
1219
1220                 if (flags & XE_VM_FLAG_MIGRATION &&
1221                     gt->info.id != XE_VM_FLAG_GT_ID(flags))
1222                         continue;
1223
1224                 vm->pt_root[id] = xe_pt_create(vm, gt, xe->info.vm_max_level);
1225                 if (IS_ERR(vm->pt_root[id])) {
1226                         err = PTR_ERR(vm->pt_root[id]);
1227                         vm->pt_root[id] = NULL;
1228                         goto err_destroy_root;
1229                 }
1230         }
1231
1232         if (flags & XE_VM_FLAG_SCRATCH_PAGE) {
1233                 for_each_gt(gt, xe, id) {
1234                         if (!vm->pt_root[id])
1235                                 continue;
1236
1237                         err = xe_pt_create_scratch(xe, gt, vm);
1238                         if (err)
1239                                 goto err_scratch_pt;
1240                 }
1241         }
1242
1243         if (flags & DRM_XE_VM_CREATE_COMPUTE_MODE) {
1244                 INIT_WORK(&vm->preempt.rebind_work, preempt_rebind_work_func);
1245                 vm->flags |= XE_VM_FLAG_COMPUTE_MODE;
1246         }
1247
1248         if (flags & DRM_XE_VM_CREATE_ASYNC_BIND_OPS) {
1249                 vm->async_ops.fence.context = dma_fence_context_alloc(1);
1250                 vm->flags |= XE_VM_FLAG_ASYNC_BIND_OPS;
1251         }
1252
1253         /* Fill pt_root after allocating scratch tables */
1254         for_each_gt(gt, xe, id) {
1255                 if (!vm->pt_root[id])
1256                         continue;
1257
1258                 xe_pt_populate_empty(gt, vm, vm->pt_root[id]);
1259         }
1260         dma_resv_unlock(&vm->resv);
1261
1262         /* Kernel migration VM shouldn't have a circular loop. */
1263         if (!(flags & XE_VM_FLAG_MIGRATION)) {
1264                 for_each_gt(gt, xe, id) {
1265                         struct xe_vm *migrate_vm;
1266                         struct xe_engine *eng;
1267
1268                         if (!vm->pt_root[id])
1269                                 continue;
1270
1271                         migrate_vm = xe_migrate_get_vm(gt->migrate);
1272                         eng = xe_engine_create_class(xe, gt, migrate_vm,
1273                                                      XE_ENGINE_CLASS_COPY,
1274                                                      ENGINE_FLAG_VM);
1275                         xe_vm_put(migrate_vm);
1276                         if (IS_ERR(eng)) {
1277                                 xe_vm_close_and_put(vm);
1278                                 return ERR_CAST(eng);
1279                         }
1280                         vm->eng[id] = eng;
1281                         number_gts++;
1282                 }
1283         }
1284
1285         if (number_gts > 1)
1286                 vm->composite_fence_ctx = dma_fence_context_alloc(1);
1287
1288         mutex_lock(&xe->usm.lock);
1289         if (flags & XE_VM_FLAG_FAULT_MODE)
1290                 xe->usm.num_vm_in_fault_mode++;
1291         else if (!(flags & XE_VM_FLAG_MIGRATION))
1292                 xe->usm.num_vm_in_non_fault_mode++;
1293         mutex_unlock(&xe->usm.lock);
1294
1295         trace_xe_vm_create(vm);
1296
1297         return vm;
1298
1299 err_scratch_pt:
1300         for_each_gt(gt, xe, id) {
1301                 if (!vm->pt_root[id])
1302                         continue;
1303
1304                 i = vm->pt_root[id]->level;
1305                 while (i)
1306                         if (vm->scratch_pt[id][--i])
1307                                 xe_pt_destroy(vm->scratch_pt[id][i],
1308                                               vm->flags, NULL);
1309                 xe_bo_unpin(vm->scratch_bo[id]);
1310                 xe_bo_put(vm->scratch_bo[id]);
1311         }
1312 err_destroy_root:
1313         for_each_gt(gt, xe, id) {
1314                 if (vm->pt_root[id])
1315                         xe_pt_destroy(vm->pt_root[id], vm->flags, NULL);
1316         }
1317         dma_resv_unlock(&vm->resv);
1318 err_put:
1319         dma_resv_fini(&vm->resv);
1320         kfree(vm);
1321         if (!(flags & XE_VM_FLAG_MIGRATION)) {
1322                 xe_device_mem_access_put(xe);
1323                 xe_pm_runtime_put(xe);
1324         }
1325         return ERR_PTR(err);
1326 }
1327
1328 static void flush_async_ops(struct xe_vm *vm)
1329 {
1330         queue_work(system_unbound_wq, &vm->async_ops.work);
1331         flush_work(&vm->async_ops.work);
1332 }
1333
1334 static void vm_error_capture(struct xe_vm *vm, int err,
1335                              u32 op, u64 addr, u64 size)
1336 {
1337         struct drm_xe_vm_bind_op_error_capture capture;
1338         u64 __user *address =
1339                 u64_to_user_ptr(vm->async_ops.error_capture.addr);
1340         bool in_kthread = !current->mm;
1341
1342         capture.error = err;
1343         capture.op = op;
1344         capture.addr = addr;
1345         capture.size = size;
1346
1347         if (in_kthread) {
1348                 if (!mmget_not_zero(vm->async_ops.error_capture.mm))
1349                         goto mm_closed;
1350                 kthread_use_mm(vm->async_ops.error_capture.mm);
1351         }
1352
1353         if (copy_to_user(address, &capture, sizeof(capture)))
1354                 XE_WARN_ON("Copy to user failed");
1355
1356         if (in_kthread) {
1357                 kthread_unuse_mm(vm->async_ops.error_capture.mm);
1358                 mmput(vm->async_ops.error_capture.mm);
1359         }
1360
1361 mm_closed:
1362         wake_up_all(&vm->async_ops.error_capture.wq);
1363 }
1364
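/*
 * Close a VM and drop the creation reference: flush pending async ops and the
 * compute rebind worker, kill and release the per-GT bind engines, tear down
 * all VMAs (external BO VMAs are collected and destroyed with their own
 * locking afterwards) and free the scratch tables. The page-table roots are
 * freed later from vm_destroy_work_func().
 */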
1365 void xe_vm_close_and_put(struct xe_vm *vm)
1366 {
1367         struct rb_root contested = RB_ROOT;
1368         struct ww_acquire_ctx ww;
1369         struct xe_device *xe = vm->xe;
1370         struct xe_gt *gt;
1371         u8 id;
1372
1373         XE_BUG_ON(vm->preempt.num_engines);
1374
1375         vm->size = 0;
1376         smp_mb();
1377         flush_async_ops(vm);
1378         if (xe_vm_in_compute_mode(vm))
1379                 flush_work(&vm->preempt.rebind_work);
1380
1381         for_each_gt(gt, xe, id) {
1382                 if (vm->eng[id]) {
1383                         xe_engine_kill(vm->eng[id]);
1384                         xe_engine_put(vm->eng[id]);
1385                         vm->eng[id] = NULL;
1386                 }
1387         }
1388
1389         down_write(&vm->lock);
1390         xe_vm_lock(vm, &ww, 0, false);
1391         while (vm->vmas.rb_node) {
1392                 struct xe_vma *vma = to_xe_vma(vm->vmas.rb_node);
1393
1394                 if (xe_vma_is_userptr(vma)) {
1395                         down_read(&vm->userptr.notifier_lock);
1396                         vma->destroyed = true;
1397                         up_read(&vm->userptr.notifier_lock);
1398                 }
1399
1400                 rb_erase(&vma->vm_node, &vm->vmas);
1401
1402                 /* easy case, remove from VMA? */
1403                 if (xe_vma_is_userptr(vma) || vma->bo->vm) {
1404                         xe_vma_destroy(vma, NULL);
1405                         continue;
1406                 }
1407
1408                 rb_add(&vma->vm_node, &contested, xe_vma_less_cb);
1409         }
1410
1411         /*
1412          * All vm operations will add shared fences to resv.
1413          * The only exception is eviction for a shared object,
1414          * but even so, the unbind when evicted would still
1415          * install a fence to resv. Hence it's safe to
1416          * destroy the pagetables immediately.
1417          */
1418         for_each_gt(gt, xe, id) {
1419                 if (vm->scratch_bo[id]) {
1420                         u32 i;
1421
1422                         xe_bo_unpin(vm->scratch_bo[id]);
1423                         xe_bo_put(vm->scratch_bo[id]);
1424                         for (i = 0; i < vm->pt_root[id]->level; i++)
1425                                 xe_pt_destroy(vm->scratch_pt[id][i], vm->flags,
1426                                               NULL);
1427                 }
1428         }
1429         xe_vm_unlock(vm, &ww);
1430
1431         if (contested.rb_node) {
1432
1433                 /*
1434                  * VM is now dead; nodes cannot be re-added to vm->vmas.
1435                  * Since we hold a refcount to the bo, we can remove and free
1436                  * the members safely without locking.
1437                  */
1438                 while (contested.rb_node) {
1439                         struct xe_vma *vma = to_xe_vma(contested.rb_node);
1440
1441                         rb_erase(&vma->vm_node, &contested);
1442                         xe_vma_destroy_unlocked(vma);
1443                 }
1444         }
1445
1446         if (vm->async_ops.error_capture.addr)
1447                 wake_up_all(&vm->async_ops.error_capture.wq);
1448
1449         XE_WARN_ON(!list_empty(&vm->extobj.list));
1450         up_write(&vm->lock);
1451
1452         mutex_lock(&xe->usm.lock);
1453         if (vm->flags & XE_VM_FLAG_FAULT_MODE)
1454                 xe->usm.num_vm_in_fault_mode--;
1455         else if (!(vm->flags & XE_VM_FLAG_MIGRATION))
1456                 xe->usm.num_vm_in_non_fault_mode--;
1457         mutex_unlock(&xe->usm.lock);
1458
1459         xe_vm_put(vm);
1460 }
1461
1462 static void vm_destroy_work_func(struct work_struct *w)
1463 {
1464         struct xe_vm *vm =
1465                 container_of(w, struct xe_vm, destroy_work);
1466         struct ww_acquire_ctx ww;
1467         struct xe_device *xe = vm->xe;
1468         struct xe_gt *gt;
1469         u8 id;
1470         void *lookup;
1471
1472         /* xe_vm_close_and_put was not called? */
1473         XE_WARN_ON(vm->size);
1474
1475         if (!(vm->flags & XE_VM_FLAG_MIGRATION)) {
1476                 xe_device_mem_access_put(xe);
1477                 xe_pm_runtime_put(xe);
1478
1479                 if (xe->info.has_asid) {
1480                         mutex_lock(&xe->usm.lock);
1481                         lookup = xa_erase(&xe->usm.asid_to_vm, vm->usm.asid);
1482                         XE_WARN_ON(lookup != vm);
1483                         mutex_unlock(&xe->usm.lock);
1484                 }
1485         }
1486
1487         /*
1488          * XXX: We delay destroying the PT root until the VM is freed, as the PT root
1489          * is needed for xe_vm_lock to work. If we remove that dependency this
1490          * can be moved to xe_vm_close_and_put.
1491          */
1492         xe_vm_lock(vm, &ww, 0, false);
1493         for_each_gt(gt, xe, id) {
1494                 if (vm->pt_root[id]) {
1495                         xe_pt_destroy(vm->pt_root[id], vm->flags, NULL);
1496                         vm->pt_root[id] = NULL;
1497                 }
1498         }
1499         xe_vm_unlock(vm, &ww);
1500
1501         trace_xe_vm_free(vm);
1502         dma_fence_put(vm->rebind_fence);
1503         dma_resv_fini(&vm->resv);
1504         kfree(vm);
1505 }
1506
1507 void xe_vm_free(struct kref *ref)
1508 {
1509         struct xe_vm *vm = container_of(ref, struct xe_vm, refcount);
1510
1511         /* To destroy the VM we need to be able to sleep */
1512         queue_work(system_unbound_wq, &vm->destroy_work);
1513 }
1514
1515 struct xe_vm *xe_vm_lookup(struct xe_file *xef, u32 id)
1516 {
1517         struct xe_vm *vm;
1518
1519         mutex_lock(&xef->vm.lock);
1520         vm = xa_load(&xef->vm.xa, id);
1521         mutex_unlock(&xef->vm.lock);
1522
1523         if (vm)
1524                 xe_vm_get(vm);
1525
1526         return vm;
1527 }
1528
1529 u64 xe_vm_pdp4_descriptor(struct xe_vm *vm, struct xe_gt *full_gt)
1530 {
1531         XE_BUG_ON(xe_gt_is_media_type(full_gt));
1532
1533         return gen8_pde_encode(vm->pt_root[full_gt->info.id]->bo, 0,
1534                                XE_CACHE_WB);
1535 }
1536
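/*
 * Unbind a VMA on every GT where it is currently present. When more than one
 * GT is involved the per-GT fences are wrapped in a dma_fence_array so the
 * caller and any sync entries see a single completion fence.
 */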
1537 static struct dma_fence *
1538 xe_vm_unbind_vma(struct xe_vma *vma, struct xe_engine *e,
1539                  struct xe_sync_entry *syncs, u32 num_syncs)
1540 {
1541         struct xe_gt *gt;
1542         struct dma_fence *fence = NULL;
1543         struct dma_fence **fences = NULL;
1544         struct dma_fence_array *cf = NULL;
1545         struct xe_vm *vm = vma->vm;
1546         int cur_fence = 0, i;
1547         int number_gts = hweight_long(vma->gt_present);
1548         int err;
1549         u8 id;
1550
1551         trace_xe_vma_unbind(vma);
1552
1553         if (number_gts > 1) {
1554                 fences = kmalloc_array(number_gts, sizeof(*fences),
1555                                        GFP_KERNEL);
1556                 if (!fences)
1557                         return ERR_PTR(-ENOMEM);
1558         }
1559
1560         for_each_gt(gt, vm->xe, id) {
1561                 if (!(vma->gt_present & BIT(id)))
1562                         goto next;
1563
1564                 XE_BUG_ON(xe_gt_is_media_type(gt));
1565
1566                 fence = __xe_pt_unbind_vma(gt, vma, e, syncs, num_syncs);
1567                 if (IS_ERR(fence)) {
1568                         err = PTR_ERR(fence);
1569                         goto err_fences;
1570                 }
1571
1572                 if (fences)
1573                         fences[cur_fence++] = fence;
1574
1575 next:
1576                 if (e && vm->pt_root[id] && !list_empty(&e->multi_gt_list))
1577                         e = list_next_entry(e, multi_gt_list);
1578         }
1579
1580         if (fences) {
1581                 cf = dma_fence_array_create(number_gts, fences,
1582                                             vm->composite_fence_ctx,
1583                                             vm->composite_fence_seqno++,
1584                                             false);
1585                 if (!cf) {
1586                         --vm->composite_fence_seqno;
1587                         err = -ENOMEM;
1588                         goto err_fences;
1589                 }
1590         }
1591
1592         for (i = 0; i < num_syncs; i++)
1593                 xe_sync_entry_signal(&syncs[i], NULL, cf ? &cf->base : fence);
1594
1595         return cf ? &cf->base : !fence ? dma_fence_get_stub() : fence;
1596
1597 err_fences:
1598         if (fences) {
1599                 while (cur_fence) {
1600                         /* FIXME: Rewind the previous binds? */
1601                         dma_fence_put(fences[--cur_fence]);
1602                 }
1603                 kfree(fences);
1604         }
1605
1606         return ERR_PTR(err);
1607 }
1608
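/*
 * Bind a VMA on every GT requested in vma->gt_mask; gt_present is passed down
 * so the PT code can tell a fresh bind from a rebind. As with unbind, fences
 * from multiple GTs are collapsed into a single dma_fence_array.
 */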
1609 static struct dma_fence *
1610 xe_vm_bind_vma(struct xe_vma *vma, struct xe_engine *e,
1611                struct xe_sync_entry *syncs, u32 num_syncs)
1612 {
1613         struct xe_gt *gt;
1614         struct dma_fence *fence;
1615         struct dma_fence **fences = NULL;
1616         struct dma_fence_array *cf = NULL;
1617         struct xe_vm *vm = vma->vm;
1618         int cur_fence = 0, i;
1619         int number_gts = hweight_long(vma->gt_mask);
1620         int err;
1621         u8 id;
1622
1623         trace_xe_vma_bind(vma);
1624
1625         if (number_gts > 1) {
1626                 fences = kmalloc_array(number_gts, sizeof(*fences),
1627                                        GFP_KERNEL);
1628                 if (!fences)
1629                         return ERR_PTR(-ENOMEM);
1630         }
1631
1632         for_each_gt(gt, vm->xe, id) {
1633                 if (!(vma->gt_mask & BIT(id)))
1634                         goto next;
1635
1636                 XE_BUG_ON(xe_gt_is_media_type(gt));
1637                 fence = __xe_pt_bind_vma(gt, vma, e, syncs, num_syncs,
1638                                          vma->gt_present & BIT(id));
1639                 if (IS_ERR(fence)) {
1640                         err = PTR_ERR(fence);
1641                         goto err_fences;
1642                 }
1643
1644                 if (fences)
1645                         fences[cur_fence++] = fence;
1646
1647 next:
1648                 if (e && vm->pt_root[id] && !list_empty(&e->multi_gt_list))
1649                         e = list_next_entry(e, multi_gt_list);
1650         }
1651
1652         if (fences) {
1653                 cf = dma_fence_array_create(number_gts, fences,
1654                                             vm->composite_fence_ctx,
1655                                             vm->composite_fence_seqno++,
1656                                             false);
1657                 if (!cf) {
1658                         --vm->composite_fence_seqno;
1659                         err = -ENOMEM;
1660                         goto err_fences;
1661                 }
1662         }
1663
1664         for (i = 0; i < num_syncs; i++)
1665                 xe_sync_entry_signal(&syncs[i], NULL, cf ? &cf->base : fence);
1666
1667         return cf ? &cf->base : fence;
1668
1669 err_fences:
1670         if (fences) {
1671                 while (cur_fence) {
1672                         /* FIXME: Rewind the previous binds? */
1673                         dma_fence_put(fences[--cur_fence]);
1674                 }
1675                 kfree(fences);
1676         }
1677
1678         return ERR_PTR(err);
1679 }
1680
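/*
 * Fence published to userspace for an async bind/unbind. It is signalled from
 * async_op_fence_cb() once the underlying bind fence (wait_fence) signals;
 * 'started' and 'wq' let xe_vm_async_fence_wait_start() block until the
 * operation has actually been picked up when dma-fences are allowed.
 */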
1681 struct async_op_fence {
1682         struct dma_fence fence;
1683         struct dma_fence *wait_fence;
1684         struct dma_fence_cb cb;
1685         struct xe_vm *vm;
1686         wait_queue_head_t wq;
1687         bool started;
1688 };
1689
1690 static const char *async_op_fence_get_driver_name(struct dma_fence *dma_fence)
1691 {
1692         return "xe";
1693 }
1694
1695 static const char *
1696 async_op_fence_get_timeline_name(struct dma_fence *dma_fence)
1697 {
1698         return "async_op_fence";
1699 }
1700
1701 static const struct dma_fence_ops async_op_fence_ops = {
1702         .get_driver_name = async_op_fence_get_driver_name,
1703         .get_timeline_name = async_op_fence_get_timeline_name,
1704 };
1705
1706 static void async_op_fence_cb(struct dma_fence *fence, struct dma_fence_cb *cb)
1707 {
1708         struct async_op_fence *afence =
1709                 container_of(cb, struct async_op_fence, cb);
1710
1711         afence->fence.error = afence->wait_fence->error;
1712         dma_fence_signal(&afence->fence);
1713         xe_vm_put(afence->vm);
1714         dma_fence_put(afence->wait_fence);
1715         dma_fence_put(&afence->fence);
1716 }
1717
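/* Tie afence to the real bind fence; if that fence has already signalled (-ENOENT), propagate immediately. */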
1718 static void add_async_op_fence_cb(struct xe_vm *vm,
1719                                   struct dma_fence *fence,
1720                                   struct async_op_fence *afence)
1721 {
1722         int ret;
1723
1724         if (!xe_vm_no_dma_fences(vm)) {
1725                 afence->started = true;
1726                 smp_wmb();
1727                 wake_up_all(&afence->wq);
1728         }
1729
1730         afence->wait_fence = dma_fence_get(fence);
1731         afence->vm = xe_vm_get(vm);
1732         dma_fence_get(&afence->fence);
1733         ret = dma_fence_add_callback(fence, &afence->cb, async_op_fence_cb);
1734         if (ret == -ENOENT) {
1735                 afence->fence.error = afence->wait_fence->error;
1736                 dma_fence_signal(&afence->fence);
1737         }
1738         if (ret) {
1739                 xe_vm_put(vm);
1740                 dma_fence_put(afence->wait_fence);
1741                 dma_fence_put(&afence->fence);
1742         }
1743         XE_WARN_ON(ret && ret != -ENOENT);
1744 }
1745
1746 int xe_vm_async_fence_wait_start(struct dma_fence *fence)
1747 {
1748         if (fence->ops == &async_op_fence_ops) {
1749                 struct async_op_fence *afence =
1750                         container_of(fence, struct async_op_fence, fence);
1751
1752                 XE_BUG_ON(xe_vm_no_dma_fences(afence->vm));
1753
1754                 smp_rmb();
1755                 return wait_event_interruptible(afence->wq, afence->started);
1756         }
1757
1758         return 0;
1759 }
1760
1761 static int __xe_vm_bind(struct xe_vm *vm, struct xe_vma *vma,
1762                         struct xe_engine *e, struct xe_sync_entry *syncs,
1763                         u32 num_syncs, struct async_op_fence *afence)
1764 {
1765         struct dma_fence *fence;
1766
1767         xe_vm_assert_held(vm);
1768
1769         fence = xe_vm_bind_vma(vma, e, syncs, num_syncs);
1770         if (IS_ERR(fence))
1771                 return PTR_ERR(fence);
1772         if (afence)
1773                 add_async_op_fence_cb(vm, fence, afence);
1774
1775         dma_fence_put(fence);
1776         return 0;
1777 }
1778
1779 static int xe_vm_bind(struct xe_vm *vm, struct xe_vma *vma, struct xe_engine *e,
1780                       struct xe_bo *bo, struct xe_sync_entry *syncs,
1781                       u32 num_syncs, struct async_op_fence *afence)
1782 {
1783         int err;
1784
1785         xe_vm_assert_held(vm);
1786         xe_bo_assert_held(bo);
1787
1788         if (bo) {
1789                 err = xe_bo_validate(bo, vm, true);
1790                 if (err)
1791                         return err;
1792         }
1793
1794         return __xe_vm_bind(vm, vma, e, syncs, num_syncs, afence);
1795 }
1796
1797 static int xe_vm_unbind(struct xe_vm *vm, struct xe_vma *vma,
1798                         struct xe_engine *e, struct xe_sync_entry *syncs,
1799                         u32 num_syncs, struct async_op_fence *afence)
1800 {
1801         struct dma_fence *fence;
1802
1803         xe_vm_assert_held(vm);
1804         xe_bo_assert_held(vma->bo);
1805
1806         fence = xe_vm_unbind_vma(vma, e, syncs, num_syncs);
1807         if (IS_ERR(fence))
1808                 return PTR_ERR(fence);
1809         if (afence)
1810                 add_async_op_fence_cb(vm, fence, afence);
1811
1812         xe_vma_destroy(vma, fence);
1813         dma_fence_put(fence);
1814
1815         return 0;
1816 }
1817
1818 static int vm_set_error_capture_address(struct xe_device *xe, struct xe_vm *vm,
1819                                         u64 value)
1820 {
1821         if (XE_IOCTL_ERR(xe, !value))
1822                 return -EINVAL;
1823
1824         if (XE_IOCTL_ERR(xe, !(vm->flags & XE_VM_FLAG_ASYNC_BIND_OPS)))
1825                 return -ENOTSUPP;
1826
1827         if (XE_IOCTL_ERR(xe, vm->async_ops.error_capture.addr))
1828                 return -ENOTSUPP;
1829
1830         vm->async_ops.error_capture.mm = current->mm;
1831         vm->async_ops.error_capture.addr = value;
1832         init_waitqueue_head(&vm->async_ops.error_capture.wq);
1833
1834         return 0;
1835 }
1836
1837 typedef int (*xe_vm_set_property_fn)(struct xe_device *xe, struct xe_vm *vm,
1838                                      u64 value);
1839
1840 static const xe_vm_set_property_fn vm_set_property_funcs[] = {
1841         [XE_VM_PROPERTY_BIND_OP_ERROR_CAPTURE_ADDRESS] =
1842                 vm_set_error_capture_address,
1843 };
1844
1845 static int vm_user_ext_set_property(struct xe_device *xe, struct xe_vm *vm,
1846                                     u64 extension)
1847 {
1848         u64 __user *address = u64_to_user_ptr(extension);
1849         struct drm_xe_ext_vm_set_property ext;
1850         int err;
1851
1852         err = __copy_from_user(&ext, address, sizeof(ext));
1853         if (XE_IOCTL_ERR(xe, err))
1854                 return -EFAULT;
1855
1856         if (XE_IOCTL_ERR(xe, ext.property >=
1857                          ARRAY_SIZE(vm_set_property_funcs)))
1858                 return -EINVAL;
1859
1860         return vm_set_property_funcs[ext.property](xe, vm, ext.value);
1861 }
1862
1863 typedef int (*xe_vm_user_extension_fn)(struct xe_device *xe, struct xe_vm *vm,
1864                                        u64 extension);
1865
1866 static const xe_vm_set_property_fn vm_user_extension_funcs[] = {
1867         [XE_VM_EXTENSION_SET_PROPERTY] = vm_user_ext_set_property,
1868 };
1869
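/*
 * User extensions form a singly linked list in userspace memory
 * (xe_user_extension.next_extension); it is walked recursively, bounded by
 * MAX_USER_EXTENSIONS to avoid unbounded recursion on a malicious chain.
 */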
1870 #define MAX_USER_EXTENSIONS     16
1871 static int vm_user_extensions(struct xe_device *xe, struct xe_vm *vm,
1872                               u64 extensions, int ext_number)
1873 {
1874         u64 __user *address = u64_to_user_ptr(extensions);
1875         struct xe_user_extension ext;
1876         int err;
1877
1878         if (XE_IOCTL_ERR(xe, ext_number >= MAX_USER_EXTENSIONS))
1879                 return -E2BIG;
1880
1881         err = __copy_from_user(&ext, address, sizeof(ext));
1882         if (XE_IOCTL_ERR(xe, err))
1883                 return -EFAULT;
1884
1885         if (XE_IOCTL_ERR(xe, ext.name >=
1886                          ARRAY_SIZE(vm_user_extension_funcs)))
1887                 return -EINVAL;
1888
1889         err = vm_user_extension_funcs[ext.name](xe, vm, extensions);
1890         if (XE_IOCTL_ERR(xe, err))
1891                 return err;
1892
1893         if (ext.next_extension)
1894                 return vm_user_extensions(xe, vm, ext.next_extension,
1895                                           ++ext_number);
1896
1897         return 0;
1898 }
1899
1900 #define ALL_DRM_XE_VM_CREATE_FLAGS (DRM_XE_VM_CREATE_SCRATCH_PAGE | \
1901                                     DRM_XE_VM_CREATE_COMPUTE_MODE | \
1902                                     DRM_XE_VM_CREATE_ASYNC_BIND_OPS | \
1903                                     DRM_XE_VM_CREATE_FAULT_MODE)
1904
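/*
 * VM create ioctl: scratch-page and fault mode are mutually exclusive, as are
 * compute and fault mode; fault mode additionally requires USM support and
 * must match the device-wide fault/non-fault mode. The VM is then created,
 * extensions applied, a per-file handle allocated and, on devices with ASIDs,
 * a global ASID assigned for pagefault handling.
 */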
1905 int xe_vm_create_ioctl(struct drm_device *dev, void *data,
1906                        struct drm_file *file)
1907 {
1908         struct xe_device *xe = to_xe_device(dev);
1909         struct xe_file *xef = to_xe_file(file);
1910         struct drm_xe_vm_create *args = data;
1911         struct xe_vm *vm;
1912         u32 id, asid;
1913         int err;
1914         u32 flags = 0;
1915
1916         if (XE_IOCTL_ERR(xe, args->flags & ~ALL_DRM_XE_VM_CREATE_FLAGS))
1917                 return -EINVAL;
1918
1919         if (XE_IOCTL_ERR(xe, args->flags & DRM_XE_VM_CREATE_SCRATCH_PAGE &&
1920                          args->flags & DRM_XE_VM_CREATE_FAULT_MODE))
1921                 return -EINVAL;
1922
1923         if (XE_IOCTL_ERR(xe, args->flags & DRM_XE_VM_CREATE_COMPUTE_MODE &&
1924                          args->flags & DRM_XE_VM_CREATE_FAULT_MODE))
1925                 return -EINVAL;
1926
1927         if (XE_IOCTL_ERR(xe, args->flags & DRM_XE_VM_CREATE_FAULT_MODE &&
1928                          xe_device_in_non_fault_mode(xe)))
1929                 return -EINVAL;
1930
1931         if (XE_IOCTL_ERR(xe, !(args->flags & DRM_XE_VM_CREATE_FAULT_MODE) &&
1932                          xe_device_in_fault_mode(xe)))
1933                 return -EINVAL;
1934
1935         if (XE_IOCTL_ERR(xe, args->flags & DRM_XE_VM_CREATE_FAULT_MODE &&
1936                          !xe->info.supports_usm))
1937                 return -EINVAL;
1938
1939         if (args->flags & DRM_XE_VM_CREATE_SCRATCH_PAGE)
1940                 flags |= XE_VM_FLAG_SCRATCH_PAGE;
1941         if (args->flags & DRM_XE_VM_CREATE_COMPUTE_MODE)
1942                 flags |= XE_VM_FLAG_COMPUTE_MODE;
1943         if (args->flags & DRM_XE_VM_CREATE_ASYNC_BIND_OPS)
1944                 flags |= XE_VM_FLAG_ASYNC_BIND_OPS;
1945         if (args->flags & DRM_XE_VM_CREATE_FAULT_MODE)
1946                 flags |= XE_VM_FLAG_FAULT_MODE;
1947
1948         vm = xe_vm_create(xe, flags);
1949         if (IS_ERR(vm))
1950                 return PTR_ERR(vm);
1951
1952         if (args->extensions) {
1953                 err = vm_user_extensions(xe, vm, args->extensions, 0);
1954                 if (XE_IOCTL_ERR(xe, err)) {
1955                         xe_vm_close_and_put(vm);
1956                         return err;
1957                 }
1958         }
1959
1960         mutex_lock(&xef->vm.lock);
1961         err = xa_alloc(&xef->vm.xa, &id, vm, xa_limit_32b, GFP_KERNEL);
1962         mutex_unlock(&xef->vm.lock);
1963         if (err) {
1964                 xe_vm_close_and_put(vm);
1965                 return err;
1966         }
1967
1968         if (xe->info.has_asid) {
1969                 mutex_lock(&xe->usm.lock);
1970                 err = xa_alloc_cyclic(&xe->usm.asid_to_vm, &asid, vm,
1971                                       XA_LIMIT(0, XE_MAX_ASID - 1),
1972                                       &xe->usm.next_asid, GFP_KERNEL);
1973                 mutex_unlock(&xe->usm.lock);
1974                 if (err) {
1975                         xe_vm_close_and_put(vm);
1976                         return err;
1977                 }
1978                 vm->usm.asid = asid;
1979         }
1980
1981         args->vm_id = id;
1982
1983 #if IS_ENABLED(CONFIG_DRM_XE_DEBUG_MEM)
1984         /* Warning: Security issue - never enable by default */
1985         args->reserved[0] = xe_bo_main_addr(vm->pt_root[0]->bo, XE_PAGE_SIZE);
1986 #endif
1987
1988         return 0;
1989 }
1990
1991 int xe_vm_destroy_ioctl(struct drm_device *dev, void *data,
1992                         struct drm_file *file)
1993 {
1994         struct xe_device *xe = to_xe_device(dev);
1995         struct xe_file *xef = to_xe_file(file);
1996         struct drm_xe_vm_destroy *args = data;
1997         struct xe_vm *vm;
1998
1999         if (XE_IOCTL_ERR(xe, args->pad))
2000                 return -EINVAL;
2001
2002         vm = xe_vm_lookup(xef, args->vm_id);
2003         if (XE_IOCTL_ERR(xe, !vm))
2004                 return -ENOENT;
2005         xe_vm_put(vm);
2006
2007         /* FIXME: Extend this check to non-compute mode VMs */
2008         if (XE_IOCTL_ERR(xe, vm->preempt.num_engines))
2009                 return -EBUSY;
2010
2011         mutex_lock(&xef->vm.lock);
2012         xa_erase(&xef->vm.xa, args->vm_id);
2013         mutex_unlock(&xef->vm.lock);
2014
2015         xe_vm_close_and_put(vm);
2016
2017         return 0;
2018 }
2019
2020 static const u32 region_to_mem_type[] = {
2021         XE_PL_TT,
2022         XE_PL_VRAM0,
2023         XE_PL_VRAM1,
2024 };
2025
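/*
 * Prefetch: migrate the backing BO to the requested memory region and, if the
 * VMA is not fully bound (or has been invalidated) on the GTs it should be on,
 * issue a rebind. Otherwise there is nothing to do and the syncs are signalled
 * immediately.
 */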
2026 static int xe_vm_prefetch(struct xe_vm *vm, struct xe_vma *vma,
2027                           struct xe_engine *e, u32 region,
2028                           struct xe_sync_entry *syncs, u32 num_syncs,
2029                           struct async_op_fence *afence)
2030 {
2031         int err;
2032
2033         XE_BUG_ON(region >= ARRAY_SIZE(region_to_mem_type));
2034
2035         if (!xe_vma_is_userptr(vma)) {
2036                 err = xe_bo_migrate(vma->bo, region_to_mem_type[region]);
2037                 if (err)
2038                         return err;
2039         }
2040
2041         if (vma->gt_mask != (vma->gt_present & ~vma->usm.gt_invalidated)) {
2042                 return xe_vm_bind(vm, vma, e, vma->bo, syncs, num_syncs,
2043                                   afence);
2044         } else {
2045                 int i;
2046
2047                 /* Nothing to do, signal fences now */
2048                 for (i = 0; i < num_syncs; i++)
2049                         xe_sync_entry_signal(&syncs[i], NULL,
2050                                              dma_fence_get_stub());
2051                 if (afence)
2052                         dma_fence_signal(&afence->fence);
2053                 return 0;
2054         }
2055 }
2056
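/*
 * The low 16 bits of a bind op carry the opcode, the upper bits carry flags,
 * e.g. an async map from userspace arrives as
 * XE_VM_BIND_OP_MAP | XE_VM_BIND_FLAG_ASYNC.
 */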
2057 #define VM_BIND_OP(op)  (op & 0xffff)
2058
2059 static int __vm_bind_ioctl(struct xe_vm *vm, struct xe_vma *vma,
2060                            struct xe_engine *e, struct xe_bo *bo, u32 op,
2061                            u32 region, struct xe_sync_entry *syncs,
2062                            u32 num_syncs, struct async_op_fence *afence)
2063 {
2064         switch (VM_BIND_OP(op)) {
2065         case XE_VM_BIND_OP_MAP:
2066                 return xe_vm_bind(vm, vma, e, bo, syncs, num_syncs, afence);
2067         case XE_VM_BIND_OP_UNMAP:
2068         case XE_VM_BIND_OP_UNMAP_ALL:
2069                 return xe_vm_unbind(vm, vma, e, syncs, num_syncs, afence);
2070         case XE_VM_BIND_OP_MAP_USERPTR:
2071                 return xe_vm_bind(vm, vma, e, NULL, syncs, num_syncs, afence);
2072         case XE_VM_BIND_OP_PREFETCH:
2073                 return xe_vm_prefetch(vm, vma, e, region, syncs, num_syncs,
2074                                       afence);
2076         default:
2077                 XE_BUG_ON("NOT POSSIBLE");
2078                 return -EINVAL;
2079         }
2080 }
2081
2082 struct ttm_buffer_object *xe_vm_ttm_bo(struct xe_vm *vm)
2083 {
2084         int idx = vm->flags & XE_VM_FLAG_MIGRATION ?
2085                 XE_VM_FLAG_GT_ID(vm->flags) : 0;
2086
2087         /* Safe to use index 0 as all BOs in the VM share a single dma-resv lock */
2088         return &vm->pt_root[idx]->bo->ttm;
2089 }
2090
2091 static void xe_vm_tv_populate(struct xe_vm *vm, struct ttm_validate_buffer *tv)
2092 {
2093         tv->num_shared = 1;
2094         tv->bo = xe_vm_ttm_bo(vm);
2095 }
2096
2097 static bool is_map_op(u32 op)
2098 {
2099         return VM_BIND_OP(op) == XE_VM_BIND_OP_MAP ||
2100                 VM_BIND_OP(op) == XE_VM_BIND_OP_MAP_USERPTR;
2101 }
2102
2103 static bool is_unmap_op(u32 op)
2104 {
2105         return VM_BIND_OP(op) == XE_VM_BIND_OP_UNMAP ||
2106                 VM_BIND_OP(op) == XE_VM_BIND_OP_UNMAP_ALL;
2107 }
2108
2109 static int vm_bind_ioctl(struct xe_vm *vm, struct xe_vma *vma,
2110                          struct xe_engine *e, struct xe_bo *bo,
2111                          struct drm_xe_vm_bind_op *bind_op,
2112                          struct xe_sync_entry *syncs, u32 num_syncs,
2113                          struct async_op_fence *afence)
2114 {
2115         LIST_HEAD(objs);
2116         LIST_HEAD(dups);
2117         struct ttm_validate_buffer tv_bo, tv_vm;
2118         struct ww_acquire_ctx ww;
2119         struct xe_bo *vbo;
2120         int err, i;
2121
2122         lockdep_assert_held(&vm->lock);
2123         XE_BUG_ON(!list_empty(&vma->unbind_link));
2124
2125         /* Binds deferred to faults, signal fences now */
2126         if (xe_vm_in_fault_mode(vm) && is_map_op(bind_op->op) &&
2127             !(bind_op->op & XE_VM_BIND_FLAG_IMMEDIATE)) {
2128                 for (i = 0; i < num_syncs; i++)
2129                         xe_sync_entry_signal(&syncs[i], NULL,
2130                                              dma_fence_get_stub());
2131                 if (afence)
2132                         dma_fence_signal(&afence->fence);
2133                 return 0;
2134         }
2135
2136         xe_vm_tv_populate(vm, &tv_vm);
2137         list_add_tail(&tv_vm.head, &objs);
2138         vbo = vma->bo;
2139         if (vbo) {
2140                 /*
2141                  * An unbind can drop the last reference to the BO and
2142                  * the BO is needed for ttm_eu_backoff_reservation so
2143                  * take a reference here.
2144                  */
2145                 xe_bo_get(vbo);
2146
2147                 tv_bo.bo = &vbo->ttm;
2148                 tv_bo.num_shared = 1;
2149                 list_add(&tv_bo.head, &objs);
2150         }
2151
2152 again:
2153         err = ttm_eu_reserve_buffers(&ww, &objs, true, &dups);
2154         if (!err) {
2155                 err = __vm_bind_ioctl(vm, vma, e, bo,
2156                                       bind_op->op, bind_op->region, syncs,
2157                                       num_syncs, afence);
2158                 ttm_eu_backoff_reservation(&ww, &objs);
2159                 if (err == -EAGAIN && xe_vma_is_userptr(vma)) {
2160                         lockdep_assert_held_write(&vm->lock);
2161                         err = xe_vma_userptr_pin_pages(vma);
2162                         if (!err)
2163                                 goto again;
2164                 }
2165         }
2166         xe_bo_put(vbo);
2167
2168         return err;
2169 }
2170
2171 struct async_op {
2172         struct xe_vma *vma;
2173         struct xe_engine *engine;
2174         struct xe_bo *bo;
2175         struct drm_xe_vm_bind_op bind_op;
2176         struct xe_sync_entry *syncs;
2177         u32 num_syncs;
2178         struct list_head link;
2179         struct async_op_fence *fence;
2180 };
2181
2182 static void async_op_cleanup(struct xe_vm *vm, struct async_op *op)
2183 {
2184         while (op->num_syncs--)
2185                 xe_sync_entry_cleanup(&op->syncs[op->num_syncs]);
2186         kfree(op->syncs);
2187         xe_bo_put(op->bo);
2188         if (op->engine)
2189                 xe_engine_put(op->engine);
2190         xe_vm_put(vm);
2191         if (op->fence)
2192                 dma_fence_put(&op->fence->fence);
2193         kfree(op);
2194 }
2195
2196 static struct async_op *next_async_op(struct xe_vm *vm)
2197 {
2198         return list_first_entry_or_null(&vm->async_ops.pending,
2199                                         struct async_op, link);
2200 }
2201
2202 static void vm_set_async_error(struct xe_vm *vm, int err)
2203 {
2204         lockdep_assert_held(&vm->lock);
2205         vm->async_ops.error = err;
2206 }
2207
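/*
 * Worker that drains vm->async_ops.pending: each queued op is executed under
 * vm->lock via vm_bind_ioctl(). On failure the op is put back at the head of
 * the list and the VM's async error is set, pausing the queue until userspace
 * issues XE_VM_BIND_OP_RESTART. A closed VM only flushes fences / destroys
 * unmapped VMAs instead of executing the ops.
 */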
2208 static void async_op_work_func(struct work_struct *w)
2209 {
2210         struct xe_vm *vm = container_of(w, struct xe_vm, async_ops.work);
2211
2212         for (;;) {
2213                 struct async_op *op;
2214                 int err;
2215
2216                 if (vm->async_ops.error && !xe_vm_is_closed(vm))
2217                         break;
2218
2219                 spin_lock_irq(&vm->async_ops.lock);
2220                 op = next_async_op(vm);
2221                 if (op)
2222                         list_del_init(&op->link);
2223                 spin_unlock_irq(&vm->async_ops.lock);
2224
2225                 if (!op)
2226                         break;
2227
2228                 if (!xe_vm_is_closed(vm)) {
2229                         bool first, last;
2230
2231                         down_write(&vm->lock);
2232 again:
2233                         first = op->vma->first_munmap_rebind;
2234                         last = op->vma->last_munmap_rebind;
2235 #ifdef TEST_VM_ASYNC_OPS_ERROR
2236 #define FORCE_ASYNC_OP_ERROR    BIT(31)
2237                         if (!(op->bind_op.op & FORCE_ASYNC_OP_ERROR)) {
2238                                 err = vm_bind_ioctl(vm, op->vma, op->engine,
2239                                                     op->bo, &op->bind_op,
2240                                                     op->syncs, op->num_syncs,
2241                                                     op->fence);
2242                         } else {
2243                                 err = -ENOMEM;
2244                                 op->bind_op.op &= ~FORCE_ASYNC_OP_ERROR;
2245                         }
2246 #else
2247                         err = vm_bind_ioctl(vm, op->vma, op->engine, op->bo,
2248                                             &op->bind_op, op->syncs,
2249                                             op->num_syncs, op->fence);
2250 #endif
2251                         /*
2252                          * In order for the fencing to work (stall behind
2253                          * existing jobs / prevent new jobs from running) all
2254                          * the dma-resv slots need to be programmed in a batch
2255                          * relative to execs / the rebind worker. The vm->lock
2256                          * ensure this.
2257          * ensures this.
2258                         if (!err && ((first && VM_BIND_OP(op->bind_op.op) ==
2259                                       XE_VM_BIND_OP_UNMAP) ||
2260                                      vm->async_ops.munmap_rebind_inflight)) {
2261                                 if (last) {
2262                                         op->vma->last_munmap_rebind = false;
2263                                         vm->async_ops.munmap_rebind_inflight =
2264                                                 false;
2265                                 } else {
2266                                         vm->async_ops.munmap_rebind_inflight =
2267                                                 true;
2268
2269                                         async_op_cleanup(vm, op);
2270
2271                                         spin_lock_irq(&vm->async_ops.lock);
2272                                         op = next_async_op(vm);
2273                                         XE_BUG_ON(!op);
2274                                         list_del_init(&op->link);
2275                                         spin_unlock_irq(&vm->async_ops.lock);
2276
2277                                         goto again;
2278                                 }
2279                         }
2280                         if (err) {
2281                                 trace_xe_vma_fail(op->vma);
2282                                 drm_warn(&vm->xe->drm, "Async VM op(%d) failed with %d",
2283                                          VM_BIND_OP(op->bind_op.op),
2284                                          err);
2285
2286                                 spin_lock_irq(&vm->async_ops.lock);
2287                                 list_add(&op->link, &vm->async_ops.pending);
2288                                 spin_unlock_irq(&vm->async_ops.lock);
2289
2290                                 vm_set_async_error(vm, err);
2291                                 up_write(&vm->lock);
2292
2293                                 if (vm->async_ops.error_capture.addr)
2294                                         vm_error_capture(vm, err,
2295                                                          op->bind_op.op,
2296                                                          op->bind_op.addr,
2297                                                          op->bind_op.range);
2298                                 break;
2299                         }
2300                         up_write(&vm->lock);
2301                 } else {
2302                         trace_xe_vma_flush(op->vma);
2303
2304                         if (is_unmap_op(op->bind_op.op)) {
2305                                 down_write(&vm->lock);
2306                                 xe_vma_destroy_unlocked(op->vma);
2307                                 up_write(&vm->lock);
2308                         }
2309
2310                         if (op->fence && !test_bit(DMA_FENCE_FLAG_SIGNALED_BIT,
2311                                                    &op->fence->fence.flags)) {
2312                                 if (!xe_vm_no_dma_fences(vm)) {
2313                                         op->fence->started = true;
2314                                         smp_wmb();
2315                                         wake_up_all(&op->fence->wq);
2316                                 }
2317                                 dma_fence_signal(&op->fence->fence);
2318                         }
2319                 }
2320
2321                 async_op_cleanup(vm, op);
2322         }
2323 }
2324
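/*
 * Queue a single bind op on vm->async_ops.pending. If the op carries syncs an
 * async_op_fence is created and installed in them so userspace can wait for
 * completion; the worker is only kicked when the VM is not in the async error
 * state.
 */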
2325 static int __vm_bind_ioctl_async(struct xe_vm *vm, struct xe_vma *vma,
2326                                  struct xe_engine *e, struct xe_bo *bo,
2327                                  struct drm_xe_vm_bind_op *bind_op,
2328                                  struct xe_sync_entry *syncs, u32 num_syncs)
2329 {
2330         struct async_op *op;
2331         bool installed = false;
2332         u64 seqno;
2333         int i;
2334
2335         lockdep_assert_held(&vm->lock);
2336
2337         op = kmalloc(sizeof(*op), GFP_KERNEL);
2338         if (!op) {
2339                 return -ENOMEM;
2340         }
2341
2342         if (num_syncs) {
2343                 op->fence = kmalloc(sizeof(*op->fence), GFP_KERNEL);
2344                 if (!op->fence) {
2345                         kfree(op);
2346                         return -ENOMEM;
2347                 }
2348
2349                 seqno = e ? ++e->bind.fence_seqno : ++vm->async_ops.fence.seqno;
2350                 dma_fence_init(&op->fence->fence, &async_op_fence_ops,
2351                                &vm->async_ops.lock, e ? e->bind.fence_ctx :
2352                                vm->async_ops.fence.context, seqno);
2353
2354                 if (!xe_vm_no_dma_fences(vm)) {
2355                         op->fence->vm = vm;
2356                         op->fence->started = false;
2357                         init_waitqueue_head(&op->fence->wq);
2358                 }
2359         } else {
2360                 op->fence = NULL;
2361         }
2362         op->vma = vma;
2363         op->engine = e;
2364         op->bo = bo;
2365         op->bind_op = *bind_op;
2366         op->syncs = syncs;
2367         op->num_syncs = num_syncs;
2368         INIT_LIST_HEAD(&op->link);
2369
2370         for (i = 0; i < num_syncs; i++)
2371                 installed |= xe_sync_entry_signal(&syncs[i], NULL,
2372                                                   &op->fence->fence);
2373
2374         if (!installed && op->fence)
2375                 dma_fence_signal(&op->fence->fence);
2376
2377         spin_lock_irq(&vm->async_ops.lock);
2378         list_add_tail(&op->link, &vm->async_ops.pending);
2379         spin_unlock_irq(&vm->async_ops.lock);
2380
2381         if (!vm->async_ops.error)
2382                 queue_work(system_unbound_wq, &vm->async_ops.work);
2383
2384         return 0;
2385 }
2386
2387 static int vm_bind_ioctl_async(struct xe_vm *vm, struct xe_vma *vma,
2388                                struct xe_engine *e, struct xe_bo *bo,
2389                                struct drm_xe_vm_bind_op *bind_op,
2390                                struct xe_sync_entry *syncs, u32 num_syncs)
2391 {
2392         struct xe_vma *__vma, *next;
2393         struct list_head rebind_list;
2394         struct xe_sync_entry *in_syncs = NULL, *out_syncs = NULL;
2395         u32 num_in_syncs = 0, num_out_syncs = 0;
2396         bool first = true, last;
2397         int err;
2398         int i;
2399
2400         lockdep_assert_held(&vm->lock);
2401
2402         /* Not a linked list of unbinds + rebinds, easy */
2403         if (list_empty(&vma->unbind_link))
2404                 return __vm_bind_ioctl_async(vm, vma, e, bo, bind_op,
2405                                              syncs, num_syncs);
2406
2407         /*
2408          * Linked list of unbinds + rebinds: decompose the syncs into 'in / out',
2409          * passing the 'in' syncs to the first operation and the 'out' syncs to the
2410          * last. The reference counting is also a little tricky: increment the VM /
2411          * bind engine ref count on all but the last operation and increment the
2412          * BO's ref count on each rebind.
2413          */
2414
2415         XE_BUG_ON(VM_BIND_OP(bind_op->op) != XE_VM_BIND_OP_UNMAP &&
2416                   VM_BIND_OP(bind_op->op) != XE_VM_BIND_OP_UNMAP_ALL &&
2417                   VM_BIND_OP(bind_op->op) != XE_VM_BIND_OP_PREFETCH);
2418
2419         /* Decompose syncs */
2420         if (num_syncs) {
2421                 in_syncs = kmalloc(sizeof(*in_syncs) * num_syncs, GFP_KERNEL);
2422                 out_syncs = kmalloc(sizeof(*out_syncs) * num_syncs, GFP_KERNEL);
2423                 if (!in_syncs || !out_syncs) {
2424                         err = -ENOMEM;
2425                         goto out_error;
2426                 }
2427
2428                 for (i = 0; i < num_syncs; ++i) {
2429                         bool signal = syncs[i].flags & DRM_XE_SYNC_SIGNAL;
2430
2431                         if (signal)
2432                                 out_syncs[num_out_syncs++] = syncs[i];
2433                         else
2434                                 in_syncs[num_in_syncs++] = syncs[i];
2435                 }
2436         }
2437
2438         /* Do unbinds + move rebinds to new list */
2439         INIT_LIST_HEAD(&rebind_list);
2440         list_for_each_entry_safe(__vma, next, &vma->unbind_link, unbind_link) {
2441                 if (__vma->destroyed ||
2442                     VM_BIND_OP(bind_op->op) == XE_VM_BIND_OP_PREFETCH) {
2443                         list_del_init(&__vma->unbind_link);
2444                         xe_bo_get(bo);
2445                         err = __vm_bind_ioctl_async(xe_vm_get(vm), __vma,
2446                                                     e ? xe_engine_get(e) : NULL,
2447                                                     bo, bind_op, first ?
2448                                                     in_syncs : NULL,
2449                                                     first ? num_in_syncs : 0);
2450                         if (err) {
2451                                 xe_bo_put(bo);
2452                                 xe_vm_put(vm);
2453                                 if (e)
2454                                         xe_engine_put(e);
2455                                 goto out_error;
2456                         }
2457                         in_syncs = NULL;
2458                         first = false;
2459                 } else {
2460                         list_move_tail(&__vma->unbind_link, &rebind_list);
2461                 }
2462         }
2463         last = list_empty(&rebind_list);
2464         if (!last) {
2465                 xe_vm_get(vm);
2466                 if (e)
2467                         xe_engine_get(e);
2468         }
2469         err = __vm_bind_ioctl_async(vm, vma, e,
2470                                     bo, bind_op,
2471                                     first ? in_syncs :
2472                                     last ? out_syncs : NULL,
2473                                     first ? num_in_syncs :
2474                                     last ? num_out_syncs : 0);
2475         if (err) {
2476                 if (!last) {
2477                         xe_vm_put(vm);
2478                         if (e)
2479                                 xe_engine_put(e);
2480                 }
2481                 goto out_error;
2482         }
2483         in_syncs = NULL;
2484
2485         /* Do rebinds */
2486         list_for_each_entry_safe(__vma, next, &rebind_list, unbind_link) {
2487                 list_del_init(&__vma->unbind_link);
2488                 last = list_empty(&rebind_list);
2489
2490                 if (xe_vma_is_userptr(__vma)) {
2491                         bind_op->op = XE_VM_BIND_FLAG_ASYNC |
2492                                 XE_VM_BIND_OP_MAP_USERPTR;
2493                 } else {
2494                         bind_op->op = XE_VM_BIND_FLAG_ASYNC |
2495                                 XE_VM_BIND_OP_MAP;
2496                         xe_bo_get(__vma->bo);
2497                 }
2498
2499                 if (!last) {
2500                         xe_vm_get(vm);
2501                         if (e)
2502                                 xe_engine_get(e);
2503                 }
2504
2505                 err = __vm_bind_ioctl_async(vm, __vma, e,
2506                                             __vma->bo, bind_op, last ?
2507                                             out_syncs : NULL,
2508                                             last ? num_out_syncs : 0);
2509                 if (err) {
2510                         if (!last) {
2511                                 xe_vm_put(vm);
2512                                 if (e)
2513                                         xe_engine_put(e);
2514                         }
2515                         goto out_error;
2516                 }
2517         }
2518
2519         kfree(syncs);
2520         return 0;
2521
2522 out_error:
2523         kfree(in_syncs);
2524         kfree(out_syncs);
2525         kfree(syncs);
2526
2527         return err;
2528 }
2529
2530 static int __vm_bind_ioctl_lookup_vma(struct xe_vm *vm, struct xe_bo *bo,
2531                                       u64 addr, u64 range, u32 op)
2532 {
2533         struct xe_device *xe = vm->xe;
2534         struct xe_vma *vma, lookup;
2535         bool async = !!(op & XE_VM_BIND_FLAG_ASYNC);
2536
2537         lockdep_assert_held(&vm->lock);
2538
2539         lookup.start = addr;
2540         lookup.end = addr + range - 1;
2541
2542         switch (VM_BIND_OP(op)) {
2543         case XE_VM_BIND_OP_MAP:
2544         case XE_VM_BIND_OP_MAP_USERPTR:
2545                 vma = xe_vm_find_overlapping_vma(vm, &lookup);
2546                 if (XE_IOCTL_ERR(xe, vma))
2547                         return -EBUSY;
2548                 break;
2549         case XE_VM_BIND_OP_UNMAP:
2550         case XE_VM_BIND_OP_PREFETCH:
2551                 vma = xe_vm_find_overlapping_vma(vm, &lookup);
2552                 if (XE_IOCTL_ERR(xe, !vma) ||
2553                     XE_IOCTL_ERR(xe, (vma->start != addr ||
2554                                  vma->end != addr + range - 1) && !async))
2555                         return -EINVAL;
2556                 break;
2557         case XE_VM_BIND_OP_UNMAP_ALL:
2558                 break;
2559         default:
2560                 XE_BUG_ON("NOT POSSIBLE");
2561                 return -EINVAL;
2562         }
2563
2564         return 0;
2565 }
2566
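/*
 * Mark the VMA destroyed under the userptr notifier lock and remove it from
 * the VM; the actual teardown happens later via xe_vma_destroy().
 */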
2567 static void prep_vma_destroy(struct xe_vm *vm, struct xe_vma *vma)
2568 {
2569         down_read(&vm->userptr.notifier_lock);
2570         vma->destroyed = true;
2571         up_read(&vm->userptr.notifier_lock);
2572         xe_vm_remove_vma(vm, vma);
2573 }
2574
2575 static int prep_replacement_vma(struct xe_vm *vm, struct xe_vma *vma)
2576 {
2577         int err;
2578
2579         if (vma->bo && !vma->bo->vm) {
2580                 vm_insert_extobj(vm, vma);
2581                 err = add_preempt_fences(vm, vma->bo);
2582                 if (err)
2583                         return err;
2584         }
2585
2586         return 0;
2587 }
2588
2589 /*
2590  * Find all overlapping VMAs in lookup range and add to a list in the returned
2591  * VMA; all of the VMAs found will be unbound. Also possibly add 2 new VMAs that
2592  * need to be bound if first / last VMAs are not fully unbound. This is akin to
2593  * how munmap works.
2594  */
2595 static struct xe_vma *vm_unbind_lookup_vmas(struct xe_vm *vm,
2596                                             struct xe_vma *lookup)
2597 {
2598         struct xe_vma *vma = xe_vm_find_overlapping_vma(vm, lookup);
2599         struct rb_node *node;
2600         struct xe_vma *first = vma, *last = vma, *new_first = NULL,
2601                       *new_last = NULL, *__vma, *next;
2602         int err = 0;
2603         bool first_munmap_rebind = false;
2604
2605         lockdep_assert_held(&vm->lock);
2606         XE_BUG_ON(!vma);
2607
2608         node = &vma->vm_node;
2609         while ((node = rb_next(node))) {
2610                 if (!xe_vma_cmp_vma_cb(lookup, node)) {
2611                         __vma = to_xe_vma(node);
2612                         list_add_tail(&__vma->unbind_link, &vma->unbind_link);
2613                         last = __vma;
2614                 } else {
2615                         break;
2616                 }
2617         }
2618
2619         node = &vma->vm_node;
2620         while ((node = rb_prev(node))) {
2621                 if (!xe_vma_cmp_vma_cb(lookup, node)) {
2622                         __vma = to_xe_vma(node);
2623                         list_add(&__vma->unbind_link, &vma->unbind_link);
2624                         first = __vma;
2625                 } else {
2626                         break;
2627                 }
2628         }
2629
2630         if (first->start != lookup->start) {
2631                 struct ww_acquire_ctx ww;
2632
2633                 if (first->bo)
2634                         err = xe_bo_lock(first->bo, &ww, 0, true);
2635                 if (err)
2636                         goto unwind;
2637                 new_first = xe_vma_create(first->vm, first->bo,
2638                                           first->bo ? first->bo_offset :
2639                                           first->userptr.ptr,
2640                                           first->start,
2641                                           lookup->start - 1,
2642                                           (first->pte_flags & XE_PTE_READ_ONLY),
2643                                           first->gt_mask);
2644                 if (first->bo)
2645                         xe_bo_unlock(first->bo, &ww);
2646                 if (!new_first) {
2647                         err = -ENOMEM;
2648                         goto unwind;
2649                 }
2650                 if (!first->bo) {
2651                         err = xe_vma_userptr_pin_pages(new_first);
2652                         if (err)
2653                                 goto unwind;
2654                 }
2655                 err = prep_replacement_vma(vm, new_first);
2656                 if (err)
2657                         goto unwind;
2658         }
2659
2660         if (last->end != lookup->end) {
2661                 struct ww_acquire_ctx ww;
2662                 u64 chunk = lookup->end + 1 - last->start;
2663
2664                 if (last->bo)
2665                         err = xe_bo_lock(last->bo, &ww, 0, true);
2666                 if (err)
2667                         goto unwind;
2668                 new_last = xe_vma_create(last->vm, last->bo,
2669                                          last->bo ? last->bo_offset + chunk :
2670                                          last->userptr.ptr + chunk,
2671                                          last->start + chunk,
2672                                          last->end,
2673                                          (last->pte_flags & XE_PTE_READ_ONLY),
2674                                          last->gt_mask);
2675                 if (last->bo)
2676                         xe_bo_unlock(last->bo, &ww);
2677                 if (!new_last) {
2678                         err = -ENOMEM;
2679                         goto unwind;
2680                 }
2681                 if (!last->bo) {
2682                         err = xe_vma_userptr_pin_pages(new_last);
2683                         if (err)
2684                                 goto unwind;
2685                 }
2686                 err = prep_replacement_vma(vm, new_last);
2687                 if (err)
2688                         goto unwind;
2689         }
2690
2691         prep_vma_destroy(vm, vma);
2692         if (list_empty(&vma->unbind_link) && (new_first || new_last))
2693                 vma->first_munmap_rebind = true;
2694         list_for_each_entry(__vma, &vma->unbind_link, unbind_link) {
2695                 if ((new_first || new_last) && !first_munmap_rebind) {
2696                         __vma->first_munmap_rebind = true;
2697                         first_munmap_rebind = true;
2698                 }
2699                 prep_vma_destroy(vm, __vma);
2700         }
2701         if (new_first) {
2702                 xe_vm_insert_vma(vm, new_first);
2703                 list_add_tail(&new_first->unbind_link, &vma->unbind_link);
2704                 if (!new_last)
2705                         new_first->last_munmap_rebind = true;
2706         }
2707         if (new_last) {
2708                 xe_vm_insert_vma(vm, new_last);
2709                 list_add_tail(&new_last->unbind_link, &vma->unbind_link);
2710                 new_last->last_munmap_rebind = true;
2711         }
2712
2713         return vma;
2714
2715 unwind:
2716         list_for_each_entry_safe(__vma, next, &vma->unbind_link, unbind_link)
2717                 list_del_init(&__vma->unbind_link);
2718         if (new_last) {
2719                 prep_vma_destroy(vm, new_last);
2720                 xe_vma_destroy_unlocked(new_last);
2721         }
2722         if (new_first) {
2723                 prep_vma_destroy(vm, new_first);
2724                 xe_vma_destroy_unlocked(new_first);
2725         }
2726
2727         return ERR_PTR(err);
2728 }
2729
2730 /*
2731  * Similar to vm_unbind_lookup_vmas, find all VMAs in lookup range to prefetch
2732  */
2733 static struct xe_vma *vm_prefetch_lookup_vmas(struct xe_vm *vm,
2734                                               struct xe_vma *lookup,
2735                                               u32 region)
2736 {
2737         struct xe_vma *vma = xe_vm_find_overlapping_vma(vm, lookup), *__vma,
2738                       *next;
2739         struct rb_node *node;
2740
2741         if (!xe_vma_is_userptr(vma)) {
2742                 if (!xe_bo_can_migrate(vma->bo, region_to_mem_type[region]))
2743                         return ERR_PTR(-EINVAL);
2744         }
2745
2746         node = &vma->vm_node;
2747         while ((node = rb_next(node))) {
2748                 if (!xe_vma_cmp_vma_cb(lookup, node)) {
2749                         __vma = to_xe_vma(node);
2750                         if (!xe_vma_is_userptr(__vma)) {
2751                                 if (!xe_bo_can_migrate(__vma->bo, region_to_mem_type[region]))
2752                                         goto flush_list;
2753                         }
2754                         list_add_tail(&__vma->unbind_link, &vma->unbind_link);
2755                 } else {
2756                         break;
2757                 }
2758         }
2759
2760         node = &vma->vm_node;
2761         while ((node = rb_prev(node))) {
2762                 if (!xe_vma_cmp_vma_cb(lookup, node)) {
2763                         __vma = to_xe_vma(node);
2764                         if (!xe_vma_is_userptr(__vma)) {
2765                                 if (!xe_bo_can_migrate(__vma->bo, region_to_mem_type[region]))
2766                                         goto flush_list;
2767                         }
2768                         list_add(&__vma->unbind_link, &vma->unbind_link);
2769                 } else {
2770                         break;
2771                 }
2772         }
2773
2774         return vma;
2775
2776 flush_list:
2777         list_for_each_entry_safe(__vma, next, &vma->unbind_link,
2778                                  unbind_link)
2779                 list_del_init(&__vma->unbind_link);
2780
2781         return ERR_PTR(-EINVAL);
2782 }
2783
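/*
 * For UNMAP_ALL: collect every VMA of this BO that belongs to the given VM,
 * marking each for destruction and chaining them off the first one found.
 */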
2784 static struct xe_vma *vm_unbind_all_lookup_vmas(struct xe_vm *vm,
2785                                                 struct xe_bo *bo)
2786 {
2787         struct xe_vma *first = NULL, *vma;
2788
2789         lockdep_assert_held(&vm->lock);
2790         xe_bo_assert_held(bo);
2791
2792         list_for_each_entry(vma, &bo->vmas, bo_link) {
2793                 if (vma->vm != vm)
2794                         continue;
2795
2796                 prep_vma_destroy(vm, vma);
2797                 if (!first)
2798                         first = vma;
2799                 else
2800                         list_add_tail(&vma->unbind_link, &first->unbind_link);
2801         }
2802
2803         return first;
2804 }
2805
2806 static struct xe_vma *vm_bind_ioctl_lookup_vma(struct xe_vm *vm,
2807                                                struct xe_bo *bo,
2808                                                u64 bo_offset_or_userptr,
2809                                                u64 addr, u64 range, u32 op,
2810                                                u64 gt_mask, u32 region)
2811 {
2812         struct ww_acquire_ctx ww;
2813         struct xe_vma *vma, lookup;
2814         int err;
2815
2816         lockdep_assert_held(&vm->lock);
2817
2818         lookup.start = addr;
2819         lookup.end = addr + range - 1;
2820
2821         switch (VM_BIND_OP(op)) {
2822         case XE_VM_BIND_OP_MAP:
2823                 XE_BUG_ON(!bo);
2824
2825                 err = xe_bo_lock(bo, &ww, 0, true);
2826                 if (err)
2827                         return ERR_PTR(err);
2828                 vma = xe_vma_create(vm, bo, bo_offset_or_userptr, addr,
2829                                     addr + range - 1,
2830                                     op & XE_VM_BIND_FLAG_READONLY,
2831                                     gt_mask);
2832                 xe_bo_unlock(bo, &ww);
2833                 if (!vma)
2834                         return ERR_PTR(-ENOMEM);
2835
2836                 xe_vm_insert_vma(vm, vma);
2837                 if (!bo->vm) {
2838                         vm_insert_extobj(vm, vma);
2839                         err = add_preempt_fences(vm, bo);
2840                         if (err) {
2841                                 prep_vma_destroy(vm, vma);
2842                                 xe_vma_destroy_unlocked(vma);
2843
2844                                 return ERR_PTR(err);
2845                         }
2846                 }
2847                 break;
2848         case XE_VM_BIND_OP_UNMAP:
2849                 vma = vm_unbind_lookup_vmas(vm, &lookup);
2850                 break;
2851         case XE_VM_BIND_OP_PREFETCH:
2852                 vma = vm_prefetch_lookup_vmas(vm, &lookup, region);
2853                 break;
2854         case XE_VM_BIND_OP_UNMAP_ALL:
2855                 XE_BUG_ON(!bo);
2856
2857                 err = xe_bo_lock(bo, &ww, 0, true);
2858                 if (err)
2859                         return ERR_PTR(err);
2860                 vma = vm_unbind_all_lookup_vmas(vm, bo);
2861                 if (!vma)
2862                         vma = ERR_PTR(-EINVAL);
2863                 xe_bo_unlock(bo, &ww);
2864                 break;
2865         case XE_VM_BIND_OP_MAP_USERPTR:
2866                 XE_BUG_ON(bo);
2867
2868                 vma = xe_vma_create(vm, NULL, bo_offset_or_userptr, addr,
2869                                     addr + range - 1,
2870                                     op & XE_VM_BIND_FLAG_READONLY,
2871                                     gt_mask);
2872                 if (!vma)
2873                         return ERR_PTR(-ENOMEM);
2874
2875                 err = xe_vma_userptr_pin_pages(vma);
2876                 if (err) {
2877                         prep_vma_destroy(vm, vma);
2878                         xe_vma_destroy_unlocked(vma);
2879
2880                         return ERR_PTR(err);
2881                 } else {
2882                         xe_vm_insert_vma(vm, vma);
2883                 }
2884                 break;
2885         default:
2886                 XE_BUG_ON("NOT POSSIBLE");
2887                 vma = ERR_PTR(-EINVAL);
2888         }
2889
2890         return vma;
2891 }
2892
2893 #ifdef TEST_VM_ASYNC_OPS_ERROR
2894 #define SUPPORTED_FLAGS \
2895         (FORCE_ASYNC_OP_ERROR | XE_VM_BIND_FLAG_ASYNC | \
2896          XE_VM_BIND_FLAG_READONLY | XE_VM_BIND_FLAG_IMMEDIATE | 0xffff)
2897 #else
2898 #define SUPPORTED_FLAGS \
2899         (XE_VM_BIND_FLAG_ASYNC | XE_VM_BIND_FLAG_READONLY | \
2900          XE_VM_BIND_FLAG_IMMEDIATE | 0xffff)
2901 #endif
2902 #define XE_64K_PAGE_MASK 0xffffull
2903
2904 #define MAX_BINDS       512     /* FIXME: Picking a random upper limit */
2905
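/*
 * Copy in and sanity-check the array of bind ops. For a single bind the op
 * embedded in the ioctl args is used directly; for num_binds > 1 an array is
 * allocated and handed back through @bind_ops (freed here only on error).
 */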
2906 static int vm_bind_ioctl_check_args(struct xe_device *xe,
2907                                     struct drm_xe_vm_bind *args,
2908                                     struct drm_xe_vm_bind_op **bind_ops,
2909                                     bool *async)
2910 {
2911         int err;
2912         int i;
2913
2914         if (XE_IOCTL_ERR(xe, args->extensions) ||
2915             XE_IOCTL_ERR(xe, !args->num_binds) ||
2916             XE_IOCTL_ERR(xe, args->num_binds > MAX_BINDS))
2917                 return -EINVAL;
2918
2919         if (args->num_binds > 1) {
2920                 u64 __user *bind_user =
2921                         u64_to_user_ptr(args->vector_of_binds);
2922
2923                 *bind_ops = kmalloc(sizeof(struct drm_xe_vm_bind_op) *
2924                                     args->num_binds, GFP_KERNEL);
2925                 if (!*bind_ops)
2926                         return -ENOMEM;
2927
2928                 err = __copy_from_user(*bind_ops, bind_user,
2929                                        sizeof(struct drm_xe_vm_bind_op) *
2930                                        args->num_binds);
2931                 if (XE_IOCTL_ERR(xe, err)) {
2932                         err = -EFAULT;
2933                         goto free_bind_ops;
2934                 }
2935         } else {
2936                 *bind_ops = &args->bind;
2937         }
2938
2939         for (i = 0; i < args->num_binds; ++i) {
2940                 u64 range = (*bind_ops)[i].range;
2941                 u64 addr = (*bind_ops)[i].addr;
2942                 u32 op = (*bind_ops)[i].op;
2943                 u32 obj = (*bind_ops)[i].obj;
2944                 u64 obj_offset = (*bind_ops)[i].obj_offset;
2945                 u32 region = (*bind_ops)[i].region;
2946
2947                 if (i == 0) {
2948                         *async = !!(op & XE_VM_BIND_FLAG_ASYNC);
2949                 } else if (XE_IOCTL_ERR(xe, !*async) ||
2950                            XE_IOCTL_ERR(xe, !(op & XE_VM_BIND_FLAG_ASYNC)) ||
2951                            XE_IOCTL_ERR(xe, VM_BIND_OP(op) ==
2952                                         XE_VM_BIND_OP_RESTART)) {
2953                         err = -EINVAL;
2954                         goto free_bind_ops;
2955                 }
2956
2957                 if (XE_IOCTL_ERR(xe, !*async &&
2958                                  VM_BIND_OP(op) == XE_VM_BIND_OP_UNMAP_ALL)) {
2959                         err = -EINVAL;
2960                         goto free_bind_ops;
2961                 }
2962
2963                 if (XE_IOCTL_ERR(xe, !*async &&
2964                                  VM_BIND_OP(op) == XE_VM_BIND_OP_PREFETCH)) {
2965                         err = -EINVAL;
2966                         goto free_bind_ops;
2967                 }
2968
2969                 if (XE_IOCTL_ERR(xe, VM_BIND_OP(op) >
2970                                  XE_VM_BIND_OP_PREFETCH) ||
2971                     XE_IOCTL_ERR(xe, op & ~SUPPORTED_FLAGS) ||
2972                     XE_IOCTL_ERR(xe, !obj &&
2973                                  VM_BIND_OP(op) == XE_VM_BIND_OP_MAP) ||
2974                     XE_IOCTL_ERR(xe, !obj &&
2975                                  VM_BIND_OP(op) == XE_VM_BIND_OP_UNMAP_ALL) ||
2976                     XE_IOCTL_ERR(xe, addr &&
2977                                  VM_BIND_OP(op) == XE_VM_BIND_OP_UNMAP_ALL) ||
2978                     XE_IOCTL_ERR(xe, range &&
2979                                  VM_BIND_OP(op) == XE_VM_BIND_OP_UNMAP_ALL) ||
2980                     XE_IOCTL_ERR(xe, obj &&
2981                                  VM_BIND_OP(op) == XE_VM_BIND_OP_MAP_USERPTR) ||
2982                     XE_IOCTL_ERR(xe, obj &&
2983                                  VM_BIND_OP(op) == XE_VM_BIND_OP_PREFETCH) ||
2984                     XE_IOCTL_ERR(xe, region &&
2985                                  VM_BIND_OP(op) != XE_VM_BIND_OP_PREFETCH) ||
2986                     XE_IOCTL_ERR(xe, !(BIT(region) &
2987                                        xe->info.mem_region_mask)) ||
2988                     XE_IOCTL_ERR(xe, obj &&
2989                                  VM_BIND_OP(op) == XE_VM_BIND_OP_UNMAP)) {
2990                         err = -EINVAL;
2991                         goto free_bind_ops;
2992                 }
2993
2994                 if (XE_IOCTL_ERR(xe, obj_offset & ~PAGE_MASK) ||
2995                     XE_IOCTL_ERR(xe, addr & ~PAGE_MASK) ||
2996                     XE_IOCTL_ERR(xe, range & ~PAGE_MASK) ||
2997                     XE_IOCTL_ERR(xe, !range && VM_BIND_OP(op) !=
2998                                  XE_VM_BIND_OP_RESTART &&
2999                                  VM_BIND_OP(op) != XE_VM_BIND_OP_UNMAP_ALL)) {
3000                         err = -EINVAL;
3001                         goto free_bind_ops;
3002                 }
3003         }
3004
3005         return 0;
3006
3007 free_bind_ops:
3008         if (args->num_binds > 1)
3009                 kfree(*bind_ops);
3010         return err;
3011 }
3012
3013 int xe_vm_bind_ioctl(struct drm_device *dev, void *data, struct drm_file *file)
3014 {
3015         struct xe_device *xe = to_xe_device(dev);
3016         struct xe_file *xef = to_xe_file(file);
3017         struct drm_xe_vm_bind *args = data;
3018         struct drm_xe_sync __user *syncs_user;
3019         struct xe_bo **bos = NULL;
3020         struct xe_vma **vmas = NULL;
3021         struct xe_vm *vm;
3022         struct xe_engine *e = NULL;
3023         u32 num_syncs;
3024         struct xe_sync_entry *syncs = NULL;
3025         struct drm_xe_vm_bind_op *bind_ops;
3026         bool async;
3027         int err;
3028         int i, j = 0;
3029
3030         err = vm_bind_ioctl_check_args(xe, args, &bind_ops, &async);
3031         if (err)
3032                 return err;
3033
3034         vm = xe_vm_lookup(xef, args->vm_id);
3035         if (XE_IOCTL_ERR(xe, !vm)) {
3036                 err = -EINVAL;
3037                 goto free_objs;
3038         }
3039
3040         if (XE_IOCTL_ERR(xe, xe_vm_is_closed(vm))) {
3041                 DRM_ERROR("VM closed while we were looking it up?\n");
3042                 err = -ENOENT;
3043                 goto put_vm;
3044         }
3045
3046         if (args->engine_id) {
3047                 e = xe_engine_lookup(xef, args->engine_id);
3048                 if (XE_IOCTL_ERR(xe, !e)) {
3049                         err = -ENOENT;
3050                         goto put_vm;
3051                 }
3052                 if (XE_IOCTL_ERR(xe, !(e->flags & ENGINE_FLAG_VM))) {
3053                         err = -EINVAL;
3054                         goto put_engine;
3055                 }
3056         }
3057
3058         if (VM_BIND_OP(bind_ops[0].op) == XE_VM_BIND_OP_RESTART) {
3059                 if (XE_IOCTL_ERR(xe, !(vm->flags & XE_VM_FLAG_ASYNC_BIND_OPS)))
3060                         err = -ENOTSUPP;
3061                 if (XE_IOCTL_ERR(xe, !err && args->num_syncs))
3062                         err = -EINVAL;
3063                 if (XE_IOCTL_ERR(xe, !err && !vm->async_ops.error))
3064                         err = -EPROTO;
3065
3066                 if (!err) {
3067                         down_write(&vm->lock);
3068                         trace_xe_vm_restart(vm);
3069                         vm_set_async_error(vm, 0);
3070                         up_write(&vm->lock);
3071
3072                         queue_work(system_unbound_wq, &vm->async_ops.work);
3073
3074                         /* Rebinds may have been blocked, give worker a kick */
3075                         if (xe_vm_in_compute_mode(vm))
3076                                 queue_work(vm->xe->ordered_wq,
3077                                            &vm->preempt.rebind_work);
3078                 }
3079
3080                 goto put_engine;
3081         }
3082
3083         if (XE_IOCTL_ERR(xe, !vm->async_ops.error &&
3084                          async != !!(vm->flags & XE_VM_FLAG_ASYNC_BIND_OPS))) {
3085                 err = -ENOTSUPP;
3086                 goto put_engine;
3087         }
3088
3089         for (i = 0; i < args->num_binds; ++i) {
3090                 u64 range = bind_ops[i].range;
3091                 u64 addr = bind_ops[i].addr;
3092
3093                 if (XE_IOCTL_ERR(xe, range > vm->size) ||
3094                     XE_IOCTL_ERR(xe, addr > vm->size - range)) {
3095                         err = -EINVAL;
3096                         goto put_engine;
3097                 }
3098
3099                 if (bind_ops[i].gt_mask) {
3100                         u64 valid_gts = BIT(xe->info.tile_count) - 1;
3101
3102                         if (XE_IOCTL_ERR(xe, bind_ops[i].gt_mask &
3103                                          ~valid_gts)) {
3104                                 err = -EINVAL;
3105                                 goto put_engine;
3106                         }
3107                 }
3108         }
3109
3110         bos = kcalloc(args->num_binds, sizeof(*bos), GFP_KERNEL);
3111         if (!bos) {
3112                 err = -ENOMEM;
3113                 goto put_engine;
3114         }
3115
3116         vmas = kcalloc(args->num_binds, sizeof(*vmas), GFP_KERNEL);
3117         if (!vmas) {
3118                 err = -ENOMEM;
3119                 goto put_engine;
3120         }
3121
3122         for (i = 0; i < args->num_binds; ++i) {
3123                 struct drm_gem_object *gem_obj;
3124                 u64 range = bind_ops[i].range;
3125                 u64 addr = bind_ops[i].addr;
3126                 u32 obj = bind_ops[i].obj;
3127                 u64 obj_offset = bind_ops[i].obj_offset;
3128
3129                 if (!obj)
3130                         continue;
3131
3132                 gem_obj = drm_gem_object_lookup(file, obj);
3133                 if (XE_IOCTL_ERR(xe, !gem_obj)) {
3134                         err = -ENOENT;
3135                         goto put_obj;
3136                 }
3137                 bos[i] = gem_to_xe_bo(gem_obj);
3138
3139                 if (XE_IOCTL_ERR(xe, range > bos[i]->size) ||
3140                     XE_IOCTL_ERR(xe, obj_offset >
3141                                  bos[i]->size - range)) {
3142                         err = -EINVAL;
3143                         goto put_obj;
3144                 }
3145
3146                 if (bos[i]->flags & XE_BO_INTERNAL_64K) {
3147                         if (XE_IOCTL_ERR(xe, obj_offset &
3148                                          XE_64K_PAGE_MASK) ||
3149                             XE_IOCTL_ERR(xe, addr & XE_64K_PAGE_MASK) ||
3150                             XE_IOCTL_ERR(xe, range & XE_64K_PAGE_MASK)) {
3151                                 err = -EINVAL;
3152                                 goto put_obj;
3153                         }
3154                 }
3155         }
3156
3157         if (args->num_syncs) {
3158                 syncs = kcalloc(args->num_syncs, sizeof(*syncs), GFP_KERNEL);
3159                 if (!syncs) {
3160                         err = -ENOMEM;
3161                         goto put_obj;
3162                 }
3163         }
3164
3165         syncs_user = u64_to_user_ptr(args->syncs);
3166         for (num_syncs = 0; num_syncs < args->num_syncs; num_syncs++) {
3167                 err = xe_sync_entry_parse(xe, xef, &syncs[num_syncs],
3168                                           &syncs_user[num_syncs], false,
3169                                           xe_vm_no_dma_fences(vm));
3170                 if (err)
3171                         goto free_syncs;
3172         }
3173
3174         err = down_write_killable(&vm->lock);
3175         if (err)
3176                 goto free_syncs;
3177
3178         /* Do some error checking first to make the unwind easier */
3179         for (i = 0; i < args->num_binds; ++i) {
3180                 u64 range = bind_ops[i].range;
3181                 u64 addr = bind_ops[i].addr;
3182                 u32 op = bind_ops[i].op;
3183
3184                 err = __vm_bind_ioctl_lookup_vma(vm, bos[i], addr, range, op);
3185                 if (err)
3186                         goto release_vm_lock;
3187         }
3188
3189         for (i = 0; i < args->num_binds; ++i) {
3190                 u64 range = bind_ops[i].range;
3191                 u64 addr = bind_ops[i].addr;
3192                 u32 op = bind_ops[i].op;
3193                 u64 obj_offset = bind_ops[i].obj_offset;
3194                 u64 gt_mask = bind_ops[i].gt_mask;
3195                 u32 region = bind_ops[i].region;
3196
3197                 vmas[i] = vm_bind_ioctl_lookup_vma(vm, bos[i], obj_offset,
3198                                                    addr, range, op, gt_mask,
3199                                                    region);
3200                 if (IS_ERR(vmas[i])) {
3201                         err = PTR_ERR(vmas[i]);
3202                         vmas[i] = NULL;
3203                         goto destroy_vmas;
3204                 }
3205         }
3206
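        /*
         * Submit the binds: in-fence syncs go to the first bind and out-fence
         * syncs to the last. Async binds take an extra vm/engine ref per pass
         * since the worker drops one as each op completes.
         */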
3207         for (j = 0; j < args->num_binds; ++j) {
3208                 struct xe_sync_entry *__syncs;
3209                 u32 __num_syncs = 0;
3210                 bool first_or_last = j == 0 || j == args->num_binds - 1;
3211
3212                 if (args->num_binds == 1) {
3213                         __num_syncs = num_syncs;
3214                         __syncs = syncs;
3215                 } else if (first_or_last && num_syncs) {
3216                         bool first = j == 0;
3217
3218                         __syncs = kmalloc_array(num_syncs, sizeof(*__syncs),
3219                                                 GFP_KERNEL);
3220                         if (!__syncs) {
3221                                 err = -ENOMEM;
3222                                 break;
3223                         }
3224
3225                         /* in-syncs on first bind, out-syncs on last bind */
3226                         for (i = 0; i < num_syncs; ++i) {
3227                                 bool signal = syncs[i].flags &
3228                                         DRM_XE_SYNC_SIGNAL;
3229
3230                                 if ((first && !signal) || (!first && signal))
3231                                         __syncs[__num_syncs++] = syncs[i];
3232                         }
3233                 } else {
3234                         __num_syncs = 0;
3235                         __syncs = NULL;
3236                 }
3237
3238                 if (async) {
3239                         bool last = j == args->num_binds - 1;
3240
3241                         /*
3242                          * Each async worker pass drops a ref; take an extra one
3243                          * here for all but the last bind (one set taken above).
3244                          */
3245                         if (!last) {
3246                                 if (e)
3247                                         xe_engine_get(e);
3248                                 xe_vm_get(vm);
3249                         }
3250
3251                         err = vm_bind_ioctl_async(vm, vmas[j], e, bos[j],
3252                                                   bind_ops + j, __syncs,
3253                                                   __num_syncs);
3254                         if (err && !last) {
3255                                 if (e)
3256                                         xe_engine_put(e);
3257                                 xe_vm_put(vm);
3258                         }
3259                         if (err)
3260                                 break;
3261                 } else {
3262                         XE_BUG_ON(j != 0);      /* Not supported */
3263                         err = vm_bind_ioctl(vm, vmas[j], e, bos[j],
3264                                             bind_ops + j, __syncs,
3265                                             __num_syncs, NULL);
3266                         break;  /* Needed so cleanup loops work */
3267                 }
3268         }
3269
3270         /* Most of the cleanup is owned by the async bind worker */
3271         if (async && !err) {
3272                 up_write(&vm->lock);
3273                 if (args->num_binds > 1)
3274                         kfree(syncs);
3275                 goto free_objs;
3276         }
3277
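/*
 * Error unwind: destroy the VMAs created for bind ops that were never handed
 * off; ops already queued are cleaned up by the async bind worker.
 */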
3278 destroy_vmas:
3279         for (i = j; err && i < args->num_binds; ++i) {
3280                 u32 op = bind_ops[i].op;
3281                 struct xe_vma *vma, *next;
3282
3283                 if (!vmas[i])
3284                         break;
3285
3286                 list_for_each_entry_safe(vma, next, &vmas[i]->unbind_link,
3287                                          unbind_link) {
3288                         list_del_init(&vma->unbind_link);
3289                         if (!vma->destroyed) {
3290                                 prep_vma_destroy(vm, vma);
3291                                 xe_vma_destroy_unlocked(vma);
3292                         }
3293                 }
3294
3295                 switch (VM_BIND_OP(op)) {
3296                 case XE_VM_BIND_OP_MAP:
3297                         prep_vma_destroy(vm, vmas[i]);
3298                         xe_vma_destroy_unlocked(vmas[i]);
3299                         break;
3300                 case XE_VM_BIND_OP_MAP_USERPTR:
3301                         prep_vma_destroy(vm, vmas[i]);
3302                         xe_vma_destroy_unlocked(vmas[i]);
3303                         break;
3304                 }
3305         }
3306 release_vm_lock:
3307         up_write(&vm->lock);
3308 free_syncs:
3309         while (num_syncs--) {
3310                 if (async && j &&
3311                     !(syncs[num_syncs].flags & DRM_XE_SYNC_SIGNAL))
3312                         continue;       /* Still in async worker */
3313                 xe_sync_entry_cleanup(&syncs[num_syncs]);
3314         }
3315
3316         kfree(syncs);
3317 put_obj:
3318         for (i = j; i < args->num_binds; ++i)
3319                 xe_bo_put(bos[i]);
3320 put_engine:
3321         if (e)
3322                 xe_engine_put(e);
3323 put_vm:
3324         xe_vm_put(vm);
3325 free_objs:
3326         kfree(bos);
3327         kfree(vmas);
3328         if (args->num_binds > 1)
3329                 kfree(bind_ops);
3330         return err;
3331 }
3332
3333 /*
3334  * XXX: Using the TTM wrappers for now, likely can call into dma-resv code
3335  * directly to optimize. Also this likely should be an inline function.
3336  */
3337 int xe_vm_lock(struct xe_vm *vm, struct ww_acquire_ctx *ww,
3338                int num_resv, bool intr)
3339 {
3340         struct ttm_validate_buffer tv_vm;
3341         LIST_HEAD(objs);
3342         LIST_HEAD(dups);
3343
3344         XE_BUG_ON(!ww);
3345
3346         tv_vm.num_shared = num_resv;
3347         tv_vm.bo = xe_vm_ttm_bo(vm);
3348         list_add_tail(&tv_vm.head, &objs);
3349
3350         return ttm_eu_reserve_buffers(ww, &objs, intr, &dups);
3351 }
3352
3353 void xe_vm_unlock(struct xe_vm *vm, struct ww_acquire_ctx *ww)
3354 {
3355         dma_resv_unlock(&vm->resv);
3356         ww_acquire_fini(ww);
3357 }
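
/*
 * Illustrative sketch only (not used anywhere in the driver): the XXX comment
 * above suggests taking the VM's reservation lock directly through dma-resv
 * rather than the TTM execbuf helpers. xe_vm_lock_direct() is a hypothetical
 * name for what such an inline variant could look like.
 */
static inline int xe_vm_lock_direct(struct xe_vm *vm, int num_resv, bool intr)
{
        int err;

        /* Take the reservation lock, interruptibly if requested. */
        err = intr ? dma_resv_lock_interruptible(&vm->resv, NULL) :
                dma_resv_lock(&vm->resv, NULL);
        if (err)
                return err;

        /* Pre-reserve fence slots, as ttm_eu_reserve_buffers() does today. */
        err = dma_resv_reserve_fences(&vm->resv, num_resv);
        if (err)
                dma_resv_unlock(&vm->resv);

        return err;
}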
3358
3359 /**
3360  * xe_vm_invalidate_vma - invalidate GPU mappings for VMA without a lock
3361  * @vma: VMA to invalidate
3362  *
3363  * Walks a list of page-table leaves, zeroing the entries owned by this
3364  * VMA, invalidates the TLBs, and blocks until the TLB invalidation is
3365  * complete.
3366  *
3367  * Return: 0 on success, negative error code otherwise.
3368  */
3369 int xe_vm_invalidate_vma(struct xe_vma *vma)
3370 {
3371         struct xe_device *xe = vma->vm->xe;
3372         struct xe_gt *gt;
3373         u32 gt_needs_invalidate = 0;
3374         int seqno[XE_MAX_GT];
3375         u8 id;
3376         int ret;
3377
3378         XE_BUG_ON(!xe_vm_in_fault_mode(vma->vm));
3379         trace_xe_vma_usm_invalidate(vma);
3380
3381         /* Check that we don't race with page-table updates */
3382         if (IS_ENABLED(CONFIG_PROVE_LOCKING)) {
3383                 if (xe_vma_is_userptr(vma)) {
3384                         WARN_ON_ONCE(!mmu_interval_check_retry
3385                                      (&vma->userptr.notifier,
3386                                       vma->userptr.notifier_seq));
3387                         WARN_ON_ONCE(!dma_resv_test_signaled(&vma->vm->resv,
3388                                                              DMA_RESV_USAGE_BOOKKEEP));
3389
3390                 } else {
3391                         xe_bo_assert_held(vma->bo);
3392                 }
3393         }
3394
3395         for_each_gt(gt, xe, id) {
3396                 if (xe_pt_zap_ptes(gt, vma)) {
3397                         gt_needs_invalidate |= BIT(id);
3398                         xe_device_wmb(xe);
3399                         seqno[id] = xe_gt_tlb_invalidation_vma(gt, NULL, vma);
3400                         if (seqno[id] < 0)
3401                                 return seqno[id];
3402                 }
3403         }
3404
3405         for_each_gt(gt, xe, id) {
3406                 if (gt_needs_invalidate & BIT(id)) {
3407                         ret = xe_gt_tlb_invalidation_wait(gt, seqno[id]);
3408                         if (ret < 0)
3409                                 return ret;
3410                 }
3411         }
3412
3413         vma->usm.gt_invalidated = vma->gt_mask;
3414
3415         return 0;
3416 }
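
/*
 * Usage sketch, for illustration only (hypothetical caller, not part of the
 * driver): invalidate the GPU mappings of a single VMA and warn if the zap
 * or the TLB invalidation wait fails.
 */
static inline void example_zap_vma(struct xe_vma *vma)
{
        int err = xe_vm_invalidate_vma(vma);

        if (err)
                drm_warn(&vma->vm->xe->drm,
                         "VMA invalidation failed: %d\n", err);
}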
3417
3418 #if IS_ENABLED(CONFIG_DRM_XE_SIMPLE_ERROR_CAPTURE)
3419 int xe_analyze_vm(struct drm_printer *p, struct xe_vm *vm, int gt_id)
3420 {
3421         struct rb_node *node;
3422         bool is_vram;
3423         u64 addr;
3424
3425         if (!down_read_trylock(&vm->lock)) {
3426                 drm_printf(p, " Failed to acquire VM lock to dump capture\n");
3427                 return 0;
3428         }
3429         if (vm->pt_root[gt_id]) {
3430                 addr = xe_bo_addr(vm->pt_root[gt_id]->bo, 0, XE_PAGE_SIZE,
3431                                   &is_vram);
3432                 drm_printf(p, " VM root: A:0x%llx %s\n", addr, is_vram ? "VRAM" : "SYS");
3433         }
3434
3435         for (node = rb_first(&vm->vmas); node; node = rb_next(node)) {
3436                 struct xe_vma *vma = to_xe_vma(node);
3437                 bool is_userptr = xe_vma_is_userptr(vma);
3438
3439                 if (is_userptr) {
3440                         struct xe_res_cursor cur;
3441
3442                         xe_res_first_sg(vma->userptr.sg, 0, XE_PAGE_SIZE,
3443                                         &cur);
3444                         addr = xe_res_dma(&cur);
3445                 } else {
3446                         addr = __xe_bo_addr(vma->bo, 0, XE_PAGE_SIZE, &is_vram);
3447                 }
3448                 drm_printf(p, " [%016llx-%016llx] S:0x%016llx A:%016llx %s\n",
3449                            vma->start, vma->end, vma->end - vma->start + 1ull,
3450                            addr, is_userptr ? "USR" : is_vram ? "VRAM" : "SYS");
3451         }
3452         up_read(&vm->lock);
3453
3454         return 0;
3455 }
3456 #else
3457 int xe_analyze_vm(struct drm_printer *p, struct xe_vm *vm, int gt_id)
3458 {
3459         return 0;
3460 }
3461 #endif