drm/xe: VM LRU bulk move
[linux-2.6-microblaze.git] / drivers/gpu/drm/xe/xe_vm.c
1 // SPDX-License-Identifier: MIT
2 /*
3  * Copyright © 2021 Intel Corporation
4  */
5
6 #include "xe_vm.h"
7
8 #include <linux/dma-fence-array.h>
9
10 #include <drm/drm_print.h>
11 #include <drm/ttm/ttm_execbuf_util.h>
12 #include <drm/ttm/ttm_tt.h>
13 #include <drm/xe_drm.h>
14 #include <linux/delay.h>
15 #include <linux/kthread.h>
16 #include <linux/mm.h>
17 #include <linux/swap.h>
18
19 #include "xe_bo.h"
20 #include "xe_device.h"
21 #include "xe_engine.h"
22 #include "xe_gt.h"
23 #include "xe_gt_pagefault.h"
24 #include "xe_gt_tlb_invalidation.h"
25 #include "xe_migrate.h"
26 #include "xe_pm.h"
27 #include "xe_preempt_fence.h"
28 #include "xe_pt.h"
29 #include "xe_res_cursor.h"
30 #include "xe_sync.h"
31 #include "xe_trace.h"
32
33 #define TEST_VM_ASYNC_OPS_ERROR
34
35 /**
36  * xe_vma_userptr_check_repin() - Advisory check for repin needed
37  * @vma: The userptr vma
38  *
39  * Check if the userptr vma has been invalidated since last successful
40  * repin. The check is advisory only and the function can be called
41  * without the vm->userptr.notifier_lock held. There is no guarantee that the
42  * vma userptr will remain valid after a lockless check, so typically
43  * the call needs to be followed by a proper check under the notifier_lock.
44  *
45  * Return: 0 if userptr vma is valid, -EAGAIN otherwise; repin recommended.
46  */
47 int xe_vma_userptr_check_repin(struct xe_vma *vma)
48 {
49         return mmu_interval_check_retry(&vma->userptr.notifier,
50                                         vma->userptr.notifier_seq) ?
51                 -EAGAIN : 0;
52 }
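/*
 * Illustrative usage sketch (not taken from this file): callers are expected
 * to pair the advisory check above with a repin and a final authoritative
 * check under the notifier_lock before relying on the userptr pages, roughly:
 *
 *    if (xe_vma_userptr_check_repin(vma) == -EAGAIN)
 *            err = xe_vma_userptr_pin_pages(vma);
 *    ...
 *    down_read(&vm->userptr.notifier_lock);
 *    if (__xe_vm_userptr_needs_repin(vm))
 *            ... back off, drop locks and retry ...
 *    up_read(&vm->userptr.notifier_lock);
 *
 * preempt_rebind_work_func() below implements the real in-tree version of
 * this pattern at VM granularity.
 */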
53
54 int xe_vma_userptr_pin_pages(struct xe_vma *vma)
55 {
56         struct xe_vm *vm = vma->vm;
57         struct xe_device *xe = vm->xe;
58         const unsigned long num_pages =
59                 (vma->end - vma->start + 1) >> PAGE_SHIFT;
60         struct page **pages;
61         bool in_kthread = !current->mm;
62         unsigned long notifier_seq;
63         int pinned, ret, i;
64         bool read_only = vma->pte_flags & XE_PTE_FLAG_READ_ONLY;
65
66         lockdep_assert_held(&vm->lock);
67         XE_BUG_ON(!xe_vma_is_userptr(vma));
68 retry:
69         if (vma->destroyed)
70                 return 0;
71
72         notifier_seq = mmu_interval_read_begin(&vma->userptr.notifier);
73         if (notifier_seq == vma->userptr.notifier_seq)
74                 return 0;
75
76         pages = kvmalloc_array(num_pages, sizeof(*pages), GFP_KERNEL);
77         if (!pages)
78                 return -ENOMEM;
79
80         if (vma->userptr.sg) {
81                 dma_unmap_sgtable(xe->drm.dev,
82                                   vma->userptr.sg,
83                                   read_only ? DMA_TO_DEVICE :
84                                   DMA_BIDIRECTIONAL, 0);
85                 sg_free_table(vma->userptr.sg);
86                 vma->userptr.sg = NULL;
87         }
88
89         pinned = ret = 0;
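	/*
	 * When running in a kernel worker there is no current->mm, so borrow
	 * the mm the notifier was registered against for the duration of the
	 * get_user_pages_fast() calls below; mmget_not_zero() fails if the
	 * owning process has already exited.
	 */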
90         if (in_kthread) {
91                 if (!mmget_not_zero(vma->userptr.notifier.mm)) {
92                         ret = -EFAULT;
93                         goto mm_closed;
94                 }
95                 kthread_use_mm(vma->userptr.notifier.mm);
96         }
97
98         while (pinned < num_pages) {
99                 ret = get_user_pages_fast(vma->userptr.ptr + pinned * PAGE_SIZE,
100                                           num_pages - pinned,
101                                           read_only ? 0 : FOLL_WRITE,
102                                           &pages[pinned]);
103                 if (ret < 0) {
104                         if (in_kthread)
105                                 ret = 0;
106                         break;
107                 }
108
109                 pinned += ret;
110                 ret = 0;
111         }
112
113         if (in_kthread) {
114                 kthread_unuse_mm(vma->userptr.notifier.mm);
115                 mmput(vma->userptr.notifier.mm);
116         }
117 mm_closed:
118         if (ret)
119                 goto out;
120
121         ret = sg_alloc_table_from_pages_segment(&vma->userptr.sgt, pages,
122                                                 pinned, 0,
123                                                 (u64)pinned << PAGE_SHIFT,
124                                                 xe_sg_segment_size(xe->drm.dev),
125                                                 GFP_KERNEL);
126         if (ret) {
127                 vma->userptr.sg = NULL;
128                 goto out;
129         }
130         vma->userptr.sg = &vma->userptr.sgt;
131
132         ret = dma_map_sgtable(xe->drm.dev, vma->userptr.sg,
133                               read_only ? DMA_TO_DEVICE :
134                               DMA_BIDIRECTIONAL,
135                               DMA_ATTR_SKIP_CPU_SYNC |
136                               DMA_ATTR_NO_KERNEL_MAPPING);
137         if (ret) {
138                 sg_free_table(vma->userptr.sg);
139                 vma->userptr.sg = NULL;
140                 goto out;
141         }
142
143         for (i = 0; i < pinned; ++i) {
144                 if (!read_only) {
145                         lock_page(pages[i]);
146                         set_page_dirty(pages[i]);
147                         unlock_page(pages[i]);
148                 }
149
150                 mark_page_accessed(pages[i]);
151         }
152
153 out:
154         release_pages(pages, pinned);
155         kvfree(pages);
156
157         if (!(ret < 0)) {
158                 vma->userptr.notifier_seq = notifier_seq;
159                 if (xe_vma_userptr_check_repin(vma) == -EAGAIN)
160                         goto retry;
161         }
162
163         return ret < 0 ? ret : 0;
164 }
165
166 static bool preempt_fences_waiting(struct xe_vm *vm)
167 {
168         struct xe_engine *e;
169
170         lockdep_assert_held(&vm->lock);
171         xe_vm_assert_held(vm);
172
173         list_for_each_entry(e, &vm->preempt.engines, compute.link) {
174                 if (!e->compute.pfence || (e->compute.pfence &&
175                     test_bit(DMA_FENCE_FLAG_ENABLE_SIGNAL_BIT,
176                              &e->compute.pfence->flags))) {
177                         return true;
178                 }
179         }
180
181         return false;
182 }
183
184 static void free_preempt_fences(struct list_head *list)
185 {
186         struct list_head *link, *next;
187
188         list_for_each_safe(link, next, list)
189                 xe_preempt_fence_free(to_preempt_fence_from_link(link));
190 }
191
192 static int alloc_preempt_fences(struct xe_vm *vm, struct list_head *list,
193                                 unsigned int *count)
194 {
195         lockdep_assert_held(&vm->lock);
196         xe_vm_assert_held(vm);
197
198         if (*count >= vm->preempt.num_engines)
199                 return 0;
200
201         for (; *count < vm->preempt.num_engines; ++(*count)) {
202                 struct xe_preempt_fence *pfence = xe_preempt_fence_alloc();
203
204                 if (IS_ERR(pfence))
205                         return PTR_ERR(pfence);
206
207                 list_move_tail(xe_preempt_fence_link(pfence), list);
208         }
209
210         return 0;
211 }
212
213 static int wait_for_existing_preempt_fences(struct xe_vm *vm)
214 {
215         struct xe_engine *e;
216
217         xe_vm_assert_held(vm);
218
219         list_for_each_entry(e, &vm->preempt.engines, compute.link) {
220                 if (e->compute.pfence) {
221                         long timeout = dma_fence_wait(e->compute.pfence, false);
222
223                         if (timeout < 0)
224                                 return -ETIME;
225                         dma_fence_put(e->compute.pfence);
226                         e->compute.pfence = NULL;
227                 }
228         }
229
230         return 0;
231 }
232
233 static bool xe_vm_is_idle(struct xe_vm *vm)
234 {
235         struct xe_engine *e;
236
237         xe_vm_assert_held(vm);
238         list_for_each_entry(e, &vm->preempt.engines, compute.link) {
239                 if (!xe_engine_is_idle(e))
240                         return false;
241         }
242
243         return true;
244 }
245
246 static void arm_preempt_fences(struct xe_vm *vm, struct list_head *list)
247 {
248         struct list_head *link;
249         struct xe_engine *e;
250
251         list_for_each_entry(e, &vm->preempt.engines, compute.link) {
252                 struct dma_fence *fence;
253
254                 link = list->next;
255                 XE_BUG_ON(link == list);
256
257                 fence = xe_preempt_fence_arm(to_preempt_fence_from_link(link),
258                                              e, e->compute.context,
259                                              ++e->compute.seqno);
260                 dma_fence_put(e->compute.pfence);
261                 e->compute.pfence = fence;
262         }
263 }
264
265 static int add_preempt_fences(struct xe_vm *vm, struct xe_bo *bo)
266 {
267         struct xe_engine *e;
268         struct ww_acquire_ctx ww;
269         int err;
270
271         err = xe_bo_lock(bo, &ww, vm->preempt.num_engines, true);
272         if (err)
273                 return err;
274
275         list_for_each_entry(e, &vm->preempt.engines, compute.link)
276                 if (e->compute.pfence) {
277                         dma_resv_add_fence(bo->ttm.base.resv,
278                                            e->compute.pfence,
279                                            DMA_RESV_USAGE_BOOKKEEP);
280                 }
281
282         xe_bo_unlock(bo, &ww);
283         return 0;
284 }
285
286 /**
287  * xe_vm_fence_all_extobjs() - Add a fence to vm's external objects' resv
288  * @vm: The vm.
289  * @fence: The fence to add.
290  * @usage: The resv usage for the fence.
291  *
292  * Loops over all of the vm's external object bindings and adds a @fence
293  * with the given @usage to all of the external object's reservation
294  * objects.
295  */
296 void xe_vm_fence_all_extobjs(struct xe_vm *vm, struct dma_fence *fence,
297                              enum dma_resv_usage usage)
298 {
299         struct xe_vma *vma;
300
301         list_for_each_entry(vma, &vm->extobj.list, extobj.link)
302                 dma_resv_add_fence(vma->bo->ttm.base.resv, fence, usage);
303 }
304
305 static void resume_and_reinstall_preempt_fences(struct xe_vm *vm)
306 {
307         struct xe_engine *e;
308
309         lockdep_assert_held(&vm->lock);
310         xe_vm_assert_held(vm);
311
312         list_for_each_entry(e, &vm->preempt.engines, compute.link) {
313                 e->ops->resume(e);
314
315                 dma_resv_add_fence(&vm->resv, e->compute.pfence,
316                                    DMA_RESV_USAGE_BOOKKEEP);
317                 xe_vm_fence_all_extobjs(vm, e->compute.pfence,
318                                         DMA_RESV_USAGE_BOOKKEEP);
319         }
320 }
321
322 int xe_vm_add_compute_engine(struct xe_vm *vm, struct xe_engine *e)
323 {
324         struct ttm_validate_buffer tv_onstack[XE_ONSTACK_TV];
325         struct ttm_validate_buffer *tv;
326         struct ww_acquire_ctx ww;
327         struct list_head objs;
328         struct dma_fence *pfence;
329         int err;
330         bool wait;
331
332         XE_BUG_ON(!xe_vm_in_compute_mode(vm));
333
334         down_write(&vm->lock);
335
336         err = xe_vm_lock_dma_resv(vm, &ww, tv_onstack, &tv, &objs, true, 1);
337         if (err)
338                 goto out_unlock_outer;
339
340         pfence = xe_preempt_fence_create(e, e->compute.context,
341                                          ++e->compute.seqno);
342         if (!pfence) {
343                 err = -ENOMEM;
344                 goto out_unlock;
345         }
346
347         list_add(&e->compute.link, &vm->preempt.engines);
348         ++vm->preempt.num_engines;
349         e->compute.pfence = pfence;
350
351         down_read(&vm->userptr.notifier_lock);
352
353         dma_resv_add_fence(&vm->resv, pfence,
354                            DMA_RESV_USAGE_BOOKKEEP);
355
356         xe_vm_fence_all_extobjs(vm, pfence, DMA_RESV_USAGE_BOOKKEEP);
357
358         /*
359          * Check to see if a preemption on the VM or a userptr invalidation is
360          * in flight; if so, trigger this preempt fence to sync state with the
361          * other preempt fences on the VM.
362          */
363         wait = __xe_vm_userptr_needs_repin(vm) || preempt_fences_waiting(vm);
364         if (wait)
365                 dma_fence_enable_sw_signaling(pfence);
366
367         up_read(&vm->userptr.notifier_lock);
368
369 out_unlock:
370         xe_vm_unlock_dma_resv(vm, tv_onstack, tv, &ww, &objs);
371 out_unlock_outer:
372         up_write(&vm->lock);
373
374         return err;
375 }
376
377 /**
378  * __xe_vm_userptr_needs_repin() - Check whether the VM does have userptrs
379  * that need repinning.
380  * @vm: The VM.
381  *
382  * This function checks for whether the VM has userptrs that need repinning,
383  * and provides a release-type barrier on the userptr.notifier_lock after
384  * checking.
385  *
386  * Return: 0 if there are no userptrs needing repinning, -EAGAIN if there are.
387  */
388 int __xe_vm_userptr_needs_repin(struct xe_vm *vm)
389 {
390         lockdep_assert_held_read(&vm->userptr.notifier_lock);
391
392         return (list_empty(&vm->userptr.repin_list) &&
393                 list_empty(&vm->userptr.invalidated)) ? 0 : -EAGAIN;
394 }
395
396 /**
397  * xe_vm_lock_dma_resv() - Lock the vm dma_resv object and the dma_resv
398  * objects of the vm's external buffer objects.
399  * @vm: The vm.
400  * @ww: Pointer to a struct ww_acquire_ctx locking context.
401  * @tv_onstack: Array size XE_ONSTACK_TV of storage for the struct
402  * ttm_validate_buffers used for locking.
403  * @tv: Pointer to a pointer that on output contains the actual storage used.
404  * @objs: List head for the buffer objects locked.
405  * @intr: Whether to lock interruptible.
406  * @num_shared: Number of dma-fence slots to reserve in the locked objects.
407  *
408  * Locks the vm dma-resv objects and all the dma-resv objects of the
409  * buffer objects on the vm external object list. The TTM utilities require
410  * a list of struct ttm_validate_buffers pointing to the actual buffer
411  * objects to lock. Storage for those struct ttm_validate_buffers should
412  * be provided in @tv_onstack, and is typically reserved on the stack
413  * of the caller. If the size of @tv_onstack isn't sufficient, then
414  * storage will be allocated internally using kvmalloc().
415  *
416  * The function performs deadlock handling internally, and after a
417  * successful return the ww locking transaction should be considered
418  * sealed.
419  *
420  * Return: 0 on success, Negative error code on error. In particular if
421  * @intr is set to true, -EINTR or -ERESTARTSYS may be returned. In case
422  * of error, any locking performed has been reverted.
423  */
424 int xe_vm_lock_dma_resv(struct xe_vm *vm, struct ww_acquire_ctx *ww,
425                         struct ttm_validate_buffer *tv_onstack,
426                         struct ttm_validate_buffer **tv,
427                         struct list_head *objs,
428                         bool intr,
429                         unsigned int num_shared)
430 {
431         struct ttm_validate_buffer *tv_vm, *tv_bo;
432         struct xe_vma *vma, *next;
433         LIST_HEAD(dups);
434         int err;
435
436         lockdep_assert_held(&vm->lock);
437
438         if (vm->extobj.entries < XE_ONSTACK_TV) {
439                 tv_vm = tv_onstack;
440         } else {
441                 tv_vm = kvmalloc_array(vm->extobj.entries + 1, sizeof(*tv_vm),
442                                        GFP_KERNEL);
443                 if (!tv_vm)
444                         return -ENOMEM;
445         }
446         tv_bo = tv_vm + 1;
447
448         INIT_LIST_HEAD(objs);
449         list_for_each_entry(vma, &vm->extobj.list, extobj.link) {
450                 tv_bo->num_shared = num_shared;
451                 tv_bo->bo = &vma->bo->ttm;
452
453                 list_add_tail(&tv_bo->head, objs);
454                 tv_bo++;
455         }
456         tv_vm->num_shared = num_shared;
457         tv_vm->bo = xe_vm_ttm_bo(vm);
458         list_add_tail(&tv_vm->head, objs);
459         err = ttm_eu_reserve_buffers(ww, objs, intr, &dups);
460         if (err)
461                 goto out_err;
462
463         spin_lock(&vm->notifier.list_lock);
464         list_for_each_entry_safe(vma, next, &vm->notifier.rebind_list,
465                                  notifier.rebind_link) {
466                 xe_bo_assert_held(vma->bo);
467
468                 list_del_init(&vma->notifier.rebind_link);
469                 if (vma->tile_present && !vma->destroyed)
470                         list_move_tail(&vma->rebind_link, &vm->rebind_list);
471         }
472         spin_unlock(&vm->notifier.list_lock);
473
474         *tv = tv_vm;
475         return 0;
476
477 out_err:
478         if (tv_vm != tv_onstack)
479                 kvfree(tv_vm);
480
481         return err;
482 }
483
484 /**
485  * xe_vm_unlock_dma_resv() - Unlock reservation objects locked by
486  * xe_vm_lock_dma_resv()
487  * @vm: The vm.
488  * @tv_onstack: The @tv_onstack array given to xe_vm_lock_dma_resv().
489  * @tv: The value of *@tv given by xe_vm_lock_dma_resv().
490  * @ww: The ww_acquire_context used for locking.
491  * @objs: The list returned from xe_vm_lock_dma_resv().
492  *
493  * Unlocks the reservation objects and frees any memory allocated by
494  * xe_vm_lock_dma_resv().
495  */
496 void xe_vm_unlock_dma_resv(struct xe_vm *vm,
497                            struct ttm_validate_buffer *tv_onstack,
498                            struct ttm_validate_buffer *tv,
499                            struct ww_acquire_ctx *ww,
500                            struct list_head *objs)
501 {
502         /*
503          * Nothing should've been able to enter the list while we were locked,
504          * since we've held the dma-resvs of all the vm's external objects,
505          * and holding the dma_resv of an object is required for list
506          * addition, and we shouldn't add ourselves.
507          */
508         XE_WARN_ON(!list_empty(&vm->notifier.rebind_list));
509
510         ttm_eu_backoff_reservation(ww, objs);
511         if (tv && tv != tv_onstack)
512                 kvfree(tv);
513 }
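/*
 * Illustrative usage sketch (an assumption, not copied from the driver): the
 * two helpers above bracket a section that needs the vm resv plus all
 * external-object resvs held, e.g.:
 *
 *    struct ttm_validate_buffer tv_onstack[XE_ONSTACK_TV], *tv;
 *    struct ww_acquire_ctx ww;
 *    struct list_head objs;
 *    int err;
 *
 *    err = xe_vm_lock_dma_resv(vm, &ww, tv_onstack, &tv, &objs, true, 1);
 *    if (err)
 *            return err;
 *    ... validate BOs / add fences while everything is locked ...
 *    xe_vm_unlock_dma_resv(vm, tv_onstack, tv, &ww, &objs);
 *
 * xe_vm_add_compute_engine() and preempt_rebind_work_func() in this file
 * follow this pattern.
 */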
514
515 #define XE_VM_REBIND_RETRY_TIMEOUT_MS 1000
516
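/*
 * Rebind worker for compute-mode VMs: repin any invalidated userptrs,
 * revalidate evicted BOs, rebind them, then arm fresh preempt fences and
 * resume the engines. -EAGAIN (e.g. a userptr invalidation racing with the
 * rebind) restarts the whole sequence.
 */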
517 static void preempt_rebind_work_func(struct work_struct *w)
518 {
519         struct xe_vm *vm = container_of(w, struct xe_vm, preempt.rebind_work);
520         struct xe_vma *vma;
521         struct ttm_validate_buffer tv_onstack[XE_ONSTACK_TV];
522         struct ttm_validate_buffer *tv;
523         struct ww_acquire_ctx ww;
524         struct list_head objs;
525         struct dma_fence *rebind_fence;
526         unsigned int fence_count = 0;
527         LIST_HEAD(preempt_fences);
528         ktime_t end = 0;
529         int err;
530         long wait;
531         int __maybe_unused tries = 0;
532
533         XE_BUG_ON(!xe_vm_in_compute_mode(vm));
534         trace_xe_vm_rebind_worker_enter(vm);
535
536         if (xe_vm_is_closed(vm)) {
537                 trace_xe_vm_rebind_worker_exit(vm);
538                 return;
539         }
540
541         down_write(&vm->lock);
542
543 retry:
544         if (vm->async_ops.error)
545                 goto out_unlock_outer;
546
547         /*
548          * Extreme corner where we exit a VM error state with a munmap style VM
549          * unbind inflight which requires a rebind. In this case the rebind
550          * needs to install some fences into the dma-resv slots. The worker that
551          * does this is already queued; let that worker make progress by dropping
552          * vm->lock and trying this again.
553          */
554         if (vm->async_ops.munmap_rebind_inflight) {
555                 up_write(&vm->lock);
556                 flush_work(&vm->async_ops.work);
557                 goto retry;
558         }
559
560         if (xe_vm_userptr_check_repin(vm)) {
561                 err = xe_vm_userptr_pin(vm);
562                 if (err)
563                         goto out_unlock_outer;
564         }
565
566         err = xe_vm_lock_dma_resv(vm, &ww, tv_onstack, &tv, &objs,
567                                   false, vm->preempt.num_engines);
568         if (err)
569                 goto out_unlock_outer;
570
571         if (xe_vm_is_idle(vm)) {
572                 vm->preempt.rebind_deactivated = true;
573                 goto out_unlock;
574         }
575
576         /* Fresh preempt fences already installed. Everything is running. */
577         if (!preempt_fences_waiting(vm))
578                 goto out_unlock;
579
580         /*
581          * This makes sure vm is completely suspended and also balances
582          * xe_engine suspend- and resume; we resume *all* vm engines below.
583          */
584         err = wait_for_existing_preempt_fences(vm);
585         if (err)
586                 goto out_unlock;
587
588         err = alloc_preempt_fences(vm, &preempt_fences, &fence_count);
589         if (err)
590                 goto out_unlock;
591
592         list_for_each_entry(vma, &vm->rebind_list, rebind_link) {
593                 if (xe_vma_has_no_bo(vma) || vma->destroyed)
594                         continue;
595
596                 err = xe_bo_validate(vma->bo, vm, false);
597                 if (err)
598                         goto out_unlock;
599         }
600
601         rebind_fence = xe_vm_rebind(vm, true);
602         if (IS_ERR(rebind_fence)) {
603                 err = PTR_ERR(rebind_fence);
604                 goto out_unlock;
605         }
606
607         if (rebind_fence) {
608                 dma_fence_wait(rebind_fence, false);
609                 dma_fence_put(rebind_fence);
610         }
611
612         /* Wait on munmap style VM unbinds */
613         wait = dma_resv_wait_timeout(&vm->resv,
614                                      DMA_RESV_USAGE_KERNEL,
615                                      false, MAX_SCHEDULE_TIMEOUT);
616         if (wait <= 0) {
617                 err = -ETIME;
618                 goto out_unlock;
619         }
620
621 #define retry_required(__tries, __vm) \
622         (IS_ENABLED(CONFIG_DRM_XE_USERPTR_INVAL_INJECT) ? \
623         (!(__tries)++ || __xe_vm_userptr_needs_repin(__vm)) : \
624         __xe_vm_userptr_needs_repin(__vm))
625
626         down_read(&vm->userptr.notifier_lock);
627         if (retry_required(tries, vm)) {
628                 up_read(&vm->userptr.notifier_lock);
629                 err = -EAGAIN;
630                 goto out_unlock;
631         }
632
633 #undef retry_required
634
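	/*
	 * Bulk-move all of this VM's BOs to the tail of the TTM LRU so that,
	 * having just been revalidated, they become the last candidates for
	 * eviction under memory pressure.
	 */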
635         spin_lock(&vm->xe->ttm.lru_lock);
636         ttm_lru_bulk_move_tail(&vm->lru_bulk_move);
637         spin_unlock(&vm->xe->ttm.lru_lock);
638
639         /* Point of no return. */
640         arm_preempt_fences(vm, &preempt_fences);
641         resume_and_reinstall_preempt_fences(vm);
642         up_read(&vm->userptr.notifier_lock);
643
644 out_unlock:
645         xe_vm_unlock_dma_resv(vm, tv_onstack, tv, &ww, &objs);
646 out_unlock_outer:
647         if (err == -EAGAIN) {
648                 trace_xe_vm_rebind_worker_retry(vm);
649                 goto retry;
650         }
651
652         /*
653          * With multiple active VMs, under memory pressure, it is possible that
654          * ttm_bo_validate() runs into -EDEADLK and in that case returns -ENOMEM.
655          * Until TTM properly handles locking in such scenarios, the best thing the
656          * driver can do is retry with a timeout. Killing the VM or putting it
657          * in error state after timeout or other error scenarios is still TBD.
658          */
659         if (err == -ENOMEM) {
660                 ktime_t cur = ktime_get();
661
662                 end = end ? : ktime_add_ms(cur, XE_VM_REBIND_RETRY_TIMEOUT_MS);
663                 if (ktime_before(cur, end)) {
664                         msleep(20);
665                         trace_xe_vm_rebind_worker_retry(vm);
666                         goto retry;
667                 }
668         }
669         up_write(&vm->lock);
670
671         free_preempt_fences(&preempt_fences);
672
673         XE_WARN_ON(err < 0);    /* TODO: Kill VM or put in error state */
674         trace_xe_vm_rebind_worker_exit(vm);
675 }
676
677 struct async_op_fence;
678 static int __xe_vm_bind(struct xe_vm *vm, struct xe_vma *vma,
679                         struct xe_engine *e, struct xe_sync_entry *syncs,
680                         u32 num_syncs, struct async_op_fence *afence);
681
682 static bool vma_userptr_invalidate(struct mmu_interval_notifier *mni,
683                                    const struct mmu_notifier_range *range,
684                                    unsigned long cur_seq)
685 {
686         struct xe_vma *vma = container_of(mni, struct xe_vma, userptr.notifier);
687         struct xe_vm *vm = vma->vm;
688         struct dma_resv_iter cursor;
689         struct dma_fence *fence;
690         long err;
691
692         XE_BUG_ON(!xe_vma_is_userptr(vma));
693         trace_xe_vma_userptr_invalidate(vma);
694
695         if (!mmu_notifier_range_blockable(range))
696                 return false;
697
698         down_write(&vm->userptr.notifier_lock);
699         mmu_interval_set_seq(mni, cur_seq);
700
701         /* No need to stop gpu access if the userptr is not yet bound. */
702         if (!vma->userptr.initial_bind) {
703                 up_write(&vm->userptr.notifier_lock);
704                 return true;
705         }
706
707         /*
708          * Tell exec and rebind worker they need to repin and rebind this
709          * userptr.
710          */
711         if (!xe_vm_in_fault_mode(vm) && !vma->destroyed && vma->tile_present) {
712                 spin_lock(&vm->userptr.invalidated_lock);
713                 list_move_tail(&vma->userptr.invalidate_link,
714                                &vm->userptr.invalidated);
715                 spin_unlock(&vm->userptr.invalidated_lock);
716         }
717
718         up_write(&vm->userptr.notifier_lock);
719
720         /*
721          * Preempt fences turn into schedule disables, pipeline these.
722          * Note that even in fault mode, we need to wait for binds and
723          * unbinds to complete, and those are attached as BOOKKEEP fences
724          * to the vm.
725          */
726         dma_resv_iter_begin(&cursor, &vm->resv,
727                             DMA_RESV_USAGE_BOOKKEEP);
728         dma_resv_for_each_fence_unlocked(&cursor, fence)
729                 dma_fence_enable_sw_signaling(fence);
730         dma_resv_iter_end(&cursor);
731
732         err = dma_resv_wait_timeout(&vm->resv,
733                                     DMA_RESV_USAGE_BOOKKEEP,
734                                     false, MAX_SCHEDULE_TIMEOUT);
735         XE_WARN_ON(err <= 0);
736
737         if (xe_vm_in_fault_mode(vm)) {
738                 err = xe_vm_invalidate_vma(vma);
739                 XE_WARN_ON(err);
740         }
741
742         trace_xe_vma_userptr_invalidate_complete(vma);
743
744         return true;
745 }
746
747 static const struct mmu_interval_notifier_ops vma_userptr_notifier_ops = {
748         .invalidate = vma_userptr_invalidate,
749 };
750
751 int xe_vm_userptr_pin(struct xe_vm *vm)
752 {
753         struct xe_vma *vma, *next;
754         int err = 0;
755         LIST_HEAD(tmp_evict);
756
757         lockdep_assert_held_write(&vm->lock);
758
759         /* Collect invalidated userptrs */
760         spin_lock(&vm->userptr.invalidated_lock);
761         list_for_each_entry_safe(vma, next, &vm->userptr.invalidated,
762                                  userptr.invalidate_link) {
763                 list_del_init(&vma->userptr.invalidate_link);
764                 list_move_tail(&vma->userptr_link, &vm->userptr.repin_list);
765         }
766         spin_unlock(&vm->userptr.invalidated_lock);
767
768         /* Pin and move to temporary list */
769         list_for_each_entry_safe(vma, next, &vm->userptr.repin_list, userptr_link) {
770                 err = xe_vma_userptr_pin_pages(vma);
771                 if (err < 0)
772                         goto out_err;
773
774                 list_move_tail(&vma->userptr_link, &tmp_evict);
775         }
776
777         /* Take lock and move to rebind_list for rebinding. */
778         err = dma_resv_lock_interruptible(&vm->resv, NULL);
779         if (err)
780                 goto out_err;
781
782         list_for_each_entry_safe(vma, next, &tmp_evict, userptr_link) {
783                 list_del_init(&vma->userptr_link);
784                 list_move_tail(&vma->rebind_link, &vm->rebind_list);
785         }
786
787         dma_resv_unlock(&vm->resv);
788
789         return 0;
790
791 out_err:
792         list_splice_tail(&tmp_evict, &vm->userptr.repin_list);
793
794         return err;
795 }
796
797 /**
798  * xe_vm_userptr_check_repin() - Check whether the VM might have userptrs
799  * that need repinning.
800  * @vm: The VM.
801  *
802  * This function does an advisory check for whether the VM has userptrs that
803  * need repinning.
804  *
805  * Return: 0 if there are no indications of userptrs needing repinning,
806  * -EAGAIN if there are.
807  */
808 int xe_vm_userptr_check_repin(struct xe_vm *vm)
809 {
810         return (list_empty_careful(&vm->userptr.repin_list) &&
811                 list_empty_careful(&vm->userptr.invalidated)) ? 0 : -EAGAIN;
812 }
813
814 static struct dma_fence *
815 xe_vm_bind_vma(struct xe_vma *vma, struct xe_engine *e,
816                struct xe_sync_entry *syncs, u32 num_syncs);
817
818 struct dma_fence *xe_vm_rebind(struct xe_vm *vm, bool rebind_worker)
819 {
820         struct dma_fence *fence = NULL;
821         struct xe_vma *vma, *next;
822
823         lockdep_assert_held(&vm->lock);
824         if (xe_vm_no_dma_fences(vm) && !rebind_worker)
825                 return NULL;
826
827         xe_vm_assert_held(vm);
828         list_for_each_entry_safe(vma, next, &vm->rebind_list, rebind_link) {
829                 XE_WARN_ON(!vma->tile_present);
830
831                 list_del_init(&vma->rebind_link);
832                 dma_fence_put(fence);
833                 if (rebind_worker)
834                         trace_xe_vma_rebind_worker(vma);
835                 else
836                         trace_xe_vma_rebind_exec(vma);
837                 fence = xe_vm_bind_vma(vma, NULL, NULL, 0);
838                 if (IS_ERR(fence))
839                         return fence;
840         }
841
842         return fence;
843 }
844
845 static struct xe_vma *xe_vma_create(struct xe_vm *vm,
846                                     struct xe_bo *bo,
847                                     u64 bo_offset_or_userptr,
848                                     u64 start, u64 end,
849                                     bool read_only,
850                                     bool is_null,
851                                     u64 tile_mask)
852 {
853         struct xe_vma *vma;
854         struct xe_tile *tile;
855         u8 id;
856
857         XE_BUG_ON(start >= end);
858         XE_BUG_ON(end >= vm->size);
859
860         vma = kzalloc(sizeof(*vma), GFP_KERNEL);
861         if (!vma) {
862                 vma = ERR_PTR(-ENOMEM);
863                 return vma;
864         }
865
866         INIT_LIST_HEAD(&vma->rebind_link);
867         INIT_LIST_HEAD(&vma->unbind_link);
868         INIT_LIST_HEAD(&vma->userptr_link);
869         INIT_LIST_HEAD(&vma->userptr.invalidate_link);
870         INIT_LIST_HEAD(&vma->notifier.rebind_link);
871         INIT_LIST_HEAD(&vma->extobj.link);
872
873         vma->vm = vm;
874         vma->start = start;
875         vma->end = end;
876         vma->pte_flags = 0;
877         if (read_only)
878                 vma->pte_flags |= XE_PTE_FLAG_READ_ONLY;
879         if (is_null)
880                 vma->pte_flags |= XE_PTE_FLAG_NULL;
881
882         if (tile_mask) {
883                 vma->tile_mask = tile_mask;
884         } else {
885                 for_each_tile(tile, vm->xe, id)
886                         vma->tile_mask |= 0x1 << id;
887         }
888
889         if (vm->xe->info.platform == XE_PVC)
890                 vma->use_atomic_access_pte_bit = true;
891
892         if (bo) {
893                 xe_bo_assert_held(bo);
894                 vma->bo_offset = bo_offset_or_userptr;
895                 vma->bo = xe_bo_get(bo);
896                 list_add_tail(&vma->bo_link, &bo->vmas);
897         } else /* userptr or null */ {
898                 if (!is_null) {
899                         u64 size = end - start + 1;
900                         int err;
901
902                         vma->userptr.ptr = bo_offset_or_userptr;
903
904                         err = mmu_interval_notifier_insert(&vma->userptr.notifier,
905                                                            current->mm,
906                                                            vma->userptr.ptr, size,
907                                                            &vma_userptr_notifier_ops);
908                         if (err) {
909                                 kfree(vma);
910                                 vma = ERR_PTR(err);
911                                 return vma;
912                         }
913
914                         vma->userptr.notifier_seq = LONG_MAX;
915                 }
916
917                 xe_vm_get(vm);
918         }
919
920         return vma;
921 }
922
923 static bool vm_remove_extobj(struct xe_vma *vma)
924 {
925         if (!list_empty(&vma->extobj.link)) {
926                 vma->vm->extobj.entries--;
927                 list_del_init(&vma->extobj.link);
928                 return true;
929         }
930         return false;
931 }
932
933 static void xe_vma_destroy_late(struct xe_vma *vma)
934 {
935         struct xe_vm *vm = vma->vm;
936         struct xe_device *xe = vm->xe;
937         bool read_only = vma->pte_flags & XE_PTE_FLAG_READ_ONLY;
938
939         if (xe_vma_is_userptr(vma)) {
940                 if (vma->userptr.sg) {
941                         dma_unmap_sgtable(xe->drm.dev,
942                                           vma->userptr.sg,
943                                           read_only ? DMA_TO_DEVICE :
944                                           DMA_BIDIRECTIONAL, 0);
945                         sg_free_table(vma->userptr.sg);
946                         vma->userptr.sg = NULL;
947                 }
948
949                 /*
950                  * Since userptr pages are not pinned, we can't remove
951                  * the notifier until we're sure the GPU is not accessing
952                  * them anymore.
953                  */
954                 mmu_interval_notifier_remove(&vma->userptr.notifier);
955                 xe_vm_put(vm);
956         } else if (xe_vma_is_null(vma)) {
957                 xe_vm_put(vm);
958         } else {
959                 xe_bo_put(vma->bo);
960         }
961
962         kfree(vma);
963 }
964
965 static void vma_destroy_work_func(struct work_struct *w)
966 {
967         struct xe_vma *vma =
968                 container_of(w, struct xe_vma, destroy_work);
969
970         xe_vma_destroy_late(vma);
971 }
972
973 static struct xe_vma *
974 bo_has_vm_references_locked(struct xe_bo *bo, struct xe_vm *vm,
975                             struct xe_vma *ignore)
976 {
977         struct xe_vma *vma;
978
979         list_for_each_entry(vma, &bo->vmas, bo_link) {
980                 if (vma != ignore && vma->vm == vm)
981                         return vma;
982         }
983
984         return NULL;
985 }
986
987 static bool bo_has_vm_references(struct xe_bo *bo, struct xe_vm *vm,
988                                  struct xe_vma *ignore)
989 {
990         struct ww_acquire_ctx ww;
991         bool ret;
992
993         xe_bo_lock(bo, &ww, 0, false);
994         ret = !!bo_has_vm_references_locked(bo, vm, ignore);
995         xe_bo_unlock(bo, &ww);
996
997         return ret;
998 }
999
1000 static void __vm_insert_extobj(struct xe_vm *vm, struct xe_vma *vma)
1001 {
1002         list_add(&vma->extobj.link, &vm->extobj.list);
1003         vm->extobj.entries++;
1004 }
1005
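/*
 * Track the vma's BO on the vm's external-object list. Only one vma per
 * (bo, vm) pair is kept there; if another vma of this vm already references
 * the same bo, nothing is added.
 */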
1006 static void vm_insert_extobj(struct xe_vm *vm, struct xe_vma *vma)
1007 {
1008         struct xe_bo *bo = vma->bo;
1009
1010         lockdep_assert_held_write(&vm->lock);
1011
1012         if (bo_has_vm_references(bo, vm, vma))
1013                 return;
1014
1015         __vm_insert_extobj(vm, vma);
1016 }
1017
1018 static void vma_destroy_cb(struct dma_fence *fence,
1019                            struct dma_fence_cb *cb)
1020 {
1021         struct xe_vma *vma = container_of(cb, struct xe_vma, destroy_cb);
1022
1023         INIT_WORK(&vma->destroy_work, vma_destroy_work_func);
1024         queue_work(system_unbound_wq, &vma->destroy_work);
1025 }
1026
1027 static void xe_vma_destroy(struct xe_vma *vma, struct dma_fence *fence)
1028 {
1029         struct xe_vm *vm = vma->vm;
1030
1031         lockdep_assert_held_write(&vm->lock);
1032         XE_BUG_ON(!list_empty(&vma->unbind_link));
1033
1034         if (xe_vma_is_userptr(vma)) {
1035                 XE_WARN_ON(!vma->destroyed);
1036                 spin_lock(&vm->userptr.invalidated_lock);
1037                 list_del_init(&vma->userptr.invalidate_link);
1038                 spin_unlock(&vm->userptr.invalidated_lock);
1039                 list_del(&vma->userptr_link);
1040         } else if (!xe_vma_is_null(vma)) {
1041                 xe_bo_assert_held(vma->bo);
1042                 list_del(&vma->bo_link);
1043
1044                 spin_lock(&vm->notifier.list_lock);
1045                 list_del(&vma->notifier.rebind_link);
1046                 spin_unlock(&vm->notifier.list_lock);
1047
1048                 if (!vma->bo->vm && vm_remove_extobj(vma)) {
1049                         struct xe_vma *other;
1050
1051                         other = bo_has_vm_references_locked(vma->bo, vm, NULL);
1052
1053                         if (other)
1054                                 __vm_insert_extobj(vm, other);
1055                 }
1056         }
1057
1058         xe_vm_assert_held(vm);
1059         if (!list_empty(&vma->rebind_link))
1060                 list_del(&vma->rebind_link);
1061
1062         if (fence) {
1063                 int ret = dma_fence_add_callback(fence, &vma->destroy_cb,
1064                                                  vma_destroy_cb);
1065
1066                 if (ret) {
1067                         XE_WARN_ON(ret != -ENOENT);
1068                         xe_vma_destroy_late(vma);
1069                 }
1070         } else {
1071                 xe_vma_destroy_late(vma);
1072         }
1073 }
1074
1075 static void xe_vma_destroy_unlocked(struct xe_vma *vma)
1076 {
1077         struct ttm_validate_buffer tv[2];
1078         struct ww_acquire_ctx ww;
1079         struct xe_bo *bo = vma->bo;
1080         LIST_HEAD(objs);
1081         LIST_HEAD(dups);
1082         int err;
1083
1084         memset(tv, 0, sizeof(tv));
1085         tv[0].bo = xe_vm_ttm_bo(vma->vm);
1086         list_add(&tv[0].head, &objs);
1087
1088         if (bo) {
1089                 tv[1].bo = &xe_bo_get(bo)->ttm;
1090                 list_add(&tv[1].head, &objs);
1091         }
1092         err = ttm_eu_reserve_buffers(&ww, &objs, false, &dups);
1093         XE_WARN_ON(err);
1094
1095         xe_vma_destroy(vma, NULL);
1096
1097         ttm_eu_backoff_reservation(&ww, &objs);
1098         if (bo)
1099                 xe_bo_put(bo);
1100 }
1101
1102 static struct xe_vma *to_xe_vma(const struct rb_node *node)
1103 {
1104         BUILD_BUG_ON(offsetof(struct xe_vma, vm_node) != 0);
1105         return (struct xe_vma *)node;
1106 }
1107
1108 static int xe_vma_cmp(const struct xe_vma *a, const struct xe_vma *b)
1109 {
1110         if (a->end < b->start) {
1111                 return -1;
1112         } else if (b->end < a->start) {
1113                 return 1;
1114         } else {
1115                 return 0;
1116         }
1117 }
1118
1119 static bool xe_vma_less_cb(struct rb_node *a, const struct rb_node *b)
1120 {
1121         return xe_vma_cmp(to_xe_vma(a), to_xe_vma(b)) < 0;
1122 }
1123
1124 int xe_vma_cmp_vma_cb(const void *key, const struct rb_node *node)
1125 {
1126         struct xe_vma *cmp = to_xe_vma(node);
1127         const struct xe_vma *own = key;
1128
1129         if (own->start > cmp->end)
1130                 return 1;
1131
1132         if (own->end < cmp->start)
1133                 return -1;
1134
1135         return 0;
1136 }
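/*
 * Worked example (illustrative): for a key vma covering [0x1000, 0x1fff] and
 * a tree node covering [0x0000, 0x0fff], own->start (0x1000) is greater than
 * cmp->end (0x0fff), so the callback returns 1 and rb_find() descends to the
 * right; any overlap returns 0 and that node is the match. This is what makes
 * xe_vm_find_overlapping_vma() below an interval lookup rather than an
 * exact-address lookup.
 */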
1137
1138 struct xe_vma *
1139 xe_vm_find_overlapping_vma(struct xe_vm *vm, const struct xe_vma *vma)
1140 {
1141         struct rb_node *node;
1142
1143         if (xe_vm_is_closed(vm))
1144                 return NULL;
1145
1146         XE_BUG_ON(vma->end >= vm->size);
1147         lockdep_assert_held(&vm->lock);
1148
1149         node = rb_find(vma, &vm->vmas, xe_vma_cmp_vma_cb);
1150
1151         return node ? to_xe_vma(node) : NULL;
1152 }
1153
1154 static void xe_vm_insert_vma(struct xe_vm *vm, struct xe_vma *vma)
1155 {
1156         XE_BUG_ON(vma->vm != vm);
1157         lockdep_assert_held(&vm->lock);
1158
1159         rb_add(&vma->vm_node, &vm->vmas, xe_vma_less_cb);
1160 }
1161
1162 static void xe_vm_remove_vma(struct xe_vm *vm, struct xe_vma *vma)
1163 {
1164         XE_BUG_ON(vma->vm != vm);
1165         lockdep_assert_held(&vm->lock);
1166
1167         rb_erase(&vma->vm_node, &vm->vmas);
1168         if (vm->usm.last_fault_vma == vma)
1169                 vm->usm.last_fault_vma = NULL;
1170 }
1171
1172 static void async_op_work_func(struct work_struct *w);
1173 static void vm_destroy_work_func(struct work_struct *w);
1174
1175 struct xe_vm *xe_vm_create(struct xe_device *xe, u32 flags)
1176 {
1177         struct xe_vm *vm;
1178         int err, i = 0, number_tiles = 0;
1179         struct xe_tile *tile;
1180         u8 id;
1181
1182         vm = kzalloc(sizeof(*vm), GFP_KERNEL);
1183         if (!vm)
1184                 return ERR_PTR(-ENOMEM);
1185
1186         vm->xe = xe;
1187         kref_init(&vm->refcount);
1188         dma_resv_init(&vm->resv);
1189
1190         vm->size = 1ull << xe_pt_shift(xe->info.vm_max_level + 1);
1191
1192         vm->vmas = RB_ROOT;
1193         vm->flags = flags;
1194
1195         init_rwsem(&vm->lock);
1196
1197         INIT_LIST_HEAD(&vm->rebind_list);
1198
1199         INIT_LIST_HEAD(&vm->userptr.repin_list);
1200         INIT_LIST_HEAD(&vm->userptr.invalidated);
1201         init_rwsem(&vm->userptr.notifier_lock);
1202         spin_lock_init(&vm->userptr.invalidated_lock);
1203
1204         INIT_LIST_HEAD(&vm->notifier.rebind_list);
1205         spin_lock_init(&vm->notifier.list_lock);
1206
1207         INIT_LIST_HEAD(&vm->async_ops.pending);
1208         INIT_WORK(&vm->async_ops.work, async_op_work_func);
1209         spin_lock_init(&vm->async_ops.lock);
1210
1211         INIT_WORK(&vm->destroy_work, vm_destroy_work_func);
1212
1213         INIT_LIST_HEAD(&vm->preempt.engines);
1214         vm->preempt.min_run_period_ms = 10;     /* FIXME: Wire up to uAPI */
1215
1216         INIT_LIST_HEAD(&vm->extobj.list);
1217
1218         if (!(flags & XE_VM_FLAG_MIGRATION)) {
1219                 /* We need to immediately exit from any D3 state */
1220                 xe_pm_runtime_get(xe);
1221                 xe_device_mem_access_get(xe);
1222         }
1223
1224         err = dma_resv_lock_interruptible(&vm->resv, NULL);
1225         if (err)
1226                 goto err_put;
1227
1228         if (IS_DGFX(xe) && xe->info.vram_flags & XE_VRAM_FLAGS_NEED64K)
1229                 vm->flags |= XE_VM_FLAGS_64K;
1230
1231         for_each_tile(tile, xe, id) {
1232                 if (flags & XE_VM_FLAG_MIGRATION &&
1233                     tile->id != XE_VM_FLAG_GT_ID(flags))
1234                         continue;
1235
1236                 vm->pt_root[id] = xe_pt_create(vm, tile, xe->info.vm_max_level);
1237                 if (IS_ERR(vm->pt_root[id])) {
1238                         err = PTR_ERR(vm->pt_root[id]);
1239                         vm->pt_root[id] = NULL;
1240                         goto err_destroy_root;
1241                 }
1242         }
1243
1244         if (flags & XE_VM_FLAG_SCRATCH_PAGE) {
1245                 for_each_tile(tile, xe, id) {
1246                         if (!vm->pt_root[id])
1247                                 continue;
1248
1249                         err = xe_pt_create_scratch(xe, tile, vm);
1250                         if (err)
1251                                 goto err_scratch_pt;
1252                 }
1253                 vm->batch_invalidate_tlb = true;
1254         }
1255
1256         if (flags & DRM_XE_VM_CREATE_COMPUTE_MODE) {
1257                 INIT_WORK(&vm->preempt.rebind_work, preempt_rebind_work_func);
1258                 vm->flags |= XE_VM_FLAG_COMPUTE_MODE;
1259                 vm->batch_invalidate_tlb = false;
1260         }
1261
1262         if (flags & DRM_XE_VM_CREATE_ASYNC_BIND_OPS) {
1263                 vm->async_ops.fence.context = dma_fence_context_alloc(1);
1264                 vm->flags |= XE_VM_FLAG_ASYNC_BIND_OPS;
1265         }
1266
1267         /* Fill pt_root after allocating scratch tables */
1268         for_each_tile(tile, xe, id) {
1269                 if (!vm->pt_root[id])
1270                         continue;
1271
1272                 xe_pt_populate_empty(tile, vm, vm->pt_root[id]);
1273         }
1274         dma_resv_unlock(&vm->resv);
1275
1276         /* Kernel migration VM shouldn't have a circular loop. */
1277         if (!(flags & XE_VM_FLAG_MIGRATION)) {
1278                 for_each_tile(tile, xe, id) {
1279                         struct xe_gt *gt = tile->primary_gt;
1280                         struct xe_vm *migrate_vm;
1281                         struct xe_engine *eng;
1282
1283                         if (!vm->pt_root[id])
1284                                 continue;
1285
1286                         migrate_vm = xe_migrate_get_vm(tile->migrate);
1287                         eng = xe_engine_create_class(xe, gt, migrate_vm,
1288                                                      XE_ENGINE_CLASS_COPY,
1289                                                      ENGINE_FLAG_VM);
1290                         xe_vm_put(migrate_vm);
1291                         if (IS_ERR(eng)) {
1292                                 xe_vm_close_and_put(vm);
1293                                 return ERR_CAST(eng);
1294                         }
1295                         vm->eng[id] = eng;
1296                         number_tiles++;
1297                 }
1298         }
1299
1300         if (number_tiles > 1)
1301                 vm->composite_fence_ctx = dma_fence_context_alloc(1);
1302
1303         mutex_lock(&xe->usm.lock);
1304         if (flags & XE_VM_FLAG_FAULT_MODE)
1305                 xe->usm.num_vm_in_fault_mode++;
1306         else if (!(flags & XE_VM_FLAG_MIGRATION))
1307                 xe->usm.num_vm_in_non_fault_mode++;
1308         mutex_unlock(&xe->usm.lock);
1309
1310         trace_xe_vm_create(vm);
1311
1312         return vm;
1313
1314 err_scratch_pt:
1315         for_each_tile(tile, xe, id) {
1316                 if (!vm->pt_root[id])
1317                         continue;
1318
1319                 i = vm->pt_root[id]->level;
1320                 while (i)
1321                         if (vm->scratch_pt[id][--i])
1322                                 xe_pt_destroy(vm->scratch_pt[id][i],
1323                                               vm->flags, NULL);
1324                 xe_bo_unpin(vm->scratch_bo[id]);
1325                 xe_bo_put(vm->scratch_bo[id]);
1326         }
1327 err_destroy_root:
1328         for_each_tile(tile, xe, id) {
1329                 if (vm->pt_root[id])
1330                         xe_pt_destroy(vm->pt_root[id], vm->flags, NULL);
1331         }
1332         dma_resv_unlock(&vm->resv);
1333 err_put:
1334         dma_resv_fini(&vm->resv);
1335         kfree(vm);
1336         if (!(flags & XE_VM_FLAG_MIGRATION)) {
1337                 xe_device_mem_access_put(xe);
1338                 xe_pm_runtime_put(xe);
1339         }
1340         return ERR_PTR(err);
1341 }
1342
1343 static void flush_async_ops(struct xe_vm *vm)
1344 {
1345         queue_work(system_unbound_wq, &vm->async_ops.work);
1346         flush_work(&vm->async_ops.work);
1347 }
1348
1349 static void vm_error_capture(struct xe_vm *vm, int err,
1350                              u32 op, u64 addr, u64 size)
1351 {
1352         struct drm_xe_vm_bind_op_error_capture capture;
1353         u64 __user *address =
1354                 u64_to_user_ptr(vm->async_ops.error_capture.addr);
1355         bool in_kthread = !current->mm;
1356
1357         capture.error = err;
1358         capture.op = op;
1359         capture.addr = addr;
1360         capture.size = size;
1361
1362         if (in_kthread) {
1363                 if (!mmget_not_zero(vm->async_ops.error_capture.mm))
1364                         goto mm_closed;
1365                 kthread_use_mm(vm->async_ops.error_capture.mm);
1366         }
1367
1368         if (copy_to_user(address, &capture, sizeof(capture)))
1369                 XE_WARN_ON("Copy to user failed");
1370
1371         if (in_kthread) {
1372                 kthread_unuse_mm(vm->async_ops.error_capture.mm);
1373                 mmput(vm->async_ops.error_capture.mm);
1374         }
1375
1376 mm_closed:
1377         wake_up_all(&vm->async_ops.error_capture.wq);
1378 }
1379
1380 void xe_vm_close_and_put(struct xe_vm *vm)
1381 {
1382         struct rb_root contested = RB_ROOT;
1383         struct ww_acquire_ctx ww;
1384         struct xe_device *xe = vm->xe;
1385         struct xe_tile *tile;
1386         u8 id;
1387
1388         XE_BUG_ON(vm->preempt.num_engines);
1389
1390         vm->size = 0;
1391         smp_mb();
1392         flush_async_ops(vm);
1393         if (xe_vm_in_compute_mode(vm))
1394                 flush_work(&vm->preempt.rebind_work);
1395
1396         for_each_tile(tile, xe, id) {
1397                 if (vm->eng[id]) {
1398                         xe_engine_kill(vm->eng[id]);
1399                         xe_engine_put(vm->eng[id]);
1400                         vm->eng[id] = NULL;
1401                 }
1402         }
1403
1404         down_write(&vm->lock);
1405         xe_vm_lock(vm, &ww, 0, false);
1406         while (vm->vmas.rb_node) {
1407                 struct xe_vma *vma = to_xe_vma(vm->vmas.rb_node);
1408
1409                 if (xe_vma_has_no_bo(vma)) {
1410                         down_read(&vm->userptr.notifier_lock);
1411                         vma->destroyed = true;
1412                         up_read(&vm->userptr.notifier_lock);
1413                 }
1414
1415                 rb_erase(&vma->vm_node, &vm->vmas);
1416
1417                 /* easy case, remove from VMA? */
1418                 if (xe_vma_has_no_bo(vma) || vma->bo->vm) {
1419                         xe_vma_destroy(vma, NULL);
1420                         continue;
1421                 }
1422
1423                 rb_add(&vma->vm_node, &contested, xe_vma_less_cb);
1424         }
1425
1426         /*
1427          * All vm operations will add shared fences to resv.
1428          * The only exception is eviction for a shared object,
1429          * but even so, the unbind when evicted would still
1430          * install a fence to resv. Hence it's safe to
1431          * destroy the pagetables immediately.
1432          */
1433         for_each_tile(tile, xe, id) {
1434                 if (vm->scratch_bo[id]) {
1435                         u32 i;
1436
1437                         xe_bo_unpin(vm->scratch_bo[id]);
1438                         xe_bo_put(vm->scratch_bo[id]);
1439                         for (i = 0; i < vm->pt_root[id]->level; i++)
1440                                 xe_pt_destroy(vm->scratch_pt[id][i], vm->flags,
1441                                               NULL);
1442                 }
1443         }
1444         xe_vm_unlock(vm, &ww);
1445
1446         if (contested.rb_node) {
1447
1448                 /*
1449                  * VM is now dead, cannot re-add nodes to vm->vmas if it's NULL
1450                  * Since we hold a refcount to the bo, we can remove and free
1451                  * the members safely without locking.
1452                  */
1453                 while (contested.rb_node) {
1454                         struct xe_vma *vma = to_xe_vma(contested.rb_node);
1455
1456                         rb_erase(&vma->vm_node, &contested);
1457                         xe_vma_destroy_unlocked(vma);
1458                 }
1459         }
1460
1461         if (vm->async_ops.error_capture.addr)
1462                 wake_up_all(&vm->async_ops.error_capture.wq);
1463
1464         XE_WARN_ON(!list_empty(&vm->extobj.list));
1465         up_write(&vm->lock);
1466
1467         mutex_lock(&xe->usm.lock);
1468         if (vm->flags & XE_VM_FLAG_FAULT_MODE)
1469                 xe->usm.num_vm_in_fault_mode--;
1470         else if (!(vm->flags & XE_VM_FLAG_MIGRATION))
1471                 xe->usm.num_vm_in_non_fault_mode--;
1472         mutex_unlock(&xe->usm.lock);
1473
1474         xe_vm_put(vm);
1475 }
1476
1477 static void vm_destroy_work_func(struct work_struct *w)
1478 {
1479         struct xe_vm *vm =
1480                 container_of(w, struct xe_vm, destroy_work);
1481         struct ww_acquire_ctx ww;
1482         struct xe_device *xe = vm->xe;
1483         struct xe_tile *tile;
1484         u8 id;
1485         void *lookup;
1486
1487         /* xe_vm_close_and_put was not called? */
1488         XE_WARN_ON(vm->size);
1489
1490         if (!(vm->flags & XE_VM_FLAG_MIGRATION)) {
1491                 xe_device_mem_access_put(xe);
1492                 xe_pm_runtime_put(xe);
1493
1494                 if (xe->info.has_asid) {
1495                         mutex_lock(&xe->usm.lock);
1496                         lookup = xa_erase(&xe->usm.asid_to_vm, vm->usm.asid);
1497                         XE_WARN_ON(lookup != vm);
1498                         mutex_unlock(&xe->usm.lock);
1499                 }
1500         }
1501
1502         /*
1503          * XXX: We delay destroying the PT root until the VM is freed, as the PT root
1504          * is needed for xe_vm_lock to work. If we remove that dependency this
1505          * can be moved to xe_vm_close_and_put.
1506          */
1507         xe_vm_lock(vm, &ww, 0, false);
1508         for_each_tile(tile, xe, id) {
1509                 if (vm->pt_root[id]) {
1510                         xe_pt_destroy(vm->pt_root[id], vm->flags, NULL);
1511                         vm->pt_root[id] = NULL;
1512                 }
1513         }
1514         xe_vm_unlock(vm, &ww);
1515
1516         trace_xe_vm_free(vm);
1517         dma_fence_put(vm->rebind_fence);
1518         dma_resv_fini(&vm->resv);
1519         kfree(vm);
1520 }
1521
1522 void xe_vm_free(struct kref *ref)
1523 {
1524         struct xe_vm *vm = container_of(ref, struct xe_vm, refcount);
1525
1526         /* To destroy the VM we need to be able to sleep */
1527         queue_work(system_unbound_wq, &vm->destroy_work);
1528 }
1529
1530 struct xe_vm *xe_vm_lookup(struct xe_file *xef, u32 id)
1531 {
1532         struct xe_vm *vm;
1533
1534         mutex_lock(&xef->vm.lock);
1535         vm = xa_load(&xef->vm.xa, id);
1536         mutex_unlock(&xef->vm.lock);
1537
1538         if (vm)
1539                 xe_vm_get(vm);
1540
1541         return vm;
1542 }
1543
1544 u64 xe_vm_pdp4_descriptor(struct xe_vm *vm, struct xe_tile *tile)
1545 {
1546         return xe_pde_encode(vm->pt_root[tile->id]->bo, 0,
1547                              XE_CACHE_WB);
1548 }
1549
1550 static struct dma_fence *
1551 xe_vm_unbind_vma(struct xe_vma *vma, struct xe_engine *e,
1552                  struct xe_sync_entry *syncs, u32 num_syncs)
1553 {
1554         struct xe_tile *tile;
1555         struct dma_fence *fence = NULL;
1556         struct dma_fence **fences = NULL;
1557         struct dma_fence_array *cf = NULL;
1558         struct xe_vm *vm = vma->vm;
1559         int cur_fence = 0, i;
1560         int number_tiles = hweight_long(vma->tile_present);
1561         int err;
1562         u8 id;
1563
1564         trace_xe_vma_unbind(vma);
1565
1566         if (number_tiles > 1) {
1567                 fences = kmalloc_array(number_tiles, sizeof(*fences),
1568                                        GFP_KERNEL);
1569                 if (!fences)
1570                         return ERR_PTR(-ENOMEM);
1571         }
1572
1573         for_each_tile(tile, vm->xe, id) {
1574                 if (!(vma->tile_present & BIT(id)))
1575                         goto next;
1576
1577                 fence = __xe_pt_unbind_vma(tile, vma, e, syncs, num_syncs);
1578                 if (IS_ERR(fence)) {
1579                         err = PTR_ERR(fence);
1580                         goto err_fences;
1581                 }
1582
1583                 if (fences)
1584                         fences[cur_fence++] = fence;
1585
1586 next:
1587                 if (e && vm->pt_root[id] && !list_empty(&e->multi_gt_list))
1588                         e = list_next_entry(e, multi_gt_list);
1589         }
1590
1591         if (fences) {
1592                 cf = dma_fence_array_create(number_tiles, fences,
1593                                             vm->composite_fence_ctx,
1594                                             vm->composite_fence_seqno++,
1595                                             false);
1596                 if (!cf) {
1597                         --vm->composite_fence_seqno;
1598                         err = -ENOMEM;
1599                         goto err_fences;
1600                 }
1601         }
1602
1603         for (i = 0; i < num_syncs; i++)
1604                 xe_sync_entry_signal(&syncs[i], NULL, cf ? &cf->base : fence);
1605
1606         return cf ? &cf->base : !fence ? dma_fence_get_stub() : fence;
1607
1608 err_fences:
1609         if (fences) {
1610                 while (cur_fence) {
1611                         /* FIXME: Rewind the previous binds? */
1612                         dma_fence_put(fences[--cur_fence]);
1613                 }
1614                 kfree(fences);
1615         }
1616
1617         return ERR_PTR(err);
1618 }
1619
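/*
 * Bind a VMA on every tile in its tile_mask, treating tiles where page tables
 * are already present as rebinds. As with unbind, multiple per-tile fences are
 * aggregated into a composite dma_fence_array.
 */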
1620 static struct dma_fence *
1621 xe_vm_bind_vma(struct xe_vma *vma, struct xe_engine *e,
1622                struct xe_sync_entry *syncs, u32 num_syncs)
1623 {
1624         struct xe_tile *tile;
1625         struct dma_fence *fence;
1626         struct dma_fence **fences = NULL;
1627         struct dma_fence_array *cf = NULL;
1628         struct xe_vm *vm = vma->vm;
1629         int cur_fence = 0, i;
1630         int number_tiles = hweight_long(vma->tile_mask);
1631         int err;
1632         u8 id;
1633
1634         trace_xe_vma_bind(vma);
1635
1636         if (number_tiles > 1) {
1637                 fences = kmalloc_array(number_tiles, sizeof(*fences),
1638                                        GFP_KERNEL);
1639                 if (!fences)
1640                         return ERR_PTR(-ENOMEM);
1641         }
1642
1643         for_each_tile(tile, vm->xe, id) {
1644                 if (!(vma->tile_mask & BIT(id)))
1645                         goto next;
1646
1647                 fence = __xe_pt_bind_vma(tile, vma, e, syncs, num_syncs,
1648                                          vma->tile_present & BIT(id));
1649                 if (IS_ERR(fence)) {
1650                         err = PTR_ERR(fence);
1651                         goto err_fences;
1652                 }
1653
1654                 if (fences)
1655                         fences[cur_fence++] = fence;
1656
1657 next:
1658                 if (e && vm->pt_root[id] && !list_empty(&e->multi_gt_list))
1659                         e = list_next_entry(e, multi_gt_list);
1660         }
1661
1662         if (fences) {
1663                 cf = dma_fence_array_create(number_tiles, fences,
1664                                             vm->composite_fence_ctx,
1665                                             vm->composite_fence_seqno++,
1666                                             false);
1667                 if (!cf) {
1668                         --vm->composite_fence_seqno;
1669                         err = -ENOMEM;
1670                         goto err_fences;
1671                 }
1672         }
1673
1674         for (i = 0; i < num_syncs; i++)
1675                 xe_sync_entry_signal(&syncs[i], NULL, cf ? &cf->base : fence);
1676
1677         return cf ? &cf->base : fence;
1678
1679 err_fences:
1680         if (fences) {
1681                 while (cur_fence) {
1682                         /* FIXME: Rewind the previous binds? */
1683                         dma_fence_put(fences[--cur_fence]);
1684                 }
1685                 kfree(fences);
1686         }
1687
1688         return ERR_PTR(err);
1689 }
1690
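/*
 * Fence handed back to userspace for an asynchronously queued bind operation.
 * It is signalled, with the error propagated, once the underlying bind /
 * unbind fence (wait_fence) signals. The wait queue lets waiters block until
 * the async worker has actually started the operation.
 */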
1691 struct async_op_fence {
1692         struct dma_fence fence;
1693         struct dma_fence *wait_fence;
1694         struct dma_fence_cb cb;
1695         struct xe_vm *vm;
1696         wait_queue_head_t wq;
1697         bool started;
1698 };
1699
1700 static const char *async_op_fence_get_driver_name(struct dma_fence *dma_fence)
1701 {
1702         return "xe";
1703 }
1704
1705 static const char *
1706 async_op_fence_get_timeline_name(struct dma_fence *dma_fence)
1707 {
1708         return "async_op_fence";
1709 }
1710
1711 static const struct dma_fence_ops async_op_fence_ops = {
1712         .get_driver_name = async_op_fence_get_driver_name,
1713         .get_timeline_name = async_op_fence_get_timeline_name,
1714 };
1715
1716 static void async_op_fence_cb(struct dma_fence *fence, struct dma_fence_cb *cb)
1717 {
1718         struct async_op_fence *afence =
1719                 container_of(cb, struct async_op_fence, cb);
1720
1721         afence->fence.error = afence->wait_fence->error;
1722         dma_fence_signal(&afence->fence);
1723         xe_vm_put(afence->vm);
1724         dma_fence_put(afence->wait_fence);
1725         dma_fence_put(&afence->fence);
1726 }
1727
1728 static void add_async_op_fence_cb(struct xe_vm *vm,
1729                                   struct dma_fence *fence,
1730                                   struct async_op_fence *afence)
1731 {
1732         int ret;
1733
1734         if (!xe_vm_no_dma_fences(vm)) {
1735                 afence->started = true;
1736                 smp_wmb();
1737                 wake_up_all(&afence->wq);
1738         }
1739
1740         afence->wait_fence = dma_fence_get(fence);
1741         afence->vm = xe_vm_get(vm);
1742         dma_fence_get(&afence->fence);
1743         ret = dma_fence_add_callback(fence, &afence->cb, async_op_fence_cb);
1744         if (ret == -ENOENT) {
1745                 afence->fence.error = afence->wait_fence->error;
1746                 dma_fence_signal(&afence->fence);
1747         }
1748         if (ret) {
1749                 xe_vm_put(vm);
1750                 dma_fence_put(afence->wait_fence);
1751                 dma_fence_put(&afence->fence);
1752         }
1753         XE_WARN_ON(ret && ret != -ENOENT);
1754 }
1755
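/**
 * xe_vm_async_fence_wait_start() - Wait for an async bind op to be started
 * @fence: The fence returned for the async bind operation
 *
 * If @fence is an async_op_fence, block interruptibly until the async worker
 * has picked up the corresponding operation; other fence types return
 * immediately. Only legal on VMs that use dma-fences.
 *
 * Return: 0 on success, -ERESTARTSYS if interrupted while waiting.
 */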
1756 int xe_vm_async_fence_wait_start(struct dma_fence *fence)
1757 {
1758         if (fence->ops == &async_op_fence_ops) {
1759                 struct async_op_fence *afence =
1760                         container_of(fence, struct async_op_fence, fence);
1761
1762                 XE_BUG_ON(xe_vm_no_dma_fences(afence->vm));
1763
1764                 smp_rmb();
1765                 return wait_event_interruptible(afence->wq, afence->started);
1766         }
1767
1768         return 0;
1769 }
1770
1771 static int __xe_vm_bind(struct xe_vm *vm, struct xe_vma *vma,
1772                         struct xe_engine *e, struct xe_sync_entry *syncs,
1773                         u32 num_syncs, struct async_op_fence *afence)
1774 {
1775         struct dma_fence *fence;
1776
1777         xe_vm_assert_held(vm);
1778
1779         fence = xe_vm_bind_vma(vma, e, syncs, num_syncs);
1780         if (IS_ERR(fence))
1781                 return PTR_ERR(fence);
1782         if (afence)
1783                 add_async_op_fence_cb(vm, fence, afence);
1784
1785         dma_fence_put(fence);
1786         return 0;
1787 }
1788
1789 static int xe_vm_bind(struct xe_vm *vm, struct xe_vma *vma, struct xe_engine *e,
1790                       struct xe_bo *bo, struct xe_sync_entry *syncs,
1791                       u32 num_syncs, struct async_op_fence *afence)
1792 {
1793         int err;
1794
1795         xe_vm_assert_held(vm);
1796         xe_bo_assert_held(bo);
1797
1798         if (bo) {
1799                 err = xe_bo_validate(bo, vm, true);
1800                 if (err)
1801                         return err;
1802         }
1803
1804         return __xe_vm_bind(vm, vma, e, syncs, num_syncs, afence);
1805 }
1806
1807 static int xe_vm_unbind(struct xe_vm *vm, struct xe_vma *vma,
1808                         struct xe_engine *e, struct xe_sync_entry *syncs,
1809                         u32 num_syncs, struct async_op_fence *afence)
1810 {
1811         struct dma_fence *fence;
1812
1813         xe_vm_assert_held(vm);
1814         xe_bo_assert_held(vma->bo);
1815
1816         fence = xe_vm_unbind_vma(vma, e, syncs, num_syncs);
1817         if (IS_ERR(fence))
1818                 return PTR_ERR(fence);
1819         if (afence)
1820                 add_async_op_fence_cb(vm, fence, afence);
1821
1822         xe_vma_destroy(vma, fence);
1823         dma_fence_put(fence);
1824
1825         return 0;
1826 }
1827
1828 static int vm_set_error_capture_address(struct xe_device *xe, struct xe_vm *vm,
1829                                         u64 value)
1830 {
1831         if (XE_IOCTL_ERR(xe, !value))
1832                 return -EINVAL;
1833
1834         if (XE_IOCTL_ERR(xe, !(vm->flags & XE_VM_FLAG_ASYNC_BIND_OPS)))
1835                 return -EOPNOTSUPP;
1836
1837         if (XE_IOCTL_ERR(xe, vm->async_ops.error_capture.addr))
1838                 return -EOPNOTSUPP;
1839
1840         vm->async_ops.error_capture.mm = current->mm;
1841         vm->async_ops.error_capture.addr = value;
1842         init_waitqueue_head(&vm->async_ops.error_capture.wq);
1843
1844         return 0;
1845 }
1846
1847 typedef int (*xe_vm_set_property_fn)(struct xe_device *xe, struct xe_vm *vm,
1848                                      u64 value);
1849
1850 static const xe_vm_set_property_fn vm_set_property_funcs[] = {
1851         [XE_VM_PROPERTY_BIND_OP_ERROR_CAPTURE_ADDRESS] =
1852                 vm_set_error_capture_address,
1853 };
1854
1855 static int vm_user_ext_set_property(struct xe_device *xe, struct xe_vm *vm,
1856                                     u64 extension)
1857 {
1858         u64 __user *address = u64_to_user_ptr(extension);
1859         struct drm_xe_ext_vm_set_property ext;
1860         int err;
1861
1862         err = __copy_from_user(&ext, address, sizeof(ext));
1863         if (XE_IOCTL_ERR(xe, err))
1864                 return -EFAULT;
1865
1866         if (XE_IOCTL_ERR(xe, ext.property >=
1867                          ARRAY_SIZE(vm_set_property_funcs)) ||
1868             XE_IOCTL_ERR(xe, ext.pad) ||
1869             XE_IOCTL_ERR(xe, ext.reserved[0] || ext.reserved[1]))
1870                 return -EINVAL;
1871
1872         return vm_set_property_funcs[ext.property](xe, vm, ext.value);
1873 }
1874
1875 typedef int (*xe_vm_user_extension_fn)(struct xe_device *xe, struct xe_vm *vm,
1876                                        u64 extension);
1877
1878 static const xe_vm_set_property_fn vm_user_extension_funcs[] = {
1879         [XE_VM_EXTENSION_SET_PROPERTY] = vm_user_ext_set_property,
1880 };
1881
1882 #define MAX_USER_EXTENSIONS     16
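/*
 * Walk the chain of user extensions passed to the VM create ioctl. Each link
 * starts with a struct xe_user_extension header whose @name selects a handler
 * (currently only XE_VM_EXTENSION_SET_PROPERTY) and whose @next_extension is
 * the user pointer to the next link (0 terminates the chain). The walk
 * recurses once per link and is capped at MAX_USER_EXTENSIONS.
 */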
1883 static int vm_user_extensions(struct xe_device *xe, struct xe_vm *vm,
1884                               u64 extensions, int ext_number)
1885 {
1886         u64 __user *address = u64_to_user_ptr(extensions);
1887         struct xe_user_extension ext;
1888         int err;
1889
1890         if (XE_IOCTL_ERR(xe, ext_number >= MAX_USER_EXTENSIONS))
1891                 return -E2BIG;
1892
1893         err = __copy_from_user(&ext, address, sizeof(ext));
1894         if (XE_IOCTL_ERR(xe, err))
1895                 return -EFAULT;
1896
1897         if (XE_IOCTL_ERR(xe, ext.pad) ||
1898             XE_IOCTL_ERR(xe, ext.name >=
1899                          ARRAY_SIZE(vm_user_extension_funcs)))
1900                 return -EINVAL;
1901
1902         err = vm_user_extension_funcs[ext.name](xe, vm, extensions);
1903         if (XE_IOCTL_ERR(xe, err))
1904                 return err;
1905
1906         if (ext.next_extension)
1907                 return vm_user_extensions(xe, vm, ext.next_extension,
1908                                           ++ext_number);
1909
1910         return 0;
1911 }
1912
1913 #define ALL_DRM_XE_VM_CREATE_FLAGS (DRM_XE_VM_CREATE_SCRATCH_PAGE | \
1914                                     DRM_XE_VM_CREATE_COMPUTE_MODE | \
1915                                     DRM_XE_VM_CREATE_ASYNC_BIND_OPS | \
1916                                     DRM_XE_VM_CREATE_FAULT_MODE)
1917
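/**
 * xe_vm_create_ioctl() - Create a new VM for the calling file
 * @dev: DRM device
 * @data: struct drm_xe_vm_create in/out argument
 * @file: DRM file the VM is created against
 *
 * Validates the creation flags (scratch page, compute mode, async bind ops,
 * fault mode and their mutual exclusions), creates the VM, runs any user
 * extensions, publishes the VM id in the file's xarray and, on devices with
 * ASID support, allocates an ASID for it. A minimal userspace sketch, assuming
 * the DRM_IOCTL_XE_VM_CREATE wrapper from xe_drm.h:
 *
 *	struct drm_xe_vm_create create = {
 *		.flags = DRM_XE_VM_CREATE_ASYNC_BIND_OPS,
 *	};
 *
 *	err = ioctl(fd, DRM_IOCTL_XE_VM_CREATE, &create);
 *	if (!err)
 *		vm_id = create.vm_id;
 *
 * Return: 0 on success, negative error code on failure.
 */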
1918 int xe_vm_create_ioctl(struct drm_device *dev, void *data,
1919                        struct drm_file *file)
1920 {
1921         struct xe_device *xe = to_xe_device(dev);
1922         struct xe_file *xef = to_xe_file(file);
1923         struct drm_xe_vm_create *args = data;
1924         struct xe_vm *vm;
1925         u32 id, asid;
1926         int err;
1927         u32 flags = 0;
1928
1929         if (XE_IOCTL_ERR(xe, args->reserved[0] || args->reserved[1]))
1930                 return -EINVAL;
1931
1932         if (XE_IOCTL_ERR(xe, args->flags & ~ALL_DRM_XE_VM_CREATE_FLAGS))
1933                 return -EINVAL;
1934
1935         if (XE_IOCTL_ERR(xe, args->flags & DRM_XE_VM_CREATE_SCRATCH_PAGE &&
1936                          args->flags & DRM_XE_VM_CREATE_FAULT_MODE))
1937                 return -EINVAL;
1938
1939         if (XE_IOCTL_ERR(xe, args->flags & DRM_XE_VM_CREATE_COMPUTE_MODE &&
1940                          args->flags & DRM_XE_VM_CREATE_FAULT_MODE))
1941                 return -EINVAL;
1942
1943         if (XE_IOCTL_ERR(xe, args->flags & DRM_XE_VM_CREATE_FAULT_MODE &&
1944                          xe_device_in_non_fault_mode(xe)))
1945                 return -EINVAL;
1946
1947         if (XE_IOCTL_ERR(xe, !(args->flags & DRM_XE_VM_CREATE_FAULT_MODE) &&
1948                          xe_device_in_fault_mode(xe)))
1949                 return -EINVAL;
1950
1951         if (XE_IOCTL_ERR(xe, args->flags & DRM_XE_VM_CREATE_FAULT_MODE &&
1952                          !xe->info.supports_usm))
1953                 return -EINVAL;
1954
1955         if (args->flags & DRM_XE_VM_CREATE_SCRATCH_PAGE)
1956                 flags |= XE_VM_FLAG_SCRATCH_PAGE;
1957         if (args->flags & DRM_XE_VM_CREATE_COMPUTE_MODE)
1958                 flags |= XE_VM_FLAG_COMPUTE_MODE;
1959         if (args->flags & DRM_XE_VM_CREATE_ASYNC_BIND_OPS)
1960                 flags |= XE_VM_FLAG_ASYNC_BIND_OPS;
1961         if (args->flags & DRM_XE_VM_CREATE_FAULT_MODE)
1962                 flags |= XE_VM_FLAG_FAULT_MODE;
1963
1964         vm = xe_vm_create(xe, flags);
1965         if (IS_ERR(vm))
1966                 return PTR_ERR(vm);
1967
1968         if (args->extensions) {
1969                 err = vm_user_extensions(xe, vm, args->extensions, 0);
1970                 if (XE_IOCTL_ERR(xe, err)) {
1971                         xe_vm_close_and_put(vm);
1972                         return err;
1973                 }
1974         }
1975
1976         mutex_lock(&xef->vm.lock);
1977         err = xa_alloc(&xef->vm.xa, &id, vm, xa_limit_32b, GFP_KERNEL);
1978         mutex_unlock(&xef->vm.lock);
1979         if (err) {
1980                 xe_vm_close_and_put(vm);
1981                 return err;
1982         }
1983
1984         if (xe->info.has_asid) {
1985                 mutex_lock(&xe->usm.lock);
1986                 err = xa_alloc_cyclic(&xe->usm.asid_to_vm, &asid, vm,
1987                                       XA_LIMIT(0, XE_MAX_ASID - 1),
1988                                       &xe->usm.next_asid, GFP_KERNEL);
1989                 mutex_unlock(&xe->usm.lock);
1990                 if (err) {
1991                         xe_vm_close_and_put(vm);
1992                         return err;
1993                 }
1994                 vm->usm.asid = asid;
1995         }
1996
1997         args->vm_id = id;
1998
1999 #if IS_ENABLED(CONFIG_DRM_XE_DEBUG_MEM)
2000         /* Warning: Security issue - never enable by default */
2001         args->reserved[0] = xe_bo_main_addr(vm->pt_root[0]->bo, XE_PAGE_SIZE);
2002 #endif
2003
2004         return 0;
2005 }
2006
2007 int xe_vm_destroy_ioctl(struct drm_device *dev, void *data,
2008                         struct drm_file *file)
2009 {
2010         struct xe_device *xe = to_xe_device(dev);
2011         struct xe_file *xef = to_xe_file(file);
2012         struct drm_xe_vm_destroy *args = data;
2013         struct xe_vm *vm;
2014
2015         if (XE_IOCTL_ERR(xe, args->pad) ||
2016             XE_IOCTL_ERR(xe, args->reserved[0] || args->reserved[1]))
2017                 return -EINVAL;
2018
2019         vm = xe_vm_lookup(xef, args->vm_id);
2020         if (XE_IOCTL_ERR(xe, !vm))
2021                 return -ENOENT;
2022         xe_vm_put(vm);
2023
2024         /* FIXME: Extend this check to non-compute mode VMs */
2025         if (XE_IOCTL_ERR(xe, vm->preempt.num_engines))
2026                 return -EBUSY;
2027
2028         mutex_lock(&xef->vm.lock);
2029         xa_erase(&xef->vm.xa, args->vm_id);
2030         mutex_unlock(&xef->vm.lock);
2031
2032         xe_vm_close_and_put(vm);
2033
2034         return 0;
2035 }
2036
2037 static const u32 region_to_mem_type[] = {
2038         XE_PL_TT,
2039         XE_PL_VRAM0,
2040         XE_PL_VRAM1,
2041 };
2042
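/*
 * Prefetch: migrate the backing BO (if any) to the requested memory region and
 * rebind the VMA if it is not fully bound or has been invalidated; otherwise
 * there is nothing to do and the syncs / out-fence are signalled immediately.
 */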
2043 static int xe_vm_prefetch(struct xe_vm *vm, struct xe_vma *vma,
2044                           struct xe_engine *e, u32 region,
2045                           struct xe_sync_entry *syncs, u32 num_syncs,
2046                           struct async_op_fence *afence)
2047 {
2048         int err;
2049
2050         XE_BUG_ON(region >= ARRAY_SIZE(region_to_mem_type));
2051
2052         if (!xe_vma_has_no_bo(vma)) {
2053                 err = xe_bo_migrate(vma->bo, region_to_mem_type[region]);
2054                 if (err)
2055                         return err;
2056         }
2057
2058         if (vma->tile_mask != (vma->tile_present & ~vma->usm.tile_invalidated)) {
2059                 return xe_vm_bind(vm, vma, e, vma->bo, syncs, num_syncs,
2060                                   afence);
2061         } else {
2062                 int i;
2063
2064                 /* Nothing to do, signal fences now */
2065                 for (i = 0; i < num_syncs; i++)
2066                         xe_sync_entry_signal(&syncs[i], NULL,
2067                                              dma_fence_get_stub());
2068                 if (afence)
2069                         dma_fence_signal(&afence->fence);
2070                 return 0;
2071         }
2072 }
2073
2074 #define VM_BIND_OP(op)  (op & 0xffff)
2075
2076 static int __vm_bind_ioctl(struct xe_vm *vm, struct xe_vma *vma,
2077                            struct xe_engine *e, struct xe_bo *bo, u32 op,
2078                            u32 region, struct xe_sync_entry *syncs,
2079                            u32 num_syncs, struct async_op_fence *afence)
2080 {
2081         switch (VM_BIND_OP(op)) {
2082         case XE_VM_BIND_OP_MAP:
2083                 return xe_vm_bind(vm, vma, e, bo, syncs, num_syncs, afence);
2084         case XE_VM_BIND_OP_UNMAP:
2085         case XE_VM_BIND_OP_UNMAP_ALL:
2086                 return xe_vm_unbind(vm, vma, e, syncs, num_syncs, afence);
2087         case XE_VM_BIND_OP_MAP_USERPTR:
2088                 return xe_vm_bind(vm, vma, e, NULL, syncs, num_syncs, afence);
2089         case XE_VM_BIND_OP_PREFETCH:
2090                 return xe_vm_prefetch(vm, vma, e, region, syncs, num_syncs,
2091                                       afence);
2093         default:
2094                 XE_BUG_ON("NOT POSSIBLE");
2095                 return -EINVAL;
2096         }
2097 }
2098
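/**
 * xe_vm_ttm_bo() - TTM object to reserve when locking the VM
 * @vm: The VM
 *
 * Return: The TTM BO of one of the VM's page-table roots. All BOs created
 * against the VM share the VM's dma-resv, so reserving this BO serializes
 * against the whole VM.
 */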
2099 struct ttm_buffer_object *xe_vm_ttm_bo(struct xe_vm *vm)
2100 {
2101         int idx = vm->flags & XE_VM_FLAG_MIGRATION ?
2102                 XE_VM_FLAG_GT_ID(vm->flags) : 0;
2103
2104         /* Safe to use index 0 as all BOs in the VM share a single dma-resv lock */
2105         return &vm->pt_root[idx]->bo->ttm;
2106 }
2107
2108 static void xe_vm_tv_populate(struct xe_vm *vm, struct ttm_validate_buffer *tv)
2109 {
2110         tv->num_shared = 1;
2111         tv->bo = xe_vm_ttm_bo(vm);
2112 }
2113
2114 static bool is_map_op(u32 op)
2115 {
2116         return VM_BIND_OP(op) == XE_VM_BIND_OP_MAP ||
2117                 VM_BIND_OP(op) == XE_VM_BIND_OP_MAP_USERPTR;
2118 }
2119
2120 static bool is_unmap_op(u32 op)
2121 {
2122         return VM_BIND_OP(op) == XE_VM_BIND_OP_UNMAP ||
2123                 VM_BIND_OP(op) == XE_VM_BIND_OP_UNMAP_ALL;
2124 }
2125
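/*
 * Execute a single, already looked-up bind op. The VM (and, for external BOs,
 * the BO itself) is reserved via ttm_eu_reserve_buffers, the op is dispatched,
 * and a userptr VMA that raced with an invalidation (-EAGAIN) is repinned and
 * retried. Map ops on fault-mode VMs without the IMMEDIATE flag are deferred
 * to the fault handler and only have their fences signalled here.
 */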
2126 static int vm_bind_ioctl(struct xe_vm *vm, struct xe_vma *vma,
2127                          struct xe_engine *e, struct xe_bo *bo,
2128                          struct drm_xe_vm_bind_op *bind_op,
2129                          struct xe_sync_entry *syncs, u32 num_syncs,
2130                          struct async_op_fence *afence)
2131 {
2132         LIST_HEAD(objs);
2133         LIST_HEAD(dups);
2134         struct ttm_validate_buffer tv_bo, tv_vm;
2135         struct ww_acquire_ctx ww;
2136         struct xe_bo *vbo;
2137         int err, i;
2138
2139         lockdep_assert_held(&vm->lock);
2140         XE_BUG_ON(!list_empty(&vma->unbind_link));
2141
2142         /* Binds deferred to faults, signal fences now */
2143         if (xe_vm_in_fault_mode(vm) && is_map_op(bind_op->op) &&
2144             !(bind_op->op & XE_VM_BIND_FLAG_IMMEDIATE)) {
2145                 for (i = 0; i < num_syncs; i++)
2146                         xe_sync_entry_signal(&syncs[i], NULL,
2147                                              dma_fence_get_stub());
2148                 if (afence)
2149                         dma_fence_signal(&afence->fence);
2150                 return 0;
2151         }
2152
2153         xe_vm_tv_populate(vm, &tv_vm);
2154         list_add_tail(&tv_vm.head, &objs);
2155         vbo = vma->bo;
2156         if (vbo) {
2157                 /*
2158                  * An unbind can drop the last reference to the BO and
2159                  * the BO is needed for ttm_eu_backoff_reservation so
2160                  * take a reference here.
2161                  */
2162                 xe_bo_get(vbo);
2163
2164                 if (!vbo->vm) {
2165                         tv_bo.bo = &vbo->ttm;
2166                         tv_bo.num_shared = 1;
2167                         list_add(&tv_bo.head, &objs);
2168                 }
2169         }
2170
2171 again:
2172         err = ttm_eu_reserve_buffers(&ww, &objs, true, &dups);
2173         if (!err) {
2174                 err = __vm_bind_ioctl(vm, vma, e, bo,
2175                                       bind_op->op, bind_op->region, syncs,
2176                                       num_syncs, afence);
2177                 ttm_eu_backoff_reservation(&ww, &objs);
2178                 if (err == -EAGAIN && xe_vma_is_userptr(vma)) {
2179                         lockdep_assert_held_write(&vm->lock);
2180                         err = xe_vma_userptr_pin_pages(vma);
2181                         if (!err)
2182                                 goto again;
2183                 }
2184         }
2185         xe_bo_put(vbo);
2186
2187         return err;
2188 }
2189
2190 struct async_op {
2191         struct xe_vma *vma;
2192         struct xe_engine *engine;
2193         struct xe_bo *bo;
2194         struct drm_xe_vm_bind_op bind_op;
2195         struct xe_sync_entry *syncs;
2196         u32 num_syncs;
2197         struct list_head link;
2198         struct async_op_fence *fence;
2199 };
2200
2201 static void async_op_cleanup(struct xe_vm *vm, struct async_op *op)
2202 {
2203         while (op->num_syncs--)
2204                 xe_sync_entry_cleanup(&op->syncs[op->num_syncs]);
2205         kfree(op->syncs);
2206         xe_bo_put(op->bo);
2207         if (op->engine)
2208                 xe_engine_put(op->engine);
2209         xe_vm_put(vm);
2210         if (op->fence)
2211                 dma_fence_put(&op->fence->fence);
2212         kfree(op);
2213 }
2214
2215 static struct async_op *next_async_op(struct xe_vm *vm)
2216 {
2217         return list_first_entry_or_null(&vm->async_ops.pending,
2218                                         struct async_op, link);
2219 }
2220
2221 static void vm_set_async_error(struct xe_vm *vm, int err)
2222 {
2223         lockdep_assert_held(&vm->lock);
2224         vm->async_ops.error = err;
2225 }
2226
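/*
 * Async bind worker: drains vm->async_ops.pending one op at a time under the
 * VM lock. munmap-style unbind + rebind chains are processed back to back so
 * their dma-resv updates are done as one batch relative to execs / the rebind
 * worker. On failure the op is put back on the list, the VM enters the async
 * error state, which stops further ops from running until the error is
 * cleared, and the optional error capture is notified. If the VM is closed,
 * remaining ops are flushed: unbind ops just destroy their VMAs and any
 * pending fences are signalled.
 */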
2227 static void async_op_work_func(struct work_struct *w)
2228 {
2229         struct xe_vm *vm = container_of(w, struct xe_vm, async_ops.work);
2230
2231         for (;;) {
2232                 struct async_op *op;
2233                 int err;
2234
2235                 if (vm->async_ops.error && !xe_vm_is_closed(vm))
2236                         break;
2237
2238                 spin_lock_irq(&vm->async_ops.lock);
2239                 op = next_async_op(vm);
2240                 if (op)
2241                         list_del_init(&op->link);
2242                 spin_unlock_irq(&vm->async_ops.lock);
2243
2244                 if (!op)
2245                         break;
2246
2247                 if (!xe_vm_is_closed(vm)) {
2248                         bool first, last;
2249
2250                         down_write(&vm->lock);
2251 again:
2252                         first = op->vma->first_munmap_rebind;
2253                         last = op->vma->last_munmap_rebind;
2254 #ifdef TEST_VM_ASYNC_OPS_ERROR
2255 #define FORCE_ASYNC_OP_ERROR    BIT(31)
2256                         if (!(op->bind_op.op & FORCE_ASYNC_OP_ERROR)) {
2257                                 err = vm_bind_ioctl(vm, op->vma, op->engine,
2258                                                     op->bo, &op->bind_op,
2259                                                     op->syncs, op->num_syncs,
2260                                                     op->fence);
2261                         } else {
2262                                 err = -ENOMEM;
2263                                 op->bind_op.op &= ~FORCE_ASYNC_OP_ERROR;
2264                         }
2265 #else
2266                         err = vm_bind_ioctl(vm, op->vma, op->engine, op->bo,
2267                                             &op->bind_op, op->syncs,
2268                                             op->num_syncs, op->fence);
2269 #endif
2270                         /*
2271                          * In order for the fencing to work (stall behind
2272                          * existing jobs / prevent new jobs from running) all
2273                          * the dma-resv slots need to be programmed in a batch
2274                          * relative to execs / the rebind worker. The vm->lock
2275                          * ensures this.
2276                          */
2277                         if (!err && ((first && VM_BIND_OP(op->bind_op.op) ==
2278                                       XE_VM_BIND_OP_UNMAP) ||
2279                                      vm->async_ops.munmap_rebind_inflight)) {
2280                                 if (last) {
2281                                         op->vma->last_munmap_rebind = false;
2282                                         vm->async_ops.munmap_rebind_inflight =
2283                                                 false;
2284                                 } else {
2285                                         vm->async_ops.munmap_rebind_inflight =
2286                                                 true;
2287
2288                                         async_op_cleanup(vm, op);
2289
2290                                         spin_lock_irq(&vm->async_ops.lock);
2291                                         op = next_async_op(vm);
2292                                         XE_BUG_ON(!op);
2293                                         list_del_init(&op->link);
2294                                         spin_unlock_irq(&vm->async_ops.lock);
2295
2296                                         goto again;
2297                                 }
2298                         }
2299                         if (err) {
2300                                 trace_xe_vma_fail(op->vma);
2301                                 drm_warn(&vm->xe->drm, "Async VM op(%d) failed with %d",
2302                                          VM_BIND_OP(op->bind_op.op),
2303                                          err);
2304
2305                                 spin_lock_irq(&vm->async_ops.lock);
2306                                 list_add(&op->link, &vm->async_ops.pending);
2307                                 spin_unlock_irq(&vm->async_ops.lock);
2308
2309                                 vm_set_async_error(vm, err);
2310                                 up_write(&vm->lock);
2311
2312                                 if (vm->async_ops.error_capture.addr)
2313                                         vm_error_capture(vm, err,
2314                                                          op->bind_op.op,
2315                                                          op->bind_op.addr,
2316                                                          op->bind_op.range);
2317                                 break;
2318                         }
2319                         up_write(&vm->lock);
2320                 } else {
2321                         trace_xe_vma_flush(op->vma);
2322
2323                         if (is_unmap_op(op->bind_op.op)) {
2324                                 down_write(&vm->lock);
2325                                 xe_vma_destroy_unlocked(op->vma);
2326                                 up_write(&vm->lock);
2327                         }
2328
2329                         if (op->fence && !test_bit(DMA_FENCE_FLAG_SIGNALED_BIT,
2330                                                    &op->fence->fence.flags)) {
2331                                 if (!xe_vm_no_dma_fences(vm)) {
2332                                         op->fence->started = true;
2333                                         smp_wmb();
2334                                         wake_up_all(&op->fence->wq);
2335                                 }
2336                                 dma_fence_signal(&op->fence->fence);
2337                         }
2338                 }
2339
2340                 async_op_cleanup(vm, op);
2341         }
2342 }
2343
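/*
 * Queue a single bind op for the async worker: allocate the async_op, create
 * the out-fence if there are syncs (signalling it immediately if no sync
 * actually consumes it), add the op to the pending list and kick the worker
 * unless the VM is in the async error state.
 */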
2344 static int __vm_bind_ioctl_async(struct xe_vm *vm, struct xe_vma *vma,
2345                                  struct xe_engine *e, struct xe_bo *bo,
2346                                  struct drm_xe_vm_bind_op *bind_op,
2347                                  struct xe_sync_entry *syncs, u32 num_syncs)
2348 {
2349         struct async_op *op;
2350         bool installed = false;
2351         u64 seqno;
2352         int i;
2353
2354         lockdep_assert_held(&vm->lock);
2355
2356         op = kmalloc(sizeof(*op), GFP_KERNEL);
2357         if (!op)
2358                 return -ENOMEM;
2360
2361         if (num_syncs) {
2362                 op->fence = kmalloc(sizeof(*op->fence), GFP_KERNEL);
2363                 if (!op->fence) {
2364                         kfree(op);
2365                         return -ENOMEM;
2366                 }
2367
2368                 seqno = e ? ++e->bind.fence_seqno : ++vm->async_ops.fence.seqno;
2369                 dma_fence_init(&op->fence->fence, &async_op_fence_ops,
2370                                &vm->async_ops.lock, e ? e->bind.fence_ctx :
2371                                vm->async_ops.fence.context, seqno);
2372
2373                 if (!xe_vm_no_dma_fences(vm)) {
2374                         op->fence->vm = vm;
2375                         op->fence->started = false;
2376                         init_waitqueue_head(&op->fence->wq);
2377                 }
2378         } else {
2379                 op->fence = NULL;
2380         }
2381         op->vma = vma;
2382         op->engine = e;
2383         op->bo = bo;
2384         op->bind_op = *bind_op;
2385         op->syncs = syncs;
2386         op->num_syncs = num_syncs;
2387         INIT_LIST_HEAD(&op->link);
2388
2389         for (i = 0; i < num_syncs; i++)
2390                 installed |= xe_sync_entry_signal(&syncs[i], NULL,
2391                                                   &op->fence->fence);
2392
2393         if (!installed && op->fence)
2394                 dma_fence_signal(&op->fence->fence);
2395
2396         spin_lock_irq(&vm->async_ops.lock);
2397         list_add_tail(&op->link, &vm->async_ops.pending);
2398         spin_unlock_irq(&vm->async_ops.lock);
2399
2400         if (!vm->async_ops.error)
2401                 queue_work(system_unbound_wq, &vm->async_ops.work);
2402
2403         return 0;
2404 }
2405
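/*
 * Queue an op that may have been decomposed into a chain of unbinds + rebinds
 * (see vm_unbind_lookup_vmas). The 'wait' syncs are attached to the first
 * queued op and the 'signal' syncs to the last, and extra VM / engine / BO
 * references are taken so that each op in the chain owns the references it
 * drops in async_op_cleanup().
 */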
2406 static int vm_bind_ioctl_async(struct xe_vm *vm, struct xe_vma *vma,
2407                                struct xe_engine *e, struct xe_bo *bo,
2408                                struct drm_xe_vm_bind_op *bind_op,
2409                                struct xe_sync_entry *syncs, u32 num_syncs)
2410 {
2411         struct xe_vma *__vma, *next;
2412         struct list_head rebind_list;
2413         struct xe_sync_entry *in_syncs = NULL, *out_syncs = NULL;
2414         u32 num_in_syncs = 0, num_out_syncs = 0;
2415         bool first = true, last;
2416         int err;
2417         int i;
2418
2419         lockdep_assert_held(&vm->lock);
2420
2421         /* Not a linked list of unbinds + rebinds, easy */
2422         if (list_empty(&vma->unbind_link))
2423                 return __vm_bind_ioctl_async(vm, vma, e, bo, bind_op,
2424                                              syncs, num_syncs);
2425
2426         /*
2427          * Linked list of unbinds + rebinds: decompose the syncs into 'in / out',
2428          * passing the 'in' syncs to the first operation and the 'out' syncs to the
2429          * last. The reference counting is also a little tricky: take a VM / bind
2430          * engine reference for all but the last operation and a BO reference for
2431          * each rebind.
2432          */
2433
2434         XE_BUG_ON(VM_BIND_OP(bind_op->op) != XE_VM_BIND_OP_UNMAP &&
2435                   VM_BIND_OP(bind_op->op) != XE_VM_BIND_OP_UNMAP_ALL &&
2436                   VM_BIND_OP(bind_op->op) != XE_VM_BIND_OP_PREFETCH);
2437
2438         /* Decompose syncs */
2439         if (num_syncs) {
2440                 in_syncs = kmalloc_array(num_syncs, sizeof(*in_syncs), GFP_KERNEL);
2441                 out_syncs = kmalloc_array(num_syncs, sizeof(*out_syncs), GFP_KERNEL);
2442                 if (!in_syncs || !out_syncs) {
2443                         err = -ENOMEM;
2444                         goto out_error;
2445                 }
2446
2447                 for (i = 0; i < num_syncs; ++i) {
2448                         bool signal = syncs[i].flags & DRM_XE_SYNC_SIGNAL;
2449
2450                         if (signal)
2451                                 out_syncs[num_out_syncs++] = syncs[i];
2452                         else
2453                                 in_syncs[num_in_syncs++] = syncs[i];
2454                 }
2455         }
2456
2457         /* Do unbinds + move rebinds to new list */
2458         INIT_LIST_HEAD(&rebind_list);
2459         list_for_each_entry_safe(__vma, next, &vma->unbind_link, unbind_link) {
2460                 if (__vma->destroyed ||
2461                     VM_BIND_OP(bind_op->op) == XE_VM_BIND_OP_PREFETCH) {
2462                         list_del_init(&__vma->unbind_link);
2463                         xe_bo_get(bo);
2464                         err = __vm_bind_ioctl_async(xe_vm_get(vm), __vma,
2465                                                     e ? xe_engine_get(e) : NULL,
2466                                                     bo, bind_op, first ?
2467                                                     in_syncs : NULL,
2468                                                     first ? num_in_syncs : 0);
2469                         if (err) {
2470                                 xe_bo_put(bo);
2471                                 xe_vm_put(vm);
2472                                 if (e)
2473                                         xe_engine_put(e);
2474                                 goto out_error;
2475                         }
2476                         in_syncs = NULL;
2477                         first = false;
2478                 } else {
2479                         list_move_tail(&__vma->unbind_link, &rebind_list);
2480                 }
2481         }
2482         last = list_empty(&rebind_list);
2483         if (!last) {
2484                 xe_vm_get(vm);
2485                 if (e)
2486                         xe_engine_get(e);
2487         }
2488         err = __vm_bind_ioctl_async(vm, vma, e,
2489                                     bo, bind_op,
2490                                     first ? in_syncs :
2491                                     last ? out_syncs : NULL,
2492                                     first ? num_in_syncs :
2493                                     last ? num_out_syncs : 0);
2494         if (err) {
2495                 if (!last) {
2496                         xe_vm_put(vm);
2497                         if (e)
2498                                 xe_engine_put(e);
2499                 }
2500                 goto out_error;
2501         }
2502         in_syncs = NULL;
2503
2504         /* Do rebinds */
2505         list_for_each_entry_safe(__vma, next, &rebind_list, unbind_link) {
2506                 list_del_init(&__vma->unbind_link);
2507                 last = list_empty(&rebind_list);
2508
2509                 if (xe_vma_is_userptr(__vma)) {
2510                         bind_op->op = XE_VM_BIND_FLAG_ASYNC |
2511                                 XE_VM_BIND_OP_MAP_USERPTR;
2512                 } else {
2513                         bind_op->op = XE_VM_BIND_FLAG_ASYNC |
2514                                 XE_VM_BIND_OP_MAP;
2515                         xe_bo_get(__vma->bo);
2516                 }
2517
2518                 if (!last) {
2519                         xe_vm_get(vm);
2520                         if (e)
2521                                 xe_engine_get(e);
2522                 }
2523
2524                 err = __vm_bind_ioctl_async(vm, __vma, e,
2525                                             __vma->bo, bind_op, last ?
2526                                             out_syncs : NULL,
2527                                             last ? num_out_syncs : 0);
2528                 if (err) {
2529                         if (!last) {
2530                                 xe_vm_put(vm);
2531                                 if (e)
2532                                         xe_engine_put(e);
2533                         }
2534                         goto out_error;
2535                 }
2536         }
2537
2538         kfree(syncs);
2539         return 0;
2540
2541 out_error:
2542         kfree(in_syncs);
2543         kfree(out_syncs);
2544         kfree(syncs);
2545
2546         return err;
2547 }
2548
2549 static int __vm_bind_ioctl_lookup_vma(struct xe_vm *vm, struct xe_bo *bo,
2550                                       u64 addr, u64 range, u32 op)
2551 {
2552         struct xe_device *xe = vm->xe;
2553         struct xe_vma *vma, lookup;
2554         bool async = !!(op & XE_VM_BIND_FLAG_ASYNC);
2555
2556         lockdep_assert_held(&vm->lock);
2557
2558         lookup.start = addr;
2559         lookup.end = addr + range - 1;
2560
2561         switch (VM_BIND_OP(op)) {
2562         case XE_VM_BIND_OP_MAP:
2563         case XE_VM_BIND_OP_MAP_USERPTR:
2564                 vma = xe_vm_find_overlapping_vma(vm, &lookup);
2565                 if (XE_IOCTL_ERR(xe, vma))
2566                         return -EBUSY;
2567                 break;
2568         case XE_VM_BIND_OP_UNMAP:
2569         case XE_VM_BIND_OP_PREFETCH:
2570                 vma = xe_vm_find_overlapping_vma(vm, &lookup);
2571                 if (XE_IOCTL_ERR(xe, !vma) ||
2572                     XE_IOCTL_ERR(xe, (vma->start != addr ||
2573                                  vma->end != addr + range - 1) && !async))
2574                         return -EINVAL;
2575                 break;
2576         case XE_VM_BIND_OP_UNMAP_ALL:
2577                 break;
2578         default:
2579                 XE_BUG_ON("NOT POSSIBLE");
2580                 return -EINVAL;
2581         }
2582
2583         return 0;
2584 }
2585
2586 static void prep_vma_destroy(struct xe_vm *vm, struct xe_vma *vma)
2587 {
2588         down_read(&vm->userptr.notifier_lock);
2589         vma->destroyed = true;
2590         up_read(&vm->userptr.notifier_lock);
2591         xe_vm_remove_vma(vm, vma);
2592 }
2593
2594 static int prep_replacement_vma(struct xe_vm *vm, struct xe_vma *vma)
2595 {
2596         int err;
2597
2598         if (vma->bo && !vma->bo->vm) {
2599                 vm_insert_extobj(vm, vma);
2600                 err = add_preempt_fences(vm, vma->bo);
2601                 if (err)
2602                         return err;
2603         }
2604
2605         return 0;
2606 }
2607
2608 /*
2609  * Find all VMAs overlapping the lookup range and add them to a list on the
2610  * returned VMA; all of the VMAs found will be unbound. Also possibly add 2 new
2611  * VMAs that need to be bound if the first / last VMAs are not fully unbound.
2612  * This is akin to how munmap works.
2613  */
2614 static struct xe_vma *vm_unbind_lookup_vmas(struct xe_vm *vm,
2615                                             struct xe_vma *lookup)
2616 {
2617         struct xe_vma *vma = xe_vm_find_overlapping_vma(vm, lookup);
2618         struct rb_node *node;
2619         struct xe_vma *first = vma, *last = vma, *new_first = NULL,
2620                       *new_last = NULL, *__vma, *next;
2621         int err = 0;
2622         bool first_munmap_rebind = false;
2623
2624         lockdep_assert_held(&vm->lock);
2625         XE_BUG_ON(!vma);
2626
2627         node = &vma->vm_node;
2628         while ((node = rb_next(node))) {
2629                 if (!xe_vma_cmp_vma_cb(lookup, node)) {
2630                         __vma = to_xe_vma(node);
2631                         list_add_tail(&__vma->unbind_link, &vma->unbind_link);
2632                         last = __vma;
2633                 } else {
2634                         break;
2635                 }
2636         }
2637
2638         node = &vma->vm_node;
2639         while ((node = rb_prev(node))) {
2640                 if (!xe_vma_cmp_vma_cb(lookup, node)) {
2641                         __vma = to_xe_vma(node);
2642                         list_add(&__vma->unbind_link, &vma->unbind_link);
2643                         first = __vma;
2644                 } else {
2645                         break;
2646                 }
2647         }
2648
2649         if (first->start != lookup->start) {
2650                 struct ww_acquire_ctx ww;
2651
2652                 if (first->bo)
2653                         err = xe_bo_lock(first->bo, &ww, 0, true);
2654                 if (err)
2655                         goto unwind;
2656                 new_first = xe_vma_create(first->vm, first->bo,
2657                                           first->bo ? first->bo_offset :
2658                                           first->userptr.ptr,
2659                                           first->start,
2660                                           lookup->start - 1,
2661                                           (first->pte_flags &
2662                                            XE_PTE_FLAG_READ_ONLY),
2663                                           (first->pte_flags &
2664                                            XE_PTE_FLAG_NULL),
2665                                           first->tile_mask);
2666                 if (first->bo)
2667                         xe_bo_unlock(first->bo, &ww);
2668                 if (!new_first) {
2669                         err = -ENOMEM;
2670                         goto unwind;
2671                 }
2672                 if (xe_vma_is_userptr(first)) {
2673                         err = xe_vma_userptr_pin_pages(new_first);
2674                         if (err)
2675                                 goto unwind;
2676                 }
2677                 err = prep_replacement_vma(vm, new_first);
2678                 if (err)
2679                         goto unwind;
2680         }
2681
2682         if (last->end != lookup->end) {
2683                 struct ww_acquire_ctx ww;
2684                 u64 chunk = lookup->end + 1 - last->start;
2685
2686                 if (last->bo)
2687                         err = xe_bo_lock(last->bo, &ww, 0, true);
2688                 if (err)
2689                         goto unwind;
2690                 new_last = xe_vma_create(last->vm, last->bo,
2691                                          last->bo ? last->bo_offset + chunk :
2692                                          last->userptr.ptr + chunk,
2693                                          last->start + chunk,
2694                                          last->end,
2695                                          (last->pte_flags &
2696                                           XE_PTE_FLAG_READ_ONLY),
2697                                          (last->pte_flags & XE_PTE_FLAG_NULL),
2698                                          last->tile_mask);
2699                 if (last->bo)
2700                         xe_bo_unlock(last->bo, &ww);
2701                 if (!new_last) {
2702                         err = -ENOMEM;
2703                         goto unwind;
2704                 }
2705                 if (xe_vma_is_userptr(last)) {
2706                         err = xe_vma_userptr_pin_pages(new_last);
2707                         if (err)
2708                                 goto unwind;
2709                 }
2710                 err = prep_replacement_vma(vm, new_last);
2711                 if (err)
2712                         goto unwind;
2713         }
2714
2715         prep_vma_destroy(vm, vma);
2716         if (list_empty(&vma->unbind_link) && (new_first || new_last))
2717                 vma->first_munmap_rebind = true;
2718         list_for_each_entry(__vma, &vma->unbind_link, unbind_link) {
2719                 if ((new_first || new_last) && !first_munmap_rebind) {
2720                         __vma->first_munmap_rebind = true;
2721                         first_munmap_rebind = true;
2722                 }
2723                 prep_vma_destroy(vm, __vma);
2724         }
2725         if (new_first) {
2726                 xe_vm_insert_vma(vm, new_first);
2727                 list_add_tail(&new_first->unbind_link, &vma->unbind_link);
2728                 if (!new_last)
2729                         new_first->last_munmap_rebind = true;
2730         }
2731         if (new_last) {
2732                 xe_vm_insert_vma(vm, new_last);
2733                 list_add_tail(&new_last->unbind_link, &vma->unbind_link);
2734                 new_last->last_munmap_rebind = true;
2735         }
2736
2737         return vma;
2738
2739 unwind:
2740         list_for_each_entry_safe(__vma, next, &vma->unbind_link, unbind_link)
2741                 list_del_init(&__vma->unbind_link);
2742         if (new_last) {
2743                 prep_vma_destroy(vm, new_last);
2744                 xe_vma_destroy_unlocked(new_last);
2745         }
2746         if (new_first) {
2747                 prep_vma_destroy(vm, new_first);
2748                 xe_vma_destroy_unlocked(new_first);
2749         }
2750
2751         return ERR_PTR(err);
2752 }
2753
2754 /*
2755  * Similar to vm_unbind_lookup_vmas, find all VMAs in lookup range to prefetch
2756  */
2757 static struct xe_vma *vm_prefetch_lookup_vmas(struct xe_vm *vm,
2758                                               struct xe_vma *lookup,
2759                                               u32 region)
2760 {
2761         struct xe_vma *vma = xe_vm_find_overlapping_vma(vm, lookup), *__vma,
2762                       *next;
2763         struct rb_node *node;
2764
2765         if (!xe_vma_has_no_bo(vma)) {
2766                 if (!xe_bo_can_migrate(vma->bo, region_to_mem_type[region]))
2767                         return ERR_PTR(-EINVAL);
2768         }
2769
2770         node = &vma->vm_node;
2771         while ((node = rb_next(node))) {
2772                 if (!xe_vma_cmp_vma_cb(lookup, node)) {
2773                         __vma = to_xe_vma(node);
2774                         if (!xe_vma_has_no_bo(__vma)) {
2775                                 if (!xe_bo_can_migrate(__vma->bo, region_to_mem_type[region]))
2776                                         goto flush_list;
2777                         }
2778                         list_add_tail(&__vma->unbind_link, &vma->unbind_link);
2779                 } else {
2780                         break;
2781                 }
2782         }
2783
2784         node = &vma->vm_node;
2785         while ((node = rb_prev(node))) {
2786                 if (!xe_vma_cmp_vma_cb(lookup, node)) {
2787                         __vma = to_xe_vma(node);
2788                         if (!xe_vma_has_no_bo(__vma)) {
2789                                 if (!xe_bo_can_migrate(__vma->bo, region_to_mem_type[region]))
2790                                         goto flush_list;
2791                         }
2792                         list_add(&__vma->unbind_link, &vma->unbind_link);
2793                 } else {
2794                         break;
2795                 }
2796         }
2797
2798         return vma;
2799
2800 flush_list:
2801         list_for_each_entry_safe(__vma, next, &vma->unbind_link,
2802                                  unbind_link)
2803                 list_del_init(&__vma->unbind_link);
2804
2805         return ERR_PTR(-EINVAL);
2806 }
2807
2808 static struct xe_vma *vm_unbind_all_lookup_vmas(struct xe_vm *vm,
2809                                                 struct xe_bo *bo)
2810 {
2811         struct xe_vma *first = NULL, *vma;
2812
2813         lockdep_assert_held(&vm->lock);
2814         xe_bo_assert_held(bo);
2815
2816         list_for_each_entry(vma, &bo->vmas, bo_link) {
2817                 if (vma->vm != vm)
2818                         continue;
2819
2820                 prep_vma_destroy(vm, vma);
2821                 if (!first)
2822                         first = vma;
2823                 else
2824                         list_add_tail(&vma->unbind_link, &first->unbind_link);
2825         }
2826
2827         return first;
2828 }
2829
2830 static struct xe_vma *vm_bind_ioctl_lookup_vma(struct xe_vm *vm,
2831                                                struct xe_bo *bo,
2832                                                u64 bo_offset_or_userptr,
2833                                                u64 addr, u64 range, u32 op,
2834                                                u64 tile_mask, u32 region)
2835 {
2836         struct ww_acquire_ctx ww;
2837         struct xe_vma *vma, lookup;
2838         int err;
2839
2840         lockdep_assert_held(&vm->lock);
2841
2842         lookup.start = addr;
2843         lookup.end = addr + range - 1;
2844
2845         switch (VM_BIND_OP(op)) {
2846         case XE_VM_BIND_OP_MAP:
2847                 if (bo) {
2848                         err = xe_bo_lock(bo, &ww, 0, true);
2849                         if (err)
2850                                 return ERR_PTR(err);
2851                 }
2852                 vma = xe_vma_create(vm, bo, bo_offset_or_userptr, addr,
2853                                     addr + range - 1,
2854                                     op & XE_VM_BIND_FLAG_READONLY,
2855                                     op & XE_VM_BIND_FLAG_NULL,
2856                                     tile_mask);
2857                 if (bo)
2858                         xe_bo_unlock(bo, &ww);
2859                 if (!vma)
2860                         return ERR_PTR(-ENOMEM);
2861
2862                 xe_vm_insert_vma(vm, vma);
2863                 if (bo && !bo->vm) {
2864                         vm_insert_extobj(vm, vma);
2865                         err = add_preempt_fences(vm, bo);
2866                         if (err) {
2867                                 prep_vma_destroy(vm, vma);
2868                                 xe_vma_destroy_unlocked(vma);
2869
2870                                 return ERR_PTR(err);
2871                         }
2872                 }
2873                 break;
2874         case XE_VM_BIND_OP_UNMAP:
2875                 vma = vm_unbind_lookup_vmas(vm, &lookup);
2876                 break;
2877         case XE_VM_BIND_OP_PREFETCH:
2878                 vma = vm_prefetch_lookup_vmas(vm, &lookup, region);
2879                 break;
2880         case XE_VM_BIND_OP_UNMAP_ALL:
2881                 XE_BUG_ON(!bo);
2882
2883                 err = xe_bo_lock(bo, &ww, 0, true);
2884                 if (err)
2885                         return ERR_PTR(err);
2886                 vma = vm_unbind_all_lookup_vmas(vm, bo);
2887                 if (!vma)
2888                         vma = ERR_PTR(-EINVAL);
2889                 xe_bo_unlock(bo, &ww);
2890                 break;
2891         case XE_VM_BIND_OP_MAP_USERPTR:
2892                 XE_BUG_ON(bo);
2893
2894                 vma = xe_vma_create(vm, NULL, bo_offset_or_userptr, addr,
2895                                     addr + range - 1,
2896                                     op & XE_VM_BIND_FLAG_READONLY,
2897                                     op & XE_VM_BIND_FLAG_NULL,
2898                                     tile_mask);
2899                 if (!vma)
2900                         return ERR_PTR(-ENOMEM);
2901
2902                 err = xe_vma_userptr_pin_pages(vma);
2903                 if (err) {
2904                         prep_vma_destroy(vm, vma);
2905                         xe_vma_destroy_unlocked(vma);
2906
2907                         return ERR_PTR(err);
2908                 } else {
2909                         xe_vm_insert_vma(vm, vma);
2910                 }
2911                 break;
2912         default:
2913                 XE_BUG_ON("NOT POSSIBLE");
2914                 vma = ERR_PTR(-EINVAL);
2915         }
2916
2917         return vma;
2918 }
2919
2920 #ifdef TEST_VM_ASYNC_OPS_ERROR
2921 #define SUPPORTED_FLAGS \
2922         (FORCE_ASYNC_OP_ERROR | XE_VM_BIND_FLAG_ASYNC | \
2923          XE_VM_BIND_FLAG_READONLY | XE_VM_BIND_FLAG_IMMEDIATE | \
2924          XE_VM_BIND_FLAG_NULL | 0xffff)
2925 #else
2926 #define SUPPORTED_FLAGS \
2927         (XE_VM_BIND_FLAG_ASYNC | XE_VM_BIND_FLAG_READONLY | \
2928          XE_VM_BIND_FLAG_IMMEDIATE | XE_VM_BIND_FLAG_NULL | 0xffff)
2929 #endif
2930 #define XE_64K_PAGE_MASK 0xffffull
2931
2932 #define MAX_BINDS       512     /* FIXME: Picking an arbitrary upper limit */
2933
2934 static int vm_bind_ioctl_check_args(struct xe_device *xe,
2935                                     struct drm_xe_vm_bind *args,
2936                                     struct drm_xe_vm_bind_op **bind_ops,
2937                                     bool *async)
2938 {
2939         int err;
2940         int i;
2941
2942         if (XE_IOCTL_ERR(xe, args->extensions) ||
2943             XE_IOCTL_ERR(xe, args->pad || args->pad2) ||
2944             XE_IOCTL_ERR(xe, args->reserved[0] || args->reserved[1]) ||
2945             XE_IOCTL_ERR(xe, !args->num_binds) ||
2946             XE_IOCTL_ERR(xe, args->num_binds > MAX_BINDS))
2947                 return -EINVAL;
2948
2949         if (args->num_binds > 1) {
2950                 u64 __user *bind_user =
2951                         u64_to_user_ptr(args->vector_of_binds);
2952
2953                 *bind_ops = kmalloc(sizeof(struct drm_xe_vm_bind_op) *
2954                                     args->num_binds, GFP_KERNEL);
2955                 if (!*bind_ops)
2956                         return -ENOMEM;
2957
2958                 err = __copy_from_user(*bind_ops, bind_user,
2959                                        sizeof(struct drm_xe_vm_bind_op) *
2960                                        args->num_binds);
2961                 if (XE_IOCTL_ERR(xe, err)) {
2962                         err = -EFAULT;
2963                         goto free_bind_ops;
2964                 }
2965         } else {
2966                 *bind_ops = &args->bind;
2967         }
2968
2969         for (i = 0; i < args->num_binds; ++i) {
2970                 u64 range = (*bind_ops)[i].range;
2971                 u64 addr = (*bind_ops)[i].addr;
2972                 u32 op = (*bind_ops)[i].op;
2973                 u32 obj = (*bind_ops)[i].obj;
2974                 u64 obj_offset = (*bind_ops)[i].obj_offset;
2975                 u32 region = (*bind_ops)[i].region;
2976                 bool is_null = op & XE_VM_BIND_FLAG_NULL;
2977
2978                 if (XE_IOCTL_ERR(xe, (*bind_ops)[i].pad) ||
2979                     XE_IOCTL_ERR(xe, (*bind_ops)[i].reserved[0] ||
2980                                      (*bind_ops)[i].reserved[1])) {
2981                         err = -EINVAL;
2982                         goto free_bind_ops;
2983                 }
2984
2985                 if (i == 0) {
2986                         *async = !!(op & XE_VM_BIND_FLAG_ASYNC);
2987                 } else if (XE_IOCTL_ERR(xe, !*async) ||
2988                            XE_IOCTL_ERR(xe, !(op & XE_VM_BIND_FLAG_ASYNC)) ||
2989                            XE_IOCTL_ERR(xe, VM_BIND_OP(op) ==
2990                                         XE_VM_BIND_OP_RESTART)) {
2991                         err = -EINVAL;
2992                         goto free_bind_ops;
2993                 }
2994
2995                 if (XE_IOCTL_ERR(xe, !*async &&
2996                                  VM_BIND_OP(op) == XE_VM_BIND_OP_UNMAP_ALL)) {
2997                         err = -EINVAL;
2998                         goto free_bind_ops;
2999                 }
3000
3001                 if (XE_IOCTL_ERR(xe, !*async &&
3002                                  VM_BIND_OP(op) == XE_VM_BIND_OP_PREFETCH)) {
3003                         err = -EINVAL;
3004                         goto free_bind_ops;
3005                 }
3006
3007                 if (XE_IOCTL_ERR(xe, VM_BIND_OP(op) >
3008                                  XE_VM_BIND_OP_PREFETCH) ||
3009                     XE_IOCTL_ERR(xe, op & ~SUPPORTED_FLAGS) ||
3010                     XE_IOCTL_ERR(xe, obj && is_null) ||
3011                     XE_IOCTL_ERR(xe, obj_offset && is_null) ||
3012                     XE_IOCTL_ERR(xe, VM_BIND_OP(op) != XE_VM_BIND_OP_MAP &&
3013                                  is_null) ||
3014                     XE_IOCTL_ERR(xe, !obj &&
3015                                  VM_BIND_OP(op) == XE_VM_BIND_OP_MAP &&
3016                                  !is_null) ||
3017                     XE_IOCTL_ERR(xe, !obj &&
3018                                  VM_BIND_OP(op) == XE_VM_BIND_OP_UNMAP_ALL) ||
3019                     XE_IOCTL_ERR(xe, addr &&
3020                                  VM_BIND_OP(op) == XE_VM_BIND_OP_UNMAP_ALL) ||
3021                     XE_IOCTL_ERR(xe, range &&
3022                                  VM_BIND_OP(op) == XE_VM_BIND_OP_UNMAP_ALL) ||
3023                     XE_IOCTL_ERR(xe, obj &&
3024                                  VM_BIND_OP(op) == XE_VM_BIND_OP_MAP_USERPTR) ||
3025                     XE_IOCTL_ERR(xe, obj &&
3026                                  VM_BIND_OP(op) == XE_VM_BIND_OP_PREFETCH) ||
3027                     XE_IOCTL_ERR(xe, region &&
3028                                  VM_BIND_OP(op) != XE_VM_BIND_OP_PREFETCH) ||
3029                     XE_IOCTL_ERR(xe, !(BIT(region) &
3030                                        xe->info.mem_region_mask)) ||
3031                     XE_IOCTL_ERR(xe, obj &&
3032                                  VM_BIND_OP(op) == XE_VM_BIND_OP_UNMAP)) {
3033                         err = -EINVAL;
3034                         goto free_bind_ops;
3035                 }
3036
3037                 if (XE_IOCTL_ERR(xe, obj_offset & ~PAGE_MASK) ||
3038                     XE_IOCTL_ERR(xe, addr & ~PAGE_MASK) ||
3039                     XE_IOCTL_ERR(xe, range & ~PAGE_MASK) ||
3040                     XE_IOCTL_ERR(xe, !range && VM_BIND_OP(op) !=
3041                                  XE_VM_BIND_OP_RESTART &&
3042                                  VM_BIND_OP(op) != XE_VM_BIND_OP_UNMAP_ALL)) {
3043                         err = -EINVAL;
3044                         goto free_bind_ops;
3045                 }
3046         }
3047
3048         return 0;
3049
3050 free_bind_ops:
3051         if (args->num_binds > 1)
3052                 kfree(*bind_ops);
3053         return err;
3054 }
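
/*
 * Illustrative sketch only (not driver code): with num_binds > 1 the ops
 * are passed out of line through vector_of_binds, which the function above
 * copies in as an array of struct drm_xe_vm_bind_op; every op in a
 * multi-op bind must carry XE_VM_BIND_FLAG_ASYNC (see the i > 0 checks),
 * and the VM must have been created with XE_VM_FLAG_ASYNC_BIND_OPS.
 *
 *	struct drm_xe_vm_bind_op ops[2] = {
 *		{ .op = XE_VM_BIND_OP_MAP | XE_VM_BIND_FLAG_ASYNC,
 *		  .obj = bo_handle, .addr = 0x100000, .range = 0x10000 },
 *		{ .op = XE_VM_BIND_OP_UNMAP | XE_VM_BIND_FLAG_ASYNC,
 *		  .addr = 0x200000, .range = 0x10000 },
 *	};
 *	struct drm_xe_vm_bind args = {
 *		.vm_id = vm_id,
 *		.num_binds = 2,
 *		.vector_of_binds = (uintptr_t)ops,
 *	};
 */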
3055
3056 int xe_vm_bind_ioctl(struct drm_device *dev, void *data, struct drm_file *file)
3057 {
3058         struct xe_device *xe = to_xe_device(dev);
3059         struct xe_file *xef = to_xe_file(file);
3060         struct drm_xe_vm_bind *args = data;
3061         struct drm_xe_sync __user *syncs_user;
3062         struct xe_bo **bos = NULL;
3063         struct xe_vma **vmas = NULL;
3064         struct xe_vm *vm;
3065         struct xe_engine *e = NULL;
3066         u32 num_syncs;
3067         struct xe_sync_entry *syncs = NULL;
3068         struct drm_xe_vm_bind_op *bind_ops;
3069         bool async;
3070         int err;
3071         int i, j = 0;
3072
3073         err = vm_bind_ioctl_check_args(xe, args, &bind_ops, &async);
3074         if (err)
3075                 return err;
3076
3077         vm = xe_vm_lookup(xef, args->vm_id);
3078         if (XE_IOCTL_ERR(xe, !vm)) {
3079                 err = -EINVAL;
3080                 goto free_objs;
3081         }
3082
3083         if (XE_IOCTL_ERR(xe, xe_vm_is_closed(vm))) {
3084                 drm_err(dev, "VM closed while we were looking it up?\n");
3085                 err = -ENOENT;
3086                 goto put_vm;
3087         }
3088
3089         if (args->engine_id) {
3090                 e = xe_engine_lookup(xef, args->engine_id);
3091                 if (XE_IOCTL_ERR(xe, !e)) {
3092                         err = -ENOENT;
3093                         goto put_vm;
3094                 }
3095                 if (XE_IOCTL_ERR(xe, !(e->flags & ENGINE_FLAG_VM))) {
3096                         err = -EINVAL;
3097                         goto put_engine;
3098                 }
3099         }
3100
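        /*
         * XE_VM_BIND_OP_RESTART: once an async bind op has failed and
         * vm->async_ops.error is set, userspace acknowledges the error with a
         * RESTART (no syncs allowed), which clears the error and re-queues the
         * async worker so the remaining queued ops can be processed.
         */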
3101         if (VM_BIND_OP(bind_ops[0].op) == XE_VM_BIND_OP_RESTART) {
3102                 if (XE_IOCTL_ERR(xe, !(vm->flags & XE_VM_FLAG_ASYNC_BIND_OPS)))
3103                         err = -EOPNOTSUPP;
3104                 if (XE_IOCTL_ERR(xe, !err && args->num_syncs))
3105                         err = -EINVAL;
3106                 if (XE_IOCTL_ERR(xe, !err && !vm->async_ops.error))
3107                         err = -EPROTO;
3108
3109                 if (!err) {
3110                         down_write(&vm->lock);
3111                         trace_xe_vm_restart(vm);
3112                         vm_set_async_error(vm, 0);
3113                         up_write(&vm->lock);
3114
3115                         queue_work(system_unbound_wq, &vm->async_ops.work);
3116
3117                         /* Rebinds may have been blocked, give worker a kick */
3118                         if (xe_vm_in_compute_mode(vm))
3119                                 xe_vm_queue_rebind_worker(vm);
3120                 }
3121
3122                 goto put_engine;
3123         }
3124
3125         if (XE_IOCTL_ERR(xe, !vm->async_ops.error &&
3126                          async != !!(vm->flags & XE_VM_FLAG_ASYNC_BIND_OPS))) {
3127                 err = -EOPNOTSUPP;
3128                 goto put_engine;
3129         }
3130
3131         for (i = 0; i < args->num_binds; ++i) {
3132                 u64 range = bind_ops[i].range;
3133                 u64 addr = bind_ops[i].addr;
3134
3135                 if (XE_IOCTL_ERR(xe, range > vm->size) ||
3136                     XE_IOCTL_ERR(xe, addr > vm->size - range)) {
3137                         err = -EINVAL;
3138                         goto put_engine;
3139                 }
3140
3141                 if (bind_ops[i].tile_mask) {
3142                         u64 valid_tiles = BIT(xe->info.tile_count) - 1;
3143
3144                         if (XE_IOCTL_ERR(xe, bind_ops[i].tile_mask &
3145                                          ~valid_tiles)) {
3146                                 err = -EINVAL;
3147                                 goto put_engine;
3148                         }
3149                 }
3150         }
3151
3152         bos = kcalloc(args->num_binds, sizeof(*bos), GFP_KERNEL);
3153         if (!bos) {
3154                 err = -ENOMEM;
3155                 goto put_engine;
3156         }
3157
3158         vmas = kcalloc(args->num_binds, sizeof(*vmas), GFP_KERNEL);
3159         if (!vmas) {
3160                 err = -ENOMEM;
3161                 goto put_engine;
3162         }
3163
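        /* Look up and sanity-check the GEM objects referenced by the bind ops. */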
3164         for (i = 0; i < args->num_binds; ++i) {
3165                 struct drm_gem_object *gem_obj;
3166                 u64 range = bind_ops[i].range;
3167                 u64 addr = bind_ops[i].addr;
3168                 u32 obj = bind_ops[i].obj;
3169                 u64 obj_offset = bind_ops[i].obj_offset;
3170
3171                 if (!obj)
3172                         continue;
3173
3174                 gem_obj = drm_gem_object_lookup(file, obj);
3175                 if (XE_IOCTL_ERR(xe, !gem_obj)) {
3176                         err = -ENOENT;
3177                         goto put_obj;
3178                 }
3179                 bos[i] = gem_to_xe_bo(gem_obj);
3180
3181                 if (XE_IOCTL_ERR(xe, range > bos[i]->size) ||
3182                     XE_IOCTL_ERR(xe, obj_offset >
3183                                  bos[i]->size - range)) {
3184                         err = -EINVAL;
3185                         goto put_obj;
3186                 }
3187
3188                 if (bos[i]->flags & XE_BO_INTERNAL_64K) {
3189                         if (XE_IOCTL_ERR(xe, obj_offset &
3190                                          XE_64K_PAGE_MASK) ||
3191                             XE_IOCTL_ERR(xe, addr & XE_64K_PAGE_MASK) ||
3192                             XE_IOCTL_ERR(xe, range & XE_64K_PAGE_MASK)) {
3193                                 err = -EINVAL;
3194                                 goto put_obj;
3195                         }
3196                 }
3197         }
3198
3199         if (args->num_syncs) {
3200                 syncs = kcalloc(args->num_syncs, sizeof(*syncs), GFP_KERNEL);
3201                 if (!syncs) {
3202                         err = -ENOMEM;
3203                         goto put_obj;
3204                 }
3205         }
3206
3207         syncs_user = u64_to_user_ptr(args->syncs);
3208         for (num_syncs = 0; num_syncs < args->num_syncs; num_syncs++) {
3209                 err = xe_sync_entry_parse(xe, xef, &syncs[num_syncs],
3210                                           &syncs_user[num_syncs], false,
3211                                           xe_vm_in_fault_mode(vm));
3212                 if (err)
3213                         goto free_syncs;
3214         }
3215
3216         err = down_write_killable(&vm->lock);
3217         if (err)
3218                 goto free_syncs;
3219
3220         /* Do some error checking first to make the unwind easier */
3221         for (i = 0; i < args->num_binds; ++i) {
3222                 u64 range = bind_ops[i].range;
3223                 u64 addr = bind_ops[i].addr;
3224                 u32 op = bind_ops[i].op;
3225
3226                 err = __vm_bind_ioctl_lookup_vma(vm, bos[i], addr, range, op);
3227                 if (err)
3228                         goto release_vm_lock;
3229         }
3230
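        /* Create (or look up) a VMA for each bind op; unwound below on error. */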
3231         for (i = 0; i < args->num_binds; ++i) {
3232                 u64 range = bind_ops[i].range;
3233                 u64 addr = bind_ops[i].addr;
3234                 u32 op = bind_ops[i].op;
3235                 u64 obj_offset = bind_ops[i].obj_offset;
3236                 u64 tile_mask = bind_ops[i].tile_mask;
3237                 u32 region = bind_ops[i].region;
3238
3239                 vmas[i] = vm_bind_ioctl_lookup_vma(vm, bos[i], obj_offset,
3240                                                    addr, range, op, tile_mask,
3241                                                    region);
3242                 if (IS_ERR(vmas[i])) {
3243                         err = PTR_ERR(vmas[i]);
3244                         vmas[i] = NULL;
3245                         goto destroy_vmas;
3246                 }
3247         }
3248
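        /*
         * Submit the ops. With multiple binds, wait syncs are attached to the
         * first op and signal syncs to the last, so the batch behaves as one
         * operation from userspace's point of view.
         */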
3249         for (j = 0; j < args->num_binds; ++j) {
3250                 struct xe_sync_entry *__syncs;
3251                 u32 __num_syncs = 0;
3252                 bool first_or_last = j == 0 || j == args->num_binds - 1;
3253
3254                 if (args->num_binds == 1) {
3255                         __num_syncs = num_syncs;
3256                         __syncs = syncs;
3257                 } else if (first_or_last && num_syncs) {
3258                         bool first = j == 0;
3259
3260                         __syncs = kmalloc(sizeof(*__syncs) * num_syncs,
3261                                           GFP_KERNEL);
3262                         if (!__syncs) {
3263                                 err = -ENOMEM;
3264                                 break;
3265                         }
3266
3267                         /* in-syncs on first bind, out-syncs on last bind */
3268                         for (i = 0; i < num_syncs; ++i) {
3269                                 bool signal = syncs[i].flags &
3270                                         DRM_XE_SYNC_SIGNAL;
3271
3272                                 if ((first && !signal) || (!first && signal))
3273                                         __syncs[__num_syncs++] = syncs[i];
3274                         }
3275                 } else {
3276                         __num_syncs = 0;
3277                         __syncs = NULL;
3278                 }
3279
3280                 if (async) {
3281                         bool last = j == args->num_binds - 1;
3282
3283                         /*
3284                          * Each async worker pass drops one set of refs; take extra
3285                          * refs for all but the last op, which uses the set above.
3286                          */
3287                         if (!last) {
3288                                 if (e)
3289                                         xe_engine_get(e);
3290                                 xe_vm_get(vm);
3291                         }
3292
3293                         err = vm_bind_ioctl_async(vm, vmas[j], e, bos[j],
3294                                                   bind_ops + j, __syncs,
3295                                                   __num_syncs);
3296                         if (err && !last) {
3297                                 if (e)
3298                                         xe_engine_put(e);
3299                                 xe_vm_put(vm);
3300                         }
3301                         if (err)
3302                                 break;
3303                 } else {
3304                         XE_BUG_ON(j != 0);      /* Not supported */
3305                         err = vm_bind_ioctl(vm, vmas[j], e, bos[j],
3306                                             bind_ops + j, __syncs,
3307                                             __num_syncs, NULL);
3308                         break;  /* Needed so cleanup loops work */
3309                 }
3310         }
3311
3312         /* Most of the cleanup is owned by the async bind worker */
3313         if (async && !err) {
3314                 up_write(&vm->lock);
3315                 if (args->num_binds > 1)
3316                         kfree(syncs);
3317                 goto free_objs;
3318         }
3319
3320 destroy_vmas:
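        /*
         * Ops before index j were already handed to the async worker, which
         * owns their cleanup; on error, only unwind the VMAs created for ops
         * j onward.
         */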
3321         for (i = j; err && i < args->num_binds; ++i) {
3322                 u32 op = bind_ops[i].op;
3323                 struct xe_vma *vma, *next;
3324
3325                 if (!vmas[i])
3326                         break;
3327
3328                 list_for_each_entry_safe(vma, next, &vmas[i]->unbind_link,
3329                                          unbind_link) {
3330                         list_del_init(&vma->unbind_link);
3331                         if (!vma->destroyed) {
3332                                 prep_vma_destroy(vm, vma);
3333                                 xe_vma_destroy_unlocked(vma);
3334                         }
3335                 }
3336
3337                 switch (VM_BIND_OP(op)) {
3338                 case XE_VM_BIND_OP_MAP:
3339                         prep_vma_destroy(vm, vmas[i]);
3340                         xe_vma_destroy_unlocked(vmas[i]);
3341                         break;
3342                 case XE_VM_BIND_OP_MAP_USERPTR:
3343                         prep_vma_destroy(vm, vmas[i]);
3344                         xe_vma_destroy_unlocked(vmas[i]);
3345                         break;
3346                 }
3347         }
3348 release_vm_lock:
3349         up_write(&vm->lock);
3350 free_syncs:
3351         while (num_syncs--) {
3352                 if (async && j &&
3353                     !(syncs[num_syncs].flags & DRM_XE_SYNC_SIGNAL))
3354                         continue;       /* Still in async worker */
3355                 xe_sync_entry_cleanup(&syncs[num_syncs]);
3356         }
3357
3358         kfree(syncs);
3359 put_obj:
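        /* As above, BO refs for ops handed to the async worker are dropped there. */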
3360         for (i = j; i < args->num_binds; ++i)
3361                 xe_bo_put(bos[i]);
3362 put_engine:
3363         if (e)
3364                 xe_engine_put(e);
3365 put_vm:
3366         xe_vm_put(vm);
3367 free_objs:
3368         kfree(bos);
3369         kfree(vmas);
3370         if (args->num_binds > 1)
3371                 kfree(bind_ops);
3372         return err;
3373 }
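
/*
 * Illustrative sketch only (not driver code): recovering from a failed async
 * bind. Once vm->async_ops.error is set, queued ops are not processed again
 * until userspace submits a bare XE_VM_BIND_OP_RESTART op (no syncs), which
 * the RESTART branch above uses to clear the error and re-queue the worker.
 * The ioctl request macro is assumed to be DRM_IOCTL_XE_VM_BIND.
 *
 *	struct drm_xe_vm_bind restart = {
 *		.vm_id = vm_id,
 *		.num_binds = 1,
 *		.bind = { .op = XE_VM_BIND_OP_RESTART | XE_VM_BIND_FLAG_ASYNC },
 *	};
 *	ioctl(fd, DRM_IOCTL_XE_VM_BIND, &restart);
 */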
3374
3375 /*
3376  * XXX: Using the TTM wrappers for now, likely can call into dma-resv code
3377  * directly to optimize. Also this likely should be an inline function.
3378  */
3379 int xe_vm_lock(struct xe_vm *vm, struct ww_acquire_ctx *ww,
3380                int num_resv, bool intr)
3381 {
3382         struct ttm_validate_buffer tv_vm;
3383         LIST_HEAD(objs);
3384         LIST_HEAD(dups);
3385
3386         XE_BUG_ON(!ww);
3387
3388         tv_vm.num_shared = num_resv;
3389         tv_vm.bo = xe_vm_ttm_bo(vm);
3390         list_add_tail(&tv_vm.head, &objs);
3391
3392         return ttm_eu_reserve_buffers(ww, &objs, intr, &dups);
3393 }
3394
3395 void xe_vm_unlock(struct xe_vm *vm, struct ww_acquire_ctx *ww)
3396 {
3397         dma_resv_unlock(&vm->resv);
3398         ww_acquire_fini(ww);
3399 }
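
/*
 * Illustrative sketch only (not driver code): the expected pairing of
 * xe_vm_lock() and xe_vm_unlock(), reserving one shared fence slot on the
 * VM's reservation object and locking interruptibly.
 *
 *	struct ww_acquire_ctx ww;
 *	int err;
 *
 *	err = xe_vm_lock(vm, &ww, 1, true);
 *	if (err)
 *		return err;
 *	... update VM state, install fences ...
 *	xe_vm_unlock(vm, &ww);
 */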
3400
3401 /**
3402  * xe_vm_invalidate_vma - invalidate GPU mappings for VMA without a lock
3403  * @vma: VMA to invalidate
3404  *
3405  * Walks the page-table leaves covering this VMA, zeroes the entries owned by
3406  * the VMA, invalidates the TLBs, and blocks until the TLB invalidation has
3407  * completed.
3408  *
3409  * Return: 0 for success, negative error code otherwise.
3410  */
3411 int xe_vm_invalidate_vma(struct xe_vma *vma)
3412 {
3413         struct xe_device *xe = vma->vm->xe;
3414         struct xe_tile *tile;
3415         u32 tile_needs_invalidate = 0;
3416         int seqno[XE_MAX_TILES_PER_DEVICE];
3417         u8 id;
3418         int ret;
3419
3420         XE_BUG_ON(!xe_vm_in_fault_mode(vma->vm));
3421         XE_WARN_ON(xe_vma_is_null(vma));
3422         trace_xe_vma_usm_invalidate(vma);
3423
3424         /* Check that we don't race with page-table updates */
3425         if (IS_ENABLED(CONFIG_PROVE_LOCKING)) {
3426                 if (xe_vma_is_userptr(vma)) {
3427                         WARN_ON_ONCE(!mmu_interval_check_retry
3428                                      (&vma->userptr.notifier,
3429                                       vma->userptr.notifier_seq));
3430                         WARN_ON_ONCE(!dma_resv_test_signaled(&vma->vm->resv,
3431                                                              DMA_RESV_USAGE_BOOKKEEP));
3432
3433                 } else {
3434                         xe_bo_assert_held(vma->bo);
3435                 }
3436         }
3437
3438         for_each_tile(tile, xe, id) {
3439                 if (xe_pt_zap_ptes(tile, vma)) {
3440                         tile_needs_invalidate |= BIT(id);
3441                         xe_device_wmb(xe);
3442                         /*
3443                          * FIXME: We potentially need to invalidate multiple
3444                          * GTs within the tile
3445                          */
3446                         seqno[id] = xe_gt_tlb_invalidation_vma(tile->primary_gt, NULL, vma);
3447                         if (seqno[id] < 0)
3448                                 return seqno[id];
3449                 }
3450         }
3451
3452         for_each_tile(tile, xe, id) {
3453                 if (tile_needs_invalidate & BIT(id)) {
3454                         ret = xe_gt_tlb_invalidation_wait(tile->primary_gt, seqno[id]);
3455                         if (ret < 0)
3456                                 return ret;
3457                 }
3458         }
3459
3460         vma->usm.tile_invalidated = vma->tile_mask;
3461
3462         return 0;
3463 }
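
/*
 * Illustrative sketch only (not driver code): a fault-mode caller zapping a
 * VMA's GPU mappings, e.g. after a userptr invalidation notifier has fired,
 * and bailing out if the TLB invalidation cannot be issued or waited on.
 *
 *	err = xe_vm_invalidate_vma(vma);
 *	if (err)
 *		return err;
 */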
3464
3465 int xe_analyze_vm(struct drm_printer *p, struct xe_vm *vm, int gt_id)
3466 {
3467         struct rb_node *node;
3468         bool is_vram;
3469         u64 addr;
3470
3471         if (!down_read_trylock(&vm->lock)) {
3472                 drm_printf(p, " Failed to acquire VM lock to dump capture\n");
3473                 return 0;
3474         }
3475         if (vm->pt_root[gt_id]) {
3476                 addr = xe_bo_addr(vm->pt_root[gt_id]->bo, 0, XE_PAGE_SIZE,
3477                                   &is_vram);
3478                 drm_printf(p, " VM root: A:0x%llx %s\n", addr, is_vram ? "VRAM" : "SYS");
3479         }
3480
3481         for (node = rb_first(&vm->vmas); node; node = rb_next(node)) {
3482                 struct xe_vma *vma = to_xe_vma(node);
3483                 bool is_userptr = xe_vma_is_userptr(vma);
3484                 bool is_null = xe_vma_is_null(vma);
3485
3486                 if (is_null) {
3487                         addr = 0;
3488                 } else if (is_userptr) {
3489                         struct xe_res_cursor cur;
3490
3491                         if (vma->userptr.sg) {
3492                                 xe_res_first_sg(vma->userptr.sg, 0, XE_PAGE_SIZE,
3493                                                 &cur);
3494                                 addr = xe_res_dma(&cur);
3495                         } else {
3496                                 addr = 0;
3497                         }
3498                 } else {
3499                         addr = __xe_bo_addr(vma->bo, 0, XE_PAGE_SIZE, &is_vram);
3500                 }
3501                 drm_printf(p, " [%016llx-%016llx] S:0x%016llx A:%016llx %s\n",
3502                            vma->start, vma->end, vma->end - vma->start + 1ull,
3503                            addr, is_null ? "NULL" : is_userptr ? "USR" :
3504                            is_vram ? "VRAM" : "SYS");
3505         }
3506         up_read(&vm->lock);
3507
3508         return 0;
3509 }