drm/xe/vm: Fix ASID XA usage
drivers/gpu/drm/xe/xe_vm.c
1 // SPDX-License-Identifier: MIT
2 /*
3  * Copyright © 2021 Intel Corporation
4  */
5
6 #include "xe_vm.h"
7
8 #include <linux/dma-fence-array.h>
9
10 #include <drm/drm_exec.h>
11 #include <drm/drm_print.h>
12 #include <drm/ttm/ttm_execbuf_util.h>
13 #include <drm/ttm/ttm_tt.h>
14 #include <drm/xe_drm.h>
15 #include <linux/delay.h>
16 #include <linux/kthread.h>
17 #include <linux/mm.h>
18 #include <linux/swap.h>
19
20 #include "xe_assert.h"
21 #include "xe_bo.h"
22 #include "xe_device.h"
23 #include "xe_drm_client.h"
24 #include "xe_exec_queue.h"
25 #include "xe_gt.h"
26 #include "xe_gt_pagefault.h"
27 #include "xe_gt_tlb_invalidation.h"
28 #include "xe_migrate.h"
29 #include "xe_pm.h"
30 #include "xe_preempt_fence.h"
31 #include "xe_pt.h"
32 #include "xe_res_cursor.h"
33 #include "xe_sync.h"
34 #include "xe_trace.h"
35 #include "generated/xe_wa_oob.h"
36 #include "xe_wa.h"
37
38 #define TEST_VM_ASYNC_OPS_ERROR
39
40 static struct drm_gem_object *xe_vm_obj(struct xe_vm *vm)
41 {
42         return vm->gpuvm.r_obj;
43 }
44
45 /**
46  * xe_vma_userptr_check_repin() - Advisory check for repin needed
47  * @vma: The userptr vma
48  *
49  * Check if the userptr vma has been invalidated since last successful
50  * repin. The check is advisory only and the function can be called
51  * without the vm->userptr.notifier_lock held. There is no guarantee that the
52  * vma userptr will remain valid after a lockless check, so typically
53  * the call needs to be followed by a proper check under the notifier_lock.
54  *
55  * Return: 0 if userptr vma is valid, -EAGAIN otherwise; repin recommended.
56  */
57 int xe_vma_userptr_check_repin(struct xe_vma *vma)
58 {
59         return mmu_interval_check_retry(&vma->userptr.notifier,
60                                         vma->userptr.notifier_seq) ?
61                 -EAGAIN : 0;
62 }
63
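/**
 * xe_vma_userptr_pin_pages() - Map the backing pages of a userptr vma
 * @vma: The userptr vma.
 *
 * Looks up the CPU pages backing the userptr range, builds an sg table and
 * DMA-maps it for GPU access; the page references are dropped again once
 * the table is built, so the pages are not kept pinned. If the range is
 * invalidated while this runs, the operation is retried. Must be called
 * with the vm lock held.
 *
 * Return: 0 on success, negative error code on failure.
 */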
64 int xe_vma_userptr_pin_pages(struct xe_vma *vma)
65 {
66         struct xe_vm *vm = xe_vma_vm(vma);
67         struct xe_device *xe = vm->xe;
68         const unsigned long num_pages = xe_vma_size(vma) >> PAGE_SHIFT;
69         struct page **pages;
70         bool in_kthread = !current->mm;
71         unsigned long notifier_seq;
72         int pinned, ret, i;
73         bool read_only = xe_vma_read_only(vma);
74
75         lockdep_assert_held(&vm->lock);
76         xe_assert(xe, xe_vma_is_userptr(vma));
77 retry:
78         if (vma->gpuva.flags & XE_VMA_DESTROYED)
79                 return 0;
80
81         notifier_seq = mmu_interval_read_begin(&vma->userptr.notifier);
82         if (notifier_seq == vma->userptr.notifier_seq)
83                 return 0;
84
85         pages = kvmalloc_array(num_pages, sizeof(*pages), GFP_KERNEL);
86         if (!pages)
87                 return -ENOMEM;
88
89         if (vma->userptr.sg) {
90                 dma_unmap_sgtable(xe->drm.dev,
91                                   vma->userptr.sg,
92                                   read_only ? DMA_TO_DEVICE :
93                                   DMA_BIDIRECTIONAL, 0);
94                 sg_free_table(vma->userptr.sg);
95                 vma->userptr.sg = NULL;
96         }
97
98         pinned = ret = 0;
99         if (in_kthread) {
100                 if (!mmget_not_zero(vma->userptr.notifier.mm)) {
101                         ret = -EFAULT;
102                         goto mm_closed;
103                 }
104                 kthread_use_mm(vma->userptr.notifier.mm);
105         }
106
107         while (pinned < num_pages) {
108                 ret = get_user_pages_fast(xe_vma_userptr(vma) +
109                                           pinned * PAGE_SIZE,
110                                           num_pages - pinned,
111                                           read_only ? 0 : FOLL_WRITE,
112                                           &pages[pinned]);
113                 if (ret < 0) {
114                         if (in_kthread)
115                                 ret = 0;
116                         break;
117                 }
118
119                 pinned += ret;
120                 ret = 0;
121         }
122
123         if (in_kthread) {
124                 kthread_unuse_mm(vma->userptr.notifier.mm);
125                 mmput(vma->userptr.notifier.mm);
126         }
127 mm_closed:
128         if (ret)
129                 goto out;
130
131         ret = sg_alloc_table_from_pages_segment(&vma->userptr.sgt, pages,
132                                                 pinned, 0,
133                                                 (u64)pinned << PAGE_SHIFT,
134                                                 xe_sg_segment_size(xe->drm.dev),
135                                                 GFP_KERNEL);
136         if (ret) {
137                 vma->userptr.sg = NULL;
138                 goto out;
139         }
140         vma->userptr.sg = &vma->userptr.sgt;
141
142         ret = dma_map_sgtable(xe->drm.dev, vma->userptr.sg,
143                               read_only ? DMA_TO_DEVICE :
144                               DMA_BIDIRECTIONAL,
145                               DMA_ATTR_SKIP_CPU_SYNC |
146                               DMA_ATTR_NO_KERNEL_MAPPING);
147         if (ret) {
148                 sg_free_table(vma->userptr.sg);
149                 vma->userptr.sg = NULL;
150                 goto out;
151         }
152
153         for (i = 0; i < pinned; ++i) {
154                 if (!read_only) {
155                         lock_page(pages[i]);
156                         set_page_dirty(pages[i]);
157                         unlock_page(pages[i]);
158                 }
159
160                 mark_page_accessed(pages[i]);
161         }
162
163 out:
164         release_pages(pages, pinned);
165         kvfree(pages);
166
167         if (!(ret < 0)) {
168                 vma->userptr.notifier_seq = notifier_seq;
169                 if (xe_vma_userptr_check_repin(vma) == -EAGAIN)
170                         goto retry;
171         }
172
173         return ret < 0 ? ret : 0;
174 }
175
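/*
 * Returns true if any exec queue on the VM either has no preempt fence
 * installed or has one whose software signaling has already been enabled,
 * i.e. a preemption is pending or in flight.
 */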
176 static bool preempt_fences_waiting(struct xe_vm *vm)
177 {
178         struct xe_exec_queue *q;
179
180         lockdep_assert_held(&vm->lock);
181         xe_vm_assert_held(vm);
182
183         list_for_each_entry(q, &vm->preempt.exec_queues, compute.link) {
184                 if (!q->compute.pfence ||
185                     (q->compute.pfence && test_bit(DMA_FENCE_FLAG_ENABLE_SIGNAL_BIT,
186                                                    &q->compute.pfence->flags))) {
187                         return true;
188                 }
189         }
190
191         return false;
192 }
193
194 static void free_preempt_fences(struct list_head *list)
195 {
196         struct list_head *link, *next;
197
198         list_for_each_safe(link, next, list)
199                 xe_preempt_fence_free(to_preempt_fence_from_link(link));
200 }
201
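/*
 * Top up @list with newly allocated preempt fences until there is one for
 * every exec queue on the VM; @count tracks how many are already available.
 */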
202 static int alloc_preempt_fences(struct xe_vm *vm, struct list_head *list,
203                                 unsigned int *count)
204 {
205         lockdep_assert_held(&vm->lock);
206         xe_vm_assert_held(vm);
207
208         if (*count >= vm->preempt.num_exec_queues)
209                 return 0;
210
211         for (; *count < vm->preempt.num_exec_queues; ++(*count)) {
212                 struct xe_preempt_fence *pfence = xe_preempt_fence_alloc();
213
214                 if (IS_ERR(pfence))
215                         return PTR_ERR(pfence);
216
217                 list_move_tail(xe_preempt_fence_link(pfence), list);
218         }
219
220         return 0;
221 }
222
223 static int wait_for_existing_preempt_fences(struct xe_vm *vm)
224 {
225         struct xe_exec_queue *q;
226
227         xe_vm_assert_held(vm);
228
229         list_for_each_entry(q, &vm->preempt.exec_queues, compute.link) {
230                 if (q->compute.pfence) {
231                         long timeout = dma_fence_wait(q->compute.pfence, false);
232
233                         if (timeout < 0)
234                                 return -ETIME;
235                         dma_fence_put(q->compute.pfence);
236                         q->compute.pfence = NULL;
237                 }
238         }
239
240         return 0;
241 }
242
243 static bool xe_vm_is_idle(struct xe_vm *vm)
244 {
245         struct xe_exec_queue *q;
246
247         xe_vm_assert_held(vm);
248         list_for_each_entry(q, &vm->preempt.exec_queues, compute.link) {
249                 if (!xe_exec_queue_is_idle(q))
250                         return false;
251         }
252
253         return true;
254 }
255
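/*
 * For each exec queue, take one pre-allocated fence from @list, arm it with
 * the queue's context and next seqno, and install it as the queue's current
 * preempt fence, dropping the previous one.
 */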
256 static void arm_preempt_fences(struct xe_vm *vm, struct list_head *list)
257 {
258         struct list_head *link;
259         struct xe_exec_queue *q;
260
261         list_for_each_entry(q, &vm->preempt.exec_queues, compute.link) {
262                 struct dma_fence *fence;
263
264                 link = list->next;
265                 xe_assert(vm->xe, link != list);
266
267                 fence = xe_preempt_fence_arm(to_preempt_fence_from_link(link),
268                                              q, q->compute.context,
269                                              ++q->compute.seqno);
270                 dma_fence_put(q->compute.pfence);
271                 q->compute.pfence = fence;
272         }
273 }
274
275 static int add_preempt_fences(struct xe_vm *vm, struct xe_bo *bo)
276 {
277         struct xe_exec_queue *q;
278         int err;
279
280         err = xe_bo_lock(bo, true);
281         if (err)
282                 return err;
283
284         err = dma_resv_reserve_fences(bo->ttm.base.resv, vm->preempt.num_exec_queues);
285         if (err)
286                 goto out_unlock;
287
288         list_for_each_entry(q, &vm->preempt.exec_queues, compute.link)
289                 if (q->compute.pfence) {
290                         dma_resv_add_fence(bo->ttm.base.resv,
291                                            q->compute.pfence,
292                                            DMA_RESV_USAGE_BOOKKEEP);
293                 }
294
295 out_unlock:
296         xe_bo_unlock(bo);
297         return err;
298 }
299
300 /**
301  * xe_vm_fence_all_extobjs() - Add a fence to vm's external objects' resv
302  * @vm: The vm.
303  * @fence: The fence to add.
304  * @usage: The resv usage for the fence.
305  *
306  * Loops over all of the vm's external object bindings and adds a @fence
307  * with the given @usage to all of the external object's reservation
308  * objects.
309  */
310 void xe_vm_fence_all_extobjs(struct xe_vm *vm, struct dma_fence *fence,
311                              enum dma_resv_usage usage)
312 {
313         struct xe_vma *vma;
314
315         list_for_each_entry(vma, &vm->extobj.list, extobj.link)
316                 dma_resv_add_fence(xe_vma_bo(vma)->ttm.base.resv, fence, usage);
317 }
318
319 static void resume_and_reinstall_preempt_fences(struct xe_vm *vm)
320 {
321         struct xe_exec_queue *q;
322
323         lockdep_assert_held(&vm->lock);
324         xe_vm_assert_held(vm);
325
326         list_for_each_entry(q, &vm->preempt.exec_queues, compute.link) {
327                 q->ops->resume(q);
328
329                 dma_resv_add_fence(xe_vm_resv(vm), q->compute.pfence,
330                                    DMA_RESV_USAGE_BOOKKEEP);
331                 xe_vm_fence_all_extobjs(vm, q->compute.pfence,
332                                         DMA_RESV_USAGE_BOOKKEEP);
333         }
334 }
335
336 int xe_vm_add_compute_exec_queue(struct xe_vm *vm, struct xe_exec_queue *q)
337 {
338         struct drm_exec exec;
339         struct dma_fence *pfence;
340         int err;
341         bool wait;
342
343         xe_assert(vm->xe, xe_vm_in_compute_mode(vm));
344
345         down_write(&vm->lock);
346         drm_exec_init(&exec, DRM_EXEC_INTERRUPTIBLE_WAIT);
347         drm_exec_until_all_locked(&exec) {
348                 err = xe_vm_lock_dma_resv(vm, &exec, 1, true);
349                 drm_exec_retry_on_contention(&exec);
350                 if (err)
351                         goto out_unlock;
352         }
353
354         pfence = xe_preempt_fence_create(q, q->compute.context,
355                                          ++q->compute.seqno);
356         if (!pfence) {
357                 err = -ENOMEM;
358                 goto out_unlock;
359         }
360
361         list_add(&q->compute.link, &vm->preempt.exec_queues);
362         ++vm->preempt.num_exec_queues;
363         q->compute.pfence = pfence;
364
365         down_read(&vm->userptr.notifier_lock);
366
367         dma_resv_add_fence(xe_vm_resv(vm), pfence,
368                            DMA_RESV_USAGE_BOOKKEEP);
369
370         xe_vm_fence_all_extobjs(vm, pfence, DMA_RESV_USAGE_BOOKKEEP);
371
372         /*
373          * Check to see if a preemption on the VM or a userptr
374          * invalidation is in flight; if so, trigger this preempt fence to
375          * sync state with the other preempt fences on the VM.
376          */
377         wait = __xe_vm_userptr_needs_repin(vm) || preempt_fences_waiting(vm);
378         if (wait)
379                 dma_fence_enable_sw_signaling(pfence);
380
381         up_read(&vm->userptr.notifier_lock);
382
383 out_unlock:
384         drm_exec_fini(&exec);
385         up_write(&vm->lock);
386
387         return err;
388 }
389
390 /**
391  * xe_vm_remove_compute_exec_queue() - Remove compute exec queue from VM
392  * @vm: The VM.
393  * @q: The exec_queue
394  */
395 void xe_vm_remove_compute_exec_queue(struct xe_vm *vm, struct xe_exec_queue *q)
396 {
397         if (!xe_vm_in_compute_mode(vm))
398                 return;
399
400         down_write(&vm->lock);
401         list_del(&q->compute.link);
402         --vm->preempt.num_exec_queues;
403         if (q->compute.pfence) {
404                 dma_fence_enable_sw_signaling(q->compute.pfence);
405                 dma_fence_put(q->compute.pfence);
406                 q->compute.pfence = NULL;
407         }
408         up_write(&vm->lock);
409 }
410
411 /**
412  * __xe_vm_userptr_needs_repin() - Check whether the VM does have userptrs
413  * that need repinning.
414  * @vm: The VM.
415  *
416  * This function checks whether the VM has userptrs that need repinning,
417  * and provides a release-type barrier on the userptr.notifier_lock after
418  * checking.
419  *
420  * Return: 0 if there are no userptrs needing repinning, -EAGAIN if there are.
421  */
422 int __xe_vm_userptr_needs_repin(struct xe_vm *vm)
423 {
424         lockdep_assert_held_read(&vm->userptr.notifier_lock);
425
426         return (list_empty(&vm->userptr.repin_list) &&
427                 list_empty(&vm->userptr.invalidated)) ? 0 : -EAGAIN;
428 }
429
430 /**
431  * xe_vm_lock_dma_resv() - Lock the vm dma_resv object and the dma_resv
432  * objects of the vm's external buffer objects.
433  * @vm: The vm.
434  * @exec: Pointer to a struct drm_exec locking context.
435  * @num_shared: Number of dma-fence slots to reserve in the locked objects.
436  * @lock_vm: Lock also the vm's dma_resv.
437  *
438  * Locks the vm dma-resv objects and all the dma-resv objects of the
439  * buffer objects on the vm external object list.
440  *
441  * Return: 0 on success, negative error code on error. In particular,
442  * -EINTR or -ERESTARTSYS may be returned if a wait is interrupted by a signal.
443  */
444 int xe_vm_lock_dma_resv(struct xe_vm *vm, struct drm_exec *exec,
445                         unsigned int num_shared, bool lock_vm)
446 {
447         struct xe_vma *vma, *next;
448         int err = 0;
449
450         lockdep_assert_held(&vm->lock);
451
452         if (lock_vm) {
453                 err = drm_exec_prepare_obj(exec, xe_vm_obj(vm), num_shared);
454                 if (err)
455                         return err;
456         }
457
458         list_for_each_entry(vma, &vm->extobj.list, extobj.link) {
459                 err = drm_exec_prepare_obj(exec, &xe_vma_bo(vma)->ttm.base, num_shared);
460                 if (err)
461                         return err;
462         }
463
464         spin_lock(&vm->notifier.list_lock);
465         list_for_each_entry_safe(vma, next, &vm->notifier.rebind_list,
466                                  notifier.rebind_link) {
467                 xe_bo_assert_held(xe_vma_bo(vma));
468
469                 list_del_init(&vma->notifier.rebind_link);
470                 if (vma->tile_present && !(vma->gpuva.flags & XE_VMA_DESTROYED))
471                         list_move_tail(&vma->combined_links.rebind,
472                                        &vm->rebind_list);
473         }
474         spin_unlock(&vm->notifier.list_lock);
475
476         return 0;
477 }
478
479 #define XE_VM_REBIND_RETRY_TIMEOUT_MS 1000
480
481 static void xe_vm_kill(struct xe_vm *vm)
482 {
483         struct xe_exec_queue *q;
484
485         lockdep_assert_held(&vm->lock);
486
487         xe_vm_lock(vm, false);
488         vm->flags |= XE_VM_FLAG_BANNED;
489         trace_xe_vm_kill(vm);
490
491         list_for_each_entry(q, &vm->preempt.exec_queues, compute.link)
492                 q->ops->kill(q);
493         xe_vm_unlock(vm);
494
495         /* TODO: Inform user the VM is banned */
496 }
497
498 /**
499  * xe_vm_validate_should_retry() - Whether to retry after a validate error.
500  * @exec: The drm_exec object used for locking before validation.
501  * @err: The error returned from ttm_bo_validate().
502  * @end: A ktime_t cookie that should be set to 0 before first use and
503  * that should be reused on subsequent calls.
504  *
505  * With multiple active VMs, under memory pressure, it is possible that
506  * ttm_bo_validate() runs into -EDEADLK and in such a case returns -ENOMEM.
507  * Until TTM properly handles locking in such scenarios, the best thing the
508  * driver can do is retry with a timeout. Check if that is necessary, and
509  * if so unlock the drm_exec's objects while keeping the ticket to prepare
510  * for a rerun.
511  *
512  * Return: true if a retry after drm_exec_init() is recommended;
513  * false otherwise.
514  */
515 bool xe_vm_validate_should_retry(struct drm_exec *exec, int err, ktime_t *end)
516 {
517         ktime_t cur;
518
519         if (err != -ENOMEM)
520                 return false;
521
522         cur = ktime_get();
523         *end = *end ? : ktime_add_ms(cur, XE_VM_REBIND_RETRY_TIMEOUT_MS);
524         if (!ktime_before(cur, *end))
525                 return false;
526
527         /*
528          * We would like to keep the ticket here with
529          * drm_exec_unlock_all(), but WW mutex asserts currently
530          * stop us from that. In any case this function could go away
531          * with proper TTM -EDEADLK handling.
532          */
533         drm_exec_fini(exec);
534
535         msleep(20);
536         return true;
537 }
538
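/*
 * Lock and prepare the VM for the rebind worker: reserve fence slots on the
 * VM resv, bail out early (*done) if the VM is idle or no preemption is
 * pending, otherwise lock the external BOs, wait for the existing preempt
 * fences and validate any evicted BOs on the rebind list.
 */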
539 static int xe_preempt_work_begin(struct drm_exec *exec, struct xe_vm *vm,
540                                  bool *done)
541 {
542         struct xe_vma *vma;
543         int err;
544
545         /*
546          * 1 fence for each preempt fence plus a fence for each tile from a
547          * possible rebind
548          */
549         err = drm_exec_prepare_obj(exec, xe_vm_obj(vm),
550                                    vm->preempt.num_exec_queues +
551                                    vm->xe->info.tile_count);
552         if (err)
553                 return err;
554
555         if (xe_vm_is_idle(vm)) {
556                 vm->preempt.rebind_deactivated = true;
557                 *done = true;
558                 return 0;
559         }
560
561         if (!preempt_fences_waiting(vm)) {
562                 *done = true;
563                 return 0;
564         }
565
566         err = xe_vm_lock_dma_resv(vm, exec, vm->preempt.num_exec_queues, false);
567         if (err)
568                 return err;
569
570         err = wait_for_existing_preempt_fences(vm);
571         if (err)
572                 return err;
573
574         list_for_each_entry(vma, &vm->rebind_list, combined_links.rebind) {
575                 if (xe_vma_has_no_bo(vma) ||
576                     vma->gpuva.flags & XE_VMA_DESTROYED)
577                         continue;
578
579                 err = xe_bo_validate(xe_vma_bo(vma), vm, false);
580                 if (err)
581                         break;
582         }
583
584         return err;
585 }
586
587 static void preempt_rebind_work_func(struct work_struct *w)
588 {
589         struct xe_vm *vm = container_of(w, struct xe_vm, preempt.rebind_work);
590         struct drm_exec exec;
591         struct dma_fence *rebind_fence;
592         unsigned int fence_count = 0;
593         LIST_HEAD(preempt_fences);
594         ktime_t end = 0;
595         int err = 0;
596         long wait;
597         int __maybe_unused tries = 0;
598
599         xe_assert(vm->xe, xe_vm_in_compute_mode(vm));
600         trace_xe_vm_rebind_worker_enter(vm);
601
602         down_write(&vm->lock);
603
604         if (xe_vm_is_closed_or_banned(vm)) {
605                 up_write(&vm->lock);
606                 trace_xe_vm_rebind_worker_exit(vm);
607                 return;
608         }
609
610 retry:
611         if (xe_vm_userptr_check_repin(vm)) {
612                 err = xe_vm_userptr_pin(vm);
613                 if (err)
614                         goto out_unlock_outer;
615         }
616
617         drm_exec_init(&exec, DRM_EXEC_INTERRUPTIBLE_WAIT);
618
619         drm_exec_until_all_locked(&exec) {
620                 bool done = false;
621
622                 err = xe_preempt_work_begin(&exec, vm, &done);
623                 drm_exec_retry_on_contention(&exec);
624                 if (err && xe_vm_validate_should_retry(&exec, err, &end)) {
625                         err = -EAGAIN;
626                         goto out_unlock_outer;
627                 }
628                 if (err || done)
629                         goto out_unlock;
630         }
631
632         err = alloc_preempt_fences(vm, &preempt_fences, &fence_count);
633         if (err)
634                 goto out_unlock;
635
636         rebind_fence = xe_vm_rebind(vm, true);
637         if (IS_ERR(rebind_fence)) {
638                 err = PTR_ERR(rebind_fence);
639                 goto out_unlock;
640         }
641
642         if (rebind_fence) {
643                 dma_fence_wait(rebind_fence, false);
644                 dma_fence_put(rebind_fence);
645         }
646
647         /* Wait on munmap style VM unbinds */
648         wait = dma_resv_wait_timeout(xe_vm_resv(vm),
649                                      DMA_RESV_USAGE_KERNEL,
650                                      false, MAX_SCHEDULE_TIMEOUT);
651         if (wait <= 0) {
652                 err = -ETIME;
653                 goto out_unlock;
654         }
655
656 #define retry_required(__tries, __vm) \
657         (IS_ENABLED(CONFIG_DRM_XE_USERPTR_INVAL_INJECT) ? \
658         (!(__tries)++ || __xe_vm_userptr_needs_repin(__vm)) : \
659         __xe_vm_userptr_needs_repin(__vm))
660
661         down_read(&vm->userptr.notifier_lock);
662         if (retry_required(tries, vm)) {
663                 up_read(&vm->userptr.notifier_lock);
664                 err = -EAGAIN;
665                 goto out_unlock;
666         }
667
668 #undef retry_required
669
670         spin_lock(&vm->xe->ttm.lru_lock);
671         ttm_lru_bulk_move_tail(&vm->lru_bulk_move);
672         spin_unlock(&vm->xe->ttm.lru_lock);
673
674         /* Point of no return. */
675         arm_preempt_fences(vm, &preempt_fences);
676         resume_and_reinstall_preempt_fences(vm);
677         up_read(&vm->userptr.notifier_lock);
678
679 out_unlock:
680         drm_exec_fini(&exec);
681 out_unlock_outer:
682         if (err == -EAGAIN) {
683                 trace_xe_vm_rebind_worker_retry(vm);
684                 goto retry;
685         }
686
687         if (err) {
688                 drm_warn(&vm->xe->drm, "VM worker error: %d\n", err);
689                 xe_vm_kill(vm);
690         }
691         up_write(&vm->lock);
692
693         free_preempt_fences(&preempt_fences);
694
695         trace_xe_vm_rebind_worker_exit(vm);
696 }
697
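/*
 * MMU interval notifier callback for userptr vmas: bumps the notifier
 * sequence, queues the vma for repinning, forces preempt fences to signal
 * and waits for pending GPU work (and, in fault mode, invalidates the GPU
 * mapping) before the pages are taken away.
 */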
698 static bool vma_userptr_invalidate(struct mmu_interval_notifier *mni,
699                                    const struct mmu_notifier_range *range,
700                                    unsigned long cur_seq)
701 {
702         struct xe_vma *vma = container_of(mni, struct xe_vma, userptr.notifier);
703         struct xe_vm *vm = xe_vma_vm(vma);
704         struct dma_resv_iter cursor;
705         struct dma_fence *fence;
706         long err;
707
708         xe_assert(vm->xe, xe_vma_is_userptr(vma));
709         trace_xe_vma_userptr_invalidate(vma);
710
711         if (!mmu_notifier_range_blockable(range))
712                 return false;
713
714         down_write(&vm->userptr.notifier_lock);
715         mmu_interval_set_seq(mni, cur_seq);
716
717         /* No need to stop gpu access if the userptr is not yet bound. */
718         if (!vma->userptr.initial_bind) {
719                 up_write(&vm->userptr.notifier_lock);
720                 return true;
721         }
722
723         /*
724          * Tell exec and rebind worker they need to repin and rebind this
725          * userptr.
726          */
727         if (!xe_vm_in_fault_mode(vm) &&
728             !(vma->gpuva.flags & XE_VMA_DESTROYED) && vma->tile_present) {
729                 spin_lock(&vm->userptr.invalidated_lock);
730                 list_move_tail(&vma->userptr.invalidate_link,
731                                &vm->userptr.invalidated);
732                 spin_unlock(&vm->userptr.invalidated_lock);
733         }
734
735         up_write(&vm->userptr.notifier_lock);
736
737         /*
738          * Preempt fences turn into schedule disables, pipeline these.
739          * Note that even in fault mode, we need to wait for binds and
740          * unbinds to complete, and those are attached as BOOKKEEP fences
741          * to the vm.
742          */
743         dma_resv_iter_begin(&cursor, xe_vm_resv(vm),
744                             DMA_RESV_USAGE_BOOKKEEP);
745         dma_resv_for_each_fence_unlocked(&cursor, fence)
746                 dma_fence_enable_sw_signaling(fence);
747         dma_resv_iter_end(&cursor);
748
749         err = dma_resv_wait_timeout(xe_vm_resv(vm),
750                                     DMA_RESV_USAGE_BOOKKEEP,
751                                     false, MAX_SCHEDULE_TIMEOUT);
752         XE_WARN_ON(err <= 0);
753
754         if (xe_vm_in_fault_mode(vm)) {
755                 err = xe_vm_invalidate_vma(vma);
756                 XE_WARN_ON(err);
757         }
758
759         trace_xe_vma_userptr_invalidate_complete(vma);
760
761         return true;
762 }
763
764 static const struct mmu_interval_notifier_ops vma_userptr_notifier_ops = {
765         .invalidate = vma_userptr_invalidate,
766 };
767
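/*
 * Repin all invalidated userptr vmas of @vm and move them to the rebind
 * list. Called with vm->lock held for write; on error the collected vmas
 * are put back on the repin list so the operation can be retried.
 */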
768 int xe_vm_userptr_pin(struct xe_vm *vm)
769 {
770         struct xe_vma *vma, *next;
771         int err = 0;
772         LIST_HEAD(tmp_evict);
773
774         lockdep_assert_held_write(&vm->lock);
775
776         /* Collect invalidated userptrs */
777         spin_lock(&vm->userptr.invalidated_lock);
778         list_for_each_entry_safe(vma, next, &vm->userptr.invalidated,
779                                  userptr.invalidate_link) {
780                 list_del_init(&vma->userptr.invalidate_link);
781                 if (list_empty(&vma->combined_links.userptr))
782                         list_move_tail(&vma->combined_links.userptr,
783                                        &vm->userptr.repin_list);
784         }
785         spin_unlock(&vm->userptr.invalidated_lock);
786
787         /* Pin and move to temporary list */
788         list_for_each_entry_safe(vma, next, &vm->userptr.repin_list,
789                                  combined_links.userptr) {
790                 err = xe_vma_userptr_pin_pages(vma);
791                 if (err < 0)
792                         goto out_err;
793
794                 list_move_tail(&vma->combined_links.userptr, &tmp_evict);
795         }
796
797         /* Take lock and move to rebind_list for rebinding. */
798         err = dma_resv_lock_interruptible(xe_vm_resv(vm), NULL);
799         if (err)
800                 goto out_err;
801
802         list_for_each_entry_safe(vma, next, &tmp_evict, combined_links.userptr)
803                 list_move_tail(&vma->combined_links.rebind, &vm->rebind_list);
804
805         dma_resv_unlock(xe_vm_resv(vm));
806
807         return 0;
808
809 out_err:
810         list_splice_tail(&tmp_evict, &vm->userptr.repin_list);
811
812         return err;
813 }
814
815 /**
816  * xe_vm_userptr_check_repin() - Check whether the VM might have userptrs
817  * that need repinning.
818  * @vm: The VM.
819  *
820  * This function does an advisory check for whether the VM has userptrs that
821  * need repinning.
822  *
823  * Return: 0 if there are no indications of userptrs needing repinning,
824  * -EAGAIN if there are.
825  */
826 int xe_vm_userptr_check_repin(struct xe_vm *vm)
827 {
828         return (list_empty_careful(&vm->userptr.repin_list) &&
829                 list_empty_careful(&vm->userptr.invalidated)) ? 0 : -EAGAIN;
830 }
831
832 static struct dma_fence *
833 xe_vm_bind_vma(struct xe_vma *vma, struct xe_exec_queue *q,
834                struct xe_sync_entry *syncs, u32 num_syncs,
835                bool first_op, bool last_op);
836
837 struct dma_fence *xe_vm_rebind(struct xe_vm *vm, bool rebind_worker)
838 {
839         struct dma_fence *fence = NULL;
840         struct xe_vma *vma, *next;
841
842         lockdep_assert_held(&vm->lock);
843         if (xe_vm_no_dma_fences(vm) && !rebind_worker)
844                 return NULL;
845
846         xe_vm_assert_held(vm);
847         list_for_each_entry_safe(vma, next, &vm->rebind_list,
848                                  combined_links.rebind) {
849                 xe_assert(vm->xe, vma->tile_present);
850
851                 list_del_init(&vma->combined_links.rebind);
852                 dma_fence_put(fence);
853                 if (rebind_worker)
854                         trace_xe_vma_rebind_worker(vma);
855                 else
856                         trace_xe_vma_rebind_exec(vma);
857                 fence = xe_vm_bind_vma(vma, NULL, NULL, 0, false, false);
858                 if (IS_ERR(fence))
859                         return fence;
860         }
861
862         return fence;
863 }
864
865 static struct xe_vma *xe_vma_create(struct xe_vm *vm,
866                                     struct xe_bo *bo,
867                                     u64 bo_offset_or_userptr,
868                                     u64 start, u64 end,
869                                     bool read_only,
870                                     bool is_null,
871                                     u8 tile_mask)
872 {
873         struct xe_vma *vma;
874         struct xe_tile *tile;
875         u8 id;
876
877         xe_assert(vm->xe, start < end);
878         xe_assert(vm->xe, end < vm->size);
879
880         if (!bo && !is_null)    /* userptr */
881                 vma = kzalloc(sizeof(*vma), GFP_KERNEL);
882         else
883                 vma = kzalloc(sizeof(*vma) - sizeof(struct xe_userptr),
884                               GFP_KERNEL);
885         if (!vma) {
886                 vma = ERR_PTR(-ENOMEM);
887                 return vma;
888         }
889
890         INIT_LIST_HEAD(&vma->combined_links.rebind);
891         INIT_LIST_HEAD(&vma->notifier.rebind_link);
892         INIT_LIST_HEAD(&vma->extobj.link);
893
894         INIT_LIST_HEAD(&vma->gpuva.gem.entry);
895         vma->gpuva.vm = &vm->gpuvm;
896         vma->gpuva.va.addr = start;
897         vma->gpuva.va.range = end - start + 1;
898         if (read_only)
899                 vma->gpuva.flags |= XE_VMA_READ_ONLY;
900         if (is_null)
901                 vma->gpuva.flags |= DRM_GPUVA_SPARSE;
902
903         if (tile_mask) {
904                 vma->tile_mask = tile_mask;
905         } else {
906                 for_each_tile(tile, vm->xe, id)
907                         vma->tile_mask |= 0x1 << id;
908         }
909
910         if (GRAPHICS_VER(vm->xe) >= 20 || vm->xe->info.platform == XE_PVC)
911                 vma->gpuva.flags |= XE_VMA_ATOMIC_PTE_BIT;
912
913         if (bo) {
914                 struct drm_gpuvm_bo *vm_bo;
915
916                 xe_bo_assert_held(bo);
917
918                 vm_bo = drm_gpuvm_bo_obtain(vma->gpuva.vm, &bo->ttm.base);
919                 if (IS_ERR(vm_bo)) {
920                         kfree(vma);
921                         return ERR_CAST(vm_bo);
922                 }
923
924                 drm_gem_object_get(&bo->ttm.base);
925                 vma->gpuva.gem.obj = &bo->ttm.base;
926                 vma->gpuva.gem.offset = bo_offset_or_userptr;
927                 drm_gpuva_link(&vma->gpuva, vm_bo);
928                 drm_gpuvm_bo_put(vm_bo);
929         } else /* userptr or null */ {
930                 if (!is_null) {
931                         u64 size = end - start + 1;
932                         int err;
933
934                         INIT_LIST_HEAD(&vma->userptr.invalidate_link);
935                         vma->gpuva.gem.offset = bo_offset_or_userptr;
936
937                         err = mmu_interval_notifier_insert(&vma->userptr.notifier,
938                                                            current->mm,
939                                                            xe_vma_userptr(vma), size,
940                                                            &vma_userptr_notifier_ops);
941                         if (err) {
942                                 kfree(vma);
943                                 vma = ERR_PTR(err);
944                                 return vma;
945                         }
946
947                         vma->userptr.notifier_seq = LONG_MAX;
948                 }
949
950                 xe_vm_get(vm);
951         }
952
953         return vma;
954 }
955
956 static bool vm_remove_extobj(struct xe_vma *vma)
957 {
958         if (!list_empty(&vma->extobj.link)) {
959                 xe_vma_vm(vma)->extobj.entries--;
960                 list_del_init(&vma->extobj.link);
961                 return true;
962         }
963         return false;
964 }
965
966 static void xe_vma_destroy_late(struct xe_vma *vma)
967 {
968         struct xe_vm *vm = xe_vma_vm(vma);
969         struct xe_device *xe = vm->xe;
970         bool read_only = xe_vma_read_only(vma);
971
972         if (xe_vma_is_userptr(vma)) {
973                 if (vma->userptr.sg) {
974                         dma_unmap_sgtable(xe->drm.dev,
975                                           vma->userptr.sg,
976                                           read_only ? DMA_TO_DEVICE :
977                                           DMA_BIDIRECTIONAL, 0);
978                         sg_free_table(vma->userptr.sg);
979                         vma->userptr.sg = NULL;
980                 }
981
982                 /*
983                  * Since userptr pages are not pinned, we can't remove
984                  * the notifier until we're sure the GPU is not accessing
985                  * them anymore
986                  */
987                 mmu_interval_notifier_remove(&vma->userptr.notifier);
988                 xe_vm_put(vm);
989         } else if (xe_vma_is_null(vma)) {
990                 xe_vm_put(vm);
991         } else {
992                 xe_bo_put(xe_vma_bo(vma));
993         }
994
995         kfree(vma);
996 }
997
998 static void vma_destroy_work_func(struct work_struct *w)
999 {
1000         struct xe_vma *vma =
1001                 container_of(w, struct xe_vma, destroy_work);
1002
1003         xe_vma_destroy_late(vma);
1004 }
1005
1006 static struct xe_vma *
1007 bo_has_vm_references_locked(struct xe_bo *bo, struct xe_vm *vm,
1008                             struct xe_vma *ignore)
1009 {
1010         struct drm_gpuvm_bo *vm_bo;
1011         struct drm_gpuva *va;
1012         struct drm_gem_object *obj = &bo->ttm.base;
1013
1014         xe_bo_assert_held(bo);
1015
1016         drm_gem_for_each_gpuvm_bo(vm_bo, obj) {
1017                 drm_gpuvm_bo_for_each_va(va, vm_bo) {
1018                         struct xe_vma *vma = gpuva_to_vma(va);
1019
1020                         if (vma != ignore && xe_vma_vm(vma) == vm)
1021                                 return vma;
1022                 }
1023         }
1024
1025         return NULL;
1026 }
1027
1028 static bool bo_has_vm_references(struct xe_bo *bo, struct xe_vm *vm,
1029                                  struct xe_vma *ignore)
1030 {
1031         bool ret;
1032
1033         xe_bo_lock(bo, false);
1034         ret = !!bo_has_vm_references_locked(bo, vm, ignore);
1035         xe_bo_unlock(bo);
1036
1037         return ret;
1038 }
1039
1040 static void __vm_insert_extobj(struct xe_vm *vm, struct xe_vma *vma)
1041 {
1042         lockdep_assert_held_write(&vm->lock);
1043
1044         list_add(&vma->extobj.link, &vm->extobj.list);
1045         vm->extobj.entries++;
1046 }
1047
1048 static void vm_insert_extobj(struct xe_vm *vm, struct xe_vma *vma)
1049 {
1050         struct xe_bo *bo = xe_vma_bo(vma);
1051
1052         lockdep_assert_held_write(&vm->lock);
1053
1054         if (bo_has_vm_references(bo, vm, vma))
1055                 return;
1056
1057         __vm_insert_extobj(vm, vma);
1058 }
1059
1060 static void vma_destroy_cb(struct dma_fence *fence,
1061                            struct dma_fence_cb *cb)
1062 {
1063         struct xe_vma *vma = container_of(cb, struct xe_vma, destroy_cb);
1064
1065         INIT_WORK(&vma->destroy_work, vma_destroy_work_func);
1066         queue_work(system_unbound_wq, &vma->destroy_work);
1067 }
1068
1069 static void xe_vma_destroy(struct xe_vma *vma, struct dma_fence *fence)
1070 {
1071         struct xe_vm *vm = xe_vma_vm(vma);
1072
1073         lockdep_assert_held_write(&vm->lock);
1074         xe_assert(vm->xe, list_empty(&vma->combined_links.destroy));
1075
1076         if (xe_vma_is_userptr(vma)) {
1077                 xe_assert(vm->xe, vma->gpuva.flags & XE_VMA_DESTROYED);
1078
1079                 spin_lock(&vm->userptr.invalidated_lock);
1080                 list_del(&vma->userptr.invalidate_link);
1081                 spin_unlock(&vm->userptr.invalidated_lock);
1082         } else if (!xe_vma_is_null(vma)) {
1083                 xe_bo_assert_held(xe_vma_bo(vma));
1084
1085                 spin_lock(&vm->notifier.list_lock);
1086                 list_del(&vma->notifier.rebind_link);
1087                 spin_unlock(&vm->notifier.list_lock);
1088
1089                 drm_gpuva_unlink(&vma->gpuva);
1090
1091                 if (!xe_vma_bo(vma)->vm && vm_remove_extobj(vma)) {
1092                         struct xe_vma *other;
1093
1094                         other = bo_has_vm_references_locked(xe_vma_bo(vma), vm, NULL);
1095
1096                         if (other)
1097                                 __vm_insert_extobj(vm, other);
1098                 }
1099         }
1100
1101         xe_vm_assert_held(vm);
1102         if (fence) {
1103                 int ret = dma_fence_add_callback(fence, &vma->destroy_cb,
1104                                                  vma_destroy_cb);
1105
1106                 if (ret) {
1107                         XE_WARN_ON(ret != -ENOENT);
1108                         xe_vma_destroy_late(vma);
1109                 }
1110         } else {
1111                 xe_vma_destroy_late(vma);
1112         }
1113 }
1114
1115 /**
1116  * xe_vm_prepare_vma() - drm_exec utility to lock a vma
1117  * @exec: The drm_exec object we're currently locking for.
1118  * @vma: The vma for which we want to lock the vm resv and any attached
1119  * object's resv.
1120  * @num_shared: The number of dma-fence slots to pre-allocate in the
1121  * objects' reservation objects.
1122  *
1123  * Return: 0 on success, negative error code on error. In particular
1124  * may return -EDEADLK on WW transaction contention and -EINTR if
1125  * an interruptible wait is terminated by a signal.
1126  */
1127 int xe_vm_prepare_vma(struct drm_exec *exec, struct xe_vma *vma,
1128                       unsigned int num_shared)
1129 {
1130         struct xe_vm *vm = xe_vma_vm(vma);
1131         struct xe_bo *bo = xe_vma_bo(vma);
1132         int err;
1133
1134         XE_WARN_ON(!vm);
1135         err = drm_exec_prepare_obj(exec, xe_vm_obj(vm), num_shared);
1136         if (!err && bo && !bo->vm)
1137                 err = drm_exec_prepare_obj(exec, &bo->ttm.base, num_shared);
1138
1139         return err;
1140 }
1141
1142 static void xe_vma_destroy_unlocked(struct xe_vma *vma)
1143 {
1144         struct drm_exec exec;
1145         int err;
1146
1147         drm_exec_init(&exec, 0);
1148         drm_exec_until_all_locked(&exec) {
1149                 err = xe_vm_prepare_vma(&exec, vma, 0);
1150                 drm_exec_retry_on_contention(&exec);
1151                 if (XE_WARN_ON(err))
1152                         break;
1153         }
1154
1155         xe_vma_destroy(vma, NULL);
1156
1157         drm_exec_fini(&exec);
1158 }
1159
1160 struct xe_vma *
1161 xe_vm_find_overlapping_vma(struct xe_vm *vm, u64 start, u64 range)
1162 {
1163         struct drm_gpuva *gpuva;
1164
1165         lockdep_assert_held(&vm->lock);
1166
1167         if (xe_vm_is_closed_or_banned(vm))
1168                 return NULL;
1169
1170         xe_assert(vm->xe, start + range <= vm->size);
1171
1172         gpuva = drm_gpuva_find_first(&vm->gpuvm, start, range);
1173
1174         return gpuva ? gpuva_to_vma(gpuva) : NULL;
1175 }
1176
1177 static int xe_vm_insert_vma(struct xe_vm *vm, struct xe_vma *vma)
1178 {
1179         int err;
1180
1181         xe_assert(vm->xe, xe_vma_vm(vma) == vm);
1182         lockdep_assert_held(&vm->lock);
1183
1184         err = drm_gpuva_insert(&vm->gpuvm, &vma->gpuva);
1185         XE_WARN_ON(err);        /* Shouldn't be possible */
1186
1187         return err;
1188 }
1189
1190 static void xe_vm_remove_vma(struct xe_vm *vm, struct xe_vma *vma)
1191 {
1192         xe_assert(vm->xe, xe_vma_vm(vma) == vm);
1193         lockdep_assert_held(&vm->lock);
1194
1195         drm_gpuva_remove(&vma->gpuva);
1196         if (vm->usm.last_fault_vma == vma)
1197                 vm->usm.last_fault_vma = NULL;
1198 }
1199
1200 static struct drm_gpuva_op *xe_vm_op_alloc(void)
1201 {
1202         struct xe_vma_op *op;
1203
1204         op = kzalloc(sizeof(*op), GFP_KERNEL);
1205
1206         if (unlikely(!op))
1207                 return NULL;
1208
1209         return &op->base;
1210 }
1211
1212 static void xe_vm_free(struct drm_gpuvm *gpuvm);
1213
1214 static struct drm_gpuvm_ops gpuvm_ops = {
1215         .op_alloc = xe_vm_op_alloc,
1216         .vm_free = xe_vm_free,
1217 };
1218
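/*
 * PAT index encoding helpers: bits 0-1 of the index map to the PAT0/PAT1
 * bits for both PDEs and PTEs, while PTEs additionally encode bits 2-4,
 * with the bit 2 encoding depending on the page-table level.
 */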
1219 static u64 pde_encode_pat_index(struct xe_device *xe, u16 pat_index)
1220 {
1221         u64 pte = 0;
1222
1223         if (pat_index & BIT(0))
1224                 pte |= XE_PPGTT_PTE_PAT0;
1225
1226         if (pat_index & BIT(1))
1227                 pte |= XE_PPGTT_PTE_PAT1;
1228
1229         return pte;
1230 }
1231
1232 static u64 pte_encode_pat_index(struct xe_device *xe, u16 pat_index,
1233                                 u32 pt_level)
1234 {
1235         u64 pte = 0;
1236
1237         if (pat_index & BIT(0))
1238                 pte |= XE_PPGTT_PTE_PAT0;
1239
1240         if (pat_index & BIT(1))
1241                 pte |= XE_PPGTT_PTE_PAT1;
1242
1243         if (pat_index & BIT(2)) {
1244                 if (pt_level)
1245                         pte |= XE_PPGTT_PDE_PDPE_PAT2;
1246                 else
1247                         pte |= XE_PPGTT_PTE_PAT2;
1248         }
1249
1250         if (pat_index & BIT(3))
1251                 pte |= XELPG_PPGTT_PTE_PAT3;
1252
1253         if (pat_index & (BIT(4)))
1254                 pte |= XE2_PPGTT_PTE_PAT4;
1255
1256         return pte;
1257 }
1258
1259 static u64 pte_encode_ps(u32 pt_level)
1260 {
1261         XE_WARN_ON(pt_level > 2);
1262
1263         if (pt_level == 1)
1264                 return XE_PDE_PS_2M;
1265         else if (pt_level == 2)
1266                 return XE_PDPE_PS_1G;
1267
1268         return 0;
1269 }
1270
1271 static u64 xelp_pde_encode_bo(struct xe_bo *bo, u64 bo_offset,
1272                               const u16 pat_index)
1273 {
1274         struct xe_device *xe = xe_bo_device(bo);
1275         u64 pde;
1276
1277         pde = xe_bo_addr(bo, bo_offset, XE_PAGE_SIZE);
1278         pde |= XE_PAGE_PRESENT | XE_PAGE_RW;
1279         pde |= pde_encode_pat_index(xe, pat_index);
1280
1281         return pde;
1282 }
1283
1284 static u64 xelp_pte_encode_bo(struct xe_bo *bo, u64 bo_offset,
1285                               u16 pat_index, u32 pt_level)
1286 {
1287         struct xe_device *xe = xe_bo_device(bo);
1288         u64 pte;
1289
1290         pte = xe_bo_addr(bo, bo_offset, XE_PAGE_SIZE);
1291         pte |= XE_PAGE_PRESENT | XE_PAGE_RW;
1292         pte |= pte_encode_pat_index(xe, pat_index, pt_level);
1293         pte |= pte_encode_ps(pt_level);
1294
1295         if (xe_bo_is_vram(bo) || xe_bo_is_stolen_devmem(bo))
1296                 pte |= XE_PPGTT_PTE_DM;
1297
1298         return pte;
1299 }
1300
1301 static u64 xelp_pte_encode_vma(u64 pte, struct xe_vma *vma,
1302                                u16 pat_index, u32 pt_level)
1303 {
1304         struct xe_device *xe = xe_vma_vm(vma)->xe;
1305
1306         pte |= XE_PAGE_PRESENT;
1307
1308         if (likely(!xe_vma_read_only(vma)))
1309                 pte |= XE_PAGE_RW;
1310
1311         pte |= pte_encode_pat_index(xe, pat_index, pt_level);
1312         pte |= pte_encode_ps(pt_level);
1313
1314         if (unlikely(xe_vma_is_null(vma)))
1315                 pte |= XE_PTE_NULL;
1316
1317         return pte;
1318 }
1319
1320 static u64 xelp_pte_encode_addr(struct xe_device *xe, u64 addr,
1321                                 u16 pat_index,
1322                                 u32 pt_level, bool devmem, u64 flags)
1323 {
1324         u64 pte;
1325
1326         /* Avoid passing random bits directly as flags */
1327         xe_assert(xe, !(flags & ~XE_PTE_PS64));
1328
1329         pte = addr;
1330         pte |= XE_PAGE_PRESENT | XE_PAGE_RW;
1331         pte |= pte_encode_pat_index(xe, pat_index, pt_level);
1332         pte |= pte_encode_ps(pt_level);
1333
1334         if (devmem)
1335                 pte |= XE_PPGTT_PTE_DM;
1336
1337         pte |= flags;
1338
1339         return pte;
1340 }
1341
1342 static const struct xe_pt_ops xelp_pt_ops = {
1343         .pte_encode_bo = xelp_pte_encode_bo,
1344         .pte_encode_vma = xelp_pte_encode_vma,
1345         .pte_encode_addr = xelp_pte_encode_addr,
1346         .pde_encode_bo = xelp_pde_encode_bo,
1347 };
1348
1349 static void vm_destroy_work_func(struct work_struct *w);
1350
1351 struct xe_vm *xe_vm_create(struct xe_device *xe, u32 flags)
1352 {
1353         struct drm_gem_object *vm_resv_obj;
1354         struct xe_vm *vm;
1355         int err, number_tiles = 0;
1356         struct xe_tile *tile;
1357         u8 id;
1358
1359         vm = kzalloc(sizeof(*vm), GFP_KERNEL);
1360         if (!vm)
1361                 return ERR_PTR(-ENOMEM);
1362
1363         vm->xe = xe;
1364
1365         vm->size = 1ull << xe->info.va_bits;
1366
1367         vm->flags = flags;
1368
1369         init_rwsem(&vm->lock);
1370
1371         INIT_LIST_HEAD(&vm->rebind_list);
1372
1373         INIT_LIST_HEAD(&vm->userptr.repin_list);
1374         INIT_LIST_HEAD(&vm->userptr.invalidated);
1375         init_rwsem(&vm->userptr.notifier_lock);
1376         spin_lock_init(&vm->userptr.invalidated_lock);
1377
1378         INIT_LIST_HEAD(&vm->notifier.rebind_list);
1379         spin_lock_init(&vm->notifier.list_lock);
1380
1381         INIT_WORK(&vm->destroy_work, vm_destroy_work_func);
1382
1383         INIT_LIST_HEAD(&vm->preempt.exec_queues);
1384         vm->preempt.min_run_period_ms = 10;     /* FIXME: Wire up to uAPI */
1385
1386         for_each_tile(tile, xe, id)
1387                 xe_range_fence_tree_init(&vm->rftree[id]);
1388
1389         INIT_LIST_HEAD(&vm->extobj.list);
1390
1391         vm->pt_ops = &xelp_pt_ops;
1392
1393         if (!(flags & XE_VM_FLAG_MIGRATION))
1394                 xe_device_mem_access_get(xe);
1395
1396         vm_resv_obj = drm_gpuvm_resv_object_alloc(&xe->drm);
1397         if (!vm_resv_obj) {
1398                 err = -ENOMEM;
1399                 goto err_no_resv;
1400         }
1401
1402         drm_gpuvm_init(&vm->gpuvm, "Xe VM", 0, &xe->drm, vm_resv_obj,
1403                        0, vm->size, 0, 0, &gpuvm_ops);
1404
1405         drm_gem_object_put(vm_resv_obj);
1406
1407         err = dma_resv_lock_interruptible(xe_vm_resv(vm), NULL);
1408         if (err)
1409                 goto err_close;
1410
1411         if (IS_DGFX(xe) && xe->info.vram_flags & XE_VRAM_FLAGS_NEED64K)
1412                 vm->flags |= XE_VM_FLAG_64K;
1413
1414         for_each_tile(tile, xe, id) {
1415                 if (flags & XE_VM_FLAG_MIGRATION &&
1416                     tile->id != XE_VM_FLAG_TILE_ID(flags))
1417                         continue;
1418
1419                 vm->pt_root[id] = xe_pt_create(vm, tile, xe->info.vm_max_level);
1420                 if (IS_ERR(vm->pt_root[id])) {
1421                         err = PTR_ERR(vm->pt_root[id]);
1422                         vm->pt_root[id] = NULL;
1423                         goto err_unlock_close;
1424                 }
1425         }
1426
1427         if (flags & XE_VM_FLAG_SCRATCH_PAGE) {
1428                 for_each_tile(tile, xe, id) {
1429                         if (!vm->pt_root[id])
1430                                 continue;
1431
1432                         err = xe_pt_create_scratch(xe, tile, vm);
1433                         if (err)
1434                                 goto err_unlock_close;
1435                 }
1436                 vm->batch_invalidate_tlb = true;
1437         }
1438
1439         if (flags & XE_VM_FLAG_COMPUTE_MODE) {
1440                 INIT_WORK(&vm->preempt.rebind_work, preempt_rebind_work_func);
1441                 vm->flags |= XE_VM_FLAG_COMPUTE_MODE;
1442                 vm->batch_invalidate_tlb = false;
1443         }
1444
1445         /* Fill pt_root after allocating scratch tables */
1446         for_each_tile(tile, xe, id) {
1447                 if (!vm->pt_root[id])
1448                         continue;
1449
1450                 xe_pt_populate_empty(tile, vm, vm->pt_root[id]);
1451         }
1452         dma_resv_unlock(xe_vm_resv(vm));
1453
1454         /* Kernel migration VM shouldn't have a circular loop.. */
1455         if (!(flags & XE_VM_FLAG_MIGRATION)) {
1456                 for_each_tile(tile, xe, id) {
1457                         struct xe_gt *gt = tile->primary_gt;
1458                         struct xe_vm *migrate_vm;
1459                         struct xe_exec_queue *q;
1460                         u32 create_flags = EXEC_QUEUE_FLAG_VM |
1461                                 ((flags & XE_VM_FLAG_ASYNC_DEFAULT) ?
1462                                 EXEC_QUEUE_FLAG_VM_ASYNC : 0);
1463
1464                         if (!vm->pt_root[id])
1465                                 continue;
1466
1467                         migrate_vm = xe_migrate_get_vm(tile->migrate);
1468                         q = xe_exec_queue_create_class(xe, gt, migrate_vm,
1469                                                        XE_ENGINE_CLASS_COPY,
1470                                                        create_flags);
1471                         xe_vm_put(migrate_vm);
1472                         if (IS_ERR(q)) {
1473                                 err = PTR_ERR(q);
1474                                 goto err_close;
1475                         }
1476                         vm->q[id] = q;
1477                         number_tiles++;
1478                 }
1479         }
1480
1481         if (number_tiles > 1)
1482                 vm->composite_fence_ctx = dma_fence_context_alloc(1);
1483
1484         mutex_lock(&xe->usm.lock);
1485         if (flags & XE_VM_FLAG_FAULT_MODE)
1486                 xe->usm.num_vm_in_fault_mode++;
1487         else if (!(flags & XE_VM_FLAG_MIGRATION))
1488                 xe->usm.num_vm_in_non_fault_mode++;
1489         mutex_unlock(&xe->usm.lock);
1490
1491         trace_xe_vm_create(vm);
1492
1493         return vm;
1494
1495 err_unlock_close:
1496         dma_resv_unlock(xe_vm_resv(vm));
1497 err_close:
1498         xe_vm_close_and_put(vm);
1499         return ERR_PTR(err);
1500
1501 err_no_resv:
1502         for_each_tile(tile, xe, id)
1503                 xe_range_fence_tree_fini(&vm->rftree[id]);
1504         kfree(vm);
1505         if (!(flags & XE_VM_FLAG_MIGRATION))
1506                 xe_device_mem_access_put(xe);
1507         return ERR_PTR(err);
1508 }
1509
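/*
 * Closing a VM: vm->size is zeroed under the vm lock; a zero size is what
 * the rest of the driver treats as the closed state (see the !vm->size
 * assert in vm_destroy_work_func()).
 */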
1510 static void xe_vm_close(struct xe_vm *vm)
1511 {
1512         down_write(&vm->lock);
1513         vm->size = 0;
1514         up_write(&vm->lock);
1515 }
1516
1517 void xe_vm_close_and_put(struct xe_vm *vm)
1518 {
1519         LIST_HEAD(contested);
1520         struct xe_device *xe = vm->xe;
1521         struct xe_tile *tile;
1522         struct xe_vma *vma, *next_vma;
1523         struct drm_gpuva *gpuva, *next;
1524         u8 id;
1525
1526         xe_assert(xe, !vm->preempt.num_exec_queues);
1527
1528         xe_vm_close(vm);
1529         if (xe_vm_in_compute_mode(vm))
1530                 flush_work(&vm->preempt.rebind_work);
1531
1532         down_write(&vm->lock);
1533         for_each_tile(tile, xe, id) {
1534                 if (vm->q[id])
1535                         xe_exec_queue_last_fence_put(vm->q[id], vm);
1536         }
1537         up_write(&vm->lock);
1538
1539         for_each_tile(tile, xe, id) {
1540                 if (vm->q[id]) {
1541                         xe_exec_queue_kill(vm->q[id]);
1542                         xe_exec_queue_put(vm->q[id]);
1543                         vm->q[id] = NULL;
1544                 }
1545         }
1546
1547         down_write(&vm->lock);
1548         xe_vm_lock(vm, false);
1549         drm_gpuvm_for_each_va_safe(gpuva, next, &vm->gpuvm) {
1550                 vma = gpuva_to_vma(gpuva);
1551
1552                 if (xe_vma_has_no_bo(vma)) {
1553                         down_read(&vm->userptr.notifier_lock);
1554                         vma->gpuva.flags |= XE_VMA_DESTROYED;
1555                         up_read(&vm->userptr.notifier_lock);
1556                 }
1557
1558                 xe_vm_remove_vma(vm, vma);
1559
1560                 /* easy case, remove from VMA? */
1561                 if (xe_vma_has_no_bo(vma) || xe_vma_bo(vma)->vm) {
1562                         list_del_init(&vma->combined_links.rebind);
1563                         xe_vma_destroy(vma, NULL);
1564                         continue;
1565                 }
1566
1567                 list_move_tail(&vma->combined_links.destroy, &contested);
1568                 vma->gpuva.flags |= XE_VMA_DESTROYED;
1569         }
1570
1571         /*
1572          * All vm operations will add shared fences to resv.
1573          * The only exception is eviction for a shared object,
1574          * but even so, the unbind when evicted would still
1575          * install a fence to resv. Hence it's safe to
1576          * destroy the pagetables immediately.
1577          */
1578         for_each_tile(tile, xe, id) {
1579                 if (vm->scratch_bo[id]) {
1580                         u32 i;
1581
1582                         xe_bo_unpin(vm->scratch_bo[id]);
1583                         xe_bo_put(vm->scratch_bo[id]);
1584                         for (i = 0; i < vm->pt_root[id]->level; i++)
1585                                 xe_pt_destroy(vm->scratch_pt[id][i], vm->flags,
1586                                               NULL);
1587                 }
1588                 if (vm->pt_root[id]) {
1589                         xe_pt_destroy(vm->pt_root[id], vm->flags, NULL);
1590                         vm->pt_root[id] = NULL;
1591                 }
1592         }
1593         xe_vm_unlock(vm);
1594
1595         /*
1596          * VM is now dead, cannot re-add nodes to vm->vmas if it's NULL
1597          * Since we hold a refcount to the bo, we can remove and free
1598          * the members safely without locking.
1599          */
1600         list_for_each_entry_safe(vma, next_vma, &contested,
1601                                  combined_links.destroy) {
1602                 list_del_init(&vma->combined_links.destroy);
1603                 xe_vma_destroy_unlocked(vma);
1604         }
1605
1606         xe_assert(xe, list_empty(&vm->extobj.list));
1607         up_write(&vm->lock);
1608
1609         mutex_lock(&xe->usm.lock);
1610         if (vm->flags & XE_VM_FLAG_FAULT_MODE)
1611                 xe->usm.num_vm_in_fault_mode--;
1612         else if (!(vm->flags & XE_VM_FLAG_MIGRATION))
1613                 xe->usm.num_vm_in_non_fault_mode--;
1614         mutex_unlock(&xe->usm.lock);
1615
1616         for_each_tile(tile, xe, id)
1617                 xe_range_fence_tree_fini(&vm->rftree[id]);
1618
1619         xe_vm_put(vm);
1620 }
1621
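/*
 * Final VM teardown, run from a workqueue so it is allowed to sleep: for
 * non-migration VMs this drops the device mem_access reference and releases
 * the VM's ASID from the usm.asid_to_vm xarray before freeing the remaining
 * VM state.
 */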
1622 static void vm_destroy_work_func(struct work_struct *w)
1623 {
1624         struct xe_vm *vm =
1625                 container_of(w, struct xe_vm, destroy_work);
1626         struct xe_device *xe = vm->xe;
1627         struct xe_tile *tile;
1628         u8 id;
1629         void *lookup;
1630
1631         /* xe_vm_close_and_put was not called? */
1632         xe_assert(xe, !vm->size);
1633
1634         if (!(vm->flags & XE_VM_FLAG_MIGRATION)) {
1635                 xe_device_mem_access_put(xe);
1636
1637                 if (xe->info.has_asid) {
1638                         mutex_lock(&xe->usm.lock);
1639                         lookup = xa_erase(&xe->usm.asid_to_vm, vm->usm.asid);
1640                         xe_assert(xe, lookup == vm);
1641                         mutex_unlock(&xe->usm.lock);
1642                 }
1643         }
1644
1645         for_each_tile(tile, xe, id)
1646                 XE_WARN_ON(vm->pt_root[id]);
1647
1648         trace_xe_vm_free(vm);
1649         dma_fence_put(vm->rebind_fence);
1650         kfree(vm);
1651 }
1652
1653 static void xe_vm_free(struct drm_gpuvm *gpuvm)
1654 {
1655         struct xe_vm *vm = container_of(gpuvm, struct xe_vm, gpuvm);
1656
1657         /* To destroy the VM we need to be able to sleep */
1658         queue_work(system_unbound_wq, &vm->destroy_work);
1659 }
1660
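/**
 * xe_vm_lookup() - Look up a VM from its per-file handle
 * @xef: xe file from which the handle was allocated
 * @id: VM handle returned at VM creation
 *
 * Takes a reference on the VM if found; the caller must drop it with
 * xe_vm_put().
 *
 * Return: Pointer to the VM, or NULL if no VM matches @id.
 */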
1661 struct xe_vm *xe_vm_lookup(struct xe_file *xef, u32 id)
1662 {
1663         struct xe_vm *vm;
1664
1665         mutex_lock(&xef->vm.lock);
1666         vm = xa_load(&xef->vm.xa, id);
1667         if (vm)
1668                 xe_vm_get(vm);
1669         mutex_unlock(&xef->vm.lock);
1670
1671         return vm;
1672 }
1673
1674 u64 xe_vm_pdp4_descriptor(struct xe_vm *vm, struct xe_tile *tile)
1675 {
1676         return vm->pt_ops->pde_encode_bo(vm->pt_root[tile->id]->bo, 0,
1677                                          tile_to_xe(tile)->pat.idx[XE_CACHE_WB]);
1678 }
1679
1680 static struct xe_exec_queue *
1681 to_wait_exec_queue(struct xe_vm *vm, struct xe_exec_queue *q)
1682 {
1683         return q ? q : vm->q[0];
1684 }
1685
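/*
 * Unbind @vma from every tile where it is currently present, aggregating the
 * per-tile fences into a dma_fence_array when more than one tile is involved.
 * Syncs are only passed to the PT job for the first operation of an ioctl and
 * only signalled once for the last one.
 */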
1686 static struct dma_fence *
1687 xe_vm_unbind_vma(struct xe_vma *vma, struct xe_exec_queue *q,
1688                  struct xe_sync_entry *syncs, u32 num_syncs,
1689                  bool first_op, bool last_op)
1690 {
1691         struct xe_vm *vm = xe_vma_vm(vma);
1692         struct xe_exec_queue *wait_exec_queue = to_wait_exec_queue(vm, q);
1693         struct xe_tile *tile;
1694         struct dma_fence *fence = NULL;
1695         struct dma_fence **fences = NULL;
1696         struct dma_fence_array *cf = NULL;
1697         int cur_fence = 0, i;
1698         int number_tiles = hweight8(vma->tile_present);
1699         int err;
1700         u8 id;
1701
1702         trace_xe_vma_unbind(vma);
1703
1704         if (number_tiles > 1) {
1705                 fences = kmalloc_array(number_tiles, sizeof(*fences),
1706                                        GFP_KERNEL);
1707                 if (!fences)
1708                         return ERR_PTR(-ENOMEM);
1709         }
1710
1711         for_each_tile(tile, vm->xe, id) {
1712                 if (!(vma->tile_present & BIT(id)))
1713                         goto next;
1714
1715                 fence = __xe_pt_unbind_vma(tile, vma, q ? q : vm->q[id],
1716                                            first_op ? syncs : NULL,
1717                                            first_op ? num_syncs : 0);
1718                 if (IS_ERR(fence)) {
1719                         err = PTR_ERR(fence);
1720                         goto err_fences;
1721                 }
1722
1723                 if (fences)
1724                         fences[cur_fence++] = fence;
1725
1726 next:
1727                 if (q && vm->pt_root[id] && !list_empty(&q->multi_gt_list))
1728                         q = list_next_entry(q, multi_gt_list);
1729         }
1730
1731         if (fences) {
1732                 cf = dma_fence_array_create(number_tiles, fences,
1733                                             vm->composite_fence_ctx,
1734                                             vm->composite_fence_seqno++,
1735                                             false);
1736                 if (!cf) {
1737                         --vm->composite_fence_seqno;
1738                         err = -ENOMEM;
1739                         goto err_fences;
1740                 }
1741         }
1742
1743         fence = cf ? &cf->base : !fence ?
1744                 xe_exec_queue_last_fence_get(wait_exec_queue, vm) : fence;
1745         if (last_op) {
1746                 for (i = 0; i < num_syncs; i++)
1747                         xe_sync_entry_signal(&syncs[i], NULL, fence);
1748         }
1749
1750         return fence;
1751
1752 err_fences:
1753         if (fences) {
1754                 while (cur_fence)
1755                         dma_fence_put(fences[--cur_fence]);
1756                 kfree(fences);
1757         }
1758
1759         return ERR_PTR(err);
1760 }
1761
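/*
 * Bind @vma on every tile in its tile_mask, telling the PT code whether the
 * tile already has the VMA present so it can treat the bind as a rebind.
 * Fence aggregation and sync handling mirror xe_vm_unbind_vma().
 */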
1762 static struct dma_fence *
1763 xe_vm_bind_vma(struct xe_vma *vma, struct xe_exec_queue *q,
1764                struct xe_sync_entry *syncs, u32 num_syncs,
1765                bool first_op, bool last_op)
1766 {
1767         struct xe_tile *tile;
1768         struct dma_fence *fence;
1769         struct dma_fence **fences = NULL;
1770         struct dma_fence_array *cf = NULL;
1771         struct xe_vm *vm = xe_vma_vm(vma);
1772         int cur_fence = 0, i;
1773         int number_tiles = hweight8(vma->tile_mask);
1774         int err;
1775         u8 id;
1776
1777         trace_xe_vma_bind(vma);
1778
1779         if (number_tiles > 1) {
1780                 fences = kmalloc_array(number_tiles, sizeof(*fences),
1781                                        GFP_KERNEL);
1782                 if (!fences)
1783                         return ERR_PTR(-ENOMEM);
1784         }
1785
1786         for_each_tile(tile, vm->xe, id) {
1787                 if (!(vma->tile_mask & BIT(id)))
1788                         goto next;
1789
1790                 fence = __xe_pt_bind_vma(tile, vma, q ? q : vm->q[id],
1791                                          first_op ? syncs : NULL,
1792                                          first_op ? num_syncs : 0,
1793                                          vma->tile_present & BIT(id));
1794                 if (IS_ERR(fence)) {
1795                         err = PTR_ERR(fence);
1796                         goto err_fences;
1797                 }
1798
1799                 if (fences)
1800                         fences[cur_fence++] = fence;
1801
1802 next:
1803                 if (q && vm->pt_root[id] && !list_empty(&q->multi_gt_list))
1804                         q = list_next_entry(q, multi_gt_list);
1805         }
1806
1807         if (fences) {
1808                 cf = dma_fence_array_create(number_tiles, fences,
1809                                             vm->composite_fence_ctx,
1810                                             vm->composite_fence_seqno++,
1811                                             false);
1812                 if (!cf) {
1813                         --vm->composite_fence_seqno;
1814                         err = -ENOMEM;
1815                         goto err_fences;
1816                 }
1817         }
1818
1819         if (last_op) {
1820                 for (i = 0; i < num_syncs; i++)
1821                         xe_sync_entry_signal(&syncs[i], NULL,
1822                                              cf ? &cf->base : fence);
1823         }
1824
1825         return cf ? &cf->base : fence;
1826
1827 err_fences:
1828         if (fences) {
1829                 while (cur_fence)
1830                         dma_fence_put(fences[--cur_fence]);
1831                 kfree(fences);
1832         }
1833
1834         return ERR_PTR(err);
1835 }
1836
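/* True when the bind/unbind must complete before returning to userspace */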
1837 static bool xe_vm_sync_mode(struct xe_vm *vm, struct xe_exec_queue *q)
1838 {
1839         return q ? !(q->flags & EXEC_QUEUE_FLAG_VM_ASYNC) :
1840                 !(vm->flags & XE_VM_FLAG_ASYNC_DEFAULT);
1841 }
1842
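/*
 * Issue the bind immediately, or, when not @immediate (only allowed in fault
 * mode), issue no GPU work and reuse the exec queue's last fence. For the
 * last operation the syncs are signalled, the last fence is updated and, in
 * synchronous mode, waited upon.
 */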
1843 static int __xe_vm_bind(struct xe_vm *vm, struct xe_vma *vma,
1844                         struct xe_exec_queue *q, struct xe_sync_entry *syncs,
1845                         u32 num_syncs, bool immediate, bool first_op,
1846                         bool last_op)
1847 {
1848         struct dma_fence *fence;
1849         struct xe_exec_queue *wait_exec_queue = to_wait_exec_queue(vm, q);
1850
1851         xe_vm_assert_held(vm);
1852
1853         if (immediate) {
1854                 fence = xe_vm_bind_vma(vma, q, syncs, num_syncs, first_op,
1855                                        last_op);
1856                 if (IS_ERR(fence))
1857                         return PTR_ERR(fence);
1858         } else {
1859                 int i;
1860
1861                 xe_assert(vm->xe, xe_vm_in_fault_mode(vm));
1862
1863                 fence = xe_exec_queue_last_fence_get(wait_exec_queue, vm);
1864                 if (last_op) {
1865                         for (i = 0; i < num_syncs; i++)
1866                                 xe_sync_entry_signal(&syncs[i], NULL, fence);
1867                 }
1868         }
1869
1870         if (last_op)
1871                 xe_exec_queue_last_fence_set(wait_exec_queue, vm, fence);
1872         if (last_op && xe_vm_sync_mode(vm, q))
1873                 dma_fence_wait(fence, true);
1874         dma_fence_put(fence);
1875
1876         return 0;
1877 }
1878
1879 static int xe_vm_bind(struct xe_vm *vm, struct xe_vma *vma, struct xe_exec_queue *q,
1880                       struct xe_bo *bo, struct xe_sync_entry *syncs,
1881                       u32 num_syncs, bool immediate, bool first_op,
1882                       bool last_op)
1883 {
1884         int err;
1885
1886         xe_vm_assert_held(vm);
1887         xe_bo_assert_held(bo);
1888
1889         if (bo && immediate) {
1890                 err = xe_bo_validate(bo, vm, true);
1891                 if (err)
1892                         return err;
1893         }
1894
1895         return __xe_vm_bind(vm, vma, q, syncs, num_syncs, immediate, first_op,
1896                             last_op);
1897 }
1898
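/*
 * Unbind @vma and destroy it, handing the unbind fence to xe_vma_destroy();
 * last-fence bookkeeping and synchronous waits match __xe_vm_bind().
 */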
1899 static int xe_vm_unbind(struct xe_vm *vm, struct xe_vma *vma,
1900                         struct xe_exec_queue *q, struct xe_sync_entry *syncs,
1901                         u32 num_syncs, bool first_op, bool last_op)
1902 {
1903         struct dma_fence *fence;
1904         struct xe_exec_queue *wait_exec_queue = to_wait_exec_queue(vm, q);
1905
1906         xe_vm_assert_held(vm);
1907         xe_bo_assert_held(xe_vma_bo(vma));
1908
1909         fence = xe_vm_unbind_vma(vma, q, syncs, num_syncs, first_op, last_op);
1910         if (IS_ERR(fence))
1911                 return PTR_ERR(fence);
1912
1913         xe_vma_destroy(vma, fence);
1914         if (last_op)
1915                 xe_exec_queue_last_fence_set(wait_exec_queue, vm, fence);
1916         if (last_op && xe_vm_sync_mode(vm, q))
1917                 dma_fence_wait(fence, true);
1918         dma_fence_put(fence);
1919
1920         return 0;
1921 }
1922
1923 #define ALL_DRM_XE_VM_CREATE_FLAGS (DRM_XE_VM_CREATE_FLAG_SCRATCH_PAGE | \
1924                                     DRM_XE_VM_CREATE_FLAG_COMPUTE_MODE | \
1925                                     DRM_XE_VM_CREATE_FLAG_ASYNC_DEFAULT | \
1926                                     DRM_XE_VM_CREATE_FLAG_FAULT_MODE)
1927
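/**
 * xe_vm_create_ioctl() - Create a VM
 * @dev: DRM device
 * @data: struct drm_xe_vm_create IOCTL argument
 * @file: DRM file
 *
 * Validates the creation flags, creates the VM, allocates a per-file handle
 * for it and, on devices with ASID support, an ASID tracked in the
 * xe->usm.asid_to_vm xarray.
 *
 * Return: 0 on success, negative error code on failure.
 */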
1928 int xe_vm_create_ioctl(struct drm_device *dev, void *data,
1929                        struct drm_file *file)
1930 {
1931         struct xe_device *xe = to_xe_device(dev);
1932         struct xe_file *xef = to_xe_file(file);
1933         struct drm_xe_vm_create *args = data;
1934         struct xe_tile *tile;
1935         struct xe_vm *vm;
1936         u32 id, asid;
1937         int err;
1938         u32 flags = 0;
1939
1940         if (XE_IOCTL_DBG(xe, args->extensions))
1941                 return -EINVAL;
1942
1943         if (XE_WA(xe_root_mmio_gt(xe), 14016763929))
1944                 args->flags |= DRM_XE_VM_CREATE_FLAG_SCRATCH_PAGE;
1945
1946         if (XE_IOCTL_DBG(xe, args->flags & DRM_XE_VM_CREATE_FLAG_FAULT_MODE &&
1947                          !xe->info.supports_usm))
1948                 return -EINVAL;
1949
1950         if (XE_IOCTL_DBG(xe, args->reserved[0] || args->reserved[1]))
1951                 return -EINVAL;
1952
1953         if (XE_IOCTL_DBG(xe, args->flags & ~ALL_DRM_XE_VM_CREATE_FLAGS))
1954                 return -EINVAL;
1955
1956         if (XE_IOCTL_DBG(xe, args->flags & DRM_XE_VM_CREATE_FLAG_SCRATCH_PAGE &&
1957                          args->flags & DRM_XE_VM_CREATE_FLAG_FAULT_MODE))
1958                 return -EINVAL;
1959
1960         if (XE_IOCTL_DBG(xe, args->flags & DRM_XE_VM_CREATE_FLAG_COMPUTE_MODE &&
1961                          args->flags & DRM_XE_VM_CREATE_FLAG_FAULT_MODE))
1962                 return -EINVAL;
1963
1964         if (XE_IOCTL_DBG(xe, args->flags & DRM_XE_VM_CREATE_FLAG_FAULT_MODE &&
1965                          xe_device_in_non_fault_mode(xe)))
1966                 return -EINVAL;
1967
1968         if (XE_IOCTL_DBG(xe, !(args->flags & DRM_XE_VM_CREATE_FLAG_FAULT_MODE) &&
1969                          xe_device_in_fault_mode(xe)))
1970                 return -EINVAL;
1971
1975         if (args->flags & DRM_XE_VM_CREATE_FLAG_SCRATCH_PAGE)
1976                 flags |= XE_VM_FLAG_SCRATCH_PAGE;
1977         if (args->flags & DRM_XE_VM_CREATE_FLAG_COMPUTE_MODE)
1978                 flags |= XE_VM_FLAG_COMPUTE_MODE;
1979         if (args->flags & DRM_XE_VM_CREATE_FLAG_ASYNC_DEFAULT)
1980                 flags |= XE_VM_FLAG_ASYNC_DEFAULT;
1981         if (args->flags & DRM_XE_VM_CREATE_FLAG_FAULT_MODE)
1982                 flags |= XE_VM_FLAG_FAULT_MODE;
1983
1984         vm = xe_vm_create(xe, flags);
1985         if (IS_ERR(vm))
1986                 return PTR_ERR(vm);
1987
1988         mutex_lock(&xef->vm.lock);
1989         err = xa_alloc(&xef->vm.xa, &id, vm, xa_limit_32b, GFP_KERNEL);
1990         mutex_unlock(&xef->vm.lock);
1991         if (err) {
1992                 xe_vm_close_and_put(vm);
1993                 return err;
1994         }
1995
1996         if (xe->info.has_asid) {
1997                 mutex_lock(&xe->usm.lock);
1998                 err = xa_alloc_cyclic(&xe->usm.asid_to_vm, &asid, vm,
1999                                       XA_LIMIT(1, XE_MAX_ASID - 1),
2000                                       &xe->usm.next_asid, GFP_KERNEL);
2001                 mutex_unlock(&xe->usm.lock);
2002                 if (err < 0) {
2003                         xe_vm_close_and_put(vm);
2004                         return err;
2005                 }
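                /* xa_alloc_cyclic() returns 1 if the allocation wrapped; that is not an error */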
2006                 err = 0;
2007                 vm->usm.asid = asid;
2008         }
2009
2010         args->vm_id = id;
2011         vm->xef = xef;
2012
2013         /* Account the VM's root page-table BOs against the owning client */
2014         for_each_tile(tile, xe, id)
2015                 if (vm->pt_root[id])
2016                         xe_drm_client_add_bo(vm->xef->client, vm->pt_root[id]->bo);
2017
2018 #if IS_ENABLED(CONFIG_DRM_XE_DEBUG_MEM)
2019         /* Warning: Security issue - never enable by default */
2020         args->reserved[0] = xe_bo_main_addr(vm->pt_root[0]->bo, XE_PAGE_SIZE);
2021 #endif
2022
2023         return 0;
2024 }
2025
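/**
 * xe_vm_destroy_ioctl() - Destroy a VM
 * @dev: DRM device
 * @data: struct drm_xe_vm_destroy IOCTL argument
 * @file: DRM file
 *
 * Drops the per-file handle and closes the VM, unless exec queues still use
 * it, in which case -EBUSY is returned.
 *
 * Return: 0 on success, negative error code on failure.
 */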
2026 int xe_vm_destroy_ioctl(struct drm_device *dev, void *data,
2027                         struct drm_file *file)
2028 {
2029         struct xe_device *xe = to_xe_device(dev);
2030         struct xe_file *xef = to_xe_file(file);
2031         struct drm_xe_vm_destroy *args = data;
2032         struct xe_vm *vm;
2033         int err = 0;
2034
2035         if (XE_IOCTL_DBG(xe, args->pad) ||
2036             XE_IOCTL_DBG(xe, args->reserved[0] || args->reserved[1]))
2037                 return -EINVAL;
2038
2039         mutex_lock(&xef->vm.lock);
2040         vm = xa_load(&xef->vm.xa, args->vm_id);
2041         if (XE_IOCTL_DBG(xe, !vm))
2042                 err = -ENOENT;
2043         else if (XE_IOCTL_DBG(xe, vm->preempt.num_exec_queues))
2044                 err = -EBUSY;
2045         else
2046                 xa_erase(&xef->vm.xa, args->vm_id);
2047         mutex_unlock(&xef->vm.lock);
2048
2049         if (!err)
2050                 xe_vm_close_and_put(vm);
2051
2052         return err;
2053 }
2054
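/* Maps a prefetch region instance from the UAPI to a TTM memory type */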
2055 static const u32 region_to_mem_type[] = {
2056         XE_PL_TT,
2057         XE_PL_VRAM0,
2058         XE_PL_VRAM1,
2059 };
2060
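/*
 * Migrate the backing BO to the requested memory region and (re)bind the VMA
 * on any tile where it is missing or invalidated; otherwise there is nothing
 * to do and the syncs are simply signalled.
 */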
2061 static int xe_vm_prefetch(struct xe_vm *vm, struct xe_vma *vma,
2062                           struct xe_exec_queue *q, u32 region,
2063                           struct xe_sync_entry *syncs, u32 num_syncs,
2064                           bool first_op, bool last_op)
2065 {
2066         struct xe_exec_queue *wait_exec_queue = to_wait_exec_queue(vm, q);
2067         int err;
2068
2069         xe_assert(vm->xe, region < ARRAY_SIZE(region_to_mem_type));
2070
2071         if (!xe_vma_has_no_bo(vma)) {
2072                 err = xe_bo_migrate(xe_vma_bo(vma), region_to_mem_type[region]);
2073                 if (err)
2074                         return err;
2075         }
2076
2077         if (vma->tile_mask != (vma->tile_present & ~vma->usm.tile_invalidated)) {
2078                 return xe_vm_bind(vm, vma, q, xe_vma_bo(vma), syncs, num_syncs,
2079                                   true, first_op, last_op);
2080         } else {
2081                 int i;
2082
2083                 /* Nothing to do, signal fences now */
2084                 if (last_op) {
2085                         for (i = 0; i < num_syncs; i++) {
2086                                 struct dma_fence *fence =
2087                                         xe_exec_queue_last_fence_get(wait_exec_queue, vm);
2088
2089                                 xe_sync_entry_signal(&syncs[i], NULL, fence);
2090                         }
2091                 }
2092
2093                 return 0;
2094         }
2095 }
2096
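/*
 * Mark the VMA destroyed under the userptr notifier lock and, if it was
 * already committed to the gpuvm tree, remove it from the VM.
 */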
2097 static void prep_vma_destroy(struct xe_vm *vm, struct xe_vma *vma,
2098                              bool post_commit)
2099 {
2100         down_read(&vm->userptr.notifier_lock);
2101         vma->gpuva.flags |= XE_VMA_DESTROYED;
2102         up_read(&vm->userptr.notifier_lock);
2103         if (post_commit)
2104                 xe_vm_remove_vma(vm, vma);
2105 }
2106
2107 #undef ULL
2108 #define ULL     unsigned long long
2109
2110 #if IS_ENABLED(CONFIG_DRM_XE_DEBUG_VM)
2111 static void print_op(struct xe_device *xe, struct drm_gpuva_op *op)
2112 {
2113         struct xe_vma *vma;
2114
2115         switch (op->op) {
2116         case DRM_GPUVA_OP_MAP:
2117                 vm_dbg(&xe->drm, "MAP: addr=0x%016llx, range=0x%016llx",
2118                        (ULL)op->map.va.addr, (ULL)op->map.va.range);
2119                 break;
2120         case DRM_GPUVA_OP_REMAP:
2121                 vma = gpuva_to_vma(op->remap.unmap->va);
2122                 vm_dbg(&xe->drm, "REMAP:UNMAP: addr=0x%016llx, range=0x%016llx, keep=%d",
2123                        (ULL)xe_vma_start(vma), (ULL)xe_vma_size(vma),
2124                        op->remap.unmap->keep ? 1 : 0);
2125                 if (op->remap.prev)
2126                         vm_dbg(&xe->drm,
2127                                "REMAP:PREV: addr=0x%016llx, range=0x%016llx",
2128                                (ULL)op->remap.prev->va.addr,
2129                                (ULL)op->remap.prev->va.range);
2130                 if (op->remap.next)
2131                         vm_dbg(&xe->drm,
2132                                "REMAP:NEXT: addr=0x%016llx, range=0x%016llx",
2133                                (ULL)op->remap.next->va.addr,
2134                                (ULL)op->remap.next->va.range);
2135                 break;
2136         case DRM_GPUVA_OP_UNMAP:
2137                 vma = gpuva_to_vma(op->unmap.va);
2138                 vm_dbg(&xe->drm, "UNMAP: addr=0x%016llx, range=0x%016llx, keep=%d",
2139                        (ULL)xe_vma_start(vma), (ULL)xe_vma_size(vma),
2140                        op->unmap.keep ? 1 : 0);
2141                 break;
2142         case DRM_GPUVA_OP_PREFETCH:
2143                 vma = gpuva_to_vma(op->prefetch.va);
2144                 vm_dbg(&xe->drm, "PREFETCH: addr=0x%016llx, range=0x%016llx",
2145                        (ULL)xe_vma_start(vma), (ULL)xe_vma_size(vma));
2146                 break;
2147         default:
2148                 drm_warn(&xe->drm, "NOT POSSIBLE");
2149         }
2150 }
2151 #else
2152 static void print_op(struct xe_device *xe, struct drm_gpuva_op *op)
2153 {
2154 }
2155 #endif
2156
2157 /*
2158  * Create operations list from IOCTL arguments, setup operations fields so parse
2159  * and commit steps are decoupled from IOCTL arguments. This step can fail.
2160  */
2161 static struct drm_gpuva_ops *
2162 vm_bind_ioctl_ops_create(struct xe_vm *vm, struct xe_bo *bo,
2163                          u64 bo_offset_or_userptr, u64 addr, u64 range,
2164                          u32 operation, u32 flags, u8 tile_mask,
2165                          u32 prefetch_region)
2166 {
2167         struct drm_gem_object *obj = bo ? &bo->ttm.base : NULL;
2168         struct drm_gpuva_ops *ops;
2169         struct drm_gpuva_op *__op;
2170         struct xe_vma_op *op;
2171         struct drm_gpuvm_bo *vm_bo;
2172         int err;
2173
2174         lockdep_assert_held_write(&vm->lock);
2175
2176         vm_dbg(&vm->xe->drm,
2177                "op=%d, addr=0x%016llx, range=0x%016llx, bo_offset_or_userptr=0x%016llx",
2178                operation, (ULL)addr, (ULL)range,
2179                (ULL)bo_offset_or_userptr);
2180
2181         switch (operation) {
2182         case DRM_XE_VM_BIND_OP_MAP:
2183         case DRM_XE_VM_BIND_OP_MAP_USERPTR:
2184                 ops = drm_gpuvm_sm_map_ops_create(&vm->gpuvm, addr, range,
2185                                                   obj, bo_offset_or_userptr);
2186                 break;
2187         case DRM_XE_VM_BIND_OP_UNMAP:
2188                 ops = drm_gpuvm_sm_unmap_ops_create(&vm->gpuvm, addr, range);
2189                 break;
2190         case DRM_XE_VM_BIND_OP_PREFETCH:
2191                 ops = drm_gpuvm_prefetch_ops_create(&vm->gpuvm, addr, range);
2192                 break;
2193         case DRM_XE_VM_BIND_OP_UNMAP_ALL:
2194                 xe_assert(vm->xe, bo);
2195
2196                 err = xe_bo_lock(bo, true);
2197                 if (err)
2198                         return ERR_PTR(err);
2199
2200                 vm_bo = drm_gpuvm_bo_obtain(&vm->gpuvm, obj);
2201                 if (IS_ERR(vm_bo)) {
2202                         xe_bo_unlock(bo);
                             return ERR_CAST(vm_bo);
                     }
2203
2204                 ops = drm_gpuvm_bo_unmap_ops_create(vm_bo);
2205                 drm_gpuvm_bo_put(vm_bo);
2206                 xe_bo_unlock(bo);
2207                 break;
2208         default:
2209                 drm_warn(&vm->xe->drm, "NOT POSSIBLE");
2210                 ops = ERR_PTR(-EINVAL);
2211         }
2212         if (IS_ERR(ops))
2213                 return ops;
2214
2215 #ifdef TEST_VM_ASYNC_OPS_ERROR
2216         if (operation & FORCE_ASYNC_OP_ERROR) {
2217                 op = list_first_entry_or_null(&ops->list, struct xe_vma_op,
2218                                               base.entry);
2219                 if (op)
2220                         op->inject_error = true;
2221         }
2222 #endif
2223
2224         drm_gpuva_for_each_op(__op, ops) {
2225                 struct xe_vma_op *op = gpuva_op_to_vma_op(__op);
2226
2227                 op->tile_mask = tile_mask;
2228                 if (__op->op == DRM_GPUVA_OP_MAP) {
2229                         op->map.immediate =
2230                                 flags & DRM_XE_VM_BIND_FLAG_IMMEDIATE;
2231                         op->map.read_only =
2232                                 flags & DRM_XE_VM_BIND_FLAG_READONLY;
2233                         op->map.is_null = flags & DRM_XE_VM_BIND_FLAG_NULL;
2234                 } else if (__op->op == DRM_GPUVA_OP_PREFETCH) {
2235                         op->prefetch.region = prefetch_region;
2236                 }
2237
2238                 print_op(vm->xe, __op);
2239         }
2240
2241         return ops;
2242 }
2243
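/*
 * Create a VMA for a MAP operation or for the prev/next remainder of a REMAP.
 * Userptr VMAs get their pages pinned immediately; VMAs backed by external
 * (non VM-private) BOs are added to the extobj list and get preempt fences
 * installed.
 */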
2244 static struct xe_vma *new_vma(struct xe_vm *vm, struct drm_gpuva_op_map *op,
2245                               u8 tile_mask, bool read_only, bool is_null)
2246 {
2247         struct xe_bo *bo = op->gem.obj ? gem_to_xe_bo(op->gem.obj) : NULL;
2248         struct xe_vma *vma;
2249         int err;
2250
2251         lockdep_assert_held_write(&vm->lock);
2252
2253         if (bo) {
2254                 err = xe_bo_lock(bo, true);
2255                 if (err)
2256                         return ERR_PTR(err);
2257         }
2258         vma = xe_vma_create(vm, bo, op->gem.offset,
2259                             op->va.addr, op->va.addr +
2260                             op->va.range - 1, read_only, is_null,
2261                             tile_mask);
2262         if (bo)
2263                 xe_bo_unlock(bo);
2264
2265         if (xe_vma_is_userptr(vma)) {
2266                 err = xe_vma_userptr_pin_pages(vma);
2267                 if (err) {
2268                         prep_vma_destroy(vm, vma, false);
2269                         xe_vma_destroy_unlocked(vma);
2270                         return ERR_PTR(err);
2271                 }
2272         } else if (!xe_vma_has_no_bo(vma) && !bo->vm) {
2273                 vm_insert_extobj(vm, vma);
2274                 err = add_preempt_fences(vm, bo);
2275                 if (err) {
2276                         prep_vma_destroy(vm, vma, false);
2277                         xe_vma_destroy_unlocked(vma);
2278                         return ERR_PTR(err);
2279                 }
2280         }
2281
2282         return vma;
2283 }
2284
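/*
 * Largest page size the VMA is known to be mapped with; REMAP parsing uses
 * this to decide whether an untouched prev/next remainder is still aligned to
 * that size so its rebind can be skipped.
 */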
2285 static u64 xe_vma_max_pte_size(struct xe_vma *vma)
2286 {
2287         if (vma->gpuva.flags & XE_VMA_PTE_1G)
2288                 return SZ_1G;
2289         else if (vma->gpuva.flags & XE_VMA_PTE_2M)
2290                 return SZ_2M;
2291
2292         return SZ_4K;
2293 }
2294
2295 static u64 xe_vma_set_pte_size(struct xe_vma *vma, u64 size)
2296 {
2297         switch (size) {
2298         case SZ_1G:
2299                 vma->gpuva.flags |= XE_VMA_PTE_1G;
2300                 break;
2301         case SZ_2M:
2302                 vma->gpuva.flags |= XE_VMA_PTE_2M;
2303                 break;
2304         }
2305
2306         return SZ_4K;
2307 }
2308
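/*
 * Commit an operation's VMA changes to the gpuvm tree and record which parts
 * were committed so vm_bind_ioctl_ops_unwind() can roll them back on a later
 * failure.
 */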
2309 static int xe_vma_op_commit(struct xe_vm *vm, struct xe_vma_op *op)
2310 {
2311         int err = 0;
2312
2313         lockdep_assert_held_write(&vm->lock);
2314
2315         switch (op->base.op) {
2316         case DRM_GPUVA_OP_MAP:
2317                 err |= xe_vm_insert_vma(vm, op->map.vma);
2318                 if (!err)
2319                         op->flags |= XE_VMA_OP_COMMITTED;
2320                 break;
2321         case DRM_GPUVA_OP_REMAP:
2322         {
2323                 u8 tile_present =
2324                         gpuva_to_vma(op->base.remap.unmap->va)->tile_present;
2325
2326                 prep_vma_destroy(vm, gpuva_to_vma(op->base.remap.unmap->va),
2327                                  true);
2328                 op->flags |= XE_VMA_OP_COMMITTED;
2329
2330                 if (op->remap.prev) {
2331                         err |= xe_vm_insert_vma(vm, op->remap.prev);
2332                         if (!err)
2333                                 op->flags |= XE_VMA_OP_PREV_COMMITTED;
2334                         if (!err && op->remap.skip_prev) {
2335                                 op->remap.prev->tile_present =
2336                                         tile_present;
2337                                 op->remap.prev = NULL;
2338                         }
2339                 }
2340                 if (op->remap.next) {
2341                         err |= xe_vm_insert_vma(vm, op->remap.next);
2342                         if (!err)
2343                                 op->flags |= XE_VMA_OP_NEXT_COMMITTED;
2344                         if (!err && op->remap.skip_next) {
2345                                 op->remap.next->tile_present =
2346                                         tile_present;
2347                                 op->remap.next = NULL;
2348                         }
2349                 }
2350
2351                 /* Adjust for partial unbind after removing VMA from VM */
2352                 if (!err) {
2353                         op->base.remap.unmap->va->va.addr = op->remap.start;
2354                         op->base.remap.unmap->va->va.range = op->remap.range;
2355                 }
2356                 break;
2357         }
2358         case DRM_GPUVA_OP_UNMAP:
2359                 prep_vma_destroy(vm, gpuva_to_vma(op->base.unmap.va), true);
2360                 op->flags |= XE_VMA_OP_COMMITTED;
2361                 break;
2362         case DRM_GPUVA_OP_PREFETCH:
2363                 op->flags |= XE_VMA_OP_COMMITTED;
2364                 break;
2365         default:
2366                 drm_warn(&vm->xe->drm, "NOT POSSIBLE");
2367         }
2368
2369         return err;
2370 }
2371
2372
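/*
 * Turn a list of drm_gpuva_ops into xe_vma_ops: create VMAs for MAP and for
 * REMAP prev/next remainders, adjust the partial-unbind range, attach the
 * syncs to the first and last operation and commit each op as it is parsed.
 */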
2373 static int vm_bind_ioctl_ops_parse(struct xe_vm *vm, struct xe_exec_queue *q,
2374                                    struct drm_gpuva_ops *ops,
2375                                    struct xe_sync_entry *syncs, u32 num_syncs,
2376                                    struct list_head *ops_list, bool last,
2377                                    bool async)
2378 {
2379         struct xe_vma_op *last_op = NULL;
2380         struct drm_gpuva_op *__op;
2381         int err = 0;
2382
2383         lockdep_assert_held_write(&vm->lock);
2384
2385         drm_gpuva_for_each_op(__op, ops) {
2386                 struct xe_vma_op *op = gpuva_op_to_vma_op(__op);
2387                 bool first = list_empty(ops_list);
2388
2389                 INIT_LIST_HEAD(&op->link);
2390                 list_add_tail(&op->link, ops_list);
2391
2392                 if (first) {
2393                         op->flags |= XE_VMA_OP_FIRST;
2394                         op->num_syncs = num_syncs;
2395                         op->syncs = syncs;
2396                 }
2397
2398                 op->q = q;
2399
2400                 switch (op->base.op) {
2401                 case DRM_GPUVA_OP_MAP:
2402                 {
2403                         struct xe_vma *vma;
2404
2405                         vma = new_vma(vm, &op->base.map,
2406                                       op->tile_mask, op->map.read_only,
2407                                       op->map.is_null);
2408                         if (IS_ERR(vma))
2409                                 return PTR_ERR(vma);
2410
2411                         op->map.vma = vma;
2412                         break;
2413                 }
2414                 case DRM_GPUVA_OP_REMAP:
2415                 {
2416                         struct xe_vma *old =
2417                                 gpuva_to_vma(op->base.remap.unmap->va);
2418
2419                         op->remap.start = xe_vma_start(old);
2420                         op->remap.range = xe_vma_size(old);
2421
2422                         if (op->base.remap.prev) {
2423                                 struct xe_vma *vma;
2424                                 bool read_only =
2425                                         op->base.remap.unmap->va->flags &
2426                                         XE_VMA_READ_ONLY;
2427                                 bool is_null =
2428                                         op->base.remap.unmap->va->flags &
2429                                         DRM_GPUVA_SPARSE;
2430
2431                                 vma = new_vma(vm, op->base.remap.prev,
2432                                               op->tile_mask, read_only,
2433                                               is_null);
2434                                 if (IS_ERR(vma))
2435                                         return PTR_ERR(vma);
2436
2437                                 op->remap.prev = vma;
2438
2439                                 /*
2440                                  * Userptr creates a new SG mapping so
2441                                  * we must also rebind.
2442                                  */
2443                                 op->remap.skip_prev = !xe_vma_is_userptr(old) &&
2444                                         IS_ALIGNED(xe_vma_end(vma),
2445                                                    xe_vma_max_pte_size(old));
2446                                 if (op->remap.skip_prev) {
2447                                         xe_vma_set_pte_size(vma, xe_vma_max_pte_size(old));
2448                                         op->remap.range -=
2449                                                 xe_vma_end(vma) -
2450                                                 xe_vma_start(old);
2451                                         op->remap.start = xe_vma_end(vma);
2452                                 }
2453                         }
2454
2455                         if (op->base.remap.next) {
2456                                 struct xe_vma *vma;
2457                                 bool read_only =
2458                                         op->base.remap.unmap->va->flags &
2459                                         XE_VMA_READ_ONLY;
2460
2461                                 bool is_null =
2462                                         op->base.remap.unmap->va->flags &
2463                                         DRM_GPUVA_SPARSE;
2464
2465                                 vma = new_vma(vm, op->base.remap.next,
2466                                               op->tile_mask, read_only,
2467                                               is_null);
2468                                 if (IS_ERR(vma))
2469                                         return PTR_ERR(vma);
2470
2471                                 op->remap.next = vma;
2472
2473                                 /*
2474                                  * Userptr creates a new SG mapping so
2475                                  * we must also rebind.
2476                                  */
2477                                 op->remap.skip_next = !xe_vma_is_userptr(old) &&
2478                                         IS_ALIGNED(xe_vma_start(vma),
2479                                                    xe_vma_max_pte_size(old));
2480                                 if (op->remap.skip_next) {
2481                                         xe_vma_set_pte_size(vma, xe_vma_max_pte_size(old));
2482                                         op->remap.range -=
2483                                                 xe_vma_end(old) -
2484                                                 xe_vma_start(vma);
2485                                 }
2486                         }
2487                         break;
2488                 }
2489                 case DRM_GPUVA_OP_UNMAP:
2490                 case DRM_GPUVA_OP_PREFETCH:
2491                         /* Nothing to do */
2492                         break;
2493                 default:
2494                         drm_warn(&vm->xe->drm, "NOT POSSIBLE");
2495                 }
2496
2497                 last_op = op;
2498
2499                 err = xe_vma_op_commit(vm, op);
2500                 if (err)
2501                         return err;
2502         }
2503
2504         /* FIXME: Unhandled corner case */
2505         XE_WARN_ON(!last_op && last && !list_empty(ops_list));
2506
2507         if (!last_op)
2508                 return 0;
2509
2510         last_op->ops = ops;
2511         if (last) {
2512                 last_op->flags |= XE_VMA_OP_LAST;
2513                 last_op->num_syncs = num_syncs;
2514                 last_op->syncs = syncs;
2515         }
2516
2517         return 0;
2518 }
2519
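/*
 * Lock the VM and the VMA's BO through drm_exec, then issue the GPU
 * page-table update(s) for a single operation.
 */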
2520 static int op_execute(struct drm_exec *exec, struct xe_vm *vm,
2521                       struct xe_vma *vma, struct xe_vma_op *op)
2522 {
2523         int err;
2524
2525         lockdep_assert_held_write(&vm->lock);
2526
2527         err = xe_vm_prepare_vma(exec, vma, 1);
2528         if (err)
2529                 return err;
2530
2531         xe_vm_assert_held(vm);
2532         xe_bo_assert_held(xe_vma_bo(vma));
2533
2534         switch (op->base.op) {
2535         case DRM_GPUVA_OP_MAP:
2536                 err = xe_vm_bind(vm, vma, op->q, xe_vma_bo(vma),
2537                                  op->syncs, op->num_syncs,
2538                                  op->map.immediate || !xe_vm_in_fault_mode(vm),
2539                                  op->flags & XE_VMA_OP_FIRST,
2540                                  op->flags & XE_VMA_OP_LAST);
2541                 break;
2542         case DRM_GPUVA_OP_REMAP:
2543         {
2544                 bool prev = !!op->remap.prev;
2545                 bool next = !!op->remap.next;
2546
2547                 if (!op->remap.unmap_done) {
2548                         if (prev || next)
2549                                 vma->gpuva.flags |= XE_VMA_FIRST_REBIND;
2550                         err = xe_vm_unbind(vm, vma, op->q, op->syncs,
2551                                            op->num_syncs,
2552                                            op->flags & XE_VMA_OP_FIRST,
2553                                            op->flags & XE_VMA_OP_LAST &&
2554                                            !prev && !next);
2555                         if (err)
2556                                 break;
2557                         op->remap.unmap_done = true;
2558                 }
2559
2560                 if (prev) {
2561                         op->remap.prev->gpuva.flags |= XE_VMA_LAST_REBIND;
2562                         err = xe_vm_bind(vm, op->remap.prev, op->q,
2563                                          xe_vma_bo(op->remap.prev), op->syncs,
2564                                          op->num_syncs, true, false,
2565                                          op->flags & XE_VMA_OP_LAST && !next);
2566                         op->remap.prev->gpuva.flags &= ~XE_VMA_LAST_REBIND;
2567                         if (err)
2568                                 break;
2569                         op->remap.prev = NULL;
2570                 }
2571
2572                 if (next) {
2573                         op->remap.next->gpuva.flags |= XE_VMA_LAST_REBIND;
2574                         err = xe_vm_bind(vm, op->remap.next, op->q,
2575                                          xe_vma_bo(op->remap.next),
2576                                          op->syncs, op->num_syncs,
2577                                          true, false,
2578                                          op->flags & XE_VMA_OP_LAST);
2579                         op->remap.next->gpuva.flags &= ~XE_VMA_LAST_REBIND;
2580                         if (err)
2581                                 break;
2582                         op->remap.next = NULL;
2583                 }
2584
2585                 break;
2586         }
2587         case DRM_GPUVA_OP_UNMAP:
2588                 err = xe_vm_unbind(vm, vma, op->q, op->syncs,
2589                                    op->num_syncs, op->flags & XE_VMA_OP_FIRST,
2590                                    op->flags & XE_VMA_OP_LAST);
2591                 break;
2592         case DRM_GPUVA_OP_PREFETCH:
2593                 err = xe_vm_prefetch(vm, vma, op->q, op->prefetch.region,
2594                                      op->syncs, op->num_syncs,
2595                                      op->flags & XE_VMA_OP_FIRST,
2596                                      op->flags & XE_VMA_OP_LAST);
2597                 break;
2598         default:
2599                 drm_warn(&vm->xe->drm, "NOT POSSIBLE");
2600         }
2601
2602         if (err)
2603                 trace_xe_vma_fail(vma);
2604
2605         return err;
2606 }
2607
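/*
 * Execute a single operation inside a drm_exec retry loop, re-pinning userptr
 * pages and retrying if the operation raced with an invalidation (-EAGAIN).
 */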
2608 static int __xe_vma_op_execute(struct xe_vm *vm, struct xe_vma *vma,
2609                                struct xe_vma_op *op)
2610 {
2611         struct drm_exec exec;
2612         int err;
2613
2614 retry_userptr:
2615         drm_exec_init(&exec, DRM_EXEC_INTERRUPTIBLE_WAIT);
2616         drm_exec_until_all_locked(&exec) {
2617                 err = op_execute(&exec, vm, vma, op);
2618                 drm_exec_retry_on_contention(&exec);
2619                 if (err)
2620                         break;
2621         }
2622         drm_exec_fini(&exec);
2623
2624         if (err == -EAGAIN && xe_vma_is_userptr(vma)) {
2625                 lockdep_assert_held_write(&vm->lock);
2626                 err = xe_vma_userptr_pin_pages(vma);
2627                 if (!err)
2628                         goto retry_userptr;
2629
2630                 trace_xe_vma_fail(vma);
2631         }
2632
2633         return err;
2634 }
2635
2636 static int xe_vma_op_execute(struct xe_vm *vm, struct xe_vma_op *op)
2637 {
2638         int ret = 0;
2639
2640         lockdep_assert_held_write(&vm->lock);
2641
2642 #ifdef TEST_VM_ASYNC_OPS_ERROR
2643         if (op->inject_error) {
2644                 op->inject_error = false;
2645                 return -ENOMEM;
2646         }
2647 #endif
2648
2649         switch (op->base.op) {
2650         case DRM_GPUVA_OP_MAP:
2651                 ret = __xe_vma_op_execute(vm, op->map.vma, op);
2652                 break;
2653         case DRM_GPUVA_OP_REMAP:
2654         {
2655                 struct xe_vma *vma;
2656
2657                 if (!op->remap.unmap_done)
2658                         vma = gpuva_to_vma(op->base.remap.unmap->va);
2659                 else if (op->remap.prev)
2660                         vma = op->remap.prev;
2661                 else
2662                         vma = op->remap.next;
2663
2664                 ret = __xe_vma_op_execute(vm, vma, op);
2665                 break;
2666         }
2667         case DRM_GPUVA_OP_UNMAP:
2668                 ret = __xe_vma_op_execute(vm, gpuva_to_vma(op->base.unmap.va),
2669                                           op);
2670                 break;
2671         case DRM_GPUVA_OP_PREFETCH:
2672                 ret = __xe_vma_op_execute(vm,
2673                                           gpuva_to_vma(op->base.prefetch.va),
2674                                           op);
2675                 break;
2676         default:
2677                 drm_warn(&vm->xe->drm, "NOT POSSIBLE");
2678         }
2679
2680         return ret;
2681 }
2682
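/*
 * Free per-operation state; for the last operation of an ioctl this also
 * releases the syncs, the exec queue reference and the VM reference.
 */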
2683 static void xe_vma_op_cleanup(struct xe_vm *vm, struct xe_vma_op *op)
2684 {
2685         bool last = op->flags & XE_VMA_OP_LAST;
2686
2687         if (last) {
2688                 while (op->num_syncs--)
2689                         xe_sync_entry_cleanup(&op->syncs[op->num_syncs]);
2690                 kfree(op->syncs);
2691                 if (op->q)
2692                         xe_exec_queue_put(op->q);
2693         }
2694         if (!list_empty(&op->link))
2695                 list_del(&op->link);
2696         if (op->ops)
2697                 drm_gpuva_ops_free(&vm->gpuvm, op->ops);
2698         if (last)
2699                 xe_vm_put(vm);
2700 }
2701
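/*
 * Undo the effects of xe_vma_op_commit() for an operation that was committed
 * before a later operation in the same ioctl failed.
 */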
2702 static void xe_vma_op_unwind(struct xe_vm *vm, struct xe_vma_op *op,
2703                              bool post_commit, bool prev_post_commit,
2704                              bool next_post_commit)
2705 {
2706         lockdep_assert_held_write(&vm->lock);
2707
2708         switch (op->base.op) {
2709         case DRM_GPUVA_OP_MAP:
2710                 if (op->map.vma) {
2711                         prep_vma_destroy(vm, op->map.vma, post_commit);
2712                         xe_vma_destroy_unlocked(op->map.vma);
2713                 }
2714                 break;
2715         case DRM_GPUVA_OP_UNMAP:
2716         {
2717                 struct xe_vma *vma = gpuva_to_vma(op->base.unmap.va);
2718
2719                 if (vma) {
2720                         down_read(&vm->userptr.notifier_lock);
2721                         vma->gpuva.flags &= ~XE_VMA_DESTROYED;
2722                         up_read(&vm->userptr.notifier_lock);
2723                         if (post_commit)
2724                                 xe_vm_insert_vma(vm, vma);
2725                 }
2726                 break;
2727         }
2728         case DRM_GPUVA_OP_REMAP:
2729         {
2730                 struct xe_vma *vma = gpuva_to_vma(op->base.remap.unmap->va);
2731
2732                 if (op->remap.prev) {
2733                         prep_vma_destroy(vm, op->remap.prev, prev_post_commit);
2734                         xe_vma_destroy_unlocked(op->remap.prev);
2735                 }
2736                 if (op->remap.next) {
2737                         prep_vma_destroy(vm, op->remap.next, next_post_commit);
2738                         xe_vma_destroy_unlocked(op->remap.next);
2739                 }
2740                 if (vma) {
2741                         down_read(&vm->userptr.notifier_lock);
2742                         vma->gpuva.flags &= ~XE_VMA_DESTROYED;
2743                         up_read(&vm->userptr.notifier_lock);
2744                         if (post_commit)
2745                                 xe_vm_insert_vma(vm, vma);
2746                 }
2747                 break;
2748         }
2749         case DRM_GPUVA_OP_PREFETCH:
2750                 /* Nothing to do */
2751                 break;
2752         default:
2753                 drm_warn(&vm->xe->drm, "NOT POSSIBLE");
2754         }
2755 }
2756
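/*
 * Walk every ops list backwards, unwinding each committed operation and
 * freeing the drm_gpuva_ops.
 */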
2757 static void vm_bind_ioctl_ops_unwind(struct xe_vm *vm,
2758                                      struct drm_gpuva_ops **ops,
2759                                      int num_ops_list)
2760 {
2761         int i;
2762
2763         for (i = num_ops_list - 1; i >= 0; --i) {
2764                 struct drm_gpuva_ops *__ops = ops[i];
2765                 struct drm_gpuva_op *__op;
2766
2767                 if (!__ops)
2768                         continue;
2769
2770                 drm_gpuva_for_each_op_reverse(__op, __ops) {
2771                         struct xe_vma_op *op = gpuva_op_to_vma_op(__op);
2772
2773                         xe_vma_op_unwind(vm, op,
2774                                          op->flags & XE_VMA_OP_COMMITTED,
2775                                          op->flags & XE_VMA_OP_PREV_COMMITTED,
2776                                          op->flags & XE_VMA_OP_NEXT_COMMITTED);
2777                 }
2778
2779                 drm_gpuva_ops_free(&vm->gpuvm, __ops);
2780         }
2781 }
2782
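/*
 * Execute the parsed operations in order. On failure the VM is killed (see
 * FIXME below) rather than attempting fine-grained error recovery.
 */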
2783 static int vm_bind_ioctl_ops_execute(struct xe_vm *vm,
2784                                      struct list_head *ops_list)
2785 {
2786         struct xe_vma_op *op, *next;
2787         int err;
2788
2789         lockdep_assert_held_write(&vm->lock);
2790
2791         list_for_each_entry_safe(op, next, ops_list, link) {
2792                 err = xe_vma_op_execute(vm, op);
2793                 if (err) {
2794                         drm_warn(&vm->xe->drm, "VM op(%d) failed with %d",
2795                                  op->base.op, err);
2796                         /*
2797                          * FIXME: Killing VM rather than proper error handling
2798                          */
2799                         xe_vm_kill(vm);
2800                         return -ENOSPC;
2801                 }
2802                 xe_vma_op_cleanup(vm, op);
2803         }
2804
2805         return 0;
2806 }
2807
2808 #ifdef TEST_VM_ASYNC_OPS_ERROR
2809 #define SUPPORTED_FLAGS \
2810         (FORCE_ASYNC_OP_ERROR | DRM_XE_VM_BIND_FLAG_ASYNC | \
2811          DRM_XE_VM_BIND_FLAG_READONLY | DRM_XE_VM_BIND_FLAG_IMMEDIATE | \
2812          DRM_XE_VM_BIND_FLAG_NULL | 0xffff)
2813 #else
2814 #define SUPPORTED_FLAGS \
2815         (DRM_XE_VM_BIND_FLAG_ASYNC | DRM_XE_VM_BIND_FLAG_READONLY | \
2816          DRM_XE_VM_BIND_FLAG_IMMEDIATE | DRM_XE_VM_BIND_FLAG_NULL | \
2817          0xffff)
2818 #endif
2819 #define XE_64K_PAGE_MASK 0xffffull
2820
2821 #define MAX_BINDS       512     /* FIXME: Picking random upper limit */
2822
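/*
 * Copy in and sanity check the array of bind operations from userspace:
 * flag/op combinations, page alignment and consistent async usage across all
 * operations.
 */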
2823 static int vm_bind_ioctl_check_args(struct xe_device *xe,
2824                                     struct drm_xe_vm_bind *args,
2825                                     struct drm_xe_vm_bind_op **bind_ops,
2826                                     bool *async)
2827 {
2828         int err;
2829         int i;
2830
2831         if (XE_IOCTL_DBG(xe, args->extensions) ||
2832             XE_IOCTL_DBG(xe, !args->num_binds) ||
2833             XE_IOCTL_DBG(xe, args->num_binds > MAX_BINDS))
2834                 return -EINVAL;
2835
2836         if (args->num_binds > 1) {
2837                 u64 __user *bind_user =
2838                         u64_to_user_ptr(args->vector_of_binds);
2839
2840                 *bind_ops = kmalloc(sizeof(struct drm_xe_vm_bind_op) *
2841                                     args->num_binds, GFP_KERNEL);
2842                 if (!*bind_ops)
2843                         return -ENOMEM;
2844
2845                 err = __copy_from_user(*bind_ops, bind_user,
2846                                        sizeof(struct drm_xe_vm_bind_op) *
2847                                        args->num_binds);
2848                 if (XE_IOCTL_DBG(xe, err)) {
2849                         err = -EFAULT;
2850                         goto free_bind_ops;
2851                 }
2852         } else {
2853                 *bind_ops = &args->bind;
2854         }
2855
2856         for (i = 0; i < args->num_binds; ++i) {
2857                 u64 range = (*bind_ops)[i].range;
2858                 u64 addr = (*bind_ops)[i].addr;
2859                 u32 op = (*bind_ops)[i].op;
2860                 u32 flags = (*bind_ops)[i].flags;
2861                 u32 obj = (*bind_ops)[i].obj;
2862                 u64 obj_offset = (*bind_ops)[i].obj_offset;
2863                 u32 prefetch_region = (*bind_ops)[i].prefetch_mem_region_instance;
2864                 bool is_null = flags & DRM_XE_VM_BIND_FLAG_NULL;
2865
2866                 if (i == 0) {
2867                         *async = !!(flags & DRM_XE_VM_BIND_FLAG_ASYNC);
2868                         if (XE_IOCTL_DBG(xe, !*async && args->num_syncs)) {
2869                                 err = -EINVAL;
2870                                 goto free_bind_ops;
2871                         }
2872                 } else if (XE_IOCTL_DBG(xe, *async !=
2873                                         !!(flags & DRM_XE_VM_BIND_FLAG_ASYNC))) {
2874                         err = -EINVAL;
2875                         goto free_bind_ops;
2876                 }
2877
2878                 if (XE_IOCTL_DBG(xe, op > DRM_XE_VM_BIND_OP_PREFETCH) ||
2879                     XE_IOCTL_DBG(xe, flags & ~SUPPORTED_FLAGS) ||
2880                     XE_IOCTL_DBG(xe, obj && is_null) ||
2881                     XE_IOCTL_DBG(xe, obj_offset && is_null) ||
2882                     XE_IOCTL_DBG(xe, op != DRM_XE_VM_BIND_OP_MAP &&
2883                                  is_null) ||
2884                     XE_IOCTL_DBG(xe, !obj &&
2885                                  op == DRM_XE_VM_BIND_OP_MAP &&
2886                                  !is_null) ||
2887                     XE_IOCTL_DBG(xe, !obj &&
2888                                  op == DRM_XE_VM_BIND_OP_UNMAP_ALL) ||
2889                     XE_IOCTL_DBG(xe, addr &&
2890                                  op == DRM_XE_VM_BIND_OP_UNMAP_ALL) ||
2891                     XE_IOCTL_DBG(xe, range &&
2892                                  op == DRM_XE_VM_BIND_OP_UNMAP_ALL) ||
2893                     XE_IOCTL_DBG(xe, obj &&
2894                                  op == DRM_XE_VM_BIND_OP_MAP_USERPTR) ||
2895                     XE_IOCTL_DBG(xe, obj &&
2896                                  op == DRM_XE_VM_BIND_OP_PREFETCH) ||
2897                     XE_IOCTL_DBG(xe, prefetch_region &&
2898                                  op != DRM_XE_VM_BIND_OP_PREFETCH) ||
2899                     XE_IOCTL_DBG(xe, !(BIT(prefetch_region) &
2900                                        xe->info.mem_region_mask)) ||
2901                     XE_IOCTL_DBG(xe, obj &&
2902                                  op == DRM_XE_VM_BIND_OP_UNMAP)) {
2903                         err = -EINVAL;
2904                         goto free_bind_ops;
2905                 }
2906
2907                 if (XE_IOCTL_DBG(xe, obj_offset & ~PAGE_MASK) ||
2908                     XE_IOCTL_DBG(xe, addr & ~PAGE_MASK) ||
2909                     XE_IOCTL_DBG(xe, range & ~PAGE_MASK) ||
2910                     XE_IOCTL_DBG(xe, !range &&
2911                                  op != DRM_XE_VM_BIND_OP_UNMAP_ALL)) {
2912                         err = -EINVAL;
2913                         goto free_bind_ops;
2914                 }
2915         }
2916
2917         return 0;
2918
2919 free_bind_ops:
2920         if (args->num_binds > 1)
2921                 kfree(*bind_ops);
2922         return err;
2923 }
2924
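/**
 * xe_vm_bind_ioctl() - Bind or unbind memory in a VM
 * @dev: DRM device
 * @data: struct drm_xe_vm_bind IOCTL argument
 * @file: DRM file
 *
 * Validates the bind operations, looks up the VM, exec queue, BOs and syncs,
 * builds the GPUVA operation lists and executes them.
 *
 * Return: 0 on success, negative error code on failure.
 */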
2925 int xe_vm_bind_ioctl(struct drm_device *dev, void *data, struct drm_file *file)
2926 {
2927         struct xe_device *xe = to_xe_device(dev);
2928         struct xe_file *xef = to_xe_file(file);
2929         struct drm_xe_vm_bind *args = data;
2930         struct drm_xe_sync __user *syncs_user;
2931         struct xe_bo **bos = NULL;
2932         struct drm_gpuva_ops **ops = NULL;
2933         struct xe_vm *vm;
2934         struct xe_exec_queue *q = NULL;
2935         u32 num_syncs;
2936         struct xe_sync_entry *syncs = NULL;
2937         struct drm_xe_vm_bind_op *bind_ops;
2938         LIST_HEAD(ops_list);
2939         bool async;
2940         int err;
2941         int i;
2942
2943         err = vm_bind_ioctl_check_args(xe, args, &bind_ops, &async);
2944         if (err)
2945                 return err;
2946
2947         if (args->exec_queue_id) {
2948                 q = xe_exec_queue_lookup(xef, args->exec_queue_id);
2949                 if (XE_IOCTL_DBG(xe, !q)) {
2950                         err = -ENOENT;
2951                         goto free_objs;
2952                 }
2953
2954                 if (XE_IOCTL_DBG(xe, !(q->flags & EXEC_QUEUE_FLAG_VM))) {
2955                         err = -EINVAL;
2956                         goto put_exec_queue;
2957                 }
2958
2959                 if (XE_IOCTL_DBG(xe, async !=
2960                                  !!(q->flags & EXEC_QUEUE_FLAG_VM_ASYNC))) {
2961                         err = -EINVAL;
2962                         goto put_exec_queue;
2963                 }
2964         }
2965
2966         vm = xe_vm_lookup(xef, args->vm_id);
2967         if (XE_IOCTL_DBG(xe, !vm)) {
2968                 err = -EINVAL;
2969                 goto put_exec_queue;
2970         }
2971
2972         if (!args->exec_queue_id) {
2973                 if (XE_IOCTL_DBG(xe, async !=
2974                                  !!(vm->flags & XE_VM_FLAG_ASYNC_DEFAULT))) {
2975                         err = -EINVAL;
2976                         goto put_vm;
2977                 }
2978         }
2979
2980         err = down_write_killable(&vm->lock);
2981         if (err)
2982                 goto put_vm;
2983
2984         if (XE_IOCTL_DBG(xe, xe_vm_is_closed_or_banned(vm))) {
2985                 err = -ENOENT;
2986                 goto release_vm_lock;
2987         }
2988
2989         for (i = 0; i < args->num_binds; ++i) {
2990                 u64 range = bind_ops[i].range;
2991                 u64 addr = bind_ops[i].addr;
2992
2993                 if (XE_IOCTL_DBG(xe, range > vm->size) ||
2994                     XE_IOCTL_DBG(xe, addr > vm->size - range)) {
2995                         err = -EINVAL;
2996                         goto release_vm_lock;
2997                 }
2998
2999                 if (bind_ops[i].tile_mask) {
3000                         u64 valid_tiles = BIT(xe->info.tile_count) - 1;
3001
3002                         if (XE_IOCTL_DBG(xe, bind_ops[i].tile_mask &
3003                                          ~valid_tiles)) {
3004                                 err = -EINVAL;
3005                                 goto release_vm_lock;
3006                         }
3007                 }
3008         }
3009
3010         bos = kzalloc(sizeof(*bos) * args->num_binds, GFP_KERNEL);
3011         if (!bos) {
3012                 err = -ENOMEM;
3013                 goto release_vm_lock;
3014         }
3015
3016         ops = kcalloc(args->num_binds, sizeof(*ops), GFP_KERNEL);
3017         if (!ops) {
3018                 err = -ENOMEM;
3019                 goto release_vm_lock;
3020         }
3021
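        /* Resolve each bind op's GEM handle and check its offset, range and alignment */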
3022         for (i = 0; i < args->num_binds; ++i) {
3023                 struct drm_gem_object *gem_obj;
3024                 u64 range = bind_ops[i].range;
3025                 u64 addr = bind_ops[i].addr;
3026                 u32 obj = bind_ops[i].obj;
3027                 u64 obj_offset = bind_ops[i].obj_offset;
3028
3029                 if (!obj)
3030                         continue;
3031
3032                 gem_obj = drm_gem_object_lookup(file, obj);
3033                 if (XE_IOCTL_DBG(xe, !gem_obj)) {
3034                         err = -ENOENT;
3035                         goto put_obj;
3036                 }
3037                 bos[i] = gem_to_xe_bo(gem_obj);
3038
3039                 if (XE_IOCTL_DBG(xe, range > bos[i]->size) ||
3040                     XE_IOCTL_DBG(xe, obj_offset >
3041                                  bos[i]->size - range)) {
3042                         err = -EINVAL;
3043                         goto put_obj;
3044                 }
3045
3046                 if (bos[i]->flags & XE_BO_INTERNAL_64K) {
3047                         if (XE_IOCTL_DBG(xe, obj_offset &
3048                                          XE_64K_PAGE_MASK) ||
3049                             XE_IOCTL_DBG(xe, addr & XE_64K_PAGE_MASK) ||
3050                             XE_IOCTL_DBG(xe, range & XE_64K_PAGE_MASK)) {
3051                                 err = -EINVAL;
3052                                 goto put_obj;
3053                         }
3054                 }
3055         }
3056
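        /* Copy in and parse any user-supplied sync entries */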
3057         if (args->num_syncs) {
3058                 syncs = kcalloc(args->num_syncs, sizeof(*syncs), GFP_KERNEL);
3059                 if (!syncs) {
3060                         err = -ENOMEM;
3061                         goto put_obj;
3062                 }
3063         }
3064
3065         syncs_user = u64_to_user_ptr(args->syncs);
3066         for (num_syncs = 0; num_syncs < args->num_syncs; num_syncs++) {
3067                 err = xe_sync_entry_parse(xe, xef, &syncs[num_syncs],
3068                                           &syncs_user[num_syncs], false,
3069                                           xe_vm_no_dma_fences(vm));
3070                 if (err)
3071                         goto free_syncs;
3072         }
3073
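        /*
         * Build GPUVA ops for each bind op and parse them into xe_vma_ops on
         * ops_list, marking the final bind so the ops machinery knows where
         * the submission ends.
         */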
3074         for (i = 0; i < args->num_binds; ++i) {
3075                 u64 range = bind_ops[i].range;
3076                 u64 addr = bind_ops[i].addr;
3077                 u32 op = bind_ops[i].op;
3078                 u32 flags = bind_ops[i].flags;
3079                 u64 obj_offset = bind_ops[i].obj_offset;
3080                 u8 tile_mask = bind_ops[i].tile_mask;
3081                 u32 prefetch_region = bind_ops[i].prefetch_mem_region_instance;
3082
3083                 ops[i] = vm_bind_ioctl_ops_create(vm, bos[i], obj_offset,
3084                                                   addr, range, op, flags,
3085                                                   tile_mask, prefetch_region);
3086                 if (IS_ERR(ops[i])) {
3087                         err = PTR_ERR(ops[i]);
3088                         ops[i] = NULL;
3089                         goto unwind_ops;
3090                 }
3091
3092                 err = vm_bind_ioctl_ops_parse(vm, q, ops[i], syncs, num_syncs,
3093                                               &ops_list,
3094                                               i == args->num_binds - 1,
3095                                               async);
3096                 if (err)
3097                         goto unwind_ops;
3098         }
3099
3100         /* Nothing to do */
3101         if (list_empty(&ops_list)) {
3102                 err = -ENODATA;
3103                 goto unwind_ops;
3104         }
3105
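        /* Hold extra VM and exec queue references across ops execution */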
3106         xe_vm_get(vm);
3107         if (q)
3108                 xe_exec_queue_get(q);
3109
3110         err = vm_bind_ioctl_ops_execute(vm, &ops_list);
3111
3112         up_write(&vm->lock);
3113
3114         if (q)
3115                 xe_exec_queue_put(q);
3116         xe_vm_put(vm);
3117
3118         for (i = 0; bos && i < args->num_binds; ++i)
3119                 xe_bo_put(bos[i]);
3120
3121         kfree(bos);
3122         kfree(ops);
3123         if (args->num_binds > 1)
3124                 kfree(bind_ops);
3125
3126         return err;
3127
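/*
 * Error unwind. Note that -ENODATA ("nothing to do") is reported to userspace
 * as success, so in that case the syncs are still signaled with the exec
 * queue's last fence before being cleaned up.
 */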
3128 unwind_ops:
3129         vm_bind_ioctl_ops_unwind(vm, ops, args->num_binds);
3130 free_syncs:
3131         for (i = 0; err == -ENODATA && i < num_syncs; i++) {
3132                 struct dma_fence *fence =
3133                         xe_exec_queue_last_fence_get(to_wait_exec_queue(vm, q), vm);
3134
3135                 xe_sync_entry_signal(&syncs[i], NULL, fence);
3136         }
3137         while (num_syncs--)
3138                 xe_sync_entry_cleanup(&syncs[num_syncs]);
3139
3140         kfree(syncs);
3141 put_obj:
3142         for (i = 0; i < args->num_binds; ++i)
3143                 xe_bo_put(bos[i]);
3144 release_vm_lock:
3145         up_write(&vm->lock);
3146 put_vm:
3147         xe_vm_put(vm);
3148 put_exec_queue:
3149         if (q)
3150                 xe_exec_queue_put(q);
3151 free_objs:
3152         kfree(bos);
3153         kfree(ops);
3154         if (args->num_binds > 1)
3155                 kfree(bind_ops);
3156         return err == -ENODATA ? 0 : err;
3157 }
3158
3159 /**
3160  * xe_vm_lock() - Lock the vm's dma_resv object
3161  * @vm: The struct xe_vm whose lock is to be locked
3162  * @intr: Whether to perform any waits interruptibly
3163  *
3164  * Return: 0 on success, -EINTR if @intr is true and the wait for a
3165  * contended lock was interrupted. If @intr is false, the function
3166  * always returns 0.
3167  */
3168 int xe_vm_lock(struct xe_vm *vm, bool intr)
3169 {
3170         if (intr)
3171                 return dma_resv_lock_interruptible(xe_vm_resv(vm), NULL);
3172
3173         return dma_resv_lock(xe_vm_resv(vm), NULL);
3174 }
3175
3176 /**
3177  * xe_vm_unlock() - Unlock the vm's dma_resv object
3178  * @vm: The struct xe_vm whose lock is to be released.
3179  *
3180  * Unlock the vm's dma_resv object that was previously locked with xe_vm_lock().
3181  */
3182 void xe_vm_unlock(struct xe_vm *vm)
3183 {
3184         dma_resv_unlock(xe_vm_resv(vm));
3185 }
3186
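/*
 * Illustrative sketch only (not part of the driver): one way a caller might
 * pair xe_vm_lock()/xe_vm_unlock() around work that requires the VM's
 * dma_resv object to be held. The helper name and body are hypothetical.
 */
#if 0
static int example_with_vm_locked(struct xe_vm *vm)
{
        int err;

        /* Interruptible acquisition of the VM's dma_resv object */
        err = xe_vm_lock(vm, true);
        if (err)
                return err;     /* -EINTR if the wait was interrupted */

        /* ... operate on state protected by the VM's dma_resv here ... */

        xe_vm_unlock(vm);
        return 0;
}
#endif
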
3187 /**
3188  * xe_vm_invalidate_vma - invalidate GPU mappings for VMA without a lock
3189  * @vma: VMA to invalidate
3190  *
3191  * Walks the list of page table leaves, zeroing the entries owned by this
3192  * VMA, invalidates the TLBs, and blocks until the TLB invalidation is
3193  * complete.
3194  *
3195  * Return: 0 on success, negative error code otherwise.
3196  */
3197 int xe_vm_invalidate_vma(struct xe_vma *vma)
3198 {
3199         struct xe_device *xe = xe_vma_vm(vma)->xe;
3200         struct xe_tile *tile;
3201         u32 tile_needs_invalidate = 0;
3202         int seqno[XE_MAX_TILES_PER_DEVICE];
3203         u8 id;
3204         int ret;
3205
3206         xe_assert(xe, xe_vm_in_fault_mode(xe_vma_vm(vma)));
3207         xe_assert(xe, !xe_vma_is_null(vma));
3208         trace_xe_vma_usm_invalidate(vma);
3209
3210         /* Check that we don't race with page-table updates */
3211         if (IS_ENABLED(CONFIG_PROVE_LOCKING)) {
3212                 if (xe_vma_is_userptr(vma)) {
3213                         WARN_ON_ONCE(!mmu_interval_check_retry
3214                                      (&vma->userptr.notifier,
3215                                       vma->userptr.notifier_seq));
3216                         WARN_ON_ONCE(!dma_resv_test_signaled(xe_vm_resv(xe_vma_vm(vma)),
3217                                                              DMA_RESV_USAGE_BOOKKEEP));
3218
3219                 } else {
3220                         xe_bo_assert_held(xe_vma_bo(vma));
3221                 }
3222         }
3223
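        /*
         * Zap the PTEs covering this VMA on each tile and kick off a TLB
         * invalidation on that tile's primary GT, recording the seqno.
         */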
3224         for_each_tile(tile, xe, id) {
3225                 if (xe_pt_zap_ptes(tile, vma)) {
3226                         tile_needs_invalidate |= BIT(id);
3227                         xe_device_wmb(xe);
3228                         /*
3229                          * FIXME: We potentially need to invalidate multiple
3230                          * GTs within the tile
3231                          */
3232                         seqno[id] = xe_gt_tlb_invalidation_vma(tile->primary_gt, NULL, vma);
3233                         if (seqno[id] < 0)
3234                                 return seqno[id];
3235                 }
3236         }
3237
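        /* Wait for every TLB invalidation issued above to complete */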
3238         for_each_tile(tile, xe, id) {
3239                 if (tile_needs_invalidate & BIT(id)) {
3240                         ret = xe_gt_tlb_invalidation_wait(tile->primary_gt, seqno[id]);
3241                         if (ret < 0)
3242                                 return ret;
3243                 }
3244         }
3245
3246         vma->usm.tile_invalidated = vma->tile_mask;
3247
3248         return 0;
3249 }
3250
3251 int xe_analyze_vm(struct drm_printer *p, struct xe_vm *vm, int gt_id)
3252 {
3253         struct drm_gpuva *gpuva;
3254         bool is_vram;
3255         u64 addr;
3256
3257         if (!down_read_trylock(&vm->lock)) {
3258                 drm_printf(p, " Failed to acquire VM lock to dump capture\n");
3259                 return 0;
3260         }
3261         if (vm->pt_root[gt_id]) {
3262                 addr = xe_bo_addr(vm->pt_root[gt_id]->bo, 0, XE_PAGE_SIZE);
3263                 is_vram = xe_bo_is_vram(vm->pt_root[gt_id]->bo);
3264                 drm_printf(p, " VM root: A:0x%llx %s\n", addr,
3265                            is_vram ? "VRAM" : "SYS");
3266         }
3267
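        /*
         * Dump each VMA: address range, size, first DMA address (0 for NULL
         * or unpopulated userptr mappings) and backing placement.
         */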
3268         drm_gpuvm_for_each_va(gpuva, &vm->gpuvm) {
3269                 struct xe_vma *vma = gpuva_to_vma(gpuva);
3270                 bool is_userptr = xe_vma_is_userptr(vma);
3271                 bool is_null = xe_vma_is_null(vma);
3272
3273                 if (is_null) {
3274                         addr = 0;
3275                 } else if (is_userptr) {
3276                         struct xe_res_cursor cur;
3277
3278                         if (vma->userptr.sg) {
3279                                 xe_res_first_sg(vma->userptr.sg, 0, XE_PAGE_SIZE,
3280                                                 &cur);
3281                                 addr = xe_res_dma(&cur);
3282                         } else {
3283                                 addr = 0;
3284                         }
3285                 } else {
3286                         addr = __xe_bo_addr(xe_vma_bo(vma), 0, XE_PAGE_SIZE);
3287                         is_vram = xe_bo_is_vram(xe_vma_bo(vma));
3288                 }
3289                 drm_printf(p, " [%016llx-%016llx] S:0x%016llx A:%016llx %s\n",
3290                            xe_vma_start(vma), xe_vma_end(vma) - 1,
3291                            xe_vma_size(vma),
3292                            addr, is_null ? "NULL" : is_userptr ? "USR" :
3293                            is_vram ? "VRAM" : "SYS");
3294         }
3295         up_read(&vm->lock);
3296
3297         return 0;
3298 }