drm/xe: Fix fence reservation accounting
drivers/gpu/drm/xe/xe_vm.c
1 // SPDX-License-Identifier: MIT
2 /*
3  * Copyright © 2021 Intel Corporation
4  */
5
6 #include "xe_vm.h"
7
8 #include <linux/dma-fence-array.h>
9
10 #include <drm/drm_exec.h>
11 #include <drm/drm_print.h>
12 #include <drm/ttm/ttm_execbuf_util.h>
13 #include <drm/ttm/ttm_tt.h>
14 #include <drm/xe_drm.h>
15 #include <linux/delay.h>
16 #include <linux/kthread.h>
17 #include <linux/mm.h>
18 #include <linux/swap.h>
19
20 #include "xe_bo.h"
21 #include "xe_device.h"
22 #include "xe_exec_queue.h"
23 #include "xe_gt.h"
24 #include "xe_gt_pagefault.h"
25 #include "xe_gt_tlb_invalidation.h"
26 #include "xe_migrate.h"
27 #include "xe_pm.h"
28 #include "xe_preempt_fence.h"
29 #include "xe_pt.h"
30 #include "xe_res_cursor.h"
31 #include "xe_sync.h"
32 #include "xe_trace.h"
33 #include "generated/xe_wa_oob.h"
34 #include "xe_wa.h"
35
36 #define TEST_VM_ASYNC_OPS_ERROR
37
38 static struct drm_gem_object *xe_vm_obj(struct xe_vm *vm)
39 {
40         return vm->gpuvm.r_obj;
41 }
42
43 /**
44  * xe_vma_userptr_check_repin() - Advisory check for repin needed
45  * @vma: The userptr vma
46  *
47  * Check if the userptr vma has been invalidated since last successful
48  * repin. The check is advisory only and the function can be called
49  * without the vm->userptr.notifier_lock held. There is no guarantee that the
50  * vma userptr will remain valid after a lockless check, so typically
51  * the call needs to be followed by a proper check under the notifier_lock.
52  *
53  * Return: 0 if userptr vma is valid, -EAGAIN otherwise; repin recommended.
54  */
55 int xe_vma_userptr_check_repin(struct xe_vma *vma)
56 {
57         return mmu_interval_check_retry(&vma->userptr.notifier,
58                                         vma->userptr.notifier_seq) ?
59                 -EAGAIN : 0;
60 }
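
/*
 * Illustrative sketch (not part of the original file): the kerneldoc above
 * says the lockless check should be followed by a proper check under the
 * notifier_lock. A typical caller, with vm->lock held and error handling
 * simplified, might look like the snippet below. The retry policy and the
 * "install bindings" step are assumptions for illustration only.
 *
 *	if (xe_vma_userptr_check_repin(vma) == -EAGAIN) {
 *		err = xe_vma_userptr_pin_pages(vma);
 *		if (err)
 *			return err;
 *	}
 *
 *	down_read(&vm->userptr.notifier_lock);
 *	if (xe_vma_userptr_check_repin(vma) == -EAGAIN) {
 *		up_read(&vm->userptr.notifier_lock);
 *		return -EAGAIN;	(caller retries from the top)
 *	}
 *	(... install bindings / fences while the seqno is known valid ...)
 *	up_read(&vm->userptr.notifier_lock);
 */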
61
62 int xe_vma_userptr_pin_pages(struct xe_vma *vma)
63 {
64         struct xe_vm *vm = xe_vma_vm(vma);
65         struct xe_device *xe = vm->xe;
66         const unsigned long num_pages = xe_vma_size(vma) >> PAGE_SHIFT;
67         struct page **pages;
68         bool in_kthread = !current->mm;
69         unsigned long notifier_seq;
70         int pinned, ret, i;
71         bool read_only = xe_vma_read_only(vma);
72
73         lockdep_assert_held(&vm->lock);
74         XE_WARN_ON(!xe_vma_is_userptr(vma));
75 retry:
76         if (vma->gpuva.flags & XE_VMA_DESTROYED)
77                 return 0;
78
79         notifier_seq = mmu_interval_read_begin(&vma->userptr.notifier);
80         if (notifier_seq == vma->userptr.notifier_seq)
81                 return 0;
82
83         pages = kvmalloc_array(num_pages, sizeof(*pages), GFP_KERNEL);
84         if (!pages)
85                 return -ENOMEM;
86
87         if (vma->userptr.sg) {
88                 dma_unmap_sgtable(xe->drm.dev,
89                                   vma->userptr.sg,
90                                   read_only ? DMA_TO_DEVICE :
91                                   DMA_BIDIRECTIONAL, 0);
92                 sg_free_table(vma->userptr.sg);
93                 vma->userptr.sg = NULL;
94         }
95
96         pinned = ret = 0;
97         if (in_kthread) {
98                 if (!mmget_not_zero(vma->userptr.notifier.mm)) {
99                         ret = -EFAULT;
100                         goto mm_closed;
101                 }
102                 kthread_use_mm(vma->userptr.notifier.mm);
103         }
104
105         while (pinned < num_pages) {
106                 ret = get_user_pages_fast(xe_vma_userptr(vma) +
107                                           pinned * PAGE_SIZE,
108                                           num_pages - pinned,
109                                           read_only ? 0 : FOLL_WRITE,
110                                           &pages[pinned]);
111                 if (ret < 0) {
112                         if (in_kthread)
113                                 ret = 0;
114                         break;
115                 }
116
117                 pinned += ret;
118                 ret = 0;
119         }
120
121         if (in_kthread) {
122                 kthread_unuse_mm(vma->userptr.notifier.mm);
123                 mmput(vma->userptr.notifier.mm);
124         }
125 mm_closed:
126         if (ret)
127                 goto out;
128
129         ret = sg_alloc_table_from_pages_segment(&vma->userptr.sgt, pages,
130                                                 pinned, 0,
131                                                 (u64)pinned << PAGE_SHIFT,
132                                                 xe_sg_segment_size(xe->drm.dev),
133                                                 GFP_KERNEL);
134         if (ret) {
135                 vma->userptr.sg = NULL;
136                 goto out;
137         }
138         vma->userptr.sg = &vma->userptr.sgt;
139
140         ret = dma_map_sgtable(xe->drm.dev, vma->userptr.sg,
141                               read_only ? DMA_TO_DEVICE :
142                               DMA_BIDIRECTIONAL,
143                               DMA_ATTR_SKIP_CPU_SYNC |
144                               DMA_ATTR_NO_KERNEL_MAPPING);
145         if (ret) {
146                 sg_free_table(vma->userptr.sg);
147                 vma->userptr.sg = NULL;
148                 goto out;
149         }
150
151         for (i = 0; i < pinned; ++i) {
152                 if (!read_only) {
153                         lock_page(pages[i]);
154                         set_page_dirty(pages[i]);
155                         unlock_page(pages[i]);
156                 }
157
158                 mark_page_accessed(pages[i]);
159         }
160
161 out:
162         release_pages(pages, pinned);
163         kvfree(pages);
164
165         if (!(ret < 0)) {
166                 vma->userptr.notifier_seq = notifier_seq;
167                 if (xe_vma_userptr_check_repin(vma) == -EAGAIN)
168                         goto retry;
169         }
170
171         return ret < 0 ? ret : 0;
172 }
173
174 static bool preempt_fences_waiting(struct xe_vm *vm)
175 {
176         struct xe_exec_queue *q;
177
178         lockdep_assert_held(&vm->lock);
179         xe_vm_assert_held(vm);
180
181         list_for_each_entry(q, &vm->preempt.exec_queues, compute.link) {
182                 if (!q->compute.pfence ||
183                     (q->compute.pfence && test_bit(DMA_FENCE_FLAG_ENABLE_SIGNAL_BIT,
184                                                    &q->compute.pfence->flags))) {
185                         return true;
186                 }
187         }
188
189         return false;
190 }
191
192 static void free_preempt_fences(struct list_head *list)
193 {
194         struct list_head *link, *next;
195
196         list_for_each_safe(link, next, list)
197                 xe_preempt_fence_free(to_preempt_fence_from_link(link));
198 }
199
200 static int alloc_preempt_fences(struct xe_vm *vm, struct list_head *list,
201                                 unsigned int *count)
202 {
203         lockdep_assert_held(&vm->lock);
204         xe_vm_assert_held(vm);
205
206         if (*count >= vm->preempt.num_exec_queues)
207                 return 0;
208
209         for (; *count < vm->preempt.num_exec_queues; ++(*count)) {
210                 struct xe_preempt_fence *pfence = xe_preempt_fence_alloc();
211
212                 if (IS_ERR(pfence))
213                         return PTR_ERR(pfence);
214
215                 list_move_tail(xe_preempt_fence_link(pfence), list);
216         }
217
218         return 0;
219 }
220
221 static int wait_for_existing_preempt_fences(struct xe_vm *vm)
222 {
223         struct xe_exec_queue *q;
224
225         xe_vm_assert_held(vm);
226
227         list_for_each_entry(q, &vm->preempt.exec_queues, compute.link) {
228                 if (q->compute.pfence) {
229                         long timeout = dma_fence_wait(q->compute.pfence, false);
230
231                         if (timeout < 0)
232                                 return -ETIME;
233                         dma_fence_put(q->compute.pfence);
234                         q->compute.pfence = NULL;
235                 }
236         }
237
238         return 0;
239 }
240
241 static bool xe_vm_is_idle(struct xe_vm *vm)
242 {
243         struct xe_exec_queue *q;
244
245         xe_vm_assert_held(vm);
246         list_for_each_entry(q, &vm->preempt.exec_queues, compute.link) {
247                 if (!xe_exec_queue_is_idle(q))
248                         return false;
249         }
250
251         return true;
252 }
253
254 static void arm_preempt_fences(struct xe_vm *vm, struct list_head *list)
255 {
256         struct list_head *link;
257         struct xe_exec_queue *q;
258
259         list_for_each_entry(q, &vm->preempt.exec_queues, compute.link) {
260                 struct dma_fence *fence;
261
262                 link = list->next;
263                 XE_WARN_ON(link == list);
264
265                 fence = xe_preempt_fence_arm(to_preempt_fence_from_link(link),
266                                              q, q->compute.context,
267                                              ++q->compute.seqno);
268                 dma_fence_put(q->compute.pfence);
269                 q->compute.pfence = fence;
270         }
271 }
272
273 static int add_preempt_fences(struct xe_vm *vm, struct xe_bo *bo)
274 {
275         struct xe_exec_queue *q;
276         int err;
277
278         err = xe_bo_lock(bo, true);
279         if (err)
280                 return err;
281
282         err = dma_resv_reserve_fences(bo->ttm.base.resv, vm->preempt.num_exec_queues);
283         if (err)
284                 goto out_unlock;
285
286         list_for_each_entry(q, &vm->preempt.exec_queues, compute.link)
287                 if (q->compute.pfence) {
288                         dma_resv_add_fence(bo->ttm.base.resv,
289                                            q->compute.pfence,
290                                            DMA_RESV_USAGE_BOOKKEEP);
291                 }
292
293 out_unlock:
294         xe_bo_unlock(bo);
295         return err;
296 }
297
298 /**
299  * xe_vm_fence_all_extobjs() - Add a fence to vm's external objects' resv
300  * @vm: The vm.
301  * @fence: The fence to add.
302  * @usage: The resv usage for the fence.
303  *
304  * Loops over all of the vm's external object bindings and adds a @fence
305  * with the given @usage to all of the external object's reservation
306  * objects.
307  */
308 void xe_vm_fence_all_extobjs(struct xe_vm *vm, struct dma_fence *fence,
309                              enum dma_resv_usage usage)
310 {
311         struct xe_vma *vma;
312
313         list_for_each_entry(vma, &vm->extobj.list, extobj.link)
314                 dma_resv_add_fence(xe_vma_bo(vma)->ttm.base.resv, fence, usage);
315 }
316
317 static void resume_and_reinstall_preempt_fences(struct xe_vm *vm)
318 {
319         struct xe_exec_queue *q;
320
321         lockdep_assert_held(&vm->lock);
322         xe_vm_assert_held(vm);
323
324         list_for_each_entry(q, &vm->preempt.exec_queues, compute.link) {
325                 q->ops->resume(q);
326
327                 dma_resv_add_fence(xe_vm_resv(vm), q->compute.pfence,
328                                    DMA_RESV_USAGE_BOOKKEEP);
329                 xe_vm_fence_all_extobjs(vm, q->compute.pfence,
330                                         DMA_RESV_USAGE_BOOKKEEP);
331         }
332 }
333
334 int xe_vm_add_compute_exec_queue(struct xe_vm *vm, struct xe_exec_queue *q)
335 {
336         struct drm_exec exec;
337         struct dma_fence *pfence;
338         int err;
339         bool wait;
340
341         XE_WARN_ON(!xe_vm_in_compute_mode(vm));
342
343         down_write(&vm->lock);
344         drm_exec_init(&exec, DRM_EXEC_INTERRUPTIBLE_WAIT);
345         drm_exec_until_all_locked(&exec) {
346                 err = xe_vm_lock_dma_resv(vm, &exec, 1, true);
347                 drm_exec_retry_on_contention(&exec);
348                 if (err)
349                         goto out_unlock;
350         }
351
352         pfence = xe_preempt_fence_create(q, q->compute.context,
353                                          ++q->compute.seqno);
354         if (!pfence) {
355                 err = -ENOMEM;
356                 goto out_unlock;
357         }
358
359         list_add(&q->compute.link, &vm->preempt.exec_queues);
360         ++vm->preempt.num_exec_queues;
361         q->compute.pfence = pfence;
362
363         down_read(&vm->userptr.notifier_lock);
364
365         dma_resv_add_fence(xe_vm_resv(vm), pfence,
366                            DMA_RESV_USAGE_BOOKKEEP);
367
368         xe_vm_fence_all_extobjs(vm, pfence, DMA_RESV_USAGE_BOOKKEEP);
369
370         /*
371          * Check to see if a preemption on the VM or a userptr invalidation
372          * is in flight; if so, trigger this preempt fence to sync state with
373          * other preempt fences on the VM.
374          */
375         wait = __xe_vm_userptr_needs_repin(vm) || preempt_fences_waiting(vm);
376         if (wait)
377                 dma_fence_enable_sw_signaling(pfence);
378
379         up_read(&vm->userptr.notifier_lock);
380
381 out_unlock:
382         drm_exec_fini(&exec);
383         up_write(&vm->lock);
384
385         return err;
386 }
387
388 /**
389  * __xe_vm_userptr_needs_repin() - Check whether the VM does have userptrs
390  * that need repinning.
391  * @vm: The VM.
392  *
393  * This function checks for whether the VM has userptrs that need repinning,
394  * and provides a release-type barrier on the userptr.notifier_lock after
395  * checking.
396  *
397  * Return: 0 if there are no userptrs needing repinning, -EAGAIN if there are.
398  */
399 int __xe_vm_userptr_needs_repin(struct xe_vm *vm)
400 {
401         lockdep_assert_held_read(&vm->userptr.notifier_lock);
402
403         return (list_empty(&vm->userptr.repin_list) &&
404                 list_empty(&vm->userptr.invalidated)) ? 0 : -EAGAIN;
405 }
406
407 /**
408  * xe_vm_lock_dma_resv() - Lock the vm dma_resv object and the dma_resv
409  * objects of the vm's external buffer objects.
410  * @vm: The vm.
411  * @exec: Pointer to a struct drm_exec locking context.
412  * @num_shared: Number of dma-fence slots to reserve in the locked objects.
413  * @lock_vm: Lock also the vm's dma_resv.
414  *
415  * Locks the vm dma-resv objects and all the dma-resv objects of the
416  * buffer objects on the vm external object list.
417  *
418  * Return: 0 on success, negative error code on error. In particular,
419  * -EINTR or -ERESTARTSYS may be returned if a wait is interrupted.
420  */
421 int xe_vm_lock_dma_resv(struct xe_vm *vm, struct drm_exec *exec,
422                         unsigned int num_shared, bool lock_vm)
423 {
424         struct xe_vma *vma, *next;
425         int err = 0;
426
427         lockdep_assert_held(&vm->lock);
428
429         if (lock_vm) {
430                 err = drm_exec_prepare_obj(exec, xe_vm_obj(vm), num_shared);
431                 if (err)
432                         return err;
433         }
434
435         list_for_each_entry(vma, &vm->extobj.list, extobj.link) {
436                 err = drm_exec_prepare_obj(exec, &xe_vma_bo(vma)->ttm.base, num_shared);
437                 if (err)
438                         return err;
439         }
440
441         spin_lock(&vm->notifier.list_lock);
442         list_for_each_entry_safe(vma, next, &vm->notifier.rebind_list,
443                                  notifier.rebind_link) {
444                 xe_bo_assert_held(xe_vma_bo(vma));
445
446                 list_del_init(&vma->notifier.rebind_link);
447                 if (vma->tile_present && !(vma->gpuva.flags & XE_VMA_DESTROYED))
448                         list_move_tail(&vma->combined_links.rebind,
449                                        &vm->rebind_list);
450         }
451         spin_unlock(&vm->notifier.list_lock);
452
453         return 0;
454 }
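
/*
 * Illustrative sketch (not part of the original file): this helper is meant
 * to be called from within a drm_exec_until_all_locked() loop so that
 * ww-mutex contention is handled by the drm_exec retry machinery, mirroring
 * xe_vm_add_compute_exec_queue() above. The fence-slot count of 1 is an
 * assumption for illustration; callers reserve as many slots as they intend
 * to add.
 *
 *	drm_exec_init(&exec, DRM_EXEC_INTERRUPTIBLE_WAIT);
 *	drm_exec_until_all_locked(&exec) {
 *		err = xe_vm_lock_dma_resv(vm, &exec, 1, true);
 *		drm_exec_retry_on_contention(&exec);
 *		if (err)
 *			break;
 *	}
 *	(... use the locked objects ...)
 *	drm_exec_fini(&exec);
 */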
455
456 #define XE_VM_REBIND_RETRY_TIMEOUT_MS 1000
457
458 static void xe_vm_kill(struct xe_vm *vm)
459 {
460         struct xe_exec_queue *q;
461
462         lockdep_assert_held(&vm->lock);
463
464         xe_vm_lock(vm, false);
465         vm->flags |= XE_VM_FLAG_BANNED;
466         trace_xe_vm_kill(vm);
467
468         list_for_each_entry(q, &vm->preempt.exec_queues, compute.link)
469                 q->ops->kill(q);
470         xe_vm_unlock(vm);
471
472         /* TODO: Inform user the VM is banned */
473 }
474
475 /**
476  * xe_vm_validate_should_retry() - Whether to retry after a validate error.
477  * @exec: The drm_exec object used for locking before validation.
478  * @err: The error returned from ttm_bo_validate().
479  * @end: A ktime_t cookie that should be set to 0 before first use and
480  * that should be reused on subsequent calls.
481  *
482  * With multiple active VMs, under memory pressure, it is possible that
483  * ttm_bo_validate() runs into -EDEADLK and in such a case returns -ENOMEM.
484  * Until ttm properly handles locking in such scenarios, the best thing the
485  * driver can do is retry with a timeout. Check if that is necessary, and
486  * if so unlock the drm_exec's objects while keeping the ticket to prepare
487  * for a rerun.
488  *
489  * Return: true if a retry after drm_exec_init() is recommended;
490  * false otherwise.
491  */
492 bool xe_vm_validate_should_retry(struct drm_exec *exec, int err, ktime_t *end)
493 {
494         ktime_t cur;
495
496         if (err != -ENOMEM)
497                 return false;
498
499         cur = ktime_get();
500         *end = *end ? : ktime_add_ms(cur, XE_VM_REBIND_RETRY_TIMEOUT_MS);
501         if (!ktime_before(cur, *end))
502                 return false;
503
504         /*
505          * We would like to keep the ticket here with
506          * drm_exec_unlock_all(), but WW mutex asserts currently
507          * stop us from that. In any case this function could go away
508          * with proper TTM -EDEADLK handling.
509          */
510         drm_exec_fini(exec);
511
512         msleep(20);
513         return true;
514 }
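
/*
 * Illustrative sketch (not part of the original file): the intended calling
 * pattern, as used by the rebind worker below, keeps a ktime_t cookie across
 * attempts and re-initializes the drm_exec after a retryable failure.
 * lock_and_validate() is a placeholder for the caller's locking/validation
 * step; everything else uses functions from this file or drm_exec.
 *
 *	ktime_t end = 0;
 *
 * retry:
 *	drm_exec_init(&exec, DRM_EXEC_INTERRUPTIBLE_WAIT);
 *	drm_exec_until_all_locked(&exec) {
 *		err = lock_and_validate(&exec, vm);	(placeholder)
 *		drm_exec_retry_on_contention(&exec);
 *		if (err)
 *			break;
 *	}
 *	if (err && xe_vm_validate_should_retry(&exec, err, &end))
 *		goto retry;	(the exec was already finalized on this path)
 *	if (!err)
 *		(... submit work under the held locks ...)
 *	drm_exec_fini(&exec);
 */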
515
516 static int xe_preempt_work_begin(struct drm_exec *exec, struct xe_vm *vm,
517                                  bool *done)
518 {
519         struct xe_vma *vma;
520         int err;
521
522         /*
523          * 1 fence for each preempt fence plus a fence for each tile from a
524          * possible rebind
525          */
526         err = drm_exec_prepare_obj(exec, xe_vm_obj(vm),
527                                    vm->preempt.num_exec_queues +
528                                    vm->xe->info.tile_count);
529         if (err)
530                 return err;
531
532         if (xe_vm_is_idle(vm)) {
533                 vm->preempt.rebind_deactivated = true;
534                 *done = true;
535                 return 0;
536         }
537
538         if (!preempt_fences_waiting(vm)) {
539                 *done = true;
540                 return 0;
541         }
542
543         err = xe_vm_lock_dma_resv(vm, exec, vm->preempt.num_exec_queues, false);
544         if (err)
545                 return err;
546
547         err = wait_for_existing_preempt_fences(vm);
548         if (err)
549                 return err;
550
551         list_for_each_entry(vma, &vm->rebind_list, combined_links.rebind) {
552                 if (xe_vma_has_no_bo(vma) ||
553                     vma->gpuva.flags & XE_VMA_DESTROYED)
554                         continue;
555
556                 err = xe_bo_validate(xe_vma_bo(vma), vm, false);
557                 if (err)
558                         break;
559         }
560
561         return err;
562 }
563
564 static void preempt_rebind_work_func(struct work_struct *w)
565 {
566         struct xe_vm *vm = container_of(w, struct xe_vm, preempt.rebind_work);
567         struct drm_exec exec;
568         struct dma_fence *rebind_fence;
569         unsigned int fence_count = 0;
570         LIST_HEAD(preempt_fences);
571         ktime_t end = 0;
572         int err;
573         long wait;
574         int __maybe_unused tries = 0;
575
576         XE_WARN_ON(!xe_vm_in_compute_mode(vm));
577         trace_xe_vm_rebind_worker_enter(vm);
578
579         down_write(&vm->lock);
580
581         if (xe_vm_is_closed_or_banned(vm)) {
582                 up_write(&vm->lock);
583                 trace_xe_vm_rebind_worker_exit(vm);
584                 return;
585         }
586
587 retry:
588         if (vm->async_ops.error)
589                 goto out_unlock_outer;
590
591         /*
592          * Extreme corner where we exit a VM error state with a munmap style VM
593          * unbind inflight which requires a rebind. In this case the rebind
594          * needs to install some fences into the dma-resv slots. The worker to
595          * do this is already queued; let that worker make progress by dropping
596          * vm->lock and trying this again.
597          */
598         if (vm->async_ops.munmap_rebind_inflight) {
599                 up_write(&vm->lock);
600                 flush_work(&vm->async_ops.work);
601                 goto retry;
602         }
603
604         if (xe_vm_userptr_check_repin(vm)) {
605                 err = xe_vm_userptr_pin(vm);
606                 if (err)
607                         goto out_unlock_outer;
608         }
609
610         drm_exec_init(&exec, DRM_EXEC_INTERRUPTIBLE_WAIT);
611
612         drm_exec_until_all_locked(&exec) {
613                 bool done = false;
614
615                 err = xe_preempt_work_begin(&exec, vm, &done);
616                 drm_exec_retry_on_contention(&exec);
617                 if (err && xe_vm_validate_should_retry(&exec, err, &end)) {
618                         err = -EAGAIN;
619                         goto out_unlock_outer;
620                 }
621                 if (err || done)
622                         goto out_unlock;
623         }
624
625         err = alloc_preempt_fences(vm, &preempt_fences, &fence_count);
626         if (err)
627                 goto out_unlock;
628
629         rebind_fence = xe_vm_rebind(vm, true);
630         if (IS_ERR(rebind_fence)) {
631                 err = PTR_ERR(rebind_fence);
632                 goto out_unlock;
633         }
634
635         if (rebind_fence) {
636                 dma_fence_wait(rebind_fence, false);
637                 dma_fence_put(rebind_fence);
638         }
639
640         /* Wait on munmap style VM unbinds */
641         wait = dma_resv_wait_timeout(xe_vm_resv(vm),
642                                      DMA_RESV_USAGE_KERNEL,
643                                      false, MAX_SCHEDULE_TIMEOUT);
644         if (wait <= 0) {
645                 err = -ETIME;
646                 goto out_unlock;
647         }
648
649 #define retry_required(__tries, __vm) \
650         (IS_ENABLED(CONFIG_DRM_XE_USERPTR_INVAL_INJECT) ? \
651         (!(__tries)++ || __xe_vm_userptr_needs_repin(__vm)) : \
652         __xe_vm_userptr_needs_repin(__vm))
653
654         down_read(&vm->userptr.notifier_lock);
655         if (retry_required(tries, vm)) {
656                 up_read(&vm->userptr.notifier_lock);
657                 err = -EAGAIN;
658                 goto out_unlock;
659         }
660
661 #undef retry_required
662
663         spin_lock(&vm->xe->ttm.lru_lock);
664         ttm_lru_bulk_move_tail(&vm->lru_bulk_move);
665         spin_unlock(&vm->xe->ttm.lru_lock);
666
667         /* Point of no return. */
668         arm_preempt_fences(vm, &preempt_fences);
669         resume_and_reinstall_preempt_fences(vm);
670         up_read(&vm->userptr.notifier_lock);
671
672 out_unlock:
673         drm_exec_fini(&exec);
674 out_unlock_outer:
675         if (err == -EAGAIN) {
676                 trace_xe_vm_rebind_worker_retry(vm);
677                 goto retry;
678         }
679
680         if (err) {
681                 drm_warn(&vm->xe->drm, "VM worker error: %d\n", err);
682                 xe_vm_kill(vm);
683         }
684         up_write(&vm->lock);
685
686         free_preempt_fences(&preempt_fences);
687
688         trace_xe_vm_rebind_worker_exit(vm);
689 }
690
691 static bool vma_userptr_invalidate(struct mmu_interval_notifier *mni,
692                                    const struct mmu_notifier_range *range,
693                                    unsigned long cur_seq)
694 {
695         struct xe_vma *vma = container_of(mni, struct xe_vma, userptr.notifier);
696         struct xe_vm *vm = xe_vma_vm(vma);
697         struct dma_resv_iter cursor;
698         struct dma_fence *fence;
699         long err;
700
701         XE_WARN_ON(!xe_vma_is_userptr(vma));
702         trace_xe_vma_userptr_invalidate(vma);
703
704         if (!mmu_notifier_range_blockable(range))
705                 return false;
706
707         down_write(&vm->userptr.notifier_lock);
708         mmu_interval_set_seq(mni, cur_seq);
709
710         /* No need to stop gpu access if the userptr is not yet bound. */
711         if (!vma->userptr.initial_bind) {
712                 up_write(&vm->userptr.notifier_lock);
713                 return true;
714         }
715
716         /*
717          * Tell exec and rebind worker they need to repin and rebind this
718          * userptr.
719          */
720         if (!xe_vm_in_fault_mode(vm) &&
721             !(vma->gpuva.flags & XE_VMA_DESTROYED) && vma->tile_present) {
722                 spin_lock(&vm->userptr.invalidated_lock);
723                 list_move_tail(&vma->userptr.invalidate_link,
724                                &vm->userptr.invalidated);
725                 spin_unlock(&vm->userptr.invalidated_lock);
726         }
727
728         up_write(&vm->userptr.notifier_lock);
729
730         /*
731          * Preempt fences turn into schedule disables, pipeline these.
732          * Note that even in fault mode, we need to wait for binds and
733          * unbinds to complete, and those are attached as BOOKKEEP fences
734          * to the vm.
735          */
736         dma_resv_iter_begin(&cursor, xe_vm_resv(vm),
737                             DMA_RESV_USAGE_BOOKKEEP);
738         dma_resv_for_each_fence_unlocked(&cursor, fence)
739                 dma_fence_enable_sw_signaling(fence);
740         dma_resv_iter_end(&cursor);
741
742         err = dma_resv_wait_timeout(xe_vm_resv(vm),
743                                     DMA_RESV_USAGE_BOOKKEEP,
744                                     false, MAX_SCHEDULE_TIMEOUT);
745         XE_WARN_ON(err <= 0);
746
747         if (xe_vm_in_fault_mode(vm)) {
748                 err = xe_vm_invalidate_vma(vma);
749                 XE_WARN_ON(err);
750         }
751
752         trace_xe_vma_userptr_invalidate_complete(vma);
753
754         return true;
755 }
756
757 static const struct mmu_interval_notifier_ops vma_userptr_notifier_ops = {
758         .invalidate = vma_userptr_invalidate,
759 };
760
761 int xe_vm_userptr_pin(struct xe_vm *vm)
762 {
763         struct xe_vma *vma, *next;
764         int err = 0;
765         LIST_HEAD(tmp_evict);
766
767         lockdep_assert_held_write(&vm->lock);
768
769         /* Collect invalidated userptrs */
770         spin_lock(&vm->userptr.invalidated_lock);
771         list_for_each_entry_safe(vma, next, &vm->userptr.invalidated,
772                                  userptr.invalidate_link) {
773                 list_del_init(&vma->userptr.invalidate_link);
774                 if (list_empty(&vma->combined_links.userptr))
775                         list_move_tail(&vma->combined_links.userptr,
776                                        &vm->userptr.repin_list);
777         }
778         spin_unlock(&vm->userptr.invalidated_lock);
779
780         /* Pin and move to temporary list */
781         list_for_each_entry_safe(vma, next, &vm->userptr.repin_list,
782                                  combined_links.userptr) {
783                 err = xe_vma_userptr_pin_pages(vma);
784                 if (err < 0)
785                         goto out_err;
786
787                 list_move_tail(&vma->combined_links.userptr, &tmp_evict);
788         }
789
790         /* Take lock and move to rebind_list for rebinding. */
791         err = dma_resv_lock_interruptible(xe_vm_resv(vm), NULL);
792         if (err)
793                 goto out_err;
794
795         list_for_each_entry_safe(vma, next, &tmp_evict, combined_links.userptr)
796                 list_move_tail(&vma->combined_links.rebind, &vm->rebind_list);
797
798         dma_resv_unlock(xe_vm_resv(vm));
799
800         return 0;
801
802 out_err:
803         list_splice_tail(&tmp_evict, &vm->userptr.repin_list);
804
805         return err;
806 }
807
808 /**
809  * xe_vm_userptr_check_repin() - Check whether the VM might have userptrs
810  * that need repinning.
811  * @vm: The VM.
812  *
813  * This function does an advisory check for whether the VM has userptrs that
814  * need repinning.
815  *
816  * Return: 0 if there are no indications of userptrs needing repinning,
817  * -EAGAIN if there are.
818  */
819 int xe_vm_userptr_check_repin(struct xe_vm *vm)
820 {
821         return (list_empty_careful(&vm->userptr.repin_list) &&
822                 list_empty_careful(&vm->userptr.invalidated)) ? 0 : -EAGAIN;
823 }
824
825 static struct dma_fence *
826 xe_vm_bind_vma(struct xe_vma *vma, struct xe_exec_queue *q,
827                struct xe_sync_entry *syncs, u32 num_syncs,
828                bool first_op, bool last_op);
829
830 struct dma_fence *xe_vm_rebind(struct xe_vm *vm, bool rebind_worker)
831 {
832         struct dma_fence *fence = NULL;
833         struct xe_vma *vma, *next;
834
835         lockdep_assert_held(&vm->lock);
836         if (xe_vm_no_dma_fences(vm) && !rebind_worker)
837                 return NULL;
838
839         xe_vm_assert_held(vm);
840         list_for_each_entry_safe(vma, next, &vm->rebind_list,
841                                  combined_links.rebind) {
842                 XE_WARN_ON(!vma->tile_present);
843
844                 list_del_init(&vma->combined_links.rebind);
845                 dma_fence_put(fence);
846                 if (rebind_worker)
847                         trace_xe_vma_rebind_worker(vma);
848                 else
849                         trace_xe_vma_rebind_exec(vma);
850                 fence = xe_vm_bind_vma(vma, NULL, NULL, 0, false, false);
851                 if (IS_ERR(fence))
852                         return fence;
853         }
854
855         return fence;
856 }
857
858 static struct xe_vma *xe_vma_create(struct xe_vm *vm,
859                                     struct xe_bo *bo,
860                                     u64 bo_offset_or_userptr,
861                                     u64 start, u64 end,
862                                     bool read_only,
863                                     bool is_null,
864                                     u8 tile_mask)
865 {
866         struct xe_vma *vma;
867         struct xe_tile *tile;
868         u8 id;
869
870         XE_WARN_ON(start >= end);
871         XE_WARN_ON(end >= vm->size);
872
873         if (!bo && !is_null)    /* userptr */
874                 vma = kzalloc(sizeof(*vma), GFP_KERNEL);
875         else
876                 vma = kzalloc(sizeof(*vma) - sizeof(struct xe_userptr),
877                               GFP_KERNEL);
878         if (!vma) {
879                 vma = ERR_PTR(-ENOMEM);
880                 return vma;
881         }
882
883         INIT_LIST_HEAD(&vma->combined_links.rebind);
884         INIT_LIST_HEAD(&vma->notifier.rebind_link);
885         INIT_LIST_HEAD(&vma->extobj.link);
886
887         INIT_LIST_HEAD(&vma->gpuva.gem.entry);
888         vma->gpuva.vm = &vm->gpuvm;
889         vma->gpuva.va.addr = start;
890         vma->gpuva.va.range = end - start + 1;
891         if (read_only)
892                 vma->gpuva.flags |= XE_VMA_READ_ONLY;
893         if (is_null)
894                 vma->gpuva.flags |= DRM_GPUVA_SPARSE;
895
896         if (tile_mask) {
897                 vma->tile_mask = tile_mask;
898         } else {
899                 for_each_tile(tile, vm->xe, id)
900                         vma->tile_mask |= 0x1 << id;
901         }
902
903         if (vm->xe->info.platform == XE_PVC)
904                 vma->gpuva.flags |= XE_VMA_ATOMIC_PTE_BIT;
905
906         if (bo) {
907                 struct drm_gpuvm_bo *vm_bo;
908
909                 xe_bo_assert_held(bo);
910
911                 vm_bo = drm_gpuvm_bo_obtain(vma->gpuva.vm, &bo->ttm.base);
912                 if (IS_ERR(vm_bo)) {
913                         kfree(vma);
914                         return ERR_CAST(vm_bo);
915                 }
916
917                 drm_gem_object_get(&bo->ttm.base);
918                 vma->gpuva.gem.obj = &bo->ttm.base;
919                 vma->gpuva.gem.offset = bo_offset_or_userptr;
920                 drm_gpuva_link(&vma->gpuva, vm_bo);
921                 drm_gpuvm_bo_put(vm_bo);
922         } else /* userptr or null */ {
923                 if (!is_null) {
924                         u64 size = end - start + 1;
925                         int err;
926
927                         INIT_LIST_HEAD(&vma->userptr.invalidate_link);
928                         vma->gpuva.gem.offset = bo_offset_or_userptr;
929
930                         err = mmu_interval_notifier_insert(&vma->userptr.notifier,
931                                                            current->mm,
932                                                            xe_vma_userptr(vma), size,
933                                                            &vma_userptr_notifier_ops);
934                         if (err) {
935                                 kfree(vma);
936                                 vma = ERR_PTR(err);
937                                 return vma;
938                         }
939
940                         vma->userptr.notifier_seq = LONG_MAX;
941                 }
942
943                 xe_vm_get(vm);
944         }
945
946         return vma;
947 }
948
949 static bool vm_remove_extobj(struct xe_vma *vma)
950 {
951         if (!list_empty(&vma->extobj.link)) {
952                 xe_vma_vm(vma)->extobj.entries--;
953                 list_del_init(&vma->extobj.link);
954                 return true;
955         }
956         return false;
957 }
958
959 static void xe_vma_destroy_late(struct xe_vma *vma)
960 {
961         struct xe_vm *vm = xe_vma_vm(vma);
962         struct xe_device *xe = vm->xe;
963         bool read_only = xe_vma_read_only(vma);
964
965         if (xe_vma_is_userptr(vma)) {
966                 if (vma->userptr.sg) {
967                         dma_unmap_sgtable(xe->drm.dev,
968                                           vma->userptr.sg,
969                                           read_only ? DMA_TO_DEVICE :
970                                           DMA_BIDIRECTIONAL, 0);
971                         sg_free_table(vma->userptr.sg);
972                         vma->userptr.sg = NULL;
973                 }
974
975                 /*
976                  * Since userptr pages are not pinned, we can't remove
977                  * the notifier until we're sure the GPU is not accessing
978                  * them anymore
979                  */
980                 mmu_interval_notifier_remove(&vma->userptr.notifier);
981                 xe_vm_put(vm);
982         } else if (xe_vma_is_null(vma)) {
983                 xe_vm_put(vm);
984         } else {
985                 xe_bo_put(xe_vma_bo(vma));
986         }
987
988         kfree(vma);
989 }
990
991 static void vma_destroy_work_func(struct work_struct *w)
992 {
993         struct xe_vma *vma =
994                 container_of(w, struct xe_vma, destroy_work);
995
996         xe_vma_destroy_late(vma);
997 }
998
999 static struct xe_vma *
1000 bo_has_vm_references_locked(struct xe_bo *bo, struct xe_vm *vm,
1001                             struct xe_vma *ignore)
1002 {
1003         struct drm_gpuvm_bo *vm_bo;
1004         struct drm_gpuva *va;
1005         struct drm_gem_object *obj = &bo->ttm.base;
1006
1007         xe_bo_assert_held(bo);
1008
1009         drm_gem_for_each_gpuvm_bo(vm_bo, obj) {
1010                 drm_gpuvm_bo_for_each_va(va, vm_bo) {
1011                         struct xe_vma *vma = gpuva_to_vma(va);
1012
1013                         if (vma != ignore && xe_vma_vm(vma) == vm)
1014                                 return vma;
1015                 }
1016         }
1017
1018         return NULL;
1019 }
1020
1021 static bool bo_has_vm_references(struct xe_bo *bo, struct xe_vm *vm,
1022                                  struct xe_vma *ignore)
1023 {
1024         bool ret;
1025
1026         xe_bo_lock(bo, false);
1027         ret = !!bo_has_vm_references_locked(bo, vm, ignore);
1028         xe_bo_unlock(bo);
1029
1030         return ret;
1031 }
1032
1033 static void __vm_insert_extobj(struct xe_vm *vm, struct xe_vma *vma)
1034 {
1035         lockdep_assert_held_write(&vm->lock);
1036
1037         list_add(&vma->extobj.link, &vm->extobj.list);
1038         vm->extobj.entries++;
1039 }
1040
1041 static void vm_insert_extobj(struct xe_vm *vm, struct xe_vma *vma)
1042 {
1043         struct xe_bo *bo = xe_vma_bo(vma);
1044
1045         lockdep_assert_held_write(&vm->lock);
1046
1047         if (bo_has_vm_references(bo, vm, vma))
1048                 return;
1049
1050         __vm_insert_extobj(vm, vma);
1051 }
1052
1053 static void vma_destroy_cb(struct dma_fence *fence,
1054                            struct dma_fence_cb *cb)
1055 {
1056         struct xe_vma *vma = container_of(cb, struct xe_vma, destroy_cb);
1057
1058         INIT_WORK(&vma->destroy_work, vma_destroy_work_func);
1059         queue_work(system_unbound_wq, &vma->destroy_work);
1060 }
1061
1062 static void xe_vma_destroy(struct xe_vma *vma, struct dma_fence *fence)
1063 {
1064         struct xe_vm *vm = xe_vma_vm(vma);
1065
1066         lockdep_assert_held_write(&vm->lock);
1067         XE_WARN_ON(!list_empty(&vma->combined_links.destroy));
1068
1069         if (xe_vma_is_userptr(vma)) {
1070                 XE_WARN_ON(!(vma->gpuva.flags & XE_VMA_DESTROYED));
1071
1072                 spin_lock(&vm->userptr.invalidated_lock);
1073                 list_del(&vma->userptr.invalidate_link);
1074                 spin_unlock(&vm->userptr.invalidated_lock);
1075         } else if (!xe_vma_is_null(vma)) {
1076                 xe_bo_assert_held(xe_vma_bo(vma));
1077
1078                 spin_lock(&vm->notifier.list_lock);
1079                 list_del(&vma->notifier.rebind_link);
1080                 spin_unlock(&vm->notifier.list_lock);
1081
1082                 drm_gpuva_unlink(&vma->gpuva);
1083
1084                 if (!xe_vma_bo(vma)->vm && vm_remove_extobj(vma)) {
1085                         struct xe_vma *other;
1086
1087                         other = bo_has_vm_references_locked(xe_vma_bo(vma), vm, NULL);
1088
1089                         if (other)
1090                                 __vm_insert_extobj(vm, other);
1091                 }
1092         }
1093
1094         xe_vm_assert_held(vm);
1095         if (fence) {
1096                 int ret = dma_fence_add_callback(fence, &vma->destroy_cb,
1097                                                  vma_destroy_cb);
1098
1099                 if (ret) {
1100                         XE_WARN_ON(ret != -ENOENT);
1101                         xe_vma_destroy_late(vma);
1102                 }
1103         } else {
1104                 xe_vma_destroy_late(vma);
1105         }
1106 }
1107
1108 /**
1109  * xe_vm_prepare_vma() - drm_exec utility to lock a vma
1110  * @exec: The drm_exec object we're currently locking for.
1111  * @vma: The vma for which we want to lock the vm resv and any attached
1112  * object's resv.
1113  * @num_shared: The number of dma-fence slots to pre-allocate in the
1114  * objects' reservation objects.
1115  *
1116  * Return: 0 on success, negative error code on error. In particular
1117  * may return -EDEADLK on WW transaction contention and -EINTR if
1118  * an interruptible wait is terminated by a signal.
1119  */
1120 int xe_vm_prepare_vma(struct drm_exec *exec, struct xe_vma *vma,
1121                       unsigned int num_shared)
1122 {
1123         struct xe_vm *vm = xe_vma_vm(vma);
1124         struct xe_bo *bo = xe_vma_bo(vma);
1125         int err;
1126
1127         XE_WARN_ON(!vm);
1128         err = drm_exec_prepare_obj(exec, xe_vm_obj(vm), num_shared);
1129         if (!err && bo && !bo->vm)
1130                 err = drm_exec_prepare_obj(exec, &bo->ttm.base, num_shared);
1131
1132         return err;
1133 }
1134
1135 static void xe_vma_destroy_unlocked(struct xe_vma *vma)
1136 {
1137         struct drm_exec exec;
1138         int err;
1139
1140         drm_exec_init(&exec, 0);
1141         drm_exec_until_all_locked(&exec) {
1142                 err = xe_vm_prepare_vma(&exec, vma, 0);
1143                 drm_exec_retry_on_contention(&exec);
1144                 if (XE_WARN_ON(err))
1145                         break;
1146         }
1147
1148         xe_vma_destroy(vma, NULL);
1149
1150         drm_exec_fini(&exec);
1151 }
1152
1153 struct xe_vma *
1154 xe_vm_find_overlapping_vma(struct xe_vm *vm, u64 start, u64 range)
1155 {
1156         struct drm_gpuva *gpuva;
1157
1158         lockdep_assert_held(&vm->lock);
1159
1160         if (xe_vm_is_closed_or_banned(vm))
1161                 return NULL;
1162
1163         XE_WARN_ON(start + range > vm->size);
1164
1165         gpuva = drm_gpuva_find_first(&vm->gpuvm, start, range);
1166
1167         return gpuva ? gpuva_to_vma(gpuva) : NULL;
1168 }
1169
1170 static int xe_vm_insert_vma(struct xe_vm *vm, struct xe_vma *vma)
1171 {
1172         int err;
1173
1174         XE_WARN_ON(xe_vma_vm(vma) != vm);
1175         lockdep_assert_held(&vm->lock);
1176
1177         err = drm_gpuva_insert(&vm->gpuvm, &vma->gpuva);
1178         XE_WARN_ON(err);        /* Shouldn't be possible */
1179
1180         return err;
1181 }
1182
1183 static void xe_vm_remove_vma(struct xe_vm *vm, struct xe_vma *vma)
1184 {
1185         XE_WARN_ON(xe_vma_vm(vma) != vm);
1186         lockdep_assert_held(&vm->lock);
1187
1188         drm_gpuva_remove(&vma->gpuva);
1189         if (vm->usm.last_fault_vma == vma)
1190                 vm->usm.last_fault_vma = NULL;
1191 }
1192
1193 static struct drm_gpuva_op *xe_vm_op_alloc(void)
1194 {
1195         struct xe_vma_op *op;
1196
1197         op = kzalloc(sizeof(*op), GFP_KERNEL);
1198
1199         if (unlikely(!op))
1200                 return NULL;
1201
1202         return &op->base;
1203 }
1204
1205 static void xe_vm_free(struct drm_gpuvm *gpuvm);
1206
1207 static struct drm_gpuvm_ops gpuvm_ops = {
1208         .op_alloc = xe_vm_op_alloc,
1209         .vm_free = xe_vm_free,
1210 };
1211
1212 static void xe_vma_op_work_func(struct work_struct *w);
1213 static void vm_destroy_work_func(struct work_struct *w);
1214
1215 struct xe_vm *xe_vm_create(struct xe_device *xe, u32 flags)
1216 {
1217         struct drm_gem_object *vm_resv_obj;
1218         struct xe_vm *vm;
1219         int err, number_tiles = 0;
1220         struct xe_tile *tile;
1221         u8 id;
1222
1223         vm = kzalloc(sizeof(*vm), GFP_KERNEL);
1224         if (!vm)
1225                 return ERR_PTR(-ENOMEM);
1226
1227         vm->xe = xe;
1228
1229         vm->size = 1ull << xe->info.va_bits;
1230
1231         vm->flags = flags;
1232
1233         init_rwsem(&vm->lock);
1234
1235         INIT_LIST_HEAD(&vm->rebind_list);
1236
1237         INIT_LIST_HEAD(&vm->userptr.repin_list);
1238         INIT_LIST_HEAD(&vm->userptr.invalidated);
1239         init_rwsem(&vm->userptr.notifier_lock);
1240         spin_lock_init(&vm->userptr.invalidated_lock);
1241
1242         INIT_LIST_HEAD(&vm->notifier.rebind_list);
1243         spin_lock_init(&vm->notifier.list_lock);
1244
1245         INIT_LIST_HEAD(&vm->async_ops.pending);
1246         INIT_WORK(&vm->async_ops.work, xe_vma_op_work_func);
1247         spin_lock_init(&vm->async_ops.lock);
1248
1249         INIT_WORK(&vm->destroy_work, vm_destroy_work_func);
1250
1251         INIT_LIST_HEAD(&vm->preempt.exec_queues);
1252         vm->preempt.min_run_period_ms = 10;     /* FIXME: Wire up to uAPI */
1253
1254         for_each_tile(tile, xe, id)
1255                 xe_range_fence_tree_init(&vm->rftree[id]);
1256
1257         INIT_LIST_HEAD(&vm->extobj.list);
1258
1259         if (!(flags & XE_VM_FLAG_MIGRATION))
1260                 xe_device_mem_access_get(xe);
1261
1262         vm_resv_obj = drm_gpuvm_resv_object_alloc(&xe->drm);
1263         if (!vm_resv_obj) {
1264                 err = -ENOMEM;
1265                 goto err_no_resv;
1266         }
1267
1268         drm_gpuvm_init(&vm->gpuvm, "Xe VM", 0, &xe->drm, vm_resv_obj,
1269                        0, vm->size, 0, 0, &gpuvm_ops);
1270
1271         drm_gem_object_put(vm_resv_obj);
1272
1273         err = dma_resv_lock_interruptible(xe_vm_resv(vm), NULL);
1274         if (err)
1275                 goto err_close;
1276
1277         if (IS_DGFX(xe) && xe->info.vram_flags & XE_VRAM_FLAGS_NEED64K)
1278                 vm->flags |= XE_VM_FLAG_64K;
1279
1280         for_each_tile(tile, xe, id) {
1281                 if (flags & XE_VM_FLAG_MIGRATION &&
1282                     tile->id != XE_VM_FLAG_TILE_ID(flags))
1283                         continue;
1284
1285                 vm->pt_root[id] = xe_pt_create(vm, tile, xe->info.vm_max_level);
1286                 if (IS_ERR(vm->pt_root[id])) {
1287                         err = PTR_ERR(vm->pt_root[id]);
1288                         vm->pt_root[id] = NULL;
1289                         goto err_unlock_close;
1290                 }
1291         }
1292
1293         if (flags & XE_VM_FLAG_SCRATCH_PAGE) {
1294                 for_each_tile(tile, xe, id) {
1295                         if (!vm->pt_root[id])
1296                                 continue;
1297
1298                         err = xe_pt_create_scratch(xe, tile, vm);
1299                         if (err)
1300                                 goto err_unlock_close;
1301                 }
1302                 vm->batch_invalidate_tlb = true;
1303         }
1304
1305         if (flags & XE_VM_FLAG_COMPUTE_MODE) {
1306                 INIT_WORK(&vm->preempt.rebind_work, preempt_rebind_work_func);
1307                 vm->flags |= XE_VM_FLAG_COMPUTE_MODE;
1308                 vm->batch_invalidate_tlb = false;
1309         }
1310
1311         if (flags & XE_VM_FLAG_ASYNC_BIND_OPS) {
1312                 vm->async_ops.fence.context = dma_fence_context_alloc(1);
1313                 vm->flags |= XE_VM_FLAG_ASYNC_BIND_OPS;
1314         }
1315
1316         /* Fill pt_root after allocating scratch tables */
1317         for_each_tile(tile, xe, id) {
1318                 if (!vm->pt_root[id])
1319                         continue;
1320
1321                 xe_pt_populate_empty(tile, vm, vm->pt_root[id]);
1322         }
1323         dma_resv_unlock(xe_vm_resv(vm));
1324
1325         /* Kernel migration VM shouldn't have a circular loop.. */
1326         if (!(flags & XE_VM_FLAG_MIGRATION)) {
1327                 for_each_tile(tile, xe, id) {
1328                         struct xe_gt *gt = tile->primary_gt;
1329                         struct xe_vm *migrate_vm;
1330                         struct xe_exec_queue *q;
1331
1332                         if (!vm->pt_root[id])
1333                                 continue;
1334
1335                         migrate_vm = xe_migrate_get_vm(tile->migrate);
1336                         q = xe_exec_queue_create_class(xe, gt, migrate_vm,
1337                                                        XE_ENGINE_CLASS_COPY,
1338                                                        EXEC_QUEUE_FLAG_VM);
1339                         xe_vm_put(migrate_vm);
1340                         if (IS_ERR(q)) {
1341                                 err = PTR_ERR(q);
1342                                 goto err_close;
1343                         }
1344                         vm->q[id] = q;
1345                         number_tiles++;
1346                 }
1347         }
1348
1349         if (number_tiles > 1)
1350                 vm->composite_fence_ctx = dma_fence_context_alloc(1);
1351
1352         mutex_lock(&xe->usm.lock);
1353         if (flags & XE_VM_FLAG_FAULT_MODE)
1354                 xe->usm.num_vm_in_fault_mode++;
1355         else if (!(flags & XE_VM_FLAG_MIGRATION))
1356                 xe->usm.num_vm_in_non_fault_mode++;
1357         mutex_unlock(&xe->usm.lock);
1358
1359         trace_xe_vm_create(vm);
1360
1361         return vm;
1362
1363 err_unlock_close:
1364         dma_resv_unlock(xe_vm_resv(vm));
1365 err_close:
1366         xe_vm_close_and_put(vm);
1367         return ERR_PTR(err);
1368
1369 err_no_resv:
1370         for_each_tile(tile, xe, id)
1371                 xe_range_fence_tree_fini(&vm->rftree[id]);
1372         kfree(vm);
1373         if (!(flags & XE_VM_FLAG_MIGRATION))
1374                 xe_device_mem_access_put(xe);
1375         return ERR_PTR(err);
1376 }
1377
1378 static void flush_async_ops(struct xe_vm *vm)
1379 {
1380         queue_work(system_unbound_wq, &vm->async_ops.work);
1381         flush_work(&vm->async_ops.work);
1382 }
1383
1384 static void vm_error_capture(struct xe_vm *vm, int err,
1385                              u32 op, u64 addr, u64 size)
1386 {
1387         struct drm_xe_vm_bind_op_error_capture capture;
1388         u64 __user *address =
1389                 u64_to_user_ptr(vm->async_ops.error_capture.addr);
1390         bool in_kthread = !current->mm;
1391
1392         capture.error = err;
1393         capture.op = op;
1394         capture.addr = addr;
1395         capture.size = size;
1396
1397         if (in_kthread) {
1398                 if (!mmget_not_zero(vm->async_ops.error_capture.mm))
1399                         goto mm_closed;
1400                 kthread_use_mm(vm->async_ops.error_capture.mm);
1401         }
1402
1403         if (copy_to_user(address, &capture, sizeof(capture)))
1404                 XE_WARN_ON("Copy to user failed");
1405
1406         if (in_kthread) {
1407                 kthread_unuse_mm(vm->async_ops.error_capture.mm);
1408                 mmput(vm->async_ops.error_capture.mm);
1409         }
1410
1411 mm_closed:
1412         wake_up_all(&vm->async_ops.error_capture.wq);
1413 }
1414
1415 static void xe_vm_close(struct xe_vm *vm)
1416 {
1417         down_write(&vm->lock);
1418         vm->size = 0;
1419         up_write(&vm->lock);
1420 }
1421
1422 void xe_vm_close_and_put(struct xe_vm *vm)
1423 {
1424         LIST_HEAD(contested);
1425         struct xe_device *xe = vm->xe;
1426         struct xe_tile *tile;
1427         struct xe_vma *vma, *next_vma;
1428         struct drm_gpuva *gpuva, *next;
1429         u8 id;
1430
1431         XE_WARN_ON(vm->preempt.num_exec_queues);
1432
1433         xe_vm_close(vm);
1434         flush_async_ops(vm);
1435         if (xe_vm_in_compute_mode(vm))
1436                 flush_work(&vm->preempt.rebind_work);
1437
1438         for_each_tile(tile, xe, id) {
1439                 if (vm->q[id]) {
1440                         xe_exec_queue_kill(vm->q[id]);
1441                         xe_exec_queue_put(vm->q[id]);
1442                         vm->q[id] = NULL;
1443                 }
1444         }
1445
1446         down_write(&vm->lock);
1447         xe_vm_lock(vm, false);
1448         drm_gpuvm_for_each_va_safe(gpuva, next, &vm->gpuvm) {
1449                 vma = gpuva_to_vma(gpuva);
1450
1451                 if (xe_vma_has_no_bo(vma)) {
1452                         down_read(&vm->userptr.notifier_lock);
1453                         vma->gpuva.flags |= XE_VMA_DESTROYED;
1454                         up_read(&vm->userptr.notifier_lock);
1455                 }
1456
1457                 xe_vm_remove_vma(vm, vma);
1458
1459                 /* easy case, remove from VMA? */
1460                 if (xe_vma_has_no_bo(vma) || xe_vma_bo(vma)->vm) {
1461                         list_del_init(&vma->combined_links.rebind);
1462                         xe_vma_destroy(vma, NULL);
1463                         continue;
1464                 }
1465
1466                 list_move_tail(&vma->combined_links.destroy, &contested);
1467                 vma->gpuva.flags |= XE_VMA_DESTROYED;
1468         }
1469
1470         /*
1471          * All vm operations will add shared fences to resv.
1472          * The only exception is eviction for a shared object,
1473          * but even so, the unbind when evicted would still
1474          * install a fence to resv. Hence it's safe to
1475          * destroy the pagetables immediately.
1476          */
1477         for_each_tile(tile, xe, id) {
1478                 if (vm->scratch_bo[id]) {
1479                         u32 i;
1480
1481                         xe_bo_unpin(vm->scratch_bo[id]);
1482                         xe_bo_put(vm->scratch_bo[id]);
1483                         for (i = 0; i < vm->pt_root[id]->level; i++)
1484                                 xe_pt_destroy(vm->scratch_pt[id][i], vm->flags,
1485                                               NULL);
1486                 }
1487                 if (vm->pt_root[id]) {
1488                         xe_pt_destroy(vm->pt_root[id], vm->flags, NULL);
1489                         vm->pt_root[id] = NULL;
1490                 }
1491         }
1492         xe_vm_unlock(vm);
1493
1494         /*
1495          * VM is now dead, cannot re-add nodes to vm->vmas if it's NULL
1496          * Since we hold a refcount to the bo, we can remove and free
1497          * the members safely without locking.
1498          */
1499         list_for_each_entry_safe(vma, next_vma, &contested,
1500                                  combined_links.destroy) {
1501                 list_del_init(&vma->combined_links.destroy);
1502                 xe_vma_destroy_unlocked(vma);
1503         }
1504
1505         if (vm->async_ops.error_capture.addr)
1506                 wake_up_all(&vm->async_ops.error_capture.wq);
1507
1508         XE_WARN_ON(!list_empty(&vm->extobj.list));
1509         up_write(&vm->lock);
1510
1511         mutex_lock(&xe->usm.lock);
1512         if (vm->flags & XE_VM_FLAG_FAULT_MODE)
1513                 xe->usm.num_vm_in_fault_mode--;
1514         else if (!(vm->flags & XE_VM_FLAG_MIGRATION))
1515                 xe->usm.num_vm_in_non_fault_mode--;
1516         mutex_unlock(&xe->usm.lock);
1517
1518         for_each_tile(tile, xe, id)
1519                 xe_range_fence_tree_fini(&vm->rftree[id]);
1520
1521         xe_vm_put(vm);
1522 }
1523
1524 static void vm_destroy_work_func(struct work_struct *w)
1525 {
1526         struct xe_vm *vm =
1527                 container_of(w, struct xe_vm, destroy_work);
1528         struct xe_device *xe = vm->xe;
1529         struct xe_tile *tile;
1530         u8 id;
1531         void *lookup;
1532
1533         /* xe_vm_close_and_put was not called? */
1534         XE_WARN_ON(vm->size);
1535
1536         if (!(vm->flags & XE_VM_FLAG_MIGRATION)) {
1537                 xe_device_mem_access_put(xe);
1538
1539                 if (xe->info.has_asid) {
1540                         mutex_lock(&xe->usm.lock);
1541                         lookup = xa_erase(&xe->usm.asid_to_vm, vm->usm.asid);
1542                         XE_WARN_ON(lookup != vm);
1543                         mutex_unlock(&xe->usm.lock);
1544                 }
1545         }
1546
1547         for_each_tile(tile, xe, id)
1548                 XE_WARN_ON(vm->pt_root[id]);
1549
1550         trace_xe_vm_free(vm);
1551         dma_fence_put(vm->rebind_fence);
1552         kfree(vm);
1553 }
1554
1555 static void xe_vm_free(struct drm_gpuvm *gpuvm)
1556 {
1557         struct xe_vm *vm = container_of(gpuvm, struct xe_vm, gpuvm);
1558
1559         /* To destroy the VM we need to be able to sleep */
1560         queue_work(system_unbound_wq, &vm->destroy_work);
1561 }
1562
1563 struct xe_vm *xe_vm_lookup(struct xe_file *xef, u32 id)
1564 {
1565         struct xe_vm *vm;
1566
1567         mutex_lock(&xef->vm.lock);
1568         vm = xa_load(&xef->vm.xa, id);
1569         if (vm)
1570                 xe_vm_get(vm);
1571         mutex_unlock(&xef->vm.lock);
1572
1573         return vm;
1574 }
1575
1576 u64 xe_vm_pdp4_descriptor(struct xe_vm *vm, struct xe_tile *tile)
1577 {
1578         return xe_pde_encode(vm->pt_root[tile->id]->bo, 0,
1579                              XE_CACHE_WB);
1580 }
1581
1582 static struct dma_fence *
1583 xe_vm_unbind_vma(struct xe_vma *vma, struct xe_exec_queue *q,
1584                  struct xe_sync_entry *syncs, u32 num_syncs,
1585                  bool first_op, bool last_op)
1586 {
1587         struct xe_tile *tile;
1588         struct dma_fence *fence = NULL;
1589         struct dma_fence **fences = NULL;
1590         struct dma_fence_array *cf = NULL;
1591         struct xe_vm *vm = xe_vma_vm(vma);
1592         int cur_fence = 0, i;
1593         int number_tiles = hweight8(vma->tile_present);
1594         int err;
1595         u8 id;
1596
1597         trace_xe_vma_unbind(vma);
1598
1599         if (number_tiles > 1) {
1600                 fences = kmalloc_array(number_tiles, sizeof(*fences),
1601                                        GFP_KERNEL);
1602                 if (!fences)
1603                         return ERR_PTR(-ENOMEM);
1604         }
1605
1606         for_each_tile(tile, vm->xe, id) {
1607                 if (!(vma->tile_present & BIT(id)))
1608                         goto next;
1609
1610                 fence = __xe_pt_unbind_vma(tile, vma, q, first_op ? syncs : NULL,
1611                                            first_op ? num_syncs : 0);
1612                 if (IS_ERR(fence)) {
1613                         err = PTR_ERR(fence);
1614                         goto err_fences;
1615                 }
1616
1617                 if (fences)
1618                         fences[cur_fence++] = fence;
1619
1620 next:
1621                 if (q && vm->pt_root[id] && !list_empty(&q->multi_gt_list))
1622                         q = list_next_entry(q, multi_gt_list);
1623         }
1624
1625         if (fences) {
1626                 cf = dma_fence_array_create(number_tiles, fences,
1627                                             vm->composite_fence_ctx,
1628                                             vm->composite_fence_seqno++,
1629                                             false);
1630                 if (!cf) {
1631                         --vm->composite_fence_seqno;
1632                         err = -ENOMEM;
1633                         goto err_fences;
1634                 }
1635         }
1636
1637         if (last_op) {
1638                 for (i = 0; i < num_syncs; i++)
1639                         xe_sync_entry_signal(&syncs[i], NULL,
1640                                              cf ? &cf->base : fence);
1641         }
1642
1643         return cf ? &cf->base : !fence ? dma_fence_get_stub() : fence;
1644
1645 err_fences:
1646         if (fences) {
1647                 while (cur_fence) {
1648                         /* FIXME: Rewind the previous binds? */
1649                         dma_fence_put(fences[--cur_fence]);
1650                 }
1651                 kfree(fences);
1652         }
1653
1654         return ERR_PTR(err);
1655 }
1656
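/*
 * Bind @vma on every tile in its tile_mask, mirroring xe_vm_unbind_vma():
 * per-tile bind fences are combined into a dma_fence_array when multiple
 * tiles are involved and the syncs are signalled on the last operation.
 */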
1657 static struct dma_fence *
1658 xe_vm_bind_vma(struct xe_vma *vma, struct xe_exec_queue *q,
1659                struct xe_sync_entry *syncs, u32 num_syncs,
1660                bool first_op, bool last_op)
1661 {
1662         struct xe_tile *tile;
1663         struct dma_fence *fence;
1664         struct dma_fence **fences = NULL;
1665         struct dma_fence_array *cf = NULL;
1666         struct xe_vm *vm = xe_vma_vm(vma);
1667         int cur_fence = 0, i;
1668         int number_tiles = hweight8(vma->tile_mask);
1669         int err;
1670         u8 id;
1671
1672         trace_xe_vma_bind(vma);
1673
1674         if (number_tiles > 1) {
1675                 fences = kmalloc_array(number_tiles, sizeof(*fences),
1676                                        GFP_KERNEL);
1677                 if (!fences)
1678                         return ERR_PTR(-ENOMEM);
1679         }
1680
1681         for_each_tile(tile, vm->xe, id) {
1682                 if (!(vma->tile_mask & BIT(id)))
1683                         goto next;
1684
1685                 fence = __xe_pt_bind_vma(tile, vma, q ? q : vm->q[id],
1686                                          first_op ? syncs : NULL,
1687                                          first_op ? num_syncs : 0,
1688                                          vma->tile_present & BIT(id));
1689                 if (IS_ERR(fence)) {
1690                         err = PTR_ERR(fence);
1691                         goto err_fences;
1692                 }
1693
1694                 if (fences)
1695                         fences[cur_fence++] = fence;
1696
1697 next:
1698                 if (q && vm->pt_root[id] && !list_empty(&q->multi_gt_list))
1699                         q = list_next_entry(q, multi_gt_list);
1700         }
1701
1702         if (fences) {
1703                 cf = dma_fence_array_create(number_tiles, fences,
1704                                             vm->composite_fence_ctx,
1705                                             vm->composite_fence_seqno++,
1706                                             false);
1707                 if (!cf) {
1708                         --vm->composite_fence_seqno;
1709                         err = -ENOMEM;
1710                         goto err_fences;
1711                 }
1712         }
1713
1714         if (last_op) {
1715                 for (i = 0; i < num_syncs; i++)
1716                         xe_sync_entry_signal(&syncs[i], NULL,
1717                                              cf ? &cf->base : fence);
1718         }
1719
1720         return cf ? &cf->base : fence;
1721
1722 err_fences:
1723         if (fences) {
1724                 while (cur_fence) {
1725                         /* FIXME: Rewind the previous binds? */
1726                         dma_fence_put(fences[--cur_fence]);
1727                 }
1728                 kfree(fences);
1729         }
1730
1731         return ERR_PTR(err);
1732 }
1733
1734 struct async_op_fence {
1735         struct dma_fence fence;
1736         struct dma_fence *wait_fence;
1737         struct dma_fence_cb cb;
1738         struct xe_vm *vm;
1739         wait_queue_head_t wq;
1740         bool started;
1741 };
1742
1743 static const char *async_op_fence_get_driver_name(struct dma_fence *dma_fence)
1744 {
1745         return "xe";
1746 }
1747
1748 static const char *
1749 async_op_fence_get_timeline_name(struct dma_fence *dma_fence)
1750 {
1751         return "async_op_fence";
1752 }
1753
1754 static const struct dma_fence_ops async_op_fence_ops = {
1755         .get_driver_name = async_op_fence_get_driver_name,
1756         .get_timeline_name = async_op_fence_get_timeline_name,
1757 };
1758
1759 static void async_op_fence_cb(struct dma_fence *fence, struct dma_fence_cb *cb)
1760 {
1761         struct async_op_fence *afence =
1762                 container_of(cb, struct async_op_fence, cb);
1763
1764         afence->fence.error = afence->wait_fence->error;
1765         dma_fence_signal(&afence->fence);
1766         xe_vm_put(afence->vm);
1767         dma_fence_put(afence->wait_fence);
1768         dma_fence_put(&afence->fence);
1769 }
1770
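/*
 * Glue a software async-op fence to the real bind/unbind fence: when the
 * latter signals, its error is propagated and the async-op fence is signalled
 * from the callback. References on the VM and both fences are taken for the
 * callback and dropped again if the callback cannot be installed (e.g. the
 * fence has already signalled, -ENOENT).
 */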
1771 static void add_async_op_fence_cb(struct xe_vm *vm,
1772                                   struct dma_fence *fence,
1773                                   struct async_op_fence *afence)
1774 {
1775         int ret;
1776
1777         if (!xe_vm_no_dma_fences(vm)) {
1778                 afence->started = true;
1779                 smp_wmb();
1780                 wake_up_all(&afence->wq);
1781         }
1782
1783         afence->wait_fence = dma_fence_get(fence);
1784         afence->vm = xe_vm_get(vm);
1785         dma_fence_get(&afence->fence);
1786         ret = dma_fence_add_callback(fence, &afence->cb, async_op_fence_cb);
1787         if (ret == -ENOENT) {
1788                 afence->fence.error = afence->wait_fence->error;
1789                 dma_fence_signal(&afence->fence);
1790         }
1791         if (ret) {
1792                 xe_vm_put(vm);
1793                 dma_fence_put(afence->wait_fence);
1794                 dma_fence_put(&afence->fence);
1795         }
1796         XE_WARN_ON(ret && ret != -ENOENT);
1797 }
1798
1799 int xe_vm_async_fence_wait_start(struct dma_fence *fence)
1800 {
1801         if (fence->ops == &async_op_fence_ops) {
1802                 struct async_op_fence *afence =
1803                         container_of(fence, struct async_op_fence, fence);
1804
1805                 XE_WARN_ON(xe_vm_no_dma_fences(afence->vm));
1806
1807                 smp_rmb();
1808                 return wait_event_interruptible(afence->wq, afence->started);
1809         }
1810
1811         return 0;
1812 }
1813
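/*
 * Issue the actual bind or, for non-immediate binds (fault mode only), just
 * signal the syncs with the stub fence. The resulting fence is optionally
 * hooked up to an async-op fence before being released.
 */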
1814 static int __xe_vm_bind(struct xe_vm *vm, struct xe_vma *vma,
1815                         struct xe_exec_queue *q, struct xe_sync_entry *syncs,
1816                         u32 num_syncs, struct async_op_fence *afence,
1817                         bool immediate, bool first_op, bool last_op)
1818 {
1819         struct dma_fence *fence;
1820
1821         xe_vm_assert_held(vm);
1822
1823         if (immediate) {
1824                 fence = xe_vm_bind_vma(vma, q, syncs, num_syncs, first_op,
1825                                        last_op);
1826                 if (IS_ERR(fence))
1827                         return PTR_ERR(fence);
1828         } else {
1829                 int i;
1830
1831                 XE_WARN_ON(!xe_vm_in_fault_mode(vm));
1832
1833                 fence = dma_fence_get_stub();
1834                 if (last_op) {
1835                         for (i = 0; i < num_syncs; i++)
1836                                 xe_sync_entry_signal(&syncs[i], NULL, fence);
1837                 }
1838         }
1839         if (afence)
1840                 add_async_op_fence_cb(vm, fence, afence);
1841
1842         dma_fence_put(fence);
1843         return 0;
1844 }
1845
1846 static int xe_vm_bind(struct xe_vm *vm, struct xe_vma *vma, struct xe_exec_queue *q,
1847                       struct xe_bo *bo, struct xe_sync_entry *syncs,
1848                       u32 num_syncs, struct async_op_fence *afence,
1849                       bool immediate, bool first_op, bool last_op)
1850 {
1851         int err;
1852
1853         xe_vm_assert_held(vm);
1854         xe_bo_assert_held(bo);
1855
1856         if (bo && immediate) {
1857                 err = xe_bo_validate(bo, vm, true);
1858                 if (err)
1859                         return err;
1860         }
1861
1862         return __xe_vm_bind(vm, vma, q, syncs, num_syncs, afence, immediate,
1863                             first_op, last_op);
1864 }
1865
1866 static int xe_vm_unbind(struct xe_vm *vm, struct xe_vma *vma,
1867                         struct xe_exec_queue *q, struct xe_sync_entry *syncs,
1868                         u32 num_syncs, struct async_op_fence *afence,
1869                         bool first_op, bool last_op)
1870 {
1871         struct dma_fence *fence;
1872
1873         xe_vm_assert_held(vm);
1874         xe_bo_assert_held(xe_vma_bo(vma));
1875
1876         fence = xe_vm_unbind_vma(vma, q, syncs, num_syncs, first_op, last_op);
1877         if (IS_ERR(fence))
1878                 return PTR_ERR(fence);
1879         if (afence)
1880                 add_async_op_fence_cb(vm, fence, afence);
1881
1882         xe_vma_destroy(vma, fence);
1883         dma_fence_put(fence);
1884
1885         return 0;
1886 }
1887
1888 static int vm_set_error_capture_address(struct xe_device *xe, struct xe_vm *vm,
1889                                         u64 value)
1890 {
1891         if (XE_IOCTL_DBG(xe, !value))
1892                 return -EINVAL;
1893
1894         if (XE_IOCTL_DBG(xe, !(vm->flags & XE_VM_FLAG_ASYNC_BIND_OPS)))
1895                 return -EOPNOTSUPP;
1896
1897         if (XE_IOCTL_DBG(xe, vm->async_ops.error_capture.addr))
1898                 return -EOPNOTSUPP;
1899
1900         vm->async_ops.error_capture.mm = current->mm;
1901         vm->async_ops.error_capture.addr = value;
1902         init_waitqueue_head(&vm->async_ops.error_capture.wq);
1903
1904         return 0;
1905 }
1906
1907 typedef int (*xe_vm_set_property_fn)(struct xe_device *xe, struct xe_vm *vm,
1908                                      u64 value);
1909
1910 static const xe_vm_set_property_fn vm_set_property_funcs[] = {
1911         [XE_VM_PROPERTY_BIND_OP_ERROR_CAPTURE_ADDRESS] =
1912                 vm_set_error_capture_address,
1913 };
1914
1915 static int vm_user_ext_set_property(struct xe_device *xe, struct xe_vm *vm,
1916                                     u64 extension)
1917 {
1918         u64 __user *address = u64_to_user_ptr(extension);
1919         struct drm_xe_ext_vm_set_property ext;
1920         int err;
1921
1922         err = __copy_from_user(&ext, address, sizeof(ext));
1923         if (XE_IOCTL_DBG(xe, err))
1924                 return -EFAULT;
1925
1926         if (XE_IOCTL_DBG(xe, ext.property >=
1927                          ARRAY_SIZE(vm_set_property_funcs)) ||
1928             XE_IOCTL_DBG(xe, ext.pad) ||
1929             XE_IOCTL_DBG(xe, ext.reserved[0] || ext.reserved[1]))
1930                 return -EINVAL;
1931
1932         return vm_set_property_funcs[ext.property](xe, vm, ext.value);
1933 }
1934
1935 typedef int (*xe_vm_user_extension_fn)(struct xe_device *xe, struct xe_vm *vm,
1936                                        u64 extension);
1937
1938 static const xe_vm_user_extension_fn vm_user_extension_funcs[] = {
1939         [XE_VM_EXTENSION_SET_PROPERTY] = vm_user_ext_set_property,
1940 };
1941
1942 #define MAX_USER_EXTENSIONS     16
1943 static int vm_user_extensions(struct xe_device *xe, struct xe_vm *vm,
1944                               u64 extensions, int ext_number)
1945 {
1946         u64 __user *address = u64_to_user_ptr(extensions);
1947         struct xe_user_extension ext;
1948         int err;
1949
1950         if (XE_IOCTL_DBG(xe, ext_number >= MAX_USER_EXTENSIONS))
1951                 return -E2BIG;
1952
1953         err = __copy_from_user(&ext, address, sizeof(ext));
1954         if (XE_IOCTL_DBG(xe, err))
1955                 return -EFAULT;
1956
1957         if (XE_IOCTL_DBG(xe, ext.pad) ||
1958             XE_IOCTL_DBG(xe, ext.name >=
1959                          ARRAY_SIZE(vm_user_extension_funcs)))
1960                 return -EINVAL;
1961
1962         err = vm_user_extension_funcs[ext.name](xe, vm, extensions);
1963         if (XE_IOCTL_DBG(xe, err))
1964                 return err;
1965
1966         if (ext.next_extension)
1967                 return vm_user_extensions(xe, vm, ext.next_extension,
1968                                           ++ext_number);
1969
1970         return 0;
1971 }
1972
1973 #define ALL_DRM_XE_VM_CREATE_FLAGS (DRM_XE_VM_CREATE_SCRATCH_PAGE | \
1974                                     DRM_XE_VM_CREATE_COMPUTE_MODE | \
1975                                     DRM_XE_VM_CREATE_ASYNC_BIND_OPS | \
1976                                     DRM_XE_VM_CREATE_FAULT_MODE)
1977
1978 int xe_vm_create_ioctl(struct drm_device *dev, void *data,
1979                        struct drm_file *file)
1980 {
1981         struct xe_device *xe = to_xe_device(dev);
1982         struct xe_file *xef = to_xe_file(file);
1983         struct drm_xe_vm_create *args = data;
1984         struct xe_vm *vm;
1985         u32 id, asid;
1986         int err;
1987         u32 flags = 0;
1988
1989         if (XE_WA(xe_root_mmio_gt(xe), 14016763929))
1990                 args->flags |= DRM_XE_VM_CREATE_SCRATCH_PAGE;
1991
1992         if (XE_IOCTL_DBG(xe, args->flags & DRM_XE_VM_CREATE_FAULT_MODE &&
1993                          !xe->info.supports_usm))
1994                 return -EINVAL;
1995
1996         if (XE_IOCTL_DBG(xe, args->reserved[0] || args->reserved[1]))
1997                 return -EINVAL;
1998
1999         if (XE_IOCTL_DBG(xe, args->flags & ~ALL_DRM_XE_VM_CREATE_FLAGS))
2000                 return -EINVAL;
2001
2002         if (XE_IOCTL_DBG(xe, args->flags & DRM_XE_VM_CREATE_SCRATCH_PAGE &&
2003                          args->flags & DRM_XE_VM_CREATE_FAULT_MODE))
2004                 return -EINVAL;
2005
2006         if (XE_IOCTL_DBG(xe, args->flags & DRM_XE_VM_CREATE_COMPUTE_MODE &&
2007                          args->flags & DRM_XE_VM_CREATE_FAULT_MODE))
2008                 return -EINVAL;
2009
2010         if (XE_IOCTL_DBG(xe, args->flags & DRM_XE_VM_CREATE_FAULT_MODE &&
2011                          xe_device_in_non_fault_mode(xe)))
2012                 return -EINVAL;
2013
2014         if (XE_IOCTL_DBG(xe, !(args->flags & DRM_XE_VM_CREATE_FAULT_MODE) &&
2015                          xe_device_in_fault_mode(xe)))
2016                 return -EINVAL;
2017
2018         if (args->flags & DRM_XE_VM_CREATE_SCRATCH_PAGE)
2019                 flags |= XE_VM_FLAG_SCRATCH_PAGE;
2020         if (args->flags & DRM_XE_VM_CREATE_COMPUTE_MODE)
2021                 flags |= XE_VM_FLAG_COMPUTE_MODE;
2022         if (args->flags & DRM_XE_VM_CREATE_ASYNC_BIND_OPS)
2023                 flags |= XE_VM_FLAG_ASYNC_BIND_OPS;
2024         if (args->flags & DRM_XE_VM_CREATE_FAULT_MODE)
2025                 flags |= XE_VM_FLAG_FAULT_MODE;
2026
2027         vm = xe_vm_create(xe, flags);
2028         if (IS_ERR(vm))
2029                 return PTR_ERR(vm);
2030
2031         if (args->extensions) {
2032                 err = vm_user_extensions(xe, vm, args->extensions, 0);
2033                 if (XE_IOCTL_DBG(xe, err)) {
2034                         xe_vm_close_and_put(vm);
2035                         return err;
2036                 }
2037         }
2038
2039         mutex_lock(&xef->vm.lock);
2040         err = xa_alloc(&xef->vm.xa, &id, vm, xa_limit_32b, GFP_KERNEL);
2041         mutex_unlock(&xef->vm.lock);
2042         if (err) {
2043                 xe_vm_close_and_put(vm);
2044                 return err;
2045         }
2046
2047         if (xe->info.has_asid) {
2048                 mutex_lock(&xe->usm.lock);
2049                 err = xa_alloc_cyclic(&xe->usm.asid_to_vm, &asid, vm,
2050                                       XA_LIMIT(0, XE_MAX_ASID - 1),
2051                                       &xe->usm.next_asid, GFP_KERNEL);
2052                 mutex_unlock(&xe->usm.lock);
2053                 if (err) {
2054                         xe_vm_close_and_put(vm);
2055                         return err;
2056                 }
2057                 vm->usm.asid = asid;
2058         }
2059
2060         args->vm_id = id;
2061
2062 #if IS_ENABLED(CONFIG_DRM_XE_DEBUG_MEM)
2063         /* Warning: Security issue - never enable by default */
2064         args->reserved[0] = xe_bo_main_addr(vm->pt_root[0]->bo, XE_PAGE_SIZE);
2065 #endif
2066
2067         return 0;
2068 }
2069
2070 int xe_vm_destroy_ioctl(struct drm_device *dev, void *data,
2071                         struct drm_file *file)
2072 {
2073         struct xe_device *xe = to_xe_device(dev);
2074         struct xe_file *xef = to_xe_file(file);
2075         struct drm_xe_vm_destroy *args = data;
2076         struct xe_vm *vm;
2077         int err = 0;
2078
2079         if (XE_IOCTL_DBG(xe, args->pad) ||
2080             XE_IOCTL_DBG(xe, args->reserved[0] || args->reserved[1]))
2081                 return -EINVAL;
2082
2083         mutex_lock(&xef->vm.lock);
2084         vm = xa_load(&xef->vm.xa, args->vm_id);
2085         if (XE_IOCTL_DBG(xe, !vm))
2086                 err = -ENOENT;
2087         else if (XE_IOCTL_DBG(xe, vm->preempt.num_exec_queues))
2088                 err = -EBUSY;
2089         else
2090                 xa_erase(&xef->vm.xa, args->vm_id);
2091         mutex_unlock(&xef->vm.lock);
2092
2093         if (!err)
2094                 xe_vm_close_and_put(vm);
2095
2096         return err;
2097 }
2098
2099 static const u32 region_to_mem_type[] = {
2100         XE_PL_TT,
2101         XE_PL_VRAM0,
2102         XE_PL_VRAM1,
2103 };
2104
2105 static int xe_vm_prefetch(struct xe_vm *vm, struct xe_vma *vma,
2106                           struct xe_exec_queue *q, u32 region,
2107                           struct xe_sync_entry *syncs, u32 num_syncs,
2108                           struct async_op_fence *afence, bool first_op,
2109                           bool last_op)
2110 {
2111         int err;
2112
2113         XE_WARN_ON(region >= ARRAY_SIZE(region_to_mem_type));
2114
2115         if (!xe_vma_has_no_bo(vma)) {
2116                 err = xe_bo_migrate(xe_vma_bo(vma), region_to_mem_type[region]);
2117                 if (err)
2118                         return err;
2119         }
2120
2121         if (vma->tile_mask != (vma->tile_present & ~vma->usm.tile_invalidated)) {
2122                 return xe_vm_bind(vm, vma, q, xe_vma_bo(vma), syncs, num_syncs,
2123                                   afence, true, first_op, last_op);
2124         } else {
2125                 int i;
2126
2127                 /* Nothing to do, signal fences now */
2128                 if (last_op) {
2129                         for (i = 0; i < num_syncs; i++)
2130                                 xe_sync_entry_signal(&syncs[i], NULL,
2131                                                      dma_fence_get_stub());
2132                 }
2133                 if (afence)
2134                         dma_fence_signal(&afence->fence);
2135                 return 0;
2136         }
2137 }
2138
2139 #define VM_BIND_OP(op)  (op & 0xffff)
2140
2141 static void vm_set_async_error(struct xe_vm *vm, int err)
2142 {
2143         lockdep_assert_held(&vm->lock);
2144         vm->async_ops.error = err;
2145 }
2146
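/*
 * Sanity-check a bind operation against the current VMA tree: synchronous
 * MAPs may not overlap an existing mapping, UNMAP/PREFETCH must hit an
 * existing mapping (and match it exactly when synchronous), and UNMAP_ALL
 * requires the BO to have at least one mapping. -ENODATA means there is
 * nothing to do rather than a real error.
 */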
2147 static int vm_bind_ioctl_lookup_vma(struct xe_vm *vm, struct xe_bo *bo,
2148                                     u64 addr, u64 range, u32 op)
2149 {
2150         struct xe_device *xe = vm->xe;
2151         struct xe_vma *vma;
2152         bool async = !!(op & XE_VM_BIND_FLAG_ASYNC);
2153
2154         lockdep_assert_held(&vm->lock);
2155
2156         switch (VM_BIND_OP(op)) {
2157         case XE_VM_BIND_OP_MAP:
2158         case XE_VM_BIND_OP_MAP_USERPTR:
2159                 vma = xe_vm_find_overlapping_vma(vm, addr, range);
2160                 if (XE_IOCTL_DBG(xe, vma && !async))
2161                         return -EBUSY;
2162                 break;
2163         case XE_VM_BIND_OP_UNMAP:
2164         case XE_VM_BIND_OP_PREFETCH:
2165                 vma = xe_vm_find_overlapping_vma(vm, addr, range);
2166                 if (XE_IOCTL_DBG(xe, !vma))
2167                         /* Not an actual error, IOCTL cleans up and returns 0 */
2168                         return -ENODATA;
2169                 if (XE_IOCTL_DBG(xe, (xe_vma_start(vma) != addr ||
2170                                       xe_vma_end(vma) != addr + range) && !async))
2171                         return -EINVAL;
2172                 break;
2173         case XE_VM_BIND_OP_UNMAP_ALL:
2174                 if (XE_IOCTL_DBG(xe, list_empty(&bo->ttm.base.gpuva.list)))
2175                         /* Not an actual error, IOCTL cleans up and returns 0 */
2176                         return -ENODATA;
2177                 break;
2178         default:
2179                 XE_WARN_ON("NOT POSSIBLE");
2180                 return -EINVAL;
2181         }
2182
2183         return 0;
2184 }
2185
2186 static void prep_vma_destroy(struct xe_vm *vm, struct xe_vma *vma,
2187                              bool post_commit)
2188 {
2189         down_read(&vm->userptr.notifier_lock);
2190         vma->gpuva.flags |= XE_VMA_DESTROYED;
2191         up_read(&vm->userptr.notifier_lock);
2192         if (post_commit)
2193                 xe_vm_remove_vma(vm, vma);
2194 }
2195
2196 #undef ULL
2197 #define ULL     unsigned long long
2198
2199 #if IS_ENABLED(CONFIG_DRM_XE_DEBUG_VM)
2200 static void print_op(struct xe_device *xe, struct drm_gpuva_op *op)
2201 {
2202         struct xe_vma *vma;
2203
2204         switch (op->op) {
2205         case DRM_GPUVA_OP_MAP:
2206                 vm_dbg(&xe->drm, "MAP: addr=0x%016llx, range=0x%016llx",
2207                        (ULL)op->map.va.addr, (ULL)op->map.va.range);
2208                 break;
2209         case DRM_GPUVA_OP_REMAP:
2210                 vma = gpuva_to_vma(op->remap.unmap->va);
2211                 vm_dbg(&xe->drm, "REMAP:UNMAP: addr=0x%016llx, range=0x%016llx, keep=%d",
2212                        (ULL)xe_vma_start(vma), (ULL)xe_vma_size(vma),
2213                        op->remap.unmap->keep ? 1 : 0);
2214                 if (op->remap.prev)
2215                         vm_dbg(&xe->drm,
2216                                "REMAP:PREV: addr=0x%016llx, range=0x%016llx",
2217                                (ULL)op->remap.prev->va.addr,
2218                                (ULL)op->remap.prev->va.range);
2219                 if (op->remap.next)
2220                         vm_dbg(&xe->drm,
2221                                "REMAP:NEXT: addr=0x%016llx, range=0x%016llx",
2222                                (ULL)op->remap.next->va.addr,
2223                                (ULL)op->remap.next->va.range);
2224                 break;
2225         case DRM_GPUVA_OP_UNMAP:
2226                 vma = gpuva_to_vma(op->unmap.va);
2227                 vm_dbg(&xe->drm, "UNMAP: addr=0x%016llx, range=0x%016llx, keep=%d",
2228                        (ULL)xe_vma_start(vma), (ULL)xe_vma_size(vma),
2229                        op->unmap.keep ? 1 : 0);
2230                 break;
2231         case DRM_GPUVA_OP_PREFETCH:
2232                 vma = gpuva_to_vma(op->prefetch.va);
2233                 vm_dbg(&xe->drm, "PREFETCH: addr=0x%016llx, range=0x%016llx",
2234                        (ULL)xe_vma_start(vma), (ULL)xe_vma_size(vma));
2235                 break;
2236         default:
2237                 XE_WARN_ON("NOT POSSIBLE");
2238         }
2239 }
2240 #else
2241 static void print_op(struct xe_device *xe, struct drm_gpuva_op *op)
2242 {
2243 }
2244 #endif
2245
2246 /*
2247  * Create the operations list from the IOCTL arguments and set up the operation
2248  * fields so the parse and commit steps are decoupled from them. This step can fail.
2249  */
2250 static struct drm_gpuva_ops *
2251 vm_bind_ioctl_ops_create(struct xe_vm *vm, struct xe_bo *bo,
2252                          u64 bo_offset_or_userptr, u64 addr, u64 range,
2253                          u32 operation, u8 tile_mask, u32 region)
2254 {
2255         struct drm_gem_object *obj = bo ? &bo->ttm.base : NULL;
2256         struct drm_gpuva_ops *ops;
2257         struct drm_gpuva_op *__op;
2258         struct xe_vma_op *op;
2259         struct drm_gpuvm_bo *vm_bo;
2260         int err;
2261
2262         lockdep_assert_held_write(&vm->lock);
2263
2264         vm_dbg(&vm->xe->drm,
2265                "op=%d, addr=0x%016llx, range=0x%016llx, bo_offset_or_userptr=0x%016llx",
2266                VM_BIND_OP(operation), (ULL)addr, (ULL)range,
2267                (ULL)bo_offset_or_userptr);
2268
2269         switch (VM_BIND_OP(operation)) {
2270         case XE_VM_BIND_OP_MAP:
2271         case XE_VM_BIND_OP_MAP_USERPTR:
2272                 ops = drm_gpuvm_sm_map_ops_create(&vm->gpuvm, addr, range,
2273                                                   obj, bo_offset_or_userptr);
2274                 if (IS_ERR(ops))
2275                         return ops;
2276
2277                 drm_gpuva_for_each_op(__op, ops) {
2278                         struct xe_vma_op *op = gpuva_op_to_vma_op(__op);
2279
2280                         op->tile_mask = tile_mask;
2281                         op->map.immediate =
2282                                 operation & XE_VM_BIND_FLAG_IMMEDIATE;
2283                         op->map.read_only =
2284                                 operation & XE_VM_BIND_FLAG_READONLY;
2285                         op->map.is_null = operation & XE_VM_BIND_FLAG_NULL;
2286                 }
2287                 break;
2288         case XE_VM_BIND_OP_UNMAP:
2289                 ops = drm_gpuvm_sm_unmap_ops_create(&vm->gpuvm, addr, range);
2290                 if (IS_ERR(ops))
2291                         return ops;
2292
2293                 drm_gpuva_for_each_op(__op, ops) {
2294                         struct xe_vma_op *op = gpuva_op_to_vma_op(__op);
2295
2296                         op->tile_mask = tile_mask;
2297                 }
2298                 break;
2299         case XE_VM_BIND_OP_PREFETCH:
2300                 ops = drm_gpuvm_prefetch_ops_create(&vm->gpuvm, addr, range);
2301                 if (IS_ERR(ops))
2302                         return ops;
2303
2304                 drm_gpuva_for_each_op(__op, ops) {
2305                         struct xe_vma_op *op = gpuva_op_to_vma_op(__op);
2306
2307                         op->tile_mask = tile_mask;
2308                         op->prefetch.region = region;
2309                 }
2310                 break;
2311         case XE_VM_BIND_OP_UNMAP_ALL:
2312                 XE_WARN_ON(!bo);
2313
2314                 err = xe_bo_lock(bo, true);
2315                 if (err)
2316                         return ERR_PTR(err);
2317
2318                 vm_bo = drm_gpuvm_bo_find(&vm->gpuvm, obj);
2319                 if (!vm_bo)
2320                         break;
2321
2322                 ops = drm_gpuvm_bo_unmap_ops_create(vm_bo);
2323                 drm_gpuvm_bo_put(vm_bo);
2324                 xe_bo_unlock(bo);
2325                 if (IS_ERR(ops))
2326                         return ops;
2327
2328                 drm_gpuva_for_each_op(__op, ops) {
2329                         struct xe_vma_op *op = gpuva_op_to_vma_op(__op);
2330
2331                         op->tile_mask = tile_mask;
2332                 }
2333                 break;
2334         default:
2335                 XE_WARN_ON("NOT POSSIBLE");
2336                 ops = ERR_PTR(-EINVAL);
2337         }
2338
2339 #ifdef TEST_VM_ASYNC_OPS_ERROR
2340         if (operation & FORCE_ASYNC_OP_ERROR) {
2341                 op = list_first_entry_or_null(&ops->list, struct xe_vma_op,
2342                                               base.entry);
2343                 if (op)
2344                         op->inject_error = true;
2345         }
2346 #endif
2347
2348         if (!IS_ERR(ops))
2349                 drm_gpuva_for_each_op(__op, ops)
2350                         print_op(vm->xe, __op);
2351
2352         return ops;
2353 }
2354
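/*
 * Create a VMA for a MAP (or REMAP prev/next) GPUVA op. Userptr VMAs get
 * their pages pinned up front; VMAs backed by an external (non-VM-private)
 * BO are added to the VM's external object list and get the VM's preempt
 * fences added to the BO.
 */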
2355 static struct xe_vma *new_vma(struct xe_vm *vm, struct drm_gpuva_op_map *op,
2356                               u8 tile_mask, bool read_only, bool is_null)
2357 {
2358         struct xe_bo *bo = op->gem.obj ? gem_to_xe_bo(op->gem.obj) : NULL;
2359         struct xe_vma *vma;
2360         int err;
2361
2362         lockdep_assert_held_write(&vm->lock);
2363
2364         if (bo) {
2365                 err = xe_bo_lock(bo, true);
2366                 if (err)
2367                         return ERR_PTR(err);
2368         }
2369         vma = xe_vma_create(vm, bo, op->gem.offset,
2370                             op->va.addr, op->va.addr +
2371                             op->va.range - 1, read_only, is_null,
2372                             tile_mask);
2373         if (bo)
2374                 xe_bo_unlock(bo);
2375
2376         if (xe_vma_is_userptr(vma)) {
2377                 err = xe_vma_userptr_pin_pages(vma);
2378                 if (err) {
2379                         prep_vma_destroy(vm, vma, false);
2380                         xe_vma_destroy_unlocked(vma);
2381                         return ERR_PTR(err);
2382                 }
2383         } else if (!xe_vma_has_no_bo(vma) && !bo->vm) {
2384                 vm_insert_extobj(vm, vma);
2385                 err = add_preempt_fences(vm, bo);
2386                 if (err) {
2387                         prep_vma_destroy(vm, vma, false);
2388                         xe_vma_destroy_unlocked(vma);
2389                         return ERR_PTR(err);
2390                 }
2391         }
2392
2393         return vma;
2394 }
2395
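/*
 * Largest page-table entry size recorded for a VMA via the XE_VMA_PTE_*
 * flags. Used below to decide whether a REMAP may skip rebinding an already
 * suitably aligned prev/next portion of the original mapping.
 */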
2396 static u64 xe_vma_max_pte_size(struct xe_vma *vma)
2397 {
2398         if (vma->gpuva.flags & XE_VMA_PTE_1G)
2399                 return SZ_1G;
2400         else if (vma->gpuva.flags & XE_VMA_PTE_2M)
2401                 return SZ_2M;
2402
2403         return SZ_4K;
2404 }
2405
2406 static u64 xe_vma_set_pte_size(struct xe_vma *vma, u64 size)
2407 {
2408         switch (size) {
2409         case SZ_1G:
2410                 vma->gpuva.flags |= XE_VMA_PTE_1G;
2411                 break;
2412         case SZ_2M:
2413                 vma->gpuva.flags |= XE_VMA_PTE_2M;
2414                 break;
2415         }
2416
2417         return SZ_4K;
2418 }
2419
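/*
 * Commit an operation to the VM's GPUVA tree: insert newly created VMAs and
 * mark unmapped ones as destroyed, recording which parts were committed so a
 * failure can be unwound by xe_vma_op_unwind().
 */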
2420 static int xe_vma_op_commit(struct xe_vm *vm, struct xe_vma_op *op)
2421 {
2422         int err = 0;
2423
2424         lockdep_assert_held_write(&vm->lock);
2425
2426         switch (op->base.op) {
2427         case DRM_GPUVA_OP_MAP:
2428                 err |= xe_vm_insert_vma(vm, op->map.vma);
2429                 if (!err)
2430                         op->flags |= XE_VMA_OP_COMMITTED;
2431                 break;
2432         case DRM_GPUVA_OP_REMAP:
2433                 prep_vma_destroy(vm, gpuva_to_vma(op->base.remap.unmap->va),
2434                                  true);
2435                 op->flags |= XE_VMA_OP_COMMITTED;
2436
2437                 if (op->remap.prev) {
2438                         err |= xe_vm_insert_vma(vm, op->remap.prev);
2439                         if (!err)
2440                                 op->flags |= XE_VMA_OP_PREV_COMMITTED;
2441                         if (!err && op->remap.skip_prev)
2442                                 op->remap.prev = NULL;
2443                 }
2444                 if (op->remap.next) {
2445                         err |= xe_vm_insert_vma(vm, op->remap.next);
2446                         if (!err)
2447                                 op->flags |= XE_VMA_OP_NEXT_COMMITTED;
2448                         if (!err && op->remap.skip_next)
2449                                 op->remap.next = NULL;
2450                 }
2451
2452                 /* Adjust for partial unbind after removing VMA from VM */
2453                 if (!err) {
2454                         op->base.remap.unmap->va->va.addr = op->remap.start;
2455                         op->base.remap.unmap->va->va.range = op->remap.range;
2456                 }
2457                 break;
2458         case DRM_GPUVA_OP_UNMAP:
2459                 prep_vma_destroy(vm, gpuva_to_vma(op->base.unmap.va), true);
2460                 op->flags |= XE_VMA_OP_COMMITTED;
2461                 break;
2462         case DRM_GPUVA_OP_PREFETCH:
2463                 op->flags |= XE_VMA_OP_COMMITTED;
2464                 break;
2465         default:
2466                 XE_WARN_ON("NOT POSSIBLE");
2467         }
2468
2469         return err;
2470 }
2471
2472
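/*
 * Turn the drm_gpuva_ops of one bind into xe_vma_ops on @ops_list: allocate
 * new VMAs for MAP/REMAP ops and commit them to the VM, attach the syncs to
 * the first and last operation and, for async binds with syncs, create the
 * async-op out-fence for the final operation.
 */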
2473 static int vm_bind_ioctl_ops_parse(struct xe_vm *vm, struct xe_exec_queue *q,
2474                                    struct drm_gpuva_ops *ops,
2475                                    struct xe_sync_entry *syncs, u32 num_syncs,
2476                                    struct list_head *ops_list, bool last,
2477                                    bool async)
2478 {
2479         struct xe_vma_op *last_op = NULL;
2480         struct async_op_fence *fence = NULL;
2481         struct drm_gpuva_op *__op;
2482         int err = 0;
2483
2484         lockdep_assert_held_write(&vm->lock);
2485
2486         if (last && num_syncs && async) {
2487                 u64 seqno;
2488
2489                 fence = kmalloc(sizeof(*fence), GFP_KERNEL);
2490                 if (!fence)
2491                         return -ENOMEM;
2492
2493                 seqno = q ? ++q->bind.fence_seqno : ++vm->async_ops.fence.seqno;
2494                 dma_fence_init(&fence->fence, &async_op_fence_ops,
2495                                &vm->async_ops.lock, q ? q->bind.fence_ctx :
2496                                vm->async_ops.fence.context, seqno);
2497
2498                 if (!xe_vm_no_dma_fences(vm)) {
2499                         fence->vm = vm;
2500                         fence->started = false;
2501                         init_waitqueue_head(&fence->wq);
2502                 }
2503         }
2504
2505         drm_gpuva_for_each_op(__op, ops) {
2506                 struct xe_vma_op *op = gpuva_op_to_vma_op(__op);
2507                 bool first = list_empty(ops_list);
2508
2509                 XE_WARN_ON(!first && !async);
2510
2511                 INIT_LIST_HEAD(&op->link);
2512                 list_add_tail(&op->link, ops_list);
2513
2514                 if (first) {
2515                         op->flags |= XE_VMA_OP_FIRST;
2516                         op->num_syncs = num_syncs;
2517                         op->syncs = syncs;
2518                 }
2519
2520                 op->q = q;
2521
2522                 switch (op->base.op) {
2523                 case DRM_GPUVA_OP_MAP:
2524                 {
2525                         struct xe_vma *vma;
2526
2527                         vma = new_vma(vm, &op->base.map,
2528                                       op->tile_mask, op->map.read_only,
2529                                       op->map.is_null);
2530                         if (IS_ERR(vma)) {
2531                                 err = PTR_ERR(vma);
2532                                 goto free_fence;
2533                         }
2534
2535                         op->map.vma = vma;
2536                         break;
2537                 }
2538                 case DRM_GPUVA_OP_REMAP:
2539                 {
2540                         struct xe_vma *old =
2541                                 gpuva_to_vma(op->base.remap.unmap->va);
2542
2543                         op->remap.start = xe_vma_start(old);
2544                         op->remap.range = xe_vma_size(old);
2545
2546                         if (op->base.remap.prev) {
2547                                 struct xe_vma *vma;
2548                                 bool read_only =
2549                                         op->base.remap.unmap->va->flags &
2550                                         XE_VMA_READ_ONLY;
2551                                 bool is_null =
2552                                         op->base.remap.unmap->va->flags &
2553                                         DRM_GPUVA_SPARSE;
2554
2555                                 vma = new_vma(vm, op->base.remap.prev,
2556                                               op->tile_mask, read_only,
2557                                               is_null);
2558                                 if (IS_ERR(vma)) {
2559                                         err = PTR_ERR(vma);
2560                                         goto free_fence;
2561                                 }
2562
2563                                 op->remap.prev = vma;
2564
2565                                 /*
2566                                  * Userptr creates a new SG mapping so
2567                                  * we must also rebind.
2568                                  */
2569                                 op->remap.skip_prev = !xe_vma_is_userptr(old) &&
2570                                         IS_ALIGNED(xe_vma_end(vma),
2571                                                    xe_vma_max_pte_size(old));
2572                                 if (op->remap.skip_prev) {
2573                                         xe_vma_set_pte_size(vma, xe_vma_max_pte_size(old));
2574                                         op->remap.range -=
2575                                                 xe_vma_end(vma) -
2576                                                 xe_vma_start(old);
2577                                         op->remap.start = xe_vma_end(vma);
2578                                 }
2579                         }
2580
2581                         if (op->base.remap.next) {
2582                                 struct xe_vma *vma;
2583                                 bool read_only =
2584                                         op->base.remap.unmap->va->flags &
2585                                         XE_VMA_READ_ONLY;
2586
2587                                 bool is_null =
2588                                         op->base.remap.unmap->va->flags &
2589                                         DRM_GPUVA_SPARSE;
2590
2591                                 vma = new_vma(vm, op->base.remap.next,
2592                                               op->tile_mask, read_only,
2593                                               is_null);
2594                                 if (IS_ERR(vma)) {
2595                                         err = PTR_ERR(vma);
2596                                         goto free_fence;
2597                                 }
2598
2599                                 op->remap.next = vma;
2600
2601                                 /*
2602                                  * Userptr creates a new SG mapping so
2603                                  * we must also rebind.
2604                                  */
2605                                 op->remap.skip_next = !xe_vma_is_userptr(old) &&
2606                                         IS_ALIGNED(xe_vma_start(vma),
2607                                                    xe_vma_max_pte_size(old));
2608                                 if (op->remap.skip_next) {
2609                                         xe_vma_set_pte_size(vma, xe_vma_max_pte_size(old));
2610                                         op->remap.range -=
2611                                                 xe_vma_end(old) -
2612                                                 xe_vma_start(vma);
2613                                 }
2614                         }
2615                         break;
2616                 }
2617                 case DRM_GPUVA_OP_UNMAP:
2618                 case DRM_GPUVA_OP_PREFETCH:
2619                         /* Nothing to do */
2620                         break;
2621                 default:
2622                         XE_WARN_ON("NOT POSSIBLE");
2623                 }
2624
2625                 last_op = op;
2626
2627                 err = xe_vma_op_commit(vm, op);
2628                 if (err)
2629                         goto free_fence;
2630         }
2631
2632         /* FIXME: Unhandled corner case */
2633         XE_WARN_ON(!last_op && last && !list_empty(ops_list));
2634
2635         if (!last_op)
2636                 goto free_fence;
2637         last_op->ops = ops;
2638         if (last) {
2639                 last_op->flags |= XE_VMA_OP_LAST;
2640                 last_op->num_syncs = num_syncs;
2641                 last_op->syncs = syncs;
2642                 last_op->fence = fence;
2643         }
2644
2645         return 0;
2646
2647 free_fence:
2648         kfree(fence);
2649         return err;
2650 }
2651
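/*
 * Execute a single parsed operation with the VM and the VMA's BO locked via
 * @exec. REMAP is carried out as an unbind of the old VMA followed by binds
 * of the optional prev and/or next VMAs.
 */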
2652 static int op_execute(struct drm_exec *exec, struct xe_vm *vm,
2653                       struct xe_vma *vma, struct xe_vma_op *op)
2654 {
2655         int err;
2656
2657         lockdep_assert_held_write(&vm->lock);
2658
2659         err = xe_vm_prepare_vma(exec, vma, 1);
2660         if (err)
2661                 return err;
2662
2663         xe_vm_assert_held(vm);
2664         xe_bo_assert_held(xe_vma_bo(vma));
2665
2666         switch (op->base.op) {
2667         case DRM_GPUVA_OP_MAP:
2668                 err = xe_vm_bind(vm, vma, op->q, xe_vma_bo(vma),
2669                                  op->syncs, op->num_syncs, op->fence,
2670                                  op->map.immediate || !xe_vm_in_fault_mode(vm),
2671                                  op->flags & XE_VMA_OP_FIRST,
2672                                  op->flags & XE_VMA_OP_LAST);
2673                 break;
2674         case DRM_GPUVA_OP_REMAP:
2675         {
2676                 bool prev = !!op->remap.prev;
2677                 bool next = !!op->remap.next;
2678
2679                 if (!op->remap.unmap_done) {
2680                         if (prev || next) {
2681                                 vm->async_ops.munmap_rebind_inflight = true;
2682                                 vma->gpuva.flags |= XE_VMA_FIRST_REBIND;
2683                         }
2684                         err = xe_vm_unbind(vm, vma, op->q, op->syncs,
2685                                            op->num_syncs,
2686                                            !prev && !next ? op->fence : NULL,
2687                                            op->flags & XE_VMA_OP_FIRST,
2688                                            op->flags & XE_VMA_OP_LAST && !prev &&
2689                                            !next);
2690                         if (err)
2691                                 break;
2692                         op->remap.unmap_done = true;
2693                 }
2694
2695                 if (prev) {
2696                         op->remap.prev->gpuva.flags |= XE_VMA_LAST_REBIND;
2697                         err = xe_vm_bind(vm, op->remap.prev, op->q,
2698                                          xe_vma_bo(op->remap.prev), op->syncs,
2699                                          op->num_syncs,
2700                                          !next ? op->fence : NULL, true, false,
2701                                          op->flags & XE_VMA_OP_LAST && !next);
2702                         op->remap.prev->gpuva.flags &= ~XE_VMA_LAST_REBIND;
2703                         if (err)
2704                                 break;
2705                         op->remap.prev = NULL;
2706                 }
2707
2708                 if (next) {
2709                         op->remap.next->gpuva.flags |= XE_VMA_LAST_REBIND;
2710                         err = xe_vm_bind(vm, op->remap.next, op->q,
2711                                          xe_vma_bo(op->remap.next),
2712                                          op->syncs, op->num_syncs,
2713                                          op->fence, true, false,
2714                                          op->flags & XE_VMA_OP_LAST);
2715                         op->remap.next->gpuva.flags &= ~XE_VMA_LAST_REBIND;
2716                         if (err)
2717                                 break;
2718                         op->remap.next = NULL;
2719                 }
2720                 vm->async_ops.munmap_rebind_inflight = false;
2721
2722                 break;
2723         }
2724         case DRM_GPUVA_OP_UNMAP:
2725                 err = xe_vm_unbind(vm, vma, op->q, op->syncs,
2726                                    op->num_syncs, op->fence,
2727                                    op->flags & XE_VMA_OP_FIRST,
2728                                    op->flags & XE_VMA_OP_LAST);
2729                 break;
2730         case DRM_GPUVA_OP_PREFETCH:
2731                 err = xe_vm_prefetch(vm, vma, op->q, op->prefetch.region,
2732                                      op->syncs, op->num_syncs, op->fence,
2733                                      op->flags & XE_VMA_OP_FIRST,
2734                                      op->flags & XE_VMA_OP_LAST);
2735                 break;
2736         default:
2737                 XE_WARN_ON("NOT POSSIBLE");
2738         }
2739
2740         if (err)
2741                 trace_xe_vma_fail(vma);
2742
2743         return err;
2744 }
2745
2746 static int __xe_vma_op_execute(struct xe_vm *vm, struct xe_vma *vma,
2747                                struct xe_vma_op *op)
2748 {
2749         struct drm_exec exec;
2750         int err;
2751
2752 retry_userptr:
2753         drm_exec_init(&exec, DRM_EXEC_INTERRUPTIBLE_WAIT);
2754         drm_exec_until_all_locked(&exec) {
2755                 err = op_execute(&exec, vm, vma, op);
2756                 drm_exec_retry_on_contention(&exec);
2757                 if (err)
2758                         break;
2759         }
2760         drm_exec_fini(&exec);
2761
2762         if (err == -EAGAIN && xe_vma_is_userptr(vma)) {
2763                 lockdep_assert_held_write(&vm->lock);
2764                 err = xe_vma_userptr_pin_pages(vma);
2765                 if (!err)
2766                         goto retry_userptr;
2767
2768                 trace_xe_vma_fail(vma);
2769         }
2770
2771         return err;
2772 }
2773
2774 static int xe_vma_op_execute(struct xe_vm *vm, struct xe_vma_op *op)
2775 {
2776         int ret = 0;
2777
2778         lockdep_assert_held_write(&vm->lock);
2779
2780 #ifdef TEST_VM_ASYNC_OPS_ERROR
2781         if (op->inject_error) {
2782                 op->inject_error = false;
2783                 return -ENOMEM;
2784         }
2785 #endif
2786
2787         switch (op->base.op) {
2788         case DRM_GPUVA_OP_MAP:
2789                 ret = __xe_vma_op_execute(vm, op->map.vma, op);
2790                 break;
2791         case DRM_GPUVA_OP_REMAP:
2792         {
2793                 struct xe_vma *vma;
2794
2795                 if (!op->remap.unmap_done)
2796                         vma = gpuva_to_vma(op->base.remap.unmap->va);
2797                 else if (op->remap.prev)
2798                         vma = op->remap.prev;
2799                 else
2800                         vma = op->remap.next;
2801
2802                 ret = __xe_vma_op_execute(vm, vma, op);
2803                 break;
2804         }
2805         case DRM_GPUVA_OP_UNMAP:
2806                 ret = __xe_vma_op_execute(vm, gpuva_to_vma(op->base.unmap.va),
2807                                           op);
2808                 break;
2809         case DRM_GPUVA_OP_PREFETCH:
2810                 ret = __xe_vma_op_execute(vm,
2811                                           gpuva_to_vma(op->base.prefetch.va),
2812                                           op);
2813                 break;
2814         default:
2815                 XE_WARN_ON("NOT POSSIBLE");
2816         }
2817
2818         return ret;
2819 }
2820
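/*
 * Release what an operation still holds: for the last op of an IOCTL the
 * syncs, exec queue reference, async fence and a VM reference; for every op
 * the pending-list link and the drm_gpuva_ops.
 */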
2821 static void xe_vma_op_cleanup(struct xe_vm *vm, struct xe_vma_op *op)
2822 {
2823         bool last = op->flags & XE_VMA_OP_LAST;
2824
2825         if (last) {
2826                 while (op->num_syncs--)
2827                         xe_sync_entry_cleanup(&op->syncs[op->num_syncs]);
2828                 kfree(op->syncs);
2829                 if (op->q)
2830                         xe_exec_queue_put(op->q);
2831                 if (op->fence)
2832                         dma_fence_put(&op->fence->fence);
2833         }
2834         if (!list_empty(&op->link)) {
2835                 spin_lock_irq(&vm->async_ops.lock);
2836                 list_del(&op->link);
2837                 spin_unlock_irq(&vm->async_ops.lock);
2838         }
2839         if (op->ops)
2840                 drm_gpuva_ops_free(&vm->gpuvm, op->ops);
2841         if (last)
2842                 xe_vm_put(vm);
2843 }
2844
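/*
 * Undo what the parse step and xe_vma_op_commit() did for a single op:
 * destroy VMAs created for MAP/REMAP and re-insert or un-mark VMAs that were
 * removed or flagged as destroyed, depending on how far the commit got.
 */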
2845 static void xe_vma_op_unwind(struct xe_vm *vm, struct xe_vma_op *op,
2846                              bool post_commit, bool prev_post_commit,
2847                              bool next_post_commit)
2848 {
2849         lockdep_assert_held_write(&vm->lock);
2850
2851         switch (op->base.op) {
2852         case DRM_GPUVA_OP_MAP:
2853                 if (op->map.vma) {
2854                         prep_vma_destroy(vm, op->map.vma, post_commit);
2855                         xe_vma_destroy_unlocked(op->map.vma);
2856                 }
2857                 break;
2858         case DRM_GPUVA_OP_UNMAP:
2859         {
2860                 struct xe_vma *vma = gpuva_to_vma(op->base.unmap.va);
2861
2862                 if (vma) {
2863                         down_read(&vm->userptr.notifier_lock);
2864                         vma->gpuva.flags &= ~XE_VMA_DESTROYED;
2865                         up_read(&vm->userptr.notifier_lock);
2866                         if (post_commit)
2867                                 xe_vm_insert_vma(vm, vma);
2868                 }
2869                 break;
2870         }
2871         case DRM_GPUVA_OP_REMAP:
2872         {
2873                 struct xe_vma *vma = gpuva_to_vma(op->base.remap.unmap->va);
2874
2875                 if (op->remap.prev) {
2876                         prep_vma_destroy(vm, op->remap.prev, prev_post_commit);
2877                         xe_vma_destroy_unlocked(op->remap.prev);
2878                 }
2879                 if (op->remap.next) {
2880                         prep_vma_destroy(vm, op->remap.next, next_post_commit);
2881                         xe_vma_destroy_unlocked(op->remap.next);
2882                 }
2883                 if (vma) {
2884                         down_read(&vm->userptr.notifier_lock);
2885                         vma->gpuva.flags &= ~XE_VMA_DESTROYED;
2886                         up_read(&vm->userptr.notifier_lock);
2887                         if (post_commit)
2888                                 xe_vm_insert_vma(vm, vma);
2889                 }
2890                 break;
2891         }
2892         case DRM_GPUVA_OP_PREFETCH:
2893                 /* Nothing to do */
2894                 break;
2895         default:
2896                 XE_WARN_ON("NOT POSSIBLE");
2897         }
2898 }
2899
2900 static struct xe_vma_op *next_vma_op(struct xe_vm *vm)
2901 {
2902         return list_first_entry_or_null(&vm->async_ops.pending,
2903                                         struct xe_vma_op, link);
2904 }
2905
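/*
 * Worker draining the VM's pending async bind operations. Execution stops on
 * the first error, which is latched via vm_set_async_error(); once the VM is
 * closed the remaining operations are not executed but only have their VMAs
 * destroyed, their fences signalled and their resources cleaned up.
 */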
2906 static void xe_vma_op_work_func(struct work_struct *w)
2907 {
2908         struct xe_vm *vm = container_of(w, struct xe_vm, async_ops.work);
2909
2910         for (;;) {
2911                 struct xe_vma_op *op;
2912                 int err;
2913
2914                 if (vm->async_ops.error && !xe_vm_is_closed(vm))
2915                         break;
2916
2917                 spin_lock_irq(&vm->async_ops.lock);
2918                 op = next_vma_op(vm);
2919                 spin_unlock_irq(&vm->async_ops.lock);
2920
2921                 if (!op)
2922                         break;
2923
2924                 if (!xe_vm_is_closed(vm)) {
2925                         down_write(&vm->lock);
2926                         err = xe_vma_op_execute(vm, op);
2927                         if (err) {
2928                                 drm_warn(&vm->xe->drm,
2929                                          "Async VM op(%d) failed with %d",
2930                                          op->base.op, err);
2931                                 vm_set_async_error(vm, err);
2932                                 up_write(&vm->lock);
2933
2934                                 if (vm->async_ops.error_capture.addr)
2935                                         vm_error_capture(vm, err, 0, 0, 0);
2936                                 break;
2937                         }
2938                         up_write(&vm->lock);
2939                 } else {
2940                         struct xe_vma *vma;
2941
2942                         switch (op->base.op) {
2943                         case DRM_GPUVA_OP_REMAP:
2944                                 vma = gpuva_to_vma(op->base.remap.unmap->va);
2945                                 trace_xe_vma_flush(vma);
2946
2947                                 down_write(&vm->lock);
2948                                 xe_vma_destroy_unlocked(vma);
2949                                 up_write(&vm->lock);
2950                                 break;
2951                         case DRM_GPUVA_OP_UNMAP:
2952                                 vma = gpuva_to_vma(op->base.unmap.va);
2953                                 trace_xe_vma_flush(vma);
2954
2955                                 down_write(&vm->lock);
2956                                 xe_vma_destroy_unlocked(vma);
2957                                 up_write(&vm->lock);
2958                                 break;
2959                         default:
2960                                 /* Nothing to do */
2961                                 break;
2962                         }
2963
2964                         if (op->fence && !test_bit(DMA_FENCE_FLAG_SIGNALED_BIT,
2965                                                    &op->fence->fence.flags)) {
2966                                 if (!xe_vm_no_dma_fences(vm)) {
2967                                         op->fence->started = true;
2968                                         wake_up_all(&op->fence->wq);
2969                                 }
2970                                 dma_fence_signal(&op->fence->fence);
2971                         }
2972                 }
2973
2974                 xe_vma_op_cleanup(vm, op);
2975         }
2976 }
2977
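/*
 * Hand the parsed operations over for execution. In the synchronous case the
 * last operation on the list is executed directly; in the async case the
 * async-op fence is installed on the out-syncs (or signalled immediately if
 * none take it) and the list is spliced onto the VM's pending list for the
 * async worker.
 */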
2978 static int vm_bind_ioctl_ops_execute(struct xe_vm *vm,
2979                                      struct list_head *ops_list, bool async)
2980 {
2981         struct xe_vma_op *op, *last_op, *next;
2982         int err;
2983
2984         lockdep_assert_held_write(&vm->lock);
2985
2986         list_for_each_entry(op, ops_list, link)
2987                 last_op = op;
2988
2989         if (!async) {
2990                 err = xe_vma_op_execute(vm, last_op);
2991                 if (err)
2992                         goto unwind;
2993                 xe_vma_op_cleanup(vm, last_op);
2994         } else {
2995                 int i;
2996                 bool installed = false;
2997
2998                 for (i = 0; i < last_op->num_syncs; i++)
2999                         installed |= xe_sync_entry_signal(&last_op->syncs[i],
3000                                                           NULL,
3001                                                           &last_op->fence->fence);
3002                 if (!installed && last_op->fence)
3003                         dma_fence_signal(&last_op->fence->fence);
3004
3005                 spin_lock_irq(&vm->async_ops.lock);
3006                 list_splice_tail(ops_list, &vm->async_ops.pending);
3007                 spin_unlock_irq(&vm->async_ops.lock);
3008
3009                 if (!vm->async_ops.error)
3010                         queue_work(system_unbound_wq, &vm->async_ops.work);
3011         }
3012
3013         return 0;
3014
3015 unwind:
3016         list_for_each_entry_reverse(op, ops_list, link)
3017                 xe_vma_op_unwind(vm, op, op->flags & XE_VMA_OP_COMMITTED,
3018                                  op->flags & XE_VMA_OP_PREV_COMMITTED,
3019                                  op->flags & XE_VMA_OP_NEXT_COMMITTED);
3020         list_for_each_entry_safe(op, next, ops_list, link)
3021                 xe_vma_op_cleanup(vm, op);
3022
3023         return err;
3024 }
3025
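/*
 * Undo, in reverse creation order, GPUVA ops that were built but never
 * executed, rolling back any VMA state they had already committed, and free
 * the op lists.
 */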
3026 static void vm_bind_ioctl_ops_unwind(struct xe_vm *vm,
3027                                      struct drm_gpuva_ops **ops,
3028                                      int num_ops_list)
3029 {
3030         int i;
3031
3032         for (i = num_ops_list - 1; i >= 0; --i) {
3033                 struct drm_gpuva_ops *__ops = ops[i];
3034                 struct drm_gpuva_op *__op;
3035
3036                 if (!__ops)
3037                         continue;
3038
3039                 drm_gpuva_for_each_op_reverse(__op, __ops) {
3040                         struct xe_vma_op *op = gpuva_op_to_vma_op(__op);
3041
3042                         xe_vma_op_unwind(vm, op,
3043                                          op->flags & XE_VMA_OP_COMMITTED,
3044                                          op->flags & XE_VMA_OP_PREV_COMMITTED,
3045                                          op->flags & XE_VMA_OP_NEXT_COMMITTED);
3046                 }
3047
3048                 drm_gpuva_ops_free(&vm->gpuvm, __ops);
3049         }
3050 }
3051
3052 #ifdef TEST_VM_ASYNC_OPS_ERROR
3053 #define SUPPORTED_FLAGS \
3054         (FORCE_ASYNC_OP_ERROR | XE_VM_BIND_FLAG_ASYNC | \
3055          XE_VM_BIND_FLAG_READONLY | XE_VM_BIND_FLAG_IMMEDIATE | \
3056          XE_VM_BIND_FLAG_NULL | 0xffff)
3057 #else
3058 #define SUPPORTED_FLAGS \
3059         (XE_VM_BIND_FLAG_ASYNC | XE_VM_BIND_FLAG_READONLY | \
3060          XE_VM_BIND_FLAG_IMMEDIATE | XE_VM_BIND_FLAG_NULL | 0xffff)
3061 #endif
3062 #define XE_64K_PAGE_MASK 0xffffull
3063
3064 #define MAX_BINDS       512     /* FIXME: Picking random upper limit */
3065
3066 static int vm_bind_ioctl_check_args(struct xe_device *xe,
3067                                     struct drm_xe_vm_bind *args,
3068                                     struct drm_xe_vm_bind_op **bind_ops,
3069                                     bool *async)
3070 {
3071         int err;
3072         int i;
3073
3074         if (XE_IOCTL_DBG(xe, args->extensions) ||
3075             XE_IOCTL_DBG(xe, !args->num_binds) ||
3076             XE_IOCTL_DBG(xe, args->num_binds > MAX_BINDS))
3077                 return -EINVAL;
3078
3079         if (args->num_binds > 1) {
3080                 u64 __user *bind_user =
3081                         u64_to_user_ptr(args->vector_of_binds);
3082
3083                 *bind_ops = kmalloc(sizeof(struct drm_xe_vm_bind_op) *
3084                                     args->num_binds, GFP_KERNEL);
3085                 if (!*bind_ops)
3086                         return -ENOMEM;
3087
3088                 err = copy_from_user(*bind_ops, bind_user,
3089                                      sizeof(struct drm_xe_vm_bind_op) *
3090                                      args->num_binds);
3091                 if (XE_IOCTL_DBG(xe, err)) {
3092                         err = -EFAULT;
3093                         goto free_bind_ops;
3094                 }
3095         } else {
3096                 *bind_ops = &args->bind;
3097         }
3098
3099         for (i = 0; i < args->num_binds; ++i) {
3100                 u64 range = (*bind_ops)[i].range;
3101                 u64 addr = (*bind_ops)[i].addr;
3102                 u32 op = (*bind_ops)[i].op;
3103                 u32 obj = (*bind_ops)[i].obj;
3104                 u64 obj_offset = (*bind_ops)[i].obj_offset;
3105                 u32 region = (*bind_ops)[i].region;
3106                 bool is_null = op & XE_VM_BIND_FLAG_NULL;
3107
3108                 if (i == 0) {
3109                         *async = !!(op & XE_VM_BIND_FLAG_ASYNC);
3110                 } else if (XE_IOCTL_DBG(xe, !*async) ||
3111                            XE_IOCTL_DBG(xe, !(op & XE_VM_BIND_FLAG_ASYNC)) ||
3112                            XE_IOCTL_DBG(xe, VM_BIND_OP(op) ==
3113                                         XE_VM_BIND_OP_RESTART)) {
3114                         err = -EINVAL;
3115                         goto free_bind_ops;
3116                 }
3117
3118                 if (XE_IOCTL_DBG(xe, !*async &&
3119                                  VM_BIND_OP(op) == XE_VM_BIND_OP_UNMAP_ALL)) {
3120                         err = -EINVAL;
3121                         goto free_bind_ops;
3122                 }
3123
3124                 if (XE_IOCTL_DBG(xe, !*async &&
3125                                  VM_BIND_OP(op) == XE_VM_BIND_OP_PREFETCH)) {
3126                         err = -EINVAL;
3127                         goto free_bind_ops;
3128                 }
3129
3130                 if (XE_IOCTL_DBG(xe, VM_BIND_OP(op) >
3131                                  XE_VM_BIND_OP_PREFETCH) ||
3132                     XE_IOCTL_DBG(xe, op & ~SUPPORTED_FLAGS) ||
3133                     XE_IOCTL_DBG(xe, obj && is_null) ||
3134                     XE_IOCTL_DBG(xe, obj_offset && is_null) ||
3135                     XE_IOCTL_DBG(xe, VM_BIND_OP(op) != XE_VM_BIND_OP_MAP &&
3136                                  is_null) ||
3137                     XE_IOCTL_DBG(xe, !obj &&
3138                                  VM_BIND_OP(op) == XE_VM_BIND_OP_MAP &&
3139                                  !is_null) ||
3140                     XE_IOCTL_DBG(xe, !obj &&
3141                                  VM_BIND_OP(op) == XE_VM_BIND_OP_UNMAP_ALL) ||
3142                     XE_IOCTL_DBG(xe, addr &&
3143                                  VM_BIND_OP(op) == XE_VM_BIND_OP_UNMAP_ALL) ||
3144                     XE_IOCTL_DBG(xe, range &&
3145                                  VM_BIND_OP(op) == XE_VM_BIND_OP_UNMAP_ALL) ||
3146                     XE_IOCTL_DBG(xe, obj &&
3147                                  VM_BIND_OP(op) == XE_VM_BIND_OP_MAP_USERPTR) ||
3148                     XE_IOCTL_DBG(xe, obj &&
3149                                  VM_BIND_OP(op) == XE_VM_BIND_OP_PREFETCH) ||
3150                     XE_IOCTL_DBG(xe, region &&
3151                                  VM_BIND_OP(op) != XE_VM_BIND_OP_PREFETCH) ||
3152                     XE_IOCTL_DBG(xe, !(BIT(region) &
3153                                        xe->info.mem_region_mask)) ||
3154                     XE_IOCTL_DBG(xe, obj &&
3155                                  VM_BIND_OP(op) == XE_VM_BIND_OP_UNMAP)) {
3156                         err = -EINVAL;
3157                         goto free_bind_ops;
3158                 }
3159
3160                 if (XE_IOCTL_DBG(xe, obj_offset & ~PAGE_MASK) ||
3161                     XE_IOCTL_DBG(xe, addr & ~PAGE_MASK) ||
3162                     XE_IOCTL_DBG(xe, range & ~PAGE_MASK) ||
3163                     XE_IOCTL_DBG(xe, !range && VM_BIND_OP(op) !=
3164                                  XE_VM_BIND_OP_RESTART &&
3165                                  VM_BIND_OP(op) != XE_VM_BIND_OP_UNMAP_ALL)) {
3166                         err = -EINVAL;
3167                         goto free_bind_ops;
3168                 }
3169         }
3170
3171         return 0;
3172
3173 free_bind_ops:
3174         if (args->num_binds > 1)
3175                 kfree(*bind_ops);
3176         return err;
3177 }
3178
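/*
 * Illustrative sketch only (not part of the driver): roughly how userspace
 * might drive a single synchronous MAP through this ioctl. The field names
 * below are the ones vm_bind_ioctl_check_args() consumes; the authoritative
 * uAPI layout and the DRM_IOCTL_XE_VM_BIND request number live in xe_drm.h,
 * and vm_id/bo_handle/bo_size are assumed to come from earlier VM/BO
 * creation calls.
 *
 *	struct drm_xe_vm_bind bind = {
 *		.vm_id = vm_id,
 *		.num_binds = 1,
 *		.bind = {
 *			.obj = bo_handle,
 *			.obj_offset = 0,
 *			.addr = 0x1a0000,	(page aligned)
 *			.range = bo_size,	(page aligned)
 *			.op = XE_VM_BIND_OP_MAP,
 *		},
 *	};
 *
 *	err = drmIoctl(fd, DRM_IOCTL_XE_VM_BIND, &bind);
 *
 * With num_binds > 1 the ops are instead passed as an array through
 * vector_of_binds, and on a VM created with async binds every op must also
 * carry XE_VM_BIND_FLAG_ASYNC.
 */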
3179 int xe_vm_bind_ioctl(struct drm_device *dev, void *data, struct drm_file *file)
3180 {
3181         struct xe_device *xe = to_xe_device(dev);
3182         struct xe_file *xef = to_xe_file(file);
3183         struct drm_xe_vm_bind *args = data;
3184         struct drm_xe_sync __user *syncs_user;
3185         struct xe_bo **bos = NULL;
3186         struct drm_gpuva_ops **ops = NULL;
3187         struct xe_vm *vm;
3188         struct xe_exec_queue *q = NULL;
3189         u32 num_syncs;
3190         struct xe_sync_entry *syncs = NULL;
3191         struct drm_xe_vm_bind_op *bind_ops;
3192         LIST_HEAD(ops_list);
3193         bool async;
3194         int err;
3195         int i;
3196
3197         err = vm_bind_ioctl_check_args(xe, args, &bind_ops, &async);
3198         if (err)
3199                 return err;
3200
3201         if (args->exec_queue_id) {
3202                 q = xe_exec_queue_lookup(xef, args->exec_queue_id);
3203                 if (XE_IOCTL_DBG(xe, !q)) {
3204                         err = -ENOENT;
3205                         goto free_objs;
3206                 }
3207
3208                 if (XE_IOCTL_DBG(xe, !(q->flags & EXEC_QUEUE_FLAG_VM))) {
3209                         err = -EINVAL;
3210                         goto put_exec_queue;
3211                 }
3212         }
3213
3214         vm = xe_vm_lookup(xef, args->vm_id);
3215         if (XE_IOCTL_DBG(xe, !vm)) {
3216                 err = -EINVAL;
3217                 goto put_exec_queue;
3218         }
3219
3220         err = down_write_killable(&vm->lock);
3221         if (err)
3222                 goto put_vm;
3223
3224         if (XE_IOCTL_DBG(xe, xe_vm_is_closed_or_banned(vm))) {
3225                 err = -ENOENT;
3226                 goto release_vm_lock;
3227         }
3228
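        /*
         * A RESTART bind carries no syncs and is only valid on an async-bind
         * VM that currently has a captured error; it clears the stored error
         * and re-queues the async worker (plus the rebind worker for
         * compute-mode VMs).
         */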
3229         if (VM_BIND_OP(bind_ops[0].op) == XE_VM_BIND_OP_RESTART) {
3230                 if (XE_IOCTL_DBG(xe, !(vm->flags & XE_VM_FLAG_ASYNC_BIND_OPS)))
3231                         err = -EOPNOTSUPP;
3232                 if (XE_IOCTL_DBG(xe, !err && args->num_syncs))
3233                         err = -EINVAL;
3234                 if (XE_IOCTL_DBG(xe, !err && !vm->async_ops.error))
3235                         err = -EPROTO;
3236
3237                 if (!err) {
3238                         trace_xe_vm_restart(vm);
3239                         vm_set_async_error(vm, 0);
3240
3241                         queue_work(system_unbound_wq, &vm->async_ops.work);
3242
3243                         /* Rebinds may have been blocked, give worker a kick */
3244                         if (xe_vm_in_compute_mode(vm))
3245                                 xe_vm_queue_rebind_worker(vm);
3246                 }
3247
3248                 goto release_vm_lock;
3249         }
3250
3251         if (XE_IOCTL_DBG(xe, !vm->async_ops.error &&
3252                          async != !!(vm->flags & XE_VM_FLAG_ASYNC_BIND_OPS))) {
3253                 err = -EOPNOTSUPP;
3254                 goto release_vm_lock;
3255         }
3256
3257         for (i = 0; i < args->num_binds; ++i) {
3258                 u64 range = bind_ops[i].range;
3259                 u64 addr = bind_ops[i].addr;
3260
3261                 if (XE_IOCTL_DBG(xe, range > vm->size) ||
3262                     XE_IOCTL_DBG(xe, addr > vm->size - range)) {
3263                         err = -EINVAL;
3264                         goto release_vm_lock;
3265                 }
3266
3267                 if (bind_ops[i].tile_mask) {
3268                         u64 valid_tiles = BIT(xe->info.tile_count) - 1;
3269
3270                         if (XE_IOCTL_DBG(xe, bind_ops[i].tile_mask &
3271                                          ~valid_tiles)) {
3272                                 err = -EINVAL;
3273                                 goto release_vm_lock;
3274                         }
3275                 }
3276         }
3277
3278         bos = kzalloc(sizeof(*bos) * args->num_binds, GFP_KERNEL);
3279         if (!bos) {
3280                 err = -ENOMEM;
3281                 goto release_vm_lock;
3282         }
3283
3284         ops = kzalloc(sizeof(*ops) * args->num_binds, GFP_KERNEL);
3285         if (!ops) {
3286                 err = -ENOMEM;
3287                 goto release_vm_lock;
3288         }
3289
3290         for (i = 0; i < args->num_binds; ++i) {
3291                 struct drm_gem_object *gem_obj;
3292                 u64 range = bind_ops[i].range;
3293                 u64 addr = bind_ops[i].addr;
3294                 u32 obj = bind_ops[i].obj;
3295                 u64 obj_offset = bind_ops[i].obj_offset;
3296
3297                 if (!obj)
3298                         continue;
3299
3300                 gem_obj = drm_gem_object_lookup(file, obj);
3301                 if (XE_IOCTL_DBG(xe, !gem_obj)) {
3302                         err = -ENOENT;
3303                         goto put_obj;
3304                 }
3305                 bos[i] = gem_to_xe_bo(gem_obj);
3306
3307                 if (XE_IOCTL_DBG(xe, range > bos[i]->size) ||
3308                     XE_IOCTL_DBG(xe, obj_offset >
3309                                  bos[i]->size - range)) {
3310                         err = -EINVAL;
3311                         goto put_obj;
3312                 }
3313
3314                 if (bos[i]->flags & XE_BO_INTERNAL_64K) {
3315                         if (XE_IOCTL_DBG(xe, obj_offset &
3316                                          XE_64K_PAGE_MASK) ||
3317                             XE_IOCTL_DBG(xe, addr & XE_64K_PAGE_MASK) ||
3318                             XE_IOCTL_DBG(xe, range & XE_64K_PAGE_MASK)) {
3319                                 err = -EINVAL;
3320                                 goto put_obj;
3321                         }
3322                 }
3323         }
3324
3325         if (args->num_syncs) {
3326                 syncs = kcalloc(args->num_syncs, sizeof(*syncs), GFP_KERNEL);
3327                 if (!syncs) {
3328                         err = -ENOMEM;
3329                         goto put_obj;
3330                 }
3331         }
3332
3333         syncs_user = u64_to_user_ptr(args->syncs);
3334         for (num_syncs = 0; num_syncs < args->num_syncs; num_syncs++) {
3335                 err = xe_sync_entry_parse(xe, xef, &syncs[num_syncs],
3336                                           &syncs_user[num_syncs], false,
3337                                           xe_vm_no_dma_fences(vm));
3338                 if (err)
3339                         goto free_syncs;
3340         }
3341
3342         /* Do some error checking first to make the unwind easier */
3343         for (i = 0; i < args->num_binds; ++i) {
3344                 u64 range = bind_ops[i].range;
3345                 u64 addr = bind_ops[i].addr;
3346                 u32 op = bind_ops[i].op;
3347
3348                 err = vm_bind_ioctl_lookup_vma(vm, bos[i], addr, range, op);
3349                 if (err)
3350                         goto free_syncs;
3351         }
3352
3353         for (i = 0; i < args->num_binds; ++i) {
3354                 u64 range = bind_ops[i].range;
3355                 u64 addr = bind_ops[i].addr;
3356                 u32 op = bind_ops[i].op;
3357                 u64 obj_offset = bind_ops[i].obj_offset;
3358                 u8 tile_mask = bind_ops[i].tile_mask;
3359                 u32 region = bind_ops[i].region;
3360
3361                 ops[i] = vm_bind_ioctl_ops_create(vm, bos[i], obj_offset,
3362                                                   addr, range, op, tile_mask,
3363                                                   region);
3364                 if (IS_ERR(ops[i])) {
3365                         err = PTR_ERR(ops[i]);
3366                         ops[i] = NULL;
3367                         goto unwind_ops;
3368                 }
3369
3370                 err = vm_bind_ioctl_ops_parse(vm, q, ops[i], syncs, num_syncs,
3371                                               &ops_list,
3372                                               i == args->num_binds - 1,
3373                                               async);
3374                 if (err)
3375                         goto unwind_ops;
3376         }
3377
3378         /* Nothing to do */
3379         if (list_empty(&ops_list)) {
3380                 err = -ENODATA;
3381                 goto unwind_ops;
3382         }
3383
3384         err = vm_bind_ioctl_ops_execute(vm, &ops_list, async);
3385         up_write(&vm->lock);
3386
3387         for (i = 0; i < args->num_binds; ++i)
3388                 xe_bo_put(bos[i]);
3389
3390         kfree(bos);
3391         kfree(ops);
3392         if (args->num_binds > 1)
3393                 kfree(bind_ops);
3394
3395         return err;
3396
3397 unwind_ops:
3398         vm_bind_ioctl_ops_unwind(vm, ops, args->num_binds);
3399 free_syncs:
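        /*
         * Only if the bind turned out to be a no-op (-ENODATA) are the user
         * syncs signalled with a stub fence, so waiters are not left hanging;
         * -ENODATA is then reported to userspace as success below.
         */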
3400         for (i = 0; err == -ENODATA && i < num_syncs; i++)
3401                 xe_sync_entry_signal(&syncs[i], NULL, dma_fence_get_stub());
3402         while (num_syncs--)
3403                 xe_sync_entry_cleanup(&syncs[num_syncs]);
3404
3405         kfree(syncs);
3406 put_obj:
3407         for (i = 0; i < args->num_binds; ++i)
3408                 xe_bo_put(bos[i]);
3409 release_vm_lock:
3410         up_write(&vm->lock);
3411 put_vm:
3412         xe_vm_put(vm);
3413 put_exec_queue:
3414         if (q)
3415                 xe_exec_queue_put(q);
3416 free_objs:
3417         kfree(bos);
3418         kfree(ops);
3419         if (args->num_binds > 1)
3420                 kfree(bind_ops);
3421         return err == -ENODATA ? 0 : err;
3422 }
3423
3424 /**
3425  * xe_vm_lock() - Lock the vm's dma_resv object
3426  * @vm: The struct xe_vm whose lock is to be locked
3427  * @intr: Whether to perform any waits interruptibly
3428  *
3429  * Return: 0 on success, -EINTR if @intr is true and the wait for a
3430  * contended lock was interrupted. If @intr is false, the function
3431  * always returns 0.
3432  */
3433 int xe_vm_lock(struct xe_vm *vm, bool intr)
3434 {
3435         if (intr)
3436                 return dma_resv_lock_interruptible(xe_vm_resv(vm), NULL);
3437
3438         return dma_resv_lock(xe_vm_resv(vm), NULL);
3439 }
3440
3441 /**
3442  * xe_vm_unlock() - Unlock the vm's dma_resv object
3443  * @vm: The struct xe_vm whose lock is to be released.
3444  *
3445  * Unlock the vm's dma_resv object, previously locked by xe_vm_lock().
3446  */
3447 void xe_vm_unlock(struct xe_vm *vm)
3448 {
3449         dma_resv_unlock(xe_vm_resv(vm));
3450 }
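
/*
 * Minimal usage sketch (illustrative only, not part of the driver): take the
 * VM's dma_resv interruptibly, do some work under it, and drop it again.
 * example_with_vm_locked() and do_work_locked() are made-up names.
 *
 *	static int example_with_vm_locked(struct xe_vm *vm)
 *	{
 *		int err;
 *
 *		err = xe_vm_lock(vm, true);
 *		if (err)
 *			return err;	(-EINTR if the wait was interrupted)
 *
 *		do_work_locked(vm);
 *
 *		xe_vm_unlock(vm);
 *		return 0;
 *	}
 */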
3451
3452 /**
3453  * xe_vm_invalidate_vma - invalidate GPU mappings for VMA without a lock
3454  * @vma: VMA to invalidate
3455  *
3456  * Walks the page-table leaves backing this VMA, zeroes the entries owned by
3457  * the VMA, issues a TLB invalidation on each affected tile, and blocks until
3458  * the invalidations have completed.
3459  *
3460  * Return: 0 on success, negative error code otherwise.
3461  */
3462 int xe_vm_invalidate_vma(struct xe_vma *vma)
3463 {
3464         struct xe_device *xe = xe_vma_vm(vma)->xe;
3465         struct xe_tile *tile;
3466         u32 tile_needs_invalidate = 0;
3467         int seqno[XE_MAX_TILES_PER_DEVICE];
3468         u8 id;
3469         int ret;
3470
3471         XE_WARN_ON(!xe_vm_in_fault_mode(xe_vma_vm(vma)));
3472         XE_WARN_ON(xe_vma_is_null(vma));
3473         trace_xe_vma_usm_invalidate(vma);
3474
3475         /* Check that we don't race with page-table updates */
3476         if (IS_ENABLED(CONFIG_PROVE_LOCKING)) {
3477                 if (xe_vma_is_userptr(vma)) {
3478                         WARN_ON_ONCE(!mmu_interval_check_retry
3479                                      (&vma->userptr.notifier,
3480                                       vma->userptr.notifier_seq));
3481                         WARN_ON_ONCE(!dma_resv_test_signaled(xe_vm_resv(xe_vma_vm(vma)),
3482                                                              DMA_RESV_USAGE_BOOKKEEP));
3483
3484                 } else {
3485                         xe_bo_assert_held(xe_vma_bo(vma));
3486                 }
3487         }
3488
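        /* Zap the PTEs and issue a TLB invalidation on every tile that had mappings */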
3489         for_each_tile(tile, xe, id) {
3490                 if (xe_pt_zap_ptes(tile, vma)) {
3491                         tile_needs_invalidate |= BIT(id);
3492                         xe_device_wmb(xe);
3493                         /*
3494                          * FIXME: We potentially need to invalidate multiple
3495                          * GTs within the tile
3496                          */
3497                         seqno[id] = xe_gt_tlb_invalidation_vma(tile->primary_gt, NULL, vma);
3498                         if (seqno[id] < 0)
3499                                 return seqno[id];
3500                 }
3501         }
3502
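        /* Wait for all the TLB invalidations issued above to complete */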
3503         for_each_tile(tile, xe, id) {
3504                 if (tile_needs_invalidate & BIT(id)) {
3505                         ret = xe_gt_tlb_invalidation_wait(tile->primary_gt, seqno[id]);
3506                         if (ret < 0)
3507                                 return ret;
3508                 }
3509         }
3510
3511         vma->usm.tile_invalidated = vma->tile_mask;
3512
3513         return 0;
3514 }
3515
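/*
 * Dump the VM's page-table root and every GPUVA mapping (address range, size,
 * first backing address and placement) to @p for debug/capture purposes.
 */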
3516 int xe_analyze_vm(struct drm_printer *p, struct xe_vm *vm, int gt_id)
3517 {
3518         struct drm_gpuva *gpuva;
3519         bool is_vram;
3520         u64 addr;
3521
3522         if (!down_read_trylock(&vm->lock)) {
3523                 drm_printf(p, " Failed to acquire VM lock to dump capture\n");
3524                 return 0;
3525         }
3526         if (vm->pt_root[gt_id]) {
3527                 addr = xe_bo_addr(vm->pt_root[gt_id]->bo, 0, XE_PAGE_SIZE);
3528                 is_vram = xe_bo_is_vram(vm->pt_root[gt_id]->bo);
3529                 drm_printf(p, " VM root: A:0x%llx %s\n", addr,
3530                            is_vram ? "VRAM" : "SYS");
3531         }
3532
3533         drm_gpuvm_for_each_va(gpuva, &vm->gpuvm) {
3534                 struct xe_vma *vma = gpuva_to_vma(gpuva);
3535                 bool is_userptr = xe_vma_is_userptr(vma);
3536                 bool is_null = xe_vma_is_null(vma);
3537
3538                 if (is_null) {
3539                         addr = 0;
3540                 } else if (is_userptr) {
3541                         struct xe_res_cursor cur;
3542
3543                         if (vma->userptr.sg) {
3544                                 xe_res_first_sg(vma->userptr.sg, 0, XE_PAGE_SIZE,
3545                                                 &cur);
3546                                 addr = xe_res_dma(&cur);
3547                         } else {
3548                                 addr = 0;
3549                         }
3550                 } else {
3551                         addr = __xe_bo_addr(xe_vma_bo(vma), 0, XE_PAGE_SIZE);
3552                         is_vram = xe_bo_is_vram(xe_vma_bo(vma));
3553                 }
3554                 drm_printf(p, " [%016llx-%016llx] S:0x%016llx A:%016llx %s\n",
3555                            xe_vma_start(vma), xe_vma_end(vma) - 1,
3556                            xe_vma_size(vma),
3557                            addr, is_null ? "NULL" : is_userptr ? "USR" :
3558                            is_vram ? "VRAM" : "SYS");
3559         }
3560         up_read(&vm->lock);
3561
3562         return 0;
3563 }