drivers/gpu/drm/xe/xe_vm.c (linux-2.6-microblaze.git, commit 80b374b9cdd1e2ac16c02365277a773a3139da78)
1 // SPDX-License-Identifier: MIT
2 /*
3  * Copyright © 2021 Intel Corporation
4  */
5
6 #include "xe_vm.h"
7
8 #include <linux/dma-fence-array.h>
9
10 #include <drm/drm_exec.h>
11 #include <drm/drm_print.h>
12 #include <drm/ttm/ttm_execbuf_util.h>
13 #include <drm/ttm/ttm_tt.h>
14 #include <drm/xe_drm.h>
15 #include <linux/delay.h>
16 #include <linux/kthread.h>
17 #include <linux/mm.h>
18 #include <linux/swap.h>
19
20 #include "xe_bo.h"
21 #include "xe_device.h"
22 #include "xe_exec_queue.h"
23 #include "xe_gt.h"
24 #include "xe_gt_pagefault.h"
25 #include "xe_gt_tlb_invalidation.h"
26 #include "xe_migrate.h"
27 #include "xe_pm.h"
28 #include "xe_preempt_fence.h"
29 #include "xe_pt.h"
30 #include "xe_res_cursor.h"
31 #include "xe_sync.h"
32 #include "xe_trace.h"
33 #include "generated/xe_wa_oob.h"
34 #include "xe_wa.h"
35
36 #define TEST_VM_ASYNC_OPS_ERROR
37
38 /**
39  * xe_vma_userptr_check_repin() - Advisory check for repin needed
40  * @vma: The userptr vma
41  *
42  * Check if the userptr vma has been invalidated since last successful
43  * repin. The check is advisory only and the function can be called
44  * without the vm->userptr.notifier_lock held. There is no guarantee that the
45  * vma userptr will remain valid after a lockless check, so typically
46  * the call needs to be followed by a proper check under the notifier_lock.
47  *
48  * Return: 0 if userptr vma is valid, -EAGAIN otherwise; repin recommended.
49  */
50 int xe_vma_userptr_check_repin(struct xe_vma *vma)
51 {
52         return mmu_interval_check_retry(&vma->userptr.notifier,
53                                         vma->userptr.notifier_seq) ?
54                 -EAGAIN : 0;
55 }
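/*
 * Example (illustrative sketch only, based on how this helper is used in
 * this file): a caller that already holds vm->lock typically treats the
 * lockless result as a hint and repins before relying on the mapping:
 *
 *	if (xe_vma_userptr_check_repin(vma) == -EAGAIN) {
 *		err = xe_vma_userptr_pin_pages(vma);
 *		if (err)
 *			return err;
 *	}
 *
 * The authoritative check still happens later under
 * vm->userptr.notifier_lock, see __xe_vm_userptr_needs_repin().
 */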
56
57 int xe_vma_userptr_pin_pages(struct xe_vma *vma)
58 {
59         struct xe_vm *vm = xe_vma_vm(vma);
60         struct xe_device *xe = vm->xe;
61         const unsigned long num_pages = xe_vma_size(vma) >> PAGE_SHIFT;
62         struct page **pages;
63         bool in_kthread = !current->mm;
64         unsigned long notifier_seq;
65         int pinned, ret, i;
66         bool read_only = xe_vma_read_only(vma);
67
68         lockdep_assert_held(&vm->lock);
69         XE_WARN_ON(!xe_vma_is_userptr(vma));
70 retry:
71         if (vma->gpuva.flags & XE_VMA_DESTROYED)
72                 return 0;
73
74         notifier_seq = mmu_interval_read_begin(&vma->userptr.notifier);
75         if (notifier_seq == vma->userptr.notifier_seq)
76                 return 0;
77
78         pages = kvmalloc_array(num_pages, sizeof(*pages), GFP_KERNEL);
79         if (!pages)
80                 return -ENOMEM;
81
82         if (vma->userptr.sg) {
83                 dma_unmap_sgtable(xe->drm.dev,
84                                   vma->userptr.sg,
85                                   read_only ? DMA_TO_DEVICE :
86                                   DMA_BIDIRECTIONAL, 0);
87                 sg_free_table(vma->userptr.sg);
88                 vma->userptr.sg = NULL;
89         }
90
91         pinned = ret = 0;
92         if (in_kthread) {
93                 if (!mmget_not_zero(vma->userptr.notifier.mm)) {
94                         ret = -EFAULT;
95                         goto mm_closed;
96                 }
97                 kthread_use_mm(vma->userptr.notifier.mm);
98         }
99
100         while (pinned < num_pages) {
101                 ret = get_user_pages_fast(xe_vma_userptr(vma) +
102                                           pinned * PAGE_SIZE,
103                                           num_pages - pinned,
104                                           read_only ? 0 : FOLL_WRITE,
105                                           &pages[pinned]);
106                 if (ret < 0) {
107                         if (in_kthread)
108                                 ret = 0;
109                         break;
110                 }
111
112                 pinned += ret;
113                 ret = 0;
114         }
115
116         if (in_kthread) {
117                 kthread_unuse_mm(vma->userptr.notifier.mm);
118                 mmput(vma->userptr.notifier.mm);
119         }
120 mm_closed:
121         if (ret)
122                 goto out;
123
124         ret = sg_alloc_table_from_pages_segment(&vma->userptr.sgt, pages,
125                                                 pinned, 0,
126                                                 (u64)pinned << PAGE_SHIFT,
127                                                 xe_sg_segment_size(xe->drm.dev),
128                                                 GFP_KERNEL);
129         if (ret) {
130                 vma->userptr.sg = NULL;
131                 goto out;
132         }
133         vma->userptr.sg = &vma->userptr.sgt;
134
135         ret = dma_map_sgtable(xe->drm.dev, vma->userptr.sg,
136                               read_only ? DMA_TO_DEVICE :
137                               DMA_BIDIRECTIONAL,
138                               DMA_ATTR_SKIP_CPU_SYNC |
139                               DMA_ATTR_NO_KERNEL_MAPPING);
140         if (ret) {
141                 sg_free_table(vma->userptr.sg);
142                 vma->userptr.sg = NULL;
143                 goto out;
144         }
145
146         for (i = 0; i < pinned; ++i) {
147                 if (!read_only) {
148                         lock_page(pages[i]);
149                         set_page_dirty(pages[i]);
150                         unlock_page(pages[i]);
151                 }
152
153                 mark_page_accessed(pages[i]);
154         }
155
156 out:
157         release_pages(pages, pinned);
158         kvfree(pages);
159
160         if (!(ret < 0)) {
161                 vma->userptr.notifier_seq = notifier_seq;
162                 if (xe_vma_userptr_check_repin(vma) == -EAGAIN)
163                         goto retry;
164         }
165
166         return ret < 0 ? ret : 0;
167 }
168
169 static bool preempt_fences_waiting(struct xe_vm *vm)
170 {
171         struct xe_exec_queue *q;
172
173         lockdep_assert_held(&vm->lock);
174         xe_vm_assert_held(vm);
175
176         list_for_each_entry(q, &vm->preempt.exec_queues, compute.link) {
177                 if (!q->compute.pfence ||
178                     (q->compute.pfence && test_bit(DMA_FENCE_FLAG_ENABLE_SIGNAL_BIT,
179                                                    &q->compute.pfence->flags))) {
180                         return true;
181                 }
182         }
183
184         return false;
185 }
186
187 static void free_preempt_fences(struct list_head *list)
188 {
189         struct list_head *link, *next;
190
191         list_for_each_safe(link, next, list)
192                 xe_preempt_fence_free(to_preempt_fence_from_link(link));
193 }
194
195 static int alloc_preempt_fences(struct xe_vm *vm, struct list_head *list,
196                                 unsigned int *count)
197 {
198         lockdep_assert_held(&vm->lock);
199         xe_vm_assert_held(vm);
200
201         if (*count >= vm->preempt.num_exec_queues)
202                 return 0;
203
204         for (; *count < vm->preempt.num_exec_queues; ++(*count)) {
205                 struct xe_preempt_fence *pfence = xe_preempt_fence_alloc();
206
207                 if (IS_ERR(pfence))
208                         return PTR_ERR(pfence);
209
210                 list_move_tail(xe_preempt_fence_link(pfence), list);
211         }
212
213         return 0;
214 }
215
216 static int wait_for_existing_preempt_fences(struct xe_vm *vm)
217 {
218         struct xe_exec_queue *q;
219
220         xe_vm_assert_held(vm);
221
222         list_for_each_entry(q, &vm->preempt.exec_queues, compute.link) {
223                 if (q->compute.pfence) {
224                         long timeout = dma_fence_wait(q->compute.pfence, false);
225
226                         if (timeout < 0)
227                                 return -ETIME;
228                         dma_fence_put(q->compute.pfence);
229                         q->compute.pfence = NULL;
230                 }
231         }
232
233         return 0;
234 }
235
236 static bool xe_vm_is_idle(struct xe_vm *vm)
237 {
238         struct xe_exec_queue *q;
239
240         xe_vm_assert_held(vm);
241         list_for_each_entry(q, &vm->preempt.exec_queues, compute.link) {
242                 if (!xe_exec_queue_is_idle(q))
243                         return false;
244         }
245
246         return true;
247 }
248
249 static void arm_preempt_fences(struct xe_vm *vm, struct list_head *list)
250 {
251         struct list_head *link;
252         struct xe_exec_queue *q;
253
254         list_for_each_entry(q, &vm->preempt.exec_queues, compute.link) {
255                 struct dma_fence *fence;
256
257                 link = list->next;
258                 XE_WARN_ON(link == list);
259
260                 fence = xe_preempt_fence_arm(to_preempt_fence_from_link(link),
261                                              q, q->compute.context,
262                                              ++q->compute.seqno);
263                 dma_fence_put(q->compute.pfence);
264                 q->compute.pfence = fence;
265         }
266 }
267
268 static int add_preempt_fences(struct xe_vm *vm, struct xe_bo *bo)
269 {
270         struct xe_exec_queue *q;
271         int err;
272
273         err = xe_bo_lock(bo, true);
274         if (err)
275                 return err;
276
277         err = dma_resv_reserve_fences(bo->ttm.base.resv, vm->preempt.num_exec_queues);
278         if (err)
279                 goto out_unlock;
280
281         list_for_each_entry(q, &vm->preempt.exec_queues, compute.link)
282                 if (q->compute.pfence) {
283                         dma_resv_add_fence(bo->ttm.base.resv,
284                                            q->compute.pfence,
285                                            DMA_RESV_USAGE_BOOKKEEP);
286                 }
287
288 out_unlock:
289         xe_bo_unlock(bo);
290         return err;
291 }
292
293 /**
294  * xe_vm_fence_all_extobjs() - Add a fence to vm's external objects' resv
295  * @vm: The vm.
296  * @fence: The fence to add.
297  * @usage: The resv usage for the fence.
298  *
299  * Loops over all of the vm's external object bindings and adds a @fence
300  * with the given @usage to all of the external objects' reservation
301  * objects.
302  */
303 void xe_vm_fence_all_extobjs(struct xe_vm *vm, struct dma_fence *fence,
304                              enum dma_resv_usage usage)
305 {
306         struct xe_vma *vma;
307
308         list_for_each_entry(vma, &vm->extobj.list, extobj.link)
309                 dma_resv_add_fence(xe_vma_bo(vma)->ttm.base.resv, fence, usage);
310 }
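/*
 * Example (illustrative sketch only, mirroring the preempt-fence paths in
 * this file): after a fence has been installed on the vm's own reservation
 * object, the same fence is spread to every external BO so that eviction of
 * those BOs also waits for it. The vm resv and the external BOs' resvs are
 * assumed to already be locked, e.g. via xe_vm_lock_dma_resv():
 *
 *	dma_resv_add_fence(xe_vm_resv(vm), fence, DMA_RESV_USAGE_BOOKKEEP);
 *	xe_vm_fence_all_extobjs(vm, fence, DMA_RESV_USAGE_BOOKKEEP);
 */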
311
312 static void resume_and_reinstall_preempt_fences(struct xe_vm *vm)
313 {
314         struct xe_exec_queue *q;
315
316         lockdep_assert_held(&vm->lock);
317         xe_vm_assert_held(vm);
318
319         list_for_each_entry(q, &vm->preempt.exec_queues, compute.link) {
320                 q->ops->resume(q);
321
322                 dma_resv_add_fence(xe_vm_resv(vm), q->compute.pfence,
323                                    DMA_RESV_USAGE_BOOKKEEP);
324                 xe_vm_fence_all_extobjs(vm, q->compute.pfence,
325                                         DMA_RESV_USAGE_BOOKKEEP);
326         }
327 }
328
329 int xe_vm_add_compute_exec_queue(struct xe_vm *vm, struct xe_exec_queue *q)
330 {
331         struct drm_exec exec;
332         struct dma_fence *pfence;
333         int err;
334         bool wait;
335
336         XE_WARN_ON(!xe_vm_in_compute_mode(vm));
337
338         down_write(&vm->lock);
339         drm_exec_init(&exec, DRM_EXEC_INTERRUPTIBLE_WAIT);
340         drm_exec_until_all_locked(&exec) {
341                 err = xe_vm_lock_dma_resv(vm, &exec, 1, true);
342                 drm_exec_retry_on_contention(&exec);
343                 if (err)
344                         goto out_unlock;
345         }
346
347         pfence = xe_preempt_fence_create(q, q->compute.context,
348                                          ++q->compute.seqno);
349         if (!pfence) {
350                 err = -ENOMEM;
351                 goto out_unlock;
352         }
353
354         list_add(&q->compute.link, &vm->preempt.exec_queues);
355         ++vm->preempt.num_exec_queues;
356         q->compute.pfence = pfence;
357
358         down_read(&vm->userptr.notifier_lock);
359
360         dma_resv_add_fence(xe_vm_resv(vm), pfence,
361                            DMA_RESV_USAGE_BOOKKEEP);
362
363         xe_vm_fence_all_extobjs(vm, pfence, DMA_RESV_USAGE_BOOKKEEP);
364
365         /*
366          * Check to see if a preemption on the VM or a userptr invalidation
367          * is in flight; if so, trigger this preempt fence to sync state with
368          * the other preempt fences on the VM.
369          */
370         wait = __xe_vm_userptr_needs_repin(vm) || preempt_fences_waiting(vm);
371         if (wait)
372                 dma_fence_enable_sw_signaling(pfence);
373
374         up_read(&vm->userptr.notifier_lock);
375
376 out_unlock:
377         drm_exec_fini(&exec);
378         up_write(&vm->lock);
379
380         return err;
381 }
382
383 /**
384  * __xe_vm_userptr_needs_repin() - Check whether the VM does have userptrs
385  * that need repinning.
386  * @vm: The VM.
387  *
388  * This function checks for whether the VM has userptrs that need repinning,
389  * and provides a release-type barrier on the userptr.notifier_lock after
390  * checking.
391  *
392  * Return: 0 if there are no userptrs needing repinning, -EAGAIN if there are.
393  */
394 int __xe_vm_userptr_needs_repin(struct xe_vm *vm)
395 {
396         lockdep_assert_held_read(&vm->userptr.notifier_lock);
397
398         return (list_empty(&vm->userptr.repin_list) &&
399                 list_empty(&vm->userptr.invalidated)) ? 0 : -EAGAIN;
400 }
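/*
 * Example (illustrative sketch only): the check must run with
 * vm->userptr.notifier_lock held, and a non-zero result means the caller
 * should back off and repin:
 *
 *	down_read(&vm->userptr.notifier_lock);
 *	if (__xe_vm_userptr_needs_repin(vm)) {
 *		up_read(&vm->userptr.notifier_lock);
 *		return -EAGAIN;
 *	}
 *
 * with the -EAGAIN typically handled by calling xe_vm_userptr_pin() and
 * retrying, as the rebind worker below does.
 */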
401
402 /**
403  * xe_vm_lock_dma_resv() - Lock the vm dma_resv object and the dma_resv
404  * objects of the vm's external buffer objects.
405  * @vm: The vm.
406  * @exec: Pointer to a struct drm_exec locking context.
407  * @num_shared: Number of dma-fence slots to reserve in the locked objects.
408  * @lock_vm: Lock also the vm's dma_resv.
409  *
410  * Locks the vm dma-resv object (if @lock_vm is set) and all the dma-resv
411  * objects of the buffer objects on the vm's external object list.
412  *
413  * Return: 0 on success, negative error code on error. In particular, if the
414  * drm_exec context allows interruptible waits, -EINTR or -ERESTARTSYS may be returned.
415  */
416 int xe_vm_lock_dma_resv(struct xe_vm *vm, struct drm_exec *exec,
417                         unsigned int num_shared, bool lock_vm)
418 {
419         struct xe_vma *vma, *next;
420         int err = 0;
421
422         lockdep_assert_held(&vm->lock);
423
424         if (lock_vm) {
425                 err = drm_exec_prepare_obj(exec, &xe_vm_ttm_bo(vm)->base, num_shared);
426                 if (err)
427                         return err;
428         }
429
430         list_for_each_entry(vma, &vm->extobj.list, extobj.link) {
431                 err = drm_exec_prepare_obj(exec, &xe_vma_bo(vma)->ttm.base, num_shared);
432                 if (err)
433                         return err;
434         }
435
436         spin_lock(&vm->notifier.list_lock);
437         list_for_each_entry_safe(vma, next, &vm->notifier.rebind_list,
438                                  notifier.rebind_link) {
439                 xe_bo_assert_held(xe_vma_bo(vma));
440
441                 list_del_init(&vma->notifier.rebind_link);
442                 if (vma->tile_present && !(vma->gpuva.flags & XE_VMA_DESTROYED))
443                         list_move_tail(&vma->combined_links.rebind,
444                                        &vm->rebind_list);
445         }
446         spin_unlock(&vm->notifier.list_lock);
447
448         return 0;
449 }
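/*
 * Example (illustrative sketch only, following the pattern used by
 * xe_vm_add_compute_exec_queue() in this file): the helper is meant to be
 * called from inside a drm_exec loop so that contention is retried
 * transparently:
 *
 *	struct drm_exec exec;
 *	int err;
 *
 *	drm_exec_init(&exec, DRM_EXEC_INTERRUPTIBLE_WAIT);
 *	drm_exec_until_all_locked(&exec) {
 *		err = xe_vm_lock_dma_resv(vm, &exec, 1, true);
 *		drm_exec_retry_on_contention(&exec);
 *		if (err)
 *			goto out;
 *	}
 *
 *	...use the locked objects...
 * out:
 *	drm_exec_fini(&exec);
 */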
450
451 #define XE_VM_REBIND_RETRY_TIMEOUT_MS 1000
452
453 static void xe_vm_kill(struct xe_vm *vm)
454 {
455         struct xe_exec_queue *q;
456
457         lockdep_assert_held(&vm->lock);
458
459         xe_vm_lock(vm, false);
460         vm->flags |= XE_VM_FLAG_BANNED;
461         trace_xe_vm_kill(vm);
462
463         list_for_each_entry(q, &vm->preempt.exec_queues, compute.link)
464                 q->ops->kill(q);
465         xe_vm_unlock(vm);
466
467         /* TODO: Inform user the VM is banned */
468 }
469
470 /**
471  * xe_vm_validate_should_retry() - Whether to retry after a validate error.
472  * @exec: The drm_exec object used for locking before validation.
473  * @err: The error returned from ttm_bo_validate().
474  * @end: A ktime_t cookie that should be set to 0 before first use and
475  * that should be reused on subsequent calls.
476  *
477  * With multiple active VMs, under memory pressure, it is possible that
478  * ttm_bo_validate() runs into -EDEADLK and in such a case returns -ENOMEM.
479  * Until ttm properly handles locking in such scenarios, the best thing the
480  * driver can do is retry with a timeout. Check if that is necessary, and
481  * if so unlock the drm_exec's objects while keeping the ticket to prepare
482  * for a rerun.
483  *
484  * Return: true if a retry after drm_exec_init() is recommended;
485  * false otherwise.
486  */
487 bool xe_vm_validate_should_retry(struct drm_exec *exec, int err, ktime_t *end)
488 {
489         ktime_t cur;
490
491         if (err != -ENOMEM)
492                 return false;
493
494         cur = ktime_get();
495         *end = *end ? : ktime_add_ms(cur, XE_VM_REBIND_RETRY_TIMEOUT_MS);
496         if (!ktime_before(cur, *end))
497                 return false;
498
499         /*
500          * We would like to keep the ticket here with
501          * drm_exec_unlock_all(), but WW mutex asserts currently
502          * stop us from that. In any case this function could go away
503          * with proper TTM -EDEADLK handling.
504          */
505         drm_exec_fini(exec);
506
507         msleep(20);
508         return true;
509 }
510
511 static int xe_preempt_work_begin(struct drm_exec *exec, struct xe_vm *vm,
512                                  bool *done)
513 {
514         struct xe_vma *vma;
515         int err;
516
517         err = drm_exec_prepare_obj(exec, &xe_vm_ttm_bo(vm)->base,
518                                    vm->preempt.num_exec_queues);
519         if (err)
520                 return err;
521
522         if (xe_vm_is_idle(vm)) {
523                 vm->preempt.rebind_deactivated = true;
524                 *done = true;
525                 return 0;
526         }
527
528         if (!preempt_fences_waiting(vm)) {
529                 *done = true;
530                 return 0;
531         }
532
533         err = xe_vm_lock_dma_resv(vm, exec, vm->preempt.num_exec_queues, false);
534         if (err)
535                 return err;
536
537         err = wait_for_existing_preempt_fences(vm);
538         if (err)
539                 return err;
540
541         list_for_each_entry(vma, &vm->rebind_list, combined_links.rebind) {
542                 if (xe_vma_has_no_bo(vma) ||
543                     vma->gpuva.flags & XE_VMA_DESTROYED)
544                         continue;
545
546                 err = xe_bo_validate(xe_vma_bo(vma), vm, false);
547                 if (err)
548                         break;
549         }
550
551         return err;
552 }
553
554 static void preempt_rebind_work_func(struct work_struct *w)
555 {
556         struct xe_vm *vm = container_of(w, struct xe_vm, preempt.rebind_work);
557         struct drm_exec exec;
558         struct dma_fence *rebind_fence;
559         unsigned int fence_count = 0;
560         LIST_HEAD(preempt_fences);
561         ktime_t end = 0;
562         int err;
563         long wait;
564         int __maybe_unused tries = 0;
565
566         XE_WARN_ON(!xe_vm_in_compute_mode(vm));
567         trace_xe_vm_rebind_worker_enter(vm);
568
569         down_write(&vm->lock);
570
571         if (xe_vm_is_closed_or_banned(vm)) {
572                 up_write(&vm->lock);
573                 trace_xe_vm_rebind_worker_exit(vm);
574                 return;
575         }
576
577 retry:
578         if (vm->async_ops.error)
579                 goto out_unlock_outer;
580
581         /*
582          * Extreme corner where we exit a VM error state with a munmap style VM
583          * unbind in flight which requires a rebind. In this case the rebind
584          * needs to install some fences into the dma-resv slots. The worker to
585          * do this is queued; let that worker make progress by dropping vm->lock
586          * and trying this again.
587          */
588         if (vm->async_ops.munmap_rebind_inflight) {
589                 up_write(&vm->lock);
590                 flush_work(&vm->async_ops.work);
591                 goto retry;
592         }
593
594         if (xe_vm_userptr_check_repin(vm)) {
595                 err = xe_vm_userptr_pin(vm);
596                 if (err)
597                         goto out_unlock_outer;
598         }
599
600         drm_exec_init(&exec, DRM_EXEC_INTERRUPTIBLE_WAIT);
601
602         drm_exec_until_all_locked(&exec) {
603                 bool done = false;
604
605                 err = xe_preempt_work_begin(&exec, vm, &done);
606                 drm_exec_retry_on_contention(&exec);
607                 if (err && xe_vm_validate_should_retry(&exec, err, &end)) {
608                         err = -EAGAIN;
609                         goto out_unlock_outer;
610                 }
611                 if (err || done)
612                         goto out_unlock;
613         }
614
615         err = alloc_preempt_fences(vm, &preempt_fences, &fence_count);
616         if (err)
617                 goto out_unlock;
618
619         rebind_fence = xe_vm_rebind(vm, true);
620         if (IS_ERR(rebind_fence)) {
621                 err = PTR_ERR(rebind_fence);
622                 goto out_unlock;
623         }
624
625         if (rebind_fence) {
626                 dma_fence_wait(rebind_fence, false);
627                 dma_fence_put(rebind_fence);
628         }
629
630         /* Wait on munmap style VM unbinds */
631         wait = dma_resv_wait_timeout(xe_vm_resv(vm),
632                                      DMA_RESV_USAGE_KERNEL,
633                                      false, MAX_SCHEDULE_TIMEOUT);
634         if (wait <= 0) {
635                 err = -ETIME;
636                 goto out_unlock;
637         }
638
639 #define retry_required(__tries, __vm) \
640         (IS_ENABLED(CONFIG_DRM_XE_USERPTR_INVAL_INJECT) ? \
641         (!(__tries)++ || __xe_vm_userptr_needs_repin(__vm)) : \
642         __xe_vm_userptr_needs_repin(__vm))
643
644         down_read(&vm->userptr.notifier_lock);
645         if (retry_required(tries, vm)) {
646                 up_read(&vm->userptr.notifier_lock);
647                 err = -EAGAIN;
648                 goto out_unlock;
649         }
650
651 #undef retry_required
652
653         spin_lock(&vm->xe->ttm.lru_lock);
654         ttm_lru_bulk_move_tail(&vm->lru_bulk_move);
655         spin_unlock(&vm->xe->ttm.lru_lock);
656
657         /* Point of no return. */
658         arm_preempt_fences(vm, &preempt_fences);
659         resume_and_reinstall_preempt_fences(vm);
660         up_read(&vm->userptr.notifier_lock);
661
662 out_unlock:
663         drm_exec_fini(&exec);
664 out_unlock_outer:
665         if (err == -EAGAIN) {
666                 trace_xe_vm_rebind_worker_retry(vm);
667                 goto retry;
668         }
669
670         if (err) {
671                 drm_warn(&vm->xe->drm, "VM worker error: %d\n", err);
672                 xe_vm_kill(vm);
673         }
674         up_write(&vm->lock);
675
676         free_preempt_fences(&preempt_fences);
677
678         trace_xe_vm_rebind_worker_exit(vm);
679 }
680
681 static bool vma_userptr_invalidate(struct mmu_interval_notifier *mni,
682                                    const struct mmu_notifier_range *range,
683                                    unsigned long cur_seq)
684 {
685         struct xe_vma *vma = container_of(mni, struct xe_vma, userptr.notifier);
686         struct xe_vm *vm = xe_vma_vm(vma);
687         struct dma_resv_iter cursor;
688         struct dma_fence *fence;
689         long err;
690
691         XE_WARN_ON(!xe_vma_is_userptr(vma));
692         trace_xe_vma_userptr_invalidate(vma);
693
694         if (!mmu_notifier_range_blockable(range))
695                 return false;
696
697         down_write(&vm->userptr.notifier_lock);
698         mmu_interval_set_seq(mni, cur_seq);
699
700         /* No need to stop gpu access if the userptr is not yet bound. */
701         if (!vma->userptr.initial_bind) {
702                 up_write(&vm->userptr.notifier_lock);
703                 return true;
704         }
705
706         /*
707          * Tell exec and rebind worker they need to repin and rebind this
708          * userptr.
709          */
710         if (!xe_vm_in_fault_mode(vm) &&
711             !(vma->gpuva.flags & XE_VMA_DESTROYED) && vma->tile_present) {
712                 spin_lock(&vm->userptr.invalidated_lock);
713                 list_move_tail(&vma->userptr.invalidate_link,
714                                &vm->userptr.invalidated);
715                 spin_unlock(&vm->userptr.invalidated_lock);
716         }
717
718         up_write(&vm->userptr.notifier_lock);
719
720         /*
721          * Preempt fences turn into schedule disables, pipeline these.
722          * Note that even in fault mode, we need to wait for binds and
723          * unbinds to complete, and those are attached as BOOKKEEP fences
724          * to the vm.
725          */
726         dma_resv_iter_begin(&cursor, xe_vm_resv(vm),
727                             DMA_RESV_USAGE_BOOKKEEP);
728         dma_resv_for_each_fence_unlocked(&cursor, fence)
729                 dma_fence_enable_sw_signaling(fence);
730         dma_resv_iter_end(&cursor);
731
732         err = dma_resv_wait_timeout(xe_vm_resv(vm),
733                                     DMA_RESV_USAGE_BOOKKEEP,
734                                     false, MAX_SCHEDULE_TIMEOUT);
735         XE_WARN_ON(err <= 0);
736
737         if (xe_vm_in_fault_mode(vm)) {
738                 err = xe_vm_invalidate_vma(vma);
739                 XE_WARN_ON(err);
740         }
741
742         trace_xe_vma_userptr_invalidate_complete(vma);
743
744         return true;
745 }
746
747 static const struct mmu_interval_notifier_ops vma_userptr_notifier_ops = {
748         .invalidate = vma_userptr_invalidate,
749 };
750
751 int xe_vm_userptr_pin(struct xe_vm *vm)
752 {
753         struct xe_vma *vma, *next;
754         int err = 0;
755         LIST_HEAD(tmp_evict);
756
757         lockdep_assert_held_write(&vm->lock);
758
759         /* Collect invalidated userptrs */
760         spin_lock(&vm->userptr.invalidated_lock);
761         list_for_each_entry_safe(vma, next, &vm->userptr.invalidated,
762                                  userptr.invalidate_link) {
763                 list_del_init(&vma->userptr.invalidate_link);
764                 if (list_empty(&vma->combined_links.userptr))
765                         list_move_tail(&vma->combined_links.userptr,
766                                        &vm->userptr.repin_list);
767         }
768         spin_unlock(&vm->userptr.invalidated_lock);
769
770         /* Pin and move to temporary list */
771         list_for_each_entry_safe(vma, next, &vm->userptr.repin_list,
772                                  combined_links.userptr) {
773                 err = xe_vma_userptr_pin_pages(vma);
774                 if (err < 0)
775                         goto out_err;
776
777                 list_move_tail(&vma->combined_links.userptr, &tmp_evict);
778         }
779
780         /* Take lock and move to rebind_list for rebinding. */
781         err = dma_resv_lock_interruptible(xe_vm_resv(vm), NULL);
782         if (err)
783                 goto out_err;
784
785         list_for_each_entry_safe(vma, next, &tmp_evict, combined_links.userptr)
786                 list_move_tail(&vma->combined_links.rebind, &vm->rebind_list);
787
788         dma_resv_unlock(xe_vm_resv(vm));
789
790         return 0;
791
792 out_err:
793         list_splice_tail(&tmp_evict, &vm->userptr.repin_list);
794
795         return err;
796 }
797
798 /**
799  * xe_vm_userptr_check_repin() - Check whether the VM might have userptrs
800  * that need repinning.
801  * @vm: The VM.
802  *
803  * This function does an advisory check for whether the VM has userptrs that
804  * need repinning.
805  *
806  * Return: 0 if there are no indications of userptrs needing repinning,
807  * -EAGAIN if there are.
808  */
809 int xe_vm_userptr_check_repin(struct xe_vm *vm)
810 {
811         return (list_empty_careful(&vm->userptr.repin_list) &&
812                 list_empty_careful(&vm->userptr.invalidated)) ? 0 : -EAGAIN;
813 }
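/*
 * Example (illustrative sketch only, condensed from the exec and rebind
 * worker paths in this file): with vm->lock held for write, the advisory
 * vm-level check gates the more expensive pin, and the result is confirmed
 * later under the notifier lock:
 *
 *	if (xe_vm_userptr_check_repin(vm)) {
 *		err = xe_vm_userptr_pin(vm);
 *		if (err)
 *			return err;
 *	}
 *	...
 *	down_read(&vm->userptr.notifier_lock);
 *	err = __xe_vm_userptr_needs_repin(vm);
 *	up_read(&vm->userptr.notifier_lock);
 */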
814
815 static struct dma_fence *
816 xe_vm_bind_vma(struct xe_vma *vma, struct xe_exec_queue *q,
817                struct xe_sync_entry *syncs, u32 num_syncs,
818                bool first_op, bool last_op);
819
820 struct dma_fence *xe_vm_rebind(struct xe_vm *vm, bool rebind_worker)
821 {
822         struct dma_fence *fence = NULL;
823         struct xe_vma *vma, *next;
824
825         lockdep_assert_held(&vm->lock);
826         if (xe_vm_no_dma_fences(vm) && !rebind_worker)
827                 return NULL;
828
829         xe_vm_assert_held(vm);
830         list_for_each_entry_safe(vma, next, &vm->rebind_list,
831                                  combined_links.rebind) {
832                 XE_WARN_ON(!vma->tile_present);
833
834                 list_del_init(&vma->combined_links.rebind);
835                 dma_fence_put(fence);
836                 if (rebind_worker)
837                         trace_xe_vma_rebind_worker(vma);
838                 else
839                         trace_xe_vma_rebind_exec(vma);
840                 fence = xe_vm_bind_vma(vma, NULL, NULL, 0, false, false);
841                 if (IS_ERR(fence))
842                         return fence;
843         }
844
845         return fence;
846 }
847
848 static struct xe_vma *xe_vma_create(struct xe_vm *vm,
849                                     struct xe_bo *bo,
850                                     u64 bo_offset_or_userptr,
851                                     u64 start, u64 end,
852                                     bool read_only,
853                                     bool is_null,
854                                     u8 tile_mask)
855 {
856         struct xe_vma *vma;
857         struct xe_tile *tile;
858         u8 id;
859
860         XE_WARN_ON(start >= end);
861         XE_WARN_ON(end >= vm->size);
862
863         if (!bo && !is_null)    /* userptr */
864                 vma = kzalloc(sizeof(*vma), GFP_KERNEL);
865         else
866                 vma = kzalloc(sizeof(*vma) - sizeof(struct xe_userptr),
867                               GFP_KERNEL);
868         if (!vma) {
869                 vma = ERR_PTR(-ENOMEM);
870                 return vma;
871         }
872
873         INIT_LIST_HEAD(&vma->combined_links.rebind);
874         INIT_LIST_HEAD(&vma->notifier.rebind_link);
875         INIT_LIST_HEAD(&vma->extobj.link);
876
877         INIT_LIST_HEAD(&vma->gpuva.gem.entry);
878         vma->gpuva.vm = &vm->gpuvm;
879         vma->gpuva.va.addr = start;
880         vma->gpuva.va.range = end - start + 1;
881         if (read_only)
882                 vma->gpuva.flags |= XE_VMA_READ_ONLY;
883         if (is_null)
884                 vma->gpuva.flags |= DRM_GPUVA_SPARSE;
885
886         if (tile_mask) {
887                 vma->tile_mask = tile_mask;
888         } else {
889                 for_each_tile(tile, vm->xe, id)
890                         vma->tile_mask |= 0x1 << id;
891         }
892
893         if (vm->xe->info.platform == XE_PVC)
894                 vma->gpuva.flags |= XE_VMA_ATOMIC_PTE_BIT;
895
896         if (bo) {
897                 struct drm_gpuvm_bo *vm_bo;
898
899                 xe_bo_assert_held(bo);
900
901                 vm_bo = drm_gpuvm_bo_obtain(vma->gpuva.vm, &bo->ttm.base);
902                 if (IS_ERR(vm_bo)) {
903                         kfree(vma);
904                         return ERR_CAST(vm_bo);
905                 }
906
907                 drm_gem_object_get(&bo->ttm.base);
908                 vma->gpuva.gem.obj = &bo->ttm.base;
909                 vma->gpuva.gem.offset = bo_offset_or_userptr;
910                 drm_gpuva_link(&vma->gpuva, vm_bo);
911                 drm_gpuvm_bo_put(vm_bo);
912         } else /* userptr or null */ {
913                 if (!is_null) {
914                         u64 size = end - start + 1;
915                         int err;
916
917                         INIT_LIST_HEAD(&vma->userptr.invalidate_link);
918                         vma->gpuva.gem.offset = bo_offset_or_userptr;
919
920                         err = mmu_interval_notifier_insert(&vma->userptr.notifier,
921                                                            current->mm,
922                                                            xe_vma_userptr(vma), size,
923                                                            &vma_userptr_notifier_ops);
924                         if (err) {
925                                 kfree(vma);
926                                 vma = ERR_PTR(err);
927                                 return vma;
928                         }
929
930                         vma->userptr.notifier_seq = LONG_MAX;
931                 }
932
933                 xe_vm_get(vm);
934         }
935
936         return vma;
937 }
938
939 static bool vm_remove_extobj(struct xe_vma *vma)
940 {
941         if (!list_empty(&vma->extobj.link)) {
942                 xe_vma_vm(vma)->extobj.entries--;
943                 list_del_init(&vma->extobj.link);
944                 return true;
945         }
946         return false;
947 }
948
949 static void xe_vma_destroy_late(struct xe_vma *vma)
950 {
951         struct xe_vm *vm = xe_vma_vm(vma);
952         struct xe_device *xe = vm->xe;
953         bool read_only = xe_vma_read_only(vma);
954
955         if (xe_vma_is_userptr(vma)) {
956                 if (vma->userptr.sg) {
957                         dma_unmap_sgtable(xe->drm.dev,
958                                           vma->userptr.sg,
959                                           read_only ? DMA_TO_DEVICE :
960                                           DMA_BIDIRECTIONAL, 0);
961                         sg_free_table(vma->userptr.sg);
962                         vma->userptr.sg = NULL;
963                 }
964
965                 /*
966                  * Since userptr pages are not pinned, we can't remove
967                  * the notifier until we're sure the GPU is not accessing
968                  * them anymore.
969                  */
970                 mmu_interval_notifier_remove(&vma->userptr.notifier);
971                 xe_vm_put(vm);
972         } else if (xe_vma_is_null(vma)) {
973                 xe_vm_put(vm);
974         } else {
975                 xe_bo_put(xe_vma_bo(vma));
976         }
977
978         kfree(vma);
979 }
980
981 static void vma_destroy_work_func(struct work_struct *w)
982 {
983         struct xe_vma *vma =
984                 container_of(w, struct xe_vma, destroy_work);
985
986         xe_vma_destroy_late(vma);
987 }
988
989 static struct xe_vma *
990 bo_has_vm_references_locked(struct xe_bo *bo, struct xe_vm *vm,
991                             struct xe_vma *ignore)
992 {
993         struct drm_gpuvm_bo *vm_bo;
994         struct drm_gpuva *va;
995         struct drm_gem_object *obj = &bo->ttm.base;
996
997         xe_bo_assert_held(bo);
998
999         drm_gem_for_each_gpuvm_bo(vm_bo, obj) {
1000                 drm_gpuvm_bo_for_each_va(va, vm_bo) {
1001                         struct xe_vma *vma = gpuva_to_vma(va);
1002
1003                         if (vma != ignore && xe_vma_vm(vma) == vm)
1004                                 return vma;
1005                 }
1006         }
1007
1008         return NULL;
1009 }
1010
1011 static bool bo_has_vm_references(struct xe_bo *bo, struct xe_vm *vm,
1012                                  struct xe_vma *ignore)
1013 {
1014         bool ret;
1015
1016         xe_bo_lock(bo, false);
1017         ret = !!bo_has_vm_references_locked(bo, vm, ignore);
1018         xe_bo_unlock(bo);
1019
1020         return ret;
1021 }
1022
1023 static void __vm_insert_extobj(struct xe_vm *vm, struct xe_vma *vma)
1024 {
1025         lockdep_assert_held_write(&vm->lock);
1026
1027         list_add(&vma->extobj.link, &vm->extobj.list);
1028         vm->extobj.entries++;
1029 }
1030
1031 static void vm_insert_extobj(struct xe_vm *vm, struct xe_vma *vma)
1032 {
1033         struct xe_bo *bo = xe_vma_bo(vma);
1034
1035         lockdep_assert_held_write(&vm->lock);
1036
1037         if (bo_has_vm_references(bo, vm, vma))
1038                 return;
1039
1040         __vm_insert_extobj(vm, vma);
1041 }
1042
1043 static void vma_destroy_cb(struct dma_fence *fence,
1044                            struct dma_fence_cb *cb)
1045 {
1046         struct xe_vma *vma = container_of(cb, struct xe_vma, destroy_cb);
1047
1048         INIT_WORK(&vma->destroy_work, vma_destroy_work_func);
1049         queue_work(system_unbound_wq, &vma->destroy_work);
1050 }
1051
1052 static void xe_vma_destroy(struct xe_vma *vma, struct dma_fence *fence)
1053 {
1054         struct xe_vm *vm = xe_vma_vm(vma);
1055
1056         lockdep_assert_held_write(&vm->lock);
1057         XE_WARN_ON(!list_empty(&vma->combined_links.destroy));
1058
1059         if (xe_vma_is_userptr(vma)) {
1060                 XE_WARN_ON(!(vma->gpuva.flags & XE_VMA_DESTROYED));
1061
1062                 spin_lock(&vm->userptr.invalidated_lock);
1063                 list_del(&vma->userptr.invalidate_link);
1064                 spin_unlock(&vm->userptr.invalidated_lock);
1065         } else if (!xe_vma_is_null(vma)) {
1066                 xe_bo_assert_held(xe_vma_bo(vma));
1067
1068                 spin_lock(&vm->notifier.list_lock);
1069                 list_del(&vma->notifier.rebind_link);
1070                 spin_unlock(&vm->notifier.list_lock);
1071
1072                 drm_gpuva_unlink(&vma->gpuva);
1073
1074                 if (!xe_vma_bo(vma)->vm && vm_remove_extobj(vma)) {
1075                         struct xe_vma *other;
1076
1077                         other = bo_has_vm_references_locked(xe_vma_bo(vma), vm, NULL);
1078
1079                         if (other)
1080                                 __vm_insert_extobj(vm, other);
1081                 }
1082         }
1083
1084         xe_vm_assert_held(vm);
1085         if (fence) {
1086                 int ret = dma_fence_add_callback(fence, &vma->destroy_cb,
1087                                                  vma_destroy_cb);
1088
1089                 if (ret) {
1090                         XE_WARN_ON(ret != -ENOENT);
1091                         xe_vma_destroy_late(vma);
1092                 }
1093         } else {
1094                 xe_vma_destroy_late(vma);
1095         }
1096 }
1097
1098 static void xe_vma_destroy_unlocked(struct xe_vma *vma)
1099 {
1100         struct ttm_validate_buffer tv[2];
1101         struct ww_acquire_ctx ww;
1102         struct xe_bo *bo = xe_vma_bo(vma);
1103         LIST_HEAD(objs);
1104         LIST_HEAD(dups);
1105         int err;
1106
1107         memset(tv, 0, sizeof(tv));
1108         tv[0].bo = xe_vm_ttm_bo(xe_vma_vm(vma));
1109         list_add(&tv[0].head, &objs);
1110
1111         if (bo) {
1112                 tv[1].bo = &xe_bo_get(bo)->ttm;
1113                 list_add(&tv[1].head, &objs);
1114         }
1115         err = ttm_eu_reserve_buffers(&ww, &objs, false, &dups);
1116         XE_WARN_ON(err);
1117
1118         xe_vma_destroy(vma, NULL);
1119
1120         ttm_eu_backoff_reservation(&ww, &objs);
1121         if (bo)
1122                 xe_bo_put(bo);
1123 }
1124
1125 struct xe_vma *
1126 xe_vm_find_overlapping_vma(struct xe_vm *vm, u64 start, u64 range)
1127 {
1128         struct drm_gpuva *gpuva;
1129
1130         lockdep_assert_held(&vm->lock);
1131
1132         if (xe_vm_is_closed_or_banned(vm))
1133                 return NULL;
1134
1135         XE_WARN_ON(start + range > vm->size);
1136
1137         gpuva = drm_gpuva_find_first(&vm->gpuvm, start, range);
1138
1139         return gpuva ? gpuva_to_vma(gpuva) : NULL;
1140 }
1141
1142 static int xe_vm_insert_vma(struct xe_vm *vm, struct xe_vma *vma)
1143 {
1144         int err;
1145
1146         XE_WARN_ON(xe_vma_vm(vma) != vm);
1147         lockdep_assert_held(&vm->lock);
1148
1149         err = drm_gpuva_insert(&vm->gpuvm, &vma->gpuva);
1150         XE_WARN_ON(err);        /* Shouldn't be possible */
1151
1152         return err;
1153 }
1154
1155 static void xe_vm_remove_vma(struct xe_vm *vm, struct xe_vma *vma)
1156 {
1157         XE_WARN_ON(xe_vma_vm(vma) != vm);
1158         lockdep_assert_held(&vm->lock);
1159
1160         drm_gpuva_remove(&vma->gpuva);
1161         if (vm->usm.last_fault_vma == vma)
1162                 vm->usm.last_fault_vma = NULL;
1163 }
1164
1165 static struct drm_gpuva_op *xe_vm_op_alloc(void)
1166 {
1167         struct xe_vma_op *op;
1168
1169         op = kzalloc(sizeof(*op), GFP_KERNEL);
1170
1171         if (unlikely(!op))
1172                 return NULL;
1173
1174         return &op->base;
1175 }
1176
1177 static void xe_vm_free(struct drm_gpuvm *gpuvm);
1178
1179 static struct drm_gpuvm_ops gpuvm_ops = {
1180         .op_alloc = xe_vm_op_alloc,
1181         .vm_free = xe_vm_free,
1182 };
1183
1184 static void xe_vma_op_work_func(struct work_struct *w);
1185 static void vm_destroy_work_func(struct work_struct *w);
1186
1187 struct xe_vm *xe_vm_create(struct xe_device *xe, u32 flags)
1188 {
1189         struct drm_gem_object *vm_resv_obj;
1190         struct xe_vm *vm;
1191         int err, number_tiles = 0;
1192         struct xe_tile *tile;
1193         u8 id;
1194
1195         vm = kzalloc(sizeof(*vm), GFP_KERNEL);
1196         if (!vm)
1197                 return ERR_PTR(-ENOMEM);
1198
1199         vm->xe = xe;
1200
1201         vm->size = 1ull << xe->info.va_bits;
1202
1203         vm->flags = flags;
1204
1205         init_rwsem(&vm->lock);
1206
1207         INIT_LIST_HEAD(&vm->rebind_list);
1208
1209         INIT_LIST_HEAD(&vm->userptr.repin_list);
1210         INIT_LIST_HEAD(&vm->userptr.invalidated);
1211         init_rwsem(&vm->userptr.notifier_lock);
1212         spin_lock_init(&vm->userptr.invalidated_lock);
1213
1214         INIT_LIST_HEAD(&vm->notifier.rebind_list);
1215         spin_lock_init(&vm->notifier.list_lock);
1216
1217         INIT_LIST_HEAD(&vm->async_ops.pending);
1218         INIT_WORK(&vm->async_ops.work, xe_vma_op_work_func);
1219         spin_lock_init(&vm->async_ops.lock);
1220
1221         INIT_WORK(&vm->destroy_work, vm_destroy_work_func);
1222
1223         INIT_LIST_HEAD(&vm->preempt.exec_queues);
1224         vm->preempt.min_run_period_ms = 10;     /* FIXME: Wire up to uAPI */
1225
1226         for_each_tile(tile, xe, id)
1227                 xe_range_fence_tree_init(&vm->rftree[id]);
1228
1229         INIT_LIST_HEAD(&vm->extobj.list);
1230
1231         if (!(flags & XE_VM_FLAG_MIGRATION))
1232                 xe_device_mem_access_get(xe);
1233
1234         vm_resv_obj = drm_gpuvm_resv_object_alloc(&xe->drm);
1235         if (!vm_resv_obj) {
1236                 err = -ENOMEM;
1237                 goto err_no_resv;
1238         }
1239
1240         drm_gpuvm_init(&vm->gpuvm, "Xe VM", 0, &xe->drm, vm_resv_obj,
1241                        0, vm->size, 0, 0, &gpuvm_ops);
1242
1243         drm_gem_object_put(vm_resv_obj);
1244
1245         err = dma_resv_lock_interruptible(xe_vm_resv(vm), NULL);
1246         if (err)
1247                 goto err_close;
1248
1249         if (IS_DGFX(xe) && xe->info.vram_flags & XE_VRAM_FLAGS_NEED64K)
1250                 vm->flags |= XE_VM_FLAG_64K;
1251
1252         for_each_tile(tile, xe, id) {
1253                 if (flags & XE_VM_FLAG_MIGRATION &&
1254                     tile->id != XE_VM_FLAG_TILE_ID(flags))
1255                         continue;
1256
1257                 vm->pt_root[id] = xe_pt_create(vm, tile, xe->info.vm_max_level);
1258                 if (IS_ERR(vm->pt_root[id])) {
1259                         err = PTR_ERR(vm->pt_root[id]);
1260                         vm->pt_root[id] = NULL;
1261                         goto err_unlock_close;
1262                 }
1263         }
1264
1265         if (flags & XE_VM_FLAG_SCRATCH_PAGE) {
1266                 for_each_tile(tile, xe, id) {
1267                         if (!vm->pt_root[id])
1268                                 continue;
1269
1270                         err = xe_pt_create_scratch(xe, tile, vm);
1271                         if (err)
1272                                 goto err_unlock_close;
1273                 }
1274                 vm->batch_invalidate_tlb = true;
1275         }
1276
1277         if (flags & XE_VM_FLAG_COMPUTE_MODE) {
1278                 INIT_WORK(&vm->preempt.rebind_work, preempt_rebind_work_func);
1279                 vm->flags |= XE_VM_FLAG_COMPUTE_MODE;
1280                 vm->batch_invalidate_tlb = false;
1281         }
1282
1283         if (flags & XE_VM_FLAG_ASYNC_BIND_OPS) {
1284                 vm->async_ops.fence.context = dma_fence_context_alloc(1);
1285                 vm->flags |= XE_VM_FLAG_ASYNC_BIND_OPS;
1286         }
1287
1288         /* Fill pt_root after allocating scratch tables */
1289         for_each_tile(tile, xe, id) {
1290                 if (!vm->pt_root[id])
1291                         continue;
1292
1293                 xe_pt_populate_empty(tile, vm, vm->pt_root[id]);
1294         }
1295         dma_resv_unlock(xe_vm_resv(vm));
1296
1297         /* Kernel migration VM shouldn't have a circular loop. */
1298         if (!(flags & XE_VM_FLAG_MIGRATION)) {
1299                 for_each_tile(tile, xe, id) {
1300                         struct xe_gt *gt = tile->primary_gt;
1301                         struct xe_vm *migrate_vm;
1302                         struct xe_exec_queue *q;
1303
1304                         if (!vm->pt_root[id])
1305                                 continue;
1306
1307                         migrate_vm = xe_migrate_get_vm(tile->migrate);
1308                         q = xe_exec_queue_create_class(xe, gt, migrate_vm,
1309                                                        XE_ENGINE_CLASS_COPY,
1310                                                        EXEC_QUEUE_FLAG_VM);
1311                         xe_vm_put(migrate_vm);
1312                         if (IS_ERR(q)) {
1313                                 err = PTR_ERR(q);
1314                                 goto err_close;
1315                         }
1316                         vm->q[id] = q;
1317                         number_tiles++;
1318                 }
1319         }
1320
1321         if (number_tiles > 1)
1322                 vm->composite_fence_ctx = dma_fence_context_alloc(1);
1323
1324         mutex_lock(&xe->usm.lock);
1325         if (flags & XE_VM_FLAG_FAULT_MODE)
1326                 xe->usm.num_vm_in_fault_mode++;
1327         else if (!(flags & XE_VM_FLAG_MIGRATION))
1328                 xe->usm.num_vm_in_non_fault_mode++;
1329         mutex_unlock(&xe->usm.lock);
1330
1331         trace_xe_vm_create(vm);
1332
1333         return vm;
1334
1335 err_unlock_close:
1336         dma_resv_unlock(xe_vm_resv(vm));
1337 err_close:
1338         xe_vm_close_and_put(vm);
1339         return ERR_PTR(err);
1340
1341 err_no_resv:
1342         for_each_tile(tile, xe, id)
1343                 xe_range_fence_tree_fini(&vm->rftree[id]);
1344         kfree(vm);
1345         if (!(flags & XE_VM_FLAG_MIGRATION))
1346                 xe_device_mem_access_put(xe);
1347         return ERR_PTR(err);
1348 }
1349
1350 static void flush_async_ops(struct xe_vm *vm)
1351 {
1352         queue_work(system_unbound_wq, &vm->async_ops.work);
1353         flush_work(&vm->async_ops.work);
1354 }
1355
1356 static void vm_error_capture(struct xe_vm *vm, int err,
1357                              u32 op, u64 addr, u64 size)
1358 {
1359         struct drm_xe_vm_bind_op_error_capture capture;
1360         u64 __user *address =
1361                 u64_to_user_ptr(vm->async_ops.error_capture.addr);
1362         bool in_kthread = !current->mm;
1363
1364         capture.error = err;
1365         capture.op = op;
1366         capture.addr = addr;
1367         capture.size = size;
1368
1369         if (in_kthread) {
1370                 if (!mmget_not_zero(vm->async_ops.error_capture.mm))
1371                         goto mm_closed;
1372                 kthread_use_mm(vm->async_ops.error_capture.mm);
1373         }
1374
1375         if (copy_to_user(address, &capture, sizeof(capture)))
1376                 XE_WARN_ON("Copy to user failed");
1377
1378         if (in_kthread) {
1379                 kthread_unuse_mm(vm->async_ops.error_capture.mm);
1380                 mmput(vm->async_ops.error_capture.mm);
1381         }
1382
1383 mm_closed:
1384         wake_up_all(&vm->async_ops.error_capture.wq);
1385 }
1386
1387 static void xe_vm_close(struct xe_vm *vm)
1388 {
1389         down_write(&vm->lock);
1390         vm->size = 0;
1391         up_write(&vm->lock);
1392 }
1393
1394 void xe_vm_close_and_put(struct xe_vm *vm)
1395 {
1396         LIST_HEAD(contested);
1397         struct xe_device *xe = vm->xe;
1398         struct xe_tile *tile;
1399         struct xe_vma *vma, *next_vma;
1400         struct drm_gpuva *gpuva, *next;
1401         u8 id;
1402
1403         XE_WARN_ON(vm->preempt.num_exec_queues);
1404
1405         xe_vm_close(vm);
1406         flush_async_ops(vm);
1407         if (xe_vm_in_compute_mode(vm))
1408                 flush_work(&vm->preempt.rebind_work);
1409
1410         for_each_tile(tile, xe, id) {
1411                 if (vm->q[id]) {
1412                         xe_exec_queue_kill(vm->q[id]);
1413                         xe_exec_queue_put(vm->q[id]);
1414                         vm->q[id] = NULL;
1415                 }
1416         }
1417
1418         down_write(&vm->lock);
1419         xe_vm_lock(vm, false);
1420         drm_gpuvm_for_each_va_safe(gpuva, next, &vm->gpuvm) {
1421                 vma = gpuva_to_vma(gpuva);
1422
1423                 if (xe_vma_has_no_bo(vma)) {
1424                         down_read(&vm->userptr.notifier_lock);
1425                         vma->gpuva.flags |= XE_VMA_DESTROYED;
1426                         up_read(&vm->userptr.notifier_lock);
1427                 }
1428
1429                 xe_vm_remove_vma(vm, vma);
1430
1431                 /* easy case, remove from VMA? */
1432                 if (xe_vma_has_no_bo(vma) || xe_vma_bo(vma)->vm) {
1433                         list_del_init(&vma->combined_links.rebind);
1434                         xe_vma_destroy(vma, NULL);
1435                         continue;
1436                 }
1437
1438                 list_move_tail(&vma->combined_links.destroy, &contested);
1439                 vma->gpuva.flags |= XE_VMA_DESTROYED;
1440         }
1441
1442         /*
1443          * All vm operations will add shared fences to resv.
1444          * The only exception is eviction for a shared object,
1445          * but even so, the unbind when evicted would still
1446          * install a fence to resv. Hence it's safe to
1447          * destroy the pagetables immediately.
1448          */
1449         for_each_tile(tile, xe, id) {
1450                 if (vm->scratch_bo[id]) {
1451                         u32 i;
1452
1453                         xe_bo_unpin(vm->scratch_bo[id]);
1454                         xe_bo_put(vm->scratch_bo[id]);
1455                         for (i = 0; i < vm->pt_root[id]->level; i++)
1456                                 xe_pt_destroy(vm->scratch_pt[id][i], vm->flags,
1457                                               NULL);
1458                 }
1459                 if (vm->pt_root[id]) {
1460                         xe_pt_destroy(vm->pt_root[id], vm->flags, NULL);
1461                         vm->pt_root[id] = NULL;
1462                 }
1463         }
1464         xe_vm_unlock(vm);
1465
1466         /*
1467          * VM is now dead, cannot re-add nodes to vm->vmas if it's NULL
1468          * Since we hold a refcount to the bo, we can remove and free
1469          * the members safely without locking.
1470          */
1471         list_for_each_entry_safe(vma, next_vma, &contested,
1472                                  combined_links.destroy) {
1473                 list_del_init(&vma->combined_links.destroy);
1474                 xe_vma_destroy_unlocked(vma);
1475         }
1476
1477         if (vm->async_ops.error_capture.addr)
1478                 wake_up_all(&vm->async_ops.error_capture.wq);
1479
1480         XE_WARN_ON(!list_empty(&vm->extobj.list));
1481         up_write(&vm->lock);
1482
1483         mutex_lock(&xe->usm.lock);
1484         if (vm->flags & XE_VM_FLAG_FAULT_MODE)
1485                 xe->usm.num_vm_in_fault_mode--;
1486         else if (!(vm->flags & XE_VM_FLAG_MIGRATION))
1487                 xe->usm.num_vm_in_non_fault_mode--;
1488         mutex_unlock(&xe->usm.lock);
1489
1490         for_each_tile(tile, xe, id)
1491                 xe_range_fence_tree_fini(&vm->rftree[id]);
1492
1493         xe_vm_put(vm);
1494 }
1495
1496 static void vm_destroy_work_func(struct work_struct *w)
1497 {
1498         struct xe_vm *vm =
1499                 container_of(w, struct xe_vm, destroy_work);
1500         struct xe_device *xe = vm->xe;
1501         struct xe_tile *tile;
1502         u8 id;
1503         void *lookup;
1504
1505         /* xe_vm_close_and_put was not called? */
1506         XE_WARN_ON(vm->size);
1507
1508         if (!(vm->flags & XE_VM_FLAG_MIGRATION)) {
1509                 xe_device_mem_access_put(xe);
1510
1511                 if (xe->info.has_asid) {
1512                         mutex_lock(&xe->usm.lock);
1513                         lookup = xa_erase(&xe->usm.asid_to_vm, vm->usm.asid);
1514                         XE_WARN_ON(lookup != vm);
1515                         mutex_unlock(&xe->usm.lock);
1516                 }
1517         }
1518
1519         for_each_tile(tile, xe, id)
1520                 XE_WARN_ON(vm->pt_root[id]);
1521
1522         trace_xe_vm_free(vm);
1523         dma_fence_put(vm->rebind_fence);
1524         kfree(vm);
1525 }
1526
1527 static void xe_vm_free(struct drm_gpuvm *gpuvm)
1528 {
1529         struct xe_vm *vm = container_of(gpuvm, struct xe_vm, gpuvm);
1530
1531         /* To destroy the VM we need to be able to sleep */
1532         queue_work(system_unbound_wq, &vm->destroy_work);
1533 }
1534
1535 struct xe_vm *xe_vm_lookup(struct xe_file *xef, u32 id)
1536 {
1537         struct xe_vm *vm;
1538
1539         mutex_lock(&xef->vm.lock);
1540         vm = xa_load(&xef->vm.xa, id);
1541         if (vm)
1542                 xe_vm_get(vm);
1543         mutex_unlock(&xef->vm.lock);
1544
1545         return vm;
1546 }
1547
1548 u64 xe_vm_pdp4_descriptor(struct xe_vm *vm, struct xe_tile *tile)
1549 {
1550         return xe_pde_encode(vm->pt_root[tile->id]->bo, 0,
1551                              XE_CACHE_WB);
1552 }
1553
1554 static struct dma_fence *
1555 xe_vm_unbind_vma(struct xe_vma *vma, struct xe_exec_queue *q,
1556                  struct xe_sync_entry *syncs, u32 num_syncs,
1557                  bool first_op, bool last_op)
1558 {
1559         struct xe_tile *tile;
1560         struct dma_fence *fence = NULL;
1561         struct dma_fence **fences = NULL;
1562         struct dma_fence_array *cf = NULL;
1563         struct xe_vm *vm = xe_vma_vm(vma);
1564         int cur_fence = 0, i;
1565         int number_tiles = hweight8(vma->tile_present);
1566         int err;
1567         u8 id;
1568
1569         trace_xe_vma_unbind(vma);
1570
1571         if (number_tiles > 1) {
1572                 fences = kmalloc_array(number_tiles, sizeof(*fences),
1573                                        GFP_KERNEL);
1574                 if (!fences)
1575                         return ERR_PTR(-ENOMEM);
1576         }
1577
1578         for_each_tile(tile, vm->xe, id) {
1579                 if (!(vma->tile_present & BIT(id)))
1580                         goto next;
1581
1582                 fence = __xe_pt_unbind_vma(tile, vma, q, first_op ? syncs : NULL,
1583                                            first_op ? num_syncs : 0);
1584                 if (IS_ERR(fence)) {
1585                         err = PTR_ERR(fence);
1586                         goto err_fences;
1587                 }
1588
1589                 if (fences)
1590                         fences[cur_fence++] = fence;
1591
1592 next:
1593                 if (q && vm->pt_root[id] && !list_empty(&q->multi_gt_list))
1594                         q = list_next_entry(q, multi_gt_list);
1595         }
1596
1597         if (fences) {
1598                 cf = dma_fence_array_create(number_tiles, fences,
1599                                             vm->composite_fence_ctx,
1600                                             vm->composite_fence_seqno++,
1601                                             false);
1602                 if (!cf) {
1603                         --vm->composite_fence_seqno;
1604                         err = -ENOMEM;
1605                         goto err_fences;
1606                 }
1607         }
1608
1609         if (last_op) {
1610                 for (i = 0; i < num_syncs; i++)
1611                         xe_sync_entry_signal(&syncs[i], NULL,
1612                                              cf ? &cf->base : fence);
1613         }
1614
1615         return cf ? &cf->base : !fence ? dma_fence_get_stub() : fence;
1616
1617 err_fences:
1618         if (fences) {
1619                 while (cur_fence) {
1620                         /* FIXME: Rewind the previous binds? */
1621                         dma_fence_put(fences[--cur_fence]);
1622                 }
1623                 kfree(fences);
1624         }
1625
1626         return ERR_PTR(err);
1627 }
1628
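/*
 * Bind @vma on every tile selected by its tile_mask, falling back to the VM's
 * default bind queue for a tile when no exec queue is given. Sync handling and
 * fence aggregation mirror xe_vm_unbind_vma() above.
 */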
1629 static struct dma_fence *
1630 xe_vm_bind_vma(struct xe_vma *vma, struct xe_exec_queue *q,
1631                struct xe_sync_entry *syncs, u32 num_syncs,
1632                bool first_op, bool last_op)
1633 {
1634         struct xe_tile *tile;
1635         struct dma_fence *fence;
1636         struct dma_fence **fences = NULL;
1637         struct dma_fence_array *cf = NULL;
1638         struct xe_vm *vm = xe_vma_vm(vma);
1639         int cur_fence = 0, i;
1640         int number_tiles = hweight8(vma->tile_mask);
1641         int err;
1642         u8 id;
1643
1644         trace_xe_vma_bind(vma);
1645
1646         if (number_tiles > 1) {
1647                 fences = kmalloc_array(number_tiles, sizeof(*fences),
1648                                        GFP_KERNEL);
1649                 if (!fences)
1650                         return ERR_PTR(-ENOMEM);
1651         }
1652
1653         for_each_tile(tile, vm->xe, id) {
1654                 if (!(vma->tile_mask & BIT(id)))
1655                         goto next;
1656
1657                 fence = __xe_pt_bind_vma(tile, vma, q ? q : vm->q[id],
1658                                          first_op ? syncs : NULL,
1659                                          first_op ? num_syncs : 0,
1660                                          vma->tile_present & BIT(id));
1661                 if (IS_ERR(fence)) {
1662                         err = PTR_ERR(fence);
1663                         goto err_fences;
1664                 }
1665
1666                 if (fences)
1667                         fences[cur_fence++] = fence;
1668
1669 next:
1670                 if (q && vm->pt_root[id] && !list_empty(&q->multi_gt_list))
1671                         q = list_next_entry(q, multi_gt_list);
1672         }
1673
1674         if (fences) {
1675                 cf = dma_fence_array_create(number_tiles, fences,
1676                                             vm->composite_fence_ctx,
1677                                             vm->composite_fence_seqno++,
1678                                             false);
1679                 if (!cf) {
1680                         --vm->composite_fence_seqno;
1681                         err = -ENOMEM;
1682                         goto err_fences;
1683                 }
1684         }
1685
1686         if (last_op) {
1687                 for (i = 0; i < num_syncs; i++)
1688                         xe_sync_entry_signal(&syncs[i], NULL,
1689                                              cf ? &cf->base : fence);
1690         }
1691
1692         return cf ? &cf->base : fence;
1693
1694 err_fences:
1695         if (fences) {
1696                 while (cur_fence) {
1697                         /* FIXME: Rewind the previous binds? */
1698                         dma_fence_put(fences[--cur_fence]);
1699                 }
1700                 kfree(fences);
1701         }
1702
1703         return ERR_PTR(err);
1704 }
1705
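/*
 * Fence published to user syncs for asynchronous VM_BIND operations; it is
 * signalled from a callback once the underlying bind/unbind fence signals,
 * propagating that fence's error status.
 */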
1706 struct async_op_fence {
1707         struct dma_fence fence;
1708         struct dma_fence *wait_fence;
1709         struct dma_fence_cb cb;
1710         struct xe_vm *vm;
1711         wait_queue_head_t wq;
1712         bool started;
1713 };
1714
1715 static const char *async_op_fence_get_driver_name(struct dma_fence *dma_fence)
1716 {
1717         return "xe";
1718 }
1719
1720 static const char *
1721 async_op_fence_get_timeline_name(struct dma_fence *dma_fence)
1722 {
1723         return "async_op_fence";
1724 }
1725
1726 static const struct dma_fence_ops async_op_fence_ops = {
1727         .get_driver_name = async_op_fence_get_driver_name,
1728         .get_timeline_name = async_op_fence_get_timeline_name,
1729 };
1730
1731 static void async_op_fence_cb(struct dma_fence *fence, struct dma_fence_cb *cb)
1732 {
1733         struct async_op_fence *afence =
1734                 container_of(cb, struct async_op_fence, cb);
1735
1736         afence->fence.error = afence->wait_fence->error;
1737         dma_fence_signal(&afence->fence);
1738         xe_vm_put(afence->vm);
1739         dma_fence_put(afence->wait_fence);
1740         dma_fence_put(&afence->fence);
1741 }
1742
1743 static void add_async_op_fence_cb(struct xe_vm *vm,
1744                                   struct dma_fence *fence,
1745                                   struct async_op_fence *afence)
1746 {
1747         int ret;
1748
1749         if (!xe_vm_no_dma_fences(vm)) {
1750                 afence->started = true;
1751                 smp_wmb();
1752                 wake_up_all(&afence->wq);
1753         }
1754
1755         afence->wait_fence = dma_fence_get(fence);
1756         afence->vm = xe_vm_get(vm);
1757         dma_fence_get(&afence->fence);
1758         ret = dma_fence_add_callback(fence, &afence->cb, async_op_fence_cb);
1759         if (ret == -ENOENT) {
1760                 afence->fence.error = afence->wait_fence->error;
1761                 dma_fence_signal(&afence->fence);
1762         }
1763         if (ret) {
1764                 xe_vm_put(vm);
1765                 dma_fence_put(afence->wait_fence);
1766                 dma_fence_put(&afence->fence);
1767         }
1768         XE_WARN_ON(ret && ret != -ENOENT);
1769 }
1770
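/*
 * If @fence is an async bind fence, wait (interruptibly) until the operation
 * backing it has actually been started; any other fence type returns
 * immediately.
 */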
1771 int xe_vm_async_fence_wait_start(struct dma_fence *fence)
1772 {
1773         if (fence->ops == &async_op_fence_ops) {
1774                 struct async_op_fence *afence =
1775                         container_of(fence, struct async_op_fence, fence);
1776
1777                 XE_WARN_ON(xe_vm_no_dma_fences(afence->vm));
1778
1779                 smp_rmb();
1780                 return wait_event_interruptible(afence->wq, afence->started);
1781         }
1782
1783         return 0;
1784 }
1785
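/*
 * Common bind helper: when @immediate the VMA is bound right away, otherwise
 * (fault-mode VMs only) the real bind is deferred to the page-fault handler
 * and any out-syncs are signalled with a stub fence.
 */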
1786 static int __xe_vm_bind(struct xe_vm *vm, struct xe_vma *vma,
1787                         struct xe_exec_queue *q, struct xe_sync_entry *syncs,
1788                         u32 num_syncs, struct async_op_fence *afence,
1789                         bool immediate, bool first_op, bool last_op)
1790 {
1791         struct dma_fence *fence;
1792
1793         xe_vm_assert_held(vm);
1794
1795         if (immediate) {
1796                 fence = xe_vm_bind_vma(vma, q, syncs, num_syncs, first_op,
1797                                        last_op);
1798                 if (IS_ERR(fence))
1799                         return PTR_ERR(fence);
1800         } else {
1801                 int i;
1802
1803                 XE_WARN_ON(!xe_vm_in_fault_mode(vm));
1804
1805                 fence = dma_fence_get_stub();
1806                 if (last_op) {
1807                         for (i = 0; i < num_syncs; i++)
1808                                 xe_sync_entry_signal(&syncs[i], NULL, fence);
1809                 }
1810         }
1811         if (afence)
1812                 add_async_op_fence_cb(vm, fence, afence);
1813
1814         dma_fence_put(fence);
1815         return 0;
1816 }
1817
1818 static int xe_vm_bind(struct xe_vm *vm, struct xe_vma *vma, struct xe_exec_queue *q,
1819                       struct xe_bo *bo, struct xe_sync_entry *syncs,
1820                       u32 num_syncs, struct async_op_fence *afence,
1821                       bool immediate, bool first_op, bool last_op)
1822 {
1823         int err;
1824
1825         xe_vm_assert_held(vm);
1826         xe_bo_assert_held(bo);
1827
1828         if (bo && immediate) {
1829                 err = xe_bo_validate(bo, vm, true);
1830                 if (err)
1831                         return err;
1832         }
1833
1834         return __xe_vm_bind(vm, vma, q, syncs, num_syncs, afence, immediate,
1835                             first_op, last_op);
1836 }
1837
1838 static int xe_vm_unbind(struct xe_vm *vm, struct xe_vma *vma,
1839                         struct xe_exec_queue *q, struct xe_sync_entry *syncs,
1840                         u32 num_syncs, struct async_op_fence *afence,
1841                         bool first_op, bool last_op)
1842 {
1843         struct dma_fence *fence;
1844
1845         xe_vm_assert_held(vm);
1846         xe_bo_assert_held(xe_vma_bo(vma));
1847
1848         fence = xe_vm_unbind_vma(vma, q, syncs, num_syncs, first_op, last_op);
1849         if (IS_ERR(fence))
1850                 return PTR_ERR(fence);
1851         if (afence)
1852                 add_async_op_fence_cb(vm, fence, afence);
1853
1854         xe_vma_destroy(vma, fence);
1855         dma_fence_put(fence);
1856
1857         return 0;
1858 }
1859
1860 static int vm_set_error_capture_address(struct xe_device *xe, struct xe_vm *vm,
1861                                         u64 value)
1862 {
1863         if (XE_IOCTL_DBG(xe, !value))
1864                 return -EINVAL;
1865
1866         if (XE_IOCTL_DBG(xe, !(vm->flags & XE_VM_FLAG_ASYNC_BIND_OPS)))
1867                 return -EOPNOTSUPP;
1868
1869         if (XE_IOCTL_DBG(xe, vm->async_ops.error_capture.addr))
1870                 return -EOPNOTSUPP;
1871
1872         vm->async_ops.error_capture.mm = current->mm;
1873         vm->async_ops.error_capture.addr = value;
1874         init_waitqueue_head(&vm->async_ops.error_capture.wq);
1875
1876         return 0;
1877 }
1878
1879 typedef int (*xe_vm_set_property_fn)(struct xe_device *xe, struct xe_vm *vm,
1880                                      u64 value);
1881
1882 static const xe_vm_set_property_fn vm_set_property_funcs[] = {
1883         [XE_VM_PROPERTY_BIND_OP_ERROR_CAPTURE_ADDRESS] =
1884                 vm_set_error_capture_address,
1885 };
1886
1887 static int vm_user_ext_set_property(struct xe_device *xe, struct xe_vm *vm,
1888                                     u64 extension)
1889 {
1890         u64 __user *address = u64_to_user_ptr(extension);
1891         struct drm_xe_ext_vm_set_property ext;
1892         int err;
1893
1894         err = copy_from_user(&ext, address, sizeof(ext));
1895         if (XE_IOCTL_DBG(xe, err))
1896                 return -EFAULT;
1897
1898         if (XE_IOCTL_DBG(xe, ext.property >=
1899                          ARRAY_SIZE(vm_set_property_funcs)) ||
1900             XE_IOCTL_DBG(xe, ext.pad) ||
1901             XE_IOCTL_DBG(xe, ext.reserved[0] || ext.reserved[1]))
1902                 return -EINVAL;
1903
1904         return vm_set_property_funcs[ext.property](xe, vm, ext.value);
1905 }
1906
1907 typedef int (*xe_vm_user_extension_fn)(struct xe_device *xe, struct xe_vm *vm,
1908                                        u64 extension);
1909
1910 static const xe_vm_user_extension_fn vm_user_extension_funcs[] = {
1911         [XE_VM_EXTENSION_SET_PROPERTY] = vm_user_ext_set_property,
1912 };
1913
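/*
 * User extensions form a singly linked chain of xe_user_extension structs in
 * userspace memory; each node names one of the handlers above and points at
 * the next node, with the chain depth capped at MAX_USER_EXTENSIONS.
 */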
1914 #define MAX_USER_EXTENSIONS     16
1915 static int vm_user_extensions(struct xe_device *xe, struct xe_vm *vm,
1916                               u64 extensions, int ext_number)
1917 {
1918         u64 __user *address = u64_to_user_ptr(extensions);
1919         struct xe_user_extension ext;
1920         int err;
1921
1922         if (XE_IOCTL_DBG(xe, ext_number >= MAX_USER_EXTENSIONS))
1923                 return -E2BIG;
1924
1925         err = copy_from_user(&ext, address, sizeof(ext));
1926         if (XE_IOCTL_DBG(xe, err))
1927                 return -EFAULT;
1928
1929         if (XE_IOCTL_DBG(xe, ext.pad) ||
1930             XE_IOCTL_DBG(xe, ext.name >=
1931                          ARRAY_SIZE(vm_user_extension_funcs)))
1932                 return -EINVAL;
1933
1934         err = vm_user_extension_funcs[ext.name](xe, vm, extensions);
1935         if (XE_IOCTL_DBG(xe, err))
1936                 return err;
1937
1938         if (ext.next_extension)
1939                 return vm_user_extensions(xe, vm, ext.next_extension,
1940                                           ++ext_number);
1941
1942         return 0;
1943 }
1944
1945 #define ALL_DRM_XE_VM_CREATE_FLAGS (DRM_XE_VM_CREATE_SCRATCH_PAGE | \
1946                                     DRM_XE_VM_CREATE_COMPUTE_MODE | \
1947                                     DRM_XE_VM_CREATE_ASYNC_BIND_OPS | \
1948                                     DRM_XE_VM_CREATE_FAULT_MODE)
1949
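/*
 * VM create ioctl: validate the requested flag combinations against the
 * device's global fault/non-fault mode, create the VM, apply any user
 * extensions and allocate a USM ASID when the device supports it.
 */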
1950 int xe_vm_create_ioctl(struct drm_device *dev, void *data,
1951                        struct drm_file *file)
1952 {
1953         struct xe_device *xe = to_xe_device(dev);
1954         struct xe_file *xef = to_xe_file(file);
1955         struct drm_xe_vm_create *args = data;
1956         struct xe_vm *vm;
1957         u32 id, asid;
1958         int err;
1959         u32 flags = 0;
1960
1961         if (XE_WA(xe_root_mmio_gt(xe), 14016763929))
1962                 args->flags |= DRM_XE_VM_CREATE_SCRATCH_PAGE;
1963
1964         if (XE_IOCTL_DBG(xe, args->flags & DRM_XE_VM_CREATE_FAULT_MODE &&
1965                          !xe->info.supports_usm))
1966                 return -EINVAL;
1967
1968         if (XE_IOCTL_DBG(xe, args->reserved[0] || args->reserved[1]))
1969                 return -EINVAL;
1970
1971         if (XE_IOCTL_DBG(xe, args->flags & ~ALL_DRM_XE_VM_CREATE_FLAGS))
1972                 return -EINVAL;
1973
1974         if (XE_IOCTL_DBG(xe, args->flags & DRM_XE_VM_CREATE_SCRATCH_PAGE &&
1975                          args->flags & DRM_XE_VM_CREATE_FAULT_MODE))
1976                 return -EINVAL;
1977
1978         if (XE_IOCTL_DBG(xe, args->flags & DRM_XE_VM_CREATE_COMPUTE_MODE &&
1979                          args->flags & DRM_XE_VM_CREATE_FAULT_MODE))
1980                 return -EINVAL;
1981
1982         if (XE_IOCTL_DBG(xe, args->flags & DRM_XE_VM_CREATE_FAULT_MODE &&
1983                          xe_device_in_non_fault_mode(xe)))
1984                 return -EINVAL;
1985
1986         if (XE_IOCTL_DBG(xe, !(args->flags & DRM_XE_VM_CREATE_FAULT_MODE) &&
1987                          xe_device_in_fault_mode(xe)))
1988                 return -EINVAL;
1989
1990         if (args->flags & DRM_XE_VM_CREATE_SCRATCH_PAGE)
1991                 flags |= XE_VM_FLAG_SCRATCH_PAGE;
1992         if (args->flags & DRM_XE_VM_CREATE_COMPUTE_MODE)
1993                 flags |= XE_VM_FLAG_COMPUTE_MODE;
1994         if (args->flags & DRM_XE_VM_CREATE_ASYNC_BIND_OPS)
1995                 flags |= XE_VM_FLAG_ASYNC_BIND_OPS;
1996         if (args->flags & DRM_XE_VM_CREATE_FAULT_MODE)
1997                 flags |= XE_VM_FLAG_FAULT_MODE;
1998
1999         vm = xe_vm_create(xe, flags);
2000         if (IS_ERR(vm))
2001                 return PTR_ERR(vm);
2002
2003         if (args->extensions) {
2004                 err = vm_user_extensions(xe, vm, args->extensions, 0);
2005                 if (XE_IOCTL_DBG(xe, err)) {
2006                         xe_vm_close_and_put(vm);
2007                         return err;
2008                 }
2009         }
2010
2011         mutex_lock(&xef->vm.lock);
2012         err = xa_alloc(&xef->vm.xa, &id, vm, xa_limit_32b, GFP_KERNEL);
2013         mutex_unlock(&xef->vm.lock);
2014         if (err) {
2015                 xe_vm_close_and_put(vm);
2016                 return err;
2017         }
2018
2019         if (xe->info.has_asid) {
2020                 mutex_lock(&xe->usm.lock);
2021                 err = xa_alloc_cyclic(&xe->usm.asid_to_vm, &asid, vm,
2022                                       XA_LIMIT(0, XE_MAX_ASID - 1),
2023                                       &xe->usm.next_asid, GFP_KERNEL);
2024                 mutex_unlock(&xe->usm.lock);
2025                 if (err) {
2026                         xe_vm_close_and_put(vm);
2027                         return err;
2028                 }
2029                 vm->usm.asid = asid;
2030         }
2031
2032         args->vm_id = id;
2033
2034 #if IS_ENABLED(CONFIG_DRM_XE_DEBUG_MEM)
2035         /* Warning: Security issue - never enable by default */
2036         args->reserved[0] = xe_bo_main_addr(vm->pt_root[0]->bo, XE_PAGE_SIZE);
2037 #endif
2038
2039         return 0;
2040 }
2041
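/*
 * VM destroy ioctl: drop the file-private handle and put the VM, refusing to
 * do so while compute exec queues are still attached to it.
 */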
2042 int xe_vm_destroy_ioctl(struct drm_device *dev, void *data,
2043                         struct drm_file *file)
2044 {
2045         struct xe_device *xe = to_xe_device(dev);
2046         struct xe_file *xef = to_xe_file(file);
2047         struct drm_xe_vm_destroy *args = data;
2048         struct xe_vm *vm;
2049         int err = 0;
2050
2051         if (XE_IOCTL_DBG(xe, args->pad) ||
2052             XE_IOCTL_DBG(xe, args->reserved[0] || args->reserved[1]))
2053                 return -EINVAL;
2054
2055         mutex_lock(&xef->vm.lock);
2056         vm = xa_load(&xef->vm.xa, args->vm_id);
2057         if (XE_IOCTL_DBG(xe, !vm))
2058                 err = -ENOENT;
2059         else if (XE_IOCTL_DBG(xe, vm->preempt.num_exec_queues))
2060                 err = -EBUSY;
2061         else
2062                 xa_erase(&xef->vm.xa, args->vm_id);
2063         mutex_unlock(&xef->vm.lock);
2064
2065         if (!err)
2066                 xe_vm_close_and_put(vm);
2067
2068         return err;
2069 }
2070
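/* Prefetch regions: index 0 is system memory (TT), 1 and 2 the VRAM placements. */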
2071 static const u32 region_to_mem_type[] = {
2072         XE_PL_TT,
2073         XE_PL_VRAM0,
2074         XE_PL_VRAM1,
2075 };
2076
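/*
 * Migrate the VMA's BO (if it has one) to the requested region and rebind the
 * VMA, unless it is already bound and valid on every tile in its tile_mask,
 * in which case the syncs and the async fence are signalled immediately.
 */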
2077 static int xe_vm_prefetch(struct xe_vm *vm, struct xe_vma *vma,
2078                           struct xe_exec_queue *q, u32 region,
2079                           struct xe_sync_entry *syncs, u32 num_syncs,
2080                           struct async_op_fence *afence, bool first_op,
2081                           bool last_op)
2082 {
2083         int err;
2084
2085         XE_WARN_ON(region >= ARRAY_SIZE(region_to_mem_type));
2086
2087         if (!xe_vma_has_no_bo(vma)) {
2088                 err = xe_bo_migrate(xe_vma_bo(vma), region_to_mem_type[region]);
2089                 if (err)
2090                         return err;
2091         }
2092
2093         if (vma->tile_mask != (vma->tile_present & ~vma->usm.tile_invalidated)) {
2094                 return xe_vm_bind(vm, vma, q, xe_vma_bo(vma), syncs, num_syncs,
2095                                   afence, true, first_op, last_op);
2096         } else {
2097                 int i;
2098
2099                 /* Nothing to do, signal fences now */
2100                 if (last_op) {
2101                         for (i = 0; i < num_syncs; i++)
2102                                 xe_sync_entry_signal(&syncs[i], NULL,
2103                                                      dma_fence_get_stub());
2104                 }
2105                 if (afence)
2106                         dma_fence_signal(&afence->fence);
2107                 return 0;
2108         }
2109 }
2110
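/* The low 16 bits of a bind operation encode the opcode, the upper bits flags. */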
2111 #define VM_BIND_OP(op)  (op & 0xffff)
2112
2113 struct ttm_buffer_object *xe_vm_ttm_bo(struct xe_vm *vm)
2114 {
2115         int idx = vm->flags & XE_VM_FLAG_MIGRATION ?
2116                 XE_VM_FLAG_TILE_ID(vm->flags) : 0;
2117
2118         /* Safe to use index 0 as all BOs in the VM share a single dma-resv lock */
2119         return &vm->pt_root[idx]->bo->ttm;
2120 }
2121
2122 static void xe_vm_tv_populate(struct xe_vm *vm, struct ttm_validate_buffer *tv)
2123 {
2124         tv->num_shared = 1;
2125         tv->bo = xe_vm_ttm_bo(vm);
2126 }
2127
2128 static void vm_set_async_error(struct xe_vm *vm, int err)
2129 {
2130         lockdep_assert_held(&vm->lock);
2131         vm->async_ops.error = err;
2132 }
2133
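/*
 * Early per-op validation against the current VMA tree: MAP requires the range
 * to be free (unless async), UNMAP and PREFETCH require an overlapping VMA
 * which, for synchronous binds, must match the range exactly, and UNMAP_ALL
 * requires the BO to have at least one mapping.
 */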
2134 static int vm_bind_ioctl_lookup_vma(struct xe_vm *vm, struct xe_bo *bo,
2135                                     u64 addr, u64 range, u32 op)
2136 {
2137         struct xe_device *xe = vm->xe;
2138         struct xe_vma *vma;
2139         bool async = !!(op & XE_VM_BIND_FLAG_ASYNC);
2140
2141         lockdep_assert_held(&vm->lock);
2142
2143         switch (VM_BIND_OP(op)) {
2144         case XE_VM_BIND_OP_MAP:
2145         case XE_VM_BIND_OP_MAP_USERPTR:
2146                 vma = xe_vm_find_overlapping_vma(vm, addr, range);
2147                 if (XE_IOCTL_DBG(xe, vma && !async))
2148                         return -EBUSY;
2149                 break;
2150         case XE_VM_BIND_OP_UNMAP:
2151         case XE_VM_BIND_OP_PREFETCH:
2152                 vma = xe_vm_find_overlapping_vma(vm, addr, range);
2153                 if (XE_IOCTL_DBG(xe, !vma))
2154                         /* Not an actual error, IOCTL cleans up and returns 0 */
2155                         return -ENODATA;
2156                 if (XE_IOCTL_DBG(xe, (xe_vma_start(vma) != addr ||
2157                                       xe_vma_end(vma) != addr + range) && !async))
2158                         return -EINVAL;
2159                 break;
2160         case XE_VM_BIND_OP_UNMAP_ALL:
2161                 if (XE_IOCTL_DBG(xe, list_empty(&bo->ttm.base.gpuva.list)))
2162                         /* Not an actual error, IOCTL cleans up and returns 0 */
2163                         return -ENODATA;
2164                 break;
2165         default:
2166                 XE_WARN_ON("NOT POSSIBLE");
2167                 return -EINVAL;
2168         }
2169
2170         return 0;
2171 }
2172
2173 static void prep_vma_destroy(struct xe_vm *vm, struct xe_vma *vma,
2174                              bool post_commit)
2175 {
2176         down_read(&vm->userptr.notifier_lock);
2177         vma->gpuva.flags |= XE_VMA_DESTROYED;
2178         up_read(&vm->userptr.notifier_lock);
2179         if (post_commit)
2180                 xe_vm_remove_vma(vm, vma);
2181 }
2182
2183 #undef ULL
2184 #define ULL     unsigned long long
2185
2186 #if IS_ENABLED(CONFIG_DRM_XE_DEBUG_VM)
2187 static void print_op(struct xe_device *xe, struct drm_gpuva_op *op)
2188 {
2189         struct xe_vma *vma;
2190
2191         switch (op->op) {
2192         case DRM_GPUVA_OP_MAP:
2193                 vm_dbg(&xe->drm, "MAP: addr=0x%016llx, range=0x%016llx",
2194                        (ULL)op->map.va.addr, (ULL)op->map.va.range);
2195                 break;
2196         case DRM_GPUVA_OP_REMAP:
2197                 vma = gpuva_to_vma(op->remap.unmap->va);
2198                 vm_dbg(&xe->drm, "REMAP:UNMAP: addr=0x%016llx, range=0x%016llx, keep=%d",
2199                        (ULL)xe_vma_start(vma), (ULL)xe_vma_size(vma),
2200                        op->remap.unmap->keep ? 1 : 0);
2201                 if (op->remap.prev)
2202                         vm_dbg(&xe->drm,
2203                                "REMAP:PREV: addr=0x%016llx, range=0x%016llx",
2204                                (ULL)op->remap.prev->va.addr,
2205                                (ULL)op->remap.prev->va.range);
2206                 if (op->remap.next)
2207                         vm_dbg(&xe->drm,
2208                                "REMAP:NEXT: addr=0x%016llx, range=0x%016llx",
2209                                (ULL)op->remap.next->va.addr,
2210                                (ULL)op->remap.next->va.range);
2211                 break;
2212         case DRM_GPUVA_OP_UNMAP:
2213                 vma = gpuva_to_vma(op->unmap.va);
2214                 vm_dbg(&xe->drm, "UNMAP: addr=0x%016llx, range=0x%016llx, keep=%d",
2215                        (ULL)xe_vma_start(vma), (ULL)xe_vma_size(vma),
2216                        op->unmap.keep ? 1 : 0);
2217                 break;
2218         case DRM_GPUVA_OP_PREFETCH:
2219                 vma = gpuva_to_vma(op->prefetch.va);
2220                 vm_dbg(&xe->drm, "PREFETCH: addr=0x%016llx, range=0x%016llx",
2221                        (ULL)xe_vma_start(vma), (ULL)xe_vma_size(vma));
2222                 break;
2223         default:
2224                 XE_WARN_ON("NOT POSSIBLE");
2225         }
2226 }
2227 #else
2228 static void print_op(struct xe_device *xe, struct drm_gpuva_op *op)
2229 {
2230 }
2231 #endif
2232
2233 /*
2234  * Create operations list from IOCTL arguments, setup operations fields so parse
2235  * and commit steps are decoupled from IOCTL arguments. This step can fail.
2236  */
2237 static struct drm_gpuva_ops *
2238 vm_bind_ioctl_ops_create(struct xe_vm *vm, struct xe_bo *bo,
2239                          u64 bo_offset_or_userptr, u64 addr, u64 range,
2240                          u32 operation, u8 tile_mask, u32 region)
2241 {
2242         struct drm_gem_object *obj = bo ? &bo->ttm.base : NULL;
2243         struct drm_gpuva_ops *ops;
2244         struct drm_gpuva_op *__op;
2245         struct xe_vma_op *op;
2246         struct drm_gpuvm_bo *vm_bo;
2247         int err;
2248
2249         lockdep_assert_held_write(&vm->lock);
2250
2251         vm_dbg(&vm->xe->drm,
2252                "op=%d, addr=0x%016llx, range=0x%016llx, bo_offset_or_userptr=0x%016llx",
2253                VM_BIND_OP(operation), (ULL)addr, (ULL)range,
2254                (ULL)bo_offset_or_userptr);
2255
2256         switch (VM_BIND_OP(operation)) {
2257         case XE_VM_BIND_OP_MAP:
2258         case XE_VM_BIND_OP_MAP_USERPTR:
2259                 ops = drm_gpuvm_sm_map_ops_create(&vm->gpuvm, addr, range,
2260                                                   obj, bo_offset_or_userptr);
2261                 if (IS_ERR(ops))
2262                         return ops;
2263
2264                 drm_gpuva_for_each_op(__op, ops) {
2265                         struct xe_vma_op *op = gpuva_op_to_vma_op(__op);
2266
2267                         op->tile_mask = tile_mask;
2268                         op->map.immediate =
2269                                 operation & XE_VM_BIND_FLAG_IMMEDIATE;
2270                         op->map.read_only =
2271                                 operation & XE_VM_BIND_FLAG_READONLY;
2272                         op->map.is_null = operation & XE_VM_BIND_FLAG_NULL;
2273                 }
2274                 break;
2275         case XE_VM_BIND_OP_UNMAP:
2276                 ops = drm_gpuvm_sm_unmap_ops_create(&vm->gpuvm, addr, range);
2277                 if (IS_ERR(ops))
2278                         return ops;
2279
2280                 drm_gpuva_for_each_op(__op, ops) {
2281                         struct xe_vma_op *op = gpuva_op_to_vma_op(__op);
2282
2283                         op->tile_mask = tile_mask;
2284                 }
2285                 break;
2286         case XE_VM_BIND_OP_PREFETCH:
2287                 ops = drm_gpuvm_prefetch_ops_create(&vm->gpuvm, addr, range);
2288                 if (IS_ERR(ops))
2289                         return ops;
2290
2291                 drm_gpuva_for_each_op(__op, ops) {
2292                         struct xe_vma_op *op = gpuva_op_to_vma_op(__op);
2293
2294                         op->tile_mask = tile_mask;
2295                         op->prefetch.region = region;
2296                 }
2297                 break;
2298         case XE_VM_BIND_OP_UNMAP_ALL:
2299                 XE_WARN_ON(!bo);
2300
2301                 err = xe_bo_lock(bo, true);
2302                 if (err)
2303                         return ERR_PTR(err);
2304
2305                 vm_bo = drm_gpuvm_bo_find(&vm->gpuvm, obj);
2306                 if (!vm_bo) {
                             xe_bo_unlock(bo);
                             return ERR_PTR(-ENODATA);
                     }
2308
2309                 ops = drm_gpuvm_bo_unmap_ops_create(vm_bo);
2310                 drm_gpuvm_bo_put(vm_bo);
2311                 xe_bo_unlock(bo);
2312                 if (IS_ERR(ops))
2313                         return ops;
2314
2315                 drm_gpuva_for_each_op(__op, ops) {
2316                         struct xe_vma_op *op = gpuva_op_to_vma_op(__op);
2317
2318                         op->tile_mask = tile_mask;
2319                 }
2320                 break;
2321         default:
2322                 XE_WARN_ON("NOT POSSIBLE");
2323                 ops = ERR_PTR(-EINVAL);
2324         }
2325
2326 #ifdef TEST_VM_ASYNC_OPS_ERROR
2327         if (!IS_ERR(ops) && (operation & FORCE_ASYNC_OP_ERROR)) {
2328                 op = list_first_entry_or_null(&ops->list, struct xe_vma_op,
2329                                               base.entry);
2330                 if (op)
2331                         op->inject_error = true;
2332         }
2333 #endif
2334
2335         if (!IS_ERR(ops))
2336                 drm_gpuva_for_each_op(__op, ops)
2337                         print_op(vm->xe, __op);
2338
2339         return ops;
2340 }
2341
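/*
 * Build an xe_vma from a GPUVA map operation. Userptr VMAs get their pages
 * pinned up front; VMAs backed by an external (not VM-private) BO are tracked
 * as external objects and get the VM's preempt fences attached. On failure the
 * half-constructed VMA is destroyed again.
 */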
2342 static struct xe_vma *new_vma(struct xe_vm *vm, struct drm_gpuva_op_map *op,
2343                               u8 tile_mask, bool read_only, bool is_null)
2344 {
2345         struct xe_bo *bo = op->gem.obj ? gem_to_xe_bo(op->gem.obj) : NULL;
2346         struct xe_vma *vma;
2347         int err;
2348
2349         lockdep_assert_held_write(&vm->lock);
2350
2351         if (bo) {
2352                 err = xe_bo_lock(bo, true);
2353                 if (err)
2354                         return ERR_PTR(err);
2355         }
2356         vma = xe_vma_create(vm, bo, op->gem.offset,
2357                             op->va.addr, op->va.addr +
2358                             op->va.range - 1, read_only, is_null,
2359                             tile_mask);
2360         if (bo)
2361                 xe_bo_unlock(bo);
             if (IS_ERR(vma))
                     return vma;
2362
2363         if (xe_vma_is_userptr(vma)) {
2364                 err = xe_vma_userptr_pin_pages(vma);
2365                 if (err) {
2366                         prep_vma_destroy(vm, vma, false);
2367                         xe_vma_destroy_unlocked(vma);
2368                         return ERR_PTR(err);
2369                 }
2370         } else if (!xe_vma_has_no_bo(vma) && !bo->vm) {
2371                 vm_insert_extobj(vm, vma);
2372                 err = add_preempt_fences(vm, bo);
2373                 if (err) {
2374                         prep_vma_destroy(vm, vma, false);
2375                         xe_vma_destroy_unlocked(vma);
2376                         return ERR_PTR(err);
2377                 }
2378         }
2379
2380         return vma;
2381 }
2382
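/*
 * Largest page size the VMA may have been bound with; used below to decide
 * whether a remap can skip rebinding the untouched start/end of an existing
 * mapping.
 */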
2383 static u64 xe_vma_max_pte_size(struct xe_vma *vma)
2384 {
2385         if (vma->gpuva.flags & XE_VMA_PTE_1G)
2386                 return SZ_1G;
2387         else if (vma->gpuva.flags & XE_VMA_PTE_2M)
2388                 return SZ_2M;
2389
2390         return SZ_4K;
2391 }
2392
2393 static u64 xe_vma_set_pte_size(struct xe_vma *vma, u64 size)
2394 {
2395         switch (size) {
2396         case SZ_1G:
2397                 vma->gpuva.flags |= XE_VMA_PTE_1G;
2398                 break;
2399         case SZ_2M:
2400                 vma->gpuva.flags |= XE_VMA_PTE_2M;
2401                 break;
2402         }
2403
2404         return SZ_4K;
2405 }
2406
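/*
 * Commit a parsed operation to the VM's VMA tree: insert newly created VMAs,
 * mark unmapped ones as destroyed and remove them from the tree. The
 * *_COMMITTED flags record how far this got so that a failure can be unwound.
 */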
2407 static int xe_vma_op_commit(struct xe_vm *vm, struct xe_vma_op *op)
2408 {
2409         int err = 0;
2410
2411         lockdep_assert_held_write(&vm->lock);
2412
2413         switch (op->base.op) {
2414         case DRM_GPUVA_OP_MAP:
2415                 err |= xe_vm_insert_vma(vm, op->map.vma);
2416                 if (!err)
2417                         op->flags |= XE_VMA_OP_COMMITTED;
2418                 break;
2419         case DRM_GPUVA_OP_REMAP:
2420                 prep_vma_destroy(vm, gpuva_to_vma(op->base.remap.unmap->va),
2421                                  true);
2422                 op->flags |= XE_VMA_OP_COMMITTED;
2423
2424                 if (op->remap.prev) {
2425                         err |= xe_vm_insert_vma(vm, op->remap.prev);
2426                         if (!err)
2427                                 op->flags |= XE_VMA_OP_PREV_COMMITTED;
2428                         if (!err && op->remap.skip_prev)
2429                                 op->remap.prev = NULL;
2430                 }
2431                 if (op->remap.next) {
2432                         err |= xe_vm_insert_vma(vm, op->remap.next);
2433                         if (!err)
2434                                 op->flags |= XE_VMA_OP_NEXT_COMMITTED;
2435                         if (!err && op->remap.skip_next)
2436                                 op->remap.next = NULL;
2437                 }
2438
2439                 /* Adjust for partial unbind after removing VMA from VM */
2440                 if (!err) {
2441                         op->base.remap.unmap->va->va.addr = op->remap.start;
2442                         op->base.remap.unmap->va->va.range = op->remap.range;
2443                 }
2444                 break;
2445         case DRM_GPUVA_OP_UNMAP:
2446                 prep_vma_destroy(vm, gpuva_to_vma(op->base.unmap.va), true);
2447                 op->flags |= XE_VMA_OP_COMMITTED;
2448                 break;
2449         case DRM_GPUVA_OP_PREFETCH:
2450                 op->flags |= XE_VMA_OP_COMMITTED;
2451                 break;
2452         default:
2453                 XE_WARN_ON("NOT POSSIBLE");
2454         }
2455
2456         return err;
2457 }
2458
2459
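/*
 * Second stage of a bind ioctl: turn the GPUVA operations into xe_vma_ops,
 * creating new VMAs for MAP and for the prev/next pieces of a REMAP,
 * allocating the user-visible async fence for the last operation and
 * committing each op to the VMA tree as we go.
 */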
2460 static int vm_bind_ioctl_ops_parse(struct xe_vm *vm, struct xe_exec_queue *q,
2461                                    struct drm_gpuva_ops *ops,
2462                                    struct xe_sync_entry *syncs, u32 num_syncs,
2463                                    struct list_head *ops_list, bool last,
2464                                    bool async)
2465 {
2466         struct xe_vma_op *last_op = NULL;
2467         struct async_op_fence *fence = NULL;
2468         struct drm_gpuva_op *__op;
2469         int err = 0;
2470
2471         lockdep_assert_held_write(&vm->lock);
2472
2473         if (last && num_syncs && async) {
2474                 u64 seqno;
2475
2476                 fence = kmalloc(sizeof(*fence), GFP_KERNEL);
2477                 if (!fence)
2478                         return -ENOMEM;
2479
2480                 seqno = q ? ++q->bind.fence_seqno : ++vm->async_ops.fence.seqno;
2481                 dma_fence_init(&fence->fence, &async_op_fence_ops,
2482                                &vm->async_ops.lock, q ? q->bind.fence_ctx :
2483                                vm->async_ops.fence.context, seqno);
2484
2485                 if (!xe_vm_no_dma_fences(vm)) {
2486                         fence->vm = vm;
2487                         fence->started = false;
2488                         init_waitqueue_head(&fence->wq);
2489                 }
2490         }
2491
2492         drm_gpuva_for_each_op(__op, ops) {
2493                 struct xe_vma_op *op = gpuva_op_to_vma_op(__op);
2494                 bool first = list_empty(ops_list);
2495
2496                 XE_WARN_ON(!first && !async);
2497
2498                 INIT_LIST_HEAD(&op->link);
2499                 list_add_tail(&op->link, ops_list);
2500
2501                 if (first) {
2502                         op->flags |= XE_VMA_OP_FIRST;
2503                         op->num_syncs = num_syncs;
2504                         op->syncs = syncs;
2505                 }
2506
2507                 op->q = q;
2508
2509                 switch (op->base.op) {
2510                 case DRM_GPUVA_OP_MAP:
2511                 {
2512                         struct xe_vma *vma;
2513
2514                         vma = new_vma(vm, &op->base.map,
2515                                       op->tile_mask, op->map.read_only,
2516                                       op->map.is_null);
2517                         if (IS_ERR(vma)) {
2518                                 err = PTR_ERR(vma);
2519                                 goto free_fence;
2520                         }
2521
2522                         op->map.vma = vma;
2523                         break;
2524                 }
2525                 case DRM_GPUVA_OP_REMAP:
2526                 {
2527                         struct xe_vma *old =
2528                                 gpuva_to_vma(op->base.remap.unmap->va);
2529
2530                         op->remap.start = xe_vma_start(old);
2531                         op->remap.range = xe_vma_size(old);
2532
2533                         if (op->base.remap.prev) {
2534                                 struct xe_vma *vma;
2535                                 bool read_only =
2536                                         op->base.remap.unmap->va->flags &
2537                                         XE_VMA_READ_ONLY;
2538                                 bool is_null =
2539                                         op->base.remap.unmap->va->flags &
2540                                         DRM_GPUVA_SPARSE;
2541
2542                                 vma = new_vma(vm, op->base.remap.prev,
2543                                               op->tile_mask, read_only,
2544                                               is_null);
2545                                 if (IS_ERR(vma)) {
2546                                         err = PTR_ERR(vma);
2547                                         goto free_fence;
2548                                 }
2549
2550                                 op->remap.prev = vma;
2551
2552                                 /*
2553                                  * Userptr creates a new SG mapping so
2554                                  * we must also rebind.
2555                                  */
2556                                 op->remap.skip_prev = !xe_vma_is_userptr(old) &&
2557                                         IS_ALIGNED(xe_vma_end(vma),
2558                                                    xe_vma_max_pte_size(old));
2559                                 if (op->remap.skip_prev) {
2560                                         xe_vma_set_pte_size(vma, xe_vma_max_pte_size(old));
2561                                         op->remap.range -=
2562                                                 xe_vma_end(vma) -
2563                                                 xe_vma_start(old);
2564                                         op->remap.start = xe_vma_end(vma);
2565                                 }
2566                         }
2567
2568                         if (op->base.remap.next) {
2569                                 struct xe_vma *vma;
2570                                 bool read_only =
2571                                         op->base.remap.unmap->va->flags &
2572                                         XE_VMA_READ_ONLY;
2573
2574                                 bool is_null =
2575                                         op->base.remap.unmap->va->flags &
2576                                         DRM_GPUVA_SPARSE;
2577
2578                                 vma = new_vma(vm, op->base.remap.next,
2579                                               op->tile_mask, read_only,
2580                                               is_null);
2581                                 if (IS_ERR(vma)) {
2582                                         err = PTR_ERR(vma);
2583                                         goto free_fence;
2584                                 }
2585
2586                                 op->remap.next = vma;
2587
2588                                 /*
2589                                  * Userptr creates a new SG mapping so
2590                                  * we must also rebind.
2591                                  */
2592                                 op->remap.skip_next = !xe_vma_is_userptr(old) &&
2593                                         IS_ALIGNED(xe_vma_start(vma),
2594                                                    xe_vma_max_pte_size(old));
2595                                 if (op->remap.skip_next) {
2596                                         xe_vma_set_pte_size(vma, xe_vma_max_pte_size(old));
2597                                         op->remap.range -=
2598                                                 xe_vma_end(old) -
2599                                                 xe_vma_start(vma);
2600                                 }
2601                         }
2602                         break;
2603                 }
2604                 case DRM_GPUVA_OP_UNMAP:
2605                 case DRM_GPUVA_OP_PREFETCH:
2606                         /* Nothing to do */
2607                         break;
2608                 default:
2609                         XE_WARN_ON("NOT POSSIBLE");
2610                 }
2611
2612                 last_op = op;
2613
2614                 err = xe_vma_op_commit(vm, op);
2615                 if (err)
2616                         goto free_fence;
2617         }
2618
2619         /* FIXME: Unhandled corner case */
2620         XE_WARN_ON(!last_op && last && !list_empty(ops_list));
2621
2622         if (!last_op)
2623                 goto free_fence;
2624         last_op->ops = ops;
2625         if (last) {
2626                 last_op->flags |= XE_VMA_OP_LAST;
2627                 last_op->num_syncs = num_syncs;
2628                 last_op->syncs = syncs;
2629                 last_op->fence = fence;
2630         }
2631
2632         return 0;
2633
2634 free_fence:
2635         kfree(fence);
2636         return err;
2637 }
2638
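/*
 * Execute a single operation with the VM's (and, for external BOs, the BO's)
 * dma-resv reserved via ttm_eu. A userptr invalidation race surfaces as
 * -EAGAIN, in which case the pages are repinned and the operation retried.
 */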
2639 static int __xe_vma_op_execute(struct xe_vm *vm, struct xe_vma *vma,
2640                                struct xe_vma_op *op)
2641 {
2642         LIST_HEAD(objs);
2643         LIST_HEAD(dups);
2644         struct ttm_validate_buffer tv_bo, tv_vm;
2645         struct ww_acquire_ctx ww;
2646         struct xe_bo *vbo;
2647         int err;
2648
2649         lockdep_assert_held_write(&vm->lock);
2650
2651         xe_vm_tv_populate(vm, &tv_vm);
2652         list_add_tail(&tv_vm.head, &objs);
2653         vbo = xe_vma_bo(vma);
2654         if (vbo) {
2655                 /*
2656                  * An unbind can drop the last reference to the BO and
2657                  * the BO is needed for ttm_eu_backoff_reservation so
2658                  * take a reference here.
2659                  */
2660                 xe_bo_get(vbo);
2661
2662                 if (!vbo->vm) {
2663                         tv_bo.bo = &vbo->ttm;
2664                         tv_bo.num_shared = 1;
2665                         list_add(&tv_bo.head, &objs);
2666                 }
2667         }
2668
2669 again:
2670         err = ttm_eu_reserve_buffers(&ww, &objs, true, &dups);
2671         if (err) {
2672                 xe_bo_put(vbo);
2673                 return err;
2674         }
2675
2676         xe_vm_assert_held(vm);
2677         xe_bo_assert_held(xe_vma_bo(vma));
2678
2679         switch (op->base.op) {
2680         case DRM_GPUVA_OP_MAP:
2681                 err = xe_vm_bind(vm, vma, op->q, xe_vma_bo(vma),
2682                                  op->syncs, op->num_syncs, op->fence,
2683                                  op->map.immediate || !xe_vm_in_fault_mode(vm),
2684                                  op->flags & XE_VMA_OP_FIRST,
2685                                  op->flags & XE_VMA_OP_LAST);
2686                 break;
2687         case DRM_GPUVA_OP_REMAP:
2688         {
2689                 bool prev = !!op->remap.prev;
2690                 bool next = !!op->remap.next;
2691
2692                 if (!op->remap.unmap_done) {
2693                         if (prev || next) {
2694                                 vm->async_ops.munmap_rebind_inflight = true;
2695                                 vma->gpuva.flags |= XE_VMA_FIRST_REBIND;
2696                         }
2697                         err = xe_vm_unbind(vm, vma, op->q, op->syncs,
2698                                            op->num_syncs,
2699                                            !prev && !next ? op->fence : NULL,
2700                                            op->flags & XE_VMA_OP_FIRST,
2701                                            op->flags & XE_VMA_OP_LAST && !prev &&
2702                                            !next);
2703                         if (err)
2704                                 break;
2705                         op->remap.unmap_done = true;
2706                 }
2707
2708                 if (prev) {
2709                         op->remap.prev->gpuva.flags |= XE_VMA_LAST_REBIND;
2710                         err = xe_vm_bind(vm, op->remap.prev, op->q,
2711                                          xe_vma_bo(op->remap.prev), op->syncs,
2712                                          op->num_syncs,
2713                                          !next ? op->fence : NULL, true, false,
2714                                          op->flags & XE_VMA_OP_LAST && !next);
2715                         op->remap.prev->gpuva.flags &= ~XE_VMA_LAST_REBIND;
2716                         if (err)
2717                                 break;
2718                         op->remap.prev = NULL;
2719                 }
2720
2721                 if (next) {
2722                         op->remap.next->gpuva.flags |= XE_VMA_LAST_REBIND;
2723                         err = xe_vm_bind(vm, op->remap.next, op->q,
2724                                          xe_vma_bo(op->remap.next),
2725                                          op->syncs, op->num_syncs,
2726                                          op->fence, true, false,
2727                                          op->flags & XE_VMA_OP_LAST);
2728                         op->remap.next->gpuva.flags &= ~XE_VMA_LAST_REBIND;
2729                         if (err)
2730                                 break;
2731                         op->remap.next = NULL;
2732                 }
2733                 vm->async_ops.munmap_rebind_inflight = false;
2734
2735                 break;
2736         }
2737         case DRM_GPUVA_OP_UNMAP:
2738                 err = xe_vm_unbind(vm, vma, op->q, op->syncs,
2739                                    op->num_syncs, op->fence,
2740                                    op->flags & XE_VMA_OP_FIRST,
2741                                    op->flags & XE_VMA_OP_LAST);
2742                 break;
2743         case DRM_GPUVA_OP_PREFETCH:
2744                 err = xe_vm_prefetch(vm, vma, op->q, op->prefetch.region,
2745                                      op->syncs, op->num_syncs, op->fence,
2746                                      op->flags & XE_VMA_OP_FIRST,
2747                                      op->flags & XE_VMA_OP_LAST);
2748                 break;
2749         default:
2750                 XE_WARN_ON("NOT POSSIBLE");
2751         }
2752
2753         ttm_eu_backoff_reservation(&ww, &objs);
2754         if (err == -EAGAIN && xe_vma_is_userptr(vma)) {
2755                 lockdep_assert_held_write(&vm->lock);
2756                 err = xe_vma_userptr_pin_pages(vma);
2757                 if (!err)
2758                         goto again;
2759         }
2760         xe_bo_put(vbo);
2761
2762         if (err)
2763                 trace_xe_vma_fail(vma);
2764
2765         return err;
2766 }
2767
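/*
 * Pick the VMA the operation acts on (for a REMAP this depends on how far the
 * unmap/rebind sequence has progressed) and execute it.
 */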
2768 static int xe_vma_op_execute(struct xe_vm *vm, struct xe_vma_op *op)
2769 {
2770         int ret = 0;
2771
2772         lockdep_assert_held_write(&vm->lock);
2773
2774 #ifdef TEST_VM_ASYNC_OPS_ERROR
2775         if (op->inject_error) {
2776                 op->inject_error = false;
2777                 return -ENOMEM;
2778         }
2779 #endif
2780
2781         switch (op->base.op) {
2782         case DRM_GPUVA_OP_MAP:
2783                 ret = __xe_vma_op_execute(vm, op->map.vma, op);
2784                 break;
2785         case DRM_GPUVA_OP_REMAP:
2786         {
2787                 struct xe_vma *vma;
2788
2789                 if (!op->remap.unmap_done)
2790                         vma = gpuva_to_vma(op->base.remap.unmap->va);
2791                 else if (op->remap.prev)
2792                         vma = op->remap.prev;
2793                 else
2794                         vma = op->remap.next;
2795
2796                 ret = __xe_vma_op_execute(vm, vma, op);
2797                 break;
2798         }
2799         case DRM_GPUVA_OP_UNMAP:
2800                 ret = __xe_vma_op_execute(vm, gpuva_to_vma(op->base.unmap.va),
2801                                           op);
2802                 break;
2803         case DRM_GPUVA_OP_PREFETCH:
2804                 ret = __xe_vma_op_execute(vm,
2805                                           gpuva_to_vma(op->base.prefetch.va),
2806                                           op);
2807                 break;
2808         default:
2809                 XE_WARN_ON("NOT POSSIBLE");
2810         }
2811
2812         return ret;
2813 }
2814
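/*
 * Release everything still attached to an operation: syncs, exec queue and
 * async fence references (last op only), its link on the pending list, the
 * GPUVA ops it was created from and, again for the last op, a VM reference.
 */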
2815 static void xe_vma_op_cleanup(struct xe_vm *vm, struct xe_vma_op *op)
2816 {
2817         bool last = op->flags & XE_VMA_OP_LAST;
2818
2819         if (last) {
2820                 while (op->num_syncs--)
2821                         xe_sync_entry_cleanup(&op->syncs[op->num_syncs]);
2822                 kfree(op->syncs);
2823                 if (op->q)
2824                         xe_exec_queue_put(op->q);
2825                 if (op->fence)
2826                         dma_fence_put(&op->fence->fence);
2827         }
2828         if (!list_empty(&op->link)) {
2829                 spin_lock_irq(&vm->async_ops.lock);
2830                 list_del(&op->link);
2831                 spin_unlock_irq(&vm->async_ops.lock);
2832         }
2833         if (op->ops)
2834                 drm_gpuva_ops_free(&vm->gpuvm, op->ops);
2835         if (last)
2836                 xe_vm_put(vm);
2837 }
2838
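/*
 * Undo xe_vma_op_commit() for an operation that will not be executed: destroy
 * the VMAs that were created for it and re-insert the ones it removed,
 * clearing their DESTROYED flag again.
 */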
2839 static void xe_vma_op_unwind(struct xe_vm *vm, struct xe_vma_op *op,
2840                              bool post_commit, bool prev_post_commit,
2841                              bool next_post_commit)
2842 {
2843         lockdep_assert_held_write(&vm->lock);
2844
2845         switch (op->base.op) {
2846         case DRM_GPUVA_OP_MAP:
2847                 if (op->map.vma) {
2848                         prep_vma_destroy(vm, op->map.vma, post_commit);
2849                         xe_vma_destroy_unlocked(op->map.vma);
2850                 }
2851                 break;
2852         case DRM_GPUVA_OP_UNMAP:
2853         {
2854                 struct xe_vma *vma = gpuva_to_vma(op->base.unmap.va);
2855
2856                 if (vma) {
2857                         down_read(&vm->userptr.notifier_lock);
2858                         vma->gpuva.flags &= ~XE_VMA_DESTROYED;
2859                         up_read(&vm->userptr.notifier_lock);
2860                         if (post_commit)
2861                                 xe_vm_insert_vma(vm, vma);
2862                 }
2863                 break;
2864         }
2865         case DRM_GPUVA_OP_REMAP:
2866         {
2867                 struct xe_vma *vma = gpuva_to_vma(op->base.remap.unmap->va);
2868
2869                 if (op->remap.prev) {
2870                         prep_vma_destroy(vm, op->remap.prev, prev_post_commit);
2871                         xe_vma_destroy_unlocked(op->remap.prev);
2872                 }
2873                 if (op->remap.next) {
2874                         prep_vma_destroy(vm, op->remap.next, next_post_commit);
2875                         xe_vma_destroy_unlocked(op->remap.next);
2876                 }
2877                 if (vma) {
2878                         down_read(&vm->userptr.notifier_lock);
2879                         vma->gpuva.flags &= ~XE_VMA_DESTROYED;
2880                         up_read(&vm->userptr.notifier_lock);
2881                         if (post_commit)
2882                                 xe_vm_insert_vma(vm, vma);
2883                 }
2884                 break;
2885         }
2886         case DRM_GPUVA_OP_PREFETCH:
2887                 /* Nothing to do */
2888                 break;
2889         default:
2890                 XE_WARN_ON("NOT POSSIBLE");
2891         }
2892 }
2893
2894 static struct xe_vma_op *next_vma_op(struct xe_vm *vm)
2895 {
2896         return list_first_entry_or_null(&vm->async_ops.pending,
2897                                         struct xe_vma_op, link);
2898 }
2899
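/*
 * Worker consuming the async-op queue: each pending operation is executed
 * under the write VM lock and processing stops at the first failure so the
 * error can be reported (and optionally captured) to userspace. On a closed
 * VM the remaining operations are only flushed: their VMAs are destroyed and
 * their fences signalled without touching the GPU.
 */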
2900 static void xe_vma_op_work_func(struct work_struct *w)
2901 {
2902         struct xe_vm *vm = container_of(w, struct xe_vm, async_ops.work);
2903
2904         for (;;) {
2905                 struct xe_vma_op *op;
2906                 int err;
2907
2908                 if (vm->async_ops.error && !xe_vm_is_closed(vm))
2909                         break;
2910
2911                 spin_lock_irq(&vm->async_ops.lock);
2912                 op = next_vma_op(vm);
2913                 spin_unlock_irq(&vm->async_ops.lock);
2914
2915                 if (!op)
2916                         break;
2917
2918                 if (!xe_vm_is_closed(vm)) {
2919                         down_write(&vm->lock);
2920                         err = xe_vma_op_execute(vm, op);
2921                         if (err) {
2922                                 drm_warn(&vm->xe->drm,
2923                                          "Async VM op(%d) failed with %d",
2924                                          op->base.op, err);
2925                                 vm_set_async_error(vm, err);
2926                                 up_write(&vm->lock);
2927
2928                                 if (vm->async_ops.error_capture.addr)
2929                                         vm_error_capture(vm, err, 0, 0, 0);
2930                                 break;
2931                         }
2932                         up_write(&vm->lock);
2933                 } else {
2934                         struct xe_vma *vma;
2935
2936                         switch (op->base.op) {
2937                         case DRM_GPUVA_OP_REMAP:
2938                                 vma = gpuva_to_vma(op->base.remap.unmap->va);
2939                                 trace_xe_vma_flush(vma);
2940
2941                                 down_write(&vm->lock);
2942                                 xe_vma_destroy_unlocked(vma);
2943                                 up_write(&vm->lock);
2944                                 break;
2945                         case DRM_GPUVA_OP_UNMAP:
2946                                 vma = gpuva_to_vma(op->base.unmap.va);
2947                                 trace_xe_vma_flush(vma);
2948
2949                                 down_write(&vm->lock);
2950                                 xe_vma_destroy_unlocked(vma);
2951                                 up_write(&vm->lock);
2952                                 break;
2953                         default:
2954                                 /* Nothing to do */
2955                                 break;
2956                         }
2957
2958                         if (op->fence && !test_bit(DMA_FENCE_FLAG_SIGNALED_BIT,
2959                                                    &op->fence->fence.flags)) {
2960                                 if (!xe_vm_no_dma_fences(vm)) {
2961                                         op->fence->started = true;
2962                                         wake_up_all(&op->fence->wq);
2963                                 }
2964                                 dma_fence_signal(&op->fence->fence);
2965                         }
2966                 }
2967
2968                 xe_vma_op_cleanup(vm, op);
2969         }
2970 }
2971
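/*
 * Kick off the parsed operations: a synchronous bind executes its single
 * operation directly (unwinding on failure), while an asynchronous bind
 * installs the async fence into the user syncs and queues the list for the
 * worker.
 */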
2972 static int vm_bind_ioctl_ops_execute(struct xe_vm *vm,
2973                                      struct list_head *ops_list, bool async)
2974 {
2975         struct xe_vma_op *op, *last_op, *next;
2976         int err;
2977
2978         lockdep_assert_held_write(&vm->lock);
2979
2980         list_for_each_entry(op, ops_list, link)
2981                 last_op = op;
2982
2983         if (!async) {
2984                 err = xe_vma_op_execute(vm, last_op);
2985                 if (err)
2986                         goto unwind;
2987                 xe_vma_op_cleanup(vm, last_op);
2988         } else {
2989                 int i;
2990                 bool installed = false;
2991
2992                 for (i = 0; i < last_op->num_syncs; i++)
2993                         installed |= xe_sync_entry_signal(&last_op->syncs[i],
2994                                                           NULL,
2995                                                           &last_op->fence->fence);
2996                 if (!installed && last_op->fence)
2997                         dma_fence_signal(&last_op->fence->fence);
2998
2999                 spin_lock_irq(&vm->async_ops.lock);
3000                 list_splice_tail(ops_list, &vm->async_ops.pending);
3001                 spin_unlock_irq(&vm->async_ops.lock);
3002
3003                 if (!vm->async_ops.error)
3004                         queue_work(system_unbound_wq, &vm->async_ops.work);
3005         }
3006
3007         return 0;
3008
3009 unwind:
3010         list_for_each_entry_reverse(op, ops_list, link)
3011                 xe_vma_op_unwind(vm, op, op->flags & XE_VMA_OP_COMMITTED,
3012                                  op->flags & XE_VMA_OP_PREV_COMMITTED,
3013                                  op->flags & XE_VMA_OP_NEXT_COMMITTED);
3014         list_for_each_entry_safe(op, next, ops_list, link)
3015                 xe_vma_op_cleanup(vm, op);
3016
3017         return err;
3018 }
3019
3020 static void vm_bind_ioctl_ops_unwind(struct xe_vm *vm,
3021                                      struct drm_gpuva_ops **ops,
3022                                      int num_ops_list)
3023 {
3024         int i;
3025
3026         for (i = num_ops_list - 1; i >= 0; --i) {
3027                 struct drm_gpuva_ops *__ops = ops[i];
3028                 struct drm_gpuva_op *__op;
3029
3030                 if (!__ops)
3031                         continue;
3032
3033                 drm_gpuva_for_each_op_reverse(__op, __ops) {
3034                         struct xe_vma_op *op = gpuva_op_to_vma_op(__op);
3035
3036                         xe_vma_op_unwind(vm, op,
3037                                          op->flags & XE_VMA_OP_COMMITTED,
3038                                          op->flags & XE_VMA_OP_PREV_COMMITTED,
3039                                          op->flags & XE_VMA_OP_NEXT_COMMITTED);
3040                 }
3041
3042                 drm_gpuva_ops_free(&vm->gpuvm, __ops);
3043         }
3044 }
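/*
 * Both unwind helpers above only roll back work that was flagged as
 * committed.  A minimal sketch of the commit-side bookkeeping they depend on;
 * xe_vma_op_commit() is an illustrative name here, and the PREV/NEXT flags
 * used for REMAP ops are set in the same way for their prev/next mappings:
 */
static int commit_ops_example(struct xe_vm *vm, struct list_head *ops_list)
{
        struct xe_vma_op *op;
        int err;

        list_for_each_entry(op, ops_list, link) {
                err = xe_vma_op_commit(vm, op); /* illustrative helper */
                if (err)
                        return err;     /* caller unwinds the committed ops */
                op->flags |= XE_VMA_OP_COMMITTED;
        }

        return 0;
}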
3045
3046 #ifdef TEST_VM_ASYNC_OPS_ERROR
3047 #define SUPPORTED_FLAGS \
3048         (FORCE_ASYNC_OP_ERROR | XE_VM_BIND_FLAG_ASYNC | \
3049          XE_VM_BIND_FLAG_READONLY | XE_VM_BIND_FLAG_IMMEDIATE | \
3050          XE_VM_BIND_FLAG_NULL | 0xffff)
3051 #else
3052 #define SUPPORTED_FLAGS \
3053         (XE_VM_BIND_FLAG_ASYNC | XE_VM_BIND_FLAG_READONLY | \
3054          XE_VM_BIND_FLAG_IMMEDIATE | XE_VM_BIND_FLAG_NULL | 0xffff)
3055 #endif
3056 #define XE_64K_PAGE_MASK 0xffffull
3057
3058 #define MAX_BINDS       512     /* FIXME: Picking random upper limit */
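/*
 * SUPPORTED_FLAGS ends in "| 0xffff" because the low 16 bits of the uAPI
 * .op field carry the opcode that VM_BIND_OP() extracts, while the
 * XE_VM_BIND_FLAG_* bits sit above them.  A minimal sketch of composing and
 * splitting such a value, using only names already referenced in this file
 * (the helper itself is illustrative, not part of the driver):
 */
static bool bind_op_encoding_example(void)
{
        u32 op = XE_VM_BIND_OP_MAP | XE_VM_BIND_FLAG_ASYNC |
                 XE_VM_BIND_FLAG_READONLY;

        /* opcode and flags can be tested independently */
        return VM_BIND_OP(op) == XE_VM_BIND_OP_MAP &&
               (op & XE_VM_BIND_FLAG_ASYNC) &&
               !(op & ~SUPPORTED_FLAGS);
}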
3059
3060 static int vm_bind_ioctl_check_args(struct xe_device *xe,
3061                                     struct drm_xe_vm_bind *args,
3062                                     struct drm_xe_vm_bind_op **bind_ops,
3063                                     bool *async)
3064 {
3065         int err;
3066         int i;
3067
3068         if (XE_IOCTL_DBG(xe, args->extensions) ||
3069             XE_IOCTL_DBG(xe, !args->num_binds) ||
3070             XE_IOCTL_DBG(xe, args->num_binds > MAX_BINDS))
3071                 return -EINVAL;
3072
3073         if (args->num_binds > 1) {
3074                 u64 __user *bind_user =
3075                         u64_to_user_ptr(args->vector_of_binds);
3076
3077                 *bind_ops = kmalloc(sizeof(struct drm_xe_vm_bind_op) *
3078                                     args->num_binds, GFP_KERNEL);
3079                 if (!*bind_ops)
3080                         return -ENOMEM;
3081
3082                 err = copy_from_user(*bind_ops, bind_user,
3083                                      sizeof(struct drm_xe_vm_bind_op) *
3084                                      args->num_binds);
3085                 if (XE_IOCTL_DBG(xe, err)) {
3086                         err = -EFAULT;
3087                         goto free_bind_ops;
3088                 }
3089         } else {
3090                 *bind_ops = &args->bind;
3091         }
3092
3093         for (i = 0; i < args->num_binds; ++i) {
3094                 u64 range = (*bind_ops)[i].range;
3095                 u64 addr = (*bind_ops)[i].addr;
3096                 u32 op = (*bind_ops)[i].op;
3097                 u32 obj = (*bind_ops)[i].obj;
3098                 u64 obj_offset = (*bind_ops)[i].obj_offset;
3099                 u32 region = (*bind_ops)[i].region;
3100                 bool is_null = op & XE_VM_BIND_FLAG_NULL;
3101
3102                 if (i == 0) {
3103                         *async = !!(op & XE_VM_BIND_FLAG_ASYNC);
3104                 } else if (XE_IOCTL_DBG(xe, !*async) ||
3105                            XE_IOCTL_DBG(xe, !(op & XE_VM_BIND_FLAG_ASYNC)) ||
3106                            XE_IOCTL_DBG(xe, VM_BIND_OP(op) ==
3107                                         XE_VM_BIND_OP_RESTART)) {
3108                         err = -EINVAL;
3109                         goto free_bind_ops;
3110                 }
3111
3112                 if (XE_IOCTL_DBG(xe, !*async &&
3113                                  VM_BIND_OP(op) == XE_VM_BIND_OP_UNMAP_ALL)) {
3114                         err = -EINVAL;
3115                         goto free_bind_ops;
3116                 }
3117
3118                 if (XE_IOCTL_DBG(xe, !*async &&
3119                                  VM_BIND_OP(op) == XE_VM_BIND_OP_PREFETCH)) {
3120                         err = -EINVAL;
3121                         goto free_bind_ops;
3122                 }
3123
3124                 if (XE_IOCTL_DBG(xe, VM_BIND_OP(op) >
3125                                  XE_VM_BIND_OP_PREFETCH) ||
3126                     XE_IOCTL_DBG(xe, op & ~SUPPORTED_FLAGS) ||
3127                     XE_IOCTL_DBG(xe, obj && is_null) ||
3128                     XE_IOCTL_DBG(xe, obj_offset && is_null) ||
3129                     XE_IOCTL_DBG(xe, VM_BIND_OP(op) != XE_VM_BIND_OP_MAP &&
3130                                  is_null) ||
3131                     XE_IOCTL_DBG(xe, !obj &&
3132                                  VM_BIND_OP(op) == XE_VM_BIND_OP_MAP &&
3133                                  !is_null) ||
3134                     XE_IOCTL_DBG(xe, !obj &&
3135                                  VM_BIND_OP(op) == XE_VM_BIND_OP_UNMAP_ALL) ||
3136                     XE_IOCTL_DBG(xe, addr &&
3137                                  VM_BIND_OP(op) == XE_VM_BIND_OP_UNMAP_ALL) ||
3138                     XE_IOCTL_DBG(xe, range &&
3139                                  VM_BIND_OP(op) == XE_VM_BIND_OP_UNMAP_ALL) ||
3140                     XE_IOCTL_DBG(xe, obj &&
3141                                  VM_BIND_OP(op) == XE_VM_BIND_OP_MAP_USERPTR) ||
3142                     XE_IOCTL_DBG(xe, obj &&
3143                                  VM_BIND_OP(op) == XE_VM_BIND_OP_PREFETCH) ||
3144                     XE_IOCTL_DBG(xe, region &&
3145                                  VM_BIND_OP(op) != XE_VM_BIND_OP_PREFETCH) ||
3146                     XE_IOCTL_DBG(xe, !(BIT(region) &
3147                                        xe->info.mem_region_mask)) ||
3148                     XE_IOCTL_DBG(xe, obj &&
3149                                  VM_BIND_OP(op) == XE_VM_BIND_OP_UNMAP)) {
3150                         err = -EINVAL;
3151                         goto free_bind_ops;
3152                 }
3153
3154                 if (XE_IOCTL_DBG(xe, obj_offset & ~PAGE_MASK) ||
3155                     XE_IOCTL_DBG(xe, addr & ~PAGE_MASK) ||
3156                     XE_IOCTL_DBG(xe, range & ~PAGE_MASK) ||
3157                     XE_IOCTL_DBG(xe, !range && VM_BIND_OP(op) !=
3158                                  XE_VM_BIND_OP_RESTART &&
3159                                  VM_BIND_OP(op) != XE_VM_BIND_OP_UNMAP_ALL)) {
3160                         err = -EINVAL;
3161                         goto free_bind_ops;
3162                 }
3163         }
3164
3165         return 0;
3166
3167 free_bind_ops:
3168         if (args->num_binds > 1)
3169                 kfree(*bind_ops);
3170         return err;
3171 }
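/*
 * A userspace-side sketch of a bind vector that passes the checks above: with
 * num_binds > 1 the ops are read from vector_of_binds, every op must carry
 * XE_VM_BIND_FLAG_ASYNC like the first one (and the VM must have been created
 * with async bind ops), addr/range/obj_offset must be page aligned, and a
 * NULL bind must be a MAP without an object.  The field and flag names are
 * the ones this function reads; vm_id, bo_handle, the addresses and the
 * xe_drm.h/stdint includes are illustrative assumptions:
 */
static void fill_bind_vector_example(struct drm_xe_vm_bind *args,
                                     struct drm_xe_vm_bind_op ops[2],
                                     __u32 vm_id, __u32 bo_handle)
{
        /* op 0: map 2 MiB of a GEM object at GPU address 16 MiB */
        ops[0] = (struct drm_xe_vm_bind_op) {
                .obj = bo_handle,
                .obj_offset = 0,
                .addr = 0x1000000,
                .range = 0x200000,
                .op = XE_VM_BIND_OP_MAP | XE_VM_BIND_FLAG_ASYNC,
        };
        /* op 1: a 64 KiB NULL binding, which must not carry an object */
        ops[1] = (struct drm_xe_vm_bind_op) {
                .addr = 0x1200000,
                .range = 0x10000,
                .op = XE_VM_BIND_OP_MAP | XE_VM_BIND_FLAG_ASYNC |
                      XE_VM_BIND_FLAG_NULL,
        };

        *args = (struct drm_xe_vm_bind) {
                .vm_id = vm_id,
                .num_binds = 2,
                .vector_of_binds = (__u64)(uintptr_t)ops,
        };
}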
3172
3173 int xe_vm_bind_ioctl(struct drm_device *dev, void *data, struct drm_file *file)
3174 {
3175         struct xe_device *xe = to_xe_device(dev);
3176         struct xe_file *xef = to_xe_file(file);
3177         struct drm_xe_vm_bind *args = data;
3178         struct drm_xe_sync __user *syncs_user;
3179         struct xe_bo **bos = NULL;
3180         struct drm_gpuva_ops **ops = NULL;
3181         struct xe_vm *vm;
3182         struct xe_exec_queue *q = NULL;
3183         u32 num_syncs;
3184         struct xe_sync_entry *syncs = NULL;
3185         struct drm_xe_vm_bind_op *bind_ops;
3186         LIST_HEAD(ops_list);
3187         bool async;
3188         int err;
3189         int i;
3190
3191         err = vm_bind_ioctl_check_args(xe, args, &bind_ops, &async);
3192         if (err)
3193                 return err;
3194
3195         if (args->exec_queue_id) {
3196                 q = xe_exec_queue_lookup(xef, args->exec_queue_id);
3197                 if (XE_IOCTL_DBG(xe, !q)) {
3198                         err = -ENOENT;
3199                         goto free_objs;
3200                 }
3201
3202                 if (XE_IOCTL_DBG(xe, !(q->flags & EXEC_QUEUE_FLAG_VM))) {
3203                         err = -EINVAL;
3204                         goto put_exec_queue;
3205                 }
3206         }
3207
3208         vm = xe_vm_lookup(xef, args->vm_id);
3209         if (XE_IOCTL_DBG(xe, !vm)) {
3210                 err = -EINVAL;
3211                 goto put_exec_queue;
3212         }
3213
3214         err = down_write_killable(&vm->lock);
3215         if (err)
3216                 goto put_vm;
3217
3218         if (XE_IOCTL_DBG(xe, xe_vm_is_closed_or_banned(vm))) {
3219                 err = -ENOENT;
3220                 goto release_vm_lock;
3221         }
3222
3223         if (VM_BIND_OP(bind_ops[0].op) == XE_VM_BIND_OP_RESTART) {
3224                 if (XE_IOCTL_DBG(xe, !(vm->flags & XE_VM_FLAG_ASYNC_BIND_OPS)))
3225                         err = -EOPNOTSUPP;
3226                 if (XE_IOCTL_DBG(xe, !err && args->num_syncs))
3227                         err = -EINVAL;
3228                 if (XE_IOCTL_DBG(xe, !err && !vm->async_ops.error))
3229                         err = -EPROTO;
3230
3231                 if (!err) {
3232                         trace_xe_vm_restart(vm);
3233                         vm_set_async_error(vm, 0);
3234
3235                         queue_work(system_unbound_wq, &vm->async_ops.work);
3236
3237                         /* Rebinds may have been blocked, give worker a kick */
3238                         if (xe_vm_in_compute_mode(vm))
3239                                 xe_vm_queue_rebind_worker(vm);
3240                 }
3241
3242                 goto release_vm_lock;
3243         }
3244
3245         if (XE_IOCTL_DBG(xe, !vm->async_ops.error &&
3246                          async != !!(vm->flags & XE_VM_FLAG_ASYNC_BIND_OPS))) {
3247                 err = -EOPNOTSUPP;
3248                 goto release_vm_lock;
3249         }
3250
3251         for (i = 0; i < args->num_binds; ++i) {
3252                 u64 range = bind_ops[i].range;
3253                 u64 addr = bind_ops[i].addr;
3254
3255                 if (XE_IOCTL_DBG(xe, range > vm->size) ||
3256                     XE_IOCTL_DBG(xe, addr > vm->size - range)) {
3257                         err = -EINVAL;
3258                         goto release_vm_lock;
3259                 }
3260
3261                 if (bind_ops[i].tile_mask) {
3262                         u64 valid_tiles = BIT(xe->info.tile_count) - 1;
3263
3264                         if (XE_IOCTL_DBG(xe, bind_ops[i].tile_mask &
3265                                          ~valid_tiles)) {
3266                                 err = -EINVAL;
3267                                 goto release_vm_lock;
3268                         }
3269                 }
3270         }
3271
3272         bos = kzalloc(sizeof(*bos) * args->num_binds, GFP_KERNEL);
3273         if (!bos) {
3274                 err = -ENOMEM;
3275                 goto release_vm_lock;
3276         }
3277
3278         ops = kzalloc(sizeof(*ops) * args->num_binds, GFP_KERNEL);
3279         if (!ops) {
3280                 err = -ENOMEM;
3281                 goto release_vm_lock;
3282         }
3283
3284         for (i = 0; i < args->num_binds; ++i) {
3285                 struct drm_gem_object *gem_obj;
3286                 u64 range = bind_ops[i].range;
3287                 u64 addr = bind_ops[i].addr;
3288                 u32 obj = bind_ops[i].obj;
3289                 u64 obj_offset = bind_ops[i].obj_offset;
3290
3291                 if (!obj)
3292                         continue;
3293
3294                 gem_obj = drm_gem_object_lookup(file, obj);
3295                 if (XE_IOCTL_DBG(xe, !gem_obj)) {
3296                         err = -ENOENT;
3297                         goto put_obj;
3298                 }
3299                 bos[i] = gem_to_xe_bo(gem_obj);
3300
3301                 if (XE_IOCTL_DBG(xe, range > bos[i]->size) ||
3302                     XE_IOCTL_DBG(xe, obj_offset >
3303                                  bos[i]->size - range)) {
3304                         err = -EINVAL;
3305                         goto put_obj;
3306                 }
3307
3308                 if (bos[i]->flags & XE_BO_INTERNAL_64K) {
3309                         if (XE_IOCTL_DBG(xe, obj_offset &
3310                                          XE_64K_PAGE_MASK) ||
3311                             XE_IOCTL_DBG(xe, addr & XE_64K_PAGE_MASK) ||
3312                             XE_IOCTL_DBG(xe, range & XE_64K_PAGE_MASK)) {
3313                                 err = -EINVAL;
3314                                 goto put_obj;
3315                         }
3316                 }
3317         }
3318
3319         if (args->num_syncs) {
3320                 syncs = kcalloc(args->num_syncs, sizeof(*syncs), GFP_KERNEL);
3321                 if (!syncs) {
3322                         err = -ENOMEM;
3323                         goto put_obj;
3324                 }
3325         }
3326
3327         syncs_user = u64_to_user_ptr(args->syncs);
3328         for (num_syncs = 0; num_syncs < args->num_syncs; num_syncs++) {
3329                 err = xe_sync_entry_parse(xe, xef, &syncs[num_syncs],
3330                                           &syncs_user[num_syncs], false,
3331                                           xe_vm_no_dma_fences(vm));
3332                 if (err)
3333                         goto free_syncs;
3334         }
3335
3336         /* Do some error checking first to make the unwind easier */
3337         for (i = 0; i < args->num_binds; ++i) {
3338                 u64 range = bind_ops[i].range;
3339                 u64 addr = bind_ops[i].addr;
3340                 u32 op = bind_ops[i].op;
3341
3342                 err = vm_bind_ioctl_lookup_vma(vm, bos[i], addr, range, op);
3343                 if (err)
3344                         goto free_syncs;
3345         }
3346
3347         for (i = 0; i < args->num_binds; ++i) {
3348                 u64 range = bind_ops[i].range;
3349                 u64 addr = bind_ops[i].addr;
3350                 u32 op = bind_ops[i].op;
3351                 u64 obj_offset = bind_ops[i].obj_offset;
3352                 u8 tile_mask = bind_ops[i].tile_mask;
3353                 u32 region = bind_ops[i].region;
3354
3355                 ops[i] = vm_bind_ioctl_ops_create(vm, bos[i], obj_offset,
3356                                                   addr, range, op, tile_mask,
3357                                                   region);
3358                 if (IS_ERR(ops[i])) {
3359                         err = PTR_ERR(ops[i]);
3360                         ops[i] = NULL;
3361                         goto unwind_ops;
3362                 }
3363
3364                 err = vm_bind_ioctl_ops_parse(vm, q, ops[i], syncs, num_syncs,
3365                                               &ops_list,
3366                                               i == args->num_binds - 1,
3367                                               async);
3368                 if (err)
3369                         goto unwind_ops;
3370         }
3371
3372         /* Nothing to do */
3373         if (list_empty(&ops_list)) {
3374                 err = -ENODATA;
3375                 goto unwind_ops;
3376         }
3377
3378         err = vm_bind_ioctl_ops_execute(vm, &ops_list, async);
3379         up_write(&vm->lock);
3380
3381         for (i = 0; i < args->num_binds; ++i)
3382                 xe_bo_put(bos[i]);
3383
3384         kfree(bos);
3385         kfree(ops);
3386         if (args->num_binds > 1)
3387                 kfree(bind_ops);
3388
3389         return err;
3390
3391 unwind_ops:
3392         vm_bind_ioctl_ops_unwind(vm, ops, args->num_binds);
3393 free_syncs:
3394         for (i = 0; err == -ENODATA && i < num_syncs; i++)
3395                 xe_sync_entry_signal(&syncs[i], NULL, dma_fence_get_stub());
3396         while (num_syncs--)
3397                 xe_sync_entry_cleanup(&syncs[num_syncs]);
3398
3399         kfree(syncs);
3400 put_obj:
3401         for (i = 0; i < args->num_binds; ++i)
3402                 xe_bo_put(bos[i]);
3403 release_vm_lock:
3404         up_write(&vm->lock);
3405 put_vm:
3406         xe_vm_put(vm);
3407 put_exec_queue:
3408         if (q)
3409                 xe_exec_queue_put(q);
3410 free_objs:
3411         kfree(bos);
3412         kfree(ops);
3413         if (args->num_binds > 1)
3414                 kfree(bind_ops);
3415         return err == -ENODATA ? 0 : err;
3416 }
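/*
 * A minimal userspace sketch of a single synchronous MAP through this ioctl:
 * the one op travels inline in args.bind, so no vector_of_binds is needed,
 * and the VM must have been created without async bind ops for a non-ASYNC
 * op to be accepted.  The DRM_IOCTL_XE_VM_BIND request macro and the
 * <sys/ioctl.h>/xe_drm.h includes are assumed; vm_id, bo_handle and the
 * addresses are illustrative:
 */
static int map_one_bo_example(int fd, __u32 vm_id, __u32 bo_handle,
                              __u64 gpu_addr, __u64 size)
{
        struct drm_xe_vm_bind args = {
                .vm_id = vm_id,
                .num_binds = 1,
                .bind = {
                        .obj = bo_handle,
                        .obj_offset = 0,
                        .addr = gpu_addr,       /* page aligned */
                        .range = size,          /* page aligned, non-zero */
                        .op = XE_VM_BIND_OP_MAP,
                },
        };

        return ioctl(fd, DRM_IOCTL_XE_VM_BIND, &args);
}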
3417
3418 /**
3419  * xe_vm_lock() - Lock the vm's dma_resv object
3420  * @vm: The struct xe_vm whose lock is to be locked
3421  * @intr: Whether to perform any wait interruptibly
3422  *
3423  * Return: 0 on success, -EINTR if @intr is true and the wait for a
3424  * contended lock was interrupted. If @intr is false, the function
3425  * always returns 0.
3426  */
3427 int xe_vm_lock(struct xe_vm *vm, bool intr)
3428 {
3429         if (intr)
3430                 return dma_resv_lock_interruptible(xe_vm_resv(vm), NULL);
3431
3432         return dma_resv_lock(xe_vm_resv(vm), NULL);
3433 }
3434
3435 /**
3436  * xe_vm_unlock() - Unlock the vm's dma_resv object
3437  * @vm: The struct xe_vm whose lock is to be released.
3438  *
3439  * Unlock the vm's dma_resv object that was locked by xe_vm_lock().
3440  */
3441 void xe_vm_unlock(struct xe_vm *vm)
3442 {
3443         dma_resv_unlock(xe_vm_resv(vm));
3444 }
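/*
 * A minimal sketch of the intended pairing of the two helpers above: take the
 * vm's dma_resv interruptibly, do the protected work, and drop it again (the
 * function and its body are illustrative):
 */
static int vm_locked_section_example(struct xe_vm *vm)
{
        int err;

        err = xe_vm_lock(vm, true);     /* may return -EINTR */
        if (err)
                return err;

        /* ... state protected by the vm's dma_resv is safe to touch here ... */

        xe_vm_unlock(vm);

        return 0;
}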
3445
3446 /**
3447  * xe_vm_invalidate_vma - invalidate GPU mappings for VMA without a lock
3448  * @vma: VMA to invalidate
3449  *
3450  * Walks the list of page-table leaves, zeroing the entries owned by this
3451  * VMA, invalidates the TLBs, and blocks until the TLB invalidation is
3452  * complete.
3453  *
3454  * Return: 0 on success, negative error code otherwise.
3455  */
3456 int xe_vm_invalidate_vma(struct xe_vma *vma)
3457 {
3458         struct xe_device *xe = xe_vma_vm(vma)->xe;
3459         struct xe_tile *tile;
3460         u32 tile_needs_invalidate = 0;
3461         int seqno[XE_MAX_TILES_PER_DEVICE];
3462         u8 id;
3463         int ret;
3464
3465         XE_WARN_ON(!xe_vm_in_fault_mode(xe_vma_vm(vma)));
3466         XE_WARN_ON(xe_vma_is_null(vma));
3467         trace_xe_vma_usm_invalidate(vma);
3468
3469         /* Check that we don't race with page-table updates */
3470         if (IS_ENABLED(CONFIG_PROVE_LOCKING)) {
3471                 if (xe_vma_is_userptr(vma)) {
3472                         WARN_ON_ONCE(!mmu_interval_check_retry
3473                                      (&vma->userptr.notifier,
3474                                       vma->userptr.notifier_seq));
3475                         WARN_ON_ONCE(!dma_resv_test_signaled(xe_vm_resv(xe_vma_vm(vma)),
3476                                                              DMA_RESV_USAGE_BOOKKEEP));
3477
3478                 } else {
3479                         xe_bo_assert_held(xe_vma_bo(vma));
3480                 }
3481         }
3482
3483         for_each_tile(tile, xe, id) {
3484                 if (xe_pt_zap_ptes(tile, vma)) {
3485                         tile_needs_invalidate |= BIT(id);
3486                         xe_device_wmb(xe);
3487                         /*
3488                          * FIXME: We potentially need to invalidate multiple
3489                          * GTs within the tile
3490                          */
3491                         seqno[id] = xe_gt_tlb_invalidation_vma(tile->primary_gt, NULL, vma);
3492                         if (seqno[id] < 0)
3493                                 return seqno[id];
3494                 }
3495         }
3496
3497         for_each_tile(tile, xe, id) {
3498                 if (tile_needs_invalidate & BIT(id)) {
3499                         ret = xe_gt_tlb_invalidation_wait(tile->primary_gt, seqno[id]);
3500                         if (ret < 0)
3501                                 return ret;
3502                 }
3503         }
3504
3505         vma->usm.tile_invalidated = vma->tile_mask;
3506
3507         return 0;
3508 }
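/*
 * A minimal sketch of a caller on the userptr invalidation path, assuming a
 * fault-mode VM and that the VMA had been bound before the notifier fired;
 * the "was_bound" flag and the helper itself are illustrative, and the real
 * notifier plumbing lives earlier in this file:
 */
static void userptr_invalidate_example(struct xe_vma *vma, bool was_bound)
{
        struct xe_vm *vm = xe_vma_vm(vma);

        if (xe_vm_in_fault_mode(vm) && was_bound)
                XE_WARN_ON(xe_vm_invalidate_vma(vma));
}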
3509
3510 int xe_analyze_vm(struct drm_printer *p, struct xe_vm *vm, int gt_id)
3511 {
3512         struct drm_gpuva *gpuva;
3513         bool is_vram;
3514         uint64_t addr;
3515
3516         if (!down_read_trylock(&vm->lock)) {
3517                 drm_printf(p, " Failed to acquire VM lock to dump capture\n");
3518                 return 0;
3519         }
3520         if (vm->pt_root[gt_id]) {
3521                 addr = xe_bo_addr(vm->pt_root[gt_id]->bo, 0, XE_PAGE_SIZE);
3522                 is_vram = xe_bo_is_vram(vm->pt_root[gt_id]->bo);
3523                 drm_printf(p, " VM root: A:0x%llx %s\n", addr,
3524                            is_vram ? "VRAM" : "SYS");
3525         }
3526
3527         drm_gpuvm_for_each_va(gpuva, &vm->gpuvm) {
3528                 struct xe_vma *vma = gpuva_to_vma(gpuva);
3529                 bool is_userptr = xe_vma_is_userptr(vma);
3530                 bool is_null = xe_vma_is_null(vma);
3531
3532                 if (is_null) {
3533                         addr = 0;
3534                 } else if (is_userptr) {
3535                         struct xe_res_cursor cur;
3536
3537                         if (vma->userptr.sg) {
3538                                 xe_res_first_sg(vma->userptr.sg, 0, XE_PAGE_SIZE,
3539                                                 &cur);
3540                                 addr = xe_res_dma(&cur);
3541                         } else {
3542                                 addr = 0;
3543                         }
3544                 } else {
3545                         addr = __xe_bo_addr(xe_vma_bo(vma), 0, XE_PAGE_SIZE);
3546                         is_vram = xe_bo_is_vram(xe_vma_bo(vma));
3547                 }
3548                 drm_printf(p, " [%016llx-%016llx] S:0x%016llx A:%016llx %s\n",
3549                            xe_vma_start(vma), xe_vma_end(vma) - 1,
3550                            xe_vma_size(vma),
3551                            addr, is_null ? "NULL" : is_userptr ? "USR" :
3552                            is_vram ? "VRAM" : "SYS");
3553         }
3554         up_read(&vm->lock);
3555
3556         return 0;
3557 }
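/*
 * A minimal sketch of wiring xe_analyze_vm() to a debugfs file via the
 * drm_printer seq_file adapter; the show function, the m->private wiring,
 * the gt_id of 0 and the extra <linux/seq_file.h> include are illustrative
 * assumptions, not part of this file:
 */
static int xe_vm_analyze_show(struct seq_file *m, void *data)
{
        struct xe_vm *vm = m->private;
        struct drm_printer p = drm_seq_file_printer(m);

        return xe_analyze_vm(&p, vm, 0 /* gt_id */);
}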