1 // SPDX-License-Identifier: MIT
3 * Copyright © 2021 Intel Corporation
8 #include <linux/dma-fence-array.h>
10 #include <drm/drm_exec.h>
11 #include <drm/drm_print.h>
12 #include <drm/ttm/ttm_execbuf_util.h>
13 #include <drm/ttm/ttm_tt.h>
14 #include <drm/xe_drm.h>
15 #include <linux/delay.h>
16 #include <linux/kthread.h>
18 #include <linux/swap.h>
21 #include "xe_device.h"
22 #include "xe_exec_queue.h"
24 #include "xe_gt_pagefault.h"
25 #include "xe_gt_tlb_invalidation.h"
26 #include "xe_migrate.h"
28 #include "xe_preempt_fence.h"
30 #include "xe_res_cursor.h"
33 #include "generated/xe_wa_oob.h"
36 #define TEST_VM_ASYNC_OPS_ERROR
39 * xe_vma_userptr_check_repin() - Advisory check for repin needed
40 * @vma: The userptr vma
42 * Check if the userptr vma has been invalidated since last successful
43 * repin. The check is advisory only and the function can be called
44 * without the vm->userptr.notifier_lock held. There is no guarantee that the
45 * vma userptr will remain valid after a lockless check, so typically
46 * the call needs to be followed by a proper check under the notifier_lock.
48 * Return: 0 if userptr vma is valid, -EAGAIN otherwise; repin recommended.
50 int xe_vma_userptr_check_repin(struct xe_vma *vma)
52 return mmu_interval_check_retry(&vma->userptr.notifier,
53 vma->userptr.notifier_seq) ?
57 int xe_vma_userptr_pin_pages(struct xe_vma *vma)
59 struct xe_vm *vm = xe_vma_vm(vma);
60 struct xe_device *xe = vm->xe;
61 const unsigned long num_pages = xe_vma_size(vma) >> PAGE_SHIFT;
63 bool in_kthread = !current->mm;
64 unsigned long notifier_seq;
66 bool read_only = xe_vma_read_only(vma);
68 lockdep_assert_held(&vm->lock);
69 XE_WARN_ON(!xe_vma_is_userptr(vma));
71 if (vma->gpuva.flags & XE_VMA_DESTROYED)
74 notifier_seq = mmu_interval_read_begin(&vma->userptr.notifier);
75 if (notifier_seq == vma->userptr.notifier_seq)
78 pages = kvmalloc_array(num_pages, sizeof(*pages), GFP_KERNEL);
82 if (vma->userptr.sg) {
83 dma_unmap_sgtable(xe->drm.dev,
85 read_only ? DMA_TO_DEVICE :
86 DMA_BIDIRECTIONAL, 0);
87 sg_free_table(vma->userptr.sg);
88 vma->userptr.sg = NULL;
93 if (!mmget_not_zero(vma->userptr.notifier.mm)) {
97 kthread_use_mm(vma->userptr.notifier.mm);
100 while (pinned < num_pages) {
101 ret = get_user_pages_fast(xe_vma_userptr(vma) +
104 read_only ? 0 : FOLL_WRITE,
117 kthread_unuse_mm(vma->userptr.notifier.mm);
118 mmput(vma->userptr.notifier.mm);
124 ret = sg_alloc_table_from_pages_segment(&vma->userptr.sgt, pages,
126 (u64)pinned << PAGE_SHIFT,
127 xe_sg_segment_size(xe->drm.dev),
130 vma->userptr.sg = NULL;
133 vma->userptr.sg = &vma->userptr.sgt;
135 ret = dma_map_sgtable(xe->drm.dev, vma->userptr.sg,
136 read_only ? DMA_TO_DEVICE :
138 DMA_ATTR_SKIP_CPU_SYNC |
139 DMA_ATTR_NO_KERNEL_MAPPING);
141 sg_free_table(vma->userptr.sg);
142 vma->userptr.sg = NULL;
146 for (i = 0; i < pinned; ++i) {
149 set_page_dirty(pages[i]);
150 unlock_page(pages[i]);
153 mark_page_accessed(pages[i]);
157 release_pages(pages, pinned);
161 vma->userptr.notifier_seq = notifier_seq;
162 if (xe_vma_userptr_check_repin(vma) == -EAGAIN)
166 return ret < 0 ? ret : 0;
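/*
 * Returns true if any compute exec queue on the VM either has no preempt
 * fence or has one whose software signaling has been enabled, i.e. a
 * preemption is pending or in flight.
 */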
169 static bool preempt_fences_waiting(struct xe_vm *vm)
171 struct xe_exec_queue *q;
173 lockdep_assert_held(&vm->lock);
174 xe_vm_assert_held(vm);
176 list_for_each_entry(q, &vm->preempt.exec_queues, compute.link) {
177 if (!q->compute.pfence ||
178 (q->compute.pfence && test_bit(DMA_FENCE_FLAG_ENABLE_SIGNAL_BIT,
179 &q->compute.pfence->flags))) {
187 static void free_preempt_fences(struct list_head *list)
189 struct list_head *link, *next;
191 list_for_each_safe(link, next, list)
192 xe_preempt_fence_free(to_preempt_fence_from_link(link));
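/*
 * Pre-allocate one preempt fence per compute exec queue on the VM and
 * stash them on @list so they can later be armed without further
 * allocation.
 */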
195 static int alloc_preempt_fences(struct xe_vm *vm, struct list_head *list,
198 lockdep_assert_held(&vm->lock);
199 xe_vm_assert_held(vm);
201 if (*count >= vm->preempt.num_exec_queues)
204 for (; *count < vm->preempt.num_exec_queues; ++(*count)) {
205 struct xe_preempt_fence *pfence = xe_preempt_fence_alloc();
208 return PTR_ERR(pfence);
210 list_move_tail(xe_preempt_fence_link(pfence), list);
216 static int wait_for_existing_preempt_fences(struct xe_vm *vm)
218 struct xe_exec_queue *q;
220 xe_vm_assert_held(vm);
222 list_for_each_entry(q, &vm->preempt.exec_queues, compute.link) {
223 if (q->compute.pfence) {
224 long timeout = dma_fence_wait(q->compute.pfence, false);
228 dma_fence_put(q->compute.pfence);
229 q->compute.pfence = NULL;
236 static bool xe_vm_is_idle(struct xe_vm *vm)
238 struct xe_exec_queue *q;
240 xe_vm_assert_held(vm);
241 list_for_each_entry(q, &vm->preempt.exec_queues, compute.link) {
242 if (!xe_exec_queue_is_idle(q))
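/*
 * Pair each compute exec queue with one of the pre-allocated fences on
 * @list, arm it and make it the queue's current preempt fence, dropping
 * the reference to the previous one.
 */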
249 static void arm_preempt_fences(struct xe_vm *vm, struct list_head *list)
251 struct list_head *link;
252 struct xe_exec_queue *q;
254 list_for_each_entry(q, &vm->preempt.exec_queues, compute.link) {
255 struct dma_fence *fence;
258 XE_WARN_ON(link == list);
260 fence = xe_preempt_fence_arm(to_preempt_fence_from_link(link),
261 q, q->compute.context,
263 dma_fence_put(q->compute.pfence);
264 q->compute.pfence = fence;
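/*
 * Add the current preempt fence of every compute exec queue to @bo's
 * reservation object with BOOKKEEP usage. Called when a new external BO
 * is bound so the BO participates in the VM's preempt-fence mechanism.
 */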
268 static int add_preempt_fences(struct xe_vm *vm, struct xe_bo *bo)
270 struct xe_exec_queue *q;
273 err = xe_bo_lock(bo, true);
277 err = dma_resv_reserve_fences(bo->ttm.base.resv, vm->preempt.num_exec_queues);
281 list_for_each_entry(q, &vm->preempt.exec_queues, compute.link)
282 if (q->compute.pfence) {
283 dma_resv_add_fence(bo->ttm.base.resv,
285 DMA_RESV_USAGE_BOOKKEEP);
294 * xe_vm_fence_all_extobjs() - Add a fence to vm's external objects' resv
296 * @fence: The fence to add.
297 * @usage: The resv usage for the fence.
299 * Loops over all of the vm's external object bindings and adds a @fence
300 * with the given @usage to all of the external objects' reservation
303 void xe_vm_fence_all_extobjs(struct xe_vm *vm, struct dma_fence *fence,
304 enum dma_resv_usage usage)
308 list_for_each_entry(vma, &vm->extobj.list, extobj.link)
309 dma_resv_add_fence(xe_vma_bo(vma)->ttm.base.resv, fence, usage);
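/*
 * Re-install each compute exec queue's (freshly resumed) preempt fence
 * into the VM's reservation object and those of all external BOs, with
 * BOOKKEEP usage.
 */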
312 static void resume_and_reinstall_preempt_fences(struct xe_vm *vm)
314 struct xe_exec_queue *q;
316 lockdep_assert_held(&vm->lock);
317 xe_vm_assert_held(vm);
319 list_for_each_entry(q, &vm->preempt.exec_queues, compute.link) {
322 dma_resv_add_fence(xe_vm_resv(vm), q->compute.pfence,
323 DMA_RESV_USAGE_BOOKKEEP);
324 xe_vm_fence_all_extobjs(vm, q->compute.pfence,
325 DMA_RESV_USAGE_BOOKKEEP);
329 int xe_vm_add_compute_exec_queue(struct xe_vm *vm, struct xe_exec_queue *q)
331 struct drm_exec exec;
332 struct dma_fence *pfence;
336 XE_WARN_ON(!xe_vm_in_compute_mode(vm));
338 down_write(&vm->lock);
339 drm_exec_init(&exec, DRM_EXEC_INTERRUPTIBLE_WAIT);
340 drm_exec_until_all_locked(&exec) {
341 err = xe_vm_lock_dma_resv(vm, &exec, 1, true);
342 drm_exec_retry_on_contention(&exec);
347 pfence = xe_preempt_fence_create(q, q->compute.context,
354 list_add(&q->compute.link, &vm->preempt.exec_queues);
355 ++vm->preempt.num_exec_queues;
356 q->compute.pfence = pfence;
358 down_read(&vm->userptr.notifier_lock);
360 dma_resv_add_fence(xe_vm_resv(vm), pfence,
361 DMA_RESV_USAGE_BOOKKEEP);
363 xe_vm_fence_all_extobjs(vm, pfence, DMA_RESV_USAGE_BOOKKEEP);
366 * Check to see if a preemption on the VM or a userptr invalidation is
367 * in flight; if so, trigger this preempt fence to sync state with the
368 * other preempt fences on the VM.
370 wait = __xe_vm_userptr_needs_repin(vm) || preempt_fences_waiting(vm);
372 dma_fence_enable_sw_signaling(pfence);
374 up_read(&vm->userptr.notifier_lock);
377 drm_exec_fini(&exec);
384 * __xe_vm_userptr_needs_repin() - Check whether the VM does have userptrs
385 * that need repinning.
388 * This function checks for whether the VM has userptrs that need repinning,
389 * and provides a release-type barrier on the userptr.notifier_lock after
392 * Return: 0 if there are no userptrs needing repinning, -EAGAIN if there are.
394 int __xe_vm_userptr_needs_repin(struct xe_vm *vm)
396 lockdep_assert_held_read(&vm->userptr.notifier_lock);
398 return (list_empty(&vm->userptr.repin_list) &&
399 list_empty(&vm->userptr.invalidated)) ? 0 : -EAGAIN;
403 * xe_vm_lock_dma_resv() - Lock the vm dma_resv object and the dma_resv
404 * objects of the vm's external buffer objects.
406 * @exec: Pointer to a struct drm_exec locking context.
407 * @num_shared: Number of dma-fence slots to reserve in the locked objects.
408 * @lock_vm: Lock also the vm's dma_resv.
410 * Locks the vm dma-resv objects and all the dma-resv objects of the
411 * buffer objects on the vm external object list.
413 * Return: 0 on success, negative error code on error. In particular, if the
414 * drm_exec context allows interruptible waits, -EINTR or -ERESTARTSYS may be returned.
416 int xe_vm_lock_dma_resv(struct xe_vm *vm, struct drm_exec *exec,
417 unsigned int num_shared, bool lock_vm)
419 struct xe_vma *vma, *next;
422 lockdep_assert_held(&vm->lock);
425 err = drm_exec_prepare_obj(exec, &xe_vm_ttm_bo(vm)->base, num_shared);
430 list_for_each_entry(vma, &vm->extobj.list, extobj.link) {
431 err = drm_exec_prepare_obj(exec, &xe_vma_bo(vma)->ttm.base, num_shared);
436 spin_lock(&vm->notifier.list_lock);
437 list_for_each_entry_safe(vma, next, &vm->notifier.rebind_list,
438 notifier.rebind_link) {
439 xe_bo_assert_held(xe_vma_bo(vma));
441 list_del_init(&vma->notifier.rebind_link);
442 if (vma->tile_present && !(vma->gpuva.flags & XE_VMA_DESTROYED))
443 list_move_tail(&vma->combined_links.rebind,
446 spin_unlock(&vm->notifier.list_lock);
451 #define XE_VM_REBIND_RETRY_TIMEOUT_MS 1000
453 static void xe_vm_kill(struct xe_vm *vm)
455 struct xe_exec_queue *q;
457 lockdep_assert_held(&vm->lock);
459 xe_vm_lock(vm, false);
460 vm->flags |= XE_VM_FLAG_BANNED;
461 trace_xe_vm_kill(vm);
463 list_for_each_entry(q, &vm->preempt.exec_queues, compute.link)
467 /* TODO: Inform user the VM is banned */
471 * xe_vm_validate_should_retry() - Whether to retry after a validate error.
472 * @exec: The drm_exec object used for locking before validation.
473 * @err: The error returned from ttm_bo_validate().
474 * @end: A ktime_t cookie that should be set to 0 before first use and
475 * that should be reused on subsequent calls.
477 * With multiple active VMs, under memory pressure, it is possible that
478 * ttm_bo_validate() runs into -EDEADLK and in that case returns -ENOMEM.
479 * Until ttm properly handles locking in such scenarios, the best thing the
480 * driver can do is retry with a timeout. Check if that is necessary, and
481 * if so, unlock the drm_exec's objects while keeping the ticket to prepare
484 * Return: true if a retry after drm_exec_init() is recommended;
487 bool xe_vm_validate_should_retry(struct drm_exec *exec, int err, ktime_t *end)
495 *end = *end ? : ktime_add_ms(cur, XE_VM_REBIND_RETRY_TIMEOUT_MS);
496 if (!ktime_before(cur, *end))
500 * We would like to keep the ticket here with
501 * drm_exec_unlock_all(), but WW mutex asserts currently
502 * stop us from that. In any case this function could go away
503 * with proper TTM -EDEADLK handling.
511 static int xe_preempt_work_begin(struct drm_exec *exec, struct xe_vm *vm,
517 err = drm_exec_prepare_obj(exec, &xe_vm_ttm_bo(vm)->base,
518 vm->preempt.num_exec_queues);
522 if (xe_vm_is_idle(vm)) {
523 vm->preempt.rebind_deactivated = true;
528 if (!preempt_fences_waiting(vm)) {
533 err = xe_vm_lock_dma_resv(vm, exec, vm->preempt.num_exec_queues, false);
537 err = wait_for_existing_preempt_fences(vm);
541 list_for_each_entry(vma, &vm->rebind_list, combined_links.rebind) {
542 if (xe_vma_has_no_bo(vma) ||
543 vma->gpuva.flags & XE_VMA_DESTROYED)
546 err = xe_bo_validate(xe_vma_bo(vma), vm, false);
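/*
 * Rebind worker for compute-mode VMs: repins invalidated userptrs,
 * revalidates and rebinds evicted BOs, waits for in-flight kernel
 * binds/unbinds, and finally re-arms and re-installs the preempt fences.
 * Retries from the top on -EAGAIN.
 */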
554 static void preempt_rebind_work_func(struct work_struct *w)
556 struct xe_vm *vm = container_of(w, struct xe_vm, preempt.rebind_work);
557 struct drm_exec exec;
558 struct dma_fence *rebind_fence;
559 unsigned int fence_count = 0;
560 LIST_HEAD(preempt_fences);
564 int __maybe_unused tries = 0;
566 XE_WARN_ON(!xe_vm_in_compute_mode(vm));
567 trace_xe_vm_rebind_worker_enter(vm);
569 down_write(&vm->lock);
571 if (xe_vm_is_closed_or_banned(vm)) {
573 trace_xe_vm_rebind_worker_exit(vm);
578 if (vm->async_ops.error)
579 goto out_unlock_outer;
582 * Extreme corner where we exit a VM error state with a munmap style VM
583 * unbind inflight which requires a rebind. In this case the rebind
584 * needs to install some fences into the dma-resv slots. The worker that
585 * does this is already queued; let it make progress by dropping vm->lock
586 * and trying again.
588 if (vm->async_ops.munmap_rebind_inflight) {
590 flush_work(&vm->async_ops.work);
594 if (xe_vm_userptr_check_repin(vm)) {
595 err = xe_vm_userptr_pin(vm);
597 goto out_unlock_outer;
600 drm_exec_init(&exec, DRM_EXEC_INTERRUPTIBLE_WAIT);
602 drm_exec_until_all_locked(&exec) {
605 err = xe_preempt_work_begin(&exec, vm, &done);
606 drm_exec_retry_on_contention(&exec);
607 if (err && xe_vm_validate_should_retry(&exec, err, &end)) {
609 goto out_unlock_outer;
615 err = alloc_preempt_fences(vm, &preempt_fences, &fence_count);
619 rebind_fence = xe_vm_rebind(vm, true);
620 if (IS_ERR(rebind_fence)) {
621 err = PTR_ERR(rebind_fence);
626 dma_fence_wait(rebind_fence, false);
627 dma_fence_put(rebind_fence);
630 /* Wait on munmap style VM unbinds */
631 wait = dma_resv_wait_timeout(xe_vm_resv(vm),
632 DMA_RESV_USAGE_KERNEL,
633 false, MAX_SCHEDULE_TIMEOUT);
639 #define retry_required(__tries, __vm) \
640 (IS_ENABLED(CONFIG_DRM_XE_USERPTR_INVAL_INJECT) ? \
641 (!(__tries)++ || __xe_vm_userptr_needs_repin(__vm)) : \
642 __xe_vm_userptr_needs_repin(__vm))
644 down_read(&vm->userptr.notifier_lock);
645 if (retry_required(tries, vm)) {
646 up_read(&vm->userptr.notifier_lock);
651 #undef retry_required
653 spin_lock(&vm->xe->ttm.lru_lock);
654 ttm_lru_bulk_move_tail(&vm->lru_bulk_move);
655 spin_unlock(&vm->xe->ttm.lru_lock);
657 /* Point of no return. */
658 arm_preempt_fences(vm, &preempt_fences);
659 resume_and_reinstall_preempt_fences(vm);
660 up_read(&vm->userptr.notifier_lock);
663 drm_exec_fini(&exec);
665 if (err == -EAGAIN) {
666 trace_xe_vm_rebind_worker_retry(vm);
671 drm_warn(&vm->xe->drm, "VM worker error: %d\n", err);
676 free_preempt_fences(&preempt_fences);
678 trace_xe_vm_rebind_worker_exit(vm);
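/*
 * MMU interval notifier callback for userptr VMAs: bump the notifier
 * sequence, queue the VMA for repin/rebind, enable signaling on all
 * fences in the VM's reservation object and wait for GPU access to stop
 * before the core mm unmaps the pages.
 */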
681 static bool vma_userptr_invalidate(struct mmu_interval_notifier *mni,
682 const struct mmu_notifier_range *range,
683 unsigned long cur_seq)
685 struct xe_vma *vma = container_of(mni, struct xe_vma, userptr.notifier);
686 struct xe_vm *vm = xe_vma_vm(vma);
687 struct dma_resv_iter cursor;
688 struct dma_fence *fence;
691 XE_WARN_ON(!xe_vma_is_userptr(vma));
692 trace_xe_vma_userptr_invalidate(vma);
694 if (!mmu_notifier_range_blockable(range))
697 down_write(&vm->userptr.notifier_lock);
698 mmu_interval_set_seq(mni, cur_seq);
700 /* No need to stop gpu access if the userptr is not yet bound. */
701 if (!vma->userptr.initial_bind) {
702 up_write(&vm->userptr.notifier_lock);
707 * Tell exec and rebind worker they need to repin and rebind this
710 if (!xe_vm_in_fault_mode(vm) &&
711 !(vma->gpuva.flags & XE_VMA_DESTROYED) && vma->tile_present) {
712 spin_lock(&vm->userptr.invalidated_lock);
713 list_move_tail(&vma->userptr.invalidate_link,
714 &vm->userptr.invalidated);
715 spin_unlock(&vm->userptr.invalidated_lock);
718 up_write(&vm->userptr.notifier_lock);
721 * Preempt fences turn into schedule disables, pipeline these.
722 * Note that even in fault mode, we need to wait for binds and
723 * unbinds to complete, and those are attached as BOOKKEEP fences
726 dma_resv_iter_begin(&cursor, xe_vm_resv(vm),
727 DMA_RESV_USAGE_BOOKKEEP);
728 dma_resv_for_each_fence_unlocked(&cursor, fence)
729 dma_fence_enable_sw_signaling(fence);
730 dma_resv_iter_end(&cursor);
732 err = dma_resv_wait_timeout(xe_vm_resv(vm),
733 DMA_RESV_USAGE_BOOKKEEP,
734 false, MAX_SCHEDULE_TIMEOUT);
735 XE_WARN_ON(err <= 0);
737 if (xe_vm_in_fault_mode(vm)) {
738 err = xe_vm_invalidate_vma(vma);
742 trace_xe_vma_userptr_invalidate_complete(vma);
747 static const struct mmu_interval_notifier_ops vma_userptr_notifier_ops = {
748 .invalidate = vma_userptr_invalidate,
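/*
 * Repin all invalidated userptr VMAs on the VM: collect them off the
 * invalidated list, pin their pages again and move them to the rebind
 * list so the next exec or the rebind worker re-binds them.
 */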
751 int xe_vm_userptr_pin(struct xe_vm *vm)
753 struct xe_vma *vma, *next;
755 LIST_HEAD(tmp_evict);
757 lockdep_assert_held_write(&vm->lock);
759 /* Collect invalidated userptrs */
760 spin_lock(&vm->userptr.invalidated_lock);
761 list_for_each_entry_safe(vma, next, &vm->userptr.invalidated,
762 userptr.invalidate_link) {
763 list_del_init(&vma->userptr.invalidate_link);
764 if (list_empty(&vma->combined_links.userptr))
765 list_move_tail(&vma->combined_links.userptr,
766 &vm->userptr.repin_list);
768 spin_unlock(&vm->userptr.invalidated_lock);
770 /* Pin and move to temporary list */
771 list_for_each_entry_safe(vma, next, &vm->userptr.repin_list,
772 combined_links.userptr) {
773 err = xe_vma_userptr_pin_pages(vma);
777 list_move_tail(&vma->combined_links.userptr, &tmp_evict);
780 /* Take lock and move to rebind_list for rebinding. */
781 err = dma_resv_lock_interruptible(xe_vm_resv(vm), NULL);
785 list_for_each_entry_safe(vma, next, &tmp_evict, combined_links.userptr)
786 list_move_tail(&vma->combined_links.rebind, &vm->rebind_list);
788 dma_resv_unlock(xe_vm_resv(vm));
793 list_splice_tail(&tmp_evict, &vm->userptr.repin_list);
799 * xe_vm_userptr_check_repin() - Check whether the VM might have userptrs
800 * that need repinning.
803 * This function does an advisory check for whether the VM has userptrs that
806 * Return: 0 if there are no indications of userptrs needing repinning,
807 * -EAGAIN if there are.
809 int xe_vm_userptr_check_repin(struct xe_vm *vm)
811 return (list_empty_careful(&vm->userptr.repin_list) &&
812 list_empty_careful(&vm->userptr.invalidated)) ? 0 : -EAGAIN;
815 static struct dma_fence *
816 xe_vm_bind_vma(struct xe_vma *vma, struct xe_exec_queue *q,
817 struct xe_sync_entry *syncs, u32 num_syncs,
818 bool first_op, bool last_op);
820 struct dma_fence *xe_vm_rebind(struct xe_vm *vm, bool rebind_worker)
822 struct dma_fence *fence = NULL;
823 struct xe_vma *vma, *next;
825 lockdep_assert_held(&vm->lock);
826 if (xe_vm_no_dma_fences(vm) && !rebind_worker)
829 xe_vm_assert_held(vm);
830 list_for_each_entry_safe(vma, next, &vm->rebind_list,
831 combined_links.rebind) {
832 XE_WARN_ON(!vma->tile_present);
834 list_del_init(&vma->combined_links.rebind);
835 dma_fence_put(fence);
837 trace_xe_vma_rebind_worker(vma);
839 trace_xe_vma_rebind_exec(vma);
840 fence = xe_vm_bind_vma(vma, NULL, NULL, 0, false, false);
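/*
 * Allocate and initialize a VMA covering [start, end]. BO-backed VMAs
 * take a GEM reference and are linked to the BO's gpuvm_bo; userptr
 * VMAs register an MMU interval notifier over the user address range.
 */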
848 static struct xe_vma *xe_vma_create(struct xe_vm *vm,
850 u64 bo_offset_or_userptr,
857 struct xe_tile *tile;
860 XE_WARN_ON(start >= end);
861 XE_WARN_ON(end >= vm->size);
863 if (!bo && !is_null) /* userptr */
864 vma = kzalloc(sizeof(*vma), GFP_KERNEL);
866 vma = kzalloc(sizeof(*vma) - sizeof(struct xe_userptr),
869 vma = ERR_PTR(-ENOMEM);
873 INIT_LIST_HEAD(&vma->combined_links.rebind);
874 INIT_LIST_HEAD(&vma->notifier.rebind_link);
875 INIT_LIST_HEAD(&vma->extobj.link);
877 INIT_LIST_HEAD(&vma->gpuva.gem.entry);
878 vma->gpuva.vm = &vm->gpuvm;
879 vma->gpuva.va.addr = start;
880 vma->gpuva.va.range = end - start + 1;
882 vma->gpuva.flags |= XE_VMA_READ_ONLY;
884 vma->gpuva.flags |= DRM_GPUVA_SPARSE;
887 vma->tile_mask = tile_mask;
889 for_each_tile(tile, vm->xe, id)
890 vma->tile_mask |= 0x1 << id;
893 if (vm->xe->info.platform == XE_PVC)
894 vma->gpuva.flags |= XE_VMA_ATOMIC_PTE_BIT;
897 struct drm_gpuvm_bo *vm_bo;
899 xe_bo_assert_held(bo);
901 vm_bo = drm_gpuvm_bo_obtain(vma->gpuva.vm, &bo->ttm.base);
904 return ERR_CAST(vm_bo);
907 drm_gem_object_get(&bo->ttm.base);
908 vma->gpuva.gem.obj = &bo->ttm.base;
909 vma->gpuva.gem.offset = bo_offset_or_userptr;
910 drm_gpuva_link(&vma->gpuva, vm_bo);
911 drm_gpuvm_bo_put(vm_bo);
912 } else /* userptr or null */ {
914 u64 size = end - start + 1;
917 INIT_LIST_HEAD(&vma->userptr.invalidate_link);
918 vma->gpuva.gem.offset = bo_offset_or_userptr;
920 err = mmu_interval_notifier_insert(&vma->userptr.notifier,
922 xe_vma_userptr(vma), size,
923 &vma_userptr_notifier_ops);
930 vma->userptr.notifier_seq = LONG_MAX;
939 static bool vm_remove_extobj(struct xe_vma *vma)
941 if (!list_empty(&vma->extobj.link)) {
942 xe_vma_vm(vma)->extobj.entries--;
943 list_del_init(&vma->extobj.link);
949 static void xe_vma_destroy_late(struct xe_vma *vma)
951 struct xe_vm *vm = xe_vma_vm(vma);
952 struct xe_device *xe = vm->xe;
953 bool read_only = xe_vma_read_only(vma);
955 if (xe_vma_is_userptr(vma)) {
956 if (vma->userptr.sg) {
957 dma_unmap_sgtable(xe->drm.dev,
959 read_only ? DMA_TO_DEVICE :
960 DMA_BIDIRECTIONAL, 0);
961 sg_free_table(vma->userptr.sg);
962 vma->userptr.sg = NULL;
966 * Since userptr pages are not pinned, we can't remove
967 * the notifier until we're sure the GPU is not accessing
970 mmu_interval_notifier_remove(&vma->userptr.notifier);
972 } else if (xe_vma_is_null(vma)) {
975 xe_bo_put(xe_vma_bo(vma));
981 static void vma_destroy_work_func(struct work_struct *w)
984 container_of(w, struct xe_vma, destroy_work);
986 xe_vma_destroy_late(vma);
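/*
 * Return another VMA (excluding @ignore) through which @bo is mapped
 * into @vm, or NULL if there is none. Used to decide whether @bo still
 * needs an entry on the VM's external-object list.
 */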
989 static struct xe_vma *
990 bo_has_vm_references_locked(struct xe_bo *bo, struct xe_vm *vm,
991 struct xe_vma *ignore)
993 struct drm_gpuvm_bo *vm_bo;
994 struct drm_gpuva *va;
995 struct drm_gem_object *obj = &bo->ttm.base;
997 xe_bo_assert_held(bo);
999 drm_gem_for_each_gpuvm_bo(vm_bo, obj) {
1000 drm_gpuvm_bo_for_each_va(va, vm_bo) {
1001 struct xe_vma *vma = gpuva_to_vma(va);
1003 if (vma != ignore && xe_vma_vm(vma) == vm)
1011 static bool bo_has_vm_references(struct xe_bo *bo, struct xe_vm *vm,
1012 struct xe_vma *ignore)
1016 xe_bo_lock(bo, false);
1017 ret = !!bo_has_vm_references_locked(bo, vm, ignore);
1023 static void __vm_insert_extobj(struct xe_vm *vm, struct xe_vma *vma)
1025 lockdep_assert_held_write(&vm->lock);
1027 list_add(&vma->extobj.link, &vm->extobj.list);
1028 vm->extobj.entries++;
1031 static void vm_insert_extobj(struct xe_vm *vm, struct xe_vma *vma)
1033 struct xe_bo *bo = xe_vma_bo(vma);
1035 lockdep_assert_held_write(&vm->lock);
1037 if (bo_has_vm_references(bo, vm, vma))
1040 __vm_insert_extobj(vm, vma);
1043 static void vma_destroy_cb(struct dma_fence *fence,
1044 struct dma_fence_cb *cb)
1046 struct xe_vma *vma = container_of(cb, struct xe_vma, destroy_cb);
1048 INIT_WORK(&vma->destroy_work, vma_destroy_work_func);
1049 queue_work(system_unbound_wq, &vma->destroy_work);
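/*
 * Tear down a VMA. When @fence is non-NULL the final cleanup
 * (xe_vma_destroy_late) is deferred via a fence callback until the
 * fence signals, guaranteeing the GPU has stopped using the mapping.
 */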
1052 static void xe_vma_destroy(struct xe_vma *vma, struct dma_fence *fence)
1054 struct xe_vm *vm = xe_vma_vm(vma);
1056 lockdep_assert_held_write(&vm->lock);
1057 XE_WARN_ON(!list_empty(&vma->combined_links.destroy));
1059 if (xe_vma_is_userptr(vma)) {
1060 XE_WARN_ON(!(vma->gpuva.flags & XE_VMA_DESTROYED));
1062 spin_lock(&vm->userptr.invalidated_lock);
1063 list_del(&vma->userptr.invalidate_link);
1064 spin_unlock(&vm->userptr.invalidated_lock);
1065 } else if (!xe_vma_is_null(vma)) {
1066 xe_bo_assert_held(xe_vma_bo(vma));
1068 spin_lock(&vm->notifier.list_lock);
1069 list_del(&vma->notifier.rebind_link);
1070 spin_unlock(&vm->notifier.list_lock);
1072 drm_gpuva_unlink(&vma->gpuva);
1074 if (!xe_vma_bo(vma)->vm && vm_remove_extobj(vma)) {
1075 struct xe_vma *other;
1077 other = bo_has_vm_references_locked(xe_vma_bo(vma), vm, NULL);
1080 __vm_insert_extobj(vm, other);
1084 xe_vm_assert_held(vm);
1086 int ret = dma_fence_add_callback(fence, &vma->destroy_cb,
1090 XE_WARN_ON(ret != -ENOENT);
1091 xe_vma_destroy_late(vma);
1094 xe_vma_destroy_late(vma);
1098 static void xe_vma_destroy_unlocked(struct xe_vma *vma)
1100 struct ttm_validate_buffer tv[2];
1101 struct ww_acquire_ctx ww;
1102 struct xe_bo *bo = xe_vma_bo(vma);
1107 memset(tv, 0, sizeof(tv));
1108 tv[0].bo = xe_vm_ttm_bo(xe_vma_vm(vma));
1109 list_add(&tv[0].head, &objs);
1112 tv[1].bo = &xe_bo_get(bo)->ttm;
1113 list_add(&tv[1].head, &objs);
1115 err = ttm_eu_reserve_buffers(&ww, &objs, false, &dups);
1118 xe_vma_destroy(vma, NULL);
1120 ttm_eu_backoff_reservation(&ww, &objs);
1126 xe_vm_find_overlapping_vma(struct xe_vm *vm, u64 start, u64 range)
1128 struct drm_gpuva *gpuva;
1130 lockdep_assert_held(&vm->lock);
1132 if (xe_vm_is_closed_or_banned(vm))
1135 XE_WARN_ON(start + range > vm->size);
1137 gpuva = drm_gpuva_find_first(&vm->gpuvm, start, range);
1139 return gpuva ? gpuva_to_vma(gpuva) : NULL;
1142 static int xe_vm_insert_vma(struct xe_vm *vm, struct xe_vma *vma)
1146 XE_WARN_ON(xe_vma_vm(vma) != vm);
1147 lockdep_assert_held(&vm->lock);
1149 err = drm_gpuva_insert(&vm->gpuvm, &vma->gpuva);
1150 XE_WARN_ON(err); /* Shouldn't be possible */
1155 static void xe_vm_remove_vma(struct xe_vm *vm, struct xe_vma *vma)
1157 XE_WARN_ON(xe_vma_vm(vma) != vm);
1158 lockdep_assert_held(&vm->lock);
1160 drm_gpuva_remove(&vma->gpuva);
1161 if (vm->usm.last_fault_vma == vma)
1162 vm->usm.last_fault_vma = NULL;
1165 static struct drm_gpuva_op *xe_vm_op_alloc(void)
1167 struct xe_vma_op *op;
1169 op = kzalloc(sizeof(*op), GFP_KERNEL);
1177 static void xe_vm_free(struct drm_gpuvm *gpuvm);
1179 static struct drm_gpuvm_ops gpuvm_ops = {
1180 .op_alloc = xe_vm_op_alloc,
1181 .vm_free = xe_vm_free,
1184 static void xe_vma_op_work_func(struct work_struct *w);
1185 static void vm_destroy_work_func(struct work_struct *w);
1187 struct xe_vm *xe_vm_create(struct xe_device *xe, u32 flags)
1189 struct drm_gem_object *vm_resv_obj;
1191 int err, number_tiles = 0;
1192 struct xe_tile *tile;
1195 vm = kzalloc(sizeof(*vm), GFP_KERNEL);
1197 return ERR_PTR(-ENOMEM);
1201 vm->size = 1ull << xe->info.va_bits;
1205 init_rwsem(&vm->lock);
1207 INIT_LIST_HEAD(&vm->rebind_list);
1209 INIT_LIST_HEAD(&vm->userptr.repin_list);
1210 INIT_LIST_HEAD(&vm->userptr.invalidated);
1211 init_rwsem(&vm->userptr.notifier_lock);
1212 spin_lock_init(&vm->userptr.invalidated_lock);
1214 INIT_LIST_HEAD(&vm->notifier.rebind_list);
1215 spin_lock_init(&vm->notifier.list_lock);
1217 INIT_LIST_HEAD(&vm->async_ops.pending);
1218 INIT_WORK(&vm->async_ops.work, xe_vma_op_work_func);
1219 spin_lock_init(&vm->async_ops.lock);
1221 INIT_WORK(&vm->destroy_work, vm_destroy_work_func);
1223 INIT_LIST_HEAD(&vm->preempt.exec_queues);
1224 vm->preempt.min_run_period_ms = 10; /* FIXME: Wire up to uAPI */
1226 for_each_tile(tile, xe, id)
1227 xe_range_fence_tree_init(&vm->rftree[id]);
1229 INIT_LIST_HEAD(&vm->extobj.list);
1231 if (!(flags & XE_VM_FLAG_MIGRATION))
1232 xe_device_mem_access_get(xe);
1234 vm_resv_obj = drm_gpuvm_resv_object_alloc(&xe->drm);
1240 drm_gpuvm_init(&vm->gpuvm, "Xe VM", 0, &xe->drm, vm_resv_obj,
1241 0, vm->size, 0, 0, &gpuvm_ops);
1243 drm_gem_object_put(vm_resv_obj);
1245 err = dma_resv_lock_interruptible(xe_vm_resv(vm), NULL);
1249 if (IS_DGFX(xe) && xe->info.vram_flags & XE_VRAM_FLAGS_NEED64K)
1250 vm->flags |= XE_VM_FLAG_64K;
1252 for_each_tile(tile, xe, id) {
1253 if (flags & XE_VM_FLAG_MIGRATION &&
1254 tile->id != XE_VM_FLAG_TILE_ID(flags))
1257 vm->pt_root[id] = xe_pt_create(vm, tile, xe->info.vm_max_level);
1258 if (IS_ERR(vm->pt_root[id])) {
1259 err = PTR_ERR(vm->pt_root[id]);
1260 vm->pt_root[id] = NULL;
1261 goto err_unlock_close;
1265 if (flags & XE_VM_FLAG_SCRATCH_PAGE) {
1266 for_each_tile(tile, xe, id) {
1267 if (!vm->pt_root[id])
1270 err = xe_pt_create_scratch(xe, tile, vm);
1272 goto err_unlock_close;
1274 vm->batch_invalidate_tlb = true;
1277 if (flags & XE_VM_FLAG_COMPUTE_MODE) {
1278 INIT_WORK(&vm->preempt.rebind_work, preempt_rebind_work_func);
1279 vm->flags |= XE_VM_FLAG_COMPUTE_MODE;
1280 vm->batch_invalidate_tlb = false;
1283 if (flags & XE_VM_FLAG_ASYNC_BIND_OPS) {
1284 vm->async_ops.fence.context = dma_fence_context_alloc(1);
1285 vm->flags |= XE_VM_FLAG_ASYNC_BIND_OPS;
1288 /* Fill pt_root after allocating scratch tables */
1289 for_each_tile(tile, xe, id) {
1290 if (!vm->pt_root[id])
1293 xe_pt_populate_empty(tile, vm, vm->pt_root[id]);
1295 dma_resv_unlock(xe_vm_resv(vm));
1297 /* Kernel migration VM shouldn't have a circular loop. */
1298 if (!(flags & XE_VM_FLAG_MIGRATION)) {
1299 for_each_tile(tile, xe, id) {
1300 struct xe_gt *gt = tile->primary_gt;
1301 struct xe_vm *migrate_vm;
1302 struct xe_exec_queue *q;
1304 if (!vm->pt_root[id])
1307 migrate_vm = xe_migrate_get_vm(tile->migrate);
1308 q = xe_exec_queue_create_class(xe, gt, migrate_vm,
1309 XE_ENGINE_CLASS_COPY,
1310 EXEC_QUEUE_FLAG_VM);
1311 xe_vm_put(migrate_vm);
1321 if (number_tiles > 1)
1322 vm->composite_fence_ctx = dma_fence_context_alloc(1);
1324 mutex_lock(&xe->usm.lock);
1325 if (flags & XE_VM_FLAG_FAULT_MODE)
1326 xe->usm.num_vm_in_fault_mode++;
1327 else if (!(flags & XE_VM_FLAG_MIGRATION))
1328 xe->usm.num_vm_in_non_fault_mode++;
1329 mutex_unlock(&xe->usm.lock);
1331 trace_xe_vm_create(vm);
1336 dma_resv_unlock(xe_vm_resv(vm));
1338 xe_vm_close_and_put(vm);
1339 return ERR_PTR(err);
1342 for_each_tile(tile, xe, id)
1343 xe_range_fence_tree_fini(&vm->rftree[id]);
1345 if (!(flags & XE_VM_FLAG_MIGRATION))
1346 xe_device_mem_access_put(xe);
1347 return ERR_PTR(err);
1350 static void flush_async_ops(struct xe_vm *vm)
1352 queue_work(system_unbound_wq, &vm->async_ops.work);
1353 flush_work(&vm->async_ops.work);
1356 static void vm_error_capture(struct xe_vm *vm, int err,
1357 u32 op, u64 addr, u64 size)
1359 struct drm_xe_vm_bind_op_error_capture capture;
1360 u64 __user *address =
1361 u64_to_user_ptr(vm->async_ops.error_capture.addr);
1362 bool in_kthread = !current->mm;
1364 capture.error = err;
1366 capture.addr = addr;
1367 capture.size = size;
1370 if (!mmget_not_zero(vm->async_ops.error_capture.mm))
1372 kthread_use_mm(vm->async_ops.error_capture.mm);
1375 if (copy_to_user(address, &capture, sizeof(capture)))
1376 XE_WARN_ON("Copy to user failed");
1379 kthread_unuse_mm(vm->async_ops.error_capture.mm);
1380 mmput(vm->async_ops.error_capture.mm);
1384 wake_up_all(&vm->async_ops.error_capture.wq);
1387 static void xe_vm_close(struct xe_vm *vm)
1389 down_write(&vm->lock);
1391 up_write(&vm->lock);
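/*
 * Close the VM and tear it down: kill and put its exec queues, destroy
 * all VMAs, scratch BOs and page tables, then drop the VM reference.
 */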
1394 void xe_vm_close_and_put(struct xe_vm *vm)
1396 LIST_HEAD(contested);
1397 struct xe_device *xe = vm->xe;
1398 struct xe_tile *tile;
1399 struct xe_vma *vma, *next_vma;
1400 struct drm_gpuva *gpuva, *next;
1403 XE_WARN_ON(vm->preempt.num_exec_queues);
1406 flush_async_ops(vm);
1407 if (xe_vm_in_compute_mode(vm))
1408 flush_work(&vm->preempt.rebind_work);
1410 for_each_tile(tile, xe, id) {
1412 xe_exec_queue_kill(vm->q[id]);
1413 xe_exec_queue_put(vm->q[id]);
1418 down_write(&vm->lock);
1419 xe_vm_lock(vm, false);
1420 drm_gpuvm_for_each_va_safe(gpuva, next, &vm->gpuvm) {
1421 vma = gpuva_to_vma(gpuva);
1423 if (xe_vma_has_no_bo(vma)) {
1424 down_read(&vm->userptr.notifier_lock);
1425 vma->gpuva.flags |= XE_VMA_DESTROYED;
1426 up_read(&vm->userptr.notifier_lock);
1429 xe_vm_remove_vma(vm, vma);
1431 /* easy case: the VMA can be removed and destroyed immediately */
1432 if (xe_vma_has_no_bo(vma) || xe_vma_bo(vma)->vm) {
1433 list_del_init(&vma->combined_links.rebind);
1434 xe_vma_destroy(vma, NULL);
1438 list_move_tail(&vma->combined_links.destroy, &contested);
1439 vma->gpuva.flags |= XE_VMA_DESTROYED;
1443 * All vm operations will add shared fences to resv.
1444 * The only exception is eviction for a shared object,
1445 * but even so, the unbind when evicted would still
1446 * install a fence to resv. Hence it's safe to
1447 * destroy the pagetables immediately.
1449 for_each_tile(tile, xe, id) {
1450 if (vm->scratch_bo[id]) {
1453 xe_bo_unpin(vm->scratch_bo[id]);
1454 xe_bo_put(vm->scratch_bo[id]);
1455 for (i = 0; i < vm->pt_root[id]->level; i++)
1456 xe_pt_destroy(vm->scratch_pt[id][i], vm->flags,
1459 if (vm->pt_root[id]) {
1460 xe_pt_destroy(vm->pt_root[id], vm->flags, NULL);
1461 vm->pt_root[id] = NULL;
1467 * VM is now dead, cannot re-add nodes to vm->vmas if it's NULL
1468 * Since we hold a refcount to the bo, we can remove and free
1469 * the members safely without locking.
1471 list_for_each_entry_safe(vma, next_vma, &contested,
1472 combined_links.destroy) {
1473 list_del_init(&vma->combined_links.destroy);
1474 xe_vma_destroy_unlocked(vma);
1477 if (vm->async_ops.error_capture.addr)
1478 wake_up_all(&vm->async_ops.error_capture.wq);
1480 XE_WARN_ON(!list_empty(&vm->extobj.list));
1481 up_write(&vm->lock);
1483 mutex_lock(&xe->usm.lock);
1484 if (vm->flags & XE_VM_FLAG_FAULT_MODE)
1485 xe->usm.num_vm_in_fault_mode--;
1486 else if (!(vm->flags & XE_VM_FLAG_MIGRATION))
1487 xe->usm.num_vm_in_non_fault_mode--;
1488 mutex_unlock(&xe->usm.lock);
1490 for_each_tile(tile, xe, id)
1491 xe_range_fence_tree_fini(&vm->rftree[id]);
1496 static void vm_destroy_work_func(struct work_struct *w)
1499 container_of(w, struct xe_vm, destroy_work);
1500 struct xe_device *xe = vm->xe;
1501 struct xe_tile *tile;
1505 /* xe_vm_close_and_put was not called? */
1506 XE_WARN_ON(vm->size);
1508 if (!(vm->flags & XE_VM_FLAG_MIGRATION)) {
1509 xe_device_mem_access_put(xe);
1511 if (xe->info.has_asid) {
1512 mutex_lock(&xe->usm.lock);
1513 lookup = xa_erase(&xe->usm.asid_to_vm, vm->usm.asid);
1514 XE_WARN_ON(lookup != vm);
1515 mutex_unlock(&xe->usm.lock);
1519 for_each_tile(tile, xe, id)
1520 XE_WARN_ON(vm->pt_root[id]);
1522 trace_xe_vm_free(vm);
1523 dma_fence_put(vm->rebind_fence);
1527 static void xe_vm_free(struct drm_gpuvm *gpuvm)
1529 struct xe_vm *vm = container_of(gpuvm, struct xe_vm, gpuvm);
1531 /* To destroy the VM we need to be able to sleep */
1532 queue_work(system_unbound_wq, &vm->destroy_work);
1535 struct xe_vm *xe_vm_lookup(struct xe_file *xef, u32 id)
1539 mutex_lock(&xef->vm.lock);
1540 vm = xa_load(&xef->vm.xa, id);
1543 mutex_unlock(&xef->vm.lock);
1548 u64 xe_vm_pdp4_descriptor(struct xe_vm *vm, struct xe_tile *tile)
1550 return xe_pde_encode(vm->pt_root[tile->id]->bo, 0,
1554 static struct dma_fence *
1555 xe_vm_unbind_vma(struct xe_vma *vma, struct xe_exec_queue *q,
1556 struct xe_sync_entry *syncs, u32 num_syncs,
1557 bool first_op, bool last_op)
1559 struct xe_tile *tile;
1560 struct dma_fence *fence = NULL;
1561 struct dma_fence **fences = NULL;
1562 struct dma_fence_array *cf = NULL;
1563 struct xe_vm *vm = xe_vma_vm(vma);
1564 int cur_fence = 0, i;
1565 int number_tiles = hweight8(vma->tile_present);
1569 trace_xe_vma_unbind(vma);
1571 if (number_tiles > 1) {
1572 fences = kmalloc_array(number_tiles, sizeof(*fences),
1575 return ERR_PTR(-ENOMEM);
1578 for_each_tile(tile, vm->xe, id) {
1579 if (!(vma->tile_present & BIT(id)))
1582 fence = __xe_pt_unbind_vma(tile, vma, q, first_op ? syncs : NULL,
1583 first_op ? num_syncs : 0);
1584 if (IS_ERR(fence)) {
1585 err = PTR_ERR(fence);
1590 fences[cur_fence++] = fence;
1593 if (q && vm->pt_root[id] && !list_empty(&q->multi_gt_list))
1594 q = list_next_entry(q, multi_gt_list);
1598 cf = dma_fence_array_create(number_tiles, fences,
1599 vm->composite_fence_ctx,
1600 vm->composite_fence_seqno++,
1603 --vm->composite_fence_seqno;
1610 for (i = 0; i < num_syncs; i++)
1611 xe_sync_entry_signal(&syncs[i], NULL,
1612 cf ? &cf->base : fence);
1615 return cf ? &cf->base : !fence ? dma_fence_get_stub() : fence;
1620 /* FIXME: Rewind the previous binds? */
1621 dma_fence_put(fences[--cur_fence]);
1626 return ERR_PTR(err);
1629 static struct dma_fence *
1630 xe_vm_bind_vma(struct xe_vma *vma, struct xe_exec_queue *q,
1631 struct xe_sync_entry *syncs, u32 num_syncs,
1632 bool first_op, bool last_op)
1634 struct xe_tile *tile;
1635 struct dma_fence *fence;
1636 struct dma_fence **fences = NULL;
1637 struct dma_fence_array *cf = NULL;
1638 struct xe_vm *vm = xe_vma_vm(vma);
1639 int cur_fence = 0, i;
1640 int number_tiles = hweight8(vma->tile_mask);
1644 trace_xe_vma_bind(vma);
1646 if (number_tiles > 1) {
1647 fences = kmalloc_array(number_tiles, sizeof(*fences),
1650 return ERR_PTR(-ENOMEM);
1653 for_each_tile(tile, vm->xe, id) {
1654 if (!(vma->tile_mask & BIT(id)))
1657 fence = __xe_pt_bind_vma(tile, vma, q ? q : vm->q[id],
1658 first_op ? syncs : NULL,
1659 first_op ? num_syncs : 0,
1660 vma->tile_present & BIT(id));
1661 if (IS_ERR(fence)) {
1662 err = PTR_ERR(fence);
1667 fences[cur_fence++] = fence;
1670 if (q && vm->pt_root[id] && !list_empty(&q->multi_gt_list))
1671 q = list_next_entry(q, multi_gt_list);
1675 cf = dma_fence_array_create(number_tiles, fences,
1676 vm->composite_fence_ctx,
1677 vm->composite_fence_seqno++,
1680 --vm->composite_fence_seqno;
1687 for (i = 0; i < num_syncs; i++)
1688 xe_sync_entry_signal(&syncs[i], NULL,
1689 cf ? &cf->base : fence);
1692 return cf ? &cf->base : fence;
1697 /* FIXME: Rewind the previous binds? */
1698 dma_fence_put(fences[--cur_fence]);
1703 return ERR_PTR(err);
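/*
 * Fence returned to user syncs for asynchronous VM bind operations: it
 * signals, propagating any error, once the underlying bind/unbind fence
 * (wait_fence) has signaled.
 */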
1706 struct async_op_fence {
1707 struct dma_fence fence;
1708 struct dma_fence *wait_fence;
1709 struct dma_fence_cb cb;
1711 wait_queue_head_t wq;
1715 static const char *async_op_fence_get_driver_name(struct dma_fence *dma_fence)
1721 async_op_fence_get_timeline_name(struct dma_fence *dma_fence)
1723 return "async_op_fence";
1726 static const struct dma_fence_ops async_op_fence_ops = {
1727 .get_driver_name = async_op_fence_get_driver_name,
1728 .get_timeline_name = async_op_fence_get_timeline_name,
1731 static void async_op_fence_cb(struct dma_fence *fence, struct dma_fence_cb *cb)
1733 struct async_op_fence *afence =
1734 container_of(cb, struct async_op_fence, cb);
1736 afence->fence.error = afence->wait_fence->error;
1737 dma_fence_signal(&afence->fence);
1738 xe_vm_put(afence->vm);
1739 dma_fence_put(afence->wait_fence);
1740 dma_fence_put(&afence->fence);
1743 static void add_async_op_fence_cb(struct xe_vm *vm,
1744 struct dma_fence *fence,
1745 struct async_op_fence *afence)
1749 if (!xe_vm_no_dma_fences(vm)) {
1750 afence->started = true;
1752 wake_up_all(&afence->wq);
1755 afence->wait_fence = dma_fence_get(fence);
1756 afence->vm = xe_vm_get(vm);
1757 dma_fence_get(&afence->fence);
1758 ret = dma_fence_add_callback(fence, &afence->cb, async_op_fence_cb);
1759 if (ret == -ENOENT) {
1760 afence->fence.error = afence->wait_fence->error;
1761 dma_fence_signal(&afence->fence);
1765 dma_fence_put(afence->wait_fence);
1766 dma_fence_put(&afence->fence);
1768 XE_WARN_ON(ret && ret != -ENOENT);
1771 int xe_vm_async_fence_wait_start(struct dma_fence *fence)
1773 if (fence->ops == &async_op_fence_ops) {
1774 struct async_op_fence *afence =
1775 container_of(fence, struct async_op_fence, fence);
1777 XE_WARN_ON(xe_vm_no_dma_fences(afence->vm));
1780 return wait_event_interruptible(afence->wq, afence->started);
1786 static int __xe_vm_bind(struct xe_vm *vm, struct xe_vma *vma,
1787 struct xe_exec_queue *q, struct xe_sync_entry *syncs,
1788 u32 num_syncs, struct async_op_fence *afence,
1789 bool immediate, bool first_op, bool last_op)
1791 struct dma_fence *fence;
1793 xe_vm_assert_held(vm);
1796 fence = xe_vm_bind_vma(vma, q, syncs, num_syncs, first_op,
1799 return PTR_ERR(fence);
1803 XE_WARN_ON(!xe_vm_in_fault_mode(vm));
1805 fence = dma_fence_get_stub();
1807 for (i = 0; i < num_syncs; i++)
1808 xe_sync_entry_signal(&syncs[i], NULL, fence);
1812 add_async_op_fence_cb(vm, fence, afence);
1814 dma_fence_put(fence);
1818 static int xe_vm_bind(struct xe_vm *vm, struct xe_vma *vma, struct xe_exec_queue *q,
1819 struct xe_bo *bo, struct xe_sync_entry *syncs,
1820 u32 num_syncs, struct async_op_fence *afence,
1821 bool immediate, bool first_op, bool last_op)
1825 xe_vm_assert_held(vm);
1826 xe_bo_assert_held(bo);
1828 if (bo && immediate) {
1829 err = xe_bo_validate(bo, vm, true);
1834 return __xe_vm_bind(vm, vma, q, syncs, num_syncs, afence, immediate,
1838 static int xe_vm_unbind(struct xe_vm *vm, struct xe_vma *vma,
1839 struct xe_exec_queue *q, struct xe_sync_entry *syncs,
1840 u32 num_syncs, struct async_op_fence *afence,
1841 bool first_op, bool last_op)
1843 struct dma_fence *fence;
1845 xe_vm_assert_held(vm);
1846 xe_bo_assert_held(xe_vma_bo(vma));
1848 fence = xe_vm_unbind_vma(vma, q, syncs, num_syncs, first_op, last_op);
1850 return PTR_ERR(fence);
1852 add_async_op_fence_cb(vm, fence, afence);
1854 xe_vma_destroy(vma, fence);
1855 dma_fence_put(fence);
1860 static int vm_set_error_capture_address(struct xe_device *xe, struct xe_vm *vm,
1863 if (XE_IOCTL_DBG(xe, !value))
1866 if (XE_IOCTL_DBG(xe, !(vm->flags & XE_VM_FLAG_ASYNC_BIND_OPS)))
1869 if (XE_IOCTL_DBG(xe, vm->async_ops.error_capture.addr))
1872 vm->async_ops.error_capture.mm = current->mm;
1873 vm->async_ops.error_capture.addr = value;
1874 init_waitqueue_head(&vm->async_ops.error_capture.wq);
1879 typedef int (*xe_vm_set_property_fn)(struct xe_device *xe, struct xe_vm *vm,
1882 static const xe_vm_set_property_fn vm_set_property_funcs[] = {
1883 [XE_VM_PROPERTY_BIND_OP_ERROR_CAPTURE_ADDRESS] =
1884 vm_set_error_capture_address,
1887 static int vm_user_ext_set_property(struct xe_device *xe, struct xe_vm *vm,
1890 u64 __user *address = u64_to_user_ptr(extension);
1891 struct drm_xe_ext_vm_set_property ext;
1894 err = __copy_from_user(&ext, address, sizeof(ext));
1895 if (XE_IOCTL_DBG(xe, err))
1898 if (XE_IOCTL_DBG(xe, ext.property >=
1899 ARRAY_SIZE(vm_set_property_funcs)) ||
1900 XE_IOCTL_DBG(xe, ext.pad) ||
1901 XE_IOCTL_DBG(xe, ext.reserved[0] || ext.reserved[1]))
1904 return vm_set_property_funcs[ext.property](xe, vm, ext.value);
1907 typedef int (*xe_vm_user_extension_fn)(struct xe_device *xe, struct xe_vm *vm,
1910 static const xe_vm_user_extension_fn vm_user_extension_funcs[] = {
1911 [XE_VM_EXTENSION_SET_PROPERTY] = vm_user_ext_set_property,
1914 #define MAX_USER_EXTENSIONS 16
1915 static int vm_user_extensions(struct xe_device *xe, struct xe_vm *vm,
1916 u64 extensions, int ext_number)
1918 u64 __user *address = u64_to_user_ptr(extensions);
1919 struct xe_user_extension ext;
1922 if (XE_IOCTL_DBG(xe, ext_number >= MAX_USER_EXTENSIONS))
1925 err = __copy_from_user(&ext, address, sizeof(ext));
1926 if (XE_IOCTL_DBG(xe, err))
1929 if (XE_IOCTL_DBG(xe, ext.pad) ||
1930 XE_IOCTL_DBG(xe, ext.name >=
1931 ARRAY_SIZE(vm_user_extension_funcs)))
1934 err = vm_user_extension_funcs[ext.name](xe, vm, extensions);
1935 if (XE_IOCTL_DBG(xe, err))
1938 if (ext.next_extension)
1939 return vm_user_extensions(xe, vm, ext.next_extension,
1945 #define ALL_DRM_XE_VM_CREATE_FLAGS (DRM_XE_VM_CREATE_SCRATCH_PAGE | \
1946 DRM_XE_VM_CREATE_COMPUTE_MODE | \
1947 DRM_XE_VM_CREATE_ASYNC_BIND_OPS | \
1948 DRM_XE_VM_CREATE_FAULT_MODE)
1950 int xe_vm_create_ioctl(struct drm_device *dev, void *data,
1951 struct drm_file *file)
1953 struct xe_device *xe = to_xe_device(dev);
1954 struct xe_file *xef = to_xe_file(file);
1955 struct drm_xe_vm_create *args = data;
1961 if (XE_WA(xe_root_mmio_gt(xe), 14016763929))
1962 args->flags |= DRM_XE_VM_CREATE_SCRATCH_PAGE;
1964 if (XE_IOCTL_DBG(xe, args->flags & DRM_XE_VM_CREATE_FAULT_MODE &&
1965 !xe->info.supports_usm))
1968 if (XE_IOCTL_DBG(xe, args->reserved[0] || args->reserved[1]))
1971 if (XE_IOCTL_DBG(xe, args->flags & ~ALL_DRM_XE_VM_CREATE_FLAGS))
1974 if (XE_IOCTL_DBG(xe, args->flags & DRM_XE_VM_CREATE_SCRATCH_PAGE &&
1975 args->flags & DRM_XE_VM_CREATE_FAULT_MODE))
1978 if (XE_IOCTL_DBG(xe, args->flags & DRM_XE_VM_CREATE_COMPUTE_MODE &&
1979 args->flags & DRM_XE_VM_CREATE_FAULT_MODE))
1982 if (XE_IOCTL_DBG(xe, args->flags & DRM_XE_VM_CREATE_FAULT_MODE &&
1983 xe_device_in_non_fault_mode(xe)))
1986 if (XE_IOCTL_DBG(xe, !(args->flags & DRM_XE_VM_CREATE_FAULT_MODE) &&
1987 xe_device_in_fault_mode(xe)))
1990 if (args->flags & DRM_XE_VM_CREATE_SCRATCH_PAGE)
1991 flags |= XE_VM_FLAG_SCRATCH_PAGE;
1992 if (args->flags & DRM_XE_VM_CREATE_COMPUTE_MODE)
1993 flags |= XE_VM_FLAG_COMPUTE_MODE;
1994 if (args->flags & DRM_XE_VM_CREATE_ASYNC_BIND_OPS)
1995 flags |= XE_VM_FLAG_ASYNC_BIND_OPS;
1996 if (args->flags & DRM_XE_VM_CREATE_FAULT_MODE)
1997 flags |= XE_VM_FLAG_FAULT_MODE;
1999 vm = xe_vm_create(xe, flags);
2003 if (args->extensions) {
2004 err = vm_user_extensions(xe, vm, args->extensions, 0);
2005 if (XE_IOCTL_DBG(xe, err)) {
2006 xe_vm_close_and_put(vm);
2011 mutex_lock(&xef->vm.lock);
2012 err = xa_alloc(&xef->vm.xa, &id, vm, xa_limit_32b, GFP_KERNEL);
2013 mutex_unlock(&xef->vm.lock);
2015 xe_vm_close_and_put(vm);
2019 if (xe->info.has_asid) {
2020 mutex_lock(&xe->usm.lock);
2021 err = xa_alloc_cyclic(&xe->usm.asid_to_vm, &asid, vm,
2022 XA_LIMIT(0, XE_MAX_ASID - 1),
2023 &xe->usm.next_asid, GFP_KERNEL);
2024 mutex_unlock(&xe->usm.lock);
2026 xe_vm_close_and_put(vm);
2029 vm->usm.asid = asid;
2034 #if IS_ENABLED(CONFIG_DRM_XE_DEBUG_MEM)
2035 /* Warning: Security issue - never enable by default */
2036 args->reserved[0] = xe_bo_main_addr(vm->pt_root[0]->bo, XE_PAGE_SIZE);
2042 int xe_vm_destroy_ioctl(struct drm_device *dev, void *data,
2043 struct drm_file *file)
2045 struct xe_device *xe = to_xe_device(dev);
2046 struct xe_file *xef = to_xe_file(file);
2047 struct drm_xe_vm_destroy *args = data;
2051 if (XE_IOCTL_DBG(xe, args->pad) ||
2052 XE_IOCTL_DBG(xe, args->reserved[0] || args->reserved[1]))
2055 mutex_lock(&xef->vm.lock);
2056 vm = xa_load(&xef->vm.xa, args->vm_id);
2057 if (XE_IOCTL_DBG(xe, !vm))
2059 else if (XE_IOCTL_DBG(xe, vm->preempt.num_exec_queues))
2062 xa_erase(&xef->vm.xa, args->vm_id);
2063 mutex_unlock(&xef->vm.lock);
2066 xe_vm_close_and_put(vm);
2071 static const u32 region_to_mem_type[] = {
2077 static int xe_vm_prefetch(struct xe_vm *vm, struct xe_vma *vma,
2078 struct xe_exec_queue *q, u32 region,
2079 struct xe_sync_entry *syncs, u32 num_syncs,
2080 struct async_op_fence *afence, bool first_op,
2085 XE_WARN_ON(region >= ARRAY_SIZE(region_to_mem_type));
2087 if (!xe_vma_has_no_bo(vma)) {
2088 err = xe_bo_migrate(xe_vma_bo(vma), region_to_mem_type[region]);
2093 if (vma->tile_mask != (vma->tile_present & ~vma->usm.tile_invalidated)) {
2094 return xe_vm_bind(vm, vma, q, xe_vma_bo(vma), syncs, num_syncs,
2095 afence, true, first_op, last_op);
2099 /* Nothing to do, signal fences now */
2101 for (i = 0; i < num_syncs; i++)
2102 xe_sync_entry_signal(&syncs[i], NULL,
2103 dma_fence_get_stub());
2106 dma_fence_signal(&afence->fence);
2111 #define VM_BIND_OP(op) (op & 0xffff)
2113 struct ttm_buffer_object *xe_vm_ttm_bo(struct xe_vm *vm)
2115 int idx = vm->flags & XE_VM_FLAG_MIGRATION ?
2116 XE_VM_FLAG_TILE_ID(vm->flags) : 0;
2118 /* Safe to use index 0 as all BOs in the VM share a single dma-resv lock */
2119 return &vm->pt_root[idx]->bo->ttm;
2122 static void xe_vm_tv_populate(struct xe_vm *vm, struct ttm_validate_buffer *tv)
2125 tv->bo = xe_vm_ttm_bo(vm);
2128 static void vm_set_async_error(struct xe_vm *vm, int err)
2130 lockdep_assert_held(&vm->lock);
2131 vm->async_ops.error = err;
2134 static int vm_bind_ioctl_lookup_vma(struct xe_vm *vm, struct xe_bo *bo,
2135 u64 addr, u64 range, u32 op)
2137 struct xe_device *xe = vm->xe;
2139 bool async = !!(op & XE_VM_BIND_FLAG_ASYNC);
2141 lockdep_assert_held(&vm->lock);
2143 switch (VM_BIND_OP(op)) {
2144 case XE_VM_BIND_OP_MAP:
2145 case XE_VM_BIND_OP_MAP_USERPTR:
2146 vma = xe_vm_find_overlapping_vma(vm, addr, range);
2147 if (XE_IOCTL_DBG(xe, vma && !async))
2150 case XE_VM_BIND_OP_UNMAP:
2151 case XE_VM_BIND_OP_PREFETCH:
2152 vma = xe_vm_find_overlapping_vma(vm, addr, range);
2153 if (XE_IOCTL_DBG(xe, !vma))
2154 /* Not an actual error; the IOCTL cleans up and returns 0 */
2156 if (XE_IOCTL_DBG(xe, (xe_vma_start(vma) != addr ||
2157 xe_vma_end(vma) != addr + range) && !async))
2160 case XE_VM_BIND_OP_UNMAP_ALL:
2161 if (XE_IOCTL_DBG(xe, list_empty(&bo->ttm.base.gpuva.list)))
2162 /* Not an actual error; the IOCTL cleans up and returns 0 */
2166 XE_WARN_ON("NOT POSSIBLE");
2173 static void prep_vma_destroy(struct xe_vm *vm, struct xe_vma *vma,
2176 down_read(&vm->userptr.notifier_lock);
2177 vma->gpuva.flags |= XE_VMA_DESTROYED;
2178 up_read(&vm->userptr.notifier_lock);
2180 xe_vm_remove_vma(vm, vma);
2184 #define ULL unsigned long long
2186 #if IS_ENABLED(CONFIG_DRM_XE_DEBUG_VM)
2187 static void print_op(struct xe_device *xe, struct drm_gpuva_op *op)
2192 case DRM_GPUVA_OP_MAP:
2193 vm_dbg(&xe->drm, "MAP: addr=0x%016llx, range=0x%016llx",
2194 (ULL)op->map.va.addr, (ULL)op->map.va.range);
2196 case DRM_GPUVA_OP_REMAP:
2197 vma = gpuva_to_vma(op->remap.unmap->va);
2198 vm_dbg(&xe->drm, "REMAP:UNMAP: addr=0x%016llx, range=0x%016llx, keep=%d",
2199 (ULL)xe_vma_start(vma), (ULL)xe_vma_size(vma),
2200 op->remap.unmap->keep ? 1 : 0);
2203 "REMAP:PREV: addr=0x%016llx, range=0x%016llx",
2204 (ULL)op->remap.prev->va.addr,
2205 (ULL)op->remap.prev->va.range);
2208 "REMAP:NEXT: addr=0x%016llx, range=0x%016llx",
2209 (ULL)op->remap.next->va.addr,
2210 (ULL)op->remap.next->va.range);
2212 case DRM_GPUVA_OP_UNMAP:
2213 vma = gpuva_to_vma(op->unmap.va);
2214 vm_dbg(&xe->drm, "UNMAP: addr=0x%016llx, range=0x%016llx, keep=%d",
2215 (ULL)xe_vma_start(vma), (ULL)xe_vma_size(vma),
2216 op->unmap.keep ? 1 : 0);
2218 case DRM_GPUVA_OP_PREFETCH:
2219 vma = gpuva_to_vma(op->prefetch.va);
2220 vm_dbg(&xe->drm, "PREFETCH: addr=0x%016llx, range=0x%016llx",
2221 (ULL)xe_vma_start(vma), (ULL)xe_vma_size(vma));
2224 XE_WARN_ON("NOT POSSIBLE");
2228 static void print_op(struct xe_device *xe, struct drm_gpuva_op *op)
2234 * Create the operations list from the IOCTL arguments and set up operation fields
2235 * so the parse and commit steps are decoupled from the arguments. This step can fail.
2237 static struct drm_gpuva_ops *
2238 vm_bind_ioctl_ops_create(struct xe_vm *vm, struct xe_bo *bo,
2239 u64 bo_offset_or_userptr, u64 addr, u64 range,
2240 u32 operation, u8 tile_mask, u32 region)
2242 struct drm_gem_object *obj = bo ? &bo->ttm.base : NULL;
2243 struct drm_gpuva_ops *ops;
2244 struct drm_gpuva_op *__op;
2245 struct xe_vma_op *op;
2246 struct drm_gpuvm_bo *vm_bo;
2249 lockdep_assert_held_write(&vm->lock);
2251 vm_dbg(&vm->xe->drm,
2252 "op=%d, addr=0x%016llx, range=0x%016llx, bo_offset_or_userptr=0x%016llx",
2253 VM_BIND_OP(operation), (ULL)addr, (ULL)range,
2254 (ULL)bo_offset_or_userptr);
2256 switch (VM_BIND_OP(operation)) {
2257 case XE_VM_BIND_OP_MAP:
2258 case XE_VM_BIND_OP_MAP_USERPTR:
2259 ops = drm_gpuvm_sm_map_ops_create(&vm->gpuvm, addr, range,
2260 obj, bo_offset_or_userptr);
2264 drm_gpuva_for_each_op(__op, ops) {
2265 struct xe_vma_op *op = gpuva_op_to_vma_op(__op);
2267 op->tile_mask = tile_mask;
2269 operation & XE_VM_BIND_FLAG_IMMEDIATE;
2271 operation & XE_VM_BIND_FLAG_READONLY;
2272 op->map.is_null = operation & XE_VM_BIND_FLAG_NULL;
2275 case XE_VM_BIND_OP_UNMAP:
2276 ops = drm_gpuvm_sm_unmap_ops_create(&vm->gpuvm, addr, range);
2280 drm_gpuva_for_each_op(__op, ops) {
2281 struct xe_vma_op *op = gpuva_op_to_vma_op(__op);
2283 op->tile_mask = tile_mask;
2286 case XE_VM_BIND_OP_PREFETCH:
2287 ops = drm_gpuvm_prefetch_ops_create(&vm->gpuvm, addr, range);
2291 drm_gpuva_for_each_op(__op, ops) {
2292 struct xe_vma_op *op = gpuva_op_to_vma_op(__op);
2294 op->tile_mask = tile_mask;
2295 op->prefetch.region = region;
2298 case XE_VM_BIND_OP_UNMAP_ALL:
2301 err = xe_bo_lock(bo, true);
2303 return ERR_PTR(err);
2305 vm_bo = drm_gpuvm_bo_find(&vm->gpuvm, obj);
2309 ops = drm_gpuvm_bo_unmap_ops_create(vm_bo);
2310 drm_gpuvm_bo_put(vm_bo);
2315 drm_gpuva_for_each_op(__op, ops) {
2316 struct xe_vma_op *op = gpuva_op_to_vma_op(__op);
2318 op->tile_mask = tile_mask;
2322 XE_WARN_ON("NOT POSSIBLE");
2323 ops = ERR_PTR(-EINVAL);
2326 #ifdef TEST_VM_ASYNC_OPS_ERROR
2327 if (operation & FORCE_ASYNC_OP_ERROR) {
2328 op = list_first_entry_or_null(&ops->list, struct xe_vma_op,
2331 op->inject_error = true;
2336 drm_gpuva_for_each_op(__op, ops)
2337 print_op(vm->xe, __op);
2342 static struct xe_vma *new_vma(struct xe_vm *vm, struct drm_gpuva_op_map *op,
2343 u8 tile_mask, bool read_only, bool is_null)
2345 struct xe_bo *bo = op->gem.obj ? gem_to_xe_bo(op->gem.obj) : NULL;
2349 lockdep_assert_held_write(&vm->lock);
2352 err = xe_bo_lock(bo, true);
2354 return ERR_PTR(err);
2356 vma = xe_vma_create(vm, bo, op->gem.offset,
2357 op->va.addr, op->va.addr +
2358 op->va.range - 1, read_only, is_null,
2363 if (xe_vma_is_userptr(vma)) {
2364 err = xe_vma_userptr_pin_pages(vma);
2366 prep_vma_destroy(vm, vma, false);
2367 xe_vma_destroy_unlocked(vma);
2368 return ERR_PTR(err);
2370 } else if (!xe_vma_has_no_bo(vma) && !bo->vm) {
2371 vm_insert_extobj(vm, vma);
2372 err = add_preempt_fences(vm, bo);
2374 prep_vma_destroy(vm, vma, false);
2375 xe_vma_destroy_unlocked(vma);
2376 return ERR_PTR(err);
2383 static u64 xe_vma_max_pte_size(struct xe_vma *vma)
2385 if (vma->gpuva.flags & XE_VMA_PTE_1G)
2387 else if (vma->gpuva.flags & XE_VMA_PTE_2M)
2393 static u64 xe_vma_set_pte_size(struct xe_vma *vma, u64 size)
2397 vma->gpuva.flags |= XE_VMA_PTE_1G;
2400 vma->gpuva.flags |= XE_VMA_PTE_2M;
2407 static int xe_vma_op_commit(struct xe_vm *vm, struct xe_vma_op *op)
2411 lockdep_assert_held_write(&vm->lock);
2413 switch (op->base.op) {
2414 case DRM_GPUVA_OP_MAP:
2415 err |= xe_vm_insert_vma(vm, op->map.vma);
2417 op->flags |= XE_VMA_OP_COMMITTED;
2419 case DRM_GPUVA_OP_REMAP:
2420 prep_vma_destroy(vm, gpuva_to_vma(op->base.remap.unmap->va),
2422 op->flags |= XE_VMA_OP_COMMITTED;
2424 if (op->remap.prev) {
2425 err |= xe_vm_insert_vma(vm, op->remap.prev);
2427 op->flags |= XE_VMA_OP_PREV_COMMITTED;
2428 if (!err && op->remap.skip_prev)
2429 op->remap.prev = NULL;
2431 if (op->remap.next) {
2432 err |= xe_vm_insert_vma(vm, op->remap.next);
2434 op->flags |= XE_VMA_OP_NEXT_COMMITTED;
2435 if (!err && op->remap.skip_next)
2436 op->remap.next = NULL;
2439 /* Adjust for partial unbind after removing the VMA from the VM */
2441 op->base.remap.unmap->va->va.addr = op->remap.start;
2442 op->base.remap.unmap->va->va.range = op->remap.range;
2445 case DRM_GPUVA_OP_UNMAP:
2446 prep_vma_destroy(vm, gpuva_to_vma(op->base.unmap.va), true);
2447 op->flags |= XE_VMA_OP_COMMITTED;
2449 case DRM_GPUVA_OP_PREFETCH:
2450 op->flags |= XE_VMA_OP_COMMITTED;
2453 XE_WARN_ON("NOT POSSIBLE");
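/*
 * Parse a list of GPUVA ops into xe_vma_ops: create new VMAs for MAP and
 * REMAP prev/next ops, attach syncs and (for async binds) an
 * async_op_fence to the last op, link the ops onto @ops_list and commit
 * each one into the VM's VA tree.
 */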
2460 static int vm_bind_ioctl_ops_parse(struct xe_vm *vm, struct xe_exec_queue *q,
2461 struct drm_gpuva_ops *ops,
2462 struct xe_sync_entry *syncs, u32 num_syncs,
2463 struct list_head *ops_list, bool last,
2466 struct xe_vma_op *last_op = NULL;
2467 struct async_op_fence *fence = NULL;
2468 struct drm_gpuva_op *__op;
2471 lockdep_assert_held_write(&vm->lock);
2473 if (last && num_syncs && async) {
2476 fence = kmalloc(sizeof(*fence), GFP_KERNEL);
2480 seqno = q ? ++q->bind.fence_seqno : ++vm->async_ops.fence.seqno;
2481 dma_fence_init(&fence->fence, &async_op_fence_ops,
2482 &vm->async_ops.lock, q ? q->bind.fence_ctx :
2483 vm->async_ops.fence.context, seqno);
2485 if (!xe_vm_no_dma_fences(vm)) {
2487 fence->started = false;
2488 init_waitqueue_head(&fence->wq);
2492 drm_gpuva_for_each_op(__op, ops) {
2493 struct xe_vma_op *op = gpuva_op_to_vma_op(__op);
2494 bool first = list_empty(ops_list);
2496 XE_WARN_ON(!first && !async);
2498 INIT_LIST_HEAD(&op->link);
2499 list_add_tail(&op->link, ops_list);
2502 op->flags |= XE_VMA_OP_FIRST;
2503 op->num_syncs = num_syncs;
2509 switch (op->base.op) {
2510 case DRM_GPUVA_OP_MAP:
2514 vma = new_vma(vm, &op->base.map,
2515 op->tile_mask, op->map.read_only,
2525 case DRM_GPUVA_OP_REMAP:
2527 struct xe_vma *old =
2528 gpuva_to_vma(op->base.remap.unmap->va);
2530 op->remap.start = xe_vma_start(old);
2531 op->remap.range = xe_vma_size(old);
2533 if (op->base.remap.prev) {
2536 op->base.remap.unmap->va->flags &
2539 op->base.remap.unmap->va->flags &
2542 vma = new_vma(vm, op->base.remap.prev,
2543 op->tile_mask, read_only,
2550 op->remap.prev = vma;
2553 * Userptr creates a new SG mapping so
2554 * we must also rebind.
2556 op->remap.skip_prev = !xe_vma_is_userptr(old) &&
2557 IS_ALIGNED(xe_vma_end(vma),
2558 xe_vma_max_pte_size(old));
2559 if (op->remap.skip_prev) {
2560 xe_vma_set_pte_size(vma, xe_vma_max_pte_size(old));
2564 op->remap.start = xe_vma_end(vma);
2568 if (op->base.remap.next) {
2571 op->base.remap.unmap->va->flags &
2575 op->base.remap.unmap->va->flags &
2578 vma = new_vma(vm, op->base.remap.next,
2579 op->tile_mask, read_only,
2586 op->remap.next = vma;
2589 * Userptr creates a new SG mapping so
2590 * we must also rebind.
2592 op->remap.skip_next = !xe_vma_is_userptr(old) &&
2593 IS_ALIGNED(xe_vma_start(vma),
2594 xe_vma_max_pte_size(old));
2595 if (op->remap.skip_next) {
2596 xe_vma_set_pte_size(vma, xe_vma_max_pte_size(old));
2604 case DRM_GPUVA_OP_UNMAP:
2605 case DRM_GPUVA_OP_PREFETCH:
2609 XE_WARN_ON("NOT POSSIBLE");
2614 err = xe_vma_op_commit(vm, op);
2619 /* FIXME: Unhandled corner case */
2620 XE_WARN_ON(!last_op && last && !list_empty(ops_list));
2626 last_op->flags |= XE_VMA_OP_LAST;
2627 last_op->num_syncs = num_syncs;
2628 last_op->syncs = syncs;
2629 last_op->fence = fence;
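/*
 * Reserve the VM's (and, if present, the VMA's BO's) reservation objects,
 * then execute a single parsed bind/unbind/prefetch op. On -EAGAIN for a
 * userptr VMA the pages are re-pinned so the op can be retried.
 */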
2639 static int __xe_vma_op_execute(struct xe_vm *vm, struct xe_vma *vma,
2640 struct xe_vma_op *op)
2644 struct ttm_validate_buffer tv_bo, tv_vm;
2645 struct ww_acquire_ctx ww;
2649 lockdep_assert_held_write(&vm->lock);
2651 xe_vm_tv_populate(vm, &tv_vm);
2652 list_add_tail(&tv_vm.head, &objs);
2653 vbo = xe_vma_bo(vma);
2656 * An unbind can drop the last reference to the BO and
2657 * the BO is needed for ttm_eu_backoff_reservation so
2658 * take a reference here.
2663 tv_bo.bo = &vbo->ttm;
2664 tv_bo.num_shared = 1;
2665 list_add(&tv_bo.head, &objs);
2670 err = ttm_eu_reserve_buffers(&ww, &objs, true, &dups);
2676 xe_vm_assert_held(vm);
2677 xe_bo_assert_held(xe_vma_bo(vma));
2679 switch (op->base.op) {
2680 case DRM_GPUVA_OP_MAP:
2681 err = xe_vm_bind(vm, vma, op->q, xe_vma_bo(vma),
2682 op->syncs, op->num_syncs, op->fence,
2683 op->map.immediate || !xe_vm_in_fault_mode(vm),
2684 op->flags & XE_VMA_OP_FIRST,
2685 op->flags & XE_VMA_OP_LAST);
2687 case DRM_GPUVA_OP_REMAP:
2689 bool prev = !!op->remap.prev;
2690 bool next = !!op->remap.next;
2692 if (!op->remap.unmap_done) {
2694 vm->async_ops.munmap_rebind_inflight = true;
2695 vma->gpuva.flags |= XE_VMA_FIRST_REBIND;
2697 err = xe_vm_unbind(vm, vma, op->q, op->syncs,
2699 !prev && !next ? op->fence : NULL,
2700 op->flags & XE_VMA_OP_FIRST,
2701 op->flags & XE_VMA_OP_LAST && !prev &&
2705 op->remap.unmap_done = true;
2709 op->remap.prev->gpuva.flags |= XE_VMA_LAST_REBIND;
2710 err = xe_vm_bind(vm, op->remap.prev, op->q,
2711 xe_vma_bo(op->remap.prev), op->syncs,
2713 !next ? op->fence : NULL, true, false,
2714 op->flags & XE_VMA_OP_LAST && !next);
2715 op->remap.prev->gpuva.flags &= ~XE_VMA_LAST_REBIND;
2718 op->remap.prev = NULL;
2722 op->remap.next->gpuva.flags |= XE_VMA_LAST_REBIND;
2723 err = xe_vm_bind(vm, op->remap.next, op->q,
2724 xe_vma_bo(op->remap.next),
2725 op->syncs, op->num_syncs,
2726 op->fence, true, false,
2727 op->flags & XE_VMA_OP_LAST);
2728 op->remap.next->gpuva.flags &= ~XE_VMA_LAST_REBIND;
2731 op->remap.next = NULL;
2733 vm->async_ops.munmap_rebind_inflight = false;
2737 case DRM_GPUVA_OP_UNMAP:
2738 err = xe_vm_unbind(vm, vma, op->q, op->syncs,
2739 op->num_syncs, op->fence,
2740 op->flags & XE_VMA_OP_FIRST,
2741 op->flags & XE_VMA_OP_LAST);
2743 case DRM_GPUVA_OP_PREFETCH:
2744 err = xe_vm_prefetch(vm, vma, op->q, op->prefetch.region,
2745 op->syncs, op->num_syncs, op->fence,
2746 op->flags & XE_VMA_OP_FIRST,
2747 op->flags & XE_VMA_OP_LAST);
2750 XE_WARN_ON("NOT POSSIBLE");
2753 ttm_eu_backoff_reservation(&ww, &objs);
2754 if (err == -EAGAIN && xe_vma_is_userptr(vma)) {
2755 lockdep_assert_held_write(&vm->lock);
2756 err = xe_vma_userptr_pin_pages(vma);
2763 trace_xe_vma_fail(vma);
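/* Pick the VMA a parsed operation currently applies to and execute it. */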
2768 static int xe_vma_op_execute(struct xe_vm *vm, struct xe_vma_op *op)
2772 lockdep_assert_held_write(&vm->lock);
2774 #ifdef TEST_VM_ASYNC_OPS_ERROR
2775 if (op->inject_error) {
2776 op->inject_error = false;
2781 switch (op->base.op) {
2782 case DRM_GPUVA_OP_MAP:
2783 ret = __xe_vma_op_execute(vm, op->map.vma, op);
2785 case DRM_GPUVA_OP_REMAP:
2789 if (!op->remap.unmap_done)
2790 vma = gpuva_to_vma(op->base.remap.unmap->va);
2791 else if (op->remap.prev)
2792 vma = op->remap.prev;
2794 vma = op->remap.next;
2796 ret = __xe_vma_op_execute(vm, vma, op);
2799 case DRM_GPUVA_OP_UNMAP:
2800 ret = __xe_vma_op_execute(vm, gpuva_to_vma(op->base.unmap.va),
2803 case DRM_GPUVA_OP_PREFETCH:
2804 ret = __xe_vma_op_execute(vm,
2805 gpuva_to_vma(op->base.prefetch.va),
2809 XE_WARN_ON("NOT POSSIBLE");
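/*
 * Tear an operation down after it has run: release its sync entries, exec
 * queue and fence references, unlink it from the async pending list and
 * free its GPUVA ops.
 */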
2815 static void xe_vma_op_cleanup(struct xe_vm *vm, struct xe_vma_op *op)
2817 bool last = op->flags & XE_VMA_OP_LAST;
2820 while (op->num_syncs--)
2821 xe_sync_entry_cleanup(&op->syncs[op->num_syncs]);
2824 xe_exec_queue_put(op->q);
2826 dma_fence_put(&op->fence->fence);
2828 if (!list_empty(&op->link)) {
2829 spin_lock_irq(&vm->async_ops.lock);
2830 list_del(&op->link);
2831 spin_unlock_irq(&vm->async_ops.lock);
2834 drm_gpuva_ops_free(&vm->gpuvm, op->ops);
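/*
 * Undo the commit step of an operation that will not run: destroy VMAs the
 * op created and re-insert any VMAs it had removed or marked destroyed.
 */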
2839 static void xe_vma_op_unwind(struct xe_vm *vm, struct xe_vma_op *op,
2840 bool post_commit, bool prev_post_commit,
2841 bool next_post_commit)
2843 lockdep_assert_held_write(&vm->lock);
2845 switch (op->base.op) {
2846 case DRM_GPUVA_OP_MAP:
2848 prep_vma_destroy(vm, op->map.vma, post_commit);
2849 xe_vma_destroy_unlocked(op->map.vma);
2852 case DRM_GPUVA_OP_UNMAP:
2854 struct xe_vma *vma = gpuva_to_vma(op->base.unmap.va);
2857 down_read(&vm->userptr.notifier_lock);
2858 vma->gpuva.flags &= ~XE_VMA_DESTROYED;
2859 up_read(&vm->userptr.notifier_lock);
2861 xe_vm_insert_vma(vm, vma);
2865 case DRM_GPUVA_OP_REMAP:
2867 struct xe_vma *vma = gpuva_to_vma(op->base.remap.unmap->va);
2869 if (op->remap.prev) {
2870 prep_vma_destroy(vm, op->remap.prev, prev_post_commit);
2871 xe_vma_destroy_unlocked(op->remap.prev);
2873 if (op->remap.next) {
2874 prep_vma_destroy(vm, op->remap.next, next_post_commit);
2875 xe_vma_destroy_unlocked(op->remap.next);
2878 down_read(&vm->userptr.notifier_lock);
2879 vma->gpuva.flags &= ~XE_VMA_DESTROYED;
2880 up_read(&vm->userptr.notifier_lock);
2882 xe_vm_insert_vma(vm, vma);
2886 case DRM_GPUVA_OP_PREFETCH:
2890 XE_WARN_ON("NOT POSSIBLE");
2894 static struct xe_vma_op *next_vma_op(struct xe_vm *vm)
2896 return list_first_entry_or_null(&vm->async_ops.pending,
2897 struct xe_vma_op, link);
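/*
 * Async bind worker: pull operations off the pending list and execute each
 * one under the VM lock. A failure is recorded on the VM (and reported
 * through the user error capture address when one is registered) so the VM
 * can later be restarted; for a closed VM the remaining unmap/remap ops
 * only destroy their VMAs. The op's fence is signalled once it completes.
 */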
2900 static void xe_vma_op_work_func(struct work_struct *w)
2902 struct xe_vm *vm = container_of(w, struct xe_vm, async_ops.work);
2905 struct xe_vma_op *op;
2908 if (vm->async_ops.error && !xe_vm_is_closed(vm))
2911 spin_lock_irq(&vm->async_ops.lock);
2912 op = next_vma_op(vm);
2913 spin_unlock_irq(&vm->async_ops.lock);
2918 if (!xe_vm_is_closed(vm)) {
2919 down_write(&vm->lock);
2920 err = xe_vma_op_execute(vm, op);
2922 drm_warn(&vm->xe->drm,
2923 "Async VM op(%d) failed with %d",
2925 vm_set_async_error(vm, err);
2926 up_write(&vm->lock);
2928 if (vm->async_ops.error_capture.addr)
2929 vm_error_capture(vm, err, 0, 0, 0);
2932 up_write(&vm->lock);
2936 switch (op->base.op) {
2937 case DRM_GPUVA_OP_REMAP:
2938 vma = gpuva_to_vma(op->base.remap.unmap->va);
2939 trace_xe_vma_flush(vma);
2941 down_write(&vm->lock);
2942 xe_vma_destroy_unlocked(vma);
2943 up_write(&vm->lock);
2945 case DRM_GPUVA_OP_UNMAP:
2946 vma = gpuva_to_vma(op->base.unmap.va);
2947 trace_xe_vma_flush(vma);
2949 down_write(&vm->lock);
2950 xe_vma_destroy_unlocked(vma);
2951 up_write(&vm->lock);
2958 if (op->fence && !test_bit(DMA_FENCE_FLAG_SIGNALED_BIT,
2959 &op->fence->fence.flags)) {
2960 if (!xe_vm_no_dma_fences(vm)) {
2961 op->fence->started = true;
2962 wake_up_all(&op->fence->wq);
2964 dma_fence_signal(&op->fence->fence);
2968 xe_vma_op_cleanup(vm, op);
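/*
 * Execute a freshly parsed list of operations: a synchronous bind runs its
 * (single) op directly, while async binds attach the bind fence to the
 * syncs and splice the list onto the async worker's pending queue. On
 * failure the committed ops are unwound in reverse order.
 */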
2972 static int vm_bind_ioctl_ops_execute(struct xe_vm *vm,
2973 struct list_head *ops_list, bool async)
2975 struct xe_vma_op *op, *last_op, *next;
2978 lockdep_assert_held_write(&vm->lock);
2980 list_for_each_entry(op, ops_list, link)
2984 err = xe_vma_op_execute(vm, last_op);
2987 xe_vma_op_cleanup(vm, last_op);
2990 bool installed = false;
2992 for (i = 0; i < last_op->num_syncs; i++)
2993 installed |= xe_sync_entry_signal(&last_op->syncs[i],
2995 &last_op->fence->fence);
2996 if (!installed && last_op->fence)
2997 dma_fence_signal(&last_op->fence->fence);
2999 spin_lock_irq(&vm->async_ops.lock);
3000 list_splice_tail(ops_list, &vm->async_ops.pending);
3001 spin_unlock_irq(&vm->async_ops.lock);
3003 if (!vm->async_ops.error)
3004 queue_work(system_unbound_wq, &vm->async_ops.work);
3010 list_for_each_entry_reverse(op, ops_list, link)
3011 xe_vma_op_unwind(vm, op, op->flags & XE_VMA_OP_COMMITTED,
3012 op->flags & XE_VMA_OP_PREV_COMMITTED,
3013 op->flags & XE_VMA_OP_NEXT_COMMITTED);
3014 list_for_each_entry_safe(op, next, ops_list, link)
3015 xe_vma_op_cleanup(vm, op);
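/*
 * Unwind every GPUVA ops array built for a bind ioctl, newest first,
 * undoing whatever each operation had already committed.
 */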
3020 static void vm_bind_ioctl_ops_unwind(struct xe_vm *vm,
3021 struct drm_gpuva_ops **ops,
3026 for (i = num_ops_list - 1; i >= 0; --i) {
3027 struct drm_gpuva_ops *__ops = ops[i];
3028 struct drm_gpuva_op *__op;
3033 drm_gpuva_for_each_op_reverse(__op, __ops) {
3034 struct xe_vma_op *op = gpuva_op_to_vma_op(__op);
3036 xe_vma_op_unwind(vm, op,
3037 op->flags & XE_VMA_OP_COMMITTED,
3038 op->flags & XE_VMA_OP_PREV_COMMITTED,
3039 op->flags & XE_VMA_OP_NEXT_COMMITTED);
3042 drm_gpuva_ops_free(&vm->gpuvm, __ops);
3046 #ifdef TEST_VM_ASYNC_OPS_ERROR
3047 #define SUPPORTED_FLAGS \
3048 (FORCE_ASYNC_OP_ERROR | XE_VM_BIND_FLAG_ASYNC | \
3049 XE_VM_BIND_FLAG_READONLY | XE_VM_BIND_FLAG_IMMEDIATE | \
3050 XE_VM_BIND_FLAG_NULL | 0xffff)
3052 #define SUPPORTED_FLAGS \
3053 (XE_VM_BIND_FLAG_ASYNC | XE_VM_BIND_FLAG_READONLY | \
3054 XE_VM_BIND_FLAG_IMMEDIATE | XE_VM_BIND_FLAG_NULL | 0xffff)
3056 #define XE_64K_PAGE_MASK 0xffffull
3058 #define MAX_BINDS 512 /* FIXME: Picking random upper limit */
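/*
 * Copy the bind op array from user space (a single op is embedded in the
 * args struct) and validate flags, op combinations and page alignment
 * before any locks are taken.
 */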
3060 static int vm_bind_ioctl_check_args(struct xe_device *xe,
3061 struct drm_xe_vm_bind *args,
3062 struct drm_xe_vm_bind_op **bind_ops,
3068 if (XE_IOCTL_DBG(xe, args->extensions) ||
3069 XE_IOCTL_DBG(xe, !args->num_binds) ||
3070 XE_IOCTL_DBG(xe, args->num_binds > MAX_BINDS))
3073 if (args->num_binds > 1) {
3074 u64 __user *bind_user =
3075 u64_to_user_ptr(args->vector_of_binds);
3077 *bind_ops = kmalloc_array(args->num_binds, sizeof(struct drm_xe_vm_bind_op),
3078 GFP_KERNEL);
3082 err = __copy_from_user(*bind_ops, bind_user,
3083 sizeof(struct drm_xe_vm_bind_op) *
3085 if (XE_IOCTL_DBG(xe, err)) {
3090 *bind_ops = &args->bind;
3093 for (i = 0; i < args->num_binds; ++i) {
3094 u64 range = (*bind_ops)[i].range;
3095 u64 addr = (*bind_ops)[i].addr;
3096 u32 op = (*bind_ops)[i].op;
3097 u32 obj = (*bind_ops)[i].obj;
3098 u64 obj_offset = (*bind_ops)[i].obj_offset;
3099 u32 region = (*bind_ops)[i].region;
3100 bool is_null = op & XE_VM_BIND_FLAG_NULL;
3103 *async = !!(op & XE_VM_BIND_FLAG_ASYNC);
3104 } else if (XE_IOCTL_DBG(xe, !*async) ||
3105 XE_IOCTL_DBG(xe, !(op & XE_VM_BIND_FLAG_ASYNC)) ||
3106 XE_IOCTL_DBG(xe, VM_BIND_OP(op) ==
3107 XE_VM_BIND_OP_RESTART)) {
3112 if (XE_IOCTL_DBG(xe, !*async &&
3113 VM_BIND_OP(op) == XE_VM_BIND_OP_UNMAP_ALL)) {
3118 if (XE_IOCTL_DBG(xe, !*async &&
3119 VM_BIND_OP(op) == XE_VM_BIND_OP_PREFETCH)) {
3124 if (XE_IOCTL_DBG(xe, VM_BIND_OP(op) >
3125 XE_VM_BIND_OP_PREFETCH) ||
3126 XE_IOCTL_DBG(xe, op & ~SUPPORTED_FLAGS) ||
3127 XE_IOCTL_DBG(xe, obj && is_null) ||
3128 XE_IOCTL_DBG(xe, obj_offset && is_null) ||
3129 XE_IOCTL_DBG(xe, VM_BIND_OP(op) != XE_VM_BIND_OP_MAP &&
3131 XE_IOCTL_DBG(xe, !obj &&
3132 VM_BIND_OP(op) == XE_VM_BIND_OP_MAP &&
3134 XE_IOCTL_DBG(xe, !obj &&
3135 VM_BIND_OP(op) == XE_VM_BIND_OP_UNMAP_ALL) ||
3136 XE_IOCTL_DBG(xe, addr &&
3137 VM_BIND_OP(op) == XE_VM_BIND_OP_UNMAP_ALL) ||
3138 XE_IOCTL_DBG(xe, range &&
3139 VM_BIND_OP(op) == XE_VM_BIND_OP_UNMAP_ALL) ||
3140 XE_IOCTL_DBG(xe, obj &&
3141 VM_BIND_OP(op) == XE_VM_BIND_OP_MAP_USERPTR) ||
3142 XE_IOCTL_DBG(xe, obj &&
3143 VM_BIND_OP(op) == XE_VM_BIND_OP_PREFETCH) ||
3144 XE_IOCTL_DBG(xe, region &&
3145 VM_BIND_OP(op) != XE_VM_BIND_OP_PREFETCH) ||
3146 XE_IOCTL_DBG(xe, !(BIT(region) &
3147 xe->info.mem_region_mask)) ||
3148 XE_IOCTL_DBG(xe, obj &&
3149 VM_BIND_OP(op) == XE_VM_BIND_OP_UNMAP)) {
3154 if (XE_IOCTL_DBG(xe, obj_offset & ~PAGE_MASK) ||
3155 XE_IOCTL_DBG(xe, addr & ~PAGE_MASK) ||
3156 XE_IOCTL_DBG(xe, range & ~PAGE_MASK) ||
3157 XE_IOCTL_DBG(xe, !range && VM_BIND_OP(op) !=
3158 XE_VM_BIND_OP_RESTART &&
3159 VM_BIND_OP(op) != XE_VM_BIND_OP_UNMAP_ALL)) {
3168 if (args->num_binds > 1)
3173 int xe_vm_bind_ioctl(struct drm_device *dev, void *data, struct drm_file *file)
3175 struct xe_device *xe = to_xe_device(dev);
3176 struct xe_file *xef = to_xe_file(file);
3177 struct drm_xe_vm_bind *args = data;
3178 struct drm_xe_sync __user *syncs_user;
3179 struct xe_bo **bos = NULL;
3180 struct drm_gpuva_ops **ops = NULL;
3182 struct xe_exec_queue *q = NULL;
3184 struct xe_sync_entry *syncs = NULL;
3185 struct drm_xe_vm_bind_op *bind_ops;
3186 LIST_HEAD(ops_list);
3191 err = vm_bind_ioctl_check_args(xe, args, &bind_ops, &async);
3195 if (args->exec_queue_id) {
3196 q = xe_exec_queue_lookup(xef, args->exec_queue_id);
3197 if (XE_IOCTL_DBG(xe, !q)) {
3202 if (XE_IOCTL_DBG(xe, !(q->flags & EXEC_QUEUE_FLAG_VM))) {
3204 goto put_exec_queue;
3208 vm = xe_vm_lookup(xef, args->vm_id);
3209 if (XE_IOCTL_DBG(xe, !vm)) {
3211 goto put_exec_queue;
3214 err = down_write_killable(&vm->lock);
3218 if (XE_IOCTL_DBG(xe, xe_vm_is_closed_or_banned(vm))) {
3220 goto release_vm_lock;
3223 if (VM_BIND_OP(bind_ops[0].op) == XE_VM_BIND_OP_RESTART) {
3224 if (XE_IOCTL_DBG(xe, !(vm->flags & XE_VM_FLAG_ASYNC_BIND_OPS)))
3226 if (XE_IOCTL_DBG(xe, !err && args->num_syncs))
3228 if (XE_IOCTL_DBG(xe, !err && !vm->async_ops.error))
3232 trace_xe_vm_restart(vm);
3233 vm_set_async_error(vm, 0);
3235 queue_work(system_unbound_wq, &vm->async_ops.work);
3237 /* Rebinds may have been blocked, give worker a kick */
3238 if (xe_vm_in_compute_mode(vm))
3239 xe_vm_queue_rebind_worker(vm);
3242 goto release_vm_lock;
3245 if (XE_IOCTL_DBG(xe, !vm->async_ops.error &&
3246 async != !!(vm->flags & XE_VM_FLAG_ASYNC_BIND_OPS))) {
3248 goto release_vm_lock;
3251 for (i = 0; i < args->num_binds; ++i) {
3252 u64 range = bind_ops[i].range;
3253 u64 addr = bind_ops[i].addr;
3255 if (XE_IOCTL_DBG(xe, range > vm->size) ||
3256 XE_IOCTL_DBG(xe, addr > vm->size - range)) {
3258 goto release_vm_lock;
3261 if (bind_ops[i].tile_mask) {
3262 u64 valid_tiles = BIT(xe->info.tile_count) - 1;
3264 if (XE_IOCTL_DBG(xe, bind_ops[i].tile_mask &
3267 goto release_vm_lock;
3272 bos = kcalloc(args->num_binds, sizeof(*bos), GFP_KERNEL);
3275 goto release_vm_lock;
3278 ops = kcalloc(args->num_binds, sizeof(*ops), GFP_KERNEL);
3281 goto release_vm_lock;
3284 for (i = 0; i < args->num_binds; ++i) {
3285 struct drm_gem_object *gem_obj;
3286 u64 range = bind_ops[i].range;
3287 u64 addr = bind_ops[i].addr;
3288 u32 obj = bind_ops[i].obj;
3289 u64 obj_offset = bind_ops[i].obj_offset;
3294 gem_obj = drm_gem_object_lookup(file, obj);
3295 if (XE_IOCTL_DBG(xe, !gem_obj)) {
3299 bos[i] = gem_to_xe_bo(gem_obj);
3301 if (XE_IOCTL_DBG(xe, range > bos[i]->size) ||
3302 XE_IOCTL_DBG(xe, obj_offset >
3303 bos[i]->size - range)) {
3308 if (bos[i]->flags & XE_BO_INTERNAL_64K) {
3309 if (XE_IOCTL_DBG(xe, obj_offset &
3310 XE_64K_PAGE_MASK) ||
3311 XE_IOCTL_DBG(xe, addr & XE_64K_PAGE_MASK) ||
3312 XE_IOCTL_DBG(xe, range & XE_64K_PAGE_MASK)) {
3319 if (args->num_syncs) {
3320 syncs = kcalloc(args->num_syncs, sizeof(*syncs), GFP_KERNEL);
3327 syncs_user = u64_to_user_ptr(args->syncs);
3328 for (num_syncs = 0; num_syncs < args->num_syncs; num_syncs++) {
3329 err = xe_sync_entry_parse(xe, xef, &syncs[num_syncs],
3330 &syncs_user[num_syncs], false,
3331 xe_vm_no_dma_fences(vm));
3336 /* Do some error checking first to make the unwind easier */
3337 for (i = 0; i < args->num_binds; ++i) {
3338 u64 range = bind_ops[i].range;
3339 u64 addr = bind_ops[i].addr;
3340 u32 op = bind_ops[i].op;
3342 err = vm_bind_ioctl_lookup_vma(vm, bos[i], addr, range, op);
3347 for (i = 0; i < args->num_binds; ++i) {
3348 u64 range = bind_ops[i].range;
3349 u64 addr = bind_ops[i].addr;
3350 u32 op = bind_ops[i].op;
3351 u64 obj_offset = bind_ops[i].obj_offset;
3352 u8 tile_mask = bind_ops[i].tile_mask;
3353 u32 region = bind_ops[i].region;
3355 ops[i] = vm_bind_ioctl_ops_create(vm, bos[i], obj_offset,
3356 addr, range, op, tile_mask,
3358 if (IS_ERR(ops[i])) {
3359 err = PTR_ERR(ops[i]);
3364 err = vm_bind_ioctl_ops_parse(vm, q, ops[i], syncs, num_syncs,
3366 i == args->num_binds - 1,
3373 if (list_empty(&ops_list)) {
3378 err = vm_bind_ioctl_ops_execute(vm, &ops_list, async);
3379 up_write(&vm->lock);
3381 for (i = 0; i < args->num_binds; ++i)
3386 if (args->num_binds > 1)
3392 vm_bind_ioctl_ops_unwind(vm, ops, args->num_binds);
3394 for (i = 0; err == -ENODATA && i < num_syncs; i++)
3395 xe_sync_entry_signal(&syncs[i], NULL, dma_fence_get_stub());
3397 xe_sync_entry_cleanup(&syncs[num_syncs]);
3401 for (i = 0; i < args->num_binds; ++i)
3404 up_write(&vm->lock);
3409 xe_exec_queue_put(q);
3413 if (args->num_binds > 1)
3415 return err == -ENODATA ? 0 : err;
3419 * xe_vm_lock() - Lock the vm's dma_resv object
3420 * @vm: The struct xe_vm whose lock is to be locked
3421 * @intr: Whether to perform any wait interruptible
3423 * Return: 0 on success, -EINTR if @intr is true and the wait for a
3424 * contended lock was interrupted. If @intr is false, the function always returns 0.
3427 int xe_vm_lock(struct xe_vm *vm, bool intr)
3430 return dma_resv_lock_interruptible(xe_vm_resv(vm), NULL);
3432 return dma_resv_lock(xe_vm_resv(vm), NULL);
3436 * xe_vm_unlock() - Unlock the vm's dma_resv object
3437 * @vm: The struct xe_vm whose lock is to be released.
3439 * Unlock the vm's dma_resv object that was locked by xe_vm_lock().
3441 void xe_vm_unlock(struct xe_vm *vm)
3443 dma_resv_unlock(xe_vm_resv(vm));
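/*
 * Illustrative only (not part of the driver): a minimal sketch of the
 * expected xe_vm_lock()/xe_vm_unlock() caller pattern. The helper name
 * example_touch_vm() is hypothetical.
 *
 *	static int example_touch_vm(struct xe_vm *vm)
 *	{
 *		int err;
 *
 *		err = xe_vm_lock(vm, true);
 *		if (err)
 *			return err;	// -EINTR: interrupted while waiting
 *
 *		// ... access state protected by the vm's dma_resv ...
 *
 *		xe_vm_unlock(vm);
 *		return 0;
 *	}
 */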
3447 * xe_vm_invalidate_vma - invalidate GPU mappings for VMA without a lock
3448 * @vma: VMA to invalidate
3450 * Walks the page-table leaves and zeroes (memsets) the entries owned by this
3451 * VMA, invalidates the TLBs, and blocks until the TLB invalidation is complete.
3454 * Returns 0 for success, negative error code otherwise.
3456 int xe_vm_invalidate_vma(struct xe_vma *vma)
3458 struct xe_device *xe = xe_vma_vm(vma)->xe;
3459 struct xe_tile *tile;
3460 u32 tile_needs_invalidate = 0;
3461 int seqno[XE_MAX_TILES_PER_DEVICE];
3465 XE_WARN_ON(!xe_vm_in_fault_mode(xe_vma_vm(vma)));
3466 XE_WARN_ON(xe_vma_is_null(vma));
3467 trace_xe_vma_usm_invalidate(vma);
3469 /* Check that we don't race with page-table updates */
3470 if (IS_ENABLED(CONFIG_PROVE_LOCKING)) {
3471 if (xe_vma_is_userptr(vma)) {
3472 WARN_ON_ONCE(!mmu_interval_check_retry
3473 (&vma->userptr.notifier,
3474 vma->userptr.notifier_seq));
3475 WARN_ON_ONCE(!dma_resv_test_signaled(xe_vm_resv(xe_vma_vm(vma)),
3476 DMA_RESV_USAGE_BOOKKEEP));
3479 xe_bo_assert_held(xe_vma_bo(vma));
3483 for_each_tile(tile, xe, id) {
3484 if (xe_pt_zap_ptes(tile, vma)) {
3485 tile_needs_invalidate |= BIT(id);
3488 * FIXME: We potentially need to invalidate multiple
3489 * GTs within the tile
3491 seqno[id] = xe_gt_tlb_invalidation_vma(tile->primary_gt, NULL, vma);
3497 for_each_tile(tile, xe, id) {
3498 if (tile_needs_invalidate & BIT(id)) {
3499 ret = xe_gt_tlb_invalidation_wait(tile->primary_gt, seqno[id]);
3505 vma->usm.tile_invalidated = vma->tile_mask;
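/*
 * Debug helper: print the VM's root page-table address and, for every VMA,
 * its range and whether it is backed by a NULL mapping, a userptr, VRAM or
 * system memory.
 */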
3510 int xe_analyze_vm(struct drm_printer *p, struct xe_vm *vm, int gt_id)
3512 struct drm_gpuva *gpuva;
3516 if (!down_read_trylock(&vm->lock)) {
3517 drm_printf(p, " Failed to acquire VM lock to dump capture");
3520 if (vm->pt_root[gt_id]) {
3521 addr = xe_bo_addr(vm->pt_root[gt_id]->bo, 0, XE_PAGE_SIZE);
3522 is_vram = xe_bo_is_vram(vm->pt_root[gt_id]->bo);
3523 drm_printf(p, " VM root: A:0x%llx %s\n", addr,
3524 is_vram ? "VRAM" : "SYS");
3527 drm_gpuvm_for_each_va(gpuva, &vm->gpuvm) {
3528 struct xe_vma *vma = gpuva_to_vma(gpuva);
3529 bool is_userptr = xe_vma_is_userptr(vma);
3530 bool is_null = xe_vma_is_null(vma);
3534 } else if (is_userptr) {
3535 struct xe_res_cursor cur;
3537 if (vma->userptr.sg) {
3538 xe_res_first_sg(vma->userptr.sg, 0, XE_PAGE_SIZE,
3540 addr = xe_res_dma(&cur);
3545 addr = __xe_bo_addr(xe_vma_bo(vma), 0, XE_PAGE_SIZE);
3546 is_vram = xe_bo_is_vram(xe_vma_bo(vma));
3548 drm_printf(p, " [%016llx-%016llx] S:0x%016llx A:%016llx %s\n",
3549 xe_vma_start(vma), xe_vma_end(vma) - 1,
3551 addr, is_null ? "NULL" : is_userptr ? "USR" :
3552 is_vram ? "VRAM" : "SYS");