1 // SPDX-License-Identifier: MIT
3 * Copyright © 2021 Intel Corporation
8 #include <linux/dma-fence-array.h>
10 #include <drm/drm_exec.h>
11 #include <drm/drm_print.h>
12 #include <drm/ttm/ttm_execbuf_util.h>
13 #include <drm/ttm/ttm_tt.h>
14 #include <drm/xe_drm.h>
15 #include <linux/delay.h>
16 #include <linux/kthread.h>
18 #include <linux/swap.h>
21 #include "xe_device.h"
22 #include "xe_exec_queue.h"
24 #include "xe_gt_pagefault.h"
25 #include "xe_gt_tlb_invalidation.h"
26 #include "xe_migrate.h"
28 #include "xe_preempt_fence.h"
30 #include "xe_res_cursor.h"
33 #include "generated/xe_wa_oob.h"
36 #define TEST_VM_ASYNC_OPS_ERROR
38 static struct drm_gem_object *xe_vm_obj(struct xe_vm *vm)
40 return vm->gpuvm.r_obj;
44 * xe_vma_userptr_check_repin() - Advisory check for repin needed
45 * @vma: The userptr vma
47 * Check if the userptr vma has been invalidated since last successful
48 * repin. The check is advisory only and the function can be called
49 * without the vm->userptr.notifier_lock held. There is no guarantee that the
50 * vma userptr will remain valid after a lockless check, so typically
51 * the call needs to be followed by a proper check under the notifier_lock.
53 * Return: 0 if userptr vma is valid, -EAGAIN otherwise; repin recommended.
55 int xe_vma_userptr_check_repin(struct xe_vma *vma)
57 return mmu_interval_check_retry(&vma->userptr.notifier,
58 vma->userptr.notifier_seq) ?
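/*
 * Illustrative sketch (not part of the driver): a typical caller pairs the
 * advisory per-vma check with a repin under vm->lock and then re-validates
 * under the notifier_lock before trusting the result. The flow below only
 * uses helpers defined in this file; xe_vma_userptr_pin_pages() requires
 * vm->lock to be held:
 *
 *	if (xe_vma_userptr_check_repin(vma) == -EAGAIN) {
 *		err = xe_vma_userptr_pin_pages(vma);
 *		if (err)
 *			return err;
 *	}
 *	down_read(&vm->userptr.notifier_lock);
 *	err = __xe_vm_userptr_needs_repin(vm);
 *	up_read(&vm->userptr.notifier_lock);
 */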
62 int xe_vma_userptr_pin_pages(struct xe_vma *vma)
64 struct xe_vm *vm = xe_vma_vm(vma);
65 struct xe_device *xe = vm->xe;
66 const unsigned long num_pages = xe_vma_size(vma) >> PAGE_SHIFT;
68 bool in_kthread = !current->mm;
69 unsigned long notifier_seq;
71 bool read_only = xe_vma_read_only(vma);
73 lockdep_assert_held(&vm->lock);
74 XE_WARN_ON(!xe_vma_is_userptr(vma));
76 if (vma->gpuva.flags & XE_VMA_DESTROYED)
79 notifier_seq = mmu_interval_read_begin(&vma->userptr.notifier);
80 if (notifier_seq == vma->userptr.notifier_seq)
83 pages = kvmalloc_array(num_pages, sizeof(*pages), GFP_KERNEL);
87 if (vma->userptr.sg) {
88 dma_unmap_sgtable(xe->drm.dev,
90 read_only ? DMA_TO_DEVICE :
91 DMA_BIDIRECTIONAL, 0);
92 sg_free_table(vma->userptr.sg);
93 vma->userptr.sg = NULL;
98 if (!mmget_not_zero(vma->userptr.notifier.mm)) {
102 kthread_use_mm(vma->userptr.notifier.mm);
105 while (pinned < num_pages) {
106 ret = get_user_pages_fast(xe_vma_userptr(vma) +
109 read_only ? 0 : FOLL_WRITE,
122 kthread_unuse_mm(vma->userptr.notifier.mm);
123 mmput(vma->userptr.notifier.mm);
129 ret = sg_alloc_table_from_pages_segment(&vma->userptr.sgt, pages,
131 (u64)pinned << PAGE_SHIFT,
132 xe_sg_segment_size(xe->drm.dev),
135 vma->userptr.sg = NULL;
138 vma->userptr.sg = &vma->userptr.sgt;
140 ret = dma_map_sgtable(xe->drm.dev, vma->userptr.sg,
141 read_only ? DMA_TO_DEVICE :
143 DMA_ATTR_SKIP_CPU_SYNC |
144 DMA_ATTR_NO_KERNEL_MAPPING);
146 sg_free_table(vma->userptr.sg);
147 vma->userptr.sg = NULL;
151 for (i = 0; i < pinned; ++i) {
154 set_page_dirty(pages[i]);
155 unlock_page(pages[i]);
158 mark_page_accessed(pages[i]);
162 release_pages(pages, pinned);
166 vma->userptr.notifier_seq = notifier_seq;
167 if (xe_vma_userptr_check_repin(vma) == -EAGAIN)
171 return ret < 0 ? ret : 0;
174 static bool preempt_fences_waiting(struct xe_vm *vm)
176 struct xe_exec_queue *q;
178 lockdep_assert_held(&vm->lock);
179 xe_vm_assert_held(vm);
181 list_for_each_entry(q, &vm->preempt.exec_queues, compute.link) {
182 if (!q->compute.pfence ||
183 (q->compute.pfence && test_bit(DMA_FENCE_FLAG_ENABLE_SIGNAL_BIT,
184 &q->compute.pfence->flags))) {
192 static void free_preempt_fences(struct list_head *list)
194 struct list_head *link, *next;
196 list_for_each_safe(link, next, list)
197 xe_preempt_fence_free(to_preempt_fence_from_link(link));
200 static int alloc_preempt_fences(struct xe_vm *vm, struct list_head *list,
203 lockdep_assert_held(&vm->lock);
204 xe_vm_assert_held(vm);
206 if (*count >= vm->preempt.num_exec_queues)
209 for (; *count < vm->preempt.num_exec_queues; ++(*count)) {
210 struct xe_preempt_fence *pfence = xe_preempt_fence_alloc();
213 return PTR_ERR(pfence);
215 list_move_tail(xe_preempt_fence_link(pfence), list);
221 static int wait_for_existing_preempt_fences(struct xe_vm *vm)
223 struct xe_exec_queue *q;
225 xe_vm_assert_held(vm);
227 list_for_each_entry(q, &vm->preempt.exec_queues, compute.link) {
228 if (q->compute.pfence) {
229 long timeout = dma_fence_wait(q->compute.pfence, false);
233 dma_fence_put(q->compute.pfence);
234 q->compute.pfence = NULL;
241 static bool xe_vm_is_idle(struct xe_vm *vm)
243 struct xe_exec_queue *q;
245 xe_vm_assert_held(vm);
246 list_for_each_entry(q, &vm->preempt.exec_queues, compute.link) {
247 if (!xe_exec_queue_is_idle(q))
254 static void arm_preempt_fences(struct xe_vm *vm, struct list_head *list)
256 struct list_head *link;
257 struct xe_exec_queue *q;
259 list_for_each_entry(q, &vm->preempt.exec_queues, compute.link) {
260 struct dma_fence *fence;
263 XE_WARN_ON(link == list);
265 fence = xe_preempt_fence_arm(to_preempt_fence_from_link(link),
266 q, q->compute.context,
268 dma_fence_put(q->compute.pfence);
269 q->compute.pfence = fence;
273 static int add_preempt_fences(struct xe_vm *vm, struct xe_bo *bo)
275 struct xe_exec_queue *q;
278 err = xe_bo_lock(bo, true);
282 err = dma_resv_reserve_fences(bo->ttm.base.resv, vm->preempt.num_exec_queues);
286 list_for_each_entry(q, &vm->preempt.exec_queues, compute.link)
287 if (q->compute.pfence) {
288 dma_resv_add_fence(bo->ttm.base.resv,
290 DMA_RESV_USAGE_BOOKKEEP);
299 * xe_vm_fence_all_extobjs() - Add a fence to vm's external objects' resv
301 * @fence: The fence to add.
302 * @usage: The resv usage for the fence.
304 * Loops over all of the vm's external object bindings and adds a @fence
305 * with the given @usage to all of the external object's reservation
308 void xe_vm_fence_all_extobjs(struct xe_vm *vm, struct dma_fence *fence,
309 enum dma_resv_usage usage)
313 list_for_each_entry(vma, &vm->extobj.list, extobj.link)
314 dma_resv_add_fence(xe_vma_bo(vma)->ttm.base.resv, fence, usage);
317 static void resume_and_reinstall_preempt_fences(struct xe_vm *vm)
319 struct xe_exec_queue *q;
321 lockdep_assert_held(&vm->lock);
322 xe_vm_assert_held(vm);
324 list_for_each_entry(q, &vm->preempt.exec_queues, compute.link) {
327 dma_resv_add_fence(xe_vm_resv(vm), q->compute.pfence,
328 DMA_RESV_USAGE_BOOKKEEP);
329 xe_vm_fence_all_extobjs(vm, q->compute.pfence,
330 DMA_RESV_USAGE_BOOKKEEP);
334 int xe_vm_add_compute_exec_queue(struct xe_vm *vm, struct xe_exec_queue *q)
336 struct drm_exec exec;
337 struct dma_fence *pfence;
341 XE_WARN_ON(!xe_vm_in_compute_mode(vm));
343 down_write(&vm->lock);
344 drm_exec_init(&exec, DRM_EXEC_INTERRUPTIBLE_WAIT);
345 drm_exec_until_all_locked(&exec) {
346 err = xe_vm_lock_dma_resv(vm, &exec, 1, true);
347 drm_exec_retry_on_contention(&exec);
352 pfence = xe_preempt_fence_create(q, q->compute.context,
359 list_add(&q->compute.link, &vm->preempt.exec_queues);
360 ++vm->preempt.num_exec_queues;
361 q->compute.pfence = pfence;
363 down_read(&vm->userptr.notifier_lock);
365 dma_resv_add_fence(xe_vm_resv(vm), pfence,
366 DMA_RESV_USAGE_BOOKKEEP);
368 xe_vm_fence_all_extobjs(vm, pfence, DMA_RESV_USAGE_BOOKKEEP);
371 * Check to see if a preemption on the VM or a userptr invalidation is in
372 * flight; if so, trigger this preempt fence to sync state with the other
373 * preempt fences on the VM.
375 wait = __xe_vm_userptr_needs_repin(vm) || preempt_fences_waiting(vm);
377 dma_fence_enable_sw_signaling(pfence);
379 up_read(&vm->userptr.notifier_lock);
382 drm_exec_fini(&exec);
389 * __xe_vm_userptr_needs_repin() - Check whether the VM has userptrs
390 * that need repinning.
393 * This function checks whether the VM has userptrs that need repinning,
394 * and provides a release-type barrier on the userptr.notifier_lock after
397 * Return: 0 if there are no userptrs needing repinning, -EAGAIN if there are.
399 int __xe_vm_userptr_needs_repin(struct xe_vm *vm)
401 lockdep_assert_held_read(&vm->userptr.notifier_lock);
403 return (list_empty(&vm->userptr.repin_list) &&
404 list_empty(&vm->userptr.invalidated)) ? 0 : -EAGAIN;
408 * xe_vm_lock_dma_resv() - Lock the vm dma_resv object and the dma_resv
409 * objects of the vm's external buffer objects.
411 * @exec: Pointer to a struct drm_exec locking context.
412 * @num_shared: Number of dma-fence slots to reserve in the locked objects.
413 * @lock_vm: Lock also the vm's dma_resv.
415 * Locks the vm dma-resv objects and all the dma-resv objects of the
416 * buffer objects on the vm external object list.
418 * Return: 0 on success, negative error code on error. In particular, -EINTR
419 * or -ERESTARTSYS may be returned if an interruptible wait is interrupted by a signal.
421 int xe_vm_lock_dma_resv(struct xe_vm *vm, struct drm_exec *exec,
422 unsigned int num_shared, bool lock_vm)
424 struct xe_vma *vma, *next;
427 lockdep_assert_held(&vm->lock);
430 err = drm_exec_prepare_obj(exec, xe_vm_obj(vm), num_shared);
435 list_for_each_entry(vma, &vm->extobj.list, extobj.link) {
436 err = drm_exec_prepare_obj(exec, &xe_vma_bo(vma)->ttm.base, num_shared);
441 spin_lock(&vm->notifier.list_lock);
442 list_for_each_entry_safe(vma, next, &vm->notifier.rebind_list,
443 notifier.rebind_link) {
444 xe_bo_assert_held(xe_vma_bo(vma));
446 list_del_init(&vma->notifier.rebind_link);
447 if (vma->tile_present && !(vma->gpuva.flags & XE_VMA_DESTROYED))
448 list_move_tail(&vma->combined_links.rebind,
451 spin_unlock(&vm->notifier.list_lock);
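/*
 * Illustrative sketch (not part of the driver): xe_vm_lock_dma_resv() is
 * intended to be called from inside a drm_exec_until_all_locked() loop with
 * contention retry, as done in xe_vm_add_compute_exec_queue() below:
 *
 *	drm_exec_init(&exec, DRM_EXEC_INTERRUPTIBLE_WAIT);
 *	drm_exec_until_all_locked(&exec) {
 *		err = xe_vm_lock_dma_resv(vm, &exec, num_shared, true);
 *		drm_exec_retry_on_contention(&exec);
 *		if (err)
 *			break;
 *	}
 *	...
 *	drm_exec_fini(&exec);
 */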
456 #define XE_VM_REBIND_RETRY_TIMEOUT_MS 1000
458 static void xe_vm_kill(struct xe_vm *vm)
460 struct xe_exec_queue *q;
462 lockdep_assert_held(&vm->lock);
464 xe_vm_lock(vm, false);
465 vm->flags |= XE_VM_FLAG_BANNED;
466 trace_xe_vm_kill(vm);
468 list_for_each_entry(q, &vm->preempt.exec_queues, compute.link)
472 /* TODO: Inform user the VM is banned */
476 * xe_vm_validate_should_retry() - Whether to retry after a validate error.
477 * @exec: The drm_exec object used for locking before validation.
478 * @err: The error returned from ttm_bo_validate().
479 * @end: A ktime_t cookie that should be set to 0 before first use and
480 * that should be reused on subsequent calls.
482 * With multiple active VMs, under memory pressure, it is possible that
483 * ttm_bo_validate() runs into -EDEADLK and in such a case returns -ENOMEM.
484 * Until ttm properly handles locking in such scenarios, the best thing the
485 * driver can do is retry with a timeout. Check if that is necessary, and
486 * if so unlock the drm_exec's objects while keeping the ticket to prepare
489 * Return: true if a retry after drm_exec_init() is recommended;
492 bool xe_vm_validate_should_retry(struct drm_exec *exec, int err, ktime_t *end)
500 *end = *end ? : ktime_add_ms(cur, XE_VM_REBIND_RETRY_TIMEOUT_MS);
501 if (!ktime_before(cur, *end))
505 * We would like to keep the ticket here with
506 * drm_exec_unlock_all(), but WW mutex asserts currently
507 * stop us from that. In any case this function could go away
508 * with proper TTM -EDEADLK handling.
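/*
 * Illustrative sketch (not part of the driver): the intended retry pattern
 * around validation, mirroring preempt_rebind_work_func() below. The "end"
 * cookie starts at 0 and is managed by xe_vm_validate_should_retry();
 * lock_and_validate() is a hypothetical stand-in for the caller's locking
 * and xe_bo_validate() calls.
 *
 *	ktime_t end = 0;
 *	int err;
 *
 * retry:
 *	drm_exec_init(&exec, DRM_EXEC_INTERRUPTIBLE_WAIT);
 *	drm_exec_until_all_locked(&exec) {
 *		err = lock_and_validate(&exec, vm);
 *		drm_exec_retry_on_contention(&exec);
 *		if (err && xe_vm_validate_should_retry(&exec, err, &end)) {
 *			drm_exec_fini(&exec);
 *			goto retry;
 *		}
 *		if (err)
 *			break;
 *	}
 *	drm_exec_fini(&exec);
 */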
516 static int xe_preempt_work_begin(struct drm_exec *exec, struct xe_vm *vm,
523 * 1 fence for each preempt fence plus a fence for each tile from a
526 err = drm_exec_prepare_obj(exec, xe_vm_obj(vm),
527 vm->preempt.num_exec_queues +
528 vm->xe->info.tile_count);
532 if (xe_vm_is_idle(vm)) {
533 vm->preempt.rebind_deactivated = true;
538 if (!preempt_fences_waiting(vm)) {
543 err = xe_vm_lock_dma_resv(vm, exec, vm->preempt.num_exec_queues, false);
547 err = wait_for_existing_preempt_fences(vm);
551 list_for_each_entry(vma, &vm->rebind_list, combined_links.rebind) {
552 if (xe_vma_has_no_bo(vma) ||
553 vma->gpuva.flags & XE_VMA_DESTROYED)
556 err = xe_bo_validate(xe_vma_bo(vma), vm, false);
564 static void preempt_rebind_work_func(struct work_struct *w)
566 struct xe_vm *vm = container_of(w, struct xe_vm, preempt.rebind_work);
567 struct drm_exec exec;
568 struct dma_fence *rebind_fence;
569 unsigned int fence_count = 0;
570 LIST_HEAD(preempt_fences);
574 int __maybe_unused tries = 0;
576 XE_WARN_ON(!xe_vm_in_compute_mode(vm));
577 trace_xe_vm_rebind_worker_enter(vm);
579 down_write(&vm->lock);
581 if (xe_vm_is_closed_or_banned(vm)) {
583 trace_xe_vm_rebind_worker_exit(vm);
588 if (vm->async_ops.error)
589 goto out_unlock_outer;
592 * Extreme corner where we exit a VM error state with a munmap style VM
593 * unbind inflight which requires a rebind. In this case the rebind
594 * needs to install some fences into the dma-resv slots. The worker that
595 * does this is already queued; let it make progress by dropping vm->lock
596 * and trying this again.
598 if (vm->async_ops.munmap_rebind_inflight) {
600 flush_work(&vm->async_ops.work);
604 if (xe_vm_userptr_check_repin(vm)) {
605 err = xe_vm_userptr_pin(vm);
607 goto out_unlock_outer;
610 drm_exec_init(&exec, DRM_EXEC_INTERRUPTIBLE_WAIT);
612 drm_exec_until_all_locked(&exec) {
615 err = xe_preempt_work_begin(&exec, vm, &done);
616 drm_exec_retry_on_contention(&exec);
617 if (err && xe_vm_validate_should_retry(&exec, err, &end)) {
619 goto out_unlock_outer;
625 err = alloc_preempt_fences(vm, &preempt_fences, &fence_count);
629 rebind_fence = xe_vm_rebind(vm, true);
630 if (IS_ERR(rebind_fence)) {
631 err = PTR_ERR(rebind_fence);
636 dma_fence_wait(rebind_fence, false);
637 dma_fence_put(rebind_fence);
640 /* Wait on munmap style VM unbinds */
641 wait = dma_resv_wait_timeout(xe_vm_resv(vm),
642 DMA_RESV_USAGE_KERNEL,
643 false, MAX_SCHEDULE_TIMEOUT);
649 #define retry_required(__tries, __vm) \
650 (IS_ENABLED(CONFIG_DRM_XE_USERPTR_INVAL_INJECT) ? \
651 (!(__tries)++ || __xe_vm_userptr_needs_repin(__vm)) : \
652 __xe_vm_userptr_needs_repin(__vm))
654 down_read(&vm->userptr.notifier_lock);
655 if (retry_required(tries, vm)) {
656 up_read(&vm->userptr.notifier_lock);
661 #undef retry_required
663 spin_lock(&vm->xe->ttm.lru_lock);
664 ttm_lru_bulk_move_tail(&vm->lru_bulk_move);
665 spin_unlock(&vm->xe->ttm.lru_lock);
667 /* Point of no return. */
668 arm_preempt_fences(vm, &preempt_fences);
669 resume_and_reinstall_preempt_fences(vm);
670 up_read(&vm->userptr.notifier_lock);
673 drm_exec_fini(&exec);
675 if (err == -EAGAIN) {
676 trace_xe_vm_rebind_worker_retry(vm);
681 drm_warn(&vm->xe->drm, "VM worker error: %d\n", err);
686 free_preempt_fences(&preempt_fences);
688 trace_xe_vm_rebind_worker_exit(vm);
691 static bool vma_userptr_invalidate(struct mmu_interval_notifier *mni,
692 const struct mmu_notifier_range *range,
693 unsigned long cur_seq)
695 struct xe_vma *vma = container_of(mni, struct xe_vma, userptr.notifier);
696 struct xe_vm *vm = xe_vma_vm(vma);
697 struct dma_resv_iter cursor;
698 struct dma_fence *fence;
701 XE_WARN_ON(!xe_vma_is_userptr(vma));
702 trace_xe_vma_userptr_invalidate(vma);
704 if (!mmu_notifier_range_blockable(range))
707 down_write(&vm->userptr.notifier_lock);
708 mmu_interval_set_seq(mni, cur_seq);
710 /* No need to stop gpu access if the userptr is not yet bound. */
711 if (!vma->userptr.initial_bind) {
712 up_write(&vm->userptr.notifier_lock);
717 * Tell exec and rebind worker they need to repin and rebind this
720 if (!xe_vm_in_fault_mode(vm) &&
721 !(vma->gpuva.flags & XE_VMA_DESTROYED) && vma->tile_present) {
722 spin_lock(&vm->userptr.invalidated_lock);
723 list_move_tail(&vma->userptr.invalidate_link,
724 &vm->userptr.invalidated);
725 spin_unlock(&vm->userptr.invalidated_lock);
728 up_write(&vm->userptr.notifier_lock);
731 * Preempt fences turn into schedule disables, pipeline these.
732 * Note that even in fault mode, we need to wait for binds and
733 * unbinds to complete, and those are attached as BOOKKEEP fences
736 dma_resv_iter_begin(&cursor, xe_vm_resv(vm),
737 DMA_RESV_USAGE_BOOKKEEP);
738 dma_resv_for_each_fence_unlocked(&cursor, fence)
739 dma_fence_enable_sw_signaling(fence);
740 dma_resv_iter_end(&cursor);
742 err = dma_resv_wait_timeout(xe_vm_resv(vm),
743 DMA_RESV_USAGE_BOOKKEEP,
744 false, MAX_SCHEDULE_TIMEOUT);
745 XE_WARN_ON(err <= 0);
747 if (xe_vm_in_fault_mode(vm)) {
748 err = xe_vm_invalidate_vma(vma);
752 trace_xe_vma_userptr_invalidate_complete(vma);
757 static const struct mmu_interval_notifier_ops vma_userptr_notifier_ops = {
758 .invalidate = vma_userptr_invalidate,
761 int xe_vm_userptr_pin(struct xe_vm *vm)
763 struct xe_vma *vma, *next;
765 LIST_HEAD(tmp_evict);
767 lockdep_assert_held_write(&vm->lock);
769 /* Collect invalidated userptrs */
770 spin_lock(&vm->userptr.invalidated_lock);
771 list_for_each_entry_safe(vma, next, &vm->userptr.invalidated,
772 userptr.invalidate_link) {
773 list_del_init(&vma->userptr.invalidate_link);
774 if (list_empty(&vma->combined_links.userptr))
775 list_move_tail(&vma->combined_links.userptr,
776 &vm->userptr.repin_list);
778 spin_unlock(&vm->userptr.invalidated_lock);
780 /* Pin and move to temporary list */
781 list_for_each_entry_safe(vma, next, &vm->userptr.repin_list,
782 combined_links.userptr) {
783 err = xe_vma_userptr_pin_pages(vma);
787 list_move_tail(&vma->combined_links.userptr, &tmp_evict);
790 /* Take lock and move to rebind_list for rebinding. */
791 err = dma_resv_lock_interruptible(xe_vm_resv(vm), NULL);
795 list_for_each_entry_safe(vma, next, &tmp_evict, combined_links.userptr)
796 list_move_tail(&vma->combined_links.rebind, &vm->rebind_list);
798 dma_resv_unlock(xe_vm_resv(vm));
803 list_splice_tail(&tmp_evict, &vm->userptr.repin_list);
809 * xe_vm_userptr_check_repin() - Check whether the VM might have userptrs
810 * that need repinning.
813 * This function does an advisory check for whether the VM has userptrs that
816 * Return: 0 if there are no indications of userptrs needing repinning,
817 * -EAGAIN if there are.
819 int xe_vm_userptr_check_repin(struct xe_vm *vm)
821 return (list_empty_careful(&vm->userptr.repin_list) &&
822 list_empty_careful(&vm->userptr.invalidated)) ? 0 : -EAGAIN;
825 static struct dma_fence *
826 xe_vm_bind_vma(struct xe_vma *vma, struct xe_exec_queue *q,
827 struct xe_sync_entry *syncs, u32 num_syncs,
828 bool first_op, bool last_op);
830 struct dma_fence *xe_vm_rebind(struct xe_vm *vm, bool rebind_worker)
832 struct dma_fence *fence = NULL;
833 struct xe_vma *vma, *next;
835 lockdep_assert_held(&vm->lock);
836 if (xe_vm_no_dma_fences(vm) && !rebind_worker)
839 xe_vm_assert_held(vm);
840 list_for_each_entry_safe(vma, next, &vm->rebind_list,
841 combined_links.rebind) {
842 XE_WARN_ON(!vma->tile_present);
844 list_del_init(&vma->combined_links.rebind);
845 dma_fence_put(fence);
847 trace_xe_vma_rebind_worker(vma);
849 trace_xe_vma_rebind_exec(vma);
850 fence = xe_vm_bind_vma(vma, NULL, NULL, 0, false, false);
858 static struct xe_vma *xe_vma_create(struct xe_vm *vm,
860 u64 bo_offset_or_userptr,
867 struct xe_tile *tile;
870 XE_WARN_ON(start >= end);
871 XE_WARN_ON(end >= vm->size);
873 if (!bo && !is_null) /* userptr */
874 vma = kzalloc(sizeof(*vma), GFP_KERNEL);
876 vma = kzalloc(sizeof(*vma) - sizeof(struct xe_userptr),
879 vma = ERR_PTR(-ENOMEM);
883 INIT_LIST_HEAD(&vma->combined_links.rebind);
884 INIT_LIST_HEAD(&vma->notifier.rebind_link);
885 INIT_LIST_HEAD(&vma->extobj.link);
887 INIT_LIST_HEAD(&vma->gpuva.gem.entry);
888 vma->gpuva.vm = &vm->gpuvm;
889 vma->gpuva.va.addr = start;
890 vma->gpuva.va.range = end - start + 1;
892 vma->gpuva.flags |= XE_VMA_READ_ONLY;
894 vma->gpuva.flags |= DRM_GPUVA_SPARSE;
897 vma->tile_mask = tile_mask;
899 for_each_tile(tile, vm->xe, id)
900 vma->tile_mask |= 0x1 << id;
903 if (vm->xe->info.platform == XE_PVC)
904 vma->gpuva.flags |= XE_VMA_ATOMIC_PTE_BIT;
907 struct drm_gpuvm_bo *vm_bo;
909 xe_bo_assert_held(bo);
911 vm_bo = drm_gpuvm_bo_obtain(vma->gpuva.vm, &bo->ttm.base);
914 return ERR_CAST(vm_bo);
917 drm_gem_object_get(&bo->ttm.base);
918 vma->gpuva.gem.obj = &bo->ttm.base;
919 vma->gpuva.gem.offset = bo_offset_or_userptr;
920 drm_gpuva_link(&vma->gpuva, vm_bo);
921 drm_gpuvm_bo_put(vm_bo);
922 } else /* userptr or null */ {
924 u64 size = end - start + 1;
927 INIT_LIST_HEAD(&vma->userptr.invalidate_link);
928 vma->gpuva.gem.offset = bo_offset_or_userptr;
930 err = mmu_interval_notifier_insert(&vma->userptr.notifier,
932 xe_vma_userptr(vma), size,
933 &vma_userptr_notifier_ops);
940 vma->userptr.notifier_seq = LONG_MAX;
949 static bool vm_remove_extobj(struct xe_vma *vma)
951 if (!list_empty(&vma->extobj.link)) {
952 xe_vma_vm(vma)->extobj.entries--;
953 list_del_init(&vma->extobj.link);
959 static void xe_vma_destroy_late(struct xe_vma *vma)
961 struct xe_vm *vm = xe_vma_vm(vma);
962 struct xe_device *xe = vm->xe;
963 bool read_only = xe_vma_read_only(vma);
965 if (xe_vma_is_userptr(vma)) {
966 if (vma->userptr.sg) {
967 dma_unmap_sgtable(xe->drm.dev,
969 read_only ? DMA_TO_DEVICE :
970 DMA_BIDIRECTIONAL, 0);
971 sg_free_table(vma->userptr.sg);
972 vma->userptr.sg = NULL;
976 * Since userptr pages are not pinned, we can't remove
977 * the notifier until we're sure the GPU is not accessing
980 mmu_interval_notifier_remove(&vma->userptr.notifier);
982 } else if (xe_vma_is_null(vma)) {
985 xe_bo_put(xe_vma_bo(vma));
991 static void vma_destroy_work_func(struct work_struct *w)
994 container_of(w, struct xe_vma, destroy_work);
996 xe_vma_destroy_late(vma);
999 static struct xe_vma *
1000 bo_has_vm_references_locked(struct xe_bo *bo, struct xe_vm *vm,
1001 struct xe_vma *ignore)
1003 struct drm_gpuvm_bo *vm_bo;
1004 struct drm_gpuva *va;
1005 struct drm_gem_object *obj = &bo->ttm.base;
1007 xe_bo_assert_held(bo);
1009 drm_gem_for_each_gpuvm_bo(vm_bo, obj) {
1010 drm_gpuvm_bo_for_each_va(va, vm_bo) {
1011 struct xe_vma *vma = gpuva_to_vma(va);
1013 if (vma != ignore && xe_vma_vm(vma) == vm)
1021 static bool bo_has_vm_references(struct xe_bo *bo, struct xe_vm *vm,
1022 struct xe_vma *ignore)
1026 xe_bo_lock(bo, false);
1027 ret = !!bo_has_vm_references_locked(bo, vm, ignore);
1033 static void __vm_insert_extobj(struct xe_vm *vm, struct xe_vma *vma)
1035 lockdep_assert_held_write(&vm->lock);
1037 list_add(&vma->extobj.link, &vm->extobj.list);
1038 vm->extobj.entries++;
1041 static void vm_insert_extobj(struct xe_vm *vm, struct xe_vma *vma)
1043 struct xe_bo *bo = xe_vma_bo(vma);
1045 lockdep_assert_held_write(&vm->lock);
1047 if (bo_has_vm_references(bo, vm, vma))
1050 __vm_insert_extobj(vm, vma);
1053 static void vma_destroy_cb(struct dma_fence *fence,
1054 struct dma_fence_cb *cb)
1056 struct xe_vma *vma = container_of(cb, struct xe_vma, destroy_cb);
1058 INIT_WORK(&vma->destroy_work, vma_destroy_work_func);
1059 queue_work(system_unbound_wq, &vma->destroy_work);
1062 static void xe_vma_destroy(struct xe_vma *vma, struct dma_fence *fence)
1064 struct xe_vm *vm = xe_vma_vm(vma);
1066 lockdep_assert_held_write(&vm->lock);
1067 XE_WARN_ON(!list_empty(&vma->combined_links.destroy));
1069 if (xe_vma_is_userptr(vma)) {
1070 XE_WARN_ON(!(vma->gpuva.flags & XE_VMA_DESTROYED));
1072 spin_lock(&vm->userptr.invalidated_lock);
1073 list_del(&vma->userptr.invalidate_link);
1074 spin_unlock(&vm->userptr.invalidated_lock);
1075 } else if (!xe_vma_is_null(vma)) {
1076 xe_bo_assert_held(xe_vma_bo(vma));
1078 spin_lock(&vm->notifier.list_lock);
1079 list_del(&vma->notifier.rebind_link);
1080 spin_unlock(&vm->notifier.list_lock);
1082 drm_gpuva_unlink(&vma->gpuva);
1084 if (!xe_vma_bo(vma)->vm && vm_remove_extobj(vma)) {
1085 struct xe_vma *other;
1087 other = bo_has_vm_references_locked(xe_vma_bo(vma), vm, NULL);
1090 __vm_insert_extobj(vm, other);
1094 xe_vm_assert_held(vm);
1096 int ret = dma_fence_add_callback(fence, &vma->destroy_cb,
1100 XE_WARN_ON(ret != -ENOENT);
1101 xe_vma_destroy_late(vma);
1104 xe_vma_destroy_late(vma);
1109 * xe_vm_prepare_vma() - drm_exec utility to lock a vma
1110 * @exec: The drm_exec object we're currently locking for.
1111 * @vma: The vma for which we want to lock the vm resv and any attached
1113 * @num_shared: The number of dma-fence slots to pre-allocate in the
1114 * objects' reservation objects.
1116 * Return: 0 on success, negative error code on error. In particular
1117 * may return -EDEADLK on WW transaction contention and -EINTR if
1118 * an interruptible wait is terminated by a signal.
1120 int xe_vm_prepare_vma(struct drm_exec *exec, struct xe_vma *vma,
1121 unsigned int num_shared)
1123 struct xe_vm *vm = xe_vma_vm(vma);
1124 struct xe_bo *bo = xe_vma_bo(vma);
1128 err = drm_exec_prepare_obj(exec, xe_vm_obj(vm), num_shared);
1129 if (!err && bo && !bo->vm)
1130 err = drm_exec_prepare_obj(exec, &bo->ttm.base, num_shared);
1135 static void xe_vma_destroy_unlocked(struct xe_vma *vma)
1137 struct drm_exec exec;
1140 drm_exec_init(&exec, 0);
1141 drm_exec_until_all_locked(&exec) {
1142 err = xe_vm_prepare_vma(&exec, vma, 0);
1143 drm_exec_retry_on_contention(&exec);
1144 if (XE_WARN_ON(err))
1148 xe_vma_destroy(vma, NULL);
1150 drm_exec_fini(&exec);
1154 xe_vm_find_overlapping_vma(struct xe_vm *vm, u64 start, u64 range)
1156 struct drm_gpuva *gpuva;
1158 lockdep_assert_held(&vm->lock);
1160 if (xe_vm_is_closed_or_banned(vm))
1163 XE_WARN_ON(start + range > vm->size);
1165 gpuva = drm_gpuva_find_first(&vm->gpuvm, start, range);
1167 return gpuva ? gpuva_to_vma(gpuva) : NULL;
1170 static int xe_vm_insert_vma(struct xe_vm *vm, struct xe_vma *vma)
1174 XE_WARN_ON(xe_vma_vm(vma) != vm);
1175 lockdep_assert_held(&vm->lock);
1177 err = drm_gpuva_insert(&vm->gpuvm, &vma->gpuva);
1178 XE_WARN_ON(err); /* Shouldn't be possible */
1183 static void xe_vm_remove_vma(struct xe_vm *vm, struct xe_vma *vma)
1185 XE_WARN_ON(xe_vma_vm(vma) != vm);
1186 lockdep_assert_held(&vm->lock);
1188 drm_gpuva_remove(&vma->gpuva);
1189 if (vm->usm.last_fault_vma == vma)
1190 vm->usm.last_fault_vma = NULL;
1193 static struct drm_gpuva_op *xe_vm_op_alloc(void)
1195 struct xe_vma_op *op;
1197 op = kzalloc(sizeof(*op), GFP_KERNEL);
1205 static void xe_vm_free(struct drm_gpuvm *gpuvm);
1207 static struct drm_gpuvm_ops gpuvm_ops = {
1208 .op_alloc = xe_vm_op_alloc,
1209 .vm_free = xe_vm_free,
1212 static void xe_vma_op_work_func(struct work_struct *w);
1213 static void vm_destroy_work_func(struct work_struct *w);
1215 struct xe_vm *xe_vm_create(struct xe_device *xe, u32 flags)
1217 struct drm_gem_object *vm_resv_obj;
1219 int err, number_tiles = 0;
1220 struct xe_tile *tile;
1223 vm = kzalloc(sizeof(*vm), GFP_KERNEL);
1225 return ERR_PTR(-ENOMEM);
1229 vm->size = 1ull << xe->info.va_bits;
1233 init_rwsem(&vm->lock);
1235 INIT_LIST_HEAD(&vm->rebind_list);
1237 INIT_LIST_HEAD(&vm->userptr.repin_list);
1238 INIT_LIST_HEAD(&vm->userptr.invalidated);
1239 init_rwsem(&vm->userptr.notifier_lock);
1240 spin_lock_init(&vm->userptr.invalidated_lock);
1242 INIT_LIST_HEAD(&vm->notifier.rebind_list);
1243 spin_lock_init(&vm->notifier.list_lock);
1245 INIT_LIST_HEAD(&vm->async_ops.pending);
1246 INIT_WORK(&vm->async_ops.work, xe_vma_op_work_func);
1247 spin_lock_init(&vm->async_ops.lock);
1249 INIT_WORK(&vm->destroy_work, vm_destroy_work_func);
1251 INIT_LIST_HEAD(&vm->preempt.exec_queues);
1252 vm->preempt.min_run_period_ms = 10; /* FIXME: Wire up to uAPI */
1254 for_each_tile(tile, xe, id)
1255 xe_range_fence_tree_init(&vm->rftree[id]);
1257 INIT_LIST_HEAD(&vm->extobj.list);
1259 if (!(flags & XE_VM_FLAG_MIGRATION))
1260 xe_device_mem_access_get(xe);
1262 vm_resv_obj = drm_gpuvm_resv_object_alloc(&xe->drm);
1268 drm_gpuvm_init(&vm->gpuvm, "Xe VM", 0, &xe->drm, vm_resv_obj,
1269 0, vm->size, 0, 0, &gpuvm_ops);
1271 drm_gem_object_put(vm_resv_obj);
1273 err = dma_resv_lock_interruptible(xe_vm_resv(vm), NULL);
1277 if (IS_DGFX(xe) && xe->info.vram_flags & XE_VRAM_FLAGS_NEED64K)
1278 vm->flags |= XE_VM_FLAG_64K;
1280 for_each_tile(tile, xe, id) {
1281 if (flags & XE_VM_FLAG_MIGRATION &&
1282 tile->id != XE_VM_FLAG_TILE_ID(flags))
1285 vm->pt_root[id] = xe_pt_create(vm, tile, xe->info.vm_max_level);
1286 if (IS_ERR(vm->pt_root[id])) {
1287 err = PTR_ERR(vm->pt_root[id]);
1288 vm->pt_root[id] = NULL;
1289 goto err_unlock_close;
1293 if (flags & XE_VM_FLAG_SCRATCH_PAGE) {
1294 for_each_tile(tile, xe, id) {
1295 if (!vm->pt_root[id])
1298 err = xe_pt_create_scratch(xe, tile, vm);
1300 goto err_unlock_close;
1302 vm->batch_invalidate_tlb = true;
1305 if (flags & XE_VM_FLAG_COMPUTE_MODE) {
1306 INIT_WORK(&vm->preempt.rebind_work, preempt_rebind_work_func);
1307 vm->flags |= XE_VM_FLAG_COMPUTE_MODE;
1308 vm->batch_invalidate_tlb = false;
1311 if (flags & XE_VM_FLAG_ASYNC_BIND_OPS) {
1312 vm->async_ops.fence.context = dma_fence_context_alloc(1);
1313 vm->flags |= XE_VM_FLAG_ASYNC_BIND_OPS;
1316 /* Fill pt_root after allocating scratch tables */
1317 for_each_tile(tile, xe, id) {
1318 if (!vm->pt_root[id])
1321 xe_pt_populate_empty(tile, vm, vm->pt_root[id]);
1323 dma_resv_unlock(xe_vm_resv(vm));
1325 /* Kernel migration VM shouldn't have a circular loop. */
1326 if (!(flags & XE_VM_FLAG_MIGRATION)) {
1327 for_each_tile(tile, xe, id) {
1328 struct xe_gt *gt = tile->primary_gt;
1329 struct xe_vm *migrate_vm;
1330 struct xe_exec_queue *q;
1332 if (!vm->pt_root[id])
1335 migrate_vm = xe_migrate_get_vm(tile->migrate);
1336 q = xe_exec_queue_create_class(xe, gt, migrate_vm,
1337 XE_ENGINE_CLASS_COPY,
1338 EXEC_QUEUE_FLAG_VM);
1339 xe_vm_put(migrate_vm);
1349 if (number_tiles > 1)
1350 vm->composite_fence_ctx = dma_fence_context_alloc(1);
1352 mutex_lock(&xe->usm.lock);
1353 if (flags & XE_VM_FLAG_FAULT_MODE)
1354 xe->usm.num_vm_in_fault_mode++;
1355 else if (!(flags & XE_VM_FLAG_MIGRATION))
1356 xe->usm.num_vm_in_non_fault_mode++;
1357 mutex_unlock(&xe->usm.lock);
1359 trace_xe_vm_create(vm);
1364 dma_resv_unlock(xe_vm_resv(vm));
1366 xe_vm_close_and_put(vm);
1367 return ERR_PTR(err);
1370 for_each_tile(tile, xe, id)
1371 xe_range_fence_tree_fini(&vm->rftree[id]);
1373 if (!(flags & XE_VM_FLAG_MIGRATION))
1374 xe_device_mem_access_put(xe);
1375 return ERR_PTR(err);
1378 static void flush_async_ops(struct xe_vm *vm)
1380 queue_work(system_unbound_wq, &vm->async_ops.work);
1381 flush_work(&vm->async_ops.work);
1384 static void vm_error_capture(struct xe_vm *vm, int err,
1385 u32 op, u64 addr, u64 size)
1387 struct drm_xe_vm_bind_op_error_capture capture;
1388 u64 __user *address =
1389 u64_to_user_ptr(vm->async_ops.error_capture.addr);
1390 bool in_kthread = !current->mm;
1392 capture.error = err;
1394 capture.addr = addr;
1395 capture.size = size;
1398 if (!mmget_not_zero(vm->async_ops.error_capture.mm))
1400 kthread_use_mm(vm->async_ops.error_capture.mm);
1403 if (copy_to_user(address, &capture, sizeof(capture)))
1404 drm_warn(&vm->xe->drm, "Copy to user failed");
1407 kthread_unuse_mm(vm->async_ops.error_capture.mm);
1408 mmput(vm->async_ops.error_capture.mm);
1412 wake_up_all(&vm->async_ops.error_capture.wq);
1415 static void xe_vm_close(struct xe_vm *vm)
1417 down_write(&vm->lock);
1419 up_write(&vm->lock);
1422 void xe_vm_close_and_put(struct xe_vm *vm)
1424 LIST_HEAD(contested);
1425 struct xe_device *xe = vm->xe;
1426 struct xe_tile *tile;
1427 struct xe_vma *vma, *next_vma;
1428 struct drm_gpuva *gpuva, *next;
1431 XE_WARN_ON(vm->preempt.num_exec_queues);
1434 flush_async_ops(vm);
1435 if (xe_vm_in_compute_mode(vm))
1436 flush_work(&vm->preempt.rebind_work);
1438 for_each_tile(tile, xe, id) {
1440 xe_exec_queue_kill(vm->q[id]);
1441 xe_exec_queue_put(vm->q[id]);
1446 down_write(&vm->lock);
1447 xe_vm_lock(vm, false);
1448 drm_gpuvm_for_each_va_safe(gpuva, next, &vm->gpuvm) {
1449 vma = gpuva_to_vma(gpuva);
1451 if (xe_vma_has_no_bo(vma)) {
1452 down_read(&vm->userptr.notifier_lock);
1453 vma->gpuva.flags |= XE_VMA_DESTROYED;
1454 up_read(&vm->userptr.notifier_lock);
1457 xe_vm_remove_vma(vm, vma);
1459 /* easy case, remove from VMA? */
1460 if (xe_vma_has_no_bo(vma) || xe_vma_bo(vma)->vm) {
1461 list_del_init(&vma->combined_links.rebind);
1462 xe_vma_destroy(vma, NULL);
1466 list_move_tail(&vma->combined_links.destroy, &contested);
1467 vma->gpuva.flags |= XE_VMA_DESTROYED;
1471 * All vm operations will add shared fences to resv.
1472 * The only exception is eviction for a shared object,
1473 * but even so, the unbind when evicted would still
1474 * install a fence to resv. Hence it's safe to
1475 * destroy the pagetables immediately.
1477 for_each_tile(tile, xe, id) {
1478 if (vm->scratch_bo[id]) {
1481 xe_bo_unpin(vm->scratch_bo[id]);
1482 xe_bo_put(vm->scratch_bo[id]);
1483 for (i = 0; i < vm->pt_root[id]->level; i++)
1484 xe_pt_destroy(vm->scratch_pt[id][i], vm->flags,
1487 if (vm->pt_root[id]) {
1488 xe_pt_destroy(vm->pt_root[id], vm->flags, NULL);
1489 vm->pt_root[id] = NULL;
1495 * VM is now dead, cannot re-add nodes to vm->vmas if it's NULL
1496 * Since we hold a refcount to the bo, we can remove and free
1497 * the members safely without locking.
1499 list_for_each_entry_safe(vma, next_vma, &contested,
1500 combined_links.destroy) {
1501 list_del_init(&vma->combined_links.destroy);
1502 xe_vma_destroy_unlocked(vma);
1505 if (vm->async_ops.error_capture.addr)
1506 wake_up_all(&vm->async_ops.error_capture.wq);
1508 XE_WARN_ON(!list_empty(&vm->extobj.list));
1509 up_write(&vm->lock);
1511 mutex_lock(&xe->usm.lock);
1512 if (vm->flags & XE_VM_FLAG_FAULT_MODE)
1513 xe->usm.num_vm_in_fault_mode--;
1514 else if (!(vm->flags & XE_VM_FLAG_MIGRATION))
1515 xe->usm.num_vm_in_non_fault_mode--;
1516 mutex_unlock(&xe->usm.lock);
1518 for_each_tile(tile, xe, id)
1519 xe_range_fence_tree_fini(&vm->rftree[id]);
1524 static void vm_destroy_work_func(struct work_struct *w)
1527 container_of(w, struct xe_vm, destroy_work);
1528 struct xe_device *xe = vm->xe;
1529 struct xe_tile *tile;
1533 /* xe_vm_close_and_put was not called? */
1534 XE_WARN_ON(vm->size);
1536 if (!(vm->flags & XE_VM_FLAG_MIGRATION)) {
1537 xe_device_mem_access_put(xe);
1539 if (xe->info.has_asid) {
1540 mutex_lock(&xe->usm.lock);
1541 lookup = xa_erase(&xe->usm.asid_to_vm, vm->usm.asid);
1542 XE_WARN_ON(lookup != vm);
1543 mutex_unlock(&xe->usm.lock);
1547 for_each_tile(tile, xe, id)
1548 XE_WARN_ON(vm->pt_root[id]);
1550 trace_xe_vm_free(vm);
1551 dma_fence_put(vm->rebind_fence);
1555 static void xe_vm_free(struct drm_gpuvm *gpuvm)
1557 struct xe_vm *vm = container_of(gpuvm, struct xe_vm, gpuvm);
1559 /* To destroy the VM we need to be able to sleep */
1560 queue_work(system_unbound_wq, &vm->destroy_work);
1563 struct xe_vm *xe_vm_lookup(struct xe_file *xef, u32 id)
1567 mutex_lock(&xef->vm.lock);
1568 vm = xa_load(&xef->vm.xa, id);
1571 mutex_unlock(&xef->vm.lock);
1576 u64 xe_vm_pdp4_descriptor(struct xe_vm *vm, struct xe_tile *tile)
1578 return xe_pde_encode(vm->pt_root[tile->id]->bo, 0,
1582 static struct dma_fence *
1583 xe_vm_unbind_vma(struct xe_vma *vma, struct xe_exec_queue *q,
1584 struct xe_sync_entry *syncs, u32 num_syncs,
1585 bool first_op, bool last_op)
1587 struct xe_tile *tile;
1588 struct dma_fence *fence = NULL;
1589 struct dma_fence **fences = NULL;
1590 struct dma_fence_array *cf = NULL;
1591 struct xe_vm *vm = xe_vma_vm(vma);
1592 int cur_fence = 0, i;
1593 int number_tiles = hweight8(vma->tile_present);
1597 trace_xe_vma_unbind(vma);
1599 if (number_tiles > 1) {
1600 fences = kmalloc_array(number_tiles, sizeof(*fences),
1603 return ERR_PTR(-ENOMEM);
1606 for_each_tile(tile, vm->xe, id) {
1607 if (!(vma->tile_present & BIT(id)))
1610 fence = __xe_pt_unbind_vma(tile, vma, q, first_op ? syncs : NULL,
1611 first_op ? num_syncs : 0);
1612 if (IS_ERR(fence)) {
1613 err = PTR_ERR(fence);
1618 fences[cur_fence++] = fence;
1621 if (q && vm->pt_root[id] && !list_empty(&q->multi_gt_list))
1622 q = list_next_entry(q, multi_gt_list);
1626 cf = dma_fence_array_create(number_tiles, fences,
1627 vm->composite_fence_ctx,
1628 vm->composite_fence_seqno++,
1631 --vm->composite_fence_seqno;
1638 for (i = 0; i < num_syncs; i++)
1639 xe_sync_entry_signal(&syncs[i], NULL,
1640 cf ? &cf->base : fence);
1643 return cf ? &cf->base : !fence ? dma_fence_get_stub() : fence;
1648 /* FIXME: Rewind the previous binds? */
1649 dma_fence_put(fences[--cur_fence]);
1654 return ERR_PTR(err);
1657 static struct dma_fence *
1658 xe_vm_bind_vma(struct xe_vma *vma, struct xe_exec_queue *q,
1659 struct xe_sync_entry *syncs, u32 num_syncs,
1660 bool first_op, bool last_op)
1662 struct xe_tile *tile;
1663 struct dma_fence *fence;
1664 struct dma_fence **fences = NULL;
1665 struct dma_fence_array *cf = NULL;
1666 struct xe_vm *vm = xe_vma_vm(vma);
1667 int cur_fence = 0, i;
1668 int number_tiles = hweight8(vma->tile_mask);
1672 trace_xe_vma_bind(vma);
1674 if (number_tiles > 1) {
1675 fences = kmalloc_array(number_tiles, sizeof(*fences),
1678 return ERR_PTR(-ENOMEM);
1681 for_each_tile(tile, vm->xe, id) {
1682 if (!(vma->tile_mask & BIT(id)))
1685 fence = __xe_pt_bind_vma(tile, vma, q ? q : vm->q[id],
1686 first_op ? syncs : NULL,
1687 first_op ? num_syncs : 0,
1688 vma->tile_present & BIT(id));
1689 if (IS_ERR(fence)) {
1690 err = PTR_ERR(fence);
1695 fences[cur_fence++] = fence;
1698 if (q && vm->pt_root[id] && !list_empty(&q->multi_gt_list))
1699 q = list_next_entry(q, multi_gt_list);
1703 cf = dma_fence_array_create(number_tiles, fences,
1704 vm->composite_fence_ctx,
1705 vm->composite_fence_seqno++,
1708 --vm->composite_fence_seqno;
1715 for (i = 0; i < num_syncs; i++)
1716 xe_sync_entry_signal(&syncs[i], NULL,
1717 cf ? &cf->base : fence);
1720 return cf ? &cf->base : fence;
1725 /* FIXME: Rewind the previous binds? */
1726 dma_fence_put(fences[--cur_fence]);
1731 return ERR_PTR(err);
1734 struct async_op_fence {
1735 struct dma_fence fence;
1736 struct dma_fence *wait_fence;
1737 struct dma_fence_cb cb;
1739 wait_queue_head_t wq;
1743 static const char *async_op_fence_get_driver_name(struct dma_fence *dma_fence)
1749 async_op_fence_get_timeline_name(struct dma_fence *dma_fence)
1751 return "async_op_fence";
1754 static const struct dma_fence_ops async_op_fence_ops = {
1755 .get_driver_name = async_op_fence_get_driver_name,
1756 .get_timeline_name = async_op_fence_get_timeline_name,
1759 static void async_op_fence_cb(struct dma_fence *fence, struct dma_fence_cb *cb)
1761 struct async_op_fence *afence =
1762 container_of(cb, struct async_op_fence, cb);
1764 afence->fence.error = afence->wait_fence->error;
1765 dma_fence_signal(&afence->fence);
1766 xe_vm_put(afence->vm);
1767 dma_fence_put(afence->wait_fence);
1768 dma_fence_put(&afence->fence);
1771 static void add_async_op_fence_cb(struct xe_vm *vm,
1772 struct dma_fence *fence,
1773 struct async_op_fence *afence)
1777 if (!xe_vm_no_dma_fences(vm)) {
1778 afence->started = true;
1780 wake_up_all(&afence->wq);
1783 afence->wait_fence = dma_fence_get(fence);
1784 afence->vm = xe_vm_get(vm);
1785 dma_fence_get(&afence->fence);
1786 ret = dma_fence_add_callback(fence, &afence->cb, async_op_fence_cb);
1787 if (ret == -ENOENT) {
1788 afence->fence.error = afence->wait_fence->error;
1789 dma_fence_signal(&afence->fence);
1793 dma_fence_put(afence->wait_fence);
1794 dma_fence_put(&afence->fence);
1796 XE_WARN_ON(ret && ret != -ENOENT);
1799 int xe_vm_async_fence_wait_start(struct dma_fence *fence)
1801 if (fence->ops == &async_op_fence_ops) {
1802 struct async_op_fence *afence =
1803 container_of(fence, struct async_op_fence, fence);
1805 XE_WARN_ON(xe_vm_no_dma_fences(afence->vm));
1808 return wait_event_interruptible(afence->wq, afence->started);
1814 static int __xe_vm_bind(struct xe_vm *vm, struct xe_vma *vma,
1815 struct xe_exec_queue *q, struct xe_sync_entry *syncs,
1816 u32 num_syncs, struct async_op_fence *afence,
1817 bool immediate, bool first_op, bool last_op)
1819 struct dma_fence *fence;
1821 xe_vm_assert_held(vm);
1824 fence = xe_vm_bind_vma(vma, q, syncs, num_syncs, first_op,
1827 return PTR_ERR(fence);
1831 XE_WARN_ON(!xe_vm_in_fault_mode(vm));
1833 fence = dma_fence_get_stub();
1835 for (i = 0; i < num_syncs; i++)
1836 xe_sync_entry_signal(&syncs[i], NULL, fence);
1840 add_async_op_fence_cb(vm, fence, afence);
1842 dma_fence_put(fence);
1846 static int xe_vm_bind(struct xe_vm *vm, struct xe_vma *vma, struct xe_exec_queue *q,
1847 struct xe_bo *bo, struct xe_sync_entry *syncs,
1848 u32 num_syncs, struct async_op_fence *afence,
1849 bool immediate, bool first_op, bool last_op)
1853 xe_vm_assert_held(vm);
1854 xe_bo_assert_held(bo);
1856 if (bo && immediate) {
1857 err = xe_bo_validate(bo, vm, true);
1862 return __xe_vm_bind(vm, vma, q, syncs, num_syncs, afence, immediate,
1866 static int xe_vm_unbind(struct xe_vm *vm, struct xe_vma *vma,
1867 struct xe_exec_queue *q, struct xe_sync_entry *syncs,
1868 u32 num_syncs, struct async_op_fence *afence,
1869 bool first_op, bool last_op)
1871 struct dma_fence *fence;
1873 xe_vm_assert_held(vm);
1874 xe_bo_assert_held(xe_vma_bo(vma));
1876 fence = xe_vm_unbind_vma(vma, q, syncs, num_syncs, first_op, last_op);
1878 return PTR_ERR(fence);
1880 add_async_op_fence_cb(vm, fence, afence);
1882 xe_vma_destroy(vma, fence);
1883 dma_fence_put(fence);
1888 static int vm_set_error_capture_address(struct xe_device *xe, struct xe_vm *vm,
1891 if (XE_IOCTL_DBG(xe, !value))
1894 if (XE_IOCTL_DBG(xe, !(vm->flags & XE_VM_FLAG_ASYNC_BIND_OPS)))
1897 if (XE_IOCTL_DBG(xe, vm->async_ops.error_capture.addr))
1900 vm->async_ops.error_capture.mm = current->mm;
1901 vm->async_ops.error_capture.addr = value;
1902 init_waitqueue_head(&vm->async_ops.error_capture.wq);
1907 typedef int (*xe_vm_set_property_fn)(struct xe_device *xe, struct xe_vm *vm,
1910 static const xe_vm_set_property_fn vm_set_property_funcs[] = {
1911 [XE_VM_PROPERTY_BIND_OP_ERROR_CAPTURE_ADDRESS] =
1912 vm_set_error_capture_address,
1915 static int vm_user_ext_set_property(struct xe_device *xe, struct xe_vm *vm,
1918 u64 __user *address = u64_to_user_ptr(extension);
1919 struct drm_xe_ext_vm_set_property ext;
1922 err = __copy_from_user(&ext, address, sizeof(ext));
1923 if (XE_IOCTL_DBG(xe, err))
1926 if (XE_IOCTL_DBG(xe, ext.property >=
1927 ARRAY_SIZE(vm_set_property_funcs)) ||
1928 XE_IOCTL_DBG(xe, ext.pad) ||
1929 XE_IOCTL_DBG(xe, ext.reserved[0] || ext.reserved[1]))
1932 return vm_set_property_funcs[ext.property](xe, vm, ext.value);
1935 typedef int (*xe_vm_user_extension_fn)(struct xe_device *xe, struct xe_vm *vm,
1938 static const xe_vm_set_property_fn vm_user_extension_funcs[] = {
1939 [XE_VM_EXTENSION_SET_PROPERTY] = vm_user_ext_set_property,
1942 #define MAX_USER_EXTENSIONS 16
1943 static int vm_user_extensions(struct xe_device *xe, struct xe_vm *vm,
1944 u64 extensions, int ext_number)
1946 u64 __user *address = u64_to_user_ptr(extensions);
1947 struct xe_user_extension ext;
1950 if (XE_IOCTL_DBG(xe, ext_number >= MAX_USER_EXTENSIONS))
1953 err = __copy_from_user(&ext, address, sizeof(ext));
1954 if (XE_IOCTL_DBG(xe, err))
1957 if (XE_IOCTL_DBG(xe, ext.pad) ||
1958 XE_IOCTL_DBG(xe, ext.name >=
1959 ARRAY_SIZE(vm_user_extension_funcs)))
1962 err = vm_user_extension_funcs[ext.name](xe, vm, extensions);
1963 if (XE_IOCTL_DBG(xe, err))
1966 if (ext.next_extension)
1967 return vm_user_extensions(xe, vm, ext.next_extension,
1973 #define ALL_DRM_XE_VM_CREATE_FLAGS (DRM_XE_VM_CREATE_SCRATCH_PAGE | \
1974 DRM_XE_VM_CREATE_COMPUTE_MODE | \
1975 DRM_XE_VM_CREATE_ASYNC_BIND_OPS | \
1976 DRM_XE_VM_CREATE_FAULT_MODE)
1978 int xe_vm_create_ioctl(struct drm_device *dev, void *data,
1979 struct drm_file *file)
1981 struct xe_device *xe = to_xe_device(dev);
1982 struct xe_file *xef = to_xe_file(file);
1983 struct drm_xe_vm_create *args = data;
1989 if (XE_WA(xe_root_mmio_gt(xe), 14016763929))
1990 args->flags |= DRM_XE_VM_CREATE_SCRATCH_PAGE;
1992 if (XE_IOCTL_DBG(xe, args->flags & DRM_XE_VM_CREATE_FAULT_MODE &&
1993 !xe->info.supports_usm))
1996 if (XE_IOCTL_DBG(xe, args->reserved[0] || args->reserved[1]))
1999 if (XE_IOCTL_DBG(xe, args->flags & ~ALL_DRM_XE_VM_CREATE_FLAGS))
2002 if (XE_IOCTL_DBG(xe, args->flags & DRM_XE_VM_CREATE_SCRATCH_PAGE &&
2003 args->flags & DRM_XE_VM_CREATE_FAULT_MODE))
2006 if (XE_IOCTL_DBG(xe, args->flags & DRM_XE_VM_CREATE_COMPUTE_MODE &&
2007 args->flags & DRM_XE_VM_CREATE_FAULT_MODE))
2010 if (XE_IOCTL_DBG(xe, args->flags & DRM_XE_VM_CREATE_FAULT_MODE &&
2011 xe_device_in_non_fault_mode(xe)))
2014 if (XE_IOCTL_DBG(xe, !(args->flags & DRM_XE_VM_CREATE_FAULT_MODE) &&
2015 xe_device_in_fault_mode(xe)))
2018 if (args->flags & DRM_XE_VM_CREATE_SCRATCH_PAGE)
2019 flags |= XE_VM_FLAG_SCRATCH_PAGE;
2020 if (args->flags & DRM_XE_VM_CREATE_COMPUTE_MODE)
2021 flags |= XE_VM_FLAG_COMPUTE_MODE;
2022 if (args->flags & DRM_XE_VM_CREATE_ASYNC_BIND_OPS)
2023 flags |= XE_VM_FLAG_ASYNC_BIND_OPS;
2024 if (args->flags & DRM_XE_VM_CREATE_FAULT_MODE)
2025 flags |= XE_VM_FLAG_FAULT_MODE;
2027 vm = xe_vm_create(xe, flags);
2031 if (args->extensions) {
2032 err = vm_user_extensions(xe, vm, args->extensions, 0);
2033 if (XE_IOCTL_DBG(xe, err)) {
2034 xe_vm_close_and_put(vm);
2039 mutex_lock(&xef->vm.lock);
2040 err = xa_alloc(&xef->vm.xa, &id, vm, xa_limit_32b, GFP_KERNEL);
2041 mutex_unlock(&xef->vm.lock);
2043 xe_vm_close_and_put(vm);
2047 if (xe->info.has_asid) {
2048 mutex_lock(&xe->usm.lock);
2049 err = xa_alloc_cyclic(&xe->usm.asid_to_vm, &asid, vm,
2050 XA_LIMIT(0, XE_MAX_ASID - 1),
2051 &xe->usm.next_asid, GFP_KERNEL);
2052 mutex_unlock(&xe->usm.lock);
2054 xe_vm_close_and_put(vm);
2057 vm->usm.asid = asid;
2062 #if IS_ENABLED(CONFIG_DRM_XE_DEBUG_MEM)
2063 /* Warning: Security issue - never enable by default */
2064 args->reserved[0] = xe_bo_main_addr(vm->pt_root[0]->bo, XE_PAGE_SIZE);
2070 int xe_vm_destroy_ioctl(struct drm_device *dev, void *data,
2071 struct drm_file *file)
2073 struct xe_device *xe = to_xe_device(dev);
2074 struct xe_file *xef = to_xe_file(file);
2075 struct drm_xe_vm_destroy *args = data;
2079 if (XE_IOCTL_DBG(xe, args->pad) ||
2080 XE_IOCTL_DBG(xe, args->reserved[0] || args->reserved[1]))
2083 mutex_lock(&xef->vm.lock);
2084 vm = xa_load(&xef->vm.xa, args->vm_id);
2085 if (XE_IOCTL_DBG(xe, !vm))
2087 else if (XE_IOCTL_DBG(xe, vm->preempt.num_exec_queues))
2090 xa_erase(&xef->vm.xa, args->vm_id);
2091 mutex_unlock(&xef->vm.lock);
2094 xe_vm_close_and_put(vm);
2099 static const u32 region_to_mem_type[] = {
2105 static int xe_vm_prefetch(struct xe_vm *vm, struct xe_vma *vma,
2106 struct xe_exec_queue *q, u32 region,
2107 struct xe_sync_entry *syncs, u32 num_syncs,
2108 struct async_op_fence *afence, bool first_op,
2113 XE_WARN_ON(region > ARRAY_SIZE(region_to_mem_type));
2115 if (!xe_vma_has_no_bo(vma)) {
2116 err = xe_bo_migrate(xe_vma_bo(vma), region_to_mem_type[region]);
2121 if (vma->tile_mask != (vma->tile_present & ~vma->usm.tile_invalidated)) {
2122 return xe_vm_bind(vm, vma, q, xe_vma_bo(vma), syncs, num_syncs,
2123 afence, true, first_op, last_op);
2127 /* Nothing to do, signal fences now */
2129 for (i = 0; i < num_syncs; i++)
2130 xe_sync_entry_signal(&syncs[i], NULL,
2131 dma_fence_get_stub());
2134 dma_fence_signal(&afence->fence);
2139 #define VM_BIND_OP(op) (op & 0xffff)
2141 static void vm_set_async_error(struct xe_vm *vm, int err)
2143 lockdep_assert_held(&vm->lock);
2144 vm->async_ops.error = err;
2147 static int vm_bind_ioctl_lookup_vma(struct xe_vm *vm, struct xe_bo *bo,
2148 u64 addr, u64 range, u32 op)
2150 struct xe_device *xe = vm->xe;
2152 bool async = !!(op & XE_VM_BIND_FLAG_ASYNC);
2154 lockdep_assert_held(&vm->lock);
2156 switch (VM_BIND_OP(op)) {
2157 case XE_VM_BIND_OP_MAP:
2158 case XE_VM_BIND_OP_MAP_USERPTR:
2159 vma = xe_vm_find_overlapping_vma(vm, addr, range);
2160 if (XE_IOCTL_DBG(xe, vma && !async))
2163 case XE_VM_BIND_OP_UNMAP:
2164 case XE_VM_BIND_OP_PREFETCH:
2165 vma = xe_vm_find_overlapping_vma(vm, addr, range);
2166 if (XE_IOCTL_DBG(xe, !vma))
2167 /* Not an actual error; the IOCTL cleans up and returns 0 */
2169 if (XE_IOCTL_DBG(xe, (xe_vma_start(vma) != addr ||
2170 xe_vma_end(vma) != addr + range) && !async))
2173 case XE_VM_BIND_OP_UNMAP_ALL:
2174 if (XE_IOCTL_DBG(xe, list_empty(&bo->ttm.base.gpuva.list)))
2175 /* Not an actual error; the IOCTL cleans up and returns 0 */
2179 drm_warn(&xe->drm, "NOT POSSIBLE");
2186 static void prep_vma_destroy(struct xe_vm *vm, struct xe_vma *vma,
2189 down_read(&vm->userptr.notifier_lock);
2190 vma->gpuva.flags |= XE_VMA_DESTROYED;
2191 up_read(&vm->userptr.notifier_lock);
2193 xe_vm_remove_vma(vm, vma);
2197 #define ULL unsigned long long
2199 #if IS_ENABLED(CONFIG_DRM_XE_DEBUG_VM)
2200 static void print_op(struct xe_device *xe, struct drm_gpuva_op *op)
2205 case DRM_GPUVA_OP_MAP:
2206 vm_dbg(&xe->drm, "MAP: addr=0x%016llx, range=0x%016llx",
2207 (ULL)op->map.va.addr, (ULL)op->map.va.range);
2209 case DRM_GPUVA_OP_REMAP:
2210 vma = gpuva_to_vma(op->remap.unmap->va);
2211 vm_dbg(&xe->drm, "REMAP:UNMAP: addr=0x%016llx, range=0x%016llx, keep=%d",
2212 (ULL)xe_vma_start(vma), (ULL)xe_vma_size(vma),
2213 op->unmap.keep ? 1 : 0);
2216 "REMAP:PREV: addr=0x%016llx, range=0x%016llx",
2217 (ULL)op->remap.prev->va.addr,
2218 (ULL)op->remap.prev->va.range);
2221 "REMAP:NEXT: addr=0x%016llx, range=0x%016llx",
2222 (ULL)op->remap.next->va.addr,
2223 (ULL)op->remap.next->va.range);
2225 case DRM_GPUVA_OP_UNMAP:
2226 vma = gpuva_to_vma(op->unmap.va);
2227 vm_dbg(&xe->drm, "UNMAP: addr=0x%016llx, range=0x%016llx, keep=%d",
2228 (ULL)xe_vma_start(vma), (ULL)xe_vma_size(vma),
2229 op->unmap.keep ? 1 : 0);
2231 case DRM_GPUVA_OP_PREFETCH:
2232 vma = gpuva_to_vma(op->prefetch.va);
2233 vm_dbg(&xe->drm, "PREFETCH: addr=0x%016llx, range=0x%016llx",
2234 (ULL)xe_vma_start(vma), (ULL)xe_vma_size(vma));
2237 drm_warn(&xe->drm, "NOT POSSIBLE");
2241 static void print_op(struct xe_device *xe, struct drm_gpuva_op *op)
2247 * Create operations list from IOCTL arguments, set up operation fields so parse
2248 * and commit steps are decoupled from IOCTL arguments. This step can fail.
2250 static struct drm_gpuva_ops *
2251 vm_bind_ioctl_ops_create(struct xe_vm *vm, struct xe_bo *bo,
2252 u64 bo_offset_or_userptr, u64 addr, u64 range,
2253 u32 operation, u8 tile_mask, u32 region)
2255 struct drm_gem_object *obj = bo ? &bo->ttm.base : NULL;
2256 struct drm_gpuva_ops *ops;
2257 struct drm_gpuva_op *__op;
2258 struct xe_vma_op *op;
2259 struct drm_gpuvm_bo *vm_bo;
2262 lockdep_assert_held_write(&vm->lock);
2264 vm_dbg(&vm->xe->drm,
2265 "op=%d, addr=0x%016llx, range=0x%016llx, bo_offset_or_userptr=0x%016llx",
2266 VM_BIND_OP(operation), (ULL)addr, (ULL)range,
2267 (ULL)bo_offset_or_userptr);
2269 switch (VM_BIND_OP(operation)) {
2270 case XE_VM_BIND_OP_MAP:
2271 case XE_VM_BIND_OP_MAP_USERPTR:
2272 ops = drm_gpuvm_sm_map_ops_create(&vm->gpuvm, addr, range,
2273 obj, bo_offset_or_userptr);
2277 drm_gpuva_for_each_op(__op, ops) {
2278 struct xe_vma_op *op = gpuva_op_to_vma_op(__op);
2280 op->tile_mask = tile_mask;
2282 operation & XE_VM_BIND_FLAG_IMMEDIATE;
2284 operation & XE_VM_BIND_FLAG_READONLY;
2285 op->map.is_null = operation & XE_VM_BIND_FLAG_NULL;
2288 case XE_VM_BIND_OP_UNMAP:
2289 ops = drm_gpuvm_sm_unmap_ops_create(&vm->gpuvm, addr, range);
2293 drm_gpuva_for_each_op(__op, ops) {
2294 struct xe_vma_op *op = gpuva_op_to_vma_op(__op);
2296 op->tile_mask = tile_mask;
2299 case XE_VM_BIND_OP_PREFETCH:
2300 ops = drm_gpuvm_prefetch_ops_create(&vm->gpuvm, addr, range);
2304 drm_gpuva_for_each_op(__op, ops) {
2305 struct xe_vma_op *op = gpuva_op_to_vma_op(__op);
2307 op->tile_mask = tile_mask;
2308 op->prefetch.region = region;
2311 case XE_VM_BIND_OP_UNMAP_ALL:
2314 err = xe_bo_lock(bo, true);
2316 return ERR_PTR(err);
2318 vm_bo = drm_gpuvm_bo_find(&vm->gpuvm, obj);
2322 ops = drm_gpuvm_bo_unmap_ops_create(vm_bo);
2323 drm_gpuvm_bo_put(vm_bo);
2328 drm_gpuva_for_each_op(__op, ops) {
2329 struct xe_vma_op *op = gpuva_op_to_vma_op(__op);
2331 op->tile_mask = tile_mask;
2335 drm_warn(&vm->xe->drm, "NOT POSSIBLE");
2336 ops = ERR_PTR(-EINVAL);
2339 #ifdef TEST_VM_ASYNC_OPS_ERROR
2340 if (operation & FORCE_ASYNC_OP_ERROR) {
2341 op = list_first_entry_or_null(&ops->list, struct xe_vma_op,
2344 op->inject_error = true;
2349 drm_gpuva_for_each_op(__op, ops)
2350 print_op(vm->xe, __op);
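/*
 * Illustrative overview (not part of the driver): a VM bind is processed in
 * decoupled stages; argument marshalling and error unwinding are elided and
 * the trailing parameters of the parse step are abbreviated here:
 *
 *	ops = vm_bind_ioctl_ops_create(vm, bo, bo_offset_or_userptr, addr,
 *				       range, operation, tile_mask, region);
 *	err = vm_bind_ioctl_ops_parse(vm, q, ops, syncs, num_syncs,
 *				      &ops_list, ...);
 *	(the parse step also commits each op via xe_vma_op_commit())
 *	list_for_each_entry(op, &ops_list, link)
 *		err = xe_vma_op_execute(vm, op);
 */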
2355 static struct xe_vma *new_vma(struct xe_vm *vm, struct drm_gpuva_op_map *op,
2356 u8 tile_mask, bool read_only, bool is_null)
2358 struct xe_bo *bo = op->gem.obj ? gem_to_xe_bo(op->gem.obj) : NULL;
2362 lockdep_assert_held_write(&vm->lock);
2365 err = xe_bo_lock(bo, true);
2367 return ERR_PTR(err);
2369 vma = xe_vma_create(vm, bo, op->gem.offset,
2370 op->va.addr, op->va.addr +
2371 op->va.range - 1, read_only, is_null,
2376 if (xe_vma_is_userptr(vma)) {
2377 err = xe_vma_userptr_pin_pages(vma);
2379 prep_vma_destroy(vm, vma, false);
2380 xe_vma_destroy_unlocked(vma);
2381 return ERR_PTR(err);
2383 } else if (!xe_vma_has_no_bo(vma) && !bo->vm) {
2384 vm_insert_extobj(vm, vma);
2385 err = add_preempt_fences(vm, bo);
2387 prep_vma_destroy(vm, vma, false);
2388 xe_vma_destroy_unlocked(vma);
2389 return ERR_PTR(err);
2396 static u64 xe_vma_max_pte_size(struct xe_vma *vma)
2398 if (vma->gpuva.flags & XE_VMA_PTE_1G)
2400 else if (vma->gpuva.flags & XE_VMA_PTE_2M)
2406 static u64 xe_vma_set_pte_size(struct xe_vma *vma, u64 size)
2410 vma->gpuva.flags |= XE_VMA_PTE_1G;
2413 vma->gpuva.flags |= XE_VMA_PTE_2M;
2420 static int xe_vma_op_commit(struct xe_vm *vm, struct xe_vma_op *op)
2424 lockdep_assert_held_write(&vm->lock);
2426 switch (op->base.op) {
2427 case DRM_GPUVA_OP_MAP:
2428 err |= xe_vm_insert_vma(vm, op->map.vma);
2430 op->flags |= XE_VMA_OP_COMMITTED;
2432 case DRM_GPUVA_OP_REMAP:
2433 prep_vma_destroy(vm, gpuva_to_vma(op->base.remap.unmap->va),
2435 op->flags |= XE_VMA_OP_COMMITTED;
2437 if (op->remap.prev) {
2438 err |= xe_vm_insert_vma(vm, op->remap.prev);
2440 op->flags |= XE_VMA_OP_PREV_COMMITTED;
2441 if (!err && op->remap.skip_prev)
2442 op->remap.prev = NULL;
2444 if (op->remap.next) {
2445 err |= xe_vm_insert_vma(vm, op->remap.next);
2447 op->flags |= XE_VMA_OP_NEXT_COMMITTED;
2448 if (!err && op->remap.skip_next)
2449 op->remap.next = NULL;
2452 /* Adjust for partial unbind after removing VMA from VM */
2454 op->base.remap.unmap->va->va.addr = op->remap.start;
2455 op->base.remap.unmap->va->va.range = op->remap.range;
2458 case DRM_GPUVA_OP_UNMAP:
2459 prep_vma_destroy(vm, gpuva_to_vma(op->base.unmap.va), true);
2460 op->flags |= XE_VMA_OP_COMMITTED;
2462 case DRM_GPUVA_OP_PREFETCH:
2463 op->flags |= XE_VMA_OP_COMMITTED;
2466 drm_warn(&vm->xe->drm, "NOT POSSIBLE");
2473 static int vm_bind_ioctl_ops_parse(struct xe_vm *vm, struct xe_exec_queue *q,
2474 struct drm_gpuva_ops *ops,
2475 struct xe_sync_entry *syncs, u32 num_syncs,
2476 struct list_head *ops_list, bool last,
2479 struct xe_vma_op *last_op = NULL;
2480 struct async_op_fence *fence = NULL;
2481 struct drm_gpuva_op *__op;
2484 lockdep_assert_held_write(&vm->lock);
2486 if (last && num_syncs && async) {
2489 fence = kmalloc(sizeof(*fence), GFP_KERNEL);
2493 seqno = q ? ++q->bind.fence_seqno : ++vm->async_ops.fence.seqno;
2494 dma_fence_init(&fence->fence, &async_op_fence_ops,
2495 &vm->async_ops.lock, q ? q->bind.fence_ctx :
2496 vm->async_ops.fence.context, seqno);
2498 if (!xe_vm_no_dma_fences(vm)) {
2500 fence->started = false;
2501 init_waitqueue_head(&fence->wq);
2505 drm_gpuva_for_each_op(__op, ops) {
2506 struct xe_vma_op *op = gpuva_op_to_vma_op(__op);
2507 bool first = list_empty(ops_list);
2509 XE_WARN_ON(!first && !async);
2511 INIT_LIST_HEAD(&op->link);
2512 list_add_tail(&op->link, ops_list);
2515 op->flags |= XE_VMA_OP_FIRST;
2516 op->num_syncs = num_syncs;
2522 switch (op->base.op) {
2523 case DRM_GPUVA_OP_MAP:
2527 vma = new_vma(vm, &op->base.map,
2528 op->tile_mask, op->map.read_only,
2538 case DRM_GPUVA_OP_REMAP:
2540 struct xe_vma *old =
2541 gpuva_to_vma(op->base.remap.unmap->va);
2543 op->remap.start = xe_vma_start(old);
2544 op->remap.range = xe_vma_size(old);
2546 if (op->base.remap.prev) {
2549 op->base.remap.unmap->va->flags &
2552 op->base.remap.unmap->va->flags &
2555 vma = new_vma(vm, op->base.remap.prev,
2556 op->tile_mask, read_only,
2563 op->remap.prev = vma;
2566 * Userptr creates a new SG mapping so
2567 * we must also rebind.
2569 op->remap.skip_prev = !xe_vma_is_userptr(old) &&
2570 IS_ALIGNED(xe_vma_end(vma),
2571 xe_vma_max_pte_size(old));
2572 if (op->remap.skip_prev) {
2573 xe_vma_set_pte_size(vma, xe_vma_max_pte_size(old));
2577 op->remap.start = xe_vma_end(vma);
2581 if (op->base.remap.next) {
2584 op->base.remap.unmap->va->flags &
2588 op->base.remap.unmap->va->flags &
2591 vma = new_vma(vm, op->base.remap.next,
2592 op->tile_mask, read_only,
2599 op->remap.next = vma;
2602 * Userptr creates a new SG mapping so
2603 * we must also rebind.
2605 op->remap.skip_next = !xe_vma_is_userptr(old) &&
2606 IS_ALIGNED(xe_vma_start(vma),
2607 xe_vma_max_pte_size(old));
2608 if (op->remap.skip_next) {
2609 xe_vma_set_pte_size(vma, xe_vma_max_pte_size(old));
2617 case DRM_GPUVA_OP_UNMAP:
2618 case DRM_GPUVA_OP_PREFETCH:
2622 drm_warn(&vm->xe->drm, "NOT POSSIBLE");
2627 err = xe_vma_op_commit(vm, op);
2632 /* FIXME: Unhandled corner case */
2633 XE_WARN_ON(!last_op && last && !list_empty(ops_list));
2639 last_op->flags |= XE_VMA_OP_LAST;
2640 last_op->num_syncs = num_syncs;
2641 last_op->syncs = syncs;
2642 last_op->fence = fence;
2652 static int op_execute(struct drm_exec *exec, struct xe_vm *vm,
2653 struct xe_vma *vma, struct xe_vma_op *op)
2657 lockdep_assert_held_write(&vm->lock);
2659 err = xe_vm_prepare_vma(exec, vma, 1);
2663 xe_vm_assert_held(vm);
2664 xe_bo_assert_held(xe_vma_bo(vma));
2666 switch (op->base.op) {
2667 case DRM_GPUVA_OP_MAP:
2668 err = xe_vm_bind(vm, vma, op->q, xe_vma_bo(vma),
2669 op->syncs, op->num_syncs, op->fence,
2670 op->map.immediate || !xe_vm_in_fault_mode(vm),
2671 op->flags & XE_VMA_OP_FIRST,
2672 op->flags & XE_VMA_OP_LAST);
2674 case DRM_GPUVA_OP_REMAP:
2676 bool prev = !!op->remap.prev;
2677 bool next = !!op->remap.next;
2679 if (!op->remap.unmap_done) {
2681 vm->async_ops.munmap_rebind_inflight = true;
2682 vma->gpuva.flags |= XE_VMA_FIRST_REBIND;
2684 err = xe_vm_unbind(vm, vma, op->q, op->syncs,
2686 !prev && !next ? op->fence : NULL,
2687 op->flags & XE_VMA_OP_FIRST,
2688 op->flags & XE_VMA_OP_LAST && !prev &&
2692 op->remap.unmap_done = true;
2696 op->remap.prev->gpuva.flags |= XE_VMA_LAST_REBIND;
2697 err = xe_vm_bind(vm, op->remap.prev, op->q,
2698 xe_vma_bo(op->remap.prev), op->syncs,
2700 !next ? op->fence : NULL, true, false,
2701 op->flags & XE_VMA_OP_LAST && !next);
2702 op->remap.prev->gpuva.flags &= ~XE_VMA_LAST_REBIND;
2705 op->remap.prev = NULL;
2709 op->remap.next->gpuva.flags |= XE_VMA_LAST_REBIND;
2710 err = xe_vm_bind(vm, op->remap.next, op->q,
2711 xe_vma_bo(op->remap.next),
2712 op->syncs, op->num_syncs,
2713 op->fence, true, false,
2714 op->flags & XE_VMA_OP_LAST);
2715 op->remap.next->gpuva.flags &= ~XE_VMA_LAST_REBIND;
2718 op->remap.next = NULL;
2720 vm->async_ops.munmap_rebind_inflight = false;
2724 case DRM_GPUVA_OP_UNMAP:
2725 err = xe_vm_unbind(vm, vma, op->q, op->syncs,
2726 op->num_syncs, op->fence,
2727 op->flags & XE_VMA_OP_FIRST,
2728 op->flags & XE_VMA_OP_LAST);
2730 case DRM_GPUVA_OP_PREFETCH:
2731 err = xe_vm_prefetch(vm, vma, op->q, op->prefetch.region,
2732 op->syncs, op->num_syncs, op->fence,
2733 op->flags & XE_VMA_OP_FIRST,
2734 op->flags & XE_VMA_OP_LAST);
2737 drm_warn(&vm->xe->drm, "NOT POSSIBLE");
2741 trace_xe_vma_fail(vma);
2746 static int __xe_vma_op_execute(struct xe_vm *vm, struct xe_vma *vma,
2747 struct xe_vma_op *op)
2749 struct drm_exec exec;
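/*
 * drm_exec_until_all_locked() re-runs op_execute() whenever locking the
 * VM or BO dma_resv objects hits contention; a userptr VMA returning
 * -EAGAIN has its pages repinned below so the operation can be retried.
 */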
2753 drm_exec_init(&exec, DRM_EXEC_INTERRUPTIBLE_WAIT);
2754 drm_exec_until_all_locked(&exec) {
2755 err = op_execute(&exec, vm, vma, op);
2756 drm_exec_retry_on_contention(&exec);
2760 drm_exec_fini(&exec);
2762 if (err == -EAGAIN && xe_vma_is_userptr(vma)) {
2763 lockdep_assert_held_write(&vm->lock);
2764 err = xe_vma_userptr_pin_pages(vma);
2768 trace_xe_vma_fail(vma);
2774 static int xe_vma_op_execute(struct xe_vm *vm, struct xe_vma_op *op)
2778 lockdep_assert_held_write(&vm->lock);
2780 #ifdef TEST_VM_ASYNC_OPS_ERROR
2781 if (op->inject_error) {
2782 op->inject_error = false;
2787 switch (op->base.op) {
2788 case DRM_GPUVA_OP_MAP:
2789 ret = __xe_vma_op_execute(vm, op->map.vma, op);
2791 case DRM_GPUVA_OP_REMAP:
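/*
 * A REMAP executes in stages: the original mapping is unbound first, then
 * any remaining prev/next portions are rebound, so the VMA handed to
 * __xe_vma_op_execute() depends on how far the op has progressed.
 */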
2795 if (!op->remap.unmap_done)
2796 vma = gpuva_to_vma(op->base.remap.unmap->va);
2797 else if (op->remap.prev)
2798 vma = op->remap.prev;
2800 vma = op->remap.next;
2802 ret = __xe_vma_op_execute(vm, vma, op);
2805 case DRM_GPUVA_OP_UNMAP:
2806 ret = __xe_vma_op_execute(vm, gpuva_to_vma(op->base.unmap.va),
2809 case DRM_GPUVA_OP_PREFETCH:
2810 ret = __xe_vma_op_execute(vm,
2811 gpuva_to_vma(op->base.prefetch.va),
2815 drm_warn(&vm->xe->drm, "NOT POSSIBLE");
2821 static void xe_vma_op_cleanup(struct xe_vm *vm, struct xe_vma_op *op)
2823 bool last = op->flags & XE_VMA_OP_LAST;
2826 while (op->num_syncs--)
2827 xe_sync_entry_cleanup(&op->syncs[op->num_syncs]);
2830 xe_exec_queue_put(op->q);
2832 dma_fence_put(&op->fence->fence);
2834 if (!list_empty(&op->link)) {
2835 spin_lock_irq(&vm->async_ops.lock);
2836 list_del(&op->link);
2837 spin_unlock_irq(&vm->async_ops.lock);
2840 drm_gpuva_ops_free(&vm->gpuvm, op->ops);
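/*
 * xe_vma_op_unwind() reverses whatever xe_vma_op_commit() did for a single
 * op: VMAs created for MAP or REMAP prev/next are destroyed, while VMAs
 * removed for an UNMAP/REMAP have XE_VMA_DESTROYED cleared and are
 * reinserted into the VM.
 */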
2845 static void xe_vma_op_unwind(struct xe_vm *vm, struct xe_vma_op *op,
2846 bool post_commit, bool prev_post_commit,
2847 bool next_post_commit)
2849 lockdep_assert_held_write(&vm->lock);
2851 switch (op->base.op) {
2852 case DRM_GPUVA_OP_MAP:
2854 prep_vma_destroy(vm, op->map.vma, post_commit);
2855 xe_vma_destroy_unlocked(op->map.vma);
2858 case DRM_GPUVA_OP_UNMAP:
2860 struct xe_vma *vma = gpuva_to_vma(op->base.unmap.va);
2863 down_read(&vm->userptr.notifier_lock);
2864 vma->gpuva.flags &= ~XE_VMA_DESTROYED;
2865 up_read(&vm->userptr.notifier_lock);
2867 xe_vm_insert_vma(vm, vma);
2871 case DRM_GPUVA_OP_REMAP:
2873 struct xe_vma *vma = gpuva_to_vma(op->base.remap.unmap->va);
2875 if (op->remap.prev) {
2876 prep_vma_destroy(vm, op->remap.prev, prev_post_commit);
2877 xe_vma_destroy_unlocked(op->remap.prev);
2879 if (op->remap.next) {
2880 prep_vma_destroy(vm, op->remap.next, next_post_commit);
2881 xe_vma_destroy_unlocked(op->remap.next);
2884 down_read(&vm->userptr.notifier_lock);
2885 vma->gpuva.flags &= ~XE_VMA_DESTROYED;
2886 up_read(&vm->userptr.notifier_lock);
2888 xe_vm_insert_vma(vm, vma);
2892 case DRM_GPUVA_OP_PREFETCH:
2896 drm_warn(&vm->xe->drm, "NOT POSSIBLE");
2900 static struct xe_vma_op *next_vma_op(struct xe_vm *vm)
2902 return list_first_entry_or_null(&vm->async_ops.pending,
2903 struct xe_vma_op, link);
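/*
 * Async worker: pops ops off vm->async_ops.pending one at a time, executes
 * them under the VM lock, and records any failure as the VM's async error
 * so userspace can observe it and issue an XE_VM_BIND_OP_RESTART.
 */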
2906 static void xe_vma_op_work_func(struct work_struct *w)
2908 struct xe_vm *vm = container_of(w, struct xe_vm, async_ops.work);
2911 struct xe_vma_op *op;
2914 if (vm->async_ops.error && !xe_vm_is_closed(vm))
2917 spin_lock_irq(&vm->async_ops.lock);
2918 op = next_vma_op(vm);
2919 spin_unlock_irq(&vm->async_ops.lock);
2924 if (!xe_vm_is_closed(vm)) {
2925 down_write(&vm->lock);
2926 err = xe_vma_op_execute(vm, op);
2928 drm_warn(&vm->xe->drm,
2929 "Async VM op(%d) failed with %d",
2931 vm_set_async_error(vm, err);
2932 up_write(&vm->lock);
2934 if (vm->async_ops.error_capture.addr)
2935 vm_error_capture(vm, err, 0, 0, 0);
2938 up_write(&vm->lock);
2942 switch (op->base.op) {
2943 case DRM_GPUVA_OP_REMAP:
2944 vma = gpuva_to_vma(op->base.remap.unmap->va);
2945 trace_xe_vma_flush(vma);
2947 down_write(&vm->lock);
2948 xe_vma_destroy_unlocked(vma);
2949 up_write(&vm->lock);
2951 case DRM_GPUVA_OP_UNMAP:
2952 vma = gpuva_to_vma(op->base.unmap.va);
2953 trace_xe_vma_flush(vma);
2955 down_write(&vm->lock);
2956 xe_vma_destroy_unlocked(vma);
2957 up_write(&vm->lock);
2964 if (op->fence && !test_bit(DMA_FENCE_FLAG_SIGNALED_BIT,
2965 &op->fence->fence.flags)) {
2966 if (!xe_vm_no_dma_fences(vm)) {
2967 op->fence->started = true;
2968 wake_up_all(&op->fence->wq);
2970 dma_fence_signal(&op->fence->fence);
2974 xe_vma_op_cleanup(vm, op);
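/*
 * vm_bind_ioctl_ops_execute() finishes a bind: for synchronous binds the
 * last op is executed inline; for asynchronous binds the bind fence is
 * installed into the user syncs and the ops are spliced onto the pending
 * list for the worker. On failure, committed ops are unwound in reverse.
 */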
2978 static int vm_bind_ioctl_ops_execute(struct xe_vm *vm,
2979 struct list_head *ops_list, bool async)
2981 struct xe_vma_op *op, *last_op, *next;
2984 lockdep_assert_held_write(&vm->lock);
2986 list_for_each_entry(op, ops_list, link)
2990 err = xe_vma_op_execute(vm, last_op);
2993 xe_vma_op_cleanup(vm, last_op);
2996 bool installed = false;
2998 for (i = 0; i < last_op->num_syncs; i++)
2999 installed |= xe_sync_entry_signal(&last_op->syncs[i],
3001 &last_op->fence->fence);
3002 if (!installed && last_op->fence)
3003 dma_fence_signal(&last_op->fence->fence);
3005 spin_lock_irq(&vm->async_ops.lock);
3006 list_splice_tail(ops_list, &vm->async_ops.pending);
3007 spin_unlock_irq(&vm->async_ops.lock);
3009 if (!vm->async_ops.error)
3010 queue_work(system_unbound_wq, &vm->async_ops.work);
3016 list_for_each_entry_reverse(op, ops_list, link)
3017 xe_vma_op_unwind(vm, op, op->flags & XE_VMA_OP_COMMITTED,
3018 op->flags & XE_VMA_OP_PREV_COMMITTED,
3019 op->flags & XE_VMA_OP_NEXT_COMMITTED);
3020 list_for_each_entry_safe(op, next, ops_list, link)
3021 xe_vma_op_cleanup(vm, op);
3026 static void vm_bind_ioctl_ops_unwind(struct xe_vm *vm,
3027 struct drm_gpuva_ops **ops,
3032 for (i = num_ops_list - 1; i >= 0; --i) {
3033 struct drm_gpuva_ops *__ops = ops[i];
3034 struct drm_gpuva_op *__op;
3039 drm_gpuva_for_each_op_reverse(__op, __ops) {
3040 struct xe_vma_op *op = gpuva_op_to_vma_op(__op);
3042 xe_vma_op_unwind(vm, op,
3043 op->flags & XE_VMA_OP_COMMITTED,
3044 op->flags & XE_VMA_OP_PREV_COMMITTED,
3045 op->flags & XE_VMA_OP_NEXT_COMMITTED);
3048 drm_gpuva_ops_free(&vm->gpuvm, __ops);
3052 #ifdef TEST_VM_ASYNC_OPS_ERROR
3053 #define SUPPORTED_FLAGS \
3054 (FORCE_ASYNC_OP_ERROR | XE_VM_BIND_FLAG_ASYNC | \
3055 XE_VM_BIND_FLAG_READONLY | XE_VM_BIND_FLAG_IMMEDIATE | \
3056 XE_VM_BIND_FLAG_NULL | 0xffff)
3058 #define SUPPORTED_FLAGS \
3059 (XE_VM_BIND_FLAG_ASYNC | XE_VM_BIND_FLAG_READONLY | \
3060 XE_VM_BIND_FLAG_IMMEDIATE | XE_VM_BIND_FLAG_NULL | 0xffff)
3062 #define XE_64K_PAGE_MASK 0xffffull
3064 #define MAX_BINDS 512 /* FIXME: Arbitrary upper limit, revisit */
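/*
 * vm_bind_ioctl_check_args() copies the array of bind ops from userspace
 * (when num_binds > 1) and validates each op: supported flags, async
 * consistency, object/region combinations and page alignment.
 */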
3066 static int vm_bind_ioctl_check_args(struct xe_device *xe,
3067 struct drm_xe_vm_bind *args,
3068 struct drm_xe_vm_bind_op **bind_ops,
3074 if (XE_IOCTL_DBG(xe, args->extensions) ||
3075 XE_IOCTL_DBG(xe, !args->num_binds) ||
3076 XE_IOCTL_DBG(xe, args->num_binds > MAX_BINDS))
3079 if (args->num_binds > 1) {
3080 u64 __user *bind_user =
3081 u64_to_user_ptr(args->vector_of_binds);
3083 *bind_ops = kmalloc(sizeof(struct drm_xe_vm_bind_op) *
3084 args->num_binds, GFP_KERNEL);
3088 err = __copy_from_user(*bind_ops, bind_user,
3089 sizeof(struct drm_xe_vm_bind_op) *
3091 if (XE_IOCTL_DBG(xe, err)) {
3096 *bind_ops = &args->bind;
3099 for (i = 0; i < args->num_binds; ++i) {
3100 u64 range = (*bind_ops)[i].range;
3101 u64 addr = (*bind_ops)[i].addr;
3102 u32 op = (*bind_ops)[i].op;
3103 u32 obj = (*bind_ops)[i].obj;
3104 u64 obj_offset = (*bind_ops)[i].obj_offset;
3105 u32 region = (*bind_ops)[i].region;
3106 bool is_null = op & XE_VM_BIND_FLAG_NULL;
3109 *async = !!(op & XE_VM_BIND_FLAG_ASYNC);
3110 } else if (XE_IOCTL_DBG(xe, !*async) ||
3111 XE_IOCTL_DBG(xe, !(op & XE_VM_BIND_FLAG_ASYNC)) ||
3112 XE_IOCTL_DBG(xe, VM_BIND_OP(op) ==
3113 XE_VM_BIND_OP_RESTART)) {
3118 if (XE_IOCTL_DBG(xe, !*async &&
3119 VM_BIND_OP(op) == XE_VM_BIND_OP_UNMAP_ALL)) {
3124 if (XE_IOCTL_DBG(xe, !*async &&
3125 VM_BIND_OP(op) == XE_VM_BIND_OP_PREFETCH)) {
3130 if (XE_IOCTL_DBG(xe, VM_BIND_OP(op) >
3131 XE_VM_BIND_OP_PREFETCH) ||
3132 XE_IOCTL_DBG(xe, op & ~SUPPORTED_FLAGS) ||
3133 XE_IOCTL_DBG(xe, obj && is_null) ||
3134 XE_IOCTL_DBG(xe, obj_offset && is_null) ||
3135 XE_IOCTL_DBG(xe, VM_BIND_OP(op) != XE_VM_BIND_OP_MAP &&
3137 XE_IOCTL_DBG(xe, !obj &&
3138 VM_BIND_OP(op) == XE_VM_BIND_OP_MAP &&
3140 XE_IOCTL_DBG(xe, !obj &&
3141 VM_BIND_OP(op) == XE_VM_BIND_OP_UNMAP_ALL) ||
3142 XE_IOCTL_DBG(xe, addr &&
3143 VM_BIND_OP(op) == XE_VM_BIND_OP_UNMAP_ALL) ||
3144 XE_IOCTL_DBG(xe, range &&
3145 VM_BIND_OP(op) == XE_VM_BIND_OP_UNMAP_ALL) ||
3146 XE_IOCTL_DBG(xe, obj &&
3147 VM_BIND_OP(op) == XE_VM_BIND_OP_MAP_USERPTR) ||
3148 XE_IOCTL_DBG(xe, obj &&
3149 VM_BIND_OP(op) == XE_VM_BIND_OP_PREFETCH) ||
3150 XE_IOCTL_DBG(xe, region &&
3151 VM_BIND_OP(op) != XE_VM_BIND_OP_PREFETCH) ||
3152 XE_IOCTL_DBG(xe, !(BIT(region) &
3153 xe->info.mem_region_mask)) ||
3154 XE_IOCTL_DBG(xe, obj &&
3155 VM_BIND_OP(op) == XE_VM_BIND_OP_UNMAP)) {
3160 if (XE_IOCTL_DBG(xe, obj_offset & ~PAGE_MASK) ||
3161 XE_IOCTL_DBG(xe, addr & ~PAGE_MASK) ||
3162 XE_IOCTL_DBG(xe, range & ~PAGE_MASK) ||
3163 XE_IOCTL_DBG(xe, !range && VM_BIND_OP(op) !=
3164 XE_VM_BIND_OP_RESTART &&
3165 VM_BIND_OP(op) != XE_VM_BIND_OP_UNMAP_ALL)) {
3174 if (args->num_binds > 1)
3179 int xe_vm_bind_ioctl(struct drm_device *dev, void *data, struct drm_file *file)
3181 struct xe_device *xe = to_xe_device(dev);
3182 struct xe_file *xef = to_xe_file(file);
3183 struct drm_xe_vm_bind *args = data;
3184 struct drm_xe_sync __user *syncs_user;
3185 struct xe_bo **bos = NULL;
3186 struct drm_gpuva_ops **ops = NULL;
3188 struct xe_exec_queue *q = NULL;
3190 struct xe_sync_entry *syncs = NULL;
3191 struct drm_xe_vm_bind_op *bind_ops;
3192 LIST_HEAD(ops_list);
3197 err = vm_bind_ioctl_check_args(xe, args, &bind_ops, &async);
3201 if (args->exec_queue_id) {
3202 q = xe_exec_queue_lookup(xef, args->exec_queue_id);
3203 if (XE_IOCTL_DBG(xe, !q)) {
3208 if (XE_IOCTL_DBG(xe, !(q->flags & EXEC_QUEUE_FLAG_VM))) {
3210 goto put_exec_queue;
3214 vm = xe_vm_lookup(xef, args->vm_id);
3215 if (XE_IOCTL_DBG(xe, !vm)) {
3217 goto put_exec_queue;
3220 err = down_write_killable(&vm->lock);
3224 if (XE_IOCTL_DBG(xe, xe_vm_is_closed_or_banned(vm))) {
3226 goto release_vm_lock;
3229 if (VM_BIND_OP(bind_ops[0].op) == XE_VM_BIND_OP_RESTART) {
3230 if (XE_IOCTL_DBG(xe, !(vm->flags & XE_VM_FLAG_ASYNC_BIND_OPS)))
3232 if (XE_IOCTL_DBG(xe, !err && args->num_syncs))
3234 if (XE_IOCTL_DBG(xe, !err && !vm->async_ops.error))
3238 trace_xe_vm_restart(vm);
3239 vm_set_async_error(vm, 0);
3241 queue_work(system_unbound_wq, &vm->async_ops.work);
3243 /* Rebinds may have been blocked, give worker a kick */
3244 if (xe_vm_in_compute_mode(vm))
3245 xe_vm_queue_rebind_worker(vm);
3248 goto release_vm_lock;
3251 if (XE_IOCTL_DBG(xe, !vm->async_ops.error &&
3252 async != !!(vm->flags & XE_VM_FLAG_ASYNC_BIND_OPS))) {
3254 goto release_vm_lock;
3257 for (i = 0; i < args->num_binds; ++i) {
3258 u64 range = bind_ops[i].range;
3259 u64 addr = bind_ops[i].addr;
3261 if (XE_IOCTL_DBG(xe, range > vm->size) ||
3262 XE_IOCTL_DBG(xe, addr > vm->size - range)) {
3264 goto release_vm_lock;
3267 if (bind_ops[i].tile_mask) {
3268 u64 valid_tiles = BIT(xe->info.tile_count) - 1;
3270 if (XE_IOCTL_DBG(xe, bind_ops[i].tile_mask &
3273 goto release_vm_lock;
3278 bos = kzalloc(sizeof(*bos) * args->num_binds, GFP_KERNEL);
3281 goto release_vm_lock;
3284 ops = kzalloc(sizeof(*ops) * args->num_binds, GFP_KERNEL);
3287 goto release_vm_lock;
3290 for (i = 0; i < args->num_binds; ++i) {
3291 struct drm_gem_object *gem_obj;
3292 u64 range = bind_ops[i].range;
3293 u64 addr = bind_ops[i].addr;
3294 u32 obj = bind_ops[i].obj;
3295 u64 obj_offset = bind_ops[i].obj_offset;
3300 gem_obj = drm_gem_object_lookup(file, obj);
3301 if (XE_IOCTL_DBG(xe, !gem_obj)) {
3305 bos[i] = gem_to_xe_bo(gem_obj);
3307 if (XE_IOCTL_DBG(xe, range > bos[i]->size) ||
3308 XE_IOCTL_DBG(xe, obj_offset >
3309 bos[i]->size - range)) {
3314 if (bos[i]->flags & XE_BO_INTERNAL_64K) {
3315 if (XE_IOCTL_DBG(xe, obj_offset &
3316 XE_64K_PAGE_MASK) ||
3317 XE_IOCTL_DBG(xe, addr & XE_64K_PAGE_MASK) ||
3318 XE_IOCTL_DBG(xe, range & XE_64K_PAGE_MASK)) {
3325 if (args->num_syncs) {
3326 syncs = kcalloc(args->num_syncs, sizeof(*syncs), GFP_KERNEL);
3333 syncs_user = u64_to_user_ptr(args->syncs);
3334 for (num_syncs = 0; num_syncs < args->num_syncs; num_syncs++) {
3335 err = xe_sync_entry_parse(xe, xef, &syncs[num_syncs],
3336 &syncs_user[num_syncs], false,
3337 xe_vm_no_dma_fences(vm));
3342 /* Do some error checking first to make the unwind easier */
3343 for (i = 0; i < args->num_binds; ++i) {
3344 u64 range = bind_ops[i].range;
3345 u64 addr = bind_ops[i].addr;
3346 u32 op = bind_ops[i].op;
3348 err = vm_bind_ioctl_lookup_vma(vm, bos[i], addr, range, op);
3353 for (i = 0; i < args->num_binds; ++i) {
3354 u64 range = bind_ops[i].range;
3355 u64 addr = bind_ops[i].addr;
3356 u32 op = bind_ops[i].op;
3357 u64 obj_offset = bind_ops[i].obj_offset;
3358 u8 tile_mask = bind_ops[i].tile_mask;
3359 u32 region = bind_ops[i].region;
3361 ops[i] = vm_bind_ioctl_ops_create(vm, bos[i], obj_offset,
3362 addr, range, op, tile_mask,
3364 if (IS_ERR(ops[i])) {
3365 err = PTR_ERR(ops[i]);
3370 err = vm_bind_ioctl_ops_parse(vm, q, ops[i], syncs, num_syncs,
3372 i == args->num_binds - 1,
3379 if (list_empty(&ops_list)) {
3384 err = vm_bind_ioctl_ops_execute(vm, &ops_list, async);
3385 up_write(&vm->lock);
3387 for (i = 0; i < args->num_binds; ++i)
3392 if (args->num_binds > 1)
3398 vm_bind_ioctl_ops_unwind(vm, ops, args->num_binds);
3400 for (i = 0; err == -ENODATA && i < num_syncs; i++)
3401 xe_sync_entry_signal(&syncs[i], NULL, dma_fence_get_stub());
3403 xe_sync_entry_cleanup(&syncs[num_syncs]);
3407 for (i = 0; i < args->num_binds; ++i)
3410 up_write(&vm->lock);
3415 xe_exec_queue_put(q);
3419 if (args->num_binds > 1)
3421 return err == -ENODATA ? 0 : err;
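/*
 * Editor's note: illustrative userspace sketch only, not driver code.
 * Field names follow the uAPI structs used above; the exact ioctl macro
 * and any newer uAPI revisions should be checked against xe_drm.h. With
 * num_binds == 1 the single op is passed inline in args.bind; larger
 * arrays go through args.vector_of_binds.
 *
 *	struct drm_xe_vm_bind args = {
 *		.vm_id = vm_id,
 *		.exec_queue_id = 0,
 *		.num_binds = 1,
 *		.bind = {
 *			.obj = bo_handle,
 *			.obj_offset = 0,
 *			.addr = gpu_va,
 *			.range = bo_size,
 *			.op = XE_VM_BIND_OP_MAP | XE_VM_BIND_FLAG_ASYNC,
 *		},
 *		.num_syncs = 0,
 *	};
 *
 *	err = ioctl(drm_fd, DRM_IOCTL_XE_VM_BIND, &args);
 *
 * Whether XE_VM_BIND_FLAG_ASYNC is required must match how the VM was
 * created (see the XE_VM_FLAG_ASYNC_BIND_OPS check above).
 */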
3425 * xe_vm_lock() - Lock the vm's dma_resv object
3426 * @vm: The struct xe_vm whose lock is to be locked
3427 * @intr: Whether to perform any waits interruptibly
3429 * Return: 0 on success, -EINTR if @intr is true and the wait for a
3430 * contended lock was interrupted. If @intr is false, the function
3433 int xe_vm_lock(struct xe_vm *vm, bool intr)
3436 return dma_resv_lock_interruptible(xe_vm_resv(vm), NULL);
3438 return dma_resv_lock(xe_vm_resv(vm), NULL);
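/*
 * Editor's note: typical (illustrative) usage:
 *
 *	err = xe_vm_lock(vm, true);
 *	if (err)
 *		return err;
 *	... touch state protected by the VM's dma_resv ...
 *	xe_vm_unlock(vm);
 */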
3442 * xe_vm_unlock() - Unlock the vm's dma_resv object
3443 * @vm: The struct xe_vm whose lock is to be released.
3445 * Unlock the vm's dma_resv object that was locked by xe_vm_lock().
3447 void xe_vm_unlock(struct xe_vm *vm)
3449 dma_resv_unlock(xe_vm_resv(vm));
3453 * xe_vm_invalidate_vma - invalidate GPU mappings for VMA without a lock
3454 * @vma: VMA to invalidate
3456 * Walks the list of page-table leaves, zeroing the entries owned by this
3457 * VMA, invalidates the TLBs, and blocks until the TLB invalidation is
3460 * Return: 0 for success, negative error code otherwise.
3462 int xe_vm_invalidate_vma(struct xe_vma *vma)
3464 struct xe_device *xe = xe_vma_vm(vma)->xe;
3465 struct xe_tile *tile;
3466 u32 tile_needs_invalidate = 0;
3467 int seqno[XE_MAX_TILES_PER_DEVICE];
3471 XE_WARN_ON(!xe_vm_in_fault_mode(xe_vma_vm(vma)));
3472 XE_WARN_ON(xe_vma_is_null(vma));
3473 trace_xe_vma_usm_invalidate(vma);
3475 /* Check that we don't race with page-table updates */
3476 if (IS_ENABLED(CONFIG_PROVE_LOCKING)) {
3477 if (xe_vma_is_userptr(vma)) {
3478 WARN_ON_ONCE(!mmu_interval_check_retry
3479 (&vma->userptr.notifier,
3480 vma->userptr.notifier_seq));
3481 WARN_ON_ONCE(!dma_resv_test_signaled(xe_vm_resv(xe_vma_vm(vma)),
3482 DMA_RESV_USAGE_BOOKKEEP));
3485 xe_bo_assert_held(xe_vma_bo(vma));
3489 for_each_tile(tile, xe, id) {
3490 if (xe_pt_zap_ptes(tile, vma)) {
3491 tile_needs_invalidate |= BIT(id);
3494 * FIXME: We potentially need to invalidate multiple
3495 * GTs within the tile
3497 seqno[id] = xe_gt_tlb_invalidation_vma(tile->primary_gt, NULL, vma);
3503 for_each_tile(tile, xe, id) {
3504 if (tile_needs_invalidate & BIT(id)) {
3505 ret = xe_gt_tlb_invalidation_wait(tile->primary_gt, seqno[id]);
3511 vma->usm.tile_invalidated = vma->tile_mask;
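/*
 * xe_analyze_vm() dumps the VM's page-table root address and each VMA's
 * range and backing (NULL, userptr, VRAM or SYS) for a debug capture,
 * using only a trylock on the VM lock.
 */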
3516 int xe_analyze_vm(struct drm_printer *p, struct xe_vm *vm, int gt_id)
3518 struct drm_gpuva *gpuva;
3522 if (!down_read_trylock(&vm->lock)) {
3523 drm_printf(p, " Failed to acquire VM lock to dump capture\n");
3526 if (vm->pt_root[gt_id]) {
3527 addr = xe_bo_addr(vm->pt_root[gt_id]->bo, 0, XE_PAGE_SIZE);
3528 is_vram = xe_bo_is_vram(vm->pt_root[gt_id]->bo);
3529 drm_printf(p, " VM root: A:0x%llx %s\n", addr,
3530 is_vram ? "VRAM" : "SYS");
3533 drm_gpuvm_for_each_va(gpuva, &vm->gpuvm) {
3534 struct xe_vma *vma = gpuva_to_vma(gpuva);
3535 bool is_userptr = xe_vma_is_userptr(vma);
3536 bool is_null = xe_vma_is_null(vma);
3540 } else if (is_userptr) {
3541 struct xe_res_cursor cur;
3543 if (vma->userptr.sg) {
3544 xe_res_first_sg(vma->userptr.sg, 0, XE_PAGE_SIZE,
3546 addr = xe_res_dma(&cur);
3551 addr = __xe_bo_addr(xe_vma_bo(vma), 0, XE_PAGE_SIZE);
3552 is_vram = xe_bo_is_vram(xe_vma_bo(vma));
3554 drm_printf(p, " [%016llx-%016llx] S:0x%016llx A:%016llx %s\n",
3555 xe_vma_start(vma), xe_vma_end(vma) - 1,
3557 addr, is_null ? "NULL" : is_userptr ? "USR" :
3558 is_vram ? "VRAM" : "SYS");