1 // SPDX-License-Identifier: MIT
3 * Copyright © 2021 Intel Corporation
8 #include <linux/dma-fence-array.h>
10 #include <drm/ttm/ttm_execbuf_util.h>
11 #include <drm/ttm/ttm_tt.h>
12 #include <drm/xe_drm.h>
13 #include <linux/delay.h>
14 #include <linux/kthread.h>
16 #include <linux/swap.h>
19 #include "xe_device.h"
20 #include "xe_engine.h"
22 #include "xe_gt_pagefault.h"
23 #include "xe_gt_tlb_invalidation.h"
24 #include "xe_migrate.h"
26 #include "xe_preempt_fence.h"
28 #include "xe_res_cursor.h"
32 #define TEST_VM_ASYNC_OPS_ERROR
35 * xe_vma_userptr_check_repin() - Advisory check for repin needed
36 * @vma: The userptr vma
38 * Check if the userptr vma has been invalidated since last successful
39 * repin. The check is advisory only and the function can be called
40 * without the vm->userptr.notifier_lock held. There is no guarantee that the
41 * vma userptr will remain valid after a lockless check, so typically
42 * the call needs to be followed by a proper check under the notifier_lock.
44 * Return: 0 if userptr vma is valid, -EAGAIN otherwise; repin recommended.
46 int xe_vma_userptr_check_repin(struct xe_vma *vma)
48 return mmu_interval_check_retry(&vma->userptr.notifier,
49 vma->userptr.notifier_seq) ?
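/*
 * Illustrative sketch, not part of this file: a typical caller pattern for
 * the advisory check above, assuming vm->lock is already held. The lockless
 * result is only trusted after it is confirmed under the notifier_lock via
 * __xe_vm_userptr_needs_repin().
 *
 *	if (xe_vma_userptr_check_repin(vma) == -EAGAIN) {
 *		err = xe_vma_userptr_pin_pages(vma);
 *		if (err)
 *			return err;
 *	}
 *
 *	down_read(&vm->userptr.notifier_lock);
 *	err = __xe_vm_userptr_needs_repin(vm);
 *	up_read(&vm->userptr.notifier_lock);
 */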
53 int xe_vma_userptr_pin_pages(struct xe_vma *vma)
55 struct xe_vm *vm = vma->vm;
56 struct xe_device *xe = vm->xe;
57 const unsigned long num_pages =
58 (vma->end - vma->start + 1) >> PAGE_SHIFT;
60 bool in_kthread = !current->mm;
61 unsigned long notifier_seq;
63 bool read_only = vma->pte_flags & XE_PTE_READ_ONLY;
65 lockdep_assert_held(&vm->lock);
66 XE_BUG_ON(!xe_vma_is_userptr(vma));
71 notifier_seq = mmu_interval_read_begin(&vma->userptr.notifier);
72 if (notifier_seq == vma->userptr.notifier_seq)
75 pages = kvmalloc_array(num_pages, sizeof(*pages), GFP_KERNEL);
79 if (vma->userptr.sg) {
80 dma_unmap_sgtable(xe->drm.dev,
82 read_only ? DMA_TO_DEVICE :
83 DMA_BIDIRECTIONAL, 0);
84 sg_free_table(vma->userptr.sg);
85 vma->userptr.sg = NULL;
90 if (!mmget_not_zero(vma->userptr.notifier.mm)) {
94 kthread_use_mm(vma->userptr.notifier.mm);
97 while (pinned < num_pages) {
98 ret = get_user_pages_fast(vma->userptr.ptr + pinned * PAGE_SIZE,
100 read_only ? 0 : FOLL_WRITE,
113 kthread_unuse_mm(vma->userptr.notifier.mm);
114 mmput(vma->userptr.notifier.mm);
120 ret = sg_alloc_table_from_pages(&vma->userptr.sgt, pages, pinned,
121 0, (u64)pinned << PAGE_SHIFT,
124 vma->userptr.sg = NULL;
127 vma->userptr.sg = &vma->userptr.sgt;
129 ret = dma_map_sgtable(xe->drm.dev, vma->userptr.sg,
130 read_only ? DMA_TO_DEVICE :
132 DMA_ATTR_SKIP_CPU_SYNC |
133 DMA_ATTR_NO_KERNEL_MAPPING);
135 sg_free_table(vma->userptr.sg);
136 vma->userptr.sg = NULL;
140 for (i = 0; i < pinned; ++i) {
143 set_page_dirty(pages[i]);
144 unlock_page(pages[i]);
147 mark_page_accessed(pages[i]);
151 release_pages(pages, pinned);
155 vma->userptr.notifier_seq = notifier_seq;
156 if (xe_vma_userptr_check_repin(vma) == -EAGAIN)
160 return ret < 0 ? ret : 0;
163 static bool preempt_fences_waiting(struct xe_vm *vm)
167 lockdep_assert_held(&vm->lock);
168 xe_vm_assert_held(vm);
170 list_for_each_entry(e, &vm->preempt.engines, compute.link) {
171 if (!e->compute.pfence || (e->compute.pfence &&
172 test_bit(DMA_FENCE_FLAG_ENABLE_SIGNAL_BIT,
173 &e->compute.pfence->flags))) {
181 static void free_preempt_fences(struct list_head *list)
183 struct list_head *link, *next;
185 list_for_each_safe(link, next, list)
186 xe_preempt_fence_free(to_preempt_fence_from_link(link));
189 static int alloc_preempt_fences(struct xe_vm *vm, struct list_head *list,
192 lockdep_assert_held(&vm->lock);
193 xe_vm_assert_held(vm);
195 if (*count >= vm->preempt.num_engines)
198 for (; *count < vm->preempt.num_engines; ++(*count)) {
199 struct xe_preempt_fence *pfence = xe_preempt_fence_alloc();
202 return PTR_ERR(pfence);
204 list_move_tail(xe_preempt_fence_link(pfence), list);
210 static int wait_for_existing_preempt_fences(struct xe_vm *vm)
214 xe_vm_assert_held(vm);
216 list_for_each_entry(e, &vm->preempt.engines, compute.link) {
217 if (e->compute.pfence) {
218 long timeout = dma_fence_wait(e->compute.pfence, false);
222 dma_fence_put(e->compute.pfence);
223 e->compute.pfence = NULL;
230 static bool xe_vm_is_idle(struct xe_vm *vm)
234 xe_vm_assert_held(vm);
235 list_for_each_entry(e, &vm->preempt.engines, compute.link) {
236 if (!xe_engine_is_idle(e))
243 static void arm_preempt_fences(struct xe_vm *vm, struct list_head *list)
245 struct list_head *link;
248 list_for_each_entry(e, &vm->preempt.engines, compute.link) {
249 struct dma_fence *fence;
252 XE_BUG_ON(link == list);
254 fence = xe_preempt_fence_arm(to_preempt_fence_from_link(link),
255 e, e->compute.context,
257 dma_fence_put(e->compute.pfence);
258 e->compute.pfence = fence;
262 static int add_preempt_fences(struct xe_vm *vm, struct xe_bo *bo)
265 struct ww_acquire_ctx ww;
268 err = xe_bo_lock(bo, &ww, vm->preempt.num_engines, true);
272 list_for_each_entry(e, &vm->preempt.engines, compute.link)
273 if (e->compute.pfence) {
274 dma_resv_add_fence(bo->ttm.base.resv,
276 DMA_RESV_USAGE_BOOKKEEP);
279 xe_bo_unlock(bo, &ww);
284 * xe_vm_fence_all_extobjs() - Add a fence to vm's external objects' resv
286 * @fence: The fence to add.
287 * @usage: The resv usage for the fence.
289 * Loops over all of the vm's external object bindings and adds a @fence
290 * with the given @usage to all of the external object's reservation
293 void xe_vm_fence_all_extobjs(struct xe_vm *vm, struct dma_fence *fence,
294 enum dma_resv_usage usage)
298 list_for_each_entry(vma, &vm->extobj.list, extobj.link)
299 dma_resv_add_fence(vma->bo->ttm.base.resv, fence, usage);
302 static void resume_and_reinstall_preempt_fences(struct xe_vm *vm)
306 lockdep_assert_held(&vm->lock);
307 xe_vm_assert_held(vm);
309 list_for_each_entry(e, &vm->preempt.engines, compute.link) {
312 dma_resv_add_fence(&vm->resv, e->compute.pfence,
313 DMA_RESV_USAGE_BOOKKEEP);
314 xe_vm_fence_all_extobjs(vm, e->compute.pfence,
315 DMA_RESV_USAGE_BOOKKEEP);
319 int xe_vm_add_compute_engine(struct xe_vm *vm, struct xe_engine *e)
321 struct ttm_validate_buffer tv_onstack[XE_ONSTACK_TV];
322 struct ttm_validate_buffer *tv;
323 struct ww_acquire_ctx ww;
324 struct list_head objs;
325 struct dma_fence *pfence;
329 XE_BUG_ON(!xe_vm_in_compute_mode(vm));
331 down_write(&vm->lock);
333 err = xe_vm_lock_dma_resv(vm, &ww, tv_onstack, &tv, &objs, true, 1);
335 goto out_unlock_outer;
337 pfence = xe_preempt_fence_create(e, e->compute.context,
344 list_add(&e->compute.link, &vm->preempt.engines);
345 ++vm->preempt.num_engines;
346 e->compute.pfence = pfence;
348 down_read(&vm->userptr.notifier_lock);
350 dma_resv_add_fence(&vm->resv, pfence,
351 DMA_RESV_USAGE_BOOKKEEP);
353 xe_vm_fence_all_extobjs(vm, pfence, DMA_RESV_USAGE_BOOKKEEP);
356 * Check to see if a preemption on the VM or a userptr invalidation is
357 * in flight; if so, trigger this preempt fence to sync state with the
358 * other preempt fences on the VM.
360 wait = __xe_vm_userptr_needs_repin(vm) || preempt_fences_waiting(vm);
362 dma_fence_enable_sw_signaling(pfence);
364 up_read(&vm->userptr.notifier_lock);
367 xe_vm_unlock_dma_resv(vm, tv_onstack, tv, &ww, &objs);
375 * __xe_vm_userptr_needs_repin() - Check whether the VM does have userptrs
376 * that need repinning.
379 * This function checks for whether the VM has userptrs that need repinning,
380 * and provides a release-type barrier on the userptr.notifier_lock after
383 * Return: 0 if there are no userptrs needing repinning, -EAGAIN if there are.
385 int __xe_vm_userptr_needs_repin(struct xe_vm *vm)
387 lockdep_assert_held_read(&vm->userptr.notifier_lock);
389 return (list_empty(&vm->userptr.repin_list) &&
390 list_empty(&vm->userptr.invalidated)) ? 0 : -EAGAIN;
394 * xe_vm_lock_dma_resv() - Lock the vm dma_resv object and the dma_resv
395 * objects of the vm's external buffer objects.
397 * @ww: Pointer to a struct ww_acquire_ctx locking context.
398 * @tv_onstack: Array size XE_ONSTACK_TV of storage for the struct
399 * ttm_validate_buffers used for locking.
400 * @tv: Pointer to a pointer that on output contains the actual storage used.
401 * @objs: List head for the buffer objects locked.
402 * @intr: Whether to lock interruptible.
403 * @num_shared: Number of dma-fence slots to reserve in the locked objects.
405 * Locks the vm dma-resv objects and all the dma-resv objects of the
406 * buffer objects on the vm external object list. The TTM utilities require
407 * a list of struct ttm_validate_buffers pointing to the actual buffer
408 * objects to lock. Storage for those struct ttm_validate_buffers should
409 * be provided in @tv_onstack, and is typically reserved on the stack
410 * of the caller. If the size of @tv_onstack isn't sufficient, then
411 * storage will be allocated internally using kvmalloc().
413 * The function performs deadlock handling internally, and after a
414 * successful return the ww locking transaction should be considered
417 * Return: 0 on success, negative error code on error. In particular if
418 * @intr is set to true, -EINTR or -ERESTARTSYS may be returned. In case
419 * of error, any locking performed has been reverted.
421 int xe_vm_lock_dma_resv(struct xe_vm *vm, struct ww_acquire_ctx *ww,
422 struct ttm_validate_buffer *tv_onstack,
423 struct ttm_validate_buffer **tv,
424 struct list_head *objs,
426 unsigned int num_shared)
428 struct ttm_validate_buffer *tv_vm, *tv_bo;
429 struct xe_vma *vma, *next;
433 lockdep_assert_held(&vm->lock);
435 if (vm->extobj.entries < XE_ONSTACK_TV) {
438 tv_vm = kvmalloc_array(vm->extobj.entries + 1, sizeof(*tv_vm),
445 INIT_LIST_HEAD(objs);
446 list_for_each_entry(vma, &vm->extobj.list, extobj.link) {
447 tv_bo->num_shared = num_shared;
448 tv_bo->bo = &vma->bo->ttm;
450 list_add_tail(&tv_bo->head, objs);
453 tv_vm->num_shared = num_shared;
454 tv_vm->bo = xe_vm_ttm_bo(vm);
455 list_add_tail(&tv_vm->head, objs);
456 err = ttm_eu_reserve_buffers(ww, objs, intr, &dups);
460 spin_lock(&vm->notifier.list_lock);
461 list_for_each_entry_safe(vma, next, &vm->notifier.rebind_list,
462 notifier.rebind_link) {
463 xe_bo_assert_held(vma->bo);
465 list_del_init(&vma->notifier.rebind_link);
466 if (vma->gt_present && !vma->destroyed)
467 list_move_tail(&vma->rebind_link, &vm->rebind_list);
469 spin_unlock(&vm->notifier.list_lock);
475 if (tv_vm != tv_onstack)
482 * xe_vm_unlock_dma_resv() - Unlock reservation objects locked by
483 * xe_vm_lock_dma_resv()
485 * @tv_onstack: The @tv_onstack array given to xe_vm_lock_dma_resv().
486 * @tv: The value of *@tv given by xe_vm_lock_dma_resv().
487 * @ww: The ww_acquire_context used for locking.
488 * @objs: The list returned from xe_vm_lock_dma_resv().
490 * Unlocks the reservation objects and frees any memory allocated by
491 * xe_vm_lock_dma_resv().
493 void xe_vm_unlock_dma_resv(struct xe_vm *vm,
494 struct ttm_validate_buffer *tv_onstack,
495 struct ttm_validate_buffer *tv,
496 struct ww_acquire_ctx *ww,
497 struct list_head *objs)
500 * Nothing should've been able to enter the list while we were locked,
501 * since we've held the dma-resvs of all the vm's external objects,
502 * and holding the dma_resv of an object is required for list
503 * addition, and we shouldn't add ourselves.
505 XE_WARN_ON(!list_empty(&vm->notifier.rebind_list));
507 ttm_eu_backoff_reservation(ww, objs);
508 if (tv && tv != tv_onstack)
512 #define XE_VM_REBIND_RETRY_TIMEOUT_MS 1000
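/*
 * Illustrative sketch, not part of this file: the expected pairing of
 * xe_vm_lock_dma_resv() and xe_vm_unlock_dma_resv(), mirroring what the
 * rebind worker below does. Error handling is abbreviated; the
 * XE_ONSTACK_TV-sized array lives on the caller's stack.
 *
 *	struct ttm_validate_buffer tv_onstack[XE_ONSTACK_TV];
 *	struct ttm_validate_buffer *tv;
 *	struct ww_acquire_ctx ww;
 *	struct list_head objs;
 *	int err;
 *
 *	err = xe_vm_lock_dma_resv(vm, &ww, tv_onstack, &tv, &objs,
 *				  false, vm->preempt.num_engines);
 *	if (err)
 *		return err;
 *
 *	... operate on the vm and its external BOs ...
 *
 *	xe_vm_unlock_dma_resv(vm, tv_onstack, tv, &ww, &objs);
 */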
514 static void preempt_rebind_work_func(struct work_struct *w)
516 struct xe_vm *vm = container_of(w, struct xe_vm, preempt.rebind_work);
518 struct ttm_validate_buffer tv_onstack[XE_ONSTACK_TV];
519 struct ttm_validate_buffer *tv;
520 struct ww_acquire_ctx ww;
521 struct list_head objs;
522 struct dma_fence *rebind_fence;
523 unsigned int fence_count = 0;
524 LIST_HEAD(preempt_fences);
528 int __maybe_unused tries = 0;
530 XE_BUG_ON(!xe_vm_in_compute_mode(vm));
531 trace_xe_vm_rebind_worker_enter(vm);
533 if (xe_vm_is_closed(vm)) {
534 trace_xe_vm_rebind_worker_exit(vm);
538 down_write(&vm->lock);
541 if (vm->async_ops.error)
542 goto out_unlock_outer;
545 * Extreme corner where we exit a VM error state with a munmap style VM
546 * unbind inflight which requires a rebind. In this case the rebind
547 * needs to install some fences into the dma-resv slots. The worker to
548 * do this is queued; let that worker make progress by dropping vm->lock
549 * and trying this again.
551 if (vm->async_ops.munmap_rebind_inflight) {
553 flush_work(&vm->async_ops.work);
557 if (xe_vm_userptr_check_repin(vm)) {
558 err = xe_vm_userptr_pin(vm);
560 goto out_unlock_outer;
563 err = xe_vm_lock_dma_resv(vm, &ww, tv_onstack, &tv, &objs,
564 false, vm->preempt.num_engines);
566 goto out_unlock_outer;
568 if (xe_vm_is_idle(vm)) {
569 vm->preempt.rebind_deactivated = true;
573 /* Fresh preempt fences already installed. Everything is running. */
574 if (!preempt_fences_waiting(vm))
578 * This makes sure vm is completely suspended and also balances
579 * xe_engine suspend- and resume; we resume *all* vm engines below.
581 err = wait_for_existing_preempt_fences(vm);
585 err = alloc_preempt_fences(vm, &preempt_fences, &fence_count);
589 list_for_each_entry(vma, &vm->rebind_list, rebind_link) {
590 if (xe_vma_is_userptr(vma) || vma->destroyed)
593 err = xe_bo_validate(vma->bo, vm, false);
598 rebind_fence = xe_vm_rebind(vm, true);
599 if (IS_ERR(rebind_fence)) {
600 err = PTR_ERR(rebind_fence);
605 dma_fence_wait(rebind_fence, false);
606 dma_fence_put(rebind_fence);
609 /* Wait on munmap style VM unbinds */
610 wait = dma_resv_wait_timeout(&vm->resv,
611 DMA_RESV_USAGE_KERNEL,
612 false, MAX_SCHEDULE_TIMEOUT);
618 #define retry_required(__tries, __vm) \
619 (IS_ENABLED(CONFIG_DRM_XE_USERPTR_INVAL_INJECT) ? \
620 (!(__tries)++ || __xe_vm_userptr_needs_repin(__vm)) : \
621 __xe_vm_userptr_needs_repin(__vm))
623 down_read(&vm->userptr.notifier_lock);
624 if (retry_required(tries, vm)) {
625 up_read(&vm->userptr.notifier_lock);
630 #undef retry_required
632 /* Point of no return. */
633 arm_preempt_fences(vm, &preempt_fences);
634 resume_and_reinstall_preempt_fences(vm);
635 up_read(&vm->userptr.notifier_lock);
638 xe_vm_unlock_dma_resv(vm, tv_onstack, tv, &ww, &objs);
640 if (err == -EAGAIN) {
641 trace_xe_vm_rebind_worker_retry(vm);
646 * With multiple active VMs, under memory pressure, it is possible that
647 * ttm_bo_validate() runs into -EDEADLK and in such a case returns -ENOMEM.
648 * Until ttm properly handles locking in such scenarios, the best thing the
649 * driver can do is retry with a timeout. Killing the VM or putting it
650 * in an error state after a timeout or other error scenarios is still TBD.
652 if (err == -ENOMEM) {
653 ktime_t cur = ktime_get();
655 end = end ? : ktime_add_ms(cur, XE_VM_REBIND_RETRY_TIMEOUT_MS);
656 if (ktime_before(cur, end)) {
658 trace_xe_vm_rebind_worker_retry(vm);
664 free_preempt_fences(&preempt_fences);
666 XE_WARN_ON(err < 0); /* TODO: Kill VM or put in error state */
667 trace_xe_vm_rebind_worker_exit(vm);
670 struct async_op_fence;
671 static int __xe_vm_bind(struct xe_vm *vm, struct xe_vma *vma,
672 struct xe_engine *e, struct xe_sync_entry *syncs,
673 u32 num_syncs, struct async_op_fence *afence);
675 static bool vma_userptr_invalidate(struct mmu_interval_notifier *mni,
676 const struct mmu_notifier_range *range,
677 unsigned long cur_seq)
679 struct xe_vma *vma = container_of(mni, struct xe_vma, userptr.notifier);
680 struct xe_vm *vm = vma->vm;
681 struct dma_resv_iter cursor;
682 struct dma_fence *fence;
685 XE_BUG_ON(!xe_vma_is_userptr(vma));
686 trace_xe_vma_userptr_invalidate(vma);
688 if (!mmu_notifier_range_blockable(range))
691 down_write(&vm->userptr.notifier_lock);
692 mmu_interval_set_seq(mni, cur_seq);
694 /* No need to stop gpu access if the userptr is not yet bound. */
695 if (!vma->userptr.initial_bind) {
696 up_write(&vm->userptr.notifier_lock);
701 * Tell exec and rebind worker they need to repin and rebind this
704 if (!xe_vm_in_fault_mode(vm) && !vma->destroyed && vma->gt_present) {
705 spin_lock(&vm->userptr.invalidated_lock);
706 list_move_tail(&vma->userptr.invalidate_link,
707 &vm->userptr.invalidated);
708 spin_unlock(&vm->userptr.invalidated_lock);
711 up_write(&vm->userptr.notifier_lock);
714 * Preempt fences turn into schedule disables, pipeline these.
715 * Note that even in fault mode, we need to wait for binds and
716 * unbinds to complete, and those are attached as BOOKKEEP fences
719 dma_resv_iter_begin(&cursor, &vm->resv,
720 DMA_RESV_USAGE_BOOKKEEP);
721 dma_resv_for_each_fence_unlocked(&cursor, fence)
722 dma_fence_enable_sw_signaling(fence);
723 dma_resv_iter_end(&cursor);
725 err = dma_resv_wait_timeout(&vm->resv,
726 DMA_RESV_USAGE_BOOKKEEP,
727 false, MAX_SCHEDULE_TIMEOUT);
728 XE_WARN_ON(err <= 0);
730 if (xe_vm_in_fault_mode(vm)) {
731 err = xe_vm_invalidate_vma(vma);
735 trace_xe_vma_userptr_invalidate_complete(vma);
740 static const struct mmu_interval_notifier_ops vma_userptr_notifier_ops = {
741 .invalidate = vma_userptr_invalidate,
744 int xe_vm_userptr_pin(struct xe_vm *vm)
746 struct xe_vma *vma, *next;
748 LIST_HEAD(tmp_evict);
750 lockdep_assert_held_write(&vm->lock);
752 /* Collect invalidated userptrs */
753 spin_lock(&vm->userptr.invalidated_lock);
754 list_for_each_entry_safe(vma, next, &vm->userptr.invalidated,
755 userptr.invalidate_link) {
756 list_del_init(&vma->userptr.invalidate_link);
757 list_move_tail(&vma->userptr_link, &vm->userptr.repin_list);
759 spin_unlock(&vm->userptr.invalidated_lock);
761 /* Pin and move to temporary list */
762 list_for_each_entry_safe(vma, next, &vm->userptr.repin_list, userptr_link) {
763 err = xe_vma_userptr_pin_pages(vma);
767 list_move_tail(&vma->userptr_link, &tmp_evict);
770 /* Take lock and move to rebind_list for rebinding. */
771 err = dma_resv_lock_interruptible(&vm->resv, NULL);
775 list_for_each_entry_safe(vma, next, &tmp_evict, userptr_link) {
776 list_del_init(&vma->userptr_link);
777 list_move_tail(&vma->rebind_link, &vm->rebind_list);
780 dma_resv_unlock(&vm->resv);
785 list_splice_tail(&tmp_evict, &vm->userptr.repin_list);
791 * xe_vm_userptr_check_repin() - Check whether the VM might have userptrs
792 * that need repinning.
795 * This function does an advisory check for whether the VM has userptrs that
798 * Return: 0 if there are no indications of userptrs needing repinning,
799 * -EAGAIN if there are.
801 int xe_vm_userptr_check_repin(struct xe_vm *vm)
803 return (list_empty_careful(&vm->userptr.repin_list) &&
804 list_empty_careful(&vm->userptr.invalidated)) ? 0 : -EAGAIN;
807 static struct dma_fence *
808 xe_vm_bind_vma(struct xe_vma *vma, struct xe_engine *e,
809 struct xe_sync_entry *syncs, u32 num_syncs);
811 struct dma_fence *xe_vm_rebind(struct xe_vm *vm, bool rebind_worker)
813 struct dma_fence *fence = NULL;
814 struct xe_vma *vma, *next;
816 lockdep_assert_held(&vm->lock);
817 if (xe_vm_no_dma_fences(vm) && !rebind_worker)
820 xe_vm_assert_held(vm);
821 list_for_each_entry_safe(vma, next, &vm->rebind_list, rebind_link) {
822 XE_WARN_ON(!vma->gt_present);
824 list_del_init(&vma->rebind_link);
825 dma_fence_put(fence);
827 trace_xe_vma_rebind_worker(vma);
829 trace_xe_vma_rebind_exec(vma);
830 fence = xe_vm_bind_vma(vma, NULL, NULL, 0);
838 static struct xe_vma *xe_vma_create(struct xe_vm *vm,
840 u64 bo_offset_or_userptr,
849 XE_BUG_ON(start >= end);
850 XE_BUG_ON(end >= vm->size);
852 vma = kzalloc(sizeof(*vma), GFP_KERNEL);
854 vma = ERR_PTR(-ENOMEM);
858 INIT_LIST_HEAD(&vma->rebind_link);
859 INIT_LIST_HEAD(&vma->unbind_link);
860 INIT_LIST_HEAD(&vma->userptr_link);
861 INIT_LIST_HEAD(&vma->userptr.invalidate_link);
862 INIT_LIST_HEAD(&vma->notifier.rebind_link);
863 INIT_LIST_HEAD(&vma->extobj.link);
869 vma->pte_flags = XE_PTE_READ_ONLY;
872 vma->gt_mask = gt_mask;
874 for_each_gt(gt, vm->xe, id)
875 if (!xe_gt_is_media_type(gt))
876 vma->gt_mask |= 0x1 << id;
879 if (vm->xe->info.platform == XE_PVC)
880 vma->use_atomic_access_pte_bit = true;
883 xe_bo_assert_held(bo);
884 vma->bo_offset = bo_offset_or_userptr;
885 vma->bo = xe_bo_get(bo);
886 list_add_tail(&vma->bo_link, &bo->vmas);
887 } else /* userptr */ {
888 u64 size = end - start + 1;
891 vma->userptr.ptr = bo_offset_or_userptr;
893 err = mmu_interval_notifier_insert(&vma->userptr.notifier,
895 vma->userptr.ptr, size,
896 &vma_userptr_notifier_ops);
903 vma->userptr.notifier_seq = LONG_MAX;
910 static bool vm_remove_extobj(struct xe_vma *vma)
912 if (!list_empty(&vma->extobj.link)) {
913 vma->vm->extobj.entries--;
914 list_del_init(&vma->extobj.link);
920 static void xe_vma_destroy_late(struct xe_vma *vma)
922 struct xe_vm *vm = vma->vm;
923 struct xe_device *xe = vm->xe;
924 bool read_only = vma->pte_flags & XE_PTE_READ_ONLY;
926 if (xe_vma_is_userptr(vma)) {
927 if (vma->userptr.sg) {
928 dma_unmap_sgtable(xe->drm.dev,
930 read_only ? DMA_TO_DEVICE :
931 DMA_BIDIRECTIONAL, 0);
932 sg_free_table(vma->userptr.sg);
933 vma->userptr.sg = NULL;
937 * Since userptr pages are not pinned, we can't remove
938 * the notifier until we're sure the GPU is not accessing
941 mmu_interval_notifier_remove(&vma->userptr.notifier);
950 static void vma_destroy_work_func(struct work_struct *w)
953 container_of(w, struct xe_vma, destroy_work);
955 xe_vma_destroy_late(vma);
958 static struct xe_vma *
959 bo_has_vm_references_locked(struct xe_bo *bo, struct xe_vm *vm,
960 struct xe_vma *ignore)
964 list_for_each_entry(vma, &bo->vmas, bo_link) {
965 if (vma != ignore && vma->vm == vm && !vma->destroyed)
972 static bool bo_has_vm_references(struct xe_bo *bo, struct xe_vm *vm,
973 struct xe_vma *ignore)
975 struct ww_acquire_ctx ww;
978 xe_bo_lock(bo, &ww, 0, false);
979 ret = !!bo_has_vm_references_locked(bo, vm, ignore);
980 xe_bo_unlock(bo, &ww);
985 static void __vm_insert_extobj(struct xe_vm *vm, struct xe_vma *vma)
987 list_add(&vma->extobj.link, &vm->extobj.list);
988 vm->extobj.entries++;
991 static void vm_insert_extobj(struct xe_vm *vm, struct xe_vma *vma)
993 struct xe_bo *bo = vma->bo;
995 lockdep_assert_held_write(&vm->lock);
997 if (bo_has_vm_references(bo, vm, vma))
1000 __vm_insert_extobj(vm, vma);
1003 static void vma_destroy_cb(struct dma_fence *fence,
1004 struct dma_fence_cb *cb)
1006 struct xe_vma *vma = container_of(cb, struct xe_vma, destroy_cb);
1008 INIT_WORK(&vma->destroy_work, vma_destroy_work_func);
1009 queue_work(system_unbound_wq, &vma->destroy_work);
1012 static void xe_vma_destroy(struct xe_vma *vma, struct dma_fence *fence)
1014 struct xe_vm *vm = vma->vm;
1016 lockdep_assert_held_write(&vm->lock);
1017 XE_BUG_ON(!list_empty(&vma->unbind_link));
1019 if (xe_vma_is_userptr(vma)) {
1020 XE_WARN_ON(!vma->destroyed);
1021 spin_lock(&vm->userptr.invalidated_lock);
1022 list_del_init(&vma->userptr.invalidate_link);
1023 spin_unlock(&vm->userptr.invalidated_lock);
1024 list_del(&vma->userptr_link);
1026 xe_bo_assert_held(vma->bo);
1027 list_del(&vma->bo_link);
1029 spin_lock(&vm->notifier.list_lock);
1030 list_del(&vma->notifier.rebind_link);
1031 spin_unlock(&vm->notifier.list_lock);
1033 if (!vma->bo->vm && vm_remove_extobj(vma)) {
1034 struct xe_vma *other;
1036 other = bo_has_vm_references_locked(vma->bo, vm, NULL);
1039 __vm_insert_extobj(vm, other);
1043 xe_vm_assert_held(vm);
1044 if (!list_empty(&vma->rebind_link))
1045 list_del(&vma->rebind_link);
1048 int ret = dma_fence_add_callback(fence, &vma->destroy_cb,
1052 XE_WARN_ON(ret != -ENOENT);
1053 xe_vma_destroy_late(vma);
1056 xe_vma_destroy_late(vma);
1060 static void xe_vma_destroy_unlocked(struct xe_vma *vma)
1062 struct ttm_validate_buffer tv[2];
1063 struct ww_acquire_ctx ww;
1064 struct xe_bo *bo = vma->bo;
1069 memset(tv, 0, sizeof(tv));
1070 tv[0].bo = xe_vm_ttm_bo(vma->vm);
1071 list_add(&tv[0].head, &objs);
1074 tv[1].bo = &xe_bo_get(bo)->ttm;
1075 list_add(&tv[1].head, &objs);
1077 err = ttm_eu_reserve_buffers(&ww, &objs, false, &dups);
1080 xe_vma_destroy(vma, NULL);
1082 ttm_eu_backoff_reservation(&ww, &objs);
1087 static struct xe_vma *to_xe_vma(const struct rb_node *node)
1089 BUILD_BUG_ON(offsetof(struct xe_vma, vm_node) != 0);
1090 return (struct xe_vma *)node;
1093 static int xe_vma_cmp(const struct xe_vma *a, const struct xe_vma *b)
1095 if (a->end < b->start) {
1097 } else if (b->end < a->start) {
1104 static bool xe_vma_less_cb(struct rb_node *a, const struct rb_node *b)
1106 return xe_vma_cmp(to_xe_vma(a), to_xe_vma(b)) < 0;
1109 int xe_vma_cmp_vma_cb(const void *key, const struct rb_node *node)
1111 struct xe_vma *cmp = to_xe_vma(node);
1112 const struct xe_vma *own = key;
1114 if (own->start > cmp->end)
1117 if (own->end < cmp->start)
1124 xe_vm_find_overlapping_vma(struct xe_vm *vm, const struct xe_vma *vma)
1126 struct rb_node *node;
1128 if (xe_vm_is_closed(vm))
1131 XE_BUG_ON(vma->end >= vm->size);
1132 lockdep_assert_held(&vm->lock);
1134 node = rb_find(vma, &vm->vmas, xe_vma_cmp_vma_cb);
1136 return node ? to_xe_vma(node) : NULL;
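/*
 * Illustrative sketch, not part of this file: callers build a throwaway
 * lookup VMA on the stack and probe the tree, as the bind ioctl paths
 * further below do.
 *
 *	struct xe_vma lookup;
 *
 *	lookup.start = addr;
 *	lookup.end = addr + range - 1;
 *	vma = xe_vm_find_overlapping_vma(vm, &lookup);
 */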
1139 static void xe_vm_insert_vma(struct xe_vm *vm, struct xe_vma *vma)
1141 XE_BUG_ON(vma->vm != vm);
1142 lockdep_assert_held(&vm->lock);
1144 rb_add(&vma->vm_node, &vm->vmas, xe_vma_less_cb);
1147 static void xe_vm_remove_vma(struct xe_vm *vm, struct xe_vma *vma)
1149 XE_BUG_ON(vma->vm != vm);
1150 lockdep_assert_held(&vm->lock);
1152 rb_erase(&vma->vm_node, &vm->vmas);
1153 if (vm->usm.last_fault_vma == vma)
1154 vm->usm.last_fault_vma = NULL;
1157 static void async_op_work_func(struct work_struct *w);
1158 static void vm_destroy_work_func(struct work_struct *w);
1160 struct xe_vm *xe_vm_create(struct xe_device *xe, u32 flags)
1163 int err, i = 0, number_gts = 0;
1167 vm = kzalloc(sizeof(*vm), GFP_KERNEL);
1169 return ERR_PTR(-ENOMEM);
1172 kref_init(&vm->refcount);
1173 dma_resv_init(&vm->resv);
1175 vm->size = 1ull << xe_pt_shift(xe->info.vm_max_level + 1);
1180 init_rwsem(&vm->lock);
1182 INIT_LIST_HEAD(&vm->rebind_list);
1184 INIT_LIST_HEAD(&vm->userptr.repin_list);
1185 INIT_LIST_HEAD(&vm->userptr.invalidated);
1186 init_rwsem(&vm->userptr.notifier_lock);
1187 spin_lock_init(&vm->userptr.invalidated_lock);
1189 INIT_LIST_HEAD(&vm->notifier.rebind_list);
1190 spin_lock_init(&vm->notifier.list_lock);
1192 INIT_LIST_HEAD(&vm->async_ops.pending);
1193 INIT_WORK(&vm->async_ops.work, async_op_work_func);
1194 spin_lock_init(&vm->async_ops.lock);
1196 INIT_WORK(&vm->destroy_work, vm_destroy_work_func);
1198 INIT_LIST_HEAD(&vm->preempt.engines);
1199 vm->preempt.min_run_period_ms = 10; /* FIXME: Wire up to uAPI */
1201 INIT_LIST_HEAD(&vm->extobj.list);
1203 if (!(flags & XE_VM_FLAG_MIGRATION)) {
1204		/* We need to immediately exit from any D3 state */
1205 xe_pm_runtime_get(xe);
1206 xe_device_mem_access_get(xe);
1209 err = dma_resv_lock_interruptible(&vm->resv, NULL);
1213 if (IS_DGFX(xe) && xe->info.vram_flags & XE_VRAM_FLAGS_NEED64K)
1214 vm->flags |= XE_VM_FLAGS_64K;
1216 for_each_gt(gt, xe, id) {
1217 if (xe_gt_is_media_type(gt))
1220 if (flags & XE_VM_FLAG_MIGRATION &&
1221 gt->info.id != XE_VM_FLAG_GT_ID(flags))
1224 vm->pt_root[id] = xe_pt_create(vm, gt, xe->info.vm_max_level);
1225 if (IS_ERR(vm->pt_root[id])) {
1226 err = PTR_ERR(vm->pt_root[id]);
1227 vm->pt_root[id] = NULL;
1228 goto err_destroy_root;
1232 if (flags & XE_VM_FLAG_SCRATCH_PAGE) {
1233 for_each_gt(gt, xe, id) {
1234 if (!vm->pt_root[id])
1237 err = xe_pt_create_scratch(xe, gt, vm);
1239 goto err_scratch_pt;
1243 if (flags & DRM_XE_VM_CREATE_COMPUTE_MODE) {
1244 INIT_WORK(&vm->preempt.rebind_work, preempt_rebind_work_func);
1245 vm->flags |= XE_VM_FLAG_COMPUTE_MODE;
1248 if (flags & DRM_XE_VM_CREATE_ASYNC_BIND_OPS) {
1249 vm->async_ops.fence.context = dma_fence_context_alloc(1);
1250 vm->flags |= XE_VM_FLAG_ASYNC_BIND_OPS;
1253 /* Fill pt_root after allocating scratch tables */
1254 for_each_gt(gt, xe, id) {
1255 if (!vm->pt_root[id])
1258 xe_pt_populate_empty(gt, vm, vm->pt_root[id]);
1260 dma_resv_unlock(&vm->resv);
1262 /* Kernel migration VM shouldn't have a circular loop.. */
1263 if (!(flags & XE_VM_FLAG_MIGRATION)) {
1264 for_each_gt(gt, xe, id) {
1265 struct xe_vm *migrate_vm;
1266 struct xe_engine *eng;
1268 if (!vm->pt_root[id])
1271 migrate_vm = xe_migrate_get_vm(gt->migrate);
1272 eng = xe_engine_create_class(xe, gt, migrate_vm,
1273 XE_ENGINE_CLASS_COPY,
1275 xe_vm_put(migrate_vm);
1277 xe_vm_close_and_put(vm);
1278 return ERR_CAST(eng);
1286 vm->composite_fence_ctx = dma_fence_context_alloc(1);
1288 mutex_lock(&xe->usm.lock);
1289 if (flags & XE_VM_FLAG_FAULT_MODE)
1290 xe->usm.num_vm_in_fault_mode++;
1291 else if (!(flags & XE_VM_FLAG_MIGRATION))
1292 xe->usm.num_vm_in_non_fault_mode++;
1293 mutex_unlock(&xe->usm.lock);
1295 trace_xe_vm_create(vm);
1300 for_each_gt(gt, xe, id) {
1301 if (!vm->pt_root[id])
1304 i = vm->pt_root[id]->level;
1306 if (vm->scratch_pt[id][--i])
1307 xe_pt_destroy(vm->scratch_pt[id][i],
1309 xe_bo_unpin(vm->scratch_bo[id]);
1310 xe_bo_put(vm->scratch_bo[id]);
1313 for_each_gt(gt, xe, id) {
1314 if (vm->pt_root[id])
1315 xe_pt_destroy(vm->pt_root[id], vm->flags, NULL);
1317 dma_resv_unlock(&vm->resv);
1319 dma_resv_fini(&vm->resv);
1321 if (!(flags & XE_VM_FLAG_MIGRATION)) {
1322 xe_device_mem_access_put(xe);
1323 xe_pm_runtime_put(xe);
1325 return ERR_PTR(err);
1328 static void flush_async_ops(struct xe_vm *vm)
1330 queue_work(system_unbound_wq, &vm->async_ops.work);
1331 flush_work(&vm->async_ops.work);
1334 static void vm_error_capture(struct xe_vm *vm, int err,
1335 u32 op, u64 addr, u64 size)
1337 struct drm_xe_vm_bind_op_error_capture capture;
1338 u64 __user *address =
1339 u64_to_user_ptr(vm->async_ops.error_capture.addr);
1340 bool in_kthread = !current->mm;
1342 capture.error = err;
1344 capture.addr = addr;
1345 capture.size = size;
1348 if (!mmget_not_zero(vm->async_ops.error_capture.mm))
1350 kthread_use_mm(vm->async_ops.error_capture.mm);
1353 if (copy_to_user(address, &capture, sizeof(capture)))
1354 XE_WARN_ON("Copy to user failed");
1357 kthread_unuse_mm(vm->async_ops.error_capture.mm);
1358 mmput(vm->async_ops.error_capture.mm);
1362 wake_up_all(&vm->async_ops.error_capture.wq);
1365 void xe_vm_close_and_put(struct xe_vm *vm)
1367 struct rb_root contested = RB_ROOT;
1368 struct ww_acquire_ctx ww;
1369 struct xe_device *xe = vm->xe;
1373 XE_BUG_ON(vm->preempt.num_engines);
1377 flush_async_ops(vm);
1378 if (xe_vm_in_compute_mode(vm))
1379 flush_work(&vm->preempt.rebind_work);
1381 for_each_gt(gt, xe, id) {
1383 xe_engine_kill(vm->eng[id]);
1384 xe_engine_put(vm->eng[id]);
1389 down_write(&vm->lock);
1390 xe_vm_lock(vm, &ww, 0, false);
1391 while (vm->vmas.rb_node) {
1392 struct xe_vma *vma = to_xe_vma(vm->vmas.rb_node);
1394 if (xe_vma_is_userptr(vma)) {
1395 down_read(&vm->userptr.notifier_lock);
1396 vma->destroyed = true;
1397 up_read(&vm->userptr.notifier_lock);
1400 rb_erase(&vma->vm_node, &vm->vmas);
1402 /* easy case, remove from VMA? */
1403 if (xe_vma_is_userptr(vma) || vma->bo->vm) {
1404 xe_vma_destroy(vma, NULL);
1408 rb_add(&vma->vm_node, &contested, xe_vma_less_cb);
1412 * All vm operations will add shared fences to resv.
1413 * The only exception is eviction for a shared object,
1414 * but even so, the unbind when evicted would still
1415 * install a fence to resv. Hence it's safe to
1416 * destroy the pagetables immediately.
1418 for_each_gt(gt, xe, id) {
1419 if (vm->scratch_bo[id]) {
1422 xe_bo_unpin(vm->scratch_bo[id]);
1423 xe_bo_put(vm->scratch_bo[id]);
1424 for (i = 0; i < vm->pt_root[id]->level; i++)
1425 xe_pt_destroy(vm->scratch_pt[id][i], vm->flags,
1429 xe_vm_unlock(vm, &ww);
1431 if (contested.rb_node) {
1434 * VM is now dead, cannot re-add nodes to vm->vmas if it's NULL
1435 * Since we hold a refcount to the bo, we can remove and free
1436 * the members safely without locking.
1438 while (contested.rb_node) {
1439 struct xe_vma *vma = to_xe_vma(contested.rb_node);
1441 rb_erase(&vma->vm_node, &contested);
1442 xe_vma_destroy_unlocked(vma);
1446 if (vm->async_ops.error_capture.addr)
1447 wake_up_all(&vm->async_ops.error_capture.wq);
1449 XE_WARN_ON(!list_empty(&vm->extobj.list));
1450 up_write(&vm->lock);
1452 mutex_lock(&xe->usm.lock);
1453 if (vm->flags & XE_VM_FLAG_FAULT_MODE)
1454 xe->usm.num_vm_in_fault_mode--;
1455 else if (!(vm->flags & XE_VM_FLAG_MIGRATION))
1456 xe->usm.num_vm_in_non_fault_mode--;
1457 mutex_unlock(&xe->usm.lock);
1462 static void vm_destroy_work_func(struct work_struct *w)
1465 container_of(w, struct xe_vm, destroy_work);
1466 struct ww_acquire_ctx ww;
1467 struct xe_device *xe = vm->xe;
1472 /* xe_vm_close_and_put was not called? */
1473 XE_WARN_ON(vm->size);
1475 if (!(vm->flags & XE_VM_FLAG_MIGRATION)) {
1476 xe_device_mem_access_put(xe);
1477 xe_pm_runtime_put(xe);
1479 if (xe->info.has_asid) {
1480 mutex_lock(&xe->usm.lock);
1481 lookup = xa_erase(&xe->usm.asid_to_vm, vm->usm.asid);
1482 XE_WARN_ON(lookup != vm);
1483 mutex_unlock(&xe->usm.lock);
1488	 * XXX: We delay destroying the PT root until the VM is freed as the PT root
1489 * is needed for xe_vm_lock to work. If we remove that dependency this
1490 * can be moved to xe_vm_close_and_put.
1492 xe_vm_lock(vm, &ww, 0, false);
1493 for_each_gt(gt, xe, id) {
1494 if (vm->pt_root[id]) {
1495 xe_pt_destroy(vm->pt_root[id], vm->flags, NULL);
1496 vm->pt_root[id] = NULL;
1499 xe_vm_unlock(vm, &ww);
1501 trace_xe_vm_free(vm);
1502 dma_fence_put(vm->rebind_fence);
1503 dma_resv_fini(&vm->resv);
1507 void xe_vm_free(struct kref *ref)
1509 struct xe_vm *vm = container_of(ref, struct xe_vm, refcount);
1511 /* To destroy the VM we need to be able to sleep */
1512 queue_work(system_unbound_wq, &vm->destroy_work);
1515 struct xe_vm *xe_vm_lookup(struct xe_file *xef, u32 id)
1519 mutex_lock(&xef->vm.lock);
1520 vm = xa_load(&xef->vm.xa, id);
1521 mutex_unlock(&xef->vm.lock);
1529 u64 xe_vm_pdp4_descriptor(struct xe_vm *vm, struct xe_gt *full_gt)
1531 XE_BUG_ON(xe_gt_is_media_type(full_gt));
1533 return gen8_pde_encode(vm->pt_root[full_gt->info.id]->bo, 0,
1537 static struct dma_fence *
1538 xe_vm_unbind_vma(struct xe_vma *vma, struct xe_engine *e,
1539 struct xe_sync_entry *syncs, u32 num_syncs)
1542 struct dma_fence *fence = NULL;
1543 struct dma_fence **fences = NULL;
1544 struct dma_fence_array *cf = NULL;
1545 struct xe_vm *vm = vma->vm;
1546 int cur_fence = 0, i;
1547 int number_gts = hweight_long(vma->gt_present);
1551 trace_xe_vma_unbind(vma);
1553 if (number_gts > 1) {
1554 fences = kmalloc_array(number_gts, sizeof(*fences),
1557 return ERR_PTR(-ENOMEM);
1560 for_each_gt(gt, vm->xe, id) {
1561 if (!(vma->gt_present & BIT(id)))
1564 XE_BUG_ON(xe_gt_is_media_type(gt));
1566 fence = __xe_pt_unbind_vma(gt, vma, e, syncs, num_syncs);
1567 if (IS_ERR(fence)) {
1568 err = PTR_ERR(fence);
1573 fences[cur_fence++] = fence;
1576 if (e && vm->pt_root[id] && !list_empty(&e->multi_gt_list))
1577 e = list_next_entry(e, multi_gt_list);
1581 cf = dma_fence_array_create(number_gts, fences,
1582 vm->composite_fence_ctx,
1583 vm->composite_fence_seqno++,
1586 --vm->composite_fence_seqno;
1592 for (i = 0; i < num_syncs; i++)
1593 xe_sync_entry_signal(&syncs[i], NULL, cf ? &cf->base : fence);
1595 return cf ? &cf->base : !fence ? dma_fence_get_stub() : fence;
1600 /* FIXME: Rewind the previous binds? */
1601 dma_fence_put(fences[--cur_fence]);
1606 return ERR_PTR(err);
1609 static struct dma_fence *
1610 xe_vm_bind_vma(struct xe_vma *vma, struct xe_engine *e,
1611 struct xe_sync_entry *syncs, u32 num_syncs)
1614 struct dma_fence *fence;
1615 struct dma_fence **fences = NULL;
1616 struct dma_fence_array *cf = NULL;
1617 struct xe_vm *vm = vma->vm;
1618 int cur_fence = 0, i;
1619 int number_gts = hweight_long(vma->gt_mask);
1623 trace_xe_vma_bind(vma);
1625 if (number_gts > 1) {
1626 fences = kmalloc_array(number_gts, sizeof(*fences),
1629 return ERR_PTR(-ENOMEM);
1632 for_each_gt(gt, vm->xe, id) {
1633 if (!(vma->gt_mask & BIT(id)))
1636 XE_BUG_ON(xe_gt_is_media_type(gt));
1637 fence = __xe_pt_bind_vma(gt, vma, e, syncs, num_syncs,
1638 vma->gt_present & BIT(id));
1639 if (IS_ERR(fence)) {
1640 err = PTR_ERR(fence);
1645 fences[cur_fence++] = fence;
1648 if (e && vm->pt_root[id] && !list_empty(&e->multi_gt_list))
1649 e = list_next_entry(e, multi_gt_list);
1653 cf = dma_fence_array_create(number_gts, fences,
1654 vm->composite_fence_ctx,
1655 vm->composite_fence_seqno++,
1658 --vm->composite_fence_seqno;
1664 for (i = 0; i < num_syncs; i++)
1665 xe_sync_entry_signal(&syncs[i], NULL, cf ? &cf->base : fence);
1667 return cf ? &cf->base : fence;
1672 /* FIXME: Rewind the previous binds? */
1673 dma_fence_put(fences[--cur_fence]);
1678 return ERR_PTR(err);
1681 struct async_op_fence {
1682 struct dma_fence fence;
1683 struct dma_fence *wait_fence;
1684 struct dma_fence_cb cb;
1686 wait_queue_head_t wq;
1690 static const char *async_op_fence_get_driver_name(struct dma_fence *dma_fence)
1696 async_op_fence_get_timeline_name(struct dma_fence *dma_fence)
1698 return "async_op_fence";
1701 static const struct dma_fence_ops async_op_fence_ops = {
1702 .get_driver_name = async_op_fence_get_driver_name,
1703 .get_timeline_name = async_op_fence_get_timeline_name,
1706 static void async_op_fence_cb(struct dma_fence *fence, struct dma_fence_cb *cb)
1708 struct async_op_fence *afence =
1709 container_of(cb, struct async_op_fence, cb);
1711 afence->fence.error = afence->wait_fence->error;
1712 dma_fence_signal(&afence->fence);
1713 xe_vm_put(afence->vm);
1714 dma_fence_put(afence->wait_fence);
1715 dma_fence_put(&afence->fence);
1718 static void add_async_op_fence_cb(struct xe_vm *vm,
1719 struct dma_fence *fence,
1720 struct async_op_fence *afence)
1724 if (!xe_vm_no_dma_fences(vm)) {
1725 afence->started = true;
1727 wake_up_all(&afence->wq);
1730 afence->wait_fence = dma_fence_get(fence);
1731 afence->vm = xe_vm_get(vm);
1732 dma_fence_get(&afence->fence);
1733 ret = dma_fence_add_callback(fence, &afence->cb, async_op_fence_cb);
1734 if (ret == -ENOENT) {
1735 afence->fence.error = afence->wait_fence->error;
1736 dma_fence_signal(&afence->fence);
1740 dma_fence_put(afence->wait_fence);
1741 dma_fence_put(&afence->fence);
1743 XE_WARN_ON(ret && ret != -ENOENT);
1746 int xe_vm_async_fence_wait_start(struct dma_fence *fence)
1748 if (fence->ops == &async_op_fence_ops) {
1749 struct async_op_fence *afence =
1750 container_of(fence, struct async_op_fence, fence);
1752 XE_BUG_ON(xe_vm_no_dma_fences(afence->vm));
1755 return wait_event_interruptible(afence->wq, afence->started);
1761 static int __xe_vm_bind(struct xe_vm *vm, struct xe_vma *vma,
1762 struct xe_engine *e, struct xe_sync_entry *syncs,
1763 u32 num_syncs, struct async_op_fence *afence)
1765 struct dma_fence *fence;
1767 xe_vm_assert_held(vm);
1769 fence = xe_vm_bind_vma(vma, e, syncs, num_syncs);
1771 return PTR_ERR(fence);
1773 add_async_op_fence_cb(vm, fence, afence);
1775 dma_fence_put(fence);
1779 static int xe_vm_bind(struct xe_vm *vm, struct xe_vma *vma, struct xe_engine *e,
1780 struct xe_bo *bo, struct xe_sync_entry *syncs,
1781 u32 num_syncs, struct async_op_fence *afence)
1785 xe_vm_assert_held(vm);
1786 xe_bo_assert_held(bo);
1789 err = xe_bo_validate(bo, vm, true);
1794 return __xe_vm_bind(vm, vma, e, syncs, num_syncs, afence);
1797 static int xe_vm_unbind(struct xe_vm *vm, struct xe_vma *vma,
1798 struct xe_engine *e, struct xe_sync_entry *syncs,
1799 u32 num_syncs, struct async_op_fence *afence)
1801 struct dma_fence *fence;
1803 xe_vm_assert_held(vm);
1804 xe_bo_assert_held(vma->bo);
1806 fence = xe_vm_unbind_vma(vma, e, syncs, num_syncs);
1808 return PTR_ERR(fence);
1810 add_async_op_fence_cb(vm, fence, afence);
1812 xe_vma_destroy(vma, fence);
1813 dma_fence_put(fence);
1818 static int vm_set_error_capture_address(struct xe_device *xe, struct xe_vm *vm,
1821 if (XE_IOCTL_ERR(xe, !value))
1824 if (XE_IOCTL_ERR(xe, !(vm->flags & XE_VM_FLAG_ASYNC_BIND_OPS)))
1827 if (XE_IOCTL_ERR(xe, vm->async_ops.error_capture.addr))
1830 vm->async_ops.error_capture.mm = current->mm;
1831 vm->async_ops.error_capture.addr = value;
1832 init_waitqueue_head(&vm->async_ops.error_capture.wq);
1837 typedef int (*xe_vm_set_property_fn)(struct xe_device *xe, struct xe_vm *vm,
1840 static const xe_vm_set_property_fn vm_set_property_funcs[] = {
1841 [XE_VM_PROPERTY_BIND_OP_ERROR_CAPTURE_ADDRESS] =
1842 vm_set_error_capture_address,
1845 static int vm_user_ext_set_property(struct xe_device *xe, struct xe_vm *vm,
1848 u64 __user *address = u64_to_user_ptr(extension);
1849 struct drm_xe_ext_vm_set_property ext;
1852 err = __copy_from_user(&ext, address, sizeof(ext));
1853 if (XE_IOCTL_ERR(xe, err))
1856 if (XE_IOCTL_ERR(xe, ext.property >=
1857 ARRAY_SIZE(vm_set_property_funcs)))
1860 return vm_set_property_funcs[ext.property](xe, vm, ext.value);
1863 typedef int (*xe_vm_user_extension_fn)(struct xe_device *xe, struct xe_vm *vm,
1866 static const xe_vm_set_property_fn vm_user_extension_funcs[] = {
1867 [XE_VM_EXTENSION_SET_PROPERTY] = vm_user_ext_set_property,
1870 #define MAX_USER_EXTENSIONS 16
1871 static int vm_user_extensions(struct xe_device *xe, struct xe_vm *vm,
1872 u64 extensions, int ext_number)
1874 u64 __user *address = u64_to_user_ptr(extensions);
1875 struct xe_user_extension ext;
1878 if (XE_IOCTL_ERR(xe, ext_number >= MAX_USER_EXTENSIONS))
1881 err = __copy_from_user(&ext, address, sizeof(ext));
1882 if (XE_IOCTL_ERR(xe, err))
1885 if (XE_IOCTL_ERR(xe, ext.name >=
1886 ARRAY_SIZE(vm_user_extension_funcs)))
1889 err = vm_user_extension_funcs[ext.name](xe, vm, extensions);
1890 if (XE_IOCTL_ERR(xe, err))
1893 if (ext.next_extension)
1894 return vm_user_extensions(xe, vm, ext.next_extension,
1900 #define ALL_DRM_XE_VM_CREATE_FLAGS (DRM_XE_VM_CREATE_SCRATCH_PAGE | \
1901 DRM_XE_VM_CREATE_COMPUTE_MODE | \
1902 DRM_XE_VM_CREATE_ASYNC_BIND_OPS | \
1903 DRM_XE_VM_CREATE_FAULT_MODE)
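/*
 * Illustrative userspace sketch, not part of this file, assuming the
 * struct drm_xe_vm_create layout from xe_drm.h at this point in the
 * driver's history: create a compute-mode VM with async bind ops.
 *
 *	struct drm_xe_vm_create create = {
 *		.flags = DRM_XE_VM_CREATE_COMPUTE_MODE |
 *			 DRM_XE_VM_CREATE_ASYNC_BIND_OPS,
 *	};
 *
 *	if (ioctl(fd, DRM_IOCTL_XE_VM_CREATE, &create))
 *		return -errno;
 *	vm_id = create.vm_id;
 */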
1905 int xe_vm_create_ioctl(struct drm_device *dev, void *data,
1906 struct drm_file *file)
1908 struct xe_device *xe = to_xe_device(dev);
1909 struct xe_file *xef = to_xe_file(file);
1910 struct drm_xe_vm_create *args = data;
1916 if (XE_IOCTL_ERR(xe, args->flags & ~ALL_DRM_XE_VM_CREATE_FLAGS))
1919 if (XE_IOCTL_ERR(xe, args->flags & DRM_XE_VM_CREATE_SCRATCH_PAGE &&
1920 args->flags & DRM_XE_VM_CREATE_FAULT_MODE))
1923 if (XE_IOCTL_ERR(xe, args->flags & DRM_XE_VM_CREATE_COMPUTE_MODE &&
1924 args->flags & DRM_XE_VM_CREATE_FAULT_MODE))
1927 if (XE_IOCTL_ERR(xe, args->flags & DRM_XE_VM_CREATE_FAULT_MODE &&
1928 xe_device_in_non_fault_mode(xe)))
1931 if (XE_IOCTL_ERR(xe, !(args->flags & DRM_XE_VM_CREATE_FAULT_MODE) &&
1932 xe_device_in_fault_mode(xe)))
1935 if (XE_IOCTL_ERR(xe, args->flags & DRM_XE_VM_CREATE_FAULT_MODE &&
1936 !xe->info.supports_usm))
1939 if (args->flags & DRM_XE_VM_CREATE_SCRATCH_PAGE)
1940 flags |= XE_VM_FLAG_SCRATCH_PAGE;
1941 if (args->flags & DRM_XE_VM_CREATE_COMPUTE_MODE)
1942 flags |= XE_VM_FLAG_COMPUTE_MODE;
1943 if (args->flags & DRM_XE_VM_CREATE_ASYNC_BIND_OPS)
1944 flags |= XE_VM_FLAG_ASYNC_BIND_OPS;
1945 if (args->flags & DRM_XE_VM_CREATE_FAULT_MODE)
1946 flags |= XE_VM_FLAG_FAULT_MODE;
1948 vm = xe_vm_create(xe, flags);
1952 if (args->extensions) {
1953 err = vm_user_extensions(xe, vm, args->extensions, 0);
1954 if (XE_IOCTL_ERR(xe, err)) {
1955 xe_vm_close_and_put(vm);
1960 mutex_lock(&xef->vm.lock);
1961 err = xa_alloc(&xef->vm.xa, &id, vm, xa_limit_32b, GFP_KERNEL);
1962 mutex_unlock(&xef->vm.lock);
1964 xe_vm_close_and_put(vm);
1968 if (xe->info.has_asid) {
1969 mutex_lock(&xe->usm.lock);
1970 err = xa_alloc_cyclic(&xe->usm.asid_to_vm, &asid, vm,
1971 XA_LIMIT(0, XE_MAX_ASID - 1),
1972 &xe->usm.next_asid, GFP_KERNEL);
1973 mutex_unlock(&xe->usm.lock);
1975 xe_vm_close_and_put(vm);
1978 vm->usm.asid = asid;
1983 #if IS_ENABLED(CONFIG_DRM_XE_DEBUG_MEM)
1984 /* Warning: Security issue - never enable by default */
1985 args->reserved[0] = xe_bo_main_addr(vm->pt_root[0]->bo, XE_PAGE_SIZE);
1991 int xe_vm_destroy_ioctl(struct drm_device *dev, void *data,
1992 struct drm_file *file)
1994 struct xe_device *xe = to_xe_device(dev);
1995 struct xe_file *xef = to_xe_file(file);
1996 struct drm_xe_vm_destroy *args = data;
1999 if (XE_IOCTL_ERR(xe, args->pad))
2002 vm = xe_vm_lookup(xef, args->vm_id);
2003 if (XE_IOCTL_ERR(xe, !vm))
2007 /* FIXME: Extend this check to non-compute mode VMs */
2008 if (XE_IOCTL_ERR(xe, vm->preempt.num_engines))
2011 mutex_lock(&xef->vm.lock);
2012 xa_erase(&xef->vm.xa, args->vm_id);
2013 mutex_unlock(&xef->vm.lock);
2015 xe_vm_close_and_put(vm);
2020 static const u32 region_to_mem_type[] = {
2026 static int xe_vm_prefetch(struct xe_vm *vm, struct xe_vma *vma,
2027 struct xe_engine *e, u32 region,
2028 struct xe_sync_entry *syncs, u32 num_syncs,
2029 struct async_op_fence *afence)
2033 XE_BUG_ON(region > ARRAY_SIZE(region_to_mem_type));
2035 if (!xe_vma_is_userptr(vma)) {
2036 err = xe_bo_migrate(vma->bo, region_to_mem_type[region]);
2041 if (vma->gt_mask != (vma->gt_present & ~vma->usm.gt_invalidated)) {
2042 return xe_vm_bind(vm, vma, e, vma->bo, syncs, num_syncs,
2047 /* Nothing to do, signal fences now */
2048 for (i = 0; i < num_syncs; i++)
2049 xe_sync_entry_signal(&syncs[i], NULL,
2050 dma_fence_get_stub());
2052 dma_fence_signal(&afence->fence);
2057 #define VM_BIND_OP(op) (op & 0xffff)
2059 static int __vm_bind_ioctl(struct xe_vm *vm, struct xe_vma *vma,
2060 struct xe_engine *e, struct xe_bo *bo, u32 op,
2061 u32 region, struct xe_sync_entry *syncs,
2062 u32 num_syncs, struct async_op_fence *afence)
2064 switch (VM_BIND_OP(op)) {
2065 case XE_VM_BIND_OP_MAP:
2066 return xe_vm_bind(vm, vma, e, bo, syncs, num_syncs, afence);
2067 case XE_VM_BIND_OP_UNMAP:
2068 case XE_VM_BIND_OP_UNMAP_ALL:
2069 return xe_vm_unbind(vm, vma, e, syncs, num_syncs, afence);
2070 case XE_VM_BIND_OP_MAP_USERPTR:
2071 return xe_vm_bind(vm, vma, e, NULL, syncs, num_syncs, afence);
2072 case XE_VM_BIND_OP_PREFETCH:
2073 return xe_vm_prefetch(vm, vma, e, region, syncs, num_syncs,
2077 XE_BUG_ON("NOT POSSIBLE");
2082 struct ttm_buffer_object *xe_vm_ttm_bo(struct xe_vm *vm)
2084 int idx = vm->flags & XE_VM_FLAG_MIGRATION ?
2085 XE_VM_FLAG_GT_ID(vm->flags) : 0;
2087	/* Safe to use index 0 as all BOs in the VM share a single dma-resv lock */
2088 return &vm->pt_root[idx]->bo->ttm;
2091 static void xe_vm_tv_populate(struct xe_vm *vm, struct ttm_validate_buffer *tv)
2094 tv->bo = xe_vm_ttm_bo(vm);
2097 static bool is_map_op(u32 op)
2099 return VM_BIND_OP(op) == XE_VM_BIND_OP_MAP ||
2100 VM_BIND_OP(op) == XE_VM_BIND_OP_MAP_USERPTR;
2103 static bool is_unmap_op(u32 op)
2105 return VM_BIND_OP(op) == XE_VM_BIND_OP_UNMAP ||
2106 VM_BIND_OP(op) == XE_VM_BIND_OP_UNMAP_ALL;
2109 static int vm_bind_ioctl(struct xe_vm *vm, struct xe_vma *vma,
2110 struct xe_engine *e, struct xe_bo *bo,
2111 struct drm_xe_vm_bind_op *bind_op,
2112 struct xe_sync_entry *syncs, u32 num_syncs,
2113 struct async_op_fence *afence)
2117 struct ttm_validate_buffer tv_bo, tv_vm;
2118 struct ww_acquire_ctx ww;
2122 lockdep_assert_held(&vm->lock);
2123 XE_BUG_ON(!list_empty(&vma->unbind_link));
2125 /* Binds deferred to faults, signal fences now */
2126 if (xe_vm_in_fault_mode(vm) && is_map_op(bind_op->op) &&
2127 !(bind_op->op & XE_VM_BIND_FLAG_IMMEDIATE)) {
2128 for (i = 0; i < num_syncs; i++)
2129 xe_sync_entry_signal(&syncs[i], NULL,
2130 dma_fence_get_stub());
2132 dma_fence_signal(&afence->fence);
2136 xe_vm_tv_populate(vm, &tv_vm);
2137 list_add_tail(&tv_vm.head, &objs);
2141 * An unbind can drop the last reference to the BO and
2142 * the BO is needed for ttm_eu_backoff_reservation so
2143 * take a reference here.
2147 tv_bo.bo = &vbo->ttm;
2148 tv_bo.num_shared = 1;
2149 list_add(&tv_bo.head, &objs);
2153 err = ttm_eu_reserve_buffers(&ww, &objs, true, &dups);
2155 err = __vm_bind_ioctl(vm, vma, e, bo,
2156 bind_op->op, bind_op->region, syncs,
2158 ttm_eu_backoff_reservation(&ww, &objs);
2159 if (err == -EAGAIN && xe_vma_is_userptr(vma)) {
2160 lockdep_assert_held_write(&vm->lock);
2161 err = xe_vma_userptr_pin_pages(vma);
2173 struct xe_engine *engine;
2175 struct drm_xe_vm_bind_op bind_op;
2176 struct xe_sync_entry *syncs;
2178 struct list_head link;
2179 struct async_op_fence *fence;
2182 static void async_op_cleanup(struct xe_vm *vm, struct async_op *op)
2184 while (op->num_syncs--)
2185 xe_sync_entry_cleanup(&op->syncs[op->num_syncs]);
2189 xe_engine_put(op->engine);
2192 dma_fence_put(&op->fence->fence);
2196 static struct async_op *next_async_op(struct xe_vm *vm)
2198 return list_first_entry_or_null(&vm->async_ops.pending,
2199 struct async_op, link);
2202 static void vm_set_async_error(struct xe_vm *vm, int err)
2204 lockdep_assert_held(&vm->lock);
2205 vm->async_ops.error = err;
2208 static void async_op_work_func(struct work_struct *w)
2210 struct xe_vm *vm = container_of(w, struct xe_vm, async_ops.work);
2213 struct async_op *op;
2216 if (vm->async_ops.error && !xe_vm_is_closed(vm))
2219 spin_lock_irq(&vm->async_ops.lock);
2220 op = next_async_op(vm);
2222 list_del_init(&op->link);
2223 spin_unlock_irq(&vm->async_ops.lock);
2228 if (!xe_vm_is_closed(vm)) {
2231 down_write(&vm->lock);
2233 first = op->vma->first_munmap_rebind;
2234 last = op->vma->last_munmap_rebind;
2235 #ifdef TEST_VM_ASYNC_OPS_ERROR
2236 #define FORCE_ASYNC_OP_ERROR BIT(31)
2237 if (!(op->bind_op.op & FORCE_ASYNC_OP_ERROR)) {
2238 err = vm_bind_ioctl(vm, op->vma, op->engine,
2239 op->bo, &op->bind_op,
2240 op->syncs, op->num_syncs,
2244 op->bind_op.op &= ~FORCE_ASYNC_OP_ERROR;
2247 err = vm_bind_ioctl(vm, op->vma, op->engine, op->bo,
2248 &op->bind_op, op->syncs,
2249 op->num_syncs, op->fence);
2252 * In order for the fencing to work (stall behind
2253 * existing jobs / prevent new jobs from running) all
2254 * the dma-resv slots need to be programmed in a batch
2255 * relative to execs / the rebind worker. The vm->lock
2258 if (!err && ((first && VM_BIND_OP(op->bind_op.op) ==
2259 XE_VM_BIND_OP_UNMAP) ||
2260 vm->async_ops.munmap_rebind_inflight)) {
2262 op->vma->last_munmap_rebind = false;
2263 vm->async_ops.munmap_rebind_inflight =
2266 vm->async_ops.munmap_rebind_inflight =
2269 async_op_cleanup(vm, op);
2271 spin_lock_irq(&vm->async_ops.lock);
2272 op = next_async_op(vm);
2274 list_del_init(&op->link);
2275 spin_unlock_irq(&vm->async_ops.lock);
2281 trace_xe_vma_fail(op->vma);
2282 drm_warn(&vm->xe->drm, "Async VM op(%d) failed with %d",
2283 VM_BIND_OP(op->bind_op.op),
2286 spin_lock_irq(&vm->async_ops.lock);
2287 list_add(&op->link, &vm->async_ops.pending);
2288 spin_unlock_irq(&vm->async_ops.lock);
2290 vm_set_async_error(vm, err);
2291 up_write(&vm->lock);
2293 if (vm->async_ops.error_capture.addr)
2294 vm_error_capture(vm, err,
2300 up_write(&vm->lock);
2302 trace_xe_vma_flush(op->vma);
2304 if (is_unmap_op(op->bind_op.op)) {
2305 down_write(&vm->lock);
2306 xe_vma_destroy_unlocked(op->vma);
2307 up_write(&vm->lock);
2310 if (op->fence && !test_bit(DMA_FENCE_FLAG_SIGNALED_BIT,
2311 &op->fence->fence.flags)) {
2312 if (!xe_vm_no_dma_fences(vm)) {
2313 op->fence->started = true;
2315 wake_up_all(&op->fence->wq);
2317 dma_fence_signal(&op->fence->fence);
2321 async_op_cleanup(vm, op);
2325 static int __vm_bind_ioctl_async(struct xe_vm *vm, struct xe_vma *vma,
2326 struct xe_engine *e, struct xe_bo *bo,
2327 struct drm_xe_vm_bind_op *bind_op,
2328 struct xe_sync_entry *syncs, u32 num_syncs)
2330 struct async_op *op;
2331 bool installed = false;
2335 lockdep_assert_held(&vm->lock);
2337 op = kmalloc(sizeof(*op), GFP_KERNEL);
2343 op->fence = kmalloc(sizeof(*op->fence), GFP_KERNEL);
2349 seqno = e ? ++e->bind.fence_seqno : ++vm->async_ops.fence.seqno;
2350 dma_fence_init(&op->fence->fence, &async_op_fence_ops,
2351 &vm->async_ops.lock, e ? e->bind.fence_ctx :
2352 vm->async_ops.fence.context, seqno);
2354 if (!xe_vm_no_dma_fences(vm)) {
2356 op->fence->started = false;
2357 init_waitqueue_head(&op->fence->wq);
2365 op->bind_op = *bind_op;
2367 op->num_syncs = num_syncs;
2368 INIT_LIST_HEAD(&op->link);
2370 for (i = 0; i < num_syncs; i++)
2371 installed |= xe_sync_entry_signal(&syncs[i], NULL,
2374 if (!installed && op->fence)
2375 dma_fence_signal(&op->fence->fence);
2377 spin_lock_irq(&vm->async_ops.lock);
2378 list_add_tail(&op->link, &vm->async_ops.pending);
2379 spin_unlock_irq(&vm->async_ops.lock);
2381 if (!vm->async_ops.error)
2382 queue_work(system_unbound_wq, &vm->async_ops.work);
2387 static int vm_bind_ioctl_async(struct xe_vm *vm, struct xe_vma *vma,
2388 struct xe_engine *e, struct xe_bo *bo,
2389 struct drm_xe_vm_bind_op *bind_op,
2390 struct xe_sync_entry *syncs, u32 num_syncs)
2392 struct xe_vma *__vma, *next;
2393 struct list_head rebind_list;
2394 struct xe_sync_entry *in_syncs = NULL, *out_syncs = NULL;
2395 u32 num_in_syncs = 0, num_out_syncs = 0;
2396 bool first = true, last;
2400 lockdep_assert_held(&vm->lock);
2402 /* Not a linked list of unbinds + rebinds, easy */
2403 if (list_empty(&vma->unbind_link))
2404 return __vm_bind_ioctl_async(vm, vma, e, bo, bind_op,
2408	 * Linked list of unbinds + rebinds: decompose syncs into 'in / out',
2409	 * passing the 'in' to the first operation and 'out' to the last. Also,
2410	 * the reference counting is a little tricky: increment the VM / bind
2411	 * engine ref count on all but the last operation and increment the BO's
2412 * ref count on each rebind.
2415 XE_BUG_ON(VM_BIND_OP(bind_op->op) != XE_VM_BIND_OP_UNMAP &&
2416 VM_BIND_OP(bind_op->op) != XE_VM_BIND_OP_UNMAP_ALL &&
2417 VM_BIND_OP(bind_op->op) != XE_VM_BIND_OP_PREFETCH);
2419 /* Decompose syncs */
2421 in_syncs = kmalloc(sizeof(*in_syncs) * num_syncs, GFP_KERNEL);
2422 out_syncs = kmalloc(sizeof(*out_syncs) * num_syncs, GFP_KERNEL);
2423 if (!in_syncs || !out_syncs) {
2428 for (i = 0; i < num_syncs; ++i) {
2429 bool signal = syncs[i].flags & DRM_XE_SYNC_SIGNAL;
2432 out_syncs[num_out_syncs++] = syncs[i];
2434 in_syncs[num_in_syncs++] = syncs[i];
2438 /* Do unbinds + move rebinds to new list */
2439 INIT_LIST_HEAD(&rebind_list);
2440 list_for_each_entry_safe(__vma, next, &vma->unbind_link, unbind_link) {
2441 if (__vma->destroyed ||
2442 VM_BIND_OP(bind_op->op) == XE_VM_BIND_OP_PREFETCH) {
2443 list_del_init(&__vma->unbind_link);
2445 err = __vm_bind_ioctl_async(xe_vm_get(vm), __vma,
2446 e ? xe_engine_get(e) : NULL,
2447 bo, bind_op, first ?
2449 first ? num_in_syncs : 0);
2460 list_move_tail(&__vma->unbind_link, &rebind_list);
2463 last = list_empty(&rebind_list);
2469 err = __vm_bind_ioctl_async(vm, vma, e,
2472 last ? out_syncs : NULL,
2473 first ? num_in_syncs :
2474 last ? num_out_syncs : 0);
2486 list_for_each_entry_safe(__vma, next, &rebind_list, unbind_link) {
2487 list_del_init(&__vma->unbind_link);
2488 last = list_empty(&rebind_list);
2490 if (xe_vma_is_userptr(__vma)) {
2491 bind_op->op = XE_VM_BIND_FLAG_ASYNC |
2492 XE_VM_BIND_OP_MAP_USERPTR;
2494 bind_op->op = XE_VM_BIND_FLAG_ASYNC |
2496 xe_bo_get(__vma->bo);
2505 err = __vm_bind_ioctl_async(vm, __vma, e,
2506 __vma->bo, bind_op, last ?
2508 last ? num_out_syncs : 0);
2530 static int __vm_bind_ioctl_lookup_vma(struct xe_vm *vm, struct xe_bo *bo,
2531 u64 addr, u64 range, u32 op)
2533 struct xe_device *xe = vm->xe;
2534 struct xe_vma *vma, lookup;
2535 bool async = !!(op & XE_VM_BIND_FLAG_ASYNC);
2537 lockdep_assert_held(&vm->lock);
2539 lookup.start = addr;
2540 lookup.end = addr + range - 1;
2542 switch (VM_BIND_OP(op)) {
2543 case XE_VM_BIND_OP_MAP:
2544 case XE_VM_BIND_OP_MAP_USERPTR:
2545 vma = xe_vm_find_overlapping_vma(vm, &lookup);
2546 if (XE_IOCTL_ERR(xe, vma))
2549 case XE_VM_BIND_OP_UNMAP:
2550 case XE_VM_BIND_OP_PREFETCH:
2551 vma = xe_vm_find_overlapping_vma(vm, &lookup);
2552 if (XE_IOCTL_ERR(xe, !vma) ||
2553 XE_IOCTL_ERR(xe, (vma->start != addr ||
2554 vma->end != addr + range - 1) && !async))
2557 case XE_VM_BIND_OP_UNMAP_ALL:
2560 XE_BUG_ON("NOT POSSIBLE");
2567 static void prep_vma_destroy(struct xe_vm *vm, struct xe_vma *vma)
2569 down_read(&vm->userptr.notifier_lock);
2570 vma->destroyed = true;
2571 up_read(&vm->userptr.notifier_lock);
2572 xe_vm_remove_vma(vm, vma);
2575 static int prep_replacement_vma(struct xe_vm *vm, struct xe_vma *vma)
2579 if (vma->bo && !vma->bo->vm) {
2580 vm_insert_extobj(vm, vma);
2581 err = add_preempt_fences(vm, vma->bo);
2590 * Find all overlapping VMAs in lookup range and add to a list in the returned
2591 * VMA; all of the VMAs found will be unbound. Also possibly add 2 new VMAs that
2592 * need to be bound if first / last VMAs are not fully unbound. This is akin to
2595 static struct xe_vma *vm_unbind_lookup_vmas(struct xe_vm *vm,
2596 struct xe_vma *lookup)
2598 struct xe_vma *vma = xe_vm_find_overlapping_vma(vm, lookup);
2599 struct rb_node *node;
2600 struct xe_vma *first = vma, *last = vma, *new_first = NULL,
2601 *new_last = NULL, *__vma, *next;
2603 bool first_munmap_rebind = false;
2605 lockdep_assert_held(&vm->lock);
2608 node = &vma->vm_node;
2609 while ((node = rb_next(node))) {
2610 if (!xe_vma_cmp_vma_cb(lookup, node)) {
2611 __vma = to_xe_vma(node);
2612 list_add_tail(&__vma->unbind_link, &vma->unbind_link);
2619 node = &vma->vm_node;
2620 while ((node = rb_prev(node))) {
2621 if (!xe_vma_cmp_vma_cb(lookup, node)) {
2622 __vma = to_xe_vma(node);
2623 list_add(&__vma->unbind_link, &vma->unbind_link);
2630 if (first->start != lookup->start) {
2631 struct ww_acquire_ctx ww;
2634 err = xe_bo_lock(first->bo, &ww, 0, true);
2637 new_first = xe_vma_create(first->vm, first->bo,
2638 first->bo ? first->bo_offset :
2642 (first->pte_flags & XE_PTE_READ_ONLY),
2645 xe_bo_unlock(first->bo, &ww);
2651 err = xe_vma_userptr_pin_pages(new_first);
2655 err = prep_replacement_vma(vm, new_first);
2660 if (last->end != lookup->end) {
2661 struct ww_acquire_ctx ww;
2662 u64 chunk = lookup->end + 1 - last->start;
2665 err = xe_bo_lock(last->bo, &ww, 0, true);
2668 new_last = xe_vma_create(last->vm, last->bo,
2669 last->bo ? last->bo_offset + chunk :
2670 last->userptr.ptr + chunk,
2671 last->start + chunk,
2673 (last->pte_flags & XE_PTE_READ_ONLY),
2676 xe_bo_unlock(last->bo, &ww);
2682 err = xe_vma_userptr_pin_pages(new_last);
2686 err = prep_replacement_vma(vm, new_last);
2691 prep_vma_destroy(vm, vma);
2692 if (list_empty(&vma->unbind_link) && (new_first || new_last))
2693 vma->first_munmap_rebind = true;
2694 list_for_each_entry(__vma, &vma->unbind_link, unbind_link) {
2695 if ((new_first || new_last) && !first_munmap_rebind) {
2696 __vma->first_munmap_rebind = true;
2697 first_munmap_rebind = true;
2699 prep_vma_destroy(vm, __vma);
2702 xe_vm_insert_vma(vm, new_first);
2703 list_add_tail(&new_first->unbind_link, &vma->unbind_link);
2705 new_first->last_munmap_rebind = true;
2708 xe_vm_insert_vma(vm, new_last);
2709 list_add_tail(&new_last->unbind_link, &vma->unbind_link);
2710 new_last->last_munmap_rebind = true;
2716 list_for_each_entry_safe(__vma, next, &vma->unbind_link, unbind_link)
2717 list_del_init(&__vma->unbind_link);
2719 prep_vma_destroy(vm, new_last);
2720 xe_vma_destroy_unlocked(new_last);
2723 prep_vma_destroy(vm, new_first);
2724 xe_vma_destroy_unlocked(new_first);
2727 return ERR_PTR(err);
2731 * Similar to vm_unbind_lookup_vmas(): find all VMAs in the lookup range to prefetch
2733 static struct xe_vma *vm_prefetch_lookup_vmas(struct xe_vm *vm,
2734 struct xe_vma *lookup,
2737 struct xe_vma *vma = xe_vm_find_overlapping_vma(vm, lookup), *__vma,
2739 struct rb_node *node;
2741 if (!xe_vma_is_userptr(vma)) {
2742 if (!xe_bo_can_migrate(vma->bo, region_to_mem_type[region]))
2743 return ERR_PTR(-EINVAL);
2746 node = &vma->vm_node;
2747 while ((node = rb_next(node))) {
2748 if (!xe_vma_cmp_vma_cb(lookup, node)) {
2749 __vma = to_xe_vma(node);
2750 if (!xe_vma_is_userptr(__vma)) {
2751 if (!xe_bo_can_migrate(__vma->bo, region_to_mem_type[region]))
2754 list_add_tail(&__vma->unbind_link, &vma->unbind_link);
2760 node = &vma->vm_node;
2761 while ((node = rb_prev(node))) {
2762 if (!xe_vma_cmp_vma_cb(lookup, node)) {
2763 __vma = to_xe_vma(node);
2764 if (!xe_vma_is_userptr(__vma)) {
2765 if (!xe_bo_can_migrate(__vma->bo, region_to_mem_type[region]))
2768 list_add(&__vma->unbind_link, &vma->unbind_link);
2777 list_for_each_entry_safe(__vma, next, &vma->unbind_link,
2779 list_del_init(&__vma->unbind_link);
2781 return ERR_PTR(-EINVAL);
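/*
 * Collect every VMA bound to @bo in @vm onto the first VMA's unbind_link
 * list and mark them all for destruction, so an UNMAP_ALL operation can
 * unbind them in a single pass.
 */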
2784 static struct xe_vma *vm_unbind_all_lookup_vmas(struct xe_vm *vm,
2787 struct xe_vma *first = NULL, *vma;
2789 lockdep_assert_held(&vm->lock);
2790 xe_bo_assert_held(bo);
2792 list_for_each_entry(vma, &bo->vmas, bo_link) {
2796 prep_vma_destroy(vm, vma);
2800 list_add_tail(&vma->unbind_link, &first->unbind_link);
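/*
 * Turn a single bind op into the VMA (or chain of VMAs) it will operate on:
 * MAP / MAP_USERPTR create and insert a new VMA, while UNMAP, PREFETCH and
 * UNMAP_ALL look up the existing VMAs in the range (possibly splitting the
 * first / last one) via the helpers above.
 */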
2806 static struct xe_vma *vm_bind_ioctl_lookup_vma(struct xe_vm *vm,
2808 u64 bo_offset_or_userptr,
2809 u64 addr, u64 range, u32 op,
2810 u64 gt_mask, u32 region)
2812 struct ww_acquire_ctx ww;
2813 struct xe_vma *vma, lookup;
2816 lockdep_assert_held(&vm->lock);
2818 lookup.start = addr;
2819 lookup.end = addr + range - 1;
2821 switch (VM_BIND_OP(op)) {
2822 case XE_VM_BIND_OP_MAP:
2825 err = xe_bo_lock(bo, &ww, 0, true);
2827 return ERR_PTR(err);
2828 vma = xe_vma_create(vm, bo, bo_offset_or_userptr, addr,
2830 op & XE_VM_BIND_FLAG_READONLY,
2832 xe_bo_unlock(bo, &ww);
2834 return ERR_PTR(-ENOMEM);
2836 xe_vm_insert_vma(vm, vma);
2838 vm_insert_extobj(vm, vma);
2839 err = add_preempt_fences(vm, bo);
2841 prep_vma_destroy(vm, vma);
2842 xe_vma_destroy_unlocked(vma);
2844 return ERR_PTR(err);
2848 case XE_VM_BIND_OP_UNMAP:
2849 vma = vm_unbind_lookup_vmas(vm, &lookup);
2851 case XE_VM_BIND_OP_PREFETCH:
2852 vma = vm_prefetch_lookup_vmas(vm, &lookup, region);
2854 case XE_VM_BIND_OP_UNMAP_ALL:
2857 err = xe_bo_lock(bo, &ww, 0, true);
2859 return ERR_PTR(err);
2860 vma = vm_unbind_all_lookup_vmas(vm, bo);
2862 vma = ERR_PTR(-EINVAL);
2863 xe_bo_unlock(bo, &ww);
2865 case XE_VM_BIND_OP_MAP_USERPTR:
2868 vma = xe_vma_create(vm, NULL, bo_offset_or_userptr, addr,
2870 op & XE_VM_BIND_FLAG_READONLY,
2873 return ERR_PTR(-ENOMEM);
2875 err = xe_vma_userptr_pin_pages(vma);
2877 prep_vma_destroy(vm, vma);
2878 xe_vma_destroy_unlocked(vma);
2880 return ERR_PTR(err);
2882 xe_vm_insert_vma(vm, vma);
2886 XE_BUG_ON("NOT POSSIBLE");
2887 vma = ERR_PTR(-EINVAL);
2893 #ifdef TEST_VM_ASYNC_OPS_ERROR
2894 #define SUPPORTED_FLAGS \
2895 (FORCE_ASYNC_OP_ERROR | XE_VM_BIND_FLAG_ASYNC | \
2896 XE_VM_BIND_FLAG_READONLY | XE_VM_BIND_FLAG_IMMEDIATE | 0xffff)
2897 #else
2898 #define SUPPORTED_FLAGS \
2899 (XE_VM_BIND_FLAG_ASYNC | XE_VM_BIND_FLAG_READONLY | \
2900 XE_VM_BIND_FLAG_IMMEDIATE | 0xffff)
2901 #endif
2902 #define XE_64K_PAGE_MASK 0xffffull
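/*
 * Note (illustrative): XE_64K_PAGE_MASK covers the low 16 bits, so
 * "value & XE_64K_PAGE_MASK" is non-zero exactly when value is not 64KiB
 * aligned, e.g. 0x10000 & 0xffff == 0 (accepted) while
 * 0x18000 & 0xffff == 0x8000 (rejected by the 64K BO checks below).
 */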
2904 #define MAX_BINDS 512 /* FIXME: Picking random upper limit */
2906 static int vm_bind_ioctl_check_args(struct xe_device *xe,
2907 struct drm_xe_vm_bind *args,
2908 struct drm_xe_vm_bind_op **bind_ops,
2914 if (XE_IOCTL_ERR(xe, args->extensions) ||
2915 XE_IOCTL_ERR(xe, !args->num_binds) ||
2916 XE_IOCTL_ERR(xe, args->num_binds > MAX_BINDS))
2919 if (args->num_binds > 1) {
2920 u64 __user *bind_user =
2921 u64_to_user_ptr(args->vector_of_binds);
2923 *bind_ops = kmalloc(sizeof(struct drm_xe_vm_bind_op) *
2924 args->num_binds, GFP_KERNEL);
2928 err = __copy_from_user(*bind_ops, bind_user,
2929 sizeof(struct drm_xe_vm_bind_op) *
2931 if (XE_IOCTL_ERR(xe, err)) {
2936 *bind_ops = &args->bind;
2939 for (i = 0; i < args->num_binds; ++i) {
2940 u64 range = (*bind_ops)[i].range;
2941 u64 addr = (*bind_ops)[i].addr;
2942 u32 op = (*bind_ops)[i].op;
2943 u32 obj = (*bind_ops)[i].obj;
2944 u64 obj_offset = (*bind_ops)[i].obj_offset;
2945 u32 region = (*bind_ops)[i].region;
2948 *async = !!(op & XE_VM_BIND_FLAG_ASYNC);
2949 } else if (XE_IOCTL_ERR(xe, !*async) ||
2950 XE_IOCTL_ERR(xe, !(op & XE_VM_BIND_FLAG_ASYNC)) ||
2951 XE_IOCTL_ERR(xe, VM_BIND_OP(op) ==
2952 XE_VM_BIND_OP_RESTART)) {
2957 if (XE_IOCTL_ERR(xe, !*async &&
2958 VM_BIND_OP(op) == XE_VM_BIND_OP_UNMAP_ALL)) {
2963 if (XE_IOCTL_ERR(xe, !*async &&
2964 VM_BIND_OP(op) == XE_VM_BIND_OP_PREFETCH)) {
2969 if (XE_IOCTL_ERR(xe, VM_BIND_OP(op) >
2970 XE_VM_BIND_OP_PREFETCH) ||
2971 XE_IOCTL_ERR(xe, op & ~SUPPORTED_FLAGS) ||
2972 XE_IOCTL_ERR(xe, !obj &&
2973 VM_BIND_OP(op) == XE_VM_BIND_OP_MAP) ||
2974 XE_IOCTL_ERR(xe, !obj &&
2975 VM_BIND_OP(op) == XE_VM_BIND_OP_UNMAP_ALL) ||
2976 XE_IOCTL_ERR(xe, addr &&
2977 VM_BIND_OP(op) == XE_VM_BIND_OP_UNMAP_ALL) ||
2978 XE_IOCTL_ERR(xe, range &&
2979 VM_BIND_OP(op) == XE_VM_BIND_OP_UNMAP_ALL) ||
2980 XE_IOCTL_ERR(xe, obj &&
2981 VM_BIND_OP(op) == XE_VM_BIND_OP_MAP_USERPTR) ||
2982 XE_IOCTL_ERR(xe, obj &&
2983 VM_BIND_OP(op) == XE_VM_BIND_OP_PREFETCH) ||
2984 XE_IOCTL_ERR(xe, region &&
2985 VM_BIND_OP(op) != XE_VM_BIND_OP_PREFETCH) ||
2986 XE_IOCTL_ERR(xe, !(BIT(region) &
2987 xe->info.mem_region_mask)) ||
2988 XE_IOCTL_ERR(xe, obj &&
2989 VM_BIND_OP(op) == XE_VM_BIND_OP_UNMAP)) {
2994 if (XE_IOCTL_ERR(xe, obj_offset & ~PAGE_MASK) ||
2995 XE_IOCTL_ERR(xe, addr & ~PAGE_MASK) ||
2996 XE_IOCTL_ERR(xe, range & ~PAGE_MASK) ||
2997 XE_IOCTL_ERR(xe, !range && VM_BIND_OP(op) !=
2998 XE_VM_BIND_OP_RESTART &&
2999 VM_BIND_OP(op) != XE_VM_BIND_OP_UNMAP_ALL)) {
3008 if (args->num_binds > 1)
3013 int xe_vm_bind_ioctl(struct drm_device *dev, void *data, struct drm_file *file)
3015 struct xe_device *xe = to_xe_device(dev);
3016 struct xe_file *xef = to_xe_file(file);
3017 struct drm_xe_vm_bind *args = data;
3018 struct drm_xe_sync __user *syncs_user;
3019 struct xe_bo **bos = NULL;
3020 struct xe_vma **vmas = NULL;
3022 struct xe_engine *e = NULL;
3024 struct xe_sync_entry *syncs = NULL;
3025 struct drm_xe_vm_bind_op *bind_ops;
3030 err = vm_bind_ioctl_check_args(xe, args, &bind_ops, &async);
3034 vm = xe_vm_lookup(xef, args->vm_id);
3035 if (XE_IOCTL_ERR(xe, !vm)) {
3040 if (XE_IOCTL_ERR(xe, xe_vm_is_closed(vm))) {
3041 DRM_ERROR("VM closed while we began looking up?\n");
3046 if (args->engine_id) {
3047 e = xe_engine_lookup(xef, args->engine_id);
3048 if (XE_IOCTL_ERR(xe, !e)) {
3052 if (XE_IOCTL_ERR(xe, !(e->flags & ENGINE_FLAG_VM))) {
3058 if (VM_BIND_OP(bind_ops[0].op) == XE_VM_BIND_OP_RESTART) {
3059 if (XE_IOCTL_ERR(xe, !(vm->flags & XE_VM_FLAG_ASYNC_BIND_OPS)))
3061 if (XE_IOCTL_ERR(xe, !err && args->num_syncs))
3063 if (XE_IOCTL_ERR(xe, !err && !vm->async_ops.error))
3067 down_write(&vm->lock);
3068 trace_xe_vm_restart(vm);
3069 vm_set_async_error(vm, 0);
3070 up_write(&vm->lock);
3072 queue_work(system_unbound_wq, &vm->async_ops.work);
3074 /* Rebinds may have been blocked, give worker a kick */
3075 if (xe_vm_in_compute_mode(vm))
3076 queue_work(vm->xe->ordered_wq,
3077 &vm->preempt.rebind_work);
3083 if (XE_IOCTL_ERR(xe, !vm->async_ops.error &&
3084 async != !!(vm->flags & XE_VM_FLAG_ASYNC_BIND_OPS))) {
3089 for (i = 0; i < args->num_binds; ++i) {
3090 u64 range = bind_ops[i].range;
3091 u64 addr = bind_ops[i].addr;
3093 if (XE_IOCTL_ERR(xe, range > vm->size) ||
3094 XE_IOCTL_ERR(xe, addr > vm->size - range)) {
3099 if (bind_ops[i].gt_mask) {
3100 u64 valid_gts = BIT(xe->info.tile_count) - 1;
3102 if (XE_IOCTL_ERR(xe, bind_ops[i].gt_mask &
3110 bos = kzalloc(sizeof(*bos) * args->num_binds, GFP_KERNEL);
3116 vmas = kzalloc(sizeof(*vmas) * args->num_binds, GFP_KERNEL);
3122 for (i = 0; i < args->num_binds; ++i) {
3123 struct drm_gem_object *gem_obj;
3124 u64 range = bind_ops[i].range;
3125 u64 addr = bind_ops[i].addr;
3126 u32 obj = bind_ops[i].obj;
3127 u64 obj_offset = bind_ops[i].obj_offset;
3132 gem_obj = drm_gem_object_lookup(file, obj);
3133 if (XE_IOCTL_ERR(xe, !gem_obj)) {
3137 bos[i] = gem_to_xe_bo(gem_obj);
3139 if (XE_IOCTL_ERR(xe, range > bos[i]->size) ||
3140 XE_IOCTL_ERR(xe, obj_offset >
3141 bos[i]->size - range)) {
3146 if (bos[i]->flags & XE_BO_INTERNAL_64K) {
3147 if (XE_IOCTL_ERR(xe, obj_offset &
3148 XE_64K_PAGE_MASK) ||
3149 XE_IOCTL_ERR(xe, addr & XE_64K_PAGE_MASK) ||
3150 XE_IOCTL_ERR(xe, range & XE_64K_PAGE_MASK)) {
3157 if (args->num_syncs) {
3158 syncs = kcalloc(args->num_syncs, sizeof(*syncs), GFP_KERNEL);
3165 syncs_user = u64_to_user_ptr(args->syncs);
3166 for (num_syncs = 0; num_syncs < args->num_syncs; num_syncs++) {
3167 err = xe_sync_entry_parse(xe, xef, &syncs[num_syncs],
3168 &syncs_user[num_syncs], false,
3169 xe_vm_no_dma_fences(vm));
3174 err = down_write_killable(&vm->lock);
3178 /* Do some error checking first to make the unwind easier */
3179 for (i = 0; i < args->num_binds; ++i) {
3180 u64 range = bind_ops[i].range;
3181 u64 addr = bind_ops[i].addr;
3182 u32 op = bind_ops[i].op;
3184 err = __vm_bind_ioctl_lookup_vma(vm, bos[i], addr, range, op);
3186 goto release_vm_lock;
3189 for (i = 0; i < args->num_binds; ++i) {
3190 u64 range = bind_ops[i].range;
3191 u64 addr = bind_ops[i].addr;
3192 u32 op = bind_ops[i].op;
3193 u64 obj_offset = bind_ops[i].obj_offset;
3194 u64 gt_mask = bind_ops[i].gt_mask;
3195 u32 region = bind_ops[i].region;
3197 vmas[i] = vm_bind_ioctl_lookup_vma(vm, bos[i], obj_offset,
3198 addr, range, op, gt_mask,
3200 if (IS_ERR(vmas[i])) {
3201 err = PTR_ERR(vmas[i]);
3207 for (j = 0; j < args->num_binds; ++j) {
3208 struct xe_sync_entry *__syncs;
3209 u32 __num_syncs = 0;
3210 bool first_or_last = j == 0 || j == args->num_binds - 1;
3212 if (args->num_binds == 1) {
3213 __num_syncs = num_syncs;
3215 } else if (first_or_last && num_syncs) {
3216 bool first = j == 0;
3218 __syncs = kmalloc(sizeof(*__syncs) * num_syncs,
3225 /* in-syncs on first bind, out-syncs on last bind */
3226 for (i = 0; i < num_syncs; ++i) {
3227 bool signal = syncs[i].flags &
3230 if ((first && !signal) || (!first && signal))
3231 __syncs[__num_syncs++] = syncs[i];
3239 bool last = j == args->num_binds - 1;
3242 * Each pass of the async worker drops its references, so take a
3243 * set here; one set of references was already taken above.
3251 err = vm_bind_ioctl_async(vm, vmas[j], e, bos[j],
3252 bind_ops + j, __syncs,
3262 XE_BUG_ON(j != 0); /* Not supported */
3263 err = vm_bind_ioctl(vm, vmas[j], e, bos[j],
3264 bind_ops + j, __syncs,
3266 break; /* Needed so cleanup loops work */
3270 /* Most of cleanup owned by the async bind worker */
3271 if (async && !err) {
3272 up_write(&vm->lock);
3273 if (args->num_binds > 1)
3279 for (i = j; err && i < args->num_binds; ++i) {
3280 u32 op = bind_ops[i].op;
3281 struct xe_vma *vma, *next;
3286 list_for_each_entry_safe(vma, next, &vmas[i]->unbind_link,
3288 list_del_init(&vma->unbind_link);
3289 if (!vma->destroyed) {
3290 prep_vma_destroy(vm, vma);
3291 xe_vma_destroy_unlocked(vma);
3295 switch (VM_BIND_OP(op)) {
3296 case XE_VM_BIND_OP_MAP:
3297 prep_vma_destroy(vm, vmas[i]);
3298 xe_vma_destroy_unlocked(vmas[i]);
3300 case XE_VM_BIND_OP_MAP_USERPTR:
3301 prep_vma_destroy(vm, vmas[i]);
3302 xe_vma_destroy_unlocked(vmas[i]);
3307 up_write(&vm->lock);
3309 while (num_syncs--) {
3311 !(syncs[num_syncs].flags & DRM_XE_SYNC_SIGNAL))
3312 continue; /* Still in async worker */
3313 xe_sync_entry_cleanup(&syncs[num_syncs]);
3318 for (i = j; i < args->num_binds; ++i)
3328 if (args->num_binds > 1)
3334 * XXX: Using the TTM wrappers for now, likely can call into dma-resv code
3335 * directly to optimize. Also this likely should be an inline function.
3337 int xe_vm_lock(struct xe_vm *vm, struct ww_acquire_ctx *ww,
3338 int num_resv, bool intr)
3340 struct ttm_validate_buffer tv_vm;
3346 tv_vm.num_shared = num_resv;
3347 tv_vm.bo = xe_vm_ttm_bo(vm);
3348 list_add_tail(&tv_vm.head, &objs);
3350 return ttm_eu_reserve_buffers(ww, &objs, intr, &dups);
3353 void xe_vm_unlock(struct xe_vm *vm, struct ww_acquire_ctx *ww)
3355 dma_resv_unlock(&vm->resv);
3356 ww_acquire_fini(ww);
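/*
 * Illustrative caller sketch (an assumption, not an existing user): the
 * expected pairing of xe_vm_lock() / xe_vm_unlock() around an update made
 * under the VM's reservation. The helper name and body are hypothetical.
 */
static int __maybe_unused example_vm_locked_update(struct xe_vm *vm)
{
	struct ww_acquire_ctx ww;
	int err;

	/* Reserve the VM's dma-resv; interruptible, no extra shared fence slots. */
	err = xe_vm_lock(vm, &ww, 0, true);
	if (err)
		return err;

	/* ... modify state protected by the VM reservation here ... */

	xe_vm_unlock(vm, &ww);
	return 0;
}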
3360 * xe_vm_invalidate_vma - invalidate GPU mappings for VMA without a lock
3361 * @vma: VMA to invalidate
3363 * Walks the list of page table leaves, zeroing the entries owned by this
3364 * VMA, invalidates the TLBs, and blocks until the TLB invalidation is
3365 * complete.
3367 * Returns 0 for success, negative error code otherwise.
3369 int xe_vm_invalidate_vma(struct xe_vma *vma)
3371 struct xe_device *xe = vma->vm->xe;
3373 u32 gt_needs_invalidate = 0;
3374 int seqno[XE_MAX_GT];
3378 XE_BUG_ON(!xe_vm_in_fault_mode(vma->vm));
3379 trace_xe_vma_usm_invalidate(vma);
3381 /* Check that we don't race with page-table updates */
3382 if (IS_ENABLED(CONFIG_PROVE_LOCKING)) {
3383 if (xe_vma_is_userptr(vma)) {
3384 WARN_ON_ONCE(!mmu_interval_check_retry
3385 (&vma->userptr.notifier,
3386 vma->userptr.notifier_seq));
3387 WARN_ON_ONCE(!dma_resv_test_signaled(&vma->vm->resv,
3388 DMA_RESV_USAGE_BOOKKEEP));
3391 xe_bo_assert_held(vma->bo);
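	/*
	 * Two passes: first zap the PTEs and kick off a TLB invalidation on
	 * every GT that actually had this VMA mapped, then wait for all of
	 * the invalidations to complete so the GPU can no longer reach the
	 * old backing pages once we return.
	 */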
3395 for_each_gt(gt, xe, id) {
3396 if (xe_pt_zap_ptes(gt, vma)) {
3397 gt_needs_invalidate |= BIT(id);
3399 seqno[id] = xe_gt_tlb_invalidation_vma(gt, NULL, vma);
3405 for_each_gt(gt, xe, id) {
3406 if (gt_needs_invalidate & BIT(id)) {
3407 ret = xe_gt_tlb_invalidation_wait(gt, seqno[id]);
3413 vma->usm.gt_invalidated = vma->gt_mask;
3418 #if IS_ENABLED(CONFIG_DRM_XE_SIMPLE_ERROR_CAPTURE)
3419 int xe_analyze_vm(struct drm_printer *p, struct xe_vm *vm, int gt_id)
3421 struct rb_node *node;
3425 if (!down_read_trylock(&vm->lock)) {
3426 drm_printf(p, " Failed to acquire VM lock to dump capture\n");
3429 if (vm->pt_root[gt_id]) {
3430 addr = xe_bo_addr(vm->pt_root[gt_id]->bo, 0, XE_PAGE_SIZE,
3432 drm_printf(p, " VM root: A:0x%llx %s\n", addr, is_vram ? "VRAM" : "SYS");
3435 for (node = rb_first(&vm->vmas); node; node = rb_next(node)) {
3436 struct xe_vma *vma = to_xe_vma(node);
3437 bool is_userptr = xe_vma_is_userptr(vma);
3440 struct xe_res_cursor cur;
3442 xe_res_first_sg(vma->userptr.sg, 0, XE_PAGE_SIZE,
3444 addr = xe_res_dma(&cur);
3446 addr = __xe_bo_addr(vma->bo, 0, XE_PAGE_SIZE, &is_vram);
3448 drm_printf(p, " [%016llx-%016llx] S:0x%016llx A:%016llx %s\n",
3449 vma->start, vma->end, vma->end - vma->start + 1ull,
3450 addr, is_userptr ? "USR" : is_vram ? "VRAM" : "SYS");
3457 int xe_analyze_vm(struct drm_printer *p, struct xe_vm *vm, int gt_id)