1 // SPDX-License-Identifier: MIT
3 * Copyright © 2021 Intel Corporation
8 #include <linux/dma-fence-array.h>
10 #include <drm/ttm/ttm_execbuf_util.h>
11 #include <drm/ttm/ttm_tt.h>
12 #include <drm/xe_drm.h>
13 #include <linux/kthread.h>
15 #include <linux/swap.h>
18 #include "xe_device.h"
19 #include "xe_engine.h"
21 #include "xe_gt_pagefault.h"
22 #include "xe_gt_tlb_invalidation.h"
23 #include "xe_migrate.h"
25 #include "xe_preempt_fence.h"
27 #include "xe_res_cursor.h"
31 #define TEST_VM_ASYNC_OPS_ERROR
34 * xe_vma_userptr_check_repin() - Advisory check for repin needed
35 * @vma: The userptr vma
37 * Check if the userptr vma has been invalidated since last successful
38 * repin. The check is advisory only and the function can be called
39 * without the vm->userptr.notifier_lock held. There is no guarantee that the
40 * vma userptr will remain valid after a lockless check, so typically
41 * the call needs to be followed by a proper check under the notifier_lock.
43 * Return: 0 if userptr vma is valid, -EAGAIN otherwise; repin recommended.
45 int xe_vma_userptr_check_repin(struct xe_vma *vma)
47 return mmu_interval_check_retry(&vma->userptr.notifier,
48 vma->userptr.notifier_seq) ?
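/*
 * Pin the pages backing a userptr VMA and rebuild its dma-mapped sg table.
 * Called with vm->lock held. Pages are looked up with get_user_pages_fast()
 * (switching into the notifier's mm when running from a kthread) and
 * dma-mapped; -EAGAIN is returned if the range was invalidated again while
 * pinning, so the caller can retry.
 */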
52 int xe_vma_userptr_pin_pages(struct xe_vma *vma)
54 struct xe_vm *vm = vma->vm;
55 struct xe_device *xe = vm->xe;
56 const unsigned long num_pages =
57 (vma->end - vma->start + 1) >> PAGE_SHIFT;
59 bool in_kthread = !current->mm;
60 unsigned long notifier_seq;
62 bool read_only = vma->pte_flags & PTE_READ_ONLY;
64 lockdep_assert_held(&vm->lock);
65 XE_BUG_ON(!xe_vma_is_userptr(vma));
70 notifier_seq = mmu_interval_read_begin(&vma->userptr.notifier);
71 if (notifier_seq == vma->userptr.notifier_seq)
74 pages = kvmalloc_array(num_pages, sizeof(*pages), GFP_KERNEL);
78 if (vma->userptr.sg) {
79 dma_unmap_sgtable(xe->drm.dev,
81 read_only ? DMA_TO_DEVICE :
82 DMA_BIDIRECTIONAL, 0);
83 sg_free_table(vma->userptr.sg);
84 vma->userptr.sg = NULL;
89 if (!mmget_not_zero(vma->userptr.notifier.mm)) {
93 kthread_use_mm(vma->userptr.notifier.mm);
96 while (pinned < num_pages) {
97 ret = get_user_pages_fast(vma->userptr.ptr + pinned * PAGE_SIZE,
99 read_only ? 0 : FOLL_WRITE,
112 kthread_unuse_mm(vma->userptr.notifier.mm);
113 mmput(vma->userptr.notifier.mm);
119 ret = sg_alloc_table_from_pages(&vma->userptr.sgt, pages, pinned,
120 0, (u64)pinned << PAGE_SHIFT,
123 vma->userptr.sg = NULL;
126 vma->userptr.sg = &vma->userptr.sgt;
128 ret = dma_map_sgtable(xe->drm.dev, vma->userptr.sg,
129 read_only ? DMA_TO_DEVICE :
131 DMA_ATTR_SKIP_CPU_SYNC |
132 DMA_ATTR_NO_KERNEL_MAPPING);
134 sg_free_table(vma->userptr.sg);
135 vma->userptr.sg = NULL;
139 for (i = 0; i < pinned; ++i) {
142 set_page_dirty(pages[i]);
143 unlock_page(pages[i]);
146 mark_page_accessed(pages[i]);
150 release_pages(pages, pinned);
154 vma->userptr.notifier_seq = notifier_seq;
155 if (xe_vma_userptr_check_repin(vma) == -EAGAIN)
159 return ret < 0 ? ret : 0;
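/*
 * Returns true if any compute engine on the VM is missing its preempt fence
 * or has a preempt fence whose signaling has already been enabled, i.e. the
 * VM needs fresh preempt fences before it can run again.
 */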
162 static bool preempt_fences_waiting(struct xe_vm *vm)
166 lockdep_assert_held(&vm->lock);
167 xe_vm_assert_held(vm);
169 list_for_each_entry(e, &vm->preempt.engines, compute.link) {
170 if (!e->compute.pfence || (e->compute.pfence &&
171 test_bit(DMA_FENCE_FLAG_ENABLE_SIGNAL_BIT,
172 &e->compute.pfence->flags))) {
180 static void free_preempt_fences(struct list_head *list)
182 struct list_head *link, *next;
184 list_for_each_safe(link, next, list)
185 xe_preempt_fence_free(to_preempt_fence_from_link(link));
188 static int alloc_preempt_fences(struct xe_vm *vm, struct list_head *list,
191 lockdep_assert_held(&vm->lock);
192 xe_vm_assert_held(vm);
194 if (*count >= vm->preempt.num_engines)
197 for (; *count < vm->preempt.num_engines; ++(*count)) {
198 struct xe_preempt_fence *pfence = xe_preempt_fence_alloc();
201 return PTR_ERR(pfence);
203 list_move_tail(xe_preempt_fence_link(pfence), list);
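/*
 * Wait for all currently installed preempt fences to signal and drop them,
 * ensuring every compute engine on the VM is suspended before rebinding.
 */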
209 static int wait_for_existing_preempt_fences(struct xe_vm *vm)
213 xe_vm_assert_held(vm);
215 list_for_each_entry(e, &vm->preempt.engines, compute.link) {
216 if (e->compute.pfence) {
217 long timeout = dma_fence_wait(e->compute.pfence, false);
221 dma_fence_put(e->compute.pfence);
222 e->compute.pfence = NULL;
229 static void arm_preempt_fences(struct xe_vm *vm, struct list_head *list)
231 struct list_head *link;
234 list_for_each_entry(e, &vm->preempt.engines, compute.link) {
235 struct dma_fence *fence;
238 XE_BUG_ON(link == list);
240 fence = xe_preempt_fence_arm(to_preempt_fence_from_link(link),
241 e, e->compute.context,
243 dma_fence_put(e->compute.pfence);
244 e->compute.pfence = fence;
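/*
 * Attach the VM's current preempt fences to an external BO's reservation
 * object as BOOKKEEP fences, so the BO participates in the VM's compute-mode
 * preemption tracking.
 */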
248 static int add_preempt_fences(struct xe_vm *vm, struct xe_bo *bo)
251 struct ww_acquire_ctx ww;
254 err = xe_bo_lock(bo, &ww, vm->preempt.num_engines, true);
258 list_for_each_entry(e, &vm->preempt.engines, compute.link)
259 if (e->compute.pfence) {
260 dma_resv_add_fence(bo->ttm.base.resv,
262 DMA_RESV_USAGE_BOOKKEEP);
265 xe_bo_unlock(bo, &ww);
270 * xe_vm_fence_all_extobjs() - Add a fence to vm's external objects' resv
272 * @fence: The fence to add.
273 * @usage: The resv usage for the fence.
275 * Loops over all of the vm's external object bindings and adds a @fence
276 * with the given @usage to all of the external objects' reservation objects.
279 void xe_vm_fence_all_extobjs(struct xe_vm *vm, struct dma_fence *fence,
280 enum dma_resv_usage usage)
284 list_for_each_entry(vma, &vm->extobj.list, extobj.link)
285 dma_resv_add_fence(vma->bo->ttm.base.resv, fence, usage);
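/*
 * Resume the VM's compute engines and reinstall their (re-armed) preempt
 * fences in the VM's reservation object and in those of all external BOs.
 */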
288 static void resume_and_reinstall_preempt_fences(struct xe_vm *vm)
292 lockdep_assert_held(&vm->lock);
293 xe_vm_assert_held(vm);
295 list_for_each_entry(e, &vm->preempt.engines, compute.link) {
298 dma_resv_add_fence(&vm->resv, e->compute.pfence,
299 DMA_RESV_USAGE_BOOKKEEP);
300 xe_vm_fence_all_extobjs(vm, e->compute.pfence,
301 DMA_RESV_USAGE_BOOKKEEP);
305 int xe_vm_add_compute_engine(struct xe_vm *vm, struct xe_engine *e)
307 struct ttm_validate_buffer tv_onstack[XE_ONSTACK_TV];
308 struct ttm_validate_buffer *tv;
309 struct ww_acquire_ctx ww;
310 struct list_head objs;
311 struct dma_fence *pfence;
315 XE_BUG_ON(!xe_vm_in_compute_mode(vm));
317 down_write(&vm->lock);
319 err = xe_vm_lock_dma_resv(vm, &ww, tv_onstack, &tv, &objs, true, 1);
321 goto out_unlock_outer;
323 pfence = xe_preempt_fence_create(e, e->compute.context,
330 list_add(&e->compute.link, &vm->preempt.engines);
331 ++vm->preempt.num_engines;
332 e->compute.pfence = pfence;
334 down_read(&vm->userptr.notifier_lock);
336 dma_resv_add_fence(&vm->resv, pfence,
337 DMA_RESV_USAGE_BOOKKEEP);
339 xe_vm_fence_all_extobjs(vm, pfence, DMA_RESV_USAGE_BOOKKEEP);
342 * Check to see if a preemption on the VM or a userptr invalidation is
343 * in flight; if so, trigger this preempt fence to sync state with the
344 * other preempt fences on the VM.
346 wait = __xe_vm_userptr_needs_repin(vm) || preempt_fences_waiting(vm);
348 dma_fence_enable_sw_signaling(pfence);
350 up_read(&vm->userptr.notifier_lock);
353 xe_vm_unlock_dma_resv(vm, tv_onstack, tv, &ww, &objs);
361 * __xe_vm_userptr_needs_repin() - Check whether the VM does have userptrs
362 * that need repinning.
365 * This function checks for whether the VM has userptrs that need repinning,
366 * and provides a release-type barrier on the userptr.notifier_lock after checking.
369 * Return: 0 if there are no userptrs needing repinning, -EAGAIN if there are.
371 int __xe_vm_userptr_needs_repin(struct xe_vm *vm)
373 lockdep_assert_held_read(&vm->userptr.notifier_lock);
375 return (list_empty(&vm->userptr.repin_list) &&
376 list_empty(&vm->userptr.invalidated)) ? 0 : -EAGAIN;
380 * xe_vm_lock_dma_resv() - Lock the vm dma_resv object and the dma_resv
381 * objects of the vm's external buffer objects.
383 * @ww: Pointer to a struct ww_acquire_ctx locking context.
384 * @tv_onstack: Array size XE_ONSTACK_TV of storage for the struct
385 * ttm_validate_buffers used for locking.
386 * @tv: Pointer to a pointer that on output contains the actual storage used.
387 * @objs: List head for the buffer objects locked.
388 * @intr: Whether to lock interruptible.
389 * @num_shared: Number of dma-fence slots to reserve in the locked objects.
391 * Locks the vm dma-resv objects and all the dma-resv objects of the
392 * buffer objects on the vm external object list. The TTM utilities require
393 * a list of struct ttm_validate_buffers pointing to the actual buffer
394 * objects to lock. Storage for those struct ttm_validate_buffers should
395 * be provided in @tv_onstack, and is typically reserved on the stack
396 * of the caller. If the size of @tv_onstack isn't sufficient, then
397 * storage will be allocated internally using kvmalloc().
399 * The function performs deadlock handling internally, and after a
400 * successful return the ww locking transaction should be considered sealed.
403 * Return: 0 on success, Negative error code on error. In particular if
404 * @intr is set to true, -EINTR or -ERESTARTSYS may be returned. In case
405 * of error, any locking performed has been reverted.
407 int xe_vm_lock_dma_resv(struct xe_vm *vm, struct ww_acquire_ctx *ww,
408 struct ttm_validate_buffer *tv_onstack,
409 struct ttm_validate_buffer **tv,
410 struct list_head *objs,
412 unsigned int num_shared)
414 struct ttm_validate_buffer *tv_vm, *tv_bo;
415 struct xe_vma *vma, *next;
419 lockdep_assert_held(&vm->lock);
421 if (vm->extobj.entries < XE_ONSTACK_TV) {
424 tv_vm = kvmalloc_array(vm->extobj.entries + 1, sizeof(*tv_vm),
431 INIT_LIST_HEAD(objs);
432 list_for_each_entry(vma, &vm->extobj.list, extobj.link) {
433 tv_bo->num_shared = num_shared;
434 tv_bo->bo = &vma->bo->ttm;
436 list_add_tail(&tv_bo->head, objs);
439 tv_vm->num_shared = num_shared;
440 tv_vm->bo = xe_vm_ttm_bo(vm);
441 list_add_tail(&tv_vm->head, objs);
442 err = ttm_eu_reserve_buffers(ww, objs, intr, &dups);
446 spin_lock(&vm->notifier.list_lock);
447 list_for_each_entry_safe(vma, next, &vm->notifier.rebind_list,
448 notifier.rebind_link) {
449 xe_bo_assert_held(vma->bo);
451 list_del_init(&vma->notifier.rebind_link);
452 if (vma->gt_present && !vma->destroyed)
453 list_move_tail(&vma->rebind_link, &vm->rebind_list);
455 spin_unlock(&vm->notifier.list_lock);
461 if (tv_vm != tv_onstack)
468 * xe_vm_unlock_dma_resv() - Unlock reservation objects locked by
469 * xe_vm_lock_dma_resv()
471 * @tv_onstack: The @tv_onstack array given to xe_vm_lock_dma_resv().
472 * @tv: The value of *@tv given by xe_vm_lock_dma_resv().
473 * @ww: The ww_acquire_context used for locking.
474 * @objs: The list returned from xe_vm_lock_dma_resv().
476 * Unlocks the reservation objects and frees any memory allocated by
477 * xe_vm_lock_dma_resv().
479 void xe_vm_unlock_dma_resv(struct xe_vm *vm,
480 struct ttm_validate_buffer *tv_onstack,
481 struct ttm_validate_buffer *tv,
482 struct ww_acquire_ctx *ww,
483 struct list_head *objs)
486 * Nothing should've been able to enter the list while we were locked,
487 * since we've held the dma-resvs of all the vm's external objects,
488 * and holding the dma_resv of an object is required for list
489 * addition, and we shouldn't add ourselves.
491 XE_WARN_ON(!list_empty(&vm->notifier.rebind_list));
493 ttm_eu_backoff_reservation(ww, objs);
494 if (tv && tv != tv_onstack)
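/*
 * Minimal usage sketch for the two helpers above (caller already holds
 * vm->lock; error handling trimmed), mirroring the pattern used by
 * xe_vm_add_compute_engine() and the rebind worker:
 *
 *	struct ttm_validate_buffer tv_onstack[XE_ONSTACK_TV];
 *	struct ttm_validate_buffer *tv;
 *	struct ww_acquire_ctx ww;
 *	struct list_head objs;
 *
 *	err = xe_vm_lock_dma_resv(vm, &ww, tv_onstack, &tv, &objs, true, 1);
 *	if (err)
 *		return err;
 *	... operate on the vm and its external BOs under the locked resvs ...
 *	xe_vm_unlock_dma_resv(vm, tv_onstack, tv, &ww, &objs);
 */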
498 static void preempt_rebind_work_func(struct work_struct *w)
500 struct xe_vm *vm = container_of(w, struct xe_vm, preempt.rebind_work);
502 struct ttm_validate_buffer tv_onstack[XE_ONSTACK_TV];
503 struct ttm_validate_buffer *tv;
504 struct ww_acquire_ctx ww;
505 struct list_head objs;
506 struct dma_fence *rebind_fence;
507 unsigned int fence_count = 0;
508 LIST_HEAD(preempt_fences);
511 int __maybe_unused tries = 0;
513 XE_BUG_ON(!xe_vm_in_compute_mode(vm));
514 trace_xe_vm_rebind_worker_enter(vm);
516 if (xe_vm_is_closed(vm)) {
517 trace_xe_vm_rebind_worker_exit(vm);
521 down_write(&vm->lock);
524 if (vm->async_ops.error)
525 goto out_unlock_outer;
528 * Extreme corner where we exit a VM error state with a munmap style VM
529 * unbind inflight which requires a rebind. In this case the rebind
530 * needs to install some fences into the dma-resv slots. The worker to
531 * do this is queued; let that worker make progress by dropping vm->lock
532 * and trying this again.
534 if (vm->async_ops.munmap_rebind_inflight) {
536 flush_work(&vm->async_ops.work);
540 if (xe_vm_userptr_check_repin(vm)) {
541 err = xe_vm_userptr_pin(vm);
543 goto out_unlock_outer;
546 err = xe_vm_lock_dma_resv(vm, &ww, tv_onstack, &tv, &objs,
547 false, vm->preempt.num_engines);
549 goto out_unlock_outer;
551 /* Fresh preempt fences already installed. Everything is running. */
552 if (!preempt_fences_waiting(vm))
556 * This makes sure the vm is completely suspended and also balances
557 * xe_engine suspend and resume; we resume *all* vm engines below.
559 err = wait_for_existing_preempt_fences(vm);
563 err = alloc_preempt_fences(vm, &preempt_fences, &fence_count);
567 list_for_each_entry(vma, &vm->rebind_list, rebind_link) {
568 if (xe_vma_is_userptr(vma) || vma->destroyed)
571 err = xe_bo_validate(vma->bo, vm, false);
576 rebind_fence = xe_vm_rebind(vm, true);
577 if (IS_ERR(rebind_fence)) {
578 err = PTR_ERR(rebind_fence);
583 dma_fence_wait(rebind_fence, false);
584 dma_fence_put(rebind_fence);
587 /* Wait on munmap style VM unbinds */
588 wait = dma_resv_wait_timeout(&vm->resv,
589 DMA_RESV_USAGE_KERNEL,
590 false, MAX_SCHEDULE_TIMEOUT);
596 #define retry_required(__tries, __vm) \
597 (IS_ENABLED(CONFIG_DRM_XE_USERPTR_INVAL_INJECT) ? \
598 (!(__tries)++ || __xe_vm_userptr_needs_repin(__vm)) : \
599 __xe_vm_userptr_needs_repin(__vm))
601 down_read(&vm->userptr.notifier_lock);
602 if (retry_required(tries, vm)) {
603 up_read(&vm->userptr.notifier_lock);
608 #undef retry_required
610 /* Point of no return. */
611 arm_preempt_fences(vm, &preempt_fences);
612 resume_and_reinstall_preempt_fences(vm);
613 up_read(&vm->userptr.notifier_lock);
616 xe_vm_unlock_dma_resv(vm, tv_onstack, tv, &ww, &objs);
618 if (err == -EAGAIN) {
619 trace_xe_vm_rebind_worker_retry(vm);
624 free_preempt_fences(&preempt_fences);
626 XE_WARN_ON(err < 0); /* TODO: Kill VM or put in error state */
627 trace_xe_vm_rebind_worker_exit(vm);
630 struct async_op_fence;
631 static int __xe_vm_bind(struct xe_vm *vm, struct xe_vma *vma,
632 struct xe_engine *e, struct xe_sync_entry *syncs,
633 u32 num_syncs, struct async_op_fence *afence);
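/*
 * MMU interval notifier callback for userptr VMAs: bump the notifier
 * sequence, move bound userptrs onto the VM's invalidated list so the exec
 * and rebind paths repin them, and wait for in-flight GPU access by enabling
 * signaling on and waiting for the BOOKKEEP fences in the VM's reservation
 * object. In fault mode the VMA's GPU mappings are invalidated directly.
 */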
635 static bool vma_userptr_invalidate(struct mmu_interval_notifier *mni,
636 const struct mmu_notifier_range *range,
637 unsigned long cur_seq)
639 struct xe_vma *vma = container_of(mni, struct xe_vma, userptr.notifier);
640 struct xe_vm *vm = vma->vm;
641 struct dma_resv_iter cursor;
642 struct dma_fence *fence;
645 XE_BUG_ON(!xe_vma_is_userptr(vma));
646 trace_xe_vma_userptr_invalidate(vma);
648 if (!mmu_notifier_range_blockable(range))
651 down_write(&vm->userptr.notifier_lock);
652 mmu_interval_set_seq(mni, cur_seq);
654 /* No need to stop gpu access if the userptr is not yet bound. */
655 if (!vma->userptr.initial_bind) {
656 up_write(&vm->userptr.notifier_lock);
661 * Tell the exec and rebind worker they need to repin and rebind this userptr.
664 if (!xe_vm_in_fault_mode(vm) && !vma->destroyed && vma->gt_present) {
665 spin_lock(&vm->userptr.invalidated_lock);
666 list_move_tail(&vma->userptr.invalidate_link,
667 &vm->userptr.invalidated);
668 spin_unlock(&vm->userptr.invalidated_lock);
671 up_write(&vm->userptr.notifier_lock);
674 * Preempt fences turn into schedule disables, pipeline these.
675 * Note that even in fault mode, we need to wait for binds and
676 * unbinds to complete, and those are attached as BOOKKEEP fences
679 dma_resv_iter_begin(&cursor, &vm->resv,
680 DMA_RESV_USAGE_BOOKKEEP);
681 dma_resv_for_each_fence_unlocked(&cursor, fence)
682 dma_fence_enable_sw_signaling(fence);
683 dma_resv_iter_end(&cursor);
685 err = dma_resv_wait_timeout(&vm->resv,
686 DMA_RESV_USAGE_BOOKKEEP,
687 false, MAX_SCHEDULE_TIMEOUT);
688 XE_WARN_ON(err <= 0);
690 if (xe_vm_in_fault_mode(vm)) {
691 err = xe_vm_invalidate_vma(vma);
695 trace_xe_vma_userptr_invalidate_complete(vma);
700 static const struct mmu_interval_notifier_ops vma_userptr_notifier_ops = {
701 .invalidate = vma_userptr_invalidate,
704 int xe_vm_userptr_pin(struct xe_vm *vm)
706 struct xe_vma *vma, *next;
708 LIST_HEAD(tmp_evict);
710 lockdep_assert_held_write(&vm->lock);
712 /* Collect invalidated userptrs */
713 spin_lock(&vm->userptr.invalidated_lock);
714 list_for_each_entry_safe(vma, next, &vm->userptr.invalidated,
715 userptr.invalidate_link) {
716 list_del_init(&vma->userptr.invalidate_link);
717 list_move_tail(&vma->userptr_link, &vm->userptr.repin_list);
719 spin_unlock(&vm->userptr.invalidated_lock);
721 /* Pin and move to temporary list */
722 list_for_each_entry_safe(vma, next, &vm->userptr.repin_list, userptr_link) {
723 err = xe_vma_userptr_pin_pages(vma);
727 list_move_tail(&vma->userptr_link, &tmp_evict);
730 /* Take lock and move to rebind_list for rebinding. */
731 err = dma_resv_lock_interruptible(&vm->resv, NULL);
735 list_for_each_entry_safe(vma, next, &tmp_evict, userptr_link) {
736 list_del_init(&vma->userptr_link);
737 list_move_tail(&vma->rebind_link, &vm->rebind_list);
740 dma_resv_unlock(&vm->resv);
745 list_splice_tail(&tmp_evict, &vm->userptr.repin_list);
751 * xe_vm_userptr_check_repin() - Check whether the VM might have userptrs
752 * that need repinning.
755 * This function does an advisory check for whether the VM has userptrs that need repinning.
758 * Return: 0 if there are no indications of userptrs needing repinning,
759 * -EAGAIN if there are.
761 int xe_vm_userptr_check_repin(struct xe_vm *vm)
763 return (list_empty_careful(&vm->userptr.repin_list) &&
764 list_empty_careful(&vm->userptr.invalidated)) ? 0 : -EAGAIN;
767 static struct dma_fence *
768 xe_vm_bind_vma(struct xe_vma *vma, struct xe_engine *e,
769 struct xe_sync_entry *syncs, u32 num_syncs);
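/*
 * Rebind every VMA on the VM's rebind list, returning the fence of the last
 * bind (or NULL when there was nothing to rebind). Used from both the exec
 * path and the compute-mode rebind worker.
 */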
771 struct dma_fence *xe_vm_rebind(struct xe_vm *vm, bool rebind_worker)
773 struct dma_fence *fence = NULL;
774 struct xe_vma *vma, *next;
776 lockdep_assert_held(&vm->lock);
777 if (xe_vm_no_dma_fences(vm) && !rebind_worker)
780 xe_vm_assert_held(vm);
781 list_for_each_entry_safe(vma, next, &vm->rebind_list, rebind_link) {
782 XE_WARN_ON(!vma->gt_present);
784 list_del_init(&vma->rebind_link);
785 dma_fence_put(fence);
787 trace_xe_vma_rebind_worker(vma);
789 trace_xe_vma_rebind_exec(vma);
790 fence = xe_vm_bind_vma(vma, NULL, NULL, 0);
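/*
 * Allocate and initialize a VMA covering [start, end], backed either by a BO
 * (at the given offset) or by a userptr. For userptrs an mmu interval
 * notifier is registered over the range; for BOs the VMA is linked into the
 * BO's list of VMAs.
 */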
798 static struct xe_vma *xe_vma_create(struct xe_vm *vm,
800 u64 bo_offset_or_userptr,
809 XE_BUG_ON(start >= end);
810 XE_BUG_ON(end >= vm->size);
812 vma = kzalloc(sizeof(*vma), GFP_KERNEL);
814 vma = ERR_PTR(-ENOMEM);
818 INIT_LIST_HEAD(&vma->rebind_link);
819 INIT_LIST_HEAD(&vma->unbind_link);
820 INIT_LIST_HEAD(&vma->userptr_link);
821 INIT_LIST_HEAD(&vma->userptr.invalidate_link);
822 INIT_LIST_HEAD(&vma->notifier.rebind_link);
823 INIT_LIST_HEAD(&vma->extobj.link);
829 vma->pte_flags = PTE_READ_ONLY;
832 vma->gt_mask = gt_mask;
834 for_each_gt(gt, vm->xe, id)
835 if (!xe_gt_is_media_type(gt))
836 vma->gt_mask |= 0x1 << id;
839 if (vm->xe->info.platform == XE_PVC)
840 vma->use_atomic_access_pte_bit = true;
843 xe_bo_assert_held(bo);
844 vma->bo_offset = bo_offset_or_userptr;
845 vma->bo = xe_bo_get(bo);
846 list_add_tail(&vma->bo_link, &bo->vmas);
847 } else /* userptr */ {
848 u64 size = end - start + 1;
851 vma->userptr.ptr = bo_offset_or_userptr;
853 err = mmu_interval_notifier_insert(&vma->userptr.notifier,
855 vma->userptr.ptr, size,
856 &vma_userptr_notifier_ops);
863 vma->userptr.notifier_seq = LONG_MAX;
870 static bool vm_remove_extobj(struct xe_vma *vma)
872 if (!list_empty(&vma->extobj.link)) {
873 vma->vm->extobj.entries--;
874 list_del_init(&vma->extobj.link);
880 static void xe_vma_destroy_late(struct xe_vma *vma)
882 struct xe_vm *vm = vma->vm;
883 struct xe_device *xe = vm->xe;
884 bool read_only = vma->pte_flags & PTE_READ_ONLY;
886 if (xe_vma_is_userptr(vma)) {
887 if (vma->userptr.sg) {
888 dma_unmap_sgtable(xe->drm.dev,
890 read_only ? DMA_TO_DEVICE :
891 DMA_BIDIRECTIONAL, 0);
892 sg_free_table(vma->userptr.sg);
893 vma->userptr.sg = NULL;
897 * Since userptr pages are not pinned, we can't remove
898 * the notifier until we're sure the GPU is not accessing them.
901 mmu_interval_notifier_remove(&vma->userptr.notifier);
910 static void vma_destroy_work_func(struct work_struct *w)
913 container_of(w, struct xe_vma, destroy_work);
915 xe_vma_destroy_late(vma);
918 static struct xe_vma *
919 bo_has_vm_references_locked(struct xe_bo *bo, struct xe_vm *vm,
920 struct xe_vma *ignore)
924 list_for_each_entry(vma, &bo->vmas, bo_link) {
925 if (vma != ignore && vma->vm == vm && !vma->destroyed)
932 static bool bo_has_vm_references(struct xe_bo *bo, struct xe_vm *vm,
933 struct xe_vma *ignore)
935 struct ww_acquire_ctx ww;
938 xe_bo_lock(bo, &ww, 0, false);
939 ret = !!bo_has_vm_references_locked(bo, vm, ignore);
940 xe_bo_unlock(bo, &ww);
945 static void __vm_insert_extobj(struct xe_vm *vm, struct xe_vma *vma)
947 list_add(&vma->extobj.link, &vm->extobj.list);
948 vm->extobj.entries++;
951 static void vm_insert_extobj(struct xe_vm *vm, struct xe_vma *vma)
953 struct xe_bo *bo = vma->bo;
955 lockdep_assert_held_write(&vm->lock);
957 if (bo_has_vm_references(bo, vm, vma))
960 __vm_insert_extobj(vm, vma);
963 static void vma_destroy_cb(struct dma_fence *fence,
964 struct dma_fence_cb *cb)
966 struct xe_vma *vma = container_of(cb, struct xe_vma, destroy_cb);
968 INIT_WORK(&vma->destroy_work, vma_destroy_work_func);
969 queue_work(system_unbound_wq, &vma->destroy_work);
972 static void xe_vma_destroy(struct xe_vma *vma, struct dma_fence *fence)
974 struct xe_vm *vm = vma->vm;
976 lockdep_assert_held_write(&vm->lock);
977 XE_BUG_ON(!list_empty(&vma->unbind_link));
979 if (xe_vma_is_userptr(vma)) {
980 XE_WARN_ON(!vma->destroyed);
981 spin_lock(&vm->userptr.invalidated_lock);
982 list_del_init(&vma->userptr.invalidate_link);
983 spin_unlock(&vm->userptr.invalidated_lock);
984 list_del(&vma->userptr_link);
986 xe_bo_assert_held(vma->bo);
987 list_del(&vma->bo_link);
989 spin_lock(&vm->notifier.list_lock);
990 list_del(&vma->notifier.rebind_link);
991 spin_unlock(&vm->notifier.list_lock);
993 if (!vma->bo->vm && vm_remove_extobj(vma)) {
994 struct xe_vma *other;
996 other = bo_has_vm_references_locked(vma->bo, vm, NULL);
999 __vm_insert_extobj(vm, other);
1003 xe_vm_assert_held(vm);
1004 if (!list_empty(&vma->rebind_link))
1005 list_del(&vma->rebind_link);
1008 int ret = dma_fence_add_callback(fence, &vma->destroy_cb,
1012 XE_WARN_ON(ret != -ENOENT);
1013 xe_vma_destroy_late(vma);
1016 xe_vma_destroy_late(vma);
1020 static void xe_vma_destroy_unlocked(struct xe_vma *vma)
1022 struct ttm_validate_buffer tv[2];
1023 struct ww_acquire_ctx ww;
1024 struct xe_bo *bo = vma->bo;
1029 memset(tv, 0, sizeof(tv));
1030 tv[0].bo = xe_vm_ttm_bo(vma->vm);
1031 list_add(&tv[0].head, &objs);
1034 tv[1].bo = &xe_bo_get(bo)->ttm;
1035 list_add(&tv[1].head, &objs);
1037 err = ttm_eu_reserve_buffers(&ww, &objs, false, &dups);
1040 xe_vma_destroy(vma, NULL);
1042 ttm_eu_backoff_reservation(&ww, &objs);
1047 static struct xe_vma *to_xe_vma(const struct rb_node *node)
1049 BUILD_BUG_ON(offsetof(struct xe_vma, vm_node) != 0);
1050 return (struct xe_vma *)node;
1053 static int xe_vma_cmp(const struct xe_vma *a, const struct xe_vma *b)
1055 if (a->end < b->start) {
1057 } else if (b->end < a->start) {
1064 static bool xe_vma_less_cb(struct rb_node *a, const struct rb_node *b)
1066 return xe_vma_cmp(to_xe_vma(a), to_xe_vma(b)) < 0;
1069 int xe_vma_cmp_vma_cb(const void *key, const struct rb_node *node)
1071 struct xe_vma *cmp = to_xe_vma(node);
1072 const struct xe_vma *own = key;
1074 if (own->start > cmp->end)
1077 if (own->end < cmp->start)
1084 xe_vm_find_overlapping_vma(struct xe_vm *vm, const struct xe_vma *vma)
1086 struct rb_node *node;
1088 if (xe_vm_is_closed(vm))
1091 XE_BUG_ON(vma->end >= vm->size);
1092 lockdep_assert_held(&vm->lock);
1094 node = rb_find(vma, &vm->vmas, xe_vma_cmp_vma_cb);
1096 return node ? to_xe_vma(node) : NULL;
1099 static void xe_vm_insert_vma(struct xe_vm *vm, struct xe_vma *vma)
1101 XE_BUG_ON(vma->vm != vm);
1102 lockdep_assert_held(&vm->lock);
1104 rb_add(&vma->vm_node, &vm->vmas, xe_vma_less_cb);
1107 static void xe_vm_remove_vma(struct xe_vm *vm, struct xe_vma *vma)
1109 XE_BUG_ON(vma->vm != vm);
1110 lockdep_assert_held(&vm->lock);
1112 rb_erase(&vma->vm_node, &vm->vmas);
1113 if (vm->usm.last_fault_vma == vma)
1114 vm->usm.last_fault_vma = NULL;
1117 static void async_op_work_func(struct work_struct *w);
1118 static void vm_destroy_work_func(struct work_struct *w);
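/*
 * Create a VM: initialize its reservation object, lists, locks and workers,
 * allocate a page-table root per GT (plus scratch tables when requested),
 * and, for non-migration VMs, create a bind engine per GT against the
 * migration VM.
 */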
1120 struct xe_vm *xe_vm_create(struct xe_device *xe, u32 flags)
1123 int err, i = 0, number_gts = 0;
1127 vm = kzalloc(sizeof(*vm), GFP_KERNEL);
1129 return ERR_PTR(-ENOMEM);
1132 kref_init(&vm->refcount);
1133 dma_resv_init(&vm->resv);
1135 vm->size = 1ull << xe_pt_shift(xe->info.vm_max_level + 1);
1140 init_rwsem(&vm->lock);
1142 INIT_LIST_HEAD(&vm->rebind_list);
1144 INIT_LIST_HEAD(&vm->userptr.repin_list);
1145 INIT_LIST_HEAD(&vm->userptr.invalidated);
1146 init_rwsem(&vm->userptr.notifier_lock);
1147 spin_lock_init(&vm->userptr.invalidated_lock);
1149 INIT_LIST_HEAD(&vm->notifier.rebind_list);
1150 spin_lock_init(&vm->notifier.list_lock);
1152 INIT_LIST_HEAD(&vm->async_ops.pending);
1153 INIT_WORK(&vm->async_ops.work, async_op_work_func);
1154 spin_lock_init(&vm->async_ops.lock);
1156 INIT_WORK(&vm->destroy_work, vm_destroy_work_func);
1158 INIT_LIST_HEAD(&vm->preempt.engines);
1159 vm->preempt.min_run_period_ms = 10; /* FIXME: Wire up to uAPI */
1161 INIT_LIST_HEAD(&vm->extobj.list);
1163 if (!(flags & XE_VM_FLAG_MIGRATION)) {
1164 /* We need to immediately exit from any D3 state */
1165 xe_pm_runtime_get(xe);
1166 xe_device_mem_access_get(xe);
1169 err = dma_resv_lock_interruptible(&vm->resv, NULL);
1173 if (IS_DGFX(xe) && xe->info.vram_flags & XE_VRAM_FLAGS_NEED64K)
1174 vm->flags |= XE_VM_FLAGS_64K;
1176 for_each_gt(gt, xe, id) {
1177 if (xe_gt_is_media_type(gt))
1180 if (flags & XE_VM_FLAG_MIGRATION &&
1181 gt->info.id != XE_VM_FLAG_GT_ID(flags))
1184 vm->pt_root[id] = xe_pt_create(vm, gt, xe->info.vm_max_level);
1185 if (IS_ERR(vm->pt_root[id])) {
1186 err = PTR_ERR(vm->pt_root[id]);
1187 vm->pt_root[id] = NULL;
1188 goto err_destroy_root;
1192 if (flags & XE_VM_FLAG_SCRATCH_PAGE) {
1193 for_each_gt(gt, xe, id) {
1194 if (!vm->pt_root[id])
1197 err = xe_pt_create_scratch(xe, gt, vm);
1199 goto err_scratch_pt;
1203 if (flags & DRM_XE_VM_CREATE_COMPUTE_MODE) {
1204 INIT_WORK(&vm->preempt.rebind_work, preempt_rebind_work_func);
1205 vm->flags |= XE_VM_FLAG_COMPUTE_MODE;
1208 if (flags & DRM_XE_VM_CREATE_ASYNC_BIND_OPS) {
1209 vm->async_ops.fence.context = dma_fence_context_alloc(1);
1210 vm->flags |= XE_VM_FLAG_ASYNC_BIND_OPS;
1213 /* Fill pt_root after allocating scratch tables */
1214 for_each_gt(gt, xe, id) {
1215 if (!vm->pt_root[id])
1218 xe_pt_populate_empty(gt, vm, vm->pt_root[id]);
1220 dma_resv_unlock(&vm->resv);
1222 /* Kernel migration VM shouldn't have a circular loop.. */
1223 if (!(flags & XE_VM_FLAG_MIGRATION)) {
1224 for_each_gt(gt, xe, id) {
1225 struct xe_vm *migrate_vm;
1226 struct xe_engine *eng;
1228 if (!vm->pt_root[id])
1231 migrate_vm = xe_migrate_get_vm(gt->migrate);
1232 eng = xe_engine_create_class(xe, gt, migrate_vm,
1233 XE_ENGINE_CLASS_COPY,
1235 xe_vm_put(migrate_vm);
1237 xe_vm_close_and_put(vm);
1238 return ERR_CAST(eng);
1246 vm->composite_fence_ctx = dma_fence_context_alloc(1);
1248 mutex_lock(&xe->usm.lock);
1249 if (flags & XE_VM_FLAG_FAULT_MODE)
1250 xe->usm.num_vm_in_fault_mode++;
1251 else if (!(flags & XE_VM_FLAG_MIGRATION))
1252 xe->usm.num_vm_in_non_fault_mode++;
1253 mutex_unlock(&xe->usm.lock);
1255 trace_xe_vm_create(vm);
1260 for_each_gt(gt, xe, id) {
1261 if (!vm->pt_root[id])
1264 i = vm->pt_root[id]->level;
1266 if (vm->scratch_pt[id][--i])
1267 xe_pt_destroy(vm->scratch_pt[id][i],
1269 xe_bo_unpin(vm->scratch_bo[id]);
1270 xe_bo_put(vm->scratch_bo[id]);
1273 for_each_gt(gt, xe, id) {
1274 if (vm->pt_root[id])
1275 xe_pt_destroy(vm->pt_root[id], vm->flags, NULL);
1277 dma_resv_unlock(&vm->resv);
1279 dma_resv_fini(&vm->resv);
1281 if (!(flags & XE_VM_FLAG_MIGRATION)) {
1282 xe_device_mem_access_put(xe);
1283 xe_pm_runtime_put(xe);
1285 return ERR_PTR(err);
1288 static void flush_async_ops(struct xe_vm *vm)
1290 queue_work(system_unbound_wq, &vm->async_ops.work);
1291 flush_work(&vm->async_ops.work);
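/*
 * Copy details of a failed async bind operation to the user-space error
 * capture address, switching into the captured mm when invoked from the
 * async worker kthread, and wake up anyone waiting on the capture queue.
 */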
1294 static void vm_error_capture(struct xe_vm *vm, int err,
1295 u32 op, u64 addr, u64 size)
1297 struct drm_xe_vm_bind_op_error_capture capture;
1298 u64 __user *address =
1299 u64_to_user_ptr(vm->async_ops.error_capture.addr);
1300 bool in_kthread = !current->mm;
1302 capture.error = err;
1304 capture.addr = addr;
1305 capture.size = size;
1308 if (!mmget_not_zero(vm->async_ops.error_capture.mm))
1310 kthread_use_mm(vm->async_ops.error_capture.mm);
1313 if (copy_to_user(address, &capture, sizeof(capture)))
1314 XE_WARN_ON("Copy to user failed");
1317 kthread_unuse_mm(vm->async_ops.error_capture.mm);
1318 mmput(vm->async_ops.error_capture.mm);
1322 wake_up_all(&vm->async_ops.error_capture.wq);
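/*
 * Tear down a VM: flush pending async ops and the rebind worker, kill and
 * put the per-GT bind engines, then destroy all VMAs. VMAs whose BO does not
 * share the VM's reservation object are collected in a local tree and
 * destroyed after the VM locks have been dropped.
 */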
1325 void xe_vm_close_and_put(struct xe_vm *vm)
1327 struct rb_root contested = RB_ROOT;
1328 struct ww_acquire_ctx ww;
1329 struct xe_device *xe = vm->xe;
1333 XE_BUG_ON(vm->preempt.num_engines);
1337 flush_async_ops(vm);
1338 if (xe_vm_in_compute_mode(vm))
1339 flush_work(&vm->preempt.rebind_work);
1341 for_each_gt(gt, xe, id) {
1343 xe_engine_kill(vm->eng[id]);
1344 xe_engine_put(vm->eng[id]);
1349 down_write(&vm->lock);
1350 xe_vm_lock(vm, &ww, 0, false);
1351 while (vm->vmas.rb_node) {
1352 struct xe_vma *vma = to_xe_vma(vm->vmas.rb_node);
1354 if (xe_vma_is_userptr(vma)) {
1355 down_read(&vm->userptr.notifier_lock);
1356 vma->destroyed = true;
1357 up_read(&vm->userptr.notifier_lock);
1360 rb_erase(&vma->vm_node, &vm->vmas);
1362 /* easy case, remove from VMA? */
1363 if (xe_vma_is_userptr(vma) || vma->bo->vm) {
1364 xe_vma_destroy(vma, NULL);
1368 rb_add(&vma->vm_node, &contested, xe_vma_less_cb);
1372 * All vm operations will add shared fences to resv.
1373 * The only exception is eviction for a shared object,
1374 * but even so, the unbind when evicted would still
1375 * install a fence to resv. Hence it's safe to
1376 * destroy the pagetables immediately.
1378 for_each_gt(gt, xe, id) {
1379 if (vm->scratch_bo[id]) {
1382 xe_bo_unpin(vm->scratch_bo[id]);
1383 xe_bo_put(vm->scratch_bo[id]);
1384 for (i = 0; i < vm->pt_root[id]->level; i++)
1385 xe_pt_destroy(vm->scratch_pt[id][i], vm->flags,
1389 xe_vm_unlock(vm, &ww);
1391 if (contested.rb_node) {
1394 * VM is now dead, cannot re-add nodes to vm->vmas if it's NULL
1395 * Since we hold a refcount to the bo, we can remove and free
1396 * the members safely without locking.
1398 while (contested.rb_node) {
1399 struct xe_vma *vma = to_xe_vma(contested.rb_node);
1401 rb_erase(&vma->vm_node, &contested);
1402 xe_vma_destroy_unlocked(vma);
1406 if (vm->async_ops.error_capture.addr)
1407 wake_up_all(&vm->async_ops.error_capture.wq);
1409 XE_WARN_ON(!list_empty(&vm->extobj.list));
1410 up_write(&vm->lock);
1415 static void vm_destroy_work_func(struct work_struct *w)
1418 container_of(w, struct xe_vm, destroy_work);
1419 struct ww_acquire_ctx ww;
1420 struct xe_device *xe = vm->xe;
1425 /* xe_vm_close_and_put was not called? */
1426 XE_WARN_ON(vm->size);
1428 if (!(vm->flags & XE_VM_FLAG_MIGRATION)) {
1429 xe_device_mem_access_put(xe);
1430 xe_pm_runtime_put(xe);
1432 if (xe->info.supports_usm) {
1433 mutex_lock(&xe->usm.lock);
1434 lookup = xa_erase(&xe->usm.asid_to_vm, vm->usm.asid);
1435 XE_WARN_ON(lookup != vm);
1436 mutex_unlock(&xe->usm.lock);
1441 * XXX: We delay destroying the PT root until the VM is freed as the PT root
1442 * is needed for xe_vm_lock to work. If we remove that dependency this
1443 * can be moved to xe_vm_close_and_put.
1445 xe_vm_lock(vm, &ww, 0, false);
1446 for_each_gt(gt, xe, id) {
1447 if (vm->pt_root[id]) {
1448 xe_pt_destroy(vm->pt_root[id], vm->flags, NULL);
1449 vm->pt_root[id] = NULL;
1452 xe_vm_unlock(vm, &ww);
1454 mutex_lock(&xe->usm.lock);
1455 if (vm->flags & XE_VM_FLAG_FAULT_MODE)
1456 xe->usm.num_vm_in_fault_mode--;
1457 else if (!(vm->flags & XE_VM_FLAG_MIGRATION))
1458 xe->usm.num_vm_in_non_fault_mode--;
1459 mutex_unlock(&xe->usm.lock);
1461 trace_xe_vm_free(vm);
1462 dma_fence_put(vm->rebind_fence);
1463 dma_resv_fini(&vm->resv);
1468 void xe_vm_free(struct kref *ref)
1470 struct xe_vm *vm = container_of(ref, struct xe_vm, refcount);
1472 /* To destroy the VM we need to be able to sleep */
1473 queue_work(system_unbound_wq, &vm->destroy_work);
1476 struct xe_vm *xe_vm_lookup(struct xe_file *xef, u32 id)
1480 mutex_lock(&xef->vm.lock);
1481 vm = xa_load(&xef->vm.xa, id);
1482 mutex_unlock(&xef->vm.lock);
1490 u64 xe_vm_pdp4_descriptor(struct xe_vm *vm, struct xe_gt *full_gt)
1492 XE_BUG_ON(xe_gt_is_media_type(full_gt));
1494 return gen8_pde_encode(vm->pt_root[full_gt->info.id]->bo, 0,
1498 static struct dma_fence *
1499 xe_vm_unbind_vma(struct xe_vma *vma, struct xe_engine *e,
1500 struct xe_sync_entry *syncs, u32 num_syncs)
1503 struct dma_fence *fence = NULL;
1504 struct dma_fence **fences = NULL;
1505 struct dma_fence_array *cf = NULL;
1506 struct xe_vm *vm = vma->vm;
1507 int cur_fence = 0, i;
1508 int number_gts = hweight_long(vma->gt_present);
1512 trace_xe_vma_unbind(vma);
1514 if (number_gts > 1) {
1515 fences = kmalloc_array(number_gts, sizeof(*fences),
1518 return ERR_PTR(-ENOMEM);
1521 for_each_gt(gt, vm->xe, id) {
1522 if (!(vma->gt_present & BIT(id)))
1525 XE_BUG_ON(xe_gt_is_media_type(gt));
1527 fence = __xe_pt_unbind_vma(gt, vma, e, syncs, num_syncs);
1528 if (IS_ERR(fence)) {
1529 err = PTR_ERR(fence);
1534 fences[cur_fence++] = fence;
1537 if (e && vm->pt_root[id] && !list_empty(&e->multi_gt_list))
1538 e = list_next_entry(e, multi_gt_list);
1542 cf = dma_fence_array_create(number_gts, fences,
1543 vm->composite_fence_ctx,
1544 vm->composite_fence_seqno++,
1547 --vm->composite_fence_seqno;
1553 for (i = 0; i < num_syncs; i++)
1554 xe_sync_entry_signal(&syncs[i], NULL, cf ? &cf->base : fence);
1556 return cf ? &cf->base : !fence ? dma_fence_get_stub() : fence;
1561 /* FIXME: Rewind the previous binds? */
1562 dma_fence_put(fences[--cur_fence]);
1567 return ERR_PTR(err);
1570 static struct dma_fence *
1571 xe_vm_bind_vma(struct xe_vma *vma, struct xe_engine *e,
1572 struct xe_sync_entry *syncs, u32 num_syncs)
1575 struct dma_fence *fence;
1576 struct dma_fence **fences = NULL;
1577 struct dma_fence_array *cf = NULL;
1578 struct xe_vm *vm = vma->vm;
1579 int cur_fence = 0, i;
1580 int number_gts = hweight_long(vma->gt_mask);
1584 trace_xe_vma_bind(vma);
1586 if (number_gts > 1) {
1587 fences = kmalloc_array(number_gts, sizeof(*fences),
1590 return ERR_PTR(-ENOMEM);
1593 for_each_gt(gt, vm->xe, id) {
1594 if (!(vma->gt_mask & BIT(id)))
1597 XE_BUG_ON(xe_gt_is_media_type(gt));
1598 fence = __xe_pt_bind_vma(gt, vma, e, syncs, num_syncs,
1599 vma->gt_present & BIT(id));
1600 if (IS_ERR(fence)) {
1601 err = PTR_ERR(fence);
1606 fences[cur_fence++] = fence;
1609 if (e && vm->pt_root[id] && !list_empty(&e->multi_gt_list))
1610 e = list_next_entry(e, multi_gt_list);
1614 cf = dma_fence_array_create(number_gts, fences,
1615 vm->composite_fence_ctx,
1616 vm->composite_fence_seqno++,
1619 --vm->composite_fence_seqno;
1625 for (i = 0; i < num_syncs; i++)
1626 xe_sync_entry_signal(&syncs[i], NULL, cf ? &cf->base : fence);
1628 return cf ? &cf->base : fence;
1633 /* FIXME: Rewind the previous binds? */
1634 dma_fence_put(fences[--cur_fence]);
1639 return ERR_PTR(err);
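/*
 * Fence handed back for asynchronous bind operations. It is signalled (and
 * any error propagated) from a callback on the underlying bind/unbind fence
 * once that fence completes.
 */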
1642 struct async_op_fence {
1643 struct dma_fence fence;
1644 struct dma_fence *wait_fence;
1645 struct dma_fence_cb cb;
1647 wait_queue_head_t wq;
1651 static const char *async_op_fence_get_driver_name(struct dma_fence *dma_fence)
1657 async_op_fence_get_timeline_name(struct dma_fence *dma_fence)
1659 return "async_op_fence";
1662 static const struct dma_fence_ops async_op_fence_ops = {
1663 .get_driver_name = async_op_fence_get_driver_name,
1664 .get_timeline_name = async_op_fence_get_timeline_name,
1667 static void async_op_fence_cb(struct dma_fence *fence, struct dma_fence_cb *cb)
1669 struct async_op_fence *afence =
1670 container_of(cb, struct async_op_fence, cb);
1672 afence->fence.error = afence->wait_fence->error;
1673 dma_fence_signal(&afence->fence);
1674 xe_vm_put(afence->vm);
1675 dma_fence_put(afence->wait_fence);
1676 dma_fence_put(&afence->fence);
1679 static void add_async_op_fence_cb(struct xe_vm *vm,
1680 struct dma_fence *fence,
1681 struct async_op_fence *afence)
1685 if (!xe_vm_no_dma_fences(vm)) {
1686 afence->started = true;
1688 wake_up_all(&afence->wq);
1691 afence->wait_fence = dma_fence_get(fence);
1692 afence->vm = xe_vm_get(vm);
1693 dma_fence_get(&afence->fence);
1694 ret = dma_fence_add_callback(fence, &afence->cb, async_op_fence_cb);
1695 if (ret == -ENOENT) {
1696 afence->fence.error = afence->wait_fence->error;
1697 dma_fence_signal(&afence->fence);
1701 dma_fence_put(afence->wait_fence);
1702 dma_fence_put(&afence->fence);
1704 XE_WARN_ON(ret && ret != -ENOENT);
1707 int xe_vm_async_fence_wait_start(struct dma_fence *fence)
1709 if (fence->ops == &async_op_fence_ops) {
1710 struct async_op_fence *afence =
1711 container_of(fence, struct async_op_fence, fence);
1713 XE_BUG_ON(xe_vm_no_dma_fences(afence->vm));
1716 return wait_event_interruptible(afence->wq, afence->started);
1722 static int __xe_vm_bind(struct xe_vm *vm, struct xe_vma *vma,
1723 struct xe_engine *e, struct xe_sync_entry *syncs,
1724 u32 num_syncs, struct async_op_fence *afence)
1726 struct dma_fence *fence;
1728 xe_vm_assert_held(vm);
1730 fence = xe_vm_bind_vma(vma, e, syncs, num_syncs);
1732 return PTR_ERR(fence);
1734 add_async_op_fence_cb(vm, fence, afence);
1736 dma_fence_put(fence);
1740 static int xe_vm_bind(struct xe_vm *vm, struct xe_vma *vma, struct xe_engine *e,
1741 struct xe_bo *bo, struct xe_sync_entry *syncs,
1742 u32 num_syncs, struct async_op_fence *afence)
1746 xe_vm_assert_held(vm);
1747 xe_bo_assert_held(bo);
1750 err = xe_bo_validate(bo, vm, true);
1755 return __xe_vm_bind(vm, vma, e, syncs, num_syncs, afence);
1758 static int xe_vm_unbind(struct xe_vm *vm, struct xe_vma *vma,
1759 struct xe_engine *e, struct xe_sync_entry *syncs,
1760 u32 num_syncs, struct async_op_fence *afence)
1762 struct dma_fence *fence;
1764 xe_vm_assert_held(vm);
1765 xe_bo_assert_held(vma->bo);
1767 fence = xe_vm_unbind_vma(vma, e, syncs, num_syncs);
1769 return PTR_ERR(fence);
1771 add_async_op_fence_cb(vm, fence, afence);
1773 xe_vma_destroy(vma, fence);
1774 dma_fence_put(fence);
1779 static int vm_set_error_capture_address(struct xe_device *xe, struct xe_vm *vm,
1782 if (XE_IOCTL_ERR(xe, !value))
1785 if (XE_IOCTL_ERR(xe, !(vm->flags & XE_VM_FLAG_ASYNC_BIND_OPS)))
1788 if (XE_IOCTL_ERR(xe, vm->async_ops.error_capture.addr))
1791 vm->async_ops.error_capture.mm = current->mm;
1792 vm->async_ops.error_capture.addr = value;
1793 init_waitqueue_head(&vm->async_ops.error_capture.wq);
1798 typedef int (*xe_vm_set_property_fn)(struct xe_device *xe, struct xe_vm *vm,
1801 static const xe_vm_set_property_fn vm_set_property_funcs[] = {
1802 [XE_VM_PROPERTY_BIND_OP_ERROR_CAPTURE_ADDRESS] =
1803 vm_set_error_capture_address,
1806 static int vm_user_ext_set_property(struct xe_device *xe, struct xe_vm *vm,
1809 u64 __user *address = u64_to_user_ptr(extension);
1810 struct drm_xe_ext_vm_set_property ext;
1813 err = __copy_from_user(&ext, address, sizeof(ext));
1814 if (XE_IOCTL_ERR(xe, err))
1817 if (XE_IOCTL_ERR(xe, ext.property >=
1818 ARRAY_SIZE(vm_set_property_funcs)))
1821 return vm_set_property_funcs[ext.property](xe, vm, ext.value);
1824 typedef int (*xe_vm_user_extension_fn)(struct xe_device *xe, struct xe_vm *vm,
1827 static const xe_vm_set_property_fn vm_user_extension_funcs[] = {
1828 [XE_VM_EXTENSION_SET_PROPERTY] = vm_user_ext_set_property,
1831 #define MAX_USER_EXTENSIONS 16
1832 static int vm_user_extensions(struct xe_device *xe, struct xe_vm *vm,
1833 u64 extensions, int ext_number)
1835 u64 __user *address = u64_to_user_ptr(extensions);
1836 struct xe_user_extension ext;
1839 if (XE_IOCTL_ERR(xe, ext_number >= MAX_USER_EXTENSIONS))
1842 err = __copy_from_user(&ext, address, sizeof(ext));
1843 if (XE_IOCTL_ERR(xe, err))
1846 if (XE_IOCTL_ERR(xe, ext.name >=
1847 ARRAY_SIZE(vm_user_extension_funcs)))
1850 err = vm_user_extension_funcs[ext.name](xe, vm, extensions);
1851 if (XE_IOCTL_ERR(xe, err))
1854 if (ext.next_extension)
1855 return vm_user_extensions(xe, vm, ext.next_extension,
1861 #define ALL_DRM_XE_VM_CREATE_FLAGS (DRM_XE_VM_CREATE_SCRATCH_PAGE | \
1862 DRM_XE_VM_CREATE_COMPUTE_MODE | \
1863 DRM_XE_VM_CREATE_ASYNC_BIND_OPS | \
1864 DRM_XE_VM_CREATE_FAULT_MODE)
1866 int xe_vm_create_ioctl(struct drm_device *dev, void *data,
1867 struct drm_file *file)
1869 struct xe_device *xe = to_xe_device(dev);
1870 struct xe_file *xef = to_xe_file(file);
1871 struct drm_xe_vm_create *args = data;
1877 if (XE_IOCTL_ERR(xe, args->flags & ~ALL_DRM_XE_VM_CREATE_FLAGS))
1880 if (XE_IOCTL_ERR(xe, args->flags & DRM_XE_VM_CREATE_SCRATCH_PAGE &&
1881 args->flags & DRM_XE_VM_CREATE_FAULT_MODE))
1884 if (XE_IOCTL_ERR(xe, args->flags & DRM_XE_VM_CREATE_COMPUTE_MODE &&
1885 args->flags & DRM_XE_VM_CREATE_FAULT_MODE))
1888 if (XE_IOCTL_ERR(xe, args->flags & DRM_XE_VM_CREATE_FAULT_MODE &&
1889 xe_device_in_non_fault_mode(xe)))
1892 if (XE_IOCTL_ERR(xe, !(args->flags & DRM_XE_VM_CREATE_FAULT_MODE) &&
1893 xe_device_in_fault_mode(xe)))
1896 if (XE_IOCTL_ERR(xe, args->flags & DRM_XE_VM_CREATE_FAULT_MODE &&
1897 !xe->info.supports_usm))
1900 if (args->flags & DRM_XE_VM_CREATE_SCRATCH_PAGE)
1901 flags |= XE_VM_FLAG_SCRATCH_PAGE;
1902 if (args->flags & DRM_XE_VM_CREATE_COMPUTE_MODE)
1903 flags |= XE_VM_FLAG_COMPUTE_MODE;
1904 if (args->flags & DRM_XE_VM_CREATE_ASYNC_BIND_OPS)
1905 flags |= XE_VM_FLAG_ASYNC_BIND_OPS;
1906 if (args->flags & DRM_XE_VM_CREATE_FAULT_MODE)
1907 flags |= XE_VM_FLAG_FAULT_MODE;
1909 vm = xe_vm_create(xe, flags);
1913 if (args->extensions) {
1914 err = vm_user_extensions(xe, vm, args->extensions, 0);
1915 if (XE_IOCTL_ERR(xe, err)) {
1916 xe_vm_close_and_put(vm);
1921 mutex_lock(&xef->vm.lock);
1922 err = xa_alloc(&xef->vm.xa, &id, vm, xa_limit_32b, GFP_KERNEL);
1923 mutex_unlock(&xef->vm.lock);
1925 xe_vm_close_and_put(vm);
1929 if (xe->info.supports_usm) {
1930 mutex_lock(&xe->usm.lock);
1931 err = xa_alloc_cyclic(&xe->usm.asid_to_vm, &asid, vm,
1932 XA_LIMIT(0, XE_MAX_ASID - 1),
1933 &xe->usm.next_asid, GFP_KERNEL);
1934 mutex_unlock(&xe->usm.lock);
1936 xe_vm_close_and_put(vm);
1939 vm->usm.asid = asid;
1944 #if IS_ENABLED(CONFIG_DRM_XE_DEBUG_MEM)
1945 /* Warning: Security issue - never enable by default */
1946 args->reserved[0] = xe_bo_main_addr(vm->pt_root[0]->bo, GEN8_PAGE_SIZE);
1952 int xe_vm_destroy_ioctl(struct drm_device *dev, void *data,
1953 struct drm_file *file)
1955 struct xe_device *xe = to_xe_device(dev);
1956 struct xe_file *xef = to_xe_file(file);
1957 struct drm_xe_vm_destroy *args = data;
1960 if (XE_IOCTL_ERR(xe, args->pad))
1963 vm = xe_vm_lookup(xef, args->vm_id);
1964 if (XE_IOCTL_ERR(xe, !vm))
1968 /* FIXME: Extend this check to non-compute mode VMs */
1969 if (XE_IOCTL_ERR(xe, vm->preempt.num_engines))
1972 mutex_lock(&xef->vm.lock);
1973 xa_erase(&xef->vm.xa, args->vm_id);
1974 mutex_unlock(&xef->vm.lock);
1976 xe_vm_close_and_put(vm);
1981 static const u32 region_to_mem_type[] = {
1987 static int xe_vm_prefetch(struct xe_vm *vm, struct xe_vma *vma,
1988 struct xe_engine *e, u32 region,
1989 struct xe_sync_entry *syncs, u32 num_syncs,
1990 struct async_op_fence *afence)
1994 XE_BUG_ON(region > ARRAY_SIZE(region_to_mem_type));
1996 if (!xe_vma_is_userptr(vma)) {
1997 err = xe_bo_migrate(vma->bo, region_to_mem_type[region]);
2002 if (vma->gt_mask != (vma->gt_present & ~vma->usm.gt_invalidated)) {
2003 return xe_vm_bind(vm, vma, e, vma->bo, syncs, num_syncs,
2008 /* Nothing to do, signal fences now */
2009 for (i = 0; i < num_syncs; i++)
2010 xe_sync_entry_signal(&syncs[i], NULL,
2011 dma_fence_get_stub());
2013 dma_fence_signal(&afence->fence);
2018 #define VM_BIND_OP(op) (op & 0xffff)
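/*
 * The low 16 bits of a bind op select the operation (MAP, UNMAP, ...); the
 * upper bits carry XE_VM_BIND_FLAG_* modifiers such as ASYNC, READONLY and
 * IMMEDIATE.
 */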
2020 static int __vm_bind_ioctl(struct xe_vm *vm, struct xe_vma *vma,
2021 struct xe_engine *e, struct xe_bo *bo, u32 op,
2022 u32 region, struct xe_sync_entry *syncs,
2023 u32 num_syncs, struct async_op_fence *afence)
2025 switch (VM_BIND_OP(op)) {
2026 case XE_VM_BIND_OP_MAP:
2027 return xe_vm_bind(vm, vma, e, bo, syncs, num_syncs, afence);
2028 case XE_VM_BIND_OP_UNMAP:
2029 case XE_VM_BIND_OP_UNMAP_ALL:
2030 return xe_vm_unbind(vm, vma, e, syncs, num_syncs, afence);
2031 case XE_VM_BIND_OP_MAP_USERPTR:
2032 return xe_vm_bind(vm, vma, e, NULL, syncs, num_syncs, afence);
2033 case XE_VM_BIND_OP_PREFETCH:
2034 return xe_vm_prefetch(vm, vma, e, region, syncs, num_syncs,
2038 XE_BUG_ON("NOT POSSIBLE");
2043 struct ttm_buffer_object *xe_vm_ttm_bo(struct xe_vm *vm)
2045 int idx = vm->flags & XE_VM_FLAG_MIGRATION ?
2046 XE_VM_FLAG_GT_ID(vm->flags) : 0;
2048 /* Safe to use index 0 as all BOs in the VM share a single dma-resv lock */
2049 return &vm->pt_root[idx]->bo->ttm;
2052 static void xe_vm_tv_populate(struct xe_vm *vm, struct ttm_validate_buffer *tv)
2055 tv->bo = xe_vm_ttm_bo(vm);
2058 static bool is_map_op(u32 op)
2060 return VM_BIND_OP(op) == XE_VM_BIND_OP_MAP ||
2061 VM_BIND_OP(op) == XE_VM_BIND_OP_MAP_USERPTR;
2064 static bool is_unmap_op(u32 op)
2066 return VM_BIND_OP(op) == XE_VM_BIND_OP_UNMAP ||
2067 VM_BIND_OP(op) == XE_VM_BIND_OP_UNMAP_ALL;
2070 static int vm_bind_ioctl(struct xe_vm *vm, struct xe_vma *vma,
2071 struct xe_engine *e, struct xe_bo *bo,
2072 struct drm_xe_vm_bind_op *bind_op,
2073 struct xe_sync_entry *syncs, u32 num_syncs,
2074 struct async_op_fence *afence)
2078 struct ttm_validate_buffer tv_bo, tv_vm;
2079 struct ww_acquire_ctx ww;
2083 lockdep_assert_held(&vm->lock);
2084 XE_BUG_ON(!list_empty(&vma->unbind_link));
2086 /* Binds deferred to faults, signal fences now */
2087 if (xe_vm_in_fault_mode(vm) && is_map_op(bind_op->op) &&
2088 !(bind_op->op & XE_VM_BIND_FLAG_IMMEDIATE)) {
2089 for (i = 0; i < num_syncs; i++)
2090 xe_sync_entry_signal(&syncs[i], NULL,
2091 dma_fence_get_stub());
2093 dma_fence_signal(&afence->fence);
2097 xe_vm_tv_populate(vm, &tv_vm);
2098 list_add_tail(&tv_vm.head, &objs);
2102 * An unbind can drop the last reference to the BO and
2103 * the BO is needed for ttm_eu_backoff_reservation so
2104 * take a reference here.
2108 tv_bo.bo = &vbo->ttm;
2109 tv_bo.num_shared = 1;
2110 list_add(&tv_bo.head, &objs);
2114 err = ttm_eu_reserve_buffers(&ww, &objs, true, &dups);
2116 err = __vm_bind_ioctl(vm, vma, e, bo,
2117 bind_op->op, bind_op->region, syncs,
2119 ttm_eu_backoff_reservation(&ww, &objs);
2120 if (err == -EAGAIN && xe_vma_is_userptr(vma)) {
2121 lockdep_assert_held_write(&vm->lock);
2122 err = xe_vma_userptr_pin_pages(vma);
2134 struct xe_engine *engine;
2136 struct drm_xe_vm_bind_op bind_op;
2137 struct xe_sync_entry *syncs;
2139 struct list_head link;
2140 struct async_op_fence *fence;
2143 static void async_op_cleanup(struct xe_vm *vm, struct async_op *op)
2145 while (op->num_syncs--)
2146 xe_sync_entry_cleanup(&op->syncs[op->num_syncs]);
2150 xe_engine_put(op->engine);
2153 dma_fence_put(&op->fence->fence);
2157 static struct async_op *next_async_op(struct xe_vm *vm)
2159 return list_first_entry_or_null(&vm->async_ops.pending,
2160 struct async_op, link);
2163 static void vm_set_async_error(struct xe_vm *vm, int err)
2165 lockdep_assert_held(&vm->lock);
2166 vm->async_ops.error = err;
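/*
 * Worker that drains the VM's queue of pending async bind operations. On
 * failure the operation is re-queued at the head of the list, the VM is put
 * into an error state and, if configured, the error is reported through the
 * user-space error capture address.
 */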
2169 static void async_op_work_func(struct work_struct *w)
2171 struct xe_vm *vm = container_of(w, struct xe_vm, async_ops.work);
2174 struct async_op *op;
2177 if (vm->async_ops.error && !xe_vm_is_closed(vm))
2180 spin_lock_irq(&vm->async_ops.lock);
2181 op = next_async_op(vm);
2183 list_del_init(&op->link);
2184 spin_unlock_irq(&vm->async_ops.lock);
2189 if (!xe_vm_is_closed(vm)) {
2192 down_write(&vm->lock);
2194 first = op->vma->first_munmap_rebind;
2195 last = op->vma->last_munmap_rebind;
2196 #ifdef TEST_VM_ASYNC_OPS_ERROR
2197 #define FORCE_ASYNC_OP_ERROR BIT(31)
2198 if (!(op->bind_op.op & FORCE_ASYNC_OP_ERROR)) {
2199 err = vm_bind_ioctl(vm, op->vma, op->engine,
2200 op->bo, &op->bind_op,
2201 op->syncs, op->num_syncs,
2205 op->bind_op.op &= ~FORCE_ASYNC_OP_ERROR;
2208 err = vm_bind_ioctl(vm, op->vma, op->engine, op->bo,
2209 &op->bind_op, op->syncs,
2210 op->num_syncs, op->fence);
2213 * In order for the fencing to work (stall behind
2214 * existing jobs / prevent new jobs from running) all
2215 * the dma-resv slots need to be programmed in a batch
2216 * relative to execs / the rebind worker. The vm->lock ensures this.
2219 if (!err && ((first && VM_BIND_OP(op->bind_op.op) ==
2220 XE_VM_BIND_OP_UNMAP) ||
2221 vm->async_ops.munmap_rebind_inflight)) {
2223 op->vma->last_munmap_rebind = false;
2224 vm->async_ops.munmap_rebind_inflight =
2227 vm->async_ops.munmap_rebind_inflight =
2230 async_op_cleanup(vm, op);
2232 spin_lock_irq(&vm->async_ops.lock);
2233 op = next_async_op(vm);
2235 list_del_init(&op->link);
2236 spin_unlock_irq(&vm->async_ops.lock);
2242 trace_xe_vma_fail(op->vma);
2243 drm_warn(&vm->xe->drm, "Async VM op(%d) failed with %d",
2244 VM_BIND_OP(op->bind_op.op),
2247 spin_lock_irq(&vm->async_ops.lock);
2248 list_add(&op->link, &vm->async_ops.pending);
2249 spin_unlock_irq(&vm->async_ops.lock);
2251 vm_set_async_error(vm, err);
2252 up_write(&vm->lock);
2254 if (vm->async_ops.error_capture.addr)
2255 vm_error_capture(vm, err,
2261 up_write(&vm->lock);
2263 trace_xe_vma_flush(op->vma);
2265 if (is_unmap_op(op->bind_op.op)) {
2266 down_write(&vm->lock);
2267 xe_vma_destroy_unlocked(op->vma);
2268 up_write(&vm->lock);
2271 if (op->fence && !test_bit(DMA_FENCE_FLAG_SIGNALED_BIT,
2272 &op->fence->fence.flags)) {
2273 if (!xe_vm_no_dma_fences(vm)) {
2274 op->fence->started = true;
2276 wake_up_all(&op->fence->wq);
2278 dma_fence_signal(&op->fence->fence);
2282 async_op_cleanup(vm, op);
2286 static int __vm_bind_ioctl_async(struct xe_vm *vm, struct xe_vma *vma,
2287 struct xe_engine *e, struct xe_bo *bo,
2288 struct drm_xe_vm_bind_op *bind_op,
2289 struct xe_sync_entry *syncs, u32 num_syncs)
2291 struct async_op *op;
2292 bool installed = false;
2296 lockdep_assert_held(&vm->lock);
2298 op = kmalloc(sizeof(*op), GFP_KERNEL);
2304 op->fence = kmalloc(sizeof(*op->fence), GFP_KERNEL);
2310 seqno = e ? ++e->bind.fence_seqno : ++vm->async_ops.fence.seqno;
2311 dma_fence_init(&op->fence->fence, &async_op_fence_ops,
2312 &vm->async_ops.lock, e ? e->bind.fence_ctx :
2313 vm->async_ops.fence.context, seqno);
2315 if (!xe_vm_no_dma_fences(vm)) {
2317 op->fence->started = false;
2318 init_waitqueue_head(&op->fence->wq);
2326 op->bind_op = *bind_op;
2328 op->num_syncs = num_syncs;
2329 INIT_LIST_HEAD(&op->link);
2331 for (i = 0; i < num_syncs; i++)
2332 installed |= xe_sync_entry_signal(&syncs[i], NULL,
2335 if (!installed && op->fence)
2336 dma_fence_signal(&op->fence->fence);
2338 spin_lock_irq(&vm->async_ops.lock);
2339 list_add_tail(&op->link, &vm->async_ops.pending);
2340 spin_unlock_irq(&vm->async_ops.lock);
2342 if (!vm->async_ops.error)
2343 queue_work(system_unbound_wq, &vm->async_ops.work);
2348 static int vm_bind_ioctl_async(struct xe_vm *vm, struct xe_vma *vma,
2349 struct xe_engine *e, struct xe_bo *bo,
2350 struct drm_xe_vm_bind_op *bind_op,
2351 struct xe_sync_entry *syncs, u32 num_syncs)
2353 struct xe_vma *__vma, *next;
2354 struct list_head rebind_list;
2355 struct xe_sync_entry *in_syncs = NULL, *out_syncs = NULL;
2356 u32 num_in_syncs = 0, num_out_syncs = 0;
2357 bool first = true, last;
2361 lockdep_assert_held(&vm->lock);
2363 /* Not a linked list of unbinds + rebinds, easy */
2364 if (list_empty(&vma->unbind_link))
2365 return __vm_bind_ioctl_async(vm, vma, e, bo, bind_op,
2369 * Linked list of unbinds + rebinds, decompose syncs into 'in / out'
2370 * passing the 'in' to the first operation and 'out' to the last. Also
2371 * the reference counting is a little tricky, increment the VM / bind
2372 * engine ref count on all but the last operation and increment the BOs
2373 * ref count on each rebind.
2376 XE_BUG_ON(VM_BIND_OP(bind_op->op) != XE_VM_BIND_OP_UNMAP &&
2377 VM_BIND_OP(bind_op->op) != XE_VM_BIND_OP_UNMAP_ALL &&
2378 VM_BIND_OP(bind_op->op) != XE_VM_BIND_OP_PREFETCH);
2380 /* Decompose syncs */
2382 in_syncs = kmalloc(sizeof(*in_syncs) * num_syncs, GFP_KERNEL);
2383 out_syncs = kmalloc(sizeof(*out_syncs) * num_syncs, GFP_KERNEL);
2384 if (!in_syncs || !out_syncs) {
2389 for (i = 0; i < num_syncs; ++i) {
2390 bool signal = syncs[i].flags & DRM_XE_SYNC_SIGNAL;
2393 out_syncs[num_out_syncs++] = syncs[i];
2395 in_syncs[num_in_syncs++] = syncs[i];
2399 /* Do unbinds + move rebinds to new list */
2400 INIT_LIST_HEAD(&rebind_list);
2401 list_for_each_entry_safe(__vma, next, &vma->unbind_link, unbind_link) {
2402 if (__vma->destroyed ||
2403 VM_BIND_OP(bind_op->op) == XE_VM_BIND_OP_PREFETCH) {
2404 list_del_init(&__vma->unbind_link);
2406 err = __vm_bind_ioctl_async(xe_vm_get(vm), __vma,
2407 e ? xe_engine_get(e) : NULL,
2408 bo, bind_op, first ?
2410 first ? num_in_syncs : 0);
2421 list_move_tail(&__vma->unbind_link, &rebind_list);
2424 last = list_empty(&rebind_list);
2430 err = __vm_bind_ioctl_async(vm, vma, e,
2433 last ? out_syncs : NULL,
2434 first ? num_in_syncs :
2435 last ? num_out_syncs : 0);
2447 list_for_each_entry_safe(__vma, next, &rebind_list, unbind_link) {
2448 list_del_init(&__vma->unbind_link);
2449 last = list_empty(&rebind_list);
2451 if (xe_vma_is_userptr(__vma)) {
2452 bind_op->op = XE_VM_BIND_FLAG_ASYNC |
2453 XE_VM_BIND_OP_MAP_USERPTR;
2455 bind_op->op = XE_VM_BIND_FLAG_ASYNC |
2457 xe_bo_get(__vma->bo);
2466 err = __vm_bind_ioctl_async(vm, __vma, e,
2467 __vma->bo, bind_op, last ?
2469 last ? num_out_syncs : 0);
2491 static int __vm_bind_ioctl_lookup_vma(struct xe_vm *vm, struct xe_bo *bo,
2492 u64 addr, u64 range, u32 op)
2494 struct xe_device *xe = vm->xe;
2495 struct xe_vma *vma, lookup;
2496 bool async = !!(op & XE_VM_BIND_FLAG_ASYNC);
2498 lockdep_assert_held(&vm->lock);
2500 lookup.start = addr;
2501 lookup.end = addr + range - 1;
2503 switch (VM_BIND_OP(op)) {
2504 case XE_VM_BIND_OP_MAP:
2505 case XE_VM_BIND_OP_MAP_USERPTR:
2506 vma = xe_vm_find_overlapping_vma(vm, &lookup);
2507 if (XE_IOCTL_ERR(xe, vma))
2510 case XE_VM_BIND_OP_UNMAP:
2511 case XE_VM_BIND_OP_PREFETCH:
2512 vma = xe_vm_find_overlapping_vma(vm, &lookup);
2513 if (XE_IOCTL_ERR(xe, !vma) ||
2514 XE_IOCTL_ERR(xe, (vma->start != addr ||
2515 vma->end != addr + range - 1) && !async))
2518 case XE_VM_BIND_OP_UNMAP_ALL:
2521 XE_BUG_ON("NOT POSSIBLE");
2528 static void prep_vma_destroy(struct xe_vm *vm, struct xe_vma *vma)
2530 down_read(&vm->userptr.notifier_lock);
2531 vma->destroyed = true;
2532 up_read(&vm->userptr.notifier_lock);
2533 xe_vm_remove_vma(vm, vma);
2536 static int prep_replacement_vma(struct xe_vm *vm, struct xe_vma *vma)
2540 if (vma->bo && !vma->bo->vm) {
2541 vm_insert_extobj(vm, vma);
2542 err = add_preempt_fences(vm, vma->bo);
2551 * Find all overlapping VMAs in lookup range and add to a list in the returned
2552 * VMA; all of the VMAs found will be unbound. Also possibly add 2 new VMAs that
2553 * need to be bound if the first / last VMAs are not fully unbound. This is akin to how munmap works.
2556 static struct xe_vma *vm_unbind_lookup_vmas(struct xe_vm *vm,
2557 struct xe_vma *lookup)
2559 struct xe_vma *vma = xe_vm_find_overlapping_vma(vm, lookup);
2560 struct rb_node *node;
2561 struct xe_vma *first = vma, *last = vma, *new_first = NULL,
2562 *new_last = NULL, *__vma, *next;
2564 bool first_munmap_rebind = false;
2566 lockdep_assert_held(&vm->lock);
2569 node = &vma->vm_node;
2570 while ((node = rb_next(node))) {
2571 if (!xe_vma_cmp_vma_cb(lookup, node)) {
2572 __vma = to_xe_vma(node);
2573 list_add_tail(&__vma->unbind_link, &vma->unbind_link);
2580 node = &vma->vm_node;
2581 while ((node = rb_prev(node))) {
2582 if (!xe_vma_cmp_vma_cb(lookup, node)) {
2583 __vma = to_xe_vma(node);
2584 list_add(&__vma->unbind_link, &vma->unbind_link);
2591 if (first->start != lookup->start) {
2592 struct ww_acquire_ctx ww;
2595 err = xe_bo_lock(first->bo, &ww, 0, true);
2598 new_first = xe_vma_create(first->vm, first->bo,
2599 first->bo ? first->bo_offset :
2603 (first->pte_flags & PTE_READ_ONLY),
2606 xe_bo_unlock(first->bo, &ww);
2612 err = xe_vma_userptr_pin_pages(new_first);
2616 err = prep_replacement_vma(vm, new_first);
2621 if (last->end != lookup->end) {
2622 struct ww_acquire_ctx ww;
2623 u64 chunk = lookup->end + 1 - last->start;
2626 err = xe_bo_lock(last->bo, &ww, 0, true);
2629 new_last = xe_vma_create(last->vm, last->bo,
2630 last->bo ? last->bo_offset + chunk :
2631 last->userptr.ptr + chunk,
2632 last->start + chunk,
2634 (last->pte_flags & PTE_READ_ONLY),
2637 xe_bo_unlock(last->bo, &ww);
2643 err = xe_vma_userptr_pin_pages(new_last);
2647 err = prep_replacement_vma(vm, new_last);
2652 prep_vma_destroy(vm, vma);
2653 if (list_empty(&vma->unbind_link) && (new_first || new_last))
2654 vma->first_munmap_rebind = true;
2655 list_for_each_entry(__vma, &vma->unbind_link, unbind_link) {
2656 if ((new_first || new_last) && !first_munmap_rebind) {
2657 __vma->first_munmap_rebind = true;
2658 first_munmap_rebind = true;
2660 prep_vma_destroy(vm, __vma);
2663 xe_vm_insert_vma(vm, new_first);
2664 list_add_tail(&new_first->unbind_link, &vma->unbind_link);
2666 new_first->last_munmap_rebind = true;
2669 xe_vm_insert_vma(vm, new_last);
2670 list_add_tail(&new_last->unbind_link, &vma->unbind_link);
2671 new_last->last_munmap_rebind = true;
2677 list_for_each_entry_safe(__vma, next, &vma->unbind_link, unbind_link)
2678 list_del_init(&__vma->unbind_link);
2680 prep_vma_destroy(vm, new_last);
2681 xe_vma_destroy_unlocked(new_last);
2684 prep_vma_destroy(vm, new_first);
2685 xe_vma_destroy_unlocked(new_first);
2688 return ERR_PTR(err);
2692 * Similar to vm_unbind_lookup_vmas, find all VMAs in lookup range to prefetch
2694 static struct xe_vma *vm_prefetch_lookup_vmas(struct xe_vm *vm,
2695 struct xe_vma *lookup,
2698 struct xe_vma *vma = xe_vm_find_overlapping_vma(vm, lookup), *__vma,
2700 struct rb_node *node;
2702 if (!xe_vma_is_userptr(vma)) {
2703 if (!xe_bo_can_migrate(vma->bo, region_to_mem_type[region]))
2704 return ERR_PTR(-EINVAL);
2707 node = &vma->vm_node;
2708 while ((node = rb_next(node))) {
2709 if (!xe_vma_cmp_vma_cb(lookup, node)) {
2710 __vma = to_xe_vma(node);
2711 if (!xe_vma_is_userptr(__vma)) {
2712 if (!xe_bo_can_migrate(__vma->bo, region_to_mem_type[region]))
2715 list_add_tail(&__vma->unbind_link, &vma->unbind_link);
2721 node = &vma->vm_node;
2722 while ((node = rb_prev(node))) {
2723 if (!xe_vma_cmp_vma_cb(lookup, node)) {
2724 __vma = to_xe_vma(node);
2725 if (!xe_vma_is_userptr(__vma)) {
2726 if (!xe_bo_can_migrate(__vma->bo, region_to_mem_type[region]))
2729 list_add(&__vma->unbind_link, &vma->unbind_link);
2738 list_for_each_entry_safe(__vma, next, &vma->unbind_link,
2740 list_del_init(&__vma->unbind_link);
2742 return ERR_PTR(-EINVAL);
2745 static struct xe_vma *vm_unbind_all_lookup_vmas(struct xe_vm *vm,
2748 struct xe_vma *first = NULL, *vma;
2750 lockdep_assert_held(&vm->lock);
2751 xe_bo_assert_held(bo);
2753 list_for_each_entry(vma, &bo->vmas, bo_link) {
2757 prep_vma_destroy(vm, vma);
2761 list_add_tail(&vma->unbind_link, &first->unbind_link);
2767 static struct xe_vma *vm_bind_ioctl_lookup_vma(struct xe_vm *vm,
2769 u64 bo_offset_or_userptr,
2770 u64 addr, u64 range, u32 op,
2771 u64 gt_mask, u32 region)
2773 struct ww_acquire_ctx ww;
2774 struct xe_vma *vma, lookup;
2777 lockdep_assert_held(&vm->lock);
2779 lookup.start = addr;
2780 lookup.end = addr + range - 1;
2782 switch (VM_BIND_OP(op)) {
2783 case XE_VM_BIND_OP_MAP:
2786 err = xe_bo_lock(bo, &ww, 0, true);
2788 return ERR_PTR(err);
2789 vma = xe_vma_create(vm, bo, bo_offset_or_userptr, addr,
2791 op & XE_VM_BIND_FLAG_READONLY,
2793 xe_bo_unlock(bo, &ww);
2795 return ERR_PTR(-ENOMEM);
2797 xe_vm_insert_vma(vm, vma);
2799 vm_insert_extobj(vm, vma);
2800 err = add_preempt_fences(vm, bo);
2802 prep_vma_destroy(vm, vma);
2803 xe_vma_destroy_unlocked(vma);
2805 return ERR_PTR(err);
2809 case XE_VM_BIND_OP_UNMAP:
2810 vma = vm_unbind_lookup_vmas(vm, &lookup);
2812 case XE_VM_BIND_OP_PREFETCH:
2813 vma = vm_prefetch_lookup_vmas(vm, &lookup, region);
2815 case XE_VM_BIND_OP_UNMAP_ALL:
2818 err = xe_bo_lock(bo, &ww, 0, true);
2820 return ERR_PTR(err);
2821 vma = vm_unbind_all_lookup_vmas(vm, bo);
2823 vma = ERR_PTR(-EINVAL);
2824 xe_bo_unlock(bo, &ww);
2826 case XE_VM_BIND_OP_MAP_USERPTR:
2829 vma = xe_vma_create(vm, NULL, bo_offset_or_userptr, addr,
2831 op & XE_VM_BIND_FLAG_READONLY,
2834 return ERR_PTR(-ENOMEM);
2836 err = xe_vma_userptr_pin_pages(vma);
2838 xe_vma_destroy(vma, NULL);
2840 return ERR_PTR(err);
2842 xe_vm_insert_vma(vm, vma);
2846 XE_BUG_ON("NOT POSSIBLE");
2847 vma = ERR_PTR(-EINVAL);
2853 #ifdef TEST_VM_ASYNC_OPS_ERROR
2854 #define SUPPORTED_FLAGS \
2855 (FORCE_ASYNC_OP_ERROR | XE_VM_BIND_FLAG_ASYNC | \
2856 XE_VM_BIND_FLAG_READONLY | XE_VM_BIND_FLAG_IMMEDIATE | 0xffff)
2857 #else
2858 #define SUPPORTED_FLAGS \
2859 (XE_VM_BIND_FLAG_ASYNC | XE_VM_BIND_FLAG_READONLY | \
2860 XE_VM_BIND_FLAG_IMMEDIATE | 0xffff)
2861 #endif
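/*
 * Reader note (assumes the uAPI op encoding keeps the VM_BIND_OP() opcode in
 * the low 16 bits): the trailing 0xffff in SUPPORTED_FLAGS accepts the opcode
 * bits, so only the high flag bits are rejected by the op & ~SUPPORTED_FLAGS
 * check below.
 */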
2862 #define XE_64K_PAGE_MASK 0xffffull
2864 #define MAX_BINDS 512 /* FIXME: Picking random upper limit */
2866 static int vm_bind_ioctl_check_args(struct xe_device *xe,
2867 struct drm_xe_vm_bind *args,
2868 struct drm_xe_vm_bind_op **bind_ops,
2874 if (XE_IOCTL_ERR(xe, args->extensions) ||
2875 XE_IOCTL_ERR(xe, !args->num_binds) ||
2876 XE_IOCTL_ERR(xe, args->num_binds > MAX_BINDS))
2879 if (args->num_binds > 1) {
2880 u64 __user *bind_user =
2881 u64_to_user_ptr(args->vector_of_binds);
2883 *bind_ops = kmalloc(sizeof(struct drm_xe_vm_bind_op) *
2884 args->num_binds, GFP_KERNEL);
2888 err = __copy_from_user(*bind_ops, bind_user,
2889 sizeof(struct drm_xe_vm_bind_op) *
2891 if (XE_IOCTL_ERR(xe, err)) {
2896 *bind_ops = &args->bind;
2899 for (i = 0; i < args->num_binds; ++i) {
2900 u64 range = (*bind_ops)[i].range;
2901 u64 addr = (*bind_ops)[i].addr;
2902 u32 op = (*bind_ops)[i].op;
2903 u32 obj = (*bind_ops)[i].obj;
2904 u64 obj_offset = (*bind_ops)[i].obj_offset;
2905 u32 region = (*bind_ops)[i].region;
2908 *async = !!(op & XE_VM_BIND_FLAG_ASYNC);
2909 } else if (XE_IOCTL_ERR(xe, !*async) ||
2910 XE_IOCTL_ERR(xe, !(op & XE_VM_BIND_FLAG_ASYNC)) ||
2911 XE_IOCTL_ERR(xe, VM_BIND_OP(op) ==
2912 XE_VM_BIND_OP_RESTART)) {
2917 if (XE_IOCTL_ERR(xe, !*async &&
2918 VM_BIND_OP(op) == XE_VM_BIND_OP_UNMAP_ALL)) {
2923 if (XE_IOCTL_ERR(xe, !*async &&
2924 VM_BIND_OP(op) == XE_VM_BIND_OP_PREFETCH)) {
2929 if (XE_IOCTL_ERR(xe, VM_BIND_OP(op) >
2930 XE_VM_BIND_OP_PREFETCH) ||
2931 XE_IOCTL_ERR(xe, op & ~SUPPORTED_FLAGS) ||
2932 XE_IOCTL_ERR(xe, !obj &&
2933 VM_BIND_OP(op) == XE_VM_BIND_OP_MAP) ||
2934 XE_IOCTL_ERR(xe, !obj &&
2935 VM_BIND_OP(op) == XE_VM_BIND_OP_UNMAP_ALL) ||
2936 XE_IOCTL_ERR(xe, addr &&
2937 VM_BIND_OP(op) == XE_VM_BIND_OP_UNMAP_ALL) ||
2938 XE_IOCTL_ERR(xe, range &&
2939 VM_BIND_OP(op) == XE_VM_BIND_OP_UNMAP_ALL) ||
2940 XE_IOCTL_ERR(xe, obj &&
2941 VM_BIND_OP(op) == XE_VM_BIND_OP_MAP_USERPTR) ||
2942 XE_IOCTL_ERR(xe, obj &&
2943 VM_BIND_OP(op) == XE_VM_BIND_OP_PREFETCH) ||
2944 XE_IOCTL_ERR(xe, region &&
2945 VM_BIND_OP(op) != XE_VM_BIND_OP_PREFETCH) ||
2946 XE_IOCTL_ERR(xe, !(BIT(region) &
2947 xe->info.mem_region_mask)) ||
2948 XE_IOCTL_ERR(xe, obj &&
2949 VM_BIND_OP(op) == XE_VM_BIND_OP_UNMAP)) {
2954 if (XE_IOCTL_ERR(xe, obj_offset & ~PAGE_MASK) ||
2955 XE_IOCTL_ERR(xe, addr & ~PAGE_MASK) ||
2956 XE_IOCTL_ERR(xe, range & ~PAGE_MASK) ||
2957 XE_IOCTL_ERR(xe, !range && VM_BIND_OP(op) !=
2958 XE_VM_BIND_OP_RESTART &&
2959 VM_BIND_OP(op) != XE_VM_BIND_OP_UNMAP_ALL)) {
2968 if (args->num_binds > 1)
2973 int xe_vm_bind_ioctl(struct drm_device *dev, void *data, struct drm_file *file)
2975 struct xe_device *xe = to_xe_device(dev);
2976 struct xe_file *xef = to_xe_file(file);
2977 struct drm_xe_vm_bind *args = data;
2978 struct drm_xe_sync __user *syncs_user;
2979 struct xe_bo **bos = NULL;
2980 struct xe_vma **vmas = NULL;
2982 struct xe_engine *e = NULL;
2984 struct xe_sync_entry *syncs = NULL;
2985 struct drm_xe_vm_bind_op *bind_ops;
2990 err = vm_bind_ioctl_check_args(xe, args, &bind_ops, &async);
2994 vm = xe_vm_lookup(xef, args->vm_id);
2995 if (XE_IOCTL_ERR(xe, !vm)) {
3000 if (XE_IOCTL_ERR(xe, xe_vm_is_closed(vm))) {
3001 DRM_ERROR("VM closed while we began looking up?\n");
3006 if (args->engine_id) {
3007 e = xe_engine_lookup(xef, args->engine_id);
3008 if (XE_IOCTL_ERR(xe, !e)) {
3012 if (XE_IOCTL_ERR(xe, !(e->flags & ENGINE_FLAG_VM))) {
3018 if (VM_BIND_OP(bind_ops[0].op) == XE_VM_BIND_OP_RESTART) {
3019 if (XE_IOCTL_ERR(xe, !(vm->flags & XE_VM_FLAG_ASYNC_BIND_OPS)))
3021 if (XE_IOCTL_ERR(xe, !err && args->num_syncs))
3023 if (XE_IOCTL_ERR(xe, !err && !vm->async_ops.error))
3027 down_write(&vm->lock);
3028 trace_xe_vm_restart(vm);
3029 vm_set_async_error(vm, 0);
3030 up_write(&vm->lock);
3032 queue_work(system_unbound_wq, &vm->async_ops.work);
3034 /* Rebinds may have been blocked, give worker a kick */
3035 if (xe_vm_in_compute_mode(vm))
3036 queue_work(vm->xe->ordered_wq,
3037 &vm->preempt.rebind_work);
3043 if (XE_IOCTL_ERR(xe, !vm->async_ops.error &&
3044 async != !!(vm->flags & XE_VM_FLAG_ASYNC_BIND_OPS))) {
3049 for (i = 0; i < args->num_binds; ++i) {
3050 u64 range = bind_ops[i].range;
3051 u64 addr = bind_ops[i].addr;
3053 if (XE_IOCTL_ERR(xe, range > vm->size) ||
3054 XE_IOCTL_ERR(xe, addr > vm->size - range)) {
3059 if (bind_ops[i].gt_mask) {
3060 u64 valid_gts = BIT(xe->info.tile_count) - 1;
3062 if (XE_IOCTL_ERR(xe, bind_ops[i].gt_mask &
3070 bos = kzalloc(sizeof(*bos) * args->num_binds, GFP_KERNEL);
3076 vmas = kzalloc(sizeof(*vmas) * args->num_binds, GFP_KERNEL);
3082 for (i = 0; i < args->num_binds; ++i) {
3083 struct drm_gem_object *gem_obj;
3084 u64 range = bind_ops[i].range;
3085 u64 addr = bind_ops[i].addr;
3086 u32 obj = bind_ops[i].obj;
3087 u64 obj_offset = bind_ops[i].obj_offset;
3092 gem_obj = drm_gem_object_lookup(file, obj);
3093 if (XE_IOCTL_ERR(xe, !gem_obj)) {
3097 bos[i] = gem_to_xe_bo(gem_obj);
3099 if (XE_IOCTL_ERR(xe, range > bos[i]->size) ||
3100 XE_IOCTL_ERR(xe, obj_offset >
3101 bos[i]->size - range)) {
3106 if (bos[i]->flags & XE_BO_INTERNAL_64K) {
3107 if (XE_IOCTL_ERR(xe, obj_offset &
3108 XE_64K_PAGE_MASK) ||
3109 XE_IOCTL_ERR(xe, addr & XE_64K_PAGE_MASK) ||
3110 XE_IOCTL_ERR(xe, range & XE_64K_PAGE_MASK)) {
3117 if (args->num_syncs) {
3118 syncs = kcalloc(args->num_syncs, sizeof(*syncs), GFP_KERNEL);
3125 syncs_user = u64_to_user_ptr(args->syncs);
3126 for (num_syncs = 0; num_syncs < args->num_syncs; num_syncs++) {
3127 err = xe_sync_entry_parse(xe, xef, &syncs[num_syncs],
3128 &syncs_user[num_syncs], false,
3129 xe_vm_no_dma_fences(vm));
3134 err = down_write_killable(&vm->lock);
3138 /* Do some error checking first to make the unwind easier */
3139 for (i = 0; i < args->num_binds; ++i) {
3140 u64 range = bind_ops[i].range;
3141 u64 addr = bind_ops[i].addr;
3142 u32 op = bind_ops[i].op;
3144 err = __vm_bind_ioctl_lookup_vma(vm, bos[i], addr, range, op);
3146 goto release_vm_lock;
3149 for (i = 0; i < args->num_binds; ++i) {
3150 u64 range = bind_ops[i].range;
3151 u64 addr = bind_ops[i].addr;
3152 u32 op = bind_ops[i].op;
3153 u64 obj_offset = bind_ops[i].obj_offset;
3154 u64 gt_mask = bind_ops[i].gt_mask;
3155 u32 region = bind_ops[i].region;
3157 vmas[i] = vm_bind_ioctl_lookup_vma(vm, bos[i], obj_offset,
3158 addr, range, op, gt_mask,
3160 if (IS_ERR(vmas[i])) {
3161 err = PTR_ERR(vmas[i]);
3167 for (j = 0; j < args->num_binds; ++j) {
3168 struct xe_sync_entry *__syncs;
3169 u32 __num_syncs = 0;
3170 bool first_or_last = j == 0 || j == args->num_binds - 1;
3172 if (args->num_binds == 1) {
3173 __num_syncs = num_syncs;
3175 } else if (first_or_last && num_syncs) {
3176 bool first = j == 0;
3178 __syncs = kmalloc(sizeof(*__syncs) * num_syncs,
3185 /* in-syncs on first bind, out-syncs on last bind */
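/*
 * Illustrative example: with three binds and syncs {in_fence, out_fence},
 * bind 0 waits on in_fence, bind 2 signals out_fence, and bind 1 carries
 * no syncs at all.
 */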
3186 for (i = 0; i < num_syncs; ++i) {
3187 bool signal = syncs[i].flags & DRM_XE_SYNC_SIGNAL;
3190 if ((first && !signal) || (!first && signal))
3191 __syncs[__num_syncs++] = syncs[i];
3199 bool last = j == args->num_binds - 1;
3202 * Each pass of the async worker drops its reference, so take a ref
3203 * here; one set of refs was already taken above.
3211 err = vm_bind_ioctl_async(vm, vmas[j], e, bos[j],
3212 bind_ops + j, __syncs,
3222 XE_BUG_ON(j != 0); /* Not supported */
3223 err = vm_bind_ioctl(vm, vmas[j], e, bos[j],
3224 bind_ops + j, __syncs,
3226 break; /* Needed so cleanup loops work */
3230 /* Most of the cleanup is owned by the async bind worker */
3231 if (async && !err) {
3232 up_write(&vm->lock);
3233 if (args->num_binds > 1)
3239 for (i = j; err && i < args->num_binds; ++i) {
3240 u32 op = bind_ops[i].op;
3241 struct xe_vma *vma, *next;
3246 list_for_each_entry_safe(vma, next, &vmas[i]->unbind_link,
3248 list_del_init(&vma->unbind_link);
3249 if (!vma->destroyed) {
3250 prep_vma_destroy(vm, vma);
3251 xe_vma_destroy_unlocked(vma);
3255 switch (VM_BIND_OP(op)) {
3256 case XE_VM_BIND_OP_MAP:
3257 prep_vma_destroy(vm, vmas[i]);
3258 xe_vma_destroy_unlocked(vmas[i]);
3260 case XE_VM_BIND_OP_MAP_USERPTR:
3261 prep_vma_destroy(vm, vmas[i]);
3262 xe_vma_destroy_unlocked(vmas[i]);
3267 up_write(&vm->lock);
3269 while (num_syncs--) {
3271 !(syncs[num_syncs].flags & DRM_XE_SYNC_SIGNAL))
3272 continue; /* Still in async worker */
3273 xe_sync_entry_cleanup(&syncs[num_syncs]);
3278 for (i = j; i < args->num_binds; ++i)
3288 if (args->num_binds > 1)
3294 * XXX: Using the TTM wrappers for now; we could likely call into the dma-resv
3295 * code directly to optimize. This should probably also be an inline function.
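/*
 * Minimal usage sketch (illustrative only; num_resv == 0 simply reserves no
 * extra shared-fence slots on the VM's reservation object):
 *
 *	struct ww_acquire_ctx ww;
 *	int err = xe_vm_lock(vm, &ww, 0, true);
 *
 *	if (err)
 *		return err;
 *	...access state protected by vm->resv...
 *	xe_vm_unlock(vm, &ww);
 */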
3297 int xe_vm_lock(struct xe_vm *vm, struct ww_acquire_ctx *ww,
3298 int num_resv, bool intr)
3300 struct ttm_validate_buffer tv_vm;
3306 tv_vm.num_shared = num_resv;
3307 tv_vm.bo = xe_vm_ttm_bo(vm);
3308 list_add_tail(&tv_vm.head, &objs);
3310 return ttm_eu_reserve_buffers(ww, &objs, intr, &dups);
3313 void xe_vm_unlock(struct xe_vm *vm, struct ww_acquire_ctx *ww)
3315 dma_resv_unlock(&vm->resv);
3316 ww_acquire_fini(ww);
3320 * xe_vm_invalidate_vma - invalidate GPU mappings for VMA without a lock
3321 * @vma: VMA to invalidate
3323 * Walks the list of page-table leaves, zeroing the entries owned by this
3324 * VMA, invalidates the TLBs, and blocks until the TLB invalidation is complete.
3327 * Returns 0 for success, negative error code otherwise.
3329 int xe_vm_invalidate_vma(struct xe_vma *vma)
3331 struct xe_device *xe = vma->vm->xe;
3333 u32 gt_needs_invalidate = 0;
3334 int seqno[XE_MAX_GT];
3338 XE_BUG_ON(!xe_vm_in_fault_mode(vma->vm));
3339 trace_xe_vma_usm_invalidate(vma);
3341 /* Check that we don't race with page-table updates */
3342 if (IS_ENABLED(CONFIG_PROVE_LOCKING)) {
3343 if (xe_vma_is_userptr(vma)) {
3344 WARN_ON_ONCE(!mmu_interval_check_retry
3345 (&vma->userptr.notifier,
3346 vma->userptr.notifier_seq));
3347 WARN_ON_ONCE(!dma_resv_test_signaled(&vma->vm->resv,
3348 DMA_RESV_USAGE_BOOKKEEP));
3351 xe_bo_assert_held(vma->bo);
3355 for_each_gt(gt, xe, id) {
3356 if (xe_pt_zap_ptes(gt, vma)) {
3357 gt_needs_invalidate |= BIT(id);
3359 seqno[id] = xe_gt_tlb_invalidation_vma(gt, NULL, vma);
3365 for_each_gt(gt, xe, id) {
3366 if (gt_needs_invalidate & BIT(id)) {
3367 ret = xe_gt_tlb_invalidation_wait(gt, seqno[id]);
3373 vma->usm.gt_invalidated = vma->gt_mask;
3378 #if IS_ENABLED(CONFIG_DRM_XE_SIMPLE_ERROR_CAPTURE)
3379 int xe_analyze_vm(struct drm_printer *p, struct xe_vm *vm, int gt_id)
3381 struct rb_node *node;
3385 if (!down_read_trylock(&vm->lock)) {
3386 drm_printf(p, " Failed to acquire VM lock to dump capture");
3389 if (vm->pt_root[gt_id]) {
3390 addr = xe_bo_addr(vm->pt_root[gt_id]->bo, 0, GEN8_PAGE_SIZE, &is_lmem);
3391 drm_printf(p, " VM root: A:0x%llx %s\n", addr, is_lmem ? "LMEM" : "SYS");
3394 for (node = rb_first(&vm->vmas); node; node = rb_next(node)) {
3395 struct xe_vma *vma = to_xe_vma(node);
3396 bool is_userptr = xe_vma_is_userptr(vma);
3399 struct xe_res_cursor cur;
3401 xe_res_first_sg(vma->userptr.sg, 0, GEN8_PAGE_SIZE, &cur);
3402 addr = xe_res_dma(&cur);
3404 addr = xe_bo_addr(vma->bo, 0, GEN8_PAGE_SIZE, &is_lmem);
3406 drm_printf(p, " [%016llx-%016llx] S:0x%016llx A:%016llx %s\n",
3407 vma->start, vma->end, vma->end - vma->start + 1ull,
3408 addr, is_userptr ? "USR" : is_lmem ? "VRAM" : "SYS");
3415 int xe_analyze_vm(struct drm_printer *p, struct xe_vm *vm, int gt_id)