1 // SPDX-License-Identifier: MIT
3 * Copyright © 2021 Intel Corporation
8 #include <linux/dma-fence-array.h>
10 #include <drm/drm_print.h>
11 #include <drm/ttm/ttm_execbuf_util.h>
12 #include <drm/ttm/ttm_tt.h>
13 #include <drm/xe_drm.h>
14 #include <linux/delay.h>
15 #include <linux/kthread.h>
17 #include <linux/swap.h>
20 #include "xe_device.h"
21 #include "xe_engine.h"
23 #include "xe_gt_pagefault.h"
24 #include "xe_gt_tlb_invalidation.h"
25 #include "xe_migrate.h"
27 #include "xe_preempt_fence.h"
29 #include "xe_res_cursor.h"
33 #define TEST_VM_ASYNC_OPS_ERROR
36 * xe_vma_userptr_check_repin() - Advisory check for repin needed
37 * @vma: The userptr vma
39 * Check if the userptr vma has been invalidated since last successful
40 * repin. The check is advisory only and the function can be called
41 * without the vm->userptr.notifier_lock held. There is no guarantee that the
42 * vma userptr will remain valid after a lockless check, so typically
43 * the call needs to be followed by a proper check under the notifier_lock.
45 * Return: 0 if userptr vma is valid, -EAGAIN otherwise; repin recommended.
47 int xe_vma_userptr_check_repin(struct xe_vma *vma)
49 return mmu_interval_check_retry(&vma->userptr.notifier,
50 vma->userptr.notifier_seq) ?
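/*
 * Illustrative sketch, not part of the driver: because the check above is
 * lockless and advisory only, callers typically pair it with an
 * authoritative re-check under the notifier lock, roughly:
 *
 *	if (xe_vma_userptr_check_repin(vma) == -EAGAIN)
 *		return -EAGAIN;		(repin, then retry)
 *
 *	down_read(&vm->userptr.notifier_lock);
 *	(authoritative check / PTE installation goes here)
 *	up_read(&vm->userptr.notifier_lock);
 */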
54 int xe_vma_userptr_pin_pages(struct xe_vma *vma)
56 struct xe_vm *vm = xe_vma_vm(vma);
57 struct xe_device *xe = vm->xe;
58 const unsigned long num_pages = xe_vma_size(vma) >> PAGE_SHIFT;
60 bool in_kthread = !current->mm;
61 unsigned long notifier_seq;
63 bool read_only = xe_vma_read_only(vma);
65 lockdep_assert_held(&vm->lock);
66 XE_BUG_ON(!xe_vma_is_userptr(vma));
68 if (vma->gpuva.flags & XE_VMA_DESTROYED)
71 notifier_seq = mmu_interval_read_begin(&vma->userptr.notifier);
72 if (notifier_seq == vma->userptr.notifier_seq)
75 pages = kvmalloc_array(num_pages, sizeof(*pages), GFP_KERNEL);
79 if (vma->userptr.sg) {
80 dma_unmap_sgtable(xe->drm.dev,
82 read_only ? DMA_TO_DEVICE :
83 DMA_BIDIRECTIONAL, 0);
84 sg_free_table(vma->userptr.sg);
85 vma->userptr.sg = NULL;
90 if (!mmget_not_zero(vma->userptr.notifier.mm)) {
94 kthread_use_mm(vma->userptr.notifier.mm);
97 while (pinned < num_pages) {
98 ret = get_user_pages_fast(xe_vma_userptr(vma) +
101 read_only ? 0 : FOLL_WRITE,
114 kthread_unuse_mm(vma->userptr.notifier.mm);
115 mmput(vma->userptr.notifier.mm);
121 ret = sg_alloc_table_from_pages_segment(&vma->userptr.sgt, pages,
123 (u64)pinned << PAGE_SHIFT,
124 xe_sg_segment_size(xe->drm.dev),
127 vma->userptr.sg = NULL;
130 vma->userptr.sg = &vma->userptr.sgt;
132 ret = dma_map_sgtable(xe->drm.dev, vma->userptr.sg,
133 read_only ? DMA_TO_DEVICE :
135 DMA_ATTR_SKIP_CPU_SYNC |
136 DMA_ATTR_NO_KERNEL_MAPPING);
138 sg_free_table(vma->userptr.sg);
139 vma->userptr.sg = NULL;
143 for (i = 0; i < pinned; ++i) {
146 set_page_dirty(pages[i]);
147 unlock_page(pages[i]);
150 mark_page_accessed(pages[i]);
154 release_pages(pages, pinned);
158 vma->userptr.notifier_seq = notifier_seq;
159 if (xe_vma_userptr_check_repin(vma) == -EAGAIN)
163 return ret < 0 ? ret : 0;
166 static bool preempt_fences_waiting(struct xe_vm *vm)
170 lockdep_assert_held(&vm->lock);
171 xe_vm_assert_held(vm);
173 list_for_each_entry(e, &vm->preempt.engines, compute.link) {
174 if (!e->compute.pfence || (e->compute.pfence &&
175 test_bit(DMA_FENCE_FLAG_ENABLE_SIGNAL_BIT,
176 &e->compute.pfence->flags))) {
184 static void free_preempt_fences(struct list_head *list)
186 struct list_head *link, *next;
188 list_for_each_safe(link, next, list)
189 xe_preempt_fence_free(to_preempt_fence_from_link(link));
192 static int alloc_preempt_fences(struct xe_vm *vm, struct list_head *list,
195 lockdep_assert_held(&vm->lock);
196 xe_vm_assert_held(vm);
198 if (*count >= vm->preempt.num_engines)
201 for (; *count < vm->preempt.num_engines; ++(*count)) {
202 struct xe_preempt_fence *pfence = xe_preempt_fence_alloc();
205 return PTR_ERR(pfence);
207 list_move_tail(xe_preempt_fence_link(pfence), list);
213 static int wait_for_existing_preempt_fences(struct xe_vm *vm)
217 xe_vm_assert_held(vm);
219 list_for_each_entry(e, &vm->preempt.engines, compute.link) {
220 if (e->compute.pfence) {
221 long timeout = dma_fence_wait(e->compute.pfence, false);
225 dma_fence_put(e->compute.pfence);
226 e->compute.pfence = NULL;
233 static bool xe_vm_is_idle(struct xe_vm *vm)
237 xe_vm_assert_held(vm);
238 list_for_each_entry(e, &vm->preempt.engines, compute.link) {
239 if (!xe_engine_is_idle(e))
246 static void arm_preempt_fences(struct xe_vm *vm, struct list_head *list)
248 struct list_head *link;
251 list_for_each_entry(e, &vm->preempt.engines, compute.link) {
252 struct dma_fence *fence;
255 XE_BUG_ON(link == list);
257 fence = xe_preempt_fence_arm(to_preempt_fence_from_link(link),
258 e, e->compute.context,
260 dma_fence_put(e->compute.pfence);
261 e->compute.pfence = fence;
265 static int add_preempt_fences(struct xe_vm *vm, struct xe_bo *bo)
268 struct ww_acquire_ctx ww;
271 err = xe_bo_lock(bo, &ww, vm->preempt.num_engines, true);
275 list_for_each_entry(e, &vm->preempt.engines, compute.link)
276 if (e->compute.pfence) {
277 dma_resv_add_fence(bo->ttm.base.resv,
279 DMA_RESV_USAGE_BOOKKEEP);
282 xe_bo_unlock(bo, &ww);
287 * xe_vm_fence_all_extobjs() - Add a fence to vm's external objects' resv
289 * @fence: The fence to add.
290 * @usage: The resv usage for the fence.
292 * Loops over all of the vm's external object bindings and adds a @fence
293 * with the given @usage to all of the external objects' reservation
296 void xe_vm_fence_all_extobjs(struct xe_vm *vm, struct dma_fence *fence,
297 enum dma_resv_usage usage)
301 list_for_each_entry(vma, &vm->extobj.list, extobj.link)
302 dma_resv_add_fence(xe_vma_bo(vma)->ttm.base.resv, fence, usage);
305 static void resume_and_reinstall_preempt_fences(struct xe_vm *vm)
309 lockdep_assert_held(&vm->lock);
310 xe_vm_assert_held(vm);
312 list_for_each_entry(e, &vm->preempt.engines, compute.link) {
315 dma_resv_add_fence(xe_vm_resv(vm), e->compute.pfence,
316 DMA_RESV_USAGE_BOOKKEEP);
317 xe_vm_fence_all_extobjs(vm, e->compute.pfence,
318 DMA_RESV_USAGE_BOOKKEEP);
322 int xe_vm_add_compute_engine(struct xe_vm *vm, struct xe_engine *e)
324 struct ttm_validate_buffer tv_onstack[XE_ONSTACK_TV];
325 struct ttm_validate_buffer *tv;
326 struct ww_acquire_ctx ww;
327 struct list_head objs;
328 struct dma_fence *pfence;
332 XE_BUG_ON(!xe_vm_in_compute_mode(vm));
334 down_write(&vm->lock);
336 err = xe_vm_lock_dma_resv(vm, &ww, tv_onstack, &tv, &objs, true, 1);
338 goto out_unlock_outer;
340 pfence = xe_preempt_fence_create(e, e->compute.context,
347 list_add(&e->compute.link, &vm->preempt.engines);
348 ++vm->preempt.num_engines;
349 e->compute.pfence = pfence;
351 down_read(&vm->userptr.notifier_lock);
353 dma_resv_add_fence(xe_vm_resv(vm), pfence,
354 DMA_RESV_USAGE_BOOKKEEP);
356 xe_vm_fence_all_extobjs(vm, pfence, DMA_RESV_USAGE_BOOKKEEP);
359 * Check whether a VM preemption or a userptr invalidation is in flight;
360 * if so, trigger this preempt fence to sync state with the other
361 * preempt fences on the VM.
363 wait = __xe_vm_userptr_needs_repin(vm) || preempt_fences_waiting(vm);
365 dma_fence_enable_sw_signaling(pfence);
367 up_read(&vm->userptr.notifier_lock);
370 xe_vm_unlock_dma_resv(vm, tv_onstack, tv, &ww, &objs);
378 * __xe_vm_userptr_needs_repin() - Check whether the VM does have userptrs
379 * that need repinning.
382 * This function checks whether the VM has userptrs that need repinning,
383 * and provides a release-type barrier on the userptr.notifier_lock after
386 * Return: 0 if there are no userptrs needing repinning, -EAGAIN if there are.
388 int __xe_vm_userptr_needs_repin(struct xe_vm *vm)
390 lockdep_assert_held_read(&vm->userptr.notifier_lock);
392 return (list_empty(&vm->userptr.repin_list) &&
393 list_empty(&vm->userptr.invalidated)) ? 0 : -EAGAIN;
397 * xe_vm_lock_dma_resv() - Lock the vm dma_resv object and the dma_resv
398 * objects of the vm's external buffer objects.
400 * @ww: Pointer to a struct ww_acquire_ctx locking context.
401 * @tv_onstack: Array size XE_ONSTACK_TV of storage for the struct
402 * ttm_validate_buffers used for locking.
403 * @tv: Pointer to a pointer that on output contains the actual storage used.
404 * @objs: List head for the buffer objects locked.
405 * @intr: Whether to lock interruptible.
406 * @num_shared: Number of dma-fence slots to reserve in the locked objects.
408 * Locks the vm dma-resv objects and all the dma-resv objects of the
409 * buffer objects on the vm external object list. The TTM utilities require
410 * a list of struct ttm_validate_buffers pointing to the actual buffer
411 * objects to lock. Storage for those struct ttm_validate_buffers should
412 * be provided in @tv_onstack, and is typically reserved on the stack
413 * of the caller. If the size of @tv_onstack isn't sufficient, then
414 * storage will be allocated internally using kvmalloc().
416 * The function performs deadlock handling internally, and after a
417 * successful return the ww locking transaction should be considered
420 * Return: 0 on success, Negative error code on error. In particular if
421 * @intr is set to true, -EINTR or -ERESTARTSYS may be returned. In case
422 * of error, any locking performed has been reverted.
424 int xe_vm_lock_dma_resv(struct xe_vm *vm, struct ww_acquire_ctx *ww,
425 struct ttm_validate_buffer *tv_onstack,
426 struct ttm_validate_buffer **tv,
427 struct list_head *objs,
429 unsigned int num_shared)
431 struct ttm_validate_buffer *tv_vm, *tv_bo;
432 struct xe_vma *vma, *next;
436 lockdep_assert_held(&vm->lock);
438 if (vm->extobj.entries < XE_ONSTACK_TV) {
441 tv_vm = kvmalloc_array(vm->extobj.entries + 1, sizeof(*tv_vm),
448 INIT_LIST_HEAD(objs);
449 list_for_each_entry(vma, &vm->extobj.list, extobj.link) {
450 tv_bo->num_shared = num_shared;
451 tv_bo->bo = &xe_vma_bo(vma)->ttm;
453 list_add_tail(&tv_bo->head, objs);
456 tv_vm->num_shared = num_shared;
457 tv_vm->bo = xe_vm_ttm_bo(vm);
458 list_add_tail(&tv_vm->head, objs);
459 err = ttm_eu_reserve_buffers(ww, objs, intr, &dups);
463 spin_lock(&vm->notifier.list_lock);
464 list_for_each_entry_safe(vma, next, &vm->notifier.rebind_list,
465 notifier.rebind_link) {
466 xe_bo_assert_held(xe_vma_bo(vma));
468 list_del_init(&vma->notifier.rebind_link);
469 if (vma->tile_present && !(vma->gpuva.flags & XE_VMA_DESTROYED))
470 list_move_tail(&vma->combined_links.rebind,
473 spin_unlock(&vm->notifier.list_lock);
479 if (tv_vm != tv_onstack)
486 * xe_vm_unlock_dma_resv() - Unlock reservation objects locked by
487 * xe_vm_lock_dma_resv()
489 * @tv_onstack: The @tv_onstack array given to xe_vm_lock_dma_resv().
490 * @tv: The value of *@tv given by xe_vm_lock_dma_resv().
491 * @ww: The ww_acquire_context used for locking.
492 * @objs: The list returned from xe_vm_lock_dma_resv().
494 * Unlocks the reservation objects and frees any memory allocated by
495 * xe_vm_lock_dma_resv().
497 void xe_vm_unlock_dma_resv(struct xe_vm *vm,
498 struct ttm_validate_buffer *tv_onstack,
499 struct ttm_validate_buffer *tv,
500 struct ww_acquire_ctx *ww,
501 struct list_head *objs)
504 * Nothing should've been able to enter the list while we were locked,
505 * since we've held the dma-resvs of all the vm's external objects,
506 * and holding the dma_resv of an object is required for list
507 * addition, and we shouldn't add ourselves.
509 XE_WARN_ON(!list_empty(&vm->notifier.rebind_list));
511 ttm_eu_backoff_reservation(ww, objs);
512 if (tv && tv != tv_onstack)
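/*
 * Illustrative sketch, not part of the driver: xe_vm_lock_dma_resv() and
 * xe_vm_unlock_dma_resv() are used as a pair around work that needs the vm
 * resv and all external-object resvs held, mirroring
 * xe_vm_add_compute_engine() above:
 *
 *	struct ttm_validate_buffer tv_onstack[XE_ONSTACK_TV];
 *	struct ttm_validate_buffer *tv;
 *	struct ww_acquire_ctx ww;
 *	struct list_head objs;
 *	int err;
 *
 *	err = xe_vm_lock_dma_resv(vm, &ww, tv_onstack, &tv, &objs, true, 1);
 *	if (err)
 *		return err;
 *
 *	(add fences, validate BOs, ...)
 *
 *	xe_vm_unlock_dma_resv(vm, tv_onstack, tv, &ww, &objs);
 */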
516 #define XE_VM_REBIND_RETRY_TIMEOUT_MS 1000
518 static void xe_vm_kill(struct xe_vm *vm)
520 struct ww_acquire_ctx ww;
523 lockdep_assert_held(&vm->lock);
525 xe_vm_lock(vm, &ww, 0, false);
526 vm->flags |= XE_VM_FLAG_BANNED;
527 trace_xe_vm_kill(vm);
529 list_for_each_entry(e, &vm->preempt.engines, compute.link)
531 xe_vm_unlock(vm, &ww);
533 /* TODO: Inform user the VM is banned */
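/*
 * Rebind worker for compute-mode VMs: repins invalidated userptrs,
 * revalidates evicted BOs on the rebind list, issues the rebinds, and then
 * arms and reinstalls preempt fences before the engines are resumed.
 * Retries on -EAGAIN and, for a bounded time, on -ENOMEM; other errors are
 * reported with drm_warn().
 */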
536 static void preempt_rebind_work_func(struct work_struct *w)
538 struct xe_vm *vm = container_of(w, struct xe_vm, preempt.rebind_work);
540 struct ttm_validate_buffer tv_onstack[XE_ONSTACK_TV];
541 struct ttm_validate_buffer *tv;
542 struct ww_acquire_ctx ww;
543 struct list_head objs;
544 struct dma_fence *rebind_fence;
545 unsigned int fence_count = 0;
546 LIST_HEAD(preempt_fences);
550 int __maybe_unused tries = 0;
552 XE_BUG_ON(!xe_vm_in_compute_mode(vm));
553 trace_xe_vm_rebind_worker_enter(vm);
555 down_write(&vm->lock);
557 if (xe_vm_is_closed_or_banned(vm)) {
559 trace_xe_vm_rebind_worker_exit(vm);
564 if (vm->async_ops.error)
565 goto out_unlock_outer;
568 * Extreme corner where we exit a VM error state with a munmap style VM
569 * unbind inflight which requires a rebind. In this case the rebind
570 * needs to install some fences into the dma-resv slots. The worker to
571 * do this is queued; let that worker make progress by dropping vm->lock
572 * and trying this again.
574 if (vm->async_ops.munmap_rebind_inflight) {
576 flush_work(&vm->async_ops.work);
580 if (xe_vm_userptr_check_repin(vm)) {
581 err = xe_vm_userptr_pin(vm);
583 goto out_unlock_outer;
586 err = xe_vm_lock_dma_resv(vm, &ww, tv_onstack, &tv, &objs,
587 false, vm->preempt.num_engines);
589 goto out_unlock_outer;
591 if (xe_vm_is_idle(vm)) {
592 vm->preempt.rebind_deactivated = true;
596 /* Fresh preempt fences already installed. Everything is running. */
597 if (!preempt_fences_waiting(vm))
601 * This makes sure vm is completely suspended and also balances
602 * xe_engine suspend- and resume; we resume *all* vm engines below.
604 err = wait_for_existing_preempt_fences(vm);
608 err = alloc_preempt_fences(vm, &preempt_fences, &fence_count);
612 list_for_each_entry(vma, &vm->rebind_list, combined_links.rebind) {
613 if (xe_vma_has_no_bo(vma) ||
614 vma->gpuva.flags & XE_VMA_DESTROYED)
617 err = xe_bo_validate(xe_vma_bo(vma), vm, false);
622 rebind_fence = xe_vm_rebind(vm, true);
623 if (IS_ERR(rebind_fence)) {
624 err = PTR_ERR(rebind_fence);
629 dma_fence_wait(rebind_fence, false);
630 dma_fence_put(rebind_fence);
633 /* Wait on munmap style VM unbinds */
634 wait = dma_resv_wait_timeout(xe_vm_resv(vm),
635 DMA_RESV_USAGE_KERNEL,
636 false, MAX_SCHEDULE_TIMEOUT);
642 #define retry_required(__tries, __vm) \
643 (IS_ENABLED(CONFIG_DRM_XE_USERPTR_INVAL_INJECT) ? \
644 (!(__tries)++ || __xe_vm_userptr_needs_repin(__vm)) : \
645 __xe_vm_userptr_needs_repin(__vm))
647 down_read(&vm->userptr.notifier_lock);
648 if (retry_required(tries, vm)) {
649 up_read(&vm->userptr.notifier_lock);
654 #undef retry_required
656 spin_lock(&vm->xe->ttm.lru_lock);
657 ttm_lru_bulk_move_tail(&vm->lru_bulk_move);
658 spin_unlock(&vm->xe->ttm.lru_lock);
660 /* Point of no return. */
661 arm_preempt_fences(vm, &preempt_fences);
662 resume_and_reinstall_preempt_fences(vm);
663 up_read(&vm->userptr.notifier_lock);
666 xe_vm_unlock_dma_resv(vm, tv_onstack, tv, &ww, &objs);
668 if (err == -EAGAIN) {
669 trace_xe_vm_rebind_worker_retry(vm);
674 * With multiple active VMs, under memory pressure, it is possible that
675 * ttm_bo_validate() runs into -EDEADLK and in such a case returns -ENOMEM.
676 * Until ttm properly handles locking in such scenarios, the best thing the
677 * driver can do is retry with a timeout. Killing the VM or putting it
678 * in error state after timeout or other error scenarios is still TBD.
680 if (err == -ENOMEM) {
681 ktime_t cur = ktime_get();
683 end = end ? : ktime_add_ms(cur, XE_VM_REBIND_RETRY_TIMEOUT_MS);
684 if (ktime_before(cur, end)) {
686 trace_xe_vm_rebind_worker_retry(vm);
691 drm_warn(&vm->xe->drm, "VM worker error: %d\n", err);
696 free_preempt_fences(&preempt_fences);
698 trace_xe_vm_rebind_worker_exit(vm);
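/*
 * MMU interval notifier callback for userptr VMAs: bumps the notifier
 * sequence number, moves the VMA to the invalidated list so exec and the
 * rebind worker know to repin it, and waits for (or, in fault mode,
 * invalidates) any outstanding GPU usage tracked in the vm's dma-resv.
 */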
701 static bool vma_userptr_invalidate(struct mmu_interval_notifier *mni,
702 const struct mmu_notifier_range *range,
703 unsigned long cur_seq)
705 struct xe_vma *vma = container_of(mni, struct xe_vma, userptr.notifier);
706 struct xe_vm *vm = xe_vma_vm(vma);
707 struct dma_resv_iter cursor;
708 struct dma_fence *fence;
711 XE_BUG_ON(!xe_vma_is_userptr(vma));
712 trace_xe_vma_userptr_invalidate(vma);
714 if (!mmu_notifier_range_blockable(range))
717 down_write(&vm->userptr.notifier_lock);
718 mmu_interval_set_seq(mni, cur_seq);
720 /* No need to stop gpu access if the userptr is not yet bound. */
721 if (!vma->userptr.initial_bind) {
722 up_write(&vm->userptr.notifier_lock);
727 * Tell exec and rebind worker they need to repin and rebind this
730 if (!xe_vm_in_fault_mode(vm) &&
731 !(vma->gpuva.flags & XE_VMA_DESTROYED) && vma->tile_present) {
732 spin_lock(&vm->userptr.invalidated_lock);
733 list_move_tail(&vma->userptr.invalidate_link,
734 &vm->userptr.invalidated);
735 spin_unlock(&vm->userptr.invalidated_lock);
738 up_write(&vm->userptr.notifier_lock);
741 * Preempt fences turn into schedule disables, pipeline these.
742 * Note that even in fault mode, we need to wait for binds and
743 * unbinds to complete, and those are attached as BOOKKEEP fences
746 dma_resv_iter_begin(&cursor, xe_vm_resv(vm),
747 DMA_RESV_USAGE_BOOKKEEP);
748 dma_resv_for_each_fence_unlocked(&cursor, fence)
749 dma_fence_enable_sw_signaling(fence);
750 dma_resv_iter_end(&cursor);
752 err = dma_resv_wait_timeout(xe_vm_resv(vm),
753 DMA_RESV_USAGE_BOOKKEEP,
754 false, MAX_SCHEDULE_TIMEOUT);
755 XE_WARN_ON(err <= 0);
757 if (xe_vm_in_fault_mode(vm)) {
758 err = xe_vm_invalidate_vma(vma);
762 trace_xe_vma_userptr_invalidate_complete(vma);
767 static const struct mmu_interval_notifier_ops vma_userptr_notifier_ops = {
768 .invalidate = vma_userptr_invalidate,
771 int xe_vm_userptr_pin(struct xe_vm *vm)
773 struct xe_vma *vma, *next;
775 LIST_HEAD(tmp_evict);
777 lockdep_assert_held_write(&vm->lock);
779 /* Collect invalidated userptrs */
780 spin_lock(&vm->userptr.invalidated_lock);
781 list_for_each_entry_safe(vma, next, &vm->userptr.invalidated,
782 userptr.invalidate_link) {
783 list_del_init(&vma->userptr.invalidate_link);
784 if (list_empty(&vma->combined_links.userptr))
785 list_move_tail(&vma->combined_links.userptr,
786 &vm->userptr.repin_list);
788 spin_unlock(&vm->userptr.invalidated_lock);
790 /* Pin and move to temporary list */
791 list_for_each_entry_safe(vma, next, &vm->userptr.repin_list,
792 combined_links.userptr) {
793 err = xe_vma_userptr_pin_pages(vma);
797 list_move_tail(&vma->combined_links.userptr, &tmp_evict);
800 /* Take lock and move to rebind_list for rebinding. */
801 err = dma_resv_lock_interruptible(xe_vm_resv(vm), NULL);
805 list_for_each_entry_safe(vma, next, &tmp_evict, combined_links.userptr)
806 list_move_tail(&vma->combined_links.rebind, &vm->rebind_list);
808 dma_resv_unlock(xe_vm_resv(vm));
813 list_splice_tail(&tmp_evict, &vm->userptr.repin_list);
819 * xe_vm_userptr_check_repin() - Check whether the VM might have userptrs
820 * that need repinning.
823 * This function does an advisory check for whether the VM has userptrs that
826 * Return: 0 if there are no indications of userptrs needing repinning,
827 * -EAGAIN if there are.
829 int xe_vm_userptr_check_repin(struct xe_vm *vm)
831 return (list_empty_careful(&vm->userptr.repin_list) &&
832 list_empty_careful(&vm->userptr.invalidated)) ? 0 : -EAGAIN;
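/*
 * Illustrative sketch, not part of the driver: this advisory vm-level check
 * is typically used to decide whether a full repin pass is needed, as in
 * preempt_rebind_work_func() above:
 *
 *	if (xe_vm_userptr_check_repin(vm)) {
 *		err = xe_vm_userptr_pin(vm);
 *		if (err)
 *			goto out_unlock_outer;
 *	}
 */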
835 static struct dma_fence *
836 xe_vm_bind_vma(struct xe_vma *vma, struct xe_engine *e,
837 struct xe_sync_entry *syncs, u32 num_syncs,
838 bool first_op, bool last_op);
840 struct dma_fence *xe_vm_rebind(struct xe_vm *vm, bool rebind_worker)
842 struct dma_fence *fence = NULL;
843 struct xe_vma *vma, *next;
845 lockdep_assert_held(&vm->lock);
846 if (xe_vm_no_dma_fences(vm) && !rebind_worker)
849 xe_vm_assert_held(vm);
850 list_for_each_entry_safe(vma, next, &vm->rebind_list,
851 combined_links.rebind) {
852 XE_WARN_ON(!vma->tile_present);
854 list_del_init(&vma->combined_links.rebind);
855 dma_fence_put(fence);
857 trace_xe_vma_rebind_worker(vma);
859 trace_xe_vma_rebind_exec(vma);
860 fence = xe_vm_bind_vma(vma, NULL, NULL, 0, false, false);
868 static struct xe_vma *xe_vma_create(struct xe_vm *vm,
870 u64 bo_offset_or_userptr,
877 struct xe_tile *tile;
880 XE_BUG_ON(start >= end);
881 XE_BUG_ON(end >= vm->size);
883 if (!bo && !is_null) /* userptr */
884 vma = kzalloc(sizeof(*vma), GFP_KERNEL);
886 vma = kzalloc(sizeof(*vma) - sizeof(struct xe_userptr),
889 vma = ERR_PTR(-ENOMEM);
893 INIT_LIST_HEAD(&vma->combined_links.rebind);
894 INIT_LIST_HEAD(&vma->notifier.rebind_link);
895 INIT_LIST_HEAD(&vma->extobj.link);
897 INIT_LIST_HEAD(&vma->gpuva.gem.entry);
898 vma->gpuva.vm = &vm->gpuvm;
899 vma->gpuva.va.addr = start;
900 vma->gpuva.va.range = end - start + 1;
902 vma->gpuva.flags |= XE_VMA_READ_ONLY;
904 vma->gpuva.flags |= DRM_GPUVA_SPARSE;
907 vma->tile_mask = tile_mask;
909 for_each_tile(tile, vm->xe, id)
910 vma->tile_mask |= 0x1 << id;
913 if (vm->xe->info.platform == XE_PVC)
914 vma->gpuva.flags |= XE_VMA_ATOMIC_PTE_BIT;
917 struct drm_gpuvm_bo *vm_bo;
919 xe_bo_assert_held(bo);
921 vm_bo = drm_gpuvm_bo_obtain(vma->gpuva.vm, &bo->ttm.base);
924 return ERR_CAST(vm_bo);
927 drm_gem_object_get(&bo->ttm.base);
928 vma->gpuva.gem.obj = &bo->ttm.base;
929 vma->gpuva.gem.offset = bo_offset_or_userptr;
930 drm_gpuva_link(&vma->gpuva, vm_bo);
931 drm_gpuvm_bo_put(vm_bo);
932 } else /* userptr or null */ {
934 u64 size = end - start + 1;
937 INIT_LIST_HEAD(&vma->userptr.invalidate_link);
938 vma->gpuva.gem.offset = bo_offset_or_userptr;
940 err = mmu_interval_notifier_insert(&vma->userptr.notifier,
942 xe_vma_userptr(vma), size,
943 &vma_userptr_notifier_ops);
950 vma->userptr.notifier_seq = LONG_MAX;
959 static bool vm_remove_extobj(struct xe_vma *vma)
961 if (!list_empty(&vma->extobj.link)) {
962 xe_vma_vm(vma)->extobj.entries--;
963 list_del_init(&vma->extobj.link);
969 static void xe_vma_destroy_late(struct xe_vma *vma)
971 struct xe_vm *vm = xe_vma_vm(vma);
972 struct xe_device *xe = vm->xe;
973 bool read_only = xe_vma_read_only(vma);
975 if (xe_vma_is_userptr(vma)) {
976 if (vma->userptr.sg) {
977 dma_unmap_sgtable(xe->drm.dev,
979 read_only ? DMA_TO_DEVICE :
980 DMA_BIDIRECTIONAL, 0);
981 sg_free_table(vma->userptr.sg);
982 vma->userptr.sg = NULL;
986 * Since userptr pages are not pinned, we can't remove
987 * the notifier until we're sure the GPU is not accessing
990 mmu_interval_notifier_remove(&vma->userptr.notifier);
992 } else if (xe_vma_is_null(vma)) {
995 xe_bo_put(xe_vma_bo(vma));
1001 static void vma_destroy_work_func(struct work_struct *w)
1003 struct xe_vma *vma =
1004 container_of(w, struct xe_vma, destroy_work);
1006 xe_vma_destroy_late(vma);
1009 static struct xe_vma *
1010 bo_has_vm_references_locked(struct xe_bo *bo, struct xe_vm *vm,
1011 struct xe_vma *ignore)
1013 struct drm_gpuvm_bo *vm_bo;
1014 struct drm_gpuva *va;
1015 struct drm_gem_object *obj = &bo->ttm.base;
1017 xe_bo_assert_held(bo);
1019 drm_gem_for_each_gpuvm_bo(vm_bo, obj) {
1020 drm_gpuvm_bo_for_each_va(va, vm_bo) {
1021 struct xe_vma *vma = gpuva_to_vma(va);
1023 if (vma != ignore && xe_vma_vm(vma) == vm)
1031 static bool bo_has_vm_references(struct xe_bo *bo, struct xe_vm *vm,
1032 struct xe_vma *ignore)
1034 struct ww_acquire_ctx ww;
1037 xe_bo_lock(bo, &ww, 0, false);
1038 ret = !!bo_has_vm_references_locked(bo, vm, ignore);
1039 xe_bo_unlock(bo, &ww);
1044 static void __vm_insert_extobj(struct xe_vm *vm, struct xe_vma *vma)
1046 lockdep_assert_held_write(&vm->lock);
1048 list_add(&vma->extobj.link, &vm->extobj.list);
1049 vm->extobj.entries++;
1052 static void vm_insert_extobj(struct xe_vm *vm, struct xe_vma *vma)
1054 struct xe_bo *bo = xe_vma_bo(vma);
1056 lockdep_assert_held_write(&vm->lock);
1058 if (bo_has_vm_references(bo, vm, vma))
1061 __vm_insert_extobj(vm, vma);
1064 static void vma_destroy_cb(struct dma_fence *fence,
1065 struct dma_fence_cb *cb)
1067 struct xe_vma *vma = container_of(cb, struct xe_vma, destroy_cb);
1069 INIT_WORK(&vma->destroy_work, vma_destroy_work_func);
1070 queue_work(system_unbound_wq, &vma->destroy_work);
1073 static void xe_vma_destroy(struct xe_vma *vma, struct dma_fence *fence)
1075 struct xe_vm *vm = xe_vma_vm(vma);
1077 lockdep_assert_held_write(&vm->lock);
1078 XE_BUG_ON(!list_empty(&vma->combined_links.destroy));
1080 if (xe_vma_is_userptr(vma)) {
1081 XE_WARN_ON(!(vma->gpuva.flags & XE_VMA_DESTROYED));
1083 spin_lock(&vm->userptr.invalidated_lock);
1084 list_del(&vma->userptr.invalidate_link);
1085 spin_unlock(&vm->userptr.invalidated_lock);
1086 } else if (!xe_vma_is_null(vma)) {
1087 xe_bo_assert_held(xe_vma_bo(vma));
1089 spin_lock(&vm->notifier.list_lock);
1090 list_del(&vma->notifier.rebind_link);
1091 spin_unlock(&vm->notifier.list_lock);
1093 drm_gpuva_unlink(&vma->gpuva);
1095 if (!xe_vma_bo(vma)->vm && vm_remove_extobj(vma)) {
1096 struct xe_vma *other;
1098 other = bo_has_vm_references_locked(xe_vma_bo(vma), vm, NULL);
1101 __vm_insert_extobj(vm, other);
1105 xe_vm_assert_held(vm);
1107 int ret = dma_fence_add_callback(fence, &vma->destroy_cb,
1111 XE_WARN_ON(ret != -ENOENT);
1112 xe_vma_destroy_late(vma);
1115 xe_vma_destroy_late(vma);
1119 static void xe_vma_destroy_unlocked(struct xe_vma *vma)
1121 struct ttm_validate_buffer tv[2];
1122 struct ww_acquire_ctx ww;
1123 struct xe_bo *bo = xe_vma_bo(vma);
1128 memset(tv, 0, sizeof(tv));
1129 tv[0].bo = xe_vm_ttm_bo(xe_vma_vm(vma));
1130 list_add(&tv[0].head, &objs);
1133 tv[1].bo = &xe_bo_get(bo)->ttm;
1134 list_add(&tv[1].head, &objs);
1136 err = ttm_eu_reserve_buffers(&ww, &objs, false, &dups);
1139 xe_vma_destroy(vma, NULL);
1141 ttm_eu_backoff_reservation(&ww, &objs);
1147 xe_vm_find_overlapping_vma(struct xe_vm *vm, u64 start, u64 range)
1149 struct drm_gpuva *gpuva;
1151 lockdep_assert_held(&vm->lock);
1153 if (xe_vm_is_closed_or_banned(vm))
1156 XE_BUG_ON(start + range > vm->size);
1158 gpuva = drm_gpuva_find_first(&vm->gpuvm, start, range);
1160 return gpuva ? gpuva_to_vma(gpuva) : NULL;
1163 static int xe_vm_insert_vma(struct xe_vm *vm, struct xe_vma *vma)
1167 XE_BUG_ON(xe_vma_vm(vma) != vm);
1168 lockdep_assert_held(&vm->lock);
1170 err = drm_gpuva_insert(&vm->gpuvm, &vma->gpuva);
1171 XE_WARN_ON(err); /* Shouldn't be possible */
1176 static void xe_vm_remove_vma(struct xe_vm *vm, struct xe_vma *vma)
1178 XE_BUG_ON(xe_vma_vm(vma) != vm);
1179 lockdep_assert_held(&vm->lock);
1181 drm_gpuva_remove(&vma->gpuva);
1182 if (vm->usm.last_fault_vma == vma)
1183 vm->usm.last_fault_vma = NULL;
1186 static struct drm_gpuva_op *xe_vm_op_alloc(void)
1188 struct xe_vma_op *op;
1190 op = kzalloc(sizeof(*op), GFP_KERNEL);
1198 static void xe_vm_free(struct drm_gpuvm *gpuvm);
1200 static struct drm_gpuvm_ops gpuvm_ops = {
1201 .op_alloc = xe_vm_op_alloc,
1202 .vm_free = xe_vm_free,
1205 static void xe_vma_op_work_func(struct work_struct *w);
1206 static void vm_destroy_work_func(struct work_struct *w);
1208 struct xe_vm *xe_vm_create(struct xe_device *xe, u32 flags)
1210 struct drm_gem_object *vm_resv_obj;
1212 int err, number_tiles = 0;
1213 struct xe_tile *tile;
1216 vm = kzalloc(sizeof(*vm), GFP_KERNEL);
1218 return ERR_PTR(-ENOMEM);
1222 vm->size = 1ull << xe_pt_shift(xe->info.vm_max_level + 1);
1226 init_rwsem(&vm->lock);
1228 INIT_LIST_HEAD(&vm->rebind_list);
1230 INIT_LIST_HEAD(&vm->userptr.repin_list);
1231 INIT_LIST_HEAD(&vm->userptr.invalidated);
1232 init_rwsem(&vm->userptr.notifier_lock);
1233 spin_lock_init(&vm->userptr.invalidated_lock);
1235 INIT_LIST_HEAD(&vm->notifier.rebind_list);
1236 spin_lock_init(&vm->notifier.list_lock);
1238 INIT_LIST_HEAD(&vm->async_ops.pending);
1239 INIT_WORK(&vm->async_ops.work, xe_vma_op_work_func);
1240 spin_lock_init(&vm->async_ops.lock);
1242 INIT_WORK(&vm->destroy_work, vm_destroy_work_func);
1244 INIT_LIST_HEAD(&vm->preempt.engines);
1245 vm->preempt.min_run_period_ms = 10; /* FIXME: Wire up to uAPI */
1247 for_each_tile(tile, xe, id)
1248 xe_range_fence_tree_init(&vm->rftree[id]);
1250 INIT_LIST_HEAD(&vm->extobj.list);
1252 if (!(flags & XE_VM_FLAG_MIGRATION))
1253 xe_device_mem_access_get(xe);
1255 vm_resv_obj = drm_gpuvm_resv_object_alloc(&xe->drm);
1261 drm_gpuvm_init(&vm->gpuvm, "Xe VM", 0, &xe->drm, vm_resv_obj,
1262 0, vm->size, 0, 0, &gpuvm_ops);
1264 drm_gem_object_put(vm_resv_obj);
1266 err = dma_resv_lock_interruptible(xe_vm_resv(vm), NULL);
1270 if (IS_DGFX(xe) && xe->info.vram_flags & XE_VRAM_FLAGS_NEED64K)
1271 vm->flags |= XE_VM_FLAG_64K;
1273 for_each_tile(tile, xe, id) {
1274 if (flags & XE_VM_FLAG_MIGRATION &&
1275 tile->id != XE_VM_FLAG_TILE_ID(flags))
1278 vm->pt_root[id] = xe_pt_create(vm, tile, xe->info.vm_max_level);
1279 if (IS_ERR(vm->pt_root[id])) {
1280 err = PTR_ERR(vm->pt_root[id]);
1281 vm->pt_root[id] = NULL;
1282 goto err_unlock_close;
1286 if (flags & XE_VM_FLAG_SCRATCH_PAGE) {
1287 for_each_tile(tile, xe, id) {
1288 if (!vm->pt_root[id])
1291 err = xe_pt_create_scratch(xe, tile, vm);
1293 goto err_unlock_close;
1295 vm->batch_invalidate_tlb = true;
1298 if (flags & XE_VM_FLAG_COMPUTE_MODE) {
1299 INIT_WORK(&vm->preempt.rebind_work, preempt_rebind_work_func);
1300 vm->flags |= XE_VM_FLAG_COMPUTE_MODE;
1301 vm->batch_invalidate_tlb = false;
1304 if (flags & XE_VM_FLAG_ASYNC_BIND_OPS) {
1305 vm->async_ops.fence.context = dma_fence_context_alloc(1);
1306 vm->flags |= XE_VM_FLAG_ASYNC_BIND_OPS;
1309 /* Fill pt_root after allocating scratch tables */
1310 for_each_tile(tile, xe, id) {
1311 if (!vm->pt_root[id])
1314 xe_pt_populate_empty(tile, vm, vm->pt_root[id]);
1316 dma_resv_unlock(xe_vm_resv(vm));
1318 /* Kernel migration VM shouldn't have a circular loop. */
1319 if (!(flags & XE_VM_FLAG_MIGRATION)) {
1320 for_each_tile(tile, xe, id) {
1321 struct xe_gt *gt = tile->primary_gt;
1322 struct xe_vm *migrate_vm;
1323 struct xe_engine *eng;
1325 if (!vm->pt_root[id])
1328 migrate_vm = xe_migrate_get_vm(tile->migrate);
1329 eng = xe_engine_create_class(xe, gt, migrate_vm,
1330 XE_ENGINE_CLASS_COPY,
1332 xe_vm_put(migrate_vm);
1342 if (number_tiles > 1)
1343 vm->composite_fence_ctx = dma_fence_context_alloc(1);
1345 mutex_lock(&xe->usm.lock);
1346 if (flags & XE_VM_FLAG_FAULT_MODE)
1347 xe->usm.num_vm_in_fault_mode++;
1348 else if (!(flags & XE_VM_FLAG_MIGRATION))
1349 xe->usm.num_vm_in_non_fault_mode++;
1350 mutex_unlock(&xe->usm.lock);
1352 trace_xe_vm_create(vm);
1357 dma_resv_unlock(xe_vm_resv(vm));
1359 xe_vm_close_and_put(vm);
1360 return ERR_PTR(err);
1363 for_each_tile(tile, xe, id)
1364 xe_range_fence_tree_fini(&vm->rftree[id]);
1366 if (!(flags & XE_VM_FLAG_MIGRATION))
1367 xe_device_mem_access_put(xe);
1368 return ERR_PTR(err);
1371 static void flush_async_ops(struct xe_vm *vm)
1373 queue_work(system_unbound_wq, &vm->async_ops.work);
1374 flush_work(&vm->async_ops.work);
1377 static void vm_error_capture(struct xe_vm *vm, int err,
1378 u32 op, u64 addr, u64 size)
1380 struct drm_xe_vm_bind_op_error_capture capture;
1381 u64 __user *address =
1382 u64_to_user_ptr(vm->async_ops.error_capture.addr);
1383 bool in_kthread = !current->mm;
1385 capture.error = err;
1387 capture.addr = addr;
1388 capture.size = size;
1391 if (!mmget_not_zero(vm->async_ops.error_capture.mm))
1393 kthread_use_mm(vm->async_ops.error_capture.mm);
1396 if (copy_to_user(address, &capture, sizeof(capture)))
1397 XE_WARN_ON("Copy to user failed");
1400 kthread_unuse_mm(vm->async_ops.error_capture.mm);
1401 mmput(vm->async_ops.error_capture.mm);
1405 wake_up_all(&vm->async_ops.error_capture.wq);
1408 static void xe_vm_close(struct xe_vm *vm)
1410 down_write(&vm->lock);
1412 up_write(&vm->lock);
1415 void xe_vm_close_and_put(struct xe_vm *vm)
1417 LIST_HEAD(contested);
1418 struct ww_acquire_ctx ww;
1419 struct xe_device *xe = vm->xe;
1420 struct xe_tile *tile;
1421 struct xe_vma *vma, *next_vma;
1422 struct drm_gpuva *gpuva, *next;
1425 XE_BUG_ON(vm->preempt.num_engines);
1428 flush_async_ops(vm);
1429 if (xe_vm_in_compute_mode(vm))
1430 flush_work(&vm->preempt.rebind_work);
1432 for_each_tile(tile, xe, id) {
1434 xe_engine_kill(vm->eng[id]);
1435 xe_engine_put(vm->eng[id]);
1440 down_write(&vm->lock);
1441 xe_vm_lock(vm, &ww, 0, false);
1442 drm_gpuvm_for_each_va_safe(gpuva, next, &vm->gpuvm) {
1443 vma = gpuva_to_vma(gpuva);
1445 if (xe_vma_has_no_bo(vma)) {
1446 down_read(&vm->userptr.notifier_lock);
1447 vma->gpuva.flags |= XE_VMA_DESTROYED;
1448 up_read(&vm->userptr.notifier_lock);
1451 xe_vm_remove_vma(vm, vma);
1453 /* easy case, remove from VMA? */
1454 if (xe_vma_has_no_bo(vma) || xe_vma_bo(vma)->vm) {
1455 list_del_init(&vma->combined_links.rebind);
1456 xe_vma_destroy(vma, NULL);
1460 list_move_tail(&vma->combined_links.destroy, &contested);
1464 * All vm operations will add shared fences to resv.
1465 * The only exception is eviction for a shared object,
1466 * but even so, the unbind when evicted would still
1467 * install a fence to resv. Hence it's safe to
1468 * destroy the pagetables immediately.
1470 for_each_tile(tile, xe, id) {
1471 if (vm->scratch_bo[id]) {
1474 xe_bo_unpin(vm->scratch_bo[id]);
1475 xe_bo_put(vm->scratch_bo[id]);
1476 for (i = 0; i < vm->pt_root[id]->level; i++)
1477 xe_pt_destroy(vm->scratch_pt[id][i], vm->flags,
1480 if (vm->pt_root[id]) {
1481 xe_pt_destroy(vm->pt_root[id], vm->flags, NULL);
1482 vm->pt_root[id] = NULL;
1485 xe_vm_unlock(vm, &ww);
1488 * VM is now dead, cannot re-add nodes to vm->vmas if it's NULL
1489 * Since we hold a refcount to the bo, we can remove and free
1490 * the members safely without locking.
1492 list_for_each_entry_safe(vma, next_vma, &contested,
1493 combined_links.destroy) {
1494 list_del_init(&vma->combined_links.destroy);
1495 xe_vma_destroy_unlocked(vma);
1498 if (vm->async_ops.error_capture.addr)
1499 wake_up_all(&vm->async_ops.error_capture.wq);
1501 XE_WARN_ON(!list_empty(&vm->extobj.list));
1502 up_write(&vm->lock);
1504 mutex_lock(&xe->usm.lock);
1505 if (vm->flags & XE_VM_FLAG_FAULT_MODE)
1506 xe->usm.num_vm_in_fault_mode--;
1507 else if (!(vm->flags & XE_VM_FLAG_MIGRATION))
1508 xe->usm.num_vm_in_non_fault_mode--;
1509 mutex_unlock(&xe->usm.lock);
1511 for_each_tile(tile, xe, id)
1512 xe_range_fence_tree_fini(&vm->rftree[id]);
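/*
 * Deferred, final VM destruction: runs from a workqueue (see xe_vm_free())
 * because tearing down the VM needs to sleep. Drops the mem_access
 * reference for non-migration VMs, releases the ASID, and puts the rebind
 * fence.
 */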
1517 static void vm_destroy_work_func(struct work_struct *w)
1520 container_of(w, struct xe_vm, destroy_work);
1521 struct xe_device *xe = vm->xe;
1522 struct xe_tile *tile;
1526 /* xe_vm_close_and_put was not called? */
1527 XE_WARN_ON(vm->size);
1529 if (!(vm->flags & XE_VM_FLAG_MIGRATION)) {
1530 xe_device_mem_access_put(xe);
1532 if (xe->info.has_asid) {
1533 mutex_lock(&xe->usm.lock);
1534 lookup = xa_erase(&xe->usm.asid_to_vm, vm->usm.asid);
1535 XE_WARN_ON(lookup != vm);
1536 mutex_unlock(&xe->usm.lock);
1540 for_each_tile(tile, xe, id)
1541 XE_WARN_ON(vm->pt_root[id]);
1543 trace_xe_vm_free(vm);
1544 dma_fence_put(vm->rebind_fence);
1548 static void xe_vm_free(struct drm_gpuvm *gpuvm)
1550 struct xe_vm *vm = container_of(gpuvm, struct xe_vm, gpuvm);
1552 /* To destroy the VM we need to be able to sleep */
1553 queue_work(system_unbound_wq, &vm->destroy_work);
1556 struct xe_vm *xe_vm_lookup(struct xe_file *xef, u32 id)
1560 mutex_lock(&xef->vm.lock);
1561 vm = xa_load(&xef->vm.xa, id);
1564 mutex_unlock(&xef->vm.lock);
1569 u64 xe_vm_pdp4_descriptor(struct xe_vm *vm, struct xe_tile *tile)
1571 return xe_pde_encode(vm->pt_root[tile->id]->bo, 0,
1575 static struct dma_fence *
1576 xe_vm_unbind_vma(struct xe_vma *vma, struct xe_engine *e,
1577 struct xe_sync_entry *syncs, u32 num_syncs,
1578 bool first_op, bool last_op)
1580 struct xe_tile *tile;
1581 struct dma_fence *fence = NULL;
1582 struct dma_fence **fences = NULL;
1583 struct dma_fence_array *cf = NULL;
1584 struct xe_vm *vm = xe_vma_vm(vma);
1585 int cur_fence = 0, i;
1586 int number_tiles = hweight8(vma->tile_present);
1590 trace_xe_vma_unbind(vma);
1592 if (number_tiles > 1) {
1593 fences = kmalloc_array(number_tiles, sizeof(*fences),
1596 return ERR_PTR(-ENOMEM);
1599 for_each_tile(tile, vm->xe, id) {
1600 if (!(vma->tile_present & BIT(id)))
1603 fence = __xe_pt_unbind_vma(tile, vma, e, first_op ? syncs : NULL,
1604 first_op ? num_syncs : 0);
1605 if (IS_ERR(fence)) {
1606 err = PTR_ERR(fence);
1611 fences[cur_fence++] = fence;
1614 if (e && vm->pt_root[id] && !list_empty(&e->multi_gt_list))
1615 e = list_next_entry(e, multi_gt_list);
1619 cf = dma_fence_array_create(number_tiles, fences,
1620 vm->composite_fence_ctx,
1621 vm->composite_fence_seqno++,
1624 --vm->composite_fence_seqno;
1631 for (i = 0; i < num_syncs; i++)
1632 xe_sync_entry_signal(&syncs[i], NULL,
1633 cf ? &cf->base : fence);
1636 return cf ? &cf->base : !fence ? dma_fence_get_stub() : fence;
1641 /* FIXME: Rewind the previous binds? */
1642 dma_fence_put(fences[--cur_fence]);
1647 return ERR_PTR(err);
1650 static struct dma_fence *
1651 xe_vm_bind_vma(struct xe_vma *vma, struct xe_engine *e,
1652 struct xe_sync_entry *syncs, u32 num_syncs,
1653 bool first_op, bool last_op)
1655 struct xe_tile *tile;
1656 struct dma_fence *fence;
1657 struct dma_fence **fences = NULL;
1658 struct dma_fence_array *cf = NULL;
1659 struct xe_vm *vm = xe_vma_vm(vma);
1660 int cur_fence = 0, i;
1661 int number_tiles = hweight8(vma->tile_mask);
1665 trace_xe_vma_bind(vma);
1667 if (number_tiles > 1) {
1668 fences = kmalloc_array(number_tiles, sizeof(*fences),
1671 return ERR_PTR(-ENOMEM);
1674 for_each_tile(tile, vm->xe, id) {
1675 if (!(vma->tile_mask & BIT(id)))
1678 fence = __xe_pt_bind_vma(tile, vma, e ? e : vm->eng[id],
1679 first_op ? syncs : NULL,
1680 first_op ? num_syncs : 0,
1681 vma->tile_present & BIT(id));
1682 if (IS_ERR(fence)) {
1683 err = PTR_ERR(fence);
1688 fences[cur_fence++] = fence;
1691 if (e && vm->pt_root[id] && !list_empty(&e->multi_gt_list))
1692 e = list_next_entry(e, multi_gt_list);
1696 cf = dma_fence_array_create(number_tiles, fences,
1697 vm->composite_fence_ctx,
1698 vm->composite_fence_seqno++,
1701 --vm->composite_fence_seqno;
1708 for (i = 0; i < num_syncs; i++)
1709 xe_sync_entry_signal(&syncs[i], NULL,
1710 cf ? &cf->base : fence);
1713 return cf ? &cf->base : fence;
1718 /* FIXME: Rewind the previous binds? */
1719 dma_fence_put(fences[--cur_fence]);
1724 return ERR_PTR(err);
1727 struct async_op_fence {
1728 struct dma_fence fence;
1729 struct dma_fence *wait_fence;
1730 struct dma_fence_cb cb;
1732 wait_queue_head_t wq;
1736 static const char *async_op_fence_get_driver_name(struct dma_fence *dma_fence)
1742 async_op_fence_get_timeline_name(struct dma_fence *dma_fence)
1744 return "async_op_fence";
1747 static const struct dma_fence_ops async_op_fence_ops = {
1748 .get_driver_name = async_op_fence_get_driver_name,
1749 .get_timeline_name = async_op_fence_get_timeline_name,
1752 static void async_op_fence_cb(struct dma_fence *fence, struct dma_fence_cb *cb)
1754 struct async_op_fence *afence =
1755 container_of(cb, struct async_op_fence, cb);
1757 afence->fence.error = afence->wait_fence->error;
1758 dma_fence_signal(&afence->fence);
1759 xe_vm_put(afence->vm);
1760 dma_fence_put(afence->wait_fence);
1761 dma_fence_put(&afence->fence);
1764 static void add_async_op_fence_cb(struct xe_vm *vm,
1765 struct dma_fence *fence,
1766 struct async_op_fence *afence)
1770 if (!xe_vm_no_dma_fences(vm)) {
1771 afence->started = true;
1773 wake_up_all(&afence->wq);
1776 afence->wait_fence = dma_fence_get(fence);
1777 afence->vm = xe_vm_get(vm);
1778 dma_fence_get(&afence->fence);
1779 ret = dma_fence_add_callback(fence, &afence->cb, async_op_fence_cb);
1780 if (ret == -ENOENT) {
1781 afence->fence.error = afence->wait_fence->error;
1782 dma_fence_signal(&afence->fence);
1786 dma_fence_put(afence->wait_fence);
1787 dma_fence_put(&afence->fence);
1789 XE_WARN_ON(ret && ret != -ENOENT);
1792 int xe_vm_async_fence_wait_start(struct dma_fence *fence)
1794 if (fence->ops == &async_op_fence_ops) {
1795 struct async_op_fence *afence =
1796 container_of(fence, struct async_op_fence, fence);
1798 XE_BUG_ON(xe_vm_no_dma_fences(afence->vm));
1801 return wait_event_interruptible(afence->wq, afence->started);
1807 static int __xe_vm_bind(struct xe_vm *vm, struct xe_vma *vma,
1808 struct xe_engine *e, struct xe_sync_entry *syncs,
1809 u32 num_syncs, struct async_op_fence *afence,
1810 bool immediate, bool first_op, bool last_op)
1812 struct dma_fence *fence;
1814 xe_vm_assert_held(vm);
1817 fence = xe_vm_bind_vma(vma, e, syncs, num_syncs, first_op,
1820 return PTR_ERR(fence);
1824 XE_BUG_ON(!xe_vm_in_fault_mode(vm));
1826 fence = dma_fence_get_stub();
1828 for (i = 0; i < num_syncs; i++)
1829 xe_sync_entry_signal(&syncs[i], NULL, fence);
1833 add_async_op_fence_cb(vm, fence, afence);
1835 dma_fence_put(fence);
1839 static int xe_vm_bind(struct xe_vm *vm, struct xe_vma *vma, struct xe_engine *e,
1840 struct xe_bo *bo, struct xe_sync_entry *syncs,
1841 u32 num_syncs, struct async_op_fence *afence,
1842 bool immediate, bool first_op, bool last_op)
1846 xe_vm_assert_held(vm);
1847 xe_bo_assert_held(bo);
1849 if (bo && immediate) {
1850 err = xe_bo_validate(bo, vm, true);
1855 return __xe_vm_bind(vm, vma, e, syncs, num_syncs, afence, immediate,
1859 static int xe_vm_unbind(struct xe_vm *vm, struct xe_vma *vma,
1860 struct xe_engine *e, struct xe_sync_entry *syncs,
1861 u32 num_syncs, struct async_op_fence *afence,
1862 bool first_op, bool last_op)
1864 struct dma_fence *fence;
1866 xe_vm_assert_held(vm);
1867 xe_bo_assert_held(xe_vma_bo(vma));
1869 fence = xe_vm_unbind_vma(vma, e, syncs, num_syncs, first_op, last_op);
1871 return PTR_ERR(fence);
1873 add_async_op_fence_cb(vm, fence, afence);
1875 xe_vma_destroy(vma, fence);
1876 dma_fence_put(fence);
1881 static int vm_set_error_capture_address(struct xe_device *xe, struct xe_vm *vm,
1884 if (XE_IOCTL_DBG(xe, !value))
1887 if (XE_IOCTL_DBG(xe, !(vm->flags & XE_VM_FLAG_ASYNC_BIND_OPS)))
1890 if (XE_IOCTL_DBG(xe, vm->async_ops.error_capture.addr))
1893 vm->async_ops.error_capture.mm = current->mm;
1894 vm->async_ops.error_capture.addr = value;
1895 init_waitqueue_head(&vm->async_ops.error_capture.wq);
1900 typedef int (*xe_vm_set_property_fn)(struct xe_device *xe, struct xe_vm *vm,
1903 static const xe_vm_set_property_fn vm_set_property_funcs[] = {
1904 [XE_VM_PROPERTY_BIND_OP_ERROR_CAPTURE_ADDRESS] =
1905 vm_set_error_capture_address,
1908 static int vm_user_ext_set_property(struct xe_device *xe, struct xe_vm *vm,
1911 u64 __user *address = u64_to_user_ptr(extension);
1912 struct drm_xe_ext_vm_set_property ext;
1915 err = __copy_from_user(&ext, address, sizeof(ext));
1916 if (XE_IOCTL_DBG(xe, err))
1919 if (XE_IOCTL_DBG(xe, ext.property >=
1920 ARRAY_SIZE(vm_set_property_funcs)) ||
1921 XE_IOCTL_DBG(xe, ext.pad) ||
1922 XE_IOCTL_DBG(xe, ext.reserved[0] || ext.reserved[1]))
1925 return vm_set_property_funcs[ext.property](xe, vm, ext.value);
1928 typedef int (*xe_vm_user_extension_fn)(struct xe_device *xe, struct xe_vm *vm,
1931 static const xe_vm_set_property_fn vm_user_extension_funcs[] = {
1932 [XE_VM_EXTENSION_SET_PROPERTY] = vm_user_ext_set_property,
1935 #define MAX_USER_EXTENSIONS 16
1936 static int vm_user_extensions(struct xe_device *xe, struct xe_vm *vm,
1937 u64 extensions, int ext_number)
1939 u64 __user *address = u64_to_user_ptr(extensions);
1940 struct xe_user_extension ext;
1943 if (XE_IOCTL_DBG(xe, ext_number >= MAX_USER_EXTENSIONS))
1946 err = __copy_from_user(&ext, address, sizeof(ext));
1947 if (XE_IOCTL_DBG(xe, err))
1950 if (XE_IOCTL_DBG(xe, ext.pad) ||
1951 XE_IOCTL_DBG(xe, ext.name >=
1952 ARRAY_SIZE(vm_user_extension_funcs)))
1955 err = vm_user_extension_funcs[ext.name](xe, vm, extensions);
1956 if (XE_IOCTL_DBG(xe, err))
1959 if (ext.next_extension)
1960 return vm_user_extensions(xe, vm, ext.next_extension,
1966 #define ALL_DRM_XE_VM_CREATE_FLAGS (DRM_XE_VM_CREATE_SCRATCH_PAGE | \
1967 DRM_XE_VM_CREATE_COMPUTE_MODE | \
1968 DRM_XE_VM_CREATE_ASYNC_BIND_OPS | \
1969 DRM_XE_VM_CREATE_FAULT_MODE)
1971 int xe_vm_create_ioctl(struct drm_device *dev, void *data,
1972 struct drm_file *file)
1974 struct xe_device *xe = to_xe_device(dev);
1975 struct xe_file *xef = to_xe_file(file);
1976 struct drm_xe_vm_create *args = data;
1982 if (XE_IOCTL_DBG(xe, args->reserved[0] || args->reserved[1]))
1985 if (XE_IOCTL_DBG(xe, args->flags & ~ALL_DRM_XE_VM_CREATE_FLAGS))
1988 if (XE_IOCTL_DBG(xe, args->flags & DRM_XE_VM_CREATE_SCRATCH_PAGE &&
1989 args->flags & DRM_XE_VM_CREATE_FAULT_MODE))
1992 if (XE_IOCTL_DBG(xe, args->flags & DRM_XE_VM_CREATE_COMPUTE_MODE &&
1993 args->flags & DRM_XE_VM_CREATE_FAULT_MODE))
1996 if (XE_IOCTL_DBG(xe, args->flags & DRM_XE_VM_CREATE_FAULT_MODE &&
1997 xe_device_in_non_fault_mode(xe)))
2000 if (XE_IOCTL_DBG(xe, !(args->flags & DRM_XE_VM_CREATE_FAULT_MODE) &&
2001 xe_device_in_fault_mode(xe)))
2004 if (XE_IOCTL_DBG(xe, args->flags & DRM_XE_VM_CREATE_FAULT_MODE &&
2005 !xe->info.supports_usm))
2008 if (args->flags & DRM_XE_VM_CREATE_SCRATCH_PAGE)
2009 flags |= XE_VM_FLAG_SCRATCH_PAGE;
2010 if (args->flags & DRM_XE_VM_CREATE_COMPUTE_MODE)
2011 flags |= XE_VM_FLAG_COMPUTE_MODE;
2012 if (args->flags & DRM_XE_VM_CREATE_ASYNC_BIND_OPS)
2013 flags |= XE_VM_FLAG_ASYNC_BIND_OPS;
2014 if (args->flags & DRM_XE_VM_CREATE_FAULT_MODE)
2015 flags |= XE_VM_FLAG_FAULT_MODE;
2017 vm = xe_vm_create(xe, flags);
2021 if (args->extensions) {
2022 err = vm_user_extensions(xe, vm, args->extensions, 0);
2023 if (XE_IOCTL_DBG(xe, err)) {
2024 xe_vm_close_and_put(vm);
2029 mutex_lock(&xef->vm.lock);
2030 err = xa_alloc(&xef->vm.xa, &id, vm, xa_limit_32b, GFP_KERNEL);
2031 mutex_unlock(&xef->vm.lock);
2033 xe_vm_close_and_put(vm);
2037 if (xe->info.has_asid) {
2038 mutex_lock(&xe->usm.lock);
2039 err = xa_alloc_cyclic(&xe->usm.asid_to_vm, &asid, vm,
2040 XA_LIMIT(0, XE_MAX_ASID - 1),
2041 &xe->usm.next_asid, GFP_KERNEL);
2042 mutex_unlock(&xe->usm.lock);
2044 xe_vm_close_and_put(vm);
2047 vm->usm.asid = asid;
2052 #if IS_ENABLED(CONFIG_DRM_XE_DEBUG_MEM)
2053 /* Warning: Security issue - never enable by default */
2054 args->reserved[0] = xe_bo_main_addr(vm->pt_root[0]->bo, XE_PAGE_SIZE);
2060 int xe_vm_destroy_ioctl(struct drm_device *dev, void *data,
2061 struct drm_file *file)
2063 struct xe_device *xe = to_xe_device(dev);
2064 struct xe_file *xef = to_xe_file(file);
2065 struct drm_xe_vm_destroy *args = data;
2069 if (XE_IOCTL_DBG(xe, args->pad) ||
2070 XE_IOCTL_DBG(xe, args->reserved[0] || args->reserved[1]))
2073 mutex_lock(&xef->vm.lock);
2074 vm = xa_load(&xef->vm.xa, args->vm_id);
2075 if (XE_IOCTL_DBG(xe, !vm))
2077 else if (XE_IOCTL_DBG(xe, vm->preempt.num_engines))
2080 xa_erase(&xef->vm.xa, args->vm_id);
2081 mutex_unlock(&xef->vm.lock);
2084 xe_vm_close_and_put(vm);
2089 static const u32 region_to_mem_type[] = {
2095 static int xe_vm_prefetch(struct xe_vm *vm, struct xe_vma *vma,
2096 struct xe_engine *e, u32 region,
2097 struct xe_sync_entry *syncs, u32 num_syncs,
2098 struct async_op_fence *afence, bool first_op,
2103 XE_BUG_ON(region > ARRAY_SIZE(region_to_mem_type));
2105 if (!xe_vma_has_no_bo(vma)) {
2106 err = xe_bo_migrate(xe_vma_bo(vma), region_to_mem_type[region]);
2111 if (vma->tile_mask != (vma->tile_present & ~vma->usm.tile_invalidated)) {
2112 return xe_vm_bind(vm, vma, e, xe_vma_bo(vma), syncs, num_syncs,
2113 afence, true, first_op, last_op);
2117 /* Nothing to do, signal fences now */
2119 for (i = 0; i < num_syncs; i++)
2120 xe_sync_entry_signal(&syncs[i], NULL,
2121 dma_fence_get_stub());
2124 dma_fence_signal(&afence->fence);
2129 #define VM_BIND_OP(op) (op & 0xffff)
2131 struct ttm_buffer_object *xe_vm_ttm_bo(struct xe_vm *vm)
2133 int idx = vm->flags & XE_VM_FLAG_MIGRATION ?
2134 XE_VM_FLAG_TILE_ID(vm->flags) : 0;
2136 /* Safe to use index 0 as all BOs in the VM share a single dma-resv lock */
2137 return &vm->pt_root[idx]->bo->ttm;
2140 static void xe_vm_tv_populate(struct xe_vm *vm, struct ttm_validate_buffer *tv)
2143 tv->bo = xe_vm_ttm_bo(vm);
2146 static void vm_set_async_error(struct xe_vm *vm, int err)
2148 lockdep_assert_held(&vm->lock);
2149 vm->async_ops.error = err;
2152 static int vm_bind_ioctl_lookup_vma(struct xe_vm *vm, struct xe_bo *bo,
2153 u64 addr, u64 range, u32 op)
2155 struct xe_device *xe = vm->xe;
2157 bool async = !!(op & XE_VM_BIND_FLAG_ASYNC);
2159 lockdep_assert_held(&vm->lock);
2161 switch (VM_BIND_OP(op)) {
2162 case XE_VM_BIND_OP_MAP:
2163 case XE_VM_BIND_OP_MAP_USERPTR:
2164 vma = xe_vm_find_overlapping_vma(vm, addr, range);
2165 if (XE_IOCTL_DBG(xe, vma && !async))
2168 case XE_VM_BIND_OP_UNMAP:
2169 case XE_VM_BIND_OP_PREFETCH:
2170 vma = xe_vm_find_overlapping_vma(vm, addr, range);
2171 if (XE_IOCTL_DBG(xe, !vma))
2172 /* Not an actual error, the IOCTL cleans up and returns 0 */
2174 if (XE_IOCTL_DBG(xe, (xe_vma_start(vma) != addr ||
2175 xe_vma_end(vma) != addr + range) && !async))
2178 case XE_VM_BIND_OP_UNMAP_ALL:
2179 if (XE_IOCTL_DBG(xe, list_empty(&bo->ttm.base.gpuva.list)))
2180 /* Not an actual error, the IOCTL cleans up and returns 0 */
2184 XE_BUG_ON("NOT POSSIBLE");
2191 static void prep_vma_destroy(struct xe_vm *vm, struct xe_vma *vma,
2194 down_read(&vm->userptr.notifier_lock);
2195 vma->gpuva.flags |= XE_VMA_DESTROYED;
2196 up_read(&vm->userptr.notifier_lock);
2198 xe_vm_remove_vma(vm, vma);
2202 #define ULL unsigned long long
2204 #if IS_ENABLED(CONFIG_DRM_XE_DEBUG_VM)
2205 static void print_op(struct xe_device *xe, struct drm_gpuva_op *op)
2210 case DRM_GPUVA_OP_MAP:
2211 vm_dbg(&xe->drm, "MAP: addr=0x%016llx, range=0x%016llx",
2212 (ULL)op->map.va.addr, (ULL)op->map.va.range);
2214 case DRM_GPUVA_OP_REMAP:
2215 vma = gpuva_to_vma(op->remap.unmap->va);
2216 vm_dbg(&xe->drm, "REMAP:UNMAP: addr=0x%016llx, range=0x%016llx, keep=%d",
2217 (ULL)xe_vma_start(vma), (ULL)xe_vma_size(vma),
2218 op->unmap.keep ? 1 : 0);
2221 "REMAP:PREV: addr=0x%016llx, range=0x%016llx",
2222 (ULL)op->remap.prev->va.addr,
2223 (ULL)op->remap.prev->va.range);
2226 "REMAP:NEXT: addr=0x%016llx, range=0x%016llx",
2227 (ULL)op->remap.next->va.addr,
2228 (ULL)op->remap.next->va.range);
2230 case DRM_GPUVA_OP_UNMAP:
2231 vma = gpuva_to_vma(op->unmap.va);
2232 vm_dbg(&xe->drm, "UNMAP: addr=0x%016llx, range=0x%016llx, keep=%d",
2233 (ULL)xe_vma_start(vma), (ULL)xe_vma_size(vma),
2234 op->unmap.keep ? 1 : 0);
2236 case DRM_GPUVA_OP_PREFETCH:
2237 vma = gpuva_to_vma(op->prefetch.va);
2238 vm_dbg(&xe->drm, "PREFETCH: addr=0x%016llx, range=0x%016llx",
2239 (ULL)xe_vma_start(vma), (ULL)xe_vma_size(vma));
2242 XE_BUG_ON("NOT POSSIBLE");
2246 static void print_op(struct xe_device *xe, struct drm_gpuva_op *op)
2252 * Create the operations list from the IOCTL arguments and set up operation
2253 * fields so the parse and commit steps are decoupled from them. This step can fail.
2255 static struct drm_gpuva_ops *
2256 vm_bind_ioctl_ops_create(struct xe_vm *vm, struct xe_bo *bo,
2257 u64 bo_offset_or_userptr, u64 addr, u64 range,
2258 u32 operation, u8 tile_mask, u32 region)
2260 struct drm_gem_object *obj = bo ? &bo->ttm.base : NULL;
2261 struct ww_acquire_ctx ww;
2262 struct drm_gpuva_ops *ops;
2263 struct drm_gpuva_op *__op;
2264 struct xe_vma_op *op;
2265 struct drm_gpuvm_bo *vm_bo;
2268 lockdep_assert_held_write(&vm->lock);
2270 vm_dbg(&vm->xe->drm,
2271 "op=%d, addr=0x%016llx, range=0x%016llx, bo_offset_or_userptr=0x%016llx",
2272 VM_BIND_OP(operation), (ULL)addr, (ULL)range,
2273 (ULL)bo_offset_or_userptr);
2275 switch (VM_BIND_OP(operation)) {
2276 case XE_VM_BIND_OP_MAP:
2277 case XE_VM_BIND_OP_MAP_USERPTR:
2278 ops = drm_gpuvm_sm_map_ops_create(&vm->gpuvm, addr, range,
2279 obj, bo_offset_or_userptr);
2283 drm_gpuva_for_each_op(__op, ops) {
2284 struct xe_vma_op *op = gpuva_op_to_vma_op(__op);
2286 op->tile_mask = tile_mask;
2288 operation & XE_VM_BIND_FLAG_IMMEDIATE;
2290 operation & XE_VM_BIND_FLAG_READONLY;
2291 op->map.is_null = operation & XE_VM_BIND_FLAG_NULL;
2294 case XE_VM_BIND_OP_UNMAP:
2295 ops = drm_gpuvm_sm_unmap_ops_create(&vm->gpuvm, addr, range);
2299 drm_gpuva_for_each_op(__op, ops) {
2300 struct xe_vma_op *op = gpuva_op_to_vma_op(__op);
2302 op->tile_mask = tile_mask;
2305 case XE_VM_BIND_OP_PREFETCH:
2306 ops = drm_gpuvm_prefetch_ops_create(&vm->gpuvm, addr, range);
2310 drm_gpuva_for_each_op(__op, ops) {
2311 struct xe_vma_op *op = gpuva_op_to_vma_op(__op);
2313 op->tile_mask = tile_mask;
2314 op->prefetch.region = region;
2317 case XE_VM_BIND_OP_UNMAP_ALL:
2320 err = xe_bo_lock(bo, &ww, 0, true);
2322 return ERR_PTR(err);
2324 vm_bo = drm_gpuvm_bo_find(&vm->gpuvm, obj);
2328 ops = drm_gpuvm_bo_unmap_ops_create(vm_bo);
2329 drm_gpuvm_bo_put(vm_bo);
2330 xe_bo_unlock(bo, &ww);
2334 drm_gpuva_for_each_op(__op, ops) {
2335 struct xe_vma_op *op = gpuva_op_to_vma_op(__op);
2337 op->tile_mask = tile_mask;
2341 XE_BUG_ON("NOT POSSIBLE");
2342 ops = ERR_PTR(-EINVAL);
2345 #ifdef TEST_VM_ASYNC_OPS_ERROR
2346 if (operation & FORCE_ASYNC_OP_ERROR) {
2347 op = list_first_entry_or_null(&ops->list, struct xe_vma_op,
2350 op->inject_error = true;
2355 drm_gpuva_for_each_op(__op, ops)
2356 print_op(vm->xe, __op);
2361 static struct xe_vma *new_vma(struct xe_vm *vm, struct drm_gpuva_op_map *op,
2362 u8 tile_mask, bool read_only, bool is_null)
2364 struct xe_bo *bo = op->gem.obj ? gem_to_xe_bo(op->gem.obj) : NULL;
2366 struct ww_acquire_ctx ww;
2369 lockdep_assert_held_write(&vm->lock);
2372 err = xe_bo_lock(bo, &ww, 0, true);
2374 return ERR_PTR(err);
2376 vma = xe_vma_create(vm, bo, op->gem.offset,
2377 op->va.addr, op->va.addr +
2378 op->va.range - 1, read_only, is_null,
2381 xe_bo_unlock(bo, &ww);
2383 if (xe_vma_is_userptr(vma)) {
2384 err = xe_vma_userptr_pin_pages(vma);
2386 prep_vma_destroy(vm, vma, false);
2387 xe_vma_destroy_unlocked(vma);
2388 return ERR_PTR(err);
2390 } else if (!xe_vma_has_no_bo(vma) && !bo->vm) {
2391 vm_insert_extobj(vm, vma);
2392 err = add_preempt_fences(vm, bo);
2394 prep_vma_destroy(vm, vma, false);
2395 xe_vma_destroy_unlocked(vma);
2396 return ERR_PTR(err);
2403 static u64 xe_vma_max_pte_size(struct xe_vma *vma)
2405 if (vma->gpuva.flags & XE_VMA_PTE_1G)
2407 else if (vma->gpuva.flags & XE_VMA_PTE_2M)
2414 * Parse the operations list and create any resources needed for the operations
2415 * prior to fully committing to them. This setup can fail.
2417 static int vm_bind_ioctl_ops_parse(struct xe_vm *vm, struct xe_engine *e,
2418 struct drm_gpuva_ops **ops, int num_ops_list,
2419 struct xe_sync_entry *syncs, u32 num_syncs,
2420 struct list_head *ops_list, bool async)
2422 struct xe_vma_op *last_op = NULL;
2423 struct list_head *async_list = NULL;
2424 struct async_op_fence *fence = NULL;
2427 lockdep_assert_held_write(&vm->lock);
2428 XE_BUG_ON(num_ops_list > 1 && !async);
2430 if (num_syncs && async) {
2433 fence = kmalloc(sizeof(*fence), GFP_KERNEL);
2437 seqno = e ? ++e->bind.fence_seqno : ++vm->async_ops.fence.seqno;
2438 dma_fence_init(&fence->fence, &async_op_fence_ops,
2439 &vm->async_ops.lock, e ? e->bind.fence_ctx :
2440 vm->async_ops.fence.context, seqno);
2442 if (!xe_vm_no_dma_fences(vm)) {
2444 fence->started = false;
2445 init_waitqueue_head(&fence->wq);
2449 for (i = 0; i < num_ops_list; ++i) {
2450 struct drm_gpuva_ops *__ops = ops[i];
2451 struct drm_gpuva_op *__op;
2453 drm_gpuva_for_each_op(__op, __ops) {
2454 struct xe_vma_op *op = gpuva_op_to_vma_op(__op);
2455 bool first = !async_list;
2457 XE_BUG_ON(!first && !async);
2459 INIT_LIST_HEAD(&op->link);
2461 async_list = ops_list;
2462 list_add_tail(&op->link, async_list);
2465 op->flags |= XE_VMA_OP_FIRST;
2466 op->num_syncs = num_syncs;
2472 switch (op->base.op) {
2473 case DRM_GPUVA_OP_MAP:
2477 vma = new_vma(vm, &op->base.map,
2478 op->tile_mask, op->map.read_only,
2488 case DRM_GPUVA_OP_REMAP:
2490 struct xe_vma *old =
2491 gpuva_to_vma(op->base.remap.unmap->va);
2493 op->remap.start = xe_vma_start(old);
2494 op->remap.range = xe_vma_size(old);
2496 if (op->base.remap.prev) {
2499 op->base.remap.unmap->va->flags &
2502 op->base.remap.unmap->va->flags &
2505 vma = new_vma(vm, op->base.remap.prev,
2506 op->tile_mask, read_only,
2513 op->remap.prev = vma;
2516 * Userptr creates a new SG mapping so
2517 * we must also rebind.
2519 op->remap.skip_prev = !xe_vma_is_userptr(old) &&
2520 IS_ALIGNED(xe_vma_end(vma),
2521 xe_vma_max_pte_size(old));
2522 if (op->remap.skip_prev) {
2526 op->remap.start = xe_vma_end(vma);
2530 if (op->base.remap.next) {
2533 op->base.remap.unmap->va->flags &
2537 op->base.remap.unmap->va->flags &
2540 vma = new_vma(vm, op->base.remap.next,
2541 op->tile_mask, read_only,
2548 op->remap.next = vma;
2551 * Userptr creates a new SG mapping so
2552 * we must also rebind.
2554 op->remap.skip_next = !xe_vma_is_userptr(old) &&
2555 IS_ALIGNED(xe_vma_start(vma),
2556 xe_vma_max_pte_size(old));
2557 if (op->remap.skip_next)
2564 case DRM_GPUVA_OP_UNMAP:
2565 case DRM_GPUVA_OP_PREFETCH:
2569 XE_BUG_ON("NOT POSSIBLE");
2575 last_op->ops = __ops;
2581 last_op->flags |= XE_VMA_OP_LAST;
2582 last_op->num_syncs = num_syncs;
2583 last_op->syncs = syncs;
2584 last_op->fence = fence;
2593 static int xe_vma_op_commit(struct xe_vm *vm, struct xe_vma_op *op)
2597 lockdep_assert_held_write(&vm->lock);
2599 switch (op->base.op) {
2600 case DRM_GPUVA_OP_MAP:
2601 err |= xe_vm_insert_vma(vm, op->map.vma);
2603 case DRM_GPUVA_OP_REMAP:
2604 prep_vma_destroy(vm, gpuva_to_vma(op->base.remap.unmap->va),
2607 if (op->remap.prev) {
2608 err |= xe_vm_insert_vma(vm, op->remap.prev);
2609 if (!err && op->remap.skip_prev)
2610 op->remap.prev = NULL;
2612 if (op->remap.next) {
2613 err |= xe_vm_insert_vma(vm, op->remap.next);
2614 if (!err && op->remap.skip_next)
2615 op->remap.next = NULL;
2618		/* Adjust for partial unbind after removing the VMA from the VM */
2620 op->base.remap.unmap->va->va.addr = op->remap.start;
2621 op->base.remap.unmap->va->va.range = op->remap.range;
2624 case DRM_GPUVA_OP_UNMAP:
2625 prep_vma_destroy(vm, gpuva_to_vma(op->base.unmap.va), true);
2627 case DRM_GPUVA_OP_PREFETCH:
2631 XE_BUG_ON("NOT POSSIBLE");
2634 op->flags |= XE_VMA_OP_COMMITTED;
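/*
 * __xe_vma_op_execute() runs a single committed op against the GPU page
 * tables: it reserves the VM's dma-resv (and the VMA's BO, if any) with
 * ttm_eu_reserve_buffers(), issues the bind/unbind/prefetch, then backs the
 * reservation off. If a userptr VMA raced with an invalidation (-EAGAIN),
 * the pages are re-pinned so the operation can be retried.
 */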
2638 static int __xe_vma_op_execute(struct xe_vm *vm, struct xe_vma *vma,
2639 struct xe_vma_op *op)
2643 struct ttm_validate_buffer tv_bo, tv_vm;
2644 struct ww_acquire_ctx ww;
2648 lockdep_assert_held_write(&vm->lock);
2650 xe_vm_tv_populate(vm, &tv_vm);
2651 list_add_tail(&tv_vm.head, &objs);
2652 vbo = xe_vma_bo(vma);
2655	 * An unbind can drop the last reference to the BO, and
2656	 * the BO is needed for ttm_eu_backoff_reservation, so
2657	 * take a reference here.
2662 tv_bo.bo = &vbo->ttm;
2663 tv_bo.num_shared = 1;
2664 list_add(&tv_bo.head, &objs);
2669 err = ttm_eu_reserve_buffers(&ww, &objs, true, &dups);
2675 xe_vm_assert_held(vm);
2676 xe_bo_assert_held(xe_vma_bo(vma));
2678 switch (op->base.op) {
2679 case DRM_GPUVA_OP_MAP:
2680 err = xe_vm_bind(vm, vma, op->engine, xe_vma_bo(vma),
2681 op->syncs, op->num_syncs, op->fence,
2682 op->map.immediate || !xe_vm_in_fault_mode(vm),
2683 op->flags & XE_VMA_OP_FIRST,
2684 op->flags & XE_VMA_OP_LAST);
2686 case DRM_GPUVA_OP_REMAP:
2688 bool prev = !!op->remap.prev;
2689 bool next = !!op->remap.next;
2691 if (!op->remap.unmap_done) {
2693 vm->async_ops.munmap_rebind_inflight = true;
2694 vma->gpuva.flags |= XE_VMA_FIRST_REBIND;
2696 err = xe_vm_unbind(vm, vma, op->engine, op->syncs,
2698 !prev && !next ? op->fence : NULL,
2699 op->flags & XE_VMA_OP_FIRST,
2700 op->flags & XE_VMA_OP_LAST && !prev &&
2704 op->remap.unmap_done = true;
2708 op->remap.prev->gpuva.flags |= XE_VMA_LAST_REBIND;
2709 err = xe_vm_bind(vm, op->remap.prev, op->engine,
2710 xe_vma_bo(op->remap.prev), op->syncs,
2712 !next ? op->fence : NULL, true, false,
2713 op->flags & XE_VMA_OP_LAST && !next);
2714 op->remap.prev->gpuva.flags &= ~XE_VMA_LAST_REBIND;
2717 op->remap.prev = NULL;
2721 op->remap.next->gpuva.flags |= XE_VMA_LAST_REBIND;
2722 err = xe_vm_bind(vm, op->remap.next, op->engine,
2723 xe_vma_bo(op->remap.next),
2724 op->syncs, op->num_syncs,
2725 op->fence, true, false,
2726 op->flags & XE_VMA_OP_LAST);
2727 op->remap.next->gpuva.flags &= ~XE_VMA_LAST_REBIND;
2730 op->remap.next = NULL;
2732 vm->async_ops.munmap_rebind_inflight = false;
2736 case DRM_GPUVA_OP_UNMAP:
2737 err = xe_vm_unbind(vm, vma, op->engine, op->syncs,
2738 op->num_syncs, op->fence,
2739 op->flags & XE_VMA_OP_FIRST,
2740 op->flags & XE_VMA_OP_LAST);
2742 case DRM_GPUVA_OP_PREFETCH:
2743 err = xe_vm_prefetch(vm, vma, op->engine, op->prefetch.region,
2744 op->syncs, op->num_syncs, op->fence,
2745 op->flags & XE_VMA_OP_FIRST,
2746 op->flags & XE_VMA_OP_LAST);
2749 XE_BUG_ON("NOT POSSIBLE");
2752 ttm_eu_backoff_reservation(&ww, &objs);
2753 if (err == -EAGAIN && xe_vma_is_userptr(vma)) {
2754 lockdep_assert_held_write(&vm->lock);
2755 err = xe_vma_userptr_pin_pages(vma);
2762 trace_xe_vma_fail(vma);
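/*
 * xe_vma_op_execute() selects the VMA the current phase of the op acts on:
 * the mapped/unmapped/prefetched VMA directly, or for a REMAP either the VMA
 * being unmapped or the prev/next replacement once the unmap half is done,
 * and hands it to __xe_vma_op_execute().
 */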
2767 static int xe_vma_op_execute(struct xe_vm *vm, struct xe_vma_op *op)
2771 lockdep_assert_held_write(&vm->lock);
2773 #ifdef TEST_VM_ASYNC_OPS_ERROR
2774 if (op->inject_error) {
2775 op->inject_error = false;
2780 switch (op->base.op) {
2781 case DRM_GPUVA_OP_MAP:
2782 ret = __xe_vma_op_execute(vm, op->map.vma, op);
2784 case DRM_GPUVA_OP_REMAP:
2788 if (!op->remap.unmap_done)
2789 vma = gpuva_to_vma(op->base.remap.unmap->va);
2790 else if (op->remap.prev)
2791 vma = op->remap.prev;
2793 vma = op->remap.next;
2795 ret = __xe_vma_op_execute(vm, vma, op);
2798 case DRM_GPUVA_OP_UNMAP:
2799 ret = __xe_vma_op_execute(vm, gpuva_to_vma(op->base.unmap.va),
2802 case DRM_GPUVA_OP_PREFETCH:
2803 ret = __xe_vma_op_execute(vm,
2804 gpuva_to_vma(op->base.prefetch.va),
2808 XE_BUG_ON("NOT POSSIBLE");
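/*
 * xe_vma_op_cleanup() releases what an op borrowed for its lifetime: for the
 * last op of a bind it drops the sync entries, the engine reference and the
 * async fence reference (where present), unlinks the op from the pending list
 * under the async_ops lock, and frees the underlying drm_gpuva_ops.
 */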
2814 static void xe_vma_op_cleanup(struct xe_vm *vm, struct xe_vma_op *op)
2816 bool last = op->flags & XE_VMA_OP_LAST;
2819 while (op->num_syncs--)
2820 xe_sync_entry_cleanup(&op->syncs[op->num_syncs]);
2823 xe_engine_put(op->engine);
2825 dma_fence_put(&op->fence->fence);
2827 if (!list_empty(&op->link)) {
2828 spin_lock_irq(&vm->async_ops.lock);
2829 list_del(&op->link);
2830 spin_unlock_irq(&vm->async_ops.lock);
2833 drm_gpuva_ops_free(&vm->gpuvm, op->ops);
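/*
 * xe_vma_op_unwind() reverses the effect of xe_vma_op_commit() when a later
 * step fails: VMAs created by the op (MAP, or the prev/next halves of a
 * REMAP) are destroyed again, while VMAs that had been marked for unmapping
 * get their XE_VMA_DESTROYED flag cleared and are re-inserted into the VM.
 */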
2838 static void xe_vma_op_unwind(struct xe_vm *vm, struct xe_vma_op *op,
2841 lockdep_assert_held_write(&vm->lock);
2843 switch (op->base.op) {
2844 case DRM_GPUVA_OP_MAP:
2846 prep_vma_destroy(vm, op->map.vma, post_commit);
2847 xe_vma_destroy_unlocked(op->map.vma);
2850 case DRM_GPUVA_OP_UNMAP:
2852 struct xe_vma *vma = gpuva_to_vma(op->base.unmap.va);
2854 down_read(&vm->userptr.notifier_lock);
2855 vma->gpuva.flags &= ~XE_VMA_DESTROYED;
2856 up_read(&vm->userptr.notifier_lock);
2858 xe_vm_insert_vma(vm, vma);
2861 case DRM_GPUVA_OP_REMAP:
2863 struct xe_vma *vma = gpuva_to_vma(op->base.remap.unmap->va);
2865 if (op->remap.prev) {
2866 prep_vma_destroy(vm, op->remap.prev, post_commit);
2867 xe_vma_destroy_unlocked(op->remap.prev);
2869 if (op->remap.next) {
2870 prep_vma_destroy(vm, op->remap.next, post_commit);
2871 xe_vma_destroy_unlocked(op->remap.next);
2873 down_read(&vm->userptr.notifier_lock);
2874 vma->gpuva.flags &= ~XE_VMA_DESTROYED;
2875 up_read(&vm->userptr.notifier_lock);
2877 xe_vm_insert_vma(vm, vma);
2880 case DRM_GPUVA_OP_PREFETCH:
2884 XE_BUG_ON("NOT POSSIBLE");
2888 static struct xe_vma_op *next_vma_op(struct xe_vm *vm)
2890 return list_first_entry_or_null(&vm->async_ops.pending,
2891 struct xe_vma_op, link);
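/*
 * xe_vma_op_work_func() is the deferred async-bind worker. It drains
 * vm->async_ops.pending one op at a time, executing each under the write side
 * of vm->lock. On failure the VM's async error state is set (and reported via
 * the user error capture address when one is registered) and processing stops
 * until userspace issues a RESTART bind; on a closed VM the op is only
 * cleaned up, destroying any VMA left over from a REMAP/UNMAP.
 */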
2894 static void xe_vma_op_work_func(struct work_struct *w)
2896 struct xe_vm *vm = container_of(w, struct xe_vm, async_ops.work);
2899 struct xe_vma_op *op;
2902 if (vm->async_ops.error && !xe_vm_is_closed(vm))
2905 spin_lock_irq(&vm->async_ops.lock);
2906 op = next_vma_op(vm);
2907 spin_unlock_irq(&vm->async_ops.lock);
2912 if (!xe_vm_is_closed(vm)) {
2913 down_write(&vm->lock);
2914 err = xe_vma_op_execute(vm, op);
2916 drm_warn(&vm->xe->drm,
2917 "Async VM op(%d) failed with %d",
2919 vm_set_async_error(vm, err);
2920 up_write(&vm->lock);
2922 if (vm->async_ops.error_capture.addr)
2923 vm_error_capture(vm, err, 0, 0, 0);
2926 up_write(&vm->lock);
2930 switch (op->base.op) {
2931 case DRM_GPUVA_OP_REMAP:
2932 vma = gpuva_to_vma(op->base.remap.unmap->va);
2933 trace_xe_vma_flush(vma);
2935 down_write(&vm->lock);
2936 xe_vma_destroy_unlocked(vma);
2937 up_write(&vm->lock);
2939 case DRM_GPUVA_OP_UNMAP:
2940 vma = gpuva_to_vma(op->base.unmap.va);
2941 trace_xe_vma_flush(vma);
2943 down_write(&vm->lock);
2944 xe_vma_destroy_unlocked(vma);
2945 up_write(&vm->lock);
2952 if (op->fence && !test_bit(DMA_FENCE_FLAG_SIGNALED_BIT,
2953 &op->fence->fence.flags)) {
2954 if (!xe_vm_no_dma_fences(vm)) {
2955 op->fence->started = true;
2956 wake_up_all(&op->fence->wq);
2958 dma_fence_signal(&op->fence->fence);
2962 xe_vma_op_cleanup(vm, op);
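/*
 * vm_bind_ioctl_ops_commit() first commits every op into the VM's VA tree via
 * xe_vma_op_commit(). Only then does it either execute the ops immediately on
 * the synchronous path (cleaning up as it goes) or, for async binds,
 * install/signal the bind fence on the syncs, splice the list onto
 * vm->async_ops.pending and kick the worker. If committing fails, the already
 * committed ops are unwound in reverse order and cleaned up.
 */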
2966 static int vm_bind_ioctl_ops_commit(struct xe_vm *vm,
2967 struct list_head *ops_list, bool async)
2969 struct xe_vma_op *op, *last_op, *next;
2972 lockdep_assert_held_write(&vm->lock);
2974 list_for_each_entry(op, ops_list, link) {
2976 err = xe_vma_op_commit(vm, op);
2982 err = xe_vma_op_execute(vm, last_op);
2985 xe_vma_op_cleanup(vm, last_op);
2988 bool installed = false;
2990 for (i = 0; i < last_op->num_syncs; i++)
2991 installed |= xe_sync_entry_signal(&last_op->syncs[i],
2993 &last_op->fence->fence);
2994 if (!installed && last_op->fence)
2995 dma_fence_signal(&last_op->fence->fence);
2997 spin_lock_irq(&vm->async_ops.lock);
2998 list_splice_tail(ops_list, &vm->async_ops.pending);
2999 spin_unlock_irq(&vm->async_ops.lock);
3001 if (!vm->async_ops.error)
3002 queue_work(system_unbound_wq, &vm->async_ops.work);
3008 list_for_each_entry_reverse(op, ops_list, link)
3009 xe_vma_op_unwind(vm, op, op->flags & XE_VMA_OP_COMMITTED);
3010 list_for_each_entry_safe(op, next, ops_list, link)
3011 xe_vma_op_cleanup(vm, op);
3017 * Unwind operations list, called after a failure of vm_bind_ioctl_ops_create or
3018 * vm_bind_ioctl_ops_parse.
3020 static void vm_bind_ioctl_ops_unwind(struct xe_vm *vm,
3021 struct drm_gpuva_ops **ops,
3026 for (i = 0; i < num_ops_list; ++i) {
3027 struct drm_gpuva_ops *__ops = ops[i];
3028 struct drm_gpuva_op *__op;
3033 drm_gpuva_for_each_op(__op, __ops) {
3034 struct xe_vma_op *op = gpuva_op_to_vma_op(__op);
3036 xe_vma_op_unwind(vm, op, false);
3041 #ifdef TEST_VM_ASYNC_OPS_ERROR
3042 #define SUPPORTED_FLAGS \
3043 (FORCE_ASYNC_OP_ERROR | XE_VM_BIND_FLAG_ASYNC | \
3044 XE_VM_BIND_FLAG_READONLY | XE_VM_BIND_FLAG_IMMEDIATE | \
3045 XE_VM_BIND_FLAG_NULL | 0xffff)
3047 #define SUPPORTED_FLAGS \
3048 (XE_VM_BIND_FLAG_ASYNC | XE_VM_BIND_FLAG_READONLY | \
3049 XE_VM_BIND_FLAG_IMMEDIATE | XE_VM_BIND_FLAG_NULL | 0xffff)
3051 #define XE_64K_PAGE_MASK 0xffffull
3053	#define MAX_BINDS	512	/* FIXME: Arbitrarily chosen upper limit */
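/*
 * Rough userspace-side sketch (illustrative only; field and flag names are
 * the ones checked below, the exact uAPI layout lives in xe_drm.h). A single
 * synchronous MAP of a BO at a page-aligned gpu_va/size would be submitted
 * roughly as:
 *
 *	struct drm_xe_vm_bind_op bop = {
 *		.obj        = bo_handle,
 *		.obj_offset = 0,
 *		.addr       = gpu_va,
 *		.range      = size,
 *		.op         = XE_VM_BIND_OP_MAP,
 *	};
 *	struct drm_xe_vm_bind bind = {
 *		.vm_id      = vm_id,
 *		.num_binds  = 1,
 *		.bind       = bop,
 *	};
 *	ioctl(fd, DRM_IOCTL_XE_VM_BIND, &bind);
 *
 * With num_binds > 1 the ops are instead passed as an array through
 * vector_of_binds, and VMs created with async binds must OR
 * XE_VM_BIND_FLAG_ASYNC into .op.
 */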
3055 static int vm_bind_ioctl_check_args(struct xe_device *xe,
3056 struct drm_xe_vm_bind *args,
3057 struct drm_xe_vm_bind_op **bind_ops,
3063 if (XE_IOCTL_DBG(xe, args->extensions) ||
3064 XE_IOCTL_DBG(xe, !args->num_binds) ||
3065 XE_IOCTL_DBG(xe, args->num_binds > MAX_BINDS))
3068 if (args->num_binds > 1) {
3069 u64 __user *bind_user =
3070 u64_to_user_ptr(args->vector_of_binds);
3072 *bind_ops = kmalloc(sizeof(struct drm_xe_vm_bind_op) *
3073 args->num_binds, GFP_KERNEL);
3077 err = __copy_from_user(*bind_ops, bind_user,
3078 sizeof(struct drm_xe_vm_bind_op) *
3080 if (XE_IOCTL_DBG(xe, err)) {
3085 *bind_ops = &args->bind;
3088 for (i = 0; i < args->num_binds; ++i) {
3089 u64 range = (*bind_ops)[i].range;
3090 u64 addr = (*bind_ops)[i].addr;
3091 u32 op = (*bind_ops)[i].op;
3092 u32 obj = (*bind_ops)[i].obj;
3093 u64 obj_offset = (*bind_ops)[i].obj_offset;
3094 u32 region = (*bind_ops)[i].region;
3095 bool is_null = op & XE_VM_BIND_FLAG_NULL;
3098 *async = !!(op & XE_VM_BIND_FLAG_ASYNC);
3099 } else if (XE_IOCTL_DBG(xe, !*async) ||
3100 XE_IOCTL_DBG(xe, !(op & XE_VM_BIND_FLAG_ASYNC)) ||
3101 XE_IOCTL_DBG(xe, VM_BIND_OP(op) ==
3102 XE_VM_BIND_OP_RESTART)) {
3107 if (XE_IOCTL_DBG(xe, !*async &&
3108 VM_BIND_OP(op) == XE_VM_BIND_OP_UNMAP_ALL)) {
3113 if (XE_IOCTL_DBG(xe, !*async &&
3114 VM_BIND_OP(op) == XE_VM_BIND_OP_PREFETCH)) {
3119 if (XE_IOCTL_DBG(xe, VM_BIND_OP(op) >
3120 XE_VM_BIND_OP_PREFETCH) ||
3121 XE_IOCTL_DBG(xe, op & ~SUPPORTED_FLAGS) ||
3122 XE_IOCTL_DBG(xe, obj && is_null) ||
3123 XE_IOCTL_DBG(xe, obj_offset && is_null) ||
3124 XE_IOCTL_DBG(xe, VM_BIND_OP(op) != XE_VM_BIND_OP_MAP &&
3126 XE_IOCTL_DBG(xe, !obj &&
3127 VM_BIND_OP(op) == XE_VM_BIND_OP_MAP &&
3129 XE_IOCTL_DBG(xe, !obj &&
3130 VM_BIND_OP(op) == XE_VM_BIND_OP_UNMAP_ALL) ||
3131 XE_IOCTL_DBG(xe, addr &&
3132 VM_BIND_OP(op) == XE_VM_BIND_OP_UNMAP_ALL) ||
3133 XE_IOCTL_DBG(xe, range &&
3134 VM_BIND_OP(op) == XE_VM_BIND_OP_UNMAP_ALL) ||
3135 XE_IOCTL_DBG(xe, obj &&
3136 VM_BIND_OP(op) == XE_VM_BIND_OP_MAP_USERPTR) ||
3137 XE_IOCTL_DBG(xe, obj &&
3138 VM_BIND_OP(op) == XE_VM_BIND_OP_PREFETCH) ||
3139 XE_IOCTL_DBG(xe, region &&
3140 VM_BIND_OP(op) != XE_VM_BIND_OP_PREFETCH) ||
3141 XE_IOCTL_DBG(xe, !(BIT(region) &
3142 xe->info.mem_region_mask)) ||
3143 XE_IOCTL_DBG(xe, obj &&
3144 VM_BIND_OP(op) == XE_VM_BIND_OP_UNMAP)) {
3149 if (XE_IOCTL_DBG(xe, obj_offset & ~PAGE_MASK) ||
3150 XE_IOCTL_DBG(xe, addr & ~PAGE_MASK) ||
3151 XE_IOCTL_DBG(xe, range & ~PAGE_MASK) ||
3152 XE_IOCTL_DBG(xe, !range && VM_BIND_OP(op) !=
3153 XE_VM_BIND_OP_RESTART &&
3154 VM_BIND_OP(op) != XE_VM_BIND_OP_UNMAP_ALL)) {
3163 if (args->num_binds > 1)
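/*
 * xe_vm_bind_ioctl() ties the helpers above together: validate the bind-op
 * array, look up the optional bind engine and the VM, handle the RESTART op
 * for async error recovery, range-check every bind against the VM and its
 * BOs, parse the user syncs, build drm_gpuva_ops for each bind, turn them
 * into an xe_vma_op list and commit it. Failures after op creation unwind
 * through vm_bind_ioctl_ops_unwind(); the -ENODATA case signals the
 * remaining syncs with a stub fence and is reported to userspace as success.
 */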
3168 int xe_vm_bind_ioctl(struct drm_device *dev, void *data, struct drm_file *file)
3170 struct xe_device *xe = to_xe_device(dev);
3171 struct xe_file *xef = to_xe_file(file);
3172 struct drm_xe_vm_bind *args = data;
3173 struct drm_xe_sync __user *syncs_user;
3174 struct xe_bo **bos = NULL;
3175 struct drm_gpuva_ops **ops = NULL;
3177 struct xe_engine *e = NULL;
3179 struct xe_sync_entry *syncs = NULL;
3180 struct drm_xe_vm_bind_op *bind_ops;
3181 LIST_HEAD(ops_list);
3186 err = vm_bind_ioctl_check_args(xe, args, &bind_ops, &async);
3190 if (args->engine_id) {
3191 e = xe_engine_lookup(xef, args->engine_id);
3192 if (XE_IOCTL_DBG(xe, !e)) {
3197 if (XE_IOCTL_DBG(xe, !(e->flags & ENGINE_FLAG_VM))) {
3203 vm = xe_vm_lookup(xef, args->vm_id);
3204 if (XE_IOCTL_DBG(xe, !vm)) {
3209 err = down_write_killable(&vm->lock);
3213 if (XE_IOCTL_DBG(xe, xe_vm_is_closed_or_banned(vm))) {
3215 goto release_vm_lock;
3218 if (VM_BIND_OP(bind_ops[0].op) == XE_VM_BIND_OP_RESTART) {
3219 if (XE_IOCTL_DBG(xe, !(vm->flags & XE_VM_FLAG_ASYNC_BIND_OPS)))
3221 if (XE_IOCTL_DBG(xe, !err && args->num_syncs))
3223 if (XE_IOCTL_DBG(xe, !err && !vm->async_ops.error))
3227 trace_xe_vm_restart(vm);
3228 vm_set_async_error(vm, 0);
3230 queue_work(system_unbound_wq, &vm->async_ops.work);
3232		/* Rebinds may have been blocked; give the worker a kick */
3233 if (xe_vm_in_compute_mode(vm))
3234 xe_vm_queue_rebind_worker(vm);
3237 goto release_vm_lock;
3240 if (XE_IOCTL_DBG(xe, !vm->async_ops.error &&
3241 async != !!(vm->flags & XE_VM_FLAG_ASYNC_BIND_OPS))) {
3243 goto release_vm_lock;
3246 for (i = 0; i < args->num_binds; ++i) {
3247 u64 range = bind_ops[i].range;
3248 u64 addr = bind_ops[i].addr;
3250 if (XE_IOCTL_DBG(xe, range > vm->size) ||
3251 XE_IOCTL_DBG(xe, addr > vm->size - range)) {
3253 goto release_vm_lock;
3256 if (bind_ops[i].tile_mask) {
3257 u64 valid_tiles = BIT(xe->info.tile_count) - 1;
3259 if (XE_IOCTL_DBG(xe, bind_ops[i].tile_mask &
3262 goto release_vm_lock;
3267 bos = kzalloc(sizeof(*bos) * args->num_binds, GFP_KERNEL);
3270 goto release_vm_lock;
3273 ops = kzalloc(sizeof(*ops) * args->num_binds, GFP_KERNEL);
3276 goto release_vm_lock;
3279 for (i = 0; i < args->num_binds; ++i) {
3280 struct drm_gem_object *gem_obj;
3281 u64 range = bind_ops[i].range;
3282 u64 addr = bind_ops[i].addr;
3283 u32 obj = bind_ops[i].obj;
3284 u64 obj_offset = bind_ops[i].obj_offset;
3289 gem_obj = drm_gem_object_lookup(file, obj);
3290 if (XE_IOCTL_DBG(xe, !gem_obj)) {
3294 bos[i] = gem_to_xe_bo(gem_obj);
3296 if (XE_IOCTL_DBG(xe, range > bos[i]->size) ||
3297 XE_IOCTL_DBG(xe, obj_offset >
3298 bos[i]->size - range)) {
3303 if (bos[i]->flags & XE_BO_INTERNAL_64K) {
3304 if (XE_IOCTL_DBG(xe, obj_offset &
3305 XE_64K_PAGE_MASK) ||
3306 XE_IOCTL_DBG(xe, addr & XE_64K_PAGE_MASK) ||
3307 XE_IOCTL_DBG(xe, range & XE_64K_PAGE_MASK)) {
3314 if (args->num_syncs) {
3315 syncs = kcalloc(args->num_syncs, sizeof(*syncs), GFP_KERNEL);
3322 syncs_user = u64_to_user_ptr(args->syncs);
3323 for (num_syncs = 0; num_syncs < args->num_syncs; num_syncs++) {
3324 err = xe_sync_entry_parse(xe, xef, &syncs[num_syncs],
3325 &syncs_user[num_syncs], false,
3326 xe_vm_no_dma_fences(vm));
3331 /* Do some error checking first to make the unwind easier */
3332 for (i = 0; i < args->num_binds; ++i) {
3333 u64 range = bind_ops[i].range;
3334 u64 addr = bind_ops[i].addr;
3335 u32 op = bind_ops[i].op;
3337 err = vm_bind_ioctl_lookup_vma(vm, bos[i], addr, range, op);
3342 for (i = 0; i < args->num_binds; ++i) {
3343 u64 range = bind_ops[i].range;
3344 u64 addr = bind_ops[i].addr;
3345 u32 op = bind_ops[i].op;
3346 u64 obj_offset = bind_ops[i].obj_offset;
3347 u8 tile_mask = bind_ops[i].tile_mask;
3348 u32 region = bind_ops[i].region;
3350 ops[i] = vm_bind_ioctl_ops_create(vm, bos[i], obj_offset,
3351 addr, range, op, tile_mask,
3353 if (IS_ERR(ops[i])) {
3354 err = PTR_ERR(ops[i]);
3360 err = vm_bind_ioctl_ops_parse(vm, e, ops, args->num_binds,
3361 syncs, num_syncs, &ops_list, async);
3365 err = vm_bind_ioctl_ops_commit(vm, &ops_list, async);
3366 up_write(&vm->lock);
3368 for (i = 0; i < args->num_binds; ++i)
3373 if (args->num_binds > 1)
3379 vm_bind_ioctl_ops_unwind(vm, ops, args->num_binds);
3381 for (i = 0; err == -ENODATA && i < num_syncs; i++)
3382 xe_sync_entry_signal(&syncs[i], NULL, dma_fence_get_stub());
3384 xe_sync_entry_cleanup(&syncs[num_syncs]);
3388 for (i = 0; i < args->num_binds; ++i)
3391 up_write(&vm->lock);
3400 if (args->num_binds > 1)
3402 return err == -ENODATA ? 0 : err;
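/*
 * Sketch of how callers are expected to pair the helpers below (num_resv is
 * however many fence slots the caller intends to add afterwards):
 *
 *	struct ww_acquire_ctx ww;
 *	int err = xe_vm_lock(vm, &ww, 0, true);
 *
 *	if (!err) {
 *		... operate on state protected by the VM's dma-resv ...
 *		xe_vm_unlock(vm, &ww);
 *	}
 */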
3406 * XXX: Using the TTM wrappers for now, likely can call into dma-resv code
3407 * directly to optimize. Also this likely should be an inline function.
3409 int xe_vm_lock(struct xe_vm *vm, struct ww_acquire_ctx *ww,
3410 int num_resv, bool intr)
3412 struct ttm_validate_buffer tv_vm;
3418 tv_vm.num_shared = num_resv;
3419 tv_vm.bo = xe_vm_ttm_bo(vm);
3420 list_add_tail(&tv_vm.head, &objs);
3422 return ttm_eu_reserve_buffers(ww, &objs, intr, &dups);
3425 void xe_vm_unlock(struct xe_vm *vm, struct ww_acquire_ctx *ww)
3427 dma_resv_unlock(xe_vm_resv(vm));
3428 ww_acquire_fini(ww);
3432 * xe_vm_invalidate_vma - invalidate GPU mappings for VMA without a lock
3433 * @vma: VMA to invalidate
3435 * Walks the list of page-table leaves, zeroing the entries owned by this
3436 * VMA, invalidates the TLBs, and blocks until the TLB invalidation is complete.
3439 * Returns 0 for success, negative error code otherwise.
3441 int xe_vm_invalidate_vma(struct xe_vma *vma)
3443 struct xe_device *xe = xe_vma_vm(vma)->xe;
3444 struct xe_tile *tile;
3445 u32 tile_needs_invalidate = 0;
3446 int seqno[XE_MAX_TILES_PER_DEVICE];
3450 XE_BUG_ON(!xe_vm_in_fault_mode(xe_vma_vm(vma)));
3451 XE_WARN_ON(xe_vma_is_null(vma));
3452 trace_xe_vma_usm_invalidate(vma);
3454 /* Check that we don't race with page-table updates */
3455 if (IS_ENABLED(CONFIG_PROVE_LOCKING)) {
3456 if (xe_vma_is_userptr(vma)) {
3457 WARN_ON_ONCE(!mmu_interval_check_retry
3458 (&vma->userptr.notifier,
3459 vma->userptr.notifier_seq));
3460 WARN_ON_ONCE(!dma_resv_test_signaled(xe_vm_resv(xe_vma_vm(vma)),
3461 DMA_RESV_USAGE_BOOKKEEP));
3464 xe_bo_assert_held(xe_vma_bo(vma));
3468 for_each_tile(tile, xe, id) {
3469 if (xe_pt_zap_ptes(tile, vma)) {
3470 tile_needs_invalidate |= BIT(id);
3473 * FIXME: We potentially need to invalidate multiple
3474 * GTs within the tile
3476 seqno[id] = xe_gt_tlb_invalidation_vma(tile->primary_gt, NULL, vma);
3482 for_each_tile(tile, xe, id) {
3483 if (tile_needs_invalidate & BIT(id)) {
3484 ret = xe_gt_tlb_invalidation_wait(tile->primary_gt, seqno[id]);
3490 vma->usm.tile_invalidated = vma->tile_mask;
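/*
 * xe_analyze_vm() is a debug-capture helper: it walks every VMA in the VM and
 * prints its VA range, size and a representative backing address, tagging the
 * entry as NULL (sparse), USR (userptr) or VRAM/SYS depending on where the
 * first page currently lives. It only trylocks vm->lock so a capture does not
 * block if the lock is contended.
 */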
3495 int xe_analyze_vm(struct drm_printer *p, struct xe_vm *vm, int gt_id)
3497 struct drm_gpuva *gpuva;
3501 if (!down_read_trylock(&vm->lock)) {
3502 drm_printf(p, " Failed to acquire VM lock to dump capture");
3505 if (vm->pt_root[gt_id]) {
3506 addr = xe_bo_addr(vm->pt_root[gt_id]->bo, 0, XE_PAGE_SIZE,
3508 drm_printf(p, " VM root: A:0x%llx %s\n", addr, is_vram ? "VRAM" : "SYS");
3511 drm_gpuvm_for_each_va(gpuva, &vm->gpuvm) {
3512 struct xe_vma *vma = gpuva_to_vma(gpuva);
3513 bool is_userptr = xe_vma_is_userptr(vma);
3514 bool is_null = xe_vma_is_null(vma);
3518 } else if (is_userptr) {
3519 struct xe_res_cursor cur;
3521 if (vma->userptr.sg) {
3522 xe_res_first_sg(vma->userptr.sg, 0, XE_PAGE_SIZE,
3524 addr = xe_res_dma(&cur);
3529 addr = __xe_bo_addr(xe_vma_bo(vma), 0, XE_PAGE_SIZE, &is_vram);
3531 drm_printf(p, " [%016llx-%016llx] S:0x%016llx A:%016llx %s\n",
3532 xe_vma_start(vma), xe_vma_end(vma) - 1,
3534 addr, is_null ? "NULL" : is_userptr ? "USR" :
3535 is_vram ? "VRAM" : "SYS");