1 // SPDX-License-Identifier: MIT
3 * Copyright © 2021 Intel Corporation
8 #include <linux/dma-fence-array.h>
10 #include <drm/drm_print.h>
11 #include <drm/ttm/ttm_execbuf_util.h>
12 #include <drm/ttm/ttm_tt.h>
13 #include <drm/xe_drm.h>
14 #include <linux/delay.h>
15 #include <linux/kthread.h>
17 #include <linux/swap.h>
20 #include "xe_device.h"
21 #include "xe_engine.h"
23 #include "xe_gt_pagefault.h"
24 #include "xe_gt_tlb_invalidation.h"
25 #include "xe_migrate.h"
27 #include "xe_preempt_fence.h"
29 #include "xe_res_cursor.h"
33 #define TEST_VM_ASYNC_OPS_ERROR
36 * xe_vma_userptr_check_repin() - Advisory check for repin needed
37 * @vma: The userptr vma
39 * Check if the userptr vma has been invalidated since last successful
40 * repin. The check is advisory only and the function can be called
41 * without the vm->userptr.notifier_lock held. There is no guarantee that the
42 * vma userptr will remain valid after a lockless check, so typically
43 * the call needs to be followed by a proper check under the notifier_lock.
45 * Return: 0 if userptr vma is valid, -EAGAIN otherwise; repin recommended.
47 int xe_vma_userptr_check_repin(struct xe_vma *vma)
49 return mmu_interval_check_retry(&vma->userptr.notifier,
50 vma->userptr.notifier_seq) ?
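/*
 * Illustrative usage (a sketch, not driver code; the caller shape is
 * hypothetical): the lockless advisory check can gate an early repin,
 * but the authoritative check still happens under the notifier_lock:
 *
 *	if (xe_vma_userptr_check_repin(vma) == -EAGAIN)
 *		err = xe_vma_userptr_pin_pages(vma);
 *
 *	down_read(&vm->userptr.notifier_lock);
 *	err = __xe_vm_userptr_needs_repin(vm);
 *	up_read(&vm->userptr.notifier_lock);
 */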
54 int xe_vma_userptr_pin_pages(struct xe_vma *vma)
56 struct xe_vm *vm = xe_vma_vm(vma);
57 struct xe_device *xe = vm->xe;
58 const unsigned long num_pages = xe_vma_size(vma) >> PAGE_SHIFT;
60 bool in_kthread = !current->mm;
61 unsigned long notifier_seq;
63 bool read_only = xe_vma_read_only(vma);
65 lockdep_assert_held(&vm->lock);
66 XE_BUG_ON(!xe_vma_is_userptr(vma));
71 notifier_seq = mmu_interval_read_begin(&vma->userptr.notifier);
72 if (notifier_seq == vma->userptr.notifier_seq)
75 pages = kvmalloc_array(num_pages, sizeof(*pages), GFP_KERNEL);
79 if (vma->userptr.sg) {
80 dma_unmap_sgtable(xe->drm.dev,
82 read_only ? DMA_TO_DEVICE :
83 DMA_BIDIRECTIONAL, 0);
84 sg_free_table(vma->userptr.sg);
85 vma->userptr.sg = NULL;
90 if (!mmget_not_zero(vma->userptr.notifier.mm)) {
94 kthread_use_mm(vma->userptr.notifier.mm);
97 while (pinned < num_pages) {
98 ret = get_user_pages_fast(xe_vma_userptr(vma) +
101 read_only ? 0 : FOLL_WRITE,
114 kthread_unuse_mm(vma->userptr.notifier.mm);
115 mmput(vma->userptr.notifier.mm);
121 ret = sg_alloc_table_from_pages_segment(&vma->userptr.sgt, pages,
123 (u64)pinned << PAGE_SHIFT,
124 xe_sg_segment_size(xe->drm.dev),
127 vma->userptr.sg = NULL;
130 vma->userptr.sg = &vma->userptr.sgt;
132 ret = dma_map_sgtable(xe->drm.dev, vma->userptr.sg,
133 read_only ? DMA_TO_DEVICE :
135 DMA_ATTR_SKIP_CPU_SYNC |
136 DMA_ATTR_NO_KERNEL_MAPPING);
138 sg_free_table(vma->userptr.sg);
139 vma->userptr.sg = NULL;
143 for (i = 0; i < pinned; ++i) {
146 set_page_dirty(pages[i]);
147 unlock_page(pages[i]);
150 mark_page_accessed(pages[i]);
154 release_pages(pages, pinned);
158 vma->userptr.notifier_seq = notifier_seq;
159 if (xe_vma_userptr_check_repin(vma) == -EAGAIN)
163 return ret < 0 ? ret : 0;
166 static bool preempt_fences_waiting(struct xe_vm *vm)
170 lockdep_assert_held(&vm->lock);
171 xe_vm_assert_held(vm);
173 list_for_each_entry(e, &vm->preempt.engines, compute.link) {
174 if (!e->compute.pfence || (e->compute.pfence &&
175 test_bit(DMA_FENCE_FLAG_ENABLE_SIGNAL_BIT,
176 &e->compute.pfence->flags))) {
184 static void free_preempt_fences(struct list_head *list)
186 struct list_head *link, *next;
188 list_for_each_safe(link, next, list)
189 xe_preempt_fence_free(to_preempt_fence_from_link(link));
192 static int alloc_preempt_fences(struct xe_vm *vm, struct list_head *list,
195 lockdep_assert_held(&vm->lock);
196 xe_vm_assert_held(vm);
198 if (*count >= vm->preempt.num_engines)
201 for (; *count < vm->preempt.num_engines; ++(*count)) {
202 struct xe_preempt_fence *pfence = xe_preempt_fence_alloc();
205 return PTR_ERR(pfence);
207 list_move_tail(xe_preempt_fence_link(pfence), list);
213 static int wait_for_existing_preempt_fences(struct xe_vm *vm)
217 xe_vm_assert_held(vm);
219 list_for_each_entry(e, &vm->preempt.engines, compute.link) {
220 if (e->compute.pfence) {
221 long timeout = dma_fence_wait(e->compute.pfence, false);
225 dma_fence_put(e->compute.pfence);
226 e->compute.pfence = NULL;
233 static bool xe_vm_is_idle(struct xe_vm *vm)
237 xe_vm_assert_held(vm);
238 list_for_each_entry(e, &vm->preempt.engines, compute.link) {
239 if (!xe_engine_is_idle(e))
246 static void arm_preempt_fences(struct xe_vm *vm, struct list_head *list)
248 struct list_head *link;
251 list_for_each_entry(e, &vm->preempt.engines, compute.link) {
252 struct dma_fence *fence;
255 XE_BUG_ON(link == list);
257 fence = xe_preempt_fence_arm(to_preempt_fence_from_link(link),
258 e, e->compute.context,
260 dma_fence_put(e->compute.pfence);
261 e->compute.pfence = fence;
265 static int add_preempt_fences(struct xe_vm *vm, struct xe_bo *bo)
268 struct ww_acquire_ctx ww;
271 err = xe_bo_lock(bo, &ww, vm->preempt.num_engines, true);
275 list_for_each_entry(e, &vm->preempt.engines, compute.link)
276 if (e->compute.pfence) {
277 dma_resv_add_fence(bo->ttm.base.resv,
279 DMA_RESV_USAGE_BOOKKEEP);
282 xe_bo_unlock(bo, &ww);
287 * xe_vm_fence_all_extobjs() - Add a fence to vm's external objects' resv
289 * @fence: The fence to add.
290 * @usage: The resv usage for the fence.
292 * Loops over all of the vm's external object bindings and adds a @fence
293 * with the given @usage to all of the external objects' reservation
296 void xe_vm_fence_all_extobjs(struct xe_vm *vm, struct dma_fence *fence,
297 enum dma_resv_usage usage)
301 list_for_each_entry(vma, &vm->extobj.list, extobj.link)
302 dma_resv_add_fence(xe_vma_bo(vma)->ttm.base.resv, fence, usage);
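/*
 * Illustrative usage (sketch only): a fence armed for the whole VM is
 * typically added both to the VM's own reservation object and to every
 * external object, e.g.:
 *
 *	dma_resv_add_fence(&vm->resv, fence, DMA_RESV_USAGE_BOOKKEEP);
 *	xe_vm_fence_all_extobjs(vm, fence, DMA_RESV_USAGE_BOOKKEEP);
 *
 * as done by resume_and_reinstall_preempt_fences() below.
 */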
305 static void resume_and_reinstall_preempt_fences(struct xe_vm *vm)
309 lockdep_assert_held(&vm->lock);
310 xe_vm_assert_held(vm);
312 list_for_each_entry(e, &vm->preempt.engines, compute.link) {
315 dma_resv_add_fence(&vm->resv, e->compute.pfence,
316 DMA_RESV_USAGE_BOOKKEEP);
317 xe_vm_fence_all_extobjs(vm, e->compute.pfence,
318 DMA_RESV_USAGE_BOOKKEEP);
322 int xe_vm_add_compute_engine(struct xe_vm *vm, struct xe_engine *e)
324 struct ttm_validate_buffer tv_onstack[XE_ONSTACK_TV];
325 struct ttm_validate_buffer *tv;
326 struct ww_acquire_ctx ww;
327 struct list_head objs;
328 struct dma_fence *pfence;
332 XE_BUG_ON(!xe_vm_in_compute_mode(vm));
334 down_write(&vm->lock);
336 err = xe_vm_lock_dma_resv(vm, &ww, tv_onstack, &tv, &objs, true, 1);
338 goto out_unlock_outer;
340 pfence = xe_preempt_fence_create(e, e->compute.context,
347 list_add(&e->compute.link, &vm->preempt.engines);
348 ++vm->preempt.num_engines;
349 e->compute.pfence = pfence;
351 down_read(&vm->userptr.notifier_lock);
353 dma_resv_add_fence(&vm->resv, pfence,
354 DMA_RESV_USAGE_BOOKKEEP);
356 xe_vm_fence_all_extobjs(vm, pfence, DMA_RESV_USAGE_BOOKKEEP);
359 * Check whether a preemption on the VM or a userptr invalidation is
360 * in flight; if so, trigger this preempt fence to sync state with the
361 * other preempt fences on the VM.
363 wait = __xe_vm_userptr_needs_repin(vm) || preempt_fences_waiting(vm);
365 dma_fence_enable_sw_signaling(pfence);
367 up_read(&vm->userptr.notifier_lock);
370 xe_vm_unlock_dma_resv(vm, tv_onstack, tv, &ww, &objs);
378 * __xe_vm_userptr_needs_repin() - Check whether the VM does have userptrs
379 * that need repinning.
382 * This function checks whether the VM has userptrs that need repinning,
383 * and provides a release-type barrier on the userptr.notifier_lock after
386 * Return: 0 if there are no userptrs needing repinning, -EAGAIN if there are.
388 int __xe_vm_userptr_needs_repin(struct xe_vm *vm)
390 lockdep_assert_held_read(&vm->userptr.notifier_lock);
392 return (list_empty(&vm->userptr.repin_list) &&
393 list_empty(&vm->userptr.invalidated)) ? 0 : -EAGAIN;
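/*
 * Illustrative ordering (a sketch mirroring xe_vm_add_compute_engine()
 * above): the check is made under the read-held notifier_lock, after the
 * preempt fence has been installed:
 *
 *	down_read(&vm->userptr.notifier_lock);
 *	dma_resv_add_fence(&vm->resv, pfence, DMA_RESV_USAGE_BOOKKEEP);
 *	wait = __xe_vm_userptr_needs_repin(vm) || preempt_fences_waiting(vm);
 *	up_read(&vm->userptr.notifier_lock);
 */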
397 * xe_vm_lock_dma_resv() - Lock the vm dma_resv object and the dma_resv
398 * objects of the vm's external buffer objects.
400 * @ww: Pointer to a struct ww_acquire_ctx locking context.
401 * @tv_onstack: Array size XE_ONSTACK_TV of storage for the struct
402 * ttm_validate_buffers used for locking.
403 * @tv: Pointer to a pointer that on output contains the actual storage used.
404 * @objs: List head for the buffer objects locked.
405 * @intr: Whether to lock interruptible.
406 * @num_shared: Number of dma-fence slots to reserve in the locked objects.
408 * Locks the vm dma-resv objects and all the dma-resv objects of the
409 * buffer objects on the vm external object list. The TTM utilities require
410 * a list of struct ttm_validate_buffers pointing to the actual buffer
411 * objects to lock. Storage for those struct ttm_validate_buffers should
412 * be provided in @tv_onstack, and is typically reserved on the stack
413 * of the caller. If the size of @tv_onstack isn't sufficient, then
414 * storage will be allocated internally using kvmalloc().
416 * The function performs deadlock handling internally, and after a
417 * successful return the ww locking transaction should be considered
420 * Return: 0 on success, negative error code on error. In particular, if
421 * @intr is set to true, -EINTR or -ERESTARTSYS may be returned. In case
422 * of error, any locking performed has been reverted.
424 int xe_vm_lock_dma_resv(struct xe_vm *vm, struct ww_acquire_ctx *ww,
425 struct ttm_validate_buffer *tv_onstack,
426 struct ttm_validate_buffer **tv,
427 struct list_head *objs,
429 unsigned int num_shared)
431 struct ttm_validate_buffer *tv_vm, *tv_bo;
432 struct xe_vma *vma, *next;
436 lockdep_assert_held(&vm->lock);
438 if (vm->extobj.entries < XE_ONSTACK_TV) {
441 tv_vm = kvmalloc_array(vm->extobj.entries + 1, sizeof(*tv_vm),
448 INIT_LIST_HEAD(objs);
449 list_for_each_entry(vma, &vm->extobj.list, extobj.link) {
450 tv_bo->num_shared = num_shared;
451 tv_bo->bo = &xe_vma_bo(vma)->ttm;
453 list_add_tail(&tv_bo->head, objs);
456 tv_vm->num_shared = num_shared;
457 tv_vm->bo = xe_vm_ttm_bo(vm);
458 list_add_tail(&tv_vm->head, objs);
459 err = ttm_eu_reserve_buffers(ww, objs, intr, &dups);
463 spin_lock(&vm->notifier.list_lock);
464 list_for_each_entry_safe(vma, next, &vm->notifier.rebind_list,
465 notifier.rebind_link) {
466 xe_bo_assert_held(xe_vma_bo(vma));
468 list_del_init(&vma->notifier.rebind_link);
469 if (vma->tile_present && !vma->destroyed)
470 list_move_tail(&vma->rebind_link, &vm->rebind_list);
472 spin_unlock(&vm->notifier.list_lock);
478 if (tv_vm != tv_onstack)
485 * xe_vm_unlock_dma_resv() - Unlock reservation objects locked by
486 * xe_vm_lock_dma_resv()
488 * @tv_onstack: The @tv_onstack array given to xe_vm_lock_dma_resv().
489 * @tv: The value of *@tv given by xe_vm_lock_dma_resv().
490 * @ww: The ww_acquire_context used for locking.
491 * @objs: The list returned from xe_vm_lock_dma_resv().
493 * Unlocks the reservation objects and frees any memory allocated by
494 * xe_vm_lock_dma_resv().
496 void xe_vm_unlock_dma_resv(struct xe_vm *vm,
497 struct ttm_validate_buffer *tv_onstack,
498 struct ttm_validate_buffer *tv,
499 struct ww_acquire_ctx *ww,
500 struct list_head *objs)
503 * Nothing should've been able to enter the list while we were locked,
504 * since we've held the dma-resvs of all the vm's external objects,
505 * and holding the dma_resv of an object is required for list
506 * addition; we also shouldn't have added ourselves.
508 XE_WARN_ON(!list_empty(&vm->notifier.rebind_list));
510 ttm_eu_backoff_reservation(ww, objs);
511 if (tv && tv != tv_onstack)
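/*
 * Typical pairing (a sketch based on the callers in this file): reserve
 * on-stack ttm_validate_buffer storage, lock, operate on the locked
 * objects, then unlock:
 *
 *	struct ttm_validate_buffer tv_onstack[XE_ONSTACK_TV];
 *	struct ttm_validate_buffer *tv;
 *	struct ww_acquire_ctx ww;
 *	struct list_head objs;
 *	int err;
 *
 *	err = xe_vm_lock_dma_resv(vm, &ww, tv_onstack, &tv, &objs, true, 1);
 *	if (err)
 *		return err;
 *	... validate BOs / add fences while everything is locked ...
 *	xe_vm_unlock_dma_resv(vm, tv_onstack, tv, &ww, &objs);
 */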
515 #define XE_VM_REBIND_RETRY_TIMEOUT_MS 1000
517 static void xe_vm_kill(struct xe_vm *vm)
519 struct ww_acquire_ctx ww;
522 lockdep_assert_held(&vm->lock);
524 xe_vm_lock(vm, &ww, 0, false);
525 vm->flags |= XE_VM_FLAG_BANNED;
526 trace_xe_vm_kill(vm);
528 list_for_each_entry(e, &vm->preempt.engines, compute.link)
530 xe_vm_unlock(vm, &ww);
532 /* TODO: Inform user the VM is banned */
535 static void preempt_rebind_work_func(struct work_struct *w)
537 struct xe_vm *vm = container_of(w, struct xe_vm, preempt.rebind_work);
539 struct ttm_validate_buffer tv_onstack[XE_ONSTACK_TV];
540 struct ttm_validate_buffer *tv;
541 struct ww_acquire_ctx ww;
542 struct list_head objs;
543 struct dma_fence *rebind_fence;
544 unsigned int fence_count = 0;
545 LIST_HEAD(preempt_fences);
549 int __maybe_unused tries = 0;
551 XE_BUG_ON(!xe_vm_in_compute_mode(vm));
552 trace_xe_vm_rebind_worker_enter(vm);
554 down_write(&vm->lock);
556 if (xe_vm_is_closed_or_banned(vm)) {
558 trace_xe_vm_rebind_worker_exit(vm);
563 if (vm->async_ops.error)
564 goto out_unlock_outer;
567 * Extreme corner case where we exit a VM error state with a munmap style
568 * VM unbind in flight which requires a rebind. In this case the rebind
569 * needs to install some fences into the dma-resv slots. The worker to
570 * do this is already queued; let that worker make progress by dropping
571 * vm->lock and trying this again.
573 if (vm->async_ops.munmap_rebind_inflight) {
575 flush_work(&vm->async_ops.work);
579 if (xe_vm_userptr_check_repin(vm)) {
580 err = xe_vm_userptr_pin(vm);
582 goto out_unlock_outer;
585 err = xe_vm_lock_dma_resv(vm, &ww, tv_onstack, &tv, &objs,
586 false, vm->preempt.num_engines);
588 goto out_unlock_outer;
590 if (xe_vm_is_idle(vm)) {
591 vm->preempt.rebind_deactivated = true;
595 /* Fresh preempt fences already installed. Everything is running. */
596 if (!preempt_fences_waiting(vm))
600 * This makes sure the vm is completely suspended and also balances
601 * xe_engine suspend and resume; we resume *all* vm engines below.
603 err = wait_for_existing_preempt_fences(vm);
607 err = alloc_preempt_fences(vm, &preempt_fences, &fence_count);
611 list_for_each_entry(vma, &vm->rebind_list, rebind_link) {
612 if (xe_vma_has_no_bo(vma) || vma->destroyed)
615 err = xe_bo_validate(xe_vma_bo(vma), vm, false);
620 rebind_fence = xe_vm_rebind(vm, true);
621 if (IS_ERR(rebind_fence)) {
622 err = PTR_ERR(rebind_fence);
627 dma_fence_wait(rebind_fence, false);
628 dma_fence_put(rebind_fence);
631 /* Wait on munmap style VM unbinds */
632 wait = dma_resv_wait_timeout(&vm->resv,
633 DMA_RESV_USAGE_KERNEL,
634 false, MAX_SCHEDULE_TIMEOUT);
640 #define retry_required(__tries, __vm) \
641 (IS_ENABLED(CONFIG_DRM_XE_USERPTR_INVAL_INJECT) ? \
642 (!(__tries)++ || __xe_vm_userptr_needs_repin(__vm)) : \
643 __xe_vm_userptr_needs_repin(__vm))
645 down_read(&vm->userptr.notifier_lock);
646 if (retry_required(tries, vm)) {
647 up_read(&vm->userptr.notifier_lock);
652 #undef retry_required
654 spin_lock(&vm->xe->ttm.lru_lock);
655 ttm_lru_bulk_move_tail(&vm->lru_bulk_move);
656 spin_unlock(&vm->xe->ttm.lru_lock);
658 /* Point of no return. */
659 arm_preempt_fences(vm, &preempt_fences);
660 resume_and_reinstall_preempt_fences(vm);
661 up_read(&vm->userptr.notifier_lock);
664 xe_vm_unlock_dma_resv(vm, tv_onstack, tv, &ww, &objs);
666 if (err == -EAGAIN) {
667 trace_xe_vm_rebind_worker_retry(vm);
672 * With multiple active VMs, under memory pressure, it is possible that
673 * ttm_bo_validate() runs into -EDEADLK and in such a case returns -ENOMEM.
674 * Until TTM properly handles locking in such scenarios, the best thing the
675 * driver can do is retry with a timeout. Killing the VM or putting it
676 * in an error state after a timeout or other error scenarios is still TBD.
678 if (err == -ENOMEM) {
679 ktime_t cur = ktime_get();
681 end = end ? : ktime_add_ms(cur, XE_VM_REBIND_RETRY_TIMEOUT_MS);
682 if (ktime_before(cur, end)) {
684 trace_xe_vm_rebind_worker_retry(vm);
689 drm_warn(&vm->xe->drm, "VM worker error: %d\n", err);
694 free_preempt_fences(&preempt_fences);
696 trace_xe_vm_rebind_worker_exit(vm);
699 struct async_op_fence;
700 static int __xe_vm_bind(struct xe_vm *vm, struct xe_vma *vma,
701 struct xe_engine *e, struct xe_sync_entry *syncs,
702 u32 num_syncs, struct async_op_fence *afence);
704 static bool vma_userptr_invalidate(struct mmu_interval_notifier *mni,
705 const struct mmu_notifier_range *range,
706 unsigned long cur_seq)
708 struct xe_vma *vma = container_of(mni, struct xe_vma, userptr.notifier);
709 struct xe_vm *vm = xe_vma_vm(vma);
710 struct dma_resv_iter cursor;
711 struct dma_fence *fence;
714 XE_BUG_ON(!xe_vma_is_userptr(vma));
715 trace_xe_vma_userptr_invalidate(vma);
717 if (!mmu_notifier_range_blockable(range))
720 down_write(&vm->userptr.notifier_lock);
721 mmu_interval_set_seq(mni, cur_seq);
723 /* No need to stop gpu access if the userptr is not yet bound. */
724 if (!vma->userptr.initial_bind) {
725 up_write(&vm->userptr.notifier_lock);
730 * Tell exec and rebind worker they need to repin and rebind this
733 if (!xe_vm_in_fault_mode(vm) && !vma->destroyed && vma->tile_present) {
734 spin_lock(&vm->userptr.invalidated_lock);
735 list_move_tail(&vma->userptr.invalidate_link,
736 &vm->userptr.invalidated);
737 spin_unlock(&vm->userptr.invalidated_lock);
740 up_write(&vm->userptr.notifier_lock);
743 * Preempt fences turn into schedule disables, pipeline these.
744 * Note that even in fault mode, we need to wait for binds and
745 * unbinds to complete, and those are attached as BOOKKEEP fences
748 dma_resv_iter_begin(&cursor, &vm->resv,
749 DMA_RESV_USAGE_BOOKKEEP);
750 dma_resv_for_each_fence_unlocked(&cursor, fence)
751 dma_fence_enable_sw_signaling(fence);
752 dma_resv_iter_end(&cursor);
754 err = dma_resv_wait_timeout(&vm->resv,
755 DMA_RESV_USAGE_BOOKKEEP,
756 false, MAX_SCHEDULE_TIMEOUT);
757 XE_WARN_ON(err <= 0);
759 if (xe_vm_in_fault_mode(vm)) {
760 err = xe_vm_invalidate_vma(vma);
764 trace_xe_vma_userptr_invalidate_complete(vma);
769 static const struct mmu_interval_notifier_ops vma_userptr_notifier_ops = {
770 .invalidate = vma_userptr_invalidate,
773 int xe_vm_userptr_pin(struct xe_vm *vm)
775 struct xe_vma *vma, *next;
777 LIST_HEAD(tmp_evict);
779 lockdep_assert_held_write(&vm->lock);
781 /* Collect invalidated userptrs */
782 spin_lock(&vm->userptr.invalidated_lock);
783 list_for_each_entry_safe(vma, next, &vm->userptr.invalidated,
784 userptr.invalidate_link) {
785 list_del_init(&vma->userptr.invalidate_link);
786 list_move_tail(&vma->userptr_link, &vm->userptr.repin_list);
788 spin_unlock(&vm->userptr.invalidated_lock);
790 /* Pin and move to temporary list */
791 list_for_each_entry_safe(vma, next, &vm->userptr.repin_list, userptr_link) {
792 err = xe_vma_userptr_pin_pages(vma);
796 list_move_tail(&vma->userptr_link, &tmp_evict);
799 /* Take lock and move to rebind_list for rebinding. */
800 err = dma_resv_lock_interruptible(&vm->resv, NULL);
804 list_for_each_entry_safe(vma, next, &tmp_evict, userptr_link) {
805 list_del_init(&vma->userptr_link);
806 list_move_tail(&vma->rebind_link, &vm->rebind_list);
809 dma_resv_unlock(&vm->resv);
814 list_splice_tail(&tmp_evict, &vm->userptr.repin_list);
820 * xe_vm_userptr_check_repin() - Check whether the VM might have userptrs
821 * that need repinning.
824 * This function does an advisory check for whether the VM has userptrs that
827 * Return: 0 if there are no indications of userptrs needing repinning,
828 * -EAGAIN if there are.
830 int xe_vm_userptr_check_repin(struct xe_vm *vm)
832 return (list_empty_careful(&vm->userptr.repin_list) &&
833 list_empty_careful(&vm->userptr.invalidated)) ? 0 : -EAGAIN;
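/*
 * Illustrative usage (sketch, mirroring the rebind worker above): the
 * advisory VM-level check gates a full repin pass:
 *
 *	if (xe_vm_userptr_check_repin(vm)) {
 *		err = xe_vm_userptr_pin(vm);
 *		if (err)
 *			return err;
 *	}
 */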
836 static struct dma_fence *
837 xe_vm_bind_vma(struct xe_vma *vma, struct xe_engine *e,
838 struct xe_sync_entry *syncs, u32 num_syncs);
840 struct dma_fence *xe_vm_rebind(struct xe_vm *vm, bool rebind_worker)
842 struct dma_fence *fence = NULL;
843 struct xe_vma *vma, *next;
845 lockdep_assert_held(&vm->lock);
846 if (xe_vm_no_dma_fences(vm) && !rebind_worker)
849 xe_vm_assert_held(vm);
850 list_for_each_entry_safe(vma, next, &vm->rebind_list, rebind_link) {
851 XE_WARN_ON(!vma->tile_present);
853 list_del_init(&vma->rebind_link);
854 dma_fence_put(fence);
856 trace_xe_vma_rebind_worker(vma);
858 trace_xe_vma_rebind_exec(vma);
859 fence = xe_vm_bind_vma(vma, NULL, NULL, 0);
867 static struct xe_vma *xe_vma_create(struct xe_vm *vm,
869 u64 bo_offset_or_userptr,
876 struct xe_tile *tile;
879 XE_BUG_ON(start >= end);
880 XE_BUG_ON(end >= vm->size);
882 vma = kzalloc(sizeof(*vma), GFP_KERNEL);
884 vma = ERR_PTR(-ENOMEM);
888 INIT_LIST_HEAD(&vma->rebind_link);
889 INIT_LIST_HEAD(&vma->unbind_link);
890 INIT_LIST_HEAD(&vma->userptr_link);
891 INIT_LIST_HEAD(&vma->userptr.invalidate_link);
892 INIT_LIST_HEAD(&vma->notifier.rebind_link);
893 INIT_LIST_HEAD(&vma->extobj.link);
900 vma->pte_flags |= XE_PTE_FLAG_READ_ONLY;
902 vma->pte_flags |= XE_PTE_FLAG_NULL;
905 vma->tile_mask = tile_mask;
907 for_each_tile(tile, vm->xe, id)
908 vma->tile_mask |= 0x1 << id;
911 if (vm->xe->info.platform == XE_PVC)
912 vma->use_atomic_access_pte_bit = true;
915 xe_bo_assert_held(bo);
916 vma->bo_offset = bo_offset_or_userptr;
917 vma->bo = xe_bo_get(bo);
918 list_add_tail(&vma->bo_link, &bo->vmas);
919 } else /* userptr or null */ {
921 u64 size = end - start + 1;
924 vma->userptr.ptr = bo_offset_or_userptr;
926 err = mmu_interval_notifier_insert(&vma->userptr.notifier,
928 xe_vma_userptr(vma), size,
929 &vma_userptr_notifier_ops);
936 vma->userptr.notifier_seq = LONG_MAX;
945 static bool vm_remove_extobj(struct xe_vma *vma)
947 if (!list_empty(&vma->extobj.link)) {
948 xe_vma_vm(vma)->extobj.entries--;
949 list_del_init(&vma->extobj.link);
955 static void xe_vma_destroy_late(struct xe_vma *vma)
957 struct xe_vm *vm = xe_vma_vm(vma);
958 struct xe_device *xe = vm->xe;
959 bool read_only = xe_vma_read_only(vma);
961 if (xe_vma_is_userptr(vma)) {
962 if (vma->userptr.sg) {
963 dma_unmap_sgtable(xe->drm.dev,
965 read_only ? DMA_TO_DEVICE :
966 DMA_BIDIRECTIONAL, 0);
967 sg_free_table(vma->userptr.sg);
968 vma->userptr.sg = NULL;
972 * Since userptr pages are not pinned, we can't remove
973 * the notifier until we're sure the GPU is not accessing
976 mmu_interval_notifier_remove(&vma->userptr.notifier);
978 } else if (xe_vma_is_null(vma)) {
981 xe_bo_put(xe_vma_bo(vma));
987 static void vma_destroy_work_func(struct work_struct *w)
990 container_of(w, struct xe_vma, destroy_work);
992 xe_vma_destroy_late(vma);
995 static struct xe_vma *
996 bo_has_vm_references_locked(struct xe_bo *bo, struct xe_vm *vm,
997 struct xe_vma *ignore)
1001 list_for_each_entry(vma, &bo->vmas, bo_link) {
1002 if (vma != ignore && xe_vma_vm(vma) == vm)
1009 static bool bo_has_vm_references(struct xe_bo *bo, struct xe_vm *vm,
1010 struct xe_vma *ignore)
1012 struct ww_acquire_ctx ww;
1015 xe_bo_lock(bo, &ww, 0, false);
1016 ret = !!bo_has_vm_references_locked(bo, vm, ignore);
1017 xe_bo_unlock(bo, &ww);
1022 static void __vm_insert_extobj(struct xe_vm *vm, struct xe_vma *vma)
1024 list_add(&vma->extobj.link, &vm->extobj.list);
1025 vm->extobj.entries++;
1028 static void vm_insert_extobj(struct xe_vm *vm, struct xe_vma *vma)
1030 struct xe_bo *bo = xe_vma_bo(vma);
1032 lockdep_assert_held_write(&vm->lock);
1034 if (bo_has_vm_references(bo, vm, vma))
1037 __vm_insert_extobj(vm, vma);
1040 static void vma_destroy_cb(struct dma_fence *fence,
1041 struct dma_fence_cb *cb)
1043 struct xe_vma *vma = container_of(cb, struct xe_vma, destroy_cb);
1045 INIT_WORK(&vma->destroy_work, vma_destroy_work_func);
1046 queue_work(system_unbound_wq, &vma->destroy_work);
1049 static void xe_vma_destroy(struct xe_vma *vma, struct dma_fence *fence)
1051 struct xe_vm *vm = xe_vma_vm(vma);
1053 lockdep_assert_held_write(&vm->lock);
1054 XE_BUG_ON(!list_empty(&vma->unbind_link));
1056 if (xe_vma_is_userptr(vma)) {
1057 XE_WARN_ON(!vma->destroyed);
1058 spin_lock(&vm->userptr.invalidated_lock);
1059 list_del_init(&vma->userptr.invalidate_link);
1060 spin_unlock(&vm->userptr.invalidated_lock);
1061 list_del(&vma->userptr_link);
1062 } else if (!xe_vma_is_null(vma)) {
1063 xe_bo_assert_held(xe_vma_bo(vma));
1064 list_del(&vma->bo_link);
1066 spin_lock(&vm->notifier.list_lock);
1067 list_del(&vma->notifier.rebind_link);
1068 spin_unlock(&vm->notifier.list_lock);
1070 if (!xe_vma_bo(vma)->vm && vm_remove_extobj(vma)) {
1071 struct xe_vma *other;
1073 other = bo_has_vm_references_locked(xe_vma_bo(vma), vm, NULL);
1076 __vm_insert_extobj(vm, other);
1080 xe_vm_assert_held(vm);
1081 if (!list_empty(&vma->rebind_link))
1082 list_del(&vma->rebind_link);
1085 int ret = dma_fence_add_callback(fence, &vma->destroy_cb,
1089 XE_WARN_ON(ret != -ENOENT);
1090 xe_vma_destroy_late(vma);
1093 xe_vma_destroy_late(vma);
1097 static void xe_vma_destroy_unlocked(struct xe_vma *vma)
1099 struct ttm_validate_buffer tv[2];
1100 struct ww_acquire_ctx ww;
1101 struct xe_bo *bo = xe_vma_bo(vma);
1106 memset(tv, 0, sizeof(tv));
1107 tv[0].bo = xe_vm_ttm_bo(xe_vma_vm(vma));
1108 list_add(&tv[0].head, &objs);
1111 tv[1].bo = &xe_bo_get(bo)->ttm;
1112 list_add(&tv[1].head, &objs);
1114 err = ttm_eu_reserve_buffers(&ww, &objs, false, &dups);
1117 xe_vma_destroy(vma, NULL);
1119 ttm_eu_backoff_reservation(&ww, &objs);
1124 static struct xe_vma *to_xe_vma(const struct rb_node *node)
1126 BUILD_BUG_ON(offsetof(struct xe_vma, vm_node) != 0);
1127 return (struct xe_vma *)node;
1130 static int xe_vma_cmp(struct xe_vma *a, struct xe_vma *b)
1132 if (xe_vma_end(a) - 1 < xe_vma_start(b)) {
1134 } else if (xe_vma_end(b) - 1 < xe_vma_start(a)) {
1141 static bool xe_vma_less_cb(struct rb_node *a, const struct rb_node *b)
1143 return xe_vma_cmp(to_xe_vma(a), to_xe_vma(b)) < 0;
1146 int xe_vma_cmp_vma_cb(const void *key, const struct rb_node *node)
1148 struct xe_vma *cmp = to_xe_vma(node);
1149 struct xe_vma *own = (struct xe_vma *)key;
1151 if (xe_vma_start(own) > xe_vma_end(cmp) - 1)
1154 if (xe_vma_end(own) - 1 < xe_vma_start(cmp))
1161 xe_vm_find_overlapping_vma(struct xe_vm *vm, struct xe_vma *vma)
1163 struct rb_node *node;
1165 lockdep_assert_held(&vm->lock);
1167 if (xe_vm_is_closed_or_banned(vm))
1170 XE_BUG_ON(xe_vma_end(vma) > vm->size);
1172 node = rb_find(vma, &vm->vmas, xe_vma_cmp_vma_cb);
1174 return node ? to_xe_vma(node) : NULL;
1177 static void xe_vm_insert_vma(struct xe_vm *vm, struct xe_vma *vma)
1179 XE_BUG_ON(xe_vma_vm(vma) != vm);
1180 lockdep_assert_held(&vm->lock);
1182 rb_add(&vma->vm_node, &vm->vmas, xe_vma_less_cb);
1185 static void xe_vm_remove_vma(struct xe_vm *vm, struct xe_vma *vma)
1187 XE_BUG_ON(xe_vma_vm(vma) != vm);
1188 lockdep_assert_held(&vm->lock);
1190 rb_erase(&vma->vm_node, &vm->vmas);
1191 if (vm->usm.last_fault_vma == vma)
1192 vm->usm.last_fault_vma = NULL;
1195 static void async_op_work_func(struct work_struct *w);
1196 static void vm_destroy_work_func(struct work_struct *w);
1198 struct xe_vm *xe_vm_create(struct xe_device *xe, u32 flags)
1201 int err, i = 0, number_tiles = 0;
1202 struct xe_tile *tile;
1205 vm = kzalloc(sizeof(*vm), GFP_KERNEL);
1207 return ERR_PTR(-ENOMEM);
1210 kref_init(&vm->refcount);
1211 dma_resv_init(&vm->resv);
1213 vm->size = 1ull << xe_pt_shift(xe->info.vm_max_level + 1);
1218 init_rwsem(&vm->lock);
1220 INIT_LIST_HEAD(&vm->rebind_list);
1222 INIT_LIST_HEAD(&vm->userptr.repin_list);
1223 INIT_LIST_HEAD(&vm->userptr.invalidated);
1224 init_rwsem(&vm->userptr.notifier_lock);
1225 spin_lock_init(&vm->userptr.invalidated_lock);
1227 INIT_LIST_HEAD(&vm->notifier.rebind_list);
1228 spin_lock_init(&vm->notifier.list_lock);
1230 INIT_LIST_HEAD(&vm->async_ops.pending);
1231 INIT_WORK(&vm->async_ops.work, async_op_work_func);
1232 spin_lock_init(&vm->async_ops.lock);
1234 INIT_WORK(&vm->destroy_work, vm_destroy_work_func);
1236 INIT_LIST_HEAD(&vm->preempt.engines);
1237 vm->preempt.min_run_period_ms = 10; /* FIXME: Wire up to uAPI */
1239 INIT_LIST_HEAD(&vm->extobj.list);
1241 if (!(flags & XE_VM_FLAG_MIGRATION)) {
1242 /* We need to immediately exit from any D3 state */
1243 xe_pm_runtime_get(xe);
1244 xe_device_mem_access_get(xe);
1247 err = dma_resv_lock_interruptible(&vm->resv, NULL);
1251 if (IS_DGFX(xe) && xe->info.vram_flags & XE_VRAM_FLAGS_NEED64K)
1252 vm->flags |= XE_VM_FLAGS_64K;
1254 for_each_tile(tile, xe, id) {
1255 if (flags & XE_VM_FLAG_MIGRATION &&
1256 tile->id != XE_VM_FLAG_GT_ID(flags))
1259 vm->pt_root[id] = xe_pt_create(vm, tile, xe->info.vm_max_level);
1260 if (IS_ERR(vm->pt_root[id])) {
1261 err = PTR_ERR(vm->pt_root[id]);
1262 vm->pt_root[id] = NULL;
1263 goto err_destroy_root;
1267 if (flags & XE_VM_FLAG_SCRATCH_PAGE) {
1268 for_each_tile(tile, xe, id) {
1269 if (!vm->pt_root[id])
1272 err = xe_pt_create_scratch(xe, tile, vm);
1274 goto err_scratch_pt;
1276 vm->batch_invalidate_tlb = true;
1279 if (flags & XE_VM_FLAG_COMPUTE_MODE) {
1280 INIT_WORK(&vm->preempt.rebind_work, preempt_rebind_work_func);
1281 vm->flags |= XE_VM_FLAG_COMPUTE_MODE;
1282 vm->batch_invalidate_tlb = false;
1285 if (flags & XE_VM_FLAG_ASYNC_BIND_OPS) {
1286 vm->async_ops.fence.context = dma_fence_context_alloc(1);
1287 vm->flags |= XE_VM_FLAG_ASYNC_BIND_OPS;
1290 /* Fill pt_root after allocating scratch tables */
1291 for_each_tile(tile, xe, id) {
1292 if (!vm->pt_root[id])
1295 xe_pt_populate_empty(tile, vm, vm->pt_root[id]);
1297 dma_resv_unlock(&vm->resv);
1299 /* Kernel migration VM shouldn't have a circular loop. */
1300 if (!(flags & XE_VM_FLAG_MIGRATION)) {
1301 for_each_tile(tile, xe, id) {
1302 struct xe_gt *gt = tile->primary_gt;
1303 struct xe_vm *migrate_vm;
1304 struct xe_engine *eng;
1306 if (!vm->pt_root[id])
1309 migrate_vm = xe_migrate_get_vm(tile->migrate);
1310 eng = xe_engine_create_class(xe, gt, migrate_vm,
1311 XE_ENGINE_CLASS_COPY,
1313 xe_vm_put(migrate_vm);
1315 xe_vm_close_and_put(vm);
1316 return ERR_CAST(eng);
1323 if (number_tiles > 1)
1324 vm->composite_fence_ctx = dma_fence_context_alloc(1);
1326 mutex_lock(&xe->usm.lock);
1327 if (flags & XE_VM_FLAG_FAULT_MODE)
1328 xe->usm.num_vm_in_fault_mode++;
1329 else if (!(flags & XE_VM_FLAG_MIGRATION))
1330 xe->usm.num_vm_in_non_fault_mode++;
1331 mutex_unlock(&xe->usm.lock);
1333 trace_xe_vm_create(vm);
1338 for_each_tile(tile, xe, id) {
1339 if (!vm->pt_root[id])
1342 i = vm->pt_root[id]->level;
1344 if (vm->scratch_pt[id][--i])
1345 xe_pt_destroy(vm->scratch_pt[id][i],
1347 xe_bo_unpin(vm->scratch_bo[id]);
1348 xe_bo_put(vm->scratch_bo[id]);
1351 for_each_tile(tile, xe, id) {
1352 if (vm->pt_root[id])
1353 xe_pt_destroy(vm->pt_root[id], vm->flags, NULL);
1355 dma_resv_unlock(&vm->resv);
1357 dma_resv_fini(&vm->resv);
1359 if (!(flags & XE_VM_FLAG_MIGRATION)) {
1360 xe_device_mem_access_put(xe);
1361 xe_pm_runtime_put(xe);
1363 return ERR_PTR(err);
1366 static void flush_async_ops(struct xe_vm *vm)
1368 queue_work(system_unbound_wq, &vm->async_ops.work);
1369 flush_work(&vm->async_ops.work);
1372 static void vm_error_capture(struct xe_vm *vm, int err,
1373 u32 op, u64 addr, u64 size)
1375 struct drm_xe_vm_bind_op_error_capture capture;
1376 u64 __user *address =
1377 u64_to_user_ptr(vm->async_ops.error_capture.addr);
1378 bool in_kthread = !current->mm;
1380 capture.error = err;
1382 capture.addr = addr;
1383 capture.size = size;
1386 if (!mmget_not_zero(vm->async_ops.error_capture.mm))
1388 kthread_use_mm(vm->async_ops.error_capture.mm);
1391 if (copy_to_user(address, &capture, sizeof(capture)))
1392 XE_WARN_ON("Copy to user failed");
1395 kthread_unuse_mm(vm->async_ops.error_capture.mm);
1396 mmput(vm->async_ops.error_capture.mm);
1400 wake_up_all(&vm->async_ops.error_capture.wq);
1403 static void xe_vm_close(struct xe_vm *vm)
1405 down_write(&vm->lock);
1407 up_write(&vm->lock);
1410 void xe_vm_close_and_put(struct xe_vm *vm)
1412 struct rb_root contested = RB_ROOT;
1413 struct ww_acquire_ctx ww;
1414 struct xe_device *xe = vm->xe;
1415 struct xe_tile *tile;
1418 XE_BUG_ON(vm->preempt.num_engines);
1422 flush_async_ops(vm);
1423 if (xe_vm_in_compute_mode(vm))
1424 flush_work(&vm->preempt.rebind_work);
1426 for_each_tile(tile, xe, id) {
1428 xe_engine_kill(vm->eng[id]);
1429 xe_engine_put(vm->eng[id]);
1434 down_write(&vm->lock);
1435 xe_vm_lock(vm, &ww, 0, false);
1436 while (vm->vmas.rb_node) {
1437 struct xe_vma *vma = to_xe_vma(vm->vmas.rb_node);
1439 if (xe_vma_has_no_bo(vma)) {
1440 down_read(&vm->userptr.notifier_lock);
1441 vma->destroyed = true;
1442 up_read(&vm->userptr.notifier_lock);
1445 rb_erase(&vma->vm_node, &vm->vmas);
1447 /* easy case, remove from VMA? */
1448 if (xe_vma_has_no_bo(vma) || xe_vma_bo(vma)->vm) {
1449 xe_vma_destroy(vma, NULL);
1453 rb_add(&vma->vm_node, &contested, xe_vma_less_cb);
1457 * All vm operations will add shared fences to resv.
1458 * The only exception is eviction for a shared object,
1459 * but even so, the unbind when evicted would still
1460 * install a fence to resv. Hence it's safe to
1461 * destroy the pagetables immediately.
1463 for_each_tile(tile, xe, id) {
1464 if (vm->scratch_bo[id]) {
1467 xe_bo_unpin(vm->scratch_bo[id]);
1468 xe_bo_put(vm->scratch_bo[id]);
1469 for (i = 0; i < vm->pt_root[id]->level; i++)
1470 xe_pt_destroy(vm->scratch_pt[id][i], vm->flags,
1474 xe_vm_unlock(vm, &ww);
1476 if (contested.rb_node) {
1479 * VM is now dead, cannot re-add nodes to vm->vmas if it's NULL
1480 * Since we hold a refcount to the bo, we can remove and free
1481 * the members safely without locking.
1483 while (contested.rb_node) {
1484 struct xe_vma *vma = to_xe_vma(contested.rb_node);
1486 rb_erase(&vma->vm_node, &contested);
1487 xe_vma_destroy_unlocked(vma);
1491 if (vm->async_ops.error_capture.addr)
1492 wake_up_all(&vm->async_ops.error_capture.wq);
1494 XE_WARN_ON(!list_empty(&vm->extobj.list));
1495 up_write(&vm->lock);
1497 mutex_lock(&xe->usm.lock);
1498 if (vm->flags & XE_VM_FLAG_FAULT_MODE)
1499 xe->usm.num_vm_in_fault_mode--;
1500 else if (!(vm->flags & XE_VM_FLAG_MIGRATION))
1501 xe->usm.num_vm_in_non_fault_mode--;
1502 mutex_unlock(&xe->usm.lock);
1507 static void vm_destroy_work_func(struct work_struct *w)
1510 container_of(w, struct xe_vm, destroy_work);
1511 struct ww_acquire_ctx ww;
1512 struct xe_device *xe = vm->xe;
1513 struct xe_tile *tile;
1517 /* xe_vm_close_and_put was not called? */
1518 XE_WARN_ON(vm->size);
1520 if (!(vm->flags & XE_VM_FLAG_MIGRATION)) {
1521 xe_device_mem_access_put(xe);
1522 xe_pm_runtime_put(xe);
1524 if (xe->info.has_asid) {
1525 mutex_lock(&xe->usm.lock);
1526 lookup = xa_erase(&xe->usm.asid_to_vm, vm->usm.asid);
1527 XE_WARN_ON(lookup != vm);
1528 mutex_unlock(&xe->usm.lock);
1533 * XXX: We delay destroying the PT root until the VM is freed, as the PT root
1534 * is needed for xe_vm_lock to work. If we remove that dependency this
1535 * can be moved to xe_vm_close_and_put.
1537 xe_vm_lock(vm, &ww, 0, false);
1538 for_each_tile(tile, xe, id) {
1539 if (vm->pt_root[id]) {
1540 xe_pt_destroy(vm->pt_root[id], vm->flags, NULL);
1541 vm->pt_root[id] = NULL;
1544 xe_vm_unlock(vm, &ww);
1546 trace_xe_vm_free(vm);
1547 dma_fence_put(vm->rebind_fence);
1548 dma_resv_fini(&vm->resv);
1552 void xe_vm_free(struct kref *ref)
1554 struct xe_vm *vm = container_of(ref, struct xe_vm, refcount);
1556 /* To destroy the VM we need to be able to sleep */
1557 queue_work(system_unbound_wq, &vm->destroy_work);
1560 struct xe_vm *xe_vm_lookup(struct xe_file *xef, u32 id)
1564 mutex_lock(&xef->vm.lock);
1565 vm = xa_load(&xef->vm.xa, id);
1568 mutex_unlock(&xef->vm.lock);
1573 u64 xe_vm_pdp4_descriptor(struct xe_vm *vm, struct xe_tile *tile)
1575 return xe_pde_encode(vm->pt_root[tile->id]->bo, 0,
1579 static struct dma_fence *
1580 xe_vm_unbind_vma(struct xe_vma *vma, struct xe_engine *e,
1581 struct xe_sync_entry *syncs, u32 num_syncs)
1583 struct xe_tile *tile;
1584 struct dma_fence *fence = NULL;
1585 struct dma_fence **fences = NULL;
1586 struct dma_fence_array *cf = NULL;
1587 struct xe_vm *vm = xe_vma_vm(vma);
1588 int cur_fence = 0, i;
1589 int number_tiles = hweight_long(vma->tile_present);
1593 trace_xe_vma_unbind(vma);
1595 if (number_tiles > 1) {
1596 fences = kmalloc_array(number_tiles, sizeof(*fences),
1599 return ERR_PTR(-ENOMEM);
1602 for_each_tile(tile, vm->xe, id) {
1603 if (!(vma->tile_present & BIT(id)))
1606 fence = __xe_pt_unbind_vma(tile, vma, e, syncs, num_syncs);
1607 if (IS_ERR(fence)) {
1608 err = PTR_ERR(fence);
1613 fences[cur_fence++] = fence;
1616 if (e && vm->pt_root[id] && !list_empty(&e->multi_gt_list))
1617 e = list_next_entry(e, multi_gt_list);
1621 cf = dma_fence_array_create(number_tiles, fences,
1622 vm->composite_fence_ctx,
1623 vm->composite_fence_seqno++,
1626 --vm->composite_fence_seqno;
1632 for (i = 0; i < num_syncs; i++)
1633 xe_sync_entry_signal(&syncs[i], NULL, cf ? &cf->base : fence);
1635 return cf ? &cf->base : !fence ? dma_fence_get_stub() : fence;
1640 /* FIXME: Rewind the previous binds? */
1641 dma_fence_put(fences[--cur_fence]);
1646 return ERR_PTR(err);
1649 static struct dma_fence *
1650 xe_vm_bind_vma(struct xe_vma *vma, struct xe_engine *e,
1651 struct xe_sync_entry *syncs, u32 num_syncs)
1653 struct xe_tile *tile;
1654 struct dma_fence *fence;
1655 struct dma_fence **fences = NULL;
1656 struct dma_fence_array *cf = NULL;
1657 struct xe_vm *vm = xe_vma_vm(vma);
1658 int cur_fence = 0, i;
1659 int number_tiles = hweight_long(vma->tile_mask);
1663 trace_xe_vma_bind(vma);
1665 if (number_tiles > 1) {
1666 fences = kmalloc_array(number_tiles, sizeof(*fences),
1669 return ERR_PTR(-ENOMEM);
1672 for_each_tile(tile, vm->xe, id) {
1673 if (!(vma->tile_mask & BIT(id)))
1676 fence = __xe_pt_bind_vma(tile, vma, e, syncs, num_syncs,
1677 vma->tile_present & BIT(id));
1678 if (IS_ERR(fence)) {
1679 err = PTR_ERR(fence);
1684 fences[cur_fence++] = fence;
1687 if (e && vm->pt_root[id] && !list_empty(&e->multi_gt_list))
1688 e = list_next_entry(e, multi_gt_list);
1692 cf = dma_fence_array_create(number_tiles, fences,
1693 vm->composite_fence_ctx,
1694 vm->composite_fence_seqno++,
1697 --vm->composite_fence_seqno;
1703 for (i = 0; i < num_syncs; i++)
1704 xe_sync_entry_signal(&syncs[i], NULL, cf ? &cf->base : fence);
1706 return cf ? &cf->base : fence;
1711 /* FIXME: Rewind the previous binds? */
1712 dma_fence_put(fences[--cur_fence]);
1717 return ERR_PTR(err);
1720 struct async_op_fence {
1721 struct dma_fence fence;
1722 struct dma_fence *wait_fence;
1723 struct dma_fence_cb cb;
1725 wait_queue_head_t wq;
1729 static const char *async_op_fence_get_driver_name(struct dma_fence *dma_fence)
1735 async_op_fence_get_timeline_name(struct dma_fence *dma_fence)
1737 return "async_op_fence";
1740 static const struct dma_fence_ops async_op_fence_ops = {
1741 .get_driver_name = async_op_fence_get_driver_name,
1742 .get_timeline_name = async_op_fence_get_timeline_name,
1745 static void async_op_fence_cb(struct dma_fence *fence, struct dma_fence_cb *cb)
1747 struct async_op_fence *afence =
1748 container_of(cb, struct async_op_fence, cb);
1750 afence->fence.error = afence->wait_fence->error;
1751 dma_fence_signal(&afence->fence);
1752 xe_vm_put(afence->vm);
1753 dma_fence_put(afence->wait_fence);
1754 dma_fence_put(&afence->fence);
1757 static void add_async_op_fence_cb(struct xe_vm *vm,
1758 struct dma_fence *fence,
1759 struct async_op_fence *afence)
1763 if (!xe_vm_no_dma_fences(vm)) {
1764 afence->started = true;
1766 wake_up_all(&afence->wq);
1769 afence->wait_fence = dma_fence_get(fence);
1770 afence->vm = xe_vm_get(vm);
1771 dma_fence_get(&afence->fence);
1772 ret = dma_fence_add_callback(fence, &afence->cb, async_op_fence_cb);
1773 if (ret == -ENOENT) {
1774 afence->fence.error = afence->wait_fence->error;
1775 dma_fence_signal(&afence->fence);
1779 dma_fence_put(afence->wait_fence);
1780 dma_fence_put(&afence->fence);
1782 XE_WARN_ON(ret && ret != -ENOENT);
1785 int xe_vm_async_fence_wait_start(struct dma_fence *fence)
1787 if (fence->ops == &async_op_fence_ops) {
1788 struct async_op_fence *afence =
1789 container_of(fence, struct async_op_fence, fence);
1791 XE_BUG_ON(xe_vm_no_dma_fences(afence->vm));
1794 return wait_event_interruptible(afence->wq, afence->started);
1800 static int __xe_vm_bind(struct xe_vm *vm, struct xe_vma *vma,
1801 struct xe_engine *e, struct xe_sync_entry *syncs,
1802 u32 num_syncs, struct async_op_fence *afence)
1804 struct dma_fence *fence;
1806 xe_vm_assert_held(vm);
1808 fence = xe_vm_bind_vma(vma, e, syncs, num_syncs);
1810 return PTR_ERR(fence);
1812 add_async_op_fence_cb(vm, fence, afence);
1814 dma_fence_put(fence);
1818 static int xe_vm_bind(struct xe_vm *vm, struct xe_vma *vma, struct xe_engine *e,
1819 struct xe_bo *bo, struct xe_sync_entry *syncs,
1820 u32 num_syncs, struct async_op_fence *afence)
1824 xe_vm_assert_held(vm);
1825 xe_bo_assert_held(bo);
1828 err = xe_bo_validate(bo, vm, true);
1833 return __xe_vm_bind(vm, vma, e, syncs, num_syncs, afence);
1836 static int xe_vm_unbind(struct xe_vm *vm, struct xe_vma *vma,
1837 struct xe_engine *e, struct xe_sync_entry *syncs,
1838 u32 num_syncs, struct async_op_fence *afence)
1840 struct dma_fence *fence;
1842 xe_vm_assert_held(vm);
1843 xe_bo_assert_held(xe_vma_bo(vma));
1845 fence = xe_vm_unbind_vma(vma, e, syncs, num_syncs);
1847 return PTR_ERR(fence);
1849 add_async_op_fence_cb(vm, fence, afence);
1851 xe_vma_destroy(vma, fence);
1852 dma_fence_put(fence);
1857 static int vm_set_error_capture_address(struct xe_device *xe, struct xe_vm *vm,
1860 if (XE_IOCTL_ERR(xe, !value))
1863 if (XE_IOCTL_ERR(xe, !(vm->flags & XE_VM_FLAG_ASYNC_BIND_OPS)))
1866 if (XE_IOCTL_ERR(xe, vm->async_ops.error_capture.addr))
1869 vm->async_ops.error_capture.mm = current->mm;
1870 vm->async_ops.error_capture.addr = value;
1871 init_waitqueue_head(&vm->async_ops.error_capture.wq);
1876 typedef int (*xe_vm_set_property_fn)(struct xe_device *xe, struct xe_vm *vm,
1879 static const xe_vm_set_property_fn vm_set_property_funcs[] = {
1880 [XE_VM_PROPERTY_BIND_OP_ERROR_CAPTURE_ADDRESS] =
1881 vm_set_error_capture_address,
1884 static int vm_user_ext_set_property(struct xe_device *xe, struct xe_vm *vm,
1887 u64 __user *address = u64_to_user_ptr(extension);
1888 struct drm_xe_ext_vm_set_property ext;
1891 err = __copy_from_user(&ext, address, sizeof(ext));
1892 if (XE_IOCTL_ERR(xe, err))
1895 if (XE_IOCTL_ERR(xe, ext.property >=
1896 ARRAY_SIZE(vm_set_property_funcs)) ||
1897 XE_IOCTL_ERR(xe, ext.pad) ||
1898 XE_IOCTL_ERR(xe, ext.reserved[0] || ext.reserved[1]))
1901 return vm_set_property_funcs[ext.property](xe, vm, ext.value);
1904 typedef int (*xe_vm_user_extension_fn)(struct xe_device *xe, struct xe_vm *vm,
1907 static const xe_vm_set_property_fn vm_user_extension_funcs[] = {
1908 [XE_VM_EXTENSION_SET_PROPERTY] = vm_user_ext_set_property,
1911 #define MAX_USER_EXTENSIONS 16
1912 static int vm_user_extensions(struct xe_device *xe, struct xe_vm *vm,
1913 u64 extensions, int ext_number)
1915 u64 __user *address = u64_to_user_ptr(extensions);
1916 struct xe_user_extension ext;
1919 if (XE_IOCTL_ERR(xe, ext_number >= MAX_USER_EXTENSIONS))
1922 err = __copy_from_user(&ext, address, sizeof(ext));
1923 if (XE_IOCTL_ERR(xe, err))
1926 if (XE_IOCTL_ERR(xe, ext.pad) ||
1927 XE_IOCTL_ERR(xe, ext.name >=
1928 ARRAY_SIZE(vm_user_extension_funcs)))
1931 err = vm_user_extension_funcs[ext.name](xe, vm, extensions);
1932 if (XE_IOCTL_ERR(xe, err))
1935 if (ext.next_extension)
1936 return vm_user_extensions(xe, vm, ext.next_extension,
1942 #define ALL_DRM_XE_VM_CREATE_FLAGS (DRM_XE_VM_CREATE_SCRATCH_PAGE | \
1943 DRM_XE_VM_CREATE_COMPUTE_MODE | \
1944 DRM_XE_VM_CREATE_ASYNC_BIND_OPS | \
1945 DRM_XE_VM_CREATE_FAULT_MODE)
1947 int xe_vm_create_ioctl(struct drm_device *dev, void *data,
1948 struct drm_file *file)
1950 struct xe_device *xe = to_xe_device(dev);
1951 struct xe_file *xef = to_xe_file(file);
1952 struct drm_xe_vm_create *args = data;
1958 if (XE_IOCTL_ERR(xe, args->reserved[0] || args->reserved[1]))
1961 if (XE_IOCTL_ERR(xe, args->flags & ~ALL_DRM_XE_VM_CREATE_FLAGS))
1964 if (XE_IOCTL_ERR(xe, args->flags & DRM_XE_VM_CREATE_SCRATCH_PAGE &&
1965 args->flags & DRM_XE_VM_CREATE_FAULT_MODE))
1968 if (XE_IOCTL_ERR(xe, args->flags & DRM_XE_VM_CREATE_COMPUTE_MODE &&
1969 args->flags & DRM_XE_VM_CREATE_FAULT_MODE))
1972 if (XE_IOCTL_ERR(xe, args->flags & DRM_XE_VM_CREATE_FAULT_MODE &&
1973 xe_device_in_non_fault_mode(xe)))
1976 if (XE_IOCTL_ERR(xe, !(args->flags & DRM_XE_VM_CREATE_FAULT_MODE) &&
1977 xe_device_in_fault_mode(xe)))
1980 if (XE_IOCTL_ERR(xe, args->flags & DRM_XE_VM_CREATE_FAULT_MODE &&
1981 !xe->info.supports_usm))
1984 if (args->flags & DRM_XE_VM_CREATE_SCRATCH_PAGE)
1985 flags |= XE_VM_FLAG_SCRATCH_PAGE;
1986 if (args->flags & DRM_XE_VM_CREATE_COMPUTE_MODE)
1987 flags |= XE_VM_FLAG_COMPUTE_MODE;
1988 if (args->flags & DRM_XE_VM_CREATE_ASYNC_BIND_OPS)
1989 flags |= XE_VM_FLAG_ASYNC_BIND_OPS;
1990 if (args->flags & DRM_XE_VM_CREATE_FAULT_MODE)
1991 flags |= XE_VM_FLAG_FAULT_MODE;
1993 vm = xe_vm_create(xe, flags);
1997 if (args->extensions) {
1998 err = vm_user_extensions(xe, vm, args->extensions, 0);
1999 if (XE_IOCTL_ERR(xe, err)) {
2000 xe_vm_close_and_put(vm);
2005 mutex_lock(&xef->vm.lock);
2006 err = xa_alloc(&xef->vm.xa, &id, vm, xa_limit_32b, GFP_KERNEL);
2007 mutex_unlock(&xef->vm.lock);
2009 xe_vm_close_and_put(vm);
2013 if (xe->info.has_asid) {
2014 mutex_lock(&xe->usm.lock);
2015 err = xa_alloc_cyclic(&xe->usm.asid_to_vm, &asid, vm,
2016 XA_LIMIT(0, XE_MAX_ASID - 1),
2017 &xe->usm.next_asid, GFP_KERNEL);
2018 mutex_unlock(&xe->usm.lock);
2020 xe_vm_close_and_put(vm);
2023 vm->usm.asid = asid;
2028 #if IS_ENABLED(CONFIG_DRM_XE_DEBUG_MEM)
2029 /* Warning: Security issue - never enable by default */
2030 args->reserved[0] = xe_bo_main_addr(vm->pt_root[0]->bo, XE_PAGE_SIZE);
2036 int xe_vm_destroy_ioctl(struct drm_device *dev, void *data,
2037 struct drm_file *file)
2039 struct xe_device *xe = to_xe_device(dev);
2040 struct xe_file *xef = to_xe_file(file);
2041 struct drm_xe_vm_destroy *args = data;
2045 if (XE_IOCTL_ERR(xe, args->pad) ||
2046 XE_IOCTL_ERR(xe, args->reserved[0] || args->reserved[1]))
2049 mutex_lock(&xef->vm.lock);
2050 vm = xa_load(&xef->vm.xa, args->vm_id);
2051 if (XE_IOCTL_ERR(xe, !vm))
2053 else if (XE_IOCTL_ERR(xe, vm->preempt.num_engines))
2056 xa_erase(&xef->vm.xa, args->vm_id);
2057 mutex_unlock(&xef->vm.lock);
2060 xe_vm_close_and_put(vm);
2065 static const u32 region_to_mem_type[] = {
2071 static int xe_vm_prefetch(struct xe_vm *vm, struct xe_vma *vma,
2072 struct xe_engine *e, u32 region,
2073 struct xe_sync_entry *syncs, u32 num_syncs,
2074 struct async_op_fence *afence)
2078 XE_BUG_ON(region > ARRAY_SIZE(region_to_mem_type));
2080 if (!xe_vma_has_no_bo(vma)) {
2081 err = xe_bo_migrate(xe_vma_bo(vma), region_to_mem_type[region]);
2086 if (vma->tile_mask != (vma->tile_present & ~vma->usm.tile_invalidated)) {
2087 return xe_vm_bind(vm, vma, e, xe_vma_bo(vma), syncs, num_syncs,
2092 /* Nothing to do, signal fences now */
2093 for (i = 0; i < num_syncs; i++)
2094 xe_sync_entry_signal(&syncs[i], NULL,
2095 dma_fence_get_stub());
2097 dma_fence_signal(&afence->fence);
2102 #define VM_BIND_OP(op) (op & 0xffff)
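/*
 * The low 16 bits of the bind op select the operation; the upper bits
 * carry flags. For example (illustrative):
 *
 *	u32 op = XE_VM_BIND_FLAG_ASYNC | XE_VM_BIND_OP_MAP_USERPTR;
 *
 *	VM_BIND_OP(op) == XE_VM_BIND_OP_MAP_USERPTR
 */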
2104 static int __vm_bind_ioctl(struct xe_vm *vm, struct xe_vma *vma,
2105 struct xe_engine *e, struct xe_bo *bo, u32 op,
2106 u32 region, struct xe_sync_entry *syncs,
2107 u32 num_syncs, struct async_op_fence *afence)
2109 switch (VM_BIND_OP(op)) {
2110 case XE_VM_BIND_OP_MAP:
2111 return xe_vm_bind(vm, vma, e, bo, syncs, num_syncs, afence);
2112 case XE_VM_BIND_OP_UNMAP:
2113 case XE_VM_BIND_OP_UNMAP_ALL:
2114 return xe_vm_unbind(vm, vma, e, syncs, num_syncs, afence);
2115 case XE_VM_BIND_OP_MAP_USERPTR:
2116 return xe_vm_bind(vm, vma, e, NULL, syncs, num_syncs, afence);
2117 case XE_VM_BIND_OP_PREFETCH:
2118 return xe_vm_prefetch(vm, vma, e, region, syncs, num_syncs,
2122 XE_BUG_ON("NOT POSSIBLE");
2127 struct ttm_buffer_object *xe_vm_ttm_bo(struct xe_vm *vm)
2129 int idx = vm->flags & XE_VM_FLAG_MIGRATION ?
2130 XE_VM_FLAG_GT_ID(vm->flags) : 0;
2132 /* Safe to use index 0 as all BOs in the VM share a single dma-resv lock */
2133 return &vm->pt_root[idx]->bo->ttm;
2136 static void xe_vm_tv_populate(struct xe_vm *vm, struct ttm_validate_buffer *tv)
2139 tv->bo = xe_vm_ttm_bo(vm);
2142 static bool is_map_op(u32 op)
2144 return VM_BIND_OP(op) == XE_VM_BIND_OP_MAP ||
2145 VM_BIND_OP(op) == XE_VM_BIND_OP_MAP_USERPTR;
2148 static bool is_unmap_op(u32 op)
2150 return VM_BIND_OP(op) == XE_VM_BIND_OP_UNMAP ||
2151 VM_BIND_OP(op) == XE_VM_BIND_OP_UNMAP_ALL;
2154 static int vm_bind_ioctl(struct xe_vm *vm, struct xe_vma *vma,
2155 struct xe_engine *e, struct xe_bo *bo,
2156 struct drm_xe_vm_bind_op *bind_op,
2157 struct xe_sync_entry *syncs, u32 num_syncs,
2158 struct async_op_fence *afence)
2162 struct ttm_validate_buffer tv_bo, tv_vm;
2163 struct ww_acquire_ctx ww;
2167 lockdep_assert_held(&vm->lock);
2168 XE_BUG_ON(!list_empty(&vma->unbind_link));
2170 /* Binds deferred to faults, signal fences now */
2171 if (xe_vm_in_fault_mode(vm) && is_map_op(bind_op->op) &&
2172 !(bind_op->op & XE_VM_BIND_FLAG_IMMEDIATE)) {
2173 for (i = 0; i < num_syncs; i++)
2174 xe_sync_entry_signal(&syncs[i], NULL,
2175 dma_fence_get_stub());
2177 dma_fence_signal(&afence->fence);
2181 xe_vm_tv_populate(vm, &tv_vm);
2182 list_add_tail(&tv_vm.head, &objs);
2183 vbo = xe_vma_bo(vma);
2186 * An unbind can drop the last reference to the BO, and
2187 * the BO is needed for ttm_eu_backoff_reservation, so
2188 * take a reference here.
2193 tv_bo.bo = &vbo->ttm;
2194 tv_bo.num_shared = 1;
2195 list_add(&tv_bo.head, &objs);
2200 err = ttm_eu_reserve_buffers(&ww, &objs, true, &dups);
2202 err = __vm_bind_ioctl(vm, vma, e, bo,
2203 bind_op->op, bind_op->region, syncs,
2205 ttm_eu_backoff_reservation(&ww, &objs);
2206 if (err == -EAGAIN && xe_vma_is_userptr(vma)) {
2207 lockdep_assert_held_write(&vm->lock);
2208 err = xe_vma_userptr_pin_pages(vma);
2220 struct xe_engine *engine;
2222 struct drm_xe_vm_bind_op bind_op;
2223 struct xe_sync_entry *syncs;
2225 struct list_head link;
2226 struct async_op_fence *fence;
2229 static void async_op_cleanup(struct xe_vm *vm, struct async_op *op)
2231 while (op->num_syncs--)
2232 xe_sync_entry_cleanup(&op->syncs[op->num_syncs]);
2236 xe_engine_put(op->engine);
2239 dma_fence_put(&op->fence->fence);
2243 static struct async_op *next_async_op(struct xe_vm *vm)
2245 return list_first_entry_or_null(&vm->async_ops.pending,
2246 struct async_op, link);
2249 static void vm_set_async_error(struct xe_vm *vm, int err)
2251 lockdep_assert_held(&vm->lock);
2252 vm->async_ops.error = err;
2255 static void async_op_work_func(struct work_struct *w)
2257 struct xe_vm *vm = container_of(w, struct xe_vm, async_ops.work);
2260 struct async_op *op;
2263 if (vm->async_ops.error && !xe_vm_is_closed(vm))
2266 spin_lock_irq(&vm->async_ops.lock);
2267 op = next_async_op(vm);
2269 list_del_init(&op->link);
2270 spin_unlock_irq(&vm->async_ops.lock);
2275 if (!xe_vm_is_closed(vm)) {
2278 down_write(&vm->lock);
2280 first = op->vma->first_munmap_rebind;
2281 last = op->vma->last_munmap_rebind;
2282 #ifdef TEST_VM_ASYNC_OPS_ERROR
2283 #define FORCE_ASYNC_OP_ERROR BIT(31)
2284 if (!(op->bind_op.op & FORCE_ASYNC_OP_ERROR)) {
2285 err = vm_bind_ioctl(vm, op->vma, op->engine,
2286 op->bo, &op->bind_op,
2287 op->syncs, op->num_syncs,
2291 op->bind_op.op &= ~FORCE_ASYNC_OP_ERROR;
2294 err = vm_bind_ioctl(vm, op->vma, op->engine, op->bo,
2295 &op->bind_op, op->syncs,
2296 op->num_syncs, op->fence);
2299 * In order for the fencing to work (stall behind
2300 * existing jobs / prevent new jobs from running) all
2301 * the dma-resv slots need to be programmed in a batch
2302 * relative to execs / the rebind worker. The vm->lock
2305 if (!err && ((first && VM_BIND_OP(op->bind_op.op) ==
2306 XE_VM_BIND_OP_UNMAP) ||
2307 vm->async_ops.munmap_rebind_inflight)) {
2309 op->vma->last_munmap_rebind = false;
2310 vm->async_ops.munmap_rebind_inflight =
2313 vm->async_ops.munmap_rebind_inflight =
2316 async_op_cleanup(vm, op);
2318 spin_lock_irq(&vm->async_ops.lock);
2319 op = next_async_op(vm);
2321 list_del_init(&op->link);
2322 spin_unlock_irq(&vm->async_ops.lock);
2328 trace_xe_vma_fail(op->vma);
2329 drm_warn(&vm->xe->drm, "Async VM op(%d) failed with %d",
2330 VM_BIND_OP(op->bind_op.op),
2333 spin_lock_irq(&vm->async_ops.lock);
2334 list_add(&op->link, &vm->async_ops.pending);
2335 spin_unlock_irq(&vm->async_ops.lock);
2337 vm_set_async_error(vm, err);
2338 up_write(&vm->lock);
2340 if (vm->async_ops.error_capture.addr)
2341 vm_error_capture(vm, err,
2347 up_write(&vm->lock);
2349 trace_xe_vma_flush(op->vma);
2351 if (is_unmap_op(op->bind_op.op)) {
2352 down_write(&vm->lock);
2353 xe_vma_destroy_unlocked(op->vma);
2354 up_write(&vm->lock);
2357 if (op->fence && !test_bit(DMA_FENCE_FLAG_SIGNALED_BIT,
2358 &op->fence->fence.flags)) {
2359 if (!xe_vm_no_dma_fences(vm)) {
2360 op->fence->started = true;
2362 wake_up_all(&op->fence->wq);
2364 dma_fence_signal(&op->fence->fence);
2368 async_op_cleanup(vm, op);
2372 static int __vm_bind_ioctl_async(struct xe_vm *vm, struct xe_vma *vma,
2373 struct xe_engine *e, struct xe_bo *bo,
2374 struct drm_xe_vm_bind_op *bind_op,
2375 struct xe_sync_entry *syncs, u32 num_syncs)
2377 struct async_op *op;
2378 bool installed = false;
2382 lockdep_assert_held(&vm->lock);
2384 op = kmalloc(sizeof(*op), GFP_KERNEL);
2390 op->fence = kmalloc(sizeof(*op->fence), GFP_KERNEL);
2396 seqno = e ? ++e->bind.fence_seqno : ++vm->async_ops.fence.seqno;
2397 dma_fence_init(&op->fence->fence, &async_op_fence_ops,
2398 &vm->async_ops.lock, e ? e->bind.fence_ctx :
2399 vm->async_ops.fence.context, seqno);
2401 if (!xe_vm_no_dma_fences(vm)) {
2403 op->fence->started = false;
2404 init_waitqueue_head(&op->fence->wq);
2412 op->bind_op = *bind_op;
2414 op->num_syncs = num_syncs;
2415 INIT_LIST_HEAD(&op->link);
2417 for (i = 0; i < num_syncs; i++)
2418 installed |= xe_sync_entry_signal(&syncs[i], NULL,
2421 if (!installed && op->fence)
2422 dma_fence_signal(&op->fence->fence);
2424 spin_lock_irq(&vm->async_ops.lock);
2425 list_add_tail(&op->link, &vm->async_ops.pending);
2426 spin_unlock_irq(&vm->async_ops.lock);
2428 if (!vm->async_ops.error)
2429 queue_work(system_unbound_wq, &vm->async_ops.work);
2434 static int vm_bind_ioctl_async(struct xe_vm *vm, struct xe_vma *vma,
2435 struct xe_engine *e, struct xe_bo *bo,
2436 struct drm_xe_vm_bind_op *bind_op,
2437 struct xe_sync_entry *syncs, u32 num_syncs)
2439 struct xe_vma *__vma, *next;
2440 struct list_head rebind_list;
2441 struct xe_sync_entry *in_syncs = NULL, *out_syncs = NULL;
2442 u32 num_in_syncs = 0, num_out_syncs = 0;
2443 bool first = true, last;
2447 lockdep_assert_held(&vm->lock);
2449 /* Not a linked list of unbinds + rebinds, easy */
2450 if (list_empty(&vma->unbind_link))
2451 return __vm_bind_ioctl_async(vm, vma, e, bo, bind_op,
2455 * Linked list of unbinds + rebinds: decompose syncs into 'in / out',
2456 * passing the 'in' syncs to the first operation and the 'out' syncs to
2457 * the last. Also the reference counting is a little tricky: increment
2458 * the VM / bind engine ref count on all but the last operation and
2459 * increment the BO's ref count on each rebind.
2462 XE_BUG_ON(VM_BIND_OP(bind_op->op) != XE_VM_BIND_OP_UNMAP &&
2463 VM_BIND_OP(bind_op->op) != XE_VM_BIND_OP_UNMAP_ALL &&
2464 VM_BIND_OP(bind_op->op) != XE_VM_BIND_OP_PREFETCH);
2466 /* Decompose syncs */
2468 in_syncs = kmalloc(sizeof(*in_syncs) * num_syncs, GFP_KERNEL);
2469 out_syncs = kmalloc(sizeof(*out_syncs) * num_syncs, GFP_KERNEL);
2470 if (!in_syncs || !out_syncs) {
2475 for (i = 0; i < num_syncs; ++i) {
2476 bool signal = syncs[i].flags & DRM_XE_SYNC_SIGNAL;
2479 out_syncs[num_out_syncs++] = syncs[i];
2481 in_syncs[num_in_syncs++] = syncs[i];
2485 /* Do unbinds + move rebinds to new list */
2486 INIT_LIST_HEAD(&rebind_list);
2487 list_for_each_entry_safe(__vma, next, &vma->unbind_link, unbind_link) {
2488 if (__vma->destroyed ||
2489 VM_BIND_OP(bind_op->op) == XE_VM_BIND_OP_PREFETCH) {
2490 list_del_init(&__vma->unbind_link);
2492 err = __vm_bind_ioctl_async(xe_vm_get(vm), __vma,
2493 e ? xe_engine_get(e) : NULL,
2494 bo, bind_op, first ?
2496 first ? num_in_syncs : 0);
2507 list_move_tail(&__vma->unbind_link, &rebind_list);
2510 last = list_empty(&rebind_list);
2516 err = __vm_bind_ioctl_async(vm, vma, e,
2519 last ? out_syncs : NULL,
2520 first ? num_in_syncs :
2521 last ? num_out_syncs : 0);
2533 list_for_each_entry_safe(__vma, next, &rebind_list, unbind_link) {
2534 list_del_init(&__vma->unbind_link);
2535 last = list_empty(&rebind_list);
2537 if (xe_vma_is_userptr(__vma)) {
2538 bind_op->op = XE_VM_BIND_FLAG_ASYNC |
2539 XE_VM_BIND_OP_MAP_USERPTR;
2541 bind_op->op = XE_VM_BIND_FLAG_ASYNC |
2543 xe_bo_get(xe_vma_bo(__vma));
2552 err = __vm_bind_ioctl_async(vm, __vma, e,
2553 xe_vma_bo(__vma), bind_op, last ?
2555 last ? num_out_syncs : 0);
2577 static int __vm_bind_ioctl_lookup_vma(struct xe_vm *vm, struct xe_bo *bo,
2578 u64 addr, u64 range, u32 op)
2580 struct xe_device *xe = vm->xe;
2581 struct xe_vma *vma, lookup;
2582 bool async = !!(op & XE_VM_BIND_FLAG_ASYNC);
2584 lockdep_assert_held(&vm->lock);
2586 lookup.start = addr;
2587 lookup.end = addr + range - 1;
2589 switch (VM_BIND_OP(op)) {
2590 case XE_VM_BIND_OP_MAP:
2591 case XE_VM_BIND_OP_MAP_USERPTR:
2592 vma = xe_vm_find_overlapping_vma(vm, &lookup);
2593 if (XE_IOCTL_ERR(xe, vma))
2596 case XE_VM_BIND_OP_UNMAP:
2597 case XE_VM_BIND_OP_PREFETCH:
2598 vma = xe_vm_find_overlapping_vma(vm, &lookup);
2599 if (XE_IOCTL_ERR(xe, !vma) ||
2600 XE_IOCTL_ERR(xe, (xe_vma_start(vma) != addr ||
2601 xe_vma_end(vma) != addr + range) && !async))
2604 case XE_VM_BIND_OP_UNMAP_ALL:
2607 XE_BUG_ON("NOT POSSIBLE");
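/*
 * prep_vma_destroy() flags the VMA as destroyed under the userptr notifier
 * lock, presumably so the userptr invalidation path sees the flag before the
 * VMA is torn down, then removes the VMA from the VM's tree.
 */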
2614 static void prep_vma_destroy(struct xe_vm *vm, struct xe_vma *vma)
2616 down_read(&vm->userptr.notifier_lock);
2617 vma->destroyed = true;
2618 up_read(&vm->userptr.notifier_lock);
2619 xe_vm_remove_vma(vm, vma);
2622 static int prep_replacement_vma(struct xe_vm *vm, struct xe_vma *vma)
2626 if (xe_vma_bo(vma) && !xe_vma_bo(vma)->vm) {
2627 vm_insert_extobj(vm, vma);
2628 err = add_preempt_fences(vm, xe_vma_bo(vma));
2637  * Find all overlapping VMAs in the lookup range and add them to a list on the
2638  * returned VMA; all VMAs found will be unbound. Also possibly add 2 new VMAs
2639  * that need to be bound if the first / last VMAs are not fully unbound. This is akin to how munmap works.
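 * For example, unbinding [0x1000, 0x3000) out of a single VMA covering
 * [0x0000, 0x4000) unbinds that VMA and creates two replacement VMAs:
 * new_first for [0x0000, 0x1000) and new_last for [0x3000, 0x4000).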
2642 static struct xe_vma *vm_unbind_lookup_vmas(struct xe_vm *vm,
2643 struct xe_vma *lookup)
2645 struct xe_vma *vma = xe_vm_find_overlapping_vma(vm, lookup);
2646 struct rb_node *node;
2647 struct xe_vma *first = vma, *last = vma, *new_first = NULL,
2648 *new_last = NULL, *__vma, *next;
2650 bool first_munmap_rebind = false;
2652 lockdep_assert_held(&vm->lock);
2655 node = &vma->vm_node;
2656 while ((node = rb_next(node))) {
2657 if (!xe_vma_cmp_vma_cb(lookup, node)) {
2658 __vma = to_xe_vma(node);
2659 list_add_tail(&__vma->unbind_link, &vma->unbind_link);
2666 node = &vma->vm_node;
2667 while ((node = rb_prev(node))) {
2668 if (!xe_vma_cmp_vma_cb(lookup, node)) {
2669 __vma = to_xe_vma(node);
2670 list_add(&__vma->unbind_link, &vma->unbind_link);
2677 if (xe_vma_start(first) != xe_vma_start(lookup)) {
2678 struct ww_acquire_ctx ww;
2680 if (xe_vma_bo(first))
2681 err = xe_bo_lock(xe_vma_bo(first), &ww, 0, true);
2684 new_first = xe_vma_create(xe_vma_vm(first), xe_vma_bo(first),
2686 xe_vma_bo_offset(first) :
2687 xe_vma_userptr(first),
2688 xe_vma_start(first),
2689 xe_vma_start(lookup) - 1,
2690 xe_vma_read_only(first),
2694 if (xe_vma_bo(first))
2695 xe_bo_unlock(xe_vma_bo(first), &ww);
2700 if (xe_vma_is_userptr(first)) {
2701 err = xe_vma_userptr_pin_pages(new_first);
2705 err = prep_replacement_vma(vm, new_first);
2710 if (xe_vma_end(last) != xe_vma_end(lookup)) {
2711 struct ww_acquire_ctx ww;
2712 u64 chunk = xe_vma_end(lookup) - xe_vma_start(last);
2714 if (xe_vma_bo(last))
2715 err = xe_bo_lock(xe_vma_bo(last), &ww, 0, true);
2718 new_last = xe_vma_create(xe_vma_vm(last), xe_vma_bo(last),
2720 xe_vma_bo_offset(last) + chunk :
2721 xe_vma_userptr(last) + chunk,
2722 xe_vma_start(last) + chunk,
2723 xe_vma_end(last) - 1,
2724 xe_vma_read_only(last),
2725 (last->pte_flags & XE_PTE_FLAG_NULL),
2727 if (xe_vma_bo(last))
2728 xe_bo_unlock(xe_vma_bo(last), &ww);
2733 if (xe_vma_is_userptr(last)) {
2734 err = xe_vma_userptr_pin_pages(new_last);
2738 err = prep_replacement_vma(vm, new_last);
2743 prep_vma_destroy(vm, vma);
2744 if (list_empty(&vma->unbind_link) && (new_first || new_last))
2745 vma->first_munmap_rebind = true;
2746 list_for_each_entry(__vma, &vma->unbind_link, unbind_link) {
2747 if ((new_first || new_last) && !first_munmap_rebind) {
2748 __vma->first_munmap_rebind = true;
2749 first_munmap_rebind = true;
2751 prep_vma_destroy(vm, __vma);
2754 xe_vm_insert_vma(vm, new_first);
2755 list_add_tail(&new_first->unbind_link, &vma->unbind_link);
2757 new_first->last_munmap_rebind = true;
2760 xe_vm_insert_vma(vm, new_last);
2761 list_add_tail(&new_last->unbind_link, &vma->unbind_link);
2762 new_last->last_munmap_rebind = true;
2768 list_for_each_entry_safe(__vma, next, &vma->unbind_link, unbind_link)
2769 list_del_init(&__vma->unbind_link);
2771 prep_vma_destroy(vm, new_last);
2772 xe_vma_destroy_unlocked(new_last);
2775 prep_vma_destroy(vm, new_first);
2776 xe_vma_destroy_unlocked(new_first);
2779 return ERR_PTR(err);
2783  * Similar to vm_unbind_lookup_vmas(), find all VMAs in the lookup range to prefetch.
2785 static struct xe_vma *vm_prefetch_lookup_vmas(struct xe_vm *vm,
2786 struct xe_vma *lookup,
2789 struct xe_vma *vma = xe_vm_find_overlapping_vma(vm, lookup), *__vma,
2791 struct rb_node *node;
2793 if (!xe_vma_has_no_bo(vma)) {
2794 if (!xe_bo_can_migrate(xe_vma_bo(vma), region_to_mem_type[region]))
2795 return ERR_PTR(-EINVAL);
2798 node = &vma->vm_node;
2799 while ((node = rb_next(node))) {
2800 if (!xe_vma_cmp_vma_cb(lookup, node)) {
2801 __vma = to_xe_vma(node);
2802 if (!xe_vma_has_no_bo(__vma)) {
2803 if (!xe_bo_can_migrate(xe_vma_bo(__vma), region_to_mem_type[region]))
2806 list_add_tail(&__vma->unbind_link, &vma->unbind_link);
2812 node = &vma->vm_node;
2813 while ((node = rb_prev(node))) {
2814 if (!xe_vma_cmp_vma_cb(lookup, node)) {
2815 __vma = to_xe_vma(node);
2816 if (!xe_vma_has_no_bo(__vma)) {
2817 if (!xe_bo_can_migrate(xe_vma_bo(__vma), region_to_mem_type[region]))
2820 list_add(&__vma->unbind_link, &vma->unbind_link);
2829 list_for_each_entry_safe(__vma, next, &vma->unbind_link,
2831 list_del_init(&__vma->unbind_link);
2833 return ERR_PTR(-EINVAL);
2836 static struct xe_vma *vm_unbind_all_lookup_vmas(struct xe_vm *vm,
2839 struct xe_vma *first = NULL, *vma;
2841 lockdep_assert_held(&vm->lock);
2842 xe_bo_assert_held(bo);
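/* Chain every VMA of this BO that belongs to the VM onto the first VMA found */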
2844 list_for_each_entry(vma, &bo->vmas, bo_link) {
2845 if (xe_vma_vm(vma) != vm)
2848 prep_vma_destroy(vm, vma);
2852 list_add_tail(&vma->unbind_link, &first->unbind_link);
2858 static struct xe_vma *vm_bind_ioctl_lookup_vma(struct xe_vm *vm,
2860 u64 bo_offset_or_userptr,
2861 u64 addr, u64 range, u32 op,
2862 u64 tile_mask, u32 region)
2864 struct ww_acquire_ctx ww;
2865 struct xe_vma *vma, lookup;
2868 lockdep_assert_held(&vm->lock);
2870 lookup.start = addr;
2871 lookup.end = addr + range - 1;
2873 switch (VM_BIND_OP(op)) {
2874 case XE_VM_BIND_OP_MAP:
2876 err = xe_bo_lock(bo, &ww, 0, true);
2878 return ERR_PTR(err);
2880 vma = xe_vma_create(vm, bo, bo_offset_or_userptr, addr,
2882 op & XE_VM_BIND_FLAG_READONLY,
2883 op & XE_VM_BIND_FLAG_NULL,
2886 xe_bo_unlock(bo, &ww);
2888 return ERR_PTR(-ENOMEM);
2890 xe_vm_insert_vma(vm, vma);
2891 if (bo && !bo->vm) {
2892 vm_insert_extobj(vm, vma);
2893 err = add_preempt_fences(vm, bo);
2895 prep_vma_destroy(vm, vma);
2896 xe_vma_destroy_unlocked(vma);
2898 return ERR_PTR(err);
2902 case XE_VM_BIND_OP_UNMAP:
2903 vma = vm_unbind_lookup_vmas(vm, &lookup);
2905 case XE_VM_BIND_OP_PREFETCH:
2906 vma = vm_prefetch_lookup_vmas(vm, &lookup, region);
2908 case XE_VM_BIND_OP_UNMAP_ALL:
2911 err = xe_bo_lock(bo, &ww, 0, true);
2913 return ERR_PTR(err);
2914 vma = vm_unbind_all_lookup_vmas(vm, bo);
2916 vma = ERR_PTR(-EINVAL);
2917 xe_bo_unlock(bo, &ww);
2919 case XE_VM_BIND_OP_MAP_USERPTR:
2922 vma = xe_vma_create(vm, NULL, bo_offset_or_userptr, addr,
2924 op & XE_VM_BIND_FLAG_READONLY,
2925 op & XE_VM_BIND_FLAG_NULL,
2928 return ERR_PTR(-ENOMEM);
2930 err = xe_vma_userptr_pin_pages(vma);
2932 prep_vma_destroy(vm, vma);
2933 xe_vma_destroy_unlocked(vma);
2935 return ERR_PTR(err);
2937 xe_vm_insert_vma(vm, vma);
2941 XE_BUG_ON("NOT POSSIBLE");
2942 vma = ERR_PTR(-EINVAL);
2948 #ifdef TEST_VM_ASYNC_OPS_ERROR
2949 #define SUPPORTED_FLAGS \
2950 (FORCE_ASYNC_OP_ERROR | XE_VM_BIND_FLAG_ASYNC | \
2951 XE_VM_BIND_FLAG_READONLY | XE_VM_BIND_FLAG_IMMEDIATE | \
2952 XE_VM_BIND_FLAG_NULL | 0xffff)
2954 #define SUPPORTED_FLAGS \
2955 (XE_VM_BIND_FLAG_ASYNC | XE_VM_BIND_FLAG_READONLY | \
2956 XE_VM_BIND_FLAG_IMMEDIATE | XE_VM_BIND_FLAG_NULL | 0xffff)
2958 #define XE_64K_PAGE_MASK 0xffffull
2960 #define MAX_BINDS 512 /* FIXME: Picking random upper limit */
2962 static int vm_bind_ioctl_check_args(struct xe_device *xe,
2963 struct drm_xe_vm_bind *args,
2964 struct drm_xe_vm_bind_op **bind_ops,
2970 if (XE_IOCTL_ERR(xe, args->extensions) ||
2971 XE_IOCTL_ERR(xe, args->pad || args->pad2) ||
2972 XE_IOCTL_ERR(xe, args->reserved[0] || args->reserved[1]) ||
2973 XE_IOCTL_ERR(xe, !args->num_binds) ||
2974 XE_IOCTL_ERR(xe, args->num_binds > MAX_BINDS))
2977 if (args->num_binds > 1) {
2978 u64 __user *bind_user =
2979 u64_to_user_ptr(args->vector_of_binds);
2981 *bind_ops = kmalloc(sizeof(struct drm_xe_vm_bind_op) *
2982 args->num_binds, GFP_KERNEL);
2986 err = __copy_from_user(*bind_ops, bind_user,
2987 sizeof(struct drm_xe_vm_bind_op) *
2989 if (XE_IOCTL_ERR(xe, err)) {
2994 *bind_ops = &args->bind;
2997 for (i = 0; i < args->num_binds; ++i) {
2998 u64 range = (*bind_ops)[i].range;
2999 u64 addr = (*bind_ops)[i].addr;
3000 u32 op = (*bind_ops)[i].op;
3001 u32 obj = (*bind_ops)[i].obj;
3002 u64 obj_offset = (*bind_ops)[i].obj_offset;
3003 u32 region = (*bind_ops)[i].region;
3004 bool is_null = op & XE_VM_BIND_FLAG_NULL;
3006 if (XE_IOCTL_ERR(xe, (*bind_ops)[i].pad) ||
3007 XE_IOCTL_ERR(xe, (*bind_ops)[i].reserved[0] ||
3008 (*bind_ops)[i].reserved[1])) {
3014 *async = !!(op & XE_VM_BIND_FLAG_ASYNC);
3015 } else if (XE_IOCTL_ERR(xe, !*async) ||
3016 XE_IOCTL_ERR(xe, !(op & XE_VM_BIND_FLAG_ASYNC)) ||
3017 XE_IOCTL_ERR(xe, VM_BIND_OP(op) ==
3018 XE_VM_BIND_OP_RESTART)) {
3023 if (XE_IOCTL_ERR(xe, !*async &&
3024 VM_BIND_OP(op) == XE_VM_BIND_OP_UNMAP_ALL)) {
3029 if (XE_IOCTL_ERR(xe, !*async &&
3030 VM_BIND_OP(op) == XE_VM_BIND_OP_PREFETCH)) {
3035 if (XE_IOCTL_ERR(xe, VM_BIND_OP(op) >
3036 XE_VM_BIND_OP_PREFETCH) ||
3037 XE_IOCTL_ERR(xe, op & ~SUPPORTED_FLAGS) ||
3038 XE_IOCTL_ERR(xe, obj && is_null) ||
3039 XE_IOCTL_ERR(xe, obj_offset && is_null) ||
3040 XE_IOCTL_ERR(xe, VM_BIND_OP(op) != XE_VM_BIND_OP_MAP &&
3042 XE_IOCTL_ERR(xe, !obj &&
3043 VM_BIND_OP(op) == XE_VM_BIND_OP_MAP &&
3045 XE_IOCTL_ERR(xe, !obj &&
3046 VM_BIND_OP(op) == XE_VM_BIND_OP_UNMAP_ALL) ||
3047 XE_IOCTL_ERR(xe, addr &&
3048 VM_BIND_OP(op) == XE_VM_BIND_OP_UNMAP_ALL) ||
3049 XE_IOCTL_ERR(xe, range &&
3050 VM_BIND_OP(op) == XE_VM_BIND_OP_UNMAP_ALL) ||
3051 XE_IOCTL_ERR(xe, obj &&
3052 VM_BIND_OP(op) == XE_VM_BIND_OP_MAP_USERPTR) ||
3053 XE_IOCTL_ERR(xe, obj &&
3054 VM_BIND_OP(op) == XE_VM_BIND_OP_PREFETCH) ||
3055 XE_IOCTL_ERR(xe, region &&
3056 VM_BIND_OP(op) != XE_VM_BIND_OP_PREFETCH) ||
3057 XE_IOCTL_ERR(xe, !(BIT(region) &
3058 xe->info.mem_region_mask)) ||
3059 XE_IOCTL_ERR(xe, obj &&
3060 VM_BIND_OP(op) == XE_VM_BIND_OP_UNMAP)) {
3065 if (XE_IOCTL_ERR(xe, obj_offset & ~PAGE_MASK) ||
3066 XE_IOCTL_ERR(xe, addr & ~PAGE_MASK) ||
3067 XE_IOCTL_ERR(xe, range & ~PAGE_MASK) ||
3068 XE_IOCTL_ERR(xe, !range && VM_BIND_OP(op) !=
3069 XE_VM_BIND_OP_RESTART &&
3070 VM_BIND_OP(op) != XE_VM_BIND_OP_UNMAP_ALL)) {
3079 if (args->num_binds > 1)
3084 int xe_vm_bind_ioctl(struct drm_device *dev, void *data, struct drm_file *file)
3086 struct xe_device *xe = to_xe_device(dev);
3087 struct xe_file *xef = to_xe_file(file);
3088 struct drm_xe_vm_bind *args = data;
3089 struct drm_xe_sync __user *syncs_user;
3090 struct xe_bo **bos = NULL;
3091 struct xe_vma **vmas = NULL;
3093 struct xe_engine *e = NULL;
3095 struct xe_sync_entry *syncs = NULL;
3096 struct drm_xe_vm_bind_op *bind_ops;
3101 err = vm_bind_ioctl_check_args(xe, args, &bind_ops, &async);
3105 if (args->engine_id) {
3106 e = xe_engine_lookup(xef, args->engine_id);
3107 if (XE_IOCTL_ERR(xe, !e)) {
3112 if (XE_IOCTL_ERR(xe, !(e->flags & ENGINE_FLAG_VM))) {
3118 vm = xe_vm_lookup(xef, args->vm_id);
3119 if (XE_IOCTL_ERR(xe, !vm)) {
3124 err = down_write_killable(&vm->lock);
3128 if (XE_IOCTL_ERR(xe, xe_vm_is_closed_or_banned(vm))) {
3130 goto release_vm_lock;
3133 if (VM_BIND_OP(bind_ops[0].op) == XE_VM_BIND_OP_RESTART) {
3134 if (XE_IOCTL_ERR(xe, !(vm->flags & XE_VM_FLAG_ASYNC_BIND_OPS)))
3136 if (XE_IOCTL_ERR(xe, !err && args->num_syncs))
3138 if (XE_IOCTL_ERR(xe, !err && !vm->async_ops.error))
3142 trace_xe_vm_restart(vm);
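/* Clear the sticky async error and re-kick the worker so queued ops drain */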
3143 vm_set_async_error(vm, 0);
3145 queue_work(system_unbound_wq, &vm->async_ops.work);
3147 /* Rebinds may have been blocked, give worker a kick */
3148 if (xe_vm_in_compute_mode(vm))
3149 xe_vm_queue_rebind_worker(vm);
3152 goto release_vm_lock;
3155 if (XE_IOCTL_ERR(xe, !vm->async_ops.error &&
3156 async != !!(vm->flags & XE_VM_FLAG_ASYNC_BIND_OPS))) {
3158 goto release_vm_lock;
3161 for (i = 0; i < args->num_binds; ++i) {
3162 u64 range = bind_ops[i].range;
3163 u64 addr = bind_ops[i].addr;
3165 if (XE_IOCTL_ERR(xe, range > vm->size) ||
3166 XE_IOCTL_ERR(xe, addr > vm->size - range)) {
3168 goto release_vm_lock;
3171 if (bind_ops[i].tile_mask) {
3172 u64 valid_tiles = BIT(xe->info.tile_count) - 1;
3174 if (XE_IOCTL_ERR(xe, bind_ops[i].tile_mask &
3177 goto release_vm_lock;
3182 bos = kzalloc(sizeof(*bos) * args->num_binds, GFP_KERNEL);
3185 goto release_vm_lock;
3188 vmas = kzalloc(sizeof(*vmas) * args->num_binds, GFP_KERNEL);
3191 goto release_vm_lock;
3194 for (i = 0; i < args->num_binds; ++i) {
3195 struct drm_gem_object *gem_obj;
3196 u64 range = bind_ops[i].range;
3197 u64 addr = bind_ops[i].addr;
3198 u32 obj = bind_ops[i].obj;
3199 u64 obj_offset = bind_ops[i].obj_offset;
3204 gem_obj = drm_gem_object_lookup(file, obj);
3205 if (XE_IOCTL_ERR(xe, !gem_obj)) {
3209 bos[i] = gem_to_xe_bo(gem_obj);
3211 if (XE_IOCTL_ERR(xe, range > bos[i]->size) ||
3212 XE_IOCTL_ERR(xe, obj_offset >
3213 bos[i]->size - range)) {
3218 if (bos[i]->flags & XE_BO_INTERNAL_64K) {
3219 if (XE_IOCTL_ERR(xe, obj_offset &
3220 XE_64K_PAGE_MASK) ||
3221 XE_IOCTL_ERR(xe, addr & XE_64K_PAGE_MASK) ||
3222 XE_IOCTL_ERR(xe, range & XE_64K_PAGE_MASK)) {
3229 if (args->num_syncs) {
3230 syncs = kcalloc(args->num_syncs, sizeof(*syncs), GFP_KERNEL);
3237 syncs_user = u64_to_user_ptr(args->syncs);
3238 for (num_syncs = 0; num_syncs < args->num_syncs; num_syncs++) {
3239 err = xe_sync_entry_parse(xe, xef, &syncs[num_syncs],
3240 &syncs_user[num_syncs], false,
3241 xe_vm_in_fault_mode(vm));
3246 /* Do some error checking first to make the unwind easier */
3247 for (i = 0; i < args->num_binds; ++i) {
3248 u64 range = bind_ops[i].range;
3249 u64 addr = bind_ops[i].addr;
3250 u32 op = bind_ops[i].op;
3252 err = __vm_bind_ioctl_lookup_vma(vm, bos[i], addr, range, op);
3257 for (i = 0; i < args->num_binds; ++i) {
3258 u64 range = bind_ops[i].range;
3259 u64 addr = bind_ops[i].addr;
3260 u32 op = bind_ops[i].op;
3261 u64 obj_offset = bind_ops[i].obj_offset;
3262 u64 tile_mask = bind_ops[i].tile_mask;
3263 u32 region = bind_ops[i].region;
3265 vmas[i] = vm_bind_ioctl_lookup_vma(vm, bos[i], obj_offset,
3266 addr, range, op, tile_mask,
3268 if (IS_ERR(vmas[i])) {
3269 err = PTR_ERR(vmas[i]);
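/*
 * Distribute the ioctl's syncs across the binds: with more than one bind,
 * in-syncs go to the first bind and out-syncs to the last; binds in the
 * middle get no syncs.
 */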
3275 for (j = 0; j < args->num_binds; ++j) {
3276 struct xe_sync_entry *__syncs;
3277 u32 __num_syncs = 0;
3278 bool first_or_last = j == 0 || j == args->num_binds - 1;
3280 if (args->num_binds == 1) {
3281 __num_syncs = num_syncs;
3283 } else if (first_or_last && num_syncs) {
3284 bool first = j == 0;
3286 __syncs = kmalloc(sizeof(*__syncs) * num_syncs,
3293 /* in-syncs on first bind, out-syncs on last bind */
3294 for (i = 0; i < num_syncs; ++i) {
3295 bool signal = syncs[i].flags &
3298 if ((first && !signal) || (!first && signal))
3299 __syncs[__num_syncs++] = syncs[i];
3307 bool last = j == args->num_binds - 1;
3310  * Each pass of the async worker drops a reference, so take an extra
3311  * reference here; one set of references was already taken above.
3319 err = vm_bind_ioctl_async(vm, vmas[j], e, bos[j],
3320 bind_ops + j, __syncs,
3330 XE_BUG_ON(j != 0); /* Not supported */
3331 err = vm_bind_ioctl(vm, vmas[j], e, bos[j],
3332 bind_ops + j, __syncs,
3334 break; /* Needed so cleanup loops work */
3338 /* Most of cleanup owned by the async bind worker */
3339 if (async && !err) {
3340 up_write(&vm->lock);
3341 if (args->num_binds > 1)
3347 for (i = j; err && i < args->num_binds; ++i) {
3348 u32 op = bind_ops[i].op;
3349 struct xe_vma *vma, *next;
3354 list_for_each_entry_safe(vma, next, &vmas[i]->unbind_link,
3356 list_del_init(&vma->unbind_link);
3357 if (!vma->destroyed) {
3358 prep_vma_destroy(vm, vma);
3359 xe_vma_destroy_unlocked(vma);
3363 switch (VM_BIND_OP(op)) {
3364 case XE_VM_BIND_OP_MAP:
3365 prep_vma_destroy(vm, vmas[i]);
3366 xe_vma_destroy_unlocked(vmas[i]);
3368 case XE_VM_BIND_OP_MAP_USERPTR:
3369 prep_vma_destroy(vm, vmas[i]);
3370 xe_vma_destroy_unlocked(vmas[i]);
3375 while (num_syncs--) {
3377 !(syncs[num_syncs].flags & DRM_XE_SYNC_SIGNAL))
3378 continue; /* Still in async worker */
3379 xe_sync_entry_cleanup(&syncs[num_syncs]);
3384 for (i = j; i < args->num_binds; ++i)
3387 up_write(&vm->lock);
3396 if (args->num_binds > 1)
3402 * XXX: Using the TTM wrappers for now, likely can call into dma-resv code
3403 * directly to optimize. Also this likely should be an inline function.
3405 int xe_vm_lock(struct xe_vm *vm, struct ww_acquire_ctx *ww,
3406 int num_resv, bool intr)
3408 struct ttm_validate_buffer tv_vm;
3414 tv_vm.num_shared = num_resv;
3415 tv_vm.bo = xe_vm_ttm_bo(vm);
3416 list_add_tail(&tv_vm.head, &objs);
3418 return ttm_eu_reserve_buffers(ww, &objs, intr, &dups);
3421 void xe_vm_unlock(struct xe_vm *vm, struct ww_acquire_ctx *ww)
3423 dma_resv_unlock(&vm->resv);
3424 ww_acquire_fini(ww);
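/*
 * Minimal usage sketch (illustrative only, not taken from an actual caller):
 * reserve the VM's dma-resv through the TTM execbuf helpers, do work that
 * needs it held, then unlock.
 *
 *	struct ww_acquire_ctx ww;
 *	int err;
 *
 *	err = xe_vm_lock(vm, &ww, 0, true);
 *	if (err)
 *		return err;
 *	// ... modify state protected by the VM's dma-resv ...
 *	xe_vm_unlock(vm, &ww);
 */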
3428 * xe_vm_invalidate_vma - invalidate GPU mappings for VMA without a lock
3429 * @vma: VMA to invalidate
3431  * Walks the list of page table leaves, zeroing the entries owned by this
3432  * VMA, then invalidates the TLBs and blocks until the invalidation is complete.
3435 * Returns 0 for success, negative error code otherwise.
3437 int xe_vm_invalidate_vma(struct xe_vma *vma)
3439 struct xe_device *xe = xe_vma_vm(vma)->xe;
3440 struct xe_tile *tile;
3441 u32 tile_needs_invalidate = 0;
3442 int seqno[XE_MAX_TILES_PER_DEVICE];
3446 XE_BUG_ON(!xe_vm_in_fault_mode(xe_vma_vm(vma)));
3447 XE_WARN_ON(xe_vma_is_null(vma));
3448 trace_xe_vma_usm_invalidate(vma);
3450 /* Check that we don't race with page-table updates */
3451 if (IS_ENABLED(CONFIG_PROVE_LOCKING)) {
3452 if (xe_vma_is_userptr(vma)) {
3453 WARN_ON_ONCE(!mmu_interval_check_retry
3454 (&vma->userptr.notifier,
3455 vma->userptr.notifier_seq));
3456 WARN_ON_ONCE(!dma_resv_test_signaled(&xe_vma_vm(vma)->resv,
3457 DMA_RESV_USAGE_BOOKKEEP));
3460 xe_bo_assert_held(xe_vma_bo(vma));
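/* First pass: zap the PTEs and issue a TLB invalidation on each tile that maps this VMA */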
3464 for_each_tile(tile, xe, id) {
3465 if (xe_pt_zap_ptes(tile, vma)) {
3466 tile_needs_invalidate |= BIT(id);
3469 * FIXME: We potentially need to invalidate multiple
3470 * GTs within the tile
3472 seqno[id] = xe_gt_tlb_invalidation_vma(tile->primary_gt, NULL, vma);
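/* Second pass: wait for every TLB invalidation issued above to complete */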
3478 for_each_tile(tile, xe, id) {
3479 if (tile_needs_invalidate & BIT(id)) {
3480 ret = xe_gt_tlb_invalidation_wait(tile->primary_gt, seqno[id]);
3486 vma->usm.tile_invalidated = vma->tile_mask;
3491 int xe_analyze_vm(struct drm_printer *p, struct xe_vm *vm, int gt_id)
3493 struct rb_node *node;
3497 if (!down_read_trylock(&vm->lock)) {
3498 drm_printf(p, " Failed to acquire VM lock to dump capture");
3501 if (vm->pt_root[gt_id]) {
3502 addr = xe_bo_addr(vm->pt_root[gt_id]->bo, 0, XE_PAGE_SIZE,
3504 drm_printf(p, " VM root: A:0x%llx %s\n", addr, is_vram ? "VRAM" : "SYS");
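/* Walk the VMA tree and print each mapping's range, size and backing placement */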
3507 for (node = rb_first(&vm->vmas); node; node = rb_next(node)) {
3508 struct xe_vma *vma = to_xe_vma(node);
3509 bool is_userptr = xe_vma_is_userptr(vma);
3510 bool is_null = xe_vma_is_null(vma);
3514 } else if (is_userptr) {
3515 struct xe_res_cursor cur;
3517 if (vma->userptr.sg) {
3518 xe_res_first_sg(vma->userptr.sg, 0, XE_PAGE_SIZE,
3520 addr = xe_res_dma(&cur);
3525 addr = __xe_bo_addr(xe_vma_bo(vma), 0, XE_PAGE_SIZE, &is_vram);
3527 drm_printf(p, " [%016llx-%016llx] S:0x%016llx A:%016llx %s\n",
3528 xe_vma_start(vma), xe_vma_end(vma) - 1,
3530 addr, is_null ? "NULL" : is_userptr ? "USR" :
3531 is_vram ? "VRAM" : "SYS");