// SPDX-License-Identifier: MIT
/*
 * Copyright © 2021 Intel Corporation
 */

#include "xe_vm.h"

#include <linux/dma-fence-array.h>

#include <drm/drm_exec.h>
#include <drm/drm_print.h>
#include <drm/ttm/ttm_execbuf_util.h>
#include <drm/ttm/ttm_tt.h>
#include <drm/xe_drm.h>
#include <linux/delay.h>
#include <linux/kthread.h>
#include <linux/mm.h>
#include <linux/swap.h>

#include "xe_assert.h"
#include "xe_bo.h"
#include "xe_device.h"
#include "xe_drm_client.h"
#include "xe_exec_queue.h"
#include "xe_gt.h"
#include "xe_gt_pagefault.h"
#include "xe_gt_tlb_invalidation.h"
#include "xe_migrate.h"
#include "xe_pm.h"
#include "xe_preempt_fence.h"
#include "xe_pt.h"
#include "xe_res_cursor.h"
#include "xe_sync.h"
#include "xe_trace.h"
#include "generated/xe_wa_oob.h"
#include "xe_wa.h"

#define TEST_VM_ASYNC_OPS_ERROR
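
/*
 * Editorial note: TEST_VM_ASYNC_OPS_ERROR enables the FORCE_ASYNC_OP_ERROR
 * test path in vm_bind_ioctl_ops_create() below, which injects an error into
 * the first bind operation of a list to exercise the async error handling.
 */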

static struct drm_gem_object *xe_vm_obj(struct xe_vm *vm)
{
	return vm->gpuvm.r_obj;
}

/**
 * xe_vma_userptr_check_repin() - Advisory check for repin needed
 * @vma: The userptr vma
 *
 * Check if the userptr vma has been invalidated since last successful
 * repin. The check is advisory only and the function can be called
 * without the vm->userptr.notifier_lock held. There is no guarantee that the
 * vma userptr will remain valid after a lockless check, so typically
 * the call needs to be followed by a proper check under the notifier_lock.
 *
 * Return: 0 if userptr vma is valid, -EAGAIN otherwise; repin recommended.
 */
int xe_vma_userptr_check_repin(struct xe_vma *vma)
{
	return mmu_interval_check_retry(&vma->userptr.notifier,
					vma->userptr.notifier_seq) ?
		-EAGAIN : 0;
}

int xe_vma_userptr_pin_pages(struct xe_vma *vma)
{
	struct xe_vm *vm = xe_vma_vm(vma);
	struct xe_device *xe = vm->xe;
	const unsigned long num_pages = xe_vma_size(vma) >> PAGE_SHIFT;
	struct page **pages;
	bool in_kthread = !current->mm;
	unsigned long notifier_seq;
	int pinned, ret, i;
	bool read_only = xe_vma_read_only(vma);

	lockdep_assert_held(&vm->lock);
	xe_assert(xe, xe_vma_is_userptr(vma));
retry:
	if (vma->gpuva.flags & XE_VMA_DESTROYED)
		return 0;

	notifier_seq = mmu_interval_read_begin(&vma->userptr.notifier);
	if (notifier_seq == vma->userptr.notifier_seq)
		return 0;

	pages = kvmalloc_array(num_pages, sizeof(*pages), GFP_KERNEL);
	if (!pages)
		return -ENOMEM;

	if (vma->userptr.sg) {
		dma_unmap_sgtable(xe->drm.dev,
				  vma->userptr.sg,
				  read_only ? DMA_TO_DEVICE :
				  DMA_BIDIRECTIONAL, 0);
		sg_free_table(vma->userptr.sg);
		vma->userptr.sg = NULL;
	}

	pinned = ret = 0;
	if (in_kthread) {
		if (!mmget_not_zero(vma->userptr.notifier.mm)) {
			ret = -EFAULT;
			goto mm_closed;
		}
		kthread_use_mm(vma->userptr.notifier.mm);
	}

	while (pinned < num_pages) {
		ret = get_user_pages_fast(xe_vma_userptr(vma) +
					  pinned * PAGE_SIZE,
					  num_pages - pinned,
					  read_only ? 0 : FOLL_WRITE,
					  &pages[pinned]);
		if (ret < 0) {
			if (in_kthread)
				ret = 0;
			break;
		}
		pinned += ret;
		ret = 0;
	}

	if (in_kthread) {
		kthread_unuse_mm(vma->userptr.notifier.mm);
		mmput(vma->userptr.notifier.mm);
	}
mm_closed:
	if (ret)
		goto out;

	ret = sg_alloc_table_from_pages_segment(&vma->userptr.sgt, pages,
						pinned, 0,
						(u64)pinned << PAGE_SHIFT,
						xe_sg_segment_size(xe->drm.dev),
						GFP_KERNEL);
	if (ret) {
		vma->userptr.sg = NULL;
		goto out;
	}
	vma->userptr.sg = &vma->userptr.sgt;

	ret = dma_map_sgtable(xe->drm.dev, vma->userptr.sg,
			      read_only ? DMA_TO_DEVICE :
			      DMA_BIDIRECTIONAL,
			      DMA_ATTR_SKIP_CPU_SYNC |
			      DMA_ATTR_NO_KERNEL_MAPPING);
	if (ret) {
		sg_free_table(vma->userptr.sg);
		vma->userptr.sg = NULL;
		goto out;
	}

	for (i = 0; i < pinned; ++i) {
		if (!read_only) {
			lock_page(pages[i]);
			set_page_dirty(pages[i]);
			unlock_page(pages[i]);
		}
		mark_page_accessed(pages[i]);
	}
out:
	release_pages(pages, pinned);
	kvfree(pages);

	if (!(ret < 0)) {
		vma->userptr.notifier_seq = notifier_seq;
		if (xe_vma_userptr_check_repin(vma) == -EAGAIN)
			goto retry;
	}

	return ret < 0 ? ret : 0;
}

static bool preempt_fences_waiting(struct xe_vm *vm)
{
	struct xe_exec_queue *q;

	lockdep_assert_held(&vm->lock);
	xe_vm_assert_held(vm);

	list_for_each_entry(q, &vm->preempt.exec_queues, compute.link) {
		if (!q->compute.pfence ||
		    (q->compute.pfence && test_bit(DMA_FENCE_FLAG_ENABLE_SIGNAL_BIT,
						   &q->compute.pfence->flags))) {
			return true;
		}
	}

	return false;
}

static void free_preempt_fences(struct list_head *list)
{
	struct list_head *link, *next;

	list_for_each_safe(link, next, list)
		xe_preempt_fence_free(to_preempt_fence_from_link(link));
}

static int alloc_preempt_fences(struct xe_vm *vm, struct list_head *list,
				unsigned int *count)
{
	lockdep_assert_held(&vm->lock);
	xe_vm_assert_held(vm);

	if (*count >= vm->preempt.num_exec_queues)
		return 0;

	for (; *count < vm->preempt.num_exec_queues; ++(*count)) {
		struct xe_preempt_fence *pfence = xe_preempt_fence_alloc();

		if (IS_ERR(pfence))
			return PTR_ERR(pfence);

		list_move_tail(xe_preempt_fence_link(pfence), list);
	}

	return 0;
}

static int wait_for_existing_preempt_fences(struct xe_vm *vm)
{
	struct xe_exec_queue *q;

	xe_vm_assert_held(vm);

	list_for_each_entry(q, &vm->preempt.exec_queues, compute.link) {
		if (q->compute.pfence) {
			long timeout = dma_fence_wait(q->compute.pfence, false);

			if (timeout < 0)
				return -ETIME;
			dma_fence_put(q->compute.pfence);
			q->compute.pfence = NULL;
		}
	}

	return 0;
}

static bool xe_vm_is_idle(struct xe_vm *vm)
{
	struct xe_exec_queue *q;

	xe_vm_assert_held(vm);
	list_for_each_entry(q, &vm->preempt.exec_queues, compute.link) {
		if (!xe_exec_queue_is_idle(q))
			return false;
	}

	return true;
}

static void arm_preempt_fences(struct xe_vm *vm, struct list_head *list)
{
	struct list_head *link;
	struct xe_exec_queue *q;

	list_for_each_entry(q, &vm->preempt.exec_queues, compute.link) {
		struct dma_fence *fence;

		link = list->next;
		xe_assert(vm->xe, link != list);

		fence = xe_preempt_fence_arm(to_preempt_fence_from_link(link),
					     q, q->compute.context,
					     ++q->compute.seqno);
		dma_fence_put(q->compute.pfence);
		q->compute.pfence = fence;
	}
}

static int add_preempt_fences(struct xe_vm *vm, struct xe_bo *bo)
{
	struct xe_exec_queue *q;
	int err;

	err = xe_bo_lock(bo, true);
	if (err)
		return err;

	err = dma_resv_reserve_fences(bo->ttm.base.resv, vm->preempt.num_exec_queues);
	if (err)
		goto out_unlock;

	list_for_each_entry(q, &vm->preempt.exec_queues, compute.link)
		if (q->compute.pfence) {
			dma_resv_add_fence(bo->ttm.base.resv,
					   q->compute.pfence,
					   DMA_RESV_USAGE_BOOKKEEP);
		}

out_unlock:
	xe_bo_unlock(bo);
	return err;
}

/**
 * xe_vm_fence_all_extobjs() - Add a fence to vm's external objects' resv
 * @vm: The vm.
 * @fence: The fence to add.
 * @usage: The resv usage for the fence.
 *
 * Loops over all of the vm's external object bindings and adds a @fence
 * with the given @usage to all of the external object's reservation
 * objects.
 */
void xe_vm_fence_all_extobjs(struct xe_vm *vm, struct dma_fence *fence,
			     enum dma_resv_usage usage)
{
	struct xe_vma *vma;

	list_for_each_entry(vma, &vm->extobj.list, extobj.link)
		dma_resv_add_fence(xe_vma_bo(vma)->ttm.base.resv, fence, usage);
}

static void resume_and_reinstall_preempt_fences(struct xe_vm *vm)
{
	struct xe_exec_queue *q;

	lockdep_assert_held(&vm->lock);
	xe_vm_assert_held(vm);

	list_for_each_entry(q, &vm->preempt.exec_queues, compute.link) {
		q->ops->resume(q);

		dma_resv_add_fence(xe_vm_resv(vm), q->compute.pfence,
				   DMA_RESV_USAGE_BOOKKEEP);
		xe_vm_fence_all_extobjs(vm, q->compute.pfence,
					DMA_RESV_USAGE_BOOKKEEP);
	}
}

int xe_vm_add_compute_exec_queue(struct xe_vm *vm, struct xe_exec_queue *q)
{
	struct drm_exec exec;
	struct dma_fence *pfence;
	int err;
	bool wait;

	xe_assert(vm->xe, xe_vm_in_compute_mode(vm));

	down_write(&vm->lock);
	drm_exec_init(&exec, DRM_EXEC_INTERRUPTIBLE_WAIT);
	drm_exec_until_all_locked(&exec) {
		err = xe_vm_lock_dma_resv(vm, &exec, 1, true);
		drm_exec_retry_on_contention(&exec);
		if (err)
			goto out_unlock;
	}

	pfence = xe_preempt_fence_create(q, q->compute.context,
					 ++q->compute.seqno);
	if (!pfence) {
		err = -ENOMEM;
		goto out_unlock;
	}

	list_add(&q->compute.link, &vm->preempt.exec_queues);
	++vm->preempt.num_exec_queues;
	q->compute.pfence = pfence;

	down_read(&vm->userptr.notifier_lock);

	dma_resv_add_fence(xe_vm_resv(vm), pfence,
			   DMA_RESV_USAGE_BOOKKEEP);

	xe_vm_fence_all_extobjs(vm, pfence, DMA_RESV_USAGE_BOOKKEEP);

	/*
	 * Check to see if a preemption on the VM is in flight or a userptr
	 * invalidation has occurred; if so, trigger this preempt fence to
	 * sync state with other preempt fences on the VM.
	 */
	wait = __xe_vm_userptr_needs_repin(vm) || preempt_fences_waiting(vm);
	if (wait)
		dma_fence_enable_sw_signaling(pfence);

	up_read(&vm->userptr.notifier_lock);

out_unlock:
	drm_exec_fini(&exec);
	up_write(&vm->lock);

	return err;
}

/**
 * __xe_vm_userptr_needs_repin() - Check whether the VM does have userptrs
 * that need repinning.
 * @vm: The VM.
 *
 * This function checks for whether the VM has userptrs that need repinning,
 * and provides a release-type barrier on the userptr.notifier_lock after
 * checking.
 *
 * Return: 0 if there are no userptrs needing repinning, -EAGAIN if there are.
 */
int __xe_vm_userptr_needs_repin(struct xe_vm *vm)
{
	lockdep_assert_held_read(&vm->userptr.notifier_lock);

	return (list_empty(&vm->userptr.repin_list) &&
		list_empty(&vm->userptr.invalidated)) ? 0 : -EAGAIN;
}
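
/*
 * Usage sketch (illustrative only, not driver code): the advisory, lockless
 * check pairs with this locked, authoritative one, mirroring what the rebind
 * worker below does:
 *
 *	if (xe_vm_userptr_check_repin(vm))	// advisory, lockless
 *		err = xe_vm_userptr_pin(vm);	// repin invalidated userptrs
 *	...
 *	down_read(&vm->userptr.notifier_lock);
 *	err = __xe_vm_userptr_needs_repin(vm);	// authoritative check
 *	if (!err)
 *		;	// safe to install fences; hold the lock until done
 *	up_read(&vm->userptr.notifier_lock);
 */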

/**
 * xe_vm_lock_dma_resv() - Lock the vm dma_resv object and the dma_resv
 * objects of the vm's external buffer objects.
 * @vm: The vm.
 * @exec: Pointer to a struct drm_exec locking context.
 * @num_shared: Number of dma-fence slots to reserve in the locked objects.
 * @lock_vm: Also lock the vm's dma_resv.
 *
 * Locks the vm dma-resv objects and all the dma-resv objects of the
 * buffer objects on the vm external object list.
 *
 * Return: 0 on success, negative error code on error. In particular,
 * -EINTR or -ERESTARTSYS may be returned if an interruptible locking wait
 * is interrupted by a signal.
 */
int xe_vm_lock_dma_resv(struct xe_vm *vm, struct drm_exec *exec,
			unsigned int num_shared, bool lock_vm)
{
	struct xe_vma *vma, *next;
	int err = 0;

	lockdep_assert_held(&vm->lock);

	if (lock_vm) {
		err = drm_exec_prepare_obj(exec, xe_vm_obj(vm), num_shared);
		if (err)
			return err;
	}

	list_for_each_entry(vma, &vm->extobj.list, extobj.link) {
		err = drm_exec_prepare_obj(exec, &xe_vma_bo(vma)->ttm.base, num_shared);
		if (err)
			return err;
	}

	spin_lock(&vm->notifier.list_lock);
	list_for_each_entry_safe(vma, next, &vm->notifier.rebind_list,
				 notifier.rebind_link) {
		xe_bo_assert_held(xe_vma_bo(vma));

		list_del_init(&vma->notifier.rebind_link);
		if (vma->tile_present && !(vma->gpuva.flags & XE_VMA_DESTROYED))
			list_move_tail(&vma->combined_links.rebind,
				       &vm->rebind_list);
	}
	spin_unlock(&vm->notifier.list_lock);

	return 0;
}
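
/*
 * Locking sketch (illustrative): this helper is meant to run inside a
 * drm_exec transaction so WW-mutex contention can back off and retry, as
 * xe_vm_add_compute_exec_queue() above does:
 *
 *	drm_exec_init(&exec, DRM_EXEC_INTERRUPTIBLE_WAIT);
 *	drm_exec_until_all_locked(&exec) {
 *		err = xe_vm_lock_dma_resv(vm, &exec, 1, true);
 *		drm_exec_retry_on_contention(&exec);
 *		if (err)
 *			break;
 *	}
 *	...
 *	drm_exec_fini(&exec);
 */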

#define XE_VM_REBIND_RETRY_TIMEOUT_MS 1000

static void xe_vm_kill(struct xe_vm *vm)
{
	struct xe_exec_queue *q;

	lockdep_assert_held(&vm->lock);

	xe_vm_lock(vm, false);
	vm->flags |= XE_VM_FLAG_BANNED;
	trace_xe_vm_kill(vm);

	list_for_each_entry(q, &vm->preempt.exec_queues, compute.link)
		q->ops->kill(q);
	xe_vm_unlock(vm);

	/* TODO: Inform user the VM is banned */
}

/**
 * xe_vm_validate_should_retry() - Whether to retry after a validate error.
 * @exec: The drm_exec object used for locking before validation.
 * @err: The error returned from ttm_bo_validate().
 * @end: A ktime_t cookie that should be set to 0 before first use and
 * that should be reused on subsequent calls.
 *
 * With multiple active VMs, under memory pressure, it is possible that
 * ttm_bo_validate() runs into -EDEADLK and in such a case returns -ENOMEM.
 * Until TTM properly handles locking in such scenarios, the best thing the
 * driver can do is retry with a timeout. Check if that is necessary, and
 * if so unlock the drm_exec's objects while keeping the ticket to prepare
 * for a rerun.
 *
 * Return: true if a retry after drm_exec_init() is recommended;
 * false otherwise.
 */
bool xe_vm_validate_should_retry(struct drm_exec *exec, int err, ktime_t *end)
{
	ktime_t cur;

	if (err != -ENOMEM)
		return false;

	cur = ktime_get();
	*end = *end ? : ktime_add_ms(cur, XE_VM_REBIND_RETRY_TIMEOUT_MS);
	if (!ktime_before(cur, *end))
		return false;

	/*
	 * We would like to keep the ticket here with
	 * drm_exec_unlock_all(), but WW mutex asserts currently
	 * stop us from that. In any case this function could go away
	 * with proper TTM -EDEADLK handling.
	 */
	msleep(20);
	return true;
}
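
/*
 * Retry sketch (illustrative): callers drop all locks and restart the
 * whole transaction when a timed retry is advised, as the rebind worker
 * below does:
 *
 *	ktime_t end = 0;
 *
 * retry:
 *	drm_exec_init(&exec, DRM_EXEC_INTERRUPTIBLE_WAIT);
 *	drm_exec_until_all_locked(&exec) {
 *		err = ...;	// lock and validate buffers
 *		drm_exec_retry_on_contention(&exec);
 *	}
 *	drm_exec_fini(&exec);
 *	if (err && xe_vm_validate_should_retry(&exec, err, &end))
 *		goto retry;
 */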

static int xe_preempt_work_begin(struct drm_exec *exec, struct xe_vm *vm,
				 bool *done)
{
	struct xe_vma *vma;
	int err;

	/*
	 * 1 fence for each preempt fence plus a fence for each tile from a
	 * possible rebind.
	 */
	err = drm_exec_prepare_obj(exec, xe_vm_obj(vm),
				   vm->preempt.num_exec_queues +
				   vm->xe->info.tile_count);
	if (err)
		return err;

	if (xe_vm_is_idle(vm)) {
		vm->preempt.rebind_deactivated = true;
		*done = true;
		return 0;
	}

	if (!preempt_fences_waiting(vm)) {
		*done = true;
		return 0;
	}

	err = xe_vm_lock_dma_resv(vm, exec, vm->preempt.num_exec_queues, false);
	if (err)
		return err;

	err = wait_for_existing_preempt_fences(vm);
	if (err)
		return err;

	list_for_each_entry(vma, &vm->rebind_list, combined_links.rebind) {
		if (xe_vma_has_no_bo(vma) ||
		    vma->gpuva.flags & XE_VMA_DESTROYED)
			continue;

		err = xe_bo_validate(xe_vma_bo(vma), vm, false);
		if (err)
			break;
	}

	return err;
}

static void preempt_rebind_work_func(struct work_struct *w)
{
	struct xe_vm *vm = container_of(w, struct xe_vm, preempt.rebind_work);
	struct drm_exec exec;
	struct dma_fence *rebind_fence;
	unsigned int fence_count = 0;
	LIST_HEAD(preempt_fences);
	ktime_t end = 0;
	int err;
	long wait;
	int __maybe_unused tries = 0;

	xe_assert(vm->xe, xe_vm_in_compute_mode(vm));
	trace_xe_vm_rebind_worker_enter(vm);

	down_write(&vm->lock);

	if (xe_vm_is_closed_or_banned(vm)) {
		up_write(&vm->lock);
		trace_xe_vm_rebind_worker_exit(vm);
		return;
	}

retry:
	if (vm->async_ops.error)
		goto out_unlock_outer;

	/*
	 * Extreme corner case where we exit a VM error state with a munmap
	 * style VM unbind inflight which requires a rebind. In this case the
	 * rebind needs to install some fences into the dma-resv slots. The
	 * worker doing this is already queued; let it make progress by
	 * dropping vm->lock, then try this again.
	 */
	if (vm->async_ops.munmap_rebind_inflight) {
		up_write(&vm->lock);
		flush_work(&vm->async_ops.work);
		down_write(&vm->lock);
		goto retry;
	}

	if (xe_vm_userptr_check_repin(vm)) {
		err = xe_vm_userptr_pin(vm);
		if (err)
			goto out_unlock_outer;
	}

	drm_exec_init(&exec, DRM_EXEC_INTERRUPTIBLE_WAIT);

	drm_exec_until_all_locked(&exec) {
		bool done = false;

		err = xe_preempt_work_begin(&exec, vm, &done);
		drm_exec_retry_on_contention(&exec);
		if (err && xe_vm_validate_should_retry(&exec, err, &end)) {
			err = -EAGAIN;
			goto out_unlock_outer;
		}
		if (err || done)
			goto out_unlock;
	}

	err = alloc_preempt_fences(vm, &preempt_fences, &fence_count);
	if (err)
		goto out_unlock;

	rebind_fence = xe_vm_rebind(vm, true);
	if (IS_ERR(rebind_fence)) {
		err = PTR_ERR(rebind_fence);
		goto out_unlock;
	}

	if (rebind_fence) {
		dma_fence_wait(rebind_fence, false);
		dma_fence_put(rebind_fence);
	}

	/* Wait on munmap style VM unbinds */
	wait = dma_resv_wait_timeout(xe_vm_resv(vm),
				     DMA_RESV_USAGE_KERNEL,
				     false, MAX_SCHEDULE_TIMEOUT);
	if (wait <= 0) {
		err = -ETIME;
		goto out_unlock;
	}

#define retry_required(__tries, __vm) \
	(IS_ENABLED(CONFIG_DRM_XE_USERPTR_INVAL_INJECT) ? \
	(!(__tries)++ || __xe_vm_userptr_needs_repin(__vm)) : \
	__xe_vm_userptr_needs_repin(__vm))
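
	/*
	 * With CONFIG_DRM_XE_USERPTR_INVAL_INJECT enabled, retry_required()
	 * forces at least one extra retry pass (!(__tries)++ is true on the
	 * first evaluation), so the userptr-invalidation retry path is
	 * exercised even when no real invalidation occurred.
	 */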
	down_read(&vm->userptr.notifier_lock);
	if (retry_required(tries, vm)) {
		up_read(&vm->userptr.notifier_lock);
		err = -EAGAIN;
		goto out_unlock;
	}

#undef retry_required

	spin_lock(&vm->xe->ttm.lru_lock);
	ttm_lru_bulk_move_tail(&vm->lru_bulk_move);
	spin_unlock(&vm->xe->ttm.lru_lock);

	/* Point of no return. */
	arm_preempt_fences(vm, &preempt_fences);
	resume_and_reinstall_preempt_fences(vm);
	up_read(&vm->userptr.notifier_lock);

out_unlock:
	drm_exec_fini(&exec);
out_unlock_outer:
	if (err == -EAGAIN) {
		trace_xe_vm_rebind_worker_retry(vm);
		goto retry;
	}

	if (err) {
		drm_warn(&vm->xe->drm, "VM worker error: %d\n", err);
		xe_vm_kill(vm);
	}
	up_write(&vm->lock);

	free_preempt_fences(&preempt_fences);

	trace_xe_vm_rebind_worker_exit(vm);
}

static bool vma_userptr_invalidate(struct mmu_interval_notifier *mni,
				   const struct mmu_notifier_range *range,
				   unsigned long cur_seq)
{
	struct xe_vma *vma = container_of(mni, struct xe_vma, userptr.notifier);
	struct xe_vm *vm = xe_vma_vm(vma);
	struct dma_resv_iter cursor;
	struct dma_fence *fence;
	long err;

	xe_assert(vm->xe, xe_vma_is_userptr(vma));
	trace_xe_vma_userptr_invalidate(vma);

	if (!mmu_notifier_range_blockable(range))
		return false;

	down_write(&vm->userptr.notifier_lock);
	mmu_interval_set_seq(mni, cur_seq);

	/* No need to stop gpu access if the userptr is not yet bound. */
	if (!vma->userptr.initial_bind) {
		up_write(&vm->userptr.notifier_lock);
		return true;
	}

	/*
	 * Tell exec and rebind worker they need to repin and rebind this
	 * userptr.
	 */
	if (!xe_vm_in_fault_mode(vm) &&
	    !(vma->gpuva.flags & XE_VMA_DESTROYED) && vma->tile_present) {
		spin_lock(&vm->userptr.invalidated_lock);
		list_move_tail(&vma->userptr.invalidate_link,
			       &vm->userptr.invalidated);
		spin_unlock(&vm->userptr.invalidated_lock);
	}

	up_write(&vm->userptr.notifier_lock);

	/*
	 * Preempt fences turn into schedule disables, pipeline these.
	 * Note that even in fault mode, we need to wait for binds and
	 * unbinds to complete, and those are attached as BOOKKEEP fences
	 * to the vm.
	 */
	dma_resv_iter_begin(&cursor, xe_vm_resv(vm),
			    DMA_RESV_USAGE_BOOKKEEP);
	dma_resv_for_each_fence_unlocked(&cursor, fence)
		dma_fence_enable_sw_signaling(fence);
	dma_resv_iter_end(&cursor);

	err = dma_resv_wait_timeout(xe_vm_resv(vm),
				    DMA_RESV_USAGE_BOOKKEEP,
				    false, MAX_SCHEDULE_TIMEOUT);
	XE_WARN_ON(err <= 0);

	if (xe_vm_in_fault_mode(vm)) {
		err = xe_vm_invalidate_vma(vma);
		XE_WARN_ON(err);
	}

	trace_xe_vma_userptr_invalidate_complete(vma);

	return true;
}

static const struct mmu_interval_notifier_ops vma_userptr_notifier_ops = {
	.invalidate = vma_userptr_invalidate,
};

int xe_vm_userptr_pin(struct xe_vm *vm)
{
	struct xe_vma *vma, *next;
	int err = 0;
	LIST_HEAD(tmp_evict);

	lockdep_assert_held_write(&vm->lock);

	/* Collect invalidated userptrs */
	spin_lock(&vm->userptr.invalidated_lock);
	list_for_each_entry_safe(vma, next, &vm->userptr.invalidated,
				 userptr.invalidate_link) {
		list_del_init(&vma->userptr.invalidate_link);
		if (list_empty(&vma->combined_links.userptr))
			list_move_tail(&vma->combined_links.userptr,
				       &vm->userptr.repin_list);
	}
	spin_unlock(&vm->userptr.invalidated_lock);

	/* Pin and move to temporary list */
	list_for_each_entry_safe(vma, next, &vm->userptr.repin_list,
				 combined_links.userptr) {
		err = xe_vma_userptr_pin_pages(vma);
		if (err < 0)
			goto out_err;

		list_move_tail(&vma->combined_links.userptr, &tmp_evict);
	}

	/* Take lock and move to rebind_list for rebinding. */
	err = dma_resv_lock_interruptible(xe_vm_resv(vm), NULL);
	if (err)
		goto out_err;

	list_for_each_entry_safe(vma, next, &tmp_evict, combined_links.userptr)
		list_move_tail(&vma->combined_links.rebind, &vm->rebind_list);

	dma_resv_unlock(xe_vm_resv(vm));

	return 0;

out_err:
	list_splice_tail(&tmp_evict, &vm->userptr.repin_list);

	return err;
}

/**
 * xe_vm_userptr_check_repin() - Check whether the VM might have userptrs
 * that need repinning.
 * @vm: The VM.
 *
 * This function does an advisory check for whether the VM has userptrs that
 * need repinning.
 *
 * Return: 0 if there are no indications of userptrs needing repinning,
 * -EAGAIN if there are.
 */
int xe_vm_userptr_check_repin(struct xe_vm *vm)
{
	return (list_empty_careful(&vm->userptr.repin_list) &&
		list_empty_careful(&vm->userptr.invalidated)) ? 0 : -EAGAIN;
}

static struct dma_fence *
xe_vm_bind_vma(struct xe_vma *vma, struct xe_exec_queue *q,
	       struct xe_sync_entry *syncs, u32 num_syncs,
	       bool first_op, bool last_op);

struct dma_fence *xe_vm_rebind(struct xe_vm *vm, bool rebind_worker)
{
	struct dma_fence *fence = NULL;
	struct xe_vma *vma, *next;

	lockdep_assert_held(&vm->lock);
	if (xe_vm_no_dma_fences(vm) && !rebind_worker)
		return NULL;

	xe_vm_assert_held(vm);
	list_for_each_entry_safe(vma, next, &vm->rebind_list,
				 combined_links.rebind) {
		xe_assert(vm->xe, vma->tile_present);

		list_del_init(&vma->combined_links.rebind);
		dma_fence_put(fence);
		if (rebind_worker)
			trace_xe_vma_rebind_worker(vma);
		else
			trace_xe_vma_rebind_exec(vma);
		fence = xe_vm_bind_vma(vma, NULL, NULL, 0, false, false);
		if (IS_ERR(fence))
			return fence;
	}

	return fence;
}

static struct xe_vma *xe_vma_create(struct xe_vm *vm,
				    struct xe_bo *bo,
				    u64 bo_offset_or_userptr,
				    u64 start, u64 end,
				    bool read_only,
				    bool is_null,
				    u8 tile_mask)
{
	struct xe_vma *vma;
	struct xe_tile *tile;
	u8 id;

	xe_assert(vm->xe, start < end);
	xe_assert(vm->xe, end < vm->size);

	if (!bo && !is_null)	/* userptr */
		vma = kzalloc(sizeof(*vma), GFP_KERNEL);
	else
		vma = kzalloc(sizeof(*vma) - sizeof(struct xe_userptr),
			      GFP_KERNEL);
	if (!vma) {
		vma = ERR_PTR(-ENOMEM);
		return vma;
	}

	INIT_LIST_HEAD(&vma->combined_links.rebind);
	INIT_LIST_HEAD(&vma->notifier.rebind_link);
	INIT_LIST_HEAD(&vma->extobj.link);

	INIT_LIST_HEAD(&vma->gpuva.gem.entry);
	vma->gpuva.vm = &vm->gpuvm;
	vma->gpuva.va.addr = start;
	vma->gpuva.va.range = end - start + 1;
	if (read_only)
		vma->gpuva.flags |= XE_VMA_READ_ONLY;
	if (is_null)
		vma->gpuva.flags |= DRM_GPUVA_SPARSE;

	if (tile_mask) {
		vma->tile_mask = tile_mask;
	} else {
		for_each_tile(tile, vm->xe, id)
			vma->tile_mask |= 0x1 << id;
	}

	if (vm->xe->info.platform == XE_PVC)
		vma->gpuva.flags |= XE_VMA_ATOMIC_PTE_BIT;

	if (bo) {
		struct drm_gpuvm_bo *vm_bo;

		xe_bo_assert_held(bo);

		vm_bo = drm_gpuvm_bo_obtain(vma->gpuva.vm, &bo->ttm.base);
		if (IS_ERR(vm_bo)) {
			kfree(vma);
			return ERR_CAST(vm_bo);
		}

		drm_gem_object_get(&bo->ttm.base);
		vma->gpuva.gem.obj = &bo->ttm.base;
		vma->gpuva.gem.offset = bo_offset_or_userptr;
		drm_gpuva_link(&vma->gpuva, vm_bo);
		drm_gpuvm_bo_put(vm_bo);
	} else /* userptr or null */ {
		if (!is_null) {
			u64 size = end - start + 1;
			int err;

			INIT_LIST_HEAD(&vma->userptr.invalidate_link);
			vma->gpuva.gem.offset = bo_offset_or_userptr;

			err = mmu_interval_notifier_insert(&vma->userptr.notifier,
							   current->mm,
							   xe_vma_userptr(vma), size,
							   &vma_userptr_notifier_ops);
			if (err) {
				kfree(vma);
				vma = ERR_PTR(err);
				return vma;
			}

			vma->userptr.notifier_seq = LONG_MAX;
		}

		xe_vm_get(vm);
	}

	return vma;
}

static bool vm_remove_extobj(struct xe_vma *vma)
{
	if (!list_empty(&vma->extobj.link)) {
		xe_vma_vm(vma)->extobj.entries--;
		list_del_init(&vma->extobj.link);
		return true;
	}

	return false;
}

static void xe_vma_destroy_late(struct xe_vma *vma)
{
	struct xe_vm *vm = xe_vma_vm(vma);
	struct xe_device *xe = vm->xe;
	bool read_only = xe_vma_read_only(vma);

	if (xe_vma_is_userptr(vma)) {
		if (vma->userptr.sg) {
			dma_unmap_sgtable(xe->drm.dev,
					  vma->userptr.sg,
					  read_only ? DMA_TO_DEVICE :
					  DMA_BIDIRECTIONAL, 0);
			sg_free_table(vma->userptr.sg);
			vma->userptr.sg = NULL;
		}

		/*
		 * Since userptr pages are not pinned, we can't remove
		 * the notifier until we're sure the GPU is not accessing
		 * them anymore.
		 */
		mmu_interval_notifier_remove(&vma->userptr.notifier);
		xe_vm_put(vm);
	} else if (xe_vma_is_null(vma)) {
		xe_vm_put(vm);
	} else {
		xe_bo_put(xe_vma_bo(vma));
	}

	kfree(vma);
}

static void vma_destroy_work_func(struct work_struct *w)
{
	struct xe_vma *vma =
		container_of(w, struct xe_vma, destroy_work);

	xe_vma_destroy_late(vma);
}

static struct xe_vma *
bo_has_vm_references_locked(struct xe_bo *bo, struct xe_vm *vm,
			    struct xe_vma *ignore)
{
	struct drm_gpuvm_bo *vm_bo;
	struct drm_gpuva *va;
	struct drm_gem_object *obj = &bo->ttm.base;

	xe_bo_assert_held(bo);

	drm_gem_for_each_gpuvm_bo(vm_bo, obj) {
		drm_gpuvm_bo_for_each_va(va, vm_bo) {
			struct xe_vma *vma = gpuva_to_vma(va);

			if (vma != ignore && xe_vma_vm(vma) == vm)
				return vma;
		}
	}

	return NULL;
}

static bool bo_has_vm_references(struct xe_bo *bo, struct xe_vm *vm,
				 struct xe_vma *ignore)
{
	bool ret;

	xe_bo_lock(bo, false);
	ret = !!bo_has_vm_references_locked(bo, vm, ignore);
	xe_bo_unlock(bo);

	return ret;
}

static void __vm_insert_extobj(struct xe_vm *vm, struct xe_vma *vma)
{
	lockdep_assert_held_write(&vm->lock);

	list_add(&vma->extobj.link, &vm->extobj.list);
	vm->extobj.entries++;
}

static void vm_insert_extobj(struct xe_vm *vm, struct xe_vma *vma)
{
	struct xe_bo *bo = xe_vma_bo(vma);

	lockdep_assert_held_write(&vm->lock);

	if (bo_has_vm_references(bo, vm, vma))
		return;

	__vm_insert_extobj(vm, vma);
}

static void vma_destroy_cb(struct dma_fence *fence,
			   struct dma_fence_cb *cb)
{
	struct xe_vma *vma = container_of(cb, struct xe_vma, destroy_cb);

	INIT_WORK(&vma->destroy_work, vma_destroy_work_func);
	queue_work(system_unbound_wq, &vma->destroy_work);
}

static void xe_vma_destroy(struct xe_vma *vma, struct dma_fence *fence)
{
	struct xe_vm *vm = xe_vma_vm(vma);

	lockdep_assert_held_write(&vm->lock);
	xe_assert(vm->xe, list_empty(&vma->combined_links.destroy));

	if (xe_vma_is_userptr(vma)) {
		xe_assert(vm->xe, vma->gpuva.flags & XE_VMA_DESTROYED);

		spin_lock(&vm->userptr.invalidated_lock);
		list_del(&vma->userptr.invalidate_link);
		spin_unlock(&vm->userptr.invalidated_lock);
	} else if (!xe_vma_is_null(vma)) {
		xe_bo_assert_held(xe_vma_bo(vma));

		spin_lock(&vm->notifier.list_lock);
		list_del(&vma->notifier.rebind_link);
		spin_unlock(&vm->notifier.list_lock);

		drm_gpuva_unlink(&vma->gpuva);

		if (!xe_vma_bo(vma)->vm && vm_remove_extobj(vma)) {
			struct xe_vma *other;

			other = bo_has_vm_references_locked(xe_vma_bo(vma), vm, NULL);

			if (other)
				__vm_insert_extobj(vm, other);
		}
	}

	xe_vm_assert_held(vm);
	if (fence) {
		int ret = dma_fence_add_callback(fence, &vma->destroy_cb,
						 vma_destroy_cb);

		if (ret) {
			XE_WARN_ON(ret != -ENOENT);
			xe_vma_destroy_late(vma);
		}
	} else {
		xe_vma_destroy_late(vma);
	}
}

/**
 * xe_vm_prepare_vma() - drm_exec utility to lock a vma
 * @exec: The drm_exec object we're currently locking for.
 * @vma: The vma for which we want to lock the vm resv and any attached
 * object's resv.
 * @num_shared: The number of dma-fence slots to pre-allocate in the
 * objects' reservation objects.
 *
 * Return: 0 on success, negative error code on error. In particular
 * may return -EDEADLK on WW transaction contention and -EINTR if
 * an interruptible wait is terminated by a signal. See
 * xe_vma_destroy_unlocked() below for a typical locking loop.
 */
int xe_vm_prepare_vma(struct drm_exec *exec, struct xe_vma *vma,
		      unsigned int num_shared)
{
	struct xe_vm *vm = xe_vma_vm(vma);
	struct xe_bo *bo = xe_vma_bo(vma);
	int err;

	err = drm_exec_prepare_obj(exec, xe_vm_obj(vm), num_shared);
	if (!err && bo && !bo->vm)
		err = drm_exec_prepare_obj(exec, &bo->ttm.base, num_shared);

	return err;
}

static void xe_vma_destroy_unlocked(struct xe_vma *vma)
{
	struct drm_exec exec;
	int err;

	drm_exec_init(&exec, 0);
	drm_exec_until_all_locked(&exec) {
		err = xe_vm_prepare_vma(&exec, vma, 0);
		drm_exec_retry_on_contention(&exec);
		if (XE_WARN_ON(err))
			break;
	}

	xe_vma_destroy(vma, NULL);

	drm_exec_fini(&exec);
}
struct xe_vma *
xe_vm_find_overlapping_vma(struct xe_vm *vm, u64 start, u64 range)
{
	struct drm_gpuva *gpuva;

	lockdep_assert_held(&vm->lock);

	if (xe_vm_is_closed_or_banned(vm))
		return NULL;

	xe_assert(vm->xe, start + range <= vm->size);

	gpuva = drm_gpuva_find_first(&vm->gpuvm, start, range);

	return gpuva ? gpuva_to_vma(gpuva) : NULL;
}

static int xe_vm_insert_vma(struct xe_vm *vm, struct xe_vma *vma)
{
	int err;

	xe_assert(vm->xe, xe_vma_vm(vma) == vm);
	lockdep_assert_held(&vm->lock);

	err = drm_gpuva_insert(&vm->gpuvm, &vma->gpuva);
	XE_WARN_ON(err);	/* Shouldn't be possible */

	return err;
}

static void xe_vm_remove_vma(struct xe_vm *vm, struct xe_vma *vma)
{
	xe_assert(vm->xe, xe_vma_vm(vma) == vm);
	lockdep_assert_held(&vm->lock);

	drm_gpuva_remove(&vma->gpuva);
	if (vm->usm.last_fault_vma == vma)
		vm->usm.last_fault_vma = NULL;
}

static struct drm_gpuva_op *xe_vm_op_alloc(void)
{
	struct xe_vma_op *op;

	op = kzalloc(sizeof(*op), GFP_KERNEL);

	if (unlikely(!op))
		return NULL;

	return &op->base;
}

static void xe_vm_free(struct drm_gpuvm *gpuvm);

static struct drm_gpuvm_ops gpuvm_ops = {
	.op_alloc = xe_vm_op_alloc,
	.vm_free = xe_vm_free,
};

static u64 pde_encode_cache(struct xe_device *xe, enum xe_cache_level cache)
{
	u32 pat_index = xe->pat.idx[cache];
	u64 pte = 0;

	if (pat_index & BIT(0))
		pte |= XE_PPGTT_PTE_PAT0;

	if (pat_index & BIT(1))
		pte |= XE_PPGTT_PTE_PAT1;

	return pte;
}

static u64 pte_encode_cache(struct xe_device *xe, enum xe_cache_level cache)
{
	u32 pat_index = xe->pat.idx[cache];
	u64 pte = 0;

	if (pat_index & BIT(0))
		pte |= XE_PPGTT_PTE_PAT0;

	if (pat_index & BIT(1))
		pte |= XE_PPGTT_PTE_PAT1;

	if (pat_index & BIT(2))
		pte |= XE_PPGTT_PTE_PAT2;

	if (pat_index & BIT(3))
		pte |= XELPG_PPGTT_PTE_PAT3;

	return pte;
}
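
/*
 * Worked example (illustrative): if the PAT table maps a cache level to
 * pat_index 5 (0b0101), pte_encode_cache() sets PAT0 and PAT2. The bits of
 * the pat_index are scattered into the non-contiguous PAT bit positions of
 * the PTE rather than stored as one contiguous field.
 */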

static u64 pte_encode_ps(u32 pt_level)
{
	XE_WARN_ON(pt_level > 2);

	if (pt_level == 1)
		return XE_PDE_PS_2M;
	else if (pt_level == 2)
		return XE_PDPE_PS_1G;

	return 0;
}

static u64 xelp_pde_encode_bo(struct xe_bo *bo, u64 bo_offset,
			      const enum xe_cache_level cache)
{
	struct xe_device *xe = xe_bo_device(bo);
	u64 pde;

	pde = xe_bo_addr(bo, bo_offset, XE_PAGE_SIZE);
	pde |= XE_PAGE_PRESENT | XE_PAGE_RW;
	pde |= pde_encode_cache(xe, cache);

	return pde;
}

static u64 xelp_pte_encode_bo(struct xe_bo *bo, u64 bo_offset,
			      enum xe_cache_level cache, u32 pt_level)
{
	struct xe_device *xe = xe_bo_device(bo);
	u64 pte;

	pte = xe_bo_addr(bo, bo_offset, XE_PAGE_SIZE);
	pte |= XE_PAGE_PRESENT | XE_PAGE_RW;
	pte |= pte_encode_cache(xe, cache);
	pte |= pte_encode_ps(pt_level);

	if (xe_bo_is_vram(bo) || xe_bo_is_stolen_devmem(bo))
		pte |= XE_PPGTT_PTE_DM;

	return pte;
}

static u64 xelp_pte_encode_vma(u64 pte, struct xe_vma *vma,
			       enum xe_cache_level cache, u32 pt_level)
{
	struct xe_device *xe = xe_vma_vm(vma)->xe;

	pte |= XE_PAGE_PRESENT;

	if (likely(!xe_vma_read_only(vma)))
		pte |= XE_PAGE_RW;

	pte |= pte_encode_cache(xe, cache);
	pte |= pte_encode_ps(pt_level);

	if (unlikely(xe_vma_is_null(vma)))
		pte |= XE_PTE_NULL;

	return pte;
}

static u64 xelp_pte_encode_addr(struct xe_device *xe, u64 addr,
				enum xe_cache_level cache,
				u32 pt_level, bool devmem, u64 flags)
{
	u64 pte;

	/* Avoid passing random bits directly as flags */
	XE_WARN_ON(flags & ~XE_PTE_PS64);

	pte = addr;
	pte |= XE_PAGE_PRESENT | XE_PAGE_RW;
	pte |= pte_encode_cache(xe, cache);
	pte |= pte_encode_ps(pt_level);

	if (devmem)
		pte |= XE_PPGTT_PTE_DM;

	pte |= flags;

	return pte;
}

static const struct xe_pt_ops xelp_pt_ops = {
	.pte_encode_bo = xelp_pte_encode_bo,
	.pte_encode_vma = xelp_pte_encode_vma,
	.pte_encode_addr = xelp_pte_encode_addr,
	.pde_encode_bo = xelp_pde_encode_bo,
};

static void xe_vma_op_work_func(struct work_struct *w);
static void vm_destroy_work_func(struct work_struct *w);

struct xe_vm *xe_vm_create(struct xe_device *xe, u32 flags)
{
	struct drm_gem_object *vm_resv_obj;
	struct xe_vm *vm;
	int err, number_tiles = 0;
	struct xe_tile *tile;
	u8 id;

	vm = kzalloc(sizeof(*vm), GFP_KERNEL);
	if (!vm)
		return ERR_PTR(-ENOMEM);

	vm->xe = xe;

	vm->size = 1ull << xe->info.va_bits;

	vm->flags = flags;

	init_rwsem(&vm->lock);

	INIT_LIST_HEAD(&vm->rebind_list);

	INIT_LIST_HEAD(&vm->userptr.repin_list);
	INIT_LIST_HEAD(&vm->userptr.invalidated);
	init_rwsem(&vm->userptr.notifier_lock);
	spin_lock_init(&vm->userptr.invalidated_lock);

	INIT_LIST_HEAD(&vm->notifier.rebind_list);
	spin_lock_init(&vm->notifier.list_lock);

	INIT_LIST_HEAD(&vm->async_ops.pending);
	INIT_WORK(&vm->async_ops.work, xe_vma_op_work_func);
	spin_lock_init(&vm->async_ops.lock);

	INIT_WORK(&vm->destroy_work, vm_destroy_work_func);

	INIT_LIST_HEAD(&vm->preempt.exec_queues);
	vm->preempt.min_run_period_ms = 10;	/* FIXME: Wire up to uAPI */

	for_each_tile(tile, xe, id)
		xe_range_fence_tree_init(&vm->rftree[id]);

	INIT_LIST_HEAD(&vm->extobj.list);

	vm->pt_ops = &xelp_pt_ops;

	if (!(flags & XE_VM_FLAG_MIGRATION))
		xe_device_mem_access_get(xe);

	vm_resv_obj = drm_gpuvm_resv_object_alloc(&xe->drm);
	if (!vm_resv_obj) {
		err = -ENOMEM;
		goto err_no_resv;
	}

	drm_gpuvm_init(&vm->gpuvm, "Xe VM", 0, &xe->drm, vm_resv_obj,
		       0, vm->size, 0, 0, &gpuvm_ops);

	drm_gem_object_put(vm_resv_obj);

	err = dma_resv_lock_interruptible(xe_vm_resv(vm), NULL);
	if (err)
		goto err_close;

	if (IS_DGFX(xe) && xe->info.vram_flags & XE_VRAM_FLAGS_NEED64K)
		vm->flags |= XE_VM_FLAG_64K;

	for_each_tile(tile, xe, id) {
		if (flags & XE_VM_FLAG_MIGRATION &&
		    tile->id != XE_VM_FLAG_TILE_ID(flags))
			continue;

		vm->pt_root[id] = xe_pt_create(vm, tile, xe->info.vm_max_level);
		if (IS_ERR(vm->pt_root[id])) {
			err = PTR_ERR(vm->pt_root[id]);
			vm->pt_root[id] = NULL;
			goto err_unlock_close;
		}
	}

	if (flags & XE_VM_FLAG_SCRATCH_PAGE) {
		for_each_tile(tile, xe, id) {
			if (!vm->pt_root[id])
				continue;

			err = xe_pt_create_scratch(xe, tile, vm);
			if (err)
				goto err_unlock_close;
		}
		vm->batch_invalidate_tlb = true;
	}

	if (flags & XE_VM_FLAG_COMPUTE_MODE) {
		INIT_WORK(&vm->preempt.rebind_work, preempt_rebind_work_func);
		vm->flags |= XE_VM_FLAG_COMPUTE_MODE;
		vm->batch_invalidate_tlb = false;
	}

	if (flags & XE_VM_FLAG_ASYNC_BIND_OPS) {
		vm->async_ops.fence.context = dma_fence_context_alloc(1);
		vm->flags |= XE_VM_FLAG_ASYNC_BIND_OPS;
	}

	/* Fill pt_root after allocating scratch tables */
	for_each_tile(tile, xe, id) {
		if (!vm->pt_root[id])
			continue;

		xe_pt_populate_empty(tile, vm, vm->pt_root[id]);
	}
	dma_resv_unlock(xe_vm_resv(vm));

	/* Kernel migration VM shouldn't have a circular loop. */
	if (!(flags & XE_VM_FLAG_MIGRATION)) {
		for_each_tile(tile, xe, id) {
			struct xe_gt *gt = tile->primary_gt;
			struct xe_vm *migrate_vm;
			struct xe_exec_queue *q;

			if (!vm->pt_root[id])
				continue;

			migrate_vm = xe_migrate_get_vm(tile->migrate);
			q = xe_exec_queue_create_class(xe, gt, migrate_vm,
						       XE_ENGINE_CLASS_COPY,
						       EXEC_QUEUE_FLAG_VM);
			xe_vm_put(migrate_vm);
			if (IS_ERR(q)) {
				err = PTR_ERR(q);
				goto err_close;
			}
			vm->q[id] = q;
			number_tiles++;
		}
	}

	if (number_tiles > 1)
		vm->composite_fence_ctx = dma_fence_context_alloc(1);

	mutex_lock(&xe->usm.lock);
	if (flags & XE_VM_FLAG_FAULT_MODE)
		xe->usm.num_vm_in_fault_mode++;
	else if (!(flags & XE_VM_FLAG_MIGRATION))
		xe->usm.num_vm_in_non_fault_mode++;
	mutex_unlock(&xe->usm.lock);

	trace_xe_vm_create(vm);

	return vm;

err_unlock_close:
	dma_resv_unlock(xe_vm_resv(vm));
err_close:
	xe_vm_close_and_put(vm);
	return ERR_PTR(err);

err_no_resv:
	for_each_tile(tile, xe, id)
		xe_range_fence_tree_fini(&vm->rftree[id]);
	kfree(vm);
	if (!(flags & XE_VM_FLAG_MIGRATION))
		xe_device_mem_access_put(xe);
	return ERR_PTR(err);
}

static void flush_async_ops(struct xe_vm *vm)
{
	queue_work(system_unbound_wq, &vm->async_ops.work);
	flush_work(&vm->async_ops.work);
}

static void vm_error_capture(struct xe_vm *vm, int err,
			     u32 op, u64 addr, u64 size)
{
	struct drm_xe_vm_bind_op_error_capture capture;
	u64 __user *address =
		u64_to_user_ptr(vm->async_ops.error_capture.addr);
	bool in_kthread = !current->mm;

	capture.error = err;
	capture.op = op;
	capture.addr = addr;
	capture.size = size;

	if (in_kthread) {
		if (!mmget_not_zero(vm->async_ops.error_capture.mm))
			goto mm_closed;
		kthread_use_mm(vm->async_ops.error_capture.mm);
	}

	if (copy_to_user(address, &capture, sizeof(capture)))
		drm_warn(&vm->xe->drm, "Copy to user failed");

	if (in_kthread) {
		kthread_unuse_mm(vm->async_ops.error_capture.mm);
		mmput(vm->async_ops.error_capture.mm);
	}

mm_closed:
	wake_up_all(&vm->async_ops.error_capture.wq);
}

static void xe_vm_close(struct xe_vm *vm)
{
	down_write(&vm->lock);
	vm->size = 0;
	up_write(&vm->lock);
}

void xe_vm_close_and_put(struct xe_vm *vm)
{
	LIST_HEAD(contested);
	struct xe_device *xe = vm->xe;
	struct xe_tile *tile;
	struct xe_vma *vma, *next_vma;
	struct drm_gpuva *gpuva, *next;
	u8 id;

	xe_assert(xe, !vm->preempt.num_exec_queues);

	xe_vm_close(vm);
	flush_async_ops(vm);
	if (xe_vm_in_compute_mode(vm))
		flush_work(&vm->preempt.rebind_work);

	for_each_tile(tile, xe, id) {
		if (vm->q[id]) {
			xe_exec_queue_kill(vm->q[id]);
			xe_exec_queue_put(vm->q[id]);
			vm->q[id] = NULL;
		}
	}

	down_write(&vm->lock);
	xe_vm_lock(vm, false);
	drm_gpuvm_for_each_va_safe(gpuva, next, &vm->gpuvm) {
		vma = gpuva_to_vma(gpuva);

		if (xe_vma_has_no_bo(vma)) {
			down_read(&vm->userptr.notifier_lock);
			vma->gpuva.flags |= XE_VMA_DESTROYED;
			up_read(&vm->userptr.notifier_lock);
		}

		xe_vm_remove_vma(vm, vma);

		/* easy case, remove from VMA? */
		if (xe_vma_has_no_bo(vma) || xe_vma_bo(vma)->vm) {
			list_del_init(&vma->combined_links.rebind);
			xe_vma_destroy(vma, NULL);
			continue;
		}

		list_move_tail(&vma->combined_links.destroy, &contested);
		vma->gpuva.flags |= XE_VMA_DESTROYED;
	}

	/*
	 * All vm operations will add shared fences to resv.
	 * The only exception is eviction for a shared object,
	 * but even so, the unbind when evicted would still
	 * install a fence to resv. Hence it's safe to
	 * destroy the pagetables immediately.
	 */
	for_each_tile(tile, xe, id) {
		if (vm->scratch_bo[id]) {
			u32 i;

			xe_bo_unpin(vm->scratch_bo[id]);
			xe_bo_put(vm->scratch_bo[id]);
			for (i = 0; i < vm->pt_root[id]->level; i++)
				xe_pt_destroy(vm->scratch_pt[id][i], vm->flags,
					      NULL);
		}
		if (vm->pt_root[id]) {
			xe_pt_destroy(vm->pt_root[id], vm->flags, NULL);
			vm->pt_root[id] = NULL;
		}
	}
	xe_vm_unlock(vm);

	/*
	 * VM is now dead, cannot re-add nodes to vm->vmas if it's NULL.
	 * Since we hold a refcount to the bo, we can remove and free
	 * the members safely without locking.
	 */
	list_for_each_entry_safe(vma, next_vma, &contested,
				 combined_links.destroy) {
		list_del_init(&vma->combined_links.destroy);
		xe_vma_destroy_unlocked(vma);
	}

	if (vm->async_ops.error_capture.addr)
		wake_up_all(&vm->async_ops.error_capture.wq);

	xe_assert(xe, list_empty(&vm->extobj.list));
	up_write(&vm->lock);

	mutex_lock(&xe->usm.lock);
	if (vm->flags & XE_VM_FLAG_FAULT_MODE)
		xe->usm.num_vm_in_fault_mode--;
	else if (!(vm->flags & XE_VM_FLAG_MIGRATION))
		xe->usm.num_vm_in_non_fault_mode--;
	mutex_unlock(&xe->usm.lock);

	for_each_tile(tile, xe, id)
		xe_range_fence_tree_fini(&vm->rftree[id]);

	xe_vm_put(vm);
}

static void vm_destroy_work_func(struct work_struct *w)
{
	struct xe_vm *vm =
		container_of(w, struct xe_vm, destroy_work);
	struct xe_device *xe = vm->xe;
	struct xe_tile *tile;
	u8 id;
	void *lookup;

	/* xe_vm_close_and_put was not called? */
	xe_assert(xe, !vm->size);

	if (!(vm->flags & XE_VM_FLAG_MIGRATION)) {
		xe_device_mem_access_put(xe);

		if (xe->info.has_asid) {
			mutex_lock(&xe->usm.lock);
			lookup = xa_erase(&xe->usm.asid_to_vm, vm->usm.asid);
			xe_assert(xe, lookup == vm);
			mutex_unlock(&xe->usm.lock);
		}
	}

	for_each_tile(tile, xe, id)
		XE_WARN_ON(vm->pt_root[id]);

	trace_xe_vm_free(vm);
	dma_fence_put(vm->rebind_fence);
	kfree(vm);
}

static void xe_vm_free(struct drm_gpuvm *gpuvm)
{
	struct xe_vm *vm = container_of(gpuvm, struct xe_vm, gpuvm);

	/* To destroy the VM we need to be able to sleep */
	queue_work(system_unbound_wq, &vm->destroy_work);
}

struct xe_vm *xe_vm_lookup(struct xe_file *xef, u32 id)
{
	struct xe_vm *vm;

	mutex_lock(&xef->vm.lock);
	vm = xa_load(&xef->vm.xa, id);
	if (vm)
		xe_vm_get(vm);
	mutex_unlock(&xef->vm.lock);

	return vm;
}

u64 xe_vm_pdp4_descriptor(struct xe_vm *vm, struct xe_tile *tile)
{
	return vm->pt_ops->pde_encode_bo(vm->pt_root[tile->id]->bo, 0,
					 XE_CACHE_WB);
}

static struct dma_fence *
xe_vm_unbind_vma(struct xe_vma *vma, struct xe_exec_queue *q,
		 struct xe_sync_entry *syncs, u32 num_syncs,
		 bool first_op, bool last_op)
{
	struct xe_tile *tile;
	struct dma_fence *fence = NULL;
	struct dma_fence **fences = NULL;
	struct dma_fence_array *cf = NULL;
	struct xe_vm *vm = xe_vma_vm(vma);
	int cur_fence = 0, i;
	int number_tiles = hweight8(vma->tile_present);
	int err;
	u8 id;

	trace_xe_vma_unbind(vma);

	if (number_tiles > 1) {
		fences = kmalloc_array(number_tiles, sizeof(*fences),
				       GFP_KERNEL);
		if (!fences)
			return ERR_PTR(-ENOMEM);
	}

	for_each_tile(tile, vm->xe, id) {
		if (!(vma->tile_present & BIT(id)))
			goto next;

		fence = __xe_pt_unbind_vma(tile, vma, q, first_op ? syncs : NULL,
					   first_op ? num_syncs : 0);
		if (IS_ERR(fence)) {
			err = PTR_ERR(fence);
			goto err_fences;
		}

		if (fences)
			fences[cur_fence++] = fence;

next:
		if (q && vm->pt_root[id] && !list_empty(&q->multi_gt_list))
			q = list_next_entry(q, multi_gt_list);
	}

	if (fences) {
		cf = dma_fence_array_create(number_tiles, fences,
					    vm->composite_fence_ctx,
					    vm->composite_fence_seqno++,
					    false);
		if (!cf) {
			--vm->composite_fence_seqno;
			err = -ENOMEM;
			goto err_fences;
		}
	}

	if (last_op) {
		for (i = 0; i < num_syncs; i++)
			xe_sync_entry_signal(&syncs[i], NULL,
					     cf ? &cf->base : fence);
	}

	return cf ? &cf->base : !fence ? dma_fence_get_stub() : fence;

err_fences:
	if (fences) {
		while (cur_fence)
			/* FIXME: Rewind the previous binds? */
			dma_fence_put(fences[--cur_fence]);
		kfree(fences);
	}

	return ERR_PTR(err);
}

static struct dma_fence *
xe_vm_bind_vma(struct xe_vma *vma, struct xe_exec_queue *q,
	       struct xe_sync_entry *syncs, u32 num_syncs,
	       bool first_op, bool last_op)
{
	struct xe_tile *tile;
	struct dma_fence *fence;
	struct dma_fence **fences = NULL;
	struct dma_fence_array *cf = NULL;
	struct xe_vm *vm = xe_vma_vm(vma);
	int cur_fence = 0, i;
	int number_tiles = hweight8(vma->tile_mask);
	int err;
	u8 id;

	trace_xe_vma_bind(vma);

	if (number_tiles > 1) {
		fences = kmalloc_array(number_tiles, sizeof(*fences),
				       GFP_KERNEL);
		if (!fences)
			return ERR_PTR(-ENOMEM);
	}

	for_each_tile(tile, vm->xe, id) {
		if (!(vma->tile_mask & BIT(id)))
			goto next;

		fence = __xe_pt_bind_vma(tile, vma, q ? q : vm->q[id],
					 first_op ? syncs : NULL,
					 first_op ? num_syncs : 0,
					 vma->tile_present & BIT(id));
		if (IS_ERR(fence)) {
			err = PTR_ERR(fence);
			goto err_fences;
		}

		if (fences)
			fences[cur_fence++] = fence;

next:
		if (q && vm->pt_root[id] && !list_empty(&q->multi_gt_list))
			q = list_next_entry(q, multi_gt_list);
	}

	if (fences) {
		cf = dma_fence_array_create(number_tiles, fences,
					    vm->composite_fence_ctx,
					    vm->composite_fence_seqno++,
					    false);
		if (!cf) {
			--vm->composite_fence_seqno;
			err = -ENOMEM;
			goto err_fences;
		}
	}

	if (last_op) {
		for (i = 0; i < num_syncs; i++)
			xe_sync_entry_signal(&syncs[i], NULL,
					     cf ? &cf->base : fence);
	}

	return cf ? &cf->base : fence;

err_fences:
	if (fences) {
		while (cur_fence)
			/* FIXME: Rewind the previous binds? */
			dma_fence_put(fences[--cur_fence]);
		kfree(fences);
	}

	return ERR_PTR(err);
}

struct async_op_fence {
	struct dma_fence fence;
	struct dma_fence *wait_fence;
	struct dma_fence_cb cb;
	struct xe_vm *vm;
	wait_queue_head_t wq;
	bool started;
};

static const char *async_op_fence_get_driver_name(struct dma_fence *dma_fence)
{
	return "xe";
}

static const char *
async_op_fence_get_timeline_name(struct dma_fence *dma_fence)
{
	return "async_op_fence";
}

static const struct dma_fence_ops async_op_fence_ops = {
	.get_driver_name = async_op_fence_get_driver_name,
	.get_timeline_name = async_op_fence_get_timeline_name,
};

static void async_op_fence_cb(struct dma_fence *fence, struct dma_fence_cb *cb)
{
	struct async_op_fence *afence =
		container_of(cb, struct async_op_fence, cb);

	afence->fence.error = afence->wait_fence->error;
	dma_fence_signal(&afence->fence);
	xe_vm_put(afence->vm);
	dma_fence_put(afence->wait_fence);
	dma_fence_put(&afence->fence);
}

static void add_async_op_fence_cb(struct xe_vm *vm,
				  struct dma_fence *fence,
				  struct async_op_fence *afence)
{
	int ret;

	if (!xe_vm_no_dma_fences(vm)) {
		afence->started = true;
		smp_wmb();
		wake_up_all(&afence->wq);
	}

	afence->wait_fence = dma_fence_get(fence);
	afence->vm = xe_vm_get(vm);
	dma_fence_get(&afence->fence);
	ret = dma_fence_add_callback(fence, &afence->cb, async_op_fence_cb);
	if (ret == -ENOENT) {
		/* -ENOENT means the fence already signaled; complete inline */
		afence->fence.error = afence->wait_fence->error;
		dma_fence_signal(&afence->fence);
	}
	if (ret) {
		/* Drop the references the callback would otherwise consume */
		xe_vm_put(vm);
		dma_fence_put(afence->wait_fence);
		dma_fence_put(&afence->fence);
	}
	XE_WARN_ON(ret && ret != -ENOENT);
}

int xe_vm_async_fence_wait_start(struct dma_fence *fence)
{
	if (fence->ops == &async_op_fence_ops) {
		struct async_op_fence *afence =
			container_of(fence, struct async_op_fence, fence);

		xe_assert(afence->vm->xe, !xe_vm_no_dma_fences(afence->vm));

		smp_rmb();
		return wait_event_interruptible(afence->wq, afence->started);
	}

	return 0;
}

static int __xe_vm_bind(struct xe_vm *vm, struct xe_vma *vma,
			struct xe_exec_queue *q, struct xe_sync_entry *syncs,
			u32 num_syncs, struct async_op_fence *afence,
			bool immediate, bool first_op, bool last_op)
{
	struct dma_fence *fence;

	xe_vm_assert_held(vm);

	if (immediate) {
		fence = xe_vm_bind_vma(vma, q, syncs, num_syncs, first_op,
				       last_op);
		if (IS_ERR(fence))
			return PTR_ERR(fence);
	} else {
		int i;

		xe_assert(vm->xe, xe_vm_in_fault_mode(vm));

		fence = dma_fence_get_stub();
		if (last_op) {
			for (i = 0; i < num_syncs; i++)
				xe_sync_entry_signal(&syncs[i], NULL, fence);
		}
	}
	if (afence)
		add_async_op_fence_cb(vm, fence, afence);

	dma_fence_put(fence);

	return 0;
}

static int xe_vm_bind(struct xe_vm *vm, struct xe_vma *vma, struct xe_exec_queue *q,
		      struct xe_bo *bo, struct xe_sync_entry *syncs,
		      u32 num_syncs, struct async_op_fence *afence,
		      bool immediate, bool first_op, bool last_op)
{
	int err;

	xe_vm_assert_held(vm);
	xe_bo_assert_held(bo);

	if (bo && immediate) {
		err = xe_bo_validate(bo, vm, true);
		if (err)
			return err;
	}

	return __xe_vm_bind(vm, vma, q, syncs, num_syncs, afence, immediate,
			    first_op, last_op);
}

static int xe_vm_unbind(struct xe_vm *vm, struct xe_vma *vma,
			struct xe_exec_queue *q, struct xe_sync_entry *syncs,
			u32 num_syncs, struct async_op_fence *afence,
			bool first_op, bool last_op)
{
	struct dma_fence *fence;

	xe_vm_assert_held(vm);
	xe_bo_assert_held(xe_vma_bo(vma));

	fence = xe_vm_unbind_vma(vma, q, syncs, num_syncs, first_op, last_op);
	if (IS_ERR(fence))
		return PTR_ERR(fence);
	if (afence)
		add_async_op_fence_cb(vm, fence, afence);

	xe_vma_destroy(vma, fence);
	dma_fence_put(fence);

	return 0;
}

static int vm_set_error_capture_address(struct xe_device *xe, struct xe_vm *vm,
					u64 value)
{
	if (XE_IOCTL_DBG(xe, !value))
		return -EINVAL;

	if (XE_IOCTL_DBG(xe, !(vm->flags & XE_VM_FLAG_ASYNC_BIND_OPS)))
		return -EOPNOTSUPP;

	if (XE_IOCTL_DBG(xe, vm->async_ops.error_capture.addr))
		return -EOPNOTSUPP;

	vm->async_ops.error_capture.mm = current->mm;
	vm->async_ops.error_capture.addr = value;
	init_waitqueue_head(&vm->async_ops.error_capture.wq);

	return 0;
}

typedef int (*xe_vm_set_property_fn)(struct xe_device *xe, struct xe_vm *vm,
				     u64 value);

static const xe_vm_set_property_fn vm_set_property_funcs[] = {
	[XE_VM_PROPERTY_BIND_OP_ERROR_CAPTURE_ADDRESS] =
		vm_set_error_capture_address,
};

static int vm_user_ext_set_property(struct xe_device *xe, struct xe_vm *vm,
				    u64 extension)
{
	u64 __user *address = u64_to_user_ptr(extension);
	struct drm_xe_ext_vm_set_property ext;
	int err;

	err = __copy_from_user(&ext, address, sizeof(ext));
	if (XE_IOCTL_DBG(xe, err))
		return -EFAULT;

	if (XE_IOCTL_DBG(xe, ext.property >=
			 ARRAY_SIZE(vm_set_property_funcs)) ||
	    XE_IOCTL_DBG(xe, ext.pad) ||
	    XE_IOCTL_DBG(xe, ext.reserved[0] || ext.reserved[1]))
		return -EINVAL;

	return vm_set_property_funcs[ext.property](xe, vm, ext.value);
}

typedef int (*xe_vm_user_extension_fn)(struct xe_device *xe, struct xe_vm *vm,
				       u64 extension);

static const xe_vm_user_extension_fn vm_user_extension_funcs[] = {
	[XE_VM_EXTENSION_SET_PROPERTY] = vm_user_ext_set_property,
};

#define MAX_USER_EXTENSIONS	16
static int vm_user_extensions(struct xe_device *xe, struct xe_vm *vm,
			      u64 extensions, int ext_number)
{
	u64 __user *address = u64_to_user_ptr(extensions);
	struct xe_user_extension ext;
	int err;

	if (XE_IOCTL_DBG(xe, ext_number >= MAX_USER_EXTENSIONS))
		return -E2BIG;

	err = __copy_from_user(&ext, address, sizeof(ext));
	if (XE_IOCTL_DBG(xe, err))
		return -EFAULT;

	if (XE_IOCTL_DBG(xe, ext.pad) ||
	    XE_IOCTL_DBG(xe, ext.name >=
			 ARRAY_SIZE(vm_user_extension_funcs)))
		return -EINVAL;

	err = vm_user_extension_funcs[ext.name](xe, vm, extensions);
	if (XE_IOCTL_DBG(xe, err))
		return err;

	if (ext.next_extension)
		return vm_user_extensions(xe, vm, ext.next_extension,
					  ++ext_number);

	return 0;
}

#define ALL_DRM_XE_VM_CREATE_FLAGS (DRM_XE_VM_CREATE_SCRATCH_PAGE | \
				    DRM_XE_VM_CREATE_COMPUTE_MODE | \
				    DRM_XE_VM_CREATE_ASYNC_BIND_OPS | \
				    DRM_XE_VM_CREATE_FAULT_MODE)

int xe_vm_create_ioctl(struct drm_device *dev, void *data,
		       struct drm_file *file)
{
	struct xe_device *xe = to_xe_device(dev);
	struct xe_file *xef = to_xe_file(file);
	struct drm_xe_vm_create *args = data;
	struct xe_tile *tile;
	struct xe_vm *vm;
	u32 id, asid;
	int err;
	u32 flags = 0;

	if (XE_WA(xe_root_mmio_gt(xe), 14016763929))
		args->flags |= DRM_XE_VM_CREATE_SCRATCH_PAGE;

	if (XE_IOCTL_DBG(xe, args->flags & DRM_XE_VM_CREATE_FAULT_MODE &&
			 !xe->info.supports_usm))
		return -EINVAL;

	if (XE_IOCTL_DBG(xe, args->reserved[0] || args->reserved[1]))
		return -EINVAL;

	if (XE_IOCTL_DBG(xe, args->flags & ~ALL_DRM_XE_VM_CREATE_FLAGS))
		return -EINVAL;

	if (XE_IOCTL_DBG(xe, args->flags & DRM_XE_VM_CREATE_SCRATCH_PAGE &&
			 args->flags & DRM_XE_VM_CREATE_FAULT_MODE))
		return -EINVAL;

	if (XE_IOCTL_DBG(xe, args->flags & DRM_XE_VM_CREATE_COMPUTE_MODE &&
			 args->flags & DRM_XE_VM_CREATE_FAULT_MODE))
		return -EINVAL;

	if (XE_IOCTL_DBG(xe, args->flags & DRM_XE_VM_CREATE_FAULT_MODE &&
			 xe_device_in_non_fault_mode(xe)))
		return -EINVAL;

	if (XE_IOCTL_DBG(xe, !(args->flags & DRM_XE_VM_CREATE_FAULT_MODE) &&
			 xe_device_in_fault_mode(xe)))
		return -EINVAL;

	if (args->flags & DRM_XE_VM_CREATE_SCRATCH_PAGE)
		flags |= XE_VM_FLAG_SCRATCH_PAGE;
	if (args->flags & DRM_XE_VM_CREATE_COMPUTE_MODE)
		flags |= XE_VM_FLAG_COMPUTE_MODE;
	if (args->flags & DRM_XE_VM_CREATE_ASYNC_BIND_OPS)
		flags |= XE_VM_FLAG_ASYNC_BIND_OPS;
	if (args->flags & DRM_XE_VM_CREATE_FAULT_MODE)
		flags |= XE_VM_FLAG_FAULT_MODE;

	vm = xe_vm_create(xe, flags);
	if (IS_ERR(vm))
		return PTR_ERR(vm);

	if (args->extensions) {
		err = vm_user_extensions(xe, vm, args->extensions, 0);
		if (XE_IOCTL_DBG(xe, err)) {
			xe_vm_close_and_put(vm);
			return err;
		}
	}

	mutex_lock(&xef->vm.lock);
	err = xa_alloc(&xef->vm.xa, &id, vm, xa_limit_32b, GFP_KERNEL);
	mutex_unlock(&xef->vm.lock);
	if (err) {
		xe_vm_close_and_put(vm);
		return err;
	}

	if (xe->info.has_asid) {
		mutex_lock(&xe->usm.lock);
		err = xa_alloc_cyclic(&xe->usm.asid_to_vm, &asid, vm,
				      XA_LIMIT(0, XE_MAX_ASID - 1),
				      &xe->usm.next_asid, GFP_KERNEL);
		mutex_unlock(&xe->usm.lock);
		if (err) {
			xe_vm_close_and_put(vm);
			return err;
		}
		vm->usm.asid = asid;
	}

	args->vm_id = id;
	vm->xef = xef;

	/* Record BO memory for VM pagetable created against client */
	for_each_tile(tile, xe, id)
		if (vm->pt_root[id])
			xe_drm_client_add_bo(vm->xef->client, vm->pt_root[id]->bo);

#if IS_ENABLED(CONFIG_DRM_XE_DEBUG_MEM)
	/* Warning: Security issue - never enable by default */
	args->reserved[0] = xe_bo_main_addr(vm->pt_root[0]->bo, XE_PAGE_SIZE);
#endif

	return 0;
}

int xe_vm_destroy_ioctl(struct drm_device *dev, void *data,
			struct drm_file *file)
{
	struct xe_device *xe = to_xe_device(dev);
	struct xe_file *xef = to_xe_file(file);
	struct drm_xe_vm_destroy *args = data;
	struct xe_vm *vm;
	int err = 0;

	if (XE_IOCTL_DBG(xe, args->pad) ||
	    XE_IOCTL_DBG(xe, args->reserved[0] || args->reserved[1]))
		return -EINVAL;

	mutex_lock(&xef->vm.lock);
	vm = xa_load(&xef->vm.xa, args->vm_id);
	if (XE_IOCTL_DBG(xe, !vm))
		err = -ENOENT;
	else if (XE_IOCTL_DBG(xe, vm->preempt.num_exec_queues))
		err = -EBUSY;
	else
		xa_erase(&xef->vm.xa, args->vm_id);
	mutex_unlock(&xef->vm.lock);

	if (!err)
		xe_vm_close_and_put(vm);

	return err;
}

static const u32 region_to_mem_type[] = {
	XE_PL_TT,
	XE_PL_VRAM0,
	XE_PL_VRAM1,
};

static int xe_vm_prefetch(struct xe_vm *vm, struct xe_vma *vma,
			  struct xe_exec_queue *q, u32 region,
			  struct xe_sync_entry *syncs, u32 num_syncs,
			  struct async_op_fence *afence, bool first_op,
			  bool last_op)
{
	int err;

	xe_assert(vm->xe, region < ARRAY_SIZE(region_to_mem_type));

	if (!xe_vma_has_no_bo(vma)) {
		err = xe_bo_migrate(xe_vma_bo(vma), region_to_mem_type[region]);
		if (err)
			return err;
	}

	if (vma->tile_mask != (vma->tile_present & ~vma->usm.tile_invalidated)) {
		return xe_vm_bind(vm, vma, q, xe_vma_bo(vma), syncs, num_syncs,
				  afence, true, first_op, last_op);
	} else {
		int i;

		/* Nothing to do, signal fences now */
		if (last_op) {
			for (i = 0; i < num_syncs; i++)
				xe_sync_entry_signal(&syncs[i], NULL,
						     dma_fence_get_stub());
		}
		if (afence)
			dma_fence_signal(&afence->fence);
		return 0;
	}
}

#define VM_BIND_OP(op)	(op & 0xffff)
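
/*
 * Editorial note: bind operations pack the opcode in the low 16 bits; the
 * high bits carry modifier flags such as XE_VM_BIND_FLAG_ASYNC and
 * XE_VM_BIND_FLAG_READONLY, which is why VM_BIND_OP() masks with 0xffff
 * before switching on the opcode below.
 */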

static void vm_set_async_error(struct xe_vm *vm, int err)
{
	lockdep_assert_held(&vm->lock);
	vm->async_ops.error = err;
}

static int vm_bind_ioctl_lookup_vma(struct xe_vm *vm, struct xe_bo *bo,
				    u64 addr, u64 range, u32 op)
{
	struct xe_device *xe = vm->xe;
	struct xe_vma *vma;
	bool async = !!(op & XE_VM_BIND_FLAG_ASYNC);

	lockdep_assert_held(&vm->lock);

	switch (VM_BIND_OP(op)) {
	case XE_VM_BIND_OP_MAP:
	case XE_VM_BIND_OP_MAP_USERPTR:
		vma = xe_vm_find_overlapping_vma(vm, addr, range);
		if (XE_IOCTL_DBG(xe, vma && !async))
			return -EBUSY;
		break;
	case XE_VM_BIND_OP_UNMAP:
	case XE_VM_BIND_OP_PREFETCH:
		vma = xe_vm_find_overlapping_vma(vm, addr, range);
		if (XE_IOCTL_DBG(xe, !vma))
			/* Not an actual error, IOCTL cleans up and returns 0 */
			return -ENODATA;
		if (XE_IOCTL_DBG(xe, (xe_vma_start(vma) != addr ||
				      xe_vma_end(vma) != addr + range) && !async))
			return -EINVAL;
		break;
	case XE_VM_BIND_OP_UNMAP_ALL:
		if (XE_IOCTL_DBG(xe, list_empty(&bo->ttm.base.gpuva.list)))
			/* Not an actual error, IOCTL cleans up and returns 0 */
			return -ENODATA;
		break;
	default:
		drm_warn(&xe->drm, "NOT POSSIBLE");
		return -EINVAL;
	}

	return 0;
}

static void prep_vma_destroy(struct xe_vm *vm, struct xe_vma *vma,
			     bool post_commit)
{
	down_read(&vm->userptr.notifier_lock);
	vma->gpuva.flags |= XE_VMA_DESTROYED;
	up_read(&vm->userptr.notifier_lock);
	if (post_commit)
		xe_vm_remove_vma(vm, vma);
}

#define ULL	unsigned long long

#if IS_ENABLED(CONFIG_DRM_XE_DEBUG_VM)
static void print_op(struct xe_device *xe, struct drm_gpuva_op *op)
{
	struct xe_vma *vma;

	switch (op->op) {
	case DRM_GPUVA_OP_MAP:
		vm_dbg(&xe->drm, "MAP: addr=0x%016llx, range=0x%016llx",
		       (ULL)op->map.va.addr, (ULL)op->map.va.range);
		break;
	case DRM_GPUVA_OP_REMAP:
		vma = gpuva_to_vma(op->remap.unmap->va);
		vm_dbg(&xe->drm, "REMAP:UNMAP: addr=0x%016llx, range=0x%016llx, keep=%d",
		       (ULL)xe_vma_start(vma), (ULL)xe_vma_size(vma),
		       op->remap.unmap->keep ? 1 : 0);
		if (op->remap.prev)
			vm_dbg(&xe->drm,
			       "REMAP:PREV: addr=0x%016llx, range=0x%016llx",
			       (ULL)op->remap.prev->va.addr,
			       (ULL)op->remap.prev->va.range);
		if (op->remap.next)
			vm_dbg(&xe->drm,
			       "REMAP:NEXT: addr=0x%016llx, range=0x%016llx",
			       (ULL)op->remap.next->va.addr,
			       (ULL)op->remap.next->va.range);
		break;
	case DRM_GPUVA_OP_UNMAP:
		vma = gpuva_to_vma(op->unmap.va);
		vm_dbg(&xe->drm, "UNMAP: addr=0x%016llx, range=0x%016llx, keep=%d",
		       (ULL)xe_vma_start(vma), (ULL)xe_vma_size(vma),
		       op->unmap.keep ? 1 : 0);
		break;
	case DRM_GPUVA_OP_PREFETCH:
		vma = gpuva_to_vma(op->prefetch.va);
		vm_dbg(&xe->drm, "PREFETCH: addr=0x%016llx, range=0x%016llx",
		       (ULL)xe_vma_start(vma), (ULL)xe_vma_size(vma));
		break;
	default:
		drm_warn(&xe->drm, "NOT POSSIBLE");
	}
}
#else
static void print_op(struct xe_device *xe, struct drm_gpuva_op *op)
{
}
#endif

/*
 * Create operations list from IOCTL arguments, setting up the operation
 * fields so the parse and commit steps are decoupled from the IOCTL
 * arguments. This step can fail.
 */
static struct drm_gpuva_ops *
vm_bind_ioctl_ops_create(struct xe_vm *vm, struct xe_bo *bo,
			 u64 bo_offset_or_userptr, u64 addr, u64 range,
			 u32 operation, u8 tile_mask, u32 region)
{
	struct drm_gem_object *obj = bo ? &bo->ttm.base : NULL;
	struct drm_gpuva_ops *ops;
	struct drm_gpuva_op *__op;
	struct xe_vma_op *op;
	struct drm_gpuvm_bo *vm_bo;
	int err;

	lockdep_assert_held_write(&vm->lock);

	vm_dbg(&vm->xe->drm,
	       "op=%d, addr=0x%016llx, range=0x%016llx, bo_offset_or_userptr=0x%016llx",
	       VM_BIND_OP(operation), (ULL)addr, (ULL)range,
	       (ULL)bo_offset_or_userptr);

	switch (VM_BIND_OP(operation)) {
	case XE_VM_BIND_OP_MAP:
	case XE_VM_BIND_OP_MAP_USERPTR:
		ops = drm_gpuvm_sm_map_ops_create(&vm->gpuvm, addr, range,
						  obj, bo_offset_or_userptr);
		if (IS_ERR(ops))
			return ops;

		drm_gpuva_for_each_op(__op, ops) {
			struct xe_vma_op *op = gpuva_op_to_vma_op(__op);

			op->tile_mask = tile_mask;
			op->map.immediate =
				operation & XE_VM_BIND_FLAG_IMMEDIATE;
			op->map.read_only =
				operation & XE_VM_BIND_FLAG_READONLY;
			op->map.is_null = operation & XE_VM_BIND_FLAG_NULL;
		}
		break;
	case XE_VM_BIND_OP_UNMAP:
		ops = drm_gpuvm_sm_unmap_ops_create(&vm->gpuvm, addr, range);
		if (IS_ERR(ops))
			return ops;

		drm_gpuva_for_each_op(__op, ops) {
			struct xe_vma_op *op = gpuva_op_to_vma_op(__op);

			op->tile_mask = tile_mask;
		}
		break;
	case XE_VM_BIND_OP_PREFETCH:
		ops = drm_gpuvm_prefetch_ops_create(&vm->gpuvm, addr, range);
		if (IS_ERR(ops))
			return ops;

		drm_gpuva_for_each_op(__op, ops) {
			struct xe_vma_op *op = gpuva_op_to_vma_op(__op);

			op->tile_mask = tile_mask;
			op->prefetch.region = region;
		}
		break;
	case XE_VM_BIND_OP_UNMAP_ALL:
		xe_assert(vm->xe, bo);

		err = xe_bo_lock(bo, true);
		if (err)
			return ERR_PTR(err);

		vm_bo = drm_gpuvm_bo_find(&vm->gpuvm, obj);
		if (!vm_bo) {
			xe_bo_unlock(bo);
			return ERR_PTR(-ENODATA);
		}

		ops = drm_gpuvm_bo_unmap_ops_create(vm_bo);
		drm_gpuvm_bo_put(vm_bo);
		xe_bo_unlock(bo);
		if (IS_ERR(ops))
			return ops;

		drm_gpuva_for_each_op(__op, ops) {
			struct xe_vma_op *op = gpuva_op_to_vma_op(__op);

			op->tile_mask = tile_mask;
		}
		break;
	default:
		drm_warn(&vm->xe->drm, "NOT POSSIBLE");
		ops = ERR_PTR(-EINVAL);
	}

#ifdef TEST_VM_ASYNC_OPS_ERROR
	if (operation & FORCE_ASYNC_OP_ERROR) {
		op = list_first_entry_or_null(&ops->list, struct xe_vma_op,
					      base.entry);
		if (op)
			op->inject_error = true;
	}
#endif

	if (!IS_ERR(ops))
		drm_gpuva_for_each_op(__op, ops)
			print_op(vm->xe, __op);

	return ops;
}

static struct xe_vma *new_vma(struct xe_vm *vm, struct drm_gpuva_op_map *op,
			      u8 tile_mask, bool read_only, bool is_null)
{
	struct xe_bo *bo = op->gem.obj ? gem_to_xe_bo(op->gem.obj) : NULL;
	struct xe_vma *vma;
	int err;

	lockdep_assert_held_write(&vm->lock);

	if (bo) {
		err = xe_bo_lock(bo, true);
		if (err)
			return ERR_PTR(err);
	}
	vma = xe_vma_create(vm, bo, op->gem.offset,
			    op->va.addr, op->va.addr +
			    op->va.range - 1, read_only, is_null,
			    tile_mask);
	if (bo)
		xe_bo_unlock(bo);
	if (IS_ERR(vma))
		return vma;

	if (xe_vma_is_userptr(vma)) {
		err = xe_vma_userptr_pin_pages(vma);
		if (err) {
			prep_vma_destroy(vm, vma, false);
			xe_vma_destroy_unlocked(vma);
			return ERR_PTR(err);
		}
	} else if (!xe_vma_has_no_bo(vma) && !bo->vm) {
		vm_insert_extobj(vm, vma);
		err = add_preempt_fences(vm, bo);
		if (err) {
			prep_vma_destroy(vm, vma, false);
			xe_vma_destroy_unlocked(vma);
			return ERR_PTR(err);
		}
	}

	return vma;
}

static u64 xe_vma_max_pte_size(struct xe_vma *vma)
{
	if (vma->gpuva.flags & XE_VMA_PTE_1G)
		return SZ_1G;
	else if (vma->gpuva.flags & XE_VMA_PTE_2M)
		return SZ_2M;

	return SZ_4K;
}

static u64 xe_vma_set_pte_size(struct xe_vma *vma, u64 size)
{
	switch (size) {
	case SZ_1G:
		vma->gpuva.flags |= XE_VMA_PTE_1G;
		break;
	case SZ_2M:
		vma->gpuva.flags |= XE_VMA_PTE_2M;
		break;
	}

	return SZ_4K;
}
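
/*
 * Illustrative note: these helpers let the remap path below skip rebinding
 * an unchanged portion of a VMA. E.g. if the old VMA was bound with 2M PTEs
 * and the surviving "prev" part still ends on a 2M boundary
 * (IS_ALIGNED(xe_vma_end(vma), SZ_2M)), its page tables can be reused as-is,
 * so op->remap.skip_prev is set.
 */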

static int xe_vma_op_commit(struct xe_vm *vm, struct xe_vma_op *op)
{
	int err = 0;

	lockdep_assert_held_write(&vm->lock);

	switch (op->base.op) {
	case DRM_GPUVA_OP_MAP:
		err |= xe_vm_insert_vma(vm, op->map.vma);
		if (!err)
			op->flags |= XE_VMA_OP_COMMITTED;
		break;
	case DRM_GPUVA_OP_REMAP:
		prep_vma_destroy(vm, gpuva_to_vma(op->base.remap.unmap->va),
				 true);
		op->flags |= XE_VMA_OP_COMMITTED;

		if (op->remap.prev) {
			err |= xe_vm_insert_vma(vm, op->remap.prev);
			if (!err)
				op->flags |= XE_VMA_OP_PREV_COMMITTED;
			if (!err && op->remap.skip_prev)
				op->remap.prev = NULL;
		}
		if (op->remap.next) {
			err |= xe_vm_insert_vma(vm, op->remap.next);
			if (!err)
				op->flags |= XE_VMA_OP_NEXT_COMMITTED;
			if (!err && op->remap.skip_next)
				op->remap.next = NULL;
		}

		/* Adjust for partial unbind after removing VMA from VM */
		if (!err) {
			op->base.remap.unmap->va->va.addr = op->remap.start;
			op->base.remap.unmap->va->va.range = op->remap.range;
		}
		break;
	case DRM_GPUVA_OP_UNMAP:
		prep_vma_destroy(vm, gpuva_to_vma(op->base.unmap.va), true);
		op->flags |= XE_VMA_OP_COMMITTED;
		break;
	case DRM_GPUVA_OP_PREFETCH:
		op->flags |= XE_VMA_OP_COMMITTED;
		break;
	default:
		drm_warn(&vm->xe->drm, "NOT POSSIBLE");
	}

	return err;
}
2607 static int vm_bind_ioctl_ops_parse(struct xe_vm *vm, struct xe_exec_queue *q,
2608 struct drm_gpuva_ops *ops,
2609 struct xe_sync_entry *syncs, u32 num_syncs,
2610 struct list_head *ops_list, bool last,
2613 struct xe_vma_op *last_op = NULL;
2614 struct async_op_fence *fence = NULL;
2615 struct drm_gpuva_op *__op;
2618 lockdep_assert_held_write(&vm->lock);
2620 if (last && num_syncs && async) {
2623 fence = kmalloc(sizeof(*fence), GFP_KERNEL);
2627 seqno = q ? ++q->bind.fence_seqno : ++vm->async_ops.fence.seqno;
2628 dma_fence_init(&fence->fence, &async_op_fence_ops,
2629 &vm->async_ops.lock, q ? q->bind.fence_ctx :
2630 vm->async_ops.fence.context, seqno);
2632 if (!xe_vm_no_dma_fences(vm)) {
2634 fence->started = false;
2635 init_waitqueue_head(&fence->wq);
2639 drm_gpuva_for_each_op(__op, ops) {
2640 struct xe_vma_op *op = gpuva_op_to_vma_op(__op);
2641 bool first = list_empty(ops_list);
2643 xe_assert(vm->xe, first || async);
2645 INIT_LIST_HEAD(&op->link);
2646 list_add_tail(&op->link, ops_list);
2649 op->flags |= XE_VMA_OP_FIRST;
2650 op->num_syncs = num_syncs;
2656 switch (op->base.op) {
2657 case DRM_GPUVA_OP_MAP:
2661 vma = new_vma(vm, &op->base.map,
2662 op->tile_mask, op->map.read_only,
2672 case DRM_GPUVA_OP_REMAP:
2674 struct xe_vma *old =
2675 gpuva_to_vma(op->base.remap.unmap->va);
2677 op->remap.start = xe_vma_start(old);
2678 op->remap.range = xe_vma_size(old);
2680 if (op->base.remap.prev) {
2683 op->base.remap.unmap->va->flags &
2686 op->base.remap.unmap->va->flags &
2689 vma = new_vma(vm, op->base.remap.prev,
2690 op->tile_mask, read_only,
2697 op->remap.prev = vma;
2700 * Userptr creates a new SG mapping so
2701 * we must also rebind.
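* Skipping is only safe when the new boundary stays
* aligned to the old VMA's largest PTE size, so the
* existing page-table entries remain valid as-is.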
2703 op->remap.skip_prev = !xe_vma_is_userptr(old) &&
2704 IS_ALIGNED(xe_vma_end(vma),
2705 xe_vma_max_pte_size(old));
2706 if (op->remap.skip_prev) {
2707 xe_vma_set_pte_size(vma, xe_vma_max_pte_size(old));
2711 op->remap.start = xe_vma_end(vma);
2715 if (op->base.remap.next) {
2718 op->base.remap.unmap->va->flags &
2722 op->base.remap.unmap->va->flags &
2725 vma = new_vma(vm, op->base.remap.next,
2726 op->tile_mask, read_only,
2733 op->remap.next = vma;
2736 * Userptr creates a new SG mapping so
2737 * we must also rebind.
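* As above, the next portion may keep its existing
* page-table entries only when its start remains
* max-PTE-size aligned.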
2739 op->remap.skip_next = !xe_vma_is_userptr(old) &&
2740 IS_ALIGNED(xe_vma_start(vma),
2741 xe_vma_max_pte_size(old));
2742 if (op->remap.skip_next) {
2743 xe_vma_set_pte_size(vma, xe_vma_max_pte_size(old));
2751 case DRM_GPUVA_OP_UNMAP:
2752 case DRM_GPUVA_OP_PREFETCH:
2756 drm_warn(&vm->xe->drm, "NOT POSSIBLE");
2761 err = xe_vma_op_commit(vm, op);
2766 /* FIXME: Unhandled corner case */
2767 XE_WARN_ON(!last_op && last && !list_empty(ops_list));
2773 last_op->flags |= XE_VMA_OP_LAST;
2774 last_op->num_syncs = num_syncs;
2775 last_op->syncs = syncs;
2776 last_op->fence = fence;
2786 static int op_execute(struct drm_exec *exec, struct xe_vm *vm,
2787 struct xe_vma *vma, struct xe_vma_op *op)
2791 lockdep_assert_held_write(&vm->lock);
2793 err = xe_vm_prepare_vma(exec, vma, 1);
2797 xe_vm_assert_held(vm);
2798 xe_bo_assert_held(xe_vma_bo(vma));
2800 switch (op->base.op) {
2801 case DRM_GPUVA_OP_MAP:
2802 err = xe_vm_bind(vm, vma, op->q, xe_vma_bo(vma),
2803 op->syncs, op->num_syncs, op->fence,
2804 op->map.immediate || !xe_vm_in_fault_mode(vm),
2805 op->flags & XE_VMA_OP_FIRST,
2806 op->flags & XE_VMA_OP_LAST);
2808 case DRM_GPUVA_OP_REMAP:
2810 bool prev = !!op->remap.prev;
2811 bool next = !!op->remap.next;
2813 if (!op->remap.unmap_done) {
2815 vm->async_ops.munmap_rebind_inflight = true;
2816 vma->gpuva.flags |= XE_VMA_FIRST_REBIND;
2818 err = xe_vm_unbind(vm, vma, op->q, op->syncs,
2820 !prev && !next ? op->fence : NULL,
2821 op->flags & XE_VMA_OP_FIRST,
2822 op->flags & XE_VMA_OP_LAST && !prev &&
2826 op->remap.unmap_done = true;
2830 op->remap.prev->gpuva.flags |= XE_VMA_LAST_REBIND;
2831 err = xe_vm_bind(vm, op->remap.prev, op->q,
2832 xe_vma_bo(op->remap.prev), op->syncs,
2834 !next ? op->fence : NULL, true, false,
2835 op->flags & XE_VMA_OP_LAST && !next);
2836 op->remap.prev->gpuva.flags &= ~XE_VMA_LAST_REBIND;
2839 op->remap.prev = NULL;
2843 op->remap.next->gpuva.flags |= XE_VMA_LAST_REBIND;
2844 err = xe_vm_bind(vm, op->remap.next, op->q,
2845 xe_vma_bo(op->remap.next),
2846 op->syncs, op->num_syncs,
2847 op->fence, true, false,
2848 op->flags & XE_VMA_OP_LAST);
2849 op->remap.next->gpuva.flags &= ~XE_VMA_LAST_REBIND;
2852 op->remap.next = NULL;
2854 vm->async_ops.munmap_rebind_inflight = false;
2858 case DRM_GPUVA_OP_UNMAP:
2859 err = xe_vm_unbind(vm, vma, op->q, op->syncs,
2860 op->num_syncs, op->fence,
2861 op->flags & XE_VMA_OP_FIRST,
2862 op->flags & XE_VMA_OP_LAST);
2864 case DRM_GPUVA_OP_PREFETCH:
2865 err = xe_vm_prefetch(vm, vma, op->q, op->prefetch.region,
2866 op->syncs, op->num_syncs, op->fence,
2867 op->flags & XE_VMA_OP_FIRST,
2868 op->flags & XE_VMA_OP_LAST);
2871 drm_warn(&vm->xe->drm, "NOT POSSIBLE");
2875 trace_xe_vma_fail(vma);
2880 static int __xe_vma_op_execute(struct xe_vm *vm, struct xe_vma *vma,
2881 struct xe_vma_op *op)
2883 struct drm_exec exec;
2887 drm_exec_init(&exec, DRM_EXEC_INTERRUPTIBLE_WAIT);
2888 drm_exec_until_all_locked(&exec) {
2889 err = op_execute(&exec, vm, vma, op);
2890 drm_exec_retry_on_contention(&exec);
2894 drm_exec_fini(&exec);
2896 if (err == -EAGAIN && xe_vma_is_userptr(vma)) {
2897 lockdep_assert_held_write(&vm->lock);
2898 err = xe_vma_userptr_pin_pages(vma);
2902 trace_xe_vma_fail(vma);
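/*
 * A minimal sketch of the drm_exec idiom used in __xe_vma_op_execute()
 * (the GEM object name is hypothetical): the loop body re-runs after
 * any contended lock forces a backoff, so it must be idempotent.
 *
 *	struct drm_exec exec;
 *	int err = 0;
 *
 *	drm_exec_init(&exec, DRM_EXEC_INTERRUPTIBLE_WAIT);
 *	drm_exec_until_all_locked(&exec) {
 *		err = drm_exec_lock_obj(&exec, obj);
 *		drm_exec_retry_on_contention(&exec);
 *		if (err)
 *			break;
 *	}
 *	drm_exec_fini(&exec);
 */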
2908 static int xe_vma_op_execute(struct xe_vm *vm, struct xe_vma_op *op)
2912 lockdep_assert_held_write(&vm->lock);
2914 #ifdef TEST_VM_ASYNC_OPS_ERROR
2915 if (op->inject_error) {
2916 op->inject_error = false;
2921 switch (op->base.op) {
2922 case DRM_GPUVA_OP_MAP:
2923 ret = __xe_vma_op_execute(vm, op->map.vma, op);
2925 case DRM_GPUVA_OP_REMAP:
2929 if (!op->remap.unmap_done)
2930 vma = gpuva_to_vma(op->base.remap.unmap->va);
2931 else if (op->remap.prev)
2932 vma = op->remap.prev;
2934 vma = op->remap.next;
2936 ret = __xe_vma_op_execute(vm, vma, op);
2939 case DRM_GPUVA_OP_UNMAP:
2940 ret = __xe_vma_op_execute(vm, gpuva_to_vma(op->base.unmap.va),
2943 case DRM_GPUVA_OP_PREFETCH:
2944 ret = __xe_vma_op_execute(vm,
2945 gpuva_to_vma(op->base.prefetch.va),
2949 drm_warn(&vm->xe->drm, "NOT POSSIBLE");
2955 static void xe_vma_op_cleanup(struct xe_vm *vm, struct xe_vma_op *op)
2957 bool last = op->flags & XE_VMA_OP_LAST;
2960 while (op->num_syncs--)
2961 xe_sync_entry_cleanup(&op->syncs[op->num_syncs]);
2964 xe_exec_queue_put(op->q);
2966 dma_fence_put(&op->fence->fence);
2968 if (!list_empty(&op->link)) {
2969 spin_lock_irq(&vm->async_ops.lock);
2970 list_del(&op->link);
2971 spin_unlock_irq(&vm->async_ops.lock);
2974 drm_gpuva_ops_free(&vm->gpuvm, op->ops);
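/*
 * Undo the software state committed by xe_vma_op_commit(): destroy
 * VMAs the op created and, for committed unmaps, clear
 * XE_VMA_DESTROYED under the userptr notifier lock and re-insert the
 * VMA into the GPUVA tree.
 */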
2979 static void xe_vma_op_unwind(struct xe_vm *vm, struct xe_vma_op *op,
2980 bool post_commit, bool prev_post_commit,
2981 bool next_post_commit)
2983 lockdep_assert_held_write(&vm->lock);
2985 switch (op->base.op) {
2986 case DRM_GPUVA_OP_MAP:
2988 prep_vma_destroy(vm, op->map.vma, post_commit);
2989 xe_vma_destroy_unlocked(op->map.vma);
2992 case DRM_GPUVA_OP_UNMAP:
2994 struct xe_vma *vma = gpuva_to_vma(op->base.unmap.va);
2997 down_read(&vm->userptr.notifier_lock);
2998 vma->gpuva.flags &= ~XE_VMA_DESTROYED;
2999 up_read(&vm->userptr.notifier_lock);
3001 xe_vm_insert_vma(vm, vma);
3005 case DRM_GPUVA_OP_REMAP:
3007 struct xe_vma *vma = gpuva_to_vma(op->base.remap.unmap->va);
3009 if (op->remap.prev) {
3010 prep_vma_destroy(vm, op->remap.prev, prev_post_commit);
3011 xe_vma_destroy_unlocked(op->remap.prev);
3013 if (op->remap.next) {
3014 prep_vma_destroy(vm, op->remap.next, next_post_commit);
3015 xe_vma_destroy_unlocked(op->remap.next);
3018 down_read(&vm->userptr.notifier_lock);
3019 vma->gpuva.flags &= ~XE_VMA_DESTROYED;
3020 up_read(&vm->userptr.notifier_lock);
3022 xe_vm_insert_vma(vm, vma);
3026 case DRM_GPUVA_OP_PREFETCH:
3030 drm_warn(&vm->xe->drm, "NOT POSSIBLE");
3034 static struct xe_vma_op *next_vma_op(struct xe_vm *vm)
3036 return list_first_entry_or_null(&vm->async_ops.pending,
3037 struct xe_vma_op, link);
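/*
 * Async worker: drain vm->async_ops.pending one op at a time, execute
 * each under the write vm->lock, record the first failure so userspace
 * can observe the error and issue a RESTART, then signal the op's
 * out-fence and clean up.
 */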
3040 static void xe_vma_op_work_func(struct work_struct *w)
3042 struct xe_vm *vm = container_of(w, struct xe_vm, async_ops.work);
3045 struct xe_vma_op *op;
3048 if (vm->async_ops.error && !xe_vm_is_closed(vm))
3051 spin_lock_irq(&vm->async_ops.lock);
3052 op = next_vma_op(vm);
3053 spin_unlock_irq(&vm->async_ops.lock);
3058 if (!xe_vm_is_closed(vm)) {
3059 down_write(&vm->lock);
3060 err = xe_vma_op_execute(vm, op);
3062 drm_warn(&vm->xe->drm,
3063 "Async VM op(%d) failed with %d",
3065 vm_set_async_error(vm, err);
3066 up_write(&vm->lock);
3068 if (vm->async_ops.error_capture.addr)
3069 vm_error_capture(vm, err, 0, 0, 0);
3072 up_write(&vm->lock);
3076 switch (op->base.op) {
3077 case DRM_GPUVA_OP_REMAP:
3078 vma = gpuva_to_vma(op->base.remap.unmap->va);
3079 trace_xe_vma_flush(vma);
3081 down_write(&vm->lock);
3082 xe_vma_destroy_unlocked(vma);
3083 up_write(&vm->lock);
3085 case DRM_GPUVA_OP_UNMAP:
3086 vma = gpuva_to_vma(op->base.unmap.va);
3087 trace_xe_vma_flush(vma);
3089 down_write(&vm->lock);
3090 xe_vma_destroy_unlocked(vma);
3091 up_write(&vm->lock);
3098 if (op->fence && !test_bit(DMA_FENCE_FLAG_SIGNALED_BIT,
3099 &op->fence->fence.flags)) {
3100 if (!xe_vm_no_dma_fences(vm)) {
3101 op->fence->started = true;
3102 wake_up_all(&op->fence->wq);
3104 dma_fence_signal(&op->fence->fence);
3108 xe_vma_op_cleanup(vm, op);
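/*
 * Execute a parsed ops list: run it synchronously when the VM is not
 * in async bind mode, otherwise splice it onto the pending list for
 * the worker. On error every committed op is unwound in reverse order.
 */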
3112 static int vm_bind_ioctl_ops_execute(struct xe_vm *vm,
3113 struct list_head *ops_list, bool async)
3115 struct xe_vma_op *op, *last_op, *next;
3118 lockdep_assert_held_write(&vm->lock);
3120 list_for_each_entry(op, ops_list, link)
3124 err = xe_vma_op_execute(vm, last_op);
3127 xe_vma_op_cleanup(vm, last_op);
3130 bool installed = false;
3132 for (i = 0; i < last_op->num_syncs; i++)
3133 installed |= xe_sync_entry_signal(&last_op->syncs[i],
3135 &last_op->fence->fence);
3136 if (!installed && last_op->fence)
3137 dma_fence_signal(&last_op->fence->fence);
3139 spin_lock_irq(&vm->async_ops.lock);
3140 list_splice_tail(ops_list, &vm->async_ops.pending);
3141 spin_unlock_irq(&vm->async_ops.lock);
3143 if (!vm->async_ops.error)
3144 queue_work(system_unbound_wq, &vm->async_ops.work);
3150 list_for_each_entry_reverse(op, ops_list, link)
3151 xe_vma_op_unwind(vm, op, op->flags & XE_VMA_OP_COMMITTED,
3152 op->flags & XE_VMA_OP_PREV_COMMITTED,
3153 op->flags & XE_VMA_OP_NEXT_COMMITTED);
3154 list_for_each_entry_safe(op, next, ops_list, link)
3155 xe_vma_op_cleanup(vm, op);
3160 static void vm_bind_ioctl_ops_unwind(struct xe_vm *vm,
3161 struct drm_gpuva_ops **ops,
3166 for (i = num_ops_list - 1; i >= 0; --i) {
3167 struct drm_gpuva_ops *__ops = ops[i];
3168 struct drm_gpuva_op *__op;
3173 drm_gpuva_for_each_op_reverse(__op, __ops) {
3174 struct xe_vma_op *op = gpuva_op_to_vma_op(__op);
3176 xe_vma_op_unwind(vm, op,
3177 op->flags & XE_VMA_OP_COMMITTED,
3178 op->flags & XE_VMA_OP_PREV_COMMITTED,
3179 op->flags & XE_VMA_OP_NEXT_COMMITTED);
3182 drm_gpuva_ops_free(&vm->gpuvm, __ops);
3186 #ifdef TEST_VM_ASYNC_OPS_ERROR
3187 #define SUPPORTED_FLAGS \
3188 (FORCE_ASYNC_OP_ERROR | XE_VM_BIND_FLAG_ASYNC | \
3189 XE_VM_BIND_FLAG_READONLY | XE_VM_BIND_FLAG_IMMEDIATE | \
3190 XE_VM_BIND_FLAG_NULL | 0xffff)
3192 #define SUPPORTED_FLAGS \
3193 (XE_VM_BIND_FLAG_ASYNC | XE_VM_BIND_FLAG_READONLY | \
3194 XE_VM_BIND_FLAG_IMMEDIATE | XE_VM_BIND_FLAG_NULL | 0xffff)
3196 #define XE_64K_PAGE_MASK 0xffffull
3198 #define MAX_BINDS 512 /* FIXME: Arbitrarily chosen upper limit */
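/*
 * Validate the bind ioctl arguments before any locks are taken: copy
 * in the bind-op array when more than one bind is supplied and reject
 * flag/op combinations the interface does not support.
 */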
3200 static int vm_bind_ioctl_check_args(struct xe_device *xe,
3201 struct drm_xe_vm_bind *args,
3202 struct drm_xe_vm_bind_op **bind_ops,
3208 if (XE_IOCTL_DBG(xe, args->extensions) ||
3209 XE_IOCTL_DBG(xe, !args->num_binds) ||
3210 XE_IOCTL_DBG(xe, args->num_binds > MAX_BINDS))
3213 if (args->num_binds > 1) {
3214 u64 __user *bind_user =
3215 u64_to_user_ptr(args->vector_of_binds);
3217 *bind_ops = kmalloc_array(args->num_binds,
3218 sizeof(struct drm_xe_vm_bind_op), GFP_KERNEL);
3222 err = copy_from_user(*bind_ops, bind_user,
3223 sizeof(struct drm_xe_vm_bind_op) *
3225 if (XE_IOCTL_DBG(xe, err)) {
3230 *bind_ops = &args->bind;
3233 for (i = 0; i < args->num_binds; ++i) {
3234 u64 range = (*bind_ops)[i].range;
3235 u64 addr = (*bind_ops)[i].addr;
3236 u32 op = (*bind_ops)[i].op;
3237 u32 obj = (*bind_ops)[i].obj;
3238 u64 obj_offset = (*bind_ops)[i].obj_offset;
3239 u32 region = (*bind_ops)[i].region;
3240 bool is_null = op & XE_VM_BIND_FLAG_NULL;
3243 *async = !!(op & XE_VM_BIND_FLAG_ASYNC);
3244 } else if (XE_IOCTL_DBG(xe, !*async) ||
3245 XE_IOCTL_DBG(xe, !(op & XE_VM_BIND_FLAG_ASYNC)) ||
3246 XE_IOCTL_DBG(xe, VM_BIND_OP(op) ==
3247 XE_VM_BIND_OP_RESTART)) {
3252 if (XE_IOCTL_DBG(xe, !*async &&
3253 VM_BIND_OP(op) == XE_VM_BIND_OP_UNMAP_ALL)) {
3258 if (XE_IOCTL_DBG(xe, !*async &&
3259 VM_BIND_OP(op) == XE_VM_BIND_OP_PREFETCH)) {
3264 if (XE_IOCTL_DBG(xe, VM_BIND_OP(op) >
3265 XE_VM_BIND_OP_PREFETCH) ||
3266 XE_IOCTL_DBG(xe, op & ~SUPPORTED_FLAGS) ||
3267 XE_IOCTL_DBG(xe, obj && is_null) ||
3268 XE_IOCTL_DBG(xe, obj_offset && is_null) ||
3269 XE_IOCTL_DBG(xe, VM_BIND_OP(op) != XE_VM_BIND_OP_MAP &&
3271 XE_IOCTL_DBG(xe, !obj &&
3272 VM_BIND_OP(op) == XE_VM_BIND_OP_MAP &&
3274 XE_IOCTL_DBG(xe, !obj &&
3275 VM_BIND_OP(op) == XE_VM_BIND_OP_UNMAP_ALL) ||
3276 XE_IOCTL_DBG(xe, addr &&
3277 VM_BIND_OP(op) == XE_VM_BIND_OP_UNMAP_ALL) ||
3278 XE_IOCTL_DBG(xe, range &&
3279 VM_BIND_OP(op) == XE_VM_BIND_OP_UNMAP_ALL) ||
3280 XE_IOCTL_DBG(xe, obj &&
3281 VM_BIND_OP(op) == XE_VM_BIND_OP_MAP_USERPTR) ||
3282 XE_IOCTL_DBG(xe, obj &&
3283 VM_BIND_OP(op) == XE_VM_BIND_OP_PREFETCH) ||
3284 XE_IOCTL_DBG(xe, region &&
3285 VM_BIND_OP(op) != XE_VM_BIND_OP_PREFETCH) ||
3286 XE_IOCTL_DBG(xe, !(BIT(region) &
3287 xe->info.mem_region_mask)) ||
3288 XE_IOCTL_DBG(xe, obj &&
3289 VM_BIND_OP(op) == XE_VM_BIND_OP_UNMAP)) {
3294 if (XE_IOCTL_DBG(xe, obj_offset & ~PAGE_MASK) ||
3295 XE_IOCTL_DBG(xe, addr & ~PAGE_MASK) ||
3296 XE_IOCTL_DBG(xe, range & ~PAGE_MASK) ||
3297 XE_IOCTL_DBG(xe, !range && VM_BIND_OP(op) !=
3298 XE_VM_BIND_OP_RESTART &&
3299 VM_BIND_OP(op) != XE_VM_BIND_OP_UNMAP_ALL)) {
3308 if (args->num_binds > 1)
3313 int xe_vm_bind_ioctl(struct drm_device *dev, void *data, struct drm_file *file)
3315 struct xe_device *xe = to_xe_device(dev);
3316 struct xe_file *xef = to_xe_file(file);
3317 struct drm_xe_vm_bind *args = data;
3318 struct drm_xe_sync __user *syncs_user;
3319 struct xe_bo **bos = NULL;
3320 struct drm_gpuva_ops **ops = NULL;
3322 struct xe_exec_queue *q = NULL;
3324 struct xe_sync_entry *syncs = NULL;
3325 struct drm_xe_vm_bind_op *bind_ops;
3326 LIST_HEAD(ops_list);
3331 err = vm_bind_ioctl_check_args(xe, args, &bind_ops, &async);
3335 if (args->exec_queue_id) {
3336 q = xe_exec_queue_lookup(xef, args->exec_queue_id);
3337 if (XE_IOCTL_DBG(xe, !q)) {
3342 if (XE_IOCTL_DBG(xe, !(q->flags & EXEC_QUEUE_FLAG_VM))) {
3344 goto put_exec_queue;
3348 vm = xe_vm_lookup(xef, args->vm_id);
3349 if (XE_IOCTL_DBG(xe, !vm)) {
3351 goto put_exec_queue;
3354 err = down_write_killable(&vm->lock);
3358 if (XE_IOCTL_DBG(xe, xe_vm_is_closed_or_banned(vm))) {
3360 goto release_vm_lock;
3363 if (VM_BIND_OP(bind_ops[0].op) == XE_VM_BIND_OP_RESTART) {
3364 if (XE_IOCTL_DBG(xe, !(vm->flags & XE_VM_FLAG_ASYNC_BIND_OPS)))
3366 if (XE_IOCTL_DBG(xe, !err && args->num_syncs))
3368 if (XE_IOCTL_DBG(xe, !err && !vm->async_ops.error))
3372 trace_xe_vm_restart(vm);
3373 vm_set_async_error(vm, 0);
3375 queue_work(system_unbound_wq, &vm->async_ops.work);
3377 /* Rebinds may have been blocked, give worker a kick */
3378 if (xe_vm_in_compute_mode(vm))
3379 xe_vm_queue_rebind_worker(vm);
3382 goto release_vm_lock;
3385 if (XE_IOCTL_DBG(xe, !vm->async_ops.error &&
3386 async != !!(vm->flags & XE_VM_FLAG_ASYNC_BIND_OPS))) {
3388 goto release_vm_lock;
3391 for (i = 0; i < args->num_binds; ++i) {
3392 u64 range = bind_ops[i].range;
3393 u64 addr = bind_ops[i].addr;
3395 if (XE_IOCTL_DBG(xe, range > vm->size) ||
3396 XE_IOCTL_DBG(xe, addr > vm->size - range)) {
3398 goto release_vm_lock;
3401 if (bind_ops[i].tile_mask) {
3402 u64 valid_tiles = BIT(xe->info.tile_count) - 1;
3404 if (XE_IOCTL_DBG(xe, bind_ops[i].tile_mask &
3407 goto release_vm_lock;
3412 bos = kcalloc(args->num_binds, sizeof(*bos), GFP_KERNEL);
3415 goto release_vm_lock;
3418 ops = kcalloc(args->num_binds, sizeof(*ops), GFP_KERNEL);
3421 goto release_vm_lock;
3424 for (i = 0; i < args->num_binds; ++i) {
3425 struct drm_gem_object *gem_obj;
3426 u64 range = bind_ops[i].range;
3427 u64 addr = bind_ops[i].addr;
3428 u32 obj = bind_ops[i].obj;
3429 u64 obj_offset = bind_ops[i].obj_offset;
3434 gem_obj = drm_gem_object_lookup(file, obj);
3435 if (XE_IOCTL_DBG(xe, !gem_obj)) {
3439 bos[i] = gem_to_xe_bo(gem_obj);
3441 if (XE_IOCTL_DBG(xe, range > bos[i]->size) ||
3442 XE_IOCTL_DBG(xe, obj_offset >
3443 bos[i]->size - range)) {
3448 if (bos[i]->flags & XE_BO_INTERNAL_64K) {
3449 if (XE_IOCTL_DBG(xe, obj_offset &
3450 XE_64K_PAGE_MASK) ||
3451 XE_IOCTL_DBG(xe, addr & XE_64K_PAGE_MASK) ||
3452 XE_IOCTL_DBG(xe, range & XE_64K_PAGE_MASK)) {
3459 if (args->num_syncs) {
3460 syncs = kcalloc(args->num_syncs, sizeof(*syncs), GFP_KERNEL);
3467 syncs_user = u64_to_user_ptr(args->syncs);
3468 for (num_syncs = 0; num_syncs < args->num_syncs; num_syncs++) {
3469 err = xe_sync_entry_parse(xe, xef, &syncs[num_syncs],
3470 &syncs_user[num_syncs], false,
3471 xe_vm_no_dma_fences(vm));
3476 /* Do some error checking first to make the unwind easier */
3477 for (i = 0; i < args->num_binds; ++i) {
3478 u64 range = bind_ops[i].range;
3479 u64 addr = bind_ops[i].addr;
3480 u32 op = bind_ops[i].op;
3482 err = vm_bind_ioctl_lookup_vma(vm, bos[i], addr, range, op);
3487 for (i = 0; i < args->num_binds; ++i) {
3488 u64 range = bind_ops[i].range;
3489 u64 addr = bind_ops[i].addr;
3490 u32 op = bind_ops[i].op;
3491 u64 obj_offset = bind_ops[i].obj_offset;
3492 u8 tile_mask = bind_ops[i].tile_mask;
3493 u32 region = bind_ops[i].region;
3495 ops[i] = vm_bind_ioctl_ops_create(vm, bos[i], obj_offset,
3496 addr, range, op, tile_mask,
3498 if (IS_ERR(ops[i])) {
3499 err = PTR_ERR(ops[i]);
3504 err = vm_bind_ioctl_ops_parse(vm, q, ops[i], syncs, num_syncs,
3506 i == args->num_binds - 1,
3513 if (list_empty(&ops_list)) {
3518 err = vm_bind_ioctl_ops_execute(vm, &ops_list, async);
3519 up_write(&vm->lock);
3521 for (i = 0; i < args->num_binds; ++i)
3526 if (args->num_binds > 1)
3532 vm_bind_ioctl_ops_unwind(vm, ops, args->num_binds);
3534 for (i = 0; err == -ENODATA && i < num_syncs; i++)
3535 xe_sync_entry_signal(&syncs[i], NULL, dma_fence_get_stub());
3537 xe_sync_entry_cleanup(&syncs[num_syncs]);
3541 for (i = 0; i < args->num_binds; ++i)
3544 up_write(&vm->lock);
3549 xe_exec_queue_put(q);
3553 if (args->num_binds > 1)
3555 return err == -ENODATA ? 0 : err;
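/*
 * A minimal userspace-side sketch of a single synchronous MAP through
 * this ioctl, using only fields checked above (handles and addresses
 * are illustrative; error handling and syncs are elided):
 *
 *	struct drm_xe_vm_bind args = {};
 *
 *	args.vm_id = vm_id;
 *	args.num_binds = 1;
 *	args.bind.obj = bo_handle;
 *	args.bind.obj_offset = 0;
 *	args.bind.addr = 0x1a0000;	page-aligned
 *	args.bind.range = 0x10000;	page-aligned
 *	args.bind.op = XE_VM_BIND_OP_MAP;
 *	drmIoctl(fd, DRM_IOCTL_XE_VM_BIND, &args);
 */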
3559 * xe_vm_lock() - Lock the vm's dma_resv object
3560 * @vm: The struct xe_vm whose dma_resv object is to be locked
3561 * @intr: Whether waits should be interruptible
3563 * Return: 0 on success, -EINTR if @intr is true and the wait for a
3564 * contended lock was interrupted. If @intr is false, the function always succeeds.
3567 int xe_vm_lock(struct xe_vm *vm, bool intr)
3570 return dma_resv_lock_interruptible(xe_vm_resv(vm), NULL);
3572 return dma_resv_lock(xe_vm_resv(vm), NULL);
3576 * xe_vm_unlock() - Unlock the vm's dma_resv object
3577 * @vm: The struct xe_vm whose lock is to be released.
3579 * Unlock the vm's dma_resv object that was locked by xe_vm_lock().
3581 void xe_vm_unlock(struct xe_vm *vm)
3583 dma_resv_unlock(xe_vm_resv(vm));
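/*
 * A minimal usage sketch for the pair above (the caller and critical
 * section are hypothetical):
 *
 *	int err = xe_vm_lock(vm, true);
 *
 *	if (err)
 *		return err;	-EINTR, only possible when intr == true
 *	... touch state protected by the vm's dma_resv ...
 *	xe_vm_unlock(vm);
 */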
3587 * xe_vm_invalidate_vma - invalidate GPU mappings for VMA without a lock
3588 * @vma: VMA to invalidate
3590 * Walks the page-table leaves, zeroing the entries owned by this VMA,
3591 * invalidates the TLBs, and blocks until the TLB invalidation is complete.
3594 * Return: 0 on success, negative error code otherwise.
3596 int xe_vm_invalidate_vma(struct xe_vma *vma)
3598 struct xe_device *xe = xe_vma_vm(vma)->xe;
3599 struct xe_tile *tile;
3600 u32 tile_needs_invalidate = 0;
3601 int seqno[XE_MAX_TILES_PER_DEVICE];
3605 xe_assert(xe, xe_vm_in_fault_mode(xe_vma_vm(vma)));
3606 xe_assert(xe, !xe_vma_is_null(vma));
3607 trace_xe_vma_usm_invalidate(vma);
3609 /* Check that we don't race with page-table updates */
3610 if (IS_ENABLED(CONFIG_PROVE_LOCKING)) {
3611 if (xe_vma_is_userptr(vma)) {
3612 WARN_ON_ONCE(!mmu_interval_check_retry
3613 (&vma->userptr.notifier,
3614 vma->userptr.notifier_seq));
3615 WARN_ON_ONCE(!dma_resv_test_signaled(xe_vm_resv(xe_vma_vm(vma)),
3616 DMA_RESV_USAGE_BOOKKEEP));
3619 xe_bo_assert_held(xe_vma_bo(vma));
3623 for_each_tile(tile, xe, id) {
3624 if (xe_pt_zap_ptes(tile, vma)) {
3625 tile_needs_invalidate |= BIT(id);
3628 * FIXME: We potentially need to invalidate multiple
3629 * GTs within the tile
3631 seqno[id] = xe_gt_tlb_invalidation_vma(tile->primary_gt, NULL, vma);
3637 for_each_tile(tile, xe, id) {
3638 if (tile_needs_invalidate & BIT(id)) {
3639 ret = xe_gt_tlb_invalidation_wait(tile->primary_gt, seqno[id]);
3645 vma->usm.tile_invalidated = vma->tile_mask;
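/*
 * xe_analyze_vm() - Dump the VM's page-table root and every mapping
 * for debug capture. Only trylocks vm->lock so a capture never blocks
 * on a contended VM.
 */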
3650 int xe_analyze_vm(struct drm_printer *p, struct xe_vm *vm, int gt_id)
3652 struct drm_gpuva *gpuva;
3656 if (!down_read_trylock(&vm->lock)) {
3657 drm_printf(p, " Failed to acquire VM lock to dump capture\n");
3660 if (vm->pt_root[gt_id]) {
3661 addr = xe_bo_addr(vm->pt_root[gt_id]->bo, 0, XE_PAGE_SIZE);
3662 is_vram = xe_bo_is_vram(vm->pt_root[gt_id]->bo);
3663 drm_printf(p, " VM root: A:0x%llx %s\n", addr,
3664 is_vram ? "VRAM" : "SYS");
3667 drm_gpuvm_for_each_va(gpuva, &vm->gpuvm) {
3668 struct xe_vma *vma = gpuva_to_vma(gpuva);
3669 bool is_userptr = xe_vma_is_userptr(vma);
3670 bool is_null = xe_vma_is_null(vma);
3674 } else if (is_userptr) {
3675 struct xe_res_cursor cur;
3677 if (vma->userptr.sg) {
3678 xe_res_first_sg(vma->userptr.sg, 0, XE_PAGE_SIZE,
3680 addr = xe_res_dma(&cur);
3685 addr = __xe_bo_addr(xe_vma_bo(vma), 0, XE_PAGE_SIZE);
3686 is_vram = xe_bo_is_vram(xe_vma_bo(vma));
3688 drm_printf(p, " [%016llx-%016llx] S:0x%016llx A:%016llx %s\n",
3689 xe_vma_start(vma), xe_vma_end(vma) - 1,
3691 addr, is_null ? "NULL" : is_userptr ? "USR" :
3692 is_vram ? "VRAM" : "SYS");