// SPDX-License-Identifier: MIT
/*
 * Copyright © 2021 Intel Corporation
 */

#include "xe_vm.h"

#include <linux/dma-fence-array.h>

#include <drm/drm_exec.h>
#include <drm/drm_print.h>
#include <drm/ttm/ttm_execbuf_util.h>
#include <drm/ttm/ttm_tt.h>
#include <drm/xe_drm.h>
#include <linux/delay.h>
#include <linux/kthread.h>
#include <linux/mm.h>
#include <linux/swap.h>

#include "xe_assert.h"
#include "xe_bo.h"
#include "xe_device.h"
#include "xe_drm_client.h"
#include "xe_exec_queue.h"
#include "xe_gt.h"
#include "xe_gt_pagefault.h"
#include "xe_gt_tlb_invalidation.h"
#include "xe_migrate.h"
#include "xe_preempt_fence.h"
#include "xe_pt.h"
#include "xe_res_cursor.h"
#include "xe_sync.h"
#include "xe_trace.h"
#include "generated/xe_wa_oob.h"

#define TEST_VM_ASYNC_OPS_ERROR

static struct drm_gem_object *xe_vm_obj(struct xe_vm *vm)
{
	return vm->gpuvm.r_obj;
}

/**
 * xe_vma_userptr_check_repin() - Advisory check for repin needed
 * @vma: The userptr vma
 *
 * Check if the userptr vma has been invalidated since last successful
 * repin. The check is advisory only and the function can be called
 * without the vm->userptr.notifier_lock held. There is no guarantee that the
 * vma userptr will remain valid after a lockless check, so typically
 * the call needs to be followed by a proper check under the notifier_lock.
 *
 * Return: 0 if userptr vma is valid, -EAGAIN otherwise; repin recommended.
 */
int xe_vma_userptr_check_repin(struct xe_vma *vma)
{
	return mmu_interval_check_retry(&vma->userptr.notifier,
					vma->userptr.notifier_seq) ?
		-EAGAIN : 0;
}

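/*
 * Pin the pages backing a userptr VMA with GUP and build an sg table so
 * they can be dma-mapped for GPU access. A repin is needed whenever the
 * MMU interval notifier has bumped the sequence number since the last
 * successful pin.
 */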
int xe_vma_userptr_pin_pages(struct xe_vma *vma)
{
	struct xe_vm *vm = xe_vma_vm(vma);
	struct xe_device *xe = vm->xe;
	const unsigned long num_pages = xe_vma_size(vma) >> PAGE_SHIFT;
	struct page **pages;
	bool in_kthread = !current->mm;
	unsigned long notifier_seq;
	int pinned, ret, i;
	bool read_only = xe_vma_read_only(vma);

	lockdep_assert_held(&vm->lock);
	xe_assert(xe, xe_vma_is_userptr(vma));
retry:
	if (vma->gpuva.flags & XE_VMA_DESTROYED)
		return 0;

	notifier_seq = mmu_interval_read_begin(&vma->userptr.notifier);
	if (notifier_seq == vma->userptr.notifier_seq)
		return 0;

	pages = kvmalloc_array(num_pages, sizeof(*pages), GFP_KERNEL);
	if (!pages)
		return -ENOMEM;

	if (vma->userptr.sg) {
		dma_unmap_sgtable(xe->drm.dev,
				  vma->userptr.sg,
				  read_only ? DMA_TO_DEVICE :
				  DMA_BIDIRECTIONAL, 0);
		sg_free_table(vma->userptr.sg);
		vma->userptr.sg = NULL;
	}

	pinned = ret = 0;
	if (in_kthread) {
		if (!mmget_not_zero(vma->userptr.notifier.mm)) {
			ret = -EFAULT;
			goto mm_closed;
		}
		kthread_use_mm(vma->userptr.notifier.mm);
	}

	while (pinned < num_pages) {
		ret = get_user_pages_fast(xe_vma_userptr(vma) +
					  pinned * PAGE_SIZE,
					  num_pages - pinned,
					  read_only ? 0 : FOLL_WRITE,
					  &pages[pinned]);
		if (ret < 0) {
			if (in_kthread)
				ret = 0;
			break;
		}

		pinned += ret;
		ret = 0;
	}

	if (in_kthread) {
		kthread_unuse_mm(vma->userptr.notifier.mm);
		mmput(vma->userptr.notifier.mm);
	}
mm_closed:
	if (ret)
		goto out;

	ret = sg_alloc_table_from_pages_segment(&vma->userptr.sgt, pages,
						pinned, 0,
						(u64)pinned << PAGE_SHIFT,
						xe_sg_segment_size(xe->drm.dev),
						GFP_KERNEL);
	if (ret) {
		vma->userptr.sg = NULL;
		goto out;
	}
	vma->userptr.sg = &vma->userptr.sgt;

	ret = dma_map_sgtable(xe->drm.dev, vma->userptr.sg,
			      read_only ? DMA_TO_DEVICE :
			      DMA_BIDIRECTIONAL,
			      DMA_ATTR_SKIP_CPU_SYNC |
			      DMA_ATTR_NO_KERNEL_MAPPING);
	if (ret) {
		sg_free_table(vma->userptr.sg);
		vma->userptr.sg = NULL;
		goto out;
	}

	for (i = 0; i < pinned; ++i) {
		if (!read_only) {
			lock_page(pages[i]);
			set_page_dirty(pages[i]);
			unlock_page(pages[i]);
		}

		mark_page_accessed(pages[i]);
	}

out:
	release_pages(pages, pinned);
	kvfree(pages);

	vma->userptr.notifier_seq = notifier_seq;
	if (xe_vma_userptr_check_repin(vma) == -EAGAIN)
		goto retry;

	return ret < 0 ? ret : 0;
}

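/*
 * A preempt fence with software signaling already enabled means the
 * queue it protects has been asked to preempt and has not yet been
 * resumed; a queue with no preempt fence at all is treated the same way.
 */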
static bool preempt_fences_waiting(struct xe_vm *vm)
{
	struct xe_exec_queue *q;

	lockdep_assert_held(&vm->lock);
	xe_vm_assert_held(vm);

	list_for_each_entry(q, &vm->preempt.exec_queues, compute.link) {
		if (!q->compute.pfence ||
		    (q->compute.pfence && test_bit(DMA_FENCE_FLAG_ENABLE_SIGNAL_BIT,
						   &q->compute.pfence->flags))) {
			return true;
		}
	}

	return false;
}

static void free_preempt_fences(struct list_head *list)
{
	struct list_head *link, *next;

	list_for_each_safe(link, next, list)
		xe_preempt_fence_free(to_preempt_fence_from_link(link));
}

static int alloc_preempt_fences(struct xe_vm *vm, struct list_head *list,
				unsigned int *count)
{
	lockdep_assert_held(&vm->lock);
	xe_vm_assert_held(vm);

	if (*count >= vm->preempt.num_exec_queues)
		return 0;

	for (; *count < vm->preempt.num_exec_queues; ++(*count)) {
		struct xe_preempt_fence *pfence = xe_preempt_fence_alloc();

		if (IS_ERR(pfence))
			return PTR_ERR(pfence);

		list_move_tail(xe_preempt_fence_link(pfence), list);
	}

	return 0;
}

static int wait_for_existing_preempt_fences(struct xe_vm *vm)
{
	struct xe_exec_queue *q;

	xe_vm_assert_held(vm);

	list_for_each_entry(q, &vm->preempt.exec_queues, compute.link) {
		if (q->compute.pfence) {
			long timeout = dma_fence_wait(q->compute.pfence, false);

			if (timeout < 0)
				return -ETIME;
			dma_fence_put(q->compute.pfence);
			q->compute.pfence = NULL;
		}
	}

	return 0;
}

static bool xe_vm_is_idle(struct xe_vm *vm)
{
	struct xe_exec_queue *q;

	xe_vm_assert_held(vm);
	list_for_each_entry(q, &vm->preempt.exec_queues, compute.link) {
		if (!xe_exec_queue_is_idle(q))
			return false;
	}

	return true;
}

static void arm_preempt_fences(struct xe_vm *vm, struct list_head *list)
{
	struct list_head *link;
	struct xe_exec_queue *q;

	list_for_each_entry(q, &vm->preempt.exec_queues, compute.link) {
		struct dma_fence *fence;

		link = list->next;
		xe_assert(vm->xe, link != list);

		fence = xe_preempt_fence_arm(to_preempt_fence_from_link(link),
					     q, q->compute.context,
					     ++q->compute.seqno);
		dma_fence_put(q->compute.pfence);
		q->compute.pfence = fence;
	}
}

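/*
 * A BO newly mapped into a compute-mode VM must carry the preempt
 * fences of all exec queues on the VM, so that evicting the BO first
 * triggers preemption of those queues.
 */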
static int add_preempt_fences(struct xe_vm *vm, struct xe_bo *bo)
{
	struct xe_exec_queue *q;
	int err;

	err = xe_bo_lock(bo, true);
	if (err)
		return err;

	err = dma_resv_reserve_fences(bo->ttm.base.resv, vm->preempt.num_exec_queues);
	if (err)
		goto out_unlock;

	list_for_each_entry(q, &vm->preempt.exec_queues, compute.link)
		if (q->compute.pfence) {
			dma_resv_add_fence(bo->ttm.base.resv,
					   q->compute.pfence,
					   DMA_RESV_USAGE_BOOKKEEP);
		}

out_unlock:
	xe_bo_unlock(bo);
	return err;
}

/**
 * xe_vm_fence_all_extobjs() - Add a fence to vm's external objects' resv
 * @vm: The vm.
 * @fence: The fence to add.
 * @usage: The resv usage for the fence.
 *
 * Loops over all of the vm's external object bindings and adds a @fence
 * with the given @usage to all of the external object's reservation
 * objects.
 */
void xe_vm_fence_all_extobjs(struct xe_vm *vm, struct dma_fence *fence,
			     enum dma_resv_usage usage)
{
	struct xe_vma *vma;

	list_for_each_entry(vma, &vm->extobj.list, extobj.link)
		dma_resv_add_fence(xe_vma_bo(vma)->ttm.base.resv, fence, usage);
}

static void resume_and_reinstall_preempt_fences(struct xe_vm *vm)
{
	struct xe_exec_queue *q;

	lockdep_assert_held(&vm->lock);
	xe_vm_assert_held(vm);

	list_for_each_entry(q, &vm->preempt.exec_queues, compute.link) {
		q->ops->resume(q);

		dma_resv_add_fence(xe_vm_resv(vm), q->compute.pfence,
				   DMA_RESV_USAGE_BOOKKEEP);
		xe_vm_fence_all_extobjs(vm, q->compute.pfence,
					DMA_RESV_USAGE_BOOKKEEP);
	}
}

int xe_vm_add_compute_exec_queue(struct xe_vm *vm, struct xe_exec_queue *q)
{
	struct drm_exec exec;
	struct dma_fence *pfence;
	int err;
	bool wait;

	xe_assert(vm->xe, xe_vm_in_compute_mode(vm));

	down_write(&vm->lock);
	drm_exec_init(&exec, DRM_EXEC_INTERRUPTIBLE_WAIT);
	drm_exec_until_all_locked(&exec) {
		err = xe_vm_lock_dma_resv(vm, &exec, 1, true);
		drm_exec_retry_on_contention(&exec);
		if (err)
			goto out_unlock;
	}

	pfence = xe_preempt_fence_create(q, q->compute.context,
					 ++q->compute.seqno);
	if (!pfence) {
		err = -ENOMEM;
		goto out_unlock;
	}

	list_add(&q->compute.link, &vm->preempt.exec_queues);
	++vm->preempt.num_exec_queues;
	q->compute.pfence = pfence;

	down_read(&vm->userptr.notifier_lock);

	dma_resv_add_fence(xe_vm_resv(vm), pfence,
			   DMA_RESV_USAGE_BOOKKEEP);

	xe_vm_fence_all_extobjs(vm, pfence, DMA_RESV_USAGE_BOOKKEEP);

	/*
	 * Check whether a preemption on the VM or a userptr invalidation is
	 * in flight; if so, trigger this preempt fence to sync state with
	 * the other preempt fences on the VM.
	 */
	wait = __xe_vm_userptr_needs_repin(vm) || preempt_fences_waiting(vm);
	if (wait)
		dma_fence_enable_sw_signaling(pfence);

	up_read(&vm->userptr.notifier_lock);

out_unlock:
	drm_exec_fini(&exec);
	up_write(&vm->lock);

	return err;
}

/**
 * xe_vm_remove_compute_exec_queue() - Remove compute exec queue from VM
 * @vm: The VM.
 * @q: The exec_queue
 */
void xe_vm_remove_compute_exec_queue(struct xe_vm *vm, struct xe_exec_queue *q)
{
	if (!xe_vm_in_compute_mode(vm))
		return;

	down_write(&vm->lock);
	list_del(&q->compute.link);
	--vm->preempt.num_exec_queues;
	if (q->compute.pfence) {
		dma_fence_enable_sw_signaling(q->compute.pfence);
		dma_fence_put(q->compute.pfence);
		q->compute.pfence = NULL;
	}
	up_write(&vm->lock);
}

/**
 * __xe_vm_userptr_needs_repin() - Check whether the VM does have userptrs
 * that need repinning.
 * @vm: The VM.
 *
 * This function checks for whether the VM has userptrs that need repinning,
 * and provides a release-type barrier on the userptr.notifier_lock after
 * checking.
 *
 * Return: 0 if there are no userptrs needing repinning, -EAGAIN if there are.
 */
int __xe_vm_userptr_needs_repin(struct xe_vm *vm)
{
	lockdep_assert_held_read(&vm->userptr.notifier_lock);

	return (list_empty(&vm->userptr.repin_list) &&
		list_empty(&vm->userptr.invalidated)) ? 0 : -EAGAIN;
}

/**
 * xe_vm_lock_dma_resv() - Lock the vm dma_resv object and the dma_resv
 * objects of the vm's external buffer objects.
 * @vm: The vm.
 * @exec: Pointer to a struct drm_exec locking context.
 * @num_shared: Number of dma-fence slots to reserve in the locked objects.
 * @lock_vm: Lock also the vm's dma_resv.
 *
 * Locks the vm dma-resv objects and all the dma-resv objects of the
 * buffer objects on the vm external object list.
 *
 * Return: 0 on success, negative error code on error. In particular,
 * -EINTR or -ERESTARTSYS may be returned if the wait is interruptible.
 */
int xe_vm_lock_dma_resv(struct xe_vm *vm, struct drm_exec *exec,
			unsigned int num_shared, bool lock_vm)
{
	struct xe_vma *vma, *next;
	int err = 0;

	lockdep_assert_held(&vm->lock);

	if (lock_vm) {
		err = drm_exec_prepare_obj(exec, xe_vm_obj(vm), num_shared);
		if (err)
			return err;
	}

	list_for_each_entry(vma, &vm->extobj.list, extobj.link) {
		err = drm_exec_prepare_obj(exec, &xe_vma_bo(vma)->ttm.base, num_shared);
		if (err)
			return err;
	}

	spin_lock(&vm->notifier.list_lock);
	list_for_each_entry_safe(vma, next, &vm->notifier.rebind_list,
				 notifier.rebind_link) {
		xe_bo_assert_held(xe_vma_bo(vma));

		list_del_init(&vma->notifier.rebind_link);
		if (vma->tile_present && !(vma->gpuva.flags & XE_VMA_DESTROYED))
			list_move_tail(&vma->combined_links.rebind,
				       &vm->rebind_list);
	}
	spin_unlock(&vm->notifier.list_lock);

	return 0;
}

#define XE_VM_REBIND_RETRY_TIMEOUT_MS 1000

static void xe_vm_kill(struct xe_vm *vm)
{
	struct xe_exec_queue *q;

	lockdep_assert_held(&vm->lock);

	xe_vm_lock(vm, false);
	vm->flags |= XE_VM_FLAG_BANNED;
	trace_xe_vm_kill(vm);

	list_for_each_entry(q, &vm->preempt.exec_queues, compute.link)
		q->ops->kill(q);
	xe_vm_unlock(vm);

	/* TODO: Inform user the VM is banned */
}

/**
 * xe_vm_validate_should_retry() - Whether to retry after a validate error.
 * @exec: The drm_exec object used for locking before validation.
 * @err: The error returned from ttm_bo_validate().
 * @end: A ktime_t cookie that should be set to 0 before first use and
 * that should be reused on subsequent calls.
 *
 * With multiple active VMs, under memory pressure, it is possible that
 * ttm_bo_validate() runs into -EDEADLK and in such a case returns -ENOMEM.
 * Until ttm properly handles locking in such scenarios, the best thing the
 * driver can do is retry with a timeout. Check if that is necessary, and
 * if so unlock the drm_exec's objects while keeping the ticket to prepare
 * for a rerun.
 *
 * Return: true if a retry after drm_exec_init() is recommended;
 * false otherwise.
 */
bool xe_vm_validate_should_retry(struct drm_exec *exec, int err, ktime_t *end)
{
	ktime_t cur;

	if (err != -ENOMEM)
		return false;

	cur = ktime_get();
	*end = *end ? : ktime_add_ms(cur, XE_VM_REBIND_RETRY_TIMEOUT_MS);
	if (!ktime_before(cur, *end))
		return false;

	/*
	 * We would like to keep the ticket here with
	 * drm_exec_unlock_all(), but WW mutex asserts currently
	 * stop us from that. In any case this function could go away
	 * with proper TTM -EDEADLK handling.
	 */
	return true;
}

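/*
 * Lock and prepare everything the rebind worker needs: the vm resv with
 * enough fence slots, the external BOs, and validation of evicted BOs.
 * Sets *done when the VM is idle or no preempt fences are waiting, in
 * which case the rest of the rebind pass can be skipped.
 */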
static int xe_preempt_work_begin(struct drm_exec *exec, struct xe_vm *vm,
				 bool *done)
{
	struct xe_vma *vma;
	int err;

	/*
	 * 1 fence for each preempt fence plus a fence for each tile from a
	 * possible rebind
	 */
	err = drm_exec_prepare_obj(exec, xe_vm_obj(vm),
				   vm->preempt.num_exec_queues +
				   vm->xe->info.tile_count);
	if (err)
		return err;

	if (xe_vm_is_idle(vm)) {
		vm->preempt.rebind_deactivated = true;
		*done = true;
		return 0;
	}

	if (!preempt_fences_waiting(vm)) {
		*done = true;
		return 0;
	}

	err = xe_vm_lock_dma_resv(vm, exec, vm->preempt.num_exec_queues, false);
	if (err)
		return err;

	err = wait_for_existing_preempt_fences(vm);
	if (err)
		return err;

	list_for_each_entry(vma, &vm->rebind_list, combined_links.rebind) {
		if (xe_vma_has_no_bo(vma) ||
		    vma->gpuva.flags & XE_VMA_DESTROYED)
			continue;

		err = xe_bo_validate(xe_vma_bo(vma), vm, false);
		if (err)
			break;
	}

	return err;
}

static void preempt_rebind_work_func(struct work_struct *w)
{
	struct xe_vm *vm = container_of(w, struct xe_vm, preempt.rebind_work);
	struct drm_exec exec;
	struct dma_fence *rebind_fence;
	unsigned int fence_count = 0;
	LIST_HEAD(preempt_fences);
	ktime_t end = 0;
	int err = 0;
	long wait;
	int __maybe_unused tries = 0;

	xe_assert(vm->xe, xe_vm_in_compute_mode(vm));
	trace_xe_vm_rebind_worker_enter(vm);

	down_write(&vm->lock);

	if (xe_vm_is_closed_or_banned(vm)) {
		up_write(&vm->lock);
		trace_xe_vm_rebind_worker_exit(vm);
		return;
	}

retry:
	if (xe_vm_userptr_check_repin(vm)) {
		err = xe_vm_userptr_pin(vm);
		if (err)
			goto out_unlock_outer;
	}

	drm_exec_init(&exec, DRM_EXEC_INTERRUPTIBLE_WAIT);

	drm_exec_until_all_locked(&exec) {
		bool done = false;

		err = xe_preempt_work_begin(&exec, vm, &done);
		drm_exec_retry_on_contention(&exec);
		if (err && xe_vm_validate_should_retry(&exec, err, &end)) {
			err = -EAGAIN;
			goto out_unlock_outer;
		}
		if (err || done)
			goto out_unlock;
	}

	err = alloc_preempt_fences(vm, &preempt_fences, &fence_count);
	if (err)
		goto out_unlock;

	rebind_fence = xe_vm_rebind(vm, true);
	if (IS_ERR(rebind_fence)) {
		err = PTR_ERR(rebind_fence);
		goto out_unlock;
	}

	if (rebind_fence) {
		dma_fence_wait(rebind_fence, false);
		dma_fence_put(rebind_fence);
	}

	/* Wait on munmap style VM unbinds */
	wait = dma_resv_wait_timeout(xe_vm_resv(vm),
				     DMA_RESV_USAGE_KERNEL,
				     false, MAX_SCHEDULE_TIMEOUT);
	if (wait <= 0) {
		err = -ETIME;
		goto out_unlock;
	}

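/*
 * With CONFIG_DRM_XE_USERPTR_INVAL_INJECT, retry_required() forces at
 * least one retry pass (!(__tries)++ is true on the first evaluation)
 * so the -EAGAIN path is exercised even without a real invalidation.
 */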
#define retry_required(__tries, __vm) \
	(IS_ENABLED(CONFIG_DRM_XE_USERPTR_INVAL_INJECT) ? \
	(!(__tries)++ || __xe_vm_userptr_needs_repin(__vm)) : \
	__xe_vm_userptr_needs_repin(__vm))

	down_read(&vm->userptr.notifier_lock);
	if (retry_required(tries, vm)) {
		up_read(&vm->userptr.notifier_lock);
		err = -EAGAIN;
		goto out_unlock;
	}

#undef retry_required

	spin_lock(&vm->xe->ttm.lru_lock);
	ttm_lru_bulk_move_tail(&vm->lru_bulk_move);
	spin_unlock(&vm->xe->ttm.lru_lock);

	/* Point of no return. */
	arm_preempt_fences(vm, &preempt_fences);
	resume_and_reinstall_preempt_fences(vm);
	up_read(&vm->userptr.notifier_lock);

out_unlock:
	drm_exec_fini(&exec);
out_unlock_outer:
	if (err == -EAGAIN) {
		trace_xe_vm_rebind_worker_retry(vm);
		goto retry;
	}

	if (err) {
		drm_warn(&vm->xe->drm, "VM worker error: %d\n", err);
		xe_vm_kill(vm);
	}
	up_write(&vm->lock);

	free_preempt_fences(&preempt_fences);

	trace_xe_vm_rebind_worker_exit(vm);
}

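/*
 * MMU interval notifier callback for userptr VMAs: the CPU is about to
 * change the backing pages, so bump the sequence number, queue the VMA
 * for repin and wait for in-flight GPU access to finish.
 */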
static bool vma_userptr_invalidate(struct mmu_interval_notifier *mni,
				   const struct mmu_notifier_range *range,
				   unsigned long cur_seq)
{
	struct xe_vma *vma = container_of(mni, struct xe_vma, userptr.notifier);
	struct xe_vm *vm = xe_vma_vm(vma);
	struct dma_resv_iter cursor;
	struct dma_fence *fence;
	long err;

	xe_assert(vm->xe, xe_vma_is_userptr(vma));
	trace_xe_vma_userptr_invalidate(vma);

	if (!mmu_notifier_range_blockable(range))
		return false;

	down_write(&vm->userptr.notifier_lock);
	mmu_interval_set_seq(mni, cur_seq);

	/* No need to stop gpu access if the userptr is not yet bound. */
	if (!vma->userptr.initial_bind) {
		up_write(&vm->userptr.notifier_lock);
		return true;
	}

	/*
	 * Tell exec and rebind worker they need to repin and rebind this
	 * userptr.
	 */
	if (!xe_vm_in_fault_mode(vm) &&
	    !(vma->gpuva.flags & XE_VMA_DESTROYED) && vma->tile_present) {
		spin_lock(&vm->userptr.invalidated_lock);
		list_move_tail(&vma->userptr.invalidate_link,
			       &vm->userptr.invalidated);
		spin_unlock(&vm->userptr.invalidated_lock);
	}

	up_write(&vm->userptr.notifier_lock);

	/*
	 * Preempt fences turn into schedule disables, pipeline these.
	 * Note that even in fault mode, we need to wait for binds and
	 * unbinds to complete, and those are attached as BOOKKEEP fences
	 * to the vm's resv.
	 */
	dma_resv_iter_begin(&cursor, xe_vm_resv(vm),
			    DMA_RESV_USAGE_BOOKKEEP);
	dma_resv_for_each_fence_unlocked(&cursor, fence)
		dma_fence_enable_sw_signaling(fence);
	dma_resv_iter_end(&cursor);

	err = dma_resv_wait_timeout(xe_vm_resv(vm),
				    DMA_RESV_USAGE_BOOKKEEP,
				    false, MAX_SCHEDULE_TIMEOUT);
	XE_WARN_ON(err <= 0);

	if (xe_vm_in_fault_mode(vm)) {
		err = xe_vm_invalidate_vma(vma);
		XE_WARN_ON(err);
	}

	trace_xe_vma_userptr_invalidate_complete(vma);

	return true;
}

static const struct mmu_interval_notifier_ops vma_userptr_notifier_ops = {
	.invalidate = vma_userptr_invalidate,
};

int xe_vm_userptr_pin(struct xe_vm *vm)
{
	struct xe_vma *vma, *next;
	int err = 0;
	LIST_HEAD(tmp_evict);

	lockdep_assert_held_write(&vm->lock);

	/* Collect invalidated userptrs */
	spin_lock(&vm->userptr.invalidated_lock);
	list_for_each_entry_safe(vma, next, &vm->userptr.invalidated,
				 userptr.invalidate_link) {
		list_del_init(&vma->userptr.invalidate_link);
		if (list_empty(&vma->combined_links.userptr))
			list_move_tail(&vma->combined_links.userptr,
				       &vm->userptr.repin_list);
	}
	spin_unlock(&vm->userptr.invalidated_lock);

	/* Pin and move to temporary list */
	list_for_each_entry_safe(vma, next, &vm->userptr.repin_list,
				 combined_links.userptr) {
		err = xe_vma_userptr_pin_pages(vma);
		if (err < 0)
			goto out_err;

		list_move_tail(&vma->combined_links.userptr, &tmp_evict);
	}

	/* Take lock and move to rebind_list for rebinding. */
	err = dma_resv_lock_interruptible(xe_vm_resv(vm), NULL);
	if (err)
		goto out_err;

	list_for_each_entry_safe(vma, next, &tmp_evict, combined_links.userptr)
		list_move_tail(&vma->combined_links.rebind, &vm->rebind_list);

	dma_resv_unlock(xe_vm_resv(vm));

	return 0;

out_err:
	list_splice_tail(&tmp_evict, &vm->userptr.repin_list);

	return err;
}

/**
 * xe_vm_userptr_check_repin() - Check whether the VM might have userptrs
 * that need repinning.
 * @vm: The VM.
 *
 * This function does an advisory check for whether the VM has userptrs that
 * need repinning.
 *
 * Return: 0 if there are no indications of userptrs needing repinning,
 * -EAGAIN if there are.
 */
int xe_vm_userptr_check_repin(struct xe_vm *vm)
{
	return (list_empty_careful(&vm->userptr.repin_list) &&
		list_empty_careful(&vm->userptr.invalidated)) ? 0 : -EAGAIN;
}

static struct dma_fence *
xe_vm_bind_vma(struct xe_vma *vma, struct xe_exec_queue *q,
	       struct xe_sync_entry *syncs, u32 num_syncs,
	       bool first_op, bool last_op);

struct dma_fence *xe_vm_rebind(struct xe_vm *vm, bool rebind_worker)
{
	struct dma_fence *fence = NULL;
	struct xe_vma *vma, *next;

	lockdep_assert_held(&vm->lock);
	if (xe_vm_no_dma_fences(vm) && !rebind_worker)
		return NULL;

	xe_vm_assert_held(vm);
	list_for_each_entry_safe(vma, next, &vm->rebind_list,
				 combined_links.rebind) {
		xe_assert(vm->xe, vma->tile_present);

		list_del_init(&vma->combined_links.rebind);
		dma_fence_put(fence);
		if (rebind_worker)
			trace_xe_vma_rebind_worker(vma);
		else
			trace_xe_vma_rebind_exec(vma);
		fence = xe_vm_bind_vma(vma, NULL, NULL, 0, false, false);
		if (IS_ERR(fence))
			return fence;
	}

	return fence;
}

static struct xe_vma *xe_vma_create(struct xe_vm *vm,
				    struct xe_bo *bo,
				    u64 bo_offset_or_userptr,
				    u64 start, u64 end,
				    bool read_only,
				    bool is_null,
				    u8 tile_mask)
{
	struct xe_vma *vma;
	struct xe_tile *tile;
	u8 id;

	xe_assert(vm->xe, start < end);
	xe_assert(vm->xe, end < vm->size);

	if (!bo && !is_null)	/* userptr */
		vma = kzalloc(sizeof(*vma), GFP_KERNEL);
	else
		vma = kzalloc(sizeof(*vma) - sizeof(struct xe_userptr),
			      GFP_KERNEL);
	if (!vma) {
		vma = ERR_PTR(-ENOMEM);
		return vma;
	}

	INIT_LIST_HEAD(&vma->combined_links.rebind);
	INIT_LIST_HEAD(&vma->notifier.rebind_link);
	INIT_LIST_HEAD(&vma->extobj.link);

	INIT_LIST_HEAD(&vma->gpuva.gem.entry);
	vma->gpuva.vm = &vm->gpuvm;
	vma->gpuva.va.addr = start;
	vma->gpuva.va.range = end - start + 1;
	if (read_only)
		vma->gpuva.flags |= XE_VMA_READ_ONLY;
	if (is_null)
		vma->gpuva.flags |= DRM_GPUVA_SPARSE;

	if (tile_mask) {
		vma->tile_mask = tile_mask;
	} else {
		for_each_tile(tile, vm->xe, id)
			vma->tile_mask |= 0x1 << id;
	}

	if (GRAPHICS_VER(vm->xe) >= 20 || vm->xe->info.platform == XE_PVC)
		vma->gpuva.flags |= XE_VMA_ATOMIC_PTE_BIT;

	if (bo) {
		struct drm_gpuvm_bo *vm_bo;

		xe_bo_assert_held(bo);

		vm_bo = drm_gpuvm_bo_obtain(vma->gpuva.vm, &bo->ttm.base);
		if (IS_ERR(vm_bo)) {
			kfree(vma);
			return ERR_CAST(vm_bo);
		}

		drm_gem_object_get(&bo->ttm.base);
		vma->gpuva.gem.obj = &bo->ttm.base;
		vma->gpuva.gem.offset = bo_offset_or_userptr;
		drm_gpuva_link(&vma->gpuva, vm_bo);
		drm_gpuvm_bo_put(vm_bo);
	} else /* userptr or null */ {
		if (!is_null) {
			u64 size = end - start + 1;
			int err;

			INIT_LIST_HEAD(&vma->userptr.invalidate_link);
			vma->gpuva.gem.offset = bo_offset_or_userptr;

			err = mmu_interval_notifier_insert(&vma->userptr.notifier,
							   current->mm,
							   xe_vma_userptr(vma), size,
							   &vma_userptr_notifier_ops);
			if (err) {
				kfree(vma);
				vma = ERR_PTR(err);
				return vma;
			}

			vma->userptr.notifier_seq = LONG_MAX;
		}

		xe_vm_get(vm);
	}

	return vma;
}

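/*
 * External (not vm-private) BOs are tracked on the vm's extobj list,
 * with one VMA per BO acting as the list entry; when that VMA goes away
 * another VMA of the same BO takes its place (see xe_vma_destroy()).
 */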
static bool vm_remove_extobj(struct xe_vma *vma)
{
	if (!list_empty(&vma->extobj.link)) {
		xe_vma_vm(vma)->extobj.entries--;
		list_del_init(&vma->extobj.link);
		return true;
	}
	return false;
}

static void xe_vma_destroy_late(struct xe_vma *vma)
{
	struct xe_vm *vm = xe_vma_vm(vma);
	struct xe_device *xe = vm->xe;
	bool read_only = xe_vma_read_only(vma);

	if (xe_vma_is_userptr(vma)) {
		if (vma->userptr.sg) {
			dma_unmap_sgtable(xe->drm.dev,
					  vma->userptr.sg,
					  read_only ? DMA_TO_DEVICE :
					  DMA_BIDIRECTIONAL, 0);
			sg_free_table(vma->userptr.sg);
			vma->userptr.sg = NULL;
		}

		/*
		 * Since userptr pages are not pinned, we can't remove
		 * the notifier until we're sure the GPU is not accessing
		 * them anymore
		 */
		mmu_interval_notifier_remove(&vma->userptr.notifier);
		xe_vm_put(vm);
	} else if (xe_vma_is_null(vma)) {
		xe_vm_put(vm);
	} else {
		xe_bo_put(xe_vma_bo(vma));
	}

	kfree(vma);
}

static void vma_destroy_work_func(struct work_struct *w)
{
	struct xe_vma *vma =
		container_of(w, struct xe_vma, destroy_work);

	xe_vma_destroy_late(vma);
}

static struct xe_vma *
bo_has_vm_references_locked(struct xe_bo *bo, struct xe_vm *vm,
			    struct xe_vma *ignore)
{
	struct drm_gpuvm_bo *vm_bo;
	struct drm_gpuva *va;
	struct drm_gem_object *obj = &bo->ttm.base;

	xe_bo_assert_held(bo);

	drm_gem_for_each_gpuvm_bo(vm_bo, obj) {
		drm_gpuvm_bo_for_each_va(va, vm_bo) {
			struct xe_vma *vma = gpuva_to_vma(va);

			if (vma != ignore && xe_vma_vm(vma) == vm)
				return vma;
		}
	}

	return NULL;
}

static bool bo_has_vm_references(struct xe_bo *bo, struct xe_vm *vm,
				 struct xe_vma *ignore)
{
	bool ret;

	xe_bo_lock(bo, false);
	ret = !!bo_has_vm_references_locked(bo, vm, ignore);
	xe_bo_unlock(bo);

	return ret;
}

static void __vm_insert_extobj(struct xe_vm *vm, struct xe_vma *vma)
{
	lockdep_assert_held_write(&vm->lock);

	list_add(&vma->extobj.link, &vm->extobj.list);
	vm->extobj.entries++;
}

static void vm_insert_extobj(struct xe_vm *vm, struct xe_vma *vma)
{
	struct xe_bo *bo = xe_vma_bo(vma);

	lockdep_assert_held_write(&vm->lock);

	if (bo_has_vm_references(bo, vm, vma))
		return;

	__vm_insert_extobj(vm, vma);
}

static void vma_destroy_cb(struct dma_fence *fence,
			   struct dma_fence_cb *cb)
{
	struct xe_vma *vma = container_of(cb, struct xe_vma, destroy_cb);

	INIT_WORK(&vma->destroy_work, vma_destroy_work_func);
	queue_work(system_unbound_wq, &vma->destroy_work);
}

static void xe_vma_destroy(struct xe_vma *vma, struct dma_fence *fence)
{
	struct xe_vm *vm = xe_vma_vm(vma);

	lockdep_assert_held_write(&vm->lock);
	xe_assert(vm->xe, list_empty(&vma->combined_links.destroy));

	if (xe_vma_is_userptr(vma)) {
		xe_assert(vm->xe, vma->gpuva.flags & XE_VMA_DESTROYED);

		spin_lock(&vm->userptr.invalidated_lock);
		list_del(&vma->userptr.invalidate_link);
		spin_unlock(&vm->userptr.invalidated_lock);
	} else if (!xe_vma_is_null(vma)) {
		xe_bo_assert_held(xe_vma_bo(vma));

		spin_lock(&vm->notifier.list_lock);
		list_del(&vma->notifier.rebind_link);
		spin_unlock(&vm->notifier.list_lock);

		drm_gpuva_unlink(&vma->gpuva);

		if (!xe_vma_bo(vma)->vm && vm_remove_extobj(vma)) {
			struct xe_vma *other;

			other = bo_has_vm_references_locked(xe_vma_bo(vma), vm, NULL);

			if (other)
				__vm_insert_extobj(vm, other);
		}
	}

	xe_vm_assert_held(vm);
	if (fence) {
		int ret = dma_fence_add_callback(fence, &vma->destroy_cb,
						 vma_destroy_cb);

		if (ret) {
			XE_WARN_ON(ret != -ENOENT);
			xe_vma_destroy_late(vma);
		}
	} else {
		xe_vma_destroy_late(vma);
	}
}

/**
 * xe_vm_prepare_vma() - drm_exec utility to lock a vma
 * @exec: The drm_exec object we're currently locking for.
 * @vma: The vma for which we want to lock the vm resv and any attached
 * object's resv.
 * @num_shared: The number of dma-fence slots to pre-allocate in the
 * objects' reservation objects.
 *
 * Return: 0 on success, negative error code on error. In particular
 * may return -EDEADLK on WW transaction contention and -EINTR if
 * an interruptible wait is terminated by a signal.
 */
int xe_vm_prepare_vma(struct drm_exec *exec, struct xe_vma *vma,
		      unsigned int num_shared)
{
	struct xe_vm *vm = xe_vma_vm(vma);
	struct xe_bo *bo = xe_vma_bo(vma);
	int err;

	err = drm_exec_prepare_obj(exec, xe_vm_obj(vm), num_shared);
	if (!err && bo && !bo->vm)
		err = drm_exec_prepare_obj(exec, &bo->ttm.base, num_shared);

	return err;
}

static void xe_vma_destroy_unlocked(struct xe_vma *vma)
{
	struct drm_exec exec;
	int err;

	drm_exec_init(&exec, 0);
	drm_exec_until_all_locked(&exec) {
		err = xe_vm_prepare_vma(&exec, vma, 0);
		drm_exec_retry_on_contention(&exec);
		if (XE_WARN_ON(err))
			break;
	}

	xe_vma_destroy(vma, NULL);

	drm_exec_fini(&exec);
}

struct xe_vma *
xe_vm_find_overlapping_vma(struct xe_vm *vm, u64 start, u64 range)
{
	struct drm_gpuva *gpuva;

	lockdep_assert_held(&vm->lock);

	if (xe_vm_is_closed_or_banned(vm))
		return NULL;

	xe_assert(vm->xe, start + range <= vm->size);

	gpuva = drm_gpuva_find_first(&vm->gpuvm, start, range);

	return gpuva ? gpuva_to_vma(gpuva) : NULL;
}

static int xe_vm_insert_vma(struct xe_vm *vm, struct xe_vma *vma)
{
	int err;

	xe_assert(vm->xe, xe_vma_vm(vma) == vm);
	lockdep_assert_held(&vm->lock);

	err = drm_gpuva_insert(&vm->gpuvm, &vma->gpuva);
	XE_WARN_ON(err);	/* Shouldn't be possible */

	return err;
}

static void xe_vm_remove_vma(struct xe_vm *vm, struct xe_vma *vma)
{
	xe_assert(vm->xe, xe_vma_vm(vma) == vm);
	lockdep_assert_held(&vm->lock);

	drm_gpuva_remove(&vma->gpuva);
	if (vm->usm.last_fault_vma == vma)
		vm->usm.last_fault_vma = NULL;
}

static struct drm_gpuva_op *xe_vm_op_alloc(void)
{
	struct xe_vma_op *op;

	op = kzalloc(sizeof(*op), GFP_KERNEL);

	if (unlikely(!op))
		return NULL;

	return &op->base;
}

static void xe_vm_free(struct drm_gpuvm *gpuvm);

static struct drm_gpuvm_ops gpuvm_ops = {
	.op_alloc = xe_vm_op_alloc,
	.vm_free = xe_vm_free,
};

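/*
 * The PAT index is not a contiguous field in the page-table entry; the
 * helpers below scatter the individual index bits into the PTE/PDE bit
 * positions the hardware expects.
 */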
static u64 pde_encode_pat_index(struct xe_device *xe, u16 pat_index)
{
	u64 pte = 0;

	if (pat_index & BIT(0))
		pte |= XE_PPGTT_PTE_PAT0;

	if (pat_index & BIT(1))
		pte |= XE_PPGTT_PTE_PAT1;

	return pte;
}

static u64 pte_encode_pat_index(struct xe_device *xe, u16 pat_index,
				u32 pt_level)
{
	u64 pte = 0;

	if (pat_index & BIT(0))
		pte |= XE_PPGTT_PTE_PAT0;

	if (pat_index & BIT(1))
		pte |= XE_PPGTT_PTE_PAT1;

	if (pat_index & BIT(2)) {
		if (pt_level)
			pte |= XE_PPGTT_PDE_PDPE_PAT2;
		else
			pte |= XE_PPGTT_PTE_PAT2;
	}

	if (pat_index & BIT(3))
		pte |= XELPG_PPGTT_PTE_PAT3;

	if (pat_index & (BIT(4)))
		pte |= XE2_PPGTT_PTE_PAT4;

	return pte;
}

static u64 pte_encode_ps(u32 pt_level)
{
	XE_WARN_ON(pt_level > 2);

	if (pt_level == 1)
		return XE_PDE_PS_2M;
	else if (pt_level == 2)
		return XE_PDPE_PS_1G;

	return 0;
}

static u64 xelp_pde_encode_bo(struct xe_bo *bo, u64 bo_offset,
			      const u16 pat_index)
{
	struct xe_device *xe = xe_bo_device(bo);
	u64 pde;

	pde = xe_bo_addr(bo, bo_offset, XE_PAGE_SIZE);
	pde |= XE_PAGE_PRESENT | XE_PAGE_RW;
	pde |= pde_encode_pat_index(xe, pat_index);

	return pde;
}

static u64 xelp_pte_encode_bo(struct xe_bo *bo, u64 bo_offset,
			      u16 pat_index, u32 pt_level)
{
	struct xe_device *xe = xe_bo_device(bo);
	u64 pte;

	pte = xe_bo_addr(bo, bo_offset, XE_PAGE_SIZE);
	pte |= XE_PAGE_PRESENT | XE_PAGE_RW;
	pte |= pte_encode_pat_index(xe, pat_index, pt_level);
	pte |= pte_encode_ps(pt_level);

	if (xe_bo_is_vram(bo) || xe_bo_is_stolen_devmem(bo))
		pte |= XE_PPGTT_PTE_DM;

	return pte;
}

static u64 xelp_pte_encode_vma(u64 pte, struct xe_vma *vma,
			       u16 pat_index, u32 pt_level)
{
	struct xe_device *xe = xe_vma_vm(vma)->xe;

	pte |= XE_PAGE_PRESENT;

	if (likely(!xe_vma_read_only(vma)))
		pte |= XE_PAGE_RW;

	pte |= pte_encode_pat_index(xe, pat_index, pt_level);
	pte |= pte_encode_ps(pt_level);

	if (unlikely(xe_vma_is_null(vma)))
		pte |= XE_PTE_NULL;

	return pte;
}

static u64 xelp_pte_encode_addr(struct xe_device *xe, u64 addr,
				u16 pat_index,
				u32 pt_level, bool devmem, u64 flags)
{
	u64 pte;

	/* Avoid passing random bits directly as flags */
	xe_assert(xe, !(flags & ~XE_PTE_PS64));

	pte = addr;
	pte |= XE_PAGE_PRESENT | XE_PAGE_RW;
	pte |= pte_encode_pat_index(xe, pat_index, pt_level);
	pte |= pte_encode_ps(pt_level);

	if (devmem)
		pte |= XE_PPGTT_PTE_DM;

	pte |= flags;

	return pte;
}

static const struct xe_pt_ops xelp_pt_ops = {
	.pte_encode_bo = xelp_pte_encode_bo,
	.pte_encode_vma = xelp_pte_encode_vma,
	.pte_encode_addr = xelp_pte_encode_addr,
	.pde_encode_bo = xelp_pde_encode_bo,
};

static void vm_destroy_work_func(struct work_struct *w);

struct xe_vm *xe_vm_create(struct xe_device *xe, u32 flags)
{
	struct drm_gem_object *vm_resv_obj;
	struct xe_vm *vm;
	int err, number_tiles = 0;
	struct xe_tile *tile;
	u8 id;

	vm = kzalloc(sizeof(*vm), GFP_KERNEL);
	if (!vm)
		return ERR_PTR(-ENOMEM);

	vm->xe = xe;

	vm->size = 1ull << xe->info.va_bits;

	vm->flags = flags;

	init_rwsem(&vm->lock);

	INIT_LIST_HEAD(&vm->rebind_list);

	INIT_LIST_HEAD(&vm->userptr.repin_list);
	INIT_LIST_HEAD(&vm->userptr.invalidated);
	init_rwsem(&vm->userptr.notifier_lock);
	spin_lock_init(&vm->userptr.invalidated_lock);

	INIT_LIST_HEAD(&vm->notifier.rebind_list);
	spin_lock_init(&vm->notifier.list_lock);

	INIT_WORK(&vm->destroy_work, vm_destroy_work_func);

	INIT_LIST_HEAD(&vm->preempt.exec_queues);
	vm->preempt.min_run_period_ms = 10;	/* FIXME: Wire up to uAPI */

	for_each_tile(tile, xe, id)
		xe_range_fence_tree_init(&vm->rftree[id]);

	INIT_LIST_HEAD(&vm->extobj.list);

	vm->pt_ops = &xelp_pt_ops;

	if (!(flags & XE_VM_FLAG_MIGRATION))
		xe_device_mem_access_get(xe);

	vm_resv_obj = drm_gpuvm_resv_object_alloc(&xe->drm);
	if (!vm_resv_obj) {
		err = -ENOMEM;
		goto err_no_resv;
	}

	drm_gpuvm_init(&vm->gpuvm, "Xe VM", 0, &xe->drm, vm_resv_obj,
		       0, vm->size, 0, 0, &gpuvm_ops);

	drm_gem_object_put(vm_resv_obj);

	err = dma_resv_lock_interruptible(xe_vm_resv(vm), NULL);
	if (err)
		goto err_close;

	if (IS_DGFX(xe) && xe->info.vram_flags & XE_VRAM_FLAGS_NEED64K)
		vm->flags |= XE_VM_FLAG_64K;

	for_each_tile(tile, xe, id) {
		if (flags & XE_VM_FLAG_MIGRATION &&
		    tile->id != XE_VM_FLAG_TILE_ID(flags))
			continue;

		vm->pt_root[id] = xe_pt_create(vm, tile, xe->info.vm_max_level);
		if (IS_ERR(vm->pt_root[id])) {
			err = PTR_ERR(vm->pt_root[id]);
			vm->pt_root[id] = NULL;
			goto err_unlock_close;
		}
	}

	if (flags & XE_VM_FLAG_SCRATCH_PAGE) {
		for_each_tile(tile, xe, id) {
			if (!vm->pt_root[id])
				continue;

			err = xe_pt_create_scratch(xe, tile, vm);
			if (err)
				goto err_unlock_close;
		}
		vm->batch_invalidate_tlb = true;
	}

	if (flags & XE_VM_FLAG_COMPUTE_MODE) {
		INIT_WORK(&vm->preempt.rebind_work, preempt_rebind_work_func);
		vm->flags |= XE_VM_FLAG_COMPUTE_MODE;
		vm->batch_invalidate_tlb = false;
	}

	/* Fill pt_root after allocating scratch tables */
	for_each_tile(tile, xe, id) {
		if (!vm->pt_root[id])
			continue;

		xe_pt_populate_empty(tile, vm, vm->pt_root[id]);
	}
	dma_resv_unlock(xe_vm_resv(vm));

	/* Kernel migration VM shouldn't have a circular loop.. */
	if (!(flags & XE_VM_FLAG_MIGRATION)) {
		for_each_tile(tile, xe, id) {
			struct xe_gt *gt = tile->primary_gt;
			struct xe_vm *migrate_vm;
			struct xe_exec_queue *q;
			u32 create_flags = EXEC_QUEUE_FLAG_VM |
				((flags & XE_VM_FLAG_ASYNC_DEFAULT) ?
				EXEC_QUEUE_FLAG_VM_ASYNC : 0);

			if (!vm->pt_root[id])
				continue;

			migrate_vm = xe_migrate_get_vm(tile->migrate);
			q = xe_exec_queue_create_class(xe, gt, migrate_vm,
						       XE_ENGINE_CLASS_COPY,
						       create_flags);
			xe_vm_put(migrate_vm);
			if (IS_ERR(q)) {
				err = PTR_ERR(q);
				goto err_close;
			}
			vm->q[id] = q;
			number_tiles++;
		}
	}

	if (number_tiles > 1)
		vm->composite_fence_ctx = dma_fence_context_alloc(1);

	mutex_lock(&xe->usm.lock);
	if (flags & XE_VM_FLAG_FAULT_MODE)
		xe->usm.num_vm_in_fault_mode++;
	else if (!(flags & XE_VM_FLAG_MIGRATION))
		xe->usm.num_vm_in_non_fault_mode++;
	mutex_unlock(&xe->usm.lock);

	trace_xe_vm_create(vm);

	return vm;

err_unlock_close:
	dma_resv_unlock(xe_vm_resv(vm));
err_close:
	xe_vm_close_and_put(vm);
	return ERR_PTR(err);

err_no_resv:
	for_each_tile(tile, xe, id)
		xe_range_fence_tree_fini(&vm->rftree[id]);
	kfree(vm);
	if (!(flags & XE_VM_FLAG_MIGRATION))
		xe_device_mem_access_put(xe);
	return ERR_PTR(err);
}

static void xe_vm_close(struct xe_vm *vm)
{
	down_write(&vm->lock);
	vm->size = 0;
	up_write(&vm->lock);
}

void xe_vm_close_and_put(struct xe_vm *vm)
{
	LIST_HEAD(contested);
	struct xe_device *xe = vm->xe;
	struct xe_tile *tile;
	struct xe_vma *vma, *next_vma;
	struct drm_gpuva *gpuva, *next;
	u8 id;

	xe_assert(xe, !vm->preempt.num_exec_queues);

	xe_vm_close(vm);
	if (xe_vm_in_compute_mode(vm))
		flush_work(&vm->preempt.rebind_work);

	down_write(&vm->lock);
	for_each_tile(tile, xe, id) {
		if (vm->q[id])
			xe_exec_queue_last_fence_put(vm->q[id], vm);
	}
	up_write(&vm->lock);

	for_each_tile(tile, xe, id) {
		if (vm->q[id]) {
			xe_exec_queue_kill(vm->q[id]);
			xe_exec_queue_put(vm->q[id]);
			vm->q[id] = NULL;
		}
	}

	down_write(&vm->lock);
	xe_vm_lock(vm, false);
	drm_gpuvm_for_each_va_safe(gpuva, next, &vm->gpuvm) {
		vma = gpuva_to_vma(gpuva);

		if (xe_vma_has_no_bo(vma)) {
			down_read(&vm->userptr.notifier_lock);
			vma->gpuva.flags |= XE_VMA_DESTROYED;
			up_read(&vm->userptr.notifier_lock);
		}

		xe_vm_remove_vma(vm, vma);

		/* easy case, remove from VMA? */
		if (xe_vma_has_no_bo(vma) || xe_vma_bo(vma)->vm) {
			list_del_init(&vma->combined_links.rebind);
			xe_vma_destroy(vma, NULL);
			continue;
		}

		list_move_tail(&vma->combined_links.destroy, &contested);
		vma->gpuva.flags |= XE_VMA_DESTROYED;
	}

	/*
	 * All vm operations will add shared fences to resv.
	 * The only exception is eviction for a shared object,
	 * but even so, the unbind when evicted would still
	 * install a fence to resv. Hence it's safe to
	 * destroy the pagetables immediately.
	 */
	for_each_tile(tile, xe, id) {
		if (vm->scratch_bo[id]) {
			u32 i;

			xe_bo_unpin(vm->scratch_bo[id]);
			xe_bo_put(vm->scratch_bo[id]);
			for (i = 0; i < vm->pt_root[id]->level; i++)
				xe_pt_destroy(vm->scratch_pt[id][i], vm->flags,
					      NULL);
		}
		if (vm->pt_root[id]) {
			xe_pt_destroy(vm->pt_root[id], vm->flags, NULL);
			vm->pt_root[id] = NULL;
		}
	}
	xe_vm_unlock(vm);

	/*
	 * VM is now dead, cannot re-add nodes to vm->vmas if it's NULL
	 * Since we hold a refcount to the bo, we can remove and free
	 * the members safely without locking.
	 */
	list_for_each_entry_safe(vma, next_vma, &contested,
				 combined_links.destroy) {
		list_del_init(&vma->combined_links.destroy);
		xe_vma_destroy_unlocked(vma);
	}

	xe_assert(xe, list_empty(&vm->extobj.list));
	up_write(&vm->lock);

	mutex_lock(&xe->usm.lock);
	if (vm->flags & XE_VM_FLAG_FAULT_MODE)
		xe->usm.num_vm_in_fault_mode--;
	else if (!(vm->flags & XE_VM_FLAG_MIGRATION))
		xe->usm.num_vm_in_non_fault_mode--;
	mutex_unlock(&xe->usm.lock);

	for_each_tile(tile, xe, id)
		xe_range_fence_tree_fini(&vm->rftree[id]);

	xe_vm_put(vm);
}

static void vm_destroy_work_func(struct work_struct *w)
{
	struct xe_vm *vm =
		container_of(w, struct xe_vm, destroy_work);
	struct xe_device *xe = vm->xe;
	struct xe_tile *tile;
	u8 id;
	void *lookup;

	/* xe_vm_close_and_put was not called? */
	xe_assert(xe, !vm->size);

	if (!(vm->flags & XE_VM_FLAG_MIGRATION)) {
		xe_device_mem_access_put(xe);

		if (xe->info.has_asid) {
			mutex_lock(&xe->usm.lock);
			lookup = xa_erase(&xe->usm.asid_to_vm, vm->usm.asid);
			xe_assert(xe, lookup == vm);
			mutex_unlock(&xe->usm.lock);
		}
	}

	for_each_tile(tile, xe, id)
		XE_WARN_ON(vm->pt_root[id]);

	trace_xe_vm_free(vm);
	dma_fence_put(vm->rebind_fence);
	kfree(vm);
}

static void xe_vm_free(struct drm_gpuvm *gpuvm)
{
	struct xe_vm *vm = container_of(gpuvm, struct xe_vm, gpuvm);

	/* To destroy the VM we need to be able to sleep */
	queue_work(system_unbound_wq, &vm->destroy_work);
}

struct xe_vm *xe_vm_lookup(struct xe_file *xef, u32 id)
{
	struct xe_vm *vm;

	mutex_lock(&xef->vm.lock);
	vm = xa_load(&xef->vm.xa, id);
	if (vm)
		xe_vm_get(vm);
	mutex_unlock(&xef->vm.lock);

	return vm;
}

u64 xe_vm_pdp4_descriptor(struct xe_vm *vm, struct xe_tile *tile)
{
	return vm->pt_ops->pde_encode_bo(vm->pt_root[tile->id]->bo, 0,
					 tile_to_xe(tile)->pat.idx[XE_CACHE_WB]);
}

static struct xe_exec_queue *
to_wait_exec_queue(struct xe_vm *vm, struct xe_exec_queue *q)
{
	return q ? q : vm->q[0];
}

static struct dma_fence *
xe_vm_unbind_vma(struct xe_vma *vma, struct xe_exec_queue *q,
		 struct xe_sync_entry *syncs, u32 num_syncs,
		 bool first_op, bool last_op)
{
	struct xe_vm *vm = xe_vma_vm(vma);
	struct xe_exec_queue *wait_exec_queue = to_wait_exec_queue(vm, q);
	struct xe_tile *tile;
	struct dma_fence *fence = NULL;
	struct dma_fence **fences = NULL;
	struct dma_fence_array *cf = NULL;
	int cur_fence = 0, i;
	int number_tiles = hweight8(vma->tile_present);
	int err;
	u8 id;

	trace_xe_vma_unbind(vma);

	if (number_tiles > 1) {
		fences = kmalloc_array(number_tiles, sizeof(*fences),
				       GFP_KERNEL);
		if (!fences)
			return ERR_PTR(-ENOMEM);
	}

	for_each_tile(tile, vm->xe, id) {
		if (!(vma->tile_present & BIT(id)))
			goto next;

		fence = __xe_pt_unbind_vma(tile, vma, q ? q : vm->q[id],
					   first_op ? syncs : NULL,
					   first_op ? num_syncs : 0);
		if (IS_ERR(fence)) {
			err = PTR_ERR(fence);
			goto err_fences;
		}

		if (fences)
			fences[cur_fence++] = fence;

next:
		if (q && vm->pt_root[id] && !list_empty(&q->multi_gt_list))
			q = list_next_entry(q, multi_gt_list);
	}

	if (fences) {
		cf = dma_fence_array_create(number_tiles, fences,
					    vm->composite_fence_ctx,
					    vm->composite_fence_seqno++,
					    false);
		if (!cf) {
			--vm->composite_fence_seqno;
			err = -ENOMEM;
			goto err_fences;
		}
	}

	fence = cf ? &cf->base : !fence ?
		xe_exec_queue_last_fence_get(wait_exec_queue, vm) : fence;
	if (last_op) {
		for (i = 0; i < num_syncs; i++)
			xe_sync_entry_signal(&syncs[i], NULL, fence);
	}

	return fence;

err_fences:
	if (fences) {
		while (cur_fence)
			dma_fence_put(fences[--cur_fence]);
		kfree(fences);
	}

	return ERR_PTR(err);
}

static struct dma_fence *
xe_vm_bind_vma(struct xe_vma *vma, struct xe_exec_queue *q,
	       struct xe_sync_entry *syncs, u32 num_syncs,
	       bool first_op, bool last_op)
{
	struct xe_tile *tile;
	struct dma_fence *fence;
	struct dma_fence **fences = NULL;
	struct dma_fence_array *cf = NULL;
	struct xe_vm *vm = xe_vma_vm(vma);
	int cur_fence = 0, i;
	int number_tiles = hweight8(vma->tile_mask);
	int err;
	u8 id;

	trace_xe_vma_bind(vma);

	if (number_tiles > 1) {
		fences = kmalloc_array(number_tiles, sizeof(*fences),
				       GFP_KERNEL);
		if (!fences)
			return ERR_PTR(-ENOMEM);
	}

	for_each_tile(tile, vm->xe, id) {
		if (!(vma->tile_mask & BIT(id)))
			goto next;

		fence = __xe_pt_bind_vma(tile, vma, q ? q : vm->q[id],
					 first_op ? syncs : NULL,
					 first_op ? num_syncs : 0,
					 vma->tile_present & BIT(id));
		if (IS_ERR(fence)) {
			err = PTR_ERR(fence);
			goto err_fences;
		}

		if (fences)
			fences[cur_fence++] = fence;

next:
		if (q && vm->pt_root[id] && !list_empty(&q->multi_gt_list))
			q = list_next_entry(q, multi_gt_list);
	}

	if (fences) {
		cf = dma_fence_array_create(number_tiles, fences,
					    vm->composite_fence_ctx,
					    vm->composite_fence_seqno++,
					    false);
		if (!cf) {
			--vm->composite_fence_seqno;
			err = -ENOMEM;
			goto err_fences;
		}
	}

	if (last_op) {
		for (i = 0; i < num_syncs; i++)
			xe_sync_entry_signal(&syncs[i], NULL,
					     cf ? &cf->base : fence);
	}

	return cf ? &cf->base : fence;

err_fences:
	if (fences) {
		while (cur_fence)
			dma_fence_put(fences[--cur_fence]);
		kfree(fences);
	}

	return ERR_PTR(err);
}

static bool xe_vm_sync_mode(struct xe_vm *vm, struct xe_exec_queue *q)
{
	return q ? !(q->flags & EXEC_QUEUE_FLAG_VM_ASYNC) :
		!(vm->flags & XE_VM_FLAG_ASYNC_DEFAULT);
}

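/*
 * In sync mode the ioctl blocks on the fence of the last operation
 * before returning; in async mode it returns immediately and completion
 * is signaled through the sync entries.
 */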
static int __xe_vm_bind(struct xe_vm *vm, struct xe_vma *vma,
			struct xe_exec_queue *q, struct xe_sync_entry *syncs,
			u32 num_syncs, bool immediate, bool first_op,
			bool last_op)
{
	struct dma_fence *fence;
	struct xe_exec_queue *wait_exec_queue = to_wait_exec_queue(vm, q);

	xe_vm_assert_held(vm);

	if (immediate) {
		fence = xe_vm_bind_vma(vma, q, syncs, num_syncs, first_op,
				       last_op);
		if (IS_ERR(fence))
			return PTR_ERR(fence);
	} else {
		int i;

		xe_assert(vm->xe, xe_vm_in_fault_mode(vm));

		fence = xe_exec_queue_last_fence_get(wait_exec_queue, vm);
		if (last_op) {
			for (i = 0; i < num_syncs; i++)
				xe_sync_entry_signal(&syncs[i], NULL, fence);
		}
	}

	if (last_op)
		xe_exec_queue_last_fence_set(wait_exec_queue, vm, fence);
	if (last_op && xe_vm_sync_mode(vm, q))
		dma_fence_wait(fence, true);
	dma_fence_put(fence);

	return 0;
}

static int xe_vm_bind(struct xe_vm *vm, struct xe_vma *vma, struct xe_exec_queue *q,
		      struct xe_bo *bo, struct xe_sync_entry *syncs,
		      u32 num_syncs, bool immediate, bool first_op,
		      bool last_op)
{
	int err;

	xe_vm_assert_held(vm);
	xe_bo_assert_held(bo);

	if (bo && immediate) {
		err = xe_bo_validate(bo, vm, true);
		if (err)
			return err;
	}

	return __xe_vm_bind(vm, vma, q, syncs, num_syncs, immediate, first_op,
			    last_op);
}

static int xe_vm_unbind(struct xe_vm *vm, struct xe_vma *vma,
			struct xe_exec_queue *q, struct xe_sync_entry *syncs,
			u32 num_syncs, bool first_op, bool last_op)
{
	struct dma_fence *fence;
	struct xe_exec_queue *wait_exec_queue = to_wait_exec_queue(vm, q);

	xe_vm_assert_held(vm);
	xe_bo_assert_held(xe_vma_bo(vma));

	fence = xe_vm_unbind_vma(vma, q, syncs, num_syncs, first_op, last_op);
	if (IS_ERR(fence))
		return PTR_ERR(fence);

	xe_vma_destroy(vma, fence);
	if (last_op)
		xe_exec_queue_last_fence_set(wait_exec_queue, vm, fence);
	if (last_op && xe_vm_sync_mode(vm, q))
		dma_fence_wait(fence, true);
	dma_fence_put(fence);

	return 0;
}

#define ALL_DRM_XE_VM_CREATE_FLAGS (DRM_XE_VM_CREATE_FLAG_SCRATCH_PAGE | \
				    DRM_XE_VM_CREATE_FLAG_COMPUTE_MODE | \
				    DRM_XE_VM_CREATE_FLAG_ASYNC_DEFAULT | \
				    DRM_XE_VM_CREATE_FLAG_FAULT_MODE)

int xe_vm_create_ioctl(struct drm_device *dev, void *data,
		       struct drm_file *file)
{
	struct xe_device *xe = to_xe_device(dev);
	struct xe_file *xef = to_xe_file(file);
	struct drm_xe_vm_create *args = data;
	struct xe_tile *tile;
	struct xe_vm *vm;
	u32 id, asid;
	int err;
	u32 flags = 0;

	if (XE_IOCTL_DBG(xe, args->extensions))
		return -EINVAL;

	if (XE_WA(xe_root_mmio_gt(xe), 14016763929))
		args->flags |= DRM_XE_VM_CREATE_FLAG_SCRATCH_PAGE;

	if (XE_IOCTL_DBG(xe, args->flags & DRM_XE_VM_CREATE_FLAG_FAULT_MODE &&
			 !xe->info.supports_usm))
		return -EINVAL;

	if (XE_IOCTL_DBG(xe, args->reserved[0] || args->reserved[1]))
		return -EINVAL;

	if (XE_IOCTL_DBG(xe, args->flags & ~ALL_DRM_XE_VM_CREATE_FLAGS))
		return -EINVAL;

	if (XE_IOCTL_DBG(xe, args->flags & DRM_XE_VM_CREATE_FLAG_SCRATCH_PAGE &&
			 args->flags & DRM_XE_VM_CREATE_FLAG_FAULT_MODE))
		return -EINVAL;

	if (XE_IOCTL_DBG(xe, args->flags & DRM_XE_VM_CREATE_FLAG_COMPUTE_MODE &&
			 args->flags & DRM_XE_VM_CREATE_FLAG_FAULT_MODE))
		return -EINVAL;

	if (XE_IOCTL_DBG(xe, args->flags & DRM_XE_VM_CREATE_FLAG_FAULT_MODE &&
			 xe_device_in_non_fault_mode(xe)))
		return -EINVAL;

	if (XE_IOCTL_DBG(xe, !(args->flags & DRM_XE_VM_CREATE_FLAG_FAULT_MODE) &&
			 xe_device_in_fault_mode(xe)))
		return -EINVAL;

	if (args->flags & DRM_XE_VM_CREATE_FLAG_SCRATCH_PAGE)
		flags |= XE_VM_FLAG_SCRATCH_PAGE;
	if (args->flags & DRM_XE_VM_CREATE_FLAG_COMPUTE_MODE)
		flags |= XE_VM_FLAG_COMPUTE_MODE;
	if (args->flags & DRM_XE_VM_CREATE_FLAG_ASYNC_DEFAULT)
		flags |= XE_VM_FLAG_ASYNC_DEFAULT;
	if (args->flags & DRM_XE_VM_CREATE_FLAG_FAULT_MODE)
		flags |= XE_VM_FLAG_FAULT_MODE;

	vm = xe_vm_create(xe, flags);
	if (IS_ERR(vm))
		return PTR_ERR(vm);

	mutex_lock(&xef->vm.lock);
	err = xa_alloc(&xef->vm.xa, &id, vm, xa_limit_32b, GFP_KERNEL);
	mutex_unlock(&xef->vm.lock);
	if (err) {
		xe_vm_close_and_put(vm);
		return err;
	}

	if (xe->info.has_asid) {
		mutex_lock(&xe->usm.lock);
		err = xa_alloc_cyclic(&xe->usm.asid_to_vm, &asid, vm,
				      XA_LIMIT(0, XE_MAX_ASID - 1),
				      &xe->usm.next_asid, GFP_KERNEL);
		mutex_unlock(&xe->usm.lock);
		if (err < 0) {
			xe_vm_close_and_put(vm);
			return err;
		}
		vm->usm.asid = asid;
	}

	args->vm_id = id;
	vm->xef = xef;

	/* Record BO memory for VM pagetable created against client */
	for_each_tile(tile, xe, id)
		if (vm->pt_root[id])
			xe_drm_client_add_bo(vm->xef->client, vm->pt_root[id]->bo);

#if IS_ENABLED(CONFIG_DRM_XE_DEBUG_MEM)
	/* Warning: Security issue - never enable by default */
	args->reserved[0] = xe_bo_main_addr(vm->pt_root[0]->bo, XE_PAGE_SIZE);
#endif

	return 0;
}

int xe_vm_destroy_ioctl(struct drm_device *dev, void *data,
			struct drm_file *file)
{
	struct xe_device *xe = to_xe_device(dev);
	struct xe_file *xef = to_xe_file(file);
	struct drm_xe_vm_destroy *args = data;
	struct xe_vm *vm;
	int err = 0;

	if (XE_IOCTL_DBG(xe, args->pad) ||
	    XE_IOCTL_DBG(xe, args->reserved[0] || args->reserved[1]))
		return -EINVAL;

	mutex_lock(&xef->vm.lock);
	vm = xa_load(&xef->vm.xa, args->vm_id);
	if (XE_IOCTL_DBG(xe, !vm))
		err = -ENOENT;
	else if (XE_IOCTL_DBG(xe, vm->preempt.num_exec_queues))
		err = -EBUSY;
	else
		xa_erase(&xef->vm.xa, args->vm_id);
	mutex_unlock(&xef->vm.lock);

	if (!err)
		xe_vm_close_and_put(vm);

	return err;
}

static const u32 region_to_mem_type[] = {
	XE_PL_TT,
	XE_PL_VRAM0,
	XE_PL_VRAM1,
};

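/*
 * The prefetch region index selects the TTM placement to migrate to:
 * 0 is system memory (TT), the remaining entries are VRAM instances.
 */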
static int xe_vm_prefetch(struct xe_vm *vm, struct xe_vma *vma,
			  struct xe_exec_queue *q, u32 region,
			  struct xe_sync_entry *syncs, u32 num_syncs,
			  bool first_op, bool last_op)
{
	struct xe_exec_queue *wait_exec_queue = to_wait_exec_queue(vm, q);
	int err;

	xe_assert(vm->xe, region < ARRAY_SIZE(region_to_mem_type));

	if (!xe_vma_has_no_bo(vma)) {
		err = xe_bo_migrate(xe_vma_bo(vma), region_to_mem_type[region]);
		if (err)
			return err;
	}

	if (vma->tile_mask != (vma->tile_present & ~vma->usm.tile_invalidated)) {
		return xe_vm_bind(vm, vma, q, xe_vma_bo(vma), syncs, num_syncs,
				  true, first_op, last_op);
	} else {
		int i;

		/* Nothing to do, signal fences now */
		if (last_op) {
			for (i = 0; i < num_syncs; i++) {
				struct dma_fence *fence =
					xe_exec_queue_last_fence_get(wait_exec_queue, vm);

				xe_sync_entry_signal(&syncs[i], NULL, fence);
			}
		}

		return 0;
	}
}

static void prep_vma_destroy(struct xe_vm *vm, struct xe_vma *vma,
			     bool post_commit)
{
	down_read(&vm->userptr.notifier_lock);
	vma->gpuva.flags |= XE_VMA_DESTROYED;
	up_read(&vm->userptr.notifier_lock);
	if (post_commit)
		xe_vm_remove_vma(vm, vma);
}

#define ULL	unsigned long long

#if IS_ENABLED(CONFIG_DRM_XE_DEBUG_VM)
static void print_op(struct xe_device *xe, struct drm_gpuva_op *op)
{
	struct xe_vma *vma;

	switch (op->op) {
	case DRM_GPUVA_OP_MAP:
		vm_dbg(&xe->drm, "MAP: addr=0x%016llx, range=0x%016llx",
		       (ULL)op->map.va.addr, (ULL)op->map.va.range);
		break;
	case DRM_GPUVA_OP_REMAP:
		vma = gpuva_to_vma(op->remap.unmap->va);
		vm_dbg(&xe->drm, "REMAP:UNMAP: addr=0x%016llx, range=0x%016llx, keep=%d",
		       (ULL)xe_vma_start(vma), (ULL)xe_vma_size(vma),
		       op->remap.unmap->keep ? 1 : 0);
		if (op->remap.prev)
			vm_dbg(&xe->drm,
			       "REMAP:PREV: addr=0x%016llx, range=0x%016llx",
			       (ULL)op->remap.prev->va.addr,
			       (ULL)op->remap.prev->va.range);
		if (op->remap.next)
			vm_dbg(&xe->drm,
			       "REMAP:NEXT: addr=0x%016llx, range=0x%016llx",
			       (ULL)op->remap.next->va.addr,
			       (ULL)op->remap.next->va.range);
		break;
	case DRM_GPUVA_OP_UNMAP:
		vma = gpuva_to_vma(op->unmap.va);
		vm_dbg(&xe->drm, "UNMAP: addr=0x%016llx, range=0x%016llx, keep=%d",
		       (ULL)xe_vma_start(vma), (ULL)xe_vma_size(vma),
		       op->unmap.keep ? 1 : 0);
		break;
	case DRM_GPUVA_OP_PREFETCH:
		vma = gpuva_to_vma(op->prefetch.va);
		vm_dbg(&xe->drm, "PREFETCH: addr=0x%016llx, range=0x%016llx",
		       (ULL)xe_vma_start(vma), (ULL)xe_vma_size(vma));
		break;
	default:
		drm_warn(&xe->drm, "NOT POSSIBLE");
	}
}
#else
static void print_op(struct xe_device *xe, struct drm_gpuva_op *op)
{
}
#endif

/*
 * Create the operations list from the IOCTL arguments and set up the
 * operation fields, so the parse and commit steps are decoupled from the
 * IOCTL arguments. This step can fail.
 */
static struct drm_gpuva_ops *
vm_bind_ioctl_ops_create(struct xe_vm *vm, struct xe_bo *bo,
			 u64 bo_offset_or_userptr, u64 addr, u64 range,
			 u32 operation, u32 flags, u8 tile_mask,
			 u32 prefetch_region)
{
	struct drm_gem_object *obj = bo ? &bo->ttm.base : NULL;
	struct drm_gpuva_ops *ops;
	struct drm_gpuva_op *__op;
	struct xe_vma_op *op;
	struct drm_gpuvm_bo *vm_bo;
	int err;

	lockdep_assert_held_write(&vm->lock);

	vm_dbg(&vm->xe->drm,
	       "op=%d, addr=0x%016llx, range=0x%016llx, bo_offset_or_userptr=0x%016llx",
	       operation, (ULL)addr, (ULL)range,
	       (ULL)bo_offset_or_userptr);

	switch (operation) {
	case DRM_XE_VM_BIND_OP_MAP:
	case DRM_XE_VM_BIND_OP_MAP_USERPTR:
		ops = drm_gpuvm_sm_map_ops_create(&vm->gpuvm, addr, range,
						  obj, bo_offset_or_userptr);
		break;
	case DRM_XE_VM_BIND_OP_UNMAP:
		ops = drm_gpuvm_sm_unmap_ops_create(&vm->gpuvm, addr, range);
		break;
	case DRM_XE_VM_BIND_OP_PREFETCH:
		ops = drm_gpuvm_prefetch_ops_create(&vm->gpuvm, addr, range);
		break;
	case DRM_XE_VM_BIND_OP_UNMAP_ALL:
		xe_assert(vm->xe, bo);

		err = xe_bo_lock(bo, true);
		if (err)
			return ERR_PTR(err);

		vm_bo = drm_gpuvm_bo_find(&vm->gpuvm, obj);
		if (!vm_bo)
			break;

		ops = drm_gpuvm_bo_unmap_ops_create(vm_bo);
		drm_gpuvm_bo_put(vm_bo);
		xe_bo_unlock(bo);
		break;
	default:
		drm_warn(&vm->xe->drm, "NOT POSSIBLE");
		ops = ERR_PTR(-EINVAL);
	}
	if (IS_ERR(ops))
		return ops;

#ifdef TEST_VM_ASYNC_OPS_ERROR
	if (operation & FORCE_ASYNC_OP_ERROR) {
		op = list_first_entry_or_null(&ops->list, struct xe_vma_op,
					      base.entry);
		if (op)
			op->inject_error = true;
	}
#endif

	drm_gpuva_for_each_op(__op, ops) {
		struct xe_vma_op *op = gpuva_op_to_vma_op(__op);

		op->tile_mask = tile_mask;
		if (__op->op == DRM_GPUVA_OP_MAP) {
			op->map.immediate =
				flags & DRM_XE_VM_BIND_FLAG_IMMEDIATE;
			op->map.read_only =
				flags & DRM_XE_VM_BIND_FLAG_READONLY;
			op->map.is_null = flags & DRM_XE_VM_BIND_FLAG_NULL;
		} else if (__op->op == DRM_GPUVA_OP_PREFETCH) {
			op->prefetch.region = prefetch_region;
		}

		print_op(vm->xe, __op);
	}

	return ops;
}

static struct xe_vma *new_vma(struct xe_vm *vm, struct drm_gpuva_op_map *op,
			      u8 tile_mask, bool read_only, bool is_null)
{
	struct xe_bo *bo = op->gem.obj ? gem_to_xe_bo(op->gem.obj) : NULL;
	struct xe_vma *vma;
	int err;

	lockdep_assert_held_write(&vm->lock);

	if (bo) {
		err = xe_bo_lock(bo, true);
		if (err)
			return ERR_PTR(err);
	}
	vma = xe_vma_create(vm, bo, op->gem.offset,
			    op->va.addr, op->va.addr +
			    op->va.range - 1, read_only, is_null,
			    tile_mask);
	if (bo)
		xe_bo_unlock(bo);
	if (IS_ERR(vma))
		return vma;

	if (xe_vma_is_userptr(vma)) {
		err = xe_vma_userptr_pin_pages(vma);
		if (err) {
			prep_vma_destroy(vm, vma, false);
			xe_vma_destroy_unlocked(vma);
			return ERR_PTR(err);
		}
	} else if (!xe_vma_has_no_bo(vma) && !bo->vm) {
		vm_insert_extobj(vm, vma);
		err = add_preempt_fences(vm, bo);
		if (err) {
			prep_vma_destroy(vm, vma, false);
			xe_vma_destroy_unlocked(vma);
			return ERR_PTR(err);
		}
	}

	return vma;
}

static u64 xe_vma_max_pte_size(struct xe_vma *vma)
{
	if (vma->gpuva.flags & XE_VMA_PTE_1G)
		return SZ_1G;
	else if (vma->gpuva.flags & XE_VMA_PTE_2M)
		return SZ_2M;

	return SZ_4K;
}

static u64 xe_vma_set_pte_size(struct xe_vma *vma, u64 size)
{
	switch (size) {
	case SZ_1G:
		vma->gpuva.flags |= XE_VMA_PTE_1G;
		break;
	case SZ_2M:
		vma->gpuva.flags |= XE_VMA_PTE_2M;
		break;
	}

	return SZ_4K;
}

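/*
 * Commit step: make the VMA tree reflect the requested ops. Anything
 * committed here must be unwindable via xe_vma_op_unwind() below if a
 * later op in the same bind fails.
 */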
static int xe_vma_op_commit(struct xe_vm *vm, struct xe_vma_op *op)
{
	int err = 0;

	lockdep_assert_held_write(&vm->lock);

	switch (op->base.op) {
	case DRM_GPUVA_OP_MAP:
		err |= xe_vm_insert_vma(vm, op->map.vma);
		if (!err)
			op->flags |= XE_VMA_OP_COMMITTED;
		break;
	case DRM_GPUVA_OP_REMAP:
	{
		u8 tile_present =
			gpuva_to_vma(op->base.remap.unmap->va)->tile_present;

		prep_vma_destroy(vm, gpuva_to_vma(op->base.remap.unmap->va),
				 true);
		op->flags |= XE_VMA_OP_COMMITTED;

		if (op->remap.prev) {
			err |= xe_vm_insert_vma(vm, op->remap.prev);
			if (!err)
				op->flags |= XE_VMA_OP_PREV_COMMITTED;
			if (!err && op->remap.skip_prev) {
				op->remap.prev->tile_present =
					tile_present;
				op->remap.prev = NULL;
			}
		}
		if (op->remap.next) {
			err |= xe_vm_insert_vma(vm, op->remap.next);
			if (!err)
				op->flags |= XE_VMA_OP_NEXT_COMMITTED;
			if (!err && op->remap.skip_next) {
				op->remap.next->tile_present =
					tile_present;
				op->remap.next = NULL;
			}
		}

		/* Adjust for partial unbind after removing VMA from VM */
		if (!err) {
			op->base.remap.unmap->va->va.addr = op->remap.start;
			op->base.remap.unmap->va->va.range = op->remap.range;
		}
		break;
	}
	case DRM_GPUVA_OP_UNMAP:
		prep_vma_destroy(vm, gpuva_to_vma(op->base.unmap.va), true);
		op->flags |= XE_VMA_OP_COMMITTED;
		break;
	case DRM_GPUVA_OP_PREFETCH:
		op->flags |= XE_VMA_OP_COMMITTED;
		break;
	default:
		drm_warn(&vm->xe->drm, "NOT POSSIBLE");
	}

	return err;
}

static int vm_bind_ioctl_ops_parse(struct xe_vm *vm, struct xe_exec_queue *q,
				   struct drm_gpuva_ops *ops,
				   struct xe_sync_entry *syncs, u32 num_syncs,
				   struct list_head *ops_list, bool last,
				   bool async)
{
	struct xe_vma_op *last_op = NULL;
	struct drm_gpuva_op *__op;
	int err = 0;

	lockdep_assert_held_write(&vm->lock);

	drm_gpuva_for_each_op(__op, ops) {
		struct xe_vma_op *op = gpuva_op_to_vma_op(__op);
		bool first = list_empty(ops_list);

		INIT_LIST_HEAD(&op->link);
		list_add_tail(&op->link, ops_list);

		if (first) {
			op->flags |= XE_VMA_OP_FIRST;
			op->num_syncs = num_syncs;
			op->syncs = syncs;
		}

		op->q = q;

		switch (op->base.op) {
		case DRM_GPUVA_OP_MAP:
		{
			struct xe_vma *vma;

			vma = new_vma(vm, &op->base.map,
				      op->tile_mask, op->map.read_only,
				      op->map.is_null);
			if (IS_ERR(vma))
				return PTR_ERR(vma);

			op->map.vma = vma;
			break;
		}
		case DRM_GPUVA_OP_REMAP:
		{
			struct xe_vma *old =
				gpuva_to_vma(op->base.remap.unmap->va);

			op->remap.start = xe_vma_start(old);
			op->remap.range = xe_vma_size(old);

			if (op->base.remap.prev) {
				struct xe_vma *vma;
				bool read_only =
					op->base.remap.unmap->va->flags &
					XE_VMA_READ_ONLY;
				bool is_null =
					op->base.remap.unmap->va->flags &
					DRM_GPUVA_SPARSE;

				vma = new_vma(vm, op->base.remap.prev,
					      op->tile_mask, read_only,
					      is_null);
				if (IS_ERR(vma))
					return PTR_ERR(vma);

				op->remap.prev = vma;

				/*
				 * Userptr creates a new SG mapping so
				 * we must also rebind.
				 */
				op->remap.skip_prev = !xe_vma_is_userptr(old) &&
					IS_ALIGNED(xe_vma_end(vma),
						   xe_vma_max_pte_size(old));
				if (op->remap.skip_prev) {
					xe_vma_set_pte_size(vma, xe_vma_max_pte_size(old));
					op->remap.range -=
						xe_vma_end(vma) -
						xe_vma_start(old);
					op->remap.start = xe_vma_end(vma);
				}
			}

			if (op->base.remap.next) {
				struct xe_vma *vma;
				bool read_only =
					op->base.remap.unmap->va->flags &
					XE_VMA_READ_ONLY;
				bool is_null =
					op->base.remap.unmap->va->flags &
					DRM_GPUVA_SPARSE;

				vma = new_vma(vm, op->base.remap.next,
					      op->tile_mask, read_only,
					      is_null);
				if (IS_ERR(vma))
					return PTR_ERR(vma);

				op->remap.next = vma;

				/*
				 * Userptr creates a new SG mapping so
				 * we must also rebind.
				 */
				op->remap.skip_next = !xe_vma_is_userptr(old) &&
					IS_ALIGNED(xe_vma_start(vma),
						   xe_vma_max_pte_size(old));
				if (op->remap.skip_next) {
					xe_vma_set_pte_size(vma, xe_vma_max_pte_size(old));
					op->remap.range -=
						xe_vma_end(old) -
						xe_vma_start(vma);
				}
			}
			break;
		}
		case DRM_GPUVA_OP_UNMAP:
		case DRM_GPUVA_OP_PREFETCH:
			/* Nothing to do */
			break;
		default:
			drm_warn(&vm->xe->drm, "NOT POSSIBLE");
		}

		last_op = op;

		err = xe_vma_op_commit(vm, op);
		if (err)
			return err;
	}

	/* FIXME: Unhandled corner case */
	XE_WARN_ON(!last_op && last && !list_empty(ops_list));

	if (!last_op)
		return 0;

	last_op->ops = ops;
	if (last) {
		last_op->flags |= XE_VMA_OP_LAST;
		last_op->num_syncs = num_syncs;
		last_op->syncs = syncs;
	}

	return 0;
}

static int op_execute(struct drm_exec *exec, struct xe_vm *vm,
		      struct xe_vma *vma, struct xe_vma_op *op)
{
	int err;

	lockdep_assert_held_write(&vm->lock);

	err = xe_vm_prepare_vma(exec, vma, 1);
	if (err)
		return err;

	xe_vm_assert_held(vm);
	xe_bo_assert_held(xe_vma_bo(vma));

	switch (op->base.op) {
	case DRM_GPUVA_OP_MAP:
		err = xe_vm_bind(vm, vma, op->q, xe_vma_bo(vma),
				 op->syncs, op->num_syncs,
				 op->map.immediate || !xe_vm_in_fault_mode(vm),
				 op->flags & XE_VMA_OP_FIRST,
				 op->flags & XE_VMA_OP_LAST);
		break;
	case DRM_GPUVA_OP_REMAP:
	{
		bool prev = !!op->remap.prev;
		bool next = !!op->remap.next;

		if (!op->remap.unmap_done) {
			if (prev || next)
				vma->gpuva.flags |= XE_VMA_FIRST_REBIND;
			err = xe_vm_unbind(vm, vma, op->q, op->syncs,
					   op->num_syncs,
					   op->flags & XE_VMA_OP_FIRST,
					   op->flags & XE_VMA_OP_LAST &&
					   !prev && !next);
			if (err)
				break;
			op->remap.unmap_done = true;
		}

		if (prev) {
			op->remap.prev->gpuva.flags |= XE_VMA_LAST_REBIND;
			err = xe_vm_bind(vm, op->remap.prev, op->q,
					 xe_vma_bo(op->remap.prev), op->syncs,
					 op->num_syncs, true, false,
					 op->flags & XE_VMA_OP_LAST && !next);
			op->remap.prev->gpuva.flags &= ~XE_VMA_LAST_REBIND;
			if (err)
				break;
			op->remap.prev = NULL;
		}

		if (next) {
			op->remap.next->gpuva.flags |= XE_VMA_LAST_REBIND;
			err = xe_vm_bind(vm, op->remap.next, op->q,
					 xe_vma_bo(op->remap.next),
					 op->syncs, op->num_syncs,
					 true, false,
					 op->flags & XE_VMA_OP_LAST);
			op->remap.next->gpuva.flags &= ~XE_VMA_LAST_REBIND;
			if (err)
				break;
			op->remap.next = NULL;
		}

		break;
	}
	case DRM_GPUVA_OP_UNMAP:
		err = xe_vm_unbind(vm, vma, op->q, op->syncs,
				   op->num_syncs, op->flags & XE_VMA_OP_FIRST,
				   op->flags & XE_VMA_OP_LAST);
		break;
	case DRM_GPUVA_OP_PREFETCH:
		err = xe_vm_prefetch(vm, vma, op->q, op->prefetch.region,
				     op->syncs, op->num_syncs,
				     op->flags & XE_VMA_OP_FIRST,
				     op->flags & XE_VMA_OP_LAST);
		break;
	default:
		drm_warn(&vm->xe->drm, "NOT POSSIBLE");
	}

	if (err)
		trace_xe_vma_fail(vma);

	return err;
}

static int __xe_vma_op_execute(struct xe_vm *vm, struct xe_vma *vma,
			       struct xe_vma_op *op)
{
	struct drm_exec exec;
	int err;

retry_userptr:
	drm_exec_init(&exec, DRM_EXEC_INTERRUPTIBLE_WAIT);
	drm_exec_until_all_locked(&exec) {
		err = op_execute(&exec, vm, vma, op);
		drm_exec_retry_on_contention(&exec);
		if (err)
			break;
	}
	drm_exec_fini(&exec);

	if (err == -EAGAIN && xe_vma_is_userptr(vma)) {
		lockdep_assert_held_write(&vm->lock);
		err = xe_vma_userptr_pin_pages(vma);
		if (!err)
			goto retry_userptr;

		trace_xe_vma_fail(vma);
	}

	return err;
}

static int xe_vma_op_execute(struct xe_vm *vm, struct xe_vma_op *op)
{
	int ret = 0;

	lockdep_assert_held_write(&vm->lock);

#ifdef TEST_VM_ASYNC_OPS_ERROR
	if (op->inject_error) {
		op->inject_error = false;
		return -ENOMEM;
	}
#endif

	switch (op->base.op) {
	case DRM_GPUVA_OP_MAP:
		ret = __xe_vma_op_execute(vm, op->map.vma, op);
		break;
	case DRM_GPUVA_OP_REMAP:
	{
		struct xe_vma *vma;

		if (!op->remap.unmap_done)
			vma = gpuva_to_vma(op->base.remap.unmap->va);
		else if (op->remap.prev)
			vma = op->remap.prev;
		else
			vma = op->remap.next;

		ret = __xe_vma_op_execute(vm, vma, op);
		break;
	}
	case DRM_GPUVA_OP_UNMAP:
		ret = __xe_vma_op_execute(vm, gpuva_to_vma(op->base.unmap.va),
					  op);
		break;
	case DRM_GPUVA_OP_PREFETCH:
		ret = __xe_vma_op_execute(vm,
					  gpuva_to_vma(op->base.prefetch.va),
					  op);
		break;
	default:
		drm_warn(&vm->xe->drm, "NOT POSSIBLE");
	}

	return ret;
}

static void xe_vma_op_cleanup(struct xe_vm *vm, struct xe_vma_op *op)
{
	bool last = op->flags & XE_VMA_OP_LAST;

	if (last) {
		while (op->num_syncs--)
			xe_sync_entry_cleanup(&op->syncs[op->num_syncs]);
		kfree(op->syncs);
		if (op->q)
			xe_exec_queue_put(op->q);
	}
	if (!list_empty(&op->link))
		list_del(&op->link);
	if (op->ops)
		drm_gpuva_ops_free(&vm->gpuvm, op->ops);
	if (last)
		xe_vm_put(vm);
}

static void xe_vma_op_unwind(struct xe_vm *vm, struct xe_vma_op *op,
			     bool post_commit, bool prev_post_commit,
			     bool next_post_commit)
{
	lockdep_assert_held_write(&vm->lock);

	switch (op->base.op) {
	case DRM_GPUVA_OP_MAP:
		if (op->map.vma) {
			prep_vma_destroy(vm, op->map.vma, post_commit);
			xe_vma_destroy_unlocked(op->map.vma);
		}
		break;
	case DRM_GPUVA_OP_UNMAP:
	{
		struct xe_vma *vma = gpuva_to_vma(op->base.unmap.va);

		if (vma) {
			down_read(&vm->userptr.notifier_lock);
			vma->gpuva.flags &= ~XE_VMA_DESTROYED;
			up_read(&vm->userptr.notifier_lock);
			if (post_commit)
				xe_vm_insert_vma(vm, vma);
		}
		break;
	}
	case DRM_GPUVA_OP_REMAP:
	{
		struct xe_vma *vma = gpuva_to_vma(op->base.remap.unmap->va);

		if (op->remap.prev) {
			prep_vma_destroy(vm, op->remap.prev, prev_post_commit);
			xe_vma_destroy_unlocked(op->remap.prev);
		}
		if (op->remap.next) {
			prep_vma_destroy(vm, op->remap.next, next_post_commit);
			xe_vma_destroy_unlocked(op->remap.next);
		}
		if (vma) {
			down_read(&vm->userptr.notifier_lock);
			vma->gpuva.flags &= ~XE_VMA_DESTROYED;
			up_read(&vm->userptr.notifier_lock);
			if (post_commit)
				xe_vm_insert_vma(vm, vma);
		}
		break;
	}
	case DRM_GPUVA_OP_PREFETCH:
		/* Nothing to do */
		break;
	default:
		drm_warn(&vm->xe->drm, "NOT POSSIBLE");
	}
}

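/*
 * Unwind an array of drm_gpuva_ops lists in the reverse of commit order:
 * later lists are unwound before earlier ones, and within each list the
 * ops are walked back-to-front (drm_gpuva_for_each_op_reverse), so each
 * xe_vma_op_unwind() call undoes the most recent remaining change first.
 */
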
static void vm_bind_ioctl_ops_unwind(struct xe_vm *vm,
				     struct drm_gpuva_ops **ops,
				     int num_ops_list)
{
	int i;

	for (i = num_ops_list - 1; i >= 0; --i) {
		struct drm_gpuva_ops *__ops = ops[i];
		struct drm_gpuva_op *__op;

		if (!__ops)
			continue;

		drm_gpuva_for_each_op_reverse(__op, __ops) {
			struct xe_vma_op *op = gpuva_op_to_vma_op(__op);

			xe_vma_op_unwind(vm, op,
					 op->flags & XE_VMA_OP_COMMITTED,
					 op->flags & XE_VMA_OP_PREV_COMMITTED,
					 op->flags & XE_VMA_OP_NEXT_COMMITTED);
		}

		drm_gpuva_ops_free(&vm->gpuvm, __ops);
	}
}

static int vm_bind_ioctl_ops_execute(struct xe_vm *vm,
				     struct list_head *ops_list)
{
	struct xe_vma_op *op, *next;
	int err;

	lockdep_assert_held_write(&vm->lock);

	list_for_each_entry_safe(op, next, ops_list, link) {
		err = xe_vma_op_execute(vm, op);
		if (err) {
			drm_warn(&vm->xe->drm, "VM op(%d) failed with %d",
				 op->base.op, err);
			/*
			 * FIXME: Killing VM rather than proper error handling
			 */
			xe_vm_kill(vm);
			return -ENOSPC;
		}
		xe_vma_op_cleanup(vm, op);
	}

	return 0;
}

#ifdef TEST_VM_ASYNC_OPS_ERROR
#define SUPPORTED_FLAGS	\
	(FORCE_ASYNC_OP_ERROR | DRM_XE_VM_BIND_FLAG_ASYNC | \
	 DRM_XE_VM_BIND_FLAG_READONLY | DRM_XE_VM_BIND_FLAG_IMMEDIATE | \
	 DRM_XE_VM_BIND_FLAG_NULL | 0xffff)
#else
#define SUPPORTED_FLAGS	\
	(DRM_XE_VM_BIND_FLAG_ASYNC | DRM_XE_VM_BIND_FLAG_READONLY | \
	 DRM_XE_VM_BIND_FLAG_IMMEDIATE | DRM_XE_VM_BIND_FLAG_NULL | \
	 0xffff)
#endif
#define XE_64K_PAGE_MASK 0xffffull

#define MAX_BINDS	512	/* FIXME: Picking random upper limit */

static int vm_bind_ioctl_check_args(struct xe_device *xe,
				    struct drm_xe_vm_bind *args,
				    struct drm_xe_vm_bind_op **bind_ops,
				    bool *async)
{
	int err;
	int i;

	if (XE_IOCTL_DBG(xe, args->extensions) ||
	    XE_IOCTL_DBG(xe, !args->num_binds) ||
	    XE_IOCTL_DBG(xe, args->num_binds > MAX_BINDS))
		return -EINVAL;

	if (args->num_binds > 1) {
		u64 __user *bind_user =
			u64_to_user_ptr(args->vector_of_binds);

		*bind_ops = kmalloc(sizeof(struct drm_xe_vm_bind_op) *
				    args->num_binds, GFP_KERNEL);
		if (!*bind_ops)
			return -ENOMEM;

		err = __copy_from_user(*bind_ops, bind_user,
				       sizeof(struct drm_xe_vm_bind_op) *
				       args->num_binds);
		if (XE_IOCTL_DBG(xe, err)) {
			err = -EFAULT;
			goto free_bind_ops;
		}
	} else {
		*bind_ops = &args->bind;
	}

	for (i = 0; i < args->num_binds; ++i) {
		u64 range = (*bind_ops)[i].range;
		u64 addr = (*bind_ops)[i].addr;
		u32 op = (*bind_ops)[i].op;
		u32 flags = (*bind_ops)[i].flags;
		u32 obj = (*bind_ops)[i].obj;
		u64 obj_offset = (*bind_ops)[i].obj_offset;
		u32 prefetch_region = (*bind_ops)[i].prefetch_mem_region_instance;
		bool is_null = flags & DRM_XE_VM_BIND_FLAG_NULL;

		if (i == 0) {
			*async = !!(flags & DRM_XE_VM_BIND_FLAG_ASYNC);
			if (XE_IOCTL_DBG(xe, !*async && args->num_syncs)) {
				err = -EINVAL;
				goto free_bind_ops;
			}
		} else if (XE_IOCTL_DBG(xe, *async !=
					!!(flags & DRM_XE_VM_BIND_FLAG_ASYNC))) {
			err = -EINVAL;
			goto free_bind_ops;
		}

		if (XE_IOCTL_DBG(xe, op > DRM_XE_VM_BIND_OP_PREFETCH) ||
		    XE_IOCTL_DBG(xe, flags & ~SUPPORTED_FLAGS) ||
		    XE_IOCTL_DBG(xe, obj && is_null) ||
		    XE_IOCTL_DBG(xe, obj_offset && is_null) ||
		    XE_IOCTL_DBG(xe, op != DRM_XE_VM_BIND_OP_MAP &&
				 is_null) ||
		    XE_IOCTL_DBG(xe, !obj &&
				 op == DRM_XE_VM_BIND_OP_MAP &&
				 !is_null) ||
		    XE_IOCTL_DBG(xe, !obj &&
				 op == DRM_XE_VM_BIND_OP_UNMAP_ALL) ||
		    XE_IOCTL_DBG(xe, addr &&
				 op == DRM_XE_VM_BIND_OP_UNMAP_ALL) ||
		    XE_IOCTL_DBG(xe, range &&
				 op == DRM_XE_VM_BIND_OP_UNMAP_ALL) ||
		    XE_IOCTL_DBG(xe, obj &&
				 op == DRM_XE_VM_BIND_OP_MAP_USERPTR) ||
		    XE_IOCTL_DBG(xe, obj &&
				 op == DRM_XE_VM_BIND_OP_PREFETCH) ||
		    XE_IOCTL_DBG(xe, prefetch_region &&
				 op != DRM_XE_VM_BIND_OP_PREFETCH) ||
		    XE_IOCTL_DBG(xe, !(BIT(prefetch_region) &
				       xe->info.mem_region_mask)) ||
		    XE_IOCTL_DBG(xe, obj &&
				 op == DRM_XE_VM_BIND_OP_UNMAP)) {
			err = -EINVAL;
			goto free_bind_ops;
		}

		if (XE_IOCTL_DBG(xe, obj_offset & ~PAGE_MASK) ||
		    XE_IOCTL_DBG(xe, addr & ~PAGE_MASK) ||
		    XE_IOCTL_DBG(xe, range & ~PAGE_MASK) ||
		    XE_IOCTL_DBG(xe, !range &&
				 op != DRM_XE_VM_BIND_OP_UNMAP_ALL)) {
			err = -EINVAL;
			goto free_bind_ops;
		}
	}

	return 0;

free_bind_ops:
	if (args->num_binds > 1)
		kfree(*bind_ops);
	return err;
}

int xe_vm_bind_ioctl(struct drm_device *dev, void *data, struct drm_file *file)
{
	struct xe_device *xe = to_xe_device(dev);
	struct xe_file *xef = to_xe_file(file);
	struct drm_xe_vm_bind *args = data;
	struct drm_xe_sync __user *syncs_user;
	struct xe_bo **bos = NULL;
	struct drm_gpuva_ops **ops = NULL;
	struct xe_vm *vm;
	struct xe_exec_queue *q = NULL;
	u32 num_syncs;
	struct xe_sync_entry *syncs = NULL;
	struct drm_xe_vm_bind_op *bind_ops;
	LIST_HEAD(ops_list);
	bool async;
	int err;
	int i;

	err = vm_bind_ioctl_check_args(xe, args, &bind_ops, &async);
	if (err)
		return err;

	if (args->exec_queue_id) {
		q = xe_exec_queue_lookup(xef, args->exec_queue_id);
		if (XE_IOCTL_DBG(xe, !q)) {
			err = -ENOENT;
			goto free_objs;
		}

		if (XE_IOCTL_DBG(xe, !(q->flags & EXEC_QUEUE_FLAG_VM))) {
			err = -EINVAL;
			goto put_exec_queue;
		}

		if (XE_IOCTL_DBG(xe, async !=
				 !!(q->flags & EXEC_QUEUE_FLAG_VM_ASYNC))) {
			err = -EINVAL;
			goto put_exec_queue;
		}
	}

	vm = xe_vm_lookup(xef, args->vm_id);
	if (XE_IOCTL_DBG(xe, !vm)) {
		err = -EINVAL;
		goto put_exec_queue;
	}

	if (!args->exec_queue_id) {
		if (XE_IOCTL_DBG(xe, async !=
				 !!(vm->flags & XE_VM_FLAG_ASYNC_DEFAULT))) {
			err = -EINVAL;
			goto put_vm;
		}
	}

	err = down_write_killable(&vm->lock);
	if (err)
		goto put_vm;

	if (XE_IOCTL_DBG(xe, xe_vm_is_closed_or_banned(vm))) {
		err = -ENOENT;
		goto release_vm_lock;
	}

	for (i = 0; i < args->num_binds; ++i) {
		u64 range = bind_ops[i].range;
		u64 addr = bind_ops[i].addr;

		if (XE_IOCTL_DBG(xe, range > vm->size) ||
		    XE_IOCTL_DBG(xe, addr > vm->size - range)) {
			err = -EINVAL;
			goto release_vm_lock;
		}

		if (bind_ops[i].tile_mask) {
			u64 valid_tiles = BIT(xe->info.tile_count) - 1;

			if (XE_IOCTL_DBG(xe, bind_ops[i].tile_mask &
					 ~valid_tiles)) {
				err = -EINVAL;
				goto release_vm_lock;
			}
		}
	}

	bos = kzalloc(sizeof(*bos) * args->num_binds, GFP_KERNEL);
	if (!bos) {
		err = -ENOMEM;
		goto release_vm_lock;
	}

	ops = kzalloc(sizeof(*ops) * args->num_binds, GFP_KERNEL);
	if (!ops) {
		err = -ENOMEM;
		goto release_vm_lock;
	}

	for (i = 0; i < args->num_binds; ++i) {
		struct drm_gem_object *gem_obj;
		u64 range = bind_ops[i].range;
		u64 addr = bind_ops[i].addr;
		u32 obj = bind_ops[i].obj;
		u64 obj_offset = bind_ops[i].obj_offset;

		if (!obj)
			continue;

		gem_obj = drm_gem_object_lookup(file, obj);
		if (XE_IOCTL_DBG(xe, !gem_obj)) {
			err = -ENOENT;
			goto put_obj;
		}
		bos[i] = gem_to_xe_bo(gem_obj);

		if (XE_IOCTL_DBG(xe, range > bos[i]->size) ||
		    XE_IOCTL_DBG(xe, obj_offset >
				 bos[i]->size - range)) {
			err = -EINVAL;
			goto put_obj;
		}

		if (bos[i]->flags & XE_BO_INTERNAL_64K) {
			if (XE_IOCTL_DBG(xe, obj_offset &
					 XE_64K_PAGE_MASK) ||
			    XE_IOCTL_DBG(xe, addr & XE_64K_PAGE_MASK) ||
			    XE_IOCTL_DBG(xe, range & XE_64K_PAGE_MASK)) {
				err = -EINVAL;
				goto put_obj;
			}
		}
	}

	if (args->num_syncs) {
		syncs = kcalloc(args->num_syncs, sizeof(*syncs), GFP_KERNEL);
		if (!syncs) {
			err = -ENOMEM;
			goto put_obj;
		}
	}

	syncs_user = u64_to_user_ptr(args->syncs);
	for (num_syncs = 0; num_syncs < args->num_syncs; num_syncs++) {
		err = xe_sync_entry_parse(xe, xef, &syncs[num_syncs],
					  &syncs_user[num_syncs], false,
					  xe_vm_no_dma_fences(vm));
		if (err)
			goto free_syncs;
	}

	for (i = 0; i < args->num_binds; ++i) {
		u64 range = bind_ops[i].range;
		u64 addr = bind_ops[i].addr;
		u32 op = bind_ops[i].op;
		u32 flags = bind_ops[i].flags;
		u64 obj_offset = bind_ops[i].obj_offset;
		u8 tile_mask = bind_ops[i].tile_mask;
		u32 prefetch_region = bind_ops[i].prefetch_mem_region_instance;

		ops[i] = vm_bind_ioctl_ops_create(vm, bos[i], obj_offset,
						  addr, range, op, flags,
						  tile_mask, prefetch_region);
		if (IS_ERR(ops[i])) {
			err = PTR_ERR(ops[i]);
			ops[i] = NULL;
			goto unwind_ops;
		}

		err = vm_bind_ioctl_ops_parse(vm, q, ops[i], syncs, num_syncs,
					      &ops_list,
					      i == args->num_binds - 1,
					      async);
		if (err)
			goto unwind_ops;
	}

	/* Nothing to execute: signal in-syncs and report success */
	if (list_empty(&ops_list)) {
		err = -ENODATA;
		goto unwind_ops;
	}

	xe_vm_get(vm);
	if (q)
		xe_exec_queue_get(q);

	err = vm_bind_ioctl_ops_execute(vm, &ops_list);

	up_write(&vm->lock);

	if (q)
		xe_exec_queue_put(q);
	xe_vm_put(vm);
	for (i = 0; bos && i < args->num_binds; ++i)
		xe_bo_put(bos[i]);

	kfree(bos);
	kfree(ops);
	if (args->num_binds > 1)
		kfree(bind_ops);

	return err;

unwind_ops:
	vm_bind_ioctl_ops_unwind(vm, ops, args->num_binds);
free_syncs:
	for (i = 0; err == -ENODATA && i < num_syncs; i++) {
		struct dma_fence *fence =
			xe_exec_queue_last_fence_get(to_wait_exec_queue(vm, q), vm);

		xe_sync_entry_signal(&syncs[i], NULL, fence);
	}
	while (num_syncs--)
		xe_sync_entry_cleanup(&syncs[num_syncs]);

	kfree(syncs);
put_obj:
	for (i = 0; i < args->num_binds; ++i)
		xe_bo_put(bos[i]);
release_vm_lock:
	up_write(&vm->lock);
put_vm:
	xe_vm_put(vm);
put_exec_queue:
	if (q)
		xe_exec_queue_put(q);
free_objs:
	kfree(bos);
	kfree(ops);
	if (args->num_binds > 1)
		kfree(bind_ops);
	return err == -ENODATA ? 0 : err;
}

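/*
 * Illustrative userspace invocation of the ioctl above (a minimal sketch,
 * not kernel code; fd, vm_id, bo_handle, gpu_addr and size are assumed to
 * come from earlier setup, and addr/range must satisfy the alignment
 * checks in vm_bind_ioctl_check_args()):
 *
 *	struct drm_xe_vm_bind bind = {
 *		.vm_id = vm_id,
 *		.num_binds = 1,
 *		.bind = {
 *			.obj = bo_handle,
 *			.obj_offset = 0,
 *			.addr = gpu_addr,
 *			.range = size,
 *			.op = DRM_XE_VM_BIND_OP_MAP,
 *			.flags = DRM_XE_VM_BIND_FLAG_ASYNC,
 *		},
 *	};
 *	int err = ioctl(fd, DRM_IOCTL_XE_VM_BIND, &bind);
 */
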
/**
 * xe_vm_lock() - Lock the vm's dma_resv object
 * @vm: The struct xe_vm whose lock is to be locked
 * @intr: Whether to wait interruptibly
 *
 * Return: 0 on success, -EINTR if @intr is true and the wait for a
 * contended lock was interrupted. If @intr is false, the function
 * always succeeds.
 */
int xe_vm_lock(struct xe_vm *vm, bool intr)
{
	if (intr)
		return dma_resv_lock_interruptible(xe_vm_resv(vm), NULL);

	return dma_resv_lock(xe_vm_resv(vm), NULL);
}

/**
 * xe_vm_unlock() - Unlock the vm's dma_resv object
 * @vm: The struct xe_vm whose lock is to be released.
 *
 * Unlock the vm's dma_resv object that was locked by xe_vm_lock().
 */
void xe_vm_unlock(struct xe_vm *vm)
{
	dma_resv_unlock(xe_vm_resv(vm));
}

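/*
 * Typical xe_vm_lock()/xe_vm_unlock() usage (a minimal sketch; vm is
 * assumed to be a valid struct xe_vm pointer held by the caller):
 *
 *	int err = xe_vm_lock(vm, true);	(interruptible; may return -EINTR)
 *	if (err)
 *		return err;
 *
 *	... access state protected by the vm's dma_resv ...
 *
 *	xe_vm_unlock(vm);
 */
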
/**
 * xe_vm_invalidate_vma - invalidate GPU mappings for VMA without a lock
 * @vma: VMA to invalidate
 *
 * Walks a list of page table leaves, zeroing the entries owned by this
 * VMA, then invalidates the TLBs and blocks until the TLB invalidation
 * is complete.
 *
 * Return: 0 for success, negative error code otherwise.
 */
int xe_vm_invalidate_vma(struct xe_vma *vma)
{
	struct xe_device *xe = xe_vma_vm(vma)->xe;
	struct xe_tile *tile;
	u32 tile_needs_invalidate = 0;
	int seqno[XE_MAX_TILES_PER_DEVICE];
	u8 id;
	int ret;

	xe_assert(xe, xe_vm_in_fault_mode(xe_vma_vm(vma)));
	xe_assert(xe, !xe_vma_is_null(vma));
	trace_xe_vma_usm_invalidate(vma);

	/* Check that we don't race with page-table updates */
	if (IS_ENABLED(CONFIG_PROVE_LOCKING)) {
		if (xe_vma_is_userptr(vma)) {
			WARN_ON_ONCE(!mmu_interval_check_retry
				     (&vma->userptr.notifier,
				      vma->userptr.notifier_seq));
			WARN_ON_ONCE(!dma_resv_test_signaled(xe_vm_resv(xe_vma_vm(vma)),
							     DMA_RESV_USAGE_BOOKKEEP));
		} else {
			xe_bo_assert_held(xe_vma_bo(vma));
		}
	}

	for_each_tile(tile, xe, id) {
		if (xe_pt_zap_ptes(tile, vma)) {
			tile_needs_invalidate |= BIT(id);
			xe_device_wmb(xe);
			/*
			 * FIXME: We potentially need to invalidate multiple
			 * GTs within the tile
			 */
			seqno[id] = xe_gt_tlb_invalidation_vma(tile->primary_gt, NULL, vma);
			if (seqno[id] < 0)
				return seqno[id];
		}
	}

	for_each_tile(tile, xe, id) {
		if (tile_needs_invalidate & BIT(id)) {
			ret = xe_gt_tlb_invalidation_wait(tile->primary_gt,
							  seqno[id]);
			if (ret < 0)
				return ret;
		}
	}

	vma->usm.tile_invalidated = vma->tile_mask;

	return 0;
}

int xe_analyze_vm(struct drm_printer *p, struct xe_vm *vm, int gt_id)
{
	struct drm_gpuva *gpuva;
	bool is_vram;
	uint64_t addr;

	if (!down_read_trylock(&vm->lock)) {
		drm_printf(p, " Failed to acquire VM lock to dump capture");
		return 0;
	}
	if (vm->pt_root[gt_id]) {
		addr = xe_bo_addr(vm->pt_root[gt_id]->bo, 0, XE_PAGE_SIZE);
		is_vram = xe_bo_is_vram(vm->pt_root[gt_id]->bo);
		drm_printf(p, " VM root: A:0x%llx %s\n", addr,
			   is_vram ? "VRAM" : "SYS");
	}

	drm_gpuvm_for_each_va(gpuva, &vm->gpuvm) {
		struct xe_vma *vma = gpuva_to_vma(gpuva);
		bool is_userptr = xe_vma_is_userptr(vma);
		bool is_null = xe_vma_is_null(vma);

		if (is_null) {
			addr = 0;
		} else if (is_userptr) {
			struct xe_res_cursor cur;

			if (vma->userptr.sg) {
				xe_res_first_sg(vma->userptr.sg, 0, XE_PAGE_SIZE,
						&cur);
				addr = xe_res_dma(&cur);
			} else {
				addr = 0;
			}
		} else {
			addr = __xe_bo_addr(xe_vma_bo(vma), 0, XE_PAGE_SIZE);
			is_vram = xe_bo_is_vram(xe_vma_bo(vma));
		}
		drm_printf(p, " [%016llx-%016llx] S:0x%016llx A:%016llx %s\n",
			   xe_vma_start(vma), xe_vma_end(vma) - 1,
			   xe_vma_size(vma),
			   addr, is_null ? "NULL" : is_userptr ? "USR" :
			   is_vram ? "VRAM" : "SYS");
	}
	up_read(&vm->lock);

	return 0;
}