#include <linux/dma-fence-array.h>
+#include <drm/drm_exec.h>
+#include <drm/drm_print.h>
#include <drm/ttm/ttm_execbuf_util.h>
#include <drm/ttm/ttm_tt.h>
#include <drm/xe_drm.h>
+#include <linux/delay.h>
#include <linux/kthread.h>
#include <linux/mm.h>
#include <linux/swap.h>
+#include "xe_assert.h"
#include "xe_bo.h"
#include "xe_device.h"
-#include "xe_engine.h"
+#include "xe_drm_client.h"
+#include "xe_exec_queue.h"
#include "xe_gt.h"
#include "xe_gt_pagefault.h"
#include "xe_gt_tlb_invalidation.h"
#include "xe_res_cursor.h"
#include "xe_sync.h"
#include "xe_trace.h"
+#include "generated/xe_wa_oob.h"
+#include "xe_wa.h"
#define TEST_VM_ASYNC_OPS_ERROR
+static struct drm_gem_object *xe_vm_obj(struct xe_vm *vm)
+{
+ return vm->gpuvm.r_obj;
+}
+
/**
* xe_vma_userptr_check_repin() - Advisory check for repin needed
* @vma: The userptr vma
int xe_vma_userptr_pin_pages(struct xe_vma *vma)
{
- struct xe_vm *vm = vma->vm;
+ struct xe_vm *vm = xe_vma_vm(vma);
struct xe_device *xe = vm->xe;
- const unsigned long num_pages =
- (vma->end - vma->start + 1) >> PAGE_SHIFT;
+ const unsigned long num_pages = xe_vma_size(vma) >> PAGE_SHIFT;
struct page **pages;
bool in_kthread = !current->mm;
unsigned long notifier_seq;
int pinned, ret, i;
- bool read_only = vma->pte_flags & PTE_READ_ONLY;
+ bool read_only = xe_vma_read_only(vma);
lockdep_assert_held(&vm->lock);
- XE_BUG_ON(!xe_vma_is_userptr(vma));
+ xe_assert(xe, xe_vma_is_userptr(vma));
retry:
- if (vma->destroyed)
+ if (vma->gpuva.flags & XE_VMA_DESTROYED)
return 0;
notifier_seq = mmu_interval_read_begin(&vma->userptr.notifier);
}
while (pinned < num_pages) {
- ret = get_user_pages_fast(vma->userptr.ptr + pinned * PAGE_SIZE,
+ ret = get_user_pages_fast(xe_vma_userptr(vma) +
+ pinned * PAGE_SIZE,
num_pages - pinned,
read_only ? 0 : FOLL_WRITE,
&pages[pinned]);
if (ret)
goto out;
- ret = sg_alloc_table_from_pages(&vma->userptr.sgt, pages, pinned,
- 0, (u64)pinned << PAGE_SHIFT,
- GFP_KERNEL);
+ ret = sg_alloc_table_from_pages_segment(&vma->userptr.sgt, pages,
+ pinned, 0,
+ (u64)pinned << PAGE_SHIFT,
+ xe_sg_segment_size(xe->drm.dev),
+ GFP_KERNEL);
if (ret) {
vma->userptr.sg = NULL;
goto out;
static bool preempt_fences_waiting(struct xe_vm *vm)
{
- struct xe_engine *e;
+ struct xe_exec_queue *q;
lockdep_assert_held(&vm->lock);
xe_vm_assert_held(vm);
- list_for_each_entry(e, &vm->preempt.engines, compute.link) {
- if (!e->compute.pfence || (e->compute.pfence &&
- test_bit(DMA_FENCE_FLAG_ENABLE_SIGNAL_BIT,
- &e->compute.pfence->flags))) {
+ list_for_each_entry(q, &vm->preempt.exec_queues, compute.link) {
+ if (!q->compute.pfence ||
+ (q->compute.pfence && test_bit(DMA_FENCE_FLAG_ENABLE_SIGNAL_BIT,
+ &q->compute.pfence->flags))) {
return true;
}
}
lockdep_assert_held(&vm->lock);
xe_vm_assert_held(vm);
- if (*count >= vm->preempt.num_engines)
+ if (*count >= vm->preempt.num_exec_queues)
return 0;
- for (; *count < vm->preempt.num_engines; ++(*count)) {
+ for (; *count < vm->preempt.num_exec_queues; ++(*count)) {
struct xe_preempt_fence *pfence = xe_preempt_fence_alloc();
if (IS_ERR(pfence))
static int wait_for_existing_preempt_fences(struct xe_vm *vm)
{
- struct xe_engine *e;
+ struct xe_exec_queue *q;
xe_vm_assert_held(vm);
- list_for_each_entry(e, &vm->preempt.engines, compute.link) {
- if (e->compute.pfence) {
- long timeout = dma_fence_wait(e->compute.pfence, false);
+ list_for_each_entry(q, &vm->preempt.exec_queues, compute.link) {
+ if (q->compute.pfence) {
+ long timeout = dma_fence_wait(q->compute.pfence, false);
if (timeout < 0)
return -ETIME;
- dma_fence_put(e->compute.pfence);
- e->compute.pfence = NULL;
+ dma_fence_put(q->compute.pfence);
+ q->compute.pfence = NULL;
}
}
static bool xe_vm_is_idle(struct xe_vm *vm)
{
- struct xe_engine *e;
+ struct xe_exec_queue *q;
xe_vm_assert_held(vm);
- list_for_each_entry(e, &vm->preempt.engines, compute.link) {
- if (!xe_engine_is_idle(e))
+ list_for_each_entry(q, &vm->preempt.exec_queues, compute.link) {
+ if (!xe_exec_queue_is_idle(q))
return false;
}
static void arm_preempt_fences(struct xe_vm *vm, struct list_head *list)
{
struct list_head *link;
- struct xe_engine *e;
+ struct xe_exec_queue *q;
- list_for_each_entry(e, &vm->preempt.engines, compute.link) {
+ list_for_each_entry(q, &vm->preempt.exec_queues, compute.link) {
struct dma_fence *fence;
link = list->next;
- XE_BUG_ON(link == list);
+ xe_assert(vm->xe, link != list);
fence = xe_preempt_fence_arm(to_preempt_fence_from_link(link),
- e, e->compute.context,
- ++e->compute.seqno);
- dma_fence_put(e->compute.pfence);
- e->compute.pfence = fence;
+ q, q->compute.context,
+ ++q->compute.seqno);
+ dma_fence_put(q->compute.pfence);
+ q->compute.pfence = fence;
}
}
static int add_preempt_fences(struct xe_vm *vm, struct xe_bo *bo)
{
- struct xe_engine *e;
- struct ww_acquire_ctx ww;
+ struct xe_exec_queue *q;
int err;
- err = xe_bo_lock(bo, &ww, vm->preempt.num_engines, true);
+ err = xe_bo_lock(bo, true);
if (err)
return err;
- list_for_each_entry(e, &vm->preempt.engines, compute.link)
- if (e->compute.pfence) {
+ err = dma_resv_reserve_fences(bo->ttm.base.resv, vm->preempt.num_exec_queues);
+ if (err)
+ goto out_unlock;
+
+ list_for_each_entry(q, &vm->preempt.exec_queues, compute.link)
+ if (q->compute.pfence) {
dma_resv_add_fence(bo->ttm.base.resv,
- e->compute.pfence,
+ q->compute.pfence,
DMA_RESV_USAGE_BOOKKEEP);
}
- xe_bo_unlock(bo, &ww);
- return 0;
+out_unlock:
+ xe_bo_unlock(bo);
+ return err;
}
/**
struct xe_vma *vma;
list_for_each_entry(vma, &vm->extobj.list, extobj.link)
- dma_resv_add_fence(vma->bo->ttm.base.resv, fence, usage);
+ dma_resv_add_fence(xe_vma_bo(vma)->ttm.base.resv, fence, usage);
}
static void resume_and_reinstall_preempt_fences(struct xe_vm *vm)
{
- struct xe_engine *e;
+ struct xe_exec_queue *q;
lockdep_assert_held(&vm->lock);
xe_vm_assert_held(vm);
- list_for_each_entry(e, &vm->preempt.engines, compute.link) {
- e->ops->resume(e);
+ list_for_each_entry(q, &vm->preempt.exec_queues, compute.link) {
+ q->ops->resume(q);
- dma_resv_add_fence(&vm->resv, e->compute.pfence,
+ dma_resv_add_fence(xe_vm_resv(vm), q->compute.pfence,
DMA_RESV_USAGE_BOOKKEEP);
- xe_vm_fence_all_extobjs(vm, e->compute.pfence,
+ xe_vm_fence_all_extobjs(vm, q->compute.pfence,
DMA_RESV_USAGE_BOOKKEEP);
}
}
-int xe_vm_add_compute_engine(struct xe_vm *vm, struct xe_engine *e)
+int xe_vm_add_compute_exec_queue(struct xe_vm *vm, struct xe_exec_queue *q)
{
- struct ttm_validate_buffer tv_onstack[XE_ONSTACK_TV];
- struct ttm_validate_buffer *tv;
- struct ww_acquire_ctx ww;
- struct list_head objs;
+ struct drm_exec exec;
struct dma_fence *pfence;
int err;
bool wait;
- XE_BUG_ON(!xe_vm_in_compute_mode(vm));
+ xe_assert(vm->xe, xe_vm_in_compute_mode(vm));
down_write(&vm->lock);
+ drm_exec_init(&exec, DRM_EXEC_INTERRUPTIBLE_WAIT);
+ drm_exec_until_all_locked(&exec) {
+ err = xe_vm_lock_dma_resv(vm, &exec, 1, true);
+ drm_exec_retry_on_contention(&exec);
+ if (err)
+ goto out_unlock;
+ }
- err = xe_vm_lock_dma_resv(vm, &ww, tv_onstack, &tv, &objs, true, 1);
- if (err)
- goto out_unlock_outer;
-
- pfence = xe_preempt_fence_create(e, e->compute.context,
- ++e->compute.seqno);
+ pfence = xe_preempt_fence_create(q, q->compute.context,
+ ++q->compute.seqno);
if (!pfence) {
err = -ENOMEM;
goto out_unlock;
}
- list_add(&e->compute.link, &vm->preempt.engines);
- ++vm->preempt.num_engines;
- e->compute.pfence = pfence;
+ list_add(&q->compute.link, &vm->preempt.exec_queues);
+ ++vm->preempt.num_exec_queues;
+ q->compute.pfence = pfence;
down_read(&vm->userptr.notifier_lock);
- dma_resv_add_fence(&vm->resv, pfence,
+ dma_resv_add_fence(xe_vm_resv(vm), pfence,
DMA_RESV_USAGE_BOOKKEEP);
xe_vm_fence_all_extobjs(vm, pfence, DMA_RESV_USAGE_BOOKKEEP);
up_read(&vm->userptr.notifier_lock);
out_unlock:
- xe_vm_unlock_dma_resv(vm, tv_onstack, tv, &ww, &objs);
-out_unlock_outer:
+ drm_exec_fini(&exec);
up_write(&vm->lock);
return err;
}
+/**
+ * xe_vm_remove_compute_exec_queue() - Remove compute exec queue from VM
+ * @vm: The VM.
+ * @q: The exec_queue.
+ */
+void xe_vm_remove_compute_exec_queue(struct xe_vm *vm, struct xe_exec_queue *q)
+{
+ if (!xe_vm_in_compute_mode(vm))
+ return;
+
+ down_write(&vm->lock);
+ list_del(&q->compute.link);
+ --vm->preempt.num_exec_queues;
+ if (q->compute.pfence) {
+ dma_fence_enable_sw_signaling(q->compute.pfence);
+ dma_fence_put(q->compute.pfence);
+ q->compute.pfence = NULL;
+ }
+ up_write(&vm->lock);
+}
+
/**
* __xe_vm_userptr_needs_repin() - Check whether the VM does have userptrs
* that need repinning.
* xe_vm_lock_dma_resv() - Lock the vm dma_resv object and the dma_resv
* objects of the vm's external buffer objects.
* @vm: The vm.
- * @ww: Pointer to a struct ww_acquire_ctx locking context.
- * @tv_onstack: Array size XE_ONSTACK_TV of storage for the struct
- * ttm_validate_buffers used for locking.
- * @tv: Pointer to a pointer that on output contains the actual storage used.
- * @objs: List head for the buffer objects locked.
- * @intr: Whether to lock interruptible.
+ * @exec: Pointer to a struct drm_exec locking context.
* @num_shared: Number of dma-fence slots to reserve in the locked objects.
+ * @lock_vm: Also lock the vm's dma_resv.
*
* Locks the vm dma-resv objects and all the dma-resv objects of the
- * buffer objects on the vm external object list. The TTM utilities require
- * a list of struct ttm_validate_buffers pointing to the actual buffer
- * objects to lock. Storage for those struct ttm_validate_buffers should
- * be provided in @tv_onstack, and is typically reserved on the stack
- * of the caller. If the size of @tv_onstack isn't sufficient, then
- * storage will be allocated internally using kvmalloc().
- *
- * The function performs deadlock handling internally, and after a
- * successful return the ww locking transaction should be considered
- * sealed.
+ * buffer objects on the vm external object list.
*
- * Return: 0 on success, Negative error code on error. In particular if
- * @intr is set to true, -EINTR or -ERESTARTSYS may be returned. In case
- * of error, any locking performed has been reverted.
+ * Return: 0 on success, negative error code on error. In particular,
+ * -EINTR or -ERESTARTSYS may be returned if an interruptible wait is
+ * terminated by a signal.
*/
-int xe_vm_lock_dma_resv(struct xe_vm *vm, struct ww_acquire_ctx *ww,
- struct ttm_validate_buffer *tv_onstack,
- struct ttm_validate_buffer **tv,
- struct list_head *objs,
- bool intr,
- unsigned int num_shared)
-{
- struct ttm_validate_buffer *tv_vm, *tv_bo;
+int xe_vm_lock_dma_resv(struct xe_vm *vm, struct drm_exec *exec,
+ unsigned int num_shared, bool lock_vm)
+{
struct xe_vma *vma, *next;
- LIST_HEAD(dups);
- int err;
+ int err = 0;
lockdep_assert_held(&vm->lock);
- if (vm->extobj.entries < XE_ONSTACK_TV) {
- tv_vm = tv_onstack;
- } else {
- tv_vm = kvmalloc_array(vm->extobj.entries + 1, sizeof(*tv_vm),
- GFP_KERNEL);
- if (!tv_vm)
- return -ENOMEM;
+ if (lock_vm) {
+ err = drm_exec_prepare_obj(exec, xe_vm_obj(vm), num_shared);
+ if (err)
+ return err;
}
- tv_bo = tv_vm + 1;
- INIT_LIST_HEAD(objs);
list_for_each_entry(vma, &vm->extobj.list, extobj.link) {
- tv_bo->num_shared = num_shared;
- tv_bo->bo = &vma->bo->ttm;
-
- list_add_tail(&tv_bo->head, objs);
- tv_bo++;
+ err = drm_exec_prepare_obj(exec, &xe_vma_bo(vma)->ttm.base, num_shared);
+ if (err)
+ return err;
}
- tv_vm->num_shared = num_shared;
- tv_vm->bo = xe_vm_ttm_bo(vm);
- list_add_tail(&tv_vm->head, objs);
- err = ttm_eu_reserve_buffers(ww, objs, intr, &dups);
- if (err)
- goto out_err;
spin_lock(&vm->notifier.list_lock);
list_for_each_entry_safe(vma, next, &vm->notifier.rebind_list,
notifier.rebind_link) {
- xe_bo_assert_held(vma->bo);
+ xe_bo_assert_held(xe_vma_bo(vma));
list_del_init(&vma->notifier.rebind_link);
- if (vma->gt_present && !vma->destroyed)
- list_move_tail(&vma->rebind_link, &vm->rebind_list);
+ if (vma->tile_present && !(vma->gpuva.flags & XE_VMA_DESTROYED))
+ list_move_tail(&vma->combined_links.rebind,
+ &vm->rebind_list);
}
spin_unlock(&vm->notifier.list_lock);
- *tv = tv_vm;
return 0;
+}
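+
+/*
+ * A minimal sketch of the intended calling pattern, modeled on
+ * xe_vm_add_compute_exec_queue() above; everything other than the
+ * xe_vm_lock_dma_resv() call itself is caller-side placeholder code:
+ *
+ *	drm_exec_init(&exec, DRM_EXEC_INTERRUPTIBLE_WAIT);
+ *	drm_exec_until_all_locked(&exec) {
+ *		err = xe_vm_lock_dma_resv(vm, &exec, num_shared, true);
+ *		drm_exec_retry_on_contention(&exec);
+ *		if (err)
+ *			break;
+ *	}
+ *	... use the locked objects ...
+ *	drm_exec_fini(&exec);
+ */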
-out_err:
- if (tv_vm != tv_onstack)
- kvfree(tv_vm);
+#define XE_VM_REBIND_RETRY_TIMEOUT_MS 1000
- return err;
+static void xe_vm_kill(struct xe_vm *vm)
+{
+ struct xe_exec_queue *q;
+
+ lockdep_assert_held(&vm->lock);
+
+ xe_vm_lock(vm, false);
+ vm->flags |= XE_VM_FLAG_BANNED;
+ trace_xe_vm_kill(vm);
+
+ list_for_each_entry(q, &vm->preempt.exec_queues, compute.link)
+ q->ops->kill(q);
+ xe_vm_unlock(vm);
+
+ /* TODO: Inform user the VM is banned */
}
/**
- * xe_vm_unlock_dma_resv() - Unlock reservation objects locked by
- * xe_vm_lock_dma_resv()
- * @vm: The vm.
- * @tv_onstack: The @tv_onstack array given to xe_vm_lock_dma_resv().
- * @tv: The value of *@tv given by xe_vm_lock_dma_resv().
- * @ww: The ww_acquire_context used for locking.
- * @objs: The list returned from xe_vm_lock_dma_resv().
+ * xe_vm_validate_should_retry() - Whether to retry after a validate error.
+ * @exec: The drm_exec object used for locking before validation.
+ * @err: The error returned from ttm_bo_validate().
+ * @end: A ktime_t cookie that should be set to 0 before first use and
+ * that should be reused on subsequent calls.
*
- * Unlocks the reservation objects and frees any memory allocated by
- * xe_vm_lock_dma_resv().
+ * With multiple active VMs, under memory pressure, it is possible that
+ * ttm_bo_validate() runs into -EDEADLK and in such a case returns -ENOMEM.
+ * Until ttm properly handles locking in such scenarios, the best thing the
+ * driver can do is retry with a timeout. Check whether that is necessary, and
+ * if so unlock the drm_exec's objects while keeping the ticket to prepare
+ * for a rerun.
+ *
+ * Return: true if a retry after drm_exec_init() is recommended;
+ * false otherwise.
*/
-void xe_vm_unlock_dma_resv(struct xe_vm *vm,
- struct ttm_validate_buffer *tv_onstack,
- struct ttm_validate_buffer *tv,
- struct ww_acquire_ctx *ww,
- struct list_head *objs)
+bool xe_vm_validate_should_retry(struct drm_exec *exec, int err, ktime_t *end)
+{
+ ktime_t cur;
+
+ if (err != -ENOMEM)
+ return false;
+
+ cur = ktime_get();
+ *end = *end ? : ktime_add_ms(cur, XE_VM_REBIND_RETRY_TIMEOUT_MS);
+ if (!ktime_before(cur, *end))
+ return false;
+
+ /*
+ * We would like to keep the ticket here with
+ * drm_exec_unlock_all(), but WW mutex asserts currently
+ * stop us from that. In any case this function could go away
+ * with proper TTM -EDEADLK handling.
+ */
+ drm_exec_fini(exec);
+
+ msleep(20);
+ return true;
+}
+
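+/*
+ * Sketch of the retry pattern this helper enables (see
+ * preempt_rebind_work_func() below for the in-tree caller). A true
+ * return means this helper has already called drm_exec_fini(), so the
+ * caller restarts with a fresh drm_exec_init(); lock_and_validate() is
+ * a placeholder for the caller's own locking and validation step:
+ *
+ *	ktime_t end = 0;
+ * retry:
+ *	drm_exec_init(&exec, DRM_EXEC_INTERRUPTIBLE_WAIT);
+ *	drm_exec_until_all_locked(&exec) {
+ *		err = lock_and_validate(&exec);
+ *		drm_exec_retry_on_contention(&exec);
+ *	}
+ *	if (err && xe_vm_validate_should_retry(&exec, err, &end))
+ *		goto retry;
+ *	drm_exec_fini(&exec);
+ */
+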
+static int xe_preempt_work_begin(struct drm_exec *exec, struct xe_vm *vm,
+ bool *done)
{
+ struct xe_vma *vma;
+ int err;
+
/*
- * Nothing should've been able to enter the list while we were locked,
- * since we've held the dma-resvs of all the vm's external objects,
- * and holding the dma_resv of an object is required for list
- * addition, and we shouldn't add ourselves.
+ * 1 fence for each preempt fence plus a fence for each tile from a
+ * possible rebind
*/
- XE_WARN_ON(!list_empty(&vm->notifier.rebind_list));
+ err = drm_exec_prepare_obj(exec, xe_vm_obj(vm),
+ vm->preempt.num_exec_queues +
+ vm->xe->info.tile_count);
+ if (err)
+ return err;
+
+ if (xe_vm_is_idle(vm)) {
+ vm->preempt.rebind_deactivated = true;
+ *done = true;
+ return 0;
+ }
+
+ if (!preempt_fences_waiting(vm)) {
+ *done = true;
+ return 0;
+ }
+
+ err = xe_vm_lock_dma_resv(vm, exec, vm->preempt.num_exec_queues, false);
+ if (err)
+ return err;
+
+ err = wait_for_existing_preempt_fences(vm);
+ if (err)
+ return err;
+
+ list_for_each_entry(vma, &vm->rebind_list, combined_links.rebind) {
+ if (xe_vma_has_no_bo(vma) ||
+ vma->gpuva.flags & XE_VMA_DESTROYED)
+ continue;
+
+ err = xe_bo_validate(xe_vma_bo(vma), vm, false);
+ if (err)
+ break;
+ }
- ttm_eu_backoff_reservation(ww, objs);
- if (tv && tv != tv_onstack)
- kvfree(tv);
+ return err;
}
static void preempt_rebind_work_func(struct work_struct *w)
{
struct xe_vm *vm = container_of(w, struct xe_vm, preempt.rebind_work);
- struct xe_vma *vma;
- struct ttm_validate_buffer tv_onstack[XE_ONSTACK_TV];
- struct ttm_validate_buffer *tv;
- struct ww_acquire_ctx ww;
- struct list_head objs;
+ struct drm_exec exec;
struct dma_fence *rebind_fence;
unsigned int fence_count = 0;
LIST_HEAD(preempt_fences);
- int err;
+ ktime_t end = 0;
+ int err = 0;
long wait;
int __maybe_unused tries = 0;
- XE_BUG_ON(!xe_vm_in_compute_mode(vm));
+ xe_assert(vm->xe, xe_vm_in_compute_mode(vm));
trace_xe_vm_rebind_worker_enter(vm);
- if (xe_vm_is_closed(vm)) {
- trace_xe_vm_rebind_worker_exit(vm);
- return;
- }
-
down_write(&vm->lock);
-retry:
- if (vm->async_ops.error)
- goto out_unlock_outer;
-
- /*
- * Extreme corner where we exit a VM error state with a munmap style VM
- * unbind inflight which requires a rebind. In this case the rebind
- * needs to install some fences into the dma-resv slots. The worker to
- * do this queued, let that worker make progress by dropping vm->lock
- * and trying this again.
- */
- if (vm->async_ops.munmap_rebind_inflight) {
+ if (xe_vm_is_closed_or_banned(vm)) {
up_write(&vm->lock);
- flush_work(&vm->async_ops.work);
- goto retry;
+ trace_xe_vm_rebind_worker_exit(vm);
+ return;
}
+retry:
if (xe_vm_userptr_check_repin(vm)) {
err = xe_vm_userptr_pin(vm);
if (err)
goto out_unlock_outer;
}
- err = xe_vm_lock_dma_resv(vm, &ww, tv_onstack, &tv, &objs,
- false, vm->preempt.num_engines);
- if (err)
- goto out_unlock_outer;
-
- if (xe_vm_is_idle(vm)) {
- vm->preempt.rebind_deactivated = true;
- goto out_unlock;
- }
+ drm_exec_init(&exec, DRM_EXEC_INTERRUPTIBLE_WAIT);
- /* Fresh preempt fences already installed. Everyting is running. */
- if (!preempt_fences_waiting(vm))
- goto out_unlock;
+ drm_exec_until_all_locked(&exec) {
+ bool done = false;
- /*
- * This makes sure vm is completely suspended and also balances
- * xe_engine suspend- and resume; we resume *all* vm engines below.
- */
- err = wait_for_existing_preempt_fences(vm);
- if (err)
- goto out_unlock;
+ err = xe_preempt_work_begin(&exec, vm, &done);
+ drm_exec_retry_on_contention(&exec);
+ if (err && xe_vm_validate_should_retry(&exec, err, &end)) {
+ err = -EAGAIN;
+ goto out_unlock_outer;
+ }
+ if (err || done)
+ goto out_unlock;
+ }
err = alloc_preempt_fences(vm, &preempt_fences, &fence_count);
if (err)
goto out_unlock;
- list_for_each_entry(vma, &vm->rebind_list, rebind_link) {
- if (xe_vma_is_userptr(vma) || vma->destroyed)
- continue;
-
- err = xe_bo_validate(vma->bo, vm, false);
- if (err)
- goto out_unlock;
- }
-
rebind_fence = xe_vm_rebind(vm, true);
if (IS_ERR(rebind_fence)) {
err = PTR_ERR(rebind_fence);
}
/* Wait on munmap style VM unbinds */
- wait = dma_resv_wait_timeout(&vm->resv,
+ wait = dma_resv_wait_timeout(xe_vm_resv(vm),
DMA_RESV_USAGE_KERNEL,
false, MAX_SCHEDULE_TIMEOUT);
if (wait <= 0) {
#undef retry_required
+ spin_lock(&vm->xe->ttm.lru_lock);
+ ttm_lru_bulk_move_tail(&vm->lru_bulk_move);
+ spin_unlock(&vm->xe->ttm.lru_lock);
+
/* Point of no return. */
arm_preempt_fences(vm, &preempt_fences);
resume_and_reinstall_preempt_fences(vm);
up_read(&vm->userptr.notifier_lock);
out_unlock:
- xe_vm_unlock_dma_resv(vm, tv_onstack, tv, &ww, &objs);
+ drm_exec_fini(&exec);
out_unlock_outer:
if (err == -EAGAIN) {
trace_xe_vm_rebind_worker_retry(vm);
goto retry;
}
+
+ if (err) {
+ drm_warn(&vm->xe->drm, "VM worker error: %d\n", err);
+ xe_vm_kill(vm);
+ }
up_write(&vm->lock);
free_preempt_fences(&preempt_fences);
- XE_WARN_ON(err < 0); /* TODO: Kill VM or put in error state */
trace_xe_vm_rebind_worker_exit(vm);
}
-struct async_op_fence;
-static int __xe_vm_bind(struct xe_vm *vm, struct xe_vma *vma,
- struct xe_engine *e, struct xe_sync_entry *syncs,
- u32 num_syncs, struct async_op_fence *afence);
-
static bool vma_userptr_invalidate(struct mmu_interval_notifier *mni,
const struct mmu_notifier_range *range,
unsigned long cur_seq)
{
struct xe_vma *vma = container_of(mni, struct xe_vma, userptr.notifier);
- struct xe_vm *vm = vma->vm;
+ struct xe_vm *vm = xe_vma_vm(vma);
struct dma_resv_iter cursor;
struct dma_fence *fence;
long err;
- XE_BUG_ON(!xe_vma_is_userptr(vma));
+ xe_assert(vm->xe, xe_vma_is_userptr(vma));
trace_xe_vma_userptr_invalidate(vma);
if (!mmu_notifier_range_blockable(range))
* Tell exec and rebind worker they need to repin and rebind this
* userptr.
*/
- if (!xe_vm_in_fault_mode(vm) && !vma->destroyed && vma->gt_present) {
+ if (!xe_vm_in_fault_mode(vm) &&
+ !(vma->gpuva.flags & XE_VMA_DESTROYED) && vma->tile_present) {
spin_lock(&vm->userptr.invalidated_lock);
list_move_tail(&vma->userptr.invalidate_link,
&vm->userptr.invalidated);
* unbinds to complete, and those are attached as BOOKMARK fences
* to the vm.
*/
- dma_resv_iter_begin(&cursor, &vm->resv,
+ dma_resv_iter_begin(&cursor, xe_vm_resv(vm),
DMA_RESV_USAGE_BOOKKEEP);
dma_resv_for_each_fence_unlocked(&cursor, fence)
dma_fence_enable_sw_signaling(fence);
dma_resv_iter_end(&cursor);
- err = dma_resv_wait_timeout(&vm->resv,
+ err = dma_resv_wait_timeout(xe_vm_resv(vm),
DMA_RESV_USAGE_BOOKKEEP,
false, MAX_SCHEDULE_TIMEOUT);
XE_WARN_ON(err <= 0);
list_for_each_entry_safe(vma, next, &vm->userptr.invalidated,
userptr.invalidate_link) {
list_del_init(&vma->userptr.invalidate_link);
- list_move_tail(&vma->userptr_link, &vm->userptr.repin_list);
+ if (list_empty(&vma->combined_links.userptr))
+ list_move_tail(&vma->combined_links.userptr,
+ &vm->userptr.repin_list);
}
spin_unlock(&vm->userptr.invalidated_lock);
/* Pin and move to temporary list */
- list_for_each_entry_safe(vma, next, &vm->userptr.repin_list, userptr_link) {
+ list_for_each_entry_safe(vma, next, &vm->userptr.repin_list,
+ combined_links.userptr) {
err = xe_vma_userptr_pin_pages(vma);
if (err < 0)
goto out_err;
- list_move_tail(&vma->userptr_link, &tmp_evict);
+ list_move_tail(&vma->combined_links.userptr, &tmp_evict);
}
/* Take lock and move to rebind_list for rebinding. */
- err = dma_resv_lock_interruptible(&vm->resv, NULL);
+ err = dma_resv_lock_interruptible(xe_vm_resv(vm), NULL);
if (err)
goto out_err;
- list_for_each_entry_safe(vma, next, &tmp_evict, userptr_link) {
- list_del_init(&vma->userptr_link);
- list_move_tail(&vma->rebind_link, &vm->rebind_list);
- }
+ list_for_each_entry_safe(vma, next, &tmp_evict, combined_links.userptr)
+ list_move_tail(&vma->combined_links.rebind, &vm->rebind_list);
- dma_resv_unlock(&vm->resv);
+ dma_resv_unlock(xe_vm_resv(vm));
return 0;
}
static struct dma_fence *
-xe_vm_bind_vma(struct xe_vma *vma, struct xe_engine *e,
- struct xe_sync_entry *syncs, u32 num_syncs);
+xe_vm_bind_vma(struct xe_vma *vma, struct xe_exec_queue *q,
+ struct xe_sync_entry *syncs, u32 num_syncs,
+ bool first_op, bool last_op);
struct dma_fence *xe_vm_rebind(struct xe_vm *vm, bool rebind_worker)
{
return NULL;
xe_vm_assert_held(vm);
- list_for_each_entry_safe(vma, next, &vm->rebind_list, rebind_link) {
- XE_WARN_ON(!vma->gt_present);
+ list_for_each_entry_safe(vma, next, &vm->rebind_list,
+ combined_links.rebind) {
+ xe_assert(vm->xe, vma->tile_present);
- list_del_init(&vma->rebind_link);
+ list_del_init(&vma->combined_links.rebind);
dma_fence_put(fence);
if (rebind_worker)
trace_xe_vma_rebind_worker(vma);
else
trace_xe_vma_rebind_exec(vma);
- fence = xe_vm_bind_vma(vma, NULL, NULL, 0);
+ fence = xe_vm_bind_vma(vma, NULL, NULL, 0, false, false);
if (IS_ERR(fence))
return fence;
}
u64 bo_offset_or_userptr,
u64 start, u64 end,
bool read_only,
- u64 gt_mask)
+ bool is_null,
+ u8 tile_mask)
{
struct xe_vma *vma;
- struct xe_gt *gt;
+ struct xe_tile *tile;
u8 id;
- XE_BUG_ON(start >= end);
- XE_BUG_ON(end >= vm->size);
+ xe_assert(vm->xe, start < end);
+ xe_assert(vm->xe, end < vm->size);
- vma = kzalloc(sizeof(*vma), GFP_KERNEL);
+ if (!bo && !is_null) /* userptr */
+ vma = kzalloc(sizeof(*vma), GFP_KERNEL);
+ else
+ vma = kzalloc(sizeof(*vma) - sizeof(struct xe_userptr),
+ GFP_KERNEL);
if (!vma) {
vma = ERR_PTR(-ENOMEM);
return vma;
}
- INIT_LIST_HEAD(&vma->rebind_link);
- INIT_LIST_HEAD(&vma->unbind_link);
- INIT_LIST_HEAD(&vma->userptr_link);
- INIT_LIST_HEAD(&vma->userptr.invalidate_link);
+ INIT_LIST_HEAD(&vma->combined_links.rebind);
INIT_LIST_HEAD(&vma->notifier.rebind_link);
INIT_LIST_HEAD(&vma->extobj.link);
- vma->vm = vm;
- vma->start = start;
- vma->end = end;
+ INIT_LIST_HEAD(&vma->gpuva.gem.entry);
+ vma->gpuva.vm = &vm->gpuvm;
+ vma->gpuva.va.addr = start;
+ vma->gpuva.va.range = end - start + 1;
if (read_only)
- vma->pte_flags = PTE_READ_ONLY;
+ vma->gpuva.flags |= XE_VMA_READ_ONLY;
+ if (is_null)
+ vma->gpuva.flags |= DRM_GPUVA_SPARSE;
- if (gt_mask) {
- vma->gt_mask = gt_mask;
+ if (tile_mask) {
+ vma->tile_mask = tile_mask;
} else {
- for_each_gt(gt, vm->xe, id)
- if (!xe_gt_is_media_type(gt))
- vma->gt_mask |= 0x1 << id;
+ for_each_tile(tile, vm->xe, id)
+ vma->tile_mask |= 0x1 << id;
}
- if (vm->xe->info.platform == XE_PVC)
- vma->use_atomic_access_pte_bit = true;
+ if (GRAPHICS_VER(vm->xe) >= 20 || vm->xe->info.platform == XE_PVC)
+ vma->gpuva.flags |= XE_VMA_ATOMIC_PTE_BIT;
if (bo) {
+ struct drm_gpuvm_bo *vm_bo;
+
xe_bo_assert_held(bo);
- vma->bo_offset = bo_offset_or_userptr;
- vma->bo = xe_bo_get(bo);
- list_add_tail(&vma->bo_link, &bo->vmas);
- } else /* userptr */ {
- u64 size = end - start + 1;
- int err;
-
- vma->userptr.ptr = bo_offset_or_userptr;
-
- err = mmu_interval_notifier_insert(&vma->userptr.notifier,
- current->mm,
- vma->userptr.ptr, size,
- &vma_userptr_notifier_ops);
- if (err) {
+
+ vm_bo = drm_gpuvm_bo_obtain(vma->gpuva.vm, &bo->ttm.base);
+ if (IS_ERR(vm_bo)) {
kfree(vma);
- vma = ERR_PTR(err);
- return vma;
+ return ERR_CAST(vm_bo);
+ }
+
+ drm_gem_object_get(&bo->ttm.base);
+ vma->gpuva.gem.obj = &bo->ttm.base;
+ vma->gpuva.gem.offset = bo_offset_or_userptr;
+ drm_gpuva_link(&vma->gpuva, vm_bo);
+ drm_gpuvm_bo_put(vm_bo);
+ } else /* userptr or null */ {
+ if (!is_null) {
+ u64 size = end - start + 1;
+ int err;
+
+ INIT_LIST_HEAD(&vma->userptr.invalidate_link);
+ vma->gpuva.gem.offset = bo_offset_or_userptr;
+
+ err = mmu_interval_notifier_insert(&vma->userptr.notifier,
+ current->mm,
+ xe_vma_userptr(vma), size,
+ &vma_userptr_notifier_ops);
+ if (err) {
+ kfree(vma);
+ vma = ERR_PTR(err);
+ return vma;
+ }
+
+ vma->userptr.notifier_seq = LONG_MAX;
}
- vma->userptr.notifier_seq = LONG_MAX;
xe_vm_get(vm);
}
static bool vm_remove_extobj(struct xe_vma *vma)
{
if (!list_empty(&vma->extobj.link)) {
- vma->vm->extobj.entries--;
+ xe_vma_vm(vma)->extobj.entries--;
list_del_init(&vma->extobj.link);
return true;
}
static void xe_vma_destroy_late(struct xe_vma *vma)
{
- struct xe_vm *vm = vma->vm;
+ struct xe_vm *vm = xe_vma_vm(vma);
struct xe_device *xe = vm->xe;
- bool read_only = vma->pte_flags & PTE_READ_ONLY;
+ bool read_only = xe_vma_read_only(vma);
if (xe_vma_is_userptr(vma)) {
if (vma->userptr.sg) {
*/
mmu_interval_notifier_remove(&vma->userptr.notifier);
xe_vm_put(vm);
+ } else if (xe_vma_is_null(vma)) {
+ xe_vm_put(vm);
} else {
- xe_bo_put(vma->bo);
+ xe_bo_put(xe_vma_bo(vma));
}
kfree(vma);
bo_has_vm_references_locked(struct xe_bo *bo, struct xe_vm *vm,
struct xe_vma *ignore)
{
- struct xe_vma *vma;
+ struct drm_gpuvm_bo *vm_bo;
+ struct drm_gpuva *va;
+ struct drm_gem_object *obj = &bo->ttm.base;
+
+ xe_bo_assert_held(bo);
- list_for_each_entry(vma, &bo->vmas, bo_link) {
- if (vma != ignore && vma->vm == vm && !vma->destroyed)
- return vma;
+ drm_gem_for_each_gpuvm_bo(vm_bo, obj) {
+ drm_gpuvm_bo_for_each_va(va, vm_bo) {
+ struct xe_vma *vma = gpuva_to_vma(va);
+
+ if (vma != ignore && xe_vma_vm(vma) == vm)
+ return vma;
+ }
}
return NULL;
static bool bo_has_vm_references(struct xe_bo *bo, struct xe_vm *vm,
struct xe_vma *ignore)
{
- struct ww_acquire_ctx ww;
bool ret;
- xe_bo_lock(bo, &ww, 0, false);
+ xe_bo_lock(bo, false);
ret = !!bo_has_vm_references_locked(bo, vm, ignore);
- xe_bo_unlock(bo, &ww);
+ xe_bo_unlock(bo);
return ret;
}
static void __vm_insert_extobj(struct xe_vm *vm, struct xe_vma *vma)
{
+ lockdep_assert_held_write(&vm->lock);
+
list_add(&vma->extobj.link, &vm->extobj.list);
vm->extobj.entries++;
}
static void vm_insert_extobj(struct xe_vm *vm, struct xe_vma *vma)
{
- struct xe_bo *bo = vma->bo;
+ struct xe_bo *bo = xe_vma_bo(vma);
lockdep_assert_held_write(&vm->lock);
static void xe_vma_destroy(struct xe_vma *vma, struct dma_fence *fence)
{
- struct xe_vm *vm = vma->vm;
+ struct xe_vm *vm = xe_vma_vm(vma);
lockdep_assert_held_write(&vm->lock);
- XE_BUG_ON(!list_empty(&vma->unbind_link));
+ xe_assert(vm->xe, list_empty(&vma->combined_links.destroy));
if (xe_vma_is_userptr(vma)) {
- XE_WARN_ON(!vma->destroyed);
+ xe_assert(vm->xe, vma->gpuva.flags & XE_VMA_DESTROYED);
+
spin_lock(&vm->userptr.invalidated_lock);
- list_del_init(&vma->userptr.invalidate_link);
+ list_del(&vma->userptr.invalidate_link);
spin_unlock(&vm->userptr.invalidated_lock);
- list_del(&vma->userptr_link);
- } else {
- xe_bo_assert_held(vma->bo);
- list_del(&vma->bo_link);
+ } else if (!xe_vma_is_null(vma)) {
+ xe_bo_assert_held(xe_vma_bo(vma));
spin_lock(&vm->notifier.list_lock);
list_del(&vma->notifier.rebind_link);
spin_unlock(&vm->notifier.list_lock);
- if (!vma->bo->vm && vm_remove_extobj(vma)) {
+ drm_gpuva_unlink(&vma->gpuva);
+
+ if (!xe_vma_bo(vma)->vm && vm_remove_extobj(vma)) {
struct xe_vma *other;
- other = bo_has_vm_references_locked(vma->bo, vm, NULL);
+ other = bo_has_vm_references_locked(xe_vma_bo(vma), vm, NULL);
if (other)
__vm_insert_extobj(vm, other);
}
xe_vm_assert_held(vm);
- if (!list_empty(&vma->rebind_link))
- list_del(&vma->rebind_link);
-
if (fence) {
int ret = dma_fence_add_callback(fence, &vma->destroy_cb,
vma_destroy_cb);
}
}
-static void xe_vma_destroy_unlocked(struct xe_vma *vma)
+/**
+ * xe_vm_prepare_vma() - drm_exec utility to lock a vma
+ * @exec: The drm_exec object we're currently locking for.
+ * @vma: The vma for which we want to lock the vm resv and any attached
+ * object's resv.
+ * @num_shared: The number of dma-fence slots to pre-allocate in the
+ * objects' reservation objects.
+ *
+ * Return: 0 on success, negative error code on error. In particular
+ * may return -EDEADLK on WW transaction contention and -EINTR if
+ * an interruptible wait is terminated by a signal.
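+ *
+ * A minimal usage sketch, mirroring xe_vma_destroy_unlocked() below:
+ *
+ *	drm_exec_init(&exec, 0);
+ *	drm_exec_until_all_locked(&exec) {
+ *		err = xe_vm_prepare_vma(&exec, vma, 0);
+ *		drm_exec_retry_on_contention(&exec);
+ *		if (err)
+ *			break;
+ *	}
+ *	... operate on the vma's vm and bo ...
+ *	drm_exec_fini(&exec);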
+ */
+int xe_vm_prepare_vma(struct drm_exec *exec, struct xe_vma *vma,
+ unsigned int num_shared)
{
- struct ttm_validate_buffer tv[2];
- struct ww_acquire_ctx ww;
- struct xe_bo *bo = vma->bo;
- LIST_HEAD(objs);
- LIST_HEAD(dups);
+ struct xe_vm *vm = xe_vma_vm(vma);
+ struct xe_bo *bo = xe_vma_bo(vma);
int err;
- memset(tv, 0, sizeof(tv));
- tv[0].bo = xe_vm_ttm_bo(vma->vm);
- list_add(&tv[0].head, &objs);
+ XE_WARN_ON(!vm);
+ err = drm_exec_prepare_obj(exec, xe_vm_obj(vm), num_shared);
+ if (!err && bo && !bo->vm)
+ err = drm_exec_prepare_obj(exec, &bo->ttm.base, num_shared);
- if (bo) {
- tv[1].bo = &xe_bo_get(bo)->ttm;
- list_add(&tv[1].head, &objs);
+ return err;
+}
+
+static void xe_vma_destroy_unlocked(struct xe_vma *vma)
+{
+ struct drm_exec exec;
+ int err;
+
+ drm_exec_init(&exec, 0);
+ drm_exec_until_all_locked(&exec) {
+ err = xe_vm_prepare_vma(&exec, vma, 0);
+ drm_exec_retry_on_contention(&exec);
+ if (XE_WARN_ON(err))
+ break;
}
- err = ttm_eu_reserve_buffers(&ww, &objs, false, &dups);
- XE_WARN_ON(err);
xe_vma_destroy(vma, NULL);
- ttm_eu_backoff_reservation(&ww, &objs);
- if (bo)
- xe_bo_put(bo);
+ drm_exec_fini(&exec);
}
-static struct xe_vma *to_xe_vma(const struct rb_node *node)
+struct xe_vma *
+xe_vm_find_overlapping_vma(struct xe_vm *vm, u64 start, u64 range)
{
- BUILD_BUG_ON(offsetof(struct xe_vma, vm_node) != 0);
- return (struct xe_vma *)node;
+ struct drm_gpuva *gpuva;
+
+ lockdep_assert_held(&vm->lock);
+
+ if (xe_vm_is_closed_or_banned(vm))
+ return NULL;
+
+ xe_assert(vm->xe, start + range <= vm->size);
+
+ gpuva = drm_gpuva_find_first(&vm->gpuvm, start, range);
+
+ return gpuva ? gpuva_to_vma(gpuva) : NULL;
}
-static int xe_vma_cmp(const struct xe_vma *a, const struct xe_vma *b)
+static int xe_vm_insert_vma(struct xe_vm *vm, struct xe_vma *vma)
{
- if (a->end < b->start) {
- return -1;
- } else if (b->end < a->start) {
- return 1;
- } else {
- return 0;
- }
+ int err;
+
+ xe_assert(vm->xe, xe_vma_vm(vma) == vm);
+ lockdep_assert_held(&vm->lock);
+
+ err = drm_gpuva_insert(&vm->gpuvm, &vma->gpuva);
+ XE_WARN_ON(err); /* Shouldn't be possible */
+
+ return err;
+}
+
+static void xe_vm_remove_vma(struct xe_vm *vm, struct xe_vma *vma)
+{
+ xe_assert(vm->xe, xe_vma_vm(vma) == vm);
+ lockdep_assert_held(&vm->lock);
+
+ drm_gpuva_remove(&vma->gpuva);
+ if (vm->usm.last_fault_vma == vma)
+ vm->usm.last_fault_vma = NULL;
}
-static bool xe_vma_less_cb(struct rb_node *a, const struct rb_node *b)
+static struct drm_gpuva_op *xe_vm_op_alloc(void)
{
- return xe_vma_cmp(to_xe_vma(a), to_xe_vma(b)) < 0;
+ struct xe_vma_op *op;
+
+ op = kzalloc(sizeof(*op), GFP_KERNEL);
+
+ if (unlikely(!op))
+ return NULL;
+
+ return &op->base;
+}
+
+static void xe_vm_free(struct drm_gpuvm *gpuvm);
+
+static struct drm_gpuvm_ops gpuvm_ops = {
+ .op_alloc = xe_vm_op_alloc,
+ .vm_free = xe_vm_free,
+};
+
+static u64 pde_encode_pat_index(struct xe_device *xe, u16 pat_index)
+{
+ u64 pte = 0;
+
+ if (pat_index & BIT(0))
+ pte |= XE_PPGTT_PTE_PAT0;
+
+ if (pat_index & BIT(1))
+ pte |= XE_PPGTT_PTE_PAT1;
+
+ return pte;
}
-int xe_vma_cmp_vma_cb(const void *key, const struct rb_node *node)
+static u64 pte_encode_pat_index(struct xe_device *xe, u16 pat_index,
+ u32 pt_level)
{
- struct xe_vma *cmp = to_xe_vma(node);
- const struct xe_vma *own = key;
+ u64 pte = 0;
+
+ if (pat_index & BIT(0))
+ pte |= XE_PPGTT_PTE_PAT0;
+
+ if (pat_index & BIT(1))
+ pte |= XE_PPGTT_PTE_PAT1;
+
+ if (pat_index & BIT(2)) {
+ if (pt_level)
+ pte |= XE_PPGTT_PDE_PDPE_PAT2;
+ else
+ pte |= XE_PPGTT_PTE_PAT2;
+ }
+
+ if (pat_index & BIT(3))
+ pte |= XELPG_PPGTT_PTE_PAT3;
+
+ if (pat_index & BIT(4))
+ pte |= XE2_PPGTT_PTE_PAT4;
- if (own->start > cmp->end)
- return 1;
+ return pte;
+}
+
+static u64 pte_encode_ps(u32 pt_level)
+{
+ XE_WARN_ON(pt_level > 2);
- if (own->end < cmp->start)
- return -1;
+ if (pt_level == 1)
+ return XE_PDE_PS_2M;
+ else if (pt_level == 2)
+ return XE_PDPE_PS_1G;
return 0;
}
-struct xe_vma *
-xe_vm_find_overlapping_vma(struct xe_vm *vm, const struct xe_vma *vma)
+static u64 xelp_pde_encode_bo(struct xe_bo *bo, u64 bo_offset,
+ const u16 pat_index)
{
- struct rb_node *node;
+ struct xe_device *xe = xe_bo_device(bo);
+ u64 pde;
- if (xe_vm_is_closed(vm))
- return NULL;
+ pde = xe_bo_addr(bo, bo_offset, XE_PAGE_SIZE);
+ pde |= XE_PAGE_PRESENT | XE_PAGE_RW;
+ pde |= pde_encode_pat_index(xe, pat_index);
- XE_BUG_ON(vma->end >= vm->size);
- lockdep_assert_held(&vm->lock);
+ return pde;
+}
+
+static u64 xelp_pte_encode_bo(struct xe_bo *bo, u64 bo_offset,
+ u16 pat_index, u32 pt_level)
+{
+ struct xe_device *xe = xe_bo_device(bo);
+ u64 pte;
+
+ pte = xe_bo_addr(bo, bo_offset, XE_PAGE_SIZE);
+ pte |= XE_PAGE_PRESENT | XE_PAGE_RW;
+ pte |= pte_encode_pat_index(xe, pat_index, pt_level);
+ pte |= pte_encode_ps(pt_level);
- node = rb_find(vma, &vm->vmas, xe_vma_cmp_vma_cb);
+ if (xe_bo_is_vram(bo) || xe_bo_is_stolen_devmem(bo))
+ pte |= XE_PPGTT_PTE_DM;
- return node ? to_xe_vma(node) : NULL;
+ return pte;
}
-static void xe_vm_insert_vma(struct xe_vm *vm, struct xe_vma *vma)
+static u64 xelp_pte_encode_vma(u64 pte, struct xe_vma *vma,
+ u16 pat_index, u32 pt_level)
{
- XE_BUG_ON(vma->vm != vm);
- lockdep_assert_held(&vm->lock);
+ struct xe_device *xe = xe_vma_vm(vma)->xe;
+
+ pte |= XE_PAGE_PRESENT;
- rb_add(&vma->vm_node, &vm->vmas, xe_vma_less_cb);
+ if (likely(!xe_vma_read_only(vma)))
+ pte |= XE_PAGE_RW;
+
+ pte |= pte_encode_pat_index(xe, pat_index, pt_level);
+ pte |= pte_encode_ps(pt_level);
+
+ if (unlikely(xe_vma_is_null(vma)))
+ pte |= XE_PTE_NULL;
+
+ return pte;
}
-static void xe_vm_remove_vma(struct xe_vm *vm, struct xe_vma *vma)
+static u64 xelp_pte_encode_addr(struct xe_device *xe, u64 addr,
+ u16 pat_index,
+ u32 pt_level, bool devmem, u64 flags)
{
- XE_BUG_ON(vma->vm != vm);
- lockdep_assert_held(&vm->lock);
+ u64 pte;
- rb_erase(&vma->vm_node, &vm->vmas);
- if (vm->usm.last_fault_vma == vma)
- vm->usm.last_fault_vma = NULL;
+ /* Avoid passing random bits directly as flags */
+ xe_assert(xe, !(flags & ~XE_PTE_PS64));
+
+ pte = addr;
+ pte |= XE_PAGE_PRESENT | XE_PAGE_RW;
+ pte |= pte_encode_pat_index(xe, pat_index, pt_level);
+ pte |= pte_encode_ps(pt_level);
+
+ if (devmem)
+ pte |= XE_PPGTT_PTE_DM;
+
+ pte |= flags;
+
+ return pte;
}
-static void async_op_work_func(struct work_struct *w);
+static const struct xe_pt_ops xelp_pt_ops = {
+ .pte_encode_bo = xelp_pte_encode_bo,
+ .pte_encode_vma = xelp_pte_encode_vma,
+ .pte_encode_addr = xelp_pte_encode_addr,
+ .pde_encode_bo = xelp_pde_encode_bo,
+};
+
static void vm_destroy_work_func(struct work_struct *w);
struct xe_vm *xe_vm_create(struct xe_device *xe, u32 flags)
{
+ struct drm_gem_object *vm_resv_obj;
struct xe_vm *vm;
- int err, i = 0, number_gts = 0;
- struct xe_gt *gt;
+ int err, number_tiles = 0;
+ struct xe_tile *tile;
u8 id;
vm = kzalloc(sizeof(*vm), GFP_KERNEL);
return ERR_PTR(-ENOMEM);
vm->xe = xe;
- kref_init(&vm->refcount);
- dma_resv_init(&vm->resv);
- vm->size = 1ull << xe_pt_shift(xe->info.vm_max_level + 1);
+ vm->size = 1ull << xe->info.va_bits;
- vm->vmas = RB_ROOT;
vm->flags = flags;
init_rwsem(&vm->lock);
INIT_LIST_HEAD(&vm->notifier.rebind_list);
spin_lock_init(&vm->notifier.list_lock);
- INIT_LIST_HEAD(&vm->async_ops.pending);
- INIT_WORK(&vm->async_ops.work, async_op_work_func);
- spin_lock_init(&vm->async_ops.lock);
-
INIT_WORK(&vm->destroy_work, vm_destroy_work_func);
- INIT_LIST_HEAD(&vm->preempt.engines);
+ INIT_LIST_HEAD(&vm->preempt.exec_queues);
vm->preempt.min_run_period_ms = 10; /* FIXME: Wire up to uAPI */
+ for_each_tile(tile, xe, id)
+ xe_range_fence_tree_init(&vm->rftree[id]);
+
INIT_LIST_HEAD(&vm->extobj.list);
- if (!(flags & XE_VM_FLAG_MIGRATION)) {
- /* We need to immeditatelly exit from any D3 state */
- xe_pm_runtime_get(xe);
+ vm->pt_ops = &xelp_pt_ops;
+
+ if (!(flags & XE_VM_FLAG_MIGRATION))
xe_device_mem_access_get(xe);
+
+ vm_resv_obj = drm_gpuvm_resv_object_alloc(&xe->drm);
+ if (!vm_resv_obj) {
+ err = -ENOMEM;
+ goto err_no_resv;
}
- err = dma_resv_lock_interruptible(&vm->resv, NULL);
+ drm_gpuvm_init(&vm->gpuvm, "Xe VM", 0, &xe->drm, vm_resv_obj,
+ 0, vm->size, 0, 0, &gpuvm_ops);
+
+ drm_gem_object_put(vm_resv_obj);
+
+ err = dma_resv_lock_interruptible(xe_vm_resv(vm), NULL);
if (err)
- goto err_put;
+ goto err_close;
if (IS_DGFX(xe) && xe->info.vram_flags & XE_VRAM_FLAGS_NEED64K)
- vm->flags |= XE_VM_FLAGS_64K;
-
- for_each_gt(gt, xe, id) {
- if (xe_gt_is_media_type(gt))
- continue;
+ vm->flags |= XE_VM_FLAG_64K;
+ for_each_tile(tile, xe, id) {
if (flags & XE_VM_FLAG_MIGRATION &&
- gt->info.id != XE_VM_FLAG_GT_ID(flags))
+ tile->id != XE_VM_FLAG_TILE_ID(flags))
continue;
- vm->pt_root[id] = xe_pt_create(vm, gt, xe->info.vm_max_level);
+ vm->pt_root[id] = xe_pt_create(vm, tile, xe->info.vm_max_level);
if (IS_ERR(vm->pt_root[id])) {
err = PTR_ERR(vm->pt_root[id]);
vm->pt_root[id] = NULL;
- goto err_destroy_root;
+ goto err_unlock_close;
}
}
if (flags & XE_VM_FLAG_SCRATCH_PAGE) {
- for_each_gt(gt, xe, id) {
+ for_each_tile(tile, xe, id) {
if (!vm->pt_root[id])
continue;
- err = xe_pt_create_scratch(xe, gt, vm);
+ err = xe_pt_create_scratch(xe, tile, vm);
if (err)
- goto err_scratch_pt;
+ goto err_unlock_close;
}
+ vm->batch_invalidate_tlb = true;
}
- if (flags & DRM_XE_VM_CREATE_COMPUTE_MODE) {
+ if (flags & XE_VM_FLAG_COMPUTE_MODE) {
INIT_WORK(&vm->preempt.rebind_work, preempt_rebind_work_func);
vm->flags |= XE_VM_FLAG_COMPUTE_MODE;
- }
-
- if (flags & DRM_XE_VM_CREATE_ASYNC_BIND_OPS) {
- vm->async_ops.fence.context = dma_fence_context_alloc(1);
- vm->flags |= XE_VM_FLAG_ASYNC_BIND_OPS;
+ vm->batch_invalidate_tlb = false;
}
/* Fill pt_root after allocating scratch tables */
- for_each_gt(gt, xe, id) {
+ for_each_tile(tile, xe, id) {
if (!vm->pt_root[id])
continue;
- xe_pt_populate_empty(gt, vm, vm->pt_root[id]);
+ xe_pt_populate_empty(tile, vm, vm->pt_root[id]);
}
- dma_resv_unlock(&vm->resv);
+ dma_resv_unlock(xe_vm_resv(vm));
/* Kernel migration VM shouldn't have a circular loop.. */
if (!(flags & XE_VM_FLAG_MIGRATION)) {
- for_each_gt(gt, xe, id) {
+ for_each_tile(tile, xe, id) {
+ struct xe_gt *gt = tile->primary_gt;
struct xe_vm *migrate_vm;
- struct xe_engine *eng;
+ struct xe_exec_queue *q;
+ u32 create_flags = EXEC_QUEUE_FLAG_VM |
+ ((flags & XE_VM_FLAG_ASYNC_DEFAULT) ?
+ EXEC_QUEUE_FLAG_VM_ASYNC : 0);
if (!vm->pt_root[id])
continue;
- migrate_vm = xe_migrate_get_vm(gt->migrate);
- eng = xe_engine_create_class(xe, gt, migrate_vm,
- XE_ENGINE_CLASS_COPY,
- ENGINE_FLAG_VM);
+ migrate_vm = xe_migrate_get_vm(tile->migrate);
+ q = xe_exec_queue_create_class(xe, gt, migrate_vm,
+ XE_ENGINE_CLASS_COPY,
+ create_flags);
xe_vm_put(migrate_vm);
- if (IS_ERR(eng)) {
- xe_vm_close_and_put(vm);
- return ERR_CAST(eng);
+ if (IS_ERR(q)) {
+ err = PTR_ERR(q);
+ goto err_close;
}
- vm->eng[id] = eng;
- number_gts++;
+ vm->q[id] = q;
+ number_tiles++;
}
}
- if (number_gts > 1)
+ if (number_tiles > 1)
vm->composite_fence_ctx = dma_fence_context_alloc(1);
mutex_lock(&xe->usm.lock);
return vm;
-err_scratch_pt:
- for_each_gt(gt, xe, id) {
- if (!vm->pt_root[id])
- continue;
+err_unlock_close:
+ dma_resv_unlock(xe_vm_resv(vm));
+err_close:
+ xe_vm_close_and_put(vm);
+ return ERR_PTR(err);
- i = vm->pt_root[id]->level;
- while (i)
- if (vm->scratch_pt[id][--i])
- xe_pt_destroy(vm->scratch_pt[id][i],
- vm->flags, NULL);
- xe_bo_unpin(vm->scratch_bo[id]);
- xe_bo_put(vm->scratch_bo[id]);
- }
-err_destroy_root:
- for_each_gt(gt, xe, id) {
- if (vm->pt_root[id])
- xe_pt_destroy(vm->pt_root[id], vm->flags, NULL);
- }
- dma_resv_unlock(&vm->resv);
-err_put:
- dma_resv_fini(&vm->resv);
+err_no_resv:
+ for_each_tile(tile, xe, id)
+ xe_range_fence_tree_fini(&vm->rftree[id]);
kfree(vm);
- if (!(flags & XE_VM_FLAG_MIGRATION)) {
+ if (!(flags & XE_VM_FLAG_MIGRATION))
xe_device_mem_access_put(xe);
- xe_pm_runtime_put(xe);
- }
return ERR_PTR(err);
}
-static void flush_async_ops(struct xe_vm *vm)
-{
- queue_work(system_unbound_wq, &vm->async_ops.work);
- flush_work(&vm->async_ops.work);
-}
-
-static void vm_error_capture(struct xe_vm *vm, int err,
- u32 op, u64 addr, u64 size)
+static void xe_vm_close(struct xe_vm *vm)
{
- struct drm_xe_vm_bind_op_error_capture capture;
- u64 __user *address =
- u64_to_user_ptr(vm->async_ops.error_capture.addr);
- bool in_kthread = !current->mm;
-
- capture.error = err;
- capture.op = op;
- capture.addr = addr;
- capture.size = size;
-
- if (in_kthread) {
- if (!mmget_not_zero(vm->async_ops.error_capture.mm))
- goto mm_closed;
- kthread_use_mm(vm->async_ops.error_capture.mm);
- }
-
- if (copy_to_user(address, &capture, sizeof(capture)))
- XE_WARN_ON("Copy to user failed");
-
- if (in_kthread) {
- kthread_unuse_mm(vm->async_ops.error_capture.mm);
- mmput(vm->async_ops.error_capture.mm);
- }
-
-mm_closed:
- wake_up_all(&vm->async_ops.error_capture.wq);
+ down_write(&vm->lock);
+ vm->size = 0;
+ up_write(&vm->lock);
}
void xe_vm_close_and_put(struct xe_vm *vm)
{
- struct rb_root contested = RB_ROOT;
- struct ww_acquire_ctx ww;
+ LIST_HEAD(contested);
struct xe_device *xe = vm->xe;
- struct xe_gt *gt;
+ struct xe_tile *tile;
+ struct xe_vma *vma, *next_vma;
+ struct drm_gpuva *gpuva, *next;
u8 id;
- XE_BUG_ON(vm->preempt.num_engines);
+ xe_assert(xe, !vm->preempt.num_exec_queues);
- vm->size = 0;
- smp_mb();
- flush_async_ops(vm);
+ xe_vm_close(vm);
if (xe_vm_in_compute_mode(vm))
flush_work(&vm->preempt.rebind_work);
- for_each_gt(gt, xe, id) {
- if (vm->eng[id]) {
- xe_engine_kill(vm->eng[id]);
- xe_engine_put(vm->eng[id]);
- vm->eng[id] = NULL;
+ down_write(&vm->lock);
+ for_each_tile(tile, xe, id) {
+ if (vm->q[id])
+ xe_exec_queue_last_fence_put(vm->q[id], vm);
+ }
+ up_write(&vm->lock);
+
+ for_each_tile(tile, xe, id) {
+ if (vm->q[id]) {
+ xe_exec_queue_kill(vm->q[id]);
+ xe_exec_queue_put(vm->q[id]);
+ vm->q[id] = NULL;
}
}
down_write(&vm->lock);
- xe_vm_lock(vm, &ww, 0, false);
- while (vm->vmas.rb_node) {
- struct xe_vma *vma = to_xe_vma(vm->vmas.rb_node);
+ xe_vm_lock(vm, false);
+ drm_gpuvm_for_each_va_safe(gpuva, next, &vm->gpuvm) {
+ vma = gpuva_to_vma(gpuva);
- if (xe_vma_is_userptr(vma)) {
+ if (xe_vma_has_no_bo(vma)) {
down_read(&vm->userptr.notifier_lock);
- vma->destroyed = true;
+ vma->gpuva.flags |= XE_VMA_DESTROYED;
up_read(&vm->userptr.notifier_lock);
}
- rb_erase(&vma->vm_node, &vm->vmas);
+ xe_vm_remove_vma(vm, vma);
/* easy case, remove from VMA? */
- if (xe_vma_is_userptr(vma) || vma->bo->vm) {
+ if (xe_vma_has_no_bo(vma) || xe_vma_bo(vma)->vm) {
+ list_del_init(&vma->combined_links.rebind);
xe_vma_destroy(vma, NULL);
continue;
}
- rb_add(&vma->vm_node, &contested, xe_vma_less_cb);
+ list_move_tail(&vma->combined_links.destroy, &contested);
+ vma->gpuva.flags |= XE_VMA_DESTROYED;
}
/*
* install a fence to resv. Hence it's safe to
* destroy the pagetables immediately.
*/
- for_each_gt(gt, xe, id) {
+ for_each_tile(tile, xe, id) {
if (vm->scratch_bo[id]) {
u32 i;
xe_pt_destroy(vm->scratch_pt[id][i], vm->flags,
NULL);
}
- }
- xe_vm_unlock(vm, &ww);
-
- if (contested.rb_node) {
-
- /*
- * VM is now dead, cannot re-add nodes to vm->vmas if it's NULL
- * Since we hold a refcount to the bo, we can remove and free
- * the members safely without locking.
- */
- while (contested.rb_node) {
- struct xe_vma *vma = to_xe_vma(contested.rb_node);
-
- rb_erase(&vma->vm_node, &contested);
- xe_vma_destroy_unlocked(vma);
+ if (vm->pt_root[id]) {
+ xe_pt_destroy(vm->pt_root[id], vm->flags, NULL);
+ vm->pt_root[id] = NULL;
}
}
+ xe_vm_unlock(vm);
- if (vm->async_ops.error_capture.addr)
- wake_up_all(&vm->async_ops.error_capture.wq);
+ /*
+ * The VM is now dead, so no new VMAs can be added to it.
+ * Since we hold a refcount to the bo, we can remove and free
+ * the members safely without locking.
+ */
+ list_for_each_entry_safe(vma, next_vma, &contested,
+ combined_links.destroy) {
+ list_del_init(&vma->combined_links.destroy);
+ xe_vma_destroy_unlocked(vma);
+ }
- XE_WARN_ON(!list_empty(&vm->extobj.list));
+ xe_assert(xe, list_empty(&vm->extobj.list));
up_write(&vm->lock);
mutex_lock(&xe->usm.lock);
xe->usm.num_vm_in_non_fault_mode--;
mutex_unlock(&xe->usm.lock);
+ for_each_tile(tile, xe, id)
+ xe_range_fence_tree_fini(&vm->rftree[id]);
+
xe_vm_put(vm);
}
{
struct xe_vm *vm =
container_of(w, struct xe_vm, destroy_work);
- struct ww_acquire_ctx ww;
struct xe_device *xe = vm->xe;
- struct xe_gt *gt;
+ struct xe_tile *tile;
u8 id;
void *lookup;
/* xe_vm_close_and_put was not called? */
- XE_WARN_ON(vm->size);
+ xe_assert(xe, !vm->size);
if (!(vm->flags & XE_VM_FLAG_MIGRATION)) {
xe_device_mem_access_put(xe);
- xe_pm_runtime_put(xe);
if (xe->info.has_asid) {
mutex_lock(&xe->usm.lock);
lookup = xa_erase(&xe->usm.asid_to_vm, vm->usm.asid);
- XE_WARN_ON(lookup != vm);
+ xe_assert(xe, lookup == vm);
mutex_unlock(&xe->usm.lock);
}
}
- /*
- * XXX: We delay destroying the PT root until the VM if freed as PT root
- * is needed for xe_vm_lock to work. If we remove that dependency this
- * can be moved to xe_vm_close_and_put.
- */
- xe_vm_lock(vm, &ww, 0, false);
- for_each_gt(gt, xe, id) {
- if (vm->pt_root[id]) {
- xe_pt_destroy(vm->pt_root[id], vm->flags, NULL);
- vm->pt_root[id] = NULL;
- }
- }
- xe_vm_unlock(vm, &ww);
+ for_each_tile(tile, xe, id)
+ XE_WARN_ON(vm->pt_root[id]);
trace_xe_vm_free(vm);
dma_fence_put(vm->rebind_fence);
- dma_resv_fini(&vm->resv);
kfree(vm);
}
-void xe_vm_free(struct kref *ref)
+static void xe_vm_free(struct drm_gpuvm *gpuvm)
{
- struct xe_vm *vm = container_of(ref, struct xe_vm, refcount);
+ struct xe_vm *vm = container_of(gpuvm, struct xe_vm, gpuvm);
/* To destroy the VM we need to be able to sleep */
queue_work(system_unbound_wq, &vm->destroy_work);
mutex_lock(&xef->vm.lock);
vm = xa_load(&xef->vm.xa, id);
- mutex_unlock(&xef->vm.lock);
-
if (vm)
xe_vm_get(vm);
+ mutex_unlock(&xef->vm.lock);
return vm;
}
-u64 xe_vm_pdp4_descriptor(struct xe_vm *vm, struct xe_gt *full_gt)
+u64 xe_vm_pdp4_descriptor(struct xe_vm *vm, struct xe_tile *tile)
{
- XE_BUG_ON(xe_gt_is_media_type(full_gt));
+ return vm->pt_ops->pde_encode_bo(vm->pt_root[tile->id]->bo, 0,
+ tile_to_xe(tile)->pat.idx[XE_CACHE_WB]);
+}
- return gen8_pde_encode(vm->pt_root[full_gt->info.id]->bo, 0,
- XE_CACHE_WB);
+static struct xe_exec_queue *
+to_wait_exec_queue(struct xe_vm *vm, struct xe_exec_queue *q)
+{
+ return q ? q : vm->q[0];
}
static struct dma_fence *
-xe_vm_unbind_vma(struct xe_vma *vma, struct xe_engine *e,
- struct xe_sync_entry *syncs, u32 num_syncs)
+xe_vm_unbind_vma(struct xe_vma *vma, struct xe_exec_queue *q,
+ struct xe_sync_entry *syncs, u32 num_syncs,
+ bool first_op, bool last_op)
{
- struct xe_gt *gt;
+ struct xe_vm *vm = xe_vma_vm(vma);
+ struct xe_exec_queue *wait_exec_queue = to_wait_exec_queue(vm, q);
+ struct xe_tile *tile;
struct dma_fence *fence = NULL;
struct dma_fence **fences = NULL;
struct dma_fence_array *cf = NULL;
- struct xe_vm *vm = vma->vm;
int cur_fence = 0, i;
- int number_gts = hweight_long(vma->gt_present);
+ int number_tiles = hweight8(vma->tile_present);
int err;
u8 id;
trace_xe_vma_unbind(vma);
- if (number_gts > 1) {
- fences = kmalloc_array(number_gts, sizeof(*fences),
+ if (number_tiles > 1) {
+ fences = kmalloc_array(number_tiles, sizeof(*fences),
GFP_KERNEL);
if (!fences)
return ERR_PTR(-ENOMEM);
}
- for_each_gt(gt, vm->xe, id) {
- if (!(vma->gt_present & BIT(id)))
+ for_each_tile(tile, vm->xe, id) {
+ if (!(vma->tile_present & BIT(id)))
goto next;
- XE_BUG_ON(xe_gt_is_media_type(gt));
-
- fence = __xe_pt_unbind_vma(gt, vma, e, syncs, num_syncs);
+ fence = __xe_pt_unbind_vma(tile, vma, q ? q : vm->q[id],
+ first_op ? syncs : NULL,
+ first_op ? num_syncs : 0);
if (IS_ERR(fence)) {
err = PTR_ERR(fence);
goto err_fences;
fences[cur_fence++] = fence;
next:
- if (e && vm->pt_root[id] && !list_empty(&e->multi_gt_list))
- e = list_next_entry(e, multi_gt_list);
+ if (q && vm->pt_root[id] && !list_empty(&q->multi_gt_list))
+ q = list_next_entry(q, multi_gt_list);
}
if (fences) {
- cf = dma_fence_array_create(number_gts, fences,
+ cf = dma_fence_array_create(number_tiles, fences,
vm->composite_fence_ctx,
vm->composite_fence_seqno++,
false);
}
}
- for (i = 0; i < num_syncs; i++)
- xe_sync_entry_signal(&syncs[i], NULL, cf ? &cf->base : fence);
+ fence = cf ? &cf->base : !fence ?
+ xe_exec_queue_last_fence_get(wait_exec_queue, vm) : fence;
+ if (last_op) {
+ for (i = 0; i < num_syncs; i++)
+ xe_sync_entry_signal(&syncs[i], NULL, fence);
+ }
- return cf ? &cf->base : !fence ? dma_fence_get_stub() : fence;
+ return fence;
err_fences:
if (fences) {
- while (cur_fence) {
- /* FIXME: Rewind the previous binds? */
+ while (cur_fence)
dma_fence_put(fences[--cur_fence]);
- }
kfree(fences);
}
}
static struct dma_fence *
-xe_vm_bind_vma(struct xe_vma *vma, struct xe_engine *e,
- struct xe_sync_entry *syncs, u32 num_syncs)
+xe_vm_bind_vma(struct xe_vma *vma, struct xe_exec_queue *q,
+ struct xe_sync_entry *syncs, u32 num_syncs,
+ bool first_op, bool last_op)
{
- struct xe_gt *gt;
+ struct xe_tile *tile;
struct dma_fence *fence;
struct dma_fence **fences = NULL;
struct dma_fence_array *cf = NULL;
- struct xe_vm *vm = vma->vm;
+ struct xe_vm *vm = xe_vma_vm(vma);
int cur_fence = 0, i;
- int number_gts = hweight_long(vma->gt_mask);
+ int number_tiles = hweight8(vma->tile_mask);
int err;
u8 id;
trace_xe_vma_bind(vma);
- if (number_gts > 1) {
- fences = kmalloc_array(number_gts, sizeof(*fences),
+ if (number_tiles > 1) {
+ fences = kmalloc_array(number_tiles, sizeof(*fences),
GFP_KERNEL);
if (!fences)
return ERR_PTR(-ENOMEM);
}
- for_each_gt(gt, vm->xe, id) {
- if (!(vma->gt_mask & BIT(id)))
+ for_each_tile(tile, vm->xe, id) {
+ if (!(vma->tile_mask & BIT(id)))
goto next;
- XE_BUG_ON(xe_gt_is_media_type(gt));
- fence = __xe_pt_bind_vma(gt, vma, e, syncs, num_syncs,
- vma->gt_present & BIT(id));
+ fence = __xe_pt_bind_vma(tile, vma, q ? q : vm->q[id],
+ first_op ? syncs : NULL,
+ first_op ? num_syncs : 0,
+ vma->tile_present & BIT(id));
if (IS_ERR(fence)) {
err = PTR_ERR(fence);
goto err_fences;
fences[cur_fence++] = fence;
next:
- if (e && vm->pt_root[id] && !list_empty(&e->multi_gt_list))
- e = list_next_entry(e, multi_gt_list);
+ if (q && vm->pt_root[id] && !list_empty(&q->multi_gt_list))
+ q = list_next_entry(q, multi_gt_list);
}
if (fences) {
- cf = dma_fence_array_create(number_gts, fences,
+ cf = dma_fence_array_create(number_tiles, fences,
vm->composite_fence_ctx,
vm->composite_fence_seqno++,
false);
}
}
- for (i = 0; i < num_syncs; i++)
- xe_sync_entry_signal(&syncs[i], NULL, cf ? &cf->base : fence);
+ if (last_op) {
+ for (i = 0; i < num_syncs; i++)
+ xe_sync_entry_signal(&syncs[i], NULL,
+ cf ? &cf->base : fence);
+ }
return cf ? &cf->base : fence;
err_fences:
if (fences) {
- while (cur_fence) {
- /* FIXME: Rewind the previous binds? */
+ while (cur_fence)
dma_fence_put(fences[--cur_fence]);
- }
kfree(fences);
}
return ERR_PTR(err);
}
-struct async_op_fence {
- struct dma_fence fence;
- struct dma_fence *wait_fence;
- struct dma_fence_cb cb;
- struct xe_vm *vm;
- wait_queue_head_t wq;
- bool started;
-};
-
-static const char *async_op_fence_get_driver_name(struct dma_fence *dma_fence)
-{
- return "xe";
-}
-
-static const char *
-async_op_fence_get_timeline_name(struct dma_fence *dma_fence)
-{
- return "async_op_fence";
-}
-
-static const struct dma_fence_ops async_op_fence_ops = {
- .get_driver_name = async_op_fence_get_driver_name,
- .get_timeline_name = async_op_fence_get_timeline_name,
-};
-
-static void async_op_fence_cb(struct dma_fence *fence, struct dma_fence_cb *cb)
-{
- struct async_op_fence *afence =
- container_of(cb, struct async_op_fence, cb);
-
- afence->fence.error = afence->wait_fence->error;
- dma_fence_signal(&afence->fence);
- xe_vm_put(afence->vm);
- dma_fence_put(afence->wait_fence);
- dma_fence_put(&afence->fence);
-}
-
-static void add_async_op_fence_cb(struct xe_vm *vm,
- struct dma_fence *fence,
- struct async_op_fence *afence)
-{
- int ret;
-
- if (!xe_vm_no_dma_fences(vm)) {
- afence->started = true;
- smp_wmb();
- wake_up_all(&afence->wq);
- }
-
- afence->wait_fence = dma_fence_get(fence);
- afence->vm = xe_vm_get(vm);
- dma_fence_get(&afence->fence);
- ret = dma_fence_add_callback(fence, &afence->cb, async_op_fence_cb);
- if (ret == -ENOENT) {
- afence->fence.error = afence->wait_fence->error;
- dma_fence_signal(&afence->fence);
- }
- if (ret) {
- xe_vm_put(vm);
- dma_fence_put(afence->wait_fence);
- dma_fence_put(&afence->fence);
- }
- XE_WARN_ON(ret && ret != -ENOENT);
-}
-
-int xe_vm_async_fence_wait_start(struct dma_fence *fence)
+static bool xe_vm_sync_mode(struct xe_vm *vm, struct xe_exec_queue *q)
{
- if (fence->ops == &async_op_fence_ops) {
- struct async_op_fence *afence =
- container_of(fence, struct async_op_fence, fence);
-
- XE_BUG_ON(xe_vm_no_dma_fences(afence->vm));
-
- smp_rmb();
- return wait_event_interruptible(afence->wq, afence->started);
- }
-
- return 0;
+ return q ? !(q->flags & EXEC_QUEUE_FLAG_VM_ASYNC) :
+ !(vm->flags & XE_VM_FLAG_ASYNC_DEFAULT);
}
static int __xe_vm_bind(struct xe_vm *vm, struct xe_vma *vma,
- struct xe_engine *e, struct xe_sync_entry *syncs,
- u32 num_syncs, struct async_op_fence *afence)
+ struct xe_exec_queue *q, struct xe_sync_entry *syncs,
+ u32 num_syncs, bool immediate, bool first_op,
+ bool last_op)
{
struct dma_fence *fence;
+ struct xe_exec_queue *wait_exec_queue = to_wait_exec_queue(vm, q);
xe_vm_assert_held(vm);
- fence = xe_vm_bind_vma(vma, e, syncs, num_syncs);
- if (IS_ERR(fence))
- return PTR_ERR(fence);
- if (afence)
- add_async_op_fence_cb(vm, fence, afence);
+ if (immediate) {
+ fence = xe_vm_bind_vma(vma, q, syncs, num_syncs, first_op,
+ last_op);
+ if (IS_ERR(fence))
+ return PTR_ERR(fence);
+ } else {
+ int i;
+
+ xe_assert(vm->xe, xe_vm_in_fault_mode(vm));
+
+ fence = xe_exec_queue_last_fence_get(wait_exec_queue, vm);
+ if (last_op) {
+ for (i = 0; i < num_syncs; i++)
+ xe_sync_entry_signal(&syncs[i], NULL, fence);
+ }
+ }
+ if (last_op)
+ xe_exec_queue_last_fence_set(wait_exec_queue, vm, fence);
+ if (last_op && xe_vm_sync_mode(vm, q))
+ dma_fence_wait(fence, true);
dma_fence_put(fence);
+
return 0;
}
-static int xe_vm_bind(struct xe_vm *vm, struct xe_vma *vma, struct xe_engine *e,
+static int xe_vm_bind(struct xe_vm *vm, struct xe_vma *vma, struct xe_exec_queue *q,
struct xe_bo *bo, struct xe_sync_entry *syncs,
- u32 num_syncs, struct async_op_fence *afence)
+ u32 num_syncs, bool immediate, bool first_op,
+ bool last_op)
{
int err;
xe_vm_assert_held(vm);
xe_bo_assert_held(bo);
- if (bo) {
+ if (bo && immediate) {
err = xe_bo_validate(bo, vm, true);
if (err)
return err;
}
- return __xe_vm_bind(vm, vma, e, syncs, num_syncs, afence);
+ return __xe_vm_bind(vm, vma, q, syncs, num_syncs, immediate, first_op,
+ last_op);
}
static int xe_vm_unbind(struct xe_vm *vm, struct xe_vma *vma,
- struct xe_engine *e, struct xe_sync_entry *syncs,
- u32 num_syncs, struct async_op_fence *afence)
+ struct xe_exec_queue *q, struct xe_sync_entry *syncs,
+ u32 num_syncs, bool first_op, bool last_op)
{
struct dma_fence *fence;
+ struct xe_exec_queue *wait_exec_queue = to_wait_exec_queue(vm, q);
xe_vm_assert_held(vm);
- xe_bo_assert_held(vma->bo);
+ xe_bo_assert_held(xe_vma_bo(vma));
- fence = xe_vm_unbind_vma(vma, e, syncs, num_syncs);
+ fence = xe_vm_unbind_vma(vma, q, syncs, num_syncs, first_op, last_op);
if (IS_ERR(fence))
return PTR_ERR(fence);
- if (afence)
- add_async_op_fence_cb(vm, fence, afence);
xe_vma_destroy(vma, fence);
+ if (last_op)
+ xe_exec_queue_last_fence_set(wait_exec_queue, vm, fence);
+ if (last_op && xe_vm_sync_mode(vm, q))
+ dma_fence_wait(fence, true);
dma_fence_put(fence);
return 0;
}
-static int vm_set_error_capture_address(struct xe_device *xe, struct xe_vm *vm,
- u64 value)
-{
- if (XE_IOCTL_ERR(xe, !value))
- return -EINVAL;
-
- if (XE_IOCTL_ERR(xe, !(vm->flags & XE_VM_FLAG_ASYNC_BIND_OPS)))
- return -ENOTSUPP;
-
- if (XE_IOCTL_ERR(xe, vm->async_ops.error_capture.addr))
- return -ENOTSUPP;
-
- vm->async_ops.error_capture.mm = current->mm;
- vm->async_ops.error_capture.addr = value;
- init_waitqueue_head(&vm->async_ops.error_capture.wq);
-
- return 0;
-}
-
-typedef int (*xe_vm_set_property_fn)(struct xe_device *xe, struct xe_vm *vm,
- u64 value);
-
-static const xe_vm_set_property_fn vm_set_property_funcs[] = {
- [XE_VM_PROPERTY_BIND_OP_ERROR_CAPTURE_ADDRESS] =
- vm_set_error_capture_address,
-};
-
-static int vm_user_ext_set_property(struct xe_device *xe, struct xe_vm *vm,
- u64 extension)
-{
- u64 __user *address = u64_to_user_ptr(extension);
- struct drm_xe_ext_vm_set_property ext;
- int err;
-
- err = __copy_from_user(&ext, address, sizeof(ext));
- if (XE_IOCTL_ERR(xe, err))
- return -EFAULT;
-
- if (XE_IOCTL_ERR(xe, ext.property >=
- ARRAY_SIZE(vm_set_property_funcs)))
- return -EINVAL;
-
- return vm_set_property_funcs[ext.property](xe, vm, ext.value);
-}
-
-typedef int (*xe_vm_user_extension_fn)(struct xe_device *xe, struct xe_vm *vm,
- u64 extension);
-
-static const xe_vm_set_property_fn vm_user_extension_funcs[] = {
- [XE_VM_EXTENSION_SET_PROPERTY] = vm_user_ext_set_property,
-};
-
-#define MAX_USER_EXTENSIONS 16
-static int vm_user_extensions(struct xe_device *xe, struct xe_vm *vm,
- u64 extensions, int ext_number)
-{
- u64 __user *address = u64_to_user_ptr(extensions);
- struct xe_user_extension ext;
- int err;
-
- if (XE_IOCTL_ERR(xe, ext_number >= MAX_USER_EXTENSIONS))
- return -E2BIG;
-
- err = __copy_from_user(&ext, address, sizeof(ext));
- if (XE_IOCTL_ERR(xe, err))
- return -EFAULT;
-
- if (XE_IOCTL_ERR(xe, ext.name >=
- ARRAY_SIZE(vm_user_extension_funcs)))
- return -EINVAL;
-
- err = vm_user_extension_funcs[ext.name](xe, vm, extensions);
- if (XE_IOCTL_ERR(xe, err))
- return err;
-
- if (ext.next_extension)
- return vm_user_extensions(xe, vm, ext.next_extension,
- ++ext_number);
-
- return 0;
-}
-
-#define ALL_DRM_XE_VM_CREATE_FLAGS (DRM_XE_VM_CREATE_SCRATCH_PAGE | \
- DRM_XE_VM_CREATE_COMPUTE_MODE | \
- DRM_XE_VM_CREATE_ASYNC_BIND_OPS | \
- DRM_XE_VM_CREATE_FAULT_MODE)
+#define ALL_DRM_XE_VM_CREATE_FLAGS (DRM_XE_VM_CREATE_FLAG_SCRATCH_PAGE | \
+ DRM_XE_VM_CREATE_FLAG_COMPUTE_MODE | \
+ DRM_XE_VM_CREATE_FLAG_ASYNC_DEFAULT | \
+ DRM_XE_VM_CREATE_FLAG_FAULT_MODE)
int xe_vm_create_ioctl(struct drm_device *dev, void *data,
struct drm_file *file)
{
struct xe_device *xe = to_xe_device(dev);
struct xe_file *xef = to_xe_file(file);
struct drm_xe_vm_create *args = data;
+ struct xe_tile *tile;
struct xe_vm *vm;
u32 id, asid;
int err;
u32 flags = 0;
- if (XE_IOCTL_ERR(xe, args->flags & ~ALL_DRM_XE_VM_CREATE_FLAGS))
+ if (XE_IOCTL_DBG(xe, args->extensions))
+ return -EINVAL;
+
+ if (XE_WA(xe_root_mmio_gt(xe), 14016763929))
+ args->flags |= DRM_XE_VM_CREATE_FLAG_SCRATCH_PAGE;
+
+ if (XE_IOCTL_DBG(xe, args->flags & DRM_XE_VM_CREATE_FLAG_FAULT_MODE &&
+ !xe->info.supports_usm))
+ return -EINVAL;
+
+ if (XE_IOCTL_DBG(xe, args->reserved[0] || args->reserved[1]))
+ return -EINVAL;
+
+ if (XE_IOCTL_DBG(xe, args->flags & ~ALL_DRM_XE_VM_CREATE_FLAGS))
return -EINVAL;
- if (XE_IOCTL_ERR(xe, args->flags & DRM_XE_VM_CREATE_SCRATCH_PAGE &&
- args->flags & DRM_XE_VM_CREATE_FAULT_MODE))
+ if (XE_IOCTL_DBG(xe, args->flags & DRM_XE_VM_CREATE_FLAG_SCRATCH_PAGE &&
+ args->flags & DRM_XE_VM_CREATE_FLAG_FAULT_MODE))
return -EINVAL;
- if (XE_IOCTL_ERR(xe, args->flags & DRM_XE_VM_CREATE_COMPUTE_MODE &&
- args->flags & DRM_XE_VM_CREATE_FAULT_MODE))
+ if (XE_IOCTL_DBG(xe, args->flags & DRM_XE_VM_CREATE_FLAG_COMPUTE_MODE &&
+ args->flags & DRM_XE_VM_CREATE_FLAG_FAULT_MODE))
return -EINVAL;
- if (XE_IOCTL_ERR(xe, args->flags & DRM_XE_VM_CREATE_FAULT_MODE &&
+ if (XE_IOCTL_DBG(xe, args->flags & DRM_XE_VM_CREATE_FLAG_FAULT_MODE &&
xe_device_in_non_fault_mode(xe)))
return -EINVAL;
- if (XE_IOCTL_ERR(xe, !(args->flags & DRM_XE_VM_CREATE_FAULT_MODE) &&
+ if (XE_IOCTL_DBG(xe, !(args->flags & DRM_XE_VM_CREATE_FLAG_FAULT_MODE) &&
xe_device_in_fault_mode(xe)))
return -EINVAL;
- if (XE_IOCTL_ERR(xe, args->flags & DRM_XE_VM_CREATE_FAULT_MODE &&
- !xe->info.supports_usm))
- return -EINVAL;
- if (args->flags & DRM_XE_VM_CREATE_SCRATCH_PAGE)
+ if (args->flags & DRM_XE_VM_CREATE_FLAG_SCRATCH_PAGE)
flags |= XE_VM_FLAG_SCRATCH_PAGE;
- if (args->flags & DRM_XE_VM_CREATE_COMPUTE_MODE)
+ if (args->flags & DRM_XE_VM_CREATE_FLAG_COMPUTE_MODE)
flags |= XE_VM_FLAG_COMPUTE_MODE;
- if (args->flags & DRM_XE_VM_CREATE_ASYNC_BIND_OPS)
- flags |= XE_VM_FLAG_ASYNC_BIND_OPS;
- if (args->flags & DRM_XE_VM_CREATE_FAULT_MODE)
+ if (args->flags & DRM_XE_VM_CREATE_FLAG_ASYNC_DEFAULT)
+ flags |= XE_VM_FLAG_ASYNC_DEFAULT;
+ if (args->flags & DRM_XE_VM_CREATE_FLAG_FAULT_MODE)
flags |= XE_VM_FLAG_FAULT_MODE;
vm = xe_vm_create(xe, flags);
if (IS_ERR(vm))
return PTR_ERR(vm);
- if (args->extensions) {
- err = vm_user_extensions(xe, vm, args->extensions, 0);
- if (XE_IOCTL_ERR(xe, err)) {
- xe_vm_close_and_put(vm);
- return err;
- }
- }
-
mutex_lock(&xef->vm.lock);
err = xa_alloc(&xef->vm.xa, &id, vm, xa_limit_32b, GFP_KERNEL);
mutex_unlock(&xef->vm.lock);
if (err) {
xe_vm_close_and_put(vm);
return err;
}
if (xe->info.has_asid) {
mutex_lock(&xe->usm.lock);
err = xa_alloc_cyclic(&xe->usm.asid_to_vm, &asid, vm,
- XA_LIMIT(0, XE_MAX_ASID - 1),
+ XA_LIMIT(1, XE_MAX_ASID - 1),
&xe->usm.next_asid, GFP_KERNEL);
mutex_unlock(&xe->usm.lock);
- if (err) {
+ if (err < 0) {
xe_vm_close_and_put(vm);
return err;
}
+ err = 0;
vm->usm.asid = asid;
}
args->vm_id = id;
+ vm->xef = xef;
+
+ /* Record the VM's page-table BO memory against the owning client */
+ for_each_tile(tile, xe, id)
+ if (vm->pt_root[id])
+ xe_drm_client_add_bo(vm->xef->client, vm->pt_root[id]->bo);
#if IS_ENABLED(CONFIG_DRM_XE_DEBUG_MEM)
/* Warning: Security issue - never enable by default */
- args->reserved[0] = xe_bo_main_addr(vm->pt_root[0]->bo, GEN8_PAGE_SIZE);
+ args->reserved[0] = xe_bo_main_addr(vm->pt_root[0]->bo, XE_PAGE_SIZE);
#endif
return 0;
struct xe_file *xef = to_xe_file(file);
struct drm_xe_vm_destroy *args = data;
struct xe_vm *vm;
+ int err = 0;
- if (XE_IOCTL_ERR(xe, args->pad))
+ if (XE_IOCTL_DBG(xe, args->pad) ||
+ XE_IOCTL_DBG(xe, args->reserved[0] || args->reserved[1]))
return -EINVAL;
- vm = xe_vm_lookup(xef, args->vm_id);
- if (XE_IOCTL_ERR(xe, !vm))
- return -ENOENT;
- xe_vm_put(vm);
-
- /* FIXME: Extend this check to non-compute mode VMs */
- if (XE_IOCTL_ERR(xe, vm->preempt.num_engines))
- return -EBUSY;
-
mutex_lock(&xef->vm.lock);
- xa_erase(&xef->vm.xa, args->vm_id);
+ vm = xa_load(&xef->vm.xa, args->vm_id);
+ if (XE_IOCTL_DBG(xe, !vm))
+ err = -ENOENT;
+ else if (XE_IOCTL_DBG(xe, vm->preempt.num_exec_queues))
+ err = -EBUSY;
+ else
+ xa_erase(&xef->vm.xa, args->vm_id);
mutex_unlock(&xef->vm.lock);
- xe_vm_close_and_put(vm);
+ if (!err)
+ xe_vm_close_and_put(vm);
- return 0;
+ return err;
}
static const u32 region_to_mem_type[] = {
};
static int xe_vm_prefetch(struct xe_vm *vm, struct xe_vma *vma,
- struct xe_engine *e, u32 region,
+ struct xe_exec_queue *q, u32 region,
struct xe_sync_entry *syncs, u32 num_syncs,
- struct async_op_fence *afence)
+ bool first_op, bool last_op)
{
+ struct xe_exec_queue *wait_exec_queue = to_wait_exec_queue(vm, q);
int err;
- XE_BUG_ON(region > ARRAY_SIZE(region_to_mem_type));
+ xe_assert(vm->xe, region < ARRAY_SIZE(region_to_mem_type));
- if (!xe_vma_is_userptr(vma)) {
- err = xe_bo_migrate(vma->bo, region_to_mem_type[region]);
+ if (!xe_vma_has_no_bo(vma)) {
+ err = xe_bo_migrate(xe_vma_bo(vma), region_to_mem_type[region]);
if (err)
return err;
}
- if (vma->gt_mask != (vma->gt_present & ~vma->usm.gt_invalidated)) {
- return xe_vm_bind(vm, vma, e, vma->bo, syncs, num_syncs,
- afence);
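+ /* Rebind if any requested tile lacks a valid, non-invalidated binding */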
+ if (vma->tile_mask != (vma->tile_present & ~vma->usm.tile_invalidated)) {
+ return xe_vm_bind(vm, vma, q, xe_vma_bo(vma), syncs, num_syncs,
+ true, first_op, last_op);
} else {
int i;
/* Nothing to do, signal fences now */
- for (i = 0; i < num_syncs; i++)
- xe_sync_entry_signal(&syncs[i], NULL,
- dma_fence_get_stub());
- if (afence)
- dma_fence_signal(&afence->fence);
- return 0;
- }
-}
+ if (last_op) {
+ for (i = 0; i < num_syncs; i++) {
+ struct dma_fence *fence =
+ xe_exec_queue_last_fence_get(wait_exec_queue, vm);
-#define VM_BIND_OP(op) (op & 0xffff)
+ xe_sync_entry_signal(&syncs[i], NULL, fence);
+ }
+ }
-static int __vm_bind_ioctl(struct xe_vm *vm, struct xe_vma *vma,
- struct xe_engine *e, struct xe_bo *bo, u32 op,
- u32 region, struct xe_sync_entry *syncs,
- u32 num_syncs, struct async_op_fence *afence)
-{
- switch (VM_BIND_OP(op)) {
- case XE_VM_BIND_OP_MAP:
- return xe_vm_bind(vm, vma, e, bo, syncs, num_syncs, afence);
- case XE_VM_BIND_OP_UNMAP:
- case XE_VM_BIND_OP_UNMAP_ALL:
- return xe_vm_unbind(vm, vma, e, syncs, num_syncs, afence);
- case XE_VM_BIND_OP_MAP_USERPTR:
- return xe_vm_bind(vm, vma, e, NULL, syncs, num_syncs, afence);
- case XE_VM_BIND_OP_PREFETCH:
- return xe_vm_prefetch(vm, vma, e, region, syncs, num_syncs,
- afence);
- break;
- default:
- XE_BUG_ON("NOT POSSIBLE");
- return -EINVAL;
+ return 0;
}
}
-struct ttm_buffer_object *xe_vm_ttm_bo(struct xe_vm *vm)
-{
- int idx = vm->flags & XE_VM_FLAG_MIGRATION ?
- XE_VM_FLAG_GT_ID(vm->flags) : 0;
-
- /* Safe to use index 0 as all BO in the VM share a single dma-resv lock */
- return &vm->pt_root[idx]->bo->ttm;
-}
-
-static void xe_vm_tv_populate(struct xe_vm *vm, struct ttm_validate_buffer *tv)
-{
- tv->num_shared = 1;
- tv->bo = xe_vm_ttm_bo(vm);
-}
-
-static bool is_map_op(u32 op)
+static void prep_vma_destroy(struct xe_vm *vm, struct xe_vma *vma,
+ bool post_commit)
{
- return VM_BIND_OP(op) == XE_VM_BIND_OP_MAP ||
- VM_BIND_OP(op) == XE_VM_BIND_OP_MAP_USERPTR;
+ down_read(&vm->userptr.notifier_lock);
+ vma->gpuva.flags |= XE_VMA_DESTROYED;
+ up_read(&vm->userptr.notifier_lock);
+ if (post_commit)
+ xe_vm_remove_vma(vm, vma);
}
-static bool is_unmap_op(u32 op)
-{
- return VM_BIND_OP(op) == XE_VM_BIND_OP_UNMAP ||
- VM_BIND_OP(op) == XE_VM_BIND_OP_UNMAP_ALL;
-}
+#undef ULL
+#define ULL unsigned long long
-static int vm_bind_ioctl(struct xe_vm *vm, struct xe_vma *vma,
- struct xe_engine *e, struct xe_bo *bo,
- struct drm_xe_vm_bind_op *bind_op,
- struct xe_sync_entry *syncs, u32 num_syncs,
- struct async_op_fence *afence)
+#if IS_ENABLED(CONFIG_DRM_XE_DEBUG_VM)
+static void print_op(struct xe_device *xe, struct drm_gpuva_op *op)
{
- LIST_HEAD(objs);
- LIST_HEAD(dups);
- struct ttm_validate_buffer tv_bo, tv_vm;
- struct ww_acquire_ctx ww;
- struct xe_bo *vbo;
- int err, i;
-
- lockdep_assert_held(&vm->lock);
- XE_BUG_ON(!list_empty(&vma->unbind_link));
-
- /* Binds deferred to faults, signal fences now */
- if (xe_vm_in_fault_mode(vm) && is_map_op(bind_op->op) &&
- !(bind_op->op & XE_VM_BIND_FLAG_IMMEDIATE)) {
- for (i = 0; i < num_syncs; i++)
- xe_sync_entry_signal(&syncs[i], NULL,
- dma_fence_get_stub());
- if (afence)
- dma_fence_signal(&afence->fence);
- return 0;
- }
-
- xe_vm_tv_populate(vm, &tv_vm);
- list_add_tail(&tv_vm.head, &objs);
- vbo = vma->bo;
- if (vbo) {
- /*
- * An unbind can drop the last reference to the BO and
- * the BO is needed for ttm_eu_backoff_reservation so
- * take a reference here.
- */
- xe_bo_get(vbo);
-
- tv_bo.bo = &vbo->ttm;
- tv_bo.num_shared = 1;
- list_add(&tv_bo.head, &objs);
- }
-
-again:
- err = ttm_eu_reserve_buffers(&ww, &objs, true, &dups);
- if (!err) {
- err = __vm_bind_ioctl(vm, vma, e, bo,
- bind_op->op, bind_op->region, syncs,
- num_syncs, afence);
- ttm_eu_backoff_reservation(&ww, &objs);
- if (err == -EAGAIN && xe_vma_is_userptr(vma)) {
- lockdep_assert_held_write(&vm->lock);
- err = xe_vma_userptr_pin_pages(vma);
- if (!err)
- goto again;
- }
- }
- xe_bo_put(vbo);
-
- return err;
-}
-
-struct async_op {
struct xe_vma *vma;
- struct xe_engine *engine;
- struct xe_bo *bo;
- struct drm_xe_vm_bind_op bind_op;
- struct xe_sync_entry *syncs;
- u32 num_syncs;
- struct list_head link;
- struct async_op_fence *fence;
-};
-static void async_op_cleanup(struct xe_vm *vm, struct async_op *op)
-{
- while (op->num_syncs--)
- xe_sync_entry_cleanup(&op->syncs[op->num_syncs]);
- kfree(op->syncs);
- xe_bo_put(op->bo);
- if (op->engine)
- xe_engine_put(op->engine);
- xe_vm_put(vm);
- if (op->fence)
- dma_fence_put(&op->fence->fence);
- kfree(op);
+ switch (op->op) {
+ case DRM_GPUVA_OP_MAP:
+ vm_dbg(&xe->drm, "MAP: addr=0x%016llx, range=0x%016llx",
+ (ULL)op->map.va.addr, (ULL)op->map.va.range);
+ break;
+ case DRM_GPUVA_OP_REMAP:
+ vma = gpuva_to_vma(op->remap.unmap->va);
+ vm_dbg(&xe->drm, "REMAP:UNMAP: addr=0x%016llx, range=0x%016llx, keep=%d",
+ (ULL)xe_vma_start(vma), (ULL)xe_vma_size(vma),
+ op->remap.unmap->keep ? 1 : 0);
+ if (op->remap.prev)
+ vm_dbg(&xe->drm,
+ "REMAP:PREV: addr=0x%016llx, range=0x%016llx",
+ (ULL)op->remap.prev->va.addr,
+ (ULL)op->remap.prev->va.range);
+ if (op->remap.next)
+ vm_dbg(&xe->drm,
+ "REMAP:NEXT: addr=0x%016llx, range=0x%016llx",
+ (ULL)op->remap.next->va.addr,
+ (ULL)op->remap.next->va.range);
+ break;
+ case DRM_GPUVA_OP_UNMAP:
+ vma = gpuva_to_vma(op->unmap.va);
+ vm_dbg(&xe->drm, "UNMAP: addr=0x%016llx, range=0x%016llx, keep=%d",
+ (ULL)xe_vma_start(vma), (ULL)xe_vma_size(vma),
+ op->unmap.keep ? 1 : 0);
+ break;
+ case DRM_GPUVA_OP_PREFETCH:
+ vma = gpuva_to_vma(op->prefetch.va);
+ vm_dbg(&xe->drm, "PREFETCH: addr=0x%016llx, range=0x%016llx",
+ (ULL)xe_vma_start(vma), (ULL)xe_vma_size(vma));
+ break;
+ default:
+ drm_warn(&xe->drm, "NOT POSSIBLE");
+ }
}
-
-static struct async_op *next_async_op(struct xe_vm *vm)
+#else
+static void print_op(struct xe_device *xe, struct drm_gpuva_op *op)
{
- return list_first_entry_or_null(&vm->async_ops.pending,
- struct async_op, link);
}
+#endif
-static void vm_set_async_error(struct xe_vm *vm, int err)
-{
- lockdep_assert_held(&vm->lock);
- vm->async_ops.error = err;
-}
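+/*
+ * VM bind pipeline: vm_bind_ioctl_ops_create() builds a drm_gpuva_ops list
+ * per bind op, vm_bind_ioctl_ops_parse() allocates VMAs and commits them to
+ * the VM, and vm_bind_ioctl_ops_execute() issues the (un)bind jobs. Failures
+ * in create/parse are rolled back with vm_bind_ioctl_ops_unwind(); a failure
+ * in execute currently kills the VM (see the FIXME there).
+ */
+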
+/*
+ * Create the operations list from the IOCTL arguments and set up the operation
+ * fields so that the parse and commit steps are decoupled from the IOCTL
+ * arguments. This step can fail.
+ */
+static struct drm_gpuva_ops *
+vm_bind_ioctl_ops_create(struct xe_vm *vm, struct xe_bo *bo,
+ u64 bo_offset_or_userptr, u64 addr, u64 range,
+ u32 operation, u32 flags, u8 tile_mask,
+ u32 prefetch_region)
+{
+ struct drm_gem_object *obj = bo ? &bo->ttm.base : NULL;
+ struct drm_gpuva_ops *ops;
+ struct drm_gpuva_op *__op;
+ struct xe_vma_op *op;
+ struct drm_gpuvm_bo *vm_bo;
+ int err;
-static void async_op_work_func(struct work_struct *w)
-{
- struct xe_vm *vm = container_of(w, struct xe_vm, async_ops.work);
+ lockdep_assert_held_write(&vm->lock);
- for (;;) {
- struct async_op *op;
- int err;
+ vm_dbg(&vm->xe->drm,
+ "op=%d, addr=0x%016llx, range=0x%016llx, bo_offset_or_userptr=0x%016llx",
+ operation, (ULL)addr, (ULL)range,
+ (ULL)bo_offset_or_userptr);
- if (vm->async_ops.error && !xe_vm_is_closed(vm))
- break;
+ switch (operation) {
+ case DRM_XE_VM_BIND_OP_MAP:
+ case DRM_XE_VM_BIND_OP_MAP_USERPTR:
+ ops = drm_gpuvm_sm_map_ops_create(&vm->gpuvm, addr, range,
+ obj, bo_offset_or_userptr);
+ break;
+ case DRM_XE_VM_BIND_OP_UNMAP:
+ ops = drm_gpuvm_sm_unmap_ops_create(&vm->gpuvm, addr, range);
+ break;
+ case DRM_XE_VM_BIND_OP_PREFETCH:
+ ops = drm_gpuvm_prefetch_ops_create(&vm->gpuvm, addr, range);
+ break;
+ case DRM_XE_VM_BIND_OP_UNMAP_ALL:
+ xe_assert(vm->xe, bo);
- spin_lock_irq(&vm->async_ops.lock);
- op = next_async_op(vm);
- if (op)
- list_del_init(&op->link);
- spin_unlock_irq(&vm->async_ops.lock);
+ err = xe_bo_lock(bo, true);
+ if (err)
+ return ERR_PTR(err);
- if (!op)
+ vm_bo = drm_gpuvm_bo_find(&vm->gpuvm, obj);
+ if (!vm_bo) {
+ xe_bo_unlock(bo);
+ return ERR_PTR(-ENODATA);
+ }
- break;
- if (!xe_vm_is_closed(vm)) {
- bool first, last;
+ ops = drm_gpuvm_bo_unmap_ops_create(vm_bo);
+ drm_gpuvm_bo_put(vm_bo);
+ xe_bo_unlock(bo);
+ break;
+ default:
+ drm_warn(&vm->xe->drm, "NOT POSSIBLE");
+ ops = ERR_PTR(-EINVAL);
+ }
+ if (IS_ERR(ops))
+ return ops;
- down_write(&vm->lock);
-again:
- first = op->vma->first_munmap_rebind;
- last = op->vma->last_munmap_rebind;
#ifdef TEST_VM_ASYNC_OPS_ERROR
#define FORCE_ASYNC_OP_ERROR BIT(31)
- if (!(op->bind_op.op & FORCE_ASYNC_OP_ERROR)) {
- err = vm_bind_ioctl(vm, op->vma, op->engine,
- op->bo, &op->bind_op,
- op->syncs, op->num_syncs,
- op->fence);
- } else {
- err = -ENOMEM;
- op->bind_op.op &= ~FORCE_ASYNC_OP_ERROR;
- }
-#else
- err = vm_bind_ioctl(vm, op->vma, op->engine, op->bo,
- &op->bind_op, op->syncs,
- op->num_syncs, op->fence);
+ if (operation & FORCE_ASYNC_OP_ERROR) {
+ op = list_first_entry_or_null(&ops->list, struct xe_vma_op,
+ base.entry);
+ if (op)
+ op->inject_error = true;
+ }
#endif
- /*
- * In order for the fencing to work (stall behind
- * existing jobs / prevent new jobs from running) all
- * the dma-resv slots need to be programmed in a batch
- * relative to execs / the rebind worker. The vm->lock
- * ensure this.
- */
- if (!err && ((first && VM_BIND_OP(op->bind_op.op) ==
- XE_VM_BIND_OP_UNMAP) ||
- vm->async_ops.munmap_rebind_inflight)) {
- if (last) {
- op->vma->last_munmap_rebind = false;
- vm->async_ops.munmap_rebind_inflight =
- false;
- } else {
- vm->async_ops.munmap_rebind_inflight =
- true;
-
- async_op_cleanup(vm, op);
-
- spin_lock_irq(&vm->async_ops.lock);
- op = next_async_op(vm);
- XE_BUG_ON(!op);
- list_del_init(&op->link);
- spin_unlock_irq(&vm->async_ops.lock);
-
- goto again;
- }
- }
- if (err) {
- trace_xe_vma_fail(op->vma);
- drm_warn(&vm->xe->drm, "Async VM op(%d) failed with %d",
- VM_BIND_OP(op->bind_op.op),
- err);
-
- spin_lock_irq(&vm->async_ops.lock);
- list_add(&op->link, &vm->async_ops.pending);
- spin_unlock_irq(&vm->async_ops.lock);
-
- vm_set_async_error(vm, err);
- up_write(&vm->lock);
-
- if (vm->async_ops.error_capture.addr)
- vm_error_capture(vm, err,
- op->bind_op.op,
- op->bind_op.addr,
- op->bind_op.range);
- break;
- }
- up_write(&vm->lock);
- } else {
- trace_xe_vma_flush(op->vma);
- if (is_unmap_op(op->bind_op.op)) {
- down_write(&vm->lock);
- xe_vma_destroy_unlocked(op->vma);
- up_write(&vm->lock);
- }
+ drm_gpuva_for_each_op(__op, ops) {
+ struct xe_vma_op *op = gpuva_op_to_vma_op(__op);
- if (op->fence && !test_bit(DMA_FENCE_FLAG_SIGNALED_BIT,
- &op->fence->fence.flags)) {
- if (!xe_vm_no_dma_fences(vm)) {
- op->fence->started = true;
- smp_wmb();
- wake_up_all(&op->fence->wq);
- }
- dma_fence_signal(&op->fence->fence);
- }
+ op->tile_mask = tile_mask;
+ if (__op->op == DRM_GPUVA_OP_MAP) {
+ op->map.immediate =
+ flags & DRM_XE_VM_BIND_FLAG_IMMEDIATE;
+ op->map.read_only =
+ flags & DRM_XE_VM_BIND_FLAG_READONLY;
+ op->map.is_null = flags & DRM_XE_VM_BIND_FLAG_NULL;
+ } else if (__op->op == DRM_GPUVA_OP_PREFETCH) {
+ op->prefetch.region = prefetch_region;
}
- async_op_cleanup(vm, op);
+ print_op(vm->xe, __op);
}
+
+ return ops;
}
-static int __vm_bind_ioctl_async(struct xe_vm *vm, struct xe_vma *vma,
- struct xe_engine *e, struct xe_bo *bo,
- struct drm_xe_vm_bind_op *bind_op,
- struct xe_sync_entry *syncs, u32 num_syncs)
+static struct xe_vma *new_vma(struct xe_vm *vm, struct drm_gpuva_op_map *op,
+ u8 tile_mask, bool read_only, bool is_null)
{
- struct async_op *op;
- bool installed = false;
- u64 seqno;
- int i;
+ struct xe_bo *bo = op->gem.obj ? gem_to_xe_bo(op->gem.obj) : NULL;
+ struct xe_vma *vma;
+ int err;
- lockdep_assert_held(&vm->lock);
+ lockdep_assert_held_write(&vm->lock);
- op = kmalloc(sizeof(*op), GFP_KERNEL);
- if (!op) {
- return -ENOMEM;
+ if (bo) {
+ err = xe_bo_lock(bo, true);
+ if (err)
+ return ERR_PTR(err);
}
+ vma = xe_vma_create(vm, bo, op->gem.offset,
+ op->va.addr, op->va.addr +
+ op->va.range - 1, read_only, is_null,
+ tile_mask);
+ if (bo)
+ xe_bo_unlock(bo);
+
+ if (!vma)
+ return ERR_PTR(-ENOMEM);
- if (num_syncs) {
- op->fence = kmalloc(sizeof(*op->fence), GFP_KERNEL);
- if (!op->fence) {
- kfree(op);
- return -ENOMEM;
+ if (xe_vma_is_userptr(vma)) {
+ err = xe_vma_userptr_pin_pages(vma);
+ if (err) {
+ prep_vma_destroy(vm, vma, false);
+ xe_vma_destroy_unlocked(vma);
+ return ERR_PTR(err);
}
-
- seqno = e ? ++e->bind.fence_seqno : ++vm->async_ops.fence.seqno;
- dma_fence_init(&op->fence->fence, &async_op_fence_ops,
- &vm->async_ops.lock, e ? e->bind.fence_ctx :
- vm->async_ops.fence.context, seqno);
-
- if (!xe_vm_no_dma_fences(vm)) {
- op->fence->vm = vm;
- op->fence->started = false;
- init_waitqueue_head(&op->fence->wq);
+ } else if (!xe_vma_has_no_bo(vma) && !bo->vm) {
+ vm_insert_extobj(vm, vma);
+ err = add_preempt_fences(vm, bo);
+ if (err) {
+ prep_vma_destroy(vm, vma, false);
+ xe_vma_destroy_unlocked(vma);
+ return ERR_PTR(err);
}
- } else {
- op->fence = NULL;
}
- op->vma = vma;
- op->engine = e;
- op->bo = bo;
- op->bind_op = *bind_op;
- op->syncs = syncs;
- op->num_syncs = num_syncs;
- INIT_LIST_HEAD(&op->link);
- for (i = 0; i < num_syncs; i++)
- installed |= xe_sync_entry_signal(&syncs[i], NULL,
- &op->fence->fence);
+ return vma;
+}
- if (!installed && op->fence)
- dma_fence_signal(&op->fence->fence);
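+/*
+ * The largest page size the VMA's current binding may use. A remap boundary
+ * aligned to this size cannot split a huge PTE, which is what lets the
+ * skip_prev/skip_next paths below elide rebinding the unchanged half.
+ */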
+static u64 xe_vma_max_pte_size(struct xe_vma *vma)
+{
+ if (vma->gpuva.flags & XE_VMA_PTE_1G)
+ return SZ_1G;
+ else if (vma->gpuva.flags & XE_VMA_PTE_2M)
+ return SZ_2M;
- spin_lock_irq(&vm->async_ops.lock);
- list_add_tail(&op->link, &vm->async_ops.pending);
- spin_unlock_irq(&vm->async_ops.lock);
+ return SZ_4K;
+}
- if (!vm->async_ops.error)
- queue_work(system_unbound_wq, &vm->async_ops.work);
+static u64 xe_vma_set_pte_size(struct xe_vma *vma, u64 size)
+{
+ switch (size) {
+ case SZ_1G:
+ vma->gpuva.flags |= XE_VMA_PTE_1G;
+ break;
+ case SZ_2M:
+ vma->gpuva.flags |= XE_VMA_PTE_2M;
+ break;
+ }
- return 0;
+ return SZ_4K;
}
-static int vm_bind_ioctl_async(struct xe_vm *vm, struct xe_vma *vma,
- struct xe_engine *e, struct xe_bo *bo,
- struct drm_xe_vm_bind_op *bind_op,
- struct xe_sync_entry *syncs, u32 num_syncs)
+static int xe_vma_op_commit(struct xe_vm *vm, struct xe_vma_op *op)
{
- struct xe_vma *__vma, *next;
- struct list_head rebind_list;
- struct xe_sync_entry *in_syncs = NULL, *out_syncs = NULL;
- u32 num_in_syncs = 0, num_out_syncs = 0;
- bool first = true, last;
- int err;
- int i;
-
- lockdep_assert_held(&vm->lock);
+ int err = 0;
- /* Not a linked list of unbinds + rebinds, easy */
- if (list_empty(&vma->unbind_link))
- return __vm_bind_ioctl_async(vm, vma, e, bo, bind_op,
- syncs, num_syncs);
+ lockdep_assert_held_write(&vm->lock);
- /*
- * Linked list of unbinds + rebinds, decompose syncs into 'in / out'
- * passing the 'in' to the first operation and 'out' to the last. Also
- * the reference counting is a little tricky, increment the VM / bind
- * engine ref count on all but the last operation and increment the BOs
- * ref count on each rebind.
- */
+ switch (op->base.op) {
+ case DRM_GPUVA_OP_MAP:
+ err |= xe_vm_insert_vma(vm, op->map.vma);
+ if (!err)
+ op->flags |= XE_VMA_OP_COMMITTED;
+ break;
+ case DRM_GPUVA_OP_REMAP:
+ {
+ u8 tile_present =
+ gpuva_to_vma(op->base.remap.unmap->va)->tile_present;
- XE_BUG_ON(VM_BIND_OP(bind_op->op) != XE_VM_BIND_OP_UNMAP &&
- VM_BIND_OP(bind_op->op) != XE_VM_BIND_OP_UNMAP_ALL &&
- VM_BIND_OP(bind_op->op) != XE_VM_BIND_OP_PREFETCH);
+ prep_vma_destroy(vm, gpuva_to_vma(op->base.remap.unmap->va),
+ true);
+ op->flags |= XE_VMA_OP_COMMITTED;
- /* Decompose syncs */
- if (num_syncs) {
- in_syncs = kmalloc(sizeof(*in_syncs) * num_syncs, GFP_KERNEL);
- out_syncs = kmalloc(sizeof(*out_syncs) * num_syncs, GFP_KERNEL);
- if (!in_syncs || !out_syncs) {
- err = -ENOMEM;
- goto out_error;
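+ /*
+ * A skipped prev/next half keeps its existing page tables, so it
+ * inherits tile_present from the unmapped VMA rather than being
+ * rebound.
+ */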
+ if (op->remap.prev) {
+ err |= xe_vm_insert_vma(vm, op->remap.prev);
+ if (!err)
+ op->flags |= XE_VMA_OP_PREV_COMMITTED;
+ if (!err && op->remap.skip_prev) {
+ op->remap.prev->tile_present =
+ tile_present;
+ op->remap.prev = NULL;
+ }
}
-
- for (i = 0; i < num_syncs; ++i) {
- bool signal = syncs[i].flags & DRM_XE_SYNC_SIGNAL;
-
- if (signal)
- out_syncs[num_out_syncs++] = syncs[i];
- else
- in_syncs[num_in_syncs++] = syncs[i];
+ if (op->remap.next) {
+ err |= xe_vm_insert_vma(vm, op->remap.next);
+ if (!err)
+ op->flags |= XE_VMA_OP_NEXT_COMMITTED;
+ if (!err && op->remap.skip_next) {
+ op->remap.next->tile_present =
+ tile_present;
+ op->remap.next = NULL;
+ }
}
- }
- /* Do unbinds + move rebinds to new list */
- INIT_LIST_HEAD(&rebind_list);
- list_for_each_entry_safe(__vma, next, &vma->unbind_link, unbind_link) {
- if (__vma->destroyed ||
- VM_BIND_OP(bind_op->op) == XE_VM_BIND_OP_PREFETCH) {
- list_del_init(&__vma->unbind_link);
- xe_bo_get(bo);
- err = __vm_bind_ioctl_async(xe_vm_get(vm), __vma,
- e ? xe_engine_get(e) : NULL,
- bo, bind_op, first ?
- in_syncs : NULL,
- first ? num_in_syncs : 0);
- if (err) {
- xe_bo_put(bo);
- xe_vm_put(vm);
- if (e)
- xe_engine_put(e);
- goto out_error;
- }
- in_syncs = NULL;
- first = false;
- } else {
- list_move_tail(&__vma->unbind_link, &rebind_list);
+ /* Adjust for partial unbind after removing the VMA from the VM */
+ if (!err) {
+ op->base.remap.unmap->va->va.addr = op->remap.start;
+ op->base.remap.unmap->va->va.range = op->remap.range;
}
+ break;
}
- last = list_empty(&rebind_list);
- if (!last) {
- xe_vm_get(vm);
- if (e)
- xe_engine_get(e);
- }
- err = __vm_bind_ioctl_async(vm, vma, e,
- bo, bind_op,
- first ? in_syncs :
- last ? out_syncs : NULL,
- first ? num_in_syncs :
- last ? num_out_syncs : 0);
- if (err) {
- if (!last) {
- xe_vm_put(vm);
- if (e)
- xe_engine_put(e);
- }
- goto out_error;
+ case DRM_GPUVA_OP_UNMAP:
+ prep_vma_destroy(vm, gpuva_to_vma(op->base.unmap.va), true);
+ op->flags |= XE_VMA_OP_COMMITTED;
+ break;
+ case DRM_GPUVA_OP_PREFETCH:
+ op->flags |= XE_VMA_OP_COMMITTED;
+ break;
+ default:
+ drm_warn(&vm->xe->drm, "NOT POSSIBLE");
}
- in_syncs = NULL;
- /* Do rebinds */
- list_for_each_entry_safe(__vma, next, &rebind_list, unbind_link) {
- list_del_init(&__vma->unbind_link);
- last = list_empty(&rebind_list);
+ return err;
+}
- if (xe_vma_is_userptr(__vma)) {
- bind_op->op = XE_VM_BIND_FLAG_ASYNC |
- XE_VM_BIND_OP_MAP_USERPTR;
- } else {
- bind_op->op = XE_VM_BIND_FLAG_ASYNC |
- XE_VM_BIND_OP_MAP;
- xe_bo_get(__vma->bo);
+
+static int vm_bind_ioctl_ops_parse(struct xe_vm *vm, struct xe_exec_queue *q,
+ struct drm_gpuva_ops *ops,
+ struct xe_sync_entry *syncs, u32 num_syncs,
+ struct list_head *ops_list, bool last,
+ bool async)
+{
+ struct xe_vma_op *last_op = NULL;
+ struct drm_gpuva_op *__op;
+ int err = 0;
+
+ lockdep_assert_held_write(&vm->lock);
+
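+ /*
+ * Syncs attach to the first op of the first list (waits complete
+ * before any work starts) and to the last op of the last list
+ * (signals fire only once every op is done); intermediate ops carry
+ * no syncs.
+ */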
+ drm_gpuva_for_each_op(__op, ops) {
+ struct xe_vma_op *op = gpuva_op_to_vma_op(__op);
+ bool first = list_empty(ops_list);
+
+ INIT_LIST_HEAD(&op->link);
+ list_add_tail(&op->link, ops_list);
+
+ if (first) {
+ op->flags |= XE_VMA_OP_FIRST;
+ op->num_syncs = num_syncs;
+ op->syncs = syncs;
}
- if (!last) {
- xe_vm_get(vm);
- if (e)
- xe_engine_get(e);
+ op->q = q;
+
+ switch (op->base.op) {
+ case DRM_GPUVA_OP_MAP:
+ {
+ struct xe_vma *vma;
+
+ vma = new_vma(vm, &op->base.map,
+ op->tile_mask, op->map.read_only,
+ op->map.is_null);
+ if (IS_ERR(vma))
+ return PTR_ERR(vma);
+
+ op->map.vma = vma;
+ break;
}
+ case DRM_GPUVA_OP_REMAP:
+ {
+ struct xe_vma *old =
+ gpuva_to_vma(op->base.remap.unmap->va);
+
+ op->remap.start = xe_vma_start(old);
+ op->remap.range = xe_vma_size(old);
+
+ if (op->base.remap.prev) {
+ struct xe_vma *vma;
+ bool read_only =
+ op->base.remap.unmap->va->flags &
+ XE_VMA_READ_ONLY;
+ bool is_null =
+ op->base.remap.unmap->va->flags &
+ DRM_GPUVA_SPARSE;
+
+ vma = new_vma(vm, op->base.remap.prev,
+ op->tile_mask, read_only,
+ is_null);
+ if (IS_ERR(vma))
+ return PTR_ERR(vma);
+
+ op->remap.prev = vma;
+
+ /*
+ * Userptr creates a new SG mapping so
+ * we must also rebind.
+ */
+ op->remap.skip_prev = !xe_vma_is_userptr(old) &&
+ IS_ALIGNED(xe_vma_end(vma),
+ xe_vma_max_pte_size(old));
+ if (op->remap.skip_prev) {
+ xe_vma_set_pte_size(vma, xe_vma_max_pte_size(old));
+ op->remap.range -=
+ xe_vma_end(vma) -
+ xe_vma_start(old);
+ op->remap.start = xe_vma_end(vma);
+ }
+ }
- err = __vm_bind_ioctl_async(vm, __vma, e,
- __vma->bo, bind_op, last ?
- out_syncs : NULL,
- last ? num_out_syncs : 0);
- if (err) {
- if (!last) {
- xe_vm_put(vm);
- if (e)
- xe_engine_put(e);
+ if (op->base.remap.next) {
+ struct xe_vma *vma;
+ bool read_only =
+ op->base.remap.unmap->va->flags &
+ XE_VMA_READ_ONLY;
+
+ bool is_null =
+ op->base.remap.unmap->va->flags &
+ DRM_GPUVA_SPARSE;
+
+ vma = new_vma(vm, op->base.remap.next,
+ op->tile_mask, read_only,
+ is_null);
+ if (IS_ERR(vma))
+ return PTR_ERR(vma);
+
+ op->remap.next = vma;
+
+ /*
+ * Userptr creates a new SG mapping so
+ * we must also rebind.
+ */
+ op->remap.skip_next = !xe_vma_is_userptr(old) &&
+ IS_ALIGNED(xe_vma_start(vma),
+ xe_vma_max_pte_size(old));
+ if (op->remap.skip_next) {
+ xe_vma_set_pte_size(vma, xe_vma_max_pte_size(old));
+ op->remap.range -=
+ xe_vma_end(old) -
+ xe_vma_start(vma);
+ }
}
- goto out_error;
+ break;
+ }
+ case DRM_GPUVA_OP_UNMAP:
+ case DRM_GPUVA_OP_PREFETCH:
+ /* Nothing to do */
+ break;
+ default:
+ drm_warn(&vm->xe->drm, "NOT POSSIBLE");
}
+
+ last_op = op;
+
+ err = xe_vma_op_commit(vm, op);
+ if (err)
+ return err;
}
- kfree(syncs);
- return 0;
+ /* FIXME: Unhandled corner case */
+ XE_WARN_ON(!last_op && last && !list_empty(ops_list));
-out_error:
- kfree(in_syncs);
- kfree(out_syncs);
- kfree(syncs);
+ if (!last_op)
+ return 0;
- return err;
+ last_op->ops = ops;
+ if (last) {
+ last_op->flags |= XE_VMA_OP_LAST;
+ last_op->num_syncs = num_syncs;
+ last_op->syncs = syncs;
+ }
+
+ return 0;
}
-static int __vm_bind_ioctl_lookup_vma(struct xe_vm *vm, struct xe_bo *bo,
- u64 addr, u64 range, u32 op)
+static int op_execute(struct drm_exec *exec, struct xe_vm *vm,
+ struct xe_vma *vma, struct xe_vma_op *op)
{
- struct xe_device *xe = vm->xe;
- struct xe_vma *vma, lookup;
- bool async = !!(op & XE_VM_BIND_FLAG_ASYNC);
+ int err;
- lockdep_assert_held(&vm->lock);
+ lockdep_assert_held_write(&vm->lock);
- lookup.start = addr;
- lookup.end = addr + range - 1;
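+ /* Lock the VM's and the VMA's BO dma-resv under drm_exec, reserving a fence slot */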
+ err = xe_vm_prepare_vma(exec, vma, 1);
+ if (err)
+ return err;
- switch (VM_BIND_OP(op)) {
- case XE_VM_BIND_OP_MAP:
- case XE_VM_BIND_OP_MAP_USERPTR:
- vma = xe_vm_find_overlapping_vma(vm, &lookup);
- if (XE_IOCTL_ERR(xe, vma))
- return -EBUSY;
+ xe_vm_assert_held(vm);
+ xe_bo_assert_held(xe_vma_bo(vma));
+
+ switch (op->base.op) {
+ case DRM_GPUVA_OP_MAP:
+ err = xe_vm_bind(vm, vma, op->q, xe_vma_bo(vma),
+ op->syncs, op->num_syncs,
+ op->map.immediate || !xe_vm_in_fault_mode(vm),
+ op->flags & XE_VMA_OP_FIRST,
+ op->flags & XE_VMA_OP_LAST);
break;
- case XE_VM_BIND_OP_UNMAP:
- case XE_VM_BIND_OP_PREFETCH:
- vma = xe_vm_find_overlapping_vma(vm, &lookup);
- if (XE_IOCTL_ERR(xe, !vma) ||
- XE_IOCTL_ERR(xe, (vma->start != addr ||
- vma->end != addr + range - 1) && !async))
- return -EINVAL;
+ case DRM_GPUVA_OP_REMAP:
+ {
+ bool prev = !!op->remap.prev;
+ bool next = !!op->remap.next;
+
+ if (!op->remap.unmap_done) {
+ if (prev || next)
+ vma->gpuva.flags |= XE_VMA_FIRST_REBIND;
+ err = xe_vm_unbind(vm, vma, op->q, op->syncs,
+ op->num_syncs,
+ op->flags & XE_VMA_OP_FIRST,
+ op->flags & XE_VMA_OP_LAST &&
+ !prev && !next);
+ if (err)
+ break;
+ op->remap.unmap_done = true;
+ }
+
+ if (prev) {
+ op->remap.prev->gpuva.flags |= XE_VMA_LAST_REBIND;
+ err = xe_vm_bind(vm, op->remap.prev, op->q,
+ xe_vma_bo(op->remap.prev), op->syncs,
+ op->num_syncs, true, false,
+ op->flags & XE_VMA_OP_LAST && !next);
+ op->remap.prev->gpuva.flags &= ~XE_VMA_LAST_REBIND;
+ if (err)
+ break;
+ op->remap.prev = NULL;
+ }
+
+ if (next) {
+ op->remap.next->gpuva.flags |= XE_VMA_LAST_REBIND;
+ err = xe_vm_bind(vm, op->remap.next, op->q,
+ xe_vma_bo(op->remap.next),
+ op->syncs, op->num_syncs,
+ true, false,
+ op->flags & XE_VMA_OP_LAST);
+ op->remap.next->gpuva.flags &= ~XE_VMA_LAST_REBIND;
+ if (err)
+ break;
+ op->remap.next = NULL;
+ }
+
break;
- case XE_VM_BIND_OP_UNMAP_ALL:
+ }
+ case DRM_GPUVA_OP_UNMAP:
+ err = xe_vm_unbind(vm, vma, op->q, op->syncs,
+ op->num_syncs, op->flags & XE_VMA_OP_FIRST,
+ op->flags & XE_VMA_OP_LAST);
+ break;
+ case DRM_GPUVA_OP_PREFETCH:
+ err = xe_vm_prefetch(vm, vma, op->q, op->prefetch.region,
+ op->syncs, op->num_syncs,
+ op->flags & XE_VMA_OP_FIRST,
+ op->flags & XE_VMA_OP_LAST);
break;
default:
- XE_BUG_ON("NOT POSSIBLE");
- return -EINVAL;
+ drm_warn(&vm->xe->drm, "NOT POSSIBLE");
}
- return 0;
-}
+ if (err)
+ trace_xe_vma_fail(vma);
-static void prep_vma_destroy(struct xe_vm *vm, struct xe_vma *vma)
-{
- down_read(&vm->userptr.notifier_lock);
- vma->destroyed = true;
- up_read(&vm->userptr.notifier_lock);
- xe_vm_remove_vma(vm, vma);
+ return err;
}
-static int prep_replacement_vma(struct xe_vm *vm, struct xe_vma *vma)
+static int __xe_vma_op_execute(struct xe_vm *vm, struct xe_vma *vma,
+ struct xe_vma_op *op)
{
+ struct drm_exec exec;
int err;
- if (vma->bo && !vma->bo->vm) {
- vm_insert_extobj(vm, vma);
- err = add_preempt_fences(vm, vma->bo);
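+ /*
+ * drm_exec transparently retries the locking loop on ww-mutex
+ * contention; -EAGAIN from a racing userptr invalidation is handled
+ * below by repinning the pages and re-running the op.
+ */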
+retry_userptr:
+ drm_exec_init(&exec, DRM_EXEC_INTERRUPTIBLE_WAIT);
+ drm_exec_until_all_locked(&exec) {
+ err = op_execute(&exec, vm, vma, op);
+ drm_exec_retry_on_contention(&exec);
if (err)
- return err;
+ break;
}
+ drm_exec_fini(&exec);
- return 0;
+ if (err == -EAGAIN && xe_vma_is_userptr(vma)) {
+ lockdep_assert_held_write(&vm->lock);
+ err = xe_vma_userptr_pin_pages(vma);
+ if (!err)
+ goto retry_userptr;
+
+ trace_xe_vma_fail(vma);
+ }
+
+ return err;
}
-/*
- * Find all overlapping VMAs in lookup range and add to a list in the returned
- * VMA, all of VMAs found will be unbound. Also possibly add 2 new VMAs that
- * need to be bound if first / last VMAs are not fully unbound. This is akin to
- * how munmap works.
- */
-static struct xe_vma *vm_unbind_lookup_vmas(struct xe_vm *vm,
- struct xe_vma *lookup)
+static int xe_vma_op_execute(struct xe_vm *vm, struct xe_vma_op *op)
{
- struct xe_vma *vma = xe_vm_find_overlapping_vma(vm, lookup);
- struct rb_node *node;
- struct xe_vma *first = vma, *last = vma, *new_first = NULL,
- *new_last = NULL, *__vma, *next;
- int err = 0;
- bool first_munmap_rebind = false;
+ int ret = 0;
- lockdep_assert_held(&vm->lock);
- XE_BUG_ON(!vma);
-
- node = &vma->vm_node;
- while ((node = rb_next(node))) {
- if (!xe_vma_cmp_vma_cb(lookup, node)) {
- __vma = to_xe_vma(node);
- list_add_tail(&__vma->unbind_link, &vma->unbind_link);
- last = __vma;
- } else {
- break;
- }
- }
+ lockdep_assert_held_write(&vm->lock);
- node = &vma->vm_node;
- while ((node = rb_prev(node))) {
- if (!xe_vma_cmp_vma_cb(lookup, node)) {
- __vma = to_xe_vma(node);
- list_add(&__vma->unbind_link, &vma->unbind_link);
- first = __vma;
- } else {
- break;
- }
+#ifdef TEST_VM_ASYNC_OPS_ERROR
+ if (op->inject_error) {
+ op->inject_error = false;
+ return -ENOMEM;
}
+#endif
- if (first->start != lookup->start) {
- struct ww_acquire_ctx ww;
+ switch (op->base.op) {
+ case DRM_GPUVA_OP_MAP:
+ ret = __xe_vma_op_execute(vm, op->map.vma, op);
+ break;
+ case DRM_GPUVA_OP_REMAP:
+ {
+ struct xe_vma *vma;
+
+ if (!op->remap.unmap_done)
+ vma = gpuva_to_vma(op->base.remap.unmap->va);
+ else if (op->remap.prev)
+ vma = op->remap.prev;
+ else
+ vma = op->remap.next;
- if (first->bo)
- err = xe_bo_lock(first->bo, &ww, 0, true);
- if (err)
- goto unwind;
- new_first = xe_vma_create(first->vm, first->bo,
- first->bo ? first->bo_offset :
- first->userptr.ptr,
- first->start,
- lookup->start - 1,
- (first->pte_flags & PTE_READ_ONLY),
- first->gt_mask);
- if (first->bo)
- xe_bo_unlock(first->bo, &ww);
- if (!new_first) {
- err = -ENOMEM;
- goto unwind;
- }
- if (!first->bo) {
- err = xe_vma_userptr_pin_pages(new_first);
- if (err)
- goto unwind;
- }
- err = prep_replacement_vma(vm, new_first);
- if (err)
- goto unwind;
+ ret = __xe_vma_op_execute(vm, vma, op);
+ break;
}
-
- if (last->end != lookup->end) {
- struct ww_acquire_ctx ww;
- u64 chunk = lookup->end + 1 - last->start;
-
- if (last->bo)
- err = xe_bo_lock(last->bo, &ww, 0, true);
- if (err)
- goto unwind;
- new_last = xe_vma_create(last->vm, last->bo,
- last->bo ? last->bo_offset + chunk :
- last->userptr.ptr + chunk,
- last->start + chunk,
- last->end,
- (last->pte_flags & PTE_READ_ONLY),
- last->gt_mask);
- if (last->bo)
- xe_bo_unlock(last->bo, &ww);
- if (!new_last) {
- err = -ENOMEM;
- goto unwind;
- }
- if (!last->bo) {
- err = xe_vma_userptr_pin_pages(new_last);
- if (err)
- goto unwind;
- }
- err = prep_replacement_vma(vm, new_last);
- if (err)
- goto unwind;
+ case DRM_GPUVA_OP_UNMAP:
+ ret = __xe_vma_op_execute(vm, gpuva_to_vma(op->base.unmap.va),
+ op);
+ break;
+ case DRM_GPUVA_OP_PREFETCH:
+ ret = __xe_vma_op_execute(vm,
+ gpuva_to_vma(op->base.prefetch.va),
+ op);
+ break;
+ default:
+ drm_warn(&vm->xe->drm, "NOT POSSIBLE");
}
- prep_vma_destroy(vm, vma);
- if (list_empty(&vma->unbind_link) && (new_first || new_last))
- vma->first_munmap_rebind = true;
- list_for_each_entry(__vma, &vma->unbind_link, unbind_link) {
- if ((new_first || new_last) && !first_munmap_rebind) {
- __vma->first_munmap_rebind = true;
- first_munmap_rebind = true;
- }
- prep_vma_destroy(vm, __vma);
- }
- if (new_first) {
- xe_vm_insert_vma(vm, new_first);
- list_add_tail(&new_first->unbind_link, &vma->unbind_link);
- if (!new_last)
- new_first->last_munmap_rebind = true;
- }
- if (new_last) {
- xe_vm_insert_vma(vm, new_last);
- list_add_tail(&new_last->unbind_link, &vma->unbind_link);
- new_last->last_munmap_rebind = true;
- }
+ return ret;
+}
- return vma;
+static void xe_vma_op_cleanup(struct xe_vm *vm, struct xe_vma_op *op)
+{
+ bool last = op->flags & XE_VMA_OP_LAST;
-unwind:
- list_for_each_entry_safe(__vma, next, &vma->unbind_link, unbind_link)
- list_del_init(&__vma->unbind_link);
- if (new_last) {
- prep_vma_destroy(vm, new_last);
- xe_vma_destroy_unlocked(new_last);
- }
- if (new_first) {
- prep_vma_destroy(vm, new_first);
- xe_vma_destroy_unlocked(new_first);
+ if (last) {
+ while (op->num_syncs--)
+ xe_sync_entry_cleanup(&op->syncs[op->num_syncs]);
+ kfree(op->syncs);
+ if (op->q)
+ xe_exec_queue_put(op->q);
}
-
- return ERR_PTR(err);
+ if (!list_empty(&op->link))
+ list_del(&op->link);
+ if (op->ops)
+ drm_gpuva_ops_free(&vm->gpuvm, op->ops);
+ if (last)
+ xe_vm_put(vm);
}
-/*
- * Similar to vm_unbind_lookup_vmas, find all VMAs in lookup range to prefetch
- */
-static struct xe_vma *vm_prefetch_lookup_vmas(struct xe_vm *vm,
- struct xe_vma *lookup,
- u32 region)
-{
- struct xe_vma *vma = xe_vm_find_overlapping_vma(vm, lookup), *__vma,
- *next;
- struct rb_node *node;
-
- if (!xe_vma_is_userptr(vma)) {
- if (!xe_bo_can_migrate(vma->bo, region_to_mem_type[region]))
- return ERR_PTR(-EINVAL);
- }
-
- node = &vma->vm_node;
- while ((node = rb_next(node))) {
- if (!xe_vma_cmp_vma_cb(lookup, node)) {
- __vma = to_xe_vma(node);
- if (!xe_vma_is_userptr(__vma)) {
- if (!xe_bo_can_migrate(__vma->bo, region_to_mem_type[region]))
- goto flush_list;
- }
- list_add_tail(&__vma->unbind_link, &vma->unbind_link);
- } else {
- break;
+static void xe_vma_op_unwind(struct xe_vm *vm, struct xe_vma_op *op,
+ bool post_commit, bool prev_post_commit,
+ bool next_post_commit)
+{
+ lockdep_assert_held_write(&vm->lock);
+
+ switch (op->base.op) {
+ case DRM_GPUVA_OP_MAP:
+ if (op->map.vma) {
+ prep_vma_destroy(vm, op->map.vma, post_commit);
+ xe_vma_destroy_unlocked(op->map.vma);
}
- }
+ break;
+ case DRM_GPUVA_OP_UNMAP:
+ {
+ struct xe_vma *vma = gpuva_to_vma(op->base.unmap.va);
- node = &vma->vm_node;
- while ((node = rb_prev(node))) {
- if (!xe_vma_cmp_vma_cb(lookup, node)) {
- __vma = to_xe_vma(node);
- if (!xe_vma_is_userptr(__vma)) {
- if (!xe_bo_can_migrate(__vma->bo, region_to_mem_type[region]))
- goto flush_list;
- }
- list_add(&__vma->unbind_link, &vma->unbind_link);
- } else {
- break;
+ if (vma) {
+ down_read(&vm->userptr.notifier_lock);
+ vma->gpuva.flags &= ~XE_VMA_DESTROYED;
+ up_read(&vm->userptr.notifier_lock);
+ if (post_commit)
+ xe_vm_insert_vma(vm, vma);
}
+ break;
}
+ case DRM_GPUVA_OP_REMAP:
+ {
+ struct xe_vma *vma = gpuva_to_vma(op->base.remap.unmap->va);
- return vma;
-
-flush_list:
- list_for_each_entry_safe(__vma, next, &vma->unbind_link,
- unbind_link)
- list_del_init(&__vma->unbind_link);
-
- return ERR_PTR(-EINVAL);
+ if (op->remap.prev) {
+ prep_vma_destroy(vm, op->remap.prev, prev_post_commit);
+ xe_vma_destroy_unlocked(op->remap.prev);
+ }
+ if (op->remap.next) {
+ prep_vma_destroy(vm, op->remap.next, next_post_commit);
+ xe_vma_destroy_unlocked(op->remap.next);
+ }
+ if (vma) {
+ down_read(&vm->userptr.notifier_lock);
+ vma->gpuva.flags &= ~XE_VMA_DESTROYED;
+ up_read(&vm->userptr.notifier_lock);
+ if (post_commit)
+ xe_vm_insert_vma(vm, vma);
+ }
+ break;
+ }
+ case DRM_GPUVA_OP_PREFETCH:
+ /* Nothing to do */
+ break;
+ default:
+ drm_warn(&vm->xe->drm, "NOT POSSIBLE");
+ }
}
-static struct xe_vma *vm_unbind_all_lookup_vmas(struct xe_vm *vm,
- struct xe_bo *bo)
+static void vm_bind_ioctl_ops_unwind(struct xe_vm *vm,
+ struct drm_gpuva_ops **ops,
+ int num_ops_list)
{
- struct xe_vma *first = NULL, *vma;
+ int i;
- lockdep_assert_held(&vm->lock);
- xe_bo_assert_held(bo);
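+ /* Unwind in reverse: later ops lists first, ops within each list in reverse */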
+ for (i = num_ops_list - 1; i >= 0; --i) {
+ struct drm_gpuva_ops *__ops = ops[i];
+ struct drm_gpuva_op *__op;
- list_for_each_entry(vma, &bo->vmas, bo_link) {
- if (vma->vm != vm)
+ if (!__ops)
continue;
- prep_vma_destroy(vm, vma);
- if (!first)
- first = vma;
- else
- list_add_tail(&vma->unbind_link, &first->unbind_link);
- }
+ drm_gpuva_for_each_op_reverse(__op, __ops) {
+ struct xe_vma_op *op = gpuva_op_to_vma_op(__op);
- return first;
+ xe_vma_op_unwind(vm, op,
+ op->flags & XE_VMA_OP_COMMITTED,
+ op->flags & XE_VMA_OP_PREV_COMMITTED,
+ op->flags & XE_VMA_OP_NEXT_COMMITTED);
+ }
+
+ drm_gpuva_ops_free(&vm->gpuvm, __ops);
+ }
}
-static struct xe_vma *vm_bind_ioctl_lookup_vma(struct xe_vm *vm,
- struct xe_bo *bo,
- u64 bo_offset_or_userptr,
- u64 addr, u64 range, u32 op,
- u64 gt_mask, u32 region)
+static int vm_bind_ioctl_ops_execute(struct xe_vm *vm,
+ struct list_head *ops_list)
{
- struct ww_acquire_ctx ww;
- struct xe_vma *vma, lookup;
+ struct xe_vma_op *op, *next;
int err;
- lockdep_assert_held(&vm->lock);
-
- lookup.start = addr;
- lookup.end = addr + range - 1;
-
- switch (VM_BIND_OP(op)) {
- case XE_VM_BIND_OP_MAP:
- XE_BUG_ON(!bo);
-
- err = xe_bo_lock(bo, &ww, 0, true);
- if (err)
- return ERR_PTR(err);
- vma = xe_vma_create(vm, bo, bo_offset_or_userptr, addr,
- addr + range - 1,
- op & XE_VM_BIND_FLAG_READONLY,
- gt_mask);
- xe_bo_unlock(bo, &ww);
- if (!vma)
- return ERR_PTR(-ENOMEM);
-
- xe_vm_insert_vma(vm, vma);
- if (!bo->vm) {
- vm_insert_extobj(vm, vma);
- err = add_preempt_fences(vm, bo);
- if (err) {
- prep_vma_destroy(vm, vma);
- xe_vma_destroy_unlocked(vma);
-
- return ERR_PTR(err);
- }
- }
- break;
- case XE_VM_BIND_OP_UNMAP:
- vma = vm_unbind_lookup_vmas(vm, &lookup);
- break;
- case XE_VM_BIND_OP_PREFETCH:
- vma = vm_prefetch_lookup_vmas(vm, &lookup, region);
- break;
- case XE_VM_BIND_OP_UNMAP_ALL:
- XE_BUG_ON(!bo);
-
- err = xe_bo_lock(bo, &ww, 0, true);
- if (err)
- return ERR_PTR(err);
- vma = vm_unbind_all_lookup_vmas(vm, bo);
- if (!vma)
- vma = ERR_PTR(-EINVAL);
- xe_bo_unlock(bo, &ww);
- break;
- case XE_VM_BIND_OP_MAP_USERPTR:
- XE_BUG_ON(bo);
-
- vma = xe_vma_create(vm, NULL, bo_offset_or_userptr, addr,
- addr + range - 1,
- op & XE_VM_BIND_FLAG_READONLY,
- gt_mask);
- if (!vma)
- return ERR_PTR(-ENOMEM);
+ lockdep_assert_held_write(&vm->lock);
- err = xe_vma_userptr_pin_pages(vma);
+ list_for_each_entry_safe(op, next, ops_list, link) {
+ err = xe_vma_op_execute(vm, op);
if (err) {
- prep_vma_destroy(vm, vma);
- xe_vma_destroy_unlocked(vma);
-
- return ERR_PTR(err);
- } else {
- xe_vm_insert_vma(vm, vma);
+ drm_warn(&vm->xe->drm, "VM op(%d) failed with %d",
+ op->base.op, err);
+ /*
+ * FIXME: Killing VM rather than proper error handling
+ */
+ xe_vm_kill(vm);
+ return -ENOSPC;
}
- break;
- default:
- XE_BUG_ON("NOT POSSIBLE");
- vma = ERR_PTR(-EINVAL);
+ xe_vma_op_cleanup(vm, op);
}
- return vma;
+ return 0;
}
#ifdef TEST_VM_ASYNC_OPS_ERROR
#define SUPPORTED_FLAGS \
- (FORCE_ASYNC_OP_ERROR | XE_VM_BIND_FLAG_ASYNC | \
- XE_VM_BIND_FLAG_READONLY | XE_VM_BIND_FLAG_IMMEDIATE | 0xffff)
+ (FORCE_ASYNC_OP_ERROR | DRM_XE_VM_BIND_FLAG_ASYNC | \
+ DRM_XE_VM_BIND_FLAG_READONLY | DRM_XE_VM_BIND_FLAG_IMMEDIATE | \
+ DRM_XE_VM_BIND_FLAG_NULL | 0xffff)
#else
#define SUPPORTED_FLAGS \
- (XE_VM_BIND_FLAG_ASYNC | XE_VM_BIND_FLAG_READONLY | \
- XE_VM_BIND_FLAG_IMMEDIATE | 0xffff)
+ (DRM_XE_VM_BIND_FLAG_ASYNC | DRM_XE_VM_BIND_FLAG_READONLY | \
+ DRM_XE_VM_BIND_FLAG_IMMEDIATE | DRM_XE_VM_BIND_FLAG_NULL | \
+ 0xffff)
#endif
#define XE_64K_PAGE_MASK 0xffffull
int err;
int i;
- if (XE_IOCTL_ERR(xe, args->extensions) ||
- XE_IOCTL_ERR(xe, !args->num_binds) ||
- XE_IOCTL_ERR(xe, args->num_binds > MAX_BINDS))
+ if (XE_IOCTL_DBG(xe, args->extensions) ||
+ XE_IOCTL_DBG(xe, !args->num_binds) ||
+ XE_IOCTL_DBG(xe, args->num_binds > MAX_BINDS))
return -EINVAL;
if (args->num_binds > 1) {
err = __copy_from_user(*bind_ops, bind_user,
sizeof(struct drm_xe_vm_bind_op) *
args->num_binds);
- if (XE_IOCTL_ERR(xe, err)) {
+ if (XE_IOCTL_DBG(xe, err)) {
err = -EFAULT;
goto free_bind_ops;
}
u64 range = (*bind_ops)[i].range;
u64 addr = (*bind_ops)[i].addr;
u32 op = (*bind_ops)[i].op;
+ u32 flags = (*bind_ops)[i].flags;
u32 obj = (*bind_ops)[i].obj;
u64 obj_offset = (*bind_ops)[i].obj_offset;
- u32 region = (*bind_ops)[i].region;
+ u32 prefetch_region = (*bind_ops)[i].prefetch_mem_region_instance;
+ bool is_null = flags & DRM_XE_VM_BIND_FLAG_NULL;
if (i == 0) {
- *async = !!(op & XE_VM_BIND_FLAG_ASYNC);
- } else if (XE_IOCTL_ERR(xe, !*async) ||
- XE_IOCTL_ERR(xe, !(op & XE_VM_BIND_FLAG_ASYNC)) ||
- XE_IOCTL_ERR(xe, VM_BIND_OP(op) ==
- XE_VM_BIND_OP_RESTART)) {
- err = -EINVAL;
- goto free_bind_ops;
- }
-
- if (XE_IOCTL_ERR(xe, !*async &&
- VM_BIND_OP(op) == XE_VM_BIND_OP_UNMAP_ALL)) {
- err = -EINVAL;
- goto free_bind_ops;
- }
-
- if (XE_IOCTL_ERR(xe, !*async &&
- VM_BIND_OP(op) == XE_VM_BIND_OP_PREFETCH)) {
+ *async = !!(flags & DRM_XE_VM_BIND_FLAG_ASYNC);
+ if (XE_IOCTL_DBG(xe, !*async && args->num_syncs)) {
+ err = -EINVAL;
+ goto free_bind_ops;
+ }
+ } else if (XE_IOCTL_DBG(xe, *async !=
+ !!(flags & DRM_XE_VM_BIND_FLAG_ASYNC))) {
err = -EINVAL;
goto free_bind_ops;
}
- if (XE_IOCTL_ERR(xe, VM_BIND_OP(op) >
- XE_VM_BIND_OP_PREFETCH) ||
- XE_IOCTL_ERR(xe, op & ~SUPPORTED_FLAGS) ||
- XE_IOCTL_ERR(xe, !obj &&
- VM_BIND_OP(op) == XE_VM_BIND_OP_MAP) ||
- XE_IOCTL_ERR(xe, !obj &&
- VM_BIND_OP(op) == XE_VM_BIND_OP_UNMAP_ALL) ||
- XE_IOCTL_ERR(xe, addr &&
- VM_BIND_OP(op) == XE_VM_BIND_OP_UNMAP_ALL) ||
- XE_IOCTL_ERR(xe, range &&
- VM_BIND_OP(op) == XE_VM_BIND_OP_UNMAP_ALL) ||
- XE_IOCTL_ERR(xe, obj &&
- VM_BIND_OP(op) == XE_VM_BIND_OP_MAP_USERPTR) ||
- XE_IOCTL_ERR(xe, obj &&
- VM_BIND_OP(op) == XE_VM_BIND_OP_PREFETCH) ||
- XE_IOCTL_ERR(xe, region &&
- VM_BIND_OP(op) != XE_VM_BIND_OP_PREFETCH) ||
- XE_IOCTL_ERR(xe, !(BIT(region) &
+ if (XE_IOCTL_DBG(xe, op > DRM_XE_VM_BIND_OP_PREFETCH) ||
+ XE_IOCTL_DBG(xe, flags & ~SUPPORTED_FLAGS) ||
+ XE_IOCTL_DBG(xe, obj && is_null) ||
+ XE_IOCTL_DBG(xe, obj_offset && is_null) ||
+ XE_IOCTL_DBG(xe, op != DRM_XE_VM_BIND_OP_MAP &&
+ is_null) ||
+ XE_IOCTL_DBG(xe, !obj &&
+ op == DRM_XE_VM_BIND_OP_MAP &&
+ !is_null) ||
+ XE_IOCTL_DBG(xe, !obj &&
+ op == DRM_XE_VM_BIND_OP_UNMAP_ALL) ||
+ XE_IOCTL_DBG(xe, addr &&
+ op == DRM_XE_VM_BIND_OP_UNMAP_ALL) ||
+ XE_IOCTL_DBG(xe, range &&
+ op == DRM_XE_VM_BIND_OP_UNMAP_ALL) ||
+ XE_IOCTL_DBG(xe, obj &&
+ op == DRM_XE_VM_BIND_OP_MAP_USERPTR) ||
+ XE_IOCTL_DBG(xe, obj &&
+ op == DRM_XE_VM_BIND_OP_PREFETCH) ||
+ XE_IOCTL_DBG(xe, prefetch_region &&
+ op != DRM_XE_VM_BIND_OP_PREFETCH) ||
+ XE_IOCTL_DBG(xe, !(BIT(prefetch_region) &
xe->info.mem_region_mask)) ||
- XE_IOCTL_ERR(xe, obj &&
- VM_BIND_OP(op) == XE_VM_BIND_OP_UNMAP)) {
+ XE_IOCTL_DBG(xe, obj &&
+ op == DRM_XE_VM_BIND_OP_UNMAP)) {
err = -EINVAL;
goto free_bind_ops;
}
- if (XE_IOCTL_ERR(xe, obj_offset & ~PAGE_MASK) ||
- XE_IOCTL_ERR(xe, addr & ~PAGE_MASK) ||
- XE_IOCTL_ERR(xe, range & ~PAGE_MASK) ||
- XE_IOCTL_ERR(xe, !range && VM_BIND_OP(op) !=
- XE_VM_BIND_OP_RESTART &&
- VM_BIND_OP(op) != XE_VM_BIND_OP_UNMAP_ALL)) {
+ if (XE_IOCTL_DBG(xe, obj_offset & ~PAGE_MASK) ||
+ XE_IOCTL_DBG(xe, addr & ~PAGE_MASK) ||
+ XE_IOCTL_DBG(xe, range & ~PAGE_MASK) ||
+ XE_IOCTL_DBG(xe, !range &&
+ op != DRM_XE_VM_BIND_OP_UNMAP_ALL)) {
err = -EINVAL;
goto free_bind_ops;
}
struct drm_xe_vm_bind *args = data;
struct drm_xe_sync __user *syncs_user;
struct xe_bo **bos = NULL;
- struct xe_vma **vmas = NULL;
+ struct drm_gpuva_ops **ops = NULL;
struct xe_vm *vm;
- struct xe_engine *e = NULL;
+ struct xe_exec_queue *q = NULL;
u32 num_syncs;
struct xe_sync_entry *syncs = NULL;
struct drm_xe_vm_bind_op *bind_ops;
+ LIST_HEAD(ops_list);
bool async;
int err;
- int i, j = 0;
+ int i;
err = vm_bind_ioctl_check_args(xe, args, &bind_ops, &async);
if (err)
return err;
- vm = xe_vm_lookup(xef, args->vm_id);
- if (XE_IOCTL_ERR(xe, !vm)) {
- err = -EINVAL;
- goto free_objs;
- }
-
- if (XE_IOCTL_ERR(xe, xe_vm_is_closed(vm))) {
- DRM_ERROR("VM closed while we began looking up?\n");
- err = -ENOENT;
- goto put_vm;
- }
-
- if (args->engine_id) {
- e = xe_engine_lookup(xef, args->engine_id);
- if (XE_IOCTL_ERR(xe, !e)) {
+ if (args->exec_queue_id) {
+ q = xe_exec_queue_lookup(xef, args->exec_queue_id);
+ if (XE_IOCTL_DBG(xe, !q)) {
err = -ENOENT;
- goto put_vm;
+ goto free_objs;
}
- if (XE_IOCTL_ERR(xe, !(e->flags & ENGINE_FLAG_VM))) {
+
+ if (XE_IOCTL_DBG(xe, !(q->flags & EXEC_QUEUE_FLAG_VM))) {
err = -EINVAL;
- goto put_engine;
+ goto put_exec_queue;
}
- }
-
- if (VM_BIND_OP(bind_ops[0].op) == XE_VM_BIND_OP_RESTART) {
- if (XE_IOCTL_ERR(xe, !(vm->flags & XE_VM_FLAG_ASYNC_BIND_OPS)))
- err = -ENOTSUPP;
- if (XE_IOCTL_ERR(xe, !err && args->num_syncs))
- err = EINVAL;
- if (XE_IOCTL_ERR(xe, !err && !vm->async_ops.error))
- err = -EPROTO;
- if (!err) {
- down_write(&vm->lock);
- trace_xe_vm_restart(vm);
- vm_set_async_error(vm, 0);
- up_write(&vm->lock);
+ if (XE_IOCTL_DBG(xe, async !=
+ !!(q->flags & EXEC_QUEUE_FLAG_VM_ASYNC))) {
+ err = -EINVAL;
+ goto put_exec_queue;
+ }
+ }
- queue_work(system_unbound_wq, &vm->async_ops.work);
+ vm = xe_vm_lookup(xef, args->vm_id);
+ if (XE_IOCTL_DBG(xe, !vm)) {
+ err = -EINVAL;
+ goto put_exec_queue;
+ }
- /* Rebinds may have been blocked, give worker a kick */
- if (xe_vm_in_compute_mode(vm))
- queue_work(vm->xe->ordered_wq,
- &vm->preempt.rebind_work);
+ if (!args->exec_queue_id) {
+ if (XE_IOCTL_DBG(xe, async !=
+ !!(vm->flags & XE_VM_FLAG_ASYNC_DEFAULT))) {
+ err = -EINVAL;
+ goto put_vm;
}
-
- goto put_engine;
}
- if (XE_IOCTL_ERR(xe, !vm->async_ops.error &&
- async != !!(vm->flags & XE_VM_FLAG_ASYNC_BIND_OPS))) {
- err = -ENOTSUPP;
- goto put_engine;
+ err = down_write_killable(&vm->lock);
+ if (err)
+ goto put_vm;
+
+ if (XE_IOCTL_DBG(xe, xe_vm_is_closed_or_banned(vm))) {
+ err = -ENOENT;
+ goto release_vm_lock;
}
for (i = 0; i < args->num_binds; ++i) {
u64 range = bind_ops[i].range;
u64 addr = bind_ops[i].addr;
- if (XE_IOCTL_ERR(xe, range > vm->size) ||
- XE_IOCTL_ERR(xe, addr > vm->size - range)) {
+ if (XE_IOCTL_DBG(xe, range > vm->size) ||
+ XE_IOCTL_DBG(xe, addr > vm->size - range)) {
err = -EINVAL;
- goto put_engine;
+ goto release_vm_lock;
}
- if (bind_ops[i].gt_mask) {
- u64 valid_gts = BIT(xe->info.tile_count) - 1;
+ if (bind_ops[i].tile_mask) {
+ u64 valid_tiles = BIT(xe->info.tile_count) - 1;
- if (XE_IOCTL_ERR(xe, bind_ops[i].gt_mask &
- ~valid_gts)) {
+ if (XE_IOCTL_DBG(xe, bind_ops[i].tile_mask &
+ ~valid_tiles)) {
err = -EINVAL;
- goto put_engine;
+ goto release_vm_lock;
}
}
}
bos = kzalloc(sizeof(*bos) * args->num_binds, GFP_KERNEL);
if (!bos) {
err = -ENOMEM;
- goto put_engine;
+ goto release_vm_lock;
}
- vmas = kzalloc(sizeof(*vmas) * args->num_binds, GFP_KERNEL);
- if (!vmas) {
+ ops = kzalloc(sizeof(*ops) * args->num_binds, GFP_KERNEL);
+ if (!ops) {
err = -ENOMEM;
- goto put_engine;
+ goto release_vm_lock;
}
for (i = 0; i < args->num_binds; ++i) {
continue;
gem_obj = drm_gem_object_lookup(file, obj);
- if (XE_IOCTL_ERR(xe, !gem_obj)) {
+ if (XE_IOCTL_DBG(xe, !gem_obj)) {
err = -ENOENT;
goto put_obj;
}
bos[i] = gem_to_xe_bo(gem_obj);
- if (XE_IOCTL_ERR(xe, range > bos[i]->size) ||
- XE_IOCTL_ERR(xe, obj_offset >
+ if (XE_IOCTL_DBG(xe, range > bos[i]->size) ||
+ XE_IOCTL_DBG(xe, obj_offset >
bos[i]->size - range)) {
err = -EINVAL;
goto put_obj;
}
if (bos[i]->flags & XE_BO_INTERNAL_64K) {
- if (XE_IOCTL_ERR(xe, obj_offset &
+ if (XE_IOCTL_DBG(xe, obj_offset &
XE_64K_PAGE_MASK) ||
- XE_IOCTL_ERR(xe, addr & XE_64K_PAGE_MASK) ||
- XE_IOCTL_ERR(xe, range & XE_64K_PAGE_MASK)) {
+ XE_IOCTL_DBG(xe, addr & XE_64K_PAGE_MASK) ||
+ XE_IOCTL_DBG(xe, range & XE_64K_PAGE_MASK)) {
err = -EINVAL;
goto put_obj;
}
goto free_syncs;
}
- err = down_write_killable(&vm->lock);
- if (err)
- goto free_syncs;
-
- /* Do some error checking first to make the unwind easier */
for (i = 0; i < args->num_binds; ++i) {
u64 range = bind_ops[i].range;
u64 addr = bind_ops[i].addr;
u32 op = bind_ops[i].op;
-
- err = __vm_bind_ioctl_lookup_vma(vm, bos[i], addr, range, op);
+ u32 flags = bind_ops[i].flags;
+ u64 obj_offset = bind_ops[i].obj_offset;
+ u8 tile_mask = bind_ops[i].tile_mask;
+ u32 prefetch_region = bind_ops[i].prefetch_mem_region_instance;
+
+ ops[i] = vm_bind_ioctl_ops_create(vm, bos[i], obj_offset,
+ addr, range, op, flags,
+ tile_mask, prefetch_region);
+ if (IS_ERR(ops[i])) {
+ err = PTR_ERR(ops[i]);
+ ops[i] = NULL;
+ goto unwind_ops;
+ }
+
+ err = vm_bind_ioctl_ops_parse(vm, q, ops[i], syncs, num_syncs,
+ &ops_list,
+ i == args->num_binds - 1,
+ async);
if (err)
- goto release_vm_lock;
+ goto unwind_ops;
}
- for (i = 0; i < args->num_binds; ++i) {
- u64 range = bind_ops[i].range;
- u64 addr = bind_ops[i].addr;
- u32 op = bind_ops[i].op;
- u64 obj_offset = bind_ops[i].obj_offset;
- u64 gt_mask = bind_ops[i].gt_mask;
- u32 region = bind_ops[i].region;
-
- vmas[i] = vm_bind_ioctl_lookup_vma(vm, bos[i], obj_offset,
- addr, range, op, gt_mask,
- region);
- if (IS_ERR(vmas[i])) {
- err = PTR_ERR(vmas[i]);
- vmas[i] = NULL;
- goto destroy_vmas;
- }
- }
-
- for (j = 0; j < args->num_binds; ++j) {
- struct xe_sync_entry *__syncs;
- u32 __num_syncs = 0;
- bool first_or_last = j == 0 || j == args->num_binds - 1;
-
- if (args->num_binds == 1) {
- __num_syncs = num_syncs;
- __syncs = syncs;
- } else if (first_or_last && num_syncs) {
- bool first = j == 0;
-
- __syncs = kmalloc(sizeof(*__syncs) * num_syncs,
- GFP_KERNEL);
- if (!__syncs) {
- err = ENOMEM;
- break;
- }
-
- /* in-syncs on first bind, out-syncs on last bind */
- for (i = 0; i < num_syncs; ++i) {
- bool signal = syncs[i].flags &
- DRM_XE_SYNC_SIGNAL;
+ /* Nothing to do */
+ if (list_empty(&ops_list)) {
+ err = -ENODATA;
+ goto unwind_ops;
+ }
- if ((first && !signal) || (!first && signal))
- __syncs[__num_syncs++] = syncs[i];
- }
- } else {
- __num_syncs = 0;
- __syncs = NULL;
- }
+ xe_vm_get(vm);
+ if (q)
+ xe_exec_queue_get(q);
- if (async) {
- bool last = j == args->num_binds - 1;
+ err = vm_bind_ioctl_ops_execute(vm, &ops_list);
- /*
- * Each pass of async worker drops the ref, take a ref
- * here, 1 set of refs taken above
- */
- if (!last) {
- if (e)
- xe_engine_get(e);
- xe_vm_get(vm);
- }
+ up_write(&vm->lock);
- err = vm_bind_ioctl_async(vm, vmas[j], e, bos[j],
- bind_ops + j, __syncs,
- __num_syncs);
- if (err && !last) {
- if (e)
- xe_engine_put(e);
- xe_vm_put(vm);
- }
- if (err)
- break;
- } else {
- XE_BUG_ON(j != 0); /* Not supported */
- err = vm_bind_ioctl(vm, vmas[j], e, bos[j],
- bind_ops + j, __syncs,
- __num_syncs, NULL);
- break; /* Needed so cleanup loops work */
- }
- }
+ if (q)
+ xe_exec_queue_put(q);
+ xe_vm_put(vm);
- /* Most of cleanup owned by the async bind worker */
- if (async && !err) {
- up_write(&vm->lock);
- if (args->num_binds > 1)
- kfree(syncs);
- goto free_objs;
- }
+ for (i = 0; bos && i < args->num_binds; ++i)
+ xe_bo_put(bos[i]);
-destroy_vmas:
- for (i = j; err && i < args->num_binds; ++i) {
- u32 op = bind_ops[i].op;
- struct xe_vma *vma, *next;
+ kfree(bos);
+ kfree(ops);
+ if (args->num_binds > 1)
+ kfree(bind_ops);
- if (!vmas[i])
- break;
+ return err;
- list_for_each_entry_safe(vma, next, &vma->unbind_link,
- unbind_link) {
- list_del_init(&vma->unbind_link);
- if (!vma->destroyed) {
- prep_vma_destroy(vm, vma);
- xe_vma_destroy_unlocked(vma);
- }
- }
+unwind_ops:
+ vm_bind_ioctl_ops_unwind(vm, ops, args->num_binds);
+free_syncs:
+ for (i = 0; err == -ENODATA && i < num_syncs; i++) {
+ struct dma_fence *fence =
+ xe_exec_queue_last_fence_get(to_wait_exec_queue(vm, q), vm);
- switch (VM_BIND_OP(op)) {
- case XE_VM_BIND_OP_MAP:
- prep_vma_destroy(vm, vmas[i]);
- xe_vma_destroy_unlocked(vmas[i]);
- break;
- case XE_VM_BIND_OP_MAP_USERPTR:
- prep_vma_destroy(vm, vmas[i]);
- xe_vma_destroy_unlocked(vmas[i]);
- break;
- }
+ xe_sync_entry_signal(&syncs[i], NULL, fence);
}
-release_vm_lock:
- up_write(&vm->lock);
-free_syncs:
- while (num_syncs--) {
- if (async && j &&
- !(syncs[num_syncs].flags & DRM_XE_SYNC_SIGNAL))
- continue; /* Still in async worker */
+ while (num_syncs--)
xe_sync_entry_cleanup(&syncs[num_syncs]);
- }
kfree(syncs);
put_obj:
- for (i = j; i < args->num_binds; ++i)
+ for (i = 0; i < args->num_binds; ++i)
xe_bo_put(bos[i]);
-put_engine:
- if (e)
- xe_engine_put(e);
+release_vm_lock:
+ up_write(&vm->lock);
put_vm:
xe_vm_put(vm);
+put_exec_queue:
+ if (q)
+ xe_exec_queue_put(q);
free_objs:
kfree(bos);
- kfree(vmas);
+ kfree(ops);
if (args->num_binds > 1)
kfree(bind_ops);
- return err;
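+ /* -ENODATA (nothing to do) is reported to userspace as success */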
+ return err == -ENODATA ? 0 : err;
}
-/*
- * XXX: Using the TTM wrappers for now, likely can call into dma-resv code
- * directly to optimize. Also this likely should be an inline function.
+/**
+ * xe_vm_lock() - Lock the vm's dma_resv object
+ * @vm: The struct xe_vm whose dma_resv is to be locked
+ * @intr: Whether any wait for the lock should be interruptible
+ *
+ * Return: 0 on success, -EINTR if @intr is true and the wait for a
+ * contended lock was interrupted. If @intr is false, the function
+ * always returns 0.
*/
-int xe_vm_lock(struct xe_vm *vm, struct ww_acquire_ctx *ww,
- int num_resv, bool intr)
+int xe_vm_lock(struct xe_vm *vm, bool intr)
{
- struct ttm_validate_buffer tv_vm;
- LIST_HEAD(objs);
- LIST_HEAD(dups);
-
- XE_BUG_ON(!ww);
-
- tv_vm.num_shared = num_resv;
- tv_vm.bo = xe_vm_ttm_bo(vm);
- list_add_tail(&tv_vm.head, &objs);
+ if (intr)
+ return dma_resv_lock_interruptible(xe_vm_resv(vm), NULL);
- return ttm_eu_reserve_buffers(ww, &objs, intr, &dups);
+ return dma_resv_lock(xe_vm_resv(vm), NULL);
}
-void xe_vm_unlock(struct xe_vm *vm, struct ww_acquire_ctx *ww)
+/**
+ * xe_vm_unlock() - Unlock the vm's dma_resv object
+ * @vm: The struct xe_vm whose lock is to be released.
+ *
+ * Unlock the vm's dma_resv object that was locked by xe_vm_lock().
+ */
+void xe_vm_unlock(struct xe_vm *vm)
{
- dma_resv_unlock(&vm->resv);
- ww_acquire_fini(ww);
+ dma_resv_unlock(xe_vm_resv(vm));
}
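+/*
+ * Usage sketch (illustrative only, not part of this patch), assuming the
+ * caller already holds a reference on @vm:
+ *
+ *	int err = xe_vm_lock(vm, true);
+ *
+ *	if (err)
+ *		return err;
+ *
+ *	(read or update dma_resv-protected VM state here)
+ *
+ *	xe_vm_unlock(vm);
+ */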
/**
*/
int xe_vm_invalidate_vma(struct xe_vma *vma)
{
- struct xe_device *xe = vma->vm->xe;
- struct xe_gt *gt;
- u32 gt_needs_invalidate = 0;
- int seqno[XE_MAX_GT];
+ struct xe_device *xe = xe_vma_vm(vma)->xe;
+ struct xe_tile *tile;
+ u32 tile_needs_invalidate = 0;
+ int seqno[XE_MAX_TILES_PER_DEVICE];
u8 id;
int ret;
- XE_BUG_ON(!xe_vm_in_fault_mode(vma->vm));
+ xe_assert(xe, xe_vm_in_fault_mode(xe_vma_vm(vma)));
+ xe_assert(xe, !xe_vma_is_null(vma));
trace_xe_vma_usm_invalidate(vma);
/* Check that we don't race with page-table updates */
WARN_ON_ONCE(!mmu_interval_check_retry
(&vma->userptr.notifier,
vma->userptr.notifier_seq));
- WARN_ON_ONCE(!dma_resv_test_signaled(&vma->vm->resv,
+ WARN_ON_ONCE(!dma_resv_test_signaled(xe_vm_resv(xe_vma_vm(vma)),
DMA_RESV_USAGE_BOOKKEEP));
} else {
- xe_bo_assert_held(vma->bo);
+ xe_bo_assert_held(xe_vma_bo(vma));
}
}
- for_each_gt(gt, xe, id) {
- if (xe_pt_zap_ptes(gt, vma)) {
- gt_needs_invalidate |= BIT(id);
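+ /* First pass: zap PTEs and issue a TLB invalidation on each affected tile */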
+ for_each_tile(tile, xe, id) {
+ if (xe_pt_zap_ptes(tile, vma)) {
+ tile_needs_invalidate |= BIT(id);
xe_device_wmb(xe);
- seqno[id] = xe_gt_tlb_invalidation_vma(gt, NULL, vma);
+ /*
+ * FIXME: We potentially need to invalidate multiple
+ * GTs within the tile
+ */
+ seqno[id] = xe_gt_tlb_invalidation_vma(tile->primary_gt, NULL, vma);
if (seqno[id] < 0)
return seqno[id];
}
}
- for_each_gt(gt, xe, id) {
- if (gt_needs_invalidate & BIT(id)) {
- ret = xe_gt_tlb_invalidation_wait(gt, seqno[id]);
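+ /* Second pass: wait for the issued TLB invalidations to complete */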
+ for_each_tile(tile, xe, id) {
+ if (tile_needs_invalidate & BIT(id)) {
+ ret = xe_gt_tlb_invalidation_wait(tile->primary_gt, seqno[id]);
if (ret < 0)
return ret;
}
}
- vma->usm.gt_invalidated = vma->gt_mask;
+ vma->usm.tile_invalidated = vma->tile_mask;
return 0;
}
-#if IS_ENABLED(CONFIG_DRM_XE_SIMPLE_ERROR_CAPTURE)
int xe_analyze_vm(struct drm_printer *p, struct xe_vm *vm, int gt_id)
{
- struct rb_node *node;
+ struct drm_gpuva *gpuva;
bool is_vram;
uint64_t addr;
return 0;
}
if (vm->pt_root[gt_id]) {
- addr = xe_bo_addr(vm->pt_root[gt_id]->bo, 0, GEN8_PAGE_SIZE, &is_vram);
- drm_printf(p, " VM root: A:0x%llx %s\n", addr, is_vram ? "VRAM" : "SYS");
+ addr = xe_bo_addr(vm->pt_root[gt_id]->bo, 0, XE_PAGE_SIZE);
+ is_vram = xe_bo_is_vram(vm->pt_root[gt_id]->bo);
+ drm_printf(p, " VM root: A:0x%llx %s\n", addr,
+ is_vram ? "VRAM" : "SYS");
}
- for (node = rb_first(&vm->vmas); node; node = rb_next(node)) {
- struct xe_vma *vma = to_xe_vma(node);
+ drm_gpuvm_for_each_va(gpuva, &vm->gpuvm) {
+ struct xe_vma *vma = gpuva_to_vma(gpuva);
bool is_userptr = xe_vma_is_userptr(vma);
+ bool is_null = xe_vma_is_null(vma);
- if (is_userptr) {
+ if (is_null) {
+ addr = 0;
+ } else if (is_userptr) {
struct xe_res_cursor cur;
- xe_res_first_sg(vma->userptr.sg, 0, GEN8_PAGE_SIZE, &cur);
- addr = xe_res_dma(&cur);
+ if (vma->userptr.sg) {
+ xe_res_first_sg(vma->userptr.sg, 0, XE_PAGE_SIZE,
+ &cur);
+ addr = xe_res_dma(&cur);
+ } else {
+ addr = 0;
+ }
} else {
- addr = xe_bo_addr(vma->bo, 0, GEN8_PAGE_SIZE, &is_vram);
+ addr = __xe_bo_addr(xe_vma_bo(vma), 0, XE_PAGE_SIZE);
+ is_vram = xe_bo_is_vram(xe_vma_bo(vma));
}
drm_printf(p, " [%016llx-%016llx] S:0x%016llx A:%016llx %s\n",
- vma->start, vma->end, vma->end - vma->start + 1ull,
- addr, is_userptr ? "USR" : is_vram ? "VRAM" : "SYS");
+ xe_vma_start(vma), xe_vma_end(vma) - 1,
+ xe_vma_size(vma),
+ addr, is_null ? "NULL" : is_userptr ? "USR" :
+ is_vram ? "VRAM" : "SYS");
}
up_read(&vm->lock);
return 0;
}
-#else
-int xe_analyze_vm(struct drm_printer *p, struct xe_vm *vm, int gt_id)
-{
- return 0;
-}
-#endif