From 5ef091fc32a4fe7116a4ecc778369f161de9c11a Mon Sep 17 00:00:00 2001
From: Matthew Brost
Date: Sun, 13 Aug 2023 20:19:20 -0700
Subject: [PATCH] drm/xe: Fixup unwind on VM ops errors

Remap ops have 3 parts: unmap, prev, and next. The commit step can fail
on any of these. Add a flag for each of these so the unwind is only done
for the steps that have actually been committed.

v2: (Rodrigo) Use bit macros

Reviewed-by: Rodrigo Vivi
Signed-off-by: Matthew Brost
Signed-off-by: Rodrigo Vivi
---
 drivers/gpu/drm/xe/xe_vm.c       | 24 +++++++++++++++++-------
 drivers/gpu/drm/xe/xe_vm_types.h | 10 +++++++---
 2 files changed, 24 insertions(+), 10 deletions(-)

diff --git a/drivers/gpu/drm/xe/xe_vm.c b/drivers/gpu/drm/xe/xe_vm.c
index a774f9632dda..71f61806df77 100644
--- a/drivers/gpu/drm/xe/xe_vm.c
+++ b/drivers/gpu/drm/xe/xe_vm.c
@@ -2622,18 +2622,25 @@ static int xe_vma_op_commit(struct xe_vm *vm, struct xe_vma_op *op)
 	switch (op->base.op) {
 	case DRM_GPUVA_OP_MAP:
 		err |= xe_vm_insert_vma(vm, op->map.vma);
+		if (!err)
+			op->flags |= XE_VMA_OP_COMMITTED;
 		break;
 	case DRM_GPUVA_OP_REMAP:
 		prep_vma_destroy(vm, gpuva_to_vma(op->base.remap.unmap->va),
 				 true);
+		op->flags |= XE_VMA_OP_COMMITTED;
 
 		if (op->remap.prev) {
 			err |= xe_vm_insert_vma(vm, op->remap.prev);
+			if (!err)
+				op->flags |= XE_VMA_OP_PREV_COMMITTED;
 			if (!err && op->remap.skip_prev)
 				op->remap.prev = NULL;
 		}
 		if (op->remap.next) {
 			err |= xe_vm_insert_vma(vm, op->remap.next);
+			if (!err)
+				op->flags |= XE_VMA_OP_NEXT_COMMITTED;
 			if (!err && op->remap.skip_next)
 				op->remap.next = NULL;
 		}
@@ -2646,15 +2653,15 @@ static int xe_vma_op_commit(struct xe_vm *vm, struct xe_vma_op *op)
 		break;
 	case DRM_GPUVA_OP_UNMAP:
 		prep_vma_destroy(vm, gpuva_to_vma(op->base.unmap.va), true);
+		op->flags |= XE_VMA_OP_COMMITTED;
 		break;
 	case DRM_GPUVA_OP_PREFETCH:
-		/* Nothing to do */
+		op->flags |= XE_VMA_OP_COMMITTED;
 		break;
 	default:
 		XE_WARN_ON("NOT POSSIBLE");
 	}
 
-	op->flags |= XE_VMA_OP_COMMITTED;
 
 	return err;
 }
@@ -2859,7 +2866,8 @@ static void xe_vma_op_cleanup(struct xe_vm *vm, struct xe_vma_op *op)
 }
 
 static void xe_vma_op_unwind(struct xe_vm *vm, struct xe_vma_op *op,
-			     bool post_commit)
+			     bool post_commit, bool prev_post_commit,
+			     bool next_post_commit)
 {
 	lockdep_assert_held_write(&vm->lock);
 
@@ -2886,11 +2894,11 @@ static void xe_vma_op_unwind(struct xe_vm *vm, struct xe_vma_op *op,
 		struct xe_vma *vma = gpuva_to_vma(op->base.remap.unmap->va);
 
 		if (op->remap.prev) {
-			prep_vma_destroy(vm, op->remap.prev, post_commit);
+			prep_vma_destroy(vm, op->remap.prev, prev_post_commit);
 			xe_vma_destroy_unlocked(op->remap.prev);
 		}
 		if (op->remap.next) {
-			prep_vma_destroy(vm, op->remap.next, post_commit);
+			prep_vma_destroy(vm, op->remap.next, next_post_commit);
 			xe_vma_destroy_unlocked(op->remap.next);
 		}
 		down_read(&vm->userptr.notifier_lock);
@@ -3029,7 +3037,9 @@ static int vm_bind_ioctl_ops_commit(struct xe_vm *vm,
 
 unwind:
 	list_for_each_entry_reverse(op, ops_list, link)
-		xe_vma_op_unwind(vm, op, op->flags & XE_VMA_OP_COMMITTED);
+		xe_vma_op_unwind(vm, op, op->flags & XE_VMA_OP_COMMITTED,
+				 op->flags & XE_VMA_OP_PREV_COMMITTED,
+				 op->flags & XE_VMA_OP_NEXT_COMMITTED);
 	list_for_each_entry_safe(op, next, ops_list, link)
 		xe_vma_op_cleanup(vm, op);
 
@@ -3056,7 +3066,7 @@ static void vm_bind_ioctl_ops_unwind(struct xe_vm *vm,
 		drm_gpuva_for_each_op(__op, __ops) {
 			struct xe_vma_op *op = gpuva_op_to_vma_op(__op);
 
-			xe_vma_op_unwind(vm, op, false);
+			xe_vma_op_unwind(vm, op, false, false, false);
 		}
 	}
 }
diff --git a/drivers/gpu/drm/xe/xe_vm_types.h b/drivers/gpu/drm/xe/xe_vm_types.h
index 40ce8953bacb..dfbc53e56a86 100644
--- a/drivers/gpu/drm/xe/xe_vm_types.h
+++ b/drivers/gpu/drm/xe/xe_vm_types.h
@@ -370,11 +370,15 @@ struct xe_vma_op_prefetch {
 /** enum xe_vma_op_flags - flags for VMA operation */
 enum xe_vma_op_flags {
 	/** @XE_VMA_OP_FIRST: first VMA operation for a set of syncs */
-	XE_VMA_OP_FIRST		= BIT(0),
+	XE_VMA_OP_FIRST			= BIT(0),
 	/** @XE_VMA_OP_LAST: last VMA operation for a set of syncs */
-	XE_VMA_OP_LAST		= BIT(1),
+	XE_VMA_OP_LAST			= BIT(1),
 	/** @XE_VMA_OP_COMMITTED: VMA operation committed */
-	XE_VMA_OP_COMMITTED	= BIT(2),
+	XE_VMA_OP_COMMITTED		= BIT(2),
+	/** @XE_VMA_OP_PREV_COMMITTED: Previous VMA operation committed */
+	XE_VMA_OP_PREV_COMMITTED	= BIT(3),
+	/** @XE_VMA_OP_NEXT_COMMITTED: Next VMA operation committed */
+	XE_VMA_OP_NEXT_COMMITTED	= BIT(4),
 };
 
 /** struct xe_vma_op - VMA operation */
-- 
2.20.1
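Editor's note (not part of the patch): below is a minimal, standalone sketch of the commit/unwind
pattern the commit message describes. The types and helpers (remap_op, insert_part, commit, unwind)
are hypothetical placeholders; only the idea of one committed bit per remap piece mirrors the real
XE_VMA_OP_COMMITTED / XE_VMA_OP_PREV_COMMITTED / XE_VMA_OP_NEXT_COMMITTED flags, and the actual
driver logic is the diff above.

#include <stdbool.h>
#include <stdio.h>

/* Hypothetical, simplified stand-ins for the driver's per-operation state. */
enum {
	OP_COMMITTED      = 1U << 0,
	OP_PREV_COMMITTED = 1U << 1,
	OP_NEXT_COMMITTED = 1U << 2,
};

struct remap_op {
	unsigned int flags;
	bool has_prev;
	bool has_next;
};

/* Placeholder for an insertion step that may fail; returns 0 on success. */
static int insert_part(bool fail)
{
	return fail ? -1 : 0;
}

/* Commit each part of the remap, marking only the parts that succeeded. */
static int commit(struct remap_op *op, bool fail_prev, bool fail_next)
{
	int err = 0;

	op->flags |= OP_COMMITTED;	/* the unmap part is committed first */

	if (op->has_prev) {
		err |= insert_part(fail_prev);
		if (!err)
			op->flags |= OP_PREV_COMMITTED;
	}
	if (op->has_next) {
		err |= insert_part(fail_next);
		if (!err)
			op->flags |= OP_NEXT_COMMITTED;
	}

	return err;
}

/* Unwind only what the per-part flags say was actually committed. */
static void unwind(const struct remap_op *op)
{
	if (op->flags & OP_NEXT_COMMITTED)
		printf("undo next\n");
	if (op->flags & OP_PREV_COMMITTED)
		printf("undo prev\n");
	if (op->flags & OP_COMMITTED)
		printf("undo unmap\n");
}

int main(void)
{
	struct remap_op op = { .flags = 0, .has_prev = true, .has_next = true };

	/* Fail while committing "next": only the unmap and prev pieces need undoing. */
	if (commit(&op, false, true))
		unwind(&op);

	return 0;
}

Keeping one bit per remap piece lets the error path reverse exactly the pieces that made it into
the VM, instead of inferring everything from a single committed flag.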