/* SPDX-License-Identifier: MIT */
/*
 * Copyright © 2021 Intel Corporation
 */

#ifndef _XE_VM_H_
#define _XE_VM_H_

#include "xe_bo_types.h"
#include "xe_macros.h"
#include "xe_vm_types.h"

struct drm_device;
struct drm_printer;
struct drm_file;

struct ttm_buffer_object;
struct ttm_validate_buffer;

struct drm_exec;
struct xe_exec_queue;
struct xe_file;

struct xe_vm *xe_vm_create(struct xe_device *xe, u32 flags);

struct xe_vm *xe_vm_lookup(struct xe_file *xef, u32 id);
int xe_vma_cmp_vma_cb(const void *key, const struct rb_node *node);

static inline struct xe_vm *xe_vm_get(struct xe_vm *vm)
{
	drm_gpuvm_get(&vm->gpuvm);
	return vm;
}

static inline void xe_vm_put(struct xe_vm *vm)
{
	drm_gpuvm_put(&vm->gpuvm);
}

int xe_vm_lock(struct xe_vm *vm, bool intr);

void xe_vm_unlock(struct xe_vm *vm);
static inline bool xe_vm_is_closed(struct xe_vm *vm)
{
	/* Only guaranteed not to change when vm->lock is held */
	return !vm->size;
}

static inline bool xe_vm_is_banned(struct xe_vm *vm)
{
	return vm->flags & XE_VM_FLAG_BANNED;
}

static inline bool xe_vm_is_closed_or_banned(struct xe_vm *vm)
{
	lockdep_assert_held(&vm->lock);
	return xe_vm_is_closed(vm) || xe_vm_is_banned(vm);
}
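/*
 * Illustrative sketch (not part of this header): closed/banned state is only
 * guaranteed stable while vm->lock is held, so callers are expected to take
 * that lock (assumed here to be the rw_semaphore declared in xe_vm_types.h)
 * around the predicates above. example_vm_op() is a hypothetical helper:
 *
 *	static int example_vm_op(struct xe_vm *vm)
 *	{
 *		int err = 0;
 *
 *		down_read(&vm->lock);
 *		if (xe_vm_is_closed_or_banned(vm))
 *			err = -ENOENT;
 *		up_read(&vm->lock);
 *
 *		return err;
 *	}
 */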
struct xe_vma *
xe_vm_find_overlapping_vma(struct xe_vm *vm, u64 start, u64 range);

static inline struct xe_vm *gpuva_to_vm(struct drm_gpuva *gpuva)
{
	return container_of(gpuva->vm, struct xe_vm, gpuvm);
}

static inline struct xe_vma *gpuva_to_vma(struct drm_gpuva *gpuva)
{
	return container_of(gpuva, struct xe_vma, gpuva);
}

static inline struct xe_vma_op *gpuva_op_to_vma_op(struct drm_gpuva_op *op)
{
	return container_of(op, struct xe_vma_op, base);
}
/**
 * DOC: Provide accessors for vma members to facilitate easy change of
 * implementation.
 */
static inline u64 xe_vma_start(struct xe_vma *vma)
{
	return vma->gpuva.va.addr;
}

static inline u64 xe_vma_size(struct xe_vma *vma)
{
	return vma->gpuva.va.range;
}

static inline u64 xe_vma_end(struct xe_vma *vma)
{
	return xe_vma_start(vma) + xe_vma_size(vma);
}

static inline u64 xe_vma_bo_offset(struct xe_vma *vma)
{
	return vma->gpuva.gem.offset;
}

static inline struct xe_bo *xe_vma_bo(struct xe_vma *vma)
{
	return !vma->gpuva.gem.obj ? NULL :
		container_of(vma->gpuva.gem.obj, struct xe_bo, ttm.base);
}

static inline struct xe_vm *xe_vma_vm(struct xe_vma *vma)
{
	return container_of(vma->gpuva.vm, struct xe_vm, gpuvm);
}

static inline bool xe_vma_read_only(struct xe_vma *vma)
{
	return vma->gpuva.flags & XE_VMA_READ_ONLY;
}

static inline u64 xe_vma_userptr(struct xe_vma *vma)
{
	return vma->gpuva.gem.offset;
}

static inline bool xe_vma_is_null(struct xe_vma *vma)
{
	return vma->gpuva.flags & DRM_GPUVA_SPARSE;
}

static inline bool xe_vma_has_no_bo(struct xe_vma *vma)
{
	return !xe_vma_bo(vma);
}

static inline bool xe_vma_is_userptr(struct xe_vma *vma)
{
	return xe_vma_has_no_bo(vma) && !xe_vma_is_null(vma);
}
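/*
 * Illustrative sketch (not part of this header): per the DOC comment above,
 * callers should go through these accessors rather than dereferencing
 * vma->gpuva directly, so the underlying layout can change without touching
 * callers. example_vma_contains() is a hypothetical helper:
 *
 *	static inline bool example_vma_contains(struct xe_vma *vma,
 *						u64 addr, u64 size)
 *	{
 *		return addr >= xe_vma_start(vma) &&
 *		       addr + size <= xe_vma_end(vma);
 *	}
 */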
u64 xe_vm_pdp4_descriptor(struct xe_vm *vm, struct xe_tile *tile);

int xe_vm_create_ioctl(struct drm_device *dev, void *data,
		       struct drm_file *file);
int xe_vm_destroy_ioctl(struct drm_device *dev, void *data,
			struct drm_file *file);
int xe_vm_bind_ioctl(struct drm_device *dev, void *data,
		     struct drm_file *file);

void xe_vm_close_and_put(struct xe_vm *vm);

static inline bool xe_vm_in_compute_mode(struct xe_vm *vm)
{
	return vm->flags & XE_VM_FLAG_COMPUTE_MODE;
}

static inline bool xe_vm_in_fault_mode(struct xe_vm *vm)
{
	return vm->flags & XE_VM_FLAG_FAULT_MODE;
}

static inline bool xe_vm_no_dma_fences(struct xe_vm *vm)
{
	return xe_vm_in_compute_mode(vm) || xe_vm_in_fault_mode(vm);
}

int xe_vm_add_compute_exec_queue(struct xe_vm *vm, struct xe_exec_queue *q);

int xe_vm_userptr_pin(struct xe_vm *vm);

int __xe_vm_userptr_needs_repin(struct xe_vm *vm);

int xe_vm_userptr_check_repin(struct xe_vm *vm);

struct dma_fence *xe_vm_rebind(struct xe_vm *vm, bool rebind_worker);

int xe_vm_invalidate_vma(struct xe_vma *vma);

int xe_vm_async_fence_wait_start(struct dma_fence *fence);

extern struct ttm_device_funcs xe_ttm_funcs;

struct ttm_buffer_object *xe_vm_ttm_bo(struct xe_vm *vm);

static inline void xe_vm_queue_rebind_worker(struct xe_vm *vm)
{
	XE_WARN_ON(!xe_vm_in_compute_mode(vm));
	queue_work(vm->xe->ordered_wq, &vm->preempt.rebind_work);
}
/**
 * xe_vm_reactivate_rebind() - Reactivate the rebind functionality on compute
 * vms.
 * @vm: The vm.
 *
 * If the rebind functionality on a compute vm was disabled because there was
 * nothing to execute, reactivate it and run the rebind worker.
 * This function should be called after submitting a batch to a compute vm.
 */
static inline void xe_vm_reactivate_rebind(struct xe_vm *vm)
{
	if (xe_vm_in_compute_mode(vm) && vm->preempt.rebind_deactivated) {
		vm->preempt.rebind_deactivated = false;
		xe_vm_queue_rebind_worker(vm);
	}
}
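/*
 * Illustrative sketch (not part of this header): as documented above,
 * xe_vm_reactivate_rebind() is intended to be called after submitting a
 * batch to a compute-mode vm. example_submit_batch() is a hypothetical
 * helper; only the ordering relative to submission matters:
 *
 *	err = example_submit_batch(q, batch);
 *	if (!err)
 *		xe_vm_reactivate_rebind(vm);
 */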
int xe_vma_userptr_pin_pages(struct xe_vma *vma);

int xe_vma_userptr_check_repin(struct xe_vma *vma);

bool xe_vm_validate_should_retry(struct drm_exec *exec, int err, ktime_t *end);

int xe_vm_lock_dma_resv(struct xe_vm *vm, struct drm_exec *exec,
			unsigned int num_shared, bool lock_vm);

void xe_vm_fence_all_extobjs(struct xe_vm *vm, struct dma_fence *fence,
			     enum dma_resv_usage usage);

int xe_analyze_vm(struct drm_printer *p, struct xe_vm *vm, int gt_id);

int xe_vm_prepare_vma(struct drm_exec *exec, struct xe_vma *vma,
		      unsigned int num_shared);
/**
 * xe_vm_resv() - Returns the vm's reservation object
 * @vm: The vm
 *
 * Return: Pointer to the vm's reservation object.
 */
static inline struct dma_resv *xe_vm_resv(struct xe_vm *vm)
{
	return drm_gpuvm_resv(&vm->gpuvm);
}

/**
 * xe_vm_assert_held(vm) - Assert that the vm's reservation object is held.
 * @vm: The vm
 */
#define xe_vm_assert_held(vm) dma_resv_assert_held(xe_vm_resv(vm))
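/*
 * Illustrative sketch (not part of this header): a function that requires
 * the vm's reservation object to be locked can document that requirement
 * with xe_vm_assert_held(), typically after the caller has taken the lock
 * via xe_vm_lock() or a drm_exec loop. The helper below is hypothetical:
 *
 *	static void example_update_locked(struct xe_vm *vm)
 *	{
 *		xe_vm_assert_held(vm);
 *		... modify state protected by the vm's dma-resv ...
 *	}
 */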
#if IS_ENABLED(CONFIG_DRM_XE_DEBUG_VM)
#define vm_dbg drm_dbg
#else
__printf(2, 3)
static inline void vm_dbg(const struct drm_device *dev,
			  const char *format, ...)
{ /* noop */ }
#endif

#endif