/* SPDX-License-Identifier: MIT */
/*
 * Copyright © 2021 Intel Corporation
 */

#ifndef _XE_VM_H_
#define _XE_VM_H_

#include "xe_assert.h"
#include "xe_bo_types.h"
#include "xe_macros.h"
#include "xe_vm_types.h"

struct drm_device;
struct drm_exec;
struct drm_file;
struct drm_printer;

struct ttm_buffer_object;
struct ttm_validate_buffer;

struct xe_exec_queue;
struct xe_file;
struct xe_tile;

struct xe_vm *xe_vm_create(struct xe_device *xe, u32 flags);

struct xe_vm *xe_vm_lookup(struct xe_file *xef, u32 id);
int xe_vma_cmp_vma_cb(const void *key, const struct rb_node *node);

static inline struct xe_vm *xe_vm_get(struct xe_vm *vm)
{
	drm_gpuvm_get(&vm->gpuvm);
	return vm;
}

static inline void xe_vm_put(struct xe_vm *vm)
{
	drm_gpuvm_put(&vm->gpuvm);
}

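/*
 * Usage sketch (illustrative only; the caller context is an assumption):
 * xe_vm_get() returns the vm so it can be used inline, and every get must
 * be balanced with an xe_vm_put() once the reference is no longer needed:
 *
 *	struct xe_vm *vm = xe_vm_get(q->vm);
 *
 *	... use vm without fear of it being freed ...
 *	xe_vm_put(vm);
 */
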
int xe_vm_lock(struct xe_vm *vm, bool intr);

void xe_vm_unlock(struct xe_vm *vm);

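/*
 * Usage sketch (assumes process context; with @intr == true the lock
 * attempt may be interrupted and return a nonzero error that the caller
 * must propagate):
 *
 *	int err = xe_vm_lock(vm, true);
 *
 *	if (err)
 *		return err;
 *	... operate on state protected by the vm's reservation object ...
 *	xe_vm_unlock(vm);
 */
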
static inline bool xe_vm_is_closed(struct xe_vm *vm)
{
	/* Only guaranteed not to change when vm->lock is held */
	return !vm->size;
}

static inline bool xe_vm_is_banned(struct xe_vm *vm)
{
	return vm->flags & XE_VM_FLAG_BANNED;
}

static inline bool xe_vm_is_closed_or_banned(struct xe_vm *vm)
{
	lockdep_assert_held(&vm->lock);
	return xe_vm_is_closed(vm) || xe_vm_is_banned(vm);
}

struct xe_vma *
xe_vm_find_overlapping_vma(struct xe_vm *vm, u64 start, u64 range);

/**
 * xe_vm_has_scratch() - Whether the vm is configured for scratch PTEs
 * @vm: The vm
 *
 * Return: whether the vm populates unmapped areas with scratch PTEs
 */
static inline bool xe_vm_has_scratch(const struct xe_vm *vm)
{
	return vm->flags & XE_VM_FLAG_SCRATCH_PAGE;
}

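/*
 * Example (sketch; the surrounding page-table code and the
 * fill_with_scratch() helper are hypothetical): binding code can branch on
 * this to decide whether holes in the address space point at scratch pages
 * rather than faulting:
 *
 *	if (xe_vm_has_scratch(vm))
 *		fill_with_scratch(vm, hole_start, hole_end);
 */
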
/**
 * gpuvm_to_vm() - Return the embedding xe_vm from a struct drm_gpuvm pointer
 * @gpuvm: The struct drm_gpuvm pointer
 *
 * Return: Pointer to the embedding struct xe_vm.
 */
static inline struct xe_vm *gpuvm_to_vm(struct drm_gpuvm *gpuvm)
{
	return container_of(gpuvm, struct xe_vm, gpuvm);
}

static inline struct xe_vm *gpuva_to_vm(struct drm_gpuva *gpuva)
{
	return gpuvm_to_vm(gpuva->vm);
}

static inline struct xe_vma *gpuva_to_vma(struct drm_gpuva *gpuva)
{
	return container_of(gpuva, struct xe_vma, gpuva);
}

static inline struct xe_vma_op *gpuva_op_to_vma_op(struct drm_gpuva_op *op)
{
	return container_of(op, struct xe_vma_op, base);
}

/**
 * DOC: Provide accessors for vma members to facilitate easy change of
 * implementation details.
 */

static inline u64 xe_vma_start(struct xe_vma *vma)
{
	return vma->gpuva.va.addr;
}

static inline u64 xe_vma_size(struct xe_vma *vma)
{
	return vma->gpuva.va.range;
}

static inline u64 xe_vma_end(struct xe_vma *vma)
{
	return xe_vma_start(vma) + xe_vma_size(vma);
}

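/*
 * Example (sketch, with a caller-provided [addr, addr + range) window):
 * going through the accessors keeps range arithmetic independent of the
 * underlying drm_gpuva representation:
 *
 *	bool overlaps = addr < xe_vma_end(vma) &&
 *			addr + range > xe_vma_start(vma);
 */
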
static inline u64 xe_vma_bo_offset(struct xe_vma *vma)
{
	return vma->gpuva.gem.offset;
}

static inline struct xe_bo *xe_vma_bo(struct xe_vma *vma)
{
	return !vma->gpuva.gem.obj ? NULL :
		container_of(vma->gpuva.gem.obj, struct xe_bo, ttm.base);
}

static inline struct xe_vm *xe_vma_vm(struct xe_vma *vma)
{
	return container_of(vma->gpuva.vm, struct xe_vm, gpuvm);
}

static inline bool xe_vma_read_only(struct xe_vma *vma)
{
	return vma->gpuva.flags & XE_VMA_READ_ONLY;
}

static inline u64 xe_vma_userptr(struct xe_vma *vma)
{
	return vma->gpuva.gem.offset;
}

static inline bool xe_vma_is_null(struct xe_vma *vma)
{
	return vma->gpuva.flags & DRM_GPUVA_SPARSE;
}

static inline bool xe_vma_has_no_bo(struct xe_vma *vma)
{
	return !xe_vma_bo(vma);
}

static inline bool xe_vma_is_userptr(struct xe_vma *vma)
{
	return xe_vma_has_no_bo(vma) && !xe_vma_is_null(vma);
}

/**
 * to_userptr_vma() - Return a pointer to an embedding userptr vma
 * @vma: Pointer to the embedded struct xe_vma
 *
 * Return: Pointer to the embedding userptr vma
 */
static inline struct xe_userptr_vma *to_userptr_vma(struct xe_vma *vma)
{
	xe_assert(xe_vma_vm(vma)->xe, xe_vma_is_userptr(vma));
	return container_of(vma, struct xe_userptr_vma, vma);
}

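/*
 * Example (sketch): check the vma kind before converting, since
 * to_userptr_vma() asserts that the vma actually is a userptr vma:
 *
 *	if (xe_vma_is_userptr(vma)) {
 *		struct xe_userptr_vma *uvma = to_userptr_vma(vma);
 *
 *		... operate on the embedding userptr vma ...
 *	}
 */
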
u64 xe_vm_pdp4_descriptor(struct xe_vm *vm, struct xe_tile *tile);

int xe_vm_create_ioctl(struct drm_device *dev, void *data,
		       struct drm_file *file);
int xe_vm_destroy_ioctl(struct drm_device *dev, void *data,
			struct drm_file *file);
int xe_vm_bind_ioctl(struct drm_device *dev, void *data,
		     struct drm_file *file);

void xe_vm_close_and_put(struct xe_vm *vm);

static inline bool xe_vm_in_fault_mode(struct xe_vm *vm)
{
	return vm->flags & XE_VM_FLAG_FAULT_MODE;
}

static inline bool xe_vm_in_lr_mode(struct xe_vm *vm)
{
	return vm->flags & XE_VM_FLAG_LR_MODE;
}

static inline bool xe_vm_in_preempt_fence_mode(struct xe_vm *vm)
{
	return xe_vm_in_lr_mode(vm) && !xe_vm_in_fault_mode(vm);
}

int xe_vm_add_compute_exec_queue(struct xe_vm *vm, struct xe_exec_queue *q);
void xe_vm_remove_compute_exec_queue(struct xe_vm *vm, struct xe_exec_queue *q);

int xe_vm_userptr_pin(struct xe_vm *vm);

int __xe_vm_userptr_needs_repin(struct xe_vm *vm);

int xe_vm_userptr_check_repin(struct xe_vm *vm);

int xe_vm_rebind(struct xe_vm *vm, bool rebind_worker);
struct dma_fence *xe_vma_rebind(struct xe_vm *vm, struct xe_vma *vma,
				u8 tile_mask);

int xe_vm_invalidate_vma(struct xe_vma *vma);

static inline void xe_vm_queue_rebind_worker(struct xe_vm *vm)
{
	xe_assert(vm->xe, xe_vm_in_preempt_fence_mode(vm));
	queue_work(vm->xe->ordered_wq, &vm->preempt.rebind_work);
}

/**
 * xe_vm_reactivate_rebind() - Reactivate the rebind functionality on compute
 * vms.
 * @vm: The vm.
 *
 * If the rebind functionality on a compute vm was disabled because there
 * was nothing to execute, reactivate it and run the rebind worker.
 * This function should be called after submitting a batch to a compute vm.
 */
static inline void xe_vm_reactivate_rebind(struct xe_vm *vm)
{
	if (xe_vm_in_preempt_fence_mode(vm) && vm->preempt.rebind_deactivated) {
		vm->preempt.rebind_deactivated = false;
		xe_vm_queue_rebind_worker(vm);
	}
}

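/*
 * Usage sketch (submit_batch() is a hypothetical stand-in for the caller's
 * submission step): a typical exec path reactivates rebinding right after
 * pushing work to a compute vm:
 *
 *	err = submit_batch(q, batch_addr);
 *	if (!err)
 *		xe_vm_reactivate_rebind(vm);
 */
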
int xe_vma_userptr_pin_pages(struct xe_userptr_vma *uvma);

int xe_vma_userptr_check_repin(struct xe_userptr_vma *uvma);

bool xe_vm_validate_should_retry(struct drm_exec *exec, int err, ktime_t *end);

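/*
 * Usage sketch (revalidate() is a hypothetical stand-in for the caller's
 * validation step): retry validation for as long as this helper says the
 * error is transient and the retry budget has not expired:
 *
 *	ktime_t end = 0;
 *	int err;
 *
 *	do {
 *		err = revalidate(vm, exec);
 *	} while (err && xe_vm_validate_should_retry(exec, err, &end));
 */
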
int xe_vm_lock_vma(struct drm_exec *exec, struct xe_vma *vma);

int xe_vm_validate_rebind(struct xe_vm *vm, struct drm_exec *exec,
			  unsigned int num_fences);

/**
 * xe_vm_resv() - Returns the vm's reservation object
 * @vm: The vm
 *
 * Return: Pointer to the vm's reservation object.
 */
static inline struct dma_resv *xe_vm_resv(struct xe_vm *vm)
{
	return drm_gpuvm_resv(&vm->gpuvm);
}

void xe_vm_kill(struct xe_vm *vm, bool unlocked);

/**
 * xe_vm_assert_held(vm) - Assert that the vm's reservation object is held.
 * @vm: The vm
 */
#define xe_vm_assert_held(vm) dma_resv_assert_held(xe_vm_resv(vm))

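/*
 * Example (sketch; assumes the caller already holds the vm's reservation
 * object, e.g. via xe_vm_lock() or drm_exec): assert the lock and then add
 * a fence to the vm's reservation object:
 *
 *	xe_vm_assert_held(vm);
 *	dma_resv_add_fence(xe_vm_resv(vm), fence, DMA_RESV_USAGE_BOOKKEEP);
 */
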
#if IS_ENABLED(CONFIG_DRM_XE_DEBUG_VM)
#define vm_dbg drm_dbg
#else
__printf(2, 3)
static inline void vm_dbg(const struct drm_device *dev,
			  const char *format, ...)
{ /* noop */ }
#endif

struct xe_vm_snapshot *xe_vm_snapshot_capture(struct xe_vm *vm);
void xe_vm_snapshot_capture_delayed(struct xe_vm_snapshot *snap);
void xe_vm_snapshot_print(struct xe_vm_snapshot *snap, struct drm_printer *p);
void xe_vm_snapshot_free(struct xe_vm_snapshot *snap);

#endif