1 /* SPDX-License-Identifier: MIT */
/*
 * Copyright © 2021 Intel Corporation
 */
11 #include "xe_vm_types.h"
17 struct ttm_buffer_object;
18 struct ttm_validate_buffer;
/* VM lifecycle: creation, kref release, and lookup by id. */
24 struct xe_vm *xe_vm_create(struct xe_device *xe, u32 flags);
/* kref release callback — reached via xe_vm_put(); do not call directly. */
25 void xe_vm_free(struct kref *ref);
/* NOTE(review): presumably maps a userspace VM id to a VM for @xef — confirm
 * in xe_vm.c whether a reference is taken on the returned VM.
 */
27 struct xe_vm *xe_vm_lookup(struct xe_file *xef, u32 id);
/* rb-tree search comparison callback (key vs. node embedded in a VMA). */
28 int xe_vma_cmp_vma_cb(const void *key, const struct rb_node *node);
30 static inline struct xe_vm *xe_vm_get(struct xe_vm *vm)
32 kref_get(&vm->refcount);
36 static inline void xe_vm_put(struct xe_vm *vm)
38 kref_put(&vm->refcount, xe_vm_free);
/*
 * Lock/unlock the VM using a ww_acquire_ctx.
 * NOTE(review): @intr presumably selects interruptible waits and @num_resv
 * the number of fence slots to reserve — confirm against the xe_vm.c
 * implementation before relying on either.
 */
41 int xe_vm_lock(struct xe_vm *vm, struct ww_acquire_ctx *ww,
42 int num_resv, bool intr);
44 void xe_vm_unlock(struct xe_vm *vm, struct ww_acquire_ctx *ww);
46 static inline bool xe_vm_is_closed(struct xe_vm *vm)
48 /* Only guaranteed not to change when vm->resv is held */
/* NOTE(review): the return-type line of this declaration (likely
 * "struct xe_vma *" on the preceding line) was dropped in the paste —
 * restore it.
 */
53 xe_vm_find_overlapping_vma(struct xe_vm *vm, const struct xe_vma *vma);
/* Lockdep-style assertion that the VM's reservation object is held. */
55 #define xe_vm_assert_held(vm) dma_resv_assert_held(&(vm)->resv)
/* NOTE(review): presumably the PDP4 page-directory descriptor for @full_gt —
 * confirm in the page-table code.
 */
57 u64 xe_vm_pdp4_descriptor(struct xe_vm *vm, struct xe_gt *full_gt);
/* ioctl entry points (signatures follow the DRM ioctl handler convention). */
59 int xe_vm_create_ioctl(struct drm_device *dev, void *data,
60 struct drm_file *file);
61 int xe_vm_destroy_ioctl(struct drm_device *dev, void *data,
62 struct drm_file *file);
63 int xe_vm_bind_ioctl(struct drm_device *dev, void *data,
64 struct drm_file *file);
/* NOTE(review): name suggests close followed by a final xe_vm_put() —
 * confirm in xe_vm.c. */
66 void xe_vm_close_and_put(struct xe_vm *vm);
68 static inline bool xe_vm_in_compute_mode(struct xe_vm *vm)
70 return vm->flags & XE_VM_FLAG_COMPUTE_MODE;
73 static inline bool xe_vm_in_fault_mode(struct xe_vm *vm)
75 return vm->flags & XE_VM_FLAG_FAULT_MODE;
78 static inline bool xe_vm_no_dma_fences(struct xe_vm *vm)
80 return xe_vm_in_compute_mode(vm) || xe_vm_in_fault_mode(vm);
83 int xe_vm_add_compute_engine(struct xe_vm *vm, struct xe_engine *e);
/* VM-wide userptr pin / repin checks. NOTE(review): presumably return 0 on
 * success or a negative errno, per kernel convention — confirm in xe_vm.c.
 */
85 int xe_vm_userptr_pin(struct xe_vm *vm);
87 int __xe_vm_userptr_needs_repin(struct xe_vm *vm);
89 int xe_vm_userptr_check_repin(struct xe_vm *vm);
91 struct dma_fence *xe_vm_rebind(struct xe_vm *vm, bool rebind_worker);
93 int xe_vm_invalidate_vma(struct xe_vma *vma);
95 int xe_vm_async_fence_wait_start(struct dma_fence *fence);
/* TTM device callback table; defined in a .c file elsewhere in the driver. */
97 extern struct ttm_device_funcs xe_ttm_funcs;
99 struct ttm_buffer_object *xe_vm_ttm_bo(struct xe_vm *vm);
101 static inline bool xe_vma_is_userptr(struct xe_vma *vma)
/* Per-VMA userptr pin / repin check. */
106 int xe_vma_userptr_pin_pages(struct xe_vma *vma);
108 int xe_vma_userptr_check_repin(struct xe_vma *vma);
/*
 * XE_ONSTACK_TV is used to size the tv_onstack array that is input
 * to xe_vm_lock_dma_resv() and xe_vm_unlock_dma_resv().
 */
114 #define XE_ONSTACK_TV 20
/* NOTE(review): a parameter line appears to have been dropped between
 * "struct list_head *objs," and "unsigned int num_shared" (original line
 * 119, possibly "bool intr,") — restore it.
 */
115 int xe_vm_lock_dma_resv(struct xe_vm *vm, struct ww_acquire_ctx *ww,
116 struct ttm_validate_buffer *tv_onstack,
117 struct ttm_validate_buffer **tv,
118 struct list_head *objs,
120 unsigned int num_shared);
122 void xe_vm_unlock_dma_resv(struct xe_vm *vm,
123 struct ttm_validate_buffer *tv_onstack,
124 struct ttm_validate_buffer *tv,
125 struct ww_acquire_ctx *ww,
126 struct list_head *objs);
/* NOTE(review): name/signature suggest adding @fence with @usage to the
 * reservation objects of all external objects — confirm in xe_vm.c.
 */
128 void xe_vm_fence_all_extobjs(struct xe_vm *vm, struct dma_fence *fence,
129 enum dma_resv_usage usage);
/* NOTE(review): appears to dump VM state into @p (drm_printer) — confirm. */
131 int xe_analyze_vm(struct drm_printer *p, struct xe_vm *vm, int gt_id);
133 #if IS_ENABLED(CONFIG_DRM_XE_DEBUG_VM)
134 #define vm_dbg drm_dbg
137 static inline void vm_dbg(const struct drm_device *dev,
138 const char *format, ...)