1 /* SPDX-License-Identifier: MIT */
3 * Copyright © 2022 Intel Corporation
6 #ifndef _XE_VM_TYPES_H_
7 #define _XE_VM_TYPES_H_
9 #include <linux/dma-resv.h>
10 #include <linux/kref.h>
11 #include <linux/mmu_notifier.h>
12 #include <linux/scatterlist.h>
14 #include "xe_device_types.h"
15 #include "xe_pt_types.h"
21 struct rb_node vm_node;
22 /** @vm: VM which this VMA belongs to */
26 * @start: start address of this VMA within its address domain, end -
27 * start + 1 == VMA size
30 /** @end: end address of this VMA within its address domain */
32 /** @pte_flags: pte flags for this VMA */
35 /** @bo: BO if not a userptr, must be NULL if userptr */
37 /** @bo_offset: offset into BO if not a userptr, unused for userptr */
40 /** @tile_mask: Tile mask of where to create binding for this VMA */
44 * @tile_present: Tile mask of where bindings are present for this VMA.
45 * protected by vm->lock, vm->resv and for userptrs,
46 * vm->userptr.notifier_lock for writing. Needs either for reading,
47 * but if reading is done under the vm->lock only, it needs to be held
53 * @destroyed: VMA is destroyed, in the sense that it shouldn't be
54 * subject to rebind anymore. This field must be written under
55 * the vm lock in write mode and the userptr.notifier_lock in
56 * either mode. Read under the vm lock or the userptr.notifier_lock in
62 * @first_munmap_rebind: VMA is first in a sequence of ops that triggers
63 * a rebind (munmap style VM unbinds). This indicates the operation
64 * using this VMA must wait on all dma-resv slots (wait for pending jobs
65 * / trigger preempt fences).
67 bool first_munmap_rebind;
70 * @last_munmap_rebind: VMA is last in a sequence of ops that triggers
71 * a rebind (munmap style VM unbinds). This indicates the operation
72 * using this VMA must install itself into kernel dma-resv slot (blocks
73 * future jobs) and kick the rebind work in compute mode.
75 bool last_munmap_rebind;
77 /** @use_atomic_access_pte_bit: Set atomic access bit in PTE */
78 bool use_atomic_access_pte_bit;
81 /** @bo_link: link into BO if not a userptr */
82 struct list_head bo_link;
83 /** @userptr_link: link into VM repin list if userptr */
84 struct list_head userptr_link;
88 * @rebind_link: link into VM if this VMA needs rebinding, and
89 * if it's a bo (not userptr) needs validation after a possible
90 * eviction. Protected by the vm's resv lock.
92 struct list_head rebind_link;
95 * @unbind_link: link or list head if an unbind of multiple VMAs, in
96 * single unbind op, is being done.
98 struct list_head unbind_link;
100 /** @destroy_cb: callback to destroy VMA when unbind job is done */
101 struct dma_fence_cb destroy_cb;
103 /** @destroy_work: worker to destroy this VMA */
104 struct work_struct destroy_work;
106 /** @userptr: user pointer state */
108 /** @ptr: user pointer */
110 /** @invalidate_link: Link for the vm::userptr.invalidated list */
111 struct list_head invalidate_link;
113 * @notifier: MMU notifier for user pointer (invalidation callback)
115 struct mmu_interval_notifier notifier;
116 /** @sgt: storage for a scatter gather table */
118 /** @sg: allocated scatter gather table */
120 /** @notifier_seq: notifier sequence number */
121 unsigned long notifier_seq;
123 * @initial_bind: user pointer has been bound at least once.
124 * write: vm->userptr.notifier_lock in read mode and vm->resv held.
125 * read: vm->userptr.notifier_lock in write mode or vm->resv held.
128 #if IS_ENABLED(CONFIG_DRM_XE_USERPTR_INVAL_INJECT)
133 /** @usm: unified shared memory state */
135 /** @tile_invalidated: VMA has been invalidated */
136 u64 tile_invalidated;
140 struct list_head rebind_link;
145 * @extobj.link: Link into vm's external object list.
146 * protected by the vm lock.
148 struct list_head link;
154 #define xe_vm_assert_held(vm) dma_resv_assert_held(&(vm)->resv)
157 struct xe_device *xe;
159 struct kref refcount;
161 /* engine used for (un)binding vma's */
162 struct xe_engine *eng[XE_MAX_TILES_PER_DEVICE];
164 /** Protects @rebind_list and the page-table structures */
165 struct dma_resv resv;
170 struct xe_pt *pt_root[XE_MAX_TILES_PER_DEVICE];
171 struct xe_bo *scratch_bo[XE_MAX_TILES_PER_DEVICE];
172 struct xe_pt *scratch_pt[XE_MAX_TILES_PER_DEVICE][XE_VM_MAX_LEVEL];
174 /** @flags: flags for this VM, statically set up at creation time */
175 #define XE_VM_FLAGS_64K BIT(0)
176 #define XE_VM_FLAG_COMPUTE_MODE BIT(1)
177 #define XE_VM_FLAG_ASYNC_BIND_OPS BIT(2)
178 #define XE_VM_FLAG_MIGRATION BIT(3)
179 #define XE_VM_FLAG_SCRATCH_PAGE BIT(4)
180 #define XE_VM_FLAG_FAULT_MODE BIT(5)
181 #define XE_VM_FLAG_GT_ID(flags) (((flags) >> 6) & 0x3)
182 #define XE_VM_FLAG_SET_TILE_ID(tile) ((tile)->id << 6)
185 /** @composite_fence_ctx: context composite fence */
186 u64 composite_fence_ctx;
187 /** @composite_fence_seqno: seqno for composite fence */
188 u32 composite_fence_seqno;
191 * @lock: outer most lock, protects objects of anything attached to this
194 struct rw_semaphore lock;
197 * @rebind_list: list of VMAs that need rebinding, and if they are
198 * bos (not userptr), need validation after a possible eviction. The
199 * list is protected by @resv.
201 struct list_head rebind_list;
203 /** @rebind_fence: rebind fence from execbuf */
204 struct dma_fence *rebind_fence;
207 * @destroy_work: worker to destroy VM, needed as a dma_fence signaling
208 * from an irq context can be last put and the destroy needs to be able
211 struct work_struct destroy_work;
213 /** @extobj: bookkeeping for external objects. Protected by the vm lock */
215 /** @entries: number of external BOs attached to this VM */
217 /** @list: list of vmas with external bos attached */
218 struct list_head list;
221 /** @async_ops: async VM operations (bind / unbinds) */
223 /** @list: list of pending async VM ops */
224 struct list_head pending;
225 /** @work: worker to execute async VM ops */
226 struct work_struct work;
227 /** @lock: protects list of pending async VM ops and fences */
229 /** @error_capture: error capture state */
232 struct mm_struct *mm;
234 * @addr: user pointer to copy error capture state to
237 /** @wq: user fence wait queue for VM errors */
238 wait_queue_head_t wq;
240 /** @fence: fence state */
242 /** @context: context of async fence */
244 /** @seqno: seqno of async fence */
247 /** @error: error state for async VM ops */
250 * @munmap_rebind_inflight: an munmap style VM bind is in the
251 * middle of a set of ops which requires a rebind at the end.
253 bool munmap_rebind_inflight;
256 /** @userptr: user pointer state */
259 * @userptr.repin_list: list of VMAs which are user pointers,
260 * and needs repinning. Protected by @lock.
262 struct list_head repin_list;
264 * @notifier_lock: protects notifier in write mode and
265 * submission in read mode.
267 struct rw_semaphore notifier_lock;
269 * @userptr.invalidated_lock: Protects the
270 * @userptr.invalidated list.
272 spinlock_t invalidated_lock;
274 * @userptr.invalidated: List of invalidated userptrs, not yet
276 * up for revalidation. Protected from access with the
277 * @invalidated_lock. Removing items from the list
278 * additionally requires @lock in write mode, and adding
279 * items to the list requires the @userptr.notifier_lock in
282 struct list_head invalidated;
285 /** @preempt: preempt state */
288 * @min_run_period_ms: The minimum run period before preempting
291 s64 min_run_period_ms;
292 /** @engines: list of engines attached to this VM */
293 struct list_head engines;
294 /** @num_engines: number of user engines attached to this VM */
297 * @rebind_deactivated: Whether rebind has been temporarily deactivated
298 * due to no work available. Protected by the vm resv.
300 bool rebind_deactivated;
302 * @rebind_work: worker to rebind invalidated userptrs / evicted
305 struct work_struct rebind_work;
308 /** @um: unified memory state */
310 /** @asid: address space ID, unique to each VM */
313 * @last_fault_vma: Last fault VMA, used for fast lookup when we
314 * get a flood of faults to the same VMA
316 struct xe_vma *last_fault_vma;
320 * @notifier: Lists and locks for temporary usage within notifiers where
321 * we either can't grab the vm lock or the vm resv.
324 /** @notifier.list_lock: lock protecting @rebind_list */
325 spinlock_t list_lock;
327 * @notifier.rebind_list: list of vmas that we want to put on the
328 * main @rebind_list. This list is protected for writing by both
329 * notifier.list_lock, and the resv of the bo the vma points to,
330 * and for reading by the notifier.list_lock only.
332 struct list_head rebind_list;
335 /** @error_capture: allow to track errors */
337 /** @capture_once: capture only one error per VM */