/* SPDX-License-Identifier: GPL-2.0 OR MIT */

#ifndef __DRM_GPUVM_H__
#define __DRM_GPUVM_H__

/*
 * Copyright (c) 2022 Red Hat.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 */

#include <linux/dma-resv.h>
#include <linux/list.h>
#include <linux/rbtree.h>
#include <linux/types.h>

#include <drm/drm_device.h>
#include <drm/drm_gem.h>
#include <drm/drm_exec.h>
 * enum drm_gpuva_flags - flags for struct drm_gpuva
enum drm_gpuva_flags {
 * @DRM_GPUVA_INVALIDATED:
 * Flag indicating that the &drm_gpuva's backing GEM is invalidated.
	DRM_GPUVA_INVALIDATED = (1 << 0),
 * Flag indicating that the &drm_gpuva is a sparse mapping.
	DRM_GPUVA_SPARSE = (1 << 1),
 * @DRM_GPUVA_USERBITS: user defined bits
	DRM_GPUVA_USERBITS = (1 << 2),
 * struct drm_gpuva - structure to track a GPU VA mapping
 * This structure represents a GPU VA mapping and is associated with a
 * &drm_gpuvm.
 * Typically, this structure is embedded in bigger driver structures.
 * @vm: the &drm_gpuvm this object is associated with
 * @vm_bo: the &drm_gpuvm_bo abstraction for the mapped
 * &drm_gem_object
	struct drm_gpuvm_bo *vm_bo;
 * @flags: the &drm_gpuva_flags for this mapping
	enum drm_gpuva_flags flags;
 * @va: structure containing the address and range of the &drm_gpuva
 * @addr: the start address
 * @gem: structure containing the &drm_gem_object and its offset
 * @offset: the offset within the &drm_gem_object
 * @obj: the mapped &drm_gem_object
	struct drm_gem_object *obj;
 * @entry: the &list_head to attach this object to a &drm_gpuvm_bo
	struct list_head entry;
 * @rb: structure containing data to store &drm_gpuvas in an rb-tree
 * @rb: the rb-tree node
 * @entry: The &list_head to additionally connect &drm_gpuvas
 * in the same order they appear in the interval tree. This is
 * useful to keep iterating &drm_gpuvas from a start node found
 * through the rb-tree while doing modifications on the rb-tree
 * itself.
	struct list_head entry;
 * @__subtree_last: needed by the interval tree, holding last-in-subtree
int drm_gpuva_insert(struct drm_gpuvm *gpuvm, struct drm_gpuva *va);
void drm_gpuva_remove(struct drm_gpuva *va);

void drm_gpuva_link(struct drm_gpuva *va, struct drm_gpuvm_bo *vm_bo);
void drm_gpuva_unlink(struct drm_gpuva *va);

struct drm_gpuva *drm_gpuva_find(struct drm_gpuvm *gpuvm,
				 u64 addr, u64 range);
struct drm_gpuva *drm_gpuva_find_first(struct drm_gpuvm *gpuvm,
				       u64 addr, u64 range);
struct drm_gpuva *drm_gpuva_find_prev(struct drm_gpuvm *gpuvm, u64 start);
struct drm_gpuva *drm_gpuva_find_next(struct drm_gpuvm *gpuvm, u64 end);
static inline void drm_gpuva_init(struct drm_gpuva *va, u64 addr, u64 range,
				  struct drm_gem_object *obj, u64 offset)
{
	va->va.addr = addr;
	va->va.range = range;
	va->gem.obj = obj;
	va->gem.offset = offset;
}
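/*
 * Example: a driver embedding &struct drm_gpuva in its own mapping structure
 * might create a new mapping roughly as follows. This is a minimal sketch;
 * the my_mapping structure and my_map() helper are hypothetical driver code
 * (kzalloc() requires <linux/slab.h>), and the caller is assumed to hold the
 * GEM's gpuva lock for drm_gpuva_link():
 *
 *	struct my_mapping {
 *		struct drm_gpuva va;
 *		// driver-specific per-mapping state
 *	};
 *
 *	static int my_map(struct drm_gpuvm *gpuvm, struct drm_gpuvm_bo *vm_bo,
 *			  u64 addr, u64 range, u64 offset)
 *	{
 *		struct my_mapping *m;
 *		int ret;
 *
 *		m = kzalloc(sizeof(*m), GFP_KERNEL);
 *		if (!m)
 *			return -ENOMEM;
 *
 *		drm_gpuva_init(&m->va, addr, range, vm_bo->obj, offset);
 *
 *		ret = drm_gpuva_insert(gpuvm, &m->va);
 *		if (ret) {
 *			kfree(m);
 *			return ret;
 *		}
 *
 *		// links m->va into the vm_bo's gpuva list
 *		drm_gpuva_link(&m->va, vm_bo);
 *		return 0;
 *	}
 */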
 * drm_gpuva_invalidate() - sets whether the backing GEM of this &drm_gpuva is
 * invalidated
 * @va: the &drm_gpuva to set the invalidate flag for
 * @invalidate: indicates whether the &drm_gpuva is invalidated
static inline void drm_gpuva_invalidate(struct drm_gpuva *va, bool invalidate)
{
	if (invalidate)
		va->flags |= DRM_GPUVA_INVALIDATED;
	else
		va->flags &= ~DRM_GPUVA_INVALIDATED;
}

 * drm_gpuva_invalidated() - indicates whether the backing BO of this &drm_gpuva
 * is invalidated
 * @va: the &drm_gpuva to check
static inline bool drm_gpuva_invalidated(struct drm_gpuva *va)
{
	return va->flags & DRM_GPUVA_INVALIDATED;
}
 * enum drm_gpuvm_flags - flags for struct drm_gpuvm
enum drm_gpuvm_flags {
 * @DRM_GPUVM_RESV_PROTECTED: GPUVM is protected externally by the
 * GPUVM's &dma_resv lock
	DRM_GPUVM_RESV_PROTECTED = BIT(0),
 * @DRM_GPUVM_USERBITS: user defined bits
	DRM_GPUVM_USERBITS = BIT(1),
 * struct drm_gpuvm - DRM GPU VA Manager
 * The DRM GPU VA Manager keeps track of a GPU's virtual address space by
 * using an rb-tree based interval tree (see @rb below). Typically, this
 * structure is embedded in bigger driver structures.
 * Drivers can pass addresses and ranges in an arbitrary unit, e.g. bytes or
 * pages.
 * There should be one manager instance per GPU virtual address space.
 * @name: the name of the DRM GPU VA space
 * @flags: the &drm_gpuvm_flags of this GPUVM
	enum drm_gpuvm_flags flags;
 * @drm: the &drm_device this VM lives in
	struct drm_device *drm;
 * @mm_start: start of the VA space
 * @mm_range: length of the VA space
 * @rb: structures to track &drm_gpuva entries
 * @tree: the rb-tree to track GPU VA mappings
	struct rb_root_cached tree;
 * @list: the &list_head to track GPU VA mappings
	struct list_head list;
 * @kref: reference count of this object
 * @kernel_alloc_node:
 * &drm_gpuva representing the address space cutout reserved for
 * the kernel
	struct drm_gpuva kernel_alloc_node;
 * @ops: &drm_gpuvm_ops providing the split/merge steps to drivers
	const struct drm_gpuvm_ops *ops;
 * @r_obj: Resv GEM object; representing the GPUVM's common &dma_resv.
	struct drm_gem_object *r_obj;
 * @extobj: structure holding the extobj list
 * @list: &list_head storing &drm_gpuvm_bos serving as
 * external objects
	struct list_head list;
 * @local_list: pointer to the local list temporarily storing
 * entries from the external object list
	struct list_head *local_list;
 * @lock: spinlock to protect the extobj list
 * @evict: structure holding the evict list and evict list lock
 * @list: &list_head storing &drm_gpuvm_bos currently being
 * evicted
	struct list_head list;
 * @local_list: pointer to the local list temporarily storing
 * entries from the evicted object list
	struct list_head *local_list;
 * @lock: spinlock to protect the evict list
void drm_gpuvm_init(struct drm_gpuvm *gpuvm, const char *name,
		    enum drm_gpuvm_flags flags,
		    struct drm_device *drm,
		    struct drm_gem_object *r_obj,
		    u64 start_offset, u64 range,
		    u64 reserve_offset, u64 reserve_range,
		    const struct drm_gpuvm_ops *ops);
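/*
 * Example: setting up a &drm_gpuvm at driver initialization time. This is a
 * minimal sketch; my_vm_free(), the 48-bit VA space size and the reserved
 * kernel region at address 0 are hypothetical choices, and drm_gpuvm_init()
 * is assumed to take its own reference on the resv GEM object:
 *
 *	static const struct drm_gpuvm_ops my_gpuvm_ops = {
 *		.vm_free = my_vm_free,
 *	};
 *
 *	struct drm_gem_object *r_obj = drm_gpuvm_resv_object_alloc(drm);
 *
 *	if (!r_obj)
 *		return -ENOMEM;
 *
 *	drm_gpuvm_init(gpuvm, "my-vm", 0, drm, r_obj,
 *		       0, 1ull << 48,
 *		       0, SZ_4K,
 *		       &my_gpuvm_ops);
 *	// the GPUVM holds its own reference, drop the allocation reference
 *	drm_gem_object_put(r_obj);
 */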
 * drm_gpuvm_get() - acquire a struct drm_gpuvm reference
 * @gpuvm: the &drm_gpuvm to acquire the reference of
 * This function acquires an additional reference to @gpuvm. It is illegal to
 * call this without already holding a reference. No locks required.
static inline struct drm_gpuvm *
drm_gpuvm_get(struct drm_gpuvm *gpuvm)
{
	kref_get(&gpuvm->kref);

	return gpuvm;
}

void drm_gpuvm_put(struct drm_gpuvm *gpuvm);

bool drm_gpuvm_range_valid(struct drm_gpuvm *gpuvm, u64 addr, u64 range);
bool drm_gpuvm_interval_empty(struct drm_gpuvm *gpuvm, u64 addr, u64 range);

struct drm_gem_object *
drm_gpuvm_resv_object_alloc(struct drm_device *drm);
 * drm_gpuvm_resv_protected() - indicates whether &DRM_GPUVM_RESV_PROTECTED is
 * set
 * @gpuvm: the &drm_gpuvm
 * Returns: true if &DRM_GPUVM_RESV_PROTECTED is set, false otherwise.
static inline bool
drm_gpuvm_resv_protected(struct drm_gpuvm *gpuvm)
{
	return gpuvm->flags & DRM_GPUVM_RESV_PROTECTED;
}

 * drm_gpuvm_resv() - returns the &drm_gpuvm's &dma_resv
 * @gpuvm__: the &drm_gpuvm
 * Returns: a pointer to the &drm_gpuvm's shared &dma_resv
#define drm_gpuvm_resv(gpuvm__) ((gpuvm__)->r_obj->resv)

 * drm_gpuvm_resv_obj() - returns the &drm_gem_object holding the &drm_gpuvm's
 * &dma_resv
 * @gpuvm__: the &drm_gpuvm
 * Returns: a pointer to the &drm_gem_object holding the &drm_gpuvm's shared
 * &dma_resv
#define drm_gpuvm_resv_obj(gpuvm__) ((gpuvm__)->r_obj)
#define drm_gpuvm_resv_held(gpuvm__) \
	dma_resv_held(drm_gpuvm_resv(gpuvm__))

#define drm_gpuvm_resv_assert_held(gpuvm__) \
	dma_resv_assert_held(drm_gpuvm_resv(gpuvm__))
 * drm_gpuvm_is_extobj() - indicates whether the given &drm_gem_object is an
 * external object
 * @gpuvm: the &drm_gpuvm to check
 * @obj: the &drm_gem_object to check
 * Returns: true if the &drm_gem_object's &dma_resv differs from the
 * &drm_gpuvm's &dma_resv, false otherwise
static inline bool
drm_gpuvm_is_extobj(struct drm_gpuvm *gpuvm,
		    struct drm_gem_object *obj)
{
	return obj && obj->resv != drm_gpuvm_resv(gpuvm);
}

static inline struct drm_gpuva *
__drm_gpuva_next(struct drm_gpuva *va)
{
	if (va && !list_is_last(&va->rb.entry, &va->vm->rb.list))
		return list_next_entry(va, rb.entry);

	return NULL;
}
 * drm_gpuvm_for_each_va_range() - iterate over a range of &drm_gpuvas
 * @va__: &drm_gpuva structure to assign to in each iteration step
 * @gpuvm__: &drm_gpuvm to walk over
 * @start__: starting offset, the first gpuva will overlap this
 * @end__: ending offset, the last gpuva will start before this (but may
 * overlap)
 * This iterator walks over all &drm_gpuvas in the &drm_gpuvm that lie
 * between @start__ and @end__. It is implemented similarly to list_for_each(),
 * but uses the &drm_gpuvm's internal interval tree to accelerate
 * the search for the starting &drm_gpuva, and hence isn't safe against removal
 * of elements. It assumes that @end__ is within (or is the upper limit of) the
 * &drm_gpuvm. This iterator does not skip over the &drm_gpuvm's
 * @kernel_alloc_node.
#define drm_gpuvm_for_each_va_range(va__, gpuvm__, start__, end__) \
	for (va__ = drm_gpuva_find_first((gpuvm__), (start__), (end__) - (start__)); \
	     va__ && (va__->va.addr < (end__)); \
	     va__ = __drm_gpuva_next(va__))
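/*
 * Example: walking all mappings overlapping a given range, e.g. to inspect
 * what is currently backed (a minimal sketch):
 *
 *	struct drm_gpuva *va;
 *
 *	drm_gpuvm_for_each_va_range(va, gpuvm, addr, addr + range)
 *		pr_debug("mapping: addr=0x%llx range=0x%llx\n",
 *			 va->va.addr, va->va.range);
 */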
 * drm_gpuvm_for_each_va_range_safe() - safely iterate over a range of
 * &drm_gpuvas
 * @va__: &drm_gpuva to assign to in each iteration step
 * @next__: another &drm_gpuva to use as temporary storage
 * @gpuvm__: &drm_gpuvm to walk over
 * @start__: starting offset, the first gpuva will overlap this
 * @end__: ending offset, the last gpuva will start before this (but may
 * overlap)
 * This iterator walks over all &drm_gpuvas in the &drm_gpuvm that lie
 * between @start__ and @end__. It is implemented similarly to
 * list_for_each_safe(), but uses the &drm_gpuvm's internal interval
 * tree to accelerate the search for the starting &drm_gpuva, and hence is safe
 * against removal of elements. It assumes that @end__ is within (or is the
 * upper limit of) the &drm_gpuvm. This iterator does not skip over the
 * &drm_gpuvm's @kernel_alloc_node.
#define drm_gpuvm_for_each_va_range_safe(va__, next__, gpuvm__, start__, end__) \
	for (va__ = drm_gpuva_find_first((gpuvm__), (start__), (end__) - (start__)), \
	     next__ = __drm_gpuva_next(va__); \
	     va__ && (va__->va.addr < (end__)); \
	     va__ = next__, next__ = __drm_gpuva_next(va__))
 * drm_gpuvm_for_each_va() - iterate over all &drm_gpuvas
 * @va__: &drm_gpuva to assign to in each iteration step
 * @gpuvm__: &drm_gpuvm to walk over
 * This iterator walks over all &drm_gpuva structures associated with the given
 * &drm_gpuvm.
#define drm_gpuvm_for_each_va(va__, gpuvm__) \
	list_for_each_entry(va__, &(gpuvm__)->rb.list, rb.entry)

 * drm_gpuvm_for_each_va_safe() - safely iterate over all &drm_gpuvas
 * @va__: &drm_gpuva to assign to in each iteration step
 * @next__: another &drm_gpuva to use as temporary storage
 * @gpuvm__: &drm_gpuvm to walk over
 * This iterator walks over all &drm_gpuva structures associated with the given
 * &drm_gpuvm. It is implemented with list_for_each_entry_safe(), and
 * hence safe against the removal of elements.
#define drm_gpuvm_for_each_va_safe(va__, next__, gpuvm__) \
	list_for_each_entry_safe(va__, next__, &(gpuvm__)->rb.list, rb.entry)
 * struct drm_gpuvm_exec - &drm_gpuvm abstraction of &drm_exec
 * This structure should be created on the stack as &drm_exec should be.
 * Optionally, @extra can be set in order to lock additional &drm_gem_objects.
struct drm_gpuvm_exec {
 * @exec: the &drm_exec structure
	struct drm_exec exec;
 * @flags: the flags for the struct drm_exec
 * @vm: the &drm_gpuvm to lock its DMA reservations
	struct drm_gpuvm *vm;
 * @num_fences: the number of fences to reserve for the &dma_resv of the
 * locked &drm_gem_objects
	unsigned int num_fences;
 * @extra: Callback and corresponding private data for the driver to
 * lock arbitrary additional &drm_gem_objects.
 * @fn: The driver callback to lock additional &drm_gem_objects.
	int (*fn)(struct drm_gpuvm_exec *vm_exec);
 * @priv: driver private data for the @fn callback
int drm_gpuvm_prepare_vm(struct drm_gpuvm *gpuvm,
			 struct drm_exec *exec,
			 unsigned int num_fences);

int drm_gpuvm_prepare_objects(struct drm_gpuvm *gpuvm,
			      struct drm_exec *exec,
			      unsigned int num_fences);

int drm_gpuvm_prepare_range(struct drm_gpuvm *gpuvm,
			    struct drm_exec *exec,
			    u64 addr, u64 range,
			    unsigned int num_fences);

int drm_gpuvm_exec_lock(struct drm_gpuvm_exec *vm_exec);

int drm_gpuvm_exec_lock_array(struct drm_gpuvm_exec *vm_exec,
			      struct drm_gem_object **objs,
			      unsigned int num_objs);

int drm_gpuvm_exec_lock_range(struct drm_gpuvm_exec *vm_exec,
			      u64 addr, u64 range);
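/*
 * Example: locking all dma-resvs of a GPUVM around a job submission. This is
 * a minimal sketch; my_submit() and the chosen &dma_resv_usage values are
 * hypothetical:
 *
 *	struct drm_gpuvm_exec vm_exec = {
 *		.vm = gpuvm,
 *		.flags = DRM_EXEC_INTERRUPTIBLE_WAIT,
 *		.num_fences = 1,
 *	};
 *	struct dma_fence *fence;
 *	int ret;
 *
 *	ret = drm_gpuvm_exec_lock(&vm_exec);
 *	if (ret)
 *		return ret;
 *
 *	ret = drm_gpuvm_exec_validate(&vm_exec);
 *	if (ret)
 *		goto out_unlock;
 *
 *	fence = my_submit(gpuvm);
 *	drm_gpuvm_exec_resv_add_fence(&vm_exec, fence,
 *				      DMA_RESV_USAGE_BOOKKEEP,
 *				      DMA_RESV_USAGE_BOOKKEEP);
 *
 * out_unlock:
 *	drm_gpuvm_exec_unlock(&vm_exec);
 *	return ret;
 */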
 * drm_gpuvm_exec_unlock() - unlock all dma-resv of all associated BOs
 * @vm_exec: the &drm_gpuvm_exec wrapper
 * Releases all dma-resv locks of all &drm_gem_objects previously acquired
 * through drm_gpuvm_exec_lock() or its variants.
static inline void
drm_gpuvm_exec_unlock(struct drm_gpuvm_exec *vm_exec)
{
	drm_exec_fini(&vm_exec->exec);
}
int drm_gpuvm_validate(struct drm_gpuvm *gpuvm, struct drm_exec *exec);
void drm_gpuvm_resv_add_fence(struct drm_gpuvm *gpuvm,
			      struct drm_exec *exec,
			      struct dma_fence *fence,
			      enum dma_resv_usage private_usage,
			      enum dma_resv_usage extobj_usage);

 * drm_gpuvm_exec_resv_add_fence() - add a fence to the GPUVM's private and
 * all extobj &dma_resv
 * @vm_exec: the &drm_gpuvm_exec wrapper
 * @fence: fence to add
 * @private_usage: private dma-resv usage
 * @extobj_usage: extobj dma-resv usage
 * See drm_gpuvm_resv_add_fence().
static inline void
drm_gpuvm_exec_resv_add_fence(struct drm_gpuvm_exec *vm_exec,
			      struct dma_fence *fence,
			      enum dma_resv_usage private_usage,
			      enum dma_resv_usage extobj_usage)
{
	drm_gpuvm_resv_add_fence(vm_exec->vm, &vm_exec->exec, fence,
				 private_usage, extobj_usage);
}

 * drm_gpuvm_exec_validate() - validate all BOs marked as evicted
 * @vm_exec: the &drm_gpuvm_exec wrapper
 * See drm_gpuvm_validate().
static inline int
drm_gpuvm_exec_validate(struct drm_gpuvm_exec *vm_exec)
{
	return drm_gpuvm_validate(vm_exec->vm, &vm_exec->exec);
}
 * struct drm_gpuvm_bo - structure representing a &drm_gpuvm and
 * &drm_gem_object combination
 * This structure is an abstraction representing a &drm_gpuvm and
 * &drm_gem_object combination. It serves as an indirection to accelerate
 * iterating all &drm_gpuvas within a &drm_gpuvm backed by the same
 * &drm_gem_object.
 * Furthermore, it is used to cache evicted GEM objects for a certain GPU-VM
 * to accelerate validation.
 * Typically, drivers want to create an instance of a struct drm_gpuvm_bo the
 * first time a GEM object is mapped in a GPU-VM and release the instance
 * once the last mapping of the GEM object in this GPU-VM is unmapped.
struct drm_gpuvm_bo {
 * @vm: The &drm_gpuvm the @obj is mapped in. This is a reference
 * counted pointer.
	struct drm_gpuvm *vm;
 * @obj: The &drm_gem_object being mapped in @vm. This is a reference
 * counted pointer.
	struct drm_gem_object *obj;
 * @evicted: Indicates whether the &drm_gem_object is evicted; field
 * protected by the &drm_gem_object's dma-resv lock.
 * @kref: The reference count for this &drm_gpuvm_bo.
 * @list: Structure containing all &list_heads.
 * @gpuva: The list of linked &drm_gpuvas.
 * It is safe to access entries from this list as long as the
 * GEM's gpuva lock is held. See also struct drm_gem_object.
	struct list_head gpuva;
 * @entry: Structure containing all &list_heads serving as
 * list entries.
 * @gem: List entry to attach to the &drm_gem_object's
 * gpuva list.
	struct list_head gem;
 * @extobj: List entry to attach to the &drm_gpuvm's
 * extobj list.
	struct list_head extobj;
 * @evict: List entry to attach to the &drm_gpuvm's evict
 * list.
	struct list_head evict;
struct drm_gpuvm_bo *
drm_gpuvm_bo_create(struct drm_gpuvm *gpuvm,
		    struct drm_gem_object *obj);

struct drm_gpuvm_bo *
drm_gpuvm_bo_obtain(struct drm_gpuvm *gpuvm,
		    struct drm_gem_object *obj);
struct drm_gpuvm_bo *
drm_gpuvm_bo_obtain_prealloc(struct drm_gpuvm_bo *vm_bo);
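/*
 * Example: typical &drm_gpuvm_bo lifecycle in a driver's map path. This is a
 * minimal sketch; locking of the GEM's gpuva list is assumed to be handled
 * by the surrounding driver code, and drm_gpuva_link() is assumed to take
 * its own &drm_gpuvm_bo reference for the linked &drm_gpuva:
 *
 *	struct drm_gpuvm_bo *vm_bo;
 *
 *	vm_bo = drm_gpuvm_bo_obtain(gpuvm, obj);
 *	if (IS_ERR(vm_bo))
 *		return PTR_ERR(vm_bo);
 *
 *	drm_gpuva_link(va, vm_bo);
 *
 *	// drop the obtain reference again; the linked va keeps the
 *	// vm_bo alive
 *	drm_gpuvm_bo_put(vm_bo);
 */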
 * drm_gpuvm_bo_get() - acquire a struct drm_gpuvm_bo reference
 * @vm_bo: the &drm_gpuvm_bo to acquire the reference of
 * This function acquires an additional reference to @vm_bo. It is illegal to
 * call this without already holding a reference. No locks required.
static inline struct drm_gpuvm_bo *
drm_gpuvm_bo_get(struct drm_gpuvm_bo *vm_bo)
{
	kref_get(&vm_bo->kref);

	return vm_bo;
}

bool drm_gpuvm_bo_put(struct drm_gpuvm_bo *vm_bo);

struct drm_gpuvm_bo *
drm_gpuvm_bo_find(struct drm_gpuvm *gpuvm,
		  struct drm_gem_object *obj);

void drm_gpuvm_bo_evict(struct drm_gpuvm_bo *vm_bo, bool evict);
 * drm_gpuvm_bo_gem_evict() - update the evicted state of all &drm_gpuvm_bos
 * of a GEM object
 * @obj: the &drm_gem_object
 * @evict: indicates whether @obj is evicted
 * See drm_gpuvm_bo_evict().
static inline void
drm_gpuvm_bo_gem_evict(struct drm_gem_object *obj, bool evict)
{
	struct drm_gpuvm_bo *vm_bo;

	drm_gem_gpuva_assert_lock_held(obj);
	drm_gem_for_each_gpuvm_bo(vm_bo, obj)
		drm_gpuvm_bo_evict(vm_bo, evict);
}
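/*
 * Example: a driver would typically call this from its buffer move/eviction
 * path, with the GEM's gpuva lock held (a minimal sketch):
 *
 *	// buffer was evicted from its current placement
 *	drm_gpuvm_bo_gem_evict(obj, true);
 *
 *	// buffer was moved back / successfully revalidated
 *	drm_gpuvm_bo_gem_evict(obj, false);
 */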
void drm_gpuvm_bo_extobj_add(struct drm_gpuvm_bo *vm_bo);

 * drm_gpuvm_bo_for_each_va() - iterator to walk over a list of &drm_gpuva
 * @va__: &drm_gpuva structure to assign to in each iteration step
 * @vm_bo__: the &drm_gpuvm_bo the &drm_gpuva to walk are associated with
 * This iterator walks over all &drm_gpuva structures associated with the
 * &drm_gpuvm_bo.
 * The caller must hold the GEM's gpuva lock.
#define drm_gpuvm_bo_for_each_va(va__, vm_bo__) \
	list_for_each_entry(va__, &(vm_bo__)->list.gpuva, gem.entry)

 * drm_gpuvm_bo_for_each_va_safe() - iterator to safely walk over a list of
 * &drm_gpuvas
 * @va__: &drm_gpuva structure to assign to in each iteration step
 * @next__: another &drm_gpuva to use as temporary storage
 * @vm_bo__: the &drm_gpuvm_bo the &drm_gpuva to walk are associated with
 * This iterator walks over all &drm_gpuva structures associated with the
 * &drm_gpuvm_bo. It is implemented with list_for_each_entry_safe(), hence
 * it is safe against removal of elements.
 * The caller must hold the GEM's gpuva lock.
#define drm_gpuvm_bo_for_each_va_safe(va__, next__, vm_bo__) \
	list_for_each_entry_safe(va__, next__, &(vm_bo__)->list.gpuva, gem.entry)
 * enum drm_gpuva_op_type - GPU VA operation type
 * Operations to alter the GPU VA mappings tracked by the &drm_gpuvm.
enum drm_gpuva_op_type {
 * @DRM_GPUVA_OP_MAP: the map op type
	DRM_GPUVA_OP_MAP,
 * @DRM_GPUVA_OP_REMAP: the remap op type
	DRM_GPUVA_OP_REMAP,
 * @DRM_GPUVA_OP_UNMAP: the unmap op type
	DRM_GPUVA_OP_UNMAP,
 * @DRM_GPUVA_OP_PREFETCH: the prefetch op type
	DRM_GPUVA_OP_PREFETCH,
 * struct drm_gpuva_op_map - GPU VA map operation
 * This structure represents a single map operation generated by the
 * DRM GPU VA manager.
struct drm_gpuva_op_map {
 * @va: structure containing address and range of a map
 * operation
 * @addr: the base address of the new mapping
 * @range: the range of the new mapping
 * @gem: structure containing the &drm_gem_object and its offset
 * @offset: the offset within the &drm_gem_object
 * @obj: the &drm_gem_object to map
	struct drm_gem_object *obj;
 * struct drm_gpuva_op_unmap - GPU VA unmap operation
 * This structure represents a single unmap operation generated by the
 * DRM GPU VA manager.
struct drm_gpuva_op_unmap {
 * @va: the &drm_gpuva to unmap
	struct drm_gpuva *va;
 * @keep:
 * Indicates whether this &drm_gpuva is physically contiguous with the
 * original mapping request.
 * Optionally, if &keep is set, drivers may keep the actual page table
 * mappings for this &drm_gpuva, adding only the missing page table
 * entries, and update the &drm_gpuvm accordingly.
 * struct drm_gpuva_op_remap - GPU VA remap operation
 * This represents a single remap operation generated by the DRM GPU VA manager.
 * A remap operation is generated when an existing GPU VA mapping is split up
 * by inserting a new GPU VA mapping or by partially unmapping existing
 * mapping(s), hence it consists of a maximum of two map and one unmap
 * operations.
 * The @unmap operation takes care of removing the original existing mapping.
 * @prev is used to remap the preceding part, @next the subsequent part.
 * If either a new mapping's start address is aligned with the start address
 * of the old mapping or the new mapping's end address is aligned with the
 * end address of the old mapping, either @prev or @next is NULL.
 * Note, the reason for a dedicated remap operation, rather than arbitrary
 * unmap and map operations, is to give drivers a chance to extract driver
 * specific data for creating the new mappings from the unmap operation's
 * &drm_gpuva structure which typically is embedded in larger driver specific
 * structures.
struct drm_gpuva_op_remap {
 * @prev: the preceding part of a split mapping
	struct drm_gpuva_op_map *prev;
 * @next: the subsequent part of a split mapping
	struct drm_gpuva_op_map *next;
 * @unmap: the unmap operation for the original existing mapping
	struct drm_gpuva_op_unmap *unmap;
 * struct drm_gpuva_op_prefetch - GPU VA prefetch operation
 * This structure represents a single prefetch operation generated by the
 * DRM GPU VA manager.
struct drm_gpuva_op_prefetch {
 * @va: the &drm_gpuva to prefetch
	struct drm_gpuva *va;

 * struct drm_gpuva_op - GPU VA operation
 * This structure represents a single generic operation.
 * The particular type of the operation is defined by @op.
struct drm_gpuva_op {
 * @entry:
 * The &list_head used to distribute instances of this struct within
 * &drm_gpuva_ops.
	struct list_head entry;
 * @op: the type of the operation
	enum drm_gpuva_op_type op;
 * @map: the map operation
	struct drm_gpuva_op_map map;
 * @remap: the remap operation
	struct drm_gpuva_op_remap remap;
 * @unmap: the unmap operation
	struct drm_gpuva_op_unmap unmap;
 * @prefetch: the prefetch operation
	struct drm_gpuva_op_prefetch prefetch;
 * struct drm_gpuva_ops - wraps a list of &drm_gpuva_op
struct drm_gpuva_ops {
 * @list: the &list_head
	struct list_head list;

 * drm_gpuva_for_each_op() - iterator to walk over &drm_gpuva_ops
 * @op: &drm_gpuva_op to assign in each iteration step
 * @ops: &drm_gpuva_ops to walk
 * This iterator walks over all ops within a given list of operations.
#define drm_gpuva_for_each_op(op, ops) list_for_each_entry(op, &(ops)->list, entry)
 * drm_gpuva_for_each_op_safe() - iterator to safely walk over &drm_gpuva_ops
 * @op: &drm_gpuva_op to assign in each iteration step
 * @next: another &drm_gpuva_op to use as temporary storage
 * @ops: &drm_gpuva_ops to walk
 * This iterator walks over all ops within a given list of operations. It is
 * implemented with list_for_each_safe(), hence safe against removal of
 * elements.
#define drm_gpuva_for_each_op_safe(op, next, ops) \
	list_for_each_entry_safe(op, next, &(ops)->list, entry)
 * drm_gpuva_for_each_op_from_reverse() - iterate backwards from the given point
 * @op: &drm_gpuva_op to assign in each iteration step
 * @ops: &drm_gpuva_ops to walk
 * This iterator walks over all ops within a given list of operations beginning
 * from the given operation in reverse order.
#define drm_gpuva_for_each_op_from_reverse(op, ops) \
	list_for_each_entry_from_reverse(op, &(ops)->list, entry)
 * drm_gpuva_first_op() - returns the first &drm_gpuva_op from &drm_gpuva_ops
 * @ops: the &drm_gpuva_ops to get the first &drm_gpuva_op from
#define drm_gpuva_first_op(ops) \
	list_first_entry(&(ops)->list, struct drm_gpuva_op, entry)

 * drm_gpuva_last_op() - returns the last &drm_gpuva_op from &drm_gpuva_ops
 * @ops: the &drm_gpuva_ops to get the last &drm_gpuva_op from
#define drm_gpuva_last_op(ops) \
	list_last_entry(&(ops)->list, struct drm_gpuva_op, entry)

 * drm_gpuva_prev_op() - previous &drm_gpuva_op in the list
 * @op: the current &drm_gpuva_op
#define drm_gpuva_prev_op(op) list_prev_entry(op, entry)

 * drm_gpuva_next_op() - next &drm_gpuva_op in the list
 * @op: the current &drm_gpuva_op
#define drm_gpuva_next_op(op) list_next_entry(op, entry)
struct drm_gpuva_ops *
drm_gpuvm_sm_map_ops_create(struct drm_gpuvm *gpuvm,
			    u64 addr, u64 range,
			    struct drm_gem_object *obj, u64 offset);
struct drm_gpuva_ops *
drm_gpuvm_sm_unmap_ops_create(struct drm_gpuvm *gpuvm,
			      u64 addr, u64 range);

struct drm_gpuva_ops *
drm_gpuvm_prefetch_ops_create(struct drm_gpuvm *gpuvm,
			      u64 addr, u64 range);

struct drm_gpuva_ops *
drm_gpuvm_bo_unmap_ops_create(struct drm_gpuvm_bo *vm_bo);

void drm_gpuva_ops_free(struct drm_gpuvm *gpuvm,
			struct drm_gpuva_ops *ops);
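/*
 * Example: the typical create / iterate / free pattern for pre-allocated
 * operations. This is a minimal sketch; my_apply_op() is a hypothetical
 * driver function applying a single &drm_gpuva_op to the page tables:
 *
 *	struct drm_gpuva_ops *ops;
 *	struct drm_gpuva_op *op;
 *	int ret = 0;
 *
 *	ops = drm_gpuvm_sm_map_ops_create(gpuvm, addr, range, obj, offset);
 *	if (IS_ERR(ops))
 *		return PTR_ERR(ops);
 *
 *	drm_gpuva_for_each_op(op, ops) {
 *		ret = my_apply_op(gpuvm, op);
 *		if (ret)
 *			break;
 *	}
 *
 *	drm_gpuva_ops_free(gpuvm, ops);
 *	return ret;
 */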
static inline void drm_gpuva_init_from_op(struct drm_gpuva *va,
					  struct drm_gpuva_op_map *op)
{
	drm_gpuva_init(va, op->va.addr, op->va.range,
		       op->gem.obj, op->gem.offset);
}
 * struct drm_gpuvm_ops - callbacks for split/merge steps
 * This structure defines the callbacks used by &drm_gpuvm_sm_map and
 * &drm_gpuvm_sm_unmap to provide the split/merge steps for map and unmap
 * operations to drivers.
struct drm_gpuvm_ops {
 * @vm_free: called when the last reference of a struct drm_gpuvm is
 * dropped
 * This callback is mandatory.
	void (*vm_free)(struct drm_gpuvm *gpuvm);
1075 void (*vm_free)(struct drm_gpuvm *gpuvm);
1078 * @op_alloc: called when the &drm_gpuvm allocates
1079 * a struct drm_gpuva_op
1081 * Some drivers may want to embed struct drm_gpuva_op into driver
1082 * specific structures. By implementing this callback drivers can
1083 * allocate memory accordingly.
1085 * This callback is optional.
1087 struct drm_gpuva_op *(*op_alloc)(void);
1090 * @op_free: called when the &drm_gpuvm frees a
1091 * struct drm_gpuva_op
1093 * Some drivers may want to embed struct drm_gpuva_op into driver
1094 * specific structures. By implementing this callback drivers can
1095 * free the previously allocated memory accordingly.
1097 * This callback is optional.
1099 void (*op_free)(struct drm_gpuva_op *op);
1102 * @vm_bo_alloc: called when the &drm_gpuvm allocates
1103 * a struct drm_gpuvm_bo
1105 * Some drivers may want to embed struct drm_gpuvm_bo into driver
1106 * specific structures. By implementing this callback drivers can
1107 * allocate memory accordingly.
1109 * This callback is optional.
1111 struct drm_gpuvm_bo *(*vm_bo_alloc)(void);
1114 * @vm_bo_free: called when the &drm_gpuvm frees a
1115 * struct drm_gpuvm_bo
1117 * Some drivers may want to embed struct drm_gpuvm_bo into driver
1118 * specific structures. By implementing this callback drivers can
1119 * free the previously allocated memory accordingly.
1121 * This callback is optional.
1123 void (*vm_bo_free)(struct drm_gpuvm_bo *vm_bo);
1126 * @vm_bo_validate: called from drm_gpuvm_validate()
1128 * Drivers receive this callback for every evicted &drm_gem_object being
1129 * mapped in the corresponding &drm_gpuvm.
1131 * Typically, drivers would call their driver specific variant of
1132 * ttm_bo_validate() from within this callback.
1134 int (*vm_bo_validate)(struct drm_gpuvm_bo *vm_bo,
1135 struct drm_exec *exec);
 * @sm_step_map: called from &drm_gpuvm_sm_map to finally insert the
 * mapping once all previous steps were completed
 * The &priv pointer matches the one the driver passed to
 * &drm_gpuvm_sm_map or &drm_gpuvm_sm_unmap, respectively.
 * Can be NULL if &drm_gpuvm_sm_map is not used.
	int (*sm_step_map)(struct drm_gpuva_op *op, void *priv);
 * @sm_step_remap: called from &drm_gpuvm_sm_map and
 * &drm_gpuvm_sm_unmap to split up an existing mapping
 * This callback is called when an existing mapping needs to be split up.
 * This is the case when either a newly requested mapping overlaps or
 * is enclosed by an existing mapping or a partial unmap of an existing
 * mapping is requested.
 * The &priv pointer matches the one the driver passed to
 * &drm_gpuvm_sm_map or &drm_gpuvm_sm_unmap, respectively.
 * Can be NULL if neither &drm_gpuvm_sm_map nor &drm_gpuvm_sm_unmap is
 * used.
	int (*sm_step_remap)(struct drm_gpuva_op *op, void *priv);
 * @sm_step_unmap: called from &drm_gpuvm_sm_map and
 * &drm_gpuvm_sm_unmap to unmap an existing mapping
 * This callback is called when an existing mapping needs to be unmapped.
 * This is the case when either a newly requested mapping encloses an
 * existing mapping or an unmap of an existing mapping is requested.
 * The &priv pointer matches the one the driver passed to
 * &drm_gpuvm_sm_map or &drm_gpuvm_sm_unmap, respectively.
 * Can be NULL if neither &drm_gpuvm_sm_map nor &drm_gpuvm_sm_unmap is
 * used.
	int (*sm_step_unmap)(struct drm_gpuva_op *op, void *priv);
int drm_gpuvm_sm_map(struct drm_gpuvm *gpuvm, void *priv,
		     u64 addr, u64 range,
		     struct drm_gem_object *obj, u64 offset);

int drm_gpuvm_sm_unmap(struct drm_gpuvm *gpuvm, void *priv,
		       u64 addr, u64 range);
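/*
 * Example: using the callback based interface instead of pre-allocated
 * &drm_gpuva_ops. This is a minimal sketch; my_vm_free(), my_vm_map(),
 * my_vm_remap() and my_vm_unmap() are hypothetical driver callbacks applying
 * a single step to the page tables:
 *
 *	static const struct drm_gpuvm_ops my_gpuvm_ops = {
 *		.vm_free = my_vm_free,
 *		.sm_step_map = my_vm_map,
 *		.sm_step_remap = my_vm_remap,
 *		.sm_step_unmap = my_vm_unmap,
 *	};
 *
 *	// priv is passed through to each sm_step_* callback
 *	ret = drm_gpuvm_sm_map(gpuvm, priv, addr, range, obj, offset);
 */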
void drm_gpuva_map(struct drm_gpuvm *gpuvm,
		   struct drm_gpuva *va,
		   struct drm_gpuva_op_map *op);

void drm_gpuva_remap(struct drm_gpuva *prev,
		     struct drm_gpuva *next,
		     struct drm_gpuva_op_remap *op);

void drm_gpuva_unmap(struct drm_gpuva_op_unmap *op);
 * drm_gpuva_op_remap_to_unmap_range() - Helper to get the start and range of
 * the unmap stage of a remap op.
 * @op: Remap op.
 * @start_addr: Output pointer for the start of the required unmap.
 * @range: Output pointer for the length of the required unmap.
 * The given start address and range will be set such that they represent the
 * range of the address space that was previously covered by the mapping being
 * re-mapped, but is now empty.
static inline void
drm_gpuva_op_remap_to_unmap_range(const struct drm_gpuva_op_remap *op,
				  u64 *start_addr, u64 *range)
{
	const u64 va_start = op->prev ?
			     op->prev->va.addr + op->prev->va.range :
			     op->unmap->va->va.addr;
	const u64 va_end = op->next ?
			   op->next->va.addr :
			   op->unmap->va->va.addr + op->unmap->va->va.range;

	if (start_addr)
		*start_addr = va_start;
	if (range)
		*range = va_end - va_start;
}
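/*
 * Example: a driver's remap step would typically unmap exactly this range
 * from the page tables before re-installing @prev and @next. This is a
 * minimal sketch; my_unmap_range() is a hypothetical driver function:
 *
 *	static int my_vm_remap(struct drm_gpuva_op *op, void *priv)
 *	{
 *		u64 addr, range;
 *
 *		drm_gpuva_op_remap_to_unmap_range(&op->remap, &addr, &range);
 *		my_unmap_range(priv, addr, range);
 *
 *		// re-install op->remap.prev / op->remap.next mappings here
 *		return 0;
 *	}
 */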
#endif /* __DRM_GPUVM_H__ */