/*
 * SPDX-License-Identifier: MIT
 *
 * Copyright © 2016 Intel Corporation
 */
#ifndef __I915_GEM_OBJECT_TYPES_H__
#define __I915_GEM_OBJECT_TYPES_H__

#include <drm/drm_gem.h>
#include <uapi/drm/i915_drm.h>

#include "i915_active.h"
#include "i915_selftest.h"
struct drm_i915_gem_object;
struct intel_frontbuffer;
/*
 * struct i915_lut_handle tracks the fast lookups from handle to vma used
 * for execbuf. Although we use a radixtree for that mapping, in order to
 * remove them as the object or context is closed, we need a secondary list
 * and a translation entry (i915_lut_handle).
 */
struct i915_lut_handle {
	struct list_head obj_link;
	struct i915_gem_context *ctx;
	u32 handle;
};
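
/*
 * Illustrative sketch only (not part of the driver): the lookup entries
 * above are what let us walk from an object back to every context whose
 * handles_vma radixtree still references it, e.g. on object close:
 *
 *	struct i915_lut_handle *lut, *ln;
 *
 *	list_for_each_entry_safe(lut, ln, &obj->lut_list, obj_link) {
 *		radix_tree_delete(&lut->ctx->handles_vma, lut->handle);
 *		list_del(&lut->obj_link);
 *		kfree(lut);
 *	}
 *
 * Locking against the context and the exact free helper are simplified
 * here; obj->lut_list and context->handles_vma are the fields described
 * in the comment above.
 */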
struct drm_i915_gem_object_ops {
	unsigned int flags;
#define I915_GEM_OBJECT_HAS_STRUCT_PAGE BIT(0)
#define I915_GEM_OBJECT_IS_SHRINKABLE BIT(1)
#define I915_GEM_OBJECT_IS_PROXY BIT(2)
#define I915_GEM_OBJECT_NO_GGTT BIT(3)
#define I915_GEM_OBJECT_ASYNC_CANCEL BIT(4)
	/* Interface between the GEM object and its backing storage.
	 * get_pages() is called once prior to the use of the associated set
	 * of pages before binding them into the GTT, and put_pages() is
	 * called after we no longer need them. As we expect there to be
	 * associated cost with migrating pages between the backing storage
	 * and making them available for the GPU (e.g. clflush), we may hold
	 * onto the pages after they are no longer referenced by the GPU
	 * in case they may be used again shortly (for example migrating the
	 * pages to a different memory domain within the GTT). put_pages()
	 * will therefore most likely be called when the object itself is
	 * being released or under memory pressure (where we attempt to
	 * reap pages for the shrinker). A usage sketch follows this struct.
	 */
	int (*get_pages)(struct drm_i915_gem_object *obj);
	void (*put_pages)(struct drm_i915_gem_object *obj,
			  struct sg_table *pages);
	void (*truncate)(struct drm_i915_gem_object *obj);
	void (*writeback)(struct drm_i915_gem_object *obj);

	int (*pwrite)(struct drm_i915_gem_object *obj,
		      const struct drm_i915_gem_pwrite *arg);

	int (*dmabuf_export)(struct drm_i915_gem_object *obj);
	void (*release)(struct drm_i915_gem_object *obj);
};
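
/*
 * Illustrative sketch only, not the driver's real pin/unpin helpers: the
 * backend contract described above boils down to calling get_pages() once
 * before the pages are bound into the GTT, and handing the backend's
 * sg_table back to put_pages() when the object is released or reaped by
 * the shrinker, e.g.
 *
 *	err = obj->ops->get_pages(obj);
 *	if (err)
 *		return err;
 *	...			// bind into the GTT and use on the GPU
 *	obj->ops->put_pages(obj, pages);	// pages: the backend's sg_table
 *
 * Locking, pin counting (see pages_pin_count below) and keeping the pages
 * cached between uses are handled by the driver core and omitted here.
 */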
struct drm_i915_gem_object {
	struct drm_gem_object base;

	const struct drm_i915_gem_object_ops *ops;
	struct {
		/**
		 * @vma.lock: protect the list/tree of vmas
		 */
		spinlock_t lock;

		/**
		 * @vma.list: List of VMAs backed by this object
		 *
		 * The VMAs on this list are ordered by type, all GGTT vma are
		 * placed at the head and all ppGTT vma are placed at the tail.
		 * The different types of GGTT vma are unordered between
		 * themselves, use the @vma.tree (which has a defined order
		 * between all VMA) to quickly find an exact match.
		 */
		struct list_head list;

		/**
		 * @vma.tree: Ordered tree of VMAs backed by this object
		 *
		 * All VMA created for this object are placed in the @vma.tree
		 * for fast retrieval via a binary search in
		 * i915_vma_instance(). They are also added to @vma.list for
		 * easy iteration.
		 */
		struct rb_root tree;
	} vma;
	/**
	 * @lut_list: List of vma lookup entries in use for this object.
	 *
	 * If this object is closed, we need to remove all of its VMA from
	 * the fast lookup index in associated contexts; @lut_list provides
	 * this translation from object to context->handles_vma.
	 */
	struct list_head lut_list;
	/** Stolen memory for this object, instead of being backed by shmem. */
	struct drm_mm_node *stolen;

	struct llist_node freed;
	/**
	 * Whether the object is currently in the GGTT mmap.
	 */
	unsigned int userfault_count;
	struct list_head userfault_link;

	I915_SELFTEST_DECLARE(struct list_head st_link);
	unsigned long flags;
#define I915_BO_ALLOC_CONTIGUOUS BIT(0)
#define I915_BO_ALLOC_FLAGS (I915_BO_ALLOC_CONTIGUOUS)
	/*
	 * Is the object to be mapped as read-only to the GPU?
	 * Only honoured if the hardware has the relevant pte bit.
	 */
	unsigned int cache_level:3;
	unsigned int cache_coherent:2;
#define I915_BO_CACHE_COHERENT_FOR_READ BIT(0)
#define I915_BO_CACHE_COHERENT_FOR_WRITE BIT(1)
	unsigned int cache_dirty:1;
	/**
	 * @read_domains: Read memory domains.
	 *
	 * These monitor which caches contain read/write data related to the
	 * object. When transitioning from one set of domains to another,
	 * the driver is called to ensure that caches are suitably flushed and
	 * invalidated.
	 */
	u16 read_domains;

	/**
	 * @write_domain: Corresponding unique write memory domain.
	 */
	u16 write_domain;

	struct intel_frontbuffer *frontbuffer;
	/**
	 * Current tiling stride for the object, if it's tiled; see the
	 * unpacking sketch after this struct.
	 */
	unsigned int tiling_and_stride;
#define FENCE_MINIMUM_STRIDE 128 /* See i915_tiling_ok() */
#define TILING_MASK (FENCE_MINIMUM_STRIDE - 1)
#define STRIDE_MASK (~TILING_MASK)
	/** Count of VMA actually bound by this object */
	atomic_t bind_count;
	struct {
		struct mutex lock; /* protects the pages and their use */
		atomic_t pages_pin_count;

		/**
		 * Memory region for this object.
		 */
		struct intel_memory_region *region;

		/**
		 * List of memory region blocks allocated for this object.
		 */
		struct list_head blocks;

		struct sg_table *pages;
		struct i915_page_sizes {
			/**
			 * The sg mask of the pages sg_table, i.e. the mask
			 * of the lengths for each sg entry.
			 */
			unsigned int phys;

			/**
			 * The gtt page sizes we are allowed to use given the
			 * sg mask and the supported page sizes. This will
			 * express the smallest unit we can use for the whole
			 * object, as well as the larger sizes we may be able
			 * to use opportunistically.
			 */
			unsigned int sg;

			/**
			 * The actual gtt page size usage. Since we can have
			 * multiple vma associated with this object we need to
			 * prevent any trampling of state, hence a copy of this
			 * struct also lives in each vma, therefore the gtt
			 * value here should only be read/written through the
			 * vma.
			 */
			unsigned int gtt;
		} page_sizes;

		I915_SELFTEST_DECLARE(unsigned int page_mask);
		struct i915_gem_object_page_iter {
			struct scatterlist *sg_pos;
			unsigned int sg_idx; /* in pages, but 32bit eek! */

			struct radix_tree_root radix;
			struct mutex lock; /* protects this cache */
		} get_page;
		/**
		 * Element within i915->mm.unbound_list or i915->mm.bound_list,
		 * locked by i915->mm.obj_lock.
		 */
		struct list_head link;

		/**
		 * Advice: are the backing pages purgeable?
		 */
		unsigned int madv:2;

		/**
		 * This is set if the object has been written to since the
		 * pages were last acquired.
		 */
		bool dirty:1;

		/**
		 * This is set if the object has been pinned due to unknown
		 * swizzling.
		 */
		bool quirked:1;
	} mm;
	/** Record of address bit 17 of each page at last unbind. */
	unsigned long *bit_17;
	union {
		struct i915_gem_userptr {
			uintptr_t ptr;

			struct i915_mm_struct *mm;
			struct i915_mmu_object *mmu_object;
			struct work_struct *work;
		} userptr;

		unsigned long scratch;
	};
	/** for phys allocated objects */
	struct drm_dma_handle *phys_handle;
};
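
/*
 * Minimal sketch, not the driver's own accessors: tiling_and_stride packs
 * the tiling mode into the low bits (TILING_MASK, i.e. below the minimum
 * fence stride of 128) and the byte stride into the remaining high bits
 * (STRIDE_MASK), so the two fields unpack as below. The example_* names
 * are illustrative only.
 */
static inline unsigned int
example_get_tiling(const struct drm_i915_gem_object *obj)
{
	/* Tiling mode (e.g. I915_TILING_X) lives in the low bits. */
	return obj->tiling_and_stride & TILING_MASK;
}

static inline unsigned int
example_get_stride(const struct drm_i915_gem_object *obj)
{
	/* Stride in bytes; always a multiple of FENCE_MINIMUM_STRIDE. */
	return obj->tiling_and_stride & STRIDE_MASK;
}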
static inline struct drm_i915_gem_object *
to_intel_bo(struct drm_gem_object *gem)
{
	/* Assert that to_intel_bo(NULL) == NULL */
	BUILD_BUG_ON(offsetof(struct drm_i915_gem_object, base));

	return container_of(gem, struct drm_i915_gem_object, base);
}

#endif /* __I915_GEM_OBJECT_TYPES_H__ */