/*
 * SPDX-License-Identifier: MIT
 *
 * Copyright © 2016 Intel Corporation
 */
#ifndef __I915_GEM_OBJECT_TYPES_H__
#define __I915_GEM_OBJECT_TYPES_H__

#include <linux/mmu_notifier.h>

#include <drm/drm_gem.h>
#include <drm/ttm/ttm_bo_api.h>
#include <uapi/drm/i915_drm.h>

#include "i915_active.h"
#include "i915_selftest.h"
struct drm_i915_gem_object;
struct intel_frontbuffer;
/*
 * struct i915_lut_handle tracks the fast lookups from handle to vma used
 * for execbuf. Although we use a radixtree for that mapping, in order to
 * remove them as the object or context is closed, we need a secondary list
 * and a translation entry (i915_lut_handle).
 */
struct i915_lut_handle {
	struct list_head obj_link;
	struct i915_gem_context *ctx;
	u32 handle;
};
struct drm_i915_gem_object_ops {
	unsigned int flags;
#define I915_GEM_OBJECT_HAS_IOMEM	BIT(1)
#define I915_GEM_OBJECT_IS_SHRINKABLE	BIT(2)
#define I915_GEM_OBJECT_IS_PROXY	BIT(3)
#define I915_GEM_OBJECT_NO_MMAP		BIT(4)
	/* Interface between the GEM object and its backing storage.
	 * get_pages() is called once, prior to first use of the associated
	 * set of pages and before binding them into the GTT, and put_pages()
	 * is called after we no longer need them. As we expect there to be
	 * an associated cost with migrating pages between the backing storage
	 * and making them available for the GPU (e.g. clflush), we may hold
	 * onto the pages after they are no longer referenced by the GPU
	 * in case they may be used again shortly (for example migrating the
	 * pages to a different memory domain within the GTT). put_pages()
	 * will therefore most likely be called when the object itself is
	 * being released or under memory pressure (where we attempt to
	 * reap pages for the shrinker).
	 */
	int (*get_pages)(struct drm_i915_gem_object *obj);
	void (*put_pages)(struct drm_i915_gem_object *obj,
			  struct sg_table *pages);
	void (*truncate)(struct drm_i915_gem_object *obj);
	void (*writeback)(struct drm_i915_gem_object *obj);

	int (*pread)(struct drm_i915_gem_object *obj,
		     const struct drm_i915_gem_pread *arg);
	int (*pwrite)(struct drm_i915_gem_object *obj,
		      const struct drm_i915_gem_pwrite *arg);
	u64 (*mmap_offset)(struct drm_i915_gem_object *obj);

	int (*dmabuf_export)(struct drm_i915_gem_object *obj);
	/**
	 * adjust_lru - notify that the madvise value was updated
	 * @obj: The GEM object
	 *
	 * The madvise value may have been updated, or the object may have
	 * been recently referenced, so act accordingly (perhaps by moving
	 * it on an LRU list etc).
	 */
	void (*adjust_lru)(struct drm_i915_gem_object *obj);
	/**
	 * delayed_free - Override the default delayed free implementation
	 */
	void (*delayed_free)(struct drm_i915_gem_object *obj);
	void (*release)(struct drm_i915_gem_object *obj);

	const struct vm_operations_struct *mmap_ops;
	const char *name; /* friendly name for debug, e.g. lockdep classes */
};
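
/*
 * Implementation sketch (illustrative only): a minimal backend fills in
 * the mandatory hooks; the my_region_* names are assumptions for the
 * example, only the fields themselves come from the struct above.
 *
 *	static const struct drm_i915_gem_object_ops my_region_ops = {
 *		.name = "my-region",
 *		.flags = I915_GEM_OBJECT_IS_SHRINKABLE,
 *		.get_pages = my_region_get_pages,
 *		.put_pages = my_region_put_pages,
 *	};
 */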
enum i915_map_type {
	I915_MAP_WB = 0,
	I915_MAP_WC,
#define I915_MAP_OVERRIDE BIT(31)
	I915_MAP_FORCE_WB = I915_MAP_WB | I915_MAP_OVERRIDE,
	I915_MAP_FORCE_WC = I915_MAP_WC | I915_MAP_OVERRIDE,
};
enum i915_mmap_type {
	I915_MMAP_TYPE_GTT = 0,
	I915_MMAP_TYPE_WC,
	I915_MMAP_TYPE_WB,
	I915_MMAP_TYPE_UC,
};
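
/*
 * Mapping sketch (mainline helpers, shown for illustration): pin and map
 * the object's pages into the kernel address space with an explicit
 * caching mode:
 *
 *	void *vaddr = i915_gem_object_pin_map(obj, I915_MAP_WC);
 *	if (IS_ERR(vaddr))
 *		return PTR_ERR(vaddr);
 *	... write through vaddr ...
 *	i915_gem_object_unpin_map(obj);
 */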
struct i915_mmap_offset {
	struct drm_vma_offset_node vma_node;
	struct drm_i915_gem_object *obj;
	enum i915_mmap_type mmap_type;

	struct rb_node offset;
};
struct i915_gem_object_page_iter {
	struct scatterlist *sg_pos;
	unsigned int sg_idx; /* in pages, but only 32 bits wide */

	struct radix_tree_root radix;
	struct mutex lock; /* protects this cache */
};
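
/*
 * Lookup sketch (for illustration, assuming the mainline helper): in-order
 * walks resume cheaply from sg_pos/sg_idx, while the radixtree services
 * random access:
 *
 *	struct page *page = i915_gem_object_get_page(obj, n);
 */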
struct drm_i915_gem_object {
	/*
	 * We might have reason to revisit the below since it wastes
	 * a lot of space for non-ttm gem objects.
	 * In any case, always use the accessors for the ttm_buffer_object
	 * when accessing it.
	 */
	union {
		struct drm_gem_object base;
		struct ttm_buffer_object __do_not_access;
	};

	const struct drm_i915_gem_object_ops *ops;
	struct {
		/**
		 * @vma.lock: protect the list/tree of vmas
		 */
		spinlock_t lock;

		/**
		 * @vma.list: List of VMAs backed by this object
		 *
		 * The VMAs on this list are ordered by type: all GGTT VMAs
		 * are placed at the head and all ppGTT VMAs at the tail. The
		 * different types of GGTT VMA are unordered between
		 * themselves; use @vma.tree (which has a defined order
		 * between all VMAs) to quickly find an exact match.
		 */
		struct list_head list;

		/**
		 * @vma.tree: Ordered tree of VMAs backed by this object
		 *
		 * All VMAs created for this object are placed in @vma.tree
		 * for fast retrieval via a binary search in
		 * i915_vma_instance(). They are also added to @vma.list for
		 * easy iteration.
		 */
		struct rb_root tree;
	} vma;
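
	/*
	 * Lookup sketch (mainline helper, shown for illustration): @vma.tree
	 * is what keeps re-requesting a binding cheap:
	 *
	 *	struct i915_vma *vma = i915_vma_instance(obj, vm, NULL);
	 *	if (IS_ERR(vma))
	 *		return PTR_ERR(vma);
	 */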
	/**
	 * @lut_list: List of vma lookup entries in use for this object.
	 *
	 * If this object is closed, we need to remove all of its VMAs from
	 * the fast lookup index in associated contexts; @lut_list provides
	 * this translation from object to context->handles_vma.
	 */
	struct list_head lut_list;
	spinlock_t lut_lock; /* guards lut_list */
	/**
	 * @obj_link: Link into @i915_gem_ww_ctx.obj_list
	 *
	 * When we lock this object through i915_gem_object_lock() with a
	 * context, we add it to the list to ensure we can unlock everything
	 * when i915_gem_ww_ctx_backoff() or i915_gem_ww_ctx_fini() are called.
	 */
	struct list_head obj_link;
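
	/*
	 * Locking sketch (the usual mainline ww dance, for illustration):
	 *
	 *	struct i915_gem_ww_ctx ww;
	 *	int err;
	 *
	 *	i915_gem_ww_ctx_init(&ww, true);
	 * retry:
	 *	err = i915_gem_object_lock(obj, &ww);
	 *	if (!err) {
	 *		... use the object ...
	 *	}
	 *	if (err == -EDEADLK) {
	 *		err = i915_gem_ww_ctx_backoff(&ww);
	 *		if (!err)
	 *			goto retry;
	 *	}
	 *	i915_gem_ww_ctx_fini(&ww);
	 */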
	/**
	 * @shares_resv_from: The object shares the resv from this vm.
	 */
	struct i915_address_space *shares_resv_from;

	union {
		struct rcu_head rcu;
		struct llist_node freed;
	};
	/**
	 * Whether the object is currently in the GGTT mmap.
	 */
	unsigned int userfault_count;
	struct list_head userfault_link;
	struct {
		spinlock_t lock; /* Protects access to mmo offsets */
		struct rb_root offsets;
	} mmo;

	I915_SELFTEST_DECLARE(struct list_head st_link);
	unsigned long flags;
#define I915_BO_ALLOC_CONTIGUOUS  BIT(0)
#define I915_BO_ALLOC_VOLATILE    BIT(1)
#define I915_BO_ALLOC_STRUCT_PAGE BIT(2)
#define I915_BO_ALLOC_CPU_CLEAR   BIT(3)
#define I915_BO_ALLOC_USER        BIT(4)
#define I915_BO_ALLOC_FLAGS (I915_BO_ALLOC_CONTIGUOUS | \
			     I915_BO_ALLOC_VOLATILE | \
			     I915_BO_ALLOC_STRUCT_PAGE | \
			     I915_BO_ALLOC_CPU_CLEAR | \
			     I915_BO_ALLOC_USER)
#define I915_BO_READONLY          BIT(5)
#define I915_TILING_QUIRK_BIT     6 /* unknown swizzling; do not release! */
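
	/*
	 * Allocation sketch (illustrative; i915_gem_object_create_lmem() is
	 * the mainline helper for local-memory objects, shown here on the
	 * assumption that it accepts these allocation flags):
	 *
	 *	obj = i915_gem_object_create_lmem(i915, size,
	 *					  I915_BO_ALLOC_CONTIGUOUS |
	 *					  I915_BO_ALLOC_CPU_CLEAR);
	 */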
	/*
	 * Is the object to be mapped as read-only to the GPU?
	 * Only honoured if the hardware has the relevant pte bit.
	 */
	unsigned int cache_level:3;
	unsigned int cache_coherent:2;
#define I915_BO_CACHE_COHERENT_FOR_READ BIT(0)
#define I915_BO_CACHE_COHERENT_FOR_WRITE BIT(1)
	unsigned int cache_dirty:1;
	/**
	 * @read_domains: Read memory domains.
	 *
	 * These monitor which caches contain read/write data related to the
	 * object. When transitioning from one set of domains to another,
	 * the driver is called to ensure that caches are suitably flushed
	 * and invalidated.
	 */
	u16 read_domains;

	/**
	 * @write_domain: Corresponding unique write memory domain.
	 */
	u16 write_domain;

	struct intel_frontbuffer __rcu *frontbuffer;
	/** Current tiling stride for the object, if it's tiled. */
	unsigned int tiling_and_stride;
#define FENCE_MINIMUM_STRIDE 128 /* See i915_tiling_ok() */
#define TILING_MASK (FENCE_MINIMUM_STRIDE - 1)
#define STRIDE_MASK (~TILING_MASK)
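
	/*
	 * Extraction sketch (matches the masks above): the low bits hold the
	 * tiling mode and the remainder the stride:
	 *
	 *	unsigned int tiling = obj->tiling_and_stride & TILING_MASK;
	 *	unsigned int stride = obj->tiling_and_stride & STRIDE_MASK;
	 */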
	struct {
		/*
		 * Protects the pages and their use. Do not use directly, but
		 * instead go through the pin/unpin interfaces.
		 */
		atomic_t pages_pin_count;
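
		/*
		 * Usage sketch (mainline pin/unpin helpers, for
		 * illustration): hold a pin over any use of the backing
		 * store:
		 *
		 *	err = i915_gem_object_pin_pages(obj);
		 *	if (err)
		 *		return err;
		 *	... use the backing store ...
		 *	i915_gem_object_unpin_pages(obj);
		 */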
		/**
		 * Priority list of potential placements for this object.
		 */
		struct intel_memory_region **placements;
		int n_placements;

		/**
		 * Memory region for this object.
		 */
		struct intel_memory_region *region;
		/**
		 * Memory manager node allocated for this object.
		 */
		void *st_mm_node;
		/**
		 * Element within memory_region->objects or region->purgeable
		 * if the object is marked as DONTNEED. Access is protected by
		 * region->obj_lock.
		 */
		struct list_head region_link;

		struct sg_table *pages;
		void *mapping;
		struct i915_page_sizes {
			/**
			 * The sg mask of the pages sg_table, i.e. the mask
			 * of the lengths for each sg entry.
			 */
			unsigned int phys;

			/**
			 * The gtt page sizes we are allowed to use given the
			 * sg mask and the supported page sizes. This will
			 * express the smallest unit we can use for the whole
			 * object, as well as the larger sizes we may be able
			 * to use opportunistically.
			 */
			unsigned int sg;

			/**
			 * The actual gtt page size usage. Since we can have
			 * multiple vma associated with this object we need to
			 * prevent any trampling of state, hence a copy of
			 * this struct also lives in each vma, therefore the
			 * gtt value here should only be read/written through
			 * the vma.
			 */
			unsigned int gtt;
		} page_sizes;
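
		/*
		 * Example (illustrative): an object backed by one 2M and one
		 * 4K sg entry would have page_sizes.phys == SZ_2M | SZ_4K,
		 * while page_sizes.sg keeps only the sizes the GTT can
		 * actually use for the whole object.
		 */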
		I915_SELFTEST_DECLARE(unsigned int page_mask);
		struct i915_gem_object_page_iter get_page;
		struct i915_gem_object_page_iter get_dma_page;
		/**
		 * Element within i915->mm.unbound_list or i915->mm.bound_list,
		 * locked by i915->mm.obj_lock.
		 */
		struct list_head link;

		/**
		 * Advice: are the backing pages purgeable?
		 */
		unsigned int madv:2;

		/**
		 * This is set if the object has been written to since the
		 * pages were last acquired.
		 */
		bool dirty:1;
	} mm;
	struct {
		struct sg_table *cached_io_st;
		struct i915_gem_object_page_iter get_io_page;
	} ttm;
	/** Record of address bit 17 of each page at last unbind. */
	unsigned long *bit_17;
#ifdef CONFIG_MMU_NOTIFIER
	struct i915_gem_userptr {
		unsigned long notifier_seq;

		struct mmu_interval_notifier notifier;
	} userptr;
#endif
	struct drm_mm_node *stolen;

	unsigned long scratch;
};
static inline struct drm_i915_gem_object *
to_intel_bo(struct drm_gem_object *gem)
{
	/*
	 * Assert that to_intel_bo(NULL) == NULL: container_of() subtracts
	 * offsetof(base), which must therefore be zero.
	 */
	BUILD_BUG_ON(offsetof(struct drm_i915_gem_object, base));

	return container_of(gem, struct drm_i915_gem_object, base);
}

#endif /* __I915_GEM_OBJECT_TYPES_H__ */