/*
 * SPDX-License-Identifier: MIT
 *
 * Copyright © 2016 Intel Corporation
 */
#ifndef __I915_GEM_OBJECT_H__
#define __I915_GEM_OBJECT_H__

#include <drm/drm_gem.h>
#include <drm/drm_file.h>
#include <drm/drm_device.h>

#include "display/intel_frontbuffer.h"
#include "i915_gem_object_types.h"
#include "i915_gem_gtt.h"
#include "i915_gem_ww.h"
#include "i915_vma_types.h"
/*
 * XXX: There is a prevalence of the assumption that we fit the
 * object's page count inside a 32bit _signed_ variable. Let's document
 * this and catch if we ever need to fix it. In the meantime, if you do
 * spot such a local variable, please consider fixing!
 *
 * Aside from our own locals (for which we have no excuse!):
 * - sg_table embeds unsigned int for num_pages
 * - get_user_pages*() mixed ints with longs
 */
#define GEM_CHECK_SIZE_OVERFLOW(sz) \
	GEM_WARN_ON((sz) >> PAGE_SHIFT > INT_MAX)
static inline bool i915_gem_object_size_2big(u64 size)
{
	struct drm_i915_gem_object *obj;

	if (GEM_CHECK_SIZE_OVERFLOW(size))
		return true;

	if (overflows_type(size, obj->base.size))
		return true;

	return false;
}
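
/*
 * Illustrative sketch (not a caller taken from the driver): object creation
 * paths are expected to reject oversized requests before allocating anything,
 * along these lines:
 *
 *	if (i915_gem_object_size_2big(size))
 *		return ERR_PTR(-E2BIG);
 *
 *	obj = i915_gem_object_alloc();
 */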
void i915_gem_init__objects(struct drm_i915_private *i915);

struct drm_i915_gem_object *i915_gem_object_alloc(void);
void i915_gem_object_free(struct drm_i915_gem_object *obj);

void i915_gem_object_init(struct drm_i915_gem_object *obj,
			  const struct drm_i915_gem_object_ops *ops,
			  struct lock_class_key *key,
			  unsigned alloc_flags);
struct drm_i915_gem_object *
i915_gem_object_create_shmem(struct drm_i915_private *i915,
			     resource_size_t size);
struct drm_i915_gem_object *
i915_gem_object_create_shmem_from_data(struct drm_i915_private *i915,
				       const void *data, resource_size_t size);

extern const struct drm_i915_gem_object_ops i915_gem_shmem_ops;
void __i915_gem_object_release_shmem(struct drm_i915_gem_object *obj,
				     struct sg_table *pages,
				     bool needs_clflush);
int i915_gem_object_pwrite_phys(struct drm_i915_gem_object *obj,
				const struct drm_i915_gem_pwrite *args);
int i915_gem_object_pread_phys(struct drm_i915_gem_object *obj,
			       const struct drm_i915_gem_pread *args);

int i915_gem_object_attach_phys(struct drm_i915_gem_object *obj, int align);
void i915_gem_object_put_pages_shmem(struct drm_i915_gem_object *obj,
				     struct sg_table *pages);
void i915_gem_object_put_pages_phys(struct drm_i915_gem_object *obj,
				    struct sg_table *pages);

void i915_gem_flush_free_objects(struct drm_i915_private *i915);
struct sg_table *
__i915_gem_object_unset_pages(struct drm_i915_gem_object *obj);
void i915_gem_object_truncate(struct drm_i915_gem_object *obj);
/**
 * i915_gem_object_lookup_rcu - look up a temporary GEM object from its handle
 * @file: DRM file private data
 * @handle: userspace handle
 *
 * Returns:
 *
 * A pointer to the object named by the handle if such exists on @file, NULL
 * otherwise. This object is only valid whilst under the RCU read lock, and
 * note carefully the object may be in the process of being destroyed.
 */
static inline struct drm_i915_gem_object *
i915_gem_object_lookup_rcu(struct drm_file *file, u32 handle)
{
#ifdef CONFIG_LOCKDEP
	WARN_ON(debug_locks && !lock_is_held(&rcu_lock_map));
#endif
	return idr_find(&file->object_idr, handle);
}
static inline struct drm_i915_gem_object *
i915_gem_object_get_rcu(struct drm_i915_gem_object *obj)
{
	if (obj && !kref_get_unless_zero(&obj->base.refcount))
		obj = NULL;

	return obj;
}
static inline struct drm_i915_gem_object *
i915_gem_object_lookup(struct drm_file *file, u32 handle)
{
	struct drm_i915_gem_object *obj;

	rcu_read_lock();
	obj = i915_gem_object_lookup_rcu(file, handle);
	obj = i915_gem_object_get_rcu(obj);
	rcu_read_unlock();

	return obj;
}
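
/*
 * Illustrative sketch of the two lookup flavours above ("file", "handle",
 * "size" and the error handling are placeholders, not code from the driver):
 *
 *	// Peek at the object without taking a reference; the pointer is
 *	// only usable inside the RCU read-side critical section.
 *	rcu_read_lock();
 *	obj = i915_gem_object_lookup_rcu(file, handle);
 *	if (obj)
 *		size = obj->base.size;
 *	rcu_read_unlock();
 *
 *	// Take a full reference when the object must outlive the RCU section.
 *	obj = i915_gem_object_lookup(file, handle);
 *	if (!obj)
 *		return -ENOENT;
 *	// ... use obj ...
 *	i915_gem_object_put(obj);
 */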
__deprecated
struct drm_gem_object *
drm_gem_object_lookup(struct drm_file *file, u32 handle);
__attribute__((nonnull))
static inline struct drm_i915_gem_object *
i915_gem_object_get(struct drm_i915_gem_object *obj)
{
	drm_gem_object_get(&obj->base);
	return obj;
}
__attribute__((nonnull))
static inline void
i915_gem_object_put(struct drm_i915_gem_object *obj)
{
	__drm_gem_object_put(&obj->base);
}
#define assert_object_held(obj) dma_resv_assert_held((obj)->base.resv)
/*
 * If more than one potential simultaneous locker, assert held.
 */
static inline void assert_object_held_shared(struct drm_i915_gem_object *obj)
{
	/*
	 * Note mm list lookup is protected by
	 * kref_get_unless_zero().
	 */
	if (IS_ENABLED(CONFIG_LOCKDEP) &&
	    kref_read(&obj->base.refcount) > 0)
		assert_object_held(obj);
}
static inline int __i915_gem_object_lock(struct drm_i915_gem_object *obj,
					 struct i915_gem_ww_ctx *ww,
					 bool intr)
{
	int ret;

	if (intr)
		ret = dma_resv_lock_interruptible(obj->base.resv, ww ? &ww->ctx : NULL);
	else
		ret = dma_resv_lock(obj->base.resv, ww ? &ww->ctx : NULL);

	if (!ret && ww) {
		i915_gem_object_get(obj);
		list_add_tail(&obj->obj_link, &ww->obj_list);
	}
	if (ret == -EALREADY)
		ret = 0;

	if (ret == -EDEADLK) {
		i915_gem_object_get(obj);
		ww->contended = obj;
	}

	return ret;
}
static inline int i915_gem_object_lock(struct drm_i915_gem_object *obj,
				       struct i915_gem_ww_ctx *ww)
{
	return __i915_gem_object_lock(obj, ww, ww && ww->intr);
}
static inline int i915_gem_object_lock_interruptible(struct drm_i915_gem_object *obj,
						     struct i915_gem_ww_ctx *ww)
{
	WARN_ON(ww && !ww->intr);
	return __i915_gem_object_lock(obj, ww, true);
}
static inline bool i915_gem_object_trylock(struct drm_i915_gem_object *obj)
{
	return dma_resv_trylock(obj->base.resv);
}
static inline void i915_gem_object_unlock(struct drm_i915_gem_object *obj)
{
	if (obj->ops->adjust_lru)
		obj->ops->adjust_lru(obj);

	dma_resv_unlock(obj->base.resv);
}
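
/*
 * Illustrative sketch of the ww retry dance these helpers are built for
 * (do_work() is a placeholder; objects locked against a ww context are
 * released by i915_gem_ww_ctx_fini(), so no explicit unlock is shown):
 *
 *	struct i915_gem_ww_ctx ww;
 *	int err;
 *
 *	i915_gem_ww_ctx_init(&ww, true);
 * retry:
 *	err = i915_gem_object_lock(obj, &ww);
 *	if (!err)
 *		err = do_work(obj);
 *	if (err == -EDEADLK) {
 *		err = i915_gem_ww_ctx_backoff(&ww);
 *		if (!err)
 *			goto retry;
 *	}
 *	i915_gem_ww_ctx_fini(&ww);
 */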
static inline void
i915_gem_object_set_readonly(struct drm_i915_gem_object *obj)
{
	obj->flags |= I915_BO_READONLY;
}

static inline bool
i915_gem_object_is_readonly(const struct drm_i915_gem_object *obj)
{
	return obj->flags & I915_BO_READONLY;
}

static inline bool
i915_gem_object_is_contiguous(const struct drm_i915_gem_object *obj)
{
	return obj->flags & I915_BO_ALLOC_CONTIGUOUS;
}

static inline bool
i915_gem_object_is_volatile(const struct drm_i915_gem_object *obj)
{
	return obj->flags & I915_BO_ALLOC_VOLATILE;
}

static inline void
i915_gem_object_set_volatile(struct drm_i915_gem_object *obj)
{
	obj->flags |= I915_BO_ALLOC_VOLATILE;
}

static inline bool
i915_gem_object_has_tiling_quirk(struct drm_i915_gem_object *obj)
{
	return test_bit(I915_TILING_QUIRK_BIT, &obj->flags);
}

static inline void
i915_gem_object_set_tiling_quirk(struct drm_i915_gem_object *obj)
{
	set_bit(I915_TILING_QUIRK_BIT, &obj->flags);
}

static inline void
i915_gem_object_clear_tiling_quirk(struct drm_i915_gem_object *obj)
{
	clear_bit(I915_TILING_QUIRK_BIT, &obj->flags);
}
static inline bool
i915_gem_object_type_has(const struct drm_i915_gem_object *obj,
			 unsigned long flags)
{
	return obj->ops->flags & flags;
}
static inline bool
i915_gem_object_has_struct_page(const struct drm_i915_gem_object *obj)
{
	return obj->flags & I915_BO_ALLOC_STRUCT_PAGE;
}

static inline bool
i915_gem_object_has_iomem(const struct drm_i915_gem_object *obj)
{
	return i915_gem_object_type_has(obj, I915_GEM_OBJECT_HAS_IOMEM);
}

static inline bool
i915_gem_object_is_shrinkable(const struct drm_i915_gem_object *obj)
{
	return i915_gem_object_type_has(obj, I915_GEM_OBJECT_IS_SHRINKABLE);
}

static inline bool
i915_gem_object_is_proxy(const struct drm_i915_gem_object *obj)
{
	return i915_gem_object_type_has(obj, I915_GEM_OBJECT_IS_PROXY);
}

static inline bool
i915_gem_object_never_mmap(const struct drm_i915_gem_object *obj)
{
	return i915_gem_object_type_has(obj, I915_GEM_OBJECT_NO_MMAP);
}

static inline bool
i915_gem_object_is_framebuffer(const struct drm_i915_gem_object *obj)
{
	return READ_ONCE(obj->frontbuffer);
}
static inline unsigned int
i915_gem_object_get_tiling(const struct drm_i915_gem_object *obj)
{
	return obj->tiling_and_stride & TILING_MASK;
}

static inline bool
i915_gem_object_is_tiled(const struct drm_i915_gem_object *obj)
{
	return i915_gem_object_get_tiling(obj) != I915_TILING_NONE;
}

static inline unsigned int
i915_gem_object_get_stride(const struct drm_i915_gem_object *obj)
{
	return obj->tiling_and_stride & STRIDE_MASK;
}
static inline unsigned int
i915_gem_tile_height(unsigned int tiling)
{
	GEM_BUG_ON(!tiling);
	return tiling == I915_TILING_Y ? 32 : 8;
}

static inline unsigned int
i915_gem_object_get_tile_height(const struct drm_i915_gem_object *obj)
{
	return i915_gem_tile_height(i915_gem_object_get_tiling(obj));
}

static inline unsigned int
i915_gem_object_get_tile_row_size(const struct drm_i915_gem_object *obj)
{
	return (i915_gem_object_get_stride(obj) *
		i915_gem_object_get_tile_height(obj));
}
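
/*
 * Worked example of the helpers above: an X-tiled object (8 rows per tile)
 * with a 512 byte stride has a tile row of 512 * 8 = 4096 bytes, while a
 * Y-tiled object (32 rows per tile) with the same stride spans
 * 512 * 32 = 16384 bytes per tile row. The figures follow directly from
 * i915_gem_tile_height() and are illustrative only.
 */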
int i915_gem_object_set_tiling(struct drm_i915_gem_object *obj,
			       unsigned int tiling, unsigned int stride);
struct scatterlist *
__i915_gem_object_get_sg(struct drm_i915_gem_object *obj,
			 struct i915_gem_object_page_iter *iter,
			 unsigned int n,
			 unsigned int *offset, bool allow_alloc, bool dma);

static inline struct scatterlist *
i915_gem_object_get_sg(struct drm_i915_gem_object *obj,
		       unsigned int n,
		       unsigned int *offset, bool allow_alloc)
{
	return __i915_gem_object_get_sg(obj, &obj->mm.get_page, n, offset, allow_alloc, false);
}

static inline struct scatterlist *
i915_gem_object_get_sg_dma(struct drm_i915_gem_object *obj,
			   unsigned int n,
			   unsigned int *offset, bool allow_alloc)
{
	return __i915_gem_object_get_sg(obj, &obj->mm.get_dma_page, n, offset, allow_alloc, true);
}
struct page *
i915_gem_object_get_page(struct drm_i915_gem_object *obj,
			 unsigned int n);

struct page *
i915_gem_object_get_dirty_page(struct drm_i915_gem_object *obj,
			       unsigned int n);

dma_addr_t
i915_gem_object_get_dma_address_len(struct drm_i915_gem_object *obj,
				    unsigned long n,
				    unsigned int *len);

dma_addr_t
i915_gem_object_get_dma_address(struct drm_i915_gem_object *obj,
				unsigned long n);
void __i915_gem_object_set_pages(struct drm_i915_gem_object *obj,
				 struct sg_table *pages,
				 unsigned int sg_page_sizes);

int ____i915_gem_object_get_pages(struct drm_i915_gem_object *obj);
int __i915_gem_object_get_pages(struct drm_i915_gem_object *obj);
static inline int __must_check
i915_gem_object_pin_pages(struct drm_i915_gem_object *obj)
{
	assert_object_held(obj);

	if (atomic_inc_not_zero(&obj->mm.pages_pin_count))
		return 0;

	return __i915_gem_object_get_pages(obj);
}

int i915_gem_object_pin_pages_unlocked(struct drm_i915_gem_object *obj);
static inline bool
i915_gem_object_has_pages(struct drm_i915_gem_object *obj)
{
	return !IS_ERR_OR_NULL(READ_ONCE(obj->mm.pages));
}

static inline void
__i915_gem_object_pin_pages(struct drm_i915_gem_object *obj)
{
	GEM_BUG_ON(!i915_gem_object_has_pages(obj));

	atomic_inc(&obj->mm.pages_pin_count);
}

static inline bool
i915_gem_object_has_pinned_pages(struct drm_i915_gem_object *obj)
{
	return atomic_read(&obj->mm.pages_pin_count);
}

static inline void
__i915_gem_object_unpin_pages(struct drm_i915_gem_object *obj)
{
	GEM_BUG_ON(!i915_gem_object_has_pages(obj));
	GEM_BUG_ON(!i915_gem_object_has_pinned_pages(obj));

	atomic_dec(&obj->mm.pages_pin_count);
}

static inline void
i915_gem_object_unpin_pages(struct drm_i915_gem_object *obj)
{
	__i915_gem_object_unpin_pages(obj);
}
int __i915_gem_object_put_pages(struct drm_i915_gem_object *obj);
void i915_gem_object_truncate(struct drm_i915_gem_object *obj);
void i915_gem_object_writeback(struct drm_i915_gem_object *obj);
/**
 * i915_gem_object_pin_map - return a contiguous mapping of the entire object
 * @obj: the object to map into kernel address space
 * @type: the type of mapping, used to select pgprot_t
 *
 * Calls i915_gem_object_pin_pages() to prevent reaping of the object's
 * pages and then returns a contiguous mapping of the backing storage into
 * the kernel address space. Based on the @type of mapping, the PTE will be
 * set to either WriteBack or WriteCombine (via pgprot_t).
 *
 * The caller is responsible for calling i915_gem_object_unpin_map() when the
 * mapping is no longer required.
 *
 * Returns the pointer through which to access the mapped object, or an
 * ERR_PTR() on error.
 */
void *__must_check i915_gem_object_pin_map(struct drm_i915_gem_object *obj,
					   enum i915_map_type type);

void *__must_check i915_gem_object_pin_map_unlocked(struct drm_i915_gem_object *obj,
						    enum i915_map_type type);
void __i915_gem_object_flush_map(struct drm_i915_gem_object *obj,
				 unsigned long offset,
				 unsigned long size);
static inline void i915_gem_object_flush_map(struct drm_i915_gem_object *obj)
{
	__i915_gem_object_flush_map(obj, 0, obj->base.size);
}
/**
 * i915_gem_object_unpin_map - releases an earlier mapping
 * @obj: the object to unmap
 *
 * After pinning the object and mapping its pages, once you are finished
 * with your access, call i915_gem_object_unpin_map() to release the pin
 * upon the mapping. Once the pin count reaches zero, that mapping may
 * vanish.
 */
static inline void i915_gem_object_unpin_map(struct drm_i915_gem_object *obj)
{
	i915_gem_object_unpin_pages(obj);
}
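
/*
 * Illustrative sketch of the pin_map lifecycle described above (error
 * handling trimmed; "data" and "len" are placeholders, not driver code):
 *
 *	void *vaddr;
 *
 *	vaddr = i915_gem_object_pin_map_unlocked(obj, I915_MAP_WB);
 *	if (IS_ERR(vaddr))
 *		return PTR_ERR(vaddr);
 *
 *	memcpy(vaddr, data, len);
 *	__i915_gem_object_flush_map(obj, 0, len);
 *	i915_gem_object_unpin_map(obj);
 */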
void __i915_gem_object_release_map(struct drm_i915_gem_object *obj);

int i915_gem_object_prepare_read(struct drm_i915_gem_object *obj,
				 unsigned int *needs_clflush);
int i915_gem_object_prepare_write(struct drm_i915_gem_object *obj,
				  unsigned int *needs_clflush);
#define CLFLUSH_BEFORE	BIT(0)
#define CLFLUSH_AFTER	BIT(1)
#define CLFLUSH_FLAGS	(CLFLUSH_BEFORE | CLFLUSH_AFTER)
static inline void
i915_gem_object_finish_access(struct drm_i915_gem_object *obj)
{
	i915_gem_object_unpin_pages(obj);
}
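
/*
 * Illustrative sketch of the prepare/finish access protocol for a CPU write
 * to the first page ("data" and "len" are placeholders, len is assumed to fit
 * within one page, and the caller is assumed to hold whatever locks
 * i915_gem_object_prepare_write() itself requires):
 *
 *	unsigned int needs_clflush;
 *	struct page *page;
 *	void *vaddr;
 *	int err;
 *
 *	err = i915_gem_object_prepare_write(obj, &needs_clflush);
 *	if (err)
 *		return err;
 *
 *	page = i915_gem_object_get_dirty_page(obj, 0);
 *	vaddr = kmap(page);
 *	if (needs_clflush & CLFLUSH_BEFORE)
 *		drm_clflush_virt_range(vaddr, len);
 *	memcpy(vaddr, data, len);
 *	if (needs_clflush & CLFLUSH_AFTER)
 *		drm_clflush_virt_range(vaddr, len);
 *	kunmap(page);
 *
 *	i915_gem_object_finish_access(obj);
 */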
static inline struct intel_engine_cs *
i915_gem_object_last_write_engine(struct drm_i915_gem_object *obj)
{
	struct intel_engine_cs *engine = NULL;
	struct dma_fence *fence;

	rcu_read_lock();
	fence = dma_resv_get_excl_unlocked(obj->base.resv);
	rcu_read_unlock();

	if (fence && dma_fence_is_i915(fence) && !dma_fence_is_signaled(fence))
		engine = to_request(fence)->engine;
	dma_fence_put(fence);

	return engine;
}
void i915_gem_object_set_cache_coherency(struct drm_i915_gem_object *obj,
					 unsigned int cache_level);
void i915_gem_object_flush_if_display(struct drm_i915_gem_object *obj);
void i915_gem_object_flush_if_display_locked(struct drm_i915_gem_object *obj);
int __must_check
i915_gem_object_set_to_wc_domain(struct drm_i915_gem_object *obj, bool write);
int __must_check
i915_gem_object_set_to_gtt_domain(struct drm_i915_gem_object *obj, bool write);
int __must_check
i915_gem_object_set_to_cpu_domain(struct drm_i915_gem_object *obj, bool write);
struct i915_vma * __must_check
i915_gem_object_pin_to_display_plane(struct drm_i915_gem_object *obj,
				     struct i915_gem_ww_ctx *ww,
				     u32 alignment,
				     const struct i915_ggtt_view *view,
				     unsigned int flags);
void i915_gem_object_make_unshrinkable(struct drm_i915_gem_object *obj);
void i915_gem_object_make_shrinkable(struct drm_i915_gem_object *obj);
void i915_gem_object_make_purgeable(struct drm_i915_gem_object *obj);
static inline bool cpu_write_needs_clflush(struct drm_i915_gem_object *obj)
{
	if (obj->cache_dirty)
		return false;

	if (!(obj->cache_coherent & I915_BO_CACHE_COHERENT_FOR_WRITE))
		return true;

	/* Currently in use by HW (display engine)? Keep flushed. */
	return i915_gem_object_is_framebuffer(obj);
}
static inline void __start_cpu_write(struct drm_i915_gem_object *obj)
{
	obj->read_domains = I915_GEM_DOMAIN_CPU;
	obj->write_domain = I915_GEM_DOMAIN_CPU;
	if (cpu_write_needs_clflush(obj))
		obj->cache_dirty = true;
}
void i915_gem_fence_wait_priority(struct dma_fence *fence,
				  const struct i915_sched_attr *attr);

int i915_gem_object_wait(struct drm_i915_gem_object *obj,
			 unsigned int flags,
			 long timeout);
int i915_gem_object_wait_priority(struct drm_i915_gem_object *obj,
				  unsigned int flags,
				  const struct i915_sched_attr *attr);
void __i915_gem_object_flush_frontbuffer(struct drm_i915_gem_object *obj,
					 enum fb_op_origin origin);
void __i915_gem_object_invalidate_frontbuffer(struct drm_i915_gem_object *obj,
					      enum fb_op_origin origin);
static inline void
i915_gem_object_flush_frontbuffer(struct drm_i915_gem_object *obj,
				  enum fb_op_origin origin)
{
	if (unlikely(rcu_access_pointer(obj->frontbuffer)))
		__i915_gem_object_flush_frontbuffer(obj, origin);
}

static inline void
i915_gem_object_invalidate_frontbuffer(struct drm_i915_gem_object *obj,
				       enum fb_op_origin origin)
{
	if (unlikely(rcu_access_pointer(obj->frontbuffer)))
		__i915_gem_object_invalidate_frontbuffer(obj, origin);
}
int i915_gem_object_read_from_page(struct drm_i915_gem_object *obj, u64 offset, void *dst, int size);

bool i915_gem_object_is_shmem(const struct drm_i915_gem_object *obj);

void __i915_gem_free_object_rcu(struct rcu_head *head);

void __i915_gem_free_object(struct drm_i915_gem_object *obj);

bool i915_gem_object_evictable(struct drm_i915_gem_object *obj);

bool i915_gem_object_migratable(struct drm_i915_gem_object *obj);

bool i915_gem_object_validates_to_lmem(struct drm_i915_gem_object *obj);
#ifdef CONFIG_MMU_NOTIFIER
static inline bool
i915_gem_object_is_userptr(struct drm_i915_gem_object *obj)
{
	return obj->userptr.notifier.mm;
}

int i915_gem_object_userptr_submit_init(struct drm_i915_gem_object *obj);
int i915_gem_object_userptr_submit_done(struct drm_i915_gem_object *obj);
int i915_gem_object_userptr_validate(struct drm_i915_gem_object *obj);
#else
static inline bool i915_gem_object_is_userptr(struct drm_i915_gem_object *obj) { return false; }

static inline int i915_gem_object_userptr_submit_init(struct drm_i915_gem_object *obj) { GEM_BUG_ON(1); return -ENODEV; }
static inline int i915_gem_object_userptr_submit_done(struct drm_i915_gem_object *obj) { GEM_BUG_ON(1); return -ENODEV; }
static inline int i915_gem_object_userptr_validate(struct drm_i915_gem_object *obj) { GEM_BUG_ON(1); return -ENODEV; }
#endif

#endif /* __I915_GEM_OBJECT_H__ */