1 // SPDX-License-Identifier: MIT
3 * Copyright © 2021 Intel Corporation
6 #include <drm/ttm/ttm_bo_driver.h>
7 #include <drm/ttm/ttm_placement.h>
10 #include "intel_memory_region.h"
11 #include "intel_region_ttm.h"
13 #include "gem/i915_gem_mman.h"
14 #include "gem/i915_gem_object.h"
15 #include "gem/i915_gem_region.h"
16 #include "gem/i915_gem_ttm.h"
17 #include "gem/i915_gem_ttm_move.h"
18 #include "gem/i915_gem_ttm_pm.h"
20 #define I915_TTM_PRIO_PURGE 0
21 #define I915_TTM_PRIO_NO_PAGES 1
22 #define I915_TTM_PRIO_HAS_PAGES 2
25 * Size of struct ttm_place vector in on-stack struct ttm_placement allocs
27 #define I915_TTM_MAX_PLACEMENTS INTEL_REGION_UNKNOWN
30 * struct i915_ttm_tt - TTM page vector with additional private information
31 * @ttm: The base TTM page vector.
32 * @dev: The struct device used for dma mapping and unmapping.
33 * @cached_rsgt: The cached scatter-gather table.
34 * @is_shmem: Set if using shmem.
35 * @filp: The shmem file, if using shmem backend.
37 * Note that DMA may be going on right up to the point where the page-
38 * vector is unpopulated in delayed destroy. Hence keep the
39 * scatter-gather table mapped and cached up to that point. This is
40 * different from the cached gem object io scatter-gather table which
41 * doesn't have an associated dma mapping.
46 struct i915_refct_sgt cached_rsgt;
52 static const struct ttm_place sys_placement_flags = {
55 .mem_type = I915_PL_SYSTEM,
59 static struct ttm_placement i915_sys_placement = {
61 .placement = &sys_placement_flags,
62 .num_busy_placement = 1,
63 .busy_placement = &sys_placement_flags,
67 * i915_ttm_sys_placement - Return the struct ttm_placement to be
68 * used for an object in system memory.
70 * Rather than making the struct extern, use this function.
73 * Return: A pointer to a static variable for sys placement.
75 struct ttm_placement *i915_ttm_sys_placement(void)
77 return &i915_sys_placement;
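/*
 * A minimal usage sketch (illustrative, not from this file): code outside
 * this translation unit, e.g. the suspend/resume backup path, can move a
 * buffer object to system memory with the helper above. The
 * ttm_operation_ctx values below are assumptions for the example only.
 *
 *	struct ttm_operation_ctx ctx = { .interruptible = true };
 *	int err;
 *
 *	err = ttm_bo_validate(bo, i915_ttm_sys_placement(), &ctx);
 *	if (err)
 *		return i915_ttm_err_to_gem(err);
 */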
80 static int i915_ttm_err_to_gem(int err)
89 * TTM likes to convert -EDEADLK to -EBUSY, and wants us to
90 * restart the operation, since we don't record the contending
91 * lock. We use -EAGAIN to restart.
96 * Memory type / region is full, and we can't evict.
97 * Except possibly system, which returns -ENOMEM.
107 static enum ttm_caching
108 i915_ttm_select_tt_caching(const struct drm_i915_gem_object *obj)
111 * Objects only allowed in system memory get cached cpu-mappings, as do
112 * lmem-only buffers being evicted to system for swapping. All other
113 * objects get WC mappings for now, even when placed in system memory.
115 if (obj->mm.n_placements <= 1)
118 return ttm_write_combined;
122 i915_ttm_place_from_region(const struct intel_memory_region *mr,
123 struct ttm_place *place,
126 memset(place, 0, sizeof(*place));
127 place->mem_type = intel_region_to_ttm_type(mr);
129 if (flags & I915_BO_ALLOC_CONTIGUOUS)
130 place->flags = TTM_PL_FLAG_CONTIGUOUS;
134 i915_ttm_placement_from_obj(const struct drm_i915_gem_object *obj,
135 struct ttm_place *requested,
136 struct ttm_place *busy,
137 struct ttm_placement *placement)
139 unsigned int num_allowed = obj->mm.n_placements;
140 unsigned int flags = obj->flags;
143 placement->num_placement = 1;
144 i915_ttm_place_from_region(num_allowed ? obj->mm.placements[0] :
145 obj->mm.region, requested, flags);
147 /* Cache this on object? */
148 placement->num_busy_placement = num_allowed;
149 for (i = 0; i < placement->num_busy_placement; ++i)
150 i915_ttm_place_from_region(obj->mm.placements[i], busy + i, flags);
152 if (num_allowed == 0) {
154 placement->num_busy_placement = 1;
157 placement->placement = requested;
158 placement->busy_placement = busy;
161 static int i915_ttm_tt_shmem_populate(struct ttm_device *bdev,
163 struct ttm_operation_ctx *ctx)
165 struct drm_i915_private *i915 = container_of(bdev, typeof(*i915), bdev);
166 struct intel_memory_region *mr = i915->mm.regions[INTEL_MEMORY_SYSTEM];
167 struct i915_ttm_tt *i915_tt = container_of(ttm, typeof(*i915_tt), ttm);
168 const unsigned int max_segment = i915_sg_segment_size();
169 const size_t size = (size_t)ttm->num_pages << PAGE_SHIFT;
170 struct file *filp = i915_tt->filp;
171 struct sgt_iter sgt_iter;
178 struct address_space *mapping;
181 filp = shmem_file_setup("i915-shmem-tt", size, VM_NORESERVE);
183 return PTR_ERR(filp);
185 mask = GFP_HIGHUSER | __GFP_RECLAIMABLE;
187 mapping = filp->f_mapping;
188 mapping_set_gfp_mask(mapping, mask);
189 GEM_BUG_ON(!(mapping_gfp_mask(mapping) & __GFP_RECLAIM));
191 i915_tt->filp = filp;
194 st = &i915_tt->cached_rsgt.table;
195 err = shmem_sg_alloc_table(i915, st, size, mr, filp->f_mapping,
200 err = dma_map_sgtable(i915_tt->dev, st, DMA_BIDIRECTIONAL,
201 DMA_ATTR_SKIP_CPU_SYNC);
206 for_each_sgt_page(page, sgt_iter, st)
207 ttm->pages[i++] = page;
209 if (ttm->page_flags & TTM_TT_FLAG_SWAPPED)
210 ttm->page_flags &= ~TTM_TT_FLAG_SWAPPED;
215 shmem_sg_free_table(st, filp->f_mapping, false, false);
220 static void i915_ttm_tt_shmem_unpopulate(struct ttm_tt *ttm)
222 struct i915_ttm_tt *i915_tt = container_of(ttm, typeof(*i915_tt), ttm);
223 bool backup = ttm->page_flags & TTM_TT_FLAG_SWAPPED;
224 struct sg_table *st = &i915_tt->cached_rsgt.table;
226 shmem_sg_free_table(st, file_inode(i915_tt->filp)->i_mapping,
230 static void i915_ttm_tt_release(struct kref *ref)
232 struct i915_ttm_tt *i915_tt =
233 container_of(ref, typeof(*i915_tt), cached_rsgt.kref);
234 struct sg_table *st = &i915_tt->cached_rsgt.table;
236 GEM_WARN_ON(st->sgl);
241 static const struct i915_refct_sgt_ops tt_rsgt_ops = {
242 .release = i915_ttm_tt_release
245 static struct ttm_tt *i915_ttm_tt_create(struct ttm_buffer_object *bo,
248 struct ttm_resource_manager *man =
249 ttm_manager_type(bo->bdev, bo->resource->mem_type);
250 struct drm_i915_gem_object *obj = i915_ttm_to_gem(bo);
251 enum ttm_caching caching;
252 struct i915_ttm_tt *i915_tt;
258 i915_tt = kzalloc(sizeof(*i915_tt), GFP_KERNEL);
262 if (obj->flags & I915_BO_ALLOC_CPU_CLEAR &&
264 page_flags |= TTM_TT_FLAG_ZERO_ALLOC;
266 caching = i915_ttm_select_tt_caching(obj);
267 if (i915_gem_object_is_shrinkable(obj) && caching == ttm_cached) {
268 page_flags |= TTM_TT_FLAG_EXTERNAL |
269 TTM_TT_FLAG_EXTERNAL_MAPPABLE;
270 i915_tt->is_shmem = true;
273 ret = ttm_tt_init(&i915_tt->ttm, bo, page_flags, caching);
277 __i915_refct_sgt_init(&i915_tt->cached_rsgt, bo->base.size,
280 i915_tt->dev = obj->base.dev->dev;
282 return &i915_tt->ttm;
289 static int i915_ttm_tt_populate(struct ttm_device *bdev,
291 struct ttm_operation_ctx *ctx)
293 struct i915_ttm_tt *i915_tt = container_of(ttm, typeof(*i915_tt), ttm);
295 if (i915_tt->is_shmem)
296 return i915_ttm_tt_shmem_populate(bdev, ttm, ctx);
298 return ttm_pool_alloc(&bdev->pool, ttm, ctx);
301 static void i915_ttm_tt_unpopulate(struct ttm_device *bdev, struct ttm_tt *ttm)
303 struct i915_ttm_tt *i915_tt = container_of(ttm, typeof(*i915_tt), ttm);
304 struct sg_table *st = &i915_tt->cached_rsgt.table;
307 dma_unmap_sgtable(i915_tt->dev, st, DMA_BIDIRECTIONAL, 0);
309 if (i915_tt->is_shmem) {
310 i915_ttm_tt_shmem_unpopulate(ttm);
313 ttm_pool_free(&bdev->pool, ttm);
317 static void i915_ttm_tt_destroy(struct ttm_device *bdev, struct ttm_tt *ttm)
319 struct i915_ttm_tt *i915_tt = container_of(ttm, typeof(*i915_tt), ttm);
325 i915_refct_sgt_put(&i915_tt->cached_rsgt);
328 static bool i915_ttm_eviction_valuable(struct ttm_buffer_object *bo,
329 const struct ttm_place *place)
331 struct drm_i915_gem_object *obj = i915_ttm_to_gem(bo);
337 * EXTERNAL objects should never be swapped out by TTM; instead we need
338 * to handle that ourselves. TTM will already skip such objects for us,
339 * but we would like to avoid grabbing locks for no good reason.
341 if (bo->ttm && bo->ttm->page_flags & TTM_TT_FLAG_EXTERNAL)
344 /* Will do for now. Our pinned objects are still on TTM's LRU lists */
345 return i915_gem_object_evictable(obj);
348 static void i915_ttm_evict_flags(struct ttm_buffer_object *bo,
349 struct ttm_placement *placement)
351 *placement = i915_sys_placement;
355 * i915_ttm_free_cached_io_rsgt - Free object cached LMEM information
356 * @obj: The GEM object
357 * This function frees any LMEM-related information that is cached on
358 * the object, for example the radix tree for fast page lookup and the
359 * cached refcounted sg-table.
361 void i915_ttm_free_cached_io_rsgt(struct drm_i915_gem_object *obj)
363 struct radix_tree_iter iter;
366 if (!obj->ttm.cached_io_rsgt)
370 radix_tree_for_each_slot(slot, &obj->ttm.get_io_page.radix, &iter, 0)
371 radix_tree_delete(&obj->ttm.get_io_page.radix, iter.index);
374 i915_refct_sgt_put(obj->ttm.cached_io_rsgt);
375 obj->ttm.cached_io_rsgt = NULL;
379 * i915_ttm_purge - Clear an object of its memory
382 * This function is called to clear an object of its memory when it is
383 * marked as not needed anymore.
385 * Return: 0 on success, negative error code on failure.
387 int i915_ttm_purge(struct drm_i915_gem_object *obj)
389 struct ttm_buffer_object *bo = i915_gem_to_ttm(obj);
390 struct i915_ttm_tt *i915_tt =
391 container_of(bo->ttm, typeof(*i915_tt), ttm);
392 struct ttm_operation_ctx ctx = {
393 .interruptible = true,
394 .no_wait_gpu = false,
396 struct ttm_placement place = {};
399 if (obj->mm.madv == __I915_MADV_PURGED)
402 ret = ttm_bo_validate(bo, &place, &ctx);
406 if (bo->ttm && i915_tt->filp) {
408 * The below fput() (which eventually calls shmem_truncate) might
409 * be delayed by a worker, so when directly called to purge the
410 * pages (like by the shrinker) we should try to be more
411 * aggressive and release the pages immediately.
413 shmem_truncate_range(file_inode(i915_tt->filp),
415 fput(fetch_and_zero(&i915_tt->filp));
418 obj->write_domain = 0;
419 obj->read_domains = 0;
420 i915_ttm_adjust_gem_after_move(obj);
421 i915_ttm_free_cached_io_rsgt(obj);
422 obj->mm.madv = __I915_MADV_PURGED;
427 static int i915_ttm_shrinker_release_pages(struct drm_i915_gem_object *obj,
429 bool should_writeback)
431 struct ttm_buffer_object *bo = i915_gem_to_ttm(obj);
432 struct i915_ttm_tt *i915_tt =
433 container_of(bo->ttm, typeof(*i915_tt), ttm);
434 struct ttm_operation_ctx ctx = {
435 .interruptible = true,
436 .no_wait_gpu = no_wait_gpu,
438 struct ttm_placement place = {};
441 if (!bo->ttm || bo->resource->mem_type != TTM_PL_SYSTEM)
444 GEM_BUG_ON(!i915_tt->is_shmem);
449 ret = ttm_bo_wait_ctx(bo, &ctx);
453 switch (obj->mm.madv) {
454 case I915_MADV_DONTNEED:
455 return i915_ttm_purge(obj);
456 case __I915_MADV_PURGED:
460 if (bo->ttm->page_flags & TTM_TT_FLAG_SWAPPED)
463 bo->ttm->page_flags |= TTM_TT_FLAG_SWAPPED;
464 ret = ttm_bo_validate(bo, &place, &ctx);
466 bo->ttm->page_flags &= ~TTM_TT_FLAG_SWAPPED;
470 if (should_writeback)
471 __shmem_writeback(obj->base.size, i915_tt->filp->f_mapping);
476 static void i915_ttm_delete_mem_notify(struct ttm_buffer_object *bo)
478 struct drm_i915_gem_object *obj = i915_ttm_to_gem(bo);
481 __i915_gem_object_pages_fini(obj);
482 i915_ttm_free_cached_io_rsgt(obj);
486 static struct i915_refct_sgt *i915_ttm_tt_get_st(struct ttm_tt *ttm)
488 struct i915_ttm_tt *i915_tt = container_of(ttm, typeof(*i915_tt), ttm);
492 if (i915_tt->cached_rsgt.table.sgl)
493 return i915_refct_sgt_get(&i915_tt->cached_rsgt);
495 st = &i915_tt->cached_rsgt.table;
496 ret = sg_alloc_table_from_pages_segment(st,
497 ttm->pages, ttm->num_pages,
498 0, (unsigned long)ttm->num_pages << PAGE_SHIFT,
499 i915_sg_segment_size(), GFP_KERNEL);
505 ret = dma_map_sgtable(i915_tt->dev, st, DMA_BIDIRECTIONAL, 0);
511 return i915_refct_sgt_get(&i915_tt->cached_rsgt);
515 * i915_ttm_resource_get_st - Get a refcounted sg-table pointing to the resource memory.
517 * @obj: The GEM object used for sg-table caching
518 * @res: The struct ttm_resource for which an sg-table is requested.
520 * This function returns a refcounted sg-table representing the memory
521 * pointed to by @res. If @res is the object's current resource it may also
522 * cache the sg_table on the object or attempt to access an already cached
523 * sg-table. The refcounted sg-table needs to be put when no longer in use.
525 * Return: A valid pointer to a struct i915_refct_sgt or error pointer on failure.
528 struct i915_refct_sgt *
529 i915_ttm_resource_get_st(struct drm_i915_gem_object *obj,
530 struct ttm_resource *res)
532 struct ttm_buffer_object *bo = i915_gem_to_ttm(obj);
534 if (!i915_ttm_gtt_binds_lmem(res))
535 return i915_ttm_tt_get_st(bo->ttm);
538 * If CPU mapping differs, we need to add the ttm_tt pages to
539 * the resulting st. Might make sense for GGTT.
541 GEM_WARN_ON(!i915_ttm_cpu_maps_iomem(res));
542 if (bo->resource == res) {
543 if (!obj->ttm.cached_io_rsgt) {
544 struct i915_refct_sgt *rsgt;
546 rsgt = intel_region_ttm_resource_to_rsgt(obj->mm.region,
551 obj->ttm.cached_io_rsgt = rsgt;
553 return i915_refct_sgt_get(obj->ttm.cached_io_rsgt);
556 return intel_region_ttm_resource_to_rsgt(obj->mm.region, res);
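/*
 * Illustrative sketch of the refcount discipline the kerneldoc above asks
 * for: a caller takes a reference via i915_ttm_resource_get_st() and drops
 * it with i915_refct_sgt_put() once the table is no longer needed, rather
 * than freeing the table directly.
 *
 *	struct i915_refct_sgt *rsgt;
 *
 *	rsgt = i915_ttm_resource_get_st(obj, bo->resource);
 *	if (IS_ERR(rsgt))
 *		return PTR_ERR(rsgt);
 *	... use rsgt->table ...
 *	i915_refct_sgt_put(rsgt);
 */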
559 static int i915_ttm_truncate(struct drm_i915_gem_object *obj)
561 struct ttm_buffer_object *bo = i915_gem_to_ttm(obj);
564 WARN_ON_ONCE(obj->mm.madv == I915_MADV_WILLNEED);
566 err = i915_ttm_move_notify(bo);
570 return i915_ttm_purge(obj);
573 static void i915_ttm_swap_notify(struct ttm_buffer_object *bo)
575 struct drm_i915_gem_object *obj = i915_ttm_to_gem(bo);
581 ret = i915_ttm_move_notify(bo);
583 GEM_WARN_ON(obj->ttm.cached_io_rsgt);
584 if (!ret && obj->mm.madv != I915_MADV_WILLNEED)
588 static int i915_ttm_io_mem_reserve(struct ttm_device *bdev, struct ttm_resource *mem)
590 if (!i915_ttm_cpu_maps_iomem(mem))
593 mem->bus.caching = ttm_write_combined;
594 mem->bus.is_iomem = true;
599 static unsigned long i915_ttm_io_mem_pfn(struct ttm_buffer_object *bo,
600 unsigned long page_offset)
602 struct drm_i915_gem_object *obj = i915_ttm_to_gem(bo);
603 struct scatterlist *sg;
608 GEM_WARN_ON(bo->ttm);
610 base = obj->mm.region->iomap.base - obj->mm.region->region.start;
611 sg = __i915_gem_object_get_sg(obj, &obj->ttm.get_io_page, page_offset, &ofs, true);
613 return ((base + sg_dma_address(sg)) >> PAGE_SHIFT) + ofs;
617 * All callbacks need to take care not to downcast a struct ttm_buffer_object
618 * without checking its subclass, since it might be a TTM ghost object.
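/*
 * Sketch of the check referred to above (illustrative; the helper lives in
 * i915_gem_ttm.h and is assumed to return NULL for TTM ghost objects), so a
 * callback bails out before touching any GEM state:
 *
 *	struct drm_i915_gem_object *obj = i915_ttm_to_gem(bo);
 *
 *	if (!obj)
 *		return;
 */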
620 static struct ttm_device_funcs i915_ttm_bo_driver = {
621 .ttm_tt_create = i915_ttm_tt_create,
622 .ttm_tt_populate = i915_ttm_tt_populate,
623 .ttm_tt_unpopulate = i915_ttm_tt_unpopulate,
624 .ttm_tt_destroy = i915_ttm_tt_destroy,
625 .eviction_valuable = i915_ttm_eviction_valuable,
626 .evict_flags = i915_ttm_evict_flags,
627 .move = i915_ttm_move,
628 .swap_notify = i915_ttm_swap_notify,
629 .delete_mem_notify = i915_ttm_delete_mem_notify,
630 .io_mem_reserve = i915_ttm_io_mem_reserve,
631 .io_mem_pfn = i915_ttm_io_mem_pfn,
635 * i915_ttm_driver - Return a pointer to the TTM device funcs
637 * Return: Pointer to statically allocated TTM device funcs.
639 struct ttm_device_funcs *i915_ttm_driver(void)
641 return &i915_ttm_bo_driver;
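/*
 * Rough sketch of how these device funcs get hooked up (the real call lives
 * in the region/TTM setup code; the boolean arguments here are assumptions
 * for illustration only):
 *
 *	err = ttm_device_init(&i915->bdev, i915_ttm_driver(), i915->drm.dev,
 *			      i915->drm.anon_inode->i_mapping,
 *			      i915->drm.vma_offset_manager, false, false);
 */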
644 static int __i915_ttm_get_pages(struct drm_i915_gem_object *obj,
645 struct ttm_placement *placement)
647 struct ttm_buffer_object *bo = i915_gem_to_ttm(obj);
648 struct ttm_operation_ctx ctx = {
649 .interruptible = true,
650 .no_wait_gpu = false,
655 /* First try only the requested placement. No eviction. */
656 real_num_busy = fetch_and_zero(&placement->num_busy_placement);
657 ret = ttm_bo_validate(bo, placement, &ctx);
659 ret = i915_ttm_err_to_gem(ret);
661 * Anything that wants to restart the operation gets to do that.
664 if (ret == -EDEADLK || ret == -EINTR || ret == -ERESTARTSYS ||
669 * If the initial attempt fails, allow all accepted placements,
670 * evicting if necessary.
672 placement->num_busy_placement = real_num_busy;
673 ret = ttm_bo_validate(bo, placement, &ctx);
675 return i915_ttm_err_to_gem(ret);
678 if (bo->ttm && !ttm_tt_is_populated(bo->ttm)) {
679 ret = ttm_tt_populate(bo->bdev, bo->ttm, &ctx);
683 i915_ttm_adjust_domains_after_move(obj);
684 i915_ttm_adjust_gem_after_move(obj);
687 if (!i915_gem_object_has_pages(obj)) {
688 struct i915_refct_sgt *rsgt =
689 i915_ttm_resource_get_st(obj, bo->resource);
692 return PTR_ERR(rsgt);
694 GEM_BUG_ON(obj->mm.rsgt);
696 __i915_gem_object_set_pages(obj, &rsgt->table,
697 i915_sg_dma_sizes(rsgt->table.sgl));
700 i915_ttm_adjust_lru(obj);
704 static int i915_ttm_get_pages(struct drm_i915_gem_object *obj)
706 struct ttm_place requested, busy[I915_TTM_MAX_PLACEMENTS];
707 struct ttm_placement placement;
709 GEM_BUG_ON(obj->mm.n_placements > I915_TTM_MAX_PLACEMENTS);
711 /* Move to the requested placement. */
712 i915_ttm_placement_from_obj(obj, &requested, busy, &placement);
714 return __i915_ttm_get_pages(obj, &placement);
718 * DOC: Migration vs eviction
720 * GEM migration may not be the same as TTM migration / eviction. If
721 * the TTM core decides to evict an object, it may be evicted to a
722 * TTM memory type that is not in the object's allowable GEM regions, or
723 * in fact theoretically to a TTM memory type that doesn't correspond to
724 * a GEM memory region. In that case the object's GEM region is not
725 * updated, and the data is migrated back to the GEM region at
726 * get_pages time. TTM may however set up CPU ptes to the object even
727 * when it is evicted.
728 * GEM forced migration using the i915_ttm_migrate() op is allowed even
729 * to regions that are not in the object's list of allowable placements.
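/*
 * Illustrative example of such a forced migration (the caller sits in GEM
 * core code; i915_gem_object_migrate() and its arguments are shown as an
 * assumption for illustration): migrating to system memory goes through the
 * obj->ops->migrate hook, i.e. i915_ttm_migrate() below, roughly as:
 *
 *	err = i915_gem_object_migrate(obj, NULL, INTEL_REGION_SMEM);
 */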
731 static int i915_ttm_migrate(struct drm_i915_gem_object *obj,
732 struct intel_memory_region *mr)
734 struct ttm_place requested;
735 struct ttm_placement placement;
738 i915_ttm_place_from_region(mr, &requested, obj->flags);
739 placement.num_placement = 1;
740 placement.num_busy_placement = 1;
741 placement.placement = &requested;
742 placement.busy_placement = &requested;
744 ret = __i915_ttm_get_pages(obj, &placement);
749 * Reinitialize the region bindings. This is primarily
750 * required for objects where the new region is not in
751 * its allowable placements.
753 if (obj->mm.region != mr) {
754 i915_gem_object_release_memory_region(obj);
755 i915_gem_object_init_memory_region(obj, mr);
761 static void i915_ttm_put_pages(struct drm_i915_gem_object *obj,
765 * We're currently not called from a shrinker, so put_pages()
766 * typically means the object is about to be destroyed, or called
767 * from move_notify(). So just avoid doing much for now.
768 * If the object is not destroyed next, the TTM eviction logic
769 * and shrinkers will move it out if needed.
773 i915_refct_sgt_put(fetch_and_zero(&obj->mm.rsgt));
777 * i915_ttm_adjust_lru - Adjust an object's position on relevant LRU lists.
780 void i915_ttm_adjust_lru(struct drm_i915_gem_object *obj)
782 struct ttm_buffer_object *bo = i915_gem_to_ttm(obj);
783 struct i915_ttm_tt *i915_tt =
784 container_of(bo->ttm, typeof(*i915_tt), ttm);
786 bo->ttm && i915_tt->filp && ttm_tt_is_populated(bo->ttm);
789 * Don't manipulate the TTM LRUs while in TTM bo destruction.
790 * We're called through i915_ttm_delete_mem_notify().
792 if (!kref_read(&bo->kref))
796 * We skip managing the shrinker LRU in set_pages() and just manage
797 * everything here. This does at least solve the issue with having
798 * temporary shmem mappings (like with evicted lmem) not being visible to
799 * the shrinker. Only our shmem objects are shrinkable; everything else
800 * we keep as unshrinkable.
802 * To make sure everything plays nice we keep an extra shrink pin in TTM
803 * if the underlying pages are not currently shrinkable. Once we release
804 * our pin, like when the pages are moved to shmem, the pages will then
805 * be added to the shrinker LRU, assuming the caller isn't also holding a pin.
808 * TODO: consider maybe also bumping the shrinker list here when we have
809 * already unpinned it, which should give us something more like an LRU.
811 * TODO: There is a small window of opportunity for this function to
812 * get called from eviction after we've dropped the last GEM refcount,
813 * but before the TTM deleted flag is set on the object. Avoid
814 * adjusting the shrinker list in such cases, since the object is
815 * not available to the shrinker anyway due to its zero refcount.
816 * To fix this properly we should move to a TTM shrinker LRU list for
819 if (kref_get_unless_zero(&obj->base.refcount)) {
820 if (shrinkable != obj->mm.ttm_shrinkable) {
822 if (obj->mm.madv == I915_MADV_WILLNEED)
823 __i915_gem_object_make_shrinkable(obj);
825 __i915_gem_object_make_purgeable(obj);
827 i915_gem_object_make_unshrinkable(obj);
830 obj->mm.ttm_shrinkable = shrinkable;
832 i915_gem_object_put(obj);
836 * Put on the correct LRU list depending on the MADV status
838 spin_lock(&bo->bdev->lru_lock);
840 /* Try to keep shmem_tt from being considered for shrinking. */
841 bo->priority = TTM_MAX_BO_PRIORITY - 1;
842 } else if (obj->mm.madv != I915_MADV_WILLNEED) {
843 bo->priority = I915_TTM_PRIO_PURGE;
844 } else if (!i915_gem_object_has_pages(obj)) {
845 if (bo->priority < I915_TTM_PRIO_HAS_PAGES)
846 bo->priority = I915_TTM_PRIO_HAS_PAGES;
848 if (bo->priority > I915_TTM_PRIO_NO_PAGES)
849 bo->priority = I915_TTM_PRIO_NO_PAGES;
852 ttm_bo_move_to_lru_tail(bo, bo->resource, NULL);
853 spin_unlock(&bo->bdev->lru_lock);
857 * TTM-backed gem object destruction requires some clarification.
858 * Basically we have two possibilities here. We can either rely on the
859 * i915 delayed destruction and put the TTM object when the object
860 * is idle. This would be detected by TTM which would bypass the
861 * TTM delayed destroy handling. The other approach is to put the TTM
862 * object early and rely on the TTM destroyed handling, and then free
863 * the leftover parts of the GEM object once TTM's destroyed list handling is
864 * complete. For now, we rely on the latter for two reasons:
865 * a) TTM can evict an object even when it's on the delayed destroy list,
866 * which in theory allows for complete eviction.
867 * b) There is work going on in TTM to allow freeing an object even when
868 * it's not idle, and using the TTM destroyed list handling could help us benefit from that.
871 static void i915_ttm_delayed_free(struct drm_i915_gem_object *obj)
873 GEM_BUG_ON(!obj->ttm.created);
875 ttm_bo_put(i915_gem_to_ttm(obj));
878 static vm_fault_t vm_fault_ttm(struct vm_fault *vmf)
880 struct vm_area_struct *area = vmf->vma;
881 struct ttm_buffer_object *bo = area->vm_private_data;
882 struct drm_device *dev = bo->base.dev;
883 struct drm_i915_gem_object *obj;
887 obj = i915_ttm_to_gem(bo);
889 return VM_FAULT_SIGBUS;
891 /* Sanity check that we allow writing into this object */
892 if (unlikely(i915_gem_object_is_readonly(obj) &&
893 area->vm_flags & VM_WRITE))
894 return VM_FAULT_SIGBUS;
896 ret = ttm_bo_vm_reserve(bo, vmf);
900 if (obj->mm.madv != I915_MADV_WILLNEED) {
901 dma_resv_unlock(bo->base.resv);
902 return VM_FAULT_SIGBUS;
905 if (drm_dev_enter(dev, &idx)) {
906 ret = ttm_bo_vm_fault_reserved(vmf, vmf->vma->vm_page_prot,
907 TTM_BO_VM_NUM_PREFAULT);
910 ret = ttm_bo_vm_dummy_page(vmf, vmf->vma->vm_page_prot);
912 if (ret == VM_FAULT_RETRY && !(vmf->flags & FAULT_FLAG_RETRY_NOWAIT))
915 i915_ttm_adjust_lru(obj);
917 dma_resv_unlock(bo->base.resv);
922 vm_access_ttm(struct vm_area_struct *area, unsigned long addr,
923 void *buf, int len, int write)
925 struct drm_i915_gem_object *obj =
926 i915_ttm_to_gem(area->vm_private_data);
928 if (i915_gem_object_is_readonly(obj) && write)
931 return ttm_bo_vm_access(area, addr, buf, len, write);
934 static void ttm_vm_open(struct vm_area_struct *vma)
936 struct drm_i915_gem_object *obj =
937 i915_ttm_to_gem(vma->vm_private_data);
940 i915_gem_object_get(obj);
943 static void ttm_vm_close(struct vm_area_struct *vma)
945 struct drm_i915_gem_object *obj =
946 i915_ttm_to_gem(vma->vm_private_data);
949 i915_gem_object_put(obj);
952 static const struct vm_operations_struct vm_ops_ttm = {
953 .fault = vm_fault_ttm,
954 .access = vm_access_ttm,
956 .close = ttm_vm_close,
959 static u64 i915_ttm_mmap_offset(struct drm_i915_gem_object *obj)
961 /* The ttm_bo must be allocated with I915_BO_ALLOC_USER */
962 GEM_BUG_ON(!drm_mm_node_allocated(&obj->base.vma_node.vm_node));
964 return drm_vma_node_offset_addr(&obj->base.vma_node);
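/*
 * Illustrative note: the offset returned above is the fake offset that
 * userspace passes to mmap(2) on the DRM fd (userspace-side sketch, names
 * assumed for illustration):
 *
 *	ptr = mmap(NULL, size, PROT_READ | PROT_WRITE, MAP_SHARED,
 *		   drm_fd, mmap_offset);
 */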
967 static void i915_ttm_unmap_virtual(struct drm_i915_gem_object *obj)
969 ttm_bo_unmap_virtual(i915_gem_to_ttm(obj));
972 static const struct drm_i915_gem_object_ops i915_gem_ttm_obj_ops = {
973 .name = "i915_gem_object_ttm",
974 .flags = I915_GEM_OBJECT_IS_SHRINKABLE |
975 I915_GEM_OBJECT_SELF_MANAGED_SHRINK_LIST,
977 .get_pages = i915_ttm_get_pages,
978 .put_pages = i915_ttm_put_pages,
979 .truncate = i915_ttm_truncate,
980 .shrinker_release_pages = i915_ttm_shrinker_release_pages,
982 .adjust_lru = i915_ttm_adjust_lru,
983 .delayed_free = i915_ttm_delayed_free,
984 .migrate = i915_ttm_migrate,
986 .mmap_offset = i915_ttm_mmap_offset,
987 .unmap_virtual = i915_ttm_unmap_virtual,
988 .mmap_ops = &vm_ops_ttm,
991 void i915_ttm_bo_destroy(struct ttm_buffer_object *bo)
993 struct drm_i915_gem_object *obj = i915_ttm_to_gem(bo);
995 i915_gem_object_release_memory_region(obj);
996 mutex_destroy(&obj->ttm.get_io_page.lock);
998 if (obj->ttm.created) {
1000 * We freely manage the shrinker LRU outside of the mm.pages life
1001 * cycle. As a result, when destroying the object we should be
1002 * extra paranoid and ensure we remove it from the LRU before
1003 * we free the object.
1005 * Touching the ttm_shrinkable outside of the object lock here
1006 * should be safe now that the last GEM object ref was dropped.
1008 if (obj->mm.ttm_shrinkable)
1009 i915_gem_object_make_unshrinkable(obj);
1011 i915_ttm_backup_free(obj);
1013 /* This releases all gem object bindings to the backend. */
1014 __i915_gem_free_object(obj);
1016 call_rcu(&obj->rcu, __i915_gem_free_object_rcu);
1018 __i915_gem_object_fini(obj);
1023 * __i915_gem_ttm_object_init - Initialize a ttm-backed i915 gem object
1024 * @mem: The initial memory region for the object.
1025 * @obj: The gem object.
1026 * @size: Object size in bytes.
1027 * @flags: gem object flags.
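 * @page_size: Page size to force for the backing store, in bytes, or 0 for
 * the default. Forcing the page size is kernel internal only.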
1029 * Return: 0 on success, negative error code on failure.
1031 int __i915_gem_ttm_object_init(struct intel_memory_region *mem,
1032 struct drm_i915_gem_object *obj,
1033 resource_size_t size,
1034 resource_size_t page_size,
1037 static struct lock_class_key lock_class;
1038 struct drm_i915_private *i915 = mem->i915;
1039 struct ttm_operation_ctx ctx = {
1040 .interruptible = true,
1041 .no_wait_gpu = false,
1043 enum ttm_bo_type bo_type;
1046 drm_gem_private_object_init(&i915->drm, &obj->base, size);
1047 i915_gem_object_init(obj, &i915_gem_ttm_obj_ops, &lock_class, flags);
1049 /* Don't put on a region list until we're either locked or fully initialized. */
1050 obj->mm.region = mem;
1051 INIT_LIST_HEAD(&obj->mm.region_link);
1053 INIT_RADIX_TREE(&obj->ttm.get_io_page.radix, GFP_KERNEL | __GFP_NOWARN);
1054 mutex_init(&obj->ttm.get_io_page.lock);
1055 bo_type = (obj->flags & I915_BO_ALLOC_USER) ? ttm_bo_type_device :
1058 obj->base.vma_node.driver_private = i915_gem_to_ttm(obj);
1060 /* Forcing the page size is kernel internal only */
1061 GEM_BUG_ON(page_size && obj->mm.n_placements);
1064 * Keep an extra shrink pin to prevent the object from being made
1065 * shrinkable too early. If the ttm_tt is ever allocated in shmem, we
1066 * drop the pin. The TTM backend manages the shrinker LRU itself,
1067 * outside of the normal mm.pages life cycle.
1069 i915_gem_object_make_unshrinkable(obj);
1072 * If this function fails, it will call the destructor, but
1073 * our caller still owns the object. So no freeing in the
1074 * destructor until obj->ttm.created is true.
1075 * Similarly, in delayed_destroy, we can't call ttm_bo_put()
1076 * until successful initialization.
1078 ret = ttm_bo_init_reserved(&i915->bdev, i915_gem_to_ttm(obj), size,
1079 bo_type, &i915_sys_placement,
1080 page_size >> PAGE_SHIFT,
1081 &ctx, NULL, NULL, i915_ttm_bo_destroy);
1083 return i915_ttm_err_to_gem(ret);
1085 obj->ttm.created = true;
1086 i915_gem_object_release_memory_region(obj);
1087 i915_gem_object_init_memory_region(obj, mem);
1088 i915_ttm_adjust_domains_after_move(obj);
1089 i915_ttm_adjust_gem_after_move(obj);
1090 i915_gem_object_unlock(obj);
1095 static const struct intel_memory_region_ops ttm_system_region_ops = {
1096 .init_object = __i915_gem_ttm_object_init,
1097 .release = intel_region_ttm_fini,
1100 struct intel_memory_region *
1101 i915_gem_ttm_system_setup(struct drm_i915_private *i915,
1102 u16 type, u16 instance)
1104 struct intel_memory_region *mr;
1106 mr = intel_memory_region_create(i915, 0,
1107 totalram_pages() << PAGE_SHIFT,
1110 &ttm_system_region_ops);
1114 intel_memory_region_set_name(mr, "system-ttm");
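/*
 * Rough usage sketch (the real caller is the memory region probe code; the
 * exact call site is an assumption for illustration): the returned region
 * becomes the system-memory region the rest of the driver allocates from.
 *
 *	mr = i915_gem_ttm_system_setup(i915, INTEL_MEMORY_SYSTEM, 0);
 *	if (IS_ERR(mr))
 *		return PTR_ERR(mr);
 *	i915->mm.regions[INTEL_MEMORY_SYSTEM] = mr;
 */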