// SPDX-License-Identifier: MIT
/*
 * Copyright © 2021 Intel Corporation
 */

#include <drm/ttm/ttm_bo_driver.h>
#include <drm/ttm/ttm_placement.h>

#include "i915_drv.h"
#include "intel_memory_region.h"
#include "intel_region_ttm.h"

#include "gem/i915_gem_object.h"
#include "gem/i915_gem_region.h"
#include "gem/i915_gem_ttm.h"
#include "gem/i915_gem_mman.h"

#include "gt/intel_migrate.h"
#include "gt/intel_engine_pm.h"

#define I915_PL_LMEM0 TTM_PL_PRIV
#define I915_PL_SYSTEM TTM_PL_SYSTEM
#define I915_PL_STOLEN TTM_PL_VRAM
#define I915_PL_GGTT TTM_PL_TT
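
/*
 * TTM LRU priorities used by i915_ttm_adjust_lru() below: buffers with a
 * lower priority value are considered for eviction before higher ones.
 */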
#define I915_TTM_PRIO_PURGE     0
#define I915_TTM_PRIO_NO_PAGES  1
#define I915_TTM_PRIO_HAS_PAGES 2

/*
 * Size of struct ttm_place vector in on-stack struct ttm_placement allocs
 */
#define I915_TTM_MAX_PLACEMENTS INTEL_REGION_UNKNOWN

/**
 * struct i915_ttm_tt - TTM page vector with additional private information
 * @ttm: The base TTM page vector.
 * @dev: The struct device used for dma mapping and unmapping.
 * @cached_st: The cached scatter-gather table.
 *
 * Note that DMA may be going on right up to the point where the page-
 * vector is unpopulated in delayed destroy. Hence keep the
 * scatter-gather table mapped and cached up to that point. This is
 * different from the cached gem object io scatter-gather table which
 * doesn't have an associated dma mapping.
 */
struct i915_ttm_tt {
	struct ttm_tt ttm;
	struct device *dev;
	struct sg_table *cached_st;
};

static const struct ttm_place sys_placement_flags = {
	.fpfn = 0,
	.lpfn = 0,
	.mem_type = I915_PL_SYSTEM,
	.flags = 0,
};

static struct ttm_placement i915_sys_placement = {
	.num_placement = 1,
	.placement = &sys_placement_flags,
	.num_busy_placement = 1,
	.busy_placement = &sys_placement_flags,
};

static int i915_ttm_err_to_gem(int err)
{
	if (likely(!err))
		return 0;

	switch (err) {
	case -EBUSY:
		/*
		 * TTM likes to convert -EDEADLK to -EBUSY, and wants us to
		 * restart the operation, since we don't record the contending
		 * lock. We use -EAGAIN to restart.
		 */
		return -EAGAIN;
	case -ENOSPC:
		/*
		 * Memory type / region is full, and we can't evict.
		 * Except possibly system, that returns -ENOMEM;
		 */
		return -ENXIO;
	default:
		break;
	}

	return err;
}
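
/*
 * A note on the helpers below: gpu_binds_iomem() tells whether the GPU binds
 * a resource through io memory, and cpu_maps_iomem() whether a CPU mapping of
 * it goes through io memory. For now both amount to "not system memory".
 */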
static bool gpu_binds_iomem(struct ttm_resource *mem)
{
	return mem->mem_type != TTM_PL_SYSTEM;
}

static bool cpu_maps_iomem(struct ttm_resource *mem)
{
	/* Once / if we support GGTT, this is also false for cached ttm_tts */
	return mem->mem_type != TTM_PL_SYSTEM;
}

static enum i915_cache_level
i915_ttm_cache_level(struct drm_i915_private *i915, struct ttm_resource *res,
		     struct ttm_tt *ttm)
{
	return ((HAS_LLC(i915) || HAS_SNOOP(i915)) && !gpu_binds_iomem(res) &&
		ttm->caching == ttm_cached) ? I915_CACHE_LLC :
		I915_CACHE_NONE;
}

static void i915_ttm_adjust_lru(struct drm_i915_gem_object *obj);

static enum ttm_caching
i915_ttm_select_tt_caching(const struct drm_i915_gem_object *obj)
{
	/*
	 * Objects only allowed in system get cached cpu-mappings.
	 * Other objects get WC mapping for now. Even if in system.
	 */
	if (obj->mm.region->type == INTEL_MEMORY_SYSTEM &&
	    obj->mm.n_placements <= 1)
		return ttm_cached;

	return ttm_write_combined;
}

static void
i915_ttm_place_from_region(const struct intel_memory_region *mr,
			   struct ttm_place *place,
			   unsigned int flags)
{
	memset(place, 0, sizeof(*place));
	place->mem_type = intel_region_to_ttm_type(mr);

	if (flags & I915_BO_ALLOC_CONTIGUOUS)
		place->flags = TTM_PL_FLAG_CONTIGUOUS;
}
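
/*
 * Build a TTM placement from the object's allowed regions: the first allowed
 * region (or the current region if none are listed) is the single "requested"
 * placement, and all allowed regions form the busy list used when the
 * requested placement cannot be satisfied.
 */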
static void
i915_ttm_placement_from_obj(const struct drm_i915_gem_object *obj,
			    struct ttm_place *requested,
			    struct ttm_place *busy,
			    struct ttm_placement *placement)
{
	unsigned int num_allowed = obj->mm.n_placements;
	unsigned int flags = obj->flags;
	unsigned int i;

	placement->num_placement = 1;
	i915_ttm_place_from_region(num_allowed ? obj->mm.placements[0] :
				   obj->mm.region, requested, flags);

	/* Cache this on object? */
	placement->num_busy_placement = num_allowed;
	for (i = 0; i < placement->num_busy_placement; ++i)
		i915_ttm_place_from_region(obj->mm.placements[i], busy + i, flags);

	if (num_allowed == 0) {
		*busy = *requested;
		placement->num_busy_placement = 1;
	}

	placement->placement = requested;
	placement->busy_placement = busy;
}

static struct ttm_tt *i915_ttm_tt_create(struct ttm_buffer_object *bo,
					 uint32_t page_flags)
{
	struct ttm_resource_manager *man =
		ttm_manager_type(bo->bdev, bo->resource->mem_type);
	struct drm_i915_gem_object *obj = i915_ttm_to_gem(bo);
	struct i915_ttm_tt *i915_tt;
	int ret;

	i915_tt = kzalloc(sizeof(*i915_tt), GFP_KERNEL);
	if (!i915_tt)
		return NULL;

	if (obj->flags & I915_BO_ALLOC_CPU_CLEAR &&
	    man->use_tt)
		page_flags |= TTM_PAGE_FLAG_ZERO_ALLOC;

	ret = ttm_tt_init(&i915_tt->ttm, bo, page_flags,
			  i915_ttm_select_tt_caching(obj));
	if (ret) {
		kfree(i915_tt);
		return NULL;
	}

	i915_tt->dev = obj->base.dev->dev;

	return &i915_tt->ttm;
}
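
/*
 * When the ttm_tt is unpopulated, the cached scatter-gather table may still
 * be dma-mapped; unmap and free it before returning the pages to the pool.
 */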
static void i915_ttm_tt_unpopulate(struct ttm_device *bdev, struct ttm_tt *ttm)
{
	struct i915_ttm_tt *i915_tt = container_of(ttm, typeof(*i915_tt), ttm);

	if (i915_tt->cached_st) {
		dma_unmap_sgtable(i915_tt->dev, i915_tt->cached_st,
				  DMA_BIDIRECTIONAL, 0);
		sg_free_table(i915_tt->cached_st);
		kfree(i915_tt->cached_st);
		i915_tt->cached_st = NULL;
	}
	ttm_pool_free(&bdev->pool, ttm);
}

static void i915_ttm_tt_destroy(struct ttm_device *bdev, struct ttm_tt *ttm)
{
	struct i915_ttm_tt *i915_tt = container_of(ttm, typeof(*i915_tt), ttm);

	ttm_tt_destroy_common(bdev, ttm);
	ttm_tt_fini(ttm);
	kfree(i915_tt);
}

static bool i915_ttm_eviction_valuable(struct ttm_buffer_object *bo,
				       const struct ttm_place *place)
{
	struct drm_i915_gem_object *obj = i915_ttm_to_gem(bo);

	/* Will do for now. Our pinned objects are still on TTM's LRU lists */
	return i915_gem_object_evictable(obj);
}

static void i915_ttm_evict_flags(struct ttm_buffer_object *bo,
				 struct ttm_placement *placement)
{
	*placement = i915_sys_placement;
}
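
/*
 * Before TTM moves or swaps out the backing store, drop all GPU bindings and
 * release the GEM-side page references so nothing keeps pointing at the old
 * pages.
 */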
static int i915_ttm_move_notify(struct ttm_buffer_object *bo)
{
	struct drm_i915_gem_object *obj = i915_ttm_to_gem(bo);
	int ret;

	ret = i915_gem_object_unbind(obj, I915_GEM_OBJECT_UNBIND_ACTIVE);
	if (ret)
		return ret;

	ret = __i915_gem_object_put_pages(obj);
	if (ret)
		return ret;

	return 0;
}

static void i915_ttm_free_cached_io_st(struct drm_i915_gem_object *obj)
{
	struct radix_tree_iter iter;
	void __rcu **slot;

	if (!obj->ttm.cached_io_st)
		return;

	rcu_read_lock();
	radix_tree_for_each_slot(slot, &obj->ttm.get_io_page.radix, &iter, 0)
		radix_tree_delete(&obj->ttm.get_io_page.radix, iter.index);
	rcu_read_unlock();

	sg_free_table(obj->ttm.cached_io_st);
	kfree(obj->ttm.cached_io_st);
	obj->ttm.cached_io_st = NULL;
}
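
/*
 * Refresh the GEM read/write domains after a move: io memory and
 * write-combined ttm_tts end up in the WC domain, cached system pages in the
 * CPU domain.
 */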
static void
i915_ttm_adjust_domains_after_move(struct drm_i915_gem_object *obj)
{
	struct ttm_buffer_object *bo = i915_gem_to_ttm(obj);

	if (cpu_maps_iomem(bo->resource) || bo->ttm->caching != ttm_cached) {
		obj->write_domain = I915_GEM_DOMAIN_WC;
		obj->read_domains = I915_GEM_DOMAIN_WC;
	} else {
		obj->write_domain = I915_GEM_DOMAIN_CPU;
		obj->read_domains = I915_GEM_DOMAIN_CPU;
	}
}

static void i915_ttm_adjust_gem_after_move(struct drm_i915_gem_object *obj)
{
	struct ttm_buffer_object *bo = i915_gem_to_ttm(obj);
	unsigned int cache_level;
	unsigned int i;

	/*
	 * If object was moved to an allowable region, update the object
	 * region to consider it migrated. Note that if it's currently not
	 * in an allowable region, it's evicted and we don't update the
	 * object region.
	 */
	if (intel_region_to_ttm_type(obj->mm.region) != bo->resource->mem_type) {
		for (i = 0; i < obj->mm.n_placements; ++i) {
			struct intel_memory_region *mr = obj->mm.placements[i];

			if (intel_region_to_ttm_type(mr) == bo->resource->mem_type &&
			    mr != obj->mm.region) {
				i915_gem_object_release_memory_region(obj);
				i915_gem_object_init_memory_region(obj, mr);
				break;
			}
		}
	}

	obj->mem_flags &= ~(I915_BO_FLAG_STRUCT_PAGE | I915_BO_FLAG_IOMEM);

	obj->mem_flags |= cpu_maps_iomem(bo->resource) ? I915_BO_FLAG_IOMEM :
		I915_BO_FLAG_STRUCT_PAGE;

	cache_level = i915_ttm_cache_level(to_i915(bo->base.dev), bo->resource,
					   bo->ttm);
	i915_gem_object_set_cache_coherency(obj, cache_level);
}

static void i915_ttm_purge(struct drm_i915_gem_object *obj)
{
	struct ttm_buffer_object *bo = i915_gem_to_ttm(obj);
	struct ttm_operation_ctx ctx = {
		.interruptible = true,
		.no_wait_gpu = false,
	};
	struct ttm_placement place = {};
	int ret;

	if (obj->mm.madv == __I915_MADV_PURGED)
		return;

	/* TTM's purge interface. Note that we might be reentering. */
	ret = ttm_bo_validate(bo, &place, &ctx);
	if (!ret) {
		obj->write_domain = 0;
		obj->read_domains = 0;
		i915_ttm_adjust_gem_after_move(obj);
		i915_ttm_free_cached_io_st(obj);
		obj->mm.madv = __I915_MADV_PURGED;
	}
}
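
/*
 * Called by TTM just before swapping out the backing store. If the object
 * was marked DONTNEED we purge it instead of letting it be swapped out.
 */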
static void i915_ttm_swap_notify(struct ttm_buffer_object *bo)
{
	struct drm_i915_gem_object *obj = i915_ttm_to_gem(bo);
	int ret = i915_ttm_move_notify(bo);

	GEM_WARN_ON(ret);
	GEM_WARN_ON(obj->ttm.cached_io_st);
	if (!ret && obj->mm.madv != I915_MADV_WILLNEED)
		i915_ttm_purge(obj);
}

static void i915_ttm_delete_mem_notify(struct ttm_buffer_object *bo)
{
	struct drm_i915_gem_object *obj = i915_ttm_to_gem(bo);

	if (likely(obj))
		i915_ttm_free_cached_io_st(obj);
}

static struct intel_memory_region *
i915_ttm_region(struct ttm_device *bdev, int ttm_mem_type)
{
	struct drm_i915_private *i915 = container_of(bdev, typeof(*i915), bdev);

	/* There's some room for optimization here... */
	GEM_BUG_ON(ttm_mem_type != I915_PL_SYSTEM &&
		   ttm_mem_type < I915_PL_LMEM0);
	if (ttm_mem_type == I915_PL_SYSTEM)
		return intel_memory_region_lookup(i915, INTEL_MEMORY_SYSTEM,
						  0);

	return intel_memory_region_lookup(i915, INTEL_MEMORY_LOCAL,
					  ttm_mem_type - I915_PL_LMEM0);
}
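
/*
 * Build a dma-mapped scatter-gather table for the pages of a ttm_tt and
 * cache it; the cached table is reused until the ttm_tt is unpopulated.
 */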
static struct sg_table *i915_ttm_tt_get_st(struct ttm_tt *ttm)
{
	struct i915_ttm_tt *i915_tt = container_of(ttm, typeof(*i915_tt), ttm);
	struct sg_table *st;
	int ret;

	if (i915_tt->cached_st)
		return i915_tt->cached_st;

	st = kzalloc(sizeof(*st), GFP_KERNEL);
	if (!st)
		return ERR_PTR(-ENOMEM);

	ret = sg_alloc_table_from_pages_segment(st,
			ttm->pages, ttm->num_pages,
			0, (unsigned long)ttm->num_pages << PAGE_SHIFT,
			i915_sg_segment_size(), GFP_KERNEL);
	if (ret) {
		kfree(st);
		return ERR_PTR(ret);
	}

	ret = dma_map_sgtable(i915_tt->dev, st, DMA_BIDIRECTIONAL, 0);
	if (ret) {
		sg_free_table(st);
		kfree(st);
		return ERR_PTR(ret);
	}

	i915_tt->cached_st = st;
	return st;
}

static struct sg_table *
i915_ttm_resource_get_st(struct drm_i915_gem_object *obj,
			 struct ttm_resource *res)
{
	struct ttm_buffer_object *bo = i915_gem_to_ttm(obj);

	if (!gpu_binds_iomem(res))
		return i915_ttm_tt_get_st(bo->ttm);

	/*
	 * If CPU mapping differs, we need to add the ttm_tt pages to
	 * the resulting st. Might make sense for GGTT.
	 */
	GEM_WARN_ON(!cpu_maps_iomem(res));
	return intel_region_ttm_resource_to_st(obj->mm.region, res);
}

static int i915_ttm_accel_move(struct ttm_buffer_object *bo,
			       bool clear,
			       struct ttm_resource *dst_mem,
			       struct ttm_tt *dst_ttm,
			       struct sg_table *dst_st)
{
	struct drm_i915_private *i915 = container_of(bo->bdev, typeof(*i915),
						     bdev);
	struct ttm_resource_manager *src_man =
		ttm_manager_type(bo->bdev, bo->resource->mem_type);
	struct drm_i915_gem_object *obj = i915_ttm_to_gem(bo);
	struct sg_table *src_st;
	struct i915_request *rq;
	struct ttm_tt *src_ttm = bo->ttm;
	enum i915_cache_level src_level, dst_level;
	int ret;

	if (!i915->gt.migrate.context)
		return -EINVAL;

	dst_level = i915_ttm_cache_level(i915, dst_mem, dst_ttm);
	if (clear) {
		if (bo->type == ttm_bo_type_kernel)
			return -EINVAL;

		intel_engine_pm_get(i915->gt.migrate.context->engine);
		ret = intel_context_migrate_clear(i915->gt.migrate.context, NULL,
						  dst_st->sgl, dst_level,
						  gpu_binds_iomem(dst_mem),
						  0, &rq);

		if (!ret && rq) {
			i915_request_wait(rq, 0, MAX_SCHEDULE_TIMEOUT);
			i915_request_put(rq);
		}
		intel_engine_pm_put(i915->gt.migrate.context->engine);
	} else {
		src_st = src_man->use_tt ? i915_ttm_tt_get_st(src_ttm) :
			obj->ttm.cached_io_st;

		src_level = i915_ttm_cache_level(i915, bo->resource, src_ttm);
		intel_engine_pm_get(i915->gt.migrate.context->engine);
		ret = intel_context_migrate_copy(i915->gt.migrate.context,
						 NULL, src_st->sgl, src_level,
						 gpu_binds_iomem(bo->resource),
						 dst_st->sgl, dst_level,
						 gpu_binds_iomem(dst_mem),
						 &rq);
		if (!ret && rq) {
			i915_request_wait(rq, 0, MAX_SCHEDULE_TIMEOUT);
			i915_request_put(rq);
		}
		intel_engine_pm_put(i915->gt.migrate.context->engine);
	}

	return ret;
}
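
/*
 * Carry out the move (or clear) itself: try the blitter path first when
 * acceleration is allowed, and fall back to a CPU memcpy using TTM's kmap
 * iterators over either the ttm_tt pages or the io memory aperture.
 */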
static void __i915_ttm_move(struct ttm_buffer_object *bo, bool clear,
			    struct ttm_resource *dst_mem,
			    struct ttm_tt *dst_ttm,
			    struct sg_table *dst_st,
			    bool allow_accel)
{
	int ret = -EINVAL;

	if (allow_accel)
		ret = i915_ttm_accel_move(bo, clear, dst_mem, dst_ttm, dst_st);
	if (ret) {
		struct drm_i915_gem_object *obj = i915_ttm_to_gem(bo);
		struct intel_memory_region *dst_reg, *src_reg;
		union {
			struct ttm_kmap_iter_tt tt;
			struct ttm_kmap_iter_iomap io;
		} _dst_iter, _src_iter;
		struct ttm_kmap_iter *dst_iter, *src_iter;

		dst_reg = i915_ttm_region(bo->bdev, dst_mem->mem_type);
		src_reg = i915_ttm_region(bo->bdev, bo->resource->mem_type);
		GEM_BUG_ON(!dst_reg || !src_reg);

		dst_iter = !cpu_maps_iomem(dst_mem) ?
			ttm_kmap_iter_tt_init(&_dst_iter.tt, dst_ttm) :
			ttm_kmap_iter_iomap_init(&_dst_iter.io, &dst_reg->iomap,
						 dst_st, dst_reg->region.start);

		src_iter = !cpu_maps_iomem(bo->resource) ?
			ttm_kmap_iter_tt_init(&_src_iter.tt, bo->ttm) :
			ttm_kmap_iter_iomap_init(&_src_iter.io, &src_reg->iomap,
						 obj->ttm.cached_io_st,
						 src_reg->region.start);

		ttm_move_memcpy(clear, dst_mem->num_pages, dst_iter, src_iter);
	}
}

static int i915_ttm_move(struct ttm_buffer_object *bo, bool evict,
			 struct ttm_operation_ctx *ctx,
			 struct ttm_resource *dst_mem,
			 struct ttm_place *hop)
{
	struct drm_i915_gem_object *obj = i915_ttm_to_gem(bo);
	struct ttm_resource_manager *dst_man =
		ttm_manager_type(bo->bdev, dst_mem->mem_type);
	struct ttm_tt *ttm = bo->ttm;
	struct sg_table *dst_st;
	bool clear;
	int ret;

	/* Sync for now. We could do the actual copy async. */
	ret = ttm_bo_wait_ctx(bo, ctx);
	if (ret)
		return ret;

	ret = i915_ttm_move_notify(bo);
	if (ret)
		return ret;

	if (obj->mm.madv != I915_MADV_WILLNEED) {
		i915_ttm_purge(obj);
		ttm_resource_free(bo, &dst_mem);
		return 0;
	}

	/* Populate ttm with pages if needed. Typically system memory. */
	if (ttm && (dst_man->use_tt || (ttm->page_flags & TTM_PAGE_FLAG_SWAPPED))) {
		ret = ttm_tt_populate(bo->bdev, ttm, ctx);
		if (ret)
			return ret;
	}

	dst_st = i915_ttm_resource_get_st(obj, dst_mem);
	if (IS_ERR(dst_st))
		return PTR_ERR(dst_st);

	clear = !cpu_maps_iomem(bo->resource) && (!ttm || !ttm_tt_is_populated(ttm));
	if (!(clear && ttm && !(ttm->page_flags & TTM_PAGE_FLAG_ZERO_ALLOC)))
		__i915_ttm_move(bo, clear, dst_mem, bo->ttm, dst_st, true);

	ttm_bo_move_sync_cleanup(bo, dst_mem);
	i915_ttm_adjust_domains_after_move(obj);
	i915_ttm_free_cached_io_st(obj);

	if (gpu_binds_iomem(dst_mem) || cpu_maps_iomem(dst_mem)) {
		obj->ttm.cached_io_st = dst_st;
		obj->ttm.get_io_page.sg_pos = dst_st->sgl;
		obj->ttm.get_io_page.sg_idx = 0;
	}

	i915_ttm_adjust_gem_after_move(obj);
	return 0;
}
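
/*
 * Tell TTM how CPU mappings of the resource should be set up: anything that
 * isn't plain system memory is mapped write-combined as io memory.
 */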
static int i915_ttm_io_mem_reserve(struct ttm_device *bdev, struct ttm_resource *mem)
{
	if (!cpu_maps_iomem(mem))
		return 0;

	mem->bus.caching = ttm_write_combined;
	mem->bus.is_iomem = true;

	return 0;
}

static unsigned long i915_ttm_io_mem_pfn(struct ttm_buffer_object *bo,
					 unsigned long page_offset)
{
	struct drm_i915_gem_object *obj = i915_ttm_to_gem(bo);
	unsigned long base = obj->mm.region->iomap.base - obj->mm.region->region.start;
	struct scatterlist *sg;
	unsigned int ofs;

	GEM_WARN_ON(bo->ttm);

	sg = __i915_gem_object_get_sg(obj, &obj->ttm.get_io_page, page_offset, &ofs, true);

	return ((base + sg_dma_address(sg)) >> PAGE_SHIFT) + ofs;
}

static struct ttm_device_funcs i915_ttm_bo_driver = {
	.ttm_tt_create = i915_ttm_tt_create,
	.ttm_tt_unpopulate = i915_ttm_tt_unpopulate,
	.ttm_tt_destroy = i915_ttm_tt_destroy,
	.eviction_valuable = i915_ttm_eviction_valuable,
	.evict_flags = i915_ttm_evict_flags,
	.move = i915_ttm_move,
	.swap_notify = i915_ttm_swap_notify,
	.delete_mem_notify = i915_ttm_delete_mem_notify,
	.io_mem_reserve = i915_ttm_io_mem_reserve,
	.io_mem_pfn = i915_ttm_io_mem_pfn,
};

/**
 * i915_ttm_driver - Return a pointer to the TTM device funcs
 *
 * Return: Pointer to statically allocated TTM device funcs.
 */
struct ttm_device_funcs *i915_ttm_driver(void)
{
	return &i915_ttm_bo_driver;
}
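
/*
 * Validate the object into its placements: first try only the requested
 * placement without allowing eviction; if that fails with anything but a
 * "restart" error, retry with the full busy list so TTM may evict.
 */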
static int __i915_ttm_get_pages(struct drm_i915_gem_object *obj,
				struct ttm_placement *placement)
{
	struct ttm_buffer_object *bo = i915_gem_to_ttm(obj);
	struct ttm_operation_ctx ctx = {
		.interruptible = true,
		.no_wait_gpu = false,
	};
	struct sg_table *st;
	int real_num_busy;
	int ret;

	/* First try only the requested placement. No eviction. */
	real_num_busy = fetch_and_zero(&placement->num_busy_placement);
	ret = ttm_bo_validate(bo, placement, &ctx);
	if (ret) {
		ret = i915_ttm_err_to_gem(ret);
		/*
		 * Anything that wants to restart the operation gets to
		 * do that.
		 */
		if (ret == -EDEADLK || ret == -EINTR || ret == -ERESTARTSYS ||
		    ret == -EAGAIN)
			return ret;

		/*
		 * If the initial attempt fails, allow all accepted placements,
		 * evicting if necessary.
		 */
		placement->num_busy_placement = real_num_busy;
		ret = ttm_bo_validate(bo, placement, &ctx);
		if (ret)
			return i915_ttm_err_to_gem(ret);
	}

	i915_ttm_adjust_lru(obj);
	if (bo->ttm && !ttm_tt_is_populated(bo->ttm)) {
		ret = ttm_tt_populate(bo->bdev, bo->ttm, &ctx);
		if (ret)
			return ret;

		i915_ttm_adjust_domains_after_move(obj);
		i915_ttm_adjust_gem_after_move(obj);
	}

	if (!i915_gem_object_has_pages(obj)) {
		/* Object either has a page vector or is an iomem object */
		st = bo->ttm ? i915_ttm_tt_get_st(bo->ttm) : obj->ttm.cached_io_st;
		if (IS_ERR(st))
			return PTR_ERR(st);

		__i915_gem_object_set_pages(obj, st, i915_sg_dma_sizes(st->sgl));
	}

	return 0;
}

static int i915_ttm_get_pages(struct drm_i915_gem_object *obj)
{
	struct ttm_place requested, busy[I915_TTM_MAX_PLACEMENTS];
	struct ttm_placement placement;

	GEM_BUG_ON(obj->mm.n_placements > I915_TTM_MAX_PLACEMENTS);

	/* Move to the requested placement. */
	i915_ttm_placement_from_obj(obj, &requested, busy, &placement);

	return __i915_ttm_get_pages(obj, &placement);
}

/**
 * DOC: Migration vs eviction
 *
 * GEM migration may not be the same as TTM migration / eviction. If
 * the TTM core decides to evict an object it may be evicted to a
 * TTM memory type that is not in the object's allowable GEM regions, or
 * in fact theoretically to a TTM memory type that doesn't correspond to
 * a GEM memory region. In that case the object's GEM region is not
 * updated, and the data is migrated back to the GEM region at
 * get_pages time. TTM may however set up CPU ptes to the object even
 * when it is evicted.
 * GEM forced migration, using the i915_ttm_migrate() op, is allowed even
 * to regions that are not in the object's list of allowable placements.
 */

static int i915_ttm_migrate(struct drm_i915_gem_object *obj,
			    struct intel_memory_region *mr)
{
	struct ttm_place requested;
	struct ttm_placement placement;
	int ret;

	i915_ttm_place_from_region(mr, &requested, obj->flags);
	placement.num_placement = 1;
	placement.num_busy_placement = 1;
	placement.placement = &requested;
	placement.busy_placement = &requested;

	ret = __i915_ttm_get_pages(obj, &placement);
	if (ret)
		return ret;

	/*
	 * Reinitialize the region bindings. This is primarily
	 * required for objects where the new region is not in
	 * its allowable placements.
	 */
	if (obj->mm.region != mr) {
		i915_gem_object_release_memory_region(obj);
		i915_gem_object_init_memory_region(obj, mr);
	}

	return 0;
}

static void i915_ttm_put_pages(struct drm_i915_gem_object *obj,
			       struct sg_table *st)
{
	/*
	 * We're currently not called from a shrinker, so put_pages()
	 * typically means the object is about to be destroyed, or called
	 * from move_notify(). So just avoid doing much for now.
	 * If the object is not destroyed next, the TTM eviction logic
	 * and shrinkers will move it out if needed.
	 */

	i915_ttm_adjust_lru(obj);
}

static void i915_ttm_adjust_lru(struct drm_i915_gem_object *obj)
{
	struct ttm_buffer_object *bo = i915_gem_to_ttm(obj);

	/*
	 * Don't manipulate the TTM LRUs while in TTM bo destruction.
	 * We're called through i915_ttm_delete_mem_notify().
	 */
	if (!kref_read(&bo->kref))
		return;

	/*
	 * Put on the correct LRU list depending on the MADV status
	 */
	spin_lock(&bo->bdev->lru_lock);
	if (obj->mm.madv != I915_MADV_WILLNEED) {
		bo->priority = I915_TTM_PRIO_PURGE;
	} else if (!i915_gem_object_has_pages(obj)) {
		if (bo->priority < I915_TTM_PRIO_HAS_PAGES)
			bo->priority = I915_TTM_PRIO_HAS_PAGES;
	} else {
		if (bo->priority > I915_TTM_PRIO_NO_PAGES)
			bo->priority = I915_TTM_PRIO_NO_PAGES;
	}

	ttm_bo_move_to_lru_tail(bo, bo->resource, NULL);
	spin_unlock(&bo->bdev->lru_lock);
}

/*
 * TTM-backed gem object destruction requires some clarification.
 * Basically we have two possibilities here. We can either rely on the
 * i915 delayed destruction and put the TTM object when the object
 * is idle. This would be detected by TTM which would bypass the
 * TTM delayed destroy handling. The other approach is to put the TTM
 * object early and rely on the TTM destroyed handling, and then free
 * the leftover parts of the GEM object once TTM's destroyed list handling is
 * complete. For now, we rely on the latter for two reasons:
 * a) TTM can evict an object even when it's on the delayed destroy list,
 * which in theory allows for complete eviction.
 * b) There is work going on in TTM to allow freeing an object even when
 * it's not idle, and using the TTM destroyed list handling could help us
 * benefit from that.
 */
static void i915_ttm_delayed_free(struct drm_i915_gem_object *obj)
{
	if (obj->ttm.created) {
		ttm_bo_put(i915_gem_to_ttm(obj));
	} else {
		__i915_gem_free_object(obj);
		call_rcu(&obj->rcu, __i915_gem_free_object_rcu);
	}
}

static vm_fault_t vm_fault_ttm(struct vm_fault *vmf)
{
	struct vm_area_struct *area = vmf->vma;
	struct drm_i915_gem_object *obj =
		i915_ttm_to_gem(area->vm_private_data);

	/* Sanity check that we allow writing into this object */
	if (unlikely(i915_gem_object_is_readonly(obj) &&
		     area->vm_flags & VM_WRITE))
		return VM_FAULT_SIGBUS;

	return ttm_bo_vm_fault(vmf);
}

static int
vm_access_ttm(struct vm_area_struct *area, unsigned long addr,
	      void *buf, int len, int write)
{
	struct drm_i915_gem_object *obj =
		i915_ttm_to_gem(area->vm_private_data);

	if (i915_gem_object_is_readonly(obj) && write)
		return -EACCES;

	return ttm_bo_vm_access(area, addr, buf, len, write);
}

static void ttm_vm_open(struct vm_area_struct *vma)
{
	struct drm_i915_gem_object *obj =
		i915_ttm_to_gem(vma->vm_private_data);

	GEM_BUG_ON(!obj);
	i915_gem_object_get(obj);
}

static void ttm_vm_close(struct vm_area_struct *vma)
{
	struct drm_i915_gem_object *obj =
		i915_ttm_to_gem(vma->vm_private_data);

	GEM_BUG_ON(!obj);
	i915_gem_object_put(obj);
}

static const struct vm_operations_struct vm_ops_ttm = {
	.fault = vm_fault_ttm,
	.access = vm_access_ttm,
	.open = ttm_vm_open,
	.close = ttm_vm_close,
};

static u64 i915_ttm_mmap_offset(struct drm_i915_gem_object *obj)
{
	/* The ttm_bo must be allocated with I915_BO_ALLOC_USER */
	GEM_BUG_ON(!drm_mm_node_allocated(&obj->base.vma_node.vm_node));

	return drm_vma_node_offset_addr(&obj->base.vma_node);
}

static const struct drm_i915_gem_object_ops i915_gem_ttm_obj_ops = {
	.name = "i915_gem_object_ttm",

	.get_pages = i915_ttm_get_pages,
	.put_pages = i915_ttm_put_pages,
	.truncate = i915_ttm_purge,
	.adjust_lru = i915_ttm_adjust_lru,
	.delayed_free = i915_ttm_delayed_free,
	.migrate = i915_ttm_migrate,
	.mmap_offset = i915_ttm_mmap_offset,
	.mmap_ops = &vm_ops_ttm,
};

void i915_ttm_bo_destroy(struct ttm_buffer_object *bo)
{
	struct drm_i915_gem_object *obj = i915_ttm_to_gem(bo);

	/* This releases all gem object bindings to the backend. */
	__i915_gem_free_object(obj);

	i915_gem_object_release_memory_region(obj);
	mutex_destroy(&obj->ttm.get_io_page.lock);

	if (obj->ttm.created)
		call_rcu(&obj->rcu, __i915_gem_free_object_rcu);
}

/**
 * __i915_gem_ttm_object_init - Initialize a ttm-backed i915 gem object
 * @mem: The initial memory region for the object.
 * @obj: The gem object.
 * @size: Object size in bytes.
 * @page_size: The requested page size in bytes for this object, or 0 for
 * the default.
 * @flags: gem object flags.
 *
 * Return: 0 on success, negative error code on failure.
 */
int __i915_gem_ttm_object_init(struct intel_memory_region *mem,
			       struct drm_i915_gem_object *obj,
			       resource_size_t size,
			       resource_size_t page_size,
			       unsigned int flags)
{
	static struct lock_class_key lock_class;
	struct drm_i915_private *i915 = mem->i915;
	struct ttm_operation_ctx ctx = {
		.interruptible = true,
		.no_wait_gpu = false,
	};
	enum ttm_bo_type bo_type;
	int ret;

	drm_gem_private_object_init(&i915->drm, &obj->base, size);
	i915_gem_object_init(obj, &i915_gem_ttm_obj_ops, &lock_class, flags);
	i915_gem_object_init_memory_region(obj, mem);
	i915_gem_object_make_unshrinkable(obj);
	INIT_RADIX_TREE(&obj->ttm.get_io_page.radix, GFP_KERNEL | __GFP_NOWARN);
	mutex_init(&obj->ttm.get_io_page.lock);
	bo_type = (obj->flags & I915_BO_ALLOC_USER) ? ttm_bo_type_device :
		ttm_bo_type_kernel;

	obj->base.vma_node.driver_private = i915_gem_to_ttm(obj);

	/* Forcing the page size is kernel internal only */
	GEM_BUG_ON(page_size && obj->mm.n_placements);

	/*
	 * If this function fails, it will call the destructor, but
	 * our caller still owns the object. So no freeing in the
	 * destructor until obj->ttm.created is true.
	 * Similarly, in delayed_destroy, we can't call ttm_bo_put()
	 * until successful initialization.
	 */
	ret = ttm_bo_init_reserved(&i915->bdev, i915_gem_to_ttm(obj), size,
				   bo_type, &i915_sys_placement,
				   page_size >> PAGE_SHIFT,
				   &ctx, NULL, NULL, i915_ttm_bo_destroy);
	if (ret)
		return i915_ttm_err_to_gem(ret);

	obj->ttm.created = true;
	i915_ttm_adjust_domains_after_move(obj);
	i915_ttm_adjust_gem_after_move(obj);
	i915_gem_object_unlock(obj);

	return 0;
}

static const struct intel_memory_region_ops ttm_system_region_ops = {
	.init_object = __i915_gem_ttm_object_init,
};

struct intel_memory_region *
i915_gem_ttm_system_setup(struct drm_i915_private *i915,
			  u16 type, u16 instance)
{
	struct intel_memory_region *mr;

	mr = intel_memory_region_create(i915, 0,
					totalram_pages() << PAGE_SHIFT,
					PAGE_SIZE, 0,
					type, instance,
					&ttm_system_region_ops);
	if (IS_ERR(mr))
		return mr;

	intel_memory_region_set_name(mr, "system-ttm");
	return mr;
}

/**
 * i915_gem_obj_copy_ttm - Copy the contents of one ttm-based gem object to
 * another
 * @dst: The destination object
 * @src: The source object
 * @allow_accel: Allow using the blitter. Otherwise TTM memcpy is used.
 * @intr: Whether to perform waits interruptibly.
 *
 * Note: The caller is responsible for assuring that the underlying
 * TTM objects are populated if needed and locked.
 *
 * Return: Zero on success. Negative error code on error. If @intr == true,
 * then it may return -ERESTARTSYS or -EINTR.
 */
int i915_gem_obj_copy_ttm(struct drm_i915_gem_object *dst,
			  struct drm_i915_gem_object *src,
			  bool allow_accel, bool intr)
{
	struct ttm_buffer_object *dst_bo = i915_gem_to_ttm(dst);
	struct ttm_buffer_object *src_bo = i915_gem_to_ttm(src);
	struct ttm_operation_ctx ctx = {
		.interruptible = intr,
	};
	struct sg_table *dst_st;
	int ret;

	assert_object_held(dst);
	assert_object_held(src);

	/*
	 * Sync for now. This will change with async moves.
	 */
	ret = ttm_bo_wait_ctx(dst_bo, &ctx);
	if (!ret)
		ret = ttm_bo_wait_ctx(src_bo, &ctx);
	if (ret)
		return ret;

	dst_st = gpu_binds_iomem(dst_bo->resource) ?
		dst->ttm.cached_io_st : i915_ttm_tt_get_st(dst_bo->ttm);

	__i915_ttm_move(src_bo, false, dst_bo->resource, dst_bo->ttm,
			dst_st, allow_accel);

	return 0;
}