// SPDX-License-Identifier: MIT
/*
 * Copyright © 2021 Intel Corporation
 */

#include <drm/ttm/ttm_bo_driver.h>
#include <drm/ttm/ttm_placement.h>

#include "i915_drv.h"
#include "intel_memory_region.h"
#include "intel_region_ttm.h"

#include "gem/i915_gem_object.h"
#include "gem/i915_gem_region.h"
#include "gem/i915_gem_ttm.h"
#include "gem/i915_gem_mman.h"

#define I915_PL_LMEM0 TTM_PL_PRIV
#define I915_PL_SYSTEM TTM_PL_SYSTEM
#define I915_PL_STOLEN TTM_PL_VRAM
#define I915_PL_GGTT TTM_PL_TT

#define I915_TTM_PRIO_PURGE     0
#define I915_TTM_PRIO_NO_PAGES  1
#define I915_TTM_PRIO_HAS_PAGES 2

/**
 * struct i915_ttm_tt - TTM page vector with additional private information
 * @ttm: The base TTM page vector.
 * @dev: The struct device used for dma mapping and unmapping.
 * @cached_st: The cached scatter-gather table.
 *
 * Note that DMA may be going on right up to the point where the page-
 * vector is unpopulated in delayed destroy. Hence keep the
 * scatter-gather table mapped and cached up to that point. This is
 * different from the cached gem object io scatter-gather table which
 * doesn't have an associated dma mapping.
 */
struct i915_ttm_tt {
	struct ttm_tt ttm;
	struct device *dev;
	struct sg_table *cached_st;
};

static const struct ttm_place lmem0_sys_placement_flags[] = {
	{
		.mem_type = I915_PL_LMEM0,
	}, {
		.mem_type = I915_PL_SYSTEM,
	}
};

static struct ttm_placement i915_lmem0_placement = {
	.num_placement = 1,
	.placement = &lmem0_sys_placement_flags[0],
	.num_busy_placement = 1,
	.busy_placement = &lmem0_sys_placement_flags[0],
};

static struct ttm_placement i915_sys_placement = {
	.num_placement = 1,
	.placement = &lmem0_sys_placement_flags[1],
	.num_busy_placement = 1,
	.busy_placement = &lmem0_sys_placement_flags[1],
};

static void i915_ttm_adjust_lru(struct drm_i915_gem_object *obj);
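
/*
 * Back an object with a struct i915_ttm_tt. Pages are allocated
 * write-combined, and zero-on-alloc is requested when the object was
 * created with I915_BO_ALLOC_CPU_CLEAR and the placement is tt-backed.
 */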
static struct ttm_tt *i915_ttm_tt_create(struct ttm_buffer_object *bo,
					 uint32_t page_flags)
{
	struct ttm_resource_manager *man =
		ttm_manager_type(bo->bdev, bo->resource->mem_type);
	struct drm_i915_gem_object *obj = i915_ttm_to_gem(bo);
	struct i915_ttm_tt *i915_tt;
	int ret;

	i915_tt = kzalloc(sizeof(*i915_tt), GFP_KERNEL);
	if (!i915_tt)
		return NULL;

	if (obj->flags & I915_BO_ALLOC_CPU_CLEAR && man->use_tt)
		page_flags |= TTM_PAGE_FLAG_ZERO_ALLOC;

	ret = ttm_tt_init(&i915_tt->ttm, bo, page_flags, ttm_write_combined);
	if (ret) {
		kfree(i915_tt);
		return NULL;
	}

	i915_tt->dev = obj->base.dev->dev;
	return &i915_tt->ttm;
}

static void i915_ttm_tt_unpopulate(struct ttm_device *bdev, struct ttm_tt *ttm)
{
	struct i915_ttm_tt *i915_tt = container_of(ttm, typeof(*i915_tt), ttm);

	if (i915_tt->cached_st) {
		dma_unmap_sgtable(i915_tt->dev, i915_tt->cached_st,
				  DMA_BIDIRECTIONAL, 0);
		sg_free_table(i915_tt->cached_st);
		kfree(i915_tt->cached_st);
		i915_tt->cached_st = NULL;
	}
	ttm_pool_free(&bdev->pool, ttm);
}

static void i915_ttm_tt_destroy(struct ttm_device *bdev, struct ttm_tt *ttm)
{
	struct i915_ttm_tt *i915_tt = container_of(ttm, typeof(*i915_tt), ttm);

	ttm_tt_destroy_common(bdev, ttm);
	kfree(i915_tt);
}

static bool i915_ttm_eviction_valuable(struct ttm_buffer_object *bo,
				       const struct ttm_place *place)
{
	struct drm_i915_gem_object *obj = i915_ttm_to_gem(bo);

	/* Will do for now. Our pinned objects are still on TTM's LRU lists */
	if (!i915_gem_object_evictable(obj))
		return false;

	/* This isn't valid with a buddy allocator */
	return ttm_bo_eviction_valuable(bo, place);
}

static void i915_ttm_evict_flags(struct ttm_buffer_object *bo,
				 struct ttm_placement *placement)
{
	*placement = i915_sys_placement;
}
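
/*
 * Called before the backing store changes: unbind the object from the
 * GPU address spaces and release the pages currently exposed to i915.
 */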
static int i915_ttm_move_notify(struct ttm_buffer_object *bo)
{
	struct drm_i915_gem_object *obj = i915_ttm_to_gem(bo);
	int ret;

	ret = i915_gem_object_unbind(obj, I915_GEM_OBJECT_UNBIND_ACTIVE);
	if (ret)
		return ret;

	return __i915_gem_object_put_pages(obj);
}

static void i915_ttm_free_cached_io_st(struct drm_i915_gem_object *obj)
{
	struct radix_tree_iter iter;
	void __rcu **slot;

	if (!obj->ttm.cached_io_st)
		return;

	rcu_read_lock();
	radix_tree_for_each_slot(slot, &obj->ttm.get_io_page.radix, &iter, 0)
		radix_tree_delete(&obj->ttm.get_io_page.radix, iter.index);
	rcu_read_unlock();

	sg_free_table(obj->ttm.cached_io_st);
	kfree(obj->ttm.cached_io_st);
	obj->ttm.cached_io_st = NULL;
}
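
/*
 * Drop the backing store entirely by validating against an empty
 * placement, then release the cached io sg-table and mark the object
 * as purged.
 */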
static void i915_ttm_purge(struct drm_i915_gem_object *obj)
{
	struct ttm_buffer_object *bo = i915_gem_to_ttm(obj);
	struct ttm_operation_ctx ctx = {
		.interruptible = true,
		.no_wait_gpu = false,
	};
	struct ttm_placement place = {};
	int ret;

	if (obj->mm.madv == __I915_MADV_PURGED)
		return;

	/* TTM's purge interface. Note that we might be reentering. */
	ret = ttm_bo_validate(bo, &place, &ctx);
	if (!ret) {
		i915_ttm_free_cached_io_st(obj);
		obj->mm.madv = __I915_MADV_PURGED;
	}
}

static void i915_ttm_swap_notify(struct ttm_buffer_object *bo)
{
	struct drm_i915_gem_object *obj = i915_ttm_to_gem(bo);
	int ret = i915_ttm_move_notify(bo);

	GEM_WARN_ON(obj->ttm.cached_io_st);
	if (!ret && obj->mm.madv != I915_MADV_WILLNEED)
		i915_ttm_purge(obj);
}

static void i915_ttm_delete_mem_notify(struct ttm_buffer_object *bo)
{
	struct drm_i915_gem_object *obj = i915_ttm_to_gem(bo);

	/* This releases all gem object bindings to the backend. */
	__i915_gem_free_object(obj);
}

static struct intel_memory_region *
i915_ttm_region(struct ttm_device *bdev, int ttm_mem_type)
{
	struct drm_i915_private *i915 = container_of(bdev, typeof(*i915), bdev);

	/* There's some room for optimization here... */
	GEM_BUG_ON(ttm_mem_type != I915_PL_SYSTEM &&
		   ttm_mem_type < I915_PL_LMEM0);
	if (ttm_mem_type == I915_PL_SYSTEM)
		return intel_memory_region_lookup(i915, INTEL_MEMORY_SYSTEM,
						  0);

	return intel_memory_region_lookup(i915, INTEL_MEMORY_LOCAL,
					  ttm_mem_type - I915_PL_LMEM0);
}
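
/*
 * Build (or return the cached) scatter-gather table for a populated page
 * vector and map it for DMA. The mapping is kept until
 * i915_ttm_tt_unpopulate().
 */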
static struct sg_table *i915_ttm_tt_get_st(struct ttm_tt *ttm)
{
	struct i915_ttm_tt *i915_tt = container_of(ttm, typeof(*i915_tt), ttm);
	struct scatterlist *sg;
	struct sg_table *st;
	int ret;

	if (i915_tt->cached_st)
		return i915_tt->cached_st;

	st = kzalloc(sizeof(*st), GFP_KERNEL);
	if (!st)
		return ERR_PTR(-ENOMEM);

	sg = __sg_alloc_table_from_pages
		(st, ttm->pages, ttm->num_pages, 0,
		 (unsigned long)ttm->num_pages << PAGE_SHIFT,
		 i915_sg_segment_size(), NULL, 0, GFP_KERNEL);
	if (IS_ERR(sg)) {
		kfree(st);
		return ERR_CAST(sg);
	}
	ret = dma_map_sgtable(i915_tt->dev, st, DMA_BIDIRECTIONAL, 0);
	if (ret) {
		sg_free_table(st);
		kfree(st);
		return ERR_PTR(ret);
	}
	i915_tt->cached_st = st;
	return st;
}

static struct sg_table *
i915_ttm_resource_get_st(struct drm_i915_gem_object *obj,
			 struct ttm_resource *res)
{
	struct ttm_buffer_object *bo = i915_gem_to_ttm(obj);
	struct ttm_resource_manager *man =
		ttm_manager_type(bo->bdev, res->mem_type);

	if (man->use_tt)
		return i915_ttm_tt_get_st(bo->ttm);

	return intel_region_ttm_node_to_st(obj->mm.region, res);
}
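
/*
 * The TTM move callback: wait for idle, notify i915 that the backing
 * store is about to change, then copy the contents to the new placement
 * using TTM's kmap iterators (tt-backed or iomap-backed as appropriate).
 */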
static int i915_ttm_move(struct ttm_buffer_object *bo, bool evict,
			 struct ttm_operation_ctx *ctx,
			 struct ttm_resource *dst_mem,
			 struct ttm_place *hop)
{
	struct drm_i915_gem_object *obj = i915_ttm_to_gem(bo);
	struct ttm_resource_manager *dst_man =
		ttm_manager_type(bo->bdev, dst_mem->mem_type);
	struct ttm_resource_manager *src_man =
		ttm_manager_type(bo->bdev, bo->resource->mem_type);
	struct intel_memory_region *dst_reg, *src_reg;
	union {
		struct ttm_kmap_iter_tt tt;
		struct ttm_kmap_iter_iomap io;
	} _dst_iter, _src_iter;
	struct ttm_kmap_iter *dst_iter, *src_iter;
	struct sg_table *dst_st;
	int ret;

	dst_reg = i915_ttm_region(bo->bdev, dst_mem->mem_type);
	src_reg = i915_ttm_region(bo->bdev, bo->resource->mem_type);
	GEM_BUG_ON(!dst_reg || !src_reg);

	/* Sync for now. We could do the actual copy async. */
	ret = ttm_bo_wait_ctx(bo, ctx);
	if (ret)
		return ret;

	ret = i915_ttm_move_notify(bo);
	if (ret)
		return ret;

	if (obj->mm.madv != I915_MADV_WILLNEED) {
		i915_ttm_purge(obj);
		ttm_resource_free(bo, &dst_mem);
		return 0;
	}

	/* Populate ttm with pages if needed. Typically system memory. */
	if (bo->ttm && (dst_man->use_tt ||
			(bo->ttm->page_flags & TTM_PAGE_FLAG_SWAPPED))) {
		ret = ttm_tt_populate(bo->bdev, bo->ttm, ctx);
		if (ret)
			return ret;
	}

	dst_st = i915_ttm_resource_get_st(obj, dst_mem);
	if (IS_ERR(dst_st))
		return PTR_ERR(dst_st);

	/* If we start mapping GGTT, we can no longer use man::use_tt here. */
	dst_iter = dst_man->use_tt ?
		ttm_kmap_iter_tt_init(&_dst_iter.tt, bo->ttm) :
		ttm_kmap_iter_iomap_init(&_dst_iter.io, &dst_reg->iomap,
					 dst_st, dst_reg->region.start);

	src_iter = src_man->use_tt ?
		ttm_kmap_iter_tt_init(&_src_iter.tt, bo->ttm) :
		ttm_kmap_iter_iomap_init(&_src_iter.io, &src_reg->iomap,
					 obj->ttm.cached_io_st,
					 src_reg->region.start);

	ttm_move_memcpy(bo, dst_mem->num_pages, dst_iter, src_iter);
	ttm_bo_move_sync_cleanup(bo, dst_mem);
	i915_ttm_free_cached_io_st(obj);

	if (!dst_man->use_tt) {
		obj->ttm.cached_io_st = dst_st;
		obj->ttm.get_io_page.sg_pos = dst_st->sgl;
		obj->ttm.get_io_page.sg_idx = 0;
	}

	return 0;
}

static int i915_ttm_io_mem_reserve(struct ttm_device *bdev, struct ttm_resource *mem)
{
	if (mem->mem_type < I915_PL_LMEM0)
		return 0;

	mem->bus.caching = ttm_write_combined;
	mem->bus.is_iomem = true;
	return 0;
}
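
/*
 * Translate a fault offset within an LMEM-resident object into a PFN in
 * the region's io aperture, using the cached io scatter-gather table.
 */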
static unsigned long i915_ttm_io_mem_pfn(struct ttm_buffer_object *bo,
					 unsigned long page_offset)
{
	struct drm_i915_gem_object *obj = i915_ttm_to_gem(bo);
	unsigned long base = obj->mm.region->iomap.base - obj->mm.region->region.start;
	struct scatterlist *sg;
	unsigned int ofs;

	GEM_WARN_ON(bo->ttm);

	sg = __i915_gem_object_get_sg(obj, &obj->ttm.get_io_page, page_offset, &ofs, true, true);

	return ((base + sg_dma_address(sg)) >> PAGE_SHIFT) + ofs;
}

static struct ttm_device_funcs i915_ttm_bo_driver = {
	.ttm_tt_create = i915_ttm_tt_create,
	.ttm_tt_unpopulate = i915_ttm_tt_unpopulate,
	.ttm_tt_destroy = i915_ttm_tt_destroy,
	.eviction_valuable = i915_ttm_eviction_valuable,
	.evict_flags = i915_ttm_evict_flags,
	.move = i915_ttm_move,
	.swap_notify = i915_ttm_swap_notify,
	.delete_mem_notify = i915_ttm_delete_mem_notify,
	.io_mem_reserve = i915_ttm_io_mem_reserve,
	.io_mem_pfn = i915_ttm_io_mem_pfn,
};

/**
 * i915_ttm_driver - Return a pointer to the TTM device funcs
 *
 * Return: Pointer to statically allocated TTM device funcs.
 */
struct ttm_device_funcs *i915_ttm_driver(void)
{
	return &i915_ttm_bo_driver;
}
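
/*
 * get_pages backend for the gem object ops: validate the buffer object
 * into a suitable placement and expose the resulting page or iomem
 * scatter-gather table to the rest of i915.
 */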
static int i915_ttm_get_pages(struct drm_i915_gem_object *obj)
{
	struct ttm_buffer_object *bo = i915_gem_to_ttm(obj);
	struct ttm_operation_ctx ctx = {
		.interruptible = true,
		.no_wait_gpu = false,
	};
	struct sg_table *st;
	int ret;

	/* Move to the requested placement. */
	ret = ttm_bo_validate(bo, &i915_lmem0_placement, &ctx);
	if (ret)
		return ret == -ENOSPC ? -ENXIO : ret;

	/* Object either has a page vector or is an iomem object */
	st = bo->ttm ? i915_ttm_tt_get_st(bo->ttm) : obj->ttm.cached_io_st;
	if (IS_ERR(st))
		return PTR_ERR(st);

	__i915_gem_object_set_pages(obj, st, i915_sg_dma_sizes(st->sgl));
	i915_ttm_adjust_lru(obj);
	return ret;
}

static void i915_ttm_put_pages(struct drm_i915_gem_object *obj,
			       struct sg_table *st)
{
	/*
	 * We're currently not called from a shrinker, so put_pages()
	 * typically means the object is about to be destroyed, or called
	 * from move_notify(). So just avoid doing much for now.
	 * If the object is not destroyed next, the TTM eviction logic
	 * and shrinkers will move it out if needed.
	 */

	i915_ttm_adjust_lru(obj);
}
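
/*
 * Choose which TTM LRU list the buffer object sits on, based on whether
 * the object is purgeable and whether it currently has pages.
 */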
static void i915_ttm_adjust_lru(struct drm_i915_gem_object *obj)
{
	struct ttm_buffer_object *bo = i915_gem_to_ttm(obj);

	/*
	 * Don't manipulate the TTM LRUs while in TTM bo destruction.
	 * We're called through i915_ttm_delete_mem_notify().
	 */
	if (!kref_read(&bo->kref))
		return;

	/*
	 * Put on the correct LRU list depending on the MADV status
	 */
	spin_lock(&bo->bdev->lru_lock);
	if (obj->mm.madv != I915_MADV_WILLNEED) {
		bo->priority = I915_TTM_PRIO_PURGE;
	} else if (!i915_gem_object_has_pages(obj)) {
		if (bo->priority < I915_TTM_PRIO_HAS_PAGES)
			bo->priority = I915_TTM_PRIO_HAS_PAGES;
	} else {
		if (bo->priority > I915_TTM_PRIO_NO_PAGES)
			bo->priority = I915_TTM_PRIO_NO_PAGES;
	}

	ttm_bo_move_to_lru_tail(bo, bo->resource, NULL);
	spin_unlock(&bo->bdev->lru_lock);
}

/*
 * TTM-backed gem object destruction requires some clarification.
 * Basically we have two possibilities here. We can either rely on the
 * i915 delayed destruction and put the TTM object when the object
 * is idle. This would be detected by TTM which would bypass the
 * TTM delayed destroy handling. The other approach is to put the TTM
 * object early and rely on the TTM destroyed handling, and then free
 * the leftover parts of the GEM object once TTM's destroyed list handling is
 * complete. For now, we rely on the latter for two reasons:
 * a) TTM can evict an object even when it's on the delayed destroy list,
 * which in theory allows for complete eviction.
 * b) There is work going on in TTM to allow freeing an object even when
 * it's not idle, and using the TTM destroyed list handling could help us
 * benefit from that work.
 */
static void i915_ttm_delayed_free(struct drm_i915_gem_object *obj)
{
	if (obj->ttm.created) {
		ttm_bo_put(i915_gem_to_ttm(obj));
	} else {
		__i915_gem_free_object(obj);
		call_rcu(&obj->rcu, __i915_gem_free_object_rcu);
	}
}
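
/*
 * CPU mmap support: the fault and access handlers wrap the generic TTM
 * helpers, adding an i915 read-only check before allowing writes.
 */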
static vm_fault_t vm_fault_ttm(struct vm_fault *vmf)
{
	struct vm_area_struct *area = vmf->vma;
	struct drm_i915_gem_object *obj =
		i915_ttm_to_gem(area->vm_private_data);

	/* Sanity check that we allow writing into this object */
	if (unlikely(i915_gem_object_is_readonly(obj) &&
		     area->vm_flags & VM_WRITE))
		return VM_FAULT_SIGBUS;

	return ttm_bo_vm_fault(vmf);
}

static int
vm_access_ttm(struct vm_area_struct *area, unsigned long addr,
	      void *buf, int len, int write)
{
	struct drm_i915_gem_object *obj =
		i915_ttm_to_gem(area->vm_private_data);

	if (i915_gem_object_is_readonly(obj) && write)
		return -EACCES;

	return ttm_bo_vm_access(area, addr, buf, len, write);
}

static void ttm_vm_open(struct vm_area_struct *vma)
{
	struct drm_i915_gem_object *obj =
		i915_ttm_to_gem(vma->vm_private_data);

	i915_gem_object_get(obj);
}

static void ttm_vm_close(struct vm_area_struct *vma)
{
	struct drm_i915_gem_object *obj =
		i915_ttm_to_gem(vma->vm_private_data);

	i915_gem_object_put(obj);
}

static const struct vm_operations_struct vm_ops_ttm = {
	.fault = vm_fault_ttm,
	.access = vm_access_ttm,
	.open = ttm_vm_open,
	.close = ttm_vm_close,
};

static u64 i915_ttm_mmap_offset(struct drm_i915_gem_object *obj)
{
	/* The ttm_bo must be allocated with I915_BO_ALLOC_USER */
	GEM_BUG_ON(!drm_mm_node_allocated(&obj->base.vma_node.vm_node));

	return drm_vma_node_offset_addr(&obj->base.vma_node);
}

const struct drm_i915_gem_object_ops i915_gem_ttm_obj_ops = {
	.name = "i915_gem_object_ttm",
	.flags = I915_GEM_OBJECT_HAS_IOMEM,

	.get_pages = i915_ttm_get_pages,
	.put_pages = i915_ttm_put_pages,
	.truncate = i915_ttm_purge,
	.adjust_lru = i915_ttm_adjust_lru,
	.delayed_free = i915_ttm_delayed_free,
	.mmap_offset = i915_ttm_mmap_offset,
	.mmap_ops = &vm_ops_ttm,
};
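
/*
 * TTM destroy callback: release the memory region binding and, if the
 * object was fully initialized, free the remaining GEM parts via RCU.
 */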
void i915_ttm_bo_destroy(struct ttm_buffer_object *bo)
{
	struct drm_i915_gem_object *obj = i915_ttm_to_gem(bo);

	i915_gem_object_release_memory_region(obj);
	mutex_destroy(&obj->ttm.get_io_page.lock);
	if (obj->ttm.created)
		call_rcu(&obj->rcu, __i915_gem_free_object_rcu);
}

/**
 * __i915_gem_ttm_object_init - Initialize a ttm-backed i915 gem object
 * @mem: The initial memory region for the object.
 * @obj: The gem object.
 * @size: Object size in bytes.
 * @flags: gem object flags.
 *
 * Return: 0 on success, negative error code on failure.
 */
int __i915_gem_ttm_object_init(struct intel_memory_region *mem,
			       struct drm_i915_gem_object *obj,
			       resource_size_t size,
			       unsigned int flags)
{
	static struct lock_class_key lock_class;
	struct drm_i915_private *i915 = mem->i915;
	enum ttm_bo_type bo_type;
	size_t alignment = 0;
	int ret;

	/* Adjust alignment to GPU- and CPU huge page sizes. */
	if (mem->is_range_manager) {
		if (size >= SZ_1G)
			alignment = SZ_1G >> PAGE_SHIFT;
		else if (size >= SZ_2M)
			alignment = SZ_2M >> PAGE_SHIFT;
		else if (size >= SZ_64K)
			alignment = SZ_64K >> PAGE_SHIFT;
	}

	drm_gem_private_object_init(&i915->drm, &obj->base, size);
	i915_gem_object_init(obj, &i915_gem_ttm_obj_ops, &lock_class, flags);
	i915_gem_object_init_memory_region(obj, mem);
	i915_gem_object_make_unshrinkable(obj);
	obj->read_domains = I915_GEM_DOMAIN_WC | I915_GEM_DOMAIN_GTT;
	i915_gem_object_set_cache_coherency(obj, I915_CACHE_NONE);
	INIT_RADIX_TREE(&obj->ttm.get_io_page.radix, GFP_KERNEL | __GFP_NOWARN);
	mutex_init(&obj->ttm.get_io_page.lock);

	bo_type = (obj->flags & I915_BO_ALLOC_USER) ? ttm_bo_type_device :
		ttm_bo_type_kernel;

	/*
	 * If this function fails, it will call the destructor, but
	 * our caller still owns the object. So no freeing in the
	 * destructor until obj->ttm.created is true.
	 * Similarly, in delayed_destroy, we can't call ttm_bo_put()
	 * until successful initialization.
	 */
	obj->base.vma_node.driver_private = i915_gem_to_ttm(obj);
	ret = ttm_bo_init(&i915->bdev, i915_gem_to_ttm(obj), size,
			  bo_type, &i915_sys_placement, alignment,
			  true, NULL, NULL, i915_ttm_bo_destroy);
	if (!ret)
		obj->ttm.created = true;

	/* i915 wants -ENXIO when out of memory region space. */
	return (ret == -ENOSPC) ? -ENXIO : ret;
}