// SPDX-License-Identifier: MIT
/*
 * Copyright © 2021 Intel Corporation
 */

#include <drm/ttm/ttm_bo_driver.h>
#include <drm/ttm/ttm_placement.h>

#include "i915_drv.h"
#include "intel_memory_region.h"
#include "intel_region_ttm.h"

#include "gem/i915_gem_object.h"
#include "gem/i915_gem_region.h"
#include "gem/i915_gem_ttm.h"
#include "gem/i915_gem_mman.h"

#include "gt/intel_migrate.h"
#include "gt/intel_engine_pm.h"

#define I915_PL_LMEM0 TTM_PL_PRIV
#define I915_PL_SYSTEM TTM_PL_SYSTEM
#define I915_PL_STOLEN TTM_PL_VRAM
#define I915_PL_GGTT TTM_PL_TT

#define I915_TTM_PRIO_PURGE     0
#define I915_TTM_PRIO_NO_PAGES  1
#define I915_TTM_PRIO_HAS_PAGES 2

/*
 * Size of struct ttm_place vector in on-stack struct ttm_placement allocs
 */
#define I915_TTM_MAX_PLACEMENTS INTEL_REGION_UNKNOWN

/**
 * struct i915_ttm_tt - TTM page vector with additional private information
 * @ttm: The base TTM page vector.
 * @dev: The struct device used for dma mapping and unmapping.
 * @cached_st: The cached scatter-gather table.
 *
 * Note that DMA may be going on right up to the point where the page-
 * vector is unpopulated in delayed destroy. Hence keep the
 * scatter-gather table mapped and cached up to that point. This is
 * different from the cached gem object io scatter-gather table which
 * doesn't have an associated dma mapping.
 */
struct i915_ttm_tt {
	struct ttm_tt ttm;
	struct device *dev;
	struct sg_table *cached_st;
};

static const struct ttm_place sys_placement_flags = {
	.fpfn = 0,
	.lpfn = 0,
	.mem_type = I915_PL_SYSTEM,
	.flags = 0,
};

static struct ttm_placement i915_sys_placement = {
	.num_placement = 1,
	.placement = &sys_placement_flags,
	.num_busy_placement = 1,
	.busy_placement = &sys_placement_flags,
};

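/*
 * Map error codes returned by TTM to the error conventions expected by
 * the rest of the i915 GEM code.
 */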
static int i915_ttm_err_to_gem(int err)
{
	/* Fastpath */
	if (likely(!err))
		return 0;

	switch (err) {
	case -EBUSY:
		/*
		 * TTM likes to convert -EDEADLK to -EBUSY, and wants us to
		 * restart the operation, since we don't record the contending
		 * lock. We use -EAGAIN to restart.
		 */
		return -EAGAIN;
	case -ENOSPC:
		/*
		 * Memory type / region is full, and we can't evict.
		 * Except possibly system, which returns -ENOMEM.
		 */
		return -ENXIO;
	default:
		break;
	}

	return err;
}

static void i915_ttm_adjust_lru(struct drm_i915_gem_object *obj);

static enum ttm_caching
i915_ttm_select_tt_caching(const struct drm_i915_gem_object *obj)
{
	/*
	 * Objects only allowed in system get cached cpu-mappings.
	 * Other objects get WC mapping for now, even if in system.
	 */
	if (obj->mm.region->type == INTEL_MEMORY_SYSTEM &&
	    obj->mm.n_placements <= 1)
		return ttm_cached;

	return ttm_write_combined;
}

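/*
 * Translate an intel_memory_region plus gem allocation flags into a
 * single struct ttm_place entry.
 */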
static void
i915_ttm_place_from_region(const struct intel_memory_region *mr,
			   struct ttm_place *place,
			   unsigned int flags)
{
	memset(place, 0, sizeof(*place));
	place->mem_type = intel_region_to_ttm_type(mr);

	if (flags & I915_BO_ALLOC_CONTIGUOUS)
		place->flags = TTM_PL_FLAG_CONTIGUOUS;
}

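/*
 * Build the TTM placement from the object's list of allowed memory
 * regions: the first (preferred) region becomes the requested placement
 * and the full list becomes the busy (fallback) placements.
 */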
static void
i915_ttm_placement_from_obj(const struct drm_i915_gem_object *obj,
			    struct ttm_place *requested,
			    struct ttm_place *busy,
			    struct ttm_placement *placement)
{
	unsigned int num_allowed = obj->mm.n_placements;
	unsigned int flags = obj->flags;
	unsigned int i;

	placement->num_placement = 1;
	i915_ttm_place_from_region(num_allowed ? obj->mm.placements[0] :
				   obj->mm.region, requested, flags);

	/* Cache this on object? */
	placement->num_busy_placement = num_allowed;
	for (i = 0; i < placement->num_busy_placement; ++i)
		i915_ttm_place_from_region(obj->mm.placements[i], busy + i, flags);

	if (num_allowed == 0) {
		*busy = *requested;
		placement->num_busy_placement = 1;
	}

	placement->placement = requested;
	placement->busy_placement = busy;
}

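/*
 * Allocate the i915_ttm_tt page vector backing a buffer object,
 * selecting the caching mode and clear-on-alloc behaviour from the
 * object flags.
 */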
static struct ttm_tt *i915_ttm_tt_create(struct ttm_buffer_object *bo,
					 uint32_t page_flags)
{
	struct ttm_resource_manager *man =
		ttm_manager_type(bo->bdev, bo->resource->mem_type);
	struct drm_i915_gem_object *obj = i915_ttm_to_gem(bo);
	struct i915_ttm_tt *i915_tt;
	int ret;

	i915_tt = kzalloc(sizeof(*i915_tt), GFP_KERNEL);
	if (!i915_tt)
		return NULL;

	if (obj->flags & I915_BO_ALLOC_CPU_CLEAR &&
	    man->use_tt)
		page_flags |= TTM_PAGE_FLAG_ZERO_ALLOC;

	ret = ttm_tt_init(&i915_tt->ttm, bo, page_flags,
			  i915_ttm_select_tt_caching(obj));
	if (ret) {
		kfree(i915_tt);
		return NULL;
	}

	i915_tt->dev = obj->base.dev->dev;

	return &i915_tt->ttm;
}

static void i915_ttm_tt_unpopulate(struct ttm_device *bdev, struct ttm_tt *ttm)
{
	struct i915_ttm_tt *i915_tt = container_of(ttm, typeof(*i915_tt), ttm);

	if (i915_tt->cached_st) {
		dma_unmap_sgtable(i915_tt->dev, i915_tt->cached_st,
				  DMA_BIDIRECTIONAL, 0);
		sg_free_table(i915_tt->cached_st);
		kfree(i915_tt->cached_st);
		i915_tt->cached_st = NULL;
	}
	ttm_pool_free(&bdev->pool, ttm);
}

static void i915_ttm_tt_destroy(struct ttm_device *bdev, struct ttm_tt *ttm)
{
	struct i915_ttm_tt *i915_tt = container_of(ttm, typeof(*i915_tt), ttm);

	ttm_tt_destroy_common(bdev, ttm);
	ttm_tt_fini(ttm);
	kfree(i915_tt);
}

static bool i915_ttm_eviction_valuable(struct ttm_buffer_object *bo,
				       const struct ttm_place *place)
{
	struct drm_i915_gem_object *obj = i915_ttm_to_gem(bo);

	/* Will do for now. Our pinned objects are still on TTM's LRU lists */
	return i915_gem_object_evictable(obj);
}

static void i915_ttm_evict_flags(struct ttm_buffer_object *bo,
				 struct ttm_placement *placement)
{
	*placement = i915_sys_placement;
}

static int i915_ttm_move_notify(struct ttm_buffer_object *bo)
{
	struct drm_i915_gem_object *obj = i915_ttm_to_gem(bo);
	int ret;

	ret = i915_gem_object_unbind(obj, I915_GEM_OBJECT_UNBIND_ACTIVE);
	if (ret)
		return ret;

	ret = __i915_gem_object_put_pages(obj);
	if (ret)
		return ret;

	return 0;
}

static void i915_ttm_free_cached_io_st(struct drm_i915_gem_object *obj)
{
	struct radix_tree_iter iter;
	void __rcu **slot;

	if (!obj->ttm.cached_io_st)
		return;

	rcu_read_lock();
	radix_tree_for_each_slot(slot, &obj->ttm.get_io_page.radix, &iter, 0)
		radix_tree_delete(&obj->ttm.get_io_page.radix, iter.index);
	rcu_read_unlock();

	sg_free_table(obj->ttm.cached_io_st);
	kfree(obj->ttm.cached_io_st);
	obj->ttm.cached_io_st = NULL;
}

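/*
 * Drop the backing store of a purgeable object by validating it against
 * an empty placement, then mark the object purged.
 */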
static void i915_ttm_purge(struct drm_i915_gem_object *obj)
{
	struct ttm_buffer_object *bo = i915_gem_to_ttm(obj);
	struct ttm_operation_ctx ctx = {
		.interruptible = true,
		.no_wait_gpu = false,
	};
	struct ttm_placement place = {};
	int ret;

	if (obj->mm.madv == __I915_MADV_PURGED)
		return;

	/* TTM's purge interface. Note that we might be reentering. */
	ret = ttm_bo_validate(bo, &place, &ctx);
	if (!ret) {
		i915_ttm_free_cached_io_st(obj);
		obj->mm.madv = __I915_MADV_PURGED;
	}
}

static void i915_ttm_swap_notify(struct ttm_buffer_object *bo)
{
	struct drm_i915_gem_object *obj = i915_ttm_to_gem(bo);
	int ret = i915_ttm_move_notify(bo);

	GEM_WARN_ON(ret);
	GEM_WARN_ON(obj->ttm.cached_io_st);
	if (!ret && obj->mm.madv != I915_MADV_WILLNEED)
		i915_ttm_purge(obj);
}

static void i915_ttm_delete_mem_notify(struct ttm_buffer_object *bo)
{
	struct drm_i915_gem_object *obj = i915_ttm_to_gem(bo);

	if (likely(obj)) {
		/* This releases all gem object bindings to the backend. */
		i915_ttm_free_cached_io_st(obj);
		__i915_gem_free_object(obj);
	}
}

static struct intel_memory_region *
i915_ttm_region(struct ttm_device *bdev, int ttm_mem_type)
{
	struct drm_i915_private *i915 = container_of(bdev, typeof(*i915), bdev);

	/* There's some room for optimization here... */
	GEM_BUG_ON(ttm_mem_type != I915_PL_SYSTEM &&
		   ttm_mem_type < I915_PL_LMEM0);
	if (ttm_mem_type == I915_PL_SYSTEM)
		return intel_memory_region_lookup(i915, INTEL_MEMORY_SYSTEM,
						  0);

	return intel_memory_region_lookup(i915, INTEL_MEMORY_LOCAL,
					  ttm_mem_type - I915_PL_LMEM0);
}

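/*
 * Build, or return the cached, dma-mapped scatter-gather table for a
 * populated TTM page vector.
 */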
static struct sg_table *i915_ttm_tt_get_st(struct ttm_tt *ttm)
{
	struct i915_ttm_tt *i915_tt = container_of(ttm, typeof(*i915_tt), ttm);
	struct scatterlist *sg;
	struct sg_table *st;
	int ret;

	if (i915_tt->cached_st)
		return i915_tt->cached_st;

	st = kzalloc(sizeof(*st), GFP_KERNEL);
	if (!st)
		return ERR_PTR(-ENOMEM);

	sg = __sg_alloc_table_from_pages
		(st, ttm->pages, ttm->num_pages, 0,
		 (unsigned long)ttm->num_pages << PAGE_SHIFT,
		 i915_sg_segment_size(), NULL, 0, GFP_KERNEL);
	if (IS_ERR(sg)) {
		kfree(st);
		return ERR_CAST(sg);
	}

	ret = dma_map_sgtable(i915_tt->dev, st, DMA_BIDIRECTIONAL, 0);
	if (ret) {
		sg_free_table(st);
		kfree(st);
		return ERR_PTR(ret);
	}

	i915_tt->cached_st = st;
	return st;
}

static struct sg_table *
i915_ttm_resource_get_st(struct drm_i915_gem_object *obj,
			 struct ttm_resource *res)
{
	struct ttm_buffer_object *bo = i915_gem_to_ttm(obj);
	struct ttm_resource_manager *man =
		ttm_manager_type(bo->bdev, res->mem_type);

	if (man->use_tt)
		return i915_ttm_tt_get_st(bo->ttm);

	return intel_region_ttm_resource_to_st(obj->mm.region, res);
}

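/*
 * Use the blitter migration context to clear or copy the object's
 * backing store as part of a move, waiting synchronously for the
 * resulting request.
 */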
static int i915_ttm_accel_move(struct ttm_buffer_object *bo,
			       struct ttm_resource *dst_mem,
			       struct sg_table *dst_st)
{
	struct drm_i915_private *i915 = container_of(bo->bdev, typeof(*i915),
						     bdev);
	struct ttm_resource_manager *src_man =
		ttm_manager_type(bo->bdev, bo->resource->mem_type);
	struct drm_i915_gem_object *obj = i915_ttm_to_gem(bo);
	struct sg_table *src_st;
	struct i915_request *rq;
	int ret;

	if (!i915->gt.migrate.context)
		return -EINVAL;

	if (!bo->ttm || !ttm_tt_is_populated(bo->ttm)) {
		if (bo->type == ttm_bo_type_kernel)
			return -EINVAL;

		if (bo->ttm &&
		    !(bo->ttm->page_flags & TTM_PAGE_FLAG_ZERO_ALLOC))
			return 0;

		intel_engine_pm_get(i915->gt.migrate.context->engine);
		ret = intel_context_migrate_clear(i915->gt.migrate.context, NULL,
						  dst_st->sgl, I915_CACHE_NONE,
						  dst_mem->mem_type >= I915_PL_LMEM0,
						  0, &rq);

		if (!ret && rq) {
			i915_request_wait(rq, 0, MAX_SCHEDULE_TIMEOUT);
			i915_request_put(rq);
		}
		intel_engine_pm_put(i915->gt.migrate.context->engine);
	} else {
		src_st = src_man->use_tt ? i915_ttm_tt_get_st(bo->ttm) :
			obj->ttm.cached_io_st;

		intel_engine_pm_get(i915->gt.migrate.context->engine);
		ret = intel_context_migrate_copy(i915->gt.migrate.context,
						 NULL, src_st->sgl, I915_CACHE_NONE,
						 bo->resource->mem_type >= I915_PL_LMEM0,
						 dst_st->sgl, I915_CACHE_NONE,
						 dst_mem->mem_type >= I915_PL_LMEM0,
						 &rq);
		if (!ret && rq) {
			i915_request_wait(rq, 0, MAX_SCHEDULE_TIMEOUT);
			i915_request_put(rq);
		}
		intel_engine_pm_put(i915->gt.migrate.context->engine);
	}

	return ret;
}

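/*
 * The TTM move callback: handles purged objects, populates the
 * destination resource and falls back to a CPU memcpy move when the
 * accelerated (blitter) move is unavailable or fails.
 */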
static int i915_ttm_move(struct ttm_buffer_object *bo, bool evict,
			 struct ttm_operation_ctx *ctx,
			 struct ttm_resource *dst_mem,
			 struct ttm_place *hop)
{
	struct drm_i915_gem_object *obj = i915_ttm_to_gem(bo);
	struct ttm_resource_manager *dst_man =
		ttm_manager_type(bo->bdev, dst_mem->mem_type);
	struct ttm_resource_manager *src_man =
		ttm_manager_type(bo->bdev, bo->resource->mem_type);
	struct intel_memory_region *dst_reg, *src_reg;
	union {
		struct ttm_kmap_iter_tt tt;
		struct ttm_kmap_iter_iomap io;
	} _dst_iter, _src_iter;
	struct ttm_kmap_iter *dst_iter, *src_iter;
	struct sg_table *dst_st;
	int ret;

	dst_reg = i915_ttm_region(bo->bdev, dst_mem->mem_type);
	src_reg = i915_ttm_region(bo->bdev, bo->resource->mem_type);
	GEM_BUG_ON(!dst_reg || !src_reg);

	/* Sync for now. We could do the actual copy async. */
	ret = ttm_bo_wait_ctx(bo, ctx);
	if (ret)
		return ret;

	ret = i915_ttm_move_notify(bo);
	if (ret)
		return ret;

	if (obj->mm.madv != I915_MADV_WILLNEED) {
		i915_ttm_purge(obj);
		ttm_resource_free(bo, &dst_mem);
		return 0;
	}

	/* Populate ttm with pages if needed. Typically system memory. */
	if (bo->ttm && (dst_man->use_tt ||
			(bo->ttm->page_flags & TTM_PAGE_FLAG_SWAPPED))) {
		ret = ttm_tt_populate(bo->bdev, bo->ttm, ctx);
		if (ret)
			return ret;
	}

	dst_st = i915_ttm_resource_get_st(obj, dst_mem);
	if (IS_ERR(dst_st))
		return PTR_ERR(dst_st);

	ret = i915_ttm_accel_move(bo, dst_mem, dst_st);
	if (ret) {
		/* If we start mapping GGTT, we can no longer use man::use_tt here. */
		dst_iter = dst_man->use_tt ?
			ttm_kmap_iter_tt_init(&_dst_iter.tt, bo->ttm) :
			ttm_kmap_iter_iomap_init(&_dst_iter.io, &dst_reg->iomap,
						 dst_st, dst_reg->region.start);

		src_iter = src_man->use_tt ?
			ttm_kmap_iter_tt_init(&_src_iter.tt, bo->ttm) :
			ttm_kmap_iter_iomap_init(&_src_iter.io, &src_reg->iomap,
						 obj->ttm.cached_io_st,
						 src_reg->region.start);

		ttm_move_memcpy(bo, dst_mem->num_pages, dst_iter, src_iter);
	}
	ttm_bo_move_sync_cleanup(bo, dst_mem);
	i915_ttm_free_cached_io_st(obj);

	if (!dst_man->use_tt) {
		obj->ttm.cached_io_st = dst_st;
		obj->ttm.get_io_page.sg_pos = dst_st->sgl;
		obj->ttm.get_io_page.sg_idx = 0;
	}

	return 0;
}

static int i915_ttm_io_mem_reserve(struct ttm_device *bdev, struct ttm_resource *mem)
{
	if (mem->mem_type < I915_PL_LMEM0)
		return 0;

	mem->bus.caching = ttm_write_combined;
	mem->bus.is_iomem = true;

	return 0;
}

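/*
 * Look up the pfn backing a given page offset of an iomem (LMEM) object
 * using the cached io scatter-gather table.
 */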
static unsigned long i915_ttm_io_mem_pfn(struct ttm_buffer_object *bo,
					 unsigned long page_offset)
{
	struct drm_i915_gem_object *obj = i915_ttm_to_gem(bo);
	unsigned long base = obj->mm.region->iomap.base - obj->mm.region->region.start;
	struct scatterlist *sg;
	unsigned int ofs;

	GEM_WARN_ON(bo->ttm);

	sg = __i915_gem_object_get_sg(obj, &obj->ttm.get_io_page, page_offset, &ofs, true, true);

	return ((base + sg_dma_address(sg)) >> PAGE_SHIFT) + ofs;
}

static struct ttm_device_funcs i915_ttm_bo_driver = {
	.ttm_tt_create = i915_ttm_tt_create,
	.ttm_tt_unpopulate = i915_ttm_tt_unpopulate,
	.ttm_tt_destroy = i915_ttm_tt_destroy,
	.eviction_valuable = i915_ttm_eviction_valuable,
	.evict_flags = i915_ttm_evict_flags,
	.move = i915_ttm_move,
	.swap_notify = i915_ttm_swap_notify,
	.delete_mem_notify = i915_ttm_delete_mem_notify,
	.io_mem_reserve = i915_ttm_io_mem_reserve,
	.io_mem_pfn = i915_ttm_io_mem_pfn,
};

/**
 * i915_ttm_driver - Return a pointer to the TTM device funcs
 *
 * Return: Pointer to statically allocated TTM device funcs.
 */
struct ttm_device_funcs *i915_ttm_driver(void)
{
	return &i915_ttm_bo_driver;
}

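/*
 * The gem object get_pages() backend: validate the object into its
 * requested placement, falling back to the full placement list with
 * eviction if needed, and install the resulting page or iomem sg-table.
 */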
static int i915_ttm_get_pages(struct drm_i915_gem_object *obj)
{
	struct ttm_buffer_object *bo = i915_gem_to_ttm(obj);
	struct ttm_operation_ctx ctx = {
		.interruptible = true,
		.no_wait_gpu = false,
	};
	struct sg_table *st;
	struct ttm_place requested, busy[I915_TTM_MAX_PLACEMENTS];
	struct ttm_placement placement;
	int real_num_busy;
	int ret;

	GEM_BUG_ON(obj->mm.n_placements > I915_TTM_MAX_PLACEMENTS);

	/* Move to the requested placement. */
	i915_ttm_placement_from_obj(obj, &requested, busy, &placement);

	/*
	 * For now we support LMEM only with TTM.
	 * TODO: Remove with system support
	 */
	GEM_BUG_ON(requested.mem_type < I915_PL_LMEM0 ||
		   busy[0].mem_type < I915_PL_LMEM0);

	/* First try only the requested placement. No eviction. */
	real_num_busy = fetch_and_zero(&placement.num_busy_placement);
	ret = ttm_bo_validate(bo, &placement, &ctx);
	if (ret) {
		ret = i915_ttm_err_to_gem(ret);
		/*
		 * Anything that wants to restart the operation gets to
		 * do that.
		 */
		if (ret == -EDEADLK || ret == -EINTR || ret == -ERESTARTSYS ||
		    ret == -EAGAIN)
			return ret;

		/* TODO: Remove this when we support system as TTM. */
		real_num_busy = 1;

		/*
		 * If the initial attempt fails, allow all accepted placements,
		 * evicting if necessary.
		 */
		placement.num_busy_placement = real_num_busy;
		ret = ttm_bo_validate(bo, &placement, &ctx);
		if (ret)
			return i915_ttm_err_to_gem(ret);
	}

	/* Object either has a page vector or is an iomem object */
	st = bo->ttm ? i915_ttm_tt_get_st(bo->ttm) : obj->ttm.cached_io_st;
	if (IS_ERR(st))
		return PTR_ERR(st);

	__i915_gem_object_set_pages(obj, st, i915_sg_dma_sizes(st->sgl));

	i915_ttm_adjust_lru(obj);

	return ret;
}

static void i915_ttm_put_pages(struct drm_i915_gem_object *obj,
			       struct sg_table *st)
{
	/*
	 * We're currently not called from a shrinker, so put_pages()
	 * typically means the object is about to be destroyed, or called
	 * from move_notify(). So just avoid doing much for now.
	 * If the object is not destroyed next, the TTM eviction logic
	 * and shrinkers will move it out if needed.
	 */

	i915_ttm_adjust_lru(obj);
}

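/*
 * Adjust the object's position and priority on the TTM LRU lists
 * according to its madvise state and whether it currently has pages.
 */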
static void i915_ttm_adjust_lru(struct drm_i915_gem_object *obj)
{
	struct ttm_buffer_object *bo = i915_gem_to_ttm(obj);

	/*
	 * Don't manipulate the TTM LRUs while in TTM bo destruction.
	 * We're called through i915_ttm_delete_mem_notify().
	 */
	if (!kref_read(&bo->kref))
		return;

	/*
	 * Put on the correct LRU list depending on the MADV status
	 */
	spin_lock(&bo->bdev->lru_lock);
	if (obj->mm.madv != I915_MADV_WILLNEED) {
		bo->priority = I915_TTM_PRIO_PURGE;
	} else if (!i915_gem_object_has_pages(obj)) {
		if (bo->priority < I915_TTM_PRIO_HAS_PAGES)
			bo->priority = I915_TTM_PRIO_HAS_PAGES;
	} else {
		if (bo->priority > I915_TTM_PRIO_NO_PAGES)
			bo->priority = I915_TTM_PRIO_NO_PAGES;
	}

	ttm_bo_move_to_lru_tail(bo, bo->resource, NULL);
	spin_unlock(&bo->bdev->lru_lock);
}

/*
 * TTM-backed gem object destruction requires some clarification.
 * Basically we have two possibilities here. We can either rely on the
 * i915 delayed destruction and put the TTM object when the object
 * is idle. This would be detected by TTM, which would bypass the
 * TTM delayed destroy handling. The other approach is to put the TTM
 * object early and rely on the TTM destroyed handling, and then free
 * the leftover parts of the GEM object once TTM's destroyed list handling is
 * complete. For now, we rely on the latter for two reasons:
 * a) TTM can evict an object even when it's on the delayed destroy list,
 * which in theory allows for complete eviction.
 * b) There is work going on in TTM to allow freeing an object even when
 * it's not idle, and using the TTM destroyed list handling could help us
 * benefit from that.
 */
static void i915_ttm_delayed_free(struct drm_i915_gem_object *obj)
{
	if (obj->ttm.created) {
		ttm_bo_put(i915_gem_to_ttm(obj));
	} else {
		__i915_gem_free_object(obj);
		call_rcu(&obj->rcu, __i915_gem_free_object_rcu);
	}
}

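/*
 * CPU mmap fault handler: enforce i915 read-only objects, then hand the
 * fault over to TTM.
 */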
static vm_fault_t vm_fault_ttm(struct vm_fault *vmf)
{
	struct vm_area_struct *area = vmf->vma;
	struct drm_i915_gem_object *obj =
		i915_ttm_to_gem(area->vm_private_data);

	/* Sanity check that we allow writing into this object */
	if (unlikely(i915_gem_object_is_readonly(obj) &&
		     area->vm_flags & VM_WRITE))
		return VM_FAULT_SIGBUS;

	return ttm_bo_vm_fault(vmf);
}

static int
vm_access_ttm(struct vm_area_struct *area, unsigned long addr,
	      void *buf, int len, int write)
{
	struct drm_i915_gem_object *obj =
		i915_ttm_to_gem(area->vm_private_data);

	if (i915_gem_object_is_readonly(obj) && write)
		return -EACCES;

	return ttm_bo_vm_access(area, addr, buf, len, write);
}

static void ttm_vm_open(struct vm_area_struct *vma)
{
	struct drm_i915_gem_object *obj =
		i915_ttm_to_gem(vma->vm_private_data);

	GEM_BUG_ON(!obj);
	i915_gem_object_get(obj);
}

static void ttm_vm_close(struct vm_area_struct *vma)
{
	struct drm_i915_gem_object *obj =
		i915_ttm_to_gem(vma->vm_private_data);

	GEM_BUG_ON(!obj);
	i915_gem_object_put(obj);
}

static const struct vm_operations_struct vm_ops_ttm = {
	.fault = vm_fault_ttm,
	.access = vm_access_ttm,
	.open = ttm_vm_open,
	.close = ttm_vm_close,
};

static u64 i915_ttm_mmap_offset(struct drm_i915_gem_object *obj)
{
	/* The ttm_bo must be allocated with I915_BO_ALLOC_USER */
	GEM_BUG_ON(!drm_mm_node_allocated(&obj->base.vma_node.vm_node));

	return drm_vma_node_offset_addr(&obj->base.vma_node);
}

const struct drm_i915_gem_object_ops i915_gem_ttm_obj_ops = {
	.name = "i915_gem_object_ttm",
	.flags = I915_GEM_OBJECT_HAS_IOMEM,

	.get_pages = i915_ttm_get_pages,
	.put_pages = i915_ttm_put_pages,
	.truncate = i915_ttm_purge,
	.adjust_lru = i915_ttm_adjust_lru,
	.delayed_free = i915_ttm_delayed_free,
	.mmap_offset = i915_ttm_mmap_offset,
	.mmap_ops = &vm_ops_ttm,
};

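/*
 * TTM backing object destructor, called when the last bo reference is
 * dropped. Releases the i915 side of the object and, if the object was
 * fully created, frees it via RCU.
 */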
void i915_ttm_bo_destroy(struct ttm_buffer_object *bo)
{
	struct drm_i915_gem_object *obj = i915_ttm_to_gem(bo);

	i915_gem_object_release_memory_region(obj);
	mutex_destroy(&obj->ttm.get_io_page.lock);
	if (obj->ttm.created)
		call_rcu(&obj->rcu, __i915_gem_free_object_rcu);
}

/**
 * __i915_gem_ttm_object_init - Initialize a ttm-backed i915 gem object
 * @mem: The initial memory region for the object.
 * @obj: The gem object.
 * @size: Object size in bytes.
 * @flags: gem object flags.
 *
 * Return: 0 on success, negative error code on failure.
 */
int __i915_gem_ttm_object_init(struct intel_memory_region *mem,
			       struct drm_i915_gem_object *obj,
			       resource_size_t size,
			       unsigned int flags)
{
	static struct lock_class_key lock_class;
	struct drm_i915_private *i915 = mem->i915;
	enum ttm_bo_type bo_type;
	int ret;

	drm_gem_private_object_init(&i915->drm, &obj->base, size);
	i915_gem_object_init(obj, &i915_gem_ttm_obj_ops, &lock_class, flags);
	i915_gem_object_init_memory_region(obj, mem);
	i915_gem_object_make_unshrinkable(obj);
	obj->read_domains = I915_GEM_DOMAIN_WC | I915_GEM_DOMAIN_GTT;
	i915_gem_object_set_cache_coherency(obj, I915_CACHE_NONE);
	INIT_RADIX_TREE(&obj->ttm.get_io_page.radix, GFP_KERNEL | __GFP_NOWARN);
	mutex_init(&obj->ttm.get_io_page.lock);
	bo_type = (obj->flags & I915_BO_ALLOC_USER) ? ttm_bo_type_device :
		ttm_bo_type_kernel;

	/*
	 * If this function fails, it will call the destructor, but
	 * our caller still owns the object. So no freeing in the
	 * destructor until obj->ttm.created is true.
	 * Similarly, in delayed_destroy, we can't call ttm_bo_put()
	 * until successful initialization.
	 */
	obj->base.vma_node.driver_private = i915_gem_to_ttm(obj);
	ret = ttm_bo_init(&i915->bdev, i915_gem_to_ttm(obj), size,
			  bo_type, &i915_sys_placement,
			  mem->min_page_size >> PAGE_SHIFT,
			  true, NULL, NULL, i915_ttm_bo_destroy);
	if (!ret)
		obj->ttm.created = true;

	/* i915 wants -ENXIO when out of memory region space. */
	return i915_ttm_err_to_gem(ret);
}
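
/*
 * Usage sketch (illustrative, not part of this file): a TTM-backed
 * memory region is expected to point its ->init_object() hook at the
 * function above, roughly:
 *
 *	static const struct intel_memory_region_ops intel_region_lmem_ops = {
 *		...
 *		.init_object = __i915_gem_ttm_object_init,
 *	};
 *
 * See the LMEM region setup for the actual wiring.
 */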