1 // SPDX-License-Identifier: MIT
3 * Copyright © 2021 Intel Corporation
9 #include <linux/dma-buf.h>
11 #include <drm/drm_drv.h>
12 #include <drm/drm_gem_ttm_helper.h>
13 #include <drm/ttm/ttm_device.h>
14 #include <drm/ttm/ttm_placement.h>
15 #include <drm/ttm/ttm_tt.h>
16 #include <drm/xe_drm.h>
18 #include "xe_device.h"
19 #include "xe_dma_buf.h"
23 #include "xe_migrate.h"
24 #include "xe_preempt_fence.h"
25 #include "xe_res_cursor.h"
27 #include "xe_ttm_stolen_mgr.h"
30 static const struct ttm_place sys_placement_flags = {
33 .mem_type = XE_PL_SYSTEM,
37 static struct ttm_placement sys_placement = {
39 .placement = &sys_placement_flags,
40 .num_busy_placement = 1,
41 .busy_placement = &sys_placement_flags,
44 bool mem_type_is_vram(u32 mem_type)
46 return mem_type >= XE_PL_VRAM0 && mem_type != XE_PL_STOLEN;
49 static bool resource_is_stolen_vram(struct xe_device *xe, struct ttm_resource *res)
51 return res->mem_type == XE_PL_STOLEN && IS_DGFX(xe);
54 static bool resource_is_vram(struct ttm_resource *res)
56 return mem_type_is_vram(res->mem_type);
59 bool xe_bo_is_vram(struct xe_bo *bo)
61 return resource_is_vram(bo->ttm.resource) ||
62 resource_is_stolen_vram(xe_bo_device(bo), bo->ttm.resource);
65 bool xe_bo_is_stolen(struct xe_bo *bo)
67 return bo->ttm.resource->mem_type == XE_PL_STOLEN;
70 static bool xe_bo_is_user(struct xe_bo *bo)
72 return bo->flags & XE_BO_CREATE_USER_BIT;
76 mem_type_to_gt(struct xe_device *xe, u32 mem_type)
78 XE_BUG_ON(mem_type != XE_PL_STOLEN && !mem_type_is_vram(mem_type));
80 return xe_device_get_gt(xe, mem_type == XE_PL_STOLEN ? 0 : (mem_type - XE_PL_VRAM0));
83 static void try_add_system(struct xe_bo *bo, struct ttm_place *places,
86 if (bo_flags & XE_BO_CREATE_SYSTEM_BIT) {
87 places[*c] = (struct ttm_place) {
92 if (bo->props.preferred_mem_type == XE_BO_PROPS_INVALID)
93 bo->props.preferred_mem_type = XE_PL_TT;
97 static void try_add_vram0(struct xe_device *xe, struct xe_bo *bo,
98 struct ttm_place *places, u32 bo_flags, u32 *c)
102 if (bo_flags & XE_BO_CREATE_VRAM0_BIT) {
103 gt = mem_type_to_gt(xe, XE_PL_VRAM0);
104 XE_BUG_ON(!gt->mem.vram.size);
106 places[*c] = (struct ttm_place) {
107 .mem_type = XE_PL_VRAM0,
109 * For eviction / restore on suspend / resume, objects
110 * pinned in VRAM must be contiguous
112 .flags = bo_flags & (XE_BO_CREATE_PINNED_BIT |
113 XE_BO_CREATE_GGTT_BIT) ?
114 TTM_PL_FLAG_CONTIGUOUS : 0,
118 if (bo->props.preferred_mem_type == XE_BO_PROPS_INVALID)
119 bo->props.preferred_mem_type = XE_PL_VRAM0;
123 static void try_add_vram1(struct xe_device *xe, struct xe_bo *bo,
124 struct ttm_place *places, u32 bo_flags, u32 *c)
128 if (bo_flags & XE_BO_CREATE_VRAM1_BIT) {
129 gt = mem_type_to_gt(xe, XE_PL_VRAM1);
130 XE_BUG_ON(!gt->mem.vram.size);
132 places[*c] = (struct ttm_place) {
133 .mem_type = XE_PL_VRAM1,
135 * For eviction / restore on suspend / resume, objects
136 * pinned in VRAM must be contiguous
138 .flags = bo_flags & (XE_BO_CREATE_PINNED_BIT |
139 XE_BO_CREATE_GGTT_BIT) ?
140 TTM_PL_FLAG_CONTIGUOUS : 0,
144 if (bo->props.preferred_mem_type == XE_BO_PROPS_INVALID)
145 bo->props.preferred_mem_type = XE_PL_VRAM1;
149 static void try_add_stolen(struct xe_device *xe, struct xe_bo *bo,
150 struct ttm_place *places, u32 bo_flags, u32 *c)
152 if (bo_flags & XE_BO_CREATE_STOLEN_BIT) {
153 places[*c] = (struct ttm_place) {
154 .mem_type = XE_PL_STOLEN,
155 .flags = bo_flags & (XE_BO_CREATE_PINNED_BIT |
156 XE_BO_CREATE_GGTT_BIT) ?
157 TTM_PL_FLAG_CONTIGUOUS : 0,
163 static int __xe_bo_placement_for_flags(struct xe_device *xe, struct xe_bo *bo,
166 struct ttm_place *places = bo->placements;
169 bo->props.preferred_mem_type = XE_BO_PROPS_INVALID;
171 /* The order of placements should indicate preferred location */
173 if (bo->props.preferred_mem_class == XE_MEM_REGION_CLASS_SYSMEM) {
174 try_add_system(bo, places, bo_flags, &c);
175 if (bo->props.preferred_gt == XE_GT1) {
176 try_add_vram1(xe, bo, places, bo_flags, &c);
177 try_add_vram0(xe, bo, places, bo_flags, &c);
179 try_add_vram0(xe, bo, places, bo_flags, &c);
180 try_add_vram1(xe, bo, places, bo_flags, &c);
182 } else if (bo->props.preferred_gt == XE_GT1) {
183 try_add_vram1(xe, bo, places, bo_flags, &c);
184 try_add_vram0(xe, bo, places, bo_flags, &c);
185 try_add_system(bo, places, bo_flags, &c);
187 try_add_vram0(xe, bo, places, bo_flags, &c);
188 try_add_vram1(xe, bo, places, bo_flags, &c);
189 try_add_system(bo, places, bo_flags, &c);
191 try_add_stolen(xe, bo, places, bo_flags, &c);
196 bo->placement = (struct ttm_placement) {
199 .num_busy_placement = c,
200 .busy_placement = places,
206 int xe_bo_placement_for_flags(struct xe_device *xe, struct xe_bo *bo,
209 xe_bo_assert_held(bo);
210 return __xe_bo_placement_for_flags(xe, bo, bo_flags);
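/*
 * Illustrative example (not part of the original file): a bo created with
 * bo_flags = XE_BO_CREATE_SYSTEM_BIT | XE_BO_CREATE_VRAM0_BIT and no
 * explicit preferred properties takes the final else branch above, giving
 * the placement order { XE_PL_VRAM0, XE_PL_TT }: VRAM is tried first and
 * system memory is the eviction fallback, with preferred_mem_type set to
 * XE_PL_VRAM0.
 */
static int example_default_placement(struct xe_device *xe, struct xe_bo *bo)
{
	return xe_bo_placement_for_flags(xe, bo, XE_BO_CREATE_SYSTEM_BIT |
					 XE_BO_CREATE_VRAM0_BIT);
}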
213 static void xe_evict_flags(struct ttm_buffer_object *tbo,
214 struct ttm_placement *placement)
218 if (!xe_bo_is_xe_bo(tbo)) {
219 /* Don't handle scatter gather BOs */
220 if (tbo->type == ttm_bo_type_sg) {
221 placement->num_placement = 0;
222 placement->num_busy_placement = 0;
226 *placement = sys_placement;
231 * For xe, sg bos that are evicted to system just trigger a
232 * rebind of the sg list upon subsequent validation to XE_PL_TT.
235 bo = ttm_to_xe_bo(tbo);
236 switch (tbo->resource->mem_type) {
242 /* for now kick out to system */
243 *placement = sys_placement;
255 static int xe_tt_map_sg(struct ttm_tt *tt)
257 struct xe_ttm_tt *xe_tt = container_of(tt, struct xe_ttm_tt, ttm);
258 unsigned long num_pages = tt->num_pages;
261 XE_BUG_ON(tt->page_flags & TTM_TT_FLAG_EXTERNAL);
266 ret = sg_alloc_table_from_pages(&xe_tt->sgt, tt->pages, num_pages,
267 0, (u64)num_pages << PAGE_SHIFT,
272 xe_tt->sg = &xe_tt->sgt;
273 ret = dma_map_sgtable(xe_tt->dev, xe_tt->sg, DMA_BIDIRECTIONAL,
274 DMA_ATTR_SKIP_CPU_SYNC);
276 sg_free_table(xe_tt->sg);
284 struct sg_table *xe_bo_get_sg(struct xe_bo *bo)
286 struct ttm_tt *tt = bo->ttm.ttm;
287 struct xe_ttm_tt *xe_tt = container_of(tt, struct xe_ttm_tt, ttm);
292 static struct ttm_tt *xe_ttm_tt_create(struct ttm_buffer_object *ttm_bo,
295 struct xe_bo *bo = ttm_to_xe_bo(ttm_bo);
296 struct xe_device *xe = xe_bo_device(bo);
297 struct xe_ttm_tt *tt;
300 tt = kzalloc(sizeof(*tt), GFP_KERNEL);
304 tt->dev = xe->drm.dev;
306 /* TODO: Select caching mode */
307 err = ttm_tt_init(&tt->ttm, &bo->ttm, page_flags,
308 bo->flags & XE_BO_SCANOUT_BIT ? ttm_write_combined : ttm_cached,
309 DIV_ROUND_UP(xe_device_ccs_bytes(xe_bo_device(bo),
320 static int xe_ttm_tt_populate(struct ttm_device *ttm_dev, struct ttm_tt *tt,
321 struct ttm_operation_ctx *ctx)
326 * dma-bufs are not populated with pages, and the dma-
327 * addresses are set up when moved to XE_PL_TT.
329 if (tt->page_flags & TTM_TT_FLAG_EXTERNAL)
332 err = ttm_pool_alloc(&ttm_dev->pool, tt, ctx);
336 /* A follow-up may move this to xe_bo_move() when the BO is moved to XE_PL_TT */
337 err = xe_tt_map_sg(tt);
339 ttm_pool_free(&ttm_dev->pool, tt);
344 static void xe_ttm_tt_unpopulate(struct ttm_device *ttm_dev, struct ttm_tt *tt)
346 struct xe_ttm_tt *xe_tt = container_of(tt, struct xe_ttm_tt, ttm);
348 if (tt->page_flags & TTM_TT_FLAG_EXTERNAL)
352 dma_unmap_sgtable(xe_tt->dev, xe_tt->sg,
353 DMA_BIDIRECTIONAL, 0);
354 sg_free_table(xe_tt->sg);
358 return ttm_pool_free(&ttm_dev->pool, tt);
361 static void xe_ttm_tt_destroy(struct ttm_device *ttm_dev, struct ttm_tt *tt)
367 static int xe_ttm_io_mem_reserve(struct ttm_device *bdev,
368 struct ttm_resource *mem)
370 struct xe_device *xe = ttm_to_xe_device(bdev);
373 switch (mem->mem_type) {
379 gt = mem_type_to_gt(xe, mem->mem_type);
380 mem->bus.offset = mem->start << PAGE_SHIFT;
382 if (gt->mem.vram.mapping &&
383 mem->placement & TTM_PL_FLAG_CONTIGUOUS)
384 mem->bus.addr = (u8 *)gt->mem.vram.mapping +
387 mem->bus.offset += gt->mem.vram.io_start;
388 mem->bus.is_iomem = true;
390 #if !defined(CONFIG_X86)
391 mem->bus.caching = ttm_write_combined;
395 return xe_ttm_stolen_io_mem_reserve(xe, mem);
401 static int xe_bo_trigger_rebind(struct xe_device *xe, struct xe_bo *bo,
402 const struct ttm_operation_ctx *ctx)
404 struct dma_resv_iter cursor;
405 struct dma_fence *fence;
409 dma_resv_assert_held(bo->ttm.base.resv);
411 if (!xe_device_in_fault_mode(xe) && !list_empty(&bo->vmas)) {
412 dma_resv_iter_begin(&cursor, bo->ttm.base.resv,
413 DMA_RESV_USAGE_BOOKKEEP);
414 dma_resv_for_each_fence_unlocked(&cursor, fence)
415 dma_fence_enable_sw_signaling(fence);
416 dma_resv_iter_end(&cursor);
419 list_for_each_entry(vma, &bo->vmas, bo_link) {
420 struct xe_vm *vm = vma->vm;
422 trace_xe_vma_evict(vma);
424 if (xe_vm_in_fault_mode(vm)) {
425 /* Wait for pending binds / unbinds. */
428 if (ctx->no_wait_gpu &&
429 !dma_resv_test_signaled(bo->ttm.base.resv,
430 DMA_RESV_USAGE_BOOKKEEP))
433 timeout = dma_resv_wait_timeout(bo->ttm.base.resv,
434 DMA_RESV_USAGE_BOOKKEEP,
436 MAX_SCHEDULE_TIMEOUT);
438 ret = xe_vm_invalidate_vma(vma);
440 } else if (!timeout) {
447 bool vm_resv_locked = false;
448 struct xe_vm *vm = vma->vm;
451 * We need to put the vma on the vm's rebind_list,
452 * but need the vm resv to do so. If we can't verify
453 * that we indeed have it locked, put the vma on the
454 * vm's notifier.rebind_list instead and scoop it up later.
456 if (dma_resv_trylock(&vm->resv))
457 vm_resv_locked = true;
458 else if (ctx->resv != &vm->resv) {
459 spin_lock(&vm->notifier.list_lock);
460 list_move_tail(&vma->notifier.rebind_link,
461 &vm->notifier.rebind_list);
462 spin_unlock(&vm->notifier.list_lock);
466 xe_vm_assert_held(vm);
467 if (list_empty(&vma->rebind_link) && vma->gt_present)
468 list_add_tail(&vma->rebind_link, &vm->rebind_list);
471 dma_resv_unlock(&vm->resv);
479 * The dma-buf map_attachment() / unmap_attachment() is hooked up here.
480 * Note that unmapping the attachment is deferred to the next
481 * map_attachment time, or to bo destroy (after idling) whichever comes first.
482 * This is to avoid syncing before unmap_attachment(), assuming that the
483 * caller relies on idling the reservation object before moving the
484 * backing store out. Should that assumption not hold, then we will be able
485 * to unconditionally call unmap_attachment() when moving out to system.
487 static int xe_bo_move_dmabuf(struct ttm_buffer_object *ttm_bo,
488 struct ttm_resource *old_res,
489 struct ttm_resource *new_res)
491 struct dma_buf_attachment *attach = ttm_bo->base.import_attach;
492 struct xe_ttm_tt *xe_tt = container_of(ttm_bo->ttm, struct xe_ttm_tt,
497 XE_BUG_ON(!ttm_bo->ttm);
499 if (new_res->mem_type == XE_PL_SYSTEM)
503 dma_buf_unmap_attachment(attach, ttm_bo->sg, DMA_BIDIRECTIONAL);
507 sg = dma_buf_map_attachment(attach, DMA_BIDIRECTIONAL);
515 ttm_bo_move_null(ttm_bo, new_res);
521 * xe_bo_move_notify - Notify subsystems of a pending move
522 * @bo: The buffer object
523 * @ctx: The struct ttm_operation_ctx controlling locking and waits.
525 * This function notifies subsystems of an upcoming buffer move.
526 * Upon receiving such a notification, subsystems should schedule
527 * halting access to the underlying pages and optionally add a fence
528 * to the buffer object's dma_resv object that signals when access is
529 * stopped. The caller will wait on all dma_resv fences before
532 * A subsystem may commence access to the object after obtaining
533 * bindings to the new backing memory under the object lock.
535 * Return: 0 on success, -EINTR or -ERESTARTSYS if interrupted in fault mode,
536 * negative error code on error.
538 static int xe_bo_move_notify(struct xe_bo *bo,
539 const struct ttm_operation_ctx *ctx)
541 struct ttm_buffer_object *ttm_bo = &bo->ttm;
542 struct xe_device *xe = ttm_to_xe_device(ttm_bo->bdev);
546 * If this starts to call into many components, consider
547 * using a notification chain here.
550 if (xe_bo_is_pinned(bo))
554 ret = xe_bo_trigger_rebind(xe, bo, ctx);
558 /* Don't call move_notify() for imported dma-bufs. */
559 if (ttm_bo->base.dma_buf && !ttm_bo->base.import_attach)
560 dma_buf_move_notify(ttm_bo->base.dma_buf);
565 static int xe_bo_move(struct ttm_buffer_object *ttm_bo, bool evict,
566 struct ttm_operation_ctx *ctx,
567 struct ttm_resource *new_mem,
568 struct ttm_place *hop)
570 struct xe_device *xe = ttm_to_xe_device(ttm_bo->bdev);
571 struct xe_bo *bo = ttm_to_xe_bo(ttm_bo);
572 struct ttm_resource *old_mem = ttm_bo->resource;
573 struct ttm_tt *ttm = ttm_bo->ttm;
574 struct xe_gt *gt = NULL;
575 struct dma_fence *fence;
576 bool move_lacks_source;
581 if (new_mem->mem_type != TTM_PL_SYSTEM) {
582 hop->mem_type = TTM_PL_SYSTEM;
583 hop->flags = TTM_PL_FLAG_TEMPORARY;
588 ttm_bo_move_null(ttm_bo, new_mem);
592 if (ttm_bo->type == ttm_bo_type_sg) {
593 ret = xe_bo_move_notify(bo, ctx);
595 ret = xe_bo_move_dmabuf(ttm_bo, old_mem, new_mem);
599 move_lacks_source = !resource_is_vram(old_mem) &&
600 (!ttm || !ttm_tt_is_populated(ttm));
602 needs_clear = (ttm && ttm->page_flags & TTM_TT_FLAG_ZERO_ALLOC) ||
603 (!ttm && ttm_bo->type == ttm_bo_type_device);
605 if ((move_lacks_source && !needs_clear) ||
606 (old_mem->mem_type == XE_PL_SYSTEM &&
607 new_mem->mem_type == XE_PL_TT)) {
608 ttm_bo_move_null(ttm_bo, new_mem);
612 if (!move_lacks_source && !xe_bo_is_pinned(bo)) {
613 ret = xe_bo_move_notify(bo, ctx);
618 if (old_mem->mem_type == XE_PL_TT &&
619 new_mem->mem_type == XE_PL_SYSTEM) {
620 long timeout = dma_resv_wait_timeout(ttm_bo->base.resv,
621 DMA_RESV_USAGE_BOOKKEEP,
623 MAX_SCHEDULE_TIMEOUT);
628 ttm_bo_move_null(ttm_bo, new_mem);
632 if (!move_lacks_source &&
633 ((old_mem->mem_type == XE_PL_SYSTEM && resource_is_vram(new_mem)) ||
634 (resource_is_vram(old_mem) &&
635 new_mem->mem_type == XE_PL_SYSTEM))) {
638 hop->mem_type = XE_PL_TT;
639 hop->flags = TTM_PL_FLAG_TEMPORARY;
646 else if (resource_is_vram(new_mem))
647 gt = mem_type_to_gt(xe, new_mem->mem_type);
648 else if (resource_is_vram(old_mem))
649 gt = mem_type_to_gt(xe, old_mem->mem_type);
652 XE_BUG_ON(!gt->migrate);
654 trace_xe_bo_move(bo);
655 xe_device_mem_access_get(xe);
657 if (xe_bo_is_pinned(bo) && !xe_bo_is_user(bo)) {
659 * Kernel memory that is pinned should only be moved on suspend
660 * / resume; some of the pinned memory is required for the
661 * device to resume / use the GPU to move other evicted memory
662 * (user memory) around. This could likely be optimized a bit
663 * further by finding the minimum set of pinned memory
664 * required for resume, but for simplicity we do a memcpy for all
667 ret = xe_bo_vmap(bo);
669 ret = ttm_bo_move_memcpy(ttm_bo, ctx, new_mem);
671 /* Create a new vmap once the kernel BO is back in VRAM */
672 if (!ret && resource_is_vram(new_mem)) {
673 void *new_addr = gt->mem.vram.mapping +
674 (new_mem->start << PAGE_SHIFT);
676 XE_BUG_ON(new_mem->start !=
677 bo->placements->fpfn);
679 iosys_map_set_vaddr_iomem(&bo->vmap, new_addr);
683 if (move_lacks_source)
684 fence = xe_migrate_clear(gt->migrate, bo, new_mem, 0);
686 fence = xe_migrate_copy(gt->migrate, bo, old_mem, new_mem);
688 ret = PTR_ERR(fence);
689 xe_device_mem_access_put(xe);
692 ret = ttm_bo_move_accel_cleanup(ttm_bo, fence, evict, true,
694 dma_fence_put(fence);
697 xe_device_mem_access_put(xe);
698 trace_printk("new_mem->mem_type=%d\n", new_mem->mem_type);
705 static unsigned long xe_ttm_io_mem_pfn(struct ttm_buffer_object *ttm_bo,
706 unsigned long page_offset)
708 struct xe_device *xe = ttm_to_xe_device(ttm_bo->bdev);
709 struct xe_bo *bo = ttm_to_xe_bo(ttm_bo);
710 struct xe_gt *gt = mem_type_to_gt(xe, ttm_bo->resource->mem_type);
711 struct xe_res_cursor cursor;
713 if (ttm_bo->resource->mem_type == XE_PL_STOLEN)
714 return xe_ttm_stolen_io_offset(bo, page_offset << PAGE_SHIFT) >> PAGE_SHIFT;
716 xe_res_first(ttm_bo->resource, (u64)page_offset << PAGE_SHIFT, 0, &cursor);
717 return (gt->mem.vram.io_start + cursor.start) >> PAGE_SHIFT;
720 static void __xe_bo_vunmap(struct xe_bo *bo);
723 * TODO: Move this function to TTM so we don't rely on how TTM does its
724 * locking and thereby abuse TTM internals.
726 static bool xe_ttm_bo_lock_in_destructor(struct ttm_buffer_object *ttm_bo)
730 XE_WARN_ON(kref_read(&ttm_bo->kref));
733 * We can typically only race with TTM trylocking under the
734 * lru_lock, which will immediately be unlocked again since
735 * the ttm_bo refcount is zero at this point. So trylocking *should*
736 * always succeed here, as long as we hold the lru lock.
738 spin_lock(&ttm_bo->bdev->lru_lock);
739 locked = dma_resv_trylock(ttm_bo->base.resv);
740 spin_unlock(&ttm_bo->bdev->lru_lock);
746 static void xe_ttm_bo_release_notify(struct ttm_buffer_object *ttm_bo)
748 struct dma_resv_iter cursor;
749 struct dma_fence *fence;
750 struct dma_fence *replacement = NULL;
753 if (!xe_bo_is_xe_bo(ttm_bo))
756 bo = ttm_to_xe_bo(ttm_bo);
757 XE_WARN_ON(bo->created && kref_read(&ttm_bo->base.refcount));
760 * Corner case where TTM fails to allocate memory and this BO's resv
761 * still points to the VM's resv
763 if (ttm_bo->base.resv != &ttm_bo->base._resv)
766 if (!xe_ttm_bo_lock_in_destructor(ttm_bo))
770 * Scrub the preempt fences if any. The unbind fence is already
771 * attached to the resv.
772 * TODO: Don't do this for external bos once we scrub them after
775 dma_resv_for_each_fence(&cursor, ttm_bo->base.resv,
776 DMA_RESV_USAGE_BOOKKEEP, fence) {
777 if (xe_fence_is_xe_preempt(fence) &&
778 !dma_fence_is_signaled(fence)) {
780 replacement = dma_fence_get_stub();
782 dma_resv_replace_fences(ttm_bo->base.resv,
785 DMA_RESV_USAGE_BOOKKEEP);
788 dma_fence_put(replacement);
790 dma_resv_unlock(ttm_bo->base.resv);
793 static void xe_ttm_bo_delete_mem_notify(struct ttm_buffer_object *ttm_bo)
795 if (!xe_bo_is_xe_bo(ttm_bo))
799 * Object is idle and about to be destroyed. Release the
800 * dma-buf attachment.
802 if (ttm_bo->type == ttm_bo_type_sg && ttm_bo->sg) {
803 struct xe_ttm_tt *xe_tt = container_of(ttm_bo->ttm,
804 struct xe_ttm_tt, ttm);
806 dma_buf_unmap_attachment(ttm_bo->base.import_attach, ttm_bo->sg,
813 struct ttm_device_funcs xe_ttm_funcs = {
814 .ttm_tt_create = xe_ttm_tt_create,
815 .ttm_tt_populate = xe_ttm_tt_populate,
816 .ttm_tt_unpopulate = xe_ttm_tt_unpopulate,
817 .ttm_tt_destroy = xe_ttm_tt_destroy,
818 .evict_flags = xe_evict_flags,
820 .io_mem_reserve = xe_ttm_io_mem_reserve,
821 .io_mem_pfn = xe_ttm_io_mem_pfn,
822 .release_notify = xe_ttm_bo_release_notify,
823 .eviction_valuable = ttm_bo_eviction_valuable,
824 .delete_mem_notify = xe_ttm_bo_delete_mem_notify,
827 static void xe_ttm_bo_destroy(struct ttm_buffer_object *ttm_bo)
829 struct xe_bo *bo = ttm_to_xe_bo(ttm_bo);
831 if (bo->ttm.base.import_attach)
832 drm_prime_gem_destroy(&bo->ttm.base, NULL);
833 drm_gem_object_release(&bo->ttm.base);
835 WARN_ON(!list_empty(&bo->vmas));
837 if (bo->ggtt_node.size)
838 xe_ggtt_remove_bo(bo->gt->mem.ggtt, bo);
840 if (bo->vm && xe_bo_is_user(bo))
846 static void xe_gem_object_free(struct drm_gem_object *obj)
848 /* Our BO reference counting scheme works as follows:
850 * The gem object kref is typically used throughout the driver,
851 * and the gem object holds a ttm_buffer_object refcount, so
852 * that when the last gem object reference is put, which is when
853 * we end up in this function, we put also that ttm_buffer_object
854 * refcount. Anything using gem interfaces is then no longer
855 * allowed to access the object in a way that requires a gem
856 * refcount, including locking the object.
858 * Driver TTM callbacks are allowed to use the ttm_buffer_object
859 * refcount directly if needed.
861 __xe_bo_vunmap(gem_to_xe_bo(obj));
862 ttm_bo_put(container_of(obj, struct ttm_buffer_object, base));
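/*
 * Illustrative sketch (assumes the xe_bo_get() / xe_bo_put() helpers from
 * xe_bo.h): generic driver code takes and drops gem references through the
 * xe_bo wrappers, while only the TTM callbacks above may use the underlying
 * ttm_buffer_object refcount directly.
 */
static struct xe_bo *example_take_bo_ref(struct xe_bo *bo)
{
	xe_bo_get(bo);	/* gem reference; pairs with a later xe_bo_put() */
	return bo;
}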
865 static bool should_migrate_to_system(struct xe_bo *bo)
867 struct xe_device *xe = xe_bo_device(bo);
869 return xe_device_in_fault_mode(xe) && bo->props.cpu_atomic;
872 static vm_fault_t xe_gem_fault(struct vm_fault *vmf)
874 struct ttm_buffer_object *tbo = vmf->vma->vm_private_data;
875 struct drm_device *ddev = tbo->base.dev;
879 ret = ttm_bo_vm_reserve(tbo, vmf);
883 if (drm_dev_enter(ddev, &idx)) {
884 struct xe_bo *bo = ttm_to_xe_bo(tbo);
886 trace_xe_bo_cpu_fault(bo);
888 if (should_migrate_to_system(bo)) {
889 r = xe_bo_migrate(bo, XE_PL_TT);
890 if (r == -EBUSY || r == -ERESTARTSYS || r == -EINTR)
891 ret = VM_FAULT_NOPAGE;
893 ret = VM_FAULT_SIGBUS;
896 ret = ttm_bo_vm_fault_reserved(vmf,
897 vmf->vma->vm_page_prot,
898 TTM_BO_VM_NUM_PREFAULT);
902 ret = ttm_bo_vm_dummy_page(vmf, vmf->vma->vm_page_prot);
904 if (ret == VM_FAULT_RETRY && !(vmf->flags & FAULT_FLAG_RETRY_NOWAIT))
907 dma_resv_unlock(tbo->base.resv);
911 static const struct vm_operations_struct xe_gem_vm_ops = {
912 .fault = xe_gem_fault,
913 .open = ttm_bo_vm_open,
914 .close = ttm_bo_vm_close,
915 .access = ttm_bo_vm_access
918 static const struct drm_gem_object_funcs xe_gem_object_funcs = {
919 .free = xe_gem_object_free,
920 .mmap = drm_gem_ttm_mmap,
921 .export = xe_gem_prime_export,
922 .vm_ops = &xe_gem_vm_ops,
926 * xe_bo_alloc - Allocate storage for a struct xe_bo
928 * This function is intended to allocate storage to be used for input
929 * to __xe_bo_create_locked(), in case a pointer to the bo to be
930 * created is needed before the call to __xe_bo_create_locked().
931 * If __xe_bo_create_locked() ends up never being called, then the
932 * storage allocated with this function needs to be freed using
935 * Return: A pointer to an uninitialized struct xe_bo on success,
936 * ERR_PTR(-ENOMEM) on error.
938 struct xe_bo *xe_bo_alloc(void)
940 struct xe_bo *bo = kzalloc(sizeof(*bo), GFP_KERNEL);
943 return ERR_PTR(-ENOMEM);
949 * xe_bo_free - Free storage allocated using xe_bo_alloc()
950 * @bo: The buffer object storage.
952 * Refer to xe_bo_alloc() documentation for valid use-cases.
954 void xe_bo_free(struct xe_bo *bo)
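/*
 * Illustrative sketch (not part of the original file) of the intended
 * xe_bo_alloc() / xe_bo_free() pairing: the storage is handed to
 * __xe_bo_create_locked(), and is only freed manually with xe_bo_free()
 * when __xe_bo_create_locked() never ran.
 */
static struct xe_bo *example_bo_prealloc(struct xe_device *xe, size_t size,
					 bool really_create)
{
	struct xe_bo *bo = xe_bo_alloc();

	if (IS_ERR(bo))
		return bo;

	/* ... publish the pointer to whoever needs it before creation ... */

	if (!really_create) {
		/* __xe_bo_create_locked() was never called: free manually. */
		xe_bo_free(bo);
		return NULL;
	}

	return __xe_bo_create_locked(xe, bo, NULL, NULL, size,
				     ttm_bo_type_kernel,
				     XE_BO_CREATE_SYSTEM_BIT);
}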
959 struct xe_bo *__xe_bo_create_locked(struct xe_device *xe, struct xe_bo *bo,
960 struct xe_gt *gt, struct dma_resv *resv,
961 size_t size, enum ttm_bo_type type,
964 struct ttm_operation_ctx ctx = {
965 .interruptible = true,
966 .no_wait_gpu = false,
968 struct ttm_placement *placement;
972 /* Only kernel objects should set GT */
973 XE_BUG_ON(gt && type != ttm_bo_type_kernel);
981 if (flags & (XE_BO_CREATE_VRAM0_BIT | XE_BO_CREATE_VRAM1_BIT |
982 XE_BO_CREATE_STOLEN_BIT) &&
983 !(flags & XE_BO_CREATE_IGNORE_MIN_PAGE_SIZE_BIT) &&
984 xe->info.vram_flags & XE_VRAM_FLAGS_NEED64K) {
985 size = ALIGN(size, SZ_64K);
986 flags |= XE_BO_INTERNAL_64K;
987 alignment = SZ_64K >> PAGE_SHIFT;
989 alignment = SZ_4K >> PAGE_SHIFT;
995 bo->ttm.base.funcs = &xe_gem_object_funcs;
996 bo->props.preferred_mem_class = XE_BO_PROPS_INVALID;
997 bo->props.preferred_gt = XE_BO_PROPS_INVALID;
998 bo->props.preferred_mem_type = XE_BO_PROPS_INVALID;
999 bo->ttm.priority = DRM_XE_VMA_PRIORITY_NORMAL;
1000 INIT_LIST_HEAD(&bo->vmas);
1001 INIT_LIST_HEAD(&bo->pinned_link);
1003 drm_gem_private_object_init(&xe->drm, &bo->ttm.base, size);
1006 ctx.allow_res_evict = true;
1010 if (!(flags & XE_BO_FIXED_PLACEMENT_BIT)) {
1011 err = __xe_bo_placement_for_flags(xe, bo, bo->flags);
1013 return ERR_PTR(err);
1016 /* Defer populating type_sg bos */
1017 placement = (type == ttm_bo_type_sg ||
1018 bo->flags & XE_BO_DEFER_BACKING) ? &sys_placement :
1020 err = ttm_bo_init_reserved(&xe->ttm, &bo->ttm, type,
1021 placement, alignment,
1022 &ctx, NULL, resv, xe_ttm_bo_destroy);
1024 return ERR_PTR(err);
1027 ttm_bo_move_to_lru_tail_unlocked(&bo->ttm);
1032 static int __xe_bo_fixed_placement(struct xe_device *xe,
1035 u64 start, u64 end, u64 size)
1037 struct ttm_place *place = bo->placements;
1039 if (flags & (XE_BO_CREATE_USER_BIT|XE_BO_CREATE_SYSTEM_BIT))
1042 place->flags = TTM_PL_FLAG_CONTIGUOUS;
1043 place->fpfn = start >> PAGE_SHIFT;
1044 place->lpfn = end >> PAGE_SHIFT;
1046 switch (flags & (XE_BO_CREATE_STOLEN_BIT |
1047 XE_BO_CREATE_VRAM0_BIT |XE_BO_CREATE_VRAM1_BIT)) {
1048 case XE_BO_CREATE_VRAM0_BIT:
1049 place->mem_type = XE_PL_VRAM0;
1051 case XE_BO_CREATE_VRAM1_BIT:
1052 place->mem_type = XE_PL_VRAM1;
1054 case XE_BO_CREATE_STOLEN_BIT:
1055 place->mem_type = XE_PL_STOLEN;
1059 /* 0 or multiple of the above set */
1063 bo->placement = (struct ttm_placement) {
1066 .num_busy_placement = 1,
1067 .busy_placement = place,
1074 xe_bo_create_locked_range(struct xe_device *xe,
1075 struct xe_gt *gt, struct xe_vm *vm,
1076 size_t size, u64 start, u64 end,
1077 enum ttm_bo_type type, u32 flags)
1079 struct xe_bo *bo = NULL;
1083 xe_vm_assert_held(vm);
1085 if (start || end != ~0ULL) {
1090 flags |= XE_BO_FIXED_PLACEMENT_BIT;
1091 err = __xe_bo_fixed_placement(xe, bo, flags, start, end, size);
1094 return ERR_PTR(err);
1098 bo = __xe_bo_create_locked(xe, bo, gt, vm ? &vm->resv : NULL, size,
1103 if (vm && xe_bo_is_user(bo))
1107 if (bo->flags & XE_BO_CREATE_GGTT_BIT) {
1108 if (!gt && flags & XE_BO_CREATE_STOLEN_BIT)
1109 gt = xe_device_get_gt(xe, 0);
1113 if (flags & XE_BO_CREATE_STOLEN_BIT &&
1114 flags & XE_BO_FIXED_PLACEMENT_BIT) {
1115 err = xe_ggtt_insert_bo_at(gt->mem.ggtt, bo, start);
1117 err = xe_ggtt_insert_bo(gt->mem.ggtt, bo);
1120 goto err_unlock_put_bo;
1126 xe_bo_unlock_vm_held(bo);
1128 return ERR_PTR(err);
1131 struct xe_bo *xe_bo_create_locked(struct xe_device *xe, struct xe_gt *gt,
1132 struct xe_vm *vm, size_t size,
1133 enum ttm_bo_type type, u32 flags)
1135 return xe_bo_create_locked_range(xe, gt, vm, size, 0, ~0ULL, type, flags);
1138 struct xe_bo *xe_bo_create(struct xe_device *xe, struct xe_gt *gt,
1139 struct xe_vm *vm, size_t size,
1140 enum ttm_bo_type type, u32 flags)
1142 struct xe_bo *bo = xe_bo_create_locked(xe, gt, vm, size, type, flags);
1145 xe_bo_unlock_vm_held(bo);
1150 struct xe_bo *xe_bo_create_pin_map_at(struct xe_device *xe, struct xe_gt *gt,
1152 size_t size, u64 offset,
1153 enum ttm_bo_type type, u32 flags)
1157 u64 start = offset == ~0ull ? 0 : offset;
1158 u64 end = offset == ~0ull ? offset : start + size;
1160 if (flags & XE_BO_CREATE_STOLEN_BIT &&
1161 xe_ttm_stolen_inaccessible(xe))
1162 flags |= XE_BO_CREATE_GGTT_BIT;
1164 bo = xe_bo_create_locked_range(xe, gt, vm, size, start, end, type, flags);
1168 err = xe_bo_pin(bo);
1172 err = xe_bo_vmap(bo);
1176 xe_bo_unlock_vm_held(bo);
1183 xe_bo_unlock_vm_held(bo);
1185 return ERR_PTR(err);
1188 struct xe_bo *xe_bo_create_pin_map(struct xe_device *xe, struct xe_gt *gt,
1189 struct xe_vm *vm, size_t size,
1190 enum ttm_bo_type type, u32 flags)
1192 return xe_bo_create_pin_map_at(xe, gt, vm, size, ~0ull, type, flags);
1195 struct xe_bo *xe_bo_create_from_data(struct xe_device *xe, struct xe_gt *gt,
1196 const void *data, size_t size,
1197 enum ttm_bo_type type, u32 flags)
1199 struct xe_bo *bo = xe_bo_create_pin_map(xe, gt, NULL,
1200 ALIGN(size, PAGE_SIZE),
1205 xe_map_memcpy_to(xe, &bo->vmap, 0, data, size);
1211 * XXX: This is in the VM bind data path; this should likely be calculated
1212 * once and stored, with a recalculation if the BO is moved.
1214 static uint64_t vram_region_io_offset(struct xe_bo *bo)
1216 struct xe_device *xe = xe_bo_device(bo);
1217 struct xe_gt *gt = mem_type_to_gt(xe, bo->ttm.resource->mem_type);
1219 if (bo->ttm.resource->mem_type == XE_PL_STOLEN)
1220 return xe_ttm_stolen_gpu_offset(xe);
1222 return gt->mem.vram.io_start - xe->mem.vram.io_start;
1226 * xe_bo_pin_external - pin an external BO
1227 * @bo: buffer object to be pinned
1229 * Pin an external (not tied to a VM, can be exported via dma-buf / prime FD)
1230 * BO. Unlike xe_bo_pin(), this function has its own set of
1231 * asserts and code to ensure evict / restore on suspend / resume.
1233 * Returns 0 for success, negative error code otherwise.
1235 int xe_bo_pin_external(struct xe_bo *bo)
1237 struct xe_device *xe = xe_bo_device(bo);
1241 XE_BUG_ON(!xe_bo_is_user(bo));
1243 if (!xe_bo_is_pinned(bo)) {
1244 err = xe_bo_validate(bo, NULL, false);
1248 if (xe_bo_is_vram(bo)) {
1249 spin_lock(&xe->pinned.lock);
1250 list_add_tail(&bo->pinned_link,
1251 &xe->pinned.external_vram);
1252 spin_unlock(&xe->pinned.lock);
1256 ttm_bo_pin(&bo->ttm);
1259 * FIXME: If we always use the reserve / unreserve functions for locking
1260 * we do not need this.
1262 ttm_bo_move_to_lru_tail_unlocked(&bo->ttm);
1267 int xe_bo_pin(struct xe_bo *bo)
1269 struct xe_device *xe = xe_bo_device(bo);
1272 /* We currently don't expect user BO to be pinned */
1273 XE_BUG_ON(xe_bo_is_user(bo));
1275 /* Pinned object must be in GGTT or have pinned flag */
1276 XE_BUG_ON(!(bo->flags & (XE_BO_CREATE_PINNED_BIT |
1277 XE_BO_CREATE_GGTT_BIT)));
1280 * No reason we can't support pinning imported dma-bufs; we just don't
1281 * expect to pin an imported dma-buf.
1283 XE_BUG_ON(bo->ttm.base.import_attach);
1285 /* We only expect at most 1 pin */
1286 XE_BUG_ON(xe_bo_is_pinned(bo));
1288 err = xe_bo_validate(bo, NULL, false);
1293 * For pinned objects on DGFX that are also in VRAM, we expect
1294 * these to be in contiguous VRAM memory: this is required for eviction /
1295 * restore during suspend / resume (force restore to the same physical address).
1297 if (IS_DGFX(xe) && !(IS_ENABLED(CONFIG_DRM_XE_DEBUG) &&
1298 bo->flags & XE_BO_INTERNAL_TEST)) {
1299 struct ttm_place *place = &(bo->placements[0]);
1302 if (mem_type_is_vram(place->mem_type)) {
1303 XE_BUG_ON(!(place->flags & TTM_PL_FLAG_CONTIGUOUS));
1305 place->fpfn = (xe_bo_addr(bo, 0, PAGE_SIZE, &lmem) -
1306 vram_region_io_offset(bo)) >> PAGE_SHIFT;
1307 place->lpfn = place->fpfn + (bo->size >> PAGE_SHIFT);
1309 spin_lock(&xe->pinned.lock);
1310 list_add_tail(&bo->pinned_link, &xe->pinned.kernel_bo_present);
1311 spin_unlock(&xe->pinned.lock);
1315 ttm_bo_pin(&bo->ttm);
1318 * FIXME: If we always use the reserve / unreserve functions for locking
1319 * we do not need this.
1321 ttm_bo_move_to_lru_tail_unlocked(&bo->ttm);
1327 * xe_bo_unpin_external - unpin an external BO
1328 * @bo: buffer object to be unpinned
1330 * Unpin an external (not tied to a VM, can be exported via dma-buf / prime FD)
1331 * BO. Unlike xe_bo_unpin(), this function has its own set of
1332 * asserts and code to ensure evict / restore on suspend / resume.
1334 * Returns 0 for success, negative error code otherwise.
1336 void xe_bo_unpin_external(struct xe_bo *bo)
1338 struct xe_device *xe = xe_bo_device(bo);
1341 XE_BUG_ON(!xe_bo_is_pinned(bo));
1342 XE_BUG_ON(!xe_bo_is_user(bo));
1344 if (bo->ttm.pin_count == 1 && !list_empty(&bo->pinned_link)) {
1345 spin_lock(&xe->pinned.lock);
1346 list_del_init(&bo->pinned_link);
1347 spin_unlock(&xe->pinned.lock);
1350 ttm_bo_unpin(&bo->ttm);
1353 * FIXME: If we always use the reserve / unreserve functions for locking
1354 * we do not need this.
1356 ttm_bo_move_to_lru_tail_unlocked(&bo->ttm);
1359 void xe_bo_unpin(struct xe_bo *bo)
1361 struct xe_device *xe = xe_bo_device(bo);
1363 XE_BUG_ON(bo->ttm.base.import_attach);
1364 XE_BUG_ON(!xe_bo_is_pinned(bo));
1366 if (IS_DGFX(xe) && !(IS_ENABLED(CONFIG_DRM_XE_DEBUG) &&
1367 bo->flags & XE_BO_INTERNAL_TEST)) {
1368 struct ttm_place *place = &(bo->placements[0]);
1370 if (mem_type_is_vram(place->mem_type)) {
1371 XE_BUG_ON(list_empty(&bo->pinned_link));
1373 spin_lock(&xe->pinned.lock);
1374 list_del_init(&bo->pinned_link);
1375 spin_unlock(&xe->pinned.lock);
1379 ttm_bo_unpin(&bo->ttm);
1383 * xe_bo_validate() - Make sure the bo is in an allowed placement
1385 * @vm: Pointer to the vm the bo shares a locked dma_resv object with, or
1386 * NULL. Used together with @allow_res_evict.
1387 * @allow_res_evict: Whether it's allowed to evict bos sharing @vm's
1388 * reservation object.
1390 * Make sure the bo is in an allowed placement, migrating it if necessary. If
1391 * needed, other bos will be evicted. If bos selected for eviction share
1392 * the @vm's reservation object, they can be evicted only if @allow_res_evict is
1393 * set to true; otherwise they will be bypassed.
1395 * Return: 0 on success, negative error code on failure. May return
1396 * -EINTR or -ERESTARTSYS if internal waits are interrupted by a signal.
1398 int xe_bo_validate(struct xe_bo *bo, struct xe_vm *vm, bool allow_res_evict)
1400 struct ttm_operation_ctx ctx = {
1401 .interruptible = true,
1402 .no_wait_gpu = false,
1406 lockdep_assert_held(&vm->lock);
1407 xe_vm_assert_held(vm);
1409 ctx.allow_res_evict = allow_res_evict;
1410 ctx.resv = &vm->resv;
1413 return ttm_bo_validate(&bo->ttm, &bo->placement, &ctx);
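/*
 * Illustrative sketch (not part of the original file): validating a
 * VM-private bo with the vm's lock and reservation object already held,
 * allowing other bos that share vm->resv to be evicted to make room.
 */
static int example_validate_vm_bo(struct xe_vm *vm, struct xe_bo *bo)
{
	/* vm->lock and vm->resv must already be held, as asserted above. */
	return xe_bo_validate(bo, vm, true);
}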
1416 bool xe_bo_is_xe_bo(struct ttm_buffer_object *bo)
1418 if (bo->destroy == &xe_ttm_bo_destroy)
1424 dma_addr_t xe_bo_addr(struct xe_bo *bo, u64 offset,
1425 size_t page_size, bool *is_lmem)
1427 struct xe_res_cursor cur;
1430 if (!READ_ONCE(bo->ttm.pin_count))
1431 xe_bo_assert_held(bo);
1433 XE_BUG_ON(page_size > PAGE_SIZE);
1434 page = offset >> PAGE_SHIFT;
1435 offset &= (PAGE_SIZE - 1);
1437 *is_lmem = xe_bo_is_vram(bo);
1439 if (!*is_lmem && !xe_bo_is_stolen(bo)) {
1440 XE_BUG_ON(!bo->ttm.ttm);
1442 xe_res_first_sg(xe_bo_get_sg(bo), page << PAGE_SHIFT,
1444 return xe_res_dma(&cur) + offset;
1446 struct xe_res_cursor cur;
1448 xe_res_first(bo->ttm.resource, page << PAGE_SHIFT,
1450 return cur.start + offset + vram_region_io_offset(bo);
1454 int xe_bo_vmap(struct xe_bo *bo)
1460 xe_bo_assert_held(bo);
1462 if (!iosys_map_is_null(&bo->vmap))
1466 * We use this more or less deprecated interface for now since
1467 * ttm_bo_vmap() doesn't offer the optimization of kmapping
1468 * single page bos, which is done here.
1469 * TODO: Fix up ttm_bo_vmap to do that, or fix up ttm_bo_kmap
1470 * to use struct iosys_map.
1472 ret = ttm_bo_kmap(&bo->ttm, 0, bo->size >> PAGE_SHIFT, &bo->kmap);
1476 virtual = ttm_kmap_obj_virtual(&bo->kmap, &is_iomem);
1478 iosys_map_set_vaddr_iomem(&bo->vmap, (void __iomem *)virtual);
1480 iosys_map_set_vaddr(&bo->vmap, virtual);
1485 static void __xe_bo_vunmap(struct xe_bo *bo)
1487 if (!iosys_map_is_null(&bo->vmap)) {
1488 iosys_map_clear(&bo->vmap);
1489 ttm_bo_kunmap(&bo->kmap);
1493 void xe_bo_vunmap(struct xe_bo *bo)
1495 xe_bo_assert_held(bo);
1499 int xe_gem_create_ioctl(struct drm_device *dev, void *data,
1500 struct drm_file *file)
1502 struct xe_device *xe = to_xe_device(dev);
1503 struct xe_file *xef = to_xe_file(file);
1504 struct drm_xe_gem_create *args = data;
1505 struct ww_acquire_ctx ww;
1506 struct xe_vm *vm = NULL;
1508 unsigned bo_flags = XE_BO_CREATE_USER_BIT;
1512 if (XE_IOCTL_ERR(xe, args->extensions))
1515 if (XE_IOCTL_ERR(xe, args->flags &
1516 ~(XE_GEM_CREATE_FLAG_DEFER_BACKING |
1517 XE_GEM_CREATE_FLAG_SCANOUT |
1518 xe->info.mem_region_mask)))
1521 /* at least one memory type must be specified */
1522 if (XE_IOCTL_ERR(xe, !(args->flags & xe->info.mem_region_mask)))
1525 if (XE_IOCTL_ERR(xe, args->handle))
1528 if (XE_IOCTL_ERR(xe, args->size > SIZE_MAX))
1531 if (XE_IOCTL_ERR(xe, args->size & ~PAGE_MASK))
1535 vm = xe_vm_lookup(xef, args->vm_id);
1536 if (XE_IOCTL_ERR(xe, !vm))
1538 err = xe_vm_lock(vm, &ww, 0, true);
1545 if (args->flags & XE_GEM_CREATE_FLAG_DEFER_BACKING)
1546 bo_flags |= XE_BO_DEFER_BACKING;
1548 if (args->flags & XE_GEM_CREATE_FLAG_SCANOUT)
1549 bo_flags |= XE_BO_SCANOUT_BIT;
1551 bo_flags |= args->flags << (ffs(XE_BO_CREATE_SYSTEM_BIT) - 1);
1552 bo = xe_bo_create(xe, NULL, vm, args->size, ttm_bo_type_device,
1555 xe_vm_unlock(vm, &ww);
1562 err = drm_gem_handle_create(file, &bo->ttm.base, &handle);
1567 args->handle = handle;
1572 int xe_gem_mmap_offset_ioctl(struct drm_device *dev, void *data,
1573 struct drm_file *file)
1575 struct xe_device *xe = to_xe_device(dev);
1576 struct drm_xe_gem_mmap_offset *args = data;
1577 struct drm_gem_object *gem_obj;
1579 if (XE_IOCTL_ERR(xe, args->extensions))
1582 if (XE_IOCTL_ERR(xe, args->flags))
1585 gem_obj = drm_gem_object_lookup(file, args->handle);
1586 if (XE_IOCTL_ERR(xe, !gem_obj))
1589 /* The mmap offset was set up at BO allocation time. */
1590 args->offset = drm_vma_node_offset_addr(&gem_obj->vma_node);
1592 xe_bo_put(gem_to_xe_bo(gem_obj));
1596 int xe_bo_lock(struct xe_bo *bo, struct ww_acquire_ctx *ww,
1597 int num_resv, bool intr)
1599 struct ttm_validate_buffer tv_bo;
1605 tv_bo.num_shared = num_resv;
1606 tv_bo.bo = &bo->ttm;
1607 list_add_tail(&tv_bo.head, &objs);
1609 return ttm_eu_reserve_buffers(ww, &objs, intr, &dups);
1612 void xe_bo_unlock(struct xe_bo *bo, struct ww_acquire_ctx *ww)
1614 dma_resv_unlock(bo->ttm.base.resv);
1615 ww_acquire_fini(ww);
1619 * xe_bo_can_migrate - Whether a buffer object likely can be migrated
1620 * @bo: The buffer object to migrate
1621 * @mem_type: The TTM memory type intended to migrate to
1623 * Check whether the buffer object supports migration to the
1624 * given memory type. Note that pinning may affect the ability to migrate as
1625 * returned by this function.
1627 * This function is primarily intended as a helper for checking whether
1628 * buffer objects can be migrated, and can be called without
1629 * the object lock held.
1631 * Return: true if migration is possible, false otherwise.
1633 bool xe_bo_can_migrate(struct xe_bo *bo, u32 mem_type)
1635 unsigned int cur_place;
1637 if (bo->ttm.type == ttm_bo_type_kernel)
1640 if (bo->ttm.type == ttm_bo_type_sg)
1643 for (cur_place = 0; cur_place < bo->placement.num_placement;
1645 if (bo->placements[cur_place].mem_type == mem_type)
1652 static void xe_place_from_ttm_type(u32 mem_type, struct ttm_place *place)
1654 memset(place, 0, sizeof(*place));
1655 place->mem_type = mem_type;
1659 * xe_bo_migrate - Migrate an object to the desired region id
1660 * @bo: The buffer object to migrate.
1661 * @mem_type: The TTM region type to migrate to.
1663 * Attempt to migrate the buffer object to the desired memory region. The
1664 * buffer object may not be pinned, and must be locked.
1665 * On successful completion, the object memory type will be updated,
1666 * but an async migration task may not have completed yet, and to
1667 * accomplish that, the object's kernel fences must be signaled with
1668 * the object lock held.
1670 * Return: 0 on success. Negative error code on failure. In particular may
1671 * return -EINTR or -ERESTARTSYS if signal pending.
1673 int xe_bo_migrate(struct xe_bo *bo, u32 mem_type)
1675 struct ttm_operation_ctx ctx = {
1676 .interruptible = true,
1677 .no_wait_gpu = false,
1679 struct ttm_placement placement;
1680 struct ttm_place requested;
1682 xe_bo_assert_held(bo);
1684 if (bo->ttm.resource->mem_type == mem_type)
1687 if (xe_bo_is_pinned(bo))
1690 if (!xe_bo_can_migrate(bo, mem_type))
1693 xe_place_from_ttm_type(mem_type, &requested);
1694 placement.num_placement = 1;
1695 placement.num_busy_placement = 1;
1696 placement.placement = &requested;
1697 placement.busy_placement = &requested;
1699 return ttm_bo_validate(&bo->ttm, &placement, &ctx);
1703 * xe_bo_evict - Evict an object to evict placement
1704 * @bo: The buffer object to migrate.
1705 * @force_alloc: Set force_alloc in ttm_operation_ctx
1707 * On successful completion, the object memory will be moved to evict
1708 placement. This function blocks until the object has been fully moved.
1710 * Return: 0 on success. Negative error code on failure.
1712 int xe_bo_evict(struct xe_bo *bo, bool force_alloc)
1714 struct ttm_operation_ctx ctx = {
1715 .interruptible = false,
1716 .no_wait_gpu = false,
1717 .force_alloc = force_alloc,
1719 struct ttm_placement placement;
1722 xe_evict_flags(&bo->ttm, &placement);
1723 ret = ttm_bo_validate(&bo->ttm, &placement, &ctx);
1727 dma_resv_wait_timeout(bo->ttm.base.resv, DMA_RESV_USAGE_KERNEL,
1728 false, MAX_SCHEDULE_TIMEOUT);
1734 * xe_bo_needs_ccs_pages - Whether a bo needs to back up CCS pages when
1735 * placed in system memory.
1738 * If a bo has an allowable placement in XE_PL_TT memory, it can't use
1739 * flat CCS compression, because the GPU then has no way to access the
1740 * CCS metadata using relevant commands. For the opposite case, we need to
1741 * allocate storage for the CCS metadata when the BO is not resident in
1744 * Return: true if extra pages need to be allocated, false otherwise.
1746 bool xe_bo_needs_ccs_pages(struct xe_bo *bo)
1748 return bo->ttm.type == ttm_bo_type_device &&
1749 !(bo->flags & XE_BO_CREATE_SYSTEM_BIT) &&
1750 (bo->flags & (XE_BO_CREATE_VRAM0_BIT | XE_BO_CREATE_VRAM1_BIT));
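/*
 * Illustrative sketch (not part of the original file), mirroring the
 * xe_device_ccs_bytes() use in xe_ttm_tt_create() above: when a bo needs
 * CCS backing, the extra system pages are sized from the CCS metadata size,
 * which on flat-CCS platforms is a small fraction (roughly 1/256) of the
 * main surface, e.g. about one extra page for a 1 MiB bo.
 */
static unsigned long example_ccs_backing_pages(struct xe_device *xe,
					       struct xe_bo *bo)
{
	if (!xe_bo_needs_ccs_pages(bo))
		return 0;

	return DIV_ROUND_UP(xe_device_ccs_bytes(xe, bo->size), PAGE_SIZE);
}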
1754 * __xe_bo_release_dummy() - Dummy kref release function
1755 * @kref: The embedded struct kref.
1757 * Dummy release function for xe_bo_put_deferred(). Keep off.
1759 void __xe_bo_release_dummy(struct kref *kref)
1764 * xe_bo_put_commit() - Put bos whose put was deferred by xe_bo_put_deferred().
1765 * @deferred: The lockless list used for the call to xe_bo_put_deferred().
1767 * Puts all bos whose put was deferred by xe_bo_put_deferred().
1768 * The @deferred list can be either an onstack local list or a global
1769 * shared list used by a workqueue.
1771 void xe_bo_put_commit(struct llist_head *deferred)
1773 struct llist_node *freed;
1774 struct xe_bo *bo, *next;
1779 freed = llist_del_all(deferred);
1783 llist_for_each_entry_safe(bo, next, freed, freed)
1784 drm_gem_object_free(&bo->ttm.base.refcount);
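/*
 * Illustrative sketch (assumes xe_bo_put_deferred() from xe_bo.h): a caller
 * that cannot take the locks needed for the final release drops its
 * references onto a lockless list, then commits them from a safe context.
 */
static void example_deferred_put(struct xe_bo *bo)
{
	LLIST_HEAD(deferred);

	xe_bo_put_deferred(bo, &deferred);	/* queue the final put */
	/* ... possibly defer more bos, e.g. under a spinlock ... */
	xe_bo_put_commit(&deferred);		/* actually free them here */
}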
1788 * xe_bo_dumb_create - Create a dumb bo as backing for a fb
1793 * See dumb_create() hook in include/drm/drm_drv.h
1797 int xe_bo_dumb_create(struct drm_file *file_priv,
1798 struct drm_device *dev,
1799 struct drm_mode_create_dumb *args)
1801 struct xe_device *xe = to_xe_device(dev);
1804 int cpp = DIV_ROUND_UP(args->bpp, 8);
1806 u32 page_size = max_t(u32, PAGE_SIZE,
1807 xe->info.vram_flags & XE_VRAM_FLAGS_NEED64K ? SZ_64K : SZ_4K);
1809 args->pitch = ALIGN(args->width * cpp, 64);
1810 args->size = ALIGN(mul_u32_u32(args->pitch, args->height),
1813 bo = xe_bo_create(xe, NULL, NULL, args->size, ttm_bo_type_device,
1814 XE_BO_CREATE_VRAM_IF_DGFX(to_gt(xe)) |
1815 XE_BO_CREATE_USER_BIT | XE_BO_SCANOUT_BIT);
1819 err = drm_gem_handle_create(file_priv, &bo->ttm.base, &handle);
1820 /* drop reference from allocate - handle holds it now */
1821 drm_gem_object_put(&bo->ttm.base);
1823 args->handle = handle;
1827 #if IS_ENABLED(CONFIG_DRM_XE_KUNIT_TEST)
1828 #include "tests/xe_bo.c"