// SPDX-License-Identifier: MIT
/*
 * Copyright © 2021 Intel Corporation
 */

#include "xe_bo.h"

#include <linux/dma-buf.h>

#include <drm/drm_drv.h>
#include <drm/drm_gem_ttm_helper.h>
#include <drm/ttm/ttm_device.h>
#include <drm/ttm/ttm_placement.h>
#include <drm/ttm/ttm_tt.h>
#include <drm/xe_drm.h>

#include "xe_device.h"
#include "xe_dma_buf.h"
#include "xe_drm_client.h"
#include "xe_ggtt.h"
#include "xe_gt.h"
#include "xe_map.h"
#include "xe_migrate.h"
#include "xe_preempt_fence.h"
#include "xe_res_cursor.h"
#include "xe_trace.h"
#include "xe_ttm_stolen_mgr.h"
#include "xe_ttm_vram_mgr.h"
#include "xe_vm.h"

static const struct ttm_place sys_placement_flags = {
	.fpfn = 0,
	.lpfn = 0,
	.mem_type = XE_PL_SYSTEM,
	.flags = 0,
};

static struct ttm_placement sys_placement = {
	.num_placement = 1,
	.placement = &sys_placement_flags,
	.num_busy_placement = 1,
	.busy_placement = &sys_placement_flags,
};

static const struct ttm_place tt_placement_flags = {
	.fpfn = 0,
	.lpfn = 0,
	.mem_type = XE_PL_TT,
	.flags = 0,
};

static struct ttm_placement tt_placement = {
	.num_placement = 1,
	.placement = &tt_placement_flags,
	.num_busy_placement = 1,
	.busy_placement = &sys_placement_flags,
};

bool mem_type_is_vram(u32 mem_type)
{
	return mem_type >= XE_PL_VRAM0 && mem_type != XE_PL_STOLEN;
}

static bool resource_is_stolen_vram(struct xe_device *xe, struct ttm_resource *res)
{
	return res->mem_type == XE_PL_STOLEN && IS_DGFX(xe);
}

static bool resource_is_vram(struct ttm_resource *res)
{
	return mem_type_is_vram(res->mem_type);
}

bool xe_bo_is_vram(struct xe_bo *bo)
{
	return resource_is_vram(bo->ttm.resource) ||
		resource_is_stolen_vram(xe_bo_device(bo), bo->ttm.resource);
}

bool xe_bo_is_stolen(struct xe_bo *bo)
{
	return bo->ttm.resource->mem_type == XE_PL_STOLEN;
}

/**
 * xe_bo_is_stolen_devmem - check if BO is of stolen type accessed via PCI BAR
 * @bo: The BO
 *
 * The stolen memory is accessed through the PCI BAR for both DGFX and some
 * integrated platforms that have a dedicated bit in the PTE for devmem (DM).
 *
 * Returns: true if it's stolen memory accessed via PCI BAR, false otherwise.
 */
bool xe_bo_is_stolen_devmem(struct xe_bo *bo)
{
	return xe_bo_is_stolen(bo) &&
		GRAPHICS_VERx100(xe_bo_device(bo)) >= 1270;
}

static bool xe_bo_is_user(struct xe_bo *bo)
{
	return bo->flags & XE_BO_CREATE_USER_BIT;
}

static struct xe_tile *
mem_type_to_tile(struct xe_device *xe, u32 mem_type)
{
	xe_assert(xe, mem_type == XE_PL_STOLEN || mem_type_is_vram(mem_type));

	return &xe->tiles[mem_type == XE_PL_STOLEN ? 0 : (mem_type - XE_PL_VRAM0)];
}

/**
 * xe_bo_to_tile() - Get a tile from a BO's memory location
 * @bo: The buffer object
 *
 * Get a tile from a BO's memory location, should be called on BOs in VRAM only.
 *
 * Return: xe_tile object which is closest to the BO
 */
struct xe_tile *xe_bo_to_tile(struct xe_bo *bo)
{
	return mem_type_to_tile(xe_bo_device(bo), bo->ttm.resource->mem_type);
}

static void try_add_system(struct xe_bo *bo, struct ttm_place *places,
			   u32 bo_flags, u32 *c)
{
	if (bo_flags & XE_BO_CREATE_SYSTEM_BIT) {
		places[*c] = (struct ttm_place) {
			.mem_type = XE_PL_TT,
		};
		*c += 1;

		if (bo->props.preferred_mem_type == XE_BO_PROPS_INVALID)
			bo->props.preferred_mem_type = XE_PL_TT;
	}
}

static void add_vram(struct xe_device *xe, struct xe_bo *bo,
		     struct ttm_place *places, u32 bo_flags, u32 mem_type, u32 *c)
{
	struct xe_tile *tile = mem_type_to_tile(xe, mem_type);
	struct ttm_place place = { .mem_type = mem_type };
	u64 io_size = tile->mem.vram.io_size;

	xe_assert(xe, tile->mem.vram.usable_size);

	/*
	 * For eviction / restore on suspend / resume objects
	 * pinned in VRAM must be contiguous
	 */
	if (bo_flags & (XE_BO_CREATE_PINNED_BIT |
			XE_BO_CREATE_GGTT_BIT))
		place.flags |= TTM_PL_FLAG_CONTIGUOUS;

	if (io_size < tile->mem.vram.usable_size) {
		if (bo_flags & XE_BO_NEEDS_CPU_ACCESS) {
			place.fpfn = 0;
			place.lpfn = io_size >> PAGE_SHIFT;
		} else {
			place.flags |= TTM_PL_FLAG_TOPDOWN;
		}
	}
	places[*c] = place;
	*c += 1;

	if (bo->props.preferred_mem_type == XE_BO_PROPS_INVALID)
		bo->props.preferred_mem_type = mem_type;
}

static void try_add_vram(struct xe_device *xe, struct xe_bo *bo,
			 struct ttm_place *places, u32 bo_flags, u32 *c)
{
	if (bo->props.preferred_gt == XE_GT1) {
		if (bo_flags & XE_BO_CREATE_VRAM1_BIT)
			add_vram(xe, bo, places, bo_flags, XE_PL_VRAM1, c);
		if (bo_flags & XE_BO_CREATE_VRAM0_BIT)
			add_vram(xe, bo, places, bo_flags, XE_PL_VRAM0, c);
	} else {
		if (bo_flags & XE_BO_CREATE_VRAM0_BIT)
			add_vram(xe, bo, places, bo_flags, XE_PL_VRAM0, c);
		if (bo_flags & XE_BO_CREATE_VRAM1_BIT)
			add_vram(xe, bo, places, bo_flags, XE_PL_VRAM1, c);
	}
}

static void try_add_stolen(struct xe_device *xe, struct xe_bo *bo,
			   struct ttm_place *places, u32 bo_flags, u32 *c)
{
	if (bo_flags & XE_BO_CREATE_STOLEN_BIT) {
		places[*c] = (struct ttm_place) {
			.mem_type = XE_PL_STOLEN,
			.flags = bo_flags & (XE_BO_CREATE_PINNED_BIT |
					     XE_BO_CREATE_GGTT_BIT) ?
				TTM_PL_FLAG_CONTIGUOUS : 0,
		};
		*c += 1;
	}
}

static int __xe_bo_placement_for_flags(struct xe_device *xe, struct xe_bo *bo,
				       u32 bo_flags)
{
	struct ttm_place *places = bo->placements;
	u32 c = 0;

	bo->props.preferred_mem_type = XE_BO_PROPS_INVALID;

	/* The order of placements should indicate preferred location */

	if (bo->props.preferred_mem_class == XE_MEM_REGION_CLASS_SYSMEM) {
		try_add_system(bo, places, bo_flags, &c);
		try_add_vram(xe, bo, places, bo_flags, &c);
	} else {
		try_add_vram(xe, bo, places, bo_flags, &c);
		try_add_system(bo, places, bo_flags, &c);
	}
	try_add_stolen(xe, bo, places, bo_flags, &c);

	if (!c)
		return -EINVAL;

	bo->placement = (struct ttm_placement) {
		.num_placement = c,
		.placement = places,
		.num_busy_placement = c,
		.busy_placement = places,
	};

	return 0;
}

int xe_bo_placement_for_flags(struct xe_device *xe, struct xe_bo *bo,
			      u32 bo_flags)
{
	xe_bo_assert_held(bo);
	return __xe_bo_placement_for_flags(xe, bo, bo_flags);
}

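/*
 * Illustrative example (not part of the driver): for a BO created with
 * bo_flags = XE_BO_CREATE_VRAM0_BIT | XE_BO_CREATE_SYSTEM_BIT and no
 * SYSMEM preference, the helpers above fill bo->placements in order
 * { XE_PL_VRAM0, XE_PL_TT }, so TTM tries VRAM0 first and falls back
 * to TT:
 *
 *	err = xe_bo_placement_for_flags(xe, bo,
 *					XE_BO_CREATE_VRAM0_BIT |
 *					XE_BO_CREATE_SYSTEM_BIT);
 */
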
static void xe_evict_flags(struct ttm_buffer_object *tbo,
			   struct ttm_placement *placement)
{
	struct xe_bo *bo;

	if (!xe_bo_is_xe_bo(tbo)) {
		/* Don't handle scatter gather BOs */
		if (tbo->type == ttm_bo_type_sg) {
			placement->num_placement = 0;
			placement->num_busy_placement = 0;
			return;
		}

		*placement = sys_placement;
		return;
	}

	/*
	 * For xe, sg bos that are evicted to system just trigger a
	 * rebind of the sg list upon subsequent validation to XE_PL_TT.
	 */
	bo = ttm_to_xe_bo(tbo);
	switch (tbo->resource->mem_type) {
	case XE_PL_VRAM0:
	case XE_PL_VRAM1:
	case XE_PL_STOLEN:
		*placement = tt_placement;
		break;
	case XE_PL_TT:
	default:
		*placement = sys_placement;
		break;
	}
}

struct xe_ttm_tt {
	struct ttm_tt ttm;
	struct device *dev;
	struct sg_table sgt;
	struct sg_table *sg;
};

static int xe_tt_map_sg(struct ttm_tt *tt)
{
	struct xe_ttm_tt *xe_tt = container_of(tt, struct xe_ttm_tt, ttm);
	unsigned long num_pages = tt->num_pages;
	int ret;

	XE_WARN_ON(tt->page_flags & TTM_TT_FLAG_EXTERNAL);

	if (xe_tt->sg)
		return 0;

	ret = sg_alloc_table_from_pages_segment(&xe_tt->sgt, tt->pages,
						num_pages, 0,
						(u64)num_pages << PAGE_SHIFT,
						xe_sg_segment_size(xe_tt->dev),
						GFP_KERNEL);
	if (ret)
		return ret;

	xe_tt->sg = &xe_tt->sgt;
	ret = dma_map_sgtable(xe_tt->dev, xe_tt->sg, DMA_BIDIRECTIONAL,
			      DMA_ATTR_SKIP_CPU_SYNC);
	if (ret) {
		sg_free_table(xe_tt->sg);
		xe_tt->sg = NULL;
		return ret;
	}

	return 0;
}

struct sg_table *xe_bo_get_sg(struct xe_bo *bo)
{
	struct ttm_tt *tt = bo->ttm.ttm;
	struct xe_ttm_tt *xe_tt = container_of(tt, struct xe_ttm_tt, ttm);

	return xe_tt->sg;
}

static struct ttm_tt *xe_ttm_tt_create(struct ttm_buffer_object *ttm_bo,
				       u32 page_flags)
{
	struct xe_bo *bo = ttm_to_xe_bo(ttm_bo);
	struct xe_device *xe = xe_bo_device(bo);
	struct xe_ttm_tt *tt;
	unsigned long extra_pages;
	enum ttm_caching caching = ttm_cached;
	int err;

	tt = kzalloc(sizeof(*tt), GFP_KERNEL);
	if (!tt)
		return NULL;

	tt->dev = xe->drm.dev;

	extra_pages = 0;
	if (xe_bo_needs_ccs_pages(bo))
		extra_pages = DIV_ROUND_UP(xe_device_ccs_bytes(xe, bo->size),
					   PAGE_SIZE);

	/*
	 * Display scanout is always non-coherent with the CPU cache.
	 *
	 * For Xe_LPG and beyond, PPGTT PTE lookups are also non-coherent and
	 * require a CPU:WC mapping.
	 */
	if (bo->flags & XE_BO_SCANOUT_BIT ||
	    (xe->info.graphics_verx100 >= 1270 && bo->flags & XE_BO_PAGETABLE))
		caching = ttm_write_combined;

	err = ttm_tt_init(&tt->ttm, &bo->ttm, page_flags, caching, extra_pages);
	if (err) {
		kfree(tt);
		return NULL;
	}

	return &tt->ttm;
}

static int xe_ttm_tt_populate(struct ttm_device *ttm_dev, struct ttm_tt *tt,
			      struct ttm_operation_ctx *ctx)
{
	int err;

	/*
	 * dma-bufs are not populated with pages, and the dma-
	 * addresses are set up when moved to XE_PL_TT.
	 */
	if (tt->page_flags & TTM_TT_FLAG_EXTERNAL)
		return 0;

	err = ttm_pool_alloc(&ttm_dev->pool, tt, ctx);
	if (err)
		return err;

	/* A follow-up may move this to xe_bo_move() when the BO is moved to XE_PL_TT */
	err = xe_tt_map_sg(tt);
	if (err)
		ttm_pool_free(&ttm_dev->pool, tt);

	return err;
}

static void xe_ttm_tt_unpopulate(struct ttm_device *ttm_dev, struct ttm_tt *tt)
{
	struct xe_ttm_tt *xe_tt = container_of(tt, struct xe_ttm_tt, ttm);

	if (tt->page_flags & TTM_TT_FLAG_EXTERNAL)
		return;

	if (xe_tt->sg) {
		dma_unmap_sgtable(xe_tt->dev, xe_tt->sg,
				  DMA_BIDIRECTIONAL, 0);
		sg_free_table(xe_tt->sg);
		xe_tt->sg = NULL;
	}

	return ttm_pool_free(&ttm_dev->pool, tt);
}

static void xe_ttm_tt_destroy(struct ttm_device *ttm_dev, struct ttm_tt *tt)
{
	ttm_tt_fini(tt);
	kfree(tt);
}

static int xe_ttm_io_mem_reserve(struct ttm_device *bdev,
				 struct ttm_resource *mem)
{
	struct xe_device *xe = ttm_to_xe_device(bdev);

	switch (mem->mem_type) {
	case XE_PL_SYSTEM:
	case XE_PL_TT:
		return 0;
	case XE_PL_VRAM0:
	case XE_PL_VRAM1: {
		struct xe_tile *tile = mem_type_to_tile(xe, mem->mem_type);
		struct xe_ttm_vram_mgr_resource *vres =
			to_xe_ttm_vram_mgr_resource(mem);

		if (vres->used_visible_size < mem->size)
			return -EINVAL;

		mem->bus.offset = mem->start << PAGE_SHIFT;

		if (tile->mem.vram.mapping &&
		    mem->placement & TTM_PL_FLAG_CONTIGUOUS)
			mem->bus.addr = (u8 *)tile->mem.vram.mapping +
				mem->bus.offset;

		mem->bus.offset += tile->mem.vram.io_start;
		mem->bus.is_iomem = true;

#if !defined(CONFIG_X86)
		mem->bus.caching = ttm_write_combined;
#endif
		return 0;
	} case XE_PL_STOLEN:
		return xe_ttm_stolen_io_mem_reserve(xe, mem);
	default:
		return -EINVAL;
	}
}

static int xe_bo_trigger_rebind(struct xe_device *xe, struct xe_bo *bo,
				const struct ttm_operation_ctx *ctx)
{
	struct dma_resv_iter cursor;
	struct dma_fence *fence;
	struct drm_gpuva *gpuva;
	struct drm_gem_object *obj = &bo->ttm.base;
	struct drm_gpuvm_bo *vm_bo;
	int ret = 0;

	dma_resv_assert_held(bo->ttm.base.resv);

	if (!list_empty(&bo->ttm.base.gpuva.list)) {
		dma_resv_iter_begin(&cursor, bo->ttm.base.resv,
				    DMA_RESV_USAGE_BOOKKEEP);
		dma_resv_for_each_fence_unlocked(&cursor, fence)
			dma_fence_enable_sw_signaling(fence);
		dma_resv_iter_end(&cursor);
	}

	drm_gem_for_each_gpuvm_bo(vm_bo, obj) {
		drm_gpuvm_bo_for_each_va(gpuva, vm_bo) {
			struct xe_vma *vma = gpuva_to_vma(gpuva);
			struct xe_vm *vm = xe_vma_vm(vma);

			trace_xe_vma_evict(vma);

			if (xe_vm_in_fault_mode(vm)) {
				/* Wait for pending binds / unbinds. */
				long timeout;

				if (ctx->no_wait_gpu &&
				    !dma_resv_test_signaled(bo->ttm.base.resv,
							    DMA_RESV_USAGE_BOOKKEEP))
					return -EBUSY;

				timeout = dma_resv_wait_timeout(bo->ttm.base.resv,
								DMA_RESV_USAGE_BOOKKEEP,
								ctx->interruptible,
								MAX_SCHEDULE_TIMEOUT);
				if (timeout > 0) {
					ret = xe_vm_invalidate_vma(vma);
					XE_WARN_ON(ret);
				} else if (!timeout) {
					ret = -ETIME;
				} else {
					ret = timeout;
				}
			} else {
				bool vm_resv_locked = false;

				/*
				 * We need to put the vma on the vm's rebind_list,
				 * but need the vm resv to do so. If we can't verify
				 * that we indeed have it locked, put the vma on the
				 * vm's notifier.rebind_list instead and scoop later.
				 */
				if (dma_resv_trylock(xe_vm_resv(vm)))
					vm_resv_locked = true;
				else if (ctx->resv != xe_vm_resv(vm)) {
					spin_lock(&vm->notifier.list_lock);
					if (!(vma->gpuva.flags & XE_VMA_DESTROYED))
						list_move_tail(&vma->notifier.rebind_link,
							       &vm->notifier.rebind_list);
					spin_unlock(&vm->notifier.list_lock);
					continue;
				}

				xe_vm_assert_held(vm);
				if (vma->tile_present &&
				    !(vma->gpuva.flags & XE_VMA_DESTROYED) &&
				    list_empty(&vma->combined_links.rebind))
					list_add_tail(&vma->combined_links.rebind,
						      &vm->rebind_list);

				if (vm_resv_locked)
					dma_resv_unlock(xe_vm_resv(vm));
			}
		}
	}

	return ret;
}

/*
 * The dma-buf map_attachment() / unmap_attachment() is hooked up here.
 * Note that unmapping the attachment is deferred to the next
 * map_attachment time, or to bo destroy (after idling) whichever comes first.
 * This is to avoid syncing before unmap_attachment(), assuming that the
 * caller relies on idling the reservation object before moving the
 * backing store out. Should that assumption not hold, then we will be able
 * to unconditionally call unmap_attachment() when moving out to system.
 */
static int xe_bo_move_dmabuf(struct ttm_buffer_object *ttm_bo,
			     struct ttm_resource *new_res)
{
	struct dma_buf_attachment *attach = ttm_bo->base.import_attach;
	struct xe_ttm_tt *xe_tt = container_of(ttm_bo->ttm, struct xe_ttm_tt,
					       ttm);
	struct xe_device *xe = ttm_to_xe_device(ttm_bo->bdev);
	struct sg_table *sg;

	xe_assert(xe, attach);
	xe_assert(xe, ttm_bo->ttm);

	if (new_res->mem_type == XE_PL_SYSTEM)
		goto out;

	if (ttm_bo->sg) {
		dma_buf_unmap_attachment(attach, ttm_bo->sg, DMA_BIDIRECTIONAL);
		ttm_bo->sg = NULL;
	}

	sg = dma_buf_map_attachment(attach, DMA_BIDIRECTIONAL);
	if (IS_ERR(sg))
		return PTR_ERR(sg);

	ttm_bo->sg = sg;
	xe_tt->sg = sg;

out:
	ttm_bo_move_null(ttm_bo, new_res);

	return 0;
}

/**
 * xe_bo_move_notify - Notify subsystems of a pending move
 * @bo: The buffer object
 * @ctx: The struct ttm_operation_ctx controlling locking and waits.
 *
 * This function notifies subsystems of an upcoming buffer move.
 * Upon receiving such a notification, subsystems should schedule
 * halting access to the underlying pages and optionally add a fence
 * to the buffer object's dma_resv object, that signals when access is
 * stopped. The caller will wait on all dma_resv fences before
 * starting the move.
 *
 * A subsystem may commence access to the object after obtaining
 * bindings to the new backing memory under the object lock.
 *
 * Return: 0 on success, -EINTR or -ERESTARTSYS if interrupted in fault mode,
 * negative error code on error.
 */
static int xe_bo_move_notify(struct xe_bo *bo,
			     const struct ttm_operation_ctx *ctx)
{
	struct ttm_buffer_object *ttm_bo = &bo->ttm;
	struct xe_device *xe = ttm_to_xe_device(ttm_bo->bdev);
	int ret;

	/*
	 * If this starts to call into many components, consider
	 * using a notification chain here.
	 */

	if (xe_bo_is_pinned(bo))
		return -EINVAL;

	xe_bo_vunmap(bo);
	ret = xe_bo_trigger_rebind(xe, bo, ctx);
	if (ret)
		return ret;

	/* Don't call move_notify() for imported dma-bufs. */
	if (ttm_bo->base.dma_buf && !ttm_bo->base.import_attach)
		dma_buf_move_notify(ttm_bo->base.dma_buf);

	return 0;
}

static int xe_bo_move(struct ttm_buffer_object *ttm_bo, bool evict,
		      struct ttm_operation_ctx *ctx,
		      struct ttm_resource *new_mem,
		      struct ttm_place *hop)
{
	struct xe_device *xe = ttm_to_xe_device(ttm_bo->bdev);
	struct xe_bo *bo = ttm_to_xe_bo(ttm_bo);
	struct ttm_resource *old_mem = ttm_bo->resource;
	u32 old_mem_type = old_mem ? old_mem->mem_type : XE_PL_SYSTEM;
	struct ttm_tt *ttm = ttm_bo->ttm;
	struct xe_tile *tile = NULL;
	struct dma_fence *fence;
	bool move_lacks_source;
	bool tt_has_data;
	bool needs_clear;
	int ret = 0;

	/* Bo creation path, moving to system or TT. No clearing required. */
	if (!old_mem && ttm) {
		ttm_bo_move_null(ttm_bo, new_mem);
		return 0;
	}

	if (ttm_bo->type == ttm_bo_type_sg) {
		ret = xe_bo_move_notify(bo, ctx);
		if (!ret)
			ret = xe_bo_move_dmabuf(ttm_bo, new_mem);
		goto out;
	}

	tt_has_data = ttm && (ttm_tt_is_populated(ttm) ||
			      (ttm->page_flags & TTM_TT_FLAG_SWAPPED));

	move_lacks_source = !mem_type_is_vram(old_mem_type) && !tt_has_data;

	needs_clear = (ttm && ttm->page_flags & TTM_TT_FLAG_ZERO_ALLOC) ||
		(!ttm && ttm_bo->type == ttm_bo_type_device);

	if ((move_lacks_source && !needs_clear) ||
	    (old_mem_type == XE_PL_SYSTEM &&
	     new_mem->mem_type == XE_PL_TT)) {
		ttm_bo_move_null(ttm_bo, new_mem);
		goto out;
	}

	/*
	 * Failed multi-hop where the old_mem is still marked as
	 * TTM_PL_FLAG_TEMPORARY, should just be a dummy move.
	 */
	if (old_mem_type == XE_PL_TT &&
	    new_mem->mem_type == XE_PL_TT) {
		ttm_bo_move_null(ttm_bo, new_mem);
		goto out;
	}

	if (!move_lacks_source && !xe_bo_is_pinned(bo)) {
		ret = xe_bo_move_notify(bo, ctx);
		if (ret)
			goto out;
	}

	if (old_mem_type == XE_PL_TT &&
	    new_mem->mem_type == XE_PL_SYSTEM) {
		long timeout = dma_resv_wait_timeout(ttm_bo->base.resv,
						     DMA_RESV_USAGE_BOOKKEEP,
						     true,
						     MAX_SCHEDULE_TIMEOUT);
		if (timeout < 0) {
			ret = timeout;
			goto out;
		}
		ttm_bo_move_null(ttm_bo, new_mem);
		goto out;
	}

	if (!move_lacks_source &&
	    ((old_mem_type == XE_PL_SYSTEM && resource_is_vram(new_mem)) ||
	     (mem_type_is_vram(old_mem_type) &&
	      new_mem->mem_type == XE_PL_SYSTEM))) {
		hop->fpfn = 0;
		hop->lpfn = 0;
		hop->mem_type = XE_PL_TT;
		hop->flags = TTM_PL_FLAG_TEMPORARY;
		ret = -EMULTIHOP;
		goto out;
	}

	if (bo->tile)
		tile = bo->tile;
	else if (resource_is_vram(new_mem))
		tile = mem_type_to_tile(xe, new_mem->mem_type);
	else if (mem_type_is_vram(old_mem_type))
		tile = mem_type_to_tile(xe, old_mem_type);

	xe_assert(xe, tile);
	xe_tile_assert(tile, tile->migrate);

	trace_xe_bo_move(bo);
	xe_device_mem_access_get(xe);

	if (xe_bo_is_pinned(bo) && !xe_bo_is_user(bo)) {
		/*
		 * Kernel memory that is pinned should only be moved on suspend
		 * / resume, some of the pinned memory is required for the
		 * device to resume / use the GPU to move other evicted memory
		 * (user memory) around. This likely could be optimized a bit
		 * further where we find the minimum set of pinned memory
		 * required for resume but for simplicity doing a memcpy for all
		 * pinned memory.
		 */
		ret = xe_bo_vmap(bo);
		if (!ret) {
			ret = ttm_bo_move_memcpy(ttm_bo, ctx, new_mem);

			/* Create a new VMAP once kernel BO back in VRAM */
			if (!ret && resource_is_vram(new_mem)) {
				void *new_addr = tile->mem.vram.mapping +
					(new_mem->start << PAGE_SHIFT);

				if (XE_WARN_ON(new_mem->start == XE_BO_INVALID_OFFSET)) {
					ret = -EINVAL;
					xe_device_mem_access_put(xe);
					goto out;
				}

				xe_assert(xe, new_mem->start ==
					  bo->placements->fpfn);

				iosys_map_set_vaddr_iomem(&bo->vmap, new_addr);
			}
		}
	} else {
		if (move_lacks_source)
			fence = xe_migrate_clear(tile->migrate, bo, new_mem);
		else
			fence = xe_migrate_copy(tile->migrate,
						bo, bo, old_mem, new_mem);
		if (IS_ERR(fence)) {
			ret = PTR_ERR(fence);
			xe_device_mem_access_put(xe);
			goto out;
		}
		if (!move_lacks_source) {
			ret = ttm_bo_move_accel_cleanup(ttm_bo, fence, evict,
							true, new_mem);
			if (ret) {
				dma_fence_wait(fence, false);
				ttm_bo_move_null(ttm_bo, new_mem);
				ret = 0;
			}
		} else {
			/*
			 * ttm_bo_move_accel_cleanup() may blow up if
			 * bo->resource == NULL, so just attach the
			 * fence and set the new resource.
			 */
			dma_resv_add_fence(ttm_bo->base.resv, fence,
					   DMA_RESV_USAGE_KERNEL);
			ttm_bo_move_null(ttm_bo, new_mem);
		}

		dma_fence_put(fence);
	}

	xe_device_mem_access_put(xe);
	trace_printk("new_mem->mem_type=%d\n", new_mem->mem_type);

out:
	return ret;
}

/**
 * xe_bo_evict_pinned() - Evict a pinned VRAM object to system memory
 * @bo: The buffer object to move.
 *
 * On successful completion, the object memory will be moved to system memory.
 * This function blocks until the object has been fully moved.
 *
 * This is needed for special handling of pinned VRAM objects during
 * suspend / resume.
 *
 * Return: 0 on success. Negative error code on failure.
 */
int xe_bo_evict_pinned(struct xe_bo *bo)
{
	struct ttm_place place = {
		.mem_type = XE_PL_TT,
	};
	struct ttm_placement placement = {
		.num_placement = 1,
		.placement = &place,
	};
	struct ttm_operation_ctx ctx = {
		.interruptible = false,
	};
	struct ttm_resource *new_mem;
	int ret;

	xe_bo_assert_held(bo);

	if (WARN_ON(!bo->ttm.resource))
		return -EINVAL;

	if (WARN_ON(!xe_bo_is_pinned(bo)))
		return -EINVAL;

	if (WARN_ON(!xe_bo_is_vram(bo)))
		return -EINVAL;

	ret = ttm_bo_mem_space(&bo->ttm, &placement, &new_mem, &ctx);
	if (ret)
		return ret;

	if (!bo->ttm.ttm) {
		bo->ttm.ttm = xe_ttm_tt_create(&bo->ttm, 0);
		if (!bo->ttm.ttm) {
			ret = -ENOMEM;
			goto err_res_free;
		}
	}

	ret = ttm_tt_populate(bo->ttm.bdev, bo->ttm.ttm, &ctx);
	if (ret)
		goto err_res_free;

	ret = dma_resv_reserve_fences(bo->ttm.base.resv, 1);
	if (ret)
		goto err_res_free;

	ret = xe_bo_move(&bo->ttm, false, &ctx, new_mem, NULL);
	if (ret)
		goto err_res_free;

	dma_resv_wait_timeout(bo->ttm.base.resv, DMA_RESV_USAGE_KERNEL,
			      false, MAX_SCHEDULE_TIMEOUT);

	return 0;

err_res_free:
	ttm_resource_free(&bo->ttm, &new_mem);
	return ret;
}

/**
 * xe_bo_restore_pinned() - Restore a pinned VRAM object
 * @bo: The buffer object to move.
 *
 * On successful completion, the object memory will be moved back to VRAM.
 * This function blocks until the object has been fully moved.
 *
 * This is needed for special handling of pinned VRAM objects during
 * suspend / resume.
 *
 * Return: 0 on success. Negative error code on failure.
 */
int xe_bo_restore_pinned(struct xe_bo *bo)
{
	struct ttm_operation_ctx ctx = {
		.interruptible = false,
	};
	struct ttm_resource *new_mem;
	int ret;

	xe_bo_assert_held(bo);

	if (WARN_ON(!bo->ttm.resource))
		return -EINVAL;

	if (WARN_ON(!xe_bo_is_pinned(bo)))
		return -EINVAL;

	if (WARN_ON(xe_bo_is_vram(bo) || !bo->ttm.ttm))
		return -EINVAL;

	ret = ttm_bo_mem_space(&bo->ttm, &bo->placement, &new_mem, &ctx);
	if (ret)
		return ret;

	ret = ttm_tt_populate(bo->ttm.bdev, bo->ttm.ttm, &ctx);
	if (ret)
		goto err_res_free;

	ret = dma_resv_reserve_fences(bo->ttm.base.resv, 1);
	if (ret)
		goto err_res_free;

	ret = xe_bo_move(&bo->ttm, false, &ctx, new_mem, NULL);
	if (ret)
		goto err_res_free;

	dma_resv_wait_timeout(bo->ttm.base.resv, DMA_RESV_USAGE_KERNEL,
			      false, MAX_SCHEDULE_TIMEOUT);

	return 0;

err_res_free:
	ttm_resource_free(&bo->ttm, &new_mem);
	return ret;
}

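/*
 * Usage sketch (assumed suspend / resume flow, illustrative only, not
 * taken verbatim from this file): both helpers above expect the BO lock
 * to be held and are intended to be called in pairs:
 *
 *	xe_bo_lock(bo, false);
 *	ret = xe_bo_evict_pinned(bo);		(on suspend)
 *	xe_bo_unlock(bo);
 *	...
 *	xe_bo_lock(bo, false);
 *	ret = xe_bo_restore_pinned(bo);		(on resume)
 *	xe_bo_unlock(bo);
 */
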
static unsigned long xe_ttm_io_mem_pfn(struct ttm_buffer_object *ttm_bo,
				       unsigned long page_offset)
{
	struct xe_device *xe = ttm_to_xe_device(ttm_bo->bdev);
	struct xe_bo *bo = ttm_to_xe_bo(ttm_bo);
	struct xe_tile *tile = mem_type_to_tile(xe, ttm_bo->resource->mem_type);
	struct xe_res_cursor cursor;

	if (ttm_bo->resource->mem_type == XE_PL_STOLEN)
		return xe_ttm_stolen_io_offset(bo, page_offset << PAGE_SHIFT) >> PAGE_SHIFT;

	xe_res_first(ttm_bo->resource, (u64)page_offset << PAGE_SHIFT, 0, &cursor);
	return (tile->mem.vram.io_start + cursor.start) >> PAGE_SHIFT;
}

static void __xe_bo_vunmap(struct xe_bo *bo);

/*
 * TODO: Move this function to TTM so we don't rely on how TTM does its
 * locking, thereby abusing TTM internals.
 */
static bool xe_ttm_bo_lock_in_destructor(struct ttm_buffer_object *ttm_bo)
{
	struct xe_device *xe = ttm_to_xe_device(ttm_bo->bdev);
	bool locked;

	xe_assert(xe, !kref_read(&ttm_bo->kref));

	/*
	 * We can typically only race with TTM trylocking under the
	 * lru_lock, which will immediately be unlocked again since
	 * the ttm_bo refcount is zero at this point. So trylocking *should*
	 * always succeed here, as long as we hold the lru lock.
	 */
	spin_lock(&ttm_bo->bdev->lru_lock);
	locked = dma_resv_trylock(ttm_bo->base.resv);
	spin_unlock(&ttm_bo->bdev->lru_lock);
	xe_assert(xe, locked);

	return locked;
}

static void xe_ttm_bo_release_notify(struct ttm_buffer_object *ttm_bo)
{
	struct dma_resv_iter cursor;
	struct dma_fence *fence;
	struct dma_fence *replacement = NULL;
	struct xe_bo *bo;

	if (!xe_bo_is_xe_bo(ttm_bo))
		return;

	bo = ttm_to_xe_bo(ttm_bo);
	xe_assert(xe_bo_device(bo), !(bo->created && kref_read(&ttm_bo->base.refcount)));

	/*
	 * Corner case where TTM fails to allocate memory and this BO's resv
	 * still points to the VM's resv
	 */
	if (ttm_bo->base.resv != &ttm_bo->base._resv)
		return;

	if (!xe_ttm_bo_lock_in_destructor(ttm_bo))
		return;

	/*
	 * Scrub the preempt fences if any. The unbind fence is already
	 * attached to the resv.
	 * TODO: Don't do this for external bos once we scrub them after
	 * unbind.
	 */
	dma_resv_for_each_fence(&cursor, ttm_bo->base.resv,
				DMA_RESV_USAGE_BOOKKEEP, fence) {
		if (xe_fence_is_xe_preempt(fence) &&
		    !dma_fence_is_signaled(fence)) {
			if (!replacement)
				replacement = dma_fence_get_stub();

			dma_resv_replace_fences(ttm_bo->base.resv,
						fence->context,
						replacement,
						DMA_RESV_USAGE_BOOKKEEP);
		}
	}
	dma_fence_put(replacement);

	dma_resv_unlock(ttm_bo->base.resv);
}

static void xe_ttm_bo_delete_mem_notify(struct ttm_buffer_object *ttm_bo)
{
	if (!xe_bo_is_xe_bo(ttm_bo))
		return;

	/*
	 * Object is idle and about to be destroyed. Release the
	 * dma-buf attachment.
	 */
	if (ttm_bo->type == ttm_bo_type_sg && ttm_bo->sg) {
		struct xe_ttm_tt *xe_tt = container_of(ttm_bo->ttm,
						       struct xe_ttm_tt, ttm);

		dma_buf_unmap_attachment(ttm_bo->base.import_attach, ttm_bo->sg,
					 DMA_BIDIRECTIONAL);
		ttm_bo->sg = NULL;
		xe_tt->sg = NULL;
	}
}

struct ttm_device_funcs xe_ttm_funcs = {
	.ttm_tt_create = xe_ttm_tt_create,
	.ttm_tt_populate = xe_ttm_tt_populate,
	.ttm_tt_unpopulate = xe_ttm_tt_unpopulate,
	.ttm_tt_destroy = xe_ttm_tt_destroy,
	.evict_flags = xe_evict_flags,
	.move = xe_bo_move,
	.io_mem_reserve = xe_ttm_io_mem_reserve,
	.io_mem_pfn = xe_ttm_io_mem_pfn,
	.release_notify = xe_ttm_bo_release_notify,
	.eviction_valuable = ttm_bo_eviction_valuable,
	.delete_mem_notify = xe_ttm_bo_delete_mem_notify,
};

static void xe_ttm_bo_destroy(struct ttm_buffer_object *ttm_bo)
{
	struct xe_bo *bo = ttm_to_xe_bo(ttm_bo);
	struct xe_device *xe = ttm_to_xe_device(ttm_bo->bdev);

	if (bo->ttm.base.import_attach)
		drm_prime_gem_destroy(&bo->ttm.base, NULL);
	drm_gem_object_release(&bo->ttm.base);

	xe_assert(xe, list_empty(&ttm_bo->base.gpuva.list));

	if (bo->ggtt_node.size)
		xe_ggtt_remove_bo(bo->tile->mem.ggtt, bo);

#ifdef CONFIG_PROC_FS
	if (bo->client)
		xe_drm_client_remove_bo(bo);
#endif

	if (bo->vm && xe_bo_is_user(bo))
		xe_vm_put(bo->vm);

	kfree(bo);
}

static void xe_gem_object_free(struct drm_gem_object *obj)
{
	/* Our BO reference counting scheme works as follows:
	 *
	 * The gem object kref is typically used throughout the driver,
	 * and the gem object holds a ttm_buffer_object refcount, so
	 * that when the last gem object reference is put, which is when
	 * we end up in this function, we put also that ttm_buffer_object
	 * refcount. Anything using gem interfaces is then no longer
	 * allowed to access the object in a way that requires a gem
	 * refcount, including locking the object.
	 *
	 * Driver ttm callbacks are allowed to use the ttm_buffer_object
	 * refcount directly if needed.
	 */
	__xe_bo_vunmap(gem_to_xe_bo(obj));
	ttm_bo_put(container_of(obj, struct ttm_buffer_object, base));
}

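/*
 * Illustrative example (assumption, not from this file): code holding
 * only a gem reference uses the drm_gem_object_{get,put}() pair; the
 * final put funnels into xe_gem_object_free() above:
 *
 *	drm_gem_object_get(&bo->ttm.base);
 *	... access the BO through gem interfaces ...
 *	drm_gem_object_put(&bo->ttm.base);
 */
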
static void xe_gem_object_close(struct drm_gem_object *obj,
				struct drm_file *file_priv)
{
	struct xe_bo *bo = gem_to_xe_bo(obj);

	if (bo->vm && !xe_vm_in_fault_mode(bo->vm)) {
		xe_assert(xe_bo_device(bo), xe_bo_is_user(bo));

		xe_bo_lock(bo, false);
		ttm_bo_set_bulk_move(&bo->ttm, NULL);
		xe_bo_unlock(bo);
	}
}

static bool should_migrate_to_system(struct xe_bo *bo)
{
	struct xe_device *xe = xe_bo_device(bo);

	return xe_device_in_fault_mode(xe) && bo->props.cpu_atomic;
}

static vm_fault_t xe_gem_fault(struct vm_fault *vmf)
{
	struct ttm_buffer_object *tbo = vmf->vma->vm_private_data;
	struct drm_device *ddev = tbo->base.dev;
	vm_fault_t ret;
	int idx, r = 0;

	ret = ttm_bo_vm_reserve(tbo, vmf);
	if (ret)
		return ret;

	if (drm_dev_enter(ddev, &idx)) {
		struct xe_bo *bo = ttm_to_xe_bo(tbo);

		trace_xe_bo_cpu_fault(bo);

		if (should_migrate_to_system(bo)) {
			r = xe_bo_migrate(bo, XE_PL_TT);
			if (r == -EBUSY || r == -ERESTARTSYS || r == -EINTR)
				ret = VM_FAULT_NOPAGE;
			else if (r)
				ret = VM_FAULT_SIGBUS;
		}
		if (!ret)
			ret = ttm_bo_vm_fault_reserved(vmf,
						       vmf->vma->vm_page_prot,
						       TTM_BO_VM_NUM_PREFAULT);
		drm_dev_exit(idx);
	} else {
		ret = ttm_bo_vm_dummy_page(vmf, vmf->vma->vm_page_prot);
	}

	if (ret == VM_FAULT_RETRY && !(vmf->flags & FAULT_FLAG_RETRY_NOWAIT))
		return ret;

	dma_resv_unlock(tbo->base.resv);
	return ret;
}

static const struct vm_operations_struct xe_gem_vm_ops = {
	.fault = xe_gem_fault,
	.open = ttm_bo_vm_open,
	.close = ttm_bo_vm_close,
	.access = ttm_bo_vm_access
};

static const struct drm_gem_object_funcs xe_gem_object_funcs = {
	.free = xe_gem_object_free,
	.close = xe_gem_object_close,
	.mmap = drm_gem_ttm_mmap,
	.export = xe_gem_prime_export,
	.vm_ops = &xe_gem_vm_ops,
};

/**
 * xe_bo_alloc - Allocate storage for a struct xe_bo
 *
 * This function is intended to allocate storage to be used for input
 * to __xe_bo_create_locked(), in the case a pointer to the bo to be
 * created is needed before the call to __xe_bo_create_locked().
 * If __xe_bo_create_locked ends up never to be called, then the
 * storage allocated with this function needs to be freed using
 * xe_bo_free().
 *
 * Return: A pointer to an uninitialized struct xe_bo on success,
 * ERR_PTR(-ENOMEM) on error.
 */
struct xe_bo *xe_bo_alloc(void)
{
	struct xe_bo *bo = kzalloc(sizeof(*bo), GFP_KERNEL);

	if (!bo)
		return ERR_PTR(-ENOMEM);

	return bo;
}

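/*
 * Usage sketch for the pre-allocation pattern described above
 * (illustrative only; error handling abbreviated):
 *
 *	struct xe_bo *bo = xe_bo_alloc();
 *
 *	if (IS_ERR(bo))
 *		return bo;
 *	... stash the pointer where it is needed before creation ...
 *	bo = __xe_bo_create_locked(xe, bo, tile, resv, bulk, size,
 *				   type, flags);
 *
 * If __xe_bo_create_locked() ends up never being called, release the
 * storage with xe_bo_free(bo) instead, as documented above.
 */
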
/**
 * xe_bo_free - Free storage allocated using xe_bo_alloc()
 * @bo: The buffer object storage.
 *
 * Refer to xe_bo_alloc() documentation for valid use-cases.
 */
void xe_bo_free(struct xe_bo *bo)
{
	kfree(bo);
}

struct xe_bo *__xe_bo_create_locked(struct xe_device *xe, struct xe_bo *bo,
				    struct xe_tile *tile, struct dma_resv *resv,
				    struct ttm_lru_bulk_move *bulk, size_t size,
				    enum ttm_bo_type type, u32 flags)
{
	struct ttm_operation_ctx ctx = {
		.interruptible = true,
		.no_wait_gpu = false,
	};
	struct ttm_placement *placement;
	uint32_t alignment;
	int err;

	/* Only kernel objects should set GT */
	xe_assert(xe, !tile || type == ttm_bo_type_kernel);

	if (XE_WARN_ON(!size)) {
		xe_bo_free(bo);
		return ERR_PTR(-EINVAL);
	}

	if (!bo) {
		bo = xe_bo_alloc();
		if (IS_ERR(bo))
			return bo;
	}

	if (flags & (XE_BO_CREATE_VRAM_MASK | XE_BO_CREATE_STOLEN_BIT) &&
	    !(flags & XE_BO_CREATE_IGNORE_MIN_PAGE_SIZE_BIT) &&
	    xe->info.vram_flags & XE_VRAM_FLAGS_NEED64K) {
		size = ALIGN(size, SZ_64K);
		flags |= XE_BO_INTERNAL_64K;
		alignment = SZ_64K >> PAGE_SHIFT;
	} else {
		size = ALIGN(size, PAGE_SIZE);
		alignment = SZ_4K >> PAGE_SHIFT;
	}

	bo->tile = tile;
	bo->size = size;
	bo->flags = flags;
	bo->ttm.base.funcs = &xe_gem_object_funcs;
	bo->props.preferred_mem_class = XE_BO_PROPS_INVALID;
	bo->props.preferred_gt = XE_BO_PROPS_INVALID;
	bo->props.preferred_mem_type = XE_BO_PROPS_INVALID;
	bo->ttm.priority = DRM_XE_VMA_PRIORITY_NORMAL;
	INIT_LIST_HEAD(&bo->pinned_link);
#ifdef CONFIG_PROC_FS
	INIT_LIST_HEAD(&bo->client_link);
#endif

	drm_gem_private_object_init(&xe->drm, &bo->ttm.base, size);

	if (resv) {
		ctx.allow_res_evict = !(flags & XE_BO_CREATE_NO_RESV_EVICT);
		ctx.resv = resv;
	}

	if (!(flags & XE_BO_FIXED_PLACEMENT_BIT)) {
		err = __xe_bo_placement_for_flags(xe, bo, bo->flags);
		if (WARN_ON(err)) {
			xe_ttm_bo_destroy(&bo->ttm);
			return ERR_PTR(err);
		}
	}

	/* Defer populating type_sg bos */
	placement = (type == ttm_bo_type_sg ||
		     bo->flags & XE_BO_DEFER_BACKING) ? &sys_placement :
		&bo->placement;
	err = ttm_bo_init_reserved(&xe->ttm, &bo->ttm, type,
				   placement, alignment,
				   &ctx, NULL, resv, xe_ttm_bo_destroy);
	if (err)
		return ERR_PTR(err);

	/*
	 * The VRAM pages underneath are potentially still being accessed by the
	 * GPU, as per async GPU clearing and async evictions. However TTM makes
	 * sure to add any corresponding move/clear fences into the objects
	 * dma-resv using the DMA_RESV_USAGE_KERNEL slot.
	 *
	 * For KMD internal buffers we don't care about GPU clearing, however we
	 * still need to handle async evictions, where the VRAM is still being
	 * accessed by the GPU. Most internal callers are not expecting this,
	 * since they are missing the required synchronisation before accessing
	 * the memory. To keep things simple just sync wait any kernel fences
	 * here, if the buffer is designated KMD internal.
	 *
	 * For normal userspace objects we should already have the required
	 * pipelining or sync waiting elsewhere, since we already have to deal
	 * with things like async GPU clearing.
	 */
	if (type == ttm_bo_type_kernel) {
		long timeout = dma_resv_wait_timeout(bo->ttm.base.resv,
						     DMA_RESV_USAGE_KERNEL,
						     ctx.interruptible,
						     MAX_SCHEDULE_TIMEOUT);

		if (timeout < 0) {
			if (!resv)
				dma_resv_unlock(bo->ttm.base.resv);
			xe_bo_put(bo);
			return ERR_PTR(timeout);
		}
	}

	bo->created = true;
	if (bulk)
		ttm_bo_set_bulk_move(&bo->ttm, bulk);
	else
		ttm_bo_move_to_lru_tail_unlocked(&bo->ttm);

	return bo;
}

static int __xe_bo_fixed_placement(struct xe_device *xe,
				   struct xe_bo *bo,
				   u32 flags,
				   u64 start, u64 end, u64 size)
{
	struct ttm_place *place = bo->placements;

	if (flags & (XE_BO_CREATE_USER_BIT | XE_BO_CREATE_SYSTEM_BIT))
		return -EINVAL;

	place->flags = TTM_PL_FLAG_CONTIGUOUS;
	place->fpfn = start >> PAGE_SHIFT;
	place->lpfn = end >> PAGE_SHIFT;

	switch (flags & (XE_BO_CREATE_STOLEN_BIT | XE_BO_CREATE_VRAM_MASK)) {
	case XE_BO_CREATE_VRAM0_BIT:
		place->mem_type = XE_PL_VRAM0;
		break;
	case XE_BO_CREATE_VRAM1_BIT:
		place->mem_type = XE_PL_VRAM1;
		break;
	case XE_BO_CREATE_STOLEN_BIT:
		place->mem_type = XE_PL_STOLEN;
		break;

	default:
		/* 0 or multiple of the above set */
		return -EINVAL;
	}

	bo->placement = (struct ttm_placement) {
		.num_placement = 1,
		.placement = place,
		.num_busy_placement = 1,
		.busy_placement = place,
	};

	return 0;
}

static struct xe_bo *
xe_bo_create_locked_range(struct xe_device *xe,
			  struct xe_tile *tile, struct xe_vm *vm,
			  size_t size, u64 start, u64 end,
			  enum ttm_bo_type type, u32 flags)
{
	struct xe_bo *bo = NULL;
	int err;

	if (vm)
		xe_vm_assert_held(vm);

	if (start || end != ~0ULL) {
		bo = xe_bo_alloc();
		if (IS_ERR(bo))
			return bo;

		flags |= XE_BO_FIXED_PLACEMENT_BIT;
		err = __xe_bo_fixed_placement(xe, bo, flags, start, end, size);
		if (err) {
			xe_bo_free(bo);
			return ERR_PTR(err);
		}
	}

	bo = __xe_bo_create_locked(xe, bo, tile, vm ? xe_vm_resv(vm) : NULL,
				   vm && !xe_vm_in_fault_mode(vm) &&
				   flags & XE_BO_CREATE_USER_BIT ?
				   &vm->lru_bulk_move : NULL, size,
				   type, flags);
	if (IS_ERR(bo))
		return bo;

	/*
	 * Note that instead of taking a reference to the drm_gpuvm_resv_bo(),
	 * to ensure the shared resv doesn't disappear under the bo, the bo
	 * will keep a reference to the vm, and avoid circular references
	 * by having all the vm's bo references released at vm close
	 * time.
	 */
	if (vm && xe_bo_is_user(bo))
		xe_vm_get(vm);
	bo->vm = vm;

	if (bo->flags & XE_BO_CREATE_GGTT_BIT) {
		if (!tile && flags & XE_BO_CREATE_STOLEN_BIT)
			tile = xe_device_get_root_tile(xe);

		xe_assert(xe, tile);

		if (flags & XE_BO_CREATE_STOLEN_BIT &&
		    flags & XE_BO_FIXED_PLACEMENT_BIT) {
			err = xe_ggtt_insert_bo_at(tile->mem.ggtt, bo, start);
		} else {
			err = xe_ggtt_insert_bo(tile->mem.ggtt, bo);
		}
		if (err)
			goto err_unlock_put_bo;
	}

	return bo;

err_unlock_put_bo:
	__xe_bo_unset_bulk_move(bo);
	xe_bo_unlock_vm_held(bo);
	xe_bo_put(bo);
	return ERR_PTR(err);
}

struct xe_bo *xe_bo_create_locked(struct xe_device *xe, struct xe_tile *tile,
				  struct xe_vm *vm, size_t size,
				  enum ttm_bo_type type, u32 flags)
{
	return xe_bo_create_locked_range(xe, tile, vm, size, 0, ~0ULL, type, flags);
}

struct xe_bo *xe_bo_create(struct xe_device *xe, struct xe_tile *tile,
			   struct xe_vm *vm, size_t size,
			   enum ttm_bo_type type, u32 flags)
{
	struct xe_bo *bo = xe_bo_create_locked(xe, tile, vm, size, type, flags);

	if (!IS_ERR(bo))
		xe_bo_unlock_vm_held(bo);

	return bo;
}

struct xe_bo *xe_bo_create_pin_map_at(struct xe_device *xe, struct xe_tile *tile,
				      struct xe_vm *vm,
				      size_t size, u64 offset,
				      enum ttm_bo_type type, u32 flags)
{
	struct xe_bo *bo;
	int err;
	u64 start = offset == ~0ull ? 0 : offset;
	u64 end = offset == ~0ull ? offset : start + size;

	if (flags & XE_BO_CREATE_STOLEN_BIT &&
	    xe_ttm_stolen_cpu_access_needs_ggtt(xe))
		flags |= XE_BO_CREATE_GGTT_BIT;

	bo = xe_bo_create_locked_range(xe, tile, vm, size, start, end, type,
				       flags | XE_BO_NEEDS_CPU_ACCESS);
	if (IS_ERR(bo))
		return bo;

	err = xe_bo_pin(bo);
	if (err)
		goto err_put;

	err = xe_bo_vmap(bo);
	if (err)
		goto err_unpin;

	xe_bo_unlock_vm_held(bo);

	return bo;

err_unpin:
	xe_bo_unpin(bo);
err_put:
	xe_bo_unlock_vm_held(bo);
	xe_bo_put(bo);
	return ERR_PTR(err);
}

struct xe_bo *xe_bo_create_pin_map(struct xe_device *xe, struct xe_tile *tile,
				   struct xe_vm *vm, size_t size,
				   enum ttm_bo_type type, u32 flags)
{
	return xe_bo_create_pin_map_at(xe, tile, vm, size, ~0ull, type, flags);
}

struct xe_bo *xe_bo_create_from_data(struct xe_device *xe, struct xe_tile *tile,
				     const void *data, size_t size,
				     enum ttm_bo_type type, u32 flags)
{
	struct xe_bo *bo = xe_bo_create_pin_map(xe, tile, NULL,
						ALIGN(size, PAGE_SIZE),
						type, flags);
	if (IS_ERR(bo))
		return bo;

	xe_map_memcpy_to(xe, &bo->vmap, 0, data, size);

	return bo;
}

/*
 * XXX: This is in the VM bind data path, likely should calculate this once and
 * store, with a recalculation if the BO is moved.
 */
uint64_t vram_region_gpu_offset(struct ttm_resource *res)
{
	struct xe_device *xe = ttm_to_xe_device(res->bo->bdev);
	struct xe_tile *tile = mem_type_to_tile(xe, res->mem_type);

	if (res->mem_type == XE_PL_STOLEN)
		return xe_ttm_stolen_gpu_offset(xe);

	return tile->mem.vram.dpa_base;
}

/**
 * xe_bo_pin_external - pin an external BO
 * @bo: buffer object to be pinned
 *
 * Pin an external (not tied to a VM, can be exported via dma-buf / prime FD)
 * BO. Unique call compared to xe_bo_pin as this function has its own set of
 * asserts and code to ensure evict / restore on suspend / resume.
 *
 * Returns 0 for success, negative error code otherwise.
 */
int xe_bo_pin_external(struct xe_bo *bo)
{
	struct xe_device *xe = xe_bo_device(bo);
	int err;

	xe_assert(xe, !bo->vm);
	xe_assert(xe, xe_bo_is_user(bo));

	if (!xe_bo_is_pinned(bo)) {
		err = xe_bo_validate(bo, NULL, false);
		if (err)
			return err;

		if (xe_bo_is_vram(bo)) {
			spin_lock(&xe->pinned.lock);
			list_add_tail(&bo->pinned_link,
				      &xe->pinned.external_vram);
			spin_unlock(&xe->pinned.lock);
		}
	}

	ttm_bo_pin(&bo->ttm);

	/*
	 * FIXME: If we always use the reserve / unreserve functions for locking
	 * we do not need this.
	 */
	ttm_bo_move_to_lru_tail_unlocked(&bo->ttm);

	return 0;
}

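/*
 * Usage sketch (assumed dma-buf exporter flow, illustrative only): with
 * the BO's dma_resv already held by the dma-buf core, an exporter's pin
 * callback could simply forward here:
 *
 *	static int example_dma_buf_pin(struct dma_buf_attachment *attach)
 *	{
 *		struct drm_gem_object *obj = attach->dmabuf->priv;
 *
 *		return xe_bo_pin_external(gem_to_xe_bo(obj));
 *	}
 */
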
int xe_bo_pin(struct xe_bo *bo)
{
	struct xe_device *xe = xe_bo_device(bo);
	int err;

	/* We currently don't expect user BO to be pinned */
	xe_assert(xe, !xe_bo_is_user(bo));

	/* Pinned object must be in GGTT or have pinned flag */
	xe_assert(xe, bo->flags & (XE_BO_CREATE_PINNED_BIT |
				   XE_BO_CREATE_GGTT_BIT));

	/*
	 * No reason we can't support pinning imported dma-bufs we just don't
	 * expect to pin an imported dma-buf.
	 */
	xe_assert(xe, !bo->ttm.base.import_attach);

	/* We only expect at most 1 pin */
	xe_assert(xe, !xe_bo_is_pinned(bo));

	err = xe_bo_validate(bo, NULL, false);
	if (err)
		return err;

	/*
	 * For pinned objects on DGFX, which are also in VRAM, we expect
	 * these to be in contiguous VRAM memory. Required eviction / restore
	 * during suspend / resume (force restore to same physical address).
	 */
	if (IS_DGFX(xe) && !(IS_ENABLED(CONFIG_DRM_XE_DEBUG) &&
			     bo->flags & XE_BO_INTERNAL_TEST)) {
		struct ttm_place *place = &(bo->placements[0]);

		if (mem_type_is_vram(place->mem_type)) {
			xe_assert(xe, place->flags & TTM_PL_FLAG_CONTIGUOUS);

			place->fpfn = (xe_bo_addr(bo, 0, PAGE_SIZE) -
				       vram_region_gpu_offset(bo->ttm.resource)) >> PAGE_SHIFT;
			place->lpfn = place->fpfn + (bo->size >> PAGE_SHIFT);

			spin_lock(&xe->pinned.lock);
			list_add_tail(&bo->pinned_link, &xe->pinned.kernel_bo_present);
			spin_unlock(&xe->pinned.lock);
		}
	}

	ttm_bo_pin(&bo->ttm);

	/*
	 * FIXME: If we always use the reserve / unreserve functions for locking
	 * we do not need this.
	 */
	ttm_bo_move_to_lru_tail_unlocked(&bo->ttm);

	return 0;
}

/**
 * xe_bo_unpin_external - unpin an external BO
 * @bo: buffer object to be unpinned
 *
 * Unpin an external (not tied to a VM, can be exported via dma-buf / prime FD)
 * BO. Unique call compared to xe_bo_unpin as this function has its own set of
 * asserts and code to ensure evict / restore on suspend / resume.
 */
void xe_bo_unpin_external(struct xe_bo *bo)
{
	struct xe_device *xe = xe_bo_device(bo);

	xe_assert(xe, !bo->vm);
	xe_assert(xe, xe_bo_is_pinned(bo));
	xe_assert(xe, xe_bo_is_user(bo));

	if (bo->ttm.pin_count == 1 && !list_empty(&bo->pinned_link)) {
		spin_lock(&xe->pinned.lock);
		list_del_init(&bo->pinned_link);
		spin_unlock(&xe->pinned.lock);
	}

	ttm_bo_unpin(&bo->ttm);

	/*
	 * FIXME: If we always use the reserve / unreserve functions for locking
	 * we do not need this.
	 */
	ttm_bo_move_to_lru_tail_unlocked(&bo->ttm);
}

void xe_bo_unpin(struct xe_bo *bo)
{
	struct xe_device *xe = xe_bo_device(bo);

	xe_assert(xe, !bo->ttm.base.import_attach);
	xe_assert(xe, xe_bo_is_pinned(bo));

	if (IS_DGFX(xe) && !(IS_ENABLED(CONFIG_DRM_XE_DEBUG) &&
			     bo->flags & XE_BO_INTERNAL_TEST)) {
		struct ttm_place *place = &(bo->placements[0]);

		if (mem_type_is_vram(place->mem_type)) {
			xe_assert(xe, !list_empty(&bo->pinned_link));

			spin_lock(&xe->pinned.lock);
			list_del_init(&bo->pinned_link);
			spin_unlock(&xe->pinned.lock);
		}
	}

	ttm_bo_unpin(&bo->ttm);
}

/**
 * xe_bo_validate() - Make sure the bo is in an allowed placement
 * @bo: The bo,
 * @vm: Pointer to the vm the bo shares a locked dma_resv object with, or
 *      NULL. Used together with @allow_res_evict.
 * @allow_res_evict: Whether it's allowed to evict bos sharing @vm's
 *                   reservation object.
 *
 * Make sure the bo is in allowed placement, migrating it if necessary. If
 * needed, other bos will be evicted. If bos selected for eviction share
 * the @vm's reservation object, they can be evicted iff @allow_res_evict is
 * set to true, otherwise they will be bypassed.
 *
 * Return: 0 on success, negative error code on failure. May return
 * -EINTR or -ERESTARTSYS if internal waits are interrupted by a signal.
 */
int xe_bo_validate(struct xe_bo *bo, struct xe_vm *vm, bool allow_res_evict)
{
	struct ttm_operation_ctx ctx = {
		.interruptible = true,
		.no_wait_gpu = false,
	};

	if (vm) {
		lockdep_assert_held(&vm->lock);
		xe_vm_assert_held(vm);

		ctx.allow_res_evict = allow_res_evict;
		ctx.resv = xe_vm_resv(vm);
	}

	return ttm_bo_validate(&bo->ttm, &bo->placement, &ctx);
}

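/*
 * Usage sketch (illustrative): a caller that shares the vm's locked
 * dma_resv may allow eviction of other bos in that vm:
 *
 *	err = xe_bo_validate(bo, vm, true);
 *
 * while a caller holding only the single-bo lock passes a NULL vm:
 *
 *	err = xe_bo_validate(bo, NULL, false);
 */
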
bool xe_bo_is_xe_bo(struct ttm_buffer_object *bo)
{
	if (bo->destroy == &xe_ttm_bo_destroy)
		return true;

	return false;
}

/*
 * Resolve a BO address. There is no assert to check if the proper lock is held
 * so it should only be used in cases where it is not fatal to get the wrong
 * address, such as printing debug information, but not in cases where memory is
 * written based on this result.
 */
dma_addr_t __xe_bo_addr(struct xe_bo *bo, u64 offset, size_t page_size)
{
	struct xe_device *xe = xe_bo_device(bo);
	struct xe_res_cursor cur;
	u64 page;

	xe_assert(xe, page_size <= PAGE_SIZE);
	page = offset >> PAGE_SHIFT;
	offset &= (PAGE_SIZE - 1);

	if (!xe_bo_is_vram(bo) && !xe_bo_is_stolen(bo)) {
		xe_assert(xe, bo->ttm.ttm);

		xe_res_first_sg(xe_bo_get_sg(bo), page << PAGE_SHIFT,
				page_size, &cur);
		return xe_res_dma(&cur) + offset;
	} else {
		struct xe_res_cursor cur;

		xe_res_first(bo->ttm.resource, page << PAGE_SHIFT,
			     page_size, &cur);
		return cur.start + offset + vram_region_gpu_offset(bo->ttm.resource);
	}
}

dma_addr_t xe_bo_addr(struct xe_bo *bo, u64 offset, size_t page_size)
{
	if (!READ_ONCE(bo->ttm.pin_count))
		xe_bo_assert_held(bo);
	return __xe_bo_addr(bo, offset, page_size);
}

int xe_bo_vmap(struct xe_bo *bo)
{
	void *virtual;
	bool is_iomem;
	int ret;

	xe_bo_assert_held(bo);

	if (!(bo->flags & XE_BO_NEEDS_CPU_ACCESS))
		return -EINVAL;

	if (!iosys_map_is_null(&bo->vmap))
		return 0;

	/*
	 * We use this more or less deprecated interface for now since
	 * ttm_bo_vmap() doesn't offer the optimization of kmapping
	 * single page bos, which is done here.
	 * TODO: Fix up ttm_bo_vmap to do that, or fix up ttm_bo_kmap
	 * to use struct iosys_map.
	 */
	ret = ttm_bo_kmap(&bo->ttm, 0, bo->size >> PAGE_SHIFT, &bo->kmap);
	if (ret)
		return ret;

	virtual = ttm_kmap_obj_virtual(&bo->kmap, &is_iomem);
	if (is_iomem)
		iosys_map_set_vaddr_iomem(&bo->vmap, (void __iomem *)virtual);
	else
		iosys_map_set_vaddr(&bo->vmap, virtual);

	return 0;
}

static void __xe_bo_vunmap(struct xe_bo *bo)
{
	if (!iosys_map_is_null(&bo->vmap)) {
		iosys_map_clear(&bo->vmap);
		ttm_bo_kunmap(&bo->kmap);
	}
}

void xe_bo_vunmap(struct xe_bo *bo)
{
	xe_bo_assert_held(bo);
	__xe_bo_vunmap(bo);
}

int xe_gem_create_ioctl(struct drm_device *dev, void *data,
			struct drm_file *file)
{
	struct xe_device *xe = to_xe_device(dev);
	struct xe_file *xef = to_xe_file(file);
	struct drm_xe_gem_create *args = data;
	struct xe_vm *vm = NULL;
	struct xe_bo *bo;
	unsigned int bo_flags = XE_BO_CREATE_USER_BIT;
	u32 handle;
	int err;

	if (XE_IOCTL_DBG(xe, args->extensions) || XE_IOCTL_DBG(xe, args->pad) ||
	    XE_IOCTL_DBG(xe, args->reserved[0] || args->reserved[1]))
		return -EINVAL;

	if (XE_IOCTL_DBG(xe, args->flags &
			 ~(XE_GEM_CREATE_FLAG_DEFER_BACKING |
			   XE_GEM_CREATE_FLAG_SCANOUT |
			   XE_GEM_CREATE_FLAG_NEEDS_VISIBLE_VRAM |
			   xe->info.mem_region_mask)))
		return -EINVAL;

	/* at least one memory type must be specified */
	if (XE_IOCTL_DBG(xe, !(args->flags & xe->info.mem_region_mask)))
		return -EINVAL;

	if (XE_IOCTL_DBG(xe, args->handle))
		return -EINVAL;

	if (XE_IOCTL_DBG(xe, !args->size))
		return -EINVAL;

	if (XE_IOCTL_DBG(xe, args->size > SIZE_MAX))
		return -EINVAL;

	if (XE_IOCTL_DBG(xe, args->size & ~PAGE_MASK))
		return -EINVAL;

	if (args->flags & XE_GEM_CREATE_FLAG_DEFER_BACKING)
		bo_flags |= XE_BO_DEFER_BACKING;

	if (args->flags & XE_GEM_CREATE_FLAG_SCANOUT)
		bo_flags |= XE_BO_SCANOUT_BIT;

	bo_flags |= args->flags << (ffs(XE_BO_CREATE_SYSTEM_BIT) - 1);

	if (args->flags & XE_GEM_CREATE_FLAG_NEEDS_VISIBLE_VRAM) {
		if (XE_IOCTL_DBG(xe, !(bo_flags & XE_BO_CREATE_VRAM_MASK)))
			return -EINVAL;

		bo_flags |= XE_BO_NEEDS_CPU_ACCESS;
	}

	if (args->vm_id) {
		vm = xe_vm_lookup(xef, args->vm_id);
		if (XE_IOCTL_DBG(xe, !vm))
			return -ENOENT;
		err = xe_vm_lock(vm, true);
		if (err)
			goto out_vm;
	}

	bo = xe_bo_create(xe, NULL, vm, args->size, ttm_bo_type_device,
			  bo_flags);

	if (vm)
		xe_vm_unlock(vm);

	if (IS_ERR(bo)) {
		err = PTR_ERR(bo);
		goto out_vm;
	}

	err = drm_gem_handle_create(file, &bo->ttm.base, &handle);
	if (err)
		goto out_bulk;

	args->handle = handle;
	goto out_put;

out_bulk:
	if (vm && !xe_vm_in_fault_mode(vm))
		__xe_bo_unset_bulk_move(bo);
out_put:
	xe_bo_put(bo);
out_vm:
	if (vm)
		xe_vm_put(vm);

	return err;
}

int xe_gem_mmap_offset_ioctl(struct drm_device *dev, void *data,
			     struct drm_file *file)
{
	struct xe_device *xe = to_xe_device(dev);
	struct drm_xe_gem_mmap_offset *args = data;
	struct drm_gem_object *gem_obj;

	if (XE_IOCTL_DBG(xe, args->extensions) ||
	    XE_IOCTL_DBG(xe, args->reserved[0] || args->reserved[1]))
		return -EINVAL;

	if (XE_IOCTL_DBG(xe, args->flags))
		return -EINVAL;

	gem_obj = drm_gem_object_lookup(file, args->handle);
	if (XE_IOCTL_DBG(xe, !gem_obj))
		return -ENOENT;

	/* The mmap offset was set up at BO allocation time. */
	args->offset = drm_vma_node_offset_addr(&gem_obj->vma_node);

	xe_bo_put(gem_to_xe_bo(gem_obj));
	return 0;
}

/**
 * xe_bo_lock() - Lock the buffer object's dma_resv object
 * @bo: The struct xe_bo whose lock is to be taken
 * @intr: Whether to perform any wait interruptible
 *
 * Locks the buffer object's dma_resv object. If the buffer object is
 * pointing to a shared dma_resv object, that shared lock is locked.
 *
 * Return: 0 on success, -EINTR if @intr is true and the wait for a
 * contended lock was interrupted. If @intr is set to false, the
 * function always returns 0.
 */
int xe_bo_lock(struct xe_bo *bo, bool intr)
{
	if (intr)
		return dma_resv_lock_interruptible(bo->ttm.base.resv, NULL);

	dma_resv_lock(bo->ttm.base.resv, NULL);

	return 0;
}

/**
 * xe_bo_unlock() - Unlock the buffer object's dma_resv object
 * @bo: The struct xe_bo whose lock is to be released.
 *
 * Unlock a buffer object lock that was locked by xe_bo_lock().
 */
void xe_bo_unlock(struct xe_bo *bo)
{
	dma_resv_unlock(bo->ttm.base.resv);
}

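/*
 * Usage sketch (illustrative): an interruptible lock attempt that
 * propagates -EINTR back to the caller:
 *
 *	int err = xe_bo_lock(bo, true);
 *
 *	if (err)
 *		return err;
 *	... operate on the bo under its dma_resv ...
 *	xe_bo_unlock(bo);
 */
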
/**
 * xe_bo_can_migrate - Whether a buffer object likely can be migrated
 * @bo: The buffer object to migrate
 * @mem_type: The TTM memory type intended to migrate to
 *
 * Check whether the buffer object supports migration to the
 * given memory type. Note that pinning may affect the ability to migrate as
 * returned by this function.
 *
 * This function is primarily intended as a helper for checking the
 * possibility to migrate buffer objects and can be called without
 * the object lock held.
 *
 * Return: true if migration is possible, false otherwise.
 */
bool xe_bo_can_migrate(struct xe_bo *bo, u32 mem_type)
{
	unsigned int cur_place;

	if (bo->ttm.type == ttm_bo_type_kernel)
		return true;

	if (bo->ttm.type == ttm_bo_type_sg)
		return false;

	for (cur_place = 0; cur_place < bo->placement.num_placement;
	     cur_place++) {
		if (bo->placements[cur_place].mem_type == mem_type)
			return true;
	}

	return false;
}

static void xe_place_from_ttm_type(u32 mem_type, struct ttm_place *place)
{
	memset(place, 0, sizeof(*place));
	place->mem_type = mem_type;
}

/**
 * xe_bo_migrate - Migrate an object to the desired region id
 * @bo: The buffer object to migrate.
 * @mem_type: The TTM region type to migrate to.
 *
 * Attempt to migrate the buffer object to the desired memory region. The
 * buffer object may not be pinned, and must be locked.
 * On successful completion, the object memory type will be updated,
 * but an async migration task may not have completed yet, and to
 * accomplish that, the object's kernel fences must be signaled with
 * the object lock held.
 *
 * Return: 0 on success. Negative error code on failure. In particular may
 * return -EINTR or -ERESTARTSYS if signal pending.
 */
int xe_bo_migrate(struct xe_bo *bo, u32 mem_type)
{
	struct xe_device *xe = ttm_to_xe_device(bo->ttm.bdev);
	struct ttm_operation_ctx ctx = {
		.interruptible = true,
		.no_wait_gpu = false,
	};
	struct ttm_placement placement;
	struct ttm_place requested;

	xe_bo_assert_held(bo);

	if (bo->ttm.resource->mem_type == mem_type)
		return 0;

	if (xe_bo_is_pinned(bo))
		return -EBUSY;

	if (!xe_bo_can_migrate(bo, mem_type))
		return -EINVAL;

	xe_place_from_ttm_type(mem_type, &requested);
	placement.num_placement = 1;
	placement.num_busy_placement = 1;
	placement.placement = &requested;
	placement.busy_placement = &requested;

	/*
	 * Stolen needs to be handled like below VRAM handling if we ever need
	 * to support it.
	 */
	drm_WARN_ON(&xe->drm, mem_type == XE_PL_STOLEN);

	if (mem_type_is_vram(mem_type)) {
		u32 c = 0;

		add_vram(xe, bo, &requested, bo->flags, mem_type, &c);
	}

	return ttm_bo_validate(&bo->ttm, &placement, &ctx);
}

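/*
 * Usage sketch (illustrative, mirroring the CPU-fault path above):
 * migrate a BO to TT memory and wait for the async move to finish:
 *
 *	xe_bo_lock(bo, false);
 *	err = xe_bo_migrate(bo, XE_PL_TT);
 *	if (!err)
 *		dma_resv_wait_timeout(bo->ttm.base.resv,
 *				      DMA_RESV_USAGE_KERNEL, false,
 *				      MAX_SCHEDULE_TIMEOUT);
 *	xe_bo_unlock(bo);
 */
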
/**
 * xe_bo_evict - Evict an object to evict placement
 * @bo: The buffer object to migrate.
 * @force_alloc: Set force_alloc in ttm_operation_ctx
 *
 * On successful completion, the object memory will be moved to evict
 * placement. This function blocks until the object has been fully moved.
 *
 * Return: 0 on success. Negative error code on failure.
 */
int xe_bo_evict(struct xe_bo *bo, bool force_alloc)
{
	struct ttm_operation_ctx ctx = {
		.interruptible = false,
		.no_wait_gpu = false,
		.force_alloc = force_alloc,
	};
	struct ttm_placement placement;
	int ret;

	xe_evict_flags(&bo->ttm, &placement);
	ret = ttm_bo_validate(&bo->ttm, &placement, &ctx);
	if (ret)
		return ret;

	dma_resv_wait_timeout(bo->ttm.base.resv, DMA_RESV_USAGE_KERNEL,
			      false, MAX_SCHEDULE_TIMEOUT);

	return 0;
}

/**
 * xe_bo_needs_ccs_pages - Whether a bo needs to back up CCS pages when
 * placed in system memory.
 * @bo: The xe_bo
 *
 * If a bo has an allowable placement in XE_PL_TT memory, it can't use
 * flat CCS compression, because the GPU then has no way to access the
 * CCS metadata using relevant commands. For the opposite case, we need to
 * allocate storage for the CCS metadata when the BO is not resident in
 * VRAM memory.
 *
 * Return: true if extra pages need to be allocated, false otherwise.
 */
bool xe_bo_needs_ccs_pages(struct xe_bo *bo)
{
	return bo->ttm.type == ttm_bo_type_device &&
		!(bo->flags & XE_BO_CREATE_SYSTEM_BIT) &&
		(bo->flags & XE_BO_CREATE_VRAM_MASK);
}

/**
 * __xe_bo_release_dummy() - Dummy kref release function
 * @kref: The embedded struct kref.
 *
 * Dummy release function for xe_bo_put_deferred(). Keep off.
 */
void __xe_bo_release_dummy(struct kref *kref)
{
}

/**
 * xe_bo_put_commit() - Put bos whose put was deferred by xe_bo_put_deferred().
 * @deferred: The lockless list used for the call to xe_bo_put_deferred().
 *
 * Puts all bos whose put was deferred by xe_bo_put_deferred().
 * The @deferred list can be either an onstack local list or a global
 * shared list used by a workqueue.
 */
void xe_bo_put_commit(struct llist_head *deferred)
{
	struct llist_node *freed;
	struct xe_bo *bo, *next;

	if (!deferred)
		return;

	freed = llist_del_all(deferred);
	if (!freed)
		return;

	llist_for_each_entry_safe(bo, next, freed, freed)
		drm_gem_object_free(&bo->ttm.base.refcount);
}

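/*
 * Usage sketch (illustrative): defer puts that would otherwise happen in
 * a context where the final free is unsafe, then commit them later:
 *
 *	LLIST_HEAD(deferred);
 *
 *	xe_bo_put_deferred(bo, &deferred);	(instead of xe_bo_put())
 *	...
 *	xe_bo_put_commit(&deferred);		(from a safe context)
 */
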
/**
 * xe_bo_dumb_create - Create a dumb bo as backing for a fb
 * @file_priv: The drm file
 * @dev: The drm device
 * @args: The dumb buffer create arguments
 *
 * See dumb_create() hook in include/drm/drm_drv.h
 */
int xe_bo_dumb_create(struct drm_file *file_priv,
		      struct drm_device *dev,
		      struct drm_mode_create_dumb *args)
{
	struct xe_device *xe = to_xe_device(dev);
	struct xe_bo *bo;
	uint32_t handle;
	int cpp = DIV_ROUND_UP(args->bpp, 8);
	int err;
	u32 page_size = max_t(u32, PAGE_SIZE,
			      xe->info.vram_flags & XE_VRAM_FLAGS_NEED64K ? SZ_64K : SZ_4K);

	args->pitch = ALIGN(args->width * cpp, 64);
	args->size = ALIGN(mul_u32_u32(args->pitch, args->height),
			   page_size);

	bo = xe_bo_create(xe, NULL, NULL, args->size, ttm_bo_type_device,
			  XE_BO_CREATE_VRAM_IF_DGFX(xe_device_get_root_tile(xe)) |
			  XE_BO_CREATE_USER_BIT | XE_BO_SCANOUT_BIT |
			  XE_BO_NEEDS_CPU_ACCESS);
	if (IS_ERR(bo))
		return PTR_ERR(bo);

	err = drm_gem_handle_create(file_priv, &bo->ttm.base, &handle);
	/* drop reference from allocate - handle holds it now */
	drm_gem_object_put(&bo->ttm.base);
	if (!err)
		args->handle = handle;
	return err;
}

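/*
 * Worked example (illustrative): for a 1920x1080 dumb buffer at bpp = 32,
 * cpp = DIV_ROUND_UP(32, 8) = 4, pitch = ALIGN(1920 * 4, 64) = 7680
 * bytes, and with a 4K page size, size = ALIGN(7680 * 1080, 4096) =
 * 8294400 bytes (2025 pages).
 */
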
#if IS_ENABLED(CONFIG_DRM_XE_KUNIT_TEST)
#include "tests/xe_bo.c"
#endif