// SPDX-License-Identifier: MIT
/*
 * Copyright © 2020 Intel Corporation
 */

#include "xe_migrate.h"

#include <linux/bitfield.h>
#include <linux/sizes.h>

#include <drm/drm_managed.h>
#include <drm/ttm/ttm_tt.h>
#include <drm/xe_drm.h>

#include "generated/xe_wa_oob.h"
#include "instructions/xe_mi_commands.h"
#include "regs/xe_gpu_commands.h"
#include "tests/xe_test.h"
#include "xe_assert.h"
#include "xe_exec_queue.h"
#include "xe_hw_engine.h"
#include "xe_res_cursor.h"
#include "xe_sched_job.h"

/**
 * struct xe_migrate - migrate context.
 */
struct xe_migrate {
        /** @q: Default exec queue used for migration */
        struct xe_exec_queue *q;
        /** @tile: Backpointer to the tile this struct xe_migrate belongs to. */
        struct xe_tile *tile;
        /** @job_mutex: Timeline mutex for @q. */
        struct mutex job_mutex;
        /** @pt_bo: Page-table buffer object. */
        struct xe_bo *pt_bo;
        /** @batch_base_ofs: VM offset of the migration batch buffer */
        u64 batch_base_ofs;
        /** @usm_batch_base_ofs: VM offset of the usm batch buffer */
        u64 usm_batch_base_ofs;
        /** @cleared_mem_ofs: VM offset of the NULL-mapped memory used as a source of zeroes. */
        u64 cleared_mem_ofs;
        /**
         * @fence: dma-fence representing the last migration job batch.
         * Protected by @job_mutex.
         */
        struct dma_fence *fence;
        /**
         * @vm_update_sa: For integrated, used to suballocate page-tables
         * out of the pt_bo.
         */
        struct drm_suballoc_manager vm_update_sa;
};

#define MAX_PREEMPTDISABLE_TRANSFER SZ_8M /* Around 1ms. */
#define MAX_CCS_LIMITED_TRANSFER SZ_4M /* XE_PAGE_SIZE * (FIELD_MAX(XE2_CCS_SIZE_MASK) + 1) */
#define NUM_KERNEL_PDE 17
#define NUM_PT_SLOTS 32
#define LEVEL0_PAGE_TABLE_ENCODE_SIZE SZ_2M

/**
 * xe_tile_migrate_engine() - Get this tile's migrate engine.
 * @tile: The tile.
 *
 * Returns the default migrate engine of this tile.
 * TODO: Perhaps this function is slightly misplaced, and even unneeded?
 *
 * Return: The default migrate engine
 */
struct xe_exec_queue *xe_tile_migrate_engine(struct xe_tile *tile)
{
        return tile->migrate->q;
}

static void xe_migrate_fini(struct drm_device *dev, void *arg)
{
        struct xe_migrate *m = arg;

        xe_vm_lock(m->q->vm, false);
        xe_bo_unpin(m->pt_bo);
        xe_vm_unlock(m->q->vm);

        dma_fence_put(m->fence);

        drm_suballoc_manager_fini(&m->vm_update_sa);
        mutex_destroy(&m->job_mutex);
        xe_vm_close_and_put(m->q->vm);
        xe_exec_queue_put(m->q);
}

static u64 xe_migrate_vm_addr(u64 slot, u32 level)
{
        XE_WARN_ON(slot >= NUM_PT_SLOTS);

        /* First slot is reserved for mapping of PT bo and bb, start from 1 */
        return (slot + 1ULL) << xe_pt_shift(level + 1);
}

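/*
 * Illustrative example (assuming 4KiB GPU pages, where xe_pt_shift(1)
 * evaluates to 21): level-0 slot 0 maps to VM address 1ULL << 21 = 2MiB,
 * slot 1 to 4MiB, and so on. Each PT slot therefore owns a disjoint 2MiB
 * window, which is what allows different slots to be updated without
 * aliasing one another.
 */
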
static u64 xe_migrate_vram_ofs(struct xe_device *xe, u64 addr)
{
        /*
         * Remove the DPA to get a correct offset into identity table for the
         * migrate offset
         */
        addr -= xe->mem.vram.dpa_base;
        return addr + (256ULL << xe_pt_shift(2));
}

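/*
 * Illustrative arithmetic (assuming 4KiB GPU pages, where xe_pt_shift(2)
 * evaluates to 30): 256ULL << 30 is 256GiB, the base of the VRAM identity
 * map built in xe_migrate_prepare_vm(). For example, with a dpa_base of
 * 8GiB, a VRAM device address of 8GiB + 4096 yields the migrate VM
 * address 256GiB + 4096.
 */
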
static int xe_migrate_prepare_vm(struct xe_tile *tile, struct xe_migrate *m,
                                 struct xe_vm *vm)
{
        struct xe_device *xe = tile_to_xe(tile);
        u16 pat_index = xe->pat.idx[XE_CACHE_WB];
        u8 id = tile->id;
        u32 num_entries = NUM_PT_SLOTS, num_level = vm->pt_root[id]->level;
        u32 map_ofs, level, i;
        struct xe_bo *bo, *batch = tile->mem.kernel_bb_pool->bo;
        u64 entry;

        /* Can't bump NUM_PT_SLOTS too high */
        BUILD_BUG_ON(NUM_PT_SLOTS > SZ_2M / XE_PAGE_SIZE);
        /* Must be a multiple of 64K to support all platforms */
        BUILD_BUG_ON(NUM_PT_SLOTS * XE_PAGE_SIZE % SZ_64K);
        /* And one slot reserved for the 4KiB page table updates */
        BUILD_BUG_ON(!(NUM_KERNEL_PDE & 1));

        /* Need to be sure everything fits in the first PT, or create more */
        xe_tile_assert(tile, m->batch_base_ofs + batch->size < SZ_2M);
        bo = xe_bo_create_pin_map(vm->xe, tile, vm,
                                  num_entries * XE_PAGE_SIZE,
                                  ttm_bo_type_kernel,
                                  XE_BO_CREATE_VRAM_IF_DGFX(tile) |
                                  XE_BO_CREATE_PINNED_BIT);
        if (IS_ERR(bo))
                return PTR_ERR(bo);

        entry = vm->pt_ops->pde_encode_bo(bo, bo->size - XE_PAGE_SIZE, pat_index);
        xe_pt_write(xe, &vm->pt_root[id]->bo->vmap, 0, entry);
        map_ofs = (num_entries - num_level) * XE_PAGE_SIZE;

        /* Map the entire BO in our level 0 pt */
        for (i = 0, level = 0; i < num_entries; level++) {
                entry = vm->pt_ops->pte_encode_bo(bo, i * XE_PAGE_SIZE,
                                                  pat_index, 0);

                xe_map_wr(xe, &bo->vmap, map_ofs + level * 8, u64, entry);

                if (vm->flags & XE_VM_FLAG_64K)
                        i += 16;
                else
                        i += 1;
        }
        if (!IS_DGFX(xe)) {
                /* Write out batch too */
                m->batch_base_ofs = NUM_PT_SLOTS * XE_PAGE_SIZE;
                if (xe->info.has_usm) {
                        batch = tile->primary_gt->usm.bb_pool->bo;
                        m->usm_batch_base_ofs = m->batch_base_ofs;
                }

                for (i = 0; i < batch->size;
                     i += vm->flags & XE_VM_FLAG_64K ? XE_64K_PAGE_SIZE :
                     XE_PAGE_SIZE) {
                        entry = vm->pt_ops->pte_encode_bo(batch, i,
                                                          pat_index, 0);

                        xe_map_wr(xe, &bo->vmap, map_ofs + level * 8, u64,
                                  entry);
                        level++;
                }
        } else {
                u64 batch_addr = xe_bo_addr(batch, 0, XE_PAGE_SIZE);

                m->batch_base_ofs = xe_migrate_vram_ofs(xe, batch_addr);

                if (xe->info.has_usm) {
                        batch = tile->primary_gt->usm.bb_pool->bo;
                        batch_addr = xe_bo_addr(batch, 0, XE_PAGE_SIZE);
                        m->usm_batch_base_ofs = xe_migrate_vram_ofs(xe, batch_addr);
                }
        }
        for (level = 1; level < num_level; level++) {
                u32 flags = 0;

                if (vm->flags & XE_VM_FLAG_64K && level == 1)
                        flags = XE_PDE_64K;

                entry = vm->pt_ops->pde_encode_bo(bo, map_ofs + (level - 1) *
                                                  XE_PAGE_SIZE, pat_index);
                xe_map_wr(xe, &bo->vmap, map_ofs + XE_PAGE_SIZE * level, u64,
                          entry | flags);
        }

        /* Write PDE's that point to our BO. */
        for (i = 0; i < num_entries - num_level; i++) {
                entry = vm->pt_ops->pde_encode_bo(bo, i * XE_PAGE_SIZE,
                                                  pat_index);

                xe_map_wr(xe, &bo->vmap, map_ofs + XE_PAGE_SIZE +
                          (i + 1) * 8, u64, entry);
        }

        /* Set up a 1GiB NULL mapping at 255GiB offset. */
        level = 2;
        xe_map_wr(xe, &bo->vmap, map_ofs + XE_PAGE_SIZE * level + 255 * 8, u64,
                  vm->pt_ops->pte_encode_addr(xe, 0, pat_index, level, IS_DGFX(xe), 0)
                  | XE_PTE_NULL);
        m->cleared_mem_ofs = (255ULL << xe_pt_shift(level));
        /* Identity map the entire vram at 256GiB offset */
        if (IS_DGFX(xe)) {
                u64 pos, ofs, flags;

                level = 2;
                ofs = map_ofs + XE_PAGE_SIZE * level + 256 * 8;
                flags = vm->pt_ops->pte_encode_addr(xe, 0, pat_index, level,
                                                    true, 0);

                /*
                 * Use 1GB pages; it shouldn't matter that the physical
                 * amount of vram is less, as long as we don't access it.
                 */
                for (pos = xe->mem.vram.dpa_base;
                     pos < xe->mem.vram.actual_physical_size + xe->mem.vram.dpa_base;
                     pos += SZ_1G, ofs += 8)
                        xe_map_wr(xe, &bo->vmap, ofs, u64, pos | flags);
        }
        /*
         * Example layout created above, with root level = 3:
         * [PT0...PT7]: kernel PT's for copy/clear; 64KiB or 4KiB PTE's
         * [PT8]: Kernel PT for VM_BIND, 4 KiB PTE's
         * [PT9...PT28]: Userspace PT's for VM_BIND, 4 KiB PTE's
         * [PT29 = PDE 0] [PT30 = PDE 1] [PT31 = PDE 2]
         *
         * This makes the lowest part of the VM point to the pagetables.
         * Hence the lowest 2M in the vm should point to itself; with a few
         * writes and flushes, other parts of the VM can be used either for
         * copying or clearing.
         *
         * For performance, the kernel reserves PDE's, so about 20 are left
         * for async VM updates.
         *
         * To make it easier to work with, each scratch PT is put in slot
         * (1 + PT #) everywhere, which allows lockless updates to scratch
         * pages by using the different addresses in VM.
         */
#define NUM_VMUSA_UNIT_PER_PAGE 32
#define VM_SA_UPDATE_UNIT_SIZE (XE_PAGE_SIZE / NUM_VMUSA_UNIT_PER_PAGE)
#define NUM_VMUSA_WRITES_PER_UNIT (VM_SA_UPDATE_UNIT_SIZE / sizeof(u64))
        drm_suballoc_manager_init(&m->vm_update_sa,
                                  (map_ofs / XE_PAGE_SIZE - NUM_KERNEL_PDE) *
                                  NUM_VMUSA_UNIT_PER_PAGE, 0);

        m->pt_bo = bo;
        return 0;
}

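/*
 * Illustrative arithmetic (assuming the example layout above, with root
 * level = 3 and 4KiB pages): each suballocation unit is 4096 / 32 = 128
 * bytes, i.e. 16 qword PTE writes (NUM_VMUSA_WRITES_PER_UNIT). map_ofs
 * spans 32 - 3 = 29 pages, so 29 - 17 = 12 pages remain for VM updates,
 * giving 12 * 32 = 384 suballocation units.
 */
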
/*
 * Due to workaround 16017236439, odd instance hardware copy engines are
 * faster than even instance ones.
 * This function returns the mask involving all fast copy engines and the
 * reserved copy engine to be used as logical mask for migrate engine.
 * Including the reserved copy engine is required to avoid deadlocks due to
 * migrate jobs servicing the faults getting stuck behind the job that faulted.
 */
static u32 xe_migrate_usm_logical_mask(struct xe_gt *gt)
{
        u32 logical_mask = 0;
        struct xe_hw_engine *hwe;
        enum xe_hw_engine_id id;

        for_each_hw_engine(hwe, gt, id) {
                if (hwe->class != XE_ENGINE_CLASS_COPY)
                        continue;

                if (!XE_WA(gt, 16017236439) ||
                    xe_gt_is_usm_hwe(gt, hwe) || hwe->instance & 1)
                        logical_mask |= BIT(hwe->logical_instance);
        }

        return logical_mask;
}

/**
 * xe_migrate_init() - Initialize a migrate context
 * @tile: Back-pointer to the tile we're initializing for.
 *
 * Return: Pointer to a migrate context on success. Error pointer on error.
 */
struct xe_migrate *xe_migrate_init(struct xe_tile *tile)
{
        struct xe_device *xe = tile_to_xe(tile);
        struct xe_gt *primary_gt = tile->primary_gt;
        struct xe_migrate *m;
        struct xe_vm *vm;
        int err;

        m = drmm_kzalloc(&xe->drm, sizeof(*m), GFP_KERNEL);
        if (!m)
                return ERR_PTR(-ENOMEM);

        m->tile = tile;

        /* Special layout, prepared below. */
        vm = xe_vm_create(xe, XE_VM_FLAG_MIGRATION |
                          XE_VM_FLAG_SET_TILE_ID(tile));
        if (IS_ERR(vm))
                return ERR_CAST(vm);

        xe_vm_lock(vm, false);
        err = xe_migrate_prepare_vm(tile, m, vm);
        xe_vm_unlock(vm);
        if (err) {
                xe_vm_close_and_put(vm);
                return ERR_PTR(err);
        }

        if (xe->info.has_usm) {
                struct xe_hw_engine *hwe = xe_gt_hw_engine(primary_gt,
                                                           XE_ENGINE_CLASS_COPY,
                                                           primary_gt->usm.reserved_bcs_instance,
                                                           false);
                u32 logical_mask = xe_migrate_usm_logical_mask(primary_gt);

                if (!hwe || !logical_mask)
                        return ERR_PTR(-EINVAL);

                m->q = xe_exec_queue_create(xe, vm, logical_mask, 1, hwe,
                                            EXEC_QUEUE_FLAG_KERNEL |
                                            EXEC_QUEUE_FLAG_PERMANENT |
                                            EXEC_QUEUE_FLAG_HIGH_PRIORITY);
        } else {
                m->q = xe_exec_queue_create_class(xe, primary_gt, vm,
                                                  XE_ENGINE_CLASS_COPY,
                                                  EXEC_QUEUE_FLAG_KERNEL |
                                                  EXEC_QUEUE_FLAG_PERMANENT);
        }
        if (IS_ERR(m->q)) {
                xe_vm_close_and_put(vm);
                return ERR_CAST(m->q);
        }

        mutex_init(&m->job_mutex);

        err = drmm_add_action_or_reset(&xe->drm, xe_migrate_fini, m);
        if (err)
                return ERR_PTR(err);

        return m;
}

static u64 max_mem_transfer_per_pass(struct xe_device *xe)
{
        if (!IS_DGFX(xe) && xe_device_has_flat_ccs(xe))
                return MAX_CCS_LIMITED_TRANSFER;

        return MAX_PREEMPTDISABLE_TRANSFER;
}

static u64 xe_migrate_res_sizes(struct xe_device *xe, struct xe_res_cursor *cur)
{
        /*
         * For VRAM we use identity mapped pages so we are limited to current
         * cursor size. For system we program the pages ourselves so we have no
         * such limit.
         */
        return min_t(u64, max_mem_transfer_per_pass(xe),
                     mem_type_is_vram(cur->mem_type) ? cur->size :
                     cur->remaining);
}

static u32 pte_update_size(struct xe_migrate *m,
                           bool is_vram,
                           struct ttm_resource *res,
                           struct xe_res_cursor *cur,
                           u64 *L0, u64 *L0_ofs, u32 *L0_pt,
                           u32 cmd_size, u32 pt_ofs, u32 avail_pts)
{
        u32 cmds = 0;

        *L0_pt = pt_ofs;
        if (!is_vram) {
                /* Clip L0 to available size */
                u64 size = min(*L0, (u64)avail_pts * SZ_2M);
                u64 num_4k_pages = DIV_ROUND_UP(size, XE_PAGE_SIZE);

                *L0 = size;
                *L0_ofs = xe_migrate_vm_addr(pt_ofs, 0);

                /* MI_STORE_DATA_IMM */
                cmds += 3 * DIV_ROUND_UP(num_4k_pages, 0x1ff);

                /* PTE's, one u64 each */
                cmds += num_4k_pages * 2;

                /* Each chunk has a single blit command */
                cmds += cmd_size;
        } else {
                /* Offset into identity map. */
                *L0_ofs = xe_migrate_vram_ofs(tile_to_xe(m->tile),
                                              cur->start + vram_region_gpu_offset(res));
                cmds += cmd_size;
        }

        return cmds;
}

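/*
 * Illustrative arithmetic for the system-memory path above:
 * MI_STORE_DATA_IMM carries at most 0x1ff qwords, so emitting N 4KiB
 * PTEs costs DIV_ROUND_UP(N, 0x1ff) commands of 3 header dwords each,
 * plus 2 dwords (one qword) per PTE. E.g. 1024 PTEs need 3 chunks:
 * 3 * 3 + 1024 * 2 = 2057 dwords.
 */
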
static void emit_pte(struct xe_migrate *m,
                     struct xe_bb *bb, u32 at_pt,
                     bool is_vram, bool is_comp_pte,
                     struct xe_res_cursor *cur,
                     u32 size, struct xe_bo *bo)
{
        struct xe_device *xe = tile_to_xe(m->tile);
        u16 pat_index;
        u32 ptes;
        u64 ofs = at_pt * XE_PAGE_SIZE;
        u64 cur_ofs;

        /* Indirect access needs compression enabled uncached PAT index */
        if (GRAPHICS_VERx100(xe) >= 2000)
                pat_index = is_comp_pte ? xe->pat.idx[XE_CACHE_NONE_COMPRESSION] :
                                          xe->pat.idx[XE_CACHE_NONE];
        else
                pat_index = xe->pat.idx[XE_CACHE_WB];

        /*
         * FIXME: Emitting VRAM PTEs to L0 PTs is forbidden. Currently
         * we're only emitting VRAM PTEs during sanity tests, so when
         * that's moved to a Kunit test, we should condition VRAM PTEs
         * on running tests.
         */
        ptes = DIV_ROUND_UP(size, XE_PAGE_SIZE);

        while (ptes) {
                u32 chunk = min(0x1ffU, ptes);

                bb->cs[bb->len++] = MI_STORE_DATA_IMM | MI_SDI_NUM_QW(chunk);
                bb->cs[bb->len++] = ofs;
                bb->cs[bb->len++] = 0;

                cur_ofs = ofs;
                ofs += chunk * 8;
                ptes -= chunk;

                while (chunk--) {
                        u64 addr, flags = 0;
                        bool devmem = false;

                        addr = xe_res_dma(cur) & PAGE_MASK;

                        if (is_vram) {
                                /* Is this a 64K PTE entry? */
                                if ((m->q->vm->flags & XE_VM_FLAG_64K) &&
                                    !(cur_ofs & (16 * 8 - 1))) {
                                        xe_tile_assert(m->tile, IS_ALIGNED(addr, SZ_64K));
                                        flags |= XE_PTE_PS64;
                                }

                                addr += vram_region_gpu_offset(bo->ttm.resource);
                                devmem = true;
                        }

                        addr = m->q->vm->pt_ops->pte_encode_addr(m->tile->xe,
                                                                 addr, pat_index,
                                                                 0, devmem, flags);
                        bb->cs[bb->len++] = lower_32_bits(addr);
                        bb->cs[bb->len++] = upper_32_bits(addr);

                        xe_res_next(cur, min_t(u32, size, PAGE_SIZE));
                        size -= PAGE_SIZE;
                        cur_ofs += 8;
                }
        }
}

#define EMIT_COPY_CCS_DW 5
static void emit_copy_ccs(struct xe_gt *gt, struct xe_bb *bb,
                          u64 dst_ofs, bool dst_is_indirect,
                          u64 src_ofs, bool src_is_indirect,
                          u64 size)
{
        struct xe_device *xe = gt_to_xe(gt);
        u32 *cs = bb->cs + bb->len;
        u32 num_ccs_blks;
        u32 num_pages;
        u32 ccs_copy_size;
        u32 mocs;

        if (GRAPHICS_VERx100(xe) >= 2000) {
                num_pages = DIV_ROUND_UP(size, XE_PAGE_SIZE);
                xe_gt_assert(gt, FIELD_FIT(XE2_CCS_SIZE_MASK, num_pages - 1));

                ccs_copy_size = REG_FIELD_PREP(XE2_CCS_SIZE_MASK, num_pages - 1);
                mocs = FIELD_PREP(XE2_XY_CTRL_SURF_MOCS_INDEX_MASK, gt->mocs.uc_index);
        } else {
                num_ccs_blks = DIV_ROUND_UP(xe_device_ccs_bytes(gt_to_xe(gt), size),
                                            NUM_CCS_BYTES_PER_BLOCK);
                xe_gt_assert(gt, FIELD_FIT(CCS_SIZE_MASK, num_ccs_blks - 1));

                ccs_copy_size = REG_FIELD_PREP(CCS_SIZE_MASK, num_ccs_blks - 1);
                mocs = FIELD_PREP(XY_CTRL_SURF_MOCS_MASK, gt->mocs.uc_index);
        }

        *cs++ = XY_CTRL_SURF_COPY_BLT |
                (src_is_indirect ? 0x0 : 0x1) << SRC_ACCESS_TYPE_SHIFT |
                (dst_is_indirect ? 0x0 : 0x1) << DST_ACCESS_TYPE_SHIFT |
                ccs_copy_size;
        *cs++ = lower_32_bits(src_ofs);
        *cs++ = upper_32_bits(src_ofs) | mocs;
        *cs++ = lower_32_bits(dst_ofs);
        *cs++ = upper_32_bits(dst_ofs) | mocs;

        bb->len = cs - bb->cs;
}

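/*
 * Note that the writes above total exactly EMIT_COPY_CCS_DW (5) dwords:
 * one command/size dword plus two 64-bit addresses. This is the per-blit
 * budget that the batch-size accounting in xe_migrate_copy() and
 * xe_migrate_clear() reserves.
 */
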
#define EMIT_COPY_DW 10
static void emit_copy(struct xe_gt *gt, struct xe_bb *bb,
                      u64 src_ofs, u64 dst_ofs, unsigned int size,
                      unsigned int pitch)
{
        struct xe_device *xe = gt_to_xe(gt);
        u32 mocs = 0;
        u32 tile_y = 0;

        xe_gt_assert(gt, size / pitch <= S16_MAX);
        xe_gt_assert(gt, pitch / 4 <= S16_MAX);
        xe_gt_assert(gt, pitch <= U16_MAX);

        if (GRAPHICS_VER(xe) >= 20)
                mocs = FIELD_PREP(XE2_XY_FAST_COPY_BLT_MOCS_INDEX_MASK, gt->mocs.uc_index);

        if (GRAPHICS_VERx100(xe) >= 1250)
                tile_y = XY_FAST_COPY_BLT_D1_SRC_TILE4 | XY_FAST_COPY_BLT_D1_DST_TILE4;

        bb->cs[bb->len++] = XY_FAST_COPY_BLT_CMD | (10 - 2);
        bb->cs[bb->len++] = XY_FAST_COPY_BLT_DEPTH_32 | pitch | tile_y | mocs;
        bb->cs[bb->len++] = 0;
        bb->cs[bb->len++] = (size / pitch) << 16 | pitch / 4;
        bb->cs[bb->len++] = lower_32_bits(dst_ofs);
        bb->cs[bb->len++] = upper_32_bits(dst_ofs);
        bb->cs[bb->len++] = 0;
        bb->cs[bb->len++] = pitch | mocs;
        bb->cs[bb->len++] = lower_32_bits(src_ofs);
        bb->cs[bb->len++] = upper_32_bits(src_ofs);
}

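/*
 * The writes above total exactly EMIT_COPY_DW (10) dwords, matching the
 * per-blit budget reserved by the batch-size accounting in
 * xe_migrate_copy().
 */
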
static int job_add_deps(struct xe_sched_job *job, struct dma_resv *resv,
                        enum dma_resv_usage usage)
{
        return drm_sched_job_add_resv_dependencies(&job->drm, resv, usage);
}

static u64 xe_migrate_batch_base(struct xe_migrate *m, bool usm)
{
        return usm ? m->usm_batch_base_ofs : m->batch_base_ofs;
}

static u32 xe_migrate_ccs_copy(struct xe_migrate *m,
                               struct xe_bb *bb,
                               u64 src_ofs, bool src_is_indirect,
                               u64 dst_ofs, bool dst_is_indirect, u32 dst_size,
                               u64 ccs_ofs, bool copy_ccs)
{
        struct xe_gt *gt = m->tile->primary_gt;
        u32 flush_flags = 0;

        if (xe_device_has_flat_ccs(gt_to_xe(gt)) && !copy_ccs && dst_is_indirect) {
                /*
                 * If the src is already in vram, then it should already
                 * have been cleared by us, or has been populated by the
                 * user. Make sure we copy the CCS aux state as-is.
                 *
                 * Otherwise if the bo doesn't have any CCS metadata attached,
                 * we still need to clear it for security reasons.
                 */
                u64 ccs_src_ofs = src_is_indirect ? src_ofs : m->cleared_mem_ofs;

                emit_copy_ccs(gt, bb,
                              dst_ofs, true,
                              ccs_src_ofs, src_is_indirect, dst_size);

                flush_flags = MI_FLUSH_DW_CCS;
        } else if (copy_ccs) {
                if (!src_is_indirect)
                        src_ofs = dst_ofs;
                else if (!dst_is_indirect)
                        dst_ofs = src_ofs;

                xe_gt_assert(gt, src_is_indirect || dst_is_indirect);

                emit_copy_ccs(gt, bb, dst_ofs, dst_is_indirect, src_ofs,
                              src_is_indirect, dst_size);
                if (src_is_indirect)
                        flush_flags = MI_FLUSH_DW_CCS;
        }

        return flush_flags;
}

/**
 * xe_migrate_copy() - Copy content of TTM resources.
 * @m: The migration context.
 * @src_bo: The buffer object @src is currently bound to.
 * @dst_bo: If copying between resources created for the same bo, set this to
 * the same value as @src_bo. If copying between buffer objects, set it to
 * the buffer object @dst is currently bound to.
 * @src: The source TTM resource.
 * @dst: The dst TTM resource.
 * @copy_only_ccs: If true, copy only CCS metadata.
 *
 * Copies the contents of @src to @dst: On flat CCS devices,
 * the CCS metadata is copied as well if needed, or if not present,
 * the CCS metadata of @dst is cleared for security reasons.
 *
 * Return: Pointer to a dma_fence representing the last copy batch, or
 * an error pointer on failure. If there is a failure, any copy operation
 * started by the function call has been synced.
 */
struct dma_fence *xe_migrate_copy(struct xe_migrate *m,
                                  struct xe_bo *src_bo,
                                  struct xe_bo *dst_bo,
                                  struct ttm_resource *src,
                                  struct ttm_resource *dst,
                                  bool copy_only_ccs)
{
        struct xe_gt *gt = m->tile->primary_gt;
        struct xe_device *xe = gt_to_xe(gt);
        struct dma_fence *fence = NULL;
        u64 size = src_bo->size;
        struct xe_res_cursor src_it, dst_it, ccs_it;
        u64 src_L0_ofs, dst_L0_ofs;
        u32 src_L0_pt, dst_L0_pt;
        u64 src_L0, dst_L0;
        int pass = 0;
        int err;
        bool src_is_pltt = src->mem_type == XE_PL_TT;
        bool dst_is_pltt = dst->mem_type == XE_PL_TT;
        bool src_is_vram = mem_type_is_vram(src->mem_type);
        bool dst_is_vram = mem_type_is_vram(dst->mem_type);
        bool copy_ccs = xe_device_has_flat_ccs(xe) &&
                xe_bo_needs_ccs_pages(src_bo) && xe_bo_needs_ccs_pages(dst_bo);
        bool copy_system_ccs = copy_ccs && (!src_is_vram || !dst_is_vram);

        /* Copying CCS between two different BOs is not supported yet. */
        if (XE_WARN_ON(copy_ccs && src_bo != dst_bo))
                return ERR_PTR(-EINVAL);

        if (src_bo != dst_bo && XE_WARN_ON(src_bo->size != dst_bo->size))
                return ERR_PTR(-EINVAL);

        if (!src_is_vram)
                xe_res_first_sg(xe_bo_sg(src_bo), 0, size, &src_it);
        else
                xe_res_first(src, 0, size, &src_it);
        if (!dst_is_vram)
                xe_res_first_sg(xe_bo_sg(dst_bo), 0, size, &dst_it);
        else
                xe_res_first(dst, 0, size, &dst_it);

        if (copy_system_ccs)
                xe_res_first_sg(xe_bo_sg(src_bo), xe_bo_ccs_pages_start(src_bo),
                                PAGE_ALIGN(xe_device_ccs_bytes(xe, size)),
                                &ccs_it);

        while (size) {
                u32 batch_size = 2; /* arb_clear() + MI_BATCH_BUFFER_END */
                struct xe_sched_job *job;
                struct xe_bb *bb;
                u32 flush_flags;
                u32 update_idx;
                u64 ccs_ofs, ccs_size;
                u32 ccs_pt;

                bool usm = xe->info.has_usm;
                u32 avail_pts = max_mem_transfer_per_pass(xe) / LEVEL0_PAGE_TABLE_ENCODE_SIZE;

                src_L0 = xe_migrate_res_sizes(xe, &src_it);
                dst_L0 = xe_migrate_res_sizes(xe, &dst_it);

                drm_dbg(&xe->drm, "Pass %u, sizes: %llu & %llu\n",
                        pass++, src_L0, dst_L0);

                src_L0 = min(src_L0, dst_L0);

                batch_size += pte_update_size(m, src_is_vram, src, &src_it, &src_L0,
                                              &src_L0_ofs, &src_L0_pt, 0, 0,
                                              avail_pts);

                batch_size += pte_update_size(m, dst_is_vram, dst, &dst_it, &src_L0,
                                              &dst_L0_ofs, &dst_L0_pt, 0,
                                              avail_pts, avail_pts);

                if (copy_system_ccs) {
                        ccs_size = xe_device_ccs_bytes(xe, src_L0);
                        batch_size += pte_update_size(m, false, NULL, &ccs_it, &ccs_size,
                                                      &ccs_ofs, &ccs_pt, 0,
                                                      2 * avail_pts,
                                                      avail_pts);
                }

                /* Add copy commands size here */
                batch_size += ((copy_only_ccs) ? 0 : EMIT_COPY_DW) +
                        ((xe_device_has_flat_ccs(xe) ? EMIT_COPY_CCS_DW : 0));

                bb = xe_bb_new(gt, batch_size, usm);
                if (IS_ERR(bb)) {
                        err = PTR_ERR(bb);
                        goto err_sync;
                }

                if (!src_is_vram)
                        emit_pte(m, bb, src_L0_pt, src_is_vram, true, &src_it, src_L0,
                                 src_bo);
                else
                        xe_res_next(&src_it, src_L0);

                if (!dst_is_vram)
                        emit_pte(m, bb, dst_L0_pt, dst_is_vram, true, &dst_it, src_L0,
                                 dst_bo);
                else
                        xe_res_next(&dst_it, src_L0);

                if (copy_system_ccs)
                        emit_pte(m, bb, ccs_pt, false, false, &ccs_it, ccs_size, src_bo);

                bb->cs[bb->len++] = MI_BATCH_BUFFER_END;
                update_idx = bb->len;

                if (!copy_only_ccs)
                        emit_copy(gt, bb, src_L0_ofs, dst_L0_ofs, src_L0, XE_PAGE_SIZE);

                flush_flags = xe_migrate_ccs_copy(m, bb, src_L0_ofs,
                                                  IS_DGFX(xe) ? src_is_vram : src_is_pltt,
                                                  dst_L0_ofs,
                                                  IS_DGFX(xe) ? dst_is_vram : dst_is_pltt,
                                                  src_L0, ccs_ofs, copy_ccs);

                mutex_lock(&m->job_mutex);
                job = xe_bb_create_migration_job(m->q, bb,
                                                 xe_migrate_batch_base(m, usm),
                                                 update_idx);
                if (IS_ERR(job)) {
                        err = PTR_ERR(job);
                        goto err;
                }

                xe_sched_job_add_migrate_flush(job, flush_flags);
                if (!fence) {
                        err = job_add_deps(job, src_bo->ttm.base.resv,
                                           DMA_RESV_USAGE_BOOKKEEP);
                        if (!err && src_bo != dst_bo)
                                err = job_add_deps(job, dst_bo->ttm.base.resv,
                                                   DMA_RESV_USAGE_BOOKKEEP);
                        if (err)
                                goto err_job;
                }

                xe_sched_job_arm(job);
                dma_fence_put(fence);
                fence = dma_fence_get(&job->drm.s_fence->finished);
                xe_sched_job_push(job);

                dma_fence_put(m->fence);
                m->fence = dma_fence_get(fence);

                mutex_unlock(&m->job_mutex);

                xe_bb_free(bb, fence);
                size -= src_L0;
                continue;

err_job:
                xe_sched_job_put(job);
err:
                mutex_unlock(&m->job_mutex);
                xe_bb_free(bb, NULL);
err_sync:
                /* Sync partial copy if any. FIXME: under job_mutex? */
                if (fence) {
                        dma_fence_wait(fence, false);
                        dma_fence_put(fence);
                }

                return ERR_PTR(err);
        }

        return fence;
}

static void emit_clear_link_copy(struct xe_gt *gt, struct xe_bb *bb, u64 src_ofs,
                                 u32 size, u32 pitch)
{
        struct xe_device *xe = gt_to_xe(gt);
        u32 *cs = bb->cs + bb->len;
        u32 len = PVC_MEM_SET_CMD_LEN_DW;

        *cs++ = PVC_MEM_SET_CMD | PVC_MEM_SET_MATRIX | (len - 2);
        *cs++ = pitch - 1;
        *cs++ = (size / pitch) - 1;
        *cs++ = pitch - 1;
        *cs++ = lower_32_bits(src_ofs);
        *cs++ = upper_32_bits(src_ofs);
        if (GRAPHICS_VERx100(xe) >= 2000)
                *cs++ = FIELD_PREP(XE2_MEM_SET_MOCS_INDEX_MASK, gt->mocs.uc_index);
        else
                *cs++ = FIELD_PREP(PVC_MEM_SET_MOCS_INDEX_MASK, gt->mocs.uc_index);

        xe_gt_assert(gt, cs - bb->cs == len + bb->len);

        bb->len += len;
}

static void emit_clear_main_copy(struct xe_gt *gt, struct xe_bb *bb,
                                 u64 src_ofs, u32 size, u32 pitch, bool is_vram)
{
        struct xe_device *xe = gt_to_xe(gt);
        u32 *cs = bb->cs + bb->len;
        u32 len = XY_FAST_COLOR_BLT_DW;

        if (GRAPHICS_VERx100(xe) < 1250)
                len = 11;

        *cs++ = XY_FAST_COLOR_BLT_CMD | XY_FAST_COLOR_BLT_DEPTH_32 |
                (len - 2);
        if (GRAPHICS_VERx100(xe) >= 2000)
                *cs++ = FIELD_PREP(XE2_XY_FAST_COLOR_BLT_MOCS_INDEX_MASK, gt->mocs.uc_index) |
                        (pitch - 1);
        else
                *cs++ = FIELD_PREP(XY_FAST_COLOR_BLT_MOCS_MASK, gt->mocs.uc_index) |
                        (pitch - 1);
        *cs++ = 0;
        *cs++ = (size / pitch) << 16 | pitch / 4;
        *cs++ = lower_32_bits(src_ofs);
        *cs++ = upper_32_bits(src_ofs);
        *cs++ = (is_vram ? 0x0 : 0x1) << XY_FAST_COLOR_BLT_MEM_TYPE_SHIFT;
        *cs++ = 0;
        *cs++ = 0;
        *cs++ = 0;
        *cs++ = 0;

        if (len > 11) {
                *cs++ = 0;
                *cs++ = 0;
                *cs++ = 0;
        }

        xe_gt_assert(gt, cs - bb->cs == len + bb->len);

        bb->len += len;
}

static bool has_service_copy_support(struct xe_gt *gt)
{
        /*
         * What we care about is whether the architecture was designed with
         * service copy functionality (specifically the new MEM_SET / MEM_COPY
         * instructions) so check the architectural engine list rather than the
         * actual list since these instructions are usable on BCS0 even if
         * all of the actual service copy engines (BCS1-BCS8) have been fused
         * off.
         */
        return gt->info.__engine_mask & GENMASK(XE_HW_ENGINE_BCS8,
                                                XE_HW_ENGINE_BCS1);
}

static u32 emit_clear_cmd_len(struct xe_gt *gt)
{
        if (has_service_copy_support(gt))
                return PVC_MEM_SET_CMD_LEN_DW;
        else
                return XY_FAST_COLOR_BLT_DW;
}

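/*
 * Illustrative note: GENMASK(XE_HW_ENGINE_BCS8, XE_HW_ENGINE_BCS1) builds
 * a contiguous mask over the BCS1..BCS8 engine IDs, so the check in
 * has_service_copy_support() asks whether the architecture defines any
 * link/service copy engine at all, independent of fusing.
 */
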
static void emit_clear(struct xe_gt *gt, struct xe_bb *bb, u64 src_ofs,
                       u32 size, u32 pitch, bool is_vram)
{
        if (has_service_copy_support(gt))
                emit_clear_link_copy(gt, bb, src_ofs, size, pitch);
        else
                emit_clear_main_copy(gt, bb, src_ofs, size, pitch,
                                     is_vram);
}

/**
 * xe_migrate_clear() - Clear content of a TTM resource.
 * @m: The migration context.
 * @bo: The buffer object @dst is currently bound to.
 * @dst: The dst TTM resource to be cleared.
 *
 * Clear the contents of @dst to zero. On flat CCS devices,
 * the CCS metadata is cleared to zero as well on VRAM destinations.
 * TODO: Eliminate the @bo argument.
 *
 * Return: Pointer to a dma_fence representing the last clear batch, or
 * an error pointer on failure. If there is a failure, any clear operation
 * started by the function call has been synced.
 */
struct dma_fence *xe_migrate_clear(struct xe_migrate *m,
                                   struct xe_bo *bo,
                                   struct ttm_resource *dst)
{
        bool clear_vram = mem_type_is_vram(dst->mem_type);
        struct xe_gt *gt = m->tile->primary_gt;
        struct xe_device *xe = gt_to_xe(gt);
        bool clear_system_ccs = (xe_bo_needs_ccs_pages(bo) && !IS_DGFX(xe)) ? true : false;
        struct dma_fence *fence = NULL;
        u64 size = bo->size;
        struct xe_res_cursor src_it;
        struct ttm_resource *src = dst;
        int err;
        int pass = 0;

        if (!clear_vram)
                xe_res_first_sg(xe_bo_sg(bo), 0, bo->size, &src_it);
        else
                xe_res_first(src, 0, bo->size, &src_it);

        while (size) {
                u64 clear_L0_ofs;
                u32 clear_L0_pt;
                u32 flush_flags = 0;
                u64 clear_L0;
                struct xe_sched_job *job;
                struct xe_bb *bb;
                u32 batch_size, update_idx;

                bool usm = xe->info.has_usm;
                u32 avail_pts = max_mem_transfer_per_pass(xe) / LEVEL0_PAGE_TABLE_ENCODE_SIZE;

                clear_L0 = xe_migrate_res_sizes(xe, &src_it);

                drm_dbg(&xe->drm, "Pass %u, size: %llu\n", pass++, clear_L0);

                /* Calculate final sizes and batch size.. */
                batch_size = 2 +
                        pte_update_size(m, clear_vram, src, &src_it,
                                        &clear_L0, &clear_L0_ofs, &clear_L0_pt,
                                        clear_system_ccs ? 0 : emit_clear_cmd_len(gt), 0,
                                        avail_pts);

                if (xe_device_has_flat_ccs(xe))
                        batch_size += EMIT_COPY_CCS_DW;

                /* Clear commands */

                if (WARN_ON_ONCE(!clear_L0))
                        break;

                bb = xe_bb_new(gt, batch_size, usm);
                if (IS_ERR(bb)) {
                        err = PTR_ERR(bb);
                        goto err_sync;
                }

                size -= clear_L0;
                /* Preemption is enabled again by the ring ops. */
                if (!clear_vram)
                        emit_pte(m, bb, clear_L0_pt, clear_vram, true, &src_it, clear_L0,
                                 bo);
                else
                        xe_res_next(&src_it, clear_L0);

                bb->cs[bb->len++] = MI_BATCH_BUFFER_END;
                update_idx = bb->len;

                if (!clear_system_ccs)
                        emit_clear(gt, bb, clear_L0_ofs, clear_L0, XE_PAGE_SIZE, clear_vram);

                if (xe_device_has_flat_ccs(xe)) {
                        emit_copy_ccs(gt, bb, clear_L0_ofs, true,
                                      m->cleared_mem_ofs, false, clear_L0);
                        flush_flags = MI_FLUSH_DW_CCS;
                }

                mutex_lock(&m->job_mutex);
                job = xe_bb_create_migration_job(m->q, bb,
                                                 xe_migrate_batch_base(m, usm),
                                                 update_idx);
                if (IS_ERR(job)) {
                        err = PTR_ERR(job);
                        goto err;
                }

                xe_sched_job_add_migrate_flush(job, flush_flags);
                if (!fence) {
                        /*
                         * There can't be anything userspace related at this
                         * point, so we just need to respect any potential move
                         * fences, which are always tracked as
                         * DMA_RESV_USAGE_KERNEL.
                         */
                        err = job_add_deps(job, bo->ttm.base.resv,
                                           DMA_RESV_USAGE_KERNEL);
                        if (err)
                                goto err_job;
                }

                xe_sched_job_arm(job);
                dma_fence_put(fence);
                fence = dma_fence_get(&job->drm.s_fence->finished);
                xe_sched_job_push(job);

                dma_fence_put(m->fence);
                m->fence = dma_fence_get(fence);

                mutex_unlock(&m->job_mutex);

                xe_bb_free(bb, fence);
                continue;

err_job:
                xe_sched_job_put(job);
err:
                mutex_unlock(&m->job_mutex);
                xe_bb_free(bb, NULL);
err_sync:
                /* Sync partial copies if any. FIXME: job_mutex? */
                if (fence) {
                        dma_fence_wait(m->fence, false);
                        dma_fence_put(fence);
                }

                return ERR_PTR(err);
        }

        if (clear_system_ccs)
                bo->ccs_cleared = true;

        return fence;
}

static void write_pgtable(struct xe_tile *tile, struct xe_bb *bb, u64 ppgtt_ofs,
                          const struct xe_vm_pgtable_update *update,
                          struct xe_migrate_pt_update *pt_update)
{
        const struct xe_migrate_pt_update_ops *ops = pt_update->ops;
        u32 chunk;
        u32 ofs = update->ofs, size = update->qwords;

        /*
         * If we have 512 entries (max), we would populate it ourselves,
         * and update the PDE above it to the new pointer.
         * The only time this can happen is if we have to update the top
         * PDE. This requires a BO that is almost vm->size big.
         *
         * This shouldn't be possible in practice.. might change when 16K
         * pages are used. Hence the assert.
         */
        xe_tile_assert(tile, update->qwords <= 0x1ff);
        if (!ppgtt_ofs)
                ppgtt_ofs = xe_migrate_vram_ofs(tile_to_xe(tile),
                                                xe_bo_addr(update->pt_bo, 0,
                                                           XE_PAGE_SIZE));

        do {
                u64 addr = ppgtt_ofs + ofs * 8;

                chunk = min(update->qwords, 0x1ffU);

                /* Ensure populatefn can do memset64 by aligning bb->cs */
                if (!(bb->len & 1))
                        bb->cs[bb->len++] = MI_NOOP;

                bb->cs[bb->len++] = MI_STORE_DATA_IMM | MI_SDI_NUM_QW(chunk);
                bb->cs[bb->len++] = lower_32_bits(addr);
                bb->cs[bb->len++] = upper_32_bits(addr);
                ops->populate(pt_update, tile, NULL, bb->cs + bb->len, ofs, chunk,
                              update);

                bb->len += chunk * 2;
                ofs += chunk;
                size -= chunk;
        } while (size);
}

struct xe_vm *xe_migrate_get_vm(struct xe_migrate *m)
{
        return xe_vm_get(m->q->vm);
}

#if IS_ENABLED(CONFIG_DRM_XE_KUNIT_TEST)
struct migrate_test_params {
        struct xe_test_priv base;
        bool force_gpu;
};

#define to_migrate_test_params(_priv) \
        container_of(_priv, struct migrate_test_params, base)
#endif

static struct dma_fence *
xe_migrate_update_pgtables_cpu(struct xe_migrate *m,
                               struct xe_vm *vm, struct xe_bo *bo,
                               const struct xe_vm_pgtable_update *updates,
                               u32 num_updates, bool wait_vm,
                               struct xe_migrate_pt_update *pt_update)
{
        XE_TEST_DECLARE(struct migrate_test_params *test =
                                to_migrate_test_params
                                (xe_cur_kunit_priv(XE_TEST_LIVE_MIGRATE));)
        const struct xe_migrate_pt_update_ops *ops = pt_update->ops;
        struct dma_fence *fence;
        int err;
        u32 i;

        if (XE_TEST_ONLY(test && test->force_gpu))
                return ERR_PTR(-ETIME);

        if (bo && !dma_resv_test_signaled(bo->ttm.base.resv,
                                          DMA_RESV_USAGE_KERNEL))
                return ERR_PTR(-ETIME);

        if (wait_vm && !dma_resv_test_signaled(xe_vm_resv(vm),
                                               DMA_RESV_USAGE_BOOKKEEP))
                return ERR_PTR(-ETIME);

        if (ops->pre_commit) {
                pt_update->job = NULL;
                err = ops->pre_commit(pt_update);
                if (err)
                        return ERR_PTR(err);
        }
        for (i = 0; i < num_updates; i++) {
                const struct xe_vm_pgtable_update *update = &updates[i];

                ops->populate(pt_update, m->tile, &update->pt_bo->vmap, NULL,
                              update->ofs, update->qwords, update);
        }

        trace_xe_vm_cpu_bind(vm);
        xe_device_wmb(vm->xe);

        fence = dma_fence_get_stub();

        return fence;
}

static bool no_in_syncs(struct xe_vm *vm, struct xe_exec_queue *q,
                        struct xe_sync_entry *syncs, u32 num_syncs)
{
        struct dma_fence *fence;
        int i;

        for (i = 0; i < num_syncs; i++) {
                fence = syncs[i].fence;

                if (fence && !test_bit(DMA_FENCE_FLAG_SIGNALED_BIT,
                                       &fence->flags))
                        return false;
        }
        if (q) {
                fence = xe_exec_queue_last_fence_get(q, vm);
                if (!test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &fence->flags))
                        return false;
        }

        return true;
}

/**
 * xe_migrate_update_pgtables() - Pipelined page-table update
 * @m: The migrate context.
 * @vm: The vm we'll be updating.
 * @bo: The bo whose dma-resv we will await before updating, or NULL if userptr.
 * @q: The exec queue to be used for the update or NULL if the default
 * migration engine is to be used.
 * @updates: An array of update descriptors.
 * @num_updates: Number of descriptors in @updates.
 * @syncs: Array of xe_sync_entry to await before updating. Note that waits
 * will block the engine timeline.
 * @num_syncs: Number of entries in @syncs.
 * @pt_update: Pointer to a struct xe_migrate_pt_update, which contains
 * pointers to callback functions and, if subclassed, private arguments to
 * those functions.
 *
 * Perform a pipelined page-table update. The update descriptors are typically
 * built under the same lock critical section as a call to this function. If
 * using the default engine for the updates, they will be performed in the
 * order they grab the job_mutex. If different engines are used, external
 * synchronization is needed for overlapping updates to maintain page-table
 * consistency. Note that the meaning of "overlapping" is that the updates
 * touch the same page-table, which might be a higher-level page-directory.
 * If no pipelining is needed, then updates may be performed by the cpu.
 *
 * Return: A dma_fence that, when signaled, indicates the update completion.
 */
struct dma_fence *
xe_migrate_update_pgtables(struct xe_migrate *m,
                           struct xe_vm *vm,
                           struct xe_bo *bo,
                           struct xe_exec_queue *q,
                           const struct xe_vm_pgtable_update *updates,
                           u32 num_updates,
                           struct xe_sync_entry *syncs, u32 num_syncs,
                           struct xe_migrate_pt_update *pt_update)
{
        const struct xe_migrate_pt_update_ops *ops = pt_update->ops;
        struct xe_tile *tile = m->tile;
        struct xe_gt *gt = tile->primary_gt;
        struct xe_device *xe = tile_to_xe(tile);
        struct xe_sched_job *job;
        struct dma_fence *fence;
        struct drm_suballoc *sa_bo = NULL;
        struct xe_vma *vma = pt_update->vma;
        struct xe_bb *bb;
        u32 i, batch_size, ppgtt_ofs, update_idx, page_ofs = 0;
        u64 addr;
        int err = 0;
        bool usm = !q && xe->info.has_usm;
        bool first_munmap_rebind = vma &&
                vma->gpuva.flags & XE_VMA_FIRST_REBIND;
        struct xe_exec_queue *q_override = !q ? m->q : q;
        u16 pat_index = xe->pat.idx[XE_CACHE_WB];

        /* Use the CPU if no in syncs and engine is idle */
        if (no_in_syncs(vm, q, syncs, num_syncs) && xe_exec_queue_is_idle(q_override)) {
                fence = xe_migrate_update_pgtables_cpu(m, vm, bo, updates,
                                                       num_updates,
                                                       first_munmap_rebind,
                                                       pt_update);
                if (!IS_ERR(fence) || fence == ERR_PTR(-EAGAIN))
                        return fence;
        }

        /* fixed + PTE entries */
        if (IS_DGFX(xe))
                batch_size = 2;
        else
                batch_size = 6 + num_updates * 2;

        for (i = 0; i < num_updates; i++) {
                u32 num_cmds = DIV_ROUND_UP(updates[i].qwords, 0x1ff);

                /* align noop + MI_STORE_DATA_IMM cmd prefix */
                batch_size += 4 * num_cmds + updates[i].qwords * 2;
        }

        /*
         * XXX: Create temp bo to copy from, if batch_size becomes too big?
         *
         * Worst case: Sum(2 * (each lower level page size) + (top level page size))
         * Should be reasonably bounded.
         */
        xe_tile_assert(tile, batch_size < SZ_128K);

        bb = xe_bb_new(gt, batch_size, !q && xe->info.has_usm);
        if (IS_ERR(bb))
                return ERR_CAST(bb);

        /* For sysmem PTE's, need to map them in our hole. */
        if (!IS_DGFX(xe)) {
                ppgtt_ofs = NUM_KERNEL_PDE - 1;
                if (q) {
                        xe_tile_assert(tile, num_updates <= NUM_VMUSA_WRITES_PER_UNIT);

                        sa_bo = drm_suballoc_new(&m->vm_update_sa, 1,
                                                 GFP_KERNEL, true, 0);
                        if (IS_ERR(sa_bo)) {
                                err = PTR_ERR(sa_bo);
                                goto err;
                        }

                        ppgtt_ofs = NUM_KERNEL_PDE +
                                (drm_suballoc_soffset(sa_bo) /
                                 NUM_VMUSA_UNIT_PER_PAGE);
                        page_ofs = (drm_suballoc_soffset(sa_bo) %
                                    NUM_VMUSA_UNIT_PER_PAGE) *
                                VM_SA_UPDATE_UNIT_SIZE;
                }

                /* Map our PT's to gtt */
                bb->cs[bb->len++] = MI_STORE_DATA_IMM | MI_SDI_NUM_QW(num_updates);
                bb->cs[bb->len++] = ppgtt_ofs * XE_PAGE_SIZE + page_ofs;
                bb->cs[bb->len++] = 0; /* upper_32_bits */

                for (i = 0; i < num_updates; i++) {
                        struct xe_bo *pt_bo = updates[i].pt_bo;

                        xe_tile_assert(tile, pt_bo->size == SZ_4K);

                        addr = vm->pt_ops->pte_encode_bo(pt_bo, 0, pat_index, 0);
                        bb->cs[bb->len++] = lower_32_bits(addr);
                        bb->cs[bb->len++] = upper_32_bits(addr);
                }

                bb->cs[bb->len++] = MI_BATCH_BUFFER_END;
                update_idx = bb->len;

                addr = xe_migrate_vm_addr(ppgtt_ofs, 0) +
                        (page_ofs / sizeof(u64)) * XE_PAGE_SIZE;
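                /*
                 * Illustrative note: each qword written through the
                 * suballocated unit maps one XE_PAGE_SIZE window of VM
                 * address space, hence the (page_ofs / sizeof(u64)) *
                 * XE_PAGE_SIZE scaling above.
                 */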
                for (i = 0; i < num_updates; i++)
                        write_pgtable(tile, bb, addr + i * XE_PAGE_SIZE,
                                      &updates[i], pt_update);
        } else {
                /* phys pages, no preamble required */
                bb->cs[bb->len++] = MI_BATCH_BUFFER_END;
                update_idx = bb->len;

                for (i = 0; i < num_updates; i++)
                        write_pgtable(tile, bb, 0, &updates[i], pt_update);
        }

        if (!q)
                mutex_lock(&m->job_mutex);

        job = xe_bb_create_migration_job(q ?: m->q, bb,
                                         xe_migrate_batch_base(m, usm),
                                         update_idx);
        if (IS_ERR(job)) {
                err = PTR_ERR(job);
                goto err_bb;
        }

        /* Wait on BO move */
        if (bo) {
                err = job_add_deps(job, bo->ttm.base.resv,
                                   DMA_RESV_USAGE_KERNEL);
                if (err)
                        goto err_job;
        }

        /*
         * Munmap style VM unbind, need to wait for all jobs to be complete /
         * trigger preempts before moving forward
         */
        if (first_munmap_rebind) {
                err = job_add_deps(job, xe_vm_resv(vm),
                                   DMA_RESV_USAGE_BOOKKEEP);
                if (err)
                        goto err_job;
        }

        err = xe_sched_job_last_fence_add_dep(job, vm);
        for (i = 0; !err && i < num_syncs; i++)
                err = xe_sync_entry_add_deps(&syncs[i], job);
        if (err)
                goto err_job;

        if (ops->pre_commit) {
                pt_update->job = job;
                err = ops->pre_commit(pt_update);
                if (err)
                        goto err_job;
        }
        xe_sched_job_arm(job);
        fence = dma_fence_get(&job->drm.s_fence->finished);
        xe_sched_job_push(job);

        if (!q)
                mutex_unlock(&m->job_mutex);

        xe_bb_free(bb, fence);
        drm_suballoc_free(sa_bo, fence);

        return fence;

err_job:
        xe_sched_job_put(job);
err_bb:
        if (!q)
                mutex_unlock(&m->job_mutex);
        xe_bb_free(bb, NULL);
err:
        drm_suballoc_free(sa_bo, NULL);
        return ERR_PTR(err);
}

/**
 * xe_migrate_wait() - Complete all operations using the xe_migrate context
 * @m: Migrate context to wait for.
 *
 * Waits until the GPU no longer uses the migrate context's default engine
 * or its page-table objects. FIXME: What about separate page-table update
 * engines?
 */
void xe_migrate_wait(struct xe_migrate *m)
{
        if (m->fence)
                dma_fence_wait(m->fence, false);
}

#if IS_ENABLED(CONFIG_DRM_XE_KUNIT_TEST)
#include "tests/xe_migrate.c"
#endif