1 // SPDX-License-Identifier: MIT
3 * Copyright © 2020 Intel Corporation
6 #include "xe_migrate.h"
8 #include <linux/bitfield.h>
9 #include <linux/sizes.h>
11 #include <drm/drm_managed.h>
12 #include <drm/ttm/ttm_tt.h>
13 #include <uapi/drm/xe_drm.h>
15 #include <generated/xe_wa_oob.h>
17 #include "instructions/xe_gpu_commands.h"
18 #include "instructions/xe_mi_commands.h"
19 #include "regs/xe_gtt_defs.h"
20 #include "tests/xe_test.h"
21 #include "xe_assert.h"
24 #include "xe_exec_queue.h"
27 #include "xe_hw_engine.h"
32 #include "xe_res_cursor.h"
33 #include "xe_sched_job.h"
35 #include "xe_trace_bo.h"
39 * struct xe_migrate - migrate context.
42 /** @q: Default exec queue used for migration */
43 struct xe_exec_queue *q;
44 /** @tile: Backpointer to the tile this struct xe_migrate belongs to. */
/** @job_mutex: Timeline mutex for @q. */
47 struct mutex job_mutex;
48 /** @pt_bo: Page-table buffer object. */
50 /** @batch_base_ofs: VM offset of the migration batch buffer */
52 /** @usm_batch_base_ofs: VM offset of the usm batch buffer */
53 u64 usm_batch_base_ofs;
54 /** @cleared_mem_ofs: VM offset of @cleared_bo. */
57 * @fence: dma-fence representing the last migration job batch.
58 * Protected by @job_mutex.
60 struct dma_fence *fence;
62 * @vm_update_sa: For integrated, used to suballocate page-tables
65 struct drm_suballoc_manager vm_update_sa;
/** @min_chunk_size: For dgfx, minimum chunk size */
70 #define MAX_PREEMPTDISABLE_TRANSFER SZ_8M /* Around 1ms. */
71 #define MAX_CCS_LIMITED_TRANSFER SZ_4M /* XE_PAGE_SIZE * (FIELD_MAX(XE2_CCS_SIZE_MASK) + 1) */
72 #define NUM_KERNEL_PDE 15
73 #define NUM_PT_SLOTS 32
74 #define LEVEL0_PAGE_TABLE_ENCODE_SIZE SZ_2M
75 #define MAX_NUM_PTE 512
76 #define IDENTITY_OFFSET 256ULL
 * Although MI_STORE_DATA_IMM's "length" field is 10 bits wide, 0x3FE is the largest
80 * legal value accepted. Since that instruction field is always stored in
81 * (val-2) format, this translates to 0x400 dwords for the true maximum length
82 * of the instruction. Subtracting the instruction header (1 dword) and
83 * address (2 dwords), that leaves 0x3FD dwords (0x1FE qwords) for PTE values.
85 #define MAX_PTE_PER_SDI 0x1FE
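/*
 * A minimal sanity-check sketch of the arithmetic above (illustrative only,
 * assuming static_assert() is reachable via <linux/build_bug.h> through the
 * existing includes): 0x3FE in the (val-2) "length" field means 0x400 dwords
 * total; dropping the header (1 dword) and address (2 dwords) leaves 0x3FD
 * dwords, i.e. 0x1FE whole qwords of PTE payload.
 */
static_assert(MAX_PTE_PER_SDI == ((0x3FE + 2) - 1 - 2) / 2);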
88 * xe_tile_migrate_exec_queue() - Get this tile's migrate exec queue.
91 * Returns the default migrate exec queue of this tile.
93 * Return: The default migrate exec queue
95 struct xe_exec_queue *xe_tile_migrate_exec_queue(struct xe_tile *tile)
97 return tile->migrate->q;
100 static void xe_migrate_fini(struct drm_device *dev, void *arg)
102 struct xe_migrate *m = arg;
104 xe_vm_lock(m->q->vm, false);
105 xe_bo_unpin(m->pt_bo);
106 xe_vm_unlock(m->q->vm);
108 dma_fence_put(m->fence);
110 drm_suballoc_manager_fini(&m->vm_update_sa);
111 mutex_destroy(&m->job_mutex);
112 xe_vm_close_and_put(m->q->vm);
113 xe_exec_queue_put(m->q);
116 static u64 xe_migrate_vm_addr(u64 slot, u32 level)
118 XE_WARN_ON(slot >= NUM_PT_SLOTS);
120 /* First slot is reserved for mapping of PT bo and bb, start from 1 */
121 return (slot + 1ULL) << xe_pt_shift(level + 1);
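/*
 * Worked example (illustrative, assuming the usual 4K GTT page layout where
 * xe_pt_shift(1) resolves to 21): at level 0 each slot spans 2MiB, and the
 * "+ 1" skips the first 2MiB window, which maps the PT BO and batch buffers:
 *
 *   xe_migrate_vm_addr(0, 0) == 1ULL << 21 == SZ_2M
 *   xe_migrate_vm_addr(7, 0) == 8ULL << 21 == SZ_16M
 */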
124 static u64 xe_migrate_vram_ofs(struct xe_device *xe, u64 addr, bool is_comp_pte)
127 * Remove the DPA to get a correct offset into identity table for the
130 u64 identity_offset = IDENTITY_OFFSET;
132 if (GRAPHICS_VER(xe) >= 20 && is_comp_pte)
133 identity_offset += DIV_ROUND_UP_ULL(xe->mem.vram.actual_physical_size, SZ_1G);
135 addr -= xe->mem.vram.dpa_base;
136 return addr + (identity_offset << xe_pt_shift(2));
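/*
 * Worked example (illustrative, assuming xe_pt_shift(2) resolves to 30, i.e.
 * 1GiB per level-2 entry): with IDENTITY_OFFSET == 256, a VRAM page at
 * dpa_base + SZ_4K resolves to (256ULL << 30) + SZ_4K, i.e. 4KiB into the
 * identity map at the 256GiB mark. The compressed-PAT lookup starts a further
 * "VRAM size in GiB" entries up, as added just above.
 */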
139 static void xe_migrate_program_identity(struct xe_device *xe, struct xe_vm *vm, struct xe_bo *bo,
140 u64 map_ofs, u64 vram_offset, u16 pat_index, u64 pt_2m_ofs)
144 /* XXX: Unclear if this should be usable_size? */
145 u64 vram_limit = xe->mem.vram.actual_physical_size +
146 xe->mem.vram.dpa_base;
149 ofs = map_ofs + XE_PAGE_SIZE * level + vram_offset * 8;
150 flags = vm->pt_ops->pte_encode_addr(xe, 0, pat_index, level,
153 xe_assert(xe, IS_ALIGNED(xe->mem.vram.usable_size, SZ_2M));
 * Use 1GB pages when possible; the last chunk always uses 2M
 * pages, as mixing reserved memory (stolen, WOCPM) with a single
 * mapping is not allowed on certain platforms.
160 for (pos = xe->mem.vram.dpa_base; pos < vram_limit;
161 pos += SZ_1G, ofs += 8) {
162 if (pos + SZ_1G >= vram_limit) {
163 entry = vm->pt_ops->pde_encode_bo(bo, pt_2m_ofs,
165 xe_map_wr(xe, &bo->vmap, ofs, u64, entry);
167 flags = vm->pt_ops->pte_encode_addr(xe, 0,
172 for (ofs = pt_2m_ofs; pos < vram_limit;
173 pos += SZ_2M, ofs += 8)
174 xe_map_wr(xe, &bo->vmap, ofs, u64, pos | flags);
break; /* Ensure the pos == vram_limit assertion below holds */
178 xe_map_wr(xe, &bo->vmap, ofs, u64, pos | flags);
181 xe_assert(xe, pos == vram_limit);
184 static int xe_migrate_prepare_vm(struct xe_tile *tile, struct xe_migrate *m,
187 struct xe_device *xe = tile_to_xe(tile);
188 u16 pat_index = xe->pat.idx[XE_CACHE_WB];
190 u32 num_entries = NUM_PT_SLOTS, num_level = vm->pt_root[id]->level;
191 #define VRAM_IDENTITY_MAP_COUNT 2
192 u32 num_setup = num_level + VRAM_IDENTITY_MAP_COUNT;
193 #undef VRAM_IDENTITY_MAP_COUNT
194 u32 map_ofs, level, i;
195 struct xe_bo *bo, *batch = tile->mem.kernel_bb_pool->bo;
198 /* Can't bump NUM_PT_SLOTS too high */
199 BUILD_BUG_ON(NUM_PT_SLOTS > SZ_2M/XE_PAGE_SIZE);
200 /* Must be a multiple of 64K to support all platforms */
201 BUILD_BUG_ON(NUM_PT_SLOTS * XE_PAGE_SIZE % SZ_64K);
202 /* And one slot reserved for the 4KiB page table updates */
203 BUILD_BUG_ON(!(NUM_KERNEL_PDE & 1));
205 /* Need to be sure everything fits in the first PT, or create more */
206 xe_tile_assert(tile, m->batch_base_ofs + batch->size < SZ_2M);
208 bo = xe_bo_create_pin_map(vm->xe, tile, vm,
209 num_entries * XE_PAGE_SIZE,
211 XE_BO_FLAG_VRAM_IF_DGFX(tile) |
216 /* PT30 & PT31 reserved for 2M identity map */
217 pt29_ofs = bo->size - 3 * XE_PAGE_SIZE;
218 entry = vm->pt_ops->pde_encode_bo(bo, pt29_ofs, pat_index);
219 xe_pt_write(xe, &vm->pt_root[id]->bo->vmap, 0, entry);
221 map_ofs = (num_entries - num_setup) * XE_PAGE_SIZE;
223 /* Map the entire BO in our level 0 pt */
224 for (i = 0, level = 0; i < num_entries; level++) {
225 entry = vm->pt_ops->pte_encode_bo(bo, i * XE_PAGE_SIZE,
228 xe_map_wr(xe, &bo->vmap, map_ofs + level * 8, u64, entry);
230 if (vm->flags & XE_VM_FLAG_64K)
237 /* Write out batch too */
238 m->batch_base_ofs = NUM_PT_SLOTS * XE_PAGE_SIZE;
239 for (i = 0; i < batch->size;
240 i += vm->flags & XE_VM_FLAG_64K ? XE_64K_PAGE_SIZE :
242 entry = vm->pt_ops->pte_encode_bo(batch, i,
245 xe_map_wr(xe, &bo->vmap, map_ofs + level * 8, u64,
249 if (xe->info.has_usm) {
250 xe_tile_assert(tile, batch->size == SZ_1M);
252 batch = tile->primary_gt->usm.bb_pool->bo;
253 m->usm_batch_base_ofs = m->batch_base_ofs + SZ_1M;
254 xe_tile_assert(tile, batch->size == SZ_512K);
256 for (i = 0; i < batch->size;
257 i += vm->flags & XE_VM_FLAG_64K ? XE_64K_PAGE_SIZE :
259 entry = vm->pt_ops->pte_encode_bo(batch, i,
262 xe_map_wr(xe, &bo->vmap, map_ofs + level * 8, u64,
268 u64 batch_addr = xe_bo_addr(batch, 0, XE_PAGE_SIZE);
270 m->batch_base_ofs = xe_migrate_vram_ofs(xe, batch_addr, false);
272 if (xe->info.has_usm) {
273 batch = tile->primary_gt->usm.bb_pool->bo;
274 batch_addr = xe_bo_addr(batch, 0, XE_PAGE_SIZE);
275 m->usm_batch_base_ofs = xe_migrate_vram_ofs(xe, batch_addr, false);
279 for (level = 1; level < num_level; level++) {
282 if (vm->flags & XE_VM_FLAG_64K && level == 1)
285 entry = vm->pt_ops->pde_encode_bo(bo, map_ofs + (u64)(level - 1) *
286 XE_PAGE_SIZE, pat_index);
287 xe_map_wr(xe, &bo->vmap, map_ofs + XE_PAGE_SIZE * level, u64,
291 /* Write PDE's that point to our BO. */
292 for (i = 0; i < map_ofs / PAGE_SIZE; i++) {
293 entry = vm->pt_ops->pde_encode_bo(bo, (u64)i * XE_PAGE_SIZE,
296 xe_map_wr(xe, &bo->vmap, map_ofs + XE_PAGE_SIZE +
297 (i + 1) * 8, u64, entry);
300 /* Set up a 1GiB NULL mapping at 255GiB offset. */
302 xe_map_wr(xe, &bo->vmap, map_ofs + XE_PAGE_SIZE * level + 255 * 8, u64,
303 vm->pt_ops->pte_encode_addr(xe, 0, pat_index, level, IS_DGFX(xe), 0)
305 m->cleared_mem_ofs = (255ULL << xe_pt_shift(level));
307 /* Identity map the entire vram at 256GiB offset */
309 u64 pt30_ofs = bo->size - 2 * XE_PAGE_SIZE;
311 xe_migrate_program_identity(xe, vm, bo, map_ofs, IDENTITY_OFFSET,
312 pat_index, pt30_ofs);
313 xe_assert(xe, xe->mem.vram.actual_physical_size <=
314 (MAX_NUM_PTE - IDENTITY_OFFSET) * SZ_1G);
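/*
 * Illustrative arithmetic behind the assert above: the level-2 table has
 * MAX_NUM_PTE == 512 one-GiB entries and the plain identity map starts at
 * entry IDENTITY_OFFSET == 256, so at most 256GiB of VRAM fits. When the
 * compressed copy below is also programmed, the second assert limits VRAM
 * to 128GiB so that both maps share the upper half of the table.
 */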
317 * Identity map the entire vram for compressed pat_index for xe2+
318 * if flat ccs is enabled.
320 if (GRAPHICS_VER(xe) >= 20 && xe_device_has_flat_ccs(xe)) {
321 u16 comp_pat_index = xe->pat.idx[XE_CACHE_NONE_COMPRESSION];
322 u64 vram_offset = IDENTITY_OFFSET +
323 DIV_ROUND_UP_ULL(xe->mem.vram.actual_physical_size, SZ_1G);
324 u64 pt31_ofs = bo->size - XE_PAGE_SIZE;
326 xe_assert(xe, xe->mem.vram.actual_physical_size <= (MAX_NUM_PTE -
327 IDENTITY_OFFSET - IDENTITY_OFFSET / 2) * SZ_1G);
328 xe_migrate_program_identity(xe, vm, bo, map_ofs, vram_offset,
329 comp_pat_index, pt31_ofs);
334 * Example layout created above, with root level = 3:
335 * [PT0...PT7]: kernel PT's for copy/clear; 64 or 4KiB PTE's
336 * [PT8]: Kernel PT for VM_BIND, 4 KiB PTE's
337 * [PT9...PT26]: Userspace PT's for VM_BIND, 4 KiB PTE's
338 * [PT27 = PDE 0] [PT28 = PDE 1] [PT29 = PDE 2] [PT30 & PT31 = 2M vram identity map]
 * This makes the lowest part of the VM point to the pagetables.
 * Hence the lowest 2M in the VM should point to itself; with a few writes
 * and flushes, other parts of the VM can be used either for copying and
 * clearing.
345 * For performance, the kernel reserves PDE's, so about 20 are left
346 * for async VM updates.
 * To make things easier to work with, each scratch PT is put in slot (1 + PT #)
 * everywhere; this allows lockless updates to scratch pages by using
 * the different addresses in the VM.
352 #define NUM_VMUSA_UNIT_PER_PAGE 32
353 #define VM_SA_UPDATE_UNIT_SIZE (XE_PAGE_SIZE / NUM_VMUSA_UNIT_PER_PAGE)
354 #define NUM_VMUSA_WRITES_PER_UNIT (VM_SA_UPDATE_UNIT_SIZE / sizeof(u64))
355 drm_suballoc_manager_init(&m->vm_update_sa,
356 (size_t)(map_ofs / XE_PAGE_SIZE - NUM_KERNEL_PDE) *
357 NUM_VMUSA_UNIT_PER_PAGE, 0);
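/*
 * Sketch of the suballocator sizing above (illustrative only, assuming
 * XE_PAGE_SIZE is SZ_4K and static_assert() is usable here): each page is
 * carved into 32 units of 128 bytes, i.e. 16 qword PTE writes per unit,
 * and only the pages above the NUM_KERNEL_PDE reserved slots are handed
 * to the suballocator.
 */
static_assert(VM_SA_UPDATE_UNIT_SIZE == SZ_128);
static_assert(NUM_VMUSA_WRITES_PER_UNIT == 16);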
 * Including the reserved copy engine is required to avoid deadlocks due to
 * migrate jobs servicing the faults getting stuck behind the job that faulted.
367 static u32 xe_migrate_usm_logical_mask(struct xe_gt *gt)
369 u32 logical_mask = 0;
370 struct xe_hw_engine *hwe;
371 enum xe_hw_engine_id id;
373 for_each_hw_engine(hwe, gt, id) {
374 if (hwe->class != XE_ENGINE_CLASS_COPY)
377 if (xe_gt_is_usm_hwe(gt, hwe))
378 logical_mask |= BIT(hwe->logical_instance);
384 static bool xe_migrate_needs_ccs_emit(struct xe_device *xe)
386 return xe_device_has_flat_ccs(xe) && !(GRAPHICS_VER(xe) >= 20 && IS_DGFX(xe));
390 * xe_migrate_init() - Initialize a migrate context
391 * @tile: Back-pointer to the tile we're initializing for.
393 * Return: Pointer to a migrate context on success. Error pointer on error.
395 struct xe_migrate *xe_migrate_init(struct xe_tile *tile)
397 struct xe_device *xe = tile_to_xe(tile);
398 struct xe_gt *primary_gt = tile->primary_gt;
399 struct xe_migrate *m;
403 m = drmm_kzalloc(&xe->drm, sizeof(*m), GFP_KERNEL);
405 return ERR_PTR(-ENOMEM);
409 /* Special layout, prepared below.. */
410 vm = xe_vm_create(xe, XE_VM_FLAG_MIGRATION |
411 XE_VM_FLAG_SET_TILE_ID(tile));
415 xe_vm_lock(vm, false);
416 err = xe_migrate_prepare_vm(tile, m, vm);
419 xe_vm_close_and_put(vm);
423 if (xe->info.has_usm) {
424 struct xe_hw_engine *hwe = xe_gt_hw_engine(primary_gt,
425 XE_ENGINE_CLASS_COPY,
426 primary_gt->usm.reserved_bcs_instance,
428 u32 logical_mask = xe_migrate_usm_logical_mask(primary_gt);
430 if (!hwe || !logical_mask)
431 return ERR_PTR(-EINVAL);
434 * XXX: Currently only reserving 1 (likely slow) BCS instance on
435 * PVC, may want to revisit if performance is needed.
437 m->q = xe_exec_queue_create(xe, vm, logical_mask, 1, hwe,
438 EXEC_QUEUE_FLAG_KERNEL |
439 EXEC_QUEUE_FLAG_PERMANENT |
440 EXEC_QUEUE_FLAG_HIGH_PRIORITY, 0);
442 m->q = xe_exec_queue_create_class(xe, primary_gt, vm,
443 XE_ENGINE_CLASS_COPY,
444 EXEC_QUEUE_FLAG_KERNEL |
445 EXEC_QUEUE_FLAG_PERMANENT, 0);
448 xe_vm_close_and_put(vm);
449 return ERR_CAST(m->q);
452 mutex_init(&m->job_mutex);
453 fs_reclaim_acquire(GFP_KERNEL);
454 might_lock(&m->job_mutex);
455 fs_reclaim_release(GFP_KERNEL);
457 err = drmm_add_action_or_reset(&xe->drm, xe_migrate_fini, m);
462 if (xe_migrate_needs_ccs_emit(xe))
463 /* min chunk size corresponds to 4K of CCS Metadata */
464 m->min_chunk_size = SZ_4K * SZ_64K /
465 xe_device_ccs_bytes(xe, SZ_64K);
/* Somewhat arbitrary to avoid a huge number of blits */
468 m->min_chunk_size = SZ_64K;
469 m->min_chunk_size = roundup_pow_of_two(m->min_chunk_size);
470 drm_dbg(&xe->drm, "Migrate min chunk size is 0x%08llx\n",
471 (unsigned long long)m->min_chunk_size);
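/*
 * Worked example (illustrative): assuming the common flat-CCS ratio of 256
 * bytes of main memory per byte of CCS metadata, xe_device_ccs_bytes(xe,
 * SZ_64K) is 256, so min_chunk_size becomes SZ_4K * SZ_64K / 256 == SZ_1M,
 * i.e. exactly the amount of data whose CCS metadata fills one 4K page,
 * which is what keeps the CCS offset page-aligned between chunks.
 */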
477 static u64 max_mem_transfer_per_pass(struct xe_device *xe)
479 if (!IS_DGFX(xe) && xe_device_has_flat_ccs(xe))
480 return MAX_CCS_LIMITED_TRANSFER;
482 return MAX_PREEMPTDISABLE_TRANSFER;
485 static u64 xe_migrate_res_sizes(struct xe_migrate *m, struct xe_res_cursor *cur)
487 struct xe_device *xe = tile_to_xe(m->tile);
488 u64 size = min_t(u64, max_mem_transfer_per_pass(xe), cur->remaining);
490 if (mem_type_is_vram(cur->mem_type)) {
 * For VRAM we want to blit in chunks with sizes aligned to
 * min_chunk_size in order for the offset to CCS metadata to be
 * page-aligned. If it's the last chunk it may be smaller.
496 * Another constraint is that we need to limit the blit to
497 * the VRAM block size, unless size is smaller than
500 u64 chunk = max_t(u64, cur->size, m->min_chunk_size);
502 size = min_t(u64, size, chunk);
503 if (size > m->min_chunk_size)
504 size = round_down(size, m->min_chunk_size);
510 static bool xe_migrate_allow_identity(u64 size, const struct xe_res_cursor *cur)
512 /* If the chunk is not fragmented, allow identity map. */
513 return cur->size >= size;
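/*
 * Worked example for xe_migrate_res_sizes() above (illustrative, taking
 * min_chunk_size == SZ_1M and the SZ_8M preempt-disable limit): a VRAM
 * resource with 7.5MiB remaining whose current block (cur->size) is 3MiB
 * gives min(SZ_8M, 7.5M) = 7.5M, clamped to the 3MiB block and rounded
 * down to 3MiB. A final 512KiB tail is passed through unrounded, since
 * only sizes above min_chunk_size are rounded down.
 */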
516 #define PTE_UPDATE_FLAG_IS_VRAM BIT(0)
517 #define PTE_UPDATE_FLAG_IS_COMP_PTE BIT(1)
519 static u32 pte_update_size(struct xe_migrate *m,
521 struct ttm_resource *res,
522 struct xe_res_cursor *cur,
523 u64 *L0, u64 *L0_ofs, u32 *L0_pt,
524 u32 cmd_size, u32 pt_ofs, u32 avail_pts)
527 bool is_vram = PTE_UPDATE_FLAG_IS_VRAM & flags;
528 bool is_comp_pte = PTE_UPDATE_FLAG_IS_COMP_PTE & flags;
531 if (is_vram && xe_migrate_allow_identity(*L0, cur)) {
532 /* Offset into identity map. */
533 *L0_ofs = xe_migrate_vram_ofs(tile_to_xe(m->tile),
534 cur->start + vram_region_gpu_offset(res),
538 /* Clip L0 to available size */
539 u64 size = min(*L0, (u64)avail_pts * SZ_2M);
540 u32 num_4k_pages = (size + XE_PAGE_SIZE - 1) >> XE_PTE_SHIFT;
543 *L0_ofs = xe_migrate_vm_addr(pt_ofs, 0);
545 /* MI_STORE_DATA_IMM */
546 cmds += 3 * DIV_ROUND_UP(num_4k_pages, MAX_PTE_PER_SDI);
549 cmds += num_4k_pages * 2;
551 /* Each chunk has a single blit command */
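/*
 * Worked example (illustrative): a full 2MiB chunk routed through the
 * kernel PT slots needs 512 4K PTEs. That is two MI_STORE_DATA_IMM
 * packets (512 > MAX_PTE_PER_SDI), i.e. 2 * 3 header dwords plus
 * 512 * 2 payload dwords, 1030 dwords of PTE update in total, before
 * the blit command itself (cmd_size) is accounted for.
 */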
558 static void emit_pte(struct xe_migrate *m,
559 struct xe_bb *bb, u32 at_pt,
560 bool is_vram, bool is_comp_pte,
561 struct xe_res_cursor *cur,
562 u32 size, struct ttm_resource *res)
564 struct xe_device *xe = tile_to_xe(m->tile);
565 struct xe_vm *vm = m->q->vm;
568 u64 ofs = (u64)at_pt * XE_PAGE_SIZE;
/* Indirect access needs a compression-enabled, uncached PAT index */
572 if (GRAPHICS_VERx100(xe) >= 2000)
573 pat_index = is_comp_pte ? xe->pat.idx[XE_CACHE_NONE_COMPRESSION] :
574 xe->pat.idx[XE_CACHE_WB];
576 pat_index = xe->pat.idx[XE_CACHE_WB];
578 ptes = DIV_ROUND_UP(size, XE_PAGE_SIZE);
581 u32 chunk = min(MAX_PTE_PER_SDI, ptes);
583 bb->cs[bb->len++] = MI_STORE_DATA_IMM | MI_SDI_NUM_QW(chunk);
584 bb->cs[bb->len++] = ofs;
585 bb->cs[bb->len++] = 0;
595 addr = xe_res_dma(cur) & PAGE_MASK;
597 if (vm->flags & XE_VM_FLAG_64K) {
598 u64 va = cur_ofs * XE_PAGE_SIZE / 8;
600 xe_assert(xe, (va & (SZ_64K - 1)) ==
601 (addr & (SZ_64K - 1)));
603 flags |= XE_PTE_PS64;
606 addr += vram_region_gpu_offset(res);
610 addr = vm->pt_ops->pte_encode_addr(m->tile->xe,
613 bb->cs[bb->len++] = lower_32_bits(addr);
614 bb->cs[bb->len++] = upper_32_bits(addr);
616 xe_res_next(cur, min_t(u32, size, PAGE_SIZE));
622 #define EMIT_COPY_CCS_DW 5
623 static void emit_copy_ccs(struct xe_gt *gt, struct xe_bb *bb,
624 u64 dst_ofs, bool dst_is_indirect,
625 u64 src_ofs, bool src_is_indirect,
628 struct xe_device *xe = gt_to_xe(gt);
629 u32 *cs = bb->cs + bb->len;
635 if (GRAPHICS_VERx100(xe) >= 2000) {
636 num_pages = DIV_ROUND_UP(size, XE_PAGE_SIZE);
637 xe_gt_assert(gt, FIELD_FIT(XE2_CCS_SIZE_MASK, num_pages - 1));
639 ccs_copy_size = REG_FIELD_PREP(XE2_CCS_SIZE_MASK, num_pages - 1);
640 mocs = FIELD_PREP(XE2_XY_CTRL_SURF_MOCS_INDEX_MASK, gt->mocs.uc_index);
643 num_ccs_blks = DIV_ROUND_UP(xe_device_ccs_bytes(gt_to_xe(gt), size),
644 NUM_CCS_BYTES_PER_BLOCK);
645 xe_gt_assert(gt, FIELD_FIT(CCS_SIZE_MASK, num_ccs_blks - 1));
647 ccs_copy_size = REG_FIELD_PREP(CCS_SIZE_MASK, num_ccs_blks - 1);
648 mocs = FIELD_PREP(XY_CTRL_SURF_MOCS_MASK, gt->mocs.uc_index);
651 *cs++ = XY_CTRL_SURF_COPY_BLT |
652 (src_is_indirect ? 0x0 : 0x1) << SRC_ACCESS_TYPE_SHIFT |
653 (dst_is_indirect ? 0x0 : 0x1) << DST_ACCESS_TYPE_SHIFT |
655 *cs++ = lower_32_bits(src_ofs);
656 *cs++ = upper_32_bits(src_ofs) | mocs;
657 *cs++ = lower_32_bits(dst_ofs);
658 *cs++ = upper_32_bits(dst_ofs) | mocs;
660 bb->len = cs - bb->cs;
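/*
 * Worked example (illustrative, assuming the usual 1:256 main-to-CCS ratio
 * and a 256-byte NUM_CCS_BYTES_PER_BLOCK on pre-Xe2 flat-CCS parts):
 * copying SZ_64K of main memory moves 256 bytes of CCS, i.e. a single
 * block, so the pre-Xe2 size field is programmed as 1 - 1 = 0. On Xe2+ the
 * same SZ_64K is expressed as 16 pages and the field is programmed as
 * 16 - 1 = 15.
 */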
663 #define EMIT_COPY_DW 10
664 static void emit_copy(struct xe_gt *gt, struct xe_bb *bb,
665 u64 src_ofs, u64 dst_ofs, unsigned int size,
668 struct xe_device *xe = gt_to_xe(gt);
672 xe_gt_assert(gt, size / pitch <= S16_MAX);
673 xe_gt_assert(gt, pitch / 4 <= S16_MAX);
674 xe_gt_assert(gt, pitch <= U16_MAX);
676 if (GRAPHICS_VER(xe) >= 20)
677 mocs = FIELD_PREP(XE2_XY_FAST_COPY_BLT_MOCS_INDEX_MASK, gt->mocs.uc_index);
679 if (GRAPHICS_VERx100(xe) >= 1250)
680 tile_y = XY_FAST_COPY_BLT_D1_SRC_TILE4 | XY_FAST_COPY_BLT_D1_DST_TILE4;
682 bb->cs[bb->len++] = XY_FAST_COPY_BLT_CMD | (10 - 2);
683 bb->cs[bb->len++] = XY_FAST_COPY_BLT_DEPTH_32 | pitch | tile_y | mocs;
684 bb->cs[bb->len++] = 0;
685 bb->cs[bb->len++] = (size / pitch) << 16 | pitch / 4;
686 bb->cs[bb->len++] = lower_32_bits(dst_ofs);
687 bb->cs[bb->len++] = upper_32_bits(dst_ofs);
688 bb->cs[bb->len++] = 0;
689 bb->cs[bb->len++] = pitch | mocs;
690 bb->cs[bb->len++] = lower_32_bits(src_ofs);
691 bb->cs[bb->len++] = upper_32_bits(src_ofs);
694 static u64 xe_migrate_batch_base(struct xe_migrate *m, bool usm)
696 return usm ? m->usm_batch_base_ofs : m->batch_base_ofs;
699 static u32 xe_migrate_ccs_copy(struct xe_migrate *m,
701 u64 src_ofs, bool src_is_indirect,
702 u64 dst_ofs, bool dst_is_indirect, u32 dst_size,
703 u64 ccs_ofs, bool copy_ccs)
705 struct xe_gt *gt = m->tile->primary_gt;
708 if (!copy_ccs && dst_is_indirect) {
710 * If the src is already in vram, then it should already
711 * have been cleared by us, or has been populated by the
712 * user. Make sure we copy the CCS aux state as-is.
714 * Otherwise if the bo doesn't have any CCS metadata attached,
715 * we still need to clear it for security reasons.
717 u64 ccs_src_ofs = src_is_indirect ? src_ofs : m->cleared_mem_ofs;
719 emit_copy_ccs(gt, bb,
721 ccs_src_ofs, src_is_indirect, dst_size);
723 flush_flags = MI_FLUSH_DW_CCS;
724 } else if (copy_ccs) {
725 if (!src_is_indirect)
727 else if (!dst_is_indirect)
730 xe_gt_assert(gt, src_is_indirect || dst_is_indirect);
732 emit_copy_ccs(gt, bb, dst_ofs, dst_is_indirect, src_ofs,
733 src_is_indirect, dst_size);
735 flush_flags = MI_FLUSH_DW_CCS;
742 * xe_migrate_copy() - Copy content of TTM resources.
743 * @m: The migration context.
744 * @src_bo: The buffer object @src is currently bound to.
745 * @dst_bo: If copying between resources created for the same bo, set this to
746 * the same value as @src_bo. If copying between buffer objects, set it to
747 * the buffer object @dst is currently bound to.
748 * @src: The source TTM resource.
749 * @dst: The dst TTM resource.
 * @copy_only_ccs: If true, copy only CCS metadata
752 * Copies the contents of @src to @dst: On flat CCS devices,
753 * the CCS metadata is copied as well if needed, or if not present,
754 * the CCS metadata of @dst is cleared for security reasons.
756 * Return: Pointer to a dma_fence representing the last copy batch, or
757 * an error pointer on failure. If there is a failure, any copy operation
758 * started by the function call has been synced.
760 struct dma_fence *xe_migrate_copy(struct xe_migrate *m,
761 struct xe_bo *src_bo,
762 struct xe_bo *dst_bo,
763 struct ttm_resource *src,
764 struct ttm_resource *dst,
767 struct xe_gt *gt = m->tile->primary_gt;
768 struct xe_device *xe = gt_to_xe(gt);
769 struct dma_fence *fence = NULL;
770 u64 size = src_bo->size;
771 struct xe_res_cursor src_it, dst_it, ccs_it;
772 u64 src_L0_ofs, dst_L0_ofs;
773 u32 src_L0_pt, dst_L0_pt;
777 bool src_is_pltt = src->mem_type == XE_PL_TT;
778 bool dst_is_pltt = dst->mem_type == XE_PL_TT;
779 bool src_is_vram = mem_type_is_vram(src->mem_type);
780 bool dst_is_vram = mem_type_is_vram(dst->mem_type);
781 bool copy_ccs = xe_device_has_flat_ccs(xe) &&
782 xe_bo_needs_ccs_pages(src_bo) && xe_bo_needs_ccs_pages(dst_bo);
783 bool copy_system_ccs = copy_ccs && (!src_is_vram || !dst_is_vram);
784 bool use_comp_pat = xe_device_has_flat_ccs(xe) &&
785 GRAPHICS_VER(xe) >= 20 && src_is_vram && !dst_is_vram;
787 /* Copying CCS between two different BOs is not supported yet. */
788 if (XE_WARN_ON(copy_ccs && src_bo != dst_bo))
789 return ERR_PTR(-EINVAL);
791 if (src_bo != dst_bo && XE_WARN_ON(src_bo->size != dst_bo->size))
792 return ERR_PTR(-EINVAL);
795 xe_res_first_sg(xe_bo_sg(src_bo), 0, size, &src_it);
797 xe_res_first(src, 0, size, &src_it);
799 xe_res_first_sg(xe_bo_sg(dst_bo), 0, size, &dst_it);
801 xe_res_first(dst, 0, size, &dst_it);
804 xe_res_first_sg(xe_bo_sg(src_bo), xe_bo_ccs_pages_start(src_bo),
805 PAGE_ALIGN(xe_device_ccs_bytes(xe, size)),
809 u32 batch_size = 2; /* arb_clear() + MI_BATCH_BUFFER_END */
810 struct xe_sched_job *job;
814 u64 ccs_ofs, ccs_size;
818 bool usm = xe->info.has_usm;
819 u32 avail_pts = max_mem_transfer_per_pass(xe) / LEVEL0_PAGE_TABLE_ENCODE_SIZE;
821 src_L0 = xe_migrate_res_sizes(m, &src_it);
822 dst_L0 = xe_migrate_res_sizes(m, &dst_it);
824 drm_dbg(&xe->drm, "Pass %u, sizes: %llu & %llu\n",
825 pass++, src_L0, dst_L0);
827 src_L0 = min(src_L0, dst_L0);
829 pte_flags = src_is_vram ? PTE_UPDATE_FLAG_IS_VRAM : 0;
830 pte_flags |= use_comp_pat ? PTE_UPDATE_FLAG_IS_COMP_PTE : 0;
831 batch_size += pte_update_size(m, pte_flags, src, &src_it, &src_L0,
832 &src_L0_ofs, &src_L0_pt, 0, 0,
835 pte_flags = dst_is_vram ? PTE_UPDATE_FLAG_IS_VRAM : 0;
836 batch_size += pte_update_size(m, pte_flags, dst, &dst_it, &src_L0,
837 &dst_L0_ofs, &dst_L0_pt, 0,
838 avail_pts, avail_pts);
840 if (copy_system_ccs) {
841 ccs_size = xe_device_ccs_bytes(xe, src_L0);
842 batch_size += pte_update_size(m, 0, NULL, &ccs_it, &ccs_size,
843 &ccs_ofs, &ccs_pt, 0,
846 xe_assert(xe, IS_ALIGNED(ccs_it.start, PAGE_SIZE));
849 /* Add copy commands size here */
850 batch_size += ((copy_only_ccs) ? 0 : EMIT_COPY_DW) +
851 ((xe_migrate_needs_ccs_emit(xe) ? EMIT_COPY_CCS_DW : 0));
853 bb = xe_bb_new(gt, batch_size, usm);
859 if (src_is_vram && xe_migrate_allow_identity(src_L0, &src_it))
860 xe_res_next(&src_it, src_L0);
862 emit_pte(m, bb, src_L0_pt, src_is_vram, copy_system_ccs,
863 &src_it, src_L0, src);
865 if (dst_is_vram && xe_migrate_allow_identity(src_L0, &dst_it))
866 xe_res_next(&dst_it, src_L0);
868 emit_pte(m, bb, dst_L0_pt, dst_is_vram, copy_system_ccs,
869 &dst_it, src_L0, dst);
872 emit_pte(m, bb, ccs_pt, false, false, &ccs_it, ccs_size, src);
874 bb->cs[bb->len++] = MI_BATCH_BUFFER_END;
875 update_idx = bb->len;
878 emit_copy(gt, bb, src_L0_ofs, dst_L0_ofs, src_L0, XE_PAGE_SIZE);
880 if (xe_migrate_needs_ccs_emit(xe))
881 flush_flags = xe_migrate_ccs_copy(m, bb, src_L0_ofs,
882 IS_DGFX(xe) ? src_is_vram : src_is_pltt,
884 IS_DGFX(xe) ? dst_is_vram : dst_is_pltt,
885 src_L0, ccs_ofs, copy_ccs);
887 job = xe_bb_create_migration_job(m->q, bb,
888 xe_migrate_batch_base(m, usm),
895 xe_sched_job_add_migrate_flush(job, flush_flags);
897 err = xe_sched_job_add_deps(job, src_bo->ttm.base.resv,
898 DMA_RESV_USAGE_BOOKKEEP);
899 if (!err && src_bo != dst_bo)
900 err = xe_sched_job_add_deps(job, dst_bo->ttm.base.resv,
901 DMA_RESV_USAGE_BOOKKEEP);
906 mutex_lock(&m->job_mutex);
907 xe_sched_job_arm(job);
908 dma_fence_put(fence);
909 fence = dma_fence_get(&job->drm.s_fence->finished);
910 xe_sched_job_push(job);
912 dma_fence_put(m->fence);
913 m->fence = dma_fence_get(fence);
915 mutex_unlock(&m->job_mutex);
917 xe_bb_free(bb, fence);
922 xe_sched_job_put(job);
924 xe_bb_free(bb, NULL);
927 /* Sync partial copy if any. FIXME: under job_mutex? */
929 dma_fence_wait(fence, false);
930 dma_fence_put(fence);
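/*
 * Minimal usage sketch (illustrative; example_migrate_bo() is a hypothetical
 * caller, not part of this file): a move helper would typically hand the
 * returned fence straight back to TTM. Locking and error handling beyond the
 * fence itself are elided, and the resources are assumed to be populated.
 */
static __maybe_unused struct dma_fence *
example_migrate_bo(struct xe_migrate *m, struct xe_bo *bo,
		   struct ttm_resource *old_res, struct ttm_resource *new_res)
{
	/* Copy the main data and, where applicable, the CCS metadata too. */
	return xe_migrate_copy(m, bo, bo, old_res, new_res, false);
}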
939 static void emit_clear_link_copy(struct xe_gt *gt, struct xe_bb *bb, u64 src_ofs,
942 struct xe_device *xe = gt_to_xe(gt);
943 u32 *cs = bb->cs + bb->len;
944 u32 len = PVC_MEM_SET_CMD_LEN_DW;
946 *cs++ = PVC_MEM_SET_CMD | PVC_MEM_SET_MATRIX | (len - 2);
948 *cs++ = (size / pitch) - 1;
950 *cs++ = lower_32_bits(src_ofs);
951 *cs++ = upper_32_bits(src_ofs);
952 if (GRAPHICS_VERx100(xe) >= 2000)
953 *cs++ = FIELD_PREP(XE2_MEM_SET_MOCS_INDEX_MASK, gt->mocs.uc_index);
955 *cs++ = FIELD_PREP(PVC_MEM_SET_MOCS_INDEX_MASK, gt->mocs.uc_index);
957 xe_gt_assert(gt, cs - bb->cs == len + bb->len);
962 static void emit_clear_main_copy(struct xe_gt *gt, struct xe_bb *bb,
963 u64 src_ofs, u32 size, u32 pitch, bool is_vram)
965 struct xe_device *xe = gt_to_xe(gt);
966 u32 *cs = bb->cs + bb->len;
967 u32 len = XY_FAST_COLOR_BLT_DW;
969 if (GRAPHICS_VERx100(xe) < 1250)
972 *cs++ = XY_FAST_COLOR_BLT_CMD | XY_FAST_COLOR_BLT_DEPTH_32 |
974 if (GRAPHICS_VERx100(xe) >= 2000)
975 *cs++ = FIELD_PREP(XE2_XY_FAST_COLOR_BLT_MOCS_INDEX_MASK, gt->mocs.uc_index) |
978 *cs++ = FIELD_PREP(XY_FAST_COLOR_BLT_MOCS_MASK, gt->mocs.uc_index) |
981 *cs++ = (size / pitch) << 16 | pitch / 4;
982 *cs++ = lower_32_bits(src_ofs);
983 *cs++ = upper_32_bits(src_ofs);
984 *cs++ = (is_vram ? 0x0 : 0x1) << XY_FAST_COLOR_BLT_MEM_TYPE_SHIFT;
998 xe_gt_assert(gt, cs - bb->cs == len + bb->len);
1003 static bool has_service_copy_support(struct xe_gt *gt)
 * What we care about is whether the architecture was designed with
 * service copy functionality (specifically the new MEM_SET / MEM_COPY
 * instructions), so check the architectural engine list rather than the
 * actual list, since these instructions are usable on BCS0 even if
 * all of the actual service copy engines (BCS1-BCS8) have been fused off.
1013 return gt->info.engine_mask & GENMASK(XE_HW_ENGINE_BCS8,
1017 static u32 emit_clear_cmd_len(struct xe_gt *gt)
1019 if (has_service_copy_support(gt))
1020 return PVC_MEM_SET_CMD_LEN_DW;
1022 return XY_FAST_COLOR_BLT_DW;
1025 static void emit_clear(struct xe_gt *gt, struct xe_bb *bb, u64 src_ofs,
1026 u32 size, u32 pitch, bool is_vram)
1028 if (has_service_copy_support(gt))
1029 emit_clear_link_copy(gt, bb, src_ofs, size, pitch);
1031 emit_clear_main_copy(gt, bb, src_ofs, size, pitch,
 * xe_migrate_clear() - Clear content of TTM resources.
1037 * @m: The migration context.
1038 * @bo: The buffer object @dst is currently bound to.
1039 * @dst: The dst TTM resource to be cleared.
1040 * @clear_flags: flags to specify which data to clear: CCS, BO, or both.
1042 * Clear the contents of @dst to zero when XE_MIGRATE_CLEAR_FLAG_BO_DATA is set.
1043 * On flat CCS devices, the CCS metadata is cleared to zero with XE_MIGRATE_CLEAR_FLAG_CCS_DATA.
1044 * Set XE_MIGRATE_CLEAR_FLAG_FULL to clear bo as well as CCS metadata.
1045 * TODO: Eliminate the @bo argument.
1047 * Return: Pointer to a dma_fence representing the last clear batch, or
1048 * an error pointer on failure. If there is a failure, any clear operation
1049 * started by the function call has been synced.
1051 struct dma_fence *xe_migrate_clear(struct xe_migrate *m,
1053 struct ttm_resource *dst,
1056 bool clear_vram = mem_type_is_vram(dst->mem_type);
1057 bool clear_bo_data = XE_MIGRATE_CLEAR_FLAG_BO_DATA & clear_flags;
1058 bool clear_ccs = XE_MIGRATE_CLEAR_FLAG_CCS_DATA & clear_flags;
1059 struct xe_gt *gt = m->tile->primary_gt;
1060 struct xe_device *xe = gt_to_xe(gt);
1061 bool clear_only_system_ccs = false;
1062 struct dma_fence *fence = NULL;
1063 u64 size = bo->size;
1064 struct xe_res_cursor src_it;
1065 struct ttm_resource *src = dst;
1068 if (WARN_ON(!clear_bo_data && !clear_ccs))
1071 if (!clear_bo_data && clear_ccs && !IS_DGFX(xe))
1072 clear_only_system_ccs = true;
1075 xe_res_first_sg(xe_bo_sg(bo), 0, bo->size, &src_it);
1077 xe_res_first(src, 0, bo->size, &src_it);
1082 u32 flush_flags = 0;
1084 struct xe_sched_job *job;
1086 u32 batch_size, update_idx;
1089 bool usm = xe->info.has_usm;
1090 u32 avail_pts = max_mem_transfer_per_pass(xe) / LEVEL0_PAGE_TABLE_ENCODE_SIZE;
1092 clear_L0 = xe_migrate_res_sizes(m, &src_it);
1094 /* Calculate final sizes and batch size.. */
1095 pte_flags = clear_vram ? PTE_UPDATE_FLAG_IS_VRAM : 0;
1097 pte_update_size(m, pte_flags, src, &src_it,
1098 &clear_L0, &clear_L0_ofs, &clear_L0_pt,
1099 clear_bo_data ? emit_clear_cmd_len(gt) : 0, 0,
1102 if (xe_migrate_needs_ccs_emit(xe))
1103 batch_size += EMIT_COPY_CCS_DW;
1105 /* Clear commands */
1107 if (WARN_ON_ONCE(!clear_L0))
1110 bb = xe_bb_new(gt, batch_size, usm);
1117 /* Preemption is enabled again by the ring ops. */
1118 if (clear_vram && xe_migrate_allow_identity(clear_L0, &src_it))
1119 xe_res_next(&src_it, clear_L0);
1121 emit_pte(m, bb, clear_L0_pt, clear_vram, clear_only_system_ccs,
1122 &src_it, clear_L0, dst);
1124 bb->cs[bb->len++] = MI_BATCH_BUFFER_END;
1125 update_idx = bb->len;
1128 emit_clear(gt, bb, clear_L0_ofs, clear_L0, XE_PAGE_SIZE, clear_vram);
1130 if (xe_migrate_needs_ccs_emit(xe)) {
1131 emit_copy_ccs(gt, bb, clear_L0_ofs, true,
1132 m->cleared_mem_ofs, false, clear_L0);
1133 flush_flags = MI_FLUSH_DW_CCS;
1136 job = xe_bb_create_migration_job(m->q, bb,
1137 xe_migrate_batch_base(m, usm),
1144 xe_sched_job_add_migrate_flush(job, flush_flags);
1147 * There can't be anything userspace related at this
1148 * point, so we just need to respect any potential move
1149 * fences, which are always tracked as
1150 * DMA_RESV_USAGE_KERNEL.
1152 err = xe_sched_job_add_deps(job, bo->ttm.base.resv,
1153 DMA_RESV_USAGE_KERNEL);
1158 mutex_lock(&m->job_mutex);
1159 xe_sched_job_arm(job);
1160 dma_fence_put(fence);
1161 fence = dma_fence_get(&job->drm.s_fence->finished);
1162 xe_sched_job_push(job);
1164 dma_fence_put(m->fence);
1165 m->fence = dma_fence_get(fence);
1167 mutex_unlock(&m->job_mutex);
1169 xe_bb_free(bb, fence);
1173 xe_sched_job_put(job);
1175 xe_bb_free(bb, NULL);
1177 /* Sync partial copies if any. FIXME: job_mutex? */
1179 dma_fence_wait(m->fence, false);
1180 dma_fence_put(fence);
1183 return ERR_PTR(err);
1187 bo->ccs_cleared = true;
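/*
 * Minimal usage sketch (illustrative; example_clear_bo() is a hypothetical
 * caller, not part of this file): clearing a freshly allocated VRAM BO,
 * data and CCS metadata alike, then waiting for the clear to complete.
 * Reservation locking is elided.
 */
static __maybe_unused int
example_clear_bo(struct xe_migrate *m, struct xe_bo *bo)
{
	struct dma_fence *fence;

	fence = xe_migrate_clear(m, bo, bo->ttm.resource,
				 XE_MIGRATE_CLEAR_FLAG_FULL);
	if (IS_ERR(fence))
		return PTR_ERR(fence);

	dma_fence_wait(fence, false);
	dma_fence_put(fence);
	return 0;
}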
1192 static void write_pgtable(struct xe_tile *tile, struct xe_bb *bb, u64 ppgtt_ofs,
1193 const struct xe_vm_pgtable_update_op *pt_op,
1194 const struct xe_vm_pgtable_update *update,
1195 struct xe_migrate_pt_update *pt_update)
1197 const struct xe_migrate_pt_update_ops *ops = pt_update->ops;
1199 u32 ofs = update->ofs, size = update->qwords;
1202 * If we have 512 entries (max), we would populate it ourselves,
1203 * and update the PDE above it to the new pointer.
 * The only time this can happen is when we have to update the top
 * PDE. This requires a BO that is almost vm->size big.
 *
 * This shouldn't be possible in practice; it might change when 16K
 * pages are used. Hence the assert.
1210 xe_tile_assert(tile, update->qwords < MAX_NUM_PTE);
1212 ppgtt_ofs = xe_migrate_vram_ofs(tile_to_xe(tile),
1213 xe_bo_addr(update->pt_bo, 0,
1214 XE_PAGE_SIZE), false);
1217 u64 addr = ppgtt_ofs + ofs * 8;
1219 chunk = min(size, MAX_PTE_PER_SDI);
1221 /* Ensure populatefn can do memset64 by aligning bb->cs */
1223 bb->cs[bb->len++] = MI_NOOP;
1225 bb->cs[bb->len++] = MI_STORE_DATA_IMM | MI_SDI_NUM_QW(chunk);
1226 bb->cs[bb->len++] = lower_32_bits(addr);
1227 bb->cs[bb->len++] = upper_32_bits(addr);
1229 ops->populate(pt_update, tile, NULL, bb->cs + bb->len,
1230 ofs, chunk, update);
1232 ops->clear(pt_update, tile, NULL, bb->cs + bb->len,
1233 ofs, chunk, update);
1235 bb->len += chunk * 2;
1241 struct xe_vm *xe_migrate_get_vm(struct xe_migrate *m)
1243 return xe_vm_get(m->q->vm);
1246 #if IS_ENABLED(CONFIG_DRM_XE_KUNIT_TEST)
1247 struct migrate_test_params {
1248 struct xe_test_priv base;
1252 #define to_migrate_test_params(_priv) \
1253 container_of(_priv, struct migrate_test_params, base)
1256 static struct dma_fence *
1257 xe_migrate_update_pgtables_cpu(struct xe_migrate *m,
1258 struct xe_migrate_pt_update *pt_update)
1260 XE_TEST_DECLARE(struct migrate_test_params *test =
1261 to_migrate_test_params
1262 (xe_cur_kunit_priv(XE_TEST_LIVE_MIGRATE));)
1263 const struct xe_migrate_pt_update_ops *ops = pt_update->ops;
1264 struct xe_vm *vm = pt_update->vops->vm;
1265 struct xe_vm_pgtable_update_ops *pt_update_ops =
1266 &pt_update->vops->pt_update_ops[pt_update->tile_id];
1270 if (XE_TEST_ONLY(test && test->force_gpu))
1271 return ERR_PTR(-ETIME);
1273 if (ops->pre_commit) {
1274 pt_update->job = NULL;
1275 err = ops->pre_commit(pt_update);
1277 return ERR_PTR(err);
1280 for (i = 0; i < pt_update_ops->num_ops; ++i) {
1281 const struct xe_vm_pgtable_update_op *pt_op =
1282 &pt_update_ops->ops[i];
1284 for (j = 0; j < pt_op->num_entries; j++) {
1285 const struct xe_vm_pgtable_update *update =
1289 ops->populate(pt_update, m->tile,
1290 &update->pt_bo->vmap, NULL,
1291 update->ofs, update->qwords,
1294 ops->clear(pt_update, m->tile,
1295 &update->pt_bo->vmap, NULL,
1296 update->ofs, update->qwords, update);
1300 trace_xe_vm_cpu_bind(vm);
1301 xe_device_wmb(vm->xe);
1303 return dma_fence_get_stub();
1306 static struct dma_fence *
1307 __xe_migrate_update_pgtables(struct xe_migrate *m,
1308 struct xe_migrate_pt_update *pt_update,
1309 struct xe_vm_pgtable_update_ops *pt_update_ops)
1311 const struct xe_migrate_pt_update_ops *ops = pt_update->ops;
1312 struct xe_tile *tile = m->tile;
1313 struct xe_gt *gt = tile->primary_gt;
1314 struct xe_device *xe = tile_to_xe(tile);
1315 struct xe_sched_job *job;
1316 struct dma_fence *fence;
1317 struct drm_suballoc *sa_bo = NULL;
1319 u32 i, j, batch_size = 0, ppgtt_ofs, update_idx, page_ofs = 0;
1320 u32 num_updates = 0, current_update = 0;
1323 bool is_migrate = pt_update_ops->q == m->q;
1324 bool usm = is_migrate && xe->info.has_usm;
1326 for (i = 0; i < pt_update_ops->num_ops; ++i) {
1327 struct xe_vm_pgtable_update_op *pt_op = &pt_update_ops->ops[i];
1328 struct xe_vm_pgtable_update *updates = pt_op->entries;
1330 num_updates += pt_op->num_entries;
1331 for (j = 0; j < pt_op->num_entries; ++j) {
1332 u32 num_cmds = DIV_ROUND_UP(updates[j].qwords,
1335 /* align noop + MI_STORE_DATA_IMM cmd prefix */
1336 batch_size += 4 * num_cmds + updates[j].qwords * 2;
1340 /* fixed + PTE entries */
1344 batch_size += 6 * (num_updates / MAX_PTE_PER_SDI + 1) +
1347 bb = xe_bb_new(gt, batch_size, usm);
1349 return ERR_CAST(bb);
1351 /* For sysmem PTE's, need to map them in our hole.. */
1355 ppgtt_ofs = NUM_KERNEL_PDE - 1;
1357 u32 num_units = DIV_ROUND_UP(num_updates,
1358 NUM_VMUSA_WRITES_PER_UNIT);
1360 if (num_units > m->vm_update_sa.size) {
1364 sa_bo = drm_suballoc_new(&m->vm_update_sa, num_units,
1365 GFP_KERNEL, true, 0);
1366 if (IS_ERR(sa_bo)) {
1367 err = PTR_ERR(sa_bo);
1371 ppgtt_ofs = NUM_KERNEL_PDE +
1372 (drm_suballoc_soffset(sa_bo) /
1373 NUM_VMUSA_UNIT_PER_PAGE);
1374 page_ofs = (drm_suballoc_soffset(sa_bo) %
1375 NUM_VMUSA_UNIT_PER_PAGE) *
1376 VM_SA_UPDATE_UNIT_SIZE;
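/*
 * Worked example (illustrative): a suballocation whose first unit is 40
 * lands in kernel PT slot NUM_KERNEL_PDE + 40 / 32 = 16, at byte offset
 * (40 % 32) * VM_SA_UPDATE_UNIT_SIZE = 1024 within that page.
 */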
1379 /* Map our PT's to gtt */
1383 ofs = ppgtt_ofs * XE_PAGE_SIZE + page_ofs;
1385 u32 chunk = min(MAX_PTE_PER_SDI, ptes);
1388 bb->cs[bb->len++] = MI_STORE_DATA_IMM |
1389 MI_SDI_NUM_QW(chunk);
1390 bb->cs[bb->len++] = ofs;
1391 bb->cs[bb->len++] = 0; /* upper_32_bits */
1393 for (; i < pt_update_ops->num_ops; ++i) {
1394 struct xe_vm_pgtable_update_op *pt_op =
1395 &pt_update_ops->ops[i];
1396 struct xe_vm_pgtable_update *updates = pt_op->entries;
1398 for (; j < pt_op->num_entries; ++j, ++current_update, ++idx) {
1399 struct xe_vm *vm = pt_update->vops->vm;
1400 struct xe_bo *pt_bo = updates[j].pt_bo;
1405 xe_tile_assert(tile, pt_bo->size == SZ_4K);
1407 /* Map a PT at most once */
1408 if (pt_bo->update_index < 0)
1409 pt_bo->update_index = current_update;
1411 addr = vm->pt_ops->pte_encode_bo(pt_bo, 0,
1413 bb->cs[bb->len++] = lower_32_bits(addr);
1414 bb->cs[bb->len++] = upper_32_bits(addr);
1422 ofs += chunk * sizeof(u64);
1425 bb->cs[bb->len++] = MI_BATCH_BUFFER_END;
1426 update_idx = bb->len;
1428 addr = xe_migrate_vm_addr(ppgtt_ofs, 0) +
1429 (page_ofs / sizeof(u64)) * XE_PAGE_SIZE;
1430 for (i = 0; i < pt_update_ops->num_ops; ++i) {
1431 struct xe_vm_pgtable_update_op *pt_op =
1432 &pt_update_ops->ops[i];
1433 struct xe_vm_pgtable_update *updates = pt_op->entries;
1435 for (j = 0; j < pt_op->num_entries; ++j) {
1436 struct xe_bo *pt_bo = updates[j].pt_bo;
1438 write_pgtable(tile, bb, addr +
1439 pt_bo->update_index * XE_PAGE_SIZE,
1440 pt_op, &updates[j], pt_update);
1444 /* phys pages, no preamble required */
1445 bb->cs[bb->len++] = MI_BATCH_BUFFER_END;
1446 update_idx = bb->len;
1448 for (i = 0; i < pt_update_ops->num_ops; ++i) {
1449 struct xe_vm_pgtable_update_op *pt_op =
1450 &pt_update_ops->ops[i];
1451 struct xe_vm_pgtable_update *updates = pt_op->entries;
1453 for (j = 0; j < pt_op->num_entries; ++j)
1454 write_pgtable(tile, bb, 0, pt_op, &updates[j],
1459 job = xe_bb_create_migration_job(pt_update_ops->q, bb,
1460 xe_migrate_batch_base(m, usm),
1467 if (ops->pre_commit) {
1468 pt_update->job = job;
1469 err = ops->pre_commit(pt_update);
1474 mutex_lock(&m->job_mutex);
1476 xe_sched_job_arm(job);
1477 fence = dma_fence_get(&job->drm.s_fence->finished);
1478 xe_sched_job_push(job);
1481 mutex_unlock(&m->job_mutex);
1483 xe_bb_free(bb, fence);
1484 drm_suballoc_free(sa_bo, fence);
1489 xe_sched_job_put(job);
1491 drm_suballoc_free(sa_bo, NULL);
1493 xe_bb_free(bb, NULL);
1494 return ERR_PTR(err);
1498 * xe_migrate_update_pgtables() - Pipelined page-table update
1499 * @m: The migrate context.
1500 * @pt_update: PT update arguments
1502 * Perform a pipelined page-table update. The update descriptors are typically
1503 * built under the same lock critical section as a call to this function. If
1504 * using the default engine for the updates, they will be performed in the
1505 * order they grab the job_mutex. If different engines are used, external
1506 * synchronization is needed for overlapping updates to maintain page-table
 * consistency. Note that the meaning of "overlapping" is that the updates
1508 * touch the same page-table, which might be a higher-level page-directory.
1509 * If no pipelining is needed, then updates may be performed by the cpu.
1511 * Return: A dma_fence that, when signaled, indicates the update completion.
1514 xe_migrate_update_pgtables(struct xe_migrate *m,
1515 struct xe_migrate_pt_update *pt_update)
1518 struct xe_vm_pgtable_update_ops *pt_update_ops =
1519 &pt_update->vops->pt_update_ops[pt_update->tile_id];
1520 struct dma_fence *fence;
1522 fence = xe_migrate_update_pgtables_cpu(m, pt_update);
1524 /* -ETIME indicates a job is needed, anything else is legit error */
1525 if (!IS_ERR(fence) || PTR_ERR(fence) != -ETIME)
1528 return __xe_migrate_update_pgtables(m, pt_update, pt_update_ops);
1532 * xe_migrate_wait() - Complete all operations using the xe_migrate context
1533 * @m: Migrate context to wait for.
1535 * Waits until the GPU no longer uses the migrate context's default engine
 * or its page-table objects. FIXME: What about separate page-table update engines?
1539 void xe_migrate_wait(struct xe_migrate *m)
1542 dma_fence_wait(m->fence, false);
1545 #if IS_ENABLED(CONFIG_DRM_XE_KUNIT_TEST)
1546 #include "tests/xe_migrate.c"