// SPDX-License-Identifier: MIT
 * Copyright © 2020 Intel Corporation
#include "intel_context.h"
#include "intel_gpu_commands.h"
#include "intel_gtt.h"
#include "intel_migrate.h"
#include "intel_ring.h"
struct insert_pte_data {
#define CHUNK_SZ SZ_8M /* ~1ms at 8GiB/s preemption delay */
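/*
 * Rough sizing for the chunk above: at the ~8GiB/s blitter rate assumed in
 * the comment, an 8MiB chunk takes about 8MiB / 8GiB/s ~= 1ms to move, which
 * bounds how long one non-preemptible PTE-write + blit sequence can occupy
 * the copy engine.
 */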
static bool engine_supports_migration(struct intel_engine_cs *engine)
         * We need the ability to prevent arbitration (MI_ARB_ON_OFF),
         * the ability to write PTEs using inline data (MI_STORE_DATA_IMM)
         * and of course the ability to do the block transfer (blits).
        GEM_BUG_ON(engine->class != COPY_ENGINE_CLASS);
static void insert_pte(struct i915_address_space *vm,
                       struct i915_page_table *pt,
        struct insert_pte_data *d = data;
        vm->insert_page(vm, px_dma(pt), d->offset, I915_CACHE_NONE,
                        d->is_lmem ? PTE_LM : 0);
        d->offset += PAGE_SIZE;
static struct i915_address_space *migrate_vm(struct intel_gt *gt)
        struct i915_vm_pt_stash stash = {};
        struct i915_ppgtt *vm;
         * We construct a very special VM for use by all migration contexts,
         * it is kept pinned so that it can be used at any time. As we need
         * to pre-allocate the page directories for the migration VM, this
         * limits us to only using a small number of prepared vma.
         * To be able to pipeline and reschedule migration operations while
         * avoiding unnecessary contention on the vm itself, the PTE updates
         * are inline with the blits. All the blits use the same fixed
         * addresses, with the backing store redirection being updated on the
         * fly. Only 2 implicit vma are used for all migration operations.
         * We lay the ppGTT out as:
         *      [0, CHUNK_SZ) -> first object
         *      [CHUNK_SZ, 2 * CHUNK_SZ) -> second object
         *      [2 * CHUNK_SZ, 2 * CHUNK_SZ + 2 * CHUNK_SZ >> 9) -> PTE
         * By exposing the dma addresses of the page directories themselves
         * within the ppGTT, we are then able to rewrite the PTE prior to use.
         * But the PTE update and subsequent migration operation must be atomic,
         * i.e. within the same non-preemptible window so that we do not switch
         * to another migration context that overwrites the PTE.
         * TODO: Add support for huge LMEM PTEs
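         *
         * As an illustration of the layout above: with CHUNK_SZ = SZ_8M,
         * copy engine instance 0 blits from [0, 8M) to [8M, 16M) and rewrites
         * the backing PTEs through the window at [16M, 16M + 32K), since
         * 2 * CHUNK_SZ of VA is covered by 4096 PTEs of 8 bytes each. Each
         * further instance i uses the same layout at base = (u64)i << 32.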
        vm = i915_ppgtt_create(gt);
        if (!vm->vm.allocate_va_range || !vm->vm.foreach) {
         * Each engine instance is assigned its own chunk in the VM, so
         * that we can run multiple instances concurrently
        for (i = 0; i < ARRAY_SIZE(gt->engine_class[COPY_ENGINE_CLASS]); i++) {
                struct intel_engine_cs *engine;
                u64 base = (u64)i << 32;
                struct insert_pte_data d = {};
                struct i915_gem_ww_ctx ww;
                engine = gt->engine_class[COPY_ENGINE_CLASS][i];
                if (!engine_supports_migration(engine))
                 * We copy in 8MiB chunks. Each PDE covers 2MiB, so we need
                 * 4x2 page directories for source/destination.
                d.offset = base + sz;
                 * We need another page directory setup so that we can write
                 * the 8x512 PTE in each chunk.
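                 * (With CHUNK_SZ = SZ_8M that is 2 * 8MiB of VA, i.e. 4096
                 * PTEs at 8 bytes each, so the PTE window adds 32KiB on top
                 * of the two chunks.)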
                sz += (sz >> 12) * sizeof(u64);
                err = i915_vm_alloc_pt_stash(&vm->vm, &stash, sz);
                for_i915_gem_ww(&ww, err, true) {
                        err = i915_vm_lock_objects(&vm->vm, &ww);
                        err = i915_vm_map_pt_stash(&vm->vm, &stash);
                        vm->vm.allocate_va_range(&vm->vm, &stash, base, sz);
                i915_vm_free_pt_stash(&vm->vm, &stash);
                /* Now allow the GPU to rewrite the PTE via its own ppGTT */
                d.is_lmem = i915_gem_object_is_lmem(vm->vm.scratch[0]);
                vm->vm.foreach(&vm->vm, base, base + sz, insert_pte, &d);
        i915_vm_put(&vm->vm);
static struct intel_engine_cs *first_copy_engine(struct intel_gt *gt)
        struct intel_engine_cs *engine;
        for (i = 0; i < ARRAY_SIZE(gt->engine_class[COPY_ENGINE_CLASS]); i++) {
                engine = gt->engine_class[COPY_ENGINE_CLASS][i];
                if (engine_supports_migration(engine))
static struct intel_context *pinned_context(struct intel_gt *gt)
        static struct lock_class_key key;
        struct intel_engine_cs *engine;
        struct i915_address_space *vm;
        struct intel_context *ce;
        engine = first_copy_engine(gt);
                return ERR_PTR(-ENODEV);
        ce = intel_engine_create_pinned_context(engine, vm, SZ_512K,
                                                I915_GEM_HWS_MIGRATE,
int intel_migrate_init(struct intel_migrate *m, struct intel_gt *gt)
        struct intel_context *ce;
        memset(m, 0, sizeof(*m));
        ce = pinned_context(gt);
static int random_index(unsigned int max)
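        /*
         * Scale a uniform 32-bit random value into [0, max) by taking the
         * high half of the 32x32 multiply, avoiding a modulus.
         */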
        return upper_32_bits(mul_u32_u32(get_random_u32(), max));
static struct intel_context *__migrate_engines(struct intel_gt *gt)
        struct intel_engine_cs *engines[MAX_ENGINE_INSTANCE];
        struct intel_engine_cs *engine;
        unsigned int count, i;
        for (i = 0; i < ARRAY_SIZE(gt->engine_class[COPY_ENGINE_CLASS]); i++) {
                engine = gt->engine_class[COPY_ENGINE_CLASS][i];
                if (engine_supports_migration(engine))
                        engines[count++] = engine;
        return intel_context_create(engines[random_index(count)]);
struct intel_context *intel_migrate_create_context(struct intel_migrate *m)
        struct intel_context *ce;
         * We randomly distribute contexts across the engines upon construction,
         * as they all share the same pinned vm, and so in order to allow
         * multiple blits to run in parallel, we must construct each blit
         * to use a different range of the vm for its GTT. This has to be
         * known at construction, so we cannot use the late greedy load
         * balancing of the virtual-engine.
        ce = __migrate_engines(m->context->engine->gt);
        ce->ring_size = SZ_256K;
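        /*
         * Point the new context at the shared pinned migration vm, so every
         * migration context sees the same fixed chunk addresses laid out by
         * migrate_vm().
         */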
        ce->vm = i915_vm_get(m->context->vm);
static inline struct sgt_dma sg_sgt(struct scatterlist *sg)
        dma_addr_t addr = sg_dma_address(sg);
        return (struct sgt_dma){ sg, addr, addr + sg_dma_len(sg) };
static int emit_no_arbitration(struct i915_request *rq)
        cs = intel_ring_begin(rq, 2);
        /* Explicitly disable preemption for this request. */
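        /* MI_ARB_ON_OFF with the enable bit clear acts as MI_ARB_DISABLE. */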
        *cs++ = MI_ARB_ON_OFF;
        intel_ring_advance(rq, cs);
static int emit_pte(struct i915_request *rq,
                    enum i915_cache_level cache_level,
        const u64 encode = rq->context->vm->pte_encode(0, cache_level,
                                                       is_lmem ? PTE_LM : 0);
        struct intel_ring *ring = rq->ring;
        GEM_BUG_ON(GRAPHICS_VER(rq->engine->i915) < 8);
        /* Compute the page directory offset for the target address range */
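        /*
         * The incoming offset is a byte address inside this engine's window
         * (0 or CHUNK_SZ); convert it to the qword slot of its PTE inside
         * the window that migrate_vm() placed at base + 2 * CHUNK_SZ.
         */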
        offset >>= 12;
        offset *= sizeof(u64);
        offset += 2 * CHUNK_SZ;
        offset += (u64)rq->engine->instance << 32;
        cs = intel_ring_begin(rq, 6);
        /* Pack as many PTE updates as possible into a single MI command */
        pkt = min_t(int, 0x400, ring->space / sizeof(u32) + 5);
        pkt = min_t(int, pkt, (ring->size - ring->emit) / sizeof(u32) + 5);
        *cs++ = MI_STORE_DATA_IMM | REG_BIT(21); /* as qword elements */
        *cs++ = lower_32_bits(offset);
        *cs++ = upper_32_bits(offset);
                if (cs - hdr >= pkt) {
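                        /*
                         * Close the packet: patch the MI_STORE_DATA_IMM DWord
                         * Length field in the header to cover the qwords
                         * emitted so far, then flush the ring and start a
                         * fresh packet.
                         */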
                        *hdr += cs - hdr - 2;
                        ring->emit = (void *)cs - ring->vaddr;
                        intel_ring_advance(rq, cs);
                        intel_ring_update_space(ring);
                        cs = intel_ring_begin(rq, 6);
                        pkt = min_t(int, 0x400, ring->space / sizeof(u32) + 5);
                        pkt = min_t(int, pkt, (ring->size - ring->emit) / sizeof(u32) + 5);
                        *cs++ = MI_STORE_DATA_IMM | REG_BIT(21);
                        *cs++ = lower_32_bits(offset);
                        *cs++ = upper_32_bits(offset);
                *cs++ = lower_32_bits(encode | it->dma);
                *cs++ = upper_32_bits(encode | it->dma);
                total += I915_GTT_PAGE_SIZE;
                it->dma += I915_GTT_PAGE_SIZE;
                if (it->dma >= it->max) {
                        it->sg = __sg_next(it->sg);
                        if (!it->sg || sg_dma_len(it->sg) == 0)
                        it->dma = sg_dma_address(it->sg);
                        it->max = it->dma + sg_dma_len(it->sg);
        } while (total < length);
        *hdr += cs - hdr - 2;
        ring->emit = (void *)cs - ring->vaddr;
        intel_ring_advance(rq, cs);
        intel_ring_update_space(ring);
static bool wa_1209644611_applies(int ver, u32 size)
        u32 height = size >> PAGE_SHIFT;
        return height % 4 == 3 && height <= 8;
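/*
 * The copy is expressed as a 2D blit of a PAGE_SIZE-wide, 32bpp surface:
 * the height field carries size >> PAGE_SHIFT rows, the destination sits at
 * CHUNK_SZ and the source at 0 within this engine's window.
 */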
static int emit_copy(struct i915_request *rq, int size)
        const int ver = GRAPHICS_VER(rq->engine->i915);
        u32 instance = rq->engine->instance;
        cs = intel_ring_begin(rq, ver >= 8 ? 10 : 6);
        if (ver >= 9 && !wa_1209644611_applies(ver, size)) {
                *cs++ = GEN9_XY_FAST_COPY_BLT_CMD | (10 - 2);
                *cs++ = BLT_DEPTH_32 | PAGE_SIZE;
                *cs++ = size >> PAGE_SHIFT << 16 | PAGE_SIZE / 4;
                *cs++ = CHUNK_SZ; /* dst offset */
                *cs++ = 0; /* src offset */
        } else if (ver >= 8) {
                *cs++ = XY_SRC_COPY_BLT_CMD | BLT_WRITE_RGBA | (10 - 2);
                *cs++ = BLT_DEPTH_32 | BLT_ROP_SRC_COPY | PAGE_SIZE;
                *cs++ = size >> PAGE_SHIFT << 16 | PAGE_SIZE / 4;
                *cs++ = CHUNK_SZ; /* dst offset */
                *cs++ = 0; /* src offset */
                GEM_BUG_ON(instance);
                *cs++ = SRC_COPY_BLT_CMD | BLT_WRITE_RGBA | (6 - 2);
                *cs++ = BLT_DEPTH_32 | BLT_ROP_SRC_COPY | PAGE_SIZE;
                *cs++ = size >> PAGE_SHIFT << 16 | PAGE_SIZE;
                *cs++ = CHUNK_SZ; /* dst offset */
                *cs++ = 0; /* src offset */
        intel_ring_advance(rq, cs);
intel_context_migrate_copy(struct intel_context *ce,
                           struct dma_fence *await,
                           struct scatterlist *src,
                           enum i915_cache_level src_cache_level,
                           struct scatterlist *dst,
                           enum i915_cache_level dst_cache_level,
                           struct i915_request **out)
        struct sgt_dma it_src = sg_sgt(src), it_dst = sg_sgt(dst);
        struct i915_request *rq;
        GEM_BUG_ON(ce->vm != ce->engine->gt->migrate.context->vm);
        GEM_BUG_ON(ce->ring->size < SZ_64K);
                rq = i915_request_create(ce);
                        err = i915_request_await_dma_fence(rq, await);
                        if (rq->engine->emit_init_breadcrumb) {
                                err = rq->engine->emit_init_breadcrumb(rq);
                /* The PTE updates + copy must not be interrupted. */
                err = emit_no_arbitration(rq);
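                /*
                 * Map the next chunk of the source at offset 0 and of the
                 * destination at CHUNK_SZ in this engine's window, then blit
                 * between those two fixed ranges.
                 */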
                len = emit_pte(rq, &it_src, src_cache_level, src_is_lmem, 0,
                err = emit_pte(rq, &it_dst, dst_cache_level, dst_is_lmem,
                err = rq->engine->emit_flush(rq, EMIT_INVALIDATE);
                err = emit_copy(rq, len);
                /* Arbitration is re-enabled between requests. */
                        i915_request_put(*out);
                *out = i915_request_get(rq);
                i915_request_add(rq);
                if (err || !it_src.sg || !sg_dma_len(it_src.sg))
static int emit_clear(struct i915_request *rq, int size, u32 value)
        const int ver = GRAPHICS_VER(rq->engine->i915);
        u32 instance = rq->engine->instance;
        GEM_BUG_ON(size >> PAGE_SHIFT > S16_MAX);
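        /*
         * The blit height field below holds size >> PAGE_SHIFT as a signed
         * 16-bit value, hence the S16_MAX bound above.
         */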
        cs = intel_ring_begin(rq, ver >= 8 ? 8 : 6);
                *cs++ = XY_COLOR_BLT_CMD | BLT_WRITE_RGBA | (7 - 2);
                *cs++ = BLT_DEPTH_32 | BLT_ROP_COLOR_COPY | PAGE_SIZE;
                *cs++ = size >> PAGE_SHIFT << 16 | PAGE_SIZE / 4;
                *cs++ = 0; /* offset */
                GEM_BUG_ON(instance);
                *cs++ = XY_COLOR_BLT_CMD | BLT_WRITE_RGBA | (6 - 2);
                *cs++ = BLT_DEPTH_32 | BLT_ROP_COLOR_COPY | PAGE_SIZE;
                *cs++ = size >> PAGE_SHIFT << 16 | PAGE_SIZE / 4;
        intel_ring_advance(rq, cs);
intel_context_migrate_clear(struct intel_context *ce,
                            struct dma_fence *await,
                            struct scatterlist *sg,
                            enum i915_cache_level cache_level,
                            struct i915_request **out)
        struct sgt_dma it = sg_sgt(sg);
        struct i915_request *rq;
        GEM_BUG_ON(ce->vm != ce->engine->gt->migrate.context->vm);
        GEM_BUG_ON(ce->ring->size < SZ_64K);
                rq = i915_request_create(ce);
                        err = i915_request_await_dma_fence(rq, await);
                        if (rq->engine->emit_init_breadcrumb) {
                                err = rq->engine->emit_init_breadcrumb(rq);
                /* The PTE updates + clear must not be interrupted. */
                err = emit_no_arbitration(rq);
                len = emit_pte(rq, &it, cache_level, is_lmem, 0, CHUNK_SZ);
                err = rq->engine->emit_flush(rq, EMIT_INVALIDATE);
                err = emit_clear(rq, len, value);
                /* Arbitration is re-enabled between requests. */
                        i915_request_put(*out);
                *out = i915_request_get(rq);
                i915_request_add(rq);
                if (err || !it.sg || !sg_dma_len(it.sg))
int intel_migrate_copy(struct intel_migrate *m,
                       struct i915_gem_ww_ctx *ww,
                       struct dma_fence *await,
                       struct scatterlist *src,
                       enum i915_cache_level src_cache_level,
                       struct scatterlist *dst,
                       enum i915_cache_level dst_cache_level,
                       struct i915_request **out)
        struct intel_context *ce;
        ce = intel_migrate_create_context(m);
                ce = intel_context_get(m->context);
        GEM_BUG_ON(IS_ERR(ce));
        err = intel_context_pin_ww(ce, ww);
        err = intel_context_migrate_copy(ce, await,
                                         src, src_cache_level, src_is_lmem,
                                         dst, dst_cache_level, dst_is_lmem,
        intel_context_unpin(ce);
        intel_context_put(ce);
intel_migrate_clear(struct intel_migrate *m,
                    struct i915_gem_ww_ctx *ww,
                    struct dma_fence *await,
                    struct scatterlist *sg,
                    enum i915_cache_level cache_level,
                    struct i915_request **out)
        struct intel_context *ce;
        ce = intel_migrate_create_context(m);
                ce = intel_context_get(m->context);
        GEM_BUG_ON(IS_ERR(ce));
        err = intel_context_pin_ww(ce, ww);
        err = intel_context_migrate_clear(ce, await, sg, cache_level,
                                          is_lmem, value, out);
        intel_context_unpin(ce);
        intel_context_put(ce);
void intel_migrate_fini(struct intel_migrate *m)
        struct intel_context *ce;
        ce = fetch_and_zero(&m->context);
        intel_engine_destroy_pinned_context(ce);
#if IS_ENABLED(CONFIG_DRM_I915_SELFTEST)
#include "selftest_migrate.c"