// SPDX-License-Identifier: MIT
/*
 * Copyright © 2019 Intel Corporation
 */

#include "i915_selftest.h"

#include "gt/intel_context.h"
#include "gt/intel_engine_regs.h"
#include "gt/intel_engine_user.h"
#include "gt/intel_gpu_commands.h"
#include "gt/intel_gt.h"
#include "gt/intel_gt_regs.h"
#include "gem/i915_gem_lmem.h"

#include "selftests/igt_flush_test.h"
#include "selftests/mock_drm.h"
#include "selftests/i915_random.h"
#include "huge_gem_object.h"
#include "mock_context.h"

#define OW_SIZE 16			/* in bytes */
#define F_SUBTILE_SIZE 64		/* in bytes */
#define F_TILE_WIDTH 128		/* in bytes */
#define F_TILE_HEIGHT 32		/* in pixels */
#define F_SUBTILE_WIDTH OW_SIZE		/* in bytes */
#define F_SUBTILE_HEIGHT 4		/* in pixels */

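/*
 * Derived geometry, for reference (this follows directly from the defines
 * above): one F tile is 128 bytes wide by 32 rows, i.e. 4096 bytes, carved
 * into an 8x8 grid of 64-byte subtiles, each one OWord (16 bytes) wide and
 * 4 rows tall.
 */
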
static int linear_x_y_to_ftiled_pos(int x, int y, u32 stride, int bpp)
{
	int tile_base, tile_x, tile_y;
	int subtile, swizzle, pos;
	int pixel_size = bpp / 8;

	/*
	 * Subtile remapping for F tile. Note that map[a] == b implies
	 * map[b] == a, so we can use the same table to tile and untile.
	 */
	static const u8 f_subtile_map[] = {
		 0,  1,  2,  3,  8,  9, 10, 11,
		 4,  5,  6,  7, 12, 13, 14, 15,
		16, 17, 18, 19, 24, 25, 26, 27,
		20, 21, 22, 23, 28, 29, 30, 31,
		32, 33, 34, 35, 40, 41, 42, 43,
		36, 37, 38, 39, 44, 45, 46, 47,
		48, 49, 50, 51, 56, 57, 58, 59,
		52, 53, 54, 55, 60, 61, 62, 63
	};

	x *= pixel_size;

	/*
	 * Where does the 4k tile start (in bytes)? This is the same for Y and
	 * F so we can use the Y-tile algorithm to get to that point.
	 */
	tile_base =
		y / F_TILE_HEIGHT * stride * F_TILE_HEIGHT +
		x / F_TILE_WIDTH * 4096;

	/* Find pixel within tile */
	tile_x = x % F_TILE_WIDTH;
	tile_y = y % F_TILE_HEIGHT;

	/* And figure out the subtile within the 4k tile */
	subtile = tile_y / F_SUBTILE_HEIGHT * 8 + tile_x / F_SUBTILE_WIDTH;

	/* Swizzle the subtile number according to the bspec diagram */
	swizzle = f_subtile_map[subtile];

	/* Calculate new position */
	pos = tile_base +
	      swizzle * F_SUBTILE_SIZE +
	      tile_y % F_SUBTILE_HEIGHT * OW_SIZE +
	      tile_x % F_SUBTILE_WIDTH;

	GEM_BUG_ON(!IS_ALIGNED(pos, pixel_size));

	return pos / pixel_size * 4;
}

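/*
 * Worked example of the mapping above (illustrative, 32bpp): pixel
 * (x = 16, y = 0) sits at linear byte 64, i.e. subtile 4 of the first 4k
 * tile; f_subtile_map[4] is 8, so it lands at byte 8 * F_SUBTILE_SIZE = 512
 * of the F-tiled buffer.
 */
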
enum client_tiling {
	CLIENT_TILING_LINEAR,
	CLIENT_TILING_X,
	CLIENT_TILING_Y,
	CLIENT_TILING_4,
	CLIENT_NUM_TILING_TYPES
};

#define WIDTH 512
#define HEIGHT 32

struct blit_buffer {
	struct i915_vma *vma;
	u32 start_val;
	enum client_tiling tiling;
};

struct tiled_blits {
	struct intel_context *ce;
	struct blit_buffer buffers[3];
	struct blit_buffer scratch;
	struct i915_vma *batch;
	u64 hole;
	u64 align;
	u32 width;
	u32 height;
};

static bool supports_x_tiling(const struct drm_i915_private *i915)
{
	int gen = GRAPHICS_VER(i915);

	if (gen < 12)
		return true;

	if (!HAS_LMEM(i915) || IS_DG1(i915))
		return false;

	return true;
}

static bool fast_blit_ok(const struct blit_buffer *buf)
{
	int gen = GRAPHICS_VER(buf->vma->vm->i915);

	if (gen < 9)
		return false;

	if (gen < 12)
		return true;

	/* filter out platforms where X-tiling is not supported in fastblit */
	if (buf->tiling == CLIENT_TILING_X && !supports_x_tiling(buf->vma->vm->i915))
		return false;

	return true;
}

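/*
 * Build the blit batch. When both buffers qualify per fast_blit_ok() we use
 * XY_FAST_COPY_BLT, a 10-dword packet (as usual for blitter commands, the
 * length field encodes the dword count minus 2, hence the "(10 - 2)"
 * below). Otherwise we fall back to the legacy XY_SRC_COPY_BLT, selecting
 * Y-major fetch via the BCS_SWCTRL register on gen6+.
 */
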
static int prepare_blit(const struct tiled_blits *t,
			struct blit_buffer *dst,
			struct blit_buffer *src,
			struct drm_i915_gem_object *batch)
{
	const int ver = GRAPHICS_VER(to_i915(batch->base.dev));
	bool use_64b_reloc = ver >= 8;
	u32 src_pitch, dst_pitch;
	u32 cmd, *cs;

	cs = i915_gem_object_pin_map_unlocked(batch, I915_MAP_WC);
	if (IS_ERR(cs))
		return PTR_ERR(cs);

	if (fast_blit_ok(dst) && fast_blit_ok(src)) {
		struct intel_gt *gt = t->ce->engine->gt;
		u32 src_tiles = 0, dst_tiles = 0;
		u32 src_4t = 0, dst_4t = 0;

		/*
		 * BLIT_CCTL must be programmed before XY_FAST_COPY_BLT is
		 * used, if that has not already been done.
		 */
		*cs++ = MI_LOAD_REGISTER_IMM(1);
		*cs++ = i915_mmio_reg_offset(BLIT_CCTL(t->ce->engine->mmio_base));
		*cs++ = (BLIT_CCTL_SRC_MOCS(gt->mocs.uc_index) |
			 BLIT_CCTL_DST_MOCS(gt->mocs.uc_index));

		src_pitch = t->width; /* in dwords */
		if (src->tiling == CLIENT_TILING_4) {
			src_tiles = XY_FAST_COPY_BLT_D0_SRC_TILE_MODE(YMAJOR);
			src_4t = XY_FAST_COPY_BLT_D1_SRC_TILE4;
		} else if (src->tiling == CLIENT_TILING_Y) {
			src_tiles = XY_FAST_COPY_BLT_D0_SRC_TILE_MODE(YMAJOR);
		} else if (src->tiling == CLIENT_TILING_X) {
			src_tiles = XY_FAST_COPY_BLT_D0_SRC_TILE_MODE(TILE_X);
		} else {
			src_pitch *= 4; /* in bytes */
		}

		dst_pitch = t->width; /* in dwords */
		if (dst->tiling == CLIENT_TILING_4) {
			dst_tiles = XY_FAST_COPY_BLT_D0_DST_TILE_MODE(YMAJOR);
			dst_4t = XY_FAST_COPY_BLT_D1_DST_TILE4;
		} else if (dst->tiling == CLIENT_TILING_Y) {
			dst_tiles = XY_FAST_COPY_BLT_D0_DST_TILE_MODE(YMAJOR);
		} else if (dst->tiling == CLIENT_TILING_X) {
			dst_tiles = XY_FAST_COPY_BLT_D0_DST_TILE_MODE(TILE_X);
		} else {
			dst_pitch *= 4; /* in bytes */
		}

		*cs++ = GEN9_XY_FAST_COPY_BLT_CMD | (10 - 2) |
			src_tiles | dst_tiles;
		*cs++ = src_4t | dst_4t | BLT_DEPTH_32 | dst_pitch;
		*cs++ = 0;
		*cs++ = t->height << 16 | t->width;
		*cs++ = lower_32_bits(dst->vma->node.start);
		*cs++ = upper_32_bits(dst->vma->node.start);
		*cs++ = 0;
		*cs++ = src_pitch;
		*cs++ = lower_32_bits(src->vma->node.start);
		*cs++ = upper_32_bits(src->vma->node.start);
	} else {
		if (ver >= 6) {
			*cs++ = MI_LOAD_REGISTER_IMM(1);
			*cs++ = i915_mmio_reg_offset(BCS_SWCTRL);
			/* The high word of BCS_SWCTRL is a write-enable mask */
			cmd = (BCS_SRC_Y | BCS_DST_Y) << 16;
			if (src->tiling == CLIENT_TILING_Y)
				cmd |= BCS_SRC_Y;
			if (dst->tiling == CLIENT_TILING_Y)
				cmd |= BCS_DST_Y;
			*cs++ = cmd;

			cmd = MI_FLUSH_DW;
			if (ver >= 8)
				cmd++;
			*cs++ = cmd;
			*cs++ = 0;
			*cs++ = 0;
			*cs++ = 0;
		}

		cmd = XY_SRC_COPY_BLT_CMD | BLT_WRITE_RGBA | (8 - 2);
		if (ver >= 8)
			cmd += 2;

		src_pitch = t->width * 4;
		if (src->tiling) {
			cmd |= XY_SRC_COPY_BLT_SRC_TILED;
			src_pitch /= 4; /* in dwords */
		}

		dst_pitch = t->width * 4;
		if (dst->tiling) {
			cmd |= XY_SRC_COPY_BLT_DST_TILED;
			dst_pitch /= 4; /* in dwords */
		}

		*cs++ = cmd;
		*cs++ = BLT_DEPTH_32 | BLT_ROP_SRC_COPY | dst_pitch;
		*cs++ = 0;
		*cs++ = t->height << 16 | t->width;
		*cs++ = lower_32_bits(dst->vma->node.start);
		if (use_64b_reloc)
			*cs++ = upper_32_bits(dst->vma->node.start);
		*cs++ = 0;
		*cs++ = src_pitch;
		*cs++ = lower_32_bits(src->vma->node.start);
		if (use_64b_reloc)
			*cs++ = upper_32_bits(src->vma->node.start);
	}

	*cs++ = MI_BATCH_BUFFER_END;

	i915_gem_object_flush_map(batch);
	i915_gem_object_unpin_map(batch);

	return 0;
}

static void tiled_blits_destroy_buffers(struct tiled_blits *t)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(t->buffers); i++)
		i915_vma_put(t->buffers[i].vma);

	i915_vma_put(t->scratch.vma);
	i915_vma_put(t->batch);
}

static struct i915_vma *
__create_vma(struct tiled_blits *t, size_t size, bool lmem)
{
	struct drm_i915_private *i915 = t->ce->vm->i915;
	struct drm_i915_gem_object *obj;
	struct i915_vma *vma;

	if (lmem)
		obj = i915_gem_object_create_lmem(i915, size, 0);
	else
		obj = i915_gem_object_create_shmem(i915, size);
	if (IS_ERR(obj))
		return ERR_CAST(obj);

	vma = i915_vma_instance(obj, t->ce->vm, NULL);
	if (IS_ERR(vma))
		i915_gem_object_put(obj);

	return vma;
}

static struct i915_vma *create_vma(struct tiled_blits *t, bool lmem)
{
	return __create_vma(t, PAGE_ALIGN(t->width * t->height * 4), lmem);
}

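/*
 * Each buffer holds width * height pixels at 4 bytes per pixel, matching
 * the BLT_DEPTH_32 colour depth programmed in prepare_blit().
 */
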
static int tiled_blits_create_buffers(struct tiled_blits *t,
				      int width, int height,
				      struct rnd_state *prng)
{
	struct drm_i915_private *i915 = t->ce->engine->i915;
	int i;

	t->width = width;
	t->height = height;

	t->batch = __create_vma(t, PAGE_SIZE, false);
	if (IS_ERR(t->batch))
		return PTR_ERR(t->batch);

	t->scratch.vma = create_vma(t, false);
	if (IS_ERR(t->scratch.vma)) {
		i915_vma_put(t->batch);
		return PTR_ERR(t->scratch.vma);
	}

	for (i = 0; i < ARRAY_SIZE(t->buffers); i++) {
		struct i915_vma *vma;

		/* Alternate buffers between system and local memory */
		vma = create_vma(t, HAS_LMEM(i915) && i % 2);
		if (IS_ERR(vma)) {
			tiled_blits_destroy_buffers(t);
			return PTR_ERR(vma);
		}

		t->buffers[i].vma = vma;
		t->buffers[i].tiling =
			i915_prandom_u32_max_state(CLIENT_NUM_TILING_TYPES, prng);

		/* Platforms support either TileY or Tile4, not both */
		if (HAS_4TILE(i915) && t->buffers[i].tiling == CLIENT_TILING_Y)
			t->buffers[i].tiling = CLIENT_TILING_4;
		else if (!HAS_4TILE(i915) && t->buffers[i].tiling == CLIENT_TILING_4)
			t->buffers[i].tiling = CLIENT_TILING_Y;
	}

	return 0;
}

static void fill_scratch(struct tiled_blits *t, u32 *vaddr, u32 val)
{
	int i;

	t->scratch.start_val = val;
	for (i = 0; i < t->width * t->height; i++)
		vaddr[i] = val++;

	i915_gem_object_flush_map(t->scratch.vma->obj);
}

static u64 swizzle_bit(unsigned int bit, u64 offset)
{
	return (offset & BIT_ULL(bit)) >> (bit - 6);
}

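/*
 * Bit-6 swizzling XORs one or more higher address bits (9, 10, 11) into
 * bit 6 of the fetch address. swizzle_bit() pulls the chosen bit down to
 * bit position 6, e.g. swizzle_bit(9, v) == (v & BIT_ULL(9)) >> 3, so
 * tiled_offset() below can mimic the hardware's view of a tiled buffer.
 */
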
static u64 tiled_offset(const struct intel_gt *gt,
			u64 v,
			unsigned int stride,
			enum client_tiling tiling,
			int x_pos, int y_pos)
{
	unsigned int swizzle;
	u64 x, y;

	if (tiling == CLIENT_TILING_LINEAR)
		return v;

	y = div64_u64_rem(v, stride, &x);

	if (tiling == CLIENT_TILING_4) {
		v = linear_x_y_to_ftiled_pos(x_pos, y_pos, stride, 32);

		/* no swizzling for f-tiling */
		swizzle = I915_BIT_6_SWIZZLE_NONE;
	} else if (tiling == CLIENT_TILING_X) {
		v = div64_u64_rem(y, 8, &y) * stride * 8;
		v += y * 512;
		v += div64_u64_rem(x, 512, &x) << 12;
		v += x;

		swizzle = gt->ggtt->bit_6_swizzle_x;
	} else {
		const unsigned int ytile_span = 16;
		const unsigned int ytile_height = 512;

		v = div64_u64_rem(y, 32, &y) * stride * 32;
		v += y * ytile_span;
		v += div64_u64_rem(x, ytile_span, &x) * ytile_height;
		v += x;

		swizzle = gt->ggtt->bit_6_swizzle_y;
	}

	switch (swizzle) {
	case I915_BIT_6_SWIZZLE_9:
		v ^= swizzle_bit(9, v);
		break;
	case I915_BIT_6_SWIZZLE_9_10:
		v ^= swizzle_bit(9, v) ^ swizzle_bit(10, v);
		break;
	case I915_BIT_6_SWIZZLE_9_11:
		v ^= swizzle_bit(9, v) ^ swizzle_bit(11, v);
		break;
	case I915_BIT_6_SWIZZLE_9_10_11:
		v ^= swizzle_bit(9, v) ^ swizzle_bit(10, v) ^ swizzle_bit(11, v);
		break;
	}

	return v;
}

static const char *repr_tiling(enum client_tiling tiling)
{
	switch (tiling) {
	case CLIENT_TILING_LINEAR: return "linear";
	case CLIENT_TILING_X: return "X";
	case CLIENT_TILING_Y: return "Y";
	case CLIENT_TILING_4: return "F";
	default: return "unknown";
	}
}

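/*
 * Sanity check a blitted buffer: the first dword must hold start_val, and
 * one randomly chosen (x, y) pixel, located through tiled_offset(), must
 * hold start_val plus its linear pixel index.
 */
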
static int verify_buffer(const struct tiled_blits *t,
			 struct blit_buffer *buf,
			 struct rnd_state *prng)
{
	u32 *vaddr;
	int ret = 0;
	int x, y, p;

	x = i915_prandom_u32_max_state(t->width, prng);
	y = i915_prandom_u32_max_state(t->height, prng);
	p = y * t->width + x;

	vaddr = i915_gem_object_pin_map_unlocked(buf->vma->obj, I915_MAP_WC);
	if (IS_ERR(vaddr))
		return PTR_ERR(vaddr);

	if (vaddr[0] != buf->start_val) {
		ret = -EINVAL;
	} else {
		u64 v = tiled_offset(buf->vma->vm->gt,
				     p * 4, t->width * 4,
				     buf->tiling, x, y);

		if (vaddr[v / sizeof(*vaddr)] != buf->start_val + p)
			ret = -EINVAL;
	}
	if (ret) {
		pr_err("Invalid %s tiling detected at (%d, %d), start_val %x\n",
		       repr_tiling(buf->tiling),
		       x, y, buf->start_val);
		igt_hexdump(vaddr, 4096);
	}

	i915_gem_object_unpin_map(buf->vma->obj);
	return ret;
}

static int move_to_active(struct i915_vma *vma,
			  struct i915_request *rq,
			  unsigned int flags)
{
	int err;

	i915_vma_lock(vma);
	err = i915_request_await_object(rq, vma->obj, false);
	if (err == 0)
		err = i915_vma_move_to_active(vma, rq, flags);
	i915_vma_unlock(vma);

	return err;
}

static int pin_buffer(struct i915_vma *vma, u64 addr)
{
	int err;

	/* Rebind if the vma is already resident at a different address */
	if (drm_mm_node_allocated(&vma->node) && vma->node.start != addr) {
		err = i915_vma_unbind_unlocked(vma);
		if (err)
			return err;
	}

	err = i915_vma_pin(vma, 0, 0, PIN_USER | PIN_OFFSET_FIXED | addr);
	if (err)
		return err;

	return 0;
}

static int
tiled_blit(struct tiled_blits *t,
	   struct blit_buffer *dst, u64 dst_addr,
	   struct blit_buffer *src, u64 src_addr)
{
	struct i915_request *rq;
	int err;

	err = pin_buffer(src->vma, src_addr);
	if (err) {
		pr_err("Cannot pin src @ %llx\n", src_addr);
		return err;
	}

	err = pin_buffer(dst->vma, dst_addr);
	if (err) {
		pr_err("Cannot pin dst @ %llx\n", dst_addr);
		goto err_src;
	}

	err = i915_vma_pin(t->batch, 0, 0, PIN_USER | PIN_HIGH);
	if (err) {
		pr_err("cannot pin batch\n");
		goto err_dst;
	}

	err = prepare_blit(t, dst, src, t->batch->obj);
	if (err)
		goto err_bb;

	rq = intel_context_create_request(t->ce);
	if (IS_ERR(rq)) {
		err = PTR_ERR(rq);
		goto err_bb;
	}

	err = move_to_active(t->batch, rq, 0);
	if (!err)
		err = move_to_active(src->vma, rq, 0);
	if (!err)
		err = move_to_active(dst->vma, rq, 0);
	if (!err)
		err = rq->engine->emit_bb_start(rq,
						t->batch->node.start,
						t->batch->node.size,
						0);
	i915_request_get(rq);
	i915_request_add(rq);
	if (i915_request_wait(rq, 0, HZ / 2) < 0)
		err = -ETIME;
	i915_request_put(rq);

	dst->start_val = src->start_val;
err_bb:
	i915_vma_unpin(t->batch);
err_dst:
	i915_vma_unpin(dst->vma);
err_src:
	i915_vma_unpin(src->vma);
	return err;
}

static struct tiled_blits *
tiled_blits_create(struct intel_engine_cs *engine, struct rnd_state *prng)
{
	struct drm_mm_node hole;
	struct tiled_blits *t;
	u64 hole_size;
	int err;

	t = kzalloc(sizeof(*t), GFP_KERNEL);
	if (!t)
		return ERR_PTR(-ENOMEM);

	t->ce = intel_context_create(engine);
	if (IS_ERR(t->ce)) {
		err = PTR_ERR(t->ce);
		goto err_free;
	}

	t->align = i915_vm_min_alignment(t->ce->vm, INTEL_MEMORY_LOCAL);
	t->align = max(t->align,
		       i915_vm_min_alignment(t->ce->vm, INTEL_MEMORY_SYSTEM));

	hole_size = 2 * round_up(WIDTH * HEIGHT * 4, t->align);
	hole_size *= 2; /* room to maneuver */
	hole_size += 2 * t->align; /* padding on either side */

	mutex_lock(&t->ce->vm->mutex);
	memset(&hole, 0, sizeof(hole));
	err = drm_mm_insert_node_in_range(&t->ce->vm->mm, &hole,
					  hole_size, t->align,
					  I915_COLOR_UNEVICTABLE,
					  0, U64_MAX,
					  DRM_MM_INSERT_BEST);
	if (!err)
		drm_mm_remove_node(&hole);
	mutex_unlock(&t->ce->vm->mutex);
	if (err) {
		err = -ENODEV;
		goto err_put;
	}

	t->hole = hole.start + t->align;
	pr_info("Using hole at %llx\n", t->hole);

	err = tiled_blits_create_buffers(t, WIDTH, HEIGHT, prng);
	if (err)
		goto err_put;

	return t;

err_put:
	intel_context_put(t->ce);
err_free:
	kfree(t);
	return ERR_PTR(err);
}

static void tiled_blits_destroy(struct tiled_blits *t)
{
	tiled_blits_destroy_buffers(t);

	intel_context_put(t->ce);
	kfree(t);
}

static int tiled_blits_prepare(struct tiled_blits *t,
			       struct rnd_state *prng)
{
	u64 offset = round_up(t->width * t->height * 4, t->align);
	u32 *map;
	int err;
	int i;

	map = i915_gem_object_pin_map_unlocked(t->scratch.vma->obj, I915_MAP_WC);
	if (IS_ERR(map))
		return PTR_ERR(map);

	/* Use scratch to fill objects */
	for (i = 0; i < ARRAY_SIZE(t->buffers); i++) {
		fill_scratch(t, map, prandom_u32_state(prng));
		GEM_BUG_ON(verify_buffer(t, &t->scratch, prng));

		err = tiled_blit(t,
				 &t->buffers[i], t->hole + offset,
				 &t->scratch, t->hole);
		if (err == 0)
			err = verify_buffer(t, &t->buffers[i], prng);
		if (err) {
			pr_err("Failed to create buffer %d\n", i);
			break;
		}
	}

	i915_gem_object_unpin_map(t->scratch.vma->obj);
	return err;
}

static int tiled_blits_bounce(struct tiled_blits *t, struct rnd_state *prng)
{
	u64 offset = round_up(t->width * t->height * 4, 2 * t->align);
	int err;

	/* We want to check position-invariant tiling across GTT eviction */

	err = tiled_blit(t,
			 &t->buffers[1], t->hole + offset / 2,
			 &t->buffers[0], t->hole + 2 * offset);
	if (err)
		return err;

	/* Simulating GTT eviction of the same buffer / layout */
	t->buffers[2].tiling = t->buffers[0].tiling;

	/* Reposition so that we overlap the old addresses, and slightly off */
	err = tiled_blit(t,
			 &t->buffers[2], t->hole + t->align,
			 &t->buffers[1], t->hole + 3 * offset / 2);
	if (err)
		return err;

	err = verify_buffer(t, &t->buffers[2], prng);
	if (err)
		return err;

	return 0;
}

static int __igt_client_tiled_blits(struct intel_engine_cs *engine,
				    struct rnd_state *prng)
{
	struct tiled_blits *t;
	int err;

	t = tiled_blits_create(engine, prng);
	if (IS_ERR(t))
		return PTR_ERR(t);

	err = tiled_blits_prepare(t, prng);
	if (err)
		goto err;

	err = tiled_blits_bounce(t, prng);
	if (err)
		goto err;

err:
	tiled_blits_destroy(t);
	return err;
}

static bool has_bit17_swizzle(int sw)
{
	return (sw == I915_BIT_6_SWIZZLE_9_10_17 ||
		sw == I915_BIT_6_SWIZZLE_9_17);
}

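/*
 * Bit-17-based swizzles fold in a physical address bit that varies from
 * page to page, so the CPU-side tiled_offset() calculation above cannot
 * predict the layout; machines with such swizzling are skipped.
 */
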
static bool bad_swizzling(struct drm_i915_private *i915)
{
	struct i915_ggtt *ggtt = to_gt(i915)->ggtt;

	if (i915->quirks & QUIRK_PIN_SWIZZLED_PAGES)
		return true;

	if (has_bit17_swizzle(ggtt->bit_6_swizzle_x) ||
	    has_bit17_swizzle(ggtt->bit_6_swizzle_y))
		return true;

	return false;
}

static int igt_client_tiled_blits(void *arg)
{
	struct drm_i915_private *i915 = arg;
	I915_RND_STATE(prng);
	int inst = 0;

	/* Test requires explicit BLT tiling controls */
	if (GRAPHICS_VER(i915) < 4)
		return 0;

	if (bad_swizzling(i915)) /* Requires sane (sub-page) swizzling */
		return 0;

	do {
		struct intel_engine_cs *engine;
		int err;

		engine = intel_engine_lookup_user(i915,
						  I915_ENGINE_CLASS_COPY,
						  inst++);
		if (!engine)
			return 0;

		err = __igt_client_tiled_blits(engine, &prng);
		if (err == -ENODEV)
			err = 0;
		if (err)
			return err;
	} while (1);
}

int i915_gem_client_blt_live_selftests(struct drm_i915_private *i915)
{
	static const struct i915_subtest tests[] = {
		SUBTEST(igt_client_tiled_blits),
	};

	if (intel_gt_is_wedged(to_gt(i915)))
		return 0;

	return i915_live_subtests(tests, i915);
}