// SPDX-License-Identifier: MIT
/*
 * Copyright © 2019 Intel Corporation
 */

#include "gen7_renderclear.h"
#include "i915_drv.h"
#include "intel_gpu_commands.h"

#define GT3_INLINE_DATA_DELAYS 0x1E00
#define batch_advance(Y, CS) GEM_BUG_ON((Y)->end != (CS))
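
/*
 * Minimal sketch of the kernel-blob wrapper assumed by CB_KERNEL() below;
 * the field names follow the .data/.size initializers and their uses in
 * gen7_fill_interface_descriptor().
 */
struct cb_kernel {
	const void *data;
	u32 size;
};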

#define CB_KERNEL(name) { .data = (name), .size = sizeof(name) }

#include "ivb_clear_kernel.c"
static const struct cb_kernel cb_kernel_ivb = CB_KERNEL(ivb_clear_kernel);

#include "hsw_clear_kernel.c"
static const struct cb_kernel cb_kernel_hsw = CB_KERNEL(hsw_clear_kernel);
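
/*
 * Minimal sketches of the bookkeeping structures assumed by the helpers
 * below; the field names are taken from their uses in this file (batch_init,
 * batch_offset, batch_add, batch_get_defaults and friends).
 */
struct batch_chunk {
	struct i915_vma *vma;
	u32 offset;
	u32 *start;
	u32 *end;
	u32 max_items;
};

struct batch_vals {
	u32 max_threads;
	u32 state_start;
	u32 surface_start;
	u32 surface_height;
	u32 surface_width;
	u32 size;
};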

static int num_primitives(const struct batch_vals *bv)
{
	/*
	 * We need to saturate the GPU with work in order to dispatch
	 * a shader on every HW thread, and clear the thread-local registers.
	 * In short, we have to dispatch work faster than the shaders can
	 * run in order to fill the EU and occupy each HW thread.
	 */
	return bv->max_threads;
}

static void
batch_get_defaults(struct drm_i915_private *i915, struct batch_vals *bv)
{
	if (IS_HASWELL(i915)) {
		switch (INTEL_INFO(i915)->gt) {
		default:
		case 1:
			bv->max_threads = 70;
			break;
		case 2:
			bv->max_threads = 140;
			break;
		case 3:
			bv->max_threads = 280;
			break;
		}
		bv->surface_height = 16 * 16;
		bv->surface_width = 32 * 2 * 16;
	} else {
		switch (INTEL_INFO(i915)->gt) {
		default:
		case 1: /* including vlv */
			bv->max_threads = 36;
			break;
		case 2:
			bv->max_threads = 128;
			break;
		}
		bv->surface_height = 16 * 8;
		bv->surface_width = 32 * 16;
	}
	bv->state_start = round_up(SZ_1K + num_primitives(bv) * 64, SZ_4K);
	bv->surface_start = bv->state_start + SZ_4K;
	bv->size = bv->surface_start + bv->surface_height * bv->surface_width;
}
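
/*
 * Worked example, assuming the IVB GT2 defaults above (max_threads = 128,
 * surface_height = 16 * 8 = 128, surface_width = 32 * 16 = 512):
 * state_start = round_up(1K + 128 * 64, 4K) = 0x3000, surface_start =
 * 0x4000 and size = 0x4000 + 128 * 512 = 0x14000.
 */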

static void batch_init(struct batch_chunk *bc,
		       struct i915_vma *vma,
		       u32 *start, u32 offset, u32 max_bytes)
{
	bc->vma = vma;
	bc->offset = offset;
	bc->start = start + bc->offset / sizeof(*bc->start);
	bc->end = bc->start;
	bc->max_items = max_bytes / sizeof(*bc->start);
}

static u32 batch_offset(const struct batch_chunk *bc, u32 *cs)
{
	return (cs - bc->start) * sizeof(*bc->start) + bc->offset;
}

static u32 batch_addr(const struct batch_chunk *bc)
{
	return bc->vma->node.start;
}

static void batch_add(struct batch_chunk *bc, const u32 d)
{
	GEM_BUG_ON((bc->end - bc->start) >= bc->max_items);
	*bc->end++ = d;
}

static u32 *batch_alloc_items(struct batch_chunk *bc, u32 align, u32 items)
{
	u32 *map;

	if (align) {
		u32 *end = PTR_ALIGN(bc->end, align);

		memset32(bc->end, 0, end - bc->end);
		bc->end = end;
	}

	map = bc->end;
	bc->end += items;

	return map;
}

static u32 *batch_alloc_bytes(struct batch_chunk *bc, u32 align, u32 bytes)
{
	GEM_BUG_ON(!IS_ALIGNED(bytes, sizeof(*bc->start)));
	return batch_alloc_items(bc, align, bytes / sizeof(*bc->start));
}
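
/*
 * Usage sketch (illustrative values): a chunk initialised at offset 0x3000
 * that has emitted 8 dwords reports batch_offset(bc, bc->end) ==
 * 0x3000 + 8 * 4 = 0x3020, i.e. offsets are bytes from the start of the
 * whole buffer, not from the start of the chunk.
 */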

static u32
gen7_fill_surface_state(struct batch_chunk *state,
			const u32 dst_offset,
			const struct batch_vals *bv)
{
	u32 surface_h = bv->surface_height;
	u32 surface_w = bv->surface_width;
	u32 *cs = batch_alloc_items(state, 32, 8);
	u32 offset = batch_offset(state, cs);

#define SURFACE_2D 1
#define SURFACEFORMAT_B8G8R8A8_UNORM 0x0C0
#define RENDER_CACHE_READ_WRITE 1

	*cs++ = SURFACE_2D << 29 |
		(SURFACEFORMAT_B8G8R8A8_UNORM << 18) |
		(RENDER_CACHE_READ_WRITE << 8);

	*cs++ = batch_addr(state) + dst_offset;

	*cs++ = ((surface_h / 4 - 1) << 16) | (surface_w / 4 - 1);
	*cs++ = surface_w;
	*cs++ = 0;
	*cs++ = 0;
	*cs++ = 0;
#define SHADER_CHANNELS(r, g, b, a) \
	(((r) << 25) | ((g) << 22) | ((b) << 19) | ((a) << 16))
	*cs++ = SHADER_CHANNELS(4, 5, 6, 7);
	batch_advance(state, cs);

	return offset;
}
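
/*
 * For example, with the IVB defaults above (surface_height = 128,
 * surface_width = 512), the width/height dword works out to
 * ((128 / 4 - 1) << 16) | (512 / 4 - 1) = 0x001f007f.
 */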

static u32
gen7_fill_binding_table(struct batch_chunk *state,
			const struct batch_vals *bv)
{
	u32 surface_start =
		gen7_fill_surface_state(state, bv->surface_start, bv);
	u32 *cs = batch_alloc_items(state, 32, 8);
	u32 offset = batch_offset(state, cs);

	*cs++ = surface_start - state->offset;
	*cs++ = 0;
	*cs++ = 0;
	*cs++ = 0;
	*cs++ = 0;
	*cs++ = 0;
	*cs++ = 0;
	*cs++ = 0;
	batch_advance(state, cs);

	return offset;
}
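
/*
 * Note: surface_start above is a byte offset into the whole buffer (see
 * batch_offset()); subtracting state->offset presumably rebases it relative
 * to the start of the state chunk before it is stored as the binding-table
 * entry.
 */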

static u32
gen7_fill_kernel_data(struct batch_chunk *state,
		      const u32 *data,
		      const u32 size)
{
	return batch_offset(state,
			    memcpy(batch_alloc_bytes(state, 64, size),
				   data, size));
}

static u32
gen7_fill_interface_descriptor(struct batch_chunk *state,
			       const struct batch_vals *bv,
			       const struct cb_kernel *kernel,
			       unsigned int count)
{
	u32 kernel_offset =
		gen7_fill_kernel_data(state, kernel->data, kernel->size);
	u32 binding_table = gen7_fill_binding_table(state, bv);
	u32 *cs = batch_alloc_items(state, 32, 8 * count);
	u32 offset = batch_offset(state, cs);

	*cs++ = kernel_offset;
	*cs++ = (1 << 7) | (1 << 13);
	*cs++ = 0;
	*cs++ = (binding_table - state->offset) | 1;
	*cs++ = 0;
	*cs++ = 0;
	*cs++ = 0;
	*cs++ = 0;

	/* 1 - 63 dummy idds */
	memset32(cs, 0x00, (count - 1) * 8);
	batch_advance(state, cs + (count - 1) * 8);

	return offset;
}
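
/*
 * Each interface descriptor is 8 dwords; only the first is populated and the
 * remaining (count - 1) are zeroed above, which matches the
 * count * 8 * sizeof(*cs) byte length programmed by
 * gen7_emit_interface_descriptor_load() below.
 */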

static void
gen7_emit_state_base_address(struct batch_chunk *batch,
			     u32 surface_state_base)
{
	u32 *cs = batch_alloc_items(batch, 0, 10);

	*cs++ = STATE_BASE_ADDRESS | (10 - 2);
	/* general */
	*cs++ = batch_addr(batch) | BASE_ADDRESS_MODIFY;
	/* surface */
	*cs++ = (batch_addr(batch) + surface_state_base) | BASE_ADDRESS_MODIFY;
	/* dynamic */
	*cs++ = batch_addr(batch) | BASE_ADDRESS_MODIFY;
	/* indirect */
	*cs++ = batch_addr(batch) | BASE_ADDRESS_MODIFY;
	/* instruction */
	*cs++ = batch_addr(batch) | BASE_ADDRESS_MODIFY;

	/* general/dynamic/indirect/instruction access Bound */
	*cs++ = 0;
	*cs++ = BASE_ADDRESS_MODIFY;
	*cs++ = 0;
	*cs++ = BASE_ADDRESS_MODIFY;
	batch_advance(batch, cs);
}
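
/*
 * All bases point at the start of the batch vma except the surface-state
 * base, which is offset by surface_state_base; the trailing bound dwords
 * only set BASE_ADDRESS_MODIFY, presumably leaving the access bounds unset.
 */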

static void
gen7_emit_vfe_state(struct batch_chunk *batch,
		    const struct batch_vals *bv,
		    u32 urb_size, u32 curbe_size,
		    u32 mode)
{
	u32 threads = bv->max_threads - 1;
	u32 *cs = batch_alloc_items(batch, 32, 8);

	*cs++ = MEDIA_VFE_STATE | (8 - 2);

	/* scratch buffer */
	*cs++ = 0;

	/* number of threads & urb entries for GPGPU vs Media Mode */
	*cs++ = threads << 16 | 1 << 8 | mode << 2;

	*cs++ = 0;

	/* urb entry size & curbe size in 256 bits unit */
	*cs++ = urb_size << 16 | curbe_size;

	/* scoreboard */
	*cs++ = 0;
	*cs++ = 0;
	*cs++ = 0;
	batch_advance(batch, cs);
}
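
/*
 * Worked example: with the IVB GT2 default of max_threads = 128, the thread
 * control dword above is 127 << 16 | 1 << 8 | 0 << 2 = 0x007f0100 for the
 * mode = 0 call made from emit_batch().
 */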

static void
gen7_emit_interface_descriptor_load(struct batch_chunk *batch,
				    const u32 interface_descriptor,
				    unsigned int count)
{
	u32 *cs = batch_alloc_items(batch, 8, 4);

	*cs++ = MEDIA_INTERFACE_DESCRIPTOR_LOAD | (4 - 2);
	*cs++ = 0;

	/* interface descriptor data length (in bytes) */
	*cs++ = count * 8 * sizeof(*cs);

	/*
	 * interface descriptor address - it is relative to the dynamics base
	 * address
	 */
	*cs++ = interface_descriptor;
	batch_advance(batch, cs);
}

static void
gen7_emit_media_object(struct batch_chunk *batch,
		       unsigned int media_object_index)
{
	unsigned int x_offset = (media_object_index % 16) * 64;
	unsigned int y_offset = (media_object_index / 16) * 16;
	unsigned int pkt = 6 + 3;
	u32 *cs;

	cs = batch_alloc_items(batch, 8, pkt);

	*cs++ = MEDIA_OBJECT | (pkt - 2);

	/* interface descriptor offset */
	*cs++ = 0;

	/* without indirect data */
	*cs++ = 0;
	*cs++ = 0;

	/* scoreboard */
	*cs++ = 0;
	*cs++ = 0;

	/* inline */
	*cs++ = y_offset << 16 | x_offset;
	*cs++ = 0;
	*cs++ = GT3_INLINE_DATA_DELAYS;

	batch_advance(batch, cs);
}
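
/*
 * Illustrative mapping: media object index 17 gives x_offset =
 * (17 % 16) * 64 = 64 and y_offset = (17 / 16) * 16 = 16, so successive
 * objects step across a 16-column grid in 64x16 increments, with the offsets
 * handed to the clear kernel as inline data.
 */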

static void gen7_emit_pipeline_flush(struct batch_chunk *batch)
{
	u32 *cs = batch_alloc_items(batch, 0, 4);

	*cs++ = GFX_OP_PIPE_CONTROL(4);
	*cs++ = PIPE_CONTROL_RENDER_TARGET_CACHE_FLUSH |
		PIPE_CONTROL_DEPTH_CACHE_FLUSH |
		PIPE_CONTROL_DC_FLUSH_ENABLE |
		PIPE_CONTROL_CS_STALL;
	*cs++ = 0;
	*cs++ = 0;

	batch_advance(batch, cs);
}

static void gen7_emit_pipeline_invalidate(struct batch_chunk *batch)
{
	u32 *cs = batch_alloc_items(batch, 0, 10);

	/* ivb: Stall before STATE_CACHE_INVALIDATE */
	*cs++ = GFX_OP_PIPE_CONTROL(5);
	*cs++ = PIPE_CONTROL_STALL_AT_SCOREBOARD |
		PIPE_CONTROL_CS_STALL;
	*cs++ = 0;
	*cs++ = 0;
	*cs++ = 0;

	*cs++ = GFX_OP_PIPE_CONTROL(5);
	*cs++ = PIPE_CONTROL_STATE_CACHE_INVALIDATE;
	*cs++ = 0;
	*cs++ = 0;
	*cs++ = 0;

	batch_advance(batch, cs);
}
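
/*
 * The two back-to-back PIPE_CONTROLs above follow the ivb note in the
 * comment: a full CS stall with stall-at-scoreboard is emitted first, and
 * only then the state-cache invalidate, presumably so the invalidate cannot
 * overtake outstanding work.
 */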

static void emit_batch(struct i915_vma * const vma,
		       u32 *start,
		       const struct batch_vals *bv)
{
	struct drm_i915_private *i915 = vma->vm->i915;
	const unsigned int desc_count = 1;
	const unsigned int urb_size = 1;
	struct batch_chunk cmds, state;
	u32 descriptors;
	unsigned int i;

	batch_init(&cmds, vma, start, 0, bv->state_start);
	batch_init(&state, vma, start, bv->state_start, SZ_4K);

	descriptors = gen7_fill_interface_descriptor(&state, bv,
						     IS_HASWELL(i915) ?
						     &cb_kernel_hsw :
						     &cb_kernel_ivb,
						     desc_count);

	/* Reset inherited context registers */
	gen7_emit_pipeline_flush(&cmds);
	gen7_emit_pipeline_invalidate(&cmds);
	batch_add(&cmds, MI_LOAD_REGISTER_IMM(2));
	batch_add(&cmds, i915_mmio_reg_offset(CACHE_MODE_0_GEN7));
	batch_add(&cmds, 0xffff0000 |
		  ((IS_IVB_GT1(i915) || IS_VALLEYVIEW(i915)) ?
		   HIZ_RAW_STALL_OPT_DISABLE :
		   0));
	batch_add(&cmds, i915_mmio_reg_offset(CACHE_MODE_1));
	batch_add(&cmds, 0xffff0000 | PIXEL_SUBSPAN_COLLECT_OPT_DISABLE);
	gen7_emit_pipeline_invalidate(&cmds);
	gen7_emit_pipeline_flush(&cmds);

	/* Switch to the media pipeline and our base address */
	gen7_emit_pipeline_invalidate(&cmds);
	batch_add(&cmds, PIPELINE_SELECT | PIPELINE_SELECT_MEDIA);
	batch_add(&cmds, MI_NOOP);
	gen7_emit_pipeline_invalidate(&cmds);

	gen7_emit_pipeline_flush(&cmds);
	gen7_emit_state_base_address(&cmds, descriptors);
	gen7_emit_pipeline_invalidate(&cmds);

	/* Set the clear-residual kernel state */
	gen7_emit_vfe_state(&cmds, bv, urb_size - 1, 0, 0);
	gen7_emit_interface_descriptor_load(&cmds, descriptors, desc_count);

	/* Execute the kernel on all HW threads */
	for (i = 0; i < num_primitives(bv); i++)
		gen7_emit_media_object(&cmds, i);
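
	/*
	 * The loop above emits one MEDIA_OBJECT per HW thread, since
	 * num_primitives() simply returns max_threads, matching the
	 * saturation rationale documented in num_primitives().
	 */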

	batch_add(&cmds, MI_BATCH_BUFFER_END);
}

int gen7_setup_clear_gpr_bb(struct intel_engine_cs * const engine,
			    struct i915_vma * const vma)
{
	struct batch_vals bv;
	u32 *batch;

	batch_get_defaults(engine->i915, &bv);

	GEM_BUG_ON(vma->obj->base.size < bv.size);

	batch = i915_gem_object_pin_map(vma->obj, I915_MAP_WC);
	if (IS_ERR(batch))
		return PTR_ERR(batch);
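
	/*
	 * The backing store is mapped write-combined, cleared, and the batch
	 * and state are emitted into it; the explicit flush below ensures the
	 * CPU writes are visible before the mapping is released.
	 */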

	emit_batch(vma, memset(batch, 0, bv.size), &bv);

	i915_gem_object_flush_map(vma->obj);
	__i915_gem_object_release_map(vma->obj);

	return 0;
}