// SPDX-License-Identifier: MIT
/*
 * Copyright © 2019 Intel Corporation
 */

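/*
 * Build a throwaway batch buffer that runs the "clear residuals" compute
 * kernel (included below as a generated array, one variant for Ivybridge
 * and one for Haswell) across a scratch surface, in order to scrub any
 * userspace state left behind in the EUs and L3 before reuse.
 */
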
#include "gen7_renderclear.h"
#include "i915_drv.h"
#include "intel_gpu_commands.h"

#define MAX_URB_ENTRIES 64
#define STATE_SIZE (4 * 1024)
#define GT3_INLINE_DATA_DELAYS 0x1E00
#define batch_advance(Y, CS) GEM_BUG_ON((Y)->end != (CS))

struct cb_kernel {
	const void *data;
	u32 size;
};

#define CB_KERNEL(name) { .data = (name), .size = sizeof(name) }

#include "ivb_clear_kernel.c"
static const struct cb_kernel cb_kernel_ivb = CB_KERNEL(ivb_clear_kernel);

#include "hsw_clear_kernel.c"
static const struct cb_kernel cb_kernel_hsw = CB_KERNEL(hsw_clear_kernel);

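/*
 * A batch_chunk is a bump allocator over a sub-range of the single batch
 * vma: the command stream and the pipeline state are carved out of the
 * same object, so every offset emitted below is relative to batch_addr().
 * batch_vals holds the per-platform sizing derived in batch_get_defaults().
 */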
struct batch_chunk {
	struct i915_vma *vma;
	u32 offset;
	u32 *start;
	u32 *end;
	u32 max_items;
};

struct batch_vals {
	u32 max_primitives;
	u32 max_urb_entries;
	u32 cmd_size;
	u32 state_size;
	u32 state_start;
	u32 batch_size;
	u32 surface_height;
	u32 surface_width;
	u32 scratch_size;
	u32 max_size;
};

static void
batch_get_defaults(struct drm_i915_private *i915, struct batch_vals *bv)
{
	if (IS_HASWELL(i915)) {
		bv->max_primitives = 280;
		bv->max_urb_entries = MAX_URB_ENTRIES;
		bv->surface_height = 16 * 16;
		bv->surface_width = 32 * 2 * 16;
	} else {
		bv->max_primitives = 128;
		bv->max_urb_entries = MAX_URB_ENTRIES / 2;
		bv->surface_height = 16 * 8;
		bv->surface_width = 32 * 16;
	}

	bv->cmd_size = bv->max_primitives * 4096;
	bv->state_size = STATE_SIZE;
	bv->state_start = bv->cmd_size;
	bv->batch_size = bv->cmd_size + bv->state_size;
	bv->scratch_size = bv->surface_height * bv->surface_width;
	bv->max_size = bv->batch_size + bv->scratch_size;
}

static void batch_init(struct batch_chunk *bc,
		       struct i915_vma *vma,
		       u32 *start, u32 offset, u32 max_bytes)
{
	bc->vma = vma;
	bc->offset = offset;
	bc->start = start + bc->offset / sizeof(*bc->start);
	bc->end = bc->start;
	bc->max_items = max_bytes / sizeof(*bc->start);
}

static u32 batch_offset(const struct batch_chunk *bc, u32 *cs)
{
	return (cs - bc->start) * sizeof(*bc->start) + bc->offset;
}

static u32 batch_addr(const struct batch_chunk *bc)
{
	return bc->vma->node.start;
}

static void batch_add(struct batch_chunk *bc, const u32 d)
{
	GEM_BUG_ON((bc->end - bc->start) >= bc->max_items);
	*bc->end++ = d;
}

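/*
 * Reserve @items dwords at @align byte alignment, zero-filling any
 * padding skipped over; an alignment of 0 means tightly packed.
 */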
static u32 *batch_alloc_items(struct batch_chunk *bc, u32 align, u32 items)
{
	u32 *map;

	if (align) {
		u32 *end = PTR_ALIGN(bc->end, align);

		memset32(bc->end, 0, end - bc->end);
		bc->end = end;
	}

	map = bc->end;
	bc->end += items;

	return map;
}

static u32 *batch_alloc_bytes(struct batch_chunk *bc, u32 align, u32 bytes)
{
	GEM_BUG_ON(!IS_ALIGNED(bytes, sizeof(*bc->start)));
	return batch_alloc_items(bc, align, bytes / sizeof(*bc->start));
}

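/*
 * Write a RENDER_SURFACE_STATE for the scratch area: a 2D
 * B8G8R8A8_UNORM render target located at @dst_offset within the batch
 * vma, i.e. immediately after the command and state chunks.
 */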
static u32
gen7_fill_surface_state(struct batch_chunk *state,
			const u32 dst_offset,
			const struct batch_vals *bv)
{
	u32 surface_h = bv->surface_height;
	u32 surface_w = bv->surface_width;
	u32 *cs = batch_alloc_items(state, 32, 8);
	u32 offset = batch_offset(state, cs);

#define SURFACE_2D 1
#define SURFACEFORMAT_B8G8R8A8_UNORM 0x0C0
#define RENDER_CACHE_READ_WRITE 1

	*cs++ = SURFACE_2D << 29 |
		(SURFACEFORMAT_B8G8R8A8_UNORM << 18) |
		(RENDER_CACHE_READ_WRITE << 8);

	*cs++ = batch_addr(state) + dst_offset;

	*cs++ = ((surface_h / 4 - 1) << 16) | (surface_w / 4 - 1);
	*cs++ = surface_w;
	*cs++ = 0;
	*cs++ = 0;
	*cs++ = 0;
#define SHADER_CHANNELS(r, g, b, a) \
	(((r) << 25) | ((g) << 22) | ((b) << 19) | ((a) << 16))
	*cs++ = SHADER_CHANNELS(4, 5, 6, 7);
	batch_advance(state, cs);

	return offset;
}

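/*
 * Only binding table slot 0 is used; it points back at the surface
 * state written above, and the remaining slots stay zeroed.
 */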
static u32
gen7_fill_binding_table(struct batch_chunk *state,
			const struct batch_vals *bv)
{
	u32 surface_start =
		gen7_fill_surface_state(state, bv->batch_size, bv);
	u32 *cs = batch_alloc_items(state, 32, 8);
	u32 offset = batch_offset(state, cs);

	*cs++ = surface_start - state->offset;
	*cs++ = 0;
	*cs++ = 0;
	*cs++ = 0;
	*cs++ = 0;
	*cs++ = 0;
	*cs++ = 0;
	*cs++ = 0;
	batch_advance(state, cs);

	return offset;
}

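/* Copy the clear kernel binary into the state chunk, 64-byte aligned. */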
static u32
gen7_fill_kernel_data(struct batch_chunk *state,
		      const u32 *data,
		      const u32 size)
{
	return batch_offset(state,
			    memcpy(batch_alloc_bytes(state, 64, size),
				   data, size));
}

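/*
 * Write an interface descriptor referencing the kernel and its binding
 * table, then pad out the remaining (count - 1) descriptor slots with
 * zeroes; the MEDIA_OBJECT commands below only ever use descriptor 0.
 */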
static u32
gen7_fill_interface_descriptor(struct batch_chunk *state,
			       const struct batch_vals *bv,
			       const struct cb_kernel *kernel,
			       unsigned int count)
{
	u32 kernel_offset =
		gen7_fill_kernel_data(state, kernel->data, kernel->size);
	u32 binding_table = gen7_fill_binding_table(state, bv);
	u32 *cs = batch_alloc_items(state, 32, 8 * count);
	u32 offset = batch_offset(state, cs);

	*cs++ = kernel_offset;
	*cs++ = (1 << 7) | (1 << 13);
	*cs++ = 0;
	*cs++ = (binding_table - state->offset) | 1;
	*cs++ = 0;
	*cs++ = 0;
	*cs++ = 0;
	*cs++ = 0;

	/* IDs 1 - (count - 1) are dummy interface descriptors */
	memset32(cs, 0x00, (count - 1) * 8);
	batch_advance(state, cs + (count - 1) * 8);

	return offset;
}

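/*
 * STATE_BASE_ADDRESS: all state bases point into the batch vma, with
 * the surface base additionally offset by @surface_state_base, so the
 * relative offsets emitted above resolve against batch_addr().
 */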
static void
gen7_emit_state_base_address(struct batch_chunk *batch,
			     u32 surface_state_base)
{
	u32 *cs = batch_alloc_items(batch, 0, 12);

	*cs++ = STATE_BASE_ADDRESS | (12 - 2);

	/* general */
	*cs++ = batch_addr(batch) | BASE_ADDRESS_MODIFY;
	/* surface */
	*cs++ = batch_addr(batch) | surface_state_base | BASE_ADDRESS_MODIFY;
	/* dynamic */
	*cs++ = batch_addr(batch) | BASE_ADDRESS_MODIFY;
	/* indirect */
	*cs++ = batch_addr(batch) | BASE_ADDRESS_MODIFY;
	/* instruction */
	*cs++ = batch_addr(batch) | BASE_ADDRESS_MODIFY;

	/* general/dynamic/indirect/instruction access Bound */
	*cs++ = 0;
	*cs++ = BASE_ADDRESS_MODIFY;
	*cs++ = 0;
	*cs++ = BASE_ADDRESS_MODIFY;
	*cs++ = 0;
	*cs++ = 0;
	batch_advance(batch, cs);
}

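/*
 * MEDIA_VFE_STATE: program the thread limit and URB configuration for
 * the media pipeline; the maximum-threads field is encoded as N - 1,
 * hence the bv->max_primitives - 1 below.
 */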
static void
gen7_emit_vfe_state(struct batch_chunk *batch,
		    const struct batch_vals *bv,
		    u32 urb_size, u32 curbe_size,
		    u32 mode)
{
	u32 urb_entries = bv->max_urb_entries;
	u32 threads = bv->max_primitives - 1;
	u32 *cs = batch_alloc_items(batch, 32, 8);

	*cs++ = MEDIA_VFE_STATE | (8 - 2);

	/* scratch buffer */
	*cs++ = 0;

	/* number of threads & urb entries for GPGPU vs Media Mode */
	*cs++ = threads << 16 | urb_entries << 8 | mode << 2;

	*cs++ = 0;

	/* urb entry size & curbe size in 256-bit units */
	*cs++ = urb_size << 16 | curbe_size;

	/* scoreboard */
	*cs++ = 0;
	*cs++ = 0;
	*cs++ = 0;
	batch_advance(batch, cs);
}

static void
gen7_emit_interface_descriptor_load(struct batch_chunk *batch,
				    const u32 interface_descriptor,
				    unsigned int count)
{
	u32 *cs = batch_alloc_items(batch, 8, 4);

	*cs++ = MEDIA_INTERFACE_DESCRIPTOR_LOAD | (4 - 2);
	*cs++ = 0;

	/* interface descriptor data length */
	*cs++ = count * 8 * sizeof(*cs);

	/*
	 * interface descriptor address - it is relative to the dynamic
	 * state base address
	 */
	*cs++ = interface_descriptor;
	batch_advance(batch, cs);
}

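/*
 * MEDIA_OBJECT: dispatch one invocation of the clear kernel. The
 * inline payload carries the (x, y) block offset for this invocation,
 * walking a 16-wide grid of 64x16 blocks, plus GT3_INLINE_DATA_DELAYS;
 * the remainder of the 112 * 8 dwords of inline data stays zeroed.
 */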
static void
gen7_emit_media_object(struct batch_chunk *batch,
		       unsigned int media_object_index)
{
	unsigned int x_offset = (media_object_index % 16) * 64;
	unsigned int y_offset = (media_object_index / 16) * 16;
	unsigned int inline_data_size;
	unsigned int media_batch_size;
	unsigned int i;
	u32 *cs;

	inline_data_size = 112 * 8;
	media_batch_size = inline_data_size + 6;

	cs = batch_alloc_items(batch, 8, media_batch_size);

	*cs++ = MEDIA_OBJECT | (media_batch_size - 2);

	/* interface descriptor offset */
	*cs++ = 0;

	/* without indirect data */
	*cs++ = 0;
	*cs++ = 0;

	/* scoreboard */
	*cs++ = 0;
	*cs++ = 0;

	/* inline */
	*cs++ = (y_offset << 16) | (x_offset);
	*cs++ = 0;
	*cs++ = GT3_INLINE_DATA_DELAYS;
	for (i = 3; i < inline_data_size; i++)
		*cs++ = 0;

	batch_advance(batch, cs);
}

static void gen7_emit_pipeline_flush(struct batch_chunk *batch)
{
	u32 *cs = batch_alloc_items(batch, 0, 5);

	*cs++ = GFX_OP_PIPE_CONTROL(5);
	*cs++ = PIPE_CONTROL_STATE_CACHE_INVALIDATE |
		PIPE_CONTROL_GLOBAL_GTT_IVB;
	*cs++ = 0;
	*cs++ = 0;
	*cs++ = 0;
	batch_advance(batch, cs);
}

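/*
 * Assemble the whole batch: flush, switch to the media pipeline, set up
 * base addresses, VFE state and interface descriptors, then dispatch
 * one MEDIA_OBJECT per thread before terminating the buffer.
 */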
static void emit_batch(struct i915_vma * const vma,
		       u32 *start,
		       const struct batch_vals *bv)
{
	struct drm_i915_private *i915 = vma->vm->i915;
	unsigned int desc_count = 64;
	const u32 urb_size = 112;
	struct batch_chunk cmds, state;
	u32 interface_descriptor;
	unsigned int i;

	batch_init(&cmds, vma, start, 0, bv->cmd_size);
	batch_init(&state, vma, start, bv->state_start, bv->state_size);

	interface_descriptor =
		gen7_fill_interface_descriptor(&state, bv,
					       IS_HASWELL(i915) ?
					       &cb_kernel_hsw :
					       &cb_kernel_ivb,
					       desc_count);
	gen7_emit_pipeline_flush(&cmds);
	batch_add(&cmds, PIPELINE_SELECT | PIPELINE_SELECT_MEDIA);
	batch_add(&cmds, MI_NOOP);
	gen7_emit_state_base_address(&cmds, interface_descriptor);
	gen7_emit_pipeline_flush(&cmds);

	gen7_emit_vfe_state(&cmds, bv, urb_size - 1, 0, 0);

	gen7_emit_interface_descriptor_load(&cmds,
					    interface_descriptor,
					    desc_count);

	for (i = 0; i < bv->max_primitives; i++)
		gen7_emit_media_object(&cmds, i);

	batch_add(&cmds, MI_BATCH_BUFFER_END);
}

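/*
 * Call with a NULL @vma to query the size of the object required,
 * then again with the pinned vma to emit the batch into its map.
 */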
int gen7_setup_clear_gpr_bb(struct intel_engine_cs * const engine,
			    struct i915_vma * const vma)
{
	struct batch_vals bv;
	u32 *batch;

	batch_get_defaults(engine->i915, &bv);
	if (!vma)
		return bv.max_size;

	GEM_BUG_ON(vma->obj->base.size < bv.max_size);

	batch = i915_gem_object_pin_map(vma->obj, I915_MAP_WC);
	if (IS_ERR(batch))
		return PTR_ERR(batch);

	emit_batch(vma, memset(batch, 0, bv.max_size), &bv);

	i915_gem_object_flush_map(vma->obj);
	__i915_gem_object_release_map(vma->obj);

	return 0;
}

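/*
 * Example usage, as a sketch only: the caller-side flow below assumes
 * helpers from the rest of the driver (object creation, vma pinning)
 * and is not part of this file:
 *
 *	size = gen7_setup_clear_gpr_bb(engine, NULL);
 *	obj = i915_gem_object_create_internal(engine->i915, size);
 *	vma = i915_vma_instance(obj, engine->gt->vm, NULL);
 *	err = i915_vma_pin(vma, 0, 0, PIN_USER | PIN_HIGH);
 *	if (err == 0)
 *		err = gen7_setup_clear_gpr_bb(engine, vma);
 */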