/*
 * SPDX-License-Identifier: MIT
 *
 * Copyright © 2019 Intel Corporation
 */
#ifndef __INTEL_ENGINE_TYPES_H__
#define __INTEL_ENGINE_TYPES_H__
#include <linux/hashtable.h>
#include <linux/irq_work.h>
#include <linux/kref.h>
#include <linux/list.h>
#include <linux/seqlock.h>
#include <linux/types.h>

#include "i915_gem.h"
#include "i915_priolist_types.h"
#include "i915_selftest.h"
#include "i915_timeline_types.h"
#include "intel_workarounds_types.h"

#include "i915_gem_batch_pool.h"
#include "i915_pmu.h"
#define I915_MAX_SLICES 3
#define I915_MAX_SUBSLICES 8

#define I915_CMD_HASH_ORDER 9
struct drm_i915_reg_table;
struct i915_gem_context;
struct i915_request;
struct i915_sched_attr;
struct intel_uncore;

typedef u8 intel_engine_mask_t;
#define ALL_ENGINES ((intel_engine_mask_t)~0ul)
struct intel_hw_status_page {
	struct i915_vma *vma;
	u32 *addr;
};
struct intel_instdone {
	u32 instdone;
	/* The following exist only in the RCS engine */
	u32 slice_common;
	u32 sampler[I915_MAX_SLICES][I915_MAX_SUBSLICES];
	u32 row[I915_MAX_SLICES][I915_MAX_SUBSLICES];
};
struct intel_engine_hangcheck {
	u64 acthd;
	u32 last_seqno;
	u32 next_seqno;
	unsigned long action_timestamp;
	struct intel_instdone instdone;
};
struct intel_ring {
	struct kref ref;
	struct i915_vma *vma;
	void *vaddr;

	struct i915_timeline *timeline;
	struct list_head request_list;
	struct list_head active_link;

	u32 head;
	u32 tail;
	u32 emit;

	u32 space;
	u32 size;
	u32 effective_size;
};
/*
 * We use a single page to load ctx workarounds, so all of these
 * values are expressed in dwords.
 *
 * struct i915_wa_ctx_bb:
 *  offset: specifies the batch starting position, which is also helpful
 *    if we want to place multiple batches at different offsets based on
 *    some criteria. It is not a requirement at the moment but provides
 *    an option for future use.
 *  size: size of the batch in dwords
 */
struct i915_ctx_workarounds {
	struct i915_wa_ctx_bb {
		u32 offset;
		u32 size;
	} indirect_ctx, per_ctx;

	struct i915_vma *vma;
};
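/*
 * Illustrative sketch with hypothetical values: because the fields above
 * are in dwords, a 256-byte indirect_ctx batch placed at the start of the
 * workaround page would be recorded as:
 *
 *	wa_ctx.indirect_ctx.offset = 0;			// dword offset
 *	wa_ctx.indirect_ctx.size = 256 / sizeof(u32);	// 64 dwords
 */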
#define I915_MAX_VCS 4
#define I915_MAX_VECS 2
/*
 * Engine IDs definitions.
 * Keep instances of the same engine type together.
 */
enum intel_engine_id {
	RCS0 = 0,
	BCS0,
	VCS0,
	VCS1,
	VCS2,
	VCS3,
#define _VCS(n) (VCS0 + (n))
	VECS0,
	VECS1,
#define _VECS(n) (VECS0 + (n))
	I915_NUM_ENGINES
};
struct st_preempt_hang {
	struct completion completion;
	unsigned int count;
	bool inject_hang;
};
/**
 * struct intel_engine_execlists - execlist submission queue and port state
 *
 * The struct intel_engine_execlists represents the combined logical state
 * of the driver and the hardware for the execlist mode of submission.
 */
struct intel_engine_execlists {
	/**
	 * @tasklet: softirq tasklet for the bottom-half handler
	 */
	struct tasklet_struct tasklet;

	/**
	 * @default_priolist: priority list for I915_PRIORITY_NORMAL
	 */
	struct i915_priolist default_priolist;

	/**
	 * @no_priolist: priority lists disabled
	 */
	bool no_priolist;
	/**
	 * @submit_reg: gen-specific execlist submission register
	 * set to the ExecList Submission Port (elsp) register pre-Gen11 and to
	 * the ExecList Submission Queue Contents register array for Gen11+
	 */
	u32 __iomem *submit_reg;

	/**
	 * @ctrl_reg: the enhanced execlists control register, used to load the
	 * submit queue on the HW and to request preemptions to idle
	 */
	u32 __iomem *ctrl_reg;
	/**
	 * @port: execlist port states
	 *
	 * For each hardware ELSP (ExecList Submission Port) we keep
	 * track of the last request and the number of times we submitted
	 * that port to hw. We then count the number of times the hw reports
	 * a context completion or preemption. As only one context can
	 * be active on hw, we limit resubmission of a context to port[0].
	 * This is called a lite restore of the context.
	 */
	struct execlist_port {
		/**
		 * @request_count: combined request and submission count
		 */
		struct i915_request *request_count;
#define EXECLIST_COUNT_BITS 2
#define port_request(p) ptr_mask_bits((p)->request_count, EXECLIST_COUNT_BITS)
#define port_count(p) ptr_unmask_bits((p)->request_count, EXECLIST_COUNT_BITS)
#define port_pack(rq, count) ptr_pack_bits(rq, count, EXECLIST_COUNT_BITS)
#define port_unpack(p, count) ptr_unpack_bits((p)->request_count, count, EXECLIST_COUNT_BITS)
#define port_set(p, packed) ((p)->request_count = (packed))
#define port_isset(p) ((p)->request_count)
#define port_index(p, execlists) ((p) - (execlists)->port)

		/**
		 * @context_id: context ID for port
		 */
		GEM_DEBUG_DECL(u32 context_id);

#define EXECLIST_MAX_PORTS 2
	} port[EXECLIST_MAX_PORTS];
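	/*
	 * Illustrative sketch of the pointer packing above: the request
	 * pointer is at least 4-byte aligned, so the low EXECLIST_COUNT_BITS
	 * are free to carry the submission count. For example:
	 *
	 *	unsigned int count;
	 *	struct i915_request *rq;
	 *
	 *	port_set(port, port_pack(rq, 0));	// first submission
	 *	rq = port_unpack(port, &count);
	 *	port_set(port, port_pack(rq, count + 1)); // lite restore
	 */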
	/**
	 * @active: is the HW active? We consider the HW as active after
	 * submitting any context for execution and until we have seen the
	 * last context completion event. After that, we do not expect any
	 * more events until we submit, and so can park the HW.
	 *
	 * As we have a small number of different sources from which we feed
	 * the HW, we track the state of each inside a single bitfield.
	 */
	unsigned int active;
#define EXECLISTS_ACTIVE_USER 0
#define EXECLISTS_ACTIVE_PREEMPT 1
#define EXECLISTS_ACTIVE_HWACK 2
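	/*
	 * Illustrative sketch: each define above is a bit number, so the
	 * field is updated with the regular atomic bitops, e.g.:
	 *
	 *	set_bit(EXECLISTS_ACTIVE_USER,
	 *		(unsigned long *)&execlists->active);
	 *	if (test_bit(EXECLISTS_ACTIVE_PREEMPT,
	 *		     (unsigned long *)&execlists->active))
	 *		...
	 */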
	/**
	 * @port_mask: number of execlist ports - 1
	 */
	unsigned int port_mask;

	/**
	 * @queue_priority_hint: Highest pending priority.
	 *
	 * When we add requests into the queue, or adjust the priority of
	 * executing requests, we compute the maximum priority of those
	 * pending requests. We can then use this value to determine if
	 * we need to preempt the executing requests to service the queue.
	 * However, since we may have recorded the priority of an inflight
	 * request that we wanted to preempt but that has since completed,
	 * the priority hint may no longer match the highest available
	 * request priority at the time of dequeuing.
	 */
	int queue_priority_hint;
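	/*
	 * Illustrative sketch of how the hint gates preemption (rq_prio()
	 * is an assumed helper standing in for however the caller obtains
	 * a request's effective priority):
	 *
	 *	if (rq_prio(rq) > execlists->queue_priority_hint)
	 *		; // worth kicking the tasklet to preempt
	 */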
	/**
	 * @queue: queue of requests, in priority lists
	 */
	struct rb_root_cached queue;
	/**
	 * @csb_write: control register for Context Switch buffer
	 *
	 * Note this register may be either mmio or HWSP shadow.
	 */
	u32 *csb_write;

	/**
	 * @csb_status: status array for Context Switch buffer
	 *
	 * Note these registers may be either mmio or HWSP shadow.
	 */
	u32 *csb_status;

	/**
	 * @preempt_complete_status: expected CSB upon completing preemption
	 */
	u32 preempt_complete_status;

	/**
	 * @csb_head: context status buffer head
	 */
	u8 csb_head;

	I915_SELFTEST_DECLARE(struct st_preempt_hang preempt_hang;)
};
#define INTEL_ENGINE_CS_MAX_NAME 8

struct intel_engine_cs {
	struct drm_i915_private *i915;
	struct intel_uncore *uncore;
	char name[INTEL_ENGINE_CS_MAX_NAME];

	enum intel_engine_id id;
	unsigned int hw_id;
	unsigned int guc_id;
	intel_engine_mask_t mask;

	u8 class;
	u8 instance;

	u32 context_size;
	u32 mmio_base;

	struct intel_ring *buffer;

	struct i915_timeline timeline;

	struct intel_context *kernel_context; /* pinned */
	struct intel_context *preempt_context; /* pinned; optional */

	struct drm_i915_gem_object *default_state;
	void *pinned_default_state;
	/* Rather than have every client wait upon all user interrupts,
	 * with the herd waking after every interrupt and each doing the
	 * heavyweight seqno dance, we delegate the task (of being the
	 * bottom-half of the user interrupt) to the first client. After
	 * every interrupt, we wake up one client, who does the heavyweight
	 * coherent seqno read and either goes back to sleep (if incomplete),
	 * or wakes up all the completed clients in parallel, before then
	 * transferring the bottom-half status to the next client in the queue.
	 *
	 * Compared to walking the entire list of waiters in a single dedicated
	 * bottom-half, we reduce the latency of the first waiter by avoiding
	 * a context switch, but incur additional coherent seqno reads when
	 * following the chain of request breadcrumbs. Since it is most likely
	 * that we have a single client waiting on each seqno, reducing
	 * the overhead of waking that client is much preferred.
	 */
	struct intel_breadcrumbs {
		spinlock_t irq_lock;
		struct list_head signalers;

		struct irq_work irq_work; /* for use from inside irq_lock */

		unsigned int irq_enabled;

		bool irq_armed;
	} breadcrumbs;
	struct intel_engine_pmu {
		/**
		 * @enable: Bitmask of enabled sample events on this engine.
		 *
		 * Bits correspond to sample event types, for instance
		 * I915_SAMPLE_QUEUED is bit 0 etc.
		 */
		u32 enable;
		/**
		 * @enable_count: Reference count for the enabled samplers.
		 *
		 * Index number corresponds to @enum drm_i915_pmu_engine_sample.
		 */
		unsigned int enable_count[I915_ENGINE_SAMPLE_COUNT];
		/**
		 * @sample: Counter values for sampling events.
		 *
		 * Our internal timer stores the current counters in this field.
		 *
		 * Index number corresponds to @enum drm_i915_pmu_engine_sample.
		 */
		struct i915_pmu_sample sample[I915_ENGINE_SAMPLE_COUNT];
	} pmu;
	/*
	 * A pool of objects to use as shadow copies of client batch buffers
	 * when the command parser is enabled. Prevents the client from
	 * modifying the batch contents after software parsing.
	 */
	struct i915_gem_batch_pool batch_pool;
	struct intel_hw_status_page status_page;
	struct i915_ctx_workarounds wa_ctx;
	struct i915_wa_list ctx_wa_list;
	struct i915_wa_list wa_list;
	struct i915_wa_list whitelist;

	u32 irq_keep_mask; /* always keep these interrupts */
	u32 irq_enable_mask; /* bitmask to enable ring interrupt */
	void (*irq_enable)(struct intel_engine_cs *engine);
	void (*irq_disable)(struct intel_engine_cs *engine);
	int (*init_hw)(struct intel_engine_cs *engine);

	struct {
		void (*prepare)(struct intel_engine_cs *engine);
		void (*reset)(struct intel_engine_cs *engine, bool stalled);
		void (*finish)(struct intel_engine_cs *engine);
	} reset;

	void (*park)(struct intel_engine_cs *engine);
	void (*unpark)(struct intel_engine_cs *engine);

	void (*set_default_submission)(struct intel_engine_cs *engine);
	const struct intel_context_ops *cops;

	int (*request_alloc)(struct i915_request *rq);
	int (*init_context)(struct i915_request *rq);

	int (*emit_flush)(struct i915_request *request, u32 mode);
#define EMIT_INVALIDATE BIT(0)
#define EMIT_FLUSH BIT(1)
#define EMIT_BARRIER (EMIT_INVALIDATE | EMIT_FLUSH)
	int (*emit_bb_start)(struct i915_request *rq,
			     u64 offset, u32 length,
			     unsigned int dispatch_flags);
#define I915_DISPATCH_SECURE BIT(0)
#define I915_DISPATCH_PINNED BIT(1)
	int (*emit_init_breadcrumb)(struct i915_request *rq);
	u32 *(*emit_fini_breadcrumb)(struct i915_request *rq, u32 *cs);
	unsigned int emit_fini_breadcrumb_dw;
	/* Pass the request to the hardware queue (e.g. directly into
	 * the legacy ringbuffer or to the end of an execlist).
	 *
	 * This is called from an atomic context with irqs disabled; must
	 * be irq safe.
	 */
	void (*submit_request)(struct i915_request *rq);
	/*
	 * Called when the priority on a request has changed and it and its
	 * dependencies may need rescheduling. Note the request itself may
	 * not be ready to run!
	 */
	void (*schedule)(struct i915_request *request,
			 const struct i915_sched_attr *attr);

	/*
	 * Cancel all requests on the hardware, or queued for execution.
	 * This should only cancel the ready requests that have been
	 * submitted to the engine (via the engine->submit_request callback).
	 * This is called when marking the device as wedged.
	 */
	void (*cancel_requests)(struct intel_engine_cs *engine);

	void (*cleanup)(struct intel_engine_cs *engine);

	struct intel_engine_execlists execlists;
	/* Contexts are pinned whilst they are active on the GPU. The last
	 * context executed remains active whilst the GPU is idle - the
	 * switch away and write to the context object only occurs on the
	 * next execution. Contexts are only unpinned on retirement of the
	 * following request, ensuring that we can always write to the object
	 * on the context switch even after idling. Across suspend, we switch
	 * to the kernel context and trash it as the save may not happen
	 * before the hardware is powered down.
	 */
	struct intel_context *last_retired_context;

	/* status_notifier: list of callbacks for context-switch changes */
	struct atomic_notifier_head context_status_notifier;

	struct intel_engine_hangcheck hangcheck;
#define I915_ENGINE_NEEDS_CMD_PARSER BIT(0)
#define I915_ENGINE_SUPPORTS_STATS BIT(1)
#define I915_ENGINE_HAS_PREEMPTION BIT(2)
#define I915_ENGINE_HAS_SEMAPHORES BIT(3)
#define I915_ENGINE_NEEDS_BREADCRUMB_TASKLET BIT(4)
	unsigned int flags;
	/*
	 * Table of commands the command parser needs to know about
	 * for this engine.
	 */
	DECLARE_HASHTABLE(cmd_hash, I915_CMD_HASH_ORDER);

	/*
	 * Table of registers allowed in commands that read/write registers.
	 */
	const struct drm_i915_reg_table *reg_tables;
	int reg_table_count;

	/*
	 * Returns the bitmask for the length field of the specified command.
	 * Return 0 for an unrecognized/invalid command.
	 *
	 * If the command parser finds an entry for a command in the engine's
	 * cmd_tables, it gets the command's length based on the table entry.
	 * If not, it calls this function to determine the per-engine length
	 * field encoding for the command (i.e. different opcode ranges use
	 * certain bits to encode the command length in the header).
	 */
	u32 (*get_cmd_length_mask)(u32 cmd_header);
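	/*
	 * Illustrative sketch of such a callback (the opcode layout is
	 * hypothetical, not taken from any real command streamer):
	 *
	 *	static u32 example_cmd_length_mask(u32 cmd_header)
	 *	{
	 *		if ((cmd_header >> 29) == 0x3)	// hypothetical client
	 *			return 0xff;		// length in bits 0-7
	 *		return 0x3f;			// default: bits 0-5
	 *	}
	 */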
	struct {
		/**
		 * @lock: Lock protecting the below fields.
		 */
		seqlock_t lock;

		/**
		 * @enabled: Reference count indicating number of listeners.
		 */
		unsigned int enabled;

		/**
		 * @active: Number of contexts currently scheduled in.
		 */
		unsigned int active;

		/**
		 * @enabled_at: Timestamp when busy stats were enabled.
		 */
		ktime_t enabled_at;

		/**
		 * @start: Timestamp of the last idle to active transition.
		 *
		 * Idle is defined as active == 0, busy as active > 0.
		 */
		ktime_t start;

		/**
		 * @total: Total time this engine was busy.
		 *
		 * Accumulated time not counting the most recent block in
		 * cases where the engine is currently busy (active > 0).
		 */
		ktime_t total;
	} stats;
};
static inline bool
intel_engine_needs_cmd_parser(const struct intel_engine_cs *engine)
{
	return engine->flags & I915_ENGINE_NEEDS_CMD_PARSER;
}

static inline bool
intel_engine_supports_stats(const struct intel_engine_cs *engine)
{
	return engine->flags & I915_ENGINE_SUPPORTS_STATS;
}

static inline bool
intel_engine_has_preemption(const struct intel_engine_cs *engine)
{
	return engine->flags & I915_ENGINE_HAS_PREEMPTION;
}

static inline bool
intel_engine_has_semaphores(const struct intel_engine_cs *engine)
{
	return engine->flags & I915_ENGINE_HAS_SEMAPHORES;
}

static inline bool
intel_engine_needs_breadcrumb_tasklet(const struct intel_engine_cs *engine)
{
	return engine->flags & I915_ENGINE_NEEDS_BREADCRUMB_TASKLET;
}
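/*
 * Illustrative usage sketch for the flag helpers above:
 *
 *	if (intel_engine_has_preemption(engine))
 *		; // e.g. a higher priority request may preempt the current one
 */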
#define instdone_slice_mask(dev_priv__) \
	(IS_GEN(dev_priv__, 7) ? \
	 1 : RUNTIME_INFO(dev_priv__)->sseu.slice_mask)

#define instdone_subslice_mask(dev_priv__) \
	(IS_GEN(dev_priv__, 7) ? \
	 1 : RUNTIME_INFO(dev_priv__)->sseu.subslice_mask[0])

#define for_each_instdone_slice_subslice(dev_priv__, slice__, subslice__) \
	for ((slice__) = 0, (subslice__) = 0; \
	     (slice__) < I915_MAX_SLICES; \
	     (subslice__) = ((subslice__) + 1) < I915_MAX_SUBSLICES ? (subslice__) + 1 : 0, \
	       (slice__) += ((subslice__) == 0)) \
		for_each_if((BIT(slice__) & instdone_slice_mask(dev_priv__)) && \
			    (BIT(subslice__) & instdone_subslice_mask(dev_priv__)))
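/*
 * Illustrative usage sketch (the variable names are assumptions for the
 * example):
 *
 *	int slice, subslice;
 *
 *	for_each_instdone_slice_subslice(dev_priv, slice, subslice)
 *		pr_info("sampler[%d][%d] = 0x%08x\n",
 *			slice, subslice,
 *			instdone->sampler[slice][subslice]);
 */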
#endif /* __INTEL_ENGINE_TYPES_H__ */