/* SPDX-License-Identifier: MIT */
#ifndef _INTEL_RINGBUFFER_H_
#define _INTEL_RINGBUFFER_H_

#include <drm/drm_util.h>

#include <linux/hashtable.h>
#include <linux/irq_work.h>
#include <linux/random.h>
#include <linux/seqlock.h>

#include "i915_pmu.h"
#include "i915_reg.h"
#include "i915_request.h"
#include "i915_selftest.h"
#include "gt/intel_timeline.h"
#include "intel_engine_types.h"
#include "intel_gpu_commands.h"
#include "intel_workarounds.h"

struct drm_printer;
struct intel_gt;

/* Early gen2 devices have a cacheline of just 32 bytes, using 64 is overkill,
 * but keeps the logic simple. Indeed, the whole purpose of this macro is just
 * to give some indication as to some of the magic values used in the various
 * workarounds!
 */
#define CACHELINE_BYTES 64
#define CACHELINE_DWORDS (CACHELINE_BYTES / sizeof(u32))

#define ENGINE_TRACE(e, fmt, ...) do {                                  \
        const struct intel_engine_cs *e__ __maybe_unused = (e);         \
        GEM_TRACE("%s %s: " fmt,                                        \
                  dev_name(e__->i915->drm.dev), e__->name,              \
                  ##__VA_ARGS__);                                       \
} while (0)
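
/*
 * Usage sketch (illustrative, not part of the original header): the macro
 * prefixes a GEM_TRACE() with the device name and engine name, so
 *
 *        ENGINE_TRACE(engine, "semaphore yield: %08x\n", status);
 *
 * would emit something like "0000:00:02.0 rcs0: semaphore yield: ...".
 */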

/*
 * The register defines to be used with the following macros need to accept a
 * base param, e.g.:
 *
 * REG_FOO(base) _MMIO((base) + <relative offset>)
 * ENGINE_READ(engine, REG_FOO);
 *
 * Register arrays are to be defined and accessed as follows:
 *
 * REG_BAR(base, i) _MMIO((base) + <relative offset> + (i) * <shift>)
 * ENGINE_READ_IDX(engine, REG_BAR, i)
 */

#define __ENGINE_REG_OP(op__, engine__, ...) \
        intel_uncore_##op__((engine__)->uncore, __VA_ARGS__)

#define __ENGINE_READ_OP(op__, engine__, reg__) \
        __ENGINE_REG_OP(op__, (engine__), reg__((engine__)->mmio_base))

#define ENGINE_READ16(...)      __ENGINE_READ_OP(read16, __VA_ARGS__)
#define ENGINE_READ(...)        __ENGINE_READ_OP(read, __VA_ARGS__)
#define ENGINE_READ_FW(...)     __ENGINE_READ_OP(read_fw, __VA_ARGS__)
#define ENGINE_POSTING_READ(...) __ENGINE_READ_OP(posting_read_fw, __VA_ARGS__)
#define ENGINE_POSTING_READ16(...) __ENGINE_READ_OP(posting_read16, __VA_ARGS__)

#define ENGINE_READ64(engine__, lower_reg__, upper_reg__) \
        __ENGINE_REG_OP(read64_2x32, (engine__), \
                        lower_reg__((engine__)->mmio_base), \
                        upper_reg__((engine__)->mmio_base))

#define ENGINE_READ_IDX(engine__, reg__, idx__) \
        __ENGINE_REG_OP(read, (engine__), reg__((engine__)->mmio_base, (idx__)))

#define __ENGINE_WRITE_OP(op__, engine__, reg__, val__) \
        __ENGINE_REG_OP(op__, (engine__), reg__((engine__)->mmio_base), (val__))

#define ENGINE_WRITE16(...)     __ENGINE_WRITE_OP(write16, __VA_ARGS__)
#define ENGINE_WRITE(...)       __ENGINE_WRITE_OP(write, __VA_ARGS__)
#define ENGINE_WRITE_FW(...)    __ENGINE_WRITE_OP(write_fw, __VA_ARGS__)
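
/*
 * Usage sketch (illustrative): with a register define that takes a base
 * param, e.g. RING_HEAD(base) or RING_TAIL(base) from i915_reg.h, the
 * engine helpers resolve the engine's mmio_base automatically:
 *
 *        head = ENGINE_READ(engine, RING_HEAD);
 *        ENGINE_WRITE(engine, RING_TAIL, ring->tail);
 *
 * The _FW variants skip the forcewake bookkeeping and may only be used
 * while the caller already holds the required forcewake.
 */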

#define GEN6_RING_FAULT_REG_READ(engine__) \
        intel_uncore_read((engine__)->uncore, RING_FAULT_REG(engine__))

#define GEN6_RING_FAULT_REG_POSTING_READ(engine__) \
        intel_uncore_posting_read((engine__)->uncore, RING_FAULT_REG(engine__))

#define GEN6_RING_FAULT_REG_RMW(engine__, clear__, set__) \
({ \
        u32 __val; \
\
        __val = intel_uncore_read((engine__)->uncore, \
                                  RING_FAULT_REG(engine__)); \
        __val &= ~(clear__); \
        __val |= (set__); \
        intel_uncore_write((engine__)->uncore, RING_FAULT_REG(engine__), \
                           __val); \
})
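
/*
 * Usage sketch (illustrative): clearing a stale fault bit via the
 * read-modify-write helper, as done when resetting engine error state:
 *
 *        GEN6_RING_FAULT_REG_RMW(engine, RING_FAULT_VALID, 0);
 */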

/* seqno size is actually only a uint32, but since we plan to use MI_FLUSH_DW
 * to do the writes, and that must have qword-aligned offsets, simply pretend
 * it's 8 bytes.
 */

static inline unsigned int
execlists_num_ports(const struct intel_engine_execlists * const execlists)
{
        return execlists->port_mask + 1;
}

static inline struct i915_request *
execlists_active(const struct intel_engine_execlists *execlists)
{
        struct i915_request * const *cur, * const *old, *active;

        cur = READ_ONCE(execlists->active);
        smp_rmb(); /* pairs with overwrite protection in process_csb() */
        do {
                old = cur;

                active = READ_ONCE(*cur);
                cur = READ_ONCE(execlists->active);

                smp_rmb(); /* and complete the seqlock retry */
        } while (unlikely(cur != old));

        return active;
}
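
/*
 * Usage sketch (illustrative): peek at the request currently executing on
 * the HW without taking the tasklet lock; the seqlock-style retry loop
 * above guarantees a consistent snapshot:
 *
 *        struct i915_request *rq = execlists_active(&engine->execlists);
 *        if (rq)
 *                ... inspect the in-flight request ...
 */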

static inline void
execlists_active_lock_bh(struct intel_engine_execlists *execlists)
{
        local_bh_disable(); /* prevent local softirq and lock recursion */
        tasklet_lock(&execlists->tasklet);
}

static inline void
execlists_active_unlock_bh(struct intel_engine_execlists *execlists)
{
        tasklet_unlock(&execlists->tasklet);
        local_bh_enable(); /* restore softirq, and kick ksoftirqd! */
}
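
/*
 * Usage sketch (illustrative): the _bh lock pair serialises against the
 * submission tasklet so the execlists ports can be inspected safely:
 *
 *        execlists_active_lock_bh(&engine->execlists);
 *        rq = execlists_active(&engine->execlists);
 *        ... use rq while the ports cannot be rewritten underneath us ...
 *        execlists_active_unlock_bh(&engine->execlists);
 */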

struct i915_request *
execlists_unwind_incomplete_requests(struct intel_engine_execlists *execlists);

static inline u32
intel_read_status_page(const struct intel_engine_cs *engine, int reg)
{
        /* Ensure that the compiler doesn't optimize away the load. */
        return READ_ONCE(engine->status_page.addr[reg]);
}

static inline void
intel_write_status_page(struct intel_engine_cs *engine, int reg, u32 value)
{
        /* Writing into the status page should be done sparingly. Since
         * we do so only when we are uncertain of the device state, we
         * take a bit of extra paranoia to try and ensure that the HWS
         * takes the value we give and that it doesn't end up trapped
         * inside the CPU!
         */
        if (static_cpu_has(X86_FEATURE_CLFLUSH)) {
                mb();
                clflush(&engine->status_page.addr[reg]);
                engine->status_page.addr[reg] = value;
                clflush(&engine->status_page.addr[reg]);
                mb();
        } else {
                WRITE_ONCE(engine->status_page.addr[reg], value);
        }
}

/*
 * Reads a dword out of the status page, which is written to from the command
 * queue by automatic updates, MI_REPORT_HEAD, MI_STORE_DATA_INDEX, or
 * MI_STORE_DATA_IMM.
 *
 * The following dwords have a reserved meaning:
 * 0x00: ISR copy, updated when an ISR bit not set in the HWSTAM changes.
 * 0x04: ring 0 head pointer
 * 0x05: ring 1 head pointer (915-class)
 * 0x06: ring 2 head pointer (915-class)
 * 0x10-0x1b: Context status DWords (GM45)
 * 0x1f: Last written status offset. (GM45)
 * 0x20-0x2f: Reserved (Gen6+)
 *
 * The area from dword 0x30 to 0x3ff is available for driver usage.
 */
#define I915_GEM_HWS_PREEMPT            0x32
#define I915_GEM_HWS_PREEMPT_ADDR       (I915_GEM_HWS_PREEMPT * sizeof(u32))
#define I915_GEM_HWS_SEQNO              0x40
#define I915_GEM_HWS_SEQNO_ADDR         (I915_GEM_HWS_SEQNO * sizeof(u32))
#define I915_GEM_HWS_SCRATCH            0x80

#define I915_HWS_CSB_BUF0_INDEX         0x10
#define I915_HWS_CSB_WRITE_INDEX        0x1f
#define CNL_HWS_CSB_WRITE_INDEX         0x2f
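
/*
 * Usage sketch (illustrative): the defines above are dword indices into
 * the status page, so a breadcrumb posted by the engine can be sampled
 * with the status-page accessor:
 *
 *        u32 seqno = intel_read_status_page(engine, I915_GEM_HWS_SEQNO);
 */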

void intel_engine_stop(struct intel_engine_cs *engine);
void intel_engine_cleanup(struct intel_engine_cs *engine);

int intel_engines_init_mmio(struct intel_gt *gt);
int intel_engines_init(struct intel_gt *gt);

void intel_engine_free_request_pool(struct intel_engine_cs *engine);

void intel_engines_release(struct intel_gt *gt);
void intel_engines_free(struct intel_gt *gt);

int intel_engine_init_common(struct intel_engine_cs *engine);
void intel_engine_cleanup_common(struct intel_engine_cs *engine);

int intel_engine_resume(struct intel_engine_cs *engine);

int intel_ring_submission_setup(struct intel_engine_cs *engine);

int intel_engine_stop_cs(struct intel_engine_cs *engine);
void intel_engine_cancel_stop_cs(struct intel_engine_cs *engine);

void intel_engine_set_hwsp_writemask(struct intel_engine_cs *engine, u32 mask);

u64 intel_engine_get_active_head(const struct intel_engine_cs *engine);
u64 intel_engine_get_last_batch_head(const struct intel_engine_cs *engine);

void intel_engine_get_instdone(const struct intel_engine_cs *engine,
                               struct intel_instdone *instdone);

void intel_engine_init_execlists(struct intel_engine_cs *engine);

static inline u32 *__gen8_emit_pipe_control(u32 *batch, u32 flags0, u32 flags1, u32 offset)
{
        memset(batch, 0, 6 * sizeof(u32));

        batch[0] = GFX_OP_PIPE_CONTROL(6) | flags0;
        batch[1] = flags1;
        batch[2] = offset;

        return batch + 6;
}

static inline u32 *gen8_emit_pipe_control(u32 *batch, u32 flags, u32 offset)
{
        return __gen8_emit_pipe_control(batch, 0, flags, offset);
}

static inline u32 *gen12_emit_pipe_control(u32 *batch, u32 flags0, u32 flags1, u32 offset)
{
        return __gen8_emit_pipe_control(batch, flags0, flags1, offset);
}
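
/*
 * Usage sketch (illustrative, assuming PIPE_CONTROL flags from
 * intel_gpu_commands.h): emitting a CS stall with a render cache flush
 * while building a command stream payload:
 *
 *        cs = gen8_emit_pipe_control(cs,
 *                                    PIPE_CONTROL_RENDER_TARGET_CACHE_FLUSH |
 *                                    PIPE_CONTROL_CS_STALL,
 *                                    0);
 */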

static inline u32 *
__gen8_emit_ggtt_write_rcs(u32 *cs, u32 value, u32 gtt_offset, u32 flags0, u32 flags1)
{
        /* We're using qword writes, so the offset should be aligned to 8 bytes. */
        GEM_BUG_ON(!IS_ALIGNED(gtt_offset, 8));

        /* w/a: for post-sync ops following a GPGPU operation we
         * need a prior CS_STALL, which is emitted by the flush
         * following the batch.
         */
        *cs++ = GFX_OP_PIPE_CONTROL(6) | flags0;
        *cs++ = flags1 | PIPE_CONTROL_QW_WRITE | PIPE_CONTROL_GLOBAL_GTT_IVB;
        *cs++ = gtt_offset;
        *cs++ = 0;
        *cs++ = value;
        /* The qword write clobbers one extra dword of the HWS. */
        *cs++ = 0;

        return cs;
}

static inline u32 *
gen8_emit_ggtt_write_rcs(u32 *cs, u32 value, u32 gtt_offset, u32 flags)
{
        return __gen8_emit_ggtt_write_rcs(cs, value, gtt_offset, 0, flags);
}

static inline u32 *
gen12_emit_ggtt_write_rcs(u32 *cs, u32 value, u32 gtt_offset, u32 flags0, u32 flags1)
{
        return __gen8_emit_ggtt_write_rcs(cs, value, gtt_offset, flags0, flags1);
}

static inline u32 *
gen8_emit_ggtt_write(u32 *cs, u32 value, u32 gtt_offset, u32 flags)
{
        /* w/a: bit 5 needs to be zero for MI_FLUSH_DW address. */
        GEM_BUG_ON(gtt_offset & (1 << 5));
        /* Offset should be aligned to 8 bytes for both (QW/DW) write types */
        GEM_BUG_ON(!IS_ALIGNED(gtt_offset, 8));

        *cs++ = (MI_FLUSH_DW + 1) | MI_FLUSH_DW_OP_STOREDW | flags;
        *cs++ = gtt_offset | MI_FLUSH_DW_USE_GTT;
        *cs++ = 0;
        *cs++ = value;

        return cs;
}
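
/*
 * Usage sketch (illustrative): a fini-breadcrumb emitter would use the
 * GGTT write helpers to post the request's seqno into the HWSP, e.g.
 *
 *        cs = gen8_emit_ggtt_write(cs,
 *                                  rq->fence.seqno,
 *                                  i915_request_active_timeline(rq)->hwsp_offset,
 *                                  0);
 */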

static inline void __intel_engine_reset(struct intel_engine_cs *engine,
                                        bool stalled)
{
        if (engine->reset.rewind)
                engine->reset.rewind(engine, stalled);
        engine->serial++; /* contexts lost */
}

bool intel_engines_are_idle(struct intel_gt *gt);
bool intel_engine_is_idle(struct intel_engine_cs *engine);
void intel_engine_flush_submission(struct intel_engine_cs *engine);

void intel_engines_reset_default_submission(struct intel_gt *gt);

bool intel_engine_can_store_dword(struct intel_engine_cs *engine);

__printf(3, 4)
void intel_engine_dump(struct intel_engine_cs *engine,
                       struct drm_printer *m,
                       const char *header, ...);

ktime_t intel_engine_get_busy_time(struct intel_engine_cs *engine,
                                   ktime_t *now);

struct i915_request *
intel_engine_find_active_request(struct intel_engine_cs *engine);

u32 intel_engine_context_size(struct intel_gt *gt, u8 class);

void intel_engine_init_active(struct intel_engine_cs *engine,
                              unsigned int subclass);
#define ENGINE_PHYSICAL 0
#define ENGINE_MOCK     1
#define ENGINE_VIRTUAL  2

static inline bool
intel_engine_has_preempt_reset(const struct intel_engine_cs *engine)
{
        if (!IS_ACTIVE(CONFIG_DRM_I915_PREEMPT_TIMEOUT))
                return false;

        return intel_engine_has_preemption(engine);
}

static inline bool
intel_engine_has_heartbeat(const struct intel_engine_cs *engine)
{
        if (!IS_ACTIVE(CONFIG_DRM_I915_HEARTBEAT_INTERVAL))
                return false;

        return READ_ONCE(engine->props.heartbeat_interval_ms);
}

#endif /* _INTEL_RINGBUFFER_H_ */