/* SPDX-License-Identifier: MIT */
#ifndef _INTEL_RINGBUFFER_H_
#define _INTEL_RINGBUFFER_H_

#include <drm/drm_util.h>

#include <linux/hashtable.h>
#include <linux/irq_work.h>
#include <linux/random.h>
#include <linux/seqlock.h>

#include "i915_gem_batch_pool.h"
#include "i915_pmu.h"
#include "i915_reg.h"
#include "i915_request.h"
#include "i915_selftest.h"
#include "i915_timeline.h"
#include "intel_engine_types.h"
#include "intel_gpu_commands.h"
#include "intel_workarounds.h"

struct drm_printer;

/* Early gen2 devices have a cacheline of just 32 bytes, using 64 is overkill,
 * but keeps the logic simple. Indeed, the whole purpose of this macro is just
 * to give some indication as to some of the magic values used in the various
 * workarounds!
 */
#define CACHELINE_BYTES 64
#define CACHELINE_DWORDS (CACHELINE_BYTES / sizeof(u32))

/*
 * The register defines to be used with the following macros need to accept a
 * base param, e.g.:
 *
 * REG_FOO(base) _MMIO((base) + <relative offset>)
 * ENGINE_READ(engine, REG_FOO);
 *
 * Register arrays are to be defined and accessed as follows:
 *
 * REG_BAR(base, i) _MMIO((base) + <relative offset> + (i) * <shift>)
 * ENGINE_READ_IDX(engine, REG_BAR, i)
 */

#define __ENGINE_REG_OP(op__, engine__, ...) \
        intel_uncore_##op__((engine__)->uncore, __VA_ARGS__)

#define __ENGINE_READ_OP(op__, engine__, reg__) \
        __ENGINE_REG_OP(op__, (engine__), reg__((engine__)->mmio_base))

#define ENGINE_READ16(...)      __ENGINE_READ_OP(read16, __VA_ARGS__)
#define ENGINE_READ(...)        __ENGINE_READ_OP(read, __VA_ARGS__)
#define ENGINE_READ_FW(...)     __ENGINE_READ_OP(read_fw, __VA_ARGS__)
#define ENGINE_POSTING_READ(...) __ENGINE_READ_OP(posting_read, __VA_ARGS__)

#define ENGINE_READ64(engine__, lower_reg__, upper_reg__) \
        __ENGINE_REG_OP(read64_2x32, (engine__), \
                        lower_reg__((engine__)->mmio_base), \
                        upper_reg__((engine__)->mmio_base))

#define ENGINE_READ_IDX(engine__, reg__, idx__) \
        __ENGINE_REG_OP(read, (engine__), reg__((engine__)->mmio_base, (idx__)))

#define __ENGINE_WRITE_OP(op__, engine__, reg__, val__) \
        __ENGINE_REG_OP(op__, (engine__), reg__((engine__)->mmio_base), (val__))

#define ENGINE_WRITE16(...)     __ENGINE_WRITE_OP(write16, __VA_ARGS__)
#define ENGINE_WRITE(...)       __ENGINE_WRITE_OP(write, __VA_ARGS__)
#define ENGINE_WRITE_FW(...)    __ENGINE_WRITE_OP(write_fw, __VA_ARGS__)
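
/*
 * Illustrative usage sketch (not taken from this file): reading and writing
 * a per-engine register through the accessors above. RING_HEAD/RING_TAIL are
 * assumed to be register defines taking a base parameter as described above,
 * and "engine" a valid intel_engine_cs pointer:
 *
 *      u32 head = ENGINE_READ(engine, RING_HEAD);
 *      ENGINE_WRITE(engine, RING_TAIL, head & TAIL_ADDR);
 *      ENGINE_POSTING_READ(engine, RING_TAIL);
 */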

/* The seqno size is actually only a u32, but since we plan to use MI_FLUSH_DW
 * to do the writes, and that must have qword-aligned offsets, simply pretend
 * it's 8b.
 */
enum intel_engine_hangcheck_action {
        ENGINE_IDLE = 0,
        ENGINE_WAIT,
        ENGINE_ACTIVE_SEQNO,
        ENGINE_ACTIVE_HEAD,
        ENGINE_ACTIVE_SUBUNITS,
        ENGINE_WAIT_KICK,
        ENGINE_DEAD,
};

static inline const char *
hangcheck_action_to_str(const enum intel_engine_hangcheck_action a)
{
        switch (a) {
        case ENGINE_IDLE:
                return "idle";
        case ENGINE_WAIT:
                return "wait";
        case ENGINE_ACTIVE_SEQNO:
                return "active seqno";
        case ENGINE_ACTIVE_HEAD:
                return "active head";
        case ENGINE_ACTIVE_SUBUNITS:
                return "active subunits";
        case ENGINE_WAIT_KICK:
                return "wait kick";
        case ENGINE_DEAD:
                return "dead";
        }

        return "unknown";
}

void intel_engines_set_scheduler_caps(struct drm_i915_private *i915);

static inline bool __execlists_need_preempt(int prio, int last)
{
        /*
         * Allow preemption of low -> normal -> high, but we do
         * not allow low priority tasks to preempt other low priority
         * tasks under the impression that latency for low priority
         * tasks does not matter (as much as background throughput),
         * so keep it simple (KISS).
         *
         * More naturally we would write
         *      prio >= max(0, last);
         * except that we wish to prevent triggering preemption at the same
         * priority level: the task that is running should remain running
         * to preserve FIFO ordering of dependencies.
         */
        return prio > max(I915_PRIORITY_NORMAL - 1, last);
}
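
/*
 * Worked example (illustrative, assuming I915_PRIORITY_NORMAL == 0): a
 * request at prio 1 preempts one running at prio 0, a request at prio 0 does
 * not preempt another prio 0 request (same priority level), and a
 * below-normal request never triggers preemption at all.
 */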

static inline void
execlists_set_active(struct intel_engine_execlists *execlists,
                     unsigned int bit)
{
        __set_bit(bit, (unsigned long *)&execlists->active);
}

static inline bool
execlists_set_active_once(struct intel_engine_execlists *execlists,
                          unsigned int bit)
{
        return !__test_and_set_bit(bit, (unsigned long *)&execlists->active);
}

static inline void
execlists_clear_active(struct intel_engine_execlists *execlists,
                       unsigned int bit)
{
        __clear_bit(bit, (unsigned long *)&execlists->active);
}

static inline void
execlists_clear_all_active(struct intel_engine_execlists *execlists)
{
        execlists->active = 0;
}

static inline bool
execlists_is_active(const struct intel_engine_execlists *execlists,
                    unsigned int bit)
{
        return test_bit(bit, (unsigned long *)&execlists->active);
}

void execlists_user_begin(struct intel_engine_execlists *execlists,
                          const struct execlist_port *port);
void execlists_user_end(struct intel_engine_execlists *execlists);

void
execlists_cancel_port_requests(struct intel_engine_execlists * const execlists);

struct i915_request *
execlists_unwind_incomplete_requests(struct intel_engine_execlists *execlists);

static inline unsigned int
execlists_num_ports(const struct intel_engine_execlists * const execlists)
{
        return execlists->port_mask + 1;
}

static inline struct execlist_port *
execlists_port_complete(struct intel_engine_execlists * const execlists,
                        struct execlist_port * const port)
{
        const unsigned int m = execlists->port_mask;

        GEM_BUG_ON(port_index(port, execlists) != 0);
        GEM_BUG_ON(!execlists_is_active(execlists, EXECLISTS_ACTIVE_USER));

        memmove(port, port + 1, m * sizeof(struct execlist_port));
        memset(port + m, 0, sizeof(struct execlist_port));

        return port;
}

static inline u32
intel_read_status_page(const struct intel_engine_cs *engine, int reg)
{
        /* Ensure that the compiler doesn't optimize away the load. */
        return READ_ONCE(engine->status_page.addr[reg]);
}

static inline void
intel_write_status_page(struct intel_engine_cs *engine, int reg, u32 value)
{
        /* Writing into the status page should be done sparingly. Since
         * we do so when we are uncertain of the device state, we take a bit
         * of extra paranoia to try and ensure that the HWS takes the value
         * we give and that it doesn't end up trapped inside the CPU!
         */
        if (static_cpu_has(X86_FEATURE_CLFLUSH)) {
                mb();
                clflush(&engine->status_page.addr[reg]);
                engine->status_page.addr[reg] = value;
                clflush(&engine->status_page.addr[reg]);
                mb();
        } else {
                WRITE_ONCE(engine->status_page.addr[reg], value);
        }
}

/*
 * Reads a dword out of the status page, which is written to from the command
 * queue by automatic updates, MI_REPORT_HEAD, MI_STORE_DATA_INDEX, or
 * MI_STORE_DATA_IMM.
 *
 * The following dwords have a reserved meaning:
 * 0x00: ISR copy, updated when an ISR bit not set in the HWSTAM changes.
 * 0x04: ring 0 head pointer
 * 0x05: ring 1 head pointer (915-class)
 * 0x06: ring 2 head pointer (915-class)
 * 0x10-0x1b: Context status DWords (GM45)
 * 0x1f: Last written status offset. (GM45)
 * 0x20-0x2f: Reserved (Gen6+)
 *
 * The area from dword 0x30 to 0x3ff is available for driver usage.
 */
#define I915_GEM_HWS_PREEMPT            0x32
#define I915_GEM_HWS_PREEMPT_ADDR       (I915_GEM_HWS_PREEMPT * sizeof(u32))
#define I915_GEM_HWS_HANGCHECK          0x34
#define I915_GEM_HWS_HANGCHECK_ADDR     (I915_GEM_HWS_HANGCHECK * sizeof(u32))
#define I915_GEM_HWS_SEQNO              0x40
#define I915_GEM_HWS_SEQNO_ADDR         (I915_GEM_HWS_SEQNO * sizeof(u32))
#define I915_GEM_HWS_SCRATCH            0x80
#define I915_GEM_HWS_SCRATCH_ADDR       (I915_GEM_HWS_SCRATCH * sizeof(u32))

#define I915_HWS_CSB_BUF0_INDEX         0x10
#define I915_HWS_CSB_WRITE_INDEX        0x1f
#define CNL_HWS_CSB_WRITE_INDEX         0x2f
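
/*
 * Illustrative sketch (not from this file): the driver-usable slots above are
 * accessed through the status page helpers, e.g. assuming "engine" is a valid
 * intel_engine_cs pointer:
 *
 *      u32 seqno = intel_read_status_page(engine, I915_GEM_HWS_SEQNO);
 *      intel_write_status_page(engine, I915_GEM_HWS_SCRATCH, 0);
 */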

struct intel_ring *
intel_engine_create_ring(struct intel_engine_cs *engine,
                         struct i915_timeline *timeline,
                         int size);
int intel_ring_pin(struct intel_ring *ring);
void intel_ring_reset(struct intel_ring *ring, u32 tail);
unsigned int intel_ring_update_space(struct intel_ring *ring);
void intel_ring_unpin(struct intel_ring *ring);
void intel_ring_free(struct kref *ref);

static inline struct intel_ring *intel_ring_get(struct intel_ring *ring)
{
        kref_get(&ring->ref);
        return ring;
}

static inline void intel_ring_put(struct intel_ring *ring)
{
        kref_put(&ring->ref, intel_ring_free);
}

void intel_engine_stop(struct intel_engine_cs *engine);
void intel_engine_cleanup(struct intel_engine_cs *engine);

int __must_check intel_ring_cacheline_align(struct i915_request *rq);

u32 __must_check *intel_ring_begin(struct i915_request *rq, unsigned int n);

static inline void intel_ring_advance(struct i915_request *rq, u32 *cs)
{
        /* Dummy function.
         *
         * This serves as a placeholder in the code so that the reader
         * can compare against the preceding intel_ring_begin() and
         * check that the number of dwords emitted matches the space
         * reserved for the command packet (i.e. the value passed to
         * intel_ring_begin()).
         */
        GEM_BUG_ON((rq->ring->vaddr + rq->ring->emit) != cs);
}
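
/*
 * Illustrative sketch of the begin/advance pairing (a minimal example, not
 * taken from this file), emitting a two-dword packet:
 *
 *      u32 *cs = intel_ring_begin(rq, 2);
 *      if (IS_ERR(cs))
 *              return PTR_ERR(cs);
 *      *cs++ = MI_NOOP;
 *      *cs++ = MI_NOOP;
 *      intel_ring_advance(rq, cs);
 */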

static inline u32 intel_ring_wrap(const struct intel_ring *ring, u32 pos)
{
        return pos & (ring->size - 1);
}
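
/*
 * For example, with a 4096-byte ring intel_ring_wrap(ring, 4100) == 4 and
 * intel_ring_wrap(ring, 4096) == 0; ring sizes are always powers of two.
 */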

static inline bool
intel_ring_offset_valid(const struct intel_ring *ring,
                        unsigned int pos)
{
        if (pos & -ring->size) /* must be strictly within the ring */
                return false;

        if (!IS_ALIGNED(pos, 8)) /* must be qword aligned */
                return false;

        return true;
}

static inline u32 intel_ring_offset(const struct i915_request *rq, void *addr)
{
        /* Don't write ring->size (equivalent to 0) as that hangs some GPUs. */
        u32 offset = addr - rq->ring->vaddr;
        GEM_BUG_ON(offset > rq->ring->size);
        return intel_ring_wrap(rq->ring, offset);
}

static inline void
assert_ring_tail_valid(const struct intel_ring *ring, unsigned int tail)
{
        GEM_BUG_ON(!intel_ring_offset_valid(ring, tail));

        /*
         * "Ring Buffer Use"
         *      Gen2 BSpec "1. Programming Environment" / 1.4.4.6
         *      Gen3 BSpec "1c Memory Interface Functions" / 2.3.4.5
         *      Gen4+ BSpec "1c Memory Interface and Command Stream" / 5.3.4.5
         * "If the Ring Buffer Head Pointer and the Tail Pointer are on the
         * same cacheline, the Head Pointer must not be greater than the Tail
         * Pointer."
         *
         * We use ring->head as the last known location of the actual
         * RING_HEAD; the hardware head may have advanced since, but in the
         * worst case it is still equal to ring->head, so we must never
         * program RING_TAIL to advance into the same cacheline as ring->head.
         */
#define cacheline(a) round_down(a, CACHELINE_BYTES)
        GEM_BUG_ON(cacheline(tail) == cacheline(ring->head) &&
                   tail < ring->head);
#undef cacheline
}

static inline unsigned int
intel_ring_set_tail(struct intel_ring *ring, unsigned int tail)
{
        /* Whilst writes to the tail are strictly ordered, there is no
         * serialisation between readers and the writers. The tail may be
         * read by i915_request_retire() just as it is being updated
         * by execlists, as although the breadcrumb is complete, the context
         * switch hasn't been seen.
         */
        assert_ring_tail_valid(ring, tail);
        ring->tail = tail;
        return tail;
}

static inline unsigned int
__intel_ring_space(unsigned int head, unsigned int tail, unsigned int size)
{
        /*
         * "If the Ring Buffer Head Pointer and the Tail Pointer are on the
         * same cacheline, the Head Pointer must not be greater than the Tail
         * Pointer."
         */
        GEM_BUG_ON(!is_power_of_2(size));
        return (head - tail - CACHELINE_BYTES) & (size - 1);
}
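
/*
 * Worked example (illustrative): with size == 4096 and head == tail == 0,
 * the reported space is (0 - 0 - 64) & 4095 == 4032 bytes, i.e. one
 * cacheline is always kept spare so that the tail can never be programmed
 * into the same cacheline as the head.
 */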

int intel_engine_setup_common(struct intel_engine_cs *engine);
int intel_engine_init_common(struct intel_engine_cs *engine);
void intel_engine_cleanup_common(struct intel_engine_cs *engine);

int intel_init_render_ring_buffer(struct intel_engine_cs *engine);
int intel_init_bsd_ring_buffer(struct intel_engine_cs *engine);
int intel_init_blt_ring_buffer(struct intel_engine_cs *engine);
int intel_init_vebox_ring_buffer(struct intel_engine_cs *engine);

int intel_engine_stop_cs(struct intel_engine_cs *engine);
void intel_engine_cancel_stop_cs(struct intel_engine_cs *engine);

void intel_engine_set_hwsp_writemask(struct intel_engine_cs *engine, u32 mask);

u64 intel_engine_get_active_head(const struct intel_engine_cs *engine);
u64 intel_engine_get_last_batch_head(const struct intel_engine_cs *engine);

void intel_engine_get_instdone(struct intel_engine_cs *engine,
                               struct intel_instdone *instdone);

void intel_engine_init_breadcrumbs(struct intel_engine_cs *engine);
void intel_engine_fini_breadcrumbs(struct intel_engine_cs *engine);

void intel_engine_pin_breadcrumbs_irq(struct intel_engine_cs *engine);
void intel_engine_unpin_breadcrumbs_irq(struct intel_engine_cs *engine);

void intel_engine_signal_breadcrumbs(struct intel_engine_cs *engine);
void intel_engine_disarm_breadcrumbs(struct intel_engine_cs *engine);

static inline void
intel_engine_queue_breadcrumbs(struct intel_engine_cs *engine)
{
        irq_work_queue(&engine->breadcrumbs.irq_work);
}

void intel_engine_breadcrumbs_irq(struct intel_engine_cs *engine);

void intel_engine_reset_breadcrumbs(struct intel_engine_cs *engine);

void intel_engine_print_breadcrumbs(struct intel_engine_cs *engine,
                                    struct drm_printer *p);

static inline u32 *gen8_emit_pipe_control(u32 *batch, u32 flags, u32 offset)
{
        memset(batch, 0, 6 * sizeof(u32));

        batch[0] = GFX_OP_PIPE_CONTROL(6);
        batch[1] = flags;
        batch[2] = offset;

        return batch + 6;
}
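
/*
 * Illustrative sketch (flags chosen purely as an example): emitting one
 * 6-dword PIPE_CONTROL into a workaround batch buffer:
 *
 *      batch = gen8_emit_pipe_control(batch,
 *                                     PIPE_CONTROL_CS_STALL |
 *                                     PIPE_CONTROL_DC_FLUSH_ENABLE,
 *                                     0);
 */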

static inline u32 *
gen8_emit_ggtt_write_rcs(u32 *cs, u32 value, u32 gtt_offset, u32 flags)
{
        /* We're using qword write, offset should be aligned to 8 bytes. */
        GEM_BUG_ON(!IS_ALIGNED(gtt_offset, 8));

        /* As a w/a for post-sync ops following a GPGPU operation we
         * need a prior CS_STALL, which is emitted by the flush
         * following the batch.
         */
        *cs++ = GFX_OP_PIPE_CONTROL(6);
        *cs++ = flags | PIPE_CONTROL_QW_WRITE | PIPE_CONTROL_GLOBAL_GTT_IVB;
        *cs++ = gtt_offset;
        *cs++ = 0;
        *cs++ = value;
        /* We're trashing one extra dword of the HWS with the qword write. */
        *cs++ = 0;

        return cs;
}

static inline u32 *
gen8_emit_ggtt_write(u32 *cs, u32 value, u32 gtt_offset, u32 flags)
{
        /* w/a: bit 5 needs to be zero for MI_FLUSH_DW address. */
        GEM_BUG_ON(gtt_offset & (1 << 5));
        /* Offset should be aligned to 8 bytes for both (QW/DW) write types */
        GEM_BUG_ON(!IS_ALIGNED(gtt_offset, 8));

        *cs++ = (MI_FLUSH_DW + 1) | MI_FLUSH_DW_OP_STOREDW | flags;
        *cs++ = gtt_offset | MI_FLUSH_DW_USE_GTT;
        *cs++ = 0;
        *cs++ = value;

        return cs;
}
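
/*
 * Illustrative sketch (a guess at typical use, not taken from this file):
 * emitting a breadcrumb value into the status page, assuming "cs" came from
 * a matching intel_ring_begin(rq, 4):
 *
 *      cs = gen8_emit_ggtt_write(cs, rq->fence.seqno,
 *                                I915_GEM_HWS_SEQNO_ADDR, 0);
 *      intel_ring_advance(rq, cs);
 */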

static inline void intel_engine_reset(struct intel_engine_cs *engine,
                                      bool stalled)
{
        if (engine->reset.reset)
                engine->reset.reset(engine, stalled);
}

void intel_engines_sanitize(struct drm_i915_private *i915, bool force);
void intel_gt_resume(struct drm_i915_private *i915);

bool intel_engine_is_idle(struct intel_engine_cs *engine);
bool intel_engines_are_idle(struct drm_i915_private *dev_priv);

void intel_engine_lost_context(struct intel_engine_cs *engine);

void intel_engines_park(struct drm_i915_private *i915);
void intel_engines_unpark(struct drm_i915_private *i915);

void intel_engines_reset_default_submission(struct drm_i915_private *i915);
unsigned int intel_engines_has_context_isolation(struct drm_i915_private *i915);

bool intel_engine_can_store_dword(struct intel_engine_cs *engine);

__printf(3, 4)
void intel_engine_dump(struct intel_engine_cs *engine,
                       struct drm_printer *m,
                       const char *header, ...);

struct intel_engine_cs *
intel_engine_lookup_user(struct drm_i915_private *i915, u8 class, u8 instance);

static inline void intel_engine_context_in(struct intel_engine_cs *engine)
{
        unsigned long flags;

        if (READ_ONCE(engine->stats.enabled) == 0)
                return;

        write_seqlock_irqsave(&engine->stats.lock, flags);

        if (engine->stats.enabled > 0) {
                if (engine->stats.active++ == 0)
                        engine->stats.start = ktime_get();
                GEM_BUG_ON(engine->stats.active == 0);
        }

        write_sequnlock_irqrestore(&engine->stats.lock, flags);
}

static inline void intel_engine_context_out(struct intel_engine_cs *engine)
{
        unsigned long flags;

        if (READ_ONCE(engine->stats.enabled) == 0)
                return;

        write_seqlock_irqsave(&engine->stats.lock, flags);

        if (engine->stats.enabled > 0) {
                ktime_t last;

                if (engine->stats.active && --engine->stats.active == 0) {
                        /*
                         * Decrement the active context count and, now that
                         * the GPU is idle, add the elapsed time to the
                         * running total.
                         */
                        last = ktime_sub(ktime_get(), engine->stats.start);

                        engine->stats.total = ktime_add(engine->stats.total,
                                                        last);
                } else if (engine->stats.active == 0) {
                        /*
                         * After turning on engine stats, context out might be
                         * the first event, in which case we account from the
                         * time stats gathering was turned on.
                         */
                        last = ktime_sub(ktime_get(), engine->stats.enabled_at);

                        engine->stats.total = ktime_add(engine->stats.total,
                                                        last);
                }
        }

        write_sequnlock_irqrestore(&engine->stats.lock, flags);
}

int intel_enable_engine_stats(struct intel_engine_cs *engine);
void intel_disable_engine_stats(struct intel_engine_cs *engine);

ktime_t intel_engine_get_busy_time(struct intel_engine_cs *engine);
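
/*
 * Illustrative sketch (a guess at typical use, not from this file): sampling
 * engine busyness through the accounting above:
 *
 *      if (intel_enable_engine_stats(engine) == 0) {
 *              ktime_t busy = intel_engine_get_busy_time(engine);
 *              intel_disable_engine_stats(engine);
 *      }
 */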

struct i915_request *
intel_engine_find_active_request(struct intel_engine_cs *engine);

#if IS_ENABLED(CONFIG_DRM_I915_SELFTEST)

static inline bool inject_preempt_hang(struct intel_engine_execlists *execlists)
{
        if (!execlists->preempt_hang.inject_hang)
                return false;

        complete(&execlists->preempt_hang.completion);
        return true;
}

#else

static inline bool inject_preempt_hang(struct intel_engine_execlists *execlists)
{
        return false;
}

#endif

static inline u32
intel_engine_next_hangcheck_seqno(struct intel_engine_cs *engine)
{
        return engine->hangcheck.next_seqno =
                next_pseudo_random32(engine->hangcheck.next_seqno);
}

static inline u32
intel_engine_get_hangcheck_seqno(struct intel_engine_cs *engine)
{
        return intel_read_status_page(engine, I915_GEM_HWS_HANGCHECK);
}

#endif /* _INTEL_RINGBUFFER_H_ */