drivers/gpu/drm/amd/amdgpu/amdgpu_ring_mux.c
/*
 * Copyright 2022 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 */
#include <linux/slab.h>
#include <drm/drm_print.h>

#include "amdgpu_ring_mux.h"
#include "amdgpu_ring.h"
#include "amdgpu.h"

#define AMDGPU_MUX_RESUBMIT_JIFFIES_TIMEOUT (HZ / 2)
#define AMDGPU_MAX_LAST_UNSIGNALED_THRESHOLD_US 10000

static const struct ring_info {
        unsigned int hw_pio;
        const char *ring_name;
} sw_ring_info[] = {
        { AMDGPU_RING_PRIO_DEFAULT, "gfx_low"},
        { AMDGPU_RING_PRIO_2, "gfx_high"},
};

static struct kmem_cache *amdgpu_mux_chunk_slab;

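/* Return the mux entry bound to a software ring, or NULL if its index is out of range. */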
static inline struct amdgpu_mux_entry *amdgpu_ring_mux_sw_entry(struct amdgpu_ring_mux *mux,
                                                                struct amdgpu_ring *ring)
{
        return ring->entry_index < mux->ring_entry_size ?
                        &mux->ring_entry[ring->entry_index] : NULL;
}

/* Copy packets from the software ring in the range [begin, end) onto the real ring. */
static void amdgpu_ring_mux_copy_pkt_from_sw_ring(struct amdgpu_ring_mux *mux,
                                                  struct amdgpu_ring *ring,
                                                  u64 s_start, u64 s_end)
{
        u64 start, end;
        struct amdgpu_ring *real_ring = mux->real_ring;

        start = s_start & ring->buf_mask;
        end = s_end & ring->buf_mask;

        if (start == end) {
                DRM_ERROR("no more data copied from sw ring\n");
                return;
        }
        if (start > end) {
                /* The region wraps around the end of the software ring:
                 * copy the tail [start, ring size) first, then the head [0, end).
                 */
                amdgpu_ring_alloc(real_ring, (ring->ring_size >> 2) + end - start);
                amdgpu_ring_write_multiple(real_ring, (void *)&ring->ring[start],
                                           (ring->ring_size >> 2) - start);
                amdgpu_ring_write_multiple(real_ring, (void *)&ring->ring[0], end);
        } else {
                amdgpu_ring_alloc(real_ring, end - start);
                amdgpu_ring_write_multiple(real_ring, (void *)&ring->ring[start], end - start);
        }
}

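/*
 * Replay the outstanding chunks of the low priority software ring onto the
 * real ring. This runs after a preemption, for the fences in the range
 * (last_seq, seqno_to_resubmit] that have not signaled yet.
 */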
static void amdgpu_mux_resubmit_chunks(struct amdgpu_ring_mux *mux)
{
        struct amdgpu_mux_entry *e = NULL;
        struct amdgpu_mux_chunk *chunk;
        uint32_t seq, last_seq;
        int i;

        if (!mux->s_resubmit)
                return;

        /* find the low priority ring entry */
        for (i = 0; i < mux->num_ring_entries; i++) {
                if (mux->ring_entry[i].ring->hw_prio <= AMDGPU_RING_PRIO_DEFAULT) {
                        e = &mux->ring_entry[i];
                        break;
                }
        }

        if (!e) {
                DRM_ERROR("%s no low priority ring found\n", __func__);
                return;
        }

        last_seq = atomic_read(&e->ring->fence_drv.last_seq);
        seq = mux->seqno_to_resubmit;
        if (last_seq < seq) {
                /* resubmit all the fences in (last_seq, seq] */
                list_for_each_entry(chunk, &e->list, entry) {
                        if (chunk->sync_seq > last_seq && chunk->sync_seq <= seq) {
                                amdgpu_fence_update_start_timestamp(e->ring,
                                                                    chunk->sync_seq,
                                                                    ktime_get());
                                if (chunk->sync_seq ==
                                        le32_to_cpu(*(e->ring->fence_drv.cpu_addr + 2))) {
                                        if (chunk->cntl_offset <= e->ring->buf_mask)
                                                amdgpu_ring_patch_cntl(e->ring,
                                                                       chunk->cntl_offset);
                                        if (chunk->ce_offset <= e->ring->buf_mask)
                                                amdgpu_ring_patch_ce(e->ring, chunk->ce_offset);
                                        if (chunk->de_offset <= e->ring->buf_mask)
                                                amdgpu_ring_patch_de(e->ring, chunk->de_offset);
                                }
                                amdgpu_ring_mux_copy_pkt_from_sw_ring(mux, e->ring,
                                                                      chunk->start,
                                                                      chunk->end);
                                mux->wptr_resubmit = chunk->end;
                                amdgpu_ring_commit(mux->real_ring);
                        }
                }
        }

        del_timer(&mux->resubmit_timer);
        mux->s_resubmit = false;
}

static void amdgpu_ring_mux_schedule_resubmit(struct amdgpu_ring_mux *mux)
{
        mod_timer(&mux->resubmit_timer, jiffies + AMDGPU_MUX_RESUBMIT_JIFFIES_TIMEOUT);
}

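/*
 * Timer callback: if the mux lock is contended, try again later; otherwise
 * replay the outstanding chunks now.
 */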
static void amdgpu_mux_resubmit_fallback(struct timer_list *t)
{
        struct amdgpu_ring_mux *mux = from_timer(mux, t, resubmit_timer);

        if (!spin_trylock(&mux->lock)) {
                amdgpu_ring_mux_schedule_resubmit(mux);
                DRM_ERROR("reschedule resubmit\n");
                return;
        }
        amdgpu_mux_resubmit_chunks(mux);
        spin_unlock(&mux->lock);
}

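/*
 * Set up the multiplexer on top of the real ring: allocate the entry table
 * for the software rings, create the chunk slab cache and initialize the
 * resubmit fallback timer.
 */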
int amdgpu_ring_mux_init(struct amdgpu_ring_mux *mux, struct amdgpu_ring *ring,
                         unsigned int entry_size)
{
        mux->real_ring = ring;
        mux->num_ring_entries = 0;

        mux->ring_entry = kcalloc(entry_size, sizeof(struct amdgpu_mux_entry), GFP_KERNEL);
        if (!mux->ring_entry)
                return -ENOMEM;

        mux->ring_entry_size = entry_size;
        mux->s_resubmit = false;

        amdgpu_mux_chunk_slab = kmem_cache_create("amdgpu_mux_chunk",
                                                  sizeof(struct amdgpu_mux_chunk), 0,
                                                  SLAB_HWCACHE_ALIGN, NULL);
        if (!amdgpu_mux_chunk_slab) {
                DRM_ERROR("create amdgpu_mux_chunk cache failed\n");
                /* don't leak the entry table on failure */
                kfree(mux->ring_entry);
                mux->ring_entry = NULL;
                return -ENOMEM;
        }

        spin_lock_init(&mux->lock);
        timer_setup(&mux->resubmit_timer, amdgpu_mux_resubmit_fallback, 0);

        return 0;
}

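/* Tear down the multiplexer: free all tracked chunks, the slab cache and the entry table. */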
void amdgpu_ring_mux_fini(struct amdgpu_ring_mux *mux)
{
        struct amdgpu_mux_entry *e;
        struct amdgpu_mux_chunk *chunk, *chunk2;
        int i;

        for (i = 0; i < mux->num_ring_entries; i++) {
                e = &mux->ring_entry[i];
                list_for_each_entry_safe(chunk, chunk2, &e->list, entry) {
                        list_del(&chunk->entry);
                        kmem_cache_free(amdgpu_mux_chunk_slab, chunk);
                }
        }
        kmem_cache_destroy(amdgpu_mux_chunk_slab);
        kfree(mux->ring_entry);
        mux->ring_entry = NULL;
        mux->num_ring_entries = 0;
        mux->ring_entry_size = 0;
}

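/* Register a software ring with the multiplexer, binding it to the next free entry. */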
int amdgpu_ring_mux_add_sw_ring(struct amdgpu_ring_mux *mux, struct amdgpu_ring *ring)
{
        struct amdgpu_mux_entry *e;

        if (mux->num_ring_entries >= mux->ring_entry_size) {
                DRM_ERROR("add sw ring exceeding max entry size\n");
                return -ENOENT;
        }

        e = &mux->ring_entry[mux->num_ring_entries];
        ring->entry_index = mux->num_ring_entries;
        e->ring = ring;

        INIT_LIST_HEAD(&e->list);
        mux->num_ring_entries += 1;
        return 0;
}

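/*
 * Update the write pointer of a software ring: copy the newly written packets
 * onto the real ring and commit them, unless they were already copied by the
 * resubmit path or a preemption is in flight.
 */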
void amdgpu_ring_mux_set_wptr(struct amdgpu_ring_mux *mux, struct amdgpu_ring *ring, u64 wptr)
{
        struct amdgpu_mux_entry *e;

        spin_lock(&mux->lock);

        if (ring->hw_prio <= AMDGPU_RING_PRIO_DEFAULT)
                amdgpu_mux_resubmit_chunks(mux);

        e = amdgpu_ring_mux_sw_entry(mux, ring);
        if (!e) {
                DRM_ERROR("cannot find entry for sw ring\n");
                spin_unlock(&mux->lock);
                return;
        }

        /* We can skip this wptr update while a preemption is in progress. */
        if (ring->hw_prio <= AMDGPU_RING_PRIO_DEFAULT && mux->pending_trailing_fence_signaled) {
                spin_unlock(&mux->lock);
                return;
        }

        e->sw_cptr = e->sw_wptr;
        /* Update cptr if the packets were already copied by the resubmit path. */
        if (ring->hw_prio <= AMDGPU_RING_PRIO_DEFAULT && e->sw_cptr < mux->wptr_resubmit)
                e->sw_cptr = mux->wptr_resubmit;
        e->sw_wptr = wptr;
        e->start_ptr_in_hw_ring = mux->real_ring->wptr;

        /* Skip copying packets that have already been resubmitted. */
        if (ring->hw_prio > AMDGPU_RING_PRIO_DEFAULT || mux->wptr_resubmit < wptr) {
                amdgpu_ring_mux_copy_pkt_from_sw_ring(mux, ring, e->sw_cptr, wptr);
                e->end_ptr_in_hw_ring = mux->real_ring->wptr;
                amdgpu_ring_commit(mux->real_ring);
        } else {
                e->end_ptr_in_hw_ring = mux->real_ring->wptr;
        }
        spin_unlock(&mux->lock);
}

u64 amdgpu_ring_mux_get_wptr(struct amdgpu_ring_mux *mux, struct amdgpu_ring *ring)
{
        struct amdgpu_mux_entry *e;

        e = amdgpu_ring_mux_sw_entry(mux, ring);
        if (!e) {
                DRM_ERROR("cannot find entry for sw ring\n");
                return 0;
        }

        return e->sw_wptr;
}

/**
 * amdgpu_ring_mux_get_rptr - get the read pointer of the software ring
 * @mux: the multiplexer the software rings attach to
 * @ring: the software ring of which we calculate the read pointer
 *
 * The returned read pointer is not precise while the other rings could be
 * writing data onto the real ring buffer. After the real ring has been
 * overwritten, we cannot decide whether our packets have been executed or
 * not even read yet. However, this function is only called by tools such as
 * umr to collect the latest packets for hang analysis. We assume the hang
 * happens near our latest submission, so we use the following logic to give
 * a clue: if the read pointer is between start and end, return the copy
 * pointer plus the distance from start to the read pointer. If the read
 * pointer is before start, return the copy pointer. Finally, if the read
 * pointer is past end, return the write pointer.
 */
u64 amdgpu_ring_mux_get_rptr(struct amdgpu_ring_mux *mux, struct amdgpu_ring *ring)
{
        struct amdgpu_mux_entry *e;
        u64 readp, offset, start, end;

        e = amdgpu_ring_mux_sw_entry(mux, ring);
        if (!e) {
                DRM_ERROR("no sw entry found!\n");
                return 0;
        }

        readp = amdgpu_ring_get_rptr(mux->real_ring);

        start = e->start_ptr_in_hw_ring & mux->real_ring->buf_mask;
        end = e->end_ptr_in_hw_ring & mux->real_ring->buf_mask;
        if (start > end) {
                if (readp <= end)
                        readp += mux->real_ring->ring_size >> 2;
                end += mux->real_ring->ring_size >> 2;
        }

        if (start <= readp && readp <= end) {
                offset = readp - start;
                e->sw_rptr = (e->sw_cptr + offset) & ring->buf_mask;
        } else if (readp < start) {
                e->sw_rptr = e->sw_cptr;
        } else {
                /* end < readptr */
                e->sw_rptr = e->sw_wptr;
        }

        return e->sw_rptr;
}

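/* gfx ring callbacks for the software rings; they forward to the multiplexer. */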
u64 amdgpu_sw_ring_get_rptr_gfx(struct amdgpu_ring *ring)
{
        struct amdgpu_device *adev = ring->adev;
        struct amdgpu_ring_mux *mux = &adev->gfx.muxer;

        WARN_ON(!ring->is_sw_ring);
        return amdgpu_ring_mux_get_rptr(mux, ring);
}

u64 amdgpu_sw_ring_get_wptr_gfx(struct amdgpu_ring *ring)
{
        struct amdgpu_device *adev = ring->adev;
        struct amdgpu_ring_mux *mux = &adev->gfx.muxer;

        WARN_ON(!ring->is_sw_ring);
        return amdgpu_ring_mux_get_wptr(mux, ring);
}

void amdgpu_sw_ring_set_wptr_gfx(struct amdgpu_ring *ring)
{
        struct amdgpu_device *adev = ring->adev;
        struct amdgpu_ring_mux *mux = &adev->gfx.muxer;

        WARN_ON(!ring->is_sw_ring);
        amdgpu_ring_mux_set_wptr(mux, ring, ring->wptr);
}

/* Override insert_nop to prevent emitting nops to the software rings */
void amdgpu_sw_ring_insert_nop(struct amdgpu_ring *ring, uint32_t count)
{
        WARN_ON(!ring->is_sw_ring);
}

const char *amdgpu_sw_ring_name(int idx)
{
        return idx < ARRAY_SIZE(sw_ring_info) ?
                sw_ring_info[idx].ring_name : NULL;
}

unsigned int amdgpu_sw_ring_priority(int idx)
{
        return idx < ARRAY_SIZE(sw_ring_info) ?
                sw_ring_info[idx].hw_pio : AMDGPU_RING_PRIO_DEFAULT;
}

/*
 * Decide whether to preempt: a low priority ring must have a fence that has
 * stayed unsignaled longer than the threshold, while no high priority ring
 * has fences outstanding and no resubmit is already pending.
 */
static int amdgpu_mcbp_scan(struct amdgpu_ring_mux *mux)
{
        struct amdgpu_ring *ring;
        int i, need_preempt;

        need_preempt = 0;
        for (i = 0; i < mux->num_ring_entries; i++) {
                ring = mux->ring_entry[i].ring;
                if (ring->hw_prio > AMDGPU_RING_PRIO_DEFAULT &&
                    amdgpu_fence_count_emitted(ring) > 0)
                        return 0;
                if (ring->hw_prio <= AMDGPU_RING_PRIO_DEFAULT &&
                    amdgpu_fence_last_unsignaled_time_us(ring) >
                    AMDGPU_MAX_LAST_UNSIGNALED_THRESHOLD_US)
                        need_preempt = 1;
        }
        return need_preempt && !mux->s_resubmit;
}

/* Trigger Mid-Command Buffer Preemption (MCBP) and determine whether we need to resubmit. */
static int amdgpu_mcbp_trigger_preempt(struct amdgpu_ring_mux *mux)
{
        int r;

        spin_lock(&mux->lock);
        mux->pending_trailing_fence_signaled = true;
        r = amdgpu_ring_preempt_ib(mux->real_ring);
        spin_unlock(&mux->lock);
        return r;
}

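/*
 * Called at the start of an IB submission on a software ring. High priority
 * submissions may trigger MCBP; low priority submissions open a new chunk so
 * their packets can be replayed after a preemption.
 */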
void amdgpu_sw_ring_ib_begin(struct amdgpu_ring *ring)
{
        struct amdgpu_device *adev = ring->adev;
        struct amdgpu_ring_mux *mux = &adev->gfx.muxer;

        WARN_ON(!ring->is_sw_ring);
        if (ring->hw_prio > AMDGPU_RING_PRIO_DEFAULT) {
                if (amdgpu_mcbp_scan(mux) > 0)
                        amdgpu_mcbp_trigger_preempt(mux);
                return;
        }

        amdgpu_ring_mux_start_ib(mux, ring);
}

void amdgpu_sw_ring_ib_end(struct amdgpu_ring *ring)
{
        struct amdgpu_device *adev = ring->adev;
        struct amdgpu_ring_mux *mux = &adev->gfx.muxer;

        WARN_ON(!ring->is_sw_ring);
        if (ring->hw_prio > AMDGPU_RING_PRIO_DEFAULT)
                return;
        amdgpu_ring_mux_end_ib(mux, ring);
}

void amdgpu_sw_ring_ib_mark_offset(struct amdgpu_ring *ring, enum amdgpu_ring_mux_offset_type type)
{
        struct amdgpu_device *adev = ring->adev;
        struct amdgpu_ring_mux *mux = &adev->gfx.muxer;
        unsigned offset;

        offset = ring->wptr & ring->buf_mask;

        amdgpu_ring_mux_ib_mark_offset(mux, ring, offset, type);
}

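/*
 * Open a new chunk for the upcoming IB so its packets can be located and
 * resubmitted after a preemption.
 */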
void amdgpu_ring_mux_start_ib(struct amdgpu_ring_mux *mux, struct amdgpu_ring *ring)
{
        struct amdgpu_mux_entry *e;
        struct amdgpu_mux_chunk *chunk;

        spin_lock(&mux->lock);
        amdgpu_mux_resubmit_chunks(mux);
        spin_unlock(&mux->lock);

        e = amdgpu_ring_mux_sw_entry(mux, ring);
        if (!e) {
                DRM_ERROR("cannot find entry!\n");
                return;
        }

        chunk = kmem_cache_alloc(amdgpu_mux_chunk_slab, GFP_KERNEL);
        if (!chunk) {
                DRM_ERROR("alloc amdgpu_mux_chunk_slab failed\n");
                return;
        }

        chunk->start = ring->wptr;
        /* the initial out-of-range value marks these offsets as not yet set by the IB submission */
        chunk->cntl_offset = ring->buf_mask + 1;
        chunk->de_offset = ring->buf_mask + 1;
        chunk->ce_offset = ring->buf_mask + 1;
        list_add_tail(&chunk->entry, &e->list);
}

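/* Drop chunks whose fences have already signaled; they no longer need resubmitting. */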
static void scan_and_remove_signaled_chunk(struct amdgpu_ring_mux *mux, struct amdgpu_ring *ring)
{
        uint32_t last_seq = 0;
        struct amdgpu_mux_entry *e;
        struct amdgpu_mux_chunk *chunk, *tmp;

        e = amdgpu_ring_mux_sw_entry(mux, ring);
        if (!e) {
                DRM_ERROR("cannot find entry!\n");
                return;
        }

        last_seq = atomic_read(&ring->fence_drv.last_seq);

        list_for_each_entry_safe(chunk, tmp, &e->list, entry) {
                if (chunk->sync_seq <= last_seq) {
                        list_del(&chunk->entry);
                        kmem_cache_free(amdgpu_mux_chunk_slab, chunk);
                }
        }
}

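/*
 * Record the offset of a CONTROL/CE/DE packet in the current chunk so that it
 * can be patched when the chunk is resubmitted.
 */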
void amdgpu_ring_mux_ib_mark_offset(struct amdgpu_ring_mux *mux,
                                    struct amdgpu_ring *ring, u64 offset,
                                    enum amdgpu_ring_mux_offset_type type)
{
        struct amdgpu_mux_entry *e;
        struct amdgpu_mux_chunk *chunk;

        e = amdgpu_ring_mux_sw_entry(mux, ring);
        if (!e) {
                DRM_ERROR("cannot find entry!\n");
                return;
        }

        /* list_last_entry() never returns NULL, so check for an empty list instead */
        if (list_empty(&e->list)) {
                DRM_ERROR("cannot find chunk!\n");
                return;
        }

        chunk = list_last_entry(&e->list, struct amdgpu_mux_chunk, entry);

        switch (type) {
        case AMDGPU_MUX_OFFSET_TYPE_CONTROL:
                chunk->cntl_offset = offset;
                break;
        case AMDGPU_MUX_OFFSET_TYPE_DE:
                chunk->de_offset = offset;
                break;
        case AMDGPU_MUX_OFFSET_TYPE_CE:
                chunk->ce_offset = offset;
                break;
        default:
                DRM_ERROR("invalid type (%d)\n", type);
                break;
        }
}

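/*
 * Close the current chunk at the end of an IB submission: record its end
 * pointer and fence sequence, then reclaim chunks that have already signaled.
 */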
void amdgpu_ring_mux_end_ib(struct amdgpu_ring_mux *mux, struct amdgpu_ring *ring)
{
        struct amdgpu_mux_entry *e;
        struct amdgpu_mux_chunk *chunk;

        e = amdgpu_ring_mux_sw_entry(mux, ring);
        if (!e) {
                DRM_ERROR("cannot find entry!\n");
                return;
        }

        /* list_last_entry() never returns NULL, so check for an empty list instead */
        if (list_empty(&e->list)) {
                DRM_ERROR("cannot find chunk!\n");
                return;
        }

        chunk = list_last_entry(&e->list, struct amdgpu_mux_chunk, entry);

        chunk->end = ring->wptr;
        chunk->sync_seq = READ_ONCE(ring->fence_drv.sync_seq);

        scan_and_remove_signaled_chunk(mux, ring);
}

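/*
 * Handle the trailing fence written at the end of a preemption. If the low
 * priority ring still has unsignaled fences, schedule a resubmit of its
 * outstanding chunks.
 */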
bool amdgpu_mcbp_handle_trailing_fence_irq(struct amdgpu_ring_mux *mux)
{
        struct amdgpu_mux_entry *e;
        struct amdgpu_ring *ring = NULL;
        int i;

        if (!mux->pending_trailing_fence_signaled)
                return false;

        if (mux->real_ring->trail_seq != le32_to_cpu(*mux->real_ring->trail_fence_cpu_addr))
                return false;

        for (i = 0; i < mux->num_ring_entries; i++) {
                e = &mux->ring_entry[i];
                if (e->ring->hw_prio <= AMDGPU_RING_PRIO_DEFAULT) {
                        ring = e->ring;
                        break;
                }
        }

        if (!ring) {
                DRM_ERROR("cannot find low priority ring\n");
                return false;
        }

        amdgpu_fence_process(ring);
        if (amdgpu_fence_count_emitted(ring) > 0) {
                mux->s_resubmit = true;
                mux->seqno_to_resubmit = ring->fence_drv.sync_seq;
                amdgpu_ring_mux_schedule_resubmit(mux);
        }

        mux->pending_trailing_fence_signaled = false;
        return true;
}