/*
 * Copyright 2009 Jerome Glisse.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * Authors:
 *    Jerome Glisse <glisse@freedesktop.org>
 */
#include <linux/atomic.h>
#include <linux/firmware.h>
#include <linux/kref.h>
#include <linux/sched/signal.h>
#include <linux/seq_file.h>
#include <linux/slab.h>
#include <linux/wait.h>

#include <drm/drm_device.h>
#include <drm/drm_file.h>

#include "radeon.h"
#include "radeon_reg.h"
#include "radeon_trace.h"
/*
 * Fences mark an event in the GPU's pipeline and are used
 * for GPU/CPU synchronization. When the fence is written,
 * it is expected that all buffers associated with that fence
 * are no longer in use by the associated ring on the GPU and
 * that the relevant GPU caches have been flushed. Whether
 * we use a scratch register or memory location depends on the asic
 * and whether writeback is enabled.
 */
/**
 * radeon_fence_write - write a fence value
 *
 * @rdev: radeon_device pointer
 * @seq: sequence number to write
 * @ring: ring index the fence is associated with
 *
 * Writes a fence value to memory or a scratch register (all asics).
 */
static void radeon_fence_write(struct radeon_device *rdev, u32 seq, int ring)
{
	struct radeon_fence_driver *drv = &rdev->fence_drv[ring];

	if (likely(rdev->wb.enabled || !drv->scratch_reg)) {
		if (drv->cpu_addr)
			*drv->cpu_addr = cpu_to_le32(seq);
	} else {
		WREG32(drv->scratch_reg, seq);
	}
}
/**
 * radeon_fence_read - read a fence value
 *
 * @rdev: radeon_device pointer
 * @ring: ring index the fence is associated with
 *
 * Reads a fence value from memory or a scratch register (all asics).
 * Returns the value of the fence read from memory or register.
 */
static u32 radeon_fence_read(struct radeon_device *rdev, int ring)
{
	struct radeon_fence_driver *drv = &rdev->fence_drv[ring];
	u32 seq = 0;

	if (likely(rdev->wb.enabled || !drv->scratch_reg)) {
		if (drv->cpu_addr)
			seq = le32_to_cpu(*drv->cpu_addr);
		else
			seq = lower_32_bits(atomic64_read(&drv->last_seq));
	} else {
		seq = RREG32(drv->scratch_reg);
	}
	return seq;
}
/**
 * radeon_fence_schedule_check - schedule lockup check
 *
 * @rdev: radeon_device pointer
 * @ring: ring index we should work with
 *
 * Queues a delayed work item to check for lockups.
 */
static void radeon_fence_schedule_check(struct radeon_device *rdev, int ring)
{
	/*
	 * Do not reset the timer here with mod_delayed_work,
	 * this can livelock in an interaction with TTM delayed destroy.
	 */
	queue_delayed_work(system_power_efficient_wq,
			   &rdev->fence_drv[ring].lockup_work,
			   RADEON_FENCE_JIFFIES_TIMEOUT);
}
/**
 * radeon_fence_emit - emit a fence on the requested ring
 *
 * @rdev: radeon_device pointer
 * @fence: radeon fence object
 * @ring: ring index the fence is associated with
 *
 * Emits a fence command on the requested ring (all asics).
 * Returns 0 on success, -ENOMEM on failure.
 */
int radeon_fence_emit(struct radeon_device *rdev,
		      struct radeon_fence **fence,
		      int ring)
{
	u64 seq;

	/* we are protected by the ring emission mutex */
	*fence = kmalloc(sizeof(struct radeon_fence), GFP_KERNEL);
	if ((*fence) == NULL)
		return -ENOMEM;

	(*fence)->rdev = rdev;
	(*fence)->seq = seq = ++rdev->fence_drv[ring].sync_seq[ring];
	(*fence)->ring = ring;
	(*fence)->is_vm_update = false;
	dma_fence_init(&(*fence)->base, &radeon_fence_ops,
		       &rdev->fence_queue.lock,
		       rdev->fence_context + ring,
		       seq);
	radeon_fence_ring_emit(rdev, ring, *fence);
	trace_radeon_fence_emit(rdev->ddev, ring, (*fence)->seq);
	radeon_fence_schedule_check(rdev, ring);
	return 0;
}
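
/*
 * Illustrative usage sketch (not part of this file, assumptions noted):
 * callers typically emit a fence right after writing their command packets,
 * while still holding the ring emission mutex via radeon_ring_lock()/
 * radeon_ring_unlock_commit(). "fence", "rdev" and "ring" are placeholder
 * variables and error handling is elided:
 *
 *	struct radeon_fence *fence = NULL;
 *
 *	r = radeon_ring_lock(rdev, ring, 64);
 *	... write command packets to the ring ...
 *	r = radeon_fence_emit(rdev, &fence, ring->idx);
 *	radeon_ring_unlock_commit(rdev, ring, false);
 */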
/**
 * radeon_fence_check_signaled - callback from fence_queue
 *
 * This function is called with the fence_queue lock held, which is also used
 * for the fence locking itself, so unlocked variants are used for
 * fence_signal and remove_wait_queue.
 */
static int radeon_fence_check_signaled(wait_queue_entry_t *wait,
				       unsigned int mode, int flags, void *key)
{
	struct radeon_fence *fence;
	u64 seq;

	fence = container_of(wait, struct radeon_fence, fence_wake);

	/*
	 * We cannot use radeon_fence_process here because we're already
	 * in the waitqueue, in a call from wake_up_all.
	 */
	seq = atomic64_read(&fence->rdev->fence_drv[fence->ring].last_seq);
	if (seq >= fence->seq) {
		dma_fence_signal_locked(&fence->base);
		radeon_irq_kms_sw_irq_put(fence->rdev, fence->ring);
		__remove_wait_queue(&fence->rdev->fence_queue, &fence->fence_wake);
		dma_fence_put(&fence->base);
	}
	return 0;
}
/**
 * radeon_fence_activity - check for fence activity
 *
 * @rdev: radeon_device pointer
 * @ring: ring index the fence is associated with
 *
 * Checks the current fence value and calculates the last
 * signalled fence value. Returns true if activity occurred
 * on the ring, meaning the fence_queue should be woken up.
 */
static bool radeon_fence_activity(struct radeon_device *rdev, int ring)
{
	uint64_t seq, last_seq, last_emitted;
	unsigned int count_loop = 0;
	bool wake = false;

	/* Note there is a scenario here for an infinite loop but it's
	 * very unlikely to happen. For it to happen, the current polling
	 * process needs to be interrupted by another process, and that
	 * other process needs to update last_seq between the atomic read
	 * and the xchg of the current process.
	 *
	 * Moreover, for this to turn into an infinite loop, there needs
	 * to be a continuous stream of newly signaled fences, i.e.
	 * radeon_fence_read needs to return a different value each time
	 * for both the currently polling process and the other process
	 * that xchg's last_seq between the atomic read and xchg of the
	 * current process. And the value the other process sets as last
	 * seq must be higher than the seq value we just read. That means
	 * the current process needs to be interrupted after
	 * radeon_fence_read and before the atomic xchg.
	 *
	 * To be even more safe we count the number of times we loop and
	 * bail after 10 loops, just accepting the fact that we might have
	 * temporarily set last_seq not to the true last signaled seq but
	 * to an older one.
	 */
	last_seq = atomic64_read(&rdev->fence_drv[ring].last_seq);
	do {
		last_emitted = rdev->fence_drv[ring].sync_seq[ring];
		seq = radeon_fence_read(rdev, ring);
		seq |= last_seq & 0xffffffff00000000LL;
		if (seq < last_seq) {
			seq &= 0xffffffff;
			seq |= last_emitted & 0xffffffff00000000LL;
		}

		if (seq <= last_seq || seq > last_emitted)
			break;

		/* If we loop over we don't want to return without
		 * checking if a fence is signaled, as it means that the
		 * seq we just read is different from the previous one.
		 */
		wake = true;
		last_seq = seq;
		if ((count_loop++) > 10) {
			/* We looped over too many times; leave with the
			 * fact that we might have set an older fence
			 * seq than the current real last seq as signaled
			 * by the hw.
			 */
			break;
		}
	} while (atomic64_xchg(&rdev->fence_drv[ring].last_seq, seq) > seq);

	if (seq < last_emitted)
		radeon_fence_schedule_check(rdev, ring);

	return wake;
}
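
/*
 * Worked example for the 32->64 bit extension in radeon_fence_activity
 * (illustrative numbers, not from the source): with last_seq ==
 * 0x00000001fffffff0 and the hardware reporting 0x00000010, the first OR
 * yields seq == 0x0000000100000010, which is below last_seq, so the 32-bit
 * counter must have wrapped. Borrowing the upper bits of last_emitted (say
 * 0x0000000200000020) gives seq == 0x0000000200000010, which again falls
 * between last_seq and last_emitted as the loop requires.
 */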
/**
 * radeon_fence_check_lockup - check for hardware lockup
 *
 * @work: delayed work item
 *
 * Checks for fence activity and, if there is none, probes
 * the hardware to see if a lockup occurred.
 */
static void radeon_fence_check_lockup(struct work_struct *work)
{
	struct radeon_fence_driver *fence_drv;
	struct radeon_device *rdev;
	int ring;

	fence_drv = container_of(work, struct radeon_fence_driver,
				 lockup_work.work);
	rdev = fence_drv->rdev;
	ring = fence_drv - &rdev->fence_drv[0];

	if (!down_read_trylock(&rdev->exclusive_lock)) {
		/* just reschedule the check if a reset is going on */
		radeon_fence_schedule_check(rdev, ring);
		return;
	}

	if (fence_drv->delayed_irq && rdev->irq.installed) {
		unsigned long irqflags;

		fence_drv->delayed_irq = false;
		spin_lock_irqsave(&rdev->irq.lock, irqflags);
		radeon_irq_set(rdev);
		spin_unlock_irqrestore(&rdev->irq.lock, irqflags);
	}

	if (radeon_fence_activity(rdev, ring))
		wake_up_all(&rdev->fence_queue);
	else if (radeon_ring_is_lockup(rdev, ring, &rdev->ring[ring])) {
		/* good news, we believe it's a lockup */
		dev_warn(rdev->dev, "GPU lockup (current fence id 0x%016llx last fence id 0x%016llx on ring %d)\n",
			 (uint64_t)atomic64_read(&fence_drv->last_seq),
			 fence_drv->sync_seq[ring], ring);

		/* remember that we need a reset */
		rdev->needs_reset = true;
		wake_up_all(&rdev->fence_queue);
	}
	up_read(&rdev->exclusive_lock);
}
/**
 * radeon_fence_process - process a fence
 *
 * @rdev: radeon_device pointer
 * @ring: ring index the fence is associated with
 *
 * Checks the current fence value and wakes the fence queue
 * if the sequence number has increased (all asics).
 */
void radeon_fence_process(struct radeon_device *rdev, int ring)
{
	if (radeon_fence_activity(rdev, ring))
		wake_up_all(&rdev->fence_queue);
}
/**
 * radeon_fence_seq_signaled - check if a fence sequence number has signaled
 *
 * @rdev: radeon device pointer
 * @seq: sequence number
 * @ring: ring index the fence is associated with
 *
 * Check if the last signaled fence sequence number is >= the requested
 * sequence number (all asics).
 * Returns true if the fence has signaled (current fence value
 * is >= requested value) or false if it has not (current fence
 * value is < the requested value). Helper function for
 * radeon_fence_signaled().
 */
static bool radeon_fence_seq_signaled(struct radeon_device *rdev,
				      u64 seq, unsigned int ring)
{
	if (atomic64_read(&rdev->fence_drv[ring].last_seq) >= seq)
		return true;

	/* poll new last sequence at least once */
	radeon_fence_process(rdev, ring);
	if (atomic64_read(&rdev->fence_drv[ring].last_seq) >= seq)
		return true;

	return false;
}
static bool radeon_fence_is_signaled(struct dma_fence *f)
{
	struct radeon_fence *fence = to_radeon_fence(f);
	struct radeon_device *rdev = fence->rdev;
	unsigned int ring = fence->ring;
	u64 seq = fence->seq;

	if (atomic64_read(&rdev->fence_drv[ring].last_seq) >= seq)
		return true;

	if (down_read_trylock(&rdev->exclusive_lock)) {
		radeon_fence_process(rdev, ring);
		up_read(&rdev->exclusive_lock);

		if (atomic64_read(&rdev->fence_drv[ring].last_seq) >= seq)
			return true;
	}
	return false;
}
/**
 * radeon_fence_enable_signaling - enable signaling on fence
 * @f: fence
 *
 * This function is called with the fence_queue lock held, and adds a callback
 * to fence_queue that checks if this fence is signaled, and if so it
 * signals the fence and removes itself.
 */
static bool radeon_fence_enable_signaling(struct dma_fence *f)
{
	struct radeon_fence *fence = to_radeon_fence(f);
	struct radeon_device *rdev = fence->rdev;

	if (atomic64_read(&rdev->fence_drv[fence->ring].last_seq) >= fence->seq)
		return false;

	if (down_read_trylock(&rdev->exclusive_lock)) {
		radeon_irq_kms_sw_irq_get(rdev, fence->ring);

		if (radeon_fence_activity(rdev, fence->ring))
			wake_up_all_locked(&rdev->fence_queue);

		/* did fence get signaled after we enabled the sw irq? */
		if (atomic64_read(&rdev->fence_drv[fence->ring].last_seq) >= fence->seq) {
			radeon_irq_kms_sw_irq_put(rdev, fence->ring);
			up_read(&rdev->exclusive_lock);
			return false;
		}

		up_read(&rdev->exclusive_lock);
	} else {
		/* we're probably in a lockup, let's not fiddle too much */
		if (radeon_irq_kms_sw_irq_get_delayed(rdev, fence->ring))
			rdev->fence_drv[fence->ring].delayed_irq = true;
		radeon_fence_schedule_check(rdev, fence->ring);
	}

	fence->fence_wake.flags = 0;
	fence->fence_wake.private = NULL;
	fence->fence_wake.func = radeon_fence_check_signaled;
	__add_wait_queue(&rdev->fence_queue, &fence->fence_wake);
	dma_fence_get(f);
	return true;
}
/**
 * radeon_fence_signaled - check if a fence has signaled
 *
 * @fence: radeon fence object
 *
 * Check if the requested fence has signaled (all asics).
 * Returns true if the fence has signaled or false if it has not.
 */
bool radeon_fence_signaled(struct radeon_fence *fence)
{
	if (!fence)
		return true;

	if (radeon_fence_seq_signaled(fence->rdev, fence->seq, fence->ring)) {
		dma_fence_signal(&fence->base);
		return true;
	}
	return false;
}
/**
 * radeon_fence_any_seq_signaled - check if any sequence number is signaled
 *
 * @rdev: radeon device pointer
 * @seq: sequence numbers
 *
 * Check if the last signaled fence sequence number is >= the requested
 * sequence number (all asics).
 * Returns true if any has signaled (current value is >= requested value)
 * or false if it has not. Helper function for radeon_fence_wait_seq.
 */
static bool radeon_fence_any_seq_signaled(struct radeon_device *rdev, u64 *seq)
{
	unsigned int i;

	for (i = 0; i < RADEON_NUM_RINGS; ++i) {
		if (seq[i] && radeon_fence_seq_signaled(rdev, seq[i], i))
			return true;
	}
	return false;
}
/**
 * radeon_fence_wait_seq_timeout - wait for specific sequence numbers
 *
 * @rdev: radeon device pointer
 * @target_seq: sequence number(s) we want to wait for
 * @intr: use interruptible sleep
 * @timeout: maximum time to wait, or MAX_SCHEDULE_TIMEOUT for infinite wait
 *
 * Wait for the requested sequence number(s) to be written by any ring
 * (all asics). The sequence number array is indexed by ring id.
 * @intr selects whether to use interruptible (true) or non-interruptible
 * (false) sleep when waiting for the sequence number. Helper function
 * for radeon_fence_wait_*().
 * Returns remaining time if the sequence number has passed, 0 when
 * the wait timed out, or an error for all other cases.
 * -EDEADLK is returned when a GPU lockup has been detected.
 */
static long radeon_fence_wait_seq_timeout(struct radeon_device *rdev,
					  u64 *target_seq, bool intr,
					  long timeout)
{
	long r;
	int i;

	if (radeon_fence_any_seq_signaled(rdev, target_seq))
		return timeout;

	/* enable IRQs and tracing */
	for (i = 0; i < RADEON_NUM_RINGS; ++i) {
		if (!target_seq[i])
			continue;

		trace_radeon_fence_wait_begin(rdev->ddev, i, target_seq[i]);
		radeon_irq_kms_sw_irq_get(rdev, i);
	}

	if (intr) {
		r = wait_event_interruptible_timeout(rdev->fence_queue, (
			radeon_fence_any_seq_signaled(rdev, target_seq)
			 || rdev->needs_reset), timeout);
	} else {
		r = wait_event_timeout(rdev->fence_queue, (
			radeon_fence_any_seq_signaled(rdev, target_seq)
			 || rdev->needs_reset), timeout);
	}

	if (rdev->needs_reset)
		r = -EDEADLK;

	for (i = 0; i < RADEON_NUM_RINGS; ++i) {
		if (!target_seq[i])
			continue;

		radeon_irq_kms_sw_irq_put(rdev, i);
		trace_radeon_fence_wait_end(rdev->ddev, i, target_seq[i]);
	}

	return r;
}
/**
 * radeon_fence_wait_timeout - wait for a fence to signal with timeout
 *
 * @fence: radeon fence object
 * @intr: use interruptible sleep
 * @timeout: maximum time to wait, or MAX_SCHEDULE_TIMEOUT for infinite wait
 *
 * Wait for the requested fence to signal (all asics).
 * @intr selects whether to use interruptible (true) or non-interruptible
 * (false) sleep when waiting for the fence.
 * Returns remaining time if the fence has signaled, 0 when the wait timed
 * out, or an error for all other cases.
 */
long radeon_fence_wait_timeout(struct radeon_fence *fence, bool intr, long timeout)
{
	uint64_t seq[RADEON_NUM_RINGS] = {};
	long r;

	/*
	 * This function should not be called on !radeon fences.
	 * If this is the case, it would mean this function can
	 * also be called on radeon fences belonging to another card.
	 * exclusive_lock is not held in that case.
	 */
	if (WARN_ON_ONCE(!to_radeon_fence(&fence->base)))
		return dma_fence_wait(&fence->base, intr);

	seq[fence->ring] = fence->seq;
	r = radeon_fence_wait_seq_timeout(fence->rdev, seq, intr, timeout);
	if (r <= 0)
		return r;

	dma_fence_signal(&fence->base);
	return r;
}
/**
 * radeon_fence_wait - wait for a fence to signal
 *
 * @fence: radeon fence object
 * @intr: use interruptible sleep
 *
 * Wait for the requested fence to signal (all asics).
 * @intr selects whether to use interruptible (true) or non-interruptible
 * (false) sleep when waiting for the fence.
 * Returns 0 if the fence has passed, error for all other cases.
 */
int radeon_fence_wait(struct radeon_fence *fence, bool intr)
{
	long r = radeon_fence_wait_timeout(fence, intr, MAX_SCHEDULE_TIMEOUT);

	if (r > 0)
		return 0;
	else
		return r;
}
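
/*
 * Illustrative usage (not part of this file): a typical CPU-side wait on a
 * previously emitted fence, e.g. before reading back a buffer. "fence" is a
 * placeholder variable:
 *
 *	r = radeon_fence_wait(fence, true);
 *	if (r == -EDEADLK)
 *		... a GPU lockup was detected, a reset is required ...
 *	radeon_fence_unref(&fence);
 */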
/**
 * radeon_fence_wait_any - wait for a fence to signal on any ring
 *
 * @rdev: radeon device pointer
 * @fences: radeon fence object(s)
 * @intr: use interruptible sleep
 *
 * Wait for any requested fence to signal (all asics). The fence
 * array is indexed by ring id. @intr selects whether to use
 * interruptible (true) or non-interruptible (false) sleep when
 * waiting for the fences. Used by the suballocator.
 * Returns 0 if any fence has passed, error for all other cases.
 */
int radeon_fence_wait_any(struct radeon_device *rdev,
			  struct radeon_fence **fences,
			  bool intr)
{
	uint64_t seq[RADEON_NUM_RINGS];
	unsigned int i, num_rings = 0;
	long r;

	for (i = 0; i < RADEON_NUM_RINGS; ++i) {
		seq[i] = 0;

		if (!fences[i])
			continue;

		seq[i] = fences[i]->seq;
		++num_rings;
	}

	/* nothing to wait for ? */
	if (num_rings == 0)
		return -ENOENT;

	r = radeon_fence_wait_seq_timeout(rdev, seq, intr, MAX_SCHEDULE_TIMEOUT);
	if (r < 0)
		return r;

	return 0;
}
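
/*
 * Illustrative usage (not part of this file): the suballocator waits for
 * whichever of several per-ring fences signals first. The array is indexed
 * by ring id, with NULL entries for rings that have nothing to wait on.
 * "gfx_fence" and "dma_ring_fence" are placeholder variables:
 *
 *	struct radeon_fence *fences[RADEON_NUM_RINGS] = {};
 *
 *	fences[RADEON_RING_TYPE_GFX_INDEX] = gfx_fence;
 *	fences[R600_RING_TYPE_DMA_INDEX] = dma_ring_fence;
 *	r = radeon_fence_wait_any(rdev, fences, false);
 */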
/**
 * radeon_fence_wait_next - wait for the next fence to signal
 *
 * @rdev: radeon device pointer
 * @ring: ring index the fence is associated with
 *
 * Wait for the next fence on the requested ring to signal (all asics).
 * Returns 0 if the next fence has passed, error for all other cases.
 * Caller must hold ring lock.
 */
int radeon_fence_wait_next(struct radeon_device *rdev, int ring)
{
	uint64_t seq[RADEON_NUM_RINGS] = {};
	long r;

	seq[ring] = atomic64_read(&rdev->fence_drv[ring].last_seq) + 1ULL;
	if (seq[ring] >= rdev->fence_drv[ring].sync_seq[ring]) {
		/* nothing to wait for, last_seq is already
		 * the last emitted fence
		 */
		return -ENOENT;
	}
	r = radeon_fence_wait_seq_timeout(rdev, seq, false, MAX_SCHEDULE_TIMEOUT);
	if (r < 0)
		return r;
	return 0;
}
/**
 * radeon_fence_wait_empty - wait for all fences to signal
 *
 * @rdev: radeon device pointer
 * @ring: ring index the fence is associated with
 *
 * Wait for all fences on the requested ring to signal (all asics).
 * Returns 0 if the fences have passed, error for all other cases.
 * Caller must hold ring lock.
 */
int radeon_fence_wait_empty(struct radeon_device *rdev, int ring)
{
	uint64_t seq[RADEON_NUM_RINGS] = {};
	long r;

	seq[ring] = rdev->fence_drv[ring].sync_seq[ring];
	if (!seq[ring])
		return 0;

	r = radeon_fence_wait_seq_timeout(rdev, seq, false, MAX_SCHEDULE_TIMEOUT);
	if (r < 0) {
		if (r == -EDEADLK)
			return -EDEADLK;

		dev_err(rdev->dev, "error waiting for ring[%d] to become idle (%ld)\n",
			ring, r);
	}
	return 0;
}
/**
 * radeon_fence_ref - take a ref on a fence
 *
 * @fence: radeon fence object
 *
 * Take a reference on a fence (all asics).
 * Returns the fence.
 */
struct radeon_fence *radeon_fence_ref(struct radeon_fence *fence)
{
	dma_fence_get(&fence->base);
	return fence;
}
/**
 * radeon_fence_unref - remove a ref on a fence
 *
 * @fence: radeon fence object
 *
 * Remove a reference on a fence (all asics).
 */
void radeon_fence_unref(struct radeon_fence **fence)
{
	struct radeon_fence *tmp = *fence;

	*fence = NULL;
	if (tmp)
		dma_fence_put(&tmp->base);
}
/**
 * radeon_fence_count_emitted - get the count of emitted fences
 *
 * @rdev: radeon device pointer
 * @ring: ring index the fence is associated with
 *
 * Get the number of fences emitted on the requested ring (all asics).
 * Returns the number of emitted fences on the ring. Used by the
 * dynpm code to track ring activity.
 */
unsigned int radeon_fence_count_emitted(struct radeon_device *rdev, int ring)
{
	uint64_t emitted;

	/* We are not protected by ring lock when reading the last sequence,
	 * but it's ok to report a slightly wrong fence count here.
	 */
	radeon_fence_process(rdev, ring);
	emitted = rdev->fence_drv[ring].sync_seq[ring]
		- atomic64_read(&rdev->fence_drv[ring].last_seq);
	/* to avoid a 32-bit wraparound */
	if (emitted > 0x10000000)
		emitted = 0x10000000;

	return (unsigned int)emitted;
}
/**
 * radeon_fence_need_sync - do we need a semaphore
 *
 * @fence: radeon fence object
 * @dst_ring: which ring to check against
 *
 * Check if the fence needs to be synced against another ring
 * (all asics). If so, we need to emit a semaphore.
 * Returns true if we need to sync with another ring, false if
 * we don't.
 */
bool radeon_fence_need_sync(struct radeon_fence *fence, int dst_ring)
{
	struct radeon_fence_driver *fdrv;

	if (!fence)
		return false;

	if (fence->ring == dst_ring)
		return false;

	/* we are protected by the ring mutex */
	fdrv = &fence->rdev->fence_drv[dst_ring];
	if (fence->seq <= fdrv->sync_seq[fence->ring])
		return false;

	return true;
}
/**
 * radeon_fence_note_sync - record the sync point
 *
 * @fence: radeon fence object
 * @dst_ring: which ring to check against
 *
 * Note the sequence number at which point the fence will
 * be synced with the requested ring (all asics).
 */
void radeon_fence_note_sync(struct radeon_fence *fence, int dst_ring)
{
	struct radeon_fence_driver *dst, *src;
	unsigned int i;

	if (!fence)
		return;

	if (fence->ring == dst_ring)
		return;

	/* we are protected by the ring mutex */
	src = &fence->rdev->fence_drv[fence->ring];
	dst = &fence->rdev->fence_drv[dst_ring];
	for (i = 0; i < RADEON_NUM_RINGS; ++i) {
		if (i == dst_ring)
			continue;

		dst->sync_seq[i] = max(dst->sync_seq[i], src->sync_seq[i]);
	}
}
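
/*
 * Illustrative pairing of the two helpers above (not part of this file):
 * ring synchronization code first asks whether a semaphore is required,
 * emits one if so, and then records the new sync point:
 *
 *	if (radeon_fence_need_sync(fence, dst_ring)) {
 *		... emit a semaphore wait on dst_ring ...
 *		radeon_fence_note_sync(fence, dst_ring);
 *	}
 */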
/**
 * radeon_fence_driver_start_ring - make the fence driver
 * ready for use on the requested ring.
 *
 * @rdev: radeon device pointer
 * @ring: ring index to start the fence driver on
 *
 * Make the fence driver ready for processing (all asics).
 * Not all asics have all rings, so each asic will only
 * start the fence driver on the rings it has.
 * Returns 0 for success, errors for failure.
 */
int radeon_fence_driver_start_ring(struct radeon_device *rdev, int ring)
{
	uint64_t index;
	int r;

	radeon_scratch_free(rdev, rdev->fence_drv[ring].scratch_reg);
	if (rdev->wb.use_event || !radeon_ring_supports_scratch_reg(rdev, &rdev->ring[ring])) {
		rdev->fence_drv[ring].scratch_reg = 0;
		if (ring != R600_RING_TYPE_UVD_INDEX) {
			index = R600_WB_EVENT_OFFSET + ring * 4;
			rdev->fence_drv[ring].cpu_addr = &rdev->wb.wb[index/4];
			rdev->fence_drv[ring].gpu_addr = rdev->wb.gpu_addr +
							 index;
		} else {
			/* put fence directly behind firmware */
			index = ALIGN(rdev->uvd_fw->size, 8);
			rdev->fence_drv[ring].cpu_addr = rdev->uvd.cpu_addr + index;
			rdev->fence_drv[ring].gpu_addr = rdev->uvd.gpu_addr + index;
		}
	} else {
		r = radeon_scratch_get(rdev, &rdev->fence_drv[ring].scratch_reg);
		if (r) {
			dev_err(rdev->dev, "fence failed to get scratch register\n");
			return r;
		}
		index = RADEON_WB_SCRATCH_OFFSET +
			rdev->fence_drv[ring].scratch_reg -
			rdev->scratch.reg_base;
		rdev->fence_drv[ring].cpu_addr = &rdev->wb.wb[index/4];
		rdev->fence_drv[ring].gpu_addr = rdev->wb.gpu_addr + index;
	}
	radeon_fence_write(rdev, atomic64_read(&rdev->fence_drv[ring].last_seq), ring);
	rdev->fence_drv[ring].initialized = true;
	dev_info(rdev->dev, "fence driver on ring %d uses gpu addr 0x%016llx\n",
		 ring, rdev->fence_drv[ring].gpu_addr);
	return 0;
}
/**
 * radeon_fence_driver_init_ring - init the fence driver
 * for the requested ring.
 *
 * @rdev: radeon device pointer
 * @ring: ring index to start the fence driver on
 *
 * Init the fence driver for the requested ring (all asics).
 * Helper function for radeon_fence_driver_init().
 */
static void radeon_fence_driver_init_ring(struct radeon_device *rdev, int ring)
{
	int i;

	rdev->fence_drv[ring].scratch_reg = -1;
	rdev->fence_drv[ring].cpu_addr = NULL;
	rdev->fence_drv[ring].gpu_addr = 0;
	for (i = 0; i < RADEON_NUM_RINGS; ++i)
		rdev->fence_drv[ring].sync_seq[i] = 0;
	atomic64_set(&rdev->fence_drv[ring].last_seq, 0);
	rdev->fence_drv[ring].initialized = false;
	INIT_DELAYED_WORK(&rdev->fence_drv[ring].lockup_work,
			  radeon_fence_check_lockup);
	rdev->fence_drv[ring].rdev = rdev;
}
/**
 * radeon_fence_driver_init - init the fence driver
 * for all possible rings.
 *
 * @rdev: radeon device pointer
 *
 * Init the fence driver for all possible rings (all asics).
 * Not all asics have all rings, so each asic will only
 * start the fence driver on the rings it has using
 * radeon_fence_driver_start_ring().
 */
void radeon_fence_driver_init(struct radeon_device *rdev)
{
	int ring;

	init_waitqueue_head(&rdev->fence_queue);
	for (ring = 0; ring < RADEON_NUM_RINGS; ring++)
		radeon_fence_driver_init_ring(rdev, ring);

	radeon_debugfs_fence_init(rdev);
}
/**
 * radeon_fence_driver_fini - tear down the fence driver
 * for all possible rings.
 *
 * @rdev: radeon device pointer
 *
 * Tear down the fence driver for all possible rings (all asics).
 */
void radeon_fence_driver_fini(struct radeon_device *rdev)
{
	int ring, r;

	mutex_lock(&rdev->ring_lock);
	for (ring = 0; ring < RADEON_NUM_RINGS; ring++) {
		if (!rdev->fence_drv[ring].initialized)
			continue;

		r = radeon_fence_wait_empty(rdev, ring);
		if (r) {
			/* no need to trigger GPU reset as we are unloading */
			radeon_fence_driver_force_completion(rdev, ring);
		}
		cancel_delayed_work_sync(&rdev->fence_drv[ring].lockup_work);
		wake_up_all(&rdev->fence_queue);
		radeon_scratch_free(rdev, rdev->fence_drv[ring].scratch_reg);
		rdev->fence_drv[ring].initialized = false;
	}
	mutex_unlock(&rdev->ring_lock);
}
/**
 * radeon_fence_driver_force_completion - force all fence waiters to complete
 *
 * @rdev: radeon device pointer
 * @ring: the ring to complete
 *
 * In case of a GPU reset failure, make sure no process keeps waiting on a
 * fence that will never complete.
 */
void radeon_fence_driver_force_completion(struct radeon_device *rdev, int ring)
{
	if (rdev->fence_drv[ring].initialized) {
		radeon_fence_write(rdev, rdev->fence_drv[ring].sync_seq[ring], ring);
		cancel_delayed_work_sync(&rdev->fence_drv[ring].lockup_work);
	}
}
/*
 * Fence debugfs
 */
#if defined(CONFIG_DEBUG_FS)
static int radeon_debugfs_fence_info_show(struct seq_file *m, void *data)
{
	struct radeon_device *rdev = m->private;
	int i, j;

	for (i = 0; i < RADEON_NUM_RINGS; ++i) {
		if (!rdev->fence_drv[i].initialized)
			continue;

		radeon_fence_process(rdev, i);

		seq_printf(m, "--- ring %d ---\n", i);
		seq_printf(m, "Last signaled fence 0x%016llx\n",
			   (unsigned long long)atomic64_read(&rdev->fence_drv[i].last_seq));
		seq_printf(m, "Last emitted        0x%016llx\n",
			   rdev->fence_drv[i].sync_seq[i]);

		for (j = 0; j < RADEON_NUM_RINGS; ++j) {
			if (i != j && rdev->fence_drv[j].initialized)
				seq_printf(m, "Last sync to ring %d 0x%016llx\n",
					   j, rdev->fence_drv[i].sync_seq[j]);
		}
	}
	return 0;
}
/*
 * radeon_debugfs_gpu_reset - manually trigger a gpu reset
 *
 * Manually trigger a gpu reset at the next fence wait.
 */
static int radeon_debugfs_gpu_reset(void *data, u64 *val)
{
	struct radeon_device *rdev = (struct radeon_device *)data;

	down_read(&rdev->exclusive_lock);
	*val = rdev->needs_reset;
	rdev->needs_reset = true;
	wake_up_all(&rdev->fence_queue);
	up_read(&rdev->exclusive_lock);

	return 0;
}

DEFINE_SHOW_ATTRIBUTE(radeon_debugfs_fence_info);
DEFINE_DEBUGFS_ATTRIBUTE(radeon_debugfs_gpu_reset_fops,
			 radeon_debugfs_gpu_reset, NULL, "%lld\n");
#endif
void radeon_debugfs_fence_init(struct radeon_device *rdev)
{
#if defined(CONFIG_DEBUG_FS)
	struct dentry *root = rdev->ddev->primary->debugfs_root;

	debugfs_create_file("radeon_gpu_reset", 0444, root, rdev,
			    &radeon_debugfs_gpu_reset_fops);
	debugfs_create_file("radeon_fence_info", 0444, root, rdev,
			    &radeon_debugfs_fence_info_fops);
#endif
}
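
/*
 * Usage note (assumption: debugfs mounted at the usual location): these
 * files appear under the DRM minor, e.g.
 * /sys/kernel/debug/dri/0/radeon_fence_info. Reading radeon_fence_info
 * dumps the last signaled and last emitted sequence numbers per ring;
 * reading radeon_gpu_reset reports the current needs_reset state and flags
 * the GPU for a reset at the next fence wait.
 */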
static const char *radeon_fence_get_driver_name(struct dma_fence *fence)
{
	return "radeon";
}

static const char *radeon_fence_get_timeline_name(struct dma_fence *f)
{
	struct radeon_fence *fence = to_radeon_fence(f);

	switch (fence->ring) {
	case RADEON_RING_TYPE_GFX_INDEX: return "radeon.gfx";
	case CAYMAN_RING_TYPE_CP1_INDEX: return "radeon.cp1";
	case CAYMAN_RING_TYPE_CP2_INDEX: return "radeon.cp2";
	case R600_RING_TYPE_DMA_INDEX: return "radeon.dma";
	case CAYMAN_RING_TYPE_DMA1_INDEX: return "radeon.dma1";
	case R600_RING_TYPE_UVD_INDEX: return "radeon.uvd";
	case TN_RING_TYPE_VCE1_INDEX: return "radeon.vce1";
	case TN_RING_TYPE_VCE2_INDEX: return "radeon.vce2";
	default:
		WARN_ON_ONCE(1);
		return "radeon.unk";
	}
}
static inline bool radeon_test_signaled(struct radeon_fence *fence)
{
	return test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &fence->base.flags);
}

struct radeon_wait_cb {
	struct dma_fence_cb base;
	struct task_struct *task;
};

static void
radeon_fence_wait_cb(struct dma_fence *fence, struct dma_fence_cb *cb)
{
	struct radeon_wait_cb *wait =
		container_of(cb, struct radeon_wait_cb, base);

	wake_up_process(wait->task);
}
static signed long radeon_fence_default_wait(struct dma_fence *f, bool intr,
					     signed long t)
{
	struct radeon_fence *fence = to_radeon_fence(f);
	struct radeon_device *rdev = fence->rdev;
	struct radeon_wait_cb cb;

	cb.task = current;

	if (dma_fence_add_callback(f, &cb.base, radeon_fence_wait_cb))
		return t;

	while (t > 0) {
		if (intr)
			set_current_state(TASK_INTERRUPTIBLE);
		else
			set_current_state(TASK_UNINTERRUPTIBLE);

		/*
		 * radeon_test_signaled must be called after
		 * set_current_state to prevent a race with wake_up_process
		 */
		if (radeon_test_signaled(fence))
			break;

		if (rdev->needs_reset) {
			t = -EDEADLK;
			break;
		}

		t = schedule_timeout(t);

		if (t > 0 && intr && signal_pending(current))
			t = -ERESTARTSYS;
	}

	__set_current_state(TASK_RUNNING);
	dma_fence_remove_callback(f, &cb.base);

	return t;
}
const struct dma_fence_ops radeon_fence_ops = {
	.get_driver_name = radeon_fence_get_driver_name,
	.get_timeline_name = radeon_fence_get_timeline_name,
	.enable_signaling = radeon_fence_enable_signaling,
	.signaled = radeon_fence_is_signaled,
	.wait = radeon_fence_default_wait,
	.release = NULL,
};