/*
 * Copyright 2008 Advanced Micro Devices, Inc.
 * Copyright 2008 Red Hat Inc.
 * Copyright 2009 Jerome Glisse.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Dave Airlie
 */
#include <linux/seq_file.h>
#include <linux/slab.h>
#include <linux/uaccess.h>
#include <linux/debugfs.h>

#include <drm/amdgpu_drm.h>
#include "amdgpu.h"
#include "atom.h"
/*
 * Rings
 * Most engines on the GPU are fed via ring buffers. Ring
 * buffers are areas of GPU accessible memory that the host
 * writes commands into and the GPU reads commands out of.
 * There is a rptr (read pointer) that determines where the
 * GPU is currently reading, and a wptr (write pointer)
 * which determines where the host has written. When the
 * pointers are equal, the ring is idle. When the host
 * writes commands to the ring buffer, it increments the
 * wptr. The GPU then starts fetching commands and executes
 * them until the pointers are equal again.
 */
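
/*
 * A minimal usage sketch (illustrative only; "packet" is a placeholder and
 * error handling is trimmed): a caller reserves space, emits packets and
 * then commits, which bumps the wptr so the GPU starts fetching:
 *
 *	r = amdgpu_ring_alloc(ring, 32);
 *	if (r)
 *		return r;
 *	amdgpu_ring_write(ring, packet);
 *	amdgpu_ring_commit(ring);
 *
 * If emission fails midway, amdgpu_ring_undo() restores the saved wptr
 * instead of committing.
 */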
/**
 * amdgpu_ring_alloc - allocate space on the ring buffer
 *
 * @ring: amdgpu_ring structure holding ring information
 * @ndw: number of dwords to allocate in the ring buffer
 *
 * Allocate @ndw dwords in the ring buffer (all asics).
 * Returns 0 on success, error on failure.
 */
int amdgpu_ring_alloc(struct amdgpu_ring *ring, unsigned ndw)
{
        /* Align requested size with padding so unlock_commit can
         * pad safely.
         */
        ndw = (ndw + ring->funcs->align_mask) & ~ring->funcs->align_mask;

        /* Make sure we aren't trying to allocate more space
         * than the maximum for one submission
         */
        if (WARN_ON_ONCE(ndw > ring->max_dw))
                return -ENOMEM;

        ring->count_dw = ndw;
        ring->wptr_old = ring->wptr;

        if (ring->funcs->begin_use)
                ring->funcs->begin_use(ring);

        return 0;
}
/**
 * amdgpu_ring_insert_nop - insert NOP packets
 *
 * @ring: amdgpu_ring structure holding ring information
 * @count: the number of NOP packets to insert
 *
 * This is the generic insert_nop function for rings except SDMA
 */
void amdgpu_ring_insert_nop(struct amdgpu_ring *ring, uint32_t count)
{
        int i;

        for (i = 0; i < count; i++)
                amdgpu_ring_write(ring, ring->funcs->nop);
}
/**
 * amdgpu_ring_generic_pad_ib - pad IB with NOP packets
 *
 * @ring: amdgpu_ring structure holding ring information
 * @ib: IB to add NOP packets to
 *
 * This is the generic pad_ib function for rings except SDMA
 */
void amdgpu_ring_generic_pad_ib(struct amdgpu_ring *ring, struct amdgpu_ib *ib)
{
        while (ib->length_dw & ring->funcs->align_mask)
                ib->ptr[ib->length_dw++] = ring->funcs->nop;
}
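
/*
 * Worked example (illustrative numbers): with align_mask = 0xf, an IB of
 * 13 dwords gets 3 NOPs appended, so length_dw ends up at 16, the next
 * multiple of the 16-dword alignment.
 */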
/**
 * amdgpu_ring_commit - tell the GPU to execute the new commands on the ring buffer
 *
 * @ring: amdgpu_ring structure holding ring information
 *
 * Update the wptr (write pointer) to tell the GPU to
 * execute new commands on the ring buffer (all asics).
 */
void amdgpu_ring_commit(struct amdgpu_ring *ring)
{
        uint32_t count;

        /* We pad to match fetch size */
        count = ring->funcs->align_mask + 1 -
                (ring->wptr & ring->funcs->align_mask);
        count %= ring->funcs->align_mask + 1;
        ring->funcs->insert_nop(ring, count);

        mb();
        amdgpu_ring_set_wptr(ring);

        if (ring->funcs->end_use)
                ring->funcs->end_use(ring);
}
/**
 * amdgpu_ring_undo - reset the wptr
 *
 * @ring: amdgpu_ring structure holding ring information
 *
 * Reset the driver's copy of the wptr (all asics).
 */
void amdgpu_ring_undo(struct amdgpu_ring *ring)
{
        ring->wptr = ring->wptr_old;

        if (ring->funcs->end_use)
                ring->funcs->end_use(ring);
}
/**
 * amdgpu_ring_priority_put - restore a ring's priority
 *
 * @ring: amdgpu_ring structure holding the information
 * @priority: target priority
 *
 * Release a request for executing at @priority
 */
void amdgpu_ring_priority_put(struct amdgpu_ring *ring,
                              enum drm_sched_priority priority)
{
        int i;

        if (!ring->funcs->set_priority)
                return;

        if (atomic_dec_return(&ring->num_jobs[priority]) > 0)
                return;

        /* no need to restore if the job is already at the lowest priority */
        if (priority == DRM_SCHED_PRIORITY_NORMAL)
                return;

        mutex_lock(&ring->priority_mutex);
        /* something higher prio is executing, no need to decay */
        if (ring->priority > priority)
                goto out_unlock;

        /* decay priority to the next level with a job available */
        for (i = priority; i >= DRM_SCHED_PRIORITY_MIN; i--) {
                if (i == DRM_SCHED_PRIORITY_NORMAL
                    || atomic_read(&ring->num_jobs[i])) {
                        ring->priority = i;
                        ring->funcs->set_priority(ring, i);
                        break;
                }
        }

out_unlock:
        mutex_unlock(&ring->priority_mutex);
}
/**
 * amdgpu_ring_priority_get - change the ring's priority
 *
 * @ring: amdgpu_ring structure holding the information
 * @priority: target priority
 *
 * Request a ring's priority to be raised to @priority (refcounted).
 */
void amdgpu_ring_priority_get(struct amdgpu_ring *ring,
                              enum drm_sched_priority priority)
{
        if (!ring->funcs->set_priority)
                return;

        if (atomic_inc_return(&ring->num_jobs[priority]) <= 0)
                return;

        mutex_lock(&ring->priority_mutex);
        if (priority <= ring->priority)
                goto out_unlock;

        ring->priority = priority;
        ring->funcs->set_priority(ring, priority);

out_unlock:
        mutex_unlock(&ring->priority_mutex);
}
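
/*
 * These two helpers form a refcounted pair: a caller that raises a ring's
 * priority with amdgpu_ring_priority_get() is expected to drop that request
 * again with amdgpu_ring_priority_put() once its work on the ring completes,
 * letting the priority decay back to the highest level still requested.
 */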
/**
 * amdgpu_ring_init - init driver ring struct.
 *
 * @adev: amdgpu_device pointer
 * @ring: amdgpu_ring structure holding ring information
 * @max_dw: maximum number of dw for ring alloc
 * @irq_src: interrupt source to use for this ring
 * @irq_type: interrupt type to use for this ring
 *
 * Initialize the driver information for the selected ring (all asics).
 * Returns 0 on success, error on failure.
 */
int amdgpu_ring_init(struct amdgpu_device *adev, struct amdgpu_ring *ring,
                     unsigned max_dw, struct amdgpu_irq_src *irq_src,
                     unsigned irq_type)
{
        int r, i;
        int sched_hw_submission = amdgpu_sched_hw_submission;

        /* Set the hw submission limit higher for KIQ because
         * it's used for a number of gfx/compute tasks by both
         * KFD and KGD which may have outstanding fences and
         * it doesn't really use the gpu scheduler anyway;
         * KIQ tasks get submitted directly to the ring.
         */
        if (ring->funcs->type == AMDGPU_RING_TYPE_KIQ)
                sched_hw_submission = max(sched_hw_submission, 256);
        else if (ring == &adev->sdma.instance[0].page)
                sched_hw_submission = 256;

        if (ring->adev == NULL) {
                if (adev->num_rings >= AMDGPU_MAX_RINGS)
                        return -EINVAL;

                ring->adev = adev;
                ring->idx = adev->num_rings++;
                adev->rings[ring->idx] = ring;
                r = amdgpu_fence_driver_init_ring(ring, sched_hw_submission);
                if (r)
                        return r;
        }

        r = amdgpu_device_wb_get(adev, &ring->rptr_offs);
        if (r) {
                dev_err(adev->dev, "(%d) ring rptr_offs wb alloc failed\n", r);
                return r;
        }

        r = amdgpu_device_wb_get(adev, &ring->wptr_offs);
        if (r) {
                dev_err(adev->dev, "(%d) ring wptr_offs wb alloc failed\n", r);
                return r;
        }

        r = amdgpu_device_wb_get(adev, &ring->fence_offs);
        if (r) {
                dev_err(adev->dev, "(%d) ring fence_offs wb alloc failed\n", r);
                return r;
        }

        r = amdgpu_device_wb_get(adev, &ring->trail_fence_offs);
        if (r) {
                dev_err(adev->dev,
                        "(%d) ring trail_fence_offs wb alloc failed\n", r);
                return r;
        }
        ring->trail_fence_gpu_addr =
                adev->wb.gpu_addr + (ring->trail_fence_offs * 4);
        ring->trail_fence_cpu_addr = &adev->wb.wb[ring->trail_fence_offs];

        r = amdgpu_device_wb_get(adev, &ring->cond_exe_offs);
        if (r) {
                dev_err(adev->dev, "(%d) ring cond_exec_polling wb alloc failed\n", r);
                return r;
        }
        ring->cond_exe_gpu_addr = adev->wb.gpu_addr + (ring->cond_exe_offs * 4);
        ring->cond_exe_cpu_addr = &adev->wb.wb[ring->cond_exe_offs];
        /* always set cond_exec_polling to CONTINUE */
        *ring->cond_exe_cpu_addr = 1;

        r = amdgpu_fence_driver_start_ring(ring, irq_src, irq_type);
        if (r) {
                dev_err(adev->dev, "failed initializing fences (%d).\n", r);
                return r;
        }

        ring->ring_size = roundup_pow_of_two(max_dw * 4 * sched_hw_submission);
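        /* Example (illustrative values): with max_dw = 1024 and
         * sched_hw_submission = 2, ring_size becomes
         * roundup_pow_of_two(1024 * 4 * 2) = 8192 bytes, i.e. 2048 dwords.
         */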
        ring->buf_mask = (ring->ring_size / 4) - 1;
        ring->ptr_mask = ring->funcs->support_64bit_ptrs ?
                0xffffffffffffffff : ring->buf_mask;
        /* Allocate ring buffer */
        if (ring->ring_obj == NULL) {
                r = amdgpu_bo_create_kernel(adev, ring->ring_size + ring->funcs->extra_dw, PAGE_SIZE,
                                            AMDGPU_GEM_DOMAIN_GTT,
                                            &ring->ring_obj,
                                            &ring->gpu_addr,
                                            (void **)&ring->ring);
                if (r) {
                        dev_err(adev->dev, "(%d) ring create failed\n", r);
                        return r;
                }
                amdgpu_ring_clear_ring(ring);
        }

        ring->max_dw = max_dw;
        ring->priority = DRM_SCHED_PRIORITY_NORMAL;
        mutex_init(&ring->priority_mutex);

        for (i = 0; i < DRM_SCHED_PRIORITY_MAX; ++i)
                atomic_set(&ring->num_jobs[i], 0);

        return 0;
}
/**
 * amdgpu_ring_fini - tear down the driver ring struct.
 *
 * @ring: amdgpu_ring structure holding ring information
 *
 * Tear down the driver information for the selected ring (all asics).
 */
void amdgpu_ring_fini(struct amdgpu_ring *ring)
{
        /* Not to finish a ring which is not initialized */
        if (!(ring->adev) || !(ring->adev->rings[ring->idx]))
                return;

        ring->sched.ready = false;

        amdgpu_device_wb_free(ring->adev, ring->rptr_offs);
        amdgpu_device_wb_free(ring->adev, ring->wptr_offs);

        amdgpu_device_wb_free(ring->adev, ring->cond_exe_offs);
        amdgpu_device_wb_free(ring->adev, ring->fence_offs);

        amdgpu_bo_free_kernel(&ring->ring_obj,
                              &ring->gpu_addr,
                              (void **)&ring->ring);

        dma_fence_put(ring->vmid_wait);
        ring->vmid_wait = NULL;

        ring->adev->rings[ring->idx] = NULL;
}
/**
 * amdgpu_ring_emit_reg_write_reg_wait_helper - ring helper
 *
 * @ring: ring to write to
 * @reg0: register to write
 * @reg1: register to wait on
 * @ref: reference value to write/wait on
 * @mask: mask to wait on
 *
 * Helper for rings that don't support write and wait in a
 * single oneshot packet.
 */
void amdgpu_ring_emit_reg_write_reg_wait_helper(struct amdgpu_ring *ring,
                                                uint32_t reg0, uint32_t reg1,
                                                uint32_t ref, uint32_t mask)
{
        amdgpu_ring_emit_wreg(ring, reg0, ref);
        amdgpu_ring_emit_reg_wait(ring, reg1, mask, mask);
}
/**
 * amdgpu_ring_soft_recovery - try to soft recover a ring lockup
 *
 * @ring: ring to try the recovery on
 * @vmid: VMID we try to get going again
 * @fence: timedout fence
 *
 * Tries to get a ring proceeding again when it is stuck.
 */
bool amdgpu_ring_soft_recovery(struct amdgpu_ring *ring, unsigned int vmid,
                               struct dma_fence *fence)
{
        ktime_t deadline = ktime_add_us(ktime_get(), 10000);

        if (amdgpu_sriov_vf(ring->adev) || !ring->funcs->soft_recovery || !fence)
                return false;

        atomic_inc(&ring->adev->gpu_reset_counter);
        while (!dma_fence_is_signaled(fence) &&
               ktime_to_ns(ktime_sub(deadline, ktime_get())) > 0)
                ring->funcs->soft_recovery(ring, vmid);

        return dma_fence_is_signaled(fence);
}
#if defined(CONFIG_DEBUG_FS)

/* Layout of file is 12 bytes consisting of
 * - rptr
 * - wptr
 * - driver's copy of wptr
 *
 * followed by n-words of ring data
 */
static ssize_t amdgpu_debugfs_ring_read(struct file *f, char __user *buf,
                                        size_t size, loff_t *pos)
{
        struct amdgpu_ring *ring = file_inode(f)->i_private;
        uint32_t value, result, early[3];
        int r, i;

        if (*pos & 3 || size & 3)
                return -EINVAL;
        result = 0;

        if (*pos < 12) {
                early[0] = amdgpu_ring_get_rptr(ring) & ring->buf_mask;
                early[1] = amdgpu_ring_get_wptr(ring) & ring->buf_mask;
                early[2] = ring->wptr & ring->buf_mask;
                for (i = *pos / 4; i < 3 && size; i++) {
                        r = put_user(early[i], (uint32_t *)buf);
                        if (r)
                                return r;
                        buf += 4; result += 4; size -= 4; *pos += 4;
                }
        }

        while (size) {
                if (*pos >= (ring->ring_size + 12))
                        return result;
                value = ring->ring[(*pos - 12)/4];
                r = put_user(value, (uint32_t *)buf);
                if (r)
                        return r;
                buf += 4; result += 4; size -= 4; *pos += 4;
        }

        return result;
}
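
/*
 * Example (illustrative; the debugfs path varies by device/minor): reading
 * the first 12 bytes of the per-ring file shows rptr, wptr and the driver's
 * copy of wptr:
 *
 *	od -An -tx4 -N12 /sys/kernel/debug/dri/0/amdgpu_ring_gfx
 *
 * The ring contents themselves follow after those 12 bytes.
 */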
static const struct file_operations amdgpu_debugfs_ring_fops = {
        .owner = THIS_MODULE,
        .read = amdgpu_debugfs_ring_read,
        .llseek = default_llseek
};

#endif
int amdgpu_debugfs_ring_init(struct amdgpu_device *adev,
                             struct amdgpu_ring *ring)
{
#if defined(CONFIG_DEBUG_FS)
        struct drm_minor *minor = adev->ddev->primary;
        struct dentry *ent, *root = minor->debugfs_root;
        char name[32];

        sprintf(name, "amdgpu_ring_%s", ring->name);

        ent = debugfs_create_file(name,
                                  S_IFREG | S_IRUGO, root,
                                  ring, &amdgpu_debugfs_ring_fops);
        if (!ent)
                return -ENOMEM;

        i_size_write(ent->d_inode, ring->ring_size + 12);
        ring->ent = ent;
#endif
        return 0;
}
void amdgpu_debugfs_ring_fini(struct amdgpu_ring *ring)
{
#if defined(CONFIG_DEBUG_FS)
        debugfs_remove(ring->ent);
#endif
}
/**
 * amdgpu_ring_test_helper - tests ring and sets sched readiness status
 *
 * @ring: ring to test
 *
 * Tests the ring and sets the scheduler readiness status.
 * Returns 0 on success, error on failure.
 */
int amdgpu_ring_test_helper(struct amdgpu_ring *ring)
{
        struct amdgpu_device *adev = ring->adev;
        int r;

        r = amdgpu_ring_test_ring(ring);
        if (r)
                DRM_DEV_ERROR(adev->dev, "ring %s test failed (%d)\n", ring->name, r);
        else
                DRM_DEV_DEBUG(adev->dev, "ring test on %s succeeded\n", ring->name);

        ring->sched.ready = !r;
        return r;
}