/*
 * Copyright 2008 Advanced Micro Devices, Inc.
 * Copyright 2008 Red Hat Inc.
 * Copyright 2009 Jerome Glisse.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Dave Airlie
 */
#include <linux/seq_file.h>
#include <linux/slab.h>

#include <drm/drmP.h>
#include <drm/amdgpu_drm.h>

#include "amdgpu.h"

#define AMDGPU_IB_TEST_TIMEOUT	msecs_to_jiffies(1000)
/*
 * IBs (Indirect Buffers) are areas of GPU-accessible memory where
 * commands are stored. You can put a pointer to the IB in the
 * command ring and the hw will fetch the commands from the IB
 * and execute them. Generally userspace acceleration drivers
 * produce command buffers which are sent to the kernel and
 * put in IBs for execution by the requested ring.
 */
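
/*
 * A minimal sketch of the flow these helpers support (an illustration
 * only: error handling is trimmed, the packet contents are elided and
 * a valid adev/ring are assumed; the per-ring IB tests follow
 * essentially this job-less path):
 *
 *	struct amdgpu_ib ib;
 *	struct dma_fence *f = NULL;
 *	long r;
 *
 *	memset(&ib, 0, sizeof(ib));
 *	r = amdgpu_ib_get(adev, NULL, 256, &ib);
 *	... write packets into ib.ptr[] and set ib.length_dw ...
 *	r = amdgpu_ib_schedule(ring, 1, &ib, NULL, &f);
 *	r = dma_fence_wait_timeout(f, false, AMDGPU_IB_TEST_TIMEOUT);
 *	amdgpu_ib_free(adev, &ib, f);
 *	dma_fence_put(f);
 */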
static int amdgpu_debugfs_sa_init(struct amdgpu_device *adev);

/**
 * amdgpu_ib_get - request an IB (Indirect Buffer)
 *
 * @adev: amdgpu_device pointer
 * @vm: amdgpu_vm pointer (NULL when the IB is not used with a VM)
 * @size: requested IB size
 * @ib: IB object returned
 *
 * Request an IB (all asics). IBs are allocated using the
 * suballocator.
 * Returns 0 on success, error on failure.
 */
int amdgpu_ib_get(struct amdgpu_device *adev, struct amdgpu_vm *vm,
		  unsigned size, struct amdgpu_ib *ib)
{
	int r;

	r = amdgpu_sa_bo_new(&adev->ring_tmp_bo,
			     &ib->sa_bo, size, 256);
	if (r) {
		dev_err(adev->dev, "failed to get a new IB (%d)\n", r);
		return r;
	}
	ib->ptr = amdgpu_sa_bo_cpu_addr(ib->sa_bo);
	if (!vm)
		ib->gpu_addr = amdgpu_sa_bo_gpu_addr(ib->sa_bo);
	return 0;
}

/**
 * amdgpu_ib_free - free an IB (Indirect Buffer)
 *
 * @adev: amdgpu_device pointer
 * @ib: IB object to free
 * @f: the fence the suballocated memory must wait on before it is reused
 *
 * Free an IB (all asics).
 */
void amdgpu_ib_free(struct amdgpu_device *adev, struct amdgpu_ib *ib,
		    struct dma_fence *f)
{
	amdgpu_sa_bo_free(adev, &ib->sa_bo, f);
}

/**
 * amdgpu_ib_schedule - schedule an IB (Indirect Buffer) on the ring
 *
 * @ring: ring the IBs are scheduled on
 * @num_ibs: number of IBs to schedule
 * @ibs: IB objects to schedule
 * @job: job the IBs belong to, or NULL for a job-less submission (e.g. ring tests)
 * @f: fence created during this submission
 *
 * Schedule an IB on the associated ring (all asics).
 * Returns 0 on success, error on failure.
 *
 * On SI, there are two parallel engines fed from the primary ring,
 * the CE (Constant Engine) and the DE (Drawing Engine). Since
 * resource descriptors have moved to memory, the CE allows you to
 * prime the caches while the DE is updating register state so that
 * the resource descriptors will be already in cache when the draw is
 * processed. To accomplish this, the userspace driver submits two
 * IBs, one for the CE and one for the DE. If there is a CE IB (called
 * a CONST_IB), it will be put on the ring prior to the DE IB. Prior
 * to SI there was just a DE IB. A sketch of this two-IB submission
 * follows the function below.
 */
int amdgpu_ib_schedule(struct amdgpu_ring *ring, unsigned num_ibs,
		       struct amdgpu_ib *ibs, struct amdgpu_job *job,
		       struct dma_fence **f)
{
	struct amdgpu_device *adev = ring->adev;
	struct amdgpu_ib *ib = &ibs[0];
	struct dma_fence *tmp = NULL;
	bool skip_preamble, need_ctx_switch;
	unsigned patch_offset = ~0;
	struct amdgpu_vm *vm;
	uint64_t fence_ctx;
	uint32_t status = 0, alloc_size;
	unsigned fence_flags = 0;
	bool need_pipe_sync = false;
	unsigned i;
	int r = 0;

	/* ring tests don't use a job */
	if (job) {
		vm = job->vm;
		fence_ctx = job->base.s_fence->scheduled.context;
	} else {
		vm = NULL;
		fence_ctx = 0;
	}

	if (!ring->ready) {
		dev_err(adev->dev, "couldn't schedule ib on ring <%s>\n", ring->name);
		return -EINVAL;
	}
	if (vm && !job->vmid) {
		dev_err(adev->dev, "VM IB without ID\n");
		return -EINVAL;
	}

	alloc_size = ring->funcs->emit_frame_size + num_ibs *
		ring->funcs->emit_ib_size;
	r = amdgpu_ring_alloc(ring, alloc_size);
	if (r) {
		dev_err(adev->dev, "scheduling IB failed (%d).\n", r);
		return r;
	}
	if (ring->funcs->emit_pipeline_sync && job &&
	    ((tmp = amdgpu_sync_get_fence(&job->sched_sync, NULL)) ||
	     amdgpu_vm_need_pipeline_sync(ring, job))) {
		need_pipe_sync = true;
		dma_fence_put(tmp);
	}

	if (ring->funcs->insert_start)
		ring->funcs->insert_start(ring);

	r = amdgpu_vm_flush(ring, job, need_pipe_sync);
	if (r) {
		amdgpu_ring_undo(ring);
		return r;
	}

	if (job && ring->funcs->init_cond_exec)
		patch_offset = amdgpu_ring_init_cond_exec(ring);

	if (!(adev->flags & AMD_IS_APU)) {
		if (ring->funcs->emit_hdp_flush)
			amdgpu_ring_emit_hdp_flush(ring);
		else
			amdgpu_asic_flush_hdp(adev, ring);
	}

	skip_preamble = ring->current_ctx == fence_ctx;
	need_ctx_switch = ring->current_ctx != fence_ctx;
	if (job && ring->funcs->emit_cntxcntl) {
		if (need_ctx_switch)
			status |= AMDGPU_HAVE_CTX_SWITCH;
		status |= job->preamble_status;
		amdgpu_ring_emit_cntxcntl(ring, status);
	}

	for (i = 0; i < num_ibs; ++i) {
		ib = &ibs[i];

		/* drop preamble IBs if we don't have a context switch */
		if ((ib->flags & AMDGPU_IB_FLAG_PREAMBLE) &&
		    skip_preamble &&
		    !(status & AMDGPU_PREAMBLE_IB_PRESENT_FIRST) &&
		    !amdgpu_sriov_vf(adev)) /* for SR-IOV preemption, the preamble CE IB must be kept */
			continue;

		amdgpu_ring_emit_ib(ring, ib, job ? job->vmid : 0,
				    need_ctx_switch);
		need_ctx_switch = false;
	}

	if (ring->funcs->emit_tmz)
		amdgpu_ring_emit_tmz(ring, false);

	if (!(adev->flags & AMD_IS_APU))
		amdgpu_asic_invalidate_hdp(adev, ring);

	if (ib->flags & AMDGPU_IB_FLAG_TC_WB_NOT_INVALIDATE)
		fence_flags |= AMDGPU_FENCE_FLAG_TC_WB_ONLY;

	/* wrap the last IB with a fence */
	if (job && job->uf_addr) {
		amdgpu_ring_emit_fence(ring, job->uf_addr, job->uf_sequence,
				       fence_flags | AMDGPU_FENCE_FLAG_64BIT);
	}

	r = amdgpu_fence_emit(ring, f, fence_flags);
	if (r) {
		dev_err(adev->dev, "failed to emit fence (%d)\n", r);
		if (job && job->vmid)
			amdgpu_vmid_reset(adev, ring->funcs->vmhub, job->vmid);
		amdgpu_ring_undo(ring);
		return r;
	}

	if (ring->funcs->insert_end)
		ring->funcs->insert_end(ring);

	if (patch_offset != ~0 && ring->funcs->patch_cond_exec)
		amdgpu_ring_patch_cond_exec(ring, patch_offset);

	ring->current_ctx = fence_ctx;
	if (vm && ring->funcs->emit_switch_buffer)
		amdgpu_ring_emit_switch_buffer(ring);
	amdgpu_ring_commit(ring);

	return 0;
}
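
/*
 * A sketch of the SI+ two-IB submission described in the comment
 * above (an illustration, not the actual CS ioctl path): the CONST_IB
 * is flagged with AMDGPU_IB_FLAG_CE and placed before the DE IB, so
 * the emit loop above puts it on the ring first.
 *
 *	struct amdgpu_ib ibs[2];
 *
 *	ibs[0].flags = AMDGPU_IB_FLAG_CE;	(CONST_IB, primes the caches)
 *	ibs[1].flags = 0;			(DE IB, state updates and draw)
 *	r = amdgpu_ib_schedule(ring, 2, ibs, job, &f);
 */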

/**
 * amdgpu_ib_pool_init - Init the IB (Indirect Buffer) pool
 *
 * @adev: amdgpu_device pointer
 *
 * Initialize the suballocator to manage a pool of memory
 * for use as IBs (all asics).
 * Returns 0 on success, error on failure.
 */
int amdgpu_ib_pool_init(struct amdgpu_device *adev)
{
	int r;
	if (adev->ib_pool_ready) {
		return 0;
	}
	r = amdgpu_sa_bo_manager_init(adev, &adev->ring_tmp_bo,
				      AMDGPU_IB_POOL_SIZE * 64 * 1024,
				      AMDGPU_GPU_PAGE_SIZE,
				      AMDGPU_GEM_DOMAIN_GTT);
	if (r) {
		return r;
	}
	adev->ib_pool_ready = true;
	if (amdgpu_debugfs_sa_init(adev)) {
		dev_err(adev->dev, "failed to register debugfs file for SA\n");
	}
	return 0;
}

/**
 * amdgpu_ib_pool_fini - Free the IB (Indirect Buffer) pool
 *
 * @adev: amdgpu_device pointer
 *
 * Tear down the suballocator managing the pool of memory
 * for use as IBs (all asics).
 */
void amdgpu_ib_pool_fini(struct amdgpu_device *adev)
{
	if (adev->ib_pool_ready) {
		amdgpu_sa_bo_manager_fini(adev, &adev->ring_tmp_bo);
		adev->ib_pool_ready = false;
	}
}
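
/*
 * An illustrative pairing of the pool helpers (a sketch of the
 * intended call discipline, not the driver's literal init path):
 * init once at device bring-up, tear down once at fini; the
 * ib_pool_ready flag above keeps repeated calls harmless.
 *
 *	r = amdgpu_ib_pool_init(adev);
 *	if (r)
 *		return r;
 *	...
 *	amdgpu_ib_pool_fini(adev);
 */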

/**
 * amdgpu_ib_ring_tests - test IBs on the rings
 *
 * @adev: amdgpu_device pointer
 *
 * Test an IB (Indirect Buffer) on each ring.
 * If the test fails, disable the ring.
 * Returns 0 on success, error if the primary GFX ring
 * IB test fails.
 */
int amdgpu_ib_ring_tests(struct amdgpu_device *adev)
{
	unsigned i;
	int r, ret = 0;
	long tmo_gfx, tmo_mm;

	tmo_mm = tmo_gfx = AMDGPU_IB_TEST_TIMEOUT;
	if (amdgpu_sriov_vf(adev)) {
		/* On the hypervisor side the MM engines are not scheduled
		 * together with the CP and SDMA engines, so even in exclusive
		 * mode an MM engine could still be running on another VF; the
		 * IB test timeout for MM engines under SR-IOV therefore needs
		 * to be long. 8 seconds should be enough for the MM engines
		 * to come back to this VF.
		 */
		tmo_mm = 8 * AMDGPU_IB_TEST_TIMEOUT;
	}

	if (amdgpu_sriov_runtime(adev)) {
		/* The CP and SDMA engines are scheduled together, so the
		 * timeout needs to be wide enough to cover the time spent
		 * waiting for them to come back under RUNTIME-only mode.
		 */
		tmo_gfx = 8 * AMDGPU_IB_TEST_TIMEOUT;
	}

	for (i = 0; i < AMDGPU_MAX_RINGS; ++i) {
		struct amdgpu_ring *ring = adev->rings[i];
		long tmo;

		if (!ring || !ring->ready)
			continue;

		/* MM engines need more time */
		if (ring->funcs->type == AMDGPU_RING_TYPE_UVD ||
		    ring->funcs->type == AMDGPU_RING_TYPE_VCE ||
		    ring->funcs->type == AMDGPU_RING_TYPE_UVD_ENC ||
		    ring->funcs->type == AMDGPU_RING_TYPE_VCN_DEC ||
		    ring->funcs->type == AMDGPU_RING_TYPE_VCN_ENC ||
		    ring->funcs->type == AMDGPU_RING_TYPE_VCN_JPEG)
			tmo = tmo_mm;
		else
			tmo = tmo_gfx;

		r = amdgpu_ring_test_ib(ring, tmo);
		if (r) {
			ring->ready = false;
			if (ring == &adev->gfx.gfx_ring[0]) {
				/* oh, oh, that's really bad */
				DRM_ERROR("amdgpu: failed testing IB on GFX ring (%d).\n", r);
				adev->accel_working = false;
			} else {
				/* still not good, but we can live with it */
				DRM_ERROR("amdgpu: failed testing IB on ring %d (%d).\n", i, r);
			}
		}
	}

	return ret;
}

#if defined(CONFIG_DEBUG_FS)

static int amdgpu_debugfs_sa_info(struct seq_file *m, void *data)
{
	struct drm_info_node *node = (struct drm_info_node *)m->private;
	struct drm_device *dev = node->minor->dev;
	struct amdgpu_device *adev = dev->dev_private;

	amdgpu_sa_bo_dump_debug_info(&adev->ring_tmp_bo, m);
	return 0;
}

static const struct drm_info_list amdgpu_debugfs_sa_list[] = {
	{"amdgpu_sa_info", &amdgpu_debugfs_sa_info, 0, NULL},
};

#endif

static int amdgpu_debugfs_sa_init(struct amdgpu_device *adev)
{
#if defined(CONFIG_DEBUG_FS)
	return amdgpu_debugfs_add_files(adev, amdgpu_debugfs_sa_list, 1);
#else
	return 0;
#endif
}