2 * Copyright 2016 Advanced Micro Devices, Inc.
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
24 #include <linux/firmware.h>
27 #include "amdgpu_vcn.h"
30 #include "soc15_common.h"
32 #include "vcn/vcn_1_0_offset.h"
33 #include "vcn/vcn_1_0_sh_mask.h"
34 #include "hdp/hdp_4_0_offset.h"
35 #include "mmhub/mmhub_9_1_offset.h"
36 #include "mmhub/mmhub_9_1_sh_mask.h"
38 #include "ivsrcid/vcn/irqsrcs_vcn_1_0.h"
40 #define mmUVD_RBC_XX_IB_REG_CHECK 0x05ab
41 #define mmUVD_RBC_XX_IB_REG_CHECK_BASE_IDX 1
42 #define mmUVD_REG_XX_MASK 0x05ac
43 #define mmUVD_REG_XX_MASK_BASE_IDX 1
45 static int vcn_v1_0_stop(struct amdgpu_device *adev);
46 static void vcn_v1_0_set_dec_ring_funcs(struct amdgpu_device *adev);
47 static void vcn_v1_0_set_enc_ring_funcs(struct amdgpu_device *adev);
48 static void vcn_v1_0_set_jpeg_ring_funcs(struct amdgpu_device *adev);
49 static void vcn_v1_0_set_irq_funcs(struct amdgpu_device *adev);
50 static void vcn_v1_0_jpeg_ring_set_patch_ring(struct amdgpu_ring *ring, uint32_t ptr);
51 static int vcn_v1_0_set_powergating_state(void *handle, enum amd_powergating_state state);
54 * vcn_v1_0_early_init - set function pointers
56 * @handle: amdgpu_device pointer
58 * Set ring and irq function pointers
60 static int vcn_v1_0_early_init(void *handle)
62 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
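/*
 * VCN 1.0 exposes one decode ring, two encode rings and one JPEG decode
 * ring; the encode ring count set below is consumed by vcn_v1_0_sw_init()
 * when the per-ring IRQ sources and rings are created.
 */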
64 adev->vcn.num_enc_rings = 2;
66 vcn_v1_0_set_dec_ring_funcs(adev);
67 vcn_v1_0_set_enc_ring_funcs(adev);
68 vcn_v1_0_set_jpeg_ring_funcs(adev);
69 vcn_v1_0_set_irq_funcs(adev);
75 * vcn_v1_0_sw_init - sw init for VCN block
77 * @handle: amdgpu_device pointer
79 * Load firmware and sw initialization
81 static int vcn_v1_0_sw_init(void *handle)
83 struct amdgpu_ring *ring;
85 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
88 r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_VCN, VCN_1_0__SRCID__UVD_SYSTEM_MESSAGE_INTERRUPT, &adev->vcn.irq);
93 for (i = 0; i < adev->vcn.num_enc_rings; ++i) {
94 r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_VCN, i + VCN_1_0__SRCID__UVD_ENC_GENERAL_PURPOSE,
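/* raw interrupt source id 126 is the VCN JPEG decode trap */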
101 r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_VCN, 126, &adev->vcn.irq);
105 r = amdgpu_vcn_sw_init(adev);
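/*
 * When the PSP loads the VCN firmware, register the ucode here so the PSP
 * places it in its TMR region; vcn_v1_0_mc_resume_*() then points cache
 * window 0 at the TMR address (tmr_mc_addr_lo/hi) instead of the driver BO.
 */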
109 if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) {
110 const struct common_firmware_header *hdr;
111 hdr = (const struct common_firmware_header *)adev->vcn.fw->data;
112 adev->firmware.ucode[AMDGPU_UCODE_ID_VCN].ucode_id = AMDGPU_UCODE_ID_VCN;
113 adev->firmware.ucode[AMDGPU_UCODE_ID_VCN].fw = adev->vcn.fw;
114 adev->firmware.fw_size +=
115 ALIGN(le32_to_cpu(hdr->ucode_size_bytes), PAGE_SIZE);
116 DRM_INFO("PSP loading VCN firmware\n");
119 r = amdgpu_vcn_resume(adev);
123 ring = &adev->vcn.ring_dec;
124 sprintf(ring->name, "vcn_dec");
125 r = amdgpu_ring_init(adev, ring, 512, &adev->vcn.irq, 0);
129 for (i = 0; i < adev->vcn.num_enc_rings; ++i) {
130 ring = &adev->vcn.ring_enc[i];
131 sprintf(ring->name, "vcn_enc%d", i);
132 r = amdgpu_ring_init(adev, ring, 512, &adev->vcn.irq, 0);
137 ring = &adev->vcn.ring_jpeg;
138 sprintf(ring->name, "vcn_jpeg");
139 r = amdgpu_ring_init(adev, ring, 512, &adev->vcn.irq, 0);
147 * vcn_v1_0_sw_fini - sw fini for VCN block
149 * @handle: amdgpu_device pointer
151 * VCN suspend and free up sw allocation
153 static int vcn_v1_0_sw_fini(void *handle)
156 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
158 r = amdgpu_vcn_suspend(adev);
162 r = amdgpu_vcn_sw_fini(adev);
168 * vcn_v1_0_hw_init - start and test VCN block
170 * @handle: amdgpu_device pointer
172 * Initialize the hardware, boot up the VCPU and do some testing
174 static int vcn_v1_0_hw_init(void *handle)
176 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
177 struct amdgpu_ring *ring = &adev->vcn.ring_dec;
180 r = amdgpu_ring_test_helper(ring);
184 for (i = 0; i < adev->vcn.num_enc_rings; ++i) {
185 ring = &adev->vcn.ring_enc[i];
186 ring->sched.ready = true;
187 r = amdgpu_ring_test_helper(ring);
192 ring = &adev->vcn.ring_jpeg;
193 r = amdgpu_ring_test_helper(ring);
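/*
 * DPG (dynamic power gating) powers the block up and down on demand and
 * programs registers through the indirect DPG interface, while SPG (static
 * power gating) keeps the block powered while in use and writes registers
 * directly; the mode used depends on adev->pg_flags.
 */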
199 DRM_INFO("VCN decode and encode initialized successfully (under %s).\n",
200 (adev->pg_flags & AMD_PG_SUPPORT_VCN_DPG) ? "DPG Mode" : "SPG Mode");
206 * vcn_v1_0_hw_fini - stop the hardware block
208 * @handle: amdgpu_device pointer
210 * Stop the VCN block, mark the ring as not ready any more
212 static int vcn_v1_0_hw_fini(void *handle)
214 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
215 struct amdgpu_ring *ring = &adev->vcn.ring_dec;
217 if (RREG32_SOC15(VCN, 0, mmUVD_STATUS))
218 vcn_v1_0_set_powergating_state(adev, AMD_PG_STATE_GATE);
220 ring->sched.ready = false;
226 * vcn_v1_0_suspend - suspend VCN block
228 * @handle: amdgpu_device pointer
230 * HW fini and suspend VCN block
232 static int vcn_v1_0_suspend(void *handle)
235 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
237 r = vcn_v1_0_hw_fini(adev);
241 r = amdgpu_vcn_suspend(adev);
247 * vcn_v1_0_resume - resume VCN block
249 * @handle: amdgpu_device pointer
251 * Resume firmware and hw init VCN block
253 static int vcn_v1_0_resume(void *handle)
256 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
258 r = amdgpu_vcn_resume(adev);
262 r = vcn_v1_0_hw_init(adev);
268 * vcn_v1_0_mc_resume_spg_mode - memory controller programming
270 * @adev: amdgpu_device pointer
272 * Let the VCN memory controller know its offsets
274 static void vcn_v1_0_mc_resume_spg_mode(struct amdgpu_device *adev)
276 uint32_t size = AMDGPU_GPU_PAGE_ALIGN(adev->vcn.fw->size + 4);
279 /* cache window 0: fw */
280 if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) {
281 WREG32_SOC15(UVD, 0, mmUVD_LMI_VCPU_CACHE_64BIT_BAR_LOW,
282 (adev->firmware.ucode[AMDGPU_UCODE_ID_VCN].tmr_mc_addr_lo));
283 WREG32_SOC15(UVD, 0, mmUVD_LMI_VCPU_CACHE_64BIT_BAR_HIGH,
284 (adev->firmware.ucode[AMDGPU_UCODE_ID_VCN].tmr_mc_addr_hi));
285 WREG32_SOC15(UVD, 0, mmUVD_VCPU_CACHE_OFFSET0, 0);
288 WREG32_SOC15(UVD, 0, mmUVD_LMI_VCPU_CACHE_64BIT_BAR_LOW,
289 lower_32_bits(adev->vcn.gpu_addr));
290 WREG32_SOC15(UVD, 0, mmUVD_LMI_VCPU_CACHE_64BIT_BAR_HIGH,
291 upper_32_bits(adev->vcn.gpu_addr));
293 WREG32_SOC15(UVD, 0, mmUVD_VCPU_CACHE_OFFSET0,
294 AMDGPU_UVD_FIRMWARE_OFFSET >> 3);
297 WREG32_SOC15(UVD, 0, mmUVD_VCPU_CACHE_SIZE0, size);
299 /* cache window 1: stack */
300 WREG32_SOC15(UVD, 0, mmUVD_LMI_VCPU_CACHE1_64BIT_BAR_LOW,
301 lower_32_bits(adev->vcn.gpu_addr + offset));
302 WREG32_SOC15(UVD, 0, mmUVD_LMI_VCPU_CACHE1_64BIT_BAR_HIGH,
303 upper_32_bits(adev->vcn.gpu_addr + offset));
304 WREG32_SOC15(UVD, 0, mmUVD_VCPU_CACHE_OFFSET1, 0);
305 WREG32_SOC15(UVD, 0, mmUVD_VCPU_CACHE_SIZE1, AMDGPU_VCN_STACK_SIZE);
307 /* cache window 2: context */
308 WREG32_SOC15(UVD, 0, mmUVD_LMI_VCPU_CACHE2_64BIT_BAR_LOW,
309 lower_32_bits(adev->vcn.gpu_addr + offset + AMDGPU_VCN_STACK_SIZE));
310 WREG32_SOC15(UVD, 0, mmUVD_LMI_VCPU_CACHE2_64BIT_BAR_HIGH,
311 upper_32_bits(adev->vcn.gpu_addr + offset + AMDGPU_VCN_STACK_SIZE));
312 WREG32_SOC15(UVD, 0, mmUVD_VCPU_CACHE_OFFSET2, 0);
313 WREG32_SOC15(UVD, 0, mmUVD_VCPU_CACHE_SIZE2, AMDGPU_VCN_CONTEXT_SIZE);
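/* VCN global tiling registers */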
315 WREG32_SOC15(UVD, 0, mmUVD_UDEC_ADDR_CONFIG,
316 adev->gfx.config.gb_addr_config);
317 WREG32_SOC15(UVD, 0, mmUVD_UDEC_DB_ADDR_CONFIG,
318 adev->gfx.config.gb_addr_config);
319 WREG32_SOC15(UVD, 0, mmUVD_UDEC_DBW_ADDR_CONFIG,
320 adev->gfx.config.gb_addr_config);
321 WREG32_SOC15(UVD, 0, mmUVD_UDEC_DBW_UV_ADDR_CONFIG,
322 adev->gfx.config.gb_addr_config);
323 WREG32_SOC15(UVD, 0, mmUVD_MIF_CURR_ADDR_CONFIG,
324 adev->gfx.config.gb_addr_config);
325 WREG32_SOC15(UVD, 0, mmUVD_MIF_CURR_UV_ADDR_CONFIG,
326 adev->gfx.config.gb_addr_config);
327 WREG32_SOC15(UVD, 0, mmUVD_MIF_RECON1_ADDR_CONFIG,
328 adev->gfx.config.gb_addr_config);
329 WREG32_SOC15(UVD, 0, mmUVD_MIF_RECON1_UV_ADDR_CONFIG,
330 adev->gfx.config.gb_addr_config);
331 WREG32_SOC15(UVD, 0, mmUVD_MIF_REF_ADDR_CONFIG,
332 adev->gfx.config.gb_addr_config);
333 WREG32_SOC15(UVD, 0, mmUVD_MIF_REF_UV_ADDR_CONFIG,
334 adev->gfx.config.gb_addr_config);
335 WREG32_SOC15(UVD, 0, mmUVD_JPEG_ADDR_CONFIG,
336 adev->gfx.config.gb_addr_config);
337 WREG32_SOC15(UVD, 0, mmUVD_JPEG_UV_ADDR_CONFIG,
338 adev->gfx.config.gb_addr_config);
341 static void vcn_v1_0_mc_resume_dpg_mode(struct amdgpu_device *adev)
343 uint32_t size = AMDGPU_GPU_PAGE_ALIGN(adev->vcn.fw->size + 4);
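/*
 * In DPG mode registers are programmed through WREG32_SOC15_DPG_MODE; the
 * two trailing arguments are the write mask and the SRAM select (0 applies
 * the write immediately, non-zero appears to stage it for replay from SRAM).
 */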
346 /* cache window 0: fw */
347 if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) {
348 WREG32_SOC15_DPG_MODE(UVD, 0, mmUVD_LMI_VCPU_CACHE_64BIT_BAR_LOW,
349 (adev->firmware.ucode[AMDGPU_UCODE_ID_VCN].tmr_mc_addr_lo),
351 WREG32_SOC15_DPG_MODE(UVD, 0, mmUVD_LMI_VCPU_CACHE_64BIT_BAR_HIGH,
352 (adev->firmware.ucode[AMDGPU_UCODE_ID_VCN].tmr_mc_addr_hi),
354 WREG32_SOC15_DPG_MODE(UVD, 0, mmUVD_VCPU_CACHE_OFFSET0, 0,
358 WREG32_SOC15_DPG_MODE(UVD, 0, mmUVD_LMI_VCPU_CACHE_64BIT_BAR_LOW,
359 lower_32_bits(adev->vcn.gpu_addr), 0xFFFFFFFF, 0);
360 WREG32_SOC15_DPG_MODE(UVD, 0, mmUVD_LMI_VCPU_CACHE_64BIT_BAR_HIGH,
361 upper_32_bits(adev->vcn.gpu_addr), 0xFFFFFFFF, 0);
363 WREG32_SOC15_DPG_MODE(UVD, 0, mmUVD_VCPU_CACHE_OFFSET0,
364 AMDGPU_UVD_FIRMWARE_OFFSET >> 3, 0xFFFFFFFF, 0);
367 WREG32_SOC15_DPG_MODE(UVD, 0, mmUVD_VCPU_CACHE_SIZE0, size, 0xFFFFFFFF, 0);
369 /* cache window 1: stack */
370 WREG32_SOC15_DPG_MODE(UVD, 0, mmUVD_LMI_VCPU_CACHE1_64BIT_BAR_LOW,
371 lower_32_bits(adev->vcn.gpu_addr + offset), 0xFFFFFFFF, 0);
372 WREG32_SOC15_DPG_MODE(UVD, 0, mmUVD_LMI_VCPU_CACHE1_64BIT_BAR_HIGH,
373 upper_32_bits(adev->vcn.gpu_addr + offset), 0xFFFFFFFF, 0);
374 WREG32_SOC15_DPG_MODE(UVD, 0, mmUVD_VCPU_CACHE_OFFSET1, 0,
376 WREG32_SOC15_DPG_MODE(UVD, 0, mmUVD_VCPU_CACHE_SIZE1, AMDGPU_VCN_STACK_SIZE,
379 /* cache window 2: context */
380 WREG32_SOC15_DPG_MODE(UVD, 0, mmUVD_LMI_VCPU_CACHE2_64BIT_BAR_LOW,
381 lower_32_bits(adev->vcn.gpu_addr + offset + AMDGPU_VCN_STACK_SIZE),
383 WREG32_SOC15_DPG_MODE(UVD, 0, mmUVD_LMI_VCPU_CACHE2_64BIT_BAR_HIGH,
384 upper_32_bits(adev->vcn.gpu_addr + offset + AMDGPU_VCN_STACK_SIZE),
386 WREG32_SOC15_DPG_MODE(UVD, 0, mmUVD_VCPU_CACHE_OFFSET2, 0, 0xFFFFFFFF, 0);
387 WREG32_SOC15_DPG_MODE(UVD, 0, mmUVD_VCPU_CACHE_SIZE2, AMDGPU_VCN_CONTEXT_SIZE,
390 /* VCN global tiling registers */
391 WREG32_SOC15_DPG_MODE(UVD, 0, mmUVD_UDEC_ADDR_CONFIG,
392 adev->gfx.config.gb_addr_config, 0xFFFFFFFF, 0);
393 WREG32_SOC15_DPG_MODE(UVD, 0, mmUVD_UDEC_DB_ADDR_CONFIG,
394 adev->gfx.config.gb_addr_config, 0xFFFFFFFF, 0);
395 WREG32_SOC15_DPG_MODE(UVD, 0, mmUVD_UDEC_DBW_ADDR_CONFIG,
396 adev->gfx.config.gb_addr_config, 0xFFFFFFFF, 0);
397 WREG32_SOC15_DPG_MODE(UVD, 0, mmUVD_UDEC_DBW_UV_ADDR_CONFIG,
398 adev->gfx.config.gb_addr_config, 0xFFFFFFFF, 0);
399 WREG32_SOC15_DPG_MODE(UVD, 0, mmUVD_MIF_CURR_ADDR_CONFIG,
400 adev->gfx.config.gb_addr_config, 0xFFFFFFFF, 0);
401 WREG32_SOC15_DPG_MODE(UVD, 0, mmUVD_MIF_CURR_UV_ADDR_CONFIG,
402 adev->gfx.config.gb_addr_config, 0xFFFFFFFF, 0);
403 WREG32_SOC15_DPG_MODE(UVD, 0, mmUVD_MIF_RECON1_ADDR_CONFIG,
404 adev->gfx.config.gb_addr_config, 0xFFFFFFFF, 0);
405 WREG32_SOC15_DPG_MODE(UVD, 0, mmUVD_MIF_RECON1_UV_ADDR_CONFIG,
406 adev->gfx.config.gb_addr_config, 0xFFFFFFFF, 0);
407 WREG32_SOC15_DPG_MODE(UVD, 0, mmUVD_MIF_REF_ADDR_CONFIG,
408 adev->gfx.config.gb_addr_config, 0xFFFFFFFF, 0);
409 WREG32_SOC15_DPG_MODE(UVD, 0, mmUVD_MIF_REF_UV_ADDR_CONFIG,
410 adev->gfx.config.gb_addr_config, 0xFFFFFFFF, 0);
414 * vcn_v1_0_disable_clock_gating - disable VCN clock gating
416 * @adev: amdgpu_device pointer
419 * Disable clock gating for VCN block
421 static void vcn_v1_0_disable_clock_gating(struct amdgpu_device *adev)
425 /* JPEG disable CGC */
426 data = RREG32_SOC15(VCN, 0, mmJPEG_CGC_CTRL);
428 if (adev->cg_flags & AMD_CG_SUPPORT_VCN_MGCG)
429 data |= 1 << JPEG_CGC_CTRL__DYN_CLOCK_MODE__SHIFT;
431 data &= ~JPEG_CGC_CTRL__DYN_CLOCK_MODE_MASK;
433 data |= 1 << JPEG_CGC_CTRL__CLK_GATE_DLY_TIMER__SHIFT;
434 data |= 4 << JPEG_CGC_CTRL__CLK_OFF_DELAY__SHIFT;
435 WREG32_SOC15(VCN, 0, mmJPEG_CGC_CTRL, data);
437 data = RREG32_SOC15(VCN, 0, mmJPEG_CGC_GATE);
438 data &= ~(JPEG_CGC_GATE__JPEG_MASK | JPEG_CGC_GATE__JPEG2_MASK);
439 WREG32_SOC15(VCN, 0, mmJPEG_CGC_GATE, data);
441 /* UVD disable CGC */
442 data = RREG32_SOC15(VCN, 0, mmUVD_CGC_CTRL);
443 if (adev->cg_flags & AMD_CG_SUPPORT_VCN_MGCG)
444 data |= 1 << UVD_CGC_CTRL__DYN_CLOCK_MODE__SHIFT;
446 data &= ~UVD_CGC_CTRL__DYN_CLOCK_MODE_MASK;
448 data |= 1 << UVD_CGC_CTRL__CLK_GATE_DLY_TIMER__SHIFT;
449 data |= 4 << UVD_CGC_CTRL__CLK_OFF_DELAY__SHIFT;
450 WREG32_SOC15(VCN, 0, mmUVD_CGC_CTRL, data);
452 data = RREG32_SOC15(VCN, 0, mmUVD_CGC_GATE);
453 data &= ~(UVD_CGC_GATE__SYS_MASK
454 | UVD_CGC_GATE__UDEC_MASK
455 | UVD_CGC_GATE__MPEG2_MASK
456 | UVD_CGC_GATE__REGS_MASK
457 | UVD_CGC_GATE__RBC_MASK
458 | UVD_CGC_GATE__LMI_MC_MASK
459 | UVD_CGC_GATE__LMI_UMC_MASK
460 | UVD_CGC_GATE__IDCT_MASK
461 | UVD_CGC_GATE__MPRD_MASK
462 | UVD_CGC_GATE__MPC_MASK
463 | UVD_CGC_GATE__LBSI_MASK
464 | UVD_CGC_GATE__LRBBM_MASK
465 | UVD_CGC_GATE__UDEC_RE_MASK
466 | UVD_CGC_GATE__UDEC_CM_MASK
467 | UVD_CGC_GATE__UDEC_IT_MASK
468 | UVD_CGC_GATE__UDEC_DB_MASK
469 | UVD_CGC_GATE__UDEC_MP_MASK
470 | UVD_CGC_GATE__WCB_MASK
471 | UVD_CGC_GATE__VCPU_MASK
472 | UVD_CGC_GATE__SCPU_MASK);
473 WREG32_SOC15(VCN, 0, mmUVD_CGC_GATE, data);
475 data = RREG32_SOC15(VCN, 0, mmUVD_CGC_CTRL);
476 data &= ~(UVD_CGC_CTRL__UDEC_RE_MODE_MASK
477 | UVD_CGC_CTRL__UDEC_CM_MODE_MASK
478 | UVD_CGC_CTRL__UDEC_IT_MODE_MASK
479 | UVD_CGC_CTRL__UDEC_DB_MODE_MASK
480 | UVD_CGC_CTRL__UDEC_MP_MODE_MASK
481 | UVD_CGC_CTRL__SYS_MODE_MASK
482 | UVD_CGC_CTRL__UDEC_MODE_MASK
483 | UVD_CGC_CTRL__MPEG2_MODE_MASK
484 | UVD_CGC_CTRL__REGS_MODE_MASK
485 | UVD_CGC_CTRL__RBC_MODE_MASK
486 | UVD_CGC_CTRL__LMI_MC_MODE_MASK
487 | UVD_CGC_CTRL__LMI_UMC_MODE_MASK
488 | UVD_CGC_CTRL__IDCT_MODE_MASK
489 | UVD_CGC_CTRL__MPRD_MODE_MASK
490 | UVD_CGC_CTRL__MPC_MODE_MASK
491 | UVD_CGC_CTRL__LBSI_MODE_MASK
492 | UVD_CGC_CTRL__LRBBM_MODE_MASK
493 | UVD_CGC_CTRL__WCB_MODE_MASK
494 | UVD_CGC_CTRL__VCPU_MODE_MASK
495 | UVD_CGC_CTRL__SCPU_MODE_MASK);
496 WREG32_SOC15(VCN, 0, mmUVD_CGC_CTRL, data);
499 data = RREG32_SOC15(VCN, 0, mmUVD_SUVD_CGC_GATE);
500 data |= (UVD_SUVD_CGC_GATE__SRE_MASK
501 | UVD_SUVD_CGC_GATE__SIT_MASK
502 | UVD_SUVD_CGC_GATE__SMP_MASK
503 | UVD_SUVD_CGC_GATE__SCM_MASK
504 | UVD_SUVD_CGC_GATE__SDB_MASK
505 | UVD_SUVD_CGC_GATE__SRE_H264_MASK
506 | UVD_SUVD_CGC_GATE__SRE_HEVC_MASK
507 | UVD_SUVD_CGC_GATE__SIT_H264_MASK
508 | UVD_SUVD_CGC_GATE__SIT_HEVC_MASK
509 | UVD_SUVD_CGC_GATE__SCM_H264_MASK
510 | UVD_SUVD_CGC_GATE__SCM_HEVC_MASK
511 | UVD_SUVD_CGC_GATE__SDB_H264_MASK
512 | UVD_SUVD_CGC_GATE__SDB_HEVC_MASK
513 | UVD_SUVD_CGC_GATE__SCLR_MASK
514 | UVD_SUVD_CGC_GATE__UVD_SC_MASK
515 | UVD_SUVD_CGC_GATE__ENT_MASK
516 | UVD_SUVD_CGC_GATE__SIT_HEVC_DEC_MASK
517 | UVD_SUVD_CGC_GATE__SIT_HEVC_ENC_MASK
518 | UVD_SUVD_CGC_GATE__SITE_MASK
519 | UVD_SUVD_CGC_GATE__SRE_VP9_MASK
520 | UVD_SUVD_CGC_GATE__SCM_VP9_MASK
521 | UVD_SUVD_CGC_GATE__SIT_VP9_DEC_MASK
522 | UVD_SUVD_CGC_GATE__SDB_VP9_MASK
523 | UVD_SUVD_CGC_GATE__IME_HEVC_MASK);
524 WREG32_SOC15(VCN, 0, mmUVD_SUVD_CGC_GATE, data);
526 data = RREG32_SOC15(VCN, 0, mmUVD_SUVD_CGC_CTRL);
527 data &= ~(UVD_SUVD_CGC_CTRL__SRE_MODE_MASK
528 | UVD_SUVD_CGC_CTRL__SIT_MODE_MASK
529 | UVD_SUVD_CGC_CTRL__SMP_MODE_MASK
530 | UVD_SUVD_CGC_CTRL__SCM_MODE_MASK
531 | UVD_SUVD_CGC_CTRL__SDB_MODE_MASK
532 | UVD_SUVD_CGC_CTRL__SCLR_MODE_MASK
533 | UVD_SUVD_CGC_CTRL__UVD_SC_MODE_MASK
534 | UVD_SUVD_CGC_CTRL__ENT_MODE_MASK
535 | UVD_SUVD_CGC_CTRL__IME_MODE_MASK
536 | UVD_SUVD_CGC_CTRL__SITE_MODE_MASK);
537 WREG32_SOC15(VCN, 0, mmUVD_SUVD_CGC_CTRL, data);
541 * vcn_v1_0_enable_clock_gating - enable VCN clock gating
543 * @adev: amdgpu_device pointer
546 * Enable clock gating for VCN block
548 static void vcn_v1_0_enable_clock_gating(struct amdgpu_device *adev)
552 /* enable JPEG CGC */
553 data = RREG32_SOC15(VCN, 0, mmJPEG_CGC_CTRL);
554 if (adev->cg_flags & AMD_CG_SUPPORT_VCN_MGCG)
555 data |= 1 << JPEG_CGC_CTRL__DYN_CLOCK_MODE__SHIFT;
557 data |= 0 << JPEG_CGC_CTRL__DYN_CLOCK_MODE__SHIFT;
558 data |= 1 << JPEG_CGC_CTRL__CLK_GATE_DLY_TIMER__SHIFT;
559 data |= 4 << JPEG_CGC_CTRL__CLK_OFF_DELAY__SHIFT;
560 WREG32_SOC15(VCN, 0, mmJPEG_CGC_CTRL, data);
562 data = RREG32_SOC15(VCN, 0, mmJPEG_CGC_GATE);
563 data |= (JPEG_CGC_GATE__JPEG_MASK | JPEG_CGC_GATE__JPEG2_MASK);
564 WREG32_SOC15(VCN, 0, mmJPEG_CGC_GATE, data);
567 data = RREG32_SOC15(VCN, 0, mmUVD_CGC_CTRL);
568 if (adev->cg_flags & AMD_CG_SUPPORT_VCN_MGCG)
569 data |= 1 << UVD_CGC_CTRL__DYN_CLOCK_MODE__SHIFT;
571 data |= 0 << UVD_CGC_CTRL__DYN_CLOCK_MODE__SHIFT;
572 data |= 1 << UVD_CGC_CTRL__CLK_GATE_DLY_TIMER__SHIFT;
573 data |= 4 << UVD_CGC_CTRL__CLK_OFF_DELAY__SHIFT;
574 WREG32_SOC15(VCN, 0, mmUVD_CGC_CTRL, data);
576 data = RREG32_SOC15(VCN, 0, mmUVD_CGC_CTRL);
577 data |= (UVD_CGC_CTRL__UDEC_RE_MODE_MASK
578 | UVD_CGC_CTRL__UDEC_CM_MODE_MASK
579 | UVD_CGC_CTRL__UDEC_IT_MODE_MASK
580 | UVD_CGC_CTRL__UDEC_DB_MODE_MASK
581 | UVD_CGC_CTRL__UDEC_MP_MODE_MASK
582 | UVD_CGC_CTRL__SYS_MODE_MASK
583 | UVD_CGC_CTRL__UDEC_MODE_MASK
584 | UVD_CGC_CTRL__MPEG2_MODE_MASK
585 | UVD_CGC_CTRL__REGS_MODE_MASK
586 | UVD_CGC_CTRL__RBC_MODE_MASK
587 | UVD_CGC_CTRL__LMI_MC_MODE_MASK
588 | UVD_CGC_CTRL__LMI_UMC_MODE_MASK
589 | UVD_CGC_CTRL__IDCT_MODE_MASK
590 | UVD_CGC_CTRL__MPRD_MODE_MASK
591 | UVD_CGC_CTRL__MPC_MODE_MASK
592 | UVD_CGC_CTRL__LBSI_MODE_MASK
593 | UVD_CGC_CTRL__LRBBM_MODE_MASK
594 | UVD_CGC_CTRL__WCB_MODE_MASK
595 | UVD_CGC_CTRL__VCPU_MODE_MASK
596 | UVD_CGC_CTRL__SCPU_MODE_MASK);
597 WREG32_SOC15(VCN, 0, mmUVD_CGC_CTRL, data);
599 data = RREG32_SOC15(VCN, 0, mmUVD_SUVD_CGC_CTRL);
600 data |= (UVD_SUVD_CGC_CTRL__SRE_MODE_MASK
601 | UVD_SUVD_CGC_CTRL__SIT_MODE_MASK
602 | UVD_SUVD_CGC_CTRL__SMP_MODE_MASK
603 | UVD_SUVD_CGC_CTRL__SCM_MODE_MASK
604 | UVD_SUVD_CGC_CTRL__SDB_MODE_MASK
605 | UVD_SUVD_CGC_CTRL__SCLR_MODE_MASK
606 | UVD_SUVD_CGC_CTRL__UVD_SC_MODE_MASK
607 | UVD_SUVD_CGC_CTRL__ENT_MODE_MASK
608 | UVD_SUVD_CGC_CTRL__IME_MODE_MASK
609 | UVD_SUVD_CGC_CTRL__SITE_MODE_MASK);
610 WREG32_SOC15(VCN, 0, mmUVD_SUVD_CGC_CTRL, data);
613 static void vcn_v1_0_clock_gating_dpg_mode(struct amdgpu_device *adev, uint8_t sram_sel)
615 uint32_t reg_data = 0;
617 /* disable JPEG CGC */
618 if (adev->cg_flags & AMD_CG_SUPPORT_VCN_MGCG)
619 reg_data = 1 << JPEG_CGC_CTRL__DYN_CLOCK_MODE__SHIFT;
621 reg_data = 0 << JPEG_CGC_CTRL__DYN_CLOCK_MODE__SHIFT;
622 reg_data |= 1 << JPEG_CGC_CTRL__CLK_GATE_DLY_TIMER__SHIFT;
623 reg_data |= 4 << JPEG_CGC_CTRL__CLK_OFF_DELAY__SHIFT;
624 WREG32_SOC15_DPG_MODE(UVD, 0, mmJPEG_CGC_CTRL, reg_data, 0xFFFFFFFF, sram_sel);
626 WREG32_SOC15_DPG_MODE(UVD, 0, mmJPEG_CGC_GATE, 0, 0xFFFFFFFF, sram_sel);
628 /* enable sw clock gating control */
629 if (adev->cg_flags & AMD_CG_SUPPORT_VCN_MGCG)
630 reg_data = 1 << UVD_CGC_CTRL__DYN_CLOCK_MODE__SHIFT;
632 reg_data = 0 << UVD_CGC_CTRL__DYN_CLOCK_MODE__SHIFT;
633 reg_data |= 1 << UVD_CGC_CTRL__CLK_GATE_DLY_TIMER__SHIFT;
634 reg_data |= 4 << UVD_CGC_CTRL__CLK_OFF_DELAY__SHIFT;
635 reg_data &= ~(UVD_CGC_CTRL__UDEC_RE_MODE_MASK |
636 UVD_CGC_CTRL__UDEC_CM_MODE_MASK |
637 UVD_CGC_CTRL__UDEC_IT_MODE_MASK |
638 UVD_CGC_CTRL__UDEC_DB_MODE_MASK |
639 UVD_CGC_CTRL__UDEC_MP_MODE_MASK |
640 UVD_CGC_CTRL__SYS_MODE_MASK |
641 UVD_CGC_CTRL__UDEC_MODE_MASK |
642 UVD_CGC_CTRL__MPEG2_MODE_MASK |
643 UVD_CGC_CTRL__REGS_MODE_MASK |
644 UVD_CGC_CTRL__RBC_MODE_MASK |
645 UVD_CGC_CTRL__LMI_MC_MODE_MASK |
646 UVD_CGC_CTRL__LMI_UMC_MODE_MASK |
647 UVD_CGC_CTRL__IDCT_MODE_MASK |
648 UVD_CGC_CTRL__MPRD_MODE_MASK |
649 UVD_CGC_CTRL__MPC_MODE_MASK |
650 UVD_CGC_CTRL__LBSI_MODE_MASK |
651 UVD_CGC_CTRL__LRBBM_MODE_MASK |
652 UVD_CGC_CTRL__WCB_MODE_MASK |
653 UVD_CGC_CTRL__VCPU_MODE_MASK |
654 UVD_CGC_CTRL__SCPU_MODE_MASK);
655 WREG32_SOC15_DPG_MODE(UVD, 0, mmUVD_CGC_CTRL, reg_data, 0xFFFFFFFF, sram_sel);
657 /* turn off clock gating */
658 WREG32_SOC15_DPG_MODE(UVD, 0, mmUVD_CGC_GATE, 0, 0xFFFFFFFF, sram_sel);
660 /* turn on SUVD clock gating */
661 WREG32_SOC15_DPG_MODE(UVD, 0, mmUVD_SUVD_CGC_GATE, 1, 0xFFFFFFFF, sram_sel);
663 /* turn on sw mode in UVD_SUVD_CGC_CTRL */
664 WREG32_SOC15_DPG_MODE(UVD, 0, mmUVD_SUVD_CGC_CTRL, 0, 0xFFFFFFFF, sram_sel);
667 static void vcn_1_0_disable_static_power_gating(struct amdgpu_device *adev)
672 if (adev->pg_flags & AMD_PG_SUPPORT_VCN) {
673 data = (1 << UVD_PGFSM_CONFIG__UVDM_PWR_CONFIG__SHIFT
674 | 1 << UVD_PGFSM_CONFIG__UVDU_PWR_CONFIG__SHIFT
675 | 2 << UVD_PGFSM_CONFIG__UVDF_PWR_CONFIG__SHIFT
676 | 2 << UVD_PGFSM_CONFIG__UVDC_PWR_CONFIG__SHIFT
677 | 2 << UVD_PGFSM_CONFIG__UVDB_PWR_CONFIG__SHIFT
678 | 2 << UVD_PGFSM_CONFIG__UVDIL_PWR_CONFIG__SHIFT
679 | 2 << UVD_PGFSM_CONFIG__UVDIR_PWR_CONFIG__SHIFT
680 | 2 << UVD_PGFSM_CONFIG__UVDTD_PWR_CONFIG__SHIFT
681 | 2 << UVD_PGFSM_CONFIG__UVDTE_PWR_CONFIG__SHIFT
682 | 2 << UVD_PGFSM_CONFIG__UVDE_PWR_CONFIG__SHIFT
683 | 2 << UVD_PGFSM_CONFIG__UVDW_PWR_CONFIG__SHIFT);
685 WREG32_SOC15(VCN, 0, mmUVD_PGFSM_CONFIG, data);
686 SOC15_WAIT_ON_RREG(VCN, 0, mmUVD_PGFSM_STATUS, UVD_PGFSM_STATUS__UVDM_UVDU_PWR_ON, 0xFFFFFF, ret);
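/*
 * Otherwise (no VCN power gating support) request power-up (state 1) for
 * every tile and wait until all PWR_STATUS fields read back 0 (powered on).
 */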
688 data = (1 << UVD_PGFSM_CONFIG__UVDM_PWR_CONFIG__SHIFT
689 | 1 << UVD_PGFSM_CONFIG__UVDU_PWR_CONFIG__SHIFT
690 | 1 << UVD_PGFSM_CONFIG__UVDF_PWR_CONFIG__SHIFT
691 | 1 << UVD_PGFSM_CONFIG__UVDC_PWR_CONFIG__SHIFT
692 | 1 << UVD_PGFSM_CONFIG__UVDB_PWR_CONFIG__SHIFT
693 | 1 << UVD_PGFSM_CONFIG__UVDIL_PWR_CONFIG__SHIFT
694 | 1 << UVD_PGFSM_CONFIG__UVDIR_PWR_CONFIG__SHIFT
695 | 1 << UVD_PGFSM_CONFIG__UVDTD_PWR_CONFIG__SHIFT
696 | 1 << UVD_PGFSM_CONFIG__UVDTE_PWR_CONFIG__SHIFT
697 | 1 << UVD_PGFSM_CONFIG__UVDE_PWR_CONFIG__SHIFT
698 | 1 << UVD_PGFSM_CONFIG__UVDW_PWR_CONFIG__SHIFT);
699 WREG32_SOC15(VCN, 0, mmUVD_PGFSM_CONFIG, data);
700 SOC15_WAIT_ON_RREG(VCN, 0, mmUVD_PGFSM_STATUS, 0, 0xFFFFFFFF, ret);
703 /* polling UVD_PGFSM_STATUS to confirm UVDM_PWR_STATUS, UVDU_PWR_STATUS are 0 (power on) */
705 data = RREG32_SOC15(VCN, 0, mmUVD_POWER_STATUS);
707 if (adev->pg_flags & AMD_PG_SUPPORT_VCN)
708 data |= UVD_PGFSM_CONFIG__UVDM_UVDU_PWR_ON | UVD_POWER_STATUS__UVD_PG_EN_MASK;
710 WREG32_SOC15(VCN, 0, mmUVD_POWER_STATUS, data);
713 static void vcn_1_0_enable_static_power_gating(struct amdgpu_device *adev)
718 if (adev->pg_flags & AMD_PG_SUPPORT_VCN) {
719 /* Before power off, this indicator has to be turned on */
720 data = RREG32_SOC15(VCN, 0, mmUVD_POWER_STATUS);
721 data &= ~UVD_POWER_STATUS__UVD_POWER_STATUS_MASK;
722 data |= UVD_POWER_STATUS__UVD_POWER_STATUS_TILES_OFF;
723 WREG32_SOC15(VCN, 0, mmUVD_POWER_STATUS, data);
726 data = (2 << UVD_PGFSM_CONFIG__UVDM_PWR_CONFIG__SHIFT
727 | 2 << UVD_PGFSM_CONFIG__UVDU_PWR_CONFIG__SHIFT
728 | 2 << UVD_PGFSM_CONFIG__UVDF_PWR_CONFIG__SHIFT
729 | 2 << UVD_PGFSM_CONFIG__UVDC_PWR_CONFIG__SHIFT
730 | 2 << UVD_PGFSM_CONFIG__UVDB_PWR_CONFIG__SHIFT
731 | 2 << UVD_PGFSM_CONFIG__UVDIL_PWR_CONFIG__SHIFT
732 | 2 << UVD_PGFSM_CONFIG__UVDIR_PWR_CONFIG__SHIFT
733 | 2 << UVD_PGFSM_CONFIG__UVDTD_PWR_CONFIG__SHIFT
734 | 2 << UVD_PGFSM_CONFIG__UVDTE_PWR_CONFIG__SHIFT
735 | 2 << UVD_PGFSM_CONFIG__UVDE_PWR_CONFIG__SHIFT
736 | 2 << UVD_PGFSM_CONFIG__UVDW_PWR_CONFIG__SHIFT);
738 WREG32_SOC15(VCN, 0, mmUVD_PGFSM_CONFIG, data);
740 data = (2 << UVD_PGFSM_STATUS__UVDM_PWR_STATUS__SHIFT
741 | 2 << UVD_PGFSM_STATUS__UVDU_PWR_STATUS__SHIFT
742 | 2 << UVD_PGFSM_STATUS__UVDF_PWR_STATUS__SHIFT
743 | 2 << UVD_PGFSM_STATUS__UVDC_PWR_STATUS__SHIFT
744 | 2 << UVD_PGFSM_STATUS__UVDB_PWR_STATUS__SHIFT
745 | 2 << UVD_PGFSM_STATUS__UVDIL_PWR_STATUS__SHIFT
746 | 2 << UVD_PGFSM_STATUS__UVDIR_PWR_STATUS__SHIFT
747 | 2 << UVD_PGFSM_STATUS__UVDTD_PWR_STATUS__SHIFT
748 | 2 << UVD_PGFSM_STATUS__UVDTE_PWR_STATUS__SHIFT
749 | 2 << UVD_PGFSM_STATUS__UVDE_PWR_STATUS__SHIFT
750 | 2 << UVD_PGFSM_STATUS__UVDW_PWR_STATUS__SHIFT);
751 SOC15_WAIT_ON_RREG(VCN, 0, mmUVD_PGFSM_STATUS, data, 0xFFFFFFFF, ret);
756 * vcn_v1_0_start_spg_mode - start VCN block in SPG mode
758 * @adev: amdgpu_device pointer
760 * Setup and start the VCN block
762 static int vcn_v1_0_start_spg_mode(struct amdgpu_device *adev)
764 struct amdgpu_ring *ring = &adev->vcn.ring_dec;
765 uint32_t rb_bufsz, tmp;
766 uint32_t lmi_swap_cntl;
769 /* disable byte swapping */
772 vcn_1_0_disable_static_power_gating(adev);
774 tmp = RREG32_SOC15(UVD, 0, mmUVD_STATUS) | UVD_STATUS__UVD_BUSY;
775 WREG32_SOC15(UVD, 0, mmUVD_STATUS, tmp);
777 /* disable clock gating */
778 vcn_v1_0_disable_clock_gating(adev);
780 /* disable interrupt */
781 WREG32_P(SOC15_REG_OFFSET(UVD, 0, mmUVD_MASTINT_EN), 0,
782 ~UVD_MASTINT_EN__VCPU_EN_MASK);
784 /* initialize VCN memory controller */
785 tmp = RREG32_SOC15(UVD, 0, mmUVD_LMI_CTRL);
786 WREG32_SOC15(UVD, 0, mmUVD_LMI_CTRL, tmp |
787 UVD_LMI_CTRL__WRITE_CLEAN_TIMER_EN_MASK |
788 UVD_LMI_CTRL__MASK_MC_URGENT_MASK |
789 UVD_LMI_CTRL__DATA_COHERENCY_EN_MASK |
790 UVD_LMI_CTRL__VCPU_DATA_COHERENCY_EN_MASK);
793 /* swap (8 in 32) RB and IB */
796 WREG32_SOC15(UVD, 0, mmUVD_LMI_SWAP_CNTL, lmi_swap_cntl);
798 tmp = RREG32_SOC15(UVD, 0, mmUVD_MPC_CNTL);
799 tmp &= ~UVD_MPC_CNTL__REPLACEMENT_MODE_MASK;
800 tmp |= 0x2 << UVD_MPC_CNTL__REPLACEMENT_MODE__SHIFT;
801 WREG32_SOC15(UVD, 0, mmUVD_MPC_CNTL, tmp);
803 WREG32_SOC15(UVD, 0, mmUVD_MPC_SET_MUXA0,
804 ((0x1 << UVD_MPC_SET_MUXA0__VARA_1__SHIFT) |
805 (0x2 << UVD_MPC_SET_MUXA0__VARA_2__SHIFT) |
806 (0x3 << UVD_MPC_SET_MUXA0__VARA_3__SHIFT) |
807 (0x4 << UVD_MPC_SET_MUXA0__VARA_4__SHIFT)));
809 WREG32_SOC15(UVD, 0, mmUVD_MPC_SET_MUXB0,
810 ((0x1 << UVD_MPC_SET_MUXB0__VARB_1__SHIFT) |
811 (0x2 << UVD_MPC_SET_MUXB0__VARB_2__SHIFT) |
812 (0x3 << UVD_MPC_SET_MUXB0__VARB_3__SHIFT) |
813 (0x4 << UVD_MPC_SET_MUXB0__VARB_4__SHIFT)));
815 WREG32_SOC15(UVD, 0, mmUVD_MPC_SET_MUX,
816 ((0x0 << UVD_MPC_SET_MUX__SET_0__SHIFT) |
817 (0x1 << UVD_MPC_SET_MUX__SET_1__SHIFT) |
818 (0x2 << UVD_MPC_SET_MUX__SET_2__SHIFT)));
820 vcn_v1_0_mc_resume_spg_mode(adev);
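/*
 * UVD_REG_XX_MASK and UVD_RBC_XX_IB_REG_CHECK appear to enable checking of
 * register writes submitted through decode IBs.
 */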
822 WREG32_SOC15(UVD, 0, mmUVD_REG_XX_MASK, 0x10);
823 WREG32_SOC15(UVD, 0, mmUVD_RBC_XX_IB_REG_CHECK,
824 RREG32_SOC15(UVD, 0, mmUVD_RBC_XX_IB_REG_CHECK) | 0x3);
826 /* enable VCPU clock */
827 WREG32_SOC15(UVD, 0, mmUVD_VCPU_CNTL, UVD_VCPU_CNTL__CLK_EN_MASK);
829 /* boot up the VCPU */
830 WREG32_P(SOC15_REG_OFFSET(UVD, 0, mmUVD_SOFT_RESET), 0,
831 ~UVD_SOFT_RESET__VCPU_SOFT_RESET_MASK);
834 WREG32_P(SOC15_REG_OFFSET(UVD, 0, mmUVD_LMI_CTRL2), 0,
835 ~UVD_LMI_CTRL2__STALL_ARB_UMC_MASK);
837 tmp = RREG32_SOC15(UVD, 0, mmUVD_SOFT_RESET);
838 tmp &= ~UVD_SOFT_RESET__LMI_SOFT_RESET_MASK;
839 tmp &= ~UVD_SOFT_RESET__LMI_UMC_SOFT_RESET_MASK;
840 WREG32_SOC15(UVD, 0, mmUVD_SOFT_RESET, tmp);
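/*
 * Poll UVD_STATUS until the firmware reports IDLE, toggling the VCPU soft
 * reset between attempts (up to 10 tries) before giving up.
 */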
842 for (i = 0; i < 10; ++i) {
845 for (j = 0; j < 100; ++j) {
846 status = RREG32_SOC15(UVD, 0, mmUVD_STATUS);
847 if (status & UVD_STATUS__IDLE)
852 if (status & UVD_STATUS__IDLE)
855 DRM_ERROR("VCN decode not responding, trying to reset the VCPU!!!\n");
856 WREG32_P(SOC15_REG_OFFSET(UVD, 0, mmUVD_SOFT_RESET),
857 UVD_SOFT_RESET__VCPU_SOFT_RESET_MASK,
858 ~UVD_SOFT_RESET__VCPU_SOFT_RESET_MASK);
860 WREG32_P(SOC15_REG_OFFSET(UVD, 0, mmUVD_SOFT_RESET), 0,
861 ~UVD_SOFT_RESET__VCPU_SOFT_RESET_MASK);
867 DRM_ERROR("VCN decode not responding, giving up!!!\n");
870 /* enable master interrupt */
871 WREG32_P(SOC15_REG_OFFSET(UVD, 0, mmUVD_MASTINT_EN),
872 UVD_MASTINT_EN__VCPU_EN_MASK, ~UVD_MASTINT_EN__VCPU_EN_MASK);
874 /* enable system interrupt for JRBC, TODO: move to set interrupt */
875 WREG32_P(SOC15_REG_OFFSET(UVD, 0, mmUVD_SYS_INT_EN),
876 UVD_SYS_INT_EN__UVD_JRBC_EN_MASK,
877 ~UVD_SYS_INT_EN__UVD_JRBC_EN_MASK);
879 /* clear the busy bit of UVD_STATUS */
880 tmp = RREG32_SOC15(UVD, 0, mmUVD_STATUS) & ~UVD_STATUS__UVD_BUSY;
881 WREG32_SOC15(UVD, 0, mmUVD_STATUS, tmp);
883 /* force RBC into idle state */
884 rb_bufsz = order_base_2(ring->ring_size);
885 tmp = REG_SET_FIELD(0, UVD_RBC_RB_CNTL, RB_BUFSZ, rb_bufsz);
886 tmp = REG_SET_FIELD(tmp, UVD_RBC_RB_CNTL, RB_BLKSZ, 1);
887 tmp = REG_SET_FIELD(tmp, UVD_RBC_RB_CNTL, RB_NO_FETCH, 1);
888 tmp = REG_SET_FIELD(tmp, UVD_RBC_RB_CNTL, RB_NO_UPDATE, 1);
889 tmp = REG_SET_FIELD(tmp, UVD_RBC_RB_CNTL, RB_RPTR_WR_EN, 1);
890 WREG32_SOC15(UVD, 0, mmUVD_RBC_RB_CNTL, tmp);
892 /* set the write pointer delay */
893 WREG32_SOC15(UVD, 0, mmUVD_RBC_RB_WPTR_CNTL, 0);
895 /* set the wb address */
896 WREG32_SOC15(UVD, 0, mmUVD_RBC_RB_RPTR_ADDR,
897 (upper_32_bits(ring->gpu_addr) >> 2));
899 /* program the RB_BASE for the ring buffer */
900 WREG32_SOC15(UVD, 0, mmUVD_LMI_RBC_RB_64BIT_BAR_LOW,
901 lower_32_bits(ring->gpu_addr));
902 WREG32_SOC15(UVD, 0, mmUVD_LMI_RBC_RB_64BIT_BAR_HIGH,
903 upper_32_bits(ring->gpu_addr));
905 /* Initialize the ring buffer's read and write pointers */
906 WREG32_SOC15(UVD, 0, mmUVD_RBC_RB_RPTR, 0);
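/*
 * UVD_SCRATCH2 shadows the decode write pointer when DPG mode is active
 * (see vcn_v1_0_dec_ring_set_wptr()); start from a clean value.
 */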
908 WREG32_SOC15(UVD, 0, mmUVD_SCRATCH2, 0);
910 ring->wptr = RREG32_SOC15(UVD, 0, mmUVD_RBC_RB_RPTR);
911 WREG32_SOC15(UVD, 0, mmUVD_RBC_RB_WPTR,
912 lower_32_bits(ring->wptr));
914 WREG32_P(SOC15_REG_OFFSET(UVD, 0, mmUVD_RBC_RB_CNTL), 0,
915 ~UVD_RBC_RB_CNTL__RB_NO_FETCH_MASK);
917 ring = &adev->vcn.ring_enc[0];
918 WREG32_SOC15(UVD, 0, mmUVD_RB_RPTR, lower_32_bits(ring->wptr));
919 WREG32_SOC15(UVD, 0, mmUVD_RB_WPTR, lower_32_bits(ring->wptr));
920 WREG32_SOC15(UVD, 0, mmUVD_RB_BASE_LO, ring->gpu_addr);
921 WREG32_SOC15(UVD, 0, mmUVD_RB_BASE_HI, upper_32_bits(ring->gpu_addr));
922 WREG32_SOC15(UVD, 0, mmUVD_RB_SIZE, ring->ring_size / 4);
924 ring = &adev->vcn.ring_enc[1];
925 WREG32_SOC15(UVD, 0, mmUVD_RB_RPTR2, lower_32_bits(ring->wptr));
926 WREG32_SOC15(UVD, 0, mmUVD_RB_WPTR2, lower_32_bits(ring->wptr));
927 WREG32_SOC15(UVD, 0, mmUVD_RB_BASE_LO2, ring->gpu_addr);
928 WREG32_SOC15(UVD, 0, mmUVD_RB_BASE_HI2, upper_32_bits(ring->gpu_addr));
929 WREG32_SOC15(UVD, 0, mmUVD_RB_SIZE2, ring->ring_size / 4);
931 ring = &adev->vcn.ring_jpeg;
932 WREG32_SOC15(UVD, 0, mmUVD_LMI_JRBC_RB_VMID, 0);
933 WREG32_SOC15(UVD, 0, mmUVD_JRBC_RB_CNTL, UVD_JRBC_RB_CNTL__RB_NO_FETCH_MASK |
934 UVD_JRBC_RB_CNTL__RB_RPTR_WR_EN_MASK);
935 WREG32_SOC15(UVD, 0, mmUVD_LMI_JRBC_RB_64BIT_BAR_LOW, lower_32_bits(ring->gpu_addr));
936 WREG32_SOC15(UVD, 0, mmUVD_LMI_JRBC_RB_64BIT_BAR_HIGH, upper_32_bits(ring->gpu_addr));
937 WREG32_SOC15(UVD, 0, mmUVD_JRBC_RB_RPTR, 0);
938 WREG32_SOC15(UVD, 0, mmUVD_JRBC_RB_WPTR, 0);
939 WREG32_SOC15(UVD, 0, mmUVD_JRBC_RB_CNTL, UVD_JRBC_RB_CNTL__RB_RPTR_WR_EN_MASK);
941 /* initialize wptr */
942 ring->wptr = RREG32_SOC15(UVD, 0, mmUVD_JRBC_RB_WPTR);
944 /* copy patch commands to the jpeg ring */
945 vcn_v1_0_jpeg_ring_set_patch_ring(ring,
946 (ring->wptr + ring->max_dw * amdgpu_sched_hw_submission));
951 static int vcn_v1_0_start_dpg_mode(struct amdgpu_device *adev)
953 struct amdgpu_ring *ring = &adev->vcn.ring_dec;
954 uint32_t rb_bufsz, tmp;
955 uint32_t lmi_swap_cntl;
957 /* disable byte swapping */
960 vcn_1_0_enable_static_power_gating(adev);
962 /* enable dynamic power gating mode */
963 tmp = RREG32_SOC15(UVD, 0, mmUVD_POWER_STATUS);
964 tmp |= UVD_POWER_STATUS__UVD_PG_MODE_MASK;
965 tmp |= UVD_POWER_STATUS__UVD_PG_EN_MASK;
966 WREG32_SOC15(UVD, 0, mmUVD_POWER_STATUS, tmp);
968 /* enable clock gating */
969 vcn_v1_0_clock_gating_dpg_mode(adev, 0);
971 /* enable VCPU clock */
972 tmp = (0xFF << UVD_VCPU_CNTL__PRB_TIMEOUT_VAL__SHIFT);
973 tmp |= UVD_VCPU_CNTL__CLK_EN_MASK;
974 tmp |= UVD_VCPU_CNTL__MIF_WR_LOW_THRESHOLD_BP_MASK;
975 WREG32_SOC15_DPG_MODE(UVD, 0, mmUVD_VCPU_CNTL, tmp, 0xFFFFFFFF, 0);
977 /* disable interrupt */
978 WREG32_SOC15_DPG_MODE(UVD, 0, mmUVD_MASTINT_EN,
979 0, UVD_MASTINT_EN__VCPU_EN_MASK, 0);
981 /* initialize VCN memory controller */
982 WREG32_SOC15_DPG_MODE(UVD, 0, mmUVD_LMI_CTRL,
983 (8 << UVD_LMI_CTRL__WRITE_CLEAN_TIMER__SHIFT) |
984 UVD_LMI_CTRL__WRITE_CLEAN_TIMER_EN_MASK |
985 UVD_LMI_CTRL__DATA_COHERENCY_EN_MASK |
986 UVD_LMI_CTRL__VCPU_DATA_COHERENCY_EN_MASK |
987 UVD_LMI_CTRL__REQ_MODE_MASK |
988 UVD_LMI_CTRL__CRC_RESET_MASK |
989 UVD_LMI_CTRL__MASK_MC_URGENT_MASK |
990 0x00100000L, 0xFFFFFFFF, 0);
993 /* swap (8 in 32) RB and IB */
996 WREG32_SOC15_DPG_MODE(UVD, 0, mmUVD_LMI_SWAP_CNTL, lmi_swap_cntl, 0xFFFFFFFF, 0);
998 WREG32_SOC15_DPG_MODE(UVD, 0, mmUVD_MPC_CNTL,
999 0x2 << UVD_MPC_CNTL__REPLACEMENT_MODE__SHIFT, 0xFFFFFFFF, 0);
1001 WREG32_SOC15_DPG_MODE(UVD, 0, mmUVD_MPC_SET_MUXA0,
1002 ((0x1 << UVD_MPC_SET_MUXA0__VARA_1__SHIFT) |
1003 (0x2 << UVD_MPC_SET_MUXA0__VARA_2__SHIFT) |
1004 (0x3 << UVD_MPC_SET_MUXA0__VARA_3__SHIFT) |
1005 (0x4 << UVD_MPC_SET_MUXA0__VARA_4__SHIFT)), 0xFFFFFFFF, 0);
1007 WREG32_SOC15_DPG_MODE(UVD, 0, mmUVD_MPC_SET_MUXB0,
1008 ((0x1 << UVD_MPC_SET_MUXB0__VARB_1__SHIFT) |
1009 (0x2 << UVD_MPC_SET_MUXB0__VARB_2__SHIFT) |
1010 (0x3 << UVD_MPC_SET_MUXB0__VARB_3__SHIFT) |
1011 (0x4 << UVD_MPC_SET_MUXB0__VARB_4__SHIFT)), 0xFFFFFFFF, 0);
1013 WREG32_SOC15_DPG_MODE(UVD, 0, mmUVD_MPC_SET_MUX,
1014 ((0x0 << UVD_MPC_SET_MUX__SET_0__SHIFT) |
1015 (0x1 << UVD_MPC_SET_MUX__SET_1__SHIFT) |
1016 (0x2 << UVD_MPC_SET_MUX__SET_2__SHIFT)), 0xFFFFFFFF, 0);
1018 vcn_v1_0_mc_resume_dpg_mode(adev);
1020 WREG32_SOC15_DPG_MODE(UVD, 0, mmUVD_REG_XX_MASK, 0x10, 0xFFFFFFFF, 0);
1021 WREG32_SOC15_DPG_MODE(UVD, 0, mmUVD_RBC_XX_IB_REG_CHECK, 0x3, 0xFFFFFFFF, 0);
1023 /* boot up the VCPU */
1024 WREG32_SOC15_DPG_MODE(UVD, 0, mmUVD_SOFT_RESET, 0, 0xFFFFFFFF, 0);
1027 WREG32_SOC15_DPG_MODE(UVD, 0, mmUVD_LMI_CTRL2,
1028 0x1F << UVD_LMI_CTRL2__RE_OFLD_MIF_WR_REQ_NUM__SHIFT,
1031 /* enable master interrupt */
1032 WREG32_SOC15_DPG_MODE(UVD, 0, mmUVD_MASTINT_EN,
1033 UVD_MASTINT_EN__VCPU_EN_MASK, UVD_MASTINT_EN__VCPU_EN_MASK, 0);
1035 vcn_v1_0_clock_gating_dpg_mode(adev, 1);
1036 /* setup mmUVD_LMI_CTRL */
1037 WREG32_SOC15_DPG_MODE(UVD, 0, mmUVD_LMI_CTRL,
1038 (8 << UVD_LMI_CTRL__WRITE_CLEAN_TIMER__SHIFT) |
1039 UVD_LMI_CTRL__WRITE_CLEAN_TIMER_EN_MASK |
1040 UVD_LMI_CTRL__DATA_COHERENCY_EN_MASK |
1041 UVD_LMI_CTRL__VCPU_DATA_COHERENCY_EN_MASK |
1042 UVD_LMI_CTRL__REQ_MODE_MASK |
1043 UVD_LMI_CTRL__CRC_RESET_MASK |
1044 UVD_LMI_CTRL__MASK_MC_URGENT_MASK |
1045 0x00100000L, 0xFFFFFFFF, 1);
1047 tmp = adev->gfx.config.gb_addr_config;
1048 /* setup VCN global tiling registers */
1049 WREG32_SOC15_DPG_MODE(UVD, 0, mmUVD_JPEG_ADDR_CONFIG, tmp, 0xFFFFFFFF, 1);
1050 WREG32_SOC15_DPG_MODE(UVD, 0, mmUVD_JPEG_UV_ADDR_CONFIG, tmp, 0xFFFFFFFF, 1);
1052 /* enable System Interrupt for JRBC */
1053 WREG32_SOC15_DPG_MODE(UVD, 0, mmUVD_SYS_INT_EN,
1054 UVD_SYS_INT_EN__UVD_JRBC_EN_MASK, 0xFFFFFFFF, 1);
1056 /* force RBC into idle state */
1057 rb_bufsz = order_base_2(ring->ring_size);
1058 tmp = REG_SET_FIELD(0, UVD_RBC_RB_CNTL, RB_BUFSZ, rb_bufsz);
1059 tmp = REG_SET_FIELD(tmp, UVD_RBC_RB_CNTL, RB_BLKSZ, 1);
1060 tmp = REG_SET_FIELD(tmp, UVD_RBC_RB_CNTL, RB_NO_FETCH, 1);
1061 tmp = REG_SET_FIELD(tmp, UVD_RBC_RB_CNTL, RB_NO_UPDATE, 1);
1062 tmp = REG_SET_FIELD(tmp, UVD_RBC_RB_CNTL, RB_RPTR_WR_EN, 1);
1063 WREG32_SOC15(UVD, 0, mmUVD_RBC_RB_CNTL, tmp);
1065 /* set the write pointer delay */
1066 WREG32_SOC15(UVD, 0, mmUVD_RBC_RB_WPTR_CNTL, 0);
1068 /* set the wb address */
1069 WREG32_SOC15(UVD, 0, mmUVD_RBC_RB_RPTR_ADDR,
1070 (upper_32_bits(ring->gpu_addr) >> 2));
1072 /* program the RB_BASE for the ring buffer */
1073 WREG32_SOC15(UVD, 0, mmUVD_LMI_RBC_RB_64BIT_BAR_LOW,
1074 lower_32_bits(ring->gpu_addr));
1075 WREG32_SOC15(UVD, 0, mmUVD_LMI_RBC_RB_64BIT_BAR_HIGH,
1076 upper_32_bits(ring->gpu_addr));
1078 /* Initialize the ring buffer's read and write pointers */
1079 WREG32_SOC15(UVD, 0, mmUVD_RBC_RB_RPTR, 0);
1081 WREG32_SOC15(UVD, 0, mmUVD_SCRATCH2, 0);
1083 ring->wptr = RREG32_SOC15(UVD, 0, mmUVD_RBC_RB_RPTR);
1084 WREG32_SOC15(UVD, 0, mmUVD_RBC_RB_WPTR,
1085 lower_32_bits(ring->wptr));
1087 WREG32_P(SOC15_REG_OFFSET(UVD, 0, mmUVD_RBC_RB_CNTL), 0,
1088 ~UVD_RBC_RB_CNTL__RB_NO_FETCH_MASK);
1090 /* initialize wptr */
1091 ring->wptr = RREG32_SOC15(UVD, 0, mmUVD_JRBC_RB_WPTR);
1093 /* copy patch commands to the jpeg ring */
1094 vcn_v1_0_jpeg_ring_set_patch_ring(ring,
1095 (ring->wptr + ring->max_dw * amdgpu_sched_hw_submission));
1100 static int vcn_v1_0_start(struct amdgpu_device *adev)
1104 if (adev->pg_flags & AMD_PG_SUPPORT_VCN_DPG)
1105 r = vcn_v1_0_start_dpg_mode(adev);
1107 r = vcn_v1_0_start_spg_mode(adev);
1112 * vcn_v1_0_stop_spg_mode - stop VCN block in SPG mode
1114 * @adev: amdgpu_device pointer
1116 * stop the VCN block
1118 static int vcn_v1_0_stop_spg_mode(struct amdgpu_device *adev)
1122 SOC15_WAIT_ON_RREG(UVD, 0, mmUVD_STATUS, UVD_STATUS__IDLE, 0x7, ret_code);
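/* wait for the LMI read/write paths to drain before resetting the VCPU */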
1124 tmp = UVD_LMI_STATUS__VCPU_LMI_WRITE_CLEAN_MASK |
1125 UVD_LMI_STATUS__READ_CLEAN_MASK |
1126 UVD_LMI_STATUS__WRITE_CLEAN_MASK |
1127 UVD_LMI_STATUS__WRITE_CLEAN_RAW_MASK;
1128 SOC15_WAIT_ON_RREG(UVD, 0, mmUVD_LMI_STATUS, tmp, tmp, ret_code);
1130 /* put VCPU into reset */
1131 WREG32_P(SOC15_REG_OFFSET(UVD, 0, mmUVD_SOFT_RESET),
1132 UVD_SOFT_RESET__VCPU_SOFT_RESET_MASK,
1133 ~UVD_SOFT_RESET__VCPU_SOFT_RESET_MASK);
1135 tmp = UVD_LMI_STATUS__UMC_READ_CLEAN_RAW_MASK |
1136 UVD_LMI_STATUS__UMC_WRITE_CLEAN_RAW_MASK;
1137 SOC15_WAIT_ON_RREG(UVD, 0, mmUVD_LMI_STATUS, tmp, tmp, ret_code);
1139 /* disable VCPU clock */
1140 WREG32_P(SOC15_REG_OFFSET(UVD, 0, mmUVD_VCPU_CNTL), 0,
1141 ~UVD_VCPU_CNTL__CLK_EN_MASK);
1143 /* reset LMI UMC/LMI */
1144 WREG32_P(SOC15_REG_OFFSET(UVD, 0, mmUVD_SOFT_RESET),
1145 UVD_SOFT_RESET__LMI_UMC_SOFT_RESET_MASK,
1146 ~UVD_SOFT_RESET__LMI_UMC_SOFT_RESET_MASK);
1148 WREG32_P(SOC15_REG_OFFSET(UVD, 0, mmUVD_SOFT_RESET),
1149 UVD_SOFT_RESET__LMI_SOFT_RESET_MASK,
1150 ~UVD_SOFT_RESET__LMI_SOFT_RESET_MASK);
1152 WREG32_SOC15(UVD, 0, mmUVD_STATUS, 0);
1154 vcn_v1_0_enable_clock_gating(adev);
1155 vcn_1_0_enable_static_power_gating(adev);
1159 static int vcn_v1_0_stop_dpg_mode(struct amdgpu_device *adev)
1163 /* Wait for power status to be UVD_POWER_STATUS__UVD_POWER_STATUS_TILES_OFF */
1164 SOC15_WAIT_ON_RREG(UVD, 0, mmUVD_POWER_STATUS,
1165 UVD_POWER_STATUS__UVD_POWER_STATUS_TILES_OFF,
1166 UVD_POWER_STATUS__UVD_POWER_STATUS_MASK, ret_code);
1169 int tmp = RREG32_SOC15(UVD, 0, mmUVD_RBC_RB_WPTR) & 0x7FFFFFFF;
1170 /* wait for read ptr to be equal to write ptr */
1171 SOC15_WAIT_ON_RREG(UVD, 0, mmUVD_RBC_RB_RPTR, tmp, 0xFFFFFFFF, ret_code);
1173 SOC15_WAIT_ON_RREG(UVD, 0, mmUVD_POWER_STATUS,
1174 UVD_POWER_STATUS__UVD_POWER_STATUS_TILES_OFF,
1175 UVD_POWER_STATUS__UVD_POWER_STATUS_MASK, ret_code);
1178 /* disable dynamic power gating mode */
1179 WREG32_P(SOC15_REG_OFFSET(UVD, 0, mmUVD_POWER_STATUS), 0,
1180 ~UVD_POWER_STATUS__UVD_PG_MODE_MASK);
1185 static int vcn_v1_0_stop(struct amdgpu_device *adev)
1189 if (adev->pg_flags & AMD_PG_SUPPORT_VCN_DPG)
1190 r = vcn_v1_0_stop_dpg_mode(adev);
1192 r = vcn_v1_0_stop_spg_mode(adev);
1197 static bool vcn_v1_0_is_idle(void *handle)
1199 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
1201 return (RREG32_SOC15(VCN, 0, mmUVD_STATUS) == UVD_STATUS__IDLE);
1204 static int vcn_v1_0_wait_for_idle(void *handle)
1206 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
1209 SOC15_WAIT_ON_RREG(VCN, 0, mmUVD_STATUS, UVD_STATUS__IDLE,
1210 UVD_STATUS__IDLE, ret);
1215 static int vcn_v1_0_set_clockgating_state(void *handle,
1216 enum amd_clockgating_state state)
1218 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
1219 bool enable = (state == AMD_CG_STATE_GATE) ? true : false;
1222 /* wait for STATUS to clear */
1223 if (vcn_v1_0_is_idle(handle))
1225 vcn_v1_0_enable_clock_gating(adev);
1227 /* disable HW gating and enable SW gating */
1228 vcn_v1_0_disable_clock_gating(adev);
1234 * vcn_v1_0_dec_ring_get_rptr - get read pointer
1236 * @ring: amdgpu_ring pointer
1238 * Returns the current hardware read pointer
1240 static uint64_t vcn_v1_0_dec_ring_get_rptr(struct amdgpu_ring *ring)
1242 struct amdgpu_device *adev = ring->adev;
1244 return RREG32_SOC15(UVD, 0, mmUVD_RBC_RB_RPTR);
1248 * vcn_v1_0_dec_ring_get_wptr - get write pointer
1250 * @ring: amdgpu_ring pointer
1252 * Returns the current hardware write pointer
1254 static uint64_t vcn_v1_0_dec_ring_get_wptr(struct amdgpu_ring *ring)
1256 struct amdgpu_device *adev = ring->adev;
1258 return RREG32_SOC15(UVD, 0, mmUVD_RBC_RB_WPTR);
1262 * vcn_v1_0_dec_ring_set_wptr - set write pointer
1264 * @ring: amdgpu_ring pointer
1266 * Commits the write pointer to the hardware
1268 static void vcn_v1_0_dec_ring_set_wptr(struct amdgpu_ring *ring)
1270 struct amdgpu_device *adev = ring->adev;
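/*
 * In DPG mode the write pointer is handed over through UVD_SCRATCH2
 * (bit 31 appears to flag a pending update) instead of being written to
 * UVD_RBC_RB_WPTR directly, since the block may be power gated.
 */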
1272 if (adev->pg_flags & AMD_PG_SUPPORT_VCN_DPG)
1273 WREG32_SOC15(UVD, 0, mmUVD_SCRATCH2,
1274 lower_32_bits(ring->wptr) | 0x80000000);
1276 WREG32_SOC15(UVD, 0, mmUVD_RBC_RB_WPTR, lower_32_bits(ring->wptr));
1280 * vcn_v1_0_dec_ring_insert_start - insert a start command
1282 * @ring: amdgpu_ring pointer
1284 * Write a start command to the ring.
1286 static void vcn_v1_0_dec_ring_insert_start(struct amdgpu_ring *ring)
1288 struct amdgpu_device *adev = ring->adev;
1290 amdgpu_ring_write(ring,
1291 PACKET0(SOC15_REG_OFFSET(UVD, 0, mmUVD_GPCOM_VCPU_DATA0), 0));
1292 amdgpu_ring_write(ring, 0);
1293 amdgpu_ring_write(ring,
1294 PACKET0(SOC15_REG_OFFSET(UVD, 0, mmUVD_GPCOM_VCPU_CMD), 0));
1295 amdgpu_ring_write(ring, VCN_DEC_CMD_PACKET_START << 1);
1299 * vcn_v1_0_dec_ring_insert_end - insert an end command
1301 * @ring: amdgpu_ring pointer
1303 * Write an end command to the ring.
1305 static void vcn_v1_0_dec_ring_insert_end(struct amdgpu_ring *ring)
1307 struct amdgpu_device *adev = ring->adev;
1309 amdgpu_ring_write(ring,
1310 PACKET0(SOC15_REG_OFFSET(UVD, 0, mmUVD_GPCOM_VCPU_CMD), 0));
1311 amdgpu_ring_write(ring, VCN_DEC_CMD_PACKET_END << 1);
1315 * vcn_v1_0_dec_ring_emit_fence - emit a fence & trap command
1317 * @ring: amdgpu_ring pointer
1318 * @addr: GPU address at which to write the fence value
 * @seq: sequence number to write
 * @flags: fence emit flags
1320 * Write a fence and a trap command to the ring.
1322 static void vcn_v1_0_dec_ring_emit_fence(struct amdgpu_ring *ring, u64 addr, u64 seq,
1325 struct amdgpu_device *adev = ring->adev;
1327 WARN_ON(flags & AMDGPU_FENCE_FLAG_64BIT);
1329 amdgpu_ring_write(ring,
1330 PACKET0(SOC15_REG_OFFSET(UVD, 0, mmUVD_CONTEXT_ID), 0));
1331 amdgpu_ring_write(ring, seq);
1332 amdgpu_ring_write(ring,
1333 PACKET0(SOC15_REG_OFFSET(UVD, 0, mmUVD_GPCOM_VCPU_DATA0), 0));
1334 amdgpu_ring_write(ring, addr & 0xffffffff);
1335 amdgpu_ring_write(ring,
1336 PACKET0(SOC15_REG_OFFSET(UVD, 0, mmUVD_GPCOM_VCPU_DATA1), 0));
1337 amdgpu_ring_write(ring, upper_32_bits(addr) & 0xff);
1338 amdgpu_ring_write(ring,
1339 PACKET0(SOC15_REG_OFFSET(UVD, 0, mmUVD_GPCOM_VCPU_CMD), 0));
1340 amdgpu_ring_write(ring, VCN_DEC_CMD_FENCE << 1);
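/* follow the fence write with a trap so an interrupt is raised once it lands */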
1342 amdgpu_ring_write(ring,
1343 PACKET0(SOC15_REG_OFFSET(UVD, 0, mmUVD_GPCOM_VCPU_DATA0), 0));
1344 amdgpu_ring_write(ring, 0);
1345 amdgpu_ring_write(ring,
1346 PACKET0(SOC15_REG_OFFSET(UVD, 0, mmUVD_GPCOM_VCPU_DATA1), 0));
1347 amdgpu_ring_write(ring, 0);
1348 amdgpu_ring_write(ring,
1349 PACKET0(SOC15_REG_OFFSET(UVD, 0, mmUVD_GPCOM_VCPU_CMD), 0));
1350 amdgpu_ring_write(ring, VCN_DEC_CMD_TRAP << 1);
1354 * vcn_v1_0_dec_ring_emit_ib - execute indirect buffer
1356 * @ring: amdgpu_ring pointer
1357 * @ib: indirect buffer to execute
1359 * Write ring commands to execute the indirect buffer
1361 static void vcn_v1_0_dec_ring_emit_ib(struct amdgpu_ring *ring,
1362 struct amdgpu_job *job,
1363 struct amdgpu_ib *ib,
1366 struct amdgpu_device *adev = ring->adev;
1367 unsigned vmid = AMDGPU_JOB_GET_VMID(job);
1369 amdgpu_ring_write(ring,
1370 PACKET0(SOC15_REG_OFFSET(UVD, 0, mmUVD_LMI_RBC_IB_VMID), 0));
1371 amdgpu_ring_write(ring, vmid);
1373 amdgpu_ring_write(ring,
1374 PACKET0(SOC15_REG_OFFSET(UVD, 0, mmUVD_LMI_RBC_IB_64BIT_BAR_LOW), 0));
1375 amdgpu_ring_write(ring, lower_32_bits(ib->gpu_addr));
1376 amdgpu_ring_write(ring,
1377 PACKET0(SOC15_REG_OFFSET(UVD, 0, mmUVD_LMI_RBC_IB_64BIT_BAR_HIGH), 0));
1378 amdgpu_ring_write(ring, upper_32_bits(ib->gpu_addr));
1379 amdgpu_ring_write(ring,
1380 PACKET0(SOC15_REG_OFFSET(UVD, 0, mmUVD_RBC_IB_SIZE), 0));
1381 amdgpu_ring_write(ring, ib->length_dw);
1384 static void vcn_v1_0_dec_ring_emit_reg_wait(struct amdgpu_ring *ring,
1385 uint32_t reg, uint32_t val,
1388 struct amdgpu_device *adev = ring->adev;
1390 amdgpu_ring_write(ring,
1391 PACKET0(SOC15_REG_OFFSET(UVD, 0, mmUVD_GPCOM_VCPU_DATA0), 0));
1392 amdgpu_ring_write(ring, reg << 2);
1393 amdgpu_ring_write(ring,
1394 PACKET0(SOC15_REG_OFFSET(UVD, 0, mmUVD_GPCOM_VCPU_DATA1), 0));
1395 amdgpu_ring_write(ring, val);
1396 amdgpu_ring_write(ring,
1397 PACKET0(SOC15_REG_OFFSET(UVD, 0, mmUVD_GP_SCRATCH8), 0));
1398 amdgpu_ring_write(ring, mask);
1399 amdgpu_ring_write(ring,
1400 PACKET0(SOC15_REG_OFFSET(UVD, 0, mmUVD_GPCOM_VCPU_CMD), 0));
1401 amdgpu_ring_write(ring, VCN_DEC_CMD_REG_READ_COND_WAIT << 1);
1404 static void vcn_v1_0_dec_ring_emit_vm_flush(struct amdgpu_ring *ring,
1405 unsigned vmid, uint64_t pd_addr)
1407 struct amdgpu_vmhub *hub = &ring->adev->vmhub[ring->funcs->vmhub];
1408 uint32_t data0, data1, mask;
1410 pd_addr = amdgpu_gmc_emit_flush_gpu_tlb(ring, vmid, pd_addr);
1412 /* wait for register write */
1413 data0 = hub->ctx0_ptb_addr_lo32 + vmid * 2;
1414 data1 = lower_32_bits(pd_addr);
1416 vcn_v1_0_dec_ring_emit_reg_wait(ring, data0, data1, mask);
1419 static void vcn_v1_0_dec_ring_emit_wreg(struct amdgpu_ring *ring,
1420 uint32_t reg, uint32_t val)
1422 struct amdgpu_device *adev = ring->adev;
1424 amdgpu_ring_write(ring,
1425 PACKET0(SOC15_REG_OFFSET(UVD, 0, mmUVD_GPCOM_VCPU_DATA0), 0));
1426 amdgpu_ring_write(ring, reg << 2);
1427 amdgpu_ring_write(ring,
1428 PACKET0(SOC15_REG_OFFSET(UVD, 0, mmUVD_GPCOM_VCPU_DATA1), 0));
1429 amdgpu_ring_write(ring, val);
1430 amdgpu_ring_write(ring,
1431 PACKET0(SOC15_REG_OFFSET(UVD, 0, mmUVD_GPCOM_VCPU_CMD), 0));
1432 amdgpu_ring_write(ring, VCN_DEC_CMD_WRITE_REG << 1);
1436 * vcn_v1_0_enc_ring_get_rptr - get enc read pointer
1438 * @ring: amdgpu_ring pointer
1440 * Returns the current hardware enc read pointer
1442 static uint64_t vcn_v1_0_enc_ring_get_rptr(struct amdgpu_ring *ring)
1444 struct amdgpu_device *adev = ring->adev;
1446 if (ring == &adev->vcn.ring_enc[0])
1447 return RREG32_SOC15(UVD, 0, mmUVD_RB_RPTR);
1449 return RREG32_SOC15(UVD, 0, mmUVD_RB_RPTR2);
1453 * vcn_v1_0_enc_ring_get_wptr - get enc write pointer
1455 * @ring: amdgpu_ring pointer
1457 * Returns the current hardware enc write pointer
1459 static uint64_t vcn_v1_0_enc_ring_get_wptr(struct amdgpu_ring *ring)
1461 struct amdgpu_device *adev = ring->adev;
1463 if (ring == &adev->vcn.ring_enc[0])
1464 return RREG32_SOC15(UVD, 0, mmUVD_RB_WPTR);
1466 return RREG32_SOC15(UVD, 0, mmUVD_RB_WPTR2);
1470 * vcn_v1_0_enc_ring_set_wptr - set enc write pointer
1472 * @ring: amdgpu_ring pointer
1474 * Commits the enc write pointer to the hardware
1476 static void vcn_v1_0_enc_ring_set_wptr(struct amdgpu_ring *ring)
1478 struct amdgpu_device *adev = ring->adev;
1480 if (ring == &adev->vcn.ring_enc[0])
1481 WREG32_SOC15(UVD, 0, mmUVD_RB_WPTR,
1482 lower_32_bits(ring->wptr));
1484 WREG32_SOC15(UVD, 0, mmUVD_RB_WPTR2,
1485 lower_32_bits(ring->wptr));
1489 * vcn_v1_0_enc_ring_emit_fence - emit an enc fence & trap command
1491 * @ring: amdgpu_ring pointer
1492 * @addr: GPU address at which to write the fence value
 * @seq: sequence number to write
 * @flags: fence emit flags
1494 * Write an enc fence and a trap command to the ring.
1496 static void vcn_v1_0_enc_ring_emit_fence(struct amdgpu_ring *ring, u64 addr,
1497 u64 seq, unsigned flags)
1499 WARN_ON(flags & AMDGPU_FENCE_FLAG_64BIT);
1501 amdgpu_ring_write(ring, VCN_ENC_CMD_FENCE);
1502 amdgpu_ring_write(ring, addr);
1503 amdgpu_ring_write(ring, upper_32_bits(addr));
1504 amdgpu_ring_write(ring, seq);
1505 amdgpu_ring_write(ring, VCN_ENC_CMD_TRAP);
1508 static void vcn_v1_0_enc_ring_insert_end(struct amdgpu_ring *ring)
1510 amdgpu_ring_write(ring, VCN_ENC_CMD_END);
1514 * vcn_v1_0_enc_ring_emit_ib - enc execute indirect buffer
1516 * @ring: amdgpu_ring pointer
1517 * @ib: indirect buffer to execute
1519 * Write enc ring commands to execute the indirect buffer
1521 static void vcn_v1_0_enc_ring_emit_ib(struct amdgpu_ring *ring,
1522 struct amdgpu_job *job,
1523 struct amdgpu_ib *ib,
1526 unsigned vmid = AMDGPU_JOB_GET_VMID(job);
1528 amdgpu_ring_write(ring, VCN_ENC_CMD_IB);
1529 amdgpu_ring_write(ring, vmid);
1530 amdgpu_ring_write(ring, lower_32_bits(ib->gpu_addr));
1531 amdgpu_ring_write(ring, upper_32_bits(ib->gpu_addr));
1532 amdgpu_ring_write(ring, ib->length_dw);
1535 static void vcn_v1_0_enc_ring_emit_reg_wait(struct amdgpu_ring *ring,
1536 uint32_t reg, uint32_t val,
1539 amdgpu_ring_write(ring, VCN_ENC_CMD_REG_WAIT);
1540 amdgpu_ring_write(ring, reg << 2);
1541 amdgpu_ring_write(ring, mask);
1542 amdgpu_ring_write(ring, val);
1545 static void vcn_v1_0_enc_ring_emit_vm_flush(struct amdgpu_ring *ring,
1546 unsigned int vmid, uint64_t pd_addr)
1548 struct amdgpu_vmhub *hub = &ring->adev->vmhub[ring->funcs->vmhub];
1550 pd_addr = amdgpu_gmc_emit_flush_gpu_tlb(ring, vmid, pd_addr);
1552 /* wait for reg writes */
1553 vcn_v1_0_enc_ring_emit_reg_wait(ring, hub->ctx0_ptb_addr_lo32 + vmid * 2,
1554 lower_32_bits(pd_addr), 0xffffffff);
1557 static void vcn_v1_0_enc_ring_emit_wreg(struct amdgpu_ring *ring,
1558 uint32_t reg, uint32_t val)
1560 amdgpu_ring_write(ring, VCN_ENC_CMD_REG_WRITE);
1561 amdgpu_ring_write(ring, reg << 2);
1562 amdgpu_ring_write(ring, val);
1567 * vcn_v1_0_jpeg_ring_get_rptr - get read pointer
1569 * @ring: amdgpu_ring pointer
1571 * Returns the current hardware read pointer
1573 static uint64_t vcn_v1_0_jpeg_ring_get_rptr(struct amdgpu_ring *ring)
1575 struct amdgpu_device *adev = ring->adev;
1577 return RREG32_SOC15(UVD, 0, mmUVD_JRBC_RB_RPTR);
1581 * vcn_v1_0_jpeg_ring_get_wptr - get write pointer
1583 * @ring: amdgpu_ring pointer
1585 * Returns the current hardware write pointer
1587 static uint64_t vcn_v1_0_jpeg_ring_get_wptr(struct amdgpu_ring *ring)
1589 struct amdgpu_device *adev = ring->adev;
1591 return RREG32_SOC15(UVD, 0, mmUVD_JRBC_RB_WPTR);
1595 * vcn_v1_0_jpeg_ring_set_wptr - set write pointer
1597 * @ring: amdgpu_ring pointer
1599 * Commits the write pointer to the hardware
1601 static void vcn_v1_0_jpeg_ring_set_wptr(struct amdgpu_ring *ring)
1603 struct amdgpu_device *adev = ring->adev;
1605 WREG32_SOC15(UVD, 0, mmUVD_JRBC_RB_WPTR, lower_32_bits(ring->wptr));
1609 * vcn_v1_0_jpeg_ring_insert_start - insert a start command
1611 * @ring: amdgpu_ring pointer
1613 * Write a start command to the ring.
1615 static void vcn_v1_0_jpeg_ring_insert_start(struct amdgpu_ring *ring)
1617 struct amdgpu_device *adev = ring->adev;
1619 amdgpu_ring_write(ring,
1620 PACKETJ(SOC15_REG_OFFSET(UVD, 0, mmUVD_JRBC_EXTERNAL_REG_BASE), 0, 0, PACKETJ_TYPE0));
1621 amdgpu_ring_write(ring, 0x68e04);
1623 amdgpu_ring_write(ring, PACKETJ(0, 0, 0, PACKETJ_TYPE0));
1624 amdgpu_ring_write(ring, 0x80010000);
1628 * vcn_v1_0_jpeg_ring_insert_end - insert an end command
1630 * @ring: amdgpu_ring pointer
1632 * Write an end command to the ring.
1634 static void vcn_v1_0_jpeg_ring_insert_end(struct amdgpu_ring *ring)
1636 struct amdgpu_device *adev = ring->adev;
1638 amdgpu_ring_write(ring,
1639 PACKETJ(SOC15_REG_OFFSET(UVD, 0, mmUVD_JRBC_EXTERNAL_REG_BASE), 0, 0, PACKETJ_TYPE0));
1640 amdgpu_ring_write(ring, 0x68e04);
1642 amdgpu_ring_write(ring, PACKETJ(0, 0, 0, PACKETJ_TYPE0));
1643 amdgpu_ring_write(ring, 0x00010000);
1647 * vcn_v1_0_jpeg_ring_emit_fence - emit a fence & trap command
1649 * @ring: amdgpu_ring pointer
1650 * @addr: GPU address at which to write the fence value
 * @seq: sequence number to write
 * @flags: fence emit flags
1652 * Write a fence and a trap command to the ring.
1654 static void vcn_v1_0_jpeg_ring_emit_fence(struct amdgpu_ring *ring, u64 addr, u64 seq,
1657 struct amdgpu_device *adev = ring->adev;
1659 WARN_ON(flags & AMDGPU_FENCE_FLAG_64BIT);
1661 amdgpu_ring_write(ring,
1662 PACKETJ(SOC15_REG_OFFSET(UVD, 0, mmUVD_JPEG_GPCOM_DATA0), 0, 0, PACKETJ_TYPE0));
1663 amdgpu_ring_write(ring, seq);
1665 amdgpu_ring_write(ring,
1666 PACKETJ(SOC15_REG_OFFSET(UVD, 0, mmUVD_JPEG_GPCOM_DATA1), 0, 0, PACKETJ_TYPE0));
1667 amdgpu_ring_write(ring, seq);
1669 amdgpu_ring_write(ring,
1670 PACKETJ(SOC15_REG_OFFSET(UVD, 0, mmUVD_LMI_JRBC_RB_MEM_WR_64BIT_BAR_LOW), 0, 0, PACKETJ_TYPE0));
1671 amdgpu_ring_write(ring, lower_32_bits(addr));
1673 amdgpu_ring_write(ring,
1674 PACKETJ(SOC15_REG_OFFSET(UVD, 0, mmUVD_LMI_JRBC_RB_MEM_WR_64BIT_BAR_HIGH), 0, 0, PACKETJ_TYPE0));
1675 amdgpu_ring_write(ring, upper_32_bits(addr));
1677 amdgpu_ring_write(ring,
1678 PACKETJ(SOC15_REG_OFFSET(UVD, 0, mmUVD_JPEG_GPCOM_CMD), 0, 0, PACKETJ_TYPE0));
1679 amdgpu_ring_write(ring, 0x8);
1681 amdgpu_ring_write(ring,
1682 PACKETJ(SOC15_REG_OFFSET(UVD, 0, mmUVD_JPEG_GPCOM_CMD), 0, PACKETJ_CONDITION_CHECK0, PACKETJ_TYPE4));
1683 amdgpu_ring_write(ring, 0);
1685 amdgpu_ring_write(ring,
1686 PACKETJ(SOC15_REG_OFFSET(UVD, 0, mmUVD_JRBC_RB_COND_RD_TIMER), 0, 0, PACKETJ_TYPE0));
1687 amdgpu_ring_write(ring, 0x01400200);
1689 amdgpu_ring_write(ring,
1690 PACKETJ(SOC15_REG_OFFSET(UVD, 0, mmUVD_JRBC_RB_REF_DATA), 0, 0, PACKETJ_TYPE0));
1691 amdgpu_ring_write(ring, seq);
1693 amdgpu_ring_write(ring,
1694 PACKETJ(SOC15_REG_OFFSET(UVD, 0, mmUVD_LMI_JRBC_RB_MEM_RD_64BIT_BAR_LOW), 0, 0, PACKETJ_TYPE0));
1695 amdgpu_ring_write(ring, lower_32_bits(addr));
1697 amdgpu_ring_write(ring,
1698 PACKETJ(SOC15_REG_OFFSET(UVD, 0, mmUVD_LMI_JRBC_RB_MEM_RD_64BIT_BAR_HIGH), 0, 0, PACKETJ_TYPE0));
1699 amdgpu_ring_write(ring, upper_32_bits(addr));
1701 amdgpu_ring_write(ring,
1702 PACKETJ(0, 0, PACKETJ_CONDITION_CHECK3, PACKETJ_TYPE2));
1703 amdgpu_ring_write(ring, 0xffffffff);
1705 amdgpu_ring_write(ring,
1706 PACKETJ(SOC15_REG_OFFSET(UVD, 0, mmUVD_JRBC_EXTERNAL_REG_BASE), 0, 0, PACKETJ_TYPE0));
1707 amdgpu_ring_write(ring, 0x3fbc);
1709 amdgpu_ring_write(ring,
1710 PACKETJ(0, 0, 0, PACKETJ_TYPE0));
1711 amdgpu_ring_write(ring, 0x1);
1714 amdgpu_ring_write(ring, PACKETJ(0, 0, 0, PACKETJ_TYPE7));
1715 amdgpu_ring_write(ring, 0);
1719 * vcn_v1_0_jpeg_ring_emit_ib - execute indirect buffer
1721 * @ring: amdgpu_ring pointer
1722 * @ib: indirect buffer to execute
1724 * Write ring commands to execute the indirect buffer.
1726 static void vcn_v1_0_jpeg_ring_emit_ib(struct amdgpu_ring *ring,
1727 struct amdgpu_job *job,
1728 struct amdgpu_ib *ib,
1731 struct amdgpu_device *adev = ring->adev;
1732 unsigned vmid = AMDGPU_JOB_GET_VMID(job);
1734 amdgpu_ring_write(ring,
1735 PACKETJ(SOC15_REG_OFFSET(UVD, 0, mmUVD_LMI_JRBC_IB_VMID), 0, 0, PACKETJ_TYPE0));
1736 amdgpu_ring_write(ring, (vmid | (vmid << 4)));
1738 amdgpu_ring_write(ring,
1739 PACKETJ(SOC15_REG_OFFSET(UVD, 0, mmUVD_LMI_JPEG_VMID), 0, 0, PACKETJ_TYPE0));
1740 amdgpu_ring_write(ring, (vmid | (vmid << 4)));
1742 amdgpu_ring_write(ring,
1743 PACKETJ(SOC15_REG_OFFSET(UVD, 0, mmUVD_LMI_JRBC_IB_64BIT_BAR_LOW), 0, 0, PACKETJ_TYPE0));
1744 amdgpu_ring_write(ring, lower_32_bits(ib->gpu_addr));
1746 amdgpu_ring_write(ring,
1747 PACKETJ(SOC15_REG_OFFSET(UVD, 0, mmUVD_LMI_JRBC_IB_64BIT_BAR_HIGH), 0, 0, PACKETJ_TYPE0));
1748 amdgpu_ring_write(ring, upper_32_bits(ib->gpu_addr));
1750 amdgpu_ring_write(ring,
1751 PACKETJ(SOC15_REG_OFFSET(UVD, 0, mmUVD_JRBC_IB_SIZE), 0, 0, PACKETJ_TYPE0));
1752 amdgpu_ring_write(ring, ib->length_dw);
1754 amdgpu_ring_write(ring,
1755 PACKETJ(SOC15_REG_OFFSET(UVD, 0, mmUVD_LMI_JRBC_RB_MEM_RD_64BIT_BAR_LOW), 0, 0, PACKETJ_TYPE0));
1756 amdgpu_ring_write(ring, lower_32_bits(ring->gpu_addr));
1758 amdgpu_ring_write(ring,
1759 PACKETJ(SOC15_REG_OFFSET(UVD, 0, mmUVD_LMI_JRBC_RB_MEM_RD_64BIT_BAR_HIGH), 0, 0, PACKETJ_TYPE0));
1760 amdgpu_ring_write(ring, upper_32_bits(ring->gpu_addr));
1762 amdgpu_ring_write(ring,
1763 PACKETJ(0, 0, PACKETJ_CONDITION_CHECK0, PACKETJ_TYPE2));
1764 amdgpu_ring_write(ring, 0);
1766 amdgpu_ring_write(ring,
1767 PACKETJ(SOC15_REG_OFFSET(UVD, 0, mmUVD_JRBC_RB_COND_RD_TIMER), 0, 0, PACKETJ_TYPE0));
1768 amdgpu_ring_write(ring, 0x01400200);
1770 amdgpu_ring_write(ring,
1771 PACKETJ(SOC15_REG_OFFSET(UVD, 0, mmUVD_JRBC_RB_REF_DATA), 0, 0, PACKETJ_TYPE0));
1772 amdgpu_ring_write(ring, 0x2);
1774 amdgpu_ring_write(ring,
1775 PACKETJ(SOC15_REG_OFFSET(UVD, 0, mmUVD_JRBC_STATUS), 0, PACKETJ_CONDITION_CHECK3, PACKETJ_TYPE3));
1776 amdgpu_ring_write(ring, 0x2);
static void vcn_v1_0_jpeg_ring_emit_reg_wait(struct amdgpu_ring *ring,
					     uint32_t reg, uint32_t val,
					     uint32_t mask)
{
	struct amdgpu_device *adev = ring->adev;
	uint32_t reg_offset = (reg << 2);

	amdgpu_ring_write(ring,
		PACKETJ(SOC15_REG_OFFSET(UVD, 0, mmUVD_JRBC_RB_COND_RD_TIMER), 0, 0, PACKETJ_TYPE0));
	amdgpu_ring_write(ring, 0x01400200);

	amdgpu_ring_write(ring,
		PACKETJ(SOC15_REG_OFFSET(UVD, 0, mmUVD_JRBC_RB_REF_DATA), 0, 0, PACKETJ_TYPE0));
	amdgpu_ring_write(ring, val);

	amdgpu_ring_write(ring,
		PACKETJ(SOC15_REG_OFFSET(UVD, 0, mmUVD_JRBC_EXTERNAL_REG_BASE), 0, 0, PACKETJ_TYPE0));
	if (((reg_offset >= 0x1f800) && (reg_offset <= 0x21fff)) ||
	    ((reg_offset >= 0x1e000) && (reg_offset <= 0x1e1ff))) {
		amdgpu_ring_write(ring, 0);
		amdgpu_ring_write(ring,
			PACKETJ((reg_offset >> 2), 0, 0, PACKETJ_TYPE3));
	} else {
		amdgpu_ring_write(ring, reg_offset);
		amdgpu_ring_write(ring,
			PACKETJ(0, 0, 0, PACKETJ_TYPE3));
	}
	amdgpu_ring_write(ring, mask);
}
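
/**
 * vcn_v1_0_jpeg_ring_emit_vm_flush - emit a VM flush on the JPEG ring
 *
 * @ring: amdgpu_ring pointer
 * @vmid: VM id to flush
 * @pd_addr: page directory address
 *
 * Emit the GMC TLB flush and then wait for the hub page table base register
 * to reflect the new address before continuing.
 */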
static void vcn_v1_0_jpeg_ring_emit_vm_flush(struct amdgpu_ring *ring,
					     unsigned vmid, uint64_t pd_addr)
{
	struct amdgpu_vmhub *hub = &ring->adev->vmhub[ring->funcs->vmhub];
	uint32_t data0, data1, mask;

	pd_addr = amdgpu_gmc_emit_flush_gpu_tlb(ring, vmid, pd_addr);

	/* wait for register write */
	data0 = hub->ctx0_ptb_addr_lo32 + vmid * 2;
	data1 = lower_32_bits(pd_addr);
	mask = 0xffffffff;
	vcn_v1_0_jpeg_ring_emit_reg_wait(ring, data0, data1, mask);
}
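
/**
 * vcn_v1_0_jpeg_ring_emit_wreg - emit a register write on the JPEG ring
 *
 * @ring: amdgpu_ring pointer
 * @reg: register to write (SOC15 register offset)
 * @val: value to write
 *
 * The write goes either directly through a TYPE0 packet or indirectly via
 * UVD_JRBC_EXTERNAL_REG_BASE, depending on whether the register falls inside
 * the directly addressable JRBC ranges checked below.
 */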
static void vcn_v1_0_jpeg_ring_emit_wreg(struct amdgpu_ring *ring,
					 uint32_t reg, uint32_t val)
{
	struct amdgpu_device *adev = ring->adev;
	uint32_t reg_offset = (reg << 2);

	amdgpu_ring_write(ring,
		PACKETJ(SOC15_REG_OFFSET(UVD, 0, mmUVD_JRBC_EXTERNAL_REG_BASE), 0, 0, PACKETJ_TYPE0));
	if (((reg_offset >= 0x1f800) && (reg_offset <= 0x21fff)) ||
	    ((reg_offset >= 0x1e000) && (reg_offset <= 0x1e1ff))) {
		amdgpu_ring_write(ring, 0);
		amdgpu_ring_write(ring,
			PACKETJ((reg_offset >> 2), 0, 0, PACKETJ_TYPE0));
	} else {
		amdgpu_ring_write(ring, reg_offset);
		amdgpu_ring_write(ring,
			PACKETJ(0, 0, 0, PACKETJ_TYPE0));
	}
	amdgpu_ring_write(ring, val);
}
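
/**
 * vcn_v1_0_jpeg_ring_nop - insert NOP packets on the JPEG ring
 *
 * @ring: amdgpu_ring pointer
 * @count: number of dwords to fill
 *
 * Pad the ring with TYPE6 NOP packets; @count must be even since each NOP
 * consumes two dwords.
 */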
static void vcn_v1_0_jpeg_ring_nop(struct amdgpu_ring *ring, uint32_t count)
{
	int i;

	WARN_ON(ring->wptr % 2 || count % 2);

	for (i = 0; i < count / 2; i++) {
		amdgpu_ring_write(ring, PACKETJ(0, 0, 0, PACKETJ_TYPE6));
		amdgpu_ring_write(ring, 0);
	}
}
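
/**
 * vcn_v1_0_jpeg_ring_patch_wreg - patch a register write into the ring buffer
 *
 * @ring: amdgpu_ring pointer
 * @ptr: current dword position in the ring buffer, advanced on return
 * @reg_offset: byte offset of the register to write
 * @val: value to write
 *
 * Unlike the emit helpers this writes the packets directly into the ring
 * buffer memory instead of going through amdgpu_ring_write().
 */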
static void vcn_v1_0_jpeg_ring_patch_wreg(struct amdgpu_ring *ring, uint32_t *ptr, uint32_t reg_offset, uint32_t val)
{
	struct amdgpu_device *adev = ring->adev;
	ring->ring[(*ptr)++] = PACKETJ(SOC15_REG_OFFSET(UVD, 0, mmUVD_JRBC_EXTERNAL_REG_BASE), 0, 0, PACKETJ_TYPE0);
	if (((reg_offset >= 0x1f800) && (reg_offset <= 0x21fff)) ||
	    ((reg_offset >= 0x1e000) && (reg_offset <= 0x1e1ff))) {
		ring->ring[(*ptr)++] = 0;
		ring->ring[(*ptr)++] = PACKETJ((reg_offset >> 2), 0, 0, PACKETJ_TYPE0);
	} else {
		ring->ring[(*ptr)++] = reg_offset;
		ring->ring[(*ptr)++] = PACKETJ(0, 0, 0, PACKETJ_TYPE0);
	}
	ring->ring[(*ptr)++] = val;
}
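
/**
 * vcn_v1_0_jpeg_ring_set_patch_ring - pre-patch the JPEG decode ring
 *
 * @ring: amdgpu_ring pointer
 * @ptr: dword offset in the ring buffer at which to start patching
 *
 * Fill the ring with the fixed packet sequence the JRBC engine expects:
 * re-program the ring base address, switch RB_CNTL into NO_FETCH mode, poll
 * for the engine to settle, reset the read pointer and finally re-enable
 * fetching.
 */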
static void vcn_v1_0_jpeg_ring_set_patch_ring(struct amdgpu_ring *ring, uint32_t ptr)
{
	struct amdgpu_device *adev = ring->adev;

	uint32_t reg, reg_offset, val, mask, i;

	/* 1st: program mmUVD_LMI_JRBC_RB_MEM_RD_64BIT_BAR_LOW */
	reg = SOC15_REG_OFFSET(UVD, 0, mmUVD_LMI_JRBC_RB_MEM_RD_64BIT_BAR_LOW);
	reg_offset = (reg << 2);
	val = lower_32_bits(ring->gpu_addr);
	vcn_v1_0_jpeg_ring_patch_wreg(ring, &ptr, reg_offset, val);

	/* 2nd: program mmUVD_LMI_JRBC_RB_MEM_RD_64BIT_BAR_HIGH */
	reg = SOC15_REG_OFFSET(UVD, 0, mmUVD_LMI_JRBC_RB_MEM_RD_64BIT_BAR_HIGH);
	reg_offset = (reg << 2);
	val = upper_32_bits(ring->gpu_addr);
	vcn_v1_0_jpeg_ring_patch_wreg(ring, &ptr, reg_offset, val);

	/* 3rd to 5th: issue MEM_READ commands */
	for (i = 0; i <= 2; i++) {
		ring->ring[ptr++] = PACKETJ(0, 0, 0, PACKETJ_TYPE2);
		ring->ring[ptr++] = 0;
	}

	/* 6th: program mmUVD_JRBC_RB_CNTL register to enable NO_FETCH and RPTR write ability */
	reg = SOC15_REG_OFFSET(UVD, 0, mmUVD_JRBC_RB_CNTL);
	reg_offset = (reg << 2);
	val = 0x13234;
	vcn_v1_0_jpeg_ring_patch_wreg(ring, &ptr, reg_offset, val);

	/* 7th: program mmUVD_JRBC_RB_REF_DATA */
	reg = SOC15_REG_OFFSET(UVD, 0, mmUVD_JRBC_RB_REF_DATA);
	reg_offset = (reg << 2);
	val = 0x1;
	vcn_v1_0_jpeg_ring_patch_wreg(ring, &ptr, reg_offset, val);

	/* 8th: issue conditional register read mmUVD_JRBC_RB_CNTL */
	reg = SOC15_REG_OFFSET(UVD, 0, mmUVD_JRBC_RB_CNTL);
	reg_offset = (reg << 2);
	val = 0x1;
	mask = 0x1;

	ring->ring[ptr++] = PACKETJ(SOC15_REG_OFFSET(UVD, 0, mmUVD_JRBC_RB_COND_RD_TIMER), 0, 0, PACKETJ_TYPE0);
	ring->ring[ptr++] = 0x01400200;
	ring->ring[ptr++] = PACKETJ(SOC15_REG_OFFSET(UVD, 0, mmUVD_JRBC_RB_REF_DATA), 0, 0, PACKETJ_TYPE0);
	ring->ring[ptr++] = val;
	ring->ring[ptr++] = PACKETJ(SOC15_REG_OFFSET(UVD, 0, mmUVD_JRBC_EXTERNAL_REG_BASE), 0, 0, PACKETJ_TYPE0);
	if (((reg_offset >= 0x1f800) && (reg_offset <= 0x21fff)) ||
	    ((reg_offset >= 0x1e000) && (reg_offset <= 0x1e1ff))) {
		ring->ring[ptr++] = 0;
		ring->ring[ptr++] = PACKETJ((reg_offset >> 2), 0, 0, PACKETJ_TYPE3);
	} else {
		ring->ring[ptr++] = reg_offset;
		ring->ring[ptr++] = PACKETJ(0, 0, 0, PACKETJ_TYPE3);
	}
	ring->ring[ptr++] = mask;

	/* 9th to 21st: insert no-op */
	for (i = 0; i <= 12; i++) {
		ring->ring[ptr++] = PACKETJ(0, 0, 0, PACKETJ_TYPE6);
		ring->ring[ptr++] = 0;
	}

	/* 22nd: reset mmUVD_JRBC_RB_RPTR */
	reg = SOC15_REG_OFFSET(UVD, 0, mmUVD_JRBC_RB_RPTR);
	reg_offset = (reg << 2);
	val = 0;
	vcn_v1_0_jpeg_ring_patch_wreg(ring, &ptr, reg_offset, val);

	/* 23rd: program mmUVD_JRBC_RB_CNTL to disable no_fetch */
	reg = SOC15_REG_OFFSET(UVD, 0, mmUVD_JRBC_RB_CNTL);
	reg_offset = (reg << 2);
	val = 0x12234;
	vcn_v1_0_jpeg_ring_patch_wreg(ring, &ptr, reg_offset, val);
}
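
/**
 * vcn_v1_0_set_interrupt_state - set VCN block interrupt state
 *
 * Nothing needs to be programmed for VCN 1.0; the callback exists only to
 * satisfy the amdgpu_irq_src_funcs interface and always returns 0.
 */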
static int vcn_v1_0_set_interrupt_state(struct amdgpu_device *adev,
					struct amdgpu_irq_src *source,
					unsigned type,
					enum amdgpu_interrupt_state state)
{
	return 0;
}
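
/**
 * vcn_v1_0_process_interrupt - process a VCN interrupt
 *
 * @adev: amdgpu_device pointer
 * @source: interrupt source this entry came from
 * @entry: interrupt vector ring entry
 *
 * Dispatch fence processing to the decode, encode or JPEG ring based on the
 * source id of the interrupt.
 */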
static int vcn_v1_0_process_interrupt(struct amdgpu_device *adev,
				      struct amdgpu_irq_src *source,
				      struct amdgpu_iv_entry *entry)
{
	DRM_DEBUG("IH: VCN TRAP\n");

	switch (entry->src_id) {
	case 124:	/* VCN_1_0__SRCID__UVD_SYSTEM_MESSAGE_INTERRUPT */
		amdgpu_fence_process(&adev->vcn.ring_dec);
		break;
	case 119:	/* VCN_1_0__SRCID__UVD_ENC_GENERAL_PURPOSE */
		amdgpu_fence_process(&adev->vcn.ring_enc[0]);
		break;
	case 120:	/* UVD encode low latency */
		amdgpu_fence_process(&adev->vcn.ring_enc[1]);
		break;
	case 126:	/* JPEG decode, as registered in sw_init */
		amdgpu_fence_process(&adev->vcn.ring_jpeg);
		break;
	default:
		DRM_ERROR("Unhandled interrupt: %d %d\n",
			  entry->src_id, entry->src_data[0]);
		break;
	}

	return 0;
}
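
/**
 * vcn_v1_0_dec_ring_insert_nop - insert NOP packets on the decode ring
 *
 * @ring: amdgpu_ring pointer
 * @count: number of dwords to fill
 *
 * Pad the decode ring with UVD_NO_OP register writes; @count must be even.
 */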
static void vcn_v1_0_dec_ring_insert_nop(struct amdgpu_ring *ring, uint32_t count)
{
	struct amdgpu_device *adev = ring->adev;
	int i;

	WARN_ON(ring->wptr % 2 || count % 2);

	for (i = 0; i < count / 2; i++) {
		amdgpu_ring_write(ring, PACKET0(SOC15_REG_OFFSET(UVD, 0, mmUVD_NO_OP), 0));
		amdgpu_ring_write(ring, 0);
	}
}

static int vcn_v1_0_set_powergating_state(void *handle,
					  enum amd_powergating_state state)
{
	/* This doesn't actually powergate the VCN block.
	 * That's done in the dpm code via the SMC.  This
	 * just re-inits the block as necessary.  The actual
	 * gating still happens in the dpm code.  We should
	 * revisit this when there is a cleaner line between
	 * the smc and the hw blocks.
	 */
	int ret;
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	if (state == adev->vcn.cur_state)
		return 0;

	if (state == AMD_PG_STATE_GATE)
		ret = vcn_v1_0_stop(adev);
	else
		ret = vcn_v1_0_start(adev);

	if (!ret)
		adev->vcn.cur_state = state;

	return ret;
}

static const struct amd_ip_funcs vcn_v1_0_ip_funcs = {
	.name = "vcn_v1_0",
	.early_init = vcn_v1_0_early_init,
	.late_init = NULL,
	.sw_init = vcn_v1_0_sw_init,
	.sw_fini = vcn_v1_0_sw_fini,
	.hw_init = vcn_v1_0_hw_init,
	.hw_fini = vcn_v1_0_hw_fini,
	.suspend = vcn_v1_0_suspend,
	.resume = vcn_v1_0_resume,
	.is_idle = vcn_v1_0_is_idle,
	.wait_for_idle = vcn_v1_0_wait_for_idle,
	.check_soft_reset = NULL /* vcn_v1_0_check_soft_reset */,
	.pre_soft_reset = NULL /* vcn_v1_0_pre_soft_reset */,
	.soft_reset = NULL /* vcn_v1_0_soft_reset */,
	.post_soft_reset = NULL /* vcn_v1_0_post_soft_reset */,
	.set_clockgating_state = vcn_v1_0_set_clockgating_state,
	.set_powergating_state = vcn_v1_0_set_powergating_state,
};

static const struct amdgpu_ring_funcs vcn_v1_0_dec_ring_vm_funcs = {
	.type = AMDGPU_RING_TYPE_VCN_DEC,
	.align_mask = 0xf,
	.support_64bit_ptrs = false,
	.vmhub = AMDGPU_MMHUB,
	.get_rptr = vcn_v1_0_dec_ring_get_rptr,
	.get_wptr = vcn_v1_0_dec_ring_get_wptr,
	.set_wptr = vcn_v1_0_dec_ring_set_wptr,
	.emit_frame_size =
		6 + 6 + /* hdp invalidate / flush */
		SOC15_FLUSH_GPU_TLB_NUM_WREG * 6 +
		SOC15_FLUSH_GPU_TLB_NUM_REG_WAIT * 8 +
		8 + /* vcn_v1_0_dec_ring_emit_vm_flush */
		14 + 14 + /* vcn_v1_0_dec_ring_emit_fence x2 vm fence */
		6,
	.emit_ib_size = 8, /* vcn_v1_0_dec_ring_emit_ib */
	.emit_ib = vcn_v1_0_dec_ring_emit_ib,
	.emit_fence = vcn_v1_0_dec_ring_emit_fence,
	.emit_vm_flush = vcn_v1_0_dec_ring_emit_vm_flush,
	.test_ring = amdgpu_vcn_dec_ring_test_ring,
	.test_ib = amdgpu_vcn_dec_ring_test_ib,
	.insert_nop = vcn_v1_0_dec_ring_insert_nop,
	.insert_start = vcn_v1_0_dec_ring_insert_start,
	.insert_end = vcn_v1_0_dec_ring_insert_end,
	.pad_ib = amdgpu_ring_generic_pad_ib,
	.begin_use = amdgpu_vcn_ring_begin_use,
	.end_use = amdgpu_vcn_ring_end_use,
	.emit_wreg = vcn_v1_0_dec_ring_emit_wreg,
	.emit_reg_wait = vcn_v1_0_dec_ring_emit_reg_wait,
	.emit_reg_write_reg_wait = amdgpu_ring_emit_reg_write_reg_wait_helper,
};

static const struct amdgpu_ring_funcs vcn_v1_0_enc_ring_vm_funcs = {
	.type = AMDGPU_RING_TYPE_VCN_ENC,
	.align_mask = 0x3f,
	.nop = VCN_ENC_CMD_NO_OP,
	.support_64bit_ptrs = false,
	.vmhub = AMDGPU_MMHUB,
	.get_rptr = vcn_v1_0_enc_ring_get_rptr,
	.get_wptr = vcn_v1_0_enc_ring_get_wptr,
	.set_wptr = vcn_v1_0_enc_ring_set_wptr,
	.emit_frame_size =
		SOC15_FLUSH_GPU_TLB_NUM_WREG * 3 +
		SOC15_FLUSH_GPU_TLB_NUM_REG_WAIT * 4 +
		4 + /* vcn_v1_0_enc_ring_emit_vm_flush */
		5 + 5 + /* vcn_v1_0_enc_ring_emit_fence x2 vm fence */
		1, /* vcn_v1_0_enc_ring_insert_end */
	.emit_ib_size = 5, /* vcn_v1_0_enc_ring_emit_ib */
	.emit_ib = vcn_v1_0_enc_ring_emit_ib,
	.emit_fence = vcn_v1_0_enc_ring_emit_fence,
	.emit_vm_flush = vcn_v1_0_enc_ring_emit_vm_flush,
	.test_ring = amdgpu_vcn_enc_ring_test_ring,
	.test_ib = amdgpu_vcn_enc_ring_test_ib,
	.insert_nop = amdgpu_ring_insert_nop,
	.insert_end = vcn_v1_0_enc_ring_insert_end,
	.pad_ib = amdgpu_ring_generic_pad_ib,
	.begin_use = amdgpu_vcn_ring_begin_use,
	.end_use = amdgpu_vcn_ring_end_use,
	.emit_wreg = vcn_v1_0_enc_ring_emit_wreg,
	.emit_reg_wait = vcn_v1_0_enc_ring_emit_reg_wait,
	.emit_reg_write_reg_wait = amdgpu_ring_emit_reg_write_reg_wait_helper,
};

static const struct amdgpu_ring_funcs vcn_v1_0_jpeg_ring_vm_funcs = {
	.type = AMDGPU_RING_TYPE_VCN_JPEG,
	.align_mask = 0xf,
	.nop = PACKET0(0x81ff, 0),
	.support_64bit_ptrs = false,
	.vmhub = AMDGPU_MMHUB,
	.extra_dw = 64,
	.get_rptr = vcn_v1_0_jpeg_ring_get_rptr,
	.get_wptr = vcn_v1_0_jpeg_ring_get_wptr,
	.set_wptr = vcn_v1_0_jpeg_ring_set_wptr,
	.emit_frame_size =
		6 + 6 + /* hdp invalidate / flush */
		SOC15_FLUSH_GPU_TLB_NUM_WREG * 6 +
		SOC15_FLUSH_GPU_TLB_NUM_REG_WAIT * 8 +
		8 + /* vcn_v1_0_jpeg_ring_emit_vm_flush */
		26 + 26 + /* vcn_v1_0_jpeg_ring_emit_fence x2 vm fence */
		6,
	.emit_ib_size = 22, /* vcn_v1_0_jpeg_ring_emit_ib */
	.emit_ib = vcn_v1_0_jpeg_ring_emit_ib,
	.emit_fence = vcn_v1_0_jpeg_ring_emit_fence,
	.emit_vm_flush = vcn_v1_0_jpeg_ring_emit_vm_flush,
	.test_ring = amdgpu_vcn_jpeg_ring_test_ring,
	.test_ib = amdgpu_vcn_jpeg_ring_test_ib,
	.insert_nop = vcn_v1_0_jpeg_ring_nop,
	.insert_start = vcn_v1_0_jpeg_ring_insert_start,
	.insert_end = vcn_v1_0_jpeg_ring_insert_end,
	.pad_ib = amdgpu_ring_generic_pad_ib,
	.begin_use = amdgpu_vcn_ring_begin_use,
	.end_use = amdgpu_vcn_ring_end_use,
	.emit_wreg = vcn_v1_0_jpeg_ring_emit_wreg,
	.emit_reg_wait = vcn_v1_0_jpeg_ring_emit_reg_wait,
	.emit_reg_write_reg_wait = amdgpu_ring_emit_reg_write_reg_wait_helper,
};

static void vcn_v1_0_set_dec_ring_funcs(struct amdgpu_device *adev)
{
	adev->vcn.ring_dec.funcs = &vcn_v1_0_dec_ring_vm_funcs;
	DRM_INFO("VCN decode is enabled in VM mode\n");
}

static void vcn_v1_0_set_enc_ring_funcs(struct amdgpu_device *adev)
{
	int i;

	for (i = 0; i < adev->vcn.num_enc_rings; ++i)
		adev->vcn.ring_enc[i].funcs = &vcn_v1_0_enc_ring_vm_funcs;

	DRM_INFO("VCN encode is enabled in VM mode\n");
}

static void vcn_v1_0_set_jpeg_ring_funcs(struct amdgpu_device *adev)
{
	adev->vcn.ring_jpeg.funcs = &vcn_v1_0_jpeg_ring_vm_funcs;
	DRM_INFO("VCN jpeg decode is enabled in VM mode\n");
}

static const struct amdgpu_irq_src_funcs vcn_v1_0_irq_funcs = {
	.set = vcn_v1_0_set_interrupt_state,
	.process = vcn_v1_0_process_interrupt,
};

static void vcn_v1_0_set_irq_funcs(struct amdgpu_device *adev)
{
	adev->vcn.irq.num_types = adev->vcn.num_enc_rings + 2;
	adev->vcn.irq.funcs = &vcn_v1_0_irq_funcs;
}

const struct amdgpu_ip_block_version vcn_v1_0_ip_block =
{
	.type = AMD_IP_BLOCK_TYPE_VCN,
	.major = 1,
	.minor = 0,
	.rev = 0,
	.funcs = &vcn_v1_0_ip_funcs,
};
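
/*
 * For reference, a minimal sketch of how this IP block is expected to be
 * wired up by the SOC15 common code; the exact call site and condition are
 * an assumption here, not part of this file:
 *
 *	if (adev->asic_type == CHIP_RAVEN)
 *		amdgpu_device_ip_block_add(adev, &vcn_v1_0_ip_block);
 *
 * amdgpu_device_ip_block_add() appends the block to the device's IP block
 * list, after which the amd_ip_funcs callbacks above are invoked in the
 * usual early_init/sw_init/hw_init order.
 */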