/*
 * Copyright 2016 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 */

#include <linux/firmware.h>

#include "amdgpu.h"
#include "amdgpu_vcn.h"
#include "soc15.h"
#include "soc15d.h"
#include "soc15_common.h"

#include "vcn/vcn_1_0_offset.h"
#include "vcn/vcn_1_0_sh_mask.h"
#include "hdp/hdp_4_0_offset.h"
#include "mmhub/mmhub_9_1_offset.h"
#include "mmhub/mmhub_9_1_sh_mask.h"

#include "ivsrcid/vcn/irqsrcs_vcn_1_0.h"

#define mmUVD_RBC_XX_IB_REG_CHECK	0x05ab
#define mmUVD_RBC_XX_IB_REG_CHECK_BASE_IDX	1
#define mmUVD_REG_XX_MASK	0x05ac
#define mmUVD_REG_XX_MASK_BASE_IDX	1
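
/*
 * The UVD_RBC_XX_IB_REG_CHECK/UVD_REG_XX_MASK offsets above are not part of
 * the generated vcn_1_0 register headers, hence the local definitions; they
 * are apparently used during VCPU bring-up to enable register-range checking
 * of command-stream IBs (see vcn_v1_0_start_*_mode() below).
 */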

static int vcn_v1_0_stop(struct amdgpu_device *adev);
static void vcn_v1_0_set_dec_ring_funcs(struct amdgpu_device *adev);
static void vcn_v1_0_set_enc_ring_funcs(struct amdgpu_device *adev);
static void vcn_v1_0_set_jpeg_ring_funcs(struct amdgpu_device *adev);
static void vcn_v1_0_set_irq_funcs(struct amdgpu_device *adev);
static void vcn_v1_0_jpeg_ring_set_patch_ring(struct amdgpu_ring *ring, uint32_t ptr);
static int vcn_v1_0_set_powergating_state(void *handle, enum amd_powergating_state state);
static int vcn_v1_0_pause_dpg_mode(struct amdgpu_device *adev,
		struct dpg_pause_state *new_state);

/**
 * vcn_v1_0_early_init - set function pointers
 *
 * @handle: amdgpu_device pointer
 *
 * Set ring and irq function pointers
 */
static int vcn_v1_0_early_init(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	adev->vcn.num_vcn_inst = 1;
	adev->vcn.num_enc_rings = 2;

	vcn_v1_0_set_dec_ring_funcs(adev);
	vcn_v1_0_set_enc_ring_funcs(adev);
	vcn_v1_0_set_jpeg_ring_funcs(adev);
	vcn_v1_0_set_irq_funcs(adev);

	return 0;
}
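
/*
 * These handlers are wired into the amd_ip_funcs table for this IP block
 * (outside this excerpt); a rough sketch of that wiring, assuming the usual
 * amdgpu convention, is:
 *
 *	static const struct amd_ip_funcs vcn_v1_0_ip_funcs = {
 *		.name		= "vcn_v1_0",
 *		.early_init	= vcn_v1_0_early_init,
 *		.sw_init	= vcn_v1_0_sw_init,
 *		.hw_init	= vcn_v1_0_hw_init,
 *		...
 *	};
 */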

/**
 * vcn_v1_0_sw_init - sw init for VCN block
 *
 * @handle: amdgpu_device pointer
 *
 * Load firmware and sw initialization
 */
static int vcn_v1_0_sw_init(void *handle)
{
	struct amdgpu_ring *ring;
	int i, r;
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	/* VCN DEC TRAP */
	r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_VCN,
			VCN_1_0__SRCID__UVD_SYSTEM_MESSAGE_INTERRUPT, &adev->vcn.inst->irq);
	if (r)
		return r;

	/* VCN ENC TRAP */
	for (i = 0; i < adev->vcn.num_enc_rings; ++i) {
		r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_VCN, i + VCN_1_0__SRCID__UVD_ENC_GENERAL_PURPOSE,
					&adev->vcn.inst->irq);
		if (r)
			return r;
	}

	/* VCN JPEG TRAP */
	r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_VCN, 126, &adev->vcn.inst->irq);
	if (r)
		return r;

	r = amdgpu_vcn_sw_init(adev);
	if (r)
		return r;

	if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) {
		const struct common_firmware_header *hdr;
		hdr = (const struct common_firmware_header *)adev->vcn.fw->data;
		adev->firmware.ucode[AMDGPU_UCODE_ID_VCN].ucode_id = AMDGPU_UCODE_ID_VCN;
		adev->firmware.ucode[AMDGPU_UCODE_ID_VCN].fw = adev->vcn.fw;
		adev->firmware.fw_size +=
			ALIGN(le32_to_cpu(hdr->ucode_size_bytes), PAGE_SIZE);
		DRM_INFO("PSP loading VCN firmware\n");
	}

	r = amdgpu_vcn_resume(adev);
	if (r)
		return r;

	ring = &adev->vcn.inst->ring_dec;
	sprintf(ring->name, "vcn_dec");
	r = amdgpu_ring_init(adev, ring, 512, &adev->vcn.inst->irq, 0);
	if (r)
		return r;

	adev->vcn.internal.scratch9 = adev->vcn.inst->external.scratch9 =
		SOC15_REG_OFFSET(UVD, 0, mmUVD_SCRATCH9);
	adev->vcn.internal.data0 = adev->vcn.inst->external.data0 =
		SOC15_REG_OFFSET(UVD, 0, mmUVD_GPCOM_VCPU_DATA0);
	adev->vcn.internal.data1 = adev->vcn.inst->external.data1 =
		SOC15_REG_OFFSET(UVD, 0, mmUVD_GPCOM_VCPU_DATA1);
	adev->vcn.internal.cmd = adev->vcn.inst->external.cmd =
		SOC15_REG_OFFSET(UVD, 0, mmUVD_GPCOM_VCPU_CMD);
	adev->vcn.internal.nop = adev->vcn.inst->external.nop =
		SOC15_REG_OFFSET(UVD, 0, mmUVD_NO_OP);

	for (i = 0; i < adev->vcn.num_enc_rings; ++i) {
		ring = &adev->vcn.inst->ring_enc[i];
		sprintf(ring->name, "vcn_enc%d", i);
		r = amdgpu_ring_init(adev, ring, 512, &adev->vcn.inst->irq, 0);
		if (r)
			return r;
	}

	ring = &adev->vcn.inst->ring_jpeg;
	sprintf(ring->name, "vcn_jpeg");
	r = amdgpu_ring_init(adev, ring, 512, &adev->vcn.inst->irq, 0);
	if (r)
		return r;

	adev->vcn.pause_dpg_mode = vcn_v1_0_pause_dpg_mode;
	adev->vcn.internal.jpeg_pitch = adev->vcn.inst->external.jpeg_pitch =
		SOC15_REG_OFFSET(UVD, 0, mmUVD_JPEG_PITCH);

	return 0;
}
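
/*
 * Note: the internal.* offsets are what command-stream patching uses to
 * refer to these registers inside ring packets, while the matching
 * external.* offsets are used for direct MMIO access; on VCN 1.0 the two
 * happen to be identical, which is presumably why both are assigned from
 * the same SOC15_REG_OFFSET() value above.
 */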

/**
 * vcn_v1_0_sw_fini - sw fini for VCN block
 *
 * @handle: amdgpu_device pointer
 *
 * VCN suspend and free up sw allocation
 */
static int vcn_v1_0_sw_fini(void *handle)
{
	int r;
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	r = amdgpu_vcn_suspend(adev);
	if (r)
		return r;

	r = amdgpu_vcn_sw_fini(adev);

	return r;
}

/**
 * vcn_v1_0_hw_init - start and test VCN block
 *
 * @handle: amdgpu_device pointer
 *
 * Initialize the hardware, boot up the VCPU and do some testing
 */
static int vcn_v1_0_hw_init(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	struct amdgpu_ring *ring = &adev->vcn.inst->ring_dec;
	int i, r;

	r = amdgpu_ring_test_helper(ring);
	if (r)
		goto done;

	for (i = 0; i < adev->vcn.num_enc_rings; ++i) {
		ring = &adev->vcn.inst->ring_enc[i];
		r = amdgpu_ring_test_helper(ring);
		if (r)
			goto done;
	}

	ring = &adev->vcn.inst->ring_jpeg;
	r = amdgpu_ring_test_helper(ring);
	if (r)
		goto done;

done:
	if (!r)
		DRM_INFO("VCN decode and encode initialized successfully (under %s).\n",
			(adev->pg_flags & AMD_PG_SUPPORT_VCN_DPG) ? "DPG Mode" : "SPG Mode");

	return r;
}
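
/*
 * amdgpu_ring_test_helper() submits a small test packet on the given ring
 * and, based on the result, flips ring->sched.ready so the GPU scheduler
 * only feeds rings that actually came up.
 */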

/**
 * vcn_v1_0_hw_fini - stop the hardware block
 *
 * @handle: amdgpu_device pointer
 *
 * Stop the VCN block, mark ring as not ready any more
 */
static int vcn_v1_0_hw_fini(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	struct amdgpu_ring *ring = &adev->vcn.inst->ring_dec;

	if ((adev->pg_flags & AMD_PG_SUPPORT_VCN_DPG) ||
		RREG32_SOC15(VCN, 0, mmUVD_STATUS))
		vcn_v1_0_set_powergating_state(adev, AMD_PG_STATE_GATE);

	ring->sched.ready = false;

	return 0;
}

/**
 * vcn_v1_0_suspend - suspend VCN block
 *
 * @handle: amdgpu_device pointer
 *
 * HW fini and suspend VCN block
 */
static int vcn_v1_0_suspend(void *handle)
{
	int r;
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	r = vcn_v1_0_hw_fini(adev);
	if (r)
		return r;

	r = amdgpu_vcn_suspend(adev);

	return r;
}

/**
 * vcn_v1_0_resume - resume VCN block
 *
 * @handle: amdgpu_device pointer
 *
 * Resume firmware and hw init VCN block
 */
static int vcn_v1_0_resume(void *handle)
{
	int r;
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	r = amdgpu_vcn_resume(adev);
	if (r)
		return r;

	r = vcn_v1_0_hw_init(adev);

	return r;
}

/**
 * vcn_v1_0_mc_resume_spg_mode - memory controller programming
 *
 * @adev: amdgpu_device pointer
 *
 * Let the VCN memory controller know its offsets
 */
static void vcn_v1_0_mc_resume_spg_mode(struct amdgpu_device *adev)
{
	uint32_t size = AMDGPU_GPU_PAGE_ALIGN(adev->vcn.fw->size + 4);
	uint32_t offset;

	/* cache window 0: fw */
	if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) {
		WREG32_SOC15(UVD, 0, mmUVD_LMI_VCPU_CACHE_64BIT_BAR_LOW,
			(adev->firmware.ucode[AMDGPU_UCODE_ID_VCN].tmr_mc_addr_lo));
		WREG32_SOC15(UVD, 0, mmUVD_LMI_VCPU_CACHE_64BIT_BAR_HIGH,
			(adev->firmware.ucode[AMDGPU_UCODE_ID_VCN].tmr_mc_addr_hi));
		WREG32_SOC15(UVD, 0, mmUVD_VCPU_CACHE_OFFSET0, 0);
		offset = 0;
	} else {
		WREG32_SOC15(UVD, 0, mmUVD_LMI_VCPU_CACHE_64BIT_BAR_LOW,
			lower_32_bits(adev->vcn.inst->gpu_addr));
		WREG32_SOC15(UVD, 0, mmUVD_LMI_VCPU_CACHE_64BIT_BAR_HIGH,
			upper_32_bits(adev->vcn.inst->gpu_addr));
		offset = size;
		WREG32_SOC15(UVD, 0, mmUVD_VCPU_CACHE_OFFSET0,
			AMDGPU_UVD_FIRMWARE_OFFSET >> 3);
	}

	WREG32_SOC15(UVD, 0, mmUVD_VCPU_CACHE_SIZE0, size);

	/* cache window 1: stack */
	WREG32_SOC15(UVD, 0, mmUVD_LMI_VCPU_CACHE1_64BIT_BAR_LOW,
		lower_32_bits(adev->vcn.inst->gpu_addr + offset));
	WREG32_SOC15(UVD, 0, mmUVD_LMI_VCPU_CACHE1_64BIT_BAR_HIGH,
		upper_32_bits(adev->vcn.inst->gpu_addr + offset));
	WREG32_SOC15(UVD, 0, mmUVD_VCPU_CACHE_OFFSET1, 0);
	WREG32_SOC15(UVD, 0, mmUVD_VCPU_CACHE_SIZE1, AMDGPU_VCN_STACK_SIZE);

	/* cache window 2: context */
	WREG32_SOC15(UVD, 0, mmUVD_LMI_VCPU_CACHE2_64BIT_BAR_LOW,
		lower_32_bits(adev->vcn.inst->gpu_addr + offset + AMDGPU_VCN_STACK_SIZE));
	WREG32_SOC15(UVD, 0, mmUVD_LMI_VCPU_CACHE2_64BIT_BAR_HIGH,
		upper_32_bits(adev->vcn.inst->gpu_addr + offset + AMDGPU_VCN_STACK_SIZE));
	WREG32_SOC15(UVD, 0, mmUVD_VCPU_CACHE_OFFSET2, 0);
	WREG32_SOC15(UVD, 0, mmUVD_VCPU_CACHE_SIZE2, AMDGPU_VCN_CONTEXT_SIZE);

	WREG32_SOC15(UVD, 0, mmUVD_UDEC_ADDR_CONFIG,
			adev->gfx.config.gb_addr_config);
	WREG32_SOC15(UVD, 0, mmUVD_UDEC_DB_ADDR_CONFIG,
			adev->gfx.config.gb_addr_config);
	WREG32_SOC15(UVD, 0, mmUVD_UDEC_DBW_ADDR_CONFIG,
			adev->gfx.config.gb_addr_config);
	WREG32_SOC15(UVD, 0, mmUVD_UDEC_DBW_UV_ADDR_CONFIG,
			adev->gfx.config.gb_addr_config);
	WREG32_SOC15(UVD, 0, mmUVD_MIF_CURR_ADDR_CONFIG,
			adev->gfx.config.gb_addr_config);
	WREG32_SOC15(UVD, 0, mmUVD_MIF_CURR_UV_ADDR_CONFIG,
			adev->gfx.config.gb_addr_config);
	WREG32_SOC15(UVD, 0, mmUVD_MIF_RECON1_ADDR_CONFIG,
			adev->gfx.config.gb_addr_config);
	WREG32_SOC15(UVD, 0, mmUVD_MIF_RECON1_UV_ADDR_CONFIG,
			adev->gfx.config.gb_addr_config);
	WREG32_SOC15(UVD, 0, mmUVD_MIF_REF_ADDR_CONFIG,
			adev->gfx.config.gb_addr_config);
	WREG32_SOC15(UVD, 0, mmUVD_MIF_REF_UV_ADDR_CONFIG,
			adev->gfx.config.gb_addr_config);
	WREG32_SOC15(UVD, 0, mmUVD_JPEG_ADDR_CONFIG,
			adev->gfx.config.gb_addr_config);
	WREG32_SOC15(UVD, 0, mmUVD_JPEG_UV_ADDR_CONFIG,
			adev->gfx.config.gb_addr_config);
}
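
/*
 * The VCPU sees its firmware, stack and context through three cache windows
 * laid out back to back in the VCN BO (or in the PSP TMR region when the
 * firmware is PSP-loaded). With a firmware image of `size` bytes the layout
 * is roughly:
 *
 *	window 0: fw      at gpu_addr                (size bytes)
 *	window 1: stack   at gpu_addr + offset       (AMDGPU_VCN_STACK_SIZE)
 *	window 2: context at gpu_addr + offset
 *	                     + AMDGPU_VCN_STACK_SIZE (AMDGPU_VCN_CONTEXT_SIZE)
 *
 * where offset is 0 for PSP loading (the image lives in the TMR) and size
 * otherwise.
 */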

static void vcn_v1_0_mc_resume_dpg_mode(struct amdgpu_device *adev)
{
	uint32_t size = AMDGPU_GPU_PAGE_ALIGN(adev->vcn.fw->size + 4);
	uint32_t offset;

	/* cache window 0: fw */
	if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) {
		WREG32_SOC15_DPG_MODE(UVD, 0, mmUVD_LMI_VCPU_CACHE_64BIT_BAR_LOW,
			(adev->firmware.ucode[AMDGPU_UCODE_ID_VCN].tmr_mc_addr_lo),
			0xFFFFFFFF, 0);
		WREG32_SOC15_DPG_MODE(UVD, 0, mmUVD_LMI_VCPU_CACHE_64BIT_BAR_HIGH,
			(adev->firmware.ucode[AMDGPU_UCODE_ID_VCN].tmr_mc_addr_hi),
			0xFFFFFFFF, 0);
		WREG32_SOC15_DPG_MODE(UVD, 0, mmUVD_VCPU_CACHE_OFFSET0, 0,
			0xFFFFFFFF, 0);
		offset = 0;
	} else {
		WREG32_SOC15_DPG_MODE(UVD, 0, mmUVD_LMI_VCPU_CACHE_64BIT_BAR_LOW,
			lower_32_bits(adev->vcn.inst->gpu_addr), 0xFFFFFFFF, 0);
		WREG32_SOC15_DPG_MODE(UVD, 0, mmUVD_LMI_VCPU_CACHE_64BIT_BAR_HIGH,
			upper_32_bits(adev->vcn.inst->gpu_addr), 0xFFFFFFFF, 0);
		offset = size;
		WREG32_SOC15_DPG_MODE(UVD, 0, mmUVD_VCPU_CACHE_OFFSET0,
			AMDGPU_UVD_FIRMWARE_OFFSET >> 3, 0xFFFFFFFF, 0);
	}

	WREG32_SOC15_DPG_MODE(UVD, 0, mmUVD_VCPU_CACHE_SIZE0, size, 0xFFFFFFFF, 0);

	/* cache window 1: stack */
	WREG32_SOC15_DPG_MODE(UVD, 0, mmUVD_LMI_VCPU_CACHE1_64BIT_BAR_LOW,
		lower_32_bits(adev->vcn.inst->gpu_addr + offset), 0xFFFFFFFF, 0);
	WREG32_SOC15_DPG_MODE(UVD, 0, mmUVD_LMI_VCPU_CACHE1_64BIT_BAR_HIGH,
		upper_32_bits(adev->vcn.inst->gpu_addr + offset), 0xFFFFFFFF, 0);
	WREG32_SOC15_DPG_MODE(UVD, 0, mmUVD_VCPU_CACHE_OFFSET1, 0,
		0xFFFFFFFF, 0);
	WREG32_SOC15_DPG_MODE(UVD, 0, mmUVD_VCPU_CACHE_SIZE1, AMDGPU_VCN_STACK_SIZE,
		0xFFFFFFFF, 0);

	/* cache window 2: context */
	WREG32_SOC15_DPG_MODE(UVD, 0, mmUVD_LMI_VCPU_CACHE2_64BIT_BAR_LOW,
		lower_32_bits(adev->vcn.inst->gpu_addr + offset + AMDGPU_VCN_STACK_SIZE),
		0xFFFFFFFF, 0);
	WREG32_SOC15_DPG_MODE(UVD, 0, mmUVD_LMI_VCPU_CACHE2_64BIT_BAR_HIGH,
		upper_32_bits(adev->vcn.inst->gpu_addr + offset + AMDGPU_VCN_STACK_SIZE),
		0xFFFFFFFF, 0);
	WREG32_SOC15_DPG_MODE(UVD, 0, mmUVD_VCPU_CACHE_OFFSET2, 0, 0xFFFFFFFF, 0);
	WREG32_SOC15_DPG_MODE(UVD, 0, mmUVD_VCPU_CACHE_SIZE2, AMDGPU_VCN_CONTEXT_SIZE,
		0xFFFFFFFF, 0);

	/* VCN global tiling registers */
	WREG32_SOC15_DPG_MODE(UVD, 0, mmUVD_UDEC_ADDR_CONFIG,
			adev->gfx.config.gb_addr_config, 0xFFFFFFFF, 0);
	WREG32_SOC15_DPG_MODE(UVD, 0, mmUVD_UDEC_DB_ADDR_CONFIG,
			adev->gfx.config.gb_addr_config, 0xFFFFFFFF, 0);
	WREG32_SOC15_DPG_MODE(UVD, 0, mmUVD_UDEC_DBW_ADDR_CONFIG,
			adev->gfx.config.gb_addr_config, 0xFFFFFFFF, 0);
	WREG32_SOC15_DPG_MODE(UVD, 0, mmUVD_UDEC_DBW_UV_ADDR_CONFIG,
			adev->gfx.config.gb_addr_config, 0xFFFFFFFF, 0);
	WREG32_SOC15_DPG_MODE(UVD, 0, mmUVD_MIF_CURR_ADDR_CONFIG,
			adev->gfx.config.gb_addr_config, 0xFFFFFFFF, 0);
	WREG32_SOC15_DPG_MODE(UVD, 0, mmUVD_MIF_CURR_UV_ADDR_CONFIG,
			adev->gfx.config.gb_addr_config, 0xFFFFFFFF, 0);
	WREG32_SOC15_DPG_MODE(UVD, 0, mmUVD_MIF_RECON1_ADDR_CONFIG,
			adev->gfx.config.gb_addr_config, 0xFFFFFFFF, 0);
	WREG32_SOC15_DPG_MODE(UVD, 0, mmUVD_MIF_RECON1_UV_ADDR_CONFIG,
			adev->gfx.config.gb_addr_config, 0xFFFFFFFF, 0);
	WREG32_SOC15_DPG_MODE(UVD, 0, mmUVD_MIF_REF_ADDR_CONFIG,
			adev->gfx.config.gb_addr_config, 0xFFFFFFFF, 0);
	WREG32_SOC15_DPG_MODE(UVD, 0, mmUVD_MIF_REF_UV_ADDR_CONFIG,
			adev->gfx.config.gb_addr_config, 0xFFFFFFFF, 0);
}
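
/*
 * WREG32_SOC15_DPG_MODE() takes two extra arguments compared to
 * WREG32_SOC15(): a write mask (0xFFFFFFFF everywhere here, i.e. all bits)
 * and sram_sel, which appears to choose whether the write is applied
 * directly (0) or staged in the DPG SRAM (1) so the power-gating firmware
 * can replay it; see vcn_v1_0_clock_gating_dpg_mode() below for sram_sel = 1
 * usage.
 */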

/**
 * vcn_v1_0_disable_clock_gating - disable VCN clock gating
 *
 * @adev: amdgpu_device pointer
 *
 * Disable clock gating for VCN block
 */
static void vcn_v1_0_disable_clock_gating(struct amdgpu_device *adev)
{
	uint32_t data;

	/* JPEG disable CGC */
	data = RREG32_SOC15(VCN, 0, mmJPEG_CGC_CTRL);

	if (adev->cg_flags & AMD_CG_SUPPORT_VCN_MGCG)
		data |= 1 << JPEG_CGC_CTRL__DYN_CLOCK_MODE__SHIFT;
	else
		data &= ~JPEG_CGC_CTRL__DYN_CLOCK_MODE_MASK;

	data |= 1 << JPEG_CGC_CTRL__CLK_GATE_DLY_TIMER__SHIFT;
	data |= 4 << JPEG_CGC_CTRL__CLK_OFF_DELAY__SHIFT;
	WREG32_SOC15(VCN, 0, mmJPEG_CGC_CTRL, data);

	data = RREG32_SOC15(VCN, 0, mmJPEG_CGC_GATE);
	data &= ~(JPEG_CGC_GATE__JPEG_MASK | JPEG_CGC_GATE__JPEG2_MASK);
	WREG32_SOC15(VCN, 0, mmJPEG_CGC_GATE, data);

	/* UVD disable CGC */
	data = RREG32_SOC15(VCN, 0, mmUVD_CGC_CTRL);
	if (adev->cg_flags & AMD_CG_SUPPORT_VCN_MGCG)
		data |= 1 << UVD_CGC_CTRL__DYN_CLOCK_MODE__SHIFT;
	else
		data &= ~UVD_CGC_CTRL__DYN_CLOCK_MODE_MASK;

	data |= 1 << UVD_CGC_CTRL__CLK_GATE_DLY_TIMER__SHIFT;
	data |= 4 << UVD_CGC_CTRL__CLK_OFF_DELAY__SHIFT;
	WREG32_SOC15(VCN, 0, mmUVD_CGC_CTRL, data);

	data = RREG32_SOC15(VCN, 0, mmUVD_CGC_GATE);
	data &= ~(UVD_CGC_GATE__SYS_MASK
		| UVD_CGC_GATE__UDEC_MASK
		| UVD_CGC_GATE__MPEG2_MASK
		| UVD_CGC_GATE__REGS_MASK
		| UVD_CGC_GATE__RBC_MASK
		| UVD_CGC_GATE__LMI_MC_MASK
		| UVD_CGC_GATE__LMI_UMC_MASK
		| UVD_CGC_GATE__IDCT_MASK
		| UVD_CGC_GATE__MPRD_MASK
		| UVD_CGC_GATE__MPC_MASK
		| UVD_CGC_GATE__LBSI_MASK
		| UVD_CGC_GATE__LRBBM_MASK
		| UVD_CGC_GATE__UDEC_RE_MASK
		| UVD_CGC_GATE__UDEC_CM_MASK
		| UVD_CGC_GATE__UDEC_IT_MASK
		| UVD_CGC_GATE__UDEC_DB_MASK
		| UVD_CGC_GATE__UDEC_MP_MASK
		| UVD_CGC_GATE__WCB_MASK
		| UVD_CGC_GATE__VCPU_MASK
		| UVD_CGC_GATE__SCPU_MASK);
	WREG32_SOC15(VCN, 0, mmUVD_CGC_GATE, data);

	data = RREG32_SOC15(VCN, 0, mmUVD_CGC_CTRL);
	data &= ~(UVD_CGC_CTRL__UDEC_RE_MODE_MASK
		| UVD_CGC_CTRL__UDEC_CM_MODE_MASK
		| UVD_CGC_CTRL__UDEC_IT_MODE_MASK
		| UVD_CGC_CTRL__UDEC_DB_MODE_MASK
		| UVD_CGC_CTRL__UDEC_MP_MODE_MASK
		| UVD_CGC_CTRL__SYS_MODE_MASK
		| UVD_CGC_CTRL__UDEC_MODE_MASK
		| UVD_CGC_CTRL__MPEG2_MODE_MASK
		| UVD_CGC_CTRL__REGS_MODE_MASK
		| UVD_CGC_CTRL__RBC_MODE_MASK
		| UVD_CGC_CTRL__LMI_MC_MODE_MASK
		| UVD_CGC_CTRL__LMI_UMC_MODE_MASK
		| UVD_CGC_CTRL__IDCT_MODE_MASK
		| UVD_CGC_CTRL__MPRD_MODE_MASK
		| UVD_CGC_CTRL__MPC_MODE_MASK
		| UVD_CGC_CTRL__LBSI_MODE_MASK
		| UVD_CGC_CTRL__LRBBM_MODE_MASK
		| UVD_CGC_CTRL__WCB_MODE_MASK
		| UVD_CGC_CTRL__VCPU_MODE_MASK
		| UVD_CGC_CTRL__SCPU_MODE_MASK);
	WREG32_SOC15(VCN, 0, mmUVD_CGC_CTRL, data);

	data = RREG32_SOC15(VCN, 0, mmUVD_SUVD_CGC_GATE);
	data |= (UVD_SUVD_CGC_GATE__SRE_MASK
		| UVD_SUVD_CGC_GATE__SIT_MASK
		| UVD_SUVD_CGC_GATE__SMP_MASK
		| UVD_SUVD_CGC_GATE__SCM_MASK
		| UVD_SUVD_CGC_GATE__SDB_MASK
		| UVD_SUVD_CGC_GATE__SRE_H264_MASK
		| UVD_SUVD_CGC_GATE__SRE_HEVC_MASK
		| UVD_SUVD_CGC_GATE__SIT_H264_MASK
		| UVD_SUVD_CGC_GATE__SIT_HEVC_MASK
		| UVD_SUVD_CGC_GATE__SCM_H264_MASK
		| UVD_SUVD_CGC_GATE__SCM_HEVC_MASK
		| UVD_SUVD_CGC_GATE__SDB_H264_MASK
		| UVD_SUVD_CGC_GATE__SDB_HEVC_MASK
		| UVD_SUVD_CGC_GATE__SCLR_MASK
		| UVD_SUVD_CGC_GATE__UVD_SC_MASK
		| UVD_SUVD_CGC_GATE__ENT_MASK
		| UVD_SUVD_CGC_GATE__SIT_HEVC_DEC_MASK
		| UVD_SUVD_CGC_GATE__SIT_HEVC_ENC_MASK
		| UVD_SUVD_CGC_GATE__SITE_MASK
		| UVD_SUVD_CGC_GATE__SRE_VP9_MASK
		| UVD_SUVD_CGC_GATE__SCM_VP9_MASK
		| UVD_SUVD_CGC_GATE__SIT_VP9_DEC_MASK
		| UVD_SUVD_CGC_GATE__SDB_VP9_MASK
		| UVD_SUVD_CGC_GATE__IME_HEVC_MASK);
	WREG32_SOC15(VCN, 0, mmUVD_SUVD_CGC_GATE, data);

	data = RREG32_SOC15(VCN, 0, mmUVD_SUVD_CGC_CTRL);
	data &= ~(UVD_SUVD_CGC_CTRL__SRE_MODE_MASK
		| UVD_SUVD_CGC_CTRL__SIT_MODE_MASK
		| UVD_SUVD_CGC_CTRL__SMP_MODE_MASK
		| UVD_SUVD_CGC_CTRL__SCM_MODE_MASK
		| UVD_SUVD_CGC_CTRL__SDB_MODE_MASK
		| UVD_SUVD_CGC_CTRL__SCLR_MODE_MASK
		| UVD_SUVD_CGC_CTRL__UVD_SC_MODE_MASK
		| UVD_SUVD_CGC_CTRL__ENT_MODE_MASK
		| UVD_SUVD_CGC_CTRL__IME_MODE_MASK
		| UVD_SUVD_CGC_CTRL__SITE_MODE_MASK);
	WREG32_SOC15(VCN, 0, mmUVD_SUVD_CGC_CTRL, data);
}
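
/*
 * In the CGC register pairs used above, the *_CGC_GATE bits select which
 * sub-blocks may have their clocks gated at all, while the *_CGC_CTRL MODE
 * bits select how gating is driven (dynamic/hardware vs. software control);
 * DYN_CLOCK_MODE is only enabled when AMD_CG_SUPPORT_VCN_MGCG is set in
 * adev->cg_flags.
 */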

/**
 * vcn_v1_0_enable_clock_gating - enable VCN clock gating
 *
 * @adev: amdgpu_device pointer
 *
 * Enable clock gating for VCN block
 */
static void vcn_v1_0_enable_clock_gating(struct amdgpu_device *adev)
{
	uint32_t data = 0;

	/* enable JPEG CGC */
	data = RREG32_SOC15(VCN, 0, mmJPEG_CGC_CTRL);
	if (adev->cg_flags & AMD_CG_SUPPORT_VCN_MGCG)
		data |= 1 << JPEG_CGC_CTRL__DYN_CLOCK_MODE__SHIFT;
	else
		data |= 0 << JPEG_CGC_CTRL__DYN_CLOCK_MODE__SHIFT;
	data |= 1 << JPEG_CGC_CTRL__CLK_GATE_DLY_TIMER__SHIFT;
	data |= 4 << JPEG_CGC_CTRL__CLK_OFF_DELAY__SHIFT;
	WREG32_SOC15(VCN, 0, mmJPEG_CGC_CTRL, data);

	data = RREG32_SOC15(VCN, 0, mmJPEG_CGC_GATE);
	data |= (JPEG_CGC_GATE__JPEG_MASK | JPEG_CGC_GATE__JPEG2_MASK);
	WREG32_SOC15(VCN, 0, mmJPEG_CGC_GATE, data);

	/* enable UVD CGC */
	data = RREG32_SOC15(VCN, 0, mmUVD_CGC_CTRL);
	if (adev->cg_flags & AMD_CG_SUPPORT_VCN_MGCG)
		data |= 1 << UVD_CGC_CTRL__DYN_CLOCK_MODE__SHIFT;
	else
		data |= 0 << UVD_CGC_CTRL__DYN_CLOCK_MODE__SHIFT;
	data |= 1 << UVD_CGC_CTRL__CLK_GATE_DLY_TIMER__SHIFT;
	data |= 4 << UVD_CGC_CTRL__CLK_OFF_DELAY__SHIFT;
	WREG32_SOC15(VCN, 0, mmUVD_CGC_CTRL, data);

	data = RREG32_SOC15(VCN, 0, mmUVD_CGC_CTRL);
	data |= (UVD_CGC_CTRL__UDEC_RE_MODE_MASK
		| UVD_CGC_CTRL__UDEC_CM_MODE_MASK
		| UVD_CGC_CTRL__UDEC_IT_MODE_MASK
		| UVD_CGC_CTRL__UDEC_DB_MODE_MASK
		| UVD_CGC_CTRL__UDEC_MP_MODE_MASK
		| UVD_CGC_CTRL__SYS_MODE_MASK
		| UVD_CGC_CTRL__UDEC_MODE_MASK
		| UVD_CGC_CTRL__MPEG2_MODE_MASK
		| UVD_CGC_CTRL__REGS_MODE_MASK
		| UVD_CGC_CTRL__RBC_MODE_MASK
		| UVD_CGC_CTRL__LMI_MC_MODE_MASK
		| UVD_CGC_CTRL__LMI_UMC_MODE_MASK
		| UVD_CGC_CTRL__IDCT_MODE_MASK
		| UVD_CGC_CTRL__MPRD_MODE_MASK
		| UVD_CGC_CTRL__MPC_MODE_MASK
		| UVD_CGC_CTRL__LBSI_MODE_MASK
		| UVD_CGC_CTRL__LRBBM_MODE_MASK
		| UVD_CGC_CTRL__WCB_MODE_MASK
		| UVD_CGC_CTRL__VCPU_MODE_MASK
		| UVD_CGC_CTRL__SCPU_MODE_MASK);
	WREG32_SOC15(VCN, 0, mmUVD_CGC_CTRL, data);

	data = RREG32_SOC15(VCN, 0, mmUVD_SUVD_CGC_CTRL);
	data |= (UVD_SUVD_CGC_CTRL__SRE_MODE_MASK
		| UVD_SUVD_CGC_CTRL__SIT_MODE_MASK
		| UVD_SUVD_CGC_CTRL__SMP_MODE_MASK
		| UVD_SUVD_CGC_CTRL__SCM_MODE_MASK
		| UVD_SUVD_CGC_CTRL__SDB_MODE_MASK
		| UVD_SUVD_CGC_CTRL__SCLR_MODE_MASK
		| UVD_SUVD_CGC_CTRL__UVD_SC_MODE_MASK
		| UVD_SUVD_CGC_CTRL__ENT_MODE_MASK
		| UVD_SUVD_CGC_CTRL__IME_MODE_MASK
		| UVD_SUVD_CGC_CTRL__SITE_MODE_MASK);
	WREG32_SOC15(VCN, 0, mmUVD_SUVD_CGC_CTRL, data);
}

static void vcn_v1_0_clock_gating_dpg_mode(struct amdgpu_device *adev, uint8_t sram_sel)
{
	uint32_t reg_data = 0;

	/* disable JPEG CGC */
	if (adev->cg_flags & AMD_CG_SUPPORT_VCN_MGCG)
		reg_data = 1 << JPEG_CGC_CTRL__DYN_CLOCK_MODE__SHIFT;
	else
		reg_data = 0 << JPEG_CGC_CTRL__DYN_CLOCK_MODE__SHIFT;
	reg_data |= 1 << JPEG_CGC_CTRL__CLK_GATE_DLY_TIMER__SHIFT;
	reg_data |= 4 << JPEG_CGC_CTRL__CLK_OFF_DELAY__SHIFT;
	WREG32_SOC15_DPG_MODE(UVD, 0, mmJPEG_CGC_CTRL, reg_data, 0xFFFFFFFF, sram_sel);

	WREG32_SOC15_DPG_MODE(UVD, 0, mmJPEG_CGC_GATE, 0, 0xFFFFFFFF, sram_sel);

	/* enable sw clock gating control */
	if (adev->cg_flags & AMD_CG_SUPPORT_VCN_MGCG)
		reg_data = 1 << UVD_CGC_CTRL__DYN_CLOCK_MODE__SHIFT;
	else
		reg_data = 0 << UVD_CGC_CTRL__DYN_CLOCK_MODE__SHIFT;
	reg_data |= 1 << UVD_CGC_CTRL__CLK_GATE_DLY_TIMER__SHIFT;
	reg_data |= 4 << UVD_CGC_CTRL__CLK_OFF_DELAY__SHIFT;
	reg_data &= ~(UVD_CGC_CTRL__UDEC_RE_MODE_MASK |
		UVD_CGC_CTRL__UDEC_CM_MODE_MASK |
		UVD_CGC_CTRL__UDEC_IT_MODE_MASK |
		UVD_CGC_CTRL__UDEC_DB_MODE_MASK |
		UVD_CGC_CTRL__UDEC_MP_MODE_MASK |
		UVD_CGC_CTRL__SYS_MODE_MASK |
		UVD_CGC_CTRL__UDEC_MODE_MASK |
		UVD_CGC_CTRL__MPEG2_MODE_MASK |
		UVD_CGC_CTRL__REGS_MODE_MASK |
		UVD_CGC_CTRL__RBC_MODE_MASK |
		UVD_CGC_CTRL__LMI_MC_MODE_MASK |
		UVD_CGC_CTRL__LMI_UMC_MODE_MASK |
		UVD_CGC_CTRL__IDCT_MODE_MASK |
		UVD_CGC_CTRL__MPRD_MODE_MASK |
		UVD_CGC_CTRL__MPC_MODE_MASK |
		UVD_CGC_CTRL__LBSI_MODE_MASK |
		UVD_CGC_CTRL__LRBBM_MODE_MASK |
		UVD_CGC_CTRL__WCB_MODE_MASK |
		UVD_CGC_CTRL__VCPU_MODE_MASK |
		UVD_CGC_CTRL__SCPU_MODE_MASK);
	WREG32_SOC15_DPG_MODE(UVD, 0, mmUVD_CGC_CTRL, reg_data, 0xFFFFFFFF, sram_sel);

	/* turn off clock gating */
	WREG32_SOC15_DPG_MODE(UVD, 0, mmUVD_CGC_GATE, 0, 0xFFFFFFFF, sram_sel);

	/* turn on SUVD clock gating */
	WREG32_SOC15_DPG_MODE(UVD, 0, mmUVD_SUVD_CGC_GATE, 1, 0xFFFFFFFF, sram_sel);

	/* turn on sw mode in UVD_SUVD_CGC_CTRL */
	WREG32_SOC15_DPG_MODE(UVD, 0, mmUVD_SUVD_CGC_CTRL, 0, 0xFFFFFFFF, sram_sel);
}

static void vcn_1_0_disable_static_power_gating(struct amdgpu_device *adev)
{
	uint32_t data;
	int ret;

	if (adev->pg_flags & AMD_PG_SUPPORT_VCN) {
		data = (1 << UVD_PGFSM_CONFIG__UVDM_PWR_CONFIG__SHIFT
			| 1 << UVD_PGFSM_CONFIG__UVDU_PWR_CONFIG__SHIFT
			| 2 << UVD_PGFSM_CONFIG__UVDF_PWR_CONFIG__SHIFT
			| 2 << UVD_PGFSM_CONFIG__UVDC_PWR_CONFIG__SHIFT
			| 2 << UVD_PGFSM_CONFIG__UVDB_PWR_CONFIG__SHIFT
			| 2 << UVD_PGFSM_CONFIG__UVDIL_PWR_CONFIG__SHIFT
			| 2 << UVD_PGFSM_CONFIG__UVDIR_PWR_CONFIG__SHIFT
			| 2 << UVD_PGFSM_CONFIG__UVDTD_PWR_CONFIG__SHIFT
			| 2 << UVD_PGFSM_CONFIG__UVDTE_PWR_CONFIG__SHIFT
			| 2 << UVD_PGFSM_CONFIG__UVDE_PWR_CONFIG__SHIFT
			| 2 << UVD_PGFSM_CONFIG__UVDW_PWR_CONFIG__SHIFT);

		WREG32_SOC15(VCN, 0, mmUVD_PGFSM_CONFIG, data);
		SOC15_WAIT_ON_RREG(VCN, 0, mmUVD_PGFSM_STATUS, UVD_PGFSM_STATUS__UVDM_UVDU_PWR_ON, 0xFFFFFF, ret);
	} else {
		data = (1 << UVD_PGFSM_CONFIG__UVDM_PWR_CONFIG__SHIFT
			| 1 << UVD_PGFSM_CONFIG__UVDU_PWR_CONFIG__SHIFT
			| 1 << UVD_PGFSM_CONFIG__UVDF_PWR_CONFIG__SHIFT
			| 1 << UVD_PGFSM_CONFIG__UVDC_PWR_CONFIG__SHIFT
			| 1 << UVD_PGFSM_CONFIG__UVDB_PWR_CONFIG__SHIFT
			| 1 << UVD_PGFSM_CONFIG__UVDIL_PWR_CONFIG__SHIFT
			| 1 << UVD_PGFSM_CONFIG__UVDIR_PWR_CONFIG__SHIFT
			| 1 << UVD_PGFSM_CONFIG__UVDTD_PWR_CONFIG__SHIFT
			| 1 << UVD_PGFSM_CONFIG__UVDTE_PWR_CONFIG__SHIFT
			| 1 << UVD_PGFSM_CONFIG__UVDE_PWR_CONFIG__SHIFT
			| 1 << UVD_PGFSM_CONFIG__UVDW_PWR_CONFIG__SHIFT);
		WREG32_SOC15(VCN, 0, mmUVD_PGFSM_CONFIG, data);
		SOC15_WAIT_ON_RREG(VCN, 0, mmUVD_PGFSM_STATUS, 0, 0xFFFFFFFF, ret);
	}

	/* polling UVD_PGFSM_STATUS to confirm UVDM_PWR_STATUS, UVDU_PWR_STATUS are 0 (power on) */

	data = RREG32_SOC15(VCN, 0, mmUVD_POWER_STATUS);
	data &= ~0x103;
	if (adev->pg_flags & AMD_PG_SUPPORT_VCN)
		data |= UVD_PGFSM_CONFIG__UVDM_UVDU_PWR_ON | UVD_POWER_STATUS__UVD_PG_EN_MASK;

	WREG32_SOC15(VCN, 0, mmUVD_POWER_STATUS, data);
}
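
/*
 * UVD_PGFSM_CONFIG packs a small power-state request per UVD sub-domain
 * (UVDM, UVDU, UVDF, ...). Judging from how it is used here, 1 requests
 * power-up and 2 requests power-down, and UVD_PGFSM_STATUS reports the
 * matching per-domain state that the SOC15_WAIT_ON_RREG() polls wait for;
 * with dynamic power gating only UVDM/UVDU are kept powered.
 */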

static void vcn_1_0_enable_static_power_gating(struct amdgpu_device *adev)
{
	uint32_t data;
	int ret;

	if (adev->pg_flags & AMD_PG_SUPPORT_VCN) {
		/* Before power off, this indicator has to be turned on */
		data = RREG32_SOC15(VCN, 0, mmUVD_POWER_STATUS);
		data &= ~UVD_POWER_STATUS__UVD_POWER_STATUS_MASK;
		data |= UVD_POWER_STATUS__UVD_POWER_STATUS_TILES_OFF;
		WREG32_SOC15(VCN, 0, mmUVD_POWER_STATUS, data);

		data = (2 << UVD_PGFSM_CONFIG__UVDM_PWR_CONFIG__SHIFT
			| 2 << UVD_PGFSM_CONFIG__UVDU_PWR_CONFIG__SHIFT
			| 2 << UVD_PGFSM_CONFIG__UVDF_PWR_CONFIG__SHIFT
			| 2 << UVD_PGFSM_CONFIG__UVDC_PWR_CONFIG__SHIFT
			| 2 << UVD_PGFSM_CONFIG__UVDB_PWR_CONFIG__SHIFT
			| 2 << UVD_PGFSM_CONFIG__UVDIL_PWR_CONFIG__SHIFT
			| 2 << UVD_PGFSM_CONFIG__UVDIR_PWR_CONFIG__SHIFT
			| 2 << UVD_PGFSM_CONFIG__UVDTD_PWR_CONFIG__SHIFT
			| 2 << UVD_PGFSM_CONFIG__UVDTE_PWR_CONFIG__SHIFT
			| 2 << UVD_PGFSM_CONFIG__UVDE_PWR_CONFIG__SHIFT
			| 2 << UVD_PGFSM_CONFIG__UVDW_PWR_CONFIG__SHIFT);

		WREG32_SOC15(VCN, 0, mmUVD_PGFSM_CONFIG, data);

		data = (2 << UVD_PGFSM_STATUS__UVDM_PWR_STATUS__SHIFT
			| 2 << UVD_PGFSM_STATUS__UVDU_PWR_STATUS__SHIFT
			| 2 << UVD_PGFSM_STATUS__UVDF_PWR_STATUS__SHIFT
			| 2 << UVD_PGFSM_STATUS__UVDC_PWR_STATUS__SHIFT
			| 2 << UVD_PGFSM_STATUS__UVDB_PWR_STATUS__SHIFT
			| 2 << UVD_PGFSM_STATUS__UVDIL_PWR_STATUS__SHIFT
			| 2 << UVD_PGFSM_STATUS__UVDIR_PWR_STATUS__SHIFT
			| 2 << UVD_PGFSM_STATUS__UVDTD_PWR_STATUS__SHIFT
			| 2 << UVD_PGFSM_STATUS__UVDTE_PWR_STATUS__SHIFT
			| 2 << UVD_PGFSM_STATUS__UVDE_PWR_STATUS__SHIFT
			| 2 << UVD_PGFSM_STATUS__UVDW_PWR_STATUS__SHIFT);
		SOC15_WAIT_ON_RREG(VCN, 0, mmUVD_PGFSM_STATUS, data, 0xFFFFFFFF, ret);
	}
}

/**
 * vcn_v1_0_start_spg_mode - start VCN block
 *
 * @adev: amdgpu_device pointer
 *
 * Setup and start the VCN block
 */
static int vcn_v1_0_start_spg_mode(struct amdgpu_device *adev)
{
	struct amdgpu_ring *ring = &adev->vcn.inst->ring_dec;
	uint32_t rb_bufsz, tmp;
	uint32_t lmi_swap_cntl;
	int i, j, r;

	/* disable byte swapping */
	lmi_swap_cntl = 0;

	vcn_1_0_disable_static_power_gating(adev);

	tmp = RREG32_SOC15(UVD, 0, mmUVD_STATUS) | UVD_STATUS__UVD_BUSY;
	WREG32_SOC15(UVD, 0, mmUVD_STATUS, tmp);

	/* disable clock gating */
	vcn_v1_0_disable_clock_gating(adev);

	/* disable interrupt */
	WREG32_P(SOC15_REG_OFFSET(UVD, 0, mmUVD_MASTINT_EN), 0,
			~UVD_MASTINT_EN__VCPU_EN_MASK);

	/* initialize VCN memory controller */
	tmp = RREG32_SOC15(UVD, 0, mmUVD_LMI_CTRL);
	WREG32_SOC15(UVD, 0, mmUVD_LMI_CTRL, tmp |
		UVD_LMI_CTRL__WRITE_CLEAN_TIMER_EN_MASK |
		UVD_LMI_CTRL__MASK_MC_URGENT_MASK |
		UVD_LMI_CTRL__DATA_COHERENCY_EN_MASK |
		UVD_LMI_CTRL__VCPU_DATA_COHERENCY_EN_MASK);

#ifdef __BIG_ENDIAN
	/* swap (8 in 32) RB and IB */
	lmi_swap_cntl = 0xa;
#endif
	WREG32_SOC15(UVD, 0, mmUVD_LMI_SWAP_CNTL, lmi_swap_cntl);

	tmp = RREG32_SOC15(UVD, 0, mmUVD_MPC_CNTL);
	tmp &= ~UVD_MPC_CNTL__REPLACEMENT_MODE_MASK;
	tmp |= 0x2 << UVD_MPC_CNTL__REPLACEMENT_MODE__SHIFT;
	WREG32_SOC15(UVD, 0, mmUVD_MPC_CNTL, tmp);

	WREG32_SOC15(UVD, 0, mmUVD_MPC_SET_MUXA0,
		((0x1 << UVD_MPC_SET_MUXA0__VARA_1__SHIFT) |
		(0x2 << UVD_MPC_SET_MUXA0__VARA_2__SHIFT) |
		(0x3 << UVD_MPC_SET_MUXA0__VARA_3__SHIFT) |
		(0x4 << UVD_MPC_SET_MUXA0__VARA_4__SHIFT)));

	WREG32_SOC15(UVD, 0, mmUVD_MPC_SET_MUXB0,
		((0x1 << UVD_MPC_SET_MUXB0__VARB_1__SHIFT) |
		(0x2 << UVD_MPC_SET_MUXB0__VARB_2__SHIFT) |
		(0x3 << UVD_MPC_SET_MUXB0__VARB_3__SHIFT) |
		(0x4 << UVD_MPC_SET_MUXB0__VARB_4__SHIFT)));

	WREG32_SOC15(UVD, 0, mmUVD_MPC_SET_MUX,
		((0x0 << UVD_MPC_SET_MUX__SET_0__SHIFT) |
		(0x1 << UVD_MPC_SET_MUX__SET_1__SHIFT) |
		(0x2 << UVD_MPC_SET_MUX__SET_2__SHIFT)));

	vcn_v1_0_mc_resume_spg_mode(adev);

	WREG32_SOC15(UVD, 0, mmUVD_REG_XX_MASK, 0x10);
	WREG32_SOC15(UVD, 0, mmUVD_RBC_XX_IB_REG_CHECK,
		RREG32_SOC15(UVD, 0, mmUVD_RBC_XX_IB_REG_CHECK) | 0x3);

	/* enable VCPU clock */
	WREG32_SOC15(UVD, 0, mmUVD_VCPU_CNTL, UVD_VCPU_CNTL__CLK_EN_MASK);

	/* boot up the VCPU */
	WREG32_P(SOC15_REG_OFFSET(UVD, 0, mmUVD_SOFT_RESET), 0,
			~UVD_SOFT_RESET__VCPU_SOFT_RESET_MASK);

	/* enable UMC */
	WREG32_P(SOC15_REG_OFFSET(UVD, 0, mmUVD_LMI_CTRL2), 0,
			~UVD_LMI_CTRL2__STALL_ARB_UMC_MASK);

	tmp = RREG32_SOC15(UVD, 0, mmUVD_SOFT_RESET);
	tmp &= ~UVD_SOFT_RESET__LMI_SOFT_RESET_MASK;
	tmp &= ~UVD_SOFT_RESET__LMI_UMC_SOFT_RESET_MASK;
	WREG32_SOC15(UVD, 0, mmUVD_SOFT_RESET, tmp);

	for (i = 0; i < 10; ++i) {
		uint32_t status;

		for (j = 0; j < 100; ++j) {
			status = RREG32_SOC15(UVD, 0, mmUVD_STATUS);
			if (status & UVD_STATUS__IDLE)
				break;
			mdelay(10);
		}
		r = 0;
		if (status & UVD_STATUS__IDLE)
			break;

		DRM_ERROR("VCN decode not responding, trying to reset the VCPU!!!\n");
		WREG32_P(SOC15_REG_OFFSET(UVD, 0, mmUVD_SOFT_RESET),
			UVD_SOFT_RESET__VCPU_SOFT_RESET_MASK,
			~UVD_SOFT_RESET__VCPU_SOFT_RESET_MASK);
		mdelay(10);
		WREG32_P(SOC15_REG_OFFSET(UVD, 0, mmUVD_SOFT_RESET), 0,
			~UVD_SOFT_RESET__VCPU_SOFT_RESET_MASK);
		mdelay(10);
		r = -1;
	}

	if (r) {
		DRM_ERROR("VCN decode not responding, giving up!!!\n");
		return r;
	}

	/* enable master interrupt */
	WREG32_P(SOC15_REG_OFFSET(UVD, 0, mmUVD_MASTINT_EN),
		UVD_MASTINT_EN__VCPU_EN_MASK, ~UVD_MASTINT_EN__VCPU_EN_MASK);

	/* enable system interrupt for JRBC, TODO: move to set interrupt */
	WREG32_P(SOC15_REG_OFFSET(UVD, 0, mmUVD_SYS_INT_EN),
		UVD_SYS_INT_EN__UVD_JRBC_EN_MASK,
		~UVD_SYS_INT_EN__UVD_JRBC_EN_MASK);

	/* clear the busy bit of UVD_STATUS */
	tmp = RREG32_SOC15(UVD, 0, mmUVD_STATUS) & ~UVD_STATUS__UVD_BUSY;
	WREG32_SOC15(UVD, 0, mmUVD_STATUS, tmp);

	/* force RBC into idle state */
	rb_bufsz = order_base_2(ring->ring_size);
	tmp = REG_SET_FIELD(0, UVD_RBC_RB_CNTL, RB_BUFSZ, rb_bufsz);
	tmp = REG_SET_FIELD(tmp, UVD_RBC_RB_CNTL, RB_BLKSZ, 1);
	tmp = REG_SET_FIELD(tmp, UVD_RBC_RB_CNTL, RB_NO_FETCH, 1);
	tmp = REG_SET_FIELD(tmp, UVD_RBC_RB_CNTL, RB_NO_UPDATE, 1);
	tmp = REG_SET_FIELD(tmp, UVD_RBC_RB_CNTL, RB_RPTR_WR_EN, 1);
	WREG32_SOC15(UVD, 0, mmUVD_RBC_RB_CNTL, tmp);

	/* set the write pointer delay */
	WREG32_SOC15(UVD, 0, mmUVD_RBC_RB_WPTR_CNTL, 0);

	/* set the wb address */
	WREG32_SOC15(UVD, 0, mmUVD_RBC_RB_RPTR_ADDR,
			(upper_32_bits(ring->gpu_addr) >> 2));

	/* program the RB_BASE for ring buffer */
	WREG32_SOC15(UVD, 0, mmUVD_LMI_RBC_RB_64BIT_BAR_LOW,
			lower_32_bits(ring->gpu_addr));
	WREG32_SOC15(UVD, 0, mmUVD_LMI_RBC_RB_64BIT_BAR_HIGH,
			upper_32_bits(ring->gpu_addr));

	/* Initialize the ring buffer's read and write pointers */
	WREG32_SOC15(UVD, 0, mmUVD_RBC_RB_RPTR, 0);

	WREG32_SOC15(UVD, 0, mmUVD_SCRATCH2, 0);

	ring->wptr = RREG32_SOC15(UVD, 0, mmUVD_RBC_RB_RPTR);
	WREG32_SOC15(UVD, 0, mmUVD_RBC_RB_WPTR,
			lower_32_bits(ring->wptr));

	WREG32_P(SOC15_REG_OFFSET(UVD, 0, mmUVD_RBC_RB_CNTL), 0,
			~UVD_RBC_RB_CNTL__RB_NO_FETCH_MASK);

	ring = &adev->vcn.inst->ring_enc[0];
	WREG32_SOC15(UVD, 0, mmUVD_RB_RPTR, lower_32_bits(ring->wptr));
	WREG32_SOC15(UVD, 0, mmUVD_RB_WPTR, lower_32_bits(ring->wptr));
	WREG32_SOC15(UVD, 0, mmUVD_RB_BASE_LO, ring->gpu_addr);
	WREG32_SOC15(UVD, 0, mmUVD_RB_BASE_HI, upper_32_bits(ring->gpu_addr));
	WREG32_SOC15(UVD, 0, mmUVD_RB_SIZE, ring->ring_size / 4);

	ring = &adev->vcn.inst->ring_enc[1];
	WREG32_SOC15(UVD, 0, mmUVD_RB_RPTR2, lower_32_bits(ring->wptr));
	WREG32_SOC15(UVD, 0, mmUVD_RB_WPTR2, lower_32_bits(ring->wptr));
	WREG32_SOC15(UVD, 0, mmUVD_RB_BASE_LO2, ring->gpu_addr);
	WREG32_SOC15(UVD, 0, mmUVD_RB_BASE_HI2, upper_32_bits(ring->gpu_addr));
	WREG32_SOC15(UVD, 0, mmUVD_RB_SIZE2, ring->ring_size / 4);

	ring = &adev->vcn.inst->ring_jpeg;
	WREG32_SOC15(UVD, 0, mmUVD_LMI_JRBC_RB_VMID, 0);
	WREG32_SOC15(UVD, 0, mmUVD_JRBC_RB_CNTL, UVD_JRBC_RB_CNTL__RB_NO_FETCH_MASK |
			UVD_JRBC_RB_CNTL__RB_RPTR_WR_EN_MASK);
	WREG32_SOC15(UVD, 0, mmUVD_LMI_JRBC_RB_64BIT_BAR_LOW, lower_32_bits(ring->gpu_addr));
	WREG32_SOC15(UVD, 0, mmUVD_LMI_JRBC_RB_64BIT_BAR_HIGH, upper_32_bits(ring->gpu_addr));
	WREG32_SOC15(UVD, 0, mmUVD_JRBC_RB_RPTR, 0);
	WREG32_SOC15(UVD, 0, mmUVD_JRBC_RB_WPTR, 0);
	WREG32_SOC15(UVD, 0, mmUVD_JRBC_RB_CNTL, UVD_JRBC_RB_CNTL__RB_RPTR_WR_EN_MASK);

	/* initialize wptr */
	ring->wptr = RREG32_SOC15(UVD, 0, mmUVD_JRBC_RB_WPTR);

	/* copy patch commands to the jpeg ring */
	vcn_v1_0_jpeg_ring_set_patch_ring(ring,
			(ring->wptr + ring->max_dw * amdgpu_sched_hw_submission));

	return 0;
}
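
/*
 * Ring bring-up above follows a fetch-fence pattern: RB_NO_FETCH keeps the
 * RBC from consuming the ring while base, rptr and wptr are being
 * programmed, wptr is then synchronized to rptr so the ring starts out
 * empty, and only afterwards is RB_NO_FETCH cleared to let the engine fetch
 * commands.
 */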

static int vcn_v1_0_start_dpg_mode(struct amdgpu_device *adev)
{
	struct amdgpu_ring *ring = &adev->vcn.inst->ring_dec;
	uint32_t rb_bufsz, tmp;
	uint32_t lmi_swap_cntl;

	/* disable byte swapping */
	lmi_swap_cntl = 0;

	vcn_1_0_enable_static_power_gating(adev);

	/* enable dynamic power gating mode */
	tmp = RREG32_SOC15(UVD, 0, mmUVD_POWER_STATUS);
	tmp |= UVD_POWER_STATUS__UVD_PG_MODE_MASK;
	tmp |= UVD_POWER_STATUS__UVD_PG_EN_MASK;
	WREG32_SOC15(UVD, 0, mmUVD_POWER_STATUS, tmp);

	/* enable clock gating */
	vcn_v1_0_clock_gating_dpg_mode(adev, 0);

	/* enable VCPU clock */
	tmp = (0xFF << UVD_VCPU_CNTL__PRB_TIMEOUT_VAL__SHIFT);
	tmp |= UVD_VCPU_CNTL__CLK_EN_MASK;
	tmp |= UVD_VCPU_CNTL__MIF_WR_LOW_THRESHOLD_BP_MASK;
	WREG32_SOC15_DPG_MODE(UVD, 0, mmUVD_VCPU_CNTL, tmp, 0xFFFFFFFF, 0);

	/* disable interrupt */
	WREG32_SOC15_DPG_MODE(UVD, 0, mmUVD_MASTINT_EN,
			0, UVD_MASTINT_EN__VCPU_EN_MASK, 0);

	/* initialize VCN memory controller */
	WREG32_SOC15_DPG_MODE(UVD, 0, mmUVD_LMI_CTRL,
		(8 << UVD_LMI_CTRL__WRITE_CLEAN_TIMER__SHIFT) |
		UVD_LMI_CTRL__WRITE_CLEAN_TIMER_EN_MASK |
		UVD_LMI_CTRL__DATA_COHERENCY_EN_MASK |
		UVD_LMI_CTRL__VCPU_DATA_COHERENCY_EN_MASK |
		UVD_LMI_CTRL__REQ_MODE_MASK |
		UVD_LMI_CTRL__CRC_RESET_MASK |
		UVD_LMI_CTRL__MASK_MC_URGENT_MASK |
		0x00100000L, 0xFFFFFFFF, 0);

#ifdef __BIG_ENDIAN
	/* swap (8 in 32) RB and IB */
	lmi_swap_cntl = 0xa;
#endif
	WREG32_SOC15_DPG_MODE(UVD, 0, mmUVD_LMI_SWAP_CNTL, lmi_swap_cntl, 0xFFFFFFFF, 0);

	WREG32_SOC15_DPG_MODE(UVD, 0, mmUVD_MPC_CNTL,
		0x2 << UVD_MPC_CNTL__REPLACEMENT_MODE__SHIFT, 0xFFFFFFFF, 0);

	WREG32_SOC15_DPG_MODE(UVD, 0, mmUVD_MPC_SET_MUXA0,
		((0x1 << UVD_MPC_SET_MUXA0__VARA_1__SHIFT) |
		(0x2 << UVD_MPC_SET_MUXA0__VARA_2__SHIFT) |
		(0x3 << UVD_MPC_SET_MUXA0__VARA_3__SHIFT) |
		(0x4 << UVD_MPC_SET_MUXA0__VARA_4__SHIFT)), 0xFFFFFFFF, 0);

	WREG32_SOC15_DPG_MODE(UVD, 0, mmUVD_MPC_SET_MUXB0,
		((0x1 << UVD_MPC_SET_MUXB0__VARB_1__SHIFT) |
		(0x2 << UVD_MPC_SET_MUXB0__VARB_2__SHIFT) |
		(0x3 << UVD_MPC_SET_MUXB0__VARB_3__SHIFT) |
		(0x4 << UVD_MPC_SET_MUXB0__VARB_4__SHIFT)), 0xFFFFFFFF, 0);

	WREG32_SOC15_DPG_MODE(UVD, 0, mmUVD_MPC_SET_MUX,
		((0x0 << UVD_MPC_SET_MUX__SET_0__SHIFT) |
		(0x1 << UVD_MPC_SET_MUX__SET_1__SHIFT) |
		(0x2 << UVD_MPC_SET_MUX__SET_2__SHIFT)), 0xFFFFFFFF, 0);

	vcn_v1_0_mc_resume_dpg_mode(adev);

	WREG32_SOC15_DPG_MODE(UVD, 0, mmUVD_REG_XX_MASK, 0x10, 0xFFFFFFFF, 0);
	WREG32_SOC15_DPG_MODE(UVD, 0, mmUVD_RBC_XX_IB_REG_CHECK, 0x3, 0xFFFFFFFF, 0);

	/* boot up the VCPU */
	WREG32_SOC15_DPG_MODE(UVD, 0, mmUVD_SOFT_RESET, 0, 0xFFFFFFFF, 0);

	/* enable UMC */
	WREG32_SOC15_DPG_MODE(UVD, 0, mmUVD_LMI_CTRL2,
		0x1F << UVD_LMI_CTRL2__RE_OFLD_MIF_WR_REQ_NUM__SHIFT,
		0xFFFFFFFF, 0);

	/* enable master interrupt */
	WREG32_SOC15_DPG_MODE(UVD, 0, mmUVD_MASTINT_EN,
			UVD_MASTINT_EN__VCPU_EN_MASK, UVD_MASTINT_EN__VCPU_EN_MASK, 0);

	vcn_v1_0_clock_gating_dpg_mode(adev, 1);
	/* setup mmUVD_LMI_CTRL */
	WREG32_SOC15_DPG_MODE(UVD, 0, mmUVD_LMI_CTRL,
		(8 << UVD_LMI_CTRL__WRITE_CLEAN_TIMER__SHIFT) |
		UVD_LMI_CTRL__WRITE_CLEAN_TIMER_EN_MASK |
		UVD_LMI_CTRL__DATA_COHERENCY_EN_MASK |
		UVD_LMI_CTRL__VCPU_DATA_COHERENCY_EN_MASK |
		UVD_LMI_CTRL__REQ_MODE_MASK |
		UVD_LMI_CTRL__CRC_RESET_MASK |
		UVD_LMI_CTRL__MASK_MC_URGENT_MASK |
		0x00100000L, 0xFFFFFFFF, 1);

	tmp = adev->gfx.config.gb_addr_config;
	/* setup VCN global tiling registers */
	WREG32_SOC15_DPG_MODE(UVD, 0, mmUVD_JPEG_ADDR_CONFIG, tmp, 0xFFFFFFFF, 1);
	WREG32_SOC15_DPG_MODE(UVD, 0, mmUVD_JPEG_UV_ADDR_CONFIG, tmp, 0xFFFFFFFF, 1);

	/* enable System Interrupt for JRBC */
	WREG32_SOC15_DPG_MODE(UVD, 0, mmUVD_SYS_INT_EN,
			UVD_SYS_INT_EN__UVD_JRBC_EN_MASK, 0xFFFFFFFF, 1);

	/* force RBC into idle state */
	rb_bufsz = order_base_2(ring->ring_size);
	tmp = REG_SET_FIELD(0, UVD_RBC_RB_CNTL, RB_BUFSZ, rb_bufsz);
	tmp = REG_SET_FIELD(tmp, UVD_RBC_RB_CNTL, RB_BLKSZ, 1);
	tmp = REG_SET_FIELD(tmp, UVD_RBC_RB_CNTL, RB_NO_FETCH, 1);
	tmp = REG_SET_FIELD(tmp, UVD_RBC_RB_CNTL, RB_NO_UPDATE, 1);
	tmp = REG_SET_FIELD(tmp, UVD_RBC_RB_CNTL, RB_RPTR_WR_EN, 1);
	WREG32_SOC15(UVD, 0, mmUVD_RBC_RB_CNTL, tmp);

	/* set the write pointer delay */
	WREG32_SOC15(UVD, 0, mmUVD_RBC_RB_WPTR_CNTL, 0);

	/* set the wb address */
	WREG32_SOC15(UVD, 0, mmUVD_RBC_RB_RPTR_ADDR,
			(upper_32_bits(ring->gpu_addr) >> 2));

	/* program the RB_BASE for ring buffer */
	WREG32_SOC15(UVD, 0, mmUVD_LMI_RBC_RB_64BIT_BAR_LOW,
			lower_32_bits(ring->gpu_addr));
	WREG32_SOC15(UVD, 0, mmUVD_LMI_RBC_RB_64BIT_BAR_HIGH,
			upper_32_bits(ring->gpu_addr));

	/* Initialize the ring buffer's read and write pointers */
	WREG32_SOC15(UVD, 0, mmUVD_RBC_RB_RPTR, 0);

	WREG32_SOC15(UVD, 0, mmUVD_SCRATCH2, 0);

	ring->wptr = RREG32_SOC15(UVD, 0, mmUVD_RBC_RB_RPTR);
	WREG32_SOC15(UVD, 0, mmUVD_RBC_RB_WPTR,
			lower_32_bits(ring->wptr));

	WREG32_P(SOC15_REG_OFFSET(UVD, 0, mmUVD_RBC_RB_CNTL), 0,
			~UVD_RBC_RB_CNTL__RB_NO_FETCH_MASK);

	/* initialize JPEG wptr */
	ring = &adev->vcn.inst->ring_jpeg;
	ring->wptr = RREG32_SOC15(UVD, 0, mmUVD_JRBC_RB_WPTR);

	/* copy patch commands to the jpeg ring */
	vcn_v1_0_jpeg_ring_set_patch_ring(ring,
			(ring->wptr + ring->max_dw * amdgpu_sched_hw_submission));

	return 0;
}

static int vcn_v1_0_start(struct amdgpu_device *adev)
{
	int r;

	if (adev->pg_flags & AMD_PG_SUPPORT_VCN_DPG)
		r = vcn_v1_0_start_dpg_mode(adev);
	else
		r = vcn_v1_0_start_spg_mode(adev);

	return r;
}

/**
 * vcn_v1_0_stop_spg_mode - stop VCN block
 *
 * @adev: amdgpu_device pointer
 *
 * stop the VCN block
 */
static int vcn_v1_0_stop_spg_mode(struct amdgpu_device *adev)
{
	int ret_code, tmp;

	SOC15_WAIT_ON_RREG(UVD, 0, mmUVD_STATUS, UVD_STATUS__IDLE, 0x7, ret_code);

	tmp = UVD_LMI_STATUS__VCPU_LMI_WRITE_CLEAN_MASK |
		UVD_LMI_STATUS__READ_CLEAN_MASK |
		UVD_LMI_STATUS__WRITE_CLEAN_MASK |
		UVD_LMI_STATUS__WRITE_CLEAN_RAW_MASK;
	SOC15_WAIT_ON_RREG(UVD, 0, mmUVD_LMI_STATUS, tmp, tmp, ret_code);

	/* put VCPU into reset */
	WREG32_P(SOC15_REG_OFFSET(UVD, 0, mmUVD_SOFT_RESET),
		UVD_SOFT_RESET__VCPU_SOFT_RESET_MASK,
		~UVD_SOFT_RESET__VCPU_SOFT_RESET_MASK);

	tmp = UVD_LMI_STATUS__UMC_READ_CLEAN_RAW_MASK |
		UVD_LMI_STATUS__UMC_WRITE_CLEAN_RAW_MASK;
	SOC15_WAIT_ON_RREG(UVD, 0, mmUVD_LMI_STATUS, tmp, tmp, ret_code);

	/* disable VCPU clock */
	WREG32_P(SOC15_REG_OFFSET(UVD, 0, mmUVD_VCPU_CNTL), 0,
		~UVD_VCPU_CNTL__CLK_EN_MASK);

	/* reset LMI UMC/LMI */
	WREG32_P(SOC15_REG_OFFSET(UVD, 0, mmUVD_SOFT_RESET),
		UVD_SOFT_RESET__LMI_UMC_SOFT_RESET_MASK,
		~UVD_SOFT_RESET__LMI_UMC_SOFT_RESET_MASK);

	WREG32_P(SOC15_REG_OFFSET(UVD, 0, mmUVD_SOFT_RESET),
		UVD_SOFT_RESET__LMI_SOFT_RESET_MASK,
		~UVD_SOFT_RESET__LMI_SOFT_RESET_MASK);

	WREG32_SOC15(UVD, 0, mmUVD_STATUS, 0);

	vcn_v1_0_enable_clock_gating(adev);
	vcn_1_0_enable_static_power_gating(adev);

	return 0;
}

static int vcn_v1_0_stop_dpg_mode(struct amdgpu_device *adev)
{
	int ret_code = 0;
	uint32_t tmp;

	/* Wait for power status to be UVD_POWER_STATUS__UVD_POWER_STATUS_TILES_OFF */
	SOC15_WAIT_ON_RREG(UVD, 0, mmUVD_POWER_STATUS,
			UVD_POWER_STATUS__UVD_POWER_STATUS_TILES_OFF,
			UVD_POWER_STATUS__UVD_POWER_STATUS_MASK, ret_code);

	/* wait for read ptr to be equal to write ptr */
	tmp = RREG32_SOC15(UVD, 0, mmUVD_RB_WPTR);
	SOC15_WAIT_ON_RREG(UVD, 0, mmUVD_RB_RPTR, tmp, 0xFFFFFFFF, ret_code);

	tmp = RREG32_SOC15(UVD, 0, mmUVD_RB_WPTR2);
	SOC15_WAIT_ON_RREG(UVD, 0, mmUVD_RB_RPTR2, tmp, 0xFFFFFFFF, ret_code);

	tmp = RREG32_SOC15(UVD, 0, mmUVD_JRBC_RB_WPTR);
	SOC15_WAIT_ON_RREG(UVD, 0, mmUVD_JRBC_RB_RPTR, tmp, 0xFFFFFFFF, ret_code);

	tmp = RREG32_SOC15(UVD, 0, mmUVD_RBC_RB_WPTR) & 0x7FFFFFFF;
	SOC15_WAIT_ON_RREG(UVD, 0, mmUVD_RBC_RB_RPTR, tmp, 0xFFFFFFFF, ret_code);

	SOC15_WAIT_ON_RREG(UVD, 0, mmUVD_POWER_STATUS,
			UVD_POWER_STATUS__UVD_POWER_STATUS_TILES_OFF,
			UVD_POWER_STATUS__UVD_POWER_STATUS_MASK, ret_code);

	/* disable dynamic power gating mode */
	WREG32_P(SOC15_REG_OFFSET(UVD, 0, mmUVD_POWER_STATUS), 0,
			~UVD_POWER_STATUS__UVD_PG_MODE_MASK);

	return 0;
}
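
/*
 * Note the shutdown ordering: each ring is drained (read pointer allowed to
 * catch up with the write pointer) before dynamic power gating is switched
 * off; the RBC write pointer is masked with 0x7FFFFFFF because its top bit
 * is used as a flag in the UVD_SCRATCH2 mirror (see
 * vcn_v1_0_dec_ring_set_wptr()).
 */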

static int vcn_v1_0_stop(struct amdgpu_device *adev)
{
	int r;

	if (adev->pg_flags & AMD_PG_SUPPORT_VCN_DPG)
		r = vcn_v1_0_stop_dpg_mode(adev);
	else
		r = vcn_v1_0_stop_spg_mode(adev);

	return r;
}

static int vcn_v1_0_pause_dpg_mode(struct amdgpu_device *adev,
				struct dpg_pause_state *new_state)
{
	int ret_code;
	uint32_t reg_data = 0;
	uint32_t reg_data2 = 0;
	struct amdgpu_ring *ring;

	/* pause/unpause if state is changed */
	if (adev->vcn.pause_state.fw_based != new_state->fw_based) {
		DRM_DEBUG("dpg pause state changed %d:%d -> %d:%d",
			adev->vcn.pause_state.fw_based, adev->vcn.pause_state.jpeg,
			new_state->fw_based, new_state->jpeg);

		reg_data = RREG32_SOC15(UVD, 0, mmUVD_DPG_PAUSE) &
			(~UVD_DPG_PAUSE__NJ_PAUSE_DPG_ACK_MASK);

		if (new_state->fw_based == VCN_DPG_STATE__PAUSE) {
			ret_code = 0;

			if (!(reg_data & UVD_DPG_PAUSE__JPEG_PAUSE_DPG_ACK_MASK))
				SOC15_WAIT_ON_RREG(UVD, 0, mmUVD_POWER_STATUS,
					UVD_POWER_STATUS__UVD_POWER_STATUS_TILES_OFF,
					UVD_POWER_STATUS__UVD_POWER_STATUS_MASK, ret_code);

			if (!ret_code) {
				/* pause DPG non-jpeg */
				reg_data |= UVD_DPG_PAUSE__NJ_PAUSE_DPG_REQ_MASK;
				WREG32_SOC15(UVD, 0, mmUVD_DPG_PAUSE, reg_data);
				SOC15_WAIT_ON_RREG(UVD, 0, mmUVD_DPG_PAUSE,
					UVD_DPG_PAUSE__NJ_PAUSE_DPG_ACK_MASK,
					UVD_DPG_PAUSE__NJ_PAUSE_DPG_ACK_MASK, ret_code);

				/* Restore */
				ring = &adev->vcn.inst->ring_enc[0];
				WREG32_SOC15(UVD, 0, mmUVD_RB_BASE_LO, ring->gpu_addr);
				WREG32_SOC15(UVD, 0, mmUVD_RB_BASE_HI, upper_32_bits(ring->gpu_addr));
				WREG32_SOC15(UVD, 0, mmUVD_RB_SIZE, ring->ring_size / 4);
				WREG32_SOC15(UVD, 0, mmUVD_RB_RPTR, lower_32_bits(ring->wptr));
				WREG32_SOC15(UVD, 0, mmUVD_RB_WPTR, lower_32_bits(ring->wptr));

				ring = &adev->vcn.inst->ring_enc[1];
				WREG32_SOC15(UVD, 0, mmUVD_RB_BASE_LO2, ring->gpu_addr);
				WREG32_SOC15(UVD, 0, mmUVD_RB_BASE_HI2, upper_32_bits(ring->gpu_addr));
				WREG32_SOC15(UVD, 0, mmUVD_RB_SIZE2, ring->ring_size / 4);
				WREG32_SOC15(UVD, 0, mmUVD_RB_RPTR2, lower_32_bits(ring->wptr));
				WREG32_SOC15(UVD, 0, mmUVD_RB_WPTR2, lower_32_bits(ring->wptr));

				ring = &adev->vcn.inst->ring_dec;
				WREG32_SOC15(UVD, 0, mmUVD_RBC_RB_WPTR,
					RREG32_SOC15(UVD, 0, mmUVD_SCRATCH2) & 0x7FFFFFFF);
				SOC15_WAIT_ON_RREG(UVD, 0, mmUVD_POWER_STATUS,
					UVD_PGFSM_CONFIG__UVDM_UVDU_PWR_ON,
					UVD_POWER_STATUS__UVD_POWER_STATUS_MASK, ret_code);
			}
		} else {
			/* unpause dpg non-jpeg, no need to wait */
			reg_data &= ~UVD_DPG_PAUSE__NJ_PAUSE_DPG_REQ_MASK;
			WREG32_SOC15(UVD, 0, mmUVD_DPG_PAUSE, reg_data);
		}
		adev->vcn.pause_state.fw_based = new_state->fw_based;
	}

	/* pause/unpause if state is changed */
	if (adev->vcn.pause_state.jpeg != new_state->jpeg) {
		DRM_DEBUG("dpg pause state changed %d:%d -> %d:%d",
			adev->vcn.pause_state.fw_based, adev->vcn.pause_state.jpeg,
			new_state->fw_based, new_state->jpeg);

		reg_data = RREG32_SOC15(UVD, 0, mmUVD_DPG_PAUSE) &
			(~UVD_DPG_PAUSE__JPEG_PAUSE_DPG_ACK_MASK);

		if (new_state->jpeg == VCN_DPG_STATE__PAUSE) {
			ret_code = 0;

			if (!(reg_data & UVD_DPG_PAUSE__NJ_PAUSE_DPG_ACK_MASK))
				SOC15_WAIT_ON_RREG(UVD, 0, mmUVD_POWER_STATUS,
					UVD_POWER_STATUS__UVD_POWER_STATUS_TILES_OFF,
					UVD_POWER_STATUS__UVD_POWER_STATUS_MASK, ret_code);

			if (!ret_code) {
				/* Make sure JPEG Snoop is disabled before sending the pause */
				reg_data2 = RREG32_SOC15(UVD, 0, mmUVD_POWER_STATUS);
				reg_data2 |= UVD_POWER_STATUS__JRBC_SNOOP_DIS_MASK;
				WREG32_SOC15(UVD, 0, mmUVD_POWER_STATUS, reg_data2);

				/* pause DPG jpeg */
				reg_data |= UVD_DPG_PAUSE__JPEG_PAUSE_DPG_REQ_MASK;
				WREG32_SOC15(UVD, 0, mmUVD_DPG_PAUSE, reg_data);
				SOC15_WAIT_ON_RREG(UVD, 0, mmUVD_DPG_PAUSE,
					UVD_DPG_PAUSE__JPEG_PAUSE_DPG_ACK_MASK,
					UVD_DPG_PAUSE__JPEG_PAUSE_DPG_ACK_MASK, ret_code);

				/* Restore */
				ring = &adev->vcn.inst->ring_jpeg;
				WREG32_SOC15(UVD, 0, mmUVD_LMI_JRBC_RB_VMID, 0);
				WREG32_SOC15(UVD, 0, mmUVD_JRBC_RB_CNTL,
					UVD_JRBC_RB_CNTL__RB_NO_FETCH_MASK |
					UVD_JRBC_RB_CNTL__RB_RPTR_WR_EN_MASK);
				WREG32_SOC15(UVD, 0, mmUVD_LMI_JRBC_RB_64BIT_BAR_LOW,
					lower_32_bits(ring->gpu_addr));
				WREG32_SOC15(UVD, 0, mmUVD_LMI_JRBC_RB_64BIT_BAR_HIGH,
					upper_32_bits(ring->gpu_addr));
				WREG32_SOC15(UVD, 0, mmUVD_JRBC_RB_RPTR, ring->wptr);
				WREG32_SOC15(UVD, 0, mmUVD_JRBC_RB_WPTR, ring->wptr);
				WREG32_SOC15(UVD, 0, mmUVD_JRBC_RB_CNTL,
					UVD_JRBC_RB_CNTL__RB_RPTR_WR_EN_MASK);

				ring = &adev->vcn.inst->ring_dec;
				WREG32_SOC15(UVD, 0, mmUVD_RBC_RB_WPTR,
					RREG32_SOC15(UVD, 0, mmUVD_SCRATCH2) & 0x7FFFFFFF);
				SOC15_WAIT_ON_RREG(UVD, 0, mmUVD_POWER_STATUS,
					UVD_PGFSM_CONFIG__UVDM_UVDU_PWR_ON,
					UVD_POWER_STATUS__UVD_POWER_STATUS_MASK, ret_code);
			}
		} else {
			/* unpause dpg jpeg, no need to wait */
			reg_data &= ~UVD_DPG_PAUSE__JPEG_PAUSE_DPG_REQ_MASK;
			WREG32_SOC15(UVD, 0, mmUVD_DPG_PAUSE, reg_data);
		}
		adev->vcn.pause_state.jpeg = new_state->jpeg;
	}

	return 0;
}
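
/*
 * In DPG mode the decoder write pointer is mirrored into UVD_SCRATCH2 by
 * vcn_v1_0_dec_ring_set_wptr(), with bit 31 apparently serving as an
 * update flag; the "Restore" paths above replay that mirrored value (masked
 * back to 31 bits) into UVD_RBC_RB_WPTR once the engine has acknowledged
 * the pause.
 */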

static bool vcn_v1_0_is_idle(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	return (RREG32_SOC15(VCN, 0, mmUVD_STATUS) == UVD_STATUS__IDLE);
}

static int vcn_v1_0_wait_for_idle(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	int ret;

	SOC15_WAIT_ON_RREG(VCN, 0, mmUVD_STATUS, UVD_STATUS__IDLE,
		UVD_STATUS__IDLE, ret);

	return ret;
}
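
/*
 * SOC15_WAIT_ON_RREG() is a statement macro: it polls the register until
 * (value & mask) matches the expected value and stores the outcome (0 on
 * success, an error code such as -ETIMEDOUT otherwise) into the variable
 * passed as its last argument, which is why ret/ret_code are plain locals
 * throughout this file.
 */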

static int vcn_v1_0_set_clockgating_state(void *handle,
					  enum amd_clockgating_state state)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	bool enable = (state == AMD_CG_STATE_GATE);

	if (enable) {
		/* wait for STATUS to clear */
		if (!vcn_v1_0_is_idle(handle))
			return -EBUSY;
		vcn_v1_0_enable_clock_gating(adev);
	} else {
		/* disable HW gating and enable Sw gating */
		vcn_v1_0_disable_clock_gating(adev);
	}
	return 0;
}

/**
 * vcn_v1_0_dec_ring_get_rptr - get read pointer
 *
 * @ring: amdgpu_ring pointer
 *
 * Returns the current hardware read pointer
 */
static uint64_t vcn_v1_0_dec_ring_get_rptr(struct amdgpu_ring *ring)
{
	struct amdgpu_device *adev = ring->adev;

	return RREG32_SOC15(UVD, 0, mmUVD_RBC_RB_RPTR);
}

/**
 * vcn_v1_0_dec_ring_get_wptr - get write pointer
 *
 * @ring: amdgpu_ring pointer
 *
 * Returns the current hardware write pointer
 */
static uint64_t vcn_v1_0_dec_ring_get_wptr(struct amdgpu_ring *ring)
{
	struct amdgpu_device *adev = ring->adev;

	return RREG32_SOC15(UVD, 0, mmUVD_RBC_RB_WPTR);
}

/**
 * vcn_v1_0_dec_ring_set_wptr - set write pointer
 *
 * @ring: amdgpu_ring pointer
 *
 * Commits the write pointer to the hardware
 */
static void vcn_v1_0_dec_ring_set_wptr(struct amdgpu_ring *ring)
{
	struct amdgpu_device *adev = ring->adev;

	if (adev->pg_flags & AMD_PG_SUPPORT_VCN_DPG)
		WREG32_SOC15(UVD, 0, mmUVD_SCRATCH2,
			lower_32_bits(ring->wptr) | 0x80000000);

	WREG32_SOC15(UVD, 0, mmUVD_RBC_RB_WPTR, lower_32_bits(ring->wptr));
}

/**
 * vcn_v1_0_dec_ring_insert_start - insert a start command
 *
 * @ring: amdgpu_ring pointer
 *
 * Write a start command to the ring.
 */
static void vcn_v1_0_dec_ring_insert_start(struct amdgpu_ring *ring)
{
	struct amdgpu_device *adev = ring->adev;

	amdgpu_ring_write(ring,
		PACKET0(SOC15_REG_OFFSET(UVD, 0, mmUVD_GPCOM_VCPU_DATA0), 0));
	amdgpu_ring_write(ring, 0);
	amdgpu_ring_write(ring,
		PACKET0(SOC15_REG_OFFSET(UVD, 0, mmUVD_GPCOM_VCPU_CMD), 0));
	amdgpu_ring_write(ring, VCN_DEC_CMD_PACKET_START << 1);
}

/**
 * vcn_v1_0_dec_ring_insert_end - insert an end command
 *
 * @ring: amdgpu_ring pointer
 *
 * Write an end command to the ring.
 */
static void vcn_v1_0_dec_ring_insert_end(struct amdgpu_ring *ring)
{
	struct amdgpu_device *adev = ring->adev;

	amdgpu_ring_write(ring,
		PACKET0(SOC15_REG_OFFSET(UVD, 0, mmUVD_GPCOM_VCPU_CMD), 0));
	amdgpu_ring_write(ring, VCN_DEC_CMD_PACKET_END << 1);
}
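
/*
 * The decode ring is driven entirely through register-write packets: each
 * PACKET0(reg, 0) header dword is followed by one value dword that the
 * engine writes into the named UVD register. insert_start above therefore
 * emits four dwords, roughly:
 *
 *	PACKET0(mmUVD_GPCOM_VCPU_DATA0, 0), 0x0,
 *	PACKET0(mmUVD_GPCOM_VCPU_CMD, 0),   VCN_DEC_CMD_PACKET_START << 1
 */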

/**
 * vcn_v1_0_dec_ring_emit_fence - emit a fence & trap command
 *
 * @ring: amdgpu_ring pointer
 * @addr: address to write the fence sequence number to
 * @seq: sequence number to emit
 * @flags: fence flags
 *
 * Write a fence and a trap command to the ring.
 */
static void vcn_v1_0_dec_ring_emit_fence(struct amdgpu_ring *ring, u64 addr, u64 seq,
				     unsigned flags)
{
	struct amdgpu_device *adev = ring->adev;

	WARN_ON(flags & AMDGPU_FENCE_FLAG_64BIT);

	amdgpu_ring_write(ring,
		PACKET0(SOC15_REG_OFFSET(UVD, 0, mmUVD_CONTEXT_ID), 0));
	amdgpu_ring_write(ring, seq);
	amdgpu_ring_write(ring,
		PACKET0(SOC15_REG_OFFSET(UVD, 0, mmUVD_GPCOM_VCPU_DATA0), 0));
	amdgpu_ring_write(ring, addr & 0xffffffff);
	amdgpu_ring_write(ring,
		PACKET0(SOC15_REG_OFFSET(UVD, 0, mmUVD_GPCOM_VCPU_DATA1), 0));
	amdgpu_ring_write(ring, upper_32_bits(addr) & 0xff);
	amdgpu_ring_write(ring,
		PACKET0(SOC15_REG_OFFSET(UVD, 0, mmUVD_GPCOM_VCPU_CMD), 0));
	amdgpu_ring_write(ring, VCN_DEC_CMD_FENCE << 1);

	amdgpu_ring_write(ring,
		PACKET0(SOC15_REG_OFFSET(UVD, 0, mmUVD_GPCOM_VCPU_DATA0), 0));
	amdgpu_ring_write(ring, 0);
	amdgpu_ring_write(ring,
		PACKET0(SOC15_REG_OFFSET(UVD, 0, mmUVD_GPCOM_VCPU_DATA1), 0));
	amdgpu_ring_write(ring, 0);
	amdgpu_ring_write(ring,
		PACKET0(SOC15_REG_OFFSET(UVD, 0, mmUVD_GPCOM_VCPU_CMD), 0));
	amdgpu_ring_write(ring, VCN_DEC_CMD_TRAP << 1);
}

/**
 * vcn_v1_0_dec_ring_emit_ib - execute indirect buffer
 *
 * @ring: amdgpu_ring pointer
 * @job: job that owns the IB
 * @ib: indirect buffer to execute
 * @flags: unused
 *
 * Write ring commands to execute the indirect buffer
 */
static void vcn_v1_0_dec_ring_emit_ib(struct amdgpu_ring *ring,
					struct amdgpu_job *job,
					struct amdgpu_ib *ib,
					uint32_t flags)
{
	struct amdgpu_device *adev = ring->adev;
	unsigned vmid = AMDGPU_JOB_GET_VMID(job);

	amdgpu_ring_write(ring,
		PACKET0(SOC15_REG_OFFSET(UVD, 0, mmUVD_LMI_RBC_IB_VMID), 0));
	amdgpu_ring_write(ring, vmid);

	amdgpu_ring_write(ring,
		PACKET0(SOC15_REG_OFFSET(UVD, 0, mmUVD_LMI_RBC_IB_64BIT_BAR_LOW), 0));
	amdgpu_ring_write(ring, lower_32_bits(ib->gpu_addr));
	amdgpu_ring_write(ring,
		PACKET0(SOC15_REG_OFFSET(UVD, 0, mmUVD_LMI_RBC_IB_64BIT_BAR_HIGH), 0));
	amdgpu_ring_write(ring, upper_32_bits(ib->gpu_addr));
	amdgpu_ring_write(ring,
		PACKET0(SOC15_REG_OFFSET(UVD, 0, mmUVD_RBC_IB_SIZE), 0));
	amdgpu_ring_write(ring, ib->length_dw);
}

static void vcn_v1_0_dec_ring_emit_reg_wait(struct amdgpu_ring *ring,
					    uint32_t reg, uint32_t val,
					    uint32_t mask)
{
	struct amdgpu_device *adev = ring->adev;

	amdgpu_ring_write(ring,
		PACKET0(SOC15_REG_OFFSET(UVD, 0, mmUVD_GPCOM_VCPU_DATA0), 0));
	amdgpu_ring_write(ring, reg << 2);
	amdgpu_ring_write(ring,
		PACKET0(SOC15_REG_OFFSET(UVD, 0, mmUVD_GPCOM_VCPU_DATA1), 0));
	amdgpu_ring_write(ring, val);
	amdgpu_ring_write(ring,
		PACKET0(SOC15_REG_OFFSET(UVD, 0, mmUVD_GP_SCRATCH8), 0));
	amdgpu_ring_write(ring, mask);
	amdgpu_ring_write(ring,
		PACKET0(SOC15_REG_OFFSET(UVD, 0, mmUVD_GPCOM_VCPU_CMD), 0));
	amdgpu_ring_write(ring, VCN_DEC_CMD_REG_READ_COND_WAIT << 1);
}

static void vcn_v1_0_dec_ring_emit_vm_flush(struct amdgpu_ring *ring,
					    unsigned vmid, uint64_t pd_addr)
{
	struct amdgpu_vmhub *hub = &ring->adev->vmhub[ring->funcs->vmhub];
	uint32_t data0, data1, mask;

	pd_addr = amdgpu_gmc_emit_flush_gpu_tlb(ring, vmid, pd_addr);

	/* wait for register write */
	data0 = hub->ctx0_ptb_addr_lo32 + vmid * 2;
	data1 = lower_32_bits(pd_addr);
	mask = 0xffffffff;
	vcn_v1_0_dec_ring_emit_reg_wait(ring, data0, data1, mask);
}

static void vcn_v1_0_dec_ring_emit_wreg(struct amdgpu_ring *ring,
					uint32_t reg, uint32_t val)
{
	struct amdgpu_device *adev = ring->adev;

	amdgpu_ring_write(ring,
		PACKET0(SOC15_REG_OFFSET(UVD, 0, mmUVD_GPCOM_VCPU_DATA0), 0));
	amdgpu_ring_write(ring, reg << 2);
	amdgpu_ring_write(ring,
		PACKET0(SOC15_REG_OFFSET(UVD, 0, mmUVD_GPCOM_VCPU_DATA1), 0));
	amdgpu_ring_write(ring, val);
	amdgpu_ring_write(ring,
		PACKET0(SOC15_REG_OFFSET(UVD, 0, mmUVD_GPCOM_VCPU_CMD), 0));
	amdgpu_ring_write(ring, VCN_DEC_CMD_WRITE_REG << 1);
}

/**
 * vcn_v1_0_enc_ring_get_rptr - get enc read pointer
 *
 * @ring: amdgpu_ring pointer
 *
 * Returns the current hardware enc read pointer
 */
static uint64_t vcn_v1_0_enc_ring_get_rptr(struct amdgpu_ring *ring)
{
	struct amdgpu_device *adev = ring->adev;

	if (ring == &adev->vcn.inst->ring_enc[0])
		return RREG32_SOC15(UVD, 0, mmUVD_RB_RPTR);
	else
		return RREG32_SOC15(UVD, 0, mmUVD_RB_RPTR2);
}

/**
 * vcn_v1_0_enc_ring_get_wptr - get enc write pointer
 *
 * @ring: amdgpu_ring pointer
 *
 * Returns the current hardware enc write pointer
 */
static uint64_t vcn_v1_0_enc_ring_get_wptr(struct amdgpu_ring *ring)
{
	struct amdgpu_device *adev = ring->adev;

	if (ring == &adev->vcn.inst->ring_enc[0])
		return RREG32_SOC15(UVD, 0, mmUVD_RB_WPTR);
	else
		return RREG32_SOC15(UVD, 0, mmUVD_RB_WPTR2);
}

/**
 * vcn_v1_0_enc_ring_set_wptr - set enc write pointer
 *
 * @ring: amdgpu_ring pointer
 *
 * Commits the enc write pointer to the hardware
 */
static void vcn_v1_0_enc_ring_set_wptr(struct amdgpu_ring *ring)
{
	struct amdgpu_device *adev = ring->adev;

	if (ring == &adev->vcn.inst->ring_enc[0])
		WREG32_SOC15(UVD, 0, mmUVD_RB_WPTR,
			lower_32_bits(ring->wptr));
	else
		WREG32_SOC15(UVD, 0, mmUVD_RB_WPTR2,
			lower_32_bits(ring->wptr));
}

/**
 * vcn_v1_0_enc_ring_emit_fence - emit an enc fence & trap command
 *
 * @ring: amdgpu_ring pointer
 * @addr: address to write the fence sequence number to
 * @seq: sequence number to emit
 * @flags: fence flags
 *
 * Write an enc fence and a trap command to the ring.
 */
static void vcn_v1_0_enc_ring_emit_fence(struct amdgpu_ring *ring, u64 addr,
			u64 seq, unsigned flags)
{
	WARN_ON(flags & AMDGPU_FENCE_FLAG_64BIT);

	amdgpu_ring_write(ring, VCN_ENC_CMD_FENCE);
	amdgpu_ring_write(ring, addr);
	amdgpu_ring_write(ring, upper_32_bits(addr));
	amdgpu_ring_write(ring, seq);
	amdgpu_ring_write(ring, VCN_ENC_CMD_TRAP);
}

static void vcn_v1_0_enc_ring_insert_end(struct amdgpu_ring *ring)
{
	amdgpu_ring_write(ring, VCN_ENC_CMD_END);
}

/**
 * vcn_v1_0_enc_ring_emit_ib - enc execute indirect buffer
 *
 * @ring: amdgpu_ring pointer
 * @job: job that owns the IB
 * @ib: indirect buffer to execute
 * @flags: unused
 *
 * Write enc ring commands to execute the indirect buffer
 */
static void vcn_v1_0_enc_ring_emit_ib(struct amdgpu_ring *ring,
					struct amdgpu_job *job,
					struct amdgpu_ib *ib,
					uint32_t flags)
{
	unsigned vmid = AMDGPU_JOB_GET_VMID(job);

	amdgpu_ring_write(ring, VCN_ENC_CMD_IB);
	amdgpu_ring_write(ring, vmid);
	amdgpu_ring_write(ring, lower_32_bits(ib->gpu_addr));
	amdgpu_ring_write(ring, upper_32_bits(ib->gpu_addr));
	amdgpu_ring_write(ring, ib->length_dw);
}

static void vcn_v1_0_enc_ring_emit_reg_wait(struct amdgpu_ring *ring,
					    uint32_t reg, uint32_t val,
					    uint32_t mask)
{
	amdgpu_ring_write(ring, VCN_ENC_CMD_REG_WAIT);
	amdgpu_ring_write(ring, reg << 2);
	amdgpu_ring_write(ring, mask);
	amdgpu_ring_write(ring, val);
}

static void vcn_v1_0_enc_ring_emit_vm_flush(struct amdgpu_ring *ring,
					    unsigned int vmid, uint64_t pd_addr)
{
	struct amdgpu_vmhub *hub = &ring->adev->vmhub[ring->funcs->vmhub];

	pd_addr = amdgpu_gmc_emit_flush_gpu_tlb(ring, vmid, pd_addr);

	/* wait for reg writes */
	vcn_v1_0_enc_ring_emit_reg_wait(ring, hub->ctx0_ptb_addr_lo32 + vmid * 2,
					lower_32_bits(pd_addr), 0xffffffff);
}

static void vcn_v1_0_enc_ring_emit_wreg(struct amdgpu_ring *ring,
					uint32_t reg, uint32_t val)
{
	amdgpu_ring_write(ring, VCN_ENC_CMD_REG_WRITE);
	amdgpu_ring_write(ring, reg << 2);
	amdgpu_ring_write(ring, val);
}
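
/*
 * Unlike the decode ring, the encode ring has a native command set: each
 * VCN_ENC_CMD_* opcode dword is followed directly by its operand dwords
 * (e.g. REG_WRITE takes a register offset and a value), so no PACKET0
 * framing is needed.
 */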

/**
 * vcn_v1_0_jpeg_ring_get_rptr - get read pointer
 *
 * @ring: amdgpu_ring pointer
 *
 * Returns the current hardware read pointer
 */
static uint64_t vcn_v1_0_jpeg_ring_get_rptr(struct amdgpu_ring *ring)
{
	struct amdgpu_device *adev = ring->adev;

	return RREG32_SOC15(UVD, 0, mmUVD_JRBC_RB_RPTR);
}

/**
 * vcn_v1_0_jpeg_ring_get_wptr - get write pointer
 *
 * @ring: amdgpu_ring pointer
 *
 * Returns the current hardware write pointer
 */
static uint64_t vcn_v1_0_jpeg_ring_get_wptr(struct amdgpu_ring *ring)
{
	struct amdgpu_device *adev = ring->adev;

	return RREG32_SOC15(UVD, 0, mmUVD_JRBC_RB_WPTR);
}

/**
 * vcn_v1_0_jpeg_ring_set_wptr - set write pointer
 *
 * @ring: amdgpu_ring pointer
 *
 * Commits the write pointer to the hardware
 */
static void vcn_v1_0_jpeg_ring_set_wptr(struct amdgpu_ring *ring)
{
	struct amdgpu_device *adev = ring->adev;

	WREG32_SOC15(UVD, 0, mmUVD_JRBC_RB_WPTR, lower_32_bits(ring->wptr));
}

/**
 * vcn_v1_0_jpeg_ring_insert_start - insert a start command
 *
 * @ring: amdgpu_ring pointer
 *
 * Write a start command to the ring.
 */
static void vcn_v1_0_jpeg_ring_insert_start(struct amdgpu_ring *ring)
{
	struct amdgpu_device *adev = ring->adev;

	amdgpu_ring_write(ring,
		PACKETJ(SOC15_REG_OFFSET(UVD, 0, mmUVD_JRBC_EXTERNAL_REG_BASE), 0, 0, PACKETJ_TYPE0));
	amdgpu_ring_write(ring, 0x68e04);

	amdgpu_ring_write(ring, PACKETJ(0, 0, 0, PACKETJ_TYPE0));
	amdgpu_ring_write(ring, 0x80010000);
}

/**
 * vcn_v1_0_jpeg_ring_insert_end - insert an end command
 *
 * @ring: amdgpu_ring pointer
 *
 * Write an end command to the ring.
 */
static void vcn_v1_0_jpeg_ring_insert_end(struct amdgpu_ring *ring)
{
	struct amdgpu_device *adev = ring->adev;

	amdgpu_ring_write(ring,
		PACKETJ(SOC15_REG_OFFSET(UVD, 0, mmUVD_JRBC_EXTERNAL_REG_BASE), 0, 0, PACKETJ_TYPE0));
	amdgpu_ring_write(ring, 0x68e04);

	amdgpu_ring_write(ring, PACKETJ(0, 0, 0, PACKETJ_TYPE0));
	amdgpu_ring_write(ring, 0x00010000);
}
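
/*
 * The JPEG fence below is considerably more involved than the enc one:
 * the sequence number is handed to the engine through the GPCOM data
 * registers plus command 0x8, the fence address goes through the LMI
 * write/read BAR registers, and the engine is then polled via the
 * COND_RD_TIMER / REF_DATA pair before the final TYPE7 packet emits the
 * trap interrupt. The individual GPCOM command semantics are not
 * documented here; the register sequence is kept as-is from the VCN 1.0
 * programming sequence.
 */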

/**
 * vcn_v1_0_jpeg_ring_emit_fence - emit a fence & trap command
 *
 * @ring: amdgpu_ring pointer
 * @addr: GPU address of the fence
 * @seq: sequence number to write
 * @flags: fence flags
 *
 * Write a fence and a trap command to the ring.
 */
static void vcn_v1_0_jpeg_ring_emit_fence(struct amdgpu_ring *ring, u64 addr, u64 seq,
				     unsigned flags)
{
	struct amdgpu_device *adev = ring->adev;

	WARN_ON(flags & AMDGPU_FENCE_FLAG_64BIT);

	amdgpu_ring_write(ring,
		PACKETJ(SOC15_REG_OFFSET(UVD, 0, mmUVD_JPEG_GPCOM_DATA0), 0, 0, PACKETJ_TYPE0));
	amdgpu_ring_write(ring, seq);

	amdgpu_ring_write(ring,
		PACKETJ(SOC15_REG_OFFSET(UVD, 0, mmUVD_JPEG_GPCOM_DATA1), 0, 0, PACKETJ_TYPE0));
	amdgpu_ring_write(ring, seq);

	amdgpu_ring_write(ring,
		PACKETJ(SOC15_REG_OFFSET(UVD, 0, mmUVD_LMI_JRBC_RB_MEM_WR_64BIT_BAR_LOW), 0, 0, PACKETJ_TYPE0));
	amdgpu_ring_write(ring, lower_32_bits(addr));

	amdgpu_ring_write(ring,
		PACKETJ(SOC15_REG_OFFSET(UVD, 0, mmUVD_LMI_JRBC_RB_MEM_WR_64BIT_BAR_HIGH), 0, 0, PACKETJ_TYPE0));
	amdgpu_ring_write(ring, upper_32_bits(addr));

	amdgpu_ring_write(ring,
		PACKETJ(SOC15_REG_OFFSET(UVD, 0, mmUVD_JPEG_GPCOM_CMD), 0, 0, PACKETJ_TYPE0));
	amdgpu_ring_write(ring, 0x8);

	amdgpu_ring_write(ring,
		PACKETJ(SOC15_REG_OFFSET(UVD, 0, mmUVD_JPEG_GPCOM_CMD), 0, PACKETJ_CONDITION_CHECK0, PACKETJ_TYPE4));
	amdgpu_ring_write(ring, 0);

	amdgpu_ring_write(ring,
		PACKETJ(SOC15_REG_OFFSET(UVD, 0, mmUVD_JRBC_RB_COND_RD_TIMER), 0, 0, PACKETJ_TYPE0));
	amdgpu_ring_write(ring, 0x01400200);

	amdgpu_ring_write(ring,
		PACKETJ(SOC15_REG_OFFSET(UVD, 0, mmUVD_JRBC_RB_REF_DATA), 0, 0, PACKETJ_TYPE0));
	amdgpu_ring_write(ring, seq);

	amdgpu_ring_write(ring,
		PACKETJ(SOC15_REG_OFFSET(UVD, 0, mmUVD_LMI_JRBC_RB_MEM_RD_64BIT_BAR_LOW), 0, 0, PACKETJ_TYPE0));
	amdgpu_ring_write(ring, lower_32_bits(addr));

	amdgpu_ring_write(ring,
		PACKETJ(SOC15_REG_OFFSET(UVD, 0, mmUVD_LMI_JRBC_RB_MEM_RD_64BIT_BAR_HIGH), 0, 0, PACKETJ_TYPE0));
	amdgpu_ring_write(ring, upper_32_bits(addr));

	amdgpu_ring_write(ring,
		PACKETJ(0, 0, PACKETJ_CONDITION_CHECK3, PACKETJ_TYPE2));
	amdgpu_ring_write(ring, 0xffffffff);

	amdgpu_ring_write(ring,
		PACKETJ(SOC15_REG_OFFSET(UVD, 0, mmUVD_JRBC_EXTERNAL_REG_BASE), 0, 0, PACKETJ_TYPE0));
	amdgpu_ring_write(ring, 0x3fbc);

	amdgpu_ring_write(ring,
		PACKETJ(0, 0, 0, PACKETJ_TYPE0));
	amdgpu_ring_write(ring, 0x1);

	/* emit trap */
	amdgpu_ring_write(ring, PACKETJ(0, 0, 0, PACKETJ_TYPE7));
	amdgpu_ring_write(ring, 0);
}

/**
 * vcn_v1_0_jpeg_ring_emit_ib - execute indirect buffer
 *
 * @ring: amdgpu_ring pointer
 * @job: job to retrieve vmid from
 * @ib: indirect buffer to execute
 * @flags: unused
 *
 * Write ring commands to execute the indirect buffer.
 */
static void vcn_v1_0_jpeg_ring_emit_ib(struct amdgpu_ring *ring,
					struct amdgpu_job *job,
					struct amdgpu_ib *ib,
					uint32_t flags)
{
	struct amdgpu_device *adev = ring->adev;
	unsigned vmid = AMDGPU_JOB_GET_VMID(job);

	amdgpu_ring_write(ring,
		PACKETJ(SOC15_REG_OFFSET(UVD, 0, mmUVD_LMI_JRBC_IB_VMID), 0, 0, PACKETJ_TYPE0));
	amdgpu_ring_write(ring, (vmid | (vmid << 4)));

	amdgpu_ring_write(ring,
		PACKETJ(SOC15_REG_OFFSET(UVD, 0, mmUVD_LMI_JPEG_VMID), 0, 0, PACKETJ_TYPE0));
	amdgpu_ring_write(ring, (vmid | (vmid << 4)));

	amdgpu_ring_write(ring,
		PACKETJ(SOC15_REG_OFFSET(UVD, 0, mmUVD_LMI_JRBC_IB_64BIT_BAR_LOW), 0, 0, PACKETJ_TYPE0));
	amdgpu_ring_write(ring, lower_32_bits(ib->gpu_addr));

	amdgpu_ring_write(ring,
		PACKETJ(SOC15_REG_OFFSET(UVD, 0, mmUVD_LMI_JRBC_IB_64BIT_BAR_HIGH), 0, 0, PACKETJ_TYPE0));
	amdgpu_ring_write(ring, upper_32_bits(ib->gpu_addr));

	amdgpu_ring_write(ring,
		PACKETJ(SOC15_REG_OFFSET(UVD, 0, mmUVD_JRBC_IB_SIZE), 0, 0, PACKETJ_TYPE0));
	amdgpu_ring_write(ring, ib->length_dw);

	amdgpu_ring_write(ring,
		PACKETJ(SOC15_REG_OFFSET(UVD, 0, mmUVD_LMI_JRBC_RB_MEM_RD_64BIT_BAR_LOW), 0, 0, PACKETJ_TYPE0));
	amdgpu_ring_write(ring, lower_32_bits(ring->gpu_addr));

	amdgpu_ring_write(ring,
		PACKETJ(SOC15_REG_OFFSET(UVD, 0, mmUVD_LMI_JRBC_RB_MEM_RD_64BIT_BAR_HIGH), 0, 0, PACKETJ_TYPE0));
	amdgpu_ring_write(ring, upper_32_bits(ring->gpu_addr));

	amdgpu_ring_write(ring,
		PACKETJ(0, 0, PACKETJ_CONDITION_CHECK0, PACKETJ_TYPE2));
	amdgpu_ring_write(ring, 0);

	amdgpu_ring_write(ring,
		PACKETJ(SOC15_REG_OFFSET(UVD, 0, mmUVD_JRBC_RB_COND_RD_TIMER), 0, 0, PACKETJ_TYPE0));
	amdgpu_ring_write(ring, 0x01400200);

	amdgpu_ring_write(ring,
		PACKETJ(SOC15_REG_OFFSET(UVD, 0, mmUVD_JRBC_RB_REF_DATA), 0, 0, PACKETJ_TYPE0));
	amdgpu_ring_write(ring, 0x2);

	amdgpu_ring_write(ring,
		PACKETJ(SOC15_REG_OFFSET(UVD, 0, mmUVD_JRBC_STATUS), 0, PACKETJ_CONDITION_CHECK3, PACKETJ_TYPE3));
	amdgpu_ring_write(ring, 0x2);
}
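
/*
 * JRBC register access: registers whose byte offset falls inside the
 * 0x1f800-0x21fff or 0x1e000-0x1e1ff windows can apparently be addressed
 * directly in the PACKETJ header, so UVD_JRBC_EXTERNAL_REG_BASE is
 * cleared and the dword offset goes into the packet itself; anything
 * outside those windows is reached indirectly by first programming the
 * full byte offset into UVD_JRBC_EXTERNAL_REG_BASE. The helpers below
 * (and vcn_v1_0_jpeg_ring_patch_wreg further down) all follow this split.
 */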

static void vcn_v1_0_jpeg_ring_emit_reg_wait(struct amdgpu_ring *ring,
					    uint32_t reg, uint32_t val,
					    uint32_t mask)
{
	struct amdgpu_device *adev = ring->adev;
	uint32_t reg_offset = (reg << 2);

	amdgpu_ring_write(ring,
		PACKETJ(SOC15_REG_OFFSET(UVD, 0, mmUVD_JRBC_RB_COND_RD_TIMER), 0, 0, PACKETJ_TYPE0));
	amdgpu_ring_write(ring, 0x01400200);

	amdgpu_ring_write(ring,
		PACKETJ(SOC15_REG_OFFSET(UVD, 0, mmUVD_JRBC_RB_REF_DATA), 0, 0, PACKETJ_TYPE0));
	amdgpu_ring_write(ring, val);

	amdgpu_ring_write(ring,
		PACKETJ(SOC15_REG_OFFSET(UVD, 0, mmUVD_JRBC_EXTERNAL_REG_BASE), 0, 0, PACKETJ_TYPE0));
	if (((reg_offset >= 0x1f800) && (reg_offset <= 0x21fff)) ||
			((reg_offset >= 0x1e000) && (reg_offset <= 0x1e1ff))) {
		amdgpu_ring_write(ring, 0);
		amdgpu_ring_write(ring,
			PACKETJ((reg_offset >> 2), 0, 0, PACKETJ_TYPE3));
	} else {
		amdgpu_ring_write(ring, reg_offset);
		amdgpu_ring_write(ring,
			PACKETJ(0, 0, 0, PACKETJ_TYPE3));
	}
	amdgpu_ring_write(ring, mask);
}

static void vcn_v1_0_jpeg_ring_emit_vm_flush(struct amdgpu_ring *ring,
					unsigned vmid, uint64_t pd_addr)
{
	struct amdgpu_vmhub *hub = &ring->adev->vmhub[ring->funcs->vmhub];
	uint32_t data0, data1, mask;

	pd_addr = amdgpu_gmc_emit_flush_gpu_tlb(ring, vmid, pd_addr);

	/* wait for register write */
	data0 = hub->ctx0_ptb_addr_lo32 + vmid * 2;
	data1 = lower_32_bits(pd_addr);
	mask = 0xffffffff;
	vcn_v1_0_jpeg_ring_emit_reg_wait(ring, data0, data1, mask);
}

static void vcn_v1_0_jpeg_ring_emit_wreg(struct amdgpu_ring *ring,
					uint32_t reg, uint32_t val)
{
	struct amdgpu_device *adev = ring->adev;
	uint32_t reg_offset = (reg << 2);

	amdgpu_ring_write(ring,
		PACKETJ(SOC15_REG_OFFSET(UVD, 0, mmUVD_JRBC_EXTERNAL_REG_BASE), 0, 0, PACKETJ_TYPE0));
	if (((reg_offset >= 0x1f800) && (reg_offset <= 0x21fff)) ||
			((reg_offset >= 0x1e000) && (reg_offset <= 0x1e1ff))) {
		amdgpu_ring_write(ring, 0);
		amdgpu_ring_write(ring,
			PACKETJ((reg_offset >> 2), 0, 0, PACKETJ_TYPE0));
	} else {
		amdgpu_ring_write(ring, reg_offset);
		amdgpu_ring_write(ring,
			PACKETJ(0, 0, 0, PACKETJ_TYPE0));
	}
	amdgpu_ring_write(ring, val);
}
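
/*
 * No-ops on the JPEG ring are emitted as TYPE6 packet/payload pairs, so
 * both the write pointer and the requested count must stay even; the
 * WARN_ON below catches callers that would break that invariant.
 */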

static void vcn_v1_0_jpeg_ring_nop(struct amdgpu_ring *ring, uint32_t count)
{
	int i;

	WARN_ON(ring->wptr % 2 || count % 2);

	for (i = 0; i < count / 2; i++) {
		amdgpu_ring_write(ring, PACKETJ(0, 0, 0, PACKETJ_TYPE6));
		amdgpu_ring_write(ring, 0);
	}
}

static void vcn_v1_0_jpeg_ring_patch_wreg(struct amdgpu_ring *ring, uint32_t *ptr, uint32_t reg_offset, uint32_t val)
{
	struct amdgpu_device *adev = ring->adev;

	ring->ring[(*ptr)++] = PACKETJ(SOC15_REG_OFFSET(UVD, 0, mmUVD_JRBC_EXTERNAL_REG_BASE), 0, 0, PACKETJ_TYPE0);
	if (((reg_offset >= 0x1f800) && (reg_offset <= 0x21fff)) ||
			((reg_offset >= 0x1e000) && (reg_offset <= 0x1e1ff))) {
		ring->ring[(*ptr)++] = 0;
		ring->ring[(*ptr)++] = PACKETJ((reg_offset >> 2), 0, 0, PACKETJ_TYPE0);
	} else {
		ring->ring[(*ptr)++] = reg_offset;
		ring->ring[(*ptr)++] = PACKETJ(0, 0, 0, PACKETJ_TYPE0);
	}
	ring->ring[(*ptr)++] = val;
}
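
/*
 * vcn_v1_0_jpeg_ring_set_patch_ring() below writes a fixed 23-command
 * sequence directly into the ring buffer at the given dword offset,
 * bypassing amdgpu_ring_write(). The sequence appears to point the JRBC
 * read BAR back at the ring, temporarily enable NO_FETCH and read-pointer
 * write access in UVD_JRBC_RB_CNTL, poll until the engine has accepted
 * the mode, reset the read pointer, and finally re-enable fetching; the
 * step numbering in the comments tracks that layout. The UVD_JRBC_RB_CNTL
 * values (0x13 to enable, 0x12 to disable) follow the VCN 1.0 programming
 * sequence rather than any named register masks.
 */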

static void vcn_v1_0_jpeg_ring_set_patch_ring(struct amdgpu_ring *ring, uint32_t ptr)
{
	struct amdgpu_device *adev = ring->adev;
	uint32_t reg, reg_offset, val, mask, i;

	// 1st: program mmUVD_LMI_JRBC_RB_MEM_RD_64BIT_BAR_LOW
	reg = SOC15_REG_OFFSET(UVD, 0, mmUVD_LMI_JRBC_RB_MEM_RD_64BIT_BAR_LOW);
	reg_offset = (reg << 2);
	val = lower_32_bits(ring->gpu_addr);
	vcn_v1_0_jpeg_ring_patch_wreg(ring, &ptr, reg_offset, val);

	// 2nd: program mmUVD_LMI_JRBC_RB_MEM_RD_64BIT_BAR_HIGH
	reg = SOC15_REG_OFFSET(UVD, 0, mmUVD_LMI_JRBC_RB_MEM_RD_64BIT_BAR_HIGH);
	reg_offset = (reg << 2);
	val = upper_32_bits(ring->gpu_addr);
	vcn_v1_0_jpeg_ring_patch_wreg(ring, &ptr, reg_offset, val);

	// 3rd to 5th: issue MEM_READ commands
	for (i = 0; i <= 2; i++) {
		ring->ring[ptr++] = PACKETJ(0, 0, 0, PACKETJ_TYPE2);
		ring->ring[ptr++] = 0;
	}

	// 6th: program mmUVD_JRBC_RB_CNTL register to enable NO_FETCH and RPTR write ability
	reg = SOC15_REG_OFFSET(UVD, 0, mmUVD_JRBC_RB_CNTL);
	reg_offset = (reg << 2);
	val = 0x13;
	vcn_v1_0_jpeg_ring_patch_wreg(ring, &ptr, reg_offset, val);

	// 7th: program mmUVD_JRBC_RB_REF_DATA
	reg = SOC15_REG_OFFSET(UVD, 0, mmUVD_JRBC_RB_REF_DATA);
	reg_offset = (reg << 2);
	val = 0x1;
	vcn_v1_0_jpeg_ring_patch_wreg(ring, &ptr, reg_offset, val);

	// 8th: issue conditional register read mmUVD_JRBC_RB_CNTL
	reg = SOC15_REG_OFFSET(UVD, 0, mmUVD_JRBC_RB_CNTL);
	reg_offset = (reg << 2);
	val = 0x1;
	mask = 0x1;

	ring->ring[ptr++] = PACKETJ(SOC15_REG_OFFSET(UVD, 0, mmUVD_JRBC_RB_COND_RD_TIMER), 0, 0, PACKETJ_TYPE0);
	ring->ring[ptr++] = 0x01400200;
	ring->ring[ptr++] = PACKETJ(SOC15_REG_OFFSET(UVD, 0, mmUVD_JRBC_RB_REF_DATA), 0, 0, PACKETJ_TYPE0);
	ring->ring[ptr++] = val;
	ring->ring[ptr++] = PACKETJ(SOC15_REG_OFFSET(UVD, 0, mmUVD_JRBC_EXTERNAL_REG_BASE), 0, 0, PACKETJ_TYPE0);
	if (((reg_offset >= 0x1f800) && (reg_offset <= 0x21fff)) ||
			((reg_offset >= 0x1e000) && (reg_offset <= 0x1e1ff))) {
		ring->ring[ptr++] = 0;
		ring->ring[ptr++] = PACKETJ((reg_offset >> 2), 0, 0, PACKETJ_TYPE3);
	} else {
		ring->ring[ptr++] = reg_offset;
		ring->ring[ptr++] = PACKETJ(0, 0, 0, PACKETJ_TYPE3);
	}
	ring->ring[ptr++] = mask;

	// 9th to 21st: insert no-op
	for (i = 0; i <= 12; i++) {
		ring->ring[ptr++] = PACKETJ(0, 0, 0, PACKETJ_TYPE6);
		ring->ring[ptr++] = 0;
	}

	// 22nd: reset mmUVD_JRBC_RB_RPTR
	reg = SOC15_REG_OFFSET(UVD, 0, mmUVD_JRBC_RB_RPTR);
	reg_offset = (reg << 2);
	val = 0;
	vcn_v1_0_jpeg_ring_patch_wreg(ring, &ptr, reg_offset, val);

	// 23rd: program mmUVD_JRBC_RB_CNTL to disable no_fetch
	reg = SOC15_REG_OFFSET(UVD, 0, mmUVD_JRBC_RB_CNTL);
	reg_offset = (reg << 2);
	val = 0x12;
	vcn_v1_0_jpeg_ring_patch_wreg(ring, &ptr, reg_offset, val);
}
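
/*
 * Interrupt delivery for VCN is not gated per source here, so the
 * set-state callback is a stub that simply reports success.
 */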

static int vcn_v1_0_set_interrupt_state(struct amdgpu_device *adev,
					struct amdgpu_irq_src *source,
					unsigned type,
					enum amdgpu_interrupt_state state)
{
	return 0;
}

static int vcn_v1_0_process_interrupt(struct amdgpu_device *adev,
				      struct amdgpu_irq_src *source,
				      struct amdgpu_iv_entry *entry)
{
	DRM_DEBUG("IH: VCN TRAP\n");

	switch (entry->src_id) {
	case 124: /* VCN_1_0__SRCID__UVD_SYSTEM_MESSAGE_INTERRUPT */
		amdgpu_fence_process(&adev->vcn.inst->ring_dec);
		break;
	case 119: /* VCN_1_0__SRCID__UVD_ENC_GENERAL_PURPOSE */
		amdgpu_fence_process(&adev->vcn.inst->ring_enc[0]);
		break;
	case 120: /* VCN_1_0__SRCID__UVD_ENC_LOW_LATENCY */
		amdgpu_fence_process(&adev->vcn.inst->ring_enc[1]);
		break;
	case 126: /* JPEG decode, matching the raw id registered in sw_init */
		amdgpu_fence_process(&adev->vcn.inst->ring_jpeg);
		break;
	default:
		DRM_ERROR("Unhandled interrupt: %d %d\n",
			  entry->src_id, entry->src_data[0]);
		break;
	}

	return 0;
}
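
/*
 * Decode-ring no-ops are written as PACKET0(UVD_NO_OP) header/payload
 * pairs, mirroring the JPEG variant above, so wptr and count must both
 * stay even.
 */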

static void vcn_v1_0_dec_ring_insert_nop(struct amdgpu_ring *ring, uint32_t count)
{
	struct amdgpu_device *adev = ring->adev;
	int i;

	WARN_ON(ring->wptr % 2 || count % 2);

	for (i = 0; i < count / 2; i++) {
		amdgpu_ring_write(ring, PACKET0(SOC15_REG_OFFSET(UVD, 0, mmUVD_NO_OP), 0));
		amdgpu_ring_write(ring, 0);
	}
}

static int vcn_v1_0_set_powergating_state(void *handle,
					  enum amd_powergating_state state)
{
	/* This doesn't actually powergate the VCN block.
	 * That's done in the dpm code via the SMC. This
	 * just re-inits the block as necessary. The actual
	 * gating still happens in the dpm code. We should
	 * revisit this when there is a cleaner line between
	 * the smc and the hw blocks.
	 */
	int ret;
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	if (state == adev->vcn.cur_state)
		return 0;

	if (state == AMD_PG_STATE_GATE)
		ret = vcn_v1_0_stop(adev);
	else
		ret = vcn_v1_0_start(adev);

	if (!ret)
		adev->vcn.cur_state = state;

	return ret;
}

static const struct amd_ip_funcs vcn_v1_0_ip_funcs = {
	.name = "vcn_v1_0",
	.early_init = vcn_v1_0_early_init,
	.late_init = NULL,
	.sw_init = vcn_v1_0_sw_init,
	.sw_fini = vcn_v1_0_sw_fini,
	.hw_init = vcn_v1_0_hw_init,
	.hw_fini = vcn_v1_0_hw_fini,
	.suspend = vcn_v1_0_suspend,
	.resume = vcn_v1_0_resume,
	.is_idle = vcn_v1_0_is_idle,
	.wait_for_idle = vcn_v1_0_wait_for_idle,
	.check_soft_reset = NULL /* vcn_v1_0_check_soft_reset */,
	.pre_soft_reset = NULL /* vcn_v1_0_pre_soft_reset */,
	.soft_reset = NULL /* vcn_v1_0_soft_reset */,
	.post_soft_reset = NULL /* vcn_v1_0_post_soft_reset */,
	.set_clockgating_state = vcn_v1_0_set_clockgating_state,
	.set_powergating_state = vcn_v1_0_set_powergating_state,
};
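
/*
 * For each ring type below, .emit_frame_size is the worst-case number of
 * ring dwords a single submission may consume outside of its IBs
 * (flushes, fences, padding), and .emit_ib_size is the dword cost of one
 * emit_ib call; the per-line comments track which emit_* helper accounts
 * for which term of the sum.
 */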

static const struct amdgpu_ring_funcs vcn_v1_0_dec_ring_vm_funcs = {
	.type = AMDGPU_RING_TYPE_VCN_DEC,
	.align_mask = 0xf,
	.support_64bit_ptrs = false,
	.no_user_fence = true,
	.vmhub = AMDGPU_MMHUB_0,
	.get_rptr = vcn_v1_0_dec_ring_get_rptr,
	.get_wptr = vcn_v1_0_dec_ring_get_wptr,
	.set_wptr = vcn_v1_0_dec_ring_set_wptr,
	.emit_frame_size =
		6 + 6 + /* hdp invalidate / flush */
		SOC15_FLUSH_GPU_TLB_NUM_WREG * 6 +
		SOC15_FLUSH_GPU_TLB_NUM_REG_WAIT * 8 +
		8 + /* vcn_v1_0_dec_ring_emit_vm_flush */
		14 + 14 + /* vcn_v1_0_dec_ring_emit_fence x2 vm fence */
		6,
	.emit_ib_size = 8, /* vcn_v1_0_dec_ring_emit_ib */
	.emit_ib = vcn_v1_0_dec_ring_emit_ib,
	.emit_fence = vcn_v1_0_dec_ring_emit_fence,
	.emit_vm_flush = vcn_v1_0_dec_ring_emit_vm_flush,
	.test_ring = amdgpu_vcn_dec_ring_test_ring,
	.test_ib = amdgpu_vcn_dec_ring_test_ib,
	.insert_nop = vcn_v1_0_dec_ring_insert_nop,
	.insert_start = vcn_v1_0_dec_ring_insert_start,
	.insert_end = vcn_v1_0_dec_ring_insert_end,
	.pad_ib = amdgpu_ring_generic_pad_ib,
	.begin_use = amdgpu_vcn_ring_begin_use,
	.end_use = amdgpu_vcn_ring_end_use,
	.emit_wreg = vcn_v1_0_dec_ring_emit_wreg,
	.emit_reg_wait = vcn_v1_0_dec_ring_emit_reg_wait,
	.emit_reg_write_reg_wait = amdgpu_ring_emit_reg_write_reg_wait_helper,
};

static const struct amdgpu_ring_funcs vcn_v1_0_enc_ring_vm_funcs = {
	.type = AMDGPU_RING_TYPE_VCN_ENC,
	.align_mask = 0x3f,
	.nop = VCN_ENC_CMD_NO_OP,
	.support_64bit_ptrs = false,
	.no_user_fence = true,
	.vmhub = AMDGPU_MMHUB_0,
	.get_rptr = vcn_v1_0_enc_ring_get_rptr,
	.get_wptr = vcn_v1_0_enc_ring_get_wptr,
	.set_wptr = vcn_v1_0_enc_ring_set_wptr,
	.emit_frame_size =
		SOC15_FLUSH_GPU_TLB_NUM_WREG * 3 +
		SOC15_FLUSH_GPU_TLB_NUM_REG_WAIT * 4 +
		4 + /* vcn_v1_0_enc_ring_emit_vm_flush */
		5 + 5 + /* vcn_v1_0_enc_ring_emit_fence x2 vm fence */
		1, /* vcn_v1_0_enc_ring_insert_end */
	.emit_ib_size = 5, /* vcn_v1_0_enc_ring_emit_ib */
	.emit_ib = vcn_v1_0_enc_ring_emit_ib,
	.emit_fence = vcn_v1_0_enc_ring_emit_fence,
	.emit_vm_flush = vcn_v1_0_enc_ring_emit_vm_flush,
	.test_ring = amdgpu_vcn_enc_ring_test_ring,
	.test_ib = amdgpu_vcn_enc_ring_test_ib,
	.insert_nop = amdgpu_ring_insert_nop,
	.insert_end = vcn_v1_0_enc_ring_insert_end,
	.pad_ib = amdgpu_ring_generic_pad_ib,
	.begin_use = amdgpu_vcn_ring_begin_use,
	.end_use = amdgpu_vcn_ring_end_use,
	.emit_wreg = vcn_v1_0_enc_ring_emit_wreg,
	.emit_reg_wait = vcn_v1_0_enc_ring_emit_reg_wait,
	.emit_reg_write_reg_wait = amdgpu_ring_emit_reg_write_reg_wait_helper,
};

static const struct amdgpu_ring_funcs vcn_v1_0_jpeg_ring_vm_funcs = {
	.type = AMDGPU_RING_TYPE_VCN_JPEG,
	.align_mask = 0xf,
	.nop = PACKET0(0x81ff, 0),
	.support_64bit_ptrs = false,
	.no_user_fence = true,
	.vmhub = AMDGPU_MMHUB_0,
	.extra_dw = 64, /* presumably room for the patch sequence above */
	.get_rptr = vcn_v1_0_jpeg_ring_get_rptr,
	.get_wptr = vcn_v1_0_jpeg_ring_get_wptr,
	.set_wptr = vcn_v1_0_jpeg_ring_set_wptr,
	.emit_frame_size =
		6 + 6 + /* hdp invalidate / flush */
		SOC15_FLUSH_GPU_TLB_NUM_WREG * 6 +
		SOC15_FLUSH_GPU_TLB_NUM_REG_WAIT * 8 +
		8 + /* vcn_v1_0_jpeg_ring_emit_vm_flush */
		26 + 26 + /* vcn_v1_0_jpeg_ring_emit_fence x2 vm fence */
		6,
	.emit_ib_size = 22, /* vcn_v1_0_jpeg_ring_emit_ib */
	.emit_ib = vcn_v1_0_jpeg_ring_emit_ib,
	.emit_fence = vcn_v1_0_jpeg_ring_emit_fence,
	.emit_vm_flush = vcn_v1_0_jpeg_ring_emit_vm_flush,
	.test_ring = amdgpu_vcn_jpeg_ring_test_ring,
	.test_ib = amdgpu_vcn_jpeg_ring_test_ib,
	.insert_nop = vcn_v1_0_jpeg_ring_nop,
	.insert_start = vcn_v1_0_jpeg_ring_insert_start,
	.insert_end = vcn_v1_0_jpeg_ring_insert_end,
	.pad_ib = amdgpu_ring_generic_pad_ib,
	.begin_use = amdgpu_vcn_ring_begin_use,
	.end_use = amdgpu_vcn_ring_end_use,
	.emit_wreg = vcn_v1_0_jpeg_ring_emit_wreg,
	.emit_reg_wait = vcn_v1_0_jpeg_ring_emit_reg_wait,
	.emit_reg_write_reg_wait = amdgpu_ring_emit_reg_write_reg_wait_helper,
};

static void vcn_v1_0_set_dec_ring_funcs(struct amdgpu_device *adev)
{
	adev->vcn.inst->ring_dec.funcs = &vcn_v1_0_dec_ring_vm_funcs;
	DRM_INFO("VCN decode is enabled in VM mode\n");
}

static void vcn_v1_0_set_enc_ring_funcs(struct amdgpu_device *adev)
{
	int i;

	for (i = 0; i < adev->vcn.num_enc_rings; ++i)
		adev->vcn.inst->ring_enc[i].funcs = &vcn_v1_0_enc_ring_vm_funcs;

	DRM_INFO("VCN encode is enabled in VM mode\n");
}

static void vcn_v1_0_set_jpeg_ring_funcs(struct amdgpu_device *adev)
{
	adev->vcn.inst->ring_jpeg.funcs = &vcn_v1_0_jpeg_ring_vm_funcs;
	DRM_INFO("VCN jpeg decode is enabled in VM mode\n");
}

static const struct amdgpu_irq_src_funcs vcn_v1_0_irq_funcs = {
	.set = vcn_v1_0_set_interrupt_state,
	.process = vcn_v1_0_process_interrupt,
};
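
/*
 * One interrupt type is registered per fence source: the decode ring,
 * each enabled encode ring, and the JPEG ring, which is why num_types is
 * num_enc_rings + 2.
 */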

static void vcn_v1_0_set_irq_funcs(struct amdgpu_device *adev)
{
	adev->vcn.inst->irq.num_types = adev->vcn.num_enc_rings + 2;
	adev->vcn.inst->irq.funcs = &vcn_v1_0_irq_funcs;
}

const struct amdgpu_ip_block_version vcn_v1_0_ip_block =
{
	.type = AMD_IP_BLOCK_TYPE_VCN,
	.major = 1,
	.minor = 0,
	.rev = 0,
	.funcs = &vcn_v1_0_ip_funcs,
};