/*
 * Copyright 2019 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 */
#include <linux/firmware.h>

#include "amdgpu.h"
#include "amdgpu_vcn.h"
#include "amdgpu_pm.h"
#include "soc15.h"
#include "soc15d.h"
#include "vcn_v2_0.h"
#include "mmsch_v3_0.h"

#include "vcn/vcn_3_0_0_offset.h"
#include "vcn/vcn_3_0_0_sh_mask.h"
#include "ivsrcid/vcn/irqsrcs_vcn_2_0.h"
#define mmUVD_CONTEXT_ID_INTERNAL_OFFSET			0x27
#define mmUVD_GPCOM_VCPU_CMD_INTERNAL_OFFSET			0x0f
#define mmUVD_GPCOM_VCPU_DATA0_INTERNAL_OFFSET			0x10
#define mmUVD_GPCOM_VCPU_DATA1_INTERNAL_OFFSET			0x11
#define mmUVD_NO_OP_INTERNAL_OFFSET				0x29
#define mmUVD_GP_SCRATCH8_INTERNAL_OFFSET			0x66
#define mmUVD_SCRATCH9_INTERNAL_OFFSET				0xc01d

#define mmUVD_LMI_RBC_IB_VMID_INTERNAL_OFFSET			0x431
#define mmUVD_LMI_RBC_IB_64BIT_BAR_LOW_INTERNAL_OFFSET		0x3b4
#define mmUVD_LMI_RBC_IB_64BIT_BAR_HIGH_INTERNAL_OFFSET		0x3b5
#define mmUVD_RBC_IB_SIZE_INTERNAL_OFFSET			0x25c
#define VCN_INSTANCES_SIENNA_CICHLID	2
#define DEC_SW_RING_ENABLED		FALSE
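/*
 * The software decode ring capability is still advertised to the firmware
 * through fw_shared->present_flag_0 in sw_init below, but the ring itself
 * stays disabled (fw_shared->sw_ring.is_enabled = FALSE).
 */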
#define RDECODE_MSG_CREATE	0x00000000
#define RDECODE_MESSAGE_CREATE	0x00000001
static int amdgpu_ih_clientid_vcns[] = {
	SOC15_IH_CLIENTID_VCN,
	SOC15_IH_CLIENTID_VCN1
};

static int amdgpu_ucode_id_vcns[] = {
	AMDGPU_UCODE_ID_VCN,
	AMDGPU_UCODE_ID_VCN1
};
static int vcn_v3_0_start_sriov(struct amdgpu_device *adev);
static void vcn_v3_0_set_dec_ring_funcs(struct amdgpu_device *adev);
static void vcn_v3_0_set_enc_ring_funcs(struct amdgpu_device *adev);
static void vcn_v3_0_set_irq_funcs(struct amdgpu_device *adev);
static int vcn_v3_0_set_powergating_state(void *handle,
			enum amd_powergating_state state);
static int vcn_v3_0_pause_dpg_mode(struct amdgpu_device *adev,
			int inst_idx, struct dpg_pause_state *new_state);

static void vcn_v3_0_dec_ring_set_wptr(struct amdgpu_ring *ring);
static void vcn_v3_0_enc_ring_set_wptr(struct amdgpu_ring *ring);
/**
 * vcn_v3_0_early_init - set function pointers
 *
 * @handle: amdgpu_device pointer
 *
 * Set ring and irq function pointers
 */
static int vcn_v3_0_early_init(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	if (amdgpu_sriov_vf(adev)) {
		adev->vcn.num_vcn_inst = VCN_INSTANCES_SIENNA_CICHLID;
		adev->vcn.harvest_config = 0;
		adev->vcn.num_enc_rings = 1;
	} else {
		if (adev->asic_type == CHIP_SIENNA_CICHLID) {
			u32 harvest;
			int i;

			adev->vcn.num_vcn_inst = VCN_INSTANCES_SIENNA_CICHLID;
			for (i = 0; i < adev->vcn.num_vcn_inst; i++) {
				harvest = RREG32_SOC15(VCN, i, mmCC_UVD_HARVESTING);
				if (harvest & CC_UVD_HARVESTING__UVD_DISABLE_MASK)
					adev->vcn.harvest_config |= 1 << i;
			}

			if (adev->vcn.harvest_config == (AMDGPU_VCN_HARVEST_VCN0 |
						AMDGPU_VCN_HARVEST_VCN1))
				/* both instances are harvested, disable the block */
				return -ENOENT;
		} else
			adev->vcn.num_vcn_inst = 1;

		adev->vcn.num_enc_rings = 2;
	}

	vcn_v3_0_set_dec_ring_funcs(adev);
	vcn_v3_0_set_enc_ring_funcs(adev);
	vcn_v3_0_set_irq_funcs(adev);

	return 0;
}
/**
 * vcn_v3_0_sw_init - sw init for VCN block
 *
 * @handle: amdgpu_device pointer
 *
 * Load firmware and sw initialization
 */
static int vcn_v3_0_sw_init(void *handle)
{
	struct amdgpu_ring *ring;
	int i, j, r;
	int vcn_doorbell_index = 0;
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	r = amdgpu_vcn_sw_init(adev);
	if (r)
		return r;

	if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) {
		const struct common_firmware_header *hdr;
		hdr = (const struct common_firmware_header *)adev->vcn.fw->data;
		adev->firmware.ucode[AMDGPU_UCODE_ID_VCN].ucode_id = AMDGPU_UCODE_ID_VCN;
		adev->firmware.ucode[AMDGPU_UCODE_ID_VCN].fw = adev->vcn.fw;
		adev->firmware.fw_size +=
			ALIGN(le32_to_cpu(hdr->ucode_size_bytes), PAGE_SIZE);

		if (adev->vcn.num_vcn_inst == VCN_INSTANCES_SIENNA_CICHLID) {
			adev->firmware.ucode[AMDGPU_UCODE_ID_VCN1].ucode_id = AMDGPU_UCODE_ID_VCN1;
			adev->firmware.ucode[AMDGPU_UCODE_ID_VCN1].fw = adev->vcn.fw;
			adev->firmware.fw_size +=
				ALIGN(le32_to_cpu(hdr->ucode_size_bytes), PAGE_SIZE);
		}
		DRM_INFO("PSP loading VCN firmware\n");
	}

	r = amdgpu_vcn_resume(adev);
	if (r)
		return r;

	/* Note: doorbell assignment is fixed for SRIOV multiple VCN engines
	 *
	 * vcn_db_base = adev->doorbell_index.vcn.vcn_ring0_1 << 1;
	 * dec_ring_i = vcn_db_base + i * (adev->vcn.num_enc_rings + 1)
	 * enc_ring_i,j = vcn_db_base + i * (adev->vcn.num_enc_rings + 1) + 1 + j
	 */
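	/*
	 * Worked example, assuming num_enc_rings == 1 under SRIOV:
	 * instance 0 uses vcn_db_base + 0 (dec) and vcn_db_base + 1 (enc 0);
	 * instance 1 uses vcn_db_base + 2 (dec) and vcn_db_base + 3 (enc 0).
	 */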
	if (amdgpu_sriov_vf(adev)) {
		vcn_doorbell_index = adev->doorbell_index.vcn.vcn_ring0_1;
		/* get DWORD offset */
		vcn_doorbell_index = vcn_doorbell_index << 1;
	}

	for (i = 0; i < adev->vcn.num_vcn_inst; i++) {
		volatile struct amdgpu_fw_shared *fw_shared;

		if (adev->vcn.harvest_config & (1 << i))
			continue;

		adev->vcn.internal.context_id = mmUVD_CONTEXT_ID_INTERNAL_OFFSET;
		adev->vcn.internal.ib_vmid = mmUVD_LMI_RBC_IB_VMID_INTERNAL_OFFSET;
		adev->vcn.internal.ib_bar_low = mmUVD_LMI_RBC_IB_64BIT_BAR_LOW_INTERNAL_OFFSET;
		adev->vcn.internal.ib_bar_high = mmUVD_LMI_RBC_IB_64BIT_BAR_HIGH_INTERNAL_OFFSET;
		adev->vcn.internal.ib_size = mmUVD_RBC_IB_SIZE_INTERNAL_OFFSET;
		adev->vcn.internal.gp_scratch8 = mmUVD_GP_SCRATCH8_INTERNAL_OFFSET;

		adev->vcn.internal.scratch9 = mmUVD_SCRATCH9_INTERNAL_OFFSET;
		adev->vcn.inst[i].external.scratch9 = SOC15_REG_OFFSET(VCN, i, mmUVD_SCRATCH9);
		adev->vcn.internal.data0 = mmUVD_GPCOM_VCPU_DATA0_INTERNAL_OFFSET;
		adev->vcn.inst[i].external.data0 = SOC15_REG_OFFSET(VCN, i, mmUVD_GPCOM_VCPU_DATA0);
		adev->vcn.internal.data1 = mmUVD_GPCOM_VCPU_DATA1_INTERNAL_OFFSET;
		adev->vcn.inst[i].external.data1 = SOC15_REG_OFFSET(VCN, i, mmUVD_GPCOM_VCPU_DATA1);
		adev->vcn.internal.cmd = mmUVD_GPCOM_VCPU_CMD_INTERNAL_OFFSET;
		adev->vcn.inst[i].external.cmd = SOC15_REG_OFFSET(VCN, i, mmUVD_GPCOM_VCPU_CMD);
		adev->vcn.internal.nop = mmUVD_NO_OP_INTERNAL_OFFSET;
		adev->vcn.inst[i].external.nop = SOC15_REG_OFFSET(VCN, i, mmUVD_NO_OP);
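		/*
		 * The *_INTERNAL_OFFSET values above are the firmware-visible
		 * register indices used when these registers are written
		 * through decode ring packets, while the "external" values
		 * are the per-instance SOC15 MMIO offsets of the same
		 * registers (a note on intent, not from the original source).
		 */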
		/* VCN DEC TRAP */
		r = amdgpu_irq_add_id(adev, amdgpu_ih_clientid_vcns[i],
				VCN_2_0__SRCID__UVD_SYSTEM_MESSAGE_INTERRUPT, &adev->vcn.inst[i].irq);
		if (r)
			return r;

		atomic_set(&adev->vcn.inst[i].sched_score, 0);

		ring = &adev->vcn.inst[i].ring_dec;
		ring->use_doorbell = true;
		if (amdgpu_sriov_vf(adev)) {
			ring->doorbell_index = vcn_doorbell_index + i * (adev->vcn.num_enc_rings + 1);
		} else {
			ring->doorbell_index = (adev->doorbell_index.vcn.vcn_ring0_1 << 1) + 8 * i;
		}
		sprintf(ring->name, "vcn_dec_%d", i);
		r = amdgpu_ring_init(adev, ring, 512, &adev->vcn.inst[i].irq, 0,
				     AMDGPU_RING_PRIO_DEFAULT,
				     &adev->vcn.inst[i].sched_score);
		if (r)
			return r;

		for (j = 0; j < adev->vcn.num_enc_rings; ++j) {
			/* VCN ENC TRAP */
			r = amdgpu_irq_add_id(adev, amdgpu_ih_clientid_vcns[i],
					j + VCN_2_0__SRCID__UVD_ENC_GENERAL_PURPOSE, &adev->vcn.inst[i].irq);
			if (r)
				return r;

			ring = &adev->vcn.inst[i].ring_enc[j];
			ring->use_doorbell = true;
			if (amdgpu_sriov_vf(adev)) {
				ring->doorbell_index = vcn_doorbell_index + i * (adev->vcn.num_enc_rings + 1) + 1 + j;
			} else {
				ring->doorbell_index = (adev->doorbell_index.vcn.vcn_ring0_1 << 1) + 2 + j + 8 * i;
			}
			sprintf(ring->name, "vcn_enc_%d.%d", i, j);
			r = amdgpu_ring_init(adev, ring, 512, &adev->vcn.inst[i].irq, 0,
					     AMDGPU_RING_PRIO_DEFAULT,
					     &adev->vcn.inst[i].sched_score);
			if (r)
				return r;
		}

		fw_shared = adev->vcn.inst[i].fw_shared_cpu_addr;
		fw_shared->present_flag_0 |= cpu_to_le32(AMDGPU_VCN_SW_RING_FLAG) |
					     cpu_to_le32(AMDGPU_VCN_MULTI_QUEUE_FLAG) |
					     cpu_to_le32(AMDGPU_VCN_FW_SHARED_FLAG_0_RB);
		fw_shared->sw_ring.is_enabled = cpu_to_le32(DEC_SW_RING_ENABLED);
	}

	if (amdgpu_sriov_vf(adev)) {
		r = amdgpu_virt_alloc_mm_table(adev);
		if (r)
			return r;
	}
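	/* DPG pause handler, invoked by the common VCN code when rings go busy/idle */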
	if (adev->pg_flags & AMD_PG_SUPPORT_VCN_DPG)
		adev->vcn.pause_dpg_mode = vcn_v3_0_pause_dpg_mode;

	return 0;
}
/**
 * vcn_v3_0_sw_fini - sw fini for VCN block
 *
 * @handle: amdgpu_device pointer
 *
 * VCN suspend and free up sw allocation
 */
static int vcn_v3_0_sw_fini(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	int i, r;

	for (i = 0; i < adev->vcn.num_vcn_inst; i++) {
		volatile struct amdgpu_fw_shared *fw_shared;

		if (adev->vcn.harvest_config & (1 << i))
			continue;
		fw_shared = adev->vcn.inst[i].fw_shared_cpu_addr;
		fw_shared->present_flag_0 = 0;
		fw_shared->sw_ring.is_enabled = false;
	}

	if (amdgpu_sriov_vf(adev))
		amdgpu_virt_free_mm_table(adev);

	r = amdgpu_vcn_suspend(adev);
	if (r)
		return r;

	r = amdgpu_vcn_sw_fini(adev);

	return r;
}
/**
 * vcn_v3_0_hw_init - start and test VCN block
 *
 * @handle: amdgpu_device pointer
 *
 * Initialize the hardware, boot up the VCPU and do some testing
 */
static int vcn_v3_0_hw_init(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	struct amdgpu_ring *ring;
	int i, j, r;

	if (amdgpu_sriov_vf(adev)) {
		r = vcn_v3_0_start_sriov(adev);
		if (r)
			goto done;

		/* initialize VCN dec and enc ring buffers */
		for (i = 0; i < adev->vcn.num_vcn_inst; ++i) {
			if (adev->vcn.harvest_config & (1 << i))
				continue;

			ring = &adev->vcn.inst[i].ring_dec;
			if (ring->sched.ready) {
				ring->wptr = 0;
				ring->wptr_old = 0;
				vcn_v3_0_dec_ring_set_wptr(ring);
			}

			for (j = 0; j < adev->vcn.num_enc_rings; ++j) {
				ring = &adev->vcn.inst[i].ring_enc[j];
				if (ring->sched.ready) {
					ring->wptr = 0;
					ring->wptr_old = 0;
					vcn_v3_0_enc_ring_set_wptr(ring);
				}
			}
		}
	} else {
		for (i = 0; i < adev->vcn.num_vcn_inst; ++i) {
			if (adev->vcn.harvest_config & (1 << i))
				continue;

			ring = &adev->vcn.inst[i].ring_dec;

			adev->nbio.funcs->vcn_doorbell_range(adev, ring->use_doorbell,
						ring->doorbell_index, i);

			r = amdgpu_ring_test_helper(ring);
			if (r)
				goto done;

			for (j = 0; j < adev->vcn.num_enc_rings; ++j) {
				ring = &adev->vcn.inst[i].ring_enc[j];
				r = amdgpu_ring_test_helper(ring);
				if (r)
					goto done;
			}
		}
	}

done:
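	/* report which power-gating flavor (DPG vs. SPG) the block came up in */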
	if (!r)
		DRM_INFO("VCN decode and encode initialized successfully (under %s).\n",
			(adev->pg_flags & AMD_PG_SUPPORT_VCN_DPG) ? "DPG Mode" : "SPG Mode");

	return r;
}
/**
 * vcn_v3_0_hw_fini - stop the hardware block
 *
 * @handle: amdgpu_device pointer
 *
 * Stop the VCN block, mark ring as not ready any more
 */
static int vcn_v3_0_hw_fini(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	struct amdgpu_ring *ring;
	int i, j;

	for (i = 0; i < adev->vcn.num_vcn_inst; ++i) {
		if (adev->vcn.harvest_config & (1 << i))
			continue;

		ring = &adev->vcn.inst[i].ring_dec;

		if (!amdgpu_sriov_vf(adev)) {
			if ((adev->pg_flags & AMD_PG_SUPPORT_VCN_DPG) ||
					(adev->vcn.cur_state != AMD_PG_STATE_GATE &&
					 RREG32_SOC15(VCN, i, mmUVD_STATUS))) {
				vcn_v3_0_set_powergating_state(adev, AMD_PG_STATE_GATE);
			}
		}
		ring->sched.ready = false;

		for (j = 0; j < adev->vcn.num_enc_rings; ++j) {
			ring = &adev->vcn.inst[i].ring_enc[j];
			ring->sched.ready = false;
		}
	}

	return 0;
}
/**
 * vcn_v3_0_suspend - suspend VCN block
 *
 * @handle: amdgpu_device pointer
 *
 * HW fini and suspend VCN block
 */
static int vcn_v3_0_suspend(void *handle)
{
	int r;
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	r = vcn_v3_0_hw_fini(adev);
	if (r)
		return r;

	r = amdgpu_vcn_suspend(adev);

	return r;
}
/**
 * vcn_v3_0_resume - resume VCN block
 *
 * @handle: amdgpu_device pointer
 *
 * Resume firmware and hw init VCN block
 */
static int vcn_v3_0_resume(void *handle)
{
	int r;
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	r = amdgpu_vcn_resume(adev);
	if (r)
		return r;

	r = vcn_v3_0_hw_init(adev);

	return r;
}
/**
 * vcn_v3_0_mc_resume - memory controller programming
 *
 * @adev: amdgpu_device pointer
 * @inst: instance number
 *
 * Let the VCN memory controller know its offsets
 */
static void vcn_v3_0_mc_resume(struct amdgpu_device *adev, int inst)
{
	uint32_t size = AMDGPU_GPU_PAGE_ALIGN(adev->vcn.fw->size + 4);
	uint32_t offset;

	/* cache window 0: fw */
	if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) {
		WREG32_SOC15(VCN, inst, mmUVD_LMI_VCPU_CACHE_64BIT_BAR_LOW,
			(adev->firmware.ucode[AMDGPU_UCODE_ID_VCN + inst].tmr_mc_addr_lo));
		WREG32_SOC15(VCN, inst, mmUVD_LMI_VCPU_CACHE_64BIT_BAR_HIGH,
			(adev->firmware.ucode[AMDGPU_UCODE_ID_VCN + inst].tmr_mc_addr_hi));
		WREG32_SOC15(VCN, inst, mmUVD_VCPU_CACHE_OFFSET0, 0);
		offset = 0;
	} else {
		WREG32_SOC15(VCN, inst, mmUVD_LMI_VCPU_CACHE_64BIT_BAR_LOW,
			lower_32_bits(adev->vcn.inst[inst].gpu_addr));
		WREG32_SOC15(VCN, inst, mmUVD_LMI_VCPU_CACHE_64BIT_BAR_HIGH,
			upper_32_bits(adev->vcn.inst[inst].gpu_addr));
		offset = size;
		WREG32_SOC15(VCN, inst, mmUVD_VCPU_CACHE_OFFSET0,
			AMDGPU_UVD_FIRMWARE_OFFSET >> 3);
	}
	WREG32_SOC15(VCN, inst, mmUVD_VCPU_CACHE_SIZE0, size);

	/* cache window 1: stack */
	WREG32_SOC15(VCN, inst, mmUVD_LMI_VCPU_CACHE1_64BIT_BAR_LOW,
		lower_32_bits(adev->vcn.inst[inst].gpu_addr + offset));
	WREG32_SOC15(VCN, inst, mmUVD_LMI_VCPU_CACHE1_64BIT_BAR_HIGH,
		upper_32_bits(adev->vcn.inst[inst].gpu_addr + offset));
	WREG32_SOC15(VCN, inst, mmUVD_VCPU_CACHE_OFFSET1, 0);
	WREG32_SOC15(VCN, inst, mmUVD_VCPU_CACHE_SIZE1, AMDGPU_VCN_STACK_SIZE);

	/* cache window 2: context */
	WREG32_SOC15(VCN, inst, mmUVD_LMI_VCPU_CACHE2_64BIT_BAR_LOW,
		lower_32_bits(adev->vcn.inst[inst].gpu_addr + offset + AMDGPU_VCN_STACK_SIZE));
	WREG32_SOC15(VCN, inst, mmUVD_LMI_VCPU_CACHE2_64BIT_BAR_HIGH,
		upper_32_bits(adev->vcn.inst[inst].gpu_addr + offset + AMDGPU_VCN_STACK_SIZE));
	WREG32_SOC15(VCN, inst, mmUVD_VCPU_CACHE_OFFSET2, 0);
	WREG32_SOC15(VCN, inst, mmUVD_VCPU_CACHE_SIZE2, AMDGPU_VCN_CONTEXT_SIZE);

	/* non-cache window */
	WREG32_SOC15(VCN, inst, mmUVD_LMI_VCPU_NC0_64BIT_BAR_LOW,
		lower_32_bits(adev->vcn.inst[inst].fw_shared_gpu_addr));
	WREG32_SOC15(VCN, inst, mmUVD_LMI_VCPU_NC0_64BIT_BAR_HIGH,
		upper_32_bits(adev->vcn.inst[inst].fw_shared_gpu_addr));
	WREG32_SOC15(VCN, inst, mmUVD_VCPU_NONCACHE_OFFSET0, 0);
	WREG32_SOC15(VCN, inst, mmUVD_VCPU_NONCACHE_SIZE0,
		AMDGPU_GPU_PAGE_ALIGN(sizeof(struct amdgpu_fw_shared)));
}
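/*
 * Same cache/non-cache window programming as vcn_v3_0_mc_resume(), but
 * routed through WREG32_SOC15_DPG_MODE so that, when "indirect" is set,
 * the writes are staged in the DPG scratch buffer and applied by PSP
 * (see vcn_v3_0_start_dpg_mode below).
 */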
static void vcn_v3_0_mc_resume_dpg_mode(struct amdgpu_device *adev, int inst_idx, bool indirect)
{
	uint32_t size = AMDGPU_GPU_PAGE_ALIGN(adev->vcn.fw->size + 4);
	uint32_t offset;

	/* cache window 0: fw */
	if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) {
		if (!indirect) {
			WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
				VCN, inst_idx, mmUVD_LMI_VCPU_CACHE_64BIT_BAR_LOW),
				(adev->firmware.ucode[AMDGPU_UCODE_ID_VCN + inst_idx].tmr_mc_addr_lo), 0, indirect);
			WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
				VCN, inst_idx, mmUVD_LMI_VCPU_CACHE_64BIT_BAR_HIGH),
				(adev->firmware.ucode[AMDGPU_UCODE_ID_VCN + inst_idx].tmr_mc_addr_hi), 0, indirect);
			WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
				VCN, inst_idx, mmUVD_VCPU_CACHE_OFFSET0), 0, 0, indirect);
		} else {
			WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
				VCN, inst_idx, mmUVD_LMI_VCPU_CACHE_64BIT_BAR_LOW), 0, 0, indirect);
			WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
				VCN, inst_idx, mmUVD_LMI_VCPU_CACHE_64BIT_BAR_HIGH), 0, 0, indirect);
			WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
				VCN, inst_idx, mmUVD_VCPU_CACHE_OFFSET0), 0, 0, indirect);
		}
		offset = 0;
	} else {
		WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
			VCN, inst_idx, mmUVD_LMI_VCPU_CACHE_64BIT_BAR_LOW),
			lower_32_bits(adev->vcn.inst[inst_idx].gpu_addr), 0, indirect);
		WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
			VCN, inst_idx, mmUVD_LMI_VCPU_CACHE_64BIT_BAR_HIGH),
			upper_32_bits(adev->vcn.inst[inst_idx].gpu_addr), 0, indirect);
		offset = size;
		WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
			VCN, inst_idx, mmUVD_VCPU_CACHE_OFFSET0),
			AMDGPU_UVD_FIRMWARE_OFFSET >> 3, 0, indirect);
	}

	if (!indirect)
		WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
			VCN, inst_idx, mmUVD_VCPU_CACHE_SIZE0), size, 0, indirect);
	else
		WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
			VCN, inst_idx, mmUVD_VCPU_CACHE_SIZE0), 0, 0, indirect);

	/* cache window 1: stack */
	if (!indirect) {
		WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
			VCN, inst_idx, mmUVD_LMI_VCPU_CACHE1_64BIT_BAR_LOW),
			lower_32_bits(adev->vcn.inst[inst_idx].gpu_addr + offset), 0, indirect);
		WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
			VCN, inst_idx, mmUVD_LMI_VCPU_CACHE1_64BIT_BAR_HIGH),
			upper_32_bits(adev->vcn.inst[inst_idx].gpu_addr + offset), 0, indirect);
		WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
			VCN, inst_idx, mmUVD_VCPU_CACHE_OFFSET1), 0, 0, indirect);
	} else {
		WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
			VCN, inst_idx, mmUVD_LMI_VCPU_CACHE1_64BIT_BAR_LOW), 0, 0, indirect);
		WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
			VCN, inst_idx, mmUVD_LMI_VCPU_CACHE1_64BIT_BAR_HIGH), 0, 0, indirect);
		WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
			VCN, inst_idx, mmUVD_VCPU_CACHE_OFFSET1), 0, 0, indirect);
	}
	WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
		VCN, inst_idx, mmUVD_VCPU_CACHE_SIZE1), AMDGPU_VCN_STACK_SIZE, 0, indirect);

	/* cache window 2: context */
	WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
		VCN, inst_idx, mmUVD_LMI_VCPU_CACHE2_64BIT_BAR_LOW),
		lower_32_bits(adev->vcn.inst[inst_idx].gpu_addr + offset + AMDGPU_VCN_STACK_SIZE), 0, indirect);
	WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
		VCN, inst_idx, mmUVD_LMI_VCPU_CACHE2_64BIT_BAR_HIGH),
		upper_32_bits(adev->vcn.inst[inst_idx].gpu_addr + offset + AMDGPU_VCN_STACK_SIZE), 0, indirect);
	WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
		VCN, inst_idx, mmUVD_VCPU_CACHE_OFFSET2), 0, 0, indirect);
	WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
		VCN, inst_idx, mmUVD_VCPU_CACHE_SIZE2), AMDGPU_VCN_CONTEXT_SIZE, 0, indirect);

	/* non-cache window */
	WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
		VCN, inst_idx, mmUVD_LMI_VCPU_NC0_64BIT_BAR_LOW),
		lower_32_bits(adev->vcn.inst[inst_idx].fw_shared_gpu_addr), 0, indirect);
	WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
		VCN, inst_idx, mmUVD_LMI_VCPU_NC0_64BIT_BAR_HIGH),
		upper_32_bits(adev->vcn.inst[inst_idx].fw_shared_gpu_addr), 0, indirect);
	WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
		VCN, inst_idx, mmUVD_VCPU_NONCACHE_OFFSET0), 0, 0, indirect);
	WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
		VCN, inst_idx, mmUVD_VCPU_NONCACHE_SIZE0),
		AMDGPU_GPU_PAGE_ALIGN(sizeof(struct amdgpu_fw_shared)), 0, indirect);

	/* VCN global tiling registers */
	WREG32_SOC15_DPG_MODE(0, SOC15_DPG_MODE_OFFSET(
		UVD, 0, mmUVD_GFX10_ADDR_CONFIG), adev->gfx.config.gb_addr_config, 0, indirect);
}
static void vcn_v3_0_disable_static_power_gating(struct amdgpu_device *adev, int inst)
{
	uint32_t data = 0;

	if (adev->pg_flags & AMD_PG_SUPPORT_VCN) {
		data = (1 << UVD_PGFSM_CONFIG__UVDM_PWR_CONFIG__SHIFT
			| 1 << UVD_PGFSM_CONFIG__UVDU_PWR_CONFIG__SHIFT
			| 2 << UVD_PGFSM_CONFIG__UVDF_PWR_CONFIG__SHIFT
			| 2 << UVD_PGFSM_CONFIG__UVDC_PWR_CONFIG__SHIFT
			| 2 << UVD_PGFSM_CONFIG__UVDB_PWR_CONFIG__SHIFT
			| 2 << UVD_PGFSM_CONFIG__UVDIRL_PWR_CONFIG__SHIFT
			| 1 << UVD_PGFSM_CONFIG__UVDLM_PWR_CONFIG__SHIFT
			| 2 << UVD_PGFSM_CONFIG__UVDTD_PWR_CONFIG__SHIFT
			| 2 << UVD_PGFSM_CONFIG__UVDTE_PWR_CONFIG__SHIFT
			| 2 << UVD_PGFSM_CONFIG__UVDE_PWR_CONFIG__SHIFT
			| 2 << UVD_PGFSM_CONFIG__UVDAB_PWR_CONFIG__SHIFT
			| 2 << UVD_PGFSM_CONFIG__UVDATD_PWR_CONFIG__SHIFT
			| 2 << UVD_PGFSM_CONFIG__UVDNA_PWR_CONFIG__SHIFT
			| 2 << UVD_PGFSM_CONFIG__UVDNB_PWR_CONFIG__SHIFT);

		WREG32_SOC15(VCN, inst, mmUVD_PGFSM_CONFIG, data);
		SOC15_WAIT_ON_RREG(VCN, inst, mmUVD_PGFSM_STATUS,
			UVD_PGFSM_STATUS__UVDM_UVDU_UVDLM_PWR_ON_3_0, 0x3F3FFFFF);
	} else {
		data = (1 << UVD_PGFSM_CONFIG__UVDM_PWR_CONFIG__SHIFT
			| 1 << UVD_PGFSM_CONFIG__UVDU_PWR_CONFIG__SHIFT
			| 1 << UVD_PGFSM_CONFIG__UVDF_PWR_CONFIG__SHIFT
			| 1 << UVD_PGFSM_CONFIG__UVDC_PWR_CONFIG__SHIFT
			| 1 << UVD_PGFSM_CONFIG__UVDB_PWR_CONFIG__SHIFT
			| 1 << UVD_PGFSM_CONFIG__UVDIRL_PWR_CONFIG__SHIFT
			| 1 << UVD_PGFSM_CONFIG__UVDLM_PWR_CONFIG__SHIFT
			| 1 << UVD_PGFSM_CONFIG__UVDTD_PWR_CONFIG__SHIFT
			| 1 << UVD_PGFSM_CONFIG__UVDTE_PWR_CONFIG__SHIFT
			| 1 << UVD_PGFSM_CONFIG__UVDE_PWR_CONFIG__SHIFT
			| 1 << UVD_PGFSM_CONFIG__UVDAB_PWR_CONFIG__SHIFT
			| 1 << UVD_PGFSM_CONFIG__UVDATD_PWR_CONFIG__SHIFT
			| 1 << UVD_PGFSM_CONFIG__UVDNA_PWR_CONFIG__SHIFT
			| 1 << UVD_PGFSM_CONFIG__UVDNB_PWR_CONFIG__SHIFT);
		WREG32_SOC15(VCN, inst, mmUVD_PGFSM_CONFIG, data);
		SOC15_WAIT_ON_RREG(VCN, inst, mmUVD_PGFSM_STATUS, 0, 0x3F3FFFFF);
	}

	data = RREG32_SOC15(VCN, inst, mmUVD_POWER_STATUS);
	data &= ~0x103;
	if (adev->pg_flags & AMD_PG_SUPPORT_VCN)
		data |= UVD_PGFSM_CONFIG__UVDM_UVDU_PWR_ON |
			UVD_POWER_STATUS__UVD_PG_EN_MASK;

	WREG32_SOC15(VCN, inst, mmUVD_POWER_STATUS, data);
}
static void vcn_v3_0_enable_static_power_gating(struct amdgpu_device *adev, int inst)
{
	uint32_t data;

	if (adev->pg_flags & AMD_PG_SUPPORT_VCN) {
		/* Before power off, this indicator has to be turned on */
		data = RREG32_SOC15(VCN, inst, mmUVD_POWER_STATUS);
		data &= ~UVD_POWER_STATUS__UVD_POWER_STATUS_MASK;
		data |= UVD_POWER_STATUS__UVD_POWER_STATUS_TILES_OFF;
		WREG32_SOC15(VCN, inst, mmUVD_POWER_STATUS, data);

		data = (2 << UVD_PGFSM_CONFIG__UVDM_PWR_CONFIG__SHIFT
			| 2 << UVD_PGFSM_CONFIG__UVDU_PWR_CONFIG__SHIFT
			| 2 << UVD_PGFSM_CONFIG__UVDF_PWR_CONFIG__SHIFT
			| 2 << UVD_PGFSM_CONFIG__UVDC_PWR_CONFIG__SHIFT
			| 2 << UVD_PGFSM_CONFIG__UVDB_PWR_CONFIG__SHIFT
			| 2 << UVD_PGFSM_CONFIG__UVDIRL_PWR_CONFIG__SHIFT
			| 2 << UVD_PGFSM_CONFIG__UVDLM_PWR_CONFIG__SHIFT
			| 2 << UVD_PGFSM_CONFIG__UVDTD_PWR_CONFIG__SHIFT
			| 2 << UVD_PGFSM_CONFIG__UVDTE_PWR_CONFIG__SHIFT
			| 2 << UVD_PGFSM_CONFIG__UVDE_PWR_CONFIG__SHIFT
			| 2 << UVD_PGFSM_CONFIG__UVDAB_PWR_CONFIG__SHIFT
			| 2 << UVD_PGFSM_CONFIG__UVDATD_PWR_CONFIG__SHIFT
			| 2 << UVD_PGFSM_CONFIG__UVDNA_PWR_CONFIG__SHIFT
			| 2 << UVD_PGFSM_CONFIG__UVDNB_PWR_CONFIG__SHIFT);
		WREG32_SOC15(VCN, inst, mmUVD_PGFSM_CONFIG, data);

		data = (2 << UVD_PGFSM_STATUS__UVDM_PWR_STATUS__SHIFT
			| 2 << UVD_PGFSM_STATUS__UVDU_PWR_STATUS__SHIFT
			| 2 << UVD_PGFSM_STATUS__UVDF_PWR_STATUS__SHIFT
			| 2 << UVD_PGFSM_STATUS__UVDC_PWR_STATUS__SHIFT
			| 2 << UVD_PGFSM_STATUS__UVDB_PWR_STATUS__SHIFT
			| 2 << UVD_PGFSM_STATUS__UVDIRL_PWR_STATUS__SHIFT
			| 2 << UVD_PGFSM_STATUS__UVDLM_PWR_STATUS__SHIFT
			| 2 << UVD_PGFSM_STATUS__UVDTD_PWR_STATUS__SHIFT
			| 2 << UVD_PGFSM_STATUS__UVDTE_PWR_STATUS__SHIFT
			| 2 << UVD_PGFSM_STATUS__UVDE_PWR_STATUS__SHIFT
			| 2 << UVD_PGFSM_STATUS__UVDAB_PWR_STATUS__SHIFT
			| 2 << UVD_PGFSM_STATUS__UVDATD_PWR_STATUS__SHIFT
			| 2 << UVD_PGFSM_STATUS__UVDNA_PWR_STATUS__SHIFT
			| 2 << UVD_PGFSM_STATUS__UVDNB_PWR_STATUS__SHIFT);
		SOC15_WAIT_ON_RREG(VCN, inst, mmUVD_PGFSM_STATUS, data, 0x3F3FFFFF);
	}
}
/**
 * vcn_v3_0_disable_clock_gating - disable VCN clock gating
 *
 * @adev: amdgpu_device pointer
 * @inst: instance number
 *
 * Disable clock gating for VCN block
 */
static void vcn_v3_0_disable_clock_gating(struct amdgpu_device *adev, int inst)
{
	uint32_t data;

	/* VCN disable CGC */
	data = RREG32_SOC15(VCN, inst, mmUVD_CGC_CTRL);
	if (adev->cg_flags & AMD_CG_SUPPORT_VCN_MGCG)
		data |= 1 << UVD_CGC_CTRL__DYN_CLOCK_MODE__SHIFT;
	else
		data &= ~UVD_CGC_CTRL__DYN_CLOCK_MODE_MASK;
	data |= 1 << UVD_CGC_CTRL__CLK_GATE_DLY_TIMER__SHIFT;
	data |= 4 << UVD_CGC_CTRL__CLK_OFF_DELAY__SHIFT;
	WREG32_SOC15(VCN, inst, mmUVD_CGC_CTRL, data);

	data = RREG32_SOC15(VCN, inst, mmUVD_CGC_GATE);
	data &= ~(UVD_CGC_GATE__SYS_MASK
		| UVD_CGC_GATE__UDEC_MASK
		| UVD_CGC_GATE__MPEG2_MASK
		| UVD_CGC_GATE__REGS_MASK
		| UVD_CGC_GATE__RBC_MASK
		| UVD_CGC_GATE__LMI_MC_MASK
		| UVD_CGC_GATE__LMI_UMC_MASK
		| UVD_CGC_GATE__IDCT_MASK
		| UVD_CGC_GATE__MPRD_MASK
		| UVD_CGC_GATE__MPC_MASK
		| UVD_CGC_GATE__LBSI_MASK
		| UVD_CGC_GATE__LRBBM_MASK
		| UVD_CGC_GATE__UDEC_RE_MASK
		| UVD_CGC_GATE__UDEC_CM_MASK
		| UVD_CGC_GATE__UDEC_IT_MASK
		| UVD_CGC_GATE__UDEC_DB_MASK
		| UVD_CGC_GATE__UDEC_MP_MASK
		| UVD_CGC_GATE__WCB_MASK
		| UVD_CGC_GATE__VCPU_MASK
		| UVD_CGC_GATE__MMSCH_MASK);

	WREG32_SOC15(VCN, inst, mmUVD_CGC_GATE, data);

	SOC15_WAIT_ON_RREG(VCN, inst, mmUVD_CGC_GATE, 0, 0xFFFFFFFF);

	data = RREG32_SOC15(VCN, inst, mmUVD_CGC_CTRL);
	data &= ~(UVD_CGC_CTRL__UDEC_RE_MODE_MASK
		| UVD_CGC_CTRL__UDEC_CM_MODE_MASK
		| UVD_CGC_CTRL__UDEC_IT_MODE_MASK
		| UVD_CGC_CTRL__UDEC_DB_MODE_MASK
		| UVD_CGC_CTRL__UDEC_MP_MODE_MASK
		| UVD_CGC_CTRL__SYS_MODE_MASK
		| UVD_CGC_CTRL__UDEC_MODE_MASK
		| UVD_CGC_CTRL__MPEG2_MODE_MASK
		| UVD_CGC_CTRL__REGS_MODE_MASK
		| UVD_CGC_CTRL__RBC_MODE_MASK
		| UVD_CGC_CTRL__LMI_MC_MODE_MASK
		| UVD_CGC_CTRL__LMI_UMC_MODE_MASK
		| UVD_CGC_CTRL__IDCT_MODE_MASK
		| UVD_CGC_CTRL__MPRD_MODE_MASK
		| UVD_CGC_CTRL__MPC_MODE_MASK
		| UVD_CGC_CTRL__LBSI_MODE_MASK
		| UVD_CGC_CTRL__LRBBM_MODE_MASK
		| UVD_CGC_CTRL__WCB_MODE_MASK
		| UVD_CGC_CTRL__VCPU_MODE_MASK
		| UVD_CGC_CTRL__MMSCH_MODE_MASK);
	WREG32_SOC15(VCN, inst, mmUVD_CGC_CTRL, data);

	data = RREG32_SOC15(VCN, inst, mmUVD_SUVD_CGC_GATE);
	data |= (UVD_SUVD_CGC_GATE__SRE_MASK
		| UVD_SUVD_CGC_GATE__SIT_MASK
		| UVD_SUVD_CGC_GATE__SMP_MASK
		| UVD_SUVD_CGC_GATE__SCM_MASK
		| UVD_SUVD_CGC_GATE__SDB_MASK
		| UVD_SUVD_CGC_GATE__SRE_H264_MASK
		| UVD_SUVD_CGC_GATE__SRE_HEVC_MASK
		| UVD_SUVD_CGC_GATE__SIT_H264_MASK
		| UVD_SUVD_CGC_GATE__SIT_HEVC_MASK
		| UVD_SUVD_CGC_GATE__SCM_H264_MASK
		| UVD_SUVD_CGC_GATE__SCM_HEVC_MASK
		| UVD_SUVD_CGC_GATE__SDB_H264_MASK
		| UVD_SUVD_CGC_GATE__SDB_HEVC_MASK
		| UVD_SUVD_CGC_GATE__SCLR_MASK
		| UVD_SUVD_CGC_GATE__ENT_MASK
		| UVD_SUVD_CGC_GATE__IME_MASK
		| UVD_SUVD_CGC_GATE__SIT_HEVC_DEC_MASK
		| UVD_SUVD_CGC_GATE__SIT_HEVC_ENC_MASK
		| UVD_SUVD_CGC_GATE__SITE_MASK
		| UVD_SUVD_CGC_GATE__SRE_VP9_MASK
		| UVD_SUVD_CGC_GATE__SCM_VP9_MASK
		| UVD_SUVD_CGC_GATE__SIT_VP9_DEC_MASK
		| UVD_SUVD_CGC_GATE__SDB_VP9_MASK
		| UVD_SUVD_CGC_GATE__IME_HEVC_MASK
		| UVD_SUVD_CGC_GATE__EFC_MASK
		| UVD_SUVD_CGC_GATE__SAOE_MASK
		| UVD_SUVD_CGC_GATE__SRE_AV1_MASK
		| UVD_SUVD_CGC_GATE__FBC_PCLK_MASK
		| UVD_SUVD_CGC_GATE__FBC_CCLK_MASK
		| UVD_SUVD_CGC_GATE__SCM_AV1_MASK
		| UVD_SUVD_CGC_GATE__SMPA_MASK);
	WREG32_SOC15(VCN, inst, mmUVD_SUVD_CGC_GATE, data);

	data = RREG32_SOC15(VCN, inst, mmUVD_SUVD_CGC_GATE2);
	data |= (UVD_SUVD_CGC_GATE2__MPBE0_MASK
		| UVD_SUVD_CGC_GATE2__MPBE1_MASK
		| UVD_SUVD_CGC_GATE2__SIT_AV1_MASK
		| UVD_SUVD_CGC_GATE2__SDB_AV1_MASK
		| UVD_SUVD_CGC_GATE2__MPC1_MASK);
	WREG32_SOC15(VCN, inst, mmUVD_SUVD_CGC_GATE2, data);

	data = RREG32_SOC15(VCN, inst, mmUVD_SUVD_CGC_CTRL);
	data &= ~(UVD_SUVD_CGC_CTRL__SRE_MODE_MASK
		| UVD_SUVD_CGC_CTRL__SIT_MODE_MASK
		| UVD_SUVD_CGC_CTRL__SMP_MODE_MASK
		| UVD_SUVD_CGC_CTRL__SCM_MODE_MASK
		| UVD_SUVD_CGC_CTRL__SDB_MODE_MASK
		| UVD_SUVD_CGC_CTRL__SCLR_MODE_MASK
		| UVD_SUVD_CGC_CTRL__ENT_MODE_MASK
		| UVD_SUVD_CGC_CTRL__IME_MODE_MASK
		| UVD_SUVD_CGC_CTRL__SITE_MODE_MASK
		| UVD_SUVD_CGC_CTRL__EFC_MODE_MASK
		| UVD_SUVD_CGC_CTRL__SAOE_MODE_MASK
		| UVD_SUVD_CGC_CTRL__SMPA_MODE_MASK
		| UVD_SUVD_CGC_CTRL__MPBE0_MODE_MASK
		| UVD_SUVD_CGC_CTRL__MPBE1_MODE_MASK
		| UVD_SUVD_CGC_CTRL__SIT_AV1_MODE_MASK
		| UVD_SUVD_CGC_CTRL__SDB_AV1_MODE_MASK
		| UVD_SUVD_CGC_CTRL__MPC1_MODE_MASK
		| UVD_SUVD_CGC_CTRL__FBC_PCLK_MASK
		| UVD_SUVD_CGC_CTRL__FBC_CCLK_MASK);
	WREG32_SOC15(VCN, inst, mmUVD_SUVD_CGC_CTRL, data);
}
static void vcn_v3_0_clock_gating_dpg_mode(struct amdgpu_device *adev,
		uint8_t sram_sel, int inst_idx, uint8_t indirect)
{
	uint32_t reg_data = 0;

	/* enable sw clock gating control */
	if (adev->cg_flags & AMD_CG_SUPPORT_VCN_MGCG)
		reg_data = 1 << UVD_CGC_CTRL__DYN_CLOCK_MODE__SHIFT;
	else
		reg_data = 0 << UVD_CGC_CTRL__DYN_CLOCK_MODE__SHIFT;
	reg_data |= 1 << UVD_CGC_CTRL__CLK_GATE_DLY_TIMER__SHIFT;
	reg_data |= 4 << UVD_CGC_CTRL__CLK_OFF_DELAY__SHIFT;
	reg_data &= ~(UVD_CGC_CTRL__UDEC_RE_MODE_MASK |
		UVD_CGC_CTRL__UDEC_CM_MODE_MASK |
		UVD_CGC_CTRL__UDEC_IT_MODE_MASK |
		UVD_CGC_CTRL__UDEC_DB_MODE_MASK |
		UVD_CGC_CTRL__UDEC_MP_MODE_MASK |
		UVD_CGC_CTRL__SYS_MODE_MASK |
		UVD_CGC_CTRL__UDEC_MODE_MASK |
		UVD_CGC_CTRL__MPEG2_MODE_MASK |
		UVD_CGC_CTRL__REGS_MODE_MASK |
		UVD_CGC_CTRL__RBC_MODE_MASK |
		UVD_CGC_CTRL__LMI_MC_MODE_MASK |
		UVD_CGC_CTRL__LMI_UMC_MODE_MASK |
		UVD_CGC_CTRL__IDCT_MODE_MASK |
		UVD_CGC_CTRL__MPRD_MODE_MASK |
		UVD_CGC_CTRL__MPC_MODE_MASK |
		UVD_CGC_CTRL__LBSI_MODE_MASK |
		UVD_CGC_CTRL__LRBBM_MODE_MASK |
		UVD_CGC_CTRL__WCB_MODE_MASK |
		UVD_CGC_CTRL__VCPU_MODE_MASK |
		UVD_CGC_CTRL__MMSCH_MODE_MASK);
	WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
		VCN, inst_idx, mmUVD_CGC_CTRL), reg_data, sram_sel, indirect);

	/* turn off clock gating */
	WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
		VCN, inst_idx, mmUVD_CGC_GATE), 0, sram_sel, indirect);

	/* turn on SUVD clock gating */
	WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
		VCN, inst_idx, mmUVD_SUVD_CGC_GATE), 1, sram_sel, indirect);

	/* turn on sw mode in UVD_SUVD_CGC_CTRL */
	WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
		VCN, inst_idx, mmUVD_SUVD_CGC_CTRL), 0, sram_sel, indirect);
}
/**
 * vcn_v3_0_enable_clock_gating - enable VCN clock gating
 *
 * @adev: amdgpu_device pointer
 * @inst: instance number
 *
 * Enable clock gating for VCN block
 */
static void vcn_v3_0_enable_clock_gating(struct amdgpu_device *adev, int inst)
{
	uint32_t data;

	/* enable VCN CGC */
	data = RREG32_SOC15(VCN, inst, mmUVD_CGC_CTRL);
	if (adev->cg_flags & AMD_CG_SUPPORT_VCN_MGCG)
		data |= 1 << UVD_CGC_CTRL__DYN_CLOCK_MODE__SHIFT;
	else
		data |= 0 << UVD_CGC_CTRL__DYN_CLOCK_MODE__SHIFT;
	data |= 1 << UVD_CGC_CTRL__CLK_GATE_DLY_TIMER__SHIFT;
	data |= 4 << UVD_CGC_CTRL__CLK_OFF_DELAY__SHIFT;
	WREG32_SOC15(VCN, inst, mmUVD_CGC_CTRL, data);

	data = RREG32_SOC15(VCN, inst, mmUVD_CGC_CTRL);
	data |= (UVD_CGC_CTRL__UDEC_RE_MODE_MASK
		| UVD_CGC_CTRL__UDEC_CM_MODE_MASK
		| UVD_CGC_CTRL__UDEC_IT_MODE_MASK
		| UVD_CGC_CTRL__UDEC_DB_MODE_MASK
		| UVD_CGC_CTRL__UDEC_MP_MODE_MASK
		| UVD_CGC_CTRL__SYS_MODE_MASK
		| UVD_CGC_CTRL__UDEC_MODE_MASK
		| UVD_CGC_CTRL__MPEG2_MODE_MASK
		| UVD_CGC_CTRL__REGS_MODE_MASK
		| UVD_CGC_CTRL__RBC_MODE_MASK
		| UVD_CGC_CTRL__LMI_MC_MODE_MASK
		| UVD_CGC_CTRL__LMI_UMC_MODE_MASK
		| UVD_CGC_CTRL__IDCT_MODE_MASK
		| UVD_CGC_CTRL__MPRD_MODE_MASK
		| UVD_CGC_CTRL__MPC_MODE_MASK
		| UVD_CGC_CTRL__LBSI_MODE_MASK
		| UVD_CGC_CTRL__LRBBM_MODE_MASK
		| UVD_CGC_CTRL__WCB_MODE_MASK
		| UVD_CGC_CTRL__VCPU_MODE_MASK
		| UVD_CGC_CTRL__MMSCH_MODE_MASK);
	WREG32_SOC15(VCN, inst, mmUVD_CGC_CTRL, data);

	data = RREG32_SOC15(VCN, inst, mmUVD_SUVD_CGC_CTRL);
	data |= (UVD_SUVD_CGC_CTRL__SRE_MODE_MASK
		| UVD_SUVD_CGC_CTRL__SIT_MODE_MASK
		| UVD_SUVD_CGC_CTRL__SMP_MODE_MASK
		| UVD_SUVD_CGC_CTRL__SCM_MODE_MASK
		| UVD_SUVD_CGC_CTRL__SDB_MODE_MASK
		| UVD_SUVD_CGC_CTRL__SCLR_MODE_MASK
		| UVD_SUVD_CGC_CTRL__ENT_MODE_MASK
		| UVD_SUVD_CGC_CTRL__IME_MODE_MASK
		| UVD_SUVD_CGC_CTRL__SITE_MODE_MASK
		| UVD_SUVD_CGC_CTRL__EFC_MODE_MASK
		| UVD_SUVD_CGC_CTRL__SAOE_MODE_MASK
		| UVD_SUVD_CGC_CTRL__SMPA_MODE_MASK
		| UVD_SUVD_CGC_CTRL__MPBE0_MODE_MASK
		| UVD_SUVD_CGC_CTRL__MPBE1_MODE_MASK
		| UVD_SUVD_CGC_CTRL__SIT_AV1_MODE_MASK
		| UVD_SUVD_CGC_CTRL__SDB_AV1_MODE_MASK
		| UVD_SUVD_CGC_CTRL__MPC1_MODE_MASK
		| UVD_SUVD_CGC_CTRL__FBC_PCLK_MASK
		| UVD_SUVD_CGC_CTRL__FBC_CCLK_MASK);
	WREG32_SOC15(VCN, inst, mmUVD_SUVD_CGC_CTRL, data);
}
static int vcn_v3_0_start_dpg_mode(struct amdgpu_device *adev, int inst_idx, bool indirect)
{
	volatile struct amdgpu_fw_shared *fw_shared = adev->vcn.inst[inst_idx].fw_shared_cpu_addr;
	struct amdgpu_ring *ring;
	uint32_t rb_bufsz, tmp;

	/* disable register anti-hang mechanism */
	WREG32_P(SOC15_REG_OFFSET(VCN, inst_idx, mmUVD_POWER_STATUS), 1,
		~UVD_POWER_STATUS__UVD_POWER_STATUS_MASK);
	/* enable dynamic power gating mode */
	tmp = RREG32_SOC15(VCN, inst_idx, mmUVD_POWER_STATUS);
	tmp |= UVD_POWER_STATUS__UVD_PG_MODE_MASK;
	tmp |= UVD_POWER_STATUS__UVD_PG_EN_MASK;
	WREG32_SOC15(VCN, inst_idx, mmUVD_POWER_STATUS, tmp);

	if (indirect)
		adev->vcn.inst[inst_idx].dpg_sram_curr_addr = (uint32_t *)adev->vcn.inst[inst_idx].dpg_sram_cpu_addr;

	/* enable clock gating */
	vcn_v3_0_clock_gating_dpg_mode(adev, 0, inst_idx, indirect);

	/* enable VCPU clock */
	tmp = (0xFF << UVD_VCPU_CNTL__PRB_TIMEOUT_VAL__SHIFT);
	tmp |= UVD_VCPU_CNTL__CLK_EN_MASK;
	tmp |= UVD_VCPU_CNTL__BLK_RST_MASK;
	WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
		VCN, inst_idx, mmUVD_VCPU_CNTL), tmp, 0, indirect);

	/* disable master interrupt */
	WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
		VCN, inst_idx, mmUVD_MASTINT_EN), 0, 0, indirect);

	/* setup mmUVD_LMI_CTRL */
	tmp = (0x8 | UVD_LMI_CTRL__WRITE_CLEAN_TIMER_EN_MASK |
		UVD_LMI_CTRL__REQ_MODE_MASK |
		UVD_LMI_CTRL__CRC_RESET_MASK |
		UVD_LMI_CTRL__MASK_MC_URGENT_MASK |
		UVD_LMI_CTRL__DATA_COHERENCY_EN_MASK |
		UVD_LMI_CTRL__VCPU_DATA_COHERENCY_EN_MASK |
		(8 << UVD_LMI_CTRL__WRITE_CLEAN_TIMER__SHIFT) |
		0x00100000L);
	WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
		VCN, inst_idx, mmUVD_LMI_CTRL), tmp, 0, indirect);

	WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
		VCN, inst_idx, mmUVD_MPC_CNTL),
		0x2 << UVD_MPC_CNTL__REPLACEMENT_MODE__SHIFT, 0, indirect);

	WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
		VCN, inst_idx, mmUVD_MPC_SET_MUXA0),
		((0x1 << UVD_MPC_SET_MUXA0__VARA_1__SHIFT) |
		 (0x2 << UVD_MPC_SET_MUXA0__VARA_2__SHIFT) |
		 (0x3 << UVD_MPC_SET_MUXA0__VARA_3__SHIFT) |
		 (0x4 << UVD_MPC_SET_MUXA0__VARA_4__SHIFT)), 0, indirect);

	WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
		VCN, inst_idx, mmUVD_MPC_SET_MUXB0),
		((0x1 << UVD_MPC_SET_MUXB0__VARB_1__SHIFT) |
		 (0x2 << UVD_MPC_SET_MUXB0__VARB_2__SHIFT) |
		 (0x3 << UVD_MPC_SET_MUXB0__VARB_3__SHIFT) |
		 (0x4 << UVD_MPC_SET_MUXB0__VARB_4__SHIFT)), 0, indirect);

	WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
		VCN, inst_idx, mmUVD_MPC_SET_MUX),
		((0x0 << UVD_MPC_SET_MUX__SET_0__SHIFT) |
		 (0x1 << UVD_MPC_SET_MUX__SET_1__SHIFT) |
		 (0x2 << UVD_MPC_SET_MUX__SET_2__SHIFT)), 0, indirect);

	vcn_v3_0_mc_resume_dpg_mode(adev, inst_idx, indirect);

	WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
		VCN, inst_idx, mmUVD_REG_XX_MASK), 0x10, 0, indirect);
	WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
		VCN, inst_idx, mmUVD_RBC_XX_IB_REG_CHECK), 0x3, 0, indirect);

	/* enable LMI MC and UMC channels */
	WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
		VCN, inst_idx, mmUVD_LMI_CTRL2), 0, 0, indirect);

	/* unblock VCPU register access */
	WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
		VCN, inst_idx, mmUVD_RB_ARB_CTRL), 0, 0, indirect);

	tmp = (0xFF << UVD_VCPU_CNTL__PRB_TIMEOUT_VAL__SHIFT);
	tmp |= UVD_VCPU_CNTL__CLK_EN_MASK;
	WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
		VCN, inst_idx, mmUVD_VCPU_CNTL), tmp, 0, indirect);

	/* enable master interrupt */
	WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
		VCN, inst_idx, mmUVD_MASTINT_EN),
		UVD_MASTINT_EN__VCPU_EN_MASK, 0, indirect);

	/* add nop to workaround PSP size check */
	WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
		VCN, inst_idx, mmUVD_VCPU_CNTL), tmp, 0, indirect);
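	/*
	 * In indirect mode, all of the WREG32_SOC15_DPG_MODE writes above
	 * were only staged in the DPG scratch buffer; the PSP call below
	 * hands that buffer over so PSP can apply it to the VCN SRAM.
	 */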
	if (indirect)
		psp_update_vcn_sram(adev, inst_idx, adev->vcn.inst[inst_idx].dpg_sram_gpu_addr,
			(uint32_t)((uintptr_t)adev->vcn.inst[inst_idx].dpg_sram_curr_addr -
				   (uintptr_t)adev->vcn.inst[inst_idx].dpg_sram_cpu_addr));

	ring = &adev->vcn.inst[inst_idx].ring_dec;
	/* force RBC into idle state */
	rb_bufsz = order_base_2(ring->ring_size);
	tmp = REG_SET_FIELD(0, UVD_RBC_RB_CNTL, RB_BUFSZ, rb_bufsz);
	tmp = REG_SET_FIELD(tmp, UVD_RBC_RB_CNTL, RB_BLKSZ, 1);
	tmp = REG_SET_FIELD(tmp, UVD_RBC_RB_CNTL, RB_NO_FETCH, 1);
	tmp = REG_SET_FIELD(tmp, UVD_RBC_RB_CNTL, RB_NO_UPDATE, 1);
	tmp = REG_SET_FIELD(tmp, UVD_RBC_RB_CNTL, RB_RPTR_WR_EN, 1);
	WREG32_SOC15(VCN, inst_idx, mmUVD_RBC_RB_CNTL, tmp);

	/* Stall DPG before WPTR/RPTR reset */
	WREG32_P(SOC15_REG_OFFSET(VCN, inst_idx, mmUVD_POWER_STATUS),
		UVD_POWER_STATUS__STALL_DPG_POWER_UP_MASK,
		~UVD_POWER_STATUS__STALL_DPG_POWER_UP_MASK);
	fw_shared->multi_queue.decode_queue_mode |= cpu_to_le32(FW_QUEUE_RING_RESET);

	/* set the write pointer delay */
	WREG32_SOC15(VCN, inst_idx, mmUVD_RBC_RB_WPTR_CNTL, 0);

	/* set the wb address */
	WREG32_SOC15(VCN, inst_idx, mmUVD_RBC_RB_RPTR_ADDR,
		(upper_32_bits(ring->gpu_addr) >> 2));

	/* program the RB_BASE for ring buffer */
	WREG32_SOC15(VCN, inst_idx, mmUVD_LMI_RBC_RB_64BIT_BAR_LOW,
		lower_32_bits(ring->gpu_addr));
	WREG32_SOC15(VCN, inst_idx, mmUVD_LMI_RBC_RB_64BIT_BAR_HIGH,
		upper_32_bits(ring->gpu_addr));

	/* Initialize the ring buffer's read and write pointers */
	WREG32_SOC15(VCN, inst_idx, mmUVD_RBC_RB_RPTR, 0);

	WREG32_SOC15(VCN, inst_idx, mmUVD_SCRATCH2, 0);

	ring->wptr = RREG32_SOC15(VCN, inst_idx, mmUVD_RBC_RB_RPTR);
	WREG32_SOC15(VCN, inst_idx, mmUVD_RBC_RB_WPTR,
		lower_32_bits(ring->wptr));

	/* Reset FW shared memory RBC WPTR/RPTR */
	fw_shared->rb.rptr = 0;
	fw_shared->rb.wptr = lower_32_bits(ring->wptr);

	/* resetting done, fw can check RB ring */
	fw_shared->multi_queue.decode_queue_mode &= cpu_to_le32(~FW_QUEUE_RING_RESET);

	/* Unstall DPG */
	WREG32_P(SOC15_REG_OFFSET(VCN, inst_idx, mmUVD_POWER_STATUS),
		0, ~UVD_POWER_STATUS__STALL_DPG_POWER_UP_MASK);

	return 0;
}
static int vcn_v3_0_start(struct amdgpu_device *adev)
{
	volatile struct amdgpu_fw_shared *fw_shared;
	struct amdgpu_ring *ring;
	uint32_t rb_bufsz, tmp;
	int i, j, k, r;

	if (adev->pm.dpm_enabled)
		amdgpu_dpm_enable_uvd(adev, true);

	for (i = 0; i < adev->vcn.num_vcn_inst; ++i) {
		if (adev->vcn.harvest_config & (1 << i))
			continue;

		if (adev->pg_flags & AMD_PG_SUPPORT_VCN_DPG) {
			r = vcn_v3_0_start_dpg_mode(adev, i, adev->vcn.indirect_sram);
			continue;
		}

		/* disable VCN power gating */
		vcn_v3_0_disable_static_power_gating(adev, i);

		/* set VCN status busy */
		tmp = RREG32_SOC15(VCN, i, mmUVD_STATUS) | UVD_STATUS__UVD_BUSY;
		WREG32_SOC15(VCN, i, mmUVD_STATUS, tmp);

		/* SW clock gating */
		vcn_v3_0_disable_clock_gating(adev, i);

		/* enable VCPU clock */
		WREG32_P(SOC15_REG_OFFSET(VCN, i, mmUVD_VCPU_CNTL),
			UVD_VCPU_CNTL__CLK_EN_MASK, ~UVD_VCPU_CNTL__CLK_EN_MASK);

		/* disable master interrupt */
		WREG32_P(SOC15_REG_OFFSET(VCN, i, mmUVD_MASTINT_EN), 0,
			~UVD_MASTINT_EN__VCPU_EN_MASK);

		/* enable LMI MC and UMC channels */
		WREG32_P(SOC15_REG_OFFSET(VCN, i, mmUVD_LMI_CTRL2), 0,
			~UVD_LMI_CTRL2__STALL_ARB_UMC_MASK);

		tmp = RREG32_SOC15(VCN, i, mmUVD_SOFT_RESET);
		tmp &= ~UVD_SOFT_RESET__LMI_SOFT_RESET_MASK;
		tmp &= ~UVD_SOFT_RESET__LMI_UMC_SOFT_RESET_MASK;
		WREG32_SOC15(VCN, i, mmUVD_SOFT_RESET, tmp);

		/* setup mmUVD_LMI_CTRL */
		tmp = RREG32_SOC15(VCN, i, mmUVD_LMI_CTRL);
		WREG32_SOC15(VCN, i, mmUVD_LMI_CTRL, tmp |
			UVD_LMI_CTRL__WRITE_CLEAN_TIMER_EN_MASK |
			UVD_LMI_CTRL__MASK_MC_URGENT_MASK |
			UVD_LMI_CTRL__DATA_COHERENCY_EN_MASK |
			UVD_LMI_CTRL__VCPU_DATA_COHERENCY_EN_MASK);

		/* setup mmUVD_MPC_CNTL */
		tmp = RREG32_SOC15(VCN, i, mmUVD_MPC_CNTL);
		tmp &= ~UVD_MPC_CNTL__REPLACEMENT_MODE_MASK;
		tmp |= 0x2 << UVD_MPC_CNTL__REPLACEMENT_MODE__SHIFT;
		WREG32_SOC15(VCN, i, mmUVD_MPC_CNTL, tmp);

		/* setup UVD_MPC_SET_MUXA0 */
		WREG32_SOC15(VCN, i, mmUVD_MPC_SET_MUXA0,
			((0x1 << UVD_MPC_SET_MUXA0__VARA_1__SHIFT) |
			 (0x2 << UVD_MPC_SET_MUXA0__VARA_2__SHIFT) |
			 (0x3 << UVD_MPC_SET_MUXA0__VARA_3__SHIFT) |
			 (0x4 << UVD_MPC_SET_MUXA0__VARA_4__SHIFT)));

		/* setup UVD_MPC_SET_MUXB0 */
		WREG32_SOC15(VCN, i, mmUVD_MPC_SET_MUXB0,
			((0x1 << UVD_MPC_SET_MUXB0__VARB_1__SHIFT) |
			 (0x2 << UVD_MPC_SET_MUXB0__VARB_2__SHIFT) |
			 (0x3 << UVD_MPC_SET_MUXB0__VARB_3__SHIFT) |
			 (0x4 << UVD_MPC_SET_MUXB0__VARB_4__SHIFT)));

		/* setup mmUVD_MPC_SET_MUX */
		WREG32_SOC15(VCN, i, mmUVD_MPC_SET_MUX,
			((0x0 << UVD_MPC_SET_MUX__SET_0__SHIFT) |
			 (0x1 << UVD_MPC_SET_MUX__SET_1__SHIFT) |
			 (0x2 << UVD_MPC_SET_MUX__SET_2__SHIFT)));

		vcn_v3_0_mc_resume(adev, i);

		/* VCN global tiling registers */
		WREG32_SOC15(VCN, i, mmUVD_GFX10_ADDR_CONFIG,
			adev->gfx.config.gb_addr_config);

		/* unblock VCPU register access */
		WREG32_P(SOC15_REG_OFFSET(VCN, i, mmUVD_RB_ARB_CTRL), 0,
			~UVD_RB_ARB_CTRL__VCPU_DIS_MASK);

		/* release VCPU reset to boot */
		WREG32_P(SOC15_REG_OFFSET(VCN, i, mmUVD_VCPU_CNTL), 0,
			~UVD_VCPU_CNTL__BLK_RST_MASK);
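		/*
		 * VCPU boot handshake: poll UVD_STATUS for the VCPU-report
		 * bit (up to 100 * 10 ms); on timeout, pulse BLK_RST to
		 * reset the VCPU and retry, giving up after 10 attempts.
		 */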
		for (j = 0; j < 10; ++j) {
			uint32_t status;

			for (k = 0; k < 100; ++k) {
				status = RREG32_SOC15(VCN, i, mmUVD_STATUS);
				if (status & 2)
					break;
				mdelay(10);
			}
			r = 0;
			if (status & 2)
				break;

			DRM_ERROR("VCN[%d] decode not responding, trying to reset the VCPU!!!\n", i);
			WREG32_P(SOC15_REG_OFFSET(VCN, i, mmUVD_VCPU_CNTL),
				UVD_VCPU_CNTL__BLK_RST_MASK,
				~UVD_VCPU_CNTL__BLK_RST_MASK);
			mdelay(10);
			WREG32_P(SOC15_REG_OFFSET(VCN, i, mmUVD_VCPU_CNTL), 0,
				~UVD_VCPU_CNTL__BLK_RST_MASK);

			mdelay(10);
			r = -1;
		}

		if (r) {
			DRM_ERROR("VCN[%d] decode not responding, giving up!!!\n", i);
			return r;
		}

		/* enable master interrupt */
		WREG32_P(SOC15_REG_OFFSET(VCN, i, mmUVD_MASTINT_EN),
			UVD_MASTINT_EN__VCPU_EN_MASK,
			~UVD_MASTINT_EN__VCPU_EN_MASK);

		/* clear the busy bit of VCN_STATUS */
		WREG32_P(SOC15_REG_OFFSET(VCN, i, mmUVD_STATUS), 0,
			~(2 << UVD_STATUS__VCPU_REPORT__SHIFT));

		WREG32_SOC15(VCN, i, mmUVD_LMI_RBC_RB_VMID, 0);

		ring = &adev->vcn.inst[i].ring_dec;
		/* force RBC into idle state */
		rb_bufsz = order_base_2(ring->ring_size);
		tmp = REG_SET_FIELD(0, UVD_RBC_RB_CNTL, RB_BUFSZ, rb_bufsz);
		tmp = REG_SET_FIELD(tmp, UVD_RBC_RB_CNTL, RB_BLKSZ, 1);
		tmp = REG_SET_FIELD(tmp, UVD_RBC_RB_CNTL, RB_NO_FETCH, 1);
		tmp = REG_SET_FIELD(tmp, UVD_RBC_RB_CNTL, RB_NO_UPDATE, 1);
		tmp = REG_SET_FIELD(tmp, UVD_RBC_RB_CNTL, RB_RPTR_WR_EN, 1);
		WREG32_SOC15(VCN, i, mmUVD_RBC_RB_CNTL, tmp);

		fw_shared = adev->vcn.inst[i].fw_shared_cpu_addr;
		fw_shared->multi_queue.decode_queue_mode |= cpu_to_le32(FW_QUEUE_RING_RESET);

		/* program the RB_BASE for ring buffer */
		WREG32_SOC15(VCN, i, mmUVD_LMI_RBC_RB_64BIT_BAR_LOW,
			lower_32_bits(ring->gpu_addr));
		WREG32_SOC15(VCN, i, mmUVD_LMI_RBC_RB_64BIT_BAR_HIGH,
			upper_32_bits(ring->gpu_addr));

		/* Initialize the ring buffer's read and write pointers */
		WREG32_SOC15(VCN, i, mmUVD_RBC_RB_RPTR, 0);

		WREG32_SOC15(VCN, i, mmUVD_SCRATCH2, 0);
		ring->wptr = RREG32_SOC15(VCN, i, mmUVD_RBC_RB_RPTR);
		WREG32_SOC15(VCN, i, mmUVD_RBC_RB_WPTR,
			lower_32_bits(ring->wptr));
		fw_shared->rb.wptr = lower_32_bits(ring->wptr);
		fw_shared->multi_queue.decode_queue_mode &= cpu_to_le32(~FW_QUEUE_RING_RESET);

		fw_shared->multi_queue.encode_generalpurpose_queue_mode |= cpu_to_le32(FW_QUEUE_RING_RESET);
		ring = &adev->vcn.inst[i].ring_enc[0];
		WREG32_SOC15(VCN, i, mmUVD_RB_RPTR, lower_32_bits(ring->wptr));
		WREG32_SOC15(VCN, i, mmUVD_RB_WPTR, lower_32_bits(ring->wptr));
		WREG32_SOC15(VCN, i, mmUVD_RB_BASE_LO, ring->gpu_addr);
		WREG32_SOC15(VCN, i, mmUVD_RB_BASE_HI, upper_32_bits(ring->gpu_addr));
		WREG32_SOC15(VCN, i, mmUVD_RB_SIZE, ring->ring_size / 4);
		fw_shared->multi_queue.encode_generalpurpose_queue_mode &= cpu_to_le32(~FW_QUEUE_RING_RESET);

		fw_shared->multi_queue.encode_lowlatency_queue_mode |= cpu_to_le32(FW_QUEUE_RING_RESET);
		ring = &adev->vcn.inst[i].ring_enc[1];
		WREG32_SOC15(VCN, i, mmUVD_RB_RPTR2, lower_32_bits(ring->wptr));
		WREG32_SOC15(VCN, i, mmUVD_RB_WPTR2, lower_32_bits(ring->wptr));
		WREG32_SOC15(VCN, i, mmUVD_RB_BASE_LO2, ring->gpu_addr);
		WREG32_SOC15(VCN, i, mmUVD_RB_BASE_HI2, upper_32_bits(ring->gpu_addr));
		WREG32_SOC15(VCN, i, mmUVD_RB_SIZE2, ring->ring_size / 4);
		fw_shared->multi_queue.encode_lowlatency_queue_mode &= cpu_to_le32(~FW_QUEUE_RING_RESET);
	}

	return 0;
}
static int vcn_v3_0_start_sriov(struct amdgpu_device *adev)
{
	int i, j;
	struct amdgpu_ring *ring;
	uint64_t cache_addr;
	uint64_t rb_addr;
	uint64_t ctx_addr;
	uint32_t param, resp, expected;
	uint32_t offset, cache_size;
	uint32_t tmp, timeout;
	uint32_t id;

	struct amdgpu_mm_table *table = &adev->virt.mm_table;
	uint32_t *table_loc;
	uint32_t table_size;
	uint32_t size, size_dw;

	bool is_vcn_ready;

	struct mmsch_v3_0_cmd_direct_write
		direct_wt = { {0} };
	struct mmsch_v3_0_cmd_direct_read_modify_write
		direct_rd_mod_wt = { {0} };
	struct mmsch_v3_0_cmd_end end = { {0} };
	struct mmsch_v3_0_init_header header;

	direct_wt.cmd_header.command_type =
		MMSCH_COMMAND__DIRECT_REG_WRITE;
	direct_rd_mod_wt.cmd_header.command_type =
		MMSCH_COMMAND__DIRECT_REG_READ_MODIFY_WRITE;
	end.cmd_header.command_type =
		MMSCH_COMMAND__END;

	header.version = MMSCH_VERSION;
	header.total_size = sizeof(struct mmsch_v3_0_init_header) >> 2;
	for (i = 0; i < AMDGPU_MAX_VCN_INSTANCES; i++) {
		header.inst[i].init_status = 0;
		header.inst[i].table_offset = 0;
		header.inst[i].table_size = 0;
	}

	table_loc = (uint32_t *)table->cpu_addr;
	table_loc += header.total_size;
	for (i = 0; i < adev->vcn.num_vcn_inst; i++) {
		if (adev->vcn.harvest_config & (1 << i))
			continue;

		table_size = 0;

		MMSCH_V3_0_INSERT_DIRECT_RD_MOD_WT(SOC15_REG_OFFSET(VCN, i,
			mmUVD_STATUS),
			~UVD_STATUS__UVD_BUSY, UVD_STATUS__UVD_BUSY);

		cache_size = AMDGPU_GPU_PAGE_ALIGN(adev->vcn.fw->size + 4);

		if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) {
			id = amdgpu_ucode_id_vcns[i];
			MMSCH_V3_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(VCN, i,
				mmUVD_LMI_VCPU_CACHE_64BIT_BAR_LOW),
				adev->firmware.ucode[id].tmr_mc_addr_lo);
			MMSCH_V3_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(VCN, i,
				mmUVD_LMI_VCPU_CACHE_64BIT_BAR_HIGH),
				adev->firmware.ucode[id].tmr_mc_addr_hi);
			offset = 0;
			MMSCH_V3_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(VCN, i,
				mmUVD_VCPU_CACHE_OFFSET0),
				0);
		} else {
			MMSCH_V3_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(VCN, i,
				mmUVD_LMI_VCPU_CACHE_64BIT_BAR_LOW),
				lower_32_bits(adev->vcn.inst[i].gpu_addr));
			MMSCH_V3_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(VCN, i,
				mmUVD_LMI_VCPU_CACHE_64BIT_BAR_HIGH),
				upper_32_bits(adev->vcn.inst[i].gpu_addr));
			offset = cache_size;
			MMSCH_V3_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(VCN, i,
				mmUVD_VCPU_CACHE_OFFSET0),
				AMDGPU_UVD_FIRMWARE_OFFSET >> 3);
		}

		MMSCH_V3_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(VCN, i,
			mmUVD_VCPU_CACHE_SIZE0),
			cache_size);

		cache_addr = adev->vcn.inst[i].gpu_addr + offset;
		MMSCH_V3_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(VCN, i,
			mmUVD_LMI_VCPU_CACHE1_64BIT_BAR_LOW),
			lower_32_bits(cache_addr));
		MMSCH_V3_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(VCN, i,
			mmUVD_LMI_VCPU_CACHE1_64BIT_BAR_HIGH),
			upper_32_bits(cache_addr));
		MMSCH_V3_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(VCN, i,
			mmUVD_VCPU_CACHE_OFFSET1),
			0);
		MMSCH_V3_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(VCN, i,
			mmUVD_VCPU_CACHE_SIZE1),
			AMDGPU_VCN_STACK_SIZE);

		cache_addr = adev->vcn.inst[i].gpu_addr + offset +
			AMDGPU_VCN_STACK_SIZE;
		MMSCH_V3_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(VCN, i,
			mmUVD_LMI_VCPU_CACHE2_64BIT_BAR_LOW),
			lower_32_bits(cache_addr));
		MMSCH_V3_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(VCN, i,
			mmUVD_LMI_VCPU_CACHE2_64BIT_BAR_HIGH),
			upper_32_bits(cache_addr));
		MMSCH_V3_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(VCN, i,
			mmUVD_VCPU_CACHE_OFFSET2),
			0);
		MMSCH_V3_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(VCN, i,
			mmUVD_VCPU_CACHE_SIZE2),
			AMDGPU_VCN_CONTEXT_SIZE);

		for (j = 0; j < adev->vcn.num_enc_rings; ++j) {
			ring = &adev->vcn.inst[i].ring_enc[j];
			ring->wptr = 0;
			rb_addr = ring->gpu_addr;
			MMSCH_V3_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(VCN, i,
				mmUVD_RB_BASE_LO),
				lower_32_bits(rb_addr));
			MMSCH_V3_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(VCN, i,
				mmUVD_RB_BASE_HI),
				upper_32_bits(rb_addr));
			MMSCH_V3_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(VCN, i,
				mmUVD_RB_SIZE),
				ring->ring_size / 4);
		}

		ring = &adev->vcn.inst[i].ring_dec;
		ring->wptr = 0;
		rb_addr = ring->gpu_addr;
		MMSCH_V3_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(VCN, i,
			mmUVD_LMI_RBC_RB_64BIT_BAR_LOW),
			lower_32_bits(rb_addr));
		MMSCH_V3_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(VCN, i,
			mmUVD_LMI_RBC_RB_64BIT_BAR_HIGH),
			upper_32_bits(rb_addr));
		/* force RBC into idle state */
		tmp = order_base_2(ring->ring_size);
		tmp = REG_SET_FIELD(0, UVD_RBC_RB_CNTL, RB_BUFSZ, tmp);
		tmp = REG_SET_FIELD(tmp, UVD_RBC_RB_CNTL, RB_BLKSZ, 1);
		tmp = REG_SET_FIELD(tmp, UVD_RBC_RB_CNTL, RB_NO_FETCH, 1);
		tmp = REG_SET_FIELD(tmp, UVD_RBC_RB_CNTL, RB_NO_UPDATE, 1);
		tmp = REG_SET_FIELD(tmp, UVD_RBC_RB_CNTL, RB_RPTR_WR_EN, 1);
		MMSCH_V3_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(VCN, i,
			mmUVD_RBC_RB_CNTL),
			tmp);

		/* add end packet */
		MMSCH_V3_0_INSERT_END();

		/* refine header */
		header.inst[i].init_status = 0;
		header.inst[i].table_offset = header.total_size;
		header.inst[i].table_size = table_size;
		header.total_size += table_size;
	}

	/* Update init table header in memory */
	size = sizeof(struct mmsch_v3_0_init_header);
	table_loc = (uint32_t *)table->cpu_addr;
	memcpy((void *)table_loc, &header, size);

	/* message MMSCH (in VCN[0]) to initialize this client
	 * 1, write to mmsch_vf_ctx_addr_lo/hi register with GPU mc addr
	 * of memory descriptor location
	 */
	ctx_addr = table->gpu_addr;
	WREG32_SOC15(VCN, 0, mmMMSCH_VF_CTX_ADDR_LO, lower_32_bits(ctx_addr));
	WREG32_SOC15(VCN, 0, mmMMSCH_VF_CTX_ADDR_HI, upper_32_bits(ctx_addr));

	/* 2, update vmid of descriptor */
	tmp = RREG32_SOC15(VCN, 0, mmMMSCH_VF_VMID);
	tmp &= ~MMSCH_VF_VMID__VF_CTX_VMID_MASK;
	/* use domain0 for MM scheduler */
	tmp |= (0 << MMSCH_VF_VMID__VF_CTX_VMID__SHIFT);
	WREG32_SOC15(VCN, 0, mmMMSCH_VF_VMID, tmp);

	/* 3, notify mmsch about the size of this descriptor */
	size = header.total_size;
	WREG32_SOC15(VCN, 0, mmMMSCH_VF_CTX_SIZE, size);

	/* 4, set resp to zero */
	WREG32_SOC15(VCN, 0, mmMMSCH_VF_MAILBOX_RESP, 0);

	/* 5, kick off the initialization and wait until
	 * MMSCH_VF_MAILBOX_RESP becomes non-zero
	 */
	param = 0x10000001;
	WREG32_SOC15(VCN, 0, mmMMSCH_VF_MAILBOX_HOST, param);
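	/*
	 * MMSCH acknowledges the request by writing param + 1 back into the
	 * RESP mailbox; poll for that value, ~10 us at a time, up to the
	 * timeout below.
	 */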
	tmp = 0;
	timeout = 1000;
	resp = 0;
	expected = param + 1;
	while (resp != expected) {
		resp = RREG32_SOC15(VCN, 0, mmMMSCH_VF_MAILBOX_RESP);
		if (resp == expected)
			break;

		udelay(10);
		tmp = tmp + 10;
		if (tmp >= timeout) {
			DRM_ERROR("failed to init MMSCH. TIME-OUT after %d usec"\
				" waiting for mmMMSCH_VF_MAILBOX_RESP "\
				"(expected=0x%08x, readback=0x%08x)\n",
				tmp, expected, resp);
			return -EBUSY;
		}
	}

	/* 6, check each VCN's init_status
	 * if it remains as 0, then this VCN is not assigned to current VF
	 * do not start ring for this VCN
	 */
	size = sizeof(struct mmsch_v3_0_init_header);
	table_loc = (uint32_t *)table->cpu_addr;
	memcpy(&header, (void *)table_loc, size);

	for (i = 0; i < adev->vcn.num_vcn_inst; i++) {
		if (adev->vcn.harvest_config & (1 << i))
			continue;

		is_vcn_ready = (header.inst[i].init_status == 1);
		if (!is_vcn_ready)
			DRM_INFO("VCN(%d) engine is disabled by hypervisor\n", i);

		ring = &adev->vcn.inst[i].ring_dec;
		ring->sched.ready = is_vcn_ready;
		for (j = 0; j < adev->vcn.num_enc_rings; ++j) {
			ring = &adev->vcn.inst[i].ring_enc[j];
			ring->sched.ready = is_vcn_ready;
		}
	}

	return 0;
}
static int vcn_v3_0_stop_dpg_mode(struct amdgpu_device *adev, int inst_idx)
{
	uint32_t tmp;

	/* Wait for power status to be 1 */
	SOC15_WAIT_ON_RREG(VCN, inst_idx, mmUVD_POWER_STATUS, 1,
		UVD_POWER_STATUS__UVD_POWER_STATUS_MASK);

	/* wait for read ptr to be equal to write ptr */
	tmp = RREG32_SOC15(VCN, inst_idx, mmUVD_RB_WPTR);
	SOC15_WAIT_ON_RREG(VCN, inst_idx, mmUVD_RB_RPTR, tmp, 0xFFFFFFFF);

	tmp = RREG32_SOC15(VCN, inst_idx, mmUVD_RB_WPTR2);
	SOC15_WAIT_ON_RREG(VCN, inst_idx, mmUVD_RB_RPTR2, tmp, 0xFFFFFFFF);

	tmp = RREG32_SOC15(VCN, inst_idx, mmUVD_RBC_RB_WPTR) & 0x7FFFFFFF;
	SOC15_WAIT_ON_RREG(VCN, inst_idx, mmUVD_RBC_RB_RPTR, tmp, 0xFFFFFFFF);
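	/* both enc rings and the dec ring are drained at this point */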
	SOC15_WAIT_ON_RREG(VCN, inst_idx, mmUVD_POWER_STATUS, 1,
		UVD_POWER_STATUS__UVD_POWER_STATUS_MASK);

	/* disable dynamic power gating mode */
	WREG32_P(SOC15_REG_OFFSET(VCN, inst_idx, mmUVD_POWER_STATUS), 0,
		~UVD_POWER_STATUS__UVD_PG_MODE_MASK);

	return 0;
}
static int vcn_v3_0_stop(struct amdgpu_device *adev)
{
	uint32_t tmp;
	int i, r = 0;

	for (i = 0; i < adev->vcn.num_vcn_inst; ++i) {
		if (adev->vcn.harvest_config & (1 << i))
			continue;

		if (adev->pg_flags & AMD_PG_SUPPORT_VCN_DPG) {
			r = vcn_v3_0_stop_dpg_mode(adev, i);
			continue;
		}

		/* wait for vcn idle */
		r = SOC15_WAIT_ON_RREG(VCN, i, mmUVD_STATUS, UVD_STATUS__IDLE, 0x7);
		if (r)
			return r;

		tmp = UVD_LMI_STATUS__VCPU_LMI_WRITE_CLEAN_MASK |
			UVD_LMI_STATUS__READ_CLEAN_MASK |
			UVD_LMI_STATUS__WRITE_CLEAN_MASK |
			UVD_LMI_STATUS__WRITE_CLEAN_RAW_MASK;
		r = SOC15_WAIT_ON_RREG(VCN, i, mmUVD_LMI_STATUS, tmp, tmp);
		if (r)
			return r;

		/* disable LMI UMC channel */
		tmp = RREG32_SOC15(VCN, i, mmUVD_LMI_CTRL2);
		tmp |= UVD_LMI_CTRL2__STALL_ARB_UMC_MASK;
		WREG32_SOC15(VCN, i, mmUVD_LMI_CTRL2, tmp);
		tmp = UVD_LMI_STATUS__UMC_READ_CLEAN_RAW_MASK |
			UVD_LMI_STATUS__UMC_WRITE_CLEAN_RAW_MASK;
		r = SOC15_WAIT_ON_RREG(VCN, i, mmUVD_LMI_STATUS, tmp, tmp);
		if (r)
			return r;

		/* block VCPU register access */
		WREG32_P(SOC15_REG_OFFSET(VCN, i, mmUVD_RB_ARB_CTRL),
			UVD_RB_ARB_CTRL__VCPU_DIS_MASK,
			~UVD_RB_ARB_CTRL__VCPU_DIS_MASK);

		/* reset VCPU */
		WREG32_P(SOC15_REG_OFFSET(VCN, i, mmUVD_VCPU_CNTL),
			UVD_VCPU_CNTL__BLK_RST_MASK,
			~UVD_VCPU_CNTL__BLK_RST_MASK);

		/* disable VCPU clock */
		WREG32_P(SOC15_REG_OFFSET(VCN, i, mmUVD_VCPU_CNTL), 0,
			~(UVD_VCPU_CNTL__CLK_EN_MASK));

		/* apply soft reset */
		tmp = RREG32_SOC15(VCN, i, mmUVD_SOFT_RESET);
		tmp |= UVD_SOFT_RESET__LMI_UMC_SOFT_RESET_MASK;
		WREG32_SOC15(VCN, i, mmUVD_SOFT_RESET, tmp);
		tmp = RREG32_SOC15(VCN, i, mmUVD_SOFT_RESET);
		tmp |= UVD_SOFT_RESET__LMI_SOFT_RESET_MASK;
		WREG32_SOC15(VCN, i, mmUVD_SOFT_RESET, tmp);

		/* clear status */
		WREG32_SOC15(VCN, i, mmUVD_STATUS, 0);

		/* apply HW clock gating */
		vcn_v3_0_enable_clock_gating(adev, i);

		/* enable VCN power gating */
		vcn_v3_0_enable_static_power_gating(adev, i);
	}

	if (adev->pm.dpm_enabled)
		amdgpu_dpm_enable_uvd(adev, false);

	return 0;
}
static int vcn_v3_0_pause_dpg_mode(struct amdgpu_device *adev,
		   int inst_idx, struct dpg_pause_state *new_state)
{
	volatile struct amdgpu_fw_shared *fw_shared;
	struct amdgpu_ring *ring;
	uint32_t reg_data = 0;
	int ret_code;

	/* pause/unpause if state is changed */
	if (adev->vcn.inst[inst_idx].pause_state.fw_based != new_state->fw_based) {
		DRM_DEBUG("dpg pause state changed %d -> %d",
			adev->vcn.inst[inst_idx].pause_state.fw_based, new_state->fw_based);
		reg_data = RREG32_SOC15(VCN, inst_idx, mmUVD_DPG_PAUSE) &
			(~UVD_DPG_PAUSE__NJ_PAUSE_DPG_ACK_MASK);

		if (new_state->fw_based == VCN_DPG_STATE__PAUSE) {
			ret_code = SOC15_WAIT_ON_RREG(VCN, inst_idx, mmUVD_POWER_STATUS, 0x1,
				UVD_POWER_STATUS__UVD_POWER_STATUS_MASK);

			if (!ret_code) {
				/* pause DPG */
				reg_data |= UVD_DPG_PAUSE__NJ_PAUSE_DPG_REQ_MASK;
				WREG32_SOC15(VCN, inst_idx, mmUVD_DPG_PAUSE, reg_data);

				/* wait for ACK */
				SOC15_WAIT_ON_RREG(VCN, inst_idx, mmUVD_DPG_PAUSE,
					UVD_DPG_PAUSE__NJ_PAUSE_DPG_ACK_MASK,
					UVD_DPG_PAUSE__NJ_PAUSE_DPG_ACK_MASK);

				/* Stall DPG before WPTR/RPTR reset */
				WREG32_P(SOC15_REG_OFFSET(VCN, inst_idx, mmUVD_POWER_STATUS),
					UVD_POWER_STATUS__STALL_DPG_POWER_UP_MASK,
					~UVD_POWER_STATUS__STALL_DPG_POWER_UP_MASK);

				/* Restore */
				fw_shared = adev->vcn.inst[inst_idx].fw_shared_cpu_addr;
				fw_shared->multi_queue.encode_generalpurpose_queue_mode |= cpu_to_le32(FW_QUEUE_RING_RESET);
				ring = &adev->vcn.inst[inst_idx].ring_enc[0];
				ring->wptr = 0;
				WREG32_SOC15(VCN, inst_idx, mmUVD_RB_BASE_LO, ring->gpu_addr);
				WREG32_SOC15(VCN, inst_idx, mmUVD_RB_BASE_HI, upper_32_bits(ring->gpu_addr));
				WREG32_SOC15(VCN, inst_idx, mmUVD_RB_SIZE, ring->ring_size / 4);
				WREG32_SOC15(VCN, inst_idx, mmUVD_RB_RPTR, lower_32_bits(ring->wptr));
				WREG32_SOC15(VCN, inst_idx, mmUVD_RB_WPTR, lower_32_bits(ring->wptr));
				fw_shared->multi_queue.encode_generalpurpose_queue_mode &= cpu_to_le32(~FW_QUEUE_RING_RESET);

				fw_shared->multi_queue.encode_lowlatency_queue_mode |= cpu_to_le32(FW_QUEUE_RING_RESET);
				ring = &adev->vcn.inst[inst_idx].ring_enc[1];
				ring->wptr = 0;
				WREG32_SOC15(VCN, inst_idx, mmUVD_RB_BASE_LO2, ring->gpu_addr);
				WREG32_SOC15(VCN, inst_idx, mmUVD_RB_BASE_HI2, upper_32_bits(ring->gpu_addr));
				WREG32_SOC15(VCN, inst_idx, mmUVD_RB_SIZE2, ring->ring_size / 4);
				WREG32_SOC15(VCN, inst_idx, mmUVD_RB_RPTR2, lower_32_bits(ring->wptr));
				WREG32_SOC15(VCN, inst_idx, mmUVD_RB_WPTR2, lower_32_bits(ring->wptr));
				fw_shared->multi_queue.encode_lowlatency_queue_mode &= cpu_to_le32(~FW_QUEUE_RING_RESET);

				/* restore wptr/rptr with pointers saved in FW shared memory */
				WREG32_SOC15(VCN, inst_idx, mmUVD_RBC_RB_RPTR, fw_shared->rb.rptr);
				WREG32_SOC15(VCN, inst_idx, mmUVD_RBC_RB_WPTR, fw_shared->rb.wptr);

				/* Unstall DPG */
				WREG32_P(SOC15_REG_OFFSET(VCN, inst_idx, mmUVD_POWER_STATUS),
					0, ~UVD_POWER_STATUS__STALL_DPG_POWER_UP_MASK);

				SOC15_WAIT_ON_RREG(VCN, inst_idx, mmUVD_POWER_STATUS,
					UVD_PGFSM_CONFIG__UVDM_UVDU_PWR_ON, UVD_POWER_STATUS__UVD_POWER_STATUS_MASK);
			}
		} else {
			/* unpause dpg, no need to wait */
			reg_data &= ~UVD_DPG_PAUSE__NJ_PAUSE_DPG_REQ_MASK;
			WREG32_SOC15(VCN, inst_idx, mmUVD_DPG_PAUSE, reg_data);
		}
		adev->vcn.inst[inst_idx].pause_state.fw_based = new_state->fw_based;
	}

	return 0;
}
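/*
 * The DPG pause handshake above: request the pause with NJ_PAUSE_DPG_REQ,
 * poll UVD_DPG_PAUSE until firmware raises NJ_PAUSE_DPG_ACK, and keep DPG
 * power-up stalled while the encode rings and the RBC read/write pointers
 * are reprogrammed from the copies kept in FW shared memory.
 */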
/**
 * vcn_v3_0_dec_ring_get_rptr - get read pointer
 *
 * @ring: amdgpu_ring pointer
 *
 * Returns the current hardware read pointer
 */
static uint64_t vcn_v3_0_dec_ring_get_rptr(struct amdgpu_ring *ring)
{
	struct amdgpu_device *adev = ring->adev;

	return RREG32_SOC15(VCN, ring->me, mmUVD_RBC_RB_RPTR);
}
/**
 * vcn_v3_0_dec_ring_get_wptr - get write pointer
 *
 * @ring: amdgpu_ring pointer
 *
 * Returns the current hardware write pointer
 */
static uint64_t vcn_v3_0_dec_ring_get_wptr(struct amdgpu_ring *ring)
{
	struct amdgpu_device *adev = ring->adev;

	if (ring->use_doorbell)
		return adev->wb.wb[ring->wptr_offs];
	else
		return RREG32_SOC15(VCN, ring->me, mmUVD_RBC_RB_WPTR);
}
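/*
 * In DPG mode the decode block can power down between jobs, so the wptr
 * update below is mirrored into FW shared memory (rb.wptr) and
 * UVD_SCRATCH2; vcn_v3_0_pause_dpg_mode() uses those copies to reprogram
 * the RBC pointers across a pause/unpause cycle.
 */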
/**
 * vcn_v3_0_dec_ring_set_wptr - set write pointer
 *
 * @ring: amdgpu_ring pointer
 *
 * Commits the write pointer to the hardware
 */
static void vcn_v3_0_dec_ring_set_wptr(struct amdgpu_ring *ring)
{
	struct amdgpu_device *adev = ring->adev;
	volatile struct amdgpu_fw_shared *fw_shared;

	if (adev->pg_flags & AMD_PG_SUPPORT_VCN_DPG) {
		/* whenever we update RBC_RB_WPTR, save the wptr in shared rb.wptr and scratch2 */
		fw_shared = adev->vcn.inst[ring->me].fw_shared_cpu_addr;
		fw_shared->rb.wptr = lower_32_bits(ring->wptr);
		WREG32_SOC15(VCN, ring->me, mmUVD_SCRATCH2,
			lower_32_bits(ring->wptr));
	}

	if (ring->use_doorbell) {
		adev->wb.wb[ring->wptr_offs] = lower_32_bits(ring->wptr);
		WDOORBELL32(ring->doorbell_index, lower_32_bits(ring->wptr));
	} else {
		WREG32_SOC15(VCN, ring->me, mmUVD_RBC_RB_WPTR, lower_32_bits(ring->wptr));
	}
}
static void vcn_v3_0_dec_sw_ring_emit_fence(struct amdgpu_ring *ring, u64 addr,
				u64 seq, uint32_t flags)
{
	WARN_ON(flags & AMDGPU_FENCE_FLAG_64BIT);

	amdgpu_ring_write(ring, VCN_DEC_SW_CMD_FENCE);
	amdgpu_ring_write(ring, lower_32_bits(addr));
	amdgpu_ring_write(ring, upper_32_bits(addr));
	amdgpu_ring_write(ring, lower_32_bits(seq));
	amdgpu_ring_write(ring, VCN_DEC_SW_CMD_TRAP);
}

static void vcn_v3_0_dec_sw_ring_insert_end(struct amdgpu_ring *ring)
{
	amdgpu_ring_write(ring, VCN_DEC_SW_CMD_END);
}
static void vcn_v3_0_dec_sw_ring_emit_ib(struct amdgpu_ring *ring,
					struct amdgpu_job *job,
					struct amdgpu_ib *ib,
					uint32_t flags)
{
	uint32_t vmid = AMDGPU_JOB_GET_VMID(job);

	amdgpu_ring_write(ring, VCN_DEC_SW_CMD_IB);
	amdgpu_ring_write(ring, vmid);
	amdgpu_ring_write(ring, lower_32_bits(ib->gpu_addr));
	amdgpu_ring_write(ring, upper_32_bits(ib->gpu_addr));
	amdgpu_ring_write(ring, ib->length_dw);
}
static void vcn_v3_0_dec_sw_ring_emit_reg_wait(struct amdgpu_ring *ring, uint32_t reg,
				uint32_t val, uint32_t mask)
{
	amdgpu_ring_write(ring, VCN_DEC_SW_CMD_REG_WAIT);
	amdgpu_ring_write(ring, reg << 2);
	amdgpu_ring_write(ring, mask);
	amdgpu_ring_write(ring, val);
}
static void vcn_v3_0_dec_sw_ring_emit_vm_flush(struct amdgpu_ring *ring,
				uint32_t vmid, uint64_t pd_addr)
{
	struct amdgpu_vmhub *hub = &ring->adev->vmhub[ring->funcs->vmhub];
	uint32_t data0, data1, mask;

	pd_addr = amdgpu_gmc_emit_flush_gpu_tlb(ring, vmid, pd_addr);

	/* wait for register write */
	data0 = hub->ctx0_ptb_addr_lo32 + vmid * hub->ctx_addr_distance;
	data1 = lower_32_bits(pd_addr);
	mask = 0xffffffff;
	vcn_v3_0_dec_sw_ring_emit_reg_wait(ring, data0, data1, mask);
}
static void vcn_v3_0_dec_sw_ring_emit_wreg(struct amdgpu_ring *ring, uint32_t reg, uint32_t val)
{
	amdgpu_ring_write(ring, VCN_DEC_SW_CMD_REG_WRITE);
	amdgpu_ring_write(ring, reg << 2);
	amdgpu_ring_write(ring, val);
}
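/*
 * Rough command stream layout for the software decode ring, one DW per
 * amdgpu_ring_write() in the helpers above:
 *
 *   IB:        VCN_DEC_SW_CMD_IB, vmid, ib_addr_lo, ib_addr_hi, length_dw
 *   fence:     VCN_DEC_SW_CMD_FENCE, addr_lo, addr_hi, seq, VCN_DEC_SW_CMD_TRAP
 *   reg write: VCN_DEC_SW_CMD_REG_WRITE, reg << 2, val
 *   reg wait:  VCN_DEC_SW_CMD_REG_WAIT, reg << 2, mask, val
 *   end:       VCN_DEC_SW_CMD_END
 *
 * These DW counts are what .emit_ib_size (5) and .emit_frame_size in
 * vcn_v3_0_dec_sw_ring_vm_funcs below account for.
 */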
static const struct amdgpu_ring_funcs vcn_v3_0_dec_sw_ring_vm_funcs = {
	.type = AMDGPU_RING_TYPE_VCN_DEC,
	.nop = VCN_DEC_SW_CMD_NO_OP,
	.vmhub = AMDGPU_MMHUB_0,
	.get_rptr = vcn_v3_0_dec_ring_get_rptr,
	.get_wptr = vcn_v3_0_dec_ring_get_wptr,
	.set_wptr = vcn_v3_0_dec_ring_set_wptr,
	.emit_frame_size =
		SOC15_FLUSH_GPU_TLB_NUM_WREG * 3 +
		SOC15_FLUSH_GPU_TLB_NUM_REG_WAIT * 4 +
		4 + /* vcn_v3_0_dec_sw_ring_emit_vm_flush */
		5 + 5 + /* vcn_v3_0_dec_sw_ring_emit_fence x2 vm fence */
		1, /* vcn_v3_0_dec_sw_ring_insert_end */
	.emit_ib_size = 5, /* vcn_v3_0_dec_sw_ring_emit_ib */
	.emit_ib = vcn_v3_0_dec_sw_ring_emit_ib,
	.emit_fence = vcn_v3_0_dec_sw_ring_emit_fence,
	.emit_vm_flush = vcn_v3_0_dec_sw_ring_emit_vm_flush,
	.test_ring = amdgpu_vcn_dec_sw_ring_test_ring,
	.test_ib = NULL, /* amdgpu_vcn_dec_sw_ring_test_ib */
	.insert_nop = amdgpu_ring_insert_nop,
	.insert_end = vcn_v3_0_dec_sw_ring_insert_end,
	.pad_ib = amdgpu_ring_generic_pad_ib,
	.begin_use = amdgpu_vcn_ring_begin_use,
	.end_use = amdgpu_vcn_ring_end_use,
	.emit_wreg = vcn_v3_0_dec_sw_ring_emit_wreg,
	.emit_reg_wait = vcn_v3_0_dec_sw_ring_emit_reg_wait,
	.emit_reg_write_reg_wait = amdgpu_ring_emit_reg_write_reg_wait_helper,
};
static int vcn_v3_0_limit_sched(struct amdgpu_cs_parser *p)
{
	struct drm_gpu_scheduler **scheds;

	/* The create msg must be in the first IB submitted */
	if (atomic_read(&p->entity->fence_seq))
		return -EINVAL;

	scheds = p->adev->gpu_sched[AMDGPU_HW_IP_VCN_DEC]
		[AMDGPU_RING_PRIO_DEFAULT].sched;
	drm_sched_entity_modify_sched(p->entity, scheds, 1);

	return 0;
}
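/*
 * vcn_v3_0_limit_sched() narrows the entity to a single decode scheduler
 * (the first instance). A non-zero fence_seq means the entity has already
 * submitted work that may have been scheduled elsewhere, so migrating it
 * is no longer safe; hence the create message must come in the first IB.
 */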
static int vcn_v3_0_dec_msg(struct amdgpu_cs_parser *p, uint64_t addr)
{
	struct ttm_operation_ctx ctx = { false, false };
	struct amdgpu_bo_va_mapping *map;
	uint32_t *msg, num_buffers;
	struct amdgpu_bo *bo;
	uint64_t start, end;
	unsigned int i;
	void *ptr;
	int r;

	addr &= AMDGPU_GMC_HOLE_MASK;
	r = amdgpu_cs_find_mapping(p, addr, &bo, &map);
	if (r) {
		DRM_ERROR("Can't find BO for addr 0x%08Lx\n", addr);
		return r;
	}

	start = map->start * AMDGPU_GPU_PAGE_SIZE;
	end = (map->last + 1) * AMDGPU_GPU_PAGE_SIZE;
	if (addr & 0x7) {
		DRM_ERROR("VCN messages must be 8 byte aligned!\n");
		return -EINVAL;
	}

	bo->flags |= AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED;
	amdgpu_bo_placement_from_domain(bo, bo->allowed_domains);
	r = ttm_bo_validate(&bo->tbo, &bo->placement, &ctx);
	if (r) {
		DRM_ERROR("Failed validating the VCN message BO (%d)!\n", r);
		return r;
	}

	r = amdgpu_bo_kmap(bo, &ptr);
	if (r) {
		DRM_ERROR("Failed mapping the VCN message (%d)!\n", r);
		return r;
	}

	msg = ptr + addr - start;

	/* make sure the message fits inside the mapping */
	if (msg[1] > end - addr) {
		r = -EINVAL;
		goto out;
	}

	if (msg[3] != RDECODE_MSG_CREATE)
		goto out;

	num_buffers = msg[2];
	for (i = 0, msg = &msg[6]; i < num_buffers; ++i, msg += 4) {
		uint32_t offset, size, *create;

		if (msg[0] != RDECODE_MESSAGE_CREATE)
			continue;

		offset = msg[1];
		size = msg[2];

		if (offset + size > end) {
			r = -EINVAL;
			goto out;
		}

		create = ptr + addr + offset - start;

		/* H264, HEVC and VP9 can run on any instance */
		if (create[0] == 0x7 || create[0] == 0x10 || create[0] == 0x11)
			continue;

		r = vcn_v3_0_limit_sched(p);
		if (r)
			goto out;
	}

out:
	amdgpu_bo_kunmap(bo);
	return r;
}
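/*
 * Message layout as parsed by vcn_v3_0_dec_msg() above (DW indices relative
 * to the message start): msg[1] is the message size checked against the
 * mapping, msg[2] the buffer count, msg[3] the message type
 * (RDECODE_MSG_CREATE), and buffer descriptors start at msg[6] with a
 * stride of 4 DWs ([0] type, [1] offset, [2] size). The first DW of a
 * create buffer selects the codec; 0x7 (H264), 0x10 (HEVC) and 0x11 (VP9)
 * may decode on any instance, everything else is pinned to instance 0.
 */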
static int vcn_v3_0_ring_patch_cs_in_place(struct amdgpu_cs_parser *p,
					uint32_t ib_idx)
{
	struct amdgpu_ring *ring = to_amdgpu_ring(p->entity->rq->sched);
	struct amdgpu_ib *ib = &p->job->ibs[ib_idx];
	uint32_t msg_lo = 0, msg_hi = 0;
	unsigned int i;
	int r;

	/* The first instance can decode anything */
	if (!ring->me)
		return 0;

	for (i = 0; i < ib->length_dw; i += 2) {
		uint32_t reg = amdgpu_get_ib_value(p, ib_idx, i);
		uint32_t val = amdgpu_get_ib_value(p, ib_idx, i + 1);

		if (reg == PACKET0(p->adev->vcn.internal.data0, 0)) {
			msg_lo = val;
		} else if (reg == PACKET0(p->adev->vcn.internal.data1, 0)) {
			msg_hi = val;
		} else if (reg == PACKET0(p->adev->vcn.internal.cmd, 0) &&
			   val == 0) {
			r = vcn_v3_0_dec_msg(p, ((u64)msg_hi) << 32 | msg_lo);
			if (r)
				return r;
		}
	}

	return 0;
}
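/*
 * The scan above follows the decode ring register protocol: the message
 * address is written as PACKET0 writes to the internal DATA0/DATA1
 * registers and kicked off with a CMD write, so at the CMD packet
 * ((u64)msg_hi << 32 | msg_lo) is the GPU address of the message that
 * needs validation.
 */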
static const struct amdgpu_ring_funcs vcn_v3_0_dec_ring_vm_funcs = {
	.type = AMDGPU_RING_TYPE_VCN_DEC,
	.vmhub = AMDGPU_MMHUB_0,
	.get_rptr = vcn_v3_0_dec_ring_get_rptr,
	.get_wptr = vcn_v3_0_dec_ring_get_wptr,
	.set_wptr = vcn_v3_0_dec_ring_set_wptr,
	.patch_cs_in_place = vcn_v3_0_ring_patch_cs_in_place,
	.emit_frame_size =
		SOC15_FLUSH_GPU_TLB_NUM_WREG * 6 +
		SOC15_FLUSH_GPU_TLB_NUM_REG_WAIT * 8 +
		8 + /* vcn_v2_0_dec_ring_emit_vm_flush */
		14 + 14 + /* vcn_v2_0_dec_ring_emit_fence x2 vm fence */
		6,
	.emit_ib_size = 8, /* vcn_v2_0_dec_ring_emit_ib */
	.emit_ib = vcn_v2_0_dec_ring_emit_ib,
	.emit_fence = vcn_v2_0_dec_ring_emit_fence,
	.emit_vm_flush = vcn_v2_0_dec_ring_emit_vm_flush,
	.test_ring = vcn_v2_0_dec_ring_test_ring,
	.test_ib = amdgpu_vcn_dec_ring_test_ib,
	.insert_nop = vcn_v2_0_dec_ring_insert_nop,
	.insert_start = vcn_v2_0_dec_ring_insert_start,
	.insert_end = vcn_v2_0_dec_ring_insert_end,
	.pad_ib = amdgpu_ring_generic_pad_ib,
	.begin_use = amdgpu_vcn_ring_begin_use,
	.end_use = amdgpu_vcn_ring_end_use,
	.emit_wreg = vcn_v2_0_dec_ring_emit_wreg,
	.emit_reg_wait = vcn_v2_0_dec_ring_emit_reg_wait,
	.emit_reg_write_reg_wait = amdgpu_ring_emit_reg_write_reg_wait_helper,
};
/**
 * vcn_v3_0_enc_ring_get_rptr - get enc read pointer
 *
 * @ring: amdgpu_ring pointer
 *
 * Returns the current hardware enc read pointer
 */
static uint64_t vcn_v3_0_enc_ring_get_rptr(struct amdgpu_ring *ring)
{
	struct amdgpu_device *adev = ring->adev;

	if (ring == &adev->vcn.inst[ring->me].ring_enc[0])
		return RREG32_SOC15(VCN, ring->me, mmUVD_RB_RPTR);
	else
		return RREG32_SOC15(VCN, ring->me, mmUVD_RB_RPTR2);
}
/**
 * vcn_v3_0_enc_ring_get_wptr - get enc write pointer
 *
 * @ring: amdgpu_ring pointer
 *
 * Returns the current hardware enc write pointer
 */
static uint64_t vcn_v3_0_enc_ring_get_wptr(struct amdgpu_ring *ring)
{
	struct amdgpu_device *adev = ring->adev;

	if (ring == &adev->vcn.inst[ring->me].ring_enc[0]) {
		if (ring->use_doorbell)
			return adev->wb.wb[ring->wptr_offs];
		else
			return RREG32_SOC15(VCN, ring->me, mmUVD_RB_WPTR);
	} else {
		if (ring->use_doorbell)
			return adev->wb.wb[ring->wptr_offs];
		else
			return RREG32_SOC15(VCN, ring->me, mmUVD_RB_WPTR2);
	}
}
/**
 * vcn_v3_0_enc_ring_set_wptr - set enc write pointer
 *
 * @ring: amdgpu_ring pointer
 *
 * Commits the enc write pointer to the hardware
 */
static void vcn_v3_0_enc_ring_set_wptr(struct amdgpu_ring *ring)
{
	struct amdgpu_device *adev = ring->adev;

	if (ring == &adev->vcn.inst[ring->me].ring_enc[0]) {
		if (ring->use_doorbell) {
			adev->wb.wb[ring->wptr_offs] = lower_32_bits(ring->wptr);
			WDOORBELL32(ring->doorbell_index, lower_32_bits(ring->wptr));
		} else {
			WREG32_SOC15(VCN, ring->me, mmUVD_RB_WPTR, lower_32_bits(ring->wptr));
		}
	} else {
		if (ring->use_doorbell) {
			adev->wb.wb[ring->wptr_offs] = lower_32_bits(ring->wptr);
			WDOORBELL32(ring->doorbell_index, lower_32_bits(ring->wptr));
		} else {
			WREG32_SOC15(VCN, ring->me, mmUVD_RB_WPTR2, lower_32_bits(ring->wptr));
		}
	}
}
static const struct amdgpu_ring_funcs vcn_v3_0_enc_ring_vm_funcs = {
	.type = AMDGPU_RING_TYPE_VCN_ENC,
	.nop = VCN_ENC_CMD_NO_OP,
	.vmhub = AMDGPU_MMHUB_0,
	.get_rptr = vcn_v3_0_enc_ring_get_rptr,
	.get_wptr = vcn_v3_0_enc_ring_get_wptr,
	.set_wptr = vcn_v3_0_enc_ring_set_wptr,
	.emit_frame_size =
		SOC15_FLUSH_GPU_TLB_NUM_WREG * 3 +
		SOC15_FLUSH_GPU_TLB_NUM_REG_WAIT * 4 +
		4 + /* vcn_v2_0_enc_ring_emit_vm_flush */
		5 + 5 + /* vcn_v2_0_enc_ring_emit_fence x2 vm fence */
		1, /* vcn_v2_0_enc_ring_insert_end */
	.emit_ib_size = 5, /* vcn_v2_0_enc_ring_emit_ib */
	.emit_ib = vcn_v2_0_enc_ring_emit_ib,
	.emit_fence = vcn_v2_0_enc_ring_emit_fence,
	.emit_vm_flush = vcn_v2_0_enc_ring_emit_vm_flush,
	.test_ring = amdgpu_vcn_enc_ring_test_ring,
	.test_ib = amdgpu_vcn_enc_ring_test_ib,
	.insert_nop = amdgpu_ring_insert_nop,
	.insert_end = vcn_v2_0_enc_ring_insert_end,
	.pad_ib = amdgpu_ring_generic_pad_ib,
	.begin_use = amdgpu_vcn_ring_begin_use,
	.end_use = amdgpu_vcn_ring_end_use,
	.emit_wreg = vcn_v2_0_enc_ring_emit_wreg,
	.emit_reg_wait = vcn_v2_0_enc_ring_emit_reg_wait,
	.emit_reg_write_reg_wait = amdgpu_ring_emit_reg_write_reg_wait_helper,
};
static void vcn_v3_0_set_dec_ring_funcs(struct amdgpu_device *adev)
{
	int i;

	for (i = 0; i < adev->vcn.num_vcn_inst; ++i) {
		if (adev->vcn.harvest_config & (1 << i))
			continue;

		if (!DEC_SW_RING_ENABLED)
			adev->vcn.inst[i].ring_dec.funcs = &vcn_v3_0_dec_ring_vm_funcs;
		else
			adev->vcn.inst[i].ring_dec.funcs = &vcn_v3_0_dec_sw_ring_vm_funcs;
		adev->vcn.inst[i].ring_dec.me = i;
		DRM_INFO("VCN(%d) decode%s is enabled in VM mode\n", i,
			 DEC_SW_RING_ENABLED ? " (Software Ring)" : "");
	}
}
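/*
 * Which decode ring implementation is used is decided at build time by
 * DEC_SW_RING_ENABLED (FALSE here, so the register-based
 * vcn_v3_0_dec_ring_vm_funcs is selected); there is no runtime switch.
 */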
static void vcn_v3_0_set_enc_ring_funcs(struct amdgpu_device *adev)
{
	int i, j;

	for (i = 0; i < adev->vcn.num_vcn_inst; ++i) {
		if (adev->vcn.harvest_config & (1 << i))
			continue;

		for (j = 0; j < adev->vcn.num_enc_rings; ++j) {
			adev->vcn.inst[i].ring_enc[j].funcs = &vcn_v3_0_enc_ring_vm_funcs;
			adev->vcn.inst[i].ring_enc[j].me = i;
		}
		DRM_INFO("VCN(%d) encode is enabled in VM mode\n", i);
	}
}
static bool vcn_v3_0_is_idle(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	int i, ret = 1;

	for (i = 0; i < adev->vcn.num_vcn_inst; ++i) {
		if (adev->vcn.harvest_config & (1 << i))
			continue;

		ret &= (RREG32_SOC15(VCN, i, mmUVD_STATUS) == UVD_STATUS__IDLE);
	}

	return ret;
}
static int vcn_v3_0_wait_for_idle(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	int i, ret = 0;

	for (i = 0; i < adev->vcn.num_vcn_inst; ++i) {
		if (adev->vcn.harvest_config & (1 << i))
			continue;

		ret = SOC15_WAIT_ON_RREG(VCN, i, mmUVD_STATUS, UVD_STATUS__IDLE,
			UVD_STATUS__IDLE);
		if (ret)
			return ret;
	}

	return ret;
}
static int vcn_v3_0_set_clockgating_state(void *handle,
					enum amd_clockgating_state state)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	bool enable = (state == AMD_CG_STATE_GATE);
	int i;

	for (i = 0; i < adev->vcn.num_vcn_inst; ++i) {
		if (adev->vcn.harvest_config & (1 << i))
			continue;

		if (enable) {
			/* only gate the clocks once the block is idle */
			if (RREG32_SOC15(VCN, i, mmUVD_STATUS) != UVD_STATUS__IDLE)
				return -EBUSY;
			vcn_v3_0_enable_clock_gating(adev, i);
		} else {
			vcn_v3_0_disable_clock_gating(adev, i);
		}
	}

	return 0;
}
static int vcn_v3_0_set_powergating_state(void *handle,
					enum amd_powergating_state state)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	int ret;

	/* for SRIOV, guest should not control VCN Power-gating
	 * MMSCH FW should control Power-gating and clock-gating
	 * guest should avoid touching CGC and PG
	 */
	if (amdgpu_sriov_vf(adev)) {
		adev->vcn.cur_state = AMD_PG_STATE_UNGATE;
		return 0;
	}

	if (state == adev->vcn.cur_state)
		return 0;

	if (state == AMD_PG_STATE_GATE)
		ret = vcn_v3_0_stop(adev);
	else
		ret = vcn_v3_0_start(adev);

	if (!ret)
		adev->vcn.cur_state = state;

	return ret;
}
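/*
 * Power gating is implemented with the full start/stop sequences:
 * AMD_PG_STATE_GATE tears the instances down via vcn_v3_0_stop() and
 * ungating brings them back up via vcn_v3_0_start(); cur_state is cached
 * so a request for the already-current state is a no-op.
 */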
static int vcn_v3_0_set_interrupt_state(struct amdgpu_device *adev,
					struct amdgpu_irq_src *source,
					unsigned type,
					enum amdgpu_interrupt_state state)
{
	return 0;
}
static int vcn_v3_0_process_interrupt(struct amdgpu_device *adev,
				struct amdgpu_irq_src *source,
				struct amdgpu_iv_entry *entry)
{
	uint32_t ip_instance;

	switch (entry->client_id) {
	case SOC15_IH_CLIENTID_VCN:
		ip_instance = 0;
		break;
	case SOC15_IH_CLIENTID_VCN1:
		ip_instance = 1;
		break;
	default:
		DRM_ERROR("Unhandled client id: %d\n", entry->client_id);
		return 0;
	}

	DRM_DEBUG("IH: VCN TRAP\n");

	switch (entry->src_id) {
	case VCN_2_0__SRCID__UVD_SYSTEM_MESSAGE_INTERRUPT:
		amdgpu_fence_process(&adev->vcn.inst[ip_instance].ring_dec);
		break;
	case VCN_2_0__SRCID__UVD_ENC_GENERAL_PURPOSE:
		amdgpu_fence_process(&adev->vcn.inst[ip_instance].ring_enc[0]);
		break;
	case VCN_2_0__SRCID__UVD_ENC_LOW_LATENCY:
		amdgpu_fence_process(&adev->vcn.inst[ip_instance].ring_enc[1]);
		break;
	default:
		DRM_ERROR("Unhandled interrupt: %d %d\n",
			  entry->src_id, entry->src_data[0]);
		break;
	}

	return 0;
}
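/*
 * Routing above: the IH client id picks the VCN instance and the source id
 * picks the ring (system message -> decode ring, ENC_GENERAL_PURPOSE ->
 * encode ring 0, ENC_LOW_LATENCY -> encode ring 1); each case simply runs
 * fence processing for that ring.
 */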
static const struct amdgpu_irq_src_funcs vcn_v3_0_irq_funcs = {
	.set = vcn_v3_0_set_interrupt_state,
	.process = vcn_v3_0_process_interrupt,
};
static void vcn_v3_0_set_irq_funcs(struct amdgpu_device *adev)
{
	int i;

	for (i = 0; i < adev->vcn.num_vcn_inst; ++i) {
		if (adev->vcn.harvest_config & (1 << i))
			continue;

		adev->vcn.inst[i].irq.num_types = adev->vcn.num_enc_rings + 1;
		adev->vcn.inst[i].irq.funcs = &vcn_v3_0_irq_funcs;
	}
}
static const struct amd_ip_funcs vcn_v3_0_ip_funcs = {
	.name = "vcn_v3_0",
	.early_init = vcn_v3_0_early_init,
	.late_init = NULL,
	.sw_init = vcn_v3_0_sw_init,
	.sw_fini = vcn_v3_0_sw_fini,
	.hw_init = vcn_v3_0_hw_init,
	.hw_fini = vcn_v3_0_hw_fini,
	.suspend = vcn_v3_0_suspend,
	.resume = vcn_v3_0_resume,
	.is_idle = vcn_v3_0_is_idle,
	.wait_for_idle = vcn_v3_0_wait_for_idle,
	.check_soft_reset = NULL,
	.pre_soft_reset = NULL,
	.soft_reset = NULL,
	.post_soft_reset = NULL,
	.set_clockgating_state = vcn_v3_0_set_clockgating_state,
	.set_powergating_state = vcn_v3_0_set_powergating_state,
};
const struct amdgpu_ip_block_version vcn_v3_0_ip_block =
{
	.type = AMD_IP_BLOCK_TYPE_VCN,
	.major = 3,
	.minor = 0,
	.rev = 0,
	.funcs = &vcn_v3_0_ip_funcs,
};