/*
 * Copyright 2019 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 */
#include <linux/firmware.h>
#include "amdgpu.h"
#include "amdgpu_vcn.h"
#include "amdgpu_pm.h"
#include "soc15.h"
#include "soc15d.h"
#include "vcn_v2_0.h"
#include "mmsch_v3_0.h"

#include "vcn/vcn_3_0_0_offset.h"
#include "vcn/vcn_3_0_0_sh_mask.h"
#include "ivsrcid/vcn/irqsrcs_vcn_2_0.h"

#include <drm/drm_drv.h>
#define mmUVD_CONTEXT_ID_INTERNAL_OFFSET			0x27
#define mmUVD_GPCOM_VCPU_CMD_INTERNAL_OFFSET			0x0f
#define mmUVD_GPCOM_VCPU_DATA0_INTERNAL_OFFSET			0x10
#define mmUVD_GPCOM_VCPU_DATA1_INTERNAL_OFFSET			0x11
#define mmUVD_NO_OP_INTERNAL_OFFSET				0x29
#define mmUVD_GP_SCRATCH8_INTERNAL_OFFSET			0x66
#define mmUVD_SCRATCH9_INTERNAL_OFFSET				0xc01d

#define mmUVD_LMI_RBC_IB_VMID_INTERNAL_OFFSET			0x431
#define mmUVD_LMI_RBC_IB_64BIT_BAR_LOW_INTERNAL_OFFSET		0x3b4
#define mmUVD_LMI_RBC_IB_64BIT_BAR_HIGH_INTERNAL_OFFSET		0x3b5
#define mmUVD_RBC_IB_SIZE_INTERNAL_OFFSET			0x25c
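
/*
 * The *_INTERNAL_OFFSET values above are the firmware-visible register
 * indices used when emitting register writes into ring packets (kept in
 * adev->vcn.internal.*); they are distinct from the per-instance MMIO
 * offsets looked up with SOC15_REG_OFFSET() and kept in
 * adev->vcn.inst[i].external.*.
 */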

#define VCN_INSTANCES_SIENNA_CICHLID				2
#define DEC_SW_RING_ENABLED					FALSE

#define RDECODE_MSG_CREATE					0x00000000
#define RDECODE_MESSAGE_CREATE					0x00000001

static int amdgpu_ih_clientid_vcns[] = {
	SOC15_IH_CLIENTID_VCN,
	SOC15_IH_CLIENTID_VCN1
};

static int amdgpu_ucode_id_vcns[] = {
	AMDGPU_UCODE_ID_VCN,
	AMDGPU_UCODE_ID_VCN1
};

static int vcn_v3_0_start_sriov(struct amdgpu_device *adev);
static void vcn_v3_0_set_dec_ring_funcs(struct amdgpu_device *adev);
static void vcn_v3_0_set_enc_ring_funcs(struct amdgpu_device *adev);
static void vcn_v3_0_set_irq_funcs(struct amdgpu_device *adev);
static int vcn_v3_0_set_powergating_state(void *handle,
			enum amd_powergating_state state);
static int vcn_v3_0_pause_dpg_mode(struct amdgpu_device *adev,
			int inst_idx, struct dpg_pause_state *new_state);

static void vcn_v3_0_dec_ring_set_wptr(struct amdgpu_ring *ring);
static void vcn_v3_0_enc_ring_set_wptr(struct amdgpu_ring *ring);

/**
 * vcn_v3_0_early_init - set function pointers
 *
 * @handle: amdgpu_device pointer
 *
 * Set ring and irq function pointers
 */
static int vcn_v3_0_early_init(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	if (amdgpu_sriov_vf(adev)) {
		adev->vcn.num_vcn_inst = VCN_INSTANCES_SIENNA_CICHLID;
		adev->vcn.harvest_config = 0;
		adev->vcn.num_enc_rings = 1;

		if (adev->asic_type == CHIP_BEIGE_GOBY) {
			adev->vcn.num_vcn_inst = 1;
			adev->vcn.num_enc_rings = 0;
		}
	} else {
		if (adev->asic_type == CHIP_SIENNA_CICHLID) {
			u32 harvest;
			int i;

			adev->vcn.num_vcn_inst = VCN_INSTANCES_SIENNA_CICHLID;
			for (i = 0; i < adev->vcn.num_vcn_inst; i++) {
				harvest = RREG32_SOC15(VCN, i, mmCC_UVD_HARVESTING);
				if (harvest & CC_UVD_HARVESTING__UVD_DISABLE_MASK)
					adev->vcn.harvest_config |= 1 << i;
			}

			if (adev->vcn.harvest_config == (AMDGPU_VCN_HARVEST_VCN0 |
						AMDGPU_VCN_HARVEST_VCN1))
				/* both instances are harvested, disable the block */
				return -ENOENT;
		} else
			adev->vcn.num_vcn_inst = 1;

		if (adev->asic_type == CHIP_BEIGE_GOBY)
			adev->vcn.num_enc_rings = 0;
		else
			adev->vcn.num_enc_rings = 2;
	}

	vcn_v3_0_set_dec_ring_funcs(adev);
	vcn_v3_0_set_enc_ring_funcs(adev);
	vcn_v3_0_set_irq_funcs(adev);

	return 0;
}

/**
 * vcn_v3_0_sw_init - sw init for VCN block
 *
 * @handle: amdgpu_device pointer
 *
 * Load firmware and sw initialization
 */
static int vcn_v3_0_sw_init(void *handle)
{
	struct amdgpu_ring *ring;
	int i, j, r;
	int vcn_doorbell_index = 0;
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	r = amdgpu_vcn_sw_init(adev);
	if (r)
		return r;

	if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) {
		const struct common_firmware_header *hdr;
		hdr = (const struct common_firmware_header *)adev->vcn.fw->data;
		adev->firmware.ucode[AMDGPU_UCODE_ID_VCN].ucode_id = AMDGPU_UCODE_ID_VCN;
		adev->firmware.ucode[AMDGPU_UCODE_ID_VCN].fw = adev->vcn.fw;
		adev->firmware.fw_size +=
			ALIGN(le32_to_cpu(hdr->ucode_size_bytes), PAGE_SIZE);

		if (adev->vcn.num_vcn_inst == VCN_INSTANCES_SIENNA_CICHLID) {
			adev->firmware.ucode[AMDGPU_UCODE_ID_VCN1].ucode_id = AMDGPU_UCODE_ID_VCN1;
			adev->firmware.ucode[AMDGPU_UCODE_ID_VCN1].fw = adev->vcn.fw;
			adev->firmware.fw_size +=
				ALIGN(le32_to_cpu(hdr->ucode_size_bytes), PAGE_SIZE);
		}
		DRM_INFO("PSP loading VCN firmware\n");
	}

	r = amdgpu_vcn_resume(adev);
	if (r)
		return r;

	/*
	 * Note: doorbell assignment is fixed for SRIOV multiple VCN engines
	 *
	 * vcn_db_base  = adev->doorbell_index.vcn.vcn_ring0_1 << 1;
	 * dec_ring_i   = vcn_db_base + i * (adev->vcn.num_enc_rings + 1)
	 * enc_ring_i,j = vcn_db_base + i * (adev->vcn.num_enc_rings + 1) + 1 + j
	 */
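	/*
	 * Illustrative example of the formula above: with two instances and
	 * num_enc_rings == 1 under SRIOV, the layout works out to
	 *   inst 0: dec at vcn_db_base + 0, enc0 at vcn_db_base + 1
	 *   inst 1: dec at vcn_db_base + 2, enc0 at vcn_db_base + 3
	 */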
	if (amdgpu_sriov_vf(adev)) {
		vcn_doorbell_index = adev->doorbell_index.vcn.vcn_ring0_1;
		/* get DWORD offset */
		vcn_doorbell_index = vcn_doorbell_index << 1;
	}

	for (i = 0; i < adev->vcn.num_vcn_inst; i++) {
		volatile struct amdgpu_fw_shared *fw_shared;

		if (adev->vcn.harvest_config & (1 << i))
			continue;

		adev->vcn.internal.context_id = mmUVD_CONTEXT_ID_INTERNAL_OFFSET;
		adev->vcn.internal.ib_vmid = mmUVD_LMI_RBC_IB_VMID_INTERNAL_OFFSET;
		adev->vcn.internal.ib_bar_low = mmUVD_LMI_RBC_IB_64BIT_BAR_LOW_INTERNAL_OFFSET;
		adev->vcn.internal.ib_bar_high = mmUVD_LMI_RBC_IB_64BIT_BAR_HIGH_INTERNAL_OFFSET;
		adev->vcn.internal.ib_size = mmUVD_RBC_IB_SIZE_INTERNAL_OFFSET;
		adev->vcn.internal.gp_scratch8 = mmUVD_GP_SCRATCH8_INTERNAL_OFFSET;

		adev->vcn.internal.scratch9 = mmUVD_SCRATCH9_INTERNAL_OFFSET;
		adev->vcn.inst[i].external.scratch9 = SOC15_REG_OFFSET(VCN, i, mmUVD_SCRATCH9);
		adev->vcn.internal.data0 = mmUVD_GPCOM_VCPU_DATA0_INTERNAL_OFFSET;
		adev->vcn.inst[i].external.data0 = SOC15_REG_OFFSET(VCN, i, mmUVD_GPCOM_VCPU_DATA0);
		adev->vcn.internal.data1 = mmUVD_GPCOM_VCPU_DATA1_INTERNAL_OFFSET;
		adev->vcn.inst[i].external.data1 = SOC15_REG_OFFSET(VCN, i, mmUVD_GPCOM_VCPU_DATA1);
		adev->vcn.internal.cmd = mmUVD_GPCOM_VCPU_CMD_INTERNAL_OFFSET;
		adev->vcn.inst[i].external.cmd = SOC15_REG_OFFSET(VCN, i, mmUVD_GPCOM_VCPU_CMD);
		adev->vcn.internal.nop = mmUVD_NO_OP_INTERNAL_OFFSET;
		adev->vcn.inst[i].external.nop = SOC15_REG_OFFSET(VCN, i, mmUVD_NO_OP);

		/* VCN DEC TRAP */
		r = amdgpu_irq_add_id(adev, amdgpu_ih_clientid_vcns[i],
				VCN_2_0__SRCID__UVD_SYSTEM_MESSAGE_INTERRUPT, &adev->vcn.inst[i].irq);
		if (r)
			return r;

		atomic_set(&adev->vcn.inst[i].sched_score, 0);

		ring = &adev->vcn.inst[i].ring_dec;
		ring->use_doorbell = true;
		if (amdgpu_sriov_vf(adev)) {
			ring->doorbell_index = vcn_doorbell_index + i * (adev->vcn.num_enc_rings + 1);
		} else {
			ring->doorbell_index = (adev->doorbell_index.vcn.vcn_ring0_1 << 1) + 8 * i;
		}
		sprintf(ring->name, "vcn_dec_%d", i);
		r = amdgpu_ring_init(adev, ring, 512, &adev->vcn.inst[i].irq, 0,
				     AMDGPU_RING_PRIO_DEFAULT,
				     &adev->vcn.inst[i].sched_score);
		if (r)
			return r;

		for (j = 0; j < adev->vcn.num_enc_rings; ++j) {
			/* VCN ENC TRAP */
			r = amdgpu_irq_add_id(adev, amdgpu_ih_clientid_vcns[i],
					j + VCN_2_0__SRCID__UVD_ENC_GENERAL_PURPOSE, &adev->vcn.inst[i].irq);
			if (r)
				return r;

			ring = &adev->vcn.inst[i].ring_enc[j];
			ring->use_doorbell = true;
			if (amdgpu_sriov_vf(adev)) {
				ring->doorbell_index = vcn_doorbell_index + i * (adev->vcn.num_enc_rings + 1) + 1 + j;
			} else {
				ring->doorbell_index = (adev->doorbell_index.vcn.vcn_ring0_1 << 1) + 2 + j + 8 * i;
			}
			sprintf(ring->name, "vcn_enc_%d.%d", i, j);
			r = amdgpu_ring_init(adev, ring, 512, &adev->vcn.inst[i].irq, 0,
					     AMDGPU_RING_PRIO_DEFAULT,
					     &adev->vcn.inst[i].sched_score);
			if (r)
				return r;
		}
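
		/*
		 * fw_shared is CPU-visible memory read by the VCN firmware;
		 * present_flag_0 advertises which optional features (sw ring,
		 * multi queue, RB pointer save/restore) this driver supports.
		 */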
		fw_shared = adev->vcn.inst[i].fw_shared_cpu_addr;
		fw_shared->present_flag_0 |= cpu_to_le32(AMDGPU_VCN_SW_RING_FLAG) |
					     cpu_to_le32(AMDGPU_VCN_MULTI_QUEUE_FLAG) |
					     cpu_to_le32(AMDGPU_VCN_FW_SHARED_FLAG_0_RB);
		fw_shared->sw_ring.is_enabled = cpu_to_le32(DEC_SW_RING_ENABLED);
	}

	if (amdgpu_sriov_vf(adev)) {
		r = amdgpu_virt_alloc_mm_table(adev);
		if (r)
			return r;
	}
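	/*
	 * The pause hook below is invoked from the common VCN code (job
	 * begin-of-use and idle handling) to toggle the DPG pause state.
	 */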
	if (adev->pg_flags & AMD_PG_SUPPORT_VCN_DPG)
		adev->vcn.pause_dpg_mode = vcn_v3_0_pause_dpg_mode;

	return 0;
}

/**
 * vcn_v3_0_sw_fini - sw fini for VCN block
 *
 * @handle: amdgpu_device pointer
 *
 * VCN suspend and free up sw allocation
 */
static int vcn_v3_0_sw_fini(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	int i, r, idx;

	if (drm_dev_enter(&adev->ddev, &idx)) {
		for (i = 0; i < adev->vcn.num_vcn_inst; i++) {
			volatile struct amdgpu_fw_shared *fw_shared;

			if (adev->vcn.harvest_config & (1 << i))
				continue;
			fw_shared = adev->vcn.inst[i].fw_shared_cpu_addr;
			fw_shared->present_flag_0 = 0;
			fw_shared->sw_ring.is_enabled = false;
		}

		drm_dev_exit(idx);
	}

	if (amdgpu_sriov_vf(adev))
		amdgpu_virt_free_mm_table(adev);

	r = amdgpu_vcn_suspend(adev);
	if (r)
		return r;

	r = amdgpu_vcn_sw_fini(adev);

	return r;
}

/**
 * vcn_v3_0_hw_init - start and test VCN block
 *
 * @handle: amdgpu_device pointer
 *
 * Initialize the hardware, boot up the VCPU and do some testing
 */
static int vcn_v3_0_hw_init(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	struct amdgpu_ring *ring;
	int i, j, r;

	if (amdgpu_sriov_vf(adev)) {
		r = vcn_v3_0_start_sriov(adev);
		if (r)
			goto done;

		/* initialize VCN dec and enc ring buffers */
		for (i = 0; i < adev->vcn.num_vcn_inst; ++i) {
			if (adev->vcn.harvest_config & (1 << i))
				continue;

			ring = &adev->vcn.inst[i].ring_dec;
			if (ring->sched.ready) {
				ring->wptr = 0;
				ring->wptr_old = 0;
				vcn_v3_0_dec_ring_set_wptr(ring);
			}

			for (j = 0; j < adev->vcn.num_enc_rings; ++j) {
				ring = &adev->vcn.inst[i].ring_enc[j];
				if (ring->sched.ready) {
					ring->wptr = 0;
					ring->wptr_old = 0;
					vcn_v3_0_enc_ring_set_wptr(ring);
				}
			}
		}
	} else {
		for (i = 0; i < adev->vcn.num_vcn_inst; ++i) {
			if (adev->vcn.harvest_config & (1 << i))
				continue;

			ring = &adev->vcn.inst[i].ring_dec;
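
			/* tell the NBIO block which doorbell range this VCN instance owns */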
			adev->nbio.funcs->vcn_doorbell_range(adev, ring->use_doorbell,
						ring->doorbell_index, i);

			r = amdgpu_ring_test_helper(ring);
			if (r)
				goto done;

			for (j = 0; j < adev->vcn.num_enc_rings; ++j) {
				ring = &adev->vcn.inst[i].ring_enc[j];
				r = amdgpu_ring_test_helper(ring);
				if (r)
					goto done;
			}
		}
	}

done:
	if (!r)
		DRM_INFO("VCN decode and encode initialized successfully(under %s).\n",
			(adev->pg_flags & AMD_PG_SUPPORT_VCN_DPG) ? "DPG Mode" : "SPG Mode");

	return r;
}

/**
 * vcn_v3_0_hw_fini - stop the hardware block
 *
 * @handle: amdgpu_device pointer
 *
 * Stop the VCN block, mark the ring as not ready any more
 */
static int vcn_v3_0_hw_fini(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	struct amdgpu_ring *ring;
	int i;

	for (i = 0; i < adev->vcn.num_vcn_inst; ++i) {
		if (adev->vcn.harvest_config & (1 << i))
			continue;

		ring = &adev->vcn.inst[i].ring_dec;

		if (!amdgpu_sriov_vf(adev)) {
			if ((adev->pg_flags & AMD_PG_SUPPORT_VCN_DPG) ||
					(adev->vcn.cur_state != AMD_PG_STATE_GATE &&
					 RREG32_SOC15(VCN, i, mmUVD_STATUS))) {
				vcn_v3_0_set_powergating_state(adev, AMD_PG_STATE_GATE);
			}
		}
	}

	return 0;
}

/**
 * vcn_v3_0_suspend - suspend VCN block
 *
 * @handle: amdgpu_device pointer
 *
 * HW fini and suspend VCN block
 */
static int vcn_v3_0_suspend(void *handle)
{
	int r;
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	r = vcn_v3_0_hw_fini(adev);
	if (r)
		return r;

	r = amdgpu_vcn_suspend(adev);

	return r;
}

/**
 * vcn_v3_0_resume - resume VCN block
 *
 * @handle: amdgpu_device pointer
 *
 * Resume firmware and hw init VCN block
 */
static int vcn_v3_0_resume(void *handle)
{
	int r;
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	r = amdgpu_vcn_resume(adev);
	if (r)
		return r;

	r = vcn_v3_0_hw_init(adev);

	return r;
}

/**
 * vcn_v3_0_mc_resume - memory controller programming
 *
 * @adev: amdgpu_device pointer
 * @inst: instance number
 *
 * Let the VCN memory controller know its offsets
 */
static void vcn_v3_0_mc_resume(struct amdgpu_device *adev, int inst)
{
	uint32_t size = AMDGPU_GPU_PAGE_ALIGN(adev->vcn.fw->size + 4);
	uint32_t offset;

	/* cache window 0: fw */
	if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) {
		WREG32_SOC15(VCN, inst, mmUVD_LMI_VCPU_CACHE_64BIT_BAR_LOW,
			(adev->firmware.ucode[AMDGPU_UCODE_ID_VCN + inst].tmr_mc_addr_lo));
		WREG32_SOC15(VCN, inst, mmUVD_LMI_VCPU_CACHE_64BIT_BAR_HIGH,
			(adev->firmware.ucode[AMDGPU_UCODE_ID_VCN + inst].tmr_mc_addr_hi));
		WREG32_SOC15(VCN, inst, mmUVD_VCPU_CACHE_OFFSET0, 0);
		offset = 0;
	} else {
		WREG32_SOC15(VCN, inst, mmUVD_LMI_VCPU_CACHE_64BIT_BAR_LOW,
			lower_32_bits(adev->vcn.inst[inst].gpu_addr));
		WREG32_SOC15(VCN, inst, mmUVD_LMI_VCPU_CACHE_64BIT_BAR_HIGH,
			upper_32_bits(adev->vcn.inst[inst].gpu_addr));
		offset = size;
		WREG32_SOC15(VCN, inst, mmUVD_VCPU_CACHE_OFFSET0,
			AMDGPU_UVD_FIRMWARE_OFFSET >> 3);
	}
	WREG32_SOC15(VCN, inst, mmUVD_VCPU_CACHE_SIZE0, size);

	/* cache window 1: stack */
	WREG32_SOC15(VCN, inst, mmUVD_LMI_VCPU_CACHE1_64BIT_BAR_LOW,
		lower_32_bits(adev->vcn.inst[inst].gpu_addr + offset));
	WREG32_SOC15(VCN, inst, mmUVD_LMI_VCPU_CACHE1_64BIT_BAR_HIGH,
		upper_32_bits(adev->vcn.inst[inst].gpu_addr + offset));
	WREG32_SOC15(VCN, inst, mmUVD_VCPU_CACHE_OFFSET1, 0);
	WREG32_SOC15(VCN, inst, mmUVD_VCPU_CACHE_SIZE1, AMDGPU_VCN_STACK_SIZE);

	/* cache window 2: context */
	WREG32_SOC15(VCN, inst, mmUVD_LMI_VCPU_CACHE2_64BIT_BAR_LOW,
		lower_32_bits(adev->vcn.inst[inst].gpu_addr + offset + AMDGPU_VCN_STACK_SIZE));
	WREG32_SOC15(VCN, inst, mmUVD_LMI_VCPU_CACHE2_64BIT_BAR_HIGH,
		upper_32_bits(adev->vcn.inst[inst].gpu_addr + offset + AMDGPU_VCN_STACK_SIZE));
	WREG32_SOC15(VCN, inst, mmUVD_VCPU_CACHE_OFFSET2, 0);
	WREG32_SOC15(VCN, inst, mmUVD_VCPU_CACHE_SIZE2, AMDGPU_VCN_CONTEXT_SIZE);

	/* non-cache window */
	WREG32_SOC15(VCN, inst, mmUVD_LMI_VCPU_NC0_64BIT_BAR_LOW,
		lower_32_bits(adev->vcn.inst[inst].fw_shared_gpu_addr));
	WREG32_SOC15(VCN, inst, mmUVD_LMI_VCPU_NC0_64BIT_BAR_HIGH,
		upper_32_bits(adev->vcn.inst[inst].fw_shared_gpu_addr));
	WREG32_SOC15(VCN, inst, mmUVD_VCPU_NONCACHE_OFFSET0, 0);
	WREG32_SOC15(VCN, inst, mmUVD_VCPU_NONCACHE_SIZE0,
		AMDGPU_GPU_PAGE_ALIGN(sizeof(struct amdgpu_fw_shared)));
}
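
/*
 * DPG-mode variant of vcn_v3_0_mc_resume(): when @indirect is true,
 * WREG32_SOC15_DPG_MODE() does not touch MMIO but appends (register, value)
 * pairs to the instance's DPG scratch buffer, which vcn_v3_0_start_dpg_mode()
 * later hands to the PSP via psp_update_vcn_sram().
 */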
static void vcn_v3_0_mc_resume_dpg_mode(struct amdgpu_device *adev, int inst_idx, bool indirect)
{
	uint32_t size = AMDGPU_GPU_PAGE_ALIGN(adev->vcn.fw->size + 4);
	uint32_t offset;

	/* cache window 0: fw */
	if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) {
		if (!indirect) {
			WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
				VCN, inst_idx, mmUVD_LMI_VCPU_CACHE_64BIT_BAR_LOW),
				(adev->firmware.ucode[AMDGPU_UCODE_ID_VCN + inst_idx].tmr_mc_addr_lo), 0, indirect);
			WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
				VCN, inst_idx, mmUVD_LMI_VCPU_CACHE_64BIT_BAR_HIGH),
				(adev->firmware.ucode[AMDGPU_UCODE_ID_VCN + inst_idx].tmr_mc_addr_hi), 0, indirect);
			WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
				VCN, inst_idx, mmUVD_VCPU_CACHE_OFFSET0), 0, 0, indirect);
		} else {
			WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
				VCN, inst_idx, mmUVD_LMI_VCPU_CACHE_64BIT_BAR_LOW), 0, 0, indirect);
			WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
				VCN, inst_idx, mmUVD_LMI_VCPU_CACHE_64BIT_BAR_HIGH), 0, 0, indirect);
			WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
				VCN, inst_idx, mmUVD_VCPU_CACHE_OFFSET0), 0, 0, indirect);
		}
		offset = 0;
	} else {
		WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
			VCN, inst_idx, mmUVD_LMI_VCPU_CACHE_64BIT_BAR_LOW),
			lower_32_bits(adev->vcn.inst[inst_idx].gpu_addr), 0, indirect);
		WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
			VCN, inst_idx, mmUVD_LMI_VCPU_CACHE_64BIT_BAR_HIGH),
			upper_32_bits(adev->vcn.inst[inst_idx].gpu_addr), 0, indirect);
		offset = size;
		WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
			VCN, inst_idx, mmUVD_VCPU_CACHE_OFFSET0),
			AMDGPU_UVD_FIRMWARE_OFFSET >> 3, 0, indirect);
	}

	if (!indirect)
		WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
			VCN, inst_idx, mmUVD_VCPU_CACHE_SIZE0), size, 0, indirect);
	else
		WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
			VCN, inst_idx, mmUVD_VCPU_CACHE_SIZE0), 0, 0, indirect);

	/* cache window 1: stack */
	if (!indirect) {
		WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
			VCN, inst_idx, mmUVD_LMI_VCPU_CACHE1_64BIT_BAR_LOW),
			lower_32_bits(adev->vcn.inst[inst_idx].gpu_addr + offset), 0, indirect);
		WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
			VCN, inst_idx, mmUVD_LMI_VCPU_CACHE1_64BIT_BAR_HIGH),
			upper_32_bits(adev->vcn.inst[inst_idx].gpu_addr + offset), 0, indirect);
		WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
			VCN, inst_idx, mmUVD_VCPU_CACHE_OFFSET1), 0, 0, indirect);
	} else {
		WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
			VCN, inst_idx, mmUVD_LMI_VCPU_CACHE1_64BIT_BAR_LOW), 0, 0, indirect);
		WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
			VCN, inst_idx, mmUVD_LMI_VCPU_CACHE1_64BIT_BAR_HIGH), 0, 0, indirect);
		WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
			VCN, inst_idx, mmUVD_VCPU_CACHE_OFFSET1), 0, 0, indirect);
	}
	WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
		VCN, inst_idx, mmUVD_VCPU_CACHE_SIZE1), AMDGPU_VCN_STACK_SIZE, 0, indirect);

	/* cache window 2: context */
	WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
		VCN, inst_idx, mmUVD_LMI_VCPU_CACHE2_64BIT_BAR_LOW),
		lower_32_bits(adev->vcn.inst[inst_idx].gpu_addr + offset + AMDGPU_VCN_STACK_SIZE), 0, indirect);
	WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
		VCN, inst_idx, mmUVD_LMI_VCPU_CACHE2_64BIT_BAR_HIGH),
		upper_32_bits(adev->vcn.inst[inst_idx].gpu_addr + offset + AMDGPU_VCN_STACK_SIZE), 0, indirect);
	WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
		VCN, inst_idx, mmUVD_VCPU_CACHE_OFFSET2), 0, 0, indirect);
	WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
		VCN, inst_idx, mmUVD_VCPU_CACHE_SIZE2), AMDGPU_VCN_CONTEXT_SIZE, 0, indirect);

	/* non-cache window */
	WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
		VCN, inst_idx, mmUVD_LMI_VCPU_NC0_64BIT_BAR_LOW),
		lower_32_bits(adev->vcn.inst[inst_idx].fw_shared_gpu_addr), 0, indirect);
	WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
		VCN, inst_idx, mmUVD_LMI_VCPU_NC0_64BIT_BAR_HIGH),
		upper_32_bits(adev->vcn.inst[inst_idx].fw_shared_gpu_addr), 0, indirect);
	WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
		VCN, inst_idx, mmUVD_VCPU_NONCACHE_OFFSET0), 0, 0, indirect);
	WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
		VCN, inst_idx, mmUVD_VCPU_NONCACHE_SIZE0),
		AMDGPU_GPU_PAGE_ALIGN(sizeof(struct amdgpu_fw_shared)), 0, indirect);

	/* VCN global tiling registers */
	WREG32_SOC15_DPG_MODE(0, SOC15_DPG_MODE_OFFSET(
		UVD, 0, mmUVD_GFX10_ADDR_CONFIG), adev->gfx.config.gb_addr_config, 0, indirect);
}

static void vcn_v3_0_disable_static_power_gating(struct amdgpu_device *adev, int inst)
{
	uint32_t data = 0;

	if (adev->pg_flags & AMD_PG_SUPPORT_VCN) {
		data = (1 << UVD_PGFSM_CONFIG__UVDM_PWR_CONFIG__SHIFT
			| 1 << UVD_PGFSM_CONFIG__UVDU_PWR_CONFIG__SHIFT
			| 2 << UVD_PGFSM_CONFIG__UVDF_PWR_CONFIG__SHIFT
			| 2 << UVD_PGFSM_CONFIG__UVDC_PWR_CONFIG__SHIFT
			| 2 << UVD_PGFSM_CONFIG__UVDB_PWR_CONFIG__SHIFT
			| 2 << UVD_PGFSM_CONFIG__UVDIRL_PWR_CONFIG__SHIFT
			| 1 << UVD_PGFSM_CONFIG__UVDLM_PWR_CONFIG__SHIFT
			| 2 << UVD_PGFSM_CONFIG__UVDTD_PWR_CONFIG__SHIFT
			| 2 << UVD_PGFSM_CONFIG__UVDTE_PWR_CONFIG__SHIFT
			| 2 << UVD_PGFSM_CONFIG__UVDE_PWR_CONFIG__SHIFT
			| 2 << UVD_PGFSM_CONFIG__UVDAB_PWR_CONFIG__SHIFT
			| 2 << UVD_PGFSM_CONFIG__UVDATD_PWR_CONFIG__SHIFT
			| 2 << UVD_PGFSM_CONFIG__UVDNA_PWR_CONFIG__SHIFT
			| 2 << UVD_PGFSM_CONFIG__UVDNB_PWR_CONFIG__SHIFT);

		WREG32_SOC15(VCN, inst, mmUVD_PGFSM_CONFIG, data);
		SOC15_WAIT_ON_RREG(VCN, inst, mmUVD_PGFSM_STATUS,
			UVD_PGFSM_STATUS__UVDM_UVDU_UVDLM_PWR_ON_3_0, 0x3F3FFFFF);
	} else {
		data = (1 << UVD_PGFSM_CONFIG__UVDM_PWR_CONFIG__SHIFT
			| 1 << UVD_PGFSM_CONFIG__UVDU_PWR_CONFIG__SHIFT
			| 1 << UVD_PGFSM_CONFIG__UVDF_PWR_CONFIG__SHIFT
			| 1 << UVD_PGFSM_CONFIG__UVDC_PWR_CONFIG__SHIFT
			| 1 << UVD_PGFSM_CONFIG__UVDB_PWR_CONFIG__SHIFT
			| 1 << UVD_PGFSM_CONFIG__UVDIRL_PWR_CONFIG__SHIFT
			| 1 << UVD_PGFSM_CONFIG__UVDLM_PWR_CONFIG__SHIFT
			| 1 << UVD_PGFSM_CONFIG__UVDTD_PWR_CONFIG__SHIFT
			| 1 << UVD_PGFSM_CONFIG__UVDTE_PWR_CONFIG__SHIFT
			| 1 << UVD_PGFSM_CONFIG__UVDE_PWR_CONFIG__SHIFT
			| 1 << UVD_PGFSM_CONFIG__UVDAB_PWR_CONFIG__SHIFT
			| 1 << UVD_PGFSM_CONFIG__UVDATD_PWR_CONFIG__SHIFT
			| 1 << UVD_PGFSM_CONFIG__UVDNA_PWR_CONFIG__SHIFT
			| 1 << UVD_PGFSM_CONFIG__UVDNB_PWR_CONFIG__SHIFT);
		WREG32_SOC15(VCN, inst, mmUVD_PGFSM_CONFIG, data);
		SOC15_WAIT_ON_RREG(VCN, inst, mmUVD_PGFSM_STATUS, 0, 0x3F3FFFFF);
	}

	data = RREG32_SOC15(VCN, inst, mmUVD_POWER_STATUS);
	data &= ~0x103;
	if (adev->pg_flags & AMD_PG_SUPPORT_VCN)
		data |= UVD_PGFSM_CONFIG__UVDM_UVDU_PWR_ON |
			UVD_POWER_STATUS__UVD_PG_EN_MASK;

	WREG32_SOC15(VCN, inst, mmUVD_POWER_STATUS, data);
}

static void vcn_v3_0_enable_static_power_gating(struct amdgpu_device *adev, int inst)
{
	uint32_t data;

	if (adev->pg_flags & AMD_PG_SUPPORT_VCN) {
		/* Before power off, this indicator has to be turned on */
		data = RREG32_SOC15(VCN, inst, mmUVD_POWER_STATUS);
		data &= ~UVD_POWER_STATUS__UVD_POWER_STATUS_MASK;
		data |= UVD_POWER_STATUS__UVD_POWER_STATUS_TILES_OFF;
		WREG32_SOC15(VCN, inst, mmUVD_POWER_STATUS, data);

		data = (2 << UVD_PGFSM_CONFIG__UVDM_PWR_CONFIG__SHIFT
			| 2 << UVD_PGFSM_CONFIG__UVDU_PWR_CONFIG__SHIFT
			| 2 << UVD_PGFSM_CONFIG__UVDF_PWR_CONFIG__SHIFT
			| 2 << UVD_PGFSM_CONFIG__UVDC_PWR_CONFIG__SHIFT
			| 2 << UVD_PGFSM_CONFIG__UVDB_PWR_CONFIG__SHIFT
			| 2 << UVD_PGFSM_CONFIG__UVDIRL_PWR_CONFIG__SHIFT
			| 2 << UVD_PGFSM_CONFIG__UVDLM_PWR_CONFIG__SHIFT
			| 2 << UVD_PGFSM_CONFIG__UVDTD_PWR_CONFIG__SHIFT
			| 2 << UVD_PGFSM_CONFIG__UVDTE_PWR_CONFIG__SHIFT
			| 2 << UVD_PGFSM_CONFIG__UVDE_PWR_CONFIG__SHIFT
			| 2 << UVD_PGFSM_CONFIG__UVDAB_PWR_CONFIG__SHIFT
			| 2 << UVD_PGFSM_CONFIG__UVDATD_PWR_CONFIG__SHIFT
			| 2 << UVD_PGFSM_CONFIG__UVDNA_PWR_CONFIG__SHIFT
			| 2 << UVD_PGFSM_CONFIG__UVDNB_PWR_CONFIG__SHIFT);
		WREG32_SOC15(VCN, inst, mmUVD_PGFSM_CONFIG, data);

		data = (2 << UVD_PGFSM_STATUS__UVDM_PWR_STATUS__SHIFT
			| 2 << UVD_PGFSM_STATUS__UVDU_PWR_STATUS__SHIFT
			| 2 << UVD_PGFSM_STATUS__UVDF_PWR_STATUS__SHIFT
			| 2 << UVD_PGFSM_STATUS__UVDC_PWR_STATUS__SHIFT
			| 2 << UVD_PGFSM_STATUS__UVDB_PWR_STATUS__SHIFT
			| 2 << UVD_PGFSM_STATUS__UVDIRL_PWR_STATUS__SHIFT
			| 2 << UVD_PGFSM_STATUS__UVDLM_PWR_STATUS__SHIFT
			| 2 << UVD_PGFSM_STATUS__UVDTD_PWR_STATUS__SHIFT
			| 2 << UVD_PGFSM_STATUS__UVDTE_PWR_STATUS__SHIFT
			| 2 << UVD_PGFSM_STATUS__UVDE_PWR_STATUS__SHIFT
			| 2 << UVD_PGFSM_STATUS__UVDAB_PWR_STATUS__SHIFT
			| 2 << UVD_PGFSM_STATUS__UVDATD_PWR_STATUS__SHIFT
			| 2 << UVD_PGFSM_STATUS__UVDNA_PWR_STATUS__SHIFT
			| 2 << UVD_PGFSM_STATUS__UVDNB_PWR_STATUS__SHIFT);
		SOC15_WAIT_ON_RREG(VCN, inst, mmUVD_PGFSM_STATUS, data, 0x3F3FFFFF);
	}
}

/**
 * vcn_v3_0_disable_clock_gating - disable VCN clock gating
 *
 * @adev: amdgpu_device pointer
 * @inst: instance number
 *
 * Disable clock gating for VCN block
 */
static void vcn_v3_0_disable_clock_gating(struct amdgpu_device *adev, int inst)
{
	uint32_t data;

	/* VCN disable CGC */
	data = RREG32_SOC15(VCN, inst, mmUVD_CGC_CTRL);
	if (adev->cg_flags & AMD_CG_SUPPORT_VCN_MGCG)
		data |= 1 << UVD_CGC_CTRL__DYN_CLOCK_MODE__SHIFT;
	else
		data &= ~UVD_CGC_CTRL__DYN_CLOCK_MODE_MASK;
	data |= 1 << UVD_CGC_CTRL__CLK_GATE_DLY_TIMER__SHIFT;
	data |= 4 << UVD_CGC_CTRL__CLK_OFF_DELAY__SHIFT;
	WREG32_SOC15(VCN, inst, mmUVD_CGC_CTRL, data);

	data = RREG32_SOC15(VCN, inst, mmUVD_CGC_GATE);
	data &= ~(UVD_CGC_GATE__SYS_MASK
		| UVD_CGC_GATE__UDEC_MASK
		| UVD_CGC_GATE__MPEG2_MASK
		| UVD_CGC_GATE__REGS_MASK
		| UVD_CGC_GATE__RBC_MASK
		| UVD_CGC_GATE__LMI_MC_MASK
		| UVD_CGC_GATE__LMI_UMC_MASK
		| UVD_CGC_GATE__IDCT_MASK
		| UVD_CGC_GATE__MPRD_MASK
		| UVD_CGC_GATE__MPC_MASK
		| UVD_CGC_GATE__LBSI_MASK
		| UVD_CGC_GATE__LRBBM_MASK
		| UVD_CGC_GATE__UDEC_RE_MASK
		| UVD_CGC_GATE__UDEC_CM_MASK
		| UVD_CGC_GATE__UDEC_IT_MASK
		| UVD_CGC_GATE__UDEC_DB_MASK
		| UVD_CGC_GATE__UDEC_MP_MASK
		| UVD_CGC_GATE__WCB_MASK
		| UVD_CGC_GATE__VCPU_MASK
		| UVD_CGC_GATE__MMSCH_MASK);

	WREG32_SOC15(VCN, inst, mmUVD_CGC_GATE, data);

	SOC15_WAIT_ON_RREG(VCN, inst, mmUVD_CGC_GATE, 0, 0xFFFFFFFF);

	data = RREG32_SOC15(VCN, inst, mmUVD_CGC_CTRL);
	data &= ~(UVD_CGC_CTRL__UDEC_RE_MODE_MASK
		| UVD_CGC_CTRL__UDEC_CM_MODE_MASK
		| UVD_CGC_CTRL__UDEC_IT_MODE_MASK
		| UVD_CGC_CTRL__UDEC_DB_MODE_MASK
		| UVD_CGC_CTRL__UDEC_MP_MODE_MASK
		| UVD_CGC_CTRL__SYS_MODE_MASK
		| UVD_CGC_CTRL__UDEC_MODE_MASK
		| UVD_CGC_CTRL__MPEG2_MODE_MASK
		| UVD_CGC_CTRL__REGS_MODE_MASK
		| UVD_CGC_CTRL__RBC_MODE_MASK
		| UVD_CGC_CTRL__LMI_MC_MODE_MASK
		| UVD_CGC_CTRL__LMI_UMC_MODE_MASK
		| UVD_CGC_CTRL__IDCT_MODE_MASK
		| UVD_CGC_CTRL__MPRD_MODE_MASK
		| UVD_CGC_CTRL__MPC_MODE_MASK
		| UVD_CGC_CTRL__LBSI_MODE_MASK
		| UVD_CGC_CTRL__LRBBM_MODE_MASK
		| UVD_CGC_CTRL__WCB_MODE_MASK
		| UVD_CGC_CTRL__VCPU_MODE_MASK
		| UVD_CGC_CTRL__MMSCH_MODE_MASK);
	WREG32_SOC15(VCN, inst, mmUVD_CGC_CTRL, data);

	data = RREG32_SOC15(VCN, inst, mmUVD_SUVD_CGC_GATE);
	data |= (UVD_SUVD_CGC_GATE__SRE_MASK
		| UVD_SUVD_CGC_GATE__SIT_MASK
		| UVD_SUVD_CGC_GATE__SMP_MASK
		| UVD_SUVD_CGC_GATE__SCM_MASK
		| UVD_SUVD_CGC_GATE__SDB_MASK
		| UVD_SUVD_CGC_GATE__SRE_H264_MASK
		| UVD_SUVD_CGC_GATE__SRE_HEVC_MASK
		| UVD_SUVD_CGC_GATE__SIT_H264_MASK
		| UVD_SUVD_CGC_GATE__SIT_HEVC_MASK
		| UVD_SUVD_CGC_GATE__SCM_H264_MASK
		| UVD_SUVD_CGC_GATE__SCM_HEVC_MASK
		| UVD_SUVD_CGC_GATE__SDB_H264_MASK
		| UVD_SUVD_CGC_GATE__SDB_HEVC_MASK
		| UVD_SUVD_CGC_GATE__SCLR_MASK
		| UVD_SUVD_CGC_GATE__ENT_MASK
		| UVD_SUVD_CGC_GATE__IME_MASK
		| UVD_SUVD_CGC_GATE__SIT_HEVC_DEC_MASK
		| UVD_SUVD_CGC_GATE__SIT_HEVC_ENC_MASK
		| UVD_SUVD_CGC_GATE__SITE_MASK
		| UVD_SUVD_CGC_GATE__SRE_VP9_MASK
		| UVD_SUVD_CGC_GATE__SCM_VP9_MASK
		| UVD_SUVD_CGC_GATE__SIT_VP9_DEC_MASK
		| UVD_SUVD_CGC_GATE__SDB_VP9_MASK
		| UVD_SUVD_CGC_GATE__IME_HEVC_MASK
		| UVD_SUVD_CGC_GATE__EFC_MASK
		| UVD_SUVD_CGC_GATE__SAOE_MASK
		| UVD_SUVD_CGC_GATE__SRE_AV1_MASK
		| UVD_SUVD_CGC_GATE__FBC_PCLK_MASK
		| UVD_SUVD_CGC_GATE__FBC_CCLK_MASK
		| UVD_SUVD_CGC_GATE__SCM_AV1_MASK
		| UVD_SUVD_CGC_GATE__SMPA_MASK);
	WREG32_SOC15(VCN, inst, mmUVD_SUVD_CGC_GATE, data);

	data = RREG32_SOC15(VCN, inst, mmUVD_SUVD_CGC_GATE2);
	data |= (UVD_SUVD_CGC_GATE2__MPBE0_MASK
		| UVD_SUVD_CGC_GATE2__MPBE1_MASK
		| UVD_SUVD_CGC_GATE2__SIT_AV1_MASK
		| UVD_SUVD_CGC_GATE2__SDB_AV1_MASK
		| UVD_SUVD_CGC_GATE2__MPC1_MASK);
	WREG32_SOC15(VCN, inst, mmUVD_SUVD_CGC_GATE2, data);

	data = RREG32_SOC15(VCN, inst, mmUVD_SUVD_CGC_CTRL);
	data &= ~(UVD_SUVD_CGC_CTRL__SRE_MODE_MASK
		| UVD_SUVD_CGC_CTRL__SIT_MODE_MASK
		| UVD_SUVD_CGC_CTRL__SMP_MODE_MASK
		| UVD_SUVD_CGC_CTRL__SCM_MODE_MASK
		| UVD_SUVD_CGC_CTRL__SDB_MODE_MASK
		| UVD_SUVD_CGC_CTRL__SCLR_MODE_MASK
		| UVD_SUVD_CGC_CTRL__ENT_MODE_MASK
		| UVD_SUVD_CGC_CTRL__IME_MODE_MASK
		| UVD_SUVD_CGC_CTRL__SITE_MODE_MASK
		| UVD_SUVD_CGC_CTRL__EFC_MODE_MASK
		| UVD_SUVD_CGC_CTRL__SAOE_MODE_MASK
		| UVD_SUVD_CGC_CTRL__SMPA_MODE_MASK
		| UVD_SUVD_CGC_CTRL__MPBE0_MODE_MASK
		| UVD_SUVD_CGC_CTRL__MPBE1_MODE_MASK
		| UVD_SUVD_CGC_CTRL__SIT_AV1_MODE_MASK
		| UVD_SUVD_CGC_CTRL__SDB_AV1_MODE_MASK
		| UVD_SUVD_CGC_CTRL__MPC1_MODE_MASK
		| UVD_SUVD_CGC_CTRL__FBC_PCLK_MASK
		| UVD_SUVD_CGC_CTRL__FBC_CCLK_MASK);
	WREG32_SOC15(VCN, inst, mmUVD_SUVD_CGC_CTRL, data);
}

static void vcn_v3_0_clock_gating_dpg_mode(struct amdgpu_device *adev,
		uint8_t sram_sel, int inst_idx, uint8_t indirect)
{
	uint32_t reg_data = 0;

	/* enable sw clock gating control */
	if (adev->cg_flags & AMD_CG_SUPPORT_VCN_MGCG)
		reg_data = 1 << UVD_CGC_CTRL__DYN_CLOCK_MODE__SHIFT;
	else
		reg_data = 0 << UVD_CGC_CTRL__DYN_CLOCK_MODE__SHIFT;
	reg_data |= 1 << UVD_CGC_CTRL__CLK_GATE_DLY_TIMER__SHIFT;
	reg_data |= 4 << UVD_CGC_CTRL__CLK_OFF_DELAY__SHIFT;
	reg_data &= ~(UVD_CGC_CTRL__UDEC_RE_MODE_MASK |
		UVD_CGC_CTRL__UDEC_CM_MODE_MASK |
		UVD_CGC_CTRL__UDEC_IT_MODE_MASK |
		UVD_CGC_CTRL__UDEC_DB_MODE_MASK |
		UVD_CGC_CTRL__UDEC_MP_MODE_MASK |
		UVD_CGC_CTRL__SYS_MODE_MASK |
		UVD_CGC_CTRL__UDEC_MODE_MASK |
		UVD_CGC_CTRL__MPEG2_MODE_MASK |
		UVD_CGC_CTRL__REGS_MODE_MASK |
		UVD_CGC_CTRL__RBC_MODE_MASK |
		UVD_CGC_CTRL__LMI_MC_MODE_MASK |
		UVD_CGC_CTRL__LMI_UMC_MODE_MASK |
		UVD_CGC_CTRL__IDCT_MODE_MASK |
		UVD_CGC_CTRL__MPRD_MODE_MASK |
		UVD_CGC_CTRL__MPC_MODE_MASK |
		UVD_CGC_CTRL__LBSI_MODE_MASK |
		UVD_CGC_CTRL__LRBBM_MODE_MASK |
		UVD_CGC_CTRL__WCB_MODE_MASK |
		UVD_CGC_CTRL__VCPU_MODE_MASK |
		UVD_CGC_CTRL__MMSCH_MODE_MASK);
	WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
		VCN, inst_idx, mmUVD_CGC_CTRL), reg_data, sram_sel, indirect);

	/* turn off clock gating */
	WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
		VCN, inst_idx, mmUVD_CGC_GATE), 0, sram_sel, indirect);

	/* turn on SUVD clock gating */
	WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
		VCN, inst_idx, mmUVD_SUVD_CGC_GATE), 1, sram_sel, indirect);

	/* turn on sw mode in UVD_SUVD_CGC_CTRL */
	WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
		VCN, inst_idx, mmUVD_SUVD_CGC_CTRL), 0, sram_sel, indirect);
}

/**
 * vcn_v3_0_enable_clock_gating - enable VCN clock gating
 *
 * @adev: amdgpu_device pointer
 * @inst: instance number
 *
 * Enable clock gating for VCN block
 */
static void vcn_v3_0_enable_clock_gating(struct amdgpu_device *adev, int inst)
{
	uint32_t data;

	/* enable VCN CGC */
	data = RREG32_SOC15(VCN, inst, mmUVD_CGC_CTRL);
	if (adev->cg_flags & AMD_CG_SUPPORT_VCN_MGCG)
		data |= 1 << UVD_CGC_CTRL__DYN_CLOCK_MODE__SHIFT;
	else
		data |= 0 << UVD_CGC_CTRL__DYN_CLOCK_MODE__SHIFT;
	data |= 1 << UVD_CGC_CTRL__CLK_GATE_DLY_TIMER__SHIFT;
	data |= 4 << UVD_CGC_CTRL__CLK_OFF_DELAY__SHIFT;
	WREG32_SOC15(VCN, inst, mmUVD_CGC_CTRL, data);

	data = RREG32_SOC15(VCN, inst, mmUVD_CGC_CTRL);
	data |= (UVD_CGC_CTRL__UDEC_RE_MODE_MASK
		| UVD_CGC_CTRL__UDEC_CM_MODE_MASK
		| UVD_CGC_CTRL__UDEC_IT_MODE_MASK
		| UVD_CGC_CTRL__UDEC_DB_MODE_MASK
		| UVD_CGC_CTRL__UDEC_MP_MODE_MASK
		| UVD_CGC_CTRL__SYS_MODE_MASK
		| UVD_CGC_CTRL__UDEC_MODE_MASK
		| UVD_CGC_CTRL__MPEG2_MODE_MASK
		| UVD_CGC_CTRL__REGS_MODE_MASK
		| UVD_CGC_CTRL__RBC_MODE_MASK
		| UVD_CGC_CTRL__LMI_MC_MODE_MASK
		| UVD_CGC_CTRL__LMI_UMC_MODE_MASK
		| UVD_CGC_CTRL__IDCT_MODE_MASK
		| UVD_CGC_CTRL__MPRD_MODE_MASK
		| UVD_CGC_CTRL__MPC_MODE_MASK
		| UVD_CGC_CTRL__LBSI_MODE_MASK
		| UVD_CGC_CTRL__LRBBM_MODE_MASK
		| UVD_CGC_CTRL__WCB_MODE_MASK
		| UVD_CGC_CTRL__VCPU_MODE_MASK
		| UVD_CGC_CTRL__MMSCH_MODE_MASK);
	WREG32_SOC15(VCN, inst, mmUVD_CGC_CTRL, data);

	data = RREG32_SOC15(VCN, inst, mmUVD_SUVD_CGC_CTRL);
	data |= (UVD_SUVD_CGC_CTRL__SRE_MODE_MASK
		| UVD_SUVD_CGC_CTRL__SIT_MODE_MASK
		| UVD_SUVD_CGC_CTRL__SMP_MODE_MASK
		| UVD_SUVD_CGC_CTRL__SCM_MODE_MASK
		| UVD_SUVD_CGC_CTRL__SDB_MODE_MASK
		| UVD_SUVD_CGC_CTRL__SCLR_MODE_MASK
		| UVD_SUVD_CGC_CTRL__ENT_MODE_MASK
		| UVD_SUVD_CGC_CTRL__IME_MODE_MASK
		| UVD_SUVD_CGC_CTRL__SITE_MODE_MASK
		| UVD_SUVD_CGC_CTRL__EFC_MODE_MASK
		| UVD_SUVD_CGC_CTRL__SAOE_MODE_MASK
		| UVD_SUVD_CGC_CTRL__SMPA_MODE_MASK
		| UVD_SUVD_CGC_CTRL__MPBE0_MODE_MASK
		| UVD_SUVD_CGC_CTRL__MPBE1_MODE_MASK
		| UVD_SUVD_CGC_CTRL__SIT_AV1_MODE_MASK
		| UVD_SUVD_CGC_CTRL__SDB_AV1_MODE_MASK
		| UVD_SUVD_CGC_CTRL__MPC1_MODE_MASK
		| UVD_SUVD_CGC_CTRL__FBC_PCLK_MASK
		| UVD_SUVD_CGC_CTRL__FBC_CCLK_MASK);
	WREG32_SOC15(VCN, inst, mmUVD_SUVD_CGC_CTRL, data);
}

static int vcn_v3_0_start_dpg_mode(struct amdgpu_device *adev, int inst_idx, bool indirect)
{
	volatile struct amdgpu_fw_shared *fw_shared = adev->vcn.inst[inst_idx].fw_shared_cpu_addr;
	struct amdgpu_ring *ring;
	uint32_t rb_bufsz, tmp;

	/* disable register anti-hang mechanism */
	WREG32_P(SOC15_REG_OFFSET(VCN, inst_idx, mmUVD_POWER_STATUS), 1,
		~UVD_POWER_STATUS__UVD_POWER_STATUS_MASK);
	/* enable dynamic power gating mode */
	tmp = RREG32_SOC15(VCN, inst_idx, mmUVD_POWER_STATUS);
	tmp |= UVD_POWER_STATUS__UVD_PG_MODE_MASK;
	tmp |= UVD_POWER_STATUS__UVD_PG_EN_MASK;
	WREG32_SOC15(VCN, inst_idx, mmUVD_POWER_STATUS, tmp);

	if (indirect)
		adev->vcn.inst[inst_idx].dpg_sram_curr_addr = (uint32_t *)adev->vcn.inst[inst_idx].dpg_sram_cpu_addr;

	/* enable clock gating */
	vcn_v3_0_clock_gating_dpg_mode(adev, 0, inst_idx, indirect);

	/* enable VCPU clock */
	tmp = (0xFF << UVD_VCPU_CNTL__PRB_TIMEOUT_VAL__SHIFT);
	tmp |= UVD_VCPU_CNTL__CLK_EN_MASK;
	tmp |= UVD_VCPU_CNTL__BLK_RST_MASK;
	WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
		VCN, inst_idx, mmUVD_VCPU_CNTL), tmp, 0, indirect);

	/* disable master interrupt */
	WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
		VCN, inst_idx, mmUVD_MASTINT_EN), 0, 0, indirect);

	/* setup mmUVD_LMI_CTRL */
	tmp = (0x8 | UVD_LMI_CTRL__WRITE_CLEAN_TIMER_EN_MASK |
		UVD_LMI_CTRL__REQ_MODE_MASK |
		UVD_LMI_CTRL__CRC_RESET_MASK |
		UVD_LMI_CTRL__MASK_MC_URGENT_MASK |
		UVD_LMI_CTRL__DATA_COHERENCY_EN_MASK |
		UVD_LMI_CTRL__VCPU_DATA_COHERENCY_EN_MASK |
		(8 << UVD_LMI_CTRL__WRITE_CLEAN_TIMER__SHIFT) |
		0x00100000L);
	WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
		VCN, inst_idx, mmUVD_LMI_CTRL), tmp, 0, indirect);

	WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
		VCN, inst_idx, mmUVD_MPC_CNTL),
		0x2 << UVD_MPC_CNTL__REPLACEMENT_MODE__SHIFT, 0, indirect);

	WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
		VCN, inst_idx, mmUVD_MPC_SET_MUXA0),
		((0x1 << UVD_MPC_SET_MUXA0__VARA_1__SHIFT) |
		 (0x2 << UVD_MPC_SET_MUXA0__VARA_2__SHIFT) |
		 (0x3 << UVD_MPC_SET_MUXA0__VARA_3__SHIFT) |
		 (0x4 << UVD_MPC_SET_MUXA0__VARA_4__SHIFT)), 0, indirect);

	WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
		VCN, inst_idx, mmUVD_MPC_SET_MUXB0),
		((0x1 << UVD_MPC_SET_MUXB0__VARB_1__SHIFT) |
		 (0x2 << UVD_MPC_SET_MUXB0__VARB_2__SHIFT) |
		 (0x3 << UVD_MPC_SET_MUXB0__VARB_3__SHIFT) |
		 (0x4 << UVD_MPC_SET_MUXB0__VARB_4__SHIFT)), 0, indirect);

	WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
		VCN, inst_idx, mmUVD_MPC_SET_MUX),
		((0x0 << UVD_MPC_SET_MUX__SET_0__SHIFT) |
		 (0x1 << UVD_MPC_SET_MUX__SET_1__SHIFT) |
		 (0x2 << UVD_MPC_SET_MUX__SET_2__SHIFT)), 0, indirect);

	vcn_v3_0_mc_resume_dpg_mode(adev, inst_idx, indirect);

	WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
		VCN, inst_idx, mmUVD_REG_XX_MASK), 0x10, 0, indirect);
	WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
		VCN, inst_idx, mmUVD_RBC_XX_IB_REG_CHECK), 0x3, 0, indirect);

	/* enable LMI MC and UMC channels */
	WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
		VCN, inst_idx, mmUVD_LMI_CTRL2), 0, 0, indirect);

	/* unblock VCPU register access */
	WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
		VCN, inst_idx, mmUVD_RB_ARB_CTRL), 0, 0, indirect);

	tmp = (0xFF << UVD_VCPU_CNTL__PRB_TIMEOUT_VAL__SHIFT);
	tmp |= UVD_VCPU_CNTL__CLK_EN_MASK;
	WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
		VCN, inst_idx, mmUVD_VCPU_CNTL), tmp, 0, indirect);

	/* enable master interrupt */
	WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
		VCN, inst_idx, mmUVD_MASTINT_EN),
		UVD_MASTINT_EN__VCPU_EN_MASK, 0, indirect);

	/* add nop to workaround PSP size check */
	WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
		VCN, inst_idx, mmUVD_VCPU_CNTL), tmp, 0, indirect);
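
	/*
	 * In indirect mode nothing has reached the hardware yet: hand the
	 * accumulated (register, value) list to the PSP, which writes it into
	 * the VCN DPG SRAM. The size is the byte distance between the current
	 * and start cursors of the staging buffer.
	 */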
	if (indirect)
		psp_update_vcn_sram(adev, inst_idx, adev->vcn.inst[inst_idx].dpg_sram_gpu_addr,
			(uint32_t)((uintptr_t)adev->vcn.inst[inst_idx].dpg_sram_curr_addr -
				   (uintptr_t)adev->vcn.inst[inst_idx].dpg_sram_cpu_addr));

	ring = &adev->vcn.inst[inst_idx].ring_dec;
	/* force RBC into idle state */
	rb_bufsz = order_base_2(ring->ring_size);
	tmp = REG_SET_FIELD(0, UVD_RBC_RB_CNTL, RB_BUFSZ, rb_bufsz);
	tmp = REG_SET_FIELD(tmp, UVD_RBC_RB_CNTL, RB_BLKSZ, 1);
	tmp = REG_SET_FIELD(tmp, UVD_RBC_RB_CNTL, RB_NO_FETCH, 1);
	tmp = REG_SET_FIELD(tmp, UVD_RBC_RB_CNTL, RB_NO_UPDATE, 1);
	tmp = REG_SET_FIELD(tmp, UVD_RBC_RB_CNTL, RB_RPTR_WR_EN, 1);
	WREG32_SOC15(VCN, inst_idx, mmUVD_RBC_RB_CNTL, tmp);

	/* Stall DPG before WPTR/RPTR reset */
	WREG32_P(SOC15_REG_OFFSET(VCN, inst_idx, mmUVD_POWER_STATUS),
		UVD_POWER_STATUS__STALL_DPG_POWER_UP_MASK,
		~UVD_POWER_STATUS__STALL_DPG_POWER_UP_MASK);
	fw_shared->multi_queue.decode_queue_mode |= cpu_to_le32(FW_QUEUE_RING_RESET);
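
	/*
	 * FW_QUEUE_RING_RESET tells the firmware that the ring pointers are
	 * being reprogrammed; the flag is cleared again once the new
	 * rptr/wptr values below are in place.
	 */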
	/* set the write pointer delay */
	WREG32_SOC15(VCN, inst_idx, mmUVD_RBC_RB_WPTR_CNTL, 0);

	/* set the wb address */
	WREG32_SOC15(VCN, inst_idx, mmUVD_RBC_RB_RPTR_ADDR,
		(upper_32_bits(ring->gpu_addr) >> 2));

	/* program the RB_BASE for ring buffer */
	WREG32_SOC15(VCN, inst_idx, mmUVD_LMI_RBC_RB_64BIT_BAR_LOW,
		lower_32_bits(ring->gpu_addr));
	WREG32_SOC15(VCN, inst_idx, mmUVD_LMI_RBC_RB_64BIT_BAR_HIGH,
		upper_32_bits(ring->gpu_addr));

	/* Initialize the ring buffer's read and write pointers */
	WREG32_SOC15(VCN, inst_idx, mmUVD_RBC_RB_RPTR, 0);

	WREG32_SOC15(VCN, inst_idx, mmUVD_SCRATCH2, 0);

	ring->wptr = RREG32_SOC15(VCN, inst_idx, mmUVD_RBC_RB_RPTR);
	WREG32_SOC15(VCN, inst_idx, mmUVD_RBC_RB_WPTR,
		lower_32_bits(ring->wptr));

	/* Reset FW shared memory RBC WPTR/RPTR */
	fw_shared->rb.rptr = 0;
	fw_shared->rb.wptr = lower_32_bits(ring->wptr);

	/* resetting done, fw can check RB ring */
	fw_shared->multi_queue.decode_queue_mode &= cpu_to_le32(~FW_QUEUE_RING_RESET);

	/* Unstall DPG */
	WREG32_P(SOC15_REG_OFFSET(VCN, inst_idx, mmUVD_POWER_STATUS),
		0, ~UVD_POWER_STATUS__STALL_DPG_POWER_UP_MASK);

	return 0;
}

static int vcn_v3_0_start(struct amdgpu_device *adev)
{
	volatile struct amdgpu_fw_shared *fw_shared;
	struct amdgpu_ring *ring;
	uint32_t rb_bufsz, tmp;
	int i, j, k, r;

	if (adev->pm.dpm_enabled)
		amdgpu_dpm_enable_uvd(adev, true);

	for (i = 0; i < adev->vcn.num_vcn_inst; ++i) {
		if (adev->vcn.harvest_config & (1 << i))
			continue;

		if (adev->pg_flags & AMD_PG_SUPPORT_VCN_DPG) {
			r = vcn_v3_0_start_dpg_mode(adev, i, adev->vcn.indirect_sram);
			continue;
		}

		/* disable VCN power gating */
		vcn_v3_0_disable_static_power_gating(adev, i);

		/* set VCN status busy */
		tmp = RREG32_SOC15(VCN, i, mmUVD_STATUS) | UVD_STATUS__UVD_BUSY;
		WREG32_SOC15(VCN, i, mmUVD_STATUS, tmp);

		/* SW clock gating */
		vcn_v3_0_disable_clock_gating(adev, i);

		/* enable VCPU clock */
		WREG32_P(SOC15_REG_OFFSET(VCN, i, mmUVD_VCPU_CNTL),
			UVD_VCPU_CNTL__CLK_EN_MASK, ~UVD_VCPU_CNTL__CLK_EN_MASK);

		/* disable master interrupt */
		WREG32_P(SOC15_REG_OFFSET(VCN, i, mmUVD_MASTINT_EN), 0,
			~UVD_MASTINT_EN__VCPU_EN_MASK);

		/* enable LMI MC and UMC channels */
		WREG32_P(SOC15_REG_OFFSET(VCN, i, mmUVD_LMI_CTRL2), 0,
			~UVD_LMI_CTRL2__STALL_ARB_UMC_MASK);

		tmp = RREG32_SOC15(VCN, i, mmUVD_SOFT_RESET);
		tmp &= ~UVD_SOFT_RESET__LMI_SOFT_RESET_MASK;
		tmp &= ~UVD_SOFT_RESET__LMI_UMC_SOFT_RESET_MASK;
		WREG32_SOC15(VCN, i, mmUVD_SOFT_RESET, tmp);

		/* setup mmUVD_LMI_CTRL */
		tmp = RREG32_SOC15(VCN, i, mmUVD_LMI_CTRL);
		WREG32_SOC15(VCN, i, mmUVD_LMI_CTRL, tmp |
			UVD_LMI_CTRL__WRITE_CLEAN_TIMER_EN_MASK |
			UVD_LMI_CTRL__MASK_MC_URGENT_MASK |
			UVD_LMI_CTRL__DATA_COHERENCY_EN_MASK |
			UVD_LMI_CTRL__VCPU_DATA_COHERENCY_EN_MASK);

		/* setup mmUVD_MPC_CNTL */
		tmp = RREG32_SOC15(VCN, i, mmUVD_MPC_CNTL);
		tmp &= ~UVD_MPC_CNTL__REPLACEMENT_MODE_MASK;
		tmp |= 0x2 << UVD_MPC_CNTL__REPLACEMENT_MODE__SHIFT;
		WREG32_SOC15(VCN, i, mmUVD_MPC_CNTL, tmp);

		/* setup UVD_MPC_SET_MUXA0 */
		WREG32_SOC15(VCN, i, mmUVD_MPC_SET_MUXA0,
			((0x1 << UVD_MPC_SET_MUXA0__VARA_1__SHIFT) |
			 (0x2 << UVD_MPC_SET_MUXA0__VARA_2__SHIFT) |
			 (0x3 << UVD_MPC_SET_MUXA0__VARA_3__SHIFT) |
			 (0x4 << UVD_MPC_SET_MUXA0__VARA_4__SHIFT)));

		/* setup UVD_MPC_SET_MUXB0 */
		WREG32_SOC15(VCN, i, mmUVD_MPC_SET_MUXB0,
			((0x1 << UVD_MPC_SET_MUXB0__VARB_1__SHIFT) |
			 (0x2 << UVD_MPC_SET_MUXB0__VARB_2__SHIFT) |
			 (0x3 << UVD_MPC_SET_MUXB0__VARB_3__SHIFT) |
			 (0x4 << UVD_MPC_SET_MUXB0__VARB_4__SHIFT)));

		/* setup mmUVD_MPC_SET_MUX */
		WREG32_SOC15(VCN, i, mmUVD_MPC_SET_MUX,
			((0x0 << UVD_MPC_SET_MUX__SET_0__SHIFT) |
			 (0x1 << UVD_MPC_SET_MUX__SET_1__SHIFT) |
			 (0x2 << UVD_MPC_SET_MUX__SET_2__SHIFT)));

		vcn_v3_0_mc_resume(adev, i);

		/* VCN global tiling registers */
		WREG32_SOC15(VCN, i, mmUVD_GFX10_ADDR_CONFIG,
			adev->gfx.config.gb_addr_config);

		/* unblock VCPU register access */
		WREG32_P(SOC15_REG_OFFSET(VCN, i, mmUVD_RB_ARB_CTRL), 0,
			~UVD_RB_ARB_CTRL__VCPU_DIS_MASK);

		/* release VCPU reset to boot */
		WREG32_P(SOC15_REG_OFFSET(VCN, i, mmUVD_VCPU_CNTL), 0,
			~UVD_VCPU_CNTL__BLK_RST_MASK);

		for (j = 0; j < 10; ++j) {
			uint32_t status;

			for (k = 0; k < 100; ++k) {
				status = RREG32_SOC15(VCN, i, mmUVD_STATUS);
				if (status & 2)
					break;
				mdelay(10);
			}
			r = 0;
			if (status & 2)
				break;

			DRM_ERROR("VCN[%d] decode not responding, trying to reset the VCPU!!!\n", i);
			WREG32_P(SOC15_REG_OFFSET(VCN, i, mmUVD_VCPU_CNTL),
				UVD_VCPU_CNTL__BLK_RST_MASK,
				~UVD_VCPU_CNTL__BLK_RST_MASK);
			mdelay(10);
			WREG32_P(SOC15_REG_OFFSET(VCN, i, mmUVD_VCPU_CNTL), 0,
				~UVD_VCPU_CNTL__BLK_RST_MASK);

			mdelay(10);
			r = -1;
		}

		if (r) {
			DRM_ERROR("VCN[%d] decode not responding, giving up!!!\n", i);
			return r;
		}

		/* enable master interrupt */
		WREG32_P(SOC15_REG_OFFSET(VCN, i, mmUVD_MASTINT_EN),
			UVD_MASTINT_EN__VCPU_EN_MASK,
			~UVD_MASTINT_EN__VCPU_EN_MASK);

		/* clear the busy bit of VCN_STATUS */
		WREG32_P(SOC15_REG_OFFSET(VCN, i, mmUVD_STATUS), 0,
			~(2 << UVD_STATUS__VCPU_REPORT__SHIFT));

		WREG32_SOC15(VCN, i, mmUVD_LMI_RBC_RB_VMID, 0);

		ring = &adev->vcn.inst[i].ring_dec;
		/* force RBC into idle state */
		rb_bufsz = order_base_2(ring->ring_size);
		tmp = REG_SET_FIELD(0, UVD_RBC_RB_CNTL, RB_BUFSZ, rb_bufsz);
		tmp = REG_SET_FIELD(tmp, UVD_RBC_RB_CNTL, RB_BLKSZ, 1);
		tmp = REG_SET_FIELD(tmp, UVD_RBC_RB_CNTL, RB_NO_FETCH, 1);
		tmp = REG_SET_FIELD(tmp, UVD_RBC_RB_CNTL, RB_NO_UPDATE, 1);
		tmp = REG_SET_FIELD(tmp, UVD_RBC_RB_CNTL, RB_RPTR_WR_EN, 1);
		WREG32_SOC15(VCN, i, mmUVD_RBC_RB_CNTL, tmp);

		fw_shared = adev->vcn.inst[i].fw_shared_cpu_addr;
		fw_shared->multi_queue.decode_queue_mode |= cpu_to_le32(FW_QUEUE_RING_RESET);

		/* program the RB_BASE for ring buffer */
		WREG32_SOC15(VCN, i, mmUVD_LMI_RBC_RB_64BIT_BAR_LOW,
			lower_32_bits(ring->gpu_addr));
		WREG32_SOC15(VCN, i, mmUVD_LMI_RBC_RB_64BIT_BAR_HIGH,
			upper_32_bits(ring->gpu_addr));

		/* Initialize the ring buffer's read and write pointers */
		WREG32_SOC15(VCN, i, mmUVD_RBC_RB_RPTR, 0);

		WREG32_SOC15(VCN, i, mmUVD_SCRATCH2, 0);
		ring->wptr = RREG32_SOC15(VCN, i, mmUVD_RBC_RB_RPTR);
		WREG32_SOC15(VCN, i, mmUVD_RBC_RB_WPTR,
			lower_32_bits(ring->wptr));
		fw_shared->rb.wptr = lower_32_bits(ring->wptr);
		fw_shared->multi_queue.decode_queue_mode &= cpu_to_le32(~FW_QUEUE_RING_RESET);

		if (adev->asic_type != CHIP_BEIGE_GOBY) {
			fw_shared->multi_queue.encode_generalpurpose_queue_mode |= cpu_to_le32(FW_QUEUE_RING_RESET);
			ring = &adev->vcn.inst[i].ring_enc[0];
			WREG32_SOC15(VCN, i, mmUVD_RB_RPTR, lower_32_bits(ring->wptr));
			WREG32_SOC15(VCN, i, mmUVD_RB_WPTR, lower_32_bits(ring->wptr));
			WREG32_SOC15(VCN, i, mmUVD_RB_BASE_LO, ring->gpu_addr);
			WREG32_SOC15(VCN, i, mmUVD_RB_BASE_HI, upper_32_bits(ring->gpu_addr));
			WREG32_SOC15(VCN, i, mmUVD_RB_SIZE, ring->ring_size / 4);
			fw_shared->multi_queue.encode_generalpurpose_queue_mode &= cpu_to_le32(~FW_QUEUE_RING_RESET);

			fw_shared->multi_queue.encode_lowlatency_queue_mode |= cpu_to_le32(FW_QUEUE_RING_RESET);
			ring = &adev->vcn.inst[i].ring_enc[1];
			WREG32_SOC15(VCN, i, mmUVD_RB_RPTR2, lower_32_bits(ring->wptr));
			WREG32_SOC15(VCN, i, mmUVD_RB_WPTR2, lower_32_bits(ring->wptr));
			WREG32_SOC15(VCN, i, mmUVD_RB_BASE_LO2, ring->gpu_addr);
			WREG32_SOC15(VCN, i, mmUVD_RB_BASE_HI2, upper_32_bits(ring->gpu_addr));
			WREG32_SOC15(VCN, i, mmUVD_RB_SIZE2, ring->ring_size / 4);
			fw_shared->multi_queue.encode_lowlatency_queue_mode &= cpu_to_le32(~FW_QUEUE_RING_RESET);
		}
	}

	return 0;
}
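
/*
 * Under SRIOV a VF cannot program most VCN registers directly. Instead, the
 * driver builds a table of register writes in GPU memory and asks the MMSCH
 * (multimedia scheduler) firmware, through the VCN0 mailbox, to replay it on
 * the hardware.
 */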
static int vcn_v3_0_start_sriov(struct amdgpu_device *adev)
{
	int i, j;
	struct amdgpu_ring *ring;
	uint64_t cache_addr;
	uint64_t rb_addr;
	uint64_t ctx_addr;
	uint32_t param, resp, expected;
	uint32_t offset, cache_size;
	uint32_t tmp, timeout;
	uint32_t id;

	struct amdgpu_mm_table *table = &adev->virt.mm_table;
	uint32_t *table_loc;
	uint32_t table_size;
	uint32_t size, size_dw;

	bool is_vcn_ready;

	struct mmsch_v3_0_cmd_direct_write
		direct_wt = { {0} };
	struct mmsch_v3_0_cmd_direct_read_modify_write
		direct_rd_mod_wt = { {0} };
	struct mmsch_v3_0_cmd_end end = { {0} };
	struct mmsch_v3_0_init_header header;

	direct_wt.cmd_header.command_type =
		MMSCH_COMMAND__DIRECT_REG_WRITE;
	direct_rd_mod_wt.cmd_header.command_type =
		MMSCH_COMMAND__DIRECT_REG_READ_MODIFY_WRITE;
	end.cmd_header.command_type =
		MMSCH_COMMAND__END;

	header.version = MMSCH_VERSION;
	header.total_size = sizeof(struct mmsch_v3_0_init_header) >> 2;
	for (i = 0; i < AMDGPU_MAX_VCN_INSTANCES; i++) {
		header.inst[i].init_status = 0;
		header.inst[i].table_offset = 0;
		header.inst[i].table_size = 0;
	}

	table_loc = (uint32_t *)table->cpu_addr;
	table_loc += header.total_size;
	for (i = 0; i < adev->vcn.num_vcn_inst; i++) {
		if (adev->vcn.harvest_config & (1 << i))
			continue;

		table_size = 0;

		MMSCH_V3_0_INSERT_DIRECT_RD_MOD_WT(SOC15_REG_OFFSET(VCN, i,
			mmUVD_STATUS),
			~UVD_STATUS__UVD_BUSY, UVD_STATUS__UVD_BUSY);

		cache_size = AMDGPU_GPU_PAGE_ALIGN(adev->vcn.fw->size + 4);

		if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) {
			id = amdgpu_ucode_id_vcns[i];
			MMSCH_V3_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(VCN, i,
				mmUVD_LMI_VCPU_CACHE_64BIT_BAR_LOW),
				adev->firmware.ucode[id].tmr_mc_addr_lo);
			MMSCH_V3_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(VCN, i,
				mmUVD_LMI_VCPU_CACHE_64BIT_BAR_HIGH),
				adev->firmware.ucode[id].tmr_mc_addr_hi);
			offset = 0;
			MMSCH_V3_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(VCN, i,
				mmUVD_VCPU_CACHE_OFFSET0),
				0);
		} else {
			MMSCH_V3_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(VCN, i,
				mmUVD_LMI_VCPU_CACHE_64BIT_BAR_LOW),
				lower_32_bits(adev->vcn.inst[i].gpu_addr));
			MMSCH_V3_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(VCN, i,
				mmUVD_LMI_VCPU_CACHE_64BIT_BAR_HIGH),
				upper_32_bits(adev->vcn.inst[i].gpu_addr));
			offset = cache_size;
			MMSCH_V3_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(VCN, i,
				mmUVD_VCPU_CACHE_OFFSET0),
				AMDGPU_UVD_FIRMWARE_OFFSET >> 3);
		}

		MMSCH_V3_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(VCN, i,
			mmUVD_VCPU_CACHE_SIZE0),
			cache_size);

		cache_addr = adev->vcn.inst[i].gpu_addr + offset;
		MMSCH_V3_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(VCN, i,
			mmUVD_LMI_VCPU_CACHE1_64BIT_BAR_LOW),
			lower_32_bits(cache_addr));
		MMSCH_V3_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(VCN, i,
			mmUVD_LMI_VCPU_CACHE1_64BIT_BAR_HIGH),
			upper_32_bits(cache_addr));
		MMSCH_V3_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(VCN, i,
			mmUVD_VCPU_CACHE_OFFSET1),
			0);
		MMSCH_V3_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(VCN, i,
			mmUVD_VCPU_CACHE_SIZE1),
			AMDGPU_VCN_STACK_SIZE);

		cache_addr = adev->vcn.inst[i].gpu_addr + offset +
			AMDGPU_VCN_STACK_SIZE;
		MMSCH_V3_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(VCN, i,
			mmUVD_LMI_VCPU_CACHE2_64BIT_BAR_LOW),
			lower_32_bits(cache_addr));
		MMSCH_V3_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(VCN, i,
			mmUVD_LMI_VCPU_CACHE2_64BIT_BAR_HIGH),
			upper_32_bits(cache_addr));
		MMSCH_V3_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(VCN, i,
			mmUVD_VCPU_CACHE_OFFSET2),
			0);
		MMSCH_V3_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(VCN, i,
			mmUVD_VCPU_CACHE_SIZE2),
			AMDGPU_VCN_CONTEXT_SIZE);

		for (j = 0; j < adev->vcn.num_enc_rings; ++j) {
			ring = &adev->vcn.inst[i].ring_enc[j];
			ring->wptr = 0;
			rb_addr = ring->gpu_addr;
			MMSCH_V3_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(VCN, i,
				mmUVD_RB_BASE_LO),
				lower_32_bits(rb_addr));
			MMSCH_V3_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(VCN, i,
				mmUVD_RB_BASE_HI),
				upper_32_bits(rb_addr));
			MMSCH_V3_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(VCN, i,
				mmUVD_RB_SIZE),
				ring->ring_size / 4);
		}

		ring = &adev->vcn.inst[i].ring_dec;
		ring->wptr = 0;
		rb_addr = ring->gpu_addr;
		MMSCH_V3_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(VCN, i,
			mmUVD_LMI_RBC_RB_64BIT_BAR_LOW),
			lower_32_bits(rb_addr));
		MMSCH_V3_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(VCN, i,
			mmUVD_LMI_RBC_RB_64BIT_BAR_HIGH),
			upper_32_bits(rb_addr));
		/* force RBC into idle state */
		tmp = order_base_2(ring->ring_size);
		tmp = REG_SET_FIELD(0, UVD_RBC_RB_CNTL, RB_BUFSZ, tmp);
		tmp = REG_SET_FIELD(tmp, UVD_RBC_RB_CNTL, RB_BLKSZ, 1);
		tmp = REG_SET_FIELD(tmp, UVD_RBC_RB_CNTL, RB_NO_FETCH, 1);
		tmp = REG_SET_FIELD(tmp, UVD_RBC_RB_CNTL, RB_NO_UPDATE, 1);
		tmp = REG_SET_FIELD(tmp, UVD_RBC_RB_CNTL, RB_RPTR_WR_EN, 1);
		MMSCH_V3_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(VCN, i,
			mmUVD_RBC_RB_CNTL),
			tmp);

		/* add end packet */
		MMSCH_V3_0_INSERT_END();

		/* refine header */
		header.inst[i].init_status = 0;
		header.inst[i].table_offset = header.total_size;
		header.inst[i].table_size = table_size;
		header.total_size += table_size;
	}

	/* Update init table header in memory */
	size = sizeof(struct mmsch_v3_0_init_header);
	table_loc = (uint32_t *)table->cpu_addr;
	memcpy((void *)table_loc, &header, size);

	/* message MMSCH (in VCN[0]) to initialize this client
	 * 1, write to mmsch_vf_ctx_addr_lo/hi register with GPU mc addr
	 * of memory descriptor location
	 */
	ctx_addr = table->gpu_addr;
	WREG32_SOC15(VCN, 0, mmMMSCH_VF_CTX_ADDR_LO, lower_32_bits(ctx_addr));
	WREG32_SOC15(VCN, 0, mmMMSCH_VF_CTX_ADDR_HI, upper_32_bits(ctx_addr));

	/* 2, update vmid of descriptor */
	tmp = RREG32_SOC15(VCN, 0, mmMMSCH_VF_VMID);
	tmp &= ~MMSCH_VF_VMID__VF_CTX_VMID_MASK;
	/* use domain0 for MM scheduler */
	tmp |= (0 << MMSCH_VF_VMID__VF_CTX_VMID__SHIFT);
	WREG32_SOC15(VCN, 0, mmMMSCH_VF_VMID, tmp);

	/* 3, notify mmsch about the size of this descriptor */
	size = header.total_size;
	WREG32_SOC15(VCN, 0, mmMMSCH_VF_CTX_SIZE, size);

	/* 4, set resp to zero */
	WREG32_SOC15(VCN, 0, mmMMSCH_VF_MAILBOX_RESP, 0);

	/* 5, kick off the initialization and wait until
	 * MMSCH_VF_MAILBOX_RESP becomes non-zero
	 */
	param = 0x10000001;
	WREG32_SOC15(VCN, 0, mmMMSCH_VF_MAILBOX_HOST, param);
	tmp = 0;
	timeout = 1000;
	resp = 0;
	expected = param + 1;
	while (resp != expected) {
		resp = RREG32_SOC15(VCN, 0, mmMMSCH_VF_MAILBOX_RESP);
		if (resp == expected)
			break;

		udelay(10);
		tmp = tmp + 10;
		if (tmp >= timeout) {
			DRM_ERROR("failed to init MMSCH. TIME-OUT after %d usec"\
				" waiting for mmMMSCH_VF_MAILBOX_RESP "\
				"(expected=0x%08x, readback=0x%08x)\n",
				tmp, expected, resp);
			return -EBUSY;
		}
	}

	/* 6, check each VCN's init_status
	 * if it remains as 0, then this VCN is not assigned to current VF
	 * do not start ring for this VCN
	 */
	size = sizeof(struct mmsch_v3_0_init_header);
	table_loc = (uint32_t *)table->cpu_addr;
	memcpy(&header, (void *)table_loc, size);

	for (i = 0; i < adev->vcn.num_vcn_inst; i++) {
		if (adev->vcn.harvest_config & (1 << i))
			continue;

		is_vcn_ready = (header.inst[i].init_status == 1);
		if (!is_vcn_ready)
			DRM_INFO("VCN(%d) engine is disabled by hypervisor\n", i);

		ring = &adev->vcn.inst[i].ring_dec;
		ring->sched.ready = is_vcn_ready;
		for (j = 0; j < adev->vcn.num_enc_rings; ++j) {
			ring = &adev->vcn.inst[i].ring_enc[j];
			ring->sched.ready = is_vcn_ready;
		}
	}

	return 0;
}

static int vcn_v3_0_stop_dpg_mode(struct amdgpu_device *adev, int inst_idx)
{
	uint32_t tmp;

	/* Wait for power status to be 1 */
	SOC15_WAIT_ON_RREG(VCN, inst_idx, mmUVD_POWER_STATUS, 1,
		UVD_POWER_STATUS__UVD_POWER_STATUS_MASK);

	/* wait for read ptr to be equal to write ptr */
	tmp = RREG32_SOC15(VCN, inst_idx, mmUVD_RB_WPTR);
	SOC15_WAIT_ON_RREG(VCN, inst_idx, mmUVD_RB_RPTR, tmp, 0xFFFFFFFF);

	tmp = RREG32_SOC15(VCN, inst_idx, mmUVD_RB_WPTR2);
	SOC15_WAIT_ON_RREG(VCN, inst_idx, mmUVD_RB_RPTR2, tmp, 0xFFFFFFFF);

	tmp = RREG32_SOC15(VCN, inst_idx, mmUVD_RBC_RB_WPTR) & 0x7FFFFFFF;
	SOC15_WAIT_ON_RREG(VCN, inst_idx, mmUVD_RBC_RB_RPTR, tmp, 0xFFFFFFFF);

	SOC15_WAIT_ON_RREG(VCN, inst_idx, mmUVD_POWER_STATUS, 1,
		UVD_POWER_STATUS__UVD_POWER_STATUS_MASK);

	/* disable dynamic power gating mode */
	WREG32_P(SOC15_REG_OFFSET(VCN, inst_idx, mmUVD_POWER_STATUS), 0,
		~UVD_POWER_STATUS__UVD_PG_MODE_MASK);

	return 0;
}

static int vcn_v3_0_stop(struct amdgpu_device *adev)
{
	uint32_t tmp;
	int i, r = 0;

	for (i = 0; i < adev->vcn.num_vcn_inst; ++i) {
		if (adev->vcn.harvest_config & (1 << i))
			continue;

		if (adev->pg_flags & AMD_PG_SUPPORT_VCN_DPG) {
			r = vcn_v3_0_stop_dpg_mode(adev, i);
			continue;
		}

		/* wait for vcn idle */
		r = SOC15_WAIT_ON_RREG(VCN, i, mmUVD_STATUS, UVD_STATUS__IDLE, 0x7);
		if (r)
			return r;

		tmp = UVD_LMI_STATUS__VCPU_LMI_WRITE_CLEAN_MASK |
			UVD_LMI_STATUS__READ_CLEAN_MASK |
			UVD_LMI_STATUS__WRITE_CLEAN_MASK |
			UVD_LMI_STATUS__WRITE_CLEAN_RAW_MASK;
		r = SOC15_WAIT_ON_RREG(VCN, i, mmUVD_LMI_STATUS, tmp, tmp);
		if (r)
			return r;

		/* disable LMI UMC channel */
		tmp = RREG32_SOC15(VCN, i, mmUVD_LMI_CTRL2);
		tmp |= UVD_LMI_CTRL2__STALL_ARB_UMC_MASK;
		WREG32_SOC15(VCN, i, mmUVD_LMI_CTRL2, tmp);
		tmp = UVD_LMI_STATUS__UMC_READ_CLEAN_RAW_MASK |
			UVD_LMI_STATUS__UMC_WRITE_CLEAN_RAW_MASK;
		r = SOC15_WAIT_ON_RREG(VCN, i, mmUVD_LMI_STATUS, tmp, tmp);
		if (r)
			return r;

		/* block VCPU register access */
		WREG32_P(SOC15_REG_OFFSET(VCN, i, mmUVD_RB_ARB_CTRL),
			UVD_RB_ARB_CTRL__VCPU_DIS_MASK,
			~UVD_RB_ARB_CTRL__VCPU_DIS_MASK);

		/* reset VCPU */
		WREG32_P(SOC15_REG_OFFSET(VCN, i, mmUVD_VCPU_CNTL),
			UVD_VCPU_CNTL__BLK_RST_MASK,
			~UVD_VCPU_CNTL__BLK_RST_MASK);

		/* disable VCPU clock */
		WREG32_P(SOC15_REG_OFFSET(VCN, i, mmUVD_VCPU_CNTL), 0,
			~(UVD_VCPU_CNTL__CLK_EN_MASK));

		/* apply soft reset */
		tmp = RREG32_SOC15(VCN, i, mmUVD_SOFT_RESET);
		tmp |= UVD_SOFT_RESET__LMI_UMC_SOFT_RESET_MASK;
		WREG32_SOC15(VCN, i, mmUVD_SOFT_RESET, tmp);
		tmp = RREG32_SOC15(VCN, i, mmUVD_SOFT_RESET);
		tmp |= UVD_SOFT_RESET__LMI_SOFT_RESET_MASK;
		WREG32_SOC15(VCN, i, mmUVD_SOFT_RESET, tmp);

		/* clear status */
		WREG32_SOC15(VCN, i, mmUVD_STATUS, 0);

		/* apply HW clock gating */
		vcn_v3_0_enable_clock_gating(adev, i);

		/* enable VCN power gating */
		vcn_v3_0_enable_static_power_gating(adev, i);
	}

	if (adev->pm.dpm_enabled)
		amdgpu_dpm_enable_uvd(adev, false);

	return 0;
}
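
/*
 * DPG pause protocol: request the pause, wait for the firmware ACK, stall
 * DPG power-up, reprogram the encode rings and restore the decode ring
 * pointers from FW shared memory, then unstall. Unpausing needs no
 * handshake.
 */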
static int vcn_v3_0_pause_dpg_mode(struct amdgpu_device *adev,
		int inst_idx, struct dpg_pause_state *new_state)
{
	volatile struct amdgpu_fw_shared *fw_shared;
	struct amdgpu_ring *ring;
	uint32_t reg_data = 0;
	int ret_code;

	/* pause/unpause if state is changed */
	if (adev->vcn.inst[inst_idx].pause_state.fw_based != new_state->fw_based) {
		DRM_DEBUG("dpg pause state changed %d -> %d",
			adev->vcn.inst[inst_idx].pause_state.fw_based, new_state->fw_based);
		reg_data = RREG32_SOC15(VCN, inst_idx, mmUVD_DPG_PAUSE) &
			(~UVD_DPG_PAUSE__NJ_PAUSE_DPG_ACK_MASK);

		if (new_state->fw_based == VCN_DPG_STATE__PAUSE) {
			ret_code = SOC15_WAIT_ON_RREG(VCN, inst_idx, mmUVD_POWER_STATUS, 0x1,
				UVD_POWER_STATUS__UVD_POWER_STATUS_MASK);

			if (!ret_code) {
				/* pause DPG */
				reg_data |= UVD_DPG_PAUSE__NJ_PAUSE_DPG_REQ_MASK;
				WREG32_SOC15(VCN, inst_idx, mmUVD_DPG_PAUSE, reg_data);

				/* wait for ACK */
				SOC15_WAIT_ON_RREG(VCN, inst_idx, mmUVD_DPG_PAUSE,
					UVD_DPG_PAUSE__NJ_PAUSE_DPG_ACK_MASK,
					UVD_DPG_PAUSE__NJ_PAUSE_DPG_ACK_MASK);
1665 /* Stall DPG before WPTR/RPTR reset */
1666 WREG32_P(SOC15_REG_OFFSET(VCN, inst_idx, mmUVD_POWER_STATUS),
1667 UVD_POWER_STATUS__STALL_DPG_POWER_UP_MASK,
1668 ~UVD_POWER_STATUS__STALL_DPG_POWER_UP_MASK);
1670 if (adev->asic_type != CHIP_BEIGE_GOBY) {
1672 fw_shared = adev->vcn.inst[inst_idx].fw_shared_cpu_addr;
1673 fw_shared->multi_queue.encode_generalpurpose_queue_mode |= cpu_to_le32(FW_QUEUE_RING_RESET);
1674 ring = &adev->vcn.inst[inst_idx].ring_enc[0];
1676 WREG32_SOC15(VCN, inst_idx, mmUVD_RB_BASE_LO, ring->gpu_addr);
1677 WREG32_SOC15(VCN, inst_idx, mmUVD_RB_BASE_HI, upper_32_bits(ring->gpu_addr));
1678 WREG32_SOC15(VCN, inst_idx, mmUVD_RB_SIZE, ring->ring_size / 4);
1679 WREG32_SOC15(VCN, inst_idx, mmUVD_RB_RPTR, lower_32_bits(ring->wptr));
1680 WREG32_SOC15(VCN, inst_idx, mmUVD_RB_WPTR, lower_32_bits(ring->wptr));
1681 fw_shared->multi_queue.encode_generalpurpose_queue_mode &= cpu_to_le32(~FW_QUEUE_RING_RESET);
1683 fw_shared->multi_queue.encode_lowlatency_queue_mode |= cpu_to_le32(FW_QUEUE_RING_RESET);
1684 ring = &adev->vcn.inst[inst_idx].ring_enc[1];
1685 ring->wptr = 0;
1686 WREG32_SOC15(VCN, inst_idx, mmUVD_RB_BASE_LO2, ring->gpu_addr);
1687 WREG32_SOC15(VCN, inst_idx, mmUVD_RB_BASE_HI2, upper_32_bits(ring->gpu_addr));
1688 WREG32_SOC15(VCN, inst_idx, mmUVD_RB_SIZE2, ring->ring_size / 4);
1689 WREG32_SOC15(VCN, inst_idx, mmUVD_RB_RPTR2, lower_32_bits(ring->wptr));
1690 WREG32_SOC15(VCN, inst_idx, mmUVD_RB_WPTR2, lower_32_bits(ring->wptr));
1691 fw_shared->multi_queue.encode_lowlatency_queue_mode &= cpu_to_le32(~FW_QUEUE_RING_RESET);
1693 /* restore wptr/rptr with the pointers saved in FW shared memory */
1694 WREG32_SOC15(VCN, inst_idx, mmUVD_RBC_RB_RPTR, fw_shared->rb.rptr);
1695 WREG32_SOC15(VCN, inst_idx, mmUVD_RBC_RB_WPTR, fw_shared->rb.wptr);
1696 }
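1698 /* Unstall DPG */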
1699 WREG32_P(SOC15_REG_OFFSET(VCN, inst_idx, mmUVD_POWER_STATUS),
1700 0, ~UVD_POWER_STATUS__STALL_DPG_POWER_UP_MASK);
1702 SOC15_WAIT_ON_RREG(VCN, inst_idx, mmUVD_POWER_STATUS,
1703 UVD_PGFSM_CONFIG__UVDM_UVDU_PWR_ON, UVD_POWER_STATUS__UVD_POWER_STATUS_MASK);
1704 }
1705 } else {
1706 /* unpause dpg, no need to wait */
1707 reg_data &= ~UVD_DPG_PAUSE__NJ_PAUSE_DPG_REQ_MASK;
1708 WREG32_SOC15(VCN, inst_idx, mmUVD_DPG_PAUSE, reg_data);
1709 }
1710 adev->vcn.inst[inst_idx].pause_state.fw_based = new_state->fw_based;
1711 }
1713 return 0;
1714 }
1716 /**
1717 * vcn_v3_0_dec_ring_get_rptr - get read pointer
1719 * @ring: amdgpu_ring pointer
1721 * Returns the current hardware read pointer
1722 */
1723 static uint64_t vcn_v3_0_dec_ring_get_rptr(struct amdgpu_ring *ring)
1725 struct amdgpu_device *adev = ring->adev;
1727 return RREG32_SOC15(VCN, ring->me, mmUVD_RBC_RB_RPTR);
1730 /**
1731 * vcn_v3_0_dec_ring_get_wptr - get write pointer
1733 * @ring: amdgpu_ring pointer
1735 * Returns the current hardware write pointer
1736 */
1737 static uint64_t vcn_v3_0_dec_ring_get_wptr(struct amdgpu_ring *ring)
1739 struct amdgpu_device *adev = ring->adev;
1741 if (ring->use_doorbell)
1742 return adev->wb.wb[ring->wptr_offs];
1744 return RREG32_SOC15(VCN, ring->me, mmUVD_RBC_RB_WPTR);
1747 /**
1748 * vcn_v3_0_dec_ring_set_wptr - set write pointer
1750 * @ring: amdgpu_ring pointer
1752 * Commits the write pointer to the hardware
1753 */
1754 static void vcn_v3_0_dec_ring_set_wptr(struct amdgpu_ring *ring)
1756 struct amdgpu_device *adev = ring->adev;
1757 volatile struct amdgpu_fw_shared *fw_shared;
1759 if (adev->pg_flags & AMD_PG_SUPPORT_VCN_DPG) {
1760 /* whenever we update RBC_RB_WPTR, save the wptr in fw_shared rb.wptr and SCRATCH2 */
1761 fw_shared = adev->vcn.inst[ring->me].fw_shared_cpu_addr;
1762 fw_shared->rb.wptr = lower_32_bits(ring->wptr);
1763 WREG32_SOC15(VCN, ring->me, mmUVD_SCRATCH2,
1764 lower_32_bits(ring->wptr));
1765 }
1767 if (ring->use_doorbell) {
1768 adev->wb.wb[ring->wptr_offs] = lower_32_bits(ring->wptr);
1769 WDOORBELL32(ring->doorbell_index, lower_32_bits(ring->wptr));
1770 } else {
1771 WREG32_SOC15(VCN, ring->me, mmUVD_RBC_RB_WPTR, lower_32_bits(ring->wptr));
1772 }
1773 }
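/*
 * Software decode ring: work is submitted as VCN_DEC_SW_CMD_* packets
 * that the firmware parses, instead of the register-write packets used
 * by the legacy decode ring below.
 */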
1775 static void vcn_v3_0_dec_sw_ring_emit_fence(struct amdgpu_ring *ring, u64 addr,
1776 u64 seq, uint32_t flags)
1778 WARN_ON(flags & AMDGPU_FENCE_FLAG_64BIT);
1780 amdgpu_ring_write(ring, VCN_DEC_SW_CMD_FENCE);
1781 amdgpu_ring_write(ring, addr);
1782 amdgpu_ring_write(ring, upper_32_bits(addr));
1783 amdgpu_ring_write(ring, seq);
1784 amdgpu_ring_write(ring, VCN_DEC_SW_CMD_TRAP);
1787 static void vcn_v3_0_dec_sw_ring_insert_end(struct amdgpu_ring *ring)
1789 amdgpu_ring_write(ring, VCN_DEC_SW_CMD_END);
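/* Emit an indirect buffer: VMID, IB base address (lo/hi) and size in dwords */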
1792 static void vcn_v3_0_dec_sw_ring_emit_ib(struct amdgpu_ring *ring,
1793 struct amdgpu_job *job,
1794 struct amdgpu_ib *ib,
1795 uint32_t flags)
1797 uint32_t vmid = AMDGPU_JOB_GET_VMID(job);
1799 amdgpu_ring_write(ring, VCN_DEC_SW_CMD_IB);
1800 amdgpu_ring_write(ring, vmid);
1801 amdgpu_ring_write(ring, lower_32_bits(ib->gpu_addr));
1802 amdgpu_ring_write(ring, upper_32_bits(ib->gpu_addr));
1803 amdgpu_ring_write(ring, ib->length_dw);
1806 static void vcn_v3_0_dec_sw_ring_emit_reg_wait(struct amdgpu_ring *ring, uint32_t reg,
1807 uint32_t val, uint32_t mask)
1809 amdgpu_ring_write(ring, VCN_DEC_SW_CMD_REG_WAIT);
1810 amdgpu_ring_write(ring, reg << 2);
1811 amdgpu_ring_write(ring, mask);
1812 amdgpu_ring_write(ring, val);
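/* Emit a GPU TLB flush and wait for the page-table base write to land */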
1815 static void vcn_v3_0_dec_sw_ring_emit_vm_flush(struct amdgpu_ring *ring,
1816 uint32_t vmid, uint64_t pd_addr)
1818 struct amdgpu_vmhub *hub = &ring->adev->vmhub[ring->funcs->vmhub];
1819 uint32_t data0, data1, mask;
1821 pd_addr = amdgpu_gmc_emit_flush_gpu_tlb(ring, vmid, pd_addr);
1823 /* wait for register write */
1824 data0 = hub->ctx0_ptb_addr_lo32 + vmid * hub->ctx_addr_distance;
1825 data1 = lower_32_bits(pd_addr);
1826 mask = 0xffffffff;
1827 vcn_v3_0_dec_sw_ring_emit_reg_wait(ring, data0, data1, mask);
1828 }
1830 static void vcn_v3_0_dec_sw_ring_emit_wreg(struct amdgpu_ring *ring, uint32_t reg, uint32_t val)
1832 amdgpu_ring_write(ring, VCN_DEC_SW_CMD_REG_WRITE);
1833 amdgpu_ring_write(ring, reg << 2);
1834 amdgpu_ring_write(ring, val);
1837 static const struct amdgpu_ring_funcs vcn_v3_0_dec_sw_ring_vm_funcs = {
1838 .type = AMDGPU_RING_TYPE_VCN_DEC,
1839 .align_mask = 0x3f,
1840 .nop = VCN_DEC_SW_CMD_NO_OP,
1841 .vmhub = AMDGPU_MMHUB_0,
1842 .get_rptr = vcn_v3_0_dec_ring_get_rptr,
1843 .get_wptr = vcn_v3_0_dec_ring_get_wptr,
1844 .set_wptr = vcn_v3_0_dec_ring_set_wptr,
1845 .emit_frame_size =
1846 SOC15_FLUSH_GPU_TLB_NUM_WREG * 3 +
1847 SOC15_FLUSH_GPU_TLB_NUM_REG_WAIT * 4 +
1848 4 + /* vcn_v3_0_dec_sw_ring_emit_vm_flush */
1849 5 + 5 + /* vcn_v3_0_dec_sw_ring_emit_fence x2 vm fence */
1850 1, /* vcn_v3_0_dec_sw_ring_insert_end */
1851 .emit_ib_size = 5, /* vcn_v3_0_dec_sw_ring_emit_ib */
1852 .emit_ib = vcn_v3_0_dec_sw_ring_emit_ib,
1853 .emit_fence = vcn_v3_0_dec_sw_ring_emit_fence,
1854 .emit_vm_flush = vcn_v3_0_dec_sw_ring_emit_vm_flush,
1855 .test_ring = amdgpu_vcn_dec_sw_ring_test_ring,
1856 .test_ib = NULL, /* amdgpu_vcn_dec_sw_ring_test_ib */
1857 .insert_nop = amdgpu_ring_insert_nop,
1858 .insert_end = vcn_v3_0_dec_sw_ring_insert_end,
1859 .pad_ib = amdgpu_ring_generic_pad_ib,
1860 .begin_use = amdgpu_vcn_ring_begin_use,
1861 .end_use = amdgpu_vcn_ring_end_use,
1862 .emit_wreg = vcn_v3_0_dec_sw_ring_emit_wreg,
1863 .emit_reg_wait = vcn_v3_0_dec_sw_ring_emit_reg_wait,
1864 .emit_reg_write_reg_wait = amdgpu_ring_emit_reg_write_reg_wait_helper,
1865 };
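/*
 * Limit the session to the first VCN instance: only legal before the
 * first IB is submitted, since the scheduler cannot change afterwards.
 */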
1867 static int vcn_v3_0_limit_sched(struct amdgpu_cs_parser *p)
1869 struct drm_gpu_scheduler **scheds;
1871 /* The create msg must be in the first IB submitted */
1872 if (atomic_read(&p->entity->fence_seq))
1873 return -EINVAL;
1875 scheds = p->adev->gpu_sched[AMDGPU_HW_IP_VCN_DEC]
1876 [AMDGPU_RING_PRIO_DEFAULT].sched;
1877 drm_sched_entity_modify_sched(p->entity, scheds, 1);
1878 return 0;
1879 }
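/*
 * Validate a decode create message: bounds-check it against its BO and,
 * for codecs that cannot run on any instance, limit the scheduling.
 */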
1881 static int vcn_v3_0_dec_msg(struct amdgpu_cs_parser *p, uint64_t addr)
1883 struct ttm_operation_ctx ctx = { false, false };
1884 struct amdgpu_bo_va_mapping *map;
1885 uint32_t *msg, num_buffers;
1886 struct amdgpu_bo *bo;
1887 uint64_t start, end;
1888 unsigned int i;
1889 void *ptr;
1890 int r;
1892 addr &= AMDGPU_GMC_HOLE_MASK;
1893 r = amdgpu_cs_find_mapping(p, addr, &bo, &map);
1894 if (r) {
1895 DRM_ERROR("Can't find BO for addr 0x%08Lx\n", addr);
1896 return r;
1897 }
1899 start = map->start * AMDGPU_GPU_PAGE_SIZE;
1900 end = (map->last + 1) * AMDGPU_GPU_PAGE_SIZE;
1901 if (addr & 0x7) {
1902 DRM_ERROR("VCN messages must be 8-byte aligned!\n");
1903 return -EINVAL;
1904 }
1906 bo->flags |= AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED;
1907 amdgpu_bo_placement_from_domain(bo, bo->allowed_domains);
1908 r = ttm_bo_validate(&bo->tbo, &bo->placement, &ctx);
1909 if (r) {
1910 DRM_ERROR("Failed validating the VCN message BO (%d)!\n", r);
1911 return r;
1912 }
1914 r = amdgpu_bo_kmap(bo, &ptr);
1915 if (r) {
1916 DRM_ERROR("Failed mapping the VCN message (%d)!\n", r);
1917 return r;
1918 }
1920 msg = ptr + addr - start;
1923 if (msg[1] > end - addr) {
1924 r = -EINVAL;
1925 goto out;
1926 }
1928 if (msg[3] != RDECODE_MSG_CREATE)
1929 goto out;
1931 num_buffers = msg[2];
1932 for (i = 0, msg = &msg[6]; i < num_buffers; ++i, msg += 4) {
1933 uint32_t offset, size, *create;
1935 if (msg[0] != RDECODE_MESSAGE_CREATE)
1936 continue;
1938 offset = msg[1];
1939 size = msg[2];
1941 if (offset + size > end) {
1942 r = -EINVAL;
1943 goto out;
1944 }
1946 create = ptr + addr + offset - start;
1948 /* H264, HEVC and VP9 can run on any instance */
1949 if (create[0] == 0x7 || create[0] == 0x10 || create[0] == 0x11)
1950 continue;
1952 r = vcn_v3_0_limit_sched(p);
1953 if (r)
1954 goto out;
1955 }
1957 out:
1958 amdgpu_bo_kunmap(bo);
1959 return r;
1960 }
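/*
 * Scan the IB on instances other than the first one: data0/data1 writes
 * carry the message address, a zero cmd write triggers message validation.
 */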
1962 static int vcn_v3_0_ring_patch_cs_in_place(struct amdgpu_cs_parser *p,
1963 uint32_t ib_idx)
1965 struct amdgpu_ring *ring = to_amdgpu_ring(p->entity->rq->sched);
1966 struct amdgpu_ib *ib = &p->job->ibs[ib_idx];
1967 uint32_t msg_lo = 0, msg_hi = 0;
1968 unsigned int i;
1969 int r = 0;
1971 /* The first instance can decode anything */
1972 if (!ring->me)
1973 return r;
1975 for (i = 0; i < ib->length_dw; i += 2) {
1976 uint32_t reg = amdgpu_get_ib_value(p, ib_idx, i);
1977 uint32_t val = amdgpu_get_ib_value(p, ib_idx, i + 1);
1979 if (reg == PACKET0(p->adev->vcn.internal.data0, 0)) {
1980 msg_lo = val;
1981 } else if (reg == PACKET0(p->adev->vcn.internal.data1, 0)) {
1982 msg_hi = val;
1983 } else if (reg == PACKET0(p->adev->vcn.internal.cmd, 0) &&
1984 val == 0) {
1985 r = vcn_v3_0_dec_msg(p, ((u64)msg_hi) << 32 | msg_lo);
1986 if (r)
1987 return r;
1988 }
1989 }
1991 return 0;
1992 }
1993 static const struct amdgpu_ring_funcs vcn_v3_0_dec_ring_vm_funcs = {
1994 .type = AMDGPU_RING_TYPE_VCN_DEC,
1995 .align_mask = 0xf,
1996 .vmhub = AMDGPU_MMHUB_0,
1997 .get_rptr = vcn_v3_0_dec_ring_get_rptr,
1998 .get_wptr = vcn_v3_0_dec_ring_get_wptr,
1999 .set_wptr = vcn_v3_0_dec_ring_set_wptr,
2000 .patch_cs_in_place = vcn_v3_0_ring_patch_cs_in_place,
2001 .emit_frame_size =
2002 SOC15_FLUSH_GPU_TLB_NUM_WREG * 6 +
2003 SOC15_FLUSH_GPU_TLB_NUM_REG_WAIT * 8 +
2004 8 + /* vcn_v2_0_dec_ring_emit_vm_flush */
2005 14 + 14 + /* vcn_v2_0_dec_ring_emit_fence x2 vm fence */
2006 6,
2007 .emit_ib_size = 8, /* vcn_v2_0_dec_ring_emit_ib */
2008 .emit_ib = vcn_v2_0_dec_ring_emit_ib,
2009 .emit_fence = vcn_v2_0_dec_ring_emit_fence,
2010 .emit_vm_flush = vcn_v2_0_dec_ring_emit_vm_flush,
2011 .test_ring = vcn_v2_0_dec_ring_test_ring,
2012 .test_ib = amdgpu_vcn_dec_ring_test_ib,
2013 .insert_nop = vcn_v2_0_dec_ring_insert_nop,
2014 .insert_start = vcn_v2_0_dec_ring_insert_start,
2015 .insert_end = vcn_v2_0_dec_ring_insert_end,
2016 .pad_ib = amdgpu_ring_generic_pad_ib,
2017 .begin_use = amdgpu_vcn_ring_begin_use,
2018 .end_use = amdgpu_vcn_ring_end_use,
2019 .emit_wreg = vcn_v2_0_dec_ring_emit_wreg,
2020 .emit_reg_wait = vcn_v2_0_dec_ring_emit_reg_wait,
2021 .emit_reg_write_reg_wait = amdgpu_ring_emit_reg_write_reg_wait_helper,
2022 };
2024 /**
2025 * vcn_v3_0_enc_ring_get_rptr - get enc read pointer
2027 * @ring: amdgpu_ring pointer
2029 * Returns the current hardware enc read pointer
2030 */
2031 static uint64_t vcn_v3_0_enc_ring_get_rptr(struct amdgpu_ring *ring)
2033 struct amdgpu_device *adev = ring->adev;
2035 if (ring == &adev->vcn.inst[ring->me].ring_enc[0])
2036 return RREG32_SOC15(VCN, ring->me, mmUVD_RB_RPTR);
2038 return RREG32_SOC15(VCN, ring->me, mmUVD_RB_RPTR2);
2041 /**
2042 * vcn_v3_0_enc_ring_get_wptr - get enc write pointer
2044 * @ring: amdgpu_ring pointer
2046 * Returns the current hardware enc write pointer
2047 */
2048 static uint64_t vcn_v3_0_enc_ring_get_wptr(struct amdgpu_ring *ring)
2050 struct amdgpu_device *adev = ring->adev;
2052 if (ring == &adev->vcn.inst[ring->me].ring_enc[0]) {
2053 if (ring->use_doorbell)
2054 return adev->wb.wb[ring->wptr_offs];
2055 else
2056 return RREG32_SOC15(VCN, ring->me, mmUVD_RB_WPTR);
2057 } else {
2058 if (ring->use_doorbell)
2059 return adev->wb.wb[ring->wptr_offs];
2060 else
2061 return RREG32_SOC15(VCN, ring->me, mmUVD_RB_WPTR2);
2062 }
2063 }
2065 /**
2066 * vcn_v3_0_enc_ring_set_wptr - set enc write pointer
2068 * @ring: amdgpu_ring pointer
2070 * Commits the enc write pointer to the hardware
2071 */
2072 static void vcn_v3_0_enc_ring_set_wptr(struct amdgpu_ring *ring)
2074 struct amdgpu_device *adev = ring->adev;
2076 if (ring == &adev->vcn.inst[ring->me].ring_enc[0]) {
2077 if (ring->use_doorbell) {
2078 adev->wb.wb[ring->wptr_offs] = lower_32_bits(ring->wptr);
2079 WDOORBELL32(ring->doorbell_index, lower_32_bits(ring->wptr));
2080 } else {
2081 WREG32_SOC15(VCN, ring->me, mmUVD_RB_WPTR, lower_32_bits(ring->wptr));
2082 }
2083 } else {
2084 if (ring->use_doorbell) {
2085 adev->wb.wb[ring->wptr_offs] = lower_32_bits(ring->wptr);
2086 WDOORBELL32(ring->doorbell_index, lower_32_bits(ring->wptr));
2087 } else {
2088 WREG32_SOC15(VCN, ring->me, mmUVD_RB_WPTR2, lower_32_bits(ring->wptr));
2089 }
2090 }
2091 }
2093 static const struct amdgpu_ring_funcs vcn_v3_0_enc_ring_vm_funcs = {
2094 .type = AMDGPU_RING_TYPE_VCN_ENC,
2095 .align_mask = 0x3f,
2096 .nop = VCN_ENC_CMD_NO_OP,
2097 .vmhub = AMDGPU_MMHUB_0,
2098 .get_rptr = vcn_v3_0_enc_ring_get_rptr,
2099 .get_wptr = vcn_v3_0_enc_ring_get_wptr,
2100 .set_wptr = vcn_v3_0_enc_ring_set_wptr,
2101 .emit_frame_size =
2102 SOC15_FLUSH_GPU_TLB_NUM_WREG * 3 +
2103 SOC15_FLUSH_GPU_TLB_NUM_REG_WAIT * 4 +
2104 4 + /* vcn_v2_0_enc_ring_emit_vm_flush */
2105 5 + 5 + /* vcn_v2_0_enc_ring_emit_fence x2 vm fence */
2106 1, /* vcn_v2_0_enc_ring_insert_end */
2107 .emit_ib_size = 5, /* vcn_v2_0_enc_ring_emit_ib */
2108 .emit_ib = vcn_v2_0_enc_ring_emit_ib,
2109 .emit_fence = vcn_v2_0_enc_ring_emit_fence,
2110 .emit_vm_flush = vcn_v2_0_enc_ring_emit_vm_flush,
2111 .test_ring = amdgpu_vcn_enc_ring_test_ring,
2112 .test_ib = amdgpu_vcn_enc_ring_test_ib,
2113 .insert_nop = amdgpu_ring_insert_nop,
2114 .insert_end = vcn_v2_0_enc_ring_insert_end,
2115 .pad_ib = amdgpu_ring_generic_pad_ib,
2116 .begin_use = amdgpu_vcn_ring_begin_use,
2117 .end_use = amdgpu_vcn_ring_end_use,
2118 .emit_wreg = vcn_v2_0_enc_ring_emit_wreg,
2119 .emit_reg_wait = vcn_v2_0_enc_ring_emit_reg_wait,
2120 .emit_reg_write_reg_wait = amdgpu_ring_emit_reg_write_reg_wait_helper,
2121 };
2123 static void vcn_v3_0_set_dec_ring_funcs(struct amdgpu_device *adev)
2127 for (i = 0; i < adev->vcn.num_vcn_inst; ++i) {
2128 if (adev->vcn.harvest_config & (1 << i))
2129 continue;
2131 if (!DEC_SW_RING_ENABLED)
2132 adev->vcn.inst[i].ring_dec.funcs = &vcn_v3_0_dec_ring_vm_funcs;
2133 else
2134 adev->vcn.inst[i].ring_dec.funcs = &vcn_v3_0_dec_sw_ring_vm_funcs;
2135 adev->vcn.inst[i].ring_dec.me = i;
2136 DRM_INFO("VCN(%d) decode%s is enabled in VM mode\n", i,
2137 DEC_SW_RING_ENABLED ? "(Software Ring)" : "");
2138 }
2139 }
2141 static void vcn_v3_0_set_enc_ring_funcs(struct amdgpu_device *adev)
2145 for (i = 0; i < adev->vcn.num_vcn_inst; ++i) {
2146 if (adev->vcn.harvest_config & (1 << i))
2147 continue;
2149 for (j = 0; j < adev->vcn.num_enc_rings; ++j) {
2150 adev->vcn.inst[i].ring_enc[j].funcs = &vcn_v3_0_enc_ring_vm_funcs;
2151 adev->vcn.inst[i].ring_enc[j].me = i;
2152 }
2153 if (adev->vcn.num_enc_rings > 0)
2154 DRM_INFO("VCN(%d) encode is enabled in VM mode\n", i);
2155 }
2156 }
2158 static bool vcn_v3_0_is_idle(void *handle)
2160 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
2161 int i, ret = 1;
2163 for (i = 0; i < adev->vcn.num_vcn_inst; ++i) {
2164 if (adev->vcn.harvest_config & (1 << i))
2165 continue;
2167 ret &= (RREG32_SOC15(VCN, i, mmUVD_STATUS) == UVD_STATUS__IDLE);
2168 }
2170 return ret;
2171 }
2173 static int vcn_v3_0_wait_for_idle(void *handle)
2175 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
2176 int i, ret = 0;
2178 for (i = 0; i < adev->vcn.num_vcn_inst; ++i) {
2179 if (adev->vcn.harvest_config & (1 << i))
2180 continue;
2182 ret = SOC15_WAIT_ON_RREG(VCN, i, mmUVD_STATUS, UVD_STATUS__IDLE,
2183 UVD_STATUS__IDLE);
2184 if (ret)
2185 return ret;
2186 }
2188 return ret;
2189 }
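/* Clock gating can only be enabled while an instance is idle */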
2191 static int vcn_v3_0_set_clockgating_state(void *handle,
2192 enum amd_clockgating_state state)
2194 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
2195 bool enable = (state == AMD_CG_STATE_GATE);
2196 int i;
2198 for (i = 0; i < adev->vcn.num_vcn_inst; ++i) {
2199 if (adev->vcn.harvest_config & (1 << i))
2200 continue;
2202 if (enable) {
2203 if (RREG32_SOC15(VCN, i, mmUVD_STATUS) != UVD_STATUS__IDLE)
2204 return -EBUSY;
2205 vcn_v3_0_enable_clock_gating(adev, i);
2206 } else {
2207 vcn_v3_0_disable_clock_gating(adev, i);
2208 }
2209 }
2211 return 0;
2212 }
2214 static int vcn_v3_0_set_powergating_state(void *handle,
2215 enum amd_powergating_state state)
2217 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
2218 int ret;
2220 /* For SR-IOV, the guest should not control VCN power gating;
2221 * the MMSCH firmware controls both power gating and clock gating,
2222 * so the guest must avoid touching the CGC and PG registers.
2223 */
2224 if (amdgpu_sriov_vf(adev)) {
2225 adev->vcn.cur_state = AMD_PG_STATE_UNGATE;
2226 return 0;
2227 }
2229 if (state == adev->vcn.cur_state)
2230 return 0;
2232 if (state == AMD_PG_STATE_GATE)
2233 ret = vcn_v3_0_stop(adev);
2234 else
2235 ret = vcn_v3_0_start(adev);
2237 if (!ret)
2238 adev->vcn.cur_state = state;
2240 return ret;
2241 }
2243 static int vcn_v3_0_set_interrupt_state(struct amdgpu_device *adev,
2244 struct amdgpu_irq_src *source,
2245 unsigned type,
2246 enum amdgpu_interrupt_state state)
2247 {
2248 return 0;
2249 }
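/* Dispatch a VCN interrupt to the fence handler of the signalling ring */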
2251 static int vcn_v3_0_process_interrupt(struct amdgpu_device *adev,
2252 struct amdgpu_irq_src *source,
2253 struct amdgpu_iv_entry *entry)
2255 uint32_t ip_instance;
2257 switch (entry->client_id) {
2258 case SOC15_IH_CLIENTID_VCN:
2259 ip_instance = 0;
2260 break;
2261 case SOC15_IH_CLIENTID_VCN1:
2262 ip_instance = 1;
2263 break;
2264 default:
2265 DRM_ERROR("Unhandled client id: %d\n", entry->client_id);
2266 return 0;
2267 }
2269 DRM_DEBUG("IH: VCN TRAP\n");
2271 switch (entry->src_id) {
2272 case VCN_2_0__SRCID__UVD_SYSTEM_MESSAGE_INTERRUPT:
2273 amdgpu_fence_process(&adev->vcn.inst[ip_instance].ring_dec);
2274 break;
2275 case VCN_2_0__SRCID__UVD_ENC_GENERAL_PURPOSE:
2276 amdgpu_fence_process(&adev->vcn.inst[ip_instance].ring_enc[0]);
2277 break;
2278 case VCN_2_0__SRCID__UVD_ENC_LOW_LATENCY:
2279 amdgpu_fence_process(&adev->vcn.inst[ip_instance].ring_enc[1]);
2280 break;
2281 default:
2282 DRM_ERROR("Unhandled interrupt: %d %d\n",
2283 entry->src_id, entry->src_data[0]);
2284 break;
2285 }
2287 return 0;
2288 }
2290 static const struct amdgpu_irq_src_funcs vcn_v3_0_irq_funcs = {
2291 .set = vcn_v3_0_set_interrupt_state,
2292 .process = vcn_v3_0_process_interrupt,
2293 };
2295 static void vcn_v3_0_set_irq_funcs(struct amdgpu_device *adev)
2299 for (i = 0; i < adev->vcn.num_vcn_inst; ++i) {
2300 if (adev->vcn.harvest_config & (1 << i))
2301 continue;
2303 adev->vcn.inst[i].irq.num_types = adev->vcn.num_enc_rings + 1;
2304 adev->vcn.inst[i].irq.funcs = &vcn_v3_0_irq_funcs;
2305 }
2306 }
2308 static const struct amd_ip_funcs vcn_v3_0_ip_funcs = {
2309 .name = "vcn_v3_0",
2310 .early_init = vcn_v3_0_early_init,
2311 .late_init = NULL,
2312 .sw_init = vcn_v3_0_sw_init,
2313 .sw_fini = vcn_v3_0_sw_fini,
2314 .hw_init = vcn_v3_0_hw_init,
2315 .hw_fini = vcn_v3_0_hw_fini,
2316 .suspend = vcn_v3_0_suspend,
2317 .resume = vcn_v3_0_resume,
2318 .is_idle = vcn_v3_0_is_idle,
2319 .wait_for_idle = vcn_v3_0_wait_for_idle,
2320 .check_soft_reset = NULL,
2321 .pre_soft_reset = NULL,
2322 .soft_reset = NULL,
2323 .post_soft_reset = NULL,
2324 .set_clockgating_state = vcn_v3_0_set_clockgating_state,
2325 .set_powergating_state = vcn_v3_0_set_powergating_state,
2326 };
2328 const struct amdgpu_ip_block_version vcn_v3_0_ip_block =
2329 {
2330 .type = AMD_IP_BLOCK_TYPE_VCN,
2331 .major = 3,
2332 .minor = 0,
2333 .rev = 0,
2334 .funcs = &vcn_v3_0_ip_funcs,
2335 };