/*
 * Copyright 2019 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 */

#include <linux/firmware.h>

#include "amdgpu.h"
#include "amdgpu_vcn.h"
#include "amdgpu_pm.h"
#include "soc15.h"
#include "soc15d.h"
#include "vcn_v2_0.h"
#include "mmsch_v3_0.h"

#include "vcn/vcn_3_0_0_offset.h"
#include "vcn/vcn_3_0_0_sh_mask.h"
#include "ivsrcid/vcn/irqsrcs_vcn_2_0.h"

#include <drm/drm_drv.h>

#define mmUVD_CONTEXT_ID_INTERNAL_OFFSET			0x27
#define mmUVD_GPCOM_VCPU_CMD_INTERNAL_OFFSET			0x0f
#define mmUVD_GPCOM_VCPU_DATA0_INTERNAL_OFFSET			0x10
#define mmUVD_GPCOM_VCPU_DATA1_INTERNAL_OFFSET			0x11
#define mmUVD_NO_OP_INTERNAL_OFFSET				0x29
#define mmUVD_GP_SCRATCH8_INTERNAL_OFFSET			0x66
#define mmUVD_SCRATCH9_INTERNAL_OFFSET				0xc01d

#define mmUVD_LMI_RBC_IB_VMID_INTERNAL_OFFSET			0x431
#define mmUVD_LMI_RBC_IB_64BIT_BAR_LOW_INTERNAL_OFFSET		0x3b4
#define mmUVD_LMI_RBC_IB_64BIT_BAR_HIGH_INTERNAL_OFFSET		0x3b5
#define mmUVD_RBC_IB_SIZE_INTERNAL_OFFSET			0x25c

#define VCN_INSTANCES_SIENNA_CICHLID	2
#define DEC_SW_RING_ENABLED		FALSE

#define RDECODE_MSG_CREATE		0x00000000
#define RDECODE_MESSAGE_CREATE		0x00000001

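/* Interrupt-handler client IDs, indexed by VCN instance: each instance
 * reports its interrupts through its own IH client. */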
static int amdgpu_ih_clientid_vcns[] = {
	SOC15_IH_CLIENTID_VCN,
	SOC15_IH_CLIENTID_VCN1
};

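/* Firmware ucode IDs, indexed by VCN instance; used below when the PSP
 * loads the firmware images (see vcn_v3_0_start_sriov()). */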
static int amdgpu_ucode_id_vcns[] = {
	AMDGPU_UCODE_ID_VCN,
	AMDGPU_UCODE_ID_VCN1
};

static int vcn_v3_0_start_sriov(struct amdgpu_device *adev);
static void vcn_v3_0_set_dec_ring_funcs(struct amdgpu_device *adev);
static void vcn_v3_0_set_enc_ring_funcs(struct amdgpu_device *adev);
static void vcn_v3_0_set_irq_funcs(struct amdgpu_device *adev);
static int vcn_v3_0_set_powergating_state(void *handle,
			enum amd_powergating_state state);
static int vcn_v3_0_pause_dpg_mode(struct amdgpu_device *adev,
			int inst_idx, struct dpg_pause_state *new_state);

static void vcn_v3_0_dec_ring_set_wptr(struct amdgpu_ring *ring);
static void vcn_v3_0_enc_ring_set_wptr(struct amdgpu_ring *ring);

/**
 * vcn_v3_0_early_init - set function pointers
 *
 * @handle: amdgpu_device pointer
 *
 * Set ring and irq function pointers
 */
static int vcn_v3_0_early_init(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	if (amdgpu_sriov_vf(adev)) {
		adev->vcn.num_vcn_inst = VCN_INSTANCES_SIENNA_CICHLID;
		adev->vcn.harvest_config = 0;
		adev->vcn.num_enc_rings = 1;
	} else {
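		/* On bare metal, read the harvest fuses to learn which VCN
		 * instances were disabled at production time. */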
		if (adev->asic_type == CHIP_SIENNA_CICHLID) {
			u32 harvest;
			int i;

			adev->vcn.num_vcn_inst = VCN_INSTANCES_SIENNA_CICHLID;
			for (i = 0; i < adev->vcn.num_vcn_inst; i++) {
				harvest = RREG32_SOC15(VCN, i, mmCC_UVD_HARVESTING);
				if (harvest & CC_UVD_HARVESTING__UVD_DISABLE_MASK)
					adev->vcn.harvest_config |= 1 << i;
			}

			if (adev->vcn.harvest_config == (AMDGPU_VCN_HARVEST_VCN0 |
						AMDGPU_VCN_HARVEST_VCN1))
				/* both instances are harvested, disable the block */
				return -ENOENT;
		} else
			adev->vcn.num_vcn_inst = 1;

		if (adev->asic_type == CHIP_BEIGE_GOBY)
			adev->vcn.num_enc_rings = 0;
		else
			adev->vcn.num_enc_rings = 2;
	}

	vcn_v3_0_set_dec_ring_funcs(adev);
	vcn_v3_0_set_enc_ring_funcs(adev);
	vcn_v3_0_set_irq_funcs(adev);

	return 0;
}

/**
 * vcn_v3_0_sw_init - sw init for VCN block
 *
 * @handle: amdgpu_device pointer
 *
 * Load firmware and sw initialization
 */
static int vcn_v3_0_sw_init(void *handle)
{
	struct amdgpu_ring *ring;
	int i, j, r;
	int vcn_doorbell_index = 0;
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	r = amdgpu_vcn_sw_init(adev);
	if (r)
		return r;

	if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) {
		const struct common_firmware_header *hdr;

		hdr = (const struct common_firmware_header *)adev->vcn.fw->data;
		adev->firmware.ucode[AMDGPU_UCODE_ID_VCN].ucode_id = AMDGPU_UCODE_ID_VCN;
		adev->firmware.ucode[AMDGPU_UCODE_ID_VCN].fw = adev->vcn.fw;
		adev->firmware.fw_size +=
			ALIGN(le32_to_cpu(hdr->ucode_size_bytes), PAGE_SIZE);

		if (adev->vcn.num_vcn_inst == VCN_INSTANCES_SIENNA_CICHLID) {
			adev->firmware.ucode[AMDGPU_UCODE_ID_VCN1].ucode_id = AMDGPU_UCODE_ID_VCN1;
			adev->firmware.ucode[AMDGPU_UCODE_ID_VCN1].fw = adev->vcn.fw;
			adev->firmware.fw_size +=
				ALIGN(le32_to_cpu(hdr->ucode_size_bytes), PAGE_SIZE);
		}
		dev_info(adev->dev, "Will use PSP to load VCN firmware\n");
	}

	r = amdgpu_vcn_resume(adev);
	if (r)
		return r;

	/*
	 * Note: doorbell assignment is fixed for SRIOV multiple VCN engines
	 * Formula:
	 *	vcn_db_base  = adev->doorbell_index.vcn.vcn_ring0_1 << 1;
	 *	dec_ring_i   = vcn_db_base + i * (adev->vcn.num_enc_rings + 1)
	 *	enc_ring_i,j = vcn_db_base + i * (adev->vcn.num_enc_rings + 1) + 1 + j
	 */
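	/*
	 * Worked example for the formula above (two instances, one encode
	 * ring each): dec0 = vcn_db_base + 0, enc0.0 = vcn_db_base + 1,
	 * dec1 = vcn_db_base + 2, enc1.0 = vcn_db_base + 3.
	 */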
	if (amdgpu_sriov_vf(adev)) {
		vcn_doorbell_index = adev->doorbell_index.vcn.vcn_ring0_1;
		/* get DWORD offset */
		vcn_doorbell_index = vcn_doorbell_index << 1;
	}

	for (i = 0; i < adev->vcn.num_vcn_inst; i++) {
		volatile struct amdgpu_fw_shared *fw_shared;

		if (adev->vcn.harvest_config & (1 << i))
			continue;

		adev->vcn.internal.context_id = mmUVD_CONTEXT_ID_INTERNAL_OFFSET;
		adev->vcn.internal.ib_vmid = mmUVD_LMI_RBC_IB_VMID_INTERNAL_OFFSET;
		adev->vcn.internal.ib_bar_low = mmUVD_LMI_RBC_IB_64BIT_BAR_LOW_INTERNAL_OFFSET;
		adev->vcn.internal.ib_bar_high = mmUVD_LMI_RBC_IB_64BIT_BAR_HIGH_INTERNAL_OFFSET;
		adev->vcn.internal.ib_size = mmUVD_RBC_IB_SIZE_INTERNAL_OFFSET;
		adev->vcn.internal.gp_scratch8 = mmUVD_GP_SCRATCH8_INTERNAL_OFFSET;

		adev->vcn.internal.scratch9 = mmUVD_SCRATCH9_INTERNAL_OFFSET;
		adev->vcn.inst[i].external.scratch9 = SOC15_REG_OFFSET(VCN, i, mmUVD_SCRATCH9);
		adev->vcn.internal.data0 = mmUVD_GPCOM_VCPU_DATA0_INTERNAL_OFFSET;
		adev->vcn.inst[i].external.data0 = SOC15_REG_OFFSET(VCN, i, mmUVD_GPCOM_VCPU_DATA0);
		adev->vcn.internal.data1 = mmUVD_GPCOM_VCPU_DATA1_INTERNAL_OFFSET;
		adev->vcn.inst[i].external.data1 = SOC15_REG_OFFSET(VCN, i, mmUVD_GPCOM_VCPU_DATA1);
		adev->vcn.internal.cmd = mmUVD_GPCOM_VCPU_CMD_INTERNAL_OFFSET;
		adev->vcn.inst[i].external.cmd = SOC15_REG_OFFSET(VCN, i, mmUVD_GPCOM_VCPU_CMD);
		adev->vcn.internal.nop = mmUVD_NO_OP_INTERNAL_OFFSET;
		adev->vcn.inst[i].external.nop = SOC15_REG_OFFSET(VCN, i, mmUVD_NO_OP);

		/* VCN DEC TRAP */
		r = amdgpu_irq_add_id(adev, amdgpu_ih_clientid_vcns[i],
				VCN_2_0__SRCID__UVD_SYSTEM_MESSAGE_INTERRUPT, &adev->vcn.inst[i].irq);
		if (r)
			return r;

		atomic_set(&adev->vcn.inst[i].sched_score, 0);

		ring = &adev->vcn.inst[i].ring_dec;
		ring->use_doorbell = true;
		if (amdgpu_sriov_vf(adev)) {
			ring->doorbell_index = vcn_doorbell_index + i * (adev->vcn.num_enc_rings + 1);
		} else {
			ring->doorbell_index = (adev->doorbell_index.vcn.vcn_ring0_1 << 1) + 8 * i;
		}
		sprintf(ring->name, "vcn_dec_%d", i);
		r = amdgpu_ring_init(adev, ring, 512, &adev->vcn.inst[i].irq, 0,
				     AMDGPU_RING_PRIO_DEFAULT,
				     &adev->vcn.inst[i].sched_score);
		if (r)
			return r;

		for (j = 0; j < adev->vcn.num_enc_rings; ++j) {
			/* VCN ENC TRAP */
			r = amdgpu_irq_add_id(adev, amdgpu_ih_clientid_vcns[i],
				j + VCN_2_0__SRCID__UVD_ENC_GENERAL_PURPOSE, &adev->vcn.inst[i].irq);
			if (r)
				return r;

			ring = &adev->vcn.inst[i].ring_enc[j];
			ring->use_doorbell = true;
			if (amdgpu_sriov_vf(adev)) {
				ring->doorbell_index = vcn_doorbell_index + i * (adev->vcn.num_enc_rings + 1) + 1 + j;
			} else {
				ring->doorbell_index = (adev->doorbell_index.vcn.vcn_ring0_1 << 1) + 2 + j + 8 * i;
			}
			sprintf(ring->name, "vcn_enc_%d.%d", i, j);
			r = amdgpu_ring_init(adev, ring, 512, &adev->vcn.inst[i].irq, 0,
					     AMDGPU_RING_PRIO_DEFAULT,
					     &adev->vcn.inst[i].sched_score);
			if (r)
				return r;
		}

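		/* Advertise the driver's queue/ring capabilities in the
		 * firmware shared memory area. */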
		fw_shared = adev->vcn.inst[i].fw_shared_cpu_addr;
		fw_shared->present_flag_0 |= cpu_to_le32(AMDGPU_VCN_SW_RING_FLAG) |
					     cpu_to_le32(AMDGPU_VCN_MULTI_QUEUE_FLAG) |
					     cpu_to_le32(AMDGPU_VCN_FW_SHARED_FLAG_0_RB);
		fw_shared->sw_ring.is_enabled = cpu_to_le32(DEC_SW_RING_ENABLED);
	}

	if (amdgpu_sriov_vf(adev)) {
		r = amdgpu_virt_alloc_mm_table(adev);
		if (r)
			return r;
	}

	if (adev->pg_flags & AMD_PG_SUPPORT_VCN_DPG)
		adev->vcn.pause_dpg_mode = vcn_v3_0_pause_dpg_mode;

	return 0;
}

/**
 * vcn_v3_0_sw_fini - sw fini for VCN block
 *
 * @handle: amdgpu_device pointer
 *
 * VCN suspend and free up sw allocation
 */
static int vcn_v3_0_sw_fini(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	int i, r, idx;

	if (drm_dev_enter(&adev->ddev, &idx)) {
		for (i = 0; i < adev->vcn.num_vcn_inst; i++) {
			volatile struct amdgpu_fw_shared *fw_shared;

			if (adev->vcn.harvest_config & (1 << i))
				continue;

			fw_shared = adev->vcn.inst[i].fw_shared_cpu_addr;
			fw_shared->present_flag_0 = 0;
			fw_shared->sw_ring.is_enabled = false;
		}

		drm_dev_exit(idx);
	}

	if (amdgpu_sriov_vf(adev))
		amdgpu_virt_free_mm_table(adev);

	r = amdgpu_vcn_suspend(adev);
	if (r)
		return r;

	r = amdgpu_vcn_sw_fini(adev);

	return r;
}

/**
 * vcn_v3_0_hw_init - start and test VCN block
 *
 * @handle: amdgpu_device pointer
 *
 * Initialize the hardware, boot up the VCPU and do some testing
 */
static int vcn_v3_0_hw_init(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	struct amdgpu_ring *ring;
	int i, j, r;

	if (amdgpu_sriov_vf(adev)) {
		r = vcn_v3_0_start_sriov(adev);
		if (r)
			goto done;

		/* initialize VCN dec and enc ring buffers */
		for (i = 0; i < adev->vcn.num_vcn_inst; ++i) {
			if (adev->vcn.harvest_config & (1 << i))
				continue;

			ring = &adev->vcn.inst[i].ring_dec;
			if (amdgpu_vcn_is_disabled_vcn(adev, VCN_DECODE_RING, i)) {
				ring->sched.ready = false;
				dev_info(adev->dev, "ring %s is disabled by hypervisor\n", ring->name);
			} else {
				ring->wptr = 0;
				ring->wptr_old = 0;
				vcn_v3_0_dec_ring_set_wptr(ring);
				ring->sched.ready = true;
			}

			for (j = 0; j < adev->vcn.num_enc_rings; ++j) {
				ring = &adev->vcn.inst[i].ring_enc[j];
				if (amdgpu_vcn_is_disabled_vcn(adev, VCN_ENCODE_RING, i)) {
					ring->sched.ready = false;
					dev_info(adev->dev, "ring %s is disabled by hypervisor\n", ring->name);
				} else {
					ring->wptr = 0;
					ring->wptr_old = 0;
					vcn_v3_0_enc_ring_set_wptr(ring);
					ring->sched.ready = true;
				}
			}
		}
	} else {
		for (i = 0; i < adev->vcn.num_vcn_inst; ++i) {
			if (adev->vcn.harvest_config & (1 << i))
				continue;

			ring = &adev->vcn.inst[i].ring_dec;

			adev->nbio.funcs->vcn_doorbell_range(adev, ring->use_doorbell,
						     ring->doorbell_index, i);

			r = amdgpu_ring_test_helper(ring);
			if (r)
				goto done;

			for (j = 0; j < adev->vcn.num_enc_rings; ++j) {
				ring = &adev->vcn.inst[i].ring_enc[j];
				r = amdgpu_ring_test_helper(ring);
				if (r)
					goto done;
			}
		}
	}

done:
	if (!r)
		DRM_INFO("VCN decode and encode initialized successfully (under %s).\n",
			(adev->pg_flags & AMD_PG_SUPPORT_VCN_DPG) ? "DPG Mode" : "SPG Mode");

	return r;
}

/**
 * vcn_v3_0_hw_fini - stop the hardware block
 *
 * @handle: amdgpu_device pointer
 *
 * Stop the VCN block, mark ring as not ready any more
 */
static int vcn_v3_0_hw_fini(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	int i;

	cancel_delayed_work_sync(&adev->vcn.idle_work);

	for (i = 0; i < adev->vcn.num_vcn_inst; ++i) {
		if (adev->vcn.harvest_config & (1 << i))
			continue;

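		/* On bare metal, force the instance back to the gated state
		 * if it was left running; under SR-IOV the host controls the
		 * power state. */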
		if (!amdgpu_sriov_vf(adev)) {
			if ((adev->pg_flags & AMD_PG_SUPPORT_VCN_DPG) ||
					(adev->vcn.cur_state != AMD_PG_STATE_GATE &&
					 RREG32_SOC15(VCN, i, mmUVD_STATUS))) {
				vcn_v3_0_set_powergating_state(adev, AMD_PG_STATE_GATE);
			}
		}
	}

	return 0;
}

/**
 * vcn_v3_0_suspend - suspend VCN block
 *
 * @handle: amdgpu_device pointer
 *
 * HW fini and suspend VCN block
 */
static int vcn_v3_0_suspend(void *handle)
{
	int r;
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	r = vcn_v3_0_hw_fini(adev);
	if (r)
		return r;

	r = amdgpu_vcn_suspend(adev);

	return r;
}

/**
 * vcn_v3_0_resume - resume VCN block
 *
 * @handle: amdgpu_device pointer
 *
 * Resume firmware and hw init VCN block
 */
static int vcn_v3_0_resume(void *handle)
{
	int r;
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	r = amdgpu_vcn_resume(adev);
	if (r)
		return r;

	r = vcn_v3_0_hw_init(adev);

	return r;
}

/**
 * vcn_v3_0_mc_resume - memory controller programming
 *
 * @adev: amdgpu_device pointer
 * @inst: instance number
 *
 * Let the VCN memory controller know its offsets
 */
static void vcn_v3_0_mc_resume(struct amdgpu_device *adev, int inst)
{
	uint32_t size = AMDGPU_GPU_PAGE_ALIGN(adev->vcn.fw->size + 4);
	uint32_t offset;

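	/* The VCPU is given three cacheable windows (firmware image, stack
	 * and context) plus one non-cacheable window for the firmware
	 * shared memory area. */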
	/* cache window 0: fw */
	if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) {
		WREG32_SOC15(VCN, inst, mmUVD_LMI_VCPU_CACHE_64BIT_BAR_LOW,
			(adev->firmware.ucode[AMDGPU_UCODE_ID_VCN + inst].tmr_mc_addr_lo));
		WREG32_SOC15(VCN, inst, mmUVD_LMI_VCPU_CACHE_64BIT_BAR_HIGH,
			(adev->firmware.ucode[AMDGPU_UCODE_ID_VCN + inst].tmr_mc_addr_hi));
		WREG32_SOC15(VCN, inst, mmUVD_VCPU_CACHE_OFFSET0, 0);
		offset = 0;
	} else {
		WREG32_SOC15(VCN, inst, mmUVD_LMI_VCPU_CACHE_64BIT_BAR_LOW,
			lower_32_bits(adev->vcn.inst[inst].gpu_addr));
		WREG32_SOC15(VCN, inst, mmUVD_LMI_VCPU_CACHE_64BIT_BAR_HIGH,
			upper_32_bits(adev->vcn.inst[inst].gpu_addr));
		offset = size;
		WREG32_SOC15(VCN, inst, mmUVD_VCPU_CACHE_OFFSET0,
			AMDGPU_UVD_FIRMWARE_OFFSET >> 3);
	}
	WREG32_SOC15(VCN, inst, mmUVD_VCPU_CACHE_SIZE0, size);

	/* cache window 1: stack */
	WREG32_SOC15(VCN, inst, mmUVD_LMI_VCPU_CACHE1_64BIT_BAR_LOW,
		lower_32_bits(adev->vcn.inst[inst].gpu_addr + offset));
	WREG32_SOC15(VCN, inst, mmUVD_LMI_VCPU_CACHE1_64BIT_BAR_HIGH,
		upper_32_bits(adev->vcn.inst[inst].gpu_addr + offset));
	WREG32_SOC15(VCN, inst, mmUVD_VCPU_CACHE_OFFSET1, 0);
	WREG32_SOC15(VCN, inst, mmUVD_VCPU_CACHE_SIZE1, AMDGPU_VCN_STACK_SIZE);

	/* cache window 2: context */
	WREG32_SOC15(VCN, inst, mmUVD_LMI_VCPU_CACHE2_64BIT_BAR_LOW,
		lower_32_bits(adev->vcn.inst[inst].gpu_addr + offset + AMDGPU_VCN_STACK_SIZE));
	WREG32_SOC15(VCN, inst, mmUVD_LMI_VCPU_CACHE2_64BIT_BAR_HIGH,
		upper_32_bits(adev->vcn.inst[inst].gpu_addr + offset + AMDGPU_VCN_STACK_SIZE));
	WREG32_SOC15(VCN, inst, mmUVD_VCPU_CACHE_OFFSET2, 0);
	WREG32_SOC15(VCN, inst, mmUVD_VCPU_CACHE_SIZE2, AMDGPU_VCN_CONTEXT_SIZE);

	/* non-cache window */
	WREG32_SOC15(VCN, inst, mmUVD_LMI_VCPU_NC0_64BIT_BAR_LOW,
		lower_32_bits(adev->vcn.inst[inst].fw_shared_gpu_addr));
	WREG32_SOC15(VCN, inst, mmUVD_LMI_VCPU_NC0_64BIT_BAR_HIGH,
		upper_32_bits(adev->vcn.inst[inst].fw_shared_gpu_addr));
	WREG32_SOC15(VCN, inst, mmUVD_VCPU_NONCACHE_OFFSET0, 0);
	WREG32_SOC15(VCN, inst, mmUVD_VCPU_NONCACHE_SIZE0,
		AMDGPU_GPU_PAGE_ALIGN(sizeof(struct amdgpu_fw_shared)));
}

static void vcn_v3_0_mc_resume_dpg_mode(struct amdgpu_device *adev, int inst_idx, bool indirect)
{
	uint32_t size = AMDGPU_GPU_PAGE_ALIGN(adev->vcn.fw->size + 4);
	uint32_t offset;

	/* cache window 0: fw */
	if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) {
		if (!indirect) {
			WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
				VCN, inst_idx, mmUVD_LMI_VCPU_CACHE_64BIT_BAR_LOW),
				(adev->firmware.ucode[AMDGPU_UCODE_ID_VCN + inst_idx].tmr_mc_addr_lo), 0, indirect);
			WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
				VCN, inst_idx, mmUVD_LMI_VCPU_CACHE_64BIT_BAR_HIGH),
				(adev->firmware.ucode[AMDGPU_UCODE_ID_VCN + inst_idx].tmr_mc_addr_hi), 0, indirect);
			WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
				VCN, inst_idx, mmUVD_VCPU_CACHE_OFFSET0), 0, 0, indirect);
		} else {
			WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
				VCN, inst_idx, mmUVD_LMI_VCPU_CACHE_64BIT_BAR_LOW), 0, 0, indirect);
			WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
				VCN, inst_idx, mmUVD_LMI_VCPU_CACHE_64BIT_BAR_HIGH), 0, 0, indirect);
			WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
				VCN, inst_idx, mmUVD_VCPU_CACHE_OFFSET0), 0, 0, indirect);
		}
		offset = 0;
	} else {
		WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
			VCN, inst_idx, mmUVD_LMI_VCPU_CACHE_64BIT_BAR_LOW),
			lower_32_bits(adev->vcn.inst[inst_idx].gpu_addr), 0, indirect);
		WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
			VCN, inst_idx, mmUVD_LMI_VCPU_CACHE_64BIT_BAR_HIGH),
			upper_32_bits(adev->vcn.inst[inst_idx].gpu_addr), 0, indirect);
		offset = size;
		WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
			VCN, inst_idx, mmUVD_VCPU_CACHE_OFFSET0),
			AMDGPU_UVD_FIRMWARE_OFFSET >> 3, 0, indirect);
	}

	if (!indirect)
		WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
			VCN, inst_idx, mmUVD_VCPU_CACHE_SIZE0), size, 0, indirect);
	else
		WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
			VCN, inst_idx, mmUVD_VCPU_CACHE_SIZE0), 0, 0, indirect);

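	/* Note: when @indirect is true, the WREG32_SOC15_DPG_MODE() calls in
	 * this function stage register writes into the DPG scratch SRAM; the
	 * PSP applies them later (see psp_update_vcn_sram() in the DPG start
	 * path) instead of the driver touching the registers directly. */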
	/* cache window 1: stack */
	if (!indirect) {
		WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
			VCN, inst_idx, mmUVD_LMI_VCPU_CACHE1_64BIT_BAR_LOW),
			lower_32_bits(adev->vcn.inst[inst_idx].gpu_addr + offset), 0, indirect);
		WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
			VCN, inst_idx, mmUVD_LMI_VCPU_CACHE1_64BIT_BAR_HIGH),
			upper_32_bits(adev->vcn.inst[inst_idx].gpu_addr + offset), 0, indirect);
		WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
			VCN, inst_idx, mmUVD_VCPU_CACHE_OFFSET1), 0, 0, indirect);
	} else {
		WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
			VCN, inst_idx, mmUVD_LMI_VCPU_CACHE1_64BIT_BAR_LOW), 0, 0, indirect);
		WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
			VCN, inst_idx, mmUVD_LMI_VCPU_CACHE1_64BIT_BAR_HIGH), 0, 0, indirect);
		WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
			VCN, inst_idx, mmUVD_VCPU_CACHE_OFFSET1), 0, 0, indirect);
	}
	WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
		VCN, inst_idx, mmUVD_VCPU_CACHE_SIZE1), AMDGPU_VCN_STACK_SIZE, 0, indirect);

	/* cache window 2: context */
	WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
		VCN, inst_idx, mmUVD_LMI_VCPU_CACHE2_64BIT_BAR_LOW),
		lower_32_bits(adev->vcn.inst[inst_idx].gpu_addr + offset + AMDGPU_VCN_STACK_SIZE), 0, indirect);
	WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
		VCN, inst_idx, mmUVD_LMI_VCPU_CACHE2_64BIT_BAR_HIGH),
		upper_32_bits(adev->vcn.inst[inst_idx].gpu_addr + offset + AMDGPU_VCN_STACK_SIZE), 0, indirect);
	WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
		VCN, inst_idx, mmUVD_VCPU_CACHE_OFFSET2), 0, 0, indirect);
	WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
		VCN, inst_idx, mmUVD_VCPU_CACHE_SIZE2), AMDGPU_VCN_CONTEXT_SIZE, 0, indirect);

	/* non-cache window */
	WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
		VCN, inst_idx, mmUVD_LMI_VCPU_NC0_64BIT_BAR_LOW),
		lower_32_bits(adev->vcn.inst[inst_idx].fw_shared_gpu_addr), 0, indirect);
	WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
		VCN, inst_idx, mmUVD_LMI_VCPU_NC0_64BIT_BAR_HIGH),
		upper_32_bits(adev->vcn.inst[inst_idx].fw_shared_gpu_addr), 0, indirect);
	WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
		VCN, inst_idx, mmUVD_VCPU_NONCACHE_OFFSET0), 0, 0, indirect);
	WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
		VCN, inst_idx, mmUVD_VCPU_NONCACHE_SIZE0),
		AMDGPU_GPU_PAGE_ALIGN(sizeof(struct amdgpu_fw_shared)), 0, indirect);

	/* VCN global tiling registers */
	WREG32_SOC15_DPG_MODE(0, SOC15_DPG_MODE_OFFSET(
		UVD, 0, mmUVD_GFX10_ADDR_CONFIG), adev->gfx.config.gb_addr_config, 0, indirect);
}

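/*
 * The UVD_PGFSM_CONFIG fields below carry one power command per UVD/VCN
 * island. The particular values written (1 vs 2 per island) and the status
 * masks waited on follow the hardware's documented power-up sequence for
 * this IP; the encoding itself is hardware-defined.
 */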
static void vcn_v3_0_disable_static_power_gating(struct amdgpu_device *adev, int inst)
{
	uint32_t data = 0;

	if (adev->pg_flags & AMD_PG_SUPPORT_VCN) {
		data = (1 << UVD_PGFSM_CONFIG__UVDM_PWR_CONFIG__SHIFT
			| 1 << UVD_PGFSM_CONFIG__UVDU_PWR_CONFIG__SHIFT
			| 2 << UVD_PGFSM_CONFIG__UVDF_PWR_CONFIG__SHIFT
			| 2 << UVD_PGFSM_CONFIG__UVDC_PWR_CONFIG__SHIFT
			| 2 << UVD_PGFSM_CONFIG__UVDB_PWR_CONFIG__SHIFT
			| 2 << UVD_PGFSM_CONFIG__UVDIRL_PWR_CONFIG__SHIFT
			| 1 << UVD_PGFSM_CONFIG__UVDLM_PWR_CONFIG__SHIFT
			| 2 << UVD_PGFSM_CONFIG__UVDTD_PWR_CONFIG__SHIFT
			| 2 << UVD_PGFSM_CONFIG__UVDTE_PWR_CONFIG__SHIFT
			| 2 << UVD_PGFSM_CONFIG__UVDE_PWR_CONFIG__SHIFT
			| 2 << UVD_PGFSM_CONFIG__UVDAB_PWR_CONFIG__SHIFT
			| 2 << UVD_PGFSM_CONFIG__UVDATD_PWR_CONFIG__SHIFT
			| 2 << UVD_PGFSM_CONFIG__UVDNA_PWR_CONFIG__SHIFT
			| 2 << UVD_PGFSM_CONFIG__UVDNB_PWR_CONFIG__SHIFT);

		WREG32_SOC15(VCN, inst, mmUVD_PGFSM_CONFIG, data);
		SOC15_WAIT_ON_RREG(VCN, inst, mmUVD_PGFSM_STATUS,
			UVD_PGFSM_STATUS__UVDM_UVDU_UVDLM_PWR_ON_3_0, 0x3F3FFFFF);
	} else {
		data = (1 << UVD_PGFSM_CONFIG__UVDM_PWR_CONFIG__SHIFT
			| 1 << UVD_PGFSM_CONFIG__UVDU_PWR_CONFIG__SHIFT
			| 1 << UVD_PGFSM_CONFIG__UVDF_PWR_CONFIG__SHIFT
			| 1 << UVD_PGFSM_CONFIG__UVDC_PWR_CONFIG__SHIFT
			| 1 << UVD_PGFSM_CONFIG__UVDB_PWR_CONFIG__SHIFT
			| 1 << UVD_PGFSM_CONFIG__UVDIRL_PWR_CONFIG__SHIFT
			| 1 << UVD_PGFSM_CONFIG__UVDLM_PWR_CONFIG__SHIFT
			| 1 << UVD_PGFSM_CONFIG__UVDTD_PWR_CONFIG__SHIFT
			| 1 << UVD_PGFSM_CONFIG__UVDTE_PWR_CONFIG__SHIFT
			| 1 << UVD_PGFSM_CONFIG__UVDE_PWR_CONFIG__SHIFT
			| 1 << UVD_PGFSM_CONFIG__UVDAB_PWR_CONFIG__SHIFT
			| 1 << UVD_PGFSM_CONFIG__UVDATD_PWR_CONFIG__SHIFT
			| 1 << UVD_PGFSM_CONFIG__UVDNA_PWR_CONFIG__SHIFT
			| 1 << UVD_PGFSM_CONFIG__UVDNB_PWR_CONFIG__SHIFT);
		WREG32_SOC15(VCN, inst, mmUVD_PGFSM_CONFIG, data);
		SOC15_WAIT_ON_RREG(VCN, inst, mmUVD_PGFSM_STATUS, 0, 0x3F3FFFFF);
	}

	data = RREG32_SOC15(VCN, inst, mmUVD_POWER_STATUS);
	data &= ~0x103;
	if (adev->pg_flags & AMD_PG_SUPPORT_VCN)
		data |= UVD_PGFSM_CONFIG__UVDM_UVDU_PWR_ON |
			UVD_POWER_STATUS__UVD_PG_EN_MASK;

	WREG32_SOC15(VCN, inst, mmUVD_POWER_STATUS, data);
}

static void vcn_v3_0_enable_static_power_gating(struct amdgpu_device *adev, int inst)
{
	uint32_t data;

	if (adev->pg_flags & AMD_PG_SUPPORT_VCN) {
		/* Before power off, this indicator has to be turned on */
		data = RREG32_SOC15(VCN, inst, mmUVD_POWER_STATUS);
		data &= ~UVD_POWER_STATUS__UVD_POWER_STATUS_MASK;
		data |= UVD_POWER_STATUS__UVD_POWER_STATUS_TILES_OFF;
		WREG32_SOC15(VCN, inst, mmUVD_POWER_STATUS, data);

		data = (2 << UVD_PGFSM_CONFIG__UVDM_PWR_CONFIG__SHIFT
			| 2 << UVD_PGFSM_CONFIG__UVDU_PWR_CONFIG__SHIFT
			| 2 << UVD_PGFSM_CONFIG__UVDF_PWR_CONFIG__SHIFT
			| 2 << UVD_PGFSM_CONFIG__UVDC_PWR_CONFIG__SHIFT
			| 2 << UVD_PGFSM_CONFIG__UVDB_PWR_CONFIG__SHIFT
			| 2 << UVD_PGFSM_CONFIG__UVDIRL_PWR_CONFIG__SHIFT
			| 2 << UVD_PGFSM_CONFIG__UVDLM_PWR_CONFIG__SHIFT
			| 2 << UVD_PGFSM_CONFIG__UVDTD_PWR_CONFIG__SHIFT
			| 2 << UVD_PGFSM_CONFIG__UVDTE_PWR_CONFIG__SHIFT
			| 2 << UVD_PGFSM_CONFIG__UVDE_PWR_CONFIG__SHIFT
			| 2 << UVD_PGFSM_CONFIG__UVDAB_PWR_CONFIG__SHIFT
			| 2 << UVD_PGFSM_CONFIG__UVDATD_PWR_CONFIG__SHIFT
			| 2 << UVD_PGFSM_CONFIG__UVDNA_PWR_CONFIG__SHIFT
			| 2 << UVD_PGFSM_CONFIG__UVDNB_PWR_CONFIG__SHIFT);
		WREG32_SOC15(VCN, inst, mmUVD_PGFSM_CONFIG, data);

		data = (2 << UVD_PGFSM_STATUS__UVDM_PWR_STATUS__SHIFT
			| 2 << UVD_PGFSM_STATUS__UVDU_PWR_STATUS__SHIFT
			| 2 << UVD_PGFSM_STATUS__UVDF_PWR_STATUS__SHIFT
			| 2 << UVD_PGFSM_STATUS__UVDC_PWR_STATUS__SHIFT
			| 2 << UVD_PGFSM_STATUS__UVDB_PWR_STATUS__SHIFT
			| 2 << UVD_PGFSM_STATUS__UVDIRL_PWR_STATUS__SHIFT
			| 2 << UVD_PGFSM_STATUS__UVDLM_PWR_STATUS__SHIFT
			| 2 << UVD_PGFSM_STATUS__UVDTD_PWR_STATUS__SHIFT
			| 2 << UVD_PGFSM_STATUS__UVDTE_PWR_STATUS__SHIFT
			| 2 << UVD_PGFSM_STATUS__UVDE_PWR_STATUS__SHIFT
			| 2 << UVD_PGFSM_STATUS__UVDAB_PWR_STATUS__SHIFT
			| 2 << UVD_PGFSM_STATUS__UVDATD_PWR_STATUS__SHIFT
			| 2 << UVD_PGFSM_STATUS__UVDNA_PWR_STATUS__SHIFT
			| 2 << UVD_PGFSM_STATUS__UVDNB_PWR_STATUS__SHIFT);
		SOC15_WAIT_ON_RREG(VCN, inst, mmUVD_PGFSM_STATUS, data, 0x3F3FFFFF);
	}
}

/**
 * vcn_v3_0_disable_clock_gating - disable VCN clock gating
 *
 * @adev: amdgpu_device pointer
 * @inst: instance number
 *
 * Disable clock gating for VCN block
 */
static void vcn_v3_0_disable_clock_gating(struct amdgpu_device *adev, int inst)
{
	uint32_t data;

	/* VCN disable CGC */
	data = RREG32_SOC15(VCN, inst, mmUVD_CGC_CTRL);
	if (adev->cg_flags & AMD_CG_SUPPORT_VCN_MGCG)
		data |= 1 << UVD_CGC_CTRL__DYN_CLOCK_MODE__SHIFT;
	else
		data &= ~UVD_CGC_CTRL__DYN_CLOCK_MODE_MASK;
	data |= 1 << UVD_CGC_CTRL__CLK_GATE_DLY_TIMER__SHIFT;
	data |= 4 << UVD_CGC_CTRL__CLK_OFF_DELAY__SHIFT;
	WREG32_SOC15(VCN, inst, mmUVD_CGC_CTRL, data);

	data = RREG32_SOC15(VCN, inst, mmUVD_CGC_GATE);
	data &= ~(UVD_CGC_GATE__SYS_MASK
		| UVD_CGC_GATE__UDEC_MASK
		| UVD_CGC_GATE__MPEG2_MASK
		| UVD_CGC_GATE__REGS_MASK
		| UVD_CGC_GATE__RBC_MASK
		| UVD_CGC_GATE__LMI_MC_MASK
		| UVD_CGC_GATE__LMI_UMC_MASK
		| UVD_CGC_GATE__IDCT_MASK
		| UVD_CGC_GATE__MPRD_MASK
		| UVD_CGC_GATE__MPC_MASK
		| UVD_CGC_GATE__LBSI_MASK
		| UVD_CGC_GATE__LRBBM_MASK
		| UVD_CGC_GATE__UDEC_RE_MASK
		| UVD_CGC_GATE__UDEC_CM_MASK
		| UVD_CGC_GATE__UDEC_IT_MASK
		| UVD_CGC_GATE__UDEC_DB_MASK
		| UVD_CGC_GATE__UDEC_MP_MASK
		| UVD_CGC_GATE__WCB_MASK
		| UVD_CGC_GATE__VCPU_MASK
		| UVD_CGC_GATE__MMSCH_MASK);

	WREG32_SOC15(VCN, inst, mmUVD_CGC_GATE, data);

	SOC15_WAIT_ON_RREG(VCN, inst, mmUVD_CGC_GATE, 0, 0xFFFFFFFF);

	data = RREG32_SOC15(VCN, inst, mmUVD_CGC_CTRL);
	data &= ~(UVD_CGC_CTRL__UDEC_RE_MODE_MASK
		| UVD_CGC_CTRL__UDEC_CM_MODE_MASK
		| UVD_CGC_CTRL__UDEC_IT_MODE_MASK
		| UVD_CGC_CTRL__UDEC_DB_MODE_MASK
		| UVD_CGC_CTRL__UDEC_MP_MODE_MASK
		| UVD_CGC_CTRL__SYS_MODE_MASK
		| UVD_CGC_CTRL__UDEC_MODE_MASK
		| UVD_CGC_CTRL__MPEG2_MODE_MASK
		| UVD_CGC_CTRL__REGS_MODE_MASK
		| UVD_CGC_CTRL__RBC_MODE_MASK
		| UVD_CGC_CTRL__LMI_MC_MODE_MASK
		| UVD_CGC_CTRL__LMI_UMC_MODE_MASK
		| UVD_CGC_CTRL__IDCT_MODE_MASK
		| UVD_CGC_CTRL__MPRD_MODE_MASK
		| UVD_CGC_CTRL__MPC_MODE_MASK
		| UVD_CGC_CTRL__LBSI_MODE_MASK
		| UVD_CGC_CTRL__LRBBM_MODE_MASK
		| UVD_CGC_CTRL__WCB_MODE_MASK
		| UVD_CGC_CTRL__VCPU_MODE_MASK
		| UVD_CGC_CTRL__MMSCH_MODE_MASK);
	WREG32_SOC15(VCN, inst, mmUVD_CGC_CTRL, data);

	data = RREG32_SOC15(VCN, inst, mmUVD_SUVD_CGC_GATE);
	data |= (UVD_SUVD_CGC_GATE__SRE_MASK
		| UVD_SUVD_CGC_GATE__SIT_MASK
		| UVD_SUVD_CGC_GATE__SMP_MASK
		| UVD_SUVD_CGC_GATE__SCM_MASK
		| UVD_SUVD_CGC_GATE__SDB_MASK
		| UVD_SUVD_CGC_GATE__SRE_H264_MASK
		| UVD_SUVD_CGC_GATE__SRE_HEVC_MASK
		| UVD_SUVD_CGC_GATE__SIT_H264_MASK
		| UVD_SUVD_CGC_GATE__SIT_HEVC_MASK
		| UVD_SUVD_CGC_GATE__SCM_H264_MASK
		| UVD_SUVD_CGC_GATE__SCM_HEVC_MASK
		| UVD_SUVD_CGC_GATE__SDB_H264_MASK
		| UVD_SUVD_CGC_GATE__SDB_HEVC_MASK
		| UVD_SUVD_CGC_GATE__SCLR_MASK
		| UVD_SUVD_CGC_GATE__ENT_MASK
		| UVD_SUVD_CGC_GATE__IME_MASK
		| UVD_SUVD_CGC_GATE__SIT_HEVC_DEC_MASK
		| UVD_SUVD_CGC_GATE__SIT_HEVC_ENC_MASK
		| UVD_SUVD_CGC_GATE__SITE_MASK
		| UVD_SUVD_CGC_GATE__SRE_VP9_MASK
		| UVD_SUVD_CGC_GATE__SCM_VP9_MASK
		| UVD_SUVD_CGC_GATE__SIT_VP9_DEC_MASK
		| UVD_SUVD_CGC_GATE__SDB_VP9_MASK
		| UVD_SUVD_CGC_GATE__IME_HEVC_MASK
		| UVD_SUVD_CGC_GATE__EFC_MASK
		| UVD_SUVD_CGC_GATE__SAOE_MASK
		| UVD_SUVD_CGC_GATE__SRE_AV1_MASK
		| UVD_SUVD_CGC_GATE__FBC_PCLK_MASK
		| UVD_SUVD_CGC_GATE__FBC_CCLK_MASK
		| UVD_SUVD_CGC_GATE__SCM_AV1_MASK
		| UVD_SUVD_CGC_GATE__SMPA_MASK);
	WREG32_SOC15(VCN, inst, mmUVD_SUVD_CGC_GATE, data);

	data = RREG32_SOC15(VCN, inst, mmUVD_SUVD_CGC_GATE2);
	data |= (UVD_SUVD_CGC_GATE2__MPBE0_MASK
		| UVD_SUVD_CGC_GATE2__MPBE1_MASK
		| UVD_SUVD_CGC_GATE2__SIT_AV1_MASK
		| UVD_SUVD_CGC_GATE2__SDB_AV1_MASK
		| UVD_SUVD_CGC_GATE2__MPC1_MASK);
	WREG32_SOC15(VCN, inst, mmUVD_SUVD_CGC_GATE2, data);

	data = RREG32_SOC15(VCN, inst, mmUVD_SUVD_CGC_CTRL);
	data &= ~(UVD_SUVD_CGC_CTRL__SRE_MODE_MASK
		| UVD_SUVD_CGC_CTRL__SIT_MODE_MASK
		| UVD_SUVD_CGC_CTRL__SMP_MODE_MASK
		| UVD_SUVD_CGC_CTRL__SCM_MODE_MASK
		| UVD_SUVD_CGC_CTRL__SDB_MODE_MASK
		| UVD_SUVD_CGC_CTRL__SCLR_MODE_MASK
		| UVD_SUVD_CGC_CTRL__ENT_MODE_MASK
		| UVD_SUVD_CGC_CTRL__IME_MODE_MASK
		| UVD_SUVD_CGC_CTRL__SITE_MODE_MASK
		| UVD_SUVD_CGC_CTRL__EFC_MODE_MASK
		| UVD_SUVD_CGC_CTRL__SAOE_MODE_MASK
		| UVD_SUVD_CGC_CTRL__SMPA_MODE_MASK
		| UVD_SUVD_CGC_CTRL__MPBE0_MODE_MASK
		| UVD_SUVD_CGC_CTRL__MPBE1_MODE_MASK
		| UVD_SUVD_CGC_CTRL__SIT_AV1_MODE_MASK
		| UVD_SUVD_CGC_CTRL__SDB_AV1_MODE_MASK
		| UVD_SUVD_CGC_CTRL__MPC1_MODE_MASK
		| UVD_SUVD_CGC_CTRL__FBC_PCLK_MASK
		| UVD_SUVD_CGC_CTRL__FBC_CCLK_MASK);
	WREG32_SOC15(VCN, inst, mmUVD_SUVD_CGC_CTRL, data);
}

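/*
 * vcn_v3_0_clock_gating_dpg_mode - program SW clock gating via the DPG path
 *
 * Mirrors the CGC programming above, but the writes go through
 * WREG32_SOC15_DPG_MODE() so they can be staged in the DPG SRAM during
 * dynamic power-gated bring-up.
 */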
static void vcn_v3_0_clock_gating_dpg_mode(struct amdgpu_device *adev,
		uint8_t sram_sel, int inst_idx, uint8_t indirect)
{
	uint32_t reg_data = 0;

	/* enable sw clock gating control */
	if (adev->cg_flags & AMD_CG_SUPPORT_VCN_MGCG)
		reg_data = 1 << UVD_CGC_CTRL__DYN_CLOCK_MODE__SHIFT;
	else
		reg_data = 0 << UVD_CGC_CTRL__DYN_CLOCK_MODE__SHIFT;
	reg_data |= 1 << UVD_CGC_CTRL__CLK_GATE_DLY_TIMER__SHIFT;
	reg_data |= 4 << UVD_CGC_CTRL__CLK_OFF_DELAY__SHIFT;
	reg_data &= ~(UVD_CGC_CTRL__UDEC_RE_MODE_MASK |
		 UVD_CGC_CTRL__UDEC_CM_MODE_MASK |
		 UVD_CGC_CTRL__UDEC_IT_MODE_MASK |
		 UVD_CGC_CTRL__UDEC_DB_MODE_MASK |
		 UVD_CGC_CTRL__UDEC_MP_MODE_MASK |
		 UVD_CGC_CTRL__SYS_MODE_MASK |
		 UVD_CGC_CTRL__UDEC_MODE_MASK |
		 UVD_CGC_CTRL__MPEG2_MODE_MASK |
		 UVD_CGC_CTRL__REGS_MODE_MASK |
		 UVD_CGC_CTRL__RBC_MODE_MASK |
		 UVD_CGC_CTRL__LMI_MC_MODE_MASK |
		 UVD_CGC_CTRL__LMI_UMC_MODE_MASK |
		 UVD_CGC_CTRL__IDCT_MODE_MASK |
		 UVD_CGC_CTRL__MPRD_MODE_MASK |
		 UVD_CGC_CTRL__MPC_MODE_MASK |
		 UVD_CGC_CTRL__LBSI_MODE_MASK |
		 UVD_CGC_CTRL__LRBBM_MODE_MASK |
		 UVD_CGC_CTRL__WCB_MODE_MASK |
		 UVD_CGC_CTRL__VCPU_MODE_MASK |
		 UVD_CGC_CTRL__MMSCH_MODE_MASK);
	WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
		VCN, inst_idx, mmUVD_CGC_CTRL), reg_data, sram_sel, indirect);

	/* turn off clock gating */
	WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
		VCN, inst_idx, mmUVD_CGC_GATE), 0, sram_sel, indirect);

	/* turn on SUVD clock gating */
	WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
		VCN, inst_idx, mmUVD_SUVD_CGC_GATE), 1, sram_sel, indirect);

	/* turn on sw mode in UVD_SUVD_CGC_CTRL */
	WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
		VCN, inst_idx, mmUVD_SUVD_CGC_CTRL), 0, sram_sel, indirect);
}

/**
 * vcn_v3_0_enable_clock_gating - enable VCN clock gating
 *
 * @adev: amdgpu_device pointer
 * @inst: instance number
 *
 * Enable clock gating for VCN block
 */
static void vcn_v3_0_enable_clock_gating(struct amdgpu_device *adev, int inst)
{
	uint32_t data;

	/* enable VCN CGC */
	data = RREG32_SOC15(VCN, inst, mmUVD_CGC_CTRL);
	if (adev->cg_flags & AMD_CG_SUPPORT_VCN_MGCG)
		data |= 1 << UVD_CGC_CTRL__DYN_CLOCK_MODE__SHIFT;
	else
		data |= 0 << UVD_CGC_CTRL__DYN_CLOCK_MODE__SHIFT;
	data |= 1 << UVD_CGC_CTRL__CLK_GATE_DLY_TIMER__SHIFT;
	data |= 4 << UVD_CGC_CTRL__CLK_OFF_DELAY__SHIFT;
	WREG32_SOC15(VCN, inst, mmUVD_CGC_CTRL, data);

	data = RREG32_SOC15(VCN, inst, mmUVD_CGC_CTRL);
	data |= (UVD_CGC_CTRL__UDEC_RE_MODE_MASK
		| UVD_CGC_CTRL__UDEC_CM_MODE_MASK
		| UVD_CGC_CTRL__UDEC_IT_MODE_MASK
		| UVD_CGC_CTRL__UDEC_DB_MODE_MASK
		| UVD_CGC_CTRL__UDEC_MP_MODE_MASK
		| UVD_CGC_CTRL__SYS_MODE_MASK
		| UVD_CGC_CTRL__UDEC_MODE_MASK
		| UVD_CGC_CTRL__MPEG2_MODE_MASK
		| UVD_CGC_CTRL__REGS_MODE_MASK
		| UVD_CGC_CTRL__RBC_MODE_MASK
		| UVD_CGC_CTRL__LMI_MC_MODE_MASK
		| UVD_CGC_CTRL__LMI_UMC_MODE_MASK
		| UVD_CGC_CTRL__IDCT_MODE_MASK
		| UVD_CGC_CTRL__MPRD_MODE_MASK
		| UVD_CGC_CTRL__MPC_MODE_MASK
		| UVD_CGC_CTRL__LBSI_MODE_MASK
		| UVD_CGC_CTRL__LRBBM_MODE_MASK
		| UVD_CGC_CTRL__WCB_MODE_MASK
		| UVD_CGC_CTRL__VCPU_MODE_MASK
		| UVD_CGC_CTRL__MMSCH_MODE_MASK);
	WREG32_SOC15(VCN, inst, mmUVD_CGC_CTRL, data);

	data = RREG32_SOC15(VCN, inst, mmUVD_SUVD_CGC_CTRL);
	data |= (UVD_SUVD_CGC_CTRL__SRE_MODE_MASK
		| UVD_SUVD_CGC_CTRL__SIT_MODE_MASK
		| UVD_SUVD_CGC_CTRL__SMP_MODE_MASK
		| UVD_SUVD_CGC_CTRL__SCM_MODE_MASK
		| UVD_SUVD_CGC_CTRL__SDB_MODE_MASK
		| UVD_SUVD_CGC_CTRL__SCLR_MODE_MASK
		| UVD_SUVD_CGC_CTRL__ENT_MODE_MASK
		| UVD_SUVD_CGC_CTRL__IME_MODE_MASK
		| UVD_SUVD_CGC_CTRL__SITE_MODE_MASK
		| UVD_SUVD_CGC_CTRL__EFC_MODE_MASK
		| UVD_SUVD_CGC_CTRL__SAOE_MODE_MASK
		| UVD_SUVD_CGC_CTRL__SMPA_MODE_MASK
		| UVD_SUVD_CGC_CTRL__MPBE0_MODE_MASK
		| UVD_SUVD_CGC_CTRL__MPBE1_MODE_MASK
		| UVD_SUVD_CGC_CTRL__SIT_AV1_MODE_MASK
		| UVD_SUVD_CGC_CTRL__SDB_AV1_MODE_MASK
		| UVD_SUVD_CGC_CTRL__MPC1_MODE_MASK
		| UVD_SUVD_CGC_CTRL__FBC_PCLK_MASK
		| UVD_SUVD_CGC_CTRL__FBC_CCLK_MASK);
	WREG32_SOC15(VCN, inst, mmUVD_SUVD_CGC_CTRL, data);
}

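/*
 * vcn_v3_0_start_dpg_mode - start a VCN instance with dynamic power gating
 *
 * DPG bring-up sequence: when @indirect is true, most register programming
 * is staged in the DPG SRAM and handed to the PSP (psp_update_vcn_sram())
 * before the decode ring buffer is initialized.
 */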
static int vcn_v3_0_start_dpg_mode(struct amdgpu_device *adev, int inst_idx, bool indirect)
{
	volatile struct amdgpu_fw_shared *fw_shared = adev->vcn.inst[inst_idx].fw_shared_cpu_addr;
	struct amdgpu_ring *ring;
	uint32_t rb_bufsz, tmp;

	/* disable register anti-hang mechanism */
	WREG32_P(SOC15_REG_OFFSET(VCN, inst_idx, mmUVD_POWER_STATUS), 1,
		~UVD_POWER_STATUS__UVD_POWER_STATUS_MASK);
	/* enable dynamic power gating mode */
	tmp = RREG32_SOC15(VCN, inst_idx, mmUVD_POWER_STATUS);
	tmp |= UVD_POWER_STATUS__UVD_PG_MODE_MASK;
	tmp |= UVD_POWER_STATUS__UVD_PG_EN_MASK;
	WREG32_SOC15(VCN, inst_idx, mmUVD_POWER_STATUS, tmp);

	if (indirect)
		adev->vcn.inst[inst_idx].dpg_sram_curr_addr = (uint32_t *)adev->vcn.inst[inst_idx].dpg_sram_cpu_addr;

	/* enable clock gating */
	vcn_v3_0_clock_gating_dpg_mode(adev, 0, inst_idx, indirect);

	/* enable VCPU clock */
	tmp = (0xFF << UVD_VCPU_CNTL__PRB_TIMEOUT_VAL__SHIFT);
	tmp |= UVD_VCPU_CNTL__CLK_EN_MASK;
	tmp |= UVD_VCPU_CNTL__BLK_RST_MASK;
	WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
		VCN, inst_idx, mmUVD_VCPU_CNTL), tmp, 0, indirect);

	/* disable master interrupt */
	WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
		VCN, inst_idx, mmUVD_MASTINT_EN), 0, 0, indirect);

	/* setup mmUVD_LMI_CTRL */
	tmp = (0x8 | UVD_LMI_CTRL__WRITE_CLEAN_TIMER_EN_MASK |
		UVD_LMI_CTRL__REQ_MODE_MASK |
		UVD_LMI_CTRL__CRC_RESET_MASK |
		UVD_LMI_CTRL__MASK_MC_URGENT_MASK |
		UVD_LMI_CTRL__DATA_COHERENCY_EN_MASK |
		UVD_LMI_CTRL__VCPU_DATA_COHERENCY_EN_MASK |
		(8 << UVD_LMI_CTRL__WRITE_CLEAN_TIMER__SHIFT) |
		0x00100000L);
	WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
		VCN, inst_idx, mmUVD_LMI_CTRL), tmp, 0, indirect);

	WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
		VCN, inst_idx, mmUVD_MPC_CNTL),
		0x2 << UVD_MPC_CNTL__REPLACEMENT_MODE__SHIFT, 0, indirect);

	WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
		VCN, inst_idx, mmUVD_MPC_SET_MUXA0),
		((0x1 << UVD_MPC_SET_MUXA0__VARA_1__SHIFT) |
		 (0x2 << UVD_MPC_SET_MUXA0__VARA_2__SHIFT) |
		 (0x3 << UVD_MPC_SET_MUXA0__VARA_3__SHIFT) |
		 (0x4 << UVD_MPC_SET_MUXA0__VARA_4__SHIFT)), 0, indirect);

	WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
		VCN, inst_idx, mmUVD_MPC_SET_MUXB0),
		((0x1 << UVD_MPC_SET_MUXB0__VARB_1__SHIFT) |
		 (0x2 << UVD_MPC_SET_MUXB0__VARB_2__SHIFT) |
		 (0x3 << UVD_MPC_SET_MUXB0__VARB_3__SHIFT) |
		 (0x4 << UVD_MPC_SET_MUXB0__VARB_4__SHIFT)), 0, indirect);

	WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
		VCN, inst_idx, mmUVD_MPC_SET_MUX),
		((0x0 << UVD_MPC_SET_MUX__SET_0__SHIFT) |
		 (0x1 << UVD_MPC_SET_MUX__SET_1__SHIFT) |
		 (0x2 << UVD_MPC_SET_MUX__SET_2__SHIFT)), 0, indirect);

	vcn_v3_0_mc_resume_dpg_mode(adev, inst_idx, indirect);

	WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
		VCN, inst_idx, mmUVD_REG_XX_MASK), 0x10, 0, indirect);
	WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
		VCN, inst_idx, mmUVD_RBC_XX_IB_REG_CHECK), 0x3, 0, indirect);

	/* enable LMI MC and UMC channels */
	WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
		VCN, inst_idx, mmUVD_LMI_CTRL2), 0, 0, indirect);

	/* unblock VCPU register access */
	WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
		VCN, inst_idx, mmUVD_RB_ARB_CTRL), 0, 0, indirect);

	tmp = (0xFF << UVD_VCPU_CNTL__PRB_TIMEOUT_VAL__SHIFT);
	tmp |= UVD_VCPU_CNTL__CLK_EN_MASK;
	WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
		VCN, inst_idx, mmUVD_VCPU_CNTL), tmp, 0, indirect);

	/* enable master interrupt */
	WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
		VCN, inst_idx, mmUVD_MASTINT_EN),
		UVD_MASTINT_EN__VCPU_EN_MASK, 0, indirect);

	/* add nop to workaround PSP size check */
	WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
		VCN, inst_idx, mmUVD_VCPU_CNTL), tmp, 0, indirect);

	if (indirect)
		psp_update_vcn_sram(adev, inst_idx, adev->vcn.inst[inst_idx].dpg_sram_gpu_addr,
			(uint32_t)((uintptr_t)adev->vcn.inst[inst_idx].dpg_sram_curr_addr -
				   (uintptr_t)adev->vcn.inst[inst_idx].dpg_sram_cpu_addr));

	ring = &adev->vcn.inst[inst_idx].ring_dec;
	/* force RBC into idle state */
	rb_bufsz = order_base_2(ring->ring_size);
	tmp = REG_SET_FIELD(0, UVD_RBC_RB_CNTL, RB_BUFSZ, rb_bufsz);
	tmp = REG_SET_FIELD(tmp, UVD_RBC_RB_CNTL, RB_BLKSZ, 1);
	tmp = REG_SET_FIELD(tmp, UVD_RBC_RB_CNTL, RB_NO_FETCH, 1);
	tmp = REG_SET_FIELD(tmp, UVD_RBC_RB_CNTL, RB_NO_UPDATE, 1);
	tmp = REG_SET_FIELD(tmp, UVD_RBC_RB_CNTL, RB_RPTR_WR_EN, 1);
	WREG32_SOC15(VCN, inst_idx, mmUVD_RBC_RB_CNTL, tmp);

	/* Stall DPG before WPTR/RPTR reset */
	WREG32_P(SOC15_REG_OFFSET(VCN, inst_idx, mmUVD_POWER_STATUS),
		UVD_POWER_STATUS__STALL_DPG_POWER_UP_MASK,
		~UVD_POWER_STATUS__STALL_DPG_POWER_UP_MASK);
	fw_shared->multi_queue.decode_queue_mode |= cpu_to_le32(FW_QUEUE_RING_RESET);

	/* set the write pointer delay */
	WREG32_SOC15(VCN, inst_idx, mmUVD_RBC_RB_WPTR_CNTL, 0);

	/* set the wb address */
	WREG32_SOC15(VCN, inst_idx, mmUVD_RBC_RB_RPTR_ADDR,
		(upper_32_bits(ring->gpu_addr) >> 2));

	/* program the RB_BASE for ring buffer */
	WREG32_SOC15(VCN, inst_idx, mmUVD_LMI_RBC_RB_64BIT_BAR_LOW,
		lower_32_bits(ring->gpu_addr));
	WREG32_SOC15(VCN, inst_idx, mmUVD_LMI_RBC_RB_64BIT_BAR_HIGH,
		upper_32_bits(ring->gpu_addr));

	/* Initialize the ring buffer's read and write pointers */
	WREG32_SOC15(VCN, inst_idx, mmUVD_RBC_RB_RPTR, 0);

	WREG32_SOC15(VCN, inst_idx, mmUVD_SCRATCH2, 0);

	ring->wptr = RREG32_SOC15(VCN, inst_idx, mmUVD_RBC_RB_RPTR);
	WREG32_SOC15(VCN, inst_idx, mmUVD_RBC_RB_WPTR,
		lower_32_bits(ring->wptr));

	/* Reset FW shared memory RBC WPTR/RPTR */
	fw_shared->rb.rptr = 0;
	fw_shared->rb.wptr = lower_32_bits(ring->wptr);

	/* resetting done, fw can check RB ring */
	fw_shared->multi_queue.decode_queue_mode &= cpu_to_le32(~FW_QUEUE_RING_RESET);

	/* Unstall DPG */
	WREG32_P(SOC15_REG_OFFSET(VCN, inst_idx, mmUVD_POWER_STATUS),
		0, ~UVD_POWER_STATUS__STALL_DPG_POWER_UP_MASK);

	return 0;
}

static int vcn_v3_0_start(struct amdgpu_device *adev)
{
	volatile struct amdgpu_fw_shared *fw_shared;
	struct amdgpu_ring *ring;
	uint32_t rb_bufsz, tmp;
	int i, j, k, r;

	if (adev->pm.dpm_enabled)
		amdgpu_dpm_enable_uvd(adev, true);

	for (i = 0; i < adev->vcn.num_vcn_inst; ++i) {
		if (adev->vcn.harvest_config & (1 << i))
			continue;

		if (adev->pg_flags & AMD_PG_SUPPORT_VCN_DPG) {
			r = vcn_v3_0_start_dpg_mode(adev, i, adev->vcn.indirect_sram);
			continue;
		}

		/* disable VCN power gating */
		vcn_v3_0_disable_static_power_gating(adev, i);

		/* set VCN status busy */
		tmp = RREG32_SOC15(VCN, i, mmUVD_STATUS) | UVD_STATUS__UVD_BUSY;
		WREG32_SOC15(VCN, i, mmUVD_STATUS, tmp);

		/* SW clock gating */
		vcn_v3_0_disable_clock_gating(adev, i);

		/* enable VCPU clock */
		WREG32_P(SOC15_REG_OFFSET(VCN, i, mmUVD_VCPU_CNTL),
			UVD_VCPU_CNTL__CLK_EN_MASK, ~UVD_VCPU_CNTL__CLK_EN_MASK);

		/* disable master interrupt */
		WREG32_P(SOC15_REG_OFFSET(VCN, i, mmUVD_MASTINT_EN), 0,
			~UVD_MASTINT_EN__VCPU_EN_MASK);

		/* enable LMI MC and UMC channels */
		WREG32_P(SOC15_REG_OFFSET(VCN, i, mmUVD_LMI_CTRL2), 0,
			~UVD_LMI_CTRL2__STALL_ARB_UMC_MASK);

		tmp = RREG32_SOC15(VCN, i, mmUVD_SOFT_RESET);
		tmp &= ~UVD_SOFT_RESET__LMI_SOFT_RESET_MASK;
		tmp &= ~UVD_SOFT_RESET__LMI_UMC_SOFT_RESET_MASK;
		WREG32_SOC15(VCN, i, mmUVD_SOFT_RESET, tmp);

		/* setup mmUVD_LMI_CTRL */
		tmp = RREG32_SOC15(VCN, i, mmUVD_LMI_CTRL);
		WREG32_SOC15(VCN, i, mmUVD_LMI_CTRL, tmp |
			UVD_LMI_CTRL__WRITE_CLEAN_TIMER_EN_MASK |
			UVD_LMI_CTRL__MASK_MC_URGENT_MASK |
			UVD_LMI_CTRL__DATA_COHERENCY_EN_MASK |
			UVD_LMI_CTRL__VCPU_DATA_COHERENCY_EN_MASK);

		/* setup mmUVD_MPC_CNTL */
		tmp = RREG32_SOC15(VCN, i, mmUVD_MPC_CNTL);
		tmp &= ~UVD_MPC_CNTL__REPLACEMENT_MODE_MASK;
		tmp |= 0x2 << UVD_MPC_CNTL__REPLACEMENT_MODE__SHIFT;
		WREG32_SOC15(VCN, i, mmUVD_MPC_CNTL, tmp);

		/* setup UVD_MPC_SET_MUXA0 */
		WREG32_SOC15(VCN, i, mmUVD_MPC_SET_MUXA0,
			((0x1 << UVD_MPC_SET_MUXA0__VARA_1__SHIFT) |
			 (0x2 << UVD_MPC_SET_MUXA0__VARA_2__SHIFT) |
			 (0x3 << UVD_MPC_SET_MUXA0__VARA_3__SHIFT) |
			 (0x4 << UVD_MPC_SET_MUXA0__VARA_4__SHIFT)));

		/* setup UVD_MPC_SET_MUXB0 */
		WREG32_SOC15(VCN, i, mmUVD_MPC_SET_MUXB0,
			((0x1 << UVD_MPC_SET_MUXB0__VARB_1__SHIFT) |
			 (0x2 << UVD_MPC_SET_MUXB0__VARB_2__SHIFT) |
			 (0x3 << UVD_MPC_SET_MUXB0__VARB_3__SHIFT) |
			 (0x4 << UVD_MPC_SET_MUXB0__VARB_4__SHIFT)));

		/* setup mmUVD_MPC_SET_MUX */
		WREG32_SOC15(VCN, i, mmUVD_MPC_SET_MUX,
			((0x0 << UVD_MPC_SET_MUX__SET_0__SHIFT) |
			 (0x1 << UVD_MPC_SET_MUX__SET_1__SHIFT) |
			 (0x2 << UVD_MPC_SET_MUX__SET_2__SHIFT)));

		vcn_v3_0_mc_resume(adev, i);

		/* VCN global tiling registers */
		WREG32_SOC15(VCN, i, mmUVD_GFX10_ADDR_CONFIG,
			adev->gfx.config.gb_addr_config);

		/* unblock VCPU register access */
		WREG32_P(SOC15_REG_OFFSET(VCN, i, mmUVD_RB_ARB_CTRL), 0,
			~UVD_RB_ARB_CTRL__VCPU_DIS_MASK);

		/* release VCPU reset to boot */
		WREG32_P(SOC15_REG_OFFSET(VCN, i, mmUVD_VCPU_CNTL), 0,
			~UVD_VCPU_CNTL__BLK_RST_MASK);

		for (j = 0; j < 10; ++j) {
			uint32_t status;

			for (k = 0; k < 100; ++k) {
				status = RREG32_SOC15(VCN, i, mmUVD_STATUS);
				if (status & 2)
					break;
				mdelay(10);
			}
			r = 0;
			if (status & 2)
				break;

			DRM_ERROR("VCN[%d] decode not responding, trying to reset the VCPU!!!\n", i);
			WREG32_P(SOC15_REG_OFFSET(VCN, i, mmUVD_VCPU_CNTL),
				UVD_VCPU_CNTL__BLK_RST_MASK,
				~UVD_VCPU_CNTL__BLK_RST_MASK);
			mdelay(10);
			WREG32_P(SOC15_REG_OFFSET(VCN, i, mmUVD_VCPU_CNTL), 0,
				~UVD_VCPU_CNTL__BLK_RST_MASK);

			mdelay(10);
			r = -1;
		}

		if (r) {
			DRM_ERROR("VCN[%d] decode not responding, giving up!!!\n", i);
			return r;
		}

		/* enable master interrupt */
		WREG32_P(SOC15_REG_OFFSET(VCN, i, mmUVD_MASTINT_EN),
			UVD_MASTINT_EN__VCPU_EN_MASK,
			~UVD_MASTINT_EN__VCPU_EN_MASK);

		/* clear the busy bit of VCN_STATUS */
		WREG32_P(SOC15_REG_OFFSET(VCN, i, mmUVD_STATUS), 0,
			~(2 << UVD_STATUS__VCPU_REPORT__SHIFT));

		WREG32_SOC15(VCN, i, mmUVD_LMI_RBC_RB_VMID, 0);

		ring = &adev->vcn.inst[i].ring_dec;
		/* force RBC into idle state */
		rb_bufsz = order_base_2(ring->ring_size);
		tmp = REG_SET_FIELD(0, UVD_RBC_RB_CNTL, RB_BUFSZ, rb_bufsz);
		tmp = REG_SET_FIELD(tmp, UVD_RBC_RB_CNTL, RB_BLKSZ, 1);
		tmp = REG_SET_FIELD(tmp, UVD_RBC_RB_CNTL, RB_NO_FETCH, 1);
		tmp = REG_SET_FIELD(tmp, UVD_RBC_RB_CNTL, RB_NO_UPDATE, 1);
		tmp = REG_SET_FIELD(tmp, UVD_RBC_RB_CNTL, RB_RPTR_WR_EN, 1);
		WREG32_SOC15(VCN, i, mmUVD_RBC_RB_CNTL, tmp);

		fw_shared = adev->vcn.inst[i].fw_shared_cpu_addr;
		fw_shared->multi_queue.decode_queue_mode |= cpu_to_le32(FW_QUEUE_RING_RESET);

		/* program the RB_BASE for ring buffer */
		WREG32_SOC15(VCN, i, mmUVD_LMI_RBC_RB_64BIT_BAR_LOW,
			lower_32_bits(ring->gpu_addr));
		WREG32_SOC15(VCN, i, mmUVD_LMI_RBC_RB_64BIT_BAR_HIGH,
			upper_32_bits(ring->gpu_addr));

		/* Initialize the ring buffer's read and write pointers */
		WREG32_SOC15(VCN, i, mmUVD_RBC_RB_RPTR, 0);

		WREG32_SOC15(VCN, i, mmUVD_SCRATCH2, 0);
		ring->wptr = RREG32_SOC15(VCN, i, mmUVD_RBC_RB_RPTR);
		WREG32_SOC15(VCN, i, mmUVD_RBC_RB_WPTR,
			lower_32_bits(ring->wptr));
		fw_shared->rb.wptr = lower_32_bits(ring->wptr);
		fw_shared->multi_queue.decode_queue_mode &= cpu_to_le32(~FW_QUEUE_RING_RESET);

		if (adev->asic_type != CHIP_BEIGE_GOBY) {
			fw_shared->multi_queue.encode_generalpurpose_queue_mode |= cpu_to_le32(FW_QUEUE_RING_RESET);
			ring = &adev->vcn.inst[i].ring_enc[0];
			WREG32_SOC15(VCN, i, mmUVD_RB_RPTR, lower_32_bits(ring->wptr));
			WREG32_SOC15(VCN, i, mmUVD_RB_WPTR, lower_32_bits(ring->wptr));
			WREG32_SOC15(VCN, i, mmUVD_RB_BASE_LO, ring->gpu_addr);
			WREG32_SOC15(VCN, i, mmUVD_RB_BASE_HI, upper_32_bits(ring->gpu_addr));
			WREG32_SOC15(VCN, i, mmUVD_RB_SIZE, ring->ring_size / 4);
			fw_shared->multi_queue.encode_generalpurpose_queue_mode &= cpu_to_le32(~FW_QUEUE_RING_RESET);

			fw_shared->multi_queue.encode_lowlatency_queue_mode |= cpu_to_le32(FW_QUEUE_RING_RESET);
			ring = &adev->vcn.inst[i].ring_enc[1];
			WREG32_SOC15(VCN, i, mmUVD_RB_RPTR2, lower_32_bits(ring->wptr));
			WREG32_SOC15(VCN, i, mmUVD_RB_WPTR2, lower_32_bits(ring->wptr));
			WREG32_SOC15(VCN, i, mmUVD_RB_BASE_LO2, ring->gpu_addr);
			WREG32_SOC15(VCN, i, mmUVD_RB_BASE_HI2, upper_32_bits(ring->gpu_addr));
			WREG32_SOC15(VCN, i, mmUVD_RB_SIZE2, ring->ring_size / 4);
			fw_shared->multi_queue.encode_lowlatency_queue_mode &= cpu_to_le32(~FW_QUEUE_RING_RESET);
		}
	}

	return 0;
}

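/*
 * Under SR-IOV the guest cannot program most VCN registers directly at init
 * time. Instead it builds a command table of direct-write and
 * read-modify-write packets in the mm_table memory and asks the MMSCH
 * (multimedia scheduler) firmware to replay it on the guest's behalf.
 */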
static int vcn_v3_0_start_sriov(struct amdgpu_device *adev)
{
	int i, j;
	struct amdgpu_ring *ring;
	uint64_t cache_addr;
	uint64_t rb_addr;
	uint64_t ctx_addr;
	uint32_t param, resp, expected;
	uint32_t offset, cache_size;
	uint32_t tmp, timeout;
	uint32_t id;
	struct amdgpu_mm_table *table = &adev->virt.mm_table;
	uint32_t *table_loc;
	uint32_t table_size;
	uint32_t size, size_dw;

	struct mmsch_v3_0_cmd_direct_write
		direct_wt = { {0} };
	struct mmsch_v3_0_cmd_direct_read_modify_write
		direct_rd_mod_wt = { {0} };
	struct mmsch_v3_0_cmd_end end = { {0} };
	struct mmsch_v3_0_init_header header;

	direct_wt.cmd_header.command_type =
		MMSCH_COMMAND__DIRECT_REG_WRITE;
	direct_rd_mod_wt.cmd_header.command_type =
		MMSCH_COMMAND__DIRECT_REG_READ_MODIFY_WRITE;
	end.cmd_header.command_type =
		MMSCH_COMMAND__END;

	header.version = MMSCH_VERSION;
	header.total_size = sizeof(struct mmsch_v3_0_init_header) >> 2;
	for (i = 0; i < AMDGPU_MAX_VCN_INSTANCES; i++) {
		header.inst[i].init_status = 0;
		header.inst[i].table_offset = 0;
		header.inst[i].table_size = 0;
	}

	table_loc = (uint32_t *)table->cpu_addr;
	table_loc += header.total_size;
	for (i = 0; i < adev->vcn.num_vcn_inst; i++) {
		if (adev->vcn.harvest_config & (1 << i))
			continue;

		table_size = 0;

		MMSCH_V3_0_INSERT_DIRECT_RD_MOD_WT(SOC15_REG_OFFSET(VCN, i,
			mmUVD_STATUS),
			~UVD_STATUS__UVD_BUSY, UVD_STATUS__UVD_BUSY);

		cache_size = AMDGPU_GPU_PAGE_ALIGN(adev->vcn.fw->size + 4);

		if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) {
			id = amdgpu_ucode_id_vcns[i];
			MMSCH_V3_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(VCN, i,
				mmUVD_LMI_VCPU_CACHE_64BIT_BAR_LOW),
				adev->firmware.ucode[id].tmr_mc_addr_lo);
			MMSCH_V3_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(VCN, i,
				mmUVD_LMI_VCPU_CACHE_64BIT_BAR_HIGH),
				adev->firmware.ucode[id].tmr_mc_addr_hi);
			offset = 0;
			MMSCH_V3_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(VCN, i,
				mmUVD_VCPU_CACHE_OFFSET0),
				0);
		} else {
			MMSCH_V3_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(VCN, i,
				mmUVD_LMI_VCPU_CACHE_64BIT_BAR_LOW),
				lower_32_bits(adev->vcn.inst[i].gpu_addr));
			MMSCH_V3_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(VCN, i,
				mmUVD_LMI_VCPU_CACHE_64BIT_BAR_HIGH),
				upper_32_bits(adev->vcn.inst[i].gpu_addr));
			offset = cache_size;
			MMSCH_V3_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(VCN, i,
				mmUVD_VCPU_CACHE_OFFSET0),
				AMDGPU_UVD_FIRMWARE_OFFSET >> 3);
		}

		MMSCH_V3_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(VCN, i,
			mmUVD_VCPU_CACHE_SIZE0),
			cache_size);

		cache_addr = adev->vcn.inst[i].gpu_addr + offset;
		MMSCH_V3_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(VCN, i,
			mmUVD_LMI_VCPU_CACHE1_64BIT_BAR_LOW),
			lower_32_bits(cache_addr));
		MMSCH_V3_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(VCN, i,
			mmUVD_LMI_VCPU_CACHE1_64BIT_BAR_HIGH),
			upper_32_bits(cache_addr));
		MMSCH_V3_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(VCN, i,
			mmUVD_VCPU_CACHE_OFFSET1),
			0);
		MMSCH_V3_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(VCN, i,
			mmUVD_VCPU_CACHE_SIZE1),
			AMDGPU_VCN_STACK_SIZE);

		cache_addr = adev->vcn.inst[i].gpu_addr + offset +
			AMDGPU_VCN_STACK_SIZE;
		MMSCH_V3_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(VCN, i,
			mmUVD_LMI_VCPU_CACHE2_64BIT_BAR_LOW),
			lower_32_bits(cache_addr));
		MMSCH_V3_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(VCN, i,
			mmUVD_LMI_VCPU_CACHE2_64BIT_BAR_HIGH),
			upper_32_bits(cache_addr));
		MMSCH_V3_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(VCN, i,
			mmUVD_VCPU_CACHE_OFFSET2),
			0);
		MMSCH_V3_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(VCN, i,
			mmUVD_VCPU_CACHE_SIZE2),
			AMDGPU_VCN_CONTEXT_SIZE);

		for (j = 0; j < adev->vcn.num_enc_rings; ++j) {
			ring = &adev->vcn.inst[i].ring_enc[j];
			ring->wptr = 0;
			rb_addr = ring->gpu_addr;
			MMSCH_V3_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(VCN, i,
				mmUVD_RB_BASE_LO),
				lower_32_bits(rb_addr));
			MMSCH_V3_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(VCN, i,
				mmUVD_RB_BASE_HI),
				upper_32_bits(rb_addr));
			MMSCH_V3_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(VCN, i,
				mmUVD_RB_SIZE),
				ring->ring_size / 4);
		}

		ring = &adev->vcn.inst[i].ring_dec;
		ring->wptr = 0;
		rb_addr = ring->gpu_addr;
		MMSCH_V3_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(VCN, i,
			mmUVD_LMI_RBC_RB_64BIT_BAR_LOW),
			lower_32_bits(rb_addr));
		MMSCH_V3_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(VCN, i,
			mmUVD_LMI_RBC_RB_64BIT_BAR_HIGH),
			upper_32_bits(rb_addr));
		/* force RBC into idle state */
		tmp = order_base_2(ring->ring_size);
		tmp = REG_SET_FIELD(0, UVD_RBC_RB_CNTL, RB_BUFSZ, tmp);
		tmp = REG_SET_FIELD(tmp, UVD_RBC_RB_CNTL, RB_BLKSZ, 1);
		tmp = REG_SET_FIELD(tmp, UVD_RBC_RB_CNTL, RB_NO_FETCH, 1);
		tmp = REG_SET_FIELD(tmp, UVD_RBC_RB_CNTL, RB_NO_UPDATE, 1);
		tmp = REG_SET_FIELD(tmp, UVD_RBC_RB_CNTL, RB_RPTR_WR_EN, 1);
		MMSCH_V3_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(VCN, i,
			mmUVD_RBC_RB_CNTL),
			tmp);

		/* add end packet */
		MMSCH_V3_0_INSERT_END();

		/* refine header */
		header.inst[i].init_status = 0;
		header.inst[i].table_offset = header.total_size;
		header.inst[i].table_size = table_size;
		header.total_size += table_size;
	}

	/* Update init table header in memory */
	size = sizeof(struct mmsch_v3_0_init_header);
	table_loc = (uint32_t *)table->cpu_addr;
	memcpy((void *)table_loc, &header, size);

	/* message MMSCH (in VCN[0]) to initialize this client
	 * 1, write to mmsch_vf_ctx_addr_lo/hi register with GPU mc addr
	 * of memory descriptor location
	 */
	ctx_addr = table->gpu_addr;
	WREG32_SOC15(VCN, 0, mmMMSCH_VF_CTX_ADDR_LO, lower_32_bits(ctx_addr));
	WREG32_SOC15(VCN, 0, mmMMSCH_VF_CTX_ADDR_HI, upper_32_bits(ctx_addr));

	/* 2, update vmid of descriptor */
	tmp = RREG32_SOC15(VCN, 0, mmMMSCH_VF_VMID);
	tmp &= ~MMSCH_VF_VMID__VF_CTX_VMID_MASK;
	/* use domain0 for MM scheduler */
	tmp |= (0 << MMSCH_VF_VMID__VF_CTX_VMID__SHIFT);
	WREG32_SOC15(VCN, 0, mmMMSCH_VF_VMID, tmp);

	/* 3, notify mmsch about the size of this descriptor */
	size = header.total_size;
	WREG32_SOC15(VCN, 0, mmMMSCH_VF_CTX_SIZE, size);

	/* 4, set resp to zero */
	WREG32_SOC15(VCN, 0, mmMMSCH_VF_MAILBOX_RESP, 0);

	/* 5, kick off the initialization and wait until
	 * MMSCH_VF_MAILBOX_RESP becomes non-zero
	 */
	param = 0x10000001;
	WREG32_SOC15(VCN, 0, mmMMSCH_VF_MAILBOX_HOST, param);
	tmp = 0;
	timeout = 1000;
	resp = 0;
	expected = param + 1;
	while (resp != expected) {
		resp = RREG32_SOC15(VCN, 0, mmMMSCH_VF_MAILBOX_RESP);
		if (resp == expected)
			break;

		udelay(10);
		tmp = tmp + 10;
		if (tmp >= timeout) {
			DRM_ERROR("failed to init MMSCH. TIME-OUT after %d usec"
				" waiting for mmMMSCH_VF_MAILBOX_RESP "
				"(expected=0x%08x, readback=0x%08x)\n",
				tmp, expected, resp);
			return -EBUSY;
		}
	}

	return 0;
}

static int vcn_v3_0_stop_dpg_mode(struct amdgpu_device *adev, int inst_idx)
{
	uint32_t tmp;

	/* Wait for power status to be 1 */
	SOC15_WAIT_ON_RREG(VCN, inst_idx, mmUVD_POWER_STATUS, 1,
		UVD_POWER_STATUS__UVD_POWER_STATUS_MASK);

	/* wait for read ptr to be equal to write ptr */
	tmp = RREG32_SOC15(VCN, inst_idx, mmUVD_RB_WPTR);
	SOC15_WAIT_ON_RREG(VCN, inst_idx, mmUVD_RB_RPTR, tmp, 0xFFFFFFFF);

	tmp = RREG32_SOC15(VCN, inst_idx, mmUVD_RB_WPTR2);
	SOC15_WAIT_ON_RREG(VCN, inst_idx, mmUVD_RB_RPTR2, tmp, 0xFFFFFFFF);

	tmp = RREG32_SOC15(VCN, inst_idx, mmUVD_RBC_RB_WPTR) & 0x7FFFFFFF;
	SOC15_WAIT_ON_RREG(VCN, inst_idx, mmUVD_RBC_RB_RPTR, tmp, 0xFFFFFFFF);

	SOC15_WAIT_ON_RREG(VCN, inst_idx, mmUVD_POWER_STATUS, 1,
		UVD_POWER_STATUS__UVD_POWER_STATUS_MASK);

	/* disable dynamic power gating mode */
	WREG32_P(SOC15_REG_OFFSET(VCN, inst_idx, mmUVD_POWER_STATUS), 0,
		~UVD_POWER_STATUS__UVD_PG_MODE_MASK);

	return 0;
}

static int vcn_v3_0_stop(struct amdgpu_device *adev)
{
	uint32_t tmp;
	int i, r = 0;

	for (i = 0; i < adev->vcn.num_vcn_inst; ++i) {
		if (adev->vcn.harvest_config & (1 << i))
			continue;

		if (adev->pg_flags & AMD_PG_SUPPORT_VCN_DPG) {
			r = vcn_v3_0_stop_dpg_mode(adev, i);
			continue;
		}

		/* wait for vcn idle */
		r = SOC15_WAIT_ON_RREG(VCN, i, mmUVD_STATUS, UVD_STATUS__IDLE, 0x7);
		if (r)
			return r;

		tmp = UVD_LMI_STATUS__VCPU_LMI_WRITE_CLEAN_MASK |
			UVD_LMI_STATUS__READ_CLEAN_MASK |
			UVD_LMI_STATUS__WRITE_CLEAN_MASK |
			UVD_LMI_STATUS__WRITE_CLEAN_RAW_MASK;
		r = SOC15_WAIT_ON_RREG(VCN, i, mmUVD_LMI_STATUS, tmp, tmp);
		if (r)
			return r;

		/* disable LMI UMC channel */
		tmp = RREG32_SOC15(VCN, i, mmUVD_LMI_CTRL2);
		tmp |= UVD_LMI_CTRL2__STALL_ARB_UMC_MASK;
		WREG32_SOC15(VCN, i, mmUVD_LMI_CTRL2, tmp);
		tmp = UVD_LMI_STATUS__UMC_READ_CLEAN_RAW_MASK |
			UVD_LMI_STATUS__UMC_WRITE_CLEAN_RAW_MASK;
		r = SOC15_WAIT_ON_RREG(VCN, i, mmUVD_LMI_STATUS, tmp, tmp);
		if (r)
			return r;

		/* block VCPU register access */
		WREG32_P(SOC15_REG_OFFSET(VCN, i, mmUVD_RB_ARB_CTRL),
			UVD_RB_ARB_CTRL__VCPU_DIS_MASK,
			~UVD_RB_ARB_CTRL__VCPU_DIS_MASK);

		/* reset VCPU */
		WREG32_P(SOC15_REG_OFFSET(VCN, i, mmUVD_VCPU_CNTL),
			UVD_VCPU_CNTL__BLK_RST_MASK,
			~UVD_VCPU_CNTL__BLK_RST_MASK);

		/* disable VCPU clock */
		WREG32_P(SOC15_REG_OFFSET(VCN, i, mmUVD_VCPU_CNTL), 0,
			~(UVD_VCPU_CNTL__CLK_EN_MASK));

		/* apply soft reset */
		tmp = RREG32_SOC15(VCN, i, mmUVD_SOFT_RESET);
		tmp |= UVD_SOFT_RESET__LMI_UMC_SOFT_RESET_MASK;
		WREG32_SOC15(VCN, i, mmUVD_SOFT_RESET, tmp);
		tmp = RREG32_SOC15(VCN, i, mmUVD_SOFT_RESET);
		tmp |= UVD_SOFT_RESET__LMI_SOFT_RESET_MASK;
		WREG32_SOC15(VCN, i, mmUVD_SOFT_RESET, tmp);

		/* clear status */
		WREG32_SOC15(VCN, i, mmUVD_STATUS, 0);

		/* apply HW clock gating */
		vcn_v3_0_enable_clock_gating(adev, i);

		/* enable VCN power gating */
		vcn_v3_0_enable_static_power_gating(adev, i);
	}

	if (adev->pm.dpm_enabled)
		amdgpu_dpm_enable_uvd(adev, false);

	return 0;
}

static int vcn_v3_0_pause_dpg_mode(struct amdgpu_device *adev,
		int inst_idx, struct dpg_pause_state *new_state)
{
	volatile struct amdgpu_fw_shared *fw_shared;
	struct amdgpu_ring *ring;
	uint32_t reg_data = 0;
	int ret_code;

	/* pause/unpause if state is changed */
	if (adev->vcn.inst[inst_idx].pause_state.fw_based != new_state->fw_based) {
		DRM_DEBUG("dpg pause state changed %d -> %d",
			adev->vcn.inst[inst_idx].pause_state.fw_based, new_state->fw_based);
		reg_data = RREG32_SOC15(VCN, inst_idx, mmUVD_DPG_PAUSE) &
			(~UVD_DPG_PAUSE__NJ_PAUSE_DPG_ACK_MASK);

		if (new_state->fw_based == VCN_DPG_STATE__PAUSE) {
			ret_code = SOC15_WAIT_ON_RREG(VCN, inst_idx, mmUVD_POWER_STATUS, 0x1,
				UVD_POWER_STATUS__UVD_POWER_STATUS_MASK);

			if (!ret_code) {
				/* pause DPG */
				reg_data |= UVD_DPG_PAUSE__NJ_PAUSE_DPG_REQ_MASK;
				WREG32_SOC15(VCN, inst_idx, mmUVD_DPG_PAUSE, reg_data);

				/* wait for ACK */
				SOC15_WAIT_ON_RREG(VCN, inst_idx, mmUVD_DPG_PAUSE,
					UVD_DPG_PAUSE__NJ_PAUSE_DPG_ACK_MASK,
					UVD_DPG_PAUSE__NJ_PAUSE_DPG_ACK_MASK);

				/* Stall DPG before WPTR/RPTR reset */
				WREG32_P(SOC15_REG_OFFSET(VCN, inst_idx, mmUVD_POWER_STATUS),
					UVD_POWER_STATUS__STALL_DPG_POWER_UP_MASK,
					~UVD_POWER_STATUS__STALL_DPG_POWER_UP_MASK);

				if (adev->asic_type != CHIP_BEIGE_GOBY) {
					/* Restore */
					fw_shared = adev->vcn.inst[inst_idx].fw_shared_cpu_addr;
					fw_shared->multi_queue.encode_generalpurpose_queue_mode |= cpu_to_le32(FW_QUEUE_RING_RESET);
					ring = &adev->vcn.inst[inst_idx].ring_enc[0];
					ring->wptr = 0;
					WREG32_SOC15(VCN, inst_idx, mmUVD_RB_BASE_LO, ring->gpu_addr);
					WREG32_SOC15(VCN, inst_idx, mmUVD_RB_BASE_HI, upper_32_bits(ring->gpu_addr));
					WREG32_SOC15(VCN, inst_idx, mmUVD_RB_SIZE, ring->ring_size / 4);
					WREG32_SOC15(VCN, inst_idx, mmUVD_RB_RPTR, lower_32_bits(ring->wptr));
					WREG32_SOC15(VCN, inst_idx, mmUVD_RB_WPTR, lower_32_bits(ring->wptr));
					fw_shared->multi_queue.encode_generalpurpose_queue_mode &= cpu_to_le32(~FW_QUEUE_RING_RESET);

					fw_shared->multi_queue.encode_lowlatency_queue_mode |= cpu_to_le32(FW_QUEUE_RING_RESET);
					ring = &adev->vcn.inst[inst_idx].ring_enc[1];
					ring->wptr = 0;
					WREG32_SOC15(VCN, inst_idx, mmUVD_RB_BASE_LO2, ring->gpu_addr);
					WREG32_SOC15(VCN, inst_idx, mmUVD_RB_BASE_HI2, upper_32_bits(ring->gpu_addr));
					WREG32_SOC15(VCN, inst_idx, mmUVD_RB_SIZE2, ring->ring_size / 4);
					WREG32_SOC15(VCN, inst_idx, mmUVD_RB_RPTR2, lower_32_bits(ring->wptr));
					WREG32_SOC15(VCN, inst_idx, mmUVD_RB_WPTR2, lower_32_bits(ring->wptr));
					fw_shared->multi_queue.encode_lowlatency_queue_mode &= cpu_to_le32(~FW_QUEUE_RING_RESET);

					/* restore wptr/rptr with pointers saved in FW shared memory */
					WREG32_SOC15(VCN, inst_idx, mmUVD_RBC_RB_RPTR, fw_shared->rb.rptr);
					WREG32_SOC15(VCN, inst_idx, mmUVD_RBC_RB_WPTR, fw_shared->rb.wptr);
				}

1675 WREG32_P(SOC15_REG_OFFSET(VCN, inst_idx, mmUVD_POWER_STATUS),
1676 0, ~UVD_POWER_STATUS__STALL_DPG_POWER_UP_MASK);
1678 SOC15_WAIT_ON_RREG(VCN, inst_idx, mmUVD_POWER_STATUS,
1679 UVD_PGFSM_CONFIG__UVDM_UVDU_PWR_ON, UVD_POWER_STATUS__UVD_POWER_STATUS_MASK);
1682 /* unpause dpg, no need to wait */
1683 reg_data &= ~UVD_DPG_PAUSE__NJ_PAUSE_DPG_REQ_MASK;
1684 WREG32_SOC15(VCN, inst_idx, mmUVD_DPG_PAUSE, reg_data);
1686 adev->vcn.inst[inst_idx].pause_state.fw_based = new_state->fw_based;
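/*
 * Note on usage: nothing in this file calls vcn_v3_0_pause_dpg_mode()
 * directly; it is published through adev->vcn.pause_dpg_mode and driven
 * from the shared VCN idle/begin_use helpers. A minimal sketch of a
 * caller, assuming only the fields used above:
 *
 *	struct dpg_pause_state new_state = {
 *		.fw_based = VCN_DPG_STATE__PAUSE,  (or VCN_DPG_STATE__UNPAUSE)
 *	};
 *
 *	if (adev->pg_flags & AMD_PG_SUPPORT_VCN_DPG)
 *		adev->vcn.pause_dpg_mode(adev, inst_idx, &new_state);
 */
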
/**
 * vcn_v3_0_dec_ring_get_rptr - get read pointer
 *
 * @ring: amdgpu_ring pointer
 *
 * Returns the current hardware read pointer
 */
static uint64_t vcn_v3_0_dec_ring_get_rptr(struct amdgpu_ring *ring)
{
	struct amdgpu_device *adev = ring->adev;

	return RREG32_SOC15(VCN, ring->me, mmUVD_RBC_RB_RPTR);
}

/**
 * vcn_v3_0_dec_ring_get_wptr - get write pointer
 *
 * @ring: amdgpu_ring pointer
 *
 * Returns the current hardware write pointer
 */
static uint64_t vcn_v3_0_dec_ring_get_wptr(struct amdgpu_ring *ring)
{
	struct amdgpu_device *adev = ring->adev;

	if (ring->use_doorbell)
		return adev->wb.wb[ring->wptr_offs];
	else
		return RREG32_SOC15(VCN, ring->me, mmUVD_RBC_RB_WPTR);
}

/**
 * vcn_v3_0_dec_ring_set_wptr - set write pointer
 *
 * @ring: amdgpu_ring pointer
 *
 * Commits the write pointer to the hardware
 */
static void vcn_v3_0_dec_ring_set_wptr(struct amdgpu_ring *ring)
{
	struct amdgpu_device *adev = ring->adev;
	volatile struct amdgpu_fw_shared *fw_shared;

	if (adev->pg_flags & AMD_PG_SUPPORT_VCN_DPG) {
		/* whenever we update RBC_RB_WPTR, save the wptr in shared rb.wptr and scratch2 */
		fw_shared = adev->vcn.inst[ring->me].fw_shared_cpu_addr;
		fw_shared->rb.wptr = lower_32_bits(ring->wptr);
		WREG32_SOC15(VCN, ring->me, mmUVD_SCRATCH2,
			lower_32_bits(ring->wptr));
	}

	if (ring->use_doorbell) {
		adev->wb.wb[ring->wptr_offs] = lower_32_bits(ring->wptr);
		WDOORBELL32(ring->doorbell_index, lower_32_bits(ring->wptr));
	} else {
		WREG32_SOC15(VCN, ring->me, mmUVD_RBC_RB_WPTR, lower_32_bits(ring->wptr));
	}
}

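/*
 * For reference, set_wptr is the tail end of the generic submission
 * flow; a sketch using the existing amdgpu_ring helpers (illustrative
 * only, not a new API):
 *
 *	r = amdgpu_ring_alloc(ring, ndw);	reserve ring space
 *	if (r)
 *		return r;
 *	amdgpu_ring_write(ring, ...);		queue packet dwords
 *	amdgpu_ring_commit(ring);		pads, then calls set_wptr
 */
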
static void vcn_v3_0_dec_sw_ring_emit_fence(struct amdgpu_ring *ring, u64 addr,
	u64 seq, uint32_t flags)
{
	WARN_ON(flags & AMDGPU_FENCE_FLAG_64BIT);

	amdgpu_ring_write(ring, VCN_DEC_SW_CMD_FENCE);
	amdgpu_ring_write(ring, addr);
	amdgpu_ring_write(ring, upper_32_bits(addr));
	amdgpu_ring_write(ring, seq);
	amdgpu_ring_write(ring, VCN_DEC_SW_CMD_TRAP);
}

static void vcn_v3_0_dec_sw_ring_insert_end(struct amdgpu_ring *ring)
{
	amdgpu_ring_write(ring, VCN_DEC_SW_CMD_END);
}

static void vcn_v3_0_dec_sw_ring_emit_ib(struct amdgpu_ring *ring,
	struct amdgpu_job *job,
	struct amdgpu_ib *ib,
	uint32_t flags)
{
	uint32_t vmid = AMDGPU_JOB_GET_VMID(job);

	amdgpu_ring_write(ring, VCN_DEC_SW_CMD_IB);
	amdgpu_ring_write(ring, vmid);
	amdgpu_ring_write(ring, lower_32_bits(ib->gpu_addr));
	amdgpu_ring_write(ring, upper_32_bits(ib->gpu_addr));
	amdgpu_ring_write(ring, ib->length_dw);
}

static void vcn_v3_0_dec_sw_ring_emit_reg_wait(struct amdgpu_ring *ring, uint32_t reg,
	uint32_t val, uint32_t mask)
{
	amdgpu_ring_write(ring, VCN_DEC_SW_CMD_REG_WAIT);
	amdgpu_ring_write(ring, reg << 2);
	amdgpu_ring_write(ring, mask);
	amdgpu_ring_write(ring, val);
}

static void vcn_v3_0_dec_sw_ring_emit_vm_flush(struct amdgpu_ring *ring,
	uint32_t vmid, uint64_t pd_addr)
{
	struct amdgpu_vmhub *hub = &ring->adev->vmhub[ring->funcs->vmhub];
	uint32_t data0, data1, mask;

	pd_addr = amdgpu_gmc_emit_flush_gpu_tlb(ring, vmid, pd_addr);

	/* wait for register write */
	data0 = hub->ctx0_ptb_addr_lo32 + vmid * hub->ctx_addr_distance;
	data1 = lower_32_bits(pd_addr);
	mask = 0xffffffff;
	vcn_v3_0_dec_sw_ring_emit_reg_wait(ring, data0, data1, mask);
}

static void vcn_v3_0_dec_sw_ring_emit_wreg(struct amdgpu_ring *ring, uint32_t reg, uint32_t val)
{
	amdgpu_ring_write(ring, VCN_DEC_SW_CMD_REG_WRITE);
	amdgpu_ring_write(ring, reg << 2);
	amdgpu_ring_write(ring, val);
}

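/*
 * The software decode ring carries typed command dwords instead of
 * PACKET0 register writes. From the emitters above, one IB submission
 * is exactly five dwords:
 *
 *	VCN_DEC_SW_CMD_IB
 *	vmid
 *	lower_32_bits(ib->gpu_addr)
 *	upper_32_bits(ib->gpu_addr)
 *	ib->length_dw
 *
 * which is where .emit_ib_size = 5 in the table below comes from.
 */
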
static const struct amdgpu_ring_funcs vcn_v3_0_dec_sw_ring_vm_funcs = {
	.type = AMDGPU_RING_TYPE_VCN_DEC,
	.align_mask = 0x3f,
	.nop = VCN_DEC_SW_CMD_NO_OP,
	.vmhub = AMDGPU_MMHUB_0,
	.get_rptr = vcn_v3_0_dec_ring_get_rptr,
	.get_wptr = vcn_v3_0_dec_ring_get_wptr,
	.set_wptr = vcn_v3_0_dec_ring_set_wptr,
	.emit_frame_size =
		SOC15_FLUSH_GPU_TLB_NUM_WREG * 3 +
		SOC15_FLUSH_GPU_TLB_NUM_REG_WAIT * 4 +
		4 + /* vcn_v3_0_dec_sw_ring_emit_vm_flush */
		5 + 5 + /* vcn_v3_0_dec_sw_ring_emit_fence x2 vm fence */
		1, /* vcn_v3_0_dec_sw_ring_insert_end */
	.emit_ib_size = 5, /* vcn_v3_0_dec_sw_ring_emit_ib */
	.emit_ib = vcn_v3_0_dec_sw_ring_emit_ib,
	.emit_fence = vcn_v3_0_dec_sw_ring_emit_fence,
	.emit_vm_flush = vcn_v3_0_dec_sw_ring_emit_vm_flush,
	.test_ring = amdgpu_vcn_dec_sw_ring_test_ring,
	.test_ib = NULL, /* amdgpu_vcn_dec_sw_ring_test_ib */
	.insert_nop = amdgpu_ring_insert_nop,
	.insert_end = vcn_v3_0_dec_sw_ring_insert_end,
	.pad_ib = amdgpu_ring_generic_pad_ib,
	.begin_use = amdgpu_vcn_ring_begin_use,
	.end_use = amdgpu_vcn_ring_end_use,
	.emit_wreg = vcn_v3_0_dec_sw_ring_emit_wreg,
	.emit_reg_wait = vcn_v3_0_dec_sw_ring_emit_reg_wait,
	.emit_reg_write_reg_wait = amdgpu_ring_emit_reg_write_reg_wait_helper,
};

static int vcn_v3_0_limit_sched(struct amdgpu_cs_parser *p)
{
	struct drm_gpu_scheduler **scheds;

	/* The create msg must be in the first IB submitted */
	if (atomic_read(&p->entity->fence_seq))
		return -EINVAL;

	scheds = p->adev->gpu_sched[AMDGPU_HW_IP_VCN_DEC]
		[AMDGPU_RING_PRIO_DEFAULT].sched;
	drm_sched_entity_modify_sched(p->entity, scheds, 1);
	return 0;
}

static int vcn_v3_0_dec_msg(struct amdgpu_cs_parser *p, uint64_t addr)
{
	struct ttm_operation_ctx ctx = { false, false };
	struct amdgpu_bo_va_mapping *map;
	uint32_t *msg, num_buffers;
	struct amdgpu_bo *bo;
	uint64_t start, end;
	unsigned int i;
	void *ptr;
	int r;

	addr &= AMDGPU_GMC_HOLE_MASK;
	r = amdgpu_cs_find_mapping(p, addr, &bo, &map);
	if (r) {
		DRM_ERROR("Can't find BO for addr 0x%08Lx\n", addr);
		return r;
	}

	start = map->start * AMDGPU_GPU_PAGE_SIZE;
	end = (map->last + 1) * AMDGPU_GPU_PAGE_SIZE;
	if (addr & 0x7) {
		DRM_ERROR("VCN messages must be 8 byte aligned!\n");
		return -EINVAL;
	}

	bo->flags |= AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED;
	amdgpu_bo_placement_from_domain(bo, bo->allowed_domains);
	r = ttm_bo_validate(&bo->tbo, &bo->placement, &ctx);
	if (r) {
		DRM_ERROR("Failed validating the VCN message BO (%d)!\n", r);
		return r;
	}

	r = amdgpu_bo_kmap(bo, &ptr);
	if (r) {
		DRM_ERROR("Failed mapping the VCN message (%d)!\n", r);
		return r;
	}

	msg = ptr + addr - start;
	if (msg[1] > end - addr) {
		r = -EINVAL;
		goto out;
	}

	if (msg[3] != RDECODE_MSG_CREATE)
		goto out;

	num_buffers = msg[2];
	for (i = 0, msg = &msg[6]; i < num_buffers; ++i, msg += 4) {
		uint32_t offset, size, *create;

		if (msg[0] != RDECODE_MESSAGE_CREATE)
			continue;

		offset = msg[1];
		size = msg[2];
		if (offset + size > end) {
			r = -EINVAL;
			goto out;
		}

		create = ptr + addr + offset - start;

		/* H264, HEVC and VP9 can run on any instance */
		if (create[0] == 0x7 || create[0] == 0x10 || create[0] == 0x11)
			continue;

		r = vcn_v3_0_limit_sched(p);
		if (r)
			goto out;
	}

out:
	amdgpu_bo_kunmap(bo);
	return r;
}

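/*
 * Message layout assumed by vcn_v3_0_dec_msg() above (dword offsets
 * into the kmapped message BO):
 *
 *	msg[1]	message size in bytes (checked against the mapping)
 *	msg[2]	number of buffer descriptors
 *	msg[3]	message type, RDECODE_MSG_CREATE for create messages
 *	msg[6]	first descriptor; each descriptor is 4 dwords:
 *		[0] = RDECODE_MESSAGE_CREATE, [1] = offset, [2] = size
 *
 * create[0] carries the codec type: 0x7 (H264), 0x10 (HEVC) and 0x11
 * (VP9) may run on any instance; everything else is pinned to the
 * first instance through vcn_v3_0_limit_sched().
 */
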
static int vcn_v3_0_ring_patch_cs_in_place(struct amdgpu_cs_parser *p,
	uint32_t ib_idx)
{
	struct amdgpu_ring *ring = to_amdgpu_ring(p->entity->rq->sched);
	struct amdgpu_ib *ib = &p->job->ibs[ib_idx];
	uint32_t msg_lo = 0, msg_hi = 0;
	unsigned int i;
	int r;

	/* The first instance can decode anything */
	if (!ring->me)
		return 0;

	for (i = 0; i < ib->length_dw; i += 2) {
		uint32_t reg = amdgpu_get_ib_value(p, ib_idx, i);
		uint32_t val = amdgpu_get_ib_value(p, ib_idx, i + 1);

		if (reg == PACKET0(p->adev->vcn.internal.data0, 0)) {
			msg_lo = val;
		} else if (reg == PACKET0(p->adev->vcn.internal.data1, 0)) {
			msg_hi = val;
		} else if (reg == PACKET0(p->adev->vcn.internal.cmd, 0) &&
			   val == 0) {
			r = vcn_v3_0_dec_msg(p, ((u64)msg_hi) << 32 | msg_lo);
			if (r)
				return r;
		}
	}
	return 0;
}

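/*
 * The loop above matches the register-write pattern a decode
 * submission uses to hand a message address to the firmware:
 *
 *	PACKET0(internal.data0, 0), lower_32_bits(msg_addr)
 *	PACKET0(internal.data1, 0), upper_32_bits(msg_addr)
 *	PACKET0(internal.cmd, 0),   0x0
 *
 * Once the full 64-bit address is assembled, the message body is
 * inspected by vcn_v3_0_dec_msg().
 */
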
static const struct amdgpu_ring_funcs vcn_v3_0_dec_ring_vm_funcs = {
	.type = AMDGPU_RING_TYPE_VCN_DEC,
	.align_mask = 0xf,
	.vmhub = AMDGPU_MMHUB_0,
	.get_rptr = vcn_v3_0_dec_ring_get_rptr,
	.get_wptr = vcn_v3_0_dec_ring_get_wptr,
	.set_wptr = vcn_v3_0_dec_ring_set_wptr,
	.patch_cs_in_place = vcn_v3_0_ring_patch_cs_in_place,
	.emit_frame_size =
		SOC15_FLUSH_GPU_TLB_NUM_WREG * 6 +
		SOC15_FLUSH_GPU_TLB_NUM_REG_WAIT * 8 +
		8 + /* vcn_v2_0_dec_ring_emit_vm_flush */
		14 + 14 + /* vcn_v2_0_dec_ring_emit_fence x2 vm fence */
		6,
	.emit_ib_size = 8, /* vcn_v2_0_dec_ring_emit_ib */
	.emit_ib = vcn_v2_0_dec_ring_emit_ib,
	.emit_fence = vcn_v2_0_dec_ring_emit_fence,
	.emit_vm_flush = vcn_v2_0_dec_ring_emit_vm_flush,
	.test_ring = vcn_v2_0_dec_ring_test_ring,
	.test_ib = amdgpu_vcn_dec_ring_test_ib,
	.insert_nop = vcn_v2_0_dec_ring_insert_nop,
	.insert_start = vcn_v2_0_dec_ring_insert_start,
	.insert_end = vcn_v2_0_dec_ring_insert_end,
	.pad_ib = amdgpu_ring_generic_pad_ib,
	.begin_use = amdgpu_vcn_ring_begin_use,
	.end_use = amdgpu_vcn_ring_end_use,
	.emit_wreg = vcn_v2_0_dec_ring_emit_wreg,
	.emit_reg_wait = vcn_v2_0_dec_ring_emit_reg_wait,
	.emit_reg_write_reg_wait = amdgpu_ring_emit_reg_write_reg_wait_helper,
};

/**
 * vcn_v3_0_enc_ring_get_rptr - get enc read pointer
 *
 * @ring: amdgpu_ring pointer
 *
 * Returns the current hardware enc read pointer
 */
static uint64_t vcn_v3_0_enc_ring_get_rptr(struct amdgpu_ring *ring)
{
	struct amdgpu_device *adev = ring->adev;

	if (ring == &adev->vcn.inst[ring->me].ring_enc[0])
		return RREG32_SOC15(VCN, ring->me, mmUVD_RB_RPTR);
	else
		return RREG32_SOC15(VCN, ring->me, mmUVD_RB_RPTR2);
}

/**
 * vcn_v3_0_enc_ring_get_wptr - get enc write pointer
 *
 * @ring: amdgpu_ring pointer
 *
 * Returns the current hardware enc write pointer
 */
static uint64_t vcn_v3_0_enc_ring_get_wptr(struct amdgpu_ring *ring)
{
	struct amdgpu_device *adev = ring->adev;

	if (ring == &adev->vcn.inst[ring->me].ring_enc[0]) {
		if (ring->use_doorbell)
			return adev->wb.wb[ring->wptr_offs];
		else
			return RREG32_SOC15(VCN, ring->me, mmUVD_RB_WPTR);
	} else {
		if (ring->use_doorbell)
			return adev->wb.wb[ring->wptr_offs];
		else
			return RREG32_SOC15(VCN, ring->me, mmUVD_RB_WPTR2);
	}
}

/**
 * vcn_v3_0_enc_ring_set_wptr - set enc write pointer
 *
 * @ring: amdgpu_ring pointer
 *
 * Commits the enc write pointer to the hardware
 */
static void vcn_v3_0_enc_ring_set_wptr(struct amdgpu_ring *ring)
{
	struct amdgpu_device *adev = ring->adev;

	if (ring == &adev->vcn.inst[ring->me].ring_enc[0]) {
		if (ring->use_doorbell) {
			adev->wb.wb[ring->wptr_offs] = lower_32_bits(ring->wptr);
			WDOORBELL32(ring->doorbell_index, lower_32_bits(ring->wptr));
		} else {
			WREG32_SOC15(VCN, ring->me, mmUVD_RB_WPTR, lower_32_bits(ring->wptr));
		}
	} else {
		if (ring->use_doorbell) {
			adev->wb.wb[ring->wptr_offs] = lower_32_bits(ring->wptr);
			WDOORBELL32(ring->doorbell_index, lower_32_bits(ring->wptr));
		} else {
			WREG32_SOC15(VCN, ring->me, mmUVD_RB_WPTR2, lower_32_bits(ring->wptr));
		}
	}
}

static const struct amdgpu_ring_funcs vcn_v3_0_enc_ring_vm_funcs = {
	.type = AMDGPU_RING_TYPE_VCN_ENC,
	.align_mask = 0x3f,
	.nop = VCN_ENC_CMD_NO_OP,
	.vmhub = AMDGPU_MMHUB_0,
	.get_rptr = vcn_v3_0_enc_ring_get_rptr,
	.get_wptr = vcn_v3_0_enc_ring_get_wptr,
	.set_wptr = vcn_v3_0_enc_ring_set_wptr,
	.emit_frame_size =
		SOC15_FLUSH_GPU_TLB_NUM_WREG * 3 +
		SOC15_FLUSH_GPU_TLB_NUM_REG_WAIT * 4 +
		4 + /* vcn_v2_0_enc_ring_emit_vm_flush */
		5 + 5 + /* vcn_v2_0_enc_ring_emit_fence x2 vm fence */
		1, /* vcn_v2_0_enc_ring_insert_end */
	.emit_ib_size = 5, /* vcn_v2_0_enc_ring_emit_ib */
	.emit_ib = vcn_v2_0_enc_ring_emit_ib,
	.emit_fence = vcn_v2_0_enc_ring_emit_fence,
	.emit_vm_flush = vcn_v2_0_enc_ring_emit_vm_flush,
	.test_ring = amdgpu_vcn_enc_ring_test_ring,
	.test_ib = amdgpu_vcn_enc_ring_test_ib,
	.insert_nop = amdgpu_ring_insert_nop,
	.insert_end = vcn_v2_0_enc_ring_insert_end,
	.pad_ib = amdgpu_ring_generic_pad_ib,
	.begin_use = amdgpu_vcn_ring_begin_use,
	.end_use = amdgpu_vcn_ring_end_use,
	.emit_wreg = vcn_v2_0_enc_ring_emit_wreg,
	.emit_reg_wait = vcn_v2_0_enc_ring_emit_reg_wait,
	.emit_reg_write_reg_wait = amdgpu_ring_emit_reg_write_reg_wait_helper,
};

static void vcn_v3_0_set_dec_ring_funcs(struct amdgpu_device *adev)
{
	int i;

	for (i = 0; i < adev->vcn.num_vcn_inst; ++i) {
		if (adev->vcn.harvest_config & (1 << i))
			continue;

		if (!DEC_SW_RING_ENABLED)
			adev->vcn.inst[i].ring_dec.funcs = &vcn_v3_0_dec_ring_vm_funcs;
		else
			adev->vcn.inst[i].ring_dec.funcs = &vcn_v3_0_dec_sw_ring_vm_funcs;
		adev->vcn.inst[i].ring_dec.me = i;
		DRM_INFO("VCN(%d) decode%s is enabled in VM mode\n", i,
			 DEC_SW_RING_ENABLED ? "(Software Ring)" : "");
	}
}

static void vcn_v3_0_set_enc_ring_funcs(struct amdgpu_device *adev)
{
	int i, j;

	for (i = 0; i < adev->vcn.num_vcn_inst; ++i) {
		if (adev->vcn.harvest_config & (1 << i))
			continue;

		for (j = 0; j < adev->vcn.num_enc_rings; ++j) {
			adev->vcn.inst[i].ring_enc[j].funcs = &vcn_v3_0_enc_ring_vm_funcs;
			adev->vcn.inst[i].ring_enc[j].me = i;
		}
		if (adev->vcn.num_enc_rings > 0)
			DRM_INFO("VCN(%d) encode is enabled in VM mode\n", i);
	}
}

static bool vcn_v3_0_is_idle(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	int i, ret = 1;

	for (i = 0; i < adev->vcn.num_vcn_inst; ++i) {
		if (adev->vcn.harvest_config & (1 << i))
			continue;

		ret &= (RREG32_SOC15(VCN, i, mmUVD_STATUS) == UVD_STATUS__IDLE);
	}

	return ret;
}

static int vcn_v3_0_wait_for_idle(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	int i, ret = 0;

	for (i = 0; i < adev->vcn.num_vcn_inst; ++i) {
		if (adev->vcn.harvest_config & (1 << i))
			continue;

		ret = SOC15_WAIT_ON_RREG(VCN, i, mmUVD_STATUS, UVD_STATUS__IDLE,
			UVD_STATUS__IDLE);
		if (ret)
			return ret;
	}

	return ret;
}

static int vcn_v3_0_set_clockgating_state(void *handle,
	enum amd_clockgating_state state)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	bool enable = (state == AMD_CG_STATE_GATE);
	int i;

	for (i = 0; i < adev->vcn.num_vcn_inst; ++i) {
		if (adev->vcn.harvest_config & (1 << i))
			continue;

		if (enable) {
			if (RREG32_SOC15(VCN, i, mmUVD_STATUS) != UVD_STATUS__IDLE)
				return -EBUSY;
			vcn_v3_0_enable_clock_gating(adev, i);
		} else {
			vcn_v3_0_disable_clock_gating(adev, i);
		}
	}

	return 0;
}

static int vcn_v3_0_set_powergating_state(void *handle,
	enum amd_powergating_state state)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	int ret;

	/* for SRIOV, guest should not control VCN Power-gating
	 * MMSCH FW should control Power-gating and clock-gating
	 * guest should avoid touching CGC and PG
	 */
	if (amdgpu_sriov_vf(adev)) {
		adev->vcn.cur_state = AMD_PG_STATE_UNGATE;
		return 0;
	}

	if (state == adev->vcn.cur_state)
		return 0;

	if (state == AMD_PG_STATE_GATE)
		ret = vcn_v3_0_stop(adev);
	else
		ret = vcn_v3_0_start(adev);

	if (!ret)
		adev->vcn.cur_state = state;

	return ret;
}

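/*
 * A sketch of how this handler is normally reached, assuming the
 * generic IP-state helper (the real call sites live in the shared
 * power-management code, not in this file):
 *
 *	amdgpu_device_ip_set_powergating_state(adev,
 *			AMD_IP_BLOCK_TYPE_VCN, AMD_PG_STATE_GATE);
 *
 * which dispatches to vcn_v3_0_set_powergating_state() through
 * vcn_v3_0_ip_funcs below.
 */
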
static int vcn_v3_0_set_interrupt_state(struct amdgpu_device *adev,
	struct amdgpu_irq_src *source,
	unsigned int type,
	enum amdgpu_interrupt_state state)
{
	return 0;
}

static int vcn_v3_0_process_interrupt(struct amdgpu_device *adev,
	struct amdgpu_irq_src *source,
	struct amdgpu_iv_entry *entry)
{
	uint32_t ip_instance;

	switch (entry->client_id) {
	case SOC15_IH_CLIENTID_VCN:
		ip_instance = 0;
		break;
	case SOC15_IH_CLIENTID_VCN1:
		ip_instance = 1;
		break;
	default:
		DRM_ERROR("Unhandled client id: %d\n", entry->client_id);
		return 0;
	}

	DRM_DEBUG("IH: VCN TRAP\n");

	switch (entry->src_id) {
	case VCN_2_0__SRCID__UVD_SYSTEM_MESSAGE_INTERRUPT:
		amdgpu_fence_process(&adev->vcn.inst[ip_instance].ring_dec);
		break;
	case VCN_2_0__SRCID__UVD_ENC_GENERAL_PURPOSE:
		amdgpu_fence_process(&adev->vcn.inst[ip_instance].ring_enc[0]);
		break;
	case VCN_2_0__SRCID__UVD_ENC_LOW_LATENCY:
		amdgpu_fence_process(&adev->vcn.inst[ip_instance].ring_enc[1]);
		break;
	default:
		DRM_ERROR("Unhandled interrupt: %d %d\n",
			entry->src_id, entry->src_data[0]);
		break;
	}

	return 0;
}

static const struct amdgpu_irq_src_funcs vcn_v3_0_irq_funcs = {
	.set = vcn_v3_0_set_interrupt_state,
	.process = vcn_v3_0_process_interrupt,
};

static void vcn_v3_0_set_irq_funcs(struct amdgpu_device *adev)
{
	int i;

	for (i = 0; i < adev->vcn.num_vcn_inst; ++i) {
		if (adev->vcn.harvest_config & (1 << i))
			continue;

		adev->vcn.inst[i].irq.num_types = adev->vcn.num_enc_rings + 1;
		adev->vcn.inst[i].irq.funcs = &vcn_v3_0_irq_funcs;
	}
}

static const struct amd_ip_funcs vcn_v3_0_ip_funcs = {
	.name = "vcn_v3_0",
	.early_init = vcn_v3_0_early_init,
	.late_init = NULL,
	.sw_init = vcn_v3_0_sw_init,
	.sw_fini = vcn_v3_0_sw_fini,
	.hw_init = vcn_v3_0_hw_init,
	.hw_fini = vcn_v3_0_hw_fini,
	.suspend = vcn_v3_0_suspend,
	.resume = vcn_v3_0_resume,
	.is_idle = vcn_v3_0_is_idle,
	.wait_for_idle = vcn_v3_0_wait_for_idle,
	.check_soft_reset = NULL,
	.pre_soft_reset = NULL,
	.soft_reset = NULL,
	.post_soft_reset = NULL,
	.set_clockgating_state = vcn_v3_0_set_clockgating_state,
	.set_powergating_state = vcn_v3_0_set_powergating_state,
};

const struct amdgpu_ip_block_version vcn_v3_0_ip_block =
{
	.type = AMD_IP_BLOCK_TYPE_VCN,
	.major = 3,
	.minor = 0,
	.rev = 0,
	.funcs = &vcn_v3_0_ip_funcs,
};
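
/*
 * This version struct is what the SoC setup code registers during
 * early device init; a sketch of the expected use (the actual call
 * site lives in the nv SoC code, not in this file):
 *
 *	amdgpu_device_ip_block_add(adev, &vcn_v3_0_ip_block);
 */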