/*
 * Copyright 2016 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 */
#include <linux/firmware.h>

#include "amdgpu.h"
#include "amdgpu_uvd.h"
#include "soc15.h"
#include "soc15d.h"
#include "soc15_common.h"
#include "mmsch_v1_0.h"

#include "uvd/uvd_7_0_offset.h"
#include "uvd/uvd_7_0_sh_mask.h"
#include "vce/vce_4_0_offset.h"
#include "vce/vce_4_0_default.h"
#include "vce/vce_4_0_sh_mask.h"
#include "nbif/nbif_6_1_offset.h"
#include "mmhub/mmhub_1_0_offset.h"
#include "mmhub/mmhub_1_0_sh_mask.h"
#include "ivsrcid/uvd/irqsrcs_uvd_7_0.h"
#define mmUVD_PG0_CC_UVD_HARVESTING				0x00c7
#define mmUVD_PG0_CC_UVD_HARVESTING_BASE_IDX			1
//UVD_PG0_CC_UVD_HARVESTING
#define UVD_PG0_CC_UVD_HARVESTING__UVD_DISABLE__SHIFT		0x1
#define UVD_PG0_CC_UVD_HARVESTING__UVD_DISABLE_MASK		0x00000002L

#define UVD7_MAX_HW_INSTANCES_VEGA20			2
static void uvd_v7_0_set_ring_funcs(struct amdgpu_device *adev);
static void uvd_v7_0_set_enc_ring_funcs(struct amdgpu_device *adev);
static void uvd_v7_0_set_irq_funcs(struct amdgpu_device *adev);
static int uvd_v7_0_start(struct amdgpu_device *adev);
static void uvd_v7_0_stop(struct amdgpu_device *adev);
static int uvd_v7_0_sriov_start(struct amdgpu_device *adev);

static int amdgpu_ih_clientid_uvds[] = {
	SOC15_IH_CLIENTID_UVD,
	SOC15_IH_CLIENTID_UVD1
};
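
/*
 * Each UVD instance on Vega20 raises interrupts under its own IH client
 * id; this table maps instance index -> client id, so the IRQ sources
 * registered in sw_init and the demux in uvd_v7_0_process_interrupt can
 * tell the two instances apart.
 */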
/**
 * uvd_v7_0_ring_get_rptr - get read pointer
 *
 * @ring: amdgpu_ring pointer
 *
 * Returns the current hardware read pointer
 */
static uint64_t uvd_v7_0_ring_get_rptr(struct amdgpu_ring *ring)
{
	struct amdgpu_device *adev = ring->adev;

	return RREG32_SOC15(UVD, ring->me, mmUVD_RBC_RB_RPTR);
}
/**
 * uvd_v7_0_enc_ring_get_rptr - get enc read pointer
 *
 * @ring: amdgpu_ring pointer
 *
 * Returns the current hardware enc read pointer
 */
static uint64_t uvd_v7_0_enc_ring_get_rptr(struct amdgpu_ring *ring)
{
	struct amdgpu_device *adev = ring->adev;

	if (ring == &adev->uvd.inst[ring->me].ring_enc[0])
		return RREG32_SOC15(UVD, ring->me, mmUVD_RB_RPTR);
	else
		return RREG32_SOC15(UVD, ring->me, mmUVD_RB_RPTR2);
}
/**
 * uvd_v7_0_ring_get_wptr - get write pointer
 *
 * @ring: amdgpu_ring pointer
 *
 * Returns the current hardware write pointer
 */
static uint64_t uvd_v7_0_ring_get_wptr(struct amdgpu_ring *ring)
{
	struct amdgpu_device *adev = ring->adev;

	return RREG32_SOC15(UVD, ring->me, mmUVD_RBC_RB_WPTR);
}
/**
 * uvd_v7_0_enc_ring_get_wptr - get enc write pointer
 *
 * @ring: amdgpu_ring pointer
 *
 * Returns the current hardware enc write pointer
 */
static uint64_t uvd_v7_0_enc_ring_get_wptr(struct amdgpu_ring *ring)
{
	struct amdgpu_device *adev = ring->adev;

	if (ring->use_doorbell)
		return adev->wb.wb[ring->wptr_offs];

	if (ring == &adev->uvd.inst[ring->me].ring_enc[0])
		return RREG32_SOC15(UVD, ring->me, mmUVD_RB_WPTR);
	else
		return RREG32_SOC15(UVD, ring->me, mmUVD_RB_WPTR2);
}
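
/*
 * Note: when a ring uses doorbells (the SR-IOV enc rings set up in
 * sw_init), the last committed wptr is mirrored in a writeback slot and
 * read back from memory here instead of through the RB_WPTR/RB_WPTR2
 * registers.
 */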
/**
 * uvd_v7_0_ring_set_wptr - set write pointer
 *
 * @ring: amdgpu_ring pointer
 *
 * Commits the write pointer to the hardware
 */
static void uvd_v7_0_ring_set_wptr(struct amdgpu_ring *ring)
{
	struct amdgpu_device *adev = ring->adev;

	WREG32_SOC15(UVD, ring->me, mmUVD_RBC_RB_WPTR, lower_32_bits(ring->wptr));
}
/**
 * uvd_v7_0_enc_ring_set_wptr - set enc write pointer
 *
 * @ring: amdgpu_ring pointer
 *
 * Commits the enc write pointer to the hardware
 */
static void uvd_v7_0_enc_ring_set_wptr(struct amdgpu_ring *ring)
{
	struct amdgpu_device *adev = ring->adev;

	if (ring->use_doorbell) {
		/* XXX check if swapping is necessary on BE */
		adev->wb.wb[ring->wptr_offs] = lower_32_bits(ring->wptr);
		WDOORBELL32(ring->doorbell_index, lower_32_bits(ring->wptr));
		return;
	}

	if (ring == &adev->uvd.inst[ring->me].ring_enc[0])
		WREG32_SOC15(UVD, ring->me, mmUVD_RB_WPTR,
			lower_32_bits(ring->wptr));
	else
		WREG32_SOC15(UVD, ring->me, mmUVD_RB_WPTR2,
			lower_32_bits(ring->wptr));
}
/**
 * uvd_v7_0_enc_ring_test_ring - test if UVD ENC ring is working
 *
 * @ring: the engine to test on
 *
 */
static int uvd_v7_0_enc_ring_test_ring(struct amdgpu_ring *ring)
{
	struct amdgpu_device *adev = ring->adev;
	uint32_t rptr;
	unsigned i;
	int r;

	if (amdgpu_sriov_vf(adev))
		return 0;

	r = amdgpu_ring_alloc(ring, 16);
	if (r)
		return r;

	rptr = amdgpu_ring_get_rptr(ring);

	amdgpu_ring_write(ring, HEVC_ENC_CMD_END);
	amdgpu_ring_commit(ring);

	for (i = 0; i < adev->usec_timeout; i++) {
		if (amdgpu_ring_get_rptr(ring) != rptr)
			break;
		udelay(1);
	}

	if (i >= adev->usec_timeout)
		r = -ETIMEDOUT;

	return r;
}
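
/*
 * The enc ring test works by submitting a single HEVC_ENC_CMD_END and
 * polling the hardware read pointer: once the firmware consumes the
 * command, rptr moves past the recorded value.  No scratch register is
 * needed, which is why this differs from the PACKET0-based decode ring
 * test further below.  It is skipped under SR-IOV, where the ring is not
 * driven directly by the VF at this point.
 */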
/**
 * uvd_v7_0_enc_get_create_msg - generate a UVD ENC create msg
 *
 * @ring: ring we should submit the msg to
 * @handle: session handle to use
 * @bo: amdgpu object for which we query the offset
 * @fence: optional fence to return
 *
 * Open up a stream for HW test
 */
static int uvd_v7_0_enc_get_create_msg(struct amdgpu_ring *ring, uint32_t handle,
				       struct amdgpu_bo *bo,
				       struct dma_fence **fence)
{
	const unsigned ib_size_dw = 16;
	struct amdgpu_job *job;
	struct amdgpu_ib *ib;
	struct dma_fence *f = NULL;
	uint64_t addr;
	int i, r;

	r = amdgpu_job_alloc_with_ib(ring->adev, ib_size_dw * 4,
				     AMDGPU_IB_POOL_DIRECT, &job);
	if (r)
		return r;

	ib = &job->ibs[0];
	addr = amdgpu_bo_gpu_offset(bo);

	ib->length_dw = 0;
	ib->ptr[ib->length_dw++] = 0x00000018;
	ib->ptr[ib->length_dw++] = 0x00000001; /* session info */
	ib->ptr[ib->length_dw++] = handle;
	ib->ptr[ib->length_dw++] = 0x00000000;
	ib->ptr[ib->length_dw++] = upper_32_bits(addr);
	ib->ptr[ib->length_dw++] = addr;

	ib->ptr[ib->length_dw++] = 0x00000014;
	ib->ptr[ib->length_dw++] = 0x00000002; /* task info */
	ib->ptr[ib->length_dw++] = 0x0000001c;
	ib->ptr[ib->length_dw++] = 0x00000000;
	ib->ptr[ib->length_dw++] = 0x00000000;

	ib->ptr[ib->length_dw++] = 0x00000008;
	ib->ptr[ib->length_dw++] = 0x08000001; /* op initialize */

	for (i = ib->length_dw; i < ib_size_dw; ++i)
		ib->ptr[i] = 0x0;

	r = amdgpu_job_submit_direct(job, ring, &f);
	if (r)
		goto err;

	if (fence)
		*fence = dma_fence_get(f);
	dma_fence_put(f);
	return 0;

err:
	amdgpu_job_free(job);
	return r;
}
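
/*
 * Layout of the enc messages built here and in the destroy variant
 * below: each block starts with a byte size and a command dword - e.g.
 * 0x00000018/0x00000001 is a 0x18-byte session-info block carrying the
 * session handle and the GPU address of the session buffer - and the
 * final 0x0800000x dword selects the op (0x08000001 initialize,
 * 0x08000002 close session).  Unused dwords are zero-padded up to
 * ib_size_dw.
 */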
/**
 * uvd_v7_0_enc_get_destroy_msg - generate a UVD ENC destroy msg
 *
 * @ring: ring we should submit the msg to
 * @handle: session handle to use
 * @bo: amdgpu object for which we query the offset
 * @fence: optional fence to return
 *
 * Close up a stream for HW test or if userspace failed to do so
 */
static int uvd_v7_0_enc_get_destroy_msg(struct amdgpu_ring *ring, uint32_t handle,
					struct amdgpu_bo *bo,
					struct dma_fence **fence)
{
	const unsigned ib_size_dw = 16;
	struct amdgpu_job *job;
	struct amdgpu_ib *ib;
	struct dma_fence *f = NULL;
	uint64_t addr;
	int i, r;

	r = amdgpu_job_alloc_with_ib(ring->adev, ib_size_dw * 4,
				     AMDGPU_IB_POOL_DIRECT, &job);
	if (r)
		return r;

	ib = &job->ibs[0];
	addr = amdgpu_bo_gpu_offset(bo);

	ib->length_dw = 0;
	ib->ptr[ib->length_dw++] = 0x00000018;
	ib->ptr[ib->length_dw++] = 0x00000001;
	ib->ptr[ib->length_dw++] = handle;
	ib->ptr[ib->length_dw++] = 0x00000000;
	ib->ptr[ib->length_dw++] = upper_32_bits(addr);
	ib->ptr[ib->length_dw++] = addr;

	ib->ptr[ib->length_dw++] = 0x00000014;
	ib->ptr[ib->length_dw++] = 0x00000002;
	ib->ptr[ib->length_dw++] = 0x0000001c;
	ib->ptr[ib->length_dw++] = 0x00000000;
	ib->ptr[ib->length_dw++] = 0x00000000;

	ib->ptr[ib->length_dw++] = 0x00000008;
	ib->ptr[ib->length_dw++] = 0x08000002; /* op close session */

	for (i = ib->length_dw; i < ib_size_dw; ++i)
		ib->ptr[i] = 0x0;

	r = amdgpu_job_submit_direct(job, ring, &f);
	if (r)
		goto err;

	if (fence)
		*fence = dma_fence_get(f);
	dma_fence_put(f);
	return 0;

err:
	amdgpu_job_free(job);
	return r;
}
/**
 * uvd_v7_0_enc_ring_test_ib - test if UVD ENC IBs are working
 *
 * @ring: the engine to test on
 * @timeout: timeout value in jiffies, or MAX_SCHEDULE_TIMEOUT
 *
 */
static int uvd_v7_0_enc_ring_test_ib(struct amdgpu_ring *ring, long timeout)
{
	struct dma_fence *fence = NULL;
	struct amdgpu_bo *bo = NULL;
	long r;

	r = amdgpu_bo_create_reserved(ring->adev, 128 * 1024, PAGE_SIZE,
				      AMDGPU_GEM_DOMAIN_VRAM,
				      &bo, NULL, NULL);
	if (r)
		return r;

	r = uvd_v7_0_enc_get_create_msg(ring, 1, bo, NULL);
	if (r)
		goto error;

	r = uvd_v7_0_enc_get_destroy_msg(ring, 1, bo, &fence);
	if (r)
		goto error;

	r = dma_fence_wait_timeout(fence, false, timeout);
	if (r == 0)
		r = -ETIMEDOUT;
	else if (r > 0)
		r = 0;

error:
	dma_fence_put(fence);
	amdgpu_bo_unreserve(bo);
	amdgpu_bo_unref(&bo);
	return r;
}
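
/*
 * The IB test drives a full create/close-session round trip against a
 * scratch 128KiB VRAM buffer and waits on the fence of the destroy
 * message, so it exercises job submission end-to-end rather than just
 * ring fetch.
 */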
static int uvd_v7_0_early_init(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	if (adev->asic_type == CHIP_VEGA20) {
		u32 harvest;
		int i;

		adev->uvd.num_uvd_inst = UVD7_MAX_HW_INSTANCES_VEGA20;
		for (i = 0; i < adev->uvd.num_uvd_inst; i++) {
			harvest = RREG32_SOC15(UVD, i, mmUVD_PG0_CC_UVD_HARVESTING);
			if (harvest & UVD_PG0_CC_UVD_HARVESTING__UVD_DISABLE_MASK) {
				adev->uvd.harvest_config |= 1 << i;
			}
		}
		if (adev->uvd.harvest_config == (AMDGPU_UVD_HARVEST_UVD0 |
						 AMDGPU_UVD_HARVEST_UVD1))
			/* both instances are harvested, disable the block */
			return -ENOENT;
	} else {
		adev->uvd.num_uvd_inst = 1;
	}

	if (amdgpu_sriov_vf(adev))
		adev->uvd.num_enc_rings = 1;
	else
		adev->uvd.num_enc_rings = 2;
	uvd_v7_0_set_ring_funcs(adev);
	uvd_v7_0_set_enc_ring_funcs(adev);
	uvd_v7_0_set_irq_funcs(adev);

	return 0;
}
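
/*
 * adev->uvd.harvest_config is a per-instance bitmask: bit i set means
 * UVD instance i is fused off, e.g. AMDGPU_UVD_HARVEST_UVD0 alone means
 * only the second instance is usable.  Only when both Vega20 instances
 * are harvested is the whole IP block dropped with -ENOENT.
 */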
static int uvd_v7_0_sw_init(void *handle)
{
	struct amdgpu_ring *ring;

	int i, j, r;
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	for (j = 0; j < adev->uvd.num_uvd_inst; j++) {
		if (adev->uvd.harvest_config & (1 << j))
			continue;
		/* UVD TRAP */
		r = amdgpu_irq_add_id(adev, amdgpu_ih_clientid_uvds[j], UVD_7_0__SRCID__UVD_SYSTEM_MESSAGE_INTERRUPT, &adev->uvd.inst[j].irq);
		if (r)
			return r;

		/* UVD ENC TRAP */
		for (i = 0; i < adev->uvd.num_enc_rings; ++i) {
			r = amdgpu_irq_add_id(adev, amdgpu_ih_clientid_uvds[j], i + UVD_7_0__SRCID__UVD_ENC_GEN_PURP, &adev->uvd.inst[j].irq);
			if (r)
				return r;
		}
	}

	r = amdgpu_uvd_sw_init(adev);
	if (r)
		return r;

	if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) {
		const struct common_firmware_header *hdr;
		hdr = (const struct common_firmware_header *)adev->uvd.fw->data;
		adev->firmware.ucode[AMDGPU_UCODE_ID_UVD].ucode_id = AMDGPU_UCODE_ID_UVD;
		adev->firmware.ucode[AMDGPU_UCODE_ID_UVD].fw = adev->uvd.fw;
		adev->firmware.fw_size +=
			ALIGN(le32_to_cpu(hdr->ucode_size_bytes), PAGE_SIZE);

		if (adev->uvd.num_uvd_inst == UVD7_MAX_HW_INSTANCES_VEGA20) {
			adev->firmware.ucode[AMDGPU_UCODE_ID_UVD1].ucode_id = AMDGPU_UCODE_ID_UVD1;
			adev->firmware.ucode[AMDGPU_UCODE_ID_UVD1].fw = adev->uvd.fw;
			adev->firmware.fw_size +=
				ALIGN(le32_to_cpu(hdr->ucode_size_bytes), PAGE_SIZE);
		}
		DRM_INFO("PSP loading UVD firmware\n");
	}

	for (j = 0; j < adev->uvd.num_uvd_inst; j++) {
		if (adev->uvd.harvest_config & (1 << j))
			continue;
		if (!amdgpu_sriov_vf(adev)) {
			ring = &adev->uvd.inst[j].ring;
			sprintf(ring->name, "uvd_%d", ring->me);
			r = amdgpu_ring_init(adev, ring, 512,
					     &adev->uvd.inst[j].irq, 0,
					     AMDGPU_RING_PRIO_DEFAULT, NULL);
			if (r)
				return r;
		}

		for (i = 0; i < adev->uvd.num_enc_rings; ++i) {
			ring = &adev->uvd.inst[j].ring_enc[i];
			sprintf(ring->name, "uvd_enc_%d.%d", ring->me, i);
			if (amdgpu_sriov_vf(adev)) {
				ring->use_doorbell = true;

				/* currently only use the first encoding ring for
				 * sriov, so set unused location for other unused rings.
				 */
				if (i == 0)
					ring->doorbell_index = adev->doorbell_index.uvd_vce.uvd_ring0_1 * 2;
				else
					ring->doorbell_index = adev->doorbell_index.uvd_vce.uvd_ring2_3 * 2 + 1;
			}
			r = amdgpu_ring_init(adev, ring, 512,
					     &adev->uvd.inst[j].irq, 0,
					     AMDGPU_RING_PRIO_DEFAULT, NULL);
			if (r)
				return r;
		}
	}

	r = amdgpu_uvd_resume(adev);
	if (r)
		return r;

	r = amdgpu_uvd_entity_init(adev);
	if (r)
		return r;

	r = amdgpu_virt_alloc_mm_table(adev);
	if (r)
		return r;

	return r;
}
static int uvd_v7_0_sw_fini(void *handle)
{
	int i, j, r;
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	amdgpu_virt_free_mm_table(adev);

	r = amdgpu_uvd_suspend(adev);
	if (r)
		return r;

	for (j = 0; j < adev->uvd.num_uvd_inst; ++j) {
		if (adev->uvd.harvest_config & (1 << j))
			continue;
		for (i = 0; i < adev->uvd.num_enc_rings; ++i)
			amdgpu_ring_fini(&adev->uvd.inst[j].ring_enc[i]);
	}
	return amdgpu_uvd_sw_fini(adev);
}
/**
 * uvd_v7_0_hw_init - start and test UVD block
 *
 * @handle: handle used to pass amdgpu_device pointer
 *
 * Initialize the hardware, boot up the VCPU and do some testing
 */
static int uvd_v7_0_hw_init(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	struct amdgpu_ring *ring;
	uint32_t tmp;
	int i, j, r;

	if (amdgpu_sriov_vf(adev))
		r = uvd_v7_0_sriov_start(adev);
	else
		r = uvd_v7_0_start(adev);
	if (r)
		goto done;

	for (j = 0; j < adev->uvd.num_uvd_inst; ++j) {
		if (adev->uvd.harvest_config & (1 << j))
			continue;
		ring = &adev->uvd.inst[j].ring;

		if (!amdgpu_sriov_vf(adev)) {
			r = amdgpu_ring_test_helper(ring);
			if (r)
				goto done;

			r = amdgpu_ring_alloc(ring, 10);
			if (r) {
				DRM_ERROR("amdgpu: (%d)ring failed to lock UVD ring (%d).\n", j, r);
				goto done;
			}

			tmp = PACKET0(SOC15_REG_OFFSET(UVD, j,
				mmUVD_SEMA_WAIT_FAULT_TIMEOUT_CNTL), 0);
			amdgpu_ring_write(ring, tmp);
			amdgpu_ring_write(ring, 0xFFFFF);

			tmp = PACKET0(SOC15_REG_OFFSET(UVD, j,
				mmUVD_SEMA_WAIT_INCOMPLETE_TIMEOUT_CNTL), 0);
			amdgpu_ring_write(ring, tmp);
			amdgpu_ring_write(ring, 0xFFFFF);

			tmp = PACKET0(SOC15_REG_OFFSET(UVD, j,
				mmUVD_SEMA_SIGNAL_INCOMPLETE_TIMEOUT_CNTL), 0);
			amdgpu_ring_write(ring, tmp);
			amdgpu_ring_write(ring, 0xFFFFF);

			/* Clear timeout status bits */
			amdgpu_ring_write(ring, PACKET0(SOC15_REG_OFFSET(UVD, j,
				mmUVD_SEMA_TIMEOUT_STATUS), 0));
			amdgpu_ring_write(ring, 0x8);

			amdgpu_ring_write(ring, PACKET0(SOC15_REG_OFFSET(UVD, j,
				mmUVD_SEMA_CNTL), 0));
			amdgpu_ring_write(ring, 3);

			amdgpu_ring_commit(ring);
		}

		for (i = 0; i < adev->uvd.num_enc_rings; ++i) {
			ring = &adev->uvd.inst[j].ring_enc[i];
			r = amdgpu_ring_test_helper(ring);
			if (r)
				goto done;
		}
	}
done:
	if (!r)
		DRM_INFO("UVD and UVD ENC initialized successfully.\n");

	return r;
}
/**
 * uvd_v7_0_hw_fini - stop the hardware block
 *
 * @handle: handle used to pass amdgpu_device pointer
 *
 * Stop the UVD block, mark ring as not ready any more
 */
static int uvd_v7_0_hw_fini(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	/*
	 * Proper cleanups before halting the HW engine:
	 *   - cancel the delayed idle work
	 *   - enable powergating
	 *   - enable clockgating
	 *   - disable dpm
	 *
	 * TODO: to align with the VCN implementation, move the
	 * jobs for clockgating/powergating/dpm setting to
	 * ->set_powergating_state().
	 */
	cancel_delayed_work_sync(&adev->uvd.idle_work);

	if (adev->pm.dpm_enabled) {
		amdgpu_dpm_enable_uvd(adev, false);
	} else {
		amdgpu_asic_set_uvd_clocks(adev, 0, 0);
		/* shutdown the UVD block */
		amdgpu_device_ip_set_powergating_state(adev, AMD_IP_BLOCK_TYPE_UVD,
						       AMD_PG_STATE_GATE);
		amdgpu_device_ip_set_clockgating_state(adev, AMD_IP_BLOCK_TYPE_UVD,
						       AMD_CG_STATE_GATE);
	}

	if (!amdgpu_sriov_vf(adev))
		uvd_v7_0_stop(adev);
	else {
		/* full access mode, so don't touch any UVD register */
		DRM_DEBUG("For SRIOV client, shouldn't do anything.\n");
	}

	return 0;
}
static int uvd_v7_0_suspend(void *handle)
{
	int r;
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	r = uvd_v7_0_hw_fini(adev);
	if (r)
		return r;

	return amdgpu_uvd_suspend(adev);
}

static int uvd_v7_0_resume(void *handle)
{
	int r;
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	r = amdgpu_uvd_resume(adev);
	if (r)
		return r;

	return uvd_v7_0_hw_init(adev);
}
/**
 * uvd_v7_0_mc_resume - memory controller programming
 *
 * @adev: amdgpu_device pointer
 *
 * Let the UVD memory controller know its offsets
 */
static void uvd_v7_0_mc_resume(struct amdgpu_device *adev)
{
	uint32_t size = AMDGPU_UVD_FIRMWARE_SIZE(adev);
	uint32_t offset;
	int i;

	for (i = 0; i < adev->uvd.num_uvd_inst; ++i) {
		if (adev->uvd.harvest_config & (1 << i))
			continue;
		if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) {
			WREG32_SOC15(UVD, i, mmUVD_LMI_VCPU_CACHE_64BIT_BAR_LOW,
				i == 0 ?
				adev->firmware.ucode[AMDGPU_UCODE_ID_UVD].tmr_mc_addr_lo :
				adev->firmware.ucode[AMDGPU_UCODE_ID_UVD1].tmr_mc_addr_lo);
			WREG32_SOC15(UVD, i, mmUVD_LMI_VCPU_CACHE_64BIT_BAR_HIGH,
				i == 0 ?
				adev->firmware.ucode[AMDGPU_UCODE_ID_UVD].tmr_mc_addr_hi :
				adev->firmware.ucode[AMDGPU_UCODE_ID_UVD1].tmr_mc_addr_hi);
			WREG32_SOC15(UVD, i, mmUVD_VCPU_CACHE_OFFSET0, 0);
			offset = 0;
		} else {
			WREG32_SOC15(UVD, i, mmUVD_LMI_VCPU_CACHE_64BIT_BAR_LOW,
				lower_32_bits(adev->uvd.inst[i].gpu_addr));
			WREG32_SOC15(UVD, i, mmUVD_LMI_VCPU_CACHE_64BIT_BAR_HIGH,
				upper_32_bits(adev->uvd.inst[i].gpu_addr));
			offset = size;
			WREG32_SOC15(UVD, i, mmUVD_VCPU_CACHE_OFFSET0,
					AMDGPU_UVD_FIRMWARE_OFFSET >> 3);
		}

		WREG32_SOC15(UVD, i, mmUVD_VCPU_CACHE_SIZE0, size);

		WREG32_SOC15(UVD, i, mmUVD_LMI_VCPU_CACHE1_64BIT_BAR_LOW,
				lower_32_bits(adev->uvd.inst[i].gpu_addr + offset));
		WREG32_SOC15(UVD, i, mmUVD_LMI_VCPU_CACHE1_64BIT_BAR_HIGH,
				upper_32_bits(adev->uvd.inst[i].gpu_addr + offset));
		WREG32_SOC15(UVD, i, mmUVD_VCPU_CACHE_OFFSET1, (1 << 21));
		WREG32_SOC15(UVD, i, mmUVD_VCPU_CACHE_SIZE1, AMDGPU_UVD_HEAP_SIZE);

		WREG32_SOC15(UVD, i, mmUVD_LMI_VCPU_CACHE2_64BIT_BAR_LOW,
				lower_32_bits(adev->uvd.inst[i].gpu_addr + offset + AMDGPU_UVD_HEAP_SIZE));
		WREG32_SOC15(UVD, i, mmUVD_LMI_VCPU_CACHE2_64BIT_BAR_HIGH,
				upper_32_bits(adev->uvd.inst[i].gpu_addr + offset + AMDGPU_UVD_HEAP_SIZE));
		WREG32_SOC15(UVD, i, mmUVD_VCPU_CACHE_OFFSET2, (2 << 21));
		WREG32_SOC15(UVD, i, mmUVD_VCPU_CACHE_SIZE2,
				AMDGPU_UVD_STACK_SIZE + (AMDGPU_UVD_SESSION_SIZE * 40));

		WREG32_SOC15(UVD, i, mmUVD_UDEC_ADDR_CONFIG,
				adev->gfx.config.gb_addr_config);
		WREG32_SOC15(UVD, i, mmUVD_UDEC_DB_ADDR_CONFIG,
				adev->gfx.config.gb_addr_config);
		WREG32_SOC15(UVD, i, mmUVD_UDEC_DBW_ADDR_CONFIG,
				adev->gfx.config.gb_addr_config);

		WREG32_SOC15(UVD, i, mmUVD_GP_SCRATCH4, adev->uvd.max_handles);
	}
}
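
/*
 * The three VCPU cache windows programmed above carve up the engine's
 * view of memory: window 0 maps the firmware image (offset 0 when the
 * PSP placed the ucode in the TMR), window 1 the heap, and window 2 the
 * stack plus per-session state sized for up to 40 sessions
 * (AMDGPU_UVD_SESSION_SIZE * 40), matching the SIZE2 formula.
 */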
static int uvd_v7_0_mmsch_start(struct amdgpu_device *adev,
				struct amdgpu_mm_table *table)
{
	uint32_t data = 0, loop;
	uint64_t addr = table->gpu_addr;
	struct mmsch_v1_0_init_header *header = (struct mmsch_v1_0_init_header *)table->cpu_addr;
	uint32_t size;
	int i;

	size = header->header_size + header->vce_table_size + header->uvd_table_size;

	/* 1, write to vce_mmsch_vf_ctx_addr_lo/hi register with GPU mc addr of memory descriptor location */
	WREG32_SOC15(VCE, 0, mmVCE_MMSCH_VF_CTX_ADDR_LO, lower_32_bits(addr));
	WREG32_SOC15(VCE, 0, mmVCE_MMSCH_VF_CTX_ADDR_HI, upper_32_bits(addr));

	/* 2, update vmid of descriptor */
	data = RREG32_SOC15(VCE, 0, mmVCE_MMSCH_VF_VMID);
	data &= ~VCE_MMSCH_VF_VMID__VF_CTX_VMID_MASK;
	data |= (0 << VCE_MMSCH_VF_VMID__VF_CTX_VMID__SHIFT); /* use domain0 for MM scheduler */
	WREG32_SOC15(VCE, 0, mmVCE_MMSCH_VF_VMID, data);

	/* 3, notify mmsch about the size of this descriptor */
	WREG32_SOC15(VCE, 0, mmVCE_MMSCH_VF_CTX_SIZE, size);

	/* 4, set resp to zero */
	WREG32_SOC15(VCE, 0, mmVCE_MMSCH_VF_MAILBOX_RESP, 0);

	for (i = 0; i < adev->uvd.num_uvd_inst; ++i) {
		if (adev->uvd.harvest_config & (1 << i))
			continue;
		WDOORBELL32(adev->uvd.inst[i].ring_enc[0].doorbell_index, 0);
		adev->wb.wb[adev->uvd.inst[i].ring_enc[0].wptr_offs] = 0;
		adev->uvd.inst[i].ring_enc[0].wptr = 0;
		adev->uvd.inst[i].ring_enc[0].wptr_old = 0;
	}
	/* 5, kick off the initialization and wait until VCE_MMSCH_VF_MAILBOX_RESP becomes non-zero */
	WREG32_SOC15(VCE, 0, mmVCE_MMSCH_VF_MAILBOX_HOST, 0x10000001);

	data = RREG32_SOC15(VCE, 0, mmVCE_MMSCH_VF_MAILBOX_RESP);
	loop = 1000;
	while ((data & 0x10000002) != 0x10000002) {
		udelay(10);
		data = RREG32_SOC15(VCE, 0, mmVCE_MMSCH_VF_MAILBOX_RESP);
		loop--;
		if (!loop)
			break;
	}

	if (!loop) {
		dev_err(adev->dev, "failed to init MMSCH, mmVCE_MMSCH_VF_MAILBOX_RESP = %x\n", data);
		return -EBUSY;
	}

	return 0;
}
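
/*
 * MMSCH handshake, matching the numbered steps above: the VF publishes
 * the descriptor table (address, VMID 0, size), clears the response
 * register, writes 0x10000001 to the host mailbox and then polls
 * VCE_MMSCH_VF_MAILBOX_RESP for the 0x10000002 completion pattern,
 * giving up with -EBUSY once the 1000-iteration budget is spent.
 */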
static int uvd_v7_0_sriov_start(struct amdgpu_device *adev)
{
	struct amdgpu_ring *ring;
	uint32_t offset, size, tmp;
	uint32_t table_size = 0;
	struct mmsch_v1_0_cmd_direct_write direct_wt = { {0} };
	struct mmsch_v1_0_cmd_direct_read_modify_write direct_rd_mod_wt = { {0} };
	struct mmsch_v1_0_cmd_direct_polling direct_poll = { {0} };
	struct mmsch_v1_0_cmd_end end = { {0} };
	uint32_t *init_table = adev->virt.mm_table.cpu_addr;
	struct mmsch_v1_0_init_header *header = (struct mmsch_v1_0_init_header *)init_table;
	uint8_t i = 0;

	direct_wt.cmd_header.command_type = MMSCH_COMMAND__DIRECT_REG_WRITE;
	direct_rd_mod_wt.cmd_header.command_type = MMSCH_COMMAND__DIRECT_REG_READ_MODIFY_WRITE;
	direct_poll.cmd_header.command_type = MMSCH_COMMAND__DIRECT_REG_POLLING;
	end.cmd_header.command_type = MMSCH_COMMAND__END;

	if (header->uvd_table_offset == 0 && header->uvd_table_size == 0) {
		header->version = MMSCH_VERSION;
		header->header_size = sizeof(struct mmsch_v1_0_init_header) >> 2;

		if (header->vce_table_offset == 0 && header->vce_table_size == 0)
			header->uvd_table_offset = header->header_size;
		else
			header->uvd_table_offset = header->vce_table_size + header->vce_table_offset;

		init_table += header->uvd_table_offset;

		for (i = 0; i < adev->uvd.num_uvd_inst; ++i) {
			if (adev->uvd.harvest_config & (1 << i))
				continue;
			ring = &adev->uvd.inst[i].ring;
			ring->wptr = 0;
			size = AMDGPU_GPU_PAGE_ALIGN(adev->uvd.fw->size + 4);

			MMSCH_V1_0_INSERT_DIRECT_RD_MOD_WT(SOC15_REG_OFFSET(UVD, i, mmUVD_STATUS),
							   0xFFFFFFFF, 0x00000004);
			/* mc resume*/
			if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) {
				MMSCH_V1_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(UVD, i,
							    mmUVD_LMI_VCPU_CACHE_64BIT_BAR_LOW),
							    adev->firmware.ucode[AMDGPU_UCODE_ID_UVD].tmr_mc_addr_lo);
				MMSCH_V1_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(UVD, i,
							    mmUVD_LMI_VCPU_CACHE_64BIT_BAR_HIGH),
							    adev->firmware.ucode[AMDGPU_UCODE_ID_UVD].tmr_mc_addr_hi);
				MMSCH_V1_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(UVD, 0, mmUVD_VCPU_CACHE_OFFSET0), 0);
				offset = 0;
			} else {
				MMSCH_V1_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(UVD, i, mmUVD_LMI_VCPU_CACHE_64BIT_BAR_LOW),
							    lower_32_bits(adev->uvd.inst[i].gpu_addr));
				MMSCH_V1_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(UVD, i, mmUVD_LMI_VCPU_CACHE_64BIT_BAR_HIGH),
							    upper_32_bits(adev->uvd.inst[i].gpu_addr));
				offset = size;
				MMSCH_V1_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(UVD, 0, mmUVD_VCPU_CACHE_OFFSET0),
							    AMDGPU_UVD_FIRMWARE_OFFSET >> 3);
			}

			MMSCH_V1_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(UVD, i, mmUVD_VCPU_CACHE_SIZE0), size);

			MMSCH_V1_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(UVD, i, mmUVD_LMI_VCPU_CACHE1_64BIT_BAR_LOW),
						    lower_32_bits(adev->uvd.inst[i].gpu_addr + offset));
			MMSCH_V1_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(UVD, i, mmUVD_LMI_VCPU_CACHE1_64BIT_BAR_HIGH),
						    upper_32_bits(adev->uvd.inst[i].gpu_addr + offset));
			MMSCH_V1_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(UVD, i, mmUVD_VCPU_CACHE_OFFSET1), (1 << 21));
			MMSCH_V1_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(UVD, i, mmUVD_VCPU_CACHE_SIZE1), AMDGPU_UVD_HEAP_SIZE);

			MMSCH_V1_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(UVD, i, mmUVD_LMI_VCPU_CACHE2_64BIT_BAR_LOW),
						    lower_32_bits(adev->uvd.inst[i].gpu_addr + offset + AMDGPU_UVD_HEAP_SIZE));
			MMSCH_V1_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(UVD, i, mmUVD_LMI_VCPU_CACHE2_64BIT_BAR_HIGH),
						    upper_32_bits(adev->uvd.inst[i].gpu_addr + offset + AMDGPU_UVD_HEAP_SIZE));
			MMSCH_V1_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(UVD, i, mmUVD_VCPU_CACHE_OFFSET2), (2 << 21));
			MMSCH_V1_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(UVD, i, mmUVD_VCPU_CACHE_SIZE2),
						    AMDGPU_UVD_STACK_SIZE + (AMDGPU_UVD_SESSION_SIZE * 40));

			MMSCH_V1_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(UVD, i, mmUVD_GP_SCRATCH4), adev->uvd.max_handles);
			/* mc resume end*/

			/* disable clock gating */
			MMSCH_V1_0_INSERT_DIRECT_RD_MOD_WT(SOC15_REG_OFFSET(UVD, i, mmUVD_CGC_CTRL),
							   ~UVD_CGC_CTRL__DYN_CLOCK_MODE_MASK, 0);

			/* disable interrupt */
			MMSCH_V1_0_INSERT_DIRECT_RD_MOD_WT(SOC15_REG_OFFSET(UVD, i, mmUVD_MASTINT_EN),
							   ~UVD_MASTINT_EN__VCPU_EN_MASK, 0);

			/* stall UMC and register bus before resetting VCPU */
			MMSCH_V1_0_INSERT_DIRECT_RD_MOD_WT(SOC15_REG_OFFSET(UVD, i, mmUVD_LMI_CTRL2),
							   ~UVD_LMI_CTRL2__STALL_ARB_UMC_MASK,
							   UVD_LMI_CTRL2__STALL_ARB_UMC_MASK);

			/* put LMI, VCPU, RBC etc... into reset */
			MMSCH_V1_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(UVD, i, mmUVD_SOFT_RESET),
						    (uint32_t)(UVD_SOFT_RESET__LMI_SOFT_RESET_MASK |
							       UVD_SOFT_RESET__VCPU_SOFT_RESET_MASK |
							       UVD_SOFT_RESET__LBSI_SOFT_RESET_MASK |
							       UVD_SOFT_RESET__RBC_SOFT_RESET_MASK |
							       UVD_SOFT_RESET__CSM_SOFT_RESET_MASK |
							       UVD_SOFT_RESET__CXW_SOFT_RESET_MASK |
							       UVD_SOFT_RESET__TAP_SOFT_RESET_MASK |
							       UVD_SOFT_RESET__LMI_UMC_SOFT_RESET_MASK));

			/* initialize UVD memory controller */
			MMSCH_V1_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(UVD, i, mmUVD_LMI_CTRL),
						    (uint32_t)((0x40 << UVD_LMI_CTRL__WRITE_CLEAN_TIMER__SHIFT) |
							       UVD_LMI_CTRL__WRITE_CLEAN_TIMER_EN_MASK |
							       UVD_LMI_CTRL__DATA_COHERENCY_EN_MASK |
							       UVD_LMI_CTRL__VCPU_DATA_COHERENCY_EN_MASK |
							       UVD_LMI_CTRL__REQ_MODE_MASK |
							       0x00100000L));

			/* take all subblocks out of reset, except VCPU */
			MMSCH_V1_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(UVD, i, mmUVD_SOFT_RESET),
						    UVD_SOFT_RESET__VCPU_SOFT_RESET_MASK);

			/* enable VCPU clock */
			MMSCH_V1_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(UVD, i, mmUVD_VCPU_CNTL),
						    UVD_VCPU_CNTL__CLK_EN_MASK);

			/* enable master interrupt */
			MMSCH_V1_0_INSERT_DIRECT_RD_MOD_WT(SOC15_REG_OFFSET(UVD, i, mmUVD_MASTINT_EN),
							   ~(UVD_MASTINT_EN__VCPU_EN_MASK|UVD_MASTINT_EN__SYS_EN_MASK),
							   (UVD_MASTINT_EN__VCPU_EN_MASK|UVD_MASTINT_EN__SYS_EN_MASK));

			/* clear the bit 4 of UVD_STATUS */
			MMSCH_V1_0_INSERT_DIRECT_RD_MOD_WT(SOC15_REG_OFFSET(UVD, i, mmUVD_STATUS),
							   ~(2 << UVD_STATUS__VCPU_REPORT__SHIFT), 0);

			/* force RBC into idle state */
			size = order_base_2(ring->ring_size);
			tmp = REG_SET_FIELD(0, UVD_RBC_RB_CNTL, RB_BUFSZ, size);
			tmp = REG_SET_FIELD(tmp, UVD_RBC_RB_CNTL, RB_NO_FETCH, 1);
			MMSCH_V1_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(UVD, i, mmUVD_RBC_RB_CNTL), tmp);

			ring = &adev->uvd.inst[i].ring_enc[0];
			ring->wptr = 0;
			MMSCH_V1_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(UVD, i, mmUVD_RB_BASE_LO), ring->gpu_addr);
			MMSCH_V1_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(UVD, i, mmUVD_RB_BASE_HI), upper_32_bits(ring->gpu_addr));
			MMSCH_V1_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(UVD, i, mmUVD_RB_SIZE), ring->ring_size / 4);

			/* boot up the VCPU */
			MMSCH_V1_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(UVD, i, mmUVD_SOFT_RESET), 0);

			/* enable UMC */
			MMSCH_V1_0_INSERT_DIRECT_RD_MOD_WT(SOC15_REG_OFFSET(UVD, i, mmUVD_LMI_CTRL2),
							   ~UVD_LMI_CTRL2__STALL_ARB_UMC_MASK, 0);

			MMSCH_V1_0_INSERT_DIRECT_POLL(SOC15_REG_OFFSET(UVD, i, mmUVD_STATUS), 0x02, 0x02);
		}
		/* add end packet */
		memcpy((void *)init_table, &end, sizeof(struct mmsch_v1_0_cmd_end));
		table_size += sizeof(struct mmsch_v1_0_cmd_end) / 4;
		header->uvd_table_size = table_size;

	}
	return uvd_v7_0_mmsch_start(adev, &adev->virt.mm_table);
}
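
/*
 * Under SR-IOV none of the register programming above touches hardware
 * directly: the MMSCH_V1_0_INSERT_* macros record it as direct-write /
 * read-modify-write / poll entries in the mm_table descriptor,
 * terminated by an END packet, and the MMSCH firmware replays the table
 * once uvd_v7_0_mmsch_start() rings the mailbox.
 */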
/**
 * uvd_v7_0_start - start UVD block
 *
 * @adev: amdgpu_device pointer
 *
 * Setup and start the UVD block
 */
static int uvd_v7_0_start(struct amdgpu_device *adev)
{
	struct amdgpu_ring *ring;
	uint32_t rb_bufsz, tmp;
	uint32_t lmi_swap_cntl;
	uint32_t mp_swap_cntl;
	int i, j, k, r;

	for (k = 0; k < adev->uvd.num_uvd_inst; ++k) {
		if (adev->uvd.harvest_config & (1 << k))
			continue;
		/* disable DPG */
		WREG32_P(SOC15_REG_OFFSET(UVD, k, mmUVD_POWER_STATUS), 0,
				~UVD_POWER_STATUS__UVD_PG_MODE_MASK);
	}

	/* disable byte swapping */
	lmi_swap_cntl = 0;
	mp_swap_cntl = 0;

	uvd_v7_0_mc_resume(adev);

	for (k = 0; k < adev->uvd.num_uvd_inst; ++k) {
		if (adev->uvd.harvest_config & (1 << k))
			continue;
		ring = &adev->uvd.inst[k].ring;
		/* disable clock gating */
		WREG32_P(SOC15_REG_OFFSET(UVD, k, mmUVD_CGC_CTRL), 0,
				~UVD_CGC_CTRL__DYN_CLOCK_MODE_MASK);

		/* disable interrupt */
		WREG32_P(SOC15_REG_OFFSET(UVD, k, mmUVD_MASTINT_EN), 0,
				~UVD_MASTINT_EN__VCPU_EN_MASK);

		/* stall UMC and register bus before resetting VCPU */
		WREG32_P(SOC15_REG_OFFSET(UVD, k, mmUVD_LMI_CTRL2),
				UVD_LMI_CTRL2__STALL_ARB_UMC_MASK,
				~UVD_LMI_CTRL2__STALL_ARB_UMC_MASK);
		mdelay(1);

		/* put LMI, VCPU, RBC etc... into reset */
		WREG32_SOC15(UVD, k, mmUVD_SOFT_RESET,
			UVD_SOFT_RESET__LMI_SOFT_RESET_MASK |
			UVD_SOFT_RESET__VCPU_SOFT_RESET_MASK |
			UVD_SOFT_RESET__LBSI_SOFT_RESET_MASK |
			UVD_SOFT_RESET__RBC_SOFT_RESET_MASK |
			UVD_SOFT_RESET__CSM_SOFT_RESET_MASK |
			UVD_SOFT_RESET__CXW_SOFT_RESET_MASK |
			UVD_SOFT_RESET__TAP_SOFT_RESET_MASK |
			UVD_SOFT_RESET__LMI_UMC_SOFT_RESET_MASK);
		mdelay(5);

		/* initialize UVD memory controller */
		WREG32_SOC15(UVD, k, mmUVD_LMI_CTRL,
			(0x40 << UVD_LMI_CTRL__WRITE_CLEAN_TIMER__SHIFT) |
			UVD_LMI_CTRL__WRITE_CLEAN_TIMER_EN_MASK |
			UVD_LMI_CTRL__DATA_COHERENCY_EN_MASK |
			UVD_LMI_CTRL__VCPU_DATA_COHERENCY_EN_MASK |
			UVD_LMI_CTRL__REQ_MODE_MASK |
			0x00100000L);

#ifdef __BIG_ENDIAN
		/* swap (8 in 32) RB and IB */
		lmi_swap_cntl = 0xa;
		mp_swap_cntl = 0;
#endif
		WREG32_SOC15(UVD, k, mmUVD_LMI_SWAP_CNTL, lmi_swap_cntl);
		WREG32_SOC15(UVD, k, mmUVD_MP_SWAP_CNTL, mp_swap_cntl);

		WREG32_SOC15(UVD, k, mmUVD_MPC_SET_MUXA0, 0x40c2040);
		WREG32_SOC15(UVD, k, mmUVD_MPC_SET_MUXA1, 0x0);
		WREG32_SOC15(UVD, k, mmUVD_MPC_SET_MUXB0, 0x40c2040);
		WREG32_SOC15(UVD, k, mmUVD_MPC_SET_MUXB1, 0x0);
		WREG32_SOC15(UVD, k, mmUVD_MPC_SET_ALU, 0);
		WREG32_SOC15(UVD, k, mmUVD_MPC_SET_MUX, 0x88);

		/* take all subblocks out of reset, except VCPU */
		WREG32_SOC15(UVD, k, mmUVD_SOFT_RESET,
				UVD_SOFT_RESET__VCPU_SOFT_RESET_MASK);
		mdelay(5);

		/* enable VCPU clock */
		WREG32_SOC15(UVD, k, mmUVD_VCPU_CNTL,
				UVD_VCPU_CNTL__CLK_EN_MASK);

		/* enable UMC */
		WREG32_P(SOC15_REG_OFFSET(UVD, k, mmUVD_LMI_CTRL2), 0,
				~UVD_LMI_CTRL2__STALL_ARB_UMC_MASK);

		/* boot up the VCPU */
		WREG32_SOC15(UVD, k, mmUVD_SOFT_RESET, 0);
		mdelay(10);

		for (i = 0; i < 10; ++i) {
			uint32_t status;

			for (j = 0; j < 100; ++j) {
				status = RREG32_SOC15(UVD, k, mmUVD_STATUS);
				if (status & 2)
					break;
				mdelay(10);
			}
			r = 0;
			if (status & 2)
				break;

			DRM_ERROR("UVD(%d) not responding, trying to reset the VCPU!!!\n", k);
			WREG32_P(SOC15_REG_OFFSET(UVD, k, mmUVD_SOFT_RESET),
					UVD_SOFT_RESET__VCPU_SOFT_RESET_MASK,
					~UVD_SOFT_RESET__VCPU_SOFT_RESET_MASK);
			mdelay(10);
			WREG32_P(SOC15_REG_OFFSET(UVD, k, mmUVD_SOFT_RESET), 0,
					~UVD_SOFT_RESET__VCPU_SOFT_RESET_MASK);
			mdelay(10);
			r = -1;
		}

		if (r) {
			DRM_ERROR("UVD(%d) not responding, giving up!!!\n", k);
			return r;
		}
		/* enable master interrupt */
		WREG32_P(SOC15_REG_OFFSET(UVD, k, mmUVD_MASTINT_EN),
			(UVD_MASTINT_EN__VCPU_EN_MASK|UVD_MASTINT_EN__SYS_EN_MASK),
			~(UVD_MASTINT_EN__VCPU_EN_MASK|UVD_MASTINT_EN__SYS_EN_MASK));

		/* clear the bit 4 of UVD_STATUS */
		WREG32_P(SOC15_REG_OFFSET(UVD, k, mmUVD_STATUS), 0,
				~(2 << UVD_STATUS__VCPU_REPORT__SHIFT));

		/* force RBC into idle state */
		rb_bufsz = order_base_2(ring->ring_size);
		tmp = REG_SET_FIELD(0, UVD_RBC_RB_CNTL, RB_BUFSZ, rb_bufsz);
		tmp = REG_SET_FIELD(tmp, UVD_RBC_RB_CNTL, RB_BLKSZ, 1);
		tmp = REG_SET_FIELD(tmp, UVD_RBC_RB_CNTL, RB_NO_FETCH, 1);
		tmp = REG_SET_FIELD(tmp, UVD_RBC_RB_CNTL, RB_WPTR_POLL_EN, 0);
		tmp = REG_SET_FIELD(tmp, UVD_RBC_RB_CNTL, RB_NO_UPDATE, 1);
		tmp = REG_SET_FIELD(tmp, UVD_RBC_RB_CNTL, RB_RPTR_WR_EN, 1);
		WREG32_SOC15(UVD, k, mmUVD_RBC_RB_CNTL, tmp);

		/* set the write pointer delay */
		WREG32_SOC15(UVD, k, mmUVD_RBC_RB_WPTR_CNTL, 0);

		/* set the wb address */
		WREG32_SOC15(UVD, k, mmUVD_RBC_RB_RPTR_ADDR,
				(upper_32_bits(ring->gpu_addr) >> 2));

		/* program the RB_BASE for ring buffer */
		WREG32_SOC15(UVD, k, mmUVD_LMI_RBC_RB_64BIT_BAR_LOW,
				lower_32_bits(ring->gpu_addr));
		WREG32_SOC15(UVD, k, mmUVD_LMI_RBC_RB_64BIT_BAR_HIGH,
				upper_32_bits(ring->gpu_addr));

		/* Initialize the ring buffer's read and write pointers */
		WREG32_SOC15(UVD, k, mmUVD_RBC_RB_RPTR, 0);

		ring->wptr = RREG32_SOC15(UVD, k, mmUVD_RBC_RB_RPTR);
		WREG32_SOC15(UVD, k, mmUVD_RBC_RB_WPTR,
				lower_32_bits(ring->wptr));

		WREG32_P(SOC15_REG_OFFSET(UVD, k, mmUVD_RBC_RB_CNTL), 0,
				~UVD_RBC_RB_CNTL__RB_NO_FETCH_MASK);

		ring = &adev->uvd.inst[k].ring_enc[0];
		WREG32_SOC15(UVD, k, mmUVD_RB_RPTR, lower_32_bits(ring->wptr));
		WREG32_SOC15(UVD, k, mmUVD_RB_WPTR, lower_32_bits(ring->wptr));
		WREG32_SOC15(UVD, k, mmUVD_RB_BASE_LO, ring->gpu_addr);
		WREG32_SOC15(UVD, k, mmUVD_RB_BASE_HI, upper_32_bits(ring->gpu_addr));
		WREG32_SOC15(UVD, k, mmUVD_RB_SIZE, ring->ring_size / 4);

		ring = &adev->uvd.inst[k].ring_enc[1];
		WREG32_SOC15(UVD, k, mmUVD_RB_RPTR2, lower_32_bits(ring->wptr));
		WREG32_SOC15(UVD, k, mmUVD_RB_WPTR2, lower_32_bits(ring->wptr));
		WREG32_SOC15(UVD, k, mmUVD_RB_BASE_LO2, ring->gpu_addr);
		WREG32_SOC15(UVD, k, mmUVD_RB_BASE_HI2, upper_32_bits(ring->gpu_addr));
		WREG32_SOC15(UVD, k, mmUVD_RB_SIZE2, ring->ring_size / 4);
	}
	return 0;
}
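
/*
 * VCPU boot protocol used above: up to ten attempts, each polling
 * UVD_STATUS for the VCPU-report bit (0x2) for roughly a second
 * (100 x mdelay(10)); a failed attempt pulses VCPU_SOFT_RESET before
 * retrying.  The master interrupt and ring fetch are only enabled once
 * the firmware reports ready.
 */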
/**
 * uvd_v7_0_stop - stop UVD block
 *
 * @adev: amdgpu_device pointer
 *
 * stop the UVD block
 */
static void uvd_v7_0_stop(struct amdgpu_device *adev)
{
	uint8_t i = 0;

	for (i = 0; i < adev->uvd.num_uvd_inst; ++i) {
		if (adev->uvd.harvest_config & (1 << i))
			continue;
		/* force RBC into idle state */
		WREG32_SOC15(UVD, i, mmUVD_RBC_RB_CNTL, 0x11010101);

		/* Stall UMC and register bus before resetting VCPU */
		WREG32_P(SOC15_REG_OFFSET(UVD, i, mmUVD_LMI_CTRL2),
				UVD_LMI_CTRL2__STALL_ARB_UMC_MASK,
				~UVD_LMI_CTRL2__STALL_ARB_UMC_MASK);
		mdelay(1);

		/* put VCPU into reset */
		WREG32_SOC15(UVD, i, mmUVD_SOFT_RESET,
				UVD_SOFT_RESET__VCPU_SOFT_RESET_MASK);
		mdelay(5);

		/* disable VCPU clock */
		WREG32_SOC15(UVD, i, mmUVD_VCPU_CNTL, 0x0);

		/* Unstall UMC and register bus */
		WREG32_P(SOC15_REG_OFFSET(UVD, i, mmUVD_LMI_CTRL2), 0,
				~UVD_LMI_CTRL2__STALL_ARB_UMC_MASK);
	}
}
/**
 * uvd_v7_0_ring_emit_fence - emit a fence & trap command
 *
 * @ring: amdgpu_ring pointer
 * @addr: address
 * @seq: sequence number
 * @flags: fence related flags
 *
 * Write a fence and a trap command to the ring.
 */
static void uvd_v7_0_ring_emit_fence(struct amdgpu_ring *ring, u64 addr, u64 seq,
				     unsigned flags)
{
	struct amdgpu_device *adev = ring->adev;

	WARN_ON(flags & AMDGPU_FENCE_FLAG_64BIT);

	amdgpu_ring_write(ring,
		PACKET0(SOC15_REG_OFFSET(UVD, ring->me, mmUVD_CONTEXT_ID), 0));
	amdgpu_ring_write(ring, seq);
	amdgpu_ring_write(ring,
		PACKET0(SOC15_REG_OFFSET(UVD, ring->me, mmUVD_GPCOM_VCPU_DATA0), 0));
	amdgpu_ring_write(ring, addr & 0xffffffff);
	amdgpu_ring_write(ring,
		PACKET0(SOC15_REG_OFFSET(UVD, ring->me, mmUVD_GPCOM_VCPU_DATA1), 0));
	amdgpu_ring_write(ring, upper_32_bits(addr) & 0xff);
	amdgpu_ring_write(ring,
		PACKET0(SOC15_REG_OFFSET(UVD, ring->me, mmUVD_GPCOM_VCPU_CMD), 0));
	amdgpu_ring_write(ring, 0);

	amdgpu_ring_write(ring,
		PACKET0(SOC15_REG_OFFSET(UVD, ring->me, mmUVD_GPCOM_VCPU_DATA0), 0));
	amdgpu_ring_write(ring, 0);
	amdgpu_ring_write(ring,
		PACKET0(SOC15_REG_OFFSET(UVD, ring->me, mmUVD_GPCOM_VCPU_DATA1), 0));
	amdgpu_ring_write(ring, 0);
	amdgpu_ring_write(ring,
		PACKET0(SOC15_REG_OFFSET(UVD, ring->me, mmUVD_GPCOM_VCPU_CMD), 0));
	amdgpu_ring_write(ring, 2);
}
/**
 * uvd_v7_0_enc_ring_emit_fence - emit an enc fence & trap command
 *
 * @ring: amdgpu_ring pointer
 * @addr: address
 * @seq: sequence number
 * @flags: fence related flags
 *
 * Write an enc fence and a trap command to the ring.
 */
static void uvd_v7_0_enc_ring_emit_fence(struct amdgpu_ring *ring, u64 addr,
					 u64 seq, unsigned flags)
{
	WARN_ON(flags & AMDGPU_FENCE_FLAG_64BIT);

	amdgpu_ring_write(ring, HEVC_ENC_CMD_FENCE);
	amdgpu_ring_write(ring, addr);
	amdgpu_ring_write(ring, upper_32_bits(addr));
	amdgpu_ring_write(ring, seq);
	amdgpu_ring_write(ring, HEVC_ENC_CMD_TRAP);
}
/**
 * uvd_v7_0_ring_emit_hdp_flush - skip HDP flushing
 *
 * @ring: amdgpu_ring pointer
 */
static void uvd_v7_0_ring_emit_hdp_flush(struct amdgpu_ring *ring)
{
	/* The firmware doesn't seem to like touching registers at this point. */
}
/**
 * uvd_v7_0_ring_test_ring - register write test
 *
 * @ring: amdgpu_ring pointer
 *
 * Test if we can successfully write to the context register
 */
static int uvd_v7_0_ring_test_ring(struct amdgpu_ring *ring)
{
	struct amdgpu_device *adev = ring->adev;
	uint32_t tmp = 0;
	unsigned i;
	int r;

	WREG32_SOC15(UVD, ring->me, mmUVD_CONTEXT_ID, 0xCAFEDEAD);
	r = amdgpu_ring_alloc(ring, 3);
	if (r)
		return r;

	amdgpu_ring_write(ring,
		PACKET0(SOC15_REG_OFFSET(UVD, ring->me, mmUVD_CONTEXT_ID), 0));
	amdgpu_ring_write(ring, 0xDEADBEEF);
	amdgpu_ring_commit(ring);
	for (i = 0; i < adev->usec_timeout; i++) {
		tmp = RREG32_SOC15(UVD, ring->me, mmUVD_CONTEXT_ID);
		if (tmp == 0xDEADBEEF)
			break;
		udelay(1);
	}

	if (i >= adev->usec_timeout)
		r = -ETIMEDOUT;

	return r;
}
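
/*
 * Decode ring test pattern: seed UVD_CONTEXT_ID with 0xCAFEDEAD via
 * MMIO, emit a PACKET0 write of 0xDEADBEEF through the ring, and poll
 * the register until it flips - proof that the VCPU is fetching and
 * executing ring commands.
 */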
/**
 * uvd_v7_0_ring_patch_cs_in_place - Patch the IB for command submission.
 *
 * @p: the CS parser with the IBs
 * @ib_idx: which IB to patch
 *
 */
static int uvd_v7_0_ring_patch_cs_in_place(struct amdgpu_cs_parser *p,
					   uint32_t ib_idx)
{
	struct amdgpu_ring *ring = to_amdgpu_ring(p->entity->rq->sched);
	struct amdgpu_ib *ib = &p->job->ibs[ib_idx];
	unsigned i;

	/* No patching necessary for the first instance */
	if (!ring->me)
		return 0;

	for (i = 0; i < ib->length_dw; i += 2) {
		uint32_t reg = amdgpu_get_ib_value(p, ib_idx, i);

		reg -= p->adev->reg_offset[UVD_HWIP][0][1];
		reg += p->adev->reg_offset[UVD_HWIP][1][1];

		amdgpu_set_ib_value(p, ib_idx, i, reg);
	}
	return 0;
}
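
/*
 * The patching above is plain offset arithmetic: user space encodes IBs
 * against instance 0's register base, so every register dword is shifted
 * by (instance 1 base - instance 0 base) when the job runs on the second
 * instance; IBs for instance 0 pass through unmodified.
 */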
/**
 * uvd_v7_0_ring_emit_ib - execute indirect buffer
 *
 * @ring: amdgpu_ring pointer
 * @job: job to retrieve vmid from
 * @ib: indirect buffer to execute
 * @flags: unused
 *
 * Write ring commands to execute the indirect buffer
 */
static void uvd_v7_0_ring_emit_ib(struct amdgpu_ring *ring,
				  struct amdgpu_job *job,
				  struct amdgpu_ib *ib,
				  uint32_t flags)
{
	struct amdgpu_device *adev = ring->adev;
	unsigned vmid = AMDGPU_JOB_GET_VMID(job);

	amdgpu_ring_write(ring,
		PACKET0(SOC15_REG_OFFSET(UVD, ring->me, mmUVD_LMI_RBC_IB_VMID), 0));
	amdgpu_ring_write(ring, vmid);

	amdgpu_ring_write(ring,
		PACKET0(SOC15_REG_OFFSET(UVD, ring->me, mmUVD_LMI_RBC_IB_64BIT_BAR_LOW), 0));
	amdgpu_ring_write(ring, lower_32_bits(ib->gpu_addr));
	amdgpu_ring_write(ring,
		PACKET0(SOC15_REG_OFFSET(UVD, ring->me, mmUVD_LMI_RBC_IB_64BIT_BAR_HIGH), 0));
	amdgpu_ring_write(ring, upper_32_bits(ib->gpu_addr));
	amdgpu_ring_write(ring,
		PACKET0(SOC15_REG_OFFSET(UVD, ring->me, mmUVD_RBC_IB_SIZE), 0));
	amdgpu_ring_write(ring, ib->length_dw);
}
/**
 * uvd_v7_0_enc_ring_emit_ib - enc execute indirect buffer
 *
 * @ring: amdgpu_ring pointer
 * @job: job to retrieve vmid from
 * @ib: indirect buffer to execute
 * @flags: unused
 *
 * Write enc ring commands to execute the indirect buffer
 */
static void uvd_v7_0_enc_ring_emit_ib(struct amdgpu_ring *ring,
				      struct amdgpu_job *job,
				      struct amdgpu_ib *ib,
				      uint32_t flags)
{
	unsigned vmid = AMDGPU_JOB_GET_VMID(job);

	amdgpu_ring_write(ring, HEVC_ENC_CMD_IB_VM);
	amdgpu_ring_write(ring, vmid);
	amdgpu_ring_write(ring, lower_32_bits(ib->gpu_addr));
	amdgpu_ring_write(ring, upper_32_bits(ib->gpu_addr));
	amdgpu_ring_write(ring, ib->length_dw);
}
static void uvd_v7_0_ring_emit_wreg(struct amdgpu_ring *ring,
				    uint32_t reg, uint32_t val)
{
	struct amdgpu_device *adev = ring->adev;

	amdgpu_ring_write(ring,
		PACKET0(SOC15_REG_OFFSET(UVD, ring->me, mmUVD_GPCOM_VCPU_DATA0), 0));
	amdgpu_ring_write(ring, reg << 2);
	amdgpu_ring_write(ring,
		PACKET0(SOC15_REG_OFFSET(UVD, ring->me, mmUVD_GPCOM_VCPU_DATA1), 0));
	amdgpu_ring_write(ring, val);
	amdgpu_ring_write(ring,
		PACKET0(SOC15_REG_OFFSET(UVD, ring->me, mmUVD_GPCOM_VCPU_CMD), 0));
	amdgpu_ring_write(ring, 8);
}
static void uvd_v7_0_ring_emit_reg_wait(struct amdgpu_ring *ring, uint32_t reg,
					uint32_t val, uint32_t mask)
{
	struct amdgpu_device *adev = ring->adev;

	amdgpu_ring_write(ring,
		PACKET0(SOC15_REG_OFFSET(UVD, ring->me, mmUVD_GPCOM_VCPU_DATA0), 0));
	amdgpu_ring_write(ring, reg << 2);
	amdgpu_ring_write(ring,
		PACKET0(SOC15_REG_OFFSET(UVD, ring->me, mmUVD_GPCOM_VCPU_DATA1), 0));
	amdgpu_ring_write(ring, val);
	amdgpu_ring_write(ring,
		PACKET0(SOC15_REG_OFFSET(UVD, ring->me, mmUVD_GP_SCRATCH8), 0));
	amdgpu_ring_write(ring, mask);
	amdgpu_ring_write(ring,
		PACKET0(SOC15_REG_OFFSET(UVD, ring->me, mmUVD_GPCOM_VCPU_CMD), 0));
	amdgpu_ring_write(ring, 12);
}
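
/*
 * Register writes and waits on the decode ring go through the GPCOM
 * registers: DATA0 carries the register address in bytes (hence the
 * reg << 2), DATA1 the value, GP_SCRATCH8 the compare mask for waits,
 * and the VCPU_CMD value selects the operation (8 for a write, 12 for
 * an equal-under-mask wait, as emitted above).
 */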
static void uvd_v7_0_ring_emit_vm_flush(struct amdgpu_ring *ring,
					unsigned vmid, uint64_t pd_addr)
{
	struct amdgpu_vmhub *hub = &ring->adev->vmhub[ring->funcs->vmhub];
	uint32_t data0, data1, mask;

	pd_addr = amdgpu_gmc_emit_flush_gpu_tlb(ring, vmid, pd_addr);

	/* wait for reg writes */
	data0 = hub->ctx0_ptb_addr_lo32 + vmid * hub->ctx_addr_distance;
	data1 = lower_32_bits(pd_addr);
	mask = 0xffffffff;
	uvd_v7_0_ring_emit_reg_wait(ring, data0, data1, mask);
}
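
/*
 * After amdgpu_gmc_emit_flush_gpu_tlb() queues the flush, the ring
 * blocks until the hub's per-VMID page-table base register matches the
 * new pd_addr exactly (mask 0xffffffff on the low 32 bits), so later
 * commands can safely rely on the updated page tables.
 */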
static void uvd_v7_0_ring_insert_nop(struct amdgpu_ring *ring, uint32_t count)
{
	struct amdgpu_device *adev = ring->adev;
	int i;

	WARN_ON(ring->wptr % 2 || count % 2);

	for (i = 0; i < count / 2; i++) {
		amdgpu_ring_write(ring, PACKET0(SOC15_REG_OFFSET(UVD, ring->me, mmUVD_NO_OP), 0));
		amdgpu_ring_write(ring, 0);
	}
}

static void uvd_v7_0_enc_ring_insert_end(struct amdgpu_ring *ring)
{
	amdgpu_ring_write(ring, HEVC_ENC_CMD_END);
}
static void uvd_v7_0_enc_ring_emit_reg_wait(struct amdgpu_ring *ring,
					    uint32_t reg, uint32_t val,
					    uint32_t mask)
{
	amdgpu_ring_write(ring, HEVC_ENC_CMD_REG_WAIT);
	amdgpu_ring_write(ring, reg << 2);
	amdgpu_ring_write(ring, mask);
	amdgpu_ring_write(ring, val);
}

static void uvd_v7_0_enc_ring_emit_vm_flush(struct amdgpu_ring *ring,
					    unsigned int vmid, uint64_t pd_addr)
{
	struct amdgpu_vmhub *hub = &ring->adev->vmhub[ring->funcs->vmhub];

	pd_addr = amdgpu_gmc_emit_flush_gpu_tlb(ring, vmid, pd_addr);

	/* wait for reg writes */
	uvd_v7_0_enc_ring_emit_reg_wait(ring, hub->ctx0_ptb_addr_lo32 +
					vmid * hub->ctx_addr_distance,
					lower_32_bits(pd_addr), 0xffffffff);
}

static void uvd_v7_0_enc_ring_emit_wreg(struct amdgpu_ring *ring,
					uint32_t reg, uint32_t val)
{
	amdgpu_ring_write(ring, HEVC_ENC_CMD_REG_WRITE);
	amdgpu_ring_write(ring, reg << 2);
	amdgpu_ring_write(ring, val);
}
#if 0
static bool uvd_v7_0_is_idle(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	return !(RREG32(mmSRBM_STATUS) & SRBM_STATUS__UVD_BUSY_MASK);
}

static int uvd_v7_0_wait_for_idle(void *handle)
{
	unsigned i;
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	for (i = 0; i < adev->usec_timeout; i++) {
		if (uvd_v7_0_is_idle(handle))
			return 0;
	}
	return -ETIMEDOUT;
}

#define AMDGPU_UVD_STATUS_BUSY_MASK    0xfd
static bool uvd_v7_0_check_soft_reset(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	u32 srbm_soft_reset = 0;
	u32 tmp = RREG32(mmSRBM_STATUS);

	if (REG_GET_FIELD(tmp, SRBM_STATUS, UVD_RQ_PENDING) ||
	    REG_GET_FIELD(tmp, SRBM_STATUS, UVD_BUSY) ||
	    (RREG32_SOC15(UVD, ring->me, mmUVD_STATUS) &
		    AMDGPU_UVD_STATUS_BUSY_MASK))
		srbm_soft_reset = REG_SET_FIELD(srbm_soft_reset,
				SRBM_SOFT_RESET, SOFT_RESET_UVD, 1);

	if (srbm_soft_reset) {
		adev->uvd.inst[ring->me].srbm_soft_reset = srbm_soft_reset;
		return true;
	} else {
		adev->uvd.inst[ring->me].srbm_soft_reset = 0;
		return false;
	}
}

static int uvd_v7_0_pre_soft_reset(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	if (!adev->uvd.inst[ring->me].srbm_soft_reset)
		return 0;

	uvd_v7_0_stop(adev);
	return 0;
}

static int uvd_v7_0_soft_reset(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	u32 srbm_soft_reset;

	if (!adev->uvd.inst[ring->me].srbm_soft_reset)
		return 0;
	srbm_soft_reset = adev->uvd.inst[ring->me].srbm_soft_reset;

	if (srbm_soft_reset) {
		u32 tmp;

		tmp = RREG32(mmSRBM_SOFT_RESET);
		tmp |= srbm_soft_reset;
		dev_info(adev->dev, "SRBM_SOFT_RESET=0x%08X\n", tmp);
		WREG32(mmSRBM_SOFT_RESET, tmp);
		tmp = RREG32(mmSRBM_SOFT_RESET);

		udelay(50);

		tmp &= ~srbm_soft_reset;
		WREG32(mmSRBM_SOFT_RESET, tmp);
		tmp = RREG32(mmSRBM_SOFT_RESET);

		/* Wait a little for things to settle down */
		udelay(50);
	}

	return 0;
}

static int uvd_v7_0_post_soft_reset(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	if (!adev->uvd.inst[ring->me].srbm_soft_reset)
		return 0;

	mdelay(5);

	return uvd_v7_0_start(adev);
}
#endif
static int uvd_v7_0_set_interrupt_state(struct amdgpu_device *adev,
					struct amdgpu_irq_src *source,
					unsigned type,
					enum amdgpu_interrupt_state state)
{
	// TODO
	return 0;
}

static int uvd_v7_0_process_interrupt(struct amdgpu_device *adev,
				      struct amdgpu_irq_src *source,
				      struct amdgpu_iv_entry *entry)
{
	uint32_t ip_instance;

	switch (entry->client_id) {
	case SOC15_IH_CLIENTID_UVD:
		ip_instance = 0;
		break;
	case SOC15_IH_CLIENTID_UVD1:
		ip_instance = 1;
		break;
	default:
		DRM_ERROR("Unhandled client id: %d\n", entry->client_id);
		return 0;
	}

	DRM_DEBUG("IH: UVD TRAP\n");

	switch (entry->src_id) {
	case 124:
		amdgpu_fence_process(&adev->uvd.inst[ip_instance].ring);
		break;
	case 119:
		amdgpu_fence_process(&adev->uvd.inst[ip_instance].ring_enc[0]);
		break;
	case 120:
		if (!amdgpu_sriov_vf(adev))
			amdgpu_fence_process(&adev->uvd.inst[ip_instance].ring_enc[1]);
		break;
	default:
		DRM_ERROR("Unhandled interrupt: %d %d\n",
			  entry->src_id, entry->src_data[0]);
		break;
	}

	return 0;
}
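
/*
 * Interrupt demux above: the IH client id selects the UVD instance
 * (SOC15_IH_CLIENTID_UVD vs _UVD1 on Vega20) and the source id selects
 * the ring - 124 for the decode/system-message trap, 119 and 120 for
 * the two enc rings.  Enc ring 1 fences are skipped under SR-IOV since
 * only the first enc ring is used there.
 */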
#if 0
static void uvd_v7_0_set_sw_clock_gating(struct amdgpu_device *adev)
{
	uint32_t data, data1, data2, suvd_flags;

	data = RREG32_SOC15(UVD, ring->me, mmUVD_CGC_CTRL);
	data1 = RREG32_SOC15(UVD, ring->me, mmUVD_SUVD_CGC_GATE);
	data2 = RREG32_SOC15(UVD, ring->me, mmUVD_SUVD_CGC_CTRL);

	data &= ~(UVD_CGC_CTRL__CLK_OFF_DELAY_MASK |
		  UVD_CGC_CTRL__CLK_GATE_DLY_TIMER_MASK);

	suvd_flags = UVD_SUVD_CGC_GATE__SRE_MASK |
		     UVD_SUVD_CGC_GATE__SIT_MASK |
		     UVD_SUVD_CGC_GATE__SMP_MASK |
		     UVD_SUVD_CGC_GATE__SCM_MASK |
		     UVD_SUVD_CGC_GATE__SDB_MASK;

	data |= UVD_CGC_CTRL__DYN_CLOCK_MODE_MASK |
		(1 << REG_FIELD_SHIFT(UVD_CGC_CTRL, CLK_GATE_DLY_TIMER)) |
		(4 << REG_FIELD_SHIFT(UVD_CGC_CTRL, CLK_OFF_DELAY));

	data &= ~(UVD_CGC_CTRL__UDEC_RE_MODE_MASK |
		  UVD_CGC_CTRL__UDEC_CM_MODE_MASK |
		  UVD_CGC_CTRL__UDEC_IT_MODE_MASK |
		  UVD_CGC_CTRL__UDEC_DB_MODE_MASK |
		  UVD_CGC_CTRL__UDEC_MP_MODE_MASK |
		  UVD_CGC_CTRL__SYS_MODE_MASK |
		  UVD_CGC_CTRL__UDEC_MODE_MASK |
		  UVD_CGC_CTRL__MPEG2_MODE_MASK |
		  UVD_CGC_CTRL__REGS_MODE_MASK |
		  UVD_CGC_CTRL__RBC_MODE_MASK |
		  UVD_CGC_CTRL__LMI_MC_MODE_MASK |
		  UVD_CGC_CTRL__LMI_UMC_MODE_MASK |
		  UVD_CGC_CTRL__IDCT_MODE_MASK |
		  UVD_CGC_CTRL__MPRD_MODE_MASK |
		  UVD_CGC_CTRL__MPC_MODE_MASK |
		  UVD_CGC_CTRL__LBSI_MODE_MASK |
		  UVD_CGC_CTRL__LRBBM_MODE_MASK |
		  UVD_CGC_CTRL__WCB_MODE_MASK |
		  UVD_CGC_CTRL__VCPU_MODE_MASK |
		  UVD_CGC_CTRL__JPEG_MODE_MASK |
		  UVD_CGC_CTRL__JPEG2_MODE_MASK |
		  UVD_CGC_CTRL__SCPU_MODE_MASK);
	data2 &= ~(UVD_SUVD_CGC_CTRL__SRE_MODE_MASK |
		   UVD_SUVD_CGC_CTRL__SIT_MODE_MASK |
		   UVD_SUVD_CGC_CTRL__SMP_MODE_MASK |
		   UVD_SUVD_CGC_CTRL__SCM_MODE_MASK |
		   UVD_SUVD_CGC_CTRL__SDB_MODE_MASK);
	data1 |= suvd_flags;

	WREG32_SOC15(UVD, ring->me, mmUVD_CGC_CTRL, data);
	WREG32_SOC15(UVD, ring->me, mmUVD_CGC_GATE, 0);
	WREG32_SOC15(UVD, ring->me, mmUVD_SUVD_CGC_GATE, data1);
	WREG32_SOC15(UVD, ring->me, mmUVD_SUVD_CGC_CTRL, data2);
}
static void uvd_v7_0_set_hw_clock_gating(struct amdgpu_device *adev)
{
	uint32_t data, data1, cgc_flags, suvd_flags;

	data = RREG32_SOC15(UVD, ring->me, mmUVD_CGC_GATE);
	data1 = RREG32_SOC15(UVD, ring->me, mmUVD_SUVD_CGC_GATE);

	cgc_flags = UVD_CGC_GATE__SYS_MASK |
		    UVD_CGC_GATE__UDEC_MASK |
		    UVD_CGC_GATE__MPEG2_MASK |
		    UVD_CGC_GATE__RBC_MASK |
		    UVD_CGC_GATE__LMI_MC_MASK |
		    UVD_CGC_GATE__IDCT_MASK |
		    UVD_CGC_GATE__MPRD_MASK |
		    UVD_CGC_GATE__MPC_MASK |
		    UVD_CGC_GATE__LBSI_MASK |
		    UVD_CGC_GATE__LRBBM_MASK |
		    UVD_CGC_GATE__UDEC_RE_MASK |
		    UVD_CGC_GATE__UDEC_CM_MASK |
		    UVD_CGC_GATE__UDEC_IT_MASK |
		    UVD_CGC_GATE__UDEC_DB_MASK |
		    UVD_CGC_GATE__UDEC_MP_MASK |
		    UVD_CGC_GATE__WCB_MASK |
		    UVD_CGC_GATE__VCPU_MASK |
		    UVD_CGC_GATE__SCPU_MASK |
		    UVD_CGC_GATE__JPEG_MASK |
		    UVD_CGC_GATE__JPEG2_MASK;

	suvd_flags = UVD_SUVD_CGC_GATE__SRE_MASK |
		     UVD_SUVD_CGC_GATE__SIT_MASK |
		     UVD_SUVD_CGC_GATE__SMP_MASK |
		     UVD_SUVD_CGC_GATE__SCM_MASK |
		     UVD_SUVD_CGC_GATE__SDB_MASK;

	data |= cgc_flags;
	data1 |= suvd_flags;

	WREG32_SOC15(UVD, ring->me, mmUVD_CGC_GATE, data);
	WREG32_SOC15(UVD, ring->me, mmUVD_SUVD_CGC_GATE, data1);
}
static void uvd_v7_0_set_bypass_mode(struct amdgpu_device *adev, bool enable)
{
	u32 tmp = RREG32_SMC(ixGCK_DFS_BYPASS_CNTL);

	if (enable)
		tmp |= (GCK_DFS_BYPASS_CNTL__BYPASSDCLK_MASK |
			GCK_DFS_BYPASS_CNTL__BYPASSVCLK_MASK);
	else
		tmp &= ~(GCK_DFS_BYPASS_CNTL__BYPASSDCLK_MASK |
			 GCK_DFS_BYPASS_CNTL__BYPASSVCLK_MASK);

	WREG32_SMC(ixGCK_DFS_BYPASS_CNTL, tmp);
}
static int uvd_v7_0_set_clockgating_state(void *handle,
					  enum amd_clockgating_state state)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	bool enable = (state == AMD_CG_STATE_GATE);

	uvd_v7_0_set_bypass_mode(adev, enable);

	if (!(adev->cg_flags & AMD_CG_SUPPORT_UVD_MGCG))
		return 0;

	if (enable) {
		/* disable HW gating and enable Sw gating */
		uvd_v7_0_set_sw_clock_gating(adev);
	} else {
		/* wait for STATUS to clear */
		if (uvd_v7_0_wait_for_idle(handle))
			return -EBUSY;

		/* enable HW gates because UVD is idle */
		/* uvd_v7_0_set_hw_clock_gating(adev); */
	}

	return 0;
}
static int uvd_v7_0_set_powergating_state(void *handle,
					  enum amd_powergating_state state)
{
	/* This doesn't actually powergate the UVD block.
	 * That's done in the dpm code via the SMC.  This
	 * just re-inits the block as necessary.  The actual
	 * gating still happens in the dpm code.  We should
	 * revisit this when there is a cleaner line between
	 * the smc and the hw blocks
	 */
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	if (!(adev->pg_flags & AMD_PG_SUPPORT_UVD))
		return 0;

	WREG32_SOC15(UVD, ring->me, mmUVD_POWER_STATUS, UVD_POWER_STATUS__UVD_PG_EN_MASK);

	if (state == AMD_PG_STATE_GATE) {
		uvd_v7_0_stop(adev);
		return 0;
	} else {
		return uvd_v7_0_start(adev);
	}
}
#endif
static int uvd_v7_0_set_clockgating_state(void *handle,
					  enum amd_clockgating_state state)
{
	/* needed for driver unload*/
	return 0;
}
const struct amd_ip_funcs uvd_v7_0_ip_funcs = {
	.name = "uvd_v7_0",
	.early_init = uvd_v7_0_early_init,
	.late_init = NULL,
	.sw_init = uvd_v7_0_sw_init,
	.sw_fini = uvd_v7_0_sw_fini,
	.hw_init = uvd_v7_0_hw_init,
	.hw_fini = uvd_v7_0_hw_fini,
	.suspend = uvd_v7_0_suspend,
	.resume = uvd_v7_0_resume,
	.is_idle = NULL /* uvd_v7_0_is_idle */,
	.wait_for_idle = NULL /* uvd_v7_0_wait_for_idle */,
	.check_soft_reset = NULL /* uvd_v7_0_check_soft_reset */,
	.pre_soft_reset = NULL /* uvd_v7_0_pre_soft_reset */,
	.soft_reset = NULL /* uvd_v7_0_soft_reset */,
	.post_soft_reset = NULL /* uvd_v7_0_post_soft_reset */,
	.set_clockgating_state = uvd_v7_0_set_clockgating_state,
	.set_powergating_state = NULL /* uvd_v7_0_set_powergating_state */,
};
static const struct amdgpu_ring_funcs uvd_v7_0_ring_vm_funcs = {
	.type = AMDGPU_RING_TYPE_UVD,
	.align_mask = 0xf,
	.support_64bit_ptrs = false,
	.no_user_fence = true,
	.vmhub = AMDGPU_MMHUB_0,
	.get_rptr = uvd_v7_0_ring_get_rptr,
	.get_wptr = uvd_v7_0_ring_get_wptr,
	.set_wptr = uvd_v7_0_ring_set_wptr,
	.patch_cs_in_place = uvd_v7_0_ring_patch_cs_in_place,
	.emit_frame_size =
		6 + /* hdp invalidate */
		SOC15_FLUSH_GPU_TLB_NUM_WREG * 6 +
		SOC15_FLUSH_GPU_TLB_NUM_REG_WAIT * 8 +
		8 + /* uvd_v7_0_ring_emit_vm_flush */
		14 + 14, /* uvd_v7_0_ring_emit_fence x2 vm fence */
	.emit_ib_size = 8, /* uvd_v7_0_ring_emit_ib */
	.emit_ib = uvd_v7_0_ring_emit_ib,
	.emit_fence = uvd_v7_0_ring_emit_fence,
	.emit_vm_flush = uvd_v7_0_ring_emit_vm_flush,
	.emit_hdp_flush = uvd_v7_0_ring_emit_hdp_flush,
	.test_ring = uvd_v7_0_ring_test_ring,
	.test_ib = amdgpu_uvd_ring_test_ib,
	.insert_nop = uvd_v7_0_ring_insert_nop,
	.pad_ib = amdgpu_ring_generic_pad_ib,
	.begin_use = amdgpu_uvd_ring_begin_use,
	.end_use = amdgpu_uvd_ring_end_use,
	.emit_wreg = uvd_v7_0_ring_emit_wreg,
	.emit_reg_wait = uvd_v7_0_ring_emit_reg_wait,
	.emit_reg_write_reg_wait = amdgpu_ring_emit_reg_write_reg_wait_helper,
};
static const struct amdgpu_ring_funcs uvd_v7_0_enc_ring_vm_funcs = {
	.type = AMDGPU_RING_TYPE_UVD_ENC,
	.align_mask = 0x3f,
	.nop = HEVC_ENC_CMD_NO_OP,
	.support_64bit_ptrs = false,
	.no_user_fence = true,
	.vmhub = AMDGPU_MMHUB_0,
	.get_rptr = uvd_v7_0_enc_ring_get_rptr,
	.get_wptr = uvd_v7_0_enc_ring_get_wptr,
	.set_wptr = uvd_v7_0_enc_ring_set_wptr,
	.emit_frame_size =
		3 + 3 + /* hdp flush / invalidate */
		SOC15_FLUSH_GPU_TLB_NUM_WREG * 3 +
		SOC15_FLUSH_GPU_TLB_NUM_REG_WAIT * 4 +
		4 + /* uvd_v7_0_enc_ring_emit_vm_flush */
		5 + 5 + /* uvd_v7_0_enc_ring_emit_fence x2 vm fence */
		1, /* uvd_v7_0_enc_ring_insert_end */
	.emit_ib_size = 5, /* uvd_v7_0_enc_ring_emit_ib */
	.emit_ib = uvd_v7_0_enc_ring_emit_ib,
	.emit_fence = uvd_v7_0_enc_ring_emit_fence,
	.emit_vm_flush = uvd_v7_0_enc_ring_emit_vm_flush,
	.test_ring = uvd_v7_0_enc_ring_test_ring,
	.test_ib = uvd_v7_0_enc_ring_test_ib,
	.insert_nop = amdgpu_ring_insert_nop,
	.insert_end = uvd_v7_0_enc_ring_insert_end,
	.pad_ib = amdgpu_ring_generic_pad_ib,
	.begin_use = amdgpu_uvd_ring_begin_use,
	.end_use = amdgpu_uvd_ring_end_use,
	.emit_wreg = uvd_v7_0_enc_ring_emit_wreg,
	.emit_reg_wait = uvd_v7_0_enc_ring_emit_reg_wait,
	.emit_reg_write_reg_wait = amdgpu_ring_emit_reg_write_reg_wait_helper,
};
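
/*
 * Note on emit_frame_size: the totals must track the emit_* helpers.
 * uvd_v7_0_ring_emit_wreg() costs 6 dwords (three PACKET0/value pairs)
 * while the enc HEVC_ENC_CMD_REG_WRITE form costs 3, which is why the
 * SOC15_FLUSH_GPU_TLB_NUM_WREG term is multiplied by 6 on the decode
 * ring but only 3 on the enc ring.
 */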
static void uvd_v7_0_set_ring_funcs(struct amdgpu_device *adev)
{
	int i;

	for (i = 0; i < adev->uvd.num_uvd_inst; i++) {
		if (adev->uvd.harvest_config & (1 << i))
			continue;
		adev->uvd.inst[i].ring.funcs = &uvd_v7_0_ring_vm_funcs;
		adev->uvd.inst[i].ring.me = i;
		DRM_INFO("UVD(%d) is enabled in VM mode\n", i);
	}
}

static void uvd_v7_0_set_enc_ring_funcs(struct amdgpu_device *adev)
{
	int i, j;

	for (j = 0; j < adev->uvd.num_uvd_inst; j++) {
		if (adev->uvd.harvest_config & (1 << j))
			continue;
		for (i = 0; i < adev->uvd.num_enc_rings; ++i) {
			adev->uvd.inst[j].ring_enc[i].funcs = &uvd_v7_0_enc_ring_vm_funcs;
			adev->uvd.inst[j].ring_enc[i].me = j;
		}
		DRM_INFO("UVD(%d) ENC is enabled in VM mode\n", j);
	}
}
static const struct amdgpu_irq_src_funcs uvd_v7_0_irq_funcs = {
	.set = uvd_v7_0_set_interrupt_state,
	.process = uvd_v7_0_process_interrupt,
};

static void uvd_v7_0_set_irq_funcs(struct amdgpu_device *adev)
{
	int i;

	for (i = 0; i < adev->uvd.num_uvd_inst; i++) {
		if (adev->uvd.harvest_config & (1 << i))
			continue;
		adev->uvd.inst[i].irq.num_types = adev->uvd.num_enc_rings + 1;
		adev->uvd.inst[i].irq.funcs = &uvd_v7_0_irq_funcs;
	}
}
const struct amdgpu_ip_block_version uvd_v7_0_ip_block =
{
		.type = AMD_IP_BLOCK_TYPE_UVD,
		.major = 7,
		.minor = 0,
		.rev = 0,
		.funcs = &uvd_v7_0_ip_funcs,
};