/*
 * Copyright 2016 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 */
#include <linux/firmware.h>

#include "amdgpu.h"
#include "amdgpu_uvd.h"
#include "soc15.h"
#include "soc15d.h"
#include "soc15_common.h"
#include "mmsch_v1_0.h"

#include "uvd/uvd_7_0_offset.h"
#include "uvd/uvd_7_0_sh_mask.h"
#include "vce/vce_4_0_offset.h"
#include "vce/vce_4_0_default.h"
#include "vce/vce_4_0_sh_mask.h"
#include "nbif/nbif_6_1_offset.h"
#include "mmhub/mmhub_1_0_offset.h"
#include "mmhub/mmhub_1_0_sh_mask.h"
#include "ivsrcid/uvd/irqsrcs_uvd_7_0.h"
#define mmUVD_PG0_CC_UVD_HARVESTING				0x00c7
#define mmUVD_PG0_CC_UVD_HARVESTING_BASE_IDX			1
/* UVD_PG0_CC_UVD_HARVESTING */
#define UVD_PG0_CC_UVD_HARVESTING__UVD_DISABLE__SHIFT		0x1
#define UVD_PG0_CC_UVD_HARVESTING__UVD_DISABLE_MASK		0x00000002L

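/* Vega20 is the only ASIC handled here with two UVD instances; a fused-off
 * instance is recorded in adev->uvd.harvest_config and skipped by every
 * per-instance loop below.
 */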
#define UVD7_MAX_HW_INSTANCES_VEGA20			2

static void uvd_v7_0_set_ring_funcs(struct amdgpu_device *adev);
static void uvd_v7_0_set_enc_ring_funcs(struct amdgpu_device *adev);
static void uvd_v7_0_set_irq_funcs(struct amdgpu_device *adev);
static int uvd_v7_0_start(struct amdgpu_device *adev);
static void uvd_v7_0_stop(struct amdgpu_device *adev);
static int uvd_v7_0_sriov_start(struct amdgpu_device *adev);

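/* IH client id for each UVD instance: instance 0 and instance 1 deliver
 * their interrupts through separate SOC15 IH clients.
 */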
static int amdgpu_ih_clientid_uvds[] = {
	SOC15_IH_CLIENTID_UVD,
	SOC15_IH_CLIENTID_UVD1
};

/**
 * uvd_v7_0_ring_get_rptr - get read pointer
 *
 * @ring: amdgpu_ring pointer
 *
 * Returns the current hardware read pointer
 */
static uint64_t uvd_v7_0_ring_get_rptr(struct amdgpu_ring *ring)
{
	struct amdgpu_device *adev = ring->adev;

	return RREG32_SOC15(UVD, ring->me, mmUVD_RBC_RB_RPTR);
}

/**
 * uvd_v7_0_enc_ring_get_rptr - get enc read pointer
 *
 * @ring: amdgpu_ring pointer
 *
 * Returns the current hardware enc read pointer
 */
static uint64_t uvd_v7_0_enc_ring_get_rptr(struct amdgpu_ring *ring)
{
	struct amdgpu_device *adev = ring->adev;

	if (ring == &adev->uvd.inst[ring->me].ring_enc[0])
		return RREG32_SOC15(UVD, ring->me, mmUVD_RB_RPTR);
	else
		return RREG32_SOC15(UVD, ring->me, mmUVD_RB_RPTR2);
}

/**
 * uvd_v7_0_ring_get_wptr - get write pointer
 *
 * @ring: amdgpu_ring pointer
 *
 * Returns the current hardware write pointer
 */
static uint64_t uvd_v7_0_ring_get_wptr(struct amdgpu_ring *ring)
{
	struct amdgpu_device *adev = ring->adev;

	return RREG32_SOC15(UVD, ring->me, mmUVD_RBC_RB_WPTR);
}

/**
 * uvd_v7_0_enc_ring_get_wptr - get enc write pointer
 *
 * @ring: amdgpu_ring pointer
 *
 * Returns the current hardware enc write pointer
 */
static uint64_t uvd_v7_0_enc_ring_get_wptr(struct amdgpu_ring *ring)
{
	struct amdgpu_device *adev = ring->adev;

	if (ring->use_doorbell)
		return adev->wb.wb[ring->wptr_offs];

	if (ring == &adev->uvd.inst[ring->me].ring_enc[0])
		return RREG32_SOC15(UVD, ring->me, mmUVD_RB_WPTR);
	else
		return RREG32_SOC15(UVD, ring->me, mmUVD_RB_WPTR2);
}

/**
 * uvd_v7_0_ring_set_wptr - set write pointer
 *
 * @ring: amdgpu_ring pointer
 *
 * Commits the write pointer to the hardware
 */
static void uvd_v7_0_ring_set_wptr(struct amdgpu_ring *ring)
{
	struct amdgpu_device *adev = ring->adev;

	WREG32_SOC15(UVD, ring->me, mmUVD_RBC_RB_WPTR, lower_32_bits(ring->wptr));
}

/**
 * uvd_v7_0_enc_ring_set_wptr - set enc write pointer
 *
 * @ring: amdgpu_ring pointer
 *
 * Commits the enc write pointer to the hardware
 */
static void uvd_v7_0_enc_ring_set_wptr(struct amdgpu_ring *ring)
{
	struct amdgpu_device *adev = ring->adev;

	if (ring->use_doorbell) {
		/* XXX check if swapping is necessary on BE */
		adev->wb.wb[ring->wptr_offs] = lower_32_bits(ring->wptr);
		WDOORBELL32(ring->doorbell_index, lower_32_bits(ring->wptr));
		return;
	}

	if (ring == &adev->uvd.inst[ring->me].ring_enc[0])
		WREG32_SOC15(UVD, ring->me, mmUVD_RB_WPTR,
			     lower_32_bits(ring->wptr));
	else
		WREG32_SOC15(UVD, ring->me, mmUVD_RB_WPTR2,
			     lower_32_bits(ring->wptr));
}

/**
 * uvd_v7_0_enc_ring_test_ring - test if UVD ENC ring is working
 *
 * @ring: the engine to test on
 *
 */
static int uvd_v7_0_enc_ring_test_ring(struct amdgpu_ring *ring)
{
	struct amdgpu_device *adev = ring->adev;
	uint32_t rptr;
	unsigned i;
	int r;

	if (amdgpu_sriov_vf(adev))
		return 0;

	r = amdgpu_ring_alloc(ring, 16);
	if (r)
		return r;

	rptr = amdgpu_ring_get_rptr(ring);

	amdgpu_ring_write(ring, HEVC_ENC_CMD_END);
	amdgpu_ring_commit(ring);

	for (i = 0; i < adev->usec_timeout; i++) {
		if (amdgpu_ring_get_rptr(ring) != rptr)
			break;
		udelay(1);
	}

	if (i >= adev->usec_timeout)
		r = -ETIMEDOUT;

	return r;
}

/**
 * uvd_v7_0_enc_get_create_msg - generate a UVD ENC create msg
 *
 * @ring: ring we should submit the msg to
 * @handle: session handle to use
 * @bo: amdgpu object for which we query the offset
 * @fence: optional fence to return
 *
 * Open up a stream for HW test
 */
static int uvd_v7_0_enc_get_create_msg(struct amdgpu_ring *ring, uint32_t handle,
				       struct amdgpu_bo *bo,
				       struct dma_fence **fence)
{
	const unsigned ib_size_dw = 16;
	struct amdgpu_job *job;
	struct amdgpu_ib *ib;
	struct dma_fence *f = NULL;
	uint64_t addr;
	int i, r;

	r = amdgpu_job_alloc_with_ib(ring->adev, ib_size_dw * 4,
				     AMDGPU_IB_POOL_DIRECT, &job);
	if (r)
		return r;

	ib = &job->ibs[0];
	addr = amdgpu_bo_gpu_offset(bo);

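	/* Each chunk below is framed as <size in bytes><command><payload>: a
	 * 0x18-byte session-info chunk (0x00000001), a 0x14-byte task-info
	 * chunk (0x00000002) and an 8-byte "op initialize" (0x08000001).
	 * The command ids mirror what the ENC firmware parses; see the
	 * per-dword comments below.
	 */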
	ib->length_dw = 0;
	ib->ptr[ib->length_dw++] = 0x00000018;
	ib->ptr[ib->length_dw++] = 0x00000001; /* session info */
	ib->ptr[ib->length_dw++] = handle;
	ib->ptr[ib->length_dw++] = 0x00000000;
	ib->ptr[ib->length_dw++] = upper_32_bits(addr);
	ib->ptr[ib->length_dw++] = addr;

	ib->ptr[ib->length_dw++] = 0x00000014;
	ib->ptr[ib->length_dw++] = 0x00000002; /* task info */
	ib->ptr[ib->length_dw++] = 0x0000001c;
	ib->ptr[ib->length_dw++] = 0x00000000;
	ib->ptr[ib->length_dw++] = 0x00000000;

	ib->ptr[ib->length_dw++] = 0x00000008;
	ib->ptr[ib->length_dw++] = 0x08000001; /* op initialize */

	for (i = ib->length_dw; i < ib_size_dw; ++i)
		ib->ptr[i] = 0x0;

	r = amdgpu_job_submit_direct(job, ring, &f);
	if (r)
		goto err;

	if (fence)
		*fence = dma_fence_get(f);
	dma_fence_put(f);
	return 0;

err:
	amdgpu_job_free(job);
	return r;
}

/**
 * uvd_v7_0_enc_get_destroy_msg - generate a UVD ENC destroy msg
 *
 * @ring: ring we should submit the msg to
 * @handle: session handle to use
 * @bo: amdgpu object for which we query the offset
 * @fence: optional fence to return
 *
 * Close up a stream for HW test or if userspace failed to do so
 */
static int uvd_v7_0_enc_get_destroy_msg(struct amdgpu_ring *ring, uint32_t handle,
					struct amdgpu_bo *bo,
					struct dma_fence **fence)
{
	const unsigned ib_size_dw = 16;
	struct amdgpu_job *job;
	struct amdgpu_ib *ib;
	struct dma_fence *f = NULL;
	uint64_t addr;
	int i, r;

	r = amdgpu_job_alloc_with_ib(ring->adev, ib_size_dw * 4,
				     AMDGPU_IB_POOL_DIRECT, &job);
	if (r)
		return r;

	ib = &job->ibs[0];
	addr = amdgpu_bo_gpu_offset(bo);

	ib->length_dw = 0;
	ib->ptr[ib->length_dw++] = 0x00000018;
	ib->ptr[ib->length_dw++] = 0x00000001;
	ib->ptr[ib->length_dw++] = handle;
	ib->ptr[ib->length_dw++] = 0x00000000;
	ib->ptr[ib->length_dw++] = upper_32_bits(addr);
	ib->ptr[ib->length_dw++] = addr;

	ib->ptr[ib->length_dw++] = 0x00000014;
	ib->ptr[ib->length_dw++] = 0x00000002;
	ib->ptr[ib->length_dw++] = 0x0000001c;
	ib->ptr[ib->length_dw++] = 0x00000000;
	ib->ptr[ib->length_dw++] = 0x00000000;

	ib->ptr[ib->length_dw++] = 0x00000008;
	ib->ptr[ib->length_dw++] = 0x08000002; /* op close session */

	for (i = ib->length_dw; i < ib_size_dw; ++i)
		ib->ptr[i] = 0x0;

	r = amdgpu_job_submit_direct(job, ring, &f);
	if (r)
		goto err;

	if (fence)
		*fence = dma_fence_get(f);
	dma_fence_put(f);
	return 0;

err:
	amdgpu_job_free(job);
	return r;
}

/**
 * uvd_v7_0_enc_ring_test_ib - test if UVD ENC IBs are working
 *
 * @ring: the engine to test on
 * @timeout: timeout value in jiffies, or MAX_SCHEDULE_TIMEOUT
 *
 */
static int uvd_v7_0_enc_ring_test_ib(struct amdgpu_ring *ring, long timeout)
{
	struct dma_fence *fence = NULL;
	struct amdgpu_bo *bo = ring->adev->uvd.ib_bo;
	long r;

	r = uvd_v7_0_enc_get_create_msg(ring, 1, bo, NULL);
	if (r)
		goto error;

	r = uvd_v7_0_enc_get_destroy_msg(ring, 1, bo, &fence);
	if (r)
		goto error;

	r = dma_fence_wait_timeout(fence, false, timeout);
	if (r == 0)
		r = -ETIMEDOUT;
	else if (r > 0)
		r = 0;

error:
	dma_fence_put(fence);
	return r;
}

static int uvd_v7_0_early_init(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	if (adev->asic_type == CHIP_VEGA20) {
		u32 harvest;
		int i;

		adev->uvd.num_uvd_inst = UVD7_MAX_HW_INSTANCES_VEGA20;
		for (i = 0; i < adev->uvd.num_uvd_inst; i++) {
			harvest = RREG32_SOC15(UVD, i, mmUVD_PG0_CC_UVD_HARVESTING);
			if (harvest & UVD_PG0_CC_UVD_HARVESTING__UVD_DISABLE_MASK)
				adev->uvd.harvest_config |= 1 << i;
		}
		if (adev->uvd.harvest_config == (AMDGPU_UVD_HARVEST_UVD0 |
						 AMDGPU_UVD_HARVEST_UVD1))
			/* both instances are harvested, disable the block */
			return -ENOENT;
	} else {
		adev->uvd.num_uvd_inst = 1;
	}

	if (amdgpu_sriov_vf(adev))
		adev->uvd.num_enc_rings = 1;
	else
		adev->uvd.num_enc_rings = 2;
	uvd_v7_0_set_ring_funcs(adev);
	uvd_v7_0_set_enc_ring_funcs(adev);
	uvd_v7_0_set_irq_funcs(adev);

	return 0;
}

static int uvd_v7_0_sw_init(void *handle)
{
	struct amdgpu_ring *ring;
	int i, j, r;
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	for (j = 0; j < adev->uvd.num_uvd_inst; j++) {
		if (adev->uvd.harvest_config & (1 << j))
			continue;
		/* UVD TRAP */
		r = amdgpu_irq_add_id(adev, amdgpu_ih_clientid_uvds[j], UVD_7_0__SRCID__UVD_SYSTEM_MESSAGE_INTERRUPT, &adev->uvd.inst[j].irq);
		if (r)
			return r;

		/* UVD ENC TRAP */
		for (i = 0; i < adev->uvd.num_enc_rings; ++i) {
			r = amdgpu_irq_add_id(adev, amdgpu_ih_clientid_uvds[j], i + UVD_7_0__SRCID__UVD_ENC_GEN_PURP, &adev->uvd.inst[j].irq);
			if (r)
				return r;
		}
	}

	r = amdgpu_uvd_sw_init(adev);
	if (r)
		return r;

	if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) {
		const struct common_firmware_header *hdr;

		hdr = (const struct common_firmware_header *)adev->uvd.fw->data;
		adev->firmware.ucode[AMDGPU_UCODE_ID_UVD].ucode_id = AMDGPU_UCODE_ID_UVD;
		adev->firmware.ucode[AMDGPU_UCODE_ID_UVD].fw = adev->uvd.fw;
		adev->firmware.fw_size +=
			ALIGN(le32_to_cpu(hdr->ucode_size_bytes), PAGE_SIZE);

		if (adev->uvd.num_uvd_inst == UVD7_MAX_HW_INSTANCES_VEGA20) {
			adev->firmware.ucode[AMDGPU_UCODE_ID_UVD1].ucode_id = AMDGPU_UCODE_ID_UVD1;
			adev->firmware.ucode[AMDGPU_UCODE_ID_UVD1].fw = adev->uvd.fw;
			adev->firmware.fw_size +=
				ALIGN(le32_to_cpu(hdr->ucode_size_bytes), PAGE_SIZE);
		}
		DRM_INFO("PSP loading UVD firmware\n");
	}

	for (j = 0; j < adev->uvd.num_uvd_inst; j++) {
		if (adev->uvd.harvest_config & (1 << j))
			continue;
		if (!amdgpu_sriov_vf(adev)) {
			ring = &adev->uvd.inst[j].ring;
			sprintf(ring->name, "uvd_%d", ring->me);
			r = amdgpu_ring_init(adev, ring, 512,
					     &adev->uvd.inst[j].irq, 0,
					     AMDGPU_RING_PRIO_DEFAULT, NULL);
			if (r)
				return r;
		}

		for (i = 0; i < adev->uvd.num_enc_rings; ++i) {
			ring = &adev->uvd.inst[j].ring_enc[i];
			sprintf(ring->name, "uvd_enc_%d.%d", ring->me, i);
			if (amdgpu_sriov_vf(adev)) {
				ring->use_doorbell = true;

				/* currently only the first encode ring is used
				 * under sriov, so park the other rings at
				 * unused doorbell locations.
				 */
				if (i == 0)
					ring->doorbell_index = adev->doorbell_index.uvd_vce.uvd_ring0_1 * 2;
				else
					ring->doorbell_index = adev->doorbell_index.uvd_vce.uvd_ring2_3 * 2 + 1;
			}
			r = amdgpu_ring_init(adev, ring, 512,
					     &adev->uvd.inst[j].irq, 0,
					     AMDGPU_RING_PRIO_DEFAULT, NULL);
			if (r)
				return r;
		}
	}

	r = amdgpu_uvd_resume(adev);
	if (r)
		return r;

	r = amdgpu_uvd_entity_init(adev);
	if (r)
		return r;

	r = amdgpu_virt_alloc_mm_table(adev);
	if (r)
		return r;

	return 0;
}

static int uvd_v7_0_sw_fini(void *handle)
{
	int i, j, r;
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	amdgpu_virt_free_mm_table(adev);

	r = amdgpu_uvd_suspend(adev);
	if (r)
		return r;

	for (j = 0; j < adev->uvd.num_uvd_inst; ++j) {
		if (adev->uvd.harvest_config & (1 << j))
			continue;
		for (i = 0; i < adev->uvd.num_enc_rings; ++i)
			amdgpu_ring_fini(&adev->uvd.inst[j].ring_enc[i]);
	}
	return amdgpu_uvd_sw_fini(adev);
}

/**
 * uvd_v7_0_hw_init - start and test UVD block
 *
 * @handle: handle used to pass amdgpu_device pointer
 *
 * Initialize the hardware, boot up the VCPU and do some testing
 */
static int uvd_v7_0_hw_init(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	struct amdgpu_ring *ring;
	uint32_t tmp;
	int i, j, r;

	if (amdgpu_sriov_vf(adev))
		r = uvd_v7_0_sriov_start(adev);
	else
		r = uvd_v7_0_start(adev);
	if (r)
		goto done;

	for (j = 0; j < adev->uvd.num_uvd_inst; ++j) {
		if (adev->uvd.harvest_config & (1 << j))
			continue;
		ring = &adev->uvd.inst[j].ring;

		if (!amdgpu_sriov_vf(adev)) {
			r = amdgpu_ring_test_helper(ring);
			if (r)
				goto done;

			r = amdgpu_ring_alloc(ring, 10);
			if (r) {
				DRM_ERROR("amdgpu: (%d)ring failed to lock UVD ring (%d).\n", j, r);
				goto done;
			}

			tmp = PACKET0(SOC15_REG_OFFSET(UVD, j,
				mmUVD_SEMA_WAIT_FAULT_TIMEOUT_CNTL), 0);
			amdgpu_ring_write(ring, tmp);
			amdgpu_ring_write(ring, 0xFFFFF);

			tmp = PACKET0(SOC15_REG_OFFSET(UVD, j,
				mmUVD_SEMA_WAIT_INCOMPLETE_TIMEOUT_CNTL), 0);
			amdgpu_ring_write(ring, tmp);
			amdgpu_ring_write(ring, 0xFFFFF);

			tmp = PACKET0(SOC15_REG_OFFSET(UVD, j,
				mmUVD_SEMA_SIGNAL_INCOMPLETE_TIMEOUT_CNTL), 0);
			amdgpu_ring_write(ring, tmp);
			amdgpu_ring_write(ring, 0xFFFFF);

			/* Clear timeout status bits */
			amdgpu_ring_write(ring, PACKET0(SOC15_REG_OFFSET(UVD, j,
				mmUVD_SEMA_TIMEOUT_STATUS), 0));
			amdgpu_ring_write(ring, 0x8);

			amdgpu_ring_write(ring, PACKET0(SOC15_REG_OFFSET(UVD, j,
				mmUVD_SEMA_CNTL), 0));
			amdgpu_ring_write(ring, 3);

			amdgpu_ring_commit(ring);
		}

		for (i = 0; i < adev->uvd.num_enc_rings; ++i) {
			ring = &adev->uvd.inst[j].ring_enc[i];
			r = amdgpu_ring_test_helper(ring);
			if (r)
				goto done;
		}
	}

done:
	if (!r)
		DRM_INFO("UVD and UVD ENC initialized successfully.\n");

	return r;
}

/**
 * uvd_v7_0_hw_fini - stop the hardware block
 *
 * @handle: handle used to pass amdgpu_device pointer
 *
 * Stop the UVD block, mark ring as not ready any more
 */
static int uvd_v7_0_hw_fini(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	cancel_delayed_work_sync(&adev->uvd.idle_work);

	if (!amdgpu_sriov_vf(adev))
		uvd_v7_0_stop(adev);
	else {
		/* full access mode, so don't touch any UVD register */
		DRM_DEBUG("For SRIOV client, shouldn't do anything.\n");
	}

	return 0;
}

static int uvd_v7_0_suspend(void *handle)
{
	int r;
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	/*
	 * Proper cleanups before halting the HW engine:
	 *   - cancel the delayed idle work
	 *   - enable powergating
	 *   - enable clockgating
	 *   - disable dpm
	 *
	 * TODO: to align with the VCN implementation, move the
	 * jobs for clockgating/powergating/dpm setting to
	 * ->set_powergating_state().
	 */
	cancel_delayed_work_sync(&adev->uvd.idle_work);

	if (adev->pm.dpm_enabled) {
		amdgpu_dpm_enable_uvd(adev, false);
	} else {
		amdgpu_asic_set_uvd_clocks(adev, 0, 0);
		/* shutdown the UVD block */
		amdgpu_device_ip_set_powergating_state(adev, AMD_IP_BLOCK_TYPE_UVD,
						       AMD_PG_STATE_GATE);
		amdgpu_device_ip_set_clockgating_state(adev, AMD_IP_BLOCK_TYPE_UVD,
						       AMD_CG_STATE_GATE);
	}

	r = uvd_v7_0_hw_fini(adev);
	if (r)
		return r;

	return amdgpu_uvd_suspend(adev);
}

static int uvd_v7_0_resume(void *handle)
{
	int r;
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	r = amdgpu_uvd_resume(adev);
	if (r)
		return r;

	return uvd_v7_0_hw_init(adev);
}

/**
 * uvd_v7_0_mc_resume - memory controller programming
 *
 * @adev: amdgpu_device pointer
 *
 * Let the UVD memory controller know its offsets
 */
static void uvd_v7_0_mc_resume(struct amdgpu_device *adev)
{
	uint32_t size = AMDGPU_UVD_FIRMWARE_SIZE(adev);
	uint32_t offset;
	int i;

	for (i = 0; i < adev->uvd.num_uvd_inst; ++i) {
		if (adev->uvd.harvest_config & (1 << i))
			continue;
		if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) {
			WREG32_SOC15(UVD, i, mmUVD_LMI_VCPU_CACHE_64BIT_BAR_LOW,
				     i == 0 ?
				     adev->firmware.ucode[AMDGPU_UCODE_ID_UVD].tmr_mc_addr_lo :
				     adev->firmware.ucode[AMDGPU_UCODE_ID_UVD1].tmr_mc_addr_lo);
			WREG32_SOC15(UVD, i, mmUVD_LMI_VCPU_CACHE_64BIT_BAR_HIGH,
				     i == 0 ?
				     adev->firmware.ucode[AMDGPU_UCODE_ID_UVD].tmr_mc_addr_hi :
				     adev->firmware.ucode[AMDGPU_UCODE_ID_UVD1].tmr_mc_addr_hi);
			WREG32_SOC15(UVD, i, mmUVD_VCPU_CACHE_OFFSET0, 0);
			offset = 0;
		} else {
			WREG32_SOC15(UVD, i, mmUVD_LMI_VCPU_CACHE_64BIT_BAR_LOW,
				     lower_32_bits(adev->uvd.inst[i].gpu_addr));
			WREG32_SOC15(UVD, i, mmUVD_LMI_VCPU_CACHE_64BIT_BAR_HIGH,
				     upper_32_bits(adev->uvd.inst[i].gpu_addr));
			offset = size;
			WREG32_SOC15(UVD, i, mmUVD_VCPU_CACHE_OFFSET0,
				     AMDGPU_UVD_FIRMWARE_OFFSET >> 3);
		}

		WREG32_SOC15(UVD, i, mmUVD_VCPU_CACHE_SIZE0, size);

		WREG32_SOC15(UVD, i, mmUVD_LMI_VCPU_CACHE1_64BIT_BAR_LOW,
			     lower_32_bits(adev->uvd.inst[i].gpu_addr + offset));
		WREG32_SOC15(UVD, i, mmUVD_LMI_VCPU_CACHE1_64BIT_BAR_HIGH,
			     upper_32_bits(adev->uvd.inst[i].gpu_addr + offset));
		WREG32_SOC15(UVD, i, mmUVD_VCPU_CACHE_OFFSET1, (1 << 21));
		WREG32_SOC15(UVD, i, mmUVD_VCPU_CACHE_SIZE1, AMDGPU_UVD_HEAP_SIZE);

		WREG32_SOC15(UVD, i, mmUVD_LMI_VCPU_CACHE2_64BIT_BAR_LOW,
			     lower_32_bits(adev->uvd.inst[i].gpu_addr + offset + AMDGPU_UVD_HEAP_SIZE));
		WREG32_SOC15(UVD, i, mmUVD_LMI_VCPU_CACHE2_64BIT_BAR_HIGH,
			     upper_32_bits(adev->uvd.inst[i].gpu_addr + offset + AMDGPU_UVD_HEAP_SIZE));
		WREG32_SOC15(UVD, i, mmUVD_VCPU_CACHE_OFFSET2, (2 << 21));
		WREG32_SOC15(UVD, i, mmUVD_VCPU_CACHE_SIZE2,
			     AMDGPU_UVD_STACK_SIZE + (AMDGPU_UVD_SESSION_SIZE * 40));

		WREG32_SOC15(UVD, i, mmUVD_UDEC_ADDR_CONFIG,
			     adev->gfx.config.gb_addr_config);
		WREG32_SOC15(UVD, i, mmUVD_UDEC_DB_ADDR_CONFIG,
			     adev->gfx.config.gb_addr_config);
		WREG32_SOC15(UVD, i, mmUVD_UDEC_DBW_ADDR_CONFIG,
			     adev->gfx.config.gb_addr_config);

		WREG32_SOC15(UVD, i, mmUVD_GP_SCRATCH4, adev->uvd.max_handles);
	}
}

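/**
 * uvd_v7_0_mmsch_start - kick off the MM scheduler under SR-IOV
 *
 * @adev: amdgpu_device pointer
 * @table: descriptor table with the register programming sequence
 *
 * Point the MMSCH at the descriptor table and its size, kick it via the
 * mailbox, then poll the response register until the scheduler acknowledges
 * (up to 1000 * 10us before giving up).
 */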
static int uvd_v7_0_mmsch_start(struct amdgpu_device *adev,
				struct amdgpu_mm_table *table)
{
	uint32_t data = 0, loop;
	uint64_t addr = table->gpu_addr;
	struct mmsch_v1_0_init_header *header = (struct mmsch_v1_0_init_header *)table->cpu_addr;
	uint32_t size;
	int i;

	size = header->header_size + header->vce_table_size + header->uvd_table_size;

	/* 1, write to vce_mmsch_vf_ctx_addr_lo/hi register with GPU mc addr of memory descriptor location */
	WREG32_SOC15(VCE, 0, mmVCE_MMSCH_VF_CTX_ADDR_LO, lower_32_bits(addr));
	WREG32_SOC15(VCE, 0, mmVCE_MMSCH_VF_CTX_ADDR_HI, upper_32_bits(addr));

	/* 2, update vmid of descriptor */
	data = RREG32_SOC15(VCE, 0, mmVCE_MMSCH_VF_VMID);
	data &= ~VCE_MMSCH_VF_VMID__VF_CTX_VMID_MASK;
	data |= (0 << VCE_MMSCH_VF_VMID__VF_CTX_VMID__SHIFT); /* use domain0 for MM scheduler */
	WREG32_SOC15(VCE, 0, mmVCE_MMSCH_VF_VMID, data);

	/* 3, notify mmsch about the size of this descriptor */
	WREG32_SOC15(VCE, 0, mmVCE_MMSCH_VF_CTX_SIZE, size);

	/* 4, set resp to zero */
	WREG32_SOC15(VCE, 0, mmVCE_MMSCH_VF_MAILBOX_RESP, 0);

	for (i = 0; i < adev->uvd.num_uvd_inst; ++i) {
		if (adev->uvd.harvest_config & (1 << i))
			continue;
		WDOORBELL32(adev->uvd.inst[i].ring_enc[0].doorbell_index, 0);
		adev->wb.wb[adev->uvd.inst[i].ring_enc[0].wptr_offs] = 0;
		adev->uvd.inst[i].ring_enc[0].wptr = 0;
		adev->uvd.inst[i].ring_enc[0].wptr_old = 0;
	}

	/* 5, kick off the initialization and wait until VCE_MMSCH_VF_MAILBOX_RESP becomes non-zero */
	WREG32_SOC15(VCE, 0, mmVCE_MMSCH_VF_MAILBOX_HOST, 0x10000001);

	data = RREG32_SOC15(VCE, 0, mmVCE_MMSCH_VF_MAILBOX_RESP);
	loop = 1000;
	while ((data & 0x10000002) != 0x10000002) {
		udelay(10);
		data = RREG32_SOC15(VCE, 0, mmVCE_MMSCH_VF_MAILBOX_RESP);
		loop--;
		if (!loop)
			break;
	}

	if (!loop) {
		dev_err(adev->dev, "failed to init MMSCH, mmVCE_MMSCH_VF_MAILBOX_RESP = %x\n", data);
		return -EBUSY;
	}

	return 0;
}

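/**
 * uvd_v7_0_sriov_start - start UVD through the MM scheduler
 *
 * @adev: amdgpu_device pointer
 *
 * An SR-IOV guest must not program UVD registers directly, so the same
 * bring-up sequence that uvd_v7_0_start() performs via MMIO is recorded as
 * direct-write/read-modify-write/poll commands in the shared mm table,
 * which uvd_v7_0_mmsch_start() then hands to the host scheduler to replay.
 */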
static int uvd_v7_0_sriov_start(struct amdgpu_device *adev)
{
	struct amdgpu_ring *ring;
	uint32_t offset, size, tmp;
	uint32_t table_size = 0;
	struct mmsch_v1_0_cmd_direct_write direct_wt = { {0} };
	struct mmsch_v1_0_cmd_direct_read_modify_write direct_rd_mod_wt = { {0} };
	struct mmsch_v1_0_cmd_direct_polling direct_poll = { {0} };
	struct mmsch_v1_0_cmd_end end = { {0} };
	uint32_t *init_table = adev->virt.mm_table.cpu_addr;
	struct mmsch_v1_0_init_header *header = (struct mmsch_v1_0_init_header *)init_table;
	uint8_t i = 0;

	direct_wt.cmd_header.command_type = MMSCH_COMMAND__DIRECT_REG_WRITE;
	direct_rd_mod_wt.cmd_header.command_type = MMSCH_COMMAND__DIRECT_REG_READ_MODIFY_WRITE;
	direct_poll.cmd_header.command_type = MMSCH_COMMAND__DIRECT_REG_POLLING;
	end.cmd_header.command_type = MMSCH_COMMAND__END;

	if (header->uvd_table_offset == 0 && header->uvd_table_size == 0) {
		header->version = MMSCH_VERSION;
		header->header_size = sizeof(struct mmsch_v1_0_init_header) >> 2;

		if (header->vce_table_offset == 0 && header->vce_table_size == 0)
			header->uvd_table_offset = header->header_size;
		else
			header->uvd_table_offset = header->vce_table_size + header->vce_table_offset;

		init_table += header->uvd_table_offset;

		for (i = 0; i < adev->uvd.num_uvd_inst; ++i) {
			if (adev->uvd.harvest_config & (1 << i))
				continue;
			ring = &adev->uvd.inst[i].ring;
			ring->wptr = 0;
			size = AMDGPU_GPU_PAGE_ALIGN(adev->uvd.fw->size + 4);

			MMSCH_V1_0_INSERT_DIRECT_RD_MOD_WT(SOC15_REG_OFFSET(UVD, i, mmUVD_STATUS),
							   0xFFFFFFFF, 0x00000004);
			/* mc resume */
			if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) {
				MMSCH_V1_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(UVD, i,
							    mmUVD_LMI_VCPU_CACHE_64BIT_BAR_LOW),
							    adev->firmware.ucode[AMDGPU_UCODE_ID_UVD].tmr_mc_addr_lo);
				MMSCH_V1_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(UVD, i,
							    mmUVD_LMI_VCPU_CACHE_64BIT_BAR_HIGH),
							    adev->firmware.ucode[AMDGPU_UCODE_ID_UVD].tmr_mc_addr_hi);
				MMSCH_V1_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(UVD, 0, mmUVD_VCPU_CACHE_OFFSET0), 0);
				offset = 0;
			} else {
				MMSCH_V1_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(UVD, i, mmUVD_LMI_VCPU_CACHE_64BIT_BAR_LOW),
							    lower_32_bits(adev->uvd.inst[i].gpu_addr));
				MMSCH_V1_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(UVD, i, mmUVD_LMI_VCPU_CACHE_64BIT_BAR_HIGH),
							    upper_32_bits(adev->uvd.inst[i].gpu_addr));
				offset = size;
				MMSCH_V1_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(UVD, 0, mmUVD_VCPU_CACHE_OFFSET0),
							    AMDGPU_UVD_FIRMWARE_OFFSET >> 3);
			}

			MMSCH_V1_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(UVD, i, mmUVD_VCPU_CACHE_SIZE0), size);

			MMSCH_V1_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(UVD, i, mmUVD_LMI_VCPU_CACHE1_64BIT_BAR_LOW),
						    lower_32_bits(adev->uvd.inst[i].gpu_addr + offset));
			MMSCH_V1_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(UVD, i, mmUVD_LMI_VCPU_CACHE1_64BIT_BAR_HIGH),
						    upper_32_bits(adev->uvd.inst[i].gpu_addr + offset));
			MMSCH_V1_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(UVD, i, mmUVD_VCPU_CACHE_OFFSET1), (1 << 21));
			MMSCH_V1_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(UVD, i, mmUVD_VCPU_CACHE_SIZE1), AMDGPU_UVD_HEAP_SIZE);

			MMSCH_V1_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(UVD, i, mmUVD_LMI_VCPU_CACHE2_64BIT_BAR_LOW),
						    lower_32_bits(adev->uvd.inst[i].gpu_addr + offset + AMDGPU_UVD_HEAP_SIZE));
			MMSCH_V1_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(UVD, i, mmUVD_LMI_VCPU_CACHE2_64BIT_BAR_HIGH),
						    upper_32_bits(adev->uvd.inst[i].gpu_addr + offset + AMDGPU_UVD_HEAP_SIZE));
			MMSCH_V1_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(UVD, i, mmUVD_VCPU_CACHE_OFFSET2), (2 << 21));
			MMSCH_V1_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(UVD, i, mmUVD_VCPU_CACHE_SIZE2),
						    AMDGPU_UVD_STACK_SIZE + (AMDGPU_UVD_SESSION_SIZE * 40));

			MMSCH_V1_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(UVD, i, mmUVD_GP_SCRATCH4), adev->uvd.max_handles);
			/* mc resume end */

			/* disable clock gating */
			MMSCH_V1_0_INSERT_DIRECT_RD_MOD_WT(SOC15_REG_OFFSET(UVD, i, mmUVD_CGC_CTRL),
							   ~UVD_CGC_CTRL__DYN_CLOCK_MODE_MASK, 0);

			/* disable interrupt */
			MMSCH_V1_0_INSERT_DIRECT_RD_MOD_WT(SOC15_REG_OFFSET(UVD, i, mmUVD_MASTINT_EN),
							   ~UVD_MASTINT_EN__VCPU_EN_MASK, 0);

			/* stall UMC and register bus before resetting VCPU */
			MMSCH_V1_0_INSERT_DIRECT_RD_MOD_WT(SOC15_REG_OFFSET(UVD, i, mmUVD_LMI_CTRL2),
							   ~UVD_LMI_CTRL2__STALL_ARB_UMC_MASK,
							   UVD_LMI_CTRL2__STALL_ARB_UMC_MASK);

			/* put LMI, VCPU, RBC etc... into reset */
			MMSCH_V1_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(UVD, i, mmUVD_SOFT_RESET),
						    (uint32_t)(UVD_SOFT_RESET__LMI_SOFT_RESET_MASK |
							       UVD_SOFT_RESET__VCPU_SOFT_RESET_MASK |
							       UVD_SOFT_RESET__LBSI_SOFT_RESET_MASK |
							       UVD_SOFT_RESET__RBC_SOFT_RESET_MASK |
							       UVD_SOFT_RESET__CSM_SOFT_RESET_MASK |
							       UVD_SOFT_RESET__CXW_SOFT_RESET_MASK |
							       UVD_SOFT_RESET__TAP_SOFT_RESET_MASK |
							       UVD_SOFT_RESET__LMI_UMC_SOFT_RESET_MASK));

			/* initialize UVD memory controller */
			MMSCH_V1_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(UVD, i, mmUVD_LMI_CTRL),
						    (uint32_t)((0x40 << UVD_LMI_CTRL__WRITE_CLEAN_TIMER__SHIFT) |
							       UVD_LMI_CTRL__WRITE_CLEAN_TIMER_EN_MASK |
							       UVD_LMI_CTRL__DATA_COHERENCY_EN_MASK |
							       UVD_LMI_CTRL__VCPU_DATA_COHERENCY_EN_MASK |
							       UVD_LMI_CTRL__REQ_MODE_MASK |
							       0x00100000L));

			/* take all subblocks out of reset, except VCPU */
			MMSCH_V1_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(UVD, i, mmUVD_SOFT_RESET),
						    UVD_SOFT_RESET__VCPU_SOFT_RESET_MASK);

			/* enable VCPU clock */
			MMSCH_V1_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(UVD, i, mmUVD_VCPU_CNTL),
						    UVD_VCPU_CNTL__CLK_EN_MASK);

			/* enable master interrupt */
			MMSCH_V1_0_INSERT_DIRECT_RD_MOD_WT(SOC15_REG_OFFSET(UVD, i, mmUVD_MASTINT_EN),
							   ~(UVD_MASTINT_EN__VCPU_EN_MASK | UVD_MASTINT_EN__SYS_EN_MASK),
							   (UVD_MASTINT_EN__VCPU_EN_MASK | UVD_MASTINT_EN__SYS_EN_MASK));

			/* clear the bit 4 of UVD_STATUS */
			MMSCH_V1_0_INSERT_DIRECT_RD_MOD_WT(SOC15_REG_OFFSET(UVD, i, mmUVD_STATUS),
							   ~(2 << UVD_STATUS__VCPU_REPORT__SHIFT), 0);

			/* force RBC into idle state */
			size = order_base_2(ring->ring_size);
			tmp = REG_SET_FIELD(0, UVD_RBC_RB_CNTL, RB_BUFSZ, size);
			tmp = REG_SET_FIELD(tmp, UVD_RBC_RB_CNTL, RB_NO_FETCH, 1);
			MMSCH_V1_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(UVD, i, mmUVD_RBC_RB_CNTL), tmp);

			ring = &adev->uvd.inst[i].ring_enc[0];
			ring->wptr = 0;
			MMSCH_V1_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(UVD, i, mmUVD_RB_BASE_LO), ring->gpu_addr);
			MMSCH_V1_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(UVD, i, mmUVD_RB_BASE_HI), upper_32_bits(ring->gpu_addr));
			MMSCH_V1_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(UVD, i, mmUVD_RB_SIZE), ring->ring_size / 4);

			/* boot up the VCPU */
			MMSCH_V1_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(UVD, i, mmUVD_SOFT_RESET), 0);

			/* enable UMC */
			MMSCH_V1_0_INSERT_DIRECT_RD_MOD_WT(SOC15_REG_OFFSET(UVD, i, mmUVD_LMI_CTRL2),
							   ~UVD_LMI_CTRL2__STALL_ARB_UMC_MASK, 0);

			MMSCH_V1_0_INSERT_DIRECT_POLL(SOC15_REG_OFFSET(UVD, i, mmUVD_STATUS), 0x02, 0x02);
		}
		/* add end packet */
		memcpy((void *)init_table, &end, sizeof(struct mmsch_v1_0_cmd_end));
		table_size += sizeof(struct mmsch_v1_0_cmd_end) / 4;
		header->uvd_table_size = table_size;
	}
	return uvd_v7_0_mmsch_start(adev, &adev->virt.mm_table);
}

/**
 * uvd_v7_0_start - start UVD block
 *
 * @adev: amdgpu_device pointer
 *
 * Setup and start the UVD block
 */
static int uvd_v7_0_start(struct amdgpu_device *adev)
{
	struct amdgpu_ring *ring;
	uint32_t rb_bufsz, tmp;
	uint32_t lmi_swap_cntl;
	uint32_t mp_swap_cntl;
	int i, j, k, r;

	for (k = 0; k < adev->uvd.num_uvd_inst; ++k) {
		if (adev->uvd.harvest_config & (1 << k))
			continue;
		/* disable DPG */
		WREG32_P(SOC15_REG_OFFSET(UVD, k, mmUVD_POWER_STATUS), 0,
			 ~UVD_POWER_STATUS__UVD_PG_MODE_MASK);
	}

	/* disable byte swapping */
	lmi_swap_cntl = 0;
	mp_swap_cntl = 0;

	uvd_v7_0_mc_resume(adev);

	for (k = 0; k < adev->uvd.num_uvd_inst; ++k) {
		if (adev->uvd.harvest_config & (1 << k))
			continue;
		ring = &adev->uvd.inst[k].ring;
		/* disable clock gating */
		WREG32_P(SOC15_REG_OFFSET(UVD, k, mmUVD_CGC_CTRL), 0,
			 ~UVD_CGC_CTRL__DYN_CLOCK_MODE_MASK);

		/* disable interrupt */
		WREG32_P(SOC15_REG_OFFSET(UVD, k, mmUVD_MASTINT_EN), 0,
			 ~UVD_MASTINT_EN__VCPU_EN_MASK);

		/* stall UMC and register bus before resetting VCPU */
		WREG32_P(SOC15_REG_OFFSET(UVD, k, mmUVD_LMI_CTRL2),
			 UVD_LMI_CTRL2__STALL_ARB_UMC_MASK,
			 ~UVD_LMI_CTRL2__STALL_ARB_UMC_MASK);
		mdelay(1);

		/* put LMI, VCPU, RBC etc... into reset */
		WREG32_SOC15(UVD, k, mmUVD_SOFT_RESET,
			     UVD_SOFT_RESET__LMI_SOFT_RESET_MASK |
			     UVD_SOFT_RESET__VCPU_SOFT_RESET_MASK |
			     UVD_SOFT_RESET__LBSI_SOFT_RESET_MASK |
			     UVD_SOFT_RESET__RBC_SOFT_RESET_MASK |
			     UVD_SOFT_RESET__CSM_SOFT_RESET_MASK |
			     UVD_SOFT_RESET__CXW_SOFT_RESET_MASK |
			     UVD_SOFT_RESET__TAP_SOFT_RESET_MASK |
			     UVD_SOFT_RESET__LMI_UMC_SOFT_RESET_MASK);
		mdelay(5);

		/* initialize UVD memory controller */
		WREG32_SOC15(UVD, k, mmUVD_LMI_CTRL,
			     (0x40 << UVD_LMI_CTRL__WRITE_CLEAN_TIMER__SHIFT) |
			     UVD_LMI_CTRL__WRITE_CLEAN_TIMER_EN_MASK |
			     UVD_LMI_CTRL__DATA_COHERENCY_EN_MASK |
			     UVD_LMI_CTRL__VCPU_DATA_COHERENCY_EN_MASK |
			     UVD_LMI_CTRL__REQ_MODE_MASK |
			     0x00100000L);

#ifdef __BIG_ENDIAN
		/* swap (8 in 32) RB and IB */
		lmi_swap_cntl = 0xa;
		mp_swap_cntl = 0;
#endif
		WREG32_SOC15(UVD, k, mmUVD_LMI_SWAP_CNTL, lmi_swap_cntl);
		WREG32_SOC15(UVD, k, mmUVD_MP_SWAP_CNTL, mp_swap_cntl);

		WREG32_SOC15(UVD, k, mmUVD_MPC_SET_MUXA0, 0x40c2040);
		WREG32_SOC15(UVD, k, mmUVD_MPC_SET_MUXA1, 0x0);
		WREG32_SOC15(UVD, k, mmUVD_MPC_SET_MUXB0, 0x40c2040);
		WREG32_SOC15(UVD, k, mmUVD_MPC_SET_MUXB1, 0x0);
		WREG32_SOC15(UVD, k, mmUVD_MPC_SET_ALU, 0);
		WREG32_SOC15(UVD, k, mmUVD_MPC_SET_MUX, 0x88);

		/* take all subblocks out of reset, except VCPU */
		WREG32_SOC15(UVD, k, mmUVD_SOFT_RESET,
			     UVD_SOFT_RESET__VCPU_SOFT_RESET_MASK);
		mdelay(5);

		/* enable VCPU clock */
		WREG32_SOC15(UVD, k, mmUVD_VCPU_CNTL,
			     UVD_VCPU_CNTL__CLK_EN_MASK);

		/* enable UMC */
		WREG32_P(SOC15_REG_OFFSET(UVD, k, mmUVD_LMI_CTRL2), 0,
			 ~UVD_LMI_CTRL2__STALL_ARB_UMC_MASK);

		/* boot up the VCPU */
		WREG32_SOC15(UVD, k, mmUVD_SOFT_RESET, 0);
		mdelay(10);

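		/* The VCPU reports readiness in bit 1 of UVD_STATUS.  Poll
		 * for it below, pulsing the VCPU soft reset on each of up to
		 * 10 failed attempts before giving up.
		 */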
		for (i = 0; i < 10; ++i) {
			uint32_t status;

			for (j = 0; j < 100; ++j) {
				status = RREG32_SOC15(UVD, k, mmUVD_STATUS);
				if (status & 2)
					break;
				mdelay(10);
			}
			r = 0;
			if (status & 2)
				break;

			DRM_ERROR("UVD(%d) not responding, trying to reset the VCPU!!!\n", k);
			WREG32_P(SOC15_REG_OFFSET(UVD, k, mmUVD_SOFT_RESET),
				 UVD_SOFT_RESET__VCPU_SOFT_RESET_MASK,
				 ~UVD_SOFT_RESET__VCPU_SOFT_RESET_MASK);
			mdelay(10);
			WREG32_P(SOC15_REG_OFFSET(UVD, k, mmUVD_SOFT_RESET), 0,
				 ~UVD_SOFT_RESET__VCPU_SOFT_RESET_MASK);
			mdelay(10);
			r = -1;
		}

		if (r) {
			DRM_ERROR("UVD(%d) not responding, giving up!!!\n", k);
			return r;
		}
		/* enable master interrupt */
		WREG32_P(SOC15_REG_OFFSET(UVD, k, mmUVD_MASTINT_EN),
			 (UVD_MASTINT_EN__VCPU_EN_MASK | UVD_MASTINT_EN__SYS_EN_MASK),
			 ~(UVD_MASTINT_EN__VCPU_EN_MASK | UVD_MASTINT_EN__SYS_EN_MASK));

		/* clear the bit 4 of UVD_STATUS */
		WREG32_P(SOC15_REG_OFFSET(UVD, k, mmUVD_STATUS), 0,
			 ~(2 << UVD_STATUS__VCPU_REPORT__SHIFT));

		/* force RBC into idle state */
		rb_bufsz = order_base_2(ring->ring_size);
		tmp = REG_SET_FIELD(0, UVD_RBC_RB_CNTL, RB_BUFSZ, rb_bufsz);
		tmp = REG_SET_FIELD(tmp, UVD_RBC_RB_CNTL, RB_BLKSZ, 1);
		tmp = REG_SET_FIELD(tmp, UVD_RBC_RB_CNTL, RB_NO_FETCH, 1);
		tmp = REG_SET_FIELD(tmp, UVD_RBC_RB_CNTL, RB_WPTR_POLL_EN, 0);
		tmp = REG_SET_FIELD(tmp, UVD_RBC_RB_CNTL, RB_NO_UPDATE, 1);
		tmp = REG_SET_FIELD(tmp, UVD_RBC_RB_CNTL, RB_RPTR_WR_EN, 1);
		WREG32_SOC15(UVD, k, mmUVD_RBC_RB_CNTL, tmp);

		/* set the write pointer delay */
		WREG32_SOC15(UVD, k, mmUVD_RBC_RB_WPTR_CNTL, 0);

		/* set the wb address */
		WREG32_SOC15(UVD, k, mmUVD_RBC_RB_RPTR_ADDR,
			     (upper_32_bits(ring->gpu_addr) >> 2));

		/* program the RB_BASE for ring buffer */
		WREG32_SOC15(UVD, k, mmUVD_LMI_RBC_RB_64BIT_BAR_LOW,
			     lower_32_bits(ring->gpu_addr));
		WREG32_SOC15(UVD, k, mmUVD_LMI_RBC_RB_64BIT_BAR_HIGH,
			     upper_32_bits(ring->gpu_addr));

		/* Initialize the ring buffer's read and write pointers */
		WREG32_SOC15(UVD, k, mmUVD_RBC_RB_RPTR, 0);

		ring->wptr = RREG32_SOC15(UVD, k, mmUVD_RBC_RB_RPTR);
		WREG32_SOC15(UVD, k, mmUVD_RBC_RB_WPTR,
			     lower_32_bits(ring->wptr));

		WREG32_P(SOC15_REG_OFFSET(UVD, k, mmUVD_RBC_RB_CNTL), 0,
			 ~UVD_RBC_RB_CNTL__RB_NO_FETCH_MASK);

		ring = &adev->uvd.inst[k].ring_enc[0];
		WREG32_SOC15(UVD, k, mmUVD_RB_RPTR, lower_32_bits(ring->wptr));
		WREG32_SOC15(UVD, k, mmUVD_RB_WPTR, lower_32_bits(ring->wptr));
		WREG32_SOC15(UVD, k, mmUVD_RB_BASE_LO, ring->gpu_addr);
		WREG32_SOC15(UVD, k, mmUVD_RB_BASE_HI, upper_32_bits(ring->gpu_addr));
		WREG32_SOC15(UVD, k, mmUVD_RB_SIZE, ring->ring_size / 4);

		ring = &adev->uvd.inst[k].ring_enc[1];
		WREG32_SOC15(UVD, k, mmUVD_RB_RPTR2, lower_32_bits(ring->wptr));
		WREG32_SOC15(UVD, k, mmUVD_RB_WPTR2, lower_32_bits(ring->wptr));
		WREG32_SOC15(UVD, k, mmUVD_RB_BASE_LO2, ring->gpu_addr);
		WREG32_SOC15(UVD, k, mmUVD_RB_BASE_HI2, upper_32_bits(ring->gpu_addr));
		WREG32_SOC15(UVD, k, mmUVD_RB_SIZE2, ring->ring_size / 4);
	}

	return 0;
}

/**
 * uvd_v7_0_stop - stop UVD block
 *
 * @adev: amdgpu_device pointer
 *
 * stop the UVD block
 */
static void uvd_v7_0_stop(struct amdgpu_device *adev)
{
	uint8_t i = 0;

	for (i = 0; i < adev->uvd.num_uvd_inst; ++i) {
		if (adev->uvd.harvest_config & (1 << i))
			continue;
		/* force RBC into idle state */
		WREG32_SOC15(UVD, i, mmUVD_RBC_RB_CNTL, 0x11010101);

		/* Stall UMC and register bus before resetting VCPU */
		WREG32_P(SOC15_REG_OFFSET(UVD, i, mmUVD_LMI_CTRL2),
			 UVD_LMI_CTRL2__STALL_ARB_UMC_MASK,
			 ~UVD_LMI_CTRL2__STALL_ARB_UMC_MASK);
		mdelay(1);

		/* put VCPU into reset */
		WREG32_SOC15(UVD, i, mmUVD_SOFT_RESET,
			     UVD_SOFT_RESET__VCPU_SOFT_RESET_MASK);
		mdelay(5);

		/* disable VCPU clock */
		WREG32_SOC15(UVD, i, mmUVD_VCPU_CNTL, 0x0);

		/* Unstall UMC and register bus */
		WREG32_P(SOC15_REG_OFFSET(UVD, i, mmUVD_LMI_CTRL2), 0,
			 ~UVD_LMI_CTRL2__STALL_ARB_UMC_MASK);
	}
}

/**
 * uvd_v7_0_ring_emit_fence - emit a fence & trap command
 *
 * @ring: amdgpu_ring pointer
 * @addr: address
 * @seq: sequence number
 * @flags: fence related flags
 *
 * Write a fence and a trap command to the ring.
 */
static void uvd_v7_0_ring_emit_fence(struct amdgpu_ring *ring, u64 addr, u64 seq,
				     unsigned flags)
{
	struct amdgpu_device *adev = ring->adev;

	WARN_ON(flags & AMDGPU_FENCE_FLAG_64BIT);

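	/* The fence value travels as a context-id write, the fence address
	 * through the GPCOM DATA0/DATA1 pair.  The trailing VCPU_CMD values
	 * (0, then 2 after a zeroed address) appear to be what this firmware
	 * treats as "fence" and "trap" respectively.
	 */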
	amdgpu_ring_write(ring,
		PACKET0(SOC15_REG_OFFSET(UVD, ring->me, mmUVD_CONTEXT_ID), 0));
	amdgpu_ring_write(ring, seq);
	amdgpu_ring_write(ring,
		PACKET0(SOC15_REG_OFFSET(UVD, ring->me, mmUVD_GPCOM_VCPU_DATA0), 0));
	amdgpu_ring_write(ring, addr & 0xffffffff);
	amdgpu_ring_write(ring,
		PACKET0(SOC15_REG_OFFSET(UVD, ring->me, mmUVD_GPCOM_VCPU_DATA1), 0));
	amdgpu_ring_write(ring, upper_32_bits(addr) & 0xff);
	amdgpu_ring_write(ring,
		PACKET0(SOC15_REG_OFFSET(UVD, ring->me, mmUVD_GPCOM_VCPU_CMD), 0));
	amdgpu_ring_write(ring, 0);

	amdgpu_ring_write(ring,
		PACKET0(SOC15_REG_OFFSET(UVD, ring->me, mmUVD_GPCOM_VCPU_DATA0), 0));
	amdgpu_ring_write(ring, 0);
	amdgpu_ring_write(ring,
		PACKET0(SOC15_REG_OFFSET(UVD, ring->me, mmUVD_GPCOM_VCPU_DATA1), 0));
	amdgpu_ring_write(ring, 0);
	amdgpu_ring_write(ring,
		PACKET0(SOC15_REG_OFFSET(UVD, ring->me, mmUVD_GPCOM_VCPU_CMD), 0));
	amdgpu_ring_write(ring, 2);
}

/**
 * uvd_v7_0_enc_ring_emit_fence - emit an enc fence & trap command
 *
 * @ring: amdgpu_ring pointer
 * @addr: address
 * @seq: sequence number
 * @flags: fence related flags
 *
 * Write an enc fence and a trap command to the ring.
 */
static void uvd_v7_0_enc_ring_emit_fence(struct amdgpu_ring *ring, u64 addr,
					 u64 seq, unsigned flags)
{
	WARN_ON(flags & AMDGPU_FENCE_FLAG_64BIT);

	amdgpu_ring_write(ring, HEVC_ENC_CMD_FENCE);
	amdgpu_ring_write(ring, addr);
	amdgpu_ring_write(ring, upper_32_bits(addr));
	amdgpu_ring_write(ring, seq);
	amdgpu_ring_write(ring, HEVC_ENC_CMD_TRAP);
}

/**
 * uvd_v7_0_ring_emit_hdp_flush - skip HDP flushing
 *
 * @ring: amdgpu_ring pointer
 */
static void uvd_v7_0_ring_emit_hdp_flush(struct amdgpu_ring *ring)
{
	/* The firmware doesn't seem to like touching registers at this point. */
}

/**
 * uvd_v7_0_ring_test_ring - register write test
 *
 * @ring: amdgpu_ring pointer
 *
 * Test if we can successfully write to the context register
 */
static int uvd_v7_0_ring_test_ring(struct amdgpu_ring *ring)
{
	struct amdgpu_device *adev = ring->adev;
	uint32_t tmp = 0;
	unsigned i;
	int r;

	WREG32_SOC15(UVD, ring->me, mmUVD_CONTEXT_ID, 0xCAFEDEAD);
	r = amdgpu_ring_alloc(ring, 3);
	if (r)
		return r;

	amdgpu_ring_write(ring,
		PACKET0(SOC15_REG_OFFSET(UVD, ring->me, mmUVD_CONTEXT_ID), 0));
	amdgpu_ring_write(ring, 0xDEADBEEF);
	amdgpu_ring_commit(ring);
	for (i = 0; i < adev->usec_timeout; i++) {
		tmp = RREG32_SOC15(UVD, ring->me, mmUVD_CONTEXT_ID);
		if (tmp == 0xDEADBEEF)
			break;
		udelay(1);
	}

	if (i >= adev->usec_timeout)
		r = -ETIMEDOUT;

	return r;
}

/**
 * uvd_v7_0_ring_patch_cs_in_place - Patch the IB for command submission.
 *
 * @p: the CS parser with the IBs
 * @ib_idx: which IB to patch
 *
 */
static int uvd_v7_0_ring_patch_cs_in_place(struct amdgpu_cs_parser *p,
					   uint32_t ib_idx)
{
	struct amdgpu_ring *ring = to_amdgpu_ring(p->entity->rq->sched);
	struct amdgpu_ib *ib = &p->job->ibs[ib_idx];
	unsigned i;

	/* No patching necessary for the first instance */
	if (!ring->me)
		return 0;

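	/* User-mode drivers encode register offsets against instance 0, so
	 * for a ring on the second instance rebase every offset in the IB
	 * from instance 0's register segment into instance 1's.
	 */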
	for (i = 0; i < ib->length_dw; i += 2) {
		uint32_t reg = amdgpu_get_ib_value(p, ib_idx, i);

		reg -= p->adev->reg_offset[UVD_HWIP][0][1];
		reg += p->adev->reg_offset[UVD_HWIP][1][1];

		amdgpu_set_ib_value(p, ib_idx, i, reg);
	}
	return 0;
}

/**
 * uvd_v7_0_ring_emit_ib - execute indirect buffer
 *
 * @ring: amdgpu_ring pointer
 * @job: job to retrieve vmid from
 * @ib: indirect buffer to execute
 * @flags: unused
 *
 * Write ring commands to execute the indirect buffer
 */
static void uvd_v7_0_ring_emit_ib(struct amdgpu_ring *ring,
				  struct amdgpu_job *job,
				  struct amdgpu_ib *ib,
				  uint32_t flags)
{
	struct amdgpu_device *adev = ring->adev;
	unsigned vmid = AMDGPU_JOB_GET_VMID(job);

	amdgpu_ring_write(ring,
		PACKET0(SOC15_REG_OFFSET(UVD, ring->me, mmUVD_LMI_RBC_IB_VMID), 0));
	amdgpu_ring_write(ring, vmid);

	amdgpu_ring_write(ring,
		PACKET0(SOC15_REG_OFFSET(UVD, ring->me, mmUVD_LMI_RBC_IB_64BIT_BAR_LOW), 0));
	amdgpu_ring_write(ring, lower_32_bits(ib->gpu_addr));
	amdgpu_ring_write(ring,
		PACKET0(SOC15_REG_OFFSET(UVD, ring->me, mmUVD_LMI_RBC_IB_64BIT_BAR_HIGH), 0));
	amdgpu_ring_write(ring, upper_32_bits(ib->gpu_addr));
	amdgpu_ring_write(ring,
		PACKET0(SOC15_REG_OFFSET(UVD, ring->me, mmUVD_RBC_IB_SIZE), 0));
	amdgpu_ring_write(ring, ib->length_dw);
}

/**
 * uvd_v7_0_enc_ring_emit_ib - enc execute indirect buffer
 *
 * @ring: amdgpu_ring pointer
 * @job: job to retrieve vmid from
 * @ib: indirect buffer to execute
 * @flags: unused
 *
 * Write enc ring commands to execute the indirect buffer
 */
static void uvd_v7_0_enc_ring_emit_ib(struct amdgpu_ring *ring,
				      struct amdgpu_job *job,
				      struct amdgpu_ib *ib,
				      uint32_t flags)
{
	unsigned vmid = AMDGPU_JOB_GET_VMID(job);

	amdgpu_ring_write(ring, HEVC_ENC_CMD_IB_VM);
	amdgpu_ring_write(ring, vmid);
	amdgpu_ring_write(ring, lower_32_bits(ib->gpu_addr));
	amdgpu_ring_write(ring, upper_32_bits(ib->gpu_addr));
	amdgpu_ring_write(ring, ib->length_dw);
}

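/* Privileged register writes/waits on the decode ring are tunnelled through
 * the VCPU's GPCOM mailbox: DATA0 carries the register offset in bytes
 * (hence the << 2), DATA1 the value, GP_SCRATCH8 the compare mask, and
 * VCPU_CMD selects the operation (8 for a write, 12 for a wait, as emitted
 * below).
 */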
static void uvd_v7_0_ring_emit_wreg(struct amdgpu_ring *ring,
				    uint32_t reg, uint32_t val)
{
	struct amdgpu_device *adev = ring->adev;

	amdgpu_ring_write(ring,
		PACKET0(SOC15_REG_OFFSET(UVD, ring->me, mmUVD_GPCOM_VCPU_DATA0), 0));
	amdgpu_ring_write(ring, reg << 2);
	amdgpu_ring_write(ring,
		PACKET0(SOC15_REG_OFFSET(UVD, ring->me, mmUVD_GPCOM_VCPU_DATA1), 0));
	amdgpu_ring_write(ring, val);
	amdgpu_ring_write(ring,
		PACKET0(SOC15_REG_OFFSET(UVD, ring->me, mmUVD_GPCOM_VCPU_CMD), 0));
	amdgpu_ring_write(ring, 8);
}

static void uvd_v7_0_ring_emit_reg_wait(struct amdgpu_ring *ring, uint32_t reg,
					uint32_t val, uint32_t mask)
{
	struct amdgpu_device *adev = ring->adev;

	amdgpu_ring_write(ring,
		PACKET0(SOC15_REG_OFFSET(UVD, ring->me, mmUVD_GPCOM_VCPU_DATA0), 0));
	amdgpu_ring_write(ring, reg << 2);
	amdgpu_ring_write(ring,
		PACKET0(SOC15_REG_OFFSET(UVD, ring->me, mmUVD_GPCOM_VCPU_DATA1), 0));
	amdgpu_ring_write(ring, val);
	amdgpu_ring_write(ring,
		PACKET0(SOC15_REG_OFFSET(UVD, ring->me, mmUVD_GP_SCRATCH8), 0));
	amdgpu_ring_write(ring, mask);
	amdgpu_ring_write(ring,
		PACKET0(SOC15_REG_OFFSET(UVD, ring->me, mmUVD_GPCOM_VCPU_CMD), 0));
	amdgpu_ring_write(ring, 12);
}

static void uvd_v7_0_ring_emit_vm_flush(struct amdgpu_ring *ring,
					unsigned vmid, uint64_t pd_addr)
{
	struct amdgpu_vmhub *hub = &ring->adev->vmhub[ring->funcs->vmhub];
	uint32_t data0, data1, mask;

	pd_addr = amdgpu_gmc_emit_flush_gpu_tlb(ring, vmid, pd_addr);

	/* wait for reg writes */
	data0 = hub->ctx0_ptb_addr_lo32 + vmid * hub->ctx_addr_distance;
	data1 = lower_32_bits(pd_addr);
	mask = 0xffffffff;
	uvd_v7_0_ring_emit_reg_wait(ring, data0, data1, mask);
}

static void uvd_v7_0_ring_insert_nop(struct amdgpu_ring *ring, uint32_t count)
{
	struct amdgpu_device *adev = ring->adev;
	int i;

	WARN_ON(ring->wptr % 2 || count % 2);

	for (i = 0; i < count / 2; i++) {
		amdgpu_ring_write(ring, PACKET0(SOC15_REG_OFFSET(UVD, ring->me, mmUVD_NO_OP), 0));
		amdgpu_ring_write(ring, 0);
	}
}

static void uvd_v7_0_enc_ring_insert_end(struct amdgpu_ring *ring)
{
	amdgpu_ring_write(ring, HEVC_ENC_CMD_END);
}

static void uvd_v7_0_enc_ring_emit_reg_wait(struct amdgpu_ring *ring,
					    uint32_t reg, uint32_t val,
					    uint32_t mask)
{
	amdgpu_ring_write(ring, HEVC_ENC_CMD_REG_WAIT);
	amdgpu_ring_write(ring, reg << 2);
	amdgpu_ring_write(ring, mask);
	amdgpu_ring_write(ring, val);
}

static void uvd_v7_0_enc_ring_emit_vm_flush(struct amdgpu_ring *ring,
					    unsigned int vmid, uint64_t pd_addr)
{
	struct amdgpu_vmhub *hub = &ring->adev->vmhub[ring->funcs->vmhub];

	pd_addr = amdgpu_gmc_emit_flush_gpu_tlb(ring, vmid, pd_addr);

	/* wait for reg writes */
	uvd_v7_0_enc_ring_emit_reg_wait(ring, hub->ctx0_ptb_addr_lo32 +
					vmid * hub->ctx_addr_distance,
					lower_32_bits(pd_addr), 0xffffffff);
}

static void uvd_v7_0_enc_ring_emit_wreg(struct amdgpu_ring *ring,
					uint32_t reg, uint32_t val)
{
	amdgpu_ring_write(ring, HEVC_ENC_CMD_REG_WRITE);
	amdgpu_ring_write(ring, reg << 2);
	amdgpu_ring_write(ring, val);
}

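/* Everything from here to the matching #endif is compiled out.  These
 * idle/soft-reset/gating helpers look carried over from UVD 6.x: they still
 * reference ring->me without a ring in scope and would need rework before
 * they could be enabled.  The live ip_funcs table leaves the corresponding
 * hooks NULL.
 */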
#if 0
static bool uvd_v7_0_is_idle(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	return !(RREG32(mmSRBM_STATUS) & SRBM_STATUS__UVD_BUSY_MASK);
}

static int uvd_v7_0_wait_for_idle(void *handle)
{
	unsigned i;
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	for (i = 0; i < adev->usec_timeout; i++) {
		if (uvd_v7_0_is_idle(handle))
			return 0;
	}
	return -ETIMEDOUT;
}

#define AMDGPU_UVD_STATUS_BUSY_MASK    0xfd
static bool uvd_v7_0_check_soft_reset(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	u32 srbm_soft_reset = 0;
	u32 tmp = RREG32(mmSRBM_STATUS);

	if (REG_GET_FIELD(tmp, SRBM_STATUS, UVD_RQ_PENDING) ||
	    REG_GET_FIELD(tmp, SRBM_STATUS, UVD_BUSY) ||
	    (RREG32_SOC15(UVD, ring->me, mmUVD_STATUS) &
		    AMDGPU_UVD_STATUS_BUSY_MASK))
		srbm_soft_reset = REG_SET_FIELD(srbm_soft_reset,
				SRBM_SOFT_RESET, SOFT_RESET_UVD, 1);

	if (srbm_soft_reset) {
		adev->uvd.inst[ring->me].srbm_soft_reset = srbm_soft_reset;
		return true;
	} else {
		adev->uvd.inst[ring->me].srbm_soft_reset = 0;
		return false;
	}
}

static int uvd_v7_0_pre_soft_reset(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	if (!adev->uvd.inst[ring->me].srbm_soft_reset)
		return 0;

	uvd_v7_0_stop(adev);
	return 0;
}

static int uvd_v7_0_soft_reset(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	u32 srbm_soft_reset;

	if (!adev->uvd.inst[ring->me].srbm_soft_reset)
		return 0;
	srbm_soft_reset = adev->uvd.inst[ring->me].srbm_soft_reset;

	if (srbm_soft_reset) {
		u32 tmp;

		tmp = RREG32(mmSRBM_SOFT_RESET);
		tmp |= srbm_soft_reset;
		dev_info(adev->dev, "SRBM_SOFT_RESET=0x%08X\n", tmp);
		WREG32(mmSRBM_SOFT_RESET, tmp);
		tmp = RREG32(mmSRBM_SOFT_RESET);

		udelay(50);

		tmp &= ~srbm_soft_reset;
		WREG32(mmSRBM_SOFT_RESET, tmp);
		tmp = RREG32(mmSRBM_SOFT_RESET);

		/* Wait a little for things to settle down */
		udelay(50);
	}

	return 0;
}

static int uvd_v7_0_post_soft_reset(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	if (!adev->uvd.inst[ring->me].srbm_soft_reset)
		return 0;

	mdelay(5);

	return uvd_v7_0_start(adev);
}
#endif

static int uvd_v7_0_set_interrupt_state(struct amdgpu_device *adev,
					struct amdgpu_irq_src *source,
					unsigned type,
					enum amdgpu_interrupt_state state)
{
	return 0;
}

static int uvd_v7_0_process_interrupt(struct amdgpu_device *adev,
				      struct amdgpu_irq_src *source,
				      struct amdgpu_iv_entry *entry)
{
	uint32_t ip_instance;

	switch (entry->client_id) {
	case SOC15_IH_CLIENTID_UVD:
		ip_instance = 0;
		break;
	case SOC15_IH_CLIENTID_UVD1:
		ip_instance = 1;
		break;
	default:
		DRM_ERROR("Unhandled client id: %d\n", entry->client_id);
		return 0;
	}

	DRM_DEBUG("IH: UVD TRAP\n");

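	/* src_ids follow irqsrcs_uvd_7_0.h: 124 is the system-message
	 * interrupt signalling decode-ring fences, 119 and 120 the two
	 * encode rings.
	 */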
	switch (entry->src_id) {
	case 124:
		amdgpu_fence_process(&adev->uvd.inst[ip_instance].ring);
		break;
	case 119:
		amdgpu_fence_process(&adev->uvd.inst[ip_instance].ring_enc[0]);
		break;
	case 120:
		if (!amdgpu_sriov_vf(adev))
			amdgpu_fence_process(&adev->uvd.inst[ip_instance].ring_enc[1]);
		break;
	default:
		DRM_ERROR("Unhandled interrupt: %d %d\n",
			  entry->src_id, entry->src_data[0]);
		break;
	}

	return 0;
}

#if 0
static void uvd_v7_0_set_sw_clock_gating(struct amdgpu_device *adev)
{
	uint32_t data, data1, data2, suvd_flags;

	data = RREG32_SOC15(UVD, ring->me, mmUVD_CGC_CTRL);
	data1 = RREG32_SOC15(UVD, ring->me, mmUVD_SUVD_CGC_GATE);
	data2 = RREG32_SOC15(UVD, ring->me, mmUVD_SUVD_CGC_CTRL);

	data &= ~(UVD_CGC_CTRL__CLK_OFF_DELAY_MASK |
		  UVD_CGC_CTRL__CLK_GATE_DLY_TIMER_MASK);

	suvd_flags = UVD_SUVD_CGC_GATE__SRE_MASK |
		     UVD_SUVD_CGC_GATE__SIT_MASK |
		     UVD_SUVD_CGC_GATE__SMP_MASK |
		     UVD_SUVD_CGC_GATE__SCM_MASK |
		     UVD_SUVD_CGC_GATE__SDB_MASK;

	data |= UVD_CGC_CTRL__DYN_CLOCK_MODE_MASK |
		(1 << REG_FIELD_SHIFT(UVD_CGC_CTRL, CLK_GATE_DLY_TIMER)) |
		(4 << REG_FIELD_SHIFT(UVD_CGC_CTRL, CLK_OFF_DELAY));

	data &= ~(UVD_CGC_CTRL__UDEC_RE_MODE_MASK |
		  UVD_CGC_CTRL__UDEC_CM_MODE_MASK |
		  UVD_CGC_CTRL__UDEC_IT_MODE_MASK |
		  UVD_CGC_CTRL__UDEC_DB_MODE_MASK |
		  UVD_CGC_CTRL__UDEC_MP_MODE_MASK |
		  UVD_CGC_CTRL__SYS_MODE_MASK |
		  UVD_CGC_CTRL__UDEC_MODE_MASK |
		  UVD_CGC_CTRL__MPEG2_MODE_MASK |
		  UVD_CGC_CTRL__REGS_MODE_MASK |
		  UVD_CGC_CTRL__RBC_MODE_MASK |
		  UVD_CGC_CTRL__LMI_MC_MODE_MASK |
		  UVD_CGC_CTRL__LMI_UMC_MODE_MASK |
		  UVD_CGC_CTRL__IDCT_MODE_MASK |
		  UVD_CGC_CTRL__MPRD_MODE_MASK |
		  UVD_CGC_CTRL__MPC_MODE_MASK |
		  UVD_CGC_CTRL__LBSI_MODE_MASK |
		  UVD_CGC_CTRL__LRBBM_MODE_MASK |
		  UVD_CGC_CTRL__WCB_MODE_MASK |
		  UVD_CGC_CTRL__VCPU_MODE_MASK |
		  UVD_CGC_CTRL__JPEG_MODE_MASK |
		  UVD_CGC_CTRL__JPEG2_MODE_MASK |
		  UVD_CGC_CTRL__SCPU_MODE_MASK);
	data2 &= ~(UVD_SUVD_CGC_CTRL__SRE_MODE_MASK |
		   UVD_SUVD_CGC_CTRL__SIT_MODE_MASK |
		   UVD_SUVD_CGC_CTRL__SMP_MODE_MASK |
		   UVD_SUVD_CGC_CTRL__SCM_MODE_MASK |
		   UVD_SUVD_CGC_CTRL__SDB_MODE_MASK);
	data1 |= suvd_flags;

	WREG32_SOC15(UVD, ring->me, mmUVD_CGC_CTRL, data);
	WREG32_SOC15(UVD, ring->me, mmUVD_CGC_GATE, 0);
	WREG32_SOC15(UVD, ring->me, mmUVD_SUVD_CGC_GATE, data1);
	WREG32_SOC15(UVD, ring->me, mmUVD_SUVD_CGC_CTRL, data2);
}

static void uvd_v7_0_set_hw_clock_gating(struct amdgpu_device *adev)
{
	uint32_t data, data1, cgc_flags, suvd_flags;

	data = RREG32_SOC15(UVD, ring->me, mmUVD_CGC_GATE);
	data1 = RREG32_SOC15(UVD, ring->me, mmUVD_SUVD_CGC_GATE);

	cgc_flags = UVD_CGC_GATE__SYS_MASK |
		    UVD_CGC_GATE__UDEC_MASK |
		    UVD_CGC_GATE__MPEG2_MASK |
		    UVD_CGC_GATE__RBC_MASK |
		    UVD_CGC_GATE__LMI_MC_MASK |
		    UVD_CGC_GATE__IDCT_MASK |
		    UVD_CGC_GATE__MPRD_MASK |
		    UVD_CGC_GATE__MPC_MASK |
		    UVD_CGC_GATE__LBSI_MASK |
		    UVD_CGC_GATE__LRBBM_MASK |
		    UVD_CGC_GATE__UDEC_RE_MASK |
		    UVD_CGC_GATE__UDEC_CM_MASK |
		    UVD_CGC_GATE__UDEC_IT_MASK |
		    UVD_CGC_GATE__UDEC_DB_MASK |
		    UVD_CGC_GATE__UDEC_MP_MASK |
		    UVD_CGC_GATE__WCB_MASK |
		    UVD_CGC_GATE__VCPU_MASK |
		    UVD_CGC_GATE__SCPU_MASK |
		    UVD_CGC_GATE__JPEG_MASK |
		    UVD_CGC_GATE__JPEG2_MASK;

	suvd_flags = UVD_SUVD_CGC_GATE__SRE_MASK |
		     UVD_SUVD_CGC_GATE__SIT_MASK |
		     UVD_SUVD_CGC_GATE__SMP_MASK |
		     UVD_SUVD_CGC_GATE__SCM_MASK |
		     UVD_SUVD_CGC_GATE__SDB_MASK;

	data |= cgc_flags;
	data1 |= suvd_flags;

	WREG32_SOC15(UVD, ring->me, mmUVD_CGC_GATE, data);
	WREG32_SOC15(UVD, ring->me, mmUVD_SUVD_CGC_GATE, data1);
}

static void uvd_v7_0_set_bypass_mode(struct amdgpu_device *adev, bool enable)
{
	u32 tmp = RREG32_SMC(ixGCK_DFS_BYPASS_CNTL);

	if (enable)
		tmp |= (GCK_DFS_BYPASS_CNTL__BYPASSDCLK_MASK |
			GCK_DFS_BYPASS_CNTL__BYPASSVCLK_MASK);
	else
		tmp &= ~(GCK_DFS_BYPASS_CNTL__BYPASSDCLK_MASK |
			 GCK_DFS_BYPASS_CNTL__BYPASSVCLK_MASK);

	WREG32_SMC(ixGCK_DFS_BYPASS_CNTL, tmp);
}

static int uvd_v7_0_set_clockgating_state(void *handle,
					  enum amd_clockgating_state state)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	bool enable = (state == AMD_CG_STATE_GATE);

	uvd_v7_0_set_bypass_mode(adev, enable);

	if (!(adev->cg_flags & AMD_CG_SUPPORT_UVD_MGCG))
		return 0;

	if (enable) {
		/* disable HW gating and enable Sw gating */
		uvd_v7_0_set_sw_clock_gating(adev);
	} else {
		/* wait for STATUS to clear */
		if (uvd_v7_0_wait_for_idle(handle))
			return -EBUSY;

		/* enable HW gates because UVD is idle */
		/* uvd_v7_0_set_hw_clock_gating(adev); */
	}

	return 0;
}

static int uvd_v7_0_set_powergating_state(void *handle,
					  enum amd_powergating_state state)
{
	/* This doesn't actually powergate the UVD block.
	 * That's done in the dpm code via the SMC.  This
	 * just re-inits the block as necessary.  The actual
	 * gating still happens in the dpm code.  We should
	 * revisit this when there is a cleaner line between
	 * the smc and the hw blocks
	 */
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	if (!(adev->pg_flags & AMD_PG_SUPPORT_UVD))
		return 0;

	WREG32_SOC15(UVD, ring->me, mmUVD_POWER_STATUS, UVD_POWER_STATUS__UVD_PG_EN_MASK);

	if (state == AMD_PG_STATE_GATE) {
		uvd_v7_0_stop(adev);
		return 0;
	} else {
		return uvd_v7_0_start(adev);
	}
}
#endif

static int uvd_v7_0_set_clockgating_state(void *handle,
					  enum amd_clockgating_state state)
{
	/* needed for driver unload */
	return 0;
}

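/* The idle, soft-reset and powergating hooks below are left NULL on
 * purpose; the candidate implementations above are compiled out.
 */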
const struct amd_ip_funcs uvd_v7_0_ip_funcs = {
	.name = "uvd_v7_0",
	.early_init = uvd_v7_0_early_init,
	.late_init = NULL,
	.sw_init = uvd_v7_0_sw_init,
	.sw_fini = uvd_v7_0_sw_fini,
	.hw_init = uvd_v7_0_hw_init,
	.hw_fini = uvd_v7_0_hw_fini,
	.suspend = uvd_v7_0_suspend,
	.resume = uvd_v7_0_resume,
	.is_idle = NULL /* uvd_v7_0_is_idle */,
	.wait_for_idle = NULL /* uvd_v7_0_wait_for_idle */,
	.check_soft_reset = NULL /* uvd_v7_0_check_soft_reset */,
	.pre_soft_reset = NULL /* uvd_v7_0_pre_soft_reset */,
	.soft_reset = NULL /* uvd_v7_0_soft_reset */,
	.post_soft_reset = NULL /* uvd_v7_0_post_soft_reset */,
	.set_clockgating_state = uvd_v7_0_set_clockgating_state,
	.set_powergating_state = NULL /* uvd_v7_0_set_powergating_state */,
};

static const struct amdgpu_ring_funcs uvd_v7_0_ring_vm_funcs = {
	.type = AMDGPU_RING_TYPE_UVD,
	.align_mask = 0xf,
	.support_64bit_ptrs = false,
	.no_user_fence = true,
	.vmhub = AMDGPU_MMHUB_0,
	.get_rptr = uvd_v7_0_ring_get_rptr,
	.get_wptr = uvd_v7_0_ring_get_wptr,
	.set_wptr = uvd_v7_0_ring_set_wptr,
	.patch_cs_in_place = uvd_v7_0_ring_patch_cs_in_place,
	.emit_frame_size =
		6 + /* hdp invalidate */
		SOC15_FLUSH_GPU_TLB_NUM_WREG * 6 +
		SOC15_FLUSH_GPU_TLB_NUM_REG_WAIT * 8 +
		8 + /* uvd_v7_0_ring_emit_vm_flush */
		14 + 14, /* uvd_v7_0_ring_emit_fence x2 vm fence */
	.emit_ib_size = 8, /* uvd_v7_0_ring_emit_ib */
	.emit_ib = uvd_v7_0_ring_emit_ib,
	.emit_fence = uvd_v7_0_ring_emit_fence,
	.emit_vm_flush = uvd_v7_0_ring_emit_vm_flush,
	.emit_hdp_flush = uvd_v7_0_ring_emit_hdp_flush,
	.test_ring = uvd_v7_0_ring_test_ring,
	.test_ib = amdgpu_uvd_ring_test_ib,
	.insert_nop = uvd_v7_0_ring_insert_nop,
	.pad_ib = amdgpu_ring_generic_pad_ib,
	.begin_use = amdgpu_uvd_ring_begin_use,
	.end_use = amdgpu_uvd_ring_end_use,
	.emit_wreg = uvd_v7_0_ring_emit_wreg,
	.emit_reg_wait = uvd_v7_0_ring_emit_reg_wait,
	.emit_reg_write_reg_wait = amdgpu_ring_emit_reg_write_reg_wait_helper,
};

static const struct amdgpu_ring_funcs uvd_v7_0_enc_ring_vm_funcs = {
	.type = AMDGPU_RING_TYPE_UVD_ENC,
	.align_mask = 0x3f,
	.nop = HEVC_ENC_CMD_NO_OP,
	.support_64bit_ptrs = false,
	.no_user_fence = true,
	.vmhub = AMDGPU_MMHUB_0,
	.get_rptr = uvd_v7_0_enc_ring_get_rptr,
	.get_wptr = uvd_v7_0_enc_ring_get_wptr,
	.set_wptr = uvd_v7_0_enc_ring_set_wptr,
	.emit_frame_size =
		3 + 3 + /* hdp flush / invalidate */
		SOC15_FLUSH_GPU_TLB_NUM_WREG * 3 +
		SOC15_FLUSH_GPU_TLB_NUM_REG_WAIT * 4 +
		4 + /* uvd_v7_0_enc_ring_emit_vm_flush */
		5 + 5 + /* uvd_v7_0_enc_ring_emit_fence x2 vm fence */
		1, /* uvd_v7_0_enc_ring_insert_end */
	.emit_ib_size = 5, /* uvd_v7_0_enc_ring_emit_ib */
	.emit_ib = uvd_v7_0_enc_ring_emit_ib,
	.emit_fence = uvd_v7_0_enc_ring_emit_fence,
	.emit_vm_flush = uvd_v7_0_enc_ring_emit_vm_flush,
	.test_ring = uvd_v7_0_enc_ring_test_ring,
	.test_ib = uvd_v7_0_enc_ring_test_ib,
	.insert_nop = amdgpu_ring_insert_nop,
	.insert_end = uvd_v7_0_enc_ring_insert_end,
	.pad_ib = amdgpu_ring_generic_pad_ib,
	.begin_use = amdgpu_uvd_ring_begin_use,
	.end_use = amdgpu_uvd_ring_end_use,
	.emit_wreg = uvd_v7_0_enc_ring_emit_wreg,
	.emit_reg_wait = uvd_v7_0_enc_ring_emit_reg_wait,
	.emit_reg_write_reg_wait = amdgpu_ring_emit_reg_write_reg_wait_helper,
};

static void uvd_v7_0_set_ring_funcs(struct amdgpu_device *adev)
{
	int i;

	for (i = 0; i < adev->uvd.num_uvd_inst; i++) {
		if (adev->uvd.harvest_config & (1 << i))
			continue;
		adev->uvd.inst[i].ring.funcs = &uvd_v7_0_ring_vm_funcs;
		adev->uvd.inst[i].ring.me = i;
		DRM_INFO("UVD(%d) is enabled in VM mode\n", i);
	}
}

static void uvd_v7_0_set_enc_ring_funcs(struct amdgpu_device *adev)
{
	int i, j;

	for (j = 0; j < adev->uvd.num_uvd_inst; j++) {
		if (adev->uvd.harvest_config & (1 << j))
			continue;
		for (i = 0; i < adev->uvd.num_enc_rings; ++i) {
			adev->uvd.inst[j].ring_enc[i].funcs = &uvd_v7_0_enc_ring_vm_funcs;
			adev->uvd.inst[j].ring_enc[i].me = j;
		}

		DRM_INFO("UVD(%d) ENC is enabled in VM mode\n", j);
	}
}

static const struct amdgpu_irq_src_funcs uvd_v7_0_irq_funcs = {
	.set = uvd_v7_0_set_interrupt_state,
	.process = uvd_v7_0_process_interrupt,
};

static void uvd_v7_0_set_irq_funcs(struct amdgpu_device *adev)
{
	int i;

	for (i = 0; i < adev->uvd.num_uvd_inst; i++) {
		if (adev->uvd.harvest_config & (1 << i))
			continue;
		adev->uvd.inst[i].irq.num_types = adev->uvd.num_enc_rings + 1;
		adev->uvd.inst[i].irq.funcs = &uvd_v7_0_irq_funcs;
	}
}

const struct amdgpu_ip_block_version uvd_v7_0_ip_block = {
	.type = AMD_IP_BLOCK_TYPE_UVD,
	.major = 7,
	.minor = 0,
	.rev = 0,
	.funcs = &uvd_v7_0_ip_funcs,
};