/*
 * Copyright 2016 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 */
#include <linux/firmware.h>

#include "amdgpu.h"
#include "amdgpu_uvd.h"
#include "amdgpu_cs.h"
#include "soc15.h"
#include "soc15d.h"
#include "soc15_common.h"
#include "mmsch_v1_0.h"

#include "uvd/uvd_7_0_offset.h"
#include "uvd/uvd_7_0_sh_mask.h"
#include "vce/vce_4_0_offset.h"
#include "vce/vce_4_0_default.h"
#include "vce/vce_4_0_sh_mask.h"
#include "nbif/nbif_6_1_offset.h"
#include "mmhub/mmhub_1_0_offset.h"
#include "mmhub/mmhub_1_0_sh_mask.h"
#include "ivsrcid/uvd/irqsrcs_uvd_7_0.h"
#define mmUVD_PG0_CC_UVD_HARVESTING				0x00c7
#define mmUVD_PG0_CC_UVD_HARVESTING_BASE_IDX			1
//UVD_PG0_CC_UVD_HARVESTING
#define UVD_PG0_CC_UVD_HARVESTING__UVD_DISABLE__SHIFT		0x1
#define UVD_PG0_CC_UVD_HARVESTING__UVD_DISABLE_MASK		0x00000002L

#define UVD7_MAX_HW_INSTANCES_VEGA20			2
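/*
 * The harvesting fuse register above is read in early_init() to detect UVD
 * instances that were fused off (harvested) at manufacturing; it is defined
 * locally since it does not appear in the uvd_7_0 register headers included
 * here.
 */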
static void uvd_v7_0_set_ring_funcs(struct amdgpu_device *adev);
static void uvd_v7_0_set_enc_ring_funcs(struct amdgpu_device *adev);
static void uvd_v7_0_set_irq_funcs(struct amdgpu_device *adev);
static int uvd_v7_0_start(struct amdgpu_device *adev);
static void uvd_v7_0_stop(struct amdgpu_device *adev);
static int uvd_v7_0_sriov_start(struct amdgpu_device *adev);
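/* interrupt-handler client IDs, indexed by UVD instance */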
static int amdgpu_ih_clientid_uvds[] = {
	SOC15_IH_CLIENTID_UVD,
	SOC15_IH_CLIENTID_UVD1
};
/**
 * uvd_v7_0_ring_get_rptr - get read pointer
 *
 * @ring: amdgpu_ring pointer
 *
 * Returns the current hardware read pointer
 */
static uint64_t uvd_v7_0_ring_get_rptr(struct amdgpu_ring *ring)
{
	struct amdgpu_device *adev = ring->adev;

	return RREG32_SOC15(UVD, ring->me, mmUVD_RBC_RB_RPTR);
}
/**
 * uvd_v7_0_enc_ring_get_rptr - get enc read pointer
 *
 * @ring: amdgpu_ring pointer
 *
 * Returns the current hardware enc read pointer
 */
static uint64_t uvd_v7_0_enc_ring_get_rptr(struct amdgpu_ring *ring)
{
	struct amdgpu_device *adev = ring->adev;

	if (ring == &adev->uvd.inst[ring->me].ring_enc[0])
		return RREG32_SOC15(UVD, ring->me, mmUVD_RB_RPTR);
	else
		return RREG32_SOC15(UVD, ring->me, mmUVD_RB_RPTR2);
}
/**
 * uvd_v7_0_ring_get_wptr - get write pointer
 *
 * @ring: amdgpu_ring pointer
 *
 * Returns the current hardware write pointer
 */
static uint64_t uvd_v7_0_ring_get_wptr(struct amdgpu_ring *ring)
{
	struct amdgpu_device *adev = ring->adev;

	return RREG32_SOC15(UVD, ring->me, mmUVD_RBC_RB_WPTR);
}
/**
 * uvd_v7_0_enc_ring_get_wptr - get enc write pointer
 *
 * @ring: amdgpu_ring pointer
 *
 * Returns the current hardware enc write pointer
 */
static uint64_t uvd_v7_0_enc_ring_get_wptr(struct amdgpu_ring *ring)
{
	struct amdgpu_device *adev = ring->adev;

	if (ring->use_doorbell)
		return *ring->wptr_cpu_addr;

	if (ring == &adev->uvd.inst[ring->me].ring_enc[0])
		return RREG32_SOC15(UVD, ring->me, mmUVD_RB_WPTR);
	else
		return RREG32_SOC15(UVD, ring->me, mmUVD_RB_WPTR2);
}
/**
 * uvd_v7_0_ring_set_wptr - set write pointer
 *
 * @ring: amdgpu_ring pointer
 *
 * Commits the write pointer to the hardware
 */
static void uvd_v7_0_ring_set_wptr(struct amdgpu_ring *ring)
{
	struct amdgpu_device *adev = ring->adev;

	WREG32_SOC15(UVD, ring->me, mmUVD_RBC_RB_WPTR, lower_32_bits(ring->wptr));
}
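/*
 * Doorbell-mode encode rings (used under SR-IOV) keep a CPU-visible copy of
 * the write pointer in wptr_cpu_addr: get_wptr above reads it back from there,
 * and set_wptr below refreshes the copy before ringing the doorbell.
 */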
/**
 * uvd_v7_0_enc_ring_set_wptr - set enc write pointer
 *
 * @ring: amdgpu_ring pointer
 *
 * Commits the enc write pointer to the hardware
 */
static void uvd_v7_0_enc_ring_set_wptr(struct amdgpu_ring *ring)
{
	struct amdgpu_device *adev = ring->adev;

	if (ring->use_doorbell) {
		/* XXX check if swapping is necessary on BE */
		*ring->wptr_cpu_addr = lower_32_bits(ring->wptr);
		WDOORBELL32(ring->doorbell_index, lower_32_bits(ring->wptr));
		return;
	}

	if (ring == &adev->uvd.inst[ring->me].ring_enc[0])
		WREG32_SOC15(UVD, ring->me, mmUVD_RB_WPTR,
			lower_32_bits(ring->wptr));
	else
		WREG32_SOC15(UVD, ring->me, mmUVD_RB_WPTR2,
			lower_32_bits(ring->wptr));
}
/**
 * uvd_v7_0_enc_ring_test_ring - test if UVD ENC ring is working
 *
 * @ring: the engine to test on
 *
 */
static int uvd_v7_0_enc_ring_test_ring(struct amdgpu_ring *ring)
{
	struct amdgpu_device *adev = ring->adev;
	uint32_t rptr;
	unsigned i;
	int r;

	if (amdgpu_sriov_vf(adev))
		return 0;

	r = amdgpu_ring_alloc(ring, 16);
	if (r)
		return r;

	rptr = amdgpu_ring_get_rptr(ring);

	amdgpu_ring_write(ring, HEVC_ENC_CMD_END);
	amdgpu_ring_commit(ring);

	/* the END command should make the read pointer advance past it */
	for (i = 0; i < adev->usec_timeout; i++) {
		if (amdgpu_ring_get_rptr(ring) != rptr)
			break;
		udelay(1);
	}

	if (i >= adev->usec_timeout)
		r = -ETIMEDOUT;

	return r;
}
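/*
 * The two helpers below hand-build minimal encode-session IBs for IB testing.
 * Each packet appears to start with its size in bytes followed by a command
 * word (0x00000001 session info, 0x00000002 task info, 0x080000xx session
 * ops); the exact layout is dictated by the UVD ENC firmware interface.
 */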
/**
 * uvd_v7_0_enc_get_create_msg - generate a UVD ENC create msg
 *
 * @ring: ring we should submit the msg to
 * @handle: session handle to use
 * @bo: amdgpu object for which we query the offset
 * @fence: optional fence to return
 *
 * Open up a stream for HW test
 */
static int uvd_v7_0_enc_get_create_msg(struct amdgpu_ring *ring, u32 handle,
				       struct amdgpu_bo *bo,
				       struct dma_fence **fence)
{
	const unsigned ib_size_dw = 16;
	struct amdgpu_job *job;
	struct amdgpu_ib *ib;
	struct dma_fence *f = NULL;
	uint64_t addr;
	int i, r;

	r = amdgpu_job_alloc_with_ib(ring->adev, NULL, NULL, ib_size_dw * 4,
				     AMDGPU_IB_POOL_DIRECT, &job);
	if (r)
		return r;

	ib = &job->ibs[0];
	addr = amdgpu_bo_gpu_offset(bo);

	ib->length_dw = 0;
	ib->ptr[ib->length_dw++] = 0x00000018;
	ib->ptr[ib->length_dw++] = 0x00000001; /* session info */
	ib->ptr[ib->length_dw++] = handle;
	ib->ptr[ib->length_dw++] = 0x00000000;
	ib->ptr[ib->length_dw++] = upper_32_bits(addr);
	ib->ptr[ib->length_dw++] = addr;

	ib->ptr[ib->length_dw++] = 0x00000014;
	ib->ptr[ib->length_dw++] = 0x00000002; /* task info */
	ib->ptr[ib->length_dw++] = 0x0000001c;
	ib->ptr[ib->length_dw++] = 0x00000000;
	ib->ptr[ib->length_dw++] = 0x00000000;

	ib->ptr[ib->length_dw++] = 0x00000008;
	ib->ptr[ib->length_dw++] = 0x08000001; /* op initialize */

	for (i = ib->length_dw; i < ib_size_dw; ++i)
		ib->ptr[i] = 0x0;

	r = amdgpu_job_submit_direct(job, ring, &f);
	if (r)
		goto err;

	if (fence)
		*fence = dma_fence_get(f);
	dma_fence_put(f);
	return 0;

err:
	amdgpu_job_free(job);
	return r;
}
/**
 * uvd_v7_0_enc_get_destroy_msg - generate a UVD ENC destroy msg
 *
 * @ring: ring we should submit the msg to
 * @handle: session handle to use
 * @bo: amdgpu object for which we query the offset
 * @fence: optional fence to return
 *
 * Close up a stream for HW test or if userspace failed to do so
 */
static int uvd_v7_0_enc_get_destroy_msg(struct amdgpu_ring *ring, u32 handle,
					struct amdgpu_bo *bo,
					struct dma_fence **fence)
{
	const unsigned ib_size_dw = 16;
	struct amdgpu_job *job;
	struct amdgpu_ib *ib;
	struct dma_fence *f = NULL;
	uint64_t addr;
	int i, r;

	r = amdgpu_job_alloc_with_ib(ring->adev, NULL, NULL, ib_size_dw * 4,
				     AMDGPU_IB_POOL_DIRECT, &job);
	if (r)
		return r;

	ib = &job->ibs[0];
	addr = amdgpu_bo_gpu_offset(bo);

	ib->length_dw = 0;
	ib->ptr[ib->length_dw++] = 0x00000018;
	ib->ptr[ib->length_dw++] = 0x00000001; /* session info */
	ib->ptr[ib->length_dw++] = handle;
	ib->ptr[ib->length_dw++] = 0x00000000;
	ib->ptr[ib->length_dw++] = upper_32_bits(addr);
	ib->ptr[ib->length_dw++] = addr;

	ib->ptr[ib->length_dw++] = 0x00000014;
	ib->ptr[ib->length_dw++] = 0x00000002; /* task info */
	ib->ptr[ib->length_dw++] = 0x0000001c;
	ib->ptr[ib->length_dw++] = 0x00000000;
	ib->ptr[ib->length_dw++] = 0x00000000;

	ib->ptr[ib->length_dw++] = 0x00000008;
	ib->ptr[ib->length_dw++] = 0x08000002; /* op close session */

	for (i = ib->length_dw; i < ib_size_dw; ++i)
		ib->ptr[i] = 0x0;

	r = amdgpu_job_submit_direct(job, ring, &f);
	if (r)
		goto err;

	if (fence)
		*fence = dma_fence_get(f);
	dma_fence_put(f);
	return 0;

err:
	amdgpu_job_free(job);
	return r;
}
/**
 * uvd_v7_0_enc_ring_test_ib - test if UVD ENC IBs are working
 *
 * @ring: the engine to test on
 * @timeout: timeout value in jiffies, or MAX_SCHEDULE_TIMEOUT
 *
 */
static int uvd_v7_0_enc_ring_test_ib(struct amdgpu_ring *ring, long timeout)
{
	struct dma_fence *fence = NULL;
	struct amdgpu_bo *bo = ring->adev->uvd.ib_bo;
	long r;

	r = uvd_v7_0_enc_get_create_msg(ring, 1, bo, NULL);
	if (r)
		goto error;

	r = uvd_v7_0_enc_get_destroy_msg(ring, 1, bo, &fence);
	if (r)
		goto error;

	r = dma_fence_wait_timeout(fence, false, timeout);
	if (r == 0)
		r = -ETIMEDOUT;
	else if (r > 0)
		r = 0;

error:
	dma_fence_put(fence);
	return r;
}
static int uvd_v7_0_early_init(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	if (adev->asic_type == CHIP_VEGA20) {
		u32 harvest;
		int i;

		adev->uvd.num_uvd_inst = UVD7_MAX_HW_INSTANCES_VEGA20;
		for (i = 0; i < adev->uvd.num_uvd_inst; i++) {
			harvest = RREG32_SOC15(UVD, i, mmUVD_PG0_CC_UVD_HARVESTING);
			if (harvest & UVD_PG0_CC_UVD_HARVESTING__UVD_DISABLE_MASK) {
				adev->uvd.harvest_config |= 1 << i;
			}
		}
		if (adev->uvd.harvest_config == (AMDGPU_UVD_HARVEST_UVD0 |
						 AMDGPU_UVD_HARVEST_UVD1))
			/* both instances are harvested, disable the block */
			return -ENOENT;
	} else {
		adev->uvd.num_uvd_inst = 1;
	}

	if (amdgpu_sriov_vf(adev))
		adev->uvd.num_enc_rings = 1;
	else
		adev->uvd.num_enc_rings = 2;
	uvd_v7_0_set_ring_funcs(adev);
	uvd_v7_0_set_enc_ring_funcs(adev);
	uvd_v7_0_set_irq_funcs(adev);

	return 0;
}
static int uvd_v7_0_sw_init(void *handle)
{
	struct amdgpu_ring *ring;
	int i, j, r;
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	for (j = 0; j < adev->uvd.num_uvd_inst; j++) {
		if (adev->uvd.harvest_config & (1 << j))
			continue;
		/* UVD TRAP */
		r = amdgpu_irq_add_id(adev, amdgpu_ih_clientid_uvds[j], UVD_7_0__SRCID__UVD_SYSTEM_MESSAGE_INTERRUPT, &adev->uvd.inst[j].irq);
		if (r)
			return r;

		/* UVD ENC TRAP */
		for (i = 0; i < adev->uvd.num_enc_rings; ++i) {
			r = amdgpu_irq_add_id(adev, amdgpu_ih_clientid_uvds[j], i + UVD_7_0__SRCID__UVD_ENC_GEN_PURP, &adev->uvd.inst[j].irq);
			if (r)
				return r;
		}
	}

	r = amdgpu_uvd_sw_init(adev);
	if (r)
		return r;

	if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) {
		const struct common_firmware_header *hdr;

		hdr = (const struct common_firmware_header *)adev->uvd.fw->data;
		adev->firmware.ucode[AMDGPU_UCODE_ID_UVD].ucode_id = AMDGPU_UCODE_ID_UVD;
		adev->firmware.ucode[AMDGPU_UCODE_ID_UVD].fw = adev->uvd.fw;
		adev->firmware.fw_size +=
			ALIGN(le32_to_cpu(hdr->ucode_size_bytes), PAGE_SIZE);

		if (adev->uvd.num_uvd_inst == UVD7_MAX_HW_INSTANCES_VEGA20) {
			adev->firmware.ucode[AMDGPU_UCODE_ID_UVD1].ucode_id = AMDGPU_UCODE_ID_UVD1;
			adev->firmware.ucode[AMDGPU_UCODE_ID_UVD1].fw = adev->uvd.fw;
			adev->firmware.fw_size +=
				ALIGN(le32_to_cpu(hdr->ucode_size_bytes), PAGE_SIZE);
		}
		DRM_INFO("PSP loading UVD firmware\n");
	}

	for (j = 0; j < adev->uvd.num_uvd_inst; j++) {
		if (adev->uvd.harvest_config & (1 << j))
			continue;
		if (!amdgpu_sriov_vf(adev)) {
			ring = &adev->uvd.inst[j].ring;
			ring->vm_hub = AMDGPU_MMHUB0(0);
			sprintf(ring->name, "uvd_%d", ring->me);
			r = amdgpu_ring_init(adev, ring, 512,
					     &adev->uvd.inst[j].irq, 0,
					     AMDGPU_RING_PRIO_DEFAULT, NULL);
			if (r)
				return r;
		}

		for (i = 0; i < adev->uvd.num_enc_rings; ++i) {
			ring = &adev->uvd.inst[j].ring_enc[i];
			ring->vm_hub = AMDGPU_MMHUB0(0);
			sprintf(ring->name, "uvd_enc_%d.%d", ring->me, i);
			if (amdgpu_sriov_vf(adev)) {
				ring->use_doorbell = true;

				/* currently only use the first encode ring for
				 * sriov, so set an unused location for the
				 * other rings.
				 */
				if (i == 0)
					ring->doorbell_index = adev->doorbell_index.uvd_vce.uvd_ring0_1 * 2;
				else
					ring->doorbell_index = adev->doorbell_index.uvd_vce.uvd_ring2_3 * 2 + 1;
			}
			r = amdgpu_ring_init(adev, ring, 512,
					     &adev->uvd.inst[j].irq, 0,
					     AMDGPU_RING_PRIO_DEFAULT, NULL);
			if (r)
				return r;
		}
	}

	r = amdgpu_uvd_resume(adev);
	if (r)
		return r;

	r = amdgpu_uvd_entity_init(adev);
	if (r)
		return r;

	r = amdgpu_virt_alloc_mm_table(adev);
	if (r)
		return r;

	return r;
}
static int uvd_v7_0_sw_fini(void *handle)
{
	int i, j, r;
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	amdgpu_virt_free_mm_table(adev);

	r = amdgpu_uvd_suspend(adev);
	if (r)
		return r;

	for (j = 0; j < adev->uvd.num_uvd_inst; ++j) {
		if (adev->uvd.harvest_config & (1 << j))
			continue;
		for (i = 0; i < adev->uvd.num_enc_rings; ++i)
			amdgpu_ring_fini(&adev->uvd.inst[j].ring_enc[i]);
	}
	return amdgpu_uvd_sw_fini(adev);
}
/**
 * uvd_v7_0_hw_init - start and test UVD block
 *
 * @handle: handle used to pass amdgpu_device pointer
 *
 * Initialize the hardware, boot up the VCPU and do some testing
 */
static int uvd_v7_0_hw_init(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	struct amdgpu_ring *ring;
	uint32_t tmp;
	int i, j, r;

	if (amdgpu_sriov_vf(adev))
		r = uvd_v7_0_sriov_start(adev);
	else
		r = uvd_v7_0_start(adev);
	if (r)
		goto done;

	for (j = 0; j < adev->uvd.num_uvd_inst; ++j) {
		if (adev->uvd.harvest_config & (1 << j))
			continue;
		ring = &adev->uvd.inst[j].ring;

		if (!amdgpu_sriov_vf(adev)) {
			r = amdgpu_ring_test_helper(ring);
			if (r)
				goto done;

			r = amdgpu_ring_alloc(ring, 10);
			if (r) {
				DRM_ERROR("amdgpu: (%d)ring failed to lock UVD ring (%d).\n", j, r);
				goto done;
			}

			tmp = PACKET0(SOC15_REG_OFFSET(UVD, j,
				mmUVD_SEMA_WAIT_FAULT_TIMEOUT_CNTL), 0);
			amdgpu_ring_write(ring, tmp);
			amdgpu_ring_write(ring, 0xFFFFF);

			tmp = PACKET0(SOC15_REG_OFFSET(UVD, j,
				mmUVD_SEMA_WAIT_INCOMPLETE_TIMEOUT_CNTL), 0);
			amdgpu_ring_write(ring, tmp);
			amdgpu_ring_write(ring, 0xFFFFF);

			tmp = PACKET0(SOC15_REG_OFFSET(UVD, j,
				mmUVD_SEMA_SIGNAL_INCOMPLETE_TIMEOUT_CNTL), 0);
			amdgpu_ring_write(ring, tmp);
			amdgpu_ring_write(ring, 0xFFFFF);

			/* Clear timeout status bits */
			amdgpu_ring_write(ring, PACKET0(SOC15_REG_OFFSET(UVD, j,
				mmUVD_SEMA_TIMEOUT_STATUS), 0));
			amdgpu_ring_write(ring, 0x8);

			amdgpu_ring_write(ring, PACKET0(SOC15_REG_OFFSET(UVD, j,
				mmUVD_SEMA_CNTL), 0));
			amdgpu_ring_write(ring, 3);

			amdgpu_ring_commit(ring);
		}

		for (i = 0; i < adev->uvd.num_enc_rings; ++i) {
			ring = &adev->uvd.inst[j].ring_enc[i];
			r = amdgpu_ring_test_helper(ring);
			if (r)
				goto done;
		}
	}
done:
	if (!r)
		DRM_INFO("UVD and UVD ENC initialized successfully.\n");

	return r;
}
/**
 * uvd_v7_0_hw_fini - stop the hardware block
 *
 * @handle: handle used to pass amdgpu_device pointer
 *
 * Stop the UVD block, mark ring as not ready any more
 */
static int uvd_v7_0_hw_fini(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	cancel_delayed_work_sync(&adev->uvd.idle_work);

	if (!amdgpu_sriov_vf(adev))
		uvd_v7_0_stop(adev);
	else {
		/* full access mode, so don't touch any UVD register */
		DRM_DEBUG("For SRIOV client, shouldn't do anything.\n");
	}

	return 0;
}
static int uvd_v7_0_prepare_suspend(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	return amdgpu_uvd_prepare_suspend(adev);
}
static int uvd_v7_0_suspend(void *handle)
{
	int r;
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	/*
	 * Proper cleanups before halting the HW engine:
	 *   - cancel the delayed idle work
	 *   - enable powergating
	 *   - enable clockgating
	 *   - disable dpm
	 *
	 * TODO: to align with the VCN implementation, move the
	 * jobs for clockgating/powergating/dpm setting to
	 * ->set_powergating_state().
	 */
	cancel_delayed_work_sync(&adev->uvd.idle_work);

	if (adev->pm.dpm_enabled) {
		amdgpu_dpm_enable_uvd(adev, false);
	} else {
		amdgpu_asic_set_uvd_clocks(adev, 0, 0);
		/* shutdown the UVD block */
		amdgpu_device_ip_set_powergating_state(adev, AMD_IP_BLOCK_TYPE_UVD,
						       AMD_PG_STATE_GATE);
		amdgpu_device_ip_set_clockgating_state(adev, AMD_IP_BLOCK_TYPE_UVD,
						       AMD_CG_STATE_GATE);
	}

	r = uvd_v7_0_hw_fini(adev);
	if (r)
		return r;

	return amdgpu_uvd_suspend(adev);
}
static int uvd_v7_0_resume(void *handle)
{
	int r;
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	r = amdgpu_uvd_resume(adev);
	if (r)
		return r;

	return uvd_v7_0_hw_init(adev);
}
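/*
 * Two firmware addressing modes are handled below: with PSP loading, the VCPU
 * cache is pointed at the TMR copy of the microcode; otherwise it points at
 * the driver's own firmware BO, skipping the firmware header via
 * AMDGPU_UVD_FIRMWARE_OFFSET.
 */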
/**
 * uvd_v7_0_mc_resume - memory controller programming
 *
 * @adev: amdgpu_device pointer
 *
 * Let the UVD memory controller know its offsets
 */
static void uvd_v7_0_mc_resume(struct amdgpu_device *adev)
{
	uint32_t size = AMDGPU_UVD_FIRMWARE_SIZE(adev);
	uint32_t offset;
	int i;

	for (i = 0; i < adev->uvd.num_uvd_inst; ++i) {
		if (adev->uvd.harvest_config & (1 << i))
			continue;
		if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) {
			WREG32_SOC15(UVD, i, mmUVD_LMI_VCPU_CACHE_64BIT_BAR_LOW,
				i == 0 ?
				adev->firmware.ucode[AMDGPU_UCODE_ID_UVD].tmr_mc_addr_lo :
				adev->firmware.ucode[AMDGPU_UCODE_ID_UVD1].tmr_mc_addr_lo);
			WREG32_SOC15(UVD, i, mmUVD_LMI_VCPU_CACHE_64BIT_BAR_HIGH,
				i == 0 ?
				adev->firmware.ucode[AMDGPU_UCODE_ID_UVD].tmr_mc_addr_hi :
				adev->firmware.ucode[AMDGPU_UCODE_ID_UVD1].tmr_mc_addr_hi);
			WREG32_SOC15(UVD, i, mmUVD_VCPU_CACHE_OFFSET0, 0);
			offset = 0;
		} else {
			WREG32_SOC15(UVD, i, mmUVD_LMI_VCPU_CACHE_64BIT_BAR_LOW,
				lower_32_bits(adev->uvd.inst[i].gpu_addr));
			WREG32_SOC15(UVD, i, mmUVD_LMI_VCPU_CACHE_64BIT_BAR_HIGH,
				upper_32_bits(adev->uvd.inst[i].gpu_addr));
			offset = size;
			WREG32_SOC15(UVD, i, mmUVD_VCPU_CACHE_OFFSET0,
					AMDGPU_UVD_FIRMWARE_OFFSET >> 3);
		}

		WREG32_SOC15(UVD, i, mmUVD_VCPU_CACHE_SIZE0, size);

		WREG32_SOC15(UVD, i, mmUVD_LMI_VCPU_CACHE1_64BIT_BAR_LOW,
				lower_32_bits(adev->uvd.inst[i].gpu_addr + offset));
		WREG32_SOC15(UVD, i, mmUVD_LMI_VCPU_CACHE1_64BIT_BAR_HIGH,
				upper_32_bits(adev->uvd.inst[i].gpu_addr + offset));
		WREG32_SOC15(UVD, i, mmUVD_VCPU_CACHE_OFFSET1, (1 << 21));
		WREG32_SOC15(UVD, i, mmUVD_VCPU_CACHE_SIZE1, AMDGPU_UVD_HEAP_SIZE);

		WREG32_SOC15(UVD, i, mmUVD_LMI_VCPU_CACHE2_64BIT_BAR_LOW,
				lower_32_bits(adev->uvd.inst[i].gpu_addr + offset + AMDGPU_UVD_HEAP_SIZE));
		WREG32_SOC15(UVD, i, mmUVD_LMI_VCPU_CACHE2_64BIT_BAR_HIGH,
				upper_32_bits(adev->uvd.inst[i].gpu_addr + offset + AMDGPU_UVD_HEAP_SIZE));
		WREG32_SOC15(UVD, i, mmUVD_VCPU_CACHE_OFFSET2, (2 << 21));
		WREG32_SOC15(UVD, i, mmUVD_VCPU_CACHE_SIZE2,
				AMDGPU_UVD_STACK_SIZE + (AMDGPU_UVD_SESSION_SIZE * 40));

		WREG32_SOC15(UVD, i, mmUVD_UDEC_ADDR_CONFIG,
				adev->gfx.config.gb_addr_config);
		WREG32_SOC15(UVD, i, mmUVD_UDEC_DB_ADDR_CONFIG,
				adev->gfx.config.gb_addr_config);
		WREG32_SOC15(UVD, i, mmUVD_UDEC_DBW_ADDR_CONFIG,
				adev->gfx.config.gb_addr_config);

		WREG32_SOC15(UVD, i, mmUVD_GP_SCRATCH4, adev->uvd.max_handles);
	}
}
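/*
 * Under SR-IOV the VF does not program the UVD/VCE registers directly at init
 * time. Instead a descriptor table of register writes/polls is built in
 * system memory and handed to the MMSCH through the mailbox registers below,
 * which replays the table on the driver's behalf.
 */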
static int uvd_v7_0_mmsch_start(struct amdgpu_device *adev,
				struct amdgpu_mm_table *table)
{
	uint32_t data = 0, loop;
	uint64_t addr = table->gpu_addr;
	struct mmsch_v1_0_init_header *header = (struct mmsch_v1_0_init_header *)table->cpu_addr;
	uint32_t size;
	int i;

	size = header->header_size + header->vce_table_size + header->uvd_table_size;

	/* 1, write to vce_mmsch_vf_ctx_addr_lo/hi register with GPU mc addr of memory descriptor location */
	WREG32_SOC15(VCE, 0, mmVCE_MMSCH_VF_CTX_ADDR_LO, lower_32_bits(addr));
	WREG32_SOC15(VCE, 0, mmVCE_MMSCH_VF_CTX_ADDR_HI, upper_32_bits(addr));

	/* 2, update vmid of descriptor */
	data = RREG32_SOC15(VCE, 0, mmVCE_MMSCH_VF_VMID);
	data &= ~VCE_MMSCH_VF_VMID__VF_CTX_VMID_MASK;
	data |= (0 << VCE_MMSCH_VF_VMID__VF_CTX_VMID__SHIFT); /* use domain0 for MM scheduler */
	WREG32_SOC15(VCE, 0, mmVCE_MMSCH_VF_VMID, data);

	/* 3, notify mmsch about the size of this descriptor */
	WREG32_SOC15(VCE, 0, mmVCE_MMSCH_VF_CTX_SIZE, size);

	/* 4, set resp to zero */
	WREG32_SOC15(VCE, 0, mmVCE_MMSCH_VF_MAILBOX_RESP, 0);

	for (i = 0; i < adev->uvd.num_uvd_inst; ++i) {
		if (adev->uvd.harvest_config & (1 << i))
			continue;
		WDOORBELL32(adev->uvd.inst[i].ring_enc[0].doorbell_index, 0);
		*adev->uvd.inst[i].ring_enc[0].wptr_cpu_addr = 0;
		adev->uvd.inst[i].ring_enc[0].wptr = 0;
		adev->uvd.inst[i].ring_enc[0].wptr_old = 0;
	}
	/* 5, kick off the initialization and wait until VCE_MMSCH_VF_MAILBOX_RESP becomes non-zero */
	WREG32_SOC15(VCE, 0, mmVCE_MMSCH_VF_MAILBOX_HOST, 0x10000001);

	data = RREG32_SOC15(VCE, 0, mmVCE_MMSCH_VF_MAILBOX_RESP);
	loop = 1000;
	while ((data & 0x10000002) != 0x10000002) {
		udelay(10);
		data = RREG32_SOC15(VCE, 0, mmVCE_MMSCH_VF_MAILBOX_RESP);
		loop--;
		if (!loop)
			break;
	}

	if (!loop) {
		dev_err(adev->dev, "failed to init MMSCH, mmVCE_MMSCH_VF_MAILBOX_RESP = %x\n", data);
		return -EBUSY;
	}

	return 0;
}
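/*
 * Build the UVD portion of the MMSCH init table: the same MC, clock-gating,
 * reset and ring programming as uvd_v7_0_start(), but expressed as
 * direct-write/read-modify-write/poll commands instead of immediate register
 * accesses.
 */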
static int uvd_v7_0_sriov_start(struct amdgpu_device *adev)
{
	struct amdgpu_ring *ring;
	uint32_t offset, size, tmp;
	uint32_t table_size = 0;
	struct mmsch_v1_0_cmd_direct_write direct_wt = { {0} };
	struct mmsch_v1_0_cmd_direct_read_modify_write direct_rd_mod_wt = { {0} };
	struct mmsch_v1_0_cmd_direct_polling direct_poll = { {0} };
	struct mmsch_v1_0_cmd_end end = { {0} };
	uint32_t *init_table = adev->virt.mm_table.cpu_addr;
	struct mmsch_v1_0_init_header *header = (struct mmsch_v1_0_init_header *)init_table;
	int i;

	direct_wt.cmd_header.command_type = MMSCH_COMMAND__DIRECT_REG_WRITE;
	direct_rd_mod_wt.cmd_header.command_type = MMSCH_COMMAND__DIRECT_REG_READ_MODIFY_WRITE;
	direct_poll.cmd_header.command_type = MMSCH_COMMAND__DIRECT_REG_POLLING;
	end.cmd_header.command_type = MMSCH_COMMAND__END;

	if (header->uvd_table_offset == 0 && header->uvd_table_size == 0) {
		header->version = MMSCH_VERSION;
		header->header_size = sizeof(struct mmsch_v1_0_init_header) >> 2;

		if (header->vce_table_offset == 0 && header->vce_table_size == 0)
			header->uvd_table_offset = header->header_size;
		else
			header->uvd_table_offset = header->vce_table_size + header->vce_table_offset;

		init_table += header->uvd_table_offset;
		for (i = 0; i < adev->uvd.num_uvd_inst; ++i) {
			if (adev->uvd.harvest_config & (1 << i))
				continue;
			ring = &adev->uvd.inst[i].ring;

			size = AMDGPU_GPU_PAGE_ALIGN(adev->uvd.fw->size + 4);

			MMSCH_V1_0_INSERT_DIRECT_RD_MOD_WT(SOC15_REG_OFFSET(UVD, i, mmUVD_STATUS),
							   0xFFFFFFFF, 0x00000004);

			if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) {
				MMSCH_V1_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(UVD, i,
						mmUVD_LMI_VCPU_CACHE_64BIT_BAR_LOW),
						adev->firmware.ucode[AMDGPU_UCODE_ID_UVD].tmr_mc_addr_lo);
				MMSCH_V1_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(UVD, i,
						mmUVD_LMI_VCPU_CACHE_64BIT_BAR_HIGH),
						adev->firmware.ucode[AMDGPU_UCODE_ID_UVD].tmr_mc_addr_hi);
				MMSCH_V1_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(UVD, 0, mmUVD_VCPU_CACHE_OFFSET0), 0);
				offset = 0;
			} else {
				MMSCH_V1_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(UVD, i, mmUVD_LMI_VCPU_CACHE_64BIT_BAR_LOW),
						lower_32_bits(adev->uvd.inst[i].gpu_addr));
				MMSCH_V1_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(UVD, i, mmUVD_LMI_VCPU_CACHE_64BIT_BAR_HIGH),
						upper_32_bits(adev->uvd.inst[i].gpu_addr));
				offset = size;
				MMSCH_V1_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(UVD, 0, mmUVD_VCPU_CACHE_OFFSET0),
						AMDGPU_UVD_FIRMWARE_OFFSET >> 3);
			}

			MMSCH_V1_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(UVD, i, mmUVD_VCPU_CACHE_SIZE0), size);

			MMSCH_V1_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(UVD, i, mmUVD_LMI_VCPU_CACHE1_64BIT_BAR_LOW),
						    lower_32_bits(adev->uvd.inst[i].gpu_addr + offset));
			MMSCH_V1_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(UVD, i, mmUVD_LMI_VCPU_CACHE1_64BIT_BAR_HIGH),
						    upper_32_bits(adev->uvd.inst[i].gpu_addr + offset));
			MMSCH_V1_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(UVD, i, mmUVD_VCPU_CACHE_OFFSET1), (1 << 21));
			MMSCH_V1_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(UVD, i, mmUVD_VCPU_CACHE_SIZE1), AMDGPU_UVD_HEAP_SIZE);

			MMSCH_V1_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(UVD, i, mmUVD_LMI_VCPU_CACHE2_64BIT_BAR_LOW),
						    lower_32_bits(adev->uvd.inst[i].gpu_addr + offset + AMDGPU_UVD_HEAP_SIZE));
			MMSCH_V1_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(UVD, i, mmUVD_LMI_VCPU_CACHE2_64BIT_BAR_HIGH),
						    upper_32_bits(adev->uvd.inst[i].gpu_addr + offset + AMDGPU_UVD_HEAP_SIZE));
			MMSCH_V1_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(UVD, i, mmUVD_VCPU_CACHE_OFFSET2), (2 << 21));
			MMSCH_V1_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(UVD, i, mmUVD_VCPU_CACHE_SIZE2),
						    AMDGPU_UVD_STACK_SIZE + (AMDGPU_UVD_SESSION_SIZE * 40));

			MMSCH_V1_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(UVD, i, mmUVD_GP_SCRATCH4), adev->uvd.max_handles);
			/* disable clock gating */
			MMSCH_V1_0_INSERT_DIRECT_RD_MOD_WT(SOC15_REG_OFFSET(UVD, i, mmUVD_CGC_CTRL),
							   ~UVD_CGC_CTRL__DYN_CLOCK_MODE_MASK, 0);

			/* disable interrupt */
			MMSCH_V1_0_INSERT_DIRECT_RD_MOD_WT(SOC15_REG_OFFSET(UVD, i, mmUVD_MASTINT_EN),
							   ~UVD_MASTINT_EN__VCPU_EN_MASK, 0);

			/* stall UMC and register bus before resetting VCPU */
			MMSCH_V1_0_INSERT_DIRECT_RD_MOD_WT(SOC15_REG_OFFSET(UVD, i, mmUVD_LMI_CTRL2),
							   ~UVD_LMI_CTRL2__STALL_ARB_UMC_MASK,
							   UVD_LMI_CTRL2__STALL_ARB_UMC_MASK);

			/* put LMI, VCPU, RBC etc... into reset */
			MMSCH_V1_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(UVD, i, mmUVD_SOFT_RESET),
						    (uint32_t)(UVD_SOFT_RESET__LMI_SOFT_RESET_MASK |
							       UVD_SOFT_RESET__VCPU_SOFT_RESET_MASK |
							       UVD_SOFT_RESET__LBSI_SOFT_RESET_MASK |
							       UVD_SOFT_RESET__RBC_SOFT_RESET_MASK |
							       UVD_SOFT_RESET__CSM_SOFT_RESET_MASK |
							       UVD_SOFT_RESET__CXW_SOFT_RESET_MASK |
							       UVD_SOFT_RESET__TAP_SOFT_RESET_MASK |
							       UVD_SOFT_RESET__LMI_UMC_SOFT_RESET_MASK));

			/* initialize UVD memory controller */
			MMSCH_V1_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(UVD, i, mmUVD_LMI_CTRL),
						    (uint32_t)((0x40 << UVD_LMI_CTRL__WRITE_CLEAN_TIMER__SHIFT) |
							       UVD_LMI_CTRL__WRITE_CLEAN_TIMER_EN_MASK |
							       UVD_LMI_CTRL__DATA_COHERENCY_EN_MASK |
							       UVD_LMI_CTRL__VCPU_DATA_COHERENCY_EN_MASK |
							       UVD_LMI_CTRL__REQ_MODE_MASK |
							       0x00100000L));

			/* take all subblocks out of reset, except VCPU */
			MMSCH_V1_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(UVD, i, mmUVD_SOFT_RESET),
						    UVD_SOFT_RESET__VCPU_SOFT_RESET_MASK);

			/* enable VCPU clock */
			MMSCH_V1_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(UVD, i, mmUVD_VCPU_CNTL),
						    UVD_VCPU_CNTL__CLK_EN_MASK);

			/* enable master interrupt */
			MMSCH_V1_0_INSERT_DIRECT_RD_MOD_WT(SOC15_REG_OFFSET(UVD, i, mmUVD_MASTINT_EN),
							   ~(UVD_MASTINT_EN__VCPU_EN_MASK|UVD_MASTINT_EN__SYS_EN_MASK),
							   (UVD_MASTINT_EN__VCPU_EN_MASK|UVD_MASTINT_EN__SYS_EN_MASK));

			/* clear the bit 4 of UVD_STATUS */
			MMSCH_V1_0_INSERT_DIRECT_RD_MOD_WT(SOC15_REG_OFFSET(UVD, i, mmUVD_STATUS),
							   ~(2 << UVD_STATUS__VCPU_REPORT__SHIFT), 0);

			/* force RBC into idle state */
			size = order_base_2(ring->ring_size);
			tmp = REG_SET_FIELD(0, UVD_RBC_RB_CNTL, RB_BUFSZ, size);
			tmp = REG_SET_FIELD(tmp, UVD_RBC_RB_CNTL, RB_NO_FETCH, 1);
			MMSCH_V1_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(UVD, i, mmUVD_RBC_RB_CNTL), tmp);

			ring = &adev->uvd.inst[i].ring_enc[0];
			ring->wptr = 0;
			MMSCH_V1_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(UVD, i, mmUVD_RB_BASE_LO), ring->gpu_addr);
			MMSCH_V1_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(UVD, i, mmUVD_RB_BASE_HI), upper_32_bits(ring->gpu_addr));
			MMSCH_V1_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(UVD, i, mmUVD_RB_SIZE), ring->ring_size / 4);

			/* boot up the VCPU */
			MMSCH_V1_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(UVD, i, mmUVD_SOFT_RESET), 0);

			/* enable UMC */
			MMSCH_V1_0_INSERT_DIRECT_RD_MOD_WT(SOC15_REG_OFFSET(UVD, i, mmUVD_LMI_CTRL2),
							   ~UVD_LMI_CTRL2__STALL_ARB_UMC_MASK, 0);

			MMSCH_V1_0_INSERT_DIRECT_POLL(SOC15_REG_OFFSET(UVD, i, mmUVD_STATUS), 0x02, 0x02);
		}

		/* add end packet */
		memcpy((void *)init_table, &end, sizeof(struct mmsch_v1_0_cmd_end));
		table_size += sizeof(struct mmsch_v1_0_cmd_end) / 4;
		header->uvd_table_size = table_size;
	}

	return uvd_v7_0_mmsch_start(adev, &adev->virt.mm_table);
}
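/*
 * Bare-metal bring-up path: the same sequence the MMSCH table above encodes,
 * performed with direct register writes, followed by ring-buffer setup for
 * the decode ring and both encode rings.
 */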
/**
 * uvd_v7_0_start - start UVD block
 *
 * @adev: amdgpu_device pointer
 *
 * Setup and start the UVD block
 */
static int uvd_v7_0_start(struct amdgpu_device *adev)
{
	struct amdgpu_ring *ring;
	uint32_t rb_bufsz, tmp;
	uint32_t lmi_swap_cntl;
	uint32_t mp_swap_cntl;
	int i, j, k, r;

	for (k = 0; k < adev->uvd.num_uvd_inst; ++k) {
		if (adev->uvd.harvest_config & (1 << k))
			continue;
		/* disable power gating mode */
		WREG32_P(SOC15_REG_OFFSET(UVD, k, mmUVD_POWER_STATUS), 0,
				~UVD_POWER_STATUS__UVD_PG_MODE_MASK);
	}

	/* disable byte swapping */
	lmi_swap_cntl = 0;
	mp_swap_cntl = 0;

	uvd_v7_0_mc_resume(adev);
	for (k = 0; k < adev->uvd.num_uvd_inst; ++k) {
		if (adev->uvd.harvest_config & (1 << k))
			continue;
		ring = &adev->uvd.inst[k].ring;
		/* disable clock gating */
		WREG32_P(SOC15_REG_OFFSET(UVD, k, mmUVD_CGC_CTRL), 0,
				~UVD_CGC_CTRL__DYN_CLOCK_MODE_MASK);

		/* disable interrupt */
		WREG32_P(SOC15_REG_OFFSET(UVD, k, mmUVD_MASTINT_EN), 0,
				~UVD_MASTINT_EN__VCPU_EN_MASK);

		/* stall UMC and register bus before resetting VCPU */
		WREG32_P(SOC15_REG_OFFSET(UVD, k, mmUVD_LMI_CTRL2),
				UVD_LMI_CTRL2__STALL_ARB_UMC_MASK,
				~UVD_LMI_CTRL2__STALL_ARB_UMC_MASK);
		mdelay(1);

		/* put LMI, VCPU, RBC etc... into reset */
		WREG32_SOC15(UVD, k, mmUVD_SOFT_RESET,
			UVD_SOFT_RESET__LMI_SOFT_RESET_MASK |
			UVD_SOFT_RESET__VCPU_SOFT_RESET_MASK |
			UVD_SOFT_RESET__LBSI_SOFT_RESET_MASK |
			UVD_SOFT_RESET__RBC_SOFT_RESET_MASK |
			UVD_SOFT_RESET__CSM_SOFT_RESET_MASK |
			UVD_SOFT_RESET__CXW_SOFT_RESET_MASK |
			UVD_SOFT_RESET__TAP_SOFT_RESET_MASK |
			UVD_SOFT_RESET__LMI_UMC_SOFT_RESET_MASK);
		mdelay(5);

		/* initialize UVD memory controller */
		WREG32_SOC15(UVD, k, mmUVD_LMI_CTRL,
			(0x40 << UVD_LMI_CTRL__WRITE_CLEAN_TIMER__SHIFT) |
			UVD_LMI_CTRL__WRITE_CLEAN_TIMER_EN_MASK |
			UVD_LMI_CTRL__DATA_COHERENCY_EN_MASK |
			UVD_LMI_CTRL__VCPU_DATA_COHERENCY_EN_MASK |
			UVD_LMI_CTRL__REQ_MODE_MASK |
			0x00100000L);

#ifdef __BIG_ENDIAN
		/* swap (8 in 32) RB and IB */
		lmi_swap_cntl = 0xa;
		mp_swap_cntl = 0;
#endif
		WREG32_SOC15(UVD, k, mmUVD_LMI_SWAP_CNTL, lmi_swap_cntl);
		WREG32_SOC15(UVD, k, mmUVD_MP_SWAP_CNTL, mp_swap_cntl);

		WREG32_SOC15(UVD, k, mmUVD_MPC_SET_MUXA0, 0x40c2040);
		WREG32_SOC15(UVD, k, mmUVD_MPC_SET_MUXA1, 0x0);
		WREG32_SOC15(UVD, k, mmUVD_MPC_SET_MUXB0, 0x40c2040);
		WREG32_SOC15(UVD, k, mmUVD_MPC_SET_MUXB1, 0x0);
		WREG32_SOC15(UVD, k, mmUVD_MPC_SET_ALU, 0);
		WREG32_SOC15(UVD, k, mmUVD_MPC_SET_MUX, 0x88);

		/* take all subblocks out of reset, except VCPU */
		WREG32_SOC15(UVD, k, mmUVD_SOFT_RESET,
				UVD_SOFT_RESET__VCPU_SOFT_RESET_MASK);
		mdelay(5);

		/* enable VCPU clock */
		WREG32_SOC15(UVD, k, mmUVD_VCPU_CNTL,
				UVD_VCPU_CNTL__CLK_EN_MASK);

		/* enable UMC */
		WREG32_P(SOC15_REG_OFFSET(UVD, k, mmUVD_LMI_CTRL2), 0,
				~UVD_LMI_CTRL2__STALL_ARB_UMC_MASK);

		/* boot up the VCPU */
		WREG32_SOC15(UVD, k, mmUVD_SOFT_RESET, 0);
		mdelay(10);
		for (i = 0; i < 10; ++i) {
			uint32_t status;

			for (j = 0; j < 100; ++j) {
				status = RREG32_SOC15(UVD, k, mmUVD_STATUS);
				if (status & 2)
					break;
				mdelay(10);
			}
			r = 0;
			if (status & 2)
				break;

			DRM_ERROR("UVD(%d) not responding, trying to reset the VCPU!!!\n", k);
			WREG32_P(SOC15_REG_OFFSET(UVD, k, mmUVD_SOFT_RESET),
					UVD_SOFT_RESET__VCPU_SOFT_RESET_MASK,
					~UVD_SOFT_RESET__VCPU_SOFT_RESET_MASK);
			mdelay(10);
			WREG32_P(SOC15_REG_OFFSET(UVD, k, mmUVD_SOFT_RESET), 0,
					~UVD_SOFT_RESET__VCPU_SOFT_RESET_MASK);
			mdelay(10);
			r = -1;
		}

		if (r) {
			DRM_ERROR("UVD(%d) not responding, giving up!!!\n", k);
			return r;
		}

		/* enable master interrupt */
		WREG32_P(SOC15_REG_OFFSET(UVD, k, mmUVD_MASTINT_EN),
			(UVD_MASTINT_EN__VCPU_EN_MASK|UVD_MASTINT_EN__SYS_EN_MASK),
			~(UVD_MASTINT_EN__VCPU_EN_MASK|UVD_MASTINT_EN__SYS_EN_MASK));

		/* clear the bit 4 of UVD_STATUS */
		WREG32_P(SOC15_REG_OFFSET(UVD, k, mmUVD_STATUS), 0,
				~(2 << UVD_STATUS__VCPU_REPORT__SHIFT));
		/* force RBC into idle state */
		rb_bufsz = order_base_2(ring->ring_size);
		tmp = REG_SET_FIELD(0, UVD_RBC_RB_CNTL, RB_BUFSZ, rb_bufsz);
		tmp = REG_SET_FIELD(tmp, UVD_RBC_RB_CNTL, RB_BLKSZ, 1);
		tmp = REG_SET_FIELD(tmp, UVD_RBC_RB_CNTL, RB_NO_FETCH, 1);
		tmp = REG_SET_FIELD(tmp, UVD_RBC_RB_CNTL, RB_WPTR_POLL_EN, 0);
		tmp = REG_SET_FIELD(tmp, UVD_RBC_RB_CNTL, RB_NO_UPDATE, 1);
		tmp = REG_SET_FIELD(tmp, UVD_RBC_RB_CNTL, RB_RPTR_WR_EN, 1);
		WREG32_SOC15(UVD, k, mmUVD_RBC_RB_CNTL, tmp);

		/* set the write pointer delay */
		WREG32_SOC15(UVD, k, mmUVD_RBC_RB_WPTR_CNTL, 0);

		/* set the wb address */
		WREG32_SOC15(UVD, k, mmUVD_RBC_RB_RPTR_ADDR,
				(upper_32_bits(ring->gpu_addr) >> 2));

		/* program the RB_BASE for ring buffer */
		WREG32_SOC15(UVD, k, mmUVD_LMI_RBC_RB_64BIT_BAR_LOW,
				lower_32_bits(ring->gpu_addr));
		WREG32_SOC15(UVD, k, mmUVD_LMI_RBC_RB_64BIT_BAR_HIGH,
				upper_32_bits(ring->gpu_addr));

		/* Initialize the ring buffer's read and write pointers */
		WREG32_SOC15(UVD, k, mmUVD_RBC_RB_RPTR, 0);

		ring->wptr = RREG32_SOC15(UVD, k, mmUVD_RBC_RB_RPTR);
		WREG32_SOC15(UVD, k, mmUVD_RBC_RB_WPTR,
				lower_32_bits(ring->wptr));

		WREG32_P(SOC15_REG_OFFSET(UVD, k, mmUVD_RBC_RB_CNTL), 0,
				~UVD_RBC_RB_CNTL__RB_NO_FETCH_MASK);

		ring = &adev->uvd.inst[k].ring_enc[0];
		WREG32_SOC15(UVD, k, mmUVD_RB_RPTR, lower_32_bits(ring->wptr));
		WREG32_SOC15(UVD, k, mmUVD_RB_WPTR, lower_32_bits(ring->wptr));
		WREG32_SOC15(UVD, k, mmUVD_RB_BASE_LO, ring->gpu_addr);
		WREG32_SOC15(UVD, k, mmUVD_RB_BASE_HI, upper_32_bits(ring->gpu_addr));
		WREG32_SOC15(UVD, k, mmUVD_RB_SIZE, ring->ring_size / 4);

		ring = &adev->uvd.inst[k].ring_enc[1];
		WREG32_SOC15(UVD, k, mmUVD_RB_RPTR2, lower_32_bits(ring->wptr));
		WREG32_SOC15(UVD, k, mmUVD_RB_WPTR2, lower_32_bits(ring->wptr));
		WREG32_SOC15(UVD, k, mmUVD_RB_BASE_LO2, ring->gpu_addr);
		WREG32_SOC15(UVD, k, mmUVD_RB_BASE_HI2, upper_32_bits(ring->gpu_addr));
		WREG32_SOC15(UVD, k, mmUVD_RB_SIZE2, ring->ring_size / 4);
	}

	return 0;
}
/**
 * uvd_v7_0_stop - stop UVD block
 *
 * @adev: amdgpu_device pointer
 *
 * stop the UVD block
 */
static void uvd_v7_0_stop(struct amdgpu_device *adev)
{
	int i;

	for (i = 0; i < adev->uvd.num_uvd_inst; ++i) {
		if (adev->uvd.harvest_config & (1 << i))
			continue;
		/* force RBC into idle state */
		WREG32_SOC15(UVD, i, mmUVD_RBC_RB_CNTL, 0x11010101);

		/* Stall UMC and register bus before resetting VCPU */
		WREG32_P(SOC15_REG_OFFSET(UVD, i, mmUVD_LMI_CTRL2),
				UVD_LMI_CTRL2__STALL_ARB_UMC_MASK,
				~UVD_LMI_CTRL2__STALL_ARB_UMC_MASK);
		mdelay(1);

		/* put VCPU into reset */
		WREG32_SOC15(UVD, i, mmUVD_SOFT_RESET,
				UVD_SOFT_RESET__VCPU_SOFT_RESET_MASK);
		mdelay(5);

		/* disable VCPU clock */
		WREG32_SOC15(UVD, i, mmUVD_VCPU_CNTL, 0x0);

		/* Unstall UMC and register bus */
		WREG32_P(SOC15_REG_OFFSET(UVD, i, mmUVD_LMI_CTRL2), 0,
				~UVD_LMI_CTRL2__STALL_ARB_UMC_MASK);
	}
}
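/*
 * Decode-ring fences are emitted as GPCOM packets: the sequence number is
 * latched through UVD_CONTEXT_ID and the fence address through DATA0/DATA1
 * with the first command word, then a second command (2) raises the trap
 * interrupt.
 */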
/**
 * uvd_v7_0_ring_emit_fence - emit a fence & trap command
 *
 * @ring: amdgpu_ring pointer
 * @addr: address
 * @seq: sequence number
 * @flags: fence related flags
 *
 * Write a fence and a trap command to the ring.
 */
static void uvd_v7_0_ring_emit_fence(struct amdgpu_ring *ring, u64 addr, u64 seq,
				     unsigned flags)
{
	struct amdgpu_device *adev = ring->adev;

	WARN_ON(flags & AMDGPU_FENCE_FLAG_64BIT);

	amdgpu_ring_write(ring,
		PACKET0(SOC15_REG_OFFSET(UVD, ring->me, mmUVD_CONTEXT_ID), 0));
	amdgpu_ring_write(ring, seq);
	amdgpu_ring_write(ring,
		PACKET0(SOC15_REG_OFFSET(UVD, ring->me, mmUVD_GPCOM_VCPU_DATA0), 0));
	amdgpu_ring_write(ring, addr & 0xffffffff);
	amdgpu_ring_write(ring,
		PACKET0(SOC15_REG_OFFSET(UVD, ring->me, mmUVD_GPCOM_VCPU_DATA1), 0));
	amdgpu_ring_write(ring, upper_32_bits(addr) & 0xff);
	amdgpu_ring_write(ring,
		PACKET0(SOC15_REG_OFFSET(UVD, ring->me, mmUVD_GPCOM_VCPU_CMD), 0));
	amdgpu_ring_write(ring, 0);

	amdgpu_ring_write(ring,
		PACKET0(SOC15_REG_OFFSET(UVD, ring->me, mmUVD_GPCOM_VCPU_DATA0), 0));
	amdgpu_ring_write(ring, 0);
	amdgpu_ring_write(ring,
		PACKET0(SOC15_REG_OFFSET(UVD, ring->me, mmUVD_GPCOM_VCPU_DATA1), 0));
	amdgpu_ring_write(ring, 0);
	amdgpu_ring_write(ring,
		PACKET0(SOC15_REG_OFFSET(UVD, ring->me, mmUVD_GPCOM_VCPU_CMD), 0));
	amdgpu_ring_write(ring, 2);
}
/**
 * uvd_v7_0_enc_ring_emit_fence - emit an enc fence & trap command
 *
 * @ring: amdgpu_ring pointer
 * @addr: address
 * @seq: sequence number
 * @flags: fence related flags
 *
 * Write an enc fence and a trap command to the ring.
 */
static void uvd_v7_0_enc_ring_emit_fence(struct amdgpu_ring *ring, u64 addr,
					 u64 seq, unsigned flags)
{
	WARN_ON(flags & AMDGPU_FENCE_FLAG_64BIT);

	amdgpu_ring_write(ring, HEVC_ENC_CMD_FENCE);
	amdgpu_ring_write(ring, addr);
	amdgpu_ring_write(ring, upper_32_bits(addr));
	amdgpu_ring_write(ring, seq);
	amdgpu_ring_write(ring, HEVC_ENC_CMD_TRAP);
}
/**
 * uvd_v7_0_ring_emit_hdp_flush - skip HDP flushing
 *
 * @ring: amdgpu_ring pointer
 */
static void uvd_v7_0_ring_emit_hdp_flush(struct amdgpu_ring *ring)
{
	/* The firmware doesn't seem to like touching registers at this point. */
}
/**
 * uvd_v7_0_ring_test_ring - register write test
 *
 * @ring: amdgpu_ring pointer
 *
 * Test if we can successfully write to the context register
 */
static int uvd_v7_0_ring_test_ring(struct amdgpu_ring *ring)
{
	struct amdgpu_device *adev = ring->adev;
	uint32_t tmp = 0;
	unsigned i;
	int r;

	WREG32_SOC15(UVD, ring->me, mmUVD_CONTEXT_ID, 0xCAFEDEAD);
	r = amdgpu_ring_alloc(ring, 3);
	if (r)
		return r;

	amdgpu_ring_write(ring,
		PACKET0(SOC15_REG_OFFSET(UVD, ring->me, mmUVD_CONTEXT_ID), 0));
	amdgpu_ring_write(ring, 0xDEADBEEF);
	amdgpu_ring_commit(ring);
	for (i = 0; i < adev->usec_timeout; i++) {
		tmp = RREG32_SOC15(UVD, ring->me, mmUVD_CONTEXT_ID);
		if (tmp == 0xDEADBEEF)
			break;
		udelay(1);
	}

	if (i >= adev->usec_timeout)
		r = -ETIMEDOUT;

	return r;
}
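/*
 * User-space command streams always address UVD instance 0 registers; for
 * jobs scheduled to the second instance, the IB is rewritten in place below,
 * rebasing every register offset from instance 0 to instance 1.
 */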
/**
 * uvd_v7_0_ring_patch_cs_in_place - Patch the IB for command submission.
 *
 * @p: the CS parser with the IBs
 * @job: which job this ib is in
 * @ib: which IB to patch
 *
 */
static int uvd_v7_0_ring_patch_cs_in_place(struct amdgpu_cs_parser *p,
					   struct amdgpu_job *job,
					   struct amdgpu_ib *ib)
{
	struct amdgpu_ring *ring = to_amdgpu_ring(job->base.sched);
	unsigned i;

	/* No patching necessary for the first instance */
	if (!ring->me)
		return 0;

	for (i = 0; i < ib->length_dw; i += 2) {
		uint32_t reg = amdgpu_ib_get_value(ib, i);

		reg -= p->adev->reg_offset[UVD_HWIP][0][1];
		reg += p->adev->reg_offset[UVD_HWIP][1][1];

		amdgpu_ib_set_value(ib, i, reg);
	}
	return 0;
}
/**
 * uvd_v7_0_ring_emit_ib - execute indirect buffer
 *
 * @ring: amdgpu_ring pointer
 * @job: job to retrieve vmid from
 * @ib: indirect buffer to execute
 * @flags: unused
 *
 * Write ring commands to execute the indirect buffer
 */
static void uvd_v7_0_ring_emit_ib(struct amdgpu_ring *ring,
				  struct amdgpu_job *job,
				  struct amdgpu_ib *ib,
				  uint32_t flags)
{
	struct amdgpu_device *adev = ring->adev;
	unsigned vmid = AMDGPU_JOB_GET_VMID(job);

	amdgpu_ring_write(ring,
		PACKET0(SOC15_REG_OFFSET(UVD, ring->me, mmUVD_LMI_RBC_IB_VMID), 0));
	amdgpu_ring_write(ring, vmid);

	amdgpu_ring_write(ring,
		PACKET0(SOC15_REG_OFFSET(UVD, ring->me, mmUVD_LMI_RBC_IB_64BIT_BAR_LOW), 0));
	amdgpu_ring_write(ring, lower_32_bits(ib->gpu_addr));
	amdgpu_ring_write(ring,
		PACKET0(SOC15_REG_OFFSET(UVD, ring->me, mmUVD_LMI_RBC_IB_64BIT_BAR_HIGH), 0));
	amdgpu_ring_write(ring, upper_32_bits(ib->gpu_addr));
	amdgpu_ring_write(ring,
		PACKET0(SOC15_REG_OFFSET(UVD, ring->me, mmUVD_RBC_IB_SIZE), 0));
	amdgpu_ring_write(ring, ib->length_dw);
}
/**
 * uvd_v7_0_enc_ring_emit_ib - enc execute indirect buffer
 *
 * @ring: amdgpu_ring pointer
 * @job: job to retrieve vmid from
 * @ib: indirect buffer to execute
 * @flags: unused
 *
 * Write enc ring commands to execute the indirect buffer
 */
static void uvd_v7_0_enc_ring_emit_ib(struct amdgpu_ring *ring,
				      struct amdgpu_job *job,
				      struct amdgpu_ib *ib,
				      uint32_t flags)
{
	unsigned vmid = AMDGPU_JOB_GET_VMID(job);

	amdgpu_ring_write(ring, HEVC_ENC_CMD_IB_VM);
	amdgpu_ring_write(ring, vmid);
	amdgpu_ring_write(ring, lower_32_bits(ib->gpu_addr));
	amdgpu_ring_write(ring, upper_32_bits(ib->gpu_addr));
	amdgpu_ring_write(ring, ib->length_dw);
}
static void uvd_v7_0_ring_emit_wreg(struct amdgpu_ring *ring,
				    uint32_t reg, uint32_t val)
{
	struct amdgpu_device *adev = ring->adev;

	amdgpu_ring_write(ring,
		PACKET0(SOC15_REG_OFFSET(UVD, ring->me, mmUVD_GPCOM_VCPU_DATA0), 0));
	amdgpu_ring_write(ring, reg << 2);
	amdgpu_ring_write(ring,
		PACKET0(SOC15_REG_OFFSET(UVD, ring->me, mmUVD_GPCOM_VCPU_DATA1), 0));
	amdgpu_ring_write(ring, val);
	amdgpu_ring_write(ring,
		PACKET0(SOC15_REG_OFFSET(UVD, ring->me, mmUVD_GPCOM_VCPU_CMD), 0));
	amdgpu_ring_write(ring, 8);
}
static void uvd_v7_0_ring_emit_reg_wait(struct amdgpu_ring *ring, uint32_t reg,
					uint32_t val, uint32_t mask)
{
	struct amdgpu_device *adev = ring->adev;

	amdgpu_ring_write(ring,
		PACKET0(SOC15_REG_OFFSET(UVD, ring->me, mmUVD_GPCOM_VCPU_DATA0), 0));
	amdgpu_ring_write(ring, reg << 2);
	amdgpu_ring_write(ring,
		PACKET0(SOC15_REG_OFFSET(UVD, ring->me, mmUVD_GPCOM_VCPU_DATA1), 0));
	amdgpu_ring_write(ring, val);
	amdgpu_ring_write(ring,
		PACKET0(SOC15_REG_OFFSET(UVD, ring->me, mmUVD_GP_SCRATCH8), 0));
	amdgpu_ring_write(ring, mask);
	amdgpu_ring_write(ring,
		PACKET0(SOC15_REG_OFFSET(UVD, ring->me, mmUVD_GPCOM_VCPU_CMD), 0));
	amdgpu_ring_write(ring, 12);
}
static void uvd_v7_0_ring_emit_vm_flush(struct amdgpu_ring *ring,
					unsigned vmid, uint64_t pd_addr)
{
	struct amdgpu_vmhub *hub = &ring->adev->vmhub[ring->vm_hub];
	uint32_t data0, data1, mask;

	pd_addr = amdgpu_gmc_emit_flush_gpu_tlb(ring, vmid, pd_addr);

	/* wait for reg writes */
	data0 = hub->ctx0_ptb_addr_lo32 + vmid * hub->ctx_addr_distance;
	data1 = lower_32_bits(pd_addr);
	mask = 0xffffffff;
	uvd_v7_0_ring_emit_reg_wait(ring, data0, data1, mask);
}
static void uvd_v7_0_ring_insert_nop(struct amdgpu_ring *ring, uint32_t count)
{
	struct amdgpu_device *adev = ring->adev;
	int i;

	WARN_ON(ring->wptr % 2 || count % 2);

	for (i = 0; i < count / 2; i++) {
		amdgpu_ring_write(ring, PACKET0(SOC15_REG_OFFSET(UVD, ring->me, mmUVD_NO_OP), 0));
		amdgpu_ring_write(ring, 0);
	}
}
static void uvd_v7_0_enc_ring_insert_end(struct amdgpu_ring *ring)
{
	amdgpu_ring_write(ring, HEVC_ENC_CMD_END);
}
static void uvd_v7_0_enc_ring_emit_reg_wait(struct amdgpu_ring *ring,
					    uint32_t reg, uint32_t val,
					    uint32_t mask)
{
	amdgpu_ring_write(ring, HEVC_ENC_CMD_REG_WAIT);
	amdgpu_ring_write(ring, reg << 2);
	amdgpu_ring_write(ring, mask);
	amdgpu_ring_write(ring, val);
}
static void uvd_v7_0_enc_ring_emit_vm_flush(struct amdgpu_ring *ring,
					    unsigned int vmid, uint64_t pd_addr)
{
	struct amdgpu_vmhub *hub = &ring->adev->vmhub[ring->vm_hub];

	pd_addr = amdgpu_gmc_emit_flush_gpu_tlb(ring, vmid, pd_addr);

	/* wait for reg writes */
	uvd_v7_0_enc_ring_emit_reg_wait(ring, hub->ctx0_ptb_addr_lo32 +
					vmid * hub->ctx_addr_distance,
					lower_32_bits(pd_addr), 0xffffffff);
}
static void uvd_v7_0_enc_ring_emit_wreg(struct amdgpu_ring *ring,
					uint32_t reg, uint32_t val)
{
	amdgpu_ring_write(ring, HEVC_ENC_CMD_REG_WRITE);
	amdgpu_ring_write(ring, reg << 2);
	amdgpu_ring_write(ring, val);
}
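/*
 * The idle/soft-reset helpers below are currently unused: they still
 * reference ring->me without a ring in scope and are wired up as NULL in
 * uvd_v7_0_ip_funcs, so they are compiled out here.
 */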
#if 0
static bool uvd_v7_0_is_idle(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	return !(RREG32(mmSRBM_STATUS) & SRBM_STATUS__UVD_BUSY_MASK);
}

static int uvd_v7_0_wait_for_idle(void *handle)
{
	unsigned i;
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	for (i = 0; i < adev->usec_timeout; i++) {
		if (uvd_v7_0_is_idle(handle))
			return 0;
	}
	return -ETIMEDOUT;
}
#define AMDGPU_UVD_STATUS_BUSY_MASK    0xfd
static bool uvd_v7_0_check_soft_reset(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	u32 srbm_soft_reset = 0;
	u32 tmp = RREG32(mmSRBM_STATUS);

	if (REG_GET_FIELD(tmp, SRBM_STATUS, UVD_RQ_PENDING) ||
	    REG_GET_FIELD(tmp, SRBM_STATUS, UVD_BUSY) ||
	    (RREG32_SOC15(UVD, ring->me, mmUVD_STATUS) &
		    AMDGPU_UVD_STATUS_BUSY_MASK))
		srbm_soft_reset = REG_SET_FIELD(srbm_soft_reset,
				SRBM_SOFT_RESET, SOFT_RESET_UVD, 1);

	if (srbm_soft_reset) {
		adev->uvd.inst[ring->me].srbm_soft_reset = srbm_soft_reset;
		return true;
	} else {
		adev->uvd.inst[ring->me].srbm_soft_reset = 0;
		return false;
	}
}
static int uvd_v7_0_pre_soft_reset(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	if (!adev->uvd.inst[ring->me].srbm_soft_reset)
		return 0;

	uvd_v7_0_stop(adev);
	return 0;
}
static int uvd_v7_0_soft_reset(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	u32 srbm_soft_reset;

	if (!adev->uvd.inst[ring->me].srbm_soft_reset)
		return 0;

	srbm_soft_reset = adev->uvd.inst[ring->me].srbm_soft_reset;

	if (srbm_soft_reset) {
		u32 tmp;

		tmp = RREG32(mmSRBM_SOFT_RESET);
		tmp |= srbm_soft_reset;
		dev_info(adev->dev, "SRBM_SOFT_RESET=0x%08X\n", tmp);
		WREG32(mmSRBM_SOFT_RESET, tmp);
		tmp = RREG32(mmSRBM_SOFT_RESET);

		udelay(50);

		tmp &= ~srbm_soft_reset;
		WREG32(mmSRBM_SOFT_RESET, tmp);
		tmp = RREG32(mmSRBM_SOFT_RESET);

		/* Wait a little for things to settle down */
		udelay(50);
	}

	return 0;
}
static int uvd_v7_0_post_soft_reset(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	if (!adev->uvd.inst[ring->me].srbm_soft_reset)
		return 0;

	mdelay(5);

	return uvd_v7_0_start(adev);
}
#endif
static int uvd_v7_0_set_interrupt_state(struct amdgpu_device *adev,
					struct amdgpu_irq_src *source,
					unsigned type,
					enum amdgpu_interrupt_state state)
{
	return 0;
}
static int uvd_v7_0_process_interrupt(struct amdgpu_device *adev,
				      struct amdgpu_irq_src *source,
				      struct amdgpu_iv_entry *entry)
{
	uint32_t ip_instance;

	switch (entry->client_id) {
	case SOC15_IH_CLIENTID_UVD:
		ip_instance = 0;
		break;
	case SOC15_IH_CLIENTID_UVD1:
		ip_instance = 1;
		break;
	default:
		DRM_ERROR("Unhandled client id: %d\n", entry->client_id);
		return 0;
	}

	DRM_DEBUG("IH: UVD TRAP\n");

	switch (entry->src_id) {
	case UVD_7_0__SRCID__UVD_SYSTEM_MESSAGE_INTERRUPT:
		amdgpu_fence_process(&adev->uvd.inst[ip_instance].ring);
		break;
	case UVD_7_0__SRCID__UVD_ENC_GEN_PURP:
		amdgpu_fence_process(&adev->uvd.inst[ip_instance].ring_enc[0]);
		break;
	case UVD_7_0__SRCID__UVD_ENC_LOW_LATENCY:
		if (!amdgpu_sriov_vf(adev))
			amdgpu_fence_process(&adev->uvd.inst[ip_instance].ring_enc[1]);
		break;
	default:
		DRM_ERROR("Unhandled interrupt: %d %d\n",
			  entry->src_id, entry->src_data[0]);
		break;
	}

	return 0;
}
#if 0
static void uvd_v7_0_set_sw_clock_gating(struct amdgpu_device *adev)
{
	uint32_t data, data1, data2, suvd_flags;

	data = RREG32_SOC15(UVD, ring->me, mmUVD_CGC_CTRL);
	data1 = RREG32_SOC15(UVD, ring->me, mmUVD_SUVD_CGC_GATE);
	data2 = RREG32_SOC15(UVD, ring->me, mmUVD_SUVD_CGC_CTRL);

	data &= ~(UVD_CGC_CTRL__CLK_OFF_DELAY_MASK |
		  UVD_CGC_CTRL__CLK_GATE_DLY_TIMER_MASK);

	suvd_flags = UVD_SUVD_CGC_GATE__SRE_MASK |
		     UVD_SUVD_CGC_GATE__SIT_MASK |
		     UVD_SUVD_CGC_GATE__SMP_MASK |
		     UVD_SUVD_CGC_GATE__SCM_MASK |
		     UVD_SUVD_CGC_GATE__SDB_MASK;

	data |= UVD_CGC_CTRL__DYN_CLOCK_MODE_MASK |
		(1 << REG_FIELD_SHIFT(UVD_CGC_CTRL, CLK_GATE_DLY_TIMER)) |
		(4 << REG_FIELD_SHIFT(UVD_CGC_CTRL, CLK_OFF_DELAY));

	data &= ~(UVD_CGC_CTRL__UDEC_RE_MODE_MASK |
		  UVD_CGC_CTRL__UDEC_CM_MODE_MASK |
		  UVD_CGC_CTRL__UDEC_IT_MODE_MASK |
		  UVD_CGC_CTRL__UDEC_DB_MODE_MASK |
		  UVD_CGC_CTRL__UDEC_MP_MODE_MASK |
		  UVD_CGC_CTRL__SYS_MODE_MASK |
		  UVD_CGC_CTRL__UDEC_MODE_MASK |
		  UVD_CGC_CTRL__MPEG2_MODE_MASK |
		  UVD_CGC_CTRL__REGS_MODE_MASK |
		  UVD_CGC_CTRL__RBC_MODE_MASK |
		  UVD_CGC_CTRL__LMI_MC_MODE_MASK |
		  UVD_CGC_CTRL__LMI_UMC_MODE_MASK |
		  UVD_CGC_CTRL__IDCT_MODE_MASK |
		  UVD_CGC_CTRL__MPRD_MODE_MASK |
		  UVD_CGC_CTRL__MPC_MODE_MASK |
		  UVD_CGC_CTRL__LBSI_MODE_MASK |
		  UVD_CGC_CTRL__LRBBM_MODE_MASK |
		  UVD_CGC_CTRL__WCB_MODE_MASK |
		  UVD_CGC_CTRL__VCPU_MODE_MASK |
		  UVD_CGC_CTRL__JPEG_MODE_MASK |
		  UVD_CGC_CTRL__JPEG2_MODE_MASK |
		  UVD_CGC_CTRL__SCPU_MODE_MASK);
	data2 &= ~(UVD_SUVD_CGC_CTRL__SRE_MODE_MASK |
		   UVD_SUVD_CGC_CTRL__SIT_MODE_MASK |
		   UVD_SUVD_CGC_CTRL__SMP_MODE_MASK |
		   UVD_SUVD_CGC_CTRL__SCM_MODE_MASK |
		   UVD_SUVD_CGC_CTRL__SDB_MODE_MASK);
	data1 |= suvd_flags;

	WREG32_SOC15(UVD, ring->me, mmUVD_CGC_CTRL, data);
	WREG32_SOC15(UVD, ring->me, mmUVD_CGC_GATE, 0);
	WREG32_SOC15(UVD, ring->me, mmUVD_SUVD_CGC_GATE, data1);
	WREG32_SOC15(UVD, ring->me, mmUVD_SUVD_CGC_CTRL, data2);
}
static void uvd_v7_0_set_hw_clock_gating(struct amdgpu_device *adev)
{
	uint32_t data, data1, cgc_flags, suvd_flags;

	data = RREG32_SOC15(UVD, ring->me, mmUVD_CGC_GATE);
	data1 = RREG32_SOC15(UVD, ring->me, mmUVD_SUVD_CGC_GATE);

	cgc_flags = UVD_CGC_GATE__SYS_MASK |
		    UVD_CGC_GATE__UDEC_MASK |
		    UVD_CGC_GATE__MPEG2_MASK |
		    UVD_CGC_GATE__RBC_MASK |
		    UVD_CGC_GATE__LMI_MC_MASK |
		    UVD_CGC_GATE__IDCT_MASK |
		    UVD_CGC_GATE__MPRD_MASK |
		    UVD_CGC_GATE__MPC_MASK |
		    UVD_CGC_GATE__LBSI_MASK |
		    UVD_CGC_GATE__LRBBM_MASK |
		    UVD_CGC_GATE__UDEC_RE_MASK |
		    UVD_CGC_GATE__UDEC_CM_MASK |
		    UVD_CGC_GATE__UDEC_IT_MASK |
		    UVD_CGC_GATE__UDEC_DB_MASK |
		    UVD_CGC_GATE__UDEC_MP_MASK |
		    UVD_CGC_GATE__WCB_MASK |
		    UVD_CGC_GATE__VCPU_MASK |
		    UVD_CGC_GATE__SCPU_MASK |
		    UVD_CGC_GATE__JPEG_MASK |
		    UVD_CGC_GATE__JPEG2_MASK;

	suvd_flags = UVD_SUVD_CGC_GATE__SRE_MASK |
		     UVD_SUVD_CGC_GATE__SIT_MASK |
		     UVD_SUVD_CGC_GATE__SMP_MASK |
		     UVD_SUVD_CGC_GATE__SCM_MASK |
		     UVD_SUVD_CGC_GATE__SDB_MASK;

	data |= cgc_flags;
	data1 |= suvd_flags;

	WREG32_SOC15(UVD, ring->me, mmUVD_CGC_GATE, data);
	WREG32_SOC15(UVD, ring->me, mmUVD_SUVD_CGC_GATE, data1);
}
static void uvd_v7_0_set_bypass_mode(struct amdgpu_device *adev, bool enable)
{
	u32 tmp = RREG32_SMC(ixGCK_DFS_BYPASS_CNTL);

	if (enable)
		tmp |= (GCK_DFS_BYPASS_CNTL__BYPASSDCLK_MASK |
			GCK_DFS_BYPASS_CNTL__BYPASSVCLK_MASK);
	else
		tmp &= ~(GCK_DFS_BYPASS_CNTL__BYPASSDCLK_MASK |
			 GCK_DFS_BYPASS_CNTL__BYPASSVCLK_MASK);

	WREG32_SMC(ixGCK_DFS_BYPASS_CNTL, tmp);
}
static int uvd_v7_0_set_clockgating_state(void *handle,
					  enum amd_clockgating_state state)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	bool enable = (state == AMD_CG_STATE_GATE);

	uvd_v7_0_set_bypass_mode(adev, enable);

	if (!(adev->cg_flags & AMD_CG_SUPPORT_UVD_MGCG))
		return 0;

	if (enable) {
		/* disable HW gating and enable Sw gating */
		uvd_v7_0_set_sw_clock_gating(adev);
	} else {
		/* wait for STATUS to clear */
		if (uvd_v7_0_wait_for_idle(handle))
			return -EBUSY;

		/* enable HW gates because UVD is idle */
		/* uvd_v7_0_set_hw_clock_gating(adev); */
	}

	return 0;
}
static int uvd_v7_0_set_powergating_state(void *handle,
					  enum amd_powergating_state state)
{
	/* This doesn't actually powergate the UVD block.
	 * That's done in the dpm code via the SMC. This
	 * just re-inits the block as necessary. The actual
	 * gating still happens in the dpm code. We should
	 * revisit this when there is a cleaner line between
	 * the smc and the hw blocks
	 */
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	if (!(adev->pg_flags & AMD_PG_SUPPORT_UVD))
		return 0;

	WREG32_SOC15(UVD, ring->me, mmUVD_POWER_STATUS, UVD_POWER_STATUS__UVD_PG_EN_MASK);

	if (state == AMD_PG_STATE_GATE) {
		uvd_v7_0_stop(adev);
		return 0;
	} else {
		return uvd_v7_0_start(adev);
	}
}
#endif
static int uvd_v7_0_set_clockgating_state(void *handle,
					  enum amd_clockgating_state state)
{
	/* needed for driver unload */
	return 0;
}
const struct amd_ip_funcs uvd_v7_0_ip_funcs = {
	.name = "uvd_v7_0",
	.early_init = uvd_v7_0_early_init,
	.late_init = NULL,
	.sw_init = uvd_v7_0_sw_init,
	.sw_fini = uvd_v7_0_sw_fini,
	.hw_init = uvd_v7_0_hw_init,
	.hw_fini = uvd_v7_0_hw_fini,
	.prepare_suspend = uvd_v7_0_prepare_suspend,
	.suspend = uvd_v7_0_suspend,
	.resume = uvd_v7_0_resume,
	.is_idle = NULL /* uvd_v7_0_is_idle */,
	.wait_for_idle = NULL /* uvd_v7_0_wait_for_idle */,
	.check_soft_reset = NULL /* uvd_v7_0_check_soft_reset */,
	.pre_soft_reset = NULL /* uvd_v7_0_pre_soft_reset */,
	.soft_reset = NULL /* uvd_v7_0_soft_reset */,
	.post_soft_reset = NULL /* uvd_v7_0_post_soft_reset */,
	.set_clockgating_state = uvd_v7_0_set_clockgating_state,
	.set_powergating_state = NULL /* uvd_v7_0_set_powergating_state */,
};
static const struct amdgpu_ring_funcs uvd_v7_0_ring_vm_funcs = {
	.type = AMDGPU_RING_TYPE_UVD,
	.align_mask = 0xf,
	.support_64bit_ptrs = false,
	.no_user_fence = true,
	.get_rptr = uvd_v7_0_ring_get_rptr,
	.get_wptr = uvd_v7_0_ring_get_wptr,
	.set_wptr = uvd_v7_0_ring_set_wptr,
	.patch_cs_in_place = uvd_v7_0_ring_patch_cs_in_place,
	.emit_frame_size =
		6 + /* hdp invalidate */
		SOC15_FLUSH_GPU_TLB_NUM_WREG * 6 +
		SOC15_FLUSH_GPU_TLB_NUM_REG_WAIT * 8 +
		8 + /* uvd_v7_0_ring_emit_vm_flush */
		14 + 14, /* uvd_v7_0_ring_emit_fence x2 vm fence */
	.emit_ib_size = 8, /* uvd_v7_0_ring_emit_ib */
	.emit_ib = uvd_v7_0_ring_emit_ib,
	.emit_fence = uvd_v7_0_ring_emit_fence,
	.emit_vm_flush = uvd_v7_0_ring_emit_vm_flush,
	.emit_hdp_flush = uvd_v7_0_ring_emit_hdp_flush,
	.test_ring = uvd_v7_0_ring_test_ring,
	.test_ib = amdgpu_uvd_ring_test_ib,
	.insert_nop = uvd_v7_0_ring_insert_nop,
	.pad_ib = amdgpu_ring_generic_pad_ib,
	.begin_use = amdgpu_uvd_ring_begin_use,
	.end_use = amdgpu_uvd_ring_end_use,
	.emit_wreg = uvd_v7_0_ring_emit_wreg,
	.emit_reg_wait = uvd_v7_0_ring_emit_reg_wait,
	.emit_reg_write_reg_wait = amdgpu_ring_emit_reg_write_reg_wait_helper,
};
static const struct amdgpu_ring_funcs uvd_v7_0_enc_ring_vm_funcs = {
	.type = AMDGPU_RING_TYPE_UVD_ENC,
	.align_mask = 0x3f,
	.nop = HEVC_ENC_CMD_NO_OP,
	.support_64bit_ptrs = false,
	.no_user_fence = true,
	.get_rptr = uvd_v7_0_enc_ring_get_rptr,
	.get_wptr = uvd_v7_0_enc_ring_get_wptr,
	.set_wptr = uvd_v7_0_enc_ring_set_wptr,
	.emit_frame_size =
		3 + 3 + /* hdp flush / invalidate */
		SOC15_FLUSH_GPU_TLB_NUM_WREG * 3 +
		SOC15_FLUSH_GPU_TLB_NUM_REG_WAIT * 4 +
		4 + /* uvd_v7_0_enc_ring_emit_vm_flush */
		5 + 5 + /* uvd_v7_0_enc_ring_emit_fence x2 vm fence */
		1, /* uvd_v7_0_enc_ring_insert_end */
	.emit_ib_size = 5, /* uvd_v7_0_enc_ring_emit_ib */
	.emit_ib = uvd_v7_0_enc_ring_emit_ib,
	.emit_fence = uvd_v7_0_enc_ring_emit_fence,
	.emit_vm_flush = uvd_v7_0_enc_ring_emit_vm_flush,
	.test_ring = uvd_v7_0_enc_ring_test_ring,
	.test_ib = uvd_v7_0_enc_ring_test_ib,
	.insert_nop = amdgpu_ring_insert_nop,
	.insert_end = uvd_v7_0_enc_ring_insert_end,
	.pad_ib = amdgpu_ring_generic_pad_ib,
	.begin_use = amdgpu_uvd_ring_begin_use,
	.end_use = amdgpu_uvd_ring_end_use,
	.emit_wreg = uvd_v7_0_enc_ring_emit_wreg,
	.emit_reg_wait = uvd_v7_0_enc_ring_emit_reg_wait,
	.emit_reg_write_reg_wait = amdgpu_ring_emit_reg_write_reg_wait_helper,
};
static void uvd_v7_0_set_ring_funcs(struct amdgpu_device *adev)
{
	int i;

	for (i = 0; i < adev->uvd.num_uvd_inst; i++) {
		if (adev->uvd.harvest_config & (1 << i))
			continue;
		adev->uvd.inst[i].ring.funcs = &uvd_v7_0_ring_vm_funcs;
		adev->uvd.inst[i].ring.me = i;
		DRM_INFO("UVD(%d) is enabled in VM mode\n", i);
	}
}
static void uvd_v7_0_set_enc_ring_funcs(struct amdgpu_device *adev)
{
	int i, j;

	for (j = 0; j < adev->uvd.num_uvd_inst; j++) {
		if (adev->uvd.harvest_config & (1 << j))
			continue;
		for (i = 0; i < adev->uvd.num_enc_rings; ++i) {
			adev->uvd.inst[j].ring_enc[i].funcs = &uvd_v7_0_enc_ring_vm_funcs;
			adev->uvd.inst[j].ring_enc[i].me = j;
		}

		DRM_INFO("UVD(%d) ENC is enabled in VM mode\n", j);
	}
}
static const struct amdgpu_irq_src_funcs uvd_v7_0_irq_funcs = {
	.set = uvd_v7_0_set_interrupt_state,
	.process = uvd_v7_0_process_interrupt,
};
static void uvd_v7_0_set_irq_funcs(struct amdgpu_device *adev)
{
	int i;

	for (i = 0; i < adev->uvd.num_uvd_inst; i++) {
		if (adev->uvd.harvest_config & (1 << i))
			continue;
		adev->uvd.inst[i].irq.num_types = adev->uvd.num_enc_rings + 1;
		adev->uvd.inst[i].irq.funcs = &uvd_v7_0_irq_funcs;
	}
}
const struct amdgpu_ip_block_version uvd_v7_0_ip_block = {
	.type = AMD_IP_BLOCK_TYPE_UVD,
	.major = 7,
	.minor = 0,
	.rev = 0,
	.funcs = &uvd_v7_0_ip_funcs,
};