/*
 * Copyright 2016 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 */
#include <linux/firmware.h>

#include "amdgpu.h"
#include "amdgpu_uvd.h"
#include "soc15.h"
#include "soc15d.h"
#include "soc15_common.h"
#include "mmsch_v1_0.h"

#include "uvd/uvd_7_0_offset.h"
#include "uvd/uvd_7_0_sh_mask.h"
#include "vce/vce_4_0_offset.h"
#include "vce/vce_4_0_default.h"
#include "vce/vce_4_0_sh_mask.h"
#include "nbif/nbif_6_1_offset.h"
#include "hdp/hdp_4_0_offset.h"
#include "mmhub/mmhub_1_0_offset.h"
#include "mmhub/mmhub_1_0_sh_mask.h"
#include "ivsrcid/uvd/irqsrcs_uvd_7_0.h"
#define mmUVD_PG0_CC_UVD_HARVESTING				0x00c7
#define mmUVD_PG0_CC_UVD_HARVESTING_BASE_IDX			1
//UVD_PG0_CC_UVD_HARVESTING
#define UVD_PG0_CC_UVD_HARVESTING__UVD_DISABLE__SHIFT		0x1
#define UVD_PG0_CC_UVD_HARVESTING__UVD_DISABLE_MASK		0x00000002L

#define UVD7_MAX_HW_INSTANCES_VEGA20			2
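/* The harvesting register is defined here rather than in the generated
 * uvd_7_0 headers; its UVD_DISABLE fuse bit reports whether a given UVD
 * instance was disabled (harvested) at production time, which early_init
 * below turns into a harvest_config bitmask.
 */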
static void uvd_v7_0_set_ring_funcs(struct amdgpu_device *adev);
static void uvd_v7_0_set_enc_ring_funcs(struct amdgpu_device *adev);
static void uvd_v7_0_set_irq_funcs(struct amdgpu_device *adev);
static int uvd_v7_0_start(struct amdgpu_device *adev);
static void uvd_v7_0_stop(struct amdgpu_device *adev);
static int uvd_v7_0_sriov_start(struct amdgpu_device *adev);
static int amdgpu_ih_clientid_uvds[] = {
	SOC15_IH_CLIENTID_UVD,
	SOC15_IH_CLIENTID_UVD1
};
/**
 * uvd_v7_0_ring_get_rptr - get read pointer
 *
 * @ring: amdgpu_ring pointer
 *
 * Returns the current hardware read pointer
 */
static uint64_t uvd_v7_0_ring_get_rptr(struct amdgpu_ring *ring)
{
	struct amdgpu_device *adev = ring->adev;

	return RREG32_SOC15(UVD, ring->me, mmUVD_RBC_RB_RPTR);
}
/**
 * uvd_v7_0_enc_ring_get_rptr - get enc read pointer
 *
 * @ring: amdgpu_ring pointer
 *
 * Returns the current hardware enc read pointer
 */
static uint64_t uvd_v7_0_enc_ring_get_rptr(struct amdgpu_ring *ring)
{
	struct amdgpu_device *adev = ring->adev;

	if (ring == &adev->uvd.inst[ring->me].ring_enc[0])
		return RREG32_SOC15(UVD, ring->me, mmUVD_RB_RPTR);
	else
		return RREG32_SOC15(UVD, ring->me, mmUVD_RB_RPTR2);
}
/**
 * uvd_v7_0_ring_get_wptr - get write pointer
 *
 * @ring: amdgpu_ring pointer
 *
 * Returns the current hardware write pointer
 */
static uint64_t uvd_v7_0_ring_get_wptr(struct amdgpu_ring *ring)
{
	struct amdgpu_device *adev = ring->adev;

	return RREG32_SOC15(UVD, ring->me, mmUVD_RBC_RB_WPTR);
}
/**
 * uvd_v7_0_enc_ring_get_wptr - get enc write pointer
 *
 * @ring: amdgpu_ring pointer
 *
 * Returns the current hardware enc write pointer
 */
static uint64_t uvd_v7_0_enc_ring_get_wptr(struct amdgpu_ring *ring)
{
	struct amdgpu_device *adev = ring->adev;

	if (ring->use_doorbell)
		return adev->wb.wb[ring->wptr_offs];

	if (ring == &adev->uvd.inst[ring->me].ring_enc[0])
		return RREG32_SOC15(UVD, ring->me, mmUVD_RB_WPTR);
	else
		return RREG32_SOC15(UVD, ring->me, mmUVD_RB_WPTR2);
}
/**
 * uvd_v7_0_ring_set_wptr - set write pointer
 *
 * @ring: amdgpu_ring pointer
 *
 * Commits the write pointer to the hardware
 */
static void uvd_v7_0_ring_set_wptr(struct amdgpu_ring *ring)
{
	struct amdgpu_device *adev = ring->adev;

	WREG32_SOC15(UVD, ring->me, mmUVD_RBC_RB_WPTR, lower_32_bits(ring->wptr));
}
/**
 * uvd_v7_0_enc_ring_set_wptr - set enc write pointer
 *
 * @ring: amdgpu_ring pointer
 *
 * Commits the enc write pointer to the hardware
 */
static void uvd_v7_0_enc_ring_set_wptr(struct amdgpu_ring *ring)
{
	struct amdgpu_device *adev = ring->adev;

	if (ring->use_doorbell) {
		/* XXX check if swapping is necessary on BE */
		adev->wb.wb[ring->wptr_offs] = lower_32_bits(ring->wptr);
		WDOORBELL32(ring->doorbell_index, lower_32_bits(ring->wptr));
		return;
	}

	if (ring == &adev->uvd.inst[ring->me].ring_enc[0])
		WREG32_SOC15(UVD, ring->me, mmUVD_RB_WPTR,
			lower_32_bits(ring->wptr));
	else
		WREG32_SOC15(UVD, ring->me, mmUVD_RB_WPTR2,
			lower_32_bits(ring->wptr));
}
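/* When a ring was initialized with use_doorbell (the SR-IOV path in
 * uvd_v7_0_sw_init()), the write pointer is published through a writeback
 * slot plus a doorbell instead of the MMIO registers above, so the commit
 * returns early after ringing the doorbell.
 */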
/**
 * uvd_v7_0_enc_ring_test_ring - test if UVD ENC ring is working
 *
 * @ring: the engine to test on
 *
 */
static int uvd_v7_0_enc_ring_test_ring(struct amdgpu_ring *ring)
{
	struct amdgpu_device *adev = ring->adev;
	uint32_t rptr;
	unsigned i;
	int r;

	if (amdgpu_sriov_vf(adev))
		return 0;

	r = amdgpu_ring_alloc(ring, 16);
	if (r)
		return r;

	rptr = amdgpu_ring_get_rptr(ring);

	amdgpu_ring_write(ring, HEVC_ENC_CMD_END);
	amdgpu_ring_commit(ring);

	for (i = 0; i < adev->usec_timeout; i++) {
		if (amdgpu_ring_get_rptr(ring) != rptr)
			break;
		udelay(1);
	}

	if (i >= adev->usec_timeout)
		r = -ETIMEDOUT;

	return r;
}
/**
 * uvd_v7_0_enc_get_create_msg - generate a UVD ENC create msg
 *
 * @ring: ring we should submit the msg to
 * @handle: session handle to use
 * @bo: buffer object the message payload lives in
 * @fence: optional fence to return
 *
 * Open up a stream for HW test
 */
static int uvd_v7_0_enc_get_create_msg(struct amdgpu_ring *ring, uint32_t handle,
				       struct amdgpu_bo *bo,
				       struct dma_fence **fence)
{
	const unsigned ib_size_dw = 16;
	struct amdgpu_job *job;
	struct amdgpu_ib *ib;
	struct dma_fence *f = NULL;
	uint64_t addr;
	int i, r;

	r = amdgpu_job_alloc_with_ib(ring->adev, ib_size_dw * 4,
				     AMDGPU_IB_POOL_DIRECT, &job);
	if (r)
		return r;

	ib = &job->ibs[0];
	addr = amdgpu_bo_gpu_offset(bo);

	ib->length_dw = 0;
	ib->ptr[ib->length_dw++] = 0x00000018;
	ib->ptr[ib->length_dw++] = 0x00000001; /* session info */
	ib->ptr[ib->length_dw++] = handle;
	ib->ptr[ib->length_dw++] = 0x00000000;
	ib->ptr[ib->length_dw++] = upper_32_bits(addr);
	ib->ptr[ib->length_dw++] = addr;

	ib->ptr[ib->length_dw++] = 0x00000014;
	ib->ptr[ib->length_dw++] = 0x00000002; /* task info */
	ib->ptr[ib->length_dw++] = 0x0000001c;
	ib->ptr[ib->length_dw++] = 0x00000000;
	ib->ptr[ib->length_dw++] = 0x00000000;

	ib->ptr[ib->length_dw++] = 0x00000008;
	ib->ptr[ib->length_dw++] = 0x08000001; /* op initialize */

	for (i = ib->length_dw; i < ib_size_dw; ++i)
		ib->ptr[i] = 0x0;

	r = amdgpu_job_submit_direct(job, ring, &f);
	if (r)
		goto err;

	if (fence)
		*fence = dma_fence_get(f);
	dma_fence_put(f);
	return 0;

err:
	amdgpu_job_free(job);
	return r;
}
/**
 * uvd_v7_0_enc_get_destroy_msg - generate a UVD ENC destroy msg
 *
 * @ring: ring we should submit the msg to
 * @handle: session handle to use
 * @bo: buffer object the message payload lives in
 * @fence: optional fence to return
 *
 * Close up a stream for HW test or if userspace failed to do so
 */
static int uvd_v7_0_enc_get_destroy_msg(struct amdgpu_ring *ring, uint32_t handle,
					struct amdgpu_bo *bo,
					struct dma_fence **fence)
{
	const unsigned ib_size_dw = 16;
	struct amdgpu_job *job;
	struct amdgpu_ib *ib;
	struct dma_fence *f = NULL;
	uint64_t addr;
	int i, r;

	r = amdgpu_job_alloc_with_ib(ring->adev, ib_size_dw * 4,
				     AMDGPU_IB_POOL_DIRECT, &job);
	if (r)
		return r;

	ib = &job->ibs[0];
	addr = amdgpu_bo_gpu_offset(bo);

	ib->length_dw = 0;
	ib->ptr[ib->length_dw++] = 0x00000018;
	ib->ptr[ib->length_dw++] = 0x00000001; /* session info */
	ib->ptr[ib->length_dw++] = handle;
	ib->ptr[ib->length_dw++] = 0x00000000;
	ib->ptr[ib->length_dw++] = upper_32_bits(addr);
	ib->ptr[ib->length_dw++] = addr;

	ib->ptr[ib->length_dw++] = 0x00000014;
	ib->ptr[ib->length_dw++] = 0x00000002; /* task info */
	ib->ptr[ib->length_dw++] = 0x0000001c;
	ib->ptr[ib->length_dw++] = 0x00000000;
	ib->ptr[ib->length_dw++] = 0x00000000;

	ib->ptr[ib->length_dw++] = 0x00000008;
	ib->ptr[ib->length_dw++] = 0x08000002; /* op close session */

	for (i = ib->length_dw; i < ib_size_dw; ++i)
		ib->ptr[i] = 0x0;

	r = amdgpu_job_submit_direct(job, ring, &f);
	if (r)
		goto err;

	if (fence)
		*fence = dma_fence_get(f);
	dma_fence_put(f);
	return 0;

err:
	amdgpu_job_free(job);
	return r;
}
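/* Both messages above follow the UVD ENC session-info/task-info layout:
 * each block starts with its size in bytes and a type dword (0x01 session
 * info carrying the handle and payload address, 0x02 task info), followed
 * by the operation dword (0x08000001 initialize, 0x08000002 close session).
 * The remainder of the 16-dword IB is zero padded.
 */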
/**
 * uvd_v7_0_enc_ring_test_ib - test if UVD ENC IBs are working
 *
 * @ring: the engine to test on
 * @timeout: how long to wait for the fence, in jiffies
 *
 */
static int uvd_v7_0_enc_ring_test_ib(struct amdgpu_ring *ring, long timeout)
{
	struct dma_fence *fence = NULL;
	struct amdgpu_bo *bo = NULL;
	long r;

	r = amdgpu_bo_create_reserved(ring->adev, 128 * 1024, PAGE_SIZE,
				      AMDGPU_GEM_DOMAIN_VRAM,
				      &bo, NULL, NULL);
	if (r)
		return r;

	r = uvd_v7_0_enc_get_create_msg(ring, 1, bo, NULL);
	if (r)
		goto error;

	r = uvd_v7_0_enc_get_destroy_msg(ring, 1, bo, &fence);
	if (r)
		goto error;

	r = dma_fence_wait_timeout(fence, false, timeout);
	if (r == 0)
		r = -ETIMEDOUT;
	else if (r > 0)
		r = 0;

error:
	dma_fence_put(fence);
	amdgpu_bo_unreserve(bo);
	amdgpu_bo_unref(&bo);
	return r;
}
static int uvd_v7_0_early_init(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	if (adev->asic_type == CHIP_VEGA20) {
		u32 harvest;
		int i;

		adev->uvd.num_uvd_inst = UVD7_MAX_HW_INSTANCES_VEGA20;
		for (i = 0; i < adev->uvd.num_uvd_inst; i++) {
			harvest = RREG32_SOC15(UVD, i, mmUVD_PG0_CC_UVD_HARVESTING);
			if (harvest & UVD_PG0_CC_UVD_HARVESTING__UVD_DISABLE_MASK) {
				adev->uvd.harvest_config |= 1 << i;
			}
		}
		if (adev->uvd.harvest_config == (AMDGPU_UVD_HARVEST_UVD0 |
						 AMDGPU_UVD_HARVEST_UVD1))
			/* both instances are harvested, disable the block */
			return -ENOENT;
	} else {
		adev->uvd.num_uvd_inst = 1;
	}

	if (amdgpu_sriov_vf(adev))
		adev->uvd.num_enc_rings = 1;
	else
		adev->uvd.num_enc_rings = 2;
	uvd_v7_0_set_ring_funcs(adev);
	uvd_v7_0_set_enc_ring_funcs(adev);
	uvd_v7_0_set_irq_funcs(adev);

	return 0;
}
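/* Every per-instance loop in this file skips instances whose bit is set in
 * harvest_config, so a Vega20 part with one fused-off UVD still works with
 * the remaining instance; only a fully harvested chip rejects the IP block
 * with -ENOENT above.
 */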
static int uvd_v7_0_sw_init(void *handle)
{
	struct amdgpu_ring *ring;

	int i, j, r;
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	for (j = 0; j < adev->uvd.num_uvd_inst; j++) {
		if (adev->uvd.harvest_config & (1 << j))
			continue;
		/* UVD TRAP */
		r = amdgpu_irq_add_id(adev, amdgpu_ih_clientid_uvds[j], UVD_7_0__SRCID__UVD_SYSTEM_MESSAGE_INTERRUPT, &adev->uvd.inst[j].irq);
		if (r)
			return r;

		/* UVD ENC TRAP */
		for (i = 0; i < adev->uvd.num_enc_rings; ++i) {
			r = amdgpu_irq_add_id(adev, amdgpu_ih_clientid_uvds[j], i + UVD_7_0__SRCID__UVD_ENC_GEN_PURP, &adev->uvd.inst[j].irq);
			if (r)
				return r;
		}
	}

	r = amdgpu_uvd_sw_init(adev);
	if (r)
		return r;

	if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) {
		const struct common_firmware_header *hdr;
		hdr = (const struct common_firmware_header *)adev->uvd.fw->data;
		adev->firmware.ucode[AMDGPU_UCODE_ID_UVD].ucode_id = AMDGPU_UCODE_ID_UVD;
		adev->firmware.ucode[AMDGPU_UCODE_ID_UVD].fw = adev->uvd.fw;
		adev->firmware.fw_size +=
			ALIGN(le32_to_cpu(hdr->ucode_size_bytes), PAGE_SIZE);

		if (adev->uvd.num_uvd_inst == UVD7_MAX_HW_INSTANCES_VEGA20) {
			adev->firmware.ucode[AMDGPU_UCODE_ID_UVD1].ucode_id = AMDGPU_UCODE_ID_UVD1;
			adev->firmware.ucode[AMDGPU_UCODE_ID_UVD1].fw = adev->uvd.fw;
			adev->firmware.fw_size +=
				ALIGN(le32_to_cpu(hdr->ucode_size_bytes), PAGE_SIZE);
		}
		DRM_INFO("PSP loading UVD firmware\n");
	}

	for (j = 0; j < adev->uvd.num_uvd_inst; j++) {
		if (adev->uvd.harvest_config & (1 << j))
			continue;
		if (!amdgpu_sriov_vf(adev)) {
			ring = &adev->uvd.inst[j].ring;
			sprintf(ring->name, "uvd_%d", ring->me);
			r = amdgpu_ring_init(adev, ring, 512, &adev->uvd.inst[j].irq, 0);
			if (r)
				return r;
		}

		for (i = 0; i < adev->uvd.num_enc_rings; ++i) {
			ring = &adev->uvd.inst[j].ring_enc[i];
			sprintf(ring->name, "uvd_enc_%d.%d", ring->me, i);
			if (amdgpu_sriov_vf(adev)) {
				ring->use_doorbell = true;

				/* currently only use the first encoding ring for
				 * sriov, so set unused location for other unused rings.
				 */
				if (i == 0)
					ring->doorbell_index = adev->doorbell_index.uvd_vce.uvd_ring0_1 * 2;
				else
					ring->doorbell_index = adev->doorbell_index.uvd_vce.uvd_ring2_3 * 2 + 1;
			}
			r = amdgpu_ring_init(adev, ring, 512, &adev->uvd.inst[j].irq, 0);
			if (r)
				return r;
		}
	}

	r = amdgpu_uvd_resume(adev);
	if (r)
		return r;

	r = amdgpu_uvd_entity_init(adev);
	if (r)
		return r;

	r = amdgpu_virt_alloc_mm_table(adev);
	if (r)
		return r;

	return r;
}
static int uvd_v7_0_sw_fini(void *handle)
{
	int i, j, r;
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	amdgpu_virt_free_mm_table(adev);

	r = amdgpu_uvd_suspend(adev);
	if (r)
		return r;

	for (j = 0; j < adev->uvd.num_uvd_inst; ++j) {
		if (adev->uvd.harvest_config & (1 << j))
			continue;
		for (i = 0; i < adev->uvd.num_enc_rings; ++i)
			amdgpu_ring_fini(&adev->uvd.inst[j].ring_enc[i]);
	}
	return amdgpu_uvd_sw_fini(adev);
}
/**
 * uvd_v7_0_hw_init - start and test UVD block
 *
 * @adev: amdgpu_device pointer
 *
 * Initialize the hardware, boot up the VCPU and do some testing
 */
static int uvd_v7_0_hw_init(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	struct amdgpu_ring *ring;
	uint32_t tmp;
	int i, j, r;

	if (amdgpu_sriov_vf(adev))
		r = uvd_v7_0_sriov_start(adev);
	else
		r = uvd_v7_0_start(adev);
	if (r)
		goto done;

	for (j = 0; j < adev->uvd.num_uvd_inst; ++j) {
		if (adev->uvd.harvest_config & (1 << j))
			continue;
		ring = &adev->uvd.inst[j].ring;

		if (!amdgpu_sriov_vf(adev)) {
			r = amdgpu_ring_test_helper(ring);
			if (r)
				goto done;

			r = amdgpu_ring_alloc(ring, 10);
			if (r) {
				DRM_ERROR("amdgpu: (%d)ring failed to lock UVD ring (%d).\n", j, r);
				goto done;
			}

			tmp = PACKET0(SOC15_REG_OFFSET(UVD, j,
				mmUVD_SEMA_WAIT_FAULT_TIMEOUT_CNTL), 0);
			amdgpu_ring_write(ring, tmp);
			amdgpu_ring_write(ring, 0xFFFFF);

			tmp = PACKET0(SOC15_REG_OFFSET(UVD, j,
				mmUVD_SEMA_WAIT_INCOMPLETE_TIMEOUT_CNTL), 0);
			amdgpu_ring_write(ring, tmp);
			amdgpu_ring_write(ring, 0xFFFFF);

			tmp = PACKET0(SOC15_REG_OFFSET(UVD, j,
				mmUVD_SEMA_SIGNAL_INCOMPLETE_TIMEOUT_CNTL), 0);
			amdgpu_ring_write(ring, tmp);
			amdgpu_ring_write(ring, 0xFFFFF);

			/* Clear timeout status bits */
			amdgpu_ring_write(ring, PACKET0(SOC15_REG_OFFSET(UVD, j,
				mmUVD_SEMA_TIMEOUT_STATUS), 0));
			amdgpu_ring_write(ring, 0x8);

			amdgpu_ring_write(ring, PACKET0(SOC15_REG_OFFSET(UVD, j,
				mmUVD_SEMA_CNTL), 0));
			amdgpu_ring_write(ring, 3);

			amdgpu_ring_commit(ring);
		}

		for (i = 0; i < adev->uvd.num_enc_rings; ++i) {
			ring = &adev->uvd.inst[j].ring_enc[i];
			r = amdgpu_ring_test_helper(ring);
			if (r)
				goto done;
		}
	}
done:
	if (!r)
		DRM_INFO("UVD and UVD ENC initialized successfully.\n");

	return r;
}
/**
 * uvd_v7_0_hw_fini - stop the hardware block
 *
 * @adev: amdgpu_device pointer
 *
 * Stop the UVD block, mark ring as not ready any more
 */
static int uvd_v7_0_hw_fini(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	int i;

	if (!amdgpu_sriov_vf(adev))
		uvd_v7_0_stop(adev);
	else {
		/* full access mode, so don't touch any UVD register */
		DRM_DEBUG("For SRIOV client, shouldn't do anything.\n");
	}

	for (i = 0; i < adev->uvd.num_uvd_inst; ++i) {
		if (adev->uvd.harvest_config & (1 << i))
			continue;
		adev->uvd.inst[i].ring.sched.ready = false;
	}

	return 0;
}
static int uvd_v7_0_suspend(void *handle)
{
	int r;
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	r = uvd_v7_0_hw_fini(adev);
	if (r)
		return r;

	return amdgpu_uvd_suspend(adev);
}
static int uvd_v7_0_resume(void *handle)
{
	int r;
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	r = amdgpu_uvd_resume(adev);
	if (r)
		return r;

	return uvd_v7_0_hw_init(adev);
}
/**
 * uvd_v7_0_mc_resume - memory controller programming
 *
 * @adev: amdgpu_device pointer
 *
 * Let the UVD memory controller know its offsets
 */
static void uvd_v7_0_mc_resume(struct amdgpu_device *adev)
{
	uint32_t size = AMDGPU_UVD_FIRMWARE_SIZE(adev);
	uint32_t offset;
	int i;

	for (i = 0; i < adev->uvd.num_uvd_inst; ++i) {
		if (adev->uvd.harvest_config & (1 << i))
			continue;
		if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) {
			WREG32_SOC15(UVD, i, mmUVD_LMI_VCPU_CACHE_64BIT_BAR_LOW,
				i == 0 ?
				adev->firmware.ucode[AMDGPU_UCODE_ID_UVD].tmr_mc_addr_lo:
				adev->firmware.ucode[AMDGPU_UCODE_ID_UVD1].tmr_mc_addr_lo);
			WREG32_SOC15(UVD, i, mmUVD_LMI_VCPU_CACHE_64BIT_BAR_HIGH,
				i == 0 ?
				adev->firmware.ucode[AMDGPU_UCODE_ID_UVD].tmr_mc_addr_hi:
				adev->firmware.ucode[AMDGPU_UCODE_ID_UVD1].tmr_mc_addr_hi);
			WREG32_SOC15(UVD, i, mmUVD_VCPU_CACHE_OFFSET0, 0);
			offset = 0;
		} else {
			WREG32_SOC15(UVD, i, mmUVD_LMI_VCPU_CACHE_64BIT_BAR_LOW,
				lower_32_bits(adev->uvd.inst[i].gpu_addr));
			WREG32_SOC15(UVD, i, mmUVD_LMI_VCPU_CACHE_64BIT_BAR_HIGH,
				upper_32_bits(adev->uvd.inst[i].gpu_addr));
			offset = size;
			WREG32_SOC15(UVD, i, mmUVD_VCPU_CACHE_OFFSET0,
					AMDGPU_UVD_FIRMWARE_OFFSET >> 3);
		}

		WREG32_SOC15(UVD, i, mmUVD_VCPU_CACHE_SIZE0, size);

		WREG32_SOC15(UVD, i, mmUVD_LMI_VCPU_CACHE1_64BIT_BAR_LOW,
				lower_32_bits(adev->uvd.inst[i].gpu_addr + offset));
		WREG32_SOC15(UVD, i, mmUVD_LMI_VCPU_CACHE1_64BIT_BAR_HIGH,
				upper_32_bits(adev->uvd.inst[i].gpu_addr + offset));
		WREG32_SOC15(UVD, i, mmUVD_VCPU_CACHE_OFFSET1, (1 << 21));
		WREG32_SOC15(UVD, i, mmUVD_VCPU_CACHE_SIZE1, AMDGPU_UVD_HEAP_SIZE);

		WREG32_SOC15(UVD, i, mmUVD_LMI_VCPU_CACHE2_64BIT_BAR_LOW,
				lower_32_bits(adev->uvd.inst[i].gpu_addr + offset + AMDGPU_UVD_HEAP_SIZE));
		WREG32_SOC15(UVD, i, mmUVD_LMI_VCPU_CACHE2_64BIT_BAR_HIGH,
				upper_32_bits(adev->uvd.inst[i].gpu_addr + offset + AMDGPU_UVD_HEAP_SIZE));
		WREG32_SOC15(UVD, i, mmUVD_VCPU_CACHE_OFFSET2, (2 << 21));
		WREG32_SOC15(UVD, i, mmUVD_VCPU_CACHE_SIZE2,
				AMDGPU_UVD_STACK_SIZE + (AMDGPU_UVD_SESSION_SIZE * 40));

		WREG32_SOC15(UVD, i, mmUVD_UDEC_ADDR_CONFIG,
				adev->gfx.config.gb_addr_config);
		WREG32_SOC15(UVD, i, mmUVD_UDEC_DB_ADDR_CONFIG,
				adev->gfx.config.gb_addr_config);
		WREG32_SOC15(UVD, i, mmUVD_UDEC_DBW_ADDR_CONFIG,
				adev->gfx.config.gb_addr_config);

		WREG32_SOC15(UVD, i, mmUVD_GP_SCRATCH4, adev->uvd.max_handles);
	}
}
static int uvd_v7_0_mmsch_start(struct amdgpu_device *adev,
				struct amdgpu_mm_table *table)
{
	uint32_t data = 0, loop;
	uint64_t addr = table->gpu_addr;
	struct mmsch_v1_0_init_header *header = (struct mmsch_v1_0_init_header *)table->cpu_addr;
	uint32_t size;
	int i;

	size = header->header_size + header->vce_table_size + header->uvd_table_size;

	/* 1, write to vce_mmsch_vf_ctx_addr_lo/hi register with GPU mc addr of memory descriptor location */
	WREG32_SOC15(VCE, 0, mmVCE_MMSCH_VF_CTX_ADDR_LO, lower_32_bits(addr));
	WREG32_SOC15(VCE, 0, mmVCE_MMSCH_VF_CTX_ADDR_HI, upper_32_bits(addr));

	/* 2, update vmid of descriptor */
	data = RREG32_SOC15(VCE, 0, mmVCE_MMSCH_VF_VMID);
	data &= ~VCE_MMSCH_VF_VMID__VF_CTX_VMID_MASK;
	data |= (0 << VCE_MMSCH_VF_VMID__VF_CTX_VMID__SHIFT); /* use domain0 for MM scheduler */
	WREG32_SOC15(VCE, 0, mmVCE_MMSCH_VF_VMID, data);

	/* 3, notify mmsch about the size of this descriptor */
	WREG32_SOC15(VCE, 0, mmVCE_MMSCH_VF_CTX_SIZE, size);

	/* 4, set resp to zero */
	WREG32_SOC15(VCE, 0, mmVCE_MMSCH_VF_MAILBOX_RESP, 0);

	for (i = 0; i < adev->uvd.num_uvd_inst; ++i) {
		if (adev->uvd.harvest_config & (1 << i))
			continue;
		WDOORBELL32(adev->uvd.inst[i].ring_enc[0].doorbell_index, 0);
		adev->wb.wb[adev->uvd.inst[i].ring_enc[0].wptr_offs] = 0;
		adev->uvd.inst[i].ring_enc[0].wptr = 0;
		adev->uvd.inst[i].ring_enc[0].wptr_old = 0;
	}

	/* 5, kick off the initialization and wait until VCE_MMSCH_VF_MAILBOX_RESP becomes non-zero */
	WREG32_SOC15(VCE, 0, mmVCE_MMSCH_VF_MAILBOX_HOST, 0x10000001);

	data = RREG32_SOC15(VCE, 0, mmVCE_MMSCH_VF_MAILBOX_RESP);
	loop = 1000;
	while ((data & 0x10000002) != 0x10000002) {
		udelay(10);
		data = RREG32_SOC15(VCE, 0, mmVCE_MMSCH_VF_MAILBOX_RESP);
		loop--;
		if (!loop)
			break;
	}

	if (!loop) {
		dev_err(adev->dev, "failed to init MMSCH, mmVCE_MMSCH_VF_MAILBOX_RESP = %x\n", data);
		return -EBUSY;
	}

	return 0;
}
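/* The MMSCH handshake implemented above: the host publishes the descriptor
 * table address, VMID and size, writes 0x10000001 to the HOST mailbox to
 * kick the scheduler firmware, then polls the RESP mailbox until it reads
 * back 0x10000002 (done) or the retry budget (1000 polls, 10us apart in
 * this reconstruction) is exhausted.
 */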
static int uvd_v7_0_sriov_start(struct amdgpu_device *adev)
{
	struct amdgpu_ring *ring;
	uint32_t offset, size, tmp;
	uint32_t table_size = 0;
	struct mmsch_v1_0_cmd_direct_write direct_wt = { {0} };
	struct mmsch_v1_0_cmd_direct_read_modify_write direct_rd_mod_wt = { {0} };
	struct mmsch_v1_0_cmd_direct_polling direct_poll = { {0} };
	struct mmsch_v1_0_cmd_end end = { {0} };
	uint32_t *init_table = adev->virt.mm_table.cpu_addr;
	struct mmsch_v1_0_init_header *header = (struct mmsch_v1_0_init_header *)init_table;
	uint8_t i = 0;

	direct_wt.cmd_header.command_type = MMSCH_COMMAND__DIRECT_REG_WRITE;
	direct_rd_mod_wt.cmd_header.command_type = MMSCH_COMMAND__DIRECT_REG_READ_MODIFY_WRITE;
	direct_poll.cmd_header.command_type = MMSCH_COMMAND__DIRECT_REG_POLLING;
	end.cmd_header.command_type = MMSCH_COMMAND__END;

	if (header->uvd_table_offset == 0 && header->uvd_table_size == 0) {
		header->version = MMSCH_VERSION;
		header->header_size = sizeof(struct mmsch_v1_0_init_header) >> 2;

		if (header->vce_table_offset == 0 && header->vce_table_size == 0)
			header->uvd_table_offset = header->header_size;
		else
			header->uvd_table_offset = header->vce_table_size + header->vce_table_offset;

		init_table += header->uvd_table_offset;

		for (i = 0; i < adev->uvd.num_uvd_inst; ++i) {
			if (adev->uvd.harvest_config & (1 << i))
				continue;
			ring = &adev->uvd.inst[i].ring;
			ring->wptr = 0;
			size = AMDGPU_GPU_PAGE_ALIGN(adev->uvd.fw->size + 4);

			MMSCH_V1_0_INSERT_DIRECT_RD_MOD_WT(SOC15_REG_OFFSET(UVD, i, mmUVD_STATUS),
							   0xFFFFFFFF, 0x00000004);
			/* mc resume*/
			if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) {
				MMSCH_V1_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(UVD, i,
							    mmUVD_LMI_VCPU_CACHE_64BIT_BAR_LOW),
							    adev->firmware.ucode[AMDGPU_UCODE_ID_UVD].tmr_mc_addr_lo);
				MMSCH_V1_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(UVD, i,
							    mmUVD_LMI_VCPU_CACHE_64BIT_BAR_HIGH),
							    adev->firmware.ucode[AMDGPU_UCODE_ID_UVD].tmr_mc_addr_hi);
				MMSCH_V1_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(UVD, 0, mmUVD_VCPU_CACHE_OFFSET0), 0);
				offset = 0;
			} else {
				MMSCH_V1_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(UVD, i, mmUVD_LMI_VCPU_CACHE_64BIT_BAR_LOW),
							    lower_32_bits(adev->uvd.inst[i].gpu_addr));
				MMSCH_V1_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(UVD, i, mmUVD_LMI_VCPU_CACHE_64BIT_BAR_HIGH),
							    upper_32_bits(adev->uvd.inst[i].gpu_addr));
				offset = size;
				MMSCH_V1_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(UVD, 0, mmUVD_VCPU_CACHE_OFFSET0),
							    AMDGPU_UVD_FIRMWARE_OFFSET >> 3);
			}

			MMSCH_V1_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(UVD, i, mmUVD_VCPU_CACHE_SIZE0), size);

			MMSCH_V1_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(UVD, i, mmUVD_LMI_VCPU_CACHE1_64BIT_BAR_LOW),
						    lower_32_bits(adev->uvd.inst[i].gpu_addr + offset));
			MMSCH_V1_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(UVD, i, mmUVD_LMI_VCPU_CACHE1_64BIT_BAR_HIGH),
						    upper_32_bits(adev->uvd.inst[i].gpu_addr + offset));
			MMSCH_V1_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(UVD, i, mmUVD_VCPU_CACHE_OFFSET1), (1 << 21));
			MMSCH_V1_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(UVD, i, mmUVD_VCPU_CACHE_SIZE1), AMDGPU_UVD_HEAP_SIZE);

			MMSCH_V1_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(UVD, i, mmUVD_LMI_VCPU_CACHE2_64BIT_BAR_LOW),
						    lower_32_bits(adev->uvd.inst[i].gpu_addr + offset + AMDGPU_UVD_HEAP_SIZE));
			MMSCH_V1_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(UVD, i, mmUVD_LMI_VCPU_CACHE2_64BIT_BAR_HIGH),
						    upper_32_bits(adev->uvd.inst[i].gpu_addr + offset + AMDGPU_UVD_HEAP_SIZE));
			MMSCH_V1_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(UVD, i, mmUVD_VCPU_CACHE_OFFSET2), (2 << 21));
			MMSCH_V1_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(UVD, i, mmUVD_VCPU_CACHE_SIZE2),
						    AMDGPU_UVD_STACK_SIZE + (AMDGPU_UVD_SESSION_SIZE * 40));

			MMSCH_V1_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(UVD, i, mmUVD_GP_SCRATCH4), adev->uvd.max_handles);
			/* mc resume end*/

			/* disable clock gating */
			MMSCH_V1_0_INSERT_DIRECT_RD_MOD_WT(SOC15_REG_OFFSET(UVD, i, mmUVD_CGC_CTRL),
							   ~UVD_CGC_CTRL__DYN_CLOCK_MODE_MASK, 0);

			/* disable interrupt */
			MMSCH_V1_0_INSERT_DIRECT_RD_MOD_WT(SOC15_REG_OFFSET(UVD, i, mmUVD_MASTINT_EN),
							   ~UVD_MASTINT_EN__VCPU_EN_MASK, 0);

			/* stall UMC and register bus before resetting VCPU */
			MMSCH_V1_0_INSERT_DIRECT_RD_MOD_WT(SOC15_REG_OFFSET(UVD, i, mmUVD_LMI_CTRL2),
							   ~UVD_LMI_CTRL2__STALL_ARB_UMC_MASK,
							   UVD_LMI_CTRL2__STALL_ARB_UMC_MASK);

			/* put LMI, VCPU, RBC etc... into reset */
			MMSCH_V1_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(UVD, i, mmUVD_SOFT_RESET),
						    (uint32_t)(UVD_SOFT_RESET__LMI_SOFT_RESET_MASK |
							       UVD_SOFT_RESET__VCPU_SOFT_RESET_MASK |
							       UVD_SOFT_RESET__LBSI_SOFT_RESET_MASK |
							       UVD_SOFT_RESET__RBC_SOFT_RESET_MASK |
							       UVD_SOFT_RESET__CSM_SOFT_RESET_MASK |
							       UVD_SOFT_RESET__CXW_SOFT_RESET_MASK |
							       UVD_SOFT_RESET__TAP_SOFT_RESET_MASK |
							       UVD_SOFT_RESET__LMI_UMC_SOFT_RESET_MASK));

			/* initialize UVD memory controller */
			MMSCH_V1_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(UVD, i, mmUVD_LMI_CTRL),
						    (uint32_t)((0x40 << UVD_LMI_CTRL__WRITE_CLEAN_TIMER__SHIFT) |
							       UVD_LMI_CTRL__WRITE_CLEAN_TIMER_EN_MASK |
							       UVD_LMI_CTRL__DATA_COHERENCY_EN_MASK |
							       UVD_LMI_CTRL__VCPU_DATA_COHERENCY_EN_MASK |
							       UVD_LMI_CTRL__REQ_MODE_MASK |
							       0x00100000L));

			/* take all subblocks out of reset, except VCPU */
			MMSCH_V1_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(UVD, i, mmUVD_SOFT_RESET),
						    UVD_SOFT_RESET__VCPU_SOFT_RESET_MASK);

			/* enable VCPU clock */
			MMSCH_V1_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(UVD, i, mmUVD_VCPU_CNTL),
						    UVD_VCPU_CNTL__CLK_EN_MASK);

			/* enable master interrupt */
			MMSCH_V1_0_INSERT_DIRECT_RD_MOD_WT(SOC15_REG_OFFSET(UVD, i, mmUVD_MASTINT_EN),
							   ~(UVD_MASTINT_EN__VCPU_EN_MASK|UVD_MASTINT_EN__SYS_EN_MASK),
							   (UVD_MASTINT_EN__VCPU_EN_MASK|UVD_MASTINT_EN__SYS_EN_MASK));

			/* clear the bit 4 of UVD_STATUS */
			MMSCH_V1_0_INSERT_DIRECT_RD_MOD_WT(SOC15_REG_OFFSET(UVD, i, mmUVD_STATUS),
							   ~(2 << UVD_STATUS__VCPU_REPORT__SHIFT), 0);

			/* force RBC into idle state */
			size = order_base_2(ring->ring_size);
			tmp = REG_SET_FIELD(0, UVD_RBC_RB_CNTL, RB_BUFSZ, size);
			tmp = REG_SET_FIELD(tmp, UVD_RBC_RB_CNTL, RB_NO_FETCH, 1);
			MMSCH_V1_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(UVD, i, mmUVD_RBC_RB_CNTL), tmp);

			ring = &adev->uvd.inst[i].ring_enc[0];
			ring->wptr = 0;
			MMSCH_V1_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(UVD, i, mmUVD_RB_BASE_LO), ring->gpu_addr);
			MMSCH_V1_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(UVD, i, mmUVD_RB_BASE_HI), upper_32_bits(ring->gpu_addr));
			MMSCH_V1_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(UVD, i, mmUVD_RB_SIZE), ring->ring_size / 4);

			/* boot up the VCPU */
			MMSCH_V1_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(UVD, i, mmUVD_SOFT_RESET), 0);

			/* enable UMC */
			MMSCH_V1_0_INSERT_DIRECT_RD_MOD_WT(SOC15_REG_OFFSET(UVD, i, mmUVD_LMI_CTRL2),
							   ~UVD_LMI_CTRL2__STALL_ARB_UMC_MASK, 0);

			MMSCH_V1_0_INSERT_DIRECT_POLL(SOC15_REG_OFFSET(UVD, i, mmUVD_STATUS), 0x02, 0x02);
		}
		/* add end packet */
		memcpy((void *)init_table, &end, sizeof(struct mmsch_v1_0_cmd_end));
		table_size += sizeof(struct mmsch_v1_0_cmd_end) / 4;
		header->uvd_table_size = table_size;

	}
	return uvd_v7_0_mmsch_start(adev, &adev->virt.mm_table);
}
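/* Under SR-IOV the host may not bang UVD registers directly, so the table
 * built above encodes the same programming sequence as uvd_v7_0_mc_resume()
 * and uvd_v7_0_start() as MMSCH direct write/read-modify-write/poll commands
 * that the scheduler firmware replays on our behalf.
 */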
/**
 * uvd_v7_0_start - start UVD block
 *
 * @adev: amdgpu_device pointer
 *
 * Setup and start the UVD block
 */
static int uvd_v7_0_start(struct amdgpu_device *adev)
{
	struct amdgpu_ring *ring;
	uint32_t rb_bufsz, tmp;
	uint32_t lmi_swap_cntl;
	uint32_t mp_swap_cntl;
	int i, j, k, r;

	for (k = 0; k < adev->uvd.num_uvd_inst; ++k) {
		if (adev->uvd.harvest_config & (1 << k))
			continue;
		/* disable DPG */
		WREG32_P(SOC15_REG_OFFSET(UVD, k, mmUVD_POWER_STATUS), 0,
			 ~UVD_POWER_STATUS__UVD_PG_MODE_MASK);
	}

	/* disable byte swapping */
	lmi_swap_cntl = 0;
	mp_swap_cntl = 0;

	uvd_v7_0_mc_resume(adev);

	for (k = 0; k < adev->uvd.num_uvd_inst; ++k) {
		if (adev->uvd.harvest_config & (1 << k))
			continue;
		ring = &adev->uvd.inst[k].ring;
		/* disable clock gating */
		WREG32_P(SOC15_REG_OFFSET(UVD, k, mmUVD_CGC_CTRL), 0,
			 ~UVD_CGC_CTRL__DYN_CLOCK_MODE_MASK);

		/* disable interrupt */
		WREG32_P(SOC15_REG_OFFSET(UVD, k, mmUVD_MASTINT_EN), 0,
			 ~UVD_MASTINT_EN__VCPU_EN_MASK);

		/* stall UMC and register bus before resetting VCPU */
		WREG32_P(SOC15_REG_OFFSET(UVD, k, mmUVD_LMI_CTRL2),
			 UVD_LMI_CTRL2__STALL_ARB_UMC_MASK,
			 ~UVD_LMI_CTRL2__STALL_ARB_UMC_MASK);
		mdelay(1);

		/* put LMI, VCPU, RBC etc... into reset */
		WREG32_SOC15(UVD, k, mmUVD_SOFT_RESET,
			     UVD_SOFT_RESET__LMI_SOFT_RESET_MASK |
			     UVD_SOFT_RESET__VCPU_SOFT_RESET_MASK |
			     UVD_SOFT_RESET__LBSI_SOFT_RESET_MASK |
			     UVD_SOFT_RESET__RBC_SOFT_RESET_MASK |
			     UVD_SOFT_RESET__CSM_SOFT_RESET_MASK |
			     UVD_SOFT_RESET__CXW_SOFT_RESET_MASK |
			     UVD_SOFT_RESET__TAP_SOFT_RESET_MASK |
			     UVD_SOFT_RESET__LMI_UMC_SOFT_RESET_MASK);
		mdelay(5);

		/* initialize UVD memory controller */
		WREG32_SOC15(UVD, k, mmUVD_LMI_CTRL,
			     (0x40 << UVD_LMI_CTRL__WRITE_CLEAN_TIMER__SHIFT) |
			     UVD_LMI_CTRL__WRITE_CLEAN_TIMER_EN_MASK |
			     UVD_LMI_CTRL__DATA_COHERENCY_EN_MASK |
			     UVD_LMI_CTRL__VCPU_DATA_COHERENCY_EN_MASK |
			     UVD_LMI_CTRL__REQ_MODE_MASK |
			     0x00100000L);

#ifdef __BIG_ENDIAN
		/* swap (8 in 32) RB and IB */
		lmi_swap_cntl = 0xa;
		mp_swap_cntl = 0;
#endif
		WREG32_SOC15(UVD, k, mmUVD_LMI_SWAP_CNTL, lmi_swap_cntl);
		WREG32_SOC15(UVD, k, mmUVD_MP_SWAP_CNTL, mp_swap_cntl);

		WREG32_SOC15(UVD, k, mmUVD_MPC_SET_MUXA0, 0x40c2040);
		WREG32_SOC15(UVD, k, mmUVD_MPC_SET_MUXA1, 0x0);
		WREG32_SOC15(UVD, k, mmUVD_MPC_SET_MUXB0, 0x40c2040);
		WREG32_SOC15(UVD, k, mmUVD_MPC_SET_MUXB1, 0x0);
		WREG32_SOC15(UVD, k, mmUVD_MPC_SET_ALU, 0);
		WREG32_SOC15(UVD, k, mmUVD_MPC_SET_MUX, 0x88);

		/* take all subblocks out of reset, except VCPU */
		WREG32_SOC15(UVD, k, mmUVD_SOFT_RESET,
			     UVD_SOFT_RESET__VCPU_SOFT_RESET_MASK);
		mdelay(5);

		/* enable VCPU clock */
		WREG32_SOC15(UVD, k, mmUVD_VCPU_CNTL,
			     UVD_VCPU_CNTL__CLK_EN_MASK);

		/* enable UMC */
		WREG32_P(SOC15_REG_OFFSET(UVD, k, mmUVD_LMI_CTRL2), 0,
			 ~UVD_LMI_CTRL2__STALL_ARB_UMC_MASK);

		/* boot up the VCPU */
		WREG32_SOC15(UVD, k, mmUVD_SOFT_RESET, 0);
		mdelay(10);

		for (i = 0; i < 10; ++i) {
			uint32_t status;

			for (j = 0; j < 100; ++j) {
				status = RREG32_SOC15(UVD, k, mmUVD_STATUS);
				if (status & 2)
					break;
				mdelay(10);
			}
			r = 0;
			if (status & 2)
				break;

			DRM_ERROR("UVD(%d) not responding, trying to reset the VCPU!!!\n", k);
			WREG32_P(SOC15_REG_OFFSET(UVD, k, mmUVD_SOFT_RESET),
				 UVD_SOFT_RESET__VCPU_SOFT_RESET_MASK,
				 ~UVD_SOFT_RESET__VCPU_SOFT_RESET_MASK);
			mdelay(10);
			WREG32_P(SOC15_REG_OFFSET(UVD, k, mmUVD_SOFT_RESET), 0,
				 ~UVD_SOFT_RESET__VCPU_SOFT_RESET_MASK);
			mdelay(10);
			r = -1;
		}

		if (r) {
			DRM_ERROR("UVD(%d) not responding, giving up!!!\n", k);
			return r;
		}
		/* enable master interrupt */
		WREG32_P(SOC15_REG_OFFSET(UVD, k, mmUVD_MASTINT_EN),
			 (UVD_MASTINT_EN__VCPU_EN_MASK|UVD_MASTINT_EN__SYS_EN_MASK),
			 ~(UVD_MASTINT_EN__VCPU_EN_MASK|UVD_MASTINT_EN__SYS_EN_MASK));

		/* clear the bit 4 of UVD_STATUS */
		WREG32_P(SOC15_REG_OFFSET(UVD, k, mmUVD_STATUS), 0,
			 ~(2 << UVD_STATUS__VCPU_REPORT__SHIFT));

		/* force RBC into idle state */
		rb_bufsz = order_base_2(ring->ring_size);
		tmp = REG_SET_FIELD(0, UVD_RBC_RB_CNTL, RB_BUFSZ, rb_bufsz);
		tmp = REG_SET_FIELD(tmp, UVD_RBC_RB_CNTL, RB_BLKSZ, 1);
		tmp = REG_SET_FIELD(tmp, UVD_RBC_RB_CNTL, RB_NO_FETCH, 1);
		tmp = REG_SET_FIELD(tmp, UVD_RBC_RB_CNTL, RB_WPTR_POLL_EN, 0);
		tmp = REG_SET_FIELD(tmp, UVD_RBC_RB_CNTL, RB_NO_UPDATE, 1);
		tmp = REG_SET_FIELD(tmp, UVD_RBC_RB_CNTL, RB_RPTR_WR_EN, 1);
		WREG32_SOC15(UVD, k, mmUVD_RBC_RB_CNTL, tmp);

		/* set the write pointer delay */
		WREG32_SOC15(UVD, k, mmUVD_RBC_RB_WPTR_CNTL, 0);

		/* set the wb address */
		WREG32_SOC15(UVD, k, mmUVD_RBC_RB_RPTR_ADDR,
			     (upper_32_bits(ring->gpu_addr) >> 2));

		/* program the RB_BASE for ring buffer */
		WREG32_SOC15(UVD, k, mmUVD_LMI_RBC_RB_64BIT_BAR_LOW,
			     lower_32_bits(ring->gpu_addr));
		WREG32_SOC15(UVD, k, mmUVD_LMI_RBC_RB_64BIT_BAR_HIGH,
			     upper_32_bits(ring->gpu_addr));

		/* Initialize the ring buffer's read and write pointers */
		WREG32_SOC15(UVD, k, mmUVD_RBC_RB_RPTR, 0);

		ring->wptr = RREG32_SOC15(UVD, k, mmUVD_RBC_RB_RPTR);
		WREG32_SOC15(UVD, k, mmUVD_RBC_RB_WPTR,
			     lower_32_bits(ring->wptr));

		WREG32_P(SOC15_REG_OFFSET(UVD, k, mmUVD_RBC_RB_CNTL), 0,
			 ~UVD_RBC_RB_CNTL__RB_NO_FETCH_MASK);

		ring = &adev->uvd.inst[k].ring_enc[0];
		WREG32_SOC15(UVD, k, mmUVD_RB_RPTR, lower_32_bits(ring->wptr));
		WREG32_SOC15(UVD, k, mmUVD_RB_WPTR, lower_32_bits(ring->wptr));
		WREG32_SOC15(UVD, k, mmUVD_RB_BASE_LO, ring->gpu_addr);
		WREG32_SOC15(UVD, k, mmUVD_RB_BASE_HI, upper_32_bits(ring->gpu_addr));
		WREG32_SOC15(UVD, k, mmUVD_RB_SIZE, ring->ring_size / 4);

		ring = &adev->uvd.inst[k].ring_enc[1];
		WREG32_SOC15(UVD, k, mmUVD_RB_RPTR2, lower_32_bits(ring->wptr));
		WREG32_SOC15(UVD, k, mmUVD_RB_WPTR2, lower_32_bits(ring->wptr));
		WREG32_SOC15(UVD, k, mmUVD_RB_BASE_LO2, ring->gpu_addr);
		WREG32_SOC15(UVD, k, mmUVD_RB_BASE_HI2, upper_32_bits(ring->gpu_addr));
		WREG32_SOC15(UVD, k, mmUVD_RB_SIZE2, ring->ring_size / 4);
	}

	return 0;
}
/**
 * uvd_v7_0_stop - stop UVD block
 *
 * @adev: amdgpu_device pointer
 *
 * stop the UVD block
 */
static void uvd_v7_0_stop(struct amdgpu_device *adev)
{
	uint8_t i = 0;

	for (i = 0; i < adev->uvd.num_uvd_inst; ++i) {
		if (adev->uvd.harvest_config & (1 << i))
			continue;
		/* force RBC into idle state */
		WREG32_SOC15(UVD, i, mmUVD_RBC_RB_CNTL, 0x11010101);

		/* Stall UMC and register bus before resetting VCPU */
		WREG32_P(SOC15_REG_OFFSET(UVD, i, mmUVD_LMI_CTRL2),
			 UVD_LMI_CTRL2__STALL_ARB_UMC_MASK,
			 ~UVD_LMI_CTRL2__STALL_ARB_UMC_MASK);
		mdelay(1);

		/* put VCPU into reset */
		WREG32_SOC15(UVD, i, mmUVD_SOFT_RESET,
			     UVD_SOFT_RESET__VCPU_SOFT_RESET_MASK);
		mdelay(5);

		/* disable VCPU clock */
		WREG32_SOC15(UVD, i, mmUVD_VCPU_CNTL, 0x0);

		/* Unstall UMC and register bus */
		WREG32_P(SOC15_REG_OFFSET(UVD, i, mmUVD_LMI_CTRL2), 0,
			 ~UVD_LMI_CTRL2__STALL_ARB_UMC_MASK);
	}
}
/**
 * uvd_v7_0_ring_emit_fence - emit a fence & trap command
 *
 * @ring: amdgpu_ring pointer
 * @addr: GPU address to write the fence sequence number to
 * @seq: sequence number
 * @flags: fence related flags
 *
 * Write a fence and a trap command to the ring.
 */
static void uvd_v7_0_ring_emit_fence(struct amdgpu_ring *ring, u64 addr, u64 seq,
				     unsigned flags)
{
	struct amdgpu_device *adev = ring->adev;

	WARN_ON(flags & AMDGPU_FENCE_FLAG_64BIT);

	amdgpu_ring_write(ring,
		PACKET0(SOC15_REG_OFFSET(UVD, ring->me, mmUVD_CONTEXT_ID), 0));
	amdgpu_ring_write(ring, seq);
	amdgpu_ring_write(ring,
		PACKET0(SOC15_REG_OFFSET(UVD, ring->me, mmUVD_GPCOM_VCPU_DATA0), 0));
	amdgpu_ring_write(ring, addr & 0xffffffff);
	amdgpu_ring_write(ring,
		PACKET0(SOC15_REG_OFFSET(UVD, ring->me, mmUVD_GPCOM_VCPU_DATA1), 0));
	amdgpu_ring_write(ring, upper_32_bits(addr) & 0xff);
	amdgpu_ring_write(ring,
		PACKET0(SOC15_REG_OFFSET(UVD, ring->me, mmUVD_GPCOM_VCPU_CMD), 0));
	amdgpu_ring_write(ring, 0);

	amdgpu_ring_write(ring,
		PACKET0(SOC15_REG_OFFSET(UVD, ring->me, mmUVD_GPCOM_VCPU_DATA0), 0));
	amdgpu_ring_write(ring, 0);
	amdgpu_ring_write(ring,
		PACKET0(SOC15_REG_OFFSET(UVD, ring->me, mmUVD_GPCOM_VCPU_DATA1), 0));
	amdgpu_ring_write(ring, 0);
	amdgpu_ring_write(ring,
		PACKET0(SOC15_REG_OFFSET(UVD, ring->me, mmUVD_GPCOM_VCPU_CMD), 0));
	amdgpu_ring_write(ring, 2);
}
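/* The GPCOM command dword selects the VCPU operation applied to the data
 * previously written to DATA0/DATA1: 0 appears to perform the fence write
 * at the given address and 2 to raise the trap interrupt (compare commands
 * 8 and 12 used by the wreg/reg_wait helpers further down).
 */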
/**
 * uvd_v7_0_enc_ring_emit_fence - emit an enc fence & trap command
 *
 * @ring: amdgpu_ring pointer
 * @addr: GPU address to write the fence sequence number to
 * @seq: sequence number
 * @flags: fence related flags
 *
 * Write an enc fence and a trap command to the ring.
 */
static void uvd_v7_0_enc_ring_emit_fence(struct amdgpu_ring *ring, u64 addr,
					 u64 seq, unsigned flags)
{
	WARN_ON(flags & AMDGPU_FENCE_FLAG_64BIT);

	amdgpu_ring_write(ring, HEVC_ENC_CMD_FENCE);
	amdgpu_ring_write(ring, addr);
	amdgpu_ring_write(ring, upper_32_bits(addr));
	amdgpu_ring_write(ring, seq);
	amdgpu_ring_write(ring, HEVC_ENC_CMD_TRAP);
}
/**
 * uvd_v7_0_ring_emit_hdp_flush - skip HDP flushing
 *
 * @ring: amdgpu_ring pointer
 */
static void uvd_v7_0_ring_emit_hdp_flush(struct amdgpu_ring *ring)
{
	/* The firmware doesn't seem to like touching registers at this point. */
}
/**
 * uvd_v7_0_ring_test_ring - register write test
 *
 * @ring: amdgpu_ring pointer
 *
 * Test if we can successfully write to the context register
 */
static int uvd_v7_0_ring_test_ring(struct amdgpu_ring *ring)
{
	struct amdgpu_device *adev = ring->adev;
	uint32_t tmp = 0;
	unsigned i;
	int r;

	WREG32_SOC15(UVD, ring->me, mmUVD_CONTEXT_ID, 0xCAFEDEAD);
	r = amdgpu_ring_alloc(ring, 3);
	if (r)
		return r;

	amdgpu_ring_write(ring,
		PACKET0(SOC15_REG_OFFSET(UVD, ring->me, mmUVD_CONTEXT_ID), 0));
	amdgpu_ring_write(ring, 0xDEADBEEF);
	amdgpu_ring_commit(ring);
	for (i = 0; i < adev->usec_timeout; i++) {
		tmp = RREG32_SOC15(UVD, ring->me, mmUVD_CONTEXT_ID);
		if (tmp == 0xDEADBEEF)
			break;
		udelay(1);
	}

	if (i >= adev->usec_timeout)
		r = -ETIMEDOUT;

	return r;
}
/**
 * uvd_v7_0_ring_patch_cs_in_place - Patch the IB for command submission.
 *
 * @p: the CS parser with the IBs
 * @ib_idx: which IB to patch
 *
 */
static int uvd_v7_0_ring_patch_cs_in_place(struct amdgpu_cs_parser *p,
					   uint32_t ib_idx)
{
	struct amdgpu_ring *ring = to_amdgpu_ring(p->entity->rq->sched);
	struct amdgpu_ib *ib = &p->job->ibs[ib_idx];
	unsigned i;

	/* No patching necessary for the first instance */
	if (!ring->me)
		return 0;

	for (i = 0; i < ib->length_dw; i += 2) {
		uint32_t reg = amdgpu_get_ib_value(p, ib_idx, i);

		reg -= p->adev->reg_offset[UVD_HWIP][0][1];
		reg += p->adev->reg_offset[UVD_HWIP][1][1];

		amdgpu_set_ib_value(p, ib_idx, i, reg);
	}
	return 0;
}
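/* Userspace builds UVD IBs against the register aperture of instance 0;
 * when a job lands on the second instance the loop above rewrites every
 * register offset in the IB (reg/value dword pairs) from instance 0's
 * aperture to instance 1's before the IB is executed.
 */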
/**
 * uvd_v7_0_ring_emit_ib - execute indirect buffer
 *
 * @ring: amdgpu_ring pointer
 * @job: job to retrieve vmid from
 * @ib: indirect buffer to execute
 * @flags: unused
 *
 * Write ring commands to execute the indirect buffer
 */
static void uvd_v7_0_ring_emit_ib(struct amdgpu_ring *ring,
				  struct amdgpu_job *job,
				  struct amdgpu_ib *ib,
				  uint32_t flags)
{
	struct amdgpu_device *adev = ring->adev;
	unsigned vmid = AMDGPU_JOB_GET_VMID(job);

	amdgpu_ring_write(ring,
		PACKET0(SOC15_REG_OFFSET(UVD, ring->me, mmUVD_LMI_RBC_IB_VMID), 0));
	amdgpu_ring_write(ring, vmid);

	amdgpu_ring_write(ring,
		PACKET0(SOC15_REG_OFFSET(UVD, ring->me, mmUVD_LMI_RBC_IB_64BIT_BAR_LOW), 0));
	amdgpu_ring_write(ring, lower_32_bits(ib->gpu_addr));
	amdgpu_ring_write(ring,
		PACKET0(SOC15_REG_OFFSET(UVD, ring->me, mmUVD_LMI_RBC_IB_64BIT_BAR_HIGH), 0));
	amdgpu_ring_write(ring, upper_32_bits(ib->gpu_addr));
	amdgpu_ring_write(ring,
		PACKET0(SOC15_REG_OFFSET(UVD, ring->me, mmUVD_RBC_IB_SIZE), 0));
	amdgpu_ring_write(ring, ib->length_dw);
}
/**
 * uvd_v7_0_enc_ring_emit_ib - enc execute indirect buffer
 *
 * @ring: amdgpu_ring pointer
 * @job: job to retrieve vmid from
 * @ib: indirect buffer to execute
 * @flags: unused
 *
 * Write enc ring commands to execute the indirect buffer
 */
static void uvd_v7_0_enc_ring_emit_ib(struct amdgpu_ring *ring,
				      struct amdgpu_job *job,
				      struct amdgpu_ib *ib,
				      uint32_t flags)
{
	unsigned vmid = AMDGPU_JOB_GET_VMID(job);

	amdgpu_ring_write(ring, HEVC_ENC_CMD_IB_VM);
	amdgpu_ring_write(ring, vmid);
	amdgpu_ring_write(ring, lower_32_bits(ib->gpu_addr));
	amdgpu_ring_write(ring, upper_32_bits(ib->gpu_addr));
	amdgpu_ring_write(ring, ib->length_dw);
}
static void uvd_v7_0_ring_emit_wreg(struct amdgpu_ring *ring,
				    uint32_t reg, uint32_t val)
{
	struct amdgpu_device *adev = ring->adev;

	amdgpu_ring_write(ring,
		PACKET0(SOC15_REG_OFFSET(UVD, ring->me, mmUVD_GPCOM_VCPU_DATA0), 0));
	amdgpu_ring_write(ring, reg << 2);
	amdgpu_ring_write(ring,
		PACKET0(SOC15_REG_OFFSET(UVD, ring->me, mmUVD_GPCOM_VCPU_DATA1), 0));
	amdgpu_ring_write(ring, val);
	amdgpu_ring_write(ring,
		PACKET0(SOC15_REG_OFFSET(UVD, ring->me, mmUVD_GPCOM_VCPU_CMD), 0));
	amdgpu_ring_write(ring, 8);
}
static void uvd_v7_0_ring_emit_reg_wait(struct amdgpu_ring *ring, uint32_t reg,
					uint32_t val, uint32_t mask)
{
	struct amdgpu_device *adev = ring->adev;

	amdgpu_ring_write(ring,
		PACKET0(SOC15_REG_OFFSET(UVD, ring->me, mmUVD_GPCOM_VCPU_DATA0), 0));
	amdgpu_ring_write(ring, reg << 2);
	amdgpu_ring_write(ring,
		PACKET0(SOC15_REG_OFFSET(UVD, ring->me, mmUVD_GPCOM_VCPU_DATA1), 0));
	amdgpu_ring_write(ring, val);
	amdgpu_ring_write(ring,
		PACKET0(SOC15_REG_OFFSET(UVD, ring->me, mmUVD_GP_SCRATCH8), 0));
	amdgpu_ring_write(ring, mask);
	amdgpu_ring_write(ring,
		PACKET0(SOC15_REG_OFFSET(UVD, ring->me, mmUVD_GPCOM_VCPU_CMD), 0));
	amdgpu_ring_write(ring, 12);
}
static void uvd_v7_0_ring_emit_vm_flush(struct amdgpu_ring *ring,
					unsigned vmid, uint64_t pd_addr)
{
	struct amdgpu_vmhub *hub = &ring->adev->vmhub[ring->funcs->vmhub];
	uint32_t data0, data1, mask;

	pd_addr = amdgpu_gmc_emit_flush_gpu_tlb(ring, vmid, pd_addr);

	/* wait for reg writes */
	data0 = hub->ctx0_ptb_addr_lo32 + vmid * 2;
	data1 = lower_32_bits(pd_addr);
	mask = 0xffffffff;
	uvd_v7_0_ring_emit_reg_wait(ring, data0, data1, mask);
}
static void uvd_v7_0_ring_insert_nop(struct amdgpu_ring *ring, uint32_t count)
{
	struct amdgpu_device *adev = ring->adev;
	int i;

	WARN_ON(ring->wptr % 2 || count % 2);

	for (i = 0; i < count / 2; i++) {
		amdgpu_ring_write(ring, PACKET0(SOC15_REG_OFFSET(UVD, ring->me, mmUVD_NO_OP), 0));
		amdgpu_ring_write(ring, 0);
	}
}
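/* A UVD NOP costs two dwords (a PACKET0 write to UVD_NO_OP plus its zero
 * payload), which is why the WARN_ON above insists on an even count and an
 * even write pointer.
 */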
static void uvd_v7_0_enc_ring_insert_end(struct amdgpu_ring *ring)
{
	amdgpu_ring_write(ring, HEVC_ENC_CMD_END);
}
static void uvd_v7_0_enc_ring_emit_reg_wait(struct amdgpu_ring *ring,
					    uint32_t reg, uint32_t val,
					    uint32_t mask)
{
	amdgpu_ring_write(ring, HEVC_ENC_CMD_REG_WAIT);
	amdgpu_ring_write(ring, reg << 2);
	amdgpu_ring_write(ring, mask);
	amdgpu_ring_write(ring, val);
}
static void uvd_v7_0_enc_ring_emit_vm_flush(struct amdgpu_ring *ring,
					    unsigned int vmid, uint64_t pd_addr)
{
	struct amdgpu_vmhub *hub = &ring->adev->vmhub[ring->funcs->vmhub];

	pd_addr = amdgpu_gmc_emit_flush_gpu_tlb(ring, vmid, pd_addr);

	/* wait for reg writes */
	uvd_v7_0_enc_ring_emit_reg_wait(ring, hub->ctx0_ptb_addr_lo32 + vmid * 2,
					lower_32_bits(pd_addr), 0xffffffff);
}
static void uvd_v7_0_enc_ring_emit_wreg(struct amdgpu_ring *ring,
					uint32_t reg, uint32_t val)
{
	amdgpu_ring_write(ring, HEVC_ENC_CMD_REG_WRITE);
	amdgpu_ring_write(ring, reg << 2);
	amdgpu_ring_write(ring, val);
}
/* The idle/soft-reset helpers below are compiled out (note the matching NULL
 * entries in uvd_v7_0_ip_funcs); they still reference an undefined
 * per-instance "ring" and would need rework for the two-instance case before
 * being enabled again.
 */
#if 0
static bool uvd_v7_0_is_idle(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	return !(RREG32(mmSRBM_STATUS) & SRBM_STATUS__UVD_BUSY_MASK);
}
static int uvd_v7_0_wait_for_idle(void *handle)
{
	unsigned i;
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	for (i = 0; i < adev->usec_timeout; i++) {
		if (uvd_v7_0_is_idle(handle))
			return 0;
	}
	return -ETIMEDOUT;
}
#define AMDGPU_UVD_STATUS_BUSY_MASK    0xfd
static bool uvd_v7_0_check_soft_reset(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	u32 srbm_soft_reset = 0;
	u32 tmp = RREG32(mmSRBM_STATUS);

	if (REG_GET_FIELD(tmp, SRBM_STATUS, UVD_RQ_PENDING) ||
	    REG_GET_FIELD(tmp, SRBM_STATUS, UVD_BUSY) ||
	    (RREG32_SOC15(UVD, ring->me, mmUVD_STATUS) &
	     AMDGPU_UVD_STATUS_BUSY_MASK))
		srbm_soft_reset = REG_SET_FIELD(srbm_soft_reset,
						SRBM_SOFT_RESET, SOFT_RESET_UVD, 1);

	if (srbm_soft_reset) {
		adev->uvd.inst[ring->me].srbm_soft_reset = srbm_soft_reset;
		return true;
	} else {
		adev->uvd.inst[ring->me].srbm_soft_reset = 0;
		return false;
	}
}
static int uvd_v7_0_pre_soft_reset(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	if (!adev->uvd.inst[ring->me].srbm_soft_reset)
		return 0;

	uvd_v7_0_stop(adev);
	return 0;
}
static int uvd_v7_0_soft_reset(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	u32 srbm_soft_reset;

	if (!adev->uvd.inst[ring->me].srbm_soft_reset)
		return 0;
	srbm_soft_reset = adev->uvd.inst[ring->me].srbm_soft_reset;

	if (srbm_soft_reset) {
		u32 tmp;

		tmp = RREG32(mmSRBM_SOFT_RESET);
		tmp |= srbm_soft_reset;
		dev_info(adev->dev, "SRBM_SOFT_RESET=0x%08X\n", tmp);
		WREG32(mmSRBM_SOFT_RESET, tmp);
		tmp = RREG32(mmSRBM_SOFT_RESET);

		udelay(50);

		tmp &= ~srbm_soft_reset;
		WREG32(mmSRBM_SOFT_RESET, tmp);
		tmp = RREG32(mmSRBM_SOFT_RESET);

		/* Wait a little for things to settle down */
		udelay(50);
	}

	return 0;
}
static int uvd_v7_0_post_soft_reset(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	if (!adev->uvd.inst[ring->me].srbm_soft_reset)
		return 0;

	mdelay(5);

	return uvd_v7_0_start(adev);
}
#endif
static int uvd_v7_0_set_interrupt_state(struct amdgpu_device *adev,
					struct amdgpu_irq_src *source,
					unsigned type,
					enum amdgpu_interrupt_state state)
{
	return 0;
}
static int uvd_v7_0_process_interrupt(struct amdgpu_device *adev,
				      struct amdgpu_irq_src *source,
				      struct amdgpu_iv_entry *entry)
{
	uint32_t ip_instance;

	switch (entry->client_id) {
	case SOC15_IH_CLIENTID_UVD:
		ip_instance = 0;
		break;
	case SOC15_IH_CLIENTID_UVD1:
		ip_instance = 1;
		break;
	default:
		DRM_ERROR("Unhandled client id: %d\n", entry->client_id);
		return 0;
	}

	DRM_DEBUG("IH: UVD TRAP\n");

	switch (entry->src_id) {
	case 124:
		amdgpu_fence_process(&adev->uvd.inst[ip_instance].ring);
		break;
	case 119:
		amdgpu_fence_process(&adev->uvd.inst[ip_instance].ring_enc[0]);
		break;
	case 120:
		if (!amdgpu_sriov_vf(adev))
			amdgpu_fence_process(&adev->uvd.inst[ip_instance].ring_enc[1]);
		break;
	default:
		DRM_ERROR("Unhandled interrupt: %d %d\n",
			  entry->src_id, entry->src_data[0]);
		break;
	}

	return 0;
}
#if 0
static void uvd_v7_0_set_sw_clock_gating(struct amdgpu_device *adev)
{
	uint32_t data, data1, data2, suvd_flags;

	data = RREG32_SOC15(UVD, ring->me, mmUVD_CGC_CTRL);
	data1 = RREG32_SOC15(UVD, ring->me, mmUVD_SUVD_CGC_GATE);
	data2 = RREG32_SOC15(UVD, ring->me, mmUVD_SUVD_CGC_CTRL);

	data &= ~(UVD_CGC_CTRL__CLK_OFF_DELAY_MASK |
		  UVD_CGC_CTRL__CLK_GATE_DLY_TIMER_MASK);

	suvd_flags = UVD_SUVD_CGC_GATE__SRE_MASK |
		     UVD_SUVD_CGC_GATE__SIT_MASK |
		     UVD_SUVD_CGC_GATE__SMP_MASK |
		     UVD_SUVD_CGC_GATE__SCM_MASK |
		     UVD_SUVD_CGC_GATE__SDB_MASK;

	data |= UVD_CGC_CTRL__DYN_CLOCK_MODE_MASK |
		(1 << REG_FIELD_SHIFT(UVD_CGC_CTRL, CLK_GATE_DLY_TIMER)) |
		(4 << REG_FIELD_SHIFT(UVD_CGC_CTRL, CLK_OFF_DELAY));

	data &= ~(UVD_CGC_CTRL__UDEC_RE_MODE_MASK |
		  UVD_CGC_CTRL__UDEC_CM_MODE_MASK |
		  UVD_CGC_CTRL__UDEC_IT_MODE_MASK |
		  UVD_CGC_CTRL__UDEC_DB_MODE_MASK |
		  UVD_CGC_CTRL__UDEC_MP_MODE_MASK |
		  UVD_CGC_CTRL__SYS_MODE_MASK |
		  UVD_CGC_CTRL__UDEC_MODE_MASK |
		  UVD_CGC_CTRL__MPEG2_MODE_MASK |
		  UVD_CGC_CTRL__REGS_MODE_MASK |
		  UVD_CGC_CTRL__RBC_MODE_MASK |
		  UVD_CGC_CTRL__LMI_MC_MODE_MASK |
		  UVD_CGC_CTRL__LMI_UMC_MODE_MASK |
		  UVD_CGC_CTRL__IDCT_MODE_MASK |
		  UVD_CGC_CTRL__MPRD_MODE_MASK |
		  UVD_CGC_CTRL__MPC_MODE_MASK |
		  UVD_CGC_CTRL__LBSI_MODE_MASK |
		  UVD_CGC_CTRL__LRBBM_MODE_MASK |
		  UVD_CGC_CTRL__WCB_MODE_MASK |
		  UVD_CGC_CTRL__VCPU_MODE_MASK |
		  UVD_CGC_CTRL__JPEG_MODE_MASK |
		  UVD_CGC_CTRL__JPEG2_MODE_MASK |
		  UVD_CGC_CTRL__SCPU_MODE_MASK);
	data2 &= ~(UVD_SUVD_CGC_CTRL__SRE_MODE_MASK |
		   UVD_SUVD_CGC_CTRL__SIT_MODE_MASK |
		   UVD_SUVD_CGC_CTRL__SMP_MODE_MASK |
		   UVD_SUVD_CGC_CTRL__SCM_MODE_MASK |
		   UVD_SUVD_CGC_CTRL__SDB_MODE_MASK);
	data1 |= suvd_flags;

	WREG32_SOC15(UVD, ring->me, mmUVD_CGC_CTRL, data);
	WREG32_SOC15(UVD, ring->me, mmUVD_CGC_GATE, 0);
	WREG32_SOC15(UVD, ring->me, mmUVD_SUVD_CGC_GATE, data1);
	WREG32_SOC15(UVD, ring->me, mmUVD_SUVD_CGC_CTRL, data2);
}
static void uvd_v7_0_set_hw_clock_gating(struct amdgpu_device *adev)
{
	uint32_t data, data1, cgc_flags, suvd_flags;

	data = RREG32_SOC15(UVD, ring->me, mmUVD_CGC_GATE);
	data1 = RREG32_SOC15(UVD, ring->me, mmUVD_SUVD_CGC_GATE);

	cgc_flags = UVD_CGC_GATE__SYS_MASK |
		    UVD_CGC_GATE__UDEC_MASK |
		    UVD_CGC_GATE__MPEG2_MASK |
		    UVD_CGC_GATE__RBC_MASK |
		    UVD_CGC_GATE__LMI_MC_MASK |
		    UVD_CGC_GATE__IDCT_MASK |
		    UVD_CGC_GATE__MPRD_MASK |
		    UVD_CGC_GATE__MPC_MASK |
		    UVD_CGC_GATE__LBSI_MASK |
		    UVD_CGC_GATE__LRBBM_MASK |
		    UVD_CGC_GATE__UDEC_RE_MASK |
		    UVD_CGC_GATE__UDEC_CM_MASK |
		    UVD_CGC_GATE__UDEC_IT_MASK |
		    UVD_CGC_GATE__UDEC_DB_MASK |
		    UVD_CGC_GATE__UDEC_MP_MASK |
		    UVD_CGC_GATE__WCB_MASK |
		    UVD_CGC_GATE__VCPU_MASK |
		    UVD_CGC_GATE__SCPU_MASK |
		    UVD_CGC_GATE__JPEG_MASK |
		    UVD_CGC_GATE__JPEG2_MASK;

	suvd_flags = UVD_SUVD_CGC_GATE__SRE_MASK |
		     UVD_SUVD_CGC_GATE__SIT_MASK |
		     UVD_SUVD_CGC_GATE__SMP_MASK |
		     UVD_SUVD_CGC_GATE__SCM_MASK |
		     UVD_SUVD_CGC_GATE__SDB_MASK;

	data |= cgc_flags;
	data1 |= suvd_flags;

	WREG32_SOC15(UVD, ring->me, mmUVD_CGC_GATE, data);
	WREG32_SOC15(UVD, ring->me, mmUVD_SUVD_CGC_GATE, data1);
}
static void uvd_v7_0_set_bypass_mode(struct amdgpu_device *adev, bool enable)
{
	u32 tmp = RREG32_SMC(ixGCK_DFS_BYPASS_CNTL);

	if (enable)
		tmp |= (GCK_DFS_BYPASS_CNTL__BYPASSDCLK_MASK |
			GCK_DFS_BYPASS_CNTL__BYPASSVCLK_MASK);
	else
		tmp &= ~(GCK_DFS_BYPASS_CNTL__BYPASSDCLK_MASK |
			 GCK_DFS_BYPASS_CNTL__BYPASSVCLK_MASK);

	WREG32_SMC(ixGCK_DFS_BYPASS_CNTL, tmp);
}
static int uvd_v7_0_set_clockgating_state(void *handle,
					  enum amd_clockgating_state state)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	bool enable = (state == AMD_CG_STATE_GATE);

	uvd_v7_0_set_bypass_mode(adev, enable);

	if (!(adev->cg_flags & AMD_CG_SUPPORT_UVD_MGCG))
		return 0;

	if (enable) {
		/* disable HW gating and enable Sw gating */
		uvd_v7_0_set_sw_clock_gating(adev);
	} else {
		/* wait for STATUS to clear */
		if (uvd_v7_0_wait_for_idle(handle))
			return -EBUSY;

		/* enable HW gates because UVD is idle */
		/* uvd_v7_0_set_hw_clock_gating(adev); */
	}

	return 0;
}
static int uvd_v7_0_set_powergating_state(void *handle,
					  enum amd_powergating_state state)
{
	/* This doesn't actually powergate the UVD block.
	 * That's done in the dpm code via the SMC. This
	 * just re-inits the block as necessary. The actual
	 * gating still happens in the dpm code. We should
	 * revisit this when there is a cleaner line between
	 * the smc and the hw blocks
	 */
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	if (!(adev->pg_flags & AMD_PG_SUPPORT_UVD))
		return 0;

	WREG32_SOC15(UVD, ring->me, mmUVD_POWER_STATUS, UVD_POWER_STATUS__UVD_PG_EN_MASK);

	if (state == AMD_PG_STATE_GATE) {
		uvd_v7_0_stop(adev);
		return 0;
	} else {
		return uvd_v7_0_start(adev);
	}
}
#endif
static int uvd_v7_0_set_clockgating_state(void *handle,
					  enum amd_clockgating_state state)
{
	/* needed for driver unload*/
	return 0;
}
const struct amd_ip_funcs uvd_v7_0_ip_funcs = {
	.name = "uvd_v7_0",
	.early_init = uvd_v7_0_early_init,
	.late_init = NULL,
	.sw_init = uvd_v7_0_sw_init,
	.sw_fini = uvd_v7_0_sw_fini,
	.hw_init = uvd_v7_0_hw_init,
	.hw_fini = uvd_v7_0_hw_fini,
	.suspend = uvd_v7_0_suspend,
	.resume = uvd_v7_0_resume,
	.is_idle = NULL /* uvd_v7_0_is_idle */,
	.wait_for_idle = NULL /* uvd_v7_0_wait_for_idle */,
	.check_soft_reset = NULL /* uvd_v7_0_check_soft_reset */,
	.pre_soft_reset = NULL /* uvd_v7_0_pre_soft_reset */,
	.soft_reset = NULL /* uvd_v7_0_soft_reset */,
	.post_soft_reset = NULL /* uvd_v7_0_post_soft_reset */,
	.set_clockgating_state = uvd_v7_0_set_clockgating_state,
	.set_powergating_state = NULL /* uvd_v7_0_set_powergating_state */,
};
static const struct amdgpu_ring_funcs uvd_v7_0_ring_vm_funcs = {
	.type = AMDGPU_RING_TYPE_UVD,
	.align_mask = 0xf,
	.support_64bit_ptrs = false,
	.no_user_fence = true,
	.vmhub = AMDGPU_MMHUB_0,
	.get_rptr = uvd_v7_0_ring_get_rptr,
	.get_wptr = uvd_v7_0_ring_get_wptr,
	.set_wptr = uvd_v7_0_ring_set_wptr,
	.patch_cs_in_place = uvd_v7_0_ring_patch_cs_in_place,
	.emit_frame_size =
		6 + /* hdp invalidate */
		SOC15_FLUSH_GPU_TLB_NUM_WREG * 6 +
		SOC15_FLUSH_GPU_TLB_NUM_REG_WAIT * 8 +
		8 + /* uvd_v7_0_ring_emit_vm_flush */
		14 + 14, /* uvd_v7_0_ring_emit_fence x2 vm fence */
	.emit_ib_size = 8, /* uvd_v7_0_ring_emit_ib */
	.emit_ib = uvd_v7_0_ring_emit_ib,
	.emit_fence = uvd_v7_0_ring_emit_fence,
	.emit_vm_flush = uvd_v7_0_ring_emit_vm_flush,
	.emit_hdp_flush = uvd_v7_0_ring_emit_hdp_flush,
	.test_ring = uvd_v7_0_ring_test_ring,
	.test_ib = amdgpu_uvd_ring_test_ib,
	.insert_nop = uvd_v7_0_ring_insert_nop,
	.pad_ib = amdgpu_ring_generic_pad_ib,
	.begin_use = amdgpu_uvd_ring_begin_use,
	.end_use = amdgpu_uvd_ring_end_use,
	.emit_wreg = uvd_v7_0_ring_emit_wreg,
	.emit_reg_wait = uvd_v7_0_ring_emit_reg_wait,
	.emit_reg_write_reg_wait = amdgpu_ring_emit_reg_write_reg_wait_helper,
};
static const struct amdgpu_ring_funcs uvd_v7_0_enc_ring_vm_funcs = {
	.type = AMDGPU_RING_TYPE_UVD_ENC,
	.align_mask = 0x3f,
	.nop = HEVC_ENC_CMD_NO_OP,
	.support_64bit_ptrs = false,
	.no_user_fence = true,
	.vmhub = AMDGPU_MMHUB_0,
	.get_rptr = uvd_v7_0_enc_ring_get_rptr,
	.get_wptr = uvd_v7_0_enc_ring_get_wptr,
	.set_wptr = uvd_v7_0_enc_ring_set_wptr,
	.emit_frame_size =
		3 + 3 + /* hdp flush / invalidate */
		SOC15_FLUSH_GPU_TLB_NUM_WREG * 3 +
		SOC15_FLUSH_GPU_TLB_NUM_REG_WAIT * 4 +
		4 + /* uvd_v7_0_enc_ring_emit_vm_flush */
		5 + 5 + /* uvd_v7_0_enc_ring_emit_fence x2 vm fence */
		1, /* uvd_v7_0_enc_ring_insert_end */
	.emit_ib_size = 5, /* uvd_v7_0_enc_ring_emit_ib */
	.emit_ib = uvd_v7_0_enc_ring_emit_ib,
	.emit_fence = uvd_v7_0_enc_ring_emit_fence,
	.emit_vm_flush = uvd_v7_0_enc_ring_emit_vm_flush,
	.test_ring = uvd_v7_0_enc_ring_test_ring,
	.test_ib = uvd_v7_0_enc_ring_test_ib,
	.insert_nop = amdgpu_ring_insert_nop,
	.insert_end = uvd_v7_0_enc_ring_insert_end,
	.pad_ib = amdgpu_ring_generic_pad_ib,
	.begin_use = amdgpu_uvd_ring_begin_use,
	.end_use = amdgpu_uvd_ring_end_use,
	.emit_wreg = uvd_v7_0_enc_ring_emit_wreg,
	.emit_reg_wait = uvd_v7_0_enc_ring_emit_reg_wait,
	.emit_reg_write_reg_wait = amdgpu_ring_emit_reg_write_reg_wait_helper,
};
static void uvd_v7_0_set_ring_funcs(struct amdgpu_device *adev)
{
	int i;

	for (i = 0; i < adev->uvd.num_uvd_inst; i++) {
		if (adev->uvd.harvest_config & (1 << i))
			continue;
		adev->uvd.inst[i].ring.funcs = &uvd_v7_0_ring_vm_funcs;
		adev->uvd.inst[i].ring.me = i;
		DRM_INFO("UVD(%d) is enabled in VM mode\n", i);
	}
}
static void uvd_v7_0_set_enc_ring_funcs(struct amdgpu_device *adev)
{
	int i, j;

	for (j = 0; j < adev->uvd.num_uvd_inst; j++) {
		if (adev->uvd.harvest_config & (1 << j))
			continue;
		for (i = 0; i < adev->uvd.num_enc_rings; ++i) {
			adev->uvd.inst[j].ring_enc[i].funcs = &uvd_v7_0_enc_ring_vm_funcs;
			adev->uvd.inst[j].ring_enc[i].me = j;
		}

		DRM_INFO("UVD(%d) ENC is enabled in VM mode\n", j);
	}
}
static const struct amdgpu_irq_src_funcs uvd_v7_0_irq_funcs = {
	.set = uvd_v7_0_set_interrupt_state,
	.process = uvd_v7_0_process_interrupt,
};

static void uvd_v7_0_set_irq_funcs(struct amdgpu_device *adev)
{
	int i;

	for (i = 0; i < adev->uvd.num_uvd_inst; i++) {
		if (adev->uvd.harvest_config & (1 << i))
			continue;
		adev->uvd.inst[i].irq.num_types = adev->uvd.num_enc_rings + 1;
		adev->uvd.inst[i].irq.funcs = &uvd_v7_0_irq_funcs;
	}
}
const struct amdgpu_ip_block_version uvd_v7_0_ip_block =
{
		.type = AMD_IP_BLOCK_TYPE_UVD,
		.major = 7,
		.minor = 0,
		.rev = 0,
		.funcs = &uvd_v7_0_ip_funcs,
};