/*
 * Copyright 2019 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 */
#include <linux/firmware.h>
#include <linux/slab.h>
#include <linux/module.h>
#include <linux/pci.h>

#include "amdgpu.h"
#include "amdgpu_atombios.h"
#include "amdgpu_ih.h"
#include "amdgpu_uvd.h"
#include "amdgpu_vce.h"
#include "amdgpu_ucode.h"
#include "amdgpu_psp.h"
#include "amdgpu_smu.h"
#include "atom.h"
#include "amd_pcie.h"

#include "gc/gc_10_1_0_offset.h"
#include "gc/gc_10_1_0_sh_mask.h"
#include "hdp/hdp_5_0_0_offset.h"
#include "hdp/hdp_5_0_0_sh_mask.h"
#include "smuio/smuio_11_0_0_offset.h"

#include "soc15.h"
#include "soc15_common.h"
#include "gmc_v10_0.h"
#include "gfxhub_v2_0.h"
#include "mmhub_v2_0.h"
#include "nbio_v2_3.h"
#include "nv.h"
#include "navi10_ih.h"
#include "gfx_v10_0.h"
#include "sdma_v5_0.h"
#include "vcn_v2_0.h"
#include "dce_virtual.h"
#include "mes_v10_1.h"
#include "mxgpu_nv.h"

static const struct amd_ip_funcs nv_common_ip_funcs;

/*
 * Indirect registers accessor
 */
static u32 nv_pcie_rreg(struct amdgpu_device *adev, u32 reg)
{
	unsigned long flags, address, data;
	u32 r;

	address = adev->nbio.funcs->get_pcie_index_offset(adev);
	data = adev->nbio.funcs->get_pcie_data_offset(adev);

	spin_lock_irqsave(&adev->pcie_idx_lock, flags);
	WREG32(address, reg);
	(void)RREG32(address);	/* read back to flush the index write */
	r = RREG32(data);
	spin_unlock_irqrestore(&adev->pcie_idx_lock, flags);
	return r;
}

static void nv_pcie_wreg(struct amdgpu_device *adev, u32 reg, u32 v)
{
	unsigned long flags, address, data;

	address = adev->nbio.funcs->get_pcie_index_offset(adev);
	data = adev->nbio.funcs->get_pcie_data_offset(adev);

	spin_lock_irqsave(&adev->pcie_idx_lock, flags);
	WREG32(address, reg);
	(void)RREG32(address);
	WREG32(data, v);
	(void)RREG32(data);
	spin_unlock_irqrestore(&adev->pcie_idx_lock, flags);
}

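/*
 * The DIDT (di/dt power throttling) block is reached through its own
 * index/data register pair in the GC aperture; unlike the PCIE accessors
 * above, the index and data offsets are fixed rather than provided by the
 * NBIO callbacks.
 */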
static u32 nv_didt_rreg(struct amdgpu_device *adev, u32 reg)
{
	unsigned long flags, address, data;
	u32 r;

	address = SOC15_REG_OFFSET(GC, 0, mmDIDT_IND_INDEX);
	data = SOC15_REG_OFFSET(GC, 0, mmDIDT_IND_DATA);

	spin_lock_irqsave(&adev->didt_idx_lock, flags);
	WREG32(address, (reg));
	r = RREG32(data);
	spin_unlock_irqrestore(&adev->didt_idx_lock, flags);
	return r;
}

static void nv_didt_wreg(struct amdgpu_device *adev, u32 reg, u32 v)
{
	unsigned long flags, address, data;

	address = SOC15_REG_OFFSET(GC, 0, mmDIDT_IND_INDEX);
	data = SOC15_REG_OFFSET(GC, 0, mmDIDT_IND_DATA);

	spin_lock_irqsave(&adev->didt_idx_lock, flags);
	WREG32(address, (reg));
	WREG32(data, (v));
	spin_unlock_irqrestore(&adev->didt_idx_lock, flags);
}

static u32 nv_get_config_memsize(struct amdgpu_device *adev)
{
	return adev->nbio.funcs->get_memsize(adev);
}

static u32 nv_get_xclk(struct amdgpu_device *adev)
{
	return adev->clock.spll.reference_freq;
}

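/*
 * nv_grbm_select - route GRBM register accesses to one GFX instance.
 * Selects which ME/pipe/queue/VMID combination subsequent per-queue GFX
 * register reads and writes decode to; callers typically serialize the
 * select/access/restore sequence with adev->srbm_mutex.
 */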
void nv_grbm_select(struct amdgpu_device *adev,
		    u32 me, u32 pipe, u32 queue, u32 vmid)
{
	u32 grbm_gfx_cntl = 0;
	grbm_gfx_cntl = REG_SET_FIELD(grbm_gfx_cntl, GRBM_GFX_CNTL, PIPEID, pipe);
	grbm_gfx_cntl = REG_SET_FIELD(grbm_gfx_cntl, GRBM_GFX_CNTL, MEID, me);
	grbm_gfx_cntl = REG_SET_FIELD(grbm_gfx_cntl, GRBM_GFX_CNTL, VMID, vmid);
	grbm_gfx_cntl = REG_SET_FIELD(grbm_gfx_cntl, GRBM_GFX_CNTL, QUEUEID, queue);

	WREG32(SOC15_REG_OFFSET(GC, 0, mmGRBM_GFX_CNTL), grbm_gfx_cntl);
}

static void nv_vga_set_state(struct amdgpu_device *adev, bool state)
{
	/* todo */
}

static bool nv_read_disabled_bios(struct amdgpu_device *adev)
{
	/* todo */
	return false;
}

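/*
 * Read the VBIOS image through the SMUIO ROM_INDEX/ROM_DATA port: writing
 * 0 to ROM_INDEX resets the read pointer, after which each read of
 * ROM_DATA returns the next dword of the image. Not usable on APUs, whose
 * VBIOS is carried inside the system BIOS image instead.
 */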
static bool nv_read_bios_from_rom(struct amdgpu_device *adev,
				  u8 *bios, u32 length_bytes)
{
	u32 *dw_ptr;
	u32 i, length_dw;

	if (bios == NULL)
		return false;
	if (length_bytes == 0)
		return false;
	/* APU vbios image is part of sbios image */
	if (adev->flags & AMD_IS_APU)
		return false;

	dw_ptr = (u32 *)bios;
	length_dw = ALIGN(length_bytes, 4) / 4;

	/* set rom index to 0 */
	WREG32(SOC15_REG_OFFSET(SMUIO, 0, mmROM_INDEX), 0);
	/* read out the rom data */
	for (i = 0; i < length_dw; i++)
		dw_ptr[i] = RREG32(SOC15_REG_OFFSET(SMUIO, 0, mmROM_DATA));

	return true;
}

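/*
 * Whitelist of registers that userspace may sample through the register
 * read interface (AMDGPU_INFO_READ_MMR_REG); nv_read_register() below
 * rejects any offset that is not listed here. Entries flagged as
 * grbm_indexed are read per-SE/per-SH via the GRBM index.
 */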
static struct soc15_allowed_register_entry nv_allowed_read_registers[] = {
	{ SOC15_REG_ENTRY(GC, 0, mmGRBM_STATUS)},
	{ SOC15_REG_ENTRY(GC, 0, mmGRBM_STATUS2)},
	{ SOC15_REG_ENTRY(GC, 0, mmGRBM_STATUS_SE0)},
	{ SOC15_REG_ENTRY(GC, 0, mmGRBM_STATUS_SE1)},
	{ SOC15_REG_ENTRY(GC, 0, mmGRBM_STATUS_SE2)},
	{ SOC15_REG_ENTRY(GC, 0, mmGRBM_STATUS_SE3)},
#if 0	/* TODO: will set it when SDMA header is available */
	{ SOC15_REG_ENTRY(SDMA0, 0, mmSDMA0_STATUS_REG)},
	{ SOC15_REG_ENTRY(SDMA1, 0, mmSDMA1_STATUS_REG)},
#endif
	{ SOC15_REG_ENTRY(GC, 0, mmCP_STAT)},
	{ SOC15_REG_ENTRY(GC, 0, mmCP_STALLED_STAT1)},
	{ SOC15_REG_ENTRY(GC, 0, mmCP_STALLED_STAT2)},
	{ SOC15_REG_ENTRY(GC, 0, mmCP_STALLED_STAT3)},
	{ SOC15_REG_ENTRY(GC, 0, mmCP_CPF_BUSY_STAT)},
	{ SOC15_REG_ENTRY(GC, 0, mmCP_CPF_STALLED_STAT1)},
	{ SOC15_REG_ENTRY(GC, 0, mmCP_CPF_STATUS)},
	{ SOC15_REG_ENTRY(GC, 0, mmCP_CPC_BUSY_STAT)},
	{ SOC15_REG_ENTRY(GC, 0, mmCP_CPC_STALLED_STAT1)},
	{ SOC15_REG_ENTRY(GC, 0, mmCP_CPC_STATUS)},
	{ SOC15_REG_ENTRY(GC, 0, mmGB_ADDR_CONFIG)},
};

static uint32_t nv_read_indexed_register(struct amdgpu_device *adev, u32 se_num,
					 u32 sh_num, u32 reg_offset)
{
	uint32_t val;

	mutex_lock(&adev->grbm_idx_mutex);
	if (se_num != 0xffffffff || sh_num != 0xffffffff)
		amdgpu_gfx_select_se_sh(adev, se_num, sh_num, 0xffffffff);

	val = RREG32(reg_offset);

	if (se_num != 0xffffffff || sh_num != 0xffffffff)
		amdgpu_gfx_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff);
	mutex_unlock(&adev->grbm_idx_mutex);
	return val;
}

static uint32_t nv_get_register_value(struct amdgpu_device *adev,
				      bool indexed, u32 se_num,
				      u32 sh_num, u32 reg_offset)
{
	if (indexed) {
		return nv_read_indexed_register(adev, se_num, sh_num, reg_offset);
	} else {
		if (reg_offset == SOC15_REG_OFFSET(GC, 0, mmGB_ADDR_CONFIG))
			return adev->gfx.config.gb_addr_config;
		return RREG32(reg_offset);
	}
}

static int nv_read_register(struct amdgpu_device *adev, u32 se_num,
			    u32 sh_num, u32 reg_offset, u32 *value)
{
	uint32_t i;
	struct soc15_allowed_register_entry *en;

	*value = 0;
	for (i = 0; i < ARRAY_SIZE(nv_allowed_read_registers); i++) {
		en = &nv_allowed_read_registers[i];
		if (reg_offset !=
		    (adev->reg_offset[en->hwip][en->inst][en->seg] + en->reg_offset))
			continue;

		*value = nv_get_register_value(adev,
					       nv_allowed_read_registers[i].grbm_indexed,
					       se_num, sh_num, reg_offset);
		return 0;
	}
	return -EINVAL;
}

static void nv_gpu_pci_config_reset(struct amdgpu_device *adev)
{
	u32 i;

	dev_info(adev->dev, "GPU pci config reset\n");

	/* disable BM */
	pci_clear_master(adev->pdev);
	/* reset */
	amdgpu_pci_config_reset(adev);

	udelay(100);

	/* wait for asic to come out of reset */
	for (i = 0; i < adev->usec_timeout; i++) {
		u32 memsize = nbio_v2_3_get_memsize(adev);

		if (memsize != 0xffffffff)
			break;
		udelay(1);
	}
}

static int nv_asic_mode1_reset(struct amdgpu_device *adev)
{
	u32 i;
	int ret = 0;

	amdgpu_atombios_scratch_regs_engine_hung(adev, true);

	dev_info(adev->dev, "GPU mode1 reset\n");

	/* disable BM */
	pci_clear_master(adev->pdev);

	pci_save_state(adev->pdev);

	ret = psp_gpu_reset(adev);
	if (ret)
		dev_err(adev->dev, "GPU mode1 reset failed\n");

	pci_restore_state(adev->pdev);

	/* wait for asic to come out of reset */
	for (i = 0; i < adev->usec_timeout; i++) {
		u32 memsize = adev->nbio.funcs->get_memsize(adev);

		if (memsize != 0xffffffff)
			break;
		udelay(1);
	}

	amdgpu_atombios_scratch_regs_engine_hung(adev, false);

	return ret;
}

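/*
 * Prefer BACO (Bus Active, Chip Off) when the SMU advertises support for
 * it, since it resets the chip without disturbing the PCI link; under
 * SR-IOV, or when BACO is unsupported, fall back to a PSP-driven mode1
 * reset.
 */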
static enum amd_reset_method
nv_asic_reset_method(struct amdgpu_device *adev)
{
	struct smu_context *smu = &adev->smu;

	if (!amdgpu_sriov_vf(adev) && smu_baco_is_support(smu))
		return AMD_RESET_METHOD_BACO;
	else
		return AMD_RESET_METHOD_MODE1;
}

static int nv_asic_reset(struct amdgpu_device *adev)
{
	/* FIXME: it doesn't work since vega10 */
#if 0
	amdgpu_atombios_scratch_regs_engine_hung(adev, true);

	nv_gpu_pci_config_reset(adev);

	amdgpu_atombios_scratch_regs_engine_hung(adev, false);
#endif
	int ret = 0;
	struct smu_context *smu = &adev->smu;

	if (nv_asic_reset_method(adev) == AMD_RESET_METHOD_BACO) {
		if (!adev->in_suspend)
			amdgpu_inc_vram_lost(adev);
		ret = smu_baco_reset(smu);
	} else {
		if (!adev->in_suspend)
			amdgpu_inc_vram_lost(adev);
		ret = nv_asic_mode1_reset(adev);
	}

	return ret;
}

static int nv_set_uvd_clocks(struct amdgpu_device *adev, u32 vclk, u32 dclk)
{
	/* todo */
	return 0;
}

static int nv_set_vce_clocks(struct amdgpu_device *adev, u32 evclk, u32 ecclk)
{
	/* todo */
	return 0;
}

static void nv_pcie_gen3_enable(struct amdgpu_device *adev)
{
	if (pci_is_root_bus(adev->pdev->bus))
		return;

	if (amdgpu_pcie_gen2 == 0)
		return;

	if (!(adev->pm.pcie_gen_mask & (CAIL_PCIE_LINK_SPEED_SUPPORT_GEN2 |
					CAIL_PCIE_LINK_SPEED_SUPPORT_GEN3)))
		return;

	/* todo */
}

static void nv_program_aspm(struct amdgpu_device *adev)
{
	if (amdgpu_aspm == 0)
		return;

	/* todo */
}

static void nv_enable_doorbell_aperture(struct amdgpu_device *adev,
					bool enable)
{
	adev->nbio.funcs->enable_doorbell_aperture(adev, enable);
	adev->nbio.funcs->enable_doorbell_selfring_aperture(adev, enable);
}

static const struct amdgpu_ip_block_version nv_common_ip_block =
{
	.type = AMD_IP_BLOCK_TYPE_COMMON,
	.major = 1,
	.minor = 0,
	.rev = 0,
	.funcs = &nv_common_ip_funcs,
};

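/*
 * Register offsets are looked up through per-IP base-address tables.
 * Newer firmware carries an IP discovery table in VRAM that describes
 * where each IP instance lives; if that lookup fails (or discovery is
 * disabled via the amdgpu_discovery module parameter), fall back to the
 * hardcoded per-ASIC tables below.
 */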
static int nv_reg_base_init(struct amdgpu_device *adev)
{
	int r;

	if (amdgpu_discovery) {
		r = amdgpu_discovery_reg_base_init(adev);
		if (r) {
			DRM_WARN("failed to init reg base from ip discovery table, "
				 "fallback to legacy init method\n");
			goto legacy_init;
		}

		return 0;
	}

legacy_init:
	switch (adev->asic_type) {
	case CHIP_NAVI10:
		navi10_reg_base_init(adev);
		break;
	case CHIP_NAVI14:
		navi14_reg_base_init(adev);
		break;
	case CHIP_NAVI12:
		navi12_reg_base_init(adev);
		break;
	default:
		return -EINVAL;
	}

	return 0;
}

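/*
 * IP blocks are added in init order: common and GMC first so that MMIO
 * and VRAM are usable, then IH for interrupts, PSP early enough to load
 * firmware for the blocks that follow, SMU when its firmware comes
 * through the PSP, display, and finally the engine blocks (GFX, SDMA,
 * VCN, MES).
 */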
int nv_set_ip_blocks(struct amdgpu_device *adev)
{
	int r;

	/* Set IP register base before any HW register access */
	r = nv_reg_base_init(adev);
	if (r)
		return r;

	adev->nbio.funcs = &nbio_v2_3_funcs;
	adev->nbio.hdp_flush_reg = &nbio_v2_3_hdp_flush_reg;

	adev->nbio.funcs->detect_hw_virt(adev);

	if (amdgpu_sriov_vf(adev))
		adev->virt.ops = &xgpu_nv_virt_ops;

	switch (adev->asic_type) {
	case CHIP_NAVI10:
	case CHIP_NAVI14:
		amdgpu_device_ip_block_add(adev, &nv_common_ip_block);
		amdgpu_device_ip_block_add(adev, &gmc_v10_0_ip_block);
		amdgpu_device_ip_block_add(adev, &navi10_ih_ip_block);
		amdgpu_device_ip_block_add(adev, &psp_v11_0_ip_block);
		if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP &&
		    is_support_sw_smu(adev) && !amdgpu_sriov_vf(adev))
			amdgpu_device_ip_block_add(adev, &smu_v11_0_ip_block);
		if (adev->enable_virtual_display || amdgpu_sriov_vf(adev))
			amdgpu_device_ip_block_add(adev, &dce_virtual_ip_block);
#if defined(CONFIG_DRM_AMD_DC)
		else if (amdgpu_device_has_dc_support(adev))
			amdgpu_device_ip_block_add(adev, &dm_ip_block);
#endif
		amdgpu_device_ip_block_add(adev, &gfx_v10_0_ip_block);
		amdgpu_device_ip_block_add(adev, &sdma_v5_0_ip_block);
		if (adev->firmware.load_type == AMDGPU_FW_LOAD_DIRECT &&
		    is_support_sw_smu(adev) && !amdgpu_sriov_vf(adev))
			amdgpu_device_ip_block_add(adev, &smu_v11_0_ip_block);
		amdgpu_device_ip_block_add(adev, &vcn_v2_0_ip_block);
		if (adev->enable_mes)
			amdgpu_device_ip_block_add(adev, &mes_v10_1_ip_block);
		break;
	case CHIP_NAVI12:
		amdgpu_device_ip_block_add(adev, &nv_common_ip_block);
		amdgpu_device_ip_block_add(adev, &gmc_v10_0_ip_block);
		amdgpu_device_ip_block_add(adev, &navi10_ih_ip_block);
		amdgpu_device_ip_block_add(adev, &psp_v11_0_ip_block);
		if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP &&
		    is_support_sw_smu(adev) && !amdgpu_sriov_vf(adev))
			amdgpu_device_ip_block_add(adev, &smu_v11_0_ip_block);
		if (adev->enable_virtual_display || amdgpu_sriov_vf(adev))
			amdgpu_device_ip_block_add(adev, &dce_virtual_ip_block);
#if defined(CONFIG_DRM_AMD_DC)
		else if (amdgpu_device_has_dc_support(adev))
			amdgpu_device_ip_block_add(adev, &dm_ip_block);
#endif
		amdgpu_device_ip_block_add(adev, &gfx_v10_0_ip_block);
		amdgpu_device_ip_block_add(adev, &sdma_v5_0_ip_block);
		if (adev->firmware.load_type == AMDGPU_FW_LOAD_DIRECT &&
		    is_support_sw_smu(adev) && !amdgpu_sriov_vf(adev))
			amdgpu_device_ip_block_add(adev, &smu_v11_0_ip_block);
		amdgpu_device_ip_block_add(adev, &vcn_v2_0_ip_block);
		break;
	default:
		return -EINVAL;
	}

	return 0;
}

static uint32_t nv_get_rev_id(struct amdgpu_device *adev)
{
	return adev->nbio.funcs->get_rev_id(adev);
}

static void nv_flush_hdp(struct amdgpu_device *adev, struct amdgpu_ring *ring)
{
	adev->nbio.funcs->hdp_flush(adev, ring);
}

static void nv_invalidate_hdp(struct amdgpu_device *adev,
			      struct amdgpu_ring *ring)
{
	if (!ring || !ring->funcs->emit_wreg) {
		WREG32_SOC15_NO_KIQ(NBIO, 0, mmHDP_READ_CACHE_INVALIDATE, 1);
	} else {
		amdgpu_ring_emit_wreg(ring, SOC15_REG_OFFSET(
					HDP, 0, mmHDP_READ_CACHE_INVALIDATE), 1);
	}
}

static bool nv_need_full_reset(struct amdgpu_device *adev)
{
	return true;
}

static void nv_get_pcie_usage(struct amdgpu_device *adev,
			      uint64_t *count0,
			      uint64_t *count1)
{
	/* TODO */
}

static bool nv_need_reset_on_init(struct amdgpu_device *adev)
{
#if 0
	u32 sol_reg;

	if (adev->flags & AMD_IS_APU)
		return false;

	/* Check sOS sign of life register to confirm sys driver and sOS
	 * have already been loaded.
	 */
	sol_reg = RREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_81);
	if (sol_reg)
		return true;
#endif
	/* TODO: re-enable it when mode1 reset is functional */
	return false;
}

static uint64_t nv_get_pcie_replay_count(struct amdgpu_device *adev)
{
	/* TODO
	 * dummy implementation for the pcie_replay_count sysfs interface
	 */
	return 0;
}

static void nv_init_doorbell_index(struct amdgpu_device *adev)
{
	adev->doorbell_index.kiq = AMDGPU_NAVI10_DOORBELL_KIQ;
	adev->doorbell_index.mec_ring0 = AMDGPU_NAVI10_DOORBELL_MEC_RING0;
	adev->doorbell_index.mec_ring1 = AMDGPU_NAVI10_DOORBELL_MEC_RING1;
	adev->doorbell_index.mec_ring2 = AMDGPU_NAVI10_DOORBELL_MEC_RING2;
	adev->doorbell_index.mec_ring3 = AMDGPU_NAVI10_DOORBELL_MEC_RING3;
	adev->doorbell_index.mec_ring4 = AMDGPU_NAVI10_DOORBELL_MEC_RING4;
	adev->doorbell_index.mec_ring5 = AMDGPU_NAVI10_DOORBELL_MEC_RING5;
	adev->doorbell_index.mec_ring6 = AMDGPU_NAVI10_DOORBELL_MEC_RING6;
	adev->doorbell_index.mec_ring7 = AMDGPU_NAVI10_DOORBELL_MEC_RING7;
	adev->doorbell_index.userqueue_start = AMDGPU_NAVI10_DOORBELL_USERQUEUE_START;
	adev->doorbell_index.userqueue_end = AMDGPU_NAVI10_DOORBELL_USERQUEUE_END;
	adev->doorbell_index.gfx_ring0 = AMDGPU_NAVI10_DOORBELL_GFX_RING0;
	adev->doorbell_index.gfx_ring1 = AMDGPU_NAVI10_DOORBELL_GFX_RING1;
	adev->doorbell_index.sdma_engine[0] = AMDGPU_NAVI10_DOORBELL_sDMA_ENGINE0;
	adev->doorbell_index.sdma_engine[1] = AMDGPU_NAVI10_DOORBELL_sDMA_ENGINE1;
	adev->doorbell_index.ih = AMDGPU_NAVI10_DOORBELL_IH;
	adev->doorbell_index.vcn.vcn_ring0_1 = AMDGPU_NAVI10_DOORBELL64_VCN0_1;
	adev->doorbell_index.vcn.vcn_ring2_3 = AMDGPU_NAVI10_DOORBELL64_VCN2_3;
	adev->doorbell_index.vcn.vcn_ring4_5 = AMDGPU_NAVI10_DOORBELL64_VCN4_5;
	adev->doorbell_index.vcn.vcn_ring6_7 = AMDGPU_NAVI10_DOORBELL64_VCN6_7;
	adev->doorbell_index.first_non_cp = AMDGPU_NAVI10_DOORBELL64_FIRST_NON_CP;
	adev->doorbell_index.last_non_cp = AMDGPU_NAVI10_DOORBELL64_LAST_NON_CP;

	adev->doorbell_index.max_assignment = AMDGPU_NAVI10_DOORBELL_MAX_ASSIGNMENT << 1;
	adev->doorbell_index.sdma_doorbell_range = 20;
}

static const struct amdgpu_asic_funcs nv_asic_funcs =
{
	.read_disabled_bios = &nv_read_disabled_bios,
	.read_bios_from_rom = &nv_read_bios_from_rom,
	.read_register = &nv_read_register,
	.reset = &nv_asic_reset,
	.reset_method = &nv_asic_reset_method,
	.set_vga_state = &nv_vga_set_state,
	.get_xclk = &nv_get_xclk,
	.set_uvd_clocks = &nv_set_uvd_clocks,
	.set_vce_clocks = &nv_set_vce_clocks,
	.get_config_memsize = &nv_get_config_memsize,
	.flush_hdp = &nv_flush_hdp,
	.invalidate_hdp = &nv_invalidate_hdp,
	.init_doorbell_index = &nv_init_doorbell_index,
	.need_full_reset = &nv_need_full_reset,
	.get_pcie_usage = &nv_get_pcie_usage,
	.need_reset_on_init = &nv_need_reset_on_init,
	.get_pcie_replay_count = &nv_get_pcie_replay_count,
};

static int nv_common_early_init(void *handle)
{
#define MMIO_REG_HOLE_OFFSET (0x80000 - PAGE_SIZE)
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	adev->rmmio_remap.reg_offset = MMIO_REG_HOLE_OFFSET;
	adev->rmmio_remap.bus_addr = adev->rmmio_base + MMIO_REG_HOLE_OFFSET;
	adev->smc_rreg = NULL;
	adev->smc_wreg = NULL;
	adev->pcie_rreg = &nv_pcie_rreg;
	adev->pcie_wreg = &nv_pcie_wreg;

	/* TODO: will add them during VCN v2 implementation */
	adev->uvd_ctx_rreg = NULL;
	adev->uvd_ctx_wreg = NULL;

	adev->didt_rreg = &nv_didt_rreg;
	adev->didt_wreg = &nv_didt_wreg;

	adev->asic_funcs = &nv_asic_funcs;

	adev->rev_id = nv_get_rev_id(adev);
	adev->external_rev_id = 0xff;
	switch (adev->asic_type) {
	case CHIP_NAVI10:
		adev->cg_flags = AMD_CG_SUPPORT_GFX_MGCG |
			AMD_CG_SUPPORT_GFX_CGCG |
			AMD_CG_SUPPORT_IH_CG |
			AMD_CG_SUPPORT_HDP_MGCG |
			AMD_CG_SUPPORT_HDP_LS |
			AMD_CG_SUPPORT_SDMA_MGCG |
			AMD_CG_SUPPORT_SDMA_LS |
			AMD_CG_SUPPORT_MC_MGCG |
			AMD_CG_SUPPORT_MC_LS |
			AMD_CG_SUPPORT_ATHUB_MGCG |
			AMD_CG_SUPPORT_ATHUB_LS |
			AMD_CG_SUPPORT_VCN_MGCG |
			AMD_CG_SUPPORT_BIF_MGCG |
			AMD_CG_SUPPORT_BIF_LS;
		adev->pg_flags = AMD_PG_SUPPORT_VCN |
			AMD_PG_SUPPORT_VCN_DPG |
			AMD_PG_SUPPORT_ATHUB;
		adev->external_rev_id = adev->rev_id + 0x1;
		break;
	case CHIP_NAVI14:
		adev->cg_flags = AMD_CG_SUPPORT_GFX_MGCG |
			AMD_CG_SUPPORT_GFX_CGCG |
			AMD_CG_SUPPORT_IH_CG |
			AMD_CG_SUPPORT_HDP_MGCG |
			AMD_CG_SUPPORT_HDP_LS |
			AMD_CG_SUPPORT_SDMA_MGCG |
			AMD_CG_SUPPORT_SDMA_LS |
			AMD_CG_SUPPORT_MC_MGCG |
			AMD_CG_SUPPORT_MC_LS |
			AMD_CG_SUPPORT_ATHUB_MGCG |
			AMD_CG_SUPPORT_ATHUB_LS |
			AMD_CG_SUPPORT_VCN_MGCG |
			AMD_CG_SUPPORT_BIF_MGCG |
			AMD_CG_SUPPORT_BIF_LS;
		adev->pg_flags = AMD_PG_SUPPORT_VCN |
			AMD_PG_SUPPORT_VCN_DPG;
		adev->external_rev_id = adev->rev_id + 20;
		break;
	case CHIP_NAVI12:
		adev->cg_flags = AMD_CG_SUPPORT_GFX_MGCG |
			AMD_CG_SUPPORT_GFX_MGLS |
			AMD_CG_SUPPORT_GFX_CGCG |
			AMD_CG_SUPPORT_GFX_CP_LS |
			AMD_CG_SUPPORT_GFX_RLC_LS |
			AMD_CG_SUPPORT_IH_CG |
			AMD_CG_SUPPORT_HDP_MGCG |
			AMD_CG_SUPPORT_HDP_LS |
			AMD_CG_SUPPORT_SDMA_MGCG |
			AMD_CG_SUPPORT_SDMA_LS |
			AMD_CG_SUPPORT_MC_MGCG |
			AMD_CG_SUPPORT_MC_LS |
			AMD_CG_SUPPORT_ATHUB_MGCG |
			AMD_CG_SUPPORT_ATHUB_LS |
			AMD_CG_SUPPORT_VCN_MGCG;
		adev->pg_flags = AMD_PG_SUPPORT_VCN |
			AMD_PG_SUPPORT_VCN_DPG |
			AMD_PG_SUPPORT_ATHUB;
		adev->external_rev_id = adev->rev_id + 0xa;
		break;
	default:
		/* FIXME: not supported yet */
		return -EINVAL;
	}

	if (amdgpu_sriov_vf(adev)) {
		amdgpu_virt_init_setting(adev);
		xgpu_nv_mailbox_set_irq_funcs(adev);
	}

	return 0;
}

static int nv_common_late_init(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	if (amdgpu_sriov_vf(adev))
		xgpu_nv_mailbox_get_irq(adev);

	return 0;
}

static int nv_common_sw_init(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	if (amdgpu_sriov_vf(adev))
		xgpu_nv_mailbox_add_irq_id(adev);

	return 0;
}

static int nv_common_sw_fini(void *handle)
{
	return 0;
}

static int nv_common_hw_init(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	/* enable pcie gen2/3 link */
	nv_pcie_gen3_enable(adev);
	/* enable aspm */
	nv_program_aspm(adev);
	/* setup nbio registers */
	adev->nbio.funcs->init_registers(adev);
	/* remap HDP registers to a hole in mmio space,
	 * for the purpose of exposing those registers
	 * to process space
	 */
	if (adev->nbio.funcs->remap_hdp_registers)
		adev->nbio.funcs->remap_hdp_registers(adev);
	/* enable the doorbell aperture */
	nv_enable_doorbell_aperture(adev, true);

	return 0;
}

static int nv_common_hw_fini(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	/* disable the doorbell aperture */
	nv_enable_doorbell_aperture(adev, false);

	return 0;
}

static int nv_common_suspend(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	return nv_common_hw_fini(adev);
}

static int nv_common_resume(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	return nv_common_hw_init(adev);
}

static bool nv_common_is_idle(void *handle)
{
	return true;
}

static int nv_common_wait_for_idle(void *handle)
{
	return 0;
}

static int nv_common_soft_reset(void *handle)
{
	return 0;
}

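/*
 * HDP 5.0 memory power gating supports three mutually exclusive modes:
 * light sleep (LS), deep sleep (DS) and shutdown (SD), selected through
 * HDP_MEM_POWER_CTRL for the IPH and RC memories. The helper below picks
 * whichever single mode the cg_flags allow.
 */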
static void nv_update_hdp_mem_power_gating(struct amdgpu_device *adev,
					   bool enable)
{
	uint32_t hdp_clk_cntl, hdp_clk_cntl1;
	uint32_t hdp_mem_pwr_cntl;

	if (!(adev->cg_flags & (AMD_CG_SUPPORT_HDP_LS |
				AMD_CG_SUPPORT_HDP_DS |
				AMD_CG_SUPPORT_HDP_SD)))
		return;

	hdp_clk_cntl = hdp_clk_cntl1 = RREG32_SOC15(HDP, 0, mmHDP_CLK_CNTL);
	hdp_mem_pwr_cntl = RREG32_SOC15(HDP, 0, mmHDP_MEM_POWER_CTRL);

	/* Before doing the clock/power mode switch,
	 * force the IPH & RC clocks on */
	hdp_clk_cntl = REG_SET_FIELD(hdp_clk_cntl, HDP_CLK_CNTL,
				     IPH_MEM_CLK_SOFT_OVERRIDE, 1);
	hdp_clk_cntl = REG_SET_FIELD(hdp_clk_cntl, HDP_CLK_CNTL,
				     RC_MEM_CLK_SOFT_OVERRIDE, 1);
	WREG32_SOC15(HDP, 0, mmHDP_CLK_CNTL, hdp_clk_cntl);

	/* HDP 5.0 doesn't support dynamic power mode switch,
	 * disable clock and power gating before any change */
	hdp_mem_pwr_cntl = REG_SET_FIELD(hdp_mem_pwr_cntl, HDP_MEM_POWER_CTRL,
					 IPH_MEM_POWER_CTRL_EN, 0);
	hdp_mem_pwr_cntl = REG_SET_FIELD(hdp_mem_pwr_cntl, HDP_MEM_POWER_CTRL,
					 IPH_MEM_POWER_LS_EN, 0);
	hdp_mem_pwr_cntl = REG_SET_FIELD(hdp_mem_pwr_cntl, HDP_MEM_POWER_CTRL,
					 IPH_MEM_POWER_DS_EN, 0);
	hdp_mem_pwr_cntl = REG_SET_FIELD(hdp_mem_pwr_cntl, HDP_MEM_POWER_CTRL,
					 IPH_MEM_POWER_SD_EN, 0);
	hdp_mem_pwr_cntl = REG_SET_FIELD(hdp_mem_pwr_cntl, HDP_MEM_POWER_CTRL,
					 RC_MEM_POWER_CTRL_EN, 0);
	hdp_mem_pwr_cntl = REG_SET_FIELD(hdp_mem_pwr_cntl, HDP_MEM_POWER_CTRL,
					 RC_MEM_POWER_LS_EN, 0);
	hdp_mem_pwr_cntl = REG_SET_FIELD(hdp_mem_pwr_cntl, HDP_MEM_POWER_CTRL,
					 RC_MEM_POWER_DS_EN, 0);
	hdp_mem_pwr_cntl = REG_SET_FIELD(hdp_mem_pwr_cntl, HDP_MEM_POWER_CTRL,
					 RC_MEM_POWER_SD_EN, 0);
	WREG32_SOC15(HDP, 0, mmHDP_MEM_POWER_CTRL, hdp_mem_pwr_cntl);

	/* only one clock gating mode (LS/DS/SD) can be enabled */
	if (adev->cg_flags & AMD_CG_SUPPORT_HDP_LS) {
		hdp_mem_pwr_cntl = REG_SET_FIELD(hdp_mem_pwr_cntl,
						 HDP_MEM_POWER_CTRL,
						 IPH_MEM_POWER_LS_EN, enable);
		hdp_mem_pwr_cntl = REG_SET_FIELD(hdp_mem_pwr_cntl,
						 HDP_MEM_POWER_CTRL,
						 RC_MEM_POWER_LS_EN, enable);
	} else if (adev->cg_flags & AMD_CG_SUPPORT_HDP_DS) {
		hdp_mem_pwr_cntl = REG_SET_FIELD(hdp_mem_pwr_cntl,
						 HDP_MEM_POWER_CTRL,
						 IPH_MEM_POWER_DS_EN, enable);
		hdp_mem_pwr_cntl = REG_SET_FIELD(hdp_mem_pwr_cntl,
						 HDP_MEM_POWER_CTRL,
						 RC_MEM_POWER_DS_EN, enable);
	} else if (adev->cg_flags & AMD_CG_SUPPORT_HDP_SD) {
		hdp_mem_pwr_cntl = REG_SET_FIELD(hdp_mem_pwr_cntl,
						 HDP_MEM_POWER_CTRL,
						 IPH_MEM_POWER_SD_EN, enable);
		/* RC should not use shut down mode, fallback to ds */
		hdp_mem_pwr_cntl = REG_SET_FIELD(hdp_mem_pwr_cntl,
						 HDP_MEM_POWER_CTRL,
						 RC_MEM_POWER_DS_EN, enable);
	}

	WREG32_SOC15(HDP, 0, mmHDP_MEM_POWER_CTRL, hdp_mem_pwr_cntl);

	/* restore IPH & RC clock override after clock/power mode changing */
	WREG32_SOC15(HDP, 0, mmHDP_CLK_CNTL, hdp_clk_cntl1);
}

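/*
 * HDP MGCG works by clearing the per-client CLK_SOFT_OVERRIDE bits so the
 * clocks may be gated dynamically; setting the override bits forces the
 * clocks on and effectively disables the gating.
 */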
static void nv_update_hdp_clock_gating(struct amdgpu_device *adev,
				       bool enable)
{
	uint32_t hdp_clk_cntl;

	if (!(adev->cg_flags & AMD_CG_SUPPORT_HDP_MGCG))
		return;

	hdp_clk_cntl = RREG32_SOC15(HDP, 0, mmHDP_CLK_CNTL);

	if (enable) {
		hdp_clk_cntl &=
			~(uint32_t)
			(HDP_CLK_CNTL__IPH_MEM_CLK_SOFT_OVERRIDE_MASK |
			 HDP_CLK_CNTL__RC_MEM_CLK_SOFT_OVERRIDE_MASK |
			 HDP_CLK_CNTL__DBUS_CLK_SOFT_OVERRIDE_MASK |
			 HDP_CLK_CNTL__DYN_CLK_SOFT_OVERRIDE_MASK |
			 HDP_CLK_CNTL__XDP_REG_CLK_SOFT_OVERRIDE_MASK |
			 HDP_CLK_CNTL__HDP_REG_CLK_SOFT_OVERRIDE_MASK);
	} else {
		hdp_clk_cntl |= HDP_CLK_CNTL__IPH_MEM_CLK_SOFT_OVERRIDE_MASK |
			HDP_CLK_CNTL__RC_MEM_CLK_SOFT_OVERRIDE_MASK |
			HDP_CLK_CNTL__DBUS_CLK_SOFT_OVERRIDE_MASK |
			HDP_CLK_CNTL__DYN_CLK_SOFT_OVERRIDE_MASK |
			HDP_CLK_CNTL__XDP_REG_CLK_SOFT_OVERRIDE_MASK |
			HDP_CLK_CNTL__HDP_REG_CLK_SOFT_OVERRIDE_MASK;
	}

	WREG32_SOC15(HDP, 0, mmHDP_CLK_CNTL, hdp_clk_cntl);
}

static int nv_common_set_clockgating_state(void *handle,
					   enum amd_clockgating_state state)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	if (amdgpu_sriov_vf(adev))
		return 0;

	switch (adev->asic_type) {
	case CHIP_NAVI10:
	case CHIP_NAVI14:
	case CHIP_NAVI12:
		adev->nbio.funcs->update_medium_grain_clock_gating(adev,
				state == AMD_CG_STATE_GATE ? true : false);
		adev->nbio.funcs->update_medium_grain_light_sleep(adev,
				state == AMD_CG_STATE_GATE ? true : false);
		nv_update_hdp_mem_power_gating(adev,
				state == AMD_CG_STATE_GATE ? true : false);
		nv_update_hdp_clock_gating(adev,
				state == AMD_CG_STATE_GATE ? true : false);
		break;
	default:
		break;
	}

	return 0;
}

static int nv_common_set_powergating_state(void *handle,
					   enum amd_powergating_state state)
{
	/* TODO */
	return 0;
}

static void nv_common_get_clockgating_state(void *handle, u32 *flags)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	uint32_t tmp;

	if (amdgpu_sriov_vf(adev))
		*flags = 0;

	adev->nbio.funcs->get_clockgating_state(adev, flags);

	/* AMD_CG_SUPPORT_HDP_MGCG */
	tmp = RREG32_SOC15(HDP, 0, mmHDP_CLK_CNTL);
	if (!(tmp & (HDP_CLK_CNTL__IPH_MEM_CLK_SOFT_OVERRIDE_MASK |
		     HDP_CLK_CNTL__RC_MEM_CLK_SOFT_OVERRIDE_MASK |
		     HDP_CLK_CNTL__DBUS_CLK_SOFT_OVERRIDE_MASK |
		     HDP_CLK_CNTL__DYN_CLK_SOFT_OVERRIDE_MASK |
		     HDP_CLK_CNTL__XDP_REG_CLK_SOFT_OVERRIDE_MASK |
		     HDP_CLK_CNTL__HDP_REG_CLK_SOFT_OVERRIDE_MASK)))
		*flags |= AMD_CG_SUPPORT_HDP_MGCG;

	/* AMD_CG_SUPPORT_HDP_LS/DS/SD */
	tmp = RREG32_SOC15(HDP, 0, mmHDP_MEM_POWER_CTRL);
	if (tmp & HDP_MEM_POWER_CTRL__IPH_MEM_POWER_LS_EN_MASK)
		*flags |= AMD_CG_SUPPORT_HDP_LS;
	else if (tmp & HDP_MEM_POWER_CTRL__IPH_MEM_POWER_DS_EN_MASK)
		*flags |= AMD_CG_SUPPORT_HDP_DS;
	else if (tmp & HDP_MEM_POWER_CTRL__IPH_MEM_POWER_SD_EN_MASK)
		*flags |= AMD_CG_SUPPORT_HDP_SD;

	return;
}

static const struct amd_ip_funcs nv_common_ip_funcs = {
	.name = "nv_common",
	.early_init = nv_common_early_init,
	.late_init = nv_common_late_init,
	.sw_init = nv_common_sw_init,
	.sw_fini = nv_common_sw_fini,
	.hw_init = nv_common_hw_init,
	.hw_fini = nv_common_hw_fini,
	.suspend = nv_common_suspend,
	.resume = nv_common_resume,
	.is_idle = nv_common_is_idle,
	.wait_for_idle = nv_common_wait_for_idle,
	.soft_reset = nv_common_soft_reset,
	.set_clockgating_state = nv_common_set_clockgating_state,
	.set_powergating_state = nv_common_set_powergating_state,
	.get_clockgating_state = nv_common_get_clockgating_state,
};