/*
 * Copyright 2016 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 */
#include <linux/firmware.h>
#include <linux/slab.h>
#include <linux/module.h>
#include <linux/pci.h>

#include "amdgpu.h"
#include "amdgpu_atombios.h"
#include "amdgpu_ih.h"
#include "amdgpu_uvd.h"
#include "amdgpu_vce.h"
#include "amdgpu_ucode.h"
#include "amdgpu_psp.h"
#include "atom.h"
#include "amd_pcie.h"

#include "uvd/uvd_7_0_offset.h"
#include "gc/gc_9_0_offset.h"
#include "gc/gc_9_0_sh_mask.h"
#include "sdma0/sdma0_4_0_offset.h"
#include "sdma1/sdma1_4_0_offset.h"
#include "hdp/hdp_4_0_offset.h"
#include "hdp/hdp_4_0_sh_mask.h"
#include "smuio/smuio_9_0_offset.h"
#include "smuio/smuio_9_0_sh_mask.h"
#include "nbio/nbio_7_0_default.h"
#include "nbio/nbio_7_0_offset.h"
#include "nbio/nbio_7_0_sh_mask.h"
#include "nbio/nbio_7_0_smn.h"
#include "mp/mp_9_0_offset.h"

#include "soc15.h"
#include "soc15_common.h"
#include "gfx_v9_0.h"
#include "gmc_v9_0.h"
#include "gfxhub_v1_0.h"
#include "mmhub_v1_0.h"
#include "df_v1_7.h"
#include "df_v3_6.h"
#include "vega10_ih.h"
#include "sdma_v4_0.h"
#include "uvd_v7_0.h"
#include "vce_v4_0.h"
#include "vcn_v1_0.h"
#include "vcn_v2_5.h"
#include "dce_virtual.h"
#include "mxgpu_ai.h"
#include "amdgpu_smu.h"
#include "amdgpu_ras.h"
#include "amdgpu_xgmi.h"
#include <uapi/linux/kfd_ioctl.h>
#define mmMP0_MISC_CGTT_CTRL0				0x01b9
#define mmMP0_MISC_CGTT_CTRL0_BASE_IDX			0
#define mmMP0_MISC_LIGHT_SLEEP_CTRL			0x01ba
#define mmMP0_MISC_LIGHT_SLEEP_CTRL_BASE_IDX		0

/* for Vega20 register name change */
#define mmHDP_MEM_POWER_CTRL				0x00d4
#define HDP_MEM_POWER_CTRL__IPH_MEM_POWER_CTRL_EN_MASK	0x00000001L
#define HDP_MEM_POWER_CTRL__IPH_MEM_POWER_LS_EN_MASK	0x00000002L
#define HDP_MEM_POWER_CTRL__RC_MEM_POWER_CTRL_EN_MASK	0x00010000L
#define HDP_MEM_POWER_CTRL__RC_MEM_POWER_LS_EN_MASK	0x00020000L
#define mmHDP_MEM_POWER_CTRL_BASE_IDX			0
/*
 * Indirect registers accessor
 */
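/*
 * Each accessor below pairs an index register with a data register: the
 * target offset is written to the index register and the payload then moves
 * through the data register, with a per-pair spinlock serializing users.
 */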
static u32 soc15_pcie_rreg(struct amdgpu_device *adev, u32 reg)
{
	unsigned long flags, address, data;
	u32 r;

	address = adev->nbio_funcs->get_pcie_index_offset(adev);
	data = adev->nbio_funcs->get_pcie_data_offset(adev);

	spin_lock_irqsave(&adev->pcie_idx_lock, flags);
	WREG32(address, reg);
	(void)RREG32(address);
	r = RREG32(data);
	spin_unlock_irqrestore(&adev->pcie_idx_lock, flags);
	return r;
}
static void soc15_pcie_wreg(struct amdgpu_device *adev, u32 reg, u32 v)
{
	unsigned long flags, address, data;

	address = adev->nbio_funcs->get_pcie_index_offset(adev);
	data = adev->nbio_funcs->get_pcie_data_offset(adev);

	spin_lock_irqsave(&adev->pcie_idx_lock, flags);
	WREG32(address, reg);
	(void)RREG32(address);
	WREG32(data, v);
	(void)RREG32(data);
	spin_unlock_irqrestore(&adev->pcie_idx_lock, flags);
}
static u32 soc15_uvd_ctx_rreg(struct amdgpu_device *adev, u32 reg)
{
	unsigned long flags, address, data;
	u32 r;

	address = SOC15_REG_OFFSET(UVD, 0, mmUVD_CTX_INDEX);
	data = SOC15_REG_OFFSET(UVD, 0, mmUVD_CTX_DATA);

	spin_lock_irqsave(&adev->uvd_ctx_idx_lock, flags);
	WREG32(address, ((reg) & 0x1ff));
	r = RREG32(data);
	spin_unlock_irqrestore(&adev->uvd_ctx_idx_lock, flags);
	return r;
}
static void soc15_uvd_ctx_wreg(struct amdgpu_device *adev, u32 reg, u32 v)
{
	unsigned long flags, address, data;

	address = SOC15_REG_OFFSET(UVD, 0, mmUVD_CTX_INDEX);
	data = SOC15_REG_OFFSET(UVD, 0, mmUVD_CTX_DATA);

	spin_lock_irqsave(&adev->uvd_ctx_idx_lock, flags);
	WREG32(address, ((reg) & 0x1ff));
	WREG32(data, (v));
	spin_unlock_irqrestore(&adev->uvd_ctx_idx_lock, flags);
}
static u32 soc15_didt_rreg(struct amdgpu_device *adev, u32 reg)
{
	unsigned long flags, address, data;
	u32 r;

	address = SOC15_REG_OFFSET(GC, 0, mmDIDT_IND_INDEX);
	data = SOC15_REG_OFFSET(GC, 0, mmDIDT_IND_DATA);

	spin_lock_irqsave(&adev->didt_idx_lock, flags);
	WREG32(address, (reg));
	r = RREG32(data);
	spin_unlock_irqrestore(&adev->didt_idx_lock, flags);
	return r;
}
static void soc15_didt_wreg(struct amdgpu_device *adev, u32 reg, u32 v)
{
	unsigned long flags, address, data;

	address = SOC15_REG_OFFSET(GC, 0, mmDIDT_IND_INDEX);
	data = SOC15_REG_OFFSET(GC, 0, mmDIDT_IND_DATA);

	spin_lock_irqsave(&adev->didt_idx_lock, flags);
	WREG32(address, (reg));
	WREG32(data, (v));
	spin_unlock_irqrestore(&adev->didt_idx_lock, flags);
}
static u32 soc15_gc_cac_rreg(struct amdgpu_device *adev, u32 reg)
{
	unsigned long flags;
	u32 r;

	spin_lock_irqsave(&adev->gc_cac_idx_lock, flags);
	WREG32_SOC15(GC, 0, mmGC_CAC_IND_INDEX, (reg));
	r = RREG32_SOC15(GC, 0, mmGC_CAC_IND_DATA);
	spin_unlock_irqrestore(&adev->gc_cac_idx_lock, flags);
	return r;
}
static void soc15_gc_cac_wreg(struct amdgpu_device *adev, u32 reg, u32 v)
{
	unsigned long flags;

	spin_lock_irqsave(&adev->gc_cac_idx_lock, flags);
	WREG32_SOC15(GC, 0, mmGC_CAC_IND_INDEX, (reg));
	WREG32_SOC15(GC, 0, mmGC_CAC_IND_DATA, (v));
	spin_unlock_irqrestore(&adev->gc_cac_idx_lock, flags);
}
static u32 soc15_se_cac_rreg(struct amdgpu_device *adev, u32 reg)
{
	unsigned long flags;
	u32 r;

	spin_lock_irqsave(&adev->se_cac_idx_lock, flags);
	WREG32_SOC15(GC, 0, mmSE_CAC_IND_INDEX, (reg));
	r = RREG32_SOC15(GC, 0, mmSE_CAC_IND_DATA);
	spin_unlock_irqrestore(&adev->se_cac_idx_lock, flags);
	return r;
}
static void soc15_se_cac_wreg(struct amdgpu_device *adev, u32 reg, u32 v)
{
	unsigned long flags;

	spin_lock_irqsave(&adev->se_cac_idx_lock, flags);
	WREG32_SOC15(GC, 0, mmSE_CAC_IND_INDEX, (reg));
	WREG32_SOC15(GC, 0, mmSE_CAC_IND_DATA, (v));
	spin_unlock_irqrestore(&adev->se_cac_idx_lock, flags);
}
static u32 soc15_get_config_memsize(struct amdgpu_device *adev)
{
	return adev->nbio_funcs->get_memsize(adev);
}

static u32 soc15_get_xclk(struct amdgpu_device *adev)
{
	return adev->clock.spll.reference_freq;
}
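/*
 * soc15_grbm_select - program GRBM_GFX_CNTL so that subsequent GC register
 * accesses target the given micro engine (me), pipe, queue and VMID.
 */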
void soc15_grbm_select(struct amdgpu_device *adev,
		       u32 me, u32 pipe, u32 queue, u32 vmid)
{
	u32 grbm_gfx_cntl = 0;

	grbm_gfx_cntl = REG_SET_FIELD(grbm_gfx_cntl, GRBM_GFX_CNTL, PIPEID, pipe);
	grbm_gfx_cntl = REG_SET_FIELD(grbm_gfx_cntl, GRBM_GFX_CNTL, MEID, me);
	grbm_gfx_cntl = REG_SET_FIELD(grbm_gfx_cntl, GRBM_GFX_CNTL, VMID, vmid);
	grbm_gfx_cntl = REG_SET_FIELD(grbm_gfx_cntl, GRBM_GFX_CNTL, QUEUEID, queue);

	WREG32_SOC15_RLC_SHADOW(GC, 0, mmGRBM_GFX_CNTL, grbm_gfx_cntl);
}
static void soc15_vga_set_state(struct amdgpu_device *adev, bool state)
{
	/* todo */
}
static bool soc15_read_disabled_bios(struct amdgpu_device *adev)
{
	/* todo */
	return false;
}
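/*
 * Read the discrete GPU's video BIOS image out of the ROM: ROM_INDEX is
 * reset to 0 and each read of ROM_DATA then streams back one dword of the
 * image.
 */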
static bool soc15_read_bios_from_rom(struct amdgpu_device *adev,
				     u8 *bios, u32 length_bytes)
{
	u32 *dw_ptr;
	u32 i, length_dw;

	if (bios == NULL)
		return false;
	if (length_bytes == 0)
		return false;
	/* APU vbios image is part of sbios image */
	if (adev->flags & AMD_IS_APU)
		return false;

	dw_ptr = (u32 *)bios;
	length_dw = ALIGN(length_bytes, 4) / 4;

	/* set rom index to 0 */
	WREG32(SOC15_REG_OFFSET(SMUIO, 0, mmROM_INDEX), 0);
	/* read out the rom data */
	for (i = 0; i < length_dw; i++)
		dw_ptr[i] = RREG32(SOC15_REG_OFFSET(SMUIO, 0, mmROM_DATA));

	return true;
}
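/*
 * Allowlist of registers that soc15_read_register() will hand back to
 * callers (e.g. the AMDGPU_INFO_READ_MMR_REG ioctl); any offset not listed
 * here is rejected with -EINVAL.
 */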
static struct soc15_allowed_register_entry soc15_allowed_read_registers[] = {
	{ SOC15_REG_ENTRY(GC, 0, mmGRBM_STATUS)},
	{ SOC15_REG_ENTRY(GC, 0, mmGRBM_STATUS2)},
	{ SOC15_REG_ENTRY(GC, 0, mmGRBM_STATUS_SE0)},
	{ SOC15_REG_ENTRY(GC, 0, mmGRBM_STATUS_SE1)},
	{ SOC15_REG_ENTRY(GC, 0, mmGRBM_STATUS_SE2)},
	{ SOC15_REG_ENTRY(GC, 0, mmGRBM_STATUS_SE3)},
	{ SOC15_REG_ENTRY(SDMA0, 0, mmSDMA0_STATUS_REG)},
	{ SOC15_REG_ENTRY(SDMA1, 0, mmSDMA1_STATUS_REG)},
	{ SOC15_REG_ENTRY(GC, 0, mmCP_STAT)},
	{ SOC15_REG_ENTRY(GC, 0, mmCP_STALLED_STAT1)},
	{ SOC15_REG_ENTRY(GC, 0, mmCP_STALLED_STAT2)},
	{ SOC15_REG_ENTRY(GC, 0, mmCP_STALLED_STAT3)},
	{ SOC15_REG_ENTRY(GC, 0, mmCP_CPF_BUSY_STAT)},
	{ SOC15_REG_ENTRY(GC, 0, mmCP_CPF_STALLED_STAT1)},
	{ SOC15_REG_ENTRY(GC, 0, mmCP_CPF_STATUS)},
	{ SOC15_REG_ENTRY(GC, 0, mmCP_CPC_STALLED_STAT1)},
	{ SOC15_REG_ENTRY(GC, 0, mmCP_CPC_STATUS)},
	{ SOC15_REG_ENTRY(GC, 0, mmGB_ADDR_CONFIG)},
	{ SOC15_REG_ENTRY(GC, 0, mmDB_DEBUG2)},
};
static uint32_t soc15_read_indexed_register(struct amdgpu_device *adev, u32 se_num,
					    u32 sh_num, u32 reg_offset)
{
	uint32_t val;

	mutex_lock(&adev->grbm_idx_mutex);
	if (se_num != 0xffffffff || sh_num != 0xffffffff)
		amdgpu_gfx_select_se_sh(adev, se_num, sh_num, 0xffffffff);

	val = RREG32(reg_offset);

	if (se_num != 0xffffffff || sh_num != 0xffffffff)
		amdgpu_gfx_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff);
	mutex_unlock(&adev->grbm_idx_mutex);
	return val;
}
static uint32_t soc15_get_register_value(struct amdgpu_device *adev,
					 bool indexed, u32 se_num,
					 u32 sh_num, u32 reg_offset)
{
	if (indexed) {
		return soc15_read_indexed_register(adev, se_num, sh_num, reg_offset);
	} else {
		if (reg_offset == SOC15_REG_OFFSET(GC, 0, mmGB_ADDR_CONFIG))
			return adev->gfx.config.gb_addr_config;
		else if (reg_offset == SOC15_REG_OFFSET(GC, 0, mmDB_DEBUG2))
			return adev->gfx.config.db_debug2;
		return RREG32(reg_offset);
	}
}
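/*
 * Look the requested offset up in the allowlist above and fetch its value,
 * honoring GRBM SE/SH indexing where the entry asks for it.
 */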
static int soc15_read_register(struct amdgpu_device *adev, u32 se_num,
			       u32 sh_num, u32 reg_offset, u32 *value)
{
	uint32_t i;
	struct soc15_allowed_register_entry *en;

	*value = 0;
	for (i = 0; i < ARRAY_SIZE(soc15_allowed_read_registers); i++) {
		en = &soc15_allowed_read_registers[i];
		if (reg_offset != (adev->reg_offset[en->hwip][en->inst][en->seg]
					+ en->reg_offset))
			continue;

		*value = soc15_get_register_value(adev,
						  soc15_allowed_read_registers[i].grbm_indexed,
						  se_num, sh_num, reg_offset);
		return 0;
	}
	return -EINVAL;
}
/**
 * soc15_program_register_sequence - program an array of registers.
 *
 * @adev: amdgpu_device pointer
 * @regs: pointer to the register array
 * @array_size: size of the register array
 *
 * Programs an array of registers with AND and OR masks.
 * This is a helper for setting golden registers.
 */
void soc15_program_register_sequence(struct amdgpu_device *adev,
				     const struct soc15_reg_golden *regs,
				     const u32 array_size)
{
	const struct soc15_reg_golden *entry;
	u32 tmp, reg;
	int i;

	for (i = 0; i < array_size; ++i) {
		entry = &regs[i];
		reg = adev->reg_offset[entry->hwip][entry->instance][entry->segment] + entry->reg;

		if (entry->and_mask == 0xffffffff) {
			tmp = entry->or_mask;
		} else {
			tmp = RREG32(reg);
			tmp &= ~(entry->and_mask);
			tmp |= (entry->or_mask & entry->and_mask);
		}

		if (reg == SOC15_REG_OFFSET(GC, 0, mmPA_SC_BINNER_EVENT_CNTL_3) ||
		    reg == SOC15_REG_OFFSET(GC, 0, mmPA_SC_ENHANCE) ||
		    reg == SOC15_REG_OFFSET(GC, 0, mmPA_SC_ENHANCE_1) ||
		    reg == SOC15_REG_OFFSET(GC, 0, mmSH_MEM_CONFIG))
			WREG32_RLC(reg, tmp);
		else
			WREG32(reg, tmp);
	}
}
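/*
 * A minimal illustrative golden-register entry (not taken from any real
 * golden list), assuming the SOC15_REG_GOLDEN_VALUE(ip, inst, reg, and_mask,
 * or_mask) helper from soc15.h: clear the low byte of mmDB_DEBUG2 and set
 * bit 0.
 *
 *	static const struct soc15_reg_golden example_regs[] = {
 *		SOC15_REG_GOLDEN_VALUE(GC, 0, mmDB_DEBUG2, 0x000000ff, 0x00000001),
 *	};
 *
 *	soc15_program_register_sequence(adev, example_regs,
 *					ARRAY_SIZE(example_regs));
 */

/*
 * Mode1 reset is a PSP-assisted whole-ASIC reset: bus mastering is disabled
 * and PCI config space saved across psp_gpu_reset(), then restored; the
 * memsize register reading back anything other than 0xffffffff signals that
 * the ASIC has come out of reset.
 */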
static int soc15_asic_mode1_reset(struct amdgpu_device *adev)
{
	u32 i;
	int ret = 0;

	amdgpu_atombios_scratch_regs_engine_hung(adev, true);

	dev_info(adev->dev, "GPU mode1 reset\n");

	/* disable BM */
	pci_clear_master(adev->pdev);

	pci_save_state(adev->pdev);

	ret = psp_gpu_reset(adev);
	if (ret)
		dev_err(adev->dev, "GPU mode1 reset failed\n");

	pci_restore_state(adev->pdev);

	/* wait for asic to come out of reset */
	for (i = 0; i < adev->usec_timeout; i++) {
		u32 memsize = adev->nbio_funcs->get_memsize(adev);

		if (memsize != 0xffffffff)
			break;
		udelay(1);
	}

	amdgpu_atombios_scratch_regs_engine_hung(adev, false);

	return ret;
}
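/*
 * BACO (Bus Active, Chip Off) support: capability is queried from the
 * powerplay layer, and a BACO reset is performed by entering and then
 * immediately exiting the BACO state via set_asic_baco_state().
 */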
static int soc15_asic_get_baco_capability(struct amdgpu_device *adev, bool *cap)
{
	void *pp_handle = adev->powerplay.pp_handle;
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;

	if (!pp_funcs || !pp_funcs->get_asic_baco_capability) {
		*cap = false;
		return -ENOENT;
	}

	return pp_funcs->get_asic_baco_capability(pp_handle, cap);
}
static int soc15_asic_baco_reset(struct amdgpu_device *adev)
{
	void *pp_handle = adev->powerplay.pp_handle;
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;

	if (!pp_funcs || !pp_funcs->get_asic_baco_state || !pp_funcs->set_asic_baco_state)
		return -ENOENT;

	/* enter BACO state */
	if (pp_funcs->set_asic_baco_state(pp_handle, 1))
		return -EIO;

	/* exit BACO state */
	if (pp_funcs->set_asic_baco_state(pp_handle, 0))
		return -EIO;

	dev_info(adev->dev, "GPU BACO reset\n");

	adev->in_baco_reset = 1;

	return 0;
}
static enum amd_reset_method
soc15_asic_reset_method(struct amdgpu_device *adev)
{
	bool baco_reset;

	switch (adev->asic_type) {
	case CHIP_RAVEN:
		return AMD_RESET_METHOD_MODE2;
	case CHIP_VEGA10:
	case CHIP_VEGA12:
		soc15_asic_get_baco_capability(adev, &baco_reset);
		break;
	case CHIP_VEGA20:
		if (adev->psp.sos_fw_version >= 0x80067)
			soc15_asic_get_baco_capability(adev, &baco_reset);
		else
			baco_reset = false;
		if (baco_reset) {
			struct amdgpu_hive_info *hive = amdgpu_get_xgmi_hive(adev, 0);
			struct amdgpu_ras *ras = amdgpu_ras_get_context(adev);

			if (hive || (ras && ras->supported))
				baco_reset = false;
		}
		break;
	default:
		baco_reset = false;
		break;
	}

	if (baco_reset)
		return AMD_RESET_METHOD_BACO;
	else
		return AMD_RESET_METHOD_MODE1;
}
static int soc15_asic_reset(struct amdgpu_device *adev)
{
	int ret;

	if (soc15_asic_reset_method(adev) == AMD_RESET_METHOD_BACO)
		ret = soc15_asic_baco_reset(adev);
	else
		ret = soc15_asic_mode1_reset(adev);

	return ret;
}
/*static int soc15_set_uvd_clock(struct amdgpu_device *adev, u32 clock,
			u32 cntl_reg, u32 status_reg)
{
	return 0;
}*/
static int soc15_set_uvd_clocks(struct amdgpu_device *adev, u32 vclk, u32 dclk)
{
	/*int r;

	r = soc15_set_uvd_clock(adev, vclk, ixCG_VCLK_CNTL, ixCG_VCLK_STATUS);
	if (r)
		return r;

	r = soc15_set_uvd_clock(adev, dclk, ixCG_DCLK_CNTL, ixCG_DCLK_STATUS);
	*/
	return 0;
}
static int soc15_set_vce_clocks(struct amdgpu_device *adev, u32 evclk, u32 ecclk)
{
	/* todo */

	return 0;
}
static void soc15_pcie_gen3_enable(struct amdgpu_device *adev)
{
	if (pci_is_root_bus(adev->pdev->bus))
		return;

	if (amdgpu_pcie_gen2 == 0)
		return;

	if (adev->flags & AMD_IS_APU)
		return;

	if (!(adev->pm.pcie_gen_mask & (CAIL_PCIE_LINK_SPEED_SUPPORT_GEN2 |
					CAIL_PCIE_LINK_SPEED_SUPPORT_GEN3)))
		return;

	/* todo */
}
static void soc15_program_aspm(struct amdgpu_device *adev)
{
	if (amdgpu_aspm == 0)
		return;

	/* todo */
}
static void soc15_enable_doorbell_aperture(struct amdgpu_device *adev,
					   bool enable)
{
	adev->nbio_funcs->enable_doorbell_aperture(adev, enable);
	adev->nbio_funcs->enable_doorbell_selfring_aperture(adev, enable);
}
static const struct amdgpu_ip_block_version vega10_common_ip_block =
{
	.type = AMD_IP_BLOCK_TYPE_COMMON,
	.major = 2,
	.minor = 0,
	.rev = 0,
	.funcs = &soc15_common_ip_funcs,
};
static uint32_t soc15_get_rev_id(struct amdgpu_device *adev)
{
	return adev->nbio_funcs->get_rev_id(adev);
}
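/*
 * Bind the per-ASIC NBIO/DF callbacks and virtualization ops, then register
 * the IP blocks (common, GMC, IH, PSP, GFX, SDMA, display, media) in the
 * order they must be initialized.
 */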
int soc15_set_ip_blocks(struct amdgpu_device *adev)
{
	/* Set IP register base before any HW register access */
	switch (adev->asic_type) {
	case CHIP_VEGA10:
	case CHIP_VEGA12:
	case CHIP_RAVEN:
		vega10_reg_base_init(adev);
		break;
	case CHIP_VEGA20:
		vega20_reg_base_init(adev);
		break;
	case CHIP_ARCTURUS:
		arct_reg_base_init(adev);
		break;
	default:
		return -EINVAL;
	}

	if (adev->asic_type == CHIP_VEGA20 || adev->asic_type == CHIP_ARCTURUS)
		adev->gmc.xgmi.supported = true;

	if (adev->flags & AMD_IS_APU)
		adev->nbio_funcs = &nbio_v7_0_funcs;
	else if (adev->asic_type == CHIP_VEGA20 ||
		adev->asic_type == CHIP_ARCTURUS)
		adev->nbio_funcs = &nbio_v7_4_funcs;
	else
		adev->nbio_funcs = &nbio_v6_1_funcs;

	if (adev->asic_type == CHIP_VEGA20 || adev->asic_type == CHIP_ARCTURUS)
		adev->df_funcs = &df_v3_6_funcs;
	else
		adev->df_funcs = &df_v1_7_funcs;

	adev->rev_id = soc15_get_rev_id(adev);
	adev->nbio_funcs->detect_hw_virt(adev);

	if (amdgpu_sriov_vf(adev))
		adev->virt.ops = &xgpu_ai_virt_ops;

	switch (adev->asic_type) {
	case CHIP_VEGA10:
	case CHIP_VEGA12:
	case CHIP_VEGA20:
		amdgpu_device_ip_block_add(adev, &vega10_common_ip_block);
		amdgpu_device_ip_block_add(adev, &gmc_v9_0_ip_block);

		/* For Vega10 SR-IOV, PSP need to be initialized before IH */
		if (amdgpu_sriov_vf(adev)) {
			if (likely(adev->firmware.load_type == AMDGPU_FW_LOAD_PSP)) {
				if (adev->asic_type == CHIP_VEGA20)
					amdgpu_device_ip_block_add(adev, &psp_v11_0_ip_block);
				else
					amdgpu_device_ip_block_add(adev, &psp_v3_1_ip_block);
			}
			amdgpu_device_ip_block_add(adev, &vega10_ih_ip_block);
		} else {
			amdgpu_device_ip_block_add(adev, &vega10_ih_ip_block);
			if (likely(adev->firmware.load_type == AMDGPU_FW_LOAD_PSP)) {
				if (adev->asic_type == CHIP_VEGA20)
					amdgpu_device_ip_block_add(adev, &psp_v11_0_ip_block);
				else
					amdgpu_device_ip_block_add(adev, &psp_v3_1_ip_block);
			}
		}
		amdgpu_device_ip_block_add(adev, &gfx_v9_0_ip_block);
		amdgpu_device_ip_block_add(adev, &sdma_v4_0_ip_block);
		if (!amdgpu_sriov_vf(adev)) {
			if (is_support_sw_smu(adev))
				amdgpu_device_ip_block_add(adev, &smu_v11_0_ip_block);
			else
				amdgpu_device_ip_block_add(adev, &pp_smu_ip_block);
		}
		if (adev->enable_virtual_display || amdgpu_sriov_vf(adev))
			amdgpu_device_ip_block_add(adev, &dce_virtual_ip_block);
#if defined(CONFIG_DRM_AMD_DC)
		else if (amdgpu_device_has_dc_support(adev))
			amdgpu_device_ip_block_add(adev, &dm_ip_block);
#endif
		if (!(adev->asic_type == CHIP_VEGA20 && amdgpu_sriov_vf(adev))) {
			amdgpu_device_ip_block_add(adev, &uvd_v7_0_ip_block);
			amdgpu_device_ip_block_add(adev, &vce_v4_0_ip_block);
		}
		break;
	case CHIP_RAVEN:
		amdgpu_device_ip_block_add(adev, &vega10_common_ip_block);
		amdgpu_device_ip_block_add(adev, &gmc_v9_0_ip_block);
		amdgpu_device_ip_block_add(adev, &vega10_ih_ip_block);
		if (likely(adev->firmware.load_type == AMDGPU_FW_LOAD_PSP))
			amdgpu_device_ip_block_add(adev, &psp_v10_0_ip_block);
		amdgpu_device_ip_block_add(adev, &gfx_v9_0_ip_block);
		amdgpu_device_ip_block_add(adev, &sdma_v4_0_ip_block);
		amdgpu_device_ip_block_add(adev, &pp_smu_ip_block);
		if (adev->enable_virtual_display || amdgpu_sriov_vf(adev))
			amdgpu_device_ip_block_add(adev, &dce_virtual_ip_block);
#if defined(CONFIG_DRM_AMD_DC)
		else if (amdgpu_device_has_dc_support(adev))
			amdgpu_device_ip_block_add(adev, &dm_ip_block);
#endif
		amdgpu_device_ip_block_add(adev, &vcn_v1_0_ip_block);
		break;
	case CHIP_ARCTURUS:
		amdgpu_device_ip_block_add(adev, &vega10_common_ip_block);
		amdgpu_device_ip_block_add(adev, &gmc_v9_0_ip_block);
		amdgpu_device_ip_block_add(adev, &vega10_ih_ip_block);
		if (adev->enable_virtual_display || amdgpu_sriov_vf(adev))
			amdgpu_device_ip_block_add(adev, &dce_virtual_ip_block);
		amdgpu_device_ip_block_add(adev, &gfx_v9_0_ip_block);
		amdgpu_device_ip_block_add(adev, &sdma_v4_0_ip_block);
		amdgpu_device_ip_block_add(adev, &vcn_v2_5_ip_block);
		break;
	default:
		return -EINVAL;
	}

	return 0;
}
static void soc15_flush_hdp(struct amdgpu_device *adev, struct amdgpu_ring *ring)
{
	adev->nbio_funcs->hdp_flush(adev, ring);
}
static void soc15_invalidate_hdp(struct amdgpu_device *adev,
				 struct amdgpu_ring *ring)
{
	if (!ring || !ring->funcs->emit_wreg)
		WREG32_SOC15_NO_KIQ(HDP, 0, mmHDP_READ_CACHE_INVALIDATE, 1);
	else
		amdgpu_ring_emit_wreg(ring, SOC15_REG_OFFSET(
			HDP, 0, mmHDP_READ_CACHE_INVALIDATE), 1);
}
static bool soc15_need_full_reset(struct amdgpu_device *adev)
{
	/* change this when we implement soft reset */
	return true;
}
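/*
 * Sample the PCIE TXCLK perf counters over a one second window: counter 0
 * counts received messages, counter 1 counts posted requests sent. Both are
 * returned as 64-bit values with the hardware's upper overflow bits folded
 * in.
 */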
static void soc15_get_pcie_usage(struct amdgpu_device *adev, uint64_t *count0,
				 uint64_t *count1)
{
	uint32_t perfctr = 0;
	uint64_t cnt0_of, cnt1_of;
	int tmp;

	/* This reports 0 on APUs, so return to avoid writing/reading registers
	 * that may or may not be different from their GPU counterparts
	 */
	if (adev->flags & AMD_IS_APU)
		return;

	/* Set the 2 events that we wish to watch, defined above */
	/* Reg 40 is # received msgs */
	perfctr = REG_SET_FIELD(perfctr, PCIE_PERF_CNTL_TXCLK, EVENT0_SEL, 40);
	/* Pre-VG20, Reg 104 is # of posted requests sent. On VG20 it's 108 */
	if (adev->asic_type == CHIP_VEGA20)
		perfctr = REG_SET_FIELD(perfctr, PCIE_PERF_CNTL_TXCLK,
					EVENT1_SEL, 108);
	else
		perfctr = REG_SET_FIELD(perfctr, PCIE_PERF_CNTL_TXCLK,
					EVENT1_SEL, 104);

	/* Write to enable desired perf counters */
	WREG32_PCIE(smnPCIE_PERF_CNTL_TXCLK, perfctr);
	/* Zero out and enable the perf counters
	 * Write 0x5:
	 * Bit 0 = Start all counters(1)
	 * Bit 2 = Global counter reset enable(1)
	 */
	WREG32_PCIE(smnPCIE_PERF_COUNT_CNTL, 0x00000005);

	msleep(1000);

	/* Load the shadow and disable the perf counters
	 * Write 0x2:
	 * Bit 0 = Stop counters(0)
	 * Bit 1 = Load the shadow counters(1)
	 */
	WREG32_PCIE(smnPCIE_PERF_COUNT_CNTL, 0x00000002);

	/* Read register values to get any >32bit overflow */
	tmp = RREG32_PCIE(smnPCIE_PERF_CNTL_TXCLK);
	cnt0_of = REG_GET_FIELD(tmp, PCIE_PERF_CNTL_TXCLK, COUNTER0_UPPER);
	cnt1_of = REG_GET_FIELD(tmp, PCIE_PERF_CNTL_TXCLK, COUNTER1_UPPER);

	/* Get the values and add the overflow */
	*count0 = RREG32_PCIE(smnPCIE_PERF_COUNT0_TXCLK) | (cnt0_of << 32);
	*count1 = RREG32_PCIE(smnPCIE_PERF_COUNT1_TXCLK) | (cnt1_of << 32);
}
static bool soc15_need_reset_on_init(struct amdgpu_device *adev)
{
	u32 sol_reg;

	/* Just return false for soc15 GPUs. Reset does not seem to
	 * be necessary.
	 */
	if (!amdgpu_passthrough(adev))
		return false;

	if (adev->flags & AMD_IS_APU)
		return false;

	/* Check sOS sign of life register to confirm sys driver and sOS
	 * have already been loaded.
	 */
	sol_reg = RREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_81);
	if (sol_reg)
		return true;

	return false;
}
static uint64_t soc15_get_pcie_replay_count(struct amdgpu_device *adev)
{
	uint64_t nak_r, nak_g;

	/* Get the number of NAKs received and generated */
	nak_r = RREG32_PCIE(smnPCIE_RX_NUM_NAK);
	nak_g = RREG32_PCIE(smnPCIE_RX_NUM_NAK_GENERATED);

	/* Add the total number of NAKs, i.e the number of replays */
	return (nak_r + nak_g);
}
static const struct amdgpu_asic_funcs soc15_asic_funcs =
{
	.read_disabled_bios = &soc15_read_disabled_bios,
	.read_bios_from_rom = &soc15_read_bios_from_rom,
	.read_register = &soc15_read_register,
	.reset = &soc15_asic_reset,
	.reset_method = &soc15_asic_reset_method,
	.set_vga_state = &soc15_vga_set_state,
	.get_xclk = &soc15_get_xclk,
	.set_uvd_clocks = &soc15_set_uvd_clocks,
	.set_vce_clocks = &soc15_set_vce_clocks,
	.get_config_memsize = &soc15_get_config_memsize,
	.flush_hdp = &soc15_flush_hdp,
	.invalidate_hdp = &soc15_invalidate_hdp,
	.need_full_reset = &soc15_need_full_reset,
	.init_doorbell_index = &vega10_doorbell_index_init,
	.get_pcie_usage = &soc15_get_pcie_usage,
	.need_reset_on_init = &soc15_need_reset_on_init,
	.get_pcie_replay_count = &soc15_get_pcie_replay_count,
};
static const struct amdgpu_asic_funcs vega20_asic_funcs =
{
	.read_disabled_bios = &soc15_read_disabled_bios,
	.read_bios_from_rom = &soc15_read_bios_from_rom,
	.read_register = &soc15_read_register,
	.reset = &soc15_asic_reset,
	.set_vga_state = &soc15_vga_set_state,
	.get_xclk = &soc15_get_xclk,
	.set_uvd_clocks = &soc15_set_uvd_clocks,
	.set_vce_clocks = &soc15_set_vce_clocks,
	.get_config_memsize = &soc15_get_config_memsize,
	.flush_hdp = &soc15_flush_hdp,
	.invalidate_hdp = &soc15_invalidate_hdp,
	.need_full_reset = &soc15_need_full_reset,
	.init_doorbell_index = &vega20_doorbell_index_init,
	.get_pcie_usage = &soc15_get_pcie_usage,
	.need_reset_on_init = &soc15_need_reset_on_init,
	.get_pcie_replay_count = &soc15_get_pcie_replay_count,
};
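/*
 * Common IP block: hooks up the indirect register accessors and per-ASIC
 * gating flags / external revision id at early init, and provides the
 * shared hw init/fini, suspend/resume and clock/powergating entry points
 * below.
 */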
static int soc15_common_early_init(void *handle)
{
#define MMIO_REG_HOLE_OFFSET (0x80000 - PAGE_SIZE)
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	adev->rmmio_remap.reg_offset = MMIO_REG_HOLE_OFFSET;
	adev->rmmio_remap.bus_addr = adev->rmmio_base + MMIO_REG_HOLE_OFFSET;
	adev->smc_rreg = NULL;
	adev->smc_wreg = NULL;
	adev->pcie_rreg = &soc15_pcie_rreg;
	adev->pcie_wreg = &soc15_pcie_wreg;
	adev->uvd_ctx_rreg = &soc15_uvd_ctx_rreg;
	adev->uvd_ctx_wreg = &soc15_uvd_ctx_wreg;
	adev->didt_rreg = &soc15_didt_rreg;
	adev->didt_wreg = &soc15_didt_wreg;
	adev->gc_cac_rreg = &soc15_gc_cac_rreg;
	adev->gc_cac_wreg = &soc15_gc_cac_wreg;
	adev->se_cac_rreg = &soc15_se_cac_rreg;
	adev->se_cac_wreg = &soc15_se_cac_wreg;

	adev->external_rev_id = 0xFF;
	switch (adev->asic_type) {
	case CHIP_VEGA10:
		adev->asic_funcs = &soc15_asic_funcs;
		adev->cg_flags = AMD_CG_SUPPORT_GFX_MGCG |
			AMD_CG_SUPPORT_GFX_MGLS |
			AMD_CG_SUPPORT_GFX_RLC_LS |
			AMD_CG_SUPPORT_GFX_CP_LS |
			AMD_CG_SUPPORT_GFX_3D_CGCG |
			AMD_CG_SUPPORT_GFX_3D_CGLS |
			AMD_CG_SUPPORT_GFX_CGCG |
			AMD_CG_SUPPORT_GFX_CGLS |
			AMD_CG_SUPPORT_BIF_MGCG |
			AMD_CG_SUPPORT_BIF_LS |
			AMD_CG_SUPPORT_HDP_LS |
			AMD_CG_SUPPORT_DRM_MGCG |
			AMD_CG_SUPPORT_DRM_LS |
			AMD_CG_SUPPORT_ROM_MGCG |
			AMD_CG_SUPPORT_DF_MGCG |
			AMD_CG_SUPPORT_SDMA_MGCG |
			AMD_CG_SUPPORT_SDMA_LS |
			AMD_CG_SUPPORT_MC_MGCG |
			AMD_CG_SUPPORT_MC_LS;
		adev->pg_flags = 0;
		adev->external_rev_id = 0x1;
		break;
	case CHIP_VEGA12:
		adev->asic_funcs = &soc15_asic_funcs;
		adev->cg_flags = AMD_CG_SUPPORT_GFX_MGCG |
			AMD_CG_SUPPORT_GFX_MGLS |
			AMD_CG_SUPPORT_GFX_CGCG |
			AMD_CG_SUPPORT_GFX_CGLS |
			AMD_CG_SUPPORT_GFX_3D_CGCG |
			AMD_CG_SUPPORT_GFX_3D_CGLS |
			AMD_CG_SUPPORT_GFX_CP_LS |
			AMD_CG_SUPPORT_MC_LS |
			AMD_CG_SUPPORT_MC_MGCG |
			AMD_CG_SUPPORT_SDMA_MGCG |
			AMD_CG_SUPPORT_SDMA_LS |
			AMD_CG_SUPPORT_BIF_MGCG |
			AMD_CG_SUPPORT_BIF_LS |
			AMD_CG_SUPPORT_HDP_MGCG |
			AMD_CG_SUPPORT_HDP_LS |
			AMD_CG_SUPPORT_ROM_MGCG |
			AMD_CG_SUPPORT_VCE_MGCG |
			AMD_CG_SUPPORT_UVD_MGCG;
		adev->pg_flags = 0;
		adev->external_rev_id = adev->rev_id + 0x14;
		break;
	case CHIP_VEGA20:
		adev->asic_funcs = &vega20_asic_funcs;
		adev->cg_flags = AMD_CG_SUPPORT_GFX_MGCG |
			AMD_CG_SUPPORT_GFX_MGLS |
			AMD_CG_SUPPORT_GFX_CGCG |
			AMD_CG_SUPPORT_GFX_CGLS |
			AMD_CG_SUPPORT_GFX_3D_CGCG |
			AMD_CG_SUPPORT_GFX_3D_CGLS |
			AMD_CG_SUPPORT_GFX_CP_LS |
			AMD_CG_SUPPORT_MC_LS |
			AMD_CG_SUPPORT_MC_MGCG |
			AMD_CG_SUPPORT_SDMA_MGCG |
			AMD_CG_SUPPORT_SDMA_LS |
			AMD_CG_SUPPORT_BIF_MGCG |
			AMD_CG_SUPPORT_BIF_LS |
			AMD_CG_SUPPORT_HDP_MGCG |
			AMD_CG_SUPPORT_HDP_LS |
			AMD_CG_SUPPORT_ROM_MGCG |
			AMD_CG_SUPPORT_VCE_MGCG |
			AMD_CG_SUPPORT_UVD_MGCG;
		adev->pg_flags = 0;
		adev->external_rev_id = adev->rev_id + 0x28;
		break;
	case CHIP_RAVEN:
		adev->asic_funcs = &soc15_asic_funcs;
		if (adev->rev_id >= 0x8)
			adev->external_rev_id = adev->rev_id + 0x79;
		else if (adev->pdev->device == 0x15d8)
			adev->external_rev_id = adev->rev_id + 0x41;
		else if (adev->rev_id == 1)
			adev->external_rev_id = adev->rev_id + 0x20;
		else
			adev->external_rev_id = adev->rev_id + 0x01;

		if (adev->rev_id >= 0x8) {
			adev->cg_flags = AMD_CG_SUPPORT_GFX_MGCG |
				AMD_CG_SUPPORT_GFX_MGLS |
				AMD_CG_SUPPORT_GFX_CP_LS |
				AMD_CG_SUPPORT_GFX_3D_CGCG |
				AMD_CG_SUPPORT_GFX_3D_CGLS |
				AMD_CG_SUPPORT_GFX_CGCG |
				AMD_CG_SUPPORT_GFX_CGLS |
				AMD_CG_SUPPORT_BIF_LS |
				AMD_CG_SUPPORT_HDP_LS |
				AMD_CG_SUPPORT_ROM_MGCG |
				AMD_CG_SUPPORT_MC_MGCG |
				AMD_CG_SUPPORT_MC_LS |
				AMD_CG_SUPPORT_SDMA_MGCG |
				AMD_CG_SUPPORT_SDMA_LS |
				AMD_CG_SUPPORT_VCN_MGCG;

			adev->pg_flags = AMD_PG_SUPPORT_SDMA | AMD_PG_SUPPORT_VCN;
		} else if (adev->pdev->device == 0x15d8) {
			adev->cg_flags = AMD_CG_SUPPORT_GFX_MGCG |
				AMD_CG_SUPPORT_GFX_MGLS |
				AMD_CG_SUPPORT_GFX_CP_LS |
				AMD_CG_SUPPORT_GFX_3D_CGCG |
				AMD_CG_SUPPORT_GFX_3D_CGLS |
				AMD_CG_SUPPORT_GFX_CGCG |
				AMD_CG_SUPPORT_GFX_CGLS |
				AMD_CG_SUPPORT_BIF_LS |
				AMD_CG_SUPPORT_HDP_LS |
				AMD_CG_SUPPORT_ROM_MGCG |
				AMD_CG_SUPPORT_MC_MGCG |
				AMD_CG_SUPPORT_MC_LS |
				AMD_CG_SUPPORT_SDMA_MGCG |
				AMD_CG_SUPPORT_SDMA_LS;

			adev->pg_flags = AMD_PG_SUPPORT_SDMA |
				AMD_PG_SUPPORT_MMHUB |
				AMD_PG_SUPPORT_VCN |
				AMD_PG_SUPPORT_VCN_DPG;
		} else {
			adev->cg_flags = AMD_CG_SUPPORT_GFX_MGCG |
				AMD_CG_SUPPORT_GFX_MGLS |
				AMD_CG_SUPPORT_GFX_RLC_LS |
				AMD_CG_SUPPORT_GFX_CP_LS |
				AMD_CG_SUPPORT_GFX_3D_CGCG |
				AMD_CG_SUPPORT_GFX_3D_CGLS |
				AMD_CG_SUPPORT_GFX_CGCG |
				AMD_CG_SUPPORT_GFX_CGLS |
				AMD_CG_SUPPORT_BIF_MGCG |
				AMD_CG_SUPPORT_BIF_LS |
				AMD_CG_SUPPORT_HDP_MGCG |
				AMD_CG_SUPPORT_HDP_LS |
				AMD_CG_SUPPORT_DRM_MGCG |
				AMD_CG_SUPPORT_DRM_LS |
				AMD_CG_SUPPORT_ROM_MGCG |
				AMD_CG_SUPPORT_MC_MGCG |
				AMD_CG_SUPPORT_MC_LS |
				AMD_CG_SUPPORT_SDMA_MGCG |
				AMD_CG_SUPPORT_SDMA_LS |
				AMD_CG_SUPPORT_VCN_MGCG;

			adev->pg_flags = AMD_PG_SUPPORT_SDMA | AMD_PG_SUPPORT_VCN;
		}

		if (adev->pm.pp_feature & PP_GFXOFF_MASK)
			adev->pg_flags |= AMD_PG_SUPPORT_GFX_PG |
				AMD_PG_SUPPORT_CP |
				AMD_PG_SUPPORT_RLC_SMU_HS;
		break;
	case CHIP_ARCTURUS:
		adev->asic_funcs = &vega20_asic_funcs;
		adev->cg_flags = 0;
		adev->pg_flags = 0;
		adev->external_rev_id = adev->rev_id + 0x32;
		break;
	default:
		/* FIXME: not supported yet */
		return -EINVAL;
	}

	if (amdgpu_sriov_vf(adev)) {
		amdgpu_virt_init_setting(adev);
		xgpu_ai_mailbox_set_irq_funcs(adev);
	}

	return 0;
}
static int soc15_common_late_init(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	if (amdgpu_sriov_vf(adev))
		xgpu_ai_mailbox_get_irq(adev);

	return 0;
}
static int soc15_common_sw_init(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	if (amdgpu_sriov_vf(adev))
		xgpu_ai_mailbox_add_irq_id(adev);

	adev->df_funcs->sw_init(adev);

	return 0;
}
static int soc15_common_sw_fini(void *handle)
{
	return 0;
}
static void soc15_doorbell_range_init(struct amdgpu_device *adev)
{
	int i;
	struct amdgpu_ring *ring;

	/* Two reasons to skip
	 *	1, Host driver already programmed them
	 *	2, To avoid registers program violations in SR-IOV
	 */
	if (!amdgpu_virt_support_skip_setting(adev)) {
		for (i = 0; i < adev->sdma.num_instances; i++) {
			ring = &adev->sdma.instance[i].ring;
			adev->nbio_funcs->sdma_doorbell_range(adev, i,
				ring->use_doorbell, ring->doorbell_index,
				adev->doorbell_index.sdma_doorbell_range);
		}
	}

	adev->nbio_funcs->ih_doorbell_range(adev, adev->irq.ih.use_doorbell,
					    adev->irq.ih.doorbell_index);
}
static int soc15_common_hw_init(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	/* enable pcie gen2/3 link */
	soc15_pcie_gen3_enable(adev);
	/* enable aspm */
	soc15_program_aspm(adev);
	/* setup nbio registers */
	adev->nbio_funcs->init_registers(adev);
	/* remap HDP registers to a hole in mmio space,
	 * in order to expose those registers
	 * to process space
	 */
	if (adev->nbio_funcs->remap_hdp_registers)
		adev->nbio_funcs->remap_hdp_registers(adev);

	/* enable the doorbell aperture */
	soc15_enable_doorbell_aperture(adev, true);
	/* HW doorbell routing policy: doorbell writing not
	 * in SDMA/IH/MM/ACV range will be routed to CP. So
	 * we need to init SDMA/IH/MM/ACV doorbell range prior
	 * to CP ip block init and ring test.
	 */
	soc15_doorbell_range_init(adev);

	return 0;
}
static int soc15_common_hw_fini(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	/* disable the doorbell aperture */
	soc15_enable_doorbell_aperture(adev, false);
	if (amdgpu_sriov_vf(adev))
		xgpu_ai_mailbox_put_irq(adev);

	return 0;
}
static int soc15_common_suspend(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	return soc15_common_hw_fini(adev);
}

static int soc15_common_resume(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	return soc15_common_hw_init(adev);
}
static bool soc15_common_is_idle(void *handle)
{
	return true;
}

static int soc15_common_wait_for_idle(void *handle)
{
	return 0;
}

static int soc15_common_soft_reset(void *handle)
{
	return 0;
}
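/*
 * HDP light sleep is controlled through HDP_MEM_POWER_CTRL on Vega20 (see
 * the register redefinitions near the top of this file) but through
 * HDP_MEM_POWER_LS on earlier SOC15 parts, hence the two paths below.
 */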
static void soc15_update_hdp_light_sleep(struct amdgpu_device *adev, bool enable)
{
	uint32_t def, data;

	if (adev->asic_type == CHIP_VEGA20) {
		def = data = RREG32(SOC15_REG_OFFSET(HDP, 0, mmHDP_MEM_POWER_CTRL));

		if (enable && (adev->cg_flags & AMD_CG_SUPPORT_HDP_LS))
			data |= HDP_MEM_POWER_CTRL__IPH_MEM_POWER_CTRL_EN_MASK |
				HDP_MEM_POWER_CTRL__IPH_MEM_POWER_LS_EN_MASK |
				HDP_MEM_POWER_CTRL__RC_MEM_POWER_CTRL_EN_MASK |
				HDP_MEM_POWER_CTRL__RC_MEM_POWER_LS_EN_MASK;
		else
			data &= ~(HDP_MEM_POWER_CTRL__IPH_MEM_POWER_CTRL_EN_MASK |
				  HDP_MEM_POWER_CTRL__IPH_MEM_POWER_LS_EN_MASK |
				  HDP_MEM_POWER_CTRL__RC_MEM_POWER_CTRL_EN_MASK |
				  HDP_MEM_POWER_CTRL__RC_MEM_POWER_LS_EN_MASK);

		if (def != data)
			WREG32(SOC15_REG_OFFSET(HDP, 0, mmHDP_MEM_POWER_CTRL), data);
	} else {
		def = data = RREG32(SOC15_REG_OFFSET(HDP, 0, mmHDP_MEM_POWER_LS));

		if (enable && (adev->cg_flags & AMD_CG_SUPPORT_HDP_LS))
			data |= HDP_MEM_POWER_LS__LS_ENABLE_MASK;
		else
			data &= ~HDP_MEM_POWER_LS__LS_ENABLE_MASK;

		if (def != data)
			WREG32(SOC15_REG_OFFSET(HDP, 0, mmHDP_MEM_POWER_LS), data);
	}
}
static void soc15_update_drm_clock_gating(struct amdgpu_device *adev, bool enable)
{
	uint32_t def, data;

	def = data = RREG32(SOC15_REG_OFFSET(MP0, 0, mmMP0_MISC_CGTT_CTRL0));

	if (enable && (adev->cg_flags & AMD_CG_SUPPORT_DRM_MGCG))
		data &= ~(0x01000000 |
			  0x02000000 |
			  0x04000000 |
			  0x08000000 |
			  0x10000000 |
			  0x20000000 |
			  0x40000000 |
			  0x80000000);
	else
		data |= (0x01000000 |
			 0x02000000 |
			 0x04000000 |
			 0x08000000 |
			 0x10000000 |
			 0x20000000 |
			 0x40000000 |
			 0x80000000);

	if (def != data)
		WREG32(SOC15_REG_OFFSET(MP0, 0, mmMP0_MISC_CGTT_CTRL0), data);
}
static void soc15_update_drm_light_sleep(struct amdgpu_device *adev, bool enable)
{
	uint32_t def, data;

	def = data = RREG32(SOC15_REG_OFFSET(MP0, 0, mmMP0_MISC_LIGHT_SLEEP_CTRL));

	if (enable && (adev->cg_flags & AMD_CG_SUPPORT_DRM_LS))
		data |= 1;
	else
		data &= ~1;

	if (def != data)
		WREG32(SOC15_REG_OFFSET(MP0, 0, mmMP0_MISC_LIGHT_SLEEP_CTRL), data);
}
static void soc15_update_rom_medium_grain_clock_gating(struct amdgpu_device *adev,
							bool enable)
{
	uint32_t def, data;

	def = data = RREG32(SOC15_REG_OFFSET(SMUIO, 0, mmCGTT_ROM_CLK_CTRL0));

	if (enable && (adev->cg_flags & AMD_CG_SUPPORT_ROM_MGCG))
		data &= ~(CGTT_ROM_CLK_CTRL0__SOFT_OVERRIDE0_MASK |
			  CGTT_ROM_CLK_CTRL0__SOFT_OVERRIDE1_MASK);
	else
		data |= CGTT_ROM_CLK_CTRL0__SOFT_OVERRIDE0_MASK |
			CGTT_ROM_CLK_CTRL0__SOFT_OVERRIDE1_MASK;

	if (def != data)
		WREG32(SOC15_REG_OFFSET(SMUIO, 0, mmCGTT_ROM_CLK_CTRL0), data);
}
static int soc15_common_set_clockgating_state(void *handle,
					      enum amd_clockgating_state state)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	if (amdgpu_sriov_vf(adev))
		return 0;

	switch (adev->asic_type) {
	case CHIP_VEGA10:
	case CHIP_VEGA12:
	case CHIP_VEGA20:
		adev->nbio_funcs->update_medium_grain_clock_gating(adev,
				state == AMD_CG_STATE_GATE ? true : false);
		adev->nbio_funcs->update_medium_grain_light_sleep(adev,
				state == AMD_CG_STATE_GATE ? true : false);
		soc15_update_hdp_light_sleep(adev,
				state == AMD_CG_STATE_GATE ? true : false);
		soc15_update_drm_clock_gating(adev,
				state == AMD_CG_STATE_GATE ? true : false);
		soc15_update_drm_light_sleep(adev,
				state == AMD_CG_STATE_GATE ? true : false);
		soc15_update_rom_medium_grain_clock_gating(adev,
				state == AMD_CG_STATE_GATE ? true : false);
		adev->df_funcs->update_medium_grain_clock_gating(adev,
				state == AMD_CG_STATE_GATE ? true : false);
		break;
	case CHIP_RAVEN:
		adev->nbio_funcs->update_medium_grain_clock_gating(adev,
				state == AMD_CG_STATE_GATE ? true : false);
		adev->nbio_funcs->update_medium_grain_light_sleep(adev,
				state == AMD_CG_STATE_GATE ? true : false);
		soc15_update_hdp_light_sleep(adev,
				state == AMD_CG_STATE_GATE ? true : false);
		soc15_update_drm_clock_gating(adev,
				state == AMD_CG_STATE_GATE ? true : false);
		soc15_update_drm_light_sleep(adev,
				state == AMD_CG_STATE_GATE ? true : false);
		soc15_update_rom_medium_grain_clock_gating(adev,
				state == AMD_CG_STATE_GATE ? true : false);
		break;
	default:
		break;
	}
	return 0;
}
static void soc15_common_get_clockgating_state(void *handle, u32 *flags)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	int data;

	if (amdgpu_sriov_vf(adev))
		*flags = 0;

	adev->nbio_funcs->get_clockgating_state(adev, flags);

	/* AMD_CG_SUPPORT_HDP_LS */
	data = RREG32(SOC15_REG_OFFSET(HDP, 0, mmHDP_MEM_POWER_LS));
	if (data & HDP_MEM_POWER_LS__LS_ENABLE_MASK)
		*flags |= AMD_CG_SUPPORT_HDP_LS;

	/* AMD_CG_SUPPORT_DRM_MGCG */
	data = RREG32(SOC15_REG_OFFSET(MP0, 0, mmMP0_MISC_CGTT_CTRL0));
	if (!(data & 0x01000000))
		*flags |= AMD_CG_SUPPORT_DRM_MGCG;

	/* AMD_CG_SUPPORT_DRM_LS */
	data = RREG32(SOC15_REG_OFFSET(MP0, 0, mmMP0_MISC_LIGHT_SLEEP_CTRL));
	if (data & 0x1)
		*flags |= AMD_CG_SUPPORT_DRM_LS;

	/* AMD_CG_SUPPORT_ROM_MGCG */
	data = RREG32(SOC15_REG_OFFSET(SMUIO, 0, mmCGTT_ROM_CLK_CTRL0));
	if (!(data & CGTT_ROM_CLK_CTRL0__SOFT_OVERRIDE0_MASK))
		*flags |= AMD_CG_SUPPORT_ROM_MGCG;

	adev->df_funcs->get_clockgating_state(adev, flags);
}
static int soc15_common_set_powergating_state(void *handle,
					      enum amd_powergating_state state)
{
	/* todo */
	return 0;
}
const struct amd_ip_funcs soc15_common_ip_funcs = {
	.name = "soc15_common",
	.early_init = soc15_common_early_init,
	.late_init = soc15_common_late_init,
	.sw_init = soc15_common_sw_init,
	.sw_fini = soc15_common_sw_fini,
	.hw_init = soc15_common_hw_init,
	.hw_fini = soc15_common_hw_fini,
	.suspend = soc15_common_suspend,
	.resume = soc15_common_resume,
	.is_idle = soc15_common_is_idle,
	.wait_for_idle = soc15_common_wait_for_idle,
	.soft_reset = soc15_common_soft_reset,
	.set_clockgating_state = soc15_common_set_clockgating_state,
	.set_powergating_state = soc15_common_set_powergating_state,
	.get_clockgating_state = soc15_common_get_clockgating_state,
};