2 * Copyright 2016 Advanced Micro Devices, Inc.
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
23 #include <linux/firmware.h>
24 #include <linux/slab.h>
25 #include <linux/module.h>
26 #include <linux/pci.h>
29 #include "amdgpu_atombios.h"
30 #include "amdgpu_ih.h"
31 #include "amdgpu_uvd.h"
32 #include "amdgpu_vce.h"
33 #include "amdgpu_ucode.h"
34 #include "amdgpu_psp.h"
38 #include "uvd/uvd_7_0_offset.h"
39 #include "gc/gc_9_0_offset.h"
40 #include "gc/gc_9_0_sh_mask.h"
41 #include "sdma0/sdma0_4_0_offset.h"
42 #include "sdma1/sdma1_4_0_offset.h"
43 #include "hdp/hdp_4_0_offset.h"
44 #include "hdp/hdp_4_0_sh_mask.h"
45 #include "smuio/smuio_9_0_offset.h"
46 #include "smuio/smuio_9_0_sh_mask.h"
47 #include "nbio/nbio_7_0_default.h"
48 #include "nbio/nbio_7_0_offset.h"
49 #include "nbio/nbio_7_0_sh_mask.h"
50 #include "nbio/nbio_7_0_smn.h"
51 #include "mp/mp_9_0_offset.h"
54 #include "soc15_common.h"
57 #include "gfxhub_v1_0.h"
58 #include "mmhub_v1_0.h"
61 #include "nbio_v6_1.h"
62 #include "nbio_v7_0.h"
63 #include "nbio_v7_4.h"
64 #include "vega10_ih.h"
65 #include "navi10_ih.h"
66 #include "sdma_v4_0.h"
71 #include "jpeg_v2_0.h"
73 #include "jpeg_v2_5.h"
74 #include "dce_virtual.h"
76 #include "amdgpu_smu.h"
77 #include "amdgpu_ras.h"
78 #include "amdgpu_xgmi.h"
79 #include <uapi/linux/kfd_ioctl.h>
81 #define mmMP0_MISC_CGTT_CTRL0 0x01b9
82 #define mmMP0_MISC_CGTT_CTRL0_BASE_IDX 0
83 #define mmMP0_MISC_LIGHT_SLEEP_CTRL 0x01ba
84 #define mmMP0_MISC_LIGHT_SLEEP_CTRL_BASE_IDX 0
86 /* for Vega20 register name change */
87 #define mmHDP_MEM_POWER_CTRL 0x00d4
88 #define HDP_MEM_POWER_CTRL__IPH_MEM_POWER_CTRL_EN_MASK 0x00000001L
89 #define HDP_MEM_POWER_CTRL__IPH_MEM_POWER_LS_EN_MASK 0x00000002L
90 #define HDP_MEM_POWER_CTRL__RC_MEM_POWER_CTRL_EN_MASK 0x00010000L
91 #define HDP_MEM_POWER_CTRL__RC_MEM_POWER_LS_EN_MASK 0x00020000L
92 #define mmHDP_MEM_POWER_CTRL_BASE_IDX 0
94 /* for Vega20/Arcturus register offset change */
95 #define mmROM_INDEX_VG20 0x00e4
96 #define mmROM_INDEX_VG20_BASE_IDX 0
97 #define mmROM_DATA_VG20 0x00e5
98 #define mmROM_DATA_VG20_BASE_IDX 0
101 * Indirect register accessors
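*
* PCIE registers are not mapped directly: the accessors below write the
* register offset to the NBIO-provided PCIE index register and move the
* value through the matching data register, with locking handled inside
* the amdgpu_device_indirect_rreg()/wreg() helpers.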
103 static u32 soc15_pcie_rreg(struct amdgpu_device *adev, u32 reg)
105 unsigned long address, data;
106 address = adev->nbio.funcs->get_pcie_index_offset(adev);
107 data = adev->nbio.funcs->get_pcie_data_offset(adev);
109 return amdgpu_device_indirect_rreg(adev, address, data, reg);
112 static void soc15_pcie_wreg(struct amdgpu_device *adev, u32 reg, u32 v)
114 unsigned long address, data;
116 address = adev->nbio.funcs->get_pcie_index_offset(adev);
117 data = adev->nbio.funcs->get_pcie_data_offset(adev);
119 amdgpu_device_indirect_wreg(adev, address, data, reg, v);
122 static u64 soc15_pcie_rreg64(struct amdgpu_device *adev, u32 reg)
124 unsigned long address, data;
125 address = adev->nbio.funcs->get_pcie_index_offset(adev);
126 data = adev->nbio.funcs->get_pcie_data_offset(adev);
128 return amdgpu_device_indirect_rreg64(adev, address, data, reg);
131 static void soc15_pcie_wreg64(struct amdgpu_device *adev, u32 reg, u64 v)
133 unsigned long address, data;
135 address = adev->nbio.funcs->get_pcie_index_offset(adev);
136 data = adev->nbio.funcs->get_pcie_data_offset(adev);
138 amdgpu_device_indirect_wreg64(adev, address, data, reg, v);
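/* The UVD_CTX, DIDT, GC_CAC and SE_CAC accessors below follow the same
 * index/data pattern, but with fixed SOC15 register pairs and their own
 * per-block spinlocks taken directly in the accessor.
 */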
141 static u32 soc15_uvd_ctx_rreg(struct amdgpu_device *adev, u32 reg)
143 unsigned long flags, address, data;
146 address = SOC15_REG_OFFSET(UVD, 0, mmUVD_CTX_INDEX);
147 data = SOC15_REG_OFFSET(UVD, 0, mmUVD_CTX_DATA);
149 spin_lock_irqsave(&adev->uvd_ctx_idx_lock, flags);
150 WREG32(address, ((reg) & 0x1ff));
152 spin_unlock_irqrestore(&adev->uvd_ctx_idx_lock, flags);
156 static void soc15_uvd_ctx_wreg(struct amdgpu_device *adev, u32 reg, u32 v)
158 unsigned long flags, address, data;
160 address = SOC15_REG_OFFSET(UVD, 0, mmUVD_CTX_INDEX);
161 data = SOC15_REG_OFFSET(UVD, 0, mmUVD_CTX_DATA);
163 spin_lock_irqsave(&adev->uvd_ctx_idx_lock, flags);
164 WREG32(address, ((reg) & 0x1ff));
166 spin_unlock_irqrestore(&adev->uvd_ctx_idx_lock, flags);
169 static u32 soc15_didt_rreg(struct amdgpu_device *adev, u32 reg)
171 unsigned long flags, address, data;
174 address = SOC15_REG_OFFSET(GC, 0, mmDIDT_IND_INDEX);
175 data = SOC15_REG_OFFSET(GC, 0, mmDIDT_IND_DATA);
177 spin_lock_irqsave(&adev->didt_idx_lock, flags);
178 WREG32(address, (reg));
180 spin_unlock_irqrestore(&adev->didt_idx_lock, flags);
184 static void soc15_didt_wreg(struct amdgpu_device *adev, u32 reg, u32 v)
186 unsigned long flags, address, data;
188 address = SOC15_REG_OFFSET(GC, 0, mmDIDT_IND_INDEX);
189 data = SOC15_REG_OFFSET(GC, 0, mmDIDT_IND_DATA);
191 spin_lock_irqsave(&adev->didt_idx_lock, flags);
192 WREG32(address, (reg));
194 spin_unlock_irqrestore(&adev->didt_idx_lock, flags);
197 static u32 soc15_gc_cac_rreg(struct amdgpu_device *adev, u32 reg)
202 spin_lock_irqsave(&adev->gc_cac_idx_lock, flags);
203 WREG32_SOC15(GC, 0, mmGC_CAC_IND_INDEX, (reg));
204 r = RREG32_SOC15(GC, 0, mmGC_CAC_IND_DATA);
205 spin_unlock_irqrestore(&adev->gc_cac_idx_lock, flags);
209 static void soc15_gc_cac_wreg(struct amdgpu_device *adev, u32 reg, u32 v)
213 spin_lock_irqsave(&adev->gc_cac_idx_lock, flags);
214 WREG32_SOC15(GC, 0, mmGC_CAC_IND_INDEX, (reg));
215 WREG32_SOC15(GC, 0, mmGC_CAC_IND_DATA, (v));
216 spin_unlock_irqrestore(&adev->gc_cac_idx_lock, flags);
219 static u32 soc15_se_cac_rreg(struct amdgpu_device *adev, u32 reg)
224 spin_lock_irqsave(&adev->se_cac_idx_lock, flags);
225 WREG32_SOC15(GC, 0, mmSE_CAC_IND_INDEX, (reg));
226 r = RREG32_SOC15(GC, 0, mmSE_CAC_IND_DATA);
227 spin_unlock_irqrestore(&adev->se_cac_idx_lock, flags);
231 static void soc15_se_cac_wreg(struct amdgpu_device *adev, u32 reg, u32 v)
235 spin_lock_irqsave(&adev->se_cac_idx_lock, flags);
236 WREG32_SOC15(GC, 0, mmSE_CAC_IND_INDEX, (reg));
237 WREG32_SOC15(GC, 0, mmSE_CAC_IND_DATA, (v));
238 spin_unlock_irqrestore(&adev->se_cac_idx_lock, flags);
241 static u32 soc15_get_config_memsize(struct amdgpu_device *adev)
243 return adev->nbio.funcs->get_memsize(adev);
246 static u32 soc15_get_xclk(struct amdgpu_device *adev)
248 u32 reference_clock = adev->clock.spll.reference_freq;
250 if (adev->asic_type == CHIP_RAVEN)
251 return reference_clock / 4;
253 return reference_clock;
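/* soc15_grbm_select - steer GC register accesses to a specific
 * ME/pipe/queue/VMID by programming GRBM_GFX_CNTL; callers typically
 * hold adev->srbm_mutex around the select/access/restore sequence.
 */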
257 void soc15_grbm_select(struct amdgpu_device *adev,
258 u32 me, u32 pipe, u32 queue, u32 vmid)
260 u32 grbm_gfx_cntl = 0;
261 grbm_gfx_cntl = REG_SET_FIELD(grbm_gfx_cntl, GRBM_GFX_CNTL, PIPEID, pipe);
262 grbm_gfx_cntl = REG_SET_FIELD(grbm_gfx_cntl, GRBM_GFX_CNTL, MEID, me);
263 grbm_gfx_cntl = REG_SET_FIELD(grbm_gfx_cntl, GRBM_GFX_CNTL, VMID, vmid);
264 grbm_gfx_cntl = REG_SET_FIELD(grbm_gfx_cntl, GRBM_GFX_CNTL, QUEUEID, queue);
266 WREG32_SOC15_RLC_SHADOW(GC, 0, mmGRBM_GFX_CNTL, grbm_gfx_cntl);
269 static void soc15_vga_set_state(struct amdgpu_device *adev, bool state)
274 static bool soc15_read_disabled_bios(struct amdgpu_device *adev)
280 static bool soc15_read_bios_from_rom(struct amdgpu_device *adev,
281 u8 *bios, u32 length_bytes)
285 uint32_t rom_index_offset;
286 uint32_t rom_data_offset;
290 if (length_bytes == 0)
292 /* APU vbios image is part of sbios image */
293 if (adev->flags & AMD_IS_APU)
296 dw_ptr = (u32 *)bios;
297 length_dw = ALIGN(length_bytes, 4) / 4;
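/* The VBIOS is read as dwords through the SMUIO ROM_INDEX/ROM_DATA pair:
 * the index is written once and the data port advances it on every read.
 * Vega20/Arcturus moved these registers, hence the *_VG20 offsets above.
 */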
299 switch (adev->asic_type) {
302 rom_index_offset = SOC15_REG_OFFSET(SMUIO, 0, mmROM_INDEX_VG20);
303 rom_data_offset = SOC15_REG_OFFSET(SMUIO, 0, mmROM_DATA_VG20);
306 rom_index_offset = SOC15_REG_OFFSET(SMUIO, 0, mmROM_INDEX);
307 rom_data_offset = SOC15_REG_OFFSET(SMUIO, 0, mmROM_DATA);
311 /* set rom index to 0 */
312 WREG32(rom_index_offset, 0);
313 /* read out the rom data */
314 for (i = 0; i < length_dw; i++)
315 dw_ptr[i] = RREG32(rom_data_offset);
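/* Registers that userspace may read through the read_register ASIC
 * callback (amdgpu_info ioctl register read path); requests for anything
 * not listed here are rejected by soc15_read_register() below.
 */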
320 static struct soc15_allowed_register_entry soc15_allowed_read_registers[] = {
321 { SOC15_REG_ENTRY(GC, 0, mmGRBM_STATUS)},
322 { SOC15_REG_ENTRY(GC, 0, mmGRBM_STATUS2)},
323 { SOC15_REG_ENTRY(GC, 0, mmGRBM_STATUS_SE0)},
324 { SOC15_REG_ENTRY(GC, 0, mmGRBM_STATUS_SE1)},
325 { SOC15_REG_ENTRY(GC, 0, mmGRBM_STATUS_SE2)},
326 { SOC15_REG_ENTRY(GC, 0, mmGRBM_STATUS_SE3)},
327 { SOC15_REG_ENTRY(SDMA0, 0, mmSDMA0_STATUS_REG)},
328 { SOC15_REG_ENTRY(SDMA1, 0, mmSDMA1_STATUS_REG)},
329 { SOC15_REG_ENTRY(GC, 0, mmCP_STAT)},
330 { SOC15_REG_ENTRY(GC, 0, mmCP_STALLED_STAT1)},
331 { SOC15_REG_ENTRY(GC, 0, mmCP_STALLED_STAT2)},
332 { SOC15_REG_ENTRY(GC, 0, mmCP_STALLED_STAT3)},
333 { SOC15_REG_ENTRY(GC, 0, mmCP_CPF_BUSY_STAT)},
334 { SOC15_REG_ENTRY(GC, 0, mmCP_CPF_STALLED_STAT1)},
335 { SOC15_REG_ENTRY(GC, 0, mmCP_CPF_STATUS)},
336 { SOC15_REG_ENTRY(GC, 0, mmCP_CPC_BUSY_STAT)},
337 { SOC15_REG_ENTRY(GC, 0, mmCP_CPC_STALLED_STAT1)},
338 { SOC15_REG_ENTRY(GC, 0, mmCP_CPC_STATUS)},
339 { SOC15_REG_ENTRY(GC, 0, mmGB_ADDR_CONFIG)},
340 { SOC15_REG_ENTRY(GC, 0, mmDB_DEBUG2)},
343 static uint32_t soc15_read_indexed_register(struct amdgpu_device *adev, u32 se_num,
344 u32 sh_num, u32 reg_offset)
348 mutex_lock(&adev->grbm_idx_mutex);
349 if (se_num != 0xffffffff || sh_num != 0xffffffff)
350 amdgpu_gfx_select_se_sh(adev, se_num, sh_num, 0xffffffff);
352 val = RREG32(reg_offset);
354 if (se_num != 0xffffffff || sh_num != 0xffffffff)
355 amdgpu_gfx_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff);
356 mutex_unlock(&adev->grbm_idx_mutex);
360 static uint32_t soc15_get_register_value(struct amdgpu_device *adev,
361 bool indexed, u32 se_num,
362 u32 sh_num, u32 reg_offset)
365 return soc15_read_indexed_register(adev, se_num, sh_num, reg_offset);
367 if (reg_offset == SOC15_REG_OFFSET(GC, 0, mmGB_ADDR_CONFIG))
368 return adev->gfx.config.gb_addr_config;
369 else if (reg_offset == SOC15_REG_OFFSET(GC, 0, mmDB_DEBUG2))
370 return adev->gfx.config.db_debug2;
371 return RREG32(reg_offset);
375 static int soc15_read_register(struct amdgpu_device *adev, u32 se_num,
376 u32 sh_num, u32 reg_offset, u32 *value)
379 struct soc15_allowed_register_entry *en;
382 for (i = 0; i < ARRAY_SIZE(soc15_allowed_read_registers); i++) {
383 en = &soc15_allowed_read_registers[i];
384 if (adev->reg_offset[en->hwip][en->inst] &&
385 reg_offset != (adev->reg_offset[en->hwip][en->inst][en->seg]
389 *value = soc15_get_register_value(adev,
390 soc15_allowed_read_registers[i].grbm_indexed,
391 se_num, sh_num, reg_offset);
399 * soc15_program_register_sequence - program an array of registers.
401 * @adev: amdgpu_device pointer
402 * @regs: pointer to the register array
403 * @array_size: size of the register array
405 * Programs an array of registers with AND and OR masks.
406 * This is a helper for setting golden registers.
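*
* Illustrative use only (the masks/values here are examples, not golden
* settings taken from this file):
*
*   static const struct soc15_reg_golden golden[] = {
*       SOC15_REG_GOLDEN_VALUE(GC, 0, mmDB_DEBUG2, 0xf00fffff, 0x00000400),
*   };
*   soc15_program_register_sequence(adev, golden, ARRAY_SIZE(golden));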
409 void soc15_program_register_sequence(struct amdgpu_device *adev,
410 const struct soc15_reg_golden *regs,
411 const u32 array_size)
413 const struct soc15_reg_golden *entry;
417 for (i = 0; i < array_size; ++i) {
419 reg = adev->reg_offset[entry->hwip][entry->instance][entry->segment] + entry->reg;
421 if (entry->and_mask == 0xffffffff) {
422 tmp = entry->or_mask;
425 tmp &= ~(entry->and_mask);
426 tmp |= (entry->or_mask & entry->and_mask);
429 if (reg == SOC15_REG_OFFSET(GC, 0, mmPA_SC_BINNER_EVENT_CNTL_3) ||
430 reg == SOC15_REG_OFFSET(GC, 0, mmPA_SC_ENHANCE) ||
431 reg == SOC15_REG_OFFSET(GC, 0, mmPA_SC_ENHANCE_1) ||
432 reg == SOC15_REG_OFFSET(GC, 0, mmSH_MEM_CONFIG))
433 WREG32_RLC(reg, tmp);
441 static int soc15_asic_mode1_reset(struct amdgpu_device *adev)
446 amdgpu_atombios_scratch_regs_engine_hung(adev, true);
448 dev_info(adev->dev, "GPU mode1 reset\n");
451 pci_clear_master(adev->pdev);
453 amdgpu_device_cache_pci_state(adev->pdev);
455 ret = psp_gpu_reset(adev);
457 dev_err(adev->dev, "GPU mode1 reset failed\n");
459 amdgpu_device_load_pci_state(adev->pdev);
461 /* wait for asic to come out of reset */
462 for (i = 0; i < adev->usec_timeout; i++) {
463 u32 memsize = adev->nbio.funcs->get_memsize(adev);
465 if (memsize != 0xffffffff)
470 amdgpu_atombios_scratch_regs_engine_hung(adev, false);
475 static int soc15_asic_baco_reset(struct amdgpu_device *adev)
477 struct amdgpu_ras *ras = amdgpu_ras_get_context(adev);
480 /* avoid the NBIF getting stuck when doing RAS recovery in BACO reset */
481 if (ras && ras->supported)
482 adev->nbio.funcs->enable_doorbell_interrupt(adev, false);
484 ret = amdgpu_dpm_baco_reset(adev);
488 /* re-enable doorbell interrupt after BACO exit */
489 if (ras && ras->supported)
490 adev->nbio.funcs->enable_doorbell_interrupt(adev, true);
495 static enum amd_reset_method
496 soc15_asic_reset_method(struct amdgpu_device *adev)
498 bool baco_reset = false;
499 struct amdgpu_ras *ras = amdgpu_ras_get_context(adev);
501 if (amdgpu_reset_method == AMD_RESET_METHOD_MODE1 ||
502 amdgpu_reset_method == AMD_RESET_METHOD_MODE2 ||
503 amdgpu_reset_method == AMD_RESET_METHOD_BACO)
504 return amdgpu_reset_method;
506 if (amdgpu_reset_method != -1)
507 dev_warn(adev->dev, "Specified reset method:%d isn't supported, using AUTO instead.\n",
508 amdgpu_reset_method);
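/* No supported reset method was requested, so pick one per ASIC below:
 * BACO is only used when amdgpu_dpm_is_baco_supported() confirms it (in
 * one case additionally gated on the PSP sOS firmware version), and
 * RAS-enabled boards with older PMFW avoid it; otherwise the driver
 * falls back to MODE1 (PSP) reset.
 */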
510 switch (adev->asic_type) {
513 return AMD_RESET_METHOD_MODE2;
517 baco_reset = amdgpu_dpm_is_baco_supported(adev);
520 if (adev->psp.sos_fw_version >= 0x80067)
521 baco_reset = amdgpu_dpm_is_baco_supported(adev);
524 * 1. PMFW version > 0x284300: all cases use BACO
525 * 2. PMFW version <= 0x284300: only sGPU without RAS uses BACO
527 if ((ras && ras->supported) && adev->pm.fw_version <= 0x283400)
535 return AMD_RESET_METHOD_BACO;
537 return AMD_RESET_METHOD_MODE1;
540 static int soc15_asic_reset(struct amdgpu_device *adev)
542 /* the original Raven doesn't support full asic reset */
543 if ((adev->apu_flags & AMD_APU_IS_RAVEN) &&
544 !(adev->apu_flags & AMD_APU_IS_RAVEN2))
547 switch (soc15_asic_reset_method(adev)) {
548 case AMD_RESET_METHOD_BACO:
549 dev_info(adev->dev, "BACO reset\n");
550 return soc15_asic_baco_reset(adev);
551 case AMD_RESET_METHOD_MODE2:
552 dev_info(adev->dev, "MODE2 reset\n");
553 return amdgpu_dpm_mode2_reset(adev);
555 dev_info(adev->dev, "MODE1 reset\n");
556 return soc15_asic_mode1_reset(adev);
560 static bool soc15_supports_baco(struct amdgpu_device *adev)
562 switch (adev->asic_type) {
566 return amdgpu_dpm_is_baco_supported(adev);
568 if (adev->psp.sos_fw_version >= 0x80067)
569 return amdgpu_dpm_is_baco_supported(adev);
576 /*static int soc15_set_uvd_clock(struct amdgpu_device *adev, u32 clock,
577 u32 cntl_reg, u32 status_reg)
582 static int soc15_set_uvd_clocks(struct amdgpu_device *adev, u32 vclk, u32 dclk)
586 r = soc15_set_uvd_clock(adev, vclk, ixCG_VCLK_CNTL, ixCG_VCLK_STATUS);
590 r = soc15_set_uvd_clock(adev, dclk, ixCG_DCLK_CNTL, ixCG_DCLK_STATUS);
595 static int soc15_set_vce_clocks(struct amdgpu_device *adev, u32 evclk, u32 ecclk)
602 static void soc15_pcie_gen3_enable(struct amdgpu_device *adev)
604 if (pci_is_root_bus(adev->pdev->bus))
607 if (amdgpu_pcie_gen2 == 0)
610 if (adev->flags & AMD_IS_APU)
613 if (!(adev->pm.pcie_gen_mask & (CAIL_PCIE_LINK_SPEED_SUPPORT_GEN2 |
614 CAIL_PCIE_LINK_SPEED_SUPPORT_GEN3)))
620 static void soc15_program_aspm(struct amdgpu_device *adev)
623 if (amdgpu_aspm == 0)
629 static void soc15_enable_doorbell_aperture(struct amdgpu_device *adev,
632 adev->nbio.funcs->enable_doorbell_aperture(adev, enable);
633 adev->nbio.funcs->enable_doorbell_selfring_aperture(adev, enable);
636 static const struct amdgpu_ip_block_version vega10_common_ip_block =
638 .type = AMD_IP_BLOCK_TYPE_COMMON,
642 .funcs = &soc15_common_ip_funcs,
645 static uint32_t soc15_get_rev_id(struct amdgpu_device *adev)
647 return adev->nbio.funcs->get_rev_id(adev);
650 static void soc15_reg_base_init(struct amdgpu_device *adev)
654 /* Set IP register base before any HW register access */
655 switch (adev->asic_type) {
659 vega10_reg_base_init(adev);
662 /* It's safe to do ip discovery here for Renoir,
663 * since it doesn't support SRIOV. */
664 if (amdgpu_discovery) {
665 r = amdgpu_discovery_reg_base_init(adev);
668 DRM_WARN("failed to init reg base from ip discovery table, "
669 "fallback to legacy init method\n");
671 vega10_reg_base_init(adev);
674 vega20_reg_base_init(adev);
677 arct_reg_base_init(adev);
680 DRM_ERROR("Unsupported asic type: %d!\n", adev->asic_type);
685 void soc15_set_virt_ops(struct amdgpu_device *adev)
687 adev->virt.ops = &xgpu_ai_virt_ops;
689 /* init soc15 reg base early enough so we can
690 * request full access for sriov before
692 soc15_reg_base_init(adev);
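/* soc15_set_ip_blocks - select the NBIO and DF register callbacks for
 * this ASIC, then register its IP blocks (common, GMC, PSP/IH, GFX,
 * SDMA, SMU, display, UVD/VCE or VCN/JPEG) in init order.
 */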
695 int soc15_set_ip_blocks(struct amdgpu_device *adev)
697 /* for bare metal case */
698 if (!amdgpu_sriov_vf(adev))
699 soc15_reg_base_init(adev);
701 if (adev->asic_type == CHIP_VEGA20 || adev->asic_type == CHIP_ARCTURUS)
702 adev->gmc.xgmi.supported = true;
704 if (adev->flags & AMD_IS_APU) {
705 adev->nbio.funcs = &nbio_v7_0_funcs;
706 adev->nbio.hdp_flush_reg = &nbio_v7_0_hdp_flush_reg;
707 } else if (adev->asic_type == CHIP_VEGA20 ||
708 adev->asic_type == CHIP_ARCTURUS) {
709 adev->nbio.funcs = &nbio_v7_4_funcs;
710 adev->nbio.hdp_flush_reg = &nbio_v7_4_hdp_flush_reg;
712 adev->nbio.funcs = &nbio_v6_1_funcs;
713 adev->nbio.hdp_flush_reg = &nbio_v6_1_hdp_flush_reg;
716 if (adev->asic_type == CHIP_VEGA20 || adev->asic_type == CHIP_ARCTURUS)
717 adev->df.funcs = &df_v3_6_funcs;
719 adev->df.funcs = &df_v1_7_funcs;
721 adev->rev_id = soc15_get_rev_id(adev);
723 switch (adev->asic_type) {
727 amdgpu_device_ip_block_add(adev, &vega10_common_ip_block);
728 amdgpu_device_ip_block_add(adev, &gmc_v9_0_ip_block);
730 /* For Vega10 SR-IOV, PSP needs to be initialized before IH */
731 if (amdgpu_sriov_vf(adev)) {
732 if (likely(adev->firmware.load_type == AMDGPU_FW_LOAD_PSP)) {
733 if (adev->asic_type == CHIP_VEGA20)
734 amdgpu_device_ip_block_add(adev, &psp_v11_0_ip_block);
736 amdgpu_device_ip_block_add(adev, &psp_v3_1_ip_block);
738 if (adev->asic_type == CHIP_VEGA20)
739 amdgpu_device_ip_block_add(adev, &navi10_ih_ip_block);
741 amdgpu_device_ip_block_add(adev, &vega10_ih_ip_block);
743 if (adev->asic_type == CHIP_VEGA20)
744 amdgpu_device_ip_block_add(adev, &navi10_ih_ip_block);
746 amdgpu_device_ip_block_add(adev, &vega10_ih_ip_block);
747 if (likely(adev->firmware.load_type == AMDGPU_FW_LOAD_PSP)) {
748 if (adev->asic_type == CHIP_VEGA20)
749 amdgpu_device_ip_block_add(adev, &psp_v11_0_ip_block);
751 amdgpu_device_ip_block_add(adev, &psp_v3_1_ip_block);
754 amdgpu_device_ip_block_add(adev, &gfx_v9_0_ip_block);
755 amdgpu_device_ip_block_add(adev, &sdma_v4_0_ip_block);
756 if (is_support_sw_smu(adev)) {
757 if (!amdgpu_sriov_vf(adev))
758 amdgpu_device_ip_block_add(adev, &smu_v11_0_ip_block);
760 amdgpu_device_ip_block_add(adev, &pp_smu_ip_block);
762 if (adev->enable_virtual_display || amdgpu_sriov_vf(adev))
763 amdgpu_device_ip_block_add(adev, &dce_virtual_ip_block);
764 #if defined(CONFIG_DRM_AMD_DC)
765 else if (amdgpu_device_has_dc_support(adev))
766 amdgpu_device_ip_block_add(adev, &dm_ip_block);
768 if (!(adev->asic_type == CHIP_VEGA20 && amdgpu_sriov_vf(adev))) {
769 amdgpu_device_ip_block_add(adev, &uvd_v7_0_ip_block);
770 amdgpu_device_ip_block_add(adev, &vce_v4_0_ip_block);
774 amdgpu_device_ip_block_add(adev, &vega10_common_ip_block);
775 amdgpu_device_ip_block_add(adev, &gmc_v9_0_ip_block);
776 amdgpu_device_ip_block_add(adev, &vega10_ih_ip_block);
777 if (likely(adev->firmware.load_type == AMDGPU_FW_LOAD_PSP))
778 amdgpu_device_ip_block_add(adev, &psp_v10_0_ip_block);
779 amdgpu_device_ip_block_add(adev, &gfx_v9_0_ip_block);
780 amdgpu_device_ip_block_add(adev, &sdma_v4_0_ip_block);
781 amdgpu_device_ip_block_add(adev, &pp_smu_ip_block);
782 if (adev->enable_virtual_display || amdgpu_sriov_vf(adev))
783 amdgpu_device_ip_block_add(adev, &dce_virtual_ip_block);
784 #if defined(CONFIG_DRM_AMD_DC)
785 else if (amdgpu_device_has_dc_support(adev))
786 amdgpu_device_ip_block_add(adev, &dm_ip_block);
788 amdgpu_device_ip_block_add(adev, &vcn_v1_0_ip_block);
791 amdgpu_device_ip_block_add(adev, &vega10_common_ip_block);
792 amdgpu_device_ip_block_add(adev, &gmc_v9_0_ip_block);
794 if (amdgpu_sriov_vf(adev)) {
795 if (likely(adev->firmware.load_type == AMDGPU_FW_LOAD_PSP))
796 amdgpu_device_ip_block_add(adev, &psp_v11_0_ip_block);
797 amdgpu_device_ip_block_add(adev, &navi10_ih_ip_block);
799 amdgpu_device_ip_block_add(adev, &navi10_ih_ip_block);
800 if (likely(adev->firmware.load_type == AMDGPU_FW_LOAD_PSP))
801 amdgpu_device_ip_block_add(adev, &psp_v11_0_ip_block);
804 if (adev->enable_virtual_display || amdgpu_sriov_vf(adev))
805 amdgpu_device_ip_block_add(adev, &dce_virtual_ip_block);
806 amdgpu_device_ip_block_add(adev, &gfx_v9_0_ip_block);
807 amdgpu_device_ip_block_add(adev, &sdma_v4_0_ip_block);
808 amdgpu_device_ip_block_add(adev, &smu_v11_0_ip_block);
810 if (amdgpu_sriov_vf(adev)) {
811 if (likely(adev->firmware.load_type == AMDGPU_FW_LOAD_PSP))
812 amdgpu_device_ip_block_add(adev, &vcn_v2_5_ip_block);
814 amdgpu_device_ip_block_add(adev, &vcn_v2_5_ip_block);
816 if (!amdgpu_sriov_vf(adev))
817 amdgpu_device_ip_block_add(adev, &jpeg_v2_5_ip_block);
820 amdgpu_device_ip_block_add(adev, &vega10_common_ip_block);
821 amdgpu_device_ip_block_add(adev, &gmc_v9_0_ip_block);
822 amdgpu_device_ip_block_add(adev, &vega10_ih_ip_block);
823 if (likely(adev->firmware.load_type == AMDGPU_FW_LOAD_PSP))
824 amdgpu_device_ip_block_add(adev, &psp_v12_0_ip_block);
825 amdgpu_device_ip_block_add(adev, &smu_v12_0_ip_block);
826 amdgpu_device_ip_block_add(adev, &gfx_v9_0_ip_block);
827 amdgpu_device_ip_block_add(adev, &sdma_v4_0_ip_block);
828 if (adev->enable_virtual_display || amdgpu_sriov_vf(adev))
829 amdgpu_device_ip_block_add(adev, &dce_virtual_ip_block);
830 #if defined(CONFIG_DRM_AMD_DC)
831 else if (amdgpu_device_has_dc_support(adev))
832 amdgpu_device_ip_block_add(adev, &dm_ip_block);
834 amdgpu_device_ip_block_add(adev, &vcn_v2_0_ip_block);
835 amdgpu_device_ip_block_add(adev, &jpeg_v2_0_ip_block);
844 static void soc15_flush_hdp(struct amdgpu_device *adev, struct amdgpu_ring *ring)
846 adev->nbio.funcs->hdp_flush(adev, ring);
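/* HDP read-cache invalidation is done either by a direct MMIO write or,
 * when a ring that can emit register writes is supplied, by queuing the
 * write on that ring so it is ordered with the submitted commands.
 */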
849 static void soc15_invalidate_hdp(struct amdgpu_device *adev,
850 struct amdgpu_ring *ring)
852 if (!ring || !ring->funcs->emit_wreg)
853 WREG32_SOC15_NO_KIQ(HDP, 0, mmHDP_READ_CACHE_INVALIDATE, 1);
855 amdgpu_ring_emit_wreg(ring, SOC15_REG_OFFSET(
856 HDP, 0, mmHDP_READ_CACHE_INVALIDATE), 1);
859 static bool soc15_need_full_reset(struct amdgpu_device *adev)
861 /* change this when we implement soft reset */
865 static void vega20_reset_hdp_ras_error_count(struct amdgpu_device *adev)
867 if (!amdgpu_ras_is_supported(adev, AMDGPU_RAS_BLOCK__HDP))
869 /* read back the HDP RAS counter to reset it to 0 */
870 RREG32_SOC15(HDP, 0, mmHDP_EDC_CNT);
873 static void soc15_get_pcie_usage(struct amdgpu_device *adev, uint64_t *count0,
876 uint32_t perfctr = 0;
877 uint64_t cnt0_of, cnt1_of;
880 /* This reports 0 on APUs, so return to avoid writing/reading registers
881 * that may or may not be different from their GPU counterparts
883 if (adev->flags & AMD_IS_APU)
886 /* Set the 2 events that we wish to watch, defined above */
887 /* Reg 40 is # received msgs */
888 /* Reg 104 is # of posted requests sent */
889 perfctr = REG_SET_FIELD(perfctr, PCIE_PERF_CNTL_TXCLK, EVENT0_SEL, 40);
890 perfctr = REG_SET_FIELD(perfctr, PCIE_PERF_CNTL_TXCLK, EVENT1_SEL, 104);
892 /* Write to enable desired perf counters */
893 WREG32_PCIE(smnPCIE_PERF_CNTL_TXCLK, perfctr);
894 /* Zero out and enable the perf counters
896 * Bit 0 = Start all counters(1)
897 * Bit 2 = Global counter reset enable(1)
899 WREG32_PCIE(smnPCIE_PERF_COUNT_CNTL, 0x00000005);
903 /* Load the shadow and disable the perf counters
905 * Bit 0 = Stop counters(0)
906 * Bit 1 = Load the shadow counters(1)
908 WREG32_PCIE(smnPCIE_PERF_COUNT_CNTL, 0x00000002);
910 /* Read register values to get any >32bit overflow */
911 tmp = RREG32_PCIE(smnPCIE_PERF_CNTL_TXCLK);
912 cnt0_of = REG_GET_FIELD(tmp, PCIE_PERF_CNTL_TXCLK, COUNTER0_UPPER);
913 cnt1_of = REG_GET_FIELD(tmp, PCIE_PERF_CNTL_TXCLK, COUNTER1_UPPER);
915 /* Get the values and add the overflow */
916 *count0 = RREG32_PCIE(smnPCIE_PERF_COUNT0_TXCLK) | (cnt0_of << 32);
917 *count1 = RREG32_PCIE(smnPCIE_PERF_COUNT1_TXCLK) | (cnt1_of << 32);
920 static void vega20_get_pcie_usage(struct amdgpu_device *adev, uint64_t *count0,
923 uint32_t perfctr = 0;
924 uint64_t cnt0_of, cnt1_of;
927 /* This reports 0 on APUs, so return to avoid writing/reading registers
928 * that may or may not be different from their GPU counterparts
930 if (adev->flags & AMD_IS_APU)
933 /* Set the 2 events that we wish to watch, defined above */
934 /* Reg 40 is # received msgs */
935 /* Reg 108 is # of posted requests sent on VG20 */
936 perfctr = REG_SET_FIELD(perfctr, PCIE_PERF_CNTL_TXCLK3,
938 perfctr = REG_SET_FIELD(perfctr, PCIE_PERF_CNTL_TXCLK3,
941 /* Write to enable desired perf counters */
942 WREG32_PCIE(smnPCIE_PERF_CNTL_TXCLK3, perfctr);
943 /* Zero out and enable the perf counters
945 * Bit 0 = Start all counters(1)
946 * Bit 2 = Global counter reset enable(1)
948 WREG32_PCIE(smnPCIE_PERF_COUNT_CNTL, 0x00000005);
952 /* Load the shadow and disable the perf counters
954 * Bit 0 = Stop counters(0)
955 * Bit 1 = Load the shadow counters(1)
957 WREG32_PCIE(smnPCIE_PERF_COUNT_CNTL, 0x00000002);
959 /* Read register values to get any >32bit overflow */
960 tmp = RREG32_PCIE(smnPCIE_PERF_CNTL_TXCLK3);
961 cnt0_of = REG_GET_FIELD(tmp, PCIE_PERF_CNTL_TXCLK3, COUNTER0_UPPER);
962 cnt1_of = REG_GET_FIELD(tmp, PCIE_PERF_CNTL_TXCLK3, COUNTER1_UPPER);
964 /* Get the values and add the overflow */
965 *count0 = RREG32_PCIE(smnPCIE_PERF_COUNT0_TXCLK3) | (cnt0_of << 32);
966 *count1 = RREG32_PCIE(smnPCIE_PERF_COUNT1_TXCLK3) | (cnt1_of << 32);
969 static bool soc15_need_reset_on_init(struct amdgpu_device *adev)
973 /* Just return false for soc15 GPUs. Reset does not seem to
976 if (!amdgpu_passthrough(adev))
979 if (adev->flags & AMD_IS_APU)
982 /* Check sOS sign of life register to confirm sys driver and sOS
983 * have already been loaded.
985 sol_reg = RREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_81);
992 static uint64_t soc15_get_pcie_replay_count(struct amdgpu_device *adev)
994 uint64_t nak_r, nak_g;
996 /* Get the number of NAKs received and generated */
997 nak_r = RREG32_PCIE(smnPCIE_RX_NUM_NAK);
998 nak_g = RREG32_PCIE(smnPCIE_RX_NUM_NAK_GENERATED);
1000 /* Add the total number of NAKs, i.e. the number of replays */
1001 return (nak_r + nak_g);
1004 static void soc15_pre_asic_init(struct amdgpu_device *adev)
1006 gmc_v9_0_restore_registers(adev);
1009 static const struct amdgpu_asic_funcs soc15_asic_funcs =
1011 .read_disabled_bios = &soc15_read_disabled_bios,
1012 .read_bios_from_rom = &soc15_read_bios_from_rom,
1013 .read_register = &soc15_read_register,
1014 .reset = &soc15_asic_reset,
1015 .reset_method = &soc15_asic_reset_method,
1016 .set_vga_state = &soc15_vga_set_state,
1017 .get_xclk = &soc15_get_xclk,
1018 .set_uvd_clocks = &soc15_set_uvd_clocks,
1019 .set_vce_clocks = &soc15_set_vce_clocks,
1020 .get_config_memsize = &soc15_get_config_memsize,
1021 .flush_hdp = &soc15_flush_hdp,
1022 .invalidate_hdp = &soc15_invalidate_hdp,
1023 .need_full_reset = &soc15_need_full_reset,
1024 .init_doorbell_index = &vega10_doorbell_index_init,
1025 .get_pcie_usage = &soc15_get_pcie_usage,
1026 .need_reset_on_init = &soc15_need_reset_on_init,
1027 .get_pcie_replay_count = &soc15_get_pcie_replay_count,
1028 .supports_baco = &soc15_supports_baco,
1029 .pre_asic_init = &soc15_pre_asic_init,
1032 static const struct amdgpu_asic_funcs vega20_asic_funcs =
1034 .read_disabled_bios = &soc15_read_disabled_bios,
1035 .read_bios_from_rom = &soc15_read_bios_from_rom,
1036 .read_register = &soc15_read_register,
1037 .reset = &soc15_asic_reset,
1038 .reset_method = &soc15_asic_reset_method,
1039 .set_vga_state = &soc15_vga_set_state,
1040 .get_xclk = &soc15_get_xclk,
1041 .set_uvd_clocks = &soc15_set_uvd_clocks,
1042 .set_vce_clocks = &soc15_set_vce_clocks,
1043 .get_config_memsize = &soc15_get_config_memsize,
1044 .flush_hdp = &soc15_flush_hdp,
1045 .invalidate_hdp = &soc15_invalidate_hdp,
1046 .reset_hdp_ras_error_count = &vega20_reset_hdp_ras_error_count,
1047 .need_full_reset = &soc15_need_full_reset,
1048 .init_doorbell_index = &vega20_doorbell_index_init,
1049 .get_pcie_usage = &vega20_get_pcie_usage,
1050 .need_reset_on_init = &soc15_need_reset_on_init,
1051 .get_pcie_replay_count = &soc15_get_pcie_replay_count,
1052 .supports_baco = &soc15_supports_baco,
1053 .pre_asic_init = &soc15_pre_asic_init,
1056 static int soc15_common_early_init(void *handle)
1058 #define MMIO_REG_HOLE_OFFSET (0x80000 - PAGE_SIZE)
1059 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
1061 adev->rmmio_remap.reg_offset = MMIO_REG_HOLE_OFFSET;
1062 adev->rmmio_remap.bus_addr = adev->rmmio_base + MMIO_REG_HOLE_OFFSET;
1063 adev->smc_rreg = NULL;
1064 adev->smc_wreg = NULL;
1065 adev->pcie_rreg = &soc15_pcie_rreg;
1066 adev->pcie_wreg = &soc15_pcie_wreg;
1067 adev->pcie_rreg64 = &soc15_pcie_rreg64;
1068 adev->pcie_wreg64 = &soc15_pcie_wreg64;
1069 adev->uvd_ctx_rreg = &soc15_uvd_ctx_rreg;
1070 adev->uvd_ctx_wreg = &soc15_uvd_ctx_wreg;
1071 adev->didt_rreg = &soc15_didt_rreg;
1072 adev->didt_wreg = &soc15_didt_wreg;
1073 adev->gc_cac_rreg = &soc15_gc_cac_rreg;
1074 adev->gc_cac_wreg = &soc15_gc_cac_wreg;
1075 adev->se_cac_rreg = &soc15_se_cac_rreg;
1076 adev->se_cac_wreg = &soc15_se_cac_wreg;
1079 adev->external_rev_id = 0xFF;
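/* Per-ASIC setup: pick the asic_funcs table and fill in the clockgating
 * (cg_flags) and powergating (pg_flags) features this part supports,
 * plus the external_rev_id reported to userspace.
 */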
1080 switch (adev->asic_type) {
1082 adev->asic_funcs = &soc15_asic_funcs;
1083 adev->cg_flags = AMD_CG_SUPPORT_GFX_MGCG |
1084 AMD_CG_SUPPORT_GFX_MGLS |
1085 AMD_CG_SUPPORT_GFX_RLC_LS |
1086 AMD_CG_SUPPORT_GFX_CP_LS |
1087 AMD_CG_SUPPORT_GFX_3D_CGCG |
1088 AMD_CG_SUPPORT_GFX_3D_CGLS |
1089 AMD_CG_SUPPORT_GFX_CGCG |
1090 AMD_CG_SUPPORT_GFX_CGLS |
1091 AMD_CG_SUPPORT_BIF_MGCG |
1092 AMD_CG_SUPPORT_BIF_LS |
1093 AMD_CG_SUPPORT_HDP_LS |
1094 AMD_CG_SUPPORT_DRM_MGCG |
1095 AMD_CG_SUPPORT_DRM_LS |
1096 AMD_CG_SUPPORT_ROM_MGCG |
1097 AMD_CG_SUPPORT_DF_MGCG |
1098 AMD_CG_SUPPORT_SDMA_MGCG |
1099 AMD_CG_SUPPORT_SDMA_LS |
1100 AMD_CG_SUPPORT_MC_MGCG |
1101 AMD_CG_SUPPORT_MC_LS;
1103 adev->external_rev_id = 0x1;
1106 adev->asic_funcs = &soc15_asic_funcs;
1107 adev->cg_flags = AMD_CG_SUPPORT_GFX_MGCG |
1108 AMD_CG_SUPPORT_GFX_MGLS |
1109 AMD_CG_SUPPORT_GFX_CGCG |
1110 AMD_CG_SUPPORT_GFX_CGLS |
1111 AMD_CG_SUPPORT_GFX_3D_CGCG |
1112 AMD_CG_SUPPORT_GFX_3D_CGLS |
1113 AMD_CG_SUPPORT_GFX_CP_LS |
1114 AMD_CG_SUPPORT_MC_LS |
1115 AMD_CG_SUPPORT_MC_MGCG |
1116 AMD_CG_SUPPORT_SDMA_MGCG |
1117 AMD_CG_SUPPORT_SDMA_LS |
1118 AMD_CG_SUPPORT_BIF_MGCG |
1119 AMD_CG_SUPPORT_BIF_LS |
1120 AMD_CG_SUPPORT_HDP_MGCG |
1121 AMD_CG_SUPPORT_HDP_LS |
1122 AMD_CG_SUPPORT_ROM_MGCG |
1123 AMD_CG_SUPPORT_VCE_MGCG |
1124 AMD_CG_SUPPORT_UVD_MGCG;
1126 adev->external_rev_id = adev->rev_id + 0x14;
1129 adev->asic_funcs = &vega20_asic_funcs;
1130 adev->cg_flags = AMD_CG_SUPPORT_GFX_MGCG |
1131 AMD_CG_SUPPORT_GFX_MGLS |
1132 AMD_CG_SUPPORT_GFX_CGCG |
1133 AMD_CG_SUPPORT_GFX_CGLS |
1134 AMD_CG_SUPPORT_GFX_3D_CGCG |
1135 AMD_CG_SUPPORT_GFX_3D_CGLS |
1136 AMD_CG_SUPPORT_GFX_CP_LS |
1137 AMD_CG_SUPPORT_MC_LS |
1138 AMD_CG_SUPPORT_MC_MGCG |
1139 AMD_CG_SUPPORT_SDMA_MGCG |
1140 AMD_CG_SUPPORT_SDMA_LS |
1141 AMD_CG_SUPPORT_BIF_MGCG |
1142 AMD_CG_SUPPORT_BIF_LS |
1143 AMD_CG_SUPPORT_HDP_MGCG |
1144 AMD_CG_SUPPORT_HDP_LS |
1145 AMD_CG_SUPPORT_ROM_MGCG |
1146 AMD_CG_SUPPORT_VCE_MGCG |
1147 AMD_CG_SUPPORT_UVD_MGCG;
1149 adev->external_rev_id = adev->rev_id + 0x28;
1152 adev->asic_funcs = &soc15_asic_funcs;
1153 if (adev->pdev->device == 0x15dd)
1154 adev->apu_flags |= AMD_APU_IS_RAVEN;
1155 if (adev->pdev->device == 0x15d8)
1156 adev->apu_flags |= AMD_APU_IS_PICASSO;
1157 if (adev->rev_id >= 0x8)
1158 adev->apu_flags |= AMD_APU_IS_RAVEN2;
1160 if (adev->apu_flags & AMD_APU_IS_RAVEN2)
1161 adev->external_rev_id = adev->rev_id + 0x79;
1162 else if (adev->apu_flags & AMD_APU_IS_PICASSO)
1163 adev->external_rev_id = adev->rev_id + 0x41;
1164 else if (adev->rev_id == 1)
1165 adev->external_rev_id = adev->rev_id + 0x20;
1167 adev->external_rev_id = adev->rev_id + 0x01;
1169 if (adev->apu_flags & AMD_APU_IS_RAVEN2) {
1170 adev->cg_flags = AMD_CG_SUPPORT_GFX_MGCG |
1171 AMD_CG_SUPPORT_GFX_MGLS |
1172 AMD_CG_SUPPORT_GFX_CP_LS |
1173 AMD_CG_SUPPORT_GFX_3D_CGCG |
1174 AMD_CG_SUPPORT_GFX_3D_CGLS |
1175 AMD_CG_SUPPORT_GFX_CGCG |
1176 AMD_CG_SUPPORT_GFX_CGLS |
1177 AMD_CG_SUPPORT_BIF_LS |
1178 AMD_CG_SUPPORT_HDP_LS |
1179 AMD_CG_SUPPORT_ROM_MGCG |
1180 AMD_CG_SUPPORT_MC_MGCG |
1181 AMD_CG_SUPPORT_MC_LS |
1182 AMD_CG_SUPPORT_SDMA_MGCG |
1183 AMD_CG_SUPPORT_SDMA_LS |
1184 AMD_CG_SUPPORT_VCN_MGCG;
1186 adev->pg_flags = AMD_PG_SUPPORT_SDMA | AMD_PG_SUPPORT_VCN;
1187 } else if (adev->apu_flags & AMD_APU_IS_PICASSO) {
1188 adev->cg_flags = AMD_CG_SUPPORT_GFX_MGCG |
1189 AMD_CG_SUPPORT_GFX_MGLS |
1190 AMD_CG_SUPPORT_GFX_CP_LS |
1191 AMD_CG_SUPPORT_GFX_3D_CGCG |
1192 AMD_CG_SUPPORT_GFX_3D_CGLS |
1193 AMD_CG_SUPPORT_GFX_CGCG |
1194 AMD_CG_SUPPORT_GFX_CGLS |
1195 AMD_CG_SUPPORT_BIF_LS |
1196 AMD_CG_SUPPORT_HDP_LS |
1197 AMD_CG_SUPPORT_ROM_MGCG |
1198 AMD_CG_SUPPORT_MC_MGCG |
1199 AMD_CG_SUPPORT_MC_LS |
1200 AMD_CG_SUPPORT_SDMA_MGCG |
1201 AMD_CG_SUPPORT_SDMA_LS;
1203 adev->pg_flags = AMD_PG_SUPPORT_SDMA |
1204 AMD_PG_SUPPORT_MMHUB |
1207 adev->cg_flags = AMD_CG_SUPPORT_GFX_MGCG |
1208 AMD_CG_SUPPORT_GFX_MGLS |
1209 AMD_CG_SUPPORT_GFX_RLC_LS |
1210 AMD_CG_SUPPORT_GFX_CP_LS |
1211 AMD_CG_SUPPORT_GFX_3D_CGCG |
1212 AMD_CG_SUPPORT_GFX_3D_CGLS |
1213 AMD_CG_SUPPORT_GFX_CGCG |
1214 AMD_CG_SUPPORT_GFX_CGLS |
1215 AMD_CG_SUPPORT_BIF_MGCG |
1216 AMD_CG_SUPPORT_BIF_LS |
1217 AMD_CG_SUPPORT_HDP_MGCG |
1218 AMD_CG_SUPPORT_HDP_LS |
1219 AMD_CG_SUPPORT_DRM_MGCG |
1220 AMD_CG_SUPPORT_DRM_LS |
1221 AMD_CG_SUPPORT_ROM_MGCG |
1222 AMD_CG_SUPPORT_MC_MGCG |
1223 AMD_CG_SUPPORT_MC_LS |
1224 AMD_CG_SUPPORT_SDMA_MGCG |
1225 AMD_CG_SUPPORT_SDMA_LS |
1226 AMD_CG_SUPPORT_VCN_MGCG;
1228 adev->pg_flags = AMD_PG_SUPPORT_SDMA | AMD_PG_SUPPORT_VCN;
1232 adev->asic_funcs = &vega20_asic_funcs;
1233 adev->cg_flags = AMD_CG_SUPPORT_GFX_MGCG |
1234 AMD_CG_SUPPORT_GFX_MGLS |
1235 AMD_CG_SUPPORT_GFX_CGCG |
1236 AMD_CG_SUPPORT_GFX_CGLS |
1237 AMD_CG_SUPPORT_GFX_CP_LS |
1238 AMD_CG_SUPPORT_HDP_MGCG |
1239 AMD_CG_SUPPORT_HDP_LS |
1240 AMD_CG_SUPPORT_SDMA_MGCG |
1241 AMD_CG_SUPPORT_SDMA_LS |
1242 AMD_CG_SUPPORT_MC_MGCG |
1243 AMD_CG_SUPPORT_MC_LS |
1244 AMD_CG_SUPPORT_IH_CG |
1245 AMD_CG_SUPPORT_VCN_MGCG |
1246 AMD_CG_SUPPORT_JPEG_MGCG;
1247 adev->pg_flags = AMD_PG_SUPPORT_VCN | AMD_PG_SUPPORT_VCN_DPG;
1248 adev->external_rev_id = adev->rev_id + 0x32;
1251 adev->asic_funcs = &soc15_asic_funcs;
1252 if (adev->pdev->device == 0x1636)
1253 adev->apu_flags |= AMD_APU_IS_RENOIR;
1255 adev->apu_flags |= AMD_APU_IS_GREEN_SARDINE;
1257 if (adev->apu_flags & AMD_APU_IS_RENOIR)
1258 adev->external_rev_id = adev->rev_id + 0x91;
1260 adev->external_rev_id = adev->rev_id + 0xa1;
1261 adev->cg_flags = AMD_CG_SUPPORT_GFX_MGCG |
1262 AMD_CG_SUPPORT_GFX_MGLS |
1263 AMD_CG_SUPPORT_GFX_3D_CGCG |
1264 AMD_CG_SUPPORT_GFX_3D_CGLS |
1265 AMD_CG_SUPPORT_GFX_CGCG |
1266 AMD_CG_SUPPORT_GFX_CGLS |
1267 AMD_CG_SUPPORT_GFX_CP_LS |
1268 AMD_CG_SUPPORT_MC_MGCG |
1269 AMD_CG_SUPPORT_MC_LS |
1270 AMD_CG_SUPPORT_SDMA_MGCG |
1271 AMD_CG_SUPPORT_SDMA_LS |
1272 AMD_CG_SUPPORT_BIF_LS |
1273 AMD_CG_SUPPORT_HDP_LS |
1274 AMD_CG_SUPPORT_ROM_MGCG |
1275 AMD_CG_SUPPORT_VCN_MGCG |
1276 AMD_CG_SUPPORT_JPEG_MGCG |
1277 AMD_CG_SUPPORT_IH_CG |
1278 AMD_CG_SUPPORT_ATHUB_LS |
1279 AMD_CG_SUPPORT_ATHUB_MGCG |
1280 AMD_CG_SUPPORT_DF_MGCG;
1281 adev->pg_flags = AMD_PG_SUPPORT_SDMA |
1282 AMD_PG_SUPPORT_VCN |
1283 AMD_PG_SUPPORT_JPEG |
1284 AMD_PG_SUPPORT_VCN_DPG;
1287 /* FIXME: not supported yet */
1291 if (amdgpu_sriov_vf(adev)) {
1292 amdgpu_virt_init_setting(adev);
1293 xgpu_ai_mailbox_set_irq_funcs(adev);
1299 static int soc15_common_late_init(void *handle)
1301 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
1304 if (amdgpu_sriov_vf(adev))
1305 xgpu_ai_mailbox_get_irq(adev);
1307 if (adev->asic_funcs &&
1308 adev->asic_funcs->reset_hdp_ras_error_count)
1309 adev->asic_funcs->reset_hdp_ras_error_count(adev);
1311 if (adev->nbio.funcs->ras_late_init)
1312 r = adev->nbio.funcs->ras_late_init(adev);
1317 static int soc15_common_sw_init(void *handle)
1319 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
1321 if (amdgpu_sriov_vf(adev))
1322 xgpu_ai_mailbox_add_irq_id(adev);
1324 adev->df.funcs->sw_init(adev);
1329 static int soc15_common_sw_fini(void *handle)
1331 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
1333 amdgpu_nbio_ras_fini(adev);
1334 adev->df.funcs->sw_fini(adev);
1338 static void soc15_doorbell_range_init(struct amdgpu_device *adev)
1341 struct amdgpu_ring *ring;
1343 /* sdma/ih doorbell ranges are programmed by the hypervisor */
1344 if (!amdgpu_sriov_vf(adev)) {
1345 for (i = 0; i < adev->sdma.num_instances; i++) {
1346 ring = &adev->sdma.instance[i].ring;
1347 adev->nbio.funcs->sdma_doorbell_range(adev, i,
1348 ring->use_doorbell, ring->doorbell_index,
1349 adev->doorbell_index.sdma_doorbell_range);
1352 adev->nbio.funcs->ih_doorbell_range(adev, adev->irq.ih.use_doorbell,
1353 adev->irq.ih.doorbell_index);
1357 static int soc15_common_hw_init(void *handle)
1359 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
1361 /* enable pcie gen2/3 link */
1362 soc15_pcie_gen3_enable(adev);
1364 soc15_program_aspm(adev);
1365 /* setup nbio registers */
1366 adev->nbio.funcs->init_registers(adev);
1367 /* remap HDP registers to a hole in mmio space,
1368 * for the purpose of exposing those registers
1371 if (adev->nbio.funcs->remap_hdp_registers)
1372 adev->nbio.funcs->remap_hdp_registers(adev);
1374 /* enable the doorbell aperture */
1375 soc15_enable_doorbell_aperture(adev, true);
1376 /* HW doorbell routing policy: doorbell writes not
1377 * in the SDMA/IH/MM/ACV range will be routed to CP. So
1378 * we need to init the SDMA/IH/MM/ACV doorbell ranges prior
1379 * to CP ip block init and ring test.
1381 soc15_doorbell_range_init(adev);
1386 static int soc15_common_hw_fini(void *handle)
1388 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
1390 /* disable the doorbell aperture */
1391 soc15_enable_doorbell_aperture(adev, false);
1392 if (amdgpu_sriov_vf(adev))
1393 xgpu_ai_mailbox_put_irq(adev);
1395 if (adev->nbio.ras_if &&
1396 amdgpu_ras_is_supported(adev, adev->nbio.ras_if->block)) {
1397 if (adev->nbio.funcs->init_ras_controller_interrupt)
1398 amdgpu_irq_put(adev, &adev->nbio.ras_controller_irq, 0);
1399 if (adev->nbio.funcs->init_ras_err_event_athub_interrupt)
1400 amdgpu_irq_put(adev, &adev->nbio.ras_err_event_athub_irq, 0);
1406 static int soc15_common_suspend(void *handle)
1408 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
1410 return soc15_common_hw_fini(adev);
1413 static int soc15_common_resume(void *handle)
1415 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
1417 return soc15_common_hw_init(adev);
1420 static bool soc15_common_is_idle(void *handle)
1425 static int soc15_common_wait_for_idle(void *handle)
1430 static int soc15_common_soft_reset(void *handle)
1435 static void soc15_update_hdp_light_sleep(struct amdgpu_device *adev, bool enable)
1439 if (adev->asic_type == CHIP_VEGA20 ||
1440 adev->asic_type == CHIP_ARCTURUS ||
1441 adev->asic_type == CHIP_RENOIR) {
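/* these parts renamed the HDP light-sleep controls (see the
 * mmHDP_MEM_POWER_CTRL defines above); older SOC15 parts use
 * HDP_MEM_POWER_LS in the else branch below
 */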
1442 def = data = RREG32(SOC15_REG_OFFSET(HDP, 0, mmHDP_MEM_POWER_CTRL));
1444 if (enable && (adev->cg_flags & AMD_CG_SUPPORT_HDP_LS))
1445 data |= HDP_MEM_POWER_CTRL__IPH_MEM_POWER_CTRL_EN_MASK |
1446 HDP_MEM_POWER_CTRL__IPH_MEM_POWER_LS_EN_MASK |
1447 HDP_MEM_POWER_CTRL__RC_MEM_POWER_CTRL_EN_MASK |
1448 HDP_MEM_POWER_CTRL__RC_MEM_POWER_LS_EN_MASK;
1450 data &= ~(HDP_MEM_POWER_CTRL__IPH_MEM_POWER_CTRL_EN_MASK |
1451 HDP_MEM_POWER_CTRL__IPH_MEM_POWER_LS_EN_MASK |
1452 HDP_MEM_POWER_CTRL__RC_MEM_POWER_CTRL_EN_MASK |
1453 HDP_MEM_POWER_CTRL__RC_MEM_POWER_LS_EN_MASK);
1456 WREG32(SOC15_REG_OFFSET(HDP, 0, mmHDP_MEM_POWER_CTRL), data);
1458 def = data = RREG32(SOC15_REG_OFFSET(HDP, 0, mmHDP_MEM_POWER_LS));
1460 if (enable && (adev->cg_flags & AMD_CG_SUPPORT_HDP_LS))
1461 data |= HDP_MEM_POWER_LS__LS_ENABLE_MASK;
1463 data &= ~HDP_MEM_POWER_LS__LS_ENABLE_MASK;
1466 WREG32(SOC15_REG_OFFSET(HDP, 0, mmHDP_MEM_POWER_LS), data);
1470 static void soc15_update_drm_clock_gating(struct amdgpu_device *adev, bool enable)
1474 def = data = RREG32(SOC15_REG_OFFSET(MP0, 0, mmMP0_MISC_CGTT_CTRL0));
1476 if (enable && (adev->cg_flags & AMD_CG_SUPPORT_DRM_MGCG))
1477 data &= ~(0x01000000 |
1486 data |= (0x01000000 |
1496 WREG32(SOC15_REG_OFFSET(MP0, 0, mmMP0_MISC_CGTT_CTRL0), data);
1499 static void soc15_update_drm_light_sleep(struct amdgpu_device *adev, bool enable)
1503 def = data = RREG32(SOC15_REG_OFFSET(MP0, 0, mmMP0_MISC_LIGHT_SLEEP_CTRL));
1505 if (enable && (adev->cg_flags & AMD_CG_SUPPORT_DRM_LS))
1511 WREG32(SOC15_REG_OFFSET(MP0, 0, mmMP0_MISC_LIGHT_SLEEP_CTRL), data);
1514 static void soc15_update_rom_medium_grain_clock_gating(struct amdgpu_device *adev,
1519 def = data = RREG32(SOC15_REG_OFFSET(SMUIO, 0, mmCGTT_ROM_CLK_CTRL0));
1521 if (enable && (adev->cg_flags & AMD_CG_SUPPORT_ROM_MGCG))
1522 data &= ~(CGTT_ROM_CLK_CTRL0__SOFT_OVERRIDE0_MASK |
1523 CGTT_ROM_CLK_CTRL0__SOFT_OVERRIDE1_MASK);
1525 data |= CGTT_ROM_CLK_CTRL0__SOFT_OVERRIDE0_MASK |
1526 CGTT_ROM_CLK_CTRL0__SOFT_OVERRIDE1_MASK;
1529 WREG32(SOC15_REG_OFFSET(SMUIO, 0, mmCGTT_ROM_CLK_CTRL0), data);
1532 static int soc15_common_set_clockgating_state(void *handle,
1533 enum amd_clockgating_state state)
1535 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
1537 if (amdgpu_sriov_vf(adev))
1540 switch (adev->asic_type) {
1544 adev->nbio.funcs->update_medium_grain_clock_gating(adev,
1545 state == AMD_CG_STATE_GATE);
1546 adev->nbio.funcs->update_medium_grain_light_sleep(adev,
1547 state == AMD_CG_STATE_GATE);
1548 soc15_update_hdp_light_sleep(adev,
1549 state == AMD_CG_STATE_GATE);
1550 soc15_update_drm_clock_gating(adev,
1551 state == AMD_CG_STATE_GATE);
1552 soc15_update_drm_light_sleep(adev,
1553 state == AMD_CG_STATE_GATE);
1554 soc15_update_rom_medium_grain_clock_gating(adev,
1555 state == AMD_CG_STATE_GATE);
1556 adev->df.funcs->update_medium_grain_clock_gating(adev,
1557 state == AMD_CG_STATE_GATE);
1561 adev->nbio.funcs->update_medium_grain_clock_gating(adev,
1562 state == AMD_CG_STATE_GATE);
1563 adev->nbio.funcs->update_medium_grain_light_sleep(adev,
1564 state == AMD_CG_STATE_GATE);
1565 soc15_update_hdp_light_sleep(adev,
1566 state == AMD_CG_STATE_GATE);
1567 soc15_update_drm_clock_gating(adev,
1568 state == AMD_CG_STATE_GATE);
1569 soc15_update_drm_light_sleep(adev,
1570 state == AMD_CG_STATE_GATE);
1571 soc15_update_rom_medium_grain_clock_gating(adev,
1572 state == AMD_CG_STATE_GATE);
1575 soc15_update_hdp_light_sleep(adev,
1576 state == AMD_CG_STATE_GATE);
1584 static void soc15_common_get_clockgating_state(void *handle, u32 *flags)
1586 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
1589 if (amdgpu_sriov_vf(adev))
1592 adev->nbio.funcs->get_clockgating_state(adev, flags);
1594 /* AMD_CG_SUPPORT_HDP_LS */
1595 data = RREG32(SOC15_REG_OFFSET(HDP, 0, mmHDP_MEM_POWER_LS));
1596 if (data & HDP_MEM_POWER_LS__LS_ENABLE_MASK)
1597 *flags |= AMD_CG_SUPPORT_HDP_LS;
1599 /* AMD_CG_SUPPORT_DRM_MGCG */
1600 data = RREG32(SOC15_REG_OFFSET(MP0, 0, mmMP0_MISC_CGTT_CTRL0));
1601 if (!(data & 0x01000000))
1602 *flags |= AMD_CG_SUPPORT_DRM_MGCG;
1604 /* AMD_CG_SUPPORT_DRM_LS */
1605 data = RREG32(SOC15_REG_OFFSET(MP0, 0, mmMP0_MISC_LIGHT_SLEEP_CTRL));
1607 *flags |= AMD_CG_SUPPORT_DRM_LS;
1609 /* AMD_CG_SUPPORT_ROM_MGCG */
1610 data = RREG32(SOC15_REG_OFFSET(SMUIO, 0, mmCGTT_ROM_CLK_CTRL0));
1611 if (!(data & CGTT_ROM_CLK_CTRL0__SOFT_OVERRIDE0_MASK))
1612 *flags |= AMD_CG_SUPPORT_ROM_MGCG;
1614 adev->df.funcs->get_clockgating_state(adev, flags);
1617 static int soc15_common_set_powergating_state(void *handle,
1618 enum amd_powergating_state state)
1624 const struct amd_ip_funcs soc15_common_ip_funcs = {
1625 .name = "soc15_common",
1626 .early_init = soc15_common_early_init,
1627 .late_init = soc15_common_late_init,
1628 .sw_init = soc15_common_sw_init,
1629 .sw_fini = soc15_common_sw_fini,
1630 .hw_init = soc15_common_hw_init,
1631 .hw_fini = soc15_common_hw_fini,
1632 .suspend = soc15_common_suspend,
1633 .resume = soc15_common_resume,
1634 .is_idle = soc15_common_is_idle,
1635 .wait_for_idle = soc15_common_wait_for_idle,
1636 .soft_reset = soc15_common_soft_reset,
1637 .set_clockgating_state = soc15_common_set_clockgating_state,
1638 .set_powergating_state = soc15_common_set_powergating_state,
1639 .get_clockgating_state = soc15_common_get_clockgating_state,