#define mmGCR_GENERAL_CNTL_Sienna_Cichlid 0x1580
#define mmGCR_GENERAL_CNTL_Sienna_Cichlid_BASE_IDX 0
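+/* GC register offsets specific to Vangogh (note BASE_IDX 1) */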
+#define mmSPI_CONFIG_CNTL_1_Vangogh 0x2441
+#define mmSPI_CONFIG_CNTL_1_Vangogh_BASE_IDX 1
+#define mmVGT_TF_MEMORY_BASE_HI_Vangogh 0x2261
+#define mmVGT_TF_MEMORY_BASE_HI_Vangogh_BASE_IDX 1
+#define mmVGT_HS_OFFCHIP_PARAM_Vangogh 0x224f
+#define mmVGT_HS_OFFCHIP_PARAM_Vangogh_BASE_IDX 1
+#define mmVGT_TF_RING_SIZE_Vangogh 0x224e
+#define mmVGT_TF_RING_SIZE_Vangogh_BASE_IDX 1
+#define mmVGT_GSVS_RING_SIZE_Vangogh 0x2241
+#define mmVGT_GSVS_RING_SIZE_Vangogh_BASE_IDX 1
+#define mmVGT_TF_MEMORY_BASE_Vangogh 0x2250
+#define mmVGT_TF_MEMORY_BASE_Vangogh_BASE_IDX 1
+#define mmVGT_ESGS_RING_SIZE_Vangogh 0x2240
+#define mmVGT_ESGS_RING_SIZE_Vangogh_BASE_IDX 1
+#define mmSPI_CONFIG_CNTL_Vangogh 0x2440
+#define mmSPI_CONFIG_CNTL_Vangogh_BASE_IDX 1
+
#define mmCP_HYP_PFP_UCODE_ADDR 0x5814
#define mmCP_HYP_PFP_UCODE_ADDR_BASE_IDX 1
#define mmCP_HYP_PFP_UCODE_DATA 0x5815
MODULE_FIRMWARE("amdgpu/navy_flounder_mec2.bin");
MODULE_FIRMWARE("amdgpu/navy_flounder_rlc.bin");
+MODULE_FIRMWARE("amdgpu/vangogh_ce.bin");
+MODULE_FIRMWARE("amdgpu/vangogh_pfp.bin");
+MODULE_FIRMWARE("amdgpu/vangogh_me.bin");
+MODULE_FIRMWARE("amdgpu/vangogh_mec.bin");
+MODULE_FIRMWARE("amdgpu/vangogh_mec2.bin");
+MODULE_FIRMWARE("amdgpu/vangogh_rlc.bin");
+
+MODULE_FIRMWARE("amdgpu/dimgrey_cavefish_ce.bin");
+MODULE_FIRMWARE("amdgpu/dimgrey_cavefish_pfp.bin");
+MODULE_FIRMWARE("amdgpu/dimgrey_cavefish_me.bin");
+MODULE_FIRMWARE("amdgpu/dimgrey_cavefish_mec.bin");
+MODULE_FIRMWARE("amdgpu/dimgrey_cavefish_mec2.bin");
+MODULE_FIRMWARE("amdgpu/dimgrey_cavefish_rlc.bin");
+
static const struct soc15_reg_golden golden_settings_gc_10_1[] =
{
SOC15_REG_GOLDEN_VALUE(GC, 0, mmCB_HW_CONTROL_4, 0xffffffff, 0x00400014),
SOC15_REG_GOLDEN_VALUE(GC, 0, mmVGT_GS_MAX_WAVE_ID, 0x00000fff, 0x000003ff)
};
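+
+/* golden register settings for Vangogh (GC 10.3) */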
+static const struct soc15_reg_golden golden_settings_gc_10_3_vangogh[] =
+{
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmCGTT_SPI_RA0_CLK_CTRL, 0xff7f0fff, 0x30000100),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmCGTT_SPI_RA1_CLK_CTRL, 0xff7f0fff, 0x7e000100),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmCH_PIPE_STEER, 0x000000ff, 0x000000e4),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmDB_DEBUG3, 0xffffffff, 0x00000200),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmDB_DEBUG4, 0xffffffff, 0x00800000),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmDB_EXCEPTION_CONTROL, 0x7fff0f1f, 0x00b80000),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGB_ADDR_CONFIG, 0x0c1807ff, 0x00000142),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGCR_GENERAL_CNTL, 0x1ff1ffff, 0x00000500),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGL1_PIPE_STEER, 0x000000ff, 0x000000e4),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGL2_PIPE_STEER_0, 0x77777777, 0x32103210),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGL2_PIPE_STEER_1, 0x77777777, 0x32103210),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGL2A_ADDR_MATCH_MASK, 0xffffffff, 0xfffffff3),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGL2C_ADDR_MATCH_MASK, 0xffffffff, 0xfffffff3),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGL2C_CM_CTRL1, 0xff8fff0f, 0x580f1008),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGL2C_CTRL3, 0xf7ffffff, 0x00f80988),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmPA_CL_ENHANCE, 0xf17fffff, 0x01200007),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmPA_SC_BINNER_TIMEOUT_COUNTER, 0xffffffff, 0x00000800),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmPA_SC_ENHANCE_2, 0xffffffbf, 0x00000020),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmSPI_CONFIG_CNTL_1_Vangogh, 0xffffffff, 0x00070103),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmSQG_CONFIG, 0x000017ff, 0x00001000),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmTA_CNTL_AUX, 0xfff7ffff, 0x01030000),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmUTCL1_CTRL, 0xffffffff, 0x00400000),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmVGT_GS_MAX_WAVE_ID, 0x00000fff, 0x000000ff),
+};
+
#define DEFAULT_SH_MEM_CONFIG \
((SH_MEM_ADDRESS_MODE_64 << SH_MEM_CONFIG__ADDRESS_MODE__SHIFT) | \
(SH_MEM_ALIGNMENT_MODE_UNALIGNED << SH_MEM_CONFIG__ALIGNMENT_MODE__SHIFT) | \
adev->gfx.kiq.pmf = &gfx_v10_0_kiq_pm4_funcs;
}
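+
+/* program the RLC SPM golden registers; split out of
+ * gfx_v10_0_init_golden_registers() so it can also be reached through the
+ * .init_spm_golden gfx callback added below
+ */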
+static void gfx_v10_0_init_spm_golden_registers(struct amdgpu_device *adev)
+{
+ switch (adev->asic_type) {
+ case CHIP_NAVI10:
+ soc15_program_register_sequence(adev,
+ golden_settings_gc_rlc_spm_10_0_nv10,
+ (const u32)ARRAY_SIZE(golden_settings_gc_rlc_spm_10_0_nv10));
+ break;
+ case CHIP_NAVI14:
+ soc15_program_register_sequence(adev,
+ golden_settings_gc_rlc_spm_10_1_nv14,
+ (const u32)ARRAY_SIZE(golden_settings_gc_rlc_spm_10_1_nv14));
+ break;
+ case CHIP_NAVI12:
+ soc15_program_register_sequence(adev,
+ golden_settings_gc_rlc_spm_10_1_2_nv12,
+ (const u32)ARRAY_SIZE(golden_settings_gc_rlc_spm_10_1_2_nv12));
+ break;
+ default:
+ break;
+ }
+}
+
static void gfx_v10_0_init_golden_registers(struct amdgpu_device *adev)
{
switch (adev->asic_type) {
soc15_program_register_sequence(adev,
golden_settings_gc_10_0_nv10,
(const u32)ARRAY_SIZE(golden_settings_gc_10_0_nv10));
- soc15_program_register_sequence(adev,
- golden_settings_gc_rlc_spm_10_0_nv10,
- (const u32)ARRAY_SIZE(golden_settings_gc_rlc_spm_10_0_nv10));
break;
case CHIP_NAVI14:
soc15_program_register_sequence(adev,
soc15_program_register_sequence(adev,
golden_settings_gc_10_1_nv14,
(const u32)ARRAY_SIZE(golden_settings_gc_10_1_nv14));
- soc15_program_register_sequence(adev,
- golden_settings_gc_rlc_spm_10_1_nv14,
- (const u32)ARRAY_SIZE(golden_settings_gc_rlc_spm_10_1_nv14));
break;
case CHIP_NAVI12:
soc15_program_register_sequence(adev,
soc15_program_register_sequence(adev,
golden_settings_gc_10_1_2_nv12,
(const u32)ARRAY_SIZE(golden_settings_gc_10_1_2_nv12));
- soc15_program_register_sequence(adev,
- golden_settings_gc_rlc_spm_10_1_2_nv12,
- (const u32)ARRAY_SIZE(golden_settings_gc_rlc_spm_10_1_2_nv12));
break;
case CHIP_SIENNA_CICHLID:
soc15_program_register_sequence(adev,
golden_settings_gc_10_3_2,
(const u32)ARRAY_SIZE(golden_settings_gc_10_3_2));
break;
-
+ case CHIP_VANGOGH:
+ soc15_program_register_sequence(adev,
+ golden_settings_gc_10_3_vangogh,
+ (const u32)ARRAY_SIZE(golden_settings_gc_10_3_vangogh));
+ break;
default:
break;
}
+ gfx_v10_0_init_spm_golden_registers(adev);
}
static void gfx_v10_0_scratch_init(struct amdgpu_device *adev)
break;
case CHIP_SIENNA_CICHLID:
case CHIP_NAVY_FLOUNDER:
+ case CHIP_VANGOGH:
adev->gfx.cp_fw_write_wait = true;
break;
default:
break;
}
- if (adev->gfx.cp_fw_write_wait == false)
+ if (!adev->gfx.cp_fw_write_wait)
DRM_WARN_ONCE("CP firmware version too old, please update!");
}
if (!gfx_v10_0_navi10_gfxoff_should_enable(adev))
adev->pm.pp_feature &= ~PP_GFXOFF_MASK;
break;
+ case CHIP_NAVY_FLOUNDER:
+ case CHIP_VANGOGH:
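+		/* GFXOFF is left disabled on these parts for now */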
+ adev->pm.pp_feature &= ~PP_GFXOFF_MASK;
+ break;
default:
break;
}
case CHIP_NAVY_FLOUNDER:
chip_name = "navy_flounder";
break;
+ case CHIP_VANGOGH:
+ chip_name = "vangogh";
+ break;
+ case CHIP_DIMGREY_CAVEFISH:
+ chip_name = "dimgrey_cavefish";
+ break;
default:
BUG();
}
amdgpu_gfx_compute_queue_acquire(adev);
mec_hpd_size = adev->gfx.num_compute_rings * GFX10_MEC_HPD_SIZE;
- r = amdgpu_bo_create_reserved(adev, mec_hpd_size, PAGE_SIZE,
- AMDGPU_GEM_DOMAIN_GTT,
- &adev->gfx.mec.hpd_eop_obj,
- &adev->gfx.mec.hpd_eop_gpu_addr,
- (void **)&hpd);
- if (r) {
- dev_warn(adev->dev, "(%d) create HDP EOP bo failed\n", r);
- gfx_v10_0_mec_fini(adev);
- return r;
- }
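+	/* mec_hpd_size is zero when no compute rings are configured
+	 * (amdgpu.num_kcq can be 0), so skip the EOP BO in that case
+	 */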
+ if (mec_hpd_size) {
+ r = amdgpu_bo_create_reserved(adev, mec_hpd_size, PAGE_SIZE,
+ AMDGPU_GEM_DOMAIN_GTT,
+ &adev->gfx.mec.hpd_eop_obj,
+ &adev->gfx.mec.hpd_eop_gpu_addr,
+ (void **)&hpd);
+ if (r) {
+			dev_warn(adev->dev, "(%d) create HPD EOP bo failed\n", r);
+ gfx_v10_0_mec_fini(adev);
+ return r;
+ }
- memset(hpd, 0, mec_hpd_size);
+ memset(hpd, 0, mec_hpd_size);
- amdgpu_bo_kunmap(adev->gfx.mec.hpd_eop_obj);
- amdgpu_bo_unreserve(adev->gfx.mec.hpd_eop_obj);
+ amdgpu_bo_kunmap(adev->gfx.mec.hpd_eop_obj);
+ amdgpu_bo_unreserve(adev->gfx.mec.hpd_eop_obj);
+ }
if (adev->firmware.load_type == AMDGPU_FW_LOAD_DIRECT) {
mec_hdr = (const struct gfx_firmware_header_v1_0 *)adev->gfx.mec_fw->data;
.read_wave_sgprs = &gfx_v10_0_read_wave_sgprs,
.read_wave_vgprs = &gfx_v10_0_read_wave_vgprs,
.select_me_pipe_q = &gfx_v10_0_select_me_pipe_q,
+ .init_spm_golden = &gfx_v10_0_init_spm_golden_registers,
};
static void gfx_v10_0_gpu_early_init(struct amdgpu_device *adev)
break;
case CHIP_SIENNA_CICHLID:
case CHIP_NAVY_FLOUNDER:
+ case CHIP_VANGOGH:
adev->gfx.config.max_hw_contexts = 8;
adev->gfx.config.sc_prim_fifo_size_frontend = 0x20;
adev->gfx.config.sc_prim_fifo_size_backend = 0x100;
break;
case CHIP_SIENNA_CICHLID:
case CHIP_NAVY_FLOUNDER:
+ case CHIP_VANGOGH:
adev->gfx.me.num_me = 1;
adev->gfx.me.num_pipe_per_me = 1;
adev->gfx.me.num_queue_per_pipe = 1;
	/* for ASICs that integrate GFX v10.3
* pa_sc_tile_steering_override should be set to 0 */
if (adev->asic_type == CHIP_SIENNA_CICHLID ||
- adev->asic_type == CHIP_NAVY_FLOUNDER)
+ adev->asic_type == CHIP_NAVY_FLOUNDER ||
+ adev->asic_type == CHIP_VANGOGH)
return 0;
/* init num_sc */
switch (adev->asic_type) {
case CHIP_SIENNA_CICHLID:
case CHIP_NAVY_FLOUNDER:
+ case CHIP_VANGOGH:
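+		/* Vangogh shares the Sienna_Cichlid register field layout */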
tmp = REG_SET_FIELD(0, CP_RB_DOORBELL_RANGE_LOWER,
DOORBELL_RANGE_LOWER_Sienna_Cichlid, ring->doorbell_index);
WREG32_SOC15(GC, 0, mmCP_RB_DOORBELL_RANGE_LOWER, tmp);
switch (adev->asic_type) {
case CHIP_SIENNA_CICHLID:
case CHIP_NAVY_FLOUNDER:
+ case CHIP_VANGOGH:
WREG32_SOC15(GC, 0, mmCP_MEC_CNTL_Sienna_Cichlid, 0);
break;
default:
switch (adev->asic_type) {
case CHIP_SIENNA_CICHLID:
case CHIP_NAVY_FLOUNDER:
+ case CHIP_VANGOGH:
WREG32_SOC15(GC, 0, mmCP_MEC_CNTL_Sienna_Cichlid,
(CP_MEC_CNTL__MEC_ME1_HALT_MASK |
CP_MEC_CNTL__MEC_ME2_HALT_MASK));
switch (adev->asic_type) {
case CHIP_SIENNA_CICHLID:
case CHIP_NAVY_FLOUNDER:
+ case CHIP_VANGOGH:
tmp = RREG32_SOC15(GC, 0, mmRLC_CP_SCHEDULERS_Sienna_Cichlid);
tmp &= 0xffffff00;
tmp |= (ring->me << 5) | (ring->pipe << 3) | (ring->queue);
struct v10_gfx_mqd *mqd = ring->mqd_ptr;
int mqd_idx = ring - &adev->gfx.gfx_ring[0];
- if (!adev->in_gpu_reset && !adev->in_suspend) {
+ if (!amdgpu_in_reset(adev) && !adev->in_suspend) {
memset((void *)mqd, 0, sizeof(*mqd));
mutex_lock(&adev->srbm_mutex);
nv_grbm_select(adev, ring->me, ring->pipe, ring->queue, 0);
mutex_unlock(&adev->srbm_mutex);
if (adev->gfx.me.mqd_backup[mqd_idx])
memcpy(adev->gfx.me.mqd_backup[mqd_idx], mqd, sizeof(*mqd));
- } else if (adev->in_gpu_reset) {
+ } else if (amdgpu_in_reset(adev)) {
/* reset mqd with the backup copy */
if (adev->gfx.me.mqd_backup[mqd_idx])
memcpy(mqd, adev->gfx.me.mqd_backup[mqd_idx], sizeof(*mqd));
struct v10_compute_mqd *mqd = ring->mqd_ptr;
int j;
+	/* deactivate the queue */
+ if (amdgpu_sriov_vf(adev))
+ WREG32_SOC15(GC, 0, mmCP_HQD_ACTIVE, 0);
+
/* disable wptr polling */
WREG32_FIELD15(GC, 0, CP_PQ_WPTR_POLL_CNTL, EN, 0);
gfx_v10_0_kiq_setting(ring);
- if (adev->in_gpu_reset) { /* for GPU_RESET case */
+ if (amdgpu_in_reset(adev)) { /* for GPU_RESET case */
/* reset MQD to a clean status */
if (adev->gfx.mec.mqd_backup[mqd_idx])
memcpy(mqd, adev->gfx.mec.mqd_backup[mqd_idx], sizeof(*mqd));
struct v10_compute_mqd *mqd = ring->mqd_ptr;
int mqd_idx = ring - &adev->gfx.compute_ring[0];
- if (!adev->in_gpu_reset && !adev->in_suspend) {
+ if (!amdgpu_in_reset(adev) && !adev->in_suspend) {
memset((void *)mqd, 0, sizeof(*mqd));
mutex_lock(&adev->srbm_mutex);
nv_grbm_select(adev, ring->me, ring->pipe, ring->queue, 0);
if (adev->gfx.mec.mqd_backup[mqd_idx])
memcpy(adev->gfx.mec.mqd_backup[mqd_idx], mqd, sizeof(*mqd));
- } else if (adev->in_gpu_reset) { /* for GPU_RESET case */
+ } else if (amdgpu_in_reset(adev)) { /* for GPU_RESET case */
/* reset MQD to a clean status */
if (adev->gfx.mec.mqd_backup[mqd_idx])
memcpy(mqd, adev->gfx.mec.mqd_backup[mqd_idx], sizeof(*mqd));
return false;
}
break;
+ case CHIP_VANGOGH:
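+		/* assume the UMD register CAM remap is already in place on Vangogh */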
+ return true;
default:
data = RREG32_SOC15(GC, 0, mmVGT_ESGS_RING_SIZE);
WREG32_SOC15(GC, 0, mmVGT_ESGS_RING_SIZE, 0);
switch (adev->asic_type) {
case CHIP_SIENNA_CICHLID:
case CHIP_NAVY_FLOUNDER:
+ case CHIP_VANGOGH:
/* mmVGT_TF_RING_SIZE_UMD -> mmVGT_TF_RING_SIZE */
data = (SOC15_REG_OFFSET(GC, 0, mmVGT_TF_RING_SIZE_UMD) <<
GRBM_CAM_DATA__CAM_ADDR__SHIFT) |
amdgpu_irq_put(adev, &adev->gfx.priv_reg_irq, 0);
amdgpu_irq_put(adev, &adev->gfx.priv_inst_irq, 0);
+
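+	/* skip KGQ/KCQ teardown while a PCI error is being recovered;
+	 * the device may not be safely accessible
+	 */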
+ if (!adev->in_pci_err_recovery) {
#ifndef BRING_UP_DEBUG
- if (amdgpu_async_gfx_ring) {
- r = gfx_v10_0_kiq_disable_kgq(adev);
- if (r)
- DRM_ERROR("KGQ disable failed\n");
- }
+ if (amdgpu_async_gfx_ring) {
+ r = gfx_v10_0_kiq_disable_kgq(adev);
+ if (r)
+ DRM_ERROR("KGQ disable failed\n");
+ }
#endif
- if (amdgpu_gfx_disable_kcq(adev))
- DRM_ERROR("KCQ disable failed\n");
+ if (amdgpu_gfx_disable_kcq(adev))
+ DRM_ERROR("KCQ disable failed\n");
+ }
+
if (amdgpu_sriov_vf(adev)) {
gfx_v10_0_cp_gfx_enable(adev, false);
/* Program KIQ position of RLC_CP_SCHEDULERS during destroy */
GRBM_STATUS__BCI_BUSY_MASK | GRBM_STATUS__SX_BUSY_MASK |
GRBM_STATUS__TA_BUSY_MASK | GRBM_STATUS__DB_BUSY_MASK |
GRBM_STATUS__CB_BUSY_MASK | GRBM_STATUS__GDS_BUSY_MASK |
- GRBM_STATUS__SPI_BUSY_MASK | GRBM_STATUS__GE_BUSY_NO_DMA_MASK
- | GRBM_STATUS__BCI_BUSY_MASK)) {
+ GRBM_STATUS__SPI_BUSY_MASK | GRBM_STATUS__GE_BUSY_NO_DMA_MASK)) {
grbm_soft_reset = REG_SET_FIELD(grbm_soft_reset,
GRBM_SOFT_RESET, SOFT_RESET_CP,
1);
switch (adev->asic_type) {
case CHIP_SIENNA_CICHLID:
case CHIP_NAVY_FLOUNDER:
+ case CHIP_VANGOGH:
if (REG_GET_FIELD(tmp, GRBM_STATUS2, RLC_BUSY_Sienna_Cichlid))
grbm_soft_reset = REG_SET_FIELD(grbm_soft_reset,
GRBM_SOFT_RESET,
break;
case CHIP_SIENNA_CICHLID:
case CHIP_NAVY_FLOUNDER:
+ case CHIP_VANGOGH:
adev->gfx.num_gfx_rings = GFX10_NUM_GFX_RINGS_Sienna_Cichlid;
break;
default:
break;
}
- adev->gfx.num_compute_rings = AMDGPU_MAX_COMPUTE_RINGS;
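+	/* honor the amdgpu.num_kcq module parameter (may be zero) */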
+ adev->gfx.num_compute_rings = amdgpu_num_kcq;
gfx_v10_0_set_kiq_pm4_funcs(adev);
gfx_v10_0_set_ring_funcs(adev);
switch (adev->asic_type) {
case CHIP_SIENNA_CICHLID:
case CHIP_NAVY_FLOUNDER:
+ case CHIP_VANGOGH:
WREG32_SOC15(GC, 0, mmRLC_SAFE_MODE_Sienna_Cichlid, data);
/* wait for RLC_SAFE_MODE */
switch (adev->asic_type) {
case CHIP_SIENNA_CICHLID:
case CHIP_NAVY_FLOUNDER:
+ case CHIP_VANGOGH:
WREG32_SOC15(GC, 0, mmRLC_SAFE_MODE_Sienna_Cichlid, data);
break;
default:
(AMD_CG_SUPPORT_GFX_MGCG |
AMD_CG_SUPPORT_GFX_CGLS |
AMD_CG_SUPPORT_GFX_CGCG |
- AMD_CG_SUPPORT_GFX_CGLS |
AMD_CG_SUPPORT_GFX_3D_CGCG |
AMD_CG_SUPPORT_GFX_3D_CGLS))
gfx_v10_0_enable_gui_idle_interrupt(adev, enable);
return gfx_v10_0_check_rlcg_range(adev, offset, NULL, 0);
}
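+
+/* toggle the RLC static GFX power-gating enable bit (used on Vangogh) */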
+static void gfx_v10_cntl_power_gating(struct amdgpu_device *adev, bool enable)
+{
+ u32 data = RREG32_SOC15(GC, 0, mmRLC_PG_CNTL);
+
+ if (enable && (adev->pg_flags & AMD_PG_SUPPORT_GFX_PG))
+ data |= RLC_PG_CNTL__GFX_POWER_GATING_ENABLE_MASK;
+ else
+ data &= ~RLC_PG_CNTL__GFX_POWER_GATING_ENABLE_MASK;
+
+ WREG32_SOC15(GC, 0, mmRLC_PG_CNTL, data);
+}
+
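+/* perform the PG toggle with the RLC held in safe mode */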
+static void gfx_v10_cntl_pg(struct amdgpu_device *adev, bool enable)
+{
+ amdgpu_gfx_rlc_enter_safe_mode(adev);
+
+ gfx_v10_cntl_power_gating(adev, enable);
+
+ amdgpu_gfx_rlc_exit_safe_mode(adev);
+}
+
static const struct amdgpu_rlc_funcs gfx_v10_0_rlc_funcs = {
.is_rlc_enabled = gfx_v10_0_is_rlc_enabled,
.set_safe_mode = gfx_v10_0_set_safe_mode,
case CHIP_NAVY_FLOUNDER:
amdgpu_gfx_off_ctrl(adev, enable);
break;
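+	/* Vangogh gates GFX via RLC power gating rather than GFXOFF */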
+ case CHIP_VANGOGH:
+ gfx_v10_cntl_pg(adev, enable);
+ break;
default:
break;
}
case CHIP_NAVI12:
case CHIP_SIENNA_CICHLID:
case CHIP_NAVY_FLOUNDER:
+ case CHIP_VANGOGH:
+ case CHIP_DIMGREY_CAVEFISH:
gfx_v10_0_update_gfx_clock_gating(adev,
state == AMD_CG_STATE_GATE);
break;
case CHIP_NAVI14:
case CHIP_SIENNA_CICHLID:
case CHIP_NAVY_FLOUNDER:
+ case CHIP_VANGOGH:
adev->gfx.rlc.funcs = &gfx_v10_0_rlc_funcs;
break;
case CHIP_NAVI12: