#define MCA_BANK_IPID(_ip, _hwid, _type) \
[AMDGPU_MCA_IP_##_ip] = { .hwid = _hwid, .mcatype = _type, }
-static inline bool smu_v13_0_6_is_unified_metrics(struct smu_context *smu)
-{
- return (smu->adev->flags & AMD_IS_APU) &&
- smu->smc_fw_version <= 0x4556900;
-}
-
-static inline bool smu_v13_0_6_is_other_end_count_available(struct smu_context *smu)
-{
- switch (amdgpu_ip_version(smu->adev, MP1_HWIP, 0)) {
- case IP_VERSION(13, 0, 6):
- return smu->smc_fw_version >= 0x557600;
- case IP_VERSION(13, 0, 14):
- return smu->smc_fw_version >= 0x05550E00;
- default:
- return false;
- }
-}
-
-static inline bool smu_v13_0_6_is_blw_host_limit_available(struct smu_context *smu)
-{
- if (smu->adev->flags & AMD_IS_APU)
- return smu->smc_fw_version >= 0x04556F00;
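+/*
+ * Capability flags for SMU v13.0.6 variants. Each cap is resolved once
+ * from the PMFW version at init instead of being re-checked with
+ * open-coded version comparisons at every call site.
+ */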
+enum smu_v13_0_6_caps {
+ SMU_13_0_6_CAPS_DPM,
+ SMU_13_0_6_CAPS_UNI_METRICS,
+ SMU_13_0_6_CAPS_DPM_POLICY,
+ SMU_13_0_6_CAPS_OTHER_END_METRICS,
+ SMU_13_0_6_CAPS_SET_UCLK_MAX,
+ SMU_13_0_6_CAPS_PCIE_METRICS,
+ SMU_13_0_6_CAPS_HST_LIMIT_METRICS,
+ SMU_13_0_6_CAPS_MCA_DEBUG_MODE,
+ SMU_13_0_6_CAPS_PER_INST_METRICS,
+ SMU_13_0_6_CAPS_CTF_LIMIT,
+ SMU_13_0_6_CAPS_RMA_MSG,
+ SMU_13_0_6_CAPS_ACA_SYND,
+ SMU_13_0_6_CAPS_SDMA_RESET,
+ SMU_13_0_6_CAPS_ALL,
+};
- switch (amdgpu_ip_version(smu->adev, MP1_HWIP, 0)) {
- case IP_VERSION(13, 0, 6):
- return smu->smc_fw_version >= 0x557900;
- case IP_VERSION(13, 0, 14):
- return smu->smc_fw_version >= 0x05551000;
- default:
- return false;
- }
-}
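+/* One bit per capability; the resolved set is cached in the DPM context */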
+#define SMU_CAPS_MASK(x) (ULL(1) << (x))
+#define SMU_CAPS(x) SMU_CAPS_MASK(SMU_13_0_6_CAPS_##x)
struct mca_bank_ipid {
 enum amdgpu_mca_ip ip;
 uint16_t hwid;
 uint16_t mcatype;
};
+static void smu_v13_0_14_init_caps(struct smu_context *smu)
+{
+ struct smu_13_0_dpm_context *dpm_context = smu->smu_dpm.dpm_context;
+ uint64_t caps = SMU_CAPS(DPM) | SMU_CAPS(UNI_METRICS) |
+ SMU_CAPS(SET_UCLK_MAX) | SMU_CAPS(DPM_POLICY) |
+ SMU_CAPS(PCIE_METRICS) | SMU_CAPS(CTF_LIMIT) |
+ SMU_CAPS(MCA_DEBUG_MODE) | SMU_CAPS(RMA_MSG) |
+ SMU_CAPS(ACA_SYND);
+ uint32_t fw_ver = smu->smc_fw_version;
+
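+ /*
+ * PMFW version bytes: program | major | minor | debug,
+ * e.g. 0x05550E00 is program 5, version 85.14.0.
+ */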
+ if (fw_ver >= 0x05550E00)
+ caps |= SMU_CAPS(OTHER_END_METRICS);
+ if (fw_ver >= 0x05551000)
+ caps |= SMU_CAPS(HST_LIMIT_METRICS);
+ if (fw_ver >= 0x05550B00)
+ caps |= SMU_CAPS(PER_INST_METRICS);
+ if (fw_ver > 0x05550F00)
+ caps |= SMU_CAPS(SDMA_RESET);
+
+ dpm_context->caps = caps;
+}
+
+static void smu_v13_0_6_init_caps(struct smu_context *smu)
+{
+ uint64_t caps = SMU_CAPS(DPM) | SMU_CAPS(UNI_METRICS) |
+ SMU_CAPS(SET_UCLK_MAX) | SMU_CAPS(DPM_POLICY) |
+ SMU_CAPS(PCIE_METRICS) | SMU_CAPS(MCA_DEBUG_MODE) |
+ SMU_CAPS(CTF_LIMIT) | SMU_CAPS(RMA_MSG) |
+ SMU_CAPS(ACA_SYND);
+ struct smu_13_0_dpm_context *dpm_context = smu->smu_dpm.dpm_context;
+ struct amdgpu_device *adev = smu->adev;
+ uint32_t fw_ver = smu->smc_fw_version;
+ uint32_t pgm = (fw_ver >> 24) & 0xFF;
+
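+ /* DPM is only reported by PMFW 85.47.0 (0x552F00) and newer */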
+ if (fw_ver < 0x552F00)
+ caps &= ~SMU_CAPS(DPM);
+
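+ /* Capabilities that dGPUs have but APUs never do */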
+ if (adev->flags & AMD_IS_APU) {
+ caps &= ~SMU_CAPS(PCIE_METRICS);
+ caps &= ~SMU_CAPS(SET_UCLK_MAX);
+ caps &= ~SMU_CAPS(DPM_POLICY);
+ caps &= ~SMU_CAPS(RMA_MSG);
+ caps &= ~SMU_CAPS(ACA_SYND);
+
+ if (fw_ver <= 0x4556900)
+ caps &= ~SMU_CAPS(UNI_METRICS);
+
+ if (fw_ver >= 0x04556F00)
+ caps |= SMU_CAPS(HST_LIMIT_METRICS);
+ if (fw_ver >= 0x04556A00)
+ caps |= SMU_CAPS(PER_INST_METRICS);
+ if (fw_ver < 0x554500)
+ caps &= ~SMU_CAPS(CTF_LIMIT);
+ } else {
+ if (fw_ver >= 0x557600)
+ caps |= SMU_CAPS(OTHER_END_METRICS);
+ if (fw_ver < 0x00556000)
+ caps &= ~SMU_CAPS(DPM_POLICY);
+ if (amdgpu_sriov_vf(adev) && (fw_ver < 0x556600))
+ caps &= ~SMU_CAPS(SET_UCLK_MAX);
+ if (fw_ver < 0x556300)
+ caps &= ~SMU_CAPS(PCIE_METRICS);
+ if (fw_ver < 0x554800)
+ caps &= ~SMU_CAPS(MCA_DEBUG_MODE);
+ if (fw_ver >= 0x556F00)
+ caps |= SMU_CAPS(PER_INST_METRICS);
+ if (fw_ver < 0x554500)
+ caps &= ~SMU_CAPS(CTF_LIMIT);
+ if (fw_ver < 0x00555a00)
+ caps &= ~SMU_CAPS(RMA_MSG);
+ if (fw_ver < 0x00555600)
+ caps &= ~SMU_CAPS(ACA_SYND);
+ if (pgm == 0 && fw_ver >= 0x557900)
+ caps |= SMU_CAPS(HST_LIMIT_METRICS);
+ }
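+ /* SDMA soft reset arrived in a different release on each PMFW program branch */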
+ if (((pgm == 7) && (fw_ver > 0x07550700)) ||
+ ((pgm == 0) && (fw_ver > 0x00557700)) ||
+ ((pgm == 4) && (fw_ver > 0x4556e6c)))
+ caps |= SMU_CAPS(SDMA_RESET);
+
+ dpm_context->caps = caps;
+}
+
+static inline bool smu_v13_0_6_caps_supported(struct smu_context *smu,
+ uint64_t caps_mask)
+{
+ struct smu_13_0_dpm_context *dpm_context = smu->smu_dpm.dpm_context;
+
+ /* caps_mask may combine several SMU_CAPS() bits; all must be set */
+ return (dpm_context->caps & caps_mask) == caps_mask;
+}
+
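+/* Select the caps init routine matching the MP1 IP version */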
+static void smu_v13_0_x_init_caps(struct smu_context *smu)
+{
+ switch (amdgpu_ip_version(smu->adev, MP1_HWIP, 0)) {
+ case IP_VERSION(13, 0, 14):
+ smu_v13_0_14_init_caps(smu);
+ break;
+ default:
+ smu_v13_0_6_init_caps(smu);
+ break;
+ }
+}
+
+static int smu_v13_0_6_check_fw_version(struct smu_context *smu)
+{
+ int r;
+
+ r = smu_v13_0_check_fw_version(smu);
+ /* Initialize caps flags once fw version is fetched */
+ if (!r)
+ smu_v13_0_x_init_caps(smu);
+
+ return r;
+}
+
static int smu_v13_0_6_init_microcode(struct smu_context *smu)
{
const struct smc_firmware_header_v2_1 *v2_1;
MetricsTableA_t *metrics_a = (MetricsTableA_t *)smu_table->metrics_table;
struct PPTable_t *pptable =
(struct PPTable_t *)smu_table->driver_pptable;
- bool flag = smu_v13_0_6_is_unified_metrics(smu);
+ bool flag = !smu_v13_0_6_caps_supported(smu, SMU_CAPS(UNI_METRICS));
int ret, i, retry = 100;
uint32_t table_version;
smu_v13_0_6_setup_driver_pptable(smu);
/* DPM policy not supported in older firmwares */
- if (!(smu->adev->flags & AMD_IS_APU) &&
- (smu->smc_fw_version < 0x00556000)) {
+ if (!smu_v13_0_6_caps_supported(smu, SMU_CAPS(DPM_POLICY))) {
struct smu_dpm_context *smu_dpm = &smu->smu_dpm;
smu_dpm->dpm_policies->policy_mask &=
struct smu_table_context *smu_table = &smu->smu_table;
MetricsTableX_t *metrics_x = (MetricsTableX_t *)smu_table->metrics_table;
MetricsTableA_t *metrics_a = (MetricsTableA_t *)smu_table->metrics_table;
- bool flag = smu_v13_0_6_is_unified_metrics(smu);
+ bool flag = !smu_v13_0_6_caps_supported(smu, SMU_CAPS(UNI_METRICS));
struct amdgpu_device *adev = smu->adev;
int ret = 0;
int xcc_id;
switch (member) {
case METRICS_CURR_GFXCLK:
case METRICS_AVERAGE_GFXCLK:
- if (smu->smc_fw_version >= 0x552F00) {
+ if (smu_v13_0_6_caps_supported(smu, SMU_CAPS(DPM))) {
xcc_id = GET_INST(GC, 0);
*value = SMUQ10_ROUND(GET_METRIC_FIELD(GfxclkFrequency, flag)[xcc_id]);
} else {
static int smu_v13_0_6_mca_set_debug_mode(struct smu_context *smu, bool enable)
{
/* NOTE: this ClearMcaOnRead message is only supported for smu version 85.72.0 or higher */
- if (smu->smc_fw_version < 0x554800)
+ if (!smu_v13_0_6_caps_supported(smu, SMU_CAPS(MCA_DEBUG_MODE)))
return 0;
return smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_ClearMcaOnRead,
if (max == pstate_table->uclk_pstate.curr.max)
return 0;
/* For VF, only allowed in FW versions 85.102 or greater */
- if (amdgpu_sriov_vf(adev) &&
- ((smu->smc_fw_version < 0x556600) ||
- (adev->flags & AMD_IS_APU)))
+ if (!smu_v13_0_6_caps_supported(smu, SMU_CAPS(SET_UCLK_MAX)))
return -EOPNOTSUPP;
/* Only max clock limiting is allowed for UCLK */
ret = smu_v13_0_set_soft_freq_limited_range(
ret = smu_cmn_get_enabled_mask(smu, feature_mask);
- if (ret == -EIO && smu->smc_fw_version < 0x552F00) {
+ if (ret == -EIO && !smu_v13_0_6_caps_supported(smu, SMU_CAPS(DPM))) {
*feature_mask = 0;
ret = 0;
}
static ssize_t smu_v13_0_6_get_gpu_metrics(struct smu_context *smu, void **table)
{
- bool per_inst, smu_13_0_6_per_inst, smu_13_0_14_per_inst, apu_per_inst;
struct smu_table_context *smu_table = &smu->smu_table;
struct gpu_metrics_v1_7 *gpu_metrics =
(struct gpu_metrics_v1_7 *)smu_table->gpu_metrics_table;
- bool flag = smu_v13_0_6_is_unified_metrics(smu);
+ bool flag = !smu_v13_0_6_caps_supported(smu, SMU_CAPS(UNI_METRICS));
int ret = 0, xcc_id, inst, i, j, k, idx;
struct amdgpu_device *adev = smu->adev;
MetricsTableX_t *metrics_x;
struct amdgpu_xcp *xcp;
u16 link_width_level;
u32 inst_mask;
+ bool per_inst;
metrics_x = kzalloc(max(sizeof(MetricsTableX_t), sizeof(MetricsTableA_t)), GFP_KERNEL);
ret = smu_v13_0_6_get_metrics_table(smu, metrics_x, true);
* table for both pf & one vf for smu version 85.99.0 or higher else report only
* for pf from registers
*/
- if (smu->smc_fw_version >= 0x556300) {
+ if (smu_v13_0_6_caps_supported(smu, SMU_CAPS(PCIE_METRICS))) {
gpu_metrics->pcie_link_width = metrics_x->PCIeLinkWidth;
gpu_metrics->pcie_link_speed =
pcie_gen_to_speed(metrics_x->PCIeLinkSpeed);
metrics_x->PCIeNAKSentCountAcc;
gpu_metrics->pcie_nak_rcvd_count_acc =
metrics_x->PCIeNAKReceivedCountAcc;
- if (smu_v13_0_6_is_other_end_count_available(smu))
+ if (smu_v13_0_6_caps_supported(smu,
+ SMU_CAPS(OTHER_END_METRICS)))
gpu_metrics->pcie_lc_perf_other_end_recovery =
metrics_x->PCIeOtherEndRecoveryAcc;
gpu_metrics->num_partition = adev->xcp_mgr->num_xcps;
- apu_per_inst = (adev->flags & AMD_IS_APU) && (smu->smc_fw_version >= 0x04556A00);
- smu_13_0_6_per_inst = !(adev->flags & AMD_IS_APU) &&
- (amdgpu_ip_version(smu->adev, MP1_HWIP, 0)
- == IP_VERSION(13, 0, 6)) &&
- (smu->smc_fw_version >= 0x556F00);
- smu_13_0_14_per_inst = !(adev->flags & AMD_IS_APU) &&
- (amdgpu_ip_version(smu->adev, MP1_HWIP, 0)
- == IP_VERSION(13, 0, 14)) &&
- (smu->smc_fw_version >= 0x05550B00);
-
- per_inst = apu_per_inst || smu_13_0_6_per_inst || smu_13_0_14_per_inst;
+ per_inst = smu_v13_0_6_caps_supported(smu, SMU_CAPS(PER_INST_METRICS));
for_each_xcp(adev->xcp_mgr, xcp, i) {
amdgpu_xcp_get_inst_details(xcp, AMDGPU_XCP_VCN, &inst_mask);
gpu_metrics->xcp_stats[i].gfx_busy_acc[idx] =
SMUQ10_ROUND(metrics_x->GfxBusyAcc[inst]);
- if (smu_v13_0_6_is_blw_host_limit_available(smu))
+ if (smu_v13_0_6_caps_supported(
+ smu, SMU_CAPS(HST_LIMIT_METRICS)))
gpu_metrics->xcp_stats[i].gfx_below_host_limit_acc[idx] =
SMUQ10_ROUND(metrics_x->GfxclkBelowHostLimitAcc
[inst]);
return -EINVAL;
/*Check smu version, GetCtfLimit message only supported for smu version 85.69 or higher */
- if (smu->smc_fw_version < 0x554500)
+ if (!smu_v13_0_6_caps_supported(smu, SMU_CAPS(CTF_LIMIT)))
return 0;
/* Get SOC Max operating temperature */
static int smu_v13_0_6_send_rma_reason(struct smu_context *smu)
{
- struct amdgpu_device *adev = smu->adev;
int ret;
/* NOTE: the message is only valid on dGPU with pmfw 85.90.0 and above */
- if ((adev->flags & AMD_IS_APU) || smu->smc_fw_version < 0x00555a00)
+ if (!smu_v13_0_6_caps_supported(smu, SMU_CAPS(RMA_MSG)))
return 0;
ret = smu_cmn_send_smc_msg(smu, SMU_MSG_RmaDueToBadPageThreshold, NULL);
smu_program = (smu->smc_fw_version >> 24) & 0xff;
switch (amdgpu_ip_version(smu->adev, MP1_HWIP, 0)) {
case IP_VERSION(13, 0, 6):
- if (((smu_program == 7) && (smu->smc_fw_version > 0x07550700)) ||
- ((smu_program == 0) && (smu->smc_fw_version > 0x00557700)))
+ if ((smu_program == 7 || smu_program == 0) &&
+ smu_v13_0_6_caps_supported(smu, SMU_CAPS(SDMA_RESET)))
ret = smu_cmn_send_smc_msg_with_param(smu,
SMU_MSG_ResetSDMA, inst_mask, NULL);
else if ((smu_program == 4) &&
- (smu->smc_fw_version > 0x4556e6c))
+ smu_v13_0_6_caps_supported(smu, SMU_CAPS(SDMA_RESET)))
ret = smu_cmn_send_smc_msg_with_param(smu,
SMU_MSG_ResetSDMA2, inst_mask, NULL);
break;
case IP_VERSION(13, 0, 14):
- if ((smu_program == 5) &&
- (smu->smc_fw_version > 0x05550f00))
+ if (smu_v13_0_6_caps_supported(smu, SMU_CAPS(SDMA_RESET)))
ret = smu_cmn_send_smc_msg_with_param(smu,
SMU_MSG_ResetSDMA2, inst_mask, NULL);
break;
if (instlo != 0x03b30400)
return false;
- if (!(adev->flags & AMD_IS_APU) && smu->smc_fw_version >= 0x00555600) {
+ if (smu_v13_0_6_caps_supported(smu, SMU_CAPS(ACA_SYND))) {
errcode = MCA_REG__SYND__ERRORINFORMATION(entry->regs[MCA_REG_IDX_SYND]);
errcode &= 0xff;
} else {
static int aca_smu_parse_error_code(struct amdgpu_device *adev, struct aca_bank *bank)
{
+ struct smu_context *smu = adev->powerplay.pp_handle;
int error_code;
- if (!(adev->flags & AMD_IS_APU) && adev->pm.fw_version >= 0x00555600)
+ if (smu_v13_0_6_caps_supported(smu, SMU_CAPS(ACA_SYND)))
error_code = ACA_REG__SYND__ERRORINFORMATION(bank->regs[ACA_REG_IDX_SYND]);
else
error_code = ACA_REG__STATUS__ERRORCODE(bank->regs[ACA_REG_IDX_STATUS]);
.fini_power = smu_v13_0_fini_power,
.check_fw_status = smu_v13_0_6_check_fw_status,
/* pptable related */
- .check_fw_version = smu_v13_0_check_fw_version,
+ .check_fw_version = smu_v13_0_6_check_fw_version,
.set_driver_table_location = smu_v13_0_set_driver_table_location,
.set_tool_table_location = smu_v13_0_set_tool_table_location,
.notify_memory_pool_location = smu_v13_0_notify_memory_pool_location,