/*
 * Copyright 2020 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 */
#define SWSMU_CODE_LAYER_L4
#include "amdgpu.h"
#include "amdgpu_smu.h"
#include "smu_cmn.h"
#include "soc15_common.h"
/*
 * DO NOT use these for err/warn/info/debug messages.
 * Use dev_err, dev_warn, dev_info and dev_dbg instead.
 * They are more MGPU friendly.
 */
#undef pr_err
#undef pr_warn
#undef pr_info
#undef pr_debug
/*
 * Although these are defined in each ASIC's specific header file,
 * they share the same definitions and values. That makes common
 * APIs for SMC message issuing for all ASICs possible.
 */
#define mmMP1_SMN_C2PMSG_66			0x0282
#define mmMP1_SMN_C2PMSG_66_BASE_IDX		0

#define mmMP1_SMN_C2PMSG_82			0x0292
#define mmMP1_SMN_C2PMSG_82_BASE_IDX		0

#define mmMP1_SMN_C2PMSG_90			0x029a
#define mmMP1_SMN_C2PMSG_90_BASE_IDX		0

#define MP1_C2PMSG_90__CONTENT_MASK		0xFFFFFFFFL
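/*
 * Mailbox register roles, as used by the helpers below:
 * C2PMSG_66 carries the ASIC-specific message index, C2PMSG_82 carries
 * the 32-bit message argument (and the response argument on the way
 * back), and C2PMSG_90 holds the SMU response status (0x1 on success).
 */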
#undef __SMU_DUMMY_MAP
#define __SMU_DUMMY_MAP(type)	#type
static const char *__smu_message_names[] = {
	SMU_MESSAGE_TYPES
};
static const char *smu_get_message_name(struct smu_context *smu,
					enum smu_message_type type)
{
	if (type < 0 || type >= SMU_MSG_MAX_COUNT)
		return "unknown smu message";

	return __smu_message_names[type];
}
static void smu_cmn_send_msg_without_waiting(struct smu_context *smu,
					     uint16_t msg)
{
	struct amdgpu_device *adev = smu->adev;

	WREG32_SOC15_NO_KIQ(MP1, 0, mmMP1_SMN_C2PMSG_66, msg);
}
static void smu_cmn_read_arg(struct smu_context *smu,
			     uint32_t *arg)
{
	struct amdgpu_device *adev = smu->adev;

	*arg = RREG32_SOC15_NO_KIQ(MP1, 0, mmMP1_SMN_C2PMSG_82);
}
static int smu_cmn_wait_for_response(struct smu_context *smu)
{
	struct amdgpu_device *adev = smu->adev;
	uint32_t cur_value, i, timeout = adev->usec_timeout * 10;

	for (i = 0; i < timeout; i++) {
		cur_value = RREG32_SOC15_NO_KIQ(MP1, 0, mmMP1_SMN_C2PMSG_90);
		if ((cur_value & MP1_C2PMSG_90__CONTENT_MASK) != 0)
			return cur_value == 0x1 ? 0 : -EIO;

		udelay(1);
	}

	/* timeout means wrong logic */
	if (i == timeout)
		return -ETIME;

	return RREG32_SOC15_NO_KIQ(MP1, 0, mmMP1_SMN_C2PMSG_90) == 0x1 ? 0 : -EIO;
}
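/*
 * Send a message to the SMU and wait for its response.
 *
 * The sequence is:
 *  1. poll C2PMSG_90 until the response to any previous message has
 *     been posted, i.e. the SMU is ready for a new message,
 *  2. clear C2PMSG_90 and write the message argument to C2PMSG_82,
 *  3. write the ASIC-specific message index to C2PMSG_66,
 *  4. poll C2PMSG_90 again for the response and, if the caller asked
 *     for one, read the result argument back from C2PMSG_82.
 *
 * Illustrative call (the message must be valid for the ASIC in use):
 *
 *	uint32_t smu_version;
 *	int ret;
 *
 *	ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_GetSmuVersion,
 *					      0, &smu_version);
 */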
int smu_cmn_send_smc_msg_with_param(struct smu_context *smu,
				    enum smu_message_type msg,
				    uint32_t param,
				    uint32_t *read_arg)
{
	struct amdgpu_device *adev = smu->adev;
	int ret = 0, index = 0;

	if (smu->adev->in_pci_err_recovery)
		return 0;

	index = smu_cmn_to_asic_specific_index(smu,
					       CMN2ASIC_MAPPING_MSG,
					       msg);
	if (index < 0)
		return index == -EACCES ? 0 : index;

	mutex_lock(&smu->message_lock);
	ret = smu_cmn_wait_for_response(smu);
	if (ret) {
		dev_err(adev->dev, "Msg issuing pre-check failed and SMU may not be in the right state!\n");
		goto out;
	}

	WREG32_SOC15_NO_KIQ(MP1, 0, mmMP1_SMN_C2PMSG_90, 0);
	WREG32_SOC15_NO_KIQ(MP1, 0, mmMP1_SMN_C2PMSG_82, param);
	smu_cmn_send_msg_without_waiting(smu, (uint16_t)index);

	ret = smu_cmn_wait_for_response(smu);
	if (ret) {
		dev_err(adev->dev, "failed send message: %10s (%d) \tparam: 0x%08x response %#x\n",
			smu_get_message_name(smu, msg), index, param, ret);
		goto out;
	}

	if (read_arg)
		smu_cmn_read_arg(smu, read_arg);

out:
	mutex_unlock(&smu->message_lock);
	return ret;
}
int smu_cmn_send_smc_msg(struct smu_context *smu,
			 enum smu_message_type msg,
			 uint32_t *read_arg)
{
	return smu_cmn_send_smc_msg_with_param(smu,
					       msg,
					       0,
					       read_arg);
}
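/*
 * Translate a common SMU enum (message, clock, feature, table, power
 * source or workload) into the ASIC-specific index expected by the
 * firmware, using the per-ASIC mapping tables the ASIC code registered
 * in smu_context. Returns -EINVAL for unmapped entries and -EACCES for
 * messages that are not allowed under an SR-IOV VF.
 */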
int smu_cmn_to_asic_specific_index(struct smu_context *smu,
				   enum smu_cmn2asic_mapping_type type,
				   uint32_t index)
{
	struct cmn2asic_msg_mapping msg_mapping;
	struct cmn2asic_mapping mapping;

	switch (type) {
	case CMN2ASIC_MAPPING_MSG:
		if (index >= SMU_MSG_MAX_COUNT ||
		    !smu->message_map)
			return -EINVAL;

		msg_mapping = smu->message_map[index];
		if (!msg_mapping.valid_mapping)
			return -EINVAL;

		if (amdgpu_sriov_vf(smu->adev) &&
		    !msg_mapping.valid_in_vf)
			return -EACCES;

		return msg_mapping.map_to;

	case CMN2ASIC_MAPPING_CLK:
		if (index >= SMU_CLK_COUNT ||
		    !smu->clock_map)
			return -EINVAL;

		mapping = smu->clock_map[index];
		if (!mapping.valid_mapping)
			return -EINVAL;

		return mapping.map_to;

	case CMN2ASIC_MAPPING_FEATURE:
		if (index >= SMU_FEATURE_COUNT ||
		    !smu->feature_map)
			return -EINVAL;

		mapping = smu->feature_map[index];
		if (!mapping.valid_mapping)
			return -EINVAL;

		return mapping.map_to;

	case CMN2ASIC_MAPPING_TABLE:
		if (index >= SMU_TABLE_COUNT ||
		    !smu->table_map)
			return -EINVAL;

		mapping = smu->table_map[index];
		if (!mapping.valid_mapping)
			return -EINVAL;

		return mapping.map_to;

	case CMN2ASIC_MAPPING_PWR:
		if (index >= SMU_POWER_SOURCE_COUNT ||
		    !smu->pwr_src_map)
			return -EINVAL;

		mapping = smu->pwr_src_map[index];
		if (!mapping.valid_mapping)
			return -EINVAL;

		return mapping.map_to;

	case CMN2ASIC_MAPPING_WORKLOAD:
		if (index > PP_SMC_POWER_PROFILE_CUSTOM ||
		    !smu->workload_map)
			return -EINVAL;

		mapping = smu->workload_map[index];
		if (!mapping.valid_mapping)
			return -EINVAL;

		return mapping.map_to;

	default:
		return -EINVAL;
	}
}
int smu_cmn_feature_is_supported(struct smu_context *smu,
				 enum smu_feature_mask mask)
{
	struct smu_feature *feature = &smu->smu_feature;
	int feature_id;
	int ret = 0;

	feature_id = smu_cmn_to_asic_specific_index(smu,
						    CMN2ASIC_MAPPING_FEATURE,
						    mask);
	if (feature_id < 0)
		return 0;

	WARN_ON(feature_id > feature->feature_num);

	mutex_lock(&feature->mutex);
	ret = test_bit(feature_id, feature->supported);
	mutex_unlock(&feature->mutex);

	return ret;
}
int smu_cmn_feature_is_enabled(struct smu_context *smu,
			       enum smu_feature_mask mask)
{
	struct smu_feature *feature = &smu->smu_feature;
	int feature_id;
	int ret = 0;

	if (smu->is_apu)
		return 1;
	feature_id = smu_cmn_to_asic_specific_index(smu,
						    CMN2ASIC_MAPPING_FEATURE,
						    mask);
	if (feature_id < 0)
		return 0;

	WARN_ON(feature_id > feature->feature_num);

	mutex_lock(&feature->mutex);
	ret = test_bit(feature_id, feature->enabled);
	mutex_unlock(&feature->mutex);

	return ret;
}
bool smu_cmn_clk_dpm_is_enabled(struct smu_context *smu,
				enum smu_clk_type clk_type)
{
	enum smu_feature_mask feature_id = 0;

	switch (clk_type) {
	case SMU_MCLK:
	case SMU_UCLK:
		feature_id = SMU_FEATURE_DPM_UCLK_BIT;
		break;
	case SMU_GFXCLK:
	case SMU_SCLK:
		feature_id = SMU_FEATURE_DPM_GFXCLK_BIT;
		break;
	case SMU_SOCCLK:
		feature_id = SMU_FEATURE_DPM_SOCCLK_BIT;
		break;
	default:
		return true;
	}

	if (!smu_cmn_feature_is_enabled(smu, feature_id))
		return false;

	return true;
}
int smu_cmn_get_enabled_mask(struct smu_context *smu,
			     uint32_t *feature_mask,
			     uint32_t num)
{
	uint32_t feature_mask_high = 0, feature_mask_low = 0;
	struct smu_feature *feature = &smu->smu_feature;
	int ret = 0;

	if (!feature_mask || num < 2)
		return -EINVAL;

	if (bitmap_empty(feature->enabled, feature->feature_num)) {
		ret = smu_cmn_send_smc_msg(smu, SMU_MSG_GetEnabledSmuFeaturesHigh, &feature_mask_high);
		if (ret)
			return ret;

		ret = smu_cmn_send_smc_msg(smu, SMU_MSG_GetEnabledSmuFeaturesLow, &feature_mask_low);
		if (ret)
			return ret;

		feature_mask[0] = feature_mask_low;
		feature_mask[1] = feature_mask_high;
	} else {
		bitmap_copy((unsigned long *)feature_mask, feature->enabled,
			    feature->feature_num);
	}

	return ret;
}
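/*
 * The 64-bit SMU feature mask is returned as two 32-bit halves:
 * feature_mask[0] holds the low word and feature_mask[1] the high
 * word. A caller that wants a single 64-bit value can combine them,
 * as smu_cmn_set_pp_feature_mask() does below:
 *
 *	uint64_t mask = ((uint64_t)feature_mask[1] << 32) |
 *			(uint64_t)feature_mask[0];
 */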
int smu_cmn_feature_update_enable_state(struct smu_context *smu,
					uint64_t feature_mask,
					bool enabled)
{
	struct smu_feature *feature = &smu->smu_feature;
	int ret = 0;

	if (enabled) {
		ret = smu_cmn_send_smc_msg_with_param(smu,
						      SMU_MSG_EnableSmuFeaturesLow,
						      lower_32_bits(feature_mask),
						      NULL);
		if (ret)
			return ret;
		ret = smu_cmn_send_smc_msg_with_param(smu,
						      SMU_MSG_EnableSmuFeaturesHigh,
						      upper_32_bits(feature_mask),
						      NULL);
		if (ret)
			return ret;
	} else {
		ret = smu_cmn_send_smc_msg_with_param(smu,
						      SMU_MSG_DisableSmuFeaturesLow,
						      lower_32_bits(feature_mask),
						      NULL);
		if (ret)
			return ret;
		ret = smu_cmn_send_smc_msg_with_param(smu,
						      SMU_MSG_DisableSmuFeaturesHigh,
						      upper_32_bits(feature_mask),
						      NULL);
		if (ret)
			return ret;
	}

	mutex_lock(&feature->mutex);
	if (enabled)
		bitmap_or(feature->enabled, feature->enabled,
			  (unsigned long *)(&feature_mask), SMU_FEATURE_MAX);
	else
		bitmap_andnot(feature->enabled, feature->enabled,
			      (unsigned long *)(&feature_mask), SMU_FEATURE_MAX);
	mutex_unlock(&feature->mutex);

	return ret;
}
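/*
 * Enable or disable a single common feature: translate the common
 * feature mask to the ASIC-specific bit, then reuse
 * smu_cmn_feature_update_enable_state() with a one-bit mask. The
 * driver-side bitmap is kept in sync there, so later
 * smu_cmn_feature_is_enabled() checks need no extra SMU round trip.
 */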
int smu_cmn_feature_set_enabled(struct smu_context *smu,
				enum smu_feature_mask mask,
				bool enable)
{
	struct smu_feature *feature = &smu->smu_feature;
	int feature_id;

	feature_id = smu_cmn_to_asic_specific_index(smu,
						    CMN2ASIC_MAPPING_FEATURE,
						    mask);
	if (feature_id < 0)
		return -EINVAL;

	WARN_ON(feature_id > feature->feature_num);

	return smu_cmn_feature_update_enable_state(smu,
						   1ULL << feature_id,
						   enable);
}
#undef __SMU_DUMMY_MAP
#define __SMU_DUMMY_MAP(fea)	#fea
static const char *__smu_feature_names[] = {
	SMU_FEATURE_MASKS
};
static const char *smu_get_feature_name(struct smu_context *smu,
					enum smu_feature_mask feature)
{
	if (feature < 0 || feature >= SMU_FEATURE_COUNT)
		return "unknown smu feature";
	return __smu_feature_names[feature];
}
size_t smu_cmn_get_pp_feature_mask(struct smu_context *smu,
				   char *buf)
{
	uint32_t feature_mask[2] = { 0 };
	int feature_index = 0;
	uint32_t count = 0;
	int8_t sort_feature[SMU_FEATURE_COUNT];
	size_t size = 0;
	int ret = 0, i;

	ret = smu_cmn_get_enabled_mask(smu,
				       feature_mask,
				       2);
	if (ret)
		return 0;

	size = sprintf(buf + size, "features high: 0x%08x low: 0x%08x\n",
		       feature_mask[1], feature_mask[0]);

	memset(sort_feature, -1, sizeof(sort_feature));

	for (i = 0; i < SMU_FEATURE_COUNT; i++) {
		feature_index = smu_cmn_to_asic_specific_index(smu,
							       CMN2ASIC_MAPPING_FEATURE,
							       i);
		if (feature_index < 0)
			continue;

		sort_feature[feature_index] = i;
	}

	size += sprintf(buf + size, "%-2s. %-20s %-3s : %-s\n",
			"No", "Feature", "Bit", "State");

	for (i = 0; i < SMU_FEATURE_COUNT; i++) {
		if (sort_feature[i] < 0)
			continue;

		size += sprintf(buf + size, "%02d. %-20s (%2d) : %s\n",
				count++,
				smu_get_feature_name(smu, sort_feature[i]),
				i,
				!!smu_cmn_feature_is_enabled(smu, sort_feature[i]) ?
				"enabled" : "disabled");
	}

	return size;
}
int smu_cmn_set_pp_feature_mask(struct smu_context *smu,
				uint64_t new_mask)
{
	int ret = 0;
	uint32_t feature_mask[2] = { 0 };
	uint64_t feature_2_enabled = 0;
	uint64_t feature_2_disabled = 0;
	uint64_t feature_enables = 0;

	ret = smu_cmn_get_enabled_mask(smu,
				       feature_mask,
				       2);
	if (ret)
		return ret;

	feature_enables = ((uint64_t)feature_mask[1] << 32 |
			   (uint64_t)feature_mask[0]);

	feature_2_enabled = ~feature_enables & new_mask;
	feature_2_disabled = feature_enables & ~new_mask;

	if (feature_2_enabled) {
		ret = smu_cmn_feature_update_enable_state(smu,
							  feature_2_enabled,
							  true);
		if (ret)
			return ret;
	}
	if (feature_2_disabled) {
		ret = smu_cmn_feature_update_enable_state(smu,
							  feature_2_disabled,
							  false);
		if (ret)
			return ret;
	}

	return ret;
}
int smu_cmn_disable_all_features_with_exception(struct smu_context *smu,
						enum smu_feature_mask mask)
{
	uint64_t features_to_disable = U64_MAX;
	int skipped_feature_id;

	skipped_feature_id = smu_cmn_to_asic_specific_index(smu,
							    CMN2ASIC_MAPPING_FEATURE,
							    mask);
	if (skipped_feature_id < 0)
		return -EINVAL;

	features_to_disable &= ~(1ULL << skipped_feature_id);

	return smu_cmn_feature_update_enable_state(smu,
						   features_to_disable,
						   0);
}
int smu_cmn_get_smc_version(struct smu_context *smu,
			    uint32_t *if_version,
			    uint32_t *smu_version)
{
	int ret = 0;

	if (!if_version && !smu_version)
		return -EINVAL;

	if (smu->smc_fw_if_version && smu->smc_fw_version) {
		if (if_version)
			*if_version = smu->smc_fw_if_version;

		if (smu_version)
			*smu_version = smu->smc_fw_version;

		return 0;
	}

	if (if_version) {
		ret = smu_cmn_send_smc_msg(smu, SMU_MSG_GetDriverIfVersion, if_version);
		if (ret)
			return ret;

		smu->smc_fw_if_version = *if_version;
	}

	if (smu_version) {
		ret = smu_cmn_send_smc_msg(smu, SMU_MSG_GetSmuVersion, smu_version);
		if (ret)
			return ret;

		smu->smc_fw_version = *smu_version;
	}

	return ret;
}
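/*
 * Transfer a driver table to or from SMU-accessible DRAM. For driver
 * to SMU transfers the table contents are copied into the shared
 * driver table buffer and the HDP cache is flushed before the
 * firmware is told to pull them; for SMU to driver transfers the
 * flush happens after the firmware has written the buffer, before the
 * CPU copies it out.
 */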
int smu_cmn_update_table(struct smu_context *smu,
			 enum smu_table_id table_index,
			 int argument,
			 void *table_data,
			 bool drv2smu)
{
	struct smu_table_context *smu_table = &smu->smu_table;
	struct amdgpu_device *adev = smu->adev;
	struct smu_table *table = &smu_table->driver_table;
	int table_id = smu_cmn_to_asic_specific_index(smu,
						      CMN2ASIC_MAPPING_TABLE,
						      table_index);
	uint32_t table_size;
	int ret = 0;

	if (!table_data || table_id >= SMU_TABLE_COUNT || table_id < 0)
		return -EINVAL;

	table_size = smu_table->tables[table_index].size;

	if (drv2smu) {
		memcpy(table->cpu_addr, table_data, table_size);
		/*
		 * Flush HDP cache: to guarantee the content seen by
		 * the GPU is consistent with the CPU.
		 */
		amdgpu_asic_flush_hdp(adev, NULL);
	}

	ret = smu_cmn_send_smc_msg_with_param(smu, drv2smu ?
					      SMU_MSG_TransferTableDram2Smu :
					      SMU_MSG_TransferTableSmu2Dram,
					      table_id | ((argument & 0xFFFF) << 16),
					      NULL);
	if (ret)
		return ret;

	if (!drv2smu) {
		amdgpu_asic_flush_hdp(adev, NULL);
		memcpy(table_data, table->cpu_addr, table_size);
	}

	return 0;
}
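/*
 * Convenience wrappers around smu_cmn_update_table() for the
 * watermarks and pptable driver tables; both push data from the
 * driver to the SMU (drv2smu = true, argument = 0).
 */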
int smu_cmn_write_watermarks_table(struct smu_context *smu)
{
	void *watermarks_table = smu->smu_table.watermarks_table;

	if (!watermarks_table)
		return -EINVAL;

	return smu_cmn_update_table(smu,
				    SMU_TABLE_WATERMARKS,
				    0,
				    watermarks_table,
				    true);
}
int smu_cmn_write_pptable(struct smu_context *smu)
{
	void *pptable = smu->smu_table.driver_pptable;

	return smu_cmn_update_table(smu,
				    SMU_TABLE_PPTABLE,
				    0,
				    pptable,
				    true);
}
int smu_cmn_get_metrics_table_locked(struct smu_context *smu,
				     void *metrics_table,
				     bool bypass_cache)
{
	struct smu_table_context *smu_table = &smu->smu_table;
	uint32_t table_size =
		smu_table->tables[SMU_TABLE_SMU_METRICS].size;
	int ret = 0;

	if (bypass_cache ||
	    !smu_table->metrics_time ||
	    time_after(jiffies, smu_table->metrics_time + msecs_to_jiffies(1))) {
		ret = smu_cmn_update_table(smu,
					   SMU_TABLE_SMU_METRICS,
					   0,
					   smu_table->metrics_table,
					   false);
		if (ret) {
			dev_info(smu->adev->dev, "Failed to export SMU metrics table!\n");
			return ret;
		}
		smu_table->metrics_time = jiffies;
	}

	if (metrics_table)
		memcpy(metrics_table, smu_table->metrics_table, table_size);

	return 0;
}
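/*
 * The locked helper above caches the metrics table for 1 ms (see the
 * jiffies check), so back-to-back sensor reads reuse the last
 * snapshot unless bypass_cache is set. The wrapper below simply takes
 * smu->metrics_lock around it; callers already holding metrics_lock
 * must use the _locked variant directly.
 */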
int smu_cmn_get_metrics_table(struct smu_context *smu,
			      void *metrics_table,
			      bool bypass_cache)
{
	int ret = 0;

	mutex_lock(&smu->metrics_lock);
	ret = smu_cmn_get_metrics_table_locked(smu,
					       metrics_table,
					       bypass_cache);
	mutex_unlock(&smu->metrics_lock);

	return ret;
}