/*
 * Copyright 2020 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 */

#define SWSMU_CODE_LAYER_L4

#include "amdgpu.h"
#include "amdgpu_smu.h"
#include "smu_cmn.h"
#include "soc15_common.h"

/*
 * DO NOT use these for err/warn/info/debug messages.
 * Use dev_err, dev_warn, dev_info and dev_dbg instead.
 * They are more MGPU friendly.
 */
#undef pr_err
#undef pr_warn
#undef pr_info
#undef pr_debug

/*
 * Although these are defined in each ASIC-specific header file, they
 * share the same definitions and values, which makes common APIs for
 * issuing SMC messages possible across all ASICs.
 */
#define mmMP1_SMN_C2PMSG_66			0x0282
#define mmMP1_SMN_C2PMSG_66_BASE_IDX		0

#define mmMP1_SMN_C2PMSG_82			0x0292
#define mmMP1_SMN_C2PMSG_82_BASE_IDX		0

#define mmMP1_SMN_C2PMSG_90			0x029a
#define mmMP1_SMN_C2PMSG_90_BASE_IDX		0

#define MP1_C2PMSG_90__CONTENT_MASK		0xFFFFFFFFL

#undef __SMU_DUMMY_MAP
#define __SMU_DUMMY_MAP(type)	#type
static const char *__smu_message_names[] = {
	SMU_MESSAGE_TYPES
};

static const char *smu_get_message_name(struct smu_context *smu,
					enum smu_message_type type)
{
	if (type < 0 || type >= SMU_MSG_MAX_COUNT)
		return "unknown smu message";

	return __smu_message_names[type];
}

static void smu_cmn_read_arg(struct smu_context *smu,
			     uint32_t *arg)
{
	struct amdgpu_device *adev = smu->adev;

	*arg = RREG32_SOC15(MP1, 0, mmMP1_SMN_C2PMSG_82);
}
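
/*
 * Poll C2PMSG_90 until the SMU firmware posts a non-zero response,
 * giving up after 20x the device's usec timeout (1us delay per
 * iteration). Returns the raw register value on response, -ETIME on
 * timeout.
 */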
int smu_cmn_wait_for_response(struct smu_context *smu)
{
	struct amdgpu_device *adev = smu->adev;
	uint32_t cur_value, i, timeout = adev->usec_timeout * 20;

	for (i = 0; i < timeout; i++) {
		cur_value = RREG32_SOC15(MP1, 0, mmMP1_SMN_C2PMSG_90);
		if ((cur_value & MP1_C2PMSG_90__CONTENT_MASK) != 0)
			return cur_value;

		udelay(1);
	}

	/* timeout: the SMU never posted a response */
	if (i == timeout)
		return -ETIME;

	return RREG32_SOC15(MP1, 0, mmMP1_SMN_C2PMSG_90);
}
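
/*
 * Post a message to the SMU without waiting for its response: check
 * that the previous message has completed, clear the response
 * register, then write the argument and finally the message index.
 */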
int smu_cmn_send_msg_without_waiting(struct smu_context *smu,
				     uint16_t msg, uint32_t param)
{
	struct amdgpu_device *adev = smu->adev;
	int ret;

	ret = smu_cmn_wait_for_response(smu);
	if (ret != 0x1) {
		dev_err(adev->dev, "Msg issuing pre-check failed and "
			"SMU may not be in the right state!\n");
		if (ret != -ETIME)
			ret = -EIO;
		return ret;
	}

	WREG32_SOC15(MP1, 0, mmMP1_SMN_C2PMSG_90, 0);
	WREG32_SOC15(MP1, 0, mmMP1_SMN_C2PMSG_82, param);
	WREG32_SOC15(MP1, 0, mmMP1_SMN_C2PMSG_66, msg);

	return 0;
}
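
/*
 * Send a message with one 32-bit parameter and wait for the response
 * under message_lock; the response argument is read back into
 * @read_arg when the caller asks for it. Messages rejected with
 * -EACCES (e.g. not valid for this ASIC under SR-IOV) are silently
 * treated as success.
 */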
int smu_cmn_send_smc_msg_with_param(struct smu_context *smu,
				    enum smu_message_type msg,
				    uint32_t param,
				    uint32_t *read_arg)
{
	struct amdgpu_device *adev = smu->adev;
	int ret = 0, index = 0;

	if (smu->adev->in_pci_err_recovery)
		return 0;

	index = smu_cmn_to_asic_specific_index(smu,
					       CMN2ASIC_MAPPING_MSG,
					       msg);
	if (index < 0)
		return index == -EACCES ? 0 : index;

	mutex_lock(&smu->message_lock);
	ret = smu_cmn_send_msg_without_waiting(smu, (uint16_t)index, param);
	if (ret)
		goto out;

	ret = smu_cmn_wait_for_response(smu);
	if (ret != 0x1) {
		if (ret == -ETIME) {
			dev_err(adev->dev, "message: %15s (%d) \tparam: 0x%08x timed out (no response)\n",
				smu_get_message_name(smu, msg), index, param);
		} else {
			dev_err(adev->dev, "failed to send message: %15s (%d) \tparam: 0x%08x, response %#x\n",
				smu_get_message_name(smu, msg), index, param,
				ret);
			ret = -EIO;
		}
		goto out;
	}

	if (read_arg)
		smu_cmn_read_arg(smu, read_arg);

	ret = 0; /* 0 as driver return value */
out:
	mutex_unlock(&smu->message_lock);

	return ret;
}

int smu_cmn_send_smc_msg(struct smu_context *smu,
			 enum smu_message_type msg,
			 uint32_t *read_arg)
{
	return smu_cmn_send_smc_msg_with_param(smu,
					       msg,
					       0,
					       read_arg);
}
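
/*
 * Translate a common message/clock/feature/table/power-source/workload
 * index into its ASIC-specific counterpart using the mapping tables
 * the ASIC installed in smu_context.
 */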
int smu_cmn_to_asic_specific_index(struct smu_context *smu,
				   enum smu_cmn2asic_mapping_type type,
				   uint32_t index)
{
	struct cmn2asic_msg_mapping msg_mapping;
	struct cmn2asic_mapping mapping;

	switch (type) {
	case CMN2ASIC_MAPPING_MSG:
		if (index >= SMU_MSG_MAX_COUNT ||
		    !smu->message_map)
			return -EINVAL;

		msg_mapping = smu->message_map[index];
		if (!msg_mapping.valid_mapping)
			return -EINVAL;

		if (amdgpu_sriov_vf(smu->adev) &&
		    !msg_mapping.valid_in_vf)
			return -EACCES;

		return msg_mapping.map_to;

	case CMN2ASIC_MAPPING_CLK:
		if (index >= SMU_CLK_COUNT ||
		    !smu->clock_map)
			return -EINVAL;

		mapping = smu->clock_map[index];
		if (!mapping.valid_mapping)
			return -EINVAL;

		return mapping.map_to;

	case CMN2ASIC_MAPPING_FEATURE:
		if (index >= SMU_FEATURE_COUNT ||
		    !smu->feature_map)
			return -EINVAL;

		mapping = smu->feature_map[index];
		if (!mapping.valid_mapping)
			return -EINVAL;

		return mapping.map_to;

	case CMN2ASIC_MAPPING_TABLE:
		if (index >= SMU_TABLE_COUNT ||
		    !smu->table_map)
			return -EINVAL;

		mapping = smu->table_map[index];
		if (!mapping.valid_mapping)
			return -EINVAL;

		return mapping.map_to;

	case CMN2ASIC_MAPPING_PWR:
		if (index >= SMU_POWER_SOURCE_COUNT ||
		    !smu->pwr_src_map)
			return -EINVAL;

		mapping = smu->pwr_src_map[index];
		if (!mapping.valid_mapping)
			return -EINVAL;

		return mapping.map_to;

	case CMN2ASIC_MAPPING_WORKLOAD:
		if (index > PP_SMC_POWER_PROFILE_CUSTOM ||
		    !smu->workload_map)
			return -EINVAL;

		mapping = smu->workload_map[index];
		if (!mapping.valid_mapping)
			return -EINVAL;

		return mapping.map_to;

	default:
		return -EINVAL;
	}
}
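
/* Test the ASIC-mapped feature bit in the driver's "supported" bitmap. */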
int smu_cmn_feature_is_supported(struct smu_context *smu,
				 enum smu_feature_mask mask)
{
	struct smu_feature *feature = &smu->smu_feature;
	int feature_id;
	int ret = 0;

	feature_id = smu_cmn_to_asic_specific_index(smu,
						    CMN2ASIC_MAPPING_FEATURE,
						    mask);
	if (feature_id < 0)
		return 0;

	WARN_ON(feature_id > feature->feature_num);

	mutex_lock(&feature->mutex);
	ret = test_bit(feature_id, feature->supported);
	mutex_unlock(&feature->mutex);

	return ret;
}
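
/*
 * Test the ASIC-mapped feature bit in the driver's "enabled" bitmap.
 * Pre-Van Gogh APUs are simply treated as always enabled here.
 */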
int smu_cmn_feature_is_enabled(struct smu_context *smu,
			       enum smu_feature_mask mask)
{
	struct smu_feature *feature = &smu->smu_feature;
	struct amdgpu_device *adev = smu->adev;
	int feature_id;
	int ret = 0;

	if (smu->is_apu && adev->family < AMDGPU_FAMILY_VGH)
		return 1;

	feature_id = smu_cmn_to_asic_specific_index(smu,
						    CMN2ASIC_MAPPING_FEATURE,
						    mask);
	if (feature_id < 0)
		return 0;

	WARN_ON(feature_id > feature->feature_num);

	mutex_lock(&feature->mutex);
	ret = test_bit(feature_id, feature->enabled);
	mutex_unlock(&feature->mutex);

	return ret;
}

bool smu_cmn_clk_dpm_is_enabled(struct smu_context *smu,
				enum smu_clk_type clk_type)
{
	enum smu_feature_mask feature_id = 0;

	switch (clk_type) {
	case SMU_MCLK:
	case SMU_UCLK:
		feature_id = SMU_FEATURE_DPM_UCLK_BIT;
		break;
	case SMU_GFXCLK:
	case SMU_SCLK:
		feature_id = SMU_FEATURE_DPM_GFXCLK_BIT;
		break;
	case SMU_SOCCLK:
		feature_id = SMU_FEATURE_DPM_SOCCLK_BIT;
		break;
	default:
		return true;
	}

	if (!smu_cmn_feature_is_enabled(smu, feature_id))
		return false;

	return true;
}
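
/*
 * Fetch the 64-bit enabled-feature mask as two 32-bit words
 * (feature_mask[0] = low, feature_mask[1] = high). When the driver's
 * bitmap is still empty the mask is queried from the SMU via the
 * GetEnabledSmuFeaturesLow/High messages; otherwise the cached bitmap
 * is copied out.
 */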
int smu_cmn_get_enabled_mask(struct smu_context *smu,
			     uint32_t *feature_mask,
			     uint32_t num)
{
	uint32_t feature_mask_high = 0, feature_mask_low = 0;
	struct smu_feature *feature = &smu->smu_feature;
	int ret = 0;

	if (!feature_mask || num < 2)
		return -EINVAL;

	if (bitmap_empty(feature->enabled, feature->feature_num)) {
		ret = smu_cmn_send_smc_msg(smu, SMU_MSG_GetEnabledSmuFeaturesHigh, &feature_mask_high);
		if (ret)
			return ret;

		ret = smu_cmn_send_smc_msg(smu, SMU_MSG_GetEnabledSmuFeaturesLow, &feature_mask_low);
		if (ret)
			return ret;

		feature_mask[0] = feature_mask_low;
		feature_mask[1] = feature_mask_high;
	} else {
		bitmap_copy((unsigned long *)feature_mask, feature->enabled,
			    feature->feature_num);
	}

	return ret;
}
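
/*
 * Variant of smu_cmn_get_enabled_mask() for ASICs whose firmware
 * reports both halves through a single GetEnabledSmuFeatures message
 * with a high/low selector parameter.
 */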
int smu_cmn_get_enabled_32_bits_mask(struct smu_context *smu,
				     uint32_t *feature_mask,
				     uint32_t num)
{
	uint32_t feature_mask_en_low = 0;
	uint32_t feature_mask_en_high = 0;
	struct smu_feature *feature = &smu->smu_feature;
	int ret = 0;

	if (!feature_mask || num < 2)
		return -EINVAL;

	if (bitmap_empty(feature->enabled, feature->feature_num)) {
		ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_GetEnabledSmuFeatures, 0,
						      &feature_mask_en_low);
		if (ret)
			return ret;

		ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_GetEnabledSmuFeatures, 1,
						      &feature_mask_en_high);
		if (ret)
			return ret;

		feature_mask[0] = feature_mask_en_low;
		feature_mask[1] = feature_mask_en_high;
	} else {
		bitmap_copy((unsigned long *)feature_mask, feature->enabled,
			    feature->feature_num);
	}

	return ret;
}
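
/*
 * Enable or disable the features in @feature_mask on the SMU side as
 * two 32-bit halves, then mirror the change into the driver's enabled
 * bitmap.
 */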
int smu_cmn_feature_update_enable_state(struct smu_context *smu,
					uint64_t feature_mask,
					bool enabled)
{
	struct smu_feature *feature = &smu->smu_feature;
	int ret = 0;

	if (enabled) {
		ret = smu_cmn_send_smc_msg_with_param(smu,
						      SMU_MSG_EnableSmuFeaturesLow,
						      lower_32_bits(feature_mask),
						      NULL);
		if (ret)
			return ret;
		ret = smu_cmn_send_smc_msg_with_param(smu,
						      SMU_MSG_EnableSmuFeaturesHigh,
						      upper_32_bits(feature_mask),
						      NULL);
		if (ret)
			return ret;
	} else {
		ret = smu_cmn_send_smc_msg_with_param(smu,
						      SMU_MSG_DisableSmuFeaturesLow,
						      lower_32_bits(feature_mask),
						      NULL);
		if (ret)
			return ret;
		ret = smu_cmn_send_smc_msg_with_param(smu,
						      SMU_MSG_DisableSmuFeaturesHigh,
						      upper_32_bits(feature_mask),
						      NULL);
		if (ret)
			return ret;
	}

	mutex_lock(&feature->mutex);
	if (enabled)
		bitmap_or(feature->enabled, feature->enabled,
			  (unsigned long *)(&feature_mask), SMU_FEATURE_MAX);
	else
		bitmap_andnot(feature->enabled, feature->enabled,
			      (unsigned long *)(&feature_mask), SMU_FEATURE_MAX);
	mutex_unlock(&feature->mutex);

	return ret;
}

int smu_cmn_feature_set_enabled(struct smu_context *smu,
				enum smu_feature_mask mask,
				bool enable)
{
	struct smu_feature *feature = &smu->smu_feature;
	int feature_id;

	feature_id = smu_cmn_to_asic_specific_index(smu,
						    CMN2ASIC_MAPPING_FEATURE,
						    mask);
	if (feature_id < 0)
		return -EINVAL;

	WARN_ON(feature_id > feature->feature_num);

	return smu_cmn_feature_update_enable_state(smu,
						   1ULL << feature_id,
						   enable);
}

#undef __SMU_DUMMY_MAP
#define __SMU_DUMMY_MAP(fea)	#fea
static const char *__smu_feature_names[] = {
	SMU_FEATURE_MASKS
};

static const char *smu_get_feature_name(struct smu_context *smu,
					enum smu_feature_mask feature)
{
	if (feature < 0 || feature >= SMU_FEATURE_COUNT)
		return "unknown smu feature";
	return __smu_feature_names[feature];
}
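
/*
 * Print the enabled-feature mask plus a per-feature table into @buf,
 * ordered by the ASIC-specific bit position. Returns the number of
 * bytes written, or 0 when the mask cannot be retrieved.
 */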
size_t smu_cmn_get_pp_feature_mask(struct smu_context *smu,
				   char *buf)
{
	uint32_t feature_mask[2] = { 0 };
	int feature_index = 0;
	uint32_t count = 0;
	int8_t sort_feature[SMU_FEATURE_COUNT];
	size_t size = 0;
	int ret = 0, i;

	if (!smu->is_apu) {
		ret = smu_cmn_get_enabled_mask(smu,
					       feature_mask,
					       2);
		if (ret)
			return 0;
	} else {
		ret = smu_cmn_get_enabled_32_bits_mask(smu,
						       feature_mask,
						       2);
		if (ret)
			return 0;
	}

	size = sprintf(buf + size, "features high: 0x%08x low: 0x%08x\n",
		       feature_mask[1], feature_mask[0]);

	memset(sort_feature, -1, sizeof(sort_feature));

	for (i = 0; i < SMU_FEATURE_COUNT; i++) {
		feature_index = smu_cmn_to_asic_specific_index(smu,
							       CMN2ASIC_MAPPING_FEATURE,
							       i);
		if (feature_index < 0)
			continue;

		sort_feature[feature_index] = i;
	}

	size += sprintf(buf + size, "%-2s. %-20s  %-3s : %-s\n",
			"No", "Feature", "Bit", "State");

	for (i = 0; i < SMU_FEATURE_COUNT; i++) {
		if (sort_feature[i] < 0)
			continue;

		size += sprintf(buf + size, "%02d. %-20s (%2d) : %s\n",
				count++,
				smu_get_feature_name(smu, sort_feature[i]),
				i,
				!!smu_cmn_feature_is_enabled(smu, sort_feature[i]) ?
				"enabled" : "disabled");
	}

	return size;
}
int smu_cmn_set_pp_feature_mask(struct smu_context *smu,
				uint64_t new_mask)
{
	int ret = 0;
	uint32_t feature_mask[2] = { 0 };
	uint64_t feature_2_enabled = 0;
	uint64_t feature_2_disabled = 0;
	uint64_t feature_enables = 0;

	ret = smu_cmn_get_enabled_mask(smu,
				       feature_mask,
				       2);
	if (ret)
		return ret;

	feature_enables = ((uint64_t)feature_mask[1] << 32 |
			   (uint64_t)feature_mask[0]);

	feature_2_enabled = ~feature_enables & new_mask;
	feature_2_disabled = feature_enables & ~new_mask;

	if (feature_2_enabled) {
		ret = smu_cmn_feature_update_enable_state(smu,
							  feature_2_enabled,
							  true);
		if (ret)
			return ret;
	}
	if (feature_2_disabled) {
		ret = smu_cmn_feature_update_enable_state(smu,
							  feature_2_disabled,
							  false);
		if (ret)
			return ret;
	}

	return ret;
}
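
/*
 * Disable every SMU feature except the one named by @mask; if @mask
 * does not map onto this ASIC, nothing is disabled and -EINVAL is
 * returned.
 */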
int smu_cmn_disable_all_features_with_exception(struct smu_context *smu,
						enum smu_feature_mask mask)
{
	uint64_t features_to_disable = U64_MAX;
	int skipped_feature_id;

	skipped_feature_id = smu_cmn_to_asic_specific_index(smu,
							    CMN2ASIC_MAPPING_FEATURE,
							    mask);
	if (skipped_feature_id < 0)
		return -EINVAL;

	features_to_disable &= ~(1ULL << skipped_feature_id);

	return smu_cmn_feature_update_enable_state(smu,
						   features_to_disable,
						   0);
}

int smu_cmn_get_smc_version(struct smu_context *smu,
			    uint32_t *if_version,
			    uint32_t *smu_version)
{
	int ret = 0;

	if (!if_version && !smu_version)
		return -EINVAL;

	if (smu->smc_fw_if_version && smu->smc_fw_version) {
		if (if_version)
			*if_version = smu->smc_fw_if_version;

		if (smu_version)
			*smu_version = smu->smc_fw_version;

		return 0;
	}

	if (if_version) {
		ret = smu_cmn_send_smc_msg(smu, SMU_MSG_GetDriverIfVersion, if_version);
		if (ret)
			return ret;

		smu->smc_fw_if_version = *if_version;
	}

	if (smu_version) {
		ret = smu_cmn_send_smc_msg(smu, SMU_MSG_GetSmuVersion, smu_version);
		if (ret)
			return ret;

		smu->smc_fw_version = *smu_version;
	}

	return ret;
}
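
/*
 * Move a table between the driver's DMA-visible staging buffer and
 * SMU-private memory, flushing or invalidating the HDP cache around
 * the CPU-side copy so both sides see consistent contents.
 */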
int smu_cmn_update_table(struct smu_context *smu,
			 enum smu_table_id table_index,
			 int argument,
			 void *table_data,
			 bool drv2smu)
{
	struct smu_table_context *smu_table = &smu->smu_table;
	struct amdgpu_device *adev = smu->adev;
	struct smu_table *table = &smu_table->driver_table;
	int table_id = smu_cmn_to_asic_specific_index(smu,
						      CMN2ASIC_MAPPING_TABLE,
						      table_index);
	uint32_t table_size;
	int ret = 0;

	if (!table_data || table_id >= SMU_TABLE_COUNT || table_id < 0)
		return -EINVAL;

	table_size = smu_table->tables[table_index].size;

	if (drv2smu) {
		memcpy(table->cpu_addr, table_data, table_size);
		/*
		 * Flush hdp cache: to guarantee the content the GPU sees
		 * is consistent with what the CPU wrote.
		 */
		amdgpu_asic_flush_hdp(adev, NULL);
	}

	ret = smu_cmn_send_smc_msg_with_param(smu, drv2smu ?
					      SMU_MSG_TransferTableDram2Smu :
					      SMU_MSG_TransferTableSmu2Dram,
					      table_id | ((argument & 0xFFFF) << 16),
					      NULL);
	if (ret)
		return ret;

	if (!drv2smu) {
		amdgpu_asic_invalidate_hdp(adev, NULL);
		memcpy(table_data, table->cpu_addr, table_size);
	}

	return 0;
}

int smu_cmn_write_watermarks_table(struct smu_context *smu)
{
	void *watermarks_table = smu->smu_table.watermarks_table;

	if (!watermarks_table)
		return -EINVAL;

	return smu_cmn_update_table(smu,
				    SMU_TABLE_WATERMARKS,
				    0,
				    watermarks_table,
				    true);
}

int smu_cmn_write_pptable(struct smu_context *smu)
{
	void *pptable = smu->smu_table.driver_pptable;

	return smu_cmn_update_table(smu,
				    SMU_TABLE_PPTABLE,
				    0,
				    pptable,
				    true);
}
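
/*
 * Refresh the cached SMU metrics table when the caller bypasses the
 * cache or the cached copy is older than 1 ms, then copy it into
 * @metrics_table. Caller must hold smu->metrics_lock.
 */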
int smu_cmn_get_metrics_table_locked(struct smu_context *smu,
				     void *metrics_table,
				     bool bypass_cache)
{
	struct smu_table_context *smu_table = &smu->smu_table;
	uint32_t table_size =
		smu_table->tables[SMU_TABLE_SMU_METRICS].size;
	int ret = 0;

	if (bypass_cache ||
	    !smu_table->metrics_time ||
	    time_after(jiffies, smu_table->metrics_time + msecs_to_jiffies(1))) {
		ret = smu_cmn_update_table(smu,
					   SMU_TABLE_SMU_METRICS,
					   0,
					   smu_table->metrics_table,
					   false);
		if (ret) {
			dev_info(smu->adev->dev, "Failed to export SMU metrics table!\n");
			return ret;
		}
		smu_table->metrics_time = jiffies;
	}

	if (metrics_table)
		memcpy(metrics_table, smu_table->metrics_table, table_size);

	return 0;
}

int smu_cmn_get_metrics_table(struct smu_context *smu,
			      void *metrics_table,
			      bool bypass_cache)
{
	int ret = 0;

	mutex_lock(&smu->metrics_lock);
	ret = smu_cmn_get_metrics_table_locked(smu,
					       metrics_table,
					       bypass_cache);
	mutex_unlock(&smu->metrics_lock);

	return ret;
}
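
/*
 * Initialize a soft gpu_metrics header: pick the structure size from
 * the (format, content) revision pair, fill the body with 0xFF, and
 * stamp the revisions and size.
 */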
void smu_cmn_init_soft_gpu_metrics(void *table, uint8_t frev, uint8_t crev)
{
	struct metrics_table_header *header = (struct metrics_table_header *)table;
	uint16_t structure_size;

#define METRICS_VERSION(a, b)	((a << 16) | b)

	switch (METRICS_VERSION(frev, crev)) {
	case METRICS_VERSION(1, 0):
		structure_size = sizeof(struct gpu_metrics_v1_0);
		break;
	case METRICS_VERSION(1, 1):
		structure_size = sizeof(struct gpu_metrics_v1_1);
		break;
	case METRICS_VERSION(2, 0):
		structure_size = sizeof(struct gpu_metrics_v2_0);
		break;
	case METRICS_VERSION(2, 1):
		structure_size = sizeof(struct gpu_metrics_v2_1);
		break;
	default:
		return;
	}

#undef METRICS_VERSION

	memset(header, 0xFF, structure_size);

	header->format_revision = frev;
	header->content_revision = crev;
	header->structure_size = structure_size;
}
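
/* Ask the MP1 firmware to prepare for the requested state transition. */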
int smu_cmn_set_mp1_state(struct smu_context *smu,
			  enum pp_mp1_state mp1_state)
{
	enum smu_message_type msg;
	int ret;

	switch (mp1_state) {
	case PP_MP1_STATE_SHUTDOWN:
		msg = SMU_MSG_PrepareMp1ForShutdown;
		break;
	case PP_MP1_STATE_UNLOAD:
		msg = SMU_MSG_PrepareMp1ForUnload;
		break;
	case PP_MP1_STATE_RESET:
		msg = SMU_MSG_PrepareMp1ForReset;
		break;
	case PP_MP1_STATE_NONE:
	default:
		return 0;
	}

	ret = smu_cmn_send_smc_msg(smu, msg, NULL);
	if (ret)
		dev_err(smu->adev->dev, "[PrepareMp1] Failed!\n");

	return ret;
}