/*
 * Copyright 2019 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 */
#include <linux/firmware.h>
#include <linux/pci.h>
#include "amdgpu_smu.h"
#include "smu_internal.h"
#include "smu_v11_0.h"
#include "smu_v12_0.h"
#include "vega20_ppt.h"
#include "arcturus_ppt.h"
#include "navi10_ppt.h"
#include "renoir_ppt.h"
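/*
 * Redefining __SMU_DUMMY_MAP() to stringify its argument lets the message
 * and feature macro lists from amdgpu_smu.h expand into name tables indexed
 * by the corresponding enum values.
 */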
#undef __SMU_DUMMY_MAP
#define __SMU_DUMMY_MAP(type) #type
static const char *__smu_message_names[] = {
const char *smu_get_message_name(struct smu_context *smu, enum smu_message_type type)
	if (type < 0 || type >= SMU_MSG_MAX_COUNT)
		return "unknown smu message";
	return __smu_message_names[type];
#undef __SMU_DUMMY_MAP
#define __SMU_DUMMY_MAP(fea) #fea
static const char *__smu_feature_names[] = {
const char *smu_get_feature_name(struct smu_context *smu, enum smu_feature_mask feature)
	if (feature < 0 || feature >= SMU_FEATURE_COUNT)
		return "unknown smu feature";
	return __smu_feature_names[feature];
size_t smu_sys_get_pp_feature_mask(struct smu_context *smu, char *buf)
	uint32_t feature_mask[2] = { 0 };
	int32_t feature_index = 0;
	uint32_t sort_feature[SMU_FEATURE_COUNT];
	uint64_t hw_feature_count = 0;
	mutex_lock(&smu->mutex);
	ret = smu_feature_get_enabled_mask(smu, feature_mask, 2);
	size = sprintf(buf + size, "features high: 0x%08x low: 0x%08x\n",
			feature_mask[1], feature_mask[0]);
	for (i = 0; i < SMU_FEATURE_COUNT; i++) {
		feature_index = smu_feature_get_index(smu, i);
		if (feature_index < 0)
		sort_feature[feature_index] = i;
	for (i = 0; i < hw_feature_count; i++) {
		size += sprintf(buf + size, "%02d. %-20s (%2d) : %s\n",
				smu_get_feature_name(smu, sort_feature[i]),
				!!smu_feature_is_enabled(smu, sort_feature[i]) ?
				"enabled" : "disabled");
	mutex_unlock(&smu->mutex);
static int smu_feature_update_enable_state(struct smu_context *smu,
					   uint64_t feature_mask,
	struct smu_feature *feature = &smu->smu_feature;
	uint32_t feature_low = 0, feature_high = 0;
	if (!smu->pm_enabled)
	feature_low = (feature_mask >> 0) & 0xffffffff;
	feature_high = (feature_mask >> 32) & 0xffffffff;
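	/*
	 * The SMU messages take 32-bit parameters, so the 64-bit feature mask
	 * is sent as separate low/high halves.
	 */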
		ret = smu_send_smc_msg_with_param(smu, SMU_MSG_EnableSmuFeaturesLow,
		ret = smu_send_smc_msg_with_param(smu, SMU_MSG_EnableSmuFeaturesHigh,
		ret = smu_send_smc_msg_with_param(smu, SMU_MSG_DisableSmuFeaturesLow,
		ret = smu_send_smc_msg_with_param(smu, SMU_MSG_DisableSmuFeaturesHigh,
	mutex_lock(&feature->mutex);
		bitmap_or(feature->enabled, feature->enabled,
			  (unsigned long *)(&feature_mask), SMU_FEATURE_MAX);
		bitmap_andnot(feature->enabled, feature->enabled,
			      (unsigned long *)(&feature_mask), SMU_FEATURE_MAX);
	mutex_unlock(&feature->mutex);
int smu_sys_set_pp_feature_mask(struct smu_context *smu, uint64_t new_mask)
	uint32_t feature_mask[2] = { 0 };
	uint64_t feature_2_enabled = 0;
	uint64_t feature_2_disabled = 0;
	uint64_t feature_enables = 0;
	mutex_lock(&smu->mutex);
	ret = smu_feature_get_enabled_mask(smu, feature_mask, 2);
	feature_enables = ((uint64_t)feature_mask[1] << 32 | (uint64_t)feature_mask[0]);
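	/*
	 * Features set in new_mask but not currently enabled must be turned
	 * on; features currently enabled but cleared in new_mask must be
	 * turned off.
	 */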
	feature_2_enabled = ~feature_enables & new_mask;
	feature_2_disabled = feature_enables & ~new_mask;
	if (feature_2_enabled) {
		ret = smu_feature_update_enable_state(smu, feature_2_enabled, true);
	if (feature_2_disabled) {
		ret = smu_feature_update_enable_state(smu, feature_2_disabled, false);
	mutex_unlock(&smu->mutex);
int smu_get_smc_version(struct smu_context *smu, uint32_t *if_version, uint32_t *smu_version)
	if (!if_version && !smu_version)
	ret = smu_send_smc_msg(smu, SMU_MSG_GetDriverIfVersion, if_version);
	ret = smu_send_smc_msg(smu, SMU_MSG_GetSmuVersion, smu_version);
int smu_set_soft_freq_range(struct smu_context *smu, enum smu_clk_type clk_type,
			    uint32_t min, uint32_t max, bool lock_needed)
	if (!smu_clk_dpm_is_enabled(smu, clk_type))
		mutex_lock(&smu->mutex);
	ret = smu_set_soft_freq_limited_range(smu, clk_type, min, max);
		mutex_unlock(&smu->mutex);
int smu_set_hard_freq_range(struct smu_context *smu, enum smu_clk_type clk_type,
			    uint32_t min, uint32_t max)
	int ret = 0, clk_id = 0;
	if (min <= 0 && max <= 0)
	if (!smu_clk_dpm_is_enabled(smu, clk_type))
	clk_id = smu_clk_get_index(smu, clk_type);
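	/*
	 * The SetHardMaxByFreq/SetHardMinByFreq parameter packs the clock ID
	 * into the upper 16 bits and the frequency (in MHz) into the lower
	 * 16 bits.
	 */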
		param = (uint32_t)((clk_id << 16) | (max & 0xffff));
		ret = smu_send_smc_msg_with_param(smu, SMU_MSG_SetHardMaxByFreq,
		param = (uint32_t)((clk_id << 16) | (min & 0xffff));
		ret = smu_send_smc_msg_with_param(smu, SMU_MSG_SetHardMinByFreq,
int smu_get_dpm_freq_range(struct smu_context *smu, enum smu_clk_type clk_type,
			   uint32_t *min, uint32_t *max, bool lock_needed)
	uint32_t clock_limit;
		mutex_lock(&smu->mutex);
	if (!smu_clk_dpm_is_enabled(smu, clk_type)) {
			clock_limit = smu->smu_table.boot_values.uclk;
			clock_limit = smu->smu_table.boot_values.gfxclk;
			clock_limit = smu->smu_table.boot_values.socclk;
		/* boot clock values are in 10 kHz units; convert to MHz */
			*min = clock_limit / 100;
			*max = clock_limit / 100;
	/*
	 * TODO: Have each ASIC (ASIC ppt funcs) control the callbacks exposed
	 * to the core driver, with helpers for the functionality common to
	 * SMU v11.x and SMU v12.x.
	 */
	ret = smu_get_dpm_ultimate_freq(smu, clk_type, min, max);
		mutex_unlock(&smu->mutex);
int smu_get_dpm_freq_by_index(struct smu_context *smu, enum smu_clk_type clk_type,
			      uint16_t level, uint32_t *value)
	int ret = 0, clk_id = 0;
	if (!smu_clk_dpm_is_enabled(smu, clk_type))
	clk_id = smu_clk_get_index(smu, clk_type);
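	/*
	 * GetDpmFreqByIndex packs the clock ID into the upper 16 bits and the
	 * requested level index into the lower 16 bits; index 0xff requests
	 * the number of DPM levels instead of a frequency.
	 */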
	param = (uint32_t)(((clk_id & 0xffff) << 16) | (level & 0xffff));
	ret = smu_send_smc_msg_with_param(smu, SMU_MSG_GetDpmFreqByIndex,
	/*
	 * BIT31: 0 - fine grained DPM, 1 - discrete DPM.
	 * The flag is not supported for now, so mask it off.
	 */
	*value = param & 0x7fffffff;
int smu_get_dpm_level_count(struct smu_context *smu, enum smu_clk_type clk_type,
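	/* Level index 0xff makes the SMU return the DPM level count. */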
	return smu_get_dpm_freq_by_index(smu, clk_type, 0xff, value);
int smu_get_dpm_level_range(struct smu_context *smu, enum smu_clk_type clk_type,
			    uint32_t *min_value, uint32_t *max_value)
	uint32_t level_count = 0;
	if (!min_value && !max_value)
	/* by default, use the level-0 clock value as the min value */
	ret = smu_get_dpm_freq_by_index(smu, clk_type, 0, min_value);
	ret = smu_get_dpm_level_count(smu, clk_type, &level_count);
	ret = smu_get_dpm_freq_by_index(smu, clk_type, level_count - 1, max_value);
bool smu_clk_dpm_is_enabled(struct smu_context *smu, enum smu_clk_type clk_type)
	enum smu_feature_mask feature_id = 0;
		feature_id = SMU_FEATURE_DPM_UCLK_BIT;
		feature_id = SMU_FEATURE_DPM_GFXCLK_BIT;
		feature_id = SMU_FEATURE_DPM_SOCCLK_BIT;
	if (!smu_feature_is_enabled(smu, feature_id)) {
/**
 * smu_dpm_set_power_gate - power gate/ungate the specific IP block
 *
 * @smu: smu_context pointer
 * @block_type: the IP block to power gate/ungate
 * @gate: to power gate if true, ungate otherwise
 *
 * This API uses no smu->mutex lock protection because:
 * 1. It is either called by other IP blocks (gfx/sdma/vcn/uvd/vce),
 *    and the caller guarantees the call is free of race conditions.
 * 2. Or it is called on a user request to change
 *    power_dpm_force_performance_level, in which case smu->mutex is already
 *    held by the parent API smu_force_performance_level in the call path.
 */
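/*
 * Example (illustrative only, not an actual call site): gate SDMA when the
 * engine goes idle, ungate it before submitting work:
 *
 *	smu_dpm_set_power_gate(&adev->smu, AMD_IP_BLOCK_TYPE_SDMA, true);
 *	...
 *	smu_dpm_set_power_gate(&adev->smu, AMD_IP_BLOCK_TYPE_SDMA, false);
 */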
int smu_dpm_set_power_gate(struct smu_context *smu, uint32_t block_type,
	switch (block_type) {
	case AMD_IP_BLOCK_TYPE_UVD:
		ret = smu_dpm_set_uvd_enable(smu, !gate);
	case AMD_IP_BLOCK_TYPE_VCE:
		ret = smu_dpm_set_vce_enable(smu, !gate);
	case AMD_IP_BLOCK_TYPE_GFX:
		ret = smu_gfx_off_control(smu, gate);
	case AMD_IP_BLOCK_TYPE_SDMA:
		ret = smu_powergate_sdma(smu, gate);
	case AMD_IP_BLOCK_TYPE_JPEG:
		ret = smu_dpm_set_jpeg_enable(smu, !gate);
int smu_get_power_num_states(struct smu_context *smu,
			     struct pp_states_info *state_info)
	/* power states are not supported; report a single default state */
	memset(state_info, 0, sizeof(struct pp_states_info));
	state_info->nums = 1;
	state_info->states[0] = POWER_STATE_TYPE_DEFAULT;
int smu_common_read_sensor(struct smu_context *smu, enum amd_pp_sensors sensor,
			   void *data, uint32_t *size)
	struct smu_power_context *smu_power = &smu->smu_power;
	struct smu_power_gate *power_gate = &smu_power->power_gate;
	case AMDGPU_PP_SENSOR_STABLE_PSTATE_SCLK:
		*((uint32_t *)data) = smu->pstate_sclk;
	case AMDGPU_PP_SENSOR_STABLE_PSTATE_MCLK:
		*((uint32_t *)data) = smu->pstate_mclk;
	case AMDGPU_PP_SENSOR_ENABLED_SMC_FEATURES_MASK:
		ret = smu_feature_get_enabled_mask(smu, (uint32_t *)data, 2);
	case AMDGPU_PP_SENSOR_UVD_POWER:
		*(uint32_t *)data = smu_feature_is_enabled(smu, SMU_FEATURE_DPM_UVD_BIT) ? 1 : 0;
	case AMDGPU_PP_SENSOR_VCE_POWER:
		*(uint32_t *)data = smu_feature_is_enabled(smu, SMU_FEATURE_DPM_VCE_BIT) ? 1 : 0;
	case AMDGPU_PP_SENSOR_VCN_POWER_STATE:
		*(uint32_t *)data = power_gate->vcn_gated ? 0 : 1;
int smu_update_table(struct smu_context *smu, enum smu_table_id table_index, int argument,
		     void *table_data, bool drv2smu)
	struct smu_table_context *smu_table = &smu->smu_table;
	struct amdgpu_device *adev = smu->adev;
	struct smu_table *table = &smu_table->driver_table;
	int table_id = smu_table_get_index(smu, table_index);
	if (!table_data || table_id >= SMU_TABLE_COUNT || table_id < 0)
	table_size = smu_table->tables[table_index].size;
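	/*
	 * drv2smu selects the transfer direction: true copies table_data into
	 * the shared VRAM table and asks the SMU to pull it
	 * (TransferTableDram2Smu); false asks the SMU to push the table
	 * (TransferTableSmu2Dram) and copies it back out below.
	 */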
		memcpy(table->cpu_addr, table_data, table_size);
		/*
		 * Flush the HDP cache so the table contents seen by the GPU
		 * are consistent with what the CPU wrote.
		 */
		amdgpu_asic_flush_hdp(adev, NULL);
	ret = smu_send_smc_msg_with_param(smu, drv2smu ?
					  SMU_MSG_TransferTableDram2Smu :
					  SMU_MSG_TransferTableSmu2Dram,
					  table_id | ((argument & 0xFFFF) << 16),
		amdgpu_asic_flush_hdp(adev, NULL);
		memcpy(table_data, table->cpu_addr, table_size);
bool is_support_sw_smu(struct amdgpu_device *adev)
	if (adev->asic_type == CHIP_VEGA20)
		return amdgpu_dpm == 2;
	else if (adev->asic_type >= CHIP_ARCTURUS) {
		if (amdgpu_sriov_vf(adev) && !amdgpu_sriov_is_pp_one_vf(adev))
bool is_support_sw_smu_xgmi(struct amdgpu_device *adev)
	if (!is_support_sw_smu(adev))
	if (adev->asic_type == CHIP_VEGA20)
int smu_sys_get_pp_table(struct smu_context *smu, void **table)
	struct smu_table_context *smu_table = &smu->smu_table;
	uint32_t powerplay_table_size;
	if (!smu_table->power_play_table && !smu_table->hardcode_pptable)
	mutex_lock(&smu->mutex);
	if (smu_table->hardcode_pptable)
		*table = smu_table->hardcode_pptable;
		*table = smu_table->power_play_table;
	powerplay_table_size = smu_table->power_play_table_size;
	mutex_unlock(&smu->mutex);
	return powerplay_table_size;
int smu_sys_set_pp_table(struct smu_context *smu, void *buf, size_t size)
	struct smu_table_context *smu_table = &smu->smu_table;
	ATOM_COMMON_TABLE_HEADER *header = (ATOM_COMMON_TABLE_HEADER *)buf;
	if (!smu->pm_enabled)
	if (header->usStructureSize != size) {
		pr_err("pp table size does not match!\n");
	mutex_lock(&smu->mutex);
	if (!smu_table->hardcode_pptable)
		smu_table->hardcode_pptable = kzalloc(size, GFP_KERNEL);
	if (!smu_table->hardcode_pptable) {
	memcpy(smu_table->hardcode_pptable, buf, size);
	smu_table->power_play_table = smu_table->hardcode_pptable;
	smu_table->power_play_table_size = size;
	/*
	 * A special hw_fini action (for Navi1x, the DPM disablement will be
	 * skipped) may be needed for custom pptable uploading.
	 */
	smu->uploading_custom_pp_table = true;
	ret = smu_reset(smu);
		pr_info("smu reset failed, ret = %d\n", ret);
	smu->uploading_custom_pp_table = false;
	mutex_unlock(&smu->mutex);
int smu_feature_init_dpm(struct smu_context *smu)
	struct smu_feature *feature = &smu->smu_feature;
	uint32_t allowed_feature_mask[SMU_FEATURE_MAX/32];
	if (!smu->pm_enabled)
	mutex_lock(&feature->mutex);
	bitmap_zero(feature->allowed, SMU_FEATURE_MAX);
	mutex_unlock(&feature->mutex);
	ret = smu_get_allowed_feature_mask(smu, allowed_feature_mask,
	mutex_lock(&feature->mutex);
	bitmap_or(feature->allowed, feature->allowed,
		  (unsigned long *)allowed_feature_mask,
		  feature->feature_num);
	mutex_unlock(&feature->mutex);
int smu_feature_is_enabled(struct smu_context *smu, enum smu_feature_mask mask)
	struct smu_feature *feature = &smu->smu_feature;
	feature_id = smu_feature_get_index(smu, mask);
	WARN_ON(feature_id > feature->feature_num);
	mutex_lock(&feature->mutex);
	ret = test_bit(feature_id, feature->enabled);
	mutex_unlock(&feature->mutex);
int smu_feature_set_enabled(struct smu_context *smu, enum smu_feature_mask mask,
	struct smu_feature *feature = &smu->smu_feature;
	feature_id = smu_feature_get_index(smu, mask);
	WARN_ON(feature_id > feature->feature_num);
	return smu_feature_update_enable_state(smu,
int smu_feature_is_supported(struct smu_context *smu, enum smu_feature_mask mask)
	struct smu_feature *feature = &smu->smu_feature;
	feature_id = smu_feature_get_index(smu, mask);
	WARN_ON(feature_id > feature->feature_num);
	mutex_lock(&feature->mutex);
	ret = test_bit(feature_id, feature->supported);
	mutex_unlock(&feature->mutex);
int smu_feature_set_supported(struct smu_context *smu,
			      enum smu_feature_mask mask,
	struct smu_feature *feature = &smu->smu_feature;
	feature_id = smu_feature_get_index(smu, mask);
	WARN_ON(feature_id > feature->feature_num);
	mutex_lock(&feature->mutex);
		test_and_set_bit(feature_id, feature->supported);
		test_and_clear_bit(feature_id, feature->supported);
	mutex_unlock(&feature->mutex);
static int smu_set_funcs(struct amdgpu_device *adev)
	struct smu_context *smu = &adev->smu;
	if (adev->pm.pp_feature & PP_OVERDRIVE_MASK)
		smu->od_enabled = true;
	switch (adev->asic_type) {
		adev->pm.pp_feature &= ~PP_GFXOFF_MASK;
		vega20_set_ppt_funcs(smu);
		navi10_set_ppt_funcs(smu);
		adev->pm.pp_feature &= ~PP_GFXOFF_MASK;
		arcturus_set_ppt_funcs(smu);
		/* OD is not supported on Arcturus */
		smu->od_enabled = false;
		renoir_set_ppt_funcs(smu);
static int smu_early_init(void *handle)
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	struct smu_context *smu = &adev->smu;
	smu->pm_enabled = !!amdgpu_dpm;
	mutex_init(&smu->mutex);
	return smu_set_funcs(adev);
static int smu_late_init(void *handle)
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	struct smu_context *smu = &adev->smu;
	if (!smu->pm_enabled)
	smu_handle_task(&adev->smu,
			smu->smu_dpm.dpm_level,
			AMD_PP_TASK_COMPLETE_INIT,
int smu_get_atom_data_table(struct smu_context *smu, uint32_t table,
			    uint16_t *size, uint8_t *frev, uint8_t *crev,
	struct amdgpu_device *adev = smu->adev;
	if (!amdgpu_atom_parse_data_header(adev->mode_info.atom_context, table,
					   size, frev, crev, &data_start))
	*addr = (uint8_t *)adev->mode_info.atom_context->bios + data_start;
static int smu_initialize_pptable(struct smu_context *smu)
static int smu_smc_table_sw_init(struct smu_context *smu)
	ret = smu_initialize_pptable(smu);
		pr_err("smu_initialize_pptable failed!\n");
	/*
	 * Create the smu_table structure and init smc tables such as
	 * TABLE_PPTABLE, TABLE_WATERMARKS and TABLE_SMU_METRICS.
	 */
	ret = smu_init_smc_tables(smu);
		pr_err("Failed to init smc tables!\n");
	/*
	 * Create the smu_power_context structure and allocate the
	 * smu_dpm_context data it holds.
	 */
	ret = smu_init_power(smu);
		pr_err("smu_init_power failed!\n");
static int smu_smc_table_sw_fini(struct smu_context *smu)
	ret = smu_fini_smc_tables(smu);
		pr_err("smu_fini_smc_tables failed!\n");
static int smu_sw_init(void *handle)
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	struct smu_context *smu = &adev->smu;
	smu->pool_size = adev->pm.smu_prv_buffer_size;
	smu->smu_feature.feature_num = SMU_FEATURE_MAX;
	mutex_init(&smu->smu_feature.mutex);
	bitmap_zero(smu->smu_feature.supported, SMU_FEATURE_MAX);
	bitmap_zero(smu->smu_feature.enabled, SMU_FEATURE_MAX);
	bitmap_zero(smu->smu_feature.allowed, SMU_FEATURE_MAX);
	mutex_init(&smu->smu_baco.mutex);
	smu->smu_baco.state = SMU_BACO_STATE_EXIT;
	smu->smu_baco.platform_support = false;
	mutex_init(&smu->sensor_lock);
	mutex_init(&smu->metrics_lock);
	mutex_init(&smu->message_lock);
	smu->watermarks_bitmap = 0;
	smu->power_profile_mode = PP_SMC_POWER_PROFILE_BOOTUP_DEFAULT;
	smu->default_power_profile_mode = PP_SMC_POWER_PROFILE_BOOTUP_DEFAULT;
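	/*
	 * Note: smu lives inside the zero-allocated adev, so workload_prority[]
	 * is expected to be all zeros here; the default mask below therefore
	 * resolves to bit 0 before the priorities are assigned.
	 */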
	smu->workload_mask = 1 << smu->workload_prority[PP_SMC_POWER_PROFILE_BOOTUP_DEFAULT];
	smu->workload_prority[PP_SMC_POWER_PROFILE_BOOTUP_DEFAULT] = 0;
	smu->workload_prority[PP_SMC_POWER_PROFILE_FULLSCREEN3D] = 1;
	smu->workload_prority[PP_SMC_POWER_PROFILE_POWERSAVING] = 2;
	smu->workload_prority[PP_SMC_POWER_PROFILE_VIDEO] = 3;
	smu->workload_prority[PP_SMC_POWER_PROFILE_VR] = 4;
	smu->workload_prority[PP_SMC_POWER_PROFILE_COMPUTE] = 5;
	smu->workload_prority[PP_SMC_POWER_PROFILE_CUSTOM] = 6;
	smu->workload_setting[0] = PP_SMC_POWER_PROFILE_BOOTUP_DEFAULT;
	smu->workload_setting[1] = PP_SMC_POWER_PROFILE_FULLSCREEN3D;
	smu->workload_setting[2] = PP_SMC_POWER_PROFILE_POWERSAVING;
	smu->workload_setting[3] = PP_SMC_POWER_PROFILE_VIDEO;
	smu->workload_setting[4] = PP_SMC_POWER_PROFILE_VR;
	smu->workload_setting[5] = PP_SMC_POWER_PROFILE_COMPUTE;
	smu->workload_setting[6] = PP_SMC_POWER_PROFILE_CUSTOM;
	smu->display_config = &adev->pm.pm_display_cfg;
	smu->smu_dpm.dpm_level = AMD_DPM_FORCED_LEVEL_AUTO;
	smu->smu_dpm.requested_dpm_level = AMD_DPM_FORCED_LEVEL_AUTO;
	ret = smu_init_microcode(smu);
		pr_err("Failed to load smu firmware!\n");
	ret = smu_smc_table_sw_init(smu);
		pr_err("Failed to sw init smc table!\n");
	ret = smu_register_irq_handler(smu);
		pr_err("Failed to register smc irq handler!\n");
	if (adev->smu.ppt_funcs->i2c_eeprom_init) {
		ret = smu_i2c_eeprom_init(smu, &adev->pm.smu_i2c);
static int smu_sw_fini(void *handle)
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	struct smu_context *smu = &adev->smu;
	if (adev->smu.ppt_funcs->i2c_eeprom_fini)
		smu_i2c_eeprom_fini(smu, &adev->pm.smu_i2c);
	kfree(smu->irq_source);
	smu->irq_source = NULL;
	ret = smu_smc_table_sw_fini(smu);
		pr_err("Failed to sw fini smc table!\n");
	ret = smu_fini_power(smu);
		pr_err("smu_fini_power failed!\n");
static int smu_init_fb_allocations(struct smu_context *smu)
	struct amdgpu_device *adev = smu->adev;
	struct smu_table_context *smu_table = &smu->smu_table;
	struct smu_table *tables = smu_table->tables;
	struct smu_table *driver_table = &(smu_table->driver_table);
	uint32_t max_table_size = 0;
	/* VRAM allocation for tool table */
	if (tables[SMU_TABLE_PMSTATUSLOG].size) {
		ret = amdgpu_bo_create_kernel(adev,
					      tables[SMU_TABLE_PMSTATUSLOG].size,
					      tables[SMU_TABLE_PMSTATUSLOG].align,
					      tables[SMU_TABLE_PMSTATUSLOG].domain,
					      &tables[SMU_TABLE_PMSTATUSLOG].bo,
					      &tables[SMU_TABLE_PMSTATUSLOG].mc_address,
					      &tables[SMU_TABLE_PMSTATUSLOG].cpu_addr);
			pr_err("VRAM allocation for tool table failed!\n");
	/* VRAM allocation for driver table */
	for (i = 0; i < SMU_TABLE_COUNT; i++) {
		if (tables[i].size == 0)
		if (i == SMU_TABLE_PMSTATUSLOG)
		if (max_table_size < tables[i].size)
			max_table_size = tables[i].size;
	driver_table->size = max_table_size;
	driver_table->align = PAGE_SIZE;
	driver_table->domain = AMDGPU_GEM_DOMAIN_VRAM;
	ret = amdgpu_bo_create_kernel(adev,
				      driver_table->align,
				      driver_table->domain,
				      &driver_table->mc_address,
				      &driver_table->cpu_addr);
		pr_err("VRAM allocation for driver table failed!\n");
		if (tables[SMU_TABLE_PMSTATUSLOG].mc_address)
			amdgpu_bo_free_kernel(&tables[SMU_TABLE_PMSTATUSLOG].bo,
					      &tables[SMU_TABLE_PMSTATUSLOG].mc_address,
					      &tables[SMU_TABLE_PMSTATUSLOG].cpu_addr);
static int smu_fini_fb_allocations(struct smu_context *smu)
	struct smu_table_context *smu_table = &smu->smu_table;
	struct smu_table *tables = smu_table->tables;
	struct smu_table *driver_table = &(smu_table->driver_table);
	if (tables[SMU_TABLE_PMSTATUSLOG].mc_address)
		amdgpu_bo_free_kernel(&tables[SMU_TABLE_PMSTATUSLOG].bo,
				      &tables[SMU_TABLE_PMSTATUSLOG].mc_address,
				      &tables[SMU_TABLE_PMSTATUSLOG].cpu_addr);
	amdgpu_bo_free_kernel(&driver_table->bo,
			      &driver_table->mc_address,
			      &driver_table->cpu_addr);
static int smu_smc_table_hw_init(struct smu_context *smu,
	struct amdgpu_device *adev = smu->adev;
	if (smu_is_dpm_running(smu) && adev->in_suspend) {
		pr_info("dpm has been enabled\n");
	if (adev->asic_type != CHIP_ARCTURUS) {
		ret = smu_init_display_count(smu, 0);
	/* get boot_values from vbios to set revision, gfxclk, etc. */
	ret = smu_get_vbios_bootup_values(smu);
	ret = smu_setup_pptable(smu);
	ret = smu_get_clk_info_from_vbios(smu);
	/*
	 * Check that the format_revision in vbios matches the pptable
	 * header version, and that the structure size is not 0.
	 */
	ret = smu_check_pptable(smu);
	/*
	 * Allocate vram bos to store smc table contents.
	 */
	ret = smu_init_fb_allocations(smu);
	/*
	 * Parse the pptable format and fill the PPTable_t smc_pptable in
	 * the smu_table_context structure. Then read the smc_dpm_table
	 * from vbios and fill it into smc_pptable as well.
	 */
	ret = smu_parse_pptable(smu);
	/*
	 * Send the GetDriverIfVersion message and check that the returned
	 * value matches the DRIVER_IF_VERSION in the smc header.
	 */
	ret = smu_check_fw_version(smu);
	ret = smu_set_driver_table_location(smu);
	/* smu_dump_pptable(smu); */
	if (!amdgpu_sriov_vf(adev)) {
		/*
		 * Copy the pptable bo in vram to the smc, with SMU messages
		 * such as SetDriverDramAddr and TransferTableDram2Smu.
		 */
		ret = smu_write_pptable(smu);
		/* issue Run*Btc msg */
		ret = smu_run_btc(smu);
	ret = smu_feature_set_allowed_mask(smu);
	ret = smu_system_features_control(smu, true);
	if (adev->asic_type == CHIP_NAVI10) {
		if ((adev->pdev->device == 0x731f && (adev->pdev->revision == 0xc2 ||
						      adev->pdev->revision == 0xc3 ||
						      adev->pdev->revision == 0xca ||
						      adev->pdev->revision == 0xcb)) ||
		    (adev->pdev->device == 0x66af && (adev->pdev->revision == 0xf3 ||
						      adev->pdev->revision == 0xf4 ||
						      adev->pdev->revision == 0xf5 ||
						      adev->pdev->revision == 0xf6))) {
			ret = smu_disable_umc_cdr_12gbps_workaround(smu);
				pr_err("Workaround failed to disable UMC CDR feature on 12Gbps SKU!\n");
	if (adev->asic_type != CHIP_ARCTURUS) {
		ret = smu_notify_display_change(smu);
	/*
	 * Set min deep sleep dce fclk with the bootup value from vbios via
	 * the SetMinDeepSleepDcefclk message.
	 */
	ret = smu_set_min_dcef_deep_sleep(smu);
	/*
	 * Set initialized values (from vbios) to the dpm tables context,
	 * e.g. gfxclk, memclk and dcefclk, and enable the DPM feature for
	 * each clock domain.
	 */
	ret = smu_populate_smc_tables(smu);
	ret = smu_init_max_sustainable_clocks(smu);
	if (adev->asic_type != CHIP_ARCTURUS) {
		ret = smu_override_pcie_parameters(smu);
	ret = smu_set_default_od_settings(smu, initialize);
	ret = smu_populate_umd_state_clk(smu);
	ret = smu_get_power_limit(smu, &smu->default_power_limit, false, false);
	/*
	 * Set the PMSTATUSLOG table bo address with the SetToolsDramAddr
	 * message, for tools.
	 */
	if (!amdgpu_sriov_vf(adev)) {
		ret = smu_set_tool_table_location(smu);
	if (!smu_is_dpm_running(smu))
		pr_info("dpm has been disabled\n");
/**
 * smu_alloc_memory_pool - allocate a memory pool in system memory
 *
 * @smu: smu_context pointer
 *
 * This memory pool is used by the SMC; its location is passed to the firmware
 * via the SetSystemVirtualDramAddr and DramLogSetDramAddr messages.
 *
 * Returns 0 on success, error on failure.
 */
static int smu_alloc_memory_pool(struct smu_context *smu)
	struct amdgpu_device *adev = smu->adev;
	struct smu_table_context *smu_table = &smu->smu_table;
	struct smu_table *memory_pool = &smu_table->memory_pool;
	uint64_t pool_size = smu->pool_size;
	if (pool_size == SMU_MEMORY_POOL_SIZE_ZERO)
	memory_pool->size = pool_size;
	memory_pool->align = PAGE_SIZE;
	memory_pool->domain = AMDGPU_GEM_DOMAIN_GTT;
	switch (pool_size) {
	case SMU_MEMORY_POOL_SIZE_256_MB:
	case SMU_MEMORY_POOL_SIZE_512_MB:
	case SMU_MEMORY_POOL_SIZE_1_GB:
	case SMU_MEMORY_POOL_SIZE_2_GB:
		ret = amdgpu_bo_create_kernel(adev,
					      memory_pool->domain,
					      &memory_pool->mc_address,
					      &memory_pool->cpu_addr);
static int smu_free_memory_pool(struct smu_context *smu)
	struct smu_table_context *smu_table = &smu->smu_table;
	struct smu_table *memory_pool = &smu_table->memory_pool;
	if (memory_pool->size == SMU_MEMORY_POOL_SIZE_ZERO)
	amdgpu_bo_free_kernel(&memory_pool->bo,
			      &memory_pool->mc_address,
			      &memory_pool->cpu_addr);
	memset(memory_pool, 0, sizeof(struct smu_table));
static int smu_start_smc_engine(struct smu_context *smu)
	struct amdgpu_device *adev = smu->adev;
	if (adev->firmware.load_type != AMDGPU_FW_LOAD_PSP) {
		if (adev->asic_type < CHIP_NAVI10) {
			if (smu->ppt_funcs->load_microcode) {
				ret = smu->ppt_funcs->load_microcode(smu);
	if (smu->ppt_funcs->check_fw_status) {
		ret = smu->ppt_funcs->check_fw_status(smu);
			pr_err("SMC is not ready\n");
static int smu_hw_init(void *handle)
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	struct smu_context *smu = &adev->smu;
	ret = smu_start_smc_engine(smu);
		pr_err("SMU is not ready yet!\n");
		smu_powergate_sdma(&adev->smu, false);
		smu_powergate_vcn(&adev->smu, false);
		smu_powergate_jpeg(&adev->smu, false);
		smu_set_gfx_cgpg(&adev->smu, true);
	if (amdgpu_sriov_vf(adev) && !amdgpu_sriov_is_pp_one_vf(adev))
	if (!smu->pm_enabled)
	ret = smu_feature_init_dpm(smu);
	ret = smu_smc_table_hw_init(smu, true);
	ret = smu_alloc_memory_pool(smu);
	/*
	 * Notify the SMU of the memory pool location via the
	 * SetSystemVirtualDramAddr and DramLogSetDramAddr messages.
	 */
	ret = smu_notify_memory_pool_location(smu);
	ret = smu_start_thermal_control(smu);
	if (!smu->pm_enabled)
		adev->pm.dpm_enabled = false;
		adev->pm.dpm_enabled = true; /* TODO: set the dpm_enabled flag once VCN and DAL DPM are workable */
	pr_info("SMU is initialized successfully!\n");
static int smu_stop_dpms(struct smu_context *smu)
	return smu_system_features_control(smu, false);
static int smu_hw_fini(void *handle)
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	struct smu_context *smu = &adev->smu;
	struct smu_table_context *table_context = &smu->smu_table;
	if (amdgpu_sriov_vf(adev) && !amdgpu_sriov_is_pp_one_vf(adev))
		smu_powergate_sdma(&adev->smu, true);
		smu_powergate_vcn(&adev->smu, true);
		smu_powergate_jpeg(&adev->smu, true);
	if (!smu->pm_enabled)
	if (!amdgpu_sriov_vf(adev)) {
		ret = smu_stop_thermal_control(smu);
			pr_warn("Failed to stop thermal control!\n");
	/*
	 * For custom pptable uploading, skip the DPM features
	 * disablement on Navi1x ASICs:
	 * - The gfx related features are under the control of the
	 *   RLC on those ASICs, and an RLC reinitialization would
	 *   be needed to re-enable them. That would cost much more
	 *   effort.
	 * - The SMU firmware can handle the DPM re-enablement
	 *   properly.
	 */
	if (!smu->uploading_custom_pp_table ||
	    !((adev->asic_type >= CHIP_NAVI10) &&
	      (adev->asic_type <= CHIP_NAVI12))) {
		ret = smu_stop_dpms(smu);
			pr_warn("Failed to stop DPMs!\n");
	kfree(table_context->driver_pptable);
	table_context->driver_pptable = NULL;
	kfree(table_context->max_sustainable_clocks);
	table_context->max_sustainable_clocks = NULL;
	kfree(table_context->overdrive_table);
	table_context->overdrive_table = NULL;
	ret = smu_fini_fb_allocations(smu);
	ret = smu_free_memory_pool(smu);
int smu_reset(struct smu_context *smu)
	struct amdgpu_device *adev = smu->adev;
	ret = smu_hw_fini(adev);
	ret = smu_hw_init(adev);
static int smu_disable_dpm(struct smu_context *smu)
	struct amdgpu_device *adev = smu->adev;
	uint32_t smu_version;
	bool use_baco = !smu->is_apu &&
		((adev->in_gpu_reset &&
		  (amdgpu_asic_reset_method(adev) == AMD_RESET_METHOD_BACO)) ||
		 (adev->in_runpm && amdgpu_asic_supports_baco(adev)));
	ret = smu_get_smc_version(smu, NULL, &smu_version);
		pr_err("Failed to get smu version.\n");
	/*
	 * For baco on Arcturus, this operation
	 * (disabling all smu features) will be handled by SMU FW.
	 */
	if (adev->asic_type == CHIP_ARCTURUS) {
		if (use_baco && (smu_version > 0x360e00))
	/* Disable all enabled SMU features */
	ret = smu_system_features_control(smu, false);
		pr_err("Failed to disable smu features.\n");
	/* For baco, we need to leave the BACO feature enabled */
	/*
	 * Correct the way of checking whether SMU_FEATURE_BACO_BIT
	 * is supported.
	 *
	 * 'smu_feature_is_enabled(smu, SMU_FEATURE_BACO_BIT)' would
	 * always return false here, as 'smu_system_features_control(smu,
	 * false)' was just issued above and disabled all SMU features.
	 *
	 * 'smu_feature_get_index(smu, SMU_FEATURE_BACO_BIT)' is therefore
	 * used for the check instead.
	 */
	if (smu_feature_get_index(smu, SMU_FEATURE_BACO_BIT) >= 0) {
		ret = smu_feature_set_enabled(smu, SMU_FEATURE_BACO_BIT, true);
			pr_warn("set BACO feature enabled failed, return %d\n", ret);
static int smu_suspend(void *handle)
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	struct smu_context *smu = &adev->smu;
	if (amdgpu_sriov_vf(adev) && !amdgpu_sriov_is_pp_one_vf(adev))
	if (!smu->pm_enabled)
	if (!amdgpu_sriov_vf(adev)) {
		ret = smu_disable_dpm(smu);
	smu->watermarks_bitmap &= ~(WATERMARKS_LOADED);
	if (adev->asic_type >= CHIP_NAVI10 &&
	    adev->gfx.rlc.funcs->stop)
		adev->gfx.rlc.funcs->stop(adev);
	smu_set_gfx_cgpg(&adev->smu, false);
static int smu_resume(void *handle)
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	struct smu_context *smu = &adev->smu;
	if (amdgpu_sriov_vf(adev) && !amdgpu_sriov_is_pp_one_vf(adev))
	if (!smu->pm_enabled)
	pr_info("SMU is resuming...\n");
	ret = smu_start_smc_engine(smu);
		pr_err("SMU is not ready yet!\n");
	ret = smu_smc_table_hw_init(smu, false);
	ret = smu_start_thermal_control(smu);
	smu_set_gfx_cgpg(&adev->smu, true);
	smu->disable_uclk_switch = 0;
	pr_info("SMU is resumed successfully!\n");
int smu_display_configuration_change(struct smu_context *smu,
				     const struct amd_pp_display_configuration *display_config)
	int num_of_active_display = 0;
	if (!smu->pm_enabled || !is_support_sw_smu(smu->adev))
	if (!display_config)
	mutex_lock(&smu->mutex);
	if (smu->ppt_funcs->set_deep_sleep_dcefclk)
		smu->ppt_funcs->set_deep_sleep_dcefclk(smu,
				display_config->min_dcef_deep_sleep_set_clk / 100);
	for (index = 0; index < display_config->num_path_including_non_display; index++) {
		if (display_config->displays[index].controller_id != 0)
			num_of_active_display++;
	smu_set_active_display_count(smu, num_of_active_display);
	smu_store_cc6_data(smu, display_config->cpu_pstate_separation_time,
			   display_config->cpu_cc6_disable,
			   display_config->cpu_pstate_disable,
			   display_config->nb_pstate_switch_disable);
	mutex_unlock(&smu->mutex);
static int smu_get_clock_info(struct smu_context *smu,
			      struct smu_clock_info *clk_info,
			      enum smu_perf_level_designation designation)
	struct smu_performance_level level = {0};
	ret = smu_get_perf_level(smu, PERF_LEVEL_ACTIVITY, &level);
	clk_info->min_mem_clk = level.memory_clock;
	clk_info->min_eng_clk = level.core_clock;
	clk_info->min_bus_bandwidth = level.non_local_mem_freq * level.non_local_mem_width;
	ret = smu_get_perf_level(smu, designation, &level);
	clk_info->min_mem_clk = level.memory_clock;
	clk_info->min_eng_clk = level.core_clock;
	clk_info->min_bus_bandwidth = level.non_local_mem_freq * level.non_local_mem_width;
int smu_get_current_clocks(struct smu_context *smu,
			   struct amd_pp_clock_info *clocks)
	struct amd_pp_simple_clock_info simple_clocks = {0};
	struct smu_clock_info hw_clocks;
	if (!is_support_sw_smu(smu->adev))
	mutex_lock(&smu->mutex);
	smu_get_dal_power_level(smu, &simple_clocks);
	if (smu->support_power_containment)
		ret = smu_get_clock_info(smu, &hw_clocks,
					 PERF_LEVEL_POWER_CONTAINMENT);
		ret = smu_get_clock_info(smu, &hw_clocks, PERF_LEVEL_ACTIVITY);
		pr_err("Error in smu_get_clock_info\n");
	clocks->min_engine_clock = hw_clocks.min_eng_clk;
	clocks->max_engine_clock = hw_clocks.max_eng_clk;
	clocks->min_memory_clock = hw_clocks.min_mem_clk;
	clocks->max_memory_clock = hw_clocks.max_mem_clk;
	clocks->min_bus_bandwidth = hw_clocks.min_bus_bandwidth;
	clocks->max_bus_bandwidth = hw_clocks.max_bus_bandwidth;
	clocks->max_engine_clock_in_sr = hw_clocks.max_eng_clk;
	clocks->min_engine_clock_in_sr = hw_clocks.min_eng_clk;
	if (simple_clocks.level == 0)
		clocks->max_clocks_state = PP_DAL_POWERLEVEL_7;
		clocks->max_clocks_state = simple_clocks.level;
	if (!smu_get_current_shallow_sleep_clocks(smu, &hw_clocks)) {
		clocks->max_engine_clock_in_sr = hw_clocks.max_eng_clk;
		clocks->min_engine_clock_in_sr = hw_clocks.min_eng_clk;
	mutex_unlock(&smu->mutex);
static int smu_set_clockgating_state(void *handle,
				     enum amd_clockgating_state state)
static int smu_set_powergating_state(void *handle,
				     enum amd_powergating_state state)
static int smu_enable_umd_pstate(void *handle,
				 enum amd_dpm_forced_level *level)
	uint32_t profile_mode_mask = AMD_DPM_FORCED_LEVEL_PROFILE_STANDARD |
				     AMD_DPM_FORCED_LEVEL_PROFILE_MIN_SCLK |
				     AMD_DPM_FORCED_LEVEL_PROFILE_MIN_MCLK |
				     AMD_DPM_FORCED_LEVEL_PROFILE_PEAK;
	struct smu_context *smu = (struct smu_context *)(handle);
	struct smu_dpm_context *smu_dpm_ctx = &(smu->smu_dpm);
	if (!smu->is_apu && (!smu->pm_enabled || !smu_dpm_ctx->dpm_context))
	if (!(smu_dpm_ctx->dpm_level & profile_mode_mask)) {
		/* enter umd pstate, save current level, disable gfx cg */
		if (*level & profile_mode_mask) {
			smu_dpm_ctx->saved_dpm_level = smu_dpm_ctx->dpm_level;
			smu_dpm_ctx->enable_umd_pstate = true;
			amdgpu_device_ip_set_clockgating_state(smu->adev,
							       AMD_IP_BLOCK_TYPE_GFX,
							       AMD_CG_STATE_UNGATE);
			amdgpu_device_ip_set_powergating_state(smu->adev,
							       AMD_IP_BLOCK_TYPE_GFX,
							       AMD_PG_STATE_UNGATE);
		/* exit umd pstate, restore level, enable gfx cg */
		if (!(*level & profile_mode_mask)) {
			if (*level == AMD_DPM_FORCED_LEVEL_PROFILE_EXIT)
				*level = smu_dpm_ctx->saved_dpm_level;
			smu_dpm_ctx->enable_umd_pstate = false;
			amdgpu_device_ip_set_clockgating_state(smu->adev,
							       AMD_IP_BLOCK_TYPE_GFX,
			amdgpu_device_ip_set_powergating_state(smu->adev,
							       AMD_IP_BLOCK_TYPE_GFX,
int smu_adjust_power_state_dynamic(struct smu_context *smu,
				   enum amd_dpm_forced_level level,
				   bool skip_display_settings)
	struct smu_dpm_context *smu_dpm_ctx = &(smu->smu_dpm);
	if (!smu->pm_enabled)
	if (!skip_display_settings) {
		ret = smu_display_config_changed(smu);
			pr_err("Failed to change display config!");
	ret = smu_apply_clocks_adjust_rules(smu);
		pr_err("Failed to apply clocks adjust rules!");
	if (!skip_display_settings) {
		ret = smu_notify_smc_display_config(smu);
			pr_err("Failed to notify smc display config!");
	if (smu_dpm_ctx->dpm_level != level) {
		ret = smu_asic_set_performance_level(smu, level);
			pr_err("Failed to set performance level!");
		/* update the saved copy */
		smu_dpm_ctx->dpm_level = level;
	if (smu_dpm_ctx->dpm_level != AMD_DPM_FORCED_LEVEL_MANUAL) {
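		/*
		 * fls() returns the 1-based index of the highest set bit,
		 * i.e. the highest-priority workload currently requested;
		 * fall back to slot 0 (the bootup default) when the mask is
		 * empty.
		 */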
		index = fls(smu->workload_mask);
		index = index > 0 && index <= WORKLOAD_POLICY_MAX ? index - 1 : 0;
		workload = smu->workload_setting[index];
		if (smu->power_profile_mode != workload)
			smu_set_power_profile_mode(smu, &workload, 0, false);
int smu_handle_task(struct smu_context *smu,
		    enum amd_dpm_forced_level level,
		    enum amd_pp_task task_id,
		mutex_lock(&smu->mutex);
	case AMD_PP_TASK_DISPLAY_CONFIG_CHANGE:
		ret = smu_pre_display_config_changed(smu);
		ret = smu_set_cpu_power_state(smu);
		ret = smu_adjust_power_state_dynamic(smu, level, false);
	case AMD_PP_TASK_COMPLETE_INIT:
	case AMD_PP_TASK_READJUST_POWER_STATE:
		ret = smu_adjust_power_state_dynamic(smu, level, true);
		mutex_unlock(&smu->mutex);
int smu_switch_power_profile(struct smu_context *smu,
			     enum PP_SMC_POWER_PROFILE type,
	struct smu_dpm_context *smu_dpm_ctx = &(smu->smu_dpm);
	if (!smu->pm_enabled)
	if (!(type < PP_SMC_POWER_PROFILE_CUSTOM))
	mutex_lock(&smu->mutex);
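	/*
	 * Update the workload bitmask for this profile type, then re-pick the
	 * highest-priority workload still requested via fls(). When enabling,
	 * the bit just set guarantees the mask is non-empty.
	 */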
		smu->workload_mask &= ~(1 << smu->workload_prority[type]);
		index = fls(smu->workload_mask);
		index = index > 0 && index <= WORKLOAD_POLICY_MAX ? index - 1 : 0;
		workload = smu->workload_setting[index];
		smu->workload_mask |= (1 << smu->workload_prority[type]);
		index = fls(smu->workload_mask);
		index = index <= WORKLOAD_POLICY_MAX ? index - 1 : 0;
		workload = smu->workload_setting[index];
	if (smu_dpm_ctx->dpm_level != AMD_DPM_FORCED_LEVEL_MANUAL)
		smu_set_power_profile_mode(smu, &workload, 0, false);
	mutex_unlock(&smu->mutex);
enum amd_dpm_forced_level smu_get_performance_level(struct smu_context *smu)
	struct smu_dpm_context *smu_dpm_ctx = &(smu->smu_dpm);
	enum amd_dpm_forced_level level;
	if (!smu->is_apu && !smu_dpm_ctx->dpm_context)
	mutex_lock(&(smu->mutex));
	level = smu_dpm_ctx->dpm_level;
	mutex_unlock(&(smu->mutex));
int smu_force_performance_level(struct smu_context *smu, enum amd_dpm_forced_level level)
	struct smu_dpm_context *smu_dpm_ctx = &(smu->smu_dpm);
	if (!smu->is_apu && !smu_dpm_ctx->dpm_context)
	mutex_lock(&smu->mutex);
	ret = smu_enable_umd_pstate(smu, &level);
		mutex_unlock(&smu->mutex);
	ret = smu_handle_task(smu, level,
			      AMD_PP_TASK_READJUST_POWER_STATE,
	mutex_unlock(&smu->mutex);
int smu_set_display_count(struct smu_context *smu, uint32_t count)
	mutex_lock(&smu->mutex);
	ret = smu_init_display_count(smu, count);
	mutex_unlock(&smu->mutex);
int smu_force_clk_levels(struct smu_context *smu,
			 enum smu_clk_type clk_type,
	struct smu_dpm_context *smu_dpm_ctx = &(smu->smu_dpm);
	if (smu_dpm_ctx->dpm_level != AMD_DPM_FORCED_LEVEL_MANUAL) {
		pr_debug("force clock level is for dpm manual mode only.\n");
	mutex_lock(&smu->mutex);
	if (smu->ppt_funcs && smu->ppt_funcs->force_clk_levels)
		ret = smu->ppt_funcs->force_clk_levels(smu, clk_type, mask);
	mutex_unlock(&smu->mutex);
int smu_set_mp1_state(struct smu_context *smu,
		      enum pp_mp1_state mp1_state)
	/*
	 * The SMC is not fully ready. That may be
	 * expected as the IP may be masked.
	 * So, just return without error.
	 */
	if (!smu->pm_enabled)
	mutex_lock(&smu->mutex);
	switch (mp1_state) {
	case PP_MP1_STATE_SHUTDOWN:
		msg = SMU_MSG_PrepareMp1ForShutdown;
	case PP_MP1_STATE_UNLOAD:
		msg = SMU_MSG_PrepareMp1ForUnload;
	case PP_MP1_STATE_RESET:
		msg = SMU_MSG_PrepareMp1ForReset;
	case PP_MP1_STATE_NONE:
		mutex_unlock(&smu->mutex);
	/* some asics may not support those messages */
	if (smu_msg_get_index(smu, msg) < 0) {
		mutex_unlock(&smu->mutex);
	ret = smu_send_smc_msg(smu, msg, NULL);
		pr_err("[PrepareMp1] Failed!\n");
	mutex_unlock(&smu->mutex);
int smu_set_df_cstate(struct smu_context *smu,
		      enum pp_df_cstate state)
	/*
	 * The SMC is not fully ready. That may be
	 * expected as the IP may be masked.
	 * So, just return without error.
	 */
	if (!smu->pm_enabled)
	if (!smu->ppt_funcs || !smu->ppt_funcs->set_df_cstate)
	mutex_lock(&smu->mutex);
	ret = smu->ppt_funcs->set_df_cstate(smu, state);
		pr_err("[SetDfCstate] failed!\n");
	mutex_unlock(&smu->mutex);
int smu_write_watermarks_table(struct smu_context *smu)
	void *watermarks_table = smu->smu_table.watermarks_table;
	if (!watermarks_table)
	return smu_update_table(smu,
				SMU_TABLE_WATERMARKS,
int smu_set_watermarks_for_clock_ranges(struct smu_context *smu,
		struct dm_pp_wm_sets_with_clock_ranges_soc15 *clock_ranges)
	void *table = smu->smu_table.watermarks_table;
	mutex_lock(&smu->mutex);
	if (!smu->disable_watermark &&
	    smu_feature_is_enabled(smu, SMU_FEATURE_DPM_DCEFCLK_BIT) &&
	    smu_feature_is_enabled(smu, SMU_FEATURE_DPM_SOCCLK_BIT)) {
		smu_set_watermarks_table(smu, table, clock_ranges);
		if (!(smu->watermarks_bitmap & WATERMARKS_EXIST)) {
			smu->watermarks_bitmap |= WATERMARKS_EXIST;
			smu->watermarks_bitmap &= ~WATERMARKS_LOADED;
	mutex_unlock(&smu->mutex);
const struct amd_ip_funcs smu_ip_funcs = {
	.early_init = smu_early_init,
	.late_init = smu_late_init,
	.sw_init = smu_sw_init,
	.sw_fini = smu_sw_fini,
	.hw_init = smu_hw_init,
	.hw_fini = smu_hw_fini,
	.suspend = smu_suspend,
	.resume = smu_resume,
	.check_soft_reset = NULL,
	.wait_for_idle = NULL,
	.set_clockgating_state = smu_set_clockgating_state,
	.set_powergating_state = smu_set_powergating_state,
	.enable_umd_pstate = smu_enable_umd_pstate,
const struct amdgpu_ip_block_version smu_v11_0_ip_block =
	.type = AMD_IP_BLOCK_TYPE_SMC,
	.funcs = &smu_ip_funcs,
const struct amdgpu_ip_block_version smu_v12_0_ip_block =
	.type = AMD_IP_BLOCK_TYPE_SMC,
	.funcs = &smu_ip_funcs,
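/*
 * The exported helpers below are thin wrappers around the ASIC-specific
 * ppt_funcs callbacks, taking smu->mutex around each call where needed.
 */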
int smu_load_microcode(struct smu_context *smu)
	mutex_lock(&smu->mutex);
	if (smu->ppt_funcs->load_microcode)
		ret = smu->ppt_funcs->load_microcode(smu);
	mutex_unlock(&smu->mutex);
int smu_check_fw_status(struct smu_context *smu)
	mutex_lock(&smu->mutex);
	if (smu->ppt_funcs->check_fw_status)
		ret = smu->ppt_funcs->check_fw_status(smu);
	mutex_unlock(&smu->mutex);
int smu_set_gfx_cgpg(struct smu_context *smu, bool enabled)
	mutex_lock(&smu->mutex);
	if (smu->ppt_funcs->set_gfx_cgpg)
		ret = smu->ppt_funcs->set_gfx_cgpg(smu, enabled);
	mutex_unlock(&smu->mutex);
int smu_set_fan_speed_rpm(struct smu_context *smu, uint32_t speed)
	mutex_lock(&smu->mutex);
	if (smu->ppt_funcs->set_fan_speed_rpm)
		ret = smu->ppt_funcs->set_fan_speed_rpm(smu, speed);
	mutex_unlock(&smu->mutex);
int smu_get_power_limit(struct smu_context *smu,
	mutex_lock(&smu->mutex);
	if (smu->ppt_funcs->get_power_limit)
		ret = smu->ppt_funcs->get_power_limit(smu, limit, def);
	mutex_unlock(&smu->mutex);
int smu_set_power_limit(struct smu_context *smu, uint32_t limit)
	mutex_lock(&smu->mutex);
	if (smu->ppt_funcs->set_power_limit)
		ret = smu->ppt_funcs->set_power_limit(smu, limit);
	mutex_unlock(&smu->mutex);
int smu_print_clk_levels(struct smu_context *smu, enum smu_clk_type clk_type, char *buf)
	mutex_lock(&smu->mutex);
	if (smu->ppt_funcs->print_clk_levels)
		ret = smu->ppt_funcs->print_clk_levels(smu, clk_type, buf);
	mutex_unlock(&smu->mutex);
int smu_get_od_percentage(struct smu_context *smu, enum smu_clk_type type)
	mutex_lock(&smu->mutex);
	if (smu->ppt_funcs->get_od_percentage)
		ret = smu->ppt_funcs->get_od_percentage(smu, type);
	mutex_unlock(&smu->mutex);
int smu_set_od_percentage(struct smu_context *smu, enum smu_clk_type type, uint32_t value)
	mutex_lock(&smu->mutex);
	if (smu->ppt_funcs->set_od_percentage)
		ret = smu->ppt_funcs->set_od_percentage(smu, type, value);
	mutex_unlock(&smu->mutex);
int smu_od_edit_dpm_table(struct smu_context *smu,
			  enum PP_OD_DPM_TABLE_COMMAND type,
			  long *input, uint32_t size)
	mutex_lock(&smu->mutex);
	if (smu->ppt_funcs->od_edit_dpm_table)
		ret = smu->ppt_funcs->od_edit_dpm_table(smu, type, input, size);
	mutex_unlock(&smu->mutex);
int smu_read_sensor(struct smu_context *smu,
		    enum amd_pp_sensors sensor,
		    void *data, uint32_t *size)
	mutex_lock(&smu->mutex);
	if (smu->ppt_funcs->read_sensor)
		ret = smu->ppt_funcs->read_sensor(smu, sensor, data, size);
	mutex_unlock(&smu->mutex);
int smu_get_power_profile_mode(struct smu_context *smu, char *buf)
	mutex_lock(&smu->mutex);
	if (smu->ppt_funcs->get_power_profile_mode)
		ret = smu->ppt_funcs->get_power_profile_mode(smu, buf);
	mutex_unlock(&smu->mutex);
int smu_set_power_profile_mode(struct smu_context *smu,
			       uint32_t param_size,
	mutex_lock(&smu->mutex);
	if (smu->ppt_funcs->set_power_profile_mode)
		ret = smu->ppt_funcs->set_power_profile_mode(smu, param, param_size);
	mutex_unlock(&smu->mutex);
int smu_get_fan_control_mode(struct smu_context *smu)
	mutex_lock(&smu->mutex);
	if (smu->ppt_funcs->get_fan_control_mode)
		ret = smu->ppt_funcs->get_fan_control_mode(smu);
	mutex_unlock(&smu->mutex);
int smu_set_fan_control_mode(struct smu_context *smu, int value)
	mutex_lock(&smu->mutex);
	if (smu->ppt_funcs->set_fan_control_mode)
		ret = smu->ppt_funcs->set_fan_control_mode(smu, value);
	mutex_unlock(&smu->mutex);
int smu_get_fan_speed_percent(struct smu_context *smu, uint32_t *speed)
	mutex_lock(&smu->mutex);
	if (smu->ppt_funcs->get_fan_speed_percent)
		ret = smu->ppt_funcs->get_fan_speed_percent(smu, speed);
	mutex_unlock(&smu->mutex);
int smu_set_fan_speed_percent(struct smu_context *smu, uint32_t speed)
	mutex_lock(&smu->mutex);
	if (smu->ppt_funcs->set_fan_speed_percent)
		ret = smu->ppt_funcs->set_fan_speed_percent(smu, speed);
	mutex_unlock(&smu->mutex);
int smu_get_fan_speed_rpm(struct smu_context *smu, uint32_t *speed)
	mutex_lock(&smu->mutex);
	if (smu->ppt_funcs->get_fan_speed_rpm)
		ret = smu->ppt_funcs->get_fan_speed_rpm(smu, speed);
	mutex_unlock(&smu->mutex);
int smu_set_deep_sleep_dcefclk(struct smu_context *smu, int clk)
	mutex_lock(&smu->mutex);
	if (smu->ppt_funcs->set_deep_sleep_dcefclk)
		ret = smu->ppt_funcs->set_deep_sleep_dcefclk(smu, clk);
	mutex_unlock(&smu->mutex);
int smu_set_active_display_count(struct smu_context *smu, uint32_t count)
	if (smu->ppt_funcs->set_active_display_count)
		ret = smu->ppt_funcs->set_active_display_count(smu, count);
int smu_get_clock_by_type(struct smu_context *smu,
			  enum amd_pp_clock_type type,
			  struct amd_pp_clocks *clocks)
	mutex_lock(&smu->mutex);
	if (smu->ppt_funcs->get_clock_by_type)
		ret = smu->ppt_funcs->get_clock_by_type(smu, type, clocks);
	mutex_unlock(&smu->mutex);
int smu_get_max_high_clocks(struct smu_context *smu,
			    struct amd_pp_simple_clock_info *clocks)
	mutex_lock(&smu->mutex);
	if (smu->ppt_funcs->get_max_high_clocks)
		ret = smu->ppt_funcs->get_max_high_clocks(smu, clocks);
	mutex_unlock(&smu->mutex);
int smu_get_clock_by_type_with_latency(struct smu_context *smu,
				       enum smu_clk_type clk_type,
				       struct pp_clock_levels_with_latency *clocks)
	mutex_lock(&smu->mutex);
	if (smu->ppt_funcs->get_clock_by_type_with_latency)
		ret = smu->ppt_funcs->get_clock_by_type_with_latency(smu, clk_type, clocks);
	mutex_unlock(&smu->mutex);
int smu_get_clock_by_type_with_voltage(struct smu_context *smu,
				       enum amd_pp_clock_type type,
				       struct pp_clock_levels_with_voltage *clocks)
	mutex_lock(&smu->mutex);
	if (smu->ppt_funcs->get_clock_by_type_with_voltage)
		ret = smu->ppt_funcs->get_clock_by_type_with_voltage(smu, type, clocks);
	mutex_unlock(&smu->mutex);
int smu_display_clock_voltage_request(struct smu_context *smu,
				      struct pp_display_clock_request *clock_req)
	mutex_lock(&smu->mutex);
	if (smu->ppt_funcs->display_clock_voltage_request)
		ret = smu->ppt_funcs->display_clock_voltage_request(smu, clock_req);
	mutex_unlock(&smu->mutex);
int smu_display_disable_memory_clock_switch(struct smu_context *smu, bool disable_memory_clock_switch)
	mutex_lock(&smu->mutex);
	if (smu->ppt_funcs->display_disable_memory_clock_switch)
		ret = smu->ppt_funcs->display_disable_memory_clock_switch(smu, disable_memory_clock_switch);
	mutex_unlock(&smu->mutex);
int smu_notify_smu_enable_pwe(struct smu_context *smu)
	mutex_lock(&smu->mutex);
	if (smu->ppt_funcs->notify_smu_enable_pwe)
		ret = smu->ppt_funcs->notify_smu_enable_pwe(smu);
	mutex_unlock(&smu->mutex);
int smu_set_xgmi_pstate(struct smu_context *smu,
	mutex_lock(&smu->mutex);
	if (smu->ppt_funcs->set_xgmi_pstate)
		ret = smu->ppt_funcs->set_xgmi_pstate(smu, pstate);
	mutex_unlock(&smu->mutex);
int smu_set_azalia_d3_pme(struct smu_context *smu)
	mutex_lock(&smu->mutex);
	if (smu->ppt_funcs->set_azalia_d3_pme)
		ret = smu->ppt_funcs->set_azalia_d3_pme(smu);
	mutex_unlock(&smu->mutex);
bool smu_baco_is_support(struct smu_context *smu)
	mutex_lock(&smu->mutex);
	if (smu->ppt_funcs && smu->ppt_funcs->baco_is_support)
		ret = smu->ppt_funcs->baco_is_support(smu);
	mutex_unlock(&smu->mutex);
int smu_baco_get_state(struct smu_context *smu, enum smu_baco_state *state)
	if (smu->ppt_funcs->baco_get_state)
	mutex_lock(&smu->mutex);
	*state = smu->ppt_funcs->baco_get_state(smu);
	mutex_unlock(&smu->mutex);
int smu_baco_enter(struct smu_context *smu)
	mutex_lock(&smu->mutex);
	if (smu->ppt_funcs->baco_enter)
		ret = smu->ppt_funcs->baco_enter(smu);
	mutex_unlock(&smu->mutex);
int smu_baco_exit(struct smu_context *smu)
	mutex_lock(&smu->mutex);
	if (smu->ppt_funcs->baco_exit)
		ret = smu->ppt_funcs->baco_exit(smu);
	mutex_unlock(&smu->mutex);
int smu_mode2_reset(struct smu_context *smu)
	mutex_lock(&smu->mutex);
	if (smu->ppt_funcs->mode2_reset)
		ret = smu->ppt_funcs->mode2_reset(smu);
	mutex_unlock(&smu->mutex);
int smu_get_max_sustainable_clocks_by_dc(struct smu_context *smu,
					 struct pp_smu_nv_clock_table *max_clocks)
	mutex_lock(&smu->mutex);
	if (smu->ppt_funcs->get_max_sustainable_clocks_by_dc)
		ret = smu->ppt_funcs->get_max_sustainable_clocks_by_dc(smu, max_clocks);
	mutex_unlock(&smu->mutex);
int smu_get_uclk_dpm_states(struct smu_context *smu,
			    unsigned int *clock_values_in_khz,
			    unsigned int *num_states)
	mutex_lock(&smu->mutex);
	if (smu->ppt_funcs->get_uclk_dpm_states)
		ret = smu->ppt_funcs->get_uclk_dpm_states(smu, clock_values_in_khz, num_states);
	mutex_unlock(&smu->mutex);
enum amd_pm_state_type smu_get_current_power_state(struct smu_context *smu)
	enum amd_pm_state_type pm_state = POWER_STATE_TYPE_DEFAULT;
	mutex_lock(&smu->mutex);
	if (smu->ppt_funcs->get_current_power_state)
		pm_state = smu->ppt_funcs->get_current_power_state(smu);
	mutex_unlock(&smu->mutex);
int smu_get_dpm_clock_table(struct smu_context *smu,
			    struct dpm_clocks *clock_table)
	mutex_lock(&smu->mutex);
	if (smu->ppt_funcs->get_dpm_clock_table)
		ret = smu->ppt_funcs->get_dpm_clock_table(smu, clock_table);
	mutex_unlock(&smu->mutex);
uint32_t smu_get_pptable_power_limit(struct smu_context *smu)
	if (smu->ppt_funcs->get_pptable_power_limit)
		ret = smu->ppt_funcs->get_pptable_power_limit(smu);