2 * Copyright 2019 Advanced Micro Devices, Inc.
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
24 #include <linux/firmware.h>
27 #include "amdgpu_smu.h"
28 #include "soc15_common.h"
29 #include "smu_v11_0.h"
/*
 * smu_dpm_set_power_gate - power gate/ungate an IP block through SMU DPM.
 * Dispatches on block_type; only the UVD and VCE cases are visible in this
 * chunk (other cases / default and the return path are not shown here).
 */
32 int smu_dpm_set_power_gate(struct smu_context *smu, uint32_t block_type,
38 case AMD_IP_BLOCK_TYPE_UVD:
39 ret = smu_dpm_set_uvd_enable(smu, gate);
41 case AMD_IP_BLOCK_TYPE_VCE:
42 ret = smu_dpm_set_vce_enable(smu, gate);
/*
 * smu_get_current_power_state - power-state query stub.
 * Power states are not supported by the SW SMU path, so this always
 * reports POWER_STATE_TYPE_DEFAULT.
 */
51 enum amd_pm_state_type smu_get_current_power_state(struct smu_context *smu)
53 /* not support power state */
54 return POWER_STATE_TYPE_DEFAULT;
/*
 * smu_get_power_num_states - power-state enumeration stub.
 * Power states are not supported; the caller's pp_states_info is simply
 * zeroed so it reports zero states.
 */
57 int smu_get_power_num_states(struct smu_context *smu,
58 struct pp_states_info *state_info)
63 /* not support power state */
64 memset(state_info, 0, sizeof(struct pp_states_info));
/*
 * smu_common_read_sensor - service sensor reads common to all SW-SMU ASICs.
 * Visible cases: the stable-pstate SCLK/MCLK values cached in the smu
 * context, and the enabled SMC feature mask (two 32-bit words written into
 * *data). NOTE(review): *size is presumably set per case in lines not shown
 * here — confirm against the full file.
 */
70 int smu_common_read_sensor(struct smu_context *smu, enum amd_pp_sensors sensor,
71 void *data, uint32_t *size)
76 case AMDGPU_PP_SENSOR_STABLE_PSTATE_SCLK:
77 *((uint32_t *)data) = smu->pstate_sclk;
80 case AMDGPU_PP_SENSOR_STABLE_PSTATE_MCLK:
81 *((uint32_t *)data) = smu->pstate_mclk;
84 case AMDGPU_PP_SENSOR_ENABLED_SMC_FEATURES_MASK:
85 ret = smu_feature_get_enabled_mask(smu, (uint32_t *)data, 2);
/*
 * smu_update_table - transfer one driver table between DRAM and SMU.
 * @table_id: index into smu_table->tables (bounds-checked below)
 * @drv2smu:  true = driver -> SMU, false = SMU -> driver
 *
 * Protocol: publish the table's MC address to the SMU via the DramAddr
 * High/Low messages, then request the transfer in the chosen direction.
 * Note the staging copy into table->cpu_addr happens unconditionally,
 * even for an SMU->driver read; the copy-back at the end services the
 * read direction.
 */
99 int smu_update_table(struct smu_context *smu, uint32_t table_id,
100 void *table_data, bool drv2smu)
102 struct smu_table_context *smu_table = &smu->smu_table;
103 struct smu_table *table = NULL;
/* reject NULL payloads and out-of-range table ids */
106 if (!table_data || table_id >= smu_table->table_count)
109 table = &smu_table->tables[table_id];
112 memcpy(table->cpu_addr, table_data, table->size);
/* hand the 64-bit MC address to the SMU in two 32-bit halves */
114 ret = smu_send_smc_msg_with_param(smu, SMU_MSG_SetDriverDramAddrHigh,
115 upper_32_bits(table->mc_address));
118 ret = smu_send_smc_msg_with_param(smu, SMU_MSG_SetDriverDramAddrLow,
119 lower_32_bits(table->mc_address));
122 ret = smu_send_smc_msg_with_param(smu, drv2smu ?
123 SMU_MSG_TransferTableDram2Smu :
124 SMU_MSG_TransferTableSmu2Dram,
/* SMU -> driver direction: copy the freshly transferred table out */
130 memcpy(table_data, table->cpu_addr, table->size);
/*
 * is_support_sw_smu - whether this ASIC uses the software SMU path.
 * Keys off the asic_type enum ordering: VEGA20 and everything enumerated
 * after it are handled by SW SMU.
 */
135 bool is_support_sw_smu(struct amdgpu_device *adev)
140 if (adev->asic_type >= CHIP_VEGA20)
/*
 * smu_sys_get_pp_table - expose the active powerplay table to sysfs.
 * A user-hardcoded pptable (set via smu_sys_set_pp_table) takes precedence
 * over the VBIOS powerplay table. Returns the table size on success.
 * Ownership: *table aliases internal storage; the caller must not free it.
 */
146 int smu_sys_get_pp_table(struct smu_context *smu, void **table)
148 struct smu_table_context *smu_table = &smu->smu_table;
/* nothing to hand out if neither table source has been populated */
150 if (!smu_table->power_play_table && !smu_table->hardcode_pptable)
153 if (smu_table->hardcode_pptable)
154 *table = smu_table->hardcode_pptable;
156 *table = smu_table->power_play_table;
158 return smu_table->power_play_table_size;
/*
 * smu_sys_set_pp_table - install a user-supplied (hardcoded) pptable.
 * Validates that the ATOM header's usStructureSize matches the buffer
 * length, caches a private copy under smu->mutex, makes it the active
 * power_play_table, and resets the SMU so the new table takes effect.
 * The second mutex_unlock at the end is presumably an error-path label
 * target in lines not shown here — confirm against the full file.
 */
161 int smu_sys_set_pp_table(struct smu_context *smu, void *buf, size_t size)
163 struct smu_table_context *smu_table = &smu->smu_table;
164 ATOM_COMMON_TABLE_HEADER *header = (ATOM_COMMON_TABLE_HEADER *)buf;
167 if (header->usStructureSize != size) {
168 pr_err("pp table size not matched !\n")
172 mutex_lock(&smu->mutex);
/* allocate the private copy only on first use; reused afterwards */
173 if (!smu_table->hardcode_pptable)
174 smu_table->hardcode_pptable = kzalloc(size, GFP_KERNEL);
175 if (!smu_table->hardcode_pptable) {
180 memcpy(smu_table->hardcode_pptable, buf, size);
181 smu_table->power_play_table = smu_table->hardcode_pptable;
182 smu_table->power_play_table_size = size;
183 mutex_unlock(&smu->mutex);
/* re-init the SMU so the new pptable is parsed and uploaded */
185 ret = smu_reset(smu);
187 pr_info("smu reset failed, ret = %d\n", ret);
190 mutex_unlock(&smu->mutex);
/*
 * smu_feature_init_dpm - build the "allowed" DPM feature bitmap.
 * Starts from all features allowed, then clears the ASIC-specific
 * unallowed mask obtained from the backend. Both bitmap operations are
 * done under feature->mutex; the mask query itself is lock-free.
 */
194 int smu_feature_init_dpm(struct smu_context *smu)
196 struct smu_feature *feature = &smu->smu_feature;
198 uint32_t unallowed_feature_mask[SMU_FEATURE_MAX/32];
200 mutex_lock(&feature->mutex);
201 bitmap_fill(feature->allowed, SMU_FEATURE_MAX);
202 mutex_unlock(&feature->mutex);
204 ret = smu_get_unallowed_feature_mask(smu, unallowed_feature_mask,
/* allowed &= ~unallowed, over feature_num bits */
209 mutex_lock(&feature->mutex);
210 bitmap_andnot(feature->allowed, feature->allowed,
211 (unsigned long *)unallowed_feature_mask,
212 feature->feature_num);
213 mutex_unlock(&feature->mutex);
/*
 * smu_feature_is_enabled - test a bit in the cached "enabled" feature map.
 * Returns the bit value (0/1) read under feature->mutex.
 * NOTE(review): feature_id is a 0-based bit index, so the sanity check
 * arguably wants '>=' rather than '>' — WARN only, no functional impact.
 */
218 int smu_feature_is_enabled(struct smu_context *smu, int feature_id)
220 struct smu_feature *feature = &smu->smu_feature;
223 WARN_ON(feature_id > feature->feature_num);
225 mutex_lock(&feature->mutex);
226 ret = test_bit(feature_id, feature->enabled);
227 mutex_unlock(&feature->mutex);
/*
 * smu_feature_set_enabled - enable/disable a feature on the SMU and mirror
 * the result in the cached "enabled" bitmap. The hardware update and the
 * bitmap update are done atomically with respect to other feature helpers
 * via feature->mutex.
 */
232 int smu_feature_set_enabled(struct smu_context *smu, int feature_id, bool enable)
234 struct smu_feature *feature = &smu->smu_feature;
237 WARN_ON(feature_id > feature->feature_num);
239 mutex_lock(&feature->mutex);
/* push the state change to the SMU first; bitmap follows on success */
240 ret = smu_feature_update_enable_state(smu, feature_id, enable);
245 test_and_set_bit(feature_id, feature->enabled);
247 test_and_clear_bit(feature_id, feature->enabled);
250 mutex_unlock(&feature->mutex);
/*
 * smu_feature_is_supported - test a bit in the cached "supported" feature
 * map, under feature->mutex. Returns the bit value (0/1).
 */
255 int smu_feature_is_supported(struct smu_context *smu, int feature_id)
257 struct smu_feature *feature = &smu->smu_feature;
260 WARN_ON(feature_id > feature->feature_num);
262 mutex_lock(&feature->mutex);
263 ret = test_bit(feature_id, feature->supported);
264 mutex_unlock(&feature->mutex);
269 int smu_feature_set_supported(struct smu_context *smu, int feature_id,
272 struct smu_feature *feature = &smu->smu_feature;
275 WARN_ON(feature_id > feature->feature_num);
277 mutex_unlock(&feature->mutex);
279 test_and_set_bit(feature_id, feature->supported);
281 test_and_clear_bit(feature_id, feature->supported);
282 mutex_unlock(&feature->mutex);
/*
 * smu_set_funcs - wire up the per-ASIC SMU callback table.
 * Only the SMU v11 hookup is visible in this chunk; other asic_type cases
 * and the default path are not shown here.
 */
287 static int smu_set_funcs(struct amdgpu_device *adev)
289 struct smu_context *smu = &adev->smu;
291 switch (adev->asic_type) {
293 smu_v11_0_set_smu_funcs(smu);
/*
 * smu_early_init - amd_ip_funcs.early_init hook.
 * Initializes the context mutex and selects the ASIC-specific function
 * table; its status is returned directly.
 */
302 static int smu_early_init(void *handle)
304 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
305 struct smu_context *smu = &adev->smu;
308 mutex_init(&smu->mutex);
310 return smu_set_funcs(adev);
/*
 * smu_late_init - amd_ip_funcs.late_init hook.
 * Kicks the state machine with AMD_PP_TASK_COMPLETE_INIT at the current
 * DPM level, under smu->mutex. The smu_handle_task() return value is not
 * checked here.
 */
313 static int smu_late_init(void *handle)
315 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
316 struct smu_context *smu = &adev->smu;
317 mutex_lock(&smu->mutex);
318 smu_handle_task(&adev->smu,
319 smu->smu_dpm.dpm_level,
320 AMD_PP_TASK_COMPLETE_INIT);
321 mutex_unlock(&smu->mutex);
/*
 * smu_get_atom_data_table - locate an ATOM BIOS data table.
 * Thin wrapper over amdgpu_atom_parse_data_header(); on success *addr
 * points into the in-memory VBIOS image at the table's data_start offset
 * (no copy is made — the pointer aliases the BIOS image).
 */
326 int smu_get_atom_data_table(struct smu_context *smu, uint32_t table,
327 uint16_t *size, uint8_t *frev, uint8_t *crev,
330 struct amdgpu_device *adev = smu->adev;
333 if (!amdgpu_atom_parse_data_header(adev->mode_info.atom_context, table,
334 size, frev, crev, &data_start))
337 *addr = (uint8_t *)adev->mode_info.atom_context->bios + data_start;
/* smu_initialize_pptable - pptable setup step called from table sw-init;
 * body not visible in this chunk. */
342 static int smu_initialize_pptable(struct smu_context *smu)
/*
 * smu_smc_table_sw_init - software-side setup of all SMC table state:
 * pptable initialization, smc table structures (PPTABLE, WATERMARKS,
 * SMU_METRICS, ...), and the smu_power_context. Each step logs and
 * bails out on failure.
 */
348 static int smu_smc_table_sw_init(struct smu_context *smu)
352 ret = smu_initialize_pptable(smu);
354 pr_err("Failed to init smu_initialize_pptable!\n");
359 * Create smu_table structure, and init smc tables such as
360 * TABLE_PPTABLE, TABLE_WATERMARKS, TABLE_SMU_METRICS, and etc.
362 ret = smu_init_smc_tables(smu);
364 pr_err("Failed to init smc tables!\n");
369 * Create smu_power_context structure, and allocate smu_dpm_context and
370 * context size to fill the smu_power_context data.
372 ret = smu_init_power(smu);
374 pr_err("Failed to init smu_init_power!\n");
/*
 * smu_smc_table_sw_fini - tear down the SMC table structures created by
 * smu_smc_table_sw_init(); logs on failure.
 */
381 static int smu_smc_table_sw_fini(struct smu_context *smu)
385 ret = smu_fini_smc_tables(smu);
387 pr_err("Failed to smu_fini_smc_tables!\n");
/*
 * smu_sw_init - amd_ip_funcs.sw_init hook.
 * Seeds the smu context with defaults: feature bitmaps, watermark state,
 * workload priority/setting tables, default profile mode and DPM level;
 * then loads the SMU microcode and performs the SMC-table software init.
 */
394 static int smu_sw_init(void *handle)
396 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
397 struct smu_context *smu = &adev->smu;
400 if (!is_support_sw_smu(adev))
403 smu->pool_size = adev->pm.smu_prv_buffer_size;
404 smu->smu_feature.feature_num = SMU_FEATURE_MAX;
405 mutex_init(&smu->smu_feature.mutex);
406 bitmap_zero(smu->smu_feature.supported, SMU_FEATURE_MAX);
407 bitmap_zero(smu->smu_feature.enabled, SMU_FEATURE_MAX);
408 bitmap_zero(smu->smu_feature.allowed, SMU_FEATURE_MAX);
409 smu->watermarks_bitmap = 0;
410 smu->power_profile_mode = PP_SMC_POWER_PROFILE_BOOTUP_DEFAULT;
411 smu->default_power_profile_mode = PP_SMC_POWER_PROFILE_BOOTUP_DEFAULT;
/*
 * NOTE(review): workload_mask is derived from workload_prority[] BEFORE
 * the priorities are assigned just below. This only works because the
 * context is zero-initialized at allocation (so the mask is 1 << 0).
 * Consider moving this line after the priority table init.
 * ("prority" is the field's actual spelling in the struct definition.)
 */
413 smu->workload_mask = 1 << smu->workload_prority[PP_SMC_POWER_PROFILE_BOOTUP_DEFAULT];
414 smu->workload_prority[PP_SMC_POWER_PROFILE_BOOTUP_DEFAULT] = 0;
415 smu->workload_prority[PP_SMC_POWER_PROFILE_FULLSCREEN3D] = 1;
416 smu->workload_prority[PP_SMC_POWER_PROFILE_POWERSAVING] = 2;
417 smu->workload_prority[PP_SMC_POWER_PROFILE_VIDEO] = 3;
418 smu->workload_prority[PP_SMC_POWER_PROFILE_VR] = 4;
419 smu->workload_prority[PP_SMC_POWER_PROFILE_COMPUTE] = 5;
420 smu->workload_prority[PP_SMC_POWER_PROFILE_CUSTOM] = 6;
/* workload_setting[] maps priority index back to the profile id */
422 smu->workload_setting[0] = PP_SMC_POWER_PROFILE_BOOTUP_DEFAULT;
423 smu->workload_setting[1] = PP_SMC_POWER_PROFILE_FULLSCREEN3D;
424 smu->workload_setting[2] = PP_SMC_POWER_PROFILE_POWERSAVING;
425 smu->workload_setting[3] = PP_SMC_POWER_PROFILE_VIDEO;
426 smu->workload_setting[4] = PP_SMC_POWER_PROFILE_VR;
427 smu->workload_setting[5] = PP_SMC_POWER_PROFILE_COMPUTE;
428 smu->workload_setting[6] = PP_SMC_POWER_PROFILE_CUSTOM;
429 smu->display_config = &adev->pm.pm_display_cfg;
431 smu->smu_dpm.dpm_level = AMD_DPM_FORCED_LEVEL_AUTO;
432 smu->smu_dpm.requested_dpm_level = AMD_DPM_FORCED_LEVEL_AUTO;
433 ret = smu_init_microcode(smu);
435 pr_err("Failed to load smu firmware!\n");
439 ret = smu_smc_table_sw_init(smu);
441 pr_err("Failed to sw init smc table!\n");
/*
 * smu_sw_fini - amd_ip_funcs.sw_fini hook.
 * Reverses smu_sw_init: tears down the SMC tables then the power context.
 * Each failure is logged; a no-op on non-SW-SMU ASICs.
 */
448 static int smu_sw_fini(void *handle)
450 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
451 struct smu_context *smu = &adev->smu;
454 if (!is_support_sw_smu(adev))
457 ret = smu_smc_table_sw_fini(smu);
459 pr_err("Failed to sw fini smc table!\n");
463 ret = smu_fini_power(smu);
465 pr_err("Failed to init smu_fini_power!\n");
/*
 * smu_init_fb_allocations - create a kernel BO for every non-empty SMC
 * table, recording its MC address and CPU mapping. The trailing loop
 * (L212-L215) is the failure-unwind path: it frees the BOs created so
 * far when one allocation fails.
 */
472 static int smu_init_fb_allocations(struct smu_context *smu)
474 struct amdgpu_device *adev = smu->adev;
475 struct smu_table_context *smu_table = &smu->smu_table;
476 struct smu_table *tables = smu_table->tables;
477 uint32_t table_count = smu_table->table_count;
481 if (table_count <= 0)
484 for (i = 0 ; i < table_count; i++) {
/* tables with zero size are placeholders — skip them */
485 if (tables[i].size == 0)
487 ret = amdgpu_bo_create_kernel(adev,
492 &tables[i].mc_address,
493 &tables[i].cpu_addr);
/* error unwind: release everything allocated before the failure */
501 if (tables[i].size == 0)
503 amdgpu_bo_free_kernel(&tables[i].bo,
504 &tables[i].mc_address,
505 &tables[i].cpu_addr);
/*
 * smu_fini_fb_allocations - free the per-table BOs created by
 * smu_init_fb_allocations(); skips placeholder (zero-size) tables and
 * is a no-op when no tables exist.
 */
511 static int smu_fini_fb_allocations(struct smu_context *smu)
513 struct smu_table_context *smu_table = &smu->smu_table;
514 struct smu_table *tables = smu_table->tables;
515 uint32_t table_count = smu_table->table_count;
518 if (table_count == 0 || tables == NULL)
521 for (i = 0 ; i < table_count; i++) {
522 if (tables[i].size == 0)
524 amdgpu_bo_free_kernel(&tables[i].bo,
525 &tables[i].mc_address,
526 &tables[i].cpu_addr);
/*
 * smu_smc_table_hw_init - hardware-side SMU bring-up sequence.
 * Ordered pipeline: display init, allowed-feature mask, pptable fetch
 * from VBIOS, bootup/clock values, pptable validation, table BO
 * allocation, pptable parse + upload, firmware version check, AVFS BTC,
 * feature enable, display notification, deep-sleep DCEFCLK, DPM context
 * population, sustainable clocks, OD8 defaults, UMD pstate clocks,
 * power limit, and finally the tools (PMSTATUSLOG) table address.
 */
532 static int smu_smc_table_hw_init(struct smu_context *smu)
536 ret = smu_init_display(smu);
540 ret = smu_feature_set_allowed_mask(smu);
544 ret = smu_read_pptable_from_vbios(smu);
548 /* get boot_values from vbios to set revision, gfxclk, and etc. */
549 ret = smu_get_vbios_bootup_values(smu);
553 ret = smu_get_clk_info_from_vbios(smu);
558 * check if the format_revision in vbios is up to pptable header
559 * version, and the structure size is not 0.
/*
 * NOTE(review): smu_get_clk_info_from_vbios() is called a second time
 * here (also at L553 above) while the preceding comment describes the
 * pptable check — likely a redundant call; confirm against the full file.
 */
561 ret = smu_get_clk_info_from_vbios(smu);
565 ret = smu_check_pptable(smu);
570 * allocate vram bos to store smc table contents.
572 ret = smu_init_fb_allocations(smu);
577 * Parse pptable format and fill PPTable_t smc_pptable to
578 * smu_table_context structure. And read the smc_dpm_table from vbios,
579 * then fill it into smc_pptable.
581 ret = smu_parse_pptable(smu);
586 * Send msg GetDriverIfVersion to check if the return value is equal
587 * with DRIVER_IF_VERSION of smc header.
589 ret = smu_check_fw_version(smu);
594 * Copy pptable bo in the vram to smc with SMU MSGs such as
595 * SetDriverDramAddr and TransferTableDram2Smu.
597 ret = smu_write_pptable(smu);
601 /* issue RunAfllBtc msg */
602 ret = smu_run_afll_btc(smu);
606 ret = smu_feature_enable_all(smu);
610 ret = smu_notify_display_change(smu);
615 * Set min deep sleep dce fclk with bootup value from vbios via
616 * SetMinDeepSleepDcefclk MSG.
618 ret = smu_set_min_dcef_deep_sleep(smu);
623 * Set initialized values (get from vbios) to dpm tables context such as
624 * gfxclk, memclk, dcefclk, and etc. And enable the DPM feature for each
627 ret = smu_populate_smc_pptable(smu);
631 ret = smu_init_max_sustainable_clocks(smu);
635 ret = smu_set_od8_default_settings(smu);
639 ret = smu_populate_umd_state_clk(smu);
643 ret = smu_get_power_limit(smu, &smu->default_power_limit, false);
648 * Set PMSTATUSLOG table bo address with SetToolsDramAddr MSG for tools.
650 ret = smu_set_tool_table_location(smu);
656 * smu_alloc_memory_pool - allocate memory pool in the system memory
658 * @smu: pointer to the SMU context (stale kernel-doc said "amdgpu_device pointer")
660 * This memory pool will be used for SMC use and msg SetSystemVirtualDramAddr
661 * and DramLogSetDramAddr can notify it changed.
663 * Returns 0 on success, error on failure.
665 static int smu_alloc_memory_pool(struct smu_context *smu)
667 struct amdgpu_device *adev = smu->adev;
668 struct smu_table_context *smu_table = &smu->smu_table;
669 struct smu_table *memory_pool = &smu_table->memory_pool;
670 uint64_t pool_size = smu->pool_size;
/* a zero-sized pool means the feature is disabled — nothing to do */
673 if (pool_size == SMU_MEMORY_POOL_SIZE_ZERO)
676 memory_pool->size = pool_size;
677 memory_pool->align = PAGE_SIZE;
/* pool lives in GTT (system memory), per the function's purpose above */
678 memory_pool->domain = AMDGPU_GEM_DOMAIN_GTT;
/* only the discrete supported sizes get a backing BO */
681 case SMU_MEMORY_POOL_SIZE_256_MB:
682 case SMU_MEMORY_POOL_SIZE_512_MB:
683 case SMU_MEMORY_POOL_SIZE_1_GB:
684 case SMU_MEMORY_POOL_SIZE_2_GB:
685 ret = amdgpu_bo_create_kernel(adev,
690 &memory_pool->mc_address,
691 &memory_pool->cpu_addr);
/*
 * smu_free_memory_pool - release the SMC memory pool BO allocated by
 * smu_alloc_memory_pool() and clear the bookkeeping struct. No-op when
 * the pool was never allocated (size zero).
 */
700 static int smu_free_memory_pool(struct smu_context *smu)
702 struct smu_table_context *smu_table = &smu->smu_table;
703 struct smu_table *memory_pool = &smu_table->memory_pool;
706 if (memory_pool->size == SMU_MEMORY_POOL_SIZE_ZERO)
709 amdgpu_bo_free_kernel(&memory_pool->bo,
710 &memory_pool->mc_address,
711 &memory_pool->cpu_addr);
/* wipe size/addresses so a later free/alloc sees a clean slate */
713 memset(memory_pool, 0, sizeof(struct smu_table));
/*
 * smu_hw_init - amd_ip_funcs.hw_init hook.
 * Loads/validates SMC firmware when not PSP-loaded, then under smu->mutex:
 * DPM feature init, full SMC table hw init, memory pool alloc +
 * notification, and thermal control start. Marks dpm_enabled on success.
 * The trailing unlock at L312 is the error-path label target.
 */
717 static int smu_hw_init(void *handle)
720 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
721 struct smu_context *smu = &adev->smu;
723 if (!is_support_sw_smu(adev))
/* only direct-load needs the driver to push the microcode itself */
726 if (adev->firmware.load_type != AMDGPU_FW_LOAD_PSP) {
727 ret = smu_load_microcode(smu);
732 ret = smu_check_fw_status(smu);
734 pr_err("SMC firmware status is not correct\n");
738 mutex_lock(&smu->mutex);
740 ret = smu_feature_init_dpm(smu);
744 ret = smu_smc_table_hw_init(smu);
748 ret = smu_alloc_memory_pool(smu);
753 * Use msg SetSystemVirtualDramAddr and DramLogSetDramAddr can notify
756 ret = smu_notify_memory_pool_location(smu);
760 ret = smu_start_thermal_control(smu);
764 mutex_unlock(&smu->mutex);
766 adev->pm.dpm_enabled = true;
768 pr_info("SMU is initialized successfully!\n");
/* error path: drop the lock before returning the failure */
773 mutex_unlock(&smu->mutex);
/*
 * smu_hw_fini - amd_ip_funcs.hw_fini hook.
 * Frees every table-context allocation (pptable copies, sustainable
 * clocks, OD settings/capabilities, overdrive table, OD8 settings),
 * NULLing each pointer to guard against double-free, then releases the
 * per-table BOs and the memory pool.
 */
777 static int smu_hw_fini(void *handle)
779 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
780 struct smu_context *smu = &adev->smu;
781 struct smu_table_context *table_context = &smu->smu_table;
784 if (!is_support_sw_smu(adev))
787 kfree(table_context->driver_pptable);
788 table_context->driver_pptable = NULL;
790 kfree(table_context->max_sustainable_clocks);
791 table_context->max_sustainable_clocks = NULL;
793 kfree(table_context->od_feature_capabilities);
794 table_context->od_feature_capabilities = NULL;
796 kfree(table_context->od_settings_max);
797 table_context->od_settings_max = NULL;
799 kfree(table_context->od_settings_min);
800 table_context->od_settings_min = NULL;
802 kfree(table_context->overdrive_table);
803 table_context->overdrive_table = NULL;
805 kfree(table_context->od8_settings);
806 table_context->od8_settings = NULL;
808 ret = smu_fini_fb_allocations(smu);
812 ret = smu_free_memory_pool(smu);
/*
 * smu_reset - full SMU reinitialization: hw_fini followed by hw_init.
 * Used e.g. after a new pptable is installed so the SMU re-parses it.
 */
819 int smu_reset(struct smu_context *smu)
821 struct amdgpu_device *adev = smu->adev;
824 ret = smu_hw_fini(adev);
828 ret = smu_hw_init(adev);
/*
 * smu_suspend - amd_ip_funcs.suspend hook; a no-op guard on non-SW-SMU
 * ASICs (any further suspend work is not visible in this chunk).
 */
835 static int smu_suspend(void *handle)
837 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
839 if (!is_support_sw_smu(adev))
/*
 * smu_resume - amd_ip_funcs.resume hook.
 * Reloads/validates firmware when not PSP-loaded, then under smu->mutex
 * restores SMU state: tools table address, pptable, watermarks table,
 * last deep-sleep DCEFCLK, and re-enables system features. The trailing
 * unlock at L358 is the error-path label target.
 */
845 static int smu_resume(void *handle)
848 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
849 struct smu_context *smu = &adev->smu;
851 if (!is_support_sw_smu(adev))
854 pr_info("SMU is resuming...\n");
856 if (adev->firmware.load_type != AMDGPU_FW_LOAD_PSP) {
857 ret = smu_load_microcode(smu);
862 ret = smu_check_fw_status(smu);
864 pr_err("SMC firmware status is not correct\n");
868 mutex_lock(&smu->mutex);
870 ret = smu_set_tool_table_location(smu);
874 ret = smu_write_pptable(smu);
878 ret = smu_write_watermarks_table(smu);
882 ret = smu_set_last_dcef_min_deep_sleep_clk(smu);
886 ret = smu_system_features_control(smu, true);
890 mutex_unlock(&smu->mutex);
892 pr_info("SMU is resumed successfully!\n");
/* error path: drop the lock before returning the failure */
896 mutex_unlock(&smu->mutex);
/*
 * smu_display_configuration_change - push a new display configuration to
 * the SMU: min deep-sleep DCEFCLK (input is in 10 kHz, hence /100 to MHz
 * — presumably; confirm units against the caller), active display count
 * (paths with a nonzero controller_id), and CC6/pstate policy data.
 * All updates are made under smu->mutex.
 */
900 int smu_display_configuration_change(struct smu_context *smu,
901 const struct amd_pp_display_configuration *display_config)
904 int num_of_active_display = 0;
906 if (!is_support_sw_smu(smu->adev))
912 mutex_lock(&smu->mutex);
914 smu_set_deep_sleep_dcefclk(smu,
915 display_config->min_dcef_deep_sleep_set_clk / 100);
917 for (index = 0; index < display_config->num_path_including_non_display; index++) {
/* controller_id == 0 means the path drives no display */
918 if (display_config->displays[index].controller_id != 0)
919 num_of_active_display++;
922 smu_set_active_display_count(smu, num_of_active_display);
924 smu_store_cc6_data(smu, display_config->cpu_pstate_separation_time,
925 display_config->cpu_cc6_disable,
926 display_config->cpu_pstate_disable,
927 display_config->nb_pstate_switch_disable);
929 mutex_unlock(&smu->mutex);
/*
 * smu_get_clock_info - fill clk_info minimums from performance levels.
 * Queries PERF_LEVEL_ACTIVITY first, then the requested designation.
 * NOTE(review): the second query overwrites the exact same three fields
 * written by the first — the first write appears redundant unless the
 * second can fail partway; confirm against the full file.
 */
934 static int smu_get_clock_info(struct smu_context *smu,
935 struct smu_clock_info *clk_info,
936 enum smu_perf_level_designation designation)
939 struct smu_performance_level level = {0};
944 ret = smu_get_perf_level(smu, PERF_LEVEL_ACTIVITY, &level);
948 clk_info->min_mem_clk = level.memory_clock;
949 clk_info->min_eng_clk = level.core_clock;
950 clk_info->min_bus_bandwidth = level.non_local_mem_freq * level.non_local_mem_width;
952 ret = smu_get_perf_level(smu, designation, &level);
956 clk_info->min_mem_clk = level.memory_clock;
957 clk_info->min_eng_clk = level.core_clock;
958 clk_info->min_bus_bandwidth = level.non_local_mem_freq * level.non_local_mem_width;
/*
 * smu_get_current_clocks - report current engine/memory clock ranges and
 * bus bandwidth to the PM layer. Chooses the power-containment perf level
 * when supported, otherwise the activity level; shallow-sleep clocks
 * refine the *_in_sr fields when available. A DAL level of 0 maps to the
 * maximum clocks-state (PP_DAL_POWERLEVEL_7).
 */
963 int smu_get_current_clocks(struct smu_context *smu,
964 struct amd_pp_clock_info *clocks)
966 struct amd_pp_simple_clock_info simple_clocks = {0};
967 struct smu_clock_info hw_clocks;
970 if (!is_support_sw_smu(smu->adev))
973 mutex_lock(&smu->mutex);
975 smu_get_dal_power_level(smu, &simple_clocks);
977 if (smu->support_power_containment)
978 ret = smu_get_clock_info(smu, &hw_clocks,
979 PERF_LEVEL_POWER_CONTAINMENT);
981 ret = smu_get_clock_info(smu, &hw_clocks, PERF_LEVEL_ACTIVITY);
984 pr_err("Error in smu_get_clock_info\n");
988 clocks->min_engine_clock = hw_clocks.min_eng_clk;
989 clocks->max_engine_clock = hw_clocks.max_eng_clk;
990 clocks->min_memory_clock = hw_clocks.min_mem_clk;
991 clocks->max_memory_clock = hw_clocks.max_mem_clk;
992 clocks->min_bus_bandwidth = hw_clocks.min_bus_bandwidth;
993 clocks->max_bus_bandwidth = hw_clocks.max_bus_bandwidth;
/* default self-refresh bounds; refined below if SS clocks are available */
994 clocks->max_engine_clock_in_sr = hw_clocks.max_eng_clk;
995 clocks->min_engine_clock_in_sr = hw_clocks.min_eng_clk;
997 if (simple_clocks.level == 0)
998 clocks->max_clocks_state = PP_DAL_POWERLEVEL_7;
1000 clocks->max_clocks_state = simple_clocks.level;
1002 if (!smu_get_current_shallow_sleep_clocks(smu, &hw_clocks)) {
1003 clocks->max_engine_clock_in_sr = hw_clocks.max_eng_clk;
1004 clocks->min_engine_clock_in_sr = hw_clocks.min_eng_clk;
1008 mutex_unlock(&smu->mutex);
/* smu_set_clockgating_state - amd_ip_funcs hook; intentionally a stub. */
1012 static int smu_set_clockgating_state(void *handle,
1013 enum amd_clockgating_state state)
/* smu_set_powergating_state - amd_ip_funcs hook; intentionally a stub. */
1018 static int smu_set_powergating_state(void *handle,
1019 enum amd_powergating_state state)
/*
 * smu_enable_umd_pstate - manage entry/exit of UMD profiling pstates.
 * On entry to a profile level: save the current level and ungate GFX
 * clock/power gating so profiling is stable. On exit: restore the saved
 * level (for PROFILE_EXIT) and re-gate GFX. *level may be rewritten.
 */
1024 static int smu_enable_umd_pstate(void *handle,
1025 enum amd_dpm_forced_level *level)
1027 uint32_t profile_mode_mask = AMD_DPM_FORCED_LEVEL_PROFILE_STANDARD |
1028 AMD_DPM_FORCED_LEVEL_PROFILE_MIN_SCLK |
1029 AMD_DPM_FORCED_LEVEL_PROFILE_MIN_MCLK |
1030 AMD_DPM_FORCED_LEVEL_PROFILE_PEAK;
1032 struct smu_context *smu = (struct smu_context*)(handle);
1033 struct smu_dpm_context *smu_dpm_ctx = &(smu->smu_dpm);
1034 if (!smu_dpm_ctx->dpm_context)
/* currently NOT in a profile pstate */
1037 if (!(smu_dpm_ctx->dpm_level & profile_mode_mask)) {
1038 /* enter umd pstate, save current level, disable gfx cg*/
1039 if (*level & profile_mode_mask) {
1040 smu_dpm_ctx->saved_dpm_level = smu_dpm_ctx->dpm_level;
1041 smu_dpm_ctx->enable_umd_pstate = true;
1042 amdgpu_device_ip_set_clockgating_state(smu->adev,
1043 AMD_IP_BLOCK_TYPE_GFX,
1044 AMD_CG_STATE_UNGATE);
1045 amdgpu_device_ip_set_powergating_state(smu->adev,
1046 AMD_IP_BLOCK_TYPE_GFX,
1047 AMD_PG_STATE_UNGATE);
1050 /* exit umd pstate, restore level, enable gfx cg*/
1051 if (!(*level & profile_mode_mask)) {
1052 if (*level == AMD_DPM_FORCED_LEVEL_PROFILE_EXIT)
1053 *level = smu_dpm_ctx->saved_dpm_level;
1054 smu_dpm_ctx->enable_umd_pstate = false;
1055 amdgpu_device_ip_set_clockgating_state(smu->adev,
1056 AMD_IP_BLOCK_TYPE_GFX,
/* CG/PG gate-state arguments on the exit path are not visible here */
1058 amdgpu_device_ip_set_powergating_state(smu->adev,
1059 AMD_IP_BLOCK_TYPE_GFX,
/*
 * smu_unforce_dpm_levels - return DPM to automatic range by uploading
 * both the bootup (min, max=false) and max (max=true) level bounds.
 */
1067 int smu_unforce_dpm_levels(struct smu_context *smu)
1071 ret = smu_upload_dpm_level(smu, false);
1073 pr_err("Failed to upload DPM Bootup Levels!");
1077 ret = smu_upload_dpm_level(smu, true);
1079 pr_err("Failed to upload DPM Max Levels!");
/*
 * smu_adjust_power_state_dynamic - core DPM state machine.
 * Optionally applies display-config changes, then clock adjust rules,
 * then (again optionally) notifies the SMC of the display config; if the
 * requested forced level differs from the current one, dispatches the
 * level-specific action and records the new level. Finally, outside
 * MANUAL mode, re-applies the highest-priority workload profile.
 * ("dispaly" in smu_notify_smc_dispaly_config is the callee's actual
 * spelling — defined elsewhere, cannot be fixed here.)
 */
1086 int smu_adjust_power_state_dynamic(struct smu_context *smu,
1087 enum amd_dpm_forced_level level,
1088 bool skip_display_settings)
1092 uint32_t sclk_mask, mclk_mask, soc_mask;
1094 struct smu_dpm_context *smu_dpm_ctx = &(smu->smu_dpm);
1096 if (!skip_display_settings) {
1097 ret = smu_display_config_changed(smu);
1099 pr_err("Failed to change display config!");
1104 ret = smu_apply_clocks_adjust_rules(smu);
1106 pr_err("Failed to apply clocks adjust rules!");
1110 if (!skip_display_settings) {
1111 ret = smu_notify_smc_dispaly_config(smu);
1113 pr_err("Failed to notify smc display config!");
/* only act when the forced level actually changes */
1118 if (smu_dpm_ctx->dpm_level != level) {
1120 case AMD_DPM_FORCED_LEVEL_HIGH:
1121 ret = smu_force_dpm_limit_value(smu, true);
1123 case AMD_DPM_FORCED_LEVEL_LOW:
1124 ret = smu_force_dpm_limit_value(smu, false);
1127 case AMD_DPM_FORCED_LEVEL_AUTO:
1128 ret = smu_unforce_dpm_levels(smu);
1131 case AMD_DPM_FORCED_LEVEL_PROFILE_STANDARD:
1132 case AMD_DPM_FORCED_LEVEL_PROFILE_MIN_SCLK:
1133 case AMD_DPM_FORCED_LEVEL_PROFILE_MIN_MCLK:
1134 case AMD_DPM_FORCED_LEVEL_PROFILE_PEAK:
1135 ret = smu_get_profiling_clk_mask(smu, level,
/* force single-level masks for the profiling pstates */
1141 smu_force_clk_levels(smu, PP_SCLK, 1 << sclk_mask);
1142 smu_force_clk_levels(smu, PP_MCLK, 1 << mclk_mask);
1145 case AMD_DPM_FORCED_LEVEL_MANUAL:
1146 case AMD_DPM_FORCED_LEVEL_PROFILE_EXIT:
1152 smu_dpm_ctx->dpm_level = level;
/* outside MANUAL: apply the highest-priority pending workload profile */
1155 if (smu_dpm_ctx->dpm_level != AMD_DPM_FORCED_LEVEL_MANUAL) {
1156 index = fls(smu->workload_mask);
1157 index = index > 0 && index <= WORKLOAD_POLICY_MAX ? index - 1 : 0;
1158 workload = smu->workload_setting[index];
1160 if (smu->power_profile_mode != workload)
1161 smu_set_power_profile_mode(smu, &workload, 0);
/*
 * smu_handle_task - entry point for PM-layer task requests.
 * DISPLAY_CONFIG_CHANGE runs the pre-change hook and CPU power state
 * update before a full dynamic adjust; COMPLETE_INIT and
 * READJUST_POWER_STATE skip the display-settings portion.
 */
1167 int smu_handle_task(struct smu_context *smu,
1168 enum amd_dpm_forced_level level,
1169 enum amd_pp_task task_id)
1174 case AMD_PP_TASK_DISPLAY_CONFIG_CHANGE:
1175 ret = smu_pre_display_config_changed(smu);
1178 ret = smu_set_cpu_power_state(smu);
1181 ret = smu_adjust_power_state_dynamic(smu, level, false);
1183 case AMD_PP_TASK_COMPLETE_INIT:
1184 case AMD_PP_TASK_READJUST_POWER_STATE:
1185 ret = smu_adjust_power_state_dynamic(smu, level, true);
/*
 * amd_ip_funcs vtable for the SW SMU IP block: lifecycle hooks defined in
 * this file; soft-reset/idle hooks intentionally unimplemented (NULL).
 */
1194 const struct amd_ip_funcs smu_ip_funcs = {
1196 .early_init = smu_early_init,
1197 .late_init = smu_late_init,
1198 .sw_init = smu_sw_init,
1199 .sw_fini = smu_sw_fini,
1200 .hw_init = smu_hw_init,
1201 .hw_fini = smu_hw_fini,
1202 .suspend = smu_suspend,
1203 .resume = smu_resume,
1205 .check_soft_reset = NULL,
1206 .wait_for_idle = NULL,
1208 .set_clockgating_state = smu_set_clockgating_state,
1209 .set_powergating_state = smu_set_powergating_state,
1210 .enable_umd_pstate = smu_enable_umd_pstate,
/*
 * IP block registration record for SMU v11: identifies this driver as the
 * SMC block and points at the vtable above (version fields not visible in
 * this chunk).
 */
1213 const struct amdgpu_ip_block_version smu_v11_0_ip_block =
1215 .type = AMD_IP_BLOCK_TYPE_SMC,
1219 .funcs = &smu_ip_funcs,