/*
 * Copyright 2019 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 */
#include <linux/firmware.h>

#include "pp_debug.h"
#include "amdgpu.h"
#include "amdgpu_smu.h"
#include "soc15_common.h"
#include "smu_v11_0.h"
#include "atom.h"
#include "amd_pcie.h"
int smu_get_smc_version(struct smu_context *smu, uint32_t *if_version, uint32_t *smu_version)
{
	int ret = 0;

	if (!if_version && !smu_version)
		return -EINVAL;

	if (if_version) {
		ret = smu_send_smc_msg(smu, SMU_MSG_GetDriverIfVersion);
		if (ret)
			return ret;

		ret = smu_read_smc_arg(smu, if_version);
		if (ret)
			return ret;
	}

	if (smu_version) {
		ret = smu_send_smc_msg(smu, SMU_MSG_GetSmuVersion);
		if (ret)
			return ret;

		ret = smu_read_smc_arg(smu, smu_version);
		if (ret)
			return ret;
	}

	return ret;
}
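/*
 * The clock-limit messages used below pack their argument as
 * (clk_id << 16) | freq: the clock id goes in the upper 16 bits and the
 * requested frequency (in MHz) in the lower 16 bits.
 */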
int smu_set_soft_freq_range(struct smu_context *smu, enum smu_clk_type clk_type,
			    uint32_t min, uint32_t max)
{
	int ret = 0, clk_id = 0;
	uint32_t param;

	if (min <= 0 && max <= 0)
		return -EINVAL;

	if (!smu_clk_dpm_is_enabled(smu, clk_type))
		return 0;

	clk_id = smu_clk_get_index(smu, clk_type);
	if (clk_id < 0)
		return clk_id;

	if (max > 0) {
		param = (uint32_t)((clk_id << 16) | (max & 0xffff));
		ret = smu_send_smc_msg_with_param(smu, SMU_MSG_SetSoftMaxByFreq,
						  param);
		if (ret)
			return ret;
	}

	if (min > 0) {
		param = (uint32_t)((clk_id << 16) | (min & 0xffff));
		ret = smu_send_smc_msg_with_param(smu, SMU_MSG_SetSoftMinByFreq,
						  param);
		if (ret)
			return ret;
	}

	return ret;
}
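/*
 * smu_set_hard_freq_range() below uses the same argument packing, but sends
 * the SetHardMin/MaxByFreq messages, which the firmware is expected to treat
 * as hard clamps on the clock range rather than soft requests.
 */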
int smu_set_hard_freq_range(struct smu_context *smu, enum smu_clk_type clk_type,
			    uint32_t min, uint32_t max)
{
	int ret = 0, clk_id = 0;
	uint32_t param;

	if (min <= 0 && max <= 0)
		return -EINVAL;

	if (!smu_clk_dpm_is_enabled(smu, clk_type))
		return 0;

	clk_id = smu_clk_get_index(smu, clk_type);
	if (clk_id < 0)
		return clk_id;

	if (max > 0) {
		param = (uint32_t)((clk_id << 16) | (max & 0xffff));
		ret = smu_send_smc_msg_with_param(smu, SMU_MSG_SetHardMaxByFreq,
						  param);
		if (ret)
			return ret;
	}

	if (min > 0) {
		param = (uint32_t)((clk_id << 16) | (min & 0xffff));
		ret = smu_send_smc_msg_with_param(smu, SMU_MSG_SetHardMinByFreq,
						  param);
		if (ret)
			return ret;
	}

	return ret;
}
int smu_get_dpm_freq_range(struct smu_context *smu, enum smu_clk_type clk_type,
			   uint32_t *min, uint32_t *max)
{
	int ret = 0, clk_id = 0;
	uint32_t param = 0;
	uint32_t clock_limit;

	if (!min && !max)
		return -EINVAL;

	if (!smu_clk_dpm_is_enabled(smu, clk_type)) {
		switch (clk_type) {
		case SMU_MCLK:
		case SMU_UCLK:
			clock_limit = smu->smu_table.boot_values.uclk;
			break;
		case SMU_GFXCLK:
		case SMU_SCLK:
			clock_limit = smu->smu_table.boot_values.gfxclk;
			break;
		case SMU_SOCCLK:
			clock_limit = smu->smu_table.boot_values.socclk;
			break;
		default:
			clock_limit = 0;
			break;
		}

		/* clock in MHz unit */
		if (min)
			*min = clock_limit / 100;
		if (max)
			*max = clock_limit / 100;

		return 0;
	}

	mutex_lock(&smu->mutex);
	clk_id = smu_clk_get_index(smu, clk_type);
	if (clk_id < 0) {
		ret = -EINVAL;
		goto failed;
	}

	param = (clk_id & 0xffff) << 16;

	if (max) {
		ret = smu_send_smc_msg_with_param(smu, SMU_MSG_GetMaxDpmFreq, param);
		if (ret)
			goto failed;
		ret = smu_read_smc_arg(smu, max);
		if (ret)
			goto failed;
	}

	if (min) {
		ret = smu_send_smc_msg_with_param(smu, SMU_MSG_GetMinDpmFreq, param);
		if (ret)
			goto failed;
		ret = smu_read_smc_arg(smu, min);
		if (ret)
			goto failed;
	}

failed:
	mutex_unlock(&smu->mutex);
	return ret;
}
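/*
 * Reading a DPM level by index doubles as a level-count query: passing the
 * magic index 0xff (see smu_get_dpm_level_count() below) makes the firmware
 * return the number of DPM levels instead of a frequency.
 */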
int smu_get_dpm_freq_by_index(struct smu_context *smu, enum smu_clk_type clk_type,
			      uint16_t level, uint32_t *value)
{
	int ret = 0, clk_id = 0;
	uint32_t param;

	if (!value)
		return -EINVAL;

	if (!smu_clk_dpm_is_enabled(smu, clk_type))
		return 0;

	clk_id = smu_clk_get_index(smu, clk_type);
	if (clk_id < 0)
		return clk_id;

	param = (uint32_t)(((clk_id & 0xffff) << 16) | (level & 0xffff));

	ret = smu_send_smc_msg_with_param(smu, SMU_MSG_GetDpmFreqByIndex,
					  param);
	if (ret)
		return ret;

	ret = smu_read_smc_arg(smu, &param);
	if (ret)
		return ret;

	/* BIT31: 0 - fine grained DPM, 1 - discrete DPM
	 * discrete DPM is not supported for now, so mask the bit off
	 */
	*value = param & 0x7fffffff;

	return ret;
}
int smu_get_dpm_level_count(struct smu_context *smu, enum smu_clk_type clk_type,
			    uint32_t *value)
{
	return smu_get_dpm_freq_by_index(smu, clk_type, 0xff, value);
}
bool smu_clk_dpm_is_enabled(struct smu_context *smu, enum smu_clk_type clk_type)
{
	enum smu_feature_mask feature_id = 0;

	switch (clk_type) {
	case SMU_MCLK:
	case SMU_UCLK:
		feature_id = SMU_FEATURE_DPM_UCLK_BIT;
		break;
	case SMU_GFXCLK:
	case SMU_SCLK:
		feature_id = SMU_FEATURE_DPM_GFXCLK_BIT;
		break;
	case SMU_SOCCLK:
		feature_id = SMU_FEATURE_DPM_SOCCLK_BIT;
		break;
	default:
		return true;
	}

	if (!smu_feature_is_enabled(smu, feature_id)) {
		pr_warn("clk type %d dpm feature %d is not enabled\n", clk_type, feature_id);
		return false;
	}

	return true;
}
int smu_dpm_set_power_gate(struct smu_context *smu, uint32_t block_type,
			   bool gate)
{
	int ret = 0;

	switch (block_type) {
	case AMD_IP_BLOCK_TYPE_UVD:
		ret = smu_dpm_set_uvd_enable(smu, gate);
		break;
	case AMD_IP_BLOCK_TYPE_VCE:
		ret = smu_dpm_set_vce_enable(smu, gate);
		break;
	case AMD_IP_BLOCK_TYPE_GFX:
		ret = smu_gfx_off_control(smu, gate);
		break;
	default:
		break;
	}

	return ret;
}
enum amd_pm_state_type smu_get_current_power_state(struct smu_context *smu)
{
	/* power states are not supported; always report the default state */
	return POWER_STATE_TYPE_DEFAULT;
}
int smu_get_power_num_states(struct smu_context *smu,
			     struct pp_states_info *state_info)
{
	if (!state_info)
		return -EINVAL;

	/* power states are not supported; expose a single default state */
	memset(state_info, 0, sizeof(struct pp_states_info));
	state_info->nums = 1;
	state_info->states[0] = POWER_STATE_TYPE_DEFAULT;

	return 0;
}
int smu_common_read_sensor(struct smu_context *smu, enum amd_pp_sensors sensor,
			   void *data, uint32_t *size)
{
	int ret = 0;

	switch (sensor) {
	case AMDGPU_PP_SENSOR_STABLE_PSTATE_SCLK:
		*((uint32_t *)data) = smu->pstate_sclk;
		*size = 4;
		break;
	case AMDGPU_PP_SENSOR_STABLE_PSTATE_MCLK:
		*((uint32_t *)data) = smu->pstate_mclk;
		*size = 4;
		break;
	case AMDGPU_PP_SENSOR_ENABLED_SMC_FEATURES_MASK:
		ret = smu_feature_get_enabled_mask(smu, (uint32_t *)data, 2);
		*size = 8;
		break;
	case AMDGPU_PP_SENSOR_UVD_POWER:
		*(uint32_t *)data = smu_feature_is_enabled(smu, SMU_FEATURE_DPM_UVD_BIT) ? 1 : 0;
		*size = 4;
		break;
	case AMDGPU_PP_SENSOR_VCE_POWER:
		*(uint32_t *)data = smu_feature_is_enabled(smu, SMU_FEATURE_DPM_VCE_BIT) ? 1 : 0;
		*size = 4;
		break;
	case AMDGPU_PP_SENSOR_VCN_POWER_STATE:
		*(uint32_t *)data = smu_feature_is_enabled(smu, SMU_FEATURE_VCN_PG_BIT) ? 1 : 0;
		*size = 4;
		break;
	default:
		ret = -EINVAL;
		break;
	}

	if (ret)
		*size = 0;

	return ret;
}
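/*
 * Table transfers are a three-message handshake: the driver first publishes
 * the table's DRAM address via SetDriverDramAddrHigh/Low, then asks the SMC
 * to copy it with TransferTableDram2Smu (driver -> SMC) or
 * TransferTableSmu2Dram (SMC -> driver).
 */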
int smu_update_table(struct smu_context *smu, enum smu_table_id table_index, int argument,
		     void *table_data, bool drv2smu)
{
	struct smu_table_context *smu_table = &smu->smu_table;
	struct smu_table *table = NULL;
	int ret = 0;
	int table_id = smu_table_get_index(smu, table_index);

	if (!table_data || table_id >= smu_table->table_count)
		return -EINVAL;

	table = &smu_table->tables[table_index];

	if (drv2smu)
		memcpy(table->cpu_addr, table_data, table->size);

	ret = smu_send_smc_msg_with_param(smu, SMU_MSG_SetDriverDramAddrHigh,
					  upper_32_bits(table->mc_address));
	if (ret)
		return ret;
	ret = smu_send_smc_msg_with_param(smu, SMU_MSG_SetDriverDramAddrLow,
					  lower_32_bits(table->mc_address));
	if (ret)
		return ret;
	ret = smu_send_smc_msg_with_param(smu, drv2smu ?
					  SMU_MSG_TransferTableDram2Smu :
					  SMU_MSG_TransferTableSmu2Dram,
					  table_id | ((argument & 0xFFFF) << 16));
	if (ret)
		return ret;

	if (!drv2smu)
		memcpy(table_data, table->cpu_addr, table->size);

	return ret;
}
bool is_support_sw_smu(struct amdgpu_device *adev)
{
	if (adev->asic_type == CHIP_VEGA20)
		return amdgpu_dpm == 2;
	else if (adev->asic_type >= CHIP_NAVI10)
		return true;

	return false;
}
int smu_sys_get_pp_table(struct smu_context *smu, void **table)
{
	struct smu_table_context *smu_table = &smu->smu_table;

	if (!smu_table->power_play_table && !smu_table->hardcode_pptable)
		return -EINVAL;

	if (smu_table->hardcode_pptable)
		*table = smu_table->hardcode_pptable;
	else
		*table = smu_table->power_play_table;

	return smu_table->power_play_table_size;
}
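/*
 * Overriding the powerplay table from user space replaces the active table
 * and then resets the SMU (smu_reset()) so the new table takes effect.
 */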
int smu_sys_set_pp_table(struct smu_context *smu, void *buf, size_t size)
{
	struct smu_table_context *smu_table = &smu->smu_table;
	ATOM_COMMON_TABLE_HEADER *header = (ATOM_COMMON_TABLE_HEADER *)buf;
	int ret = 0;

	if (!smu->pm_enabled)
		return -EINVAL;
	if (header->usStructureSize != size) {
		pr_err("pp table size does not match!\n");
		return -EIO;
	}

	mutex_lock(&smu->mutex);
	if (!smu_table->hardcode_pptable)
		smu_table->hardcode_pptable = kzalloc(size, GFP_KERNEL);
	if (!smu_table->hardcode_pptable) {
		ret = -ENOMEM;
		goto failed;
	}

	memcpy(smu_table->hardcode_pptable, buf, size);
	smu_table->power_play_table = smu_table->hardcode_pptable;
	smu_table->power_play_table_size = size;
	mutex_unlock(&smu->mutex);

	ret = smu_reset(smu);
	if (ret)
		pr_info("smu reset failed, ret = %d\n", ret);

	return ret;

failed:
	mutex_unlock(&smu->mutex);
	return ret;
}
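/*
 * SMU feature state is tracked in three bitmaps guarded by feature->mutex:
 * 'allowed' (features the driver permits, seeded from
 * smu_get_allowed_feature_mask()), plus 'supported' and 'enabled', which are
 * maintained by the smu_feature_set/is_* helpers below.
 */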
int smu_feature_init_dpm(struct smu_context *smu)
{
	struct smu_feature *feature = &smu->smu_feature;
	int ret = 0;
	uint32_t allowed_feature_mask[SMU_FEATURE_MAX/32];

	if (!smu->pm_enabled)
		return ret;
	mutex_lock(&feature->mutex);
	bitmap_zero(feature->allowed, SMU_FEATURE_MAX);
	mutex_unlock(&feature->mutex);

	ret = smu_get_allowed_feature_mask(smu, allowed_feature_mask,
					   SMU_FEATURE_MAX/32);
	if (ret)
		return ret;

	mutex_lock(&feature->mutex);
	bitmap_or(feature->allowed, feature->allowed,
		  (unsigned long *)allowed_feature_mask,
		  feature->feature_num);
	mutex_unlock(&feature->mutex);

	return ret;
}
int smu_feature_is_enabled(struct smu_context *smu, enum smu_feature_mask mask)
{
	struct smu_feature *feature = &smu->smu_feature;
	int feature_id;
	int ret = 0;

	feature_id = smu_feature_get_index(smu, mask);
	if (feature_id < 0)
		return 0;

	WARN_ON(feature_id > feature->feature_num);

	mutex_lock(&feature->mutex);
	ret = test_bit(feature_id, feature->enabled);
	mutex_unlock(&feature->mutex);

	return ret;
}
int smu_feature_set_enabled(struct smu_context *smu, enum smu_feature_mask mask,
			    bool enable)
{
	struct smu_feature *feature = &smu->smu_feature;
	int feature_id;
	int ret = 0;

	feature_id = smu_feature_get_index(smu, mask);
	if (feature_id < 0)
		return -EINVAL;

	WARN_ON(feature_id > feature->feature_num);

	mutex_lock(&feature->mutex);
	ret = smu_feature_update_enable_state(smu, feature_id, enable);
	if (ret)
		goto failed;

	if (enable)
		test_and_set_bit(feature_id, feature->enabled);
	else
		test_and_clear_bit(feature_id, feature->enabled);

failed:
	mutex_unlock(&feature->mutex);

	return ret;
}
int smu_feature_is_supported(struct smu_context *smu, enum smu_feature_mask mask)
{
	struct smu_feature *feature = &smu->smu_feature;
	int feature_id;
	int ret = 0;

	feature_id = smu_feature_get_index(smu, mask);
	if (feature_id < 0)
		return 0;

	WARN_ON(feature_id > feature->feature_num);

	mutex_lock(&feature->mutex);
	ret = test_bit(feature_id, feature->supported);
	mutex_unlock(&feature->mutex);

	return ret;
}
int smu_feature_set_supported(struct smu_context *smu,
			      enum smu_feature_mask mask,
			      bool enable)
{
	struct smu_feature *feature = &smu->smu_feature;
	int feature_id;
	int ret = 0;

	feature_id = smu_feature_get_index(smu, mask);
	if (feature_id < 0)
		return -EINVAL;

	WARN_ON(feature_id > feature->feature_num);

	mutex_lock(&feature->mutex);
	if (enable)
		test_and_set_bit(feature_id, feature->supported);
	else
		test_and_clear_bit(feature_id, feature->supported);
	mutex_unlock(&feature->mutex);

	return ret;
}
static int smu_set_funcs(struct amdgpu_device *adev)
{
	struct smu_context *smu = &adev->smu;

	switch (adev->asic_type) {
	case CHIP_VEGA20:
	case CHIP_NAVI10:
		if (adev->pm.pp_feature & PP_OVERDRIVE_MASK)
			smu->od_enabled = true;
		smu_v11_0_set_smu_funcs(smu);
		break;
	default:
		return -EINVAL;
	}

	return 0;
}
static int smu_early_init(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	struct smu_context *smu = &adev->smu;

	smu->adev = adev;
	smu->pm_enabled = !!amdgpu_dpm;
	mutex_init(&smu->mutex);

	return smu_set_funcs(adev);
}
static int smu_late_init(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	struct smu_context *smu = &adev->smu;

	if (!smu->pm_enabled)
		return 0;
	mutex_lock(&smu->mutex);
	smu_handle_task(&adev->smu,
			smu->smu_dpm.dpm_level,
			AMD_PP_TASK_COMPLETE_INIT);
	mutex_unlock(&smu->mutex);

	return 0;
}
int smu_get_atom_data_table(struct smu_context *smu, uint32_t table,
			    uint16_t *size, uint8_t *frev, uint8_t *crev,
			    uint8_t **addr)
{
	struct amdgpu_device *adev = smu->adev;
	uint16_t data_start;

	if (!amdgpu_atom_parse_data_header(adev->mode_info.atom_context, table,
					   size, frev, crev, &data_start))
		return -EINVAL;

	*addr = (uint8_t *)adev->mode_info.atom_context->bios + data_start;

	return 0;
}
static int smu_initialize_pptable(struct smu_context *smu)
{
	/* TODO */
	return 0;
}
static int smu_smc_table_sw_init(struct smu_context *smu)
{
	int ret;

	ret = smu_initialize_pptable(smu);
	if (ret) {
		pr_err("Failed to initialize the pptable!\n");
		return ret;
	}

	/*
	 * Create the smu_table structure, and init smc tables such as
	 * TABLE_PPTABLE, TABLE_WATERMARKS, and TABLE_SMU_METRICS.
	 */
	ret = smu_init_smc_tables(smu);
	if (ret) {
		pr_err("Failed to init smc tables!\n");
		return ret;
	}

	/*
	 * Create the smu_power_context structure, and allocate the
	 * smu_dpm_context within it to hold the dpm state.
	 */
	ret = smu_init_power(smu);
	if (ret) {
		pr_err("Failed to init smu power!\n");
		return ret;
	}

	return 0;
}
static int smu_smc_table_sw_fini(struct smu_context *smu)
{
	int ret;

	ret = smu_fini_smc_tables(smu);
	if (ret) {
		pr_err("Failed to fini smc tables!\n");
		return ret;
	}

	return 0;
}
static int smu_sw_init(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	struct smu_context *smu = &adev->smu;
	int ret;

	smu->pool_size = adev->pm.smu_prv_buffer_size;
	smu->smu_feature.feature_num = SMU_FEATURE_MAX;
	mutex_init(&smu->smu_feature.mutex);
	bitmap_zero(smu->smu_feature.supported, SMU_FEATURE_MAX);
	bitmap_zero(smu->smu_feature.enabled, SMU_FEATURE_MAX);
	bitmap_zero(smu->smu_feature.allowed, SMU_FEATURE_MAX);

	mutex_init(&smu->smu_baco.mutex);
	smu->smu_baco.state = SMU_BACO_STATE_EXIT;
	smu->smu_baco.platform_support = false;

	smu->watermarks_bitmap = 0;
	smu->power_profile_mode = PP_SMC_POWER_PROFILE_BOOTUP_DEFAULT;
	smu->default_power_profile_mode = PP_SMC_POWER_PROFILE_BOOTUP_DEFAULT;

	smu->workload_prority[PP_SMC_POWER_PROFILE_BOOTUP_DEFAULT] = 0;
	smu->workload_prority[PP_SMC_POWER_PROFILE_FULLSCREEN3D] = 1;
	smu->workload_prority[PP_SMC_POWER_PROFILE_POWERSAVING] = 2;
	smu->workload_prority[PP_SMC_POWER_PROFILE_VIDEO] = 3;
	smu->workload_prority[PP_SMC_POWER_PROFILE_VR] = 4;
	smu->workload_prority[PP_SMC_POWER_PROFILE_COMPUTE] = 5;
	smu->workload_prority[PP_SMC_POWER_PROFILE_CUSTOM] = 6;
	/* compute the default mask only after the priorities are assigned */
	smu->workload_mask = 1 << smu->workload_prority[PP_SMC_POWER_PROFILE_BOOTUP_DEFAULT];

	smu->workload_setting[0] = PP_SMC_POWER_PROFILE_BOOTUP_DEFAULT;
	smu->workload_setting[1] = PP_SMC_POWER_PROFILE_FULLSCREEN3D;
	smu->workload_setting[2] = PP_SMC_POWER_PROFILE_POWERSAVING;
	smu->workload_setting[3] = PP_SMC_POWER_PROFILE_VIDEO;
	smu->workload_setting[4] = PP_SMC_POWER_PROFILE_VR;
	smu->workload_setting[5] = PP_SMC_POWER_PROFILE_COMPUTE;
	smu->workload_setting[6] = PP_SMC_POWER_PROFILE_CUSTOM;
	smu->display_config = &adev->pm.pm_display_cfg;

	smu->smu_dpm.dpm_level = AMD_DPM_FORCED_LEVEL_AUTO;
	smu->smu_dpm.requested_dpm_level = AMD_DPM_FORCED_LEVEL_AUTO;
	ret = smu_init_microcode(smu);
	if (ret) {
		pr_err("Failed to load smu firmware!\n");
		return ret;
	}

	ret = smu_smc_table_sw_init(smu);
	if (ret) {
		pr_err("Failed to sw init smc table!\n");
		return ret;
	}

	ret = smu_register_irq_handler(smu);
	if (ret) {
		pr_err("Failed to register smc irq handler!\n");
		return ret;
	}

	return 0;
}
static int smu_sw_fini(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	struct smu_context *smu = &adev->smu;
	int ret;

	kfree(smu->irq_source);
	smu->irq_source = NULL;

	ret = smu_smc_table_sw_fini(smu);
	if (ret) {
		pr_err("Failed to sw fini smc table!\n");
		return ret;
	}

	ret = smu_fini_power(smu);
	if (ret) {
		pr_err("Failed to fini smu power!\n");
		return ret;
	}

	return 0;
}
static int smu_init_fb_allocations(struct smu_context *smu)
{
	struct amdgpu_device *adev = smu->adev;
	struct smu_table_context *smu_table = &smu->smu_table;
	struct smu_table *tables = smu_table->tables;
	uint32_t table_count = smu_table->table_count;
	uint32_t i = 0;
	int32_t ret = 0;

	if (table_count <= 0)
		return -EINVAL;

	for (i = 0; i < table_count; i++) {
		if (tables[i].size == 0)
			continue;
		ret = amdgpu_bo_create_kernel(adev,
					      tables[i].size,
					      tables[i].align,
					      tables[i].domain,
					      &tables[i].bo,
					      &tables[i].mc_address,
					      &tables[i].cpu_addr);
		if (ret)
			goto failed;
	}

	return 0;

failed:
	/* unwind only the tables that were successfully allocated */
	while (i--) {
		if (tables[i].size == 0)
			continue;
		amdgpu_bo_free_kernel(&tables[i].bo,
				      &tables[i].mc_address,
				      &tables[i].cpu_addr);
	}
	return ret;
}
static int smu_fini_fb_allocations(struct smu_context *smu)
{
	struct smu_table_context *smu_table = &smu->smu_table;
	struct smu_table *tables = smu_table->tables;
	uint32_t table_count = smu_table->table_count;
	uint32_t i = 0;

	if (table_count == 0 || tables == NULL)
		return 0;

	for (i = 0; i < table_count; i++) {
		if (tables[i].size == 0)
			continue;
		amdgpu_bo_free_kernel(&tables[i].bo,
				      &tables[i].mc_address,
				      &tables[i].cpu_addr);
	}

	return 0;
}
static int smu_override_pcie_parameters(struct smu_context *smu)
{
	struct amdgpu_device *adev = smu->adev;
	uint32_t pcie_gen = 0, pcie_width = 0, smu_pcie_arg;
	int ret;

	if (adev->pm.pcie_gen_mask & CAIL_PCIE_LINK_SPEED_SUPPORT_GEN4)
		pcie_gen = 3;
	else if (adev->pm.pcie_gen_mask & CAIL_PCIE_LINK_SPEED_SUPPORT_GEN3)
		pcie_gen = 2;
	else if (adev->pm.pcie_gen_mask & CAIL_PCIE_LINK_SPEED_SUPPORT_GEN2)
		pcie_gen = 1;
	else if (adev->pm.pcie_gen_mask & CAIL_PCIE_LINK_SPEED_SUPPORT_GEN1)
		pcie_gen = 0;

	/* Bit 31:16: LCLK DPM level. 0 is DPM0, and 1 is DPM1
	 * Bit 15:8:  PCIE GEN, 0 to 3 corresponds to GEN1 to GEN4
	 * Bit 7:0:   PCIE lane width, 1 to 7 corresponds to x1 to x32
	 */
	if (adev->pm.pcie_mlw_mask & CAIL_PCIE_LINK_WIDTH_SUPPORT_X16)
		pcie_width = 6;
	else if (adev->pm.pcie_mlw_mask & CAIL_PCIE_LINK_WIDTH_SUPPORT_X12)
		pcie_width = 5;
	else if (adev->pm.pcie_mlw_mask & CAIL_PCIE_LINK_WIDTH_SUPPORT_X8)
		pcie_width = 4;
	else if (adev->pm.pcie_mlw_mask & CAIL_PCIE_LINK_WIDTH_SUPPORT_X4)
		pcie_width = 3;
	else if (adev->pm.pcie_mlw_mask & CAIL_PCIE_LINK_WIDTH_SUPPORT_X2)
		pcie_width = 2;
	else if (adev->pm.pcie_mlw_mask & CAIL_PCIE_LINK_WIDTH_SUPPORT_X1)
		pcie_width = 1;

	smu_pcie_arg = (1 << 16) | (pcie_gen << 8) | pcie_width;
	ret = smu_send_smc_msg_with_param(smu,
					  SMU_MSG_OverridePcieParameters,
					  smu_pcie_arg);
	if (ret)
		pr_err("[%s] Attempt to override pcie params failed!\n", __func__);
	return ret;
}
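/*
 * smu_smc_table_hw_init() is shared between first init and resume: it runs
 * with initialize=true from smu_hw_init() (full vbios parsing and table
 * setup) and with initialize=false from smu_resume(), which replays only the
 * steps the firmware loses across suspend.
 */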
static int smu_smc_table_hw_init(struct smu_context *smu,
				 bool initialize)
{
	struct amdgpu_device *adev = smu->adev;
	int ret;

	if (smu_is_dpm_running(smu) && adev->in_suspend) {
		pr_info("dpm has been enabled\n");
		return 0;
	}

	ret = smu_init_display_count(smu, 0);
	if (ret)
		return ret;

	if (initialize) {
		/* get boot_values from vbios to set revision, gfxclk, etc. */
		ret = smu_get_vbios_bootup_values(smu);
		if (ret)
			return ret;

		ret = smu_setup_pptable(smu);
		if (ret)
			return ret;

		ret = smu_get_clk_info_from_vbios(smu);
		if (ret)
			return ret;

		/*
		 * Check that the format_revision in vbios matches the pptable
		 * header version, and that the structure size is not 0.
		 */
		ret = smu_check_pptable(smu);
		if (ret)
			return ret;

		/* allocate vram bos to store smc table contents */
		ret = smu_init_fb_allocations(smu);
		if (ret)
			return ret;

		/*
		 * Parse the pptable format and fill the PPTable_t smc_pptable
		 * in the smu_table_context structure. Then read the
		 * smc_dpm_table from vbios and fill it into smc_pptable too.
		 */
		ret = smu_parse_pptable(smu);
		if (ret)
			return ret;

		/*
		 * Send the GetDriverIfVersion message and check that the value
		 * returned matches the DRIVER_IF_VERSION in the smc header.
		 */
		ret = smu_check_fw_version(smu);
		if (ret)
			return ret;
	}

	/*
	 * Copy the pptable bo in vram to the smc with SMU messages such as
	 * SetDriverDramAddr and TransferTableDram2Smu.
	 */
	ret = smu_write_pptable(smu);
	if (ret)
		return ret;

	/* issue the RunAfllBtc message */
	ret = smu_run_afll_btc(smu);
	if (ret)
		return ret;

	ret = smu_feature_set_allowed_mask(smu);
	if (ret)
		return ret;

	ret = smu_system_features_control(smu, true);
	if (ret)
		return ret;

	ret = smu_override_pcie_parameters(smu);
	if (ret)
		return ret;

	ret = smu_notify_display_change(smu);
	if (ret)
		return ret;

	/*
	 * Set the min deep sleep dce fclk to the bootup value from vbios via
	 * the SetMinDeepSleepDcefclk message.
	 */
	ret = smu_set_min_dcef_deep_sleep(smu);
	if (ret)
		return ret;

	/*
	 * Set initialized values (from vbios) in the dpm tables context, such
	 * as gfxclk, memclk and dcefclk, and enable the DPM feature for each
	 * clock domain.
	 */
	ret = smu_populate_smc_pptable(smu);
	if (ret)
		return ret;

	ret = smu_init_max_sustainable_clocks(smu);
	if (ret)
		return ret;

	ret = smu_set_default_od_settings(smu, initialize);
	if (ret)
		return ret;

	if (initialize) {
		ret = smu_populate_umd_state_clk(smu);
		if (ret)
			return ret;

		ret = smu_get_power_limit(smu, &smu->default_power_limit, false);
		if (ret)
			return ret;
	}

	/*
	 * Set the PMSTATUSLOG table bo address with the SetToolsDramAddr
	 * message, for tools.
	 */
	ret = smu_set_tool_table_location(smu);

	if (!smu_is_dpm_running(smu))
		pr_info("dpm has been disabled\n");

	return ret;
}
/**
 * smu_alloc_memory_pool - allocate a memory pool in system memory
 *
 * @smu: pointer to the SMU context
 *
 * This memory pool is reserved for SMC use. Its address is handed to the
 * SMC via the SetSystemVirtualDramAddr and DramLogSetDramAddr messages.
 *
 * Returns 0 on success, error on failure.
 */
static int smu_alloc_memory_pool(struct smu_context *smu)
{
	struct amdgpu_device *adev = smu->adev;
	struct smu_table_context *smu_table = &smu->smu_table;
	struct smu_table *memory_pool = &smu_table->memory_pool;
	uint64_t pool_size = smu->pool_size;
	int ret = 0;

	if (pool_size == SMU_MEMORY_POOL_SIZE_ZERO)
		return ret;

	memory_pool->size = pool_size;
	memory_pool->align = PAGE_SIZE;
	memory_pool->domain = AMDGPU_GEM_DOMAIN_GTT;

	switch (pool_size) {
	case SMU_MEMORY_POOL_SIZE_256_MB:
	case SMU_MEMORY_POOL_SIZE_512_MB:
	case SMU_MEMORY_POOL_SIZE_1_GB:
	case SMU_MEMORY_POOL_SIZE_2_GB:
		ret = amdgpu_bo_create_kernel(adev,
					      memory_pool->size,
					      memory_pool->align,
					      memory_pool->domain,
					      &memory_pool->bo,
					      &memory_pool->mc_address,
					      &memory_pool->cpu_addr);
		break;
	default:
		break;
	}

	return ret;
}
static int smu_free_memory_pool(struct smu_context *smu)
{
	struct smu_table_context *smu_table = &smu->smu_table;
	struct smu_table *memory_pool = &smu_table->memory_pool;

	if (memory_pool->size == SMU_MEMORY_POOL_SIZE_ZERO)
		return 0;

	amdgpu_bo_free_kernel(&memory_pool->bo,
			      &memory_pool->mc_address,
			      &memory_pool->cpu_addr);

	memset(memory_pool, 0, sizeof(struct smu_table));

	return 0;
}
static int smu_hw_init(void *handle)
{
	int ret;
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	struct smu_context *smu = &adev->smu;

	if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) {
		ret = smu_check_fw_status(smu);
		if (ret) {
			pr_err("SMC firmware status is not correct\n");
			return ret;
		}
	}

	mutex_lock(&smu->mutex);

	ret = smu_feature_init_dpm(smu);
	if (ret)
		goto failed;

	ret = smu_smc_table_hw_init(smu, true);
	if (ret)
		goto failed;

	ret = smu_alloc_memory_pool(smu);
	if (ret)
		goto failed;

	/*
	 * Pass the memory pool location to the SMC with the
	 * SetSystemVirtualDramAddr and DramLogSetDramAddr messages.
	 */
	ret = smu_notify_memory_pool_location(smu);
	if (ret)
		goto failed;

	mutex_unlock(&smu->mutex);

	ret = smu_start_thermal_control(smu);
	if (ret)
		return ret;

	if (!smu->pm_enabled)
		adev->pm.dpm_enabled = false;
	else
		adev->pm.dpm_enabled = true;	/* TODO: will set dpm_enabled flag while VCN and DAL DPM is workable */

	pr_info("SMU is initialized successfully!\n");

	return 0;

failed:
	mutex_unlock(&smu->mutex);
	return ret;
}
static int smu_hw_fini(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	struct smu_context *smu = &adev->smu;
	struct smu_table_context *table_context = &smu->smu_table;
	int ret = 0;

	kfree(table_context->driver_pptable);
	table_context->driver_pptable = NULL;

	kfree(table_context->max_sustainable_clocks);
	table_context->max_sustainable_clocks = NULL;

	kfree(table_context->overdrive_table);
	table_context->overdrive_table = NULL;

	ret = smu_fini_fb_allocations(smu);
	if (ret)
		return ret;

	ret = smu_free_memory_pool(smu);
	if (ret)
		return ret;

	return 0;
}
int smu_reset(struct smu_context *smu)
{
	struct amdgpu_device *adev = smu->adev;
	int ret = 0;

	ret = smu_hw_fini(adev);
	if (ret)
		return ret;

	ret = smu_hw_init(adev);
	if (ret)
		return ret;

	return ret;
}
static int smu_suspend(void *handle)
{
	int ret;
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	struct smu_context *smu = &adev->smu;
	bool baco_feature_is_enabled = smu_feature_is_enabled(smu, SMU_FEATURE_BACO_BIT);

	ret = smu_system_features_control(smu, false);
	if (ret)
		return ret;

	if (adev->in_gpu_reset && baco_feature_is_enabled) {
		ret = smu_feature_set_enabled(smu, SMU_FEATURE_BACO_BIT, true);
		if (ret) {
			pr_warn("failed to keep the BACO feature enabled, ret = %d\n", ret);
			return ret;
		}
	}

	smu->watermarks_bitmap &= ~(WATERMARKS_LOADED);

	if (adev->asic_type >= CHIP_NAVI10 &&
	    adev->gfx.rlc.funcs->stop)
		adev->gfx.rlc.funcs->stop(adev);

	return 0;
}
static int smu_resume(void *handle)
{
	int ret;
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	struct smu_context *smu = &adev->smu;

	pr_info("SMU is resuming...\n");

	mutex_lock(&smu->mutex);

	ret = smu_smc_table_hw_init(smu, false);
	if (ret)
		goto failed;

	ret = smu_start_thermal_control(smu);
	if (ret)
		goto failed;

	mutex_unlock(&smu->mutex);

	pr_info("SMU is resumed successfully!\n");

	return 0;

failed:
	mutex_unlock(&smu->mutex);
	return ret;
}
int smu_display_configuration_change(struct smu_context *smu,
				     const struct amd_pp_display_configuration *display_config)
{
	int index = 0;
	int num_of_active_display = 0;

	if (!smu->pm_enabled || !is_support_sw_smu(smu->adev))
		return -EINVAL;

	if (!display_config)
		return -EINVAL;

	mutex_lock(&smu->mutex);

	smu_set_deep_sleep_dcefclk(smu,
				   display_config->min_dcef_deep_sleep_set_clk / 100);

	for (index = 0; index < display_config->num_path_including_non_display; index++) {
		if (display_config->displays[index].controller_id != 0)
			num_of_active_display++;
	}

	smu_set_active_display_count(smu, num_of_active_display);

	smu_store_cc6_data(smu, display_config->cpu_pstate_separation_time,
			   display_config->cpu_cc6_disable,
			   display_config->cpu_pstate_disable,
			   display_config->nb_pstate_switch_disable);

	mutex_unlock(&smu->mutex);

	return 0;
}
static int smu_get_clock_info(struct smu_context *smu,
			      struct smu_clock_info *clk_info,
			      enum smu_perf_level_designation designation)
{
	int ret;
	struct smu_performance_level level = {0};

	if (!clk_info)
		return -EINVAL;

	ret = smu_get_perf_level(smu, PERF_LEVEL_ACTIVITY, &level);
	if (ret)
		return -EINVAL;

	clk_info->min_mem_clk = level.memory_clock;
	clk_info->min_eng_clk = level.core_clock;
	clk_info->min_bus_bandwidth = level.non_local_mem_freq * level.non_local_mem_width;

	ret = smu_get_perf_level(smu, designation, &level);
	if (ret)
		return -EINVAL;

	clk_info->min_mem_clk = level.memory_clock;
	clk_info->min_eng_clk = level.core_clock;
	clk_info->min_bus_bandwidth = level.non_local_mem_freq * level.non_local_mem_width;

	return 0;
}
int smu_get_current_clocks(struct smu_context *smu,
			   struct amd_pp_clock_info *clocks)
{
	struct amd_pp_simple_clock_info simple_clocks = {0};
	struct smu_clock_info hw_clocks;
	int ret = 0;

	if (!is_support_sw_smu(smu->adev))
		return -EINVAL;

	mutex_lock(&smu->mutex);

	smu_get_dal_power_level(smu, &simple_clocks);

	if (smu->support_power_containment)
		ret = smu_get_clock_info(smu, &hw_clocks,
					 PERF_LEVEL_POWER_CONTAINMENT);
	else
		ret = smu_get_clock_info(smu, &hw_clocks, PERF_LEVEL_ACTIVITY);

	if (ret) {
		pr_err("Error in smu_get_clock_info\n");
		goto failed;
	}

	clocks->min_engine_clock = hw_clocks.min_eng_clk;
	clocks->max_engine_clock = hw_clocks.max_eng_clk;
	clocks->min_memory_clock = hw_clocks.min_mem_clk;
	clocks->max_memory_clock = hw_clocks.max_mem_clk;
	clocks->min_bus_bandwidth = hw_clocks.min_bus_bandwidth;
	clocks->max_bus_bandwidth = hw_clocks.max_bus_bandwidth;
	clocks->max_engine_clock_in_sr = hw_clocks.max_eng_clk;
	clocks->min_engine_clock_in_sr = hw_clocks.min_eng_clk;

	if (simple_clocks.level == 0)
		clocks->max_clocks_state = PP_DAL_POWERLEVEL_7;
	else
		clocks->max_clocks_state = simple_clocks.level;

	if (!smu_get_current_shallow_sleep_clocks(smu, &hw_clocks)) {
		clocks->max_engine_clock_in_sr = hw_clocks.max_eng_clk;
		clocks->min_engine_clock_in_sr = hw_clocks.min_eng_clk;
	}

failed:
	mutex_unlock(&smu->mutex);
	return ret;
}
static int smu_set_clockgating_state(void *handle,
				     enum amd_clockgating_state state)
{
	return 0;
}

static int smu_set_powergating_state(void *handle,
				     enum amd_powergating_state state)
{
	return 0;
}
static int smu_enable_umd_pstate(void *handle,
				 enum amd_dpm_forced_level *level)
{
	uint32_t profile_mode_mask = AMD_DPM_FORCED_LEVEL_PROFILE_STANDARD |
				     AMD_DPM_FORCED_LEVEL_PROFILE_MIN_SCLK |
				     AMD_DPM_FORCED_LEVEL_PROFILE_MIN_MCLK |
				     AMD_DPM_FORCED_LEVEL_PROFILE_PEAK;

	struct smu_context *smu = (struct smu_context *)(handle);
	struct smu_dpm_context *smu_dpm_ctx = &(smu->smu_dpm);

	if (!smu->pm_enabled || !smu_dpm_ctx->dpm_context)
		return -EINVAL;

	if (!(smu_dpm_ctx->dpm_level & profile_mode_mask)) {
		/* enter umd pstate, save current level, disable gfx cg */
		if (*level & profile_mode_mask) {
			smu_dpm_ctx->saved_dpm_level = smu_dpm_ctx->dpm_level;
			smu_dpm_ctx->enable_umd_pstate = true;
			amdgpu_device_ip_set_clockgating_state(smu->adev,
							       AMD_IP_BLOCK_TYPE_GFX,
							       AMD_CG_STATE_UNGATE);
			amdgpu_device_ip_set_powergating_state(smu->adev,
							       AMD_IP_BLOCK_TYPE_GFX,
							       AMD_PG_STATE_UNGATE);
		}
	} else {
		/* exit umd pstate, restore level, enable gfx cg */
		if (!(*level & profile_mode_mask)) {
			if (*level == AMD_DPM_FORCED_LEVEL_PROFILE_EXIT)
				*level = smu_dpm_ctx->saved_dpm_level;
			smu_dpm_ctx->enable_umd_pstate = false;
			amdgpu_device_ip_set_clockgating_state(smu->adev,
							       AMD_IP_BLOCK_TYPE_GFX,
							       AMD_CG_STATE_GATE);
			amdgpu_device_ip_set_powergating_state(smu->adev,
							       AMD_IP_BLOCK_TYPE_GFX,
							       AMD_PG_STATE_GATE);
		}
	}

	return 0;
}
static int smu_default_set_performance_level(struct smu_context *smu, enum amd_dpm_forced_level level)
{
	int ret = 0;
	uint32_t sclk_mask, mclk_mask, soc_mask;

	switch (level) {
	case AMD_DPM_FORCED_LEVEL_HIGH:
		ret = smu_force_dpm_limit_value(smu, true);
		break;
	case AMD_DPM_FORCED_LEVEL_LOW:
		ret = smu_force_dpm_limit_value(smu, false);
		break;
	case AMD_DPM_FORCED_LEVEL_AUTO:
	case AMD_DPM_FORCED_LEVEL_PROFILE_STANDARD:
		ret = smu_unforce_dpm_levels(smu);
		break;
	case AMD_DPM_FORCED_LEVEL_PROFILE_MIN_SCLK:
	case AMD_DPM_FORCED_LEVEL_PROFILE_MIN_MCLK:
	case AMD_DPM_FORCED_LEVEL_PROFILE_PEAK:
		ret = smu_get_profiling_clk_mask(smu, level,
						 &sclk_mask,
						 &mclk_mask,
						 &soc_mask);
		if (ret)
			return ret;
		smu_force_clk_levels(smu, SMU_SCLK, 1 << sclk_mask);
		smu_force_clk_levels(smu, SMU_MCLK, 1 << mclk_mask);
		smu_force_clk_levels(smu, SMU_SOCCLK, 1 << soc_mask);
		break;
	case AMD_DPM_FORCED_LEVEL_MANUAL:
	case AMD_DPM_FORCED_LEVEL_PROFILE_EXIT:
	default:
		break;
	}
	return ret;
}
int smu_adjust_power_state_dynamic(struct smu_context *smu,
				   enum amd_dpm_forced_level level,
				   bool skip_display_settings)
{
	int ret = 0;
	int index = 0;
	uint32_t workload;
	struct smu_dpm_context *smu_dpm_ctx = &(smu->smu_dpm);

	if (!smu->pm_enabled)
		return -EINVAL;

	if (!skip_display_settings) {
		ret = smu_display_config_changed(smu);
		if (ret) {
			pr_err("Failed to change display config!");
			return ret;
		}
	}

	ret = smu_apply_clocks_adjust_rules(smu);
	if (ret) {
		pr_err("Failed to apply clocks adjust rules!");
		return ret;
	}

	if (!skip_display_settings) {
		ret = smu_notify_smc_dispaly_config(smu);
		if (ret) {
			pr_err("Failed to notify smc display config!");
			return ret;
		}
	}

	if (smu_dpm_ctx->dpm_level != level) {
		ret = smu_asic_set_performance_level(smu, level);
		if (ret)
			ret = smu_default_set_performance_level(smu, level);
		if (!ret)
			smu_dpm_ctx->dpm_level = level;
	}

	if (smu_dpm_ctx->dpm_level != AMD_DPM_FORCED_LEVEL_MANUAL) {
		index = fls(smu->workload_mask);
		index = index > 0 && index <= WORKLOAD_POLICY_MAX ? index - 1 : 0;
		workload = smu->workload_setting[index];

		if (smu->power_profile_mode != workload)
			smu_set_power_profile_mode(smu, &workload, 0);
	}

	return ret;
}
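/*
 * smu_handle_task() is the power-state entry point: display-config changes
 * run the pre-change hooks before re-adjusting the power state, while
 * COMPLETE_INIT and READJUST_POWER_STATE skip the display settings.
 */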
int smu_handle_task(struct smu_context *smu,
		    enum amd_dpm_forced_level level,
		    enum amd_pp_task task_id)
{
	int ret = 0;

	switch (task_id) {
	case AMD_PP_TASK_DISPLAY_CONFIG_CHANGE:
		ret = smu_pre_display_config_changed(smu);
		if (ret)
			return ret;
		ret = smu_set_cpu_power_state(smu);
		if (ret)
			return ret;
		ret = smu_adjust_power_state_dynamic(smu, level, false);
		break;
	case AMD_PP_TASK_COMPLETE_INIT:
	case AMD_PP_TASK_READJUST_POWER_STATE:
		ret = smu_adjust_power_state_dynamic(smu, level, true);
		break;
	default:
		break;
	}

	return ret;
}
enum amd_dpm_forced_level smu_get_performance_level(struct smu_context *smu)
{
	struct smu_dpm_context *smu_dpm_ctx = &(smu->smu_dpm);
	enum amd_dpm_forced_level level;

	if (!smu_dpm_ctx->dpm_context)
		return -EINVAL;

	mutex_lock(&(smu->mutex));
	level = smu_dpm_ctx->dpm_level;
	mutex_unlock(&(smu->mutex));

	return level;
}
int smu_force_performance_level(struct smu_context *smu, enum amd_dpm_forced_level level)
{
	int ret = 0;
	int i;
	struct smu_dpm_context *smu_dpm_ctx = &(smu->smu_dpm);

	if (!smu_dpm_ctx->dpm_context)
		return -EINVAL;

	for (i = 0; i < smu->adev->num_ip_blocks; i++) {
		if (smu->adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_SMC)
			break;
	}

	smu->adev->ip_blocks[i].version->funcs->enable_umd_pstate(smu, &level);
	ret = smu_handle_task(smu, level,
			      AMD_PP_TASK_READJUST_POWER_STATE);
	if (ret)
		return ret;

	mutex_lock(&smu->mutex);
	smu_dpm_ctx->dpm_level = level;
	mutex_unlock(&smu->mutex);

	return ret;
}
int smu_set_display_count(struct smu_context *smu, uint32_t count)
{
	int ret = 0;

	mutex_lock(&smu->mutex);
	ret = smu_init_display_count(smu, count);
	mutex_unlock(&smu->mutex);

	return ret;
}
const struct amd_ip_funcs smu_ip_funcs = {
	.name = "smu",
	.early_init = smu_early_init,
	.late_init = smu_late_init,
	.sw_init = smu_sw_init,
	.sw_fini = smu_sw_fini,
	.hw_init = smu_hw_init,
	.hw_fini = smu_hw_fini,
	.suspend = smu_suspend,
	.resume = smu_resume,
	.is_idle = NULL,
	.check_soft_reset = NULL,
	.wait_for_idle = NULL,
	.soft_reset = NULL,
	.set_clockgating_state = smu_set_clockgating_state,
	.set_powergating_state = smu_set_powergating_state,
	.enable_umd_pstate = smu_enable_umd_pstate,
};
const struct amdgpu_ip_block_version smu_v11_0_ip_block =
{
	.type = AMD_IP_BLOCK_TYPE_SMC,
	.major = 11,
	.minor = 0,
	.rev = 0,
	.funcs = &smu_ip_funcs,
};