/*
 * Copyright 2019 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 */
#include <linux/firmware.h>
#include <linux/pci.h>

#include "amdgpu_smu.h"
#include "smu_internal.h"
#include "smu_v11_0.h"
#include "smu_v12_0.h"

#include "arcturus_ppt.h"
#include "navi10_ppt.h"
#include "sienna_cichlid_ppt.h"
#include "renoir_ppt.h"
/*
 * DO NOT use these for err/warn/info/debug messages.
 * Use dev_err, dev_warn, dev_info and dev_dbg instead.
 * They are more MGPU friendly.
 */
#undef pr_err
#undef pr_warn
#undef pr_info
#undef pr_debug
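
/*
 * Illustrative note (not from the original file): dev_err(smu->adev->dev, ...)
 * prefixes the message with the device name, so logs from multiple GPUs can
 * be told apart, while a bare pr_err() cannot.
 */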
#undef __SMU_DUMMY_MAP
#define __SMU_DUMMY_MAP(type)	#type
static const char *__smu_message_names[] = {
	SMU_MESSAGE_TYPES
};
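
/*
 * Illustrative note: __SMU_DUMMY_MAP() stringifies each entry, so the
 * SMU_MESSAGE_TYPES X-macro list above expands to a table of string
 * literals (e.g. "TestMessage"; exact names depend on the macro list)
 * indexed by enum smu_message_type.
 */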
const char *smu_get_message_name(struct smu_context *smu, enum smu_message_type type)
{
	if (type < 0 || type >= SMU_MSG_MAX_COUNT)
		return "unknown smu message";

	return __smu_message_names[type];
}
size_t smu_sys_get_pp_feature_mask(struct smu_context *smu, char *buf)
{
	size_t size = 0;

	if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
		return -EOPNOTSUPP;

	mutex_lock(&smu->mutex);

	size = smu_get_pp_feature_mask(smu, buf);

	mutex_unlock(&smu->mutex);

	return size;
}
int smu_sys_set_pp_feature_mask(struct smu_context *smu, uint64_t new_mask)
{
	int ret = 0;

	if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
		return -EOPNOTSUPP;

	mutex_lock(&smu->mutex);

	ret = smu_set_pp_feature_mask(smu, new_mask);

	mutex_unlock(&smu->mutex);

	return ret;
}
int smu_get_status_gfxoff(struct amdgpu_device *adev, uint32_t *value)
{
	int ret = 0;
	struct smu_context *smu = &adev->smu;

	if (is_support_sw_smu(adev) && smu->ppt_funcs->get_gfx_off_status)
		*value = smu_get_gfx_off_status(smu);
	else
		ret = -EINVAL;

	return ret;
}
int smu_set_soft_freq_range(struct smu_context *smu,
			    enum smu_clk_type clk_type,
			    uint32_t min,
			    uint32_t max)
{
	int ret = 0;

	mutex_lock(&smu->mutex);

	if (smu->ppt_funcs->set_soft_freq_limited_range)
		ret = smu->ppt_funcs->set_soft_freq_limited_range(smu,
								  clk_type,
								  min,
								  max);

	mutex_unlock(&smu->mutex);

	return ret;
}
int smu_get_dpm_freq_range(struct smu_context *smu,
			   enum smu_clk_type clk_type,
			   uint32_t *min,
			   uint32_t *max)
{
	int ret = 0;

	if (!min && !max)
		return -EINVAL;

	mutex_lock(&smu->mutex);

	if (smu->ppt_funcs->get_dpm_ultimate_freq)
		ret = smu->ppt_funcs->get_dpm_ultimate_freq(smu,
							    clk_type,
							    min,
							    max);

	mutex_unlock(&smu->mutex);

	return ret;
}
/**
 * smu_dpm_set_power_gate - power gate/ungate the specific IP block
 *
 * @smu:        smu_context pointer
 * @block_type: the IP block to power gate/ungate
 * @gate:       to power gate if true, ungate otherwise
 *
 * This API uses no smu->mutex lock protection due to:
 * 1. It is either called by other IP blocks (gfx/sdma/vcn/uvd/vce).
 *    The caller guarantees this path is free of race conditions.
 * 2. Or it is called on a user request to set power_dpm_force_performance_level.
 *    In that case, the smu->mutex lock protection is already enforced in
 *    the parent API smu_force_performance_level of the call path.
 */
int smu_dpm_set_power_gate(struct smu_context *smu, uint32_t block_type,
			   bool gate)
{
	int ret = 0;

	if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
		return -EOPNOTSUPP;

	switch (block_type) {
	/*
	 * Some legacy code of amdgpu_vcn.c and vcn_v2*.c still uses
	 * AMD_IP_BLOCK_TYPE_UVD for VCN. So, here both of them are kept.
	 */
	case AMD_IP_BLOCK_TYPE_UVD:
	case AMD_IP_BLOCK_TYPE_VCN:
		ret = smu_dpm_set_vcn_enable(smu, !gate);
		if (ret)
			dev_err(smu->adev->dev, "Failed to power %s VCN!\n",
				gate ? "gate" : "ungate");
		break;
	case AMD_IP_BLOCK_TYPE_GFX:
		ret = smu_gfx_off_control(smu, gate);
		if (ret)
			dev_err(smu->adev->dev, "Failed to %s gfxoff!\n",
				gate ? "enable" : "disable");
		break;
	case AMD_IP_BLOCK_TYPE_SDMA:
		ret = smu_powergate_sdma(smu, gate);
		if (ret)
			dev_err(smu->adev->dev, "Failed to power %s SDMA!\n",
				gate ? "gate" : "ungate");
		break;
	case AMD_IP_BLOCK_TYPE_JPEG:
		ret = smu_dpm_set_jpeg_enable(smu, !gate);
		if (ret)
			dev_err(smu->adev->dev, "Failed to power %s JPEG!\n",
				gate ? "gate" : "ungate");
		break;
	default:
		dev_err(smu->adev->dev, "Unsupported block type!\n");
		return -EINVAL;
	}

	return ret;
}
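
/*
 * Illustrative usage (hypothetical caller): the VCN code would gate its
 * power on idle with
 *	smu_dpm_set_power_gate(smu, AMD_IP_BLOCK_TYPE_VCN, true);
 * and ungate it again with gate == false before submitting work.
 */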
int smu_get_power_num_states(struct smu_context *smu,
			     struct pp_states_info *state_info)
{
	if (!state_info)
		return -EINVAL;

	/* power states are not supported; report a single default state */
	memset(state_info, 0, sizeof(struct pp_states_info));
	state_info->nums = 1;
	state_info->states[0] = POWER_STATE_TYPE_DEFAULT;

	return 0;
}
bool is_support_sw_smu(struct amdgpu_device *adev)
{
	if (adev->asic_type >= CHIP_ARCTURUS)
		return true;

	return false;
}
int smu_sys_get_pp_table(struct smu_context *smu, void **table)
{
	struct smu_table_context *smu_table = &smu->smu_table;
	uint32_t powerplay_table_size;

	if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
		return -EOPNOTSUPP;

	if (!smu_table->power_play_table && !smu_table->hardcode_pptable)
		return -EINVAL;

	mutex_lock(&smu->mutex);

	if (smu_table->hardcode_pptable)
		*table = smu_table->hardcode_pptable;
	else
		*table = smu_table->power_play_table;

	powerplay_table_size = smu_table->power_play_table_size;

	mutex_unlock(&smu->mutex);

	return powerplay_table_size;
}
int smu_sys_set_pp_table(struct smu_context *smu, void *buf, size_t size)
{
	struct smu_table_context *smu_table = &smu->smu_table;
	ATOM_COMMON_TABLE_HEADER *header = (ATOM_COMMON_TABLE_HEADER *)buf;
	int ret = 0;

	if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
		return -EOPNOTSUPP;

	if (header->usStructureSize != size) {
		dev_err(smu->adev->dev, "pp table size not matched!\n");
		return -EIO;
	}

	mutex_lock(&smu->mutex);
	if (!smu_table->hardcode_pptable)
		smu_table->hardcode_pptable = kzalloc(size, GFP_KERNEL);
	if (!smu_table->hardcode_pptable) {
		ret = -ENOMEM;
		goto failed;
	}

	memcpy(smu_table->hardcode_pptable, buf, size);
	smu_table->power_play_table = smu_table->hardcode_pptable;
	smu_table->power_play_table_size = size;

	/*
	 * A special hw_fini action (for Navi1x, the DPM disablement is
	 * skipped) may be needed for custom pptable uploading.
	 */
	smu->uploading_custom_pp_table = true;

	ret = smu_reset(smu);
	if (ret)
		dev_info(smu->adev->dev, "smu reset failed, ret = %d\n", ret);

	smu->uploading_custom_pp_table = false;

failed:
	mutex_unlock(&smu->mutex);
	return ret;
}
static int smu_get_driver_allowed_feature_mask(struct smu_context *smu)
{
	struct smu_feature *feature = &smu->smu_feature;
	int ret = 0;
	uint32_t allowed_feature_mask[SMU_FEATURE_MAX/32];

	mutex_lock(&feature->mutex);
	bitmap_zero(feature->allowed, SMU_FEATURE_MAX);
	mutex_unlock(&feature->mutex);

	ret = smu_get_allowed_feature_mask(smu, allowed_feature_mask,
					   SMU_FEATURE_MAX/32);
	if (ret)
		return ret;

	mutex_lock(&feature->mutex);
	bitmap_or(feature->allowed, feature->allowed,
		  (unsigned long *)allowed_feature_mask,
		  feature->feature_num);
	mutex_unlock(&feature->mutex);

	return ret;
}
static int smu_set_funcs(struct amdgpu_device *adev)
{
	struct smu_context *smu = &adev->smu;

	if (adev->pm.pp_feature & PP_OVERDRIVE_MASK)
		smu->od_enabled = true;

	switch (adev->asic_type) {
	case CHIP_NAVI10:
	case CHIP_NAVI14:
	case CHIP_NAVI12:
		navi10_set_ppt_funcs(smu);
		break;
	case CHIP_ARCTURUS:
		adev->pm.pp_feature &= ~PP_GFXOFF_MASK;
		arcturus_set_ppt_funcs(smu);
		/* OD is not supported on Arcturus */
		smu->od_enabled = false;
		break;
	case CHIP_SIENNA_CICHLID:
	case CHIP_NAVY_FLOUNDER:
		sienna_cichlid_set_ppt_funcs(smu);
		break;
	case CHIP_RENOIR:
		renoir_set_ppt_funcs(smu);
		break;
	default:
		return -EINVAL;
	}

	return 0;
}
static int smu_early_init(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	struct smu_context *smu = &adev->smu;

	smu->adev = adev;
	smu->pm_enabled = !!amdgpu_dpm;
	smu->is_apu = false;
	mutex_init(&smu->mutex);

	return smu_set_funcs(adev);
}
static int smu_late_init(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	struct smu_context *smu = &adev->smu;
	int ret = 0;

	if (!smu->pm_enabled)
		return 0;

	ret = smu_set_default_od_settings(smu);
	if (ret) {
		dev_err(adev->dev, "Failed to setup default OD settings!\n");
		return ret;
	}

	/*
	 * Set initial values (read from vbios) in the dpm tables context,
	 * such as gfxclk, memclk, dcefclk etc., and enable the DPM feature
	 * for each dpm table.
	 */
	ret = smu_set_default_dpm_table(smu);
	if (ret) {
		dev_err(adev->dev, "Failed to setup default dpm clock tables!\n");
		return ret;
	}

	ret = smu_populate_umd_state_clk(smu);
	if (ret) {
		dev_err(adev->dev, "Failed to populate UMD state clocks!\n");
		return ret;
	}

	ret = smu_get_asic_power_limits(smu);
	if (ret) {
		dev_err(adev->dev, "Failed to get asic power limits!\n");
		return ret;
	}

	smu_get_unique_id(smu);

	smu_handle_task(&adev->smu,
			smu->smu_dpm.dpm_level,
			AMD_PP_TASK_COMPLETE_INIT,
			false);

	return 0;
}
static int smu_init_fb_allocations(struct smu_context *smu)
{
	struct amdgpu_device *adev = smu->adev;
	struct smu_table_context *smu_table = &smu->smu_table;
	struct smu_table *tables = smu_table->tables;
	struct smu_table *driver_table = &(smu_table->driver_table);
	uint32_t max_table_size = 0;
	int ret, i;

	/* VRAM allocation for tool table */
	if (tables[SMU_TABLE_PMSTATUSLOG].size) {
		ret = amdgpu_bo_create_kernel(adev,
					      tables[SMU_TABLE_PMSTATUSLOG].size,
					      tables[SMU_TABLE_PMSTATUSLOG].align,
					      tables[SMU_TABLE_PMSTATUSLOG].domain,
					      &tables[SMU_TABLE_PMSTATUSLOG].bo,
					      &tables[SMU_TABLE_PMSTATUSLOG].mc_address,
					      &tables[SMU_TABLE_PMSTATUSLOG].cpu_addr);
		if (ret) {
			dev_err(adev->dev, "VRAM allocation for tool table failed!\n");
			return ret;
		}
	}

	/* VRAM allocation for driver table */
	for (i = 0; i < SMU_TABLE_COUNT; i++) {
		if (tables[i].size == 0)
			continue;

		if (i == SMU_TABLE_PMSTATUSLOG)
			continue;

		if (max_table_size < tables[i].size)
			max_table_size = tables[i].size;
	}

	driver_table->size = max_table_size;
	driver_table->align = PAGE_SIZE;
	driver_table->domain = AMDGPU_GEM_DOMAIN_VRAM;

	ret = amdgpu_bo_create_kernel(adev,
				      driver_table->size,
				      driver_table->align,
				      driver_table->domain,
				      &driver_table->bo,
				      &driver_table->mc_address,
				      &driver_table->cpu_addr);
	if (ret) {
		dev_err(adev->dev, "VRAM allocation for driver table failed!\n");
		if (tables[SMU_TABLE_PMSTATUSLOG].mc_address)
			amdgpu_bo_free_kernel(&tables[SMU_TABLE_PMSTATUSLOG].bo,
					      &tables[SMU_TABLE_PMSTATUSLOG].mc_address,
					      &tables[SMU_TABLE_PMSTATUSLOG].cpu_addr);
	}

	return ret;
}
static int smu_fini_fb_allocations(struct smu_context *smu)
{
	struct smu_table_context *smu_table = &smu->smu_table;
	struct smu_table *tables = smu_table->tables;
	struct smu_table *driver_table = &(smu_table->driver_table);

	if (!tables)
		return 0;

	if (tables[SMU_TABLE_PMSTATUSLOG].mc_address)
		amdgpu_bo_free_kernel(&tables[SMU_TABLE_PMSTATUSLOG].bo,
				      &tables[SMU_TABLE_PMSTATUSLOG].mc_address,
				      &tables[SMU_TABLE_PMSTATUSLOG].cpu_addr);

	amdgpu_bo_free_kernel(&driver_table->bo,
			      &driver_table->mc_address,
			      &driver_table->cpu_addr);

	return 0;
}
/**
 * smu_alloc_memory_pool - allocate memory pool in the system memory
 *
 * @smu: smu_context pointer
 *
 * This memory pool will be used by the SMC; the messages
 * SetSystemVirtualDramAddr and DramLogSetDramAddr can notify the SMC
 * when its address changes.
 *
 * Returns 0 on success, error on failure.
 */
static int smu_alloc_memory_pool(struct smu_context *smu)
{
	struct amdgpu_device *adev = smu->adev;
	struct smu_table_context *smu_table = &smu->smu_table;
	struct smu_table *memory_pool = &smu_table->memory_pool;
	uint64_t pool_size = smu->pool_size;
	int ret = 0;

	if (pool_size == SMU_MEMORY_POOL_SIZE_ZERO)
		return ret;

	memory_pool->size = pool_size;
	memory_pool->align = PAGE_SIZE;
	memory_pool->domain = AMDGPU_GEM_DOMAIN_GTT;

	switch (pool_size) {
	case SMU_MEMORY_POOL_SIZE_256_MB:
	case SMU_MEMORY_POOL_SIZE_512_MB:
	case SMU_MEMORY_POOL_SIZE_1_GB:
	case SMU_MEMORY_POOL_SIZE_2_GB:
		ret = amdgpu_bo_create_kernel(adev,
					      memory_pool->size,
					      memory_pool->align,
					      memory_pool->domain,
					      &memory_pool->bo,
					      &memory_pool->mc_address,
					      &memory_pool->cpu_addr);
		if (ret)
			dev_err(adev->dev, "VRAM allocation for dramlog failed!\n");
		break;
	default:
		break;
	}

	return ret;
}
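
/*
 * Note: the pool allocated here is only reserved; its GPU address is
 * reported to the SMC later, in smu_smc_hw_setup(), through
 * smu_notify_memory_pool_location().
 */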
static int smu_free_memory_pool(struct smu_context *smu)
{
	struct smu_table_context *smu_table = &smu->smu_table;
	struct smu_table *memory_pool = &smu_table->memory_pool;

	if (memory_pool->size == SMU_MEMORY_POOL_SIZE_ZERO)
		return 0;

	amdgpu_bo_free_kernel(&memory_pool->bo,
			      &memory_pool->mc_address,
			      &memory_pool->cpu_addr);

	memset(memory_pool, 0, sizeof(struct smu_table));

	return 0;
}
static int smu_smc_table_sw_init(struct smu_context *smu)
{
	int ret;

	/*
	 * Create the smu_table structure, and init smc tables such as
	 * TABLE_PPTABLE, TABLE_WATERMARKS, TABLE_SMU_METRICS, etc.
	 */
	ret = smu_init_smc_tables(smu);
	if (ret) {
		dev_err(smu->adev->dev, "Failed to init smc tables!\n");
		return ret;
	}

	/*
	 * Create the smu_power_context structure, and allocate the
	 * smu_dpm_context and context size to fill the smu_power_context data.
	 */
	ret = smu_init_power(smu);
	if (ret) {
		dev_err(smu->adev->dev, "Failed to init smu power!\n");
		return ret;
	}

	/*
	 * allocate vram bos to store smc table contents.
	 */
	ret = smu_init_fb_allocations(smu);
	if (ret)
		return ret;

	ret = smu_alloc_memory_pool(smu);
	if (ret)
		return ret;

	return 0;
}
static int smu_smc_table_sw_fini(struct smu_context *smu)
{
	int ret;

	ret = smu_free_memory_pool(smu);
	if (ret)
		return ret;

	ret = smu_fini_fb_allocations(smu);
	if (ret)
		return ret;

	ret = smu_fini_power(smu);
	if (ret) {
		dev_err(smu->adev->dev, "Failed to fini smu power!\n");
		return ret;
	}

	ret = smu_fini_smc_tables(smu);
	if (ret) {
		dev_err(smu->adev->dev, "Failed to fini smc tables!\n");
		return ret;
	}

	return 0;
}
static void smu_throttling_logging_work_fn(struct work_struct *work)
{
	struct smu_context *smu = container_of(work, struct smu_context,
					       throttling_logging_work);

	smu_log_thermal_throttling(smu);
}
static int smu_sw_init(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	struct smu_context *smu = &adev->smu;
	int ret;

	smu->pool_size = adev->pm.smu_prv_buffer_size;
	smu->smu_feature.feature_num = SMU_FEATURE_MAX;
	mutex_init(&smu->smu_feature.mutex);
	bitmap_zero(smu->smu_feature.supported, SMU_FEATURE_MAX);
	bitmap_zero(smu->smu_feature.enabled, SMU_FEATURE_MAX);
	bitmap_zero(smu->smu_feature.allowed, SMU_FEATURE_MAX);

	mutex_init(&smu->smu_baco.mutex);
	smu->smu_baco.state = SMU_BACO_STATE_EXIT;
	smu->smu_baco.platform_support = false;

	mutex_init(&smu->sensor_lock);
	mutex_init(&smu->metrics_lock);
	mutex_init(&smu->message_lock);

	INIT_WORK(&smu->throttling_logging_work, smu_throttling_logging_work_fn);
	smu->watermarks_bitmap = 0;
	smu->power_profile_mode = PP_SMC_POWER_PROFILE_BOOTUP_DEFAULT;
	smu->default_power_profile_mode = PP_SMC_POWER_PROFILE_BOOTUP_DEFAULT;

	smu->workload_mask = 1 << smu->workload_prority[PP_SMC_POWER_PROFILE_BOOTUP_DEFAULT];
	smu->workload_prority[PP_SMC_POWER_PROFILE_BOOTUP_DEFAULT] = 0;
	smu->workload_prority[PP_SMC_POWER_PROFILE_FULLSCREEN3D] = 1;
	smu->workload_prority[PP_SMC_POWER_PROFILE_POWERSAVING] = 2;
	smu->workload_prority[PP_SMC_POWER_PROFILE_VIDEO] = 3;
	smu->workload_prority[PP_SMC_POWER_PROFILE_VR] = 4;
	smu->workload_prority[PP_SMC_POWER_PROFILE_COMPUTE] = 5;
	smu->workload_prority[PP_SMC_POWER_PROFILE_CUSTOM] = 6;

	smu->workload_setting[0] = PP_SMC_POWER_PROFILE_BOOTUP_DEFAULT;
	smu->workload_setting[1] = PP_SMC_POWER_PROFILE_FULLSCREEN3D;
	smu->workload_setting[2] = PP_SMC_POWER_PROFILE_POWERSAVING;
	smu->workload_setting[3] = PP_SMC_POWER_PROFILE_VIDEO;
	smu->workload_setting[4] = PP_SMC_POWER_PROFILE_VR;
	smu->workload_setting[5] = PP_SMC_POWER_PROFILE_COMPUTE;
	smu->workload_setting[6] = PP_SMC_POWER_PROFILE_CUSTOM;
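
	/*
	 * Note on the two tables above: workload_prority[] maps each profile
	 * to a bit position in workload_mask, and workload_setting[] maps
	 * that bit position back to a profile. fls(workload_mask) - 1
	 * therefore selects the highest-priority profile currently requested
	 * (see smu_adjust_power_state_dynamic() and
	 * smu_switch_power_profile()).
	 */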
	smu->display_config = &adev->pm.pm_display_cfg;

	smu->smu_dpm.dpm_level = AMD_DPM_FORCED_LEVEL_AUTO;
	smu->smu_dpm.requested_dpm_level = AMD_DPM_FORCED_LEVEL_AUTO;

	ret = smu_init_microcode(smu);
	if (ret) {
		dev_err(adev->dev, "Failed to load smu firmware!\n");
		return ret;
	}

	ret = smu_smc_table_sw_init(smu);
	if (ret) {
		dev_err(adev->dev, "Failed to sw init smc table!\n");
		return ret;
	}

	ret = smu_register_irq_handler(smu);
	if (ret) {
		dev_err(adev->dev, "Failed to register smc irq handler!\n");
		return ret;
	}

	return 0;
}
static int smu_sw_fini(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	struct smu_context *smu = &adev->smu;
	int ret;

	ret = smu_smc_table_sw_fini(smu);
	if (ret) {
		dev_err(adev->dev, "Failed to sw fini smc table!\n");
		return ret;
	}

	smu_fini_microcode(smu);

	return 0;
}
static int smu_get_thermal_temperature_range(struct smu_context *smu)
{
	struct amdgpu_device *adev = smu->adev;
	struct smu_temperature_range *range =
				&smu->thermal_range;
	int ret = 0;

	if (!smu->ppt_funcs->get_thermal_temperature_range)
		return 0;

	ret = smu->ppt_funcs->get_thermal_temperature_range(smu, range);
	if (ret)
		return ret;

	adev->pm.dpm.thermal.min_temp = range->min;
	adev->pm.dpm.thermal.max_temp = range->max;
	adev->pm.dpm.thermal.max_edge_emergency_temp = range->edge_emergency_max;
	adev->pm.dpm.thermal.min_hotspot_temp = range->hotspot_min;
	adev->pm.dpm.thermal.max_hotspot_crit_temp = range->hotspot_crit_max;
	adev->pm.dpm.thermal.max_hotspot_emergency_temp = range->hotspot_emergency_max;
	adev->pm.dpm.thermal.min_mem_temp = range->mem_min;
	adev->pm.dpm.thermal.max_mem_crit_temp = range->mem_crit_max;
	adev->pm.dpm.thermal.max_mem_emergency_temp = range->mem_emergency_max;

	return ret;
}
static int smu_smc_hw_setup(struct smu_context *smu)
{
	struct amdgpu_device *adev = smu->adev;
	uint32_t pcie_gen = 0, pcie_width = 0;
	int ret;

	if (smu_is_dpm_running(smu) && adev->in_suspend) {
		dev_info(adev->dev, "dpm has been enabled\n");
		return 0;
	}

	ret = smu_init_display_count(smu, 0);
	if (ret) {
		dev_info(adev->dev, "Failed to pre-set display count as 0!\n");
		return ret;
	}

	ret = smu_set_driver_table_location(smu);
	if (ret) {
		dev_err(adev->dev, "Failed to SetDriverDramAddr!\n");
		return ret;
	}

	/*
	 * Set the PMSTATUSLOG table bo address with the SetToolsDramAddr
	 * message for tools.
	 */
	ret = smu_set_tool_table_location(smu);
	if (ret) {
		dev_err(adev->dev, "Failed to SetToolsDramAddr!\n");
		return ret;
	}

	/*
	 * Use the messages SetSystemVirtualDramAddr and DramLogSetDramAddr
	 * to notify the SMC of the memory pool location.
	 */
	ret = smu_notify_memory_pool_location(smu);
	if (ret) {
		dev_err(adev->dev, "Failed to SetDramLogDramAddr!\n");
		return ret;
	}

	/* smu_dump_pptable(smu); */
	/*
	 * Copy the pptable bo in the vram to the smc with SMU messages such
	 * as SetDriverDramAddr and TransferTableDram2Smu.
	 */
	ret = smu_write_pptable(smu);
	if (ret) {
		dev_err(adev->dev, "Failed to transfer pptable to SMC!\n");
		return ret;
	}

	/* issue Run*Btc msg */
	ret = smu_run_btc(smu);
	if (ret)
		return ret;

	ret = smu_feature_set_allowed_mask(smu);
	if (ret) {
		dev_err(adev->dev, "Failed to set driver allowed features mask!\n");
		return ret;
	}

	ret = smu_system_features_control(smu, true);
	if (ret) {
		dev_err(adev->dev, "Failed to enable requested dpm features!\n");
		return ret;
	}

	if (!smu_is_dpm_running(smu))
		dev_info(adev->dev, "dpm has been disabled\n");

	if (adev->pm.pcie_gen_mask & CAIL_PCIE_LINK_SPEED_SUPPORT_GEN4)
		pcie_gen = 3;
	else if (adev->pm.pcie_gen_mask & CAIL_PCIE_LINK_SPEED_SUPPORT_GEN3)
		pcie_gen = 2;
	else if (adev->pm.pcie_gen_mask & CAIL_PCIE_LINK_SPEED_SUPPORT_GEN2)
		pcie_gen = 1;
	else if (adev->pm.pcie_gen_mask & CAIL_PCIE_LINK_SPEED_SUPPORT_GEN1)
		pcie_gen = 0;

	/* Bit 31:16: LCLK DPM level. 0 is DPM0, and 1 is DPM1
	 * Bit 15:8:  PCIE GEN, 0 to 3 corresponds to GEN1 to GEN4
	 * Bit 7:0:   PCIE lane width, 1 to 7 corresponds to x1 to x32
	 */
	if (adev->pm.pcie_mlw_mask & CAIL_PCIE_LINK_WIDTH_SUPPORT_X16)
		pcie_width = 6;
	else if (adev->pm.pcie_mlw_mask & CAIL_PCIE_LINK_WIDTH_SUPPORT_X12)
		pcie_width = 5;
	else if (adev->pm.pcie_mlw_mask & CAIL_PCIE_LINK_WIDTH_SUPPORT_X8)
		pcie_width = 4;
	else if (adev->pm.pcie_mlw_mask & CAIL_PCIE_LINK_WIDTH_SUPPORT_X4)
		pcie_width = 3;
	else if (adev->pm.pcie_mlw_mask & CAIL_PCIE_LINK_WIDTH_SUPPORT_X2)
		pcie_width = 2;
	else if (adev->pm.pcie_mlw_mask & CAIL_PCIE_LINK_WIDTH_SUPPORT_X1)
		pcie_width = 1;
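
	/* e.g. a GEN4-capable x16 link yields pcie_gen = 3 and pcie_width = 6 */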
	ret = smu_update_pcie_parameters(smu, pcie_gen, pcie_width);
	if (ret) {
		dev_err(adev->dev, "Attempt to override pcie params failed!\n");
		return ret;
	}

	ret = smu_get_thermal_temperature_range(smu);
	if (ret) {
		dev_err(adev->dev, "Failed to get thermal temperature ranges!\n");
		return ret;
	}

	ret = smu_enable_thermal_alert(smu);
	if (ret) {
		dev_err(adev->dev, "Failed to enable thermal alert!\n");
		return ret;
	}

	ret = smu_i2c_eeprom_init(smu, &adev->pm.smu_i2c);
	if (ret)
		return ret;

	ret = smu_disable_umc_cdr_12gbps_workaround(smu);
	if (ret) {
		dev_err(adev->dev, "Workaround failed to disable UMC CDR feature on 12Gbps SKU!\n");
		return ret;
	}

	/*
	 * For Navi1X, manually switch it to AC mode as PMFW
	 * may boot it with DC mode.
	 */
	ret = smu_set_power_source(smu,
				   adev->pm.ac_power ? SMU_POWER_SOURCE_AC :
				   SMU_POWER_SOURCE_DC);
	if (ret) {
		dev_err(adev->dev, "Failed to switch to %s mode!\n", adev->pm.ac_power ? "AC" : "DC");
		return ret;
	}

	ret = smu_notify_display_change(smu);
	if (ret)
		return ret;

	/*
	 * Set the min deep sleep dce fclk with the bootup value from vbios
	 * via the SetMinDeepSleepDcefclk message.
	 */
	ret = smu_set_min_dcef_deep_sleep(smu,
					  smu->smu_table.boot_values.dcefclk / 100);

	return ret;
}
static int smu_start_smc_engine(struct smu_context *smu)
{
	struct amdgpu_device *adev = smu->adev;
	int ret = 0;

	if (adev->firmware.load_type != AMDGPU_FW_LOAD_PSP) {
		if (adev->asic_type < CHIP_NAVI10) {
			if (smu->ppt_funcs->load_microcode) {
				ret = smu->ppt_funcs->load_microcode(smu);
				if (ret)
					return ret;
			}
		}
	}

	if (smu->ppt_funcs->check_fw_status) {
		ret = smu->ppt_funcs->check_fw_status(smu);
		if (ret) {
			dev_err(adev->dev, "SMC is not ready\n");
			return ret;
		}
	}

	/*
	 * Send the GetDriverIfVersion message to check whether the return
	 * value matches the DRIVER_IF_VERSION in the smc header.
	 */
	ret = smu_check_fw_version(smu);
	if (ret)
		return ret;

	return ret;
}
static int smu_hw_init(void *handle)
{
	int ret;
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	struct smu_context *smu = &adev->smu;

	if (amdgpu_sriov_vf(adev) && !amdgpu_sriov_is_pp_one_vf(adev)) {
		smu->pm_enabled = false;
		return 0;
	}

	ret = smu_start_smc_engine(smu);
	if (ret) {
		dev_err(adev->dev, "SMC engine is not correctly up!\n");
		return ret;
	}

	if (smu->is_apu) {
		smu_powergate_sdma(&adev->smu, false);
		smu_dpm_set_vcn_enable(smu, true);
		smu_dpm_set_jpeg_enable(smu, true);
		smu_set_gfx_cgpg(&adev->smu, true);
	}

	if (!smu->pm_enabled)
		return 0;

	/* get boot_values from vbios to set revision, gfxclk, etc. */
	ret = smu_get_vbios_bootup_values(smu);
	if (ret) {
		dev_err(adev->dev, "Failed to get VBIOS boot clock values!\n");
		return ret;
	}

	ret = smu_setup_pptable(smu);
	if (ret) {
		dev_err(adev->dev, "Failed to setup pptable!\n");
		return ret;
	}

	ret = smu_get_driver_allowed_feature_mask(smu);
	if (ret)
		return ret;

	ret = smu_smc_hw_setup(smu);
	if (ret) {
		dev_err(adev->dev, "Failed to setup smc hw!\n");
		return ret;
	}

	/*
	 * Move maximum sustainable clock retrieving here considering
	 * 1. It is not needed on resume (from S3).
	 * 2. DAL settings come between .hw_init and .late_init of SMU.
	 *    And DAL needs to know the maximum sustainable clocks. Thus
	 *    it cannot be put in .late_init().
	 */
	ret = smu_init_max_sustainable_clocks(smu);
	if (ret) {
		dev_err(adev->dev, "Failed to init max sustainable clocks!\n");
		return ret;
	}

	adev->pm.dpm_enabled = true;

	dev_info(adev->dev, "SMU is initialized successfully!\n");

	return 0;
}
static int smu_disable_dpms(struct smu_context *smu)
{
	struct amdgpu_device *adev = smu->adev;
	int ret = 0;
	bool use_baco = !smu->is_apu &&
		((adev->in_gpu_reset &&
		  (amdgpu_asic_reset_method(adev) == AMD_RESET_METHOD_BACO)) ||
		 ((adev->in_runpm || adev->in_hibernate) && amdgpu_asic_supports_baco(adev)));

	/*
	 * For custom pptable uploading, skip the DPM features
	 * disable process on Navi1x ASICs.
	 *   - As the gfx related features are under the control of
	 *     the RLC on those ASICs, RLC reinitialization would be
	 *     needed to re-enable them. That costs much more effort.
	 *   - SMU firmware can handle the DPM re-enablement
	 *     properly.
	 */
	if (smu->uploading_custom_pp_table &&
	    (adev->asic_type >= CHIP_NAVI10) &&
	    (adev->asic_type <= CHIP_NAVI12))
		return 0;

	/*
	 * For Sienna_Cichlid, PMFW will handle the feature disablement
	 * properly on BACO in. Driver involvement is unnecessary.
	 */
	if ((adev->asic_type == CHIP_SIENNA_CICHLID) &&
	     use_baco)
		return 0;

	/*
	 * For gpu reset, runpm and hibernation through BACO,
	 * the BACO feature has to be kept enabled.
	 */
	if (use_baco && smu_feature_is_enabled(smu, SMU_FEATURE_BACO_BIT)) {
		ret = smu_disable_all_features_with_exception(smu,
							      SMU_FEATURE_BACO_BIT);
		if (ret)
			dev_err(adev->dev, "Failed to disable smu features except BACO.\n");
	} else {
		ret = smu_system_features_control(smu, false);
		if (ret)
			dev_err(adev->dev, "Failed to disable smu features.\n");
	}

	if (adev->asic_type >= CHIP_NAVI10 &&
	    adev->gfx.rlc.funcs->stop)
		adev->gfx.rlc.funcs->stop(adev);

	return ret;
}
static int smu_smc_hw_cleanup(struct smu_context *smu)
{
	struct amdgpu_device *adev = smu->adev;
	int ret = 0;

	smu_i2c_eeprom_fini(smu, &adev->pm.smu_i2c);

	cancel_work_sync(&smu->throttling_logging_work);

	ret = smu_disable_thermal_alert(smu);
	if (ret) {
		dev_err(adev->dev, "Failed to disable thermal alert!\n");
		return ret;
	}

	ret = smu_disable_dpms(smu);
	if (ret) {
		dev_err(adev->dev, "Failed to disable dpm features!\n");
		return ret;
	}

	return 0;
}
static int smu_hw_fini(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	struct smu_context *smu = &adev->smu;
	int ret;

	if (amdgpu_sriov_vf(adev) && !amdgpu_sriov_is_pp_one_vf(adev))
		return 0;

	if (smu->is_apu) {
		smu_powergate_sdma(&adev->smu, true);
		smu_dpm_set_vcn_enable(smu, false);
		smu_dpm_set_jpeg_enable(smu, false);
	}

	if (!smu->pm_enabled)
		return 0;

	adev->pm.dpm_enabled = false;

	ret = smu_smc_hw_cleanup(smu);
	if (ret)
		return ret;

	return 0;
}
int smu_reset(struct smu_context *smu)
{
	struct amdgpu_device *adev = smu->adev;
	int ret = 0;

	ret = smu_hw_fini(adev);
	if (ret)
		return ret;

	ret = smu_hw_init(adev);
	if (ret)
		return ret;

	ret = smu_late_init(adev);

	return ret;
}
static int smu_suspend(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	struct smu_context *smu = &adev->smu;
	int ret;

	if (amdgpu_sriov_vf(adev) && !amdgpu_sriov_is_pp_one_vf(adev))
		return 0;

	if (!smu->pm_enabled)
		return 0;

	adev->pm.dpm_enabled = false;

	ret = smu_smc_hw_cleanup(smu);
	if (ret)
		return ret;

	smu->watermarks_bitmap &= ~(WATERMARKS_LOADED);

	if (smu->is_apu)
		smu_set_gfx_cgpg(&adev->smu, false);

	return 0;
}
static int smu_resume(void *handle)
{
	int ret;
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	struct smu_context *smu = &adev->smu;

	if (amdgpu_sriov_vf(adev) && !amdgpu_sriov_is_pp_one_vf(adev))
		return 0;

	if (!smu->pm_enabled)
		return 0;

	dev_info(adev->dev, "SMU is resuming...\n");

	ret = smu_start_smc_engine(smu);
	if (ret) {
		dev_err(adev->dev, "SMC engine is not correctly up!\n");
		return ret;
	}

	ret = smu_smc_hw_setup(smu);
	if (ret) {
		dev_err(adev->dev, "Failed to setup smc hw!\n");
		return ret;
	}

	if (smu->is_apu)
		smu_set_gfx_cgpg(&adev->smu, true);

	smu->disable_uclk_switch = 0;

	adev->pm.dpm_enabled = true;

	dev_info(adev->dev, "SMU is resumed successfully!\n");

	return 0;
}
int smu_display_configuration_change(struct smu_context *smu,
				     const struct amd_pp_display_configuration *display_config)
{
	int index = 0;
	int num_of_active_display = 0;

	if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
		return -EOPNOTSUPP;

	if (!display_config)
		return -EINVAL;

	mutex_lock(&smu->mutex);

	smu_set_min_dcef_deep_sleep(smu,
				    display_config->min_dcef_deep_sleep_set_clk / 100);

	for (index = 0; index < display_config->num_path_including_non_display; index++) {
		if (display_config->displays[index].controller_id != 0)
			num_of_active_display++;
	}

	smu_set_active_display_count(smu, num_of_active_display);

	smu_store_cc6_data(smu, display_config->cpu_pstate_separation_time,
			   display_config->cpu_cc6_disable,
			   display_config->cpu_pstate_disable,
			   display_config->nb_pstate_switch_disable);

	mutex_unlock(&smu->mutex);

	return 0;
}
static int smu_get_clock_info(struct smu_context *smu,
			      struct smu_clock_info *clk_info,
			      enum smu_perf_level_designation designation)
{
	int ret;
	struct smu_performance_level level = {0};

	if (!clk_info)
		return -EINVAL;

	ret = smu_get_perf_level(smu, PERF_LEVEL_ACTIVITY, &level);
	if (ret)
		return -EINVAL;

	clk_info->min_mem_clk = level.memory_clock;
	clk_info->min_eng_clk = level.core_clock;
	clk_info->min_bus_bandwidth = level.non_local_mem_freq * level.non_local_mem_width;

	ret = smu_get_perf_level(smu, designation, &level);
	if (ret)
		return -EINVAL;

	clk_info->min_mem_clk = level.memory_clock;
	clk_info->min_eng_clk = level.core_clock;
	clk_info->min_bus_bandwidth = level.non_local_mem_freq * level.non_local_mem_width;

	return 0;
}
int smu_get_current_clocks(struct smu_context *smu,
			   struct amd_pp_clock_info *clocks)
{
	struct amd_pp_simple_clock_info simple_clocks = {0};
	struct smu_clock_info hw_clocks;
	int ret = 0;

	if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
		return -EOPNOTSUPP;

	mutex_lock(&smu->mutex);

	smu_get_dal_power_level(smu, &simple_clocks);

	if (smu->support_power_containment)
		ret = smu_get_clock_info(smu, &hw_clocks,
					 PERF_LEVEL_POWER_CONTAINMENT);
	else
		ret = smu_get_clock_info(smu, &hw_clocks, PERF_LEVEL_ACTIVITY);

	if (ret) {
		dev_err(smu->adev->dev, "Error in smu_get_clock_info\n");
		goto failed;
	}

	clocks->min_engine_clock = hw_clocks.min_eng_clk;
	clocks->max_engine_clock = hw_clocks.max_eng_clk;
	clocks->min_memory_clock = hw_clocks.min_mem_clk;
	clocks->max_memory_clock = hw_clocks.max_mem_clk;
	clocks->min_bus_bandwidth = hw_clocks.min_bus_bandwidth;
	clocks->max_bus_bandwidth = hw_clocks.max_bus_bandwidth;
	clocks->max_engine_clock_in_sr = hw_clocks.max_eng_clk;
	clocks->min_engine_clock_in_sr = hw_clocks.min_eng_clk;

	if (simple_clocks.level == 0)
		clocks->max_clocks_state = PP_DAL_POWERLEVEL_7;
	else
		clocks->max_clocks_state = simple_clocks.level;

	if (!smu_get_current_shallow_sleep_clocks(smu, &hw_clocks)) {
		clocks->max_engine_clock_in_sr = hw_clocks.max_eng_clk;
		clocks->min_engine_clock_in_sr = hw_clocks.min_eng_clk;
	}

failed:
	mutex_unlock(&smu->mutex);
	return ret;
}
static int smu_set_clockgating_state(void *handle,
				     enum amd_clockgating_state state)
{
	return 0;
}

static int smu_set_powergating_state(void *handle,
				     enum amd_powergating_state state)
{
	return 0;
}
static int smu_enable_umd_pstate(void *handle,
				 enum amd_dpm_forced_level *level)
{
	uint32_t profile_mode_mask = AMD_DPM_FORCED_LEVEL_PROFILE_STANDARD |
				     AMD_DPM_FORCED_LEVEL_PROFILE_MIN_SCLK |
				     AMD_DPM_FORCED_LEVEL_PROFILE_MIN_MCLK |
				     AMD_DPM_FORCED_LEVEL_PROFILE_PEAK;

	struct smu_context *smu = (struct smu_context *)(handle);
	struct smu_dpm_context *smu_dpm_ctx = &(smu->smu_dpm);

	if (!smu->is_apu && !smu_dpm_ctx->dpm_context)
		return -EINVAL;

	if (!(smu_dpm_ctx->dpm_level & profile_mode_mask)) {
		/* enter umd pstate, save current level, disable gfx cg */
		if (*level & profile_mode_mask) {
			smu_dpm_ctx->saved_dpm_level = smu_dpm_ctx->dpm_level;
			smu_dpm_ctx->enable_umd_pstate = true;
			amdgpu_device_ip_set_powergating_state(smu->adev,
							       AMD_IP_BLOCK_TYPE_GFX,
							       AMD_PG_STATE_UNGATE);
			amdgpu_device_ip_set_clockgating_state(smu->adev,
							       AMD_IP_BLOCK_TYPE_GFX,
							       AMD_CG_STATE_UNGATE);
		}
	} else {
		/* exit umd pstate, restore level, enable gfx cg */
		if (!(*level & profile_mode_mask)) {
			if (*level == AMD_DPM_FORCED_LEVEL_PROFILE_EXIT)
				*level = smu_dpm_ctx->saved_dpm_level;
			smu_dpm_ctx->enable_umd_pstate = false;
			amdgpu_device_ip_set_clockgating_state(smu->adev,
							       AMD_IP_BLOCK_TYPE_GFX,
							       AMD_CG_STATE_GATE);
			amdgpu_device_ip_set_powergating_state(smu->adev,
							       AMD_IP_BLOCK_TYPE_GFX,
							       AMD_PG_STATE_GATE);
		}
	}

	return 0;
}
static int smu_adjust_power_state_dynamic(struct smu_context *smu,
					  enum amd_dpm_forced_level level,
					  bool skip_display_settings)
{
	int ret = 0;
	int index = 0;
	long workload;
	struct smu_dpm_context *smu_dpm_ctx = &(smu->smu_dpm);

	if (!skip_display_settings) {
		ret = smu_display_config_changed(smu);
		if (ret) {
			dev_err(smu->adev->dev, "Failed to change display config!");
			return ret;
		}
	}

	ret = smu_apply_clocks_adjust_rules(smu);
	if (ret) {
		dev_err(smu->adev->dev, "Failed to apply clocks adjust rules!");
		return ret;
	}

	if (!skip_display_settings) {
		ret = smu_notify_smc_display_config(smu);
		if (ret) {
			dev_err(smu->adev->dev, "Failed to notify smc display config!");
			return ret;
		}
	}

	if (smu_dpm_ctx->dpm_level != level) {
		ret = smu_asic_set_performance_level(smu, level);
		if (ret) {
			dev_err(smu->adev->dev, "Failed to set performance level!");
			return ret;
		}

		/* update the saved copy */
		smu_dpm_ctx->dpm_level = level;
	}

	if (smu_dpm_ctx->dpm_level != AMD_DPM_FORCED_LEVEL_MANUAL) {
		index = fls(smu->workload_mask);
		index = index > 0 && index <= WORKLOAD_POLICY_MAX ? index - 1 : 0;
		workload = smu->workload_setting[index];

		if (smu->power_profile_mode != workload)
			smu_set_power_profile_mode(smu, &workload, 0, false);
	}

	return ret;
}
int smu_handle_task(struct smu_context *smu,
		    enum amd_dpm_forced_level level,
		    enum amd_pp_task task_id,
		    bool lock_needed)
{
	int ret = 0;

	if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
		return -EOPNOTSUPP;

	if (lock_needed)
		mutex_lock(&smu->mutex);

	switch (task_id) {
	case AMD_PP_TASK_DISPLAY_CONFIG_CHANGE:
		ret = smu_pre_display_config_changed(smu);
		if (ret)
			goto out;
		ret = smu_set_cpu_power_state(smu);
		if (ret)
			goto out;
		ret = smu_adjust_power_state_dynamic(smu, level, false);
		break;
	case AMD_PP_TASK_COMPLETE_INIT:
	case AMD_PP_TASK_READJUST_POWER_STATE:
		ret = smu_adjust_power_state_dynamic(smu, level, true);
		break;
	default:
		break;
	}

out:
	if (lock_needed)
		mutex_unlock(&smu->mutex);

	return ret;
}
int smu_switch_power_profile(struct smu_context *smu,
			     enum PP_SMC_POWER_PROFILE type,
			     bool en)
{
	struct smu_dpm_context *smu_dpm_ctx = &(smu->smu_dpm);
	long workload;
	uint32_t index;

	if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
		return -EOPNOTSUPP;

	if (!(type < PP_SMC_POWER_PROFILE_CUSTOM))
		return -EINVAL;

	mutex_lock(&smu->mutex);

	if (!en) {
		smu->workload_mask &= ~(1 << smu->workload_prority[type]);
		index = fls(smu->workload_mask);
		index = index > 0 && index <= WORKLOAD_POLICY_MAX ? index - 1 : 0;
		workload = smu->workload_setting[index];
	} else {
		smu->workload_mask |= (1 << smu->workload_prority[type]);
		index = fls(smu->workload_mask);
		index = index <= WORKLOAD_POLICY_MAX ? index - 1 : 0;
		workload = smu->workload_setting[index];
	}

	if (smu_dpm_ctx->dpm_level != AMD_DPM_FORCED_LEVEL_MANUAL)
		smu_set_power_profile_mode(smu, &workload, 0, false);

	mutex_unlock(&smu->mutex);

	return 0;
}
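
/*
 * Illustrative usage (hypothetical caller): a client that kicks off a
 * compute job could request
 *	smu_switch_power_profile(smu, PP_SMC_POWER_PROFILE_COMPUTE, true);
 * and drop the request with en == false once the job completes; the
 * highest-priority profile still requested then takes effect.
 */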
enum amd_dpm_forced_level smu_get_performance_level(struct smu_context *smu)
{
	struct smu_dpm_context *smu_dpm_ctx = &(smu->smu_dpm);
	enum amd_dpm_forced_level level;

	if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
		return -EOPNOTSUPP;

	if (!smu->is_apu && !smu_dpm_ctx->dpm_context)
		return -EINVAL;

	mutex_lock(&(smu->mutex));
	level = smu_dpm_ctx->dpm_level;
	mutex_unlock(&(smu->mutex));

	return level;
}
int smu_force_performance_level(struct smu_context *smu, enum amd_dpm_forced_level level)
{
	struct smu_dpm_context *smu_dpm_ctx = &(smu->smu_dpm);
	int ret = 0;

	if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
		return -EOPNOTSUPP;

	if (!smu->is_apu && !smu_dpm_ctx->dpm_context)
		return -EINVAL;

	mutex_lock(&smu->mutex);

	ret = smu_enable_umd_pstate(smu, &level);
	if (ret) {
		mutex_unlock(&smu->mutex);
		return ret;
	}

	ret = smu_handle_task(smu, level,
			      AMD_PP_TASK_READJUST_POWER_STATE,
			      false);

	mutex_unlock(&smu->mutex);

	return ret;
}
int smu_set_display_count(struct smu_context *smu, uint32_t count)
{
	int ret = 0;

	if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
		return -EOPNOTSUPP;

	mutex_lock(&smu->mutex);
	ret = smu_init_display_count(smu, count);
	mutex_unlock(&smu->mutex);

	return ret;
}
int smu_force_clk_levels(struct smu_context *smu,
			 enum smu_clk_type clk_type,
			 uint32_t mask)
{
	struct smu_dpm_context *smu_dpm_ctx = &(smu->smu_dpm);
	int ret = 0;

	if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
		return -EOPNOTSUPP;

	if (smu_dpm_ctx->dpm_level != AMD_DPM_FORCED_LEVEL_MANUAL) {
		dev_dbg(smu->adev->dev, "force clock level is for dpm manual mode only.\n");
		return -EINVAL;
	}

	mutex_lock(&smu->mutex);

	if (smu->ppt_funcs && smu->ppt_funcs->force_clk_levels)
		ret = smu->ppt_funcs->force_clk_levels(smu, clk_type, mask);

	mutex_unlock(&smu->mutex);

	return ret;
}
/*
 * On system suspend or reset, the dpm_enabled flag is cleared so that
 * unsupported SMU services are gated.
 * However, the mp1 state setting should still be granted even when
 * dpm_enabled is cleared.
 */
int smu_set_mp1_state(struct smu_context *smu,
		      enum pp_mp1_state mp1_state)
{
	uint16_t msg;
	int ret;

	if (!smu->pm_enabled)
		return -EOPNOTSUPP;

	mutex_lock(&smu->mutex);

	switch (mp1_state) {
	case PP_MP1_STATE_SHUTDOWN:
		msg = SMU_MSG_PrepareMp1ForShutdown;
		break;
	case PP_MP1_STATE_UNLOAD:
		msg = SMU_MSG_PrepareMp1ForUnload;
		break;
	case PP_MP1_STATE_RESET:
		msg = SMU_MSG_PrepareMp1ForReset;
		break;
	case PP_MP1_STATE_NONE:
	default:
		mutex_unlock(&smu->mutex);
		return 0;
	}

	/* some asics may not support those messages */
	if (smu_cmn_to_asic_specific_index(smu,
					   CMN2ASIC_MAPPING_MSG,
					   msg) < 0) {
		mutex_unlock(&smu->mutex);
		return 0;
	}

	ret = smu_send_smc_msg(smu, msg, NULL);
	if (ret)
		dev_err(smu->adev->dev, "[PrepareMp1] Failed!\n");

	mutex_unlock(&smu->mutex);

	return ret;
}
int smu_set_df_cstate(struct smu_context *smu,
		      enum pp_df_cstate state)
{
	int ret = 0;

	if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
		return -EOPNOTSUPP;

	if (!smu->ppt_funcs || !smu->ppt_funcs->set_df_cstate)
		return 0;

	mutex_lock(&smu->mutex);

	ret = smu->ppt_funcs->set_df_cstate(smu, state);
	if (ret)
		dev_err(smu->adev->dev, "[SetDfCstate] failed!\n");

	mutex_unlock(&smu->mutex);

	return ret;
}
int smu_allow_xgmi_power_down(struct smu_context *smu, bool en)
{
	int ret = 0;

	if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
		return -EOPNOTSUPP;

	if (!smu->ppt_funcs || !smu->ppt_funcs->allow_xgmi_power_down)
		return 0;

	mutex_lock(&smu->mutex);

	ret = smu->ppt_funcs->allow_xgmi_power_down(smu, en);
	if (ret)
		dev_err(smu->adev->dev, "[AllowXgmiPowerDown] failed!\n");

	mutex_unlock(&smu->mutex);

	return ret;
}
int smu_write_watermarks_table(struct smu_context *smu)
{
	int ret = 0;

	if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
		return -EOPNOTSUPP;

	mutex_lock(&smu->mutex);

	ret = smu_set_watermarks_table(smu, NULL);

	mutex_unlock(&smu->mutex);

	return ret;
}
int smu_set_watermarks_for_clock_ranges(struct smu_context *smu,
		struct dm_pp_wm_sets_with_clock_ranges_soc15 *clock_ranges)
{
	int ret = 0;

	if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
		return -EOPNOTSUPP;

	mutex_lock(&smu->mutex);

	if (!smu->disable_watermark &&
	    smu_feature_is_enabled(smu, SMU_FEATURE_DPM_DCEFCLK_BIT) &&
	    smu_feature_is_enabled(smu, SMU_FEATURE_DPM_SOCCLK_BIT)) {
		ret = smu_set_watermarks_table(smu, clock_ranges);

		if (!(smu->watermarks_bitmap & WATERMARKS_EXIST)) {
			smu->watermarks_bitmap |= WATERMARKS_EXIST;
			smu->watermarks_bitmap &= ~WATERMARKS_LOADED;
		}
	}

	mutex_unlock(&smu->mutex);

	return ret;
}
int smu_set_ac_dc(struct smu_context *smu)
{
	int ret = 0;

	if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
		return -EOPNOTSUPP;

	/* controlled by firmware */
	if (smu->dc_controlled_by_gpio)
		return 0;

	mutex_lock(&smu->mutex);
	ret = smu_set_power_source(smu,
				   smu->adev->pm.ac_power ? SMU_POWER_SOURCE_AC :
				   SMU_POWER_SOURCE_DC);
	if (ret)
		dev_err(smu->adev->dev, "Failed to switch to %s mode!\n",
			smu->adev->pm.ac_power ? "AC" : "DC");
	mutex_unlock(&smu->mutex);

	return ret;
}
const struct amd_ip_funcs smu_ip_funcs = {
	.name = "smu",
	.early_init = smu_early_init,
	.late_init = smu_late_init,
	.sw_init = smu_sw_init,
	.sw_fini = smu_sw_fini,
	.hw_init = smu_hw_init,
	.hw_fini = smu_hw_fini,
	.suspend = smu_suspend,
	.resume = smu_resume,
	.is_idle = NULL,
	.check_soft_reset = NULL,
	.wait_for_idle = NULL,
	.soft_reset = NULL,
	.set_clockgating_state = smu_set_clockgating_state,
	.set_powergating_state = smu_set_powergating_state,
	.enable_umd_pstate = smu_enable_umd_pstate,
};
const struct amdgpu_ip_block_version smu_v11_0_ip_block =
{
	.type = AMD_IP_BLOCK_TYPE_SMC,
	.major = 11,
	.minor = 0,
	.rev = 0,
	.funcs = &smu_ip_funcs,
};

const struct amdgpu_ip_block_version smu_v12_0_ip_block =
{
	.type = AMD_IP_BLOCK_TYPE_SMC,
	.major = 12,
	.minor = 0,
	.rev = 0,
	.funcs = &smu_ip_funcs,
};
int smu_load_microcode(struct smu_context *smu)
{
	int ret = 0;

	if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
		return -EOPNOTSUPP;

	mutex_lock(&smu->mutex);

	if (smu->ppt_funcs->load_microcode)
		ret = smu->ppt_funcs->load_microcode(smu);

	mutex_unlock(&smu->mutex);

	return ret;
}
int smu_check_fw_status(struct smu_context *smu)
{
	int ret = 0;

	if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
		return -EOPNOTSUPP;

	mutex_lock(&smu->mutex);

	if (smu->ppt_funcs->check_fw_status)
		ret = smu->ppt_funcs->check_fw_status(smu);

	mutex_unlock(&smu->mutex);

	return ret;
}
int smu_set_gfx_cgpg(struct smu_context *smu, bool enabled)
{
	int ret = 0;

	mutex_lock(&smu->mutex);

	if (smu->ppt_funcs->set_gfx_cgpg)
		ret = smu->ppt_funcs->set_gfx_cgpg(smu, enabled);

	mutex_unlock(&smu->mutex);

	return ret;
}
int smu_set_fan_speed_rpm(struct smu_context *smu, uint32_t speed)
{
	int ret = 0;

	if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
		return -EOPNOTSUPP;

	mutex_lock(&smu->mutex);

	if (smu->ppt_funcs->set_fan_speed_rpm)
		ret = smu->ppt_funcs->set_fan_speed_rpm(smu, speed);

	mutex_unlock(&smu->mutex);

	return ret;
}
int smu_get_power_limit(struct smu_context *smu,
			uint32_t *limit,
			bool max_setting)
{
	if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
		return -EOPNOTSUPP;

	mutex_lock(&smu->mutex);

	*limit = (max_setting ? smu->max_power_limit : smu->current_power_limit);

	mutex_unlock(&smu->mutex);

	return 0;
}
int smu_set_power_limit(struct smu_context *smu, uint32_t limit)
{
	int ret = 0;

	if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
		return -EOPNOTSUPP;

	mutex_lock(&smu->mutex);

	if (limit > smu->max_power_limit) {
		dev_err(smu->adev->dev,
			"New power limit (%d) is over the max allowed %d\n",
			limit, smu->max_power_limit);
		ret = -EINVAL;
		goto out;
	}

	if (!limit)
		limit = smu->current_power_limit;

	if (smu->ppt_funcs->set_power_limit)
		ret = smu->ppt_funcs->set_power_limit(smu, limit);

out:
	mutex_unlock(&smu->mutex);

	return ret;
}
int smu_print_clk_levels(struct smu_context *smu, enum smu_clk_type clk_type, char *buf)
{
	int ret = 0;

	if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
		return -EOPNOTSUPP;

	mutex_lock(&smu->mutex);

	if (smu->ppt_funcs->print_clk_levels)
		ret = smu->ppt_funcs->print_clk_levels(smu, clk_type, buf);

	mutex_unlock(&smu->mutex);

	return ret;
}
int smu_get_od_percentage(struct smu_context *smu, enum smu_clk_type type)
{
	int ret = 0;

	if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
		return -EOPNOTSUPP;

	mutex_lock(&smu->mutex);

	if (smu->ppt_funcs->get_od_percentage)
		ret = smu->ppt_funcs->get_od_percentage(smu, type);

	mutex_unlock(&smu->mutex);

	return ret;
}
int smu_set_od_percentage(struct smu_context *smu, enum smu_clk_type type, uint32_t value)
{
	int ret = 0;

	if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
		return -EOPNOTSUPP;

	mutex_lock(&smu->mutex);

	if (smu->ppt_funcs->set_od_percentage)
		ret = smu->ppt_funcs->set_od_percentage(smu, type, value);

	mutex_unlock(&smu->mutex);

	return ret;
}
int smu_od_edit_dpm_table(struct smu_context *smu,
			  enum PP_OD_DPM_TABLE_COMMAND type,
			  long *input, uint32_t size)
{
	int ret = 0;

	if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
		return -EOPNOTSUPP;

	mutex_lock(&smu->mutex);

	if (smu->ppt_funcs->od_edit_dpm_table)
		ret = smu->ppt_funcs->od_edit_dpm_table(smu, type, input, size);

	mutex_unlock(&smu->mutex);

	return ret;
}
int smu_read_sensor(struct smu_context *smu,
		    enum amd_pp_sensors sensor,
		    void *data, uint32_t *size)
{
	struct smu_umd_pstate_table *pstate_table =
				&smu->pstate_table;
	int ret = 0;

	if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
		return -EOPNOTSUPP;

	if (!data || !size)
		return -EINVAL;

	mutex_lock(&smu->mutex);

	switch (sensor) {
	case AMDGPU_PP_SENSOR_STABLE_PSTATE_SCLK:
		*((uint32_t *)data) = pstate_table->gfxclk_pstate.standard * 100;
		*size = 4;
		break;
	case AMDGPU_PP_SENSOR_STABLE_PSTATE_MCLK:
		*((uint32_t *)data) = pstate_table->uclk_pstate.standard * 100;
		*size = 4;
		break;
	case AMDGPU_PP_SENSOR_ENABLED_SMC_FEATURES_MASK:
		ret = smu_feature_get_enabled_mask(smu, (uint32_t *)data, 2);
		*size = 8;
		break;
	case AMDGPU_PP_SENSOR_UVD_POWER:
		*(uint32_t *)data = smu_feature_is_enabled(smu, SMU_FEATURE_DPM_UVD_BIT) ? 1 : 0;
		*size = 4;
		break;
	case AMDGPU_PP_SENSOR_VCE_POWER:
		*(uint32_t *)data = smu_feature_is_enabled(smu, SMU_FEATURE_DPM_VCE_BIT) ? 1 : 0;
		*size = 4;
		break;
	case AMDGPU_PP_SENSOR_VCN_POWER_STATE:
		*(uint32_t *)data = smu->smu_power.power_gate.vcn_gated ? 0 : 1;
		*size = 4;
		break;
	case AMDGPU_PP_SENSOR_MIN_FAN_RPM:
		*(uint32_t *)data = 0;
		*size = 4;
		break;
	default:
		if (smu->ppt_funcs->read_sensor)
			ret = smu->ppt_funcs->read_sensor(smu, sensor, data, size);
		break;
	}

	mutex_unlock(&smu->mutex);

	return ret;
}
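
/*
 * Illustrative usage: reading a 4-byte sensor value, assuming a valid
 * smu pointer:
 *	uint32_t value, size = sizeof(value);
 *	int ret = smu_read_sensor(smu, AMDGPU_PP_SENSOR_UVD_POWER,
 *				  &value, &size);
 */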
int smu_get_power_profile_mode(struct smu_context *smu, char *buf)
{
	int ret = 0;

	if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
		return -EOPNOTSUPP;

	mutex_lock(&smu->mutex);

	if (smu->ppt_funcs->get_power_profile_mode)
		ret = smu->ppt_funcs->get_power_profile_mode(smu, buf);

	mutex_unlock(&smu->mutex);

	return ret;
}
int smu_set_power_profile_mode(struct smu_context *smu,
			       long *param,
			       uint32_t param_size,
			       bool lock_needed)
{
	int ret = 0;

	if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
		return -EOPNOTSUPP;

	if (lock_needed)
		mutex_lock(&smu->mutex);

	if (smu->ppt_funcs->set_power_profile_mode)
		ret = smu->ppt_funcs->set_power_profile_mode(smu, param, param_size);

	if (lock_needed)
		mutex_unlock(&smu->mutex);

	return ret;
}
int smu_get_fan_control_mode(struct smu_context *smu)
{
	int ret = 0;

	if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
		return -EOPNOTSUPP;

	mutex_lock(&smu->mutex);

	if (smu->ppt_funcs->get_fan_control_mode)
		ret = smu->ppt_funcs->get_fan_control_mode(smu);

	mutex_unlock(&smu->mutex);

	return ret;
}

int smu_set_fan_control_mode(struct smu_context *smu, int value)
{
	int ret = 0;

	if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
		return -EOPNOTSUPP;

	mutex_lock(&smu->mutex);

	if (smu->ppt_funcs->set_fan_control_mode)
		ret = smu->ppt_funcs->set_fan_control_mode(smu, value);

	mutex_unlock(&smu->mutex);

	return ret;
}

int smu_get_fan_speed_percent(struct smu_context *smu, uint32_t *speed)
{
	int ret = 0;

	if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
		return -EOPNOTSUPP;

	mutex_lock(&smu->mutex);

	if (smu->ppt_funcs->get_fan_speed_percent)
		ret = smu->ppt_funcs->get_fan_speed_percent(smu, speed);

	mutex_unlock(&smu->mutex);

	return ret;
}

int smu_set_fan_speed_percent(struct smu_context *smu, uint32_t speed)
{
	int ret = 0;

	if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
		return -EOPNOTSUPP;

	mutex_lock(&smu->mutex);

	if (smu->ppt_funcs->set_fan_speed_percent)
		ret = smu->ppt_funcs->set_fan_speed_percent(smu, speed);

	mutex_unlock(&smu->mutex);

	return ret;
}

int smu_get_fan_speed_rpm(struct smu_context *smu, uint32_t *speed)
{
	int ret = 0;

	if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
		return -EOPNOTSUPP;

	mutex_lock(&smu->mutex);

	if (smu->ppt_funcs->get_fan_speed_rpm)
		ret = smu->ppt_funcs->get_fan_speed_rpm(smu, speed);

	mutex_unlock(&smu->mutex);

	return ret;
}
int smu_set_deep_sleep_dcefclk(struct smu_context *smu, int clk)
{
	int ret = 0;

	if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
		return -EOPNOTSUPP;

	mutex_lock(&smu->mutex);

	ret = smu_set_min_dcef_deep_sleep(smu, clk);

	mutex_unlock(&smu->mutex);

	return ret;
}
int smu_set_active_display_count(struct smu_context *smu, uint32_t count)
{
	int ret = 0;

	if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
		return -EOPNOTSUPP;

	if (smu->ppt_funcs->set_active_display_count)
		ret = smu->ppt_funcs->set_active_display_count(smu, count);

	return ret;
}
int smu_get_clock_by_type(struct smu_context *smu,
			  enum amd_pp_clock_type type,
			  struct amd_pp_clocks *clocks)
{
	int ret = 0;

	if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
		return -EOPNOTSUPP;

	mutex_lock(&smu->mutex);

	if (smu->ppt_funcs->get_clock_by_type)
		ret = smu->ppt_funcs->get_clock_by_type(smu, type, clocks);

	mutex_unlock(&smu->mutex);

	return ret;
}

int smu_get_max_high_clocks(struct smu_context *smu,
			    struct amd_pp_simple_clock_info *clocks)
{
	int ret = 0;

	if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
		return -EOPNOTSUPP;

	mutex_lock(&smu->mutex);

	if (smu->ppt_funcs->get_max_high_clocks)
		ret = smu->ppt_funcs->get_max_high_clocks(smu, clocks);

	mutex_unlock(&smu->mutex);

	return ret;
}

int smu_get_clock_by_type_with_latency(struct smu_context *smu,
				       enum smu_clk_type clk_type,
				       struct pp_clock_levels_with_latency *clocks)
{
	int ret = 0;

	if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
		return -EOPNOTSUPP;

	mutex_lock(&smu->mutex);

	if (smu->ppt_funcs->get_clock_by_type_with_latency)
		ret = smu->ppt_funcs->get_clock_by_type_with_latency(smu, clk_type, clocks);

	mutex_unlock(&smu->mutex);

	return ret;
}

int smu_get_clock_by_type_with_voltage(struct smu_context *smu,
				       enum amd_pp_clock_type type,
				       struct pp_clock_levels_with_voltage *clocks)
{
	int ret = 0;

	if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
		return -EOPNOTSUPP;

	mutex_lock(&smu->mutex);

	if (smu->ppt_funcs->get_clock_by_type_with_voltage)
		ret = smu->ppt_funcs->get_clock_by_type_with_voltage(smu, type, clocks);

	mutex_unlock(&smu->mutex);

	return ret;
}

int smu_display_clock_voltage_request(struct smu_context *smu,
				      struct pp_display_clock_request *clock_req)
{
	int ret = 0;

	if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
		return -EOPNOTSUPP;

	mutex_lock(&smu->mutex);

	if (smu->ppt_funcs->display_clock_voltage_request)
		ret = smu->ppt_funcs->display_clock_voltage_request(smu, clock_req);

	mutex_unlock(&smu->mutex);

	return ret;
}

int smu_display_disable_memory_clock_switch(struct smu_context *smu, bool disable_memory_clock_switch)
{
	int ret = -EINVAL;

	if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
		return -EOPNOTSUPP;

	mutex_lock(&smu->mutex);

	if (smu->ppt_funcs->display_disable_memory_clock_switch)
		ret = smu->ppt_funcs->display_disable_memory_clock_switch(smu, disable_memory_clock_switch);

	mutex_unlock(&smu->mutex);

	return ret;
}

int smu_notify_smu_enable_pwe(struct smu_context *smu)
{
	int ret = 0;

	if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
		return -EOPNOTSUPP;

	mutex_lock(&smu->mutex);

	if (smu->ppt_funcs->notify_smu_enable_pwe)
		ret = smu->ppt_funcs->notify_smu_enable_pwe(smu);

	mutex_unlock(&smu->mutex);

	return ret;
}
int smu_set_xgmi_pstate(struct smu_context *smu,
			uint32_t pstate)
{
	int ret = 0;

	if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
		return -EOPNOTSUPP;

	mutex_lock(&smu->mutex);

	if (smu->ppt_funcs->set_xgmi_pstate)
		ret = smu->ppt_funcs->set_xgmi_pstate(smu, pstate);

	mutex_unlock(&smu->mutex);

	if (ret)
		dev_err(smu->adev->dev, "Failed to set XGMI pstate!\n");

	return ret;
}
int smu_set_azalia_d3_pme(struct smu_context *smu)
{
	int ret = 0;

	if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
		return -EOPNOTSUPP;

	mutex_lock(&smu->mutex);

	if (smu->ppt_funcs->set_azalia_d3_pme)
		ret = smu->ppt_funcs->set_azalia_d3_pme(smu);

	mutex_unlock(&smu->mutex);

	return ret;
}
/*
 * On system suspend or reset, the dpm_enabled flag is cleared so that
 * unsupported SMU services are gated.
 *
 * However, the baco/mode1 reset should still be granted, as they are
 * still supported and necessary.
 */
bool smu_baco_is_support(struct smu_context *smu)
{
	bool ret = false;

	if (!smu->pm_enabled)
		return false;

	mutex_lock(&smu->mutex);

	if (smu->ppt_funcs && smu->ppt_funcs->baco_is_support)
		ret = smu->ppt_funcs->baco_is_support(smu);

	mutex_unlock(&smu->mutex);

	return ret;
}
int smu_baco_get_state(struct smu_context *smu, enum smu_baco_state *state)
{
	if (!smu->ppt_funcs->baco_get_state)
		return -EINVAL;

	mutex_lock(&smu->mutex);
	*state = smu->ppt_funcs->baco_get_state(smu);
	mutex_unlock(&smu->mutex);

	return 0;
}
int smu_baco_enter(struct smu_context *smu)
{
	int ret = 0;

	if (!smu->pm_enabled)
		return -EOPNOTSUPP;

	mutex_lock(&smu->mutex);

	if (smu->ppt_funcs->baco_enter)
		ret = smu->ppt_funcs->baco_enter(smu);

	mutex_unlock(&smu->mutex);

	if (ret)
		dev_err(smu->adev->dev, "Failed to enter BACO state!\n");

	return ret;
}

int smu_baco_exit(struct smu_context *smu)
{
	int ret = 0;

	if (!smu->pm_enabled)
		return -EOPNOTSUPP;

	mutex_lock(&smu->mutex);

	if (smu->ppt_funcs->baco_exit)
		ret = smu->ppt_funcs->baco_exit(smu);

	mutex_unlock(&smu->mutex);

	if (ret)
		dev_err(smu->adev->dev, "Failed to exit BACO state!\n");

	return ret;
}
bool smu_mode1_reset_is_support(struct smu_context *smu)
{
	bool ret = false;

	if (!smu->pm_enabled)
		return false;

	mutex_lock(&smu->mutex);

	if (smu->ppt_funcs && smu->ppt_funcs->mode1_reset_is_support)
		ret = smu->ppt_funcs->mode1_reset_is_support(smu);

	mutex_unlock(&smu->mutex);

	return ret;
}
int smu_mode1_reset(struct smu_context *smu)
{
	int ret = 0;

	if (!smu->pm_enabled)
		return -EOPNOTSUPP;

	mutex_lock(&smu->mutex);

	if (smu->ppt_funcs->mode1_reset)
		ret = smu->ppt_funcs->mode1_reset(smu);

	mutex_unlock(&smu->mutex);

	return ret;
}
int smu_mode2_reset(struct smu_context *smu)
{
	int ret = 0;

	if (!smu->pm_enabled)
		return -EOPNOTSUPP;

	mutex_lock(&smu->mutex);

	if (smu->ppt_funcs->mode2_reset)
		ret = smu->ppt_funcs->mode2_reset(smu);

	mutex_unlock(&smu->mutex);

	if (ret)
		dev_err(smu->adev->dev, "Mode2 reset failed!\n");

	return ret;
}
int smu_get_max_sustainable_clocks_by_dc(struct smu_context *smu,
					 struct pp_smu_nv_clock_table *max_clocks)
{
	int ret = 0;

	if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
		return -EOPNOTSUPP;

	mutex_lock(&smu->mutex);

	if (smu->ppt_funcs->get_max_sustainable_clocks_by_dc)
		ret = smu->ppt_funcs->get_max_sustainable_clocks_by_dc(smu, max_clocks);

	mutex_unlock(&smu->mutex);

	return ret;
}
int smu_get_uclk_dpm_states(struct smu_context *smu,
			    unsigned int *clock_values_in_khz,
			    unsigned int *num_states)
{
	int ret = 0;

	if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
		return -EOPNOTSUPP;

	mutex_lock(&smu->mutex);

	if (smu->ppt_funcs->get_uclk_dpm_states)
		ret = smu->ppt_funcs->get_uclk_dpm_states(smu, clock_values_in_khz, num_states);

	mutex_unlock(&smu->mutex);

	return ret;
}
enum amd_pm_state_type smu_get_current_power_state(struct smu_context *smu)
{
	enum amd_pm_state_type pm_state = POWER_STATE_TYPE_DEFAULT;

	if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
		return -EOPNOTSUPP;

	mutex_lock(&smu->mutex);

	if (smu->ppt_funcs->get_current_power_state)
		pm_state = smu->ppt_funcs->get_current_power_state(smu);

	mutex_unlock(&smu->mutex);

	return pm_state;
}
int smu_get_dpm_clock_table(struct smu_context *smu,
			    struct dpm_clocks *clock_table)
{
	int ret = 0;

	if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
		return -EOPNOTSUPP;

	mutex_lock(&smu->mutex);

	if (smu->ppt_funcs->get_dpm_clock_table)
		ret = smu->ppt_funcs->get_dpm_clock_table(smu, clock_table);

	mutex_unlock(&smu->mutex);

	return ret;
}