/*
 * Copyright 2019 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 */
#define SWSMU_CODE_LAYER_L1

#include <linux/firmware.h>
#include <linux/pci.h>

#include "amdgpu_smu.h"
#include "smu_internal.h"

#include "arcturus_ppt.h"
#include "navi10_ppt.h"
#include "sienna_cichlid_ppt.h"
#include "renoir_ppt.h"
/*
 * DO NOT use these for err/warn/info/debug messages.
 * Use dev_err, dev_warn, dev_info and dev_dbg instead.
 * They are more MGPU friendly.
 */
size_t smu_sys_get_pp_feature_mask(struct smu_context *smu, char *buf)
{
	size_t size = 0;

	if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
		return -EOPNOTSUPP;

	mutex_lock(&smu->mutex);

	size = smu_get_pp_feature_mask(smu, buf);

	mutex_unlock(&smu->mutex);

	return size;
}

int smu_sys_set_pp_feature_mask(struct smu_context *smu, uint64_t new_mask)
{
	int ret = 0;

	if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
		return -EOPNOTSUPP;

	mutex_lock(&smu->mutex);

	ret = smu_set_pp_feature_mask(smu, new_mask);

	mutex_unlock(&smu->mutex);

	return ret;
}
int smu_get_status_gfxoff(struct amdgpu_device *adev, uint32_t *value)
{
	int ret = 0;
	struct smu_context *smu = &adev->smu;

	if (is_support_sw_smu(adev) && smu->ppt_funcs->get_gfx_off_status)
		*value = smu_get_gfx_off_status(smu);
	else
		ret = -EINVAL;

	return ret;
}
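/*
 * The exported clock-range helpers below follow a common pattern: take
 * smu->mutex, forward to the ASIC-specific ppt_funcs hook when it is
 * implemented, and otherwise fall through with ret = 0.
 */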
int smu_set_soft_freq_range(struct smu_context *smu,
			    enum smu_clk_type clk_type,
			    uint32_t min,
			    uint32_t max)
{
	int ret = 0;

	mutex_lock(&smu->mutex);

	if (smu->ppt_funcs->set_soft_freq_limited_range)
		ret = smu->ppt_funcs->set_soft_freq_limited_range(smu,
								  clk_type,
								  min,
								  max);

	mutex_unlock(&smu->mutex);

	return ret;
}
int smu_get_dpm_freq_range(struct smu_context *smu,
			   enum smu_clk_type clk_type,
			   uint32_t *min,
			   uint32_t *max)
{
	int ret = 0;

	if (!min && !max)
		return -EINVAL;

	mutex_lock(&smu->mutex);

	if (smu->ppt_funcs->get_dpm_ultimate_freq)
		ret = smu->ppt_funcs->get_dpm_ultimate_freq(smu,
							    clk_type,
							    min,
							    max);

	mutex_unlock(&smu->mutex);

	return ret;
}
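/*
 * The *_locked() power-gate helpers below must be called with the
 * corresponding gate lock held. The XOR test against the gated state
 * makes the call a no-op when the block is already in the requested
 * power state, and the gated flag is only updated when the ppt hook
 * succeeds.
 */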
static int smu_dpm_set_vcn_enable_locked(struct smu_context *smu,
					 bool enable)
{
	struct smu_power_context *smu_power = &smu->smu_power;
	struct smu_power_gate *power_gate = &smu_power->power_gate;
	int ret = 0;

	if (!smu->ppt_funcs->dpm_set_vcn_enable)
		return 0;

	if (atomic_read(&power_gate->vcn_gated) ^ enable)
		return 0;

	ret = smu->ppt_funcs->dpm_set_vcn_enable(smu, enable);
	if (!ret)
		atomic_set(&power_gate->vcn_gated, !enable);

	return ret;
}

static int smu_dpm_set_vcn_enable(struct smu_context *smu,
				  bool enable)
{
	struct smu_power_context *smu_power = &smu->smu_power;
	struct smu_power_gate *power_gate = &smu_power->power_gate;
	int ret = 0;

	mutex_lock(&power_gate->vcn_gate_lock);

	ret = smu_dpm_set_vcn_enable_locked(smu, enable);

	mutex_unlock(&power_gate->vcn_gate_lock);

	return ret;
}

static int smu_dpm_set_jpeg_enable_locked(struct smu_context *smu,
					  bool enable)
{
	struct smu_power_context *smu_power = &smu->smu_power;
	struct smu_power_gate *power_gate = &smu_power->power_gate;
	int ret = 0;

	if (!smu->ppt_funcs->dpm_set_jpeg_enable)
		return 0;

	if (atomic_read(&power_gate->jpeg_gated) ^ enable)
		return 0;

	ret = smu->ppt_funcs->dpm_set_jpeg_enable(smu, enable);
	if (!ret)
		atomic_set(&power_gate->jpeg_gated, !enable);

	return ret;
}

static int smu_dpm_set_jpeg_enable(struct smu_context *smu,
				   bool enable)
{
	struct smu_power_context *smu_power = &smu->smu_power;
	struct smu_power_gate *power_gate = &smu_power->power_gate;
	int ret = 0;

	mutex_lock(&power_gate->jpeg_gate_lock);

	ret = smu_dpm_set_jpeg_enable_locked(smu, enable);

	mutex_unlock(&power_gate->jpeg_gate_lock);

	return ret;
}
/**
 * smu_dpm_set_power_gate - power gate/ungate the specific IP block
 *
 * @smu:        smu_context pointer
 * @block_type: the IP block to power gate/ungate
 * @gate:       to power gate if true, ungate otherwise
 *
 * This API uses no smu->mutex lock protection due to:
 * 1. It is either called by other IP blocks (gfx/sdma/vcn/uvd/vce),
 *    and the caller guarantees the access is race free.
 * 2. Or it is called on a user request to change
 *    power_dpm_force_performance_level. In that case, the smu->mutex lock
 *    is already taken by the parent API, smu_force_performance_level,
 *    in the call path.
 */
int smu_dpm_set_power_gate(struct smu_context *smu, uint32_t block_type,
			   bool gate)
{
	int ret = 0;

	if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
		return -EOPNOTSUPP;

	switch (block_type) {
	/*
	 * Some legacy code in amdgpu_vcn.c and vcn_v2*.c still uses
	 * AMD_IP_BLOCK_TYPE_UVD for VCN, so both types are handled here.
	 */
	case AMD_IP_BLOCK_TYPE_UVD:
	case AMD_IP_BLOCK_TYPE_VCN:
		ret = smu_dpm_set_vcn_enable(smu, !gate);
		if (ret)
			dev_err(smu->adev->dev, "Failed to power %s VCN!\n",
				gate ? "gate" : "ungate");
		break;
	case AMD_IP_BLOCK_TYPE_GFX:
		ret = smu_gfx_off_control(smu, gate);
		if (ret)
			dev_err(smu->adev->dev, "Failed to %s gfxoff!\n",
				gate ? "enable" : "disable");
		break;
	case AMD_IP_BLOCK_TYPE_SDMA:
		ret = smu_powergate_sdma(smu, gate);
		if (ret)
			dev_err(smu->adev->dev, "Failed to power %s SDMA!\n",
				gate ? "gate" : "ungate");
		break;
	case AMD_IP_BLOCK_TYPE_JPEG:
		ret = smu_dpm_set_jpeg_enable(smu, !gate);
		if (ret)
			dev_err(smu->adev->dev, "Failed to power %s JPEG!\n",
				gate ? "gate" : "ungate");
		break;
	default:
		dev_err(smu->adev->dev, "Unsupported block type!\n");
		ret = -EINVAL;
		break;
	}

	return ret;
}
int smu_get_power_num_states(struct smu_context *smu,
			     struct pp_states_info *state_info)
{
	if (!state_info)
		return -EINVAL;

	/* power states are not supported; report a single default state */
	memset(state_info, 0, sizeof(struct pp_states_info));
	state_info->nums = 1;
	state_info->states[0] = POWER_STATE_TYPE_DEFAULT;

	return 0;
}
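/* All ASICs from Arcturus onward are driven by this software SMU path. */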
bool is_support_sw_smu(struct amdgpu_device *adev)
{
	if (adev->asic_type >= CHIP_ARCTURUS)
		return true;

	return false;
}
int smu_sys_get_pp_table(struct smu_context *smu, void **table)
{
	struct smu_table_context *smu_table = &smu->smu_table;
	uint32_t powerplay_table_size;

	if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
		return -EOPNOTSUPP;

	if (!smu_table->power_play_table && !smu_table->hardcode_pptable)
		return -EINVAL;

	mutex_lock(&smu->mutex);

	if (smu_table->hardcode_pptable)
		*table = smu_table->hardcode_pptable;
	else
		*table = smu_table->power_play_table;

	powerplay_table_size = smu_table->power_play_table_size;

	mutex_unlock(&smu->mutex);

	return powerplay_table_size;
}
int smu_sys_set_pp_table(struct smu_context *smu, void *buf, size_t size)
{
	struct smu_table_context *smu_table = &smu->smu_table;
	ATOM_COMMON_TABLE_HEADER *header = (ATOM_COMMON_TABLE_HEADER *)buf;
	int ret = 0;

	if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
		return -EOPNOTSUPP;

	if (header->usStructureSize != size) {
		dev_err(smu->adev->dev, "pp table size not matched!\n");
		return -EIO;
	}

	mutex_lock(&smu->mutex);
	if (!smu_table->hardcode_pptable)
		smu_table->hardcode_pptable = kzalloc(size, GFP_KERNEL);
	if (!smu_table->hardcode_pptable) {
		ret = -ENOMEM;
		goto failed;
	}

	memcpy(smu_table->hardcode_pptable, buf, size);
	smu_table->power_play_table = smu_table->hardcode_pptable;
	smu_table->power_play_table_size = size;

	/*
	 * Special hw_fini action (for Navi1x, the DPM disablement will be
	 * skipped) may be needed for custom pptable uploading.
	 */
	smu->uploading_custom_pp_table = true;

	ret = smu_reset(smu);
	if (ret)
		dev_info(smu->adev->dev, "smu reset failed, ret = %d\n", ret);

	smu->uploading_custom_pp_table = false;

failed:
	mutex_unlock(&smu->mutex);
	return ret;
}
static int smu_get_driver_allowed_feature_mask(struct smu_context *smu)
{
	struct smu_feature *feature = &smu->smu_feature;
	int ret = 0;
	uint32_t allowed_feature_mask[SMU_FEATURE_MAX/32];

	bitmap_zero(feature->allowed, SMU_FEATURE_MAX);

	ret = smu_get_allowed_feature_mask(smu, allowed_feature_mask,
					   SMU_FEATURE_MAX/32);
	if (ret)
		return ret;

	bitmap_or(feature->allowed, feature->allowed,
		  (unsigned long *)allowed_feature_mask,
		  feature->feature_num);

	return ret;
}
static int smu_set_funcs(struct amdgpu_device *adev)
{
	struct smu_context *smu = &adev->smu;

	if (adev->pm.pp_feature & PP_OVERDRIVE_MASK)
		smu->od_enabled = true;

	switch (adev->asic_type) {
	case CHIP_NAVI10:
	case CHIP_NAVI14:
	case CHIP_NAVI12:
		navi10_set_ppt_funcs(smu);
		break;
	case CHIP_ARCTURUS:
		adev->pm.pp_feature &= ~PP_GFXOFF_MASK;
		arcturus_set_ppt_funcs(smu);
		/* OD is not supported on Arcturus */
		smu->od_enabled = false;
		break;
	case CHIP_SIENNA_CICHLID:
	case CHIP_NAVY_FLOUNDER:
		sienna_cichlid_set_ppt_funcs(smu);
		break;
	case CHIP_RENOIR:
		renoir_set_ppt_funcs(smu);
		break;
	default:
		return -EINVAL;
	}

	return 0;
}
static int smu_early_init(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	struct smu_context *smu = &adev->smu;

	smu->pm_enabled = !!amdgpu_dpm;
	smu->is_apu = false;
	mutex_init(&smu->mutex);
	mutex_init(&smu->smu_baco.mutex);
	smu->smu_baco.state = SMU_BACO_STATE_EXIT;
	smu->smu_baco.platform_support = false;

	return smu_set_funcs(adev);
}
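/*
 * Building the default DPM tables requires VCN and JPEG to be powered
 * up. The current gate states are saved, both blocks are ungated for
 * the setup call, and the saved states are restored afterwards, with
 * both gate locks held across the whole sequence.
 */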
static int smu_set_default_dpm_table(struct smu_context *smu)
{
	struct smu_power_context *smu_power = &smu->smu_power;
	struct smu_power_gate *power_gate = &smu_power->power_gate;
	int vcn_gate, jpeg_gate;
	int ret = 0;

	if (!smu->ppt_funcs->set_default_dpm_table)
		return 0;

	mutex_lock(&power_gate->vcn_gate_lock);
	mutex_lock(&power_gate->jpeg_gate_lock);

	vcn_gate = atomic_read(&power_gate->vcn_gated);
	jpeg_gate = atomic_read(&power_gate->jpeg_gated);

	ret = smu_dpm_set_vcn_enable_locked(smu, true);
	if (ret)
		goto err0_out;

	ret = smu_dpm_set_jpeg_enable_locked(smu, true);
	if (ret)
		goto err1_out;

	ret = smu->ppt_funcs->set_default_dpm_table(smu);
	if (ret)
		dev_err(smu->adev->dev,
			"Failed to setup default dpm clock tables!\n");

	smu_dpm_set_jpeg_enable_locked(smu, !jpeg_gate);
err1_out:
	smu_dpm_set_vcn_enable_locked(smu, !vcn_gate);
err0_out:
	mutex_unlock(&power_gate->jpeg_gate_lock);
	mutex_unlock(&power_gate->vcn_gate_lock);

	return ret;
}
static int smu_late_init(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	struct smu_context *smu = &adev->smu;
	int ret = 0;

	if (!smu->pm_enabled)
		return 0;

	ret = smu_post_init(smu);
	if (ret) {
		dev_err(adev->dev, "Failed to post smu init!\n");
		return ret;
	}

	ret = smu_set_default_od_settings(smu);
	if (ret) {
		dev_err(adev->dev, "Failed to setup default OD settings!\n");
		return ret;
	}

	ret = smu_populate_umd_state_clk(smu);
	if (ret) {
		dev_err(adev->dev, "Failed to populate UMD state clocks!\n");
		return ret;
	}

	ret = smu_get_asic_power_limits(smu);
	if (ret) {
		dev_err(adev->dev, "Failed to get asic power limits!\n");
		return ret;
	}

	smu_get_unique_id(smu);

	smu_get_fan_parameters(smu);

	smu_handle_task(&adev->smu,
			smu->smu_dpm.dpm_level,
			AMD_PP_TASK_COMPLETE_INIT,
			false);

	return 0;
}
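/*
 * One driver table BO is shared for staging all SMU tables except the
 * tool (PMSTATUSLOG) table, which needs its own allocation. The shared
 * BO is therefore sized to the largest of the remaining tables.
 */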
static int smu_init_fb_allocations(struct smu_context *smu)
{
	struct amdgpu_device *adev = smu->adev;
	struct smu_table_context *smu_table = &smu->smu_table;
	struct smu_table *tables = smu_table->tables;
	struct smu_table *driver_table = &(smu_table->driver_table);
	uint32_t max_table_size = 0;
	int ret, i;

	/* VRAM allocation for tool table */
	if (tables[SMU_TABLE_PMSTATUSLOG].size) {
		ret = amdgpu_bo_create_kernel(adev,
					      tables[SMU_TABLE_PMSTATUSLOG].size,
					      tables[SMU_TABLE_PMSTATUSLOG].align,
					      tables[SMU_TABLE_PMSTATUSLOG].domain,
					      &tables[SMU_TABLE_PMSTATUSLOG].bo,
					      &tables[SMU_TABLE_PMSTATUSLOG].mc_address,
					      &tables[SMU_TABLE_PMSTATUSLOG].cpu_addr);
		if (ret) {
			dev_err(adev->dev, "VRAM allocation for tool table failed!\n");
			return ret;
		}
	}

	/* VRAM allocation for driver table */
	for (i = 0; i < SMU_TABLE_COUNT; i++) {
		if (tables[i].size == 0)
			continue;

		if (i == SMU_TABLE_PMSTATUSLOG)
			continue;

		if (max_table_size < tables[i].size)
			max_table_size = tables[i].size;
	}

	driver_table->size = max_table_size;
	driver_table->align = PAGE_SIZE;
	driver_table->domain = AMDGPU_GEM_DOMAIN_VRAM;

	ret = amdgpu_bo_create_kernel(adev,
				      driver_table->size,
				      driver_table->align,
				      driver_table->domain,
				      &driver_table->bo,
				      &driver_table->mc_address,
				      &driver_table->cpu_addr);
	if (ret) {
		dev_err(adev->dev, "VRAM allocation for driver table failed!\n");
		if (tables[SMU_TABLE_PMSTATUSLOG].mc_address)
			amdgpu_bo_free_kernel(&tables[SMU_TABLE_PMSTATUSLOG].bo,
					      &tables[SMU_TABLE_PMSTATUSLOG].mc_address,
					      &tables[SMU_TABLE_PMSTATUSLOG].cpu_addr);
	}

	return ret;
}
static int smu_fini_fb_allocations(struct smu_context *smu)
{
	struct smu_table_context *smu_table = &smu->smu_table;
	struct smu_table *tables = smu_table->tables;
	struct smu_table *driver_table = &(smu_table->driver_table);

	if (tables[SMU_TABLE_PMSTATUSLOG].mc_address)
		amdgpu_bo_free_kernel(&tables[SMU_TABLE_PMSTATUSLOG].bo,
				      &tables[SMU_TABLE_PMSTATUSLOG].mc_address,
				      &tables[SMU_TABLE_PMSTATUSLOG].cpu_addr);

	amdgpu_bo_free_kernel(&driver_table->bo,
			      &driver_table->mc_address,
			      &driver_table->cpu_addr);

	return 0;
}
/**
 * smu_alloc_memory_pool - allocate memory pool in the system memory
 *
 * @smu: smu_context pointer
 *
 * This memory pool is used by the SMC; its address is passed to the SMC
 * via the SetSystemVirtualDramAddr and DramLogSetDramAddr messages.
 *
 * Returns 0 on success, error on failure.
 */
static int smu_alloc_memory_pool(struct smu_context *smu)
{
	struct amdgpu_device *adev = smu->adev;
	struct smu_table_context *smu_table = &smu->smu_table;
	struct smu_table *memory_pool = &smu_table->memory_pool;
	uint64_t pool_size = smu->pool_size;
	int ret = 0;

	if (pool_size == SMU_MEMORY_POOL_SIZE_ZERO)
		return ret;

	memory_pool->size = pool_size;
	memory_pool->align = PAGE_SIZE;
	memory_pool->domain = AMDGPU_GEM_DOMAIN_GTT;

	switch (pool_size) {
	case SMU_MEMORY_POOL_SIZE_256_MB:
	case SMU_MEMORY_POOL_SIZE_512_MB:
	case SMU_MEMORY_POOL_SIZE_1_GB:
	case SMU_MEMORY_POOL_SIZE_2_GB:
		ret = amdgpu_bo_create_kernel(adev,
					      memory_pool->size,
					      memory_pool->align,
					      memory_pool->domain,
					      &memory_pool->bo,
					      &memory_pool->mc_address,
					      &memory_pool->cpu_addr);
		if (ret)
			dev_err(adev->dev, "GTT allocation for dramlog failed!\n");
		break;
	default:
		break;
	}

	return ret;
}
static int smu_free_memory_pool(struct smu_context *smu)
{
	struct smu_table_context *smu_table = &smu->smu_table;
	struct smu_table *memory_pool = &smu_table->memory_pool;

	if (memory_pool->size == SMU_MEMORY_POOL_SIZE_ZERO)
		return 0;

	amdgpu_bo_free_kernel(&memory_pool->bo,
			      &memory_pool->mc_address,
			      &memory_pool->cpu_addr);

	memset(memory_pool, 0, sizeof(struct smu_table));

	return 0;
}

static int smu_alloc_dummy_read_table(struct smu_context *smu)
{
	struct smu_table_context *smu_table = &smu->smu_table;
	struct smu_table *dummy_read_1_table =
			&smu_table->dummy_read_1_table;
	struct amdgpu_device *adev = smu->adev;
	int ret = 0;

	dummy_read_1_table->size = 0x40000;
	dummy_read_1_table->align = PAGE_SIZE;
	dummy_read_1_table->domain = AMDGPU_GEM_DOMAIN_VRAM;

	ret = amdgpu_bo_create_kernel(adev,
				      dummy_read_1_table->size,
				      dummy_read_1_table->align,
				      dummy_read_1_table->domain,
				      &dummy_read_1_table->bo,
				      &dummy_read_1_table->mc_address,
				      &dummy_read_1_table->cpu_addr);
	if (ret)
		dev_err(adev->dev, "VRAM allocation for dummy read table failed!\n");

	return ret;
}

static void smu_free_dummy_read_table(struct smu_context *smu)
{
	struct smu_table_context *smu_table = &smu->smu_table;
	struct smu_table *dummy_read_1_table =
			&smu_table->dummy_read_1_table;

	amdgpu_bo_free_kernel(&dummy_read_1_table->bo,
			      &dummy_read_1_table->mc_address,
			      &dummy_read_1_table->cpu_addr);

	memset(dummy_read_1_table, 0, sizeof(struct smu_table));
}
static int smu_smc_table_sw_init(struct smu_context *smu)
{
	int ret;

	/*
	 * Create smu_table structure, and init smc tables such as
	 * TABLE_PPTABLE, TABLE_WATERMARKS and TABLE_SMU_METRICS.
	 */
	ret = smu_init_smc_tables(smu);
	if (ret) {
		dev_err(smu->adev->dev, "Failed to init smc tables!\n");
		return ret;
	}

	/*
	 * Create smu_power_context structure, and allocate smu_dpm_context and
	 * context size to fill the smu_power_context data.
	 */
	ret = smu_init_power(smu);
	if (ret) {
		dev_err(smu->adev->dev, "Failed to init smu power!\n");
		return ret;
	}

	/*
	 * Allocate vram bos to store smc table contents.
	 */
	ret = smu_init_fb_allocations(smu);
	if (ret)
		return ret;

	ret = smu_alloc_memory_pool(smu);
	if (ret)
		return ret;

	ret = smu_alloc_dummy_read_table(smu);
	if (ret)
		return ret;

	ret = smu_i2c_init(smu, &smu->adev->pm.smu_i2c);
	if (ret)
		return ret;

	return 0;
}
static int smu_smc_table_sw_fini(struct smu_context *smu)
{
	int ret;

	smu_i2c_fini(smu, &smu->adev->pm.smu_i2c);

	smu_free_dummy_read_table(smu);

	ret = smu_free_memory_pool(smu);
	if (ret)
		return ret;

	ret = smu_fini_fb_allocations(smu);
	if (ret)
		return ret;

	ret = smu_fini_power(smu);
	if (ret) {
		dev_err(smu->adev->dev, "Failed to fini smu power!\n");
		return ret;
	}

	ret = smu_fini_smc_tables(smu);
	if (ret) {
		dev_err(smu->adev->dev, "Failed to fini smc tables!\n");
		return ret;
	}

	return 0;
}
static void smu_throttling_logging_work_fn(struct work_struct *work)
{
	struct smu_context *smu = container_of(work, struct smu_context,
					       throttling_logging_work);

	smu_log_thermal_throttling(smu);
}

static void smu_interrupt_work_fn(struct work_struct *work)
{
	struct smu_context *smu = container_of(work, struct smu_context,
					       interrupt_work);

	mutex_lock(&smu->mutex);

	if (smu->ppt_funcs && smu->ppt_funcs->interrupt_work)
		smu->ppt_funcs->interrupt_work(smu);

	mutex_unlock(&smu->mutex);
}
static int smu_sw_init(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	struct smu_context *smu = &adev->smu;
	int ret;

	smu->pool_size = adev->pm.smu_prv_buffer_size;
	smu->smu_feature.feature_num = SMU_FEATURE_MAX;
	mutex_init(&smu->smu_feature.mutex);
	bitmap_zero(smu->smu_feature.supported, SMU_FEATURE_MAX);
	bitmap_zero(smu->smu_feature.enabled, SMU_FEATURE_MAX);
	bitmap_zero(smu->smu_feature.allowed, SMU_FEATURE_MAX);

	mutex_init(&smu->sensor_lock);
	mutex_init(&smu->metrics_lock);
	mutex_init(&smu->message_lock);

	INIT_WORK(&smu->throttling_logging_work, smu_throttling_logging_work_fn);
	INIT_WORK(&smu->interrupt_work, smu_interrupt_work_fn);
	atomic64_set(&smu->throttle_int_counter, 0);
	smu->watermarks_bitmap = 0;
	smu->power_profile_mode = PP_SMC_POWER_PROFILE_BOOTUP_DEFAULT;
	smu->default_power_profile_mode = PP_SMC_POWER_PROFILE_BOOTUP_DEFAULT;

	atomic_set(&smu->smu_power.power_gate.vcn_gated, 1);
	atomic_set(&smu->smu_power.power_gate.jpeg_gated, 1);
	mutex_init(&smu->smu_power.power_gate.vcn_gate_lock);
	mutex_init(&smu->smu_power.power_gate.jpeg_gate_lock);

	/* Assign the workload priorities before deriving the default mask from them. */
	smu->workload_prority[PP_SMC_POWER_PROFILE_BOOTUP_DEFAULT] = 0;
	smu->workload_prority[PP_SMC_POWER_PROFILE_FULLSCREEN3D] = 1;
	smu->workload_prority[PP_SMC_POWER_PROFILE_POWERSAVING] = 2;
	smu->workload_prority[PP_SMC_POWER_PROFILE_VIDEO] = 3;
	smu->workload_prority[PP_SMC_POWER_PROFILE_VR] = 4;
	smu->workload_prority[PP_SMC_POWER_PROFILE_COMPUTE] = 5;
	smu->workload_prority[PP_SMC_POWER_PROFILE_CUSTOM] = 6;
	smu->workload_mask = 1 << smu->workload_prority[PP_SMC_POWER_PROFILE_BOOTUP_DEFAULT];

	smu->workload_setting[0] = PP_SMC_POWER_PROFILE_BOOTUP_DEFAULT;
	smu->workload_setting[1] = PP_SMC_POWER_PROFILE_FULLSCREEN3D;
	smu->workload_setting[2] = PP_SMC_POWER_PROFILE_POWERSAVING;
	smu->workload_setting[3] = PP_SMC_POWER_PROFILE_VIDEO;
	smu->workload_setting[4] = PP_SMC_POWER_PROFILE_VR;
	smu->workload_setting[5] = PP_SMC_POWER_PROFILE_COMPUTE;
	smu->workload_setting[6] = PP_SMC_POWER_PROFILE_CUSTOM;
	smu->display_config = &adev->pm.pm_display_cfg;

	smu->smu_dpm.dpm_level = AMD_DPM_FORCED_LEVEL_AUTO;
	smu->smu_dpm.requested_dpm_level = AMD_DPM_FORCED_LEVEL_AUTO;

	if (!amdgpu_sriov_vf(adev)) {
		ret = smu_init_microcode(smu);
		if (ret) {
			dev_err(adev->dev, "Failed to load smu firmware!\n");
			return ret;
		}
	}

	ret = smu_smc_table_sw_init(smu);
	if (ret) {
		dev_err(adev->dev, "Failed to sw init smc table!\n");
		return ret;
	}

	ret = smu_register_irq_handler(smu);
	if (ret) {
		dev_err(adev->dev, "Failed to register smc irq handler!\n");
		return ret;
	}

	return 0;
}
static int smu_sw_fini(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	struct smu_context *smu = &adev->smu;
	int ret;

	ret = smu_smc_table_sw_fini(smu);
	if (ret) {
		dev_err(adev->dev, "Failed to sw fini smc table!\n");
		return ret;
	}

	smu_fini_microcode(smu);

	return 0;
}
static int smu_get_thermal_temperature_range(struct smu_context *smu)
{
	struct amdgpu_device *adev = smu->adev;
	struct smu_temperature_range *range =
				&smu->thermal_range;
	int ret = 0;

	if (!smu->ppt_funcs->get_thermal_temperature_range)
		return 0;

	ret = smu->ppt_funcs->get_thermal_temperature_range(smu, range);
	if (ret)
		return ret;

	adev->pm.dpm.thermal.min_temp = range->min;
	adev->pm.dpm.thermal.max_temp = range->max;
	adev->pm.dpm.thermal.max_edge_emergency_temp = range->edge_emergency_max;
	adev->pm.dpm.thermal.min_hotspot_temp = range->hotspot_min;
	adev->pm.dpm.thermal.max_hotspot_crit_temp = range->hotspot_crit_max;
	adev->pm.dpm.thermal.max_hotspot_emergency_temp = range->hotspot_emergency_max;
	adev->pm.dpm.thermal.min_mem_temp = range->mem_min;
	adev->pm.dpm.thermal.max_mem_crit_temp = range->mem_crit_max;
	adev->pm.dpm.thermal.max_mem_emergency_temp = range->mem_emergency_max;

	return ret;
}
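/*
 * Bring up the SMC: set the driver/tool table and memory pool
 * locations, transfer the pptable, run BTC, enable the allowed DPM
 * features, report the PCIe parameters, set up thermal ranges/alerts
 * and the default DPM tables, and finally program the minimum deep
 * sleep DCEF clock.
 */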
static int smu_smc_hw_setup(struct smu_context *smu)
{
	struct amdgpu_device *adev = smu->adev;
	uint32_t pcie_gen = 0, pcie_width = 0;
	int ret = 0;

	if (adev->in_suspend && smu_is_dpm_running(smu)) {
		dev_info(adev->dev, "dpm has been enabled\n");
		return 0;
	}

	ret = smu_init_display_count(smu, 0);
	if (ret) {
		dev_info(adev->dev, "Failed to pre-set display count as 0!\n");
		return ret;
	}

	ret = smu_set_driver_table_location(smu);
	if (ret) {
		dev_err(adev->dev, "Failed to SetDriverDramAddr!\n");
		return ret;
	}

	/*
	 * Set PMSTATUSLOG table bo address with SetToolsDramAddr MSG for tools.
	 */
	ret = smu_set_tool_table_location(smu);
	if (ret) {
		dev_err(adev->dev, "Failed to SetToolsDramAddr!\n");
		return ret;
	}

	/*
	 * Use the SetSystemVirtualDramAddr and DramLogSetDramAddr messages
	 * to notify the SMC of the memory pool location.
	 */
	ret = smu_notify_memory_pool_location(smu);
	if (ret) {
		dev_err(adev->dev, "Failed to SetDramLogDramAddr!\n");
		return ret;
	}

	/* smu_dump_pptable(smu); */
	/*
	 * Copy pptable bo in the vram to smc with SMU MSGs such as
	 * SetDriverDramAddr and TransferTableDram2Smu.
	 */
	ret = smu_write_pptable(smu);
	if (ret) {
		dev_err(adev->dev, "Failed to transfer pptable to SMC!\n");
		return ret;
	}

	/* issue Run*Btc msg */
	ret = smu_run_btc(smu);
	if (ret)
		return ret;

	ret = smu_feature_set_allowed_mask(smu);
	if (ret) {
		dev_err(adev->dev, "Failed to set driver allowed features mask!\n");
		return ret;
	}

	ret = smu_system_features_control(smu, true);
	if (ret) {
		dev_err(adev->dev, "Failed to enable requested dpm features!\n");
		return ret;
	}

	if (!smu_is_dpm_running(smu))
		dev_info(adev->dev, "dpm has been disabled\n");
	if (adev->pm.pcie_gen_mask & CAIL_PCIE_LINK_SPEED_SUPPORT_GEN4)
		pcie_gen = 3;
	else if (adev->pm.pcie_gen_mask & CAIL_PCIE_LINK_SPEED_SUPPORT_GEN3)
		pcie_gen = 2;
	else if (adev->pm.pcie_gen_mask & CAIL_PCIE_LINK_SPEED_SUPPORT_GEN2)
		pcie_gen = 1;
	else if (adev->pm.pcie_gen_mask & CAIL_PCIE_LINK_SPEED_SUPPORT_GEN1)
		pcie_gen = 0;

	/* Bit 31:16: LCLK DPM level. 0 is DPM0, and 1 is DPM1
	 * Bit 15:8:  PCIE GEN, 0 to 3 corresponds to GEN1 to GEN4
	 * Bit 7:0:   PCIE lane width, 1 to 7 corresponds to x1 to x32
	 */
	if (adev->pm.pcie_mlw_mask & CAIL_PCIE_LINK_WIDTH_SUPPORT_X16)
		pcie_width = 6;
	else if (adev->pm.pcie_mlw_mask & CAIL_PCIE_LINK_WIDTH_SUPPORT_X12)
		pcie_width = 5;
	else if (adev->pm.pcie_mlw_mask & CAIL_PCIE_LINK_WIDTH_SUPPORT_X8)
		pcie_width = 4;
	else if (adev->pm.pcie_mlw_mask & CAIL_PCIE_LINK_WIDTH_SUPPORT_X4)
		pcie_width = 3;
	else if (adev->pm.pcie_mlw_mask & CAIL_PCIE_LINK_WIDTH_SUPPORT_X2)
		pcie_width = 2;
	else if (adev->pm.pcie_mlw_mask & CAIL_PCIE_LINK_WIDTH_SUPPORT_X1)
		pcie_width = 1;

	ret = smu_update_pcie_parameters(smu, pcie_gen, pcie_width);
	if (ret) {
		dev_err(adev->dev, "Attempt to override pcie params failed!\n");
		return ret;
	}
	ret = smu_get_thermal_temperature_range(smu);
	if (ret) {
		dev_err(adev->dev, "Failed to get thermal temperature ranges!\n");
		return ret;
	}

	ret = smu_enable_thermal_alert(smu);
	if (ret) {
		dev_err(adev->dev, "Failed to enable thermal alert!\n");
		return ret;
	}

	/*
	 * Set initialized values (from vbios) to the dpm tables context,
	 * e.g. gfxclk, memclk and dcefclk, and enable the DPM feature for
	 * each clock domain.
	 */
	ret = smu_set_default_dpm_table(smu);
	if (ret) {
		dev_err(adev->dev, "Failed to setup default dpm clock tables!\n");
		return ret;
	}
	ret = smu_notify_display_change(smu);
	if (ret)
		return ret;

	/*
	 * Set min deep sleep dce fclk with bootup value from vbios via
	 * SetMinDeepSleepDcefclk MSG.
	 */
	ret = smu_set_min_dcef_deep_sleep(smu,
					  smu->smu_table.boot_values.dcefclk / 100);
	if (ret)
		return ret;

	return ret;
}
static int smu_start_smc_engine(struct smu_context *smu)
{
	struct amdgpu_device *adev = smu->adev;
	int ret = 0;

	if (adev->firmware.load_type != AMDGPU_FW_LOAD_PSP) {
		if (adev->asic_type < CHIP_NAVI10) {
			if (smu->ppt_funcs->load_microcode) {
				ret = smu->ppt_funcs->load_microcode(smu);
				if (ret)
					return ret;
			}
		}
	}

	if (smu->ppt_funcs->check_fw_status) {
		ret = smu->ppt_funcs->check_fw_status(smu);
		if (ret) {
			dev_err(adev->dev, "SMC is not ready\n");
			return ret;
		}
	}

	/*
	 * Send msg GetDriverIfVersion to check if the return value is equal
	 * with DRIVER_IF_VERSION of smc header.
	 */
	ret = smu_check_fw_version(smu);
	if (ret)
		return ret;

	return ret;
}
static int smu_hw_init(void *handle)
{
	int ret;
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	struct smu_context *smu = &adev->smu;

	if (amdgpu_sriov_vf(adev) && !amdgpu_sriov_is_pp_one_vf(adev)) {
		smu->pm_enabled = false;
		return 0;
	}

	ret = smu_start_smc_engine(smu);
	if (ret) {
		dev_err(adev->dev, "SMC engine is not correctly up!\n");
		return ret;
	}

	if (smu->is_apu) {
		smu_powergate_sdma(&adev->smu, false);
		smu_dpm_set_vcn_enable(smu, true);
		smu_dpm_set_jpeg_enable(smu, true);
		smu_set_gfx_cgpg(&adev->smu, true);
	}

	if (!smu->pm_enabled)
		return 0;

	/* get boot_values from vbios to set revision, gfxclk, etc. */
	ret = smu_get_vbios_bootup_values(smu);
	if (ret) {
		dev_err(adev->dev, "Failed to get VBIOS boot clock values!\n");
		return ret;
	}

	ret = smu_setup_pptable(smu);
	if (ret) {
		dev_err(adev->dev, "Failed to setup pptable!\n");
		return ret;
	}

	ret = smu_get_driver_allowed_feature_mask(smu);
	if (ret)
		return ret;

	ret = smu_smc_hw_setup(smu);
	if (ret) {
		dev_err(adev->dev, "Failed to setup smc hw!\n");
		return ret;
	}

	/*
	 * Move maximum sustainable clock retrieving here considering
	 * 1. It is not needed on resume (from S3).
	 * 2. DAL settings come between .hw_init and .late_init of SMU.
	 *    And DAL needs to know the maximum sustainable clocks. Thus
	 *    it cannot be put in .late_init().
	 */
	ret = smu_init_max_sustainable_clocks(smu);
	if (ret) {
		dev_err(adev->dev, "Failed to init max sustainable clocks!\n");
		return ret;
	}

	adev->pm.dpm_enabled = true;

	dev_info(adev->dev, "SMU is initialized successfully!\n");

	return 0;
}
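/*
 * Decide how DPM features are torn down on hw_fini. BACO must stay
 * enabled when a reset, runtime suspend or hibernation relies on it;
 * on Navi1x with a custom pptable upload and on Sienna Cichlid BACO
 * entry, the disablement is skipped entirely since RLC/PMFW handles it.
 */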
static int smu_disable_dpms(struct smu_context *smu)
{
	struct amdgpu_device *adev = smu->adev;
	int ret = 0;
	bool use_baco = !smu->is_apu &&
		((amdgpu_in_reset(adev) &&
		  (amdgpu_asic_reset_method(adev) == AMD_RESET_METHOD_BACO)) ||
		 ((adev->in_runpm || adev->in_hibernate) && amdgpu_asic_supports_baco(adev)));

	/*
	 * For custom pptable uploading, skip the DPM features
	 * disable process on Navi1x ASICs.
	 *   - As the gfx related features are under control of
	 *     RLC on those ASICs. RLC reinitialization will be
	 *     needed to reenable them. That will cost much more
	 *     effort.
	 *   - SMU firmware can handle the DPM reenablement
	 *     properly.
	 */
	if (smu->uploading_custom_pp_table &&
	    (adev->asic_type >= CHIP_NAVI10) &&
	    (adev->asic_type <= CHIP_NAVY_FLOUNDER))
		return 0;

	/*
	 * For Sienna_Cichlid, PMFW will handle the features disablement properly
	 * on BACO in. Driver involvement is unnecessary.
	 */
	if ((adev->asic_type == CHIP_SIENNA_CICHLID) &&
	    use_baco)
		return 0;

	/*
	 * For gpu reset, runpm and hibernation through BACO,
	 * BACO feature has to be kept enabled.
	 */
	if (use_baco && smu_feature_is_enabled(smu, SMU_FEATURE_BACO_BIT)) {
		ret = smu_disable_all_features_with_exception(smu,
							      SMU_FEATURE_BACO_BIT);
		if (ret)
			dev_err(adev->dev, "Failed to disable smu features except BACO.\n");
	} else {
		ret = smu_system_features_control(smu, false);
		if (ret)
			dev_err(adev->dev, "Failed to disable smu features.\n");
	}

	if (adev->asic_type >= CHIP_NAVI10 &&
	    adev->gfx.rlc.funcs->stop)
		adev->gfx.rlc.funcs->stop(adev);

	return ret;
}
static int smu_smc_hw_cleanup(struct smu_context *smu)
{
	struct amdgpu_device *adev = smu->adev;
	int ret = 0;

	cancel_work_sync(&smu->throttling_logging_work);
	cancel_work_sync(&smu->interrupt_work);

	ret = smu_disable_thermal_alert(smu);
	if (ret) {
		dev_err(adev->dev, "Failed to disable thermal alert!\n");
		return ret;
	}

	ret = smu_disable_dpms(smu);
	if (ret) {
		dev_err(adev->dev, "Failed to disable dpm features!\n");
		return ret;
	}

	return 0;
}
static int smu_hw_fini(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	struct smu_context *smu = &adev->smu;

	if (amdgpu_sriov_vf(adev) && !amdgpu_sriov_is_pp_one_vf(adev))
		return 0;

	if (smu->is_apu) {
		smu_powergate_sdma(&adev->smu, true);
		smu_dpm_set_vcn_enable(smu, false);
		smu_dpm_set_jpeg_enable(smu, false);
	}

	if (!smu->pm_enabled)
		return 0;

	adev->pm.dpm_enabled = false;

	return smu_smc_hw_cleanup(smu);
}
int smu_reset(struct smu_context *smu)
{
	struct amdgpu_device *adev = smu->adev;
	int ret;

	amdgpu_gfx_off_ctrl(smu->adev, false);

	ret = smu_hw_fini(adev);
	if (ret)
		return ret;

	ret = smu_hw_init(adev);
	if (ret)
		return ret;

	ret = smu_late_init(adev);
	if (ret)
		return ret;

	amdgpu_gfx_off_ctrl(smu->adev, true);

	return 0;
}

static int smu_suspend(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	struct smu_context *smu = &adev->smu;
	int ret;

	if (amdgpu_sriov_vf(adev) && !amdgpu_sriov_is_pp_one_vf(adev))
		return 0;

	if (!smu->pm_enabled)
		return 0;

	adev->pm.dpm_enabled = false;

	ret = smu_smc_hw_cleanup(smu);
	if (ret)
		return ret;

	smu->watermarks_bitmap &= ~(WATERMARKS_LOADED);

	if (smu->is_apu)
		smu_set_gfx_cgpg(&adev->smu, false);

	return 0;
}

static int smu_resume(void *handle)
{
	int ret;
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	struct smu_context *smu = &adev->smu;

	if (amdgpu_sriov_vf(adev) && !amdgpu_sriov_is_pp_one_vf(adev))
		return 0;

	if (!smu->pm_enabled)
		return 0;

	dev_info(adev->dev, "SMU is resuming...\n");

	ret = smu_start_smc_engine(smu);
	if (ret) {
		dev_err(adev->dev, "SMC engine is not correctly up!\n");
		return ret;
	}

	ret = smu_smc_hw_setup(smu);
	if (ret) {
		dev_err(adev->dev, "Failed to setup smc hw!\n");
		return ret;
	}

	if (smu->is_apu)
		smu_set_gfx_cgpg(&adev->smu, true);

	smu->disable_uclk_switch = 0;

	adev->pm.dpm_enabled = true;

	dev_info(adev->dev, "SMU is resumed successfully!\n");

	return 0;
}
int smu_display_configuration_change(struct smu_context *smu,
				     const struct amd_pp_display_configuration *display_config)
{
	int index = 0;
	int num_of_active_display = 0;

	if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
		return -EOPNOTSUPP;

	if (!display_config)
		return -EINVAL;

	mutex_lock(&smu->mutex);

	smu_set_min_dcef_deep_sleep(smu,
				    display_config->min_dcef_deep_sleep_set_clk / 100);

	for (index = 0; index < display_config->num_path_including_non_display; index++) {
		if (display_config->displays[index].controller_id != 0)
			num_of_active_display++;
	}

	smu_set_active_display_count(smu, num_of_active_display);

	smu_store_cc6_data(smu, display_config->cpu_pstate_separation_time,
			   display_config->cpu_cc6_disable,
			   display_config->cpu_pstate_disable,
			   display_config->nb_pstate_switch_disable);

	mutex_unlock(&smu->mutex);

	return 0;
}

static int smu_get_clock_info(struct smu_context *smu,
			      struct smu_clock_info *clk_info,
			      enum smu_perf_level_designation designation)
{
	int ret;
	struct smu_performance_level level = {0};

	if (!clk_info)
		return -EINVAL;

	ret = smu_get_perf_level(smu, PERF_LEVEL_ACTIVITY, &level);
	if (ret)
		return -EINVAL;

	clk_info->min_mem_clk = level.memory_clock;
	clk_info->min_eng_clk = level.core_clock;
	clk_info->min_bus_bandwidth = level.non_local_mem_freq * level.non_local_mem_width;

	ret = smu_get_perf_level(smu, designation, &level);
	if (ret)
		return -EINVAL;

	clk_info->min_mem_clk = level.memory_clock;
	clk_info->min_eng_clk = level.core_clock;
	clk_info->min_bus_bandwidth = level.non_local_mem_freq * level.non_local_mem_width;

	return 0;
}

int smu_get_current_clocks(struct smu_context *smu,
			   struct amd_pp_clock_info *clocks)
{
	struct amd_pp_simple_clock_info simple_clocks = {0};
	struct smu_clock_info hw_clocks;
	int ret = 0;

	if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
		return -EOPNOTSUPP;

	mutex_lock(&smu->mutex);

	smu_get_dal_power_level(smu, &simple_clocks);

	if (smu->support_power_containment)
		ret = smu_get_clock_info(smu, &hw_clocks,
					 PERF_LEVEL_POWER_CONTAINMENT);
	else
		ret = smu_get_clock_info(smu, &hw_clocks, PERF_LEVEL_ACTIVITY);

	if (ret) {
		dev_err(smu->adev->dev, "Error in smu_get_clock_info\n");
		goto failed;
	}

	clocks->min_engine_clock = hw_clocks.min_eng_clk;
	clocks->max_engine_clock = hw_clocks.max_eng_clk;
	clocks->min_memory_clock = hw_clocks.min_mem_clk;
	clocks->max_memory_clock = hw_clocks.max_mem_clk;
	clocks->min_bus_bandwidth = hw_clocks.min_bus_bandwidth;
	clocks->max_bus_bandwidth = hw_clocks.max_bus_bandwidth;
	clocks->max_engine_clock_in_sr = hw_clocks.max_eng_clk;
	clocks->min_engine_clock_in_sr = hw_clocks.min_eng_clk;

	if (simple_clocks.level == 0)
		clocks->max_clocks_state = PP_DAL_POWERLEVEL_7;
	else
		clocks->max_clocks_state = simple_clocks.level;

	if (!smu_get_current_shallow_sleep_clocks(smu, &hw_clocks)) {
		clocks->max_engine_clock_in_sr = hw_clocks.max_eng_clk;
		clocks->min_engine_clock_in_sr = hw_clocks.min_eng_clk;
	}

failed:
	mutex_unlock(&smu->mutex);
	return ret;
}
static int smu_set_clockgating_state(void *handle,
				     enum amd_clockgating_state state)
{
	return 0;
}

static int smu_set_powergating_state(void *handle,
				     enum amd_powergating_state state)
{
	return 0;
}
static int smu_enable_umd_pstate(void *handle,
				 enum amd_dpm_forced_level *level)
{
	uint32_t profile_mode_mask = AMD_DPM_FORCED_LEVEL_PROFILE_STANDARD |
				     AMD_DPM_FORCED_LEVEL_PROFILE_MIN_SCLK |
				     AMD_DPM_FORCED_LEVEL_PROFILE_MIN_MCLK |
				     AMD_DPM_FORCED_LEVEL_PROFILE_PEAK;

	struct smu_context *smu = (struct smu_context *)(handle);
	struct smu_dpm_context *smu_dpm_ctx = &(smu->smu_dpm);

	if (!smu->is_apu && !smu_dpm_ctx->dpm_context)
		return -EINVAL;

	if (!(smu_dpm_ctx->dpm_level & profile_mode_mask)) {
		/* enter umd pstate, save current level, disable gfx cg */
		if (*level & profile_mode_mask) {
			smu_dpm_ctx->saved_dpm_level = smu_dpm_ctx->dpm_level;
			smu_dpm_ctx->enable_umd_pstate = true;
			amdgpu_device_ip_set_powergating_state(smu->adev,
							       AMD_IP_BLOCK_TYPE_GFX,
							       AMD_PG_STATE_UNGATE);
			amdgpu_device_ip_set_clockgating_state(smu->adev,
							       AMD_IP_BLOCK_TYPE_GFX,
							       AMD_CG_STATE_UNGATE);
			smu_gfx_ulv_control(smu, false);
			smu_deep_sleep_control(smu, false);
		}
	} else {
		/* exit umd pstate, restore level, enable gfx cg */
		if (!(*level & profile_mode_mask)) {
			if (*level == AMD_DPM_FORCED_LEVEL_PROFILE_EXIT)
				*level = smu_dpm_ctx->saved_dpm_level;
			smu_dpm_ctx->enable_umd_pstate = false;
			smu_deep_sleep_control(smu, true);
			smu_gfx_ulv_control(smu, true);
			amdgpu_device_ip_set_clockgating_state(smu->adev,
							       AMD_IP_BLOCK_TYPE_GFX,
							       AMD_CG_STATE_GATE);
			amdgpu_device_ip_set_powergating_state(smu->adev,
							       AMD_IP_BLOCK_TYPE_GFX,
							       AMD_PG_STATE_GATE);
		}
	}

	return 0;
}
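/*
 * Re-evaluate the power state: apply display config changes and clock
 * adjust rules, switch the performance level if it changed, and pick
 * the highest-priority requested workload profile. skip_display_settings
 * avoids redundant display updates for tasks that do not touch the
 * display configuration.
 */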
static int smu_adjust_power_state_dynamic(struct smu_context *smu,
					  enum amd_dpm_forced_level level,
					  bool skip_display_settings)
{
	int ret = 0;
	int index = 0;
	long workload;
	struct smu_dpm_context *smu_dpm_ctx = &(smu->smu_dpm);

	if (!skip_display_settings) {
		ret = smu_display_config_changed(smu);
		if (ret) {
			dev_err(smu->adev->dev, "Failed to change display config!");
			return ret;
		}
	}

	ret = smu_apply_clocks_adjust_rules(smu);
	if (ret) {
		dev_err(smu->adev->dev, "Failed to apply clocks adjust rules!");
		return ret;
	}

	if (!skip_display_settings) {
		ret = smu_notify_smc_display_config(smu);
		if (ret) {
			dev_err(smu->adev->dev, "Failed to notify smc display config!");
			return ret;
		}
	}

	if (smu_dpm_ctx->dpm_level != level) {
		ret = smu_asic_set_performance_level(smu, level);
		if (ret) {
			dev_err(smu->adev->dev, "Failed to set performance level!");
			return ret;
		}

		/* update the saved copy */
		smu_dpm_ctx->dpm_level = level;
	}

	if (smu_dpm_ctx->dpm_level != AMD_DPM_FORCED_LEVEL_MANUAL) {
		index = fls(smu->workload_mask);
		index = index > 0 && index <= WORKLOAD_POLICY_MAX ? index - 1 : 0;
		workload = smu->workload_setting[index];

		if (smu->power_profile_mode != workload)
			smu_set_power_profile_mode(smu, &workload, 0, false);
	}

	return ret;
}
int smu_handle_task(struct smu_context *smu,
		    enum amd_dpm_forced_level level,
		    enum amd_pp_task task_id,
		    bool lock_needed)
{
	int ret = 0;

	if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
		return -EOPNOTSUPP;

	if (lock_needed)
		mutex_lock(&smu->mutex);

	switch (task_id) {
	case AMD_PP_TASK_DISPLAY_CONFIG_CHANGE:
		ret = smu_pre_display_config_changed(smu);
		if (ret)
			goto out;
		ret = smu_set_cpu_power_state(smu);
		if (ret)
			goto out;
		ret = smu_adjust_power_state_dynamic(smu, level, false);
		break;
	case AMD_PP_TASK_COMPLETE_INIT:
	case AMD_PP_TASK_READJUST_POWER_STATE:
		ret = smu_adjust_power_state_dynamic(smu, level, true);
		break;
	default:
		break;
	}

out:
	if (lock_needed)
		mutex_unlock(&smu->mutex);

	return ret;
}
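/*
 * Requested workload profiles are tracked as bits in workload_mask,
 * indexed by per-profile priority; fls() then selects the
 * highest-priority profile that is still requested after this switch.
 */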
int smu_switch_power_profile(struct smu_context *smu,
			     enum PP_SMC_POWER_PROFILE type,
			     bool en)
{
	struct smu_dpm_context *smu_dpm_ctx = &(smu->smu_dpm);
	long workload;
	uint32_t index;

	if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
		return -EOPNOTSUPP;

	if (!(type < PP_SMC_POWER_PROFILE_CUSTOM))
		return -EINVAL;

	mutex_lock(&smu->mutex);

	if (!en) {
		smu->workload_mask &= ~(1 << smu->workload_prority[type]);
		index = fls(smu->workload_mask);
		index = index > 0 && index <= WORKLOAD_POLICY_MAX ? index - 1 : 0;
		workload = smu->workload_setting[index];
	} else {
		smu->workload_mask |= (1 << smu->workload_prority[type]);
		index = fls(smu->workload_mask);
		index = index <= WORKLOAD_POLICY_MAX ? index - 1 : 0;
		workload = smu->workload_setting[index];
	}

	if (smu_dpm_ctx->dpm_level != AMD_DPM_FORCED_LEVEL_MANUAL)
		smu_set_power_profile_mode(smu, &workload, 0, false);

	mutex_unlock(&smu->mutex);

	return 0;
}
enum amd_dpm_forced_level smu_get_performance_level(struct smu_context *smu)
{
	struct smu_dpm_context *smu_dpm_ctx = &(smu->smu_dpm);
	enum amd_dpm_forced_level level;

	if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
		return -EOPNOTSUPP;

	if (!smu->is_apu && !smu_dpm_ctx->dpm_context)
		return -EINVAL;

	mutex_lock(&(smu->mutex));
	level = smu_dpm_ctx->dpm_level;
	mutex_unlock(&(smu->mutex));

	return level;
}

int smu_force_performance_level(struct smu_context *smu, enum amd_dpm_forced_level level)
{
	struct smu_dpm_context *smu_dpm_ctx = &(smu->smu_dpm);
	int ret = 0;

	if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
		return -EOPNOTSUPP;

	if (!smu->is_apu && !smu_dpm_ctx->dpm_context)
		return -EINVAL;

	mutex_lock(&smu->mutex);

	ret = smu_enable_umd_pstate(smu, &level);
	if (ret) {
		mutex_unlock(&smu->mutex);
		return ret;
	}

	ret = smu_handle_task(smu, level,
			      AMD_PP_TASK_READJUST_POWER_STATE,
			      false);

	mutex_unlock(&smu->mutex);

	return ret;
}

int smu_set_display_count(struct smu_context *smu, uint32_t count)
{
	int ret = 0;

	if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
		return -EOPNOTSUPP;

	mutex_lock(&smu->mutex);
	ret = smu_init_display_count(smu, count);
	mutex_unlock(&smu->mutex);

	return ret;
}

int smu_force_clk_levels(struct smu_context *smu,
			 enum smu_clk_type clk_type,
			 uint32_t mask)
{
	struct smu_dpm_context *smu_dpm_ctx = &(smu->smu_dpm);
	int ret = 0;

	if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
		return -EOPNOTSUPP;

	if (smu_dpm_ctx->dpm_level != AMD_DPM_FORCED_LEVEL_MANUAL) {
		dev_dbg(smu->adev->dev, "force clock level is for dpm manual mode only.\n");
		return -EINVAL;
	}

	mutex_lock(&smu->mutex);

	if (smu->ppt_funcs && smu->ppt_funcs->force_clk_levels)
		ret = smu->ppt_funcs->force_clk_levels(smu, clk_type, mask);

	mutex_unlock(&smu->mutex);

	return ret;
}
/*
 * On system suspend or reset, the dpm_enabled flag is cleared so that
 * unsupported SMU services are gated. The MP1 state setting, however,
 * should still be allowed even with dpm_enabled cleared.
 */
int smu_set_mp1_state(struct smu_context *smu,
		      enum pp_mp1_state mp1_state)
{
	uint16_t msg;
	int ret;

	if (!smu->pm_enabled)
		return -EOPNOTSUPP;

	mutex_lock(&smu->mutex);

	switch (mp1_state) {
	case PP_MP1_STATE_SHUTDOWN:
		msg = SMU_MSG_PrepareMp1ForShutdown;
		break;
	case PP_MP1_STATE_UNLOAD:
		msg = SMU_MSG_PrepareMp1ForUnload;
		break;
	case PP_MP1_STATE_RESET:
		msg = SMU_MSG_PrepareMp1ForReset;
		break;
	case PP_MP1_STATE_NONE:
	default:
		mutex_unlock(&smu->mutex);
		return 0;
	}

	ret = smu_send_smc_msg(smu, msg, NULL);
	/* some asics may not support those messages */
	if (ret == -EINVAL)
		ret = 0;
	if (ret)
		dev_err(smu->adev->dev, "[PrepareMp1] Failed!\n");

	mutex_unlock(&smu->mutex);

	return ret;
}
int smu_set_df_cstate(struct smu_context *smu,
		      enum pp_df_cstate state)
{
	int ret = 0;

	if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
		return -EOPNOTSUPP;

	if (!smu->ppt_funcs || !smu->ppt_funcs->set_df_cstate)
		return 0;

	mutex_lock(&smu->mutex);

	ret = smu->ppt_funcs->set_df_cstate(smu, state);
	if (ret)
		dev_err(smu->adev->dev, "[SetDfCstate] failed!\n");

	mutex_unlock(&smu->mutex);

	return ret;
}

int smu_allow_xgmi_power_down(struct smu_context *smu, bool en)
{
	int ret = 0;

	if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
		return -EOPNOTSUPP;

	if (!smu->ppt_funcs || !smu->ppt_funcs->allow_xgmi_power_down)
		return 0;

	mutex_lock(&smu->mutex);

	ret = smu->ppt_funcs->allow_xgmi_power_down(smu, en);
	if (ret)
		dev_err(smu->adev->dev, "[AllowXgmiPowerDown] failed!\n");

	mutex_unlock(&smu->mutex);

	return ret;
}

int smu_write_watermarks_table(struct smu_context *smu)
{
	int ret = 0;

	if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
		return -EOPNOTSUPP;

	mutex_lock(&smu->mutex);

	ret = smu_set_watermarks_table(smu, NULL);

	mutex_unlock(&smu->mutex);

	return ret;
}

int smu_set_watermarks_for_clock_ranges(struct smu_context *smu,
					struct pp_smu_wm_range_sets *clock_ranges)
{
	int ret = 0;

	if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
		return -EOPNOTSUPP;

	if (smu->disable_watermark)
		return 0;

	mutex_lock(&smu->mutex);

	ret = smu_set_watermarks_table(smu, clock_ranges);

	mutex_unlock(&smu->mutex);

	return ret;
}

int smu_set_ac_dc(struct smu_context *smu)
{
	int ret = 0;

	if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
		return -EOPNOTSUPP;

	/* controlled by firmware */
	if (smu->dc_controlled_by_gpio)
		return 0;

	mutex_lock(&smu->mutex);
	ret = smu_set_power_source(smu,
				   smu->adev->pm.ac_power ? SMU_POWER_SOURCE_AC :
							    SMU_POWER_SOURCE_DC);
	if (ret)
		dev_err(smu->adev->dev, "Failed to switch to %s mode!\n",
			smu->adev->pm.ac_power ? "AC" : "DC");
	mutex_unlock(&smu->mutex);

	return ret;
}
const struct amd_ip_funcs smu_ip_funcs = {
	.name = "smu",
	.early_init = smu_early_init,
	.late_init = smu_late_init,
	.sw_init = smu_sw_init,
	.sw_fini = smu_sw_fini,
	.hw_init = smu_hw_init,
	.hw_fini = smu_hw_fini,
	.suspend = smu_suspend,
	.resume = smu_resume,
	.is_idle = NULL,
	.check_soft_reset = NULL,
	.wait_for_idle = NULL,
	.soft_reset = NULL,
	.set_clockgating_state = smu_set_clockgating_state,
	.set_powergating_state = smu_set_powergating_state,
	.enable_umd_pstate = smu_enable_umd_pstate,
};

const struct amdgpu_ip_block_version smu_v11_0_ip_block =
{
	.type = AMD_IP_BLOCK_TYPE_SMC,
	.major = 11,
	.minor = 0,
	.rev = 0,
	.funcs = &smu_ip_funcs,
};

const struct amdgpu_ip_block_version smu_v12_0_ip_block =
{
	.type = AMD_IP_BLOCK_TYPE_SMC,
	.major = 12,
	.minor = 0,
	.rev = 0,
	.funcs = &smu_ip_funcs,
};
int smu_load_microcode(struct smu_context *smu)
{
	int ret = 0;

	if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
		return -EOPNOTSUPP;

	mutex_lock(&smu->mutex);

	if (smu->ppt_funcs->load_microcode)
		ret = smu->ppt_funcs->load_microcode(smu);

	mutex_unlock(&smu->mutex);

	return ret;
}

int smu_check_fw_status(struct smu_context *smu)
{
	int ret = 0;

	if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
		return -EOPNOTSUPP;

	mutex_lock(&smu->mutex);

	if (smu->ppt_funcs->check_fw_status)
		ret = smu->ppt_funcs->check_fw_status(smu);

	mutex_unlock(&smu->mutex);

	return ret;
}

int smu_set_gfx_cgpg(struct smu_context *smu, bool enabled)
{
	int ret = 0;

	mutex_lock(&smu->mutex);

	if (smu->ppt_funcs->set_gfx_cgpg)
		ret = smu->ppt_funcs->set_gfx_cgpg(smu, enabled);

	mutex_unlock(&smu->mutex);

	return ret;
}

int smu_set_fan_speed_rpm(struct smu_context *smu, uint32_t speed)
{
	int ret = 0;

	if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
		return -EOPNOTSUPP;

	mutex_lock(&smu->mutex);

	if (smu->ppt_funcs->set_fan_speed_rpm)
		ret = smu->ppt_funcs->set_fan_speed_rpm(smu, speed);

	mutex_unlock(&smu->mutex);

	return ret;
}
int smu_get_power_limit(struct smu_context *smu,
			uint32_t *limit,
			bool max_setting)
{
	if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
		return -EOPNOTSUPP;

	mutex_lock(&smu->mutex);

	*limit = (max_setting ? smu->max_power_limit : smu->current_power_limit);

	mutex_unlock(&smu->mutex);

	return 0;
}
int smu_set_power_limit(struct smu_context *smu, uint32_t limit)
{
	int ret = 0;

	if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
		return -EOPNOTSUPP;

	mutex_lock(&smu->mutex);

	if (limit > smu->max_power_limit) {
		dev_err(smu->adev->dev,
			"New power limit (%d) is over the max allowed %d\n",
			limit, smu->max_power_limit);
		ret = -EINVAL;
		goto out;
	}

	if (!limit)
		limit = smu->current_power_limit;

	if (smu->ppt_funcs->set_power_limit)
		ret = smu->ppt_funcs->set_power_limit(smu, limit);

out:
	mutex_unlock(&smu->mutex);

	return ret;
}
int smu_print_clk_levels(struct smu_context *smu, enum smu_clk_type clk_type, char *buf)
{
	int ret = 0;

	if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
		return -EOPNOTSUPP;

	mutex_lock(&smu->mutex);

	if (smu->ppt_funcs->print_clk_levels)
		ret = smu->ppt_funcs->print_clk_levels(smu, clk_type, buf);

	mutex_unlock(&smu->mutex);

	return ret;
}

int smu_get_od_percentage(struct smu_context *smu, enum smu_clk_type type)
{
	int ret = 0;

	if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
		return -EOPNOTSUPP;

	mutex_lock(&smu->mutex);

	if (smu->ppt_funcs->get_od_percentage)
		ret = smu->ppt_funcs->get_od_percentage(smu, type);

	mutex_unlock(&smu->mutex);

	return ret;
}

int smu_set_od_percentage(struct smu_context *smu, enum smu_clk_type type, uint32_t value)
{
	int ret = 0;

	if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
		return -EOPNOTSUPP;

	mutex_lock(&smu->mutex);

	if (smu->ppt_funcs->set_od_percentage)
		ret = smu->ppt_funcs->set_od_percentage(smu, type, value);

	mutex_unlock(&smu->mutex);

	return ret;
}

int smu_od_edit_dpm_table(struct smu_context *smu,
			  enum PP_OD_DPM_TABLE_COMMAND type,
			  long *input, uint32_t size)
{
	int ret = 0;

	if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
		return -EOPNOTSUPP;

	mutex_lock(&smu->mutex);

	if (smu->ppt_funcs->od_edit_dpm_table) {
		ret = smu->ppt_funcs->od_edit_dpm_table(smu, type, input, size);
		if (!ret && (type == PP_OD_COMMIT_DPM_TABLE))
			ret = smu_handle_task(smu,
					      smu->smu_dpm.dpm_level,
					      AMD_PP_TASK_READJUST_POWER_STATE,
					      false);
	}

	mutex_unlock(&smu->mutex);

	return ret;
}
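/*
 * Sensor reads are first offered to the ASIC-specific read_sensor hook;
 * only sensors it does not handle fall through to the generic cases
 * below.
 */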
int smu_read_sensor(struct smu_context *smu,
		    enum amd_pp_sensors sensor,
		    void *data, uint32_t *size)
{
	struct smu_umd_pstate_table *pstate_table =
				&smu->pstate_table;
	int ret = 0;

	if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
		return -EOPNOTSUPP;

	if (!data || !size)
		return -EINVAL;

	mutex_lock(&smu->mutex);

	if (smu->ppt_funcs->read_sensor)
		if (!smu->ppt_funcs->read_sensor(smu, sensor, data, size))
			goto unlock;

	switch (sensor) {
	case AMDGPU_PP_SENSOR_STABLE_PSTATE_SCLK:
		*((uint32_t *)data) = pstate_table->gfxclk_pstate.standard * 100;
		*size = 4;
		break;
	case AMDGPU_PP_SENSOR_STABLE_PSTATE_MCLK:
		*((uint32_t *)data) = pstate_table->uclk_pstate.standard * 100;
		*size = 4;
		break;
	case AMDGPU_PP_SENSOR_ENABLED_SMC_FEATURES_MASK:
		ret = smu_feature_get_enabled_mask(smu, (uint32_t *)data, 2);
		*size = 8;
		break;
	case AMDGPU_PP_SENSOR_UVD_POWER:
		*(uint32_t *)data = smu_feature_is_enabled(smu, SMU_FEATURE_DPM_UVD_BIT) ? 1 : 0;
		*size = 4;
		break;
	case AMDGPU_PP_SENSOR_VCE_POWER:
		*(uint32_t *)data = smu_feature_is_enabled(smu, SMU_FEATURE_DPM_VCE_BIT) ? 1 : 0;
		*size = 4;
		break;
	case AMDGPU_PP_SENSOR_VCN_POWER_STATE:
		*(uint32_t *)data = atomic_read(&smu->smu_power.power_gate.vcn_gated) ? 0 : 1;
		*size = 4;
		break;
	case AMDGPU_PP_SENSOR_MIN_FAN_RPM:
		*(uint32_t *)data = 0;
		*size = 4;
		break;
	default:
		*size = 0;
		ret = -EOPNOTSUPP;
		break;
	}

unlock:
	mutex_unlock(&smu->mutex);

	return ret;
}
int smu_get_power_profile_mode(struct smu_context *smu, char *buf)
{
	int ret = 0;

	if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
		return -EOPNOTSUPP;

	mutex_lock(&smu->mutex);

	if (smu->ppt_funcs->get_power_profile_mode)
		ret = smu->ppt_funcs->get_power_profile_mode(smu, buf);

	mutex_unlock(&smu->mutex);

	return ret;
}

int smu_set_power_profile_mode(struct smu_context *smu,
			       long *param,
			       uint32_t param_size,
			       bool lock_needed)
{
	int ret = 0;

	if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
		return -EOPNOTSUPP;

	if (lock_needed)
		mutex_lock(&smu->mutex);

	if (smu->ppt_funcs->set_power_profile_mode)
		ret = smu->ppt_funcs->set_power_profile_mode(smu, param, param_size);

	if (lock_needed)
		mutex_unlock(&smu->mutex);

	return ret;
}

int smu_get_fan_control_mode(struct smu_context *smu)
{
	int ret = 0;

	if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
		return -EOPNOTSUPP;

	mutex_lock(&smu->mutex);

	if (smu->ppt_funcs->get_fan_control_mode)
		ret = smu->ppt_funcs->get_fan_control_mode(smu);

	mutex_unlock(&smu->mutex);

	return ret;
}

int smu_set_fan_control_mode(struct smu_context *smu, int value)
{
	int ret = 0;

	if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
		return -EOPNOTSUPP;

	mutex_lock(&smu->mutex);

	if (smu->ppt_funcs->set_fan_control_mode)
		ret = smu->ppt_funcs->set_fan_control_mode(smu, value);

	mutex_unlock(&smu->mutex);

	return ret;
}
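/*
 * Fan speed percentages are converted to and from RPM using the maximum
 * fan RPM reported by the ASIC (smu->fan_max_rpm), with the percent
 * value clamped to 100.
 */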
int smu_get_fan_speed_percent(struct smu_context *smu, uint32_t *speed)
{
	int ret = 0;
	uint32_t percent;
	uint32_t current_rpm;

	if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
		return -EOPNOTSUPP;

	mutex_lock(&smu->mutex);

	if (smu->ppt_funcs->get_fan_speed_rpm) {
		ret = smu->ppt_funcs->get_fan_speed_rpm(smu, &current_rpm);
		if (!ret) {
			percent = current_rpm * 100 / smu->fan_max_rpm;
			*speed = percent > 100 ? 100 : percent;
		}
	}

	mutex_unlock(&smu->mutex);

	return ret;
}
int smu_set_fan_speed_percent(struct smu_context *smu, uint32_t speed)
{
	int ret = 0;
	uint32_t rpm;

	if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
		return -EOPNOTSUPP;

	mutex_lock(&smu->mutex);

	if (smu->ppt_funcs->set_fan_speed_rpm) {
		if (speed > 100)
			speed = 100;
		rpm = speed * smu->fan_max_rpm / 100;
		ret = smu->ppt_funcs->set_fan_speed_rpm(smu, rpm);
	}

	mutex_unlock(&smu->mutex);

	return ret;
}
int smu_get_fan_speed_rpm(struct smu_context *smu, uint32_t *speed)
{
	int ret = 0;

	if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
		return -EOPNOTSUPP;

	mutex_lock(&smu->mutex);

	if (smu->ppt_funcs->get_fan_speed_rpm)
		ret = smu->ppt_funcs->get_fan_speed_rpm(smu, speed);

	mutex_unlock(&smu->mutex);

	return ret;
}

int smu_set_deep_sleep_dcefclk(struct smu_context *smu, int clk)
{
	int ret = 0;

	if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
		return -EOPNOTSUPP;

	mutex_lock(&smu->mutex);

	ret = smu_set_min_dcef_deep_sleep(smu, clk);

	mutex_unlock(&smu->mutex);

	return ret;
}

int smu_get_clock_by_type(struct smu_context *smu,
			  enum amd_pp_clock_type type,
			  struct amd_pp_clocks *clocks)
{
	int ret = 0;

	if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
		return -EOPNOTSUPP;

	mutex_lock(&smu->mutex);

	if (smu->ppt_funcs->get_clock_by_type)
		ret = smu->ppt_funcs->get_clock_by_type(smu, type, clocks);

	mutex_unlock(&smu->mutex);

	return ret;
}

int smu_get_max_high_clocks(struct smu_context *smu,
			    struct amd_pp_simple_clock_info *clocks)
{
	int ret = 0;

	if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
		return -EOPNOTSUPP;

	mutex_lock(&smu->mutex);

	if (smu->ppt_funcs->get_max_high_clocks)
		ret = smu->ppt_funcs->get_max_high_clocks(smu, clocks);

	mutex_unlock(&smu->mutex);

	return ret;
}

int smu_get_clock_by_type_with_latency(struct smu_context *smu,
				       enum smu_clk_type clk_type,
				       struct pp_clock_levels_with_latency *clocks)
{
	int ret = 0;

	if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
		return -EOPNOTSUPP;

	mutex_lock(&smu->mutex);

	if (smu->ppt_funcs->get_clock_by_type_with_latency)
		ret = smu->ppt_funcs->get_clock_by_type_with_latency(smu, clk_type, clocks);

	mutex_unlock(&smu->mutex);

	return ret;
}

int smu_get_clock_by_type_with_voltage(struct smu_context *smu,
				       enum amd_pp_clock_type type,
				       struct pp_clock_levels_with_voltage *clocks)
{
	int ret = 0;

	if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
		return -EOPNOTSUPP;

	mutex_lock(&smu->mutex);

	if (smu->ppt_funcs->get_clock_by_type_with_voltage)
		ret = smu->ppt_funcs->get_clock_by_type_with_voltage(smu, type, clocks);

	mutex_unlock(&smu->mutex);

	return ret;
}

int smu_display_clock_voltage_request(struct smu_context *smu,
				      struct pp_display_clock_request *clock_req)
{
	int ret = 0;

	if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
		return -EOPNOTSUPP;

	mutex_lock(&smu->mutex);

	if (smu->ppt_funcs->display_clock_voltage_request)
		ret = smu->ppt_funcs->display_clock_voltage_request(smu, clock_req);

	mutex_unlock(&smu->mutex);

	return ret;
}

int smu_display_disable_memory_clock_switch(struct smu_context *smu, bool disable_memory_clock_switch)
{
	int ret = -EINVAL;

	if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
		return -EOPNOTSUPP;

	mutex_lock(&smu->mutex);

	if (smu->ppt_funcs->display_disable_memory_clock_switch)
		ret = smu->ppt_funcs->display_disable_memory_clock_switch(smu, disable_memory_clock_switch);

	mutex_unlock(&smu->mutex);

	return ret;
}

int smu_notify_smu_enable_pwe(struct smu_context *smu)
{
	int ret = 0;

	if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
		return -EOPNOTSUPP;

	mutex_lock(&smu->mutex);

	if (smu->ppt_funcs->notify_smu_enable_pwe)
		ret = smu->ppt_funcs->notify_smu_enable_pwe(smu);

	mutex_unlock(&smu->mutex);

	return ret;
}
int smu_set_xgmi_pstate(struct smu_context *smu,
			uint32_t pstate)
{
	int ret = 0;

	if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
		return -EOPNOTSUPP;

	mutex_lock(&smu->mutex);

	if (smu->ppt_funcs->set_xgmi_pstate)
		ret = smu->ppt_funcs->set_xgmi_pstate(smu, pstate);

	mutex_unlock(&smu->mutex);

	if (ret)
		dev_err(smu->adev->dev, "Failed to set XGMI pstate!\n");

	return ret;
}

int smu_set_azalia_d3_pme(struct smu_context *smu)
{
	int ret = 0;

	if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
		return -EOPNOTSUPP;

	mutex_lock(&smu->mutex);

	if (smu->ppt_funcs->set_azalia_d3_pme)
		ret = smu->ppt_funcs->set_azalia_d3_pme(smu);

	mutex_unlock(&smu->mutex);

	return ret;
}
/*
 * On system suspend or reset, the dpm_enabled flag is cleared so that
 * unsupported SMU services are gated.
 *
 * The BACO and mode1 reset services, however, should still be allowed
 * as they remain supported and necessary in those states.
 */
bool smu_baco_is_support(struct smu_context *smu)
{
	bool ret = false;

	if (!smu->pm_enabled)
		return false;

	mutex_lock(&smu->mutex);

	if (smu->ppt_funcs && smu->ppt_funcs->baco_is_support)
		ret = smu->ppt_funcs->baco_is_support(smu);

	mutex_unlock(&smu->mutex);

	return ret;
}
int smu_baco_get_state(struct smu_context *smu, enum smu_baco_state *state)
{
	if (!smu->ppt_funcs->baco_get_state)
		return -EINVAL;

	mutex_lock(&smu->mutex);
	*state = smu->ppt_funcs->baco_get_state(smu);
	mutex_unlock(&smu->mutex);

	return 0;
}
int smu_baco_enter(struct smu_context *smu)
{
	int ret = 0;

	if (!smu->pm_enabled)
		return -EOPNOTSUPP;

	mutex_lock(&smu->mutex);

	if (smu->ppt_funcs->baco_enter)
		ret = smu->ppt_funcs->baco_enter(smu);

	mutex_unlock(&smu->mutex);

	if (ret)
		dev_err(smu->adev->dev, "Failed to enter BACO state!\n");

	return ret;
}

int smu_baco_exit(struct smu_context *smu)
{
	int ret = 0;

	if (!smu->pm_enabled)
		return -EOPNOTSUPP;

	mutex_lock(&smu->mutex);

	if (smu->ppt_funcs->baco_exit)
		ret = smu->ppt_funcs->baco_exit(smu);

	mutex_unlock(&smu->mutex);

	if (ret)
		dev_err(smu->adev->dev, "Failed to exit BACO state!\n");

	return ret;
}

bool smu_mode1_reset_is_support(struct smu_context *smu)
{
	bool ret = false;

	if (!smu->pm_enabled)
		return false;

	mutex_lock(&smu->mutex);

	if (smu->ppt_funcs && smu->ppt_funcs->mode1_reset_is_support)
		ret = smu->ppt_funcs->mode1_reset_is_support(smu);

	mutex_unlock(&smu->mutex);

	return ret;
}

int smu_mode1_reset(struct smu_context *smu)
{
	int ret = 0;

	if (!smu->pm_enabled)
		return -EOPNOTSUPP;

	mutex_lock(&smu->mutex);

	if (smu->ppt_funcs->mode1_reset)
		ret = smu->ppt_funcs->mode1_reset(smu);

	mutex_unlock(&smu->mutex);

	return ret;
}

int smu_mode2_reset(struct smu_context *smu)
{
	int ret = 0;

	if (!smu->pm_enabled)
		return -EOPNOTSUPP;

	mutex_lock(&smu->mutex);

	if (smu->ppt_funcs->mode2_reset)
		ret = smu->ppt_funcs->mode2_reset(smu);

	mutex_unlock(&smu->mutex);

	if (ret)
		dev_err(smu->adev->dev, "Mode2 reset failed!\n");

	return ret;
}
int smu_get_max_sustainable_clocks_by_dc(struct smu_context *smu,
					 struct pp_smu_nv_clock_table *max_clocks)
{
	int ret = 0;

	if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
		return -EOPNOTSUPP;

	mutex_lock(&smu->mutex);

	if (smu->ppt_funcs->get_max_sustainable_clocks_by_dc)
		ret = smu->ppt_funcs->get_max_sustainable_clocks_by_dc(smu, max_clocks);

	mutex_unlock(&smu->mutex);

	return ret;
}

int smu_get_uclk_dpm_states(struct smu_context *smu,
			    unsigned int *clock_values_in_khz,
			    unsigned int *num_states)
{
	int ret = 0;

	if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
		return -EOPNOTSUPP;

	mutex_lock(&smu->mutex);

	if (smu->ppt_funcs->get_uclk_dpm_states)
		ret = smu->ppt_funcs->get_uclk_dpm_states(smu, clock_values_in_khz, num_states);

	mutex_unlock(&smu->mutex);

	return ret;
}

enum amd_pm_state_type smu_get_current_power_state(struct smu_context *smu)
{
	enum amd_pm_state_type pm_state = POWER_STATE_TYPE_DEFAULT;

	if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
		return -EOPNOTSUPP;

	mutex_lock(&smu->mutex);

	if (smu->ppt_funcs->get_current_power_state)
		pm_state = smu->ppt_funcs->get_current_power_state(smu);

	mutex_unlock(&smu->mutex);

	return pm_state;
}

int smu_get_dpm_clock_table(struct smu_context *smu,
			    struct dpm_clocks *clock_table)
{
	int ret = 0;

	if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
		return -EOPNOTSUPP;

	mutex_lock(&smu->mutex);

	if (smu->ppt_funcs->get_dpm_clock_table)
		ret = smu->ppt_funcs->get_dpm_clock_table(smu, clock_table);

	mutex_unlock(&smu->mutex);

	return ret;
}

ssize_t smu_sys_get_gpu_metrics(struct smu_context *smu,
				void **table)
{
	ssize_t size;

	if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
		return -EOPNOTSUPP;

	if (!smu->ppt_funcs->get_gpu_metrics)
		return -EOPNOTSUPP;

	mutex_lock(&smu->mutex);

	size = smu->ppt_funcs->get_gpu_metrics(smu, table);

	mutex_unlock(&smu->mutex);

	return size;
}

int smu_enable_mgpu_fan_boost(struct smu_context *smu)
{
	int ret = 0;

	if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
		return -EOPNOTSUPP;

	mutex_lock(&smu->mutex);

	if (smu->ppt_funcs->enable_mgpu_fan_boost)
		ret = smu->ppt_funcs->enable_mgpu_fan_boost(smu);

	mutex_unlock(&smu->mutex);

	return ret;
}