/*
 * Copyright 2019 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 */
#define SWSMU_CODE_LAYER_L1

#include <linux/firmware.h>
#include <linux/pci.h>

#include "amdgpu.h"
#include "amdgpu_smu.h"
#include "smu_internal.h"

#include "arcturus_ppt.h"
#include "navi10_ppt.h"
#include "sienna_cichlid_ppt.h"
#include "renoir_ppt.h"
#include "amd_pcie.h"
/*
 * DO NOT use these for err/warn/info/debug messages.
 * Use dev_err, dev_warn, dev_info and dev_dbg instead.
 * They are more MGPU friendly.
 */
#undef pr_err
#undef pr_warn
#undef pr_info
#undef pr_debug
size_t smu_sys_get_pp_feature_mask(struct smu_context *smu, char *buf)
{
	size_t size = 0;

	if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
		return -EOPNOTSUPP;

	mutex_lock(&smu->mutex);

	size = smu_get_pp_feature_mask(smu, buf);

	mutex_unlock(&smu->mutex);

	return size;
}

int smu_sys_set_pp_feature_mask(struct smu_context *smu, uint64_t new_mask)
{
	int ret = 0;

	if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
		return -EOPNOTSUPP;

	mutex_lock(&smu->mutex);

	ret = smu_set_pp_feature_mask(smu, new_mask);

	mutex_unlock(&smu->mutex);

	return ret;
}
int smu_get_status_gfxoff(struct amdgpu_device *adev, uint32_t *value)
{
	int ret = 0;
	struct smu_context *smu = &adev->smu;

	if (is_support_sw_smu(adev) && smu->ppt_funcs->get_gfx_off_status)
		*value = smu_get_gfx_off_status(smu);
	else
		ret = -EINVAL;

	return ret;
}

int smu_set_soft_freq_range(struct smu_context *smu,
			    enum smu_clk_type clk_type,
			    uint32_t min,
			    uint32_t max)
{
	int ret = 0;

	mutex_lock(&smu->mutex);

	if (smu->ppt_funcs->set_soft_freq_limited_range)
		ret = smu->ppt_funcs->set_soft_freq_limited_range(smu,
								  clk_type,
								  min,
								  max);

	mutex_unlock(&smu->mutex);

	return ret;
}

int smu_get_dpm_freq_range(struct smu_context *smu,
			   enum smu_clk_type clk_type,
			   uint32_t *min,
			   uint32_t *max)
{
	int ret = 0;

	if (!min && !max)
		return -EINVAL;

	mutex_lock(&smu->mutex);

	if (smu->ppt_funcs->get_dpm_ultimate_freq)
		ret = smu->ppt_funcs->get_dpm_ultimate_freq(smu,
							    clk_type,
							    min,
							    max);

	mutex_unlock(&smu->mutex);

	return ret;
}
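/*
 * VCN/JPEG gating helpers. The *_locked variants expect the caller to
 * hold the corresponding *_gate_lock; the current gating state is tracked
 * in the vcn_gated/jpeg_gated atomics so a request that matches the
 * current state is a no-op.
 */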
static int smu_dpm_set_vcn_enable_locked(struct smu_context *smu,
					 bool enable)
{
	struct smu_power_context *smu_power = &smu->smu_power;
	struct smu_power_gate *power_gate = &smu_power->power_gate;
	int ret = 0;

	if (!smu->ppt_funcs->dpm_set_vcn_enable)
		return 0;

	if (atomic_read(&power_gate->vcn_gated) ^ enable)
		return 0;

	ret = smu->ppt_funcs->dpm_set_vcn_enable(smu, enable);
	if (!ret)
		atomic_set(&power_gate->vcn_gated, !enable);

	return ret;
}

static int smu_dpm_set_vcn_enable(struct smu_context *smu,
				  bool enable)
{
	struct smu_power_context *smu_power = &smu->smu_power;
	struct smu_power_gate *power_gate = &smu_power->power_gate;
	int ret = 0;

	mutex_lock(&power_gate->vcn_gate_lock);

	ret = smu_dpm_set_vcn_enable_locked(smu, enable);

	mutex_unlock(&power_gate->vcn_gate_lock);

	return ret;
}

static int smu_dpm_set_jpeg_enable_locked(struct smu_context *smu,
					  bool enable)
{
	struct smu_power_context *smu_power = &smu->smu_power;
	struct smu_power_gate *power_gate = &smu_power->power_gate;
	int ret = 0;

	if (!smu->ppt_funcs->dpm_set_jpeg_enable)
		return 0;

	if (atomic_read(&power_gate->jpeg_gated) ^ enable)
		return 0;

	ret = smu->ppt_funcs->dpm_set_jpeg_enable(smu, enable);
	if (!ret)
		atomic_set(&power_gate->jpeg_gated, !enable);

	return ret;
}

static int smu_dpm_set_jpeg_enable(struct smu_context *smu,
				   bool enable)
{
	struct smu_power_context *smu_power = &smu->smu_power;
	struct smu_power_gate *power_gate = &smu_power->power_gate;
	int ret = 0;

	mutex_lock(&power_gate->jpeg_gate_lock);

	ret = smu_dpm_set_jpeg_enable_locked(smu, enable);

	mutex_unlock(&power_gate->jpeg_gate_lock);

	return ret;
}
/**
 * smu_dpm_set_power_gate - power gate/ungate the specific IP block
 *
 * @smu:        smu_context pointer
 * @block_type: the IP block to power gate/ungate
 * @gate:       to power gate if true, ungate otherwise
 *
 * This API uses no smu->mutex lock protection due to:
 * 1. It is either called by other IP blocks (gfx/sdma/vcn/uvd/vce),
 *    and the caller guarantees the call is race-condition free.
 * 2. Or it is called on a user request to set
 *    power_dpm_force_performance_level. In that case, the smu->mutex lock
 *    protection is already enforced on the parent API
 *    smu_force_performance_level of the call path.
 */
int smu_dpm_set_power_gate(struct smu_context *smu, uint32_t block_type,
			   bool gate)
{
	int ret = 0;

	if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
		return -EOPNOTSUPP;

	switch (block_type) {
	/*
	 * Some legacy code of amdgpu_vcn.c and vcn_v2*.c still uses
	 * AMD_IP_BLOCK_TYPE_UVD for VCN. So, here both of them are kept.
	 */
	case AMD_IP_BLOCK_TYPE_UVD:
	case AMD_IP_BLOCK_TYPE_VCN:
		ret = smu_dpm_set_vcn_enable(smu, !gate);
		if (ret)
			dev_err(smu->adev->dev, "Failed to power %s VCN!\n",
				gate ? "gate" : "ungate");
		break;
	case AMD_IP_BLOCK_TYPE_GFX:
		ret = smu_gfx_off_control(smu, gate);
		if (ret)
			dev_err(smu->adev->dev, "Failed to %s gfxoff!\n",
				gate ? "enable" : "disable");
		break;
	case AMD_IP_BLOCK_TYPE_SDMA:
		ret = smu_powergate_sdma(smu, gate);
		if (ret)
			dev_err(smu->adev->dev, "Failed to power %s SDMA!\n",
				gate ? "gate" : "ungate");
		break;
	case AMD_IP_BLOCK_TYPE_JPEG:
		ret = smu_dpm_set_jpeg_enable(smu, !gate);
		if (ret)
			dev_err(smu->adev->dev, "Failed to power %s JPEG!\n",
				gate ? "gate" : "ungate");
		break;
	default:
		dev_err(smu->adev->dev, "Unsupported block type!\n");
		ret = -EINVAL;
		break;
	}

	return ret;
}
int smu_get_power_num_states(struct smu_context *smu,
			     struct pp_states_info *state_info)
{
	if (!state_info)
		return -EINVAL;

	/* power states are not supported; report one default state */
	memset(state_info, 0, sizeof(struct pp_states_info));
	state_info->nums = 1;
	state_info->states[0] = POWER_STATE_TYPE_DEFAULT;

	return 0;
}
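/*
 * All ASICs from Arcturus onwards (including Renoir and the Navi family)
 * are driven by this software SMU path.
 */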
bool is_support_sw_smu(struct amdgpu_device *adev)
{
	if (adev->asic_type >= CHIP_ARCTURUS)
		return true;

	return false;
}

int smu_sys_get_pp_table(struct smu_context *smu, void **table)
{
	struct smu_table_context *smu_table = &smu->smu_table;
	uint32_t powerplay_table_size;

	if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
		return -EOPNOTSUPP;

	if (!smu_table->power_play_table && !smu_table->hardcode_pptable)
		return -EINVAL;

	mutex_lock(&smu->mutex);

	if (smu_table->hardcode_pptable)
		*table = smu_table->hardcode_pptable;
	else
		*table = smu_table->power_play_table;

	powerplay_table_size = smu_table->power_play_table_size;

	mutex_unlock(&smu->mutex);

	return powerplay_table_size;
}
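/*
 * smu_sys_set_pp_table - install a user supplied pptable. The buffer is
 * validated against its ATOM header size, cached in hardcode_pptable and
 * made the active power play table, then the SMU is reset so the new
 * table takes effect.
 */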
int smu_sys_set_pp_table(struct smu_context *smu, void *buf, size_t size)
{
	struct smu_table_context *smu_table = &smu->smu_table;
	ATOM_COMMON_TABLE_HEADER *header = (ATOM_COMMON_TABLE_HEADER *)buf;
	int ret = 0;

	if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
		return -EOPNOTSUPP;

	if (header->usStructureSize != size) {
		dev_err(smu->adev->dev, "pp table size not matched!\n");
		return -EIO;
	}

	mutex_lock(&smu->mutex);
	if (!smu_table->hardcode_pptable)
		smu_table->hardcode_pptable = kzalloc(size, GFP_KERNEL);
	if (!smu_table->hardcode_pptable) {
		ret = -ENOMEM;
		goto failed;
	}

	memcpy(smu_table->hardcode_pptable, buf, size);
	smu_table->power_play_table = smu_table->hardcode_pptable;
	smu_table->power_play_table_size = size;

	/*
	 * Special hw_fini action (for Navi1x, the DPMs disablement will be
	 * skipped) may be needed for custom pptable uploading.
	 */
	smu->uploading_custom_pp_table = true;

	ret = smu_reset(smu);
	if (ret)
		dev_info(smu->adev->dev, "smu reset failed, ret = %d\n", ret);

	smu->uploading_custom_pp_table = false;

failed:
	mutex_unlock(&smu->mutex);
	return ret;
}
static int smu_get_driver_allowed_feature_mask(struct smu_context *smu)
{
	struct smu_feature *feature = &smu->smu_feature;
	int ret = 0;
	uint32_t allowed_feature_mask[SMU_FEATURE_MAX/32];

	bitmap_zero(feature->allowed, SMU_FEATURE_MAX);

	ret = smu_get_allowed_feature_mask(smu, allowed_feature_mask,
					   SMU_FEATURE_MAX/32);
	if (ret)
		return ret;

	bitmap_or(feature->allowed, feature->allowed,
		  (unsigned long *)allowed_feature_mask,
		  feature->feature_num);

	return ret;
}
static int smu_set_funcs(struct amdgpu_device *adev)
{
	struct smu_context *smu = &adev->smu;

	if (adev->pm.pp_feature & PP_OVERDRIVE_MASK)
		smu->od_enabled = true;

	switch (adev->asic_type) {
	case CHIP_NAVI10:
	case CHIP_NAVI14:
	case CHIP_NAVI12:
		navi10_set_ppt_funcs(smu);
		break;
	case CHIP_ARCTURUS:
		adev->pm.pp_feature &= ~PP_GFXOFF_MASK;
		arcturus_set_ppt_funcs(smu);
		/* OD is not supported on Arcturus */
		smu->od_enabled = false;
		break;
	case CHIP_SIENNA_CICHLID:
	case CHIP_NAVY_FLOUNDER:
		sienna_cichlid_set_ppt_funcs(smu);
		break;
	case CHIP_RENOIR:
		renoir_set_ppt_funcs(smu);
		break;
	default:
		return -EINVAL;
	}

	return 0;
}
static int smu_early_init(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	struct smu_context *smu = &adev->smu;

	smu->pm_enabled = !!amdgpu_dpm;
	smu->is_apu = false;
	mutex_init(&smu->mutex);

	return smu_set_funcs(adev);
}
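/*
 * Retrieving the default DPM tables requires VCN/JPEG to be powered up
 * temporarily: both blocks are ungated here (under their gate locks) and
 * the previous gating state is restored once the ppt callback returns.
 */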
static int smu_set_default_dpm_table(struct smu_context *smu)
{
	struct smu_power_context *smu_power = &smu->smu_power;
	struct smu_power_gate *power_gate = &smu_power->power_gate;
	int vcn_gate, jpeg_gate;
	int ret = 0;

	if (!smu->ppt_funcs->set_default_dpm_table)
		return 0;

	mutex_lock(&power_gate->vcn_gate_lock);
	mutex_lock(&power_gate->jpeg_gate_lock);

	vcn_gate = atomic_read(&power_gate->vcn_gated);
	jpeg_gate = atomic_read(&power_gate->jpeg_gated);

	ret = smu_dpm_set_vcn_enable_locked(smu, true);
	if (ret)
		goto err0_out;

	ret = smu_dpm_set_jpeg_enable_locked(smu, true);
	if (ret)
		goto err1_out;

	ret = smu->ppt_funcs->set_default_dpm_table(smu);
	if (ret)
		dev_err(smu->adev->dev,
			"Failed to setup default dpm clock tables!\n");

	smu_dpm_set_jpeg_enable_locked(smu, !jpeg_gate);
err1_out:
	smu_dpm_set_vcn_enable_locked(smu, !vcn_gate);
err0_out:
	mutex_unlock(&power_gate->jpeg_gate_lock);
	mutex_unlock(&power_gate->vcn_gate_lock);

	return ret;
}
static int smu_late_init(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	struct smu_context *smu = &adev->smu;
	int ret = 0;

	if (!smu->pm_enabled)
		return 0;

	ret = smu_post_init(smu);
	if (ret) {
		dev_err(adev->dev, "Failed to post smu init!\n");
		return ret;
	}

	ret = smu_set_default_od_settings(smu);
	if (ret) {
		dev_err(adev->dev, "Failed to setup default OD settings!\n");
		return ret;
	}

	/*
	 * Set the initial values (retrieved from vbios) in the dpm tables
	 * context, such as gfxclk, memclk and dcefclk, and enable the DPM
	 * feature for each dpm table.
	 */
	ret = smu_set_default_dpm_table(smu);
	if (ret) {
		dev_err(adev->dev, "Failed to setup default dpm clock tables!\n");
		return ret;
	}

	ret = smu_populate_umd_state_clk(smu);
	if (ret) {
		dev_err(adev->dev, "Failed to populate UMD state clocks!\n");
		return ret;
	}

	ret = smu_get_asic_power_limits(smu);
	if (ret) {
		dev_err(adev->dev, "Failed to get asic power limits!\n");
		return ret;
	}

	smu_get_unique_id(smu);

	smu_get_fan_parameters(smu);

	smu_handle_task(&adev->smu,
			smu->smu_dpm.dpm_level,
			AMD_PP_TASK_COMPLETE_INIT,
			false);

	return 0;
}
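/*
 * The driver table BO is sized to the largest SMC table (PMSTATUSLOG
 * excluded, which gets its own BO for tools) so that a single VRAM
 * buffer can be reused to stage any driver<->SMU table transfer.
 */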
static int smu_init_fb_allocations(struct smu_context *smu)
{
	struct amdgpu_device *adev = smu->adev;
	struct smu_table_context *smu_table = &smu->smu_table;
	struct smu_table *tables = smu_table->tables;
	struct smu_table *driver_table = &(smu_table->driver_table);
	uint32_t max_table_size = 0;
	int ret, i;

	/* VRAM allocation for tool table */
	if (tables[SMU_TABLE_PMSTATUSLOG].size) {
		ret = amdgpu_bo_create_kernel(adev,
					      tables[SMU_TABLE_PMSTATUSLOG].size,
					      tables[SMU_TABLE_PMSTATUSLOG].align,
					      tables[SMU_TABLE_PMSTATUSLOG].domain,
					      &tables[SMU_TABLE_PMSTATUSLOG].bo,
					      &tables[SMU_TABLE_PMSTATUSLOG].mc_address,
					      &tables[SMU_TABLE_PMSTATUSLOG].cpu_addr);
		if (ret) {
			dev_err(adev->dev, "VRAM allocation for tool table failed!\n");
			return ret;
		}
	}

	/* VRAM allocation for driver table */
	for (i = 0; i < SMU_TABLE_COUNT; i++) {
		if (tables[i].size == 0)
			continue;

		if (i == SMU_TABLE_PMSTATUSLOG)
			continue;

		if (max_table_size < tables[i].size)
			max_table_size = tables[i].size;
	}

	driver_table->size = max_table_size;
	driver_table->align = PAGE_SIZE;
	driver_table->domain = AMDGPU_GEM_DOMAIN_VRAM;

	ret = amdgpu_bo_create_kernel(adev,
				      driver_table->size,
				      driver_table->align,
				      driver_table->domain,
				      &driver_table->bo,
				      &driver_table->mc_address,
				      &driver_table->cpu_addr);
	if (ret) {
		dev_err(adev->dev, "VRAM allocation for driver table failed!\n");
		if (tables[SMU_TABLE_PMSTATUSLOG].mc_address)
			amdgpu_bo_free_kernel(&tables[SMU_TABLE_PMSTATUSLOG].bo,
					      &tables[SMU_TABLE_PMSTATUSLOG].mc_address,
					      &tables[SMU_TABLE_PMSTATUSLOG].cpu_addr);
	}

	return ret;
}
static int smu_fini_fb_allocations(struct smu_context *smu)
{
	struct smu_table_context *smu_table = &smu->smu_table;
	struct smu_table *tables = smu_table->tables;
	struct smu_table *driver_table = &(smu_table->driver_table);

	if (tables[SMU_TABLE_PMSTATUSLOG].mc_address)
		amdgpu_bo_free_kernel(&tables[SMU_TABLE_PMSTATUSLOG].bo,
				      &tables[SMU_TABLE_PMSTATUSLOG].mc_address,
				      &tables[SMU_TABLE_PMSTATUSLOG].cpu_addr);

	amdgpu_bo_free_kernel(&driver_table->bo,
			      &driver_table->mc_address,
			      &driver_table->cpu_addr);

	return 0;
}
/**
 * smu_alloc_memory_pool - allocate memory pool in the system memory
 *
 * @smu: smu_context pointer
 *
 * This memory pool will be used for SMC use and the messages
 * SetSystemVirtualDramAddr and DramLogSetDramAddr are used to notify
 * the SMC of its location.
 *
 * Returns 0 on success, error on failure.
 */
static int smu_alloc_memory_pool(struct smu_context *smu)
{
	struct amdgpu_device *adev = smu->adev;
	struct smu_table_context *smu_table = &smu->smu_table;
	struct smu_table *memory_pool = &smu_table->memory_pool;
	uint64_t pool_size = smu->pool_size;
	int ret = 0;

	if (pool_size == SMU_MEMORY_POOL_SIZE_ZERO)
		return ret;

	memory_pool->size = pool_size;
	memory_pool->align = PAGE_SIZE;
	memory_pool->domain = AMDGPU_GEM_DOMAIN_GTT;

	switch (pool_size) {
	case SMU_MEMORY_POOL_SIZE_256_MB:
	case SMU_MEMORY_POOL_SIZE_512_MB:
	case SMU_MEMORY_POOL_SIZE_1_GB:
	case SMU_MEMORY_POOL_SIZE_2_GB:
		ret = amdgpu_bo_create_kernel(adev,
					      memory_pool->size,
					      memory_pool->align,
					      memory_pool->domain,
					      &memory_pool->bo,
					      &memory_pool->mc_address,
					      &memory_pool->cpu_addr);
		if (ret)
			dev_err(adev->dev, "VRAM allocation for dramlog failed!\n");
		break;
	default:
		break;
	}

	return ret;
}
static int smu_free_memory_pool(struct smu_context *smu)
{
	struct smu_table_context *smu_table = &smu->smu_table;
	struct smu_table *memory_pool = &smu_table->memory_pool;

	if (memory_pool->size == SMU_MEMORY_POOL_SIZE_ZERO)
		return 0;

	amdgpu_bo_free_kernel(&memory_pool->bo,
			      &memory_pool->mc_address,
			      &memory_pool->cpu_addr);

	memset(memory_pool, 0, sizeof(struct smu_table));

	return 0;
}

static int smu_alloc_dummy_read_table(struct smu_context *smu)
{
	struct smu_table_context *smu_table = &smu->smu_table;
	struct smu_table *dummy_read_1_table =
			&smu_table->dummy_read_1_table;
	struct amdgpu_device *adev = smu->adev;
	int ret = 0;

	dummy_read_1_table->size = 0x40000;
	dummy_read_1_table->align = PAGE_SIZE;
	dummy_read_1_table->domain = AMDGPU_GEM_DOMAIN_VRAM;

	ret = amdgpu_bo_create_kernel(adev,
				      dummy_read_1_table->size,
				      dummy_read_1_table->align,
				      dummy_read_1_table->domain,
				      &dummy_read_1_table->bo,
				      &dummy_read_1_table->mc_address,
				      &dummy_read_1_table->cpu_addr);
	if (ret)
		dev_err(adev->dev, "VRAM allocation for dummy read table failed!\n");

	return ret;
}

static void smu_free_dummy_read_table(struct smu_context *smu)
{
	struct smu_table_context *smu_table = &smu->smu_table;
	struct smu_table *dummy_read_1_table =
			&smu_table->dummy_read_1_table;

	amdgpu_bo_free_kernel(&dummy_read_1_table->bo,
			      &dummy_read_1_table->mc_address,
			      &dummy_read_1_table->cpu_addr);

	memset(dummy_read_1_table, 0, sizeof(struct smu_table));
}
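/*
 * Software-side table setup, called from smu_sw_init: SMC table
 * structures first, then the power/dpm contexts, then the VRAM/GTT
 * buffers (frame buffer allocations, memory pool, dummy read table) and
 * finally the SMU i2c bus.
 */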
static int smu_smc_table_sw_init(struct smu_context *smu)
{
	int ret;

	/*
	 * Create the smu_table structure, and init smc tables such as
	 * TABLE_PPTABLE, TABLE_WATERMARKS, TABLE_SMU_METRICS, etc.
	 */
	ret = smu_init_smc_tables(smu);
	if (ret) {
		dev_err(smu->adev->dev, "Failed to init smc tables!\n");
		return ret;
	}

	/*
	 * Create the smu_power_context structure, and allocate
	 * smu_dpm_context and context size to fill the smu_power_context
	 * data.
	 */
	ret = smu_init_power(smu);
	if (ret) {
		dev_err(smu->adev->dev, "Failed to init smu power!\n");
		return ret;
	}

	/*
	 * Allocate vram bos to store smc table contents.
	 */
	ret = smu_init_fb_allocations(smu);
	if (ret)
		return ret;

	ret = smu_alloc_memory_pool(smu);
	if (ret)
		return ret;

	ret = smu_alloc_dummy_read_table(smu);
	if (ret)
		return ret;

	ret = smu_i2c_init(smu, &smu->adev->pm.smu_i2c);
	if (ret)
		return ret;

	return 0;
}
static int smu_smc_table_sw_fini(struct smu_context *smu)
{
	int ret;

	smu_i2c_fini(smu, &smu->adev->pm.smu_i2c);

	smu_free_dummy_read_table(smu);

	ret = smu_free_memory_pool(smu);
	if (ret)
		return ret;

	ret = smu_fini_fb_allocations(smu);
	if (ret)
		return ret;

	ret = smu_fini_power(smu);
	if (ret) {
		dev_err(smu->adev->dev, "Failed to fini smu power!\n");
		return ret;
	}

	ret = smu_fini_smc_tables(smu);
	if (ret) {
		dev_err(smu->adev->dev, "Failed to fini smc tables!\n");
		return ret;
	}

	return 0;
}
static void smu_throttling_logging_work_fn(struct work_struct *work)
{
	struct smu_context *smu = container_of(work, struct smu_context,
					       throttling_logging_work);

	smu_log_thermal_throttling(smu);
}
static int smu_sw_init(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	struct smu_context *smu = &adev->smu;
	int ret;

	smu->pool_size = adev->pm.smu_prv_buffer_size;
	smu->smu_feature.feature_num = SMU_FEATURE_MAX;
	mutex_init(&smu->smu_feature.mutex);
	bitmap_zero(smu->smu_feature.supported, SMU_FEATURE_MAX);
	bitmap_zero(smu->smu_feature.enabled, SMU_FEATURE_MAX);
	bitmap_zero(smu->smu_feature.allowed, SMU_FEATURE_MAX);

	mutex_init(&smu->smu_baco.mutex);
	smu->smu_baco.state = SMU_BACO_STATE_EXIT;
	smu->smu_baco.platform_support = false;

	mutex_init(&smu->sensor_lock);
	mutex_init(&smu->metrics_lock);
	mutex_init(&smu->message_lock);

	INIT_WORK(&smu->throttling_logging_work, smu_throttling_logging_work_fn);
	atomic64_set(&smu->throttle_int_counter, 0);
	smu->watermarks_bitmap = 0;
	smu->power_profile_mode = PP_SMC_POWER_PROFILE_BOOTUP_DEFAULT;
	smu->default_power_profile_mode = PP_SMC_POWER_PROFILE_BOOTUP_DEFAULT;

	atomic_set(&smu->smu_power.power_gate.vcn_gated, 1);
	atomic_set(&smu->smu_power.power_gate.jpeg_gated, 1);
	mutex_init(&smu->smu_power.power_gate.vcn_gate_lock);
	mutex_init(&smu->smu_power.power_gate.jpeg_gate_lock);

	smu->workload_mask = 1 << smu->workload_prority[PP_SMC_POWER_PROFILE_BOOTUP_DEFAULT];
	smu->workload_prority[PP_SMC_POWER_PROFILE_BOOTUP_DEFAULT] = 0;
	smu->workload_prority[PP_SMC_POWER_PROFILE_FULLSCREEN3D] = 1;
	smu->workload_prority[PP_SMC_POWER_PROFILE_POWERSAVING] = 2;
	smu->workload_prority[PP_SMC_POWER_PROFILE_VIDEO] = 3;
	smu->workload_prority[PP_SMC_POWER_PROFILE_VR] = 4;
	smu->workload_prority[PP_SMC_POWER_PROFILE_COMPUTE] = 5;
	smu->workload_prority[PP_SMC_POWER_PROFILE_CUSTOM] = 6;

	smu->workload_setting[0] = PP_SMC_POWER_PROFILE_BOOTUP_DEFAULT;
	smu->workload_setting[1] = PP_SMC_POWER_PROFILE_FULLSCREEN3D;
	smu->workload_setting[2] = PP_SMC_POWER_PROFILE_POWERSAVING;
	smu->workload_setting[3] = PP_SMC_POWER_PROFILE_VIDEO;
	smu->workload_setting[4] = PP_SMC_POWER_PROFILE_VR;
	smu->workload_setting[5] = PP_SMC_POWER_PROFILE_COMPUTE;
	smu->workload_setting[6] = PP_SMC_POWER_PROFILE_CUSTOM;
	smu->display_config = &adev->pm.pm_display_cfg;

	smu->smu_dpm.dpm_level = AMD_DPM_FORCED_LEVEL_AUTO;
	smu->smu_dpm.requested_dpm_level = AMD_DPM_FORCED_LEVEL_AUTO;
	ret = smu_init_microcode(smu);
	if (ret) {
		dev_err(adev->dev, "Failed to load smu firmware!\n");
		return ret;
	}

	ret = smu_smc_table_sw_init(smu);
	if (ret) {
		dev_err(adev->dev, "Failed to sw init smc table!\n");
		return ret;
	}

	ret = smu_register_irq_handler(smu);
	if (ret) {
		dev_err(adev->dev, "Failed to register smc irq handler!\n");
		return ret;
	}

	return 0;
}
static int smu_sw_fini(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	struct smu_context *smu = &adev->smu;
	int ret;

	ret = smu_smc_table_sw_fini(smu);
	if (ret) {
		dev_err(adev->dev, "Failed to sw fini smc table!\n");
		return ret;
	}

	smu_fini_microcode(smu);

	return 0;
}
static int smu_get_thermal_temperature_range(struct smu_context *smu)
{
	struct amdgpu_device *adev = smu->adev;
	struct smu_temperature_range *range =
				&smu->thermal_range;
	int ret = 0;

	if (!smu->ppt_funcs->get_thermal_temperature_range)
		return 0;

	ret = smu->ppt_funcs->get_thermal_temperature_range(smu, range);
	if (ret)
		return ret;

	adev->pm.dpm.thermal.min_temp = range->min;
	adev->pm.dpm.thermal.max_temp = range->max;
	adev->pm.dpm.thermal.max_edge_emergency_temp = range->edge_emergency_max;
	adev->pm.dpm.thermal.min_hotspot_temp = range->hotspot_min;
	adev->pm.dpm.thermal.max_hotspot_crit_temp = range->hotspot_crit_max;
	adev->pm.dpm.thermal.max_hotspot_emergency_temp = range->hotspot_emergency_max;
	adev->pm.dpm.thermal.min_mem_temp = range->mem_min;
	adev->pm.dpm.thermal.max_mem_crit_temp = range->mem_crit_max;
	adev->pm.dpm.thermal.max_mem_emergency_temp = range->mem_emergency_max;

	return ret;
}
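/*
 * Hardware setup sequence shared by hw_init and resume: tell the SMU
 * where the driver/tool tables and the memory pool live, upload the
 * pptable, run BTC, enable the allowed features, then program the PCIe
 * parameters, thermal ranges/alerts, power source and the minimum deep
 * sleep dcefclk.
 */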
static int smu_smc_hw_setup(struct smu_context *smu)
{
	struct amdgpu_device *adev = smu->adev;
	uint32_t pcie_gen = 0, pcie_width = 0;
	int ret;

	if (adev->in_suspend && smu_is_dpm_running(smu)) {
		dev_info(adev->dev, "dpm has been enabled\n");
		return 0;
	}

	ret = smu_init_display_count(smu, 0);
	if (ret) {
		dev_info(adev->dev, "Failed to pre-set display count as 0!\n");
		return ret;
	}

	ret = smu_set_driver_table_location(smu);
	if (ret) {
		dev_err(adev->dev, "Failed to SetDriverDramAddr!\n");
		return ret;
	}

	/*
	 * Set PMSTATUSLOG table bo address with SetToolsDramAddr MSG for tools.
	 */
	ret = smu_set_tool_table_location(smu);
	if (ret) {
		dev_err(adev->dev, "Failed to SetToolsDramAddr!\n");
		return ret;
	}

	/*
	 * Use the messages SetSystemVirtualDramAddr and DramLogSetDramAddr
	 * to notify the SMC of the memory pool location.
	 */
	ret = smu_notify_memory_pool_location(smu);
	if (ret) {
		dev_err(adev->dev, "Failed to SetDramLogDramAddr!\n");
		return ret;
	}

	/* smu_dump_pptable(smu); */
	/*
	 * Copy the pptable bo in vram to the smc with SMU MSGs such as
	 * SetDriverDramAddr and TransferTableDram2Smu.
	 */
	ret = smu_write_pptable(smu);
	if (ret) {
		dev_err(adev->dev, "Failed to transfer pptable to SMC!\n");
		return ret;
	}

	/* issue Run*Btc msg */
	ret = smu_run_btc(smu);
	if (ret)
		return ret;

	ret = smu_feature_set_allowed_mask(smu);
	if (ret) {
		dev_err(adev->dev, "Failed to set driver allowed features mask!\n");
		return ret;
	}

	ret = smu_system_features_control(smu, true);
	if (ret) {
		dev_err(adev->dev, "Failed to enable requested dpm features!\n");
		return ret;
	}

	if (!smu_is_dpm_running(smu))
		dev_info(adev->dev, "dpm has been disabled\n");

	if (adev->pm.pcie_gen_mask & CAIL_PCIE_LINK_SPEED_SUPPORT_GEN4)
		pcie_gen = 3;
	else if (adev->pm.pcie_gen_mask & CAIL_PCIE_LINK_SPEED_SUPPORT_GEN3)
		pcie_gen = 2;
	else if (adev->pm.pcie_gen_mask & CAIL_PCIE_LINK_SPEED_SUPPORT_GEN2)
		pcie_gen = 1;
	else if (adev->pm.pcie_gen_mask & CAIL_PCIE_LINK_SPEED_SUPPORT_GEN1)
		pcie_gen = 0;

	/* Bit 31:16: LCLK DPM level. 0 is DPM0, and 1 is DPM1
	 * Bit 15:8:  PCIE GEN, 0 to 3 corresponds to GEN1 to GEN4
	 * Bit 7:0:   PCIE lane width, 1 to 7 corresponds to x1 to x32
	 */
	if (adev->pm.pcie_mlw_mask & CAIL_PCIE_LINK_WIDTH_SUPPORT_X16)
		pcie_width = 6;
	else if (adev->pm.pcie_mlw_mask & CAIL_PCIE_LINK_WIDTH_SUPPORT_X12)
		pcie_width = 5;
	else if (adev->pm.pcie_mlw_mask & CAIL_PCIE_LINK_WIDTH_SUPPORT_X8)
		pcie_width = 4;
	else if (adev->pm.pcie_mlw_mask & CAIL_PCIE_LINK_WIDTH_SUPPORT_X4)
		pcie_width = 3;
	else if (adev->pm.pcie_mlw_mask & CAIL_PCIE_LINK_WIDTH_SUPPORT_X2)
		pcie_width = 2;
	else if (adev->pm.pcie_mlw_mask & CAIL_PCIE_LINK_WIDTH_SUPPORT_X1)
		pcie_width = 1;
	ret = smu_update_pcie_parameters(smu, pcie_gen, pcie_width);
	if (ret) {
		dev_err(adev->dev, "Attempt to override pcie params failed!\n");
		return ret;
	}

	ret = smu_get_thermal_temperature_range(smu);
	if (ret) {
		dev_err(adev->dev, "Failed to get thermal temperature ranges!\n");
		return ret;
	}

	ret = smu_enable_thermal_alert(smu);
	if (ret) {
		dev_err(adev->dev, "Failed to enable thermal alert!\n");
		return ret;
	}

	/*
	 * For Navi1X, manually switch it to AC mode as PMFW
	 * may boot it with DC mode.
	 */
	ret = smu_set_power_source(smu,
				   adev->pm.ac_power ? SMU_POWER_SOURCE_AC :
				   SMU_POWER_SOURCE_DC);
	if (ret) {
		dev_err(adev->dev, "Failed to switch to %s mode!\n",
			adev->pm.ac_power ? "AC" : "DC");
		return ret;
	}

	ret = smu_notify_display_change(smu);
	if (ret)
		return ret;

	/*
	 * Set min deep sleep dce fclk with bootup value from vbios via
	 * SetMinDeepSleepDcefclk MSG.
	 */
	ret = smu_set_min_dcef_deep_sleep(smu,
					  smu->smu_table.boot_values.dcefclk / 100);
	if (ret)
		return ret;

	return ret;
}
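/*
 * Bring up the SMC engine: load the SMU microcode directly only when PSP
 * front-door loading is not in use (and only on pre-Navi10 parts), then
 * verify the firmware is up and that its driver interface version
 * matches what this driver was built against.
 */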
static int smu_start_smc_engine(struct smu_context *smu)
{
	struct amdgpu_device *adev = smu->adev;
	int ret = 0;

	if (adev->firmware.load_type != AMDGPU_FW_LOAD_PSP) {
		if (adev->asic_type < CHIP_NAVI10) {
			if (smu->ppt_funcs->load_microcode) {
				ret = smu->ppt_funcs->load_microcode(smu);
				if (ret)
					return ret;
			}
		}
	}

	if (smu->ppt_funcs->check_fw_status) {
		ret = smu->ppt_funcs->check_fw_status(smu);
		if (ret) {
			dev_err(adev->dev, "SMC is not ready\n");
			return ret;
		}
	}

	/*
	 * Send msg GetDriverIfVersion to check if the return value matches
	 * the DRIVER_IF_VERSION of the smc header.
	 */
	ret = smu_check_fw_version(smu);
	if (ret)
		return ret;

	return ret;
}
static int smu_hw_init(void *handle)
{
	int ret;
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	struct smu_context *smu = &adev->smu;

	if (amdgpu_sriov_vf(adev) && !amdgpu_sriov_is_pp_one_vf(adev)) {
		smu->pm_enabled = false;
		return 0;
	}

	ret = smu_start_smc_engine(smu);
	if (ret) {
		dev_err(adev->dev, "SMC engine is not correctly up!\n");
		return ret;
	}

	if (smu->is_apu) {
		smu_powergate_sdma(&adev->smu, false);
		smu_dpm_set_vcn_enable(smu, true);
		smu_dpm_set_jpeg_enable(smu, true);
		smu_set_gfx_cgpg(&adev->smu, true);
	}

	if (!smu->pm_enabled)
		return 0;

	/* get boot_values from vbios to set revision, gfxclk, etc. */
	ret = smu_get_vbios_bootup_values(smu);
	if (ret) {
		dev_err(adev->dev, "Failed to get VBIOS boot clock values!\n");
		return ret;
	}

	ret = smu_setup_pptable(smu);
	if (ret) {
		dev_err(adev->dev, "Failed to setup pptable!\n");
		return ret;
	}

	ret = smu_get_driver_allowed_feature_mask(smu);
	if (ret)
		return ret;

	ret = smu_smc_hw_setup(smu);
	if (ret) {
		dev_err(adev->dev, "Failed to setup smc hw!\n");
		return ret;
	}

	/*
	 * Move maximum sustainable clock retrieving here considering
	 * 1. It is not needed on resume (from S3).
	 * 2. DAL settings come between .hw_init and .late_init of SMU.
	 *    And DAL needs to know the maximum sustainable clocks. Thus
	 *    it cannot be put in .late_init().
	 */
	ret = smu_init_max_sustainable_clocks(smu);
	if (ret) {
		dev_err(adev->dev, "Failed to init max sustainable clocks!\n");
		return ret;
	}

	adev->pm.dpm_enabled = true;

	dev_info(adev->dev, "SMU is initialized successfully!\n");

	return 0;
}
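/*
 * Decide how (and whether) to disable the DPM features on teardown.
 * use_baco is true for a dGPU going through BACO, either for a
 * BACO-based GPU reset or for runtime PM/hibernation on a BACO-capable
 * board; in that case every feature except BACO itself is disabled so
 * that BACO entry still works.
 */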
static int smu_disable_dpms(struct smu_context *smu)
{
	struct amdgpu_device *adev = smu->adev;
	int ret = 0;
	bool use_baco = !smu->is_apu &&
		((amdgpu_in_reset(adev) &&
		  (amdgpu_asic_reset_method(adev) == AMD_RESET_METHOD_BACO)) ||
		 ((adev->in_runpm || adev->in_hibernate) && amdgpu_asic_supports_baco(adev)));

	/*
	 * For custom pptable uploading, skip the DPM features
	 * disable process on Navi1x ASICs.
	 *   - As the gfx related features are under control of
	 *     RLC on those ASICs. RLC reinitialization will be
	 *     needed to reenable them. That will cost much more
	 *     effort.
	 *   - SMU firmware can handle the DPM reenablement
	 *     properly.
	 */
	if (smu->uploading_custom_pp_table &&
	    (adev->asic_type >= CHIP_NAVI10) &&
	    (adev->asic_type <= CHIP_NAVY_FLOUNDER))
		return 0;

	/*
	 * For Sienna_Cichlid, PMFW will handle the features disablement
	 * properly on BACO in. Driver involvement is unnecessary.
	 */
	if ((adev->asic_type == CHIP_SIENNA_CICHLID) &&
	     use_baco)
		return 0;

	/*
	 * For gpu reset, runpm and hibernation through BACO,
	 * BACO feature has to be kept enabled.
	 */
	if (use_baco && smu_feature_is_enabled(smu, SMU_FEATURE_BACO_BIT)) {
		ret = smu_disable_all_features_with_exception(smu,
							      SMU_FEATURE_BACO_BIT);
		if (ret)
			dev_err(adev->dev, "Failed to disable smu features except BACO.\n");
	} else {
		ret = smu_system_features_control(smu, false);
		if (ret)
			dev_err(adev->dev, "Failed to disable smu features.\n");
	}

	if (adev->asic_type >= CHIP_NAVI10 &&
	    adev->gfx.rlc.funcs->stop)
		adev->gfx.rlc.funcs->stop(adev);

	return ret;
}
static int smu_smc_hw_cleanup(struct smu_context *smu)
{
	struct amdgpu_device *adev = smu->adev;
	int ret = 0;

	cancel_work_sync(&smu->throttling_logging_work);

	ret = smu_disable_thermal_alert(smu);
	if (ret) {
		dev_err(adev->dev, "Failed to disable thermal alert!\n");
		return ret;
	}

	ret = smu_disable_dpms(smu);
	if (ret) {
		dev_err(adev->dev, "Failed to disable dpm features!\n");
		return ret;
	}

	return 0;
}
static int smu_hw_fini(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	struct smu_context *smu = &adev->smu;
	int ret;

	if (amdgpu_sriov_vf(adev) && !amdgpu_sriov_is_pp_one_vf(adev))
		return 0;

	if (smu->is_apu) {
		smu_powergate_sdma(&adev->smu, true);
		smu_dpm_set_vcn_enable(smu, false);
		smu_dpm_set_jpeg_enable(smu, false);
	}

	if (!smu->pm_enabled)
		return 0;

	adev->pm.dpm_enabled = false;

	ret = smu_smc_hw_cleanup(smu);
	if (ret)
		return ret;

	return 0;
}
int smu_reset(struct smu_context *smu)
{
	struct amdgpu_device *adev = smu->adev;
	int ret;

	amdgpu_gfx_off_ctrl(smu->adev, false);

	ret = smu_hw_fini(adev);
	if (ret)
		return ret;

	ret = smu_hw_init(adev);
	if (ret)
		return ret;

	ret = smu_late_init(adev);
	if (ret)
		return ret;

	amdgpu_gfx_off_ctrl(smu->adev, true);

	return 0;
}
static int smu_suspend(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	struct smu_context *smu = &adev->smu;
	int ret;

	if (amdgpu_sriov_vf(adev) && !amdgpu_sriov_is_pp_one_vf(adev))
		return 0;

	if (!smu->pm_enabled)
		return 0;

	adev->pm.dpm_enabled = false;

	ret = smu_smc_hw_cleanup(smu);
	if (ret)
		return ret;

	smu->watermarks_bitmap &= ~(WATERMARKS_LOADED);

	if (smu->is_apu)
		smu_set_gfx_cgpg(&adev->smu, false);

	return 0;
}
static int smu_resume(void *handle)
{
	int ret;
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	struct smu_context *smu = &adev->smu;

	if (amdgpu_sriov_vf(adev) && !amdgpu_sriov_is_pp_one_vf(adev))
		return 0;

	if (!smu->pm_enabled)
		return 0;

	dev_info(adev->dev, "SMU is resuming...\n");

	ret = smu_start_smc_engine(smu);
	if (ret) {
		dev_err(adev->dev, "SMC engine is not correctly up!\n");
		return ret;
	}

	ret = smu_smc_hw_setup(smu);
	if (ret) {
		dev_err(adev->dev, "Failed to setup smc hw!\n");
		return ret;
	}

	if (smu->is_apu)
		smu_set_gfx_cgpg(&adev->smu, true);

	smu->disable_uclk_switch = 0;

	adev->pm.dpm_enabled = true;

	dev_info(adev->dev, "SMU is resumed successfully!\n");

	return 0;
}
int smu_display_configuration_change(struct smu_context *smu,
				     const struct amd_pp_display_configuration *display_config)
{
	int index = 0;
	int num_of_active_display = 0;

	if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
		return -EOPNOTSUPP;

	if (!display_config)
		return -EINVAL;

	mutex_lock(&smu->mutex);

	smu_set_min_dcef_deep_sleep(smu,
				    display_config->min_dcef_deep_sleep_set_clk / 100);

	for (index = 0; index < display_config->num_path_including_non_display; index++) {
		if (display_config->displays[index].controller_id != 0)
			num_of_active_display++;
	}

	smu_set_active_display_count(smu, num_of_active_display);

	smu_store_cc6_data(smu, display_config->cpu_pstate_separation_time,
			   display_config->cpu_cc6_disable,
			   display_config->cpu_pstate_disable,
			   display_config->nb_pstate_switch_disable);

	mutex_unlock(&smu->mutex);

	return 0;
}
static int smu_get_clock_info(struct smu_context *smu,
			      struct smu_clock_info *clk_info,
			      enum smu_perf_level_designation designation)
{
	int ret;
	struct smu_performance_level level = {0};

	if (!clk_info)
		return -EINVAL;

	ret = smu_get_perf_level(smu, PERF_LEVEL_ACTIVITY, &level);
	if (ret)
		return -EINVAL;

	clk_info->min_mem_clk = level.memory_clock;
	clk_info->min_eng_clk = level.core_clock;
	clk_info->min_bus_bandwidth = level.non_local_mem_freq * level.non_local_mem_width;

	ret = smu_get_perf_level(smu, designation, &level);
	if (ret)
		return -EINVAL;

	clk_info->min_mem_clk = level.memory_clock;
	clk_info->min_eng_clk = level.core_clock;
	clk_info->min_bus_bandwidth = level.non_local_mem_freq * level.non_local_mem_width;

	return 0;
}
int smu_get_current_clocks(struct smu_context *smu,
			   struct amd_pp_clock_info *clocks)
{
	struct amd_pp_simple_clock_info simple_clocks = {0};
	struct smu_clock_info hw_clocks;
	int ret = 0;

	if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
		return -EOPNOTSUPP;

	mutex_lock(&smu->mutex);

	smu_get_dal_power_level(smu, &simple_clocks);

	if (smu->support_power_containment)
		ret = smu_get_clock_info(smu, &hw_clocks,
					 PERF_LEVEL_POWER_CONTAINMENT);
	else
		ret = smu_get_clock_info(smu, &hw_clocks, PERF_LEVEL_ACTIVITY);

	if (ret) {
		dev_err(smu->adev->dev, "Error in smu_get_clock_info\n");
		goto failed;
	}

	clocks->min_engine_clock = hw_clocks.min_eng_clk;
	clocks->max_engine_clock = hw_clocks.max_eng_clk;
	clocks->min_memory_clock = hw_clocks.min_mem_clk;
	clocks->max_memory_clock = hw_clocks.max_mem_clk;
	clocks->min_bus_bandwidth = hw_clocks.min_bus_bandwidth;
	clocks->max_bus_bandwidth = hw_clocks.max_bus_bandwidth;
	clocks->max_engine_clock_in_sr = hw_clocks.max_eng_clk;
	clocks->min_engine_clock_in_sr = hw_clocks.min_eng_clk;

	if (simple_clocks.level == 0)
		clocks->max_clocks_state = PP_DAL_POWERLEVEL_7;
	else
		clocks->max_clocks_state = simple_clocks.level;

	if (!smu_get_current_shallow_sleep_clocks(smu, &hw_clocks)) {
		clocks->max_engine_clock_in_sr = hw_clocks.max_eng_clk;
		clocks->min_engine_clock_in_sr = hw_clocks.min_eng_clk;
	}

failed:
	mutex_unlock(&smu->mutex);
	return ret;
}
static int smu_set_clockgating_state(void *handle,
				     enum amd_clockgating_state state)
{
	return 0;
}

static int smu_set_powergating_state(void *handle,
				     enum amd_powergating_state state)
{
	return 0;
}
static int smu_enable_umd_pstate(void *handle,
				 enum amd_dpm_forced_level *level)
{
	uint32_t profile_mode_mask = AMD_DPM_FORCED_LEVEL_PROFILE_STANDARD |
				     AMD_DPM_FORCED_LEVEL_PROFILE_MIN_SCLK |
				     AMD_DPM_FORCED_LEVEL_PROFILE_MIN_MCLK |
				     AMD_DPM_FORCED_LEVEL_PROFILE_PEAK;

	struct smu_context *smu = (struct smu_context*)(handle);
	struct smu_dpm_context *smu_dpm_ctx = &(smu->smu_dpm);

	if (!smu->is_apu && !smu_dpm_ctx->dpm_context)
		return -EINVAL;

	if (!(smu_dpm_ctx->dpm_level & profile_mode_mask)) {
		/* enter umd pstate, save current level, disable gfx cg */
		if (*level & profile_mode_mask) {
			smu_dpm_ctx->saved_dpm_level = smu_dpm_ctx->dpm_level;
			smu_dpm_ctx->enable_umd_pstate = true;
			amdgpu_device_ip_set_powergating_state(smu->adev,
							       AMD_IP_BLOCK_TYPE_GFX,
							       AMD_PG_STATE_UNGATE);
			amdgpu_device_ip_set_clockgating_state(smu->adev,
							       AMD_IP_BLOCK_TYPE_GFX,
							       AMD_CG_STATE_UNGATE);
			smu_gfx_ulv_control(smu, false);
			smu_deep_sleep_control(smu, false);
		}
	} else {
		/* exit umd pstate, restore level, enable gfx cg */
		if (!(*level & profile_mode_mask)) {
			if (*level == AMD_DPM_FORCED_LEVEL_PROFILE_EXIT)
				*level = smu_dpm_ctx->saved_dpm_level;
			smu_dpm_ctx->enable_umd_pstate = false;
			smu_deep_sleep_control(smu, true);
			smu_gfx_ulv_control(smu, true);
			amdgpu_device_ip_set_clockgating_state(smu->adev,
							       AMD_IP_BLOCK_TYPE_GFX,
							       AMD_CG_STATE_GATE);
			amdgpu_device_ip_set_powergating_state(smu->adev,
							       AMD_IP_BLOCK_TYPE_GFX,
							       AMD_PG_STATE_GATE);
		}
	}

	return 0;
}
static int smu_adjust_power_state_dynamic(struct smu_context *smu,
					  enum amd_dpm_forced_level level,
					  bool skip_display_settings)
{
	int ret = 0;
	int index = 0;
	long workload;
	struct smu_dpm_context *smu_dpm_ctx = &(smu->smu_dpm);

	if (!skip_display_settings) {
		ret = smu_display_config_changed(smu);
		if (ret) {
			dev_err(smu->adev->dev, "Failed to change display config!");
			return ret;
		}
	}

	ret = smu_apply_clocks_adjust_rules(smu);
	if (ret) {
		dev_err(smu->adev->dev, "Failed to apply clocks adjust rules!");
		return ret;
	}

	if (!skip_display_settings) {
		ret = smu_notify_smc_display_config(smu);
		if (ret) {
			dev_err(smu->adev->dev, "Failed to notify smc display config!");
			return ret;
		}
	}

	if (smu_dpm_ctx->dpm_level != level) {
		ret = smu_asic_set_performance_level(smu, level);
		if (ret) {
			dev_err(smu->adev->dev, "Failed to set performance level!");
			return ret;
		}

		/* update the saved copy */
		smu_dpm_ctx->dpm_level = level;
	}

	if (smu_dpm_ctx->dpm_level != AMD_DPM_FORCED_LEVEL_MANUAL) {
		index = fls(smu->workload_mask);
		index = index > 0 && index <= WORKLOAD_POLICY_MAX ? index - 1 : 0;
		workload = smu->workload_setting[index];

		if (smu->power_profile_mode != workload)
			smu_set_power_profile_mode(smu, &workload, 0, false);
	}

	return ret;
}
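/**
 * smu_handle_task - dispatch a power management task
 * @smu:         smu_context pointer
 * @level:       the currently requested performance level
 * @task_id:     which task to run
 * @lock_needed: take smu->mutex here, or rely on the caller holding it
 */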
int smu_handle_task(struct smu_context *smu,
		    enum amd_dpm_forced_level level,
		    enum amd_pp_task task_id,
		    bool lock_needed)
{
	int ret = 0;

	if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
		return -EOPNOTSUPP;

	if (lock_needed)
		mutex_lock(&smu->mutex);

	switch (task_id) {
	case AMD_PP_TASK_DISPLAY_CONFIG_CHANGE:
		ret = smu_pre_display_config_changed(smu);
		if (ret)
			goto out;
		ret = smu_set_cpu_power_state(smu);
		if (ret)
			goto out;
		ret = smu_adjust_power_state_dynamic(smu, level, false);
		break;
	case AMD_PP_TASK_COMPLETE_INIT:
	case AMD_PP_TASK_READJUST_POWER_STATE:
		ret = smu_adjust_power_state_dynamic(smu, level, true);
		break;
	default:
		break;
	}

out:
	if (lock_needed)
		mutex_unlock(&smu->mutex);

	return ret;
}
int smu_switch_power_profile(struct smu_context *smu,
			     enum PP_SMC_POWER_PROFILE type,
			     bool en)
{
	struct smu_dpm_context *smu_dpm_ctx = &(smu->smu_dpm);
	long workload;
	uint32_t index;

	if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
		return -EOPNOTSUPP;

	if (!(type < PP_SMC_POWER_PROFILE_CUSTOM))
		return -EINVAL;

	mutex_lock(&smu->mutex);

	if (!en) {
		smu->workload_mask &= ~(1 << smu->workload_prority[type]);
		index = fls(smu->workload_mask);
		index = index > 0 && index <= WORKLOAD_POLICY_MAX ? index - 1 : 0;
		workload = smu->workload_setting[index];
	} else {
		smu->workload_mask |= (1 << smu->workload_prority[type]);
		index = fls(smu->workload_mask);
		index = index <= WORKLOAD_POLICY_MAX ? index - 1 : 0;
		workload = smu->workload_setting[index];
	}

	if (smu_dpm_ctx->dpm_level != AMD_DPM_FORCED_LEVEL_MANUAL)
		smu_set_power_profile_mode(smu, &workload, 0, false);

	mutex_unlock(&smu->mutex);

	return 0;
}
enum amd_dpm_forced_level smu_get_performance_level(struct smu_context *smu)
{
	struct smu_dpm_context *smu_dpm_ctx = &(smu->smu_dpm);
	enum amd_dpm_forced_level level;

	if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
		return -EOPNOTSUPP;

	if (!smu->is_apu && !smu_dpm_ctx->dpm_context)
		return -EINVAL;

	mutex_lock(&(smu->mutex));
	level = smu_dpm_ctx->dpm_level;
	mutex_unlock(&(smu->mutex));

	return level;
}
int smu_force_performance_level(struct smu_context *smu, enum amd_dpm_forced_level level)
{
	struct smu_dpm_context *smu_dpm_ctx = &(smu->smu_dpm);
	int ret = 0;

	if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
		return -EOPNOTSUPP;

	if (!smu->is_apu && !smu_dpm_ctx->dpm_context)
		return -EINVAL;

	mutex_lock(&smu->mutex);

	ret = smu_enable_umd_pstate(smu, &level);
	if (ret) {
		mutex_unlock(&smu->mutex);
		return ret;
	}

	ret = smu_handle_task(smu, level,
			      AMD_PP_TASK_READJUST_POWER_STATE,
			      false);

	mutex_unlock(&smu->mutex);

	return ret;
}
int smu_set_display_count(struct smu_context *smu, uint32_t count)
{
	int ret = 0;

	if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
		return -EOPNOTSUPP;

	mutex_lock(&smu->mutex);
	ret = smu_init_display_count(smu, count);
	mutex_unlock(&smu->mutex);

	return ret;
}
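/*
 * Forcing clock levels is only honored in manual performance mode, where
 * @mask is a bitmask of the DPM levels to allow for @clk_type. An
 * illustrative (hypothetical, not from this file) call sequence would be:
 *
 *	smu_force_performance_level(smu, AMD_DPM_FORCED_LEVEL_MANUAL);
 *	smu_force_clk_levels(smu, SMU_SCLK, BIT(2)); // pin SCLK DPM level 2
 */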
int smu_force_clk_levels(struct smu_context *smu,
			 enum smu_clk_type clk_type,
			 uint32_t mask)
{
	struct smu_dpm_context *smu_dpm_ctx = &(smu->smu_dpm);
	int ret = 0;

	if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
		return -EOPNOTSUPP;

	if (smu_dpm_ctx->dpm_level != AMD_DPM_FORCED_LEVEL_MANUAL) {
		dev_dbg(smu->adev->dev, "force clock level is for dpm manual mode only.\n");
		return -EINVAL;
	}

	mutex_lock(&smu->mutex);

	if (smu->ppt_funcs && smu->ppt_funcs->force_clk_levels)
		ret = smu->ppt_funcs->force_clk_levels(smu, clk_type, mask);

	mutex_unlock(&smu->mutex);

	return ret;
}
/*
 * On system suspend or reset, the dpm_enabled flag gets cleared so that
 * unsupported SMU services are gated.
 * However, setting the mp1 state should still be granted even when
 * dpm_enabled is cleared.
 */
int smu_set_mp1_state(struct smu_context *smu,
		      enum pp_mp1_state mp1_state)
{
	uint16_t msg;
	int ret;

	if (!smu->pm_enabled)
		return -EOPNOTSUPP;

	mutex_lock(&smu->mutex);

	switch (mp1_state) {
	case PP_MP1_STATE_SHUTDOWN:
		msg = SMU_MSG_PrepareMp1ForShutdown;
		break;
	case PP_MP1_STATE_UNLOAD:
		msg = SMU_MSG_PrepareMp1ForUnload;
		break;
	case PP_MP1_STATE_RESET:
		msg = SMU_MSG_PrepareMp1ForReset;
		break;
	case PP_MP1_STATE_NONE:
	default:
		mutex_unlock(&smu->mutex);
		return 0;
	}

	ret = smu_send_smc_msg(smu, msg, NULL);
	/* some asics may not support those messages */
	if (ret == -EINVAL)
		ret = 0;
	if (ret)
		dev_err(smu->adev->dev, "[PrepareMp1] Failed!\n");

	mutex_unlock(&smu->mutex);

	return ret;
}
int smu_set_df_cstate(struct smu_context *smu,
		      enum pp_df_cstate state)
{
	int ret = 0;

	if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
		return -EOPNOTSUPP;

	if (!smu->ppt_funcs || !smu->ppt_funcs->set_df_cstate)
		return 0;

	mutex_lock(&smu->mutex);

	ret = smu->ppt_funcs->set_df_cstate(smu, state);
	if (ret)
		dev_err(smu->adev->dev, "[SetDfCstate] failed!\n");

	mutex_unlock(&smu->mutex);

	return ret;
}

int smu_allow_xgmi_power_down(struct smu_context *smu, bool en)
{
	int ret = 0;

	if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
		return -EOPNOTSUPP;

	if (!smu->ppt_funcs || !smu->ppt_funcs->allow_xgmi_power_down)
		return 0;

	mutex_lock(&smu->mutex);

	ret = smu->ppt_funcs->allow_xgmi_power_down(smu, en);
	if (ret)
		dev_err(smu->adev->dev, "[AllowXgmiPowerDown] failed!\n");

	mutex_unlock(&smu->mutex);

	return ret;
}
int smu_write_watermarks_table(struct smu_context *smu)
{
	int ret = 0;

	if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
		return -EOPNOTSUPP;

	mutex_lock(&smu->mutex);

	ret = smu_set_watermarks_table(smu, NULL);

	mutex_unlock(&smu->mutex);

	return ret;
}

int smu_set_watermarks_for_clock_ranges(struct smu_context *smu,
					struct pp_smu_wm_range_sets *clock_ranges)
{
	int ret = 0;

	if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
		return -EOPNOTSUPP;

	mutex_lock(&smu->mutex);

	if (!smu->disable_watermark &&
	    smu_feature_is_enabled(smu, SMU_FEATURE_DPM_DCEFCLK_BIT) &&
	    smu_feature_is_enabled(smu, SMU_FEATURE_DPM_SOCCLK_BIT)) {
		ret = smu_set_watermarks_table(smu, clock_ranges);

		if (!(smu->watermarks_bitmap & WATERMARKS_EXIST)) {
			smu->watermarks_bitmap |= WATERMARKS_EXIST;
			smu->watermarks_bitmap &= ~WATERMARKS_LOADED;
		}
	}

	mutex_unlock(&smu->mutex);

	return ret;
}
int smu_set_ac_dc(struct smu_context *smu)
{
	int ret = 0;

	if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
		return -EOPNOTSUPP;

	/* controlled by firmware */
	if (smu->dc_controlled_by_gpio)
		return 0;

	mutex_lock(&smu->mutex);
	ret = smu_set_power_source(smu,
				   smu->adev->pm.ac_power ? SMU_POWER_SOURCE_AC :
				   SMU_POWER_SOURCE_DC);
	if (ret)
		dev_err(smu->adev->dev, "Failed to switch to %s mode!\n",
			smu->adev->pm.ac_power ? "AC" : "DC");
	mutex_unlock(&smu->mutex);

	return ret;
}
const struct amd_ip_funcs smu_ip_funcs = {
	.name = "smu",
	.early_init = smu_early_init,
	.late_init = smu_late_init,
	.sw_init = smu_sw_init,
	.sw_fini = smu_sw_fini,
	.hw_init = smu_hw_init,
	.hw_fini = smu_hw_fini,
	.suspend = smu_suspend,
	.resume = smu_resume,
	.is_idle = NULL,
	.check_soft_reset = NULL,
	.wait_for_idle = NULL,
	.soft_reset = NULL,
	.set_clockgating_state = smu_set_clockgating_state,
	.set_powergating_state = smu_set_powergating_state,
	.enable_umd_pstate = smu_enable_umd_pstate,
};
const struct amdgpu_ip_block_version smu_v11_0_ip_block =
{
	.type = AMD_IP_BLOCK_TYPE_SMC,
	.major = 11,
	.minor = 0,
	.rev = 0,
	.funcs = &smu_ip_funcs,
};

const struct amdgpu_ip_block_version smu_v12_0_ip_block =
{
	.type = AMD_IP_BLOCK_TYPE_SMC,
	.major = 12,
	.minor = 0,
	.rev = 0,
	.funcs = &smu_ip_funcs,
};
int smu_load_microcode(struct smu_context *smu)
{
	int ret = 0;

	if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
		return -EOPNOTSUPP;

	mutex_lock(&smu->mutex);

	if (smu->ppt_funcs->load_microcode)
		ret = smu->ppt_funcs->load_microcode(smu);

	mutex_unlock(&smu->mutex);

	return ret;
}

int smu_check_fw_status(struct smu_context *smu)
{
	int ret = 0;

	if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
		return -EOPNOTSUPP;

	mutex_lock(&smu->mutex);

	if (smu->ppt_funcs->check_fw_status)
		ret = smu->ppt_funcs->check_fw_status(smu);

	mutex_unlock(&smu->mutex);

	return ret;
}

int smu_set_gfx_cgpg(struct smu_context *smu, bool enabled)
{
	int ret = 0;

	mutex_lock(&smu->mutex);

	if (smu->ppt_funcs->set_gfx_cgpg)
		ret = smu->ppt_funcs->set_gfx_cgpg(smu, enabled);

	mutex_unlock(&smu->mutex);

	return ret;
}

int smu_set_fan_speed_rpm(struct smu_context *smu, uint32_t speed)
{
	int ret = 0;

	if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
		return -EOPNOTSUPP;

	mutex_lock(&smu->mutex);

	if (smu->ppt_funcs->set_fan_speed_rpm)
		ret = smu->ppt_funcs->set_fan_speed_rpm(smu, speed);

	mutex_unlock(&smu->mutex);

	return ret;
}
int smu_get_power_limit(struct smu_context *smu,
			uint32_t *limit,
			bool max_setting)
{
	if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
		return -EOPNOTSUPP;

	mutex_lock(&smu->mutex);

	*limit = (max_setting ? smu->max_power_limit : smu->current_power_limit);

	mutex_unlock(&smu->mutex);

	return 0;
}

int smu_set_power_limit(struct smu_context *smu, uint32_t limit)
{
	int ret = 0;

	if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
		return -EOPNOTSUPP;

	mutex_lock(&smu->mutex);

	if (limit > smu->max_power_limit) {
		dev_err(smu->adev->dev,
			"New power limit (%d) is over the max allowed %d\n",
			limit, smu->max_power_limit);
		ret = -EINVAL;
		goto out;
	}

	if (!limit)
		limit = smu->current_power_limit;

	if (smu->ppt_funcs->set_power_limit)
		ret = smu->ppt_funcs->set_power_limit(smu, limit);

out:
	mutex_unlock(&smu->mutex);

	return ret;
}
int smu_print_clk_levels(struct smu_context *smu, enum smu_clk_type clk_type, char *buf)
{
	int ret = 0;

	if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
		return -EOPNOTSUPP;

	mutex_lock(&smu->mutex);

	if (smu->ppt_funcs->print_clk_levels)
		ret = smu->ppt_funcs->print_clk_levels(smu, clk_type, buf);

	mutex_unlock(&smu->mutex);

	return ret;
}

int smu_get_od_percentage(struct smu_context *smu, enum smu_clk_type type)
{
	int ret = 0;

	if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
		return -EOPNOTSUPP;

	mutex_lock(&smu->mutex);

	if (smu->ppt_funcs->get_od_percentage)
		ret = smu->ppt_funcs->get_od_percentage(smu, type);

	mutex_unlock(&smu->mutex);

	return ret;
}

int smu_set_od_percentage(struct smu_context *smu, enum smu_clk_type type, uint32_t value)
{
	int ret = 0;

	if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
		return -EOPNOTSUPP;

	mutex_lock(&smu->mutex);

	if (smu->ppt_funcs->set_od_percentage)
		ret = smu->ppt_funcs->set_od_percentage(smu, type, value);

	mutex_unlock(&smu->mutex);

	return ret;
}

int smu_od_edit_dpm_table(struct smu_context *smu,
			  enum PP_OD_DPM_TABLE_COMMAND type,
			  long *input, uint32_t size)
{
	int ret = 0;

	if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
		return -EOPNOTSUPP;

	mutex_lock(&smu->mutex);

	if (smu->ppt_funcs->od_edit_dpm_table) {
		ret = smu->ppt_funcs->od_edit_dpm_table(smu, type, input, size);
		if (!ret && (type == PP_OD_COMMIT_DPM_TABLE))
			ret = smu_handle_task(smu,
					      smu->smu_dpm.dpm_level,
					      AMD_PP_TASK_READJUST_POWER_STATE,
					      false);
	}

	mutex_unlock(&smu->mutex);

	return ret;
}
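/*
 * Sensor reads are handed to the ASIC-specific read_sensor callback
 * first; only sensors the ppt code does not claim are serviced by the
 * generic fallbacks below.
 */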
int smu_read_sensor(struct smu_context *smu,
		    enum amd_pp_sensors sensor,
		    void *data, uint32_t *size)
{
	struct smu_umd_pstate_table *pstate_table =
				&smu->pstate_table;
	int ret = 0;

	if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
		return -EOPNOTSUPP;

	if (!data || !size)
		return -EINVAL;

	mutex_lock(&smu->mutex);

	if (smu->ppt_funcs->read_sensor)
		if (!smu->ppt_funcs->read_sensor(smu, sensor, data, size))
			goto unlock;

	switch (sensor) {
	case AMDGPU_PP_SENSOR_STABLE_PSTATE_SCLK:
		*((uint32_t *)data) = pstate_table->gfxclk_pstate.standard * 100;
		*size = 4;
		break;
	case AMDGPU_PP_SENSOR_STABLE_PSTATE_MCLK:
		*((uint32_t *)data) = pstate_table->uclk_pstate.standard * 100;
		*size = 4;
		break;
	case AMDGPU_PP_SENSOR_ENABLED_SMC_FEATURES_MASK:
		ret = smu_feature_get_enabled_mask(smu, (uint32_t *)data, 2);
		*size = 8;
		break;
	case AMDGPU_PP_SENSOR_UVD_POWER:
		*(uint32_t *)data = smu_feature_is_enabled(smu, SMU_FEATURE_DPM_UVD_BIT) ? 1 : 0;
		*size = 4;
		break;
	case AMDGPU_PP_SENSOR_VCE_POWER:
		*(uint32_t *)data = smu_feature_is_enabled(smu, SMU_FEATURE_DPM_VCE_BIT) ? 1 : 0;
		*size = 4;
		break;
	case AMDGPU_PP_SENSOR_VCN_POWER_STATE:
		*(uint32_t *)data = atomic_read(&smu->smu_power.power_gate.vcn_gated) ? 0 : 1;
		*size = 4;
		break;
	case AMDGPU_PP_SENSOR_MIN_FAN_RPM:
		*(uint32_t *)data = 0;
		*size = 4;
		break;
	default:
		*size = 0;
		ret = -EOPNOTSUPP;
		break;
	}

unlock:
	mutex_unlock(&smu->mutex);

	return ret;
}
int smu_get_power_profile_mode(struct smu_context *smu, char *buf)
{
	int ret = 0;

	if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
		return -EOPNOTSUPP;

	mutex_lock(&smu->mutex);

	if (smu->ppt_funcs->get_power_profile_mode)
		ret = smu->ppt_funcs->get_power_profile_mode(smu, buf);

	mutex_unlock(&smu->mutex);

	return ret;
}

int smu_set_power_profile_mode(struct smu_context *smu,
			       long *param,
			       uint32_t param_size,
			       bool lock_needed)
{
	int ret = 0;

	if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
		return -EOPNOTSUPP;

	if (lock_needed)
		mutex_lock(&smu->mutex);

	if (smu->ppt_funcs->set_power_profile_mode)
		ret = smu->ppt_funcs->set_power_profile_mode(smu, param, param_size);

	if (lock_needed)
		mutex_unlock(&smu->mutex);

	return ret;
}
int smu_get_fan_control_mode(struct smu_context *smu)
{
	int ret = 0;

	if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
		return -EOPNOTSUPP;

	mutex_lock(&smu->mutex);

	if (smu->ppt_funcs->get_fan_control_mode)
		ret = smu->ppt_funcs->get_fan_control_mode(smu);

	mutex_unlock(&smu->mutex);

	return ret;
}

int smu_set_fan_control_mode(struct smu_context *smu, int value)
{
	int ret = 0;

	if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
		return -EOPNOTSUPP;

	mutex_lock(&smu->mutex);

	if (smu->ppt_funcs->set_fan_control_mode)
		ret = smu->ppt_funcs->set_fan_control_mode(smu, value);

	mutex_unlock(&smu->mutex);

	return ret;
}
int smu_get_fan_speed_percent(struct smu_context *smu, uint32_t *speed)
{
	int ret = 0;
	uint32_t percent;
	uint32_t current_rpm;

	if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
		return -EOPNOTSUPP;

	mutex_lock(&smu->mutex);

	if (smu->ppt_funcs->get_fan_speed_rpm) {
		ret = smu->ppt_funcs->get_fan_speed_rpm(smu, &current_rpm);
		if (!ret) {
			percent = current_rpm * 100 / smu->fan_max_rpm;
			*speed = percent > 100 ? 100 : percent;
		}
	}

	mutex_unlock(&smu->mutex);

	return ret;
}
int smu_set_fan_speed_percent(struct smu_context *smu, uint32_t speed)
{
	int ret = 0;
	uint32_t rpm;

	if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
		return -EOPNOTSUPP;

	mutex_lock(&smu->mutex);

	if (smu->ppt_funcs->set_fan_speed_rpm) {
		if (speed > 100)
			speed = 100;
		rpm = speed * smu->fan_max_rpm / 100;
		ret = smu->ppt_funcs->set_fan_speed_rpm(smu, rpm);
	}

	mutex_unlock(&smu->mutex);

	return ret;
}
int smu_get_fan_speed_rpm(struct smu_context *smu, uint32_t *speed)
{
	int ret = 0;

	if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
		return -EOPNOTSUPP;

	mutex_lock(&smu->mutex);

	if (smu->ppt_funcs->get_fan_speed_rpm)
		ret = smu->ppt_funcs->get_fan_speed_rpm(smu, speed);

	mutex_unlock(&smu->mutex);

	return ret;
}
int smu_set_deep_sleep_dcefclk(struct smu_context *smu, int clk)
{
	int ret = 0;

	if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
		return -EOPNOTSUPP;

	mutex_lock(&smu->mutex);

	ret = smu_set_min_dcef_deep_sleep(smu, clk);

	mutex_unlock(&smu->mutex);

	return ret;
}

int smu_get_clock_by_type(struct smu_context *smu,
			  enum amd_pp_clock_type type,
			  struct amd_pp_clocks *clocks)
{
	int ret = 0;

	if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
		return -EOPNOTSUPP;

	mutex_lock(&smu->mutex);

	if (smu->ppt_funcs->get_clock_by_type)
		ret = smu->ppt_funcs->get_clock_by_type(smu, type, clocks);

	mutex_unlock(&smu->mutex);

	return ret;
}
int smu_get_max_high_clocks(struct smu_context *smu,
			    struct amd_pp_simple_clock_info *clocks)
{
	int ret = 0;

	if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
		return -EOPNOTSUPP;

	mutex_lock(&smu->mutex);

	if (smu->ppt_funcs->get_max_high_clocks)
		ret = smu->ppt_funcs->get_max_high_clocks(smu, clocks);

	mutex_unlock(&smu->mutex);

	return ret;
}

int smu_get_clock_by_type_with_latency(struct smu_context *smu,
				       enum smu_clk_type clk_type,
				       struct pp_clock_levels_with_latency *clocks)
{
	int ret = 0;

	if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
		return -EOPNOTSUPP;

	mutex_lock(&smu->mutex);

	if (smu->ppt_funcs->get_clock_by_type_with_latency)
		ret = smu->ppt_funcs->get_clock_by_type_with_latency(smu, clk_type, clocks);

	mutex_unlock(&smu->mutex);

	return ret;
}

int smu_get_clock_by_type_with_voltage(struct smu_context *smu,
				       enum amd_pp_clock_type type,
				       struct pp_clock_levels_with_voltage *clocks)
{
	int ret = 0;

	if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
		return -EOPNOTSUPP;

	mutex_lock(&smu->mutex);

	if (smu->ppt_funcs->get_clock_by_type_with_voltage)
		ret = smu->ppt_funcs->get_clock_by_type_with_voltage(smu, type, clocks);

	mutex_unlock(&smu->mutex);

	return ret;
}
int smu_display_clock_voltage_request(struct smu_context *smu,
				      struct pp_display_clock_request *clock_req)
{
	int ret = 0;

	if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
		return -EOPNOTSUPP;

	mutex_lock(&smu->mutex);

	if (smu->ppt_funcs->display_clock_voltage_request)
		ret = smu->ppt_funcs->display_clock_voltage_request(smu, clock_req);

	mutex_unlock(&smu->mutex);

	return ret;
}

int smu_display_disable_memory_clock_switch(struct smu_context *smu, bool disable_memory_clock_switch)
{
	int ret = -EINVAL;

	if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
		return -EOPNOTSUPP;

	mutex_lock(&smu->mutex);

	if (smu->ppt_funcs->display_disable_memory_clock_switch)
		ret = smu->ppt_funcs->display_disable_memory_clock_switch(smu, disable_memory_clock_switch);

	mutex_unlock(&smu->mutex);

	return ret;
}

int smu_notify_smu_enable_pwe(struct smu_context *smu)
{
	int ret = 0;

	if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
		return -EOPNOTSUPP;

	mutex_lock(&smu->mutex);

	if (smu->ppt_funcs->notify_smu_enable_pwe)
		ret = smu->ppt_funcs->notify_smu_enable_pwe(smu);

	mutex_unlock(&smu->mutex);

	return ret;
}
int smu_set_xgmi_pstate(struct smu_context *smu,
			uint32_t pstate)
{
	int ret = 0;

	if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
		return -EOPNOTSUPP;

	mutex_lock(&smu->mutex);

	if (smu->ppt_funcs->set_xgmi_pstate)
		ret = smu->ppt_funcs->set_xgmi_pstate(smu, pstate);

	mutex_unlock(&smu->mutex);

	if (ret)
		dev_err(smu->adev->dev, "Failed to set XGMI pstate!\n");

	return ret;
}

int smu_set_azalia_d3_pme(struct smu_context *smu)
{
	int ret = 0;

	if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
		return -EOPNOTSUPP;

	mutex_lock(&smu->mutex);

	if (smu->ppt_funcs->set_azalia_d3_pme)
		ret = smu->ppt_funcs->set_azalia_d3_pme(smu);

	mutex_unlock(&smu->mutex);

	return ret;
}
/*
 * On system suspend or reset, the dpm_enabled flag gets cleared so that
 * unsupported SMU services are gated.
 *
 * However, the baco/mode1 reset should still be granted as they are
 * still supported and necessary.
 */
bool smu_baco_is_support(struct smu_context *smu)
{
	bool ret = false;

	if (!smu->pm_enabled)
		return false;

	mutex_lock(&smu->mutex);

	if (smu->ppt_funcs && smu->ppt_funcs->baco_is_support)
		ret = smu->ppt_funcs->baco_is_support(smu);

	mutex_unlock(&smu->mutex);

	return ret;
}

int smu_baco_get_state(struct smu_context *smu, enum smu_baco_state *state)
{
	if (!smu->ppt_funcs->baco_get_state)
		return -EINVAL;

	mutex_lock(&smu->mutex);
	*state = smu->ppt_funcs->baco_get_state(smu);
	mutex_unlock(&smu->mutex);

	return 0;
}
int smu_baco_enter(struct smu_context *smu)
{
	int ret = 0;

	if (!smu->pm_enabled)
		return -EOPNOTSUPP;

	mutex_lock(&smu->mutex);

	if (smu->ppt_funcs->baco_enter)
		ret = smu->ppt_funcs->baco_enter(smu);

	mutex_unlock(&smu->mutex);

	if (ret)
		dev_err(smu->adev->dev, "Failed to enter BACO state!\n");

	return ret;
}

int smu_baco_exit(struct smu_context *smu)
{
	int ret = 0;

	if (!smu->pm_enabled)
		return -EOPNOTSUPP;

	mutex_lock(&smu->mutex);

	if (smu->ppt_funcs->baco_exit)
		ret = smu->ppt_funcs->baco_exit(smu);

	mutex_unlock(&smu->mutex);

	if (ret)
		dev_err(smu->adev->dev, "Failed to exit BACO state!\n");

	return ret;
}
bool smu_mode1_reset_is_support(struct smu_context *smu)
{
	bool ret = false;

	if (!smu->pm_enabled)
		return false;

	mutex_lock(&smu->mutex);

	if (smu->ppt_funcs && smu->ppt_funcs->mode1_reset_is_support)
		ret = smu->ppt_funcs->mode1_reset_is_support(smu);

	mutex_unlock(&smu->mutex);

	return ret;
}

int smu_mode1_reset(struct smu_context *smu)
{
	int ret = 0;

	if (!smu->pm_enabled)
		return -EOPNOTSUPP;

	mutex_lock(&smu->mutex);

	if (smu->ppt_funcs->mode1_reset)
		ret = smu->ppt_funcs->mode1_reset(smu);

	mutex_unlock(&smu->mutex);

	return ret;
}

int smu_mode2_reset(struct smu_context *smu)
{
	int ret = 0;

	if (!smu->pm_enabled)
		return -EOPNOTSUPP;

	mutex_lock(&smu->mutex);

	if (smu->ppt_funcs->mode2_reset)
		ret = smu->ppt_funcs->mode2_reset(smu);

	mutex_unlock(&smu->mutex);

	if (ret)
		dev_err(smu->adev->dev, "Mode2 reset failed!\n");

	return ret;
}
int smu_get_max_sustainable_clocks_by_dc(struct smu_context *smu,
					 struct pp_smu_nv_clock_table *max_clocks)
{
	int ret = 0;

	if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
		return -EOPNOTSUPP;

	mutex_lock(&smu->mutex);

	if (smu->ppt_funcs->get_max_sustainable_clocks_by_dc)
		ret = smu->ppt_funcs->get_max_sustainable_clocks_by_dc(smu, max_clocks);

	mutex_unlock(&smu->mutex);

	return ret;
}

int smu_get_uclk_dpm_states(struct smu_context *smu,
			    unsigned int *clock_values_in_khz,
			    unsigned int *num_states)
{
	int ret = 0;

	if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
		return -EOPNOTSUPP;

	mutex_lock(&smu->mutex);

	if (smu->ppt_funcs->get_uclk_dpm_states)
		ret = smu->ppt_funcs->get_uclk_dpm_states(smu, clock_values_in_khz, num_states);

	mutex_unlock(&smu->mutex);

	return ret;
}

enum amd_pm_state_type smu_get_current_power_state(struct smu_context *smu)
{
	enum amd_pm_state_type pm_state = POWER_STATE_TYPE_DEFAULT;

	if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
		return -EOPNOTSUPP;

	mutex_lock(&smu->mutex);

	if (smu->ppt_funcs->get_current_power_state)
		pm_state = smu->ppt_funcs->get_current_power_state(smu);

	mutex_unlock(&smu->mutex);

	return pm_state;
}
int smu_get_dpm_clock_table(struct smu_context *smu,
			    struct dpm_clocks *clock_table)
{
	int ret = 0;

	if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
		return -EOPNOTSUPP;

	mutex_lock(&smu->mutex);

	if (smu->ppt_funcs->get_dpm_clock_table)
		ret = smu->ppt_funcs->get_dpm_clock_table(smu, clock_table);

	mutex_unlock(&smu->mutex);

	return ret;
}

ssize_t smu_sys_get_gpu_metrics(struct smu_context *smu,
				void **table)
{
	ssize_t size;

	if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
		return -EOPNOTSUPP;

	if (!smu->ppt_funcs->get_gpu_metrics)
		return -EOPNOTSUPP;

	mutex_lock(&smu->mutex);

	size = smu->ppt_funcs->get_gpu_metrics(smu, table);

	mutex_unlock(&smu->mutex);

	return size;
}

int smu_enable_mgpu_fan_boost(struct smu_context *smu)
{
	int ret = 0;

	if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
		return -EOPNOTSUPP;

	mutex_lock(&smu->mutex);

	if (smu->ppt_funcs->enable_mgpu_fan_boost)
		ret = smu->ppt_funcs->enable_mgpu_fan_boost(smu);

	mutex_unlock(&smu->mutex);

	return ret;
}