2 * Copyright 2019 Advanced Micro Devices, Inc.
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
23 #define SWSMU_CODE_LAYER_L1
25 #include <linux/firmware.h>
26 #include <linux/pci.h>
29 #include "amdgpu_smu.h"
30 #include "smu_internal.h"
32 #include "arcturus_ppt.h"
33 #include "navi10_ppt.h"
34 #include "sienna_cichlid_ppt.h"
35 #include "renoir_ppt.h"
36 #include "vangogh_ppt.h"
40 * DO NOT use these for err/warn/info/debug messages.
41 * Use dev_err, dev_warn, dev_info and dev_dbg instead.
42 * They are more MGPU friendly.
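 *
 * Example (illustrative): on a multi-GPU system, dev_err(adev->dev, ...)
 * prefixes the message with the PCI identity of the failing GPU, while a
 * bare pr_err() leaves it ambiguous which device is being reported on.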
49 size_t smu_sys_get_pp_feature_mask(struct smu_context *smu, char *buf)
53 if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
56 mutex_lock(&smu->mutex);
58 size = smu_get_pp_feature_mask(smu, buf);
60 mutex_unlock(&smu->mutex);
65 int smu_sys_set_pp_feature_mask(struct smu_context *smu, uint64_t new_mask)
69 if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
72 mutex_lock(&smu->mutex);
74 ret = smu_set_pp_feature_mask(smu, new_mask);
76 mutex_unlock(&smu->mutex);
81 int smu_get_status_gfxoff(struct amdgpu_device *adev, uint32_t *value)
84 struct smu_context *smu = &adev->smu;
86 if (is_support_sw_smu(adev) && smu->ppt_funcs->get_gfx_off_status)
87 *value = smu_get_gfx_off_status(smu);
94 int smu_set_soft_freq_range(struct smu_context *smu,
95 enum smu_clk_type clk_type,
101 mutex_lock(&smu->mutex);
103 if (smu->ppt_funcs->set_soft_freq_limited_range)
104 ret = smu->ppt_funcs->set_soft_freq_limited_range(smu,
109 mutex_unlock(&smu->mutex);
114 int smu_get_dpm_freq_range(struct smu_context *smu,
115 enum smu_clk_type clk_type,
124 mutex_lock(&smu->mutex);
126 if (smu->ppt_funcs->get_dpm_ultimate_freq)
127 ret = smu->ppt_funcs->get_dpm_ultimate_freq(smu,
132 mutex_unlock(&smu->mutex);
137 static int smu_dpm_set_vcn_enable_locked(struct smu_context *smu,
140 struct smu_power_context *smu_power = &smu->smu_power;
141 struct smu_power_gate *power_gate = &smu_power->power_gate;
144 if (!smu->ppt_funcs->dpm_set_vcn_enable)
147 if (atomic_read(&power_gate->vcn_gated) ^ enable)
150 ret = smu->ppt_funcs->dpm_set_vcn_enable(smu, enable);
152 atomic_set(&power_gate->vcn_gated, !enable);
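/*
 * Note: vcn_gated records the gating state (1 = gated), i.e. the inverse
 * of 'enable'; the XOR check above bails out early when VCN is already in
 * the requested state.
 */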
157 static int smu_dpm_set_vcn_enable(struct smu_context *smu,
160 struct smu_power_context *smu_power = &smu->smu_power;
161 struct smu_power_gate *power_gate = &smu_power->power_gate;
164 mutex_lock(&power_gate->vcn_gate_lock);
166 ret = smu_dpm_set_vcn_enable_locked(smu, enable);
168 mutex_unlock(&power_gate->vcn_gate_lock);
173 static int smu_dpm_set_jpeg_enable_locked(struct smu_context *smu,
176 struct smu_power_context *smu_power = &smu->smu_power;
177 struct smu_power_gate *power_gate = &smu_power->power_gate;
180 if (!smu->ppt_funcs->dpm_set_jpeg_enable)
183 if (atomic_read(&power_gate->jpeg_gated) ^ enable)
186 ret = smu->ppt_funcs->dpm_set_jpeg_enable(smu, enable);
188 atomic_set(&power_gate->jpeg_gated, !enable);
193 static int smu_dpm_set_jpeg_enable(struct smu_context *smu,
196 struct smu_power_context *smu_power = &smu->smu_power;
197 struct smu_power_gate *power_gate = &smu_power->power_gate;
200 mutex_lock(&power_gate->jpeg_gate_lock);
202 ret = smu_dpm_set_jpeg_enable_locked(smu, enable);
204 mutex_unlock(&power_gate->jpeg_gate_lock);
210 * smu_dpm_set_power_gate - power gate/ungate the specific IP block
212 * @smu: smu_context pointer
213 * @block_type: the IP block to power gate/ungate
214 * @gate: to power gate if true, ungate otherwise
216 * This API uses no smu->mutex lock protection because:
217 * 1. It is either called by other IP blocks (gfx/sdma/vcn/uvd/vce),
218 *    and the caller guarantees those calls are race free.
219 * 2. Or it is called on a user request to set power_dpm_force_performance_level,
220 *    in which case the smu->mutex protection is already enforced by the
221 *    parent API smu_force_performance_level in the call path.
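 *
 * Example (illustrative): on idle, the VCN IP block gates its power via
 * smu_dpm_set_power_gate(smu, AMD_IP_BLOCK_TYPE_VCN, true), and ungates
 * it with the same call and gate == false before new ring submissions.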
223 int smu_dpm_set_power_gate(struct smu_context *smu, uint32_t block_type,
228 if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
231 switch (block_type) {
233 * Some legacy code in amdgpu_vcn.c and vcn_v2*.c still uses
234 * AMD_IP_BLOCK_TYPE_UVD for VCN, so both types are handled here.
236 case AMD_IP_BLOCK_TYPE_UVD:
237 case AMD_IP_BLOCK_TYPE_VCN:
238 ret = smu_dpm_set_vcn_enable(smu, !gate);
240 dev_err(smu->adev->dev, "Failed to power %s VCN!\n",
241 gate ? "gate" : "ungate");
243 case AMD_IP_BLOCK_TYPE_GFX:
244 ret = smu_gfx_off_control(smu, gate);
246 dev_err(smu->adev->dev, "Failed to %s gfxoff!\n",
247 gate ? "enable" : "disable");
249 case AMD_IP_BLOCK_TYPE_SDMA:
250 ret = smu_powergate_sdma(smu, gate);
252 dev_err(smu->adev->dev, "Failed to power %s SDMA!\n",
253 gate ? "gate" : "ungate");
255 case AMD_IP_BLOCK_TYPE_JPEG:
256 ret = smu_dpm_set_jpeg_enable(smu, !gate);
258 dev_err(smu->adev->dev, "Failed to power %s JPEG!\n",
259 gate ? "gate" : "ungate");
262 dev_err(smu->adev->dev, "Unsupported block type!\n");
269 int smu_get_power_num_states(struct smu_context *smu,
270 struct pp_states_info *state_info)
275 /* power states are not supported */
276 memset(state_info, 0, sizeof(struct pp_states_info));
277 state_info->nums = 1;
278 state_info->states[0] = POWER_STATE_TYPE_DEFAULT;
283 bool is_support_sw_smu(struct amdgpu_device *adev)
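/*
 * Arcturus and all later ASICs are driven by this software SMU framework;
 * earlier ASICs go through the legacy powerplay paths instead.
 */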
285 if (adev->asic_type >= CHIP_ARCTURUS)
291 int smu_sys_get_pp_table(struct smu_context *smu, void **table)
293 struct smu_table_context *smu_table = &smu->smu_table;
294 uint32_t powerplay_table_size;
296 if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
299 if (!smu_table->power_play_table && !smu_table->hardcode_pptable)
302 mutex_lock(&smu->mutex);
304 if (smu_table->hardcode_pptable)
305 *table = smu_table->hardcode_pptable;
307 *table = smu_table->power_play_table;
309 powerplay_table_size = smu_table->power_play_table_size;
311 mutex_unlock(&smu->mutex);
313 return powerplay_table_size;
316 int smu_sys_set_pp_table(struct smu_context *smu, void *buf, size_t size)
318 struct smu_table_context *smu_table = &smu->smu_table;
319 ATOM_COMMON_TABLE_HEADER *header = (ATOM_COMMON_TABLE_HEADER *)buf;
322 if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
325 if (header->usStructureSize != size) {
326 dev_err(smu->adev->dev, "pp table size not matched!\n");
330 mutex_lock(&smu->mutex);
331 if (!smu_table->hardcode_pptable)
332 smu_table->hardcode_pptable = kzalloc(size, GFP_KERNEL);
333 if (!smu_table->hardcode_pptable) {
338 memcpy(smu_table->hardcode_pptable, buf, size);
339 smu_table->power_play_table = smu_table->hardcode_pptable;
340 smu_table->power_play_table_size = size;
343 * A special hw_fini action (on Navi1x, the DPM disablement will be
344 * skipped) may be needed for custom pptable uploading.
346 smu->uploading_custom_pp_table = true;
348 ret = smu_reset(smu);
350 dev_info(smu->adev->dev, "smu reset failed, ret = %d\n", ret);
352 smu->uploading_custom_pp_table = false;
355 mutex_unlock(&smu->mutex);
359 static int smu_get_driver_allowed_feature_mask(struct smu_context *smu)
361 struct smu_feature *feature = &smu->smu_feature;
363 uint32_t allowed_feature_mask[SMU_FEATURE_MAX/32];
365 bitmap_zero(feature->allowed, SMU_FEATURE_MAX);
367 ret = smu_get_allowed_feature_mask(smu, allowed_feature_mask,
372 bitmap_or(feature->allowed, feature->allowed,
373 (unsigned long *)allowed_feature_mask,
374 feature->feature_num);
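/*
 * Note: passing the u32 array to bitmap_or() through a cast to
 * unsigned long * relies on a little-endian bitmap layout, which holds on
 * the platforms this driver supports.
 */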
379 static int smu_set_funcs(struct amdgpu_device *adev)
381 struct smu_context *smu = &adev->smu;
383 if (adev->pm.pp_feature & PP_OVERDRIVE_MASK)
384 smu->od_enabled = true;
386 switch (adev->asic_type) {
390 navi10_set_ppt_funcs(smu);
393 adev->pm.pp_feature &= ~PP_GFXOFF_MASK;
394 arcturus_set_ppt_funcs(smu);
395 /* OD is not supported on Arcturus */
396 smu->od_enabled = false;
398 case CHIP_SIENNA_CICHLID:
399 case CHIP_NAVY_FLOUNDER:
400 case CHIP_DIMGREY_CAVEFISH:
401 sienna_cichlid_set_ppt_funcs(smu);
404 renoir_set_ppt_funcs(smu);
407 vangogh_set_ppt_funcs(smu);
408 /* enable OD by default to allow the fine-grained tuning function */
409 smu->od_enabled = true;
418 static int smu_early_init(void *handle)
420 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
421 struct smu_context *smu = &adev->smu;
424 smu->pm_enabled = !!amdgpu_dpm;
426 mutex_init(&smu->mutex);
427 mutex_init(&smu->smu_baco.mutex);
428 smu->smu_baco.state = SMU_BACO_STATE_EXIT;
429 smu->smu_baco.platform_support = false;
431 return smu_set_funcs(adev);
434 static int smu_set_default_dpm_table(struct smu_context *smu)
436 struct smu_power_context *smu_power = &smu->smu_power;
437 struct smu_power_gate *power_gate = &smu_power->power_gate;
438 int vcn_gate, jpeg_gate;
441 if (!smu->ppt_funcs->set_default_dpm_table)
444 mutex_lock(&power_gate->vcn_gate_lock);
445 mutex_lock(&power_gate->jpeg_gate_lock);
447 vcn_gate = atomic_read(&power_gate->vcn_gated);
448 jpeg_gate = atomic_read(&power_gate->jpeg_gated);
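/*
 * The SMU can only report the VCN/JPEG DPM clock tables while those blocks
 * are powered up, so both are force-ungated here and the saved gating
 * state is restored once the default tables have been set up.
 */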
450 ret = smu_dpm_set_vcn_enable_locked(smu, true);
454 ret = smu_dpm_set_jpeg_enable_locked(smu, true);
458 ret = smu->ppt_funcs->set_default_dpm_table(smu);
460 dev_err(smu->adev->dev,
461 "Failed to setup default dpm clock tables!\n");
463 smu_dpm_set_jpeg_enable_locked(smu, !jpeg_gate);
465 smu_dpm_set_vcn_enable_locked(smu, !vcn_gate);
467 mutex_unlock(&power_gate->jpeg_gate_lock);
468 mutex_unlock(&power_gate->vcn_gate_lock);
473 static int smu_late_init(void *handle)
475 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
476 struct smu_context *smu = &adev->smu;
479 smu_set_fine_grain_gfx_freq_parameters(smu);
481 if (!smu->pm_enabled)
484 ret = smu_post_init(smu);
486 dev_err(adev->dev, "Failed to post smu init!\n");
490 if (adev->asic_type == CHIP_VANGOGH)
493 ret = smu_set_default_od_settings(smu);
495 dev_err(adev->dev, "Failed to setup default OD settings!\n");
499 ret = smu_populate_umd_state_clk(smu);
501 dev_err(adev->dev, "Failed to populate UMD state clocks!\n");
505 ret = smu_get_asic_power_limits(smu);
507 dev_err(adev->dev, "Failed to get asic power limits!\n");
511 smu_get_unique_id(smu);
513 smu_get_fan_parameters(smu);
515 smu_handle_task(&adev->smu,
516 smu->smu_dpm.dpm_level,
517 AMD_PP_TASK_COMPLETE_INIT,
523 static int smu_init_fb_allocations(struct smu_context *smu)
525 struct amdgpu_device *adev = smu->adev;
526 struct smu_table_context *smu_table = &smu->smu_table;
527 struct smu_table *tables = smu_table->tables;
528 struct smu_table *driver_table = &(smu_table->driver_table);
529 uint32_t max_table_size = 0;
532 /* VRAM allocation for tool table */
533 if (tables[SMU_TABLE_PMSTATUSLOG].size) {
534 ret = amdgpu_bo_create_kernel(adev,
535 tables[SMU_TABLE_PMSTATUSLOG].size,
536 tables[SMU_TABLE_PMSTATUSLOG].align,
537 tables[SMU_TABLE_PMSTATUSLOG].domain,
538 &tables[SMU_TABLE_PMSTATUSLOG].bo,
539 &tables[SMU_TABLE_PMSTATUSLOG].mc_address,
540 &tables[SMU_TABLE_PMSTATUSLOG].cpu_addr);
542 dev_err(adev->dev, "VRAM allocation for tool table failed!\n");
547 /* VRAM allocation for driver table */
548 for (i = 0; i < SMU_TABLE_COUNT; i++) {
549 if (tables[i].size == 0)
552 if (i == SMU_TABLE_PMSTATUSLOG)
555 if (max_table_size < tables[i].size)
556 max_table_size = tables[i].size;
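/*
 * The driver table is a single staging BO sized for the largest SMC table;
 * all driver<->SMU table transfers go through it. SMU_TABLE_PMSTATUSLOG is
 * excluded since tools access it directly via its dedicated allocation
 * made above.
 */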
559 driver_table->size = max_table_size;
560 driver_table->align = PAGE_SIZE;
561 driver_table->domain = AMDGPU_GEM_DOMAIN_VRAM;
563 ret = amdgpu_bo_create_kernel(adev,
566 driver_table->domain,
568 &driver_table->mc_address,
569 &driver_table->cpu_addr);
571 dev_err(adev->dev, "VRAM allocation for driver table failed!\n");
572 if (tables[SMU_TABLE_PMSTATUSLOG].mc_address)
573 amdgpu_bo_free_kernel(&tables[SMU_TABLE_PMSTATUSLOG].bo,
574 &tables[SMU_TABLE_PMSTATUSLOG].mc_address,
575 &tables[SMU_TABLE_PMSTATUSLOG].cpu_addr);
581 static int smu_fini_fb_allocations(struct smu_context *smu)
583 struct smu_table_context *smu_table = &smu->smu_table;
584 struct smu_table *tables = smu_table->tables;
585 struct smu_table *driver_table = &(smu_table->driver_table);
587 if (tables[SMU_TABLE_PMSTATUSLOG].mc_address)
588 amdgpu_bo_free_kernel(&tables[SMU_TABLE_PMSTATUSLOG].bo,
589 &tables[SMU_TABLE_PMSTATUSLOG].mc_address,
590 &tables[SMU_TABLE_PMSTATUSLOG].cpu_addr);
592 amdgpu_bo_free_kernel(&driver_table->bo,
593 &driver_table->mc_address,
594 &driver_table->cpu_addr);
600 * smu_alloc_memory_pool - allocate memory pool in the system memory
602 * @smu: smu_context pointer
604 * This memory pool will be used by the SMC. The msgs SetSystemVirtualDramAddr
605 * and DramLogSetDramAddr can be used to notify the SMC of its location.
607 * Returns 0 on success, error on failure.
609 static int smu_alloc_memory_pool(struct smu_context *smu)
611 struct amdgpu_device *adev = smu->adev;
612 struct smu_table_context *smu_table = &smu->smu_table;
613 struct smu_table *memory_pool = &smu_table->memory_pool;
614 uint64_t pool_size = smu->pool_size;
617 if (pool_size == SMU_MEMORY_POOL_SIZE_ZERO)
620 memory_pool->size = pool_size;
621 memory_pool->align = PAGE_SIZE;
622 memory_pool->domain = AMDGPU_GEM_DOMAIN_GTT;
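/*
 * pool_size is derived from the amdgpu.smu_memory_pool_size module
 * parameter; zero disables the pool (checked above) and only the sizes
 * below are accepted.
 */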
625 case SMU_MEMORY_POOL_SIZE_256_MB:
626 case SMU_MEMORY_POOL_SIZE_512_MB:
627 case SMU_MEMORY_POOL_SIZE_1_GB:
628 case SMU_MEMORY_POOL_SIZE_2_GB:
629 ret = amdgpu_bo_create_kernel(adev,
634 &memory_pool->mc_address,
635 &memory_pool->cpu_addr);
637 dev_err(adev->dev, "Memory pool allocation for dramlog failed!\n");
646 static int smu_free_memory_pool(struct smu_context *smu)
648 struct smu_table_context *smu_table = &smu->smu_table;
649 struct smu_table *memory_pool = &smu_table->memory_pool;
651 if (memory_pool->size == SMU_MEMORY_POOL_SIZE_ZERO)
654 amdgpu_bo_free_kernel(&memory_pool->bo,
655 &memory_pool->mc_address,
656 &memory_pool->cpu_addr);
658 memset(memory_pool, 0, sizeof(struct smu_table));
663 static int smu_alloc_dummy_read_table(struct smu_context *smu)
665 struct smu_table_context *smu_table = &smu->smu_table;
666 struct smu_table *dummy_read_1_table =
667 &smu_table->dummy_read_1_table;
668 struct amdgpu_device *adev = smu->adev;
671 dummy_read_1_table->size = 0x40000;
672 dummy_read_1_table->align = PAGE_SIZE;
673 dummy_read_1_table->domain = AMDGPU_GEM_DOMAIN_VRAM;
675 ret = amdgpu_bo_create_kernel(adev,
676 dummy_read_1_table->size,
677 dummy_read_1_table->align,
678 dummy_read_1_table->domain,
679 &dummy_read_1_table->bo,
680 &dummy_read_1_table->mc_address,
681 &dummy_read_1_table->cpu_addr);
683 dev_err(adev->dev, "VRAM allocation for dummy read table failed!\n");
688 static void smu_free_dummy_read_table(struct smu_context *smu)
690 struct smu_table_context *smu_table = &smu->smu_table;
691 struct smu_table *dummy_read_1_table =
692 &smu_table->dummy_read_1_table;
695 amdgpu_bo_free_kernel(&dummy_read_1_table->bo,
696 &dummy_read_1_table->mc_address,
697 &dummy_read_1_table->cpu_addr);
699 memset(dummy_read_1_table, 0, sizeof(struct smu_table));
702 static int smu_smc_table_sw_init(struct smu_context *smu)
707 * Create the smu_table structure, and init smc tables such as
708 * TABLE_PPTABLE, TABLE_WATERMARKS, TABLE_SMU_METRICS, etc.
710 ret = smu_init_smc_tables(smu);
712 dev_err(smu->adev->dev, "Failed to init smc tables!\n");
717 * Create the smu_power_context structure, and allocate the smu_dpm_context
718 * and other contexts that fill in the smu_power_context data.
720 ret = smu_init_power(smu);
722 dev_err(smu->adev->dev, "smu_init_power failed!\n");
727 * allocate vram bos to store smc table contents.
729 ret = smu_init_fb_allocations(smu);
733 ret = smu_alloc_memory_pool(smu);
737 ret = smu_alloc_dummy_read_table(smu);
741 ret = smu_i2c_init(smu, &smu->adev->pm.smu_i2c);
748 static int smu_smc_table_sw_fini(struct smu_context *smu)
752 smu_i2c_fini(smu, &smu->adev->pm.smu_i2c);
754 smu_free_dummy_read_table(smu);
756 ret = smu_free_memory_pool(smu);
760 ret = smu_fini_fb_allocations(smu);
764 ret = smu_fini_power(smu);
766 dev_err(smu->adev->dev, "smu_fini_power failed!\n");
770 ret = smu_fini_smc_tables(smu);
772 dev_err(smu->adev->dev, "smu_fini_smc_tables failed!\n");
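/*
 * Thermal throttling events are raised in the SMU interrupt handler; the
 * actual logging is deferred to this worker so that it runs in process
 * context.
 */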
779 static void smu_throttling_logging_work_fn(struct work_struct *work)
781 struct smu_context *smu = container_of(work, struct smu_context,
782 throttling_logging_work);
784 smu_log_thermal_throttling(smu);
787 static void smu_interrupt_work_fn(struct work_struct *work)
789 struct smu_context *smu = container_of(work, struct smu_context,
792 mutex_lock(&smu->mutex);
794 if (smu->ppt_funcs && smu->ppt_funcs->interrupt_work)
795 smu->ppt_funcs->interrupt_work(smu);
797 mutex_unlock(&smu->mutex);
800 static int smu_sw_init(void *handle)
802 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
803 struct smu_context *smu = &adev->smu;
806 smu->pool_size = adev->pm.smu_prv_buffer_size;
807 smu->smu_feature.feature_num = SMU_FEATURE_MAX;
808 mutex_init(&smu->smu_feature.mutex);
809 bitmap_zero(smu->smu_feature.supported, SMU_FEATURE_MAX);
810 bitmap_zero(smu->smu_feature.enabled, SMU_FEATURE_MAX);
811 bitmap_zero(smu->smu_feature.allowed, SMU_FEATURE_MAX);
813 mutex_init(&smu->sensor_lock);
814 mutex_init(&smu->metrics_lock);
815 mutex_init(&smu->message_lock);
817 INIT_WORK(&smu->throttling_logging_work, smu_throttling_logging_work_fn);
818 INIT_WORK(&smu->interrupt_work, smu_interrupt_work_fn);
819 atomic64_set(&smu->throttle_int_counter, 0);
820 smu->watermarks_bitmap = 0;
821 smu->power_profile_mode = PP_SMC_POWER_PROFILE_BOOTUP_DEFAULT;
822 smu->default_power_profile_mode = PP_SMC_POWER_PROFILE_BOOTUP_DEFAULT;
824 atomic_set(&smu->smu_power.power_gate.vcn_gated, 1);
825 atomic_set(&smu->smu_power.power_gate.jpeg_gated, 1);
826 mutex_init(&smu->smu_power.power_gate.vcn_gate_lock);
827 mutex_init(&smu->smu_power.power_gate.jpeg_gate_lock);
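/*
 * Note: the workload_mask assignment below reads workload_prority[] before
 * the table is filled in. This is only safe because the smu context lives
 * in zero-initialized memory, so BOOTUP_DEFAULT maps to bit 0 either way.
 */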
829 smu->workload_mask = 1 << smu->workload_prority[PP_SMC_POWER_PROFILE_BOOTUP_DEFAULT];
830 smu->workload_prority[PP_SMC_POWER_PROFILE_BOOTUP_DEFAULT] = 0;
831 smu->workload_prority[PP_SMC_POWER_PROFILE_FULLSCREEN3D] = 1;
832 smu->workload_prority[PP_SMC_POWER_PROFILE_POWERSAVING] = 2;
833 smu->workload_prority[PP_SMC_POWER_PROFILE_VIDEO] = 3;
834 smu->workload_prority[PP_SMC_POWER_PROFILE_VR] = 4;
835 smu->workload_prority[PP_SMC_POWER_PROFILE_COMPUTE] = 5;
836 smu->workload_prority[PP_SMC_POWER_PROFILE_CUSTOM] = 6;
838 smu->workload_setting[0] = PP_SMC_POWER_PROFILE_BOOTUP_DEFAULT;
839 smu->workload_setting[1] = PP_SMC_POWER_PROFILE_FULLSCREEN3D;
840 smu->workload_setting[2] = PP_SMC_POWER_PROFILE_POWERSAVING;
841 smu->workload_setting[3] = PP_SMC_POWER_PROFILE_VIDEO;
842 smu->workload_setting[4] = PP_SMC_POWER_PROFILE_VR;
843 smu->workload_setting[5] = PP_SMC_POWER_PROFILE_COMPUTE;
844 smu->workload_setting[6] = PP_SMC_POWER_PROFILE_CUSTOM;
845 smu->display_config = &adev->pm.pm_display_cfg;
847 smu->smu_dpm.dpm_level = AMD_DPM_FORCED_LEVEL_AUTO;
848 smu->smu_dpm.requested_dpm_level = AMD_DPM_FORCED_LEVEL_AUTO;
850 ret = smu_init_microcode(smu);
852 dev_err(adev->dev, "Failed to load smu firmware!\n");
856 ret = smu_smc_table_sw_init(smu);
858 dev_err(adev->dev, "Failed to sw init smc table!\n");
862 ret = smu_register_irq_handler(smu);
864 dev_err(adev->dev, "Failed to register smc irq handler!\n");
871 static int smu_sw_fini(void *handle)
873 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
874 struct smu_context *smu = &adev->smu;
877 ret = smu_smc_table_sw_fini(smu);
879 dev_err(adev->dev, "Failed to sw fini smc table!\n");
883 smu_fini_microcode(smu);
888 static int smu_get_thermal_temperature_range(struct smu_context *smu)
890 struct amdgpu_device *adev = smu->adev;
891 struct smu_temperature_range *range =
895 if (!smu->ppt_funcs->get_thermal_temperature_range)
898 ret = smu->ppt_funcs->get_thermal_temperature_range(smu, range);
902 adev->pm.dpm.thermal.min_temp = range->min;
903 adev->pm.dpm.thermal.max_temp = range->max;
904 adev->pm.dpm.thermal.max_edge_emergency_temp = range->edge_emergency_max;
905 adev->pm.dpm.thermal.min_hotspot_temp = range->hotspot_min;
906 adev->pm.dpm.thermal.max_hotspot_crit_temp = range->hotspot_crit_max;
907 adev->pm.dpm.thermal.max_hotspot_emergency_temp = range->hotspot_emergency_max;
908 adev->pm.dpm.thermal.min_mem_temp = range->mem_min;
909 adev->pm.dpm.thermal.max_mem_crit_temp = range->mem_crit_max;
910 adev->pm.dpm.thermal.max_mem_emergency_temp = range->mem_emergency_max;
915 static int smu_smc_hw_setup(struct smu_context *smu)
917 struct amdgpu_device *adev = smu->adev;
918 uint32_t pcie_gen = 0, pcie_width = 0;
921 if (adev->in_suspend && smu_is_dpm_running(smu)) {
922 dev_info(adev->dev, "dpm has been enabled\n");
923 /* re-enabling the features is still needed specifically for these ASICs */
924 if ((adev->asic_type >= CHIP_SIENNA_CICHLID) &&
925 (adev->asic_type <= CHIP_DIMGREY_CAVEFISH))
926 ret = smu_system_features_control(smu, true);
930 ret = smu_init_display_count(smu, 0);
932 dev_info(adev->dev, "Failed to pre-set display count to 0!\n");
936 ret = smu_set_driver_table_location(smu);
938 dev_err(adev->dev, "Failed to SetDriverDramAddr!\n");
943 * Set PMSTATUSLOG table bo address with SetToolsDramAddr MSG for tools.
945 ret = smu_set_tool_table_location(smu);
947 dev_err(adev->dev, "Failed to SetToolsDramAddr!\n");
952 * The msgs SetSystemVirtualDramAddr and DramLogSetDramAddr can be used to notify the SMC of the memory pool location.
955 ret = smu_notify_memory_pool_location(smu);
957 dev_err(adev->dev, "Failed to SetDramLogDramAddr!\n");
961 /* smu_dump_pptable(smu); */
963 * Copy pptable bo in the vram to smc with SMU MSGs such as
964 * SetDriverDramAddr and TransferTableDram2Smu.
966 ret = smu_write_pptable(smu);
968 dev_err(adev->dev, "Failed to transfer pptable to SMC!\n");
972 /* issue Run*Btc msg */
973 ret = smu_run_btc(smu);
977 ret = smu_feature_set_allowed_mask(smu);
979 dev_err(adev->dev, "Failed to set driver allowed features mask!\n");
983 ret = smu_system_features_control(smu, true);
985 dev_err(adev->dev, "Failed to enable requested dpm features!\n");
989 if (!smu_is_dpm_running(smu))
990 dev_info(adev->dev, "dpm has been disabled\n");
992 if (adev->pm.pcie_gen_mask & CAIL_PCIE_LINK_SPEED_SUPPORT_GEN4)
994 else if (adev->pm.pcie_gen_mask & CAIL_PCIE_LINK_SPEED_SUPPORT_GEN3)
996 else if (adev->pm.pcie_gen_mask & CAIL_PCIE_LINK_SPEED_SUPPORT_GEN2)
998 else if (adev->pm.pcie_gen_mask & CAIL_PCIE_LINK_SPEED_SUPPORT_GEN1)
1001 /* Bit 31:16: LCLK DPM level. 0 is DPM0, and 1 is DPM1
1002 * Bit 15:8: PCIE GEN, 0 to 3 corresponds to GEN1 to GEN4
1003 * Bit 7:0: PCIE lane width, 1 to 7 corresponds to x1 to x32
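 * Example (illustrative): a Gen4-capable x16 link would be requested with
 * pcie_gen = 3 and pcie_width = 6.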
1005 if (adev->pm.pcie_mlw_mask & CAIL_PCIE_LINK_WIDTH_SUPPORT_X16)
1007 else if (adev->pm.pcie_mlw_mask & CAIL_PCIE_LINK_WIDTH_SUPPORT_X12)
1009 else if (adev->pm.pcie_mlw_mask & CAIL_PCIE_LINK_WIDTH_SUPPORT_X8)
1011 else if (adev->pm.pcie_mlw_mask & CAIL_PCIE_LINK_WIDTH_SUPPORT_X4)
1013 else if (adev->pm.pcie_mlw_mask & CAIL_PCIE_LINK_WIDTH_SUPPORT_X2)
1015 else if (adev->pm.pcie_mlw_mask & CAIL_PCIE_LINK_WIDTH_SUPPORT_X1)
1017 ret = smu_update_pcie_parameters(smu, pcie_gen, pcie_width);
1019 dev_err(adev->dev, "Attempt to override pcie params failed!\n");
1023 ret = smu_get_thermal_temperature_range(smu);
1025 dev_err(adev->dev, "Failed to get thermal temperature ranges!\n");
1029 ret = smu_enable_thermal_alert(smu);
1031 dev_err(adev->dev, "Failed to enable thermal alert!\n");
1036 * Set initial values (retrieved from vbios) in the dpm tables context, such
1037 * as gfxclk, memclk, dcefclk, etc. And enable the DPM feature for each clock domain.
1040 ret = smu_set_default_dpm_table(smu);
1042 dev_err(adev->dev, "Failed to setup default dpm clock tables!\n");
1046 ret = smu_notify_display_change(smu);
1051 * Set the min deep sleep dcefclk with the bootup value from vbios via
1052 * SetMinDeepSleepDcefclk MSG.
1054 ret = smu_set_min_dcef_deep_sleep(smu,
1055 smu->smu_table.boot_values.dcefclk / 100);
1062 static int smu_start_smc_engine(struct smu_context *smu)
1064 struct amdgpu_device *adev = smu->adev;
1067 if (adev->firmware.load_type != AMDGPU_FW_LOAD_PSP) {
1068 if (adev->asic_type < CHIP_NAVI10) {
1069 if (smu->ppt_funcs->load_microcode) {
1070 ret = smu->ppt_funcs->load_microcode(smu);
1077 if (smu->ppt_funcs->check_fw_status) {
1078 ret = smu->ppt_funcs->check_fw_status(smu);
1080 dev_err(adev->dev, "SMC is not ready\n");
1086 * Send msg GetDriverIfVersion to check if the return value is equal
1087 * to the DRIVER_IF_VERSION in the smc header.
1089 ret = smu_check_fw_version(smu);
1096 static int smu_hw_init(void *handle)
1099 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
1100 struct smu_context *smu = &adev->smu;
1102 if (amdgpu_sriov_vf(adev) && !amdgpu_sriov_is_pp_one_vf(adev)) {
1103 smu->pm_enabled = false;
1107 ret = smu_start_smc_engine(smu);
1109 dev_err(adev->dev, "SMC engine is not correctly up!\n");
1114 smu_powergate_sdma(&adev->smu, false);
1115 smu_dpm_set_vcn_enable(smu, true);
1116 smu_dpm_set_jpeg_enable(smu, true);
1117 smu_set_gfx_cgpg(&adev->smu, true);
1120 if (!smu->pm_enabled)
1123 /* get boot_values from vbios to set revision, gfxclk, etc. */
1124 ret = smu_get_vbios_bootup_values(smu);
1126 dev_err(adev->dev, "Failed to get VBIOS boot clock values!\n");
1130 ret = smu_setup_pptable(smu);
1132 dev_err(adev->dev, "Failed to setup pptable!\n");
1136 ret = smu_get_driver_allowed_feature_mask(smu);
1140 ret = smu_smc_hw_setup(smu);
1142 dev_err(adev->dev, "Failed to setup smc hw!\n");
1147 * Move maximum sustainable clock retrieving here considering:
1148 * 1. It is not needed on resume (from S3).
1149 * 2. DAL settings come between .hw_init and .late_init of SMU.
1150 * And DAL needs to know the maximum sustainable clocks. Thus
1151 * it cannot be put in .late_init().
1153 ret = smu_init_max_sustainable_clocks(smu);
1155 dev_err(adev->dev, "Failed to init max sustainable clocks!\n");
1159 adev->pm.dpm_enabled = true;
1161 dev_info(adev->dev, "SMU is initialized successfully!\n");
1166 static int smu_disable_dpms(struct smu_context *smu)
1168 struct amdgpu_device *adev = smu->adev;
1170 bool use_baco = !smu->is_apu &&
1171 ((amdgpu_in_reset(adev) &&
1172 (amdgpu_asic_reset_method(adev) == AMD_RESET_METHOD_BACO)) ||
1173 ((adev->in_runpm || adev->in_hibernate) && amdgpu_asic_supports_baco(adev)));
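/*
 * I.e. BACO remains in use for a BACO-based GPU reset, or for runtime PM /
 * hibernation on a dGPU that supports BACO; APUs never use it.
 */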
1176 * For custom pptable uploading, skip the DPM features
1177 * disable process on Navi1x ASICs.
1178 * - As the gfx related features are under control of
1179 * RLC on those ASICs. RLC reinitialization will be
1180 * needed to reenable them. That will cost much more effort.
1183 * - SMU firmware can handle the DPM reenablement properly.
1186 if (smu->uploading_custom_pp_table &&
1187 (adev->asic_type >= CHIP_NAVI10) &&
1188 (adev->asic_type <= CHIP_DIMGREY_CAVEFISH))
1192 * For Sienna_Cichlid, PMFW will handle the features disablement properly
1193 * on BACO in. Driver involvement is unnecessary.
1195 if ((adev->asic_type == CHIP_SIENNA_CICHLID) &&
1200 * For gpu reset, runpm and hibernation through BACO,
1201 * BACO feature has to be kept enabled.
1203 if (use_baco && smu_feature_is_enabled(smu, SMU_FEATURE_BACO_BIT)) {
1204 ret = smu_disable_all_features_with_exception(smu,
1205 SMU_FEATURE_BACO_BIT);
1207 dev_err(adev->dev, "Failed to disable smu features except BACO.\n");
1209 ret = smu_system_features_control(smu, false);
1211 dev_err(adev->dev, "Failed to disable smu features.\n");
1214 if (adev->asic_type >= CHIP_NAVI10 &&
1215 adev->gfx.rlc.funcs->stop)
1216 adev->gfx.rlc.funcs->stop(adev);
1221 static int smu_smc_hw_cleanup(struct smu_context *smu)
1223 struct amdgpu_device *adev = smu->adev;
1226 cancel_work_sync(&smu->throttling_logging_work);
1227 cancel_work_sync(&smu->interrupt_work);
1229 ret = smu_disable_thermal_alert(smu);
1231 dev_err(adev->dev, "Failed to disable thermal alert!\n");
1235 ret = smu_disable_dpms(smu);
1237 dev_err(adev->dev, "Failed to disable dpm features!\n");
1244 static int smu_hw_fini(void *handle)
1246 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
1247 struct smu_context *smu = &adev->smu;
1249 if (amdgpu_sriov_vf(adev) && !amdgpu_sriov_is_pp_one_vf(adev))
1253 smu_powergate_sdma(&adev->smu, true);
1254 smu_dpm_set_vcn_enable(smu, false);
1255 smu_dpm_set_jpeg_enable(smu, false);
1258 if (!smu->pm_enabled)
1261 adev->pm.dpm_enabled = false;
1263 return smu_smc_hw_cleanup(smu);
1266 int smu_reset(struct smu_context *smu)
1268 struct amdgpu_device *adev = smu->adev;
1271 amdgpu_gfx_off_ctrl(smu->adev, false);
1273 ret = smu_hw_fini(adev);
1277 ret = smu_hw_init(adev);
1281 ret = smu_late_init(adev);
1285 amdgpu_gfx_off_ctrl(smu->adev, true);
1290 static int smu_suspend(void *handle)
1292 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
1293 struct smu_context *smu = &adev->smu;
1296 if (amdgpu_sriov_vf(adev) && !amdgpu_sriov_is_pp_one_vf(adev))
1299 if (!smu->pm_enabled)
1302 adev->pm.dpm_enabled = false;
1304 ret = smu_smc_hw_cleanup(smu);
1308 smu->watermarks_bitmap &= ~(WATERMARKS_LOADED);
1311 smu_set_gfx_cgpg(&adev->smu, false);
1316 static int smu_resume(void *handle)
1319 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
1320 struct smu_context *smu = &adev->smu;
1322 if (amdgpu_sriov_vf(adev) && !amdgpu_sriov_is_pp_one_vf(adev))
1325 if (!smu->pm_enabled)
1328 dev_info(adev->dev, "SMU is resuming...\n");
1330 ret = smu_start_smc_engine(smu);
1332 dev_err(adev->dev, "SMC engine is not correctly up!\n");
1336 ret = smu_smc_hw_setup(smu);
1338 dev_err(adev->dev, "Failed to setup smc hw!\n");
1343 smu_set_gfx_cgpg(&adev->smu, true);
1345 smu->disable_uclk_switch = 0;
1347 adev->pm.dpm_enabled = true;
1349 dev_info(adev->dev, "SMU is resumed successfully!\n");
1354 int smu_display_configuration_change(struct smu_context *smu,
1355 const struct amd_pp_display_configuration *display_config)
1358 int num_of_active_display = 0;
1360 if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
1363 if (!display_config)
1366 mutex_lock(&smu->mutex);
1368 smu_set_min_dcef_deep_sleep(smu,
1369 display_config->min_dcef_deep_sleep_set_clk / 100);
1371 for (index = 0; index < display_config->num_path_including_non_display; index++) {
1372 if (display_config->displays[index].controller_id != 0)
1373 num_of_active_display++;
1376 mutex_unlock(&smu->mutex);
1381 static int smu_set_clockgating_state(void *handle,
1382 enum amd_clockgating_state state)
1387 static int smu_set_powergating_state(void *handle,
1388 enum amd_powergating_state state)
1393 static int smu_enable_umd_pstate(void *handle,
1394 enum amd_dpm_forced_level *level)
1396 uint32_t profile_mode_mask = AMD_DPM_FORCED_LEVEL_PROFILE_STANDARD |
1397 AMD_DPM_FORCED_LEVEL_PROFILE_MIN_SCLK |
1398 AMD_DPM_FORCED_LEVEL_PROFILE_MIN_MCLK |
1399 AMD_DPM_FORCED_LEVEL_PROFILE_PEAK;
1401 struct smu_context *smu = (struct smu_context*)(handle);
1402 struct smu_dpm_context *smu_dpm_ctx = &(smu->smu_dpm);
1404 if (!smu->is_apu && !smu_dpm_ctx->dpm_context)
1407 if (!(smu_dpm_ctx->dpm_level & profile_mode_mask)) {
1408 /* enter umd pstate, save current level, disable gfx cg */
1409 if (*level & profile_mode_mask) {
1410 smu_dpm_ctx->saved_dpm_level = smu_dpm_ctx->dpm_level;
1411 smu_dpm_ctx->enable_umd_pstate = true;
1412 smu_gpo_control(smu, false);
1413 amdgpu_device_ip_set_powergating_state(smu->adev,
1414 AMD_IP_BLOCK_TYPE_GFX,
1415 AMD_PG_STATE_UNGATE);
1416 amdgpu_device_ip_set_clockgating_state(smu->adev,
1417 AMD_IP_BLOCK_TYPE_GFX,
1418 AMD_CG_STATE_UNGATE);
1419 smu_gfx_ulv_control(smu, false);
1420 smu_deep_sleep_control(smu, false);
1421 amdgpu_asic_update_umd_stable_pstate(smu->adev, true);
1424 /* exit umd pstate, restore level, enable gfx cg */
1425 if (!(*level & profile_mode_mask)) {
1426 if (*level == AMD_DPM_FORCED_LEVEL_PROFILE_EXIT)
1427 *level = smu_dpm_ctx->saved_dpm_level;
1428 smu_dpm_ctx->enable_umd_pstate = false;
1429 amdgpu_asic_update_umd_stable_pstate(smu->adev, false);
1430 smu_deep_sleep_control(smu, true);
1431 smu_gfx_ulv_control(smu, true);
1432 amdgpu_device_ip_set_clockgating_state(smu->adev,
1433 AMD_IP_BLOCK_TYPE_GFX,
1435 amdgpu_device_ip_set_powergating_state(smu->adev,
1436 AMD_IP_BLOCK_TYPE_GFX,
1438 smu_gpo_control(smu, true);
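/*
 * Note: the exit path above re-enables the power features in the exact
 * reverse order of the enter path.
 */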
1445 static int smu_adjust_power_state_dynamic(struct smu_context *smu,
1446 enum amd_dpm_forced_level level,
1447 bool skip_display_settings)
1452 struct smu_dpm_context *smu_dpm_ctx = &(smu->smu_dpm);
1454 if (!skip_display_settings) {
1455 ret = smu_display_config_changed(smu);
1457 dev_err(smu->adev->dev, "Failed to change display config!\n");
1462 ret = smu_apply_clocks_adjust_rules(smu);
1464 dev_err(smu->adev->dev, "Failed to apply clocks adjust rules!\n");
1468 if (!skip_display_settings) {
1469 ret = smu_notify_smc_display_config(smu);
1471 dev_err(smu->adev->dev, "Failed to notify smc display config!\n");
1476 if (smu_dpm_ctx->dpm_level != level) {
1477 ret = smu_asic_set_performance_level(smu, level);
1479 dev_err(smu->adev->dev, "Failed to set performance level!\n");
1483 /* update the saved copy */
1484 smu_dpm_ctx->dpm_level = level;
1487 if (smu_dpm_ctx->dpm_level != AMD_DPM_FORCED_LEVEL_MANUAL) {
1488 index = fls(smu->workload_mask);
1489 index = index > 0 && index <= WORKLOAD_POLICY_MAX ? index - 1 : 0;
1490 workload = smu->workload_setting[index];
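/*
 * fls() returns the most significant set bit (1-based), so the
 * highest-priority profile currently requested in workload_mask wins.
 */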
1492 if (smu->power_profile_mode != workload)
1493 smu_set_power_profile_mode(smu, &workload, 0, false);
1499 int smu_handle_task(struct smu_context *smu,
1500 enum amd_dpm_forced_level level,
1501 enum amd_pp_task task_id,
1506 if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
1510 mutex_lock(&smu->mutex);
1513 case AMD_PP_TASK_DISPLAY_CONFIG_CHANGE:
1514 ret = smu_pre_display_config_changed(smu);
1517 ret = smu_adjust_power_state_dynamic(smu, level, false);
1519 case AMD_PP_TASK_COMPLETE_INIT:
1520 case AMD_PP_TASK_READJUST_POWER_STATE:
1521 ret = smu_adjust_power_state_dynamic(smu, level, true);
1529 mutex_unlock(&smu->mutex);
1534 int smu_switch_power_profile(struct smu_context *smu,
1535 enum PP_SMC_POWER_PROFILE type,
1538 struct smu_dpm_context *smu_dpm_ctx = &(smu->smu_dpm);
1542 if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
1545 if (!(type < PP_SMC_POWER_PROFILE_CUSTOM))
1548 mutex_lock(&smu->mutex);
1551 smu->workload_mask &= ~(1 << smu->workload_prority[type]);
1552 index = fls(smu->workload_mask);
1553 index = index > 0 && index <= WORKLOAD_POLICY_MAX ? index - 1 : 0;
1554 workload = smu->workload_setting[index];
1556 smu->workload_mask |= (1 << smu->workload_prority[type]);
1557 index = fls(smu->workload_mask);
1558 index = index > 0 && index <= WORKLOAD_POLICY_MAX ? index - 1 : 0;
1559 workload = smu->workload_setting[index];
1562 if (smu_dpm_ctx->dpm_level != AMD_DPM_FORCED_LEVEL_MANUAL)
1563 smu_set_power_profile_mode(smu, &workload, 0, false);
1565 mutex_unlock(&smu->mutex);
1570 enum amd_dpm_forced_level smu_get_performance_level(struct smu_context *smu)
1572 struct smu_dpm_context *smu_dpm_ctx = &(smu->smu_dpm);
1573 enum amd_dpm_forced_level level;
1575 if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
1578 if (!smu->is_apu && !smu_dpm_ctx->dpm_context)
1581 mutex_lock(&(smu->mutex));
1582 level = smu_dpm_ctx->dpm_level;
1583 mutex_unlock(&(smu->mutex));
1588 int smu_force_performance_level(struct smu_context *smu, enum amd_dpm_forced_level level)
1590 struct smu_dpm_context *smu_dpm_ctx = &(smu->smu_dpm);
1593 if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
1596 if (!smu->is_apu && !smu_dpm_ctx->dpm_context)
1599 mutex_lock(&smu->mutex);
1601 ret = smu_enable_umd_pstate(smu, &level);
1603 mutex_unlock(&smu->mutex);
1607 ret = smu_handle_task(smu, level,
1608 AMD_PP_TASK_READJUST_POWER_STATE,
1611 mutex_unlock(&smu->mutex);
1616 int smu_set_display_count(struct smu_context *smu, uint32_t count)
1620 if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
1623 mutex_lock(&smu->mutex);
1624 ret = smu_init_display_count(smu, count);
1625 mutex_unlock(&smu->mutex);
1630 int smu_force_clk_levels(struct smu_context *smu,
1631 enum smu_clk_type clk_type,
1634 struct smu_dpm_context *smu_dpm_ctx = &(smu->smu_dpm);
1637 if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
1640 if (smu_dpm_ctx->dpm_level != AMD_DPM_FORCED_LEVEL_MANUAL) {
1641 dev_dbg(smu->adev->dev, "force clock level is for dpm manual mode only.\n");
1645 mutex_lock(&smu->mutex);
1647 if (smu->ppt_funcs && smu->ppt_funcs->force_clk_levels)
1648 ret = smu->ppt_funcs->force_clk_levels(smu, clk_type, mask);
1650 mutex_unlock(&smu->mutex);
1656 * On system suspend or reset, the dpm_enabled
1657 * flag will be cleared, so that the SMU services which
1658 * are not supported in those states get gated.
1659 * However, the mp1 state setting should still be granted
1660 * even with dpm_enabled cleared.
1662 int smu_set_mp1_state(struct smu_context *smu,
1663 enum pp_mp1_state mp1_state)
1668 if (!smu->pm_enabled)
1671 mutex_lock(&smu->mutex);
1673 switch (mp1_state) {
1674 case PP_MP1_STATE_SHUTDOWN:
1675 msg = SMU_MSG_PrepareMp1ForShutdown;
1677 case PP_MP1_STATE_UNLOAD:
1678 msg = SMU_MSG_PrepareMp1ForUnload;
1680 case PP_MP1_STATE_RESET:
1681 msg = SMU_MSG_PrepareMp1ForReset;
1683 case PP_MP1_STATE_NONE:
1685 mutex_unlock(&smu->mutex);
1689 ret = smu_send_smc_msg(smu, msg, NULL);
1690 /* some asics may not support those messages */
1694 dev_err(smu->adev->dev, "[PrepareMp1] Failed!\n");
1696 mutex_unlock(&smu->mutex);
1701 int smu_set_df_cstate(struct smu_context *smu,
1702 enum pp_df_cstate state)
1706 if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
1709 if (!smu->ppt_funcs || !smu->ppt_funcs->set_df_cstate)
1712 mutex_lock(&smu->mutex);
1714 ret = smu->ppt_funcs->set_df_cstate(smu, state);
1716 dev_err(smu->adev->dev, "[SetDfCstate] failed!\n");
1718 mutex_unlock(&smu->mutex);
1723 int smu_allow_xgmi_power_down(struct smu_context *smu, bool en)
1727 if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
1730 if (!smu->ppt_funcs || !smu->ppt_funcs->allow_xgmi_power_down)
1733 mutex_lock(&smu->mutex);
1735 ret = smu->ppt_funcs->allow_xgmi_power_down(smu, en);
1737 dev_err(smu->adev->dev, "[AllowXgmiPowerDown] failed!\n");
1739 mutex_unlock(&smu->mutex);
1744 int smu_write_watermarks_table(struct smu_context *smu)
1748 if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
1751 mutex_lock(&smu->mutex);
1753 ret = smu_set_watermarks_table(smu, NULL);
1755 mutex_unlock(&smu->mutex);
1760 int smu_set_watermarks_for_clock_ranges(struct smu_context *smu,
1761 struct pp_smu_wm_range_sets *clock_ranges)
1765 if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
1768 if (smu->disable_watermark)
1771 mutex_lock(&smu->mutex);
1773 ret = smu_set_watermarks_table(smu, clock_ranges);
1775 mutex_unlock(&smu->mutex);
1780 int smu_set_ac_dc(struct smu_context *smu)
1784 if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
1787 /* controlled by firmware */
1788 if (smu->dc_controlled_by_gpio)
1791 mutex_lock(&smu->mutex);
1792 ret = smu_set_power_source(smu,
1793 smu->adev->pm.ac_power ? SMU_POWER_SOURCE_AC :
1794 SMU_POWER_SOURCE_DC);
1796 dev_err(smu->adev->dev, "Failed to switch to %s mode!\n",
1797 smu->adev->pm.ac_power ? "AC" : "DC");
1798 mutex_unlock(&smu->mutex);
1803 const struct amd_ip_funcs smu_ip_funcs = {
1805 .early_init = smu_early_init,
1806 .late_init = smu_late_init,
1807 .sw_init = smu_sw_init,
1808 .sw_fini = smu_sw_fini,
1809 .hw_init = smu_hw_init,
1810 .hw_fini = smu_hw_fini,
1811 .suspend = smu_suspend,
1812 .resume = smu_resume,
1814 .check_soft_reset = NULL,
1815 .wait_for_idle = NULL,
1817 .set_clockgating_state = smu_set_clockgating_state,
1818 .set_powergating_state = smu_set_powergating_state,
1819 .enable_umd_pstate = smu_enable_umd_pstate,
1822 const struct amdgpu_ip_block_version smu_v11_0_ip_block =
1824 .type = AMD_IP_BLOCK_TYPE_SMC,
1828 .funcs = &smu_ip_funcs,
1831 const struct amdgpu_ip_block_version smu_v12_0_ip_block =
1833 .type = AMD_IP_BLOCK_TYPE_SMC,
1837 .funcs = &smu_ip_funcs,
1840 int smu_load_microcode(struct smu_context *smu)
1844 if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
1847 mutex_lock(&smu->mutex);
1849 if (smu->ppt_funcs->load_microcode)
1850 ret = smu->ppt_funcs->load_microcode(smu);
1852 mutex_unlock(&smu->mutex);
1857 int smu_check_fw_status(struct smu_context *smu)
1861 if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
1864 mutex_lock(&smu->mutex);
1866 if (smu->ppt_funcs->check_fw_status)
1867 ret = smu->ppt_funcs->check_fw_status(smu);
1869 mutex_unlock(&smu->mutex);
1874 int smu_set_gfx_cgpg(struct smu_context *smu, bool enabled)
1878 mutex_lock(&smu->mutex);
1880 if (smu->ppt_funcs->set_gfx_cgpg)
1881 ret = smu->ppt_funcs->set_gfx_cgpg(smu, enabled);
1883 mutex_unlock(&smu->mutex);
1888 int smu_set_fan_speed_rpm(struct smu_context *smu, uint32_t speed)
1892 if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
1895 mutex_lock(&smu->mutex);
1897 if (smu->ppt_funcs->set_fan_speed_rpm)
1898 ret = smu->ppt_funcs->set_fan_speed_rpm(smu, speed);
1900 mutex_unlock(&smu->mutex);
1905 int smu_get_power_limit(struct smu_context *smu,
1909 if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
1912 mutex_lock(&smu->mutex);
1914 *limit = (max_setting ? smu->max_power_limit : smu->current_power_limit);
1916 mutex_unlock(&smu->mutex);
1921 int smu_set_power_limit(struct smu_context *smu, uint32_t limit)
1925 if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
1928 mutex_lock(&smu->mutex);
1930 if (limit > smu->max_power_limit) {
1931 dev_err(smu->adev->dev,
1932 "New power limit (%d) is over the max allowed %d\n",
1933 limit, smu->max_power_limit);
1938 limit = smu->current_power_limit;
1940 if (smu->ppt_funcs->set_power_limit)
1941 ret = smu->ppt_funcs->set_power_limit(smu, limit);
1944 mutex_unlock(&smu->mutex);
1949 int smu_print_clk_levels(struct smu_context *smu, enum smu_clk_type clk_type, char *buf)
1953 if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
1956 mutex_lock(&smu->mutex);
1958 if (smu->ppt_funcs->print_clk_levels)
1959 ret = smu->ppt_funcs->print_clk_levels(smu, clk_type, buf);
1961 mutex_unlock(&smu->mutex);
1966 int smu_od_edit_dpm_table(struct smu_context *smu,
1967 enum PP_OD_DPM_TABLE_COMMAND type,
1968 long *input, uint32_t size)
1972 if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
1975 mutex_lock(&smu->mutex);
1977 if (smu->ppt_funcs->od_edit_dpm_table) {
1978 ret = smu->ppt_funcs->od_edit_dpm_table(smu, type, input, size);
1979 if (!ret && (type == PP_OD_COMMIT_DPM_TABLE))
1980 ret = smu_handle_task(smu,
1981 smu->smu_dpm.dpm_level,
1982 AMD_PP_TASK_READJUST_POWER_STATE,
1986 mutex_unlock(&smu->mutex);
1991 int smu_read_sensor(struct smu_context *smu,
1992 enum amd_pp_sensors sensor,
1993 void *data, uint32_t *size)
1995 struct smu_umd_pstate_table *pstate_table =
1999 if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
2005 mutex_lock(&smu->mutex);
2007 if (smu->ppt_funcs->read_sensor)
2008 if (!smu->ppt_funcs->read_sensor(smu, sensor, data, size))
2012 case AMDGPU_PP_SENSOR_STABLE_PSTATE_SCLK:
2013 *((uint32_t *)data) = pstate_table->gfxclk_pstate.standard * 100;
2016 case AMDGPU_PP_SENSOR_STABLE_PSTATE_MCLK:
2017 *((uint32_t *)data) = pstate_table->uclk_pstate.standard * 100;
2020 case AMDGPU_PP_SENSOR_ENABLED_SMC_FEATURES_MASK:
2021 ret = smu_feature_get_enabled_mask(smu, (uint32_t *)data, 2);
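/* the enabled-feature mask is 64 bits wide, returned as two u32 words */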
2024 case AMDGPU_PP_SENSOR_UVD_POWER:
2025 *(uint32_t *)data = smu_feature_is_enabled(smu, SMU_FEATURE_DPM_UVD_BIT) ? 1 : 0;
2028 case AMDGPU_PP_SENSOR_VCE_POWER:
2029 *(uint32_t *)data = smu_feature_is_enabled(smu, SMU_FEATURE_DPM_VCE_BIT) ? 1 : 0;
2032 case AMDGPU_PP_SENSOR_VCN_POWER_STATE:
2033 *(uint32_t *)data = atomic_read(&smu->smu_power.power_gate.vcn_gated) ? 0 : 1;
2036 case AMDGPU_PP_SENSOR_MIN_FAN_RPM:
2037 *(uint32_t *)data = 0;
2047 mutex_unlock(&smu->mutex);
2052 int smu_get_power_profile_mode(struct smu_context *smu, char *buf)
2056 if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
2059 mutex_lock(&smu->mutex);
2061 if (smu->ppt_funcs->get_power_profile_mode)
2062 ret = smu->ppt_funcs->get_power_profile_mode(smu, buf);
2064 mutex_unlock(&smu->mutex);
2069 int smu_set_power_profile_mode(struct smu_context *smu,
2071 uint32_t param_size,
2076 if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
2080 mutex_lock(&smu->mutex);
2082 if (smu->ppt_funcs->set_power_profile_mode)
2083 ret = smu->ppt_funcs->set_power_profile_mode(smu, param, param_size);
2086 mutex_unlock(&smu->mutex);
2092 int smu_get_fan_control_mode(struct smu_context *smu)
2096 if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
2099 mutex_lock(&smu->mutex);
2101 if (smu->ppt_funcs->get_fan_control_mode)
2102 ret = smu->ppt_funcs->get_fan_control_mode(smu);
2104 mutex_unlock(&smu->mutex);
2109 int smu_set_fan_control_mode(struct smu_context *smu, int value)
2113 if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
2116 mutex_lock(&smu->mutex);
2118 if (smu->ppt_funcs->set_fan_control_mode)
2119 ret = smu->ppt_funcs->set_fan_control_mode(smu, value);
2121 mutex_unlock(&smu->mutex);
2126 int smu_get_fan_speed_percent(struct smu_context *smu, uint32_t *speed)
2130 uint32_t current_rpm;
2132 if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
2135 mutex_lock(&smu->mutex);
2137 if (smu->ppt_funcs->get_fan_speed_rpm) {
2138 ret = smu->ppt_funcs->get_fan_speed_rpm(smu, &current_rpm);
2140 percent = current_rpm * 100 / smu->fan_max_rpm;
2141 *speed = percent > 100 ? 100 : percent;
2145 mutex_unlock(&smu->mutex);
2151 int smu_set_fan_speed_percent(struct smu_context *smu, uint32_t speed)
2156 if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
2159 mutex_lock(&smu->mutex);
2161 if (smu->ppt_funcs->set_fan_speed_rpm) {
2164 rpm = speed * smu->fan_max_rpm / 100;
2165 ret = smu->ppt_funcs->set_fan_speed_rpm(smu, rpm);
2168 mutex_unlock(&smu->mutex);
2173 int smu_get_fan_speed_rpm(struct smu_context *smu, uint32_t *speed)
2177 if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
2180 mutex_lock(&smu->mutex);
2182 if (smu->ppt_funcs->get_fan_speed_rpm)
2183 ret = smu->ppt_funcs->get_fan_speed_rpm(smu, speed);
2185 mutex_unlock(&smu->mutex);
2190 int smu_set_deep_sleep_dcefclk(struct smu_context *smu, int clk)
2194 if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
2197 mutex_lock(&smu->mutex);
2199 ret = smu_set_min_dcef_deep_sleep(smu, clk);
2201 mutex_unlock(&smu->mutex);
2206 int smu_get_clock_by_type_with_latency(struct smu_context *smu,
2207 enum smu_clk_type clk_type,
2208 struct pp_clock_levels_with_latency *clocks)
2212 if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
2215 mutex_lock(&smu->mutex);
2217 if (smu->ppt_funcs->get_clock_by_type_with_latency)
2218 ret = smu->ppt_funcs->get_clock_by_type_with_latency(smu, clk_type, clocks);
2220 mutex_unlock(&smu->mutex);
2225 int smu_display_clock_voltage_request(struct smu_context *smu,
2226 struct pp_display_clock_request *clock_req)
2230 if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
2233 mutex_lock(&smu->mutex);
2235 if (smu->ppt_funcs->display_clock_voltage_request)
2236 ret = smu->ppt_funcs->display_clock_voltage_request(smu, clock_req);
2238 mutex_unlock(&smu->mutex);
2244 int smu_display_disable_memory_clock_switch(struct smu_context *smu, bool disable_memory_clock_switch)
2248 if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
2251 mutex_lock(&smu->mutex);
2253 if (smu->ppt_funcs->display_disable_memory_clock_switch)
2254 ret = smu->ppt_funcs->display_disable_memory_clock_switch(smu, disable_memory_clock_switch);
2256 mutex_unlock(&smu->mutex);
2261 int smu_set_xgmi_pstate(struct smu_context *smu,
2266 if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
2269 mutex_lock(&smu->mutex);
2271 if (smu->ppt_funcs->set_xgmi_pstate)
2272 ret = smu->ppt_funcs->set_xgmi_pstate(smu, pstate);
2274 mutex_unlock(&smu->mutex);
2277 dev_err(smu->adev->dev, "Failed to set XGMI pstate!\n");
2282 int smu_set_azalia_d3_pme(struct smu_context *smu)
2286 if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
2289 mutex_lock(&smu->mutex);
2291 if (smu->ppt_funcs->set_azalia_d3_pme)
2292 ret = smu->ppt_funcs->set_azalia_d3_pme(smu);
2294 mutex_unlock(&smu->mutex);
2300 * On system suspend or reset, the dpm_enabled
2301 * flag will be cleared, so that the SMU services which
2302 * are not supported in those states get gated.
2304 * However, the baco/mode1 reset should still be granted
2305 * as they are still supported and necessary.
2307 bool smu_baco_is_support(struct smu_context *smu)
2311 if (!smu->pm_enabled)
2314 mutex_lock(&smu->mutex);
2316 if (smu->ppt_funcs && smu->ppt_funcs->baco_is_support)
2317 ret = smu->ppt_funcs->baco_is_support(smu);
2319 mutex_unlock(&smu->mutex);
2324 int smu_baco_get_state(struct smu_context *smu, enum smu_baco_state *state)
2326 if (!smu->ppt_funcs->baco_get_state)
2329 mutex_lock(&smu->mutex);
2330 *state = smu->ppt_funcs->baco_get_state(smu);
2331 mutex_unlock(&smu->mutex);
2336 int smu_baco_enter(struct smu_context *smu)
2340 if (!smu->pm_enabled)
2343 mutex_lock(&smu->mutex);
2345 if (smu->ppt_funcs->baco_enter)
2346 ret = smu->ppt_funcs->baco_enter(smu);
2348 mutex_unlock(&smu->mutex);
2351 dev_err(smu->adev->dev, "Failed to enter BACO state!\n");
2356 int smu_baco_exit(struct smu_context *smu)
2360 if (!smu->pm_enabled)
2363 mutex_lock(&smu->mutex);
2365 if (smu->ppt_funcs->baco_exit)
2366 ret = smu->ppt_funcs->baco_exit(smu);
2368 mutex_unlock(&smu->mutex);
2371 dev_err(smu->adev->dev, "Failed to exit BACO state!\n");
2376 bool smu_mode1_reset_is_support(struct smu_context *smu)
2380 if (!smu->pm_enabled)
2383 mutex_lock(&smu->mutex);
2385 if (smu->ppt_funcs && smu->ppt_funcs->mode1_reset_is_support)
2386 ret = smu->ppt_funcs->mode1_reset_is_support(smu);
2388 mutex_unlock(&smu->mutex);
2393 int smu_mode1_reset(struct smu_context *smu)
2397 if (!smu->pm_enabled)
2400 mutex_lock(&smu->mutex);
2402 if (smu->ppt_funcs->mode1_reset)
2403 ret = smu->ppt_funcs->mode1_reset(smu);
2405 mutex_unlock(&smu->mutex);
2410 int smu_mode2_reset(struct smu_context *smu)
2414 if (!smu->pm_enabled)
2417 mutex_lock(&smu->mutex);
2419 if (smu->ppt_funcs->mode2_reset)
2420 ret = smu->ppt_funcs->mode2_reset(smu);
2422 mutex_unlock(&smu->mutex);
2425 dev_err(smu->adev->dev, "Mode2 reset failed!\n");
2430 int smu_get_max_sustainable_clocks_by_dc(struct smu_context *smu,
2431 struct pp_smu_nv_clock_table *max_clocks)
2435 if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
2438 mutex_lock(&smu->mutex);
2440 if (smu->ppt_funcs->get_max_sustainable_clocks_by_dc)
2441 ret = smu->ppt_funcs->get_max_sustainable_clocks_by_dc(smu, max_clocks);
2443 mutex_unlock(&smu->mutex);
2448 int smu_get_uclk_dpm_states(struct smu_context *smu,
2449 unsigned int *clock_values_in_khz,
2450 unsigned int *num_states)
2454 if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
2457 mutex_lock(&smu->mutex);
2459 if (smu->ppt_funcs->get_uclk_dpm_states)
2460 ret = smu->ppt_funcs->get_uclk_dpm_states(smu, clock_values_in_khz, num_states);
2462 mutex_unlock(&smu->mutex);
2467 enum amd_pm_state_type smu_get_current_power_state(struct smu_context *smu)
2469 enum amd_pm_state_type pm_state = POWER_STATE_TYPE_DEFAULT;
2471 if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
2474 mutex_lock(&smu->mutex);
2476 if (smu->ppt_funcs->get_current_power_state)
2477 pm_state = smu->ppt_funcs->get_current_power_state(smu);
2479 mutex_unlock(&smu->mutex);
2484 int smu_get_dpm_clock_table(struct smu_context *smu,
2485 struct dpm_clocks *clock_table)
2489 if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
2492 mutex_lock(&smu->mutex);
2494 if (smu->ppt_funcs->get_dpm_clock_table)
2495 ret = smu->ppt_funcs->get_dpm_clock_table(smu, clock_table);
2497 mutex_unlock(&smu->mutex);
2502 ssize_t smu_sys_get_gpu_metrics(struct smu_context *smu,
2507 if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
2510 if (!smu->ppt_funcs->get_gpu_metrics)
2513 mutex_lock(&smu->mutex);
2515 size = smu->ppt_funcs->get_gpu_metrics(smu, table);
2517 mutex_unlock(&smu->mutex);
2522 int smu_enable_mgpu_fan_boost(struct smu_context *smu)
2526 if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
2529 mutex_lock(&smu->mutex);
2531 if (smu->ppt_funcs->enable_mgpu_fan_boost)
2532 ret = smu->ppt_funcs->enable_mgpu_fan_boost(smu);
2534 mutex_unlock(&smu->mutex);
2539 int smu_gfx_state_change_set(struct smu_context *smu, uint32_t state)
2543 mutex_lock(&smu->mutex);
2544 if (smu->ppt_funcs->gfx_state_change_set)
2545 ret = smu->ppt_funcs->gfx_state_change_set(smu, state);
2546 mutex_unlock(&smu->mutex);