/*
 * Copyright 2011 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Alex Deucher
 */
#include "amdgpu.h"
#include "amdgpu_atombios.h"
#include "amdgpu_i2c.h"
#include "amdgpu_dpm.h"
#include "atom.h"
#include "amd_pcie.h"
#include "amdgpu_display.h"
#include "hwmgr.h"
#include <linux/power_supply.h>
#include "amdgpu_smu.h"
#define amdgpu_dpm_enable_bapm(adev, e) \
		((adev)->powerplay.pp_funcs->enable_bapm((adev)->powerplay.pp_handle, (e)))
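
/*
 * The wrappers below share a common shape: check that the backend
 * implements the requested callback (powerplay pp_funcs or SW SMU),
 * bail out with a benign default or an error code if it does not,
 * and otherwise invoke the callback with adev->pm.mutex held so that
 * all powerplay/SMU accesses are serialized.
 */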
int amdgpu_dpm_get_sclk(struct amdgpu_device *adev, bool low)
{
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
	int ret = 0;

	if (!pp_funcs->get_sclk)
		return 0;

	mutex_lock(&adev->pm.mutex);
	ret = pp_funcs->get_sclk((adev)->powerplay.pp_handle,
				 low);
	mutex_unlock(&adev->pm.mutex);

	return ret;
}
int amdgpu_dpm_get_mclk(struct amdgpu_device *adev, bool low)
{
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
	int ret = 0;

	if (!pp_funcs->get_mclk)
		return 0;

	mutex_lock(&adev->pm.mutex);
	ret = pp_funcs->get_mclk((adev)->powerplay.pp_handle,
				 low);
	mutex_unlock(&adev->pm.mutex);

	return ret;
}
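
/* amdgpu_dpm_set_powergating_by_smu - gate or ungate an IP block via the SMU
 * @adev: amdgpu_device pointer
 * @block_type: AMD_IP_BLOCK_TYPE_* of the block to transition
 * @gate: true to power the block down (gate), false to power it up
 *
 * The last programmed state is cached in adev->pm.pwr_state[], so a
 * redundant request is skipped with only a debug message.
 */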
int amdgpu_dpm_set_powergating_by_smu(struct amdgpu_device *adev, uint32_t block_type, bool gate)
{
	int ret = 0;
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
	enum ip_power_state pwr_state = gate ? POWER_STATE_OFF : POWER_STATE_ON;

	if (atomic_read(&adev->pm.pwr_state[block_type]) == pwr_state) {
		dev_dbg(adev->dev, "IP block%d already in the target %s state!",
				block_type, gate ? "gate" : "ungate");
		return 0;
	}

	mutex_lock(&adev->pm.mutex);

	switch (block_type) {
	case AMD_IP_BLOCK_TYPE_UVD:
	case AMD_IP_BLOCK_TYPE_VCE:
	case AMD_IP_BLOCK_TYPE_GFX:
	case AMD_IP_BLOCK_TYPE_VCN:
	case AMD_IP_BLOCK_TYPE_SDMA:
	case AMD_IP_BLOCK_TYPE_JPEG:
	case AMD_IP_BLOCK_TYPE_GMC:
	case AMD_IP_BLOCK_TYPE_ACP:
		if (pp_funcs && pp_funcs->set_powergating_by_smu)
			ret = (pp_funcs->set_powergating_by_smu(
				(adev)->powerplay.pp_handle, block_type, gate));
		break;
	default:
		break;
	}

	if (!ret)
		atomic_set(&adev->pm.pwr_state[block_type], pwr_state);

	mutex_unlock(&adev->pm.mutex);

	return ret;
}
int amdgpu_dpm_set_gfx_power_up_by_imu(struct amdgpu_device *adev)
{
	struct smu_context *smu = adev->powerplay.pp_handle;
	int ret = -EOPNOTSUPP;

	mutex_lock(&adev->pm.mutex);
	ret = smu_set_gfx_power_up_by_imu(smu);
	mutex_unlock(&adev->pm.mutex);

	msleep(10);

	return ret;
}
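
/* amdgpu_dpm_baco_enter - put the ASIC into BACO (bus active, chip off)
 * @adev: amdgpu_device pointer
 *
 * Counterpart of amdgpu_dpm_baco_exit() below; both simply toggle the
 * backend's set_asic_baco_state() callback under the pm mutex.
 */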
int amdgpu_dpm_baco_enter(struct amdgpu_device *adev)
{
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
	void *pp_handle = adev->powerplay.pp_handle;
	int ret = 0;

	if (!pp_funcs || !pp_funcs->set_asic_baco_state)
		return -ENOENT;

	mutex_lock(&adev->pm.mutex);

	/* enter BACO state */
	ret = pp_funcs->set_asic_baco_state(pp_handle, 1);

	mutex_unlock(&adev->pm.mutex);

	return ret;
}
int amdgpu_dpm_baco_exit(struct amdgpu_device *adev)
{
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
	void *pp_handle = adev->powerplay.pp_handle;
	int ret = 0;

	if (!pp_funcs || !pp_funcs->set_asic_baco_state)
		return -ENOENT;

	mutex_lock(&adev->pm.mutex);

	/* exit BACO state */
	ret = pp_funcs->set_asic_baco_state(pp_handle, 0);

	mutex_unlock(&adev->pm.mutex);

	return ret;
}
int amdgpu_dpm_set_mp1_state(struct amdgpu_device *adev,
			     enum pp_mp1_state mp1_state)
{
	int ret = 0;
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;

	if (pp_funcs && pp_funcs->set_mp1_state) {
		mutex_lock(&adev->pm.mutex);

		ret = pp_funcs->set_mp1_state(
				adev->powerplay.pp_handle,
				mp1_state);

		mutex_unlock(&adev->pm.mutex);
	}

	return ret;
}
bool amdgpu_dpm_is_baco_supported(struct amdgpu_device *adev)
{
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
	void *pp_handle = adev->powerplay.pp_handle;
	bool baco_cap;
	int ret = 0;

	if (!pp_funcs || !pp_funcs->get_asic_baco_capability)
		return false;
	/* Don't use baco for reset in S3.
	 * This is a workaround for some platforms
	 * where entering BACO during suspend
	 * seems to cause reboots or hangs.
	 * This might be related to the fact that BACO controls
	 * power to the whole GPU including devices like audio and USB.
	 * Powering down/up everything may adversely affect these other
	 * devices. Needs more investigation.
	 */
	if (adev->in_s3)
		return false;

	mutex_lock(&adev->pm.mutex);

	ret = pp_funcs->get_asic_baco_capability(pp_handle,
						 &baco_cap);

	mutex_unlock(&adev->pm.mutex);

	return ret ? false : baco_cap;
}
int amdgpu_dpm_mode2_reset(struct amdgpu_device *adev)
{
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
	void *pp_handle = adev->powerplay.pp_handle;
	int ret = 0;

	if (!pp_funcs || !pp_funcs->asic_reset_mode_2)
		return -ENOENT;

	mutex_lock(&adev->pm.mutex);

	ret = pp_funcs->asic_reset_mode_2(pp_handle);

	mutex_unlock(&adev->pm.mutex);

	return ret;
}
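
/* amdgpu_dpm_baco_reset - reset the ASIC by cycling it through BACO
 * @adev: amdgpu_device pointer
 *
 * Enters BACO and, if that succeeded, exits it again; the round trip
 * through the powered-off state acts as an ASIC reset.
 */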
int amdgpu_dpm_baco_reset(struct amdgpu_device *adev)
{
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
	void *pp_handle = adev->powerplay.pp_handle;
	int ret = 0;

	if (!pp_funcs || !pp_funcs->set_asic_baco_state)
		return -ENOENT;

	mutex_lock(&adev->pm.mutex);

	/* enter BACO state */
	ret = pp_funcs->set_asic_baco_state(pp_handle, 1);
	if (ret)
		goto out;

	/* exit BACO state */
	ret = pp_funcs->set_asic_baco_state(pp_handle, 0);

out:
	mutex_unlock(&adev->pm.mutex);
	return ret;
}
bool amdgpu_dpm_is_mode1_reset_supported(struct amdgpu_device *adev)
{
	struct smu_context *smu = adev->powerplay.pp_handle;
	bool support_mode1_reset = false;

	if (is_support_sw_smu(adev)) {
		mutex_lock(&adev->pm.mutex);
		support_mode1_reset = smu_mode1_reset_is_support(smu);
		mutex_unlock(&adev->pm.mutex);
	}

	return support_mode1_reset;
}
int amdgpu_dpm_mode1_reset(struct amdgpu_device *adev)
{
	struct smu_context *smu = adev->powerplay.pp_handle;
	int ret = -EOPNOTSUPP;

	if (is_support_sw_smu(adev)) {
		mutex_lock(&adev->pm.mutex);
		ret = smu_mode1_reset(smu);
		mutex_unlock(&adev->pm.mutex);
	}

	return ret;
}
int amdgpu_dpm_switch_power_profile(struct amdgpu_device *adev,
				    enum PP_SMC_POWER_PROFILE type,
				    bool en)
{
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
	int ret = 0;

	if (amdgpu_sriov_vf(adev))
		return 0;

	if (pp_funcs && pp_funcs->switch_power_profile) {
		mutex_lock(&adev->pm.mutex);
		ret = pp_funcs->switch_power_profile(
			adev->powerplay.pp_handle, type, en);
		mutex_unlock(&adev->pm.mutex);
	}

	return ret;
}
int amdgpu_dpm_set_xgmi_pstate(struct amdgpu_device *adev,
			       uint32_t pstate)
{
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
	int ret = 0;

	if (pp_funcs && pp_funcs->set_xgmi_pstate) {
		mutex_lock(&adev->pm.mutex);
		ret = pp_funcs->set_xgmi_pstate(adev->powerplay.pp_handle,
						pstate);
		mutex_unlock(&adev->pm.mutex);
	}

	return ret;
}
int amdgpu_dpm_set_df_cstate(struct amdgpu_device *adev,
			     uint32_t cstate)
{
	int ret = 0;
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
	void *pp_handle = adev->powerplay.pp_handle;

	if (pp_funcs && pp_funcs->set_df_cstate) {
		mutex_lock(&adev->pm.mutex);
		ret = pp_funcs->set_df_cstate(pp_handle, cstate);
		mutex_unlock(&adev->pm.mutex);
	}

	return ret;
}
int amdgpu_dpm_allow_xgmi_power_down(struct amdgpu_device *adev, bool en)
{
	struct smu_context *smu = adev->powerplay.pp_handle;
	int ret = 0;

	if (is_support_sw_smu(adev)) {
		mutex_lock(&adev->pm.mutex);
		ret = smu_allow_xgmi_power_down(smu, en);
		mutex_unlock(&adev->pm.mutex);
	}

	return ret;
}
int amdgpu_dpm_enable_mgpu_fan_boost(struct amdgpu_device *adev)
{
	void *pp_handle = adev->powerplay.pp_handle;
	const struct amd_pm_funcs *pp_funcs =
			adev->powerplay.pp_funcs;
	int ret = 0;

	if (pp_funcs && pp_funcs->enable_mgpu_fan_boost) {
		mutex_lock(&adev->pm.mutex);
		ret = pp_funcs->enable_mgpu_fan_boost(pp_handle);
		mutex_unlock(&adev->pm.mutex);
	}

	return ret;
}
int amdgpu_dpm_set_clockgating_by_smu(struct amdgpu_device *adev,
				      uint32_t msg_id)
{
	void *pp_handle = adev->powerplay.pp_handle;
	const struct amd_pm_funcs *pp_funcs =
			adev->powerplay.pp_funcs;
	int ret = 0;

	if (pp_funcs && pp_funcs->set_clockgating_by_smu) {
		mutex_lock(&adev->pm.mutex);
		ret = pp_funcs->set_clockgating_by_smu(pp_handle,
						       msg_id);
		mutex_unlock(&adev->pm.mutex);
	}

	return ret;
}
int amdgpu_dpm_smu_i2c_bus_access(struct amdgpu_device *adev,
				  bool acquire)
{
	void *pp_handle = adev->powerplay.pp_handle;
	const struct amd_pm_funcs *pp_funcs =
			adev->powerplay.pp_funcs;
	int ret = -EOPNOTSUPP;

	if (pp_funcs && pp_funcs->smu_i2c_bus_access) {
		mutex_lock(&adev->pm.mutex);
		ret = pp_funcs->smu_i2c_bus_access(pp_handle,
						   acquire);
		mutex_unlock(&adev->pm.mutex);
	}

	return ret;
}
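
/* amdgpu_pm_acpi_event_handler - react to an ACPI AC/DC power source change
 * @adev: amdgpu_device pointer
 *
 * Re-reads the system power supply state and forwards the new AC/DC
 * status to the legacy BAPM hook and/or the SW SMU backend.
 */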
void amdgpu_pm_acpi_event_handler(struct amdgpu_device *adev)
{
	if (adev->pm.dpm_enabled) {
		mutex_lock(&adev->pm.mutex);
		if (power_supply_is_system_supplied() > 0)
			adev->pm.ac_power = true;
		else
			adev->pm.ac_power = false;

		if (adev->powerplay.pp_funcs &&
		    adev->powerplay.pp_funcs->enable_bapm)
			amdgpu_dpm_enable_bapm(adev, adev->pm.ac_power);

		if (is_support_sw_smu(adev))
			smu_set_ac_dc(adev->powerplay.pp_handle);

		mutex_unlock(&adev->pm.mutex);
	}
}
int amdgpu_dpm_read_sensor(struct amdgpu_device *adev, enum amd_pp_sensors sensor,
			   void *data, uint32_t *size)
{
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
	int ret = -EINVAL;

	if (!data || !size)
		return -EINVAL;

	if (pp_funcs && pp_funcs->read_sensor) {
		mutex_lock(&adev->pm.mutex);
		ret = pp_funcs->read_sensor(adev->powerplay.pp_handle,
					    sensor,
					    data,
					    size);
		mutex_unlock(&adev->pm.mutex);
	}

	return ret;
}
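
/* amdgpu_dpm_compute_clocks - re-evaluate the current power state
 * @adev: amdgpu_device pointer
 *
 * Updates display bandwidth requirements and waits for all rings to
 * drain before handing the clock recalculation to the backend, so the
 * new state is not applied while work is still in flight.
 */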
void amdgpu_dpm_compute_clocks(struct amdgpu_device *adev)
{
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
	int i;

	if (!adev->pm.dpm_enabled)
		return;

	if (!pp_funcs->pm_compute_clocks)
		return;

	if (adev->mode_info.num_crtc)
		amdgpu_display_bandwidth_update(adev);

	for (i = 0; i < AMDGPU_MAX_RINGS; i++) {
		struct amdgpu_ring *ring = adev->rings[i];
		if (ring && ring->sched.ready)
			amdgpu_fence_wait_empty(ring);
	}

	mutex_lock(&adev->pm.mutex);
	pp_funcs->pm_compute_clocks(adev->powerplay.pp_handle);
	mutex_unlock(&adev->pm.mutex);
}
void amdgpu_dpm_enable_uvd(struct amdgpu_device *adev, bool enable)
{
	int ret = 0;

	if (adev->family == AMDGPU_FAMILY_SI) {
		mutex_lock(&adev->pm.mutex);
		if (enable) {
			adev->pm.dpm.uvd_active = true;
			adev->pm.dpm.state = POWER_STATE_TYPE_INTERNAL_UVD;
		} else {
			adev->pm.dpm.uvd_active = false;
		}
		mutex_unlock(&adev->pm.mutex);

		amdgpu_dpm_compute_clocks(adev);
		return;
	}

	ret = amdgpu_dpm_set_powergating_by_smu(adev, AMD_IP_BLOCK_TYPE_UVD, !enable);
	if (ret)
		DRM_ERROR("Dpm %s uvd failed, ret = %d.\n",
			  enable ? "enable" : "disable", ret);
}
void amdgpu_dpm_enable_vce(struct amdgpu_device *adev, bool enable)
{
	int ret = 0;

	if (adev->family == AMDGPU_FAMILY_SI) {
		mutex_lock(&adev->pm.mutex);
		if (enable) {
			adev->pm.dpm.vce_active = true;
			/* XXX select vce level based on ring/task */
			adev->pm.dpm.vce_level = AMD_VCE_LEVEL_AC_ALL;
		} else {
			adev->pm.dpm.vce_active = false;
		}
		mutex_unlock(&adev->pm.mutex);

		amdgpu_dpm_compute_clocks(adev);
		return;
	}

	ret = amdgpu_dpm_set_powergating_by_smu(adev, AMD_IP_BLOCK_TYPE_VCE, !enable);
	if (ret)
		DRM_ERROR("Dpm %s vce failed, ret = %d.\n",
			  enable ? "enable" : "disable", ret);
}
void amdgpu_dpm_enable_jpeg(struct amdgpu_device *adev, bool enable)
{
	int ret = 0;

	ret = amdgpu_dpm_set_powergating_by_smu(adev, AMD_IP_BLOCK_TYPE_JPEG, !enable);
	if (ret)
		DRM_ERROR("Dpm %s jpeg failed, ret = %d.\n",
			  enable ? "enable" : "disable", ret);
}
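
/* amdgpu_pm_load_smu_firmware - have the powerplay backend load the SMU firmware
 * @adev: amdgpu_device pointer
 * @smu_version: optional output for the loaded SMC firmware version
 *
 * Returns 0 when the backend has no load_firmware hook (nothing to do)
 * or on success, otherwise the backend's error code.
 */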
int amdgpu_pm_load_smu_firmware(struct amdgpu_device *adev, uint32_t *smu_version)
{
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
	int r = 0;

	if (!pp_funcs || !pp_funcs->load_firmware)
		return 0;

	mutex_lock(&adev->pm.mutex);
	r = pp_funcs->load_firmware(adev->powerplay.pp_handle);
	if (r) {
		pr_err("smu firmware loading failed\n");
		goto out;
	}

	if (smu_version)
		*smu_version = adev->pm.fw_version;

out:
	mutex_unlock(&adev->pm.mutex);
	return r;
}
int amdgpu_dpm_handle_passthrough_sbr(struct amdgpu_device *adev, bool enable)
{
	int ret = 0;

	if (is_support_sw_smu(adev)) {
		mutex_lock(&adev->pm.mutex);
		ret = smu_handle_passthrough_sbr(adev->powerplay.pp_handle,
						 enable);
		mutex_unlock(&adev->pm.mutex);
	}

	return ret;
}
int amdgpu_dpm_send_hbm_bad_pages_num(struct amdgpu_device *adev, uint32_t size)
{
	struct smu_context *smu = adev->powerplay.pp_handle;
	int ret = 0;

	if (!is_support_sw_smu(adev))
		return -EOPNOTSUPP;

	mutex_lock(&adev->pm.mutex);
	ret = smu_send_hbm_bad_pages_num(smu, size);
	mutex_unlock(&adev->pm.mutex);

	return ret;
}
int amdgpu_dpm_send_hbm_bad_channel_flag(struct amdgpu_device *adev, uint32_t size)
{
	struct smu_context *smu = adev->powerplay.pp_handle;
	int ret = 0;

	if (!is_support_sw_smu(adev))
		return -EOPNOTSUPP;

	mutex_lock(&adev->pm.mutex);
	ret = smu_send_hbm_bad_channel_flag(smu, size);
	mutex_unlock(&adev->pm.mutex);

	return ret;
}
int amdgpu_dpm_get_dpm_freq_range(struct amdgpu_device *adev,
				  enum pp_clock_type type,
				  uint32_t *min,
				  uint32_t *max)
{
	int ret = 0;

	if (type != PP_SCLK)
		return -EINVAL;

	if (!is_support_sw_smu(adev))
		return -EOPNOTSUPP;

	mutex_lock(&adev->pm.mutex);
	ret = smu_get_dpm_freq_range(adev->powerplay.pp_handle,
				     SMU_SCLK,
				     min,
				     max);
	mutex_unlock(&adev->pm.mutex);

	return ret;
}
int amdgpu_dpm_set_soft_freq_range(struct amdgpu_device *adev,
				   enum pp_clock_type type,
				   uint32_t min,
				   uint32_t max)
{
	struct smu_context *smu = adev->powerplay.pp_handle;
	int ret = 0;

	if (type != PP_SCLK)
		return -EINVAL;

	if (!is_support_sw_smu(adev))
		return -EOPNOTSUPP;

	mutex_lock(&adev->pm.mutex);
	ret = smu_set_soft_freq_range(smu,
				      SMU_SCLK,
				      min,
				      max);
	mutex_unlock(&adev->pm.mutex);

	return ret;
}
int amdgpu_dpm_write_watermarks_table(struct amdgpu_device *adev)
{
	struct smu_context *smu = adev->powerplay.pp_handle;
	int ret = 0;

	if (!is_support_sw_smu(adev))
		return 0;

	mutex_lock(&adev->pm.mutex);
	ret = smu_write_watermarks_table(smu);
	mutex_unlock(&adev->pm.mutex);

	return ret;
}
int amdgpu_dpm_wait_for_event(struct amdgpu_device *adev,
			      enum smu_event_type event,
			      uint64_t event_arg)
{
	struct smu_context *smu = adev->powerplay.pp_handle;
	int ret = 0;

	if (!is_support_sw_smu(adev))
		return -EOPNOTSUPP;

	mutex_lock(&adev->pm.mutex);
	ret = smu_wait_for_event(smu, event, event_arg);
	mutex_unlock(&adev->pm.mutex);

	return ret;
}
int amdgpu_dpm_get_status_gfxoff(struct amdgpu_device *adev, uint32_t *value)
{
	struct smu_context *smu = adev->powerplay.pp_handle;
	int ret = 0;

	if (!is_support_sw_smu(adev))
		return -EOPNOTSUPP;

	mutex_lock(&adev->pm.mutex);
	ret = smu_get_status_gfxoff(smu, value);
	mutex_unlock(&adev->pm.mutex);

	return ret;
}
uint64_t amdgpu_dpm_get_thermal_throttling_counter(struct amdgpu_device *adev)
{
	struct smu_context *smu = adev->powerplay.pp_handle;

	if (!is_support_sw_smu(adev))
		return 0;

	return atomic64_read(&smu->throttle_int_counter);
}
/* amdgpu_dpm_gfx_state_change - Handle gfx power state change set
 * @adev: amdgpu_device pointer
 * @state: gfx power state (1 = sGpuChangeState_D0Entry, 2 = sGpuChangeState_D3Entry)
 *
 */
void amdgpu_dpm_gfx_state_change(struct amdgpu_device *adev,
				 enum gfx_change_state state)
{
	mutex_lock(&adev->pm.mutex);
	if (adev->powerplay.pp_funcs &&
	    adev->powerplay.pp_funcs->gfx_state_change_set)
		((adev)->powerplay.pp_funcs->gfx_state_change_set(
			(adev)->powerplay.pp_handle, state));
	mutex_unlock(&adev->pm.mutex);
}
int amdgpu_dpm_get_ecc_info(struct amdgpu_device *adev,
			    void *umc_ecc)
{
	struct smu_context *smu = adev->powerplay.pp_handle;
	int ret = 0;

	if (!is_support_sw_smu(adev))
		return -EOPNOTSUPP;

	mutex_lock(&adev->pm.mutex);
	ret = smu_get_ecc_info(smu, umc_ecc);
	mutex_unlock(&adev->pm.mutex);

	return ret;
}
struct amd_vce_state *amdgpu_dpm_get_vce_clock_state(struct amdgpu_device *adev,
						     uint32_t idx)
{
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
	struct amd_vce_state *vstate = NULL;

	if (!pp_funcs->get_vce_clock_state)
		return NULL;

	mutex_lock(&adev->pm.mutex);
	vstate = pp_funcs->get_vce_clock_state(adev->powerplay.pp_handle,
					       idx);
	mutex_unlock(&adev->pm.mutex);

	return vstate;
}
void amdgpu_dpm_get_current_power_state(struct amdgpu_device *adev,
					enum amd_pm_state_type *state)
{
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;

	mutex_lock(&adev->pm.mutex);

	if (!pp_funcs->get_current_power_state) {
		*state = adev->pm.dpm.user_state;
		goto out;
	}

	*state = pp_funcs->get_current_power_state(adev->powerplay.pp_handle);
	if (*state < POWER_STATE_TYPE_DEFAULT ||
	    *state > POWER_STATE_TYPE_INTERNAL_3DPERF)
		*state = adev->pm.dpm.user_state;

out:
	mutex_unlock(&adev->pm.mutex);
}
void amdgpu_dpm_set_power_state(struct amdgpu_device *adev,
				enum amd_pm_state_type state)
{
	mutex_lock(&adev->pm.mutex);
	adev->pm.dpm.user_state = state;
	mutex_unlock(&adev->pm.mutex);

	if (is_support_sw_smu(adev))
		return;

	if (amdgpu_dpm_dispatch_task(adev,
				     AMD_PP_TASK_ENABLE_USER_STATE,
				     &state) == -EOPNOTSUPP)
		amdgpu_dpm_compute_clocks(adev);
}
enum amd_dpm_forced_level amdgpu_dpm_get_performance_level(struct amdgpu_device *adev)
{
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
	enum amd_dpm_forced_level level;

	if (!pp_funcs)
		return AMD_DPM_FORCED_LEVEL_AUTO;

	mutex_lock(&adev->pm.mutex);
	if (pp_funcs->get_performance_level)
		level = pp_funcs->get_performance_level(adev->powerplay.pp_handle);
	else
		level = adev->pm.dpm.forced_level;
	mutex_unlock(&adev->pm.mutex);

	return level;
}
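
/* amdgpu_dpm_force_performance_level - force a DPM performance level
 * @adev: amdgpu_device pointer
 * @level: the AMD_DPM_FORCED_LEVEL_* to switch to
 *
 * Rejects the request while a thermal condition is active. Entering or
 * leaving a profiling level additionally ungates/gates GFX clock- and
 * powergating around the switch (the "UMD pstate" transitions below).
 */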
int amdgpu_dpm_force_performance_level(struct amdgpu_device *adev,
				       enum amd_dpm_forced_level level)
{
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
	enum amd_dpm_forced_level current_level;
	uint32_t profile_mode_mask = AMD_DPM_FORCED_LEVEL_PROFILE_STANDARD |
					AMD_DPM_FORCED_LEVEL_PROFILE_MIN_SCLK |
					AMD_DPM_FORCED_LEVEL_PROFILE_MIN_MCLK |
					AMD_DPM_FORCED_LEVEL_PROFILE_PEAK;

	if (!pp_funcs || !pp_funcs->force_performance_level)
		return 0;

	if (adev->pm.dpm.thermal_active)
		return -EINVAL;

	current_level = amdgpu_dpm_get_performance_level(adev);
	if (current_level == level)
		return 0;

	if (adev->asic_type == CHIP_RAVEN) {
		if (!(adev->apu_flags & AMD_APU_IS_RAVEN2)) {
			if (current_level != AMD_DPM_FORCED_LEVEL_MANUAL &&
			    level == AMD_DPM_FORCED_LEVEL_MANUAL)
				amdgpu_gfx_off_ctrl(adev, false);
			else if (current_level == AMD_DPM_FORCED_LEVEL_MANUAL &&
				 level != AMD_DPM_FORCED_LEVEL_MANUAL)
				amdgpu_gfx_off_ctrl(adev, true);
		}
	}

	if (!(current_level & profile_mode_mask) &&
	    (level == AMD_DPM_FORCED_LEVEL_PROFILE_EXIT))
		return -EINVAL;

	if (!(current_level & profile_mode_mask) &&
	    (level & profile_mode_mask)) {
		/* enter UMD Pstate */
		amdgpu_device_ip_set_powergating_state(adev,
						       AMD_IP_BLOCK_TYPE_GFX,
						       AMD_PG_STATE_UNGATE);
		amdgpu_device_ip_set_clockgating_state(adev,
						       AMD_IP_BLOCK_TYPE_GFX,
						       AMD_CG_STATE_UNGATE);
	} else if ((current_level & profile_mode_mask) &&
		   !(level & profile_mode_mask)) {
		/* exit UMD Pstate */
		amdgpu_device_ip_set_clockgating_state(adev,
						       AMD_IP_BLOCK_TYPE_GFX,
						       AMD_CG_STATE_GATE);
		amdgpu_device_ip_set_powergating_state(adev,
						       AMD_IP_BLOCK_TYPE_GFX,
						       AMD_PG_STATE_GATE);
	}

	mutex_lock(&adev->pm.mutex);

	if (pp_funcs->force_performance_level(adev->powerplay.pp_handle,
					      level)) {
		mutex_unlock(&adev->pm.mutex);
		return -EINVAL;
	}

	adev->pm.dpm.forced_level = level;

	mutex_unlock(&adev->pm.mutex);

	return 0;
}
int amdgpu_dpm_get_pp_num_states(struct amdgpu_device *adev,
				 struct pp_states_info *states)
{
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
	int ret = 0;

	if (!pp_funcs->get_pp_num_states)
		return -EOPNOTSUPP;

	mutex_lock(&adev->pm.mutex);
	ret = pp_funcs->get_pp_num_states(adev->powerplay.pp_handle,
					  states);
	mutex_unlock(&adev->pm.mutex);

	return ret;
}
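
/* amdgpu_dpm_dispatch_task - hand a power management task to the backend
 * @adev: amdgpu_device pointer
 * @task_id: which AMD_PP_TASK_* to run
 * @user_state: requested power state, used by AMD_PP_TASK_ENABLE_USER_STATE
 *
 * Callers treat -EOPNOTSUPP as "no dispatcher available" and fall back
 * to amdgpu_dpm_compute_clocks() where appropriate.
 */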
int amdgpu_dpm_dispatch_task(struct amdgpu_device *adev,
			     enum amd_pp_task task_id,
			     enum amd_pm_state_type *user_state)
{
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
	int ret = 0;

	if (!pp_funcs->dispatch_tasks)
		return -EOPNOTSUPP;

	mutex_lock(&adev->pm.mutex);
	ret = pp_funcs->dispatch_tasks(adev->powerplay.pp_handle,
				       task_id,
				       user_state);
	mutex_unlock(&adev->pm.mutex);

	return ret;
}
int amdgpu_dpm_get_pp_table(struct amdgpu_device *adev, char **table)
{
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
	int ret = 0;

	if (!pp_funcs->get_pp_table)
		return 0;

	mutex_lock(&adev->pm.mutex);
	ret = pp_funcs->get_pp_table(adev->powerplay.pp_handle,
				     table);
	mutex_unlock(&adev->pm.mutex);

	return ret;
}
int amdgpu_dpm_set_fine_grain_clk_vol(struct amdgpu_device *adev,
				      uint32_t type,
				      long *input,
				      uint32_t size)
{
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
	int ret = 0;

	if (!pp_funcs->set_fine_grain_clk_vol)
		return 0;

	mutex_lock(&adev->pm.mutex);
	ret = pp_funcs->set_fine_grain_clk_vol(adev->powerplay.pp_handle,
					       type,
					       input,
					       size);
	mutex_unlock(&adev->pm.mutex);

	return ret;
}
int amdgpu_dpm_odn_edit_dpm_table(struct amdgpu_device *adev,
				  uint32_t type,
				  long *input,
				  uint32_t size)
{
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
	int ret = 0;

	if (!pp_funcs->odn_edit_dpm_table)
		return 0;

	mutex_lock(&adev->pm.mutex);
	ret = pp_funcs->odn_edit_dpm_table(adev->powerplay.pp_handle,
					   type,
					   input,
					   size);
	mutex_unlock(&adev->pm.mutex);

	return ret;
}
int amdgpu_dpm_print_clock_levels(struct amdgpu_device *adev,
				  enum pp_clock_type type,
				  char *buf)
{
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
	int ret = 0;

	if (!pp_funcs->print_clock_levels)
		return 0;

	mutex_lock(&adev->pm.mutex);
	ret = pp_funcs->print_clock_levels(adev->powerplay.pp_handle,
					   type,
					   buf);
	mutex_unlock(&adev->pm.mutex);

	return ret;
}
int amdgpu_dpm_emit_clock_levels(struct amdgpu_device *adev,
				 enum pp_clock_type type,
				 char *buf,
				 int *offset)
{
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
	int ret = 0;

	if (!pp_funcs->emit_clock_levels)
		return -ENOENT;

	mutex_lock(&adev->pm.mutex);
	ret = pp_funcs->emit_clock_levels(adev->powerplay.pp_handle,
					  type,
					  buf,
					  offset);
	mutex_unlock(&adev->pm.mutex);

	return ret;
}
int amdgpu_dpm_set_ppfeature_status(struct amdgpu_device *adev,
				    uint64_t ppfeature_masks)
{
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
	int ret = 0;

	if (!pp_funcs->set_ppfeature_status)
		return 0;

	mutex_lock(&adev->pm.mutex);
	ret = pp_funcs->set_ppfeature_status(adev->powerplay.pp_handle,
					     ppfeature_masks);
	mutex_unlock(&adev->pm.mutex);

	return ret;
}
int amdgpu_dpm_get_ppfeature_status(struct amdgpu_device *adev, char *buf)
{
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
	int ret = 0;

	if (!pp_funcs->get_ppfeature_status)
		return 0;

	mutex_lock(&adev->pm.mutex);
	ret = pp_funcs->get_ppfeature_status(adev->powerplay.pp_handle,
					     buf);
	mutex_unlock(&adev->pm.mutex);

	return ret;
}
int amdgpu_dpm_force_clock_level(struct amdgpu_device *adev,
				 enum pp_clock_type type,
				 uint32_t mask)
{
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
	int ret = 0;

	if (!pp_funcs->force_clock_level)
		return 0;

	mutex_lock(&adev->pm.mutex);
	ret = pp_funcs->force_clock_level(adev->powerplay.pp_handle,
					  type,
					  mask);
	mutex_unlock(&adev->pm.mutex);

	return ret;
}
int amdgpu_dpm_get_sclk_od(struct amdgpu_device *adev)
{
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
	int ret = 0;

	if (!pp_funcs->get_sclk_od)
		return 0;

	mutex_lock(&adev->pm.mutex);
	ret = pp_funcs->get_sclk_od(adev->powerplay.pp_handle);
	mutex_unlock(&adev->pm.mutex);

	return ret;
}
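
/* amdgpu_dpm_set_sclk_od - set the engine clock overdrive setting
 * @adev: amdgpu_device pointer
 * @value: the new overdrive value
 *
 * Only meaningful for the legacy (non SW SMU) paths; afterwards a
 * READJUST_POWER_STATE task is dispatched, falling back to a manual
 * recompute from the boot power state if no dispatcher exists.
 * amdgpu_dpm_set_mclk_od() below mirrors this for the memory clock.
 */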
int amdgpu_dpm_set_sclk_od(struct amdgpu_device *adev, uint32_t value)
{
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;

	if (is_support_sw_smu(adev))
		return 0;

	mutex_lock(&adev->pm.mutex);
	if (pp_funcs->set_sclk_od)
		pp_funcs->set_sclk_od(adev->powerplay.pp_handle, value);
	mutex_unlock(&adev->pm.mutex);

	if (amdgpu_dpm_dispatch_task(adev,
				     AMD_PP_TASK_READJUST_POWER_STATE,
				     NULL) == -EOPNOTSUPP) {
		adev->pm.dpm.current_ps = adev->pm.dpm.boot_ps;
		amdgpu_dpm_compute_clocks(adev);
	}

	return 0;
}
int amdgpu_dpm_get_mclk_od(struct amdgpu_device *adev)
{
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
	int ret = 0;

	if (!pp_funcs->get_mclk_od)
		return 0;

	mutex_lock(&adev->pm.mutex);
	ret = pp_funcs->get_mclk_od(adev->powerplay.pp_handle);
	mutex_unlock(&adev->pm.mutex);

	return ret;
}
int amdgpu_dpm_set_mclk_od(struct amdgpu_device *adev, uint32_t value)
{
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;

	if (is_support_sw_smu(adev))
		return 0;

	mutex_lock(&adev->pm.mutex);
	if (pp_funcs->set_mclk_od)
		pp_funcs->set_mclk_od(adev->powerplay.pp_handle, value);
	mutex_unlock(&adev->pm.mutex);

	if (amdgpu_dpm_dispatch_task(adev,
				     AMD_PP_TASK_READJUST_POWER_STATE,
				     NULL) == -EOPNOTSUPP) {
		adev->pm.dpm.current_ps = adev->pm.dpm.boot_ps;
		amdgpu_dpm_compute_clocks(adev);
	}

	return 0;
}
int amdgpu_dpm_get_power_profile_mode(struct amdgpu_device *adev,
				      char *buf)
{
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
	int ret = 0;

	if (!pp_funcs->get_power_profile_mode)
		return -EOPNOTSUPP;

	mutex_lock(&adev->pm.mutex);
	ret = pp_funcs->get_power_profile_mode(adev->powerplay.pp_handle,
					       buf);
	mutex_unlock(&adev->pm.mutex);

	return ret;
}
int amdgpu_dpm_set_power_profile_mode(struct amdgpu_device *adev,
				      long *input, uint32_t size)
{
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
	int ret = 0;

	if (!pp_funcs->set_power_profile_mode)
		return 0;

	mutex_lock(&adev->pm.mutex);
	ret = pp_funcs->set_power_profile_mode(adev->powerplay.pp_handle,
					       input,
					       size);
	mutex_unlock(&adev->pm.mutex);

	return ret;
}
int amdgpu_dpm_get_gpu_metrics(struct amdgpu_device *adev, void **table)
{
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
	int ret = 0;

	if (!pp_funcs->get_gpu_metrics)
		return 0;

	mutex_lock(&adev->pm.mutex);
	ret = pp_funcs->get_gpu_metrics(adev->powerplay.pp_handle,
					table);
	mutex_unlock(&adev->pm.mutex);

	return ret;
}
int amdgpu_dpm_get_fan_control_mode(struct amdgpu_device *adev,
				    uint32_t *fan_mode)
{
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
	int ret = 0;

	if (!pp_funcs->get_fan_control_mode)
		return -EOPNOTSUPP;

	mutex_lock(&adev->pm.mutex);
	ret = pp_funcs->get_fan_control_mode(adev->powerplay.pp_handle,
					     fan_mode);
	mutex_unlock(&adev->pm.mutex);

	return ret;
}
int amdgpu_dpm_set_fan_speed_pwm(struct amdgpu_device *adev,
				 uint32_t speed)
{
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
	int ret = 0;

	if (!pp_funcs->set_fan_speed_pwm)
		return -EOPNOTSUPP;

	mutex_lock(&adev->pm.mutex);
	ret = pp_funcs->set_fan_speed_pwm(adev->powerplay.pp_handle,
					  speed);
	mutex_unlock(&adev->pm.mutex);

	return ret;
}
int amdgpu_dpm_get_fan_speed_pwm(struct amdgpu_device *adev,
				 uint32_t *speed)
{
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
	int ret = 0;

	if (!pp_funcs->get_fan_speed_pwm)
		return -EOPNOTSUPP;

	mutex_lock(&adev->pm.mutex);
	ret = pp_funcs->get_fan_speed_pwm(adev->powerplay.pp_handle,
					  speed);
	mutex_unlock(&adev->pm.mutex);

	return ret;
}
int amdgpu_dpm_get_fan_speed_rpm(struct amdgpu_device *adev,
				 uint32_t *speed)
{
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
	int ret = 0;

	if (!pp_funcs->get_fan_speed_rpm)
		return -EOPNOTSUPP;

	mutex_lock(&adev->pm.mutex);
	ret = pp_funcs->get_fan_speed_rpm(adev->powerplay.pp_handle,
					  speed);
	mutex_unlock(&adev->pm.mutex);

	return ret;
}
int amdgpu_dpm_set_fan_speed_rpm(struct amdgpu_device *adev,
				 uint32_t speed)
{
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
	int ret = 0;

	if (!pp_funcs->set_fan_speed_rpm)
		return -EOPNOTSUPP;

	mutex_lock(&adev->pm.mutex);
	ret = pp_funcs->set_fan_speed_rpm(adev->powerplay.pp_handle,
					  speed);
	mutex_unlock(&adev->pm.mutex);

	return ret;
}
int amdgpu_dpm_set_fan_control_mode(struct amdgpu_device *adev,
				    uint32_t mode)
{
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
	int ret = 0;

	if (!pp_funcs->set_fan_control_mode)
		return -EOPNOTSUPP;

	mutex_lock(&adev->pm.mutex);
	ret = pp_funcs->set_fan_control_mode(adev->powerplay.pp_handle,
					     mode);
	mutex_unlock(&adev->pm.mutex);

	return ret;
}
int amdgpu_dpm_get_power_limit(struct amdgpu_device *adev,
			       uint32_t *limit,
			       enum pp_power_limit_level pp_limit_level,
			       enum pp_power_type power_type)
{
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
	int ret = 0;

	if (!pp_funcs->get_power_limit)
		return -ENODATA;

	mutex_lock(&adev->pm.mutex);
	ret = pp_funcs->get_power_limit(adev->powerplay.pp_handle,
					limit,
					pp_limit_level,
					power_type);
	mutex_unlock(&adev->pm.mutex);

	return ret;
}
int amdgpu_dpm_set_power_limit(struct amdgpu_device *adev,
			       uint32_t limit)
{
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
	int ret = 0;

	if (!pp_funcs->set_power_limit)
		return -EINVAL;

	mutex_lock(&adev->pm.mutex);
	ret = pp_funcs->set_power_limit(adev->powerplay.pp_handle,
					limit);
	mutex_unlock(&adev->pm.mutex);

	return ret;
}
int amdgpu_dpm_is_cclk_dpm_supported(struct amdgpu_device *adev)
{
	bool cclk_dpm_supported = false;

	if (!is_support_sw_smu(adev))
		return false;

	mutex_lock(&adev->pm.mutex);
	cclk_dpm_supported = is_support_cclk_dpm(adev);
	mutex_unlock(&adev->pm.mutex);

	return (int)cclk_dpm_supported;
}
int amdgpu_dpm_debugfs_print_current_performance_level(struct amdgpu_device *adev,
						       struct seq_file *m)
{
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;

	if (!pp_funcs->debugfs_print_current_performance_level)
		return -EOPNOTSUPP;

	mutex_lock(&adev->pm.mutex);
	pp_funcs->debugfs_print_current_performance_level(adev->powerplay.pp_handle,
							  m);
	mutex_unlock(&adev->pm.mutex);

	return 0;
}
int amdgpu_dpm_get_smu_prv_buf_details(struct amdgpu_device *adev,
				       void **addr,
				       size_t *size)
{
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
	int ret = 0;

	if (!pp_funcs->get_smu_prv_buf_details)
		return -ENOSYS;

	mutex_lock(&adev->pm.mutex);
	ret = pp_funcs->get_smu_prv_buf_details(adev->powerplay.pp_handle,
						addr,
						size);
	mutex_unlock(&adev->pm.mutex);

	return ret;
}
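
/* amdgpu_dpm_is_overdrive_supported - query overdrive (OD) availability
 * @adev: amdgpu_device pointer
 *
 * The same pp_handle backs either a SW SMU context or a legacy hwmgr,
 * so both interpretations are checked depending on is_support_sw_smu().
 */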
int amdgpu_dpm_is_overdrive_supported(struct amdgpu_device *adev)
{
	struct pp_hwmgr *hwmgr = adev->powerplay.pp_handle;
	struct smu_context *smu = adev->powerplay.pp_handle;

	if ((is_support_sw_smu(adev) && smu->od_enabled) ||
	    (is_support_sw_smu(adev) && smu->is_apu) ||
	    (!is_support_sw_smu(adev) && hwmgr->od_enabled))
		return true;

	return false;
}
int amdgpu_dpm_set_pp_table(struct amdgpu_device *adev,
			    const char *buf,
			    size_t size)
{
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
	int ret = 0;

	if (!pp_funcs->set_pp_table)
		return -EOPNOTSUPP;

	mutex_lock(&adev->pm.mutex);
	ret = pp_funcs->set_pp_table(adev->powerplay.pp_handle,
				     buf,
				     size);
	mutex_unlock(&adev->pm.mutex);

	return ret;
}
int amdgpu_dpm_get_num_cpu_cores(struct amdgpu_device *adev)
{
	struct smu_context *smu = adev->powerplay.pp_handle;

	if (!is_support_sw_smu(adev))
		return INT_MAX;

	return smu->cpu_core_num;
}
void amdgpu_dpm_stb_debug_fs_init(struct amdgpu_device *adev)
{
	if (!is_support_sw_smu(adev))
		return;

	amdgpu_smu_stb_debug_fs_init(adev);
}
int amdgpu_dpm_display_configuration_change(struct amdgpu_device *adev,
					    const struct amd_pp_display_configuration *input)
{
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
	int ret = 0;

	if (!pp_funcs->display_configuration_change)
		return 0;

	mutex_lock(&adev->pm.mutex);
	ret = pp_funcs->display_configuration_change(adev->powerplay.pp_handle,
						     input);
	mutex_unlock(&adev->pm.mutex);

	return ret;
}
int amdgpu_dpm_get_clock_by_type(struct amdgpu_device *adev,
				 enum amd_pp_clock_type type,
				 struct amd_pp_clocks *clocks)
{
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
	int ret = 0;

	if (!pp_funcs->get_clock_by_type)
		return 0;

	mutex_lock(&adev->pm.mutex);
	ret = pp_funcs->get_clock_by_type(adev->powerplay.pp_handle,
					  type,
					  clocks);
	mutex_unlock(&adev->pm.mutex);

	return ret;
}
int amdgpu_dpm_get_display_mode_validation_clks(struct amdgpu_device *adev,
						struct amd_pp_simple_clock_info *clocks)
{
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
	int ret = 0;

	if (!pp_funcs->get_display_mode_validation_clocks)
		return 0;

	mutex_lock(&adev->pm.mutex);
	ret = pp_funcs->get_display_mode_validation_clocks(adev->powerplay.pp_handle,
							   clocks);
	mutex_unlock(&adev->pm.mutex);

	return ret;
}
int amdgpu_dpm_get_clock_by_type_with_latency(struct amdgpu_device *adev,
					      enum amd_pp_clock_type type,
					      struct pp_clock_levels_with_latency *clocks)
{
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
	int ret = 0;

	if (!pp_funcs->get_clock_by_type_with_latency)
		return 0;

	mutex_lock(&adev->pm.mutex);
	ret = pp_funcs->get_clock_by_type_with_latency(adev->powerplay.pp_handle,
						       type,
						       clocks);
	mutex_unlock(&adev->pm.mutex);

	return ret;
}
int amdgpu_dpm_get_clock_by_type_with_voltage(struct amdgpu_device *adev,
					      enum amd_pp_clock_type type,
					      struct pp_clock_levels_with_voltage *clocks)
{
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
	int ret = 0;

	if (!pp_funcs->get_clock_by_type_with_voltage)
		return 0;

	mutex_lock(&adev->pm.mutex);
	ret = pp_funcs->get_clock_by_type_with_voltage(adev->powerplay.pp_handle,
						       type,
						       clocks);
	mutex_unlock(&adev->pm.mutex);

	return ret;
}
int amdgpu_dpm_set_watermarks_for_clocks_ranges(struct amdgpu_device *adev,
						void *clock_ranges)
{
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
	int ret = 0;

	if (!pp_funcs->set_watermarks_for_clocks_ranges)
		return -EOPNOTSUPP;

	mutex_lock(&adev->pm.mutex);
	ret = pp_funcs->set_watermarks_for_clocks_ranges(adev->powerplay.pp_handle,
							 clock_ranges);
	mutex_unlock(&adev->pm.mutex);

	return ret;
}
int amdgpu_dpm_display_clock_voltage_request(struct amdgpu_device *adev,
					     struct pp_display_clock_request *clock)
{
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
	int ret = 0;

	if (!pp_funcs->display_clock_voltage_request)
		return -EOPNOTSUPP;

	mutex_lock(&adev->pm.mutex);
	ret = pp_funcs->display_clock_voltage_request(adev->powerplay.pp_handle,
						      clock);
	mutex_unlock(&adev->pm.mutex);

	return ret;
}
int amdgpu_dpm_get_current_clocks(struct amdgpu_device *adev,
				  struct amd_pp_clock_info *clocks)
{
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
	int ret = 0;

	if (!pp_funcs->get_current_clocks)
		return -EOPNOTSUPP;

	mutex_lock(&adev->pm.mutex);
	ret = pp_funcs->get_current_clocks(adev->powerplay.pp_handle,
					   clocks);
	mutex_unlock(&adev->pm.mutex);

	return ret;
}
void amdgpu_dpm_notify_smu_enable_pwe(struct amdgpu_device *adev)
{
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;

	if (!pp_funcs->notify_smu_enable_pwe)
		return;

	mutex_lock(&adev->pm.mutex);
	pp_funcs->notify_smu_enable_pwe(adev->powerplay.pp_handle);
	mutex_unlock(&adev->pm.mutex);
}
int amdgpu_dpm_set_active_display_count(struct amdgpu_device *adev,
					uint32_t count)
{
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
	int ret = 0;

	if (!pp_funcs->set_active_display_count)
		return -EOPNOTSUPP;

	mutex_lock(&adev->pm.mutex);
	ret = pp_funcs->set_active_display_count(adev->powerplay.pp_handle,
						 count);
	mutex_unlock(&adev->pm.mutex);

	return ret;
}
int amdgpu_dpm_set_min_deep_sleep_dcefclk(struct amdgpu_device *adev,
					  uint32_t clock)
{
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
	int ret = 0;

	if (!pp_funcs->set_min_deep_sleep_dcefclk)
		return -EOPNOTSUPP;

	mutex_lock(&adev->pm.mutex);
	ret = pp_funcs->set_min_deep_sleep_dcefclk(adev->powerplay.pp_handle,
						   clock);
	mutex_unlock(&adev->pm.mutex);

	return ret;
}
void amdgpu_dpm_set_hard_min_dcefclk_by_freq(struct amdgpu_device *adev,
					     uint32_t clock)
{
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;

	if (!pp_funcs->set_hard_min_dcefclk_by_freq)
		return;

	mutex_lock(&adev->pm.mutex);
	pp_funcs->set_hard_min_dcefclk_by_freq(adev->powerplay.pp_handle,
					       clock);
	mutex_unlock(&adev->pm.mutex);
}
void amdgpu_dpm_set_hard_min_fclk_by_freq(struct amdgpu_device *adev,
					  uint32_t clock)
{
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;

	if (!pp_funcs->set_hard_min_fclk_by_freq)
		return;

	mutex_lock(&adev->pm.mutex);
	pp_funcs->set_hard_min_fclk_by_freq(adev->powerplay.pp_handle,
					    clock);
	mutex_unlock(&adev->pm.mutex);
}
int amdgpu_dpm_display_disable_memory_clock_switch(struct amdgpu_device *adev,
						   bool disable_memory_clock_switch)
{
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
	int ret = 0;

	if (!pp_funcs->display_disable_memory_clock_switch)
		return 0;

	mutex_lock(&adev->pm.mutex);
	ret = pp_funcs->display_disable_memory_clock_switch(adev->powerplay.pp_handle,
							    disable_memory_clock_switch);
	mutex_unlock(&adev->pm.mutex);

	return ret;
}
int amdgpu_dpm_get_max_sustainable_clocks_by_dc(struct amdgpu_device *adev,
						struct pp_smu_nv_clock_table *max_clocks)
{
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
	int ret = 0;

	if (!pp_funcs->get_max_sustainable_clocks_by_dc)
		return -EOPNOTSUPP;

	mutex_lock(&adev->pm.mutex);
	ret = pp_funcs->get_max_sustainable_clocks_by_dc(adev->powerplay.pp_handle,
							 max_clocks);
	mutex_unlock(&adev->pm.mutex);

	return ret;
}
enum pp_smu_status amdgpu_dpm_get_uclk_dpm_states(struct amdgpu_device *adev,
						  unsigned int *clock_values_in_khz,
						  unsigned int *num_states)
{
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
	int ret = 0;

	if (!pp_funcs->get_uclk_dpm_states)
		return -EOPNOTSUPP;

	mutex_lock(&adev->pm.mutex);
	ret = pp_funcs->get_uclk_dpm_states(adev->powerplay.pp_handle,
					    clock_values_in_khz,
					    num_states);
	mutex_unlock(&adev->pm.mutex);

	return ret;
}
int amdgpu_dpm_get_dpm_clock_table(struct amdgpu_device *adev,
				   struct dpm_clocks *clock_table)
{
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
	int ret = 0;

	if (!pp_funcs->get_dpm_clock_table)
		return -EOPNOTSUPP;

	mutex_lock(&adev->pm.mutex);
	ret = pp_funcs->get_dpm_clock_table(adev->powerplay.pp_handle,
					    clock_table);
	mutex_unlock(&adev->pm.mutex);

	return ret;
}