/*
 * Copyright 2011 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Alex Deucher
 */

#include "amdgpu.h"
#include "amdgpu_atombios.h"
#include "amdgpu_i2c.h"
#include "amdgpu_dpm.h"
#include "amdgpu_display.h"
#include "hwmgr.h"
#include <linux/power_supply.h>
#include "amdgpu_smu.h"

#define amdgpu_dpm_enable_bapm(adev, e) \
		((adev)->powerplay.pp_funcs->enable_bapm((adev)->powerplay.pp_handle, (e)))

#define amdgpu_dpm_is_legacy_dpm(adev) ((adev)->powerplay.pp_handle == (adev))

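/* amdgpu_dpm_get_sclk - query the engine (shader) clock
 * @adev: amdgpu_device pointer
 * @low: query the lowest supported level when true, the current level otherwise
 *
 * Wraps the powerplay/SMU get_sclk callback under adev->pm.mutex.
 * Returns the value reported by the backend, or 0 when the callback
 * is not implemented.
 */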
int amdgpu_dpm_get_sclk(struct amdgpu_device *adev, bool low)
{
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
	int ret = 0;

	if (!pp_funcs->get_sclk)
		return 0;

	mutex_lock(&adev->pm.mutex);
	ret = pp_funcs->get_sclk((adev)->powerplay.pp_handle,
				 low);
	mutex_unlock(&adev->pm.mutex);

	return ret;
}

int amdgpu_dpm_get_mclk(struct amdgpu_device *adev, bool low)
{
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
	int ret = 0;

	if (!pp_funcs->get_mclk)
		return 0;

	mutex_lock(&adev->pm.mutex);
	ret = pp_funcs->get_mclk((adev)->powerplay.pp_handle,
				 low);
	mutex_unlock(&adev->pm.mutex);

	return ret;
}

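/* amdgpu_dpm_set_powergating_by_smu - gate or ungate an IP block through the SMU
 * @adev: amdgpu_device pointer
 * @block_type: AMD_IP_BLOCK_TYPE_* identifier of the block
 * @gate: true to power gate the block, false to ungate it
 *
 * Skips the request when the block is already in the target state and
 * caches the new state in adev->pm.pwr_state on success.
 */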
int amdgpu_dpm_set_powergating_by_smu(struct amdgpu_device *adev, uint32_t block_type, bool gate)
{
	int ret = 0;
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
	enum ip_power_state pwr_state = gate ? POWER_STATE_OFF : POWER_STATE_ON;

	if (atomic_read(&adev->pm.pwr_state[block_type]) == pwr_state) {
		dev_dbg(adev->dev, "IP block%d already in the target %s state!",
				block_type, gate ? "gate" : "ungate");
		return 0;
	}

	mutex_lock(&adev->pm.mutex);

	switch (block_type) {
	case AMD_IP_BLOCK_TYPE_UVD:
	case AMD_IP_BLOCK_TYPE_VCE:
	case AMD_IP_BLOCK_TYPE_GFX:
	case AMD_IP_BLOCK_TYPE_VCN:
	case AMD_IP_BLOCK_TYPE_SDMA:
	case AMD_IP_BLOCK_TYPE_JPEG:
	case AMD_IP_BLOCK_TYPE_GMC:
	case AMD_IP_BLOCK_TYPE_ACP:
		if (pp_funcs && pp_funcs->set_powergating_by_smu)
			ret = (pp_funcs->set_powergating_by_smu(
				(adev)->powerplay.pp_handle, block_type, gate));
		break;
	default:
		break;
	}

	if (!ret)
		atomic_set(&adev->pm.pwr_state[block_type], pwr_state);

	mutex_unlock(&adev->pm.mutex);

	return ret;
}

int amdgpu_dpm_set_gfx_power_up_by_imu(struct amdgpu_device *adev)
{
	struct smu_context *smu = adev->powerplay.pp_handle;
	int ret = -EOPNOTSUPP;

	if (is_support_sw_smu(adev)) {
		mutex_lock(&adev->pm.mutex);
		ret = smu_set_gfx_power_up_by_imu(smu);
		mutex_unlock(&adev->pm.mutex);
	}

	return ret;
}

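/* amdgpu_dpm_baco_enter - enter BACO (Bus Active, Chip Off)
 * @adev: amdgpu_device pointer
 *
 * Asks the powerplay backend to put the ASIC into the BACO power state.
 * Returns 0 on success, -ENOENT if the callback is not implemented.
 */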
int amdgpu_dpm_baco_enter(struct amdgpu_device *adev)
{
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
	void *pp_handle = adev->powerplay.pp_handle;
	int ret = 0;

	if (!pp_funcs || !pp_funcs->set_asic_baco_state)
		return -ENOENT;

	mutex_lock(&adev->pm.mutex);

	/* enter BACO state */
	ret = pp_funcs->set_asic_baco_state(pp_handle, 1);

	mutex_unlock(&adev->pm.mutex);

	return ret;
}

int amdgpu_dpm_baco_exit(struct amdgpu_device *adev)
{
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
	void *pp_handle = adev->powerplay.pp_handle;
	int ret = 0;

	if (!pp_funcs || !pp_funcs->set_asic_baco_state)
		return -ENOENT;

	mutex_lock(&adev->pm.mutex);

	/* exit BACO state */
	ret = pp_funcs->set_asic_baco_state(pp_handle, 0);

	mutex_unlock(&adev->pm.mutex);

	return ret;
}

int amdgpu_dpm_set_mp1_state(struct amdgpu_device *adev,
			     enum pp_mp1_state mp1_state)
{
	int ret = 0;
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;

	if (pp_funcs && pp_funcs->set_mp1_state) {
		mutex_lock(&adev->pm.mutex);

		ret = pp_funcs->set_mp1_state(
				adev->powerplay.pp_handle,
				mp1_state);

		mutex_unlock(&adev->pm.mutex);
	}

	return ret;
}

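/* amdgpu_dpm_is_baco_supported - check whether BACO can be used for reset
 * @adev: amdgpu_device pointer
 *
 * Returns true only when the backend reports BACO capability; see the
 * in-function comment for why BACO is not used while suspended (S3).
 */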
bool amdgpu_dpm_is_baco_supported(struct amdgpu_device *adev)
{
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
	void *pp_handle = adev->powerplay.pp_handle;
	bool baco_cap;
	int ret = 0;

	if (!pp_funcs || !pp_funcs->get_asic_baco_capability)
		return false;
	/* Don't use baco for reset in S3.
	 * This is a workaround for some platforms
	 * where entering BACO during suspend
	 * seems to cause reboots or hangs.
	 * This might be related to the fact that BACO controls
	 * power to the whole GPU including devices like audio and USB.
	 * Powering down/up everything may adversely affect these other
	 * devices.  Needs more investigation.
	 */
	if (adev->in_s3)
		return false;

	mutex_lock(&adev->pm.mutex);

	ret = pp_funcs->get_asic_baco_capability(pp_handle,
						 &baco_cap);

	mutex_unlock(&adev->pm.mutex);

	return ret ? false : baco_cap;
}

int amdgpu_dpm_mode2_reset(struct amdgpu_device *adev)
{
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
	void *pp_handle = adev->powerplay.pp_handle;
	int ret = 0;

	if (!pp_funcs || !pp_funcs->asic_reset_mode_2)
		return -ENOENT;

	mutex_lock(&adev->pm.mutex);

	ret = pp_funcs->asic_reset_mode_2(pp_handle);

	mutex_unlock(&adev->pm.mutex);

	return ret;
}

int amdgpu_dpm_enable_gfx_features(struct amdgpu_device *adev)
{
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
	void *pp_handle = adev->powerplay.pp_handle;
	int ret = 0;

	if (!pp_funcs || !pp_funcs->asic_reset_enable_gfx_features)
		return -ENOENT;

	mutex_lock(&adev->pm.mutex);

	ret = pp_funcs->asic_reset_enable_gfx_features(pp_handle);

	mutex_unlock(&adev->pm.mutex);

	return ret;
}

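/* amdgpu_dpm_baco_reset - reset the ASIC by cycling it through BACO
 * @adev: amdgpu_device pointer
 *
 * Enters and then exits the BACO state under adev->pm.mutex, which
 * effectively power cycles the chip while the bus stays active.
 */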
int amdgpu_dpm_baco_reset(struct amdgpu_device *adev)
{
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
	void *pp_handle = adev->powerplay.pp_handle;
	int ret = 0;

	if (!pp_funcs || !pp_funcs->set_asic_baco_state)
		return -ENOENT;

	mutex_lock(&adev->pm.mutex);

	/* enter BACO state */
	ret = pp_funcs->set_asic_baco_state(pp_handle, 1);
	if (ret)
		goto out;

	/* exit BACO state */
	ret = pp_funcs->set_asic_baco_state(pp_handle, 0);

out:
	mutex_unlock(&adev->pm.mutex);
	return ret;
}

bool amdgpu_dpm_is_mode1_reset_supported(struct amdgpu_device *adev)
{
	struct smu_context *smu = adev->powerplay.pp_handle;
	bool support_mode1_reset = false;

	if (is_support_sw_smu(adev)) {
		mutex_lock(&adev->pm.mutex);
		support_mode1_reset = smu_mode1_reset_is_support(smu);
		mutex_unlock(&adev->pm.mutex);
	}

	return support_mode1_reset;
}

int amdgpu_dpm_mode1_reset(struct amdgpu_device *adev)
{
	struct smu_context *smu = adev->powerplay.pp_handle;
	int ret = -EOPNOTSUPP;

	if (is_support_sw_smu(adev)) {
		mutex_lock(&adev->pm.mutex);
		ret = smu_mode1_reset(smu);
		mutex_unlock(&adev->pm.mutex);
	}

	return ret;
}

int amdgpu_dpm_switch_power_profile(struct amdgpu_device *adev,
				    enum PP_SMC_POWER_PROFILE type,
				    bool en)
{
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
	int ret = 0;

	if (amdgpu_sriov_vf(adev))
		return 0;

	if (pp_funcs && pp_funcs->switch_power_profile) {
		mutex_lock(&adev->pm.mutex);
		ret = pp_funcs->switch_power_profile(
			adev->powerplay.pp_handle, type, en);
		mutex_unlock(&adev->pm.mutex);
	}

	return ret;
}

int amdgpu_dpm_set_xgmi_pstate(struct amdgpu_device *adev,
			       uint32_t pstate)
{
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
	int ret = 0;

	if (pp_funcs && pp_funcs->set_xgmi_pstate) {
		mutex_lock(&adev->pm.mutex);
		ret = pp_funcs->set_xgmi_pstate(adev->powerplay.pp_handle,
						pstate);
		mutex_unlock(&adev->pm.mutex);
	}

	return ret;
}

int amdgpu_dpm_set_df_cstate(struct amdgpu_device *adev,
			     uint32_t cstate)
{
	int ret = 0;
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
	void *pp_handle = adev->powerplay.pp_handle;

	if (pp_funcs && pp_funcs->set_df_cstate) {
		mutex_lock(&adev->pm.mutex);
		ret = pp_funcs->set_df_cstate(pp_handle, cstate);
		mutex_unlock(&adev->pm.mutex);
	}

	return ret;
}

int amdgpu_dpm_allow_xgmi_power_down(struct amdgpu_device *adev, bool en)
{
	struct smu_context *smu = adev->powerplay.pp_handle;
	int ret = 0;

	if (is_support_sw_smu(adev)) {
		mutex_lock(&adev->pm.mutex);
		ret = smu_allow_xgmi_power_down(smu, en);
		mutex_unlock(&adev->pm.mutex);
	}

	return ret;
}

int amdgpu_dpm_get_xgmi_plpd_mode(struct amdgpu_device *adev, char **mode_desc)
{
	struct smu_context *smu = adev->powerplay.pp_handle;
	int mode = XGMI_PLPD_NONE;

	if (is_support_sw_smu(adev)) {
		mode = smu->plpd_mode;
		if (mode_desc == NULL)
			return mode;
		switch (smu->plpd_mode) {
		case XGMI_PLPD_DISALLOW:
			*mode_desc = "disallow";
			break;
		case XGMI_PLPD_DEFAULT:
			*mode_desc = "default";
			break;
		case XGMI_PLPD_OPTIMIZED:
			*mode_desc = "optimized";
			break;
		case XGMI_PLPD_NONE:
		default:
			*mode_desc = "none";
			break;
		}
	}

	return mode;
}

int amdgpu_dpm_set_xgmi_plpd_mode(struct amdgpu_device *adev, int mode)
{
	struct smu_context *smu = adev->powerplay.pp_handle;
	int ret = -EOPNOTSUPP;

	if (is_support_sw_smu(adev)) {
		mutex_lock(&adev->pm.mutex);
		ret = smu_set_xgmi_plpd_mode(smu, mode);
		mutex_unlock(&adev->pm.mutex);
	}

	return ret;
}

int amdgpu_dpm_enable_mgpu_fan_boost(struct amdgpu_device *adev)
{
	void *pp_handle = adev->powerplay.pp_handle;
	const struct amd_pm_funcs *pp_funcs =
			adev->powerplay.pp_funcs;
	int ret = 0;

	if (pp_funcs && pp_funcs->enable_mgpu_fan_boost) {
		mutex_lock(&adev->pm.mutex);
		ret = pp_funcs->enable_mgpu_fan_boost(pp_handle);
		mutex_unlock(&adev->pm.mutex);
	}

	return ret;
}

int amdgpu_dpm_set_clockgating_by_smu(struct amdgpu_device *adev,
				      uint32_t msg_id)
{
	void *pp_handle = adev->powerplay.pp_handle;
	const struct amd_pm_funcs *pp_funcs =
			adev->powerplay.pp_funcs;
	int ret = 0;

	if (pp_funcs && pp_funcs->set_clockgating_by_smu) {
		mutex_lock(&adev->pm.mutex);
		ret = pp_funcs->set_clockgating_by_smu(pp_handle,
						       msg_id);
		mutex_unlock(&adev->pm.mutex);
	}

	return ret;
}

int amdgpu_dpm_smu_i2c_bus_access(struct amdgpu_device *adev,
				  bool acquire)
{
	void *pp_handle = adev->powerplay.pp_handle;
	const struct amd_pm_funcs *pp_funcs =
			adev->powerplay.pp_funcs;
	int ret = -EOPNOTSUPP;

	if (pp_funcs && pp_funcs->smu_i2c_bus_access) {
		mutex_lock(&adev->pm.mutex);
		ret = pp_funcs->smu_i2c_bus_access(pp_handle,
						   acquire);
		mutex_unlock(&adev->pm.mutex);
	}

	return ret;
}

void amdgpu_pm_acpi_event_handler(struct amdgpu_device *adev)
{
	if (adev->pm.dpm_enabled) {
		mutex_lock(&adev->pm.mutex);
		if (power_supply_is_system_supplied() > 0)
			adev->pm.ac_power = true;
		else
			adev->pm.ac_power = false;

		if (adev->powerplay.pp_funcs &&
		    adev->powerplay.pp_funcs->enable_bapm)
			amdgpu_dpm_enable_bapm(adev, adev->pm.ac_power);

		if (is_support_sw_smu(adev))
			smu_set_ac_dc(adev->powerplay.pp_handle);

		mutex_unlock(&adev->pm.mutex);
	}
}

int amdgpu_dpm_read_sensor(struct amdgpu_device *adev, enum amd_pp_sensors sensor,
			   void *data, uint32_t *size)
{
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
	int ret = -EINVAL;

	if (!data || !size)
		return -EINVAL;

	if (pp_funcs && pp_funcs->read_sensor) {
		mutex_lock(&adev->pm.mutex);
		ret = pp_funcs->read_sensor(adev->powerplay.pp_handle,
					    sensor,
					    data,
					    size);
		mutex_unlock(&adev->pm.mutex);
	}

	return ret;
}

int amdgpu_dpm_get_apu_thermal_limit(struct amdgpu_device *adev, uint32_t *limit)
{
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
	int ret = -EINVAL;

	if (pp_funcs && pp_funcs->get_apu_thermal_limit) {
		mutex_lock(&adev->pm.mutex);
		ret = pp_funcs->get_apu_thermal_limit(adev->powerplay.pp_handle, limit);
		mutex_unlock(&adev->pm.mutex);
	}

	return ret;
}

int amdgpu_dpm_set_apu_thermal_limit(struct amdgpu_device *adev, uint32_t limit)
{
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
	int ret = -EINVAL;

	if (pp_funcs && pp_funcs->set_apu_thermal_limit) {
		mutex_lock(&adev->pm.mutex);
		ret = pp_funcs->set_apu_thermal_limit(adev->powerplay.pp_handle, limit);
		mutex_unlock(&adev->pm.mutex);
	}

	return ret;
}

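/* amdgpu_dpm_compute_clocks - re-evaluate clocks/power state for the current load
 * @adev: amdgpu_device pointer
 *
 * Updates display bandwidth requirements, waits for all active rings to
 * idle and then lets the powerplay backend pick matching clock levels.
 */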
void amdgpu_dpm_compute_clocks(struct amdgpu_device *adev)
{
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
	int i;

	if (!adev->pm.dpm_enabled)
		return;

	if (!pp_funcs->pm_compute_clocks)
		return;

	if (adev->mode_info.num_crtc)
		amdgpu_display_bandwidth_update(adev);

	for (i = 0; i < AMDGPU_MAX_RINGS; i++) {
		struct amdgpu_ring *ring = adev->rings[i];
		if (ring && ring->sched.ready)
			amdgpu_fence_wait_empty(ring);
	}

	mutex_lock(&adev->pm.mutex);
	pp_funcs->pm_compute_clocks(adev->powerplay.pp_handle);
	mutex_unlock(&adev->pm.mutex);
}

void amdgpu_dpm_enable_uvd(struct amdgpu_device *adev, bool enable)
{
	int ret = 0;

	if (adev->family == AMDGPU_FAMILY_SI) {
		mutex_lock(&adev->pm.mutex);
		if (enable) {
			adev->pm.dpm.uvd_active = true;
			adev->pm.dpm.state = POWER_STATE_TYPE_INTERNAL_UVD;
		} else {
			adev->pm.dpm.uvd_active = false;
		}
		mutex_unlock(&adev->pm.mutex);

		amdgpu_dpm_compute_clocks(adev);
		return;
	}

	ret = amdgpu_dpm_set_powergating_by_smu(adev, AMD_IP_BLOCK_TYPE_UVD, !enable);
	if (ret)
		DRM_ERROR("Dpm %s uvd failed, ret = %d.\n",
			  enable ? "enable" : "disable", ret);
}

void amdgpu_dpm_enable_vce(struct amdgpu_device *adev, bool enable)
{
	int ret = 0;

	if (adev->family == AMDGPU_FAMILY_SI) {
		mutex_lock(&adev->pm.mutex);
		if (enable) {
			adev->pm.dpm.vce_active = true;
			/* XXX select vce level based on ring/task */
			adev->pm.dpm.vce_level = AMD_VCE_LEVEL_AC_ALL;
		} else {
			adev->pm.dpm.vce_active = false;
		}
		mutex_unlock(&adev->pm.mutex);

		amdgpu_dpm_compute_clocks(adev);
		return;
	}

	ret = amdgpu_dpm_set_powergating_by_smu(adev, AMD_IP_BLOCK_TYPE_VCE, !enable);
	if (ret)
		DRM_ERROR("Dpm %s vce failed, ret = %d.\n",
			  enable ? "enable" : "disable", ret);
}

void amdgpu_dpm_enable_jpeg(struct amdgpu_device *adev, bool enable)
{
	int ret = 0;

	ret = amdgpu_dpm_set_powergating_by_smu(adev, AMD_IP_BLOCK_TYPE_JPEG, !enable);
	if (ret)
		DRM_ERROR("Dpm %s jpeg failed, ret = %d.\n",
			  enable ? "enable" : "disable", ret);
}

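/* amdgpu_pm_load_smu_firmware - load the SMU microcode
 * @adev: amdgpu_device pointer
 * @smu_version: optional output for the loaded SMU firmware version
 *
 * Returns 0 on success (or when no load_firmware callback exists) and a
 * negative error code if the firmware failed to load.
 */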
int amdgpu_pm_load_smu_firmware(struct amdgpu_device *adev, uint32_t *smu_version)
{
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
	int r = 0;

	if (!pp_funcs || !pp_funcs->load_firmware)
		return 0;

	mutex_lock(&adev->pm.mutex);
	r = pp_funcs->load_firmware(adev->powerplay.pp_handle);
	if (r) {
		pr_err("smu firmware loading failed\n");
		goto out;
	}

	if (smu_version)
		*smu_version = adev->pm.fw_version;

out:
	mutex_unlock(&adev->pm.mutex);
	return r;
}

int amdgpu_dpm_handle_passthrough_sbr(struct amdgpu_device *adev, bool enable)
{
	int ret = 0;

	if (is_support_sw_smu(adev)) {
		mutex_lock(&adev->pm.mutex);
		ret = smu_handle_passthrough_sbr(adev->powerplay.pp_handle,
						 enable);
		mutex_unlock(&adev->pm.mutex);
	}

	return ret;
}

int amdgpu_dpm_send_hbm_bad_pages_num(struct amdgpu_device *adev, uint32_t size)
{
	struct smu_context *smu = adev->powerplay.pp_handle;
	int ret = 0;

	if (!is_support_sw_smu(adev))
		return -EOPNOTSUPP;

	mutex_lock(&adev->pm.mutex);
	ret = smu_send_hbm_bad_pages_num(smu, size);
	mutex_unlock(&adev->pm.mutex);

	return ret;
}

int amdgpu_dpm_send_hbm_bad_channel_flag(struct amdgpu_device *adev, uint32_t size)
{
	struct smu_context *smu = adev->powerplay.pp_handle;
	int ret = 0;

	if (!is_support_sw_smu(adev))
		return -EOPNOTSUPP;

	mutex_lock(&adev->pm.mutex);
	ret = smu_send_hbm_bad_channel_flag(smu, size);
	mutex_unlock(&adev->pm.mutex);

	return ret;
}

int amdgpu_dpm_get_dpm_freq_range(struct amdgpu_device *adev,
				  enum pp_clock_type type,
				  uint32_t *min,
				  uint32_t *max)
{
	int ret = 0;

	if (type != PP_SCLK)
		return -EINVAL;

	if (!is_support_sw_smu(adev))
		return -EOPNOTSUPP;

	mutex_lock(&adev->pm.mutex);
	ret = smu_get_dpm_freq_range(adev->powerplay.pp_handle,
				     SMU_SCLK,
				     min,
				     max);
	mutex_unlock(&adev->pm.mutex);

	return ret;
}

int amdgpu_dpm_set_soft_freq_range(struct amdgpu_device *adev,
				   enum pp_clock_type type,
				   uint32_t min,
				   uint32_t max)
{
	struct smu_context *smu = adev->powerplay.pp_handle;
	int ret = 0;

	if (type != PP_SCLK)
		return -EINVAL;

	if (!is_support_sw_smu(adev))
		return -EOPNOTSUPP;

	mutex_lock(&adev->pm.mutex);
	ret = smu_set_soft_freq_range(smu,
				      SMU_SCLK,
				      min,
				      max);
	mutex_unlock(&adev->pm.mutex);

	return ret;
}

int amdgpu_dpm_write_watermarks_table(struct amdgpu_device *adev)
{
	struct smu_context *smu = adev->powerplay.pp_handle;
	int ret = 0;

	if (!is_support_sw_smu(adev))
		return -EOPNOTSUPP;

	mutex_lock(&adev->pm.mutex);
	ret = smu_write_watermarks_table(smu);
	mutex_unlock(&adev->pm.mutex);

	return ret;
}

int amdgpu_dpm_wait_for_event(struct amdgpu_device *adev,
			      enum smu_event_type event,
			      uint64_t event_arg)
{
	struct smu_context *smu = adev->powerplay.pp_handle;
	int ret = 0;

	if (!is_support_sw_smu(adev))
		return -EOPNOTSUPP;

	mutex_lock(&adev->pm.mutex);
	ret = smu_wait_for_event(smu, event, event_arg);
	mutex_unlock(&adev->pm.mutex);

	return ret;
}

int amdgpu_dpm_set_residency_gfxoff(struct amdgpu_device *adev, bool value)
{
	struct smu_context *smu = adev->powerplay.pp_handle;
	int ret = 0;

	if (!is_support_sw_smu(adev))
		return -EOPNOTSUPP;

	mutex_lock(&adev->pm.mutex);
	ret = smu_set_residency_gfxoff(smu, value);
	mutex_unlock(&adev->pm.mutex);

	return ret;
}

int amdgpu_dpm_get_residency_gfxoff(struct amdgpu_device *adev, u32 *value)
{
	struct smu_context *smu = adev->powerplay.pp_handle;
	int ret = 0;

	if (!is_support_sw_smu(adev))
		return -EOPNOTSUPP;

	mutex_lock(&adev->pm.mutex);
	ret = smu_get_residency_gfxoff(smu, value);
	mutex_unlock(&adev->pm.mutex);

	return ret;
}

int amdgpu_dpm_get_entrycount_gfxoff(struct amdgpu_device *adev, u64 *value)
{
	struct smu_context *smu = adev->powerplay.pp_handle;
	int ret = 0;

	if (!is_support_sw_smu(adev))
		return -EOPNOTSUPP;

	mutex_lock(&adev->pm.mutex);
	ret = smu_get_entrycount_gfxoff(smu, value);
	mutex_unlock(&adev->pm.mutex);

	return ret;
}

int amdgpu_dpm_get_status_gfxoff(struct amdgpu_device *adev, uint32_t *value)
{
	struct smu_context *smu = adev->powerplay.pp_handle;
	int ret = 0;

	if (!is_support_sw_smu(adev))
		return -EOPNOTSUPP;

	mutex_lock(&adev->pm.mutex);
	ret = smu_get_status_gfxoff(smu, value);
	mutex_unlock(&adev->pm.mutex);

	return ret;
}

uint64_t amdgpu_dpm_get_thermal_throttling_counter(struct amdgpu_device *adev)
{
	struct smu_context *smu = adev->powerplay.pp_handle;

	if (!is_support_sw_smu(adev))
		return 0;

	return atomic64_read(&smu->throttle_int_counter);
}

/**
 * amdgpu_dpm_gfx_state_change - Handle gfx power state change set
 * @adev: amdgpu_device pointer
 * @state: gfx power state (1 = sGpuChangeState_D0Entry, 2 = sGpuChangeState_D3Entry)
 */
void amdgpu_dpm_gfx_state_change(struct amdgpu_device *adev,
				 enum gfx_change_state state)
{
	mutex_lock(&adev->pm.mutex);
	if (adev->powerplay.pp_funcs &&
	    adev->powerplay.pp_funcs->gfx_state_change_set)
		((adev)->powerplay.pp_funcs->gfx_state_change_set(
			(adev)->powerplay.pp_handle, state));
	mutex_unlock(&adev->pm.mutex);
}

int amdgpu_dpm_get_ecc_info(struct amdgpu_device *adev,
			    void *umc_ecc)
{
	struct smu_context *smu = adev->powerplay.pp_handle;
	int ret = 0;

	if (!is_support_sw_smu(adev))
		return -EOPNOTSUPP;

	mutex_lock(&adev->pm.mutex);
	ret = smu_get_ecc_info(smu, umc_ecc);
	mutex_unlock(&adev->pm.mutex);

	return ret;
}

struct amd_vce_state *amdgpu_dpm_get_vce_clock_state(struct amdgpu_device *adev,
						     uint32_t idx)
{
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
	struct amd_vce_state *vstate = NULL;

	if (!pp_funcs->get_vce_clock_state)
		return NULL;

	mutex_lock(&adev->pm.mutex);
	vstate = pp_funcs->get_vce_clock_state(adev->powerplay.pp_handle,
					       idx);
	mutex_unlock(&adev->pm.mutex);

	return vstate;
}

void amdgpu_dpm_get_current_power_state(struct amdgpu_device *adev,
					enum amd_pm_state_type *state)
{
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;

	mutex_lock(&adev->pm.mutex);

	if (!pp_funcs->get_current_power_state) {
		*state = adev->pm.dpm.user_state;
		goto out;
	}

	*state = pp_funcs->get_current_power_state(adev->powerplay.pp_handle);
	if (*state < POWER_STATE_TYPE_DEFAULT ||
	    *state > POWER_STATE_TYPE_INTERNAL_3DPERF)
		*state = adev->pm.dpm.user_state;

out:
	mutex_unlock(&adev->pm.mutex);
}

void amdgpu_dpm_set_power_state(struct amdgpu_device *adev,
				enum amd_pm_state_type state)
{
	mutex_lock(&adev->pm.mutex);
	adev->pm.dpm.user_state = state;
	mutex_unlock(&adev->pm.mutex);

	if (is_support_sw_smu(adev))
		return;

	if (amdgpu_dpm_dispatch_task(adev,
				     AMD_PP_TASK_ENABLE_USER_STATE,
				     &state) == -EOPNOTSUPP)
		amdgpu_dpm_compute_clocks(adev);
}

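/* amdgpu_dpm_get_performance_level - query the current forced DPM level
 * @adev: amdgpu_device pointer
 *
 * Falls back to the cached adev->pm.dpm.forced_level when no callback is
 * registered, or to AUTO when there are no powerplay callbacks at all.
 */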
enum amd_dpm_forced_level amdgpu_dpm_get_performance_level(struct amdgpu_device *adev)
{
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
	enum amd_dpm_forced_level level;

	if (!pp_funcs)
		return AMD_DPM_FORCED_LEVEL_AUTO;

	mutex_lock(&adev->pm.mutex);
	if (pp_funcs->get_performance_level)
		level = pp_funcs->get_performance_level(adev->powerplay.pp_handle);
	else
		level = adev->pm.dpm.forced_level;
	mutex_unlock(&adev->pm.mutex);

	return level;
}

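/* amdgpu_dpm_force_performance_level - force a specific DPM performance level
 * @adev: amdgpu_device pointer
 * @level: AMD_DPM_FORCED_LEVEL_* target level
 *
 * Handles the GFXOFF workaround for Raven parts that are not Raven2 and the
 * clock/power gating transitions needed when entering or leaving the UMD
 * pstate (profiling) levels before forwarding the request to the backend.
 */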
int amdgpu_dpm_force_performance_level(struct amdgpu_device *adev,
				       enum amd_dpm_forced_level level)
{
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
	enum amd_dpm_forced_level current_level;
	uint32_t profile_mode_mask = AMD_DPM_FORCED_LEVEL_PROFILE_STANDARD |
					AMD_DPM_FORCED_LEVEL_PROFILE_MIN_SCLK |
					AMD_DPM_FORCED_LEVEL_PROFILE_MIN_MCLK |
					AMD_DPM_FORCED_LEVEL_PROFILE_PEAK;

	if (!pp_funcs || !pp_funcs->force_performance_level)
		return 0;

	if (adev->pm.dpm.thermal_active)
		return -EINVAL;

	current_level = amdgpu_dpm_get_performance_level(adev);
	if (current_level == level)
		return 0;

	if (adev->asic_type == CHIP_RAVEN) {
		if (!(adev->apu_flags & AMD_APU_IS_RAVEN2)) {
			if (current_level != AMD_DPM_FORCED_LEVEL_MANUAL &&
			    level == AMD_DPM_FORCED_LEVEL_MANUAL)
				amdgpu_gfx_off_ctrl(adev, false);
			else if (current_level == AMD_DPM_FORCED_LEVEL_MANUAL &&
				 level != AMD_DPM_FORCED_LEVEL_MANUAL)
				amdgpu_gfx_off_ctrl(adev, true);
		}
	}

	if (!(current_level & profile_mode_mask) &&
	    (level == AMD_DPM_FORCED_LEVEL_PROFILE_EXIT))
		return -EINVAL;

	if (!(current_level & profile_mode_mask) &&
	    (level & profile_mode_mask)) {
		/* enter UMD Pstate */
		amdgpu_device_ip_set_powergating_state(adev,
						       AMD_IP_BLOCK_TYPE_GFX,
						       AMD_PG_STATE_UNGATE);
		amdgpu_device_ip_set_clockgating_state(adev,
						       AMD_IP_BLOCK_TYPE_GFX,
						       AMD_CG_STATE_UNGATE);
	} else if ((current_level & profile_mode_mask) &&
		   !(level & profile_mode_mask)) {
		/* exit UMD Pstate */
		amdgpu_device_ip_set_clockgating_state(adev,
						       AMD_IP_BLOCK_TYPE_GFX,
						       AMD_CG_STATE_GATE);
		amdgpu_device_ip_set_powergating_state(adev,
						       AMD_IP_BLOCK_TYPE_GFX,
						       AMD_PG_STATE_GATE);
	}

	mutex_lock(&adev->pm.mutex);

	if (pp_funcs->force_performance_level(adev->powerplay.pp_handle,
					      level)) {
		mutex_unlock(&adev->pm.mutex);
		return -EINVAL;
	}

	adev->pm.dpm.forced_level = level;

	mutex_unlock(&adev->pm.mutex);

	return 0;
}

int amdgpu_dpm_get_pp_num_states(struct amdgpu_device *adev,
				 struct pp_states_info *states)
{
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
	int ret = 0;

	if (!pp_funcs->get_pp_num_states)
		return -EOPNOTSUPP;

	mutex_lock(&adev->pm.mutex);
	ret = pp_funcs->get_pp_num_states(adev->powerplay.pp_handle,
					  states);
	mutex_unlock(&adev->pm.mutex);

	return ret;
}

int amdgpu_dpm_dispatch_task(struct amdgpu_device *adev,
			     enum amd_pp_task task_id,
			     enum amd_pm_state_type *user_state)
{
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
	int ret = 0;

	if (!pp_funcs->dispatch_tasks)
		return -EOPNOTSUPP;

	mutex_lock(&adev->pm.mutex);
	ret = pp_funcs->dispatch_tasks(adev->powerplay.pp_handle,
				       task_id,
				       user_state);
	mutex_unlock(&adev->pm.mutex);

	return ret;
}

int amdgpu_dpm_get_pp_table(struct amdgpu_device *adev, char **table)
{
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
	int ret = 0;

	if (!pp_funcs->get_pp_table)
		return 0;

	mutex_lock(&adev->pm.mutex);
	ret = pp_funcs->get_pp_table(adev->powerplay.pp_handle,
				     table);
	mutex_unlock(&adev->pm.mutex);

	return ret;
}

int amdgpu_dpm_set_fine_grain_clk_vol(struct amdgpu_device *adev,
				      uint32_t type,
				      long *input,
				      uint32_t size)
{
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
	int ret = 0;

	if (!pp_funcs->set_fine_grain_clk_vol)
		return 0;

	mutex_lock(&adev->pm.mutex);
	ret = pp_funcs->set_fine_grain_clk_vol(adev->powerplay.pp_handle,
					       type,
					       input,
					       size);
	mutex_unlock(&adev->pm.mutex);

	return ret;
}

int amdgpu_dpm_odn_edit_dpm_table(struct amdgpu_device *adev,
				  uint32_t type,
				  long *input,
				  uint32_t size)
{
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
	int ret = 0;

	if (!pp_funcs->odn_edit_dpm_table)
		return 0;

	mutex_lock(&adev->pm.mutex);
	ret = pp_funcs->odn_edit_dpm_table(adev->powerplay.pp_handle,
					   type,
					   input,
					   size);
	mutex_unlock(&adev->pm.mutex);

	return ret;
}

int amdgpu_dpm_print_clock_levels(struct amdgpu_device *adev,
				  enum pp_clock_type type,
				  char *buf)
{
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
	int ret = 0;

	if (!pp_funcs->print_clock_levels)
		return 0;

	mutex_lock(&adev->pm.mutex);
	ret = pp_funcs->print_clock_levels(adev->powerplay.pp_handle,
					   type,
					   buf);
	mutex_unlock(&adev->pm.mutex);

	return ret;
}

int amdgpu_dpm_emit_clock_levels(struct amdgpu_device *adev,
				 enum pp_clock_type type,
				 char *buf,
				 int *offset)
{
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
	int ret = 0;

	if (!pp_funcs->emit_clock_levels)
		return -ENOENT;

	mutex_lock(&adev->pm.mutex);
	ret = pp_funcs->emit_clock_levels(adev->powerplay.pp_handle,
					  type,
					  buf,
					  offset);
	mutex_unlock(&adev->pm.mutex);

	return ret;
}

int amdgpu_dpm_set_ppfeature_status(struct amdgpu_device *adev,
				    uint64_t ppfeature_masks)
{
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
	int ret = 0;

	if (!pp_funcs->set_ppfeature_status)
		return 0;

	mutex_lock(&adev->pm.mutex);
	ret = pp_funcs->set_ppfeature_status(adev->powerplay.pp_handle,
					     ppfeature_masks);
	mutex_unlock(&adev->pm.mutex);

	return ret;
}

int amdgpu_dpm_get_ppfeature_status(struct amdgpu_device *adev, char *buf)
{
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
	int ret = 0;

	if (!pp_funcs->get_ppfeature_status)
		return 0;

	mutex_lock(&adev->pm.mutex);
	ret = pp_funcs->get_ppfeature_status(adev->powerplay.pp_handle,
					     buf);
	mutex_unlock(&adev->pm.mutex);

	return ret;
}

int amdgpu_dpm_force_clock_level(struct amdgpu_device *adev,
				 enum pp_clock_type type,
				 uint32_t mask)
{
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
	int ret = 0;

	if (!pp_funcs->force_clock_level)
		return 0;

	mutex_lock(&adev->pm.mutex);
	ret = pp_funcs->force_clock_level(adev->powerplay.pp_handle,
					  type,
					  mask);
	mutex_unlock(&adev->pm.mutex);

	return ret;
}

int amdgpu_dpm_get_sclk_od(struct amdgpu_device *adev)
{
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
	int ret = 0;

	if (!pp_funcs->get_sclk_od)
		return -EOPNOTSUPP;

	mutex_lock(&adev->pm.mutex);
	ret = pp_funcs->get_sclk_od(adev->powerplay.pp_handle);
	mutex_unlock(&adev->pm.mutex);

	return ret;
}

int amdgpu_dpm_set_sclk_od(struct amdgpu_device *adev, uint32_t value)
{
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;

	if (is_support_sw_smu(adev))
		return -EOPNOTSUPP;

	mutex_lock(&adev->pm.mutex);
	if (pp_funcs->set_sclk_od)
		pp_funcs->set_sclk_od(adev->powerplay.pp_handle, value);
	mutex_unlock(&adev->pm.mutex);

	if (amdgpu_dpm_dispatch_task(adev,
				     AMD_PP_TASK_READJUST_POWER_STATE,
				     NULL) == -EOPNOTSUPP) {
		adev->pm.dpm.current_ps = adev->pm.dpm.boot_ps;
		amdgpu_dpm_compute_clocks(adev);
	}

	return 0;
}

int amdgpu_dpm_get_mclk_od(struct amdgpu_device *adev)
{
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
	int ret = 0;

	if (!pp_funcs->get_mclk_od)
		return -EOPNOTSUPP;

	mutex_lock(&adev->pm.mutex);
	ret = pp_funcs->get_mclk_od(adev->powerplay.pp_handle);
	mutex_unlock(&adev->pm.mutex);

	return ret;
}

int amdgpu_dpm_set_mclk_od(struct amdgpu_device *adev, uint32_t value)
{
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;

	if (is_support_sw_smu(adev))
		return -EOPNOTSUPP;

	mutex_lock(&adev->pm.mutex);
	if (pp_funcs->set_mclk_od)
		pp_funcs->set_mclk_od(adev->powerplay.pp_handle, value);
	mutex_unlock(&adev->pm.mutex);

	if (amdgpu_dpm_dispatch_task(adev,
				     AMD_PP_TASK_READJUST_POWER_STATE,
				     NULL) == -EOPNOTSUPP) {
		adev->pm.dpm.current_ps = adev->pm.dpm.boot_ps;
		amdgpu_dpm_compute_clocks(adev);
	}

	return 0;
}

int amdgpu_dpm_get_power_profile_mode(struct amdgpu_device *adev,
				      char *buf)
{
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
	int ret = 0;

	if (!pp_funcs->get_power_profile_mode)
		return -EOPNOTSUPP;

	mutex_lock(&adev->pm.mutex);
	ret = pp_funcs->get_power_profile_mode(adev->powerplay.pp_handle,
					       buf);
	mutex_unlock(&adev->pm.mutex);

	return ret;
}

int amdgpu_dpm_set_power_profile_mode(struct amdgpu_device *adev,
				      long *input, uint32_t size)
{
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
	int ret = 0;

	if (!pp_funcs->set_power_profile_mode)
		return 0;

	mutex_lock(&adev->pm.mutex);
	ret = pp_funcs->set_power_profile_mode(adev->powerplay.pp_handle,
					       input,
					       size);
	mutex_unlock(&adev->pm.mutex);

	return ret;
}

int amdgpu_dpm_get_gpu_metrics(struct amdgpu_device *adev, void **table)
{
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
	int ret = 0;

	if (!pp_funcs->get_gpu_metrics)
		return -EOPNOTSUPP;

	mutex_lock(&adev->pm.mutex);
	ret = pp_funcs->get_gpu_metrics(adev->powerplay.pp_handle,
					table);
	mutex_unlock(&adev->pm.mutex);

	return ret;
}

int amdgpu_dpm_get_fan_control_mode(struct amdgpu_device *adev,
				    uint32_t *fan_mode)
{
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
	int ret = 0;

	if (!pp_funcs->get_fan_control_mode)
		return -EOPNOTSUPP;

	mutex_lock(&adev->pm.mutex);
	ret = pp_funcs->get_fan_control_mode(adev->powerplay.pp_handle,
					     fan_mode);
	mutex_unlock(&adev->pm.mutex);

	return ret;
}

int amdgpu_dpm_set_fan_speed_pwm(struct amdgpu_device *adev,
				 uint32_t speed)
{
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
	int ret = 0;

	if (!pp_funcs->set_fan_speed_pwm)
		return -EOPNOTSUPP;

	mutex_lock(&adev->pm.mutex);
	ret = pp_funcs->set_fan_speed_pwm(adev->powerplay.pp_handle,
					  speed);
	mutex_unlock(&adev->pm.mutex);

	return ret;
}

int amdgpu_dpm_get_fan_speed_pwm(struct amdgpu_device *adev,
				 uint32_t *speed)
{
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
	int ret = 0;

	if (!pp_funcs->get_fan_speed_pwm)
		return -EOPNOTSUPP;

	mutex_lock(&adev->pm.mutex);
	ret = pp_funcs->get_fan_speed_pwm(adev->powerplay.pp_handle,
					  speed);
	mutex_unlock(&adev->pm.mutex);

	return ret;
}

int amdgpu_dpm_get_fan_speed_rpm(struct amdgpu_device *adev,
				 uint32_t *speed)
{
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
	int ret = 0;

	if (!pp_funcs->get_fan_speed_rpm)
		return -EOPNOTSUPP;

	mutex_lock(&adev->pm.mutex);
	ret = pp_funcs->get_fan_speed_rpm(adev->powerplay.pp_handle,
					  speed);
	mutex_unlock(&adev->pm.mutex);

	return ret;
}

int amdgpu_dpm_set_fan_speed_rpm(struct amdgpu_device *adev,
				 uint32_t speed)
{
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
	int ret = 0;

	if (!pp_funcs->set_fan_speed_rpm)
		return -EOPNOTSUPP;

	mutex_lock(&adev->pm.mutex);
	ret = pp_funcs->set_fan_speed_rpm(adev->powerplay.pp_handle,
					  speed);
	mutex_unlock(&adev->pm.mutex);

	return ret;
}

int amdgpu_dpm_set_fan_control_mode(struct amdgpu_device *adev,
				    uint32_t mode)
{
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
	int ret = 0;

	if (!pp_funcs->set_fan_control_mode)
		return -EOPNOTSUPP;

	mutex_lock(&adev->pm.mutex);
	ret = pp_funcs->set_fan_control_mode(adev->powerplay.pp_handle,
					     mode);
	mutex_unlock(&adev->pm.mutex);

	return ret;
}

int amdgpu_dpm_get_power_limit(struct amdgpu_device *adev,
			       uint32_t *limit,
			       enum pp_power_limit_level pp_limit_level,
			       enum pp_power_type power_type)
{
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
	int ret = 0;

	if (!pp_funcs->get_power_limit)
		return -ENODATA;

	mutex_lock(&adev->pm.mutex);
	ret = pp_funcs->get_power_limit(adev->powerplay.pp_handle,
					limit,
					pp_limit_level,
					power_type);
	mutex_unlock(&adev->pm.mutex);

	return ret;
}

int amdgpu_dpm_set_power_limit(struct amdgpu_device *adev,
			       uint32_t limit)
{
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
	int ret = 0;

	if (!pp_funcs->set_power_limit)
		return -EINVAL;

	mutex_lock(&adev->pm.mutex);
	ret = pp_funcs->set_power_limit(adev->powerplay.pp_handle,
					limit);
	mutex_unlock(&adev->pm.mutex);

	return ret;
}

int amdgpu_dpm_is_cclk_dpm_supported(struct amdgpu_device *adev)
{
	bool cclk_dpm_supported = false;

	if (!is_support_sw_smu(adev))
		return false;

	mutex_lock(&adev->pm.mutex);
	cclk_dpm_supported = is_support_cclk_dpm(adev);
	mutex_unlock(&adev->pm.mutex);

	return (int)cclk_dpm_supported;
}

int amdgpu_dpm_debugfs_print_current_performance_level(struct amdgpu_device *adev,
						       struct seq_file *m)
{
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;

	if (!pp_funcs->debugfs_print_current_performance_level)
		return -EOPNOTSUPP;

	mutex_lock(&adev->pm.mutex);
	pp_funcs->debugfs_print_current_performance_level(adev->powerplay.pp_handle,
							  m);
	mutex_unlock(&adev->pm.mutex);

	return 0;
}

int amdgpu_dpm_get_smu_prv_buf_details(struct amdgpu_device *adev,
				       void **addr,
				       size_t *size)
{
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
	int ret = 0;

	if (!pp_funcs->get_smu_prv_buf_details)
		return -ENOSYS;

	mutex_lock(&adev->pm.mutex);
	ret = pp_funcs->get_smu_prv_buf_details(adev->powerplay.pp_handle,
						addr,
						size);
	mutex_unlock(&adev->pm.mutex);

	return ret;
}

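/* amdgpu_dpm_is_overdrive_supported - check whether overclocking (OD) is enabled
 * @adev: amdgpu_device pointer
 *
 * SMU-based parts report smu->od_enabled (APUs always report true), powerplay
 * parts report hwmgr->od_enabled, and legacy dpm always reports false.
 */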
int amdgpu_dpm_is_overdrive_supported(struct amdgpu_device *adev)
{
	if (is_support_sw_smu(adev)) {
		struct smu_context *smu = adev->powerplay.pp_handle;

		return (smu->od_enabled || smu->is_apu);
	} else {
		struct pp_hwmgr *hwmgr;

		/*
		 * dpm on some legacy ASICs doesn't carry an od_enabled member
		 * as its pp_handle is cast directly from adev.
		 */
		if (amdgpu_dpm_is_legacy_dpm(adev))
			return false;

		hwmgr = (struct pp_hwmgr *)adev->powerplay.pp_handle;

		return hwmgr->od_enabled;
	}
}

int amdgpu_dpm_set_pp_table(struct amdgpu_device *adev,
			    const char *buf,
			    size_t size)
{
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
	int ret = 0;

	if (!pp_funcs->set_pp_table)
		return -EOPNOTSUPP;

	mutex_lock(&adev->pm.mutex);
	ret = pp_funcs->set_pp_table(adev->powerplay.pp_handle,
				     buf,
				     size);
	mutex_unlock(&adev->pm.mutex);

	return ret;
}

int amdgpu_dpm_get_num_cpu_cores(struct amdgpu_device *adev)
{
	struct smu_context *smu = adev->powerplay.pp_handle;

	if (!is_support_sw_smu(adev))
		return INT_MAX;

	return smu->cpu_core_num;
}

void amdgpu_dpm_stb_debug_fs_init(struct amdgpu_device *adev)
{
	if (!is_support_sw_smu(adev))
		return;

	amdgpu_smu_stb_debug_fs_init(adev);
}

int amdgpu_dpm_display_configuration_change(struct amdgpu_device *adev,
					    const struct amd_pp_display_configuration *input)
{
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
	int ret = 0;

	if (!pp_funcs->display_configuration_change)
		return 0;

	mutex_lock(&adev->pm.mutex);
	ret = pp_funcs->display_configuration_change(adev->powerplay.pp_handle,
						     input);
	mutex_unlock(&adev->pm.mutex);

	return ret;
}

int amdgpu_dpm_get_clock_by_type(struct amdgpu_device *adev,
				 enum amd_pp_clock_type type,
				 struct amd_pp_clocks *clocks)
{
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
	int ret = 0;

	if (!pp_funcs->get_clock_by_type)
		return 0;

	mutex_lock(&adev->pm.mutex);
	ret = pp_funcs->get_clock_by_type(adev->powerplay.pp_handle,
					  type,
					  clocks);
	mutex_unlock(&adev->pm.mutex);

	return ret;
}

int amdgpu_dpm_get_display_mode_validation_clks(struct amdgpu_device *adev,
						struct amd_pp_simple_clock_info *clocks)
{
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
	int ret = 0;

	if (!pp_funcs->get_display_mode_validation_clocks)
		return 0;

	mutex_lock(&adev->pm.mutex);
	ret = pp_funcs->get_display_mode_validation_clocks(adev->powerplay.pp_handle,
							   clocks);
	mutex_unlock(&adev->pm.mutex);

	return ret;
}

int amdgpu_dpm_get_clock_by_type_with_latency(struct amdgpu_device *adev,
					      enum amd_pp_clock_type type,
					      struct pp_clock_levels_with_latency *clocks)
{
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
	int ret = 0;

	if (!pp_funcs->get_clock_by_type_with_latency)
		return 0;

	mutex_lock(&adev->pm.mutex);
	ret = pp_funcs->get_clock_by_type_with_latency(adev->powerplay.pp_handle,
						       type,
						       clocks);
	mutex_unlock(&adev->pm.mutex);

	return ret;
}

int amdgpu_dpm_get_clock_by_type_with_voltage(struct amdgpu_device *adev,
					      enum amd_pp_clock_type type,
					      struct pp_clock_levels_with_voltage *clocks)
{
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
	int ret = 0;

	if (!pp_funcs->get_clock_by_type_with_voltage)
		return 0;

	mutex_lock(&adev->pm.mutex);
	ret = pp_funcs->get_clock_by_type_with_voltage(adev->powerplay.pp_handle,
						       type,
						       clocks);
	mutex_unlock(&adev->pm.mutex);

	return ret;
}

int amdgpu_dpm_set_watermarks_for_clocks_ranges(struct amdgpu_device *adev,
						void *clock_ranges)
{
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
	int ret = 0;

	if (!pp_funcs->set_watermarks_for_clocks_ranges)
		return -EOPNOTSUPP;

	mutex_lock(&adev->pm.mutex);
	ret = pp_funcs->set_watermarks_for_clocks_ranges(adev->powerplay.pp_handle,
							 clock_ranges);
	mutex_unlock(&adev->pm.mutex);

	return ret;
}

int amdgpu_dpm_display_clock_voltage_request(struct amdgpu_device *adev,
					     struct pp_display_clock_request *clock)
{
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
	int ret = 0;

	if (!pp_funcs->display_clock_voltage_request)
		return -EOPNOTSUPP;

	mutex_lock(&adev->pm.mutex);
	ret = pp_funcs->display_clock_voltage_request(adev->powerplay.pp_handle,
						      clock);
	mutex_unlock(&adev->pm.mutex);

	return ret;
}

int amdgpu_dpm_get_current_clocks(struct amdgpu_device *adev,
				  struct amd_pp_clock_info *clocks)
{
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
	int ret = 0;

	if (!pp_funcs->get_current_clocks)
		return -EOPNOTSUPP;

	mutex_lock(&adev->pm.mutex);
	ret = pp_funcs->get_current_clocks(adev->powerplay.pp_handle,
					   clocks);
	mutex_unlock(&adev->pm.mutex);

	return ret;
}

void amdgpu_dpm_notify_smu_enable_pwe(struct amdgpu_device *adev)
{
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;

	if (!pp_funcs->notify_smu_enable_pwe)
		return;

	mutex_lock(&adev->pm.mutex);
	pp_funcs->notify_smu_enable_pwe(adev->powerplay.pp_handle);
	mutex_unlock(&adev->pm.mutex);
}

int amdgpu_dpm_set_active_display_count(struct amdgpu_device *adev,
					uint32_t count)
{
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
	int ret = 0;

	if (!pp_funcs->set_active_display_count)
		return -EOPNOTSUPP;

	mutex_lock(&adev->pm.mutex);
	ret = pp_funcs->set_active_display_count(adev->powerplay.pp_handle,
						 count);
	mutex_unlock(&adev->pm.mutex);

	return ret;
}

int amdgpu_dpm_set_min_deep_sleep_dcefclk(struct amdgpu_device *adev,
					  uint32_t clock)
{
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
	int ret = 0;

	if (!pp_funcs->set_min_deep_sleep_dcefclk)
		return -EOPNOTSUPP;

	mutex_lock(&adev->pm.mutex);
	ret = pp_funcs->set_min_deep_sleep_dcefclk(adev->powerplay.pp_handle,
						   clock);
	mutex_unlock(&adev->pm.mutex);

	return ret;
}

void amdgpu_dpm_set_hard_min_dcefclk_by_freq(struct amdgpu_device *adev,
					     uint32_t clock)
{
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;

	if (!pp_funcs->set_hard_min_dcefclk_by_freq)
		return;

	mutex_lock(&adev->pm.mutex);
	pp_funcs->set_hard_min_dcefclk_by_freq(adev->powerplay.pp_handle,
					       clock);
	mutex_unlock(&adev->pm.mutex);
}

void amdgpu_dpm_set_hard_min_fclk_by_freq(struct amdgpu_device *adev,
					  uint32_t clock)
{
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;

	if (!pp_funcs->set_hard_min_fclk_by_freq)
		return;

	mutex_lock(&adev->pm.mutex);
	pp_funcs->set_hard_min_fclk_by_freq(adev->powerplay.pp_handle,
					    clock);
	mutex_unlock(&adev->pm.mutex);
}

int amdgpu_dpm_display_disable_memory_clock_switch(struct amdgpu_device *adev,
						   bool disable_memory_clock_switch)
{
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
	int ret = 0;

	if (!pp_funcs->display_disable_memory_clock_switch)
		return 0;

	mutex_lock(&adev->pm.mutex);
	ret = pp_funcs->display_disable_memory_clock_switch(adev->powerplay.pp_handle,
							    disable_memory_clock_switch);
	mutex_unlock(&adev->pm.mutex);

	return ret;
}

int amdgpu_dpm_get_max_sustainable_clocks_by_dc(struct amdgpu_device *adev,
						struct pp_smu_nv_clock_table *max_clocks)
{
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
	int ret = 0;

	if (!pp_funcs->get_max_sustainable_clocks_by_dc)
		return -EOPNOTSUPP;

	mutex_lock(&adev->pm.mutex);
	ret = pp_funcs->get_max_sustainable_clocks_by_dc(adev->powerplay.pp_handle,
							 max_clocks);
	mutex_unlock(&adev->pm.mutex);

	return ret;
}

enum pp_smu_status amdgpu_dpm_get_uclk_dpm_states(struct amdgpu_device *adev,
						  unsigned int *clock_values_in_khz,
						  unsigned int *num_states)
{
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
	int ret = 0;

	if (!pp_funcs->get_uclk_dpm_states)
		return -EOPNOTSUPP;

	mutex_lock(&adev->pm.mutex);
	ret = pp_funcs->get_uclk_dpm_states(adev->powerplay.pp_handle,
					    clock_values_in_khz,
					    num_states);
	mutex_unlock(&adev->pm.mutex);

	return ret;
}

int amdgpu_dpm_get_dpm_clock_table(struct amdgpu_device *adev,
				   struct dpm_clocks *clock_table)
{
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
	int ret = 0;

	if (!pp_funcs->get_dpm_clock_table)
		return -EOPNOTSUPP;

	mutex_lock(&adev->pm.mutex);
	ret = pp_funcs->get_dpm_clock_table(adev->powerplay.pp_handle,
					    clock_table);
	mutex_unlock(&adev->pm.mutex);

	return ret;
}