drm/amd/pm: Hide irrelevant pm device attributes
[linux-2.6-microblaze.git] / drivers / gpu / drm / amd / pm / amdgpu_dpm.c
1 /*
2  * Copyright 2011 Advanced Micro Devices, Inc.
3  *
4  * Permission is hereby granted, free of charge, to any person obtaining a
5  * copy of this software and associated documentation files (the "Software"),
6  * to deal in the Software without restriction, including without limitation
7  * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8  * and/or sell copies of the Software, and to permit persons to whom the
9  * Software is furnished to do so, subject to the following conditions:
10  *
11  * The above copyright notice and this permission notice shall be included in
12  * all copies or substantial portions of the Software.
13  *
14  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
17  * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18  * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19  * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20  * OTHER DEALINGS IN THE SOFTWARE.
21  *
22  * Authors: Alex Deucher
23  */
24
25 #include "amdgpu.h"
26 #include "amdgpu_atombios.h"
27 #include "amdgpu_i2c.h"
28 #include "amdgpu_dpm.h"
29 #include "atom.h"
30 #include "amd_pcie.h"
31 #include "amdgpu_display.h"
32 #include "hwmgr.h"
33 #include <linux/power_supply.h>
34 #include "amdgpu_smu.h"
35
/* Forward a BACO/power-management "bapm" enable/disable request to the
 * powerplay backend.  Callers must verify pp_funcs->enable_bapm is
 * non-NULL before using this macro (see amdgpu_pm_acpi_event_handler).
 */
#define amdgpu_dpm_enable_bapm(adev, e) \
		((adev)->powerplay.pp_funcs->enable_bapm((adev)->powerplay.pp_handle, (e)))

/* Legacy dpm backends store the adev pointer itself as the pp_handle. */
#define amdgpu_dpm_is_legacy_dpm(adev) ((adev)->powerplay.pp_handle == (adev))
40
41 int amdgpu_dpm_get_sclk(struct amdgpu_device *adev, bool low)
42 {
43         const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
44         int ret = 0;
45
46         if (!pp_funcs->get_sclk)
47                 return 0;
48
49         mutex_lock(&adev->pm.mutex);
50         ret = pp_funcs->get_sclk((adev)->powerplay.pp_handle,
51                                  low);
52         mutex_unlock(&adev->pm.mutex);
53
54         return ret;
55 }
56
57 int amdgpu_dpm_get_mclk(struct amdgpu_device *adev, bool low)
58 {
59         const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
60         int ret = 0;
61
62         if (!pp_funcs->get_mclk)
63                 return 0;
64
65         mutex_lock(&adev->pm.mutex);
66         ret = pp_funcs->get_mclk((adev)->powerplay.pp_handle,
67                                  low);
68         mutex_unlock(&adev->pm.mutex);
69
70         return ret;
71 }
72
/**
 * amdgpu_dpm_set_powergating_by_smu - gate/ungate an IP block via the SMU
 * @adev: amdgpu device handle
 * @block_type: AMD_IP_BLOCK_TYPE_* identifier of the block to (un)gate
 * @gate: true to power-gate the block, false to ungate it
 *
 * Caches the per-block power state in adev->pm.pwr_state so repeated
 * requests for the same state become no-ops.
 *
 * Returns 0 on success or when already in the target state, otherwise
 * the backend's error code.
 */
int amdgpu_dpm_set_powergating_by_smu(struct amdgpu_device *adev, uint32_t block_type, bool gate)
{
	int ret = 0;
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
	enum ip_power_state pwr_state = gate ? POWER_STATE_OFF : POWER_STATE_ON;

	/* Fast path: skip the mutex when the cached state already matches.
	 * NOTE(review): block_type is used to index pwr_state[] unchecked —
	 * assumes callers only pass valid AMD_IP_BLOCK_TYPE_* values.
	 */
	if (atomic_read(&adev->pm.pwr_state[block_type]) == pwr_state) {
		dev_dbg(adev->dev, "IP block%d already in the target %s state!",
				block_type, gate ? "gate" : "ungate");
		return 0;
	}

	mutex_lock(&adev->pm.mutex);

	switch (block_type) {
	case AMD_IP_BLOCK_TYPE_UVD:
	case AMD_IP_BLOCK_TYPE_VCE:
	case AMD_IP_BLOCK_TYPE_GFX:
	case AMD_IP_BLOCK_TYPE_VCN:
	case AMD_IP_BLOCK_TYPE_SDMA:
	case AMD_IP_BLOCK_TYPE_JPEG:
	case AMD_IP_BLOCK_TYPE_GMC:
	case AMD_IP_BLOCK_TYPE_ACP:
	case AMD_IP_BLOCK_TYPE_VPE:
		if (pp_funcs && pp_funcs->set_powergating_by_smu)
			ret = (pp_funcs->set_powergating_by_smu(
				(adev)->powerplay.pp_handle, block_type, gate));
		break;
	default:
		break;
	}

	/* NOTE(review): for unhandled block types ret stays 0, so the
	 * cached state is updated even though nothing was done — looks
	 * intentional (treat unknown blocks as "always succeeds"); confirm.
	 */
	if (!ret)
		atomic_set(&adev->pm.pwr_state[block_type], pwr_state);

	mutex_unlock(&adev->pm.mutex);

	return ret;
}
112
113 int amdgpu_dpm_set_gfx_power_up_by_imu(struct amdgpu_device *adev)
114 {
115         struct smu_context *smu = adev->powerplay.pp_handle;
116         int ret = -EOPNOTSUPP;
117
118         mutex_lock(&adev->pm.mutex);
119         ret = smu_set_gfx_power_up_by_imu(smu);
120         mutex_unlock(&adev->pm.mutex);
121
122         msleep(10);
123
124         return ret;
125 }
126
127 int amdgpu_dpm_baco_enter(struct amdgpu_device *adev)
128 {
129         const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
130         void *pp_handle = adev->powerplay.pp_handle;
131         int ret = 0;
132
133         if (!pp_funcs || !pp_funcs->set_asic_baco_state)
134                 return -ENOENT;
135
136         mutex_lock(&adev->pm.mutex);
137
138         /* enter BACO state */
139         ret = pp_funcs->set_asic_baco_state(pp_handle, 1);
140
141         mutex_unlock(&adev->pm.mutex);
142
143         return ret;
144 }
145
146 int amdgpu_dpm_baco_exit(struct amdgpu_device *adev)
147 {
148         const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
149         void *pp_handle = adev->powerplay.pp_handle;
150         int ret = 0;
151
152         if (!pp_funcs || !pp_funcs->set_asic_baco_state)
153                 return -ENOENT;
154
155         mutex_lock(&adev->pm.mutex);
156
157         /* exit BACO state */
158         ret = pp_funcs->set_asic_baco_state(pp_handle, 0);
159
160         mutex_unlock(&adev->pm.mutex);
161
162         return ret;
163 }
164
165 int amdgpu_dpm_set_mp1_state(struct amdgpu_device *adev,
166                              enum pp_mp1_state mp1_state)
167 {
168         int ret = 0;
169         const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
170
171         if (pp_funcs && pp_funcs->set_mp1_state) {
172                 mutex_lock(&adev->pm.mutex);
173
174                 ret = pp_funcs->set_mp1_state(
175                                 adev->powerplay.pp_handle,
176                                 mp1_state);
177
178                 mutex_unlock(&adev->pm.mutex);
179         }
180
181         return ret;
182 }
183
184 bool amdgpu_dpm_is_baco_supported(struct amdgpu_device *adev)
185 {
186         const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
187         void *pp_handle = adev->powerplay.pp_handle;
188         bool baco_cap;
189         int ret = 0;
190
191         if (!pp_funcs || !pp_funcs->get_asic_baco_capability)
192                 return false;
193         /* Don't use baco for reset in S3.
194          * This is a workaround for some platforms
195          * where entering BACO during suspend
196          * seems to cause reboots or hangs.
197          * This might be related to the fact that BACO controls
198          * power to the whole GPU including devices like audio and USB.
199          * Powering down/up everything may adversely affect these other
200          * devices.  Needs more investigation.
201          */
202         if (adev->in_s3)
203                 return false;
204
205         mutex_lock(&adev->pm.mutex);
206
207         ret = pp_funcs->get_asic_baco_capability(pp_handle,
208                                                  &baco_cap);
209
210         mutex_unlock(&adev->pm.mutex);
211
212         return ret ? false : baco_cap;
213 }
214
215 int amdgpu_dpm_mode2_reset(struct amdgpu_device *adev)
216 {
217         const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
218         void *pp_handle = adev->powerplay.pp_handle;
219         int ret = 0;
220
221         if (!pp_funcs || !pp_funcs->asic_reset_mode_2)
222                 return -ENOENT;
223
224         mutex_lock(&adev->pm.mutex);
225
226         ret = pp_funcs->asic_reset_mode_2(pp_handle);
227
228         mutex_unlock(&adev->pm.mutex);
229
230         return ret;
231 }
232
233 int amdgpu_dpm_enable_gfx_features(struct amdgpu_device *adev)
234 {
235         const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
236         void *pp_handle = adev->powerplay.pp_handle;
237         int ret = 0;
238
239         if (!pp_funcs || !pp_funcs->asic_reset_enable_gfx_features)
240                 return -ENOENT;
241
242         mutex_lock(&adev->pm.mutex);
243
244         ret = pp_funcs->asic_reset_enable_gfx_features(pp_handle);
245
246         mutex_unlock(&adev->pm.mutex);
247
248         return ret;
249 }
250
251 int amdgpu_dpm_baco_reset(struct amdgpu_device *adev)
252 {
253         const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
254         void *pp_handle = adev->powerplay.pp_handle;
255         int ret = 0;
256
257         if (!pp_funcs || !pp_funcs->set_asic_baco_state)
258                 return -ENOENT;
259
260         mutex_lock(&adev->pm.mutex);
261
262         /* enter BACO state */
263         ret = pp_funcs->set_asic_baco_state(pp_handle, 1);
264         if (ret)
265                 goto out;
266
267         /* exit BACO state */
268         ret = pp_funcs->set_asic_baco_state(pp_handle, 0);
269
270 out:
271         mutex_unlock(&adev->pm.mutex);
272         return ret;
273 }
274
275 bool amdgpu_dpm_is_mode1_reset_supported(struct amdgpu_device *adev)
276 {
277         struct smu_context *smu = adev->powerplay.pp_handle;
278         bool support_mode1_reset = false;
279
280         if (is_support_sw_smu(adev)) {
281                 mutex_lock(&adev->pm.mutex);
282                 support_mode1_reset = smu_mode1_reset_is_support(smu);
283                 mutex_unlock(&adev->pm.mutex);
284         }
285
286         return support_mode1_reset;
287 }
288
289 int amdgpu_dpm_mode1_reset(struct amdgpu_device *adev)
290 {
291         struct smu_context *smu = adev->powerplay.pp_handle;
292         int ret = -EOPNOTSUPP;
293
294         if (is_support_sw_smu(adev)) {
295                 mutex_lock(&adev->pm.mutex);
296                 ret = smu_mode1_reset(smu);
297                 mutex_unlock(&adev->pm.mutex);
298         }
299
300         return ret;
301 }
302
303 int amdgpu_dpm_switch_power_profile(struct amdgpu_device *adev,
304                                     enum PP_SMC_POWER_PROFILE type,
305                                     bool en)
306 {
307         const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
308         int ret = 0;
309
310         if (amdgpu_sriov_vf(adev))
311                 return 0;
312
313         if (pp_funcs && pp_funcs->switch_power_profile) {
314                 mutex_lock(&adev->pm.mutex);
315                 ret = pp_funcs->switch_power_profile(
316                         adev->powerplay.pp_handle, type, en);
317                 mutex_unlock(&adev->pm.mutex);
318         }
319
320         return ret;
321 }
322
323 int amdgpu_dpm_set_xgmi_pstate(struct amdgpu_device *adev,
324                                uint32_t pstate)
325 {
326         const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
327         int ret = 0;
328
329         if (pp_funcs && pp_funcs->set_xgmi_pstate) {
330                 mutex_lock(&adev->pm.mutex);
331                 ret = pp_funcs->set_xgmi_pstate(adev->powerplay.pp_handle,
332                                                                 pstate);
333                 mutex_unlock(&adev->pm.mutex);
334         }
335
336         return ret;
337 }
338
339 int amdgpu_dpm_set_df_cstate(struct amdgpu_device *adev,
340                              uint32_t cstate)
341 {
342         int ret = 0;
343         const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
344         void *pp_handle = adev->powerplay.pp_handle;
345
346         if (pp_funcs && pp_funcs->set_df_cstate) {
347                 mutex_lock(&adev->pm.mutex);
348                 ret = pp_funcs->set_df_cstate(pp_handle, cstate);
349                 mutex_unlock(&adev->pm.mutex);
350         }
351
352         return ret;
353 }
354
355 int amdgpu_dpm_get_xgmi_plpd_mode(struct amdgpu_device *adev, char **mode_desc)
356 {
357         struct smu_context *smu = adev->powerplay.pp_handle;
358         int mode = XGMI_PLPD_NONE;
359
360         if (is_support_sw_smu(adev)) {
361                 mode = smu->plpd_mode;
362                 if (mode_desc == NULL)
363                         return mode;
364                 switch (smu->plpd_mode) {
365                 case XGMI_PLPD_DISALLOW:
366                         *mode_desc = "disallow";
367                         break;
368                 case XGMI_PLPD_DEFAULT:
369                         *mode_desc = "default";
370                         break;
371                 case XGMI_PLPD_OPTIMIZED:
372                         *mode_desc = "optimized";
373                         break;
374                 case XGMI_PLPD_NONE:
375                 default:
376                         *mode_desc = "none";
377                         break;
378                 }
379         }
380
381         return mode;
382 }
383
384 int amdgpu_dpm_set_xgmi_plpd_mode(struct amdgpu_device *adev, int mode)
385 {
386         struct smu_context *smu = adev->powerplay.pp_handle;
387         int ret = -EOPNOTSUPP;
388
389         if (is_support_sw_smu(adev)) {
390                 mutex_lock(&adev->pm.mutex);
391                 ret = smu_set_xgmi_plpd_mode(smu, mode);
392                 mutex_unlock(&adev->pm.mutex);
393         }
394
395         return ret;
396 }
397
398 int amdgpu_dpm_enable_mgpu_fan_boost(struct amdgpu_device *adev)
399 {
400         void *pp_handle = adev->powerplay.pp_handle;
401         const struct amd_pm_funcs *pp_funcs =
402                         adev->powerplay.pp_funcs;
403         int ret = 0;
404
405         if (pp_funcs && pp_funcs->enable_mgpu_fan_boost) {
406                 mutex_lock(&adev->pm.mutex);
407                 ret = pp_funcs->enable_mgpu_fan_boost(pp_handle);
408                 mutex_unlock(&adev->pm.mutex);
409         }
410
411         return ret;
412 }
413
414 int amdgpu_dpm_set_clockgating_by_smu(struct amdgpu_device *adev,
415                                       uint32_t msg_id)
416 {
417         void *pp_handle = adev->powerplay.pp_handle;
418         const struct amd_pm_funcs *pp_funcs =
419                         adev->powerplay.pp_funcs;
420         int ret = 0;
421
422         if (pp_funcs && pp_funcs->set_clockgating_by_smu) {
423                 mutex_lock(&adev->pm.mutex);
424                 ret = pp_funcs->set_clockgating_by_smu(pp_handle,
425                                                        msg_id);
426                 mutex_unlock(&adev->pm.mutex);
427         }
428
429         return ret;
430 }
431
432 int amdgpu_dpm_smu_i2c_bus_access(struct amdgpu_device *adev,
433                                   bool acquire)
434 {
435         void *pp_handle = adev->powerplay.pp_handle;
436         const struct amd_pm_funcs *pp_funcs =
437                         adev->powerplay.pp_funcs;
438         int ret = -EOPNOTSUPP;
439
440         if (pp_funcs && pp_funcs->smu_i2c_bus_access) {
441                 mutex_lock(&adev->pm.mutex);
442                 ret = pp_funcs->smu_i2c_bus_access(pp_handle,
443                                                    acquire);
444                 mutex_unlock(&adev->pm.mutex);
445         }
446
447         return ret;
448 }
449
450 void amdgpu_pm_acpi_event_handler(struct amdgpu_device *adev)
451 {
452         if (adev->pm.dpm_enabled) {
453                 mutex_lock(&adev->pm.mutex);
454                 if (power_supply_is_system_supplied() > 0)
455                         adev->pm.ac_power = true;
456                 else
457                         adev->pm.ac_power = false;
458
459                 if (adev->powerplay.pp_funcs &&
460                     adev->powerplay.pp_funcs->enable_bapm)
461                         amdgpu_dpm_enable_bapm(adev, adev->pm.ac_power);
462
463                 if (is_support_sw_smu(adev))
464                         smu_set_ac_dc(adev->powerplay.pp_handle);
465
466                 mutex_unlock(&adev->pm.mutex);
467         }
468 }
469
470 int amdgpu_dpm_read_sensor(struct amdgpu_device *adev, enum amd_pp_sensors sensor,
471                            void *data, uint32_t *size)
472 {
473         const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
474         int ret = -EINVAL;
475
476         if (!data || !size)
477                 return -EINVAL;
478
479         if (pp_funcs && pp_funcs->read_sensor) {
480                 mutex_lock(&adev->pm.mutex);
481                 ret = pp_funcs->read_sensor(adev->powerplay.pp_handle,
482                                             sensor,
483                                             data,
484                                             size);
485                 mutex_unlock(&adev->pm.mutex);
486         }
487
488         return ret;
489 }
490
491 int amdgpu_dpm_get_apu_thermal_limit(struct amdgpu_device *adev, uint32_t *limit)
492 {
493         const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
494         int ret = -EOPNOTSUPP;
495
496         if (pp_funcs && pp_funcs->get_apu_thermal_limit) {
497                 mutex_lock(&adev->pm.mutex);
498                 ret = pp_funcs->get_apu_thermal_limit(adev->powerplay.pp_handle, limit);
499                 mutex_unlock(&adev->pm.mutex);
500         }
501
502         return ret;
503 }
504
505 int amdgpu_dpm_set_apu_thermal_limit(struct amdgpu_device *adev, uint32_t limit)
506 {
507         const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
508         int ret = -EOPNOTSUPP;
509
510         if (pp_funcs && pp_funcs->set_apu_thermal_limit) {
511                 mutex_lock(&adev->pm.mutex);
512                 ret = pp_funcs->set_apu_thermal_limit(adev->powerplay.pp_handle, limit);
513                 mutex_unlock(&adev->pm.mutex);
514         }
515
516         return ret;
517 }
518
519 void amdgpu_dpm_compute_clocks(struct amdgpu_device *adev)
520 {
521         const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
522         int i;
523
524         if (!adev->pm.dpm_enabled)
525                 return;
526
527         if (!pp_funcs->pm_compute_clocks)
528                 return;
529
530         if (adev->mode_info.num_crtc)
531                 amdgpu_display_bandwidth_update(adev);
532
533         for (i = 0; i < AMDGPU_MAX_RINGS; i++) {
534                 struct amdgpu_ring *ring = adev->rings[i];
535                 if (ring && ring->sched.ready)
536                         amdgpu_fence_wait_empty(ring);
537         }
538
539         mutex_lock(&adev->pm.mutex);
540         pp_funcs->pm_compute_clocks(adev->powerplay.pp_handle);
541         mutex_unlock(&adev->pm.mutex);
542 }
543
544 void amdgpu_dpm_enable_uvd(struct amdgpu_device *adev, bool enable)
545 {
546         int ret = 0;
547
548         if (adev->family == AMDGPU_FAMILY_SI) {
549                 mutex_lock(&adev->pm.mutex);
550                 if (enable) {
551                         adev->pm.dpm.uvd_active = true;
552                         adev->pm.dpm.state = POWER_STATE_TYPE_INTERNAL_UVD;
553                 } else {
554                         adev->pm.dpm.uvd_active = false;
555                 }
556                 mutex_unlock(&adev->pm.mutex);
557
558                 amdgpu_dpm_compute_clocks(adev);
559                 return;
560         }
561
562         ret = amdgpu_dpm_set_powergating_by_smu(adev, AMD_IP_BLOCK_TYPE_UVD, !enable);
563         if (ret)
564                 DRM_ERROR("Dpm %s uvd failed, ret = %d. \n",
565                           enable ? "enable" : "disable", ret);
566 }
567
568 void amdgpu_dpm_enable_vce(struct amdgpu_device *adev, bool enable)
569 {
570         int ret = 0;
571
572         if (adev->family == AMDGPU_FAMILY_SI) {
573                 mutex_lock(&adev->pm.mutex);
574                 if (enable) {
575                         adev->pm.dpm.vce_active = true;
576                         /* XXX select vce level based on ring/task */
577                         adev->pm.dpm.vce_level = AMD_VCE_LEVEL_AC_ALL;
578                 } else {
579                         adev->pm.dpm.vce_active = false;
580                 }
581                 mutex_unlock(&adev->pm.mutex);
582
583                 amdgpu_dpm_compute_clocks(adev);
584                 return;
585         }
586
587         ret = amdgpu_dpm_set_powergating_by_smu(adev, AMD_IP_BLOCK_TYPE_VCE, !enable);
588         if (ret)
589                 DRM_ERROR("Dpm %s vce failed, ret = %d. \n",
590                           enable ? "enable" : "disable", ret);
591 }
592
593 void amdgpu_dpm_enable_jpeg(struct amdgpu_device *adev, bool enable)
594 {
595         int ret = 0;
596
597         ret = amdgpu_dpm_set_powergating_by_smu(adev, AMD_IP_BLOCK_TYPE_JPEG, !enable);
598         if (ret)
599                 DRM_ERROR("Dpm %s jpeg failed, ret = %d. \n",
600                           enable ? "enable" : "disable", ret);
601 }
602
603 int amdgpu_pm_load_smu_firmware(struct amdgpu_device *adev, uint32_t *smu_version)
604 {
605         const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
606         int r = 0;
607
608         if (!pp_funcs || !pp_funcs->load_firmware)
609                 return 0;
610
611         mutex_lock(&adev->pm.mutex);
612         r = pp_funcs->load_firmware(adev->powerplay.pp_handle);
613         if (r) {
614                 pr_err("smu firmware loading failed\n");
615                 goto out;
616         }
617
618         if (smu_version)
619                 *smu_version = adev->pm.fw_version;
620
621 out:
622         mutex_unlock(&adev->pm.mutex);
623         return r;
624 }
625
626 int amdgpu_dpm_handle_passthrough_sbr(struct amdgpu_device *adev, bool enable)
627 {
628         int ret = 0;
629
630         if (is_support_sw_smu(adev)) {
631                 mutex_lock(&adev->pm.mutex);
632                 ret = smu_handle_passthrough_sbr(adev->powerplay.pp_handle,
633                                                  enable);
634                 mutex_unlock(&adev->pm.mutex);
635         }
636
637         return ret;
638 }
639
640 int amdgpu_dpm_send_hbm_bad_pages_num(struct amdgpu_device *adev, uint32_t size)
641 {
642         struct smu_context *smu = adev->powerplay.pp_handle;
643         int ret = 0;
644
645         if (!is_support_sw_smu(adev))
646                 return -EOPNOTSUPP;
647
648         mutex_lock(&adev->pm.mutex);
649         ret = smu_send_hbm_bad_pages_num(smu, size);
650         mutex_unlock(&adev->pm.mutex);
651
652         return ret;
653 }
654
655 int amdgpu_dpm_send_hbm_bad_channel_flag(struct amdgpu_device *adev, uint32_t size)
656 {
657         struct smu_context *smu = adev->powerplay.pp_handle;
658         int ret = 0;
659
660         if (!is_support_sw_smu(adev))
661                 return -EOPNOTSUPP;
662
663         mutex_lock(&adev->pm.mutex);
664         ret = smu_send_hbm_bad_channel_flag(smu, size);
665         mutex_unlock(&adev->pm.mutex);
666
667         return ret;
668 }
669
670 int amdgpu_dpm_get_dpm_freq_range(struct amdgpu_device *adev,
671                                   enum pp_clock_type type,
672                                   uint32_t *min,
673                                   uint32_t *max)
674 {
675         int ret = 0;
676
677         if (type != PP_SCLK)
678                 return -EINVAL;
679
680         if (!is_support_sw_smu(adev))
681                 return -EOPNOTSUPP;
682
683         mutex_lock(&adev->pm.mutex);
684         ret = smu_get_dpm_freq_range(adev->powerplay.pp_handle,
685                                      SMU_SCLK,
686                                      min,
687                                      max);
688         mutex_unlock(&adev->pm.mutex);
689
690         return ret;
691 }
692
693 int amdgpu_dpm_set_soft_freq_range(struct amdgpu_device *adev,
694                                    enum pp_clock_type type,
695                                    uint32_t min,
696                                    uint32_t max)
697 {
698         struct smu_context *smu = adev->powerplay.pp_handle;
699         int ret = 0;
700
701         if (type != PP_SCLK)
702                 return -EINVAL;
703
704         if (!is_support_sw_smu(adev))
705                 return -EOPNOTSUPP;
706
707         mutex_lock(&adev->pm.mutex);
708         ret = smu_set_soft_freq_range(smu,
709                                       SMU_SCLK,
710                                       min,
711                                       max);
712         mutex_unlock(&adev->pm.mutex);
713
714         return ret;
715 }
716
717 int amdgpu_dpm_write_watermarks_table(struct amdgpu_device *adev)
718 {
719         struct smu_context *smu = adev->powerplay.pp_handle;
720         int ret = 0;
721
722         if (!is_support_sw_smu(adev))
723                 return 0;
724
725         mutex_lock(&adev->pm.mutex);
726         ret = smu_write_watermarks_table(smu);
727         mutex_unlock(&adev->pm.mutex);
728
729         return ret;
730 }
731
732 int amdgpu_dpm_wait_for_event(struct amdgpu_device *adev,
733                               enum smu_event_type event,
734                               uint64_t event_arg)
735 {
736         struct smu_context *smu = adev->powerplay.pp_handle;
737         int ret = 0;
738
739         if (!is_support_sw_smu(adev))
740                 return -EOPNOTSUPP;
741
742         mutex_lock(&adev->pm.mutex);
743         ret = smu_wait_for_event(smu, event, event_arg);
744         mutex_unlock(&adev->pm.mutex);
745
746         return ret;
747 }
748
749 int amdgpu_dpm_set_residency_gfxoff(struct amdgpu_device *adev, bool value)
750 {
751         struct smu_context *smu = adev->powerplay.pp_handle;
752         int ret = 0;
753
754         if (!is_support_sw_smu(adev))
755                 return -EOPNOTSUPP;
756
757         mutex_lock(&adev->pm.mutex);
758         ret = smu_set_residency_gfxoff(smu, value);
759         mutex_unlock(&adev->pm.mutex);
760
761         return ret;
762 }
763
764 int amdgpu_dpm_get_residency_gfxoff(struct amdgpu_device *adev, u32 *value)
765 {
766         struct smu_context *smu = adev->powerplay.pp_handle;
767         int ret = 0;
768
769         if (!is_support_sw_smu(adev))
770                 return -EOPNOTSUPP;
771
772         mutex_lock(&adev->pm.mutex);
773         ret = smu_get_residency_gfxoff(smu, value);
774         mutex_unlock(&adev->pm.mutex);
775
776         return ret;
777 }
778
779 int amdgpu_dpm_get_entrycount_gfxoff(struct amdgpu_device *adev, u64 *value)
780 {
781         struct smu_context *smu = adev->powerplay.pp_handle;
782         int ret = 0;
783
784         if (!is_support_sw_smu(adev))
785                 return -EOPNOTSUPP;
786
787         mutex_lock(&adev->pm.mutex);
788         ret = smu_get_entrycount_gfxoff(smu, value);
789         mutex_unlock(&adev->pm.mutex);
790
791         return ret;
792 }
793
794 int amdgpu_dpm_get_status_gfxoff(struct amdgpu_device *adev, uint32_t *value)
795 {
796         struct smu_context *smu = adev->powerplay.pp_handle;
797         int ret = 0;
798
799         if (!is_support_sw_smu(adev))
800                 return -EOPNOTSUPP;
801
802         mutex_lock(&adev->pm.mutex);
803         ret = smu_get_status_gfxoff(smu, value);
804         mutex_unlock(&adev->pm.mutex);
805
806         return ret;
807 }
808
809 uint64_t amdgpu_dpm_get_thermal_throttling_counter(struct amdgpu_device *adev)
810 {
811         struct smu_context *smu = adev->powerplay.pp_handle;
812
813         if (!is_support_sw_smu(adev))
814                 return 0;
815
816         return atomic64_read(&smu->throttle_int_counter);
817 }
818
819 /* amdgpu_dpm_gfx_state_change - Handle gfx power state change set
820  * @adev: amdgpu_device pointer
821  * @state: gfx power state(1 -sGpuChangeState_D0Entry and 2 -sGpuChangeState_D3Entry)
822  *
823  */
824 void amdgpu_dpm_gfx_state_change(struct amdgpu_device *adev,
825                                  enum gfx_change_state state)
826 {
827         mutex_lock(&adev->pm.mutex);
828         if (adev->powerplay.pp_funcs &&
829             adev->powerplay.pp_funcs->gfx_state_change_set)
830                 ((adev)->powerplay.pp_funcs->gfx_state_change_set(
831                         (adev)->powerplay.pp_handle, state));
832         mutex_unlock(&adev->pm.mutex);
833 }
834
835 int amdgpu_dpm_get_ecc_info(struct amdgpu_device *adev,
836                             void *umc_ecc)
837 {
838         struct smu_context *smu = adev->powerplay.pp_handle;
839         int ret = 0;
840
841         if (!is_support_sw_smu(adev))
842                 return -EOPNOTSUPP;
843
844         mutex_lock(&adev->pm.mutex);
845         ret = smu_get_ecc_info(smu, umc_ecc);
846         mutex_unlock(&adev->pm.mutex);
847
848         return ret;
849 }
850
851 struct amd_vce_state *amdgpu_dpm_get_vce_clock_state(struct amdgpu_device *adev,
852                                                      uint32_t idx)
853 {
854         const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
855         struct amd_vce_state *vstate = NULL;
856
857         if (!pp_funcs->get_vce_clock_state)
858                 return NULL;
859
860         mutex_lock(&adev->pm.mutex);
861         vstate = pp_funcs->get_vce_clock_state(adev->powerplay.pp_handle,
862                                                idx);
863         mutex_unlock(&adev->pm.mutex);
864
865         return vstate;
866 }
867
868 void amdgpu_dpm_get_current_power_state(struct amdgpu_device *adev,
869                                         enum amd_pm_state_type *state)
870 {
871         const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
872
873         mutex_lock(&adev->pm.mutex);
874
875         if (!pp_funcs->get_current_power_state) {
876                 *state = adev->pm.dpm.user_state;
877                 goto out;
878         }
879
880         *state = pp_funcs->get_current_power_state(adev->powerplay.pp_handle);
881         if (*state < POWER_STATE_TYPE_DEFAULT ||
882             *state > POWER_STATE_TYPE_INTERNAL_3DPERF)
883                 *state = adev->pm.dpm.user_state;
884
885 out:
886         mutex_unlock(&adev->pm.mutex);
887 }
888
/**
 * amdgpu_dpm_set_power_state - record and apply a user power state
 * @adev: amdgpu device handle
 * @state: requested amd_pm_state_type
 *
 * Stores the request in adev->pm.dpm.user_state under the pm mutex.
 * On SW-SMU parts nothing more is done here; otherwise the request is
 * dispatched to the backend, falling back to a full clock recompute
 * when the backend does not implement the dispatch task.
 */
void amdgpu_dpm_set_power_state(struct amdgpu_device *adev,
				enum amd_pm_state_type state)
{
	mutex_lock(&adev->pm.mutex);
	adev->pm.dpm.user_state = state;
	mutex_unlock(&adev->pm.mutex);

	/* NOTE(review): presumably the SW-SMU path consumes user_state
	 * elsewhere — confirm against the smu code.
	 */
	if (is_support_sw_smu(adev))
		return;

	if (amdgpu_dpm_dispatch_task(adev,
				     AMD_PP_TASK_ENABLE_USER_STATE,
				     &state) == -EOPNOTSUPP)
		amdgpu_dpm_compute_clocks(adev);
}
904
905 enum amd_dpm_forced_level amdgpu_dpm_get_performance_level(struct amdgpu_device *adev)
906 {
907         const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
908         enum amd_dpm_forced_level level;
909
910         if (!pp_funcs)
911                 return AMD_DPM_FORCED_LEVEL_AUTO;
912
913         mutex_lock(&adev->pm.mutex);
914         if (pp_funcs->get_performance_level)
915                 level = pp_funcs->get_performance_level(adev->powerplay.pp_handle);
916         else
917                 level = adev->pm.dpm.forced_level;
918         mutex_unlock(&adev->pm.mutex);
919
920         return level;
921 }
922
/*
 * amdgpu_dpm_force_performance_level - force a dpm performance level
 * @adev: amdgpu device pointer
 * @level: requested AMD_DPM_FORCED_LEVEL_* value
 *
 * Switches the dpm performance level, handling the transitions into and
 * out of the UMD pstate profile levels (clock/power gating must be
 * released while a profile level is active).  Returns 0 on success or
 * if the backend has no force_performance_level callback; -EINVAL if
 * thermal throttling is active or the backend rejects the level.
 */
int amdgpu_dpm_force_performance_level(struct amdgpu_device *adev,
                                       enum amd_dpm_forced_level level)
{
        const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
        enum amd_dpm_forced_level current_level;
        /* the set of levels that count as "UMD pstate" profile levels */
        uint32_t profile_mode_mask = AMD_DPM_FORCED_LEVEL_PROFILE_STANDARD |
                                        AMD_DPM_FORCED_LEVEL_PROFILE_MIN_SCLK |
                                        AMD_DPM_FORCED_LEVEL_PROFILE_MIN_MCLK |
                                        AMD_DPM_FORCED_LEVEL_PROFILE_PEAK;

        if (!pp_funcs || !pp_funcs->force_performance_level)
                return 0;

        /* don't allow user overrides while thermal protection is active */
        if (adev->pm.dpm.thermal_active)
                return -EINVAL;

        current_level = amdgpu_dpm_get_performance_level(adev);
        if (current_level == level)
                return 0;

        /* Raven (non-Raven2) workaround: gfxoff must be disabled while
         * in manual mode and re-enabled when leaving it.
         */
        if (adev->asic_type == CHIP_RAVEN) {
                if (!(adev->apu_flags & AMD_APU_IS_RAVEN2)) {
                        if (current_level != AMD_DPM_FORCED_LEVEL_MANUAL &&
                            level == AMD_DPM_FORCED_LEVEL_MANUAL)
                                amdgpu_gfx_off_ctrl(adev, false);
                        else if (current_level == AMD_DPM_FORCED_LEVEL_MANUAL &&
                                 level != AMD_DPM_FORCED_LEVEL_MANUAL)
                                amdgpu_gfx_off_ctrl(adev, true);
                }
        }

        /* PROFILE_EXIT is only meaningful when currently in a profile level */
        if (!(current_level & profile_mode_mask) &&
            (level == AMD_DPM_FORCED_LEVEL_PROFILE_EXIT))
                return -EINVAL;

        if (!(current_level & profile_mode_mask) &&
              (level & profile_mode_mask)) {
                /* enter UMD Pstate */
                amdgpu_device_ip_set_powergating_state(adev,
                                                       AMD_IP_BLOCK_TYPE_GFX,
                                                       AMD_PG_STATE_UNGATE);
                amdgpu_device_ip_set_clockgating_state(adev,
                                                       AMD_IP_BLOCK_TYPE_GFX,
                                                       AMD_CG_STATE_UNGATE);
        } else if ((current_level & profile_mode_mask) &&
                    !(level & profile_mode_mask)) {
                /* exit UMD Pstate */
                amdgpu_device_ip_set_clockgating_state(adev,
                                                       AMD_IP_BLOCK_TYPE_GFX,
                                                       AMD_CG_STATE_GATE);
                amdgpu_device_ip_set_powergating_state(adev,
                                                       AMD_IP_BLOCK_TYPE_GFX,
                                                       AMD_PG_STATE_GATE);
        }

        mutex_lock(&adev->pm.mutex);

        if (pp_funcs->force_performance_level(adev->powerplay.pp_handle,
                                              level)) {
                mutex_unlock(&adev->pm.mutex);
                return -EINVAL;
        }

        /* cache the new level for amdgpu_dpm_get_performance_level() */
        adev->pm.dpm.forced_level = level;

        mutex_unlock(&adev->pm.mutex);

        return 0;
}
992
993 int amdgpu_dpm_get_pp_num_states(struct amdgpu_device *adev,
994                                  struct pp_states_info *states)
995 {
996         const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
997         int ret = 0;
998
999         if (!pp_funcs->get_pp_num_states)
1000                 return -EOPNOTSUPP;
1001
1002         mutex_lock(&adev->pm.mutex);
1003         ret = pp_funcs->get_pp_num_states(adev->powerplay.pp_handle,
1004                                           states);
1005         mutex_unlock(&adev->pm.mutex);
1006
1007         return ret;
1008 }
1009
1010 int amdgpu_dpm_dispatch_task(struct amdgpu_device *adev,
1011                               enum amd_pp_task task_id,
1012                               enum amd_pm_state_type *user_state)
1013 {
1014         const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
1015         int ret = 0;
1016
1017         if (!pp_funcs->dispatch_tasks)
1018                 return -EOPNOTSUPP;
1019
1020         mutex_lock(&adev->pm.mutex);
1021         ret = pp_funcs->dispatch_tasks(adev->powerplay.pp_handle,
1022                                        task_id,
1023                                        user_state);
1024         mutex_unlock(&adev->pm.mutex);
1025
1026         return ret;
1027 }
1028
1029 int amdgpu_dpm_get_pp_table(struct amdgpu_device *adev, char **table)
1030 {
1031         const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
1032         int ret = 0;
1033
1034         if (!pp_funcs->get_pp_table)
1035                 return 0;
1036
1037         mutex_lock(&adev->pm.mutex);
1038         ret = pp_funcs->get_pp_table(adev->powerplay.pp_handle,
1039                                      table);
1040         mutex_unlock(&adev->pm.mutex);
1041
1042         return ret;
1043 }
1044
1045 int amdgpu_dpm_set_fine_grain_clk_vol(struct amdgpu_device *adev,
1046                                       uint32_t type,
1047                                       long *input,
1048                                       uint32_t size)
1049 {
1050         const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
1051         int ret = 0;
1052
1053         if (!pp_funcs->set_fine_grain_clk_vol)
1054                 return 0;
1055
1056         mutex_lock(&adev->pm.mutex);
1057         ret = pp_funcs->set_fine_grain_clk_vol(adev->powerplay.pp_handle,
1058                                                type,
1059                                                input,
1060                                                size);
1061         mutex_unlock(&adev->pm.mutex);
1062
1063         return ret;
1064 }
1065
1066 int amdgpu_dpm_odn_edit_dpm_table(struct amdgpu_device *adev,
1067                                   uint32_t type,
1068                                   long *input,
1069                                   uint32_t size)
1070 {
1071         const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
1072         int ret = 0;
1073
1074         if (!pp_funcs->odn_edit_dpm_table)
1075                 return 0;
1076
1077         mutex_lock(&adev->pm.mutex);
1078         ret = pp_funcs->odn_edit_dpm_table(adev->powerplay.pp_handle,
1079                                            type,
1080                                            input,
1081                                            size);
1082         mutex_unlock(&adev->pm.mutex);
1083
1084         return ret;
1085 }
1086
1087 int amdgpu_dpm_print_clock_levels(struct amdgpu_device *adev,
1088                                   enum pp_clock_type type,
1089                                   char *buf)
1090 {
1091         const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
1092         int ret = 0;
1093
1094         if (!pp_funcs->print_clock_levels)
1095                 return 0;
1096
1097         mutex_lock(&adev->pm.mutex);
1098         ret = pp_funcs->print_clock_levels(adev->powerplay.pp_handle,
1099                                            type,
1100                                            buf);
1101         mutex_unlock(&adev->pm.mutex);
1102
1103         return ret;
1104 }
1105
1106 int amdgpu_dpm_emit_clock_levels(struct amdgpu_device *adev,
1107                                   enum pp_clock_type type,
1108                                   char *buf,
1109                                   int *offset)
1110 {
1111         const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
1112         int ret = 0;
1113
1114         if (!pp_funcs->emit_clock_levels)
1115                 return -ENOENT;
1116
1117         mutex_lock(&adev->pm.mutex);
1118         ret = pp_funcs->emit_clock_levels(adev->powerplay.pp_handle,
1119                                            type,
1120                                            buf,
1121                                            offset);
1122         mutex_unlock(&adev->pm.mutex);
1123
1124         return ret;
1125 }
1126
1127 int amdgpu_dpm_set_ppfeature_status(struct amdgpu_device *adev,
1128                                     uint64_t ppfeature_masks)
1129 {
1130         const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
1131         int ret = 0;
1132
1133         if (!pp_funcs->set_ppfeature_status)
1134                 return 0;
1135
1136         mutex_lock(&adev->pm.mutex);
1137         ret = pp_funcs->set_ppfeature_status(adev->powerplay.pp_handle,
1138                                              ppfeature_masks);
1139         mutex_unlock(&adev->pm.mutex);
1140
1141         return ret;
1142 }
1143
1144 int amdgpu_dpm_get_ppfeature_status(struct amdgpu_device *adev, char *buf)
1145 {
1146         const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
1147         int ret = 0;
1148
1149         if (!pp_funcs->get_ppfeature_status)
1150                 return 0;
1151
1152         mutex_lock(&adev->pm.mutex);
1153         ret = pp_funcs->get_ppfeature_status(adev->powerplay.pp_handle,
1154                                              buf);
1155         mutex_unlock(&adev->pm.mutex);
1156
1157         return ret;
1158 }
1159
1160 int amdgpu_dpm_force_clock_level(struct amdgpu_device *adev,
1161                                  enum pp_clock_type type,
1162                                  uint32_t mask)
1163 {
1164         const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
1165         int ret = 0;
1166
1167         if (!pp_funcs->force_clock_level)
1168                 return 0;
1169
1170         mutex_lock(&adev->pm.mutex);
1171         ret = pp_funcs->force_clock_level(adev->powerplay.pp_handle,
1172                                           type,
1173                                           mask);
1174         mutex_unlock(&adev->pm.mutex);
1175
1176         return ret;
1177 }
1178
1179 int amdgpu_dpm_get_sclk_od(struct amdgpu_device *adev)
1180 {
1181         const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
1182         int ret = 0;
1183
1184         if (!pp_funcs->get_sclk_od)
1185                 return -EOPNOTSUPP;
1186
1187         mutex_lock(&adev->pm.mutex);
1188         ret = pp_funcs->get_sclk_od(adev->powerplay.pp_handle);
1189         mutex_unlock(&adev->pm.mutex);
1190
1191         return ret;
1192 }
1193
1194 int amdgpu_dpm_set_sclk_od(struct amdgpu_device *adev, uint32_t value)
1195 {
1196         const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
1197
1198         if (is_support_sw_smu(adev))
1199                 return -EOPNOTSUPP;
1200
1201         mutex_lock(&adev->pm.mutex);
1202         if (pp_funcs->set_sclk_od)
1203                 pp_funcs->set_sclk_od(adev->powerplay.pp_handle, value);
1204         mutex_unlock(&adev->pm.mutex);
1205
1206         if (amdgpu_dpm_dispatch_task(adev,
1207                                      AMD_PP_TASK_READJUST_POWER_STATE,
1208                                      NULL) == -EOPNOTSUPP) {
1209                 adev->pm.dpm.current_ps = adev->pm.dpm.boot_ps;
1210                 amdgpu_dpm_compute_clocks(adev);
1211         }
1212
1213         return 0;
1214 }
1215
1216 int amdgpu_dpm_get_mclk_od(struct amdgpu_device *adev)
1217 {
1218         const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
1219         int ret = 0;
1220
1221         if (!pp_funcs->get_mclk_od)
1222                 return -EOPNOTSUPP;
1223
1224         mutex_lock(&adev->pm.mutex);
1225         ret = pp_funcs->get_mclk_od(adev->powerplay.pp_handle);
1226         mutex_unlock(&adev->pm.mutex);
1227
1228         return ret;
1229 }
1230
1231 int amdgpu_dpm_set_mclk_od(struct amdgpu_device *adev, uint32_t value)
1232 {
1233         const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
1234
1235         if (is_support_sw_smu(adev))
1236                 return -EOPNOTSUPP;
1237
1238         mutex_lock(&adev->pm.mutex);
1239         if (pp_funcs->set_mclk_od)
1240                 pp_funcs->set_mclk_od(adev->powerplay.pp_handle, value);
1241         mutex_unlock(&adev->pm.mutex);
1242
1243         if (amdgpu_dpm_dispatch_task(adev,
1244                                      AMD_PP_TASK_READJUST_POWER_STATE,
1245                                      NULL) == -EOPNOTSUPP) {
1246                 adev->pm.dpm.current_ps = adev->pm.dpm.boot_ps;
1247                 amdgpu_dpm_compute_clocks(adev);
1248         }
1249
1250         return 0;
1251 }
1252
1253 int amdgpu_dpm_get_power_profile_mode(struct amdgpu_device *adev,
1254                                       char *buf)
1255 {
1256         const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
1257         int ret = 0;
1258
1259         if (!pp_funcs->get_power_profile_mode)
1260                 return -EOPNOTSUPP;
1261
1262         mutex_lock(&adev->pm.mutex);
1263         ret = pp_funcs->get_power_profile_mode(adev->powerplay.pp_handle,
1264                                                buf);
1265         mutex_unlock(&adev->pm.mutex);
1266
1267         return ret;
1268 }
1269
1270 int amdgpu_dpm_set_power_profile_mode(struct amdgpu_device *adev,
1271                                       long *input, uint32_t size)
1272 {
1273         const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
1274         int ret = 0;
1275
1276         if (!pp_funcs->set_power_profile_mode)
1277                 return 0;
1278
1279         mutex_lock(&adev->pm.mutex);
1280         ret = pp_funcs->set_power_profile_mode(adev->powerplay.pp_handle,
1281                                                input,
1282                                                size);
1283         mutex_unlock(&adev->pm.mutex);
1284
1285         return ret;
1286 }
1287
1288 int amdgpu_dpm_get_gpu_metrics(struct amdgpu_device *adev, void **table)
1289 {
1290         const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
1291         int ret = 0;
1292
1293         if (!pp_funcs->get_gpu_metrics)
1294                 return 0;
1295
1296         mutex_lock(&adev->pm.mutex);
1297         ret = pp_funcs->get_gpu_metrics(adev->powerplay.pp_handle,
1298                                         table);
1299         mutex_unlock(&adev->pm.mutex);
1300
1301         return ret;
1302 }
1303
1304 int amdgpu_dpm_get_fan_control_mode(struct amdgpu_device *adev,
1305                                     uint32_t *fan_mode)
1306 {
1307         const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
1308         int ret = 0;
1309
1310         if (!pp_funcs->get_fan_control_mode)
1311                 return -EOPNOTSUPP;
1312
1313         mutex_lock(&adev->pm.mutex);
1314         ret = pp_funcs->get_fan_control_mode(adev->powerplay.pp_handle,
1315                                              fan_mode);
1316         mutex_unlock(&adev->pm.mutex);
1317
1318         return ret;
1319 }
1320
1321 int amdgpu_dpm_set_fan_speed_pwm(struct amdgpu_device *adev,
1322                                  uint32_t speed)
1323 {
1324         const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
1325         int ret = 0;
1326
1327         if (!pp_funcs->set_fan_speed_pwm)
1328                 return -EOPNOTSUPP;
1329
1330         mutex_lock(&adev->pm.mutex);
1331         ret = pp_funcs->set_fan_speed_pwm(adev->powerplay.pp_handle,
1332                                           speed);
1333         mutex_unlock(&adev->pm.mutex);
1334
1335         return ret;
1336 }
1337
1338 int amdgpu_dpm_get_fan_speed_pwm(struct amdgpu_device *adev,
1339                                  uint32_t *speed)
1340 {
1341         const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
1342         int ret = 0;
1343
1344         if (!pp_funcs->get_fan_speed_pwm)
1345                 return -EOPNOTSUPP;
1346
1347         mutex_lock(&adev->pm.mutex);
1348         ret = pp_funcs->get_fan_speed_pwm(adev->powerplay.pp_handle,
1349                                           speed);
1350         mutex_unlock(&adev->pm.mutex);
1351
1352         return ret;
1353 }
1354
1355 int amdgpu_dpm_get_fan_speed_rpm(struct amdgpu_device *adev,
1356                                  uint32_t *speed)
1357 {
1358         const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
1359         int ret = 0;
1360
1361         if (!pp_funcs->get_fan_speed_rpm)
1362                 return -EOPNOTSUPP;
1363
1364         mutex_lock(&adev->pm.mutex);
1365         ret = pp_funcs->get_fan_speed_rpm(adev->powerplay.pp_handle,
1366                                           speed);
1367         mutex_unlock(&adev->pm.mutex);
1368
1369         return ret;
1370 }
1371
1372 int amdgpu_dpm_set_fan_speed_rpm(struct amdgpu_device *adev,
1373                                  uint32_t speed)
1374 {
1375         const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
1376         int ret = 0;
1377
1378         if (!pp_funcs->set_fan_speed_rpm)
1379                 return -EOPNOTSUPP;
1380
1381         mutex_lock(&adev->pm.mutex);
1382         ret = pp_funcs->set_fan_speed_rpm(adev->powerplay.pp_handle,
1383                                           speed);
1384         mutex_unlock(&adev->pm.mutex);
1385
1386         return ret;
1387 }
1388
1389 int amdgpu_dpm_set_fan_control_mode(struct amdgpu_device *adev,
1390                                     uint32_t mode)
1391 {
1392         const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
1393         int ret = 0;
1394
1395         if (!pp_funcs->set_fan_control_mode)
1396                 return -EOPNOTSUPP;
1397
1398         mutex_lock(&adev->pm.mutex);
1399         ret = pp_funcs->set_fan_control_mode(adev->powerplay.pp_handle,
1400                                              mode);
1401         mutex_unlock(&adev->pm.mutex);
1402
1403         return ret;
1404 }
1405
1406 int amdgpu_dpm_get_power_limit(struct amdgpu_device *adev,
1407                                uint32_t *limit,
1408                                enum pp_power_limit_level pp_limit_level,
1409                                enum pp_power_type power_type)
1410 {
1411         const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
1412         int ret = 0;
1413
1414         if (!pp_funcs->get_power_limit)
1415                 return -ENODATA;
1416
1417         mutex_lock(&adev->pm.mutex);
1418         ret = pp_funcs->get_power_limit(adev->powerplay.pp_handle,
1419                                         limit,
1420                                         pp_limit_level,
1421                                         power_type);
1422         mutex_unlock(&adev->pm.mutex);
1423
1424         return ret;
1425 }
1426
1427 int amdgpu_dpm_set_power_limit(struct amdgpu_device *adev,
1428                                uint32_t limit)
1429 {
1430         const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
1431         int ret = 0;
1432
1433         if (!pp_funcs->set_power_limit)
1434                 return -EINVAL;
1435
1436         mutex_lock(&adev->pm.mutex);
1437         ret = pp_funcs->set_power_limit(adev->powerplay.pp_handle,
1438                                         limit);
1439         mutex_unlock(&adev->pm.mutex);
1440
1441         return ret;
1442 }
1443
/*
 * amdgpu_dpm_is_cclk_dpm_supported - check for cclk (CPU core clock) dpm
 * @adev: amdgpu device pointer
 *
 * Only meaningful on SMU-based parts; returns false (0) otherwise.
 * Result is returned as int for use as a boolean.
 */
int amdgpu_dpm_is_cclk_dpm_supported(struct amdgpu_device *adev)
{
        bool cclk_dpm_supported = false;

        if (!is_support_sw_smu(adev))
                return false;

        mutex_lock(&adev->pm.mutex);
        cclk_dpm_supported = is_support_cclk_dpm(adev);
        mutex_unlock(&adev->pm.mutex);

        return (int)cclk_dpm_supported;
}
1457
1458 int amdgpu_dpm_debugfs_print_current_performance_level(struct amdgpu_device *adev,
1459                                                        struct seq_file *m)
1460 {
1461         const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
1462
1463         if (!pp_funcs->debugfs_print_current_performance_level)
1464                 return -EOPNOTSUPP;
1465
1466         mutex_lock(&adev->pm.mutex);
1467         pp_funcs->debugfs_print_current_performance_level(adev->powerplay.pp_handle,
1468                                                           m);
1469         mutex_unlock(&adev->pm.mutex);
1470
1471         return 0;
1472 }
1473
1474 int amdgpu_dpm_get_smu_prv_buf_details(struct amdgpu_device *adev,
1475                                        void **addr,
1476                                        size_t *size)
1477 {
1478         const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
1479         int ret = 0;
1480
1481         if (!pp_funcs->get_smu_prv_buf_details)
1482                 return -ENOSYS;
1483
1484         mutex_lock(&adev->pm.mutex);
1485         ret = pp_funcs->get_smu_prv_buf_details(adev->powerplay.pp_handle,
1486                                                 addr,
1487                                                 size);
1488         mutex_unlock(&adev->pm.mutex);
1489
1490         return ret;
1491 }
1492
/*
 * amdgpu_dpm_is_overdrive_supported - check whether overdrive is usable
 * @adev: amdgpu device pointer
 *
 * On SMU-based parts overdrive is reported as enabled for APUs or when
 * the SMU has od_enabled set.  On powerplay parts the hwmgr's
 * od_enabled flag is used.  Result is returned as int for use as a
 * boolean.
 */
int amdgpu_dpm_is_overdrive_supported(struct amdgpu_device *adev)
{
        if (is_support_sw_smu(adev)) {
                struct smu_context *smu = adev->powerplay.pp_handle;

                return (smu->od_enabled || smu->is_apu);
        } else {
                struct pp_hwmgr *hwmgr;

                /*
                 * dpm on some legacy asics don't carry od_enabled member
                 * as its pp_handle is casted directly from adev.
                 */
                if (amdgpu_dpm_is_legacy_dpm(adev))
                        return false;

                hwmgr = (struct pp_hwmgr *)adev->powerplay.pp_handle;

                return hwmgr->od_enabled;
        }
}
1514
1515 int amdgpu_dpm_set_pp_table(struct amdgpu_device *adev,
1516                             const char *buf,
1517                             size_t size)
1518 {
1519         const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
1520         int ret = 0;
1521
1522         if (!pp_funcs->set_pp_table)
1523                 return -EOPNOTSUPP;
1524
1525         mutex_lock(&adev->pm.mutex);
1526         ret = pp_funcs->set_pp_table(adev->powerplay.pp_handle,
1527                                      buf,
1528                                      size);
1529         mutex_unlock(&adev->pm.mutex);
1530
1531         return ret;
1532 }
1533
/*
 * amdgpu_dpm_get_num_cpu_cores - number of CPU cores known to the SMU
 * @adev: amdgpu device pointer
 *
 * Only meaningful on SMU-based APUs; returns INT_MAX as a "no limit /
 * not applicable" sentinel on other parts.
 */
int amdgpu_dpm_get_num_cpu_cores(struct amdgpu_device *adev)
{
        struct smu_context *smu = adev->powerplay.pp_handle;

        if (!is_support_sw_smu(adev))
                return INT_MAX;

        return smu->cpu_core_num;
}
1543
/*
 * amdgpu_dpm_stb_debug_fs_init - set up SMU Trace Buffer debugfs files
 * @adev: amdgpu device pointer
 *
 * No-op on parts without a software SMU.
 */
void amdgpu_dpm_stb_debug_fs_init(struct amdgpu_device *adev)
{
        if (!is_support_sw_smu(adev))
                return;

        amdgpu_smu_stb_debug_fs_init(adev);
}
1551
1552 int amdgpu_dpm_display_configuration_change(struct amdgpu_device *adev,
1553                                             const struct amd_pp_display_configuration *input)
1554 {
1555         const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
1556         int ret = 0;
1557
1558         if (!pp_funcs->display_configuration_change)
1559                 return 0;
1560
1561         mutex_lock(&adev->pm.mutex);
1562         ret = pp_funcs->display_configuration_change(adev->powerplay.pp_handle,
1563                                                      input);
1564         mutex_unlock(&adev->pm.mutex);
1565
1566         return ret;
1567 }
1568
1569 int amdgpu_dpm_get_clock_by_type(struct amdgpu_device *adev,
1570                                  enum amd_pp_clock_type type,
1571                                  struct amd_pp_clocks *clocks)
1572 {
1573         const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
1574         int ret = 0;
1575
1576         if (!pp_funcs->get_clock_by_type)
1577                 return 0;
1578
1579         mutex_lock(&adev->pm.mutex);
1580         ret = pp_funcs->get_clock_by_type(adev->powerplay.pp_handle,
1581                                           type,
1582                                           clocks);
1583         mutex_unlock(&adev->pm.mutex);
1584
1585         return ret;
1586 }
1587
1588 int amdgpu_dpm_get_display_mode_validation_clks(struct amdgpu_device *adev,
1589                                                 struct amd_pp_simple_clock_info *clocks)
1590 {
1591         const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
1592         int ret = 0;
1593
1594         if (!pp_funcs->get_display_mode_validation_clocks)
1595                 return 0;
1596
1597         mutex_lock(&adev->pm.mutex);
1598         ret = pp_funcs->get_display_mode_validation_clocks(adev->powerplay.pp_handle,
1599                                                            clocks);
1600         mutex_unlock(&adev->pm.mutex);
1601
1602         return ret;
1603 }
1604
1605 int amdgpu_dpm_get_clock_by_type_with_latency(struct amdgpu_device *adev,
1606                                               enum amd_pp_clock_type type,
1607                                               struct pp_clock_levels_with_latency *clocks)
1608 {
1609         const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
1610         int ret = 0;
1611
1612         if (!pp_funcs->get_clock_by_type_with_latency)
1613                 return 0;
1614
1615         mutex_lock(&adev->pm.mutex);
1616         ret = pp_funcs->get_clock_by_type_with_latency(adev->powerplay.pp_handle,
1617                                                        type,
1618                                                        clocks);
1619         mutex_unlock(&adev->pm.mutex);
1620
1621         return ret;
1622 }
1623
1624 int amdgpu_dpm_get_clock_by_type_with_voltage(struct amdgpu_device *adev,
1625                                               enum amd_pp_clock_type type,
1626                                               struct pp_clock_levels_with_voltage *clocks)
1627 {
1628         const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
1629         int ret = 0;
1630
1631         if (!pp_funcs->get_clock_by_type_with_voltage)
1632                 return 0;
1633
1634         mutex_lock(&adev->pm.mutex);
1635         ret = pp_funcs->get_clock_by_type_with_voltage(adev->powerplay.pp_handle,
1636                                                        type,
1637                                                        clocks);
1638         mutex_unlock(&adev->pm.mutex);
1639
1640         return ret;
1641 }
1642
1643 int amdgpu_dpm_set_watermarks_for_clocks_ranges(struct amdgpu_device *adev,
1644                                                void *clock_ranges)
1645 {
1646         const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
1647         int ret = 0;
1648
1649         if (!pp_funcs->set_watermarks_for_clocks_ranges)
1650                 return -EOPNOTSUPP;
1651
1652         mutex_lock(&adev->pm.mutex);
1653         ret = pp_funcs->set_watermarks_for_clocks_ranges(adev->powerplay.pp_handle,
1654                                                          clock_ranges);
1655         mutex_unlock(&adev->pm.mutex);
1656
1657         return ret;
1658 }
1659
1660 int amdgpu_dpm_display_clock_voltage_request(struct amdgpu_device *adev,
1661                                              struct pp_display_clock_request *clock)
1662 {
1663         const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
1664         int ret = 0;
1665
1666         if (!pp_funcs->display_clock_voltage_request)
1667                 return -EOPNOTSUPP;
1668
1669         mutex_lock(&adev->pm.mutex);
1670         ret = pp_funcs->display_clock_voltage_request(adev->powerplay.pp_handle,
1671                                                       clock);
1672         mutex_unlock(&adev->pm.mutex);
1673
1674         return ret;
1675 }
1676
1677 int amdgpu_dpm_get_current_clocks(struct amdgpu_device *adev,
1678                                   struct amd_pp_clock_info *clocks)
1679 {
1680         const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
1681         int ret = 0;
1682
1683         if (!pp_funcs->get_current_clocks)
1684                 return -EOPNOTSUPP;
1685
1686         mutex_lock(&adev->pm.mutex);
1687         ret = pp_funcs->get_current_clocks(adev->powerplay.pp_handle,
1688                                            clocks);
1689         mutex_unlock(&adev->pm.mutex);
1690
1691         return ret;
1692 }
1693
1694 void amdgpu_dpm_notify_smu_enable_pwe(struct amdgpu_device *adev)
1695 {
1696         const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
1697
1698         if (!pp_funcs->notify_smu_enable_pwe)
1699                 return;
1700
1701         mutex_lock(&adev->pm.mutex);
1702         pp_funcs->notify_smu_enable_pwe(adev->powerplay.pp_handle);
1703         mutex_unlock(&adev->pm.mutex);
1704 }
1705
1706 int amdgpu_dpm_set_active_display_count(struct amdgpu_device *adev,
1707                                         uint32_t count)
1708 {
1709         const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
1710         int ret = 0;
1711
1712         if (!pp_funcs->set_active_display_count)
1713                 return -EOPNOTSUPP;
1714
1715         mutex_lock(&adev->pm.mutex);
1716         ret = pp_funcs->set_active_display_count(adev->powerplay.pp_handle,
1717                                                  count);
1718         mutex_unlock(&adev->pm.mutex);
1719
1720         return ret;
1721 }
1722
1723 int amdgpu_dpm_set_min_deep_sleep_dcefclk(struct amdgpu_device *adev,
1724                                           uint32_t clock)
1725 {
1726         const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
1727         int ret = 0;
1728
1729         if (!pp_funcs->set_min_deep_sleep_dcefclk)
1730                 return -EOPNOTSUPP;
1731
1732         mutex_lock(&adev->pm.mutex);
1733         ret = pp_funcs->set_min_deep_sleep_dcefclk(adev->powerplay.pp_handle,
1734                                                    clock);
1735         mutex_unlock(&adev->pm.mutex);
1736
1737         return ret;
1738 }
1739
1740 void amdgpu_dpm_set_hard_min_dcefclk_by_freq(struct amdgpu_device *adev,
1741                                              uint32_t clock)
1742 {
1743         const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
1744
1745         if (!pp_funcs->set_hard_min_dcefclk_by_freq)
1746                 return;
1747
1748         mutex_lock(&adev->pm.mutex);
1749         pp_funcs->set_hard_min_dcefclk_by_freq(adev->powerplay.pp_handle,
1750                                                clock);
1751         mutex_unlock(&adev->pm.mutex);
1752 }
1753
1754 void amdgpu_dpm_set_hard_min_fclk_by_freq(struct amdgpu_device *adev,
1755                                           uint32_t clock)
1756 {
1757         const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
1758
1759         if (!pp_funcs->set_hard_min_fclk_by_freq)
1760                 return;
1761
1762         mutex_lock(&adev->pm.mutex);
1763         pp_funcs->set_hard_min_fclk_by_freq(adev->powerplay.pp_handle,
1764                                             clock);
1765         mutex_unlock(&adev->pm.mutex);
1766 }
1767
1768 int amdgpu_dpm_display_disable_memory_clock_switch(struct amdgpu_device *adev,
1769                                                    bool disable_memory_clock_switch)
1770 {
1771         const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
1772         int ret = 0;
1773
1774         if (!pp_funcs->display_disable_memory_clock_switch)
1775                 return 0;
1776
1777         mutex_lock(&adev->pm.mutex);
1778         ret = pp_funcs->display_disable_memory_clock_switch(adev->powerplay.pp_handle,
1779                                                             disable_memory_clock_switch);
1780         mutex_unlock(&adev->pm.mutex);
1781
1782         return ret;
1783 }
1784
1785 int amdgpu_dpm_get_max_sustainable_clocks_by_dc(struct amdgpu_device *adev,
1786                                                 struct pp_smu_nv_clock_table *max_clocks)
1787 {
1788         const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
1789         int ret = 0;
1790
1791         if (!pp_funcs->get_max_sustainable_clocks_by_dc)
1792                 return -EOPNOTSUPP;
1793
1794         mutex_lock(&adev->pm.mutex);
1795         ret = pp_funcs->get_max_sustainable_clocks_by_dc(adev->powerplay.pp_handle,
1796                                                          max_clocks);
1797         mutex_unlock(&adev->pm.mutex);
1798
1799         return ret;
1800 }
1801
1802 enum pp_smu_status amdgpu_dpm_get_uclk_dpm_states(struct amdgpu_device *adev,
1803                                                   unsigned int *clock_values_in_khz,
1804                                                   unsigned int *num_states)
1805 {
1806         const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
1807         int ret = 0;
1808
1809         if (!pp_funcs->get_uclk_dpm_states)
1810                 return -EOPNOTSUPP;
1811
1812         mutex_lock(&adev->pm.mutex);
1813         ret = pp_funcs->get_uclk_dpm_states(adev->powerplay.pp_handle,
1814                                             clock_values_in_khz,
1815                                             num_states);
1816         mutex_unlock(&adev->pm.mutex);
1817
1818         return ret;
1819 }
1820
1821 int amdgpu_dpm_get_dpm_clock_table(struct amdgpu_device *adev,
1822                                    struct dpm_clocks *clock_table)
1823 {
1824         const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
1825         int ret = 0;
1826
1827         if (!pp_funcs->get_dpm_clock_table)
1828                 return -EOPNOTSUPP;
1829
1830         mutex_lock(&adev->pm.mutex);
1831         ret = pp_funcs->get_dpm_clock_table(adev->powerplay.pp_handle,
1832                                             clock_table);
1833         mutex_unlock(&adev->pm.mutex);
1834
1835         return ret;
1836 }