9a157fe4cbc728777f282811b8c35552b5b91304
[linux-2.6-microblaze.git] / drivers / gpu / drm / amd / pm / amdgpu_dpm.c
1 /*
2  * Copyright 2011 Advanced Micro Devices, Inc.
3  *
4  * Permission is hereby granted, free of charge, to any person obtaining a
5  * copy of this software and associated documentation files (the "Software"),
6  * to deal in the Software without restriction, including without limitation
7  * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8  * and/or sell copies of the Software, and to permit persons to whom the
9  * Software is furnished to do so, subject to the following conditions:
10  *
11  * The above copyright notice and this permission notice shall be included in
12  * all copies or substantial portions of the Software.
13  *
14  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
17  * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18  * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19  * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20  * OTHER DEALINGS IN THE SOFTWARE.
21  *
22  * Authors: Alex Deucher
23  */
24
25 #include "amdgpu.h"
26 #include "amdgpu_atombios.h"
27 #include "amdgpu_i2c.h"
28 #include "amdgpu_dpm.h"
29 #include "atom.h"
30 #include "amd_pcie.h"
31 #include "amdgpu_display.h"
32 #include "hwmgr.h"
33 #include <linux/power_supply.h>
34 #include "amdgpu_smu.h"
35
/*
 * Invoke the backend's enable_bapm callback with the powerplay handle.
 * Callers must verify pp_funcs->enable_bapm exists before using this
 * (see amdgpu_pm_acpi_event_handler) — the macro itself does not check.
 */
#define amdgpu_dpm_enable_bapm(adev, e) \
		((adev)->powerplay.pp_funcs->enable_bapm((adev)->powerplay.pp_handle, (e)))

/* Legacy dpm stores the adev pointer itself as the powerplay handle. */
#define amdgpu_dpm_is_legacy_dpm(adev) ((adev)->powerplay.pp_handle == (adev))
40
41 int amdgpu_dpm_get_sclk(struct amdgpu_device *adev, bool low)
42 {
43         const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
44         int ret = 0;
45
46         if (!pp_funcs->get_sclk)
47                 return 0;
48
49         mutex_lock(&adev->pm.mutex);
50         ret = pp_funcs->get_sclk((adev)->powerplay.pp_handle,
51                                  low);
52         mutex_unlock(&adev->pm.mutex);
53
54         return ret;
55 }
56
57 int amdgpu_dpm_get_mclk(struct amdgpu_device *adev, bool low)
58 {
59         const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
60         int ret = 0;
61
62         if (!pp_funcs->get_mclk)
63                 return 0;
64
65         mutex_lock(&adev->pm.mutex);
66         ret = pp_funcs->get_mclk((adev)->powerplay.pp_handle,
67                                  low);
68         mutex_unlock(&adev->pm.mutex);
69
70         return ret;
71 }
72
/*
 * Gate (power off) or ungate (power on) one IP block through the SMU.
 * A per-block power state is cached in adev->pm.pwr_state[] so repeated
 * requests for the state a block is already in become no-ops.
 *
 * NOTE(review): block_type indexes pwr_state[] before the switch validates
 * it; callers are presumably limited to the enum values below — confirm
 * the array is sized for all possible AMD_IP_BLOCK_TYPE_* values.
 */
int amdgpu_dpm_set_powergating_by_smu(struct amdgpu_device *adev, uint32_t block_type, bool gate)
{
	int ret = 0;
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
	enum ip_power_state pwr_state = gate ? POWER_STATE_OFF : POWER_STATE_ON;

	/* Fast path: skip the SMU round-trip when already in the target state. */
	if (atomic_read(&adev->pm.pwr_state[block_type]) == pwr_state) {
		dev_dbg(adev->dev, "IP block%d already in the target %s state!",
				block_type, gate ? "gate" : "ungate");
		return 0;
	}

	mutex_lock(&adev->pm.mutex);

	switch (block_type) {
	case AMD_IP_BLOCK_TYPE_UVD:
	case AMD_IP_BLOCK_TYPE_VCE:
	case AMD_IP_BLOCK_TYPE_GFX:
	case AMD_IP_BLOCK_TYPE_VCN:
	case AMD_IP_BLOCK_TYPE_SDMA:
	case AMD_IP_BLOCK_TYPE_JPEG:
	case AMD_IP_BLOCK_TYPE_GMC:
	case AMD_IP_BLOCK_TYPE_ACP:
		if (pp_funcs && pp_funcs->set_powergating_by_smu)
			ret = (pp_funcs->set_powergating_by_smu(
				(adev)->powerplay.pp_handle, block_type, gate));
		break;
	default:
		/* Unknown block types fall through as success (ret stays 0). */
		break;
	}

	/* Update the cache only when the backend call succeeded (or was skipped). */
	if (!ret)
		atomic_set(&adev->pm.pwr_state[block_type], pwr_state);

	mutex_unlock(&adev->pm.mutex);

	return ret;
}
111
112 int amdgpu_dpm_set_gfx_power_up_by_imu(struct amdgpu_device *adev)
113 {
114         struct smu_context *smu = adev->powerplay.pp_handle;
115         int ret = -EOPNOTSUPP;
116
117         mutex_lock(&adev->pm.mutex);
118         ret = smu_set_gfx_power_up_by_imu(smu);
119         mutex_unlock(&adev->pm.mutex);
120
121         msleep(10);
122
123         return ret;
124 }
125
126 int amdgpu_dpm_baco_enter(struct amdgpu_device *adev)
127 {
128         const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
129         void *pp_handle = adev->powerplay.pp_handle;
130         int ret = 0;
131
132         if (!pp_funcs || !pp_funcs->set_asic_baco_state)
133                 return -ENOENT;
134
135         mutex_lock(&adev->pm.mutex);
136
137         /* enter BACO state */
138         ret = pp_funcs->set_asic_baco_state(pp_handle, 1);
139
140         mutex_unlock(&adev->pm.mutex);
141
142         return ret;
143 }
144
145 int amdgpu_dpm_baco_exit(struct amdgpu_device *adev)
146 {
147         const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
148         void *pp_handle = adev->powerplay.pp_handle;
149         int ret = 0;
150
151         if (!pp_funcs || !pp_funcs->set_asic_baco_state)
152                 return -ENOENT;
153
154         mutex_lock(&adev->pm.mutex);
155
156         /* exit BACO state */
157         ret = pp_funcs->set_asic_baco_state(pp_handle, 0);
158
159         mutex_unlock(&adev->pm.mutex);
160
161         return ret;
162 }
163
164 int amdgpu_dpm_set_mp1_state(struct amdgpu_device *adev,
165                              enum pp_mp1_state mp1_state)
166 {
167         int ret = 0;
168         const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
169
170         if (pp_funcs && pp_funcs->set_mp1_state) {
171                 mutex_lock(&adev->pm.mutex);
172
173                 ret = pp_funcs->set_mp1_state(
174                                 adev->powerplay.pp_handle,
175                                 mp1_state);
176
177                 mutex_unlock(&adev->pm.mutex);
178         }
179
180         return ret;
181 }
182
183 bool amdgpu_dpm_is_baco_supported(struct amdgpu_device *adev)
184 {
185         const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
186         void *pp_handle = adev->powerplay.pp_handle;
187         bool baco_cap;
188         int ret = 0;
189
190         if (!pp_funcs || !pp_funcs->get_asic_baco_capability)
191                 return false;
192         /* Don't use baco for reset in S3.
193          * This is a workaround for some platforms
194          * where entering BACO during suspend
195          * seems to cause reboots or hangs.
196          * This might be related to the fact that BACO controls
197          * power to the whole GPU including devices like audio and USB.
198          * Powering down/up everything may adversely affect these other
199          * devices.  Needs more investigation.
200          */
201         if (adev->in_s3)
202                 return false;
203
204         mutex_lock(&adev->pm.mutex);
205
206         ret = pp_funcs->get_asic_baco_capability(pp_handle,
207                                                  &baco_cap);
208
209         mutex_unlock(&adev->pm.mutex);
210
211         return ret ? false : baco_cap;
212 }
213
214 int amdgpu_dpm_mode2_reset(struct amdgpu_device *adev)
215 {
216         const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
217         void *pp_handle = adev->powerplay.pp_handle;
218         int ret = 0;
219
220         if (!pp_funcs || !pp_funcs->asic_reset_mode_2)
221                 return -ENOENT;
222
223         mutex_lock(&adev->pm.mutex);
224
225         ret = pp_funcs->asic_reset_mode_2(pp_handle);
226
227         mutex_unlock(&adev->pm.mutex);
228
229         return ret;
230 }
231
232 int amdgpu_dpm_enable_gfx_features(struct amdgpu_device *adev)
233 {
234         const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
235         void *pp_handle = adev->powerplay.pp_handle;
236         int ret = 0;
237
238         if (!pp_funcs || !pp_funcs->asic_reset_enable_gfx_features)
239                 return -ENOENT;
240
241         mutex_lock(&adev->pm.mutex);
242
243         ret = pp_funcs->asic_reset_enable_gfx_features(pp_handle);
244
245         mutex_unlock(&adev->pm.mutex);
246
247         return ret;
248 }
249
250 int amdgpu_dpm_baco_reset(struct amdgpu_device *adev)
251 {
252         const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
253         void *pp_handle = adev->powerplay.pp_handle;
254         int ret = 0;
255
256         if (!pp_funcs || !pp_funcs->set_asic_baco_state)
257                 return -ENOENT;
258
259         mutex_lock(&adev->pm.mutex);
260
261         /* enter BACO state */
262         ret = pp_funcs->set_asic_baco_state(pp_handle, 1);
263         if (ret)
264                 goto out;
265
266         /* exit BACO state */
267         ret = pp_funcs->set_asic_baco_state(pp_handle, 0);
268
269 out:
270         mutex_unlock(&adev->pm.mutex);
271         return ret;
272 }
273
274 bool amdgpu_dpm_is_mode1_reset_supported(struct amdgpu_device *adev)
275 {
276         struct smu_context *smu = adev->powerplay.pp_handle;
277         bool support_mode1_reset = false;
278
279         if (is_support_sw_smu(adev)) {
280                 mutex_lock(&adev->pm.mutex);
281                 support_mode1_reset = smu_mode1_reset_is_support(smu);
282                 mutex_unlock(&adev->pm.mutex);
283         }
284
285         return support_mode1_reset;
286 }
287
288 int amdgpu_dpm_mode1_reset(struct amdgpu_device *adev)
289 {
290         struct smu_context *smu = adev->powerplay.pp_handle;
291         int ret = -EOPNOTSUPP;
292
293         if (is_support_sw_smu(adev)) {
294                 mutex_lock(&adev->pm.mutex);
295                 ret = smu_mode1_reset(smu);
296                 mutex_unlock(&adev->pm.mutex);
297         }
298
299         return ret;
300 }
301
302 int amdgpu_dpm_switch_power_profile(struct amdgpu_device *adev,
303                                     enum PP_SMC_POWER_PROFILE type,
304                                     bool en)
305 {
306         const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
307         int ret = 0;
308
309         if (amdgpu_sriov_vf(adev))
310                 return 0;
311
312         if (pp_funcs && pp_funcs->switch_power_profile) {
313                 mutex_lock(&adev->pm.mutex);
314                 ret = pp_funcs->switch_power_profile(
315                         adev->powerplay.pp_handle, type, en);
316                 mutex_unlock(&adev->pm.mutex);
317         }
318
319         return ret;
320 }
321
322 int amdgpu_dpm_set_xgmi_pstate(struct amdgpu_device *adev,
323                                uint32_t pstate)
324 {
325         const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
326         int ret = 0;
327
328         if (pp_funcs && pp_funcs->set_xgmi_pstate) {
329                 mutex_lock(&adev->pm.mutex);
330                 ret = pp_funcs->set_xgmi_pstate(adev->powerplay.pp_handle,
331                                                                 pstate);
332                 mutex_unlock(&adev->pm.mutex);
333         }
334
335         return ret;
336 }
337
338 int amdgpu_dpm_set_df_cstate(struct amdgpu_device *adev,
339                              uint32_t cstate)
340 {
341         int ret = 0;
342         const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
343         void *pp_handle = adev->powerplay.pp_handle;
344
345         if (pp_funcs && pp_funcs->set_df_cstate) {
346                 mutex_lock(&adev->pm.mutex);
347                 ret = pp_funcs->set_df_cstate(pp_handle, cstate);
348                 mutex_unlock(&adev->pm.mutex);
349         }
350
351         return ret;
352 }
353
354 int amdgpu_dpm_allow_xgmi_power_down(struct amdgpu_device *adev, bool en)
355 {
356         struct smu_context *smu = adev->powerplay.pp_handle;
357         int ret = 0;
358
359         if (is_support_sw_smu(adev)) {
360                 mutex_lock(&adev->pm.mutex);
361                 ret = smu_allow_xgmi_power_down(smu, en);
362                 mutex_unlock(&adev->pm.mutex);
363         }
364
365         return ret;
366 }
367
368 int amdgpu_dpm_get_xgmi_plpd_mode(struct amdgpu_device *adev, char **mode_desc)
369 {
370         struct smu_context *smu = adev->powerplay.pp_handle;
371         int mode = XGMI_PLPD_NONE;
372
373         if (is_support_sw_smu(adev)) {
374                 mode = smu->plpd_mode;
375                 if (mode_desc == NULL)
376                         return mode;
377                 switch (smu->plpd_mode) {
378                 case XGMI_PLPD_DISALLOW:
379                         *mode_desc = "disallow";
380                         break;
381                 case XGMI_PLPD_DEFAULT:
382                         *mode_desc = "default";
383                         break;
384                 case XGMI_PLPD_OPTIMIZED:
385                         *mode_desc = "optimized";
386                         break;
387                 case XGMI_PLPD_NONE:
388                 default:
389                         *mode_desc = "none";
390                         break;
391                 }
392         }
393
394         return mode;
395 }
396
397 int amdgpu_dpm_set_xgmi_plpd_mode(struct amdgpu_device *adev, int mode)
398 {
399         struct smu_context *smu = adev->powerplay.pp_handle;
400         int ret = -EOPNOTSUPP;
401
402         if (is_support_sw_smu(adev)) {
403                 mutex_lock(&adev->pm.mutex);
404                 ret = smu_set_xgmi_plpd_mode(smu, mode);
405                 mutex_unlock(&adev->pm.mutex);
406         }
407
408         return ret;
409 }
410
411 int amdgpu_dpm_enable_mgpu_fan_boost(struct amdgpu_device *adev)
412 {
413         void *pp_handle = adev->powerplay.pp_handle;
414         const struct amd_pm_funcs *pp_funcs =
415                         adev->powerplay.pp_funcs;
416         int ret = 0;
417
418         if (pp_funcs && pp_funcs->enable_mgpu_fan_boost) {
419                 mutex_lock(&adev->pm.mutex);
420                 ret = pp_funcs->enable_mgpu_fan_boost(pp_handle);
421                 mutex_unlock(&adev->pm.mutex);
422         }
423
424         return ret;
425 }
426
427 int amdgpu_dpm_set_clockgating_by_smu(struct amdgpu_device *adev,
428                                       uint32_t msg_id)
429 {
430         void *pp_handle = adev->powerplay.pp_handle;
431         const struct amd_pm_funcs *pp_funcs =
432                         adev->powerplay.pp_funcs;
433         int ret = 0;
434
435         if (pp_funcs && pp_funcs->set_clockgating_by_smu) {
436                 mutex_lock(&adev->pm.mutex);
437                 ret = pp_funcs->set_clockgating_by_smu(pp_handle,
438                                                        msg_id);
439                 mutex_unlock(&adev->pm.mutex);
440         }
441
442         return ret;
443 }
444
445 int amdgpu_dpm_smu_i2c_bus_access(struct amdgpu_device *adev,
446                                   bool acquire)
447 {
448         void *pp_handle = adev->powerplay.pp_handle;
449         const struct amd_pm_funcs *pp_funcs =
450                         adev->powerplay.pp_funcs;
451         int ret = -EOPNOTSUPP;
452
453         if (pp_funcs && pp_funcs->smu_i2c_bus_access) {
454                 mutex_lock(&adev->pm.mutex);
455                 ret = pp_funcs->smu_i2c_bus_access(pp_handle,
456                                                    acquire);
457                 mutex_unlock(&adev->pm.mutex);
458         }
459
460         return ret;
461 }
462
463 void amdgpu_pm_acpi_event_handler(struct amdgpu_device *adev)
464 {
465         if (adev->pm.dpm_enabled) {
466                 mutex_lock(&adev->pm.mutex);
467                 if (power_supply_is_system_supplied() > 0)
468                         adev->pm.ac_power = true;
469                 else
470                         adev->pm.ac_power = false;
471
472                 if (adev->powerplay.pp_funcs &&
473                     adev->powerplay.pp_funcs->enable_bapm)
474                         amdgpu_dpm_enable_bapm(adev, adev->pm.ac_power);
475
476                 if (is_support_sw_smu(adev))
477                         smu_set_ac_dc(adev->powerplay.pp_handle);
478
479                 mutex_unlock(&adev->pm.mutex);
480         }
481 }
482
483 int amdgpu_dpm_read_sensor(struct amdgpu_device *adev, enum amd_pp_sensors sensor,
484                            void *data, uint32_t *size)
485 {
486         const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
487         int ret = -EINVAL;
488
489         if (!data || !size)
490                 return -EINVAL;
491
492         if (pp_funcs && pp_funcs->read_sensor) {
493                 mutex_lock(&adev->pm.mutex);
494                 ret = pp_funcs->read_sensor(adev->powerplay.pp_handle,
495                                             sensor,
496                                             data,
497                                             size);
498                 mutex_unlock(&adev->pm.mutex);
499         }
500
501         return ret;
502 }
503
504 int amdgpu_dpm_get_apu_thermal_limit(struct amdgpu_device *adev, uint32_t *limit)
505 {
506         const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
507         int ret = -EINVAL;
508
509         if (pp_funcs && pp_funcs->get_apu_thermal_limit) {
510                 mutex_lock(&adev->pm.mutex);
511                 ret = pp_funcs->get_apu_thermal_limit(adev->powerplay.pp_handle, limit);
512                 mutex_unlock(&adev->pm.mutex);
513         }
514
515         return ret;
516 }
517
518 int amdgpu_dpm_set_apu_thermal_limit(struct amdgpu_device *adev, uint32_t limit)
519 {
520         const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
521         int ret = -EINVAL;
522
523         if (pp_funcs && pp_funcs->set_apu_thermal_limit) {
524                 mutex_lock(&adev->pm.mutex);
525                 ret = pp_funcs->set_apu_thermal_limit(adev->powerplay.pp_handle, limit);
526                 mutex_unlock(&adev->pm.mutex);
527         }
528
529         return ret;
530 }
531
532 void amdgpu_dpm_compute_clocks(struct amdgpu_device *adev)
533 {
534         const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
535         int i;
536
537         if (!adev->pm.dpm_enabled)
538                 return;
539
540         if (!pp_funcs->pm_compute_clocks)
541                 return;
542
543         if (adev->mode_info.num_crtc)
544                 amdgpu_display_bandwidth_update(adev);
545
546         for (i = 0; i < AMDGPU_MAX_RINGS; i++) {
547                 struct amdgpu_ring *ring = adev->rings[i];
548                 if (ring && ring->sched.ready)
549                         amdgpu_fence_wait_empty(ring);
550         }
551
552         mutex_lock(&adev->pm.mutex);
553         pp_funcs->pm_compute_clocks(adev->powerplay.pp_handle);
554         mutex_unlock(&adev->pm.mutex);
555 }
556
557 void amdgpu_dpm_enable_uvd(struct amdgpu_device *adev, bool enable)
558 {
559         int ret = 0;
560
561         if (adev->family == AMDGPU_FAMILY_SI) {
562                 mutex_lock(&adev->pm.mutex);
563                 if (enable) {
564                         adev->pm.dpm.uvd_active = true;
565                         adev->pm.dpm.state = POWER_STATE_TYPE_INTERNAL_UVD;
566                 } else {
567                         adev->pm.dpm.uvd_active = false;
568                 }
569                 mutex_unlock(&adev->pm.mutex);
570
571                 amdgpu_dpm_compute_clocks(adev);
572                 return;
573         }
574
575         ret = amdgpu_dpm_set_powergating_by_smu(adev, AMD_IP_BLOCK_TYPE_UVD, !enable);
576         if (ret)
577                 DRM_ERROR("Dpm %s uvd failed, ret = %d. \n",
578                           enable ? "enable" : "disable", ret);
579 }
580
581 void amdgpu_dpm_enable_vce(struct amdgpu_device *adev, bool enable)
582 {
583         int ret = 0;
584
585         if (adev->family == AMDGPU_FAMILY_SI) {
586                 mutex_lock(&adev->pm.mutex);
587                 if (enable) {
588                         adev->pm.dpm.vce_active = true;
589                         /* XXX select vce level based on ring/task */
590                         adev->pm.dpm.vce_level = AMD_VCE_LEVEL_AC_ALL;
591                 } else {
592                         adev->pm.dpm.vce_active = false;
593                 }
594                 mutex_unlock(&adev->pm.mutex);
595
596                 amdgpu_dpm_compute_clocks(adev);
597                 return;
598         }
599
600         ret = amdgpu_dpm_set_powergating_by_smu(adev, AMD_IP_BLOCK_TYPE_VCE, !enable);
601         if (ret)
602                 DRM_ERROR("Dpm %s vce failed, ret = %d. \n",
603                           enable ? "enable" : "disable", ret);
604 }
605
606 void amdgpu_dpm_enable_jpeg(struct amdgpu_device *adev, bool enable)
607 {
608         int ret = 0;
609
610         ret = amdgpu_dpm_set_powergating_by_smu(adev, AMD_IP_BLOCK_TYPE_JPEG, !enable);
611         if (ret)
612                 DRM_ERROR("Dpm %s jpeg failed, ret = %d. \n",
613                           enable ? "enable" : "disable", ret);
614 }
615
616 int amdgpu_pm_load_smu_firmware(struct amdgpu_device *adev, uint32_t *smu_version)
617 {
618         const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
619         int r = 0;
620
621         if (!pp_funcs || !pp_funcs->load_firmware)
622                 return 0;
623
624         mutex_lock(&adev->pm.mutex);
625         r = pp_funcs->load_firmware(adev->powerplay.pp_handle);
626         if (r) {
627                 pr_err("smu firmware loading failed\n");
628                 goto out;
629         }
630
631         if (smu_version)
632                 *smu_version = adev->pm.fw_version;
633
634 out:
635         mutex_unlock(&adev->pm.mutex);
636         return r;
637 }
638
639 int amdgpu_dpm_handle_passthrough_sbr(struct amdgpu_device *adev, bool enable)
640 {
641         int ret = 0;
642
643         if (is_support_sw_smu(adev)) {
644                 mutex_lock(&adev->pm.mutex);
645                 ret = smu_handle_passthrough_sbr(adev->powerplay.pp_handle,
646                                                  enable);
647                 mutex_unlock(&adev->pm.mutex);
648         }
649
650         return ret;
651 }
652
653 int amdgpu_dpm_send_hbm_bad_pages_num(struct amdgpu_device *adev, uint32_t size)
654 {
655         struct smu_context *smu = adev->powerplay.pp_handle;
656         int ret = 0;
657
658         if (!is_support_sw_smu(adev))
659                 return -EOPNOTSUPP;
660
661         mutex_lock(&adev->pm.mutex);
662         ret = smu_send_hbm_bad_pages_num(smu, size);
663         mutex_unlock(&adev->pm.mutex);
664
665         return ret;
666 }
667
668 int amdgpu_dpm_send_hbm_bad_channel_flag(struct amdgpu_device *adev, uint32_t size)
669 {
670         struct smu_context *smu = adev->powerplay.pp_handle;
671         int ret = 0;
672
673         if (!is_support_sw_smu(adev))
674                 return -EOPNOTSUPP;
675
676         mutex_lock(&adev->pm.mutex);
677         ret = smu_send_hbm_bad_channel_flag(smu, size);
678         mutex_unlock(&adev->pm.mutex);
679
680         return ret;
681 }
682
683 int amdgpu_dpm_get_dpm_freq_range(struct amdgpu_device *adev,
684                                   enum pp_clock_type type,
685                                   uint32_t *min,
686                                   uint32_t *max)
687 {
688         int ret = 0;
689
690         if (type != PP_SCLK)
691                 return -EINVAL;
692
693         if (!is_support_sw_smu(adev))
694                 return -EOPNOTSUPP;
695
696         mutex_lock(&adev->pm.mutex);
697         ret = smu_get_dpm_freq_range(adev->powerplay.pp_handle,
698                                      SMU_SCLK,
699                                      min,
700                                      max);
701         mutex_unlock(&adev->pm.mutex);
702
703         return ret;
704 }
705
706 int amdgpu_dpm_set_soft_freq_range(struct amdgpu_device *adev,
707                                    enum pp_clock_type type,
708                                    uint32_t min,
709                                    uint32_t max)
710 {
711         struct smu_context *smu = adev->powerplay.pp_handle;
712         int ret = 0;
713
714         if (type != PP_SCLK)
715                 return -EINVAL;
716
717         if (!is_support_sw_smu(adev))
718                 return -EOPNOTSUPP;
719
720         mutex_lock(&adev->pm.mutex);
721         ret = smu_set_soft_freq_range(smu,
722                                       SMU_SCLK,
723                                       min,
724                                       max);
725         mutex_unlock(&adev->pm.mutex);
726
727         return ret;
728 }
729
730 int amdgpu_dpm_write_watermarks_table(struct amdgpu_device *adev)
731 {
732         struct smu_context *smu = adev->powerplay.pp_handle;
733         int ret = 0;
734
735         if (!is_support_sw_smu(adev))
736                 return 0;
737
738         mutex_lock(&adev->pm.mutex);
739         ret = smu_write_watermarks_table(smu);
740         mutex_unlock(&adev->pm.mutex);
741
742         return ret;
743 }
744
745 int amdgpu_dpm_wait_for_event(struct amdgpu_device *adev,
746                               enum smu_event_type event,
747                               uint64_t event_arg)
748 {
749         struct smu_context *smu = adev->powerplay.pp_handle;
750         int ret = 0;
751
752         if (!is_support_sw_smu(adev))
753                 return -EOPNOTSUPP;
754
755         mutex_lock(&adev->pm.mutex);
756         ret = smu_wait_for_event(smu, event, event_arg);
757         mutex_unlock(&adev->pm.mutex);
758
759         return ret;
760 }
761
762 int amdgpu_dpm_set_residency_gfxoff(struct amdgpu_device *adev, bool value)
763 {
764         struct smu_context *smu = adev->powerplay.pp_handle;
765         int ret = 0;
766
767         if (!is_support_sw_smu(adev))
768                 return -EOPNOTSUPP;
769
770         mutex_lock(&adev->pm.mutex);
771         ret = smu_set_residency_gfxoff(smu, value);
772         mutex_unlock(&adev->pm.mutex);
773
774         return ret;
775 }
776
777 int amdgpu_dpm_get_residency_gfxoff(struct amdgpu_device *adev, u32 *value)
778 {
779         struct smu_context *smu = adev->powerplay.pp_handle;
780         int ret = 0;
781
782         if (!is_support_sw_smu(adev))
783                 return -EOPNOTSUPP;
784
785         mutex_lock(&adev->pm.mutex);
786         ret = smu_get_residency_gfxoff(smu, value);
787         mutex_unlock(&adev->pm.mutex);
788
789         return ret;
790 }
791
792 int amdgpu_dpm_get_entrycount_gfxoff(struct amdgpu_device *adev, u64 *value)
793 {
794         struct smu_context *smu = adev->powerplay.pp_handle;
795         int ret = 0;
796
797         if (!is_support_sw_smu(adev))
798                 return -EOPNOTSUPP;
799
800         mutex_lock(&adev->pm.mutex);
801         ret = smu_get_entrycount_gfxoff(smu, value);
802         mutex_unlock(&adev->pm.mutex);
803
804         return ret;
805 }
806
807 int amdgpu_dpm_get_status_gfxoff(struct amdgpu_device *adev, uint32_t *value)
808 {
809         struct smu_context *smu = adev->powerplay.pp_handle;
810         int ret = 0;
811
812         if (!is_support_sw_smu(adev))
813                 return -EOPNOTSUPP;
814
815         mutex_lock(&adev->pm.mutex);
816         ret = smu_get_status_gfxoff(smu, value);
817         mutex_unlock(&adev->pm.mutex);
818
819         return ret;
820 }
821
822 uint64_t amdgpu_dpm_get_thermal_throttling_counter(struct amdgpu_device *adev)
823 {
824         struct smu_context *smu = adev->powerplay.pp_handle;
825
826         if (!is_support_sw_smu(adev))
827                 return 0;
828
829         return atomic64_read(&smu->throttle_int_counter);
830 }
831
832 /* amdgpu_dpm_gfx_state_change - Handle gfx power state change set
833  * @adev: amdgpu_device pointer
834  * @state: gfx power state(1 -sGpuChangeState_D0Entry and 2 -sGpuChangeState_D3Entry)
835  *
836  */
837 void amdgpu_dpm_gfx_state_change(struct amdgpu_device *adev,
838                                  enum gfx_change_state state)
839 {
840         mutex_lock(&adev->pm.mutex);
841         if (adev->powerplay.pp_funcs &&
842             adev->powerplay.pp_funcs->gfx_state_change_set)
843                 ((adev)->powerplay.pp_funcs->gfx_state_change_set(
844                         (adev)->powerplay.pp_handle, state));
845         mutex_unlock(&adev->pm.mutex);
846 }
847
848 int amdgpu_dpm_get_ecc_info(struct amdgpu_device *adev,
849                             void *umc_ecc)
850 {
851         struct smu_context *smu = adev->powerplay.pp_handle;
852         int ret = 0;
853
854         if (!is_support_sw_smu(adev))
855                 return -EOPNOTSUPP;
856
857         mutex_lock(&adev->pm.mutex);
858         ret = smu_get_ecc_info(smu, umc_ecc);
859         mutex_unlock(&adev->pm.mutex);
860
861         return ret;
862 }
863
864 struct amd_vce_state *amdgpu_dpm_get_vce_clock_state(struct amdgpu_device *adev,
865                                                      uint32_t idx)
866 {
867         const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
868         struct amd_vce_state *vstate = NULL;
869
870         if (!pp_funcs->get_vce_clock_state)
871                 return NULL;
872
873         mutex_lock(&adev->pm.mutex);
874         vstate = pp_funcs->get_vce_clock_state(adev->powerplay.pp_handle,
875                                                idx);
876         mutex_unlock(&adev->pm.mutex);
877
878         return vstate;
879 }
880
881 void amdgpu_dpm_get_current_power_state(struct amdgpu_device *adev,
882                                         enum amd_pm_state_type *state)
883 {
884         const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
885
886         mutex_lock(&adev->pm.mutex);
887
888         if (!pp_funcs->get_current_power_state) {
889                 *state = adev->pm.dpm.user_state;
890                 goto out;
891         }
892
893         *state = pp_funcs->get_current_power_state(adev->powerplay.pp_handle);
894         if (*state < POWER_STATE_TYPE_DEFAULT ||
895             *state > POWER_STATE_TYPE_INTERNAL_3DPERF)
896                 *state = adev->pm.dpm.user_state;
897
898 out:
899         mutex_unlock(&adev->pm.mutex);
900 }
901
902 void amdgpu_dpm_set_power_state(struct amdgpu_device *adev,
903                                 enum amd_pm_state_type state)
904 {
905         mutex_lock(&adev->pm.mutex);
906         adev->pm.dpm.user_state = state;
907         mutex_unlock(&adev->pm.mutex);
908
909         if (is_support_sw_smu(adev))
910                 return;
911
912         if (amdgpu_dpm_dispatch_task(adev,
913                                      AMD_PP_TASK_ENABLE_USER_STATE,
914                                      &state) == -EOPNOTSUPP)
915                 amdgpu_dpm_compute_clocks(adev);
916 }
917
918 enum amd_dpm_forced_level amdgpu_dpm_get_performance_level(struct amdgpu_device *adev)
919 {
920         const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
921         enum amd_dpm_forced_level level;
922
923         if (!pp_funcs)
924                 return AMD_DPM_FORCED_LEVEL_AUTO;
925
926         mutex_lock(&adev->pm.mutex);
927         if (pp_funcs->get_performance_level)
928                 level = pp_funcs->get_performance_level(adev->powerplay.pp_handle);
929         else
930                 level = adev->pm.dpm.forced_level;
931         mutex_unlock(&adev->pm.mutex);
932
933         return level;
934 }
935
/**
 * amdgpu_dpm_force_performance_level - switch the dpm forced performance level
 * @adev: amdgpu device
 * @level: target level (auto/low/high/manual or one of the profiling levels)
 *
 * Returns 0 on success (also when unsupported or already at @level),
 * -EINVAL when thermal throttling is active, PROFILE_EXIT is requested
 * outside a profiling mode, or the backend rejects @level.
 */
int amdgpu_dpm_force_performance_level(struct amdgpu_device *adev,
                                       enum amd_dpm_forced_level level)
{
        const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
        enum amd_dpm_forced_level current_level;
        /* the set of levels that constitute a "UMD pstate" profiling mode */
        uint32_t profile_mode_mask = AMD_DPM_FORCED_LEVEL_PROFILE_STANDARD |
                                        AMD_DPM_FORCED_LEVEL_PROFILE_MIN_SCLK |
                                        AMD_DPM_FORCED_LEVEL_PROFILE_MIN_MCLK |
                                        AMD_DPM_FORCED_LEVEL_PROFILE_PEAK;

        if (!pp_funcs || !pp_funcs->force_performance_level)
                return 0;

        /* never override the thermal-protection state */
        if (adev->pm.dpm.thermal_active)
                return -EINVAL;

        /* NOTE(review): sampled outside pm.mutex; the lock is only taken
         * around the backend call below — confirm concurrent callers are
         * acceptable here. */
        current_level = amdgpu_dpm_get_performance_level(adev);
        if (current_level == level)
                return 0;

        /* Raven (not Raven2): keep gfxoff disabled while in manual mode,
         * re-enable it when leaving manual mode. */
        if (adev->asic_type == CHIP_RAVEN) {
                if (!(adev->apu_flags & AMD_APU_IS_RAVEN2)) {
                        if (current_level != AMD_DPM_FORCED_LEVEL_MANUAL &&
                            level == AMD_DPM_FORCED_LEVEL_MANUAL)
                                amdgpu_gfx_off_ctrl(adev, false);
                        else if (current_level == AMD_DPM_FORCED_LEVEL_MANUAL &&
                                 level != AMD_DPM_FORCED_LEVEL_MANUAL)
                                amdgpu_gfx_off_ctrl(adev, true);
                }
        }

        /* PROFILE_EXIT is only meaningful while already in a profiling mode */
        if (!(current_level & profile_mode_mask) &&
            (level == AMD_DPM_FORCED_LEVEL_PROFILE_EXIT))
                return -EINVAL;

        if (!(current_level & profile_mode_mask) &&
              (level & profile_mode_mask)) {
                /* enter UMD Pstate: ungate PG first, then CG */
                amdgpu_device_ip_set_powergating_state(adev,
                                                       AMD_IP_BLOCK_TYPE_GFX,
                                                       AMD_PG_STATE_UNGATE);
                amdgpu_device_ip_set_clockgating_state(adev,
                                                       AMD_IP_BLOCK_TYPE_GFX,
                                                       AMD_CG_STATE_UNGATE);
        } else if ((current_level & profile_mode_mask) &&
                    !(level & profile_mode_mask)) {
                /* exit UMD Pstate: reverse order — gate CG first, then PG */
                amdgpu_device_ip_set_clockgating_state(adev,
                                                       AMD_IP_BLOCK_TYPE_GFX,
                                                       AMD_CG_STATE_GATE);
                amdgpu_device_ip_set_powergating_state(adev,
                                                       AMD_IP_BLOCK_TYPE_GFX,
                                                       AMD_PG_STATE_GATE);
        }

        mutex_lock(&adev->pm.mutex);

        if (pp_funcs->force_performance_level(adev->powerplay.pp_handle,
                                              level)) {
                mutex_unlock(&adev->pm.mutex);
                return -EINVAL;
        }

        /* cache it so amdgpu_dpm_get_performance_level() can report it */
        adev->pm.dpm.forced_level = level;

        mutex_unlock(&adev->pm.mutex);

        return 0;
}
1005
1006 int amdgpu_dpm_get_pp_num_states(struct amdgpu_device *adev,
1007                                  struct pp_states_info *states)
1008 {
1009         const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
1010         int ret = 0;
1011
1012         if (!pp_funcs->get_pp_num_states)
1013                 return -EOPNOTSUPP;
1014
1015         mutex_lock(&adev->pm.mutex);
1016         ret = pp_funcs->get_pp_num_states(adev->powerplay.pp_handle,
1017                                           states);
1018         mutex_unlock(&adev->pm.mutex);
1019
1020         return ret;
1021 }
1022
1023 int amdgpu_dpm_dispatch_task(struct amdgpu_device *adev,
1024                               enum amd_pp_task task_id,
1025                               enum amd_pm_state_type *user_state)
1026 {
1027         const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
1028         int ret = 0;
1029
1030         if (!pp_funcs->dispatch_tasks)
1031                 return -EOPNOTSUPP;
1032
1033         mutex_lock(&adev->pm.mutex);
1034         ret = pp_funcs->dispatch_tasks(adev->powerplay.pp_handle,
1035                                        task_id,
1036                                        user_state);
1037         mutex_unlock(&adev->pm.mutex);
1038
1039         return ret;
1040 }
1041
1042 int amdgpu_dpm_get_pp_table(struct amdgpu_device *adev, char **table)
1043 {
1044         const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
1045         int ret = 0;
1046
1047         if (!pp_funcs->get_pp_table)
1048                 return 0;
1049
1050         mutex_lock(&adev->pm.mutex);
1051         ret = pp_funcs->get_pp_table(adev->powerplay.pp_handle,
1052                                      table);
1053         mutex_unlock(&adev->pm.mutex);
1054
1055         return ret;
1056 }
1057
1058 int amdgpu_dpm_set_fine_grain_clk_vol(struct amdgpu_device *adev,
1059                                       uint32_t type,
1060                                       long *input,
1061                                       uint32_t size)
1062 {
1063         const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
1064         int ret = 0;
1065
1066         if (!pp_funcs->set_fine_grain_clk_vol)
1067                 return 0;
1068
1069         mutex_lock(&adev->pm.mutex);
1070         ret = pp_funcs->set_fine_grain_clk_vol(adev->powerplay.pp_handle,
1071                                                type,
1072                                                input,
1073                                                size);
1074         mutex_unlock(&adev->pm.mutex);
1075
1076         return ret;
1077 }
1078
1079 int amdgpu_dpm_odn_edit_dpm_table(struct amdgpu_device *adev,
1080                                   uint32_t type,
1081                                   long *input,
1082                                   uint32_t size)
1083 {
1084         const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
1085         int ret = 0;
1086
1087         if (!pp_funcs->odn_edit_dpm_table)
1088                 return 0;
1089
1090         mutex_lock(&adev->pm.mutex);
1091         ret = pp_funcs->odn_edit_dpm_table(adev->powerplay.pp_handle,
1092                                            type,
1093                                            input,
1094                                            size);
1095         mutex_unlock(&adev->pm.mutex);
1096
1097         return ret;
1098 }
1099
1100 int amdgpu_dpm_print_clock_levels(struct amdgpu_device *adev,
1101                                   enum pp_clock_type type,
1102                                   char *buf)
1103 {
1104         const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
1105         int ret = 0;
1106
1107         if (!pp_funcs->print_clock_levels)
1108                 return 0;
1109
1110         mutex_lock(&adev->pm.mutex);
1111         ret = pp_funcs->print_clock_levels(adev->powerplay.pp_handle,
1112                                            type,
1113                                            buf);
1114         mutex_unlock(&adev->pm.mutex);
1115
1116         return ret;
1117 }
1118
1119 int amdgpu_dpm_emit_clock_levels(struct amdgpu_device *adev,
1120                                   enum pp_clock_type type,
1121                                   char *buf,
1122                                   int *offset)
1123 {
1124         const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
1125         int ret = 0;
1126
1127         if (!pp_funcs->emit_clock_levels)
1128                 return -ENOENT;
1129
1130         mutex_lock(&adev->pm.mutex);
1131         ret = pp_funcs->emit_clock_levels(adev->powerplay.pp_handle,
1132                                            type,
1133                                            buf,
1134                                            offset);
1135         mutex_unlock(&adev->pm.mutex);
1136
1137         return ret;
1138 }
1139
1140 int amdgpu_dpm_set_ppfeature_status(struct amdgpu_device *adev,
1141                                     uint64_t ppfeature_masks)
1142 {
1143         const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
1144         int ret = 0;
1145
1146         if (!pp_funcs->set_ppfeature_status)
1147                 return 0;
1148
1149         mutex_lock(&adev->pm.mutex);
1150         ret = pp_funcs->set_ppfeature_status(adev->powerplay.pp_handle,
1151                                              ppfeature_masks);
1152         mutex_unlock(&adev->pm.mutex);
1153
1154         return ret;
1155 }
1156
1157 int amdgpu_dpm_get_ppfeature_status(struct amdgpu_device *adev, char *buf)
1158 {
1159         const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
1160         int ret = 0;
1161
1162         if (!pp_funcs->get_ppfeature_status)
1163                 return 0;
1164
1165         mutex_lock(&adev->pm.mutex);
1166         ret = pp_funcs->get_ppfeature_status(adev->powerplay.pp_handle,
1167                                              buf);
1168         mutex_unlock(&adev->pm.mutex);
1169
1170         return ret;
1171 }
1172
1173 int amdgpu_dpm_force_clock_level(struct amdgpu_device *adev,
1174                                  enum pp_clock_type type,
1175                                  uint32_t mask)
1176 {
1177         const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
1178         int ret = 0;
1179
1180         if (!pp_funcs->force_clock_level)
1181                 return 0;
1182
1183         mutex_lock(&adev->pm.mutex);
1184         ret = pp_funcs->force_clock_level(adev->powerplay.pp_handle,
1185                                           type,
1186                                           mask);
1187         mutex_unlock(&adev->pm.mutex);
1188
1189         return ret;
1190 }
1191
1192 int amdgpu_dpm_get_sclk_od(struct amdgpu_device *adev)
1193 {
1194         const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
1195         int ret = 0;
1196
1197         if (!pp_funcs->get_sclk_od)
1198                 return 0;
1199
1200         mutex_lock(&adev->pm.mutex);
1201         ret = pp_funcs->get_sclk_od(adev->powerplay.pp_handle);
1202         mutex_unlock(&adev->pm.mutex);
1203
1204         return ret;
1205 }
1206
1207 int amdgpu_dpm_set_sclk_od(struct amdgpu_device *adev, uint32_t value)
1208 {
1209         const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
1210
1211         if (is_support_sw_smu(adev))
1212                 return 0;
1213
1214         mutex_lock(&adev->pm.mutex);
1215         if (pp_funcs->set_sclk_od)
1216                 pp_funcs->set_sclk_od(adev->powerplay.pp_handle, value);
1217         mutex_unlock(&adev->pm.mutex);
1218
1219         if (amdgpu_dpm_dispatch_task(adev,
1220                                      AMD_PP_TASK_READJUST_POWER_STATE,
1221                                      NULL) == -EOPNOTSUPP) {
1222                 adev->pm.dpm.current_ps = adev->pm.dpm.boot_ps;
1223                 amdgpu_dpm_compute_clocks(adev);
1224         }
1225
1226         return 0;
1227 }
1228
1229 int amdgpu_dpm_get_mclk_od(struct amdgpu_device *adev)
1230 {
1231         const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
1232         int ret = 0;
1233
1234         if (!pp_funcs->get_mclk_od)
1235                 return 0;
1236
1237         mutex_lock(&adev->pm.mutex);
1238         ret = pp_funcs->get_mclk_od(adev->powerplay.pp_handle);
1239         mutex_unlock(&adev->pm.mutex);
1240
1241         return ret;
1242 }
1243
1244 int amdgpu_dpm_set_mclk_od(struct amdgpu_device *adev, uint32_t value)
1245 {
1246         const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
1247
1248         if (is_support_sw_smu(adev))
1249                 return 0;
1250
1251         mutex_lock(&adev->pm.mutex);
1252         if (pp_funcs->set_mclk_od)
1253                 pp_funcs->set_mclk_od(adev->powerplay.pp_handle, value);
1254         mutex_unlock(&adev->pm.mutex);
1255
1256         if (amdgpu_dpm_dispatch_task(adev,
1257                                      AMD_PP_TASK_READJUST_POWER_STATE,
1258                                      NULL) == -EOPNOTSUPP) {
1259                 adev->pm.dpm.current_ps = adev->pm.dpm.boot_ps;
1260                 amdgpu_dpm_compute_clocks(adev);
1261         }
1262
1263         return 0;
1264 }
1265
1266 int amdgpu_dpm_get_power_profile_mode(struct amdgpu_device *adev,
1267                                       char *buf)
1268 {
1269         const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
1270         int ret = 0;
1271
1272         if (!pp_funcs->get_power_profile_mode)
1273                 return -EOPNOTSUPP;
1274
1275         mutex_lock(&adev->pm.mutex);
1276         ret = pp_funcs->get_power_profile_mode(adev->powerplay.pp_handle,
1277                                                buf);
1278         mutex_unlock(&adev->pm.mutex);
1279
1280         return ret;
1281 }
1282
1283 int amdgpu_dpm_set_power_profile_mode(struct amdgpu_device *adev,
1284                                       long *input, uint32_t size)
1285 {
1286         const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
1287         int ret = 0;
1288
1289         if (!pp_funcs->set_power_profile_mode)
1290                 return 0;
1291
1292         mutex_lock(&adev->pm.mutex);
1293         ret = pp_funcs->set_power_profile_mode(adev->powerplay.pp_handle,
1294                                                input,
1295                                                size);
1296         mutex_unlock(&adev->pm.mutex);
1297
1298         return ret;
1299 }
1300
1301 int amdgpu_dpm_get_gpu_metrics(struct amdgpu_device *adev, void **table)
1302 {
1303         const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
1304         int ret = 0;
1305
1306         if (!pp_funcs->get_gpu_metrics)
1307                 return 0;
1308
1309         mutex_lock(&adev->pm.mutex);
1310         ret = pp_funcs->get_gpu_metrics(adev->powerplay.pp_handle,
1311                                         table);
1312         mutex_unlock(&adev->pm.mutex);
1313
1314         return ret;
1315 }
1316
1317 int amdgpu_dpm_get_fan_control_mode(struct amdgpu_device *adev,
1318                                     uint32_t *fan_mode)
1319 {
1320         const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
1321         int ret = 0;
1322
1323         if (!pp_funcs->get_fan_control_mode)
1324                 return -EOPNOTSUPP;
1325
1326         mutex_lock(&adev->pm.mutex);
1327         ret = pp_funcs->get_fan_control_mode(adev->powerplay.pp_handle,
1328                                              fan_mode);
1329         mutex_unlock(&adev->pm.mutex);
1330
1331         return ret;
1332 }
1333
1334 int amdgpu_dpm_set_fan_speed_pwm(struct amdgpu_device *adev,
1335                                  uint32_t speed)
1336 {
1337         const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
1338         int ret = 0;
1339
1340         if (!pp_funcs->set_fan_speed_pwm)
1341                 return -EOPNOTSUPP;
1342
1343         mutex_lock(&adev->pm.mutex);
1344         ret = pp_funcs->set_fan_speed_pwm(adev->powerplay.pp_handle,
1345                                           speed);
1346         mutex_unlock(&adev->pm.mutex);
1347
1348         return ret;
1349 }
1350
1351 int amdgpu_dpm_get_fan_speed_pwm(struct amdgpu_device *adev,
1352                                  uint32_t *speed)
1353 {
1354         const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
1355         int ret = 0;
1356
1357         if (!pp_funcs->get_fan_speed_pwm)
1358                 return -EOPNOTSUPP;
1359
1360         mutex_lock(&adev->pm.mutex);
1361         ret = pp_funcs->get_fan_speed_pwm(adev->powerplay.pp_handle,
1362                                           speed);
1363         mutex_unlock(&adev->pm.mutex);
1364
1365         return ret;
1366 }
1367
1368 int amdgpu_dpm_get_fan_speed_rpm(struct amdgpu_device *adev,
1369                                  uint32_t *speed)
1370 {
1371         const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
1372         int ret = 0;
1373
1374         if (!pp_funcs->get_fan_speed_rpm)
1375                 return -EOPNOTSUPP;
1376
1377         mutex_lock(&adev->pm.mutex);
1378         ret = pp_funcs->get_fan_speed_rpm(adev->powerplay.pp_handle,
1379                                           speed);
1380         mutex_unlock(&adev->pm.mutex);
1381
1382         return ret;
1383 }
1384
1385 int amdgpu_dpm_set_fan_speed_rpm(struct amdgpu_device *adev,
1386                                  uint32_t speed)
1387 {
1388         const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
1389         int ret = 0;
1390
1391         if (!pp_funcs->set_fan_speed_rpm)
1392                 return -EOPNOTSUPP;
1393
1394         mutex_lock(&adev->pm.mutex);
1395         ret = pp_funcs->set_fan_speed_rpm(adev->powerplay.pp_handle,
1396                                           speed);
1397         mutex_unlock(&adev->pm.mutex);
1398
1399         return ret;
1400 }
1401
1402 int amdgpu_dpm_set_fan_control_mode(struct amdgpu_device *adev,
1403                                     uint32_t mode)
1404 {
1405         const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
1406         int ret = 0;
1407
1408         if (!pp_funcs->set_fan_control_mode)
1409                 return -EOPNOTSUPP;
1410
1411         mutex_lock(&adev->pm.mutex);
1412         ret = pp_funcs->set_fan_control_mode(adev->powerplay.pp_handle,
1413                                              mode);
1414         mutex_unlock(&adev->pm.mutex);
1415
1416         return ret;
1417 }
1418
1419 int amdgpu_dpm_get_power_limit(struct amdgpu_device *adev,
1420                                uint32_t *limit,
1421                                enum pp_power_limit_level pp_limit_level,
1422                                enum pp_power_type power_type)
1423 {
1424         const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
1425         int ret = 0;
1426
1427         if (!pp_funcs->get_power_limit)
1428                 return -ENODATA;
1429
1430         mutex_lock(&adev->pm.mutex);
1431         ret = pp_funcs->get_power_limit(adev->powerplay.pp_handle,
1432                                         limit,
1433                                         pp_limit_level,
1434                                         power_type);
1435         mutex_unlock(&adev->pm.mutex);
1436
1437         return ret;
1438 }
1439
1440 int amdgpu_dpm_set_power_limit(struct amdgpu_device *adev,
1441                                uint32_t limit)
1442 {
1443         const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
1444         int ret = 0;
1445
1446         if (!pp_funcs->set_power_limit)
1447                 return -EINVAL;
1448
1449         mutex_lock(&adev->pm.mutex);
1450         ret = pp_funcs->set_power_limit(adev->powerplay.pp_handle,
1451                                         limit);
1452         mutex_unlock(&adev->pm.mutex);
1453
1454         return ret;
1455 }
1456
1457 int amdgpu_dpm_is_cclk_dpm_supported(struct amdgpu_device *adev)
1458 {
1459         bool cclk_dpm_supported = false;
1460
1461         if (!is_support_sw_smu(adev))
1462                 return false;
1463
1464         mutex_lock(&adev->pm.mutex);
1465         cclk_dpm_supported = is_support_cclk_dpm(adev);
1466         mutex_unlock(&adev->pm.mutex);
1467
1468         return (int)cclk_dpm_supported;
1469 }
1470
1471 int amdgpu_dpm_debugfs_print_current_performance_level(struct amdgpu_device *adev,
1472                                                        struct seq_file *m)
1473 {
1474         const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
1475
1476         if (!pp_funcs->debugfs_print_current_performance_level)
1477                 return -EOPNOTSUPP;
1478
1479         mutex_lock(&adev->pm.mutex);
1480         pp_funcs->debugfs_print_current_performance_level(adev->powerplay.pp_handle,
1481                                                           m);
1482         mutex_unlock(&adev->pm.mutex);
1483
1484         return 0;
1485 }
1486
1487 int amdgpu_dpm_get_smu_prv_buf_details(struct amdgpu_device *adev,
1488                                        void **addr,
1489                                        size_t *size)
1490 {
1491         const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
1492         int ret = 0;
1493
1494         if (!pp_funcs->get_smu_prv_buf_details)
1495                 return -ENOSYS;
1496
1497         mutex_lock(&adev->pm.mutex);
1498         ret = pp_funcs->get_smu_prv_buf_details(adev->powerplay.pp_handle,
1499                                                 addr,
1500                                                 size);
1501         mutex_unlock(&adev->pm.mutex);
1502
1503         return ret;
1504 }
1505
1506 int amdgpu_dpm_is_overdrive_supported(struct amdgpu_device *adev)
1507 {
1508         if (is_support_sw_smu(adev)) {
1509                 struct smu_context *smu = adev->powerplay.pp_handle;
1510
1511                 return (smu->od_enabled || smu->is_apu);
1512         } else {
1513                 struct pp_hwmgr *hwmgr;
1514
1515                 /*
1516                  * dpm on some legacy asics don't carry od_enabled member
1517                  * as its pp_handle is casted directly from adev.
1518                  */
1519                 if (amdgpu_dpm_is_legacy_dpm(adev))
1520                         return false;
1521
1522                 hwmgr = (struct pp_hwmgr *)adev->powerplay.pp_handle;
1523
1524                 return hwmgr->od_enabled;
1525         }
1526 }
1527
1528 int amdgpu_dpm_set_pp_table(struct amdgpu_device *adev,
1529                             const char *buf,
1530                             size_t size)
1531 {
1532         const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
1533         int ret = 0;
1534
1535         if (!pp_funcs->set_pp_table)
1536                 return -EOPNOTSUPP;
1537
1538         mutex_lock(&adev->pm.mutex);
1539         ret = pp_funcs->set_pp_table(adev->powerplay.pp_handle,
1540                                      buf,
1541                                      size);
1542         mutex_unlock(&adev->pm.mutex);
1543
1544         return ret;
1545 }
1546
1547 int amdgpu_dpm_get_num_cpu_cores(struct amdgpu_device *adev)
1548 {
1549         struct smu_context *smu = adev->powerplay.pp_handle;
1550
1551         if (!is_support_sw_smu(adev))
1552                 return INT_MAX;
1553
1554         return smu->cpu_core_num;
1555 }
1556
/* Set up the STB debugfs entries; only the swsmu path provides them. */
void amdgpu_dpm_stb_debug_fs_init(struct amdgpu_device *adev)
{
        if (is_support_sw_smu(adev))
                amdgpu_smu_stb_debug_fs_init(adev);
}
1564
1565 int amdgpu_dpm_display_configuration_change(struct amdgpu_device *adev,
1566                                             const struct amd_pp_display_configuration *input)
1567 {
1568         const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
1569         int ret = 0;
1570
1571         if (!pp_funcs->display_configuration_change)
1572                 return 0;
1573
1574         mutex_lock(&adev->pm.mutex);
1575         ret = pp_funcs->display_configuration_change(adev->powerplay.pp_handle,
1576                                                      input);
1577         mutex_unlock(&adev->pm.mutex);
1578
1579         return ret;
1580 }
1581
1582 int amdgpu_dpm_get_clock_by_type(struct amdgpu_device *adev,
1583                                  enum amd_pp_clock_type type,
1584                                  struct amd_pp_clocks *clocks)
1585 {
1586         const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
1587         int ret = 0;
1588
1589         if (!pp_funcs->get_clock_by_type)
1590                 return 0;
1591
1592         mutex_lock(&adev->pm.mutex);
1593         ret = pp_funcs->get_clock_by_type(adev->powerplay.pp_handle,
1594                                           type,
1595                                           clocks);
1596         mutex_unlock(&adev->pm.mutex);
1597
1598         return ret;
1599 }
1600
1601 int amdgpu_dpm_get_display_mode_validation_clks(struct amdgpu_device *adev,
1602                                                 struct amd_pp_simple_clock_info *clocks)
1603 {
1604         const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
1605         int ret = 0;
1606
1607         if (!pp_funcs->get_display_mode_validation_clocks)
1608                 return 0;
1609
1610         mutex_lock(&adev->pm.mutex);
1611         ret = pp_funcs->get_display_mode_validation_clocks(adev->powerplay.pp_handle,
1612                                                            clocks);
1613         mutex_unlock(&adev->pm.mutex);
1614
1615         return ret;
1616 }
1617
1618 int amdgpu_dpm_get_clock_by_type_with_latency(struct amdgpu_device *adev,
1619                                               enum amd_pp_clock_type type,
1620                                               struct pp_clock_levels_with_latency *clocks)
1621 {
1622         const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
1623         int ret = 0;
1624
1625         if (!pp_funcs->get_clock_by_type_with_latency)
1626                 return 0;
1627
1628         mutex_lock(&adev->pm.mutex);
1629         ret = pp_funcs->get_clock_by_type_with_latency(adev->powerplay.pp_handle,
1630                                                        type,
1631                                                        clocks);
1632         mutex_unlock(&adev->pm.mutex);
1633
1634         return ret;
1635 }
1636
1637 int amdgpu_dpm_get_clock_by_type_with_voltage(struct amdgpu_device *adev,
1638                                               enum amd_pp_clock_type type,
1639                                               struct pp_clock_levels_with_voltage *clocks)
1640 {
1641         const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
1642         int ret = 0;
1643
1644         if (!pp_funcs->get_clock_by_type_with_voltage)
1645                 return 0;
1646
1647         mutex_lock(&adev->pm.mutex);
1648         ret = pp_funcs->get_clock_by_type_with_voltage(adev->powerplay.pp_handle,
1649                                                        type,
1650                                                        clocks);
1651         mutex_unlock(&adev->pm.mutex);
1652
1653         return ret;
1654 }
1655
1656 int amdgpu_dpm_set_watermarks_for_clocks_ranges(struct amdgpu_device *adev,
1657                                                void *clock_ranges)
1658 {
1659         const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
1660         int ret = 0;
1661
1662         if (!pp_funcs->set_watermarks_for_clocks_ranges)
1663                 return -EOPNOTSUPP;
1664
1665         mutex_lock(&adev->pm.mutex);
1666         ret = pp_funcs->set_watermarks_for_clocks_ranges(adev->powerplay.pp_handle,
1667                                                          clock_ranges);
1668         mutex_unlock(&adev->pm.mutex);
1669
1670         return ret;
1671 }
1672
1673 int amdgpu_dpm_display_clock_voltage_request(struct amdgpu_device *adev,
1674                                              struct pp_display_clock_request *clock)
1675 {
1676         const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
1677         int ret = 0;
1678
1679         if (!pp_funcs->display_clock_voltage_request)
1680                 return -EOPNOTSUPP;
1681
1682         mutex_lock(&adev->pm.mutex);
1683         ret = pp_funcs->display_clock_voltage_request(adev->powerplay.pp_handle,
1684                                                       clock);
1685         mutex_unlock(&adev->pm.mutex);
1686
1687         return ret;
1688 }
1689
1690 int amdgpu_dpm_get_current_clocks(struct amdgpu_device *adev,
1691                                   struct amd_pp_clock_info *clocks)
1692 {
1693         const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
1694         int ret = 0;
1695
1696         if (!pp_funcs->get_current_clocks)
1697                 return -EOPNOTSUPP;
1698
1699         mutex_lock(&adev->pm.mutex);
1700         ret = pp_funcs->get_current_clocks(adev->powerplay.pp_handle,
1701                                            clocks);
1702         mutex_unlock(&adev->pm.mutex);
1703
1704         return ret;
1705 }
1706
1707 void amdgpu_dpm_notify_smu_enable_pwe(struct amdgpu_device *adev)
1708 {
1709         const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
1710
1711         if (!pp_funcs->notify_smu_enable_pwe)
1712                 return;
1713
1714         mutex_lock(&adev->pm.mutex);
1715         pp_funcs->notify_smu_enable_pwe(adev->powerplay.pp_handle);
1716         mutex_unlock(&adev->pm.mutex);
1717 }
1718
1719 int amdgpu_dpm_set_active_display_count(struct amdgpu_device *adev,
1720                                         uint32_t count)
1721 {
1722         const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
1723         int ret = 0;
1724
1725         if (!pp_funcs->set_active_display_count)
1726                 return -EOPNOTSUPP;
1727
1728         mutex_lock(&adev->pm.mutex);
1729         ret = pp_funcs->set_active_display_count(adev->powerplay.pp_handle,
1730                                                  count);
1731         mutex_unlock(&adev->pm.mutex);
1732
1733         return ret;
1734 }
1735
1736 int amdgpu_dpm_set_min_deep_sleep_dcefclk(struct amdgpu_device *adev,
1737                                           uint32_t clock)
1738 {
1739         const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
1740         int ret = 0;
1741
1742         if (!pp_funcs->set_min_deep_sleep_dcefclk)
1743                 return -EOPNOTSUPP;
1744
1745         mutex_lock(&adev->pm.mutex);
1746         ret = pp_funcs->set_min_deep_sleep_dcefclk(adev->powerplay.pp_handle,
1747                                                    clock);
1748         mutex_unlock(&adev->pm.mutex);
1749
1750         return ret;
1751 }
1752
1753 void amdgpu_dpm_set_hard_min_dcefclk_by_freq(struct amdgpu_device *adev,
1754                                              uint32_t clock)
1755 {
1756         const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
1757
1758         if (!pp_funcs->set_hard_min_dcefclk_by_freq)
1759                 return;
1760
1761         mutex_lock(&adev->pm.mutex);
1762         pp_funcs->set_hard_min_dcefclk_by_freq(adev->powerplay.pp_handle,
1763                                                clock);
1764         mutex_unlock(&adev->pm.mutex);
1765 }
1766
1767 void amdgpu_dpm_set_hard_min_fclk_by_freq(struct amdgpu_device *adev,
1768                                           uint32_t clock)
1769 {
1770         const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
1771
1772         if (!pp_funcs->set_hard_min_fclk_by_freq)
1773                 return;
1774
1775         mutex_lock(&adev->pm.mutex);
1776         pp_funcs->set_hard_min_fclk_by_freq(adev->powerplay.pp_handle,
1777                                             clock);
1778         mutex_unlock(&adev->pm.mutex);
1779 }
1780
1781 int amdgpu_dpm_display_disable_memory_clock_switch(struct amdgpu_device *adev,
1782                                                    bool disable_memory_clock_switch)
1783 {
1784         const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
1785         int ret = 0;
1786
1787         if (!pp_funcs->display_disable_memory_clock_switch)
1788                 return 0;
1789
1790         mutex_lock(&adev->pm.mutex);
1791         ret = pp_funcs->display_disable_memory_clock_switch(adev->powerplay.pp_handle,
1792                                                             disable_memory_clock_switch);
1793         mutex_unlock(&adev->pm.mutex);
1794
1795         return ret;
1796 }
1797
1798 int amdgpu_dpm_get_max_sustainable_clocks_by_dc(struct amdgpu_device *adev,
1799                                                 struct pp_smu_nv_clock_table *max_clocks)
1800 {
1801         const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
1802         int ret = 0;
1803
1804         if (!pp_funcs->get_max_sustainable_clocks_by_dc)
1805                 return -EOPNOTSUPP;
1806
1807         mutex_lock(&adev->pm.mutex);
1808         ret = pp_funcs->get_max_sustainable_clocks_by_dc(adev->powerplay.pp_handle,
1809                                                          max_clocks);
1810         mutex_unlock(&adev->pm.mutex);
1811
1812         return ret;
1813 }
1814
1815 enum pp_smu_status amdgpu_dpm_get_uclk_dpm_states(struct amdgpu_device *adev,
1816                                                   unsigned int *clock_values_in_khz,
1817                                                   unsigned int *num_states)
1818 {
1819         const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
1820         int ret = 0;
1821
1822         if (!pp_funcs->get_uclk_dpm_states)
1823                 return -EOPNOTSUPP;
1824
1825         mutex_lock(&adev->pm.mutex);
1826         ret = pp_funcs->get_uclk_dpm_states(adev->powerplay.pp_handle,
1827                                             clock_values_in_khz,
1828                                             num_states);
1829         mutex_unlock(&adev->pm.mutex);
1830
1831         return ret;
1832 }
1833
1834 int amdgpu_dpm_get_dpm_clock_table(struct amdgpu_device *adev,
1835                                    struct dpm_clocks *clock_table)
1836 {
1837         const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
1838         int ret = 0;
1839
1840         if (!pp_funcs->get_dpm_clock_table)
1841                 return -EOPNOTSUPP;
1842
1843         mutex_lock(&adev->pm.mutex);
1844         ret = pp_funcs->get_dpm_clock_table(adev->powerplay.pp_handle,
1845                                             clock_table);
1846         mutex_unlock(&adev->pm.mutex);
1847
1848         return ret;
1849 }