drivers/gpu/drm/amd/powerplay/amd_powerplay.c
1 /*
2  * Copyright 2015 Advanced Micro Devices, Inc.
3  *
4  * Permission is hereby granted, free of charge, to any person obtaining a
5  * copy of this software and associated documentation files (the "Software"),
6  * to deal in the Software without restriction, including without limitation
7  * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8  * and/or sell copies of the Software, and to permit persons to whom the
9  * Software is furnished to do so, subject to the following conditions:
10  *
11  * The above copyright notice and this permission notice shall be included in
12  * all copies or substantial portions of the Software.
13  *
14  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
17  * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18  * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19  * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20  * OTHER DEALINGS IN THE SOFTWARE.
21  *
22  */
23 #include "pp_debug.h"
24 #include <linux/types.h>
25 #include <linux/kernel.h>
26 #include <linux/gfp.h>
27 #include <linux/slab.h>
28 #include <linux/firmware.h>
29 #include "amd_shared.h"
30 #include "amd_powerplay.h"
31 #include "power_state.h"
32 #include "amdgpu.h"
33 #include "hwmgr.h"
34
35
36 static const struct amd_pm_funcs pp_dpm_funcs;
37
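/*
 * Allocate the pp_hwmgr instance for this amdgpu device, seed it with the
 * ASIC and feature information the hwmgr layer needs, and publish it in
 * adev->powerplay together with the amd_pm_funcs dispatch table below.
 */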
38 static int amd_powerplay_create(struct amdgpu_device *adev)
39 {
40         struct pp_hwmgr *hwmgr;
41
42         if (adev == NULL)
43                 return -EINVAL;
44
45         hwmgr = kzalloc(sizeof(struct pp_hwmgr), GFP_KERNEL);
46         if (hwmgr == NULL)
47                 return -ENOMEM;
48
49         hwmgr->adev = adev;
50         hwmgr->not_vf = !amdgpu_sriov_vf(adev);
51         hwmgr->device = amdgpu_cgs_create_device(adev);
52         mutex_init(&hwmgr->smu_lock);
53         hwmgr->chip_family = adev->family;
54         hwmgr->chip_id = adev->asic_type;
55         hwmgr->feature_mask = adev->pm.pp_feature;
56         hwmgr->display_config = &adev->pm.pm_display_cfg;
57         adev->powerplay.pp_handle = hwmgr;
58         adev->powerplay.pp_funcs = &pp_dpm_funcs;
59         return 0;
60 }
61
62
63 static void amd_powerplay_destroy(struct amdgpu_device *adev)
64 {
65         struct pp_hwmgr *hwmgr = adev->powerplay.pp_handle;
66
67         kfree(hwmgr->hardcode_pp_table);
68         hwmgr->hardcode_pp_table = NULL;
69
70         kfree(hwmgr);
71         hwmgr = NULL;
72 }
73
74 static int pp_early_init(void *handle)
75 {
76         int ret;
77         struct amdgpu_device *adev = handle;
78
79         ret = amd_powerplay_create(adev);
80
81         if (ret != 0)
82                 return ret;
83
84         ret = hwmgr_early_init(adev->powerplay.pp_handle);
85         if (ret)
86                 return -EINVAL;
87
88         return 0;
89 }
90
91 static int pp_sw_init(void *handle)
92 {
93         struct amdgpu_device *adev = handle;
94         struct pp_hwmgr *hwmgr = adev->powerplay.pp_handle;
95         int ret = 0;
96
97         ret = hwmgr_sw_init(hwmgr);
98
99         pr_debug("powerplay sw init %s\n", ret ? "failed" : "successfully");
100
101         return ret;
102 }
103
104 static int pp_sw_fini(void *handle)
105 {
106         struct amdgpu_device *adev = handle;
107         struct pp_hwmgr *hwmgr = adev->powerplay.pp_handle;
108
109         hwmgr_sw_fini(hwmgr);
110
111         release_firmware(adev->pm.fw);
112         adev->pm.fw = NULL;
113
114         return 0;
115 }
116
117 static int pp_hw_init(void *handle)
118 {
119         int ret = 0;
120         struct amdgpu_device *adev = handle;
121         struct pp_hwmgr *hwmgr = adev->powerplay.pp_handle;
122
123         ret = hwmgr_hw_init(hwmgr);
124
125         if (ret)
126                 pr_err("powerplay hw init failed\n");
127
128         return ret;
129 }
130
131 static int pp_hw_fini(void *handle)
132 {
133         struct amdgpu_device *adev = handle;
134         struct pp_hwmgr *hwmgr = adev->powerplay.pp_handle;
135
136         hwmgr_hw_fini(hwmgr);
137
138         return 0;
139 }
140
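/*
 * Carve out the SMU private buffer in GTT and hand its CPU and GPU
 * addresses to the SMU through notify_cac_buffer_info(); the buffer is
 * freed again if the SMU cannot be notified.
 */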
141 static void pp_reserve_vram_for_smu(struct amdgpu_device *adev)
142 {
143         int r = -EINVAL;
144         void *cpu_ptr = NULL;
145         uint64_t gpu_addr;
146         struct pp_hwmgr *hwmgr = adev->powerplay.pp_handle;
147
148         if (amdgpu_bo_create_kernel(adev, adev->pm.smu_prv_buffer_size,
149                                                 PAGE_SIZE, AMDGPU_GEM_DOMAIN_GTT,
150                                                 &adev->pm.smu_prv_buffer,
151                                                 &gpu_addr,
152                                                 &cpu_ptr)) {
153                 DRM_ERROR("amdgpu: failed to create smu prv buffer\n");
154                 return;
155         }
156
157         if (hwmgr->hwmgr_func->notify_cac_buffer_info)
158                 r = hwmgr->hwmgr_func->notify_cac_buffer_info(hwmgr,
159                                         lower_32_bits((unsigned long)cpu_ptr),
160                                         upper_32_bits((unsigned long)cpu_ptr),
161                                         lower_32_bits(gpu_addr),
162                                         upper_32_bits(gpu_addr),
163                                         adev->pm.smu_prv_buffer_size);
164
165         if (r) {
166                 amdgpu_bo_free_kernel(&adev->pm.smu_prv_buffer, NULL, NULL);
167                 adev->pm.smu_prv_buffer = NULL;
168                 DRM_ERROR("amdgpu: failed to notify SMU buffer address\n");
169         }
170 }
171
172 static int pp_late_init(void *handle)
173 {
174         struct amdgpu_device *adev = handle;
175         struct pp_hwmgr *hwmgr = adev->powerplay.pp_handle;
176
177         if (hwmgr && hwmgr->pm_en) {
178                 mutex_lock(&hwmgr->smu_lock);
179                 hwmgr_handle_task(hwmgr,
180                                         AMD_PP_TASK_COMPLETE_INIT, NULL);
181                 mutex_unlock(&hwmgr->smu_lock);
182         }
183         if (adev->pm.smu_prv_buffer_size != 0)
184                 pp_reserve_vram_for_smu(adev);
185
186         return 0;
187 }
188
189 static void pp_late_fini(void *handle)
190 {
191         struct amdgpu_device *adev = handle;
192
193         if (adev->pm.smu_prv_buffer)
194                 amdgpu_bo_free_kernel(&adev->pm.smu_prv_buffer, NULL, NULL);
195         amd_powerplay_destroy(adev);
196 }
197
198
199 static bool pp_is_idle(void *handle)
200 {
201         return false;
202 }
203
204 static int pp_wait_for_idle(void *handle)
205 {
206         return 0;
207 }
208
209 static int pp_sw_reset(void *handle)
210 {
211         return 0;
212 }
213
214 static int pp_set_powergating_state(void *handle,
215                                     enum amd_powergating_state state)
216 {
217         return 0;
218 }
219
220 static int pp_suspend(void *handle)
221 {
222         struct amdgpu_device *adev = handle;
223         struct pp_hwmgr *hwmgr = adev->powerplay.pp_handle;
224
225         return hwmgr_suspend(hwmgr);
226 }
227
228 static int pp_resume(void *handle)
229 {
230         struct amdgpu_device *adev = handle;
231         struct pp_hwmgr *hwmgr = adev->powerplay.pp_handle;
232
233         return hwmgr_resume(hwmgr);
234 }
235
236 static int pp_set_clockgating_state(void *handle,
237                                           enum amd_clockgating_state state)
238 {
239         return 0;
240 }
241
242 static const struct amd_ip_funcs pp_ip_funcs = {
243         .name = "powerplay",
244         .early_init = pp_early_init,
245         .late_init = pp_late_init,
246         .sw_init = pp_sw_init,
247         .sw_fini = pp_sw_fini,
248         .hw_init = pp_hw_init,
249         .hw_fini = pp_hw_fini,
250         .late_fini = pp_late_fini,
251         .suspend = pp_suspend,
252         .resume = pp_resume,
253         .is_idle = pp_is_idle,
254         .wait_for_idle = pp_wait_for_idle,
255         .soft_reset = pp_sw_reset,
256         .set_clockgating_state = pp_set_clockgating_state,
257         .set_powergating_state = pp_set_powergating_state,
258 };
259
260 const struct amdgpu_ip_block_version pp_smu_ip_block =
261 {
262         .type = AMD_IP_BLOCK_TYPE_SMC,
263         .major = 1,
264         .minor = 0,
265         .rev = 0,
266         .funcs = &pp_ip_funcs,
267 };
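/*
 * Illustrative sketch only (not part of this file): the per-ASIC setup code
 * is expected to register the SMC block above with something along the
 * lines of
 *
 *      amdgpu_device_ip_block_add(adev, &pp_smu_ip_block);
 *
 * after which the amd_ip_funcs callbacks above drive powerplay
 * init/fini/suspend/resume for that device.
 */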
268
269 /* This interface is only supported on VI,
270  * because only smu7/8 can help to load the gfx/sdma fw.
271  * The SMU needs to be enabled before the other IPs' fw can be loaded,
272  * so call start_smu here to load the smu7 fw along with the other IPs' fw.
273  */
274 static int pp_dpm_load_fw(void *handle)
275 {
276         struct pp_hwmgr *hwmgr = handle;
277
278         if (!hwmgr || !hwmgr->smumgr_funcs || !hwmgr->smumgr_funcs->start_smu)
279                 return -EINVAL;
280
281         if (hwmgr->smumgr_funcs->start_smu(hwmgr)) {
282                 pr_err("fw load failed\n");
283                 return -EINVAL;
284         }
285
286         return 0;
287 }
288
289 static int pp_dpm_fw_loading_complete(void *handle)
290 {
291         return 0;
292 }
293
294 static int pp_set_clockgating_by_smu(void *handle, uint32_t msg_id)
295 {
296         struct pp_hwmgr *hwmgr = handle;
297
298         if (!hwmgr || !hwmgr->pm_en)
299                 return -EINVAL;
300
301         if (hwmgr->hwmgr_func->update_clock_gatings == NULL) {
302                 pr_info_ratelimited("%s was not implemented.\n", __func__);
303                 return 0;
304         }
305
306         return hwmgr->hwmgr_func->update_clock_gatings(hwmgr, &msg_id);
307 }
308
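/*
 * When entering a profiling (UMD pstate) level, remember the current DPM
 * level and ungate GFX clock/power gating; when leaving it, restore the
 * saved level (on PROFILE_EXIT) and re-enable GFX gating.
 */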
309 static void pp_dpm_en_umd_pstate(struct pp_hwmgr  *hwmgr,
310                                                 enum amd_dpm_forced_level *level)
311 {
312         uint32_t profile_mode_mask = AMD_DPM_FORCED_LEVEL_PROFILE_STANDARD |
313                                         AMD_DPM_FORCED_LEVEL_PROFILE_MIN_SCLK |
314                                         AMD_DPM_FORCED_LEVEL_PROFILE_MIN_MCLK |
315                                         AMD_DPM_FORCED_LEVEL_PROFILE_PEAK;
316
317         if (!(hwmgr->dpm_level & profile_mode_mask)) {
318                 /* enter umd pstate, save current level, disable gfx cg*/
319                 if (*level & profile_mode_mask) {
320                         hwmgr->saved_dpm_level = hwmgr->dpm_level;
321                         hwmgr->en_umd_pstate = true;
322                         amdgpu_device_ip_set_clockgating_state(hwmgr->adev,
323                                                 AMD_IP_BLOCK_TYPE_GFX,
324                                                 AMD_CG_STATE_UNGATE);
325                         amdgpu_device_ip_set_powergating_state(hwmgr->adev,
326                                         AMD_IP_BLOCK_TYPE_GFX,
327                                         AMD_PG_STATE_UNGATE);
328                 }
329         } else {
330                 /* exit umd pstate, restore level, enable gfx cg*/
331                 if (!(*level & profile_mode_mask)) {
332                         if (*level == AMD_DPM_FORCED_LEVEL_PROFILE_EXIT)
333                                 *level = hwmgr->saved_dpm_level;
334                         hwmgr->en_umd_pstate = false;
335                         amdgpu_device_ip_set_clockgating_state(hwmgr->adev,
336                                         AMD_IP_BLOCK_TYPE_GFX,
337                                         AMD_CG_STATE_GATE);
338                         amdgpu_device_ip_set_powergating_state(hwmgr->adev,
339                                         AMD_IP_BLOCK_TYPE_GFX,
340                                         AMD_PG_STATE_GATE);
341                 }
342         }
343 }
344
345 static int pp_dpm_force_performance_level(void *handle,
346                                         enum amd_dpm_forced_level level)
347 {
348         struct pp_hwmgr *hwmgr = handle;
349
350         if (!hwmgr || !hwmgr->pm_en)
351                 return -EINVAL;
352
353         if (level == hwmgr->dpm_level)
354                 return 0;
355
356         mutex_lock(&hwmgr->smu_lock);
357         pp_dpm_en_umd_pstate(hwmgr, &level);
358         hwmgr->request_dpm_level = level;
359         hwmgr_handle_task(hwmgr, AMD_PP_TASK_READJUST_POWER_STATE, NULL);
360         mutex_unlock(&hwmgr->smu_lock);
361
362         return 0;
363 }
364
365 static enum amd_dpm_forced_level pp_dpm_get_performance_level(
366                                                                 void *handle)
367 {
368         struct pp_hwmgr *hwmgr = handle;
369         enum amd_dpm_forced_level level;
370
371         if (!hwmgr || !hwmgr->pm_en)
372                 return -EINVAL;
373
374         mutex_lock(&hwmgr->smu_lock);
375         level = hwmgr->dpm_level;
376         mutex_unlock(&hwmgr->smu_lock);
377         return level;
378 }
379
380 static uint32_t pp_dpm_get_sclk(void *handle, bool low)
381 {
382         struct pp_hwmgr *hwmgr = handle;
383         uint32_t clk = 0;
384
385         if (!hwmgr || !hwmgr->pm_en)
386                 return 0;
387
388         if (hwmgr->hwmgr_func->get_sclk == NULL) {
389                 pr_info_ratelimited("%s was not implemented.\n", __func__);
390                 return 0;
391         }
392         mutex_lock(&hwmgr->smu_lock);
393         clk = hwmgr->hwmgr_func->get_sclk(hwmgr, low);
394         mutex_unlock(&hwmgr->smu_lock);
395         return clk;
396 }
397
398 static uint32_t pp_dpm_get_mclk(void *handle, bool low)
399 {
400         struct pp_hwmgr *hwmgr = handle;
401         uint32_t clk = 0;
402
403         if (!hwmgr || !hwmgr->pm_en)
404                 return 0;
405
406         if (hwmgr->hwmgr_func->get_mclk == NULL) {
407                 pr_info_ratelimited("%s was not implemented.\n", __func__);
408                 return 0;
409         }
410         mutex_lock(&hwmgr->smu_lock);
411         clk = hwmgr->hwmgr_func->get_mclk(hwmgr, low);
412         mutex_unlock(&hwmgr->smu_lock);
413         return clk;
414 }
415
416 static void pp_dpm_powergate_vce(void *handle, bool gate)
417 {
418         struct pp_hwmgr *hwmgr = handle;
419
420         if (!hwmgr || !hwmgr->pm_en)
421                 return;
422
423         if (hwmgr->hwmgr_func->powergate_vce == NULL) {
424                 pr_info_ratelimited("%s was not implemented.\n", __func__);
425                 return;
426         }
427         mutex_lock(&hwmgr->smu_lock);
428         hwmgr->hwmgr_func->powergate_vce(hwmgr, gate);
429         mutex_unlock(&hwmgr->smu_lock);
430 }
431
432 static void pp_dpm_powergate_uvd(void *handle, bool gate)
433 {
434         struct pp_hwmgr *hwmgr = handle;
435
436         if (!hwmgr || !hwmgr->pm_en)
437                 return;
438
439         if (hwmgr->hwmgr_func->powergate_uvd == NULL) {
440                 pr_info_ratelimited("%s was not implemented.\n", __func__);
441                 return;
442         }
443         mutex_lock(&hwmgr->smu_lock);
444         hwmgr->hwmgr_func->powergate_uvd(hwmgr, gate);
445         mutex_unlock(&hwmgr->smu_lock);
446 }
447
448 static int pp_dpm_dispatch_tasks(void *handle, enum amd_pp_task task_id,
449                 enum amd_pm_state_type *user_state)
450 {
451         int ret = 0;
452         struct pp_hwmgr *hwmgr = handle;
453
454         if (!hwmgr || !hwmgr->pm_en)
455                 return -EINVAL;
456
457         mutex_lock(&hwmgr->smu_lock);
458         ret = hwmgr_handle_task(hwmgr, task_id, user_state);
459         mutex_unlock(&hwmgr->smu_lock);
460
461         return ret;
462 }
463
464 static enum amd_pm_state_type pp_dpm_get_current_power_state(void *handle)
465 {
466         struct pp_hwmgr *hwmgr = handle;
467         struct pp_power_state *state;
468         enum amd_pm_state_type pm_type;
469
470         if (!hwmgr || !hwmgr->pm_en || !hwmgr->current_ps)
471                 return -EINVAL;
472
473         mutex_lock(&hwmgr->smu_lock);
474
475         state = hwmgr->current_ps;
476
477         switch (state->classification.ui_label) {
478         case PP_StateUILabel_Battery:
479                 pm_type = POWER_STATE_TYPE_BATTERY;
480                 break;
481         case PP_StateUILabel_Balanced:
482                 pm_type = POWER_STATE_TYPE_BALANCED;
483                 break;
484         case PP_StateUILabel_Performance:
485                 pm_type = POWER_STATE_TYPE_PERFORMANCE;
486                 break;
487         default:
488                 if (state->classification.flags & PP_StateClassificationFlag_Boot)
489                         pm_type = POWER_STATE_TYPE_INTERNAL_BOOT;
490                 else
491                         pm_type = POWER_STATE_TYPE_DEFAULT;
492                 break;
493         }
494         mutex_unlock(&hwmgr->smu_lock);
495
496         return pm_type;
497 }
498
499 static void pp_dpm_set_fan_control_mode(void *handle, uint32_t mode)
500 {
501         struct pp_hwmgr *hwmgr = handle;
502
503         if (!hwmgr || !hwmgr->pm_en)
504                 return;
505
506         if (hwmgr->hwmgr_func->set_fan_control_mode == NULL) {
507                 pr_info_ratelimited("%s was not implemented.\n", __func__);
508                 return;
509         }
510         mutex_lock(&hwmgr->smu_lock);
511         hwmgr->hwmgr_func->set_fan_control_mode(hwmgr, mode);
512         mutex_unlock(&hwmgr->smu_lock);
513 }
514
515 static uint32_t pp_dpm_get_fan_control_mode(void *handle)
516 {
517         struct pp_hwmgr *hwmgr = handle;
518         uint32_t mode = 0;
519
520         if (!hwmgr || !hwmgr->pm_en)
521                 return 0;
522
523         if (hwmgr->hwmgr_func->get_fan_control_mode == NULL) {
524                 pr_info_ratelimited("%s was not implemented.\n", __func__);
525                 return 0;
526         }
527         mutex_lock(&hwmgr->smu_lock);
528         mode = hwmgr->hwmgr_func->get_fan_control_mode(hwmgr);
529         mutex_unlock(&hwmgr->smu_lock);
530         return mode;
531 }
532
533 static int pp_dpm_set_fan_speed_percent(void *handle, uint32_t percent)
534 {
535         struct pp_hwmgr *hwmgr = handle;
536         int ret = 0;
537
538         if (!hwmgr || !hwmgr->pm_en)
539                 return -EINVAL;
540
541         if (hwmgr->hwmgr_func->set_fan_speed_percent == NULL) {
542                 pr_info_ratelimited("%s was not implemented.\n", __func__);
543                 return 0;
544         }
545         mutex_lock(&hwmgr->smu_lock);
546         ret = hwmgr->hwmgr_func->set_fan_speed_percent(hwmgr, percent);
547         mutex_unlock(&hwmgr->smu_lock);
548         return ret;
549 }
550
551 static int pp_dpm_get_fan_speed_percent(void *handle, uint32_t *speed)
552 {
553         struct pp_hwmgr *hwmgr = handle;
554         int ret = 0;
555
556         if (!hwmgr || !hwmgr->pm_en)
557                 return -EINVAL;
558
559         if (hwmgr->hwmgr_func->get_fan_speed_percent == NULL) {
560                 pr_info_ratelimited("%s was not implemented.\n", __func__);
561                 return 0;
562         }
563
564         mutex_lock(&hwmgr->smu_lock);
565         ret = hwmgr->hwmgr_func->get_fan_speed_percent(hwmgr, speed);
566         mutex_unlock(&hwmgr->smu_lock);
567         return ret;
568 }
569
570 static int pp_dpm_get_fan_speed_rpm(void *handle, uint32_t *rpm)
571 {
572         struct pp_hwmgr *hwmgr = handle;
573         int ret = 0;
574
575         if (!hwmgr || !hwmgr->pm_en)
576                 return -EINVAL;
577
578         if (hwmgr->hwmgr_func->get_fan_speed_rpm == NULL)
579                 return -EINVAL;
580
581         mutex_lock(&hwmgr->smu_lock);
582         ret = hwmgr->hwmgr_func->get_fan_speed_rpm(hwmgr, rpm);
583         mutex_unlock(&hwmgr->smu_lock);
584         return ret;
585 }
586
587 static int pp_dpm_set_fan_speed_rpm(void *handle, uint32_t rpm)
588 {
589         struct pp_hwmgr *hwmgr = handle;
590         int ret = 0;
591
592         if (!hwmgr || !hwmgr->pm_en)
593                 return -EINVAL;
594
595         if (hwmgr->hwmgr_func->set_fan_speed_rpm == NULL) {
596                 pr_info_ratelimited("%s was not implemented.\n", __func__);
597                 return 0;
598         }
599         mutex_lock(&hwmgr->smu_lock);
600         ret = hwmgr->hwmgr_func->set_fan_speed_rpm(hwmgr, rpm);
601         mutex_unlock(&hwmgr->smu_lock);
602         return ret;
603 }
604
605 static int pp_dpm_get_pp_num_states(void *handle,
606                 struct pp_states_info *data)
607 {
608         struct pp_hwmgr *hwmgr = handle;
609         int i;
610
611         memset(data, 0, sizeof(*data));
612
613         if (!hwmgr || !hwmgr->pm_en || !hwmgr->ps)
614                 return -EINVAL;
615
616         mutex_lock(&hwmgr->smu_lock);
617
618         data->nums = hwmgr->num_ps;
619
620         for (i = 0; i < hwmgr->num_ps; i++) {
621                 struct pp_power_state *state = (struct pp_power_state *)
622                                 ((unsigned long)hwmgr->ps + i * hwmgr->ps_size);
623                 switch (state->classification.ui_label) {
624                 case PP_StateUILabel_Battery:
625                         data->states[i] = POWER_STATE_TYPE_BATTERY;
626                         break;
627                 case PP_StateUILabel_Balanced:
628                         data->states[i] = POWER_STATE_TYPE_BALANCED;
629                         break;
630                 case PP_StateUILabel_Performance:
631                         data->states[i] = POWER_STATE_TYPE_PERFORMANCE;
632                         break;
633                 default:
634                         if (state->classification.flags & PP_StateClassificationFlag_Boot)
635                                 data->states[i] = POWER_STATE_TYPE_INTERNAL_BOOT;
636                         else
637                                 data->states[i] = POWER_STATE_TYPE_DEFAULT;
638                 }
639         }
640         mutex_unlock(&hwmgr->smu_lock);
641         return 0;
642 }
643
644 static int pp_dpm_get_pp_table(void *handle, char **table)
645 {
646         struct pp_hwmgr *hwmgr = handle;
647         int size = 0;
648
649         if (!hwmgr || !hwmgr->pm_en || !hwmgr->soft_pp_table)
650                 return -EINVAL;
651
652         mutex_lock(&hwmgr->smu_lock);
653         *table = (char *)hwmgr->soft_pp_table;
654         size = hwmgr->soft_pp_table_size;
655         mutex_unlock(&hwmgr->smu_lock);
656         return size;
657 }
658
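/* Tear down and re-initialize the hwmgr hardware state, then replay the
 * AMD_PP_TASK_COMPLETE_INIT task.
 */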
659 static int amd_powerplay_reset(void *handle)
660 {
661         struct pp_hwmgr *hwmgr = handle;
662         int ret;
663
664         ret = hwmgr_hw_fini(hwmgr);
665         if (ret)
666                 return ret;
667
668         ret = hwmgr_hw_init(hwmgr);
669         if (ret)
670                 return ret;
671
672         return hwmgr_handle_task(hwmgr, AMD_PP_TASK_COMPLETE_INIT, NULL);
673 }
674
675 static int pp_dpm_set_pp_table(void *handle, const char *buf, size_t size)
676 {
677         struct pp_hwmgr *hwmgr = handle;
678         int ret = -ENOMEM;
679
680         if (!hwmgr || !hwmgr->pm_en)
681                 return -EINVAL;
682
683         mutex_lock(&hwmgr->smu_lock);
684         if (!hwmgr->hardcode_pp_table) {
685                 hwmgr->hardcode_pp_table = kmemdup(hwmgr->soft_pp_table,
686                                                    hwmgr->soft_pp_table_size,
687                                                    GFP_KERNEL);
688                 if (!hwmgr->hardcode_pp_table)
689                         goto err;
690         }
691
692         memcpy(hwmgr->hardcode_pp_table, buf, size);
693
694         hwmgr->soft_pp_table = hwmgr->hardcode_pp_table;
695
696         ret = amd_powerplay_reset(handle);
697         if (ret)
698                 goto err;
699
700         if (hwmgr->hwmgr_func->avfs_control) {
701                 ret = hwmgr->hwmgr_func->avfs_control(hwmgr, false);
702                 if (ret)
703                         goto err;
704         }
705         mutex_unlock(&hwmgr->smu_lock);
706         return 0;
707 err:
708         mutex_unlock(&hwmgr->smu_lock);
709         return ret;
710 }
711
712 static int pp_dpm_force_clock_level(void *handle,
713                 enum pp_clock_type type, uint32_t mask)
714 {
715         struct pp_hwmgr *hwmgr = handle;
716         int ret = 0;
717
718         if (!hwmgr || !hwmgr->pm_en)
719                 return -EINVAL;
720
721         if (hwmgr->hwmgr_func->force_clock_level == NULL) {
722                 pr_info_ratelimited("%s was not implemented.\n", __func__);
723                 return 0;
724         }
725
726         if (hwmgr->dpm_level != AMD_DPM_FORCED_LEVEL_MANUAL) {
727                 pr_debug("force clock level is for dpm manual mode only.\n");
728                 return -EINVAL;
729         }
730
731         mutex_lock(&hwmgr->smu_lock);
732         ret = hwmgr->hwmgr_func->force_clock_level(hwmgr, type, mask);
733         mutex_unlock(&hwmgr->smu_lock);
734         return ret;
735 }
736
737 static int pp_dpm_print_clock_levels(void *handle,
738                 enum pp_clock_type type, char *buf)
739 {
740         struct pp_hwmgr *hwmgr = handle;
741         int ret = 0;
742
743         if (!hwmgr || !hwmgr->pm_en)
744                 return -EINVAL;
745
746         if (hwmgr->hwmgr_func->print_clock_levels == NULL) {
747                 pr_info_ratelimited("%s was not implemented.\n", __func__);
748                 return 0;
749         }
750         mutex_lock(&hwmgr->smu_lock);
751         ret = hwmgr->hwmgr_func->print_clock_levels(hwmgr, type, buf);
752         mutex_unlock(&hwmgr->smu_lock);
753         return ret;
754 }
755
756 static int pp_dpm_get_sclk_od(void *handle)
757 {
758         struct pp_hwmgr *hwmgr = handle;
759         int ret = 0;
760
761         if (!hwmgr || !hwmgr->pm_en)
762                 return -EINVAL;
763
764         if (hwmgr->hwmgr_func->get_sclk_od == NULL) {
765                 pr_info_ratelimited("%s was not implemented.\n", __func__);
766                 return 0;
767         }
768         mutex_lock(&hwmgr->smu_lock);
769         ret = hwmgr->hwmgr_func->get_sclk_od(hwmgr);
770         mutex_unlock(&hwmgr->smu_lock);
771         return ret;
772 }
773
774 static int pp_dpm_set_sclk_od(void *handle, uint32_t value)
775 {
776         struct pp_hwmgr *hwmgr = handle;
777         int ret = 0;
778
779         if (!hwmgr || !hwmgr->pm_en)
780                 return -EINVAL;
781
782         if (hwmgr->hwmgr_func->set_sclk_od == NULL) {
783                 pr_info_ratelimited("%s was not implemented.\n", __func__);
784                 return 0;
785         }
786
787         mutex_lock(&hwmgr->smu_lock);
788         ret = hwmgr->hwmgr_func->set_sclk_od(hwmgr, value);
789         mutex_unlock(&hwmgr->smu_lock);
790         return ret;
791 }
792
793 static int pp_dpm_get_mclk_od(void *handle)
794 {
795         struct pp_hwmgr *hwmgr = handle;
796         int ret = 0;
797
798         if (!hwmgr || !hwmgr->pm_en)
799                 return -EINVAL;
800
801         if (hwmgr->hwmgr_func->get_mclk_od == NULL) {
802                 pr_info_ratelimited("%s was not implemented.\n", __func__);
803                 return 0;
804         }
805         mutex_lock(&hwmgr->smu_lock);
806         ret = hwmgr->hwmgr_func->get_mclk_od(hwmgr);
807         mutex_unlock(&hwmgr->smu_lock);
808         return ret;
809 }
810
811 static int pp_dpm_set_mclk_od(void *handle, uint32_t value)
812 {
813         struct pp_hwmgr *hwmgr = handle;
814         int ret = 0;
815
816         if (!hwmgr || !hwmgr->pm_en)
817                 return -EINVAL;
818
819         if (hwmgr->hwmgr_func->set_mclk_od == NULL) {
820                 pr_info_ratelimited("%s was not implemented.\n", __func__);
821                 return 0;
822         }
823         mutex_lock(&hwmgr->smu_lock);
824         ret = hwmgr->hwmgr_func->set_mclk_od(hwmgr, value);
825         mutex_unlock(&hwmgr->smu_lock);
826         return ret;
827 }
828
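/*
 * Values cached in the hwmgr (stable pstate clocks, fan RPM limits) are
 * returned directly; all other sensors are forwarded to the backend
 * read_sensor hook under smu_lock.
 */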
829 static int pp_dpm_read_sensor(void *handle, int idx,
830                               void *value, int *size)
831 {
832         struct pp_hwmgr *hwmgr = handle;
833         int ret = 0;
834
835         if (!hwmgr || !hwmgr->pm_en || !value)
836                 return -EINVAL;
837
838         switch (idx) {
839         case AMDGPU_PP_SENSOR_STABLE_PSTATE_SCLK:
840                 *((uint32_t *)value) = hwmgr->pstate_sclk;
841                 return 0;
842         case AMDGPU_PP_SENSOR_STABLE_PSTATE_MCLK:
843                 *((uint32_t *)value) = hwmgr->pstate_mclk;
844                 return 0;
845         case AMDGPU_PP_SENSOR_MIN_FAN_RPM:
846                 *((uint32_t *)value) = hwmgr->thermal_controller.fanInfo.ulMinRPM;
847                 return 0;
848         case AMDGPU_PP_SENSOR_MAX_FAN_RPM:
849                 *((uint32_t *)value) = hwmgr->thermal_controller.fanInfo.ulMaxRPM;
850                 return 0;
851         default:
852                 mutex_lock(&hwmgr->smu_lock);
853                 ret = hwmgr->hwmgr_func->read_sensor(hwmgr, idx, value, size);
854                 mutex_unlock(&hwmgr->smu_lock);
855                 return ret;
856         }
857 }
858
859 static struct amd_vce_state*
860 pp_dpm_get_vce_clock_state(void *handle, unsigned idx)
861 {
862         struct pp_hwmgr *hwmgr = handle;
863
864         if (!hwmgr || !hwmgr->pm_en)
865                 return NULL;
866
867         if (idx < hwmgr->num_vce_state_tables)
868                 return &hwmgr->vce_states[idx];
869         return NULL;
870 }
871
872 static int pp_get_power_profile_mode(void *handle, char *buf)
873 {
874         struct pp_hwmgr *hwmgr = handle;
875
876         if (!hwmgr || !hwmgr->pm_en || !buf)
877                 return -EINVAL;
878
879         if (hwmgr->hwmgr_func->get_power_profile_mode == NULL) {
880                 pr_info_ratelimited("%s was not implemented.\n", __func__);
881                 return snprintf(buf, PAGE_SIZE, "\n");
882         }
883
884         return hwmgr->hwmgr_func->get_power_profile_mode(hwmgr, buf);
885 }
886
887 static int pp_set_power_profile_mode(void *handle, long *input, uint32_t size)
888 {
889         struct pp_hwmgr *hwmgr = handle;
890         int ret = -EINVAL;
891
892         if (!hwmgr || !hwmgr->pm_en)
893                 return ret;
894
895         if (hwmgr->hwmgr_func->set_power_profile_mode == NULL) {
896                 pr_info_ratelimited("%s was not implemented.\n", __func__);
897                 return ret;
898         }
899
900         if (hwmgr->dpm_level != AMD_DPM_FORCED_LEVEL_MANUAL) {
901                 pr_debug("power profile setting is for manual dpm mode only.\n");
902                 return ret;
903         }
904
905         mutex_lock(&hwmgr->smu_lock);
906         ret = hwmgr->hwmgr_func->set_power_profile_mode(hwmgr, input, size);
907         mutex_unlock(&hwmgr->smu_lock);
908         return ret;
909 }
910
911 static int pp_odn_edit_dpm_table(void *handle, uint32_t type, long *input, uint32_t size)
912 {
913         struct pp_hwmgr *hwmgr = handle;
914
915         if (!hwmgr || !hwmgr->pm_en)
916                 return -EINVAL;
917
918         if (hwmgr->hwmgr_func->odn_edit_dpm_table == NULL) {
919                 pr_info_ratelimited("%s was not implemented.\n", __func__);
920                 return -EINVAL;
921         }
922
923         return hwmgr->hwmgr_func->odn_edit_dpm_table(hwmgr, type, input, size);
924 }
925
926 static int pp_dpm_set_mp1_state(void *handle, enum pp_mp1_state mp1_state)
927 {
928         struct pp_hwmgr *hwmgr = handle;
929
930         if (!hwmgr)
931                 return -EINVAL;
932
933         if (!hwmgr->pm_en)
934                 return 0;
935
936         if (hwmgr->hwmgr_func->set_mp1_state)
937                 return hwmgr->hwmgr_func->set_mp1_state(hwmgr, mp1_state);
938
939         return 0;
940 }
941
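/*
 * Set or clear the workload bit for the given profile, pick the
 * highest-priority workload left in workload_mask, and apply it through
 * set_power_profile_mode() unless the DPM level is forced to manual.
 */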
942 static int pp_dpm_switch_power_profile(void *handle,
943                 enum PP_SMC_POWER_PROFILE type, bool en)
944 {
945         struct pp_hwmgr *hwmgr = handle;
946         long workload;
947         uint32_t index;
948
949         if (!hwmgr || !hwmgr->pm_en)
950                 return -EINVAL;
951
952         if (hwmgr->hwmgr_func->set_power_profile_mode == NULL) {
953                 pr_info_ratelimited("%s was not implemented.\n", __func__);
954                 return -EINVAL;
955         }
956
957         if (!(type < PP_SMC_POWER_PROFILE_CUSTOM))
958                 return -EINVAL;
959
960         mutex_lock(&hwmgr->smu_lock);
961
962         if (!en) {
963                 hwmgr->workload_mask &= ~(1 << hwmgr->workload_prority[type]);
964                 index = fls(hwmgr->workload_mask);
965                 index = index > 0 && index <= Workload_Policy_Max ? index - 1 : 0;
966                 workload = hwmgr->workload_setting[index];
967         } else {
968                 hwmgr->workload_mask |= (1 << hwmgr->workload_prority[type]);
969                 index = fls(hwmgr->workload_mask);
970                 index = index <= Workload_Policy_Max ? index - 1 : 0;
971                 workload = hwmgr->workload_setting[index];
972         }
973
974         if (type == PP_SMC_POWER_PROFILE_COMPUTE &&
975                 hwmgr->hwmgr_func->disable_power_features_for_compute_performance) {
976                         if (hwmgr->hwmgr_func->disable_power_features_for_compute_performance(hwmgr, en)) {
977                                 mutex_unlock(&hwmgr->smu_lock);
978                                 return -EINVAL;
979                         }
980         }
981
982         if (hwmgr->dpm_level != AMD_DPM_FORCED_LEVEL_MANUAL)
983                 hwmgr->hwmgr_func->set_power_profile_mode(hwmgr, &workload, 0);
984         mutex_unlock(&hwmgr->smu_lock);
985
986         return 0;
987 }
988
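/*
 * A limit of 0 restores the default power limit. Requests above the
 * default limit (scaled by TDPODLimit percent when overdrive is enabled)
 * are rejected.
 */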
989 static int pp_set_power_limit(void *handle, uint32_t limit)
990 {
991         struct pp_hwmgr *hwmgr = handle;
992         uint32_t max_power_limit;
993
994         if (!hwmgr || !hwmgr->pm_en)
995                 return -EINVAL;
996
997         if (hwmgr->hwmgr_func->set_power_limit == NULL) {
998                 pr_info_ratelimited("%s was not implemented.\n", __func__);
999                 return -EINVAL;
1000         }
1001
1002         if (limit == 0)
1003                 limit = hwmgr->default_power_limit;
1004
1005         max_power_limit = hwmgr->default_power_limit;
1006         if (hwmgr->od_enabled) {
1007                 max_power_limit *= (100 + hwmgr->platform_descriptor.TDPODLimit);
1008                 max_power_limit /= 100;
1009         }
1010
1011         if (limit > max_power_limit)
1012                 return -EINVAL;
1013
1014         mutex_lock(&hwmgr->smu_lock);
1015         hwmgr->hwmgr_func->set_power_limit(hwmgr, limit);
1016         hwmgr->power_limit = limit;
1017         mutex_unlock(&hwmgr->smu_lock);
1018         return 0;
1019 }
1020
1021 static int pp_get_power_limit(void *handle, uint32_t *limit, bool default_limit)
1022 {
1023         struct pp_hwmgr *hwmgr = handle;
1024
1025         if (!hwmgr || !hwmgr->pm_en || !limit)
1026                 return -EINVAL;
1027
1028         mutex_lock(&hwmgr->smu_lock);
1029
1030         if (default_limit) {
1031                 *limit = hwmgr->default_power_limit;
1032                 if (hwmgr->od_enabled) {
1033                         *limit *= (100 + hwmgr->platform_descriptor.TDPODLimit);
1034                         *limit /= 100;
1035                 }
1036         }
1037         else
1038                 *limit = hwmgr->power_limit;
1039
1040         mutex_unlock(&hwmgr->smu_lock);
1041
1042         return 0;
1043 }
1044
1045 static int pp_display_configuration_change(void *handle,
1046         const struct amd_pp_display_configuration *display_config)
1047 {
1048         struct pp_hwmgr *hwmgr = handle;
1049
1050         if (!hwmgr || !hwmgr->pm_en)
1051                 return -EINVAL;
1052
1053         mutex_lock(&hwmgr->smu_lock);
1054         phm_store_dal_configuration_data(hwmgr, display_config);
1055         mutex_unlock(&hwmgr->smu_lock);
1056         return 0;
1057 }
1058
1059 static int pp_get_display_power_level(void *handle,
1060                 struct amd_pp_simple_clock_info *output)
1061 {
1062         struct pp_hwmgr *hwmgr = handle;
1063         int ret = 0;
1064
1065         if (!hwmgr || !hwmgr->pm_en || !output)
1066                 return -EINVAL;
1067
1068         mutex_lock(&hwmgr->smu_lock);
1069         ret = phm_get_dal_power_level(hwmgr, output);
1070         mutex_unlock(&hwmgr->smu_lock);
1071         return ret;
1072 }
1073
1074 static int pp_get_current_clocks(void *handle,
1075                 struct amd_pp_clock_info *clocks)
1076 {
1077         struct amd_pp_simple_clock_info simple_clocks = { 0 };
1078         struct pp_clock_info hw_clocks;
1079         struct pp_hwmgr *hwmgr = handle;
1080         int ret = 0;
1081
1082         if (!hwmgr || !hwmgr->pm_en)
1083                 return -EINVAL;
1084
1085         mutex_lock(&hwmgr->smu_lock);
1086
1087         phm_get_dal_power_level(hwmgr, &simple_clocks);
1088
1089         if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
1090                                         PHM_PlatformCaps_PowerContainment))
1091                 ret = phm_get_clock_info(hwmgr, &hwmgr->current_ps->hardware,
1092                                         &hw_clocks, PHM_PerformanceLevelDesignation_PowerContainment);
1093         else
1094                 ret = phm_get_clock_info(hwmgr, &hwmgr->current_ps->hardware,
1095                                         &hw_clocks, PHM_PerformanceLevelDesignation_Activity);
1096
1097         if (ret) {
1098                 pr_debug("Error in phm_get_clock_info\n");
1099                 mutex_unlock(&hwmgr->smu_lock);
1100                 return -EINVAL;
1101         }
1102
1103         clocks->min_engine_clock = hw_clocks.min_eng_clk;
1104         clocks->max_engine_clock = hw_clocks.max_eng_clk;
1105         clocks->min_memory_clock = hw_clocks.min_mem_clk;
1106         clocks->max_memory_clock = hw_clocks.max_mem_clk;
1107         clocks->min_bus_bandwidth = hw_clocks.min_bus_bandwidth;
1108         clocks->max_bus_bandwidth = hw_clocks.max_bus_bandwidth;
1109
1110         clocks->max_engine_clock_in_sr = hw_clocks.max_eng_clk;
1111         clocks->min_engine_clock_in_sr = hw_clocks.min_eng_clk;
1112
1113         if (simple_clocks.level == 0)
1114                 clocks->max_clocks_state = PP_DAL_POWERLEVEL_7;
1115         else
1116                 clocks->max_clocks_state = simple_clocks.level;
1117
1118         if (0 == phm_get_current_shallow_sleep_clocks(hwmgr, &hwmgr->current_ps->hardware, &hw_clocks)) {
1119                 clocks->max_engine_clock_in_sr = hw_clocks.max_eng_clk;
1120                 clocks->min_engine_clock_in_sr = hw_clocks.min_eng_clk;
1121         }
1122         mutex_unlock(&hwmgr->smu_lock);
1123         return 0;
1124 }
1125
1126 static int pp_get_clock_by_type(void *handle, enum amd_pp_clock_type type, struct amd_pp_clocks *clocks)
1127 {
1128         struct pp_hwmgr *hwmgr = handle;
1129         int ret = 0;
1130
1131         if (!hwmgr || !hwmgr->pm_en)
1132                 return -EINVAL;
1133
1134         if (clocks == NULL)
1135                 return -EINVAL;
1136
1137         mutex_lock(&hwmgr->smu_lock);
1138         ret = phm_get_clock_by_type(hwmgr, type, clocks);
1139         mutex_unlock(&hwmgr->smu_lock);
1140         return ret;
1141 }
1142
1143 static int pp_get_clock_by_type_with_latency(void *handle,
1144                 enum amd_pp_clock_type type,
1145                 struct pp_clock_levels_with_latency *clocks)
1146 {
1147         struct pp_hwmgr *hwmgr = handle;
1148         int ret = 0;
1149
1150         if (!hwmgr || !hwmgr->pm_en || !clocks)
1151                 return -EINVAL;
1152
1153         mutex_lock(&hwmgr->smu_lock);
1154         ret = phm_get_clock_by_type_with_latency(hwmgr, type, clocks);
1155         mutex_unlock(&hwmgr->smu_lock);
1156         return ret;
1157 }
1158
1159 static int pp_get_clock_by_type_with_voltage(void *handle,
1160                 enum amd_pp_clock_type type,
1161                 struct pp_clock_levels_with_voltage *clocks)
1162 {
1163         struct pp_hwmgr *hwmgr = handle;
1164         int ret = 0;
1165
1166         if (!hwmgr || !hwmgr->pm_en || !clocks)
1167                 return -EINVAL;
1168
1169         mutex_lock(&hwmgr->smu_lock);
1170
1171         ret = phm_get_clock_by_type_with_voltage(hwmgr, type, clocks);
1172
1173         mutex_unlock(&hwmgr->smu_lock);
1174         return ret;
1175 }
1176
1177 static int pp_set_watermarks_for_clocks_ranges(void *handle,
1178                 void *clock_ranges)
1179 {
1180         struct pp_hwmgr *hwmgr = handle;
1181         int ret = 0;
1182
1183         if (!hwmgr || !hwmgr->pm_en || !clock_ranges)
1184                 return -EINVAL;
1185
1186         mutex_lock(&hwmgr->smu_lock);
1187         ret = phm_set_watermarks_for_clocks_ranges(hwmgr,
1188                         clock_ranges);
1189         mutex_unlock(&hwmgr->smu_lock);
1190
1191         return ret;
1192 }
1193
1194 static int pp_display_clock_voltage_request(void *handle,
1195                 struct pp_display_clock_request *clock)
1196 {
1197         struct pp_hwmgr *hwmgr = handle;
1198         int ret = 0;
1199
1200         if (!hwmgr || !hwmgr->pm_en || !clock)
1201                 return -EINVAL;
1202
1203         mutex_lock(&hwmgr->smu_lock);
1204         ret = phm_display_clock_voltage_request(hwmgr, clock);
1205         mutex_unlock(&hwmgr->smu_lock);
1206
1207         return ret;
1208 }
1209
1210 static int pp_get_display_mode_validation_clocks(void *handle,
1211                 struct amd_pp_simple_clock_info *clocks)
1212 {
1213         struct pp_hwmgr *hwmgr = handle;
1214         int ret = 0;
1215
1216         if (!hwmgr || !hwmgr->pm_en || !clocks)
1217                 return -EINVAL;
1218
1219         clocks->level = PP_DAL_POWERLEVEL_7;
1220
1221         mutex_lock(&hwmgr->smu_lock);
1222
1223         if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_DynamicPatchPowerState))
1224                 ret = phm_get_max_high_clocks(hwmgr, clocks);
1225
1226         mutex_unlock(&hwmgr->smu_lock);
1227         return ret;
1228 }
1229
1230 static int pp_dpm_powergate_mmhub(void *handle)
1231 {
1232         struct pp_hwmgr *hwmgr = handle;
1233
1234         if (!hwmgr || !hwmgr->pm_en)
1235                 return -EINVAL;
1236
1237         if (hwmgr->hwmgr_func->powergate_mmhub == NULL) {
1238                 pr_info_ratelimited("%s was not implemented.\n", __func__);
1239                 return 0;
1240         }
1241
1242         return hwmgr->hwmgr_func->powergate_mmhub(hwmgr);
1243 }
1244
1245 static int pp_dpm_powergate_gfx(void *handle, bool gate)
1246 {
1247         struct pp_hwmgr *hwmgr = handle;
1248
1249         if (!hwmgr || !hwmgr->pm_en)
1250                 return 0;
1251
1252         if (hwmgr->hwmgr_func->powergate_gfx == NULL) {
1253                 pr_info_ratelimited("%s was not implemented.\n", __func__);
1254                 return 0;
1255         }
1256
1257         return hwmgr->hwmgr_func->powergate_gfx(hwmgr, gate);
1258 }
1259
1260 static void pp_dpm_powergate_acp(void *handle, bool gate)
1261 {
1262         struct pp_hwmgr *hwmgr = handle;
1263
1264         if (!hwmgr || !hwmgr->pm_en)
1265                 return;
1266
1267         if (hwmgr->hwmgr_func->powergate_acp == NULL) {
1268                 pr_info_ratelimited("%s was not implemented.\n", __func__);
1269                 return;
1270         }
1271
1272         hwmgr->hwmgr_func->powergate_acp(hwmgr, gate);
1273 }
1274
1275 static void pp_dpm_powergate_sdma(void *handle, bool gate)
1276 {
1277         struct pp_hwmgr *hwmgr = handle;
1278
1279         if (!hwmgr)
1280                 return;
1281
1282         if (hwmgr->hwmgr_func->powergate_sdma == NULL) {
1283                 pr_info_ratelimited("%s was not implemented.\n", __func__);
1284                 return;
1285         }
1286
1287         hwmgr->hwmgr_func->powergate_sdma(hwmgr, gate);
1288 }
1289
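/*
 * Dispatch a powergating request from the common amdgpu code to the
 * matching per-IP powerplay helper; only the GFX path propagates an error
 * code back to the caller.
 */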
1290 static int pp_set_powergating_by_smu(void *handle,
1291                                 uint32_t block_type, bool gate)
1292 {
1293         int ret = 0;
1294
1295         switch (block_type) {
1296         case AMD_IP_BLOCK_TYPE_UVD:
1297         case AMD_IP_BLOCK_TYPE_VCN:
1298                 pp_dpm_powergate_uvd(handle, gate);
1299                 break;
1300         case AMD_IP_BLOCK_TYPE_VCE:
1301                 pp_dpm_powergate_vce(handle, gate);
1302                 break;
1303         case AMD_IP_BLOCK_TYPE_GMC:
1304                 pp_dpm_powergate_mmhub(handle);
1305                 break;
1306         case AMD_IP_BLOCK_TYPE_GFX:
1307                 ret = pp_dpm_powergate_gfx(handle, gate);
1308                 break;
1309         case AMD_IP_BLOCK_TYPE_ACP:
1310                 pp_dpm_powergate_acp(handle, gate);
1311                 break;
1312         case AMD_IP_BLOCK_TYPE_SDMA:
1313                 pp_dpm_powergate_sdma(handle, gate);
1314                 break;
1315         default:
1316                 break;
1317         }
1318         return ret;
1319 }
1320
1321 static int pp_notify_smu_enable_pwe(void *handle)
1322 {
1323         struct pp_hwmgr *hwmgr = handle;
1324
1325         if (!hwmgr || !hwmgr->pm_en)
1326                 return -EINVAL;
1327
1328         if (hwmgr->hwmgr_func->smus_notify_pwe == NULL) {
1329                 pr_info_ratelimited("%s was not implemented.\n", __func__);
1330                 return -EINVAL;
1331         }
1332
1333         mutex_lock(&hwmgr->smu_lock);
1334         hwmgr->hwmgr_func->smus_notify_pwe(hwmgr);
1335         mutex_unlock(&hwmgr->smu_lock);
1336
1337         return 0;
1338 }
1339
1340 static int pp_enable_mgpu_fan_boost(void *handle)
1341 {
1342         struct pp_hwmgr *hwmgr = handle;
1343
1344         if (!hwmgr)
1345                 return -EINVAL;
1346
1347         if (!hwmgr->pm_en ||
1348              hwmgr->hwmgr_func->enable_mgpu_fan_boost == NULL)
1349                 return 0;
1350
1351         mutex_lock(&hwmgr->smu_lock);
1352         hwmgr->hwmgr_func->enable_mgpu_fan_boost(hwmgr);
1353         mutex_unlock(&hwmgr->smu_lock);
1354
1355         return 0;
1356 }
1357
1358 static int pp_set_min_deep_sleep_dcefclk(void *handle, uint32_t clock)
1359 {
1360         struct pp_hwmgr *hwmgr = handle;
1361
1362         if (!hwmgr || !hwmgr->pm_en)
1363                 return -EINVAL;
1364
1365         if (hwmgr->hwmgr_func->set_min_deep_sleep_dcefclk == NULL) {
1366                 pr_debug("%s was not implemented.\n", __func__);
1367                 return -EINVAL;
1368         }
1369
1370         mutex_lock(&hwmgr->smu_lock);
1371         hwmgr->hwmgr_func->set_min_deep_sleep_dcefclk(hwmgr, clock);
1372         mutex_unlock(&hwmgr->smu_lock);
1373
1374         return 0;
1375 }
1376
1377 static int pp_set_hard_min_dcefclk_by_freq(void *handle, uint32_t clock)
1378 {
1379         struct pp_hwmgr *hwmgr = handle;
1380
1381         if (!hwmgr || !hwmgr->pm_en)
1382                 return -EINVAL;
1383
1384         if (hwmgr->hwmgr_func->set_hard_min_dcefclk_by_freq == NULL) {
1385                 pr_debug("%s was not implemented.\n", __func__);
1386                 return -EINVAL;
1387         }
1388
1389         mutex_lock(&hwmgr->smu_lock);
1390         hwmgr->hwmgr_func->set_hard_min_dcefclk_by_freq(hwmgr, clock);
1391         mutex_unlock(&hwmgr->smu_lock);
1392
1393         return 0;
1394 }
1395
1396 static int pp_set_hard_min_fclk_by_freq(void *handle, uint32_t clock)
1397 {
1398         struct pp_hwmgr *hwmgr = handle;
1399
1400         if (!hwmgr || !hwmgr->pm_en)
1401                 return -EINVAL;
1402
1403         if (hwmgr->hwmgr_func->set_hard_min_fclk_by_freq == NULL) {
1404                 pr_debug("%s was not implemented.\n", __func__);
1405                 return -EINVAL;
1406         }
1407
1408         mutex_lock(&hwmgr->smu_lock);
1409         hwmgr->hwmgr_func->set_hard_min_fclk_by_freq(hwmgr, clock);
1410         mutex_unlock(&hwmgr->smu_lock);
1411
1412         return 0;
1413 }
1414
1415 static int pp_set_active_display_count(void *handle, uint32_t count)
1416 {
1417         struct pp_hwmgr *hwmgr = handle;
1418         int ret = 0;
1419
1420         if (!hwmgr || !hwmgr->pm_en)
1421                 return -EINVAL;
1422
1423         mutex_lock(&hwmgr->smu_lock);
1424         ret = phm_set_active_display_count(hwmgr, count);
1425         mutex_unlock(&hwmgr->smu_lock);
1426
1427         return ret;
1428 }
1429
1430 static int pp_get_asic_baco_capability(void *handle, bool *cap)
1431 {
1432         struct pp_hwmgr *hwmgr = handle;
1433
1434         *cap = false;
1435         if (!hwmgr)
1436                 return -EINVAL;
1437
1438         if (!hwmgr->pm_en || !hwmgr->hwmgr_func->get_asic_baco_capability)
1439                 return 0;
1440
1441         mutex_lock(&hwmgr->smu_lock);
1442         hwmgr->hwmgr_func->get_asic_baco_capability(hwmgr, cap);
1443         mutex_unlock(&hwmgr->smu_lock);
1444
1445         return 0;
1446 }
1447
1448 static int pp_get_asic_baco_state(void *handle, int *state)
1449 {
1450         struct pp_hwmgr *hwmgr = handle;
1451
1452         if (!hwmgr)
1453                 return -EINVAL;
1454
1455         if (!(hwmgr->not_vf && amdgpu_dpm) ||
1456                 !hwmgr->hwmgr_func->get_asic_baco_state)
1457                 return 0;
1458
1459         mutex_lock(&hwmgr->smu_lock);
1460         hwmgr->hwmgr_func->get_asic_baco_state(hwmgr, (enum BACO_STATE *)state);
1461         mutex_unlock(&hwmgr->smu_lock);
1462
1463         return 0;
1464 }
1465
1466 static int pp_set_asic_baco_state(void *handle, int state)
1467 {
1468         struct pp_hwmgr *hwmgr = handle;
1469
1470         if (!hwmgr)
1471                 return -EINVAL;
1472
1473         if (!hwmgr->pm_en || !hwmgr->hwmgr_func->set_asic_baco_state)
1474                 return 0;
1475
1476         mutex_lock(&hwmgr->smu_lock);
1477         hwmgr->hwmgr_func->set_asic_baco_state(hwmgr, (enum BACO_STATE)state);
1478         mutex_unlock(&hwmgr->smu_lock);
1479
1480         return 0;
1481 }
1482
1483 static int pp_get_ppfeature_status(void *handle, char *buf)
1484 {
1485         struct pp_hwmgr *hwmgr = handle;
1486         int ret = 0;
1487
1488         if (!hwmgr || !hwmgr->pm_en || !buf)
1489                 return -EINVAL;
1490
1491         if (hwmgr->hwmgr_func->get_ppfeature_status == NULL) {
1492                 pr_info_ratelimited("%s was not implemented.\n", __func__);
1493                 return -EINVAL;
1494         }
1495
1496         mutex_lock(&hwmgr->smu_lock);
1497         ret = hwmgr->hwmgr_func->get_ppfeature_status(hwmgr, buf);
1498         mutex_unlock(&hwmgr->smu_lock);
1499
1500         return ret;
1501 }
1502
1503 static int pp_set_ppfeature_status(void *handle, uint64_t ppfeature_masks)
1504 {
1505         struct pp_hwmgr *hwmgr = handle;
1506         int ret = 0;
1507
1508         if (!hwmgr || !hwmgr->pm_en)
1509                 return -EINVAL;
1510
1511         if (hwmgr->hwmgr_func->set_ppfeature_status == NULL) {
1512                 pr_info_ratelimited("%s was not implemented.\n", __func__);
1513                 return -EINVAL;
1514         }
1515
1516         mutex_lock(&hwmgr->smu_lock);
1517         ret = hwmgr->hwmgr_func->set_ppfeature_status(hwmgr, ppfeature_masks);
1518         mutex_unlock(&hwmgr->smu_lock);
1519
1520         return ret;
1521 }
1522
1523 static int pp_asic_reset_mode_2(void *handle)
1524 {
1525         struct pp_hwmgr *hwmgr = handle;
1526         int ret = 0;
1527
1528         if (!hwmgr || !hwmgr->pm_en)
1529                 return -EINVAL;
1530
1531         if (hwmgr->hwmgr_func->asic_reset == NULL) {
1532                 pr_info_ratelimited("%s was not implemented.\n", __func__);
1533                 return -EINVAL;
1534         }
1535
1536         mutex_lock(&hwmgr->smu_lock);
1537         ret = hwmgr->hwmgr_func->asic_reset(hwmgr, SMU_ASIC_RESET_MODE_2);
1538         mutex_unlock(&hwmgr->smu_lock);
1539
1540         return ret;
1541 }
1542
1543 static int pp_smu_i2c_bus_access(void *handle, bool acquire)
1544 {
1545         struct pp_hwmgr *hwmgr = handle;
1546         int ret = 0;
1547
1548         if (!hwmgr || !hwmgr->pm_en)
1549                 return -EINVAL;
1550
1551         if (hwmgr->hwmgr_func->smu_i2c_bus_access == NULL) {
1552                 pr_info_ratelimited("%s was not implemented.\n", __func__);
1553                 return -EINVAL;
1554         }
1555
1556         mutex_lock(&hwmgr->smu_lock);
1557         ret = hwmgr->hwmgr_func->smu_i2c_bus_access(hwmgr, acquire);
1558         mutex_unlock(&hwmgr->smu_lock);
1559
1560         return ret;
1561 }
1562
1563 static int pp_set_df_cstate(void *handle, enum pp_df_cstate state)
1564 {
1565         struct pp_hwmgr *hwmgr = handle;
1566
1567         if (!hwmgr)
1568                 return -EINVAL;
1569
1570         if (!hwmgr->pm_en || !hwmgr->hwmgr_func->set_df_cstate)
1571                 return 0;
1572
1573         mutex_lock(&hwmgr->smu_lock);
1574         hwmgr->hwmgr_func->set_df_cstate(hwmgr, state);
1575         mutex_unlock(&hwmgr->smu_lock);
1576
1577         return 0;
1578 }
1579
1580 static int pp_set_xgmi_pstate(void *handle, uint32_t pstate)
1581 {
1582         struct pp_hwmgr *hwmgr = handle;
1583
1584         if (!hwmgr)
1585                 return -EINVAL;
1586
1587         if (!hwmgr->pm_en || !hwmgr->hwmgr_func->set_xgmi_pstate)
1588                 return 0;
1589
1590         mutex_lock(&hwmgr->smu_lock);
1591         hwmgr->hwmgr_func->set_xgmi_pstate(hwmgr, pstate);
1592         mutex_unlock(&hwmgr->smu_lock);
1593
1594         return 0;
1595 }
1596
1597 static const struct amd_pm_funcs pp_dpm_funcs = {
1598         .load_firmware = pp_dpm_load_fw,
1599         .wait_for_fw_loading_complete = pp_dpm_fw_loading_complete,
1600         .force_performance_level = pp_dpm_force_performance_level,
1601         .get_performance_level = pp_dpm_get_performance_level,
1602         .get_current_power_state = pp_dpm_get_current_power_state,
1603         .dispatch_tasks = pp_dpm_dispatch_tasks,
1604         .set_fan_control_mode = pp_dpm_set_fan_control_mode,
1605         .get_fan_control_mode = pp_dpm_get_fan_control_mode,
1606         .set_fan_speed_percent = pp_dpm_set_fan_speed_percent,
1607         .get_fan_speed_percent = pp_dpm_get_fan_speed_percent,
1608         .get_fan_speed_rpm = pp_dpm_get_fan_speed_rpm,
1609         .set_fan_speed_rpm = pp_dpm_set_fan_speed_rpm,
1610         .get_pp_num_states = pp_dpm_get_pp_num_states,
1611         .get_pp_table = pp_dpm_get_pp_table,
1612         .set_pp_table = pp_dpm_set_pp_table,
1613         .force_clock_level = pp_dpm_force_clock_level,
1614         .print_clock_levels = pp_dpm_print_clock_levels,
1615         .get_sclk_od = pp_dpm_get_sclk_od,
1616         .set_sclk_od = pp_dpm_set_sclk_od,
1617         .get_mclk_od = pp_dpm_get_mclk_od,
1618         .set_mclk_od = pp_dpm_set_mclk_od,
1619         .read_sensor = pp_dpm_read_sensor,
1620         .get_vce_clock_state = pp_dpm_get_vce_clock_state,
1621         .switch_power_profile = pp_dpm_switch_power_profile,
1622         .set_clockgating_by_smu = pp_set_clockgating_by_smu,
1623         .set_powergating_by_smu = pp_set_powergating_by_smu,
1624         .get_power_profile_mode = pp_get_power_profile_mode,
1625         .set_power_profile_mode = pp_set_power_profile_mode,
1626         .odn_edit_dpm_table = pp_odn_edit_dpm_table,
1627         .set_mp1_state = pp_dpm_set_mp1_state,
1628         .set_power_limit = pp_set_power_limit,
1629         .get_power_limit = pp_get_power_limit,
1630 /* export to DC */
1631         .get_sclk = pp_dpm_get_sclk,
1632         .get_mclk = pp_dpm_get_mclk,
1633         .display_configuration_change = pp_display_configuration_change,
1634         .get_display_power_level = pp_get_display_power_level,
1635         .get_current_clocks = pp_get_current_clocks,
1636         .get_clock_by_type = pp_get_clock_by_type,
1637         .get_clock_by_type_with_latency = pp_get_clock_by_type_with_latency,
1638         .get_clock_by_type_with_voltage = pp_get_clock_by_type_with_voltage,
1639         .set_watermarks_for_clocks_ranges = pp_set_watermarks_for_clocks_ranges,
1640         .display_clock_voltage_request = pp_display_clock_voltage_request,
1641         .get_display_mode_validation_clocks = pp_get_display_mode_validation_clocks,
1642         .notify_smu_enable_pwe = pp_notify_smu_enable_pwe,
1643         .enable_mgpu_fan_boost = pp_enable_mgpu_fan_boost,
1644         .set_active_display_count = pp_set_active_display_count,
1645         .set_min_deep_sleep_dcefclk = pp_set_min_deep_sleep_dcefclk,
1646         .set_hard_min_dcefclk_by_freq = pp_set_hard_min_dcefclk_by_freq,
1647         .set_hard_min_fclk_by_freq = pp_set_hard_min_fclk_by_freq,
1648         .get_asic_baco_capability = pp_get_asic_baco_capability,
1649         .get_asic_baco_state = pp_get_asic_baco_state,
1650         .set_asic_baco_state = pp_set_asic_baco_state,
1651         .get_ppfeature_status = pp_get_ppfeature_status,
1652         .set_ppfeature_status = pp_set_ppfeature_status,
1653         .asic_reset_mode_2 = pp_asic_reset_mode_2,
1654         .smu_i2c_bus_access = pp_smu_i2c_bus_access,
1655         .set_df_cstate = pp_set_df_cstate,
1656         .set_xgmi_pstate = pp_set_xgmi_pstate,
1657 };
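/*
 * Illustrative sketch only: amdgpu reaches the entry points in the table
 * above through the handle and function table installed by
 * amd_powerplay_create(), roughly
 *
 *      const struct amd_pm_funcs *funcs = adev->powerplay.pp_funcs;
 *      uint32_t sclk = funcs->get_sclk(adev->powerplay.pp_handle, false);
 *
 * with each callback validating the handle and pm_en before touching the
 * hardware.
 */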