/*
 * drivers/gpu/drm/amd/pm/powerplay/amd_powerplay.c
 * (snapshot from the drm-misc-next-fixes-2021-09-09 merge, linux-2.6-microblaze tree)
 */
1 /*
2  * Copyright 2015 Advanced Micro Devices, Inc.
3  *
4  * Permission is hereby granted, free of charge, to any person obtaining a
5  * copy of this software and associated documentation files (the "Software"),
6  * to deal in the Software without restriction, including without limitation
7  * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8  * and/or sell copies of the Software, and to permit persons to whom the
9  * Software is furnished to do so, subject to the following conditions:
10  *
11  * The above copyright notice and this permission notice shall be included in
12  * all copies or substantial portions of the Software.
13  *
14  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
17  * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18  * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19  * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20  * OTHER DEALINGS IN THE SOFTWARE.
21  *
22  */
23 #include "pp_debug.h"
24 #include <linux/types.h>
25 #include <linux/kernel.h>
26 #include <linux/gfp.h>
27 #include <linux/slab.h>
28 #include <linux/firmware.h>
29 #include "amd_shared.h"
30 #include "amd_powerplay.h"
31 #include "power_state.h"
32 #include "amdgpu.h"
33 #include "hwmgr.h"
34
35
36 static const struct amd_pm_funcs pp_dpm_funcs;
37
38 static int amd_powerplay_create(struct amdgpu_device *adev)
39 {
40         struct pp_hwmgr *hwmgr;
41
42         if (adev == NULL)
43                 return -EINVAL;
44
45         hwmgr = kzalloc(sizeof(struct pp_hwmgr), GFP_KERNEL);
46         if (hwmgr == NULL)
47                 return -ENOMEM;
48
49         hwmgr->adev = adev;
50         hwmgr->not_vf = !amdgpu_sriov_vf(adev);
51         hwmgr->device = amdgpu_cgs_create_device(adev);
52         mutex_init(&hwmgr->smu_lock);
53         mutex_init(&hwmgr->msg_lock);
54         hwmgr->chip_family = adev->family;
55         hwmgr->chip_id = adev->asic_type;
56         hwmgr->feature_mask = adev->pm.pp_feature;
57         hwmgr->display_config = &adev->pm.pm_display_cfg;
58         adev->powerplay.pp_handle = hwmgr;
59         adev->powerplay.pp_funcs = &pp_dpm_funcs;
60         return 0;
61 }
62
63
64 static void amd_powerplay_destroy(struct amdgpu_device *adev)
65 {
66         struct pp_hwmgr *hwmgr = adev->powerplay.pp_handle;
67
68         mutex_destroy(&hwmgr->msg_lock);
69
70         kfree(hwmgr->hardcode_pp_table);
71         hwmgr->hardcode_pp_table = NULL;
72
73         kfree(hwmgr);
74         hwmgr = NULL;
75 }
76
77 static int pp_early_init(void *handle)
78 {
79         int ret;
80         struct amdgpu_device *adev = handle;
81
82         ret = amd_powerplay_create(adev);
83
84         if (ret != 0)
85                 return ret;
86
87         ret = hwmgr_early_init(adev->powerplay.pp_handle);
88         if (ret)
89                 return -EINVAL;
90
91         return 0;
92 }
93
94 static int pp_sw_init(void *handle)
95 {
96         struct amdgpu_device *adev = handle;
97         struct pp_hwmgr *hwmgr = adev->powerplay.pp_handle;
98         int ret = 0;
99
100         ret = hwmgr_sw_init(hwmgr);
101
102         pr_debug("powerplay sw init %s\n", ret ? "failed" : "successfully");
103
104         return ret;
105 }
106
107 static int pp_sw_fini(void *handle)
108 {
109         struct amdgpu_device *adev = handle;
110         struct pp_hwmgr *hwmgr = adev->powerplay.pp_handle;
111
112         hwmgr_sw_fini(hwmgr);
113
114         release_firmware(adev->pm.fw);
115         adev->pm.fw = NULL;
116
117         return 0;
118 }
119
120 static int pp_hw_init(void *handle)
121 {
122         int ret = 0;
123         struct amdgpu_device *adev = handle;
124         struct pp_hwmgr *hwmgr = adev->powerplay.pp_handle;
125
126         ret = hwmgr_hw_init(hwmgr);
127
128         if (ret)
129                 pr_err("powerplay hw init failed\n");
130
131         return ret;
132 }
133
134 static int pp_hw_fini(void *handle)
135 {
136         struct amdgpu_device *adev = handle;
137         struct pp_hwmgr *hwmgr = adev->powerplay.pp_handle;
138
139         hwmgr_hw_fini(hwmgr);
140
141         return 0;
142 }
143
/*
 * Reserve a GTT buffer for the SMU and hand its CPU/GPU addresses to
 * the firmware via the notify_cac_buffer_info hook.
 *
 * Note: r starts at -EINVAL on purpose — if the hwmgr backend provides
 * no notify_cac_buffer_info hook, the freshly created buffer is useless
 * and the error path below frees it again.
 */
static void pp_reserve_vram_for_smu(struct amdgpu_device *adev)
{
	int r = -EINVAL;
	void *cpu_ptr = NULL;
	uint64_t gpu_addr;
	struct pp_hwmgr *hwmgr = adev->powerplay.pp_handle;

	if (amdgpu_bo_create_kernel(adev, adev->pm.smu_prv_buffer_size,
						PAGE_SIZE, AMDGPU_GEM_DOMAIN_GTT,
						&adev->pm.smu_prv_buffer,
						&gpu_addr,
						&cpu_ptr)) {
		DRM_ERROR("amdgpu: failed to create smu prv buffer\n");
		return;
	}

	/* Split the 64-bit CPU and GPU addresses into 32-bit halves for
	 * the firmware interface. */
	if (hwmgr->hwmgr_func->notify_cac_buffer_info)
		r = hwmgr->hwmgr_func->notify_cac_buffer_info(hwmgr,
					lower_32_bits((unsigned long)cpu_ptr),
					upper_32_bits((unsigned long)cpu_ptr),
					lower_32_bits(gpu_addr),
					upper_32_bits(gpu_addr),
					adev->pm.smu_prv_buffer_size);

	if (r) {
		/* Notification failed (or no hook): release the buffer. */
		amdgpu_bo_free_kernel(&adev->pm.smu_prv_buffer, NULL, NULL);
		adev->pm.smu_prv_buffer = NULL;
		DRM_ERROR("amdgpu: failed to notify SMU buffer address\n");
	}
}
174
175 static int pp_late_init(void *handle)
176 {
177         struct amdgpu_device *adev = handle;
178         struct pp_hwmgr *hwmgr = adev->powerplay.pp_handle;
179
180         if (hwmgr && hwmgr->pm_en) {
181                 mutex_lock(&hwmgr->smu_lock);
182                 hwmgr_handle_task(hwmgr,
183                                         AMD_PP_TASK_COMPLETE_INIT, NULL);
184                 mutex_unlock(&hwmgr->smu_lock);
185         }
186         if (adev->pm.smu_prv_buffer_size != 0)
187                 pp_reserve_vram_for_smu(adev);
188
189         return 0;
190 }
191
/* amd_ip_funcs.late_fini: free the SMU private buffer (if reserved by
 * pp_late_init) and destroy the powerplay context. */
static void pp_late_fini(void *handle)
{
	struct amdgpu_device *adev = handle;

	if (adev->pm.smu_prv_buffer)
		amdgpu_bo_free_kernel(&adev->pm.smu_prv_buffer, NULL, NULL);
	amd_powerplay_destroy(adev);
}
200
201
/* amd_ip_funcs.is_idle: powerplay never reports idle through this hook. */
static bool pp_is_idle(void *handle)
{
	return false;
}
206
/* amd_ip_funcs.wait_for_idle: nothing to wait for; always succeeds. */
static int pp_wait_for_idle(void *handle)
{
	return 0;
}
211
/* amd_ip_funcs.soft_reset: no soft-reset support; always succeeds. */
static int pp_sw_reset(void *handle)
{
	return 0;
}
216
/* amd_ip_funcs.set_powergating_state: no-op for the powerplay IP block. */
static int pp_set_powergating_state(void *handle,
				    enum amd_powergating_state state)
{
	return 0;
}
222
223 static int pp_suspend(void *handle)
224 {
225         struct amdgpu_device *adev = handle;
226         struct pp_hwmgr *hwmgr = adev->powerplay.pp_handle;
227
228         return hwmgr_suspend(hwmgr);
229 }
230
231 static int pp_resume(void *handle)
232 {
233         struct amdgpu_device *adev = handle;
234         struct pp_hwmgr *hwmgr = adev->powerplay.pp_handle;
235
236         return hwmgr_resume(hwmgr);
237 }
238
/* amd_ip_funcs.set_clockgating_state: no-op for the powerplay IP block. */
static int pp_set_clockgating_state(void *handle,
					  enum amd_clockgating_state state)
{
	return 0;
}
244
/* IP-block callback table registered with the amdgpu core for the
 * powerplay (SMC) IP block. */
static const struct amd_ip_funcs pp_ip_funcs = {
	.name = "powerplay",
	.early_init = pp_early_init,
	.late_init = pp_late_init,
	.sw_init = pp_sw_init,
	.sw_fini = pp_sw_fini,
	.hw_init = pp_hw_init,
	.hw_fini = pp_hw_fini,
	.late_fini = pp_late_fini,
	.suspend = pp_suspend,
	.resume = pp_resume,
	.is_idle = pp_is_idle,
	.wait_for_idle = pp_wait_for_idle,
	.soft_reset = pp_sw_reset,
	.set_clockgating_state = pp_set_clockgating_state,
	.set_powergating_state = pp_set_powergating_state,
};
262
/* Version descriptor exported so amdgpu can register the powerplay SMC
 * IP block (v1.0.0). */
const struct amdgpu_ip_block_version pp_smu_ip_block =
{
	.type = AMD_IP_BLOCK_TYPE_SMC,
	.major = 1,
	.minor = 0,
	.rev = 0,
	.funcs = &pp_ip_funcs,
};
271
272 /* This interface only be supported On Vi,
273  * because only smu7/8 can help to load gfx/sdma fw,
274  * smu need to be enabled before load other ip's fw.
275  * so call start smu to load smu7 fw and other ip's fw
276  */
/* Start the SMU so it can load gfx/sdma firmware (VI-era ASICs only;
 * see the comment above). Returns -EINVAL on a missing hook or on SMU
 * start failure. */
static int pp_dpm_load_fw(void *handle)
{
	struct pp_hwmgr *hwmgr = handle;

	if (!hwmgr || !hwmgr->smumgr_funcs || !hwmgr->smumgr_funcs->start_smu)
		return -EINVAL;

	if (hwmgr->smumgr_funcs->start_smu(hwmgr)) {
		pr_err("fw load failed\n");
		return -EINVAL;
	}

	return 0;
}
291
/* Firmware-loading-complete notification: nothing to do here. */
static int pp_dpm_fw_loading_complete(void *handle)
{
	return 0;
}
296
297 static int pp_set_clockgating_by_smu(void *handle, uint32_t msg_id)
298 {
299         struct pp_hwmgr *hwmgr = handle;
300
301         if (!hwmgr || !hwmgr->pm_en)
302                 return -EINVAL;
303
304         if (hwmgr->hwmgr_func->update_clock_gatings == NULL) {
305                 pr_info_ratelimited("%s was not implemented.\n", __func__);
306                 return 0;
307         }
308
309         return hwmgr->hwmgr_func->update_clock_gatings(hwmgr, &msg_id);
310 }
311
/*
 * Handle entry/exit of the UMD stable-pstate profile modes.
 *
 * On entering a profile mode the current dpm level is saved and GFX
 * power- then clock-gating are ungated; on leaving, the gating is
 * restored in the opposite order (CG first, then PG) and the saved
 * level is restored when the caller asked for PROFILE_EXIT. The
 * ordering of the gate/ungate calls is deliberate — do not reorder.
 */
static void pp_dpm_en_umd_pstate(struct pp_hwmgr  *hwmgr,
						enum amd_dpm_forced_level *level)
{
	uint32_t profile_mode_mask = AMD_DPM_FORCED_LEVEL_PROFILE_STANDARD |
					AMD_DPM_FORCED_LEVEL_PROFILE_MIN_SCLK |
					AMD_DPM_FORCED_LEVEL_PROFILE_MIN_MCLK |
					AMD_DPM_FORCED_LEVEL_PROFILE_PEAK;

	if (!(hwmgr->dpm_level & profile_mode_mask)) {
		/* enter umd pstate, save current level, disable gfx cg*/
		if (*level & profile_mode_mask) {
			hwmgr->saved_dpm_level = hwmgr->dpm_level;
			hwmgr->en_umd_pstate = true;
			amdgpu_device_ip_set_powergating_state(hwmgr->adev,
					AMD_IP_BLOCK_TYPE_GFX,
					AMD_PG_STATE_UNGATE);
			amdgpu_device_ip_set_clockgating_state(hwmgr->adev,
						AMD_IP_BLOCK_TYPE_GFX,
						AMD_CG_STATE_UNGATE);
		}
	} else {
		/* exit umd pstate, restore level, enable gfx cg*/
		if (!(*level & profile_mode_mask)) {
			if (*level == AMD_DPM_FORCED_LEVEL_PROFILE_EXIT)
				*level = hwmgr->saved_dpm_level;
			hwmgr->en_umd_pstate = false;
			amdgpu_device_ip_set_clockgating_state(hwmgr->adev,
					AMD_IP_BLOCK_TYPE_GFX,
					AMD_CG_STATE_GATE);
			amdgpu_device_ip_set_powergating_state(hwmgr->adev,
					AMD_IP_BLOCK_TYPE_GFX,
					AMD_PG_STATE_GATE);
		}
	}
}
347
348 static int pp_dpm_force_performance_level(void *handle,
349                                         enum amd_dpm_forced_level level)
350 {
351         struct pp_hwmgr *hwmgr = handle;
352
353         if (!hwmgr || !hwmgr->pm_en)
354                 return -EINVAL;
355
356         if (level == hwmgr->dpm_level)
357                 return 0;
358
359         mutex_lock(&hwmgr->smu_lock);
360         pp_dpm_en_umd_pstate(hwmgr, &level);
361         hwmgr->request_dpm_level = level;
362         hwmgr_handle_task(hwmgr, AMD_PP_TASK_READJUST_POWER_STATE, NULL);
363         mutex_unlock(&hwmgr->smu_lock);
364
365         return 0;
366 }
367
/* Return the currently forced dpm level.
 * NOTE(review): returns -EINVAL through an enum type when the handle is
 * invalid — callers appear to tolerate this, but it is worth confirming. */
static enum amd_dpm_forced_level pp_dpm_get_performance_level(
								void *handle)
{
	struct pp_hwmgr *hwmgr = handle;
	enum amd_dpm_forced_level level;

	if (!hwmgr || !hwmgr->pm_en)
		return -EINVAL;

	mutex_lock(&hwmgr->smu_lock);
	level = hwmgr->dpm_level;
	mutex_unlock(&hwmgr->smu_lock);
	return level;
}
382
383 static uint32_t pp_dpm_get_sclk(void *handle, bool low)
384 {
385         struct pp_hwmgr *hwmgr = handle;
386         uint32_t clk = 0;
387
388         if (!hwmgr || !hwmgr->pm_en)
389                 return 0;
390
391         if (hwmgr->hwmgr_func->get_sclk == NULL) {
392                 pr_info_ratelimited("%s was not implemented.\n", __func__);
393                 return 0;
394         }
395         mutex_lock(&hwmgr->smu_lock);
396         clk = hwmgr->hwmgr_func->get_sclk(hwmgr, low);
397         mutex_unlock(&hwmgr->smu_lock);
398         return clk;
399 }
400
401 static uint32_t pp_dpm_get_mclk(void *handle, bool low)
402 {
403         struct pp_hwmgr *hwmgr = handle;
404         uint32_t clk = 0;
405
406         if (!hwmgr || !hwmgr->pm_en)
407                 return 0;
408
409         if (hwmgr->hwmgr_func->get_mclk == NULL) {
410                 pr_info_ratelimited("%s was not implemented.\n", __func__);
411                 return 0;
412         }
413         mutex_lock(&hwmgr->smu_lock);
414         clk = hwmgr->hwmgr_func->get_mclk(hwmgr, low);
415         mutex_unlock(&hwmgr->smu_lock);
416         return clk;
417 }
418
419 static void pp_dpm_powergate_vce(void *handle, bool gate)
420 {
421         struct pp_hwmgr *hwmgr = handle;
422
423         if (!hwmgr || !hwmgr->pm_en)
424                 return;
425
426         if (hwmgr->hwmgr_func->powergate_vce == NULL) {
427                 pr_info_ratelimited("%s was not implemented.\n", __func__);
428                 return;
429         }
430         mutex_lock(&hwmgr->smu_lock);
431         hwmgr->hwmgr_func->powergate_vce(hwmgr, gate);
432         mutex_unlock(&hwmgr->smu_lock);
433 }
434
435 static void pp_dpm_powergate_uvd(void *handle, bool gate)
436 {
437         struct pp_hwmgr *hwmgr = handle;
438
439         if (!hwmgr || !hwmgr->pm_en)
440                 return;
441
442         if (hwmgr->hwmgr_func->powergate_uvd == NULL) {
443                 pr_info_ratelimited("%s was not implemented.\n", __func__);
444                 return;
445         }
446         mutex_lock(&hwmgr->smu_lock);
447         hwmgr->hwmgr_func->powergate_uvd(hwmgr, gate);
448         mutex_unlock(&hwmgr->smu_lock);
449 }
450
451 static int pp_dpm_dispatch_tasks(void *handle, enum amd_pp_task task_id,
452                 enum amd_pm_state_type *user_state)
453 {
454         int ret = 0;
455         struct pp_hwmgr *hwmgr = handle;
456
457         if (!hwmgr || !hwmgr->pm_en)
458                 return -EINVAL;
459
460         mutex_lock(&hwmgr->smu_lock);
461         ret = hwmgr_handle_task(hwmgr, task_id, user_state);
462         mutex_unlock(&hwmgr->smu_lock);
463
464         return ret;
465 }
466
/*
 * Map the current powerplay state's UI classification onto the generic
 * amd_pm_state_type values. States without a battery/balanced/
 * performance label fall back to BOOT (when flagged as the boot state)
 * or DEFAULT.
 */
static enum amd_pm_state_type pp_dpm_get_current_power_state(void *handle)
{
	struct pp_hwmgr *hwmgr = handle;
	struct pp_power_state *state;
	enum amd_pm_state_type pm_type;

	if (!hwmgr || !hwmgr->pm_en || !hwmgr->current_ps)
		return -EINVAL;

	mutex_lock(&hwmgr->smu_lock);

	state = hwmgr->current_ps;

	switch (state->classification.ui_label) {
	case PP_StateUILabel_Battery:
		pm_type = POWER_STATE_TYPE_BATTERY;
		break;
	case PP_StateUILabel_Balanced:
		pm_type = POWER_STATE_TYPE_BALANCED;
		break;
	case PP_StateUILabel_Performance:
		pm_type = POWER_STATE_TYPE_PERFORMANCE;
		break;
	default:
		if (state->classification.flags & PP_StateClassificationFlag_Boot)
			pm_type = POWER_STATE_TYPE_INTERNAL_BOOT;
		else
			pm_type = POWER_STATE_TYPE_DEFAULT;
		break;
	}
	mutex_unlock(&hwmgr->smu_lock);

	return pm_type;
}
501
502 static void pp_dpm_set_fan_control_mode(void *handle, uint32_t mode)
503 {
504         struct pp_hwmgr *hwmgr = handle;
505
506         if (!hwmgr || !hwmgr->pm_en)
507                 return;
508
509         if (hwmgr->hwmgr_func->set_fan_control_mode == NULL) {
510                 pr_info_ratelimited("%s was not implemented.\n", __func__);
511                 return;
512         }
513         mutex_lock(&hwmgr->smu_lock);
514         hwmgr->hwmgr_func->set_fan_control_mode(hwmgr, mode);
515         mutex_unlock(&hwmgr->smu_lock);
516 }
517
518 static uint32_t pp_dpm_get_fan_control_mode(void *handle)
519 {
520         struct pp_hwmgr *hwmgr = handle;
521         uint32_t mode = 0;
522
523         if (!hwmgr || !hwmgr->pm_en)
524                 return 0;
525
526         if (hwmgr->hwmgr_func->get_fan_control_mode == NULL) {
527                 pr_info_ratelimited("%s was not implemented.\n", __func__);
528                 return 0;
529         }
530         mutex_lock(&hwmgr->smu_lock);
531         mode = hwmgr->hwmgr_func->get_fan_control_mode(hwmgr);
532         mutex_unlock(&hwmgr->smu_lock);
533         return mode;
534 }
535
536 static int pp_dpm_set_fan_speed_pwm(void *handle, uint32_t speed)
537 {
538         struct pp_hwmgr *hwmgr = handle;
539         int ret = 0;
540
541         if (!hwmgr || !hwmgr->pm_en)
542                 return -EINVAL;
543
544         if (hwmgr->hwmgr_func->set_fan_speed_pwm == NULL) {
545                 pr_info_ratelimited("%s was not implemented.\n", __func__);
546                 return 0;
547         }
548         mutex_lock(&hwmgr->smu_lock);
549         ret = hwmgr->hwmgr_func->set_fan_speed_pwm(hwmgr, speed);
550         mutex_unlock(&hwmgr->smu_lock);
551         return ret;
552 }
553
554 static int pp_dpm_get_fan_speed_pwm(void *handle, uint32_t *speed)
555 {
556         struct pp_hwmgr *hwmgr = handle;
557         int ret = 0;
558
559         if (!hwmgr || !hwmgr->pm_en)
560                 return -EINVAL;
561
562         if (hwmgr->hwmgr_func->get_fan_speed_pwm == NULL) {
563                 pr_info_ratelimited("%s was not implemented.\n", __func__);
564                 return 0;
565         }
566
567         mutex_lock(&hwmgr->smu_lock);
568         ret = hwmgr->hwmgr_func->get_fan_speed_pwm(hwmgr, speed);
569         mutex_unlock(&hwmgr->smu_lock);
570         return ret;
571 }
572
573 static int pp_dpm_get_fan_speed_rpm(void *handle, uint32_t *rpm)
574 {
575         struct pp_hwmgr *hwmgr = handle;
576         int ret = 0;
577
578         if (!hwmgr || !hwmgr->pm_en)
579                 return -EINVAL;
580
581         if (hwmgr->hwmgr_func->get_fan_speed_rpm == NULL)
582                 return -EINVAL;
583
584         mutex_lock(&hwmgr->smu_lock);
585         ret = hwmgr->hwmgr_func->get_fan_speed_rpm(hwmgr, rpm);
586         mutex_unlock(&hwmgr->smu_lock);
587         return ret;
588 }
589
590 static int pp_dpm_set_fan_speed_rpm(void *handle, uint32_t rpm)
591 {
592         struct pp_hwmgr *hwmgr = handle;
593         int ret = 0;
594
595         if (!hwmgr || !hwmgr->pm_en)
596                 return -EINVAL;
597
598         if (hwmgr->hwmgr_func->set_fan_speed_rpm == NULL) {
599                 pr_info_ratelimited("%s was not implemented.\n", __func__);
600                 return 0;
601         }
602         mutex_lock(&hwmgr->smu_lock);
603         ret = hwmgr->hwmgr_func->set_fan_speed_rpm(hwmgr, rpm);
604         mutex_unlock(&hwmgr->smu_lock);
605         return ret;
606 }
607
/*
 * Fill @data with the number and generic type of all power states known
 * to the hwmgr. @data is zeroed first (before the hwmgr validity check,
 * so callers always see a clean struct even on -EINVAL).
 */
static int pp_dpm_get_pp_num_states(void *handle,
		struct pp_states_info *data)
{
	struct pp_hwmgr *hwmgr = handle;
	int i;

	memset(data, 0, sizeof(*data));

	if (!hwmgr || !hwmgr->pm_en ||!hwmgr->ps)
		return -EINVAL;

	mutex_lock(&hwmgr->smu_lock);

	data->nums = hwmgr->num_ps;

	for (i = 0; i < hwmgr->num_ps; i++) {
		/* hwmgr->ps is a packed array of ps_size-byte states. */
		struct pp_power_state *state = (struct pp_power_state *)
				((unsigned long)hwmgr->ps + i * hwmgr->ps_size);
		switch (state->classification.ui_label) {
		case PP_StateUILabel_Battery:
			data->states[i] = POWER_STATE_TYPE_BATTERY;
			break;
		case PP_StateUILabel_Balanced:
			data->states[i] = POWER_STATE_TYPE_BALANCED;
			break;
		case PP_StateUILabel_Performance:
			data->states[i] = POWER_STATE_TYPE_PERFORMANCE;
			break;
		default:
			if (state->classification.flags & PP_StateClassificationFlag_Boot)
				data->states[i] = POWER_STATE_TYPE_INTERNAL_BOOT;
			else
				data->states[i] = POWER_STATE_TYPE_DEFAULT;
		}
	}
	mutex_unlock(&hwmgr->smu_lock);
	return 0;
}
646
647 static int pp_dpm_get_pp_table(void *handle, char **table)
648 {
649         struct pp_hwmgr *hwmgr = handle;
650         int size = 0;
651
652         if (!hwmgr || !hwmgr->pm_en ||!hwmgr->soft_pp_table)
653                 return -EINVAL;
654
655         mutex_lock(&hwmgr->smu_lock);
656         *table = (char *)hwmgr->soft_pp_table;
657         size = hwmgr->soft_pp_table_size;
658         mutex_unlock(&hwmgr->smu_lock);
659         return size;
660 }
661
662 static int amd_powerplay_reset(void *handle)
663 {
664         struct pp_hwmgr *hwmgr = handle;
665         int ret;
666
667         ret = hwmgr_hw_fini(hwmgr);
668         if (ret)
669                 return ret;
670
671         ret = hwmgr_hw_init(hwmgr);
672         if (ret)
673                 return ret;
674
675         return hwmgr_handle_task(hwmgr, AMD_PP_TASK_COMPLETE_INIT, NULL);
676 }
677
678 static int pp_dpm_set_pp_table(void *handle, const char *buf, size_t size)
679 {
680         struct pp_hwmgr *hwmgr = handle;
681         int ret = -ENOMEM;
682
683         if (!hwmgr || !hwmgr->pm_en)
684                 return -EINVAL;
685
686         mutex_lock(&hwmgr->smu_lock);
687         if (!hwmgr->hardcode_pp_table) {
688                 hwmgr->hardcode_pp_table = kmemdup(hwmgr->soft_pp_table,
689                                                    hwmgr->soft_pp_table_size,
690                                                    GFP_KERNEL);
691                 if (!hwmgr->hardcode_pp_table)
692                         goto err;
693         }
694
695         memcpy(hwmgr->hardcode_pp_table, buf, size);
696
697         hwmgr->soft_pp_table = hwmgr->hardcode_pp_table;
698
699         ret = amd_powerplay_reset(handle);
700         if (ret)
701                 goto err;
702
703         if (hwmgr->hwmgr_func->avfs_control) {
704                 ret = hwmgr->hwmgr_func->avfs_control(hwmgr, false);
705                 if (ret)
706                         goto err;
707         }
708         mutex_unlock(&hwmgr->smu_lock);
709         return 0;
710 err:
711         mutex_unlock(&hwmgr->smu_lock);
712         return ret;
713 }
714
715 static int pp_dpm_force_clock_level(void *handle,
716                 enum pp_clock_type type, uint32_t mask)
717 {
718         struct pp_hwmgr *hwmgr = handle;
719         int ret = 0;
720
721         if (!hwmgr || !hwmgr->pm_en)
722                 return -EINVAL;
723
724         if (hwmgr->hwmgr_func->force_clock_level == NULL) {
725                 pr_info_ratelimited("%s was not implemented.\n", __func__);
726                 return 0;
727         }
728
729         if (hwmgr->dpm_level != AMD_DPM_FORCED_LEVEL_MANUAL) {
730                 pr_debug("force clock level is for dpm manual mode only.\n");
731                 return -EINVAL;
732         }
733
734         mutex_lock(&hwmgr->smu_lock);
735         ret = hwmgr->hwmgr_func->force_clock_level(hwmgr, type, mask);
736         mutex_unlock(&hwmgr->smu_lock);
737         return ret;
738 }
739
740 static int pp_dpm_print_clock_levels(void *handle,
741                 enum pp_clock_type type, char *buf)
742 {
743         struct pp_hwmgr *hwmgr = handle;
744         int ret = 0;
745
746         if (!hwmgr || !hwmgr->pm_en)
747                 return -EINVAL;
748
749         if (hwmgr->hwmgr_func->print_clock_levels == NULL) {
750                 pr_info_ratelimited("%s was not implemented.\n", __func__);
751                 return 0;
752         }
753         mutex_lock(&hwmgr->smu_lock);
754         ret = hwmgr->hwmgr_func->print_clock_levels(hwmgr, type, buf);
755         mutex_unlock(&hwmgr->smu_lock);
756         return ret;
757 }
758
759 static int pp_dpm_get_sclk_od(void *handle)
760 {
761         struct pp_hwmgr *hwmgr = handle;
762         int ret = 0;
763
764         if (!hwmgr || !hwmgr->pm_en)
765                 return -EINVAL;
766
767         if (hwmgr->hwmgr_func->get_sclk_od == NULL) {
768                 pr_info_ratelimited("%s was not implemented.\n", __func__);
769                 return 0;
770         }
771         mutex_lock(&hwmgr->smu_lock);
772         ret = hwmgr->hwmgr_func->get_sclk_od(hwmgr);
773         mutex_unlock(&hwmgr->smu_lock);
774         return ret;
775 }
776
777 static int pp_dpm_set_sclk_od(void *handle, uint32_t value)
778 {
779         struct pp_hwmgr *hwmgr = handle;
780         int ret = 0;
781
782         if (!hwmgr || !hwmgr->pm_en)
783                 return -EINVAL;
784
785         if (hwmgr->hwmgr_func->set_sclk_od == NULL) {
786                 pr_info_ratelimited("%s was not implemented.\n", __func__);
787                 return 0;
788         }
789
790         mutex_lock(&hwmgr->smu_lock);
791         ret = hwmgr->hwmgr_func->set_sclk_od(hwmgr, value);
792         mutex_unlock(&hwmgr->smu_lock);
793         return ret;
794 }
795
796 static int pp_dpm_get_mclk_od(void *handle)
797 {
798         struct pp_hwmgr *hwmgr = handle;
799         int ret = 0;
800
801         if (!hwmgr || !hwmgr->pm_en)
802                 return -EINVAL;
803
804         if (hwmgr->hwmgr_func->get_mclk_od == NULL) {
805                 pr_info_ratelimited("%s was not implemented.\n", __func__);
806                 return 0;
807         }
808         mutex_lock(&hwmgr->smu_lock);
809         ret = hwmgr->hwmgr_func->get_mclk_od(hwmgr);
810         mutex_unlock(&hwmgr->smu_lock);
811         return ret;
812 }
813
814 static int pp_dpm_set_mclk_od(void *handle, uint32_t value)
815 {
816         struct pp_hwmgr *hwmgr = handle;
817         int ret = 0;
818
819         if (!hwmgr || !hwmgr->pm_en)
820                 return -EINVAL;
821
822         if (hwmgr->hwmgr_func->set_mclk_od == NULL) {
823                 pr_info_ratelimited("%s was not implemented.\n", __func__);
824                 return 0;
825         }
826         mutex_lock(&hwmgr->smu_lock);
827         ret = hwmgr->hwmgr_func->set_mclk_od(hwmgr, value);
828         mutex_unlock(&hwmgr->smu_lock);
829         return ret;
830 }
831
/*
 * Read a sensor value into *value (*size in bytes on entry/exit).
 *
 * A few pseudo-sensors are served directly from cached hwmgr fields
 * without taking smu_lock; everything else goes to the backend's
 * read_sensor hook under the lock.
 *
 * NOTE(review): unlike the other dispatchers in this file, read_sensor
 * is called without a NULL check — presumably every backend implements
 * it; verify before relying on that.
 */
static int pp_dpm_read_sensor(void *handle, int idx,
			      void *value, int *size)
{
	struct pp_hwmgr *hwmgr = handle;
	int ret = 0;

	if (!hwmgr || !hwmgr->pm_en || !value)
		return -EINVAL;

	switch (idx) {
	case AMDGPU_PP_SENSOR_STABLE_PSTATE_SCLK:
		*((uint32_t *)value) = hwmgr->pstate_sclk;
		return 0;
	case AMDGPU_PP_SENSOR_STABLE_PSTATE_MCLK:
		*((uint32_t *)value) = hwmgr->pstate_mclk;
		return 0;
	case AMDGPU_PP_SENSOR_MIN_FAN_RPM:
		*((uint32_t *)value) = hwmgr->thermal_controller.fanInfo.ulMinRPM;
		return 0;
	case AMDGPU_PP_SENSOR_MAX_FAN_RPM:
		*((uint32_t *)value) = hwmgr->thermal_controller.fanInfo.ulMaxRPM;
		return 0;
	default:
		mutex_lock(&hwmgr->smu_lock);
		ret = hwmgr->hwmgr_func->read_sensor(hwmgr, idx, value, size);
		mutex_unlock(&hwmgr->smu_lock);
		return ret;
	}
}
861
862 static struct amd_vce_state*
863 pp_dpm_get_vce_clock_state(void *handle, unsigned idx)
864 {
865         struct pp_hwmgr *hwmgr = handle;
866
867         if (!hwmgr || !hwmgr->pm_en)
868                 return NULL;
869
870         if (idx < hwmgr->num_vce_state_tables)
871                 return &hwmgr->vce_states[idx];
872         return NULL;
873 }
874
875 static int pp_get_power_profile_mode(void *handle, char *buf)
876 {
877         struct pp_hwmgr *hwmgr = handle;
878
879         if (!hwmgr || !hwmgr->pm_en || !buf)
880                 return -EINVAL;
881
882         if (hwmgr->hwmgr_func->get_power_profile_mode == NULL) {
883                 pr_info_ratelimited("%s was not implemented.\n", __func__);
884                 return snprintf(buf, PAGE_SIZE, "\n");
885         }
886
887         return hwmgr->hwmgr_func->get_power_profile_mode(hwmgr, buf);
888 }
889
890 static int pp_set_power_profile_mode(void *handle, long *input, uint32_t size)
891 {
892         struct pp_hwmgr *hwmgr = handle;
893         int ret = -EINVAL;
894
895         if (!hwmgr || !hwmgr->pm_en)
896                 return ret;
897
898         if (hwmgr->hwmgr_func->set_power_profile_mode == NULL) {
899                 pr_info_ratelimited("%s was not implemented.\n", __func__);
900                 return ret;
901         }
902
903         if (hwmgr->dpm_level != AMD_DPM_FORCED_LEVEL_MANUAL) {
904                 pr_debug("power profile setting is for manual dpm mode only.\n");
905                 return ret;
906         }
907
908         mutex_lock(&hwmgr->smu_lock);
909         ret = hwmgr->hwmgr_func->set_power_profile_mode(hwmgr, input, size);
910         mutex_unlock(&hwmgr->smu_lock);
911         return ret;
912 }
913
914 static int pp_set_fine_grain_clk_vol(void *handle, uint32_t type, long *input, uint32_t size)
915 {
916         struct pp_hwmgr *hwmgr = handle;
917
918         if (!hwmgr || !hwmgr->pm_en)
919                 return -EINVAL;
920
921         if (hwmgr->hwmgr_func->set_fine_grain_clk_vol == NULL)
922                 return 0;
923
924         return hwmgr->hwmgr_func->set_fine_grain_clk_vol(hwmgr, type, input, size);
925 }
926
927 static int pp_odn_edit_dpm_table(void *handle, uint32_t type, long *input, uint32_t size)
928 {
929         struct pp_hwmgr *hwmgr = handle;
930
931         if (!hwmgr || !hwmgr->pm_en)
932                 return -EINVAL;
933
934         if (hwmgr->hwmgr_func->odn_edit_dpm_table == NULL) {
935                 pr_info_ratelimited("%s was not implemented.\n", __func__);
936                 return 0;
937         }
938
939         return hwmgr->hwmgr_func->odn_edit_dpm_table(hwmgr, type, input, size);
940 }
941
942 static int pp_dpm_set_mp1_state(void *handle, enum pp_mp1_state mp1_state)
943 {
944         struct pp_hwmgr *hwmgr = handle;
945
946         if (!hwmgr)
947                 return -EINVAL;
948
949         if (!hwmgr->pm_en)
950                 return 0;
951
952         if (hwmgr->hwmgr_func->set_mp1_state)
953                 return hwmgr->hwmgr_func->set_mp1_state(hwmgr, mp1_state);
954
955         return 0;
956 }
957
/*
 * pp_dpm_switch_power_profile - enable or disable one SMC power profile.
 *
 * Tracks requested profiles in hwmgr->workload_mask (one bit per
 * profile priority) and, unless the DPM level is MANUAL, programs the
 * workload setting of the highest-priority profile still requested.
 * CUSTOM and beyond are rejected; custom profiles go through
 * pp_set_power_profile_mode() instead.
 */
static int pp_dpm_switch_power_profile(void *handle,
		enum PP_SMC_POWER_PROFILE type, bool en)
{
	struct pp_hwmgr *hwmgr = handle;
	long workload;
	uint32_t index;

	if (!hwmgr || !hwmgr->pm_en)
		return -EINVAL;

	if (hwmgr->hwmgr_func->set_power_profile_mode == NULL) {
		pr_info_ratelimited("%s was not implemented.\n", __func__);
		return -EINVAL;
	}

	if (!(type < PP_SMC_POWER_PROFILE_CUSTOM))
		return -EINVAL;

	mutex_lock(&hwmgr->smu_lock);

	if (!en) {
		/* Clear this profile's bit, then fall back to the
		 * highest remaining bit (fls() is 1-based; 0 means the
		 * mask is empty).  "workload_prority" is the field's
		 * actual (misspelled) name in struct pp_hwmgr. */
		hwmgr->workload_mask &= ~(1 << hwmgr->workload_prority[type]);
		index = fls(hwmgr->workload_mask);
		index = index > 0 && index <= Workload_Policy_Max ? index - 1 : 0;
		workload = hwmgr->workload_setting[index];
	} else {
		/* Set this profile's bit and pick the highest-priority
		 * requested profile.  NOTE(review): unlike the disable
		 * path there is no "index > 0" clamp here — presumably
		 * safe because the bit just set guarantees a non-empty
		 * mask; confirm before restructuring. */
		hwmgr->workload_mask |= (1 << hwmgr->workload_prority[type]);
		index = fls(hwmgr->workload_mask);
		index = index <= Workload_Policy_Max ? index - 1 : 0;
		workload = hwmgr->workload_setting[index];
	}

	/* Compute workloads may additionally toggle power features; a
	 * backend failure aborts the switch. */
	if (type == PP_SMC_POWER_PROFILE_COMPUTE &&
		hwmgr->hwmgr_func->disable_power_features_for_compute_performance) {
			if (hwmgr->hwmgr_func->disable_power_features_for_compute_performance(hwmgr, en)) {
				mutex_unlock(&hwmgr->smu_lock);
				return -EINVAL;
			}
	}

	/* In MANUAL mode the user owns the profile; do not override it. */
	if (hwmgr->dpm_level != AMD_DPM_FORCED_LEVEL_MANUAL)
		hwmgr->hwmgr_func->set_power_profile_mode(hwmgr, &workload, 0);
	mutex_unlock(&hwmgr->smu_lock);

	return 0;
}
1004
1005 static int pp_set_power_limit(void *handle, uint32_t limit)
1006 {
1007         struct pp_hwmgr *hwmgr = handle;
1008         uint32_t max_power_limit;
1009
1010         if (!hwmgr || !hwmgr->pm_en)
1011                 return -EINVAL;
1012
1013         if (hwmgr->hwmgr_func->set_power_limit == NULL) {
1014                 pr_info_ratelimited("%s was not implemented.\n", __func__);
1015                 return -EINVAL;
1016         }
1017
1018         if (limit == 0)
1019                 limit = hwmgr->default_power_limit;
1020
1021         max_power_limit = hwmgr->default_power_limit;
1022         if (hwmgr->od_enabled) {
1023                 max_power_limit *= (100 + hwmgr->platform_descriptor.TDPODLimit);
1024                 max_power_limit /= 100;
1025         }
1026
1027         if (limit > max_power_limit)
1028                 return -EINVAL;
1029
1030         mutex_lock(&hwmgr->smu_lock);
1031         hwmgr->hwmgr_func->set_power_limit(hwmgr, limit);
1032         hwmgr->power_limit = limit;
1033         mutex_unlock(&hwmgr->smu_lock);
1034         return 0;
1035 }
1036
1037 static int pp_get_power_limit(void *handle, uint32_t *limit,
1038                               enum pp_power_limit_level pp_limit_level,
1039                               enum pp_power_type power_type)
1040 {
1041         struct pp_hwmgr *hwmgr = handle;
1042         int ret = 0;
1043
1044         if (!hwmgr || !hwmgr->pm_en ||!limit)
1045                 return -EINVAL;
1046
1047         if (power_type != PP_PWR_TYPE_SUSTAINED)
1048                 return -EOPNOTSUPP;
1049
1050         mutex_lock(&hwmgr->smu_lock);
1051
1052         switch (pp_limit_level) {
1053                 case PP_PWR_LIMIT_CURRENT:
1054                         *limit = hwmgr->power_limit;
1055                         break;
1056                 case PP_PWR_LIMIT_DEFAULT:
1057                         *limit = hwmgr->default_power_limit;
1058                         break;
1059                 case PP_PWR_LIMIT_MAX:
1060                         *limit = hwmgr->default_power_limit;
1061                         if (hwmgr->od_enabled) {
1062                                 *limit *= (100 + hwmgr->platform_descriptor.TDPODLimit);
1063                                 *limit /= 100;
1064                         }
1065                         break;
1066                 default:
1067                         ret = -EOPNOTSUPP;
1068                         break;
1069         }
1070
1071         mutex_unlock(&hwmgr->smu_lock);
1072
1073         return ret;
1074 }
1075
1076 static int pp_display_configuration_change(void *handle,
1077         const struct amd_pp_display_configuration *display_config)
1078 {
1079         struct pp_hwmgr *hwmgr = handle;
1080
1081         if (!hwmgr || !hwmgr->pm_en)
1082                 return -EINVAL;
1083
1084         mutex_lock(&hwmgr->smu_lock);
1085         phm_store_dal_configuration_data(hwmgr, display_config);
1086         mutex_unlock(&hwmgr->smu_lock);
1087         return 0;
1088 }
1089
1090 static int pp_get_display_power_level(void *handle,
1091                 struct amd_pp_simple_clock_info *output)
1092 {
1093         struct pp_hwmgr *hwmgr = handle;
1094         int ret = 0;
1095
1096         if (!hwmgr || !hwmgr->pm_en ||!output)
1097                 return -EINVAL;
1098
1099         mutex_lock(&hwmgr->smu_lock);
1100         ret = phm_get_dal_power_level(hwmgr, output);
1101         mutex_unlock(&hwmgr->smu_lock);
1102         return ret;
1103 }
1104
/*
 * pp_get_current_clocks - fill @clocks with the current power state's
 * engine/memory clock ranges and bus bandwidth for the display stack.
 *
 * Returns -EINVAL if the handle is bad, power management is disabled,
 * or phm_get_clock_info() fails.
 */
static int pp_get_current_clocks(void *handle,
		struct amd_pp_clock_info *clocks)
{
	struct amd_pp_simple_clock_info simple_clocks = { 0 };
	struct pp_clock_info hw_clocks;
	struct pp_hwmgr *hwmgr = handle;
	int ret = 0;

	if (!hwmgr || !hwmgr->pm_en)
		return -EINVAL;

	mutex_lock(&hwmgr->smu_lock);

	phm_get_dal_power_level(hwmgr, &simple_clocks);

	/* Pick the performance-level designation matching the platform's
	 * power-containment capability. */
	if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
					PHM_PlatformCaps_PowerContainment))
		ret = phm_get_clock_info(hwmgr, &hwmgr->current_ps->hardware,
					&hw_clocks, PHM_PerformanceLevelDesignation_PowerContainment);
	else
		ret = phm_get_clock_info(hwmgr, &hwmgr->current_ps->hardware,
					&hw_clocks, PHM_PerformanceLevelDesignation_Activity);

	if (ret) {
		pr_debug("Error in phm_get_clock_info \n");
		mutex_unlock(&hwmgr->smu_lock);
		return -EINVAL;
	}

	clocks->min_engine_clock = hw_clocks.min_eng_clk;
	clocks->max_engine_clock = hw_clocks.max_eng_clk;
	clocks->min_memory_clock = hw_clocks.min_mem_clk;
	clocks->max_memory_clock = hw_clocks.max_mem_clk;
	clocks->min_bus_bandwidth = hw_clocks.min_bus_bandwidth;
	clocks->max_bus_bandwidth = hw_clocks.max_bus_bandwidth;

	/* Self-refresh range defaults to the full engine range; refined
	 * below if shallow-sleep clocks are available. */
	clocks->max_engine_clock_in_sr = hw_clocks.max_eng_clk;
	clocks->min_engine_clock_in_sr = hw_clocks.min_eng_clk;

	/* NOTE(review): level 0 appears to mean "not reported" by DAL,
	 * hence the fallback to the deepest level — confirm. */
	if (simple_clocks.level == 0)
		clocks->max_clocks_state = PP_DAL_POWERLEVEL_7;
	else
		clocks->max_clocks_state = simple_clocks.level;

	if (0 == phm_get_current_shallow_sleep_clocks(hwmgr, &hwmgr->current_ps->hardware, &hw_clocks)) {
		clocks->max_engine_clock_in_sr = hw_clocks.max_eng_clk;
		clocks->min_engine_clock_in_sr = hw_clocks.min_eng_clk;
	}
	mutex_unlock(&hwmgr->smu_lock);
	return 0;
}
1156
1157 static int pp_get_clock_by_type(void *handle, enum amd_pp_clock_type type, struct amd_pp_clocks *clocks)
1158 {
1159         struct pp_hwmgr *hwmgr = handle;
1160         int ret = 0;
1161
1162         if (!hwmgr || !hwmgr->pm_en)
1163                 return -EINVAL;
1164
1165         if (clocks == NULL)
1166                 return -EINVAL;
1167
1168         mutex_lock(&hwmgr->smu_lock);
1169         ret = phm_get_clock_by_type(hwmgr, type, clocks);
1170         mutex_unlock(&hwmgr->smu_lock);
1171         return ret;
1172 }
1173
1174 static int pp_get_clock_by_type_with_latency(void *handle,
1175                 enum amd_pp_clock_type type,
1176                 struct pp_clock_levels_with_latency *clocks)
1177 {
1178         struct pp_hwmgr *hwmgr = handle;
1179         int ret = 0;
1180
1181         if (!hwmgr || !hwmgr->pm_en ||!clocks)
1182                 return -EINVAL;
1183
1184         mutex_lock(&hwmgr->smu_lock);
1185         ret = phm_get_clock_by_type_with_latency(hwmgr, type, clocks);
1186         mutex_unlock(&hwmgr->smu_lock);
1187         return ret;
1188 }
1189
1190 static int pp_get_clock_by_type_with_voltage(void *handle,
1191                 enum amd_pp_clock_type type,
1192                 struct pp_clock_levels_with_voltage *clocks)
1193 {
1194         struct pp_hwmgr *hwmgr = handle;
1195         int ret = 0;
1196
1197         if (!hwmgr || !hwmgr->pm_en ||!clocks)
1198                 return -EINVAL;
1199
1200         mutex_lock(&hwmgr->smu_lock);
1201
1202         ret = phm_get_clock_by_type_with_voltage(hwmgr, type, clocks);
1203
1204         mutex_unlock(&hwmgr->smu_lock);
1205         return ret;
1206 }
1207
1208 static int pp_set_watermarks_for_clocks_ranges(void *handle,
1209                 void *clock_ranges)
1210 {
1211         struct pp_hwmgr *hwmgr = handle;
1212         int ret = 0;
1213
1214         if (!hwmgr || !hwmgr->pm_en || !clock_ranges)
1215                 return -EINVAL;
1216
1217         mutex_lock(&hwmgr->smu_lock);
1218         ret = phm_set_watermarks_for_clocks_ranges(hwmgr,
1219                         clock_ranges);
1220         mutex_unlock(&hwmgr->smu_lock);
1221
1222         return ret;
1223 }
1224
1225 static int pp_display_clock_voltage_request(void *handle,
1226                 struct pp_display_clock_request *clock)
1227 {
1228         struct pp_hwmgr *hwmgr = handle;
1229         int ret = 0;
1230
1231         if (!hwmgr || !hwmgr->pm_en ||!clock)
1232                 return -EINVAL;
1233
1234         mutex_lock(&hwmgr->smu_lock);
1235         ret = phm_display_clock_voltage_request(hwmgr, clock);
1236         mutex_unlock(&hwmgr->smu_lock);
1237
1238         return ret;
1239 }
1240
1241 static int pp_get_display_mode_validation_clocks(void *handle,
1242                 struct amd_pp_simple_clock_info *clocks)
1243 {
1244         struct pp_hwmgr *hwmgr = handle;
1245         int ret = 0;
1246
1247         if (!hwmgr || !hwmgr->pm_en ||!clocks)
1248                 return -EINVAL;
1249
1250         clocks->level = PP_DAL_POWERLEVEL_7;
1251
1252         mutex_lock(&hwmgr->smu_lock);
1253
1254         if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_DynamicPatchPowerState))
1255                 ret = phm_get_max_high_clocks(hwmgr, clocks);
1256
1257         mutex_unlock(&hwmgr->smu_lock);
1258         return ret;
1259 }
1260
1261 static int pp_dpm_powergate_mmhub(void *handle)
1262 {
1263         struct pp_hwmgr *hwmgr = handle;
1264
1265         if (!hwmgr || !hwmgr->pm_en)
1266                 return -EINVAL;
1267
1268         if (hwmgr->hwmgr_func->powergate_mmhub == NULL) {
1269                 pr_info_ratelimited("%s was not implemented.\n", __func__);
1270                 return 0;
1271         }
1272
1273         return hwmgr->hwmgr_func->powergate_mmhub(hwmgr);
1274 }
1275
1276 static int pp_dpm_powergate_gfx(void *handle, bool gate)
1277 {
1278         struct pp_hwmgr *hwmgr = handle;
1279
1280         if (!hwmgr || !hwmgr->pm_en)
1281                 return 0;
1282
1283         if (hwmgr->hwmgr_func->powergate_gfx == NULL) {
1284                 pr_info_ratelimited("%s was not implemented.\n", __func__);
1285                 return 0;
1286         }
1287
1288         return hwmgr->hwmgr_func->powergate_gfx(hwmgr, gate);
1289 }
1290
1291 static void pp_dpm_powergate_acp(void *handle, bool gate)
1292 {
1293         struct pp_hwmgr *hwmgr = handle;
1294
1295         if (!hwmgr || !hwmgr->pm_en)
1296                 return;
1297
1298         if (hwmgr->hwmgr_func->powergate_acp == NULL) {
1299                 pr_info_ratelimited("%s was not implemented.\n", __func__);
1300                 return;
1301         }
1302
1303         hwmgr->hwmgr_func->powergate_acp(hwmgr, gate);
1304 }
1305
1306 static void pp_dpm_powergate_sdma(void *handle, bool gate)
1307 {
1308         struct pp_hwmgr *hwmgr = handle;
1309
1310         if (!hwmgr)
1311                 return;
1312
1313         if (hwmgr->hwmgr_func->powergate_sdma == NULL) {
1314                 pr_info_ratelimited("%s was not implemented.\n", __func__);
1315                 return;
1316         }
1317
1318         hwmgr->hwmgr_func->powergate_sdma(hwmgr, gate);
1319 }
1320
1321 static int pp_set_powergating_by_smu(void *handle,
1322                                 uint32_t block_type, bool gate)
1323 {
1324         int ret = 0;
1325
1326         switch (block_type) {
1327         case AMD_IP_BLOCK_TYPE_UVD:
1328         case AMD_IP_BLOCK_TYPE_VCN:
1329                 pp_dpm_powergate_uvd(handle, gate);
1330                 break;
1331         case AMD_IP_BLOCK_TYPE_VCE:
1332                 pp_dpm_powergate_vce(handle, gate);
1333                 break;
1334         case AMD_IP_BLOCK_TYPE_GMC:
1335                 pp_dpm_powergate_mmhub(handle);
1336                 break;
1337         case AMD_IP_BLOCK_TYPE_GFX:
1338                 ret = pp_dpm_powergate_gfx(handle, gate);
1339                 break;
1340         case AMD_IP_BLOCK_TYPE_ACP:
1341                 pp_dpm_powergate_acp(handle, gate);
1342                 break;
1343         case AMD_IP_BLOCK_TYPE_SDMA:
1344                 pp_dpm_powergate_sdma(handle, gate);
1345                 break;
1346         default:
1347                 break;
1348         }
1349         return ret;
1350 }
1351
1352 static int pp_notify_smu_enable_pwe(void *handle)
1353 {
1354         struct pp_hwmgr *hwmgr = handle;
1355
1356         if (!hwmgr || !hwmgr->pm_en)
1357                 return -EINVAL;
1358
1359         if (hwmgr->hwmgr_func->smus_notify_pwe == NULL) {
1360                 pr_info_ratelimited("%s was not implemented.\n", __func__);
1361                 return -EINVAL;
1362         }
1363
1364         mutex_lock(&hwmgr->smu_lock);
1365         hwmgr->hwmgr_func->smus_notify_pwe(hwmgr);
1366         mutex_unlock(&hwmgr->smu_lock);
1367
1368         return 0;
1369 }
1370
1371 static int pp_enable_mgpu_fan_boost(void *handle)
1372 {
1373         struct pp_hwmgr *hwmgr = handle;
1374
1375         if (!hwmgr)
1376                 return -EINVAL;
1377
1378         if (!hwmgr->pm_en ||
1379              hwmgr->hwmgr_func->enable_mgpu_fan_boost == NULL)
1380                 return 0;
1381
1382         mutex_lock(&hwmgr->smu_lock);
1383         hwmgr->hwmgr_func->enable_mgpu_fan_boost(hwmgr);
1384         mutex_unlock(&hwmgr->smu_lock);
1385
1386         return 0;
1387 }
1388
1389 static int pp_set_min_deep_sleep_dcefclk(void *handle, uint32_t clock)
1390 {
1391         struct pp_hwmgr *hwmgr = handle;
1392
1393         if (!hwmgr || !hwmgr->pm_en)
1394                 return -EINVAL;
1395
1396         if (hwmgr->hwmgr_func->set_min_deep_sleep_dcefclk == NULL) {
1397                 pr_debug("%s was not implemented.\n", __func__);
1398                 return -EINVAL;
1399         }
1400
1401         mutex_lock(&hwmgr->smu_lock);
1402         hwmgr->hwmgr_func->set_min_deep_sleep_dcefclk(hwmgr, clock);
1403         mutex_unlock(&hwmgr->smu_lock);
1404
1405         return 0;
1406 }
1407
1408 static int pp_set_hard_min_dcefclk_by_freq(void *handle, uint32_t clock)
1409 {
1410         struct pp_hwmgr *hwmgr = handle;
1411
1412         if (!hwmgr || !hwmgr->pm_en)
1413                 return -EINVAL;
1414
1415         if (hwmgr->hwmgr_func->set_hard_min_dcefclk_by_freq == NULL) {
1416                 pr_debug("%s was not implemented.\n", __func__);
1417                 return -EINVAL;
1418         }
1419
1420         mutex_lock(&hwmgr->smu_lock);
1421         hwmgr->hwmgr_func->set_hard_min_dcefclk_by_freq(hwmgr, clock);
1422         mutex_unlock(&hwmgr->smu_lock);
1423
1424         return 0;
1425 }
1426
1427 static int pp_set_hard_min_fclk_by_freq(void *handle, uint32_t clock)
1428 {
1429         struct pp_hwmgr *hwmgr = handle;
1430
1431         if (!hwmgr || !hwmgr->pm_en)
1432                 return -EINVAL;
1433
1434         if (hwmgr->hwmgr_func->set_hard_min_fclk_by_freq == NULL) {
1435                 pr_debug("%s was not implemented.\n", __func__);
1436                 return -EINVAL;
1437         }
1438
1439         mutex_lock(&hwmgr->smu_lock);
1440         hwmgr->hwmgr_func->set_hard_min_fclk_by_freq(hwmgr, clock);
1441         mutex_unlock(&hwmgr->smu_lock);
1442
1443         return 0;
1444 }
1445
1446 static int pp_set_active_display_count(void *handle, uint32_t count)
1447 {
1448         struct pp_hwmgr *hwmgr = handle;
1449         int ret = 0;
1450
1451         if (!hwmgr || !hwmgr->pm_en)
1452                 return -EINVAL;
1453
1454         mutex_lock(&hwmgr->smu_lock);
1455         ret = phm_set_active_display_count(hwmgr, count);
1456         mutex_unlock(&hwmgr->smu_lock);
1457
1458         return ret;
1459 }
1460
1461 static int pp_get_asic_baco_capability(void *handle, bool *cap)
1462 {
1463         struct pp_hwmgr *hwmgr = handle;
1464
1465         *cap = false;
1466         if (!hwmgr)
1467                 return -EINVAL;
1468
1469         if (!(hwmgr->not_vf && amdgpu_dpm) ||
1470                 !hwmgr->hwmgr_func->get_asic_baco_capability)
1471                 return 0;
1472
1473         mutex_lock(&hwmgr->smu_lock);
1474         hwmgr->hwmgr_func->get_asic_baco_capability(hwmgr, cap);
1475         mutex_unlock(&hwmgr->smu_lock);
1476
1477         return 0;
1478 }
1479
1480 static int pp_get_asic_baco_state(void *handle, int *state)
1481 {
1482         struct pp_hwmgr *hwmgr = handle;
1483
1484         if (!hwmgr)
1485                 return -EINVAL;
1486
1487         if (!hwmgr->pm_en || !hwmgr->hwmgr_func->get_asic_baco_state)
1488                 return 0;
1489
1490         mutex_lock(&hwmgr->smu_lock);
1491         hwmgr->hwmgr_func->get_asic_baco_state(hwmgr, (enum BACO_STATE *)state);
1492         mutex_unlock(&hwmgr->smu_lock);
1493
1494         return 0;
1495 }
1496
1497 static int pp_set_asic_baco_state(void *handle, int state)
1498 {
1499         struct pp_hwmgr *hwmgr = handle;
1500
1501         if (!hwmgr)
1502                 return -EINVAL;
1503
1504         if (!(hwmgr->not_vf && amdgpu_dpm) ||
1505                 !hwmgr->hwmgr_func->set_asic_baco_state)
1506                 return 0;
1507
1508         mutex_lock(&hwmgr->smu_lock);
1509         hwmgr->hwmgr_func->set_asic_baco_state(hwmgr, (enum BACO_STATE)state);
1510         mutex_unlock(&hwmgr->smu_lock);
1511
1512         return 0;
1513 }
1514
1515 static int pp_get_ppfeature_status(void *handle, char *buf)
1516 {
1517         struct pp_hwmgr *hwmgr = handle;
1518         int ret = 0;
1519
1520         if (!hwmgr || !hwmgr->pm_en || !buf)
1521                 return -EINVAL;
1522
1523         if (hwmgr->hwmgr_func->get_ppfeature_status == NULL) {
1524                 pr_info_ratelimited("%s was not implemented.\n", __func__);
1525                 return -EINVAL;
1526         }
1527
1528         mutex_lock(&hwmgr->smu_lock);
1529         ret = hwmgr->hwmgr_func->get_ppfeature_status(hwmgr, buf);
1530         mutex_unlock(&hwmgr->smu_lock);
1531
1532         return ret;
1533 }
1534
1535 static int pp_set_ppfeature_status(void *handle, uint64_t ppfeature_masks)
1536 {
1537         struct pp_hwmgr *hwmgr = handle;
1538         int ret = 0;
1539
1540         if (!hwmgr || !hwmgr->pm_en)
1541                 return -EINVAL;
1542
1543         if (hwmgr->hwmgr_func->set_ppfeature_status == NULL) {
1544                 pr_info_ratelimited("%s was not implemented.\n", __func__);
1545                 return -EINVAL;
1546         }
1547
1548         mutex_lock(&hwmgr->smu_lock);
1549         ret = hwmgr->hwmgr_func->set_ppfeature_status(hwmgr, ppfeature_masks);
1550         mutex_unlock(&hwmgr->smu_lock);
1551
1552         return ret;
1553 }
1554
1555 static int pp_asic_reset_mode_2(void *handle)
1556 {
1557         struct pp_hwmgr *hwmgr = handle;
1558                 int ret = 0;
1559
1560         if (!hwmgr || !hwmgr->pm_en)
1561                 return -EINVAL;
1562
1563         if (hwmgr->hwmgr_func->asic_reset == NULL) {
1564                 pr_info_ratelimited("%s was not implemented.\n", __func__);
1565                 return -EINVAL;
1566         }
1567
1568         mutex_lock(&hwmgr->smu_lock);
1569         ret = hwmgr->hwmgr_func->asic_reset(hwmgr, SMU_ASIC_RESET_MODE_2);
1570         mutex_unlock(&hwmgr->smu_lock);
1571
1572         return ret;
1573 }
1574
1575 static int pp_smu_i2c_bus_access(void *handle, bool acquire)
1576 {
1577         struct pp_hwmgr *hwmgr = handle;
1578         int ret = 0;
1579
1580         if (!hwmgr || !hwmgr->pm_en)
1581                 return -EINVAL;
1582
1583         if (hwmgr->hwmgr_func->smu_i2c_bus_access == NULL) {
1584                 pr_info_ratelimited("%s was not implemented.\n", __func__);
1585                 return -EINVAL;
1586         }
1587
1588         mutex_lock(&hwmgr->smu_lock);
1589         ret = hwmgr->hwmgr_func->smu_i2c_bus_access(hwmgr, acquire);
1590         mutex_unlock(&hwmgr->smu_lock);
1591
1592         return ret;
1593 }
1594
1595 static int pp_set_df_cstate(void *handle, enum pp_df_cstate state)
1596 {
1597         struct pp_hwmgr *hwmgr = handle;
1598
1599         if (!hwmgr)
1600                 return -EINVAL;
1601
1602         if (!hwmgr->pm_en || !hwmgr->hwmgr_func->set_df_cstate)
1603                 return 0;
1604
1605         mutex_lock(&hwmgr->smu_lock);
1606         hwmgr->hwmgr_func->set_df_cstate(hwmgr, state);
1607         mutex_unlock(&hwmgr->smu_lock);
1608
1609         return 0;
1610 }
1611
1612 static int pp_set_xgmi_pstate(void *handle, uint32_t pstate)
1613 {
1614         struct pp_hwmgr *hwmgr = handle;
1615
1616         if (!hwmgr)
1617                 return -EINVAL;
1618
1619         if (!hwmgr->pm_en || !hwmgr->hwmgr_func->set_xgmi_pstate)
1620                 return 0;
1621
1622         mutex_lock(&hwmgr->smu_lock);
1623         hwmgr->hwmgr_func->set_xgmi_pstate(hwmgr, pstate);
1624         mutex_unlock(&hwmgr->smu_lock);
1625
1626         return 0;
1627 }
1628
1629 static ssize_t pp_get_gpu_metrics(void *handle, void **table)
1630 {
1631         struct pp_hwmgr *hwmgr = handle;
1632         ssize_t size;
1633
1634         if (!hwmgr)
1635                 return -EINVAL;
1636
1637         if (!hwmgr->pm_en || !hwmgr->hwmgr_func->get_gpu_metrics)
1638                 return -EOPNOTSUPP;
1639
1640         mutex_lock(&hwmgr->smu_lock);
1641         size = hwmgr->hwmgr_func->get_gpu_metrics(hwmgr, table);
1642         mutex_unlock(&hwmgr->smu_lock);
1643
1644         return size;
1645 }
1646
1647 static int pp_gfx_state_change_set(void *handle, uint32_t state)
1648 {
1649         struct pp_hwmgr *hwmgr = handle;
1650
1651         if (!hwmgr || !hwmgr->pm_en)
1652                 return -EINVAL;
1653
1654         if (hwmgr->hwmgr_func->gfx_state_change == NULL) {
1655                 pr_info_ratelimited("%s was not implemented.\n", __func__);
1656                 return -EINVAL;
1657         }
1658
1659         mutex_lock(&hwmgr->smu_lock);
1660         hwmgr->hwmgr_func->gfx_state_change(hwmgr, state);
1661         mutex_unlock(&hwmgr->smu_lock);
1662         return 0;
1663 }
1664
1665 static int pp_get_prv_buffer_details(void *handle, void **addr, size_t *size)
1666 {
1667         struct pp_hwmgr *hwmgr = handle;
1668         struct amdgpu_device *adev = hwmgr->adev;
1669
1670         if (!addr || !size)
1671                 return -EINVAL;
1672
1673         *addr = NULL;
1674         *size = 0;
1675         mutex_lock(&hwmgr->smu_lock);
1676         if (adev->pm.smu_prv_buffer) {
1677                 amdgpu_bo_kmap(adev->pm.smu_prv_buffer, addr);
1678                 *size = adev->pm.smu_prv_buffer_size;
1679         }
1680         mutex_unlock(&hwmgr->smu_lock);
1681
1682         return 0;
1683 }
1684
/*
 * Dispatch table wiring the generic amd_pm_funcs interface to the
 * powerplay/hwmgr wrappers defined in this file.
 */
static const struct amd_pm_funcs pp_dpm_funcs = {
	.load_firmware = pp_dpm_load_fw,
	.wait_for_fw_loading_complete = pp_dpm_fw_loading_complete,
	.force_performance_level = pp_dpm_force_performance_level,
	.get_performance_level = pp_dpm_get_performance_level,
	.get_current_power_state = pp_dpm_get_current_power_state,
	.dispatch_tasks = pp_dpm_dispatch_tasks,
	.set_fan_control_mode = pp_dpm_set_fan_control_mode,
	.get_fan_control_mode = pp_dpm_get_fan_control_mode,
	.set_fan_speed_pwm = pp_dpm_set_fan_speed_pwm,
	.get_fan_speed_pwm = pp_dpm_get_fan_speed_pwm,
	.get_fan_speed_rpm = pp_dpm_get_fan_speed_rpm,
	.set_fan_speed_rpm = pp_dpm_set_fan_speed_rpm,
	.get_pp_num_states = pp_dpm_get_pp_num_states,
	.get_pp_table = pp_dpm_get_pp_table,
	.set_pp_table = pp_dpm_set_pp_table,
	.force_clock_level = pp_dpm_force_clock_level,
	.print_clock_levels = pp_dpm_print_clock_levels,
	.get_sclk_od = pp_dpm_get_sclk_od,
	.set_sclk_od = pp_dpm_set_sclk_od,
	.get_mclk_od = pp_dpm_get_mclk_od,
	.set_mclk_od = pp_dpm_set_mclk_od,
	.read_sensor = pp_dpm_read_sensor,
	.get_vce_clock_state = pp_dpm_get_vce_clock_state,
	.switch_power_profile = pp_dpm_switch_power_profile,
	.set_clockgating_by_smu = pp_set_clockgating_by_smu,
	.set_powergating_by_smu = pp_set_powergating_by_smu,
	.get_power_profile_mode = pp_get_power_profile_mode,
	.set_power_profile_mode = pp_set_power_profile_mode,
	.set_fine_grain_clk_vol = pp_set_fine_grain_clk_vol,
	.odn_edit_dpm_table = pp_odn_edit_dpm_table,
	.set_mp1_state = pp_dpm_set_mp1_state,
	.set_power_limit = pp_set_power_limit,
	.get_power_limit = pp_get_power_limit,
/* export to DC */
	.get_sclk = pp_dpm_get_sclk,
	.get_mclk = pp_dpm_get_mclk,
	.display_configuration_change = pp_display_configuration_change,
	.get_display_power_level = pp_get_display_power_level,
	.get_current_clocks = pp_get_current_clocks,
	.get_clock_by_type = pp_get_clock_by_type,
	.get_clock_by_type_with_latency = pp_get_clock_by_type_with_latency,
	.get_clock_by_type_with_voltage = pp_get_clock_by_type_with_voltage,
	.set_watermarks_for_clocks_ranges = pp_set_watermarks_for_clocks_ranges,
	.display_clock_voltage_request = pp_display_clock_voltage_request,
	.get_display_mode_validation_clocks = pp_get_display_mode_validation_clocks,
	.notify_smu_enable_pwe = pp_notify_smu_enable_pwe,
	.enable_mgpu_fan_boost = pp_enable_mgpu_fan_boost,
	.set_active_display_count = pp_set_active_display_count,
	.set_min_deep_sleep_dcefclk = pp_set_min_deep_sleep_dcefclk,
	.set_hard_min_dcefclk_by_freq = pp_set_hard_min_dcefclk_by_freq,
	.set_hard_min_fclk_by_freq = pp_set_hard_min_fclk_by_freq,
	.get_asic_baco_capability = pp_get_asic_baco_capability,
	.get_asic_baco_state = pp_get_asic_baco_state,
	.set_asic_baco_state = pp_set_asic_baco_state,
	.get_ppfeature_status = pp_get_ppfeature_status,
	.set_ppfeature_status = pp_set_ppfeature_status,
	.asic_reset_mode_2 = pp_asic_reset_mode_2,
	.smu_i2c_bus_access = pp_smu_i2c_bus_access,
	.set_df_cstate = pp_set_df_cstate,
	.set_xgmi_pstate = pp_set_xgmi_pstate,
	.get_gpu_metrics = pp_get_gpu_metrics,
	.gfx_state_change_set = pp_gfx_state_change_set,
	.get_smu_prv_buf_details = pp_get_prv_buffer_details,
};