Merge branch irq/misc-5.17 into irq/irqchip-next
[linux-2.6-microblaze.git] / drivers / gpu / drm / amd / pm / powerplay / amd_powerplay.c
1 /*
2  * Copyright 2015 Advanced Micro Devices, Inc.
3  *
4  * Permission is hereby granted, free of charge, to any person obtaining a
5  * copy of this software and associated documentation files (the "Software"),
6  * to deal in the Software without restriction, including without limitation
7  * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8  * and/or sell copies of the Software, and to permit persons to whom the
9  * Software is furnished to do so, subject to the following conditions:
10  *
11  * The above copyright notice and this permission notice shall be included in
12  * all copies or substantial portions of the Software.
13  *
14  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
17  * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18  * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19  * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20  * OTHER DEALINGS IN THE SOFTWARE.
21  *
22  */
23 #include "pp_debug.h"
24 #include <linux/types.h>
25 #include <linux/kernel.h>
26 #include <linux/gfp.h>
27 #include <linux/slab.h>
28 #include <linux/firmware.h>
29 #include "amd_shared.h"
30 #include "amd_powerplay.h"
31 #include "power_state.h"
32 #include "amdgpu.h"
33 #include "hwmgr.h"
34
35
36 static const struct amd_pm_funcs pp_dpm_funcs;
37
38 static int amd_powerplay_create(struct amdgpu_device *adev)
39 {
40         struct pp_hwmgr *hwmgr;
41
42         if (adev == NULL)
43                 return -EINVAL;
44
45         hwmgr = kzalloc(sizeof(struct pp_hwmgr), GFP_KERNEL);
46         if (hwmgr == NULL)
47                 return -ENOMEM;
48
49         hwmgr->adev = adev;
50         hwmgr->not_vf = !amdgpu_sriov_vf(adev);
51         hwmgr->device = amdgpu_cgs_create_device(adev);
52         mutex_init(&hwmgr->smu_lock);
53         mutex_init(&hwmgr->msg_lock);
54         hwmgr->chip_family = adev->family;
55         hwmgr->chip_id = adev->asic_type;
56         hwmgr->feature_mask = adev->pm.pp_feature;
57         hwmgr->display_config = &adev->pm.pm_display_cfg;
58         adev->powerplay.pp_handle = hwmgr;
59         adev->powerplay.pp_funcs = &pp_dpm_funcs;
60         return 0;
61 }
62
63
64 static void amd_powerplay_destroy(struct amdgpu_device *adev)
65 {
66         struct pp_hwmgr *hwmgr = adev->powerplay.pp_handle;
67
68         mutex_destroy(&hwmgr->msg_lock);
69
70         kfree(hwmgr->hardcode_pp_table);
71         hwmgr->hardcode_pp_table = NULL;
72
73         kfree(hwmgr);
74         hwmgr = NULL;
75 }
76
77 static int pp_early_init(void *handle)
78 {
79         int ret;
80         struct amdgpu_device *adev = handle;
81
82         ret = amd_powerplay_create(adev);
83
84         if (ret != 0)
85                 return ret;
86
87         ret = hwmgr_early_init(adev->powerplay.pp_handle);
88         if (ret)
89                 return -EINVAL;
90
91         return 0;
92 }
93
94 static int pp_sw_init(void *handle)
95 {
96         struct amdgpu_device *adev = handle;
97         struct pp_hwmgr *hwmgr = adev->powerplay.pp_handle;
98         int ret = 0;
99
100         ret = hwmgr_sw_init(hwmgr);
101
102         pr_debug("powerplay sw init %s\n", ret ? "failed" : "successfully");
103
104         return ret;
105 }
106
107 static int pp_sw_fini(void *handle)
108 {
109         struct amdgpu_device *adev = handle;
110         struct pp_hwmgr *hwmgr = adev->powerplay.pp_handle;
111
112         hwmgr_sw_fini(hwmgr);
113
114         release_firmware(adev->pm.fw);
115         adev->pm.fw = NULL;
116
117         return 0;
118 }
119
120 static int pp_hw_init(void *handle)
121 {
122         int ret = 0;
123         struct amdgpu_device *adev = handle;
124         struct pp_hwmgr *hwmgr = adev->powerplay.pp_handle;
125
126         ret = hwmgr_hw_init(hwmgr);
127
128         if (ret)
129                 pr_err("powerplay hw init failed\n");
130
131         return ret;
132 }
133
134 static int pp_hw_fini(void *handle)
135 {
136         struct amdgpu_device *adev = handle;
137         struct pp_hwmgr *hwmgr = adev->powerplay.pp_handle;
138
139         hwmgr_hw_fini(hwmgr);
140
141         return 0;
142 }
143
/*
 * Reserve a GTT buffer of adev->pm.smu_prv_buffer_size bytes for the SMU
 * and report its CPU/GPU addresses to the firmware via the backend's
 * notify_cac_buffer_info hook. Best-effort: on any failure the buffer is
 * released and an error is logged, but no status is returned.
 */
static void pp_reserve_vram_for_smu(struct amdgpu_device *adev)
{
	/*
	 * r is pre-seeded with -EINVAL so that a backend WITHOUT the
	 * notify hook also falls into the cleanup path below and the
	 * buffer is not left allocated with the SMU unaware of it.
	 */
	int r = -EINVAL;
	void *cpu_ptr = NULL;
	uint64_t gpu_addr;
	struct pp_hwmgr *hwmgr = adev->powerplay.pp_handle;

	if (amdgpu_bo_create_kernel(adev, adev->pm.smu_prv_buffer_size,
						PAGE_SIZE, AMDGPU_GEM_DOMAIN_GTT,
						&adev->pm.smu_prv_buffer,
						&gpu_addr,
						&cpu_ptr)) {
		DRM_ERROR("amdgpu: failed to create smu prv buffer\n");
		return;
	}

	/* Hand both the kernel-virtual and GPU addresses to the SMU. */
	if (hwmgr->hwmgr_func->notify_cac_buffer_info)
		r = hwmgr->hwmgr_func->notify_cac_buffer_info(hwmgr,
					lower_32_bits((unsigned long)cpu_ptr),
					upper_32_bits((unsigned long)cpu_ptr),
					lower_32_bits(gpu_addr),
					upper_32_bits(gpu_addr),
					adev->pm.smu_prv_buffer_size);

	if (r) {
		amdgpu_bo_free_kernel(&adev->pm.smu_prv_buffer, NULL, NULL);
		adev->pm.smu_prv_buffer = NULL;
		DRM_ERROR("amdgpu: failed to notify SMU buffer address\n");
	}
}
174
175 static int pp_late_init(void *handle)
176 {
177         struct amdgpu_device *adev = handle;
178         struct pp_hwmgr *hwmgr = adev->powerplay.pp_handle;
179
180         if (hwmgr && hwmgr->pm_en) {
181                 mutex_lock(&hwmgr->smu_lock);
182                 hwmgr_handle_task(hwmgr,
183                                         AMD_PP_TASK_COMPLETE_INIT, NULL);
184                 mutex_unlock(&hwmgr->smu_lock);
185         }
186         if (adev->pm.smu_prv_buffer_size != 0)
187                 pp_reserve_vram_for_smu(adev);
188
189         return 0;
190 }
191
192 static void pp_late_fini(void *handle)
193 {
194         struct amdgpu_device *adev = handle;
195
196         if (adev->pm.smu_prv_buffer)
197                 amdgpu_bo_free_kernel(&adev->pm.smu_prv_buffer, NULL, NULL);
198         amd_powerplay_destroy(adev);
199 }
200
201
/* Stub required by amd_ip_funcs; powerplay always reports "not idle". */
static bool pp_is_idle(void *handle)
{
	return false;
}
206
/* Stub required by amd_ip_funcs; nothing to wait on, always succeeds. */
static int pp_wait_for_idle(void *handle)
{
	return 0;
}
211
/* Stub soft-reset hook for amd_ip_funcs; no reset action, always 0. */
static int pp_sw_reset(void *handle)
{
	return 0;
}
216
/* Stub powergating hook for amd_ip_funcs; state changes are ignored. */
static int pp_set_powergating_state(void *handle,
				    enum amd_powergating_state state)
{
	return 0;
}
222
223 static int pp_suspend(void *handle)
224 {
225         struct amdgpu_device *adev = handle;
226         struct pp_hwmgr *hwmgr = adev->powerplay.pp_handle;
227
228         return hwmgr_suspend(hwmgr);
229 }
230
231 static int pp_resume(void *handle)
232 {
233         struct amdgpu_device *adev = handle;
234         struct pp_hwmgr *hwmgr = adev->powerplay.pp_handle;
235
236         return hwmgr_resume(hwmgr);
237 }
238
/* Stub clockgating hook for amd_ip_funcs; state changes are ignored. */
static int pp_set_clockgating_state(void *handle,
					  enum amd_clockgating_state state)
{
	return 0;
}
244
/*
 * IP-block callback table handed to the amdgpu core; these hooks drive
 * powerplay through the common init/fini/suspend/resume state machine.
 */
static const struct amd_ip_funcs pp_ip_funcs = {
	.name = "powerplay",
	.early_init = pp_early_init,
	.late_init = pp_late_init,
	.sw_init = pp_sw_init,
	.sw_fini = pp_sw_fini,
	.hw_init = pp_hw_init,
	.hw_fini = pp_hw_fini,
	.late_fini = pp_late_fini,
	.suspend = pp_suspend,
	.resume = pp_resume,
	.is_idle = pp_is_idle,
	.wait_for_idle = pp_wait_for_idle,
	.soft_reset = pp_sw_reset,
	.set_clockgating_state = pp_set_clockgating_state,
	.set_powergating_state = pp_set_powergating_state,
};
262
/* Versioned SMC IP block descriptor exported for amdgpu IP registration. */
const struct amdgpu_ip_block_version pp_smu_ip_block =
{
	.type = AMD_IP_BLOCK_TYPE_SMC,
	.major = 1,
	.minor = 0,
	.rev = 0,
	.funcs = &pp_ip_funcs,
};
271
272 /* This interface only be supported On Vi,
273  * because only smu7/8 can help to load gfx/sdma fw,
274  * smu need to be enabled before load other ip's fw.
275  * so call start smu to load smu7 fw and other ip's fw
276  */
277 static int pp_dpm_load_fw(void *handle)
278 {
279         struct pp_hwmgr *hwmgr = handle;
280
281         if (!hwmgr || !hwmgr->smumgr_funcs || !hwmgr->smumgr_funcs->start_smu)
282                 return -EINVAL;
283
284         if (hwmgr->smumgr_funcs->start_smu(hwmgr)) {
285                 pr_err("fw load failed\n");
286                 return -EINVAL;
287         }
288
289         return 0;
290 }
291
/* Stub: no post-firmware-load work is needed here; always succeeds. */
static int pp_dpm_fw_loading_complete(void *handle)
{
	return 0;
}
296
297 static int pp_set_clockgating_by_smu(void *handle, uint32_t msg_id)
298 {
299         struct pp_hwmgr *hwmgr = handle;
300
301         if (!hwmgr || !hwmgr->pm_en)
302                 return -EINVAL;
303
304         if (hwmgr->hwmgr_func->update_clock_gatings == NULL) {
305                 pr_info_ratelimited("%s was not implemented.\n", __func__);
306                 return 0;
307         }
308
309         return hwmgr->hwmgr_func->update_clock_gatings(hwmgr, &msg_id);
310 }
311
/*
 * Handle entry/exit of UMD stable-pstate profile levels.
 *
 * On entering a profile level from a non-profile level: remember the
 * current level in saved_dpm_level and ungate GFX power then clock
 * gating (PG before CG on this path). On leaving profile mode: restore
 * saved_dpm_level when PROFILE_EXIT was requested and re-enable gating
 * in the reverse order (CG before PG). May rewrite *level in place.
 */
static void pp_dpm_en_umd_pstate(struct pp_hwmgr  *hwmgr,
						enum amd_dpm_forced_level *level)
{
	uint32_t profile_mode_mask = AMD_DPM_FORCED_LEVEL_PROFILE_STANDARD |
					AMD_DPM_FORCED_LEVEL_PROFILE_MIN_SCLK |
					AMD_DPM_FORCED_LEVEL_PROFILE_MIN_MCLK |
					AMD_DPM_FORCED_LEVEL_PROFILE_PEAK;

	if (!(hwmgr->dpm_level & profile_mode_mask)) {
		/* enter umd pstate, save current level, disable gfx cg*/
		if (*level & profile_mode_mask) {
			hwmgr->saved_dpm_level = hwmgr->dpm_level;
			hwmgr->en_umd_pstate = true;
			amdgpu_device_ip_set_powergating_state(hwmgr->adev,
					AMD_IP_BLOCK_TYPE_GFX,
					AMD_PG_STATE_UNGATE);
			amdgpu_device_ip_set_clockgating_state(hwmgr->adev,
						AMD_IP_BLOCK_TYPE_GFX,
						AMD_CG_STATE_UNGATE);
		}
	} else {
		/* exit umd pstate, restore level, enable gfx cg*/
		if (!(*level & profile_mode_mask)) {
			if (*level == AMD_DPM_FORCED_LEVEL_PROFILE_EXIT)
				*level = hwmgr->saved_dpm_level;
			hwmgr->en_umd_pstate = false;
			amdgpu_device_ip_set_clockgating_state(hwmgr->adev,
					AMD_IP_BLOCK_TYPE_GFX,
					AMD_CG_STATE_GATE);
			amdgpu_device_ip_set_powergating_state(hwmgr->adev,
					AMD_IP_BLOCK_TYPE_GFX,
					AMD_PG_STATE_GATE);
		}
	}
}
347
348 static int pp_dpm_force_performance_level(void *handle,
349                                         enum amd_dpm_forced_level level)
350 {
351         struct pp_hwmgr *hwmgr = handle;
352
353         if (!hwmgr || !hwmgr->pm_en)
354                 return -EINVAL;
355
356         if (level == hwmgr->dpm_level)
357                 return 0;
358
359         mutex_lock(&hwmgr->smu_lock);
360         pp_dpm_en_umd_pstate(hwmgr, &level);
361         hwmgr->request_dpm_level = level;
362         hwmgr_handle_task(hwmgr, AMD_PP_TASK_READJUST_POWER_STATE, NULL);
363         mutex_unlock(&hwmgr->smu_lock);
364
365         return 0;
366 }
367
368 static enum amd_dpm_forced_level pp_dpm_get_performance_level(
369                                                                 void *handle)
370 {
371         struct pp_hwmgr *hwmgr = handle;
372         enum amd_dpm_forced_level level;
373
374         if (!hwmgr || !hwmgr->pm_en)
375                 return -EINVAL;
376
377         mutex_lock(&hwmgr->smu_lock);
378         level = hwmgr->dpm_level;
379         mutex_unlock(&hwmgr->smu_lock);
380         return level;
381 }
382
383 static uint32_t pp_dpm_get_sclk(void *handle, bool low)
384 {
385         struct pp_hwmgr *hwmgr = handle;
386         uint32_t clk = 0;
387
388         if (!hwmgr || !hwmgr->pm_en)
389                 return 0;
390
391         if (hwmgr->hwmgr_func->get_sclk == NULL) {
392                 pr_info_ratelimited("%s was not implemented.\n", __func__);
393                 return 0;
394         }
395         mutex_lock(&hwmgr->smu_lock);
396         clk = hwmgr->hwmgr_func->get_sclk(hwmgr, low);
397         mutex_unlock(&hwmgr->smu_lock);
398         return clk;
399 }
400
401 static uint32_t pp_dpm_get_mclk(void *handle, bool low)
402 {
403         struct pp_hwmgr *hwmgr = handle;
404         uint32_t clk = 0;
405
406         if (!hwmgr || !hwmgr->pm_en)
407                 return 0;
408
409         if (hwmgr->hwmgr_func->get_mclk == NULL) {
410                 pr_info_ratelimited("%s was not implemented.\n", __func__);
411                 return 0;
412         }
413         mutex_lock(&hwmgr->smu_lock);
414         clk = hwmgr->hwmgr_func->get_mclk(hwmgr, low);
415         mutex_unlock(&hwmgr->smu_lock);
416         return clk;
417 }
418
419 static void pp_dpm_powergate_vce(void *handle, bool gate)
420 {
421         struct pp_hwmgr *hwmgr = handle;
422
423         if (!hwmgr || !hwmgr->pm_en)
424                 return;
425
426         if (hwmgr->hwmgr_func->powergate_vce == NULL) {
427                 pr_info_ratelimited("%s was not implemented.\n", __func__);
428                 return;
429         }
430         mutex_lock(&hwmgr->smu_lock);
431         hwmgr->hwmgr_func->powergate_vce(hwmgr, gate);
432         mutex_unlock(&hwmgr->smu_lock);
433 }
434
435 static void pp_dpm_powergate_uvd(void *handle, bool gate)
436 {
437         struct pp_hwmgr *hwmgr = handle;
438
439         if (!hwmgr || !hwmgr->pm_en)
440                 return;
441
442         if (hwmgr->hwmgr_func->powergate_uvd == NULL) {
443                 pr_info_ratelimited("%s was not implemented.\n", __func__);
444                 return;
445         }
446         mutex_lock(&hwmgr->smu_lock);
447         hwmgr->hwmgr_func->powergate_uvd(hwmgr, gate);
448         mutex_unlock(&hwmgr->smu_lock);
449 }
450
451 static int pp_dpm_dispatch_tasks(void *handle, enum amd_pp_task task_id,
452                 enum amd_pm_state_type *user_state)
453 {
454         int ret = 0;
455         struct pp_hwmgr *hwmgr = handle;
456
457         if (!hwmgr || !hwmgr->pm_en)
458                 return -EINVAL;
459
460         mutex_lock(&hwmgr->smu_lock);
461         ret = hwmgr_handle_task(hwmgr, task_id, user_state);
462         mutex_unlock(&hwmgr->smu_lock);
463
464         return ret;
465 }
466
467 static enum amd_pm_state_type pp_dpm_get_current_power_state(void *handle)
468 {
469         struct pp_hwmgr *hwmgr = handle;
470         struct pp_power_state *state;
471         enum amd_pm_state_type pm_type;
472
473         if (!hwmgr || !hwmgr->pm_en || !hwmgr->current_ps)
474                 return -EINVAL;
475
476         mutex_lock(&hwmgr->smu_lock);
477
478         state = hwmgr->current_ps;
479
480         switch (state->classification.ui_label) {
481         case PP_StateUILabel_Battery:
482                 pm_type = POWER_STATE_TYPE_BATTERY;
483                 break;
484         case PP_StateUILabel_Balanced:
485                 pm_type = POWER_STATE_TYPE_BALANCED;
486                 break;
487         case PP_StateUILabel_Performance:
488                 pm_type = POWER_STATE_TYPE_PERFORMANCE;
489                 break;
490         default:
491                 if (state->classification.flags & PP_StateClassificationFlag_Boot)
492                         pm_type = POWER_STATE_TYPE_INTERNAL_BOOT;
493                 else
494                         pm_type = POWER_STATE_TYPE_DEFAULT;
495                 break;
496         }
497         mutex_unlock(&hwmgr->smu_lock);
498
499         return pm_type;
500 }
501
502 static void pp_dpm_set_fan_control_mode(void *handle, uint32_t mode)
503 {
504         struct pp_hwmgr *hwmgr = handle;
505
506         if (!hwmgr || !hwmgr->pm_en)
507                 return;
508
509         if (hwmgr->hwmgr_func->set_fan_control_mode == NULL) {
510                 pr_info_ratelimited("%s was not implemented.\n", __func__);
511                 return;
512         }
513         mutex_lock(&hwmgr->smu_lock);
514         hwmgr->hwmgr_func->set_fan_control_mode(hwmgr, mode);
515         mutex_unlock(&hwmgr->smu_lock);
516 }
517
518 static uint32_t pp_dpm_get_fan_control_mode(void *handle)
519 {
520         struct pp_hwmgr *hwmgr = handle;
521         uint32_t mode = 0;
522
523         if (!hwmgr || !hwmgr->pm_en)
524                 return 0;
525
526         if (hwmgr->hwmgr_func->get_fan_control_mode == NULL) {
527                 pr_info_ratelimited("%s was not implemented.\n", __func__);
528                 return 0;
529         }
530         mutex_lock(&hwmgr->smu_lock);
531         mode = hwmgr->hwmgr_func->get_fan_control_mode(hwmgr);
532         mutex_unlock(&hwmgr->smu_lock);
533         return mode;
534 }
535
536 static int pp_dpm_set_fan_speed_pwm(void *handle, uint32_t speed)
537 {
538         struct pp_hwmgr *hwmgr = handle;
539         int ret = 0;
540
541         if (!hwmgr || !hwmgr->pm_en)
542                 return -EINVAL;
543
544         if (hwmgr->hwmgr_func->set_fan_speed_pwm == NULL) {
545                 pr_info_ratelimited("%s was not implemented.\n", __func__);
546                 return 0;
547         }
548         mutex_lock(&hwmgr->smu_lock);
549         ret = hwmgr->hwmgr_func->set_fan_speed_pwm(hwmgr, speed);
550         mutex_unlock(&hwmgr->smu_lock);
551         return ret;
552 }
553
554 static int pp_dpm_get_fan_speed_pwm(void *handle, uint32_t *speed)
555 {
556         struct pp_hwmgr *hwmgr = handle;
557         int ret = 0;
558
559         if (!hwmgr || !hwmgr->pm_en)
560                 return -EINVAL;
561
562         if (hwmgr->hwmgr_func->get_fan_speed_pwm == NULL) {
563                 pr_info_ratelimited("%s was not implemented.\n", __func__);
564                 return 0;
565         }
566
567         mutex_lock(&hwmgr->smu_lock);
568         ret = hwmgr->hwmgr_func->get_fan_speed_pwm(hwmgr, speed);
569         mutex_unlock(&hwmgr->smu_lock);
570         return ret;
571 }
572
573 static int pp_dpm_get_fan_speed_rpm(void *handle, uint32_t *rpm)
574 {
575         struct pp_hwmgr *hwmgr = handle;
576         int ret = 0;
577
578         if (!hwmgr || !hwmgr->pm_en)
579                 return -EINVAL;
580
581         if (hwmgr->hwmgr_func->get_fan_speed_rpm == NULL)
582                 return -EINVAL;
583
584         mutex_lock(&hwmgr->smu_lock);
585         ret = hwmgr->hwmgr_func->get_fan_speed_rpm(hwmgr, rpm);
586         mutex_unlock(&hwmgr->smu_lock);
587         return ret;
588 }
589
590 static int pp_dpm_set_fan_speed_rpm(void *handle, uint32_t rpm)
591 {
592         struct pp_hwmgr *hwmgr = handle;
593         int ret = 0;
594
595         if (!hwmgr || !hwmgr->pm_en)
596                 return -EINVAL;
597
598         if (hwmgr->hwmgr_func->set_fan_speed_rpm == NULL) {
599                 pr_info_ratelimited("%s was not implemented.\n", __func__);
600                 return 0;
601         }
602         mutex_lock(&hwmgr->smu_lock);
603         ret = hwmgr->hwmgr_func->set_fan_speed_rpm(hwmgr, rpm);
604         mutex_unlock(&hwmgr->smu_lock);
605         return ret;
606 }
607
608 static int pp_dpm_get_pp_num_states(void *handle,
609                 struct pp_states_info *data)
610 {
611         struct pp_hwmgr *hwmgr = handle;
612         int i;
613
614         memset(data, 0, sizeof(*data));
615
616         if (!hwmgr || !hwmgr->pm_en ||!hwmgr->ps)
617                 return -EINVAL;
618
619         mutex_lock(&hwmgr->smu_lock);
620
621         data->nums = hwmgr->num_ps;
622
623         for (i = 0; i < hwmgr->num_ps; i++) {
624                 struct pp_power_state *state = (struct pp_power_state *)
625                                 ((unsigned long)hwmgr->ps + i * hwmgr->ps_size);
626                 switch (state->classification.ui_label) {
627                 case PP_StateUILabel_Battery:
628                         data->states[i] = POWER_STATE_TYPE_BATTERY;
629                         break;
630                 case PP_StateUILabel_Balanced:
631                         data->states[i] = POWER_STATE_TYPE_BALANCED;
632                         break;
633                 case PP_StateUILabel_Performance:
634                         data->states[i] = POWER_STATE_TYPE_PERFORMANCE;
635                         break;
636                 default:
637                         if (state->classification.flags & PP_StateClassificationFlag_Boot)
638                                 data->states[i] = POWER_STATE_TYPE_INTERNAL_BOOT;
639                         else
640                                 data->states[i] = POWER_STATE_TYPE_DEFAULT;
641                 }
642         }
643         mutex_unlock(&hwmgr->smu_lock);
644         return 0;
645 }
646
647 static int pp_dpm_get_pp_table(void *handle, char **table)
648 {
649         struct pp_hwmgr *hwmgr = handle;
650         int size = 0;
651
652         if (!hwmgr || !hwmgr->pm_en ||!hwmgr->soft_pp_table)
653                 return -EINVAL;
654
655         mutex_lock(&hwmgr->smu_lock);
656         *table = (char *)hwmgr->soft_pp_table;
657         size = hwmgr->soft_pp_table_size;
658         mutex_unlock(&hwmgr->smu_lock);
659         return size;
660 }
661
662 static int amd_powerplay_reset(void *handle)
663 {
664         struct pp_hwmgr *hwmgr = handle;
665         int ret;
666
667         ret = hwmgr_hw_fini(hwmgr);
668         if (ret)
669                 return ret;
670
671         ret = hwmgr_hw_init(hwmgr);
672         if (ret)
673                 return ret;
674
675         return hwmgr_handle_task(hwmgr, AMD_PP_TASK_COMPLETE_INIT, NULL);
676 }
677
678 static int pp_dpm_set_pp_table(void *handle, const char *buf, size_t size)
679 {
680         struct pp_hwmgr *hwmgr = handle;
681         int ret = -ENOMEM;
682
683         if (!hwmgr || !hwmgr->pm_en)
684                 return -EINVAL;
685
686         mutex_lock(&hwmgr->smu_lock);
687         if (!hwmgr->hardcode_pp_table) {
688                 hwmgr->hardcode_pp_table = kmemdup(hwmgr->soft_pp_table,
689                                                    hwmgr->soft_pp_table_size,
690                                                    GFP_KERNEL);
691                 if (!hwmgr->hardcode_pp_table)
692                         goto err;
693         }
694
695         memcpy(hwmgr->hardcode_pp_table, buf, size);
696
697         hwmgr->soft_pp_table = hwmgr->hardcode_pp_table;
698
699         ret = amd_powerplay_reset(handle);
700         if (ret)
701                 goto err;
702
703         if (hwmgr->hwmgr_func->avfs_control) {
704                 ret = hwmgr->hwmgr_func->avfs_control(hwmgr, false);
705                 if (ret)
706                         goto err;
707         }
708         mutex_unlock(&hwmgr->smu_lock);
709         return 0;
710 err:
711         mutex_unlock(&hwmgr->smu_lock);
712         return ret;
713 }
714
715 static int pp_dpm_force_clock_level(void *handle,
716                 enum pp_clock_type type, uint32_t mask)
717 {
718         struct pp_hwmgr *hwmgr = handle;
719         int ret = 0;
720
721         if (!hwmgr || !hwmgr->pm_en)
722                 return -EINVAL;
723
724         if (hwmgr->hwmgr_func->force_clock_level == NULL) {
725                 pr_info_ratelimited("%s was not implemented.\n", __func__);
726                 return 0;
727         }
728
729         if (hwmgr->dpm_level != AMD_DPM_FORCED_LEVEL_MANUAL) {
730                 pr_debug("force clock level is for dpm manual mode only.\n");
731                 return -EINVAL;
732         }
733
734         mutex_lock(&hwmgr->smu_lock);
735         ret = hwmgr->hwmgr_func->force_clock_level(hwmgr, type, mask);
736         mutex_unlock(&hwmgr->smu_lock);
737         return ret;
738 }
739
740 static int pp_dpm_print_clock_levels(void *handle,
741                 enum pp_clock_type type, char *buf)
742 {
743         struct pp_hwmgr *hwmgr = handle;
744         int ret = 0;
745
746         if (!hwmgr || !hwmgr->pm_en)
747                 return -EINVAL;
748
749         if (hwmgr->hwmgr_func->print_clock_levels == NULL) {
750                 pr_info_ratelimited("%s was not implemented.\n", __func__);
751                 return 0;
752         }
753         mutex_lock(&hwmgr->smu_lock);
754         ret = hwmgr->hwmgr_func->print_clock_levels(hwmgr, type, buf);
755         mutex_unlock(&hwmgr->smu_lock);
756         return ret;
757 }
758
759 static int pp_dpm_get_sclk_od(void *handle)
760 {
761         struct pp_hwmgr *hwmgr = handle;
762         int ret = 0;
763
764         if (!hwmgr || !hwmgr->pm_en)
765                 return -EINVAL;
766
767         if (hwmgr->hwmgr_func->get_sclk_od == NULL) {
768                 pr_info_ratelimited("%s was not implemented.\n", __func__);
769                 return 0;
770         }
771         mutex_lock(&hwmgr->smu_lock);
772         ret = hwmgr->hwmgr_func->get_sclk_od(hwmgr);
773         mutex_unlock(&hwmgr->smu_lock);
774         return ret;
775 }
776
777 static int pp_dpm_set_sclk_od(void *handle, uint32_t value)
778 {
779         struct pp_hwmgr *hwmgr = handle;
780         int ret = 0;
781
782         if (!hwmgr || !hwmgr->pm_en)
783                 return -EINVAL;
784
785         if (hwmgr->hwmgr_func->set_sclk_od == NULL) {
786                 pr_info_ratelimited("%s was not implemented.\n", __func__);
787                 return 0;
788         }
789
790         mutex_lock(&hwmgr->smu_lock);
791         ret = hwmgr->hwmgr_func->set_sclk_od(hwmgr, value);
792         mutex_unlock(&hwmgr->smu_lock);
793         return ret;
794 }
795
796 static int pp_dpm_get_mclk_od(void *handle)
797 {
798         struct pp_hwmgr *hwmgr = handle;
799         int ret = 0;
800
801         if (!hwmgr || !hwmgr->pm_en)
802                 return -EINVAL;
803
804         if (hwmgr->hwmgr_func->get_mclk_od == NULL) {
805                 pr_info_ratelimited("%s was not implemented.\n", __func__);
806                 return 0;
807         }
808         mutex_lock(&hwmgr->smu_lock);
809         ret = hwmgr->hwmgr_func->get_mclk_od(hwmgr);
810         mutex_unlock(&hwmgr->smu_lock);
811         return ret;
812 }
813
814 static int pp_dpm_set_mclk_od(void *handle, uint32_t value)
815 {
816         struct pp_hwmgr *hwmgr = handle;
817         int ret = 0;
818
819         if (!hwmgr || !hwmgr->pm_en)
820                 return -EINVAL;
821
822         if (hwmgr->hwmgr_func->set_mclk_od == NULL) {
823                 pr_info_ratelimited("%s was not implemented.\n", __func__);
824                 return 0;
825         }
826         mutex_lock(&hwmgr->smu_lock);
827         ret = hwmgr->hwmgr_func->set_mclk_od(hwmgr, value);
828         mutex_unlock(&hwmgr->smu_lock);
829         return ret;
830 }
831
832 static int pp_dpm_read_sensor(void *handle, int idx,
833                               void *value, int *size)
834 {
835         struct pp_hwmgr *hwmgr = handle;
836         int ret = 0;
837
838         if (!hwmgr || !hwmgr->pm_en || !value)
839                 return -EINVAL;
840
841         switch (idx) {
842         case AMDGPU_PP_SENSOR_STABLE_PSTATE_SCLK:
843                 *((uint32_t *)value) = hwmgr->pstate_sclk;
844                 return 0;
845         case AMDGPU_PP_SENSOR_STABLE_PSTATE_MCLK:
846                 *((uint32_t *)value) = hwmgr->pstate_mclk;
847                 return 0;
848         case AMDGPU_PP_SENSOR_MIN_FAN_RPM:
849                 *((uint32_t *)value) = hwmgr->thermal_controller.fanInfo.ulMinRPM;
850                 return 0;
851         case AMDGPU_PP_SENSOR_MAX_FAN_RPM:
852                 *((uint32_t *)value) = hwmgr->thermal_controller.fanInfo.ulMaxRPM;
853                 return 0;
854         default:
855                 mutex_lock(&hwmgr->smu_lock);
856                 ret = hwmgr->hwmgr_func->read_sensor(hwmgr, idx, value, size);
857                 mutex_unlock(&hwmgr->smu_lock);
858                 return ret;
859         }
860 }
861
862 static struct amd_vce_state*
863 pp_dpm_get_vce_clock_state(void *handle, unsigned idx)
864 {
865         struct pp_hwmgr *hwmgr = handle;
866
867         if (!hwmgr || !hwmgr->pm_en)
868                 return NULL;
869
870         if (idx < hwmgr->num_vce_state_tables)
871                 return &hwmgr->vce_states[idx];
872         return NULL;
873 }
874
875 static int pp_get_power_profile_mode(void *handle, char *buf)
876 {
877         struct pp_hwmgr *hwmgr = handle;
878         int ret;
879
880         if (!hwmgr || !hwmgr->pm_en || !hwmgr->hwmgr_func->get_power_profile_mode)
881                 return -EOPNOTSUPP;
882         if (!buf)
883                 return -EINVAL;
884
885         mutex_lock(&hwmgr->smu_lock);
886         ret = hwmgr->hwmgr_func->get_power_profile_mode(hwmgr, buf);
887         mutex_unlock(&hwmgr->smu_lock);
888         return ret;
889 }
890
891 static int pp_set_power_profile_mode(void *handle, long *input, uint32_t size)
892 {
893         struct pp_hwmgr *hwmgr = handle;
894         int ret = -EOPNOTSUPP;
895
896         if (!hwmgr || !hwmgr->pm_en || !hwmgr->hwmgr_func->set_power_profile_mode)
897                 return ret;
898
899         if (hwmgr->dpm_level != AMD_DPM_FORCED_LEVEL_MANUAL) {
900                 pr_debug("power profile setting is for manual dpm mode only.\n");
901                 return -EINVAL;
902         }
903
904         mutex_lock(&hwmgr->smu_lock);
905         ret = hwmgr->hwmgr_func->set_power_profile_mode(hwmgr, input, size);
906         mutex_unlock(&hwmgr->smu_lock);
907         return ret;
908 }
909
910 static int pp_set_fine_grain_clk_vol(void *handle, uint32_t type, long *input, uint32_t size)
911 {
912         struct pp_hwmgr *hwmgr = handle;
913
914         if (!hwmgr || !hwmgr->pm_en)
915                 return -EINVAL;
916
917         if (hwmgr->hwmgr_func->set_fine_grain_clk_vol == NULL)
918                 return 0;
919
920         return hwmgr->hwmgr_func->set_fine_grain_clk_vol(hwmgr, type, input, size);
921 }
922
923 static int pp_odn_edit_dpm_table(void *handle, uint32_t type, long *input, uint32_t size)
924 {
925         struct pp_hwmgr *hwmgr = handle;
926
927         if (!hwmgr || !hwmgr->pm_en)
928                 return -EINVAL;
929
930         if (hwmgr->hwmgr_func->odn_edit_dpm_table == NULL) {
931                 pr_info_ratelimited("%s was not implemented.\n", __func__);
932                 return 0;
933         }
934
935         return hwmgr->hwmgr_func->odn_edit_dpm_table(hwmgr, type, input, size);
936 }
937
938 static int pp_dpm_set_mp1_state(void *handle, enum pp_mp1_state mp1_state)
939 {
940         struct pp_hwmgr *hwmgr = handle;
941
942         if (!hwmgr)
943                 return -EINVAL;
944
945         if (!hwmgr->pm_en)
946                 return 0;
947
948         if (hwmgr->hwmgr_func->set_mp1_state)
949                 return hwmgr->hwmgr_func->set_mp1_state(hwmgr, mp1_state);
950
951         return 0;
952 }
953
static int pp_dpm_switch_power_profile(void *handle,
		enum PP_SMC_POWER_PROFILE type, bool en)
{
	struct pp_hwmgr *hwmgr = handle;
	long workload;
	uint32_t index;

	if (!hwmgr || !hwmgr->pm_en)
		return -EINVAL;

	if (hwmgr->hwmgr_func->set_power_profile_mode == NULL) {
		pr_info_ratelimited("%s was not implemented.\n", __func__);
		return -EINVAL;
	}

	/* Only profiles below CUSTOM may be switched via this interface. */
	if (!(type < PP_SMC_POWER_PROFILE_CUSTOM))
		return -EINVAL;

	mutex_lock(&hwmgr->smu_lock);

	if (!en) {
		/*
		 * Drop this profile's priority bit, then fall back to the
		 * highest-priority profile still requested; fls() == 0
		 * (empty mask) clamps the index to 0.
		 */
		hwmgr->workload_mask &= ~(1 << hwmgr->workload_prority[type]);
		index = fls(hwmgr->workload_mask);
		index = index > 0 && index <= Workload_Policy_Max ? index - 1 : 0;
		workload = hwmgr->workload_setting[index];
	} else {
		/* Request this profile and select the highest-priority one. */
		hwmgr->workload_mask |= (1 << hwmgr->workload_prority[type]);
		index = fls(hwmgr->workload_mask);
		index = index <= Workload_Policy_Max ? index - 1 : 0;
		workload = hwmgr->workload_setting[index];
	}

	/*
	 * For compute workloads, optionally toggle power features that are
	 * known to cost compute performance, when the backend supports it.
	 */
	if (type == PP_SMC_POWER_PROFILE_COMPUTE &&
		hwmgr->hwmgr_func->disable_power_features_for_compute_performance) {
			if (hwmgr->hwmgr_func->disable_power_features_for_compute_performance(hwmgr, en)) {
				mutex_unlock(&hwmgr->smu_lock);
				return -EINVAL;
			}
	}

	/*
	 * Apply the selected workload immediately unless the user is in
	 * manual DPM mode, where the profile is driven explicitly.
	 */
	if (hwmgr->dpm_level != AMD_DPM_FORCED_LEVEL_MANUAL)
		hwmgr->hwmgr_func->set_power_profile_mode(hwmgr, &workload, 0);
	mutex_unlock(&hwmgr->smu_lock);

	return 0;
}
1000
1001 static int pp_set_power_limit(void *handle, uint32_t limit)
1002 {
1003         struct pp_hwmgr *hwmgr = handle;
1004         uint32_t max_power_limit;
1005
1006         if (!hwmgr || !hwmgr->pm_en)
1007                 return -EINVAL;
1008
1009         if (hwmgr->hwmgr_func->set_power_limit == NULL) {
1010                 pr_info_ratelimited("%s was not implemented.\n", __func__);
1011                 return -EINVAL;
1012         }
1013
1014         if (limit == 0)
1015                 limit = hwmgr->default_power_limit;
1016
1017         max_power_limit = hwmgr->default_power_limit;
1018         if (hwmgr->od_enabled) {
1019                 max_power_limit *= (100 + hwmgr->platform_descriptor.TDPODLimit);
1020                 max_power_limit /= 100;
1021         }
1022
1023         if (limit > max_power_limit)
1024                 return -EINVAL;
1025
1026         mutex_lock(&hwmgr->smu_lock);
1027         hwmgr->hwmgr_func->set_power_limit(hwmgr, limit);
1028         hwmgr->power_limit = limit;
1029         mutex_unlock(&hwmgr->smu_lock);
1030         return 0;
1031 }
1032
1033 static int pp_get_power_limit(void *handle, uint32_t *limit,
1034                               enum pp_power_limit_level pp_limit_level,
1035                               enum pp_power_type power_type)
1036 {
1037         struct pp_hwmgr *hwmgr = handle;
1038         int ret = 0;
1039
1040         if (!hwmgr || !hwmgr->pm_en ||!limit)
1041                 return -EINVAL;
1042
1043         if (power_type != PP_PWR_TYPE_SUSTAINED)
1044                 return -EOPNOTSUPP;
1045
1046         mutex_lock(&hwmgr->smu_lock);
1047
1048         switch (pp_limit_level) {
1049                 case PP_PWR_LIMIT_CURRENT:
1050                         *limit = hwmgr->power_limit;
1051                         break;
1052                 case PP_PWR_LIMIT_DEFAULT:
1053                         *limit = hwmgr->default_power_limit;
1054                         break;
1055                 case PP_PWR_LIMIT_MAX:
1056                         *limit = hwmgr->default_power_limit;
1057                         if (hwmgr->od_enabled) {
1058                                 *limit *= (100 + hwmgr->platform_descriptor.TDPODLimit);
1059                                 *limit /= 100;
1060                         }
1061                         break;
1062                 default:
1063                         ret = -EOPNOTSUPP;
1064                         break;
1065         }
1066
1067         mutex_unlock(&hwmgr->smu_lock);
1068
1069         return ret;
1070 }
1071
1072 static int pp_display_configuration_change(void *handle,
1073         const struct amd_pp_display_configuration *display_config)
1074 {
1075         struct pp_hwmgr *hwmgr = handle;
1076
1077         if (!hwmgr || !hwmgr->pm_en)
1078                 return -EINVAL;
1079
1080         mutex_lock(&hwmgr->smu_lock);
1081         phm_store_dal_configuration_data(hwmgr, display_config);
1082         mutex_unlock(&hwmgr->smu_lock);
1083         return 0;
1084 }
1085
1086 static int pp_get_display_power_level(void *handle,
1087                 struct amd_pp_simple_clock_info *output)
1088 {
1089         struct pp_hwmgr *hwmgr = handle;
1090         int ret = 0;
1091
1092         if (!hwmgr || !hwmgr->pm_en ||!output)
1093                 return -EINVAL;
1094
1095         mutex_lock(&hwmgr->smu_lock);
1096         ret = phm_get_dal_power_level(hwmgr, output);
1097         mutex_unlock(&hwmgr->smu_lock);
1098         return ret;
1099 }
1100
static int pp_get_current_clocks(void *handle,
		struct amd_pp_clock_info *clocks)
{
	struct amd_pp_simple_clock_info simple_clocks = { 0 };
	struct pp_clock_info hw_clocks;
	struct pp_hwmgr *hwmgr = handle;
	int ret = 0;

	if (!hwmgr || !hwmgr->pm_en)
		return -EINVAL;

	mutex_lock(&hwmgr->smu_lock);

	phm_get_dal_power_level(hwmgr, &simple_clocks);

	/*
	 * Query clock info for the current power state, using the
	 * power-containment designation when the platform supports it,
	 * otherwise the activity-based one.
	 */
	if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
					PHM_PlatformCaps_PowerContainment))
		ret = phm_get_clock_info(hwmgr, &hwmgr->current_ps->hardware,
					&hw_clocks, PHM_PerformanceLevelDesignation_PowerContainment);
	else
		ret = phm_get_clock_info(hwmgr, &hwmgr->current_ps->hardware,
					&hw_clocks, PHM_PerformanceLevelDesignation_Activity);

	if (ret) {
		pr_debug("Error in phm_get_clock_info \n");
		mutex_unlock(&hwmgr->smu_lock);
		return -EINVAL;
	}

	/* Copy the hardware clock ranges into the caller-visible struct. */
	clocks->min_engine_clock = hw_clocks.min_eng_clk;
	clocks->max_engine_clock = hw_clocks.max_eng_clk;
	clocks->min_memory_clock = hw_clocks.min_mem_clk;
	clocks->max_memory_clock = hw_clocks.max_mem_clk;
	clocks->min_bus_bandwidth = hw_clocks.min_bus_bandwidth;
	clocks->max_bus_bandwidth = hw_clocks.max_bus_bandwidth;

	clocks->max_engine_clock_in_sr = hw_clocks.max_eng_clk;
	clocks->min_engine_clock_in_sr = hw_clocks.min_eng_clk;

	/* Level 0 from DAL means "unspecified"; report the deepest level. */
	if (simple_clocks.level == 0)
		clocks->max_clocks_state = PP_DAL_POWERLEVEL_7;
	else
		clocks->max_clocks_state = simple_clocks.level;

	/* Prefer shallow-sleep clock ranges when they can be queried. */
	if (0 == phm_get_current_shallow_sleep_clocks(hwmgr, &hwmgr->current_ps->hardware, &hw_clocks)) {
		clocks->max_engine_clock_in_sr = hw_clocks.max_eng_clk;
		clocks->min_engine_clock_in_sr = hw_clocks.min_eng_clk;
	}
	mutex_unlock(&hwmgr->smu_lock);
	return 0;
}
1152
1153 static int pp_get_clock_by_type(void *handle, enum amd_pp_clock_type type, struct amd_pp_clocks *clocks)
1154 {
1155         struct pp_hwmgr *hwmgr = handle;
1156         int ret = 0;
1157
1158         if (!hwmgr || !hwmgr->pm_en)
1159                 return -EINVAL;
1160
1161         if (clocks == NULL)
1162                 return -EINVAL;
1163
1164         mutex_lock(&hwmgr->smu_lock);
1165         ret = phm_get_clock_by_type(hwmgr, type, clocks);
1166         mutex_unlock(&hwmgr->smu_lock);
1167         return ret;
1168 }
1169
1170 static int pp_get_clock_by_type_with_latency(void *handle,
1171                 enum amd_pp_clock_type type,
1172                 struct pp_clock_levels_with_latency *clocks)
1173 {
1174         struct pp_hwmgr *hwmgr = handle;
1175         int ret = 0;
1176
1177         if (!hwmgr || !hwmgr->pm_en ||!clocks)
1178                 return -EINVAL;
1179
1180         mutex_lock(&hwmgr->smu_lock);
1181         ret = phm_get_clock_by_type_with_latency(hwmgr, type, clocks);
1182         mutex_unlock(&hwmgr->smu_lock);
1183         return ret;
1184 }
1185
1186 static int pp_get_clock_by_type_with_voltage(void *handle,
1187                 enum amd_pp_clock_type type,
1188                 struct pp_clock_levels_with_voltage *clocks)
1189 {
1190         struct pp_hwmgr *hwmgr = handle;
1191         int ret = 0;
1192
1193         if (!hwmgr || !hwmgr->pm_en ||!clocks)
1194                 return -EINVAL;
1195
1196         mutex_lock(&hwmgr->smu_lock);
1197
1198         ret = phm_get_clock_by_type_with_voltage(hwmgr, type, clocks);
1199
1200         mutex_unlock(&hwmgr->smu_lock);
1201         return ret;
1202 }
1203
1204 static int pp_set_watermarks_for_clocks_ranges(void *handle,
1205                 void *clock_ranges)
1206 {
1207         struct pp_hwmgr *hwmgr = handle;
1208         int ret = 0;
1209
1210         if (!hwmgr || !hwmgr->pm_en || !clock_ranges)
1211                 return -EINVAL;
1212
1213         mutex_lock(&hwmgr->smu_lock);
1214         ret = phm_set_watermarks_for_clocks_ranges(hwmgr,
1215                         clock_ranges);
1216         mutex_unlock(&hwmgr->smu_lock);
1217
1218         return ret;
1219 }
1220
1221 static int pp_display_clock_voltage_request(void *handle,
1222                 struct pp_display_clock_request *clock)
1223 {
1224         struct pp_hwmgr *hwmgr = handle;
1225         int ret = 0;
1226
1227         if (!hwmgr || !hwmgr->pm_en ||!clock)
1228                 return -EINVAL;
1229
1230         mutex_lock(&hwmgr->smu_lock);
1231         ret = phm_display_clock_voltage_request(hwmgr, clock);
1232         mutex_unlock(&hwmgr->smu_lock);
1233
1234         return ret;
1235 }
1236
1237 static int pp_get_display_mode_validation_clocks(void *handle,
1238                 struct amd_pp_simple_clock_info *clocks)
1239 {
1240         struct pp_hwmgr *hwmgr = handle;
1241         int ret = 0;
1242
1243         if (!hwmgr || !hwmgr->pm_en ||!clocks)
1244                 return -EINVAL;
1245
1246         clocks->level = PP_DAL_POWERLEVEL_7;
1247
1248         mutex_lock(&hwmgr->smu_lock);
1249
1250         if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_DynamicPatchPowerState))
1251                 ret = phm_get_max_high_clocks(hwmgr, clocks);
1252
1253         mutex_unlock(&hwmgr->smu_lock);
1254         return ret;
1255 }
1256
1257 static int pp_dpm_powergate_mmhub(void *handle)
1258 {
1259         struct pp_hwmgr *hwmgr = handle;
1260
1261         if (!hwmgr || !hwmgr->pm_en)
1262                 return -EINVAL;
1263
1264         if (hwmgr->hwmgr_func->powergate_mmhub == NULL) {
1265                 pr_info_ratelimited("%s was not implemented.\n", __func__);
1266                 return 0;
1267         }
1268
1269         return hwmgr->hwmgr_func->powergate_mmhub(hwmgr);
1270 }
1271
1272 static int pp_dpm_powergate_gfx(void *handle, bool gate)
1273 {
1274         struct pp_hwmgr *hwmgr = handle;
1275
1276         if (!hwmgr || !hwmgr->pm_en)
1277                 return 0;
1278
1279         if (hwmgr->hwmgr_func->powergate_gfx == NULL) {
1280                 pr_info_ratelimited("%s was not implemented.\n", __func__);
1281                 return 0;
1282         }
1283
1284         return hwmgr->hwmgr_func->powergate_gfx(hwmgr, gate);
1285 }
1286
1287 static void pp_dpm_powergate_acp(void *handle, bool gate)
1288 {
1289         struct pp_hwmgr *hwmgr = handle;
1290
1291         if (!hwmgr || !hwmgr->pm_en)
1292                 return;
1293
1294         if (hwmgr->hwmgr_func->powergate_acp == NULL) {
1295                 pr_info_ratelimited("%s was not implemented.\n", __func__);
1296                 return;
1297         }
1298
1299         hwmgr->hwmgr_func->powergate_acp(hwmgr, gate);
1300 }
1301
1302 static void pp_dpm_powergate_sdma(void *handle, bool gate)
1303 {
1304         struct pp_hwmgr *hwmgr = handle;
1305
1306         if (!hwmgr)
1307                 return;
1308
1309         if (hwmgr->hwmgr_func->powergate_sdma == NULL) {
1310                 pr_info_ratelimited("%s was not implemented.\n", __func__);
1311                 return;
1312         }
1313
1314         hwmgr->hwmgr_func->powergate_sdma(hwmgr, gate);
1315 }
1316
1317 static int pp_set_powergating_by_smu(void *handle,
1318                                 uint32_t block_type, bool gate)
1319 {
1320         int ret = 0;
1321
1322         switch (block_type) {
1323         case AMD_IP_BLOCK_TYPE_UVD:
1324         case AMD_IP_BLOCK_TYPE_VCN:
1325                 pp_dpm_powergate_uvd(handle, gate);
1326                 break;
1327         case AMD_IP_BLOCK_TYPE_VCE:
1328                 pp_dpm_powergate_vce(handle, gate);
1329                 break;
1330         case AMD_IP_BLOCK_TYPE_GMC:
1331                 pp_dpm_powergate_mmhub(handle);
1332                 break;
1333         case AMD_IP_BLOCK_TYPE_GFX:
1334                 ret = pp_dpm_powergate_gfx(handle, gate);
1335                 break;
1336         case AMD_IP_BLOCK_TYPE_ACP:
1337                 pp_dpm_powergate_acp(handle, gate);
1338                 break;
1339         case AMD_IP_BLOCK_TYPE_SDMA:
1340                 pp_dpm_powergate_sdma(handle, gate);
1341                 break;
1342         default:
1343                 break;
1344         }
1345         return ret;
1346 }
1347
1348 static int pp_notify_smu_enable_pwe(void *handle)
1349 {
1350         struct pp_hwmgr *hwmgr = handle;
1351
1352         if (!hwmgr || !hwmgr->pm_en)
1353                 return -EINVAL;
1354
1355         if (hwmgr->hwmgr_func->smus_notify_pwe == NULL) {
1356                 pr_info_ratelimited("%s was not implemented.\n", __func__);
1357                 return -EINVAL;
1358         }
1359
1360         mutex_lock(&hwmgr->smu_lock);
1361         hwmgr->hwmgr_func->smus_notify_pwe(hwmgr);
1362         mutex_unlock(&hwmgr->smu_lock);
1363
1364         return 0;
1365 }
1366
1367 static int pp_enable_mgpu_fan_boost(void *handle)
1368 {
1369         struct pp_hwmgr *hwmgr = handle;
1370
1371         if (!hwmgr)
1372                 return -EINVAL;
1373
1374         if (!hwmgr->pm_en ||
1375              hwmgr->hwmgr_func->enable_mgpu_fan_boost == NULL)
1376                 return 0;
1377
1378         mutex_lock(&hwmgr->smu_lock);
1379         hwmgr->hwmgr_func->enable_mgpu_fan_boost(hwmgr);
1380         mutex_unlock(&hwmgr->smu_lock);
1381
1382         return 0;
1383 }
1384
1385 static int pp_set_min_deep_sleep_dcefclk(void *handle, uint32_t clock)
1386 {
1387         struct pp_hwmgr *hwmgr = handle;
1388
1389         if (!hwmgr || !hwmgr->pm_en)
1390                 return -EINVAL;
1391
1392         if (hwmgr->hwmgr_func->set_min_deep_sleep_dcefclk == NULL) {
1393                 pr_debug("%s was not implemented.\n", __func__);
1394                 return -EINVAL;
1395         }
1396
1397         mutex_lock(&hwmgr->smu_lock);
1398         hwmgr->hwmgr_func->set_min_deep_sleep_dcefclk(hwmgr, clock);
1399         mutex_unlock(&hwmgr->smu_lock);
1400
1401         return 0;
1402 }
1403
1404 static int pp_set_hard_min_dcefclk_by_freq(void *handle, uint32_t clock)
1405 {
1406         struct pp_hwmgr *hwmgr = handle;
1407
1408         if (!hwmgr || !hwmgr->pm_en)
1409                 return -EINVAL;
1410
1411         if (hwmgr->hwmgr_func->set_hard_min_dcefclk_by_freq == NULL) {
1412                 pr_debug("%s was not implemented.\n", __func__);
1413                 return -EINVAL;
1414         }
1415
1416         mutex_lock(&hwmgr->smu_lock);
1417         hwmgr->hwmgr_func->set_hard_min_dcefclk_by_freq(hwmgr, clock);
1418         mutex_unlock(&hwmgr->smu_lock);
1419
1420         return 0;
1421 }
1422
1423 static int pp_set_hard_min_fclk_by_freq(void *handle, uint32_t clock)
1424 {
1425         struct pp_hwmgr *hwmgr = handle;
1426
1427         if (!hwmgr || !hwmgr->pm_en)
1428                 return -EINVAL;
1429
1430         if (hwmgr->hwmgr_func->set_hard_min_fclk_by_freq == NULL) {
1431                 pr_debug("%s was not implemented.\n", __func__);
1432                 return -EINVAL;
1433         }
1434
1435         mutex_lock(&hwmgr->smu_lock);
1436         hwmgr->hwmgr_func->set_hard_min_fclk_by_freq(hwmgr, clock);
1437         mutex_unlock(&hwmgr->smu_lock);
1438
1439         return 0;
1440 }
1441
1442 static int pp_set_active_display_count(void *handle, uint32_t count)
1443 {
1444         struct pp_hwmgr *hwmgr = handle;
1445         int ret = 0;
1446
1447         if (!hwmgr || !hwmgr->pm_en)
1448                 return -EINVAL;
1449
1450         mutex_lock(&hwmgr->smu_lock);
1451         ret = phm_set_active_display_count(hwmgr, count);
1452         mutex_unlock(&hwmgr->smu_lock);
1453
1454         return ret;
1455 }
1456
1457 static int pp_get_asic_baco_capability(void *handle, bool *cap)
1458 {
1459         struct pp_hwmgr *hwmgr = handle;
1460
1461         *cap = false;
1462         if (!hwmgr)
1463                 return -EINVAL;
1464
1465         if (!(hwmgr->not_vf && amdgpu_dpm) ||
1466                 !hwmgr->hwmgr_func->get_asic_baco_capability)
1467                 return 0;
1468
1469         mutex_lock(&hwmgr->smu_lock);
1470         hwmgr->hwmgr_func->get_asic_baco_capability(hwmgr, cap);
1471         mutex_unlock(&hwmgr->smu_lock);
1472
1473         return 0;
1474 }
1475
1476 static int pp_get_asic_baco_state(void *handle, int *state)
1477 {
1478         struct pp_hwmgr *hwmgr = handle;
1479
1480         if (!hwmgr)
1481                 return -EINVAL;
1482
1483         if (!hwmgr->pm_en || !hwmgr->hwmgr_func->get_asic_baco_state)
1484                 return 0;
1485
1486         mutex_lock(&hwmgr->smu_lock);
1487         hwmgr->hwmgr_func->get_asic_baco_state(hwmgr, (enum BACO_STATE *)state);
1488         mutex_unlock(&hwmgr->smu_lock);
1489
1490         return 0;
1491 }
1492
1493 static int pp_set_asic_baco_state(void *handle, int state)
1494 {
1495         struct pp_hwmgr *hwmgr = handle;
1496
1497         if (!hwmgr)
1498                 return -EINVAL;
1499
1500         if (!(hwmgr->not_vf && amdgpu_dpm) ||
1501                 !hwmgr->hwmgr_func->set_asic_baco_state)
1502                 return 0;
1503
1504         mutex_lock(&hwmgr->smu_lock);
1505         hwmgr->hwmgr_func->set_asic_baco_state(hwmgr, (enum BACO_STATE)state);
1506         mutex_unlock(&hwmgr->smu_lock);
1507
1508         return 0;
1509 }
1510
1511 static int pp_get_ppfeature_status(void *handle, char *buf)
1512 {
1513         struct pp_hwmgr *hwmgr = handle;
1514         int ret = 0;
1515
1516         if (!hwmgr || !hwmgr->pm_en || !buf)
1517                 return -EINVAL;
1518
1519         if (hwmgr->hwmgr_func->get_ppfeature_status == NULL) {
1520                 pr_info_ratelimited("%s was not implemented.\n", __func__);
1521                 return -EINVAL;
1522         }
1523
1524         mutex_lock(&hwmgr->smu_lock);
1525         ret = hwmgr->hwmgr_func->get_ppfeature_status(hwmgr, buf);
1526         mutex_unlock(&hwmgr->smu_lock);
1527
1528         return ret;
1529 }
1530
1531 static int pp_set_ppfeature_status(void *handle, uint64_t ppfeature_masks)
1532 {
1533         struct pp_hwmgr *hwmgr = handle;
1534         int ret = 0;
1535
1536         if (!hwmgr || !hwmgr->pm_en)
1537                 return -EINVAL;
1538
1539         if (hwmgr->hwmgr_func->set_ppfeature_status == NULL) {
1540                 pr_info_ratelimited("%s was not implemented.\n", __func__);
1541                 return -EINVAL;
1542         }
1543
1544         mutex_lock(&hwmgr->smu_lock);
1545         ret = hwmgr->hwmgr_func->set_ppfeature_status(hwmgr, ppfeature_masks);
1546         mutex_unlock(&hwmgr->smu_lock);
1547
1548         return ret;
1549 }
1550
1551 static int pp_asic_reset_mode_2(void *handle)
1552 {
1553         struct pp_hwmgr *hwmgr = handle;
1554                 int ret = 0;
1555
1556         if (!hwmgr || !hwmgr->pm_en)
1557                 return -EINVAL;
1558
1559         if (hwmgr->hwmgr_func->asic_reset == NULL) {
1560                 pr_info_ratelimited("%s was not implemented.\n", __func__);
1561                 return -EINVAL;
1562         }
1563
1564         mutex_lock(&hwmgr->smu_lock);
1565         ret = hwmgr->hwmgr_func->asic_reset(hwmgr, SMU_ASIC_RESET_MODE_2);
1566         mutex_unlock(&hwmgr->smu_lock);
1567
1568         return ret;
1569 }
1570
1571 static int pp_smu_i2c_bus_access(void *handle, bool acquire)
1572 {
1573         struct pp_hwmgr *hwmgr = handle;
1574         int ret = 0;
1575
1576         if (!hwmgr || !hwmgr->pm_en)
1577                 return -EINVAL;
1578
1579         if (hwmgr->hwmgr_func->smu_i2c_bus_access == NULL) {
1580                 pr_info_ratelimited("%s was not implemented.\n", __func__);
1581                 return -EINVAL;
1582         }
1583
1584         mutex_lock(&hwmgr->smu_lock);
1585         ret = hwmgr->hwmgr_func->smu_i2c_bus_access(hwmgr, acquire);
1586         mutex_unlock(&hwmgr->smu_lock);
1587
1588         return ret;
1589 }
1590
1591 static int pp_set_df_cstate(void *handle, enum pp_df_cstate state)
1592 {
1593         struct pp_hwmgr *hwmgr = handle;
1594
1595         if (!hwmgr)
1596                 return -EINVAL;
1597
1598         if (!hwmgr->pm_en || !hwmgr->hwmgr_func->set_df_cstate)
1599                 return 0;
1600
1601         mutex_lock(&hwmgr->smu_lock);
1602         hwmgr->hwmgr_func->set_df_cstate(hwmgr, state);
1603         mutex_unlock(&hwmgr->smu_lock);
1604
1605         return 0;
1606 }
1607
1608 static int pp_set_xgmi_pstate(void *handle, uint32_t pstate)
1609 {
1610         struct pp_hwmgr *hwmgr = handle;
1611
1612         if (!hwmgr)
1613                 return -EINVAL;
1614
1615         if (!hwmgr->pm_en || !hwmgr->hwmgr_func->set_xgmi_pstate)
1616                 return 0;
1617
1618         mutex_lock(&hwmgr->smu_lock);
1619         hwmgr->hwmgr_func->set_xgmi_pstate(hwmgr, pstate);
1620         mutex_unlock(&hwmgr->smu_lock);
1621
1622         return 0;
1623 }
1624
1625 static ssize_t pp_get_gpu_metrics(void *handle, void **table)
1626 {
1627         struct pp_hwmgr *hwmgr = handle;
1628         ssize_t size;
1629
1630         if (!hwmgr)
1631                 return -EINVAL;
1632
1633         if (!hwmgr->pm_en || !hwmgr->hwmgr_func->get_gpu_metrics)
1634                 return -EOPNOTSUPP;
1635
1636         mutex_lock(&hwmgr->smu_lock);
1637         size = hwmgr->hwmgr_func->get_gpu_metrics(hwmgr, table);
1638         mutex_unlock(&hwmgr->smu_lock);
1639
1640         return size;
1641 }
1642
1643 static int pp_gfx_state_change_set(void *handle, uint32_t state)
1644 {
1645         struct pp_hwmgr *hwmgr = handle;
1646
1647         if (!hwmgr || !hwmgr->pm_en)
1648                 return -EINVAL;
1649
1650         if (hwmgr->hwmgr_func->gfx_state_change == NULL) {
1651                 pr_info_ratelimited("%s was not implemented.\n", __func__);
1652                 return -EINVAL;
1653         }
1654
1655         mutex_lock(&hwmgr->smu_lock);
1656         hwmgr->hwmgr_func->gfx_state_change(hwmgr, state);
1657         mutex_unlock(&hwmgr->smu_lock);
1658         return 0;
1659 }
1660
1661 static int pp_get_prv_buffer_details(void *handle, void **addr, size_t *size)
1662 {
1663         struct pp_hwmgr *hwmgr = handle;
1664         struct amdgpu_device *adev = hwmgr->adev;
1665
1666         if (!addr || !size)
1667                 return -EINVAL;
1668
1669         *addr = NULL;
1670         *size = 0;
1671         mutex_lock(&hwmgr->smu_lock);
1672         if (adev->pm.smu_prv_buffer) {
1673                 amdgpu_bo_kmap(adev->pm.smu_prv_buffer, addr);
1674                 *size = adev->pm.smu_prv_buffer_size;
1675         }
1676         mutex_unlock(&hwmgr->smu_lock);
1677
1678         return 0;
1679 }
1680
/*
 * Powerplay implementation of the amd_pm_funcs dispatch table. Each entry's
 * opaque handle is the struct pp_hwmgr allocated in amd_powerplay_create().
 */
static const struct amd_pm_funcs pp_dpm_funcs = {
	.load_firmware = pp_dpm_load_fw,
	.wait_for_fw_loading_complete = pp_dpm_fw_loading_complete,
	.force_performance_level = pp_dpm_force_performance_level,
	.get_performance_level = pp_dpm_get_performance_level,
	.get_current_power_state = pp_dpm_get_current_power_state,
	.dispatch_tasks = pp_dpm_dispatch_tasks,
	.set_fan_control_mode = pp_dpm_set_fan_control_mode,
	.get_fan_control_mode = pp_dpm_get_fan_control_mode,
	.set_fan_speed_pwm = pp_dpm_set_fan_speed_pwm,
	.get_fan_speed_pwm = pp_dpm_get_fan_speed_pwm,
	.get_fan_speed_rpm = pp_dpm_get_fan_speed_rpm,
	.set_fan_speed_rpm = pp_dpm_set_fan_speed_rpm,
	.get_pp_num_states = pp_dpm_get_pp_num_states,
	.get_pp_table = pp_dpm_get_pp_table,
	.set_pp_table = pp_dpm_set_pp_table,
	.force_clock_level = pp_dpm_force_clock_level,
	.print_clock_levels = pp_dpm_print_clock_levels,
	.get_sclk_od = pp_dpm_get_sclk_od,
	.set_sclk_od = pp_dpm_set_sclk_od,
	.get_mclk_od = pp_dpm_get_mclk_od,
	.set_mclk_od = pp_dpm_set_mclk_od,
	.read_sensor = pp_dpm_read_sensor,
	.get_vce_clock_state = pp_dpm_get_vce_clock_state,
	.switch_power_profile = pp_dpm_switch_power_profile,
	.set_clockgating_by_smu = pp_set_clockgating_by_smu,
	.set_powergating_by_smu = pp_set_powergating_by_smu,
	.get_power_profile_mode = pp_get_power_profile_mode,
	.set_power_profile_mode = pp_set_power_profile_mode,
	.set_fine_grain_clk_vol = pp_set_fine_grain_clk_vol,
	.odn_edit_dpm_table = pp_odn_edit_dpm_table,
	.set_mp1_state = pp_dpm_set_mp1_state,
	.set_power_limit = pp_set_power_limit,
	.get_power_limit = pp_get_power_limit,
/* export to DC */
	.get_sclk = pp_dpm_get_sclk,
	.get_mclk = pp_dpm_get_mclk,
	.display_configuration_change = pp_display_configuration_change,
	.get_display_power_level = pp_get_display_power_level,
	.get_current_clocks = pp_get_current_clocks,
	.get_clock_by_type = pp_get_clock_by_type,
	.get_clock_by_type_with_latency = pp_get_clock_by_type_with_latency,
	.get_clock_by_type_with_voltage = pp_get_clock_by_type_with_voltage,
	.set_watermarks_for_clocks_ranges = pp_set_watermarks_for_clocks_ranges,
	.display_clock_voltage_request = pp_display_clock_voltage_request,
	.get_display_mode_validation_clocks = pp_get_display_mode_validation_clocks,
	.notify_smu_enable_pwe = pp_notify_smu_enable_pwe,
	.enable_mgpu_fan_boost = pp_enable_mgpu_fan_boost,
	.set_active_display_count = pp_set_active_display_count,
	.set_min_deep_sleep_dcefclk = pp_set_min_deep_sleep_dcefclk,
	.set_hard_min_dcefclk_by_freq = pp_set_hard_min_dcefclk_by_freq,
	.set_hard_min_fclk_by_freq = pp_set_hard_min_fclk_by_freq,
	.get_asic_baco_capability = pp_get_asic_baco_capability,
	.get_asic_baco_state = pp_get_asic_baco_state,
	.set_asic_baco_state = pp_set_asic_baco_state,
	.get_ppfeature_status = pp_get_ppfeature_status,
	.set_ppfeature_status = pp_set_ppfeature_status,
	.asic_reset_mode_2 = pp_asic_reset_mode_2,
	.smu_i2c_bus_access = pp_smu_i2c_bus_access,
	.set_df_cstate = pp_set_df_cstate,
	.set_xgmi_pstate = pp_set_xgmi_pstate,
	.get_gpu_metrics = pp_get_gpu_metrics,
	.gfx_state_change_set = pp_gfx_state_change_set,
	.get_smu_prv_buf_details = pp_get_prv_buffer_details,
};