1 /*
2  * Copyright 2019 Advanced Micro Devices, Inc.
3  *
4  * Permission is hereby granted, free of charge, to any person obtaining a
5  * copy of this software and associated documentation files (the "Software"),
6  * to deal in the Software without restriction, including without limitation
7  * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8  * and/or sell copies of the Software, and to permit persons to whom the
9  * Software is furnished to do so, subject to the following conditions:
10  *
11  * The above copyright notice and this permission notice shall be included in
12  * all copies or substantial portions of the Software.
13  *
14  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
17  * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18  * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19  * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20  * OTHER DEALINGS IN THE SOFTWARE.
21  */
22
23 #define SWSMU_CODE_LAYER_L1
24
25 #include <linux/firmware.h>
26 #include <linux/pci.h>
27
28 #include "amdgpu.h"
29 #include "amdgpu_smu.h"
30 #include "smu_internal.h"
31 #include "atom.h"
32 #include "arcturus_ppt.h"
33 #include "navi10_ppt.h"
34 #include "sienna_cichlid_ppt.h"
35 #include "renoir_ppt.h"
36 #include "vangogh_ppt.h"
37 #include "aldebaran_ppt.h"
38 #include "amd_pcie.h"
39
40 /*
41  * DO NOT use these for err/warn/info/debug messages.
42  * Use dev_err, dev_warn, dev_info and dev_dbg instead.
43  * They are more MGPU friendly.
44  */
45 #undef pr_err
46 #undef pr_warn
47 #undef pr_info
48 #undef pr_debug
49
50 static const struct amd_pm_funcs swsmu_pm_funcs;
51 static int smu_force_smuclk_levels(struct smu_context *smu,
52                                    enum smu_clk_type clk_type,
53                                    uint32_t mask);
54
55 int smu_sys_get_pp_feature_mask(void *handle, char *buf)
56 {
57         struct smu_context *smu = handle;
58         int size = 0;
59
60         if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
61                 return -EOPNOTSUPP;
62
63         mutex_lock(&smu->mutex);
64
65         size = smu_get_pp_feature_mask(smu, buf);
66
67         mutex_unlock(&smu->mutex);
68
69         return size;
70 }
71
72 int smu_sys_set_pp_feature_mask(void *handle, uint64_t new_mask)
73 {
74         struct smu_context *smu = handle;
75         int ret = 0;
76
77         if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
78                 return -EOPNOTSUPP;
79
80         mutex_lock(&smu->mutex);
81
82         ret = smu_set_pp_feature_mask(smu, new_mask);
83
84         mutex_unlock(&smu->mutex);
85
86         return ret;
87 }
88
89 int smu_get_status_gfxoff(struct amdgpu_device *adev, uint32_t *value)
90 {
91         int ret = 0;
92         struct smu_context *smu = &adev->smu;
93
94         if (is_support_sw_smu(adev) && smu->ppt_funcs->get_gfx_off_status)
95                 *value = smu_get_gfx_off_status(smu);
96         else
97                 ret = -EINVAL;
98
99         return ret;
100 }
101
102 int smu_set_soft_freq_range(struct smu_context *smu,
103                             enum smu_clk_type clk_type,
104                             uint32_t min,
105                             uint32_t max)
106 {
107         int ret = 0;
108
109         mutex_lock(&smu->mutex);
110
111         if (smu->ppt_funcs->set_soft_freq_limited_range)
112                 ret = smu->ppt_funcs->set_soft_freq_limited_range(smu,
113                                                                   clk_type,
114                                                                   min,
115                                                                   max);
116
117         mutex_unlock(&smu->mutex);
118
119         return ret;
120 }
121
122 int smu_get_dpm_freq_range(struct smu_context *smu,
123                            enum smu_clk_type clk_type,
124                            uint32_t *min,
125                            uint32_t *max)
126 {
127         int ret = 0;
128
129         if (!min && !max)
130                 return -EINVAL;
131
132         mutex_lock(&smu->mutex);
133
134         if (smu->ppt_funcs->get_dpm_ultimate_freq)
135                 ret = smu->ppt_funcs->get_dpm_ultimate_freq(smu,
136                                                             clk_type,
137                                                             min,
138                                                             max);
139
140         mutex_unlock(&smu->mutex);
141
142         return ret;
143 }
144
145 u32 smu_get_mclk(void *handle, bool low)
146 {
147         struct smu_context *smu = handle;
148         uint32_t clk_freq;
149         int ret = 0;
150
151         ret = smu_get_dpm_freq_range(smu, SMU_UCLK,
152                                      low ? &clk_freq : NULL,
153                                      !low ? &clk_freq : NULL);
154         if (ret)
155                 return 0;
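        /* clk_freq is in MHz; the legacy powerplay interface expects 10 kHz
         * units, hence the multiplication by 100 below.
         */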
156         return clk_freq * 100;
157 }
158
159 u32 smu_get_sclk(void *handle, bool low)
160 {
161         struct smu_context *smu = handle;
162         uint32_t clk_freq;
163         int ret = 0;
164
165         ret = smu_get_dpm_freq_range(smu, SMU_GFXCLK,
166                                      low ? &clk_freq : NULL,
167                                      !low ? &clk_freq : NULL);
168         if (ret)
169                 return 0;
170         return clk_freq * 100;
171 }
172
173 static int smu_dpm_set_vcn_enable_locked(struct smu_context *smu,
174                                          bool enable)
175 {
176         struct smu_power_context *smu_power = &smu->smu_power;
177         struct smu_power_gate *power_gate = &smu_power->power_gate;
178         int ret = 0;
179
180         if (!smu->ppt_funcs->dpm_set_vcn_enable)
181                 return 0;
182
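        /*
         * vcn_gated is 1 when VCN is power gated; skip the request if VCN is
         * already in the requested power state.
         */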
183         if (atomic_read(&power_gate->vcn_gated) ^ enable)
184                 return 0;
185
186         ret = smu->ppt_funcs->dpm_set_vcn_enable(smu, enable);
187         if (!ret)
188                 atomic_set(&power_gate->vcn_gated, !enable);
189
190         return ret;
191 }
192
193 static int smu_dpm_set_vcn_enable(struct smu_context *smu,
194                                   bool enable)
195 {
196         struct smu_power_context *smu_power = &smu->smu_power;
197         struct smu_power_gate *power_gate = &smu_power->power_gate;
198         int ret = 0;
199
200         mutex_lock(&power_gate->vcn_gate_lock);
201
202         ret = smu_dpm_set_vcn_enable_locked(smu, enable);
203
204         mutex_unlock(&power_gate->vcn_gate_lock);
205
206         return ret;
207 }
208
209 static int smu_dpm_set_jpeg_enable_locked(struct smu_context *smu,
210                                           bool enable)
211 {
212         struct smu_power_context *smu_power = &smu->smu_power;
213         struct smu_power_gate *power_gate = &smu_power->power_gate;
214         int ret = 0;
215
216         if (!smu->ppt_funcs->dpm_set_jpeg_enable)
217                 return 0;
218
219         if (atomic_read(&power_gate->jpeg_gated) ^ enable)
220                 return 0;
221
222         ret = smu->ppt_funcs->dpm_set_jpeg_enable(smu, enable);
223         if (!ret)
224                 atomic_set(&power_gate->jpeg_gated, !enable);
225
226         return ret;
227 }
228
229 static int smu_dpm_set_jpeg_enable(struct smu_context *smu,
230                                    bool enable)
231 {
232         struct smu_power_context *smu_power = &smu->smu_power;
233         struct smu_power_gate *power_gate = &smu_power->power_gate;
234         int ret = 0;
235
236         mutex_lock(&power_gate->jpeg_gate_lock);
237
238         ret = smu_dpm_set_jpeg_enable_locked(smu, enable);
239
240         mutex_unlock(&power_gate->jpeg_gate_lock);
241
242         return ret;
243 }
244
245 /**
246  * smu_dpm_set_power_gate - power gate/ungate the specific IP block
247  *
248  * @handle:        smu_context pointer
249  * @block_type: the IP block to power gate/ungate
250  * @gate:       to power gate if true, ungate otherwise
251  *
252  * This API uses no smu->mutex lock protection due to:
253  * 1. It is either called by other IP blocks (gfx/sdma/vcn/uvd/vce),
254  *    where the caller guarantees race-free access.
255  * 2. Or it is called on a user request to change power_dpm_force_performance_level.
256  *    In that case, the smu->mutex lock protection is already enforced in
257  *    the parent API smu_force_performance_level of the call path.
258  */
259 int smu_dpm_set_power_gate(void *handle, uint32_t block_type,
260                            bool gate)
261 {
262         struct smu_context *smu = handle;
263         int ret = 0;
264
265         if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
266                 return -EOPNOTSUPP;
267
268         switch (block_type) {
269         /*
270          * Some legacy code of amdgpu_vcn.c and vcn_v2*.c still uses
271          * AMD_IP_BLOCK_TYPE_UVD for VCN. So, here both of them are kept.
272          */
273         case AMD_IP_BLOCK_TYPE_UVD:
274         case AMD_IP_BLOCK_TYPE_VCN:
275                 ret = smu_dpm_set_vcn_enable(smu, !gate);
276                 if (ret)
277                         dev_err(smu->adev->dev, "Failed to power %s VCN!\n",
278                                 gate ? "gate" : "ungate");
279                 break;
280         case AMD_IP_BLOCK_TYPE_GFX:
281                 ret = smu_gfx_off_control(smu, gate);
282                 if (ret)
283                         dev_err(smu->adev->dev, "Failed to %s gfxoff!\n",
284                                 gate ? "enable" : "disable");
285                 break;
286         case AMD_IP_BLOCK_TYPE_SDMA:
287                 ret = smu_powergate_sdma(smu, gate);
288                 if (ret)
289                         dev_err(smu->adev->dev, "Failed to power %s SDMA!\n",
290                                 gate ? "gate" : "ungate");
291                 break;
292         case AMD_IP_BLOCK_TYPE_JPEG:
293                 ret = smu_dpm_set_jpeg_enable(smu, !gate);
294                 if (ret)
295                         dev_err(smu->adev->dev, "Failed to power %s JPEG!\n",
296                                 gate ? "gate" : "ungate");
297                 break;
298         default:
299                 dev_err(smu->adev->dev, "Unsupported block type!\n");
300                 return -EINVAL;
301         }
302
303         return ret;
304 }
305
306 /**
307  * smu_set_user_clk_dependencies - set user profile clock dependencies
308  *
309  * @smu:        smu_context pointer
310  * @clk:        enum smu_clk_type type
311  *
312  * Enable/Disable the clock dependency for the @clk type.
313  */
314 static void smu_set_user_clk_dependencies(struct smu_context *smu, enum smu_clk_type clk)
315 {
316         if (smu->adev->in_suspend)
317                 return;
318
319         if (clk == SMU_MCLK) {
320                 smu->user_dpm_profile.clk_dependency = 0;
321                 smu->user_dpm_profile.clk_dependency = BIT(SMU_FCLK) | BIT(SMU_SOCCLK);
322         } else if (clk == SMU_FCLK) {
323                 /* MCLK takes precedence over FCLK */
324                 if (smu->user_dpm_profile.clk_dependency == (BIT(SMU_FCLK) | BIT(SMU_SOCCLK)))
325                         return;
326
327                 smu->user_dpm_profile.clk_dependency = 0;
328                 smu->user_dpm_profile.clk_dependency = BIT(SMU_MCLK) | BIT(SMU_SOCCLK);
329         } else if (clk == SMU_SOCCLK) {
330                 /* MCLK takes precedence over SOCCLK */
331                 if (smu->user_dpm_profile.clk_dependency == (BIT(SMU_FCLK) | BIT(SMU_SOCCLK)))
332                         return;
333
334                 smu->user_dpm_profile.clk_dependency = 0;
335                 smu->user_dpm_profile.clk_dependency = BIT(SMU_MCLK) | BIT(SMU_FCLK);
336         } else
337                 /* Add clk dependencies here, if any */
338                 return;
339 }
340
341 /**
342  * smu_restore_dpm_user_profile - reinstate user dpm profile
343  *
344  * @smu:        smu_context pointer
345  *
346  * Restore the saved user power configurations, including power limit,
347  * clock frequencies, fan control mode and fan speed.
348  */
349 static void smu_restore_dpm_user_profile(struct smu_context *smu)
350 {
351         struct smu_dpm_context *smu_dpm_ctx = &(smu->smu_dpm);
352         int ret = 0;
353
354         if (!smu->adev->in_suspend)
355                 return;
356
357         if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
358                 return;
359
360         /* Enable restore flag */
361         smu->user_dpm_profile.flags |= SMU_DPM_USER_PROFILE_RESTORE;
362
363         /* set the user dpm power limit */
364         if (smu->user_dpm_profile.power_limit) {
365                 ret = smu_set_power_limit(smu, smu->user_dpm_profile.power_limit);
366                 if (ret)
367                         dev_err(smu->adev->dev, "Failed to set power limit value\n");
368         }
369
370         /* set the user dpm clock configurations */
371         if (smu_dpm_ctx->dpm_level == AMD_DPM_FORCED_LEVEL_MANUAL) {
372                 enum smu_clk_type clk_type;
373
374                 for (clk_type = 0; clk_type < SMU_CLK_COUNT; clk_type++) {
375                         /*
376                          * Iterate over smu clk type and force the saved user clk
377                          * configs, skip if clock dependency is enabled
378                          */
379                         if (!(smu->user_dpm_profile.clk_dependency & BIT(clk_type)) &&
380                                         smu->user_dpm_profile.clk_mask[clk_type]) {
381                                 ret = smu_force_smuclk_levels(smu, clk_type,
382                                                 smu->user_dpm_profile.clk_mask[clk_type]);
383                                 if (ret)
384                                         dev_err(smu->adev->dev,
385                                                 "Failed to set clock type = %d\n", clk_type);
386                         }
387                 }
388         }
389
390         /* set the user dpm fan configurations */
391         if (smu->user_dpm_profile.fan_mode == AMD_FAN_CTRL_MANUAL) {
392                 ret = smu_set_fan_control_mode(smu, smu->user_dpm_profile.fan_mode);
393                 if (ret) {
394                         dev_err(smu->adev->dev, "Failed to set manual fan control mode\n");
395                         return;
396                 }
397
398                 if (!ret && smu->user_dpm_profile.fan_speed_percent) {
399                         ret = smu_set_fan_speed_percent(smu, smu->user_dpm_profile.fan_speed_percent);
400                         if (ret)
401                                 dev_err(smu->adev->dev, "Failed to set manual fan speed\n");
402                 }
403         }
404
405         /* Disable restore flag */
406         smu->user_dpm_profile.flags &= ~SMU_DPM_USER_PROFILE_RESTORE;
407 }
408
409 int smu_get_power_num_states(void *handle,
410                              struct pp_states_info *state_info)
411 {
412         if (!state_info)
413                 return -EINVAL;
414
415         /* power states are not supported */
416         memset(state_info, 0, sizeof(struct pp_states_info));
417         state_info->nums = 1;
418         state_info->states[0] = POWER_STATE_TYPE_DEFAULT;
419
420         return 0;
421 }
422
423 bool is_support_sw_smu(struct amdgpu_device *adev)
424 {
425         if (adev->asic_type >= CHIP_ARCTURUS)
426                 return true;
427
428         return false;
429 }
430
431 bool is_support_cclk_dpm(struct amdgpu_device *adev)
432 {
433         struct smu_context *smu = &adev->smu;
434
435         if (!is_support_sw_smu(adev))
436                 return false;
437
438         if (!smu_feature_is_enabled(smu, SMU_FEATURE_CCLK_DPM_BIT))
439                 return false;
440
441         return true;
442 }
443
444
445 int smu_sys_get_pp_table(void *handle, char **table)
446 {
447         struct smu_context *smu = handle;
448         struct smu_table_context *smu_table = &smu->smu_table;
449         uint32_t powerplay_table_size;
450
451         if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
452                 return -EOPNOTSUPP;
453
454         if (!smu_table->power_play_table && !smu_table->hardcode_pptable)
455                 return -EINVAL;
456
457         mutex_lock(&smu->mutex);
458
459         if (smu_table->hardcode_pptable)
460                 *table = smu_table->hardcode_pptable;
461         else
462                 *table = smu_table->power_play_table;
463
464         powerplay_table_size = smu_table->power_play_table_size;
465
466         mutex_unlock(&smu->mutex);
467
468         return powerplay_table_size;
469 }
470
471 int smu_sys_set_pp_table(void *handle, const char *buf, size_t size)
472 {
473         struct smu_context *smu = handle;
474         struct smu_table_context *smu_table = &smu->smu_table;
475         ATOM_COMMON_TABLE_HEADER *header = (ATOM_COMMON_TABLE_HEADER *)buf;
476         int ret = 0;
477
478         if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
479                 return -EOPNOTSUPP;
480
481         if (header->usStructureSize != size) {
482                 dev_err(smu->adev->dev, "pp table size not matched !\n");
483                 return -EIO;
484         }
485
486         mutex_lock(&smu->mutex);
487         if (!smu_table->hardcode_pptable)
488                 smu_table->hardcode_pptable = kzalloc(size, GFP_KERNEL);
489         if (!smu_table->hardcode_pptable) {
490                 ret = -ENOMEM;
491                 goto failed;
492         }
493
494         memcpy(smu_table->hardcode_pptable, buf, size);
495         smu_table->power_play_table = smu_table->hardcode_pptable;
496         smu_table->power_play_table_size = size;
497
498         /*
499          * A special hw_fini action (on Navi1x, the DPM disablement is
500          * skipped) may be needed for custom pptable uploading.
501          */
502         smu->uploading_custom_pp_table = true;
503
504         ret = smu_reset(smu);
505         if (ret)
506                 dev_info(smu->adev->dev, "smu reset failed, ret = %d\n", ret);
507
508         smu->uploading_custom_pp_table = false;
509
510 failed:
511         mutex_unlock(&smu->mutex);
512         return ret;
513 }
514
515 static int smu_get_driver_allowed_feature_mask(struct smu_context *smu)
516 {
517         struct smu_feature *feature = &smu->smu_feature;
518         int ret = 0;
519         uint32_t allowed_feature_mask[SMU_FEATURE_MAX/32];
520
521         bitmap_zero(feature->allowed, SMU_FEATURE_MAX);
522
523         ret = smu_get_allowed_feature_mask(smu, allowed_feature_mask,
524                                              SMU_FEATURE_MAX/32);
525         if (ret)
526                 return ret;
527
528         bitmap_or(feature->allowed, feature->allowed,
529                       (unsigned long *)allowed_feature_mask,
530                       feature->feature_num);
531
532         return ret;
533 }
534
535 static int smu_set_funcs(struct amdgpu_device *adev)
536 {
537         struct smu_context *smu = &adev->smu;
538
539         if (adev->pm.pp_feature & PP_OVERDRIVE_MASK)
540                 smu->od_enabled = true;
541
542         switch (adev->asic_type) {
543         case CHIP_NAVI10:
544         case CHIP_NAVI14:
545         case CHIP_NAVI12:
546                 navi10_set_ppt_funcs(smu);
547                 break;
548         case CHIP_ARCTURUS:
549                 adev->pm.pp_feature &= ~PP_GFXOFF_MASK;
550                 arcturus_set_ppt_funcs(smu);
551                 /* OD is not supported on Arcturus */
552                 smu->od_enabled = false;
553                 break;
554         case CHIP_SIENNA_CICHLID:
555         case CHIP_NAVY_FLOUNDER:
556         case CHIP_DIMGREY_CAVEFISH:
557                 sienna_cichlid_set_ppt_funcs(smu);
558                 break;
559         case CHIP_ALDEBARAN:
560                 aldebaran_set_ppt_funcs(smu);
561                 /* Enable pp_od_clk_voltage node */
562                 smu->od_enabled = true;
563                 break;
564         case CHIP_RENOIR:
565                 renoir_set_ppt_funcs(smu);
566                 break;
567         case CHIP_VANGOGH:
568                 vangogh_set_ppt_funcs(smu);
569                 break;
570         default:
571                 return -EINVAL;
572         }
573
574         return 0;
575 }
576
577 static int smu_early_init(void *handle)
578 {
579         struct amdgpu_device *adev = (struct amdgpu_device *)handle;
580         struct smu_context *smu = &adev->smu;
581
582         smu->adev = adev;
583         smu->pm_enabled = !!amdgpu_dpm;
584         smu->is_apu = false;
585         mutex_init(&smu->mutex);
586         mutex_init(&smu->smu_baco.mutex);
587         smu->smu_baco.state = SMU_BACO_STATE_EXIT;
588         smu->smu_baco.platform_support = false;
589
590         adev->powerplay.pp_handle = smu;
591         adev->powerplay.pp_funcs = &swsmu_pm_funcs;
592
593         return smu_set_funcs(adev);
594 }
595
596 static int smu_set_default_dpm_table(struct smu_context *smu)
597 {
598         struct smu_power_context *smu_power = &smu->smu_power;
599         struct smu_power_gate *power_gate = &smu_power->power_gate;
600         int vcn_gate, jpeg_gate;
601         int ret = 0;
602
603         if (!smu->ppt_funcs->set_default_dpm_table)
604                 return 0;
605
606         mutex_lock(&power_gate->vcn_gate_lock);
607         mutex_lock(&power_gate->jpeg_gate_lock);
608
609         vcn_gate = atomic_read(&power_gate->vcn_gated);
610         jpeg_gate = atomic_read(&power_gate->jpeg_gated);
611
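        /*
         * VCN and JPEG are temporarily powered up while the default DPM tables
         * are set up; the saved gating state is restored afterwards.
         */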
612         ret = smu_dpm_set_vcn_enable_locked(smu, true);
613         if (ret)
614                 goto err0_out;
615
616         ret = smu_dpm_set_jpeg_enable_locked(smu, true);
617         if (ret)
618                 goto err1_out;
619
620         ret = smu->ppt_funcs->set_default_dpm_table(smu);
621         if (ret)
622                 dev_err(smu->adev->dev,
623                         "Failed to setup default dpm clock tables!\n");
624
625         smu_dpm_set_jpeg_enable_locked(smu, !jpeg_gate);
626 err1_out:
627         smu_dpm_set_vcn_enable_locked(smu, !vcn_gate);
628 err0_out:
629         mutex_unlock(&power_gate->jpeg_gate_lock);
630         mutex_unlock(&power_gate->vcn_gate_lock);
631
632         return ret;
633 }
634
635 static int smu_late_init(void *handle)
636 {
637         struct amdgpu_device *adev = (struct amdgpu_device *)handle;
638         struct smu_context *smu = &adev->smu;
639         int ret = 0;
640
641         smu_set_fine_grain_gfx_freq_parameters(smu);
642
643         if (!smu->pm_enabled)
644                 return 0;
645
646         ret = smu_post_init(smu);
647         if (ret) {
648                 dev_err(adev->dev, "Failed to post smu init!\n");
649                 return ret;
650         }
651
652         if (!amdgpu_sriov_vf(adev) || smu->od_enabled) {
653                 ret = smu_set_default_od_settings(smu);
654                 if (ret) {
655                         dev_err(adev->dev, "Failed to setup default OD settings!\n");
656                         return ret;
657                 }
658         }
659
660         ret = smu_populate_umd_state_clk(smu);
661         if (ret) {
662                 dev_err(adev->dev, "Failed to populate UMD state clocks!\n");
663                 return ret;
664         }
665
666         ret = smu_get_asic_power_limits(smu);
667         if (ret) {
668                 dev_err(adev->dev, "Failed to get asic power limits!\n");
669                 return ret;
670         }
671
672         smu_get_unique_id(smu);
673
674         smu_get_fan_parameters(smu);
675
676         smu_handle_task(&adev->smu,
677                         smu->smu_dpm.dpm_level,
678                         AMD_PP_TASK_COMPLETE_INIT,
679                         false);
680
681         smu_restore_dpm_user_profile(smu);
682
683         return 0;
684 }
685
686 static int smu_init_fb_allocations(struct smu_context *smu)
687 {
688         struct amdgpu_device *adev = smu->adev;
689         struct smu_table_context *smu_table = &smu->smu_table;
690         struct smu_table *tables = smu_table->tables;
691         struct smu_table *driver_table = &(smu_table->driver_table);
692         uint32_t max_table_size = 0;
693         int ret, i;
694
695         /* VRAM allocation for tool table */
696         if (tables[SMU_TABLE_PMSTATUSLOG].size) {
697                 ret = amdgpu_bo_create_kernel(adev,
698                                               tables[SMU_TABLE_PMSTATUSLOG].size,
699                                               tables[SMU_TABLE_PMSTATUSLOG].align,
700                                               tables[SMU_TABLE_PMSTATUSLOG].domain,
701                                               &tables[SMU_TABLE_PMSTATUSLOG].bo,
702                                               &tables[SMU_TABLE_PMSTATUSLOG].mc_address,
703                                               &tables[SMU_TABLE_PMSTATUSLOG].cpu_addr);
704                 if (ret) {
705                         dev_err(adev->dev, "VRAM allocation for tool table failed!\n");
706                         return ret;
707                 }
708         }
709
710         /* VRAM allocation for driver table */
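        /*
         * The driver table BO is sized to hold the largest SMC table. The
         * tool (PMSTATUSLOG) table is excluded since it has its own BO
         * allocated above.
         */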
711         for (i = 0; i < SMU_TABLE_COUNT; i++) {
712                 if (tables[i].size == 0)
713                         continue;
714
715                 if (i == SMU_TABLE_PMSTATUSLOG)
716                         continue;
717
718                 if (max_table_size < tables[i].size)
719                         max_table_size = tables[i].size;
720         }
721
722         driver_table->size = max_table_size;
723         driver_table->align = PAGE_SIZE;
724         driver_table->domain = AMDGPU_GEM_DOMAIN_VRAM;
725
726         ret = amdgpu_bo_create_kernel(adev,
727                                       driver_table->size,
728                                       driver_table->align,
729                                       driver_table->domain,
730                                       &driver_table->bo,
731                                       &driver_table->mc_address,
732                                       &driver_table->cpu_addr);
733         if (ret) {
734                 dev_err(adev->dev, "VRAM allocation for driver table failed!\n");
735                 if (tables[SMU_TABLE_PMSTATUSLOG].mc_address)
736                         amdgpu_bo_free_kernel(&tables[SMU_TABLE_PMSTATUSLOG].bo,
737                                               &tables[SMU_TABLE_PMSTATUSLOG].mc_address,
738                                               &tables[SMU_TABLE_PMSTATUSLOG].cpu_addr);
739         }
740
741         return ret;
742 }
743
744 static int smu_fini_fb_allocations(struct smu_context *smu)
745 {
746         struct smu_table_context *smu_table = &smu->smu_table;
747         struct smu_table *tables = smu_table->tables;
748         struct smu_table *driver_table = &(smu_table->driver_table);
749
750         if (tables[SMU_TABLE_PMSTATUSLOG].mc_address)
751                 amdgpu_bo_free_kernel(&tables[SMU_TABLE_PMSTATUSLOG].bo,
752                                       &tables[SMU_TABLE_PMSTATUSLOG].mc_address,
753                                       &tables[SMU_TABLE_PMSTATUSLOG].cpu_addr);
754
755         amdgpu_bo_free_kernel(&driver_table->bo,
756                               &driver_table->mc_address,
757                               &driver_table->cpu_addr);
758
759         return 0;
760 }
761
762 /**
763  * smu_alloc_memory_pool - allocate memory pool in the system memory
764  *
765  * @smu: smu_context pointer
766  *
767  * This memory pool is reserved for SMC use. Its location is reported to the
768  * SMC via the SetSystemVirtualDramAddr and DramLogSetDramAddr messages.
769  *
770  * Returns 0 on success, error on failure.
771  */
772 static int smu_alloc_memory_pool(struct smu_context *smu)
773 {
774         struct amdgpu_device *adev = smu->adev;
775         struct smu_table_context *smu_table = &smu->smu_table;
776         struct smu_table *memory_pool = &smu_table->memory_pool;
777         uint64_t pool_size = smu->pool_size;
778         int ret = 0;
779
780         if (pool_size == SMU_MEMORY_POOL_SIZE_ZERO)
781                 return ret;
782
783         memory_pool->size = pool_size;
784         memory_pool->align = PAGE_SIZE;
785         memory_pool->domain = AMDGPU_GEM_DOMAIN_GTT;
786
787         switch (pool_size) {
788         case SMU_MEMORY_POOL_SIZE_256_MB:
789         case SMU_MEMORY_POOL_SIZE_512_MB:
790         case SMU_MEMORY_POOL_SIZE_1_GB:
791         case SMU_MEMORY_POOL_SIZE_2_GB:
792                 ret = amdgpu_bo_create_kernel(adev,
793                                               memory_pool->size,
794                                               memory_pool->align,
795                                               memory_pool->domain,
796                                               &memory_pool->bo,
797                                               &memory_pool->mc_address,
798                                               &memory_pool->cpu_addr);
799                 if (ret)
800                         dev_err(adev->dev, "VRAM allocation for dramlog failed!\n");
801                 break;
802         default:
803                 break;
804         }
805
806         return ret;
807 }
808
809 static int smu_free_memory_pool(struct smu_context *smu)
810 {
811         struct smu_table_context *smu_table = &smu->smu_table;
812         struct smu_table *memory_pool = &smu_table->memory_pool;
813
814         if (memory_pool->size == SMU_MEMORY_POOL_SIZE_ZERO)
815                 return 0;
816
817         amdgpu_bo_free_kernel(&memory_pool->bo,
818                               &memory_pool->mc_address,
819                               &memory_pool->cpu_addr);
820
821         memset(memory_pool, 0, sizeof(struct smu_table));
822
823         return 0;
824 }
825
826 static int smu_alloc_dummy_read_table(struct smu_context *smu)
827 {
828         struct smu_table_context *smu_table = &smu->smu_table;
829         struct smu_table *dummy_read_1_table =
830                         &smu_table->dummy_read_1_table;
831         struct amdgpu_device *adev = smu->adev;
832         int ret = 0;
833
834         dummy_read_1_table->size = 0x40000;
835         dummy_read_1_table->align = PAGE_SIZE;
836         dummy_read_1_table->domain = AMDGPU_GEM_DOMAIN_VRAM;
837
838         ret = amdgpu_bo_create_kernel(adev,
839                                       dummy_read_1_table->size,
840                                       dummy_read_1_table->align,
841                                       dummy_read_1_table->domain,
842                                       &dummy_read_1_table->bo,
843                                       &dummy_read_1_table->mc_address,
844                                       &dummy_read_1_table->cpu_addr);
845         if (ret)
846                 dev_err(adev->dev, "VRAM allocation for dummy read table failed!\n");
847
848         return ret;
849 }
850
851 static void smu_free_dummy_read_table(struct smu_context *smu)
852 {
853         struct smu_table_context *smu_table = &smu->smu_table;
854         struct smu_table *dummy_read_1_table =
855                         &smu_table->dummy_read_1_table;
856
857
858         amdgpu_bo_free_kernel(&dummy_read_1_table->bo,
859                               &dummy_read_1_table->mc_address,
860                               &dummy_read_1_table->cpu_addr);
861
862         memset(dummy_read_1_table, 0, sizeof(struct smu_table));
863 }
864
865 static int smu_smc_table_sw_init(struct smu_context *smu)
866 {
867         int ret;
868
869         /**
870          * Create the smu_table structure and init SMC tables such as
871          * TABLE_PPTABLE, TABLE_WATERMARKS, TABLE_SMU_METRICS, etc.
872          */
873         ret = smu_init_smc_tables(smu);
874         if (ret) {
875                 dev_err(smu->adev->dev, "Failed to init smc tables!\n");
876                 return ret;
877         }
878
879         /**
880          * Create the smu_power_context structure and allocate the power contexts
881          * (such as smu_dpm_context) that fill the smu_power_context data.
882          */
883         ret = smu_init_power(smu);
884         if (ret) {
885                 dev_err(smu->adev->dev, "Failed to init smu_init_power!\n");
886                 return ret;
887         }
888
889         /*
890          * allocate vram bos to store smc table contents.
891          */
892         ret = smu_init_fb_allocations(smu);
893         if (ret)
894                 return ret;
895
896         ret = smu_alloc_memory_pool(smu);
897         if (ret)
898                 return ret;
899
900         ret = smu_alloc_dummy_read_table(smu);
901         if (ret)
902                 return ret;
903
904         ret = smu_i2c_init(smu, &smu->adev->pm.smu_i2c);
905         if (ret)
906                 return ret;
907
908         return 0;
909 }
910
911 static int smu_smc_table_sw_fini(struct smu_context *smu)
912 {
913         int ret;
914
915         smu_i2c_fini(smu, &smu->adev->pm.smu_i2c);
916
917         smu_free_dummy_read_table(smu);
918
919         ret = smu_free_memory_pool(smu);
920         if (ret)
921                 return ret;
922
923         ret = smu_fini_fb_allocations(smu);
924         if (ret)
925                 return ret;
926
927         ret = smu_fini_power(smu);
928         if (ret) {
929                 dev_err(smu->adev->dev, "Failed to fini smu power!\n");
930                 return ret;
931         }
932
933         ret = smu_fini_smc_tables(smu);
934         if (ret) {
935                 dev_err(smu->adev->dev, "Failed to smu_fini_smc_tables!\n");
936                 return ret;
937         }
938
939         return 0;
940 }
941
942 static void smu_throttling_logging_work_fn(struct work_struct *work)
943 {
944         struct smu_context *smu = container_of(work, struct smu_context,
945                                                throttling_logging_work);
946
947         smu_log_thermal_throttling(smu);
948 }
949
950 static void smu_interrupt_work_fn(struct work_struct *work)
951 {
952         struct smu_context *smu = container_of(work, struct smu_context,
953                                                interrupt_work);
954
955         mutex_lock(&smu->mutex);
956
957         if (smu->ppt_funcs && smu->ppt_funcs->interrupt_work)
958                 smu->ppt_funcs->interrupt_work(smu);
959
960         mutex_unlock(&smu->mutex);
961 }
962
963 static int smu_sw_init(void *handle)
964 {
965         struct amdgpu_device *adev = (struct amdgpu_device *)handle;
966         struct smu_context *smu = &adev->smu;
967         int ret;
968
969         smu->pool_size = adev->pm.smu_prv_buffer_size;
970         smu->smu_feature.feature_num = SMU_FEATURE_MAX;
971         mutex_init(&smu->smu_feature.mutex);
972         bitmap_zero(smu->smu_feature.supported, SMU_FEATURE_MAX);
973         bitmap_zero(smu->smu_feature.enabled, SMU_FEATURE_MAX);
974         bitmap_zero(smu->smu_feature.allowed, SMU_FEATURE_MAX);
975
976         mutex_init(&smu->sensor_lock);
977         mutex_init(&smu->metrics_lock);
978         mutex_init(&smu->message_lock);
979
980         INIT_WORK(&smu->throttling_logging_work, smu_throttling_logging_work_fn);
981         INIT_WORK(&smu->interrupt_work, smu_interrupt_work_fn);
982         atomic64_set(&smu->throttle_int_counter, 0);
983         smu->watermarks_bitmap = 0;
984         smu->power_profile_mode = PP_SMC_POWER_PROFILE_BOOTUP_DEFAULT;
985         smu->default_power_profile_mode = PP_SMC_POWER_PROFILE_BOOTUP_DEFAULT;
986
987         atomic_set(&smu->smu_power.power_gate.vcn_gated, 1);
988         atomic_set(&smu->smu_power.power_gate.jpeg_gated, 1);
989         mutex_init(&smu->smu_power.power_gate.vcn_gate_lock);
990         mutex_init(&smu->smu_power.power_gate.jpeg_gate_lock);
991
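        /*
         * Note: workload_prority[] is expected to be zero-initialized here, so
         * the initial mask selects the bootup default profile whose priority
         * is assigned to 0 just below.
         */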
992         smu->workload_mask = 1 << smu->workload_prority[PP_SMC_POWER_PROFILE_BOOTUP_DEFAULT];
993         smu->workload_prority[PP_SMC_POWER_PROFILE_BOOTUP_DEFAULT] = 0;
994         smu->workload_prority[PP_SMC_POWER_PROFILE_FULLSCREEN3D] = 1;
995         smu->workload_prority[PP_SMC_POWER_PROFILE_POWERSAVING] = 2;
996         smu->workload_prority[PP_SMC_POWER_PROFILE_VIDEO] = 3;
997         smu->workload_prority[PP_SMC_POWER_PROFILE_VR] = 4;
998         smu->workload_prority[PP_SMC_POWER_PROFILE_COMPUTE] = 5;
999         smu->workload_prority[PP_SMC_POWER_PROFILE_CUSTOM] = 6;
1000
1001         smu->workload_setting[0] = PP_SMC_POWER_PROFILE_BOOTUP_DEFAULT;
1002         smu->workload_setting[1] = PP_SMC_POWER_PROFILE_FULLSCREEN3D;
1003         smu->workload_setting[2] = PP_SMC_POWER_PROFILE_POWERSAVING;
1004         smu->workload_setting[3] = PP_SMC_POWER_PROFILE_VIDEO;
1005         smu->workload_setting[4] = PP_SMC_POWER_PROFILE_VR;
1006         smu->workload_setting[5] = PP_SMC_POWER_PROFILE_COMPUTE;
1007         smu->workload_setting[6] = PP_SMC_POWER_PROFILE_CUSTOM;
1008         smu->display_config = &adev->pm.pm_display_cfg;
1009
1010         smu->smu_dpm.dpm_level = AMD_DPM_FORCED_LEVEL_AUTO;
1011         smu->smu_dpm.requested_dpm_level = AMD_DPM_FORCED_LEVEL_AUTO;
1012
1013         ret = smu_init_microcode(smu);
1014         if (ret) {
1015                 dev_err(adev->dev, "Failed to load smu firmware!\n");
1016                 return ret;
1017         }
1018
1019         ret = smu_smc_table_sw_init(smu);
1020         if (ret) {
1021                 dev_err(adev->dev, "Failed to sw init smc table!\n");
1022                 return ret;
1023         }
1024
1025         ret = smu_register_irq_handler(smu);
1026         if (ret) {
1027                 dev_err(adev->dev, "Failed to register smc irq handler!\n");
1028                 return ret;
1029         }
1030
1031         /* If there is no way to query fan control mode, fan control is not supported */
1032         if (!smu->ppt_funcs->get_fan_control_mode)
1033                 smu->adev->pm.no_fan = true;
1034
1035         return 0;
1036 }
1037
1038 static int smu_sw_fini(void *handle)
1039 {
1040         struct amdgpu_device *adev = (struct amdgpu_device *)handle;
1041         struct smu_context *smu = &adev->smu;
1042         int ret;
1043
1044         ret = smu_smc_table_sw_fini(smu);
1045         if (ret) {
1046                 dev_err(adev->dev, "Failed to sw fini smc table!\n");
1047                 return ret;
1048         }
1049
1050         smu_fini_microcode(smu);
1051
1052         return 0;
1053 }
1054
1055 static int smu_get_thermal_temperature_range(struct smu_context *smu)
1056 {
1057         struct amdgpu_device *adev = smu->adev;
1058         struct smu_temperature_range *range =
1059                                 &smu->thermal_range;
1060         int ret = 0;
1061
1062         if (!smu->ppt_funcs->get_thermal_temperature_range)
1063                 return 0;
1064
1065         ret = smu->ppt_funcs->get_thermal_temperature_range(smu, range);
1066         if (ret)
1067                 return ret;
1068
1069         adev->pm.dpm.thermal.min_temp = range->min;
1070         adev->pm.dpm.thermal.max_temp = range->max;
1071         adev->pm.dpm.thermal.max_edge_emergency_temp = range->edge_emergency_max;
1072         adev->pm.dpm.thermal.min_hotspot_temp = range->hotspot_min;
1073         adev->pm.dpm.thermal.max_hotspot_crit_temp = range->hotspot_crit_max;
1074         adev->pm.dpm.thermal.max_hotspot_emergency_temp = range->hotspot_emergency_max;
1075         adev->pm.dpm.thermal.min_mem_temp = range->mem_min;
1076         adev->pm.dpm.thermal.max_mem_crit_temp = range->mem_crit_max;
1077         adev->pm.dpm.thermal.max_mem_emergency_temp = range->mem_emergency_max;
1078
1079         return ret;
1080 }
1081
1082 static int smu_smc_hw_setup(struct smu_context *smu)
1083 {
1084         struct amdgpu_device *adev = smu->adev;
1085         uint32_t pcie_gen = 0, pcie_width = 0;
1086         int ret = 0;
1087
1088         if (adev->in_suspend && smu_is_dpm_running(smu)) {
1089                 dev_info(adev->dev, "dpm has been enabled\n");
1090                 /* re-enabling SMU features is specifically needed for the ASICs below */
1091                 if ((adev->asic_type >= CHIP_SIENNA_CICHLID) &&
1092                     (adev->asic_type <= CHIP_DIMGREY_CAVEFISH))
1093                         ret = smu_system_features_control(smu, true);
1094                 return ret;
1095         }
1096
1097         ret = smu_init_display_count(smu, 0);
1098         if (ret) {
1099                 dev_info(adev->dev, "Failed to pre-set display count as 0!\n");
1100                 return ret;
1101         }
1102
1103         ret = smu_set_driver_table_location(smu);
1104         if (ret) {
1105                 dev_err(adev->dev, "Failed to SetDriverDramAddr!\n");
1106                 return ret;
1107         }
1108
1109         /*
1110          * Set PMSTATUSLOG table bo address with SetToolsDramAddr MSG for tools.
1111          */
1112         ret = smu_set_tool_table_location(smu);
1113         if (ret) {
1114                 dev_err(adev->dev, "Failed to SetToolsDramAddr!\n");
1115                 return ret;
1116         }
1117
1118         /*
1119          * The SetSystemVirtualDramAddr and DramLogSetDramAddr messages are used
1120          * to notify the SMU of the memory pool location.
1121          */
1122         ret = smu_notify_memory_pool_location(smu);
1123         if (ret) {
1124                 dev_err(adev->dev, "Failed to SetDramLogDramAddr!\n");
1125                 return ret;
1126         }
1127
1128         /* smu_dump_pptable(smu); */
1129         /*
1130          * Copy pptable bo in the vram to smc with SMU MSGs such as
1131          * SetDriverDramAddr and TransferTableDram2Smu.
1132          */
1133         ret = smu_write_pptable(smu);
1134         if (ret) {
1135                 dev_err(adev->dev, "Failed to transfer pptable to SMC!\n");
1136                 return ret;
1137         }
1138
1139         /* issue Run*Btc msg */
1140         ret = smu_run_btc(smu);
1141         if (ret)
1142                 return ret;
1143
1144         ret = smu_feature_set_allowed_mask(smu);
1145         if (ret) {
1146                 dev_err(adev->dev, "Failed to set driver allowed features mask!\n");
1147                 return ret;
1148         }
1149
1150         ret = smu_system_features_control(smu, true);
1151         if (ret) {
1152                 dev_err(adev->dev, "Failed to enable requested dpm features!\n");
1153                 return ret;
1154         }
1155
1156         if (!smu_is_dpm_running(smu))
1157                 dev_info(adev->dev, "dpm has been disabled\n");
1158
1159         if (adev->pm.pcie_gen_mask & CAIL_PCIE_LINK_SPEED_SUPPORT_GEN4)
1160                 pcie_gen = 3;
1161         else if (adev->pm.pcie_gen_mask & CAIL_PCIE_LINK_SPEED_SUPPORT_GEN3)
1162                 pcie_gen = 2;
1163         else if (adev->pm.pcie_gen_mask & CAIL_PCIE_LINK_SPEED_SUPPORT_GEN2)
1164                 pcie_gen = 1;
1165         else if (adev->pm.pcie_gen_mask & CAIL_PCIE_LINK_SPEED_SUPPORT_GEN1)
1166                 pcie_gen = 0;
1167
1168         /* Bit 31:16: LCLK DPM level. 0 is DPM0, and 1 is DPM1
1169          * Bit 15:8:  PCIE GEN, 0 to 3 corresponds to GEN1 to GEN4
1170          * Bit 7:0:   PCIE lane width, 1 to 7 corresponds to x1 to x32
1171          */
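        /*
         * For example, a platform that supports Gen4 and x16 ends up with
         * pcie_gen = 3 and pcie_width = 6 from the mappings above and below.
         */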
1172         if (adev->pm.pcie_mlw_mask & CAIL_PCIE_LINK_WIDTH_SUPPORT_X16)
1173                 pcie_width = 6;
1174         else if (adev->pm.pcie_mlw_mask & CAIL_PCIE_LINK_WIDTH_SUPPORT_X12)
1175                 pcie_width = 5;
1176         else if (adev->pm.pcie_mlw_mask & CAIL_PCIE_LINK_WIDTH_SUPPORT_X8)
1177                 pcie_width = 4;
1178         else if (adev->pm.pcie_mlw_mask & CAIL_PCIE_LINK_WIDTH_SUPPORT_X4)
1179                 pcie_width = 3;
1180         else if (adev->pm.pcie_mlw_mask & CAIL_PCIE_LINK_WIDTH_SUPPORT_X2)
1181                 pcie_width = 2;
1182         else if (adev->pm.pcie_mlw_mask & CAIL_PCIE_LINK_WIDTH_SUPPORT_X1)
1183                 pcie_width = 1;
1184         ret = smu_update_pcie_parameters(smu, pcie_gen, pcie_width);
1185         if (ret) {
1186                 dev_err(adev->dev, "Attempt to override pcie params failed!\n");
1187                 return ret;
1188         }
1189
1190         ret = smu_get_thermal_temperature_range(smu);
1191         if (ret) {
1192                 dev_err(adev->dev, "Failed to get thermal temperature ranges!\n");
1193                 return ret;
1194         }
1195
1196         ret = smu_enable_thermal_alert(smu);
1197         if (ret) {
1198                 dev_err(adev->dev, "Failed to enable thermal alert!\n");
1199                 return ret;
1200         }
1201
1202         /*
1203          * Set the initial values (retrieved from vbios) in the dpm tables context,
1204          * such as gfxclk, memclk and dcefclk, and enable the DPM feature for each
1205          * type of clock.
1206          */
1207         ret = smu_set_default_dpm_table(smu);
1208         if (ret) {
1209                 dev_err(adev->dev, "Failed to setup default dpm clock tables!\n");
1210                 return ret;
1211         }
1212
1213         ret = smu_notify_display_change(smu);
1214         if (ret)
1215                 return ret;
1216
1217         /*
1218          * Set min deep sleep dce fclk with bootup value from vbios via
1219          * SetMinDeepSleepDcefclk MSG.
1220          */
1221         ret = smu_set_min_dcef_deep_sleep(smu,
1222                                           smu->smu_table.boot_values.dcefclk / 100);
1223         if (ret)
1224                 return ret;
1225
1226         return ret;
1227 }
1228
1229 static int smu_start_smc_engine(struct smu_context *smu)
1230 {
1231         struct amdgpu_device *adev = smu->adev;
1232         int ret = 0;
1233
1234         if (adev->firmware.load_type != AMDGPU_FW_LOAD_PSP) {
1235                 if (adev->asic_type < CHIP_NAVI10) {
1236                         if (smu->ppt_funcs->load_microcode) {
1237                                 ret = smu->ppt_funcs->load_microcode(smu);
1238                                 if (ret)
1239                                         return ret;
1240                         }
1241                 }
1242         }
1243
1244         if (smu->ppt_funcs->check_fw_status) {
1245                 ret = smu->ppt_funcs->check_fw_status(smu);
1246                 if (ret) {
1247                         dev_err(adev->dev, "SMC is not ready\n");
1248                         return ret;
1249                 }
1250         }
1251
1252         /*
1253          * Send the GetDriverIfVersion message to check whether the returned value
1254          * matches the DRIVER_IF_VERSION in the SMC header.
1255          */
1256         ret = smu_check_fw_version(smu);
1257         if (ret)
1258                 return ret;
1259
1260         return ret;
1261 }
1262
1263 static int smu_hw_init(void *handle)
1264 {
1265         int ret;
1266         struct amdgpu_device *adev = (struct amdgpu_device *)handle;
1267         struct smu_context *smu = &adev->smu;
1268
1269         if (amdgpu_sriov_vf(adev) && !amdgpu_sriov_is_pp_one_vf(adev)) {
1270                 smu->pm_enabled = false;
1271                 return 0;
1272         }
1273
1274         ret = smu_start_smc_engine(smu);
1275         if (ret) {
1276                 dev_err(adev->dev, "SMC engine is not correctly up!\n");
1277                 return ret;
1278         }
1279
1280         if (smu->is_apu) {
1281                 smu_powergate_sdma(&adev->smu, false);
1282                 smu_dpm_set_vcn_enable(smu, true);
1283                 smu_dpm_set_jpeg_enable(smu, true);
1284                 smu_set_gfx_cgpg(&adev->smu, true);
1285         }
1286
1287         if (!smu->pm_enabled)
1288                 return 0;
1289
1290         /* get boot_values from vbios to set revision, gfxclk, etc. */
1291         ret = smu_get_vbios_bootup_values(smu);
1292         if (ret) {
1293                 dev_err(adev->dev, "Failed to get VBIOS boot clock values!\n");
1294                 return ret;
1295         }
1296
1297         ret = smu_setup_pptable(smu);
1298         if (ret) {
1299                 dev_err(adev->dev, "Failed to setup pptable!\n");
1300                 return ret;
1301         }
1302
1303         ret = smu_get_driver_allowed_feature_mask(smu);
1304         if (ret)
1305                 return ret;
1306
1307         ret = smu_smc_hw_setup(smu);
1308         if (ret) {
1309                 dev_err(adev->dev, "Failed to setup smc hw!\n");
1310                 return ret;
1311         }
1312
1313         /*
1314          * Move maximum sustainable clock retrieval here considering
1315          * 1. It is not needed on resume (from S3).
1316          * 2. DAL settings come between .hw_init and .late_init of SMU.
1317          *    And DAL needs to know the maximum sustainable clocks. Thus
1318          *    it cannot be put in .late_init().
1319          */
1320         ret = smu_init_max_sustainable_clocks(smu);
1321         if (ret) {
1322                 dev_err(adev->dev, "Failed to init max sustainable clocks!\n");
1323                 return ret;
1324         }
1325
1326         adev->pm.dpm_enabled = true;
1327
1328         dev_info(adev->dev, "SMU is initialized successfully!\n");
1329
1330         return 0;
1331 }
1332
1333 static int smu_disable_dpms(struct smu_context *smu)
1334 {
1335         struct amdgpu_device *adev = smu->adev;
1336         int ret = 0;
1337         bool use_baco = !smu->is_apu &&
1338                 ((amdgpu_in_reset(adev) &&
1339                   (amdgpu_asic_reset_method(adev) == AMD_RESET_METHOD_BACO)) ||
1340                  ((adev->in_runpm || adev->in_s4) && amdgpu_asic_supports_baco(adev)));
1341
1342         /*
1343          * For custom pptable uploading, skip the DPM features
1344          * disable process on Navi1x ASICs.
1345          *   - As the gfx related features are under control of
1346          *     RLC on those ASICs. RLC reinitialization will be
1347          *     needed to reenable them. That would cost much more
1348          *     effort.
1349          *
1350          *   - SMU firmware can handle the DPM reenablement
1351          *     properly.
1352          */
1353         if (smu->uploading_custom_pp_table &&
1354             (adev->asic_type >= CHIP_NAVI10) &&
1355             (adev->asic_type <= CHIP_DIMGREY_CAVEFISH))
1356                 return 0;
1357
1358         /*
1359          * For Sienna_Cichlid, PMFW will handle the features disablement properly
1360          * on BACO in. Driver involvement is unnecessary.
1361          */
1362         if ((adev->asic_type == CHIP_SIENNA_CICHLID) &&
1363              use_baco)
1364                 return 0;
1365
1366         /*
1367          * For gpu reset, runpm and hibernation through BACO,
1368          * BACO feature has to be kept enabled.
1369          */
1370         if (use_baco && smu_feature_is_enabled(smu, SMU_FEATURE_BACO_BIT)) {
1371                 ret = smu_disable_all_features_with_exception(smu,
1372                                                               SMU_FEATURE_BACO_BIT);
1373                 if (ret)
1374                         dev_err(adev->dev, "Failed to disable smu features except BACO.\n");
1375         } else {
1376                 ret = smu_system_features_control(smu, false);
1377                 if (ret)
1378                         dev_err(adev->dev, "Failed to disable smu features.\n");
1379         }
1380
1381         if (adev->asic_type >= CHIP_NAVI10 &&
1382             adev->gfx.rlc.funcs->stop)
1383                 adev->gfx.rlc.funcs->stop(adev);
1384
1385         return ret;
1386 }
1387
1388 static int smu_smc_hw_cleanup(struct smu_context *smu)
1389 {
1390         struct amdgpu_device *adev = smu->adev;
1391         int ret = 0;
1392
1393         cancel_work_sync(&smu->throttling_logging_work);
1394         cancel_work_sync(&smu->interrupt_work);
1395
1396         ret = smu_disable_thermal_alert(smu);
1397         if (ret) {
1398                 dev_err(adev->dev, "Failed to disable thermal alert!\n");
1399                 return ret;
1400         }
1401
1402         ret = smu_disable_dpms(smu);
1403         if (ret) {
1404                 dev_err(adev->dev, "Failed to disable dpm features!\n");
1405                 return ret;
1406         }
1407
1408         return 0;
1409 }
1410
1411 static int smu_hw_fini(void *handle)
1412 {
1413         struct amdgpu_device *adev = (struct amdgpu_device *)handle;
1414         struct smu_context *smu = &adev->smu;
1415
1416         if (amdgpu_sriov_vf(adev) && !amdgpu_sriov_is_pp_one_vf(adev))
1417                 return 0;
1418
1419         if (smu->is_apu) {
1420                 smu_powergate_sdma(&adev->smu, true);
1421                 smu_dpm_set_vcn_enable(smu, false);
1422                 smu_dpm_set_jpeg_enable(smu, false);
1423         }
1424
1425         if (!smu->pm_enabled)
1426                 return 0;
1427
1428         adev->pm.dpm_enabled = false;
1429
1430         return smu_smc_hw_cleanup(smu);
1431 }
1432
1433 int smu_reset(struct smu_context *smu)
1434 {
1435         struct amdgpu_device *adev = smu->adev;
1436         int ret;
1437
1438         amdgpu_gfx_off_ctrl(smu->adev, false);
1439
1440         ret = smu_hw_fini(adev);
1441         if (ret)
1442                 return ret;
1443
1444         ret = smu_hw_init(adev);
1445         if (ret)
1446                 return ret;
1447
1448         ret = smu_late_init(adev);
1449         if (ret)
1450                 return ret;
1451
1452         amdgpu_gfx_off_ctrl(smu->adev, true);
1453
1454         return 0;
1455 }
1456
1457 static int smu_suspend(void *handle)
1458 {
1459         struct amdgpu_device *adev = (struct amdgpu_device *)handle;
1460         struct smu_context *smu = &adev->smu;
1461         int ret;
1462
1463         if (amdgpu_sriov_vf(adev) && !amdgpu_sriov_is_pp_one_vf(adev))
1464                 return 0;
1465
1466         if (!smu->pm_enabled)
1467                 return 0;
1468
1469         adev->pm.dpm_enabled = false;
1470
1471         ret = smu_smc_hw_cleanup(smu);
1472         if (ret)
1473                 return ret;
1474
1475         smu->watermarks_bitmap &= ~(WATERMARKS_LOADED);
1476
1477         /* skip CGPG when in S0ix */
1478         if (smu->is_apu && !adev->in_s0ix)
1479                 smu_set_gfx_cgpg(&adev->smu, false);
1480
1481         return 0;
1482 }
1483
1484 static int smu_resume(void *handle)
1485 {
1486         int ret;
1487         struct amdgpu_device *adev = (struct amdgpu_device *)handle;
1488         struct smu_context *smu = &adev->smu;
1489
1490         if (amdgpu_sriov_vf(adev) && !amdgpu_sriov_is_pp_one_vf(adev))
1491                 return 0;
1492
1493         if (!smu->pm_enabled)
1494                 return 0;
1495
1496         dev_info(adev->dev, "SMU is resuming...\n");
1497
1498         ret = smu_start_smc_engine(smu);
1499         if (ret) {
1500                 dev_err(adev->dev, "SMC engine is not correctly up!\n");
1501                 return ret;
1502         }
1503
1504         ret = smu_smc_hw_setup(smu);
1505         if (ret) {
1506                 dev_err(adev->dev, "Failed to setup smc hw!\n");
1507                 return ret;
1508         }
1509
1510         if (smu->is_apu)
1511                 smu_set_gfx_cgpg(&adev->smu, true);
1512
1513         smu->disable_uclk_switch = 0;
1514
1515         adev->pm.dpm_enabled = true;
1516
1517         dev_info(adev->dev, "SMU is resumed successfully!\n");
1518
1519         return 0;
1520 }
1521
1522 int smu_display_configuration_change(void *handle,
1523                                      const struct amd_pp_display_configuration *display_config)
1524 {
1525         struct smu_context *smu = handle;
1526         int index = 0;
1527         int num_of_active_display = 0;
1528
1529         if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
1530                 return -EOPNOTSUPP;
1531
1532         if (!display_config)
1533                 return -EINVAL;
1534
1535         mutex_lock(&smu->mutex);
1536
1537         smu_set_min_dcef_deep_sleep(smu,
1538                                     display_config->min_dcef_deep_sleep_set_clk / 100);
1539
1540         for (index = 0; index < display_config->num_path_including_non_display; index++) {
1541                 if (display_config->displays[index].controller_id != 0)
1542                         num_of_active_display++;
1543         }
1544
1545         mutex_unlock(&smu->mutex);
1546
1547         return 0;
1548 }
1549
1550 static int smu_set_clockgating_state(void *handle,
1551                                      enum amd_clockgating_state state)
1552 {
1553         return 0;
1554 }
1555
1556 static int smu_set_powergating_state(void *handle,
1557                                      enum amd_powergating_state state)
1558 {
1559         return 0;
1560 }
1561
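/*
 * Entering one of the profiling (UMD pstate) levels disables GPO, GFX ULV
 * and deep sleep, ungates GFX clock/power gating and switches the ASIC to
 * its stable pstate; leaving the profiling levels undoes this in reverse
 * order.
 */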
1562 static int smu_enable_umd_pstate(void *handle,
1563                       enum amd_dpm_forced_level *level)
1564 {
1565         uint32_t profile_mode_mask = AMD_DPM_FORCED_LEVEL_PROFILE_STANDARD |
1566                                         AMD_DPM_FORCED_LEVEL_PROFILE_MIN_SCLK |
1567                                         AMD_DPM_FORCED_LEVEL_PROFILE_MIN_MCLK |
1568                                         AMD_DPM_FORCED_LEVEL_PROFILE_PEAK;
1569
1570         struct smu_context *smu = (struct smu_context*)(handle);
1571         struct smu_dpm_context *smu_dpm_ctx = &(smu->smu_dpm);
1572
1573         if (!smu->is_apu && !smu_dpm_ctx->dpm_context)
1574                 return -EINVAL;
1575
1576         if (!(smu_dpm_ctx->dpm_level & profile_mode_mask)) {
1577                 /* enter umd pstate, save current level, disable gfx cg*/
1578                 if (*level & profile_mode_mask) {
1579                         smu_dpm_ctx->saved_dpm_level = smu_dpm_ctx->dpm_level;
1580                         smu_dpm_ctx->enable_umd_pstate = true;
1581                         smu_gpo_control(smu, false);
1582                         amdgpu_device_ip_set_powergating_state(smu->adev,
1583                                                                AMD_IP_BLOCK_TYPE_GFX,
1584                                                                AMD_PG_STATE_UNGATE);
1585                         amdgpu_device_ip_set_clockgating_state(smu->adev,
1586                                                                AMD_IP_BLOCK_TYPE_GFX,
1587                                                                AMD_CG_STATE_UNGATE);
1588                         smu_gfx_ulv_control(smu, false);
1589                         smu_deep_sleep_control(smu, false);
1590                         amdgpu_asic_update_umd_stable_pstate(smu->adev, true);
1591                 }
1592         } else {
1593                 /* exit umd pstate, restore level, enable gfx cg*/
1594                 if (!(*level & profile_mode_mask)) {
1595                         if (*level == AMD_DPM_FORCED_LEVEL_PROFILE_EXIT)
1596                                 *level = smu_dpm_ctx->saved_dpm_level;
1597                         smu_dpm_ctx->enable_umd_pstate = false;
1598                         amdgpu_asic_update_umd_stable_pstate(smu->adev, false);
1599                         smu_deep_sleep_control(smu, true);
1600                         smu_gfx_ulv_control(smu, true);
1601                         amdgpu_device_ip_set_clockgating_state(smu->adev,
1602                                                                AMD_IP_BLOCK_TYPE_GFX,
1603                                                                AMD_CG_STATE_GATE);
1604                         amdgpu_device_ip_set_powergating_state(smu->adev,
1605                                                                AMD_IP_BLOCK_TYPE_GFX,
1606                                                                AMD_PG_STATE_GATE);
1607                         smu_gpo_control(smu, true);
1608                 }
1609         }
1610
1611         return 0;
1612 }
1613
1614 static int smu_bump_power_profile_mode(struct smu_context *smu,
1615                                            long *param,
1616                                            uint32_t param_size)
1617 {
1618         int ret = 0;
1619
1620         if (smu->ppt_funcs->set_power_profile_mode)
1621                 ret = smu->ppt_funcs->set_power_profile_mode(smu, param, param_size);
1622
1623         return ret;
1624 }
1625
1626 static int smu_adjust_power_state_dynamic(struct smu_context *smu,
1627                                    enum amd_dpm_forced_level level,
1628                                    bool skip_display_settings)
1629 {
1630         int ret = 0;
1631         int index = 0;
1632         long workload;
1633         struct smu_dpm_context *smu_dpm_ctx = &(smu->smu_dpm);
1634
1635         if (!skip_display_settings) {
1636                 ret = smu_display_config_changed(smu);
1637                 if (ret) {
1638                         dev_err(smu->adev->dev, "Failed to change display config!");
1639                         return ret;
1640                 }
1641         }
1642
1643         ret = smu_apply_clocks_adjust_rules(smu);
1644         if (ret) {
1645                 dev_err(smu->adev->dev, "Failed to apply clocks adjust rules!");
1646                 return ret;
1647         }
1648
1649         if (!skip_display_settings) {
1650                 ret = smu_notify_smc_display_config(smu);
1651                 if (ret) {
1652                         dev_err(smu->adev->dev, "Failed to notify smc display config!");
1653                         return ret;
1654                 }
1655         }
1656
1657         if (smu_dpm_ctx->dpm_level != level) {
1658                 ret = smu_asic_set_performance_level(smu, level);
1659                 if (ret) {
1660                         dev_err(smu->adev->dev, "Failed to set performance level!");
1661                         return ret;
1662                 }
1663
1664                 /* update the saved copy */
1665                 smu_dpm_ctx->dpm_level = level;
1666         }
1667
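        /*
         * Pick the highest-priority workload still requested: fls() returns
         * the 1-based index of the highest bit set in workload_mask (0 when
         * the mask is empty), so index - 1 selects that entry, with entry 0
         * as the fallback.
         */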
1668         if (smu_dpm_ctx->dpm_level != AMD_DPM_FORCED_LEVEL_MANUAL &&
1669                 smu_dpm_ctx->dpm_level != AMD_DPM_FORCED_LEVEL_PERF_DETERMINISM) {
1670                 index = fls(smu->workload_mask);
1671                 index = index > 0 && index <= WORKLOAD_POLICY_MAX ? index - 1 : 0;
1672                 workload = smu->workload_setting[index];
1673
1674                 if (smu->power_profile_mode != workload)
1675                         smu_bump_power_profile_mode(smu, &workload, 0);
1676         }
1677
1678         return ret;
1679 }
1680
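/*
 * Dispatch a powerplay task: a display config change runs the pre-display
 * hook and then re-evaluates the dynamic power state with display settings
 * applied, while COMPLETE_INIT and READJUST_POWER_STATE skip the display
 * settings. lock_needed lets callers that already hold smu->mutex avoid
 * taking it again.
 */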
1681 int smu_handle_task(struct smu_context *smu,
1682                     enum amd_dpm_forced_level level,
1683                     enum amd_pp_task task_id,
1684                     bool lock_needed)
1685 {
1686         int ret = 0;
1687
1688         if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
1689                 return -EOPNOTSUPP;
1690
1691         if (lock_needed)
1692                 mutex_lock(&smu->mutex);
1693
1694         switch (task_id) {
1695         case AMD_PP_TASK_DISPLAY_CONFIG_CHANGE:
1696                 ret = smu_pre_display_config_changed(smu);
1697                 if (ret)
1698                         goto out;
1699                 ret = smu_adjust_power_state_dynamic(smu, level, false);
1700                 break;
1701         case AMD_PP_TASK_COMPLETE_INIT:
1702         case AMD_PP_TASK_READJUST_POWER_STATE:
1703                 ret = smu_adjust_power_state_dynamic(smu, level, true);
1704                 break;
1705         default:
1706                 break;
1707         }
1708
1709 out:
1710         if (lock_needed)
1711                 mutex_unlock(&smu->mutex);
1712
1713         return ret;
1714 }
1715
1716 int smu_handle_dpm_task(void *handle,
1717                         enum amd_pp_task task_id,
1718                         enum amd_pm_state_type *user_state)
1719 {
1720         struct smu_context *smu = handle;
1721         struct smu_dpm_context *smu_dpm = &smu->smu_dpm;
1722
1723         return smu_handle_task(smu, smu_dpm->dpm_level, task_id, true);
1724
1725 }
1726
1727
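/*
 * Each requested power profile sets one bit in workload_mask, indexed by
 * the profile's priority. After the mask is updated, the highest-priority
 * bit still set decides the workload that actually gets programmed, unless
 * the current DPM level is manual or perf-deterministic.
 */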
1728 int smu_switch_power_profile(void *handle,
1729                              enum PP_SMC_POWER_PROFILE type,
1730                              bool en)
1731 {
1732         struct smu_context *smu = handle;
1733         struct smu_dpm_context *smu_dpm_ctx = &(smu->smu_dpm);
1734         long workload;
1735         uint32_t index;
1736
1737         if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
1738                 return -EOPNOTSUPP;
1739
1740         if (type >= PP_SMC_POWER_PROFILE_CUSTOM)
1741                 return -EINVAL;
1742
1743         mutex_lock(&smu->mutex);
1744
1745         if (!en) {
1746                 smu->workload_mask &= ~(1 << smu->workload_prority[type]);
1747                 index = fls(smu->workload_mask);
1748                 index = index > 0 && index <= WORKLOAD_POLICY_MAX ? index - 1 : 0;
1749                 workload = smu->workload_setting[index];
1750         } else {
1751                 smu->workload_mask |= (1 << smu->workload_prority[type]);
1752                 index = fls(smu->workload_mask);
1753                 index = index <= WORKLOAD_POLICY_MAX ? index - 1 : 0;
1754                 workload = smu->workload_setting[index];
1755         }
1756
1757         if (smu_dpm_ctx->dpm_level != AMD_DPM_FORCED_LEVEL_MANUAL &&
1758                 smu_dpm_ctx->dpm_level != AMD_DPM_FORCED_LEVEL_PERF_DETERMINISM)
1759                 smu_bump_power_profile_mode(smu, &workload, 0);
1760
1761         mutex_unlock(&smu->mutex);
1762
1763         return 0;
1764 }
1765
1766 enum amd_dpm_forced_level smu_get_performance_level(void *handle)
1767 {
1768         struct smu_context *smu = handle;
1769         struct smu_dpm_context *smu_dpm_ctx = &(smu->smu_dpm);
1770         enum amd_dpm_forced_level level;
1771
1772         if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
1773                 return -EOPNOTSUPP;
1774
1775         if (!smu->is_apu && !smu_dpm_ctx->dpm_context)
1776                 return -EINVAL;
1777
1778         mutex_lock(&(smu->mutex));
1779         level = smu_dpm_ctx->dpm_level;
1780         mutex_unlock(&(smu->mutex));
1781
1782         return level;
1783 }
1784
1785 int smu_force_performance_level(void *handle, enum amd_dpm_forced_level level)
1786 {
1787         struct smu_context *smu = handle;
1788         struct smu_dpm_context *smu_dpm_ctx = &(smu->smu_dpm);
1789         int ret = 0;
1790
1791         if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
1792                 return -EOPNOTSUPP;
1793
1794         if (!smu->is_apu && !smu_dpm_ctx->dpm_context)
1795                 return -EINVAL;
1796
1797         mutex_lock(&smu->mutex);
1798
1799         ret = smu_enable_umd_pstate(smu, &level);
1800         if (ret) {
1801                 mutex_unlock(&smu->mutex);
1802                 return ret;
1803         }
1804
1805         ret = smu_handle_task(smu, level,
1806                               AMD_PP_TASK_READJUST_POWER_STATE,
1807                               false);
1808
1809         mutex_unlock(&smu->mutex);
1810
1811         /* reset user dpm clock state */
1812         if (!ret && smu_dpm_ctx->dpm_level != AMD_DPM_FORCED_LEVEL_MANUAL) {
1813                 memset(smu->user_dpm_profile.clk_mask, 0, sizeof(smu->user_dpm_profile.clk_mask));
1814                 smu->user_dpm_profile.clk_dependency = 0;
1815         }
1816
1817         return ret;
1818 }
1819
1820 int smu_set_display_count(void *handle, uint32_t count)
1821 {
1822         struct smu_context *smu = handle;
1823         int ret = 0;
1824
1825         if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
1826                 return -EOPNOTSUPP;
1827
1828         mutex_lock(&smu->mutex);
1829         ret = smu_init_display_count(smu, count);
1830         mutex_unlock(&smu->mutex);
1831
1832         return ret;
1833 }
1834
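/*
 * Forcing specific clock levels is only honored in manual DPM mode. The
 * chosen mask is remembered in user_dpm_profile so it can be reapplied
 * later, unless this call is itself part of a profile restore.
 */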
1835 static int smu_force_smuclk_levels(struct smu_context *smu,
1836                          enum smu_clk_type clk_type,
1837                          uint32_t mask)
1838 {
1839         struct smu_dpm_context *smu_dpm_ctx = &(smu->smu_dpm);
1840         int ret = 0;
1841
1842         if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
1843                 return -EOPNOTSUPP;
1844
1845         if (smu_dpm_ctx->dpm_level != AMD_DPM_FORCED_LEVEL_MANUAL) {
1846                 dev_dbg(smu->adev->dev, "force clock level is for dpm manual mode only.\n");
1847                 return -EINVAL;
1848         }
1849
1850         mutex_lock(&smu->mutex);
1851
1852         if (smu->ppt_funcs && smu->ppt_funcs->force_clk_levels) {
1853                 ret = smu->ppt_funcs->force_clk_levels(smu, clk_type, mask);
1854                 if (!ret && !(smu->user_dpm_profile.flags & SMU_DPM_USER_PROFILE_RESTORE)) {
1855                         smu->user_dpm_profile.clk_mask[clk_type] = mask;
1856                         smu_set_user_clk_dependencies(smu, clk_type);
1857                 }
1858         }
1859
1860         mutex_unlock(&smu->mutex);
1861
1862         return ret;
1863 }
1864
1865 int smu_force_ppclk_levels(void *handle, enum pp_clock_type type, uint32_t mask)
1866 {
1867         struct smu_context *smu = handle;
1868         enum smu_clk_type clk_type;
1869
1870         switch (type) {
1871         case PP_SCLK:
1872                 clk_type = SMU_SCLK; break;
1873         case PP_MCLK:
1874                 clk_type = SMU_MCLK; break;
1875         case PP_PCIE:
1876                 clk_type = SMU_PCIE; break;
1877         case PP_SOCCLK:
1878                 clk_type = SMU_SOCCLK; break;
1879         case PP_FCLK:
1880                 clk_type = SMU_FCLK; break;
1881         case PP_DCEFCLK:
1882                 clk_type = SMU_DCEFCLK; break;
1883         case PP_VCLK:
1884                 clk_type = SMU_VCLK; break;
1885         case PP_DCLK:
1886                 clk_type = SMU_DCLK; break;
1887         case OD_SCLK:
1888                 clk_type = SMU_OD_SCLK; break;
1889         case OD_MCLK:
1890                 clk_type = SMU_OD_MCLK; break;
1891         case OD_VDDC_CURVE:
1892                 clk_type = SMU_OD_VDDC_CURVE; break;
1893         case OD_RANGE:
1894                 clk_type = SMU_OD_RANGE; break;
1895         default:
1896                 return -EINVAL;
1897         }
1898
1899         return smu_force_smuclk_levels(smu, clk_type, mask);
1900 }
1901
1902 /*
1903  * On system suspend or reset, the dpm_enabled flag is cleared
1904  * so that those SMU services which are no longer supported
1905  * get gated.
1906  * However, the mp1 state setting should still be granted
1907  * even with dpm_enabled cleared.
1908  */
1909 int smu_set_mp1_state(void *handle,
1910                       enum pp_mp1_state mp1_state)
1911 {
1912         struct smu_context *smu = handle;
1913         int ret = 0;
1914
1915         if (!smu->pm_enabled)
1916                 return -EOPNOTSUPP;
1917
1918         mutex_lock(&smu->mutex);
1919
1920         if (smu->ppt_funcs &&
1921             smu->ppt_funcs->set_mp1_state)
1922                 ret = smu->ppt_funcs->set_mp1_state(smu, mp1_state);
1923
1924         mutex_unlock(&smu->mutex);
1925
1926         return ret;
1927 }
1928
1929 int smu_set_df_cstate(void *handle,
1930                       enum pp_df_cstate state)
1931 {
1932         struct smu_context *smu = handle;
1933         int ret = 0;
1934
1935         if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
1936                 return -EOPNOTSUPP;
1937
1938         if (!smu->ppt_funcs || !smu->ppt_funcs->set_df_cstate)
1939                 return 0;
1940
1941         mutex_lock(&smu->mutex);
1942
1943         ret = smu->ppt_funcs->set_df_cstate(smu, state);
1944         if (ret)
1945                 dev_err(smu->adev->dev, "[SetDfCstate] failed!\n");
1946
1947         mutex_unlock(&smu->mutex);
1948
1949         return ret;
1950 }
1951
1952 int smu_allow_xgmi_power_down(struct smu_context *smu, bool en)
1953 {
1954         int ret = 0;
1955
1956         if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
1957                 return -EOPNOTSUPP;
1958
1959         if (!smu->ppt_funcs || !smu->ppt_funcs->allow_xgmi_power_down)
1960                 return 0;
1961
1962         mutex_lock(&smu->mutex);
1963
1964         ret = smu->ppt_funcs->allow_xgmi_power_down(smu, en);
1965         if (ret)
1966                 dev_err(smu->adev->dev, "[AllowXgmiPowerDown] failed!\n");
1967
1968         mutex_unlock(&smu->mutex);
1969
1970         return ret;
1971 }
1972
1973 int smu_write_watermarks_table(struct smu_context *smu)
1974 {
1975         int ret = 0;
1976
1977         if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
1978                 return -EOPNOTSUPP;
1979
1980         mutex_lock(&smu->mutex);
1981
1982         ret = smu_set_watermarks_table(smu, NULL);
1983
1984         mutex_unlock(&smu->mutex);
1985
1986         return ret;
1987 }
1988
1989 int smu_set_watermarks_for_clock_ranges(void *handle,
1990                                         struct pp_smu_wm_range_sets *clock_ranges)
1991 {
1992         struct smu_context *smu = handle;
1993         int ret = 0;
1994
1995         if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
1996                 return -EOPNOTSUPP;
1997
1998         if (smu->disable_watermark)
1999                 return 0;
2000
2001         mutex_lock(&smu->mutex);
2002
2003         ret = smu_set_watermarks_table(smu, clock_ranges);
2004
2005         mutex_unlock(&smu->mutex);
2006
2007         return ret;
2008 }
2009
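/*
 * Sync the SMU power source with adev->pm.ac_power. This is a no-op when
 * the firmware tracks the AC/DC state itself through GPIO
 * (dc_controlled_by_gpio).
 */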
2010 int smu_set_ac_dc(struct smu_context *smu)
2011 {
2012         int ret = 0;
2013
2014         if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
2015                 return -EOPNOTSUPP;
2016
2017         /* controlled by firmware */
2018         if (smu->dc_controlled_by_gpio)
2019                 return 0;
2020
2021         mutex_lock(&smu->mutex);
2022         ret = smu_set_power_source(smu,
2023                                    smu->adev->pm.ac_power ? SMU_POWER_SOURCE_AC :
2024                                    SMU_POWER_SOURCE_DC);
2025         if (ret)
2026                 dev_err(smu->adev->dev, "Failed to switch to %s mode!\n",
2027                        smu->adev->pm.ac_power ? "AC" : "DC");
2028         mutex_unlock(&smu->mutex);
2029
2030         return ret;
2031 }
2032
2033 const struct amd_ip_funcs smu_ip_funcs = {
2034         .name = "smu",
2035         .early_init = smu_early_init,
2036         .late_init = smu_late_init,
2037         .sw_init = smu_sw_init,
2038         .sw_fini = smu_sw_fini,
2039         .hw_init = smu_hw_init,
2040         .hw_fini = smu_hw_fini,
2041         .suspend = smu_suspend,
2042         .resume = smu_resume,
2043         .is_idle = NULL,
2044         .check_soft_reset = NULL,
2045         .wait_for_idle = NULL,
2046         .soft_reset = NULL,
2047         .set_clockgating_state = smu_set_clockgating_state,
2048         .set_powergating_state = smu_set_powergating_state,
2049         .enable_umd_pstate = smu_enable_umd_pstate,
2050 };
2051
2052 const struct amdgpu_ip_block_version smu_v11_0_ip_block =
2053 {
2054         .type = AMD_IP_BLOCK_TYPE_SMC,
2055         .major = 11,
2056         .minor = 0,
2057         .rev = 0,
2058         .funcs = &smu_ip_funcs,
2059 };
2060
2061 const struct amdgpu_ip_block_version smu_v12_0_ip_block =
2062 {
2063         .type = AMD_IP_BLOCK_TYPE_SMC,
2064         .major = 12,
2065         .minor = 0,
2066         .rev = 0,
2067         .funcs = &smu_ip_funcs,
2068 };
2069
2070 const struct amdgpu_ip_block_version smu_v13_0_ip_block =
2071 {
2072         .type = AMD_IP_BLOCK_TYPE_SMC,
2073         .major = 13,
2074         .minor = 0,
2075         .rev = 0,
2076         .funcs = &smu_ip_funcs,
2077 };
2078
2079 int smu_load_microcode(struct smu_context *smu)
2080 {
2081         int ret = 0;
2082
2083         if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
2084                 return -EOPNOTSUPP;
2085
2086         mutex_lock(&smu->mutex);
2087
2088         if (smu->ppt_funcs->load_microcode)
2089                 ret = smu->ppt_funcs->load_microcode(smu);
2090
2091         mutex_unlock(&smu->mutex);
2092
2093         return ret;
2094 }
2095
2096 int smu_check_fw_status(struct smu_context *smu)
2097 {
2098         int ret = 0;
2099
2100         if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
2101                 return -EOPNOTSUPP;
2102
2103         mutex_lock(&smu->mutex);
2104
2105         if (smu->ppt_funcs->check_fw_status)
2106                 ret = smu->ppt_funcs->check_fw_status(smu);
2107
2108         mutex_unlock(&smu->mutex);
2109
2110         return ret;
2111 }
2112
2113 int smu_set_gfx_cgpg(struct smu_context *smu, bool enabled)
2114 {
2115         int ret = 0;
2116
2117         mutex_lock(&smu->mutex);
2118
2119         if (smu->ppt_funcs->set_gfx_cgpg)
2120                 ret = smu->ppt_funcs->set_gfx_cgpg(smu, enabled);
2121
2122         mutex_unlock(&smu->mutex);
2123
2124         return ret;
2125 }
2126
2127 int smu_set_fan_speed_rpm(void *handle, uint32_t speed)
2128 {
2129         struct smu_context *smu = handle;
2130         u32 percent;
2131         int ret = 0;
2132
2133         if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
2134                 return -EOPNOTSUPP;
2135
2136         mutex_lock(&smu->mutex);
2137
2138         if (smu->ppt_funcs->set_fan_speed_percent) {
2139                 percent = speed * 100 / smu->fan_max_rpm;
2140                 ret = smu->ppt_funcs->set_fan_speed_percent(smu, percent);
2141                 if (!ret && !(smu->user_dpm_profile.flags & SMU_DPM_USER_PROFILE_RESTORE))
2142                         smu->user_dpm_profile.fan_speed_percent = percent;
2143         }
2144
2145         mutex_unlock(&smu->mutex);
2146
2147         return ret;
2148 }
2149
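/*
 * The caller encodes the requested PPT limit type in the top byte of
 * *limit. Anything other than SMU_DEFAULT_PPT_LIMIT is forwarded to the
 * PPT backend; the default type is answered from the driver-tracked
 * current/default/max limits below. smu_set_power_limit uses the same
 * encoding.
 */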
2150 int smu_get_power_limit(struct smu_context *smu,
2151                         uint32_t *limit,
2152                         enum smu_ppt_limit_level limit_level)
2153 {
2154         uint32_t limit_type = *limit >> 24;
2155         int ret = 0;
2156
2157         if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
2158                 return -EOPNOTSUPP;
2159
2160         mutex_lock(&smu->mutex);
2161
2162         if (limit_type != SMU_DEFAULT_PPT_LIMIT) {
2163                 if (smu->ppt_funcs->get_ppt_limit)
2164                         ret = smu->ppt_funcs->get_ppt_limit(smu, limit, limit_type, limit_level);
2165         } else {
2166                 switch (limit_level) {
2167                 case SMU_PPT_LIMIT_CURRENT:
2168                         *limit = smu->current_power_limit;
2169                         break;
2170                 case SMU_PPT_LIMIT_DEFAULT:
2171                         *limit = smu->default_power_limit;
2172                         break;
2173                 case SMU_PPT_LIMIT_MAX:
2174                         *limit = smu->max_power_limit;
2175                         break;
2176                 default:
2177                         break;
2178                 }
2179         }
2180
2181         mutex_unlock(&smu->mutex);
2182
2183         return ret;
2184 }
2185
2186 int smu_set_power_limit(void *handle, uint32_t limit)
2187 {
2188         struct smu_context *smu = handle;
2189         uint32_t limit_type = limit >> 24;
2190         int ret = 0;
2191
2192         if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
2193                 return -EOPNOTSUPP;
2194
2195         mutex_lock(&smu->mutex);
2196
2197         if (limit_type != SMU_DEFAULT_PPT_LIMIT)
2198                 if (smu->ppt_funcs->set_power_limit) {
2199                         ret = smu->ppt_funcs->set_power_limit(smu, limit);
2200                         goto out;
2201                 }
2202
2203         if (limit > smu->max_power_limit) {
2204                 dev_err(smu->adev->dev,
2205                         "New power limit (%d) is over the max allowed %d\n",
2206                         limit, smu->max_power_limit);
                ret = -EINVAL;
2207                 goto out;
2208         }
2209
2210         if (!limit)
2211                 limit = smu->current_power_limit;
2212
2213         if (smu->ppt_funcs->set_power_limit) {
2214                 ret = smu->ppt_funcs->set_power_limit(smu, limit);
2215                 if (!ret && !(smu->user_dpm_profile.flags & SMU_DPM_USER_PROFILE_RESTORE))
2216                         smu->user_dpm_profile.power_limit = limit;
2217         }
2218
2219 out:
2220         mutex_unlock(&smu->mutex);
2221
2222         return ret;
2223 }
2224
2225 static int smu_print_smuclk_levels(struct smu_context *smu, enum smu_clk_type clk_type, char *buf)
2226 {
2227         int ret = 0;
2228
2229         if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
2230                 return -EOPNOTSUPP;
2231
2232         mutex_lock(&smu->mutex);
2233
2234         if (smu->ppt_funcs->print_clk_levels)
2235                 ret = smu->ppt_funcs->print_clk_levels(smu, clk_type, buf);
2236
2237         mutex_unlock(&smu->mutex);
2238
2239         return ret;
2240 }
2241
2242 int smu_print_ppclk_levels(void *handle, enum pp_clock_type type, char *buf)
2243 {
2244         struct smu_context *smu = handle;
2245         enum smu_clk_type clk_type;
2246
2247         switch (type) {
2248         case PP_SCLK:
2249                 clk_type = SMU_SCLK; break;
2250         case PP_MCLK:
2251                 clk_type = SMU_MCLK; break;
2252         case PP_PCIE:
2253                 clk_type = SMU_PCIE; break;
2254         case PP_SOCCLK:
2255                 clk_type = SMU_SOCCLK; break;
2256         case PP_FCLK:
2257                 clk_type = SMU_FCLK; break;
2258         case PP_DCEFCLK:
2259                 clk_type = SMU_DCEFCLK; break;
2260         case PP_VCLK:
2261                 clk_type = SMU_VCLK; break;
2262         case PP_DCLK:
2263                 clk_type = SMU_DCLK; break;
2264         case OD_SCLK:
2265                 clk_type = SMU_OD_SCLK; break;
2266         case OD_MCLK:
2267                 clk_type = SMU_OD_MCLK; break;
2268         case OD_VDDC_CURVE:
2269                 clk_type = SMU_OD_VDDC_CURVE; break;
2270         case OD_RANGE:
2271                 clk_type = SMU_OD_RANGE; break;
2272         case OD_VDDGFX_OFFSET:
2273                 clk_type = SMU_OD_VDDGFX_OFFSET; break;
2274         case OD_CCLK:
2275                 clk_type = SMU_OD_CCLK; break;
2276         default:
2277                 return -EINVAL;
2278         }
2279
2280         return smu_print_smuclk_levels(smu, clk_type, buf);
2281 }
2282
2283 int smu_od_edit_dpm_table(void *handle,
2284                           enum PP_OD_DPM_TABLE_COMMAND type,
2285                           long *input, uint32_t size)
2286 {
2287         struct smu_context *smu = handle;
2288         int ret = 0;
2289
2290         if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
2291                 return -EOPNOTSUPP;
2292
2293         mutex_lock(&smu->mutex);
2294
2295         if (smu->ppt_funcs->od_edit_dpm_table) {
2296                 ret = smu->ppt_funcs->od_edit_dpm_table(smu, type, input, size);
2297         }
2298
2299         mutex_unlock(&smu->mutex);
2300
2301         return ret;
2302 }
2303
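/*
 * Answer a sensor query. The PPT backend gets the first chance; sensors it
 * does not handle fall back to the driver-tracked values below (stable
 * pstate clocks, enabled feature mask, power-gate state, ...).
 */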
2304 int smu_read_sensor(void *handle, int sensor, void *data, int *size_arg)
2305 {
2306         struct smu_context *smu = handle;
2307         struct smu_umd_pstate_table *pstate_table =
2308                                 &smu->pstate_table;
2309         int ret = 0;
2310         uint32_t *size, size_val;
2311
2312         if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
2313                 return -EOPNOTSUPP;
2314
2315         if (!data || !size_arg)
2316                 return -EINVAL;
2317
2318         size_val = *size_arg;
2319         size = &size_val;
2320
2321         mutex_lock(&smu->mutex);
2322
2323         if (smu->ppt_funcs->read_sensor)
2324                 if (!smu->ppt_funcs->read_sensor(smu, sensor, data, size))
2325                         goto unlock;
2326
2327         switch (sensor) {
2328         case AMDGPU_PP_SENSOR_STABLE_PSTATE_SCLK:
2329                 *((uint32_t *)data) = pstate_table->gfxclk_pstate.standard * 100;
2330                 *size = 4;
2331                 break;
2332         case AMDGPU_PP_SENSOR_STABLE_PSTATE_MCLK:
2333                 *((uint32_t *)data) = pstate_table->uclk_pstate.standard * 100;
2334                 *size = 4;
2335                 break;
2336         case AMDGPU_PP_SENSOR_ENABLED_SMC_FEATURES_MASK:
2337                 ret = smu_feature_get_enabled_mask(smu, (uint32_t *)data, 2);
2338                 *size = 8;
2339                 break;
2340         case AMDGPU_PP_SENSOR_UVD_POWER:
2341                 *(uint32_t *)data = smu_feature_is_enabled(smu, SMU_FEATURE_DPM_UVD_BIT) ? 1 : 0;
2342                 *size = 4;
2343                 break;
2344         case AMDGPU_PP_SENSOR_VCE_POWER:
2345                 *(uint32_t *)data = smu_feature_is_enabled(smu, SMU_FEATURE_DPM_VCE_BIT) ? 1 : 0;
2346                 *size = 4;
2347                 break;
2348         case AMDGPU_PP_SENSOR_VCN_POWER_STATE:
2349                 *(uint32_t *)data = atomic_read(&smu->smu_power.power_gate.vcn_gated) ? 0 : 1;
2350                 *size = 4;
2351                 break;
2352         case AMDGPU_PP_SENSOR_MIN_FAN_RPM:
2353                 *(uint32_t *)data = 0;
2354                 *size = 4;
2355                 break;
2356         default:
2357                 *size = 0;
2358                 ret = -EOPNOTSUPP;
2359                 break;
2360         }
2361
2362 unlock:
2363         mutex_unlock(&smu->mutex);
2364
2365         /* copy the uint32_t size back to the caller's int */
2366         *size_arg = size_val;
2367
2368         return ret;
2369 }
2370
2371 int smu_get_power_profile_mode(void *handle, char *buf)
2372 {
2373         struct smu_context *smu = handle;
2374         int ret = 0;
2375
2376         if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
2377                 return -EOPNOTSUPP;
2378
2379         mutex_lock(&smu->mutex);
2380
2381         if (smu->ppt_funcs->get_power_profile_mode)
2382                 ret = smu->ppt_funcs->get_power_profile_mode(smu, buf);
2383
2384         mutex_unlock(&smu->mutex);
2385
2386         return ret;
2387 }
2388
2389 int smu_set_power_profile_mode(void *handle, long *param, uint32_t param_size)
2390 {
2391         struct smu_context *smu = handle;
2392         int ret = 0;
2393
2394         if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
2395                 return -EOPNOTSUPP;
2396
2397         mutex_lock(&smu->mutex);
2398
2399         smu_bump_power_profile_mode(smu, param, param_size);
2400
2401         mutex_unlock(&smu->mutex);
2402
2403         return ret;
2404 }
2405
2407 u32 smu_get_fan_control_mode(void *handle)
2408 {
2409         struct smu_context *smu = handle;
2410         u32 ret = 0;
2411
2412         if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
2413                 return AMD_FAN_CTRL_NONE;
2414
2415         mutex_lock(&smu->mutex);
2416
2417         if (smu->ppt_funcs->get_fan_control_mode)
2418                 ret = smu->ppt_funcs->get_fan_control_mode(smu);
2419
2420         mutex_unlock(&smu->mutex);
2421
2422         return ret;
2423 }
2424
2425 int smu_set_fan_control_mode(struct smu_context *smu, int value)
2426 {
2427         int ret = 0;
2428
2429         if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
2430                 return -EOPNOTSUPP;
2431
2432         mutex_lock(&smu->mutex);
2433
2434         if (smu->ppt_funcs->set_fan_control_mode) {
2435                 ret = smu->ppt_funcs->set_fan_control_mode(smu, value);
2436                 if (!ret && !(smu->user_dpm_profile.flags & SMU_DPM_USER_PROFILE_RESTORE))
2437                         smu->user_dpm_profile.fan_mode = value;
2438         }
2439
2440         mutex_unlock(&smu->mutex);
2441
2442         /* reset user dpm fan speed */
2443         if (!ret && value != AMD_FAN_CTRL_MANUAL &&
2444                         !(smu->user_dpm_profile.flags & SMU_DPM_USER_PROFILE_RESTORE))
2445                 smu->user_dpm_profile.fan_speed_percent = 0;
2446
2447         return ret;
2448 }
2449
2450 void smu_pp_set_fan_control_mode(void *handle, u32 value)
{
2451         struct smu_context *smu = handle;
2452
2453         smu_set_fan_control_mode(smu, value);
2454 }
2455
2457 int smu_get_fan_speed_percent(void *handle, u32 *speed)
2458 {
2459         struct smu_context *smu = handle;
2460         int ret = 0;
2461         uint32_t percent;
2462
2463         if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
2464                 return -EOPNOTSUPP;
2465
2466         mutex_lock(&smu->mutex);
2467
2468         if (smu->ppt_funcs->get_fan_speed_percent) {
2469                 ret = smu->ppt_funcs->get_fan_speed_percent(smu, &percent);
2470                 if (!ret) {
2471                         *speed = percent > 100 ? 100 : percent;
2472                 }
2473         }
2474
2475         mutex_unlock(&smu->mutex);
2476
2478         return ret;
2479 }
2480
2481 int smu_set_fan_speed_percent(void *handle, u32 speed)
2482 {
2483         struct smu_context *smu = handle;
2484         int ret = 0;
2485
2486         if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
2487                 return -EOPNOTSUPP;
2488
2489         mutex_lock(&smu->mutex);
2490
2491         if (smu->ppt_funcs->set_fan_speed_percent) {
2492                 if (speed > 100)
2493                         speed = 100;
2494                 ret = smu->ppt_funcs->set_fan_speed_percent(smu, speed);
2495                 if (!ret && !(smu->user_dpm_profile.flags & SMU_DPM_USER_PROFILE_RESTORE))
2496                         smu->user_dpm_profile.fan_speed_percent = speed;
2497         }
2498
2499         mutex_unlock(&smu->mutex);
2500
2501         return ret;
2502 }
2503
2504 int smu_get_fan_speed_rpm(void *handle, uint32_t *speed)
2505 {
2506         struct smu_context *smu = handle;
2507         int ret = 0;
2508         u32 percent;
2509
2510         if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
2511                 return -EOPNOTSUPP;
2512
2513         mutex_lock(&smu->mutex);
2514
2515         if (smu->ppt_funcs->get_fan_speed_percent) {
2516                 ret = smu->ppt_funcs->get_fan_speed_percent(smu, &percent);
2517                 if (!ret)
                                *speed = percent * smu->fan_max_rpm / 100;
2518         }
2519
2520         mutex_unlock(&smu->mutex);
2521
2522         return ret;
2523 }
2524
2525 int smu_set_deep_sleep_dcefclk(void *handle, uint32_t clk)
2526 {
2527         struct smu_context *smu = handle;
2528         int ret = 0;
2529
2530         if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
2531                 return -EOPNOTSUPP;
2532
2533         mutex_lock(&smu->mutex);
2534
2535         ret = smu_set_min_dcef_deep_sleep(smu, clk);
2536
2537         mutex_unlock(&smu->mutex);
2538
2539         return ret;
2540 }
2541
2542 int smu_get_clock_by_type_with_latency(void *handle,
2543                                        enum amd_pp_clock_type type,
2544                                        struct pp_clock_levels_with_latency *clocks)
2545 {
2546         struct smu_context *smu = handle;
2547         enum smu_clk_type clk_type;
2548         int ret = 0;
2549
2550         if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
2551                 return -EOPNOTSUPP;
2552
2553         mutex_lock(&smu->mutex);
2554
2555         if (smu->ppt_funcs->get_clock_by_type_with_latency) {
2556                 switch (type) {
2557                 case amd_pp_sys_clock:
2558                         clk_type = SMU_GFXCLK;
2559                         break;
2560                 case amd_pp_mem_clock:
2561                         clk_type = SMU_MCLK;
2562                         break;
2563                 case amd_pp_dcef_clock:
2564                         clk_type = SMU_DCEFCLK;
2565                         break;
2566                 case amd_pp_disp_clock:
2567                         clk_type = SMU_DISPCLK;
2568                         break;
2569                 default:
2570                         dev_err(smu->adev->dev, "Invalid clock type!\n");
2571                         mutex_unlock(&smu->mutex);
2572                         return -EINVAL;
2573                 }
2574
2575                 ret = smu->ppt_funcs->get_clock_by_type_with_latency(smu, clk_type, clocks);
2576         }
2577
2578         mutex_unlock(&smu->mutex);
2579
2580         return ret;
2581 }
2582
2583 int smu_display_clock_voltage_request(void *handle,
2584                                       struct pp_display_clock_request *clock_req)
2585 {
2586         struct smu_context *smu = handle;
2587         int ret = 0;
2588
2589         if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
2590                 return -EOPNOTSUPP;
2591
2592         mutex_lock(&smu->mutex);
2593
2594         if (smu->ppt_funcs->display_clock_voltage_request)
2595                 ret = smu->ppt_funcs->display_clock_voltage_request(smu, clock_req);
2596
2597         mutex_unlock(&smu->mutex);
2598
2599         return ret;
2600 }
2601
2603 int smu_display_disable_memory_clock_switch(void *handle,
2604                                             bool disable_memory_clock_switch)
2605 {
2606         struct smu_context *smu = handle;
2607         int ret = -EINVAL;
2608
2609         if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
2610                 return -EOPNOTSUPP;
2611
2612         mutex_lock(&smu->mutex);
2613
2614         if (smu->ppt_funcs->display_disable_memory_clock_switch)
2615                 ret = smu->ppt_funcs->display_disable_memory_clock_switch(smu, disable_memory_clock_switch);
2616
2617         mutex_unlock(&smu->mutex);
2618
2619         return ret;
2620 }
2621
2622 int smu_set_xgmi_pstate(void *handle,
2623                         uint32_t pstate)
2624 {
2625         struct smu_context *smu = handle;
2626         int ret = 0;
2627
2628         if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
2629                 return -EOPNOTSUPP;
2630
2631         mutex_lock(&smu->mutex);
2632
2633         if (smu->ppt_funcs->set_xgmi_pstate)
2634                 ret = smu->ppt_funcs->set_xgmi_pstate(smu, pstate);
2635
2636         mutex_unlock(&smu->mutex);
2637
2638         if (ret)
2639                 dev_err(smu->adev->dev, "Failed to set XGMI pstate!\n");
2640
2641         return ret;
2642 }
2643
2644 int smu_set_azalia_d3_pme(struct smu_context *smu)
2645 {
2646         int ret = 0;
2647
2648         if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
2649                 return -EOPNOTSUPP;
2650
2651         mutex_lock(&smu->mutex);
2652
2653         if (smu->ppt_funcs->set_azalia_d3_pme)
2654                 ret = smu->ppt_funcs->set_azalia_d3_pme(smu);
2655
2656         mutex_unlock(&smu->mutex);
2657
2658         return ret;
2659 }
2660
2661 /*
2662  * On system suspend or reset, the dpm_enabled flag is cleared
2663  * so that those SMU services which are no longer supported
2664  * get gated.
2665  *
2666  * However, the baco/mode1 reset should still be granted
2667  * as they are still supported and necessary.
2668  */
2669 bool smu_baco_is_support(struct smu_context *smu)
2670 {
2671         bool ret = false;
2672
2673         if (!smu->pm_enabled)
2674                 return false;
2675
2676         mutex_lock(&smu->mutex);
2677
2678         if (smu->ppt_funcs && smu->ppt_funcs->baco_is_support)
2679                 ret = smu->ppt_funcs->baco_is_support(smu);
2680
2681         mutex_unlock(&smu->mutex);
2682
2683         return ret;
2684 }
2685
2686 int smu_get_baco_capability(void *handle, bool *cap)
2687 {
2688         struct smu_context *smu = handle;
2689         int ret = 0;
2690
2691         *cap = false;
2692
2693         if (!smu->pm_enabled)
2694                 return 0;
2695
2696         mutex_lock(&smu->mutex);
2697
2698         if (smu->ppt_funcs && smu->ppt_funcs->baco_is_support)
2699                 *cap = smu->ppt_funcs->baco_is_support(smu);
2700
2701         mutex_unlock(&smu->mutex);
2702
2703         return ret;
2704 }
2705
2707 int smu_baco_get_state(struct smu_context *smu, enum smu_baco_state *state)
2708 {
2709         if (!smu->ppt_funcs->baco_get_state)
2710                 return -EINVAL;
2711
2712         mutex_lock(&smu->mutex);
2713         *state = smu->ppt_funcs->baco_get_state(smu);
2714         mutex_unlock(&smu->mutex);
2715
2716         return 0;
2717 }
2718
2719 int smu_baco_enter(struct smu_context *smu)
2720 {
2721         int ret = 0;
2722
2723         if (!smu->pm_enabled)
2724                 return -EOPNOTSUPP;
2725
2726         mutex_lock(&smu->mutex);
2727
2728         if (smu->ppt_funcs->baco_enter)
2729                 ret = smu->ppt_funcs->baco_enter(smu);
2730
2731         mutex_unlock(&smu->mutex);
2732
2733         if (ret)
2734                 dev_err(smu->adev->dev, "Failed to enter BACO state!\n");
2735
2736         return ret;
2737 }
2738
2739 int smu_baco_exit(struct smu_context *smu)
2740 {
2741         int ret = 0;
2742
2743         if (!smu->pm_enabled)
2744                 return -EOPNOTSUPP;
2745
2746         mutex_lock(&smu->mutex);
2747
2748         if (smu->ppt_funcs->baco_exit)
2749                 ret = smu->ppt_funcs->baco_exit(smu);
2750
2751         mutex_unlock(&smu->mutex);
2752
2753         if (ret)
2754                 dev_err(smu->adev->dev, "Failed to exit BACO state!\n");
2755
2756         return ret;
2757 }
2758
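/* state: 1 - enter BACO, 0 - exit BACO */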
2759 int smu_baco_set_state(void *handle, int state)
2760 {
2761         struct smu_context *smu = handle;
2762         int ret = 0;
2763
2764         if (!smu->pm_enabled)
2765                 return -EOPNOTSUPP;
2766
2767         if (state == 0) {
2768                 mutex_lock(&smu->mutex);
2769
2770                 if (smu->ppt_funcs->baco_exit)
2771                         ret = smu->ppt_funcs->baco_exit(smu);
2772
2773                 mutex_unlock(&smu->mutex);
2774         } else if (state == 1) {
2775                 mutex_lock(&smu->mutex);
2776
2777                 if (smu->ppt_funcs->baco_enter)
2778                         ret = smu->ppt_funcs->baco_enter(smu);
2779
2780                 mutex_unlock(&smu->mutex);
2781
2782         } else {
2783                 return -EINVAL;
2784         }
2785
2786         if (ret)
2787                 dev_err(smu->adev->dev, "Failed to %s BACO state!\n",
2788                                 state ? "enter" : "exit");
2789
2790         return ret;
2791 }
2792
2793 bool smu_mode1_reset_is_support(struct smu_context *smu)
2794 {
2795         bool ret = false;
2796
2797         if (!smu->pm_enabled)
2798                 return false;
2799
2800         mutex_lock(&smu->mutex);
2801
2802         if (smu->ppt_funcs && smu->ppt_funcs->mode1_reset_is_support)
2803                 ret = smu->ppt_funcs->mode1_reset_is_support(smu);
2804
2805         mutex_unlock(&smu->mutex);
2806
2807         return ret;
2808 }
2809
2810 bool smu_mode2_reset_is_support(struct smu_context *smu)
2811 {
2812         bool ret = false;
2813
2814         if (!smu->pm_enabled)
2815                 return false;
2816
2817         mutex_lock(&smu->mutex);
2818
2819         if (smu->ppt_funcs && smu->ppt_funcs->mode2_reset_is_support)
2820                 ret = smu->ppt_funcs->mode2_reset_is_support(smu);
2821
2822         mutex_unlock(&smu->mutex);
2823
2824         return ret;
2825 }
2826
2827 int smu_mode1_reset(struct smu_context *smu)
2828 {
2829         int ret = 0;
2830
2831         if (!smu->pm_enabled)
2832                 return -EOPNOTSUPP;
2833
2834         mutex_lock(&smu->mutex);
2835
2836         if (smu->ppt_funcs->mode1_reset)
2837                 ret = smu->ppt_funcs->mode1_reset(smu);
2838
2839         mutex_unlock(&smu->mutex);
2840
2841         return ret;
2842 }
2843
2844 int smu_mode2_reset(void *handle)
2845 {
2846         struct smu_context *smu = handle;
2847         int ret = 0;
2848
2849         if (!smu->pm_enabled)
2850                 return -EOPNOTSUPP;
2851
2852         mutex_lock(&smu->mutex);
2853
2854         if (smu->ppt_funcs->mode2_reset)
2855                 ret = smu->ppt_funcs->mode2_reset(smu);
2856
2857         mutex_unlock(&smu->mutex);
2858
2859         if (ret)
2860                 dev_err(smu->adev->dev, "Mode2 reset failed!\n");
2861
2862         return ret;
2863 }
2864
2865 int smu_get_max_sustainable_clocks_by_dc(void *handle,
2866                                          struct pp_smu_nv_clock_table *max_clocks)
2867 {
2868         struct smu_context *smu = handle;
2869         int ret = 0;
2870
2871         if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
2872                 return -EOPNOTSUPP;
2873
2874         mutex_lock(&smu->mutex);
2875
2876         if (smu->ppt_funcs->get_max_sustainable_clocks_by_dc)
2877                 ret = smu->ppt_funcs->get_max_sustainable_clocks_by_dc(smu, max_clocks);
2878
2879         mutex_unlock(&smu->mutex);
2880
2881         return ret;
2882 }
2883
2884 int smu_get_uclk_dpm_states(void *handle,
2885                             unsigned int *clock_values_in_khz,
2886                             unsigned int *num_states)
2887 {
2888         struct smu_context *smu = handle;
2889         int ret = 0;
2890
2891         if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
2892                 return -EOPNOTSUPP;
2893
2894         mutex_lock(&smu->mutex);
2895
2896         if (smu->ppt_funcs->get_uclk_dpm_states)
2897                 ret = smu->ppt_funcs->get_uclk_dpm_states(smu, clock_values_in_khz, num_states);
2898
2899         mutex_unlock(&smu->mutex);
2900
2901         return ret;
2902 }
2903
2904 enum amd_pm_state_type smu_get_current_power_state(void *handle)
2905 {
2906         struct smu_context *smu = handle;
2907         enum amd_pm_state_type pm_state = POWER_STATE_TYPE_DEFAULT;
2908
2909         if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
2910                 return -EOPNOTSUPP;
2911
2912         mutex_lock(&smu->mutex);
2913
2914         if (smu->ppt_funcs->get_current_power_state)
2915                 pm_state = smu->ppt_funcs->get_current_power_state(smu);
2916
2917         mutex_unlock(&smu->mutex);
2918
2919         return pm_state;
2920 }
2921
2922 int smu_get_dpm_clock_table(void *handle,
2923                             struct dpm_clocks *clock_table)
2924 {
2925         struct smu_context *smu = handle;
2926         int ret = 0;
2927
2928         if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
2929                 return -EOPNOTSUPP;
2930
2931         mutex_lock(&smu->mutex);
2932
2933         if (smu->ppt_funcs->get_dpm_clock_table)
2934                 ret = smu->ppt_funcs->get_dpm_clock_table(smu, clock_table);
2935
2936         mutex_unlock(&smu->mutex);
2937
2938         return ret;
2939 }
2940
2941 ssize_t smu_sys_get_gpu_metrics(void *handle, void **table)
2942 {
2943         struct smu_context *smu = handle;
2944         ssize_t size;
2945
2946         if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
2947                 return -EOPNOTSUPP;
2948
2949         if (!smu->ppt_funcs->get_gpu_metrics)
2950                 return -EOPNOTSUPP;
2951
2952         mutex_lock(&smu->mutex);
2953
2954         size = smu->ppt_funcs->get_gpu_metrics(smu, table);
2955
2956         mutex_unlock(&smu->mutex);
2957
2958         return size;
2959 }
2960
2961 int smu_enable_mgpu_fan_boost(void *handle)
2962 {
2963         struct smu_context *smu = handle;
2964         int ret = 0;
2965
2966         if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
2967                 return -EOPNOTSUPP;
2968
2969         mutex_lock(&smu->mutex);
2970
2971         if (smu->ppt_funcs->enable_mgpu_fan_boost)
2972                 ret = smu->ppt_funcs->enable_mgpu_fan_boost(smu);
2973
2974         mutex_unlock(&smu->mutex);
2975
2976         return ret;
2977 }
2978
2979 int smu_gfx_state_change_set(struct smu_context *smu, uint32_t state)
2980 {
2981         int ret = 0;
2982
2983         mutex_lock(&smu->mutex);
2984         if (smu->ppt_funcs->gfx_state_change_set)
2985                 ret = smu->ppt_funcs->gfx_state_change_set(smu, state);
2986         mutex_unlock(&smu->mutex);
2987
2988         return ret;
2989 }
2990
2991 int smu_set_light_sbr(struct smu_context *smu, bool enable)
2992 {
2993         int ret = 0;
2994
2995         mutex_lock(&smu->mutex);
2996         if (smu->ppt_funcs->set_light_sbr)
2997                 ret = smu->ppt_funcs->set_light_sbr(smu, enable);
2998         mutex_unlock(&smu->mutex);
2999
3000         return ret;
3001 }
3002
3004 static const struct amd_pm_funcs swsmu_pm_funcs = {
3005         /* export for sysfs */
3006         .set_fan_control_mode    = smu_pp_set_fan_control_mode,
3007         .get_fan_control_mode    = smu_get_fan_control_mode,
3008         .set_fan_speed_percent   = smu_set_fan_speed_percent,
3009         .get_fan_speed_percent   = smu_get_fan_speed_percent,
3010         .force_performance_level = smu_force_performance_level,
3011         .read_sensor             = smu_read_sensor,
3012         .get_performance_level   = smu_get_performance_level,
3013         .get_current_power_state = smu_get_current_power_state,
3014         .get_fan_speed_rpm       = smu_get_fan_speed_rpm,
3015         .set_fan_speed_rpm       = smu_set_fan_speed_rpm,
3016         .get_pp_num_states       = smu_get_power_num_states,
3017         .get_pp_table            = smu_sys_get_pp_table,
3018         .set_pp_table            = smu_sys_set_pp_table,
3019         .switch_power_profile    = smu_switch_power_profile,
3020         /* export to amdgpu */
3021         .dispatch_tasks          = smu_handle_dpm_task,
3022         .set_powergating_by_smu  = smu_dpm_set_power_gate,
3023         .set_power_limit         = smu_set_power_limit,
3024         .odn_edit_dpm_table      = smu_od_edit_dpm_table,
3025         .set_mp1_state           = smu_set_mp1_state,
3026         /* export to DC */
3027         .get_sclk                = smu_get_sclk,
3028         .get_mclk                = smu_get_mclk,
3029         .enable_mgpu_fan_boost   = smu_enable_mgpu_fan_boost,
3030         .get_asic_baco_capability = smu_get_baco_capability,
3031         .set_asic_baco_state     = smu_baco_set_state,
3032         .get_ppfeature_status    = smu_sys_get_pp_feature_mask,
3033         .set_ppfeature_status    = smu_sys_set_pp_feature_mask,
3034         .asic_reset_mode_2       = smu_mode2_reset,
3035         .set_df_cstate           = smu_set_df_cstate,
3036         .set_xgmi_pstate         = smu_set_xgmi_pstate,
3037         .get_gpu_metrics         = smu_sys_get_gpu_metrics,
3038         .set_power_profile_mode  = smu_set_power_profile_mode,
3039         .get_power_profile_mode  = smu_get_power_profile_mode,
3040         .force_clock_level       = smu_force_ppclk_levels,
3041         .print_clock_levels      = smu_print_ppclk_levels,
3042         .get_uclk_dpm_states     = smu_get_uclk_dpm_states,
3043         .get_dpm_clock_table     = smu_get_dpm_clock_table,
3044         .display_configuration_change        = smu_display_configuration_change,
3045         .get_clock_by_type_with_latency      = smu_get_clock_by_type_with_latency,
3046         .display_clock_voltage_request       = smu_display_clock_voltage_request,
3047         .set_active_display_count            = smu_set_display_count,
3048         .set_min_deep_sleep_dcefclk          = smu_set_deep_sleep_dcefclk,
3049         .set_watermarks_for_clock_ranges     = smu_set_watermarks_for_clock_ranges,
3050         .display_disable_memory_clock_switch = smu_display_disable_memory_clock_switch,
3051         .get_max_sustainable_clocks_by_dc    = smu_get_max_sustainable_clocks_by_dc,
3052 };