drm/amdgpu/powerplay: add initial swSMU support for sienna_cichlid (v2)
drivers/gpu/drm/amd/powerplay/amdgpu_smu.c [linux-2.6-microblaze.git]
1 /*
2  * Copyright 2019 Advanced Micro Devices, Inc.
3  *
4  * Permission is hereby granted, free of charge, to any person obtaining a
5  * copy of this software and associated documentation files (the "Software"),
6  * to deal in the Software without restriction, including without limitation
7  * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8  * and/or sell copies of the Software, and to permit persons to whom the
9  * Software is furnished to do so, subject to the following conditions:
10  *
11  * The above copyright notice and this permission notice shall be included in
12  * all copies or substantial portions of the Software.
13  *
14  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
17  * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18  * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19  * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20  * OTHER DEALINGS IN THE SOFTWARE.
21  */
22
23 #include <linux/firmware.h>
24 #include <linux/pci.h>
25
26 #include "amdgpu.h"
27 #include "amdgpu_smu.h"
28 #include "smu_internal.h"
29 #include "smu_v11_0.h"
30 #include "smu_v12_0.h"
31 #include "atom.h"
32 #include "arcturus_ppt.h"
33 #include "navi10_ppt.h"
34 #include "sienna_cichlid_ppt.h"
35 #include "renoir_ppt.h"
36
37 #undef __SMU_DUMMY_MAP
38 #define __SMU_DUMMY_MAP(type)   #type
39 static const char* __smu_message_names[] = {
40         SMU_MESSAGE_TYPES
41 };
42
43 const char *smu_get_message_name(struct smu_context *smu, enum smu_message_type type)
44 {
45         if (type < 0 || type >= SMU_MSG_MAX_COUNT)
46                 return "unknown smu message";
47         return __smu_message_names[type];
48 }
49
50 #undef __SMU_DUMMY_MAP
51 #define __SMU_DUMMY_MAP(fea)    #fea
52 static const char* __smu_feature_names[] = {
53         SMU_FEATURE_MASKS
54 };
55
56 const char *smu_get_feature_name(struct smu_context *smu, enum smu_feature_mask feature)
57 {
58         if (feature < 0 || feature >= SMU_FEATURE_COUNT)
59                 return "unknown smu feature";
60         return __smu_feature_names[feature];
61 }
62
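/*
 * Dump the driver known SMC features into the caller supplied buffer:
 * the raw enabled mask first, then one line per feature with its name,
 * hardware index and enabled/disabled state. Returns the number of
 * bytes written.
 */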
63 size_t smu_sys_get_pp_feature_mask(struct smu_context *smu, char *buf)
64 {
65         size_t size = 0;
66         int ret = 0, i = 0;
67         uint32_t feature_mask[2] = { 0 };
68         int32_t feature_index = 0;
69         uint32_t count = 0;
70         uint32_t sort_feature[SMU_FEATURE_COUNT];
71         uint64_t hw_feature_count = 0;
72
73         if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
74                 return -EOPNOTSUPP;
75
76         mutex_lock(&smu->mutex);
77
78         ret = smu_feature_get_enabled_mask(smu, feature_mask, 2);
79         if (ret)
80                 goto failed;
81
82         size =  sprintf(buf + size, "features high: 0x%08x low: 0x%08x\n",
83                         feature_mask[1], feature_mask[0]);
84
85         for (i = 0; i < SMU_FEATURE_COUNT; i++) {
86                 feature_index = smu_feature_get_index(smu, i);
87                 if (feature_index < 0)
88                         continue;
89                 sort_feature[feature_index] = i;
90                 hw_feature_count++;
91         }
92
93         for (i = 0; i < hw_feature_count; i++) {
94                 size += sprintf(buf + size, "%02d. %-20s (%2d) : %s\n",
95                                count++,
96                                smu_get_feature_name(smu, sort_feature[i]),
97                                i,
98                                !!smu_feature_is_enabled(smu, sort_feature[i]) ?
99                                "enabled" : "disabled");
100         }
101
102 failed:
103         mutex_unlock(&smu->mutex);
104
105         return size;
106 }
107
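/*
 * Enable or disable the features selected by feature_mask through the
 * Enable/DisableSmuFeaturesLow and Enable/DisableSmuFeaturesHigh messages,
 * then update the cached enabled bitmap accordingly.
 */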
108 static int smu_feature_update_enable_state(struct smu_context *smu,
109                                            uint64_t feature_mask,
110                                            bool enabled)
111 {
112         struct smu_feature *feature = &smu->smu_feature;
113         uint32_t feature_low = 0, feature_high = 0;
114         int ret = 0;
115
116         feature_low = (feature_mask >> 0 ) & 0xffffffff;
117         feature_high = (feature_mask >> 32) & 0xffffffff;
118
119         if (enabled) {
120                 ret = smu_send_smc_msg_with_param(smu, SMU_MSG_EnableSmuFeaturesLow,
121                                                   feature_low, NULL);
122                 if (ret)
123                         return ret;
124                 ret = smu_send_smc_msg_with_param(smu, SMU_MSG_EnableSmuFeaturesHigh,
125                                                   feature_high, NULL);
126                 if (ret)
127                         return ret;
128         } else {
129                 ret = smu_send_smc_msg_with_param(smu, SMU_MSG_DisableSmuFeaturesLow,
130                                                   feature_low, NULL);
131                 if (ret)
132                         return ret;
133                 ret = smu_send_smc_msg_with_param(smu, SMU_MSG_DisableSmuFeaturesHigh,
134                                                   feature_high, NULL);
135                 if (ret)
136                         return ret;
137         }
138
139         mutex_lock(&feature->mutex);
140         if (enabled)
141                 bitmap_or(feature->enabled, feature->enabled,
142                                 (unsigned long *)(&feature_mask), SMU_FEATURE_MAX);
143         else
144                 bitmap_andnot(feature->enabled, feature->enabled,
145                                 (unsigned long *)(&feature_mask), SMU_FEATURE_MAX);
146         mutex_unlock(&feature->mutex);
147
148         return ret;
149 }
150
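/*
 * Apply a new feature enablement mask: features set in new_mask but not
 * currently enabled are turned on, features currently enabled but cleared
 * in new_mask are turned off.
 */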
151 int smu_sys_set_pp_feature_mask(struct smu_context *smu, uint64_t new_mask)
152 {
153         int ret = 0;
154         uint32_t feature_mask[2] = { 0 };
155         uint64_t feature_2_enabled = 0;
156         uint64_t feature_2_disabled = 0;
157         uint64_t feature_enables = 0;
158
159         if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
160                 return -EOPNOTSUPP;
161
162         mutex_lock(&smu->mutex);
163
164         ret = smu_feature_get_enabled_mask(smu, feature_mask, 2);
165         if (ret)
166                 goto out;
167
168         feature_enables = ((uint64_t)feature_mask[1] << 32 | (uint64_t)feature_mask[0]);
169
170         feature_2_enabled  = ~feature_enables & new_mask;
171         feature_2_disabled = feature_enables & ~new_mask;
172
173         if (feature_2_enabled) {
174                 ret = smu_feature_update_enable_state(smu, feature_2_enabled, true);
175                 if (ret)
176                         goto out;
177         }
178         if (feature_2_disabled) {
179                 ret = smu_feature_update_enable_state(smu, feature_2_disabled, false);
180                 if (ret)
181                         goto out;
182         }
183
184 out:
185         mutex_unlock(&smu->mutex);
186
187         return ret;
188 }
189
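/*
 * Return the SMC interface and firmware versions. Cached values are used
 * when available; otherwise they are queried from the SMC through the
 * GetDriverIfVersion and GetSmuVersion messages and cached for later use.
 */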
190 int smu_get_smc_version(struct smu_context *smu, uint32_t *if_version, uint32_t *smu_version)
191 {
192         int ret = 0;
193
194         if (!if_version && !smu_version)
195                 return -EINVAL;
196
197         if (smu->smc_fw_if_version && smu->smc_fw_version)
198         {
199                 if (if_version)
200                         *if_version = smu->smc_fw_if_version;
201
202                 if (smu_version)
203                         *smu_version = smu->smc_fw_version;
204
205                 return 0;
206         }
207
208         if (if_version) {
209                 ret = smu_send_smc_msg(smu, SMU_MSG_GetDriverIfVersion, if_version);
210                 if (ret)
211                         return ret;
212
213                 smu->smc_fw_if_version = *if_version;
214         }
215
216         if (smu_version) {
217                 ret = smu_send_smc_msg(smu, SMU_MSG_GetSmuVersion, smu_version);
218                 if (ret)
219                         return ret;
220
221                 smu->smc_fw_version = *smu_version;
222         }
223
224         return ret;
225 }
226
227 int smu_set_soft_freq_range(struct smu_context *smu, enum smu_clk_type clk_type,
228                             uint32_t min, uint32_t max, bool lock_needed)
229 {
230         int ret = 0;
231
232         if (!smu_clk_dpm_is_enabled(smu, clk_type))
233                 return 0;
234
235         if (lock_needed)
236                 mutex_lock(&smu->mutex);
237         ret = smu_set_soft_freq_limited_range(smu, clk_type, min, max);
238         if (lock_needed)
239                 mutex_unlock(&smu->mutex);
240
241         return ret;
242 }
243
244 int smu_set_hard_freq_range(struct smu_context *smu, enum smu_clk_type clk_type,
245                             uint32_t min, uint32_t max)
246 {
247         int ret = 0, clk_id = 0;
248         uint32_t param;
249
250         if (min <= 0 && max <= 0)
251                 return -EINVAL;
252
253         if (!smu_clk_dpm_is_enabled(smu, clk_type))
254                 return 0;
255
256         clk_id = smu_clk_get_index(smu, clk_type);
257         if (clk_id < 0)
258                 return clk_id;
259
260         if (max > 0) {
261                 param = (uint32_t)((clk_id << 16) | (max & 0xffff));
262                 ret = smu_send_smc_msg_with_param(smu, SMU_MSG_SetHardMaxByFreq,
263                                                   param, NULL);
264                 if (ret)
265                         return ret;
266         }
267
268         if (min > 0) {
269                 param = (uint32_t)((clk_id << 16) | (min & 0xffff));
270                 ret = smu_send_smc_msg_with_param(smu, SMU_MSG_SetHardMinByFreq,
271                                                   param, NULL);
272                 if (ret)
273                         return ret;
274         }
275
276
277         return ret;
278 }
279
280 int smu_get_dpm_freq_range(struct smu_context *smu, enum smu_clk_type clk_type,
281                            uint32_t *min, uint32_t *max, bool lock_needed)
282 {
283         uint32_t clock_limit;
284         int ret = 0;
285
286         if (!min && !max)
287                 return -EINVAL;
288
289         if (lock_needed)
290                 mutex_lock(&smu->mutex);
291
292         if (!smu_clk_dpm_is_enabled(smu, clk_type)) {
293                 switch (clk_type) {
294                 case SMU_MCLK:
295                 case SMU_UCLK:
296                         clock_limit = smu->smu_table.boot_values.uclk;
297                         break;
298                 case SMU_GFXCLK:
299                 case SMU_SCLK:
300                         clock_limit = smu->smu_table.boot_values.gfxclk;
301                         break;
302                 case SMU_SOCCLK:
303                         clock_limit = smu->smu_table.boot_values.socclk;
304                         break;
305                 default:
306                         clock_limit = 0;
307                         break;
308                 }
309
310                 /* clock in MHz unit */
311                 if (min)
312                         *min = clock_limit / 100;
313                 if (max)
314                         *max = clock_limit / 100;
315         } else {
316                 /*
317                  * Todo: Have each ASIC (ASIC_ppt funcs) control the callbacks exposed to the
318                  * core driver, and add helpers for the common code (SMU_v11_x | SMU_v12_x funcs).
319                  */
320                 ret = smu_get_dpm_ultimate_freq(smu, clk_type, min, max);
321         }
322
323         if (lock_needed)
324                 mutex_unlock(&smu->mutex);
325
326         return ret;
327 }
328
329 int smu_get_dpm_freq_by_index(struct smu_context *smu, enum smu_clk_type clk_type,
330                               uint16_t level, uint32_t *value)
331 {
332         int ret = 0, clk_id = 0;
333         uint32_t param;
334
335         if (!value)
336                 return -EINVAL;
337
338         if (!smu_clk_dpm_is_enabled(smu, clk_type))
339                 return 0;
340
341         clk_id = smu_clk_get_index(smu, clk_type);
342         if (clk_id < 0)
343                 return clk_id;
344
345         param = (uint32_t)(((clk_id & 0xffff) << 16) | (level & 0xffff));
346
347         ret = smu_send_smc_msg_with_param(smu, SMU_MSG_GetDpmFreqByIndex,
348                                           param, value);
349         if (ret)
350                 return ret;
351
352         /* BIT31: 0 - Fine grained DPM, 1 - Discrete DPM.
353          * Discrete DPM is not supported for now, so mask the bit off. */
354         *value = *value & 0x7fffffff;
355
356         return ret;
357 }
358
359 int smu_get_dpm_level_count(struct smu_context *smu, enum smu_clk_type clk_type,
360                             uint32_t *value)
361 {
362         return smu_get_dpm_freq_by_index(smu, clk_type, 0xff, value);
363 }
364
365 int smu_get_dpm_level_range(struct smu_context *smu, enum smu_clk_type clk_type,
366                             uint32_t *min_value, uint32_t *max_value)
367 {
368         int ret = 0;
369         uint32_t level_count = 0;
370
371         if (!min_value && !max_value)
372                 return -EINVAL;
373
374         if (min_value) {
375                 /* by default, use the level 0 clock value as the min value */
376                 ret = smu_get_dpm_freq_by_index(smu, clk_type, 0, min_value);
377                 if (ret)
378                         return ret;
379         }
380
381         if (max_value) {
382                 ret = smu_get_dpm_level_count(smu, clk_type, &level_count);
383                 if (ret)
384                         return ret;
385
386                 ret = smu_get_dpm_freq_by_index(smu, clk_type, level_count - 1, max_value);
387                 if (ret)
388                         return ret;
389         }
390
391         return ret;
392 }
393
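/*
 * Check whether DPM is enabled for the given clock type by mapping it to
 * the corresponding DPM feature bit. Clock types without such a mapping
 * are treated as always enabled.
 */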
394 bool smu_clk_dpm_is_enabled(struct smu_context *smu, enum smu_clk_type clk_type)
395 {
396         enum smu_feature_mask feature_id = 0;
397
398         switch (clk_type) {
399         case SMU_MCLK:
400         case SMU_UCLK:
401                 feature_id = SMU_FEATURE_DPM_UCLK_BIT;
402                 break;
403         case SMU_GFXCLK:
404         case SMU_SCLK:
405                 feature_id = SMU_FEATURE_DPM_GFXCLK_BIT;
406                 break;
407         case SMU_SOCCLK:
408                 feature_id = SMU_FEATURE_DPM_SOCCLK_BIT;
409                 break;
410         default:
411                 return true;
412         }
413
414         if (!smu_feature_is_enabled(smu, feature_id)) {
415                 return false;
416         }
417
418         return true;
419 }
420
421 /**
422  * smu_dpm_set_power_gate - power gate/ungate the specific IP block
423  *
424  * @smu:        smu_context pointer
425  * @block_type: the IP block to power gate/ungate
426  * @gate:       to power gate if true, ungate otherwise
427  *
428  * This API uses no smu->mutex lock protection because:
429  * 1. It is called by other IP blocks (gfx/sdma/vcn/uvd/vce), and those
430  *    callers guarantee the calls are free of race conditions.
431  * 2. Or it is called on a user request to change power_dpm_force_performance_level.
432  *    In that case, the smu->mutex lock protection is already enforced in
433  *    the parent API smu_force_performance_level of the call path.
434  */
435 int smu_dpm_set_power_gate(struct smu_context *smu, uint32_t block_type,
436                            bool gate)
437 {
438         int ret = 0;
439
440         if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
441                 return -EOPNOTSUPP;
442
443         switch (block_type) {
444         case AMD_IP_BLOCK_TYPE_UVD:
445                 ret = smu_dpm_set_uvd_enable(smu, !gate);
446                 break;
447         case AMD_IP_BLOCK_TYPE_VCE:
448                 ret = smu_dpm_set_vce_enable(smu, !gate);
449                 break;
450         case AMD_IP_BLOCK_TYPE_GFX:
451                 ret = smu_gfx_off_control(smu, gate);
452                 break;
453         case AMD_IP_BLOCK_TYPE_SDMA:
454                 ret = smu_powergate_sdma(smu, gate);
455                 break;
456         case AMD_IP_BLOCK_TYPE_JPEG:
457                 ret = smu_dpm_set_jpeg_enable(smu, !gate);
458                 break;
459         default:
460                 break;
461         }
462
463         return ret;
464 }
465
466 int smu_get_power_num_states(struct smu_context *smu,
467                              struct pp_states_info *state_info)
468 {
469         if (!state_info)
470                 return -EINVAL;
471
472         /* power states are not supported */
473         memset(state_info, 0, sizeof(struct pp_states_info));
474         state_info->nums = 1;
475         state_info->states[0] = POWER_STATE_TYPE_DEFAULT;
476
477         return 0;
478 }
479
480 int smu_common_read_sensor(struct smu_context *smu, enum amd_pp_sensors sensor,
481                            void *data, uint32_t *size)
482 {
483         struct smu_power_context *smu_power = &smu->smu_power;
484         struct smu_power_gate *power_gate = &smu_power->power_gate;
485         int ret = 0;
486
487         if (!data || !size)
488                 return -EINVAL;
489
490         switch (sensor) {
491         case AMDGPU_PP_SENSOR_STABLE_PSTATE_SCLK:
492                 *((uint32_t *)data) = smu->pstate_sclk;
493                 *size = 4;
494                 break;
495         case AMDGPU_PP_SENSOR_STABLE_PSTATE_MCLK:
496                 *((uint32_t *)data) = smu->pstate_mclk;
497                 *size = 4;
498                 break;
499         case AMDGPU_PP_SENSOR_ENABLED_SMC_FEATURES_MASK:
500                 ret = smu_feature_get_enabled_mask(smu, (uint32_t *)data, 2);
501                 *size = 8;
502                 break;
503         case AMDGPU_PP_SENSOR_UVD_POWER:
504                 *(uint32_t *)data = smu_feature_is_enabled(smu, SMU_FEATURE_DPM_UVD_BIT) ? 1 : 0;
505                 *size = 4;
506                 break;
507         case AMDGPU_PP_SENSOR_VCE_POWER:
508                 *(uint32_t *)data = smu_feature_is_enabled(smu, SMU_FEATURE_DPM_VCE_BIT) ? 1 : 0;
509                 *size = 4;
510                 break;
511         case AMDGPU_PP_SENSOR_VCN_POWER_STATE:
512                 *(uint32_t *)data = power_gate->vcn_gated ? 0 : 1;
513                 *size = 4;
514                 break;
515         default:
516                 ret = -EINVAL;
517                 break;
518         }
519
520         if (ret)
521                 *size = 0;
522
523         return ret;
524 }
525
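/*
 * Transfer an SMC table between driver memory and the SMU. With drv2smu set,
 * table_data is copied into the driver table bo and pushed to the SMU via
 * TransferTableDram2Smu; otherwise the table is pulled with
 * TransferTableSmu2Dram and copied back into table_data. HDP cache flushes
 * keep the CPU and GPU views of the bo consistent.
 */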
526 int smu_update_table(struct smu_context *smu, enum smu_table_id table_index, int argument,
527                      void *table_data, bool drv2smu)
528 {
529         struct smu_table_context *smu_table = &smu->smu_table;
530         struct amdgpu_device *adev = smu->adev;
531         struct smu_table *table = &smu_table->driver_table;
532         int table_id = smu_table_get_index(smu, table_index);
533         uint32_t table_size;
534         int ret = 0;
535         if (!table_data || table_id >= SMU_TABLE_COUNT || table_id < 0)
536                 return -EINVAL;
537
538         table_size = smu_table->tables[table_index].size;
539
540         if (drv2smu) {
541                 memcpy(table->cpu_addr, table_data, table_size);
542                 /*
543                  * Flush the HDP cache to ensure the content seen by the
544                  * GPU is consistent with what the CPU wrote.
545                  */
546                 amdgpu_asic_flush_hdp(adev, NULL);
547         }
548
549         ret = smu_send_smc_msg_with_param(smu, drv2smu ?
550                                           SMU_MSG_TransferTableDram2Smu :
551                                           SMU_MSG_TransferTableSmu2Dram,
552                                           table_id | ((argument & 0xFFFF) << 16),
553                                           NULL);
554         if (ret)
555                 return ret;
556
557         if (!drv2smu) {
558                 amdgpu_asic_flush_hdp(adev, NULL);
559                 memcpy(table_data, table->cpu_addr, table_size);
560         }
561
562         return ret;
563 }
564
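/* ASICs from Arcturus onwards are handled by the swSMU code paths. */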
565 bool is_support_sw_smu(struct amdgpu_device *adev)
566 {
567         if (adev->asic_type >= CHIP_ARCTURUS)
568                 return true;
569
570         return false;
571 }
572
573 int smu_sys_get_pp_table(struct smu_context *smu, void **table)
574 {
575         struct smu_table_context *smu_table = &smu->smu_table;
576         uint32_t powerplay_table_size;
577
578         if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
579                 return -EOPNOTSUPP;
580
581         if (!smu_table->power_play_table && !smu_table->hardcode_pptable)
582                 return -EINVAL;
583
584         mutex_lock(&smu->mutex);
585
586         if (smu_table->hardcode_pptable)
587                 *table = smu_table->hardcode_pptable;
588         else
589                 *table = smu_table->power_play_table;
590
591         powerplay_table_size = smu_table->power_play_table_size;
592
593         mutex_unlock(&smu->mutex);
594
595         return powerplay_table_size;
596 }
597
598 int smu_sys_set_pp_table(struct smu_context *smu,  void *buf, size_t size)
599 {
600         struct smu_table_context *smu_table = &smu->smu_table;
601         ATOM_COMMON_TABLE_HEADER *header = (ATOM_COMMON_TABLE_HEADER *)buf;
602         int ret = 0;
603
604         if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
605                 return -EOPNOTSUPP;
606
607         if (header->usStructureSize != size) {
608                 pr_err("pp table size not matched !\n");
609                 return -EIO;
610         }
611
612         mutex_lock(&smu->mutex);
613         if (!smu_table->hardcode_pptable)
614                 smu_table->hardcode_pptable = kzalloc(size, GFP_KERNEL);
615         if (!smu_table->hardcode_pptable) {
616                 ret = -ENOMEM;
617                 goto failed;
618         }
619
620         memcpy(smu_table->hardcode_pptable, buf, size);
621         smu_table->power_play_table = smu_table->hardcode_pptable;
622         smu_table->power_play_table_size = size;
623
624         /*
625          * A special hw_fini action (on Navi1x, the DPM disablement is
626          * skipped) may be needed for custom pptable uploading.
627          */
628         smu->uploading_custom_pp_table = true;
629
630         ret = smu_reset(smu);
631         if (ret)
632                 pr_info("smu reset failed, ret = %d\n", ret);
633
634         smu->uploading_custom_pp_table = false;
635
636 failed:
637         mutex_unlock(&smu->mutex);
638         return ret;
639 }
640
641 int smu_feature_init_dpm(struct smu_context *smu)
642 {
643         struct smu_feature *feature = &smu->smu_feature;
644         int ret = 0;
645         uint32_t allowed_feature_mask[SMU_FEATURE_MAX/32];
646
647         mutex_lock(&feature->mutex);
648         bitmap_zero(feature->allowed, SMU_FEATURE_MAX);
649         mutex_unlock(&feature->mutex);
650
651         ret = smu_get_allowed_feature_mask(smu, allowed_feature_mask,
652                                              SMU_FEATURE_MAX/32);
653         if (ret)
654                 return ret;
655
656         mutex_lock(&feature->mutex);
657         bitmap_or(feature->allowed, feature->allowed,
658                       (unsigned long *)allowed_feature_mask,
659                       feature->feature_num);
660         mutex_unlock(&feature->mutex);
661
662         return ret;
663 }
664
665
666 int smu_feature_is_enabled(struct smu_context *smu, enum smu_feature_mask mask)
667 {
668         struct smu_feature *feature = &smu->smu_feature;
669         int feature_id;
670         int ret = 0;
671
672         if (smu->is_apu)
673                 return 1;
674         feature_id = smu_feature_get_index(smu, mask);
675         if (feature_id < 0)
676                 return 0;
677
678         WARN_ON(feature_id > feature->feature_num);
679
680         mutex_lock(&feature->mutex);
681         ret = test_bit(feature_id, feature->enabled);
682         mutex_unlock(&feature->mutex);
683
684         return ret;
685 }
686
687 int smu_feature_set_enabled(struct smu_context *smu, enum smu_feature_mask mask,
688                             bool enable)
689 {
690         struct smu_feature *feature = &smu->smu_feature;
691         int feature_id;
692
693         feature_id = smu_feature_get_index(smu, mask);
694         if (feature_id < 0)
695                 return -EINVAL;
696
697         WARN_ON(feature_id > feature->feature_num);
698
699         return smu_feature_update_enable_state(smu,
700                                                1ULL << feature_id,
701                                                enable);
702 }
703
704 int smu_feature_is_supported(struct smu_context *smu, enum smu_feature_mask mask)
705 {
706         struct smu_feature *feature = &smu->smu_feature;
707         int feature_id;
708         int ret = 0;
709
710         feature_id = smu_feature_get_index(smu, mask);
711         if (feature_id < 0)
712                 return 0;
713
714         WARN_ON(feature_id > feature->feature_num);
715
716         mutex_lock(&feature->mutex);
717         ret = test_bit(feature_id, feature->supported);
718         mutex_unlock(&feature->mutex);
719
720         return ret;
721 }
722
723 int smu_feature_set_supported(struct smu_context *smu,
724                               enum smu_feature_mask mask,
725                               bool enable)
726 {
727         struct smu_feature *feature = &smu->smu_feature;
728         int feature_id;
729         int ret = 0;
730
731         feature_id = smu_feature_get_index(smu, mask);
732         if (feature_id < 0)
733                 return -EINVAL;
734
735         WARN_ON(feature_id > feature->feature_num);
736
737         mutex_lock(&feature->mutex);
738         if (enable)
739                 test_and_set_bit(feature_id, feature->supported);
740         else
741                 test_and_clear_bit(feature_id, feature->supported);
742         mutex_unlock(&feature->mutex);
743
744         return ret;
745 }
746
747 static int smu_set_funcs(struct amdgpu_device *adev)
748 {
749         struct smu_context *smu = &adev->smu;
750
751         if (adev->pm.pp_feature & PP_OVERDRIVE_MASK)
752                 smu->od_enabled = true;
753
754         switch (adev->asic_type) {
755         case CHIP_NAVI10:
756         case CHIP_NAVI14:
757         case CHIP_NAVI12:
758                 navi10_set_ppt_funcs(smu);
759                 break;
760         case CHIP_ARCTURUS:
761                 adev->pm.pp_feature &= ~PP_GFXOFF_MASK;
762                 arcturus_set_ppt_funcs(smu);
763                 /* OD is not supported on Arcturus */
764                 smu->od_enabled = false;
765                 break;
766         case CHIP_SIENNA_CICHLID:
767                 sienna_cichlid_set_ppt_funcs(smu);
768                 break;
769         case CHIP_RENOIR:
770                 renoir_set_ppt_funcs(smu);
771                 break;
772         default:
773                 return -EINVAL;
774         }
775
776         return 0;
777 }
778
779 static int smu_early_init(void *handle)
780 {
781         struct amdgpu_device *adev = (struct amdgpu_device *)handle;
782         struct smu_context *smu = &adev->smu;
783
784         smu->adev = adev;
785         smu->pm_enabled = !!amdgpu_dpm;
786         smu->is_apu = false;
787         mutex_init(&smu->mutex);
788
789         return smu_set_funcs(adev);
790 }
791
792 static int smu_late_init(void *handle)
793 {
794         struct amdgpu_device *adev = (struct amdgpu_device *)handle;
795         struct smu_context *smu = &adev->smu;
796
797         if (!smu->pm_enabled)
798                 return 0;
799
800         smu_get_unique_id(smu);
801
802         smu_handle_task(&adev->smu,
803                         smu->smu_dpm.dpm_level,
804                         AMD_PP_TASK_COMPLETE_INIT,
805                         false);
806
807         return 0;
808 }
809
810 int smu_get_atom_data_table(struct smu_context *smu, uint32_t table,
811                             uint16_t *size, uint8_t *frev, uint8_t *crev,
812                             uint8_t **addr)
813 {
814         struct amdgpu_device *adev = smu->adev;
815         uint16_t data_start;
816
817         if (!amdgpu_atom_parse_data_header(adev->mode_info.atom_context, table,
818                                            size, frev, crev, &data_start))
819                 return -EINVAL;
820
821         *addr = (uint8_t *)adev->mode_info.atom_context->bios + data_start;
822
823         return 0;
824 }
825
826 static int smu_initialize_pptable(struct smu_context *smu)
827 {
828         /* TODO */
829         return 0;
830 }
831
832 static int smu_smc_table_sw_init(struct smu_context *smu)
833 {
834         int ret;
835
836         ret = smu_initialize_pptable(smu);
837         if (ret) {
838                 pr_err("Failed to init smu_initialize_pptable!\n");
839                 return ret;
840         }
841
842         /**
843          * Create smu_table structure, and init smc tables such as
844          * TABLE_PPTABLE, TABLE_WATERMARKS, TABLE_SMU_METRICS, etc.
845          */
846         ret = smu_init_smc_tables(smu);
847         if (ret) {
848                 pr_err("Failed to init smc tables!\n");
849                 return ret;
850         }
851
852         /**
853          * Create smu_power_context structure, and allocate smu_dpm_context and
854          * context size to fill the smu_power_context data.
855          */
856         ret = smu_init_power(smu);
857         if (ret) {
858                 pr_err("Failed to init smu_init_power!\n");
859                 return ret;
860         }
861
862         return 0;
863 }
864
865 static int smu_smc_table_sw_fini(struct smu_context *smu)
866 {
867         int ret;
868
869         ret = smu_fini_smc_tables(smu);
870         if (ret) {
871                 pr_err("Failed to fini smc tables!\n");
872                 return ret;
873         }
874
875         return 0;
876 }
877
878 static int smu_sw_init(void *handle)
879 {
880         struct amdgpu_device *adev = (struct amdgpu_device *)handle;
881         struct smu_context *smu = &adev->smu;
882         int ret;
883
884         smu->pool_size = adev->pm.smu_prv_buffer_size;
885         smu->smu_feature.feature_num = SMU_FEATURE_MAX;
886         mutex_init(&smu->smu_feature.mutex);
887         bitmap_zero(smu->smu_feature.supported, SMU_FEATURE_MAX);
888         bitmap_zero(smu->smu_feature.enabled, SMU_FEATURE_MAX);
889         bitmap_zero(smu->smu_feature.allowed, SMU_FEATURE_MAX);
890
891         mutex_init(&smu->smu_baco.mutex);
892         smu->smu_baco.state = SMU_BACO_STATE_EXIT;
893         smu->smu_baco.platform_support = false;
894
895         mutex_init(&smu->sensor_lock);
896         mutex_init(&smu->metrics_lock);
897         mutex_init(&smu->message_lock);
898
899         smu->watermarks_bitmap = 0;
900         smu->power_profile_mode = PP_SMC_POWER_PROFILE_BOOTUP_DEFAULT;
901         smu->default_power_profile_mode = PP_SMC_POWER_PROFILE_BOOTUP_DEFAULT;
902
903         smu->workload_mask = 1 << smu->workload_prority[PP_SMC_POWER_PROFILE_BOOTUP_DEFAULT];
904         smu->workload_prority[PP_SMC_POWER_PROFILE_BOOTUP_DEFAULT] = 0;
905         smu->workload_prority[PP_SMC_POWER_PROFILE_FULLSCREEN3D] = 1;
906         smu->workload_prority[PP_SMC_POWER_PROFILE_POWERSAVING] = 2;
907         smu->workload_prority[PP_SMC_POWER_PROFILE_VIDEO] = 3;
908         smu->workload_prority[PP_SMC_POWER_PROFILE_VR] = 4;
909         smu->workload_prority[PP_SMC_POWER_PROFILE_COMPUTE] = 5;
910         smu->workload_prority[PP_SMC_POWER_PROFILE_CUSTOM] = 6;
911
912         smu->workload_setting[0] = PP_SMC_POWER_PROFILE_BOOTUP_DEFAULT;
913         smu->workload_setting[1] = PP_SMC_POWER_PROFILE_FULLSCREEN3D;
914         smu->workload_setting[2] = PP_SMC_POWER_PROFILE_POWERSAVING;
915         smu->workload_setting[3] = PP_SMC_POWER_PROFILE_VIDEO;
916         smu->workload_setting[4] = PP_SMC_POWER_PROFILE_VR;
917         smu->workload_setting[5] = PP_SMC_POWER_PROFILE_COMPUTE;
918         smu->workload_setting[6] = PP_SMC_POWER_PROFILE_CUSTOM;
919         smu->display_config = &adev->pm.pm_display_cfg;
920
921         smu->smu_dpm.dpm_level = AMD_DPM_FORCED_LEVEL_AUTO;
922         smu->smu_dpm.requested_dpm_level = AMD_DPM_FORCED_LEVEL_AUTO;
923         ret = smu_init_microcode(smu);
924         if (ret) {
925                 pr_err("Failed to load smu firmware!\n");
926                 return ret;
927         }
928
929         ret = smu_smc_table_sw_init(smu);
930         if (ret) {
931                 pr_err("Failed to sw init smc table!\n");
932                 return ret;
933         }
934
935         ret = smu_register_irq_handler(smu);
936         if (ret) {
937                 pr_err("Failed to register smc irq handler!\n");
938                 return ret;
939         }
940
941         return 0;
942 }
943
944 static int smu_sw_fini(void *handle)
945 {
946         struct amdgpu_device *adev = (struct amdgpu_device *)handle;
947         struct smu_context *smu = &adev->smu;
948         int ret;
949
950         kfree(smu->irq_source);
951         smu->irq_source = NULL;
952
953         ret = smu_smc_table_sw_fini(smu);
954         if (ret) {
955                 pr_err("Failed to sw fini smc table!\n");
956                 return ret;
957         }
958
959         ret = smu_fini_power(smu);
960         if (ret) {
961                 pr_err("Failed to fini smu power!\n");
962                 return ret;
963         }
964
965         return 0;
966 }
967
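/*
 * Allocate the vram backing for the SMC tables: a dedicated bo for the
 * tool (PMSTATUSLOG) table when one is described, plus a single driver
 * table bo sized to hold the largest of the remaining tables.
 */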
968 static int smu_init_fb_allocations(struct smu_context *smu)
969 {
970         struct amdgpu_device *adev = smu->adev;
971         struct smu_table_context *smu_table = &smu->smu_table;
972         struct smu_table *tables = smu_table->tables;
973         struct smu_table *driver_table = &(smu_table->driver_table);
974         uint32_t max_table_size = 0;
975         int ret, i;
976
977         /* VRAM allocation for tool table */
978         if (tables[SMU_TABLE_PMSTATUSLOG].size) {
979                 ret = amdgpu_bo_create_kernel(adev,
980                                               tables[SMU_TABLE_PMSTATUSLOG].size,
981                                               tables[SMU_TABLE_PMSTATUSLOG].align,
982                                               tables[SMU_TABLE_PMSTATUSLOG].domain,
983                                               &tables[SMU_TABLE_PMSTATUSLOG].bo,
984                                               &tables[SMU_TABLE_PMSTATUSLOG].mc_address,
985                                               &tables[SMU_TABLE_PMSTATUSLOG].cpu_addr);
986                 if (ret) {
987                         pr_err("VRAM allocation for tool table failed!\n");
988                         return ret;
989                 }
990         }
991
992         /* VRAM allocation for driver table */
993         for (i = 0; i < SMU_TABLE_COUNT; i++) {
994                 if (tables[i].size == 0)
995                         continue;
996
997                 if (i == SMU_TABLE_PMSTATUSLOG)
998                         continue;
999
1000                 if (max_table_size < tables[i].size)
1001                         max_table_size = tables[i].size;
1002         }
1003
1004         driver_table->size = max_table_size;
1005         driver_table->align = PAGE_SIZE;
1006         driver_table->domain = AMDGPU_GEM_DOMAIN_VRAM;
1007
1008         ret = amdgpu_bo_create_kernel(adev,
1009                                       driver_table->size,
1010                                       driver_table->align,
1011                                       driver_table->domain,
1012                                       &driver_table->bo,
1013                                       &driver_table->mc_address,
1014                                       &driver_table->cpu_addr);
1015         if (ret) {
1016                 pr_err("VRAM allocation for driver table failed!\n");
1017                 if (tables[SMU_TABLE_PMSTATUSLOG].mc_address)
1018                         amdgpu_bo_free_kernel(&tables[SMU_TABLE_PMSTATUSLOG].bo,
1019                                               &tables[SMU_TABLE_PMSTATUSLOG].mc_address,
1020                                               &tables[SMU_TABLE_PMSTATUSLOG].cpu_addr);
1021         }
1022
1023         return ret;
1024 }
1025
1026 static int smu_fini_fb_allocations(struct smu_context *smu)
1027 {
1028         struct smu_table_context *smu_table = &smu->smu_table;
1029         struct smu_table *tables = smu_table->tables;
1030         struct smu_table *driver_table = &(smu_table->driver_table);
1031
1032         if (!tables)
1033                 return 0;
1034
1035         if (tables[SMU_TABLE_PMSTATUSLOG].mc_address)
1036                 amdgpu_bo_free_kernel(&tables[SMU_TABLE_PMSTATUSLOG].bo,
1037                                       &tables[SMU_TABLE_PMSTATUSLOG].mc_address,
1038                                       &tables[SMU_TABLE_PMSTATUSLOG].cpu_addr);
1039
1040         amdgpu_bo_free_kernel(&driver_table->bo,
1041                               &driver_table->mc_address,
1042                               &driver_table->cpu_addr);
1043
1044         return 0;
1045 }
1046
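/*
 * Bring up the SMC tables on the hardware side. On first initialization
 * this also reads the vbios boot values and pptable, allocates the table
 * bos and verifies the firmware version. The pptable is then uploaded,
 * BTC is run, the allowed feature mask is applied and the SMU features
 * are enabled, followed by the remaining per-ASIC bootup programming.
 */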
1047 static int smu_smc_table_hw_init(struct smu_context *smu,
1048                                  bool initialize)
1049 {
1050         struct amdgpu_device *adev = smu->adev;
1051         int ret;
1052
1053         if (smu_is_dpm_running(smu) && adev->in_suspend) {
1054                 pr_info("dpm has been enabled\n");
1055                 return 0;
1056         }
1057
1058         if (adev->asic_type != CHIP_ARCTURUS &&
1059             adev->asic_type != CHIP_SIENNA_CICHLID) {
1060                 ret = smu_init_display_count(smu, 0);
1061                 if (ret)
1062                         return ret;
1063         }
1064
1065         if (initialize) {
1066                 /* get boot_values from vbios to set revision, gfxclk, etc. */
1067                 ret = smu_get_vbios_bootup_values(smu);
1068                 if (ret)
1069                         return ret;
1070
1071                 ret = smu_setup_pptable(smu);
1072                 if (ret)
1073                         return ret;
1074
1075                 ret = smu_get_clk_info_from_vbios(smu);
1076                 if (ret)
1077                         return ret;
1078
1079                 /*
1080                  * check if the format_revision in vbios is up to date with the pptable
1081                  * header version, and that the structure size is not 0.
1082                  */
1083                 ret = smu_check_pptable(smu);
1084                 if (ret)
1085                         return ret;
1086
1087                 /*
1088                  * allocate vram bos to store smc table contents.
1089                  */
1090                 ret = smu_init_fb_allocations(smu);
1091                 if (ret)
1092                         return ret;
1093
1094                 /*
1095                  * Parse the pptable format and fill the PPTable_t smc_pptable in the
1096                  * smu_table_context structure. Also read the smc_dpm_table from vbios
1097                  * and fill it into smc_pptable.
1098                  */
1099                 ret = smu_parse_pptable(smu);
1100                 if (ret)
1101                         return ret;
1102
1103                 /*
1104                  * Send the GetDriverIfVersion message to check whether the returned value
1105                  * matches the DRIVER_IF_VERSION in the smc header.
1106                  */
1107                 ret = smu_check_fw_version(smu);
1108                 if (ret)
1109                         return ret;
1110         }
1111
1112         ret = smu_set_driver_table_location(smu);
1113         if (ret)
1114                 return ret;
1115
1116         /* smu_dump_pptable(smu); */
1117         /*
1118          * Copy the pptable bo contents in vram to the SMC with SMU messages
1119          * such as SetDriverDramAddr and TransferTableDram2Smu.
1120          */
1121         ret = smu_write_pptable(smu);
1122         if (ret)
1123                 return ret;
1124
1125         /* issue Run*Btc msg */
1126         ret = smu_run_btc(smu);
1127         if (ret)
1128                 return ret;
1129         ret = smu_feature_set_allowed_mask(smu);
1130         if (ret)
1131                 return ret;
1132
1133         ret = smu_system_features_control(smu, true);
1134         if (ret)
1135                 return ret;
1136
1137         if (adev->asic_type == CHIP_NAVI10) {
1138                 if (adev->pdev->device == 0x731f && (adev->pdev->revision == 0xc2 ||
1139                                                      adev->pdev->revision == 0xc3 ||
1140                                                      adev->pdev->revision == 0xca ||
1141                                                      adev->pdev->revision == 0xcb)) {
1142                         ret = smu_disable_umc_cdr_12gbps_workaround(smu);
1143                         if (ret) {
1144                                 pr_err("Workaround failed to disable UMC CDR feature on 12Gbps SKU!\n");
1145                                 return ret;
1146                         }
1147                 }
1148         }
1149
1150         if (smu->ppt_funcs->set_power_source) {
1151                 /*
1152                  * For Navi1X, manually sync the power source mode with the
1153                  * platform state, as PMFW may boot in DC mode.
1154                  */
1155                 if (adev->pm.ac_power)
1156                         ret = smu_set_power_source(smu, SMU_POWER_SOURCE_AC);
1157                 else
1158                         ret = smu_set_power_source(smu, SMU_POWER_SOURCE_DC);
1159                 if (ret) {
1160                         pr_err("Failed to switch to %s mode!\n", adev->pm.ac_power ? "AC" : "DC");
1161                         return ret;
1162                 }
1163         }
1164
1165         if (adev->asic_type != CHIP_ARCTURUS &&
1166             adev->asic_type != CHIP_SIENNA_CICHLID) {
1167                 ret = smu_notify_display_change(smu);
1168                 if (ret)
1169                         return ret;
1170
1171                 /*
1172                  * Set the min deep sleep dcefclk to the bootup value from vbios via
1173                  * the SetMinDeepSleepDcefclk message.
1174                  */
1175                 ret = smu_set_min_dcef_deep_sleep(smu);
1176                 if (ret)
1177                         return ret;
1178         }
1179
1180         /*
1181          * Set initial values (obtained from vbios) in the dpm tables context,
1182          * such as gfxclk, memclk, dcefclk, etc., and enable the DPM feature
1183          * for each clock type.
1184          */
1185         if (initialize) {
1186                 ret = smu_populate_smc_tables(smu);
1187                 if (ret)
1188                         return ret;
1189
1190                 ret = smu_init_max_sustainable_clocks(smu);
1191                 if (ret)
1192                         return ret;
1193         }
1194
1195         if (adev->asic_type != CHIP_ARCTURUS) {
1196                 ret = smu_override_pcie_parameters(smu);
1197                 if (ret)
1198                         return ret;
1199         }
1200
1201         ret = smu_set_default_od_settings(smu, initialize);
1202         if (ret)
1203                 return ret;
1204
1205         if (initialize) {
1206                 ret = smu_populate_umd_state_clk(smu);
1207                 if (ret)
1208                         return ret;
1209
1210                 ret = smu_get_power_limit(smu, &smu->default_power_limit, false, false);
1211                 if (ret)
1212                         return ret;
1213         }
1214
1215         /*
1216          * Set PMSTATUSLOG table bo address with SetToolsDramAddr MSG for tools.
1217          */
1218         ret = smu_set_tool_table_location(smu);
1219
1220         if (!smu_is_dpm_running(smu))
1221                 pr_info("dpm has been disabled\n");
1222
1223         return ret;
1224 }
1225
1226 /**
1227  * smu_alloc_memory_pool - allocate memory pool in the system memory
1228  *
1229  * @smu: smu_context pointer
1230  *
1231  * This memory pool is reserved for SMC use. Its location is passed to the
1232  * SMC via the SetSystemVirtualDramAddr and DramLogSetDramAddr messages.
1233  *
1234  * Returns 0 on success, error on failure.
1235  */
1236 static int smu_alloc_memory_pool(struct smu_context *smu)
1237 {
1238         struct amdgpu_device *adev = smu->adev;
1239         struct smu_table_context *smu_table = &smu->smu_table;
1240         struct smu_table *memory_pool = &smu_table->memory_pool;
1241         uint64_t pool_size = smu->pool_size;
1242         int ret = 0;
1243
1244         if (pool_size == SMU_MEMORY_POOL_SIZE_ZERO)
1245                 return ret;
1246
1247         memory_pool->size = pool_size;
1248         memory_pool->align = PAGE_SIZE;
1249         memory_pool->domain = AMDGPU_GEM_DOMAIN_GTT;
1250
1251         switch (pool_size) {
1252         case SMU_MEMORY_POOL_SIZE_256_MB:
1253         case SMU_MEMORY_POOL_SIZE_512_MB:
1254         case SMU_MEMORY_POOL_SIZE_1_GB:
1255         case SMU_MEMORY_POOL_SIZE_2_GB:
1256                 ret = amdgpu_bo_create_kernel(adev,
1257                                               memory_pool->size,
1258                                               memory_pool->align,
1259                                               memory_pool->domain,
1260                                               &memory_pool->bo,
1261                                               &memory_pool->mc_address,
1262                                               &memory_pool->cpu_addr);
1263                 break;
1264         default:
1265                 break;
1266         }
1267
1268         return ret;
1269 }
1270
1271 static int smu_free_memory_pool(struct smu_context *smu)
1272 {
1273         struct smu_table_context *smu_table = &smu->smu_table;
1274         struct smu_table *memory_pool = &smu_table->memory_pool;
1275
1276         if (memory_pool->size == SMU_MEMORY_POOL_SIZE_ZERO)
1277                 return 0;
1278
1279         amdgpu_bo_free_kernel(&memory_pool->bo,
1280                               &memory_pool->mc_address,
1281                               &memory_pool->cpu_addr);
1282
1283         memset(memory_pool, 0, sizeof(struct smu_table));
1284
1285         return 0;
1286 }
1287
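/*
 * Make sure the SMC is up and running: load the SMU microcode directly
 * when PSP front door loading is not used (pre-Navi10 only), then check
 * the firmware status.
 */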
1288 static int smu_start_smc_engine(struct smu_context *smu)
1289 {
1290         struct amdgpu_device *adev = smu->adev;
1291         int ret = 0;
1292
1293         if (adev->firmware.load_type != AMDGPU_FW_LOAD_PSP) {
1294                 if (adev->asic_type < CHIP_NAVI10) {
1295                         if (smu->ppt_funcs->load_microcode) {
1296                                 ret = smu->ppt_funcs->load_microcode(smu);
1297                                 if (ret)
1298                                         return ret;
1299                         }
1300                 }
1301         }
1302
1303         if (smu->ppt_funcs->check_fw_status) {
1304                 ret = smu->ppt_funcs->check_fw_status(smu);
1305                 if (ret)
1306                         pr_err("SMC is not ready\n");
1307         }
1308
1309         return ret;
1310 }
1311
1312 static int smu_hw_init(void *handle)
1313 {
1314         int ret;
1315         struct amdgpu_device *adev = (struct amdgpu_device *)handle;
1316         struct smu_context *smu = &adev->smu;
1317
1318         if (amdgpu_sriov_vf(adev) && !amdgpu_sriov_is_pp_one_vf(adev))
1319                 return 0;
1320
1321         ret = smu_start_smc_engine(smu);
1322         if (ret) {
1323                 pr_err("SMU is not ready yet!\n");
1324                 return ret;
1325         }
1326
1327         if (smu->is_apu) {
1328                 smu_powergate_sdma(&adev->smu, false);
1329                 smu_powergate_vcn(&adev->smu, false);
1330                 smu_powergate_jpeg(&adev->smu, false);
1331                 smu_set_gfx_cgpg(&adev->smu, true);
1332         }
1333
1334         if (!smu->pm_enabled)
1335                 return 0;
1336
1337         ret = smu_feature_init_dpm(smu);
1338         if (ret)
1339                 goto failed;
1340
1341         ret = smu_smc_table_hw_init(smu, true);
1342         if (ret)
1343                 goto failed;
1344
1345         ret = smu_alloc_memory_pool(smu);
1346         if (ret)
1347                 goto failed;
1348
1349         /*
1350          * Use the SetSystemVirtualDramAddr and DramLogSetDramAddr messages to
1351          * notify the SMC of the pool location.
1352          */
1353         ret = smu_notify_memory_pool_location(smu);
1354         if (ret)
1355                 goto failed;
1356
1357         ret = smu_enable_thermal_alert(smu);
1358         if (ret)
1359                 goto failed;
1360
1361         ret = smu_i2c_eeprom_init(smu, &adev->pm.smu_i2c);
1362         if (ret)
1363                 goto failed;
1364
1365         adev->pm.dpm_enabled = true;
1366
1367         pr_info("SMU is initialized successfully!\n");
1368
1369         return 0;
1370
1371 failed:
1372         return ret;
1373 }
1374
1375 static int smu_stop_dpms(struct smu_context *smu)
1376 {
1377         return smu_system_features_control(smu, false);
1378 }
1379
1380 static int smu_hw_fini(void *handle)
1381 {
1382         struct amdgpu_device *adev = (struct amdgpu_device *)handle;
1383         struct smu_context *smu = &adev->smu;
1384         struct smu_table_context *table_context = &smu->smu_table;
1385         int ret = 0;
1386
1387         if (amdgpu_sriov_vf(adev) && !amdgpu_sriov_is_pp_one_vf(adev))
1388                 return 0;
1389
1390         if (smu->is_apu) {
1391                 smu_powergate_sdma(&adev->smu, true);
1392                 smu_powergate_vcn(&adev->smu, true);
1393                 smu_powergate_jpeg(&adev->smu, true);
1394         }
1395
1396         if (!smu->pm_enabled)
1397                 return 0;
1398
1399         adev->pm.dpm_enabled = false;
1400
1401         smu_i2c_eeprom_fini(smu, &adev->pm.smu_i2c);
1402
1403         ret = smu_disable_thermal_alert(smu);
1404         if (ret) {
1405                 pr_warn("Failed to stop thermal control!\n");
1406                 return ret;
1407         }
1408
1409         /*
1410          * For custom pptable uploading, skip the DPM features
1411          * disable process on Navi1x ASICs.
1412          *   - The gfx related features are under the control of
1413          *     the RLC on those ASICs. RLC reinitialization would be
1414          *     needed to reenable them, which would cost much more
1415          *     effort.
1416          *
1417          *   - SMU firmware can handle the DPM reenablement
1418          *     properly.
1419          */
1420         if (!smu->uploading_custom_pp_table ||
1421                         !((adev->asic_type >= CHIP_NAVI10) &&
1422                                 (adev->asic_type <= CHIP_NAVI12))) {
1423                 ret = smu_stop_dpms(smu);
1424                 if (ret) {
1425                         pr_warn("Failed to stop DPMs!\n");
1426                         return ret;
1427                 }
1428         }
1429
1430         kfree(table_context->driver_pptable);
1431         table_context->driver_pptable = NULL;
1432
1433         kfree(table_context->max_sustainable_clocks);
1434         table_context->max_sustainable_clocks = NULL;
1435
1436         kfree(table_context->overdrive_table);
1437         table_context->overdrive_table = NULL;
1438
1439         ret = smu_fini_fb_allocations(smu);
1440         if (ret)
1441                 return ret;
1442
1443         ret = smu_free_memory_pool(smu);
1444         if (ret)
1445                 return ret;
1446
1447         return 0;
1448 }
1449
1450 int smu_reset(struct smu_context *smu)
1451 {
1452         struct amdgpu_device *adev = smu->adev;
1453         int ret = 0;
1454
1455         ret = smu_hw_fini(adev);
1456         if (ret)
1457                 return ret;
1458
1459         ret = smu_hw_init(adev);
1460         if (ret)
1461                 return ret;
1462
1463         return ret;
1464 }
1465
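/*
 * Disable all SMU features for suspend. When a BACO based reset or runtime
 * suspend/hibernate entry is expected, the BACO feature is re-enabled
 * afterwards (where the ASIC exposes it) so that the BACO sequence keeps
 * working.
 */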
1466 static int smu_disable_dpm(struct smu_context *smu)
1467 {
1468         struct amdgpu_device *adev = smu->adev;
1469         uint32_t smu_version;
1470         int ret = 0;
1471         bool use_baco = !smu->is_apu &&
1472                 ((adev->in_gpu_reset &&
1473                   (amdgpu_asic_reset_method(adev) == AMD_RESET_METHOD_BACO)) ||
1474                  ((adev->in_runpm || adev->in_hibernate) && amdgpu_asic_supports_baco(adev)));
1475
1476         ret = smu_get_smc_version(smu, NULL, &smu_version);
1477         if (ret) {
1478                 pr_err("Failed to get smu version.\n");
1479                 return ret;
1480         }
1481
1482         /*
1483          * Disable all enabled SMU features.
1484          * This should ideally be handled in SMU FW; as a backup,
1485          * the driver issues the call to SMU FW until the sequence
1486          * in SMU FW is operational.
1487          */
1488         ret = smu_system_features_control(smu, false);
1489         if (ret) {
1490                 pr_err("Failed to disable smu features.\n");
1491                 return ret;
1492         }
1493
1494         /*
1495          * Arcturus does not have BACO bit in disable feature mask.
1496          * Enablement of BACO bit on Arcturus should be skipped.
1497          */
1498         if (adev->asic_type == CHIP_ARCTURUS) {
1499                 if (use_baco && (smu_version > 0x360e00))
1500                         return 0;
1501         }
1502
1503         /* For baco, need to leave BACO feature enabled */
1504         if (use_baco) {
1505                 /*
1506                  * Correct the way for checking whether SMU_FEATURE_BACO_BIT
1507                  * is supported.
1508                  *
1509                  * Since 'smu_feature_is_enabled(smu, SMU_FEATURE_BACO_BIT)' will
1510                  * always return false as the 'smu_system_features_control(smu, false)'
1511                  * was just issued above which disabled all SMU features.
1512                  *
1513                  * Thus 'smu_feature_get_index(smu, SMU_FEATURE_BACO_BIT)' is used
1514                  * now for the checking.
1515                  */
1516                 if (smu_feature_get_index(smu, SMU_FEATURE_BACO_BIT) >= 0) {
1517                         ret = smu_feature_set_enabled(smu, SMU_FEATURE_BACO_BIT, true);
1518                         if (ret) {
1519                                 pr_warn("set BACO feature enabled failed, return %d\n", ret);
1520                                 return ret;
1521                         }
1522                 }
1523         }
1524
1525         return ret;
1526 }
1527
1528 static int smu_suspend(void *handle)
1529 {
1530         struct amdgpu_device *adev = (struct amdgpu_device *)handle;
1531         struct smu_context *smu = &adev->smu;
1532         int ret;
1533
1534         if (amdgpu_sriov_vf(adev) && !amdgpu_sriov_is_pp_one_vf(adev))
1535                 return 0;
1536
1537         if (!smu->pm_enabled)
1538                 return 0;
1539
1540         adev->pm.dpm_enabled = false;
1541
1542         smu_i2c_eeprom_fini(smu, &adev->pm.smu_i2c);
1543
1544         ret = smu_disable_thermal_alert(smu);
1545         if (ret) {
1546                 pr_warn("Failed to stop thermal control!\n");
1547                 return ret;
1548         }
1549
1550         ret = smu_disable_dpm(smu);
1551         if (ret)
1552                 return ret;
1553
1554         smu->watermarks_bitmap &= ~(WATERMARKS_LOADED);
1555
1556         if (adev->asic_type >= CHIP_NAVI10 &&
1557             adev->gfx.rlc.funcs->stop)
1558                 adev->gfx.rlc.funcs->stop(adev);
1559         if (smu->is_apu)
1560                 smu_set_gfx_cgpg(&adev->smu, false);
1561
1562         return 0;
1563 }
1564
1565 static int smu_resume(void *handle)
1566 {
1567         int ret;
1568         struct amdgpu_device *adev = (struct amdgpu_device *)handle;
1569         struct smu_context *smu = &adev->smu;
1570
1571         if (amdgpu_sriov_vf(adev) && !amdgpu_sriov_is_pp_one_vf(adev))
1572                 return 0;
1573
1574         if (!smu->pm_enabled)
1575                 return 0;
1576
1577         pr_info("SMU is resuming...\n");
1578
1579         ret = smu_start_smc_engine(smu);
1580         if (ret) {
1581                 pr_err("SMU is not ready yet!\n");
1582                 goto failed;
1583         }
1584
1585         ret = smu_smc_table_hw_init(smu, false);
1586         if (ret)
1587                 goto failed;
1588
1589         ret = smu_enable_thermal_alert(smu);
1590         if (ret)
1591                 goto failed;
1592
1593         ret = smu_i2c_eeprom_init(smu, &adev->pm.smu_i2c);
1594         if (ret)
1595                 goto failed;
1596
1597         if (smu->is_apu)
1598                 smu_set_gfx_cgpg(&adev->smu, true);
1599
1600         smu->disable_uclk_switch = 0;
1601
1602         adev->pm.dpm_enabled = true;
1603
1604         pr_info("SMU is resumed successfully!\n");
1605
1606         return 0;
1607
1608 failed:
1609         return ret;
1610 }
1611
1612 int smu_display_configuration_change(struct smu_context *smu,
1613                                      const struct amd_pp_display_configuration *display_config)
1614 {
1615         int index = 0;
1616         int num_of_active_display = 0;
1617
1618         if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
1619                 return -EOPNOTSUPP;
1620
1621         if (!display_config)
1622                 return -EINVAL;
1623
1624         mutex_lock(&smu->mutex);
1625
1626         if (smu->ppt_funcs->set_deep_sleep_dcefclk)
1627                 smu->ppt_funcs->set_deep_sleep_dcefclk(smu,
1628                                 display_config->min_dcef_deep_sleep_set_clk / 100);
1629
1630         for (index = 0; index < display_config->num_path_including_non_display; index++) {
1631                 if (display_config->displays[index].controller_id != 0)
1632                         num_of_active_display++;
1633         }
1634
1635         smu_set_active_display_count(smu, num_of_active_display);
1636
1637         smu_store_cc6_data(smu, display_config->cpu_pstate_separation_time,
1638                            display_config->cpu_cc6_disable,
1639                            display_config->cpu_pstate_disable,
1640                            display_config->nb_pstate_switch_disable);
1641
1642         mutex_unlock(&smu->mutex);
1643
1644         return 0;
1645 }
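
/*
 * Illustrative usage sketch (editor's addition, not part of the driver):
 * a hypothetical display client could report its configuration roughly as
 * below. Only fields consumed above are shown; the caller context and the
 * numeric values are placeholders.
 *
 *	struct amd_pp_display_configuration cfg = {0};
 *
 *	cfg.min_dcef_deep_sleep_set_clk = 40000;
 *	cfg.num_path_including_non_display = 2;
 *	cfg.displays[0].controller_id = 1;	// counted as an active display
 *	cfg.displays[1].controller_id = 0;	// ignored
 *	cfg.cpu_cc6_disable = false;
 *	cfg.cpu_pstate_disable = false;
 *	cfg.nb_pstate_switch_disable = false;
 *
 *	smu_display_configuration_change(smu, &cfg);
 */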
1646
1647 static int smu_get_clock_info(struct smu_context *smu,
1648                               struct smu_clock_info *clk_info,
1649                               enum smu_perf_level_designation designation)
1650 {
1651         int ret;
1652         struct smu_performance_level level = {0};
1653
1654         if (!clk_info)
1655                 return -EINVAL;
1656
1657         ret = smu_get_perf_level(smu, PERF_LEVEL_ACTIVITY, &level);
1658         if (ret)
1659                 return -EINVAL;
1660
1661         clk_info->min_mem_clk = level.memory_clock;
1662         clk_info->min_eng_clk = level.core_clock;
1663         clk_info->min_bus_bandwidth = level.non_local_mem_freq * level.non_local_mem_width;
1664
1665         ret = smu_get_perf_level(smu, designation, &level);
1666         if (ret)
1667                 return -EINVAL;
1668
1669         clk_info->min_mem_clk = level.memory_clock;
1670         clk_info->min_eng_clk = level.core_clock;
1671         clk_info->min_bus_bandwidth = level.non_local_mem_freq * level.non_local_mem_width;
1672
1673         return 0;
1674 }
1675
1676 int smu_get_current_clocks(struct smu_context *smu,
1677                            struct amd_pp_clock_info *clocks)
1678 {
1679         struct amd_pp_simple_clock_info simple_clocks = {0};
1680         struct smu_clock_info hw_clocks;
1681         int ret = 0;
1682
1683         if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
1684                 return -EOPNOTSUPP;
1685
1686         mutex_lock(&smu->mutex);
1687
1688         smu_get_dal_power_level(smu, &simple_clocks);
1689
1690         if (smu->support_power_containment)
1691                 ret = smu_get_clock_info(smu, &hw_clocks,
1692                                          PERF_LEVEL_POWER_CONTAINMENT);
1693         else
1694                 ret = smu_get_clock_info(smu, &hw_clocks, PERF_LEVEL_ACTIVITY);
1695
1696         if (ret) {
1697                 pr_err("Error in smu_get_clock_info\n");
1698                 goto failed;
1699         }
1700
1701         clocks->min_engine_clock = hw_clocks.min_eng_clk;
1702         clocks->max_engine_clock = hw_clocks.max_eng_clk;
1703         clocks->min_memory_clock = hw_clocks.min_mem_clk;
1704         clocks->max_memory_clock = hw_clocks.max_mem_clk;
1705         clocks->min_bus_bandwidth = hw_clocks.min_bus_bandwidth;
1706         clocks->max_bus_bandwidth = hw_clocks.max_bus_bandwidth;
1707         clocks->max_engine_clock_in_sr = hw_clocks.max_eng_clk;
1708         clocks->min_engine_clock_in_sr = hw_clocks.min_eng_clk;
1709
1710         if (simple_clocks.level == 0)
1711                 clocks->max_clocks_state = PP_DAL_POWERLEVEL_7;
1712         else
1713                 clocks->max_clocks_state = simple_clocks.level;
1714
1715         if (!smu_get_current_shallow_sleep_clocks(smu, &hw_clocks)) {
1716                 clocks->max_engine_clock_in_sr = hw_clocks.max_eng_clk;
1717                 clocks->min_engine_clock_in_sr = hw_clocks.min_eng_clk;
1718         }
1719
1720 failed:
1721         mutex_unlock(&smu->mutex);
1722         return ret;
1723 }
1724
1725 static int smu_set_clockgating_state(void *handle,
1726                                      enum amd_clockgating_state state)
1727 {
1728         return 0;
1729 }
1730
1731 static int smu_set_powergating_state(void *handle,
1732                                      enum amd_powergating_state state)
1733 {
1734         return 0;
1735 }
1736
1737 static int smu_enable_umd_pstate(void *handle,
1738                       enum amd_dpm_forced_level *level)
1739 {
1740         uint32_t profile_mode_mask = AMD_DPM_FORCED_LEVEL_PROFILE_STANDARD |
1741                                         AMD_DPM_FORCED_LEVEL_PROFILE_MIN_SCLK |
1742                                         AMD_DPM_FORCED_LEVEL_PROFILE_MIN_MCLK |
1743                                         AMD_DPM_FORCED_LEVEL_PROFILE_PEAK;
1744
1745         struct smu_context *smu = (struct smu_context*)(handle);
1746         struct smu_dpm_context *smu_dpm_ctx = &(smu->smu_dpm);
1747
1748         if (!smu->is_apu && !smu_dpm_ctx->dpm_context)
1749                 return -EINVAL;
1750
1751         if (!(smu_dpm_ctx->dpm_level & profile_mode_mask)) {
1752                 /* enter umd pstate, save current level, disable gfx cg */
1753                 if (*level & profile_mode_mask) {
1754                         smu_dpm_ctx->saved_dpm_level = smu_dpm_ctx->dpm_level;
1755                         smu_dpm_ctx->enable_umd_pstate = true;
1756                         amdgpu_device_ip_set_powergating_state(smu->adev,
1757                                                                AMD_IP_BLOCK_TYPE_GFX,
1758                                                                AMD_PG_STATE_UNGATE);
1759                         amdgpu_device_ip_set_clockgating_state(smu->adev,
1760                                                                AMD_IP_BLOCK_TYPE_GFX,
1761                                                                AMD_CG_STATE_UNGATE);
1762                 }
1763         } else {
1764                 /* exit umd pstate, restore level, enable gfx cg */
1765                 if (!(*level & profile_mode_mask)) {
1766                         if (*level == AMD_DPM_FORCED_LEVEL_PROFILE_EXIT)
1767                                 *level = smu_dpm_ctx->saved_dpm_level;
1768                         smu_dpm_ctx->enable_umd_pstate = false;
1769                         amdgpu_device_ip_set_clockgating_state(smu->adev,
1770                                                                AMD_IP_BLOCK_TYPE_GFX,
1771                                                                AMD_CG_STATE_GATE);
1772                         amdgpu_device_ip_set_powergating_state(smu->adev,
1773                                                                AMD_IP_BLOCK_TYPE_GFX,
1774                                                                AMD_PG_STATE_GATE);
1775                 }
1776         }
1777
1778         return 0;
1779 }
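
/*
 * Editor's note (illustrative, not part of the driver): the helper above is
 * driven by smu_force_performance_level() further below. A hypothetical
 * sequence from a caller's point of view:
 *
 *	// Entering a UMD pstate: the current level is saved and GFX CG/PG
 *	// are ungated so the requested profile clocks stay stable.
 *	smu_force_performance_level(smu, AMD_DPM_FORCED_LEVEL_PROFILE_PEAK);
 *
 *	// Exiting: PROFILE_EXIT restores the saved level and re-gates GFX.
 *	smu_force_performance_level(smu, AMD_DPM_FORCED_LEVEL_PROFILE_EXIT);
 */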
1780
1781 int smu_adjust_power_state_dynamic(struct smu_context *smu,
1782                                    enum amd_dpm_forced_level level,
1783                                    bool skip_display_settings)
1784 {
1785         int ret = 0;
1786         int index = 0;
1787         long workload;
1788         struct smu_dpm_context *smu_dpm_ctx = &(smu->smu_dpm);
1789
1790         if (!skip_display_settings) {
1791                 ret = smu_display_config_changed(smu);
1792                 if (ret) {
1793                         pr_err("Failed to change display config!");
1794                         return ret;
1795                 }
1796         }
1797
1798         ret = smu_apply_clocks_adjust_rules(smu);
1799         if (ret) {
1800                 pr_err("Failed to apply clocks adjust rules!");
1801                 return ret;
1802         }
1803
1804         if (!skip_display_settings) {
1805                 ret = smu_notify_smc_display_config(smu);
1806                 if (ret) {
1807                         pr_err("Failed to notify smc display config!");
1808                         return ret;
1809                 }
1810         }
1811
1812         if (smu_dpm_ctx->dpm_level != level) {
1813                 ret = smu_asic_set_performance_level(smu, level);
1814                 if (ret) {
1815                         pr_err("Failed to set performance level!");
1816                         return ret;
1817                 }
1818
1819                 /* update the saved copy */
1820                 smu_dpm_ctx->dpm_level = level;
1821         }
1822
1823         if (smu_dpm_ctx->dpm_level != AMD_DPM_FORCED_LEVEL_MANUAL) {
1824                 index = fls(smu->workload_mask);
1825                 index = index > 0 && index <= WORKLOAD_POLICY_MAX ? index - 1 : 0;
1826                 workload = smu->workload_setting[index];
1827
1828                 if (smu->power_profile_mode != workload)
1829                         smu_set_power_profile_mode(smu, &workload, 0, false);
1830         }
1831
1832         return ret;
1833 }
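
/*
 * Editor's note (illustrative): the workload selection above picks the
 * highest set bit of workload_mask via fls(), which returns 1 + the index
 * of the most significant set bit (0 for an empty mask). For example, with
 * a hypothetical workload_mask of 0x5 (bits 0 and 2 set):
 *
 *	fls(0x5) == 3, so index = 3 - 1 = 2
 *	workload = smu->workload_setting[2]
 *
 * i.e. the highest-priority pending workload wins.
 */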
1834
1835 int smu_handle_task(struct smu_context *smu,
1836                     enum amd_dpm_forced_level level,
1837                     enum amd_pp_task task_id,
1838                     bool lock_needed)
1839 {
1840         int ret = 0;
1841
1842         if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
1843                 return -EOPNOTSUPP;
1844
1845         if (lock_needed)
1846                 mutex_lock(&smu->mutex);
1847
1848         switch (task_id) {
1849         case AMD_PP_TASK_DISPLAY_CONFIG_CHANGE:
1850                 ret = smu_pre_display_config_changed(smu);
1851                 if (ret)
1852                         goto out;
1853                 ret = smu_set_cpu_power_state(smu);
1854                 if (ret)
1855                         goto out;
1856                 ret = smu_adjust_power_state_dynamic(smu, level, false);
1857                 break;
1858         case AMD_PP_TASK_COMPLETE_INIT:
1859         case AMD_PP_TASK_READJUST_POWER_STATE:
1860                 ret = smu_adjust_power_state_dynamic(smu, level, true);
1861                 break;
1862         default:
1863                 break;
1864         }
1865
1866 out:
1867         if (lock_needed)
1868                 mutex_unlock(&smu->mutex);
1869
1870         return ret;
1871 }
1872
1873 int smu_switch_power_profile(struct smu_context *smu,
1874                              enum PP_SMC_POWER_PROFILE type,
1875                              bool en)
1876 {
1877         struct smu_dpm_context *smu_dpm_ctx = &(smu->smu_dpm);
1878         long workload;
1879         uint32_t index;
1880
1881         if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
1882                 return -EOPNOTSUPP;
1883
1884         if (type >= PP_SMC_POWER_PROFILE_CUSTOM)
1885                 return -EINVAL;
1886
1887         mutex_lock(&smu->mutex);
1888
1889         if (!en) {
1890                 smu->workload_mask &= ~(1 << smu->workload_prority[type]);
1891                 index = fls(smu->workload_mask);
1892                 index = index > 0 && index <= WORKLOAD_POLICY_MAX ? index - 1 : 0;
1893                 workload = smu->workload_setting[index];
1894         } else {
1895                 smu->workload_mask |= (1 << smu->workload_prority[type]);
1896                 index = fls(smu->workload_mask);
1897                 index = index > 0 && index <= WORKLOAD_POLICY_MAX ? index - 1 : 0;
1898                 workload = smu->workload_setting[index];
1899         }
1900
1901         if (smu_dpm_ctx->dpm_level != AMD_DPM_FORCED_LEVEL_MANUAL)
1902                 smu_set_power_profile_mode(smu, &workload, 0, false);
1903
1904         mutex_unlock(&smu->mutex);
1905
1906         return 0;
1907 }
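
/*
 * Illustrative usage sketch (editor's addition): a hypothetical client that
 * wants a dedicated profile while work is in flight could bracket it as
 * below, assuming PP_SMC_POWER_PROFILE_COMPUTE is one of the profiles
 * preceding PP_SMC_POWER_PROFILE_CUSTOM in the enum:
 *
 *	smu_switch_power_profile(smu, PP_SMC_POWER_PROFILE_COMPUTE, true);
 *	// ... submit and wait for the compute work ...
 *	smu_switch_power_profile(smu, PP_SMC_POWER_PROFILE_COMPUTE, false);
 */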
1908
1909 enum amd_dpm_forced_level smu_get_performance_level(struct smu_context *smu)
1910 {
1911         struct smu_dpm_context *smu_dpm_ctx = &(smu->smu_dpm);
1912         enum amd_dpm_forced_level level;
1913
1914         if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
1915                 return -EOPNOTSUPP;
1916
1917         if (!smu->is_apu && !smu_dpm_ctx->dpm_context)
1918                 return -EINVAL;
1919
1920         mutex_lock(&(smu->mutex));
1921         level = smu_dpm_ctx->dpm_level;
1922         mutex_unlock(&(smu->mutex));
1923
1924         return level;
1925 }
1926
1927 int smu_force_performance_level(struct smu_context *smu, enum amd_dpm_forced_level level)
1928 {
1929         struct smu_dpm_context *smu_dpm_ctx = &(smu->smu_dpm);
1930         int ret = 0;
1931
1932         if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
1933                 return -EOPNOTSUPP;
1934
1935         if (!smu->is_apu && !smu_dpm_ctx->dpm_context)
1936                 return -EINVAL;
1937
1938         mutex_lock(&smu->mutex);
1939
1940         ret = smu_enable_umd_pstate(smu, &level);
1941         if (ret) {
1942                 mutex_unlock(&smu->mutex);
1943                 return ret;
1944         }
1945
1946         ret = smu_handle_task(smu, level,
1947                               AMD_PP_TASK_READJUST_POWER_STATE,
1948                               false);
1949
1950         mutex_unlock(&smu->mutex);
1951
1952         return ret;
1953 }
1954
1955 int smu_set_display_count(struct smu_context *smu, uint32_t count)
1956 {
1957         int ret = 0;
1958
1959         if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
1960                 return -EOPNOTSUPP;
1961
1962         mutex_lock(&smu->mutex);
1963         ret = smu_init_display_count(smu, count);
1964         mutex_unlock(&smu->mutex);
1965
1966         return ret;
1967 }
1968
1969 int smu_force_clk_levels(struct smu_context *smu,
1970                          enum smu_clk_type clk_type,
1971                          uint32_t mask,
1972                          bool lock_needed)
1973 {
1974         struct smu_dpm_context *smu_dpm_ctx = &(smu->smu_dpm);
1975         int ret = 0;
1976
1977         if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
1978                 return -EOPNOTSUPP;
1979
1980         if (smu_dpm_ctx->dpm_level != AMD_DPM_FORCED_LEVEL_MANUAL) {
1981                 pr_debug("force clock level is for dpm manual mode only.\n");
1982                 return -EINVAL;
1983         }
1984
1985         if (lock_needed)
1986                 mutex_lock(&smu->mutex);
1987
1988         if (smu->ppt_funcs && smu->ppt_funcs->force_clk_levels)
1989                 ret = smu->ppt_funcs->force_clk_levels(smu, clk_type, mask);
1990
1991         if (lock_needed)
1992                 mutex_unlock(&smu->mutex);
1993
1994         return ret;
1995 }
1996
1997 /*
1998  * On system suspend or reset, the dpm_enabled flag is
1999  * cleared so that SMU services which are no longer
2000  * supported are gated.
2001  * However, setting the mp1 state should still be allowed
2002  * even after dpm_enabled is cleared.
2003  */
2004 int smu_set_mp1_state(struct smu_context *smu,
2005                       enum pp_mp1_state mp1_state)
2006 {
2007         uint16_t msg;
2008         int ret;
2009
2010         if (!smu->pm_enabled)
2011                 return -EOPNOTSUPP;
2012
2013         mutex_lock(&smu->mutex);
2014
2015         switch (mp1_state) {
2016         case PP_MP1_STATE_SHUTDOWN:
2017                 msg = SMU_MSG_PrepareMp1ForShutdown;
2018                 break;
2019         case PP_MP1_STATE_UNLOAD:
2020                 msg = SMU_MSG_PrepareMp1ForUnload;
2021                 break;
2022         case PP_MP1_STATE_RESET:
2023                 msg = SMU_MSG_PrepareMp1ForReset;
2024                 break;
2025         case PP_MP1_STATE_NONE:
2026         default:
2027                 mutex_unlock(&smu->mutex);
2028                 return 0;
2029         }
2030
2031         /* Some ASICs may not support these messages */
2032         if (smu_msg_get_index(smu, msg) < 0) {
2033                 mutex_unlock(&smu->mutex);
2034                 return 0;
2035         }
2036
2037         ret = smu_send_smc_msg(smu, msg, NULL);
2038         if (ret)
2039                 pr_err("[PrepareMp1] Failed!\n");
2040
2041         mutex_unlock(&smu->mutex);
2042
2043         return ret;
2044 }
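
/*
 * Illustrative usage sketch (editor's addition): on the driver unload path a
 * hypothetical caller would ask the MP1 firmware to quiesce before the
 * driver goes away. Unsupported messages are silently skipped above.
 *
 *	ret = smu_set_mp1_state(&adev->smu, PP_MP1_STATE_UNLOAD);
 *	if (ret)
 *		pr_warn("Failed to prepare MP1 for unload\n");
 */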
2045
2046 int smu_set_df_cstate(struct smu_context *smu,
2047                       enum pp_df_cstate state)
2048 {
2049         int ret = 0;
2050
2051         if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
2052                 return -EOPNOTSUPP;
2053
2054         if (!smu->ppt_funcs || !smu->ppt_funcs->set_df_cstate)
2055                 return 0;
2056
2057         mutex_lock(&smu->mutex);
2058
2059         ret = smu->ppt_funcs->set_df_cstate(smu, state);
2060         if (ret)
2061                 pr_err("[SetDfCstate] failed!\n");
2062
2063         mutex_unlock(&smu->mutex);
2064
2065         return ret;
2066 }
2067
2068 int smu_allow_xgmi_power_down(struct smu_context *smu, bool en)
2069 {
2070         int ret = 0;
2071
2072         if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
2073                 return -EOPNOTSUPP;
2074
2075         if (!smu->ppt_funcs || !smu->ppt_funcs->allow_xgmi_power_down)
2076                 return 0;
2077
2078         mutex_lock(&smu->mutex);
2079
2080         ret = smu->ppt_funcs->allow_xgmi_power_down(smu, en);
2081         if (ret)
2082                 pr_err("[AllowXgmiPowerDown] failed!\n");
2083
2084         mutex_unlock(&smu->mutex);
2085
2086         return ret;
2087 }
2088
2089 int smu_write_watermarks_table(struct smu_context *smu)
2090 {
2091         void *watermarks_table = smu->smu_table.watermarks_table;
2092
2093         if (!watermarks_table)
2094                 return -EINVAL;
2095
2096         return smu_update_table(smu,
2097                                 SMU_TABLE_WATERMARKS,
2098                                 0,
2099                                 watermarks_table,
2100                                 true);
2101 }
2102
2103 int smu_set_watermarks_for_clock_ranges(struct smu_context *smu,
2104                 struct dm_pp_wm_sets_with_clock_ranges_soc15 *clock_ranges)
2105 {
2106         void *table = smu->smu_table.watermarks_table;
2107
2108         if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
2109                 return -EOPNOTSUPP;
2110
2111         if (!table)
2112                 return -EINVAL;
2113
2114         mutex_lock(&smu->mutex);
2115
2116         if (!smu->disable_watermark &&
2117                         smu_feature_is_enabled(smu, SMU_FEATURE_DPM_DCEFCLK_BIT) &&
2118                         smu_feature_is_enabled(smu, SMU_FEATURE_DPM_SOCCLK_BIT)) {
2119                 smu_set_watermarks_table(smu, table, clock_ranges);
2120
2121                 if (!(smu->watermarks_bitmap & WATERMARKS_EXIST)) {
2122                         smu->watermarks_bitmap |= WATERMARKS_EXIST;
2123                         smu->watermarks_bitmap &= ~WATERMARKS_LOADED;
2124                 }
2125         }
2126
2127         mutex_unlock(&smu->mutex);
2128
2129         return 0;
2130 }
2131
2132 int smu_set_ac_dc(struct smu_context *smu)
2133 {
2134         int ret = 0;
2135
2136         if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
2137                 return -EOPNOTSUPP;
2138
2139         /* controlled by firmware */
2140         if (smu->dc_controlled_by_gpio)
2141                 return 0;
2142
2143         mutex_lock(&smu->mutex);
2144         if (smu->ppt_funcs->set_power_source) {
2145                 if (smu->adev->pm.ac_power)
2146                         ret = smu_set_power_source(smu, SMU_POWER_SOURCE_AC);
2147                 else
2148                         ret = smu_set_power_source(smu, SMU_POWER_SOURCE_DC);
2149                 if (ret)
2150                         pr_err("Failed to switch to %s mode!\n",
2151                                smu->adev->pm.ac_power ? "AC" : "DC");
2152         }
2153         mutex_unlock(&smu->mutex);
2154
2155         return ret;
2156 }
2157
2158 const struct amd_ip_funcs smu_ip_funcs = {
2159         .name = "smu",
2160         .early_init = smu_early_init,
2161         .late_init = smu_late_init,
2162         .sw_init = smu_sw_init,
2163         .sw_fini = smu_sw_fini,
2164         .hw_init = smu_hw_init,
2165         .hw_fini = smu_hw_fini,
2166         .suspend = smu_suspend,
2167         .resume = smu_resume,
2168         .is_idle = NULL,
2169         .check_soft_reset = NULL,
2170         .wait_for_idle = NULL,
2171         .soft_reset = NULL,
2172         .set_clockgating_state = smu_set_clockgating_state,
2173         .set_powergating_state = smu_set_powergating_state,
2174         .enable_umd_pstate = smu_enable_umd_pstate,
2175 };
2176
2177 const struct amdgpu_ip_block_version smu_v11_0_ip_block =
2178 {
2179         .type = AMD_IP_BLOCK_TYPE_SMC,
2180         .major = 11,
2181         .minor = 0,
2182         .rev = 0,
2183         .funcs = &smu_ip_funcs,
2184 };
2185
2186 const struct amdgpu_ip_block_version smu_v12_0_ip_block =
2187 {
2188         .type = AMD_IP_BLOCK_TYPE_SMC,
2189         .major = 12,
2190         .minor = 0,
2191         .rev = 0,
2192         .funcs = &smu_ip_funcs,
2193 };
2194
2195 int smu_load_microcode(struct smu_context *smu)
2196 {
2197         int ret = 0;
2198
2199         if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
2200                 return -EOPNOTSUPP;
2201
2202         mutex_lock(&smu->mutex);
2203
2204         if (smu->ppt_funcs->load_microcode)
2205                 ret = smu->ppt_funcs->load_microcode(smu);
2206
2207         mutex_unlock(&smu->mutex);
2208
2209         return ret;
2210 }
2211
2212 int smu_check_fw_status(struct smu_context *smu)
2213 {
2214         int ret = 0;
2215
2216         if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
2217                 return -EOPNOTSUPP;
2218
2219         mutex_lock(&smu->mutex);
2220
2221         if (smu->ppt_funcs->check_fw_status)
2222                 ret = smu->ppt_funcs->check_fw_status(smu);
2223
2224         mutex_unlock(&smu->mutex);
2225
2226         return ret;
2227 }
2228
2229 int smu_set_gfx_cgpg(struct smu_context *smu, bool enabled)
2230 {
2231         int ret = 0;
2232
2233         mutex_lock(&smu->mutex);
2234
2235         if (smu->ppt_funcs->set_gfx_cgpg)
2236                 ret = smu->ppt_funcs->set_gfx_cgpg(smu, enabled);
2237
2238         mutex_unlock(&smu->mutex);
2239
2240         return ret;
2241 }
2242
2243 int smu_set_fan_speed_rpm(struct smu_context *smu, uint32_t speed)
2244 {
2245         int ret = 0;
2246
2247         if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
2248                 return -EOPNOTSUPP;
2249
2250         mutex_lock(&smu->mutex);
2251
2252         if (smu->ppt_funcs->set_fan_speed_rpm)
2253                 ret = smu->ppt_funcs->set_fan_speed_rpm(smu, speed);
2254
2255         mutex_unlock(&smu->mutex);
2256
2257         return ret;
2258 }
2259
2260 int smu_get_power_limit(struct smu_context *smu,
2261                         uint32_t *limit,
2262                         bool def,
2263                         bool lock_needed)
2264 {
2265         int ret = 0;
2266
2267         if (lock_needed) {
2268                 if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
2269                         return -EOPNOTSUPP;
2270
2271                 mutex_lock(&smu->mutex);
2272         }
2273
2274         if (smu->ppt_funcs->get_power_limit)
2275                 ret = smu->ppt_funcs->get_power_limit(smu, limit, def);
2276
2277         if (lock_needed)
2278                 mutex_unlock(&smu->mutex);
2279
2280         return ret;
2281 }
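
/*
 * Editor's note (illustrative): the lock_needed parameter lets callers that
 * already hold smu->mutex reuse this helper. A hypothetical external caller
 * would query the current and default limits like this:
 *
 *	uint32_t cur = 0, def = 0;
 *
 *	smu_get_power_limit(smu, &cur, false, true);	// current limit
 *	smu_get_power_limit(smu, &def, true, true);	// default limit
 */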
2282
2283 int smu_set_power_limit(struct smu_context *smu, uint32_t limit)
2284 {
2285         int ret = 0;
2286
2287         if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
2288                 return -EOPNOTSUPP;
2289
2290         mutex_lock(&smu->mutex);
2291
2292         if (smu->ppt_funcs->set_power_limit)
2293                 ret = smu->ppt_funcs->set_power_limit(smu, limit);
2294
2295         mutex_unlock(&smu->mutex);
2296
2297         return ret;
2298 }
2299
2300 int smu_print_clk_levels(struct smu_context *smu, enum smu_clk_type clk_type, char *buf)
2301 {
2302         int ret = 0;
2303
2304         if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
2305                 return -EOPNOTSUPP;
2306
2307         mutex_lock(&smu->mutex);
2308
2309         if (smu->ppt_funcs->print_clk_levels)
2310                 ret = smu->ppt_funcs->print_clk_levels(smu, clk_type, buf);
2311
2312         mutex_unlock(&smu->mutex);
2313
2314         return ret;
2315 }
2316
2317 int smu_get_od_percentage(struct smu_context *smu, enum smu_clk_type type)
2318 {
2319         int ret = 0;
2320
2321         if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
2322                 return -EOPNOTSUPP;
2323
2324         mutex_lock(&smu->mutex);
2325
2326         if (smu->ppt_funcs->get_od_percentage)
2327                 ret = smu->ppt_funcs->get_od_percentage(smu, type);
2328
2329         mutex_unlock(&smu->mutex);
2330
2331         return ret;
2332 }
2333
2334 int smu_set_od_percentage(struct smu_context *smu, enum smu_clk_type type, uint32_t value)
2335 {
2336         int ret = 0;
2337
2338         if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
2339                 return -EOPNOTSUPP;
2340
2341         mutex_lock(&smu->mutex);
2342
2343         if (smu->ppt_funcs->set_od_percentage)
2344                 ret = smu->ppt_funcs->set_od_percentage(smu, type, value);
2345
2346         mutex_unlock(&smu->mutex);
2347
2348         return ret;
2349 }
2350
2351 int smu_od_edit_dpm_table(struct smu_context *smu,
2352                           enum PP_OD_DPM_TABLE_COMMAND type,
2353                           long *input, uint32_t size)
2354 {
2355         int ret = 0;
2356
2357         if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
2358                 return -EOPNOTSUPP;
2359
2360         mutex_lock(&smu->mutex);
2361
2362         if (smu->ppt_funcs->od_edit_dpm_table)
2363                 ret = smu->ppt_funcs->od_edit_dpm_table(smu, type, input, size);
2364
2365         mutex_unlock(&smu->mutex);
2366
2367         return ret;
2368 }
2369
2370 int smu_read_sensor(struct smu_context *smu,
2371                     enum amd_pp_sensors sensor,
2372                     void *data, uint32_t *size)
2373 {
2374         int ret = 0;
2375
2376         if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
2377                 return -EOPNOTSUPP;
2378
2379         mutex_lock(&smu->mutex);
2380
2381         if (smu->ppt_funcs->read_sensor)
2382                 ret = smu->ppt_funcs->read_sensor(smu, sensor, data, size);
2383
2384         mutex_unlock(&smu->mutex);
2385
2386         return ret;
2387 }
2388
2389 int smu_get_power_profile_mode(struct smu_context *smu, char *buf)
2390 {
2391         int ret = 0;
2392
2393         if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
2394                 return -EOPNOTSUPP;
2395
2396         mutex_lock(&smu->mutex);
2397
2398         if (smu->ppt_funcs->get_power_profile_mode)
2399                 ret = smu->ppt_funcs->get_power_profile_mode(smu, buf);
2400
2401         mutex_unlock(&smu->mutex);
2402
2403         return ret;
2404 }
2405
2406 int smu_set_power_profile_mode(struct smu_context *smu,
2407                                long *param,
2408                                uint32_t param_size,
2409                                bool lock_needed)
2410 {
2411         int ret = 0;
2412
2413         if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
2414                 return -EOPNOTSUPP;
2415
2416         if (lock_needed)
2417                 mutex_lock(&smu->mutex);
2418
2419         if (smu->ppt_funcs->set_power_profile_mode)
2420                 ret = smu->ppt_funcs->set_power_profile_mode(smu, param, param_size);
2421
2422         if (lock_needed)
2423                 mutex_unlock(&smu->mutex);
2424
2425         return ret;
2426 }
2427
2428
2429 int smu_get_fan_control_mode(struct smu_context *smu)
2430 {
2431         int ret = 0;
2432
2433         if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
2434                 return -EOPNOTSUPP;
2435
2436         mutex_lock(&smu->mutex);
2437
2438         if (smu->ppt_funcs->get_fan_control_mode)
2439                 ret = smu->ppt_funcs->get_fan_control_mode(smu);
2440
2441         mutex_unlock(&smu->mutex);
2442
2443         return ret;
2444 }
2445
2446 int smu_set_fan_control_mode(struct smu_context *smu, int value)
2447 {
2448         int ret = 0;
2449
2450         if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
2451                 return -EOPNOTSUPP;
2452
2453         mutex_lock(&smu->mutex);
2454
2455         if (smu->ppt_funcs->set_fan_control_mode)
2456                 ret = smu->ppt_funcs->set_fan_control_mode(smu, value);
2457
2458         mutex_unlock(&smu->mutex);
2459
2460         return ret;
2461 }
2462
2463 int smu_get_fan_speed_percent(struct smu_context *smu, uint32_t *speed)
2464 {
2465         int ret = 0;
2466
2467         if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
2468                 return -EOPNOTSUPP;
2469
2470         mutex_lock(&smu->mutex);
2471
2472         if (smu->ppt_funcs->get_fan_speed_percent)
2473                 ret = smu->ppt_funcs->get_fan_speed_percent(smu, speed);
2474
2475         mutex_unlock(&smu->mutex);
2476
2477         return ret;
2478 }
2479
2480 int smu_set_fan_speed_percent(struct smu_context *smu, uint32_t speed)
2481 {
2482         int ret = 0;
2483
2484         if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
2485                 return -EOPNOTSUPP;
2486
2487         mutex_lock(&smu->mutex);
2488
2489         if (smu->ppt_funcs->set_fan_speed_percent)
2490                 ret = smu->ppt_funcs->set_fan_speed_percent(smu, speed);
2491
2492         mutex_unlock(&smu->mutex);
2493
2494         return ret;
2495 }
2496
2497 int smu_get_fan_speed_rpm(struct smu_context *smu, uint32_t *speed)
2498 {
2499         int ret = 0;
2500
2501         if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
2502                 return -EOPNOTSUPP;
2503
2504         mutex_lock(&smu->mutex);
2505
2506         if (smu->ppt_funcs->get_fan_speed_rpm)
2507                 ret = smu->ppt_funcs->get_fan_speed_rpm(smu, speed);
2508
2509         mutex_unlock(&smu->mutex);
2510
2511         return ret;
2512 }
2513
2514 int smu_set_deep_sleep_dcefclk(struct smu_context *smu, int clk)
2515 {
2516         int ret = 0;
2517
2518         if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
2519                 return -EOPNOTSUPP;
2520
2521         mutex_lock(&smu->mutex);
2522
2523         if (smu->ppt_funcs->set_deep_sleep_dcefclk)
2524                 ret = smu->ppt_funcs->set_deep_sleep_dcefclk(smu, clk);
2525
2526         mutex_unlock(&smu->mutex);
2527
2528         return ret;
2529 }
2530
2531 int smu_set_active_display_count(struct smu_context *smu, uint32_t count)
2532 {
2533         int ret = 0;
2534
2535         if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
2536                 return -EOPNOTSUPP;
2537
2538         if (smu->ppt_funcs->set_active_display_count)
2539                 ret = smu->ppt_funcs->set_active_display_count(smu, count);
2540
2541         return ret;
2542 }
2543
2544 int smu_get_clock_by_type(struct smu_context *smu,
2545                           enum amd_pp_clock_type type,
2546                           struct amd_pp_clocks *clocks)
2547 {
2548         int ret = 0;
2549
2550         if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
2551                 return -EOPNOTSUPP;
2552
2553         mutex_lock(&smu->mutex);
2554
2555         if (smu->ppt_funcs->get_clock_by_type)
2556                 ret = smu->ppt_funcs->get_clock_by_type(smu, type, clocks);
2557
2558         mutex_unlock(&smu->mutex);
2559
2560         return ret;
2561 }
2562
2563 int smu_get_max_high_clocks(struct smu_context *smu,
2564                             struct amd_pp_simple_clock_info *clocks)
2565 {
2566         int ret = 0;
2567
2568         if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
2569                 return -EOPNOTSUPP;
2570
2571         mutex_lock(&smu->mutex);
2572
2573         if (smu->ppt_funcs->get_max_high_clocks)
2574                 ret = smu->ppt_funcs->get_max_high_clocks(smu, clocks);
2575
2576         mutex_unlock(&smu->mutex);
2577
2578         return ret;
2579 }
2580
2581 int smu_get_clock_by_type_with_latency(struct smu_context *smu,
2582                                        enum smu_clk_type clk_type,
2583                                        struct pp_clock_levels_with_latency *clocks)
2584 {
2585         int ret = 0;
2586
2587         if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
2588                 return -EOPNOTSUPP;
2589
2590         mutex_lock(&smu->mutex);
2591
2592         if (smu->ppt_funcs->get_clock_by_type_with_latency)
2593                 ret = smu->ppt_funcs->get_clock_by_type_with_latency(smu, clk_type, clocks);
2594
2595         mutex_unlock(&smu->mutex);
2596
2597         return ret;
2598 }
2599
2600 int smu_get_clock_by_type_with_voltage(struct smu_context *smu,
2601                                        enum amd_pp_clock_type type,
2602                                        struct pp_clock_levels_with_voltage *clocks)
2603 {
2604         int ret = 0;
2605
2606         if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
2607                 return -EOPNOTSUPP;
2608
2609         mutex_lock(&smu->mutex);
2610
2611         if (smu->ppt_funcs->get_clock_by_type_with_voltage)
2612                 ret = smu->ppt_funcs->get_clock_by_type_with_voltage(smu, type, clocks);
2613
2614         mutex_unlock(&smu->mutex);
2615
2616         return ret;
2617 }
2618
2619
2620 int smu_display_clock_voltage_request(struct smu_context *smu,
2621                                       struct pp_display_clock_request *clock_req)
2622 {
2623         int ret = 0;
2624
2625         if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
2626                 return -EOPNOTSUPP;
2627
2628         mutex_lock(&smu->mutex);
2629
2630         if (smu->ppt_funcs->display_clock_voltage_request)
2631                 ret = smu->ppt_funcs->display_clock_voltage_request(smu, clock_req);
2632
2633         mutex_unlock(&smu->mutex);
2634
2635         return ret;
2636 }
2637
2638
2639 int smu_display_disable_memory_clock_switch(struct smu_context *smu, bool disable_memory_clock_switch)
2640 {
2641         int ret = -EINVAL;
2642
2643         if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
2644                 return -EOPNOTSUPP;
2645
2646         mutex_lock(&smu->mutex);
2647
2648         if (smu->ppt_funcs->display_disable_memory_clock_switch)
2649                 ret = smu->ppt_funcs->display_disable_memory_clock_switch(smu, disable_memory_clock_switch);
2650
2651         mutex_unlock(&smu->mutex);
2652
2653         return ret;
2654 }
2655
2656 int smu_notify_smu_enable_pwe(struct smu_context *smu)
2657 {
2658         int ret = 0;
2659
2660         if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
2661                 return -EOPNOTSUPP;
2662
2663         mutex_lock(&smu->mutex);
2664
2665         if (smu->ppt_funcs->notify_smu_enable_pwe)
2666                 ret = smu->ppt_funcs->notify_smu_enable_pwe(smu);
2667
2668         mutex_unlock(&smu->mutex);
2669
2670         return ret;
2671 }
2672
2673 int smu_set_xgmi_pstate(struct smu_context *smu,
2674                         uint32_t pstate)
2675 {
2676         int ret = 0;
2677
2678         if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
2679                 return -EOPNOTSUPP;
2680
2681         mutex_lock(&smu->mutex);
2682
2683         if (smu->ppt_funcs->set_xgmi_pstate)
2684                 ret = smu->ppt_funcs->set_xgmi_pstate(smu, pstate);
2685
2686         mutex_unlock(&smu->mutex);
2687
2688         return ret;
2689 }
2690
2691 int smu_set_azalia_d3_pme(struct smu_context *smu)
2692 {
2693         int ret = 0;
2694
2695         if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
2696                 return -EOPNOTSUPP;
2697
2698         mutex_lock(&smu->mutex);
2699
2700         if (smu->ppt_funcs->set_azalia_d3_pme)
2701                 ret = smu->ppt_funcs->set_azalia_d3_pme(smu);
2702
2703         mutex_unlock(&smu->mutex);
2704
2705         return ret;
2706 }
2707
2708 /*
2709  * On system suspend or reset, the dpm_enabled flag is
2710  * cleared so that SMU services which are no longer
2711  * supported are gated.
2712  *
2713  * However, the BACO/mode1 reset requests should still be
2714  * granted as they remain supported and necessary.
2715  */
2716 bool smu_baco_is_support(struct smu_context *smu)
2717 {
2718         bool ret = false;
2719
2720         if (!smu->pm_enabled)
2721                 return false;
2722
2723         mutex_lock(&smu->mutex);
2724
2725         if (smu->ppt_funcs && smu->ppt_funcs->baco_is_support)
2726                 ret = smu->ppt_funcs->baco_is_support(smu);
2727
2728         mutex_unlock(&smu->mutex);
2729
2730         return ret;
2731 }
2732
2733 int smu_baco_get_state(struct smu_context *smu, enum smu_baco_state *state)
2734 {
2735         if (!smu->ppt_funcs->baco_get_state)
2736                 return -EINVAL;
2737
2738         mutex_lock(&smu->mutex);
2739         *state = smu->ppt_funcs->baco_get_state(smu);
2740         mutex_unlock(&smu->mutex);
2741
2742         return 0;
2743 }
2744
2745 int smu_baco_enter(struct smu_context *smu)
2746 {
2747         int ret = 0;
2748
2749         if (!smu->pm_enabled)
2750                 return -EOPNOTSUPP;
2751
2752         mutex_lock(&smu->mutex);
2753
2754         if (smu->ppt_funcs->baco_enter)
2755                 ret = smu->ppt_funcs->baco_enter(smu);
2756
2757         mutex_unlock(&smu->mutex);
2758
2759         return ret;
2760 }
2761
2762 int smu_baco_exit(struct smu_context *smu)
2763 {
2764         int ret = 0;
2765
2766         if (!smu->pm_enabled)
2767                 return -EOPNOTSUPP;
2768
2769         mutex_lock(&smu->mutex);
2770
2771         if (smu->ppt_funcs->baco_exit)
2772                 ret = smu->ppt_funcs->baco_exit(smu);
2773
2774         mutex_unlock(&smu->mutex);
2775
2776         return ret;
2777 }
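
/*
 * Illustrative usage sketch (editor's addition): a hypothetical reset-path
 * caller would cycle BACO only after confirming support, e.g.:
 *
 *	if (smu_baco_is_support(smu)) {
 *		ret = smu_baco_enter(smu);
 *		if (!ret)
 *			ret = smu_baco_exit(smu);
 *	}
 */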
2778
2779 int smu_mode2_reset(struct smu_context *smu)
2780 {
2781         int ret = 0;
2782
2783         if (!smu->pm_enabled)
2784                 return -EOPNOTSUPP;
2785
2786         mutex_lock(&smu->mutex);
2787
2788         if (smu->ppt_funcs->mode2_reset)
2789                 ret = smu->ppt_funcs->mode2_reset(smu);
2790
2791         mutex_unlock(&smu->mutex);
2792
2793         return ret;
2794 }
2795
2796 int smu_get_max_sustainable_clocks_by_dc(struct smu_context *smu,
2797                                          struct pp_smu_nv_clock_table *max_clocks)
2798 {
2799         int ret = 0;
2800
2801         if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
2802                 return -EOPNOTSUPP;
2803
2804         mutex_lock(&smu->mutex);
2805
2806         if (smu->ppt_funcs->get_max_sustainable_clocks_by_dc)
2807                 ret = smu->ppt_funcs->get_max_sustainable_clocks_by_dc(smu, max_clocks);
2808
2809         mutex_unlock(&smu->mutex);
2810
2811         return ret;
2812 }
2813
2814 int smu_get_uclk_dpm_states(struct smu_context *smu,
2815                             unsigned int *clock_values_in_khz,
2816                             unsigned int *num_states)
2817 {
2818         int ret = 0;
2819
2820         if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
2821                 return -EOPNOTSUPP;
2822
2823         mutex_lock(&smu->mutex);
2824
2825         if (smu->ppt_funcs->get_uclk_dpm_states)
2826                 ret = smu->ppt_funcs->get_uclk_dpm_states(smu, clock_values_in_khz, num_states);
2827
2828         mutex_unlock(&smu->mutex);
2829
2830         return ret;
2831 }
2832
2833 enum amd_pm_state_type smu_get_current_power_state(struct smu_context *smu)
2834 {
2835         enum amd_pm_state_type pm_state = POWER_STATE_TYPE_DEFAULT;
2836
2837         if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
2838                 return -EOPNOTSUPP;
2839
2840         mutex_lock(&smu->mutex);
2841
2842         if (smu->ppt_funcs->get_current_power_state)
2843                 pm_state = smu->ppt_funcs->get_current_power_state(smu);
2844
2845         mutex_unlock(&smu->mutex);
2846
2847         return pm_state;
2848 }
2849
2850 int smu_get_dpm_clock_table(struct smu_context *smu,
2851                             struct dpm_clocks *clock_table)
2852 {
2853         int ret = 0;
2854
2855         if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
2856                 return -EOPNOTSUPP;
2857
2858         mutex_lock(&smu->mutex);
2859
2860         if (smu->ppt_funcs->get_dpm_clock_table)
2861                 ret = smu->ppt_funcs->get_dpm_clock_table(smu, clock_table);
2862
2863         mutex_unlock(&smu->mutex);
2864
2865         return ret;
2866 }
2867
2868 uint32_t smu_get_pptable_power_limit(struct smu_context *smu)
2869 {
2870         uint32_t ret = 0;
2871
2872         if (smu->ppt_funcs->get_pptable_power_limit)
2873                 ret = smu->ppt_funcs->get_pptable_power_limit(smu);
2874
2875         return ret;
2876 }