drm/amd/powerplay: clean up the APIs for pptable setup
drivers/gpu/drm/amd/powerplay/amdgpu_smu.c
1 /*
2  * Copyright 2019 Advanced Micro Devices, Inc.
3  *
4  * Permission is hereby granted, free of charge, to any person obtaining a
5  * copy of this software and associated documentation files (the "Software"),
6  * to deal in the Software without restriction, including without limitation
7  * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8  * and/or sell copies of the Software, and to permit persons to whom the
9  * Software is furnished to do so, subject to the following conditions:
10  *
11  * The above copyright notice and this permission notice shall be included in
12  * all copies or substantial portions of the Software.
13  *
14  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
17  * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18  * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19  * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20  * OTHER DEALINGS IN THE SOFTWARE.
21  */
22
23 #include <linux/firmware.h>
24 #include <linux/pci.h>
25
26 #include "amdgpu.h"
27 #include "amdgpu_smu.h"
28 #include "smu_internal.h"
29 #include "smu_v11_0.h"
30 #include "smu_v12_0.h"
31 #include "atom.h"
32 #include "arcturus_ppt.h"
33 #include "navi10_ppt.h"
34 #include "sienna_cichlid_ppt.h"
35 #include "renoir_ppt.h"
36
37 #undef __SMU_DUMMY_MAP
38 #define __SMU_DUMMY_MAP(type)   #type
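/*
 * SMU_MESSAGE_TYPES is an X-macro list; with __SMU_DUMMY_MAP defined to
 * stringify its argument, it expands into an array of message-name strings
 * indexed by enum smu_message_type.
 */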
39 static const char *__smu_message_names[] = {
40         SMU_MESSAGE_TYPES
41 };
42
43 const char *smu_get_message_name(struct smu_context *smu, enum smu_message_type type)
44 {
45         if (type < 0 || type >= SMU_MSG_MAX_COUNT)
46                 return "unknown smu message";
47         return __smu_message_names[type];
48 }
49
50 #undef __SMU_DUMMY_MAP
51 #define __SMU_DUMMY_MAP(fea)    #fea
52 static const char *__smu_feature_names[] = {
53         SMU_FEATURE_MASKS
54 };
55
56 const char *smu_get_feature_name(struct smu_context *smu, enum smu_feature_mask feature)
57 {
58         if (feature < 0 || feature >= SMU_FEATURE_COUNT)
59                 return "unknown smu feature";
60         return __smu_feature_names[feature];
61 }
62
63 size_t smu_sys_get_pp_feature_mask(struct smu_context *smu, char *buf)
64 {
65         size_t size = 0;
66         int ret = 0, i = 0;
67         uint32_t feature_mask[2] = { 0 };
68         int32_t feature_index = 0;
69         uint32_t count = 0;
70         uint32_t sort_feature[SMU_FEATURE_COUNT];
71         uint64_t hw_feature_count = 0;
72
73         if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
74                 return -EOPNOTSUPP;
75
76         mutex_lock(&smu->mutex);
77
78         ret = smu_feature_get_enabled_mask(smu, feature_mask, 2);
79         if (ret)
80                 goto failed;
81
82         size = sprintf(buf + size, "features high: 0x%08x low: 0x%08x\n",
83                         feature_mask[1], feature_mask[0]);
84
85         for (i = 0; i < SMU_FEATURE_COUNT; i++) {
86                 feature_index = smu_feature_get_index(smu, i);
87                 if (feature_index < 0)
88                         continue;
89                 sort_feature[feature_index] = i;
90                 hw_feature_count++;
91         }
92
93         for (i = 0; i < hw_feature_count; i++) {
94                 size += sprintf(buf + size, "%02d. %-20s (%2d) : %s\n",
95                                count++,
96                                smu_get_feature_name(smu, sort_feature[i]),
97                                i,
98                                !!smu_feature_is_enabled(smu, sort_feature[i]) ?
99                                "enabled" : "disabled");
100         }
101
102 failed:
103         mutex_unlock(&smu->mutex);
104
105         return size;
106 }
107
108 static int smu_feature_update_enable_state(struct smu_context *smu,
109                                            uint64_t feature_mask,
110                                            bool enabled)
111 {
112         struct smu_feature *feature = &smu->smu_feature;
113         uint32_t feature_low = 0, feature_high = 0;
114         int ret = 0;
115
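        /*
         * The 64-bit feature mask is split into two 32-bit halves, as the
         * SMU enable/disable messages each take a 32-bit parameter.
         */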
116         feature_low = (feature_mask >> 0) & 0xffffffff;
117         feature_high = (feature_mask >> 32) & 0xffffffff;
118
119         if (enabled) {
120                 ret = smu_send_smc_msg_with_param(smu, SMU_MSG_EnableSmuFeaturesLow,
121                                                   feature_low, NULL);
122                 if (ret)
123                         return ret;
124                 ret = smu_send_smc_msg_with_param(smu, SMU_MSG_EnableSmuFeaturesHigh,
125                                                   feature_high, NULL);
126                 if (ret)
127                         return ret;
128         } else {
129                 ret = smu_send_smc_msg_with_param(smu, SMU_MSG_DisableSmuFeaturesLow,
130                                                   feature_low, NULL);
131                 if (ret)
132                         return ret;
133                 ret = smu_send_smc_msg_with_param(smu, SMU_MSG_DisableSmuFeaturesHigh,
134                                                   feature_high, NULL);
135                 if (ret)
136                         return ret;
137         }
138
139         mutex_lock(&feature->mutex);
140         if (enabled)
141                 bitmap_or(feature->enabled, feature->enabled,
142                                 (unsigned long *)(&feature_mask), SMU_FEATURE_MAX);
143         else
144                 bitmap_andnot(feature->enabled, feature->enabled,
145                                 (unsigned long *)(&feature_mask), SMU_FEATURE_MAX);
146         mutex_unlock(&feature->mutex);
147
148         return ret;
149 }
150
151 int smu_sys_set_pp_feature_mask(struct smu_context *smu, uint64_t new_mask)
152 {
153         int ret = 0;
154         uint32_t feature_mask[2] = { 0 };
155         uint64_t feature_2_enabled = 0;
156         uint64_t feature_2_disabled = 0;
157         uint64_t feature_enables = 0;
158
159         if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
160                 return -EOPNOTSUPP;
161
162         mutex_lock(&smu->mutex);
163
164         ret = smu_feature_get_enabled_mask(smu, feature_mask, 2);
165         if (ret)
166                 goto out;
167
168         feature_enables = ((uint64_t)feature_mask[1] << 32 | (uint64_t)feature_mask[0]);
169
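        /*
         * Features set in new_mask but not currently enabled need enabling;
         * features currently enabled but cleared in new_mask need disabling.
         */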
170         feature_2_enabled  = ~feature_enables & new_mask;
171         feature_2_disabled = feature_enables & ~new_mask;
172
173         if (feature_2_enabled) {
174                 ret = smu_feature_update_enable_state(smu, feature_2_enabled, true);
175                 if (ret)
176                         goto out;
177         }
178         if (feature_2_disabled) {
179                 ret = smu_feature_update_enable_state(smu, feature_2_disabled, false);
180                 if (ret)
181                         goto out;
182         }
183
184 out:
185         mutex_unlock(&smu->mutex);
186
187         return ret;
188 }
189
190 int smu_get_smc_version(struct smu_context *smu, uint32_t *if_version, uint32_t *smu_version)
191 {
192         int ret = 0;
193
194         if (!if_version && !smu_version)
195                 return -EINVAL;
196
197         if (smu->smc_fw_if_version && smu->smc_fw_version) {
199                 if (if_version)
200                         *if_version = smu->smc_fw_if_version;
201
202                 if (smu_version)
203                         *smu_version = smu->smc_fw_version;
204
205                 return 0;
206         }
207
208         if (if_version) {
209                 ret = smu_send_smc_msg(smu, SMU_MSG_GetDriverIfVersion, if_version);
210                 if (ret)
211                         return ret;
212
213                 smu->smc_fw_if_version = *if_version;
214         }
215
216         if (smu_version) {
217                 ret = smu_send_smc_msg(smu, SMU_MSG_GetSmuVersion, smu_version);
218                 if (ret)
219                         return ret;
220
221                 smu->smc_fw_version = *smu_version;
222         }
223
224         return ret;
225 }
226
227 int smu_set_soft_freq_range(struct smu_context *smu, enum smu_clk_type clk_type,
228                             uint32_t min, uint32_t max, bool lock_needed)
229 {
230         int ret = 0;
231
232         if (!smu_clk_dpm_is_enabled(smu, clk_type))
233                 return 0;
234
235         if (lock_needed)
236                 mutex_lock(&smu->mutex);
237         ret = smu_set_soft_freq_limited_range(smu, clk_type, min, max);
238         if (lock_needed)
239                 mutex_unlock(&smu->mutex);
240
241         return ret;
242 }
243
244 int smu_set_hard_freq_range(struct smu_context *smu, enum smu_clk_type clk_type,
245                             uint32_t min, uint32_t max)
246 {
247         int ret = 0, clk_id = 0;
248         uint32_t param;
249
250         if (min <= 0 && max <= 0)
251                 return -EINVAL;
252
253         if (!smu_clk_dpm_is_enabled(smu, clk_type))
254                 return 0;
255
256         clk_id = smu_clk_get_index(smu, clk_type);
257         if (clk_id < 0)
258                 return clk_id;
259
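        /*
         * The message parameter packs the clock id into the upper 16 bits
         * and the requested frequency limit into the lower 16 bits.
         */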
260         if (max > 0) {
261                 param = (uint32_t)((clk_id << 16) | (max & 0xffff));
262                 ret = smu_send_smc_msg_with_param(smu, SMU_MSG_SetHardMaxByFreq,
263                                                   param, NULL);
264                 if (ret)
265                         return ret;
266         }
267
268         if (min > 0) {
269                 param = (uint32_t)((clk_id << 16) | (min & 0xffff));
270                 ret = smu_send_smc_msg_with_param(smu, SMU_MSG_SetHardMinByFreq,
271                                                   param, NULL);
272                 if (ret)
273                         return ret;
274         }
275
277         return ret;
278 }
279
280 int smu_get_dpm_freq_range(struct smu_context *smu, enum smu_clk_type clk_type,
281                            uint32_t *min, uint32_t *max, bool lock_needed)
282 {
283         uint32_t clock_limit;
284         int ret = 0;
285
286         if (!min && !max)
287                 return -EINVAL;
288
289         if (lock_needed)
290                 mutex_lock(&smu->mutex);
291
292         if (!smu_clk_dpm_is_enabled(smu, clk_type)) {
293                 switch (clk_type) {
294                 case SMU_MCLK:
295                 case SMU_UCLK:
296                         clock_limit = smu->smu_table.boot_values.uclk;
297                         break;
298                 case SMU_GFXCLK:
299                 case SMU_SCLK:
300                         clock_limit = smu->smu_table.boot_values.gfxclk;
301                         break;
302                 case SMU_SOCCLK:
303                         clock_limit = smu->smu_table.boot_values.socclk;
304                         break;
305                 default:
306                         clock_limit = 0;
307                         break;
308                 }
309
310                 /* clock in MHz unit */
311                 if (min)
312                         *min = clock_limit / 100;
313                 if (max)
314                         *max = clock_limit / 100;
315         } else {
316                 /*
317          * Todo: Have each ASIC (ASIC_ppt funcs) control the callbacks exposed to the
318          * core driver, and add helpers for functionality common to SMU_v11_x | SMU_v12_x funcs.
319                  */
320                 ret = smu_get_dpm_ultimate_freq(smu, clk_type, min, max);
321         }
322
323         if (lock_needed)
324                 mutex_unlock(&smu->mutex);
325
326         return ret;
327 }
328
329 int smu_get_dpm_freq_by_index(struct smu_context *smu, enum smu_clk_type clk_type,
330                               uint16_t level, uint32_t *value)
331 {
332         int ret = 0, clk_id = 0;
333         uint32_t param;
334
335         if (!value)
336                 return -EINVAL;
337
338         if (!smu_clk_dpm_is_enabled(smu, clk_type))
339                 return 0;
340
341         clk_id = smu_clk_get_index(smu, clk_type);
342         if (clk_id < 0)
343                 return clk_id;
344
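        /*
         * The clock id goes into the upper 16 bits of the parameter and the
         * requested DPM level index into the lower 16 bits.
         */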
345         param = (uint32_t)(((clk_id & 0xffff) << 16) | (level & 0xffff));
346
347         ret = smu_send_smc_msg_with_param(smu, SMU_MSG_GetDpmFreqByIndex,
348                                           param, value);
349         if (ret)
350                 return ret;
351
352         /* BIT31:  0 - Fine grained DPM, 1 - Discrete DPM
353          * not supported for now */
354         *value = *value & 0x7fffffff;
355
356         return ret;
357 }
358
359 int smu_get_dpm_level_count(struct smu_context *smu, enum smu_clk_type clk_type,
360                             uint32_t *value)
361 {
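        /*
         * A level index of 0xff asks the firmware for the number of DPM
         * levels rather than the frequency of a specific level.
         */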
362         return smu_get_dpm_freq_by_index(smu, clk_type, 0xff, value);
363 }
364
365 int smu_get_dpm_level_range(struct smu_context *smu, enum smu_clk_type clk_type,
366                             uint32_t *min_value, uint32_t *max_value)
367 {
368         int ret = 0;
369         uint32_t level_count = 0;
370
371         if (!min_value && !max_value)
372                 return -EINVAL;
373
374         if (min_value) {
375                 /* by default, use the level 0 clock value as the min value */
376                 ret = smu_get_dpm_freq_by_index(smu, clk_type, 0, min_value);
377                 if (ret)
378                         return ret;
379         }
380
381         if (max_value) {
382                 ret = smu_get_dpm_level_count(smu, clk_type, &level_count);
383                 if (ret)
384                         return ret;
385
386                 ret = smu_get_dpm_freq_by_index(smu, clk_type, level_count - 1, max_value);
387                 if (ret)
388                         return ret;
389         }
390
391         return ret;
392 }
393
394 bool smu_clk_dpm_is_enabled(struct smu_context *smu, enum smu_clk_type clk_type)
395 {
396         enum smu_feature_mask feature_id = 0;
397
398         switch (clk_type) {
399         case SMU_MCLK:
400         case SMU_UCLK:
401                 feature_id = SMU_FEATURE_DPM_UCLK_BIT;
402                 break;
403         case SMU_GFXCLK:
404         case SMU_SCLK:
405                 feature_id = SMU_FEATURE_DPM_GFXCLK_BIT;
406                 break;
407         case SMU_SOCCLK:
408                 feature_id = SMU_FEATURE_DPM_SOCCLK_BIT;
409                 break;
410         default:
411                 return true;
412         }
413
414         if (!smu_feature_is_enabled(smu, feature_id)) {
415                 return false;
416         }
417
418         return true;
419 }
420
421 /**
422  * smu_dpm_set_power_gate - power gate/ungate the specific IP block
423  *
424  * @smu:        smu_context pointer
425  * @block_type: the IP block to power gate/ungate
426  * @gate:       to power gate if true, ungate otherwise
427  *
428  * This API uses no smu->mutex lock protection due to:
429  * 1. It is either called by other IP blocks (gfx/sdma/vcn/uvd/vce),
430  *    where the caller guarantees race free access.
431  * 2. Or it is called on a user request to change power_dpm_force_performance_level.
432  *    In this case, the smu->mutex lock protection is already enforced on
433  *    the parent API smu_force_performance_level of the call path.
434  */
435 int smu_dpm_set_power_gate(struct smu_context *smu, uint32_t block_type,
436                            bool gate)
437 {
438         int ret = 0;
439
440         if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
441                 return -EOPNOTSUPP;
442
443         switch (block_type) {
444         case AMD_IP_BLOCK_TYPE_UVD:
445                 ret = smu_dpm_set_uvd_enable(smu, !gate);
446                 break;
447         case AMD_IP_BLOCK_TYPE_VCE:
448                 ret = smu_dpm_set_vce_enable(smu, !gate);
449                 break;
450         case AMD_IP_BLOCK_TYPE_GFX:
451                 ret = smu_gfx_off_control(smu, gate);
452                 break;
453         case AMD_IP_BLOCK_TYPE_SDMA:
454                 ret = smu_powergate_sdma(smu, gate);
455                 break;
456         case AMD_IP_BLOCK_TYPE_JPEG:
457                 ret = smu_dpm_set_jpeg_enable(smu, !gate);
458                 break;
459         default:
460                 break;
461         }
462
463         return ret;
464 }
465
466 int smu_get_power_num_states(struct smu_context *smu,
467                              struct pp_states_info *state_info)
468 {
469         if (!state_info)
470                 return -EINVAL;
471
472         /* power states are not supported */
473         memset(state_info, 0, sizeof(struct pp_states_info));
474         state_info->nums = 1;
475         state_info->states[0] = POWER_STATE_TYPE_DEFAULT;
476
477         return 0;
478 }
479
480 int smu_common_read_sensor(struct smu_context *smu, enum amd_pp_sensors sensor,
481                            void *data, uint32_t *size)
482 {
483         struct smu_power_context *smu_power = &smu->smu_power;
484         struct smu_power_gate *power_gate = &smu_power->power_gate;
485         int ret = 0;
486
487         if (!data || !size)
488                 return -EINVAL;
489
490         switch (sensor) {
491         case AMDGPU_PP_SENSOR_STABLE_PSTATE_SCLK:
492                 *((uint32_t *)data) = smu->pstate_sclk;
493                 *size = 4;
494                 break;
495         case AMDGPU_PP_SENSOR_STABLE_PSTATE_MCLK:
496                 *((uint32_t *)data) = smu->pstate_mclk;
497                 *size = 4;
498                 break;
499         case AMDGPU_PP_SENSOR_ENABLED_SMC_FEATURES_MASK:
500                 ret = smu_feature_get_enabled_mask(smu, (uint32_t *)data, 2);
501                 *size = 8;
502                 break;
503         case AMDGPU_PP_SENSOR_UVD_POWER:
504                 *(uint32_t *)data = smu_feature_is_enabled(smu, SMU_FEATURE_DPM_UVD_BIT) ? 1 : 0;
505                 *size = 4;
506                 break;
507         case AMDGPU_PP_SENSOR_VCE_POWER:
508                 *(uint32_t *)data = smu_feature_is_enabled(smu, SMU_FEATURE_DPM_VCE_BIT) ? 1 : 0;
509                 *size = 4;
510                 break;
511         case AMDGPU_PP_SENSOR_VCN_POWER_STATE:
512                 *(uint32_t *)data = power_gate->vcn_gated ? 0 : 1;
513                 *size = 4;
514                 break;
515         default:
516                 ret = -EINVAL;
517                 break;
518         }
519
520         if (ret)
521                 *size = 0;
522
523         return ret;
524 }
525
526 int smu_update_table(struct smu_context *smu, enum smu_table_id table_index, int argument,
527                      void *table_data, bool drv2smu)
528 {
529         struct smu_table_context *smu_table = &smu->smu_table;
530         struct amdgpu_device *adev = smu->adev;
531         struct smu_table *table = &smu_table->driver_table;
532         int table_id = smu_table_get_index(smu, table_index);
533         uint32_t table_size;
534         int ret = 0;
535         if (!table_data || table_id >= SMU_TABLE_COUNT || table_id < 0)
536                 return -EINVAL;
537
538         table_size = smu_table->tables[table_index].size;
539
540         if (drv2smu) {
541                 memcpy(table->cpu_addr, table_data, table_size);
542                 /*
543                  * Flush the hdp cache to ensure the content seen by
544                  * the GPU is consistent with the CPU.
545                  */
546                 amdgpu_asic_flush_hdp(adev, NULL);
547         }
548
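        /*
         * The message parameter carries the table id in the lower 16 bits
         * and the caller-supplied argument in the upper 16 bits.
         */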
549         ret = smu_send_smc_msg_with_param(smu, drv2smu ?
550                                           SMU_MSG_TransferTableDram2Smu :
551                                           SMU_MSG_TransferTableSmu2Dram,
552                                           table_id | ((argument & 0xFFFF) << 16),
553                                           NULL);
554         if (ret)
555                 return ret;
556
557         if (!drv2smu) {
558                 amdgpu_asic_flush_hdp(adev, NULL);
559                 memcpy(table_data, table->cpu_addr, table_size);
560         }
561
562         return ret;
563 }
564
565 bool is_support_sw_smu(struct amdgpu_device *adev)
566 {
567         if (adev->asic_type >= CHIP_ARCTURUS)
568                 return true;
569
570         return false;
571 }
572
573 int smu_sys_get_pp_table(struct smu_context *smu, void **table)
574 {
575         struct smu_table_context *smu_table = &smu->smu_table;
576         uint32_t powerplay_table_size;
577
578         if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
579                 return -EOPNOTSUPP;
580
581         if (!smu_table->power_play_table && !smu_table->hardcode_pptable)
582                 return -EINVAL;
583
584         mutex_lock(&smu->mutex);
585
586         if (smu_table->hardcode_pptable)
587                 *table = smu_table->hardcode_pptable;
588         else
589                 *table = smu_table->power_play_table;
590
591         powerplay_table_size = smu_table->power_play_table_size;
592
593         mutex_unlock(&smu->mutex);
594
595         return powerplay_table_size;
596 }
597
598 int smu_sys_set_pp_table(struct smu_context *smu,  void *buf, size_t size)
599 {
600         struct smu_table_context *smu_table = &smu->smu_table;
601         ATOM_COMMON_TABLE_HEADER *header = (ATOM_COMMON_TABLE_HEADER *)buf;
602         int ret = 0;
603
604         if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
605                 return -EOPNOTSUPP;
606
607         if (header->usStructureSize != size) {
608                 pr_err("pp table size not matched!\n");
609                 return -EIO;
610         }
611
612         mutex_lock(&smu->mutex);
613         if (!smu_table->hardcode_pptable)
614                 smu_table->hardcode_pptable = kzalloc(size, GFP_KERNEL);
615         if (!smu_table->hardcode_pptable) {
616                 ret = -ENOMEM;
617                 goto failed;
618         }
619
620         memcpy(smu_table->hardcode_pptable, buf, size);
621         smu_table->power_play_table = smu_table->hardcode_pptable;
622         smu_table->power_play_table_size = size;
623
624         /*
625          * Special hw_fini action(for Navi1x, the DPMs disablement will be
626          * skipped) may be needed for custom pptable uploading.
627          */
628         smu->uploading_custom_pp_table = true;
629
630         ret = smu_reset(smu);
631         if (ret)
632                 pr_info("smu reset failed, ret = %d\n", ret);
633
634         smu->uploading_custom_pp_table = false;
635
636 failed:
637         mutex_unlock(&smu->mutex);
638         return ret;
639 }
640
641 int smu_feature_init_dpm(struct smu_context *smu)
642 {
643         struct smu_feature *feature = &smu->smu_feature;
644         int ret = 0;
645         uint32_t allowed_feature_mask[SMU_FEATURE_MAX/32];
646
647         mutex_lock(&feature->mutex);
648         bitmap_zero(feature->allowed, SMU_FEATURE_MAX);
649         mutex_unlock(&feature->mutex);
650
651         ret = smu_get_allowed_feature_mask(smu, allowed_feature_mask,
652                                              SMU_FEATURE_MAX/32);
653         if (ret)
654                 return ret;
655
656         mutex_lock(&feature->mutex);
657         bitmap_or(feature->allowed, feature->allowed,
658                       (unsigned long *)allowed_feature_mask,
659                       feature->feature_num);
660         mutex_unlock(&feature->mutex);
661
662         return ret;
663 }
664
665
666 int smu_feature_is_enabled(struct smu_context *smu, enum smu_feature_mask mask)
667 {
668         struct smu_feature *feature = &smu->smu_feature;
669         int feature_id;
670         int ret = 0;
671
672         if (smu->is_apu)
673                 return 1;
674         feature_id = smu_feature_get_index(smu, mask);
675         if (feature_id < 0)
676                 return 0;
677
678         WARN_ON(feature_id > feature->feature_num);
679
680         mutex_lock(&feature->mutex);
681         ret = test_bit(feature_id, feature->enabled);
682         mutex_unlock(&feature->mutex);
683
684         return ret;
685 }
686
687 int smu_feature_set_enabled(struct smu_context *smu, enum smu_feature_mask mask,
688                             bool enable)
689 {
690         struct smu_feature *feature = &smu->smu_feature;
691         int feature_id;
692
693         feature_id = smu_feature_get_index(smu, mask);
694         if (feature_id < 0)
695                 return -EINVAL;
696
697         WARN_ON(feature_id > feature->feature_num);
698
699         return smu_feature_update_enable_state(smu,
700                                                1ULL << feature_id,
701                                                enable);
702 }
703
704 int smu_feature_is_supported(struct smu_context *smu, enum smu_feature_mask mask)
705 {
706         struct smu_feature *feature = &smu->smu_feature;
707         int feature_id;
708         int ret = 0;
709
710         feature_id = smu_feature_get_index(smu, mask);
711         if (feature_id < 0)
712                 return 0;
713
714         WARN_ON(feature_id > feature->feature_num);
715
716         mutex_lock(&feature->mutex);
717         ret = test_bit(feature_id, feature->supported);
718         mutex_unlock(&feature->mutex);
719
720         return ret;
721 }
722
723 static int smu_set_funcs(struct amdgpu_device *adev)
724 {
725         struct smu_context *smu = &adev->smu;
726
727         if (adev->pm.pp_feature & PP_OVERDRIVE_MASK)
728                 smu->od_enabled = true;
729
730         switch (adev->asic_type) {
731         case CHIP_NAVI10:
732         case CHIP_NAVI14:
733         case CHIP_NAVI12:
734                 navi10_set_ppt_funcs(smu);
735                 break;
736         case CHIP_ARCTURUS:
737                 adev->pm.pp_feature &= ~PP_GFXOFF_MASK;
738                 arcturus_set_ppt_funcs(smu);
739                 /* OD is not supported on Arcturus */
740                 smu->od_enabled = false;
741                 break;
742         case CHIP_SIENNA_CICHLID:
743                 sienna_cichlid_set_ppt_funcs(smu);
744                 break;
745         case CHIP_RENOIR:
746                 renoir_set_ppt_funcs(smu);
747                 break;
748         default:
749                 return -EINVAL;
750         }
751
752         return 0;
753 }
754
755 static int smu_early_init(void *handle)
756 {
757         struct amdgpu_device *adev = (struct amdgpu_device *)handle;
758         struct smu_context *smu = &adev->smu;
759
760         smu->adev = adev;
761         smu->pm_enabled = !!amdgpu_dpm;
762         smu->is_apu = false;
763         mutex_init(&smu->mutex);
764
765         return smu_set_funcs(adev);
766 }
767
768 static int smu_late_init(void *handle)
769 {
770         struct amdgpu_device *adev = (struct amdgpu_device *)handle;
771         struct smu_context *smu = &adev->smu;
772
773         if (!smu->pm_enabled)
774                 return 0;
775
776         smu_get_unique_id(smu);
777
778         smu_handle_task(&adev->smu,
779                         smu->smu_dpm.dpm_level,
780                         AMD_PP_TASK_COMPLETE_INIT,
781                         false);
782
783         return 0;
784 }
785
786 int smu_get_atom_data_table(struct smu_context *smu, uint32_t table,
787                             uint16_t *size, uint8_t *frev, uint8_t *crev,
788                             uint8_t **addr)
789 {
790         struct amdgpu_device *adev = smu->adev;
791         uint16_t data_start;
792
793         if (!amdgpu_atom_parse_data_header(adev->mode_info.atom_context, table,
794                                            size, frev, crev, &data_start))
795                 return -EINVAL;
796
797         *addr = (uint8_t *)adev->mode_info.atom_context->bios + data_start;
798
799         return 0;
800 }
801
802 static int smu_init_fb_allocations(struct smu_context *smu)
803 {
804         struct amdgpu_device *adev = smu->adev;
805         struct smu_table_context *smu_table = &smu->smu_table;
806         struct smu_table *tables = smu_table->tables;
807         struct smu_table *driver_table = &(smu_table->driver_table);
808         uint32_t max_table_size = 0;
809         int ret, i;
810
811         /* VRAM allocation for tool table */
812         if (tables[SMU_TABLE_PMSTATUSLOG].size) {
813                 ret = amdgpu_bo_create_kernel(adev,
814                                               tables[SMU_TABLE_PMSTATUSLOG].size,
815                                               tables[SMU_TABLE_PMSTATUSLOG].align,
816                                               tables[SMU_TABLE_PMSTATUSLOG].domain,
817                                               &tables[SMU_TABLE_PMSTATUSLOG].bo,
818                                               &tables[SMU_TABLE_PMSTATUSLOG].mc_address,
819                                               &tables[SMU_TABLE_PMSTATUSLOG].cpu_addr);
820                 if (ret) {
821                         pr_err("VRAM allocation for tool table failed!\n");
822                         return ret;
823                 }
824         }
825
826         /* VRAM allocation for driver table */
827         for (i = 0; i < SMU_TABLE_COUNT; i++) {
828                 if (tables[i].size == 0)
829                         continue;
830
831                 if (i == SMU_TABLE_PMSTATUSLOG)
832                         continue;
833
834                 if (max_table_size < tables[i].size)
835                         max_table_size = tables[i].size;
836         }
837
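        /*
         * A single driver table BO, sized to fit the largest SMC table, is
         * shared by all table transfers; the tool table (PMSTATUSLOG) above
         * keeps its own dedicated BO.
         */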
838         driver_table->size = max_table_size;
839         driver_table->align = PAGE_SIZE;
840         driver_table->domain = AMDGPU_GEM_DOMAIN_VRAM;
841
842         ret = amdgpu_bo_create_kernel(adev,
843                                       driver_table->size,
844                                       driver_table->align,
845                                       driver_table->domain,
846                                       &driver_table->bo,
847                                       &driver_table->mc_address,
848                                       &driver_table->cpu_addr);
849         if (ret) {
850                 pr_err("VRAM allocation for driver table failed!\n");
851                 if (tables[SMU_TABLE_PMSTATUSLOG].mc_address)
852                         amdgpu_bo_free_kernel(&tables[SMU_TABLE_PMSTATUSLOG].bo,
853                                               &tables[SMU_TABLE_PMSTATUSLOG].mc_address,
854                                               &tables[SMU_TABLE_PMSTATUSLOG].cpu_addr);
855         }
856
857         return ret;
858 }
859
860 static int smu_fini_fb_allocations(struct smu_context *smu)
861 {
862         struct smu_table_context *smu_table = &smu->smu_table;
863         struct smu_table *tables = smu_table->tables;
864         struct smu_table *driver_table = &(smu_table->driver_table);
865
866         if (!tables)
867                 return 0;
868
869         if (tables[SMU_TABLE_PMSTATUSLOG].mc_address)
870                 amdgpu_bo_free_kernel(&tables[SMU_TABLE_PMSTATUSLOG].bo,
871                                       &tables[SMU_TABLE_PMSTATUSLOG].mc_address,
872                                       &tables[SMU_TABLE_PMSTATUSLOG].cpu_addr);
873
874         amdgpu_bo_free_kernel(&driver_table->bo,
875                               &driver_table->mc_address,
876                               &driver_table->cpu_addr);
877
878         return 0;
879 }
880
881 /**
882  * smu_alloc_memory_pool - allocate memory pool in the system memory
883  *
884  * @smu: amdgpu_device pointer
885  *
886  * This memory pool will be used for SMC use and msg SetSystemVirtualDramAddr
887  * and DramLogSetDramAddr can notify it changed.
888  *
889  * Returns 0 on success, error on failure.
890  */
891 static int smu_alloc_memory_pool(struct smu_context *smu)
892 {
893         struct amdgpu_device *adev = smu->adev;
894         struct smu_table_context *smu_table = &smu->smu_table;
895         struct smu_table *memory_pool = &smu_table->memory_pool;
896         uint64_t pool_size = smu->pool_size;
897         int ret = 0;
898
899         if (pool_size == SMU_MEMORY_POOL_SIZE_ZERO)
900                 return ret;
901
902         memory_pool->size = pool_size;
903         memory_pool->align = PAGE_SIZE;
904         memory_pool->domain = AMDGPU_GEM_DOMAIN_GTT;
905
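        /*
         * Only the predefined pool sizes are backed by a GTT allocation;
         * any other value leaves the pool unallocated.
         */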
906         switch (pool_size) {
907         case SMU_MEMORY_POOL_SIZE_256_MB:
908         case SMU_MEMORY_POOL_SIZE_512_MB:
909         case SMU_MEMORY_POOL_SIZE_1_GB:
910         case SMU_MEMORY_POOL_SIZE_2_GB:
911                 ret = amdgpu_bo_create_kernel(adev,
912                                               memory_pool->size,
913                                               memory_pool->align,
914                                               memory_pool->domain,
915                                               &memory_pool->bo,
916                                               &memory_pool->mc_address,
917                                               &memory_pool->cpu_addr);
918                 break;
919         default:
920                 break;
921         }
922
923         return ret;
924 }
925
926 static int smu_free_memory_pool(struct smu_context *smu)
927 {
928         struct smu_table_context *smu_table = &smu->smu_table;
929         struct smu_table *memory_pool = &smu_table->memory_pool;
930
931         if (memory_pool->size == SMU_MEMORY_POOL_SIZE_ZERO)
932                 return 0;
933
934         amdgpu_bo_free_kernel(&memory_pool->bo,
935                               &memory_pool->mc_address,
936                               &memory_pool->cpu_addr);
937
938         memset(memory_pool, 0, sizeof(struct smu_table));
939
940         return 0;
941 }
942
943 static int smu_smc_table_sw_init(struct smu_context *smu)
944 {
945         int ret;
946
947         /**
948          * Create smu_table structure, and init smc tables such as
949          * TABLE_PPTABLE, TABLE_WATERMARKS, TABLE_SMU_METRICS, etc.
950          */
951         ret = smu_init_smc_tables(smu);
952         if (ret) {
953                 pr_err("Failed to init smc tables!\n");
954                 return ret;
955         }
956
957         /**
958          * Create the smu_power_context structure, and allocate the smu_dpm_context
959          * and other context data used to fill the smu_power_context.
960          */
961         ret = smu_init_power(smu);
962         if (ret) {
963                 pr_err("Failed to init smu_init_power!\n");
964                 return ret;
965         }
966
967         /*
968          * allocate vram bos to store smc table contents.
969          */
970         ret = smu_init_fb_allocations(smu);
971         if (ret)
972                 return ret;
973
974         ret = smu_alloc_memory_pool(smu);
975         if (ret)
976                 return ret;
977
978         return 0;
979 }
980
981 static int smu_smc_table_sw_fini(struct smu_context *smu)
982 {
983         int ret;
984
985         ret = smu_free_memory_pool(smu);
986         if (ret)
987                 return ret;
988
989         ret = smu_fini_fb_allocations(smu);
990         if (ret)
991                 return ret;
992
993         ret = smu_fini_power(smu);
994         if (ret) {
995                 pr_err("Failed to fini smu power!\n");
996                 return ret;
997         }
998
999         ret = smu_fini_smc_tables(smu);
1000         if (ret) {
1001                 pr_err("Failed to fini smc tables!\n");
1002                 return ret;
1003         }
1004
1005         return 0;
1006 }
1007
1008 static int smu_sw_init(void *handle)
1009 {
1010         struct amdgpu_device *adev = (struct amdgpu_device *)handle;
1011         struct smu_context *smu = &adev->smu;
1012         int ret;
1013
1014         smu->pool_size = adev->pm.smu_prv_buffer_size;
1015         smu->smu_feature.feature_num = SMU_FEATURE_MAX;
1016         mutex_init(&smu->smu_feature.mutex);
1017         bitmap_zero(smu->smu_feature.supported, SMU_FEATURE_MAX);
1018         bitmap_zero(smu->smu_feature.enabled, SMU_FEATURE_MAX);
1019         bitmap_zero(smu->smu_feature.allowed, SMU_FEATURE_MAX);
1020
1021         mutex_init(&smu->smu_baco.mutex);
1022         smu->smu_baco.state = SMU_BACO_STATE_EXIT;
1023         smu->smu_baco.platform_support = false;
1024
1025         mutex_init(&smu->sensor_lock);
1026         mutex_init(&smu->metrics_lock);
1027         mutex_init(&smu->message_lock);
1028
1029         smu->watermarks_bitmap = 0;
1030         smu->power_profile_mode = PP_SMC_POWER_PROFILE_BOOTUP_DEFAULT;
1031         smu->default_power_profile_mode = PP_SMC_POWER_PROFILE_BOOTUP_DEFAULT;
1032
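        /*
         * Map each power profile mode to a workload priority/setting slot;
         * the default workload mask is built from the bootup default
         * profile's priority.
         */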
1033         smu->workload_mask = 1 << smu->workload_prority[PP_SMC_POWER_PROFILE_BOOTUP_DEFAULT];
1034         smu->workload_prority[PP_SMC_POWER_PROFILE_BOOTUP_DEFAULT] = 0;
1035         smu->workload_prority[PP_SMC_POWER_PROFILE_FULLSCREEN3D] = 1;
1036         smu->workload_prority[PP_SMC_POWER_PROFILE_POWERSAVING] = 2;
1037         smu->workload_prority[PP_SMC_POWER_PROFILE_VIDEO] = 3;
1038         smu->workload_prority[PP_SMC_POWER_PROFILE_VR] = 4;
1039         smu->workload_prority[PP_SMC_POWER_PROFILE_COMPUTE] = 5;
1040         smu->workload_prority[PP_SMC_POWER_PROFILE_CUSTOM] = 6;
1041
1042         smu->workload_setting[0] = PP_SMC_POWER_PROFILE_BOOTUP_DEFAULT;
1043         smu->workload_setting[1] = PP_SMC_POWER_PROFILE_FULLSCREEN3D;
1044         smu->workload_setting[2] = PP_SMC_POWER_PROFILE_POWERSAVING;
1045         smu->workload_setting[3] = PP_SMC_POWER_PROFILE_VIDEO;
1046         smu->workload_setting[4] = PP_SMC_POWER_PROFILE_VR;
1047         smu->workload_setting[5] = PP_SMC_POWER_PROFILE_COMPUTE;
1048         smu->workload_setting[6] = PP_SMC_POWER_PROFILE_CUSTOM;
1049         smu->display_config = &adev->pm.pm_display_cfg;
1050
1051         smu->smu_dpm.dpm_level = AMD_DPM_FORCED_LEVEL_AUTO;
1052         smu->smu_dpm.requested_dpm_level = AMD_DPM_FORCED_LEVEL_AUTO;
1053         ret = smu_init_microcode(smu);
1054         if (ret) {
1055                 pr_err("Failed to load smu firmware!\n");
1056                 return ret;
1057         }
1058
1059         ret = smu_smc_table_sw_init(smu);
1060         if (ret) {
1061                 pr_err("Failed to sw init smc table!\n");
1062                 return ret;
1063         }
1064
1065         ret = smu_register_irq_handler(smu);
1066         if (ret) {
1067                 pr_err("Failed to register smc irq handler!\n");
1068                 return ret;
1069         }
1070
1071         return 0;
1072 }
1073
1074 static int smu_sw_fini(void *handle)
1075 {
1076         struct amdgpu_device *adev = (struct amdgpu_device *)handle;
1077         struct smu_context *smu = &adev->smu;
1078         int ret;
1079
1080         kfree(smu->irq_source);
1081         smu->irq_source = NULL;
1082
1083         ret = smu_smc_table_sw_fini(smu);
1084         if (ret) {
1085                 pr_err("Failed to sw fini smc table!\n");
1086                 return ret;
1087         }
1088
1089         return 0;
1090 }
1091
1092 static int smu_smc_table_hw_init(struct smu_context *smu,
1093                                  bool initialize)
1094 {
1095         struct amdgpu_device *adev = smu->adev;
1096         int ret;
1097
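        /*
         * If DPM was kept running across a suspend, the SMC tables do not
         * need to be reprogrammed here.
         */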
1098         if (smu_is_dpm_running(smu) && adev->in_suspend) {
1099                 pr_info("dpm has been enabled\n");
1100                 return 0;
1101         }
1102
1103         ret = smu_init_display_count(smu, 0);
1104         if (ret)
1105                 return ret;
1106
1107         if (initialize) {
1108                 /* get boot_values from vbios to set revision, gfxclk, etc. */
1109                 ret = smu_get_vbios_bootup_values(smu);
1110                 if (ret)
1111                         return ret;
1112
1113                 ret = smu_setup_pptable(smu);
1114                 if (ret)
1115                         return ret;
1116
1117                 /*
1118                  * Send msg GetDriverIfVersion to check if the return value is equal
1119                  * to the DRIVER_IF_VERSION in the smc header.
1120                  */
1121                 ret = smu_check_fw_version(smu);
1122                 if (ret)
1123                         return ret;
1124         }
1125
1126         ret = smu_set_driver_table_location(smu);
1127         if (ret)
1128                 return ret;
1129
1130         /* smu_dump_pptable(smu); */
1131         /*
1132          * Copy pptable bo in the vram to smc with SMU MSGs such as
1133          * SetDriverDramAddr and TransferTableDram2Smu.
1134          */
1135         ret = smu_write_pptable(smu);
1136         if (ret)
1137                 return ret;
1138
1139         /* issue Run*Btc msg */
1140         ret = smu_run_btc(smu);
1141         if (ret)
1142                 return ret;
1143         ret = smu_feature_set_allowed_mask(smu);
1144         if (ret)
1145                 return ret;
1146
1147         ret = smu_system_features_control(smu, true);
1148         if (ret)
1149                 return ret;
1150
1151         ret = smu_disable_umc_cdr_12gbps_workaround(smu);
1152         if (ret) {
1153                 pr_err("Workaround failed to disable UMC CDR feature on 12Gbps SKU!\n");
1154                 return ret;
1155         }
1156
1157         /*
1158          * For Navi1X, manually switch it to AC mode as PMFW
1159          * may boot it with DC mode.
1160          */
1161         ret = smu_set_power_source(smu,
1162                                    adev->pm.ac_power ? SMU_POWER_SOURCE_AC :
1163                                    SMU_POWER_SOURCE_DC);
1164         if (ret) {
1165                 pr_err("Failed to switch to %s mode!\n", adev->pm.ac_power ? "AC" : "DC");
1166                 return ret;
1167         }
1168
1169         ret = smu_notify_display_change(smu);
1170         if (ret)
1171                 return ret;
1172
1173         /*
1174                  * Set the min deep sleep dcefclk with the bootup value from vbios via
1175          * SetMinDeepSleepDcefclk MSG.
1176          */
1177         ret = smu_set_min_dcef_deep_sleep(smu);
1178         if (ret)
1179                 return ret;
1180
1181         /*
1182          * Set initial values (from vbios) in the dpm tables context, such as
1183          * gfxclk, memclk, dcefclk, etc. And enable the DPM feature for each
1184          * clock type.
1185          */
1186         if (initialize) {
1187                 ret = smu_populate_smc_tables(smu);
1188                 if (ret)
1189                         return ret;
1190
1191                 ret = smu_init_max_sustainable_clocks(smu);
1192                 if (ret)
1193                         return ret;
1194         }
1195
1196         ret = smu_override_pcie_parameters(smu);
1197         if (ret)
1198                 return ret;
1199
1200         ret = smu_set_default_od_settings(smu, initialize);
1201         if (ret)
1202                 return ret;
1203
1204         if (initialize) {
1205                 ret = smu_populate_umd_state_clk(smu);
1206                 if (ret)
1207                         return ret;
1208
1209                 ret = smu_get_power_limit(smu, &smu->default_power_limit, false, false);
1210                 if (ret)
1211                         return ret;
1212         }
1213
1214         /*
1215          * Set PMSTATUSLOG table bo address with SetToolsDramAddr MSG for tools.
1216          */
1217         ret = smu_set_tool_table_location(smu);
1218
1219         if (!smu_is_dpm_running(smu))
1220                 pr_info("dpm has been disabled\n");
1221
1222         return ret;
1223 }
1224
1225 static int smu_start_smc_engine(struct smu_context *smu)
1226 {
1227         struct amdgpu_device *adev = smu->adev;
1228         int ret = 0;
1229
1230         if (adev->firmware.load_type != AMDGPU_FW_LOAD_PSP) {
1231                 if (adev->asic_type < CHIP_NAVI10) {
1232                         if (smu->ppt_funcs->load_microcode) {
1233                                 ret = smu->ppt_funcs->load_microcode(smu);
1234                                 if (ret)
1235                                         return ret;
1236                         }
1237                 }
1238         }
1239
1240         if (smu->ppt_funcs->check_fw_status) {
1241                 ret = smu->ppt_funcs->check_fw_status(smu);
1242                 if (ret)
1243                         pr_err("SMC is not ready\n");
1244         }
1245
1246         return ret;
1247 }
1248
1249 static int smu_hw_init(void *handle)
1250 {
1251         int ret;
1252         struct amdgpu_device *adev = (struct amdgpu_device *)handle;
1253         struct smu_context *smu = &adev->smu;
1254
1255         if (amdgpu_sriov_vf(adev) && !amdgpu_sriov_is_pp_one_vf(adev))
1256                 return 0;
1257
1258         ret = smu_start_smc_engine(smu);
1259         if (ret) {
1260                 pr_err("SMU is not ready yet!\n");
1261                 return ret;
1262         }
1263
1264         if (smu->is_apu) {
1265                 smu_powergate_sdma(&adev->smu, false);
1266                 smu_powergate_vcn(&adev->smu, false);
1267                 smu_powergate_jpeg(&adev->smu, false);
1268                 smu_set_gfx_cgpg(&adev->smu, true);
1269         }
1270
1271         if (!smu->pm_enabled)
1272                 return 0;
1273
1274         ret = smu_feature_init_dpm(smu);
1275         if (ret)
1276                 goto failed;
1277
1278         ret = smu_smc_table_hw_init(smu, true);
1279         if (ret)
1280                 goto failed;
1281
1282         /*
1283          * Use the msgs SetSystemVirtualDramAddr and DramLogSetDramAddr to notify
1284          * the SMU of the memory pool location.
1285          */
1286         ret = smu_notify_memory_pool_location(smu);
1287         if (ret)
1288                 goto failed;
1289
1290         ret = smu_enable_thermal_alert(smu);
1291         if (ret)
1292                 goto failed;
1293
1294         ret = smu_i2c_eeprom_init(smu, &adev->pm.smu_i2c);
1295         if (ret)
1296                 goto failed;
1297
1298         adev->pm.dpm_enabled = true;
1299
1300         pr_info("SMU is initialized successfully!\n");
1301
1302         return 0;
1303
1304 failed:
1305         return ret;
1306 }
1307
1308 static int smu_disable_dpms(struct smu_context *smu)
1309 {
1310         struct amdgpu_device *adev = smu->adev;
1311         int ret = 0;
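        /*
         * BACO is in use when entering a BACO-based GPU reset, or on runtime
         * suspend/hibernate for dGPUs that support BACO.
         */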
1312         bool use_baco = !smu->is_apu &&
1313                 ((adev->in_gpu_reset &&
1314                   (amdgpu_asic_reset_method(adev) == AMD_RESET_METHOD_BACO)) ||
1315                  ((adev->in_runpm || adev->in_hibernate) && amdgpu_asic_supports_baco(adev)));
1316
1317         /*
1318          * For custom pptable uploading, skip the DPM features
1319          * disable process on Navi1x ASICs.
1320          *   - As the gfx related features are under control of
1321          *     RLC on those ASICs. RLC reinitialization will be
1322          *     needed to reenable them. That would cost much more
1323          *     effort.
1324          *
1325          *   - SMU firmware can handle the DPM reenablement
1326          *     properly.
1327          */
1328         if (smu->uploading_custom_pp_table &&
1329             (adev->asic_type >= CHIP_NAVI10) &&
1330             (adev->asic_type <= CHIP_NAVI12))
1331                 return 0;
1332
1333         /*
1334          * For Sienna_Cichlid, PMFW will handle the features disablement properly
1335          * on BACO in. Driver involvement is unnecessary.
1336          */
1337         if ((adev->asic_type == CHIP_SIENNA_CICHLID) &&
1338              use_baco)
1339                 return 0;
1340
1341         /*
1342          * Disable all enabled SMU features.
1343          * This should be handled in SMU FW, but as a backup
1344          * the driver can issue the call to SMU FW until the
1345          * sequence in SMU FW is operational.
1346          */
1347         ret = smu_system_features_control(smu, false);
1348         if (ret) {
1349                 pr_err("Failed to disable smu features.\n");
1350                 return ret;
1351         }
1352
1353         /*
1354          * For baco, need to leave BACO feature enabled
1355          *
1356          * Correct the way for checking whether SMU_FEATURE_BACO_BIT
1357          * is supported.
1358          *
1359          * Since 'smu_feature_is_enabled(smu, SMU_FEATURE_BACO_BIT)' will
1360          * always return false as the 'smu_system_features_control(smu, false)'
1361          * was just issued above which disabled all SMU features.
1362          *
1363          * Thus 'smu_feature_get_index(smu, SMU_FEATURE_BACO_BIT)' is used
1364          * now for the checking.
1365          */
1366         if (use_baco && (smu_feature_get_index(smu, SMU_FEATURE_BACO_BIT) >= 0)) {
1367                 ret = smu_feature_set_enabled(smu, SMU_FEATURE_BACO_BIT, true);
1368                 if (ret) {
1369                         pr_warn("set BACO feature enabled failed, return %d\n", ret);
1370                         return ret;
1371                 }
1372         }
1373
1374         if (adev->asic_type >= CHIP_NAVI10 &&
1375             adev->gfx.rlc.funcs->stop)
1376                 adev->gfx.rlc.funcs->stop(adev);
1377
1378         return ret;
1379 }
1380
1381 static int smu_hw_fini(void *handle)
1382 {
1383         struct amdgpu_device *adev = (struct amdgpu_device *)handle;
1384         struct smu_context *smu = &adev->smu;
1385         int ret = 0;
1386
1387         if (amdgpu_sriov_vf(adev) && !amdgpu_sriov_is_pp_one_vf(adev))
1388                 return 0;
1389
1390         if (smu->is_apu) {
1391                 smu_powergate_sdma(&adev->smu, true);
1392                 smu_powergate_vcn(&adev->smu, true);
1393                 smu_powergate_jpeg(&adev->smu, true);
1394         }
1395
1396         if (!smu->pm_enabled)
1397                 return 0;
1398
1399         adev->pm.dpm_enabled = false;
1400
1401         smu_i2c_eeprom_fini(smu, &adev->pm.smu_i2c);
1402
1403         ret = smu_disable_thermal_alert(smu);
1404         if (ret) {
1405                 pr_warn("Failed to stop thermal control!\n");
1406                 return ret;
1407         }
1408
1409         ret = smu_disable_dpms(smu);
1410         if (ret) {
1411                 pr_warn("Failed to stop Dpms!\n");
1412                 return ret;
1413         }
1414
1415         return 0;
1416 }
1417
1418 int smu_reset(struct smu_context *smu)
1419 {
1420         struct amdgpu_device *adev = smu->adev;
1421         int ret = 0;
1422
1423         ret = smu_hw_fini(adev);
1424         if (ret)
1425                 return ret;
1426
1427         ret = smu_hw_init(adev);
1428         if (ret)
1429                 return ret;
1430
1431         return ret;
1432 }
1433
1434 static int smu_suspend(void *handle)
1435 {
1436         struct amdgpu_device *adev = (struct amdgpu_device *)handle;
1437         struct smu_context *smu = &adev->smu;
1438         int ret;
1439
1440         if (amdgpu_sriov_vf(adev) && !amdgpu_sriov_is_pp_one_vf(adev))
1441                 return 0;
1442
1443         if (!smu->pm_enabled)
1444                 return 0;
1445
1446         adev->pm.dpm_enabled = false;
1447
1448         smu_i2c_eeprom_fini(smu, &adev->pm.smu_i2c);
1449
1450         ret = smu_disable_thermal_alert(smu);
1451         if (ret) {
1452                 pr_warn("Failed to stop thermal control!\n");
1453                 return ret;
1454         }
1455
1456         ret = smu_disable_dpms(smu);
1457         if (ret)
1458                 return ret;
1459
1460         smu->watermarks_bitmap &= ~(WATERMARKS_LOADED);
1461
1462         if (smu->is_apu)
1463                 smu_set_gfx_cgpg(&adev->smu, false);
1464
1465         return 0;
1466 }
1467
1468 static int smu_resume(void *handle)
1469 {
1470         int ret;
1471         struct amdgpu_device *adev = (struct amdgpu_device *)handle;
1472         struct smu_context *smu = &adev->smu;
1473
1474         if (amdgpu_sriov_vf(adev) && !amdgpu_sriov_is_pp_one_vf(adev))
1475                 return 0;
1476
1477         if (!smu->pm_enabled)
1478                 return 0;
1479
1480         pr_info("SMU is resuming...\n");
1481
1482         ret = smu_start_smc_engine(smu);
1483         if (ret) {
1484                 pr_err("SMU is not ready yet!\n");
1485                 goto failed;
1486         }
1487
1488         ret = smu_smc_table_hw_init(smu, false);
1489         if (ret)
1490                 goto failed;
1491
1492         ret = smu_enable_thermal_alert(smu);
1493         if (ret)
1494                 goto failed;
1495
1496         ret = smu_i2c_eeprom_init(smu, &adev->pm.smu_i2c);
1497         if (ret)
1498                 goto failed;
1499
1500         if (smu->is_apu)
1501                 smu_set_gfx_cgpg(&adev->smu, true);
1502
1503         smu->disable_uclk_switch = 0;
1504
1505         adev->pm.dpm_enabled = true;
1506
1507         pr_info("SMU is resumed successfully!\n");
1508
1509         return 0;
1510
1511 failed:
1512         return ret;
1513 }
1514
1515 int smu_display_configuration_change(struct smu_context *smu,
1516                                      const struct amd_pp_display_configuration *display_config)
1517 {
1518         int index = 0;
1519         int num_of_active_display = 0;
1520
1521         if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
1522                 return -EOPNOTSUPP;
1523
1524         if (!display_config)
1525                 return -EINVAL;
1526
1527         mutex_lock(&smu->mutex);
1528
1529         if (smu->ppt_funcs->set_deep_sleep_dcefclk)
1530                 smu->ppt_funcs->set_deep_sleep_dcefclk(smu,
1531                                 display_config->min_dcef_deep_sleep_set_clk / 100);
1532
1533         for (index = 0; index < display_config->num_path_including_non_display; index++) {
1534                 if (display_config->displays[index].controller_id != 0)
1535                         num_of_active_display++;
1536         }
1537
1538         smu_set_active_display_count(smu, num_of_active_display);
1539
1540         smu_store_cc6_data(smu, display_config->cpu_pstate_separation_time,
1541                            display_config->cpu_cc6_disable,
1542                            display_config->cpu_pstate_disable,
1543                            display_config->nb_pstate_switch_disable);
1544
1545         mutex_unlock(&smu->mutex);
1546
1547         return 0;
1548 }
1549
1550 static int smu_get_clock_info(struct smu_context *smu,
1551                               struct smu_clock_info *clk_info,
1552                               enum smu_perf_level_designation designation)
1553 {
1554         int ret;
1555         struct smu_performance_level level = {0};
1556
1557         if (!clk_info)
1558                 return -EINVAL;
1559
1560         ret = smu_get_perf_level(smu, PERF_LEVEL_ACTIVITY, &level);
1561         if (ret)
1562                 return -EINVAL;
1563
1564         clk_info->min_mem_clk = level.memory_clock;
1565         clk_info->min_eng_clk = level.core_clock;
1566         clk_info->min_bus_bandwidth = level.non_local_mem_freq * level.non_local_mem_width;
1567
1568         ret = smu_get_perf_level(smu, designation, &level);
1569         if (ret)
1570                 return -EINVAL;
1571
1572         clk_info->min_mem_clk = level.memory_clock;
1573         clk_info->min_eng_clk = level.core_clock;
1574         clk_info->min_bus_bandwidth = level.non_local_mem_freq * level.non_local_mem_width;
1575
1576         return 0;
1577 }
1578
1579 int smu_get_current_clocks(struct smu_context *smu,
1580                            struct amd_pp_clock_info *clocks)
1581 {
1582         struct amd_pp_simple_clock_info simple_clocks = {0};
1583         struct smu_clock_info hw_clocks;
1584         int ret = 0;
1585
1586         if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
1587                 return -EOPNOTSUPP;
1588
1589         mutex_lock(&smu->mutex);
1590
1591         smu_get_dal_power_level(smu, &simple_clocks);
1592
1593         if (smu->support_power_containment)
1594                 ret = smu_get_clock_info(smu, &hw_clocks,
1595                                          PERF_LEVEL_POWER_CONTAINMENT);
1596         else
1597                 ret = smu_get_clock_info(smu, &hw_clocks, PERF_LEVEL_ACTIVITY);
1598
1599         if (ret) {
1600                 pr_err("Error in smu_get_clock_info\n");
1601                 goto failed;
1602         }
1603
1604         clocks->min_engine_clock = hw_clocks.min_eng_clk;
1605         clocks->max_engine_clock = hw_clocks.max_eng_clk;
1606         clocks->min_memory_clock = hw_clocks.min_mem_clk;
1607         clocks->max_memory_clock = hw_clocks.max_mem_clk;
1608         clocks->min_bus_bandwidth = hw_clocks.min_bus_bandwidth;
1609         clocks->max_bus_bandwidth = hw_clocks.max_bus_bandwidth;
1610         clocks->max_engine_clock_in_sr = hw_clocks.max_eng_clk;
1611         clocks->min_engine_clock_in_sr = hw_clocks.min_eng_clk;
1612
1613         if (simple_clocks.level == 0)
1614                 clocks->max_clocks_state = PP_DAL_POWERLEVEL_7;
1615         else
1616                 clocks->max_clocks_state = simple_clocks.level;
1617
1618         if (!smu_get_current_shallow_sleep_clocks(smu, &hw_clocks)) {
1619                 clocks->max_engine_clock_in_sr = hw_clocks.max_eng_clk;
1620                 clocks->min_engine_clock_in_sr = hw_clocks.min_eng_clk;
1621         }
1622
1623 failed:
1624         mutex_unlock(&smu->mutex);
1625         return ret;
1626 }
1627
1628 static int smu_set_clockgating_state(void *handle,
1629                                      enum amd_clockgating_state state)
1630 {
1631         return 0;
1632 }
1633
1634 static int smu_set_powergating_state(void *handle,
1635                                      enum amd_powergating_state state)
1636 {
1637         return 0;
1638 }
1639
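/*
 * Entering one of the UMD profiling levels (profile standard/min sclk/
 * min mclk/peak) saves the current DPM level and ungates GFX power and
 * clock gating so profiling tools see stable clocks; leaving the
 * profiling levels restores the saved level (on PROFILE_EXIT) and
 * re-enables GFX CG/PG.
 */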
1640 static int smu_enable_umd_pstate(void *handle,
1641                       enum amd_dpm_forced_level *level)
1642 {
1643         uint32_t profile_mode_mask = AMD_DPM_FORCED_LEVEL_PROFILE_STANDARD |
1644                                         AMD_DPM_FORCED_LEVEL_PROFILE_MIN_SCLK |
1645                                         AMD_DPM_FORCED_LEVEL_PROFILE_MIN_MCLK |
1646                                         AMD_DPM_FORCED_LEVEL_PROFILE_PEAK;
1647
1648         struct smu_context *smu = (struct smu_context *)handle;
1649         struct smu_dpm_context *smu_dpm_ctx = &(smu->smu_dpm);
1650
1651         if (!smu->is_apu && !smu_dpm_ctx->dpm_context)
1652                 return -EINVAL;
1653
1654         if (!(smu_dpm_ctx->dpm_level & profile_mode_mask)) {
1655                 /* enter umd pstate, save current level, disable gfx cg */
1656                 if (*level & profile_mode_mask) {
1657                         smu_dpm_ctx->saved_dpm_level = smu_dpm_ctx->dpm_level;
1658                         smu_dpm_ctx->enable_umd_pstate = true;
1659                         amdgpu_device_ip_set_powergating_state(smu->adev,
1660                                                                AMD_IP_BLOCK_TYPE_GFX,
1661                                                                AMD_PG_STATE_UNGATE);
1662                         amdgpu_device_ip_set_clockgating_state(smu->adev,
1663                                                                AMD_IP_BLOCK_TYPE_GFX,
1664                                                                AMD_CG_STATE_UNGATE);
1665                 }
1666         } else {
1667                 /* exit umd pstate, restore level, enable gfx cg */
1668                 if (!(*level & profile_mode_mask)) {
1669                         if (*level == AMD_DPM_FORCED_LEVEL_PROFILE_EXIT)
1670                                 *level = smu_dpm_ctx->saved_dpm_level;
1671                         smu_dpm_ctx->enable_umd_pstate = false;
1672                         amdgpu_device_ip_set_clockgating_state(smu->adev,
1673                                                                AMD_IP_BLOCK_TYPE_GFX,
1674                                                                AMD_CG_STATE_GATE);
1675                         amdgpu_device_ip_set_powergating_state(smu->adev,
1676                                                                AMD_IP_BLOCK_TYPE_GFX,
1677                                                                AMD_PG_STATE_GATE);
1678                 }
1679         }
1680
1681         return 0;
1682 }
1683
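/*
 * Common power state re-evaluation path: optionally propagate the new
 * display configuration to the SMC, apply the ASIC clock adjust rules,
 * switch the performance level if it changed, and, when not in manual
 * mode, re-apply the highest priority workload found in workload_mask.
 */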
1684 int smu_adjust_power_state_dynamic(struct smu_context *smu,
1685                                    enum amd_dpm_forced_level level,
1686                                    bool skip_display_settings)
1687 {
1688         int ret = 0;
1689         int index = 0;
1690         long workload;
1691         struct smu_dpm_context *smu_dpm_ctx = &(smu->smu_dpm);
1692
1693         if (!skip_display_settings) {
1694                 ret = smu_display_config_changed(smu);
1695                 if (ret) {
1696                         pr_err("Failed to change display config!\n");
1697                         return ret;
1698                 }
1699         }
1700
1701         ret = smu_apply_clocks_adjust_rules(smu);
1702         if (ret) {
1703                 pr_err("Failed to apply clocks adjust rules!\n");
1704                 return ret;
1705         }
1706
1707         if (!skip_display_settings) {
1708                 ret = smu_notify_smc_display_config(smu);
1709                 if (ret) {
1710                         pr_err("Failed to notify smc display config!\n");
1711                         return ret;
1712                 }
1713         }
1714
1715         if (smu_dpm_ctx->dpm_level != level) {
1716                 ret = smu_asic_set_performance_level(smu, level);
1717                 if (ret) {
1718                         pr_err("Failed to set performance level!\n");
1719                         return ret;
1720                 }
1721
1722                 /* update the saved copy */
1723                 smu_dpm_ctx->dpm_level = level;
1724         }
1725
1726         if (smu_dpm_ctx->dpm_level != AMD_DPM_FORCED_LEVEL_MANUAL) {
1727                 index = fls(smu->workload_mask);
1728                 index = index > 0 && index <= WORKLOAD_POLICY_MAX ? index - 1 : 0;
1729                 workload = smu->workload_setting[index];
1730
1731                 if (smu->power_profile_mode != workload)
1732                         smu_set_power_profile_mode(smu, &workload, 0, false);
1733         }
1734
1735         return ret;
1736 }
1737
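/*
 * Dispatch a powerplay task. For example (illustrative only, not a
 * verbatim caller), a display configuration change would typically be
 * driven as:
 *
 *	ret = smu_handle_task(smu, smu_get_performance_level(smu),
 *			      AMD_PP_TASK_DISPLAY_CONFIG_CHANGE, true);
 *
 * which pre-notifies the SMC, updates the CPU power state and then
 * re-adjusts the power state dynamically.
 */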
1738 int smu_handle_task(struct smu_context *smu,
1739                     enum amd_dpm_forced_level level,
1740                     enum amd_pp_task task_id,
1741                     bool lock_needed)
1742 {
1743         int ret = 0;
1744
1745         if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
1746                 return -EOPNOTSUPP;
1747
1748         if (lock_needed)
1749                 mutex_lock(&smu->mutex);
1750
1751         switch (task_id) {
1752         case AMD_PP_TASK_DISPLAY_CONFIG_CHANGE:
1753                 ret = smu_pre_display_config_changed(smu);
1754                 if (ret)
1755                         goto out;
1756                 ret = smu_set_cpu_power_state(smu);
1757                 if (ret)
1758                         goto out;
1759                 ret = smu_adjust_power_state_dynamic(smu, level, false);
1760                 break;
1761         case AMD_PP_TASK_COMPLETE_INIT:
1762         case AMD_PP_TASK_READJUST_POWER_STATE:
1763                 ret = smu_adjust_power_state_dynamic(smu, level, true);
1764                 break;
1765         default:
1766                 break;
1767         }
1768
1769 out:
1770         if (lock_needed)
1771                 mutex_unlock(&smu->mutex);
1772
1773         return ret;
1774 }
1775
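/*
 * Set or clear the workload bit for the given profile in workload_mask
 * (indexed through workload_prority), then pick the highest priority bit
 * still set via fls() and apply that profile mode unless the DPM level
 * is forced to manual. A hypothetical caller enabling the compute
 * profile would look like:
 *
 *	smu_switch_power_profile(smu, PP_SMC_POWER_PROFILE_COMPUTE, true);
 */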
1776 int smu_switch_power_profile(struct smu_context *smu,
1777                              enum PP_SMC_POWER_PROFILE type,
1778                              bool en)
1779 {
1780         struct smu_dpm_context *smu_dpm_ctx = &(smu->smu_dpm);
1781         long workload;
1782         uint32_t index;
1783
1784         if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
1785                 return -EOPNOTSUPP;
1786
1787         if (type >= PP_SMC_POWER_PROFILE_CUSTOM)
1788                 return -EINVAL;
1789
1790         mutex_lock(&smu->mutex);
1791
1792         if (!en) {
1793                 smu->workload_mask &= ~(1 << smu->workload_prority[type]);
1794                 index = fls(smu->workload_mask);
1795                 index = index > 0 && index <= WORKLOAD_POLICY_MAX ? index - 1 : 0;
1796                 workload = smu->workload_setting[index];
1797         } else {
1798                 smu->workload_mask |= (1 << smu->workload_prority[type]);
1799                 index = fls(smu->workload_mask);
1800                 index = index <= WORKLOAD_POLICY_MAX ? index - 1 : 0;
1801                 workload = smu->workload_setting[index];
1802         }
1803
1804         if (smu_dpm_ctx->dpm_level != AMD_DPM_FORCED_LEVEL_MANUAL)
1805                 smu_set_power_profile_mode(smu, &workload, 0, false);
1806
1807         mutex_unlock(&smu->mutex);
1808
1809         return 0;
1810 }
1811
1812 enum amd_dpm_forced_level smu_get_performance_level(struct smu_context *smu)
1813 {
1814         struct smu_dpm_context *smu_dpm_ctx = &(smu->smu_dpm);
1815         enum amd_dpm_forced_level level;
1816
1817         if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
1818                 return -EOPNOTSUPP;
1819
1820         if (!smu->is_apu && !smu_dpm_ctx->dpm_context)
1821                 return -EINVAL;
1822
1823         mutex_lock(&(smu->mutex));
1824         level = smu_dpm_ctx->dpm_level;
1825         mutex_unlock(&(smu->mutex));
1826
1827         return level;
1828 }
1829
1830 int smu_force_performance_level(struct smu_context *smu, enum amd_dpm_forced_level level)
1831 {
1832         struct smu_dpm_context *smu_dpm_ctx = &(smu->smu_dpm);
1833         int ret = 0;
1834
1835         if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
1836                 return -EOPNOTSUPP;
1837
1838         if (!smu->is_apu && !smu_dpm_ctx->dpm_context)
1839                 return -EINVAL;
1840
1841         mutex_lock(&smu->mutex);
1842
1843         ret = smu_enable_umd_pstate(smu, &level);
1844         if (ret) {
1845                 mutex_unlock(&smu->mutex);
1846                 return ret;
1847         }
1848
1849         ret = smu_handle_task(smu, level,
1850                               AMD_PP_TASK_READJUST_POWER_STATE,
1851                               false);
1852
1853         mutex_unlock(&smu->mutex);
1854
1855         return ret;
1856 }
1857
1858 int smu_set_display_count(struct smu_context *smu, uint32_t count)
1859 {
1860         int ret = 0;
1861
1862         if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
1863                 return -EOPNOTSUPP;
1864
1865         mutex_lock(&smu->mutex);
1866         ret = smu_init_display_count(smu, count);
1867         mutex_unlock(&smu->mutex);
1868
1869         return ret;
1870 }
1871
1872 int smu_force_clk_levels(struct smu_context *smu,
1873                          enum smu_clk_type clk_type,
1874                          uint32_t mask,
1875                          bool lock_needed)
1876 {
1877         struct smu_dpm_context *smu_dpm_ctx = &(smu->smu_dpm);
1878         int ret = 0;
1879
1880         if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
1881                 return -EOPNOTSUPP;
1882
1883         if (smu_dpm_ctx->dpm_level != AMD_DPM_FORCED_LEVEL_MANUAL) {
1884                 pr_debug("force clock level is for dpm manual mode only.\n");
1885                 return -EINVAL;
1886         }
1887
1888         if (lock_needed)
1889                 mutex_lock(&smu->mutex);
1890
1891         if (smu->ppt_funcs && smu->ppt_funcs->force_clk_levels)
1892                 ret = smu->ppt_funcs->force_clk_levels(smu, clk_type, mask);
1893
1894         if (lock_needed)
1895                 mutex_unlock(&smu->mutex);
1896
1897         return ret;
1898 }
1899
1900 /*
1901  * On system suspend or reset, the dpm_enabled flag is cleared
1902  * so that the SMU services which are no longer available get
1903  * gated.
1904  * However, setting the mp1 state should still be allowed even
1905  * with dpm_enabled cleared.
1906  */
1907 int smu_set_mp1_state(struct smu_context *smu,
1908                       enum pp_mp1_state mp1_state)
1909 {
1910         uint16_t msg;
1911         int ret;
1912
1913         if (!smu->pm_enabled)
1914                 return -EOPNOTSUPP;
1915
1916         mutex_lock(&smu->mutex);
1917
1918         switch (mp1_state) {
1919         case PP_MP1_STATE_SHUTDOWN:
1920                 msg = SMU_MSG_PrepareMp1ForShutdown;
1921                 break;
1922         case PP_MP1_STATE_UNLOAD:
1923                 msg = SMU_MSG_PrepareMp1ForUnload;
1924                 break;
1925         case PP_MP1_STATE_RESET:
1926                 msg = SMU_MSG_PrepareMp1ForReset;
1927                 break;
1928         case PP_MP1_STATE_NONE:
1929         default:
1930                 mutex_unlock(&smu->mutex);
1931                 return 0;
1932         }
1933
1934         /* some asics may not support those messages */
1935         if (smu_msg_get_index(smu, msg) < 0) {
1936                 mutex_unlock(&smu->mutex);
1937                 return 0;
1938         }
1939
1940         ret = smu_send_smc_msg(smu, msg, NULL);
1941         if (ret)
1942                 pr_err("[PrepareMp1] Failed!\n");
1943
1944         mutex_unlock(&smu->mutex);
1945
1946         return ret;
1947 }
1948
1949 int smu_set_df_cstate(struct smu_context *smu,
1950                       enum pp_df_cstate state)
1951 {
1952         int ret = 0;
1953
1954         if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
1955                 return -EOPNOTSUPP;
1956
1957         if (!smu->ppt_funcs || !smu->ppt_funcs->set_df_cstate)
1958                 return 0;
1959
1960         mutex_lock(&smu->mutex);
1961
1962         ret = smu->ppt_funcs->set_df_cstate(smu, state);
1963         if (ret)
1964                 pr_err("[SetDfCstate] failed!\n");
1965
1966         mutex_unlock(&smu->mutex);
1967
1968         return ret;
1969 }
1970
1971 int smu_allow_xgmi_power_down(struct smu_context *smu, bool en)
1972 {
1973         int ret = 0;
1974
1975         if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
1976                 return -EOPNOTSUPP;
1977
1978         if (!smu->ppt_funcs || !smu->ppt_funcs->allow_xgmi_power_down)
1979                 return 0;
1980
1981         mutex_lock(&smu->mutex);
1982
1983         ret = smu->ppt_funcs->allow_xgmi_power_down(smu, en);
1984         if (ret)
1985                 pr_err("[AllowXgmiPowerDown] failed!\n");
1986
1987         mutex_unlock(&smu->mutex);
1988
1989         return ret;
1990 }
1991
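/*
 * Push the locally cached watermarks table down to the SMU firmware via
 * the SMU_TABLE_WATERMARKS transfer.
 */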
1992 int smu_write_watermarks_table(struct smu_context *smu)
1993 {
1994         void *watermarks_table = smu->smu_table.watermarks_table;
1995
1996         if (!watermarks_table)
1997                 return -EINVAL;
1998
1999         return smu_update_table(smu,
2000                                 SMU_TABLE_WATERMARKS,
2001                                 0,
2002                                 watermarks_table,
2003                                 true);
2004 }
2005
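/*
 * Translate the display core clock ranges into the cached watermarks
 * table. This only happens when watermark handling is not disabled and
 * both DCEFCLK and SOCCLK DPM are enabled; the table is then flagged as
 * existing but not yet loaded, so it can later be uploaded to the
 * firmware via smu_write_watermarks_table().
 */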
2006 int smu_set_watermarks_for_clock_ranges(struct smu_context *smu,
2007                 struct dm_pp_wm_sets_with_clock_ranges_soc15 *clock_ranges)
2008 {
2009         void *table = smu->smu_table.watermarks_table;
2010
2011         if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
2012                 return -EOPNOTSUPP;
2013
2014         if (!table)
2015                 return -EINVAL;
2016
2017         mutex_lock(&smu->mutex);
2018
2019         if (!smu->disable_watermark &&
2020                         smu_feature_is_enabled(smu, SMU_FEATURE_DPM_DCEFCLK_BIT) &&
2021                         smu_feature_is_enabled(smu, SMU_FEATURE_DPM_SOCCLK_BIT)) {
2022                 smu_set_watermarks_table(smu, table, clock_ranges);
2023
2024                 if (!(smu->watermarks_bitmap & WATERMARKS_EXIST)) {
2025                         smu->watermarks_bitmap |= WATERMARKS_EXIST;
2026                         smu->watermarks_bitmap &= ~WATERMARKS_LOADED;
2027                 }
2028         }
2029
2030         mutex_unlock(&smu->mutex);
2031
2032         return 0;
2033 }
2034
2035 int smu_set_ac_dc(struct smu_context *smu)
2036 {
2037         int ret = 0;
2038
2039         if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
2040                 return -EOPNOTSUPP;
2041
2042         /* controlled by firmware */
2043         if (smu->dc_controlled_by_gpio)
2044                 return 0;
2045
2046         mutex_lock(&smu->mutex);
2047         ret = smu_set_power_source(smu,
2048                                    smu->adev->pm.ac_power ? SMU_POWER_SOURCE_AC :
2049                                    SMU_POWER_SOURCE_DC);
2050         if (ret)
2051                 pr_err("Failed to switch to %s mode!\n",
2052                        smu->adev->pm.ac_power ? "AC" : "DC");
2053         mutex_unlock(&smu->mutex);
2054
2055         return ret;
2056 }
2057
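/*
 * amdgpu IP block glue. The same amd_ip_funcs table backs both the SMU
 * v11 and v12 IP block versions below; an ASIC init path would register
 * one of them roughly as (illustrative only):
 *
 *	amdgpu_device_ip_block_add(adev, &smu_v11_0_ip_block);
 */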
2058 const struct amd_ip_funcs smu_ip_funcs = {
2059         .name = "smu",
2060         .early_init = smu_early_init,
2061         .late_init = smu_late_init,
2062         .sw_init = smu_sw_init,
2063         .sw_fini = smu_sw_fini,
2064         .hw_init = smu_hw_init,
2065         .hw_fini = smu_hw_fini,
2066         .suspend = smu_suspend,
2067         .resume = smu_resume,
2068         .is_idle = NULL,
2069         .check_soft_reset = NULL,
2070         .wait_for_idle = NULL,
2071         .soft_reset = NULL,
2072         .set_clockgating_state = smu_set_clockgating_state,
2073         .set_powergating_state = smu_set_powergating_state,
2074         .enable_umd_pstate = smu_enable_umd_pstate,
2075 };
2076
2077 const struct amdgpu_ip_block_version smu_v11_0_ip_block =
2078 {
2079         .type = AMD_IP_BLOCK_TYPE_SMC,
2080         .major = 11,
2081         .minor = 0,
2082         .rev = 0,
2083         .funcs = &smu_ip_funcs,
2084 };
2085
2086 const struct amdgpu_ip_block_version smu_v12_0_ip_block =
2087 {
2088         .type = AMD_IP_BLOCK_TYPE_SMC,
2089         .major = 12,
2090         .minor = 0,
2091         .rev = 0,
2092         .funcs = &smu_ip_funcs,
2093 };
2094
2095 int smu_load_microcode(struct smu_context *smu)
2096 {
2097         int ret = 0;
2098
2099         if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
2100                 return -EOPNOTSUPP;
2101
2102         mutex_lock(&smu->mutex);
2103
2104         if (smu->ppt_funcs->load_microcode)
2105                 ret = smu->ppt_funcs->load_microcode(smu);
2106
2107         mutex_unlock(&smu->mutex);
2108
2109         return ret;
2110 }
2111
2112 int smu_check_fw_status(struct smu_context *smu)
2113 {
2114         int ret = 0;
2115
2116         if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
2117                 return -EOPNOTSUPP;
2118
2119         mutex_lock(&smu->mutex);
2120
2121         if (smu->ppt_funcs->check_fw_status)
2122                 ret = smu->ppt_funcs->check_fw_status(smu);
2123
2124         mutex_unlock(&smu->mutex);
2125
2126         return ret;
2127 }
2128
2129 int smu_set_gfx_cgpg(struct smu_context *smu, bool enabled)
2130 {
2131         int ret = 0;
2132
2133         mutex_lock(&smu->mutex);
2134
2135         if (smu->ppt_funcs->set_gfx_cgpg)
2136                 ret = smu->ppt_funcs->set_gfx_cgpg(smu, enabled);
2137
2138         mutex_unlock(&smu->mutex);
2139
2140         return ret;
2141 }
2142
2143 int smu_set_fan_speed_rpm(struct smu_context *smu, uint32_t speed)
2144 {
2145         int ret = 0;
2146
2147         if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
2148                 return -EOPNOTSUPP;
2149
2150         mutex_lock(&smu->mutex);
2151
2152         if (smu->ppt_funcs->set_fan_speed_rpm)
2153                 ret = smu->ppt_funcs->set_fan_speed_rpm(smu, speed);
2154
2155         mutex_unlock(&smu->mutex);
2156
2157         return ret;
2158 }
2159
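/*
 * Query the power limit from the ASIC backend. 'def' selects the default
 * rather than the current limit. When lock_needed is false the caller is
 * expected to already hold smu->mutex (and to have performed the
 * dpm_enabled check itself).
 */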
2160 int smu_get_power_limit(struct smu_context *smu,
2161                         uint32_t *limit,
2162                         bool def,
2163                         bool lock_needed)
2164 {
2165         int ret = 0;
2166
2167         if (lock_needed) {
2168                 if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
2169                         return -EOPNOTSUPP;
2170
2171                 mutex_lock(&smu->mutex);
2172         }
2173
2174         if (smu->ppt_funcs->get_power_limit)
2175                 ret = smu->ppt_funcs->get_power_limit(smu, limit, def);
2176
2177         if (lock_needed)
2178                 mutex_unlock(&smu->mutex);
2179
2180         return ret;
2181 }
2182
2183 int smu_set_power_limit(struct smu_context *smu, uint32_t limit)
2184 {
2185         int ret = 0;
2186
2187         if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
2188                 return -EOPNOTSUPP;
2189
2190         mutex_lock(&smu->mutex);
2191
2192         if (smu->ppt_funcs->set_power_limit)
2193                 ret = smu->ppt_funcs->set_power_limit(smu, limit);
2194
2195         mutex_unlock(&smu->mutex);
2196
2197         return ret;
2198 }
2199
2200 int smu_print_clk_levels(struct smu_context *smu, enum smu_clk_type clk_type, char *buf)
2201 {
2202         int ret = 0;
2203
2204         if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
2205                 return -EOPNOTSUPP;
2206
2207         mutex_lock(&smu->mutex);
2208
2209         if (smu->ppt_funcs->print_clk_levels)
2210                 ret = smu->ppt_funcs->print_clk_levels(smu, clk_type, buf);
2211
2212         mutex_unlock(&smu->mutex);
2213
2214         return ret;
2215 }
2216
2217 int smu_get_od_percentage(struct smu_context *smu, enum smu_clk_type type)
2218 {
2219         int ret = 0;
2220
2221         if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
2222                 return -EOPNOTSUPP;
2223
2224         mutex_lock(&smu->mutex);
2225
2226         if (smu->ppt_funcs->get_od_percentage)
2227                 ret = smu->ppt_funcs->get_od_percentage(smu, type);
2228
2229         mutex_unlock(&smu->mutex);
2230
2231         return ret;
2232 }
2233
2234 int smu_set_od_percentage(struct smu_context *smu, enum smu_clk_type type, uint32_t value)
2235 {
2236         int ret = 0;
2237
2238         if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
2239                 return -EOPNOTSUPP;
2240
2241         mutex_lock(&smu->mutex);
2242
2243         if (smu->ppt_funcs->set_od_percentage)
2244                 ret = smu->ppt_funcs->set_od_percentage(smu, type, value);
2245
2246         mutex_unlock(&smu->mutex);
2247
2248         return ret;
2249 }
2250
2251 int smu_od_edit_dpm_table(struct smu_context *smu,
2252                           enum PP_OD_DPM_TABLE_COMMAND type,
2253                           long *input, uint32_t size)
2254 {
2255         int ret = 0;
2256
2257         if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
2258                 return -EOPNOTSUPP;
2259
2260         mutex_lock(&smu->mutex);
2261
2262         if (smu->ppt_funcs->od_edit_dpm_table)
2263                 ret = smu->ppt_funcs->od_edit_dpm_table(smu, type, input, size);
2264
2265         mutex_unlock(&smu->mutex);
2266
2267         return ret;
2268 }
2269
2270 int smu_read_sensor(struct smu_context *smu,
2271                     enum amd_pp_sensors sensor,
2272                     void *data, uint32_t *size)
2273 {
2274         int ret = 0;
2275
2276         if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
2277                 return -EOPNOTSUPP;
2278
2279         mutex_lock(&smu->mutex);
2280
2281         if (smu->ppt_funcs->read_sensor)
2282                 ret = smu->ppt_funcs->read_sensor(smu, sensor, data, size);
2283
2284         mutex_unlock(&smu->mutex);
2285
2286         return ret;
2287 }
2288
2289 int smu_get_power_profile_mode(struct smu_context *smu, char *buf)
2290 {
2291         int ret = 0;
2292
2293         if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
2294                 return -EOPNOTSUPP;
2295
2296         mutex_lock(&smu->mutex);
2297
2298         if (smu->ppt_funcs->get_power_profile_mode)
2299                 ret = smu->ppt_funcs->get_power_profile_mode(smu, buf);
2300
2301         mutex_unlock(&smu->mutex);
2302
2303         return ret;
2304 }
2305
2306 int smu_set_power_profile_mode(struct smu_context *smu,
2307                                long *param,
2308                                uint32_t param_size,
2309                                bool lock_needed)
2310 {
2311         int ret = 0;
2312
2313         if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
2314                 return -EOPNOTSUPP;
2315
2316         if (lock_needed)
2317                 mutex_lock(&smu->mutex);
2318
2319         if (smu->ppt_funcs->set_power_profile_mode)
2320                 ret = smu->ppt_funcs->set_power_profile_mode(smu, param, param_size);
2321
2322         if (lock_needed)
2323                 mutex_unlock(&smu->mutex);
2324
2325         return ret;
2326 }
2327
2329 int smu_get_fan_control_mode(struct smu_context *smu)
2330 {
2331         int ret = 0;
2332
2333         if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
2334                 return -EOPNOTSUPP;
2335
2336         mutex_lock(&smu->mutex);
2337
2338         if (smu->ppt_funcs->get_fan_control_mode)
2339                 ret = smu->ppt_funcs->get_fan_control_mode(smu);
2340
2341         mutex_unlock(&smu->mutex);
2342
2343         return ret;
2344 }
2345
2346 int smu_set_fan_control_mode(struct smu_context *smu, int value)
2347 {
2348         int ret = 0;
2349
2350         if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
2351                 return -EOPNOTSUPP;
2352
2353         mutex_lock(&smu->mutex);
2354
2355         if (smu->ppt_funcs->set_fan_control_mode)
2356                 ret = smu->ppt_funcs->set_fan_control_mode(smu, value);
2357
2358         mutex_unlock(&smu->mutex);
2359
2360         return ret;
2361 }
2362
2363 int smu_get_fan_speed_percent(struct smu_context *smu, uint32_t *speed)
2364 {
2365         int ret = 0;
2366
2367         if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
2368                 return -EOPNOTSUPP;
2369
2370         mutex_lock(&smu->mutex);
2371
2372         if (smu->ppt_funcs->get_fan_speed_percent)
2373                 ret = smu->ppt_funcs->get_fan_speed_percent(smu, speed);
2374
2375         mutex_unlock(&smu->mutex);
2376
2377         return ret;
2378 }
2379
2380 int smu_set_fan_speed_percent(struct smu_context *smu, uint32_t speed)
2381 {
2382         int ret = 0;
2383
2384         if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
2385                 return -EOPNOTSUPP;
2386
2387         mutex_lock(&smu->mutex);
2388
2389         if (smu->ppt_funcs->set_fan_speed_percent)
2390                 ret = smu->ppt_funcs->set_fan_speed_percent(smu, speed);
2391
2392         mutex_unlock(&smu->mutex);
2393
2394         return ret;
2395 }
2396
2397 int smu_get_fan_speed_rpm(struct smu_context *smu, uint32_t *speed)
2398 {
2399         int ret = 0;
2400
2401         if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
2402                 return -EOPNOTSUPP;
2403
2404         mutex_lock(&smu->mutex);
2405
2406         if (smu->ppt_funcs->get_fan_speed_rpm)
2407                 ret = smu->ppt_funcs->get_fan_speed_rpm(smu, speed);
2408
2409         mutex_unlock(&smu->mutex);
2410
2411         return ret;
2412 }
2413
2414 int smu_set_deep_sleep_dcefclk(struct smu_context *smu, int clk)
2415 {
2416         int ret = 0;
2417
2418         if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
2419                 return -EOPNOTSUPP;
2420
2421         mutex_lock(&smu->mutex);
2422
2423         if (smu->ppt_funcs->set_deep_sleep_dcefclk)
2424                 ret = smu->ppt_funcs->set_deep_sleep_dcefclk(smu, clk);
2425
2426         mutex_unlock(&smu->mutex);
2427
2428         return ret;
2429 }
2430
2431 int smu_set_active_display_count(struct smu_context *smu, uint32_t count)
2432 {
2433         int ret = 0;
2434
2435         if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
2436                 return -EOPNOTSUPP;
2437
2438         if (smu->ppt_funcs->set_active_display_count)
2439                 ret = smu->ppt_funcs->set_active_display_count(smu, count);
2440
2441         return ret;
2442 }
2443
2444 int smu_get_clock_by_type(struct smu_context *smu,
2445                           enum amd_pp_clock_type type,
2446                           struct amd_pp_clocks *clocks)
2447 {
2448         int ret = 0;
2449
2450         if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
2451                 return -EOPNOTSUPP;
2452
2453         mutex_lock(&smu->mutex);
2454
2455         if (smu->ppt_funcs->get_clock_by_type)
2456                 ret = smu->ppt_funcs->get_clock_by_type(smu, type, clocks);
2457
2458         mutex_unlock(&smu->mutex);
2459
2460         return ret;
2461 }
2462
2463 int smu_get_max_high_clocks(struct smu_context *smu,
2464                             struct amd_pp_simple_clock_info *clocks)
2465 {
2466         int ret = 0;
2467
2468         if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
2469                 return -EOPNOTSUPP;
2470
2471         mutex_lock(&smu->mutex);
2472
2473         if (smu->ppt_funcs->get_max_high_clocks)
2474                 ret = smu->ppt_funcs->get_max_high_clocks(smu, clocks);
2475
2476         mutex_unlock(&smu->mutex);
2477
2478         return ret;
2479 }
2480
2481 int smu_get_clock_by_type_with_latency(struct smu_context *smu,
2482                                        enum smu_clk_type clk_type,
2483                                        struct pp_clock_levels_with_latency *clocks)
2484 {
2485         int ret = 0;
2486
2487         if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
2488                 return -EOPNOTSUPP;
2489
2490         mutex_lock(&smu->mutex);
2491
2492         if (smu->ppt_funcs->get_clock_by_type_with_latency)
2493                 ret = smu->ppt_funcs->get_clock_by_type_with_latency(smu, clk_type, clocks);
2494
2495         mutex_unlock(&smu->mutex);
2496
2497         return ret;
2498 }
2499
2500 int smu_get_clock_by_type_with_voltage(struct smu_context *smu,
2501                                        enum amd_pp_clock_type type,
2502                                        struct pp_clock_levels_with_voltage *clocks)
2503 {
2504         int ret = 0;
2505
2506         if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
2507                 return -EOPNOTSUPP;
2508
2509         mutex_lock(&smu->mutex);
2510
2511         if (smu->ppt_funcs->get_clock_by_type_with_voltage)
2512                 ret = smu->ppt_funcs->get_clock_by_type_with_voltage(smu, type, clocks);
2513
2514         mutex_unlock(&smu->mutex);
2515
2516         return ret;
2517 }
2518
2520 int smu_display_clock_voltage_request(struct smu_context *smu,
2521                                       struct pp_display_clock_request *clock_req)
2522 {
2523         int ret = 0;
2524
2525         if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
2526                 return -EOPNOTSUPP;
2527
2528         mutex_lock(&smu->mutex);
2529
2530         if (smu->ppt_funcs->display_clock_voltage_request)
2531                 ret = smu->ppt_funcs->display_clock_voltage_request(smu, clock_req);
2532
2533         mutex_unlock(&smu->mutex);
2534
2535         return ret;
2536 }
2537
2539 int smu_display_disable_memory_clock_switch(struct smu_context *smu, bool disable_memory_clock_switch)
2540 {
2541         int ret = -EINVAL;
2542
2543         if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
2544                 return -EOPNOTSUPP;
2545
2546         mutex_lock(&smu->mutex);
2547
2548         if (smu->ppt_funcs->display_disable_memory_clock_switch)
2549                 ret = smu->ppt_funcs->display_disable_memory_clock_switch(smu, disable_memory_clock_switch);
2550
2551         mutex_unlock(&smu->mutex);
2552
2553         return ret;
2554 }
2555
2556 int smu_notify_smu_enable_pwe(struct smu_context *smu)
2557 {
2558         int ret = 0;
2559
2560         if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
2561                 return -EOPNOTSUPP;
2562
2563         mutex_lock(&smu->mutex);
2564
2565         if (smu->ppt_funcs->notify_smu_enable_pwe)
2566                 ret = smu->ppt_funcs->notify_smu_enable_pwe(smu);
2567
2568         mutex_unlock(&smu->mutex);
2569
2570         return ret;
2571 }
2572
2573 int smu_set_xgmi_pstate(struct smu_context *smu,
2574                         uint32_t pstate)
2575 {
2576         int ret = 0;
2577
2578         if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
2579                 return -EOPNOTSUPP;
2580
2581         mutex_lock(&smu->mutex);
2582
2583         if (smu->ppt_funcs->set_xgmi_pstate)
2584                 ret = smu->ppt_funcs->set_xgmi_pstate(smu, pstate);
2585
2586         mutex_unlock(&smu->mutex);
2587
2588         return ret;
2589 }
2590
2591 int smu_set_azalia_d3_pme(struct smu_context *smu)
2592 {
2593         int ret = 0;
2594
2595         if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
2596                 return -EOPNOTSUPP;
2597
2598         mutex_lock(&smu->mutex);
2599
2600         if (smu->ppt_funcs->set_azalia_d3_pme)
2601                 ret = smu->ppt_funcs->set_azalia_d3_pme(smu);
2602
2603         mutex_unlock(&smu->mutex);
2604
2605         return ret;
2606 }
2607
2608 /*
2609  * On system suspend or reset, the dpm_enabled flag is cleared
2610  * so that the SMU services which are no longer available get
2611  * gated.
2612  *
2613  * However, the baco/mode1 reset requests should still be allowed
2614  * as they remain supported and necessary across those transitions.
2615  */
2616 bool smu_baco_is_support(struct smu_context *smu)
2617 {
2618         bool ret = false;
2619
2620         if (!smu->pm_enabled)
2621                 return false;
2622
2623         mutex_lock(&smu->mutex);
2624
2625         if (smu->ppt_funcs && smu->ppt_funcs->baco_is_support)
2626                 ret = smu->ppt_funcs->baco_is_support(smu);
2627
2628         mutex_unlock(&smu->mutex);
2629
2630         return ret;
2631 }
2632
2633 int smu_baco_get_state(struct smu_context *smu, enum smu_baco_state *state)
2634 {
2635         if (!smu->ppt_funcs->baco_get_state)
2636                 return -EINVAL;
2637
2638         mutex_lock(&smu->mutex);
2639         *state = smu->ppt_funcs->baco_get_state(smu);
2640         mutex_unlock(&smu->mutex);
2641
2642         return 0;
2643 }
2644
2645 int smu_baco_enter(struct smu_context *smu)
2646 {
2647         int ret = 0;
2648
2649         if (!smu->pm_enabled)
2650                 return -EOPNOTSUPP;
2651
2652         mutex_lock(&smu->mutex);
2653
2654         if (smu->ppt_funcs->baco_enter)
2655                 ret = smu->ppt_funcs->baco_enter(smu);
2656
2657         mutex_unlock(&smu->mutex);
2658
2659         return ret;
2660 }
2661
2662 int smu_baco_exit(struct smu_context *smu)
2663 {
2664         int ret = 0;
2665
2666         if (!smu->pm_enabled)
2667                 return -EOPNOTSUPP;
2668
2669         mutex_lock(&smu->mutex);
2670
2671         if (smu->ppt_funcs->baco_exit)
2672                 ret = smu->ppt_funcs->baco_exit(smu);
2673
2674         mutex_unlock(&smu->mutex);
2675
2676         return ret;
2677 }
2678
2679 int smu_mode2_reset(struct smu_context *smu)
2680 {
2681         int ret = 0;
2682
2683         if (!smu->pm_enabled)
2684                 return -EOPNOTSUPP;
2685
2686         mutex_lock(&smu->mutex);
2687
2688         if (smu->ppt_funcs->mode2_reset)
2689                 ret = smu->ppt_funcs->mode2_reset(smu);
2690
2691         mutex_unlock(&smu->mutex);
2692
2693         return ret;
2694 }
2695
2696 int smu_get_max_sustainable_clocks_by_dc(struct smu_context *smu,
2697                                          struct pp_smu_nv_clock_table *max_clocks)
2698 {
2699         int ret = 0;
2700
2701         if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
2702                 return -EOPNOTSUPP;
2703
2704         mutex_lock(&smu->mutex);
2705
2706         if (smu->ppt_funcs->get_max_sustainable_clocks_by_dc)
2707                 ret = smu->ppt_funcs->get_max_sustainable_clocks_by_dc(smu, max_clocks);
2708
2709         mutex_unlock(&smu->mutex);
2710
2711         return ret;
2712 }
2713
2714 int smu_get_uclk_dpm_states(struct smu_context *smu,
2715                             unsigned int *clock_values_in_khz,
2716                             unsigned int *num_states)
2717 {
2718         int ret = 0;
2719
2720         if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
2721                 return -EOPNOTSUPP;
2722
2723         mutex_lock(&smu->mutex);
2724
2725         if (smu->ppt_funcs->get_uclk_dpm_states)
2726                 ret = smu->ppt_funcs->get_uclk_dpm_states(smu, clock_values_in_khz, num_states);
2727
2728         mutex_unlock(&smu->mutex);
2729
2730         return ret;
2731 }
2732
2733 enum amd_pm_state_type smu_get_current_power_state(struct smu_context *smu)
2734 {
2735         enum amd_pm_state_type pm_state = POWER_STATE_TYPE_DEFAULT;
2736
2737         if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
2738                 return -EOPNOTSUPP;
2739
2740         mutex_lock(&smu->mutex);
2741
2742         if (smu->ppt_funcs->get_current_power_state)
2743                 pm_state = smu->ppt_funcs->get_current_power_state(smu);
2744
2745         mutex_unlock(&smu->mutex);
2746
2747         return pm_state;
2748 }
2749
2750 int smu_get_dpm_clock_table(struct smu_context *smu,
2751                             struct dpm_clocks *clock_table)
2752 {
2753         int ret = 0;
2754
2755         if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
2756                 return -EOPNOTSUPP;
2757
2758         mutex_lock(&smu->mutex);
2759
2760         if (smu->ppt_funcs->get_dpm_clock_table)
2761                 ret = smu->ppt_funcs->get_dpm_clock_table(smu, clock_table);
2762
2763         mutex_unlock(&smu->mutex);
2764
2765         return ret;
2766 }
2767
2768 uint32_t smu_get_pptable_power_limit(struct smu_context *smu)
2769 {
2770         uint32_t ret = 0;
2771
2772         if (smu->ppt_funcs->get_pptable_power_limit)
2773                 ret = smu->ppt_funcs->get_pptable_power_limit(smu);
2774
2775         return ret;
2776 }
2777
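/*
 * VCN/JPEG power gating for APUs is routed through the DPM UVD/JPEG
 * enable helpers (gate == true maps to enable == false); on dGPUs these
 * wrappers are a no-op.
 */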
2778 int smu_powergate_vcn(struct smu_context *smu, bool gate)
2779 {
2780         if (!smu->is_apu)
2781                 return 0;
2782
2783         return smu_dpm_set_uvd_enable(smu, !gate);
2784 }
2785
2786 int smu_powergate_jpeg(struct smu_context *smu, bool gate)
2787 {
2788         if (!smu->is_apu)
2789                 return 0;
2790
2791         return smu_dpm_set_jpeg_enable(smu, !gate);
2792 }