drm/amd/powerplay: move table setting common code to smu_cmn.c
drivers/gpu/drm/amd/powerplay/amdgpu_smu.c
1 /*
2  * Copyright 2019 Advanced Micro Devices, Inc.
3  *
4  * Permission is hereby granted, free of charge, to any person obtaining a
5  * copy of this software and associated documentation files (the "Software"),
6  * to deal in the Software without restriction, including without limitation
7  * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8  * and/or sell copies of the Software, and to permit persons to whom the
9  * Software is furnished to do so, subject to the following conditions:
10  *
11  * The above copyright notice and this permission notice shall be included in
12  * all copies or substantial portions of the Software.
13  *
14  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
17  * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18  * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19  * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20  * OTHER DEALINGS IN THE SOFTWARE.
21  */
22
23 #include <linux/firmware.h>
24 #include <linux/pci.h>
25
26 #include "amdgpu.h"
27 #include "amdgpu_smu.h"
28 #include "smu_internal.h"
29 #include "smu_v11_0.h"
30 #include "smu_v12_0.h"
31 #include "atom.h"
32 #include "arcturus_ppt.h"
33 #include "navi10_ppt.h"
34 #include "sienna_cichlid_ppt.h"
35 #include "renoir_ppt.h"
36 #include "amd_pcie.h"
37 #include "smu_cmn.h"
38
39 /*
40  * DO NOT use these for err/warn/info/debug messages.
41  * Use dev_err, dev_warn, dev_info and dev_dbg instead.
42  * They are more MGPU friendly.
43  */
44 #undef pr_err
45 #undef pr_warn
46 #undef pr_info
47 #undef pr_debug
48
49 #undef __SMU_DUMMY_MAP
50 #define __SMU_DUMMY_MAP(type)   #type
51 static const char *__smu_message_names[] = {
52         SMU_MESSAGE_TYPES
53 };
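/*
 * Note: __SMU_DUMMY_MAP is redefined above to stringify each entry, so the
 * SMU_MESSAGE_TYPES X-macro expands into a name table indexed by
 * enum smu_message_type.  Illustrative sketch (hypothetical entries, not the
 * actual macro definition):
 *
 *   #define SMU_MESSAGE_TYPES              \
 *           __SMU_DUMMY_MAP(TestMessage),  \
 *           __SMU_DUMMY_MAP(GetSmuVersion),
 *
 * which, with __SMU_DUMMY_MAP(type) defined as #type, expands to
 * { "TestMessage", "GetSmuVersion", }.
 */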
54
55 const char *smu_get_message_name(struct smu_context *smu, enum smu_message_type type)
56 {
57         if (type < 0 || type >= SMU_MSG_MAX_COUNT)
58                 return "unknown smu message";
59         return __smu_message_names[type];
60 }
61
62 size_t smu_sys_get_pp_feature_mask(struct smu_context *smu, char *buf)
63 {
64         size_t size = 0;
65
66         if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
67                 return -EOPNOTSUPP;
68
69         mutex_lock(&smu->mutex);
70
71         size = smu_get_pp_feature_mask(smu, buf);
72
73         mutex_unlock(&smu->mutex);
74
75         return size;
76 }
77
78 int smu_sys_set_pp_feature_mask(struct smu_context *smu, uint64_t new_mask)
79 {
80         int ret = 0;
81
82         if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
83                 return -EOPNOTSUPP;
84
85         mutex_lock(&smu->mutex);
86
87         ret = smu_set_pp_feature_mask(smu, new_mask);
88
89         mutex_unlock(&smu->mutex);
90
91         return ret;
92 }
93
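/*
 * Query the current GFXOFF entry/exit status from the SMU.  This is only
 * supported on sw-SMU capable ASICs whose ppt code provides a
 * get_gfx_off_status callback; -EINVAL is returned otherwise.
 */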
94 int smu_get_status_gfxoff(struct amdgpu_device *adev, uint32_t *value)
95 {
96         int ret = 0;
97         struct smu_context *smu = &adev->smu;
98
99         if (is_support_sw_smu(adev) && smu->ppt_funcs->get_gfx_off_status)
100                 *value = smu_get_gfx_off_status(smu);
101         else
102                 ret = -EINVAL;
103
104         return ret;
105 }
106
107 int smu_set_soft_freq_range(struct smu_context *smu,
108                             enum smu_clk_type clk_type,
109                             uint32_t min,
110                             uint32_t max)
111 {
112         int ret = 0;
113
114         mutex_lock(&smu->mutex);
115
116         if (smu->ppt_funcs->set_soft_freq_limited_range)
117                 ret = smu->ppt_funcs->set_soft_freq_limited_range(smu,
118                                                                   clk_type,
119                                                                   min,
120                                                                   max);
121
122         mutex_unlock(&smu->mutex);
123
124         return ret;
125 }
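/*
 * Illustrative usage (hypothetical caller, example values are assumptions):
 * limiting SCLK to a 300-1000 MHz window would look like
 *
 *   ret = smu_set_soft_freq_range(smu, SMU_SCLK, 300, 1000);
 *
 * Frequencies are in MHz; the ASIC-specific set_soft_freq_limited_range
 * callback performs the actual range programming.
 */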
126
127 int smu_get_dpm_freq_range(struct smu_context *smu,
128                            enum smu_clk_type clk_type,
129                            uint32_t *min,
130                            uint32_t *max)
131 {
132         int ret = 0;
133
134         if (!min && !max)
135                 return -EINVAL;
136
137         mutex_lock(&smu->mutex);
138
139         if (smu->ppt_funcs->get_dpm_ultimate_freq)
140                 ret = smu->ppt_funcs->get_dpm_ultimate_freq(smu,
141                                                             clk_type,
142                                                             min,
143                                                             max);
144
145         mutex_unlock(&smu->mutex);
146
147         return ret;
148 }
149
150 /**
151  * smu_dpm_set_power_gate - power gate/ungate the specific IP block
152  *
153  * @smu:        smu_context pointer
154  * @block_type: the IP block to power gate/ungate
155  * @gate:       to power gate if true, ungate otherwise
156  *
157  * This API takes no smu->mutex lock of its own because:
158  * 1. It is either called by another IP block (gfx/sdma/vcn/uvd/vce),
159  *    and the caller already guarantees race-free access.
160  * 2. Or it is reached through a user request via
161  *    power_dpm_force_performance_level, in which case the smu->mutex
162  *    lock is already held by the parent API smu_force_performance_level.
163  */
164 int smu_dpm_set_power_gate(struct smu_context *smu, uint32_t block_type,
165                            bool gate)
166 {
167         int ret = 0;
168
169         if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
170                 return -EOPNOTSUPP;
171
172         switch (block_type) {
173         /*
174          * Some legacy code of amdgpu_vcn.c and vcn_v2*.c still uses
175          * AMD_IP_BLOCK_TYPE_UVD for VCN. So, here both of them are kept.
176          */
177         case AMD_IP_BLOCK_TYPE_UVD:
178         case AMD_IP_BLOCK_TYPE_VCN:
179                 ret = smu_dpm_set_vcn_enable(smu, !gate);
180                 if (ret)
181                         dev_err(smu->adev->dev, "Failed to power %s VCN!\n",
182                                 gate ? "gate" : "ungate");
183                 break;
184         case AMD_IP_BLOCK_TYPE_GFX:
185                 ret = smu_gfx_off_control(smu, gate);
186                 if (ret)
187                         dev_err(smu->adev->dev, "Failed to %s gfxoff!\n",
188                                 gate ? "enable" : "disable");
189                 break;
190         case AMD_IP_BLOCK_TYPE_SDMA:
191                 ret = smu_powergate_sdma(smu, gate);
192                 if (ret)
193                         dev_err(smu->adev->dev, "Failed to power %s SDMA!\n",
194                                 gate ? "gate" : "ungate");
195                 break;
196         case AMD_IP_BLOCK_TYPE_JPEG:
197                 ret = smu_dpm_set_jpeg_enable(smu, !gate);
198                 if (ret)
199                         dev_err(smu->adev->dev, "Failed to power %s JPEG!\n",
200                                 gate ? "gate" : "ungate");
201                 break;
202         default:
203                 dev_err(smu->adev->dev, "Unsupported block type!\n");
204                 return -EINVAL;
205         }
206
207         return ret;
208 }
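/*
 * Illustrative call (hypothetical example, mirroring how an IP block such as
 * VCN would power gate itself through this helper):
 *
 *   smu_dpm_set_power_gate(&adev->smu, AMD_IP_BLOCK_TYPE_VCN, true);
 */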
209
210 int smu_get_power_num_states(struct smu_context *smu,
211                              struct pp_states_info *state_info)
212 {
213         if (!state_info)
214                 return -EINVAL;
215
216         /* power states are not supported; report a single default state */
217         memset(state_info, 0, sizeof(struct pp_states_info));
218         state_info->nums = 1;
219         state_info->states[0] = POWER_STATE_TYPE_DEFAULT;
220
221         return 0;
222 }
223
224 bool is_support_sw_smu(struct amdgpu_device *adev)
225 {
226         if (adev->asic_type >= CHIP_ARCTURUS)
227                 return true;
228
229         return false;
230 }
231
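/*
 * Return the current powerplay table through *table: the hardcoded
 * (user-uploaded) table when one exists, the default table otherwise.
 * Returns the table size on success, a negative errno on failure.
 */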
232 int smu_sys_get_pp_table(struct smu_context *smu, void **table)
233 {
234         struct smu_table_context *smu_table = &smu->smu_table;
235         uint32_t powerplay_table_size;
236
237         if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
238                 return -EOPNOTSUPP;
239
240         if (!smu_table->power_play_table && !smu_table->hardcode_pptable)
241                 return -EINVAL;
242
243         mutex_lock(&smu->mutex);
244
245         if (smu_table->hardcode_pptable)
246                 *table = smu_table->hardcode_pptable;
247         else
248                 *table = smu_table->power_play_table;
249
250         powerplay_table_size = smu_table->power_play_table_size;
251
252         mutex_unlock(&smu->mutex);
253
254         return powerplay_table_size;
255 }
256
257 int smu_sys_set_pp_table(struct smu_context *smu,  void *buf, size_t size)
258 {
259         struct smu_table_context *smu_table = &smu->smu_table;
260         ATOM_COMMON_TABLE_HEADER *header = (ATOM_COMMON_TABLE_HEADER *)buf;
261         int ret = 0;
262
263         if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
264                 return -EOPNOTSUPP;
265
266         if (header->usStructureSize != size) {
267                 dev_err(smu->adev->dev, "pp table size not matched !\n");
268                 return -EIO;
269         }
270
271         mutex_lock(&smu->mutex);
272         if (!smu_table->hardcode_pptable)
273                 smu_table->hardcode_pptable = kzalloc(size, GFP_KERNEL);
274         if (!smu_table->hardcode_pptable) {
275                 ret = -ENOMEM;
276                 goto failed;
277         }
278
279         memcpy(smu_table->hardcode_pptable, buf, size);
280         smu_table->power_play_table = smu_table->hardcode_pptable;
281         smu_table->power_play_table_size = size;
282
283         /*
284          * A special hw_fini action (for Navi1x, the DPMs disablement will be
285          * skipped) may be needed for custom pptable uploading.
286          */
287         smu->uploading_custom_pp_table = true;
288
289         ret = smu_reset(smu);
290         if (ret)
291                 dev_info(smu->adev->dev, "smu reset failed, ret = %d\n", ret);
292
293         smu->uploading_custom_pp_table = false;
294
295 failed:
296         mutex_unlock(&smu->mutex);
297         return ret;
298 }
299
300 static int smu_get_driver_allowed_feature_mask(struct smu_context *smu)
301 {
302         struct smu_feature *feature = &smu->smu_feature;
303         int ret = 0;
304         uint32_t allowed_feature_mask[SMU_FEATURE_MAX/32];
305
306         mutex_lock(&feature->mutex);
307         bitmap_zero(feature->allowed, SMU_FEATURE_MAX);
308         mutex_unlock(&feature->mutex);
309
310         ret = smu_get_allowed_feature_mask(smu, allowed_feature_mask,
311                                              SMU_FEATURE_MAX/32);
312         if (ret)
313                 return ret;
314
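        /*
         * allowed_feature_mask is filled by the ASIC code as an array of
         * 32-bit words and OR-ed into the 'allowed' bitmap below.
         * Illustratively (assumed values, little-endian host), a hardware
         * mask of 0x0000000300000001ULL would arrive as
         *   allowed_feature_mask[0] = 0x00000001;
         *   allowed_feature_mask[1] = 0x00000003;
         * and would set bits 0, 32 and 33 of feature->allowed.
         */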
315         mutex_lock(&feature->mutex);
316         bitmap_or(feature->allowed, feature->allowed,
317                       (unsigned long *)allowed_feature_mask,
318                       feature->feature_num);
319         mutex_unlock(&feature->mutex);
320
321         return ret;
322 }
323
324 static int smu_set_funcs(struct amdgpu_device *adev)
325 {
326         struct smu_context *smu = &adev->smu;
327
328         if (adev->pm.pp_feature & PP_OVERDRIVE_MASK)
329                 smu->od_enabled = true;
330
331         switch (adev->asic_type) {
332         case CHIP_NAVI10:
333         case CHIP_NAVI14:
334         case CHIP_NAVI12:
335                 navi10_set_ppt_funcs(smu);
336                 break;
337         case CHIP_ARCTURUS:
338                 adev->pm.pp_feature &= ~PP_GFXOFF_MASK;
339                 arcturus_set_ppt_funcs(smu);
340                 /* OD is not supported on Arcturus */
341                 smu->od_enabled = false;
342                 break;
343         case CHIP_SIENNA_CICHLID:
344         case CHIP_NAVY_FLOUNDER:
345                 sienna_cichlid_set_ppt_funcs(smu);
346                 break;
347         case CHIP_RENOIR:
348                 renoir_set_ppt_funcs(smu);
349                 break;
350         default:
351                 return -EINVAL;
352         }
353
354         return 0;
355 }
356
357 static int smu_early_init(void *handle)
358 {
359         struct amdgpu_device *adev = (struct amdgpu_device *)handle;
360         struct smu_context *smu = &adev->smu;
361
362         smu->adev = adev;
363         smu->pm_enabled = !!amdgpu_dpm;
364         smu->is_apu = false;
365         mutex_init(&smu->mutex);
366
367         return smu_set_funcs(adev);
368 }
369
370 static int smu_late_init(void *handle)
371 {
372         struct amdgpu_device *adev = (struct amdgpu_device *)handle;
373         struct smu_context *smu = &adev->smu;
374         int ret = 0;
375
376         if (!smu->pm_enabled)
377                 return 0;
378
379         ret = smu_set_default_od_settings(smu);
380         if (ret) {
381                 dev_err(adev->dev, "Failed to setup default OD settings!\n");
382                 return ret;
383         }
384
385         /*
386          * Populate the dpm tables context with the initial values read from
387          * the vbios (gfxclk, memclk, dcefclk, etc.) and enable the DPM
388          * feature for each clock type.
389          */
390         ret = smu_set_default_dpm_table(smu);
391         if (ret) {
392                 dev_err(adev->dev, "Failed to setup default dpm clock tables!\n");
393                 return ret;
394         }
395
396         ret = smu_populate_umd_state_clk(smu);
397         if (ret) {
398                 dev_err(adev->dev, "Failed to populate UMD state clocks!\n");
399                 return ret;
400         }
401
402         ret = smu_get_asic_power_limits(smu);
403         if (ret) {
404                 dev_err(adev->dev, "Failed to get asic power limits!\n");
405                 return ret;
406         }
407
408         smu_get_unique_id(smu);
409
410         smu_handle_task(&adev->smu,
411                         smu->smu_dpm.dpm_level,
412                         AMD_PP_TASK_COMPLETE_INIT,
413                         false);
414
415         return 0;
416 }
417
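/*
 * Allocate the VRAM backing objects used for SMC table exchange: one BO for
 * the PM status log (tool table) and one driver table BO sized to the
 * largest remaining SMC table, so that any table can be staged through it.
 */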
418 static int smu_init_fb_allocations(struct smu_context *smu)
419 {
420         struct amdgpu_device *adev = smu->adev;
421         struct smu_table_context *smu_table = &smu->smu_table;
422         struct smu_table *tables = smu_table->tables;
423         struct smu_table *driver_table = &(smu_table->driver_table);
424         uint32_t max_table_size = 0;
425         int ret, i;
426
427         /* VRAM allocation for tool table */
428         if (tables[SMU_TABLE_PMSTATUSLOG].size) {
429                 ret = amdgpu_bo_create_kernel(adev,
430                                               tables[SMU_TABLE_PMSTATUSLOG].size,
431                                               tables[SMU_TABLE_PMSTATUSLOG].align,
432                                               tables[SMU_TABLE_PMSTATUSLOG].domain,
433                                               &tables[SMU_TABLE_PMSTATUSLOG].bo,
434                                               &tables[SMU_TABLE_PMSTATUSLOG].mc_address,
435                                               &tables[SMU_TABLE_PMSTATUSLOG].cpu_addr);
436                 if (ret) {
437                         dev_err(adev->dev, "VRAM allocation for tool table failed!\n");
438                         return ret;
439                 }
440         }
441
442         /* VRAM allocation for driver table */
443         for (i = 0; i < SMU_TABLE_COUNT; i++) {
444                 if (tables[i].size == 0)
445                         continue;
446
447                 if (i == SMU_TABLE_PMSTATUSLOG)
448                         continue;
449
450                 if (max_table_size < tables[i].size)
451                         max_table_size = tables[i].size;
452         }
453
454         driver_table->size = max_table_size;
455         driver_table->align = PAGE_SIZE;
456         driver_table->domain = AMDGPU_GEM_DOMAIN_VRAM;
457
458         ret = amdgpu_bo_create_kernel(adev,
459                                       driver_table->size,
460                                       driver_table->align,
461                                       driver_table->domain,
462                                       &driver_table->bo,
463                                       &driver_table->mc_address,
464                                       &driver_table->cpu_addr);
465         if (ret) {
466                 dev_err(adev->dev, "VRAM allocation for driver table failed!\n");
467                 if (tables[SMU_TABLE_PMSTATUSLOG].mc_address)
468                         amdgpu_bo_free_kernel(&tables[SMU_TABLE_PMSTATUSLOG].bo,
469                                               &tables[SMU_TABLE_PMSTATUSLOG].mc_address,
470                                               &tables[SMU_TABLE_PMSTATUSLOG].cpu_addr);
471         }
472
473         return ret;
474 }
475
476 static int smu_fini_fb_allocations(struct smu_context *smu)
477 {
478         struct smu_table_context *smu_table = &smu->smu_table;
479         struct smu_table *tables = smu_table->tables;
480         struct smu_table *driver_table = &(smu_table->driver_table);
481
482         if (!tables)
483                 return 0;
484
485         if (tables[SMU_TABLE_PMSTATUSLOG].mc_address)
486                 amdgpu_bo_free_kernel(&tables[SMU_TABLE_PMSTATUSLOG].bo,
487                                       &tables[SMU_TABLE_PMSTATUSLOG].mc_address,
488                                       &tables[SMU_TABLE_PMSTATUSLOG].cpu_addr);
489
490         amdgpu_bo_free_kernel(&driver_table->bo,
491                               &driver_table->mc_address,
492                               &driver_table->cpu_addr);
493
494         return 0;
495 }
496
497 /**
498  * smu_alloc_memory_pool - allocate memory pool in the system memory
499  *
500  * @smu: smu_context pointer
501  *
502  * This memory pool is allocated for SMC use; its location is reported to
503  * the SMC via the SetSystemVirtualDramAddr and DramLogSetDramAddr messages.
504  *
505  * Returns 0 on success, error on failure.
506  */
507 static int smu_alloc_memory_pool(struct smu_context *smu)
508 {
509         struct amdgpu_device *adev = smu->adev;
510         struct smu_table_context *smu_table = &smu->smu_table;
511         struct smu_table *memory_pool = &smu_table->memory_pool;
512         uint64_t pool_size = smu->pool_size;
513         int ret = 0;
514
515         if (pool_size == SMU_MEMORY_POOL_SIZE_ZERO)
516                 return ret;
517
518         memory_pool->size = pool_size;
519         memory_pool->align = PAGE_SIZE;
520         memory_pool->domain = AMDGPU_GEM_DOMAIN_GTT;
521
522         switch (pool_size) {
523         case SMU_MEMORY_POOL_SIZE_256_MB:
524         case SMU_MEMORY_POOL_SIZE_512_MB:
525         case SMU_MEMORY_POOL_SIZE_1_GB:
526         case SMU_MEMORY_POOL_SIZE_2_GB:
527                 ret = amdgpu_bo_create_kernel(adev,
528                                               memory_pool->size,
529                                               memory_pool->align,
530                                               memory_pool->domain,
531                                               &memory_pool->bo,
532                                               &memory_pool->mc_address,
533                                               &memory_pool->cpu_addr);
534                 if (ret)
535                         dev_err(adev->dev, "VRAM allocation for dramlog failed!\n");
536                 break;
537         default:
538                 break;
539         }
540
541         return ret;
542 }
543
544 static int smu_free_memory_pool(struct smu_context *smu)
545 {
546         struct smu_table_context *smu_table = &smu->smu_table;
547         struct smu_table *memory_pool = &smu_table->memory_pool;
548
549         if (memory_pool->size == SMU_MEMORY_POOL_SIZE_ZERO)
550                 return 0;
551
552         amdgpu_bo_free_kernel(&memory_pool->bo,
553                               &memory_pool->mc_address,
554                               &memory_pool->cpu_addr);
555
556         memset(memory_pool, 0, sizeof(struct smu_table));
557
558         return 0;
559 }
560
561 static int smu_smc_table_sw_init(struct smu_context *smu)
562 {
563         int ret;
564
565         /**
566          * Create the smu_table structure, and init smc tables such as
567          * TABLE_PPTABLE, TABLE_WATERMARKS, TABLE_SMU_METRICS, etc.
568          */
569         ret = smu_init_smc_tables(smu);
570         if (ret) {
571                 dev_err(smu->adev->dev, "Failed to init smc tables!\n");
572                 return ret;
573         }
574
575         /**
576          * Create the smu_power_context structure, and allocate the
577          * smu_dpm_context and context data needed to fill it.
578          */
579         ret = smu_init_power(smu);
580         if (ret) {
581                 dev_err(smu->adev->dev, "Failed to init smu power!\n");
582                 return ret;
583         }
584
585         /*
586          * allocate vram bos to store smc table contents.
587          */
588         ret = smu_init_fb_allocations(smu);
589         if (ret)
590                 return ret;
591
592         ret = smu_alloc_memory_pool(smu);
593         if (ret)
594                 return ret;
595
596         return 0;
597 }
598
599 static int smu_smc_table_sw_fini(struct smu_context *smu)
600 {
601         int ret;
602
603         ret = smu_free_memory_pool(smu);
604         if (ret)
605                 return ret;
606
607         ret = smu_fini_fb_allocations(smu);
608         if (ret)
609                 return ret;
610
611         ret = smu_fini_power(smu);
612         if (ret) {
613                 dev_err(smu->adev->dev, "Failed to fini smu power!\n");
614                 return ret;
615         }
616
617         ret = smu_fini_smc_tables(smu);
618         if (ret) {
619                 dev_err(smu->adev->dev, "Failed to fini smc tables!\n");
620                 return ret;
621         }
622
623         return 0;
624 }
625
626 static void smu_throttling_logging_work_fn(struct work_struct *work)
627 {
628         struct smu_context *smu = container_of(work, struct smu_context,
629                                                throttling_logging_work);
630
631         smu_log_thermal_throttling(smu);
632 }
633
634 static int smu_sw_init(void *handle)
635 {
636         struct amdgpu_device *adev = (struct amdgpu_device *)handle;
637         struct smu_context *smu = &adev->smu;
638         int ret;
639
640         smu->pool_size = adev->pm.smu_prv_buffer_size;
641         smu->smu_feature.feature_num = SMU_FEATURE_MAX;
642         mutex_init(&smu->smu_feature.mutex);
643         bitmap_zero(smu->smu_feature.supported, SMU_FEATURE_MAX);
644         bitmap_zero(smu->smu_feature.enabled, SMU_FEATURE_MAX);
645         bitmap_zero(smu->smu_feature.allowed, SMU_FEATURE_MAX);
646
647         mutex_init(&smu->smu_baco.mutex);
648         smu->smu_baco.state = SMU_BACO_STATE_EXIT;
649         smu->smu_baco.platform_support = false;
650
651         mutex_init(&smu->sensor_lock);
652         mutex_init(&smu->metrics_lock);
653         mutex_init(&smu->message_lock);
654
655         INIT_WORK(&smu->throttling_logging_work, smu_throttling_logging_work_fn);
656         smu->watermarks_bitmap = 0;
657         smu->power_profile_mode = PP_SMC_POWER_PROFILE_BOOTUP_DEFAULT;
658         smu->default_power_profile_mode = PP_SMC_POWER_PROFILE_BOOTUP_DEFAULT;
659
660         smu->workload_mask = 1 << smu->workload_prority[PP_SMC_POWER_PROFILE_BOOTUP_DEFAULT];
661         smu->workload_prority[PP_SMC_POWER_PROFILE_BOOTUP_DEFAULT] = 0;
662         smu->workload_prority[PP_SMC_POWER_PROFILE_FULLSCREEN3D] = 1;
663         smu->workload_prority[PP_SMC_POWER_PROFILE_POWERSAVING] = 2;
664         smu->workload_prority[PP_SMC_POWER_PROFILE_VIDEO] = 3;
665         smu->workload_prority[PP_SMC_POWER_PROFILE_VR] = 4;
666         smu->workload_prority[PP_SMC_POWER_PROFILE_COMPUTE] = 5;
667         smu->workload_prority[PP_SMC_POWER_PROFILE_CUSTOM] = 6;
668
669         smu->workload_setting[0] = PP_SMC_POWER_PROFILE_BOOTUP_DEFAULT;
670         smu->workload_setting[1] = PP_SMC_POWER_PROFILE_FULLSCREEN3D;
671         smu->workload_setting[2] = PP_SMC_POWER_PROFILE_POWERSAVING;
672         smu->workload_setting[3] = PP_SMC_POWER_PROFILE_VIDEO;
673         smu->workload_setting[4] = PP_SMC_POWER_PROFILE_VR;
674         smu->workload_setting[5] = PP_SMC_POWER_PROFILE_COMPUTE;
675         smu->workload_setting[6] = PP_SMC_POWER_PROFILE_CUSTOM;
676         smu->display_config = &adev->pm.pm_display_cfg;
677
678         smu->smu_dpm.dpm_level = AMD_DPM_FORCED_LEVEL_AUTO;
679         smu->smu_dpm.requested_dpm_level = AMD_DPM_FORCED_LEVEL_AUTO;
680         ret = smu_init_microcode(smu);
681         if (ret) {
682                 dev_err(adev->dev, "Failed to load smu firmware!\n");
683                 return ret;
684         }
685
686         ret = smu_smc_table_sw_init(smu);
687         if (ret) {
688                 dev_err(adev->dev, "Failed to sw init smc table!\n");
689                 return ret;
690         }
691
692         ret = smu_register_irq_handler(smu);
693         if (ret) {
694                 dev_err(adev->dev, "Failed to register smc irq handler!\n");
695                 return ret;
696         }
697
698         return 0;
699 }
700
701 static int smu_sw_fini(void *handle)
702 {
703         struct amdgpu_device *adev = (struct amdgpu_device *)handle;
704         struct smu_context *smu = &adev->smu;
705         int ret;
706
707         ret = smu_smc_table_sw_fini(smu);
708         if (ret) {
709                 dev_err(adev->dev, "Failed to sw fini smc table!\n");
710                 return ret;
711         }
712
713         smu_fini_microcode(smu);
714
715         return 0;
716 }
717
718 static int smu_get_thermal_temperature_range(struct smu_context *smu)
719 {
720         struct amdgpu_device *adev = smu->adev;
721         struct smu_temperature_range *range =
722                                 &smu->thermal_range;
723         int ret = 0;
724
725         if (!smu->ppt_funcs->get_thermal_temperature_range)
726                 return 0;
727
728         ret = smu->ppt_funcs->get_thermal_temperature_range(smu, range);
729         if (ret)
730                 return ret;
731
732         adev->pm.dpm.thermal.min_temp = range->min;
733         adev->pm.dpm.thermal.max_temp = range->max;
734         adev->pm.dpm.thermal.max_edge_emergency_temp = range->edge_emergency_max;
735         adev->pm.dpm.thermal.min_hotspot_temp = range->hotspot_min;
736         adev->pm.dpm.thermal.max_hotspot_crit_temp = range->hotspot_crit_max;
737         adev->pm.dpm.thermal.max_hotspot_emergency_temp = range->hotspot_emergency_max;
738         adev->pm.dpm.thermal.min_mem_temp = range->mem_min;
739         adev->pm.dpm.thermal.max_mem_crit_temp = range->mem_crit_max;
740         adev->pm.dpm.thermal.max_mem_emergency_temp = range->mem_emergency_max;
741
742         return ret;
743 }
744
745 static int smu_smc_hw_setup(struct smu_context *smu)
746 {
747         struct amdgpu_device *adev = smu->adev;
748         uint32_t pcie_gen = 0, pcie_width = 0;
749         int ret;
750
751         if (smu_is_dpm_running(smu) && adev->in_suspend) {
752                 dev_info(adev->dev, "dpm has been enabled\n");
753                 return 0;
754         }
755
756         ret = smu_init_display_count(smu, 0);
757         if (ret) {
758                 dev_info(adev->dev, "Failed to pre-set display count as 0!\n");
759                 return ret;
760         }
761
762         ret = smu_set_driver_table_location(smu);
763         if (ret) {
764                 dev_err(adev->dev, "Failed to SetDriverDramAddr!\n");
765                 return ret;
766         }
767
768         /*
769          * Set PMSTATUSLOG table bo address with SetToolsDramAddr MSG for tools.
770          */
771         ret = smu_set_tool_table_location(smu);
772         if (ret) {
773                 dev_err(adev->dev, "Failed to SetToolsDramAddr!\n");
774                 return ret;
775         }
776
777         /*
778          * Notify the SMC of the memory pool location via the
779          * SetSystemVirtualDramAddr and DramLogSetDramAddr messages.
780          */
781         ret = smu_notify_memory_pool_location(smu);
782         if (ret) {
783                 dev_err(adev->dev, "Failed to SetDramLogDramAddr!\n");
784                 return ret;
785         }
786
787         /* smu_dump_pptable(smu); */
788         /*
789          * Copy pptable bo in the vram to smc with SMU MSGs such as
790          * SetDriverDramAddr and TransferTableDram2Smu.
791          */
792         ret = smu_write_pptable(smu);
793         if (ret) {
794                 dev_err(adev->dev, "Failed to transfer pptable to SMC!\n");
795                 return ret;
796         }
797
798         /* issue Run*Btc msg */
799         ret = smu_run_btc(smu);
800         if (ret)
801                 return ret;
802
803         ret = smu_feature_set_allowed_mask(smu);
804         if (ret) {
805                 dev_err(adev->dev, "Failed to set driver allowed features mask!\n");
806                 return ret;
807         }
808
809         ret = smu_system_features_control(smu, true);
810         if (ret) {
811                 dev_err(adev->dev, "Failed to enable requested dpm features!\n");
812                 return ret;
813         }
814
815         if (!smu_is_dpm_running(smu))
816                 dev_info(adev->dev, "dpm has been disabled\n");
817
818         if (adev->pm.pcie_gen_mask & CAIL_PCIE_LINK_SPEED_SUPPORT_GEN4)
819                 pcie_gen = 3;
820         else if (adev->pm.pcie_gen_mask & CAIL_PCIE_LINK_SPEED_SUPPORT_GEN3)
821                 pcie_gen = 2;
822         else if (adev->pm.pcie_gen_mask & CAIL_PCIE_LINK_SPEED_SUPPORT_GEN2)
823                 pcie_gen = 1;
824         else if (adev->pm.pcie_gen_mask & CAIL_PCIE_LINK_SPEED_SUPPORT_GEN1)
825                 pcie_gen = 0;
826
827         /* Bit 31:16: LCLK DPM level. 0 is DPM0, and 1 is DPM1
828          * Bit 15:8:  PCIE GEN, 0 to 3 corresponds to GEN1 to GEN4
829          * Bit 7:0:   PCIE lane width, 1 to 7 corresponds to x1 to x32
830          */
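        /*
         * Illustrative packing of the override argument (a sketch only; the
         * actual message composition is done in the ASIC-specific ppt code):
         *
         *   smu_pcie_arg = (lclk_dpm_level << 16) | (pcie_gen << 8) | pcie_width;
         */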
831         if (adev->pm.pcie_mlw_mask & CAIL_PCIE_LINK_WIDTH_SUPPORT_X16)
832                 pcie_width = 6;
833         else if (adev->pm.pcie_mlw_mask & CAIL_PCIE_LINK_WIDTH_SUPPORT_X12)
834                 pcie_width = 5;
835         else if (adev->pm.pcie_mlw_mask & CAIL_PCIE_LINK_WIDTH_SUPPORT_X8)
836                 pcie_width = 4;
837         else if (adev->pm.pcie_mlw_mask & CAIL_PCIE_LINK_WIDTH_SUPPORT_X4)
838                 pcie_width = 3;
839         else if (adev->pm.pcie_mlw_mask & CAIL_PCIE_LINK_WIDTH_SUPPORT_X2)
840                 pcie_width = 2;
841         else if (adev->pm.pcie_mlw_mask & CAIL_PCIE_LINK_WIDTH_SUPPORT_X1)
842                 pcie_width = 1;
843         ret = smu_update_pcie_parameters(smu, pcie_gen, pcie_width);
844         if (ret) {
845                 dev_err(adev->dev, "Attempt to override pcie params failed!\n");
846                 return ret;
847         }
848
849         ret = smu_get_thermal_temperature_range(smu);
850         if (ret) {
851                 dev_err(adev->dev, "Failed to get thermal temperature ranges!\n");
852                 return ret;
853         }
854
855         ret = smu_enable_thermal_alert(smu);
856         if (ret) {
857                 dev_err(adev->dev, "Failed to enable thermal alert!\n");
858                 return ret;
859         }
860
861         ret = smu_i2c_eeprom_init(smu, &adev->pm.smu_i2c);
862         if (ret)
863                 return ret;
864
865         ret = smu_disable_umc_cdr_12gbps_workaround(smu);
866         if (ret) {
867                 dev_err(adev->dev, "Workaround failed to disable UMC CDR feature on 12Gbps SKU!\n");
868                 return ret;
869         }
870
871         /*
872          * For Navi1X, manually switch to AC mode as the PMFW
873          * may have booted in DC mode.
874          */
875         ret = smu_set_power_source(smu,
876                                    adev->pm.ac_power ? SMU_POWER_SOURCE_AC :
877                                    SMU_POWER_SOURCE_DC);
878         if (ret) {
879                 dev_err(adev->dev, "Failed to switch to %s mode!\n", adev->pm.ac_power ? "AC" : "DC");
880                 return ret;
881         }
882
883         ret = smu_notify_display_change(smu);
884         if (ret)
885                 return ret;
886
887         /*
888          * Set min deep sleep dcefclk with the bootup value from vbios via
889          * SetMinDeepSleepDcefclk MSG.
890          */
891         ret = smu_set_min_dcef_deep_sleep(smu,
892                                           smu->smu_table.boot_values.dcefclk / 100);
893         if (ret)
894                 return ret;
895
896         return ret;
897 }
898
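/*
 * Bring the SMC up: load the SMU microcode directly when firmware loading is
 * not handled by the PSP (pre-Navi10 only), check that the firmware is up and
 * running, then verify the driver interface version against the firmware.
 */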
899 static int smu_start_smc_engine(struct smu_context *smu)
900 {
901         struct amdgpu_device *adev = smu->adev;
902         int ret = 0;
903
904         if (adev->firmware.load_type != AMDGPU_FW_LOAD_PSP) {
905                 if (adev->asic_type < CHIP_NAVI10) {
906                         if (smu->ppt_funcs->load_microcode) {
907                                 ret = smu->ppt_funcs->load_microcode(smu);
908                                 if (ret)
909                                         return ret;
910                         }
911                 }
912         }
913
914         if (smu->ppt_funcs->check_fw_status) {
915                 ret = smu->ppt_funcs->check_fw_status(smu);
916                 if (ret) {
917                         dev_err(adev->dev, "SMC is not ready\n");
918                         return ret;
919                 }
920         }
921
922         /*
923          * Send the GetDriverIfVersion message and check that the returned
924          * value matches the DRIVER_IF_VERSION in the smc header.
925          */
926         ret = smu_check_fw_version(smu);
927         if (ret)
928                 return ret;
929
930         return ret;
931 }
932
933 static int smu_hw_init(void *handle)
934 {
935         int ret;
936         struct amdgpu_device *adev = (struct amdgpu_device *)handle;
937         struct smu_context *smu = &adev->smu;
938
939         if (amdgpu_sriov_vf(adev) && !amdgpu_sriov_is_pp_one_vf(adev)) {
940                 smu->pm_enabled = false;
941                 return 0;
942         }
943
944         ret = smu_start_smc_engine(smu);
945         if (ret) {
946                 dev_err(adev->dev, "SMC engine is not correctly up!\n");
947                 return ret;
948         }
949
950         if (smu->is_apu) {
951                 smu_powergate_sdma(&adev->smu, false);
952                 smu_dpm_set_vcn_enable(smu, true);
953                 smu_dpm_set_jpeg_enable(smu, true);
954                 smu_set_gfx_cgpg(&adev->smu, true);
955         }
956
957         if (!smu->pm_enabled)
958                 return 0;
959
960         /* get boot_values from vbios to set revision, gfxclk, etc. */
961         ret = smu_get_vbios_bootup_values(smu);
962         if (ret) {
963                 dev_err(adev->dev, "Failed to get VBIOS boot clock values!\n");
964                 return ret;
965         }
966
967         ret = smu_setup_pptable(smu);
968         if (ret) {
969                 dev_err(adev->dev, "Failed to setup pptable!\n");
970                 return ret;
971         }
972
973         ret = smu_get_driver_allowed_feature_mask(smu);
974         if (ret)
975                 return ret;
976
977         ret = smu_smc_hw_setup(smu);
978         if (ret) {
979                 dev_err(adev->dev, "Failed to setup smc hw!\n");
980                 return ret;
981         }
982
983         /*
984          * Retrieve the maximum sustainable clocks here considering:
985          * 1. It is not needed on resume (from S3).
986          * 2. DAL settings come between .hw_init and .late_init of SMU,
987          *    and DAL needs to know the maximum sustainable clocks. Thus
988          *    it cannot be put in .late_init().
989          */
990         ret = smu_init_max_sustainable_clocks(smu);
991         if (ret) {
992                 dev_err(adev->dev, "Failed to init max sustainable clocks!\n");
993                 return ret;
994         }
995
996         adev->pm.dpm_enabled = true;
997
998         dev_info(adev->dev, "SMU is initialized successfully!\n");
999
1000         return 0;
1001 }
1002
1003 static int smu_disable_dpms(struct smu_context *smu)
1004 {
1005         struct amdgpu_device *adev = smu->adev;
1006         int ret = 0;
1007         bool use_baco = !smu->is_apu &&
1008                 ((adev->in_gpu_reset &&
1009                   (amdgpu_asic_reset_method(adev) == AMD_RESET_METHOD_BACO)) ||
1010                  ((adev->in_runpm || adev->in_hibernate) && amdgpu_asic_supports_baco(adev)));
1011
1012         /*
1013          * For custom pptable uploading, skip the DPM features
1014          * disable process on Navi1x ASICs.
1015          *   - As the gfx related features are under control of
1016          *     RLC on those ASICs. RLC reinitialization will be
1017          *     needed to reenable them. That would cost much more
1018          *     effort.
1019          *
1020          *   - SMU firmware can handle the DPM reenablement
1021          *     properly.
1022          */
1023         if (smu->uploading_custom_pp_table &&
1024             (adev->asic_type >= CHIP_NAVI10) &&
1025             (adev->asic_type <= CHIP_NAVI12))
1026                 return 0;
1027
1028         /*
1029          * For Sienna_Cichlid, PMFW will handle the features disablement properly
1030          * on BACO entry. Driver involvement is unnecessary.
1031          */
1032         if ((adev->asic_type == CHIP_SIENNA_CICHLID) &&
1033              use_baco)
1034                 return 0;
1035
1036         /*
1037          * For gpu reset, runpm and hibernation through BACO,
1038          * BACO feature has to be kept enabled.
1039          */
1040         if (use_baco && smu_feature_is_enabled(smu, SMU_FEATURE_BACO_BIT)) {
1041                 ret = smu_disable_all_features_with_exception(smu,
1042                                                               SMU_FEATURE_BACO_BIT);
1043                 if (ret)
1044                         dev_err(adev->dev, "Failed to disable smu features except BACO.\n");
1045         } else {
1046                 ret = smu_system_features_control(smu, false);
1047                 if (ret)
1048                         dev_err(adev->dev, "Failed to disable smu features.\n");
1049         }
1050
1051         if (adev->asic_type >= CHIP_NAVI10 &&
1052             adev->gfx.rlc.funcs->stop)
1053                 adev->gfx.rlc.funcs->stop(adev);
1054
1055         return ret;
1056 }
1057
1058 static int smu_smc_hw_cleanup(struct smu_context *smu)
1059 {
1060         struct amdgpu_device *adev = smu->adev;
1061         int ret = 0;
1062
1063         smu_i2c_eeprom_fini(smu, &adev->pm.smu_i2c);
1064
1065         cancel_work_sync(&smu->throttling_logging_work);
1066
1067         ret = smu_disable_thermal_alert(smu);
1068         if (ret) {
1069                 dev_err(adev->dev, "Failed to disable thermal alert!\n");
1070                 return ret;
1071         }
1072
1073         ret = smu_disable_dpms(smu);
1074         if (ret) {
1075                 dev_err(adev->dev, "Failed to disable dpm features!\n");
1076                 return ret;
1077         }
1078
1079         return 0;
1080 }
1081
1082 static int smu_hw_fini(void *handle)
1083 {
1084         struct amdgpu_device *adev = (struct amdgpu_device *)handle;
1085         struct smu_context *smu = &adev->smu;
1086         int ret = 0;
1087
1088         if (amdgpu_sriov_vf(adev) && !amdgpu_sriov_is_pp_one_vf(adev))
1089                 return 0;
1090
1091         if (smu->is_apu) {
1092                 smu_powergate_sdma(&adev->smu, true);
1093                 smu_dpm_set_vcn_enable(smu, false);
1094                 smu_dpm_set_jpeg_enable(smu, false);
1095         }
1096
1097         if (!smu->pm_enabled)
1098                 return 0;
1099
1100         adev->pm.dpm_enabled = false;
1101
1102         ret = smu_smc_hw_cleanup(smu);
1103         if (ret)
1104                 return ret;
1105
1106         return 0;
1107 }
1108
1109 int smu_reset(struct smu_context *smu)
1110 {
1111         struct amdgpu_device *adev = smu->adev;
1112         int ret = 0;
1113
1114         ret = smu_hw_fini(adev);
1115         if (ret)
1116                 return ret;
1117
1118         ret = smu_hw_init(adev);
1119         if (ret)
1120                 return ret;
1121
1122         ret = smu_late_init(adev);
1123
1124         return ret;
1125 }
1126
1127 static int smu_suspend(void *handle)
1128 {
1129         struct amdgpu_device *adev = (struct amdgpu_device *)handle;
1130         struct smu_context *smu = &adev->smu;
1131         int ret;
1132
1133         if (amdgpu_sriov_vf(adev) && !amdgpu_sriov_is_pp_one_vf(adev))
1134                 return 0;
1135
1136         if (!smu->pm_enabled)
1137                 return 0;
1138
1139         adev->pm.dpm_enabled = false;
1140
1141         ret = smu_smc_hw_cleanup(smu);
1142         if (ret)
1143                 return ret;
1144
1145         smu->watermarks_bitmap &= ~(WATERMARKS_LOADED);
1146
1147         if (smu->is_apu)
1148                 smu_set_gfx_cgpg(&adev->smu, false);
1149
1150         return 0;
1151 }
1152
1153 static int smu_resume(void *handle)
1154 {
1155         int ret;
1156         struct amdgpu_device *adev = (struct amdgpu_device *)handle;
1157         struct smu_context *smu = &adev->smu;
1158
1159         if (amdgpu_sriov_vf(adev) && !amdgpu_sriov_is_pp_one_vf(adev))
1160                 return 0;
1161
1162         if (!smu->pm_enabled)
1163                 return 0;
1164
1165         dev_info(adev->dev, "SMU is resuming...\n");
1166
1167         ret = smu_start_smc_engine(smu);
1168         if (ret) {
1169                 dev_err(adev->dev, "SMC engine is not correctly up!\n");
1170                 return ret;
1171         }
1172
1173         ret = smu_smc_hw_setup(smu);
1174         if (ret) {
1175                 dev_err(adev->dev, "Failed to setup smc hw!\n");
1176                 return ret;
1177         }
1178
1179         if (smu->is_apu)
1180                 smu_set_gfx_cgpg(&adev->smu, true);
1181
1182         smu->disable_uclk_switch = 0;
1183
1184         adev->pm.dpm_enabled = true;
1185
1186         dev_info(adev->dev, "SMU is resumed successfully!\n");
1187
1188         return 0;
1189 }
1190
1191 int smu_display_configuration_change(struct smu_context *smu,
1192                                      const struct amd_pp_display_configuration *display_config)
1193 {
1194         int index = 0;
1195         int num_of_active_display = 0;
1196
1197         if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
1198                 return -EOPNOTSUPP;
1199
1200         if (!display_config)
1201                 return -EINVAL;
1202
1203         mutex_lock(&smu->mutex);
1204
1205         smu_set_min_dcef_deep_sleep(smu,
1206                                     display_config->min_dcef_deep_sleep_set_clk / 100);
1207
1208         for (index = 0; index < display_config->num_path_including_non_display; index++) {
1209                 if (display_config->displays[index].controller_id != 0)
1210                         num_of_active_display++;
1211         }
1212
1213         smu_set_active_display_count(smu, num_of_active_display);
1214
1215         smu_store_cc6_data(smu, display_config->cpu_pstate_separation_time,
1216                            display_config->cpu_cc6_disable,
1217                            display_config->cpu_pstate_disable,
1218                            display_config->nb_pstate_switch_disable);
1219
1220         mutex_unlock(&smu->mutex);
1221
1222         return 0;
1223 }
1224
1225 static int smu_get_clock_info(struct smu_context *smu,
1226                               struct smu_clock_info *clk_info,
1227                               enum smu_perf_level_designation designation)
1228 {
1229         int ret;
1230         struct smu_performance_level level = {0};
1231
1232         if (!clk_info)
1233                 return -EINVAL;
1234
1235         ret = smu_get_perf_level(smu, PERF_LEVEL_ACTIVITY, &level);
1236         if (ret)
1237                 return -EINVAL;
1238
1239         clk_info->min_mem_clk = level.memory_clock;
1240         clk_info->min_eng_clk = level.core_clock;
1241         clk_info->min_bus_bandwidth = level.non_local_mem_freq * level.non_local_mem_width;
1242
1243         ret = smu_get_perf_level(smu, designation, &level);
1244         if (ret)
1245                 return -EINVAL;
1246
1247         clk_info->min_mem_clk = level.memory_clock;
1248         clk_info->min_eng_clk = level.core_clock;
1249         clk_info->min_bus_bandwidth = level.non_local_mem_freq * level.non_local_mem_width;
1250
1251         return 0;
1252 }
1253
1254 int smu_get_current_clocks(struct smu_context *smu,
1255                            struct amd_pp_clock_info *clocks)
1256 {
1257         struct amd_pp_simple_clock_info simple_clocks = {0};
1258         struct smu_clock_info hw_clocks;
1259         int ret = 0;
1260
1261         if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
1262                 return -EOPNOTSUPP;
1263
1264         mutex_lock(&smu->mutex);
1265
1266         smu_get_dal_power_level(smu, &simple_clocks);
1267
1268         if (smu->support_power_containment)
1269                 ret = smu_get_clock_info(smu, &hw_clocks,
1270                                          PERF_LEVEL_POWER_CONTAINMENT);
1271         else
1272                 ret = smu_get_clock_info(smu, &hw_clocks, PERF_LEVEL_ACTIVITY);
1273
1274         if (ret) {
1275                 dev_err(smu->adev->dev, "Error in smu_get_clock_info\n");
1276                 goto failed;
1277         }
1278
1279         clocks->min_engine_clock = hw_clocks.min_eng_clk;
1280         clocks->max_engine_clock = hw_clocks.max_eng_clk;
1281         clocks->min_memory_clock = hw_clocks.min_mem_clk;
1282         clocks->max_memory_clock = hw_clocks.max_mem_clk;
1283         clocks->min_bus_bandwidth = hw_clocks.min_bus_bandwidth;
1284         clocks->max_bus_bandwidth = hw_clocks.max_bus_bandwidth;
1285         clocks->max_engine_clock_in_sr = hw_clocks.max_eng_clk;
1286         clocks->min_engine_clock_in_sr = hw_clocks.min_eng_clk;
1287
1288         if (simple_clocks.level == 0)
1289                 clocks->max_clocks_state = PP_DAL_POWERLEVEL_7;
1290         else
1291                 clocks->max_clocks_state = simple_clocks.level;
1292
1293         if (!smu_get_current_shallow_sleep_clocks(smu, &hw_clocks)) {
1294                 clocks->max_engine_clock_in_sr = hw_clocks.max_eng_clk;
1295                 clocks->min_engine_clock_in_sr = hw_clocks.min_eng_clk;
1296         }
1297
1298 failed:
1299         mutex_unlock(&smu->mutex);
1300         return ret;
1301 }
1302
1303 static int smu_set_clockgating_state(void *handle,
1304                                      enum amd_clockgating_state state)
1305 {
1306         return 0;
1307 }
1308
1309 static int smu_set_powergating_state(void *handle,
1310                                      enum amd_powergating_state state)
1311 {
1312         return 0;
1313 }
1314
1315 static int smu_enable_umd_pstate(void *handle,
1316                       enum amd_dpm_forced_level *level)
1317 {
1318         uint32_t profile_mode_mask = AMD_DPM_FORCED_LEVEL_PROFILE_STANDARD |
1319                                         AMD_DPM_FORCED_LEVEL_PROFILE_MIN_SCLK |
1320                                         AMD_DPM_FORCED_LEVEL_PROFILE_MIN_MCLK |
1321                                         AMD_DPM_FORCED_LEVEL_PROFILE_PEAK;
1322
1323         struct smu_context *smu = (struct smu_context*)(handle);
1324         struct smu_dpm_context *smu_dpm_ctx = &(smu->smu_dpm);
1325
1326         if (!smu->is_apu && !smu_dpm_ctx->dpm_context)
1327                 return -EINVAL;
1328
1329         if (!(smu_dpm_ctx->dpm_level & profile_mode_mask)) {
1330                 /* enter umd pstate, save current level, disable gfx cg*/
1331                 if (*level & profile_mode_mask) {
1332                         smu_dpm_ctx->saved_dpm_level = smu_dpm_ctx->dpm_level;
1333                         smu_dpm_ctx->enable_umd_pstate = true;
1334                         amdgpu_device_ip_set_powergating_state(smu->adev,
1335                                                                AMD_IP_BLOCK_TYPE_GFX,
1336                                                                AMD_PG_STATE_UNGATE);
1337                         amdgpu_device_ip_set_clockgating_state(smu->adev,
1338                                                                AMD_IP_BLOCK_TYPE_GFX,
1339                                                                AMD_CG_STATE_UNGATE);
1340                 }
1341         } else {
1342                 /* exit umd pstate, restore level, enable gfx cg*/
1343                 if (!(*level & profile_mode_mask)) {
1344                         if (*level == AMD_DPM_FORCED_LEVEL_PROFILE_EXIT)
1345                                 *level = smu_dpm_ctx->saved_dpm_level;
1346                         smu_dpm_ctx->enable_umd_pstate = false;
1347                         amdgpu_device_ip_set_clockgating_state(smu->adev,
1348                                                                AMD_IP_BLOCK_TYPE_GFX,
1349                                                                AMD_CG_STATE_GATE);
1350                         amdgpu_device_ip_set_powergating_state(smu->adev,
1351                                                                AMD_IP_BLOCK_TYPE_GFX,
1352                                                                AMD_PG_STATE_GATE);
1353                 }
1354         }
1355
1356         return 0;
1357 }
1358
1359 static int smu_adjust_power_state_dynamic(struct smu_context *smu,
1360                                    enum amd_dpm_forced_level level,
1361                                    bool skip_display_settings)
1362 {
1363         int ret = 0;
1364         int index = 0;
1365         long workload;
1366         struct smu_dpm_context *smu_dpm_ctx = &(smu->smu_dpm);
1367
1368         if (!skip_display_settings) {
1369                 ret = smu_display_config_changed(smu);
1370                 if (ret) {
1371                         dev_err(smu->adev->dev, "Failed to change display config!");
1372                         return ret;
1373                 }
1374         }
1375
1376         ret = smu_apply_clocks_adjust_rules(smu);
1377         if (ret) {
1378                 dev_err(smu->adev->dev, "Failed to apply clocks adjust rules!");
1379                 return ret;
1380         }
1381
1382         if (!skip_display_settings) {
1383                 ret = smu_notify_smc_display_config(smu);
1384                 if (ret) {
1385                         dev_err(smu->adev->dev, "Failed to notify smc display config!");
1386                         return ret;
1387                 }
1388         }
1389
1390         if (smu_dpm_ctx->dpm_level != level) {
1391                 ret = smu_asic_set_performance_level(smu, level);
1392                 if (ret) {
1393                         dev_err(smu->adev->dev, "Failed to set performance level!");
1394                         return ret;
1395                 }
1396
1397                 /* update the saved copy */
1398                 smu_dpm_ctx->dpm_level = level;
1399         }
1400
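        /*
         * Pick the highest-priority workload still requested.  fls() returns
         * the 1-based position of the highest set bit, so, illustratively
         * (assumed mask value, given the priorities assigned in smu_sw_init),
         * workload_mask = 0x28 (the VIDEO and COMPUTE priority bits set)
         * gives fls() = 6, index 5, i.e. workload_setting[5], the COMPUTE
         * profile.
         */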
1401         if (smu_dpm_ctx->dpm_level != AMD_DPM_FORCED_LEVEL_MANUAL) {
1402                 index = fls(smu->workload_mask);
1403                 index = index > 0 && index <= WORKLOAD_POLICY_MAX ? index - 1 : 0;
1404                 workload = smu->workload_setting[index];
1405
1406                 if (smu->power_profile_mode != workload)
1407                         smu_set_power_profile_mode(smu, &workload, 0, false);
1408         }
1409
1410         return ret;
1411 }
1412
1413 int smu_handle_task(struct smu_context *smu,
1414                     enum amd_dpm_forced_level level,
1415                     enum amd_pp_task task_id,
1416                     bool lock_needed)
1417 {
1418         int ret = 0;
1419
1420         if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
1421                 return -EOPNOTSUPP;
1422
1423         if (lock_needed)
1424                 mutex_lock(&smu->mutex);
1425
1426         switch (task_id) {
1427         case AMD_PP_TASK_DISPLAY_CONFIG_CHANGE:
1428                 ret = smu_pre_display_config_changed(smu);
1429                 if (ret)
1430                         goto out;
1431                 ret = smu_set_cpu_power_state(smu);
1432                 if (ret)
1433                         goto out;
1434                 ret = smu_adjust_power_state_dynamic(smu, level, false);
1435                 break;
1436         case AMD_PP_TASK_COMPLETE_INIT:
1437         case AMD_PP_TASK_READJUST_POWER_STATE:
1438                 ret = smu_adjust_power_state_dynamic(smu, level, true);
1439                 break;
1440         default:
1441                 break;
1442         }
1443
1444 out:
1445         if (lock_needed)
1446                 mutex_unlock(&smu->mutex);
1447
1448         return ret;
1449 }
1450
1451 int smu_switch_power_profile(struct smu_context *smu,
1452                              enum PP_SMC_POWER_PROFILE type,
1453                              bool en)
1454 {
1455         struct smu_dpm_context *smu_dpm_ctx = &(smu->smu_dpm);
1456         long workload;
1457         uint32_t index;
1458
1459         if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
1460                 return -EOPNOTSUPP;
1461
1462         if (type >= PP_SMC_POWER_PROFILE_CUSTOM)
1463                 return -EINVAL;
1464
1465         mutex_lock(&smu->mutex);
1466
1467         if (!en) {
1468                 smu->workload_mask &= ~(1 << smu->workload_prority[type]);
1469                 index = fls(smu->workload_mask);
1470                 index = index > 0 && index <= WORKLOAD_POLICY_MAX ? index - 1 : 0;
1471                 workload = smu->workload_setting[index];
1472         } else {
1473                 smu->workload_mask |= (1 << smu->workload_prority[type]);
1474                 index = fls(smu->workload_mask);
1475                 index = index <= WORKLOAD_POLICY_MAX ? index - 1 : 0;
1476                 workload = smu->workload_setting[index];
1477         }
1478
1479         if (smu_dpm_ctx->dpm_level != AMD_DPM_FORCED_LEVEL_MANUAL)
1480                 smu_set_power_profile_mode(smu, &workload, 0, false);
1481
1482         mutex_unlock(&smu->mutex);
1483
1484         return 0;
1485 }
1486
1487 enum amd_dpm_forced_level smu_get_performance_level(struct smu_context *smu)
1488 {
1489         struct smu_dpm_context *smu_dpm_ctx = &(smu->smu_dpm);
1490         enum amd_dpm_forced_level level;
1491
1492         if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
1493                 return -EOPNOTSUPP;
1494
1495         if (!smu->is_apu && !smu_dpm_ctx->dpm_context)
1496                 return -EINVAL;
1497
1498         mutex_lock(&(smu->mutex));
1499         level = smu_dpm_ctx->dpm_level;
1500         mutex_unlock(&(smu->mutex));
1501
1502         return level;
1503 }
1504
1505 int smu_force_performance_level(struct smu_context *smu, enum amd_dpm_forced_level level)
1506 {
1507         struct smu_dpm_context *smu_dpm_ctx = &(smu->smu_dpm);
1508         int ret = 0;
1509
1510         if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
1511                 return -EOPNOTSUPP;
1512
1513         if (!smu->is_apu && !smu_dpm_ctx->dpm_context)
1514                 return -EINVAL;
1515
1516         mutex_lock(&smu->mutex);
1517
1518         ret = smu_enable_umd_pstate(smu, &level);
1519         if (ret) {
1520                 mutex_unlock(&smu->mutex);
1521                 return ret;
1522         }
1523
1524         ret = smu_handle_task(smu, level,
1525                               AMD_PP_TASK_READJUST_POWER_STATE,
1526                               false);
1527
1528         mutex_unlock(&smu->mutex);
1529
1530         return ret;
1531 }
1532
1533 int smu_set_display_count(struct smu_context *smu, uint32_t count)
1534 {
1535         int ret = 0;
1536
1537         if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
1538                 return -EOPNOTSUPP;
1539
1540         mutex_lock(&smu->mutex);
1541         ret = smu_init_display_count(smu, count);
1542         mutex_unlock(&smu->mutex);
1543
1544         return ret;
1545 }
1546
1547 int smu_force_clk_levels(struct smu_context *smu,
1548                          enum smu_clk_type clk_type,
1549                          uint32_t mask)
1550 {
1551         struct smu_dpm_context *smu_dpm_ctx = &(smu->smu_dpm);
1552         int ret = 0;
1553
1554         if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
1555                 return -EOPNOTSUPP;
1556
1557         if (smu_dpm_ctx->dpm_level != AMD_DPM_FORCED_LEVEL_MANUAL) {
1558                 dev_dbg(smu->adev->dev, "force clock level is for dpm manual mode only.\n");
1559                 return -EINVAL;
1560         }
1561
1562         mutex_lock(&smu->mutex);
1563
1564         if (smu->ppt_funcs && smu->ppt_funcs->force_clk_levels)
1565                 ret = smu->ppt_funcs->force_clk_levels(smu, clk_type, mask);
1566
1567         mutex_unlock(&smu->mutex);
1568
1569         return ret;
1570 }
1571
1572 /*
1573  * On system suspend or reset, the dpm_enabled flag is
1574  * cleared so that SMU services which are not supported
1575  * in that state are gated.
1576  * However, the MP1 state setting must still be allowed
1577  * even with dpm_enabled cleared.
1578  */
1579 int smu_set_mp1_state(struct smu_context *smu,
1580                       enum pp_mp1_state mp1_state)
1581 {
1582         uint16_t msg;
1583         int ret;
1584
1585         if (!smu->pm_enabled)
1586                 return -EOPNOTSUPP;
1587
1588         mutex_lock(&smu->mutex);
1589
1590         switch (mp1_state) {
1591         case PP_MP1_STATE_SHUTDOWN:
1592                 msg = SMU_MSG_PrepareMp1ForShutdown;
1593                 break;
1594         case PP_MP1_STATE_UNLOAD:
1595                 msg = SMU_MSG_PrepareMp1ForUnload;
1596                 break;
1597         case PP_MP1_STATE_RESET:
1598                 msg = SMU_MSG_PrepareMp1ForReset;
1599                 break;
1600         case PP_MP1_STATE_NONE:
1601         default:
1602                 mutex_unlock(&smu->mutex);
1603                 return 0;
1604         }
1605
1606         /* some asics may not support those messages */
1607         if (smu_cmn_to_asic_specific_index(smu,
1608                                            CMN2ASIC_MAPPING_MSG,
1609                                            msg) < 0) {
1610                 mutex_unlock(&smu->mutex);
1611                 return 0;
1612         }
1613
1614         ret = smu_send_smc_msg(smu, msg, NULL);
1615         if (ret)
1616                 dev_err(smu->adev->dev, "[PrepareMp1] Failed!\n");
1617
1618         mutex_unlock(&smu->mutex);
1619
1620         return ret;
1621 }
1622
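     /* Allow or disallow C-state entry for the data fabric (DF). */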
1623 int smu_set_df_cstate(struct smu_context *smu,
1624                       enum pp_df_cstate state)
1625 {
1626         int ret = 0;
1627
1628         if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
1629                 return -EOPNOTSUPP;
1630
1631         if (!smu->ppt_funcs || !smu->ppt_funcs->set_df_cstate)
1632                 return 0;
1633
1634         mutex_lock(&smu->mutex);
1635
1636         ret = smu->ppt_funcs->set_df_cstate(smu, state);
1637         if (ret)
1638                 dev_err(smu->adev->dev, "[SetDfCstate] failed!\n");
1639
1640         mutex_unlock(&smu->mutex);
1641
1642         return ret;
1643 }
1644
1645 int smu_allow_xgmi_power_down(struct smu_context *smu, bool en)
1646 {
1647         int ret = 0;
1648
1649         if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
1650                 return -EOPNOTSUPP;
1651
1652         if (!smu->ppt_funcs || !smu->ppt_funcs->allow_xgmi_power_down)
1653                 return 0;
1654
1655         mutex_lock(&smu->mutex);
1656
1657         ret = smu->ppt_funcs->allow_xgmi_power_down(smu, en);
1658         if (ret)
1659                 dev_err(smu->adev->dev, "[AllowXgmiPowerDown] failed!\n");
1660
1661         mutex_unlock(&smu->mutex);
1662
1663         return ret;
1664 }
1665
1666 int smu_write_watermarks_table(struct smu_context *smu)
1667 {
1668         int ret = 0;
1669
1670         if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
1671                 return -EOPNOTSUPP;
1672
1673         mutex_lock(&smu->mutex);
1674
1675         ret = smu_set_watermarks_table(smu, NULL);
1676
1677         mutex_unlock(&smu->mutex);
1678
1679         return ret;
1680 }
1681
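     /*
      * Program the display watermarks for the given clock ranges. This
      * only takes effect when watermark programming is enabled and both
      * DCEFCLK and SOCCLK DPM are active; the watermarks are then
      * flagged as existing but not yet loaded.
      */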
1682 int smu_set_watermarks_for_clock_ranges(struct smu_context *smu,
1683                 struct dm_pp_wm_sets_with_clock_ranges_soc15 *clock_ranges)
1684 {
1685         int ret = 0;
1686
1687         if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
1688                 return -EOPNOTSUPP;
1689
1690         mutex_lock(&smu->mutex);
1691
1692         if (!smu->disable_watermark &&
1693                         smu_feature_is_enabled(smu, SMU_FEATURE_DPM_DCEFCLK_BIT) &&
1694                         smu_feature_is_enabled(smu, SMU_FEATURE_DPM_SOCCLK_BIT)) {
1695                 ret = smu_set_watermarks_table(smu, clock_ranges);
1696
1697                 if (!(smu->watermarks_bitmap & WATERMARKS_EXIST)) {
1698                         smu->watermarks_bitmap |= WATERMARKS_EXIST;
1699                         smu->watermarks_bitmap &= ~WATERMARKS_LOADED;
1700                 }
1701         }
1702
1703         mutex_unlock(&smu->mutex);
1704
1705         return ret;
1706 }
1707
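     /*
      * Notify the SMU of the current AC/DC power source. Skipped when
      * the DC state is signalled through a GPIO and handled by the
      * firmware directly.
      */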
1708 int smu_set_ac_dc(struct smu_context *smu)
1709 {
1710         int ret = 0;
1711
1712         if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
1713                 return -EOPNOTSUPP;
1714
1715         /* controlled by firmware */
1716         if (smu->dc_controlled_by_gpio)
1717                 return 0;
1718
1719         mutex_lock(&smu->mutex);
1720         ret = smu_set_power_source(smu,
1721                                    smu->adev->pm.ac_power ? SMU_POWER_SOURCE_AC :
1722                                    SMU_POWER_SOURCE_DC);
1723         if (ret)
1724                 dev_err(smu->adev->dev, "Failed to switch to %s mode!\n",
1725                        smu->adev->pm.ac_power ? "AC" : "DC");
1726         mutex_unlock(&smu->mutex);
1727
1728         return ret;
1729 }
1730
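     /*
      * amdgpu IP block glue: registers the SMU as the SMC IP block with
      * the driver's common init/fini/suspend/resume framework.
      */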
1731 const struct amd_ip_funcs smu_ip_funcs = {
1732         .name = "smu",
1733         .early_init = smu_early_init,
1734         .late_init = smu_late_init,
1735         .sw_init = smu_sw_init,
1736         .sw_fini = smu_sw_fini,
1737         .hw_init = smu_hw_init,
1738         .hw_fini = smu_hw_fini,
1739         .suspend = smu_suspend,
1740         .resume = smu_resume,
1741         .is_idle = NULL,
1742         .check_soft_reset = NULL,
1743         .wait_for_idle = NULL,
1744         .soft_reset = NULL,
1745         .set_clockgating_state = smu_set_clockgating_state,
1746         .set_powergating_state = smu_set_powergating_state,
1747         .enable_umd_pstate = smu_enable_umd_pstate,
1748 };
1749
1750 const struct amdgpu_ip_block_version smu_v11_0_ip_block =
1751 {
1752         .type = AMD_IP_BLOCK_TYPE_SMC,
1753         .major = 11,
1754         .minor = 0,
1755         .rev = 0,
1756         .funcs = &smu_ip_funcs,
1757 };
1758
1759 const struct amdgpu_ip_block_version smu_v12_0_ip_block =
1760 {
1761         .type = AMD_IP_BLOCK_TYPE_SMC,
1762         .major = 12,
1763         .minor = 0,
1764         .rev = 0,
1765         .funcs = &smu_ip_funcs,
1766 };
1767
1768 int smu_load_microcode(struct smu_context *smu)
1769 {
1770         int ret = 0;
1771
1772         if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
1773                 return -EOPNOTSUPP;
1774
1775         mutex_lock(&smu->mutex);
1776
1777         if (smu->ppt_funcs->load_microcode)
1778                 ret = smu->ppt_funcs->load_microcode(smu);
1779
1780         mutex_unlock(&smu->mutex);
1781
1782         return ret;
1783 }
1784
1785 int smu_check_fw_status(struct smu_context *smu)
1786 {
1787         int ret = 0;
1788
1789         if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
1790                 return -EOPNOTSUPP;
1791
1792         mutex_lock(&smu->mutex);
1793
1794         if (smu->ppt_funcs->check_fw_status)
1795                 ret = smu->ppt_funcs->check_fw_status(smu);
1796
1797         mutex_unlock(&smu->mutex);
1798
1799         return ret;
1800 }
1801
1802 int smu_set_gfx_cgpg(struct smu_context *smu, bool enabled)
1803 {
1804         int ret = 0;
1805
1806         mutex_lock(&smu->mutex);
1807
1808         if (smu->ppt_funcs->set_gfx_cgpg)
1809                 ret = smu->ppt_funcs->set_gfx_cgpg(smu, enabled);
1810
1811         mutex_unlock(&smu->mutex);
1812
1813         return ret;
1814 }
1815
1816 int smu_set_fan_speed_rpm(struct smu_context *smu, uint32_t speed)
1817 {
1818         int ret = 0;
1819
1820         if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
1821                 return -EOPNOTSUPP;
1822
1823         mutex_lock(&smu->mutex);
1824
1825         if (smu->ppt_funcs->set_fan_speed_rpm)
1826                 ret = smu->ppt_funcs->set_fan_speed_rpm(smu, speed);
1827
1828         mutex_unlock(&smu->mutex);
1829
1830         return ret;
1831 }
1832
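     /*
      * Report either the maximum allowed power limit or the currently
      * applied one, depending on max_setting.
      */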
1833 int smu_get_power_limit(struct smu_context *smu,
1834                         uint32_t *limit,
1835                         bool max_setting)
1836 {
1837         if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
1838                 return -EOPNOTSUPP;
1839
1840         mutex_lock(&smu->mutex);
1841
1842         *limit = (max_setting ? smu->max_power_limit : smu->current_power_limit);
1843
1844         mutex_unlock(&smu->mutex);
1845
1846         return 0;
1847 }
1848
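     /*
      * Set a new power limit. Requests above max_power_limit are
      * rejected; a request of 0 re-applies the current limit.
      */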
1849 int smu_set_power_limit(struct smu_context *smu, uint32_t limit)
1850 {
1851         int ret = 0;
1852
1853         if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
1854                 return -EOPNOTSUPP;
1855
1856         mutex_lock(&smu->mutex);
1857
1858         if (limit > smu->max_power_limit) {
1859                 dev_err(smu->adev->dev,
1860                         "New power limit (%d) is over the max allowed %d\n",
1861                         limit, smu->max_power_limit);
                     ret = -EINVAL;
1862                 goto out;
1863         }
1864
1865         if (!limit)
1866                 limit = smu->current_power_limit;
1867
1868         if (smu->ppt_funcs->set_power_limit)
1869                 ret = smu->ppt_funcs->set_power_limit(smu, limit);
1870
1871 out:
1872         mutex_unlock(&smu->mutex);
1873
1874         return ret;
1875 }
1876
1877 int smu_print_clk_levels(struct smu_context *smu, enum smu_clk_type clk_type, char *buf)
1878 {
1879         int ret = 0;
1880
1881         if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
1882                 return -EOPNOTSUPP;
1883
1884         mutex_lock(&smu->mutex);
1885
1886         if (smu->ppt_funcs->print_clk_levels)
1887                 ret = smu->ppt_funcs->print_clk_levels(smu, clk_type, buf);
1888
1889         mutex_unlock(&smu->mutex);
1890
1891         return ret;
1892 }
1893
1894 int smu_get_od_percentage(struct smu_context *smu, enum smu_clk_type type)
1895 {
1896         int ret = 0;
1897
1898         if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
1899                 return -EOPNOTSUPP;
1900
1901         mutex_lock(&smu->mutex);
1902
1903         if (smu->ppt_funcs->get_od_percentage)
1904                 ret = smu->ppt_funcs->get_od_percentage(smu, type);
1905
1906         mutex_unlock(&smu->mutex);
1907
1908         return ret;
1909 }
1910
1911 int smu_set_od_percentage(struct smu_context *smu, enum smu_clk_type type, uint32_t value)
1912 {
1913         int ret = 0;
1914
1915         if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
1916                 return -EOPNOTSUPP;
1917
1918         mutex_lock(&smu->mutex);
1919
1920         if (smu->ppt_funcs->set_od_percentage)
1921                 ret = smu->ppt_funcs->set_od_percentage(smu, type, value);
1922
1923         mutex_unlock(&smu->mutex);
1924
1925         return ret;
1926 }
1927
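     /* OD (overdrive): user editing of the clock/voltage DPM tables. */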
1928 int smu_od_edit_dpm_table(struct smu_context *smu,
1929                           enum PP_OD_DPM_TABLE_COMMAND type,
1930                           long *input, uint32_t size)
1931 {
1932         int ret = 0;
1933
1934         if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
1935                 return -EOPNOTSUPP;
1936
1937         mutex_lock(&smu->mutex);
1938
1939         if (smu->ppt_funcs->od_edit_dpm_table)
1940                 ret = smu->ppt_funcs->od_edit_dpm_table(smu, type, input, size);
1941
1942         mutex_unlock(&smu->mutex);
1943
1944         return ret;
1945 }
1946
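     /*
      * Generic sensor read. A few sensors (stable pstate clocks, the
      * enabled SMC feature mask, UVD/VCE/VCN power state and minimum
      * fan RPM) are handled here directly; everything else is forwarded
      * to the ASIC-specific read_sensor callback.
      */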
1947 int smu_read_sensor(struct smu_context *smu,
1948                     enum amd_pp_sensors sensor,
1949                     void *data, uint32_t *size)
1950 {
1951         struct smu_umd_pstate_table *pstate_table =
1952                                 &smu->pstate_table;
1953         int ret = 0;
1954
1955         if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
1956                 return -EOPNOTSUPP;
1957
1958         if (!data || !size)
1959                 return -EINVAL;
1960
1961         mutex_lock(&smu->mutex);
1962
1963         switch (sensor) {
1964         case AMDGPU_PP_SENSOR_STABLE_PSTATE_SCLK:
1965                 *((uint32_t *)data) = pstate_table->gfxclk_pstate.standard * 100;
1966                 *size = 4;
1967                 break;
1968         case AMDGPU_PP_SENSOR_STABLE_PSTATE_MCLK:
1969                 *((uint32_t *)data) = pstate_table->uclk_pstate.standard * 100;
1970                 *size = 4;
1971                 break;
1972         case AMDGPU_PP_SENSOR_ENABLED_SMC_FEATURES_MASK:
1973                 ret = smu_feature_get_enabled_mask(smu, (uint32_t *)data, 2);
1974                 *size = 8;
1975                 break;
1976         case AMDGPU_PP_SENSOR_UVD_POWER:
1977                 *(uint32_t *)data = smu_feature_is_enabled(smu, SMU_FEATURE_DPM_UVD_BIT) ? 1 : 0;
1978                 *size = 4;
1979                 break;
1980         case AMDGPU_PP_SENSOR_VCE_POWER:
1981                 *(uint32_t *)data = smu_feature_is_enabled(smu, SMU_FEATURE_DPM_VCE_BIT) ? 1 : 0;
1982                 *size = 4;
1983                 break;
1984         case AMDGPU_PP_SENSOR_VCN_POWER_STATE:
1985                 *(uint32_t *)data = smu->smu_power.power_gate.vcn_gated ? 0 : 1;
1986                 *size = 4;
1987                 break;
1988         case AMDGPU_PP_SENSOR_MIN_FAN_RPM:
1989                 *(uint32_t *)data = 0;
1990                 *size = 4;
1991                 break;
1992         default:
1993                 if (smu->ppt_funcs->read_sensor)
1994                         ret = smu->ppt_funcs->read_sensor(smu, sensor, data, size);
1995                 break;
1996         }
1997
1998         mutex_unlock(&smu->mutex);
1999
2000         return ret;
2001 }
2002
2003 int smu_get_power_profile_mode(struct smu_context *smu, char *buf)
2004 {
2005         int ret = 0;
2006
2007         if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
2008                 return -EOPNOTSUPP;
2009
2010         mutex_lock(&smu->mutex);
2011
2012         if (smu->ppt_funcs->get_power_profile_mode)
2013                 ret = smu->ppt_funcs->get_power_profile_mode(smu, buf);
2014
2015         mutex_unlock(&smu->mutex);
2016
2017         return ret;
2018 }
2019
2020 int smu_set_power_profile_mode(struct smu_context *smu,
2021                                long *param,
2022                                uint32_t param_size,
2023                                bool lock_needed)
2024 {
2025         int ret = 0;
2026
2027         if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
2028                 return -EOPNOTSUPP;
2029
2030         if (lock_needed)
2031                 mutex_lock(&smu->mutex);
2032
2033         if (smu->ppt_funcs->set_power_profile_mode)
2034                 ret = smu->ppt_funcs->set_power_profile_mode(smu, param, param_size);
2035
2036         if (lock_needed)
2037                 mutex_unlock(&smu->mutex);
2038
2039         return ret;
2040 }
2041
2042
2043 int smu_get_fan_control_mode(struct smu_context *smu)
2044 {
2045         int ret = 0;
2046
2047         if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
2048                 return -EOPNOTSUPP;
2049
2050         mutex_lock(&smu->mutex);
2051
2052         if (smu->ppt_funcs->get_fan_control_mode)
2053                 ret = smu->ppt_funcs->get_fan_control_mode(smu);
2054
2055         mutex_unlock(&smu->mutex);
2056
2057         return ret;
2058 }
2059
2060 int smu_set_fan_control_mode(struct smu_context *smu, int value)
2061 {
2062         int ret = 0;
2063
2064         if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
2065                 return -EOPNOTSUPP;
2066
2067         mutex_lock(&smu->mutex);
2068
2069         if (smu->ppt_funcs->set_fan_control_mode)
2070                 ret = smu->ppt_funcs->set_fan_control_mode(smu, value);
2071
2072         mutex_unlock(&smu->mutex);
2073
2074         return ret;
2075 }
2076
2077 int smu_get_fan_speed_percent(struct smu_context *smu, uint32_t *speed)
2078 {
2079         int ret = 0;
2080
2081         if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
2082                 return -EOPNOTSUPP;
2083
2084         mutex_lock(&smu->mutex);
2085
2086         if (smu->ppt_funcs->get_fan_speed_percent)
2087                 ret = smu->ppt_funcs->get_fan_speed_percent(smu, speed);
2088
2089         mutex_unlock(&smu->mutex);
2090
2091         return ret;
2092 }
2093
2094 int smu_set_fan_speed_percent(struct smu_context *smu, uint32_t speed)
2095 {
2096         int ret = 0;
2097
2098         if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
2099                 return -EOPNOTSUPP;
2100
2101         mutex_lock(&smu->mutex);
2102
2103         if (smu->ppt_funcs->set_fan_speed_percent)
2104                 ret = smu->ppt_funcs->set_fan_speed_percent(smu, speed);
2105
2106         mutex_unlock(&smu->mutex);
2107
2108         return ret;
2109 }
2110
2111 int smu_get_fan_speed_rpm(struct smu_context *smu, uint32_t *speed)
2112 {
2113         int ret = 0;
2114
2115         if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
2116                 return -EOPNOTSUPP;
2117
2118         mutex_lock(&smu->mutex);
2119
2120         if (smu->ppt_funcs->get_fan_speed_rpm)
2121                 ret = smu->ppt_funcs->get_fan_speed_rpm(smu, speed);
2122
2123         mutex_unlock(&smu->mutex);
2124
2125         return ret;
2126 }
2127
2128 int smu_set_deep_sleep_dcefclk(struct smu_context *smu, int clk)
2129 {
2130         int ret = 0;
2131
2132         if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
2133                 return -EOPNOTSUPP;
2134
2135         mutex_lock(&smu->mutex);
2136
2137         ret = smu_set_min_dcef_deep_sleep(smu, clk);
2138
2139         mutex_unlock(&smu->mutex);
2140
2141         return ret;
2142 }
2143
2144 int smu_set_active_display_count(struct smu_context *smu, uint32_t count)
2145 {
2146         int ret = 0;
2147
2148         if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
2149                 return -EOPNOTSUPP;
2150
2151         if (smu->ppt_funcs->set_active_display_count)
2152                 ret = smu->ppt_funcs->set_active_display_count(smu, count);
2153
2154         return ret;
2155 }
2156
2157 int smu_get_clock_by_type(struct smu_context *smu,
2158                           enum amd_pp_clock_type type,
2159                           struct amd_pp_clocks *clocks)
2160 {
2161         int ret = 0;
2162
2163         if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
2164                 return -EOPNOTSUPP;
2165
2166         mutex_lock(&smu->mutex);
2167
2168         if (smu->ppt_funcs->get_clock_by_type)
2169                 ret = smu->ppt_funcs->get_clock_by_type(smu, type, clocks);
2170
2171         mutex_unlock(&smu->mutex);
2172
2173         return ret;
2174 }
2175
2176 int smu_get_max_high_clocks(struct smu_context *smu,
2177                             struct amd_pp_simple_clock_info *clocks)
2178 {
2179         int ret = 0;
2180
2181         if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
2182                 return -EOPNOTSUPP;
2183
2184         mutex_lock(&smu->mutex);
2185
2186         if (smu->ppt_funcs->get_max_high_clocks)
2187                 ret = smu->ppt_funcs->get_max_high_clocks(smu, clocks);
2188
2189         mutex_unlock(&smu->mutex);
2190
2191         return ret;
2192 }
2193
2194 int smu_get_clock_by_type_with_latency(struct smu_context *smu,
2195                                        enum smu_clk_type clk_type,
2196                                        struct pp_clock_levels_with_latency *clocks)
2197 {
2198         int ret = 0;
2199
2200         if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
2201                 return -EOPNOTSUPP;
2202
2203         mutex_lock(&smu->mutex);
2204
2205         if (smu->ppt_funcs->get_clock_by_type_with_latency)
2206                 ret = smu->ppt_funcs->get_clock_by_type_with_latency(smu, clk_type, clocks);
2207
2208         mutex_unlock(&smu->mutex);
2209
2210         return ret;
2211 }
2212
2213 int smu_get_clock_by_type_with_voltage(struct smu_context *smu,
2214                                        enum amd_pp_clock_type type,
2215                                        struct pp_clock_levels_with_voltage *clocks)
2216 {
2217         int ret = 0;
2218
2219         if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
2220                 return -EOPNOTSUPP;
2221
2222         mutex_lock(&smu->mutex);
2223
2224         if (smu->ppt_funcs->get_clock_by_type_with_voltage)
2225                 ret = smu->ppt_funcs->get_clock_by_type_with_voltage(smu, type, clocks);
2226
2227         mutex_unlock(&smu->mutex);
2228
2229         return ret;
2230 }
2231
2232
2233 int smu_display_clock_voltage_request(struct smu_context *smu,
2234                                       struct pp_display_clock_request *clock_req)
2235 {
2236         int ret = 0;
2237
2238         if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
2239                 return -EOPNOTSUPP;
2240
2241         mutex_lock(&smu->mutex);
2242
2243         if (smu->ppt_funcs->display_clock_voltage_request)
2244                 ret = smu->ppt_funcs->display_clock_voltage_request(smu, clock_req);
2245
2246         mutex_unlock(&smu->mutex);
2247
2248         return ret;
2249 }
2250
2251
2252 int smu_display_disable_memory_clock_switch(struct smu_context *smu, bool disable_memory_clock_switch)
2253 {
2254         int ret = -EINVAL;
2255
2256         if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
2257                 return -EOPNOTSUPP;
2258
2259         mutex_lock(&smu->mutex);
2260
2261         if (smu->ppt_funcs->display_disable_memory_clock_switch)
2262                 ret = smu->ppt_funcs->display_disable_memory_clock_switch(smu, disable_memory_clock_switch);
2263
2264         mutex_unlock(&smu->mutex);
2265
2266         return ret;
2267 }
2268
2269 int smu_notify_smu_enable_pwe(struct smu_context *smu)
2270 {
2271         int ret = 0;
2272
2273         if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
2274                 return -EOPNOTSUPP;
2275
2276         mutex_lock(&smu->mutex);
2277
2278         if (smu->ppt_funcs->notify_smu_enable_pwe)
2279                 ret = smu->ppt_funcs->notify_smu_enable_pwe(smu);
2280
2281         mutex_unlock(&smu->mutex);
2282
2283         return ret;
2284 }
2285
2286 int smu_set_xgmi_pstate(struct smu_context *smu,
2287                         uint32_t pstate)
2288 {
2289         int ret = 0;
2290
2291         if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
2292                 return -EOPNOTSUPP;
2293
2294         mutex_lock(&smu->mutex);
2295
2296         if (smu->ppt_funcs->set_xgmi_pstate)
2297                 ret = smu->ppt_funcs->set_xgmi_pstate(smu, pstate);
2298
2299         mutex_unlock(&smu->mutex);
2300
2301         if (ret)
2302                 dev_err(smu->adev->dev, "Failed to set XGMI pstate!\n");
2303
2304         return ret;
2305 }
2306
2307 int smu_set_azalia_d3_pme(struct smu_context *smu)
2308 {
2309         int ret = 0;
2310
2311         if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
2312                 return -EOPNOTSUPP;
2313
2314         mutex_lock(&smu->mutex);
2315
2316         if (smu->ppt_funcs->set_azalia_d3_pme)
2317                 ret = smu->ppt_funcs->set_azalia_d3_pme(smu);
2318
2319         mutex_unlock(&smu->mutex);
2320
2321         return ret;
2322 }
2323
2324 /*
2325  * On system suspend or reset, the dpm_enabled flag is
2326  * cleared so that SMU services which are not supported
2327  * in that state are gated.
2328  *
2329  * However, BACO and mode1 reset must still be allowed
2330  * as they remain supported and necessary.
2331  */
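     /*
      * BACO (Bus Active, Chip Off): the GPU core is powered down while
      * the PCIe bus stays active. It is used both for runtime power
      * saving and as a reset mechanism.
      */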
2332 bool smu_baco_is_support(struct smu_context *smu)
2333 {
2334         bool ret = false;
2335
2336         if (!smu->pm_enabled)
2337                 return false;
2338
2339         mutex_lock(&smu->mutex);
2340
2341         if (smu->ppt_funcs && smu->ppt_funcs->baco_is_support)
2342                 ret = smu->ppt_funcs->baco_is_support(smu);
2343
2344         mutex_unlock(&smu->mutex);
2345
2346         return ret;
2347 }
2348
2349 int smu_baco_get_state(struct smu_context *smu, enum smu_baco_state *state)
2350 {
2351         if (!smu->ppt_funcs->baco_get_state)
2352                 return -EINVAL;
2353
2354         mutex_lock(&smu->mutex);
2355         *state = smu->ppt_funcs->baco_get_state(smu);
2356         mutex_unlock(&smu->mutex);
2357
2358         return 0;
2359 }
2360
2361 int smu_baco_enter(struct smu_context *smu)
2362 {
2363         int ret = 0;
2364
2365         if (!smu->pm_enabled)
2366                 return -EOPNOTSUPP;
2367
2368         mutex_lock(&smu->mutex);
2369
2370         if (smu->ppt_funcs->baco_enter)
2371                 ret = smu->ppt_funcs->baco_enter(smu);
2372
2373         mutex_unlock(&smu->mutex);
2374
2375         if (ret)
2376                 dev_err(smu->adev->dev, "Failed to enter BACO state!\n");
2377
2378         return ret;
2379 }
2380
2381 int smu_baco_exit(struct smu_context *smu)
2382 {
2383         int ret = 0;
2384
2385         if (!smu->pm_enabled)
2386                 return -EOPNOTSUPP;
2387
2388         mutex_lock(&smu->mutex);
2389
2390         if (smu->ppt_funcs->baco_exit)
2391                 ret = smu->ppt_funcs->baco_exit(smu);
2392
2393         mutex_unlock(&smu->mutex);
2394
2395         if (ret)
2396                 dev_err(smu->adev->dev, "Failed to exit BACO state!\n");
2397
2398         return ret;
2399 }
2400
2401 bool smu_mode1_reset_is_support(struct smu_context *smu)
2402 {
2403         bool ret = false;
2404
2405         if (!smu->pm_enabled)
2406                 return false;
2407
2408         mutex_lock(&smu->mutex);
2409
2410         if (smu->ppt_funcs && smu->ppt_funcs->mode1_reset_is_support)
2411                 ret = smu->ppt_funcs->mode1_reset_is_support(smu);
2412
2413         mutex_unlock(&smu->mutex);
2414
2415         return ret;
2416 }
2417
2418 int smu_mode1_reset(struct smu_context *smu)
2419 {
2420         int ret = 0;
2421
2422         if (!smu->pm_enabled)
2423                 return -EOPNOTSUPP;
2424
2425         mutex_lock(&smu->mutex);
2426
2427         if (smu->ppt_funcs->mode1_reset)
2428                 ret = smu->ppt_funcs->mode1_reset(smu);
2429
2430         mutex_unlock(&smu->mutex);
2431
2432         return ret;
2433 }
2434
2435 int smu_mode2_reset(struct smu_context *smu)
2436 {
2437         int ret = 0;
2438
2439         if (!smu->pm_enabled)
2440                 return -EOPNOTSUPP;
2441
2442         mutex_lock(&smu->mutex);
2443
2444         if (smu->ppt_funcs->mode2_reset)
2445                 ret = smu->ppt_funcs->mode2_reset(smu);
2446
2447         mutex_unlock(&smu->mutex);
2448
2449         if (ret)
2450                 dev_err(smu->adev->dev, "Mode2 reset failed!\n");
2451
2452         return ret;
2453 }
2454
2455 int smu_get_max_sustainable_clocks_by_dc(struct smu_context *smu,
2456                                          struct pp_smu_nv_clock_table *max_clocks)
2457 {
2458         int ret = 0;
2459
2460         if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
2461                 return -EOPNOTSUPP;
2462
2463         mutex_lock(&smu->mutex);
2464
2465         if (smu->ppt_funcs->get_max_sustainable_clocks_by_dc)
2466                 ret = smu->ppt_funcs->get_max_sustainable_clocks_by_dc(smu, max_clocks);
2467
2468         mutex_unlock(&smu->mutex);
2469
2470         return ret;
2471 }
2472
2473 int smu_get_uclk_dpm_states(struct smu_context *smu,
2474                             unsigned int *clock_values_in_khz,
2475                             unsigned int *num_states)
2476 {
2477         int ret = 0;
2478
2479         if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
2480                 return -EOPNOTSUPP;
2481
2482         mutex_lock(&smu->mutex);
2483
2484         if (smu->ppt_funcs->get_uclk_dpm_states)
2485                 ret = smu->ppt_funcs->get_uclk_dpm_states(smu, clock_values_in_khz, num_states);
2486
2487         mutex_unlock(&smu->mutex);
2488
2489         return ret;
2490 }
2491
2492 enum amd_pm_state_type smu_get_current_power_state(struct smu_context *smu)
2493 {
2494         enum amd_pm_state_type pm_state = POWER_STATE_TYPE_DEFAULT;
2495
2496         if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
2497                 return -EOPNOTSUPP;
2498
2499         mutex_lock(&smu->mutex);
2500
2501         if (smu->ppt_funcs->get_current_power_state)
2502                 pm_state = smu->ppt_funcs->get_current_power_state(smu);
2503
2504         mutex_unlock(&smu->mutex);
2505
2506         return pm_state;
2507 }
2508
2509 int smu_get_dpm_clock_table(struct smu_context *smu,
2510                             struct dpm_clocks *clock_table)
2511 {
2512         int ret = 0;
2513
2514         if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
2515                 return -EOPNOTSUPP;
2516
2517         mutex_lock(&smu->mutex);
2518
2519         if (smu->ppt_funcs->get_dpm_clock_table)
2520                 ret = smu->ppt_funcs->get_dpm_clock_table(smu, clock_table);
2521
2522         mutex_unlock(&smu->mutex);
2523
2524         return ret;
2525 }