drm/amd/pm: correct Arcturus mmTHM_BACO_CNTL register address
drivers/gpu/drm/amd/pm/swsmu/smu11/smu_v11_0.c
/*
 * Copyright 2019 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 */

#include <linux/firmware.h>
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/reboot.h>

#define SMU_11_0_PARTIAL_PPTABLE
#define SWSMU_CODE_LAYER_L3

#include "amdgpu.h"
#include "amdgpu_smu.h"
#include "atomfirmware.h"
#include "amdgpu_atomfirmware.h"
#include "amdgpu_atombios.h"
#include "smu_v11_0.h"
#include "soc15_common.h"
#include "atom.h"
#include "amdgpu_ras.h"
#include "smu_cmn.h"

#include "asic_reg/thm/thm_11_0_2_offset.h"
#include "asic_reg/thm/thm_11_0_2_sh_mask.h"
#include "asic_reg/mp/mp_11_0_offset.h"
#include "asic_reg/mp/mp_11_0_sh_mask.h"
#include "asic_reg/smuio/smuio_11_0_0_offset.h"
#include "asic_reg/smuio/smuio_11_0_0_sh_mask.h"

/*
 * DO NOT use these for err/warn/info/debug messages.
 * Use dev_err, dev_warn, dev_info and dev_dbg instead.
 * They are more MGPU friendly.
 */
#undef pr_err
#undef pr_warn
#undef pr_info
#undef pr_debug

MODULE_FIRMWARE("amdgpu/arcturus_smc.bin");
MODULE_FIRMWARE("amdgpu/navi10_smc.bin");
MODULE_FIRMWARE("amdgpu/navi14_smc.bin");
MODULE_FIRMWARE("amdgpu/navi12_smc.bin");
MODULE_FIRMWARE("amdgpu/sienna_cichlid_smc.bin");
MODULE_FIRMWARE("amdgpu/navy_flounder_smc.bin");
MODULE_FIRMWARE("amdgpu/dimgrey_cavefish_smc.bin");

#define SMU11_VOLTAGE_SCALE 4

#define SMU11_MODE1_RESET_WAIT_TIME_IN_MS 500  /* 500 ms */

#define LINK_WIDTH_MAX				6
#define LINK_SPEED_MAX				3

#define smnPCIE_LC_LINK_WIDTH_CNTL		0x11140288
#define PCIE_LC_LINK_WIDTH_CNTL__LC_LINK_WIDTH_RD_MASK 0x00000070L
#define PCIE_LC_LINK_WIDTH_CNTL__LC_LINK_WIDTH_RD__SHIFT 0x4
#define smnPCIE_LC_SPEED_CNTL			0x11140290
#define PCIE_LC_SPEED_CNTL__LC_CURRENT_DATA_RATE_MASK 0xC000
#define PCIE_LC_SPEED_CNTL__LC_CURRENT_DATA_RATE__SHIFT 0xE

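/*
 * Arcturus exposes THM_BACO_CNTL at offset 0xA7 rather than the address in
 * the generic thm_11_0_2 headers, hence the dedicated define below (see the
 * commit subject above).
 */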
#define mmTHM_BACO_CNTL_ARCT			0xA7
#define mmTHM_BACO_CNTL_ARCT_BASE_IDX		0

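/*
 * Lookup tables decoding PCIE_LC_LINK_WIDTH_CNTL.LC_LINK_WIDTH_RD into a
 * lane count and PCIE_LC_SPEED_CNTL.LC_CURRENT_DATA_RATE into a link speed
 * in units of 0.1 GT/s (25 = 2.5 GT/s, ..., 160 = 16.0 GT/s).
 */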
static int link_width[] = {0, 1, 2, 4, 8, 12, 16};
static int link_speed[] = {25, 50, 80, 160};

int smu_v11_0_init_microcode(struct smu_context *smu)
{
	struct amdgpu_device *adev = smu->adev;
	const char *chip_name;
	char fw_name[SMU_FW_NAME_LEN];
	int err = 0;
	const struct smc_firmware_header_v1_0 *hdr;
	const struct common_firmware_header *header;
	struct amdgpu_firmware_info *ucode = NULL;

	if (amdgpu_sriov_vf(adev) &&
			((adev->asic_type == CHIP_NAVI12) ||
			 (adev->asic_type == CHIP_SIENNA_CICHLID)))
		return 0;

	switch (adev->asic_type) {
	case CHIP_ARCTURUS:
		chip_name = "arcturus";
		break;
	case CHIP_NAVI10:
		chip_name = "navi10";
		break;
	case CHIP_NAVI14:
		chip_name = "navi14";
		break;
	case CHIP_NAVI12:
		chip_name = "navi12";
		break;
	case CHIP_SIENNA_CICHLID:
		chip_name = "sienna_cichlid";
		break;
	case CHIP_NAVY_FLOUNDER:
		chip_name = "navy_flounder";
		break;
	case CHIP_DIMGREY_CAVEFISH:
		chip_name = "dimgrey_cavefish";
		break;
	default:
		dev_err(adev->dev, "Unsupported ASIC type %d\n", adev->asic_type);
		return -EINVAL;
	}

	snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_smc.bin", chip_name);

	err = request_firmware(&adev->pm.fw, fw_name, adev->dev);
	if (err)
		goto out;
	err = amdgpu_ucode_validate(adev->pm.fw);
	if (err)
		goto out;

	hdr = (const struct smc_firmware_header_v1_0 *) adev->pm.fw->data;
	amdgpu_ucode_print_smc_hdr(&hdr->header);
	adev->pm.fw_version = le32_to_cpu(hdr->header.ucode_version);

	if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) {
		ucode = &adev->firmware.ucode[AMDGPU_UCODE_ID_SMC];
		ucode->ucode_id = AMDGPU_UCODE_ID_SMC;
		ucode->fw = adev->pm.fw;
		header = (const struct common_firmware_header *)ucode->fw->data;
		adev->firmware.fw_size +=
			ALIGN(le32_to_cpu(header->ucode_size_bytes), PAGE_SIZE);
	}

out:
	if (err) {
		DRM_ERROR("smu_v11_0: Failed to load firmware \"%s\"\n",
			  fw_name);
		release_firmware(adev->pm.fw);
		adev->pm.fw = NULL;
	}
	return err;
}

void smu_v11_0_fini_microcode(struct smu_context *smu)
{
	struct amdgpu_device *adev = smu->adev;

	release_firmware(adev->pm.fw);
	adev->pm.fw = NULL;
	adev->pm.fw_version = 0;
}

int smu_v11_0_load_microcode(struct smu_context *smu)
{
	struct amdgpu_device *adev = smu->adev;
	const uint32_t *src;
	const struct smc_firmware_header_v1_0 *hdr;
	uint32_t addr_start = MP1_SRAM;
	uint32_t i;
	uint32_t smc_fw_size;
	uint32_t mp1_fw_flags;

	hdr = (const struct smc_firmware_header_v1_0 *) adev->pm.fw->data;
	src = (const uint32_t *)(adev->pm.fw->data +
		le32_to_cpu(hdr->header.ucode_array_offset_bytes));
	smc_fw_size = le32_to_cpu(hdr->header.ucode_size_bytes);

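	/*
	 * Copy the firmware payload into MP1 SRAM one dword at a time (the
	 * loop deliberately skips the first and last dwords of the image),
	 * then pulse the MP1 reset and poll for the INTERRUPTS_ENABLED flag
	 * that signals the firmware is up.
	 */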
	for (i = 1; i < smc_fw_size/4 - 1; i++) {
		WREG32_PCIE(addr_start, src[i]);
		addr_start += 4;
	}

	WREG32_PCIE(MP1_Public | (smnMP1_PUB_CTRL & 0xffffffff),
		1 & MP1_SMN_PUB_CTRL__RESET_MASK);
	WREG32_PCIE(MP1_Public | (smnMP1_PUB_CTRL & 0xffffffff),
		1 & ~MP1_SMN_PUB_CTRL__RESET_MASK);

	for (i = 0; i < adev->usec_timeout; i++) {
		mp1_fw_flags = RREG32_PCIE(MP1_Public |
			(smnMP1_FIRMWARE_FLAGS & 0xffffffff));
		if ((mp1_fw_flags & MP1_FIRMWARE_FLAGS__INTERRUPTS_ENABLED_MASK) >>
			MP1_FIRMWARE_FLAGS__INTERRUPTS_ENABLED__SHIFT)
			break;
		udelay(1);
	}

	if (i == adev->usec_timeout)
		return -ETIME;

	return 0;
}

int smu_v11_0_check_fw_status(struct smu_context *smu)
{
	struct amdgpu_device *adev = smu->adev;
	uint32_t mp1_fw_flags;

	mp1_fw_flags = RREG32_PCIE(MP1_Public |
				   (smnMP1_FIRMWARE_FLAGS & 0xffffffff));

	if ((mp1_fw_flags & MP1_FIRMWARE_FLAGS__INTERRUPTS_ENABLED_MASK) >>
	    MP1_FIRMWARE_FLAGS__INTERRUPTS_ENABLED__SHIFT)
		return 0;

	return -EIO;
}

int smu_v11_0_check_fw_version(struct smu_context *smu)
{
	struct amdgpu_device *adev = smu->adev;
	uint32_t if_version = 0xff, smu_version = 0xff;
	uint16_t smu_major;
	uint8_t smu_minor, smu_debug;
	int ret = 0;

	ret = smu_cmn_get_smc_version(smu, &if_version, &smu_version);
	if (ret)
		return ret;

	smu_major = (smu_version >> 16) & 0xffff;
	smu_minor = (smu_version >> 8) & 0xff;
	smu_debug = (smu_version >> 0) & 0xff;
	if (smu->is_apu)
		adev->pm.fw_version = smu_version;

	switch (smu->adev->asic_type) {
	case CHIP_ARCTURUS:
		smu->smc_driver_if_version = SMU11_DRIVER_IF_VERSION_ARCT;
		break;
	case CHIP_NAVI10:
		smu->smc_driver_if_version = SMU11_DRIVER_IF_VERSION_NV10;
		break;
	case CHIP_NAVI12:
		smu->smc_driver_if_version = SMU11_DRIVER_IF_VERSION_NV12;
		break;
	case CHIP_NAVI14:
		smu->smc_driver_if_version = SMU11_DRIVER_IF_VERSION_NV14;
		break;
	case CHIP_SIENNA_CICHLID:
		smu->smc_driver_if_version = SMU11_DRIVER_IF_VERSION_Sienna_Cichlid;
		break;
	case CHIP_NAVY_FLOUNDER:
		smu->smc_driver_if_version = SMU11_DRIVER_IF_VERSION_Navy_Flounder;
		break;
	case CHIP_VANGOGH:
		smu->smc_driver_if_version = SMU11_DRIVER_IF_VERSION_VANGOGH;
		break;
	case CHIP_DIMGREY_CAVEFISH:
		smu->smc_driver_if_version = SMU11_DRIVER_IF_VERSION_Dimgrey_Cavefish;
		break;
	default:
		dev_err(smu->adev->dev, "smu unsupported asic type:%d.\n", smu->adev->asic_type);
		smu->smc_driver_if_version = SMU11_DRIVER_IF_VERSION_INV;
		break;
	}

	/*
	 * 1. An if_version mismatch is not critical, as the firmware is
	 * designed to be backward compatible.
	 * 2. New firmware usually brings some optimizations, but those are
	 * visible only with the paired driver.
	 * Considering the above, we just leave the user a warning message
	 * instead of halting driver loading.
	 */
	if (if_version != smu->smc_driver_if_version) {
		dev_info(smu->adev->dev, "smu driver if version = 0x%08x, smu fw if version = 0x%08x, "
			"smu fw version = 0x%08x (%d.%d.%d)\n",
			smu->smc_driver_if_version, if_version,
			smu_version, smu_major, smu_minor, smu_debug);
		dev_warn(smu->adev->dev, "SMU driver if version not matched\n");
	}

	return ret;
}

static int smu_v11_0_set_pptable_v2_0(struct smu_context *smu, void **table, uint32_t *size)
{
	struct amdgpu_device *adev = smu->adev;
	uint32_t ppt_offset_bytes;
	const struct smc_firmware_header_v2_0 *v2;

	v2 = (const struct smc_firmware_header_v2_0 *) adev->pm.fw->data;

	ppt_offset_bytes = le32_to_cpu(v2->ppt_offset_bytes);
	*size = le32_to_cpu(v2->ppt_size_bytes);
	*table = (uint8_t *)v2 + ppt_offset_bytes;

	return 0;
}

static int smu_v11_0_set_pptable_v2_1(struct smu_context *smu, void **table,
				      uint32_t *size, uint32_t pptable_id)
{
	struct amdgpu_device *adev = smu->adev;
	const struct smc_firmware_header_v2_1 *v2_1;
	struct smc_soft_pptable_entry *entries;
	uint32_t pptable_count = 0;
	int i = 0;

	v2_1 = (const struct smc_firmware_header_v2_1 *) adev->pm.fw->data;
	entries = (struct smc_soft_pptable_entry *)
		((uint8_t *)v2_1 + le32_to_cpu(v2_1->pptable_entry_offset));
	pptable_count = le32_to_cpu(v2_1->pptable_count);
	for (i = 0; i < pptable_count; i++) {
		if (le32_to_cpu(entries[i].id) == pptable_id) {
			*table = ((uint8_t *)v2_1 + le32_to_cpu(entries[i].ppt_offset_bytes));
			*size = le32_to_cpu(entries[i].ppt_size_bytes);
			break;
		}
	}

	if (i == pptable_count)
		return -EINVAL;

	return 0;
}

int smu_v11_0_setup_pptable(struct smu_context *smu)
{
	struct amdgpu_device *adev = smu->adev;
	const struct smc_firmware_header_v1_0 *hdr;
	int ret, index;
	uint32_t size = 0;
	uint16_t atom_table_size;
	uint8_t frev, crev;
	void *table;
	uint16_t version_major, version_minor;

	if (!amdgpu_sriov_vf(adev)) {
		hdr = (const struct smc_firmware_header_v1_0 *) adev->pm.fw->data;
		version_major = le16_to_cpu(hdr->header.header_version_major);
		version_minor = le16_to_cpu(hdr->header.header_version_minor);
		if (version_major == 2 && smu->smu_table.boot_values.pp_table_id > 0) {
			dev_info(adev->dev, "use driver provided pptable %d\n", smu->smu_table.boot_values.pp_table_id);
			switch (version_minor) {
			case 0:
				ret = smu_v11_0_set_pptable_v2_0(smu, &table, &size);
				break;
			case 1:
				ret = smu_v11_0_set_pptable_v2_1(smu, &table, &size,
								smu->smu_table.boot_values.pp_table_id);
				break;
			default:
				ret = -EINVAL;
				break;
			}
			if (ret)
				return ret;
			goto out;
		}
	}

	dev_info(adev->dev, "use vbios provided pptable\n");
	index = get_index_into_master_table(atom_master_list_of_data_tables_v2_1,
						powerplayinfo);

	ret = amdgpu_atombios_get_data_table(adev, index, &atom_table_size, &frev, &crev,
						(uint8_t **)&table);
	if (ret)
		return ret;
	size = atom_table_size;

out:
	if (!smu->smu_table.power_play_table)
		smu->smu_table.power_play_table = table;
	if (!smu->smu_table.power_play_table_size)
		smu->smu_table.power_play_table_size = size;

	return 0;
}

int smu_v11_0_init_smc_tables(struct smu_context *smu)
{
	struct smu_table_context *smu_table = &smu->smu_table;
	struct smu_table *tables = smu_table->tables;
	int ret = 0;

	smu_table->driver_pptable =
		kzalloc(tables[SMU_TABLE_PPTABLE].size, GFP_KERNEL);
	if (!smu_table->driver_pptable) {
		ret = -ENOMEM;
		goto err0_out;
	}

	smu_table->max_sustainable_clocks =
		kzalloc(sizeof(struct smu_11_0_max_sustainable_clocks), GFP_KERNEL);
	if (!smu_table->max_sustainable_clocks) {
		ret = -ENOMEM;
		goto err1_out;
	}

	/* Arcturus does not support OVERDRIVE */
	if (tables[SMU_TABLE_OVERDRIVE].size) {
		smu_table->overdrive_table =
			kzalloc(tables[SMU_TABLE_OVERDRIVE].size, GFP_KERNEL);
		if (!smu_table->overdrive_table) {
			ret = -ENOMEM;
			goto err2_out;
		}

		smu_table->boot_overdrive_table =
			kzalloc(tables[SMU_TABLE_OVERDRIVE].size, GFP_KERNEL);
		if (!smu_table->boot_overdrive_table) {
			ret = -ENOMEM;
			goto err3_out;
		}
	}

	return 0;

err3_out:
	kfree(smu_table->overdrive_table);
err2_out:
	kfree(smu_table->max_sustainable_clocks);
err1_out:
	kfree(smu_table->driver_pptable);
err0_out:
	return ret;
}

int smu_v11_0_fini_smc_tables(struct smu_context *smu)
{
	struct smu_table_context *smu_table = &smu->smu_table;
	struct smu_dpm_context *smu_dpm = &smu->smu_dpm;

	kfree(smu_table->gpu_metrics_table);
	kfree(smu_table->boot_overdrive_table);
	kfree(smu_table->overdrive_table);
	kfree(smu_table->max_sustainable_clocks);
	kfree(smu_table->driver_pptable);
	kfree(smu_table->clocks_table);
	smu_table->gpu_metrics_table = NULL;
	smu_table->boot_overdrive_table = NULL;
	smu_table->overdrive_table = NULL;
	smu_table->max_sustainable_clocks = NULL;
	smu_table->driver_pptable = NULL;
	smu_table->clocks_table = NULL;
	kfree(smu_table->hardcode_pptable);
	smu_table->hardcode_pptable = NULL;

	kfree(smu_table->metrics_table);
	kfree(smu_table->watermarks_table);
	smu_table->metrics_table = NULL;
	smu_table->watermarks_table = NULL;
	smu_table->metrics_time = 0;

	kfree(smu_dpm->dpm_context);
	kfree(smu_dpm->golden_dpm_context);
	kfree(smu_dpm->dpm_current_power_state);
	kfree(smu_dpm->dpm_request_power_state);
	smu_dpm->dpm_context = NULL;
	smu_dpm->golden_dpm_context = NULL;
	smu_dpm->dpm_context_size = 0;
	smu_dpm->dpm_current_power_state = NULL;
	smu_dpm->dpm_request_power_state = NULL;

	return 0;
}

int smu_v11_0_init_power(struct smu_context *smu)
{
	struct smu_power_context *smu_power = &smu->smu_power;
	size_t size = smu->adev->asic_type == CHIP_VANGOGH ?
			sizeof(struct smu_11_5_power_context) :
			sizeof(struct smu_11_0_power_context);

	smu_power->power_context = kzalloc(size, GFP_KERNEL);
	if (!smu_power->power_context)
		return -ENOMEM;
	smu_power->power_context_size = size;

	return 0;
}

int smu_v11_0_fini_power(struct smu_context *smu)
{
	struct smu_power_context *smu_power = &smu->smu_power;

	kfree(smu_power->power_context);
	smu_power->power_context = NULL;
	smu_power->power_context_size = 0;

	return 0;
}

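/*
 * Query one of the bootup clocks from the VBIOS via the getsmuclockinfo
 * command table. The frequency is reported in Hz and converted to the
 * 10 kHz units used throughout the SMU code.
 */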
static int smu_v11_0_atom_get_smu_clockinfo(struct amdgpu_device *adev,
					    uint8_t clk_id,
					    uint8_t syspll_id,
					    uint32_t *clk_freq)
{
	struct atom_get_smu_clock_info_parameters_v3_1 input = {0};
	struct atom_get_smu_clock_info_output_parameters_v3_1 *output;
	int ret, index;

	input.clk_id = clk_id;
	input.syspll_id = syspll_id;
	input.command = GET_SMU_CLOCK_INFO_V3_1_GET_CLOCK_FREQ;
	index = get_index_into_master_table(atom_master_list_of_command_functions_v2_1,
					    getsmuclockinfo);

	ret = amdgpu_atom_execute_table(adev->mode_info.atom_context, index,
					(uint32_t *)&input);
	if (ret)
		return -EINVAL;

	output = (struct atom_get_smu_clock_info_output_parameters_v3_1 *)&input;
	*clk_freq = le32_to_cpu(output->atom_smu_outputclkfreq.smu_clock_freq_hz) / 10000;

	return 0;
}

int smu_v11_0_get_vbios_bootup_values(struct smu_context *smu)
{
	int ret, index;
	uint16_t size;
	uint8_t frev, crev;
	struct atom_common_table_header *header;
	struct atom_firmware_info_v3_3 *v_3_3;
	struct atom_firmware_info_v3_1 *v_3_1;

	index = get_index_into_master_table(atom_master_list_of_data_tables_v2_1,
					    firmwareinfo);

	ret = amdgpu_atombios_get_data_table(smu->adev, index, &size, &frev, &crev,
				      (uint8_t **)&header);
	if (ret)
		return ret;

	if (header->format_revision != 3) {
		dev_err(smu->adev->dev, "unknown atom_firmware_info format revision for smu11!\n");
		return -EINVAL;
	}

	switch (header->content_revision) {
	case 0:
	case 1:
	case 2:
		v_3_1 = (struct atom_firmware_info_v3_1 *)header;
		smu->smu_table.boot_values.revision = v_3_1->firmware_revision;
		smu->smu_table.boot_values.gfxclk = v_3_1->bootup_sclk_in10khz;
		smu->smu_table.boot_values.uclk = v_3_1->bootup_mclk_in10khz;
		smu->smu_table.boot_values.socclk = 0;
		smu->smu_table.boot_values.dcefclk = 0;
		smu->smu_table.boot_values.vddc = v_3_1->bootup_vddc_mv;
		smu->smu_table.boot_values.vddci = v_3_1->bootup_vddci_mv;
		smu->smu_table.boot_values.mvddc = v_3_1->bootup_mvddc_mv;
		smu->smu_table.boot_values.vdd_gfx = v_3_1->bootup_vddgfx_mv;
		smu->smu_table.boot_values.cooling_id = v_3_1->coolingsolution_id;
		smu->smu_table.boot_values.pp_table_id = 0;
		smu->smu_table.boot_values.firmware_caps = v_3_1->firmware_capability;
		break;
	case 3:
	default:
		v_3_3 = (struct atom_firmware_info_v3_3 *)header;
		smu->smu_table.boot_values.revision = v_3_3->firmware_revision;
		smu->smu_table.boot_values.gfxclk = v_3_3->bootup_sclk_in10khz;
		smu->smu_table.boot_values.uclk = v_3_3->bootup_mclk_in10khz;
		smu->smu_table.boot_values.socclk = 0;
		smu->smu_table.boot_values.dcefclk = 0;
		smu->smu_table.boot_values.vddc = v_3_3->bootup_vddc_mv;
		smu->smu_table.boot_values.vddci = v_3_3->bootup_vddci_mv;
		smu->smu_table.boot_values.mvddc = v_3_3->bootup_mvddc_mv;
		smu->smu_table.boot_values.vdd_gfx = v_3_3->bootup_vddgfx_mv;
		smu->smu_table.boot_values.cooling_id = v_3_3->coolingsolution_id;
		smu->smu_table.boot_values.pp_table_id = v_3_3->pplib_pptable_id;
		smu->smu_table.boot_values.firmware_caps = v_3_3->firmware_capability;
	}

	smu->smu_table.boot_values.format_revision = header->format_revision;
	smu->smu_table.boot_values.content_revision = header->content_revision;

	smu_v11_0_atom_get_smu_clockinfo(smu->adev,
					 (uint8_t)SMU11_SYSPLL0_SOCCLK_ID,
					 (uint8_t)0,
					 &smu->smu_table.boot_values.socclk);

	smu_v11_0_atom_get_smu_clockinfo(smu->adev,
					 (uint8_t)SMU11_SYSPLL0_DCEFCLK_ID,
					 (uint8_t)0,
					 &smu->smu_table.boot_values.dcefclk);

	smu_v11_0_atom_get_smu_clockinfo(smu->adev,
					 (uint8_t)SMU11_SYSPLL0_ECLK_ID,
					 (uint8_t)0,
					 &smu->smu_table.boot_values.eclk);

	smu_v11_0_atom_get_smu_clockinfo(smu->adev,
					 (uint8_t)SMU11_SYSPLL0_VCLK_ID,
					 (uint8_t)0,
					 &smu->smu_table.boot_values.vclk);

	smu_v11_0_atom_get_smu_clockinfo(smu->adev,
					 (uint8_t)SMU11_SYSPLL0_DCLK_ID,
					 (uint8_t)0,
					 &smu->smu_table.boot_values.dclk);

	if ((smu->smu_table.boot_values.format_revision == 3) &&
	    (smu->smu_table.boot_values.content_revision >= 2))
		smu_v11_0_atom_get_smu_clockinfo(smu->adev,
						 (uint8_t)SMU11_SYSPLL1_0_FCLK_ID,
						 (uint8_t)SMU11_SYSPLL1_2_ID,
						 &smu->smu_table.boot_values.fclk);

	smu_v11_0_atom_get_smu_clockinfo(smu->adev,
					 (uint8_t)SMU11_SYSPLL3_1_LCLK_ID,
					 (uint8_t)SMU11_SYSPLL3_1_ID,
					 &smu->smu_table.boot_values.lclk);

	return 0;
}

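/*
 * Report the driver-allocated memory pool to the SMU: first the CPU (system
 * virtual) address, then the DRAM (MC) address and the pool size.
 */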
int smu_v11_0_notify_memory_pool_location(struct smu_context *smu)
{
	struct smu_table_context *smu_table = &smu->smu_table;
	struct smu_table *memory_pool = &smu_table->memory_pool;
	int ret = 0;
	uint64_t address;
	uint32_t address_low, address_high;

	if (memory_pool->size == 0 || memory_pool->cpu_addr == NULL)
		return ret;

	address = (uintptr_t)memory_pool->cpu_addr;
	address_high = (uint32_t)upper_32_bits(address);
	address_low  = (uint32_t)lower_32_bits(address);

	ret = smu_cmn_send_smc_msg_with_param(smu,
					  SMU_MSG_SetSystemVirtualDramAddrHigh,
					  address_high,
					  NULL);
	if (ret)
		return ret;
	ret = smu_cmn_send_smc_msg_with_param(smu,
					  SMU_MSG_SetSystemVirtualDramAddrLow,
					  address_low,
					  NULL);
	if (ret)
		return ret;

	address = memory_pool->mc_address;
	address_high = (uint32_t)upper_32_bits(address);
	address_low  = (uint32_t)lower_32_bits(address);

	ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_DramLogSetDramAddrHigh,
					  address_high, NULL);
	if (ret)
		return ret;
	ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_DramLogSetDramAddrLow,
					  address_low, NULL);
	if (ret)
		return ret;
	ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_DramLogSetDramSize,
					  (uint32_t)memory_pool->size, NULL);
	if (ret)
		return ret;

	return ret;
}

int smu_v11_0_set_min_deep_sleep_dcefclk(struct smu_context *smu, uint32_t clk)
{
	int ret;

	ret = smu_cmn_send_smc_msg_with_param(smu,
					  SMU_MSG_SetMinDeepSleepDcefclk, clk, NULL);
	if (ret)
		dev_err(smu->adev->dev, "SMU11 attempt to set divider for DCEFCLK failed!");

	return ret;
}

int smu_v11_0_set_driver_table_location(struct smu_context *smu)
{
	struct smu_table *driver_table = &smu->smu_table.driver_table;
	int ret = 0;

	if (driver_table->mc_address) {
		ret = smu_cmn_send_smc_msg_with_param(smu,
				SMU_MSG_SetDriverDramAddrHigh,
				upper_32_bits(driver_table->mc_address),
				NULL);
		if (!ret)
			ret = smu_cmn_send_smc_msg_with_param(smu,
				SMU_MSG_SetDriverDramAddrLow,
				lower_32_bits(driver_table->mc_address),
				NULL);
	}

	return ret;
}

int smu_v11_0_set_tool_table_location(struct smu_context *smu)
{
	int ret = 0;
	struct smu_table *tool_table = &smu->smu_table.tables[SMU_TABLE_PMSTATUSLOG];

	if (tool_table->mc_address) {
		ret = smu_cmn_send_smc_msg_with_param(smu,
				SMU_MSG_SetToolsDramAddrHigh,
				upper_32_bits(tool_table->mc_address),
				NULL);
		if (!ret)
			ret = smu_cmn_send_smc_msg_with_param(smu,
				SMU_MSG_SetToolsDramAddrLow,
				lower_32_bits(tool_table->mc_address),
				NULL);
	}

	return ret;
}

int smu_v11_0_init_display_count(struct smu_context *smu, uint32_t count)
{
	struct amdgpu_device *adev = smu->adev;

	/* Navy_Flounder/Dimgrey_Cavefish do not currently support changing
	 * the display count
	 */
	if (adev->asic_type >= CHIP_NAVY_FLOUNDER &&
	    adev->asic_type <= CHIP_DIMGREY_CAVEFISH)
		return 0;

	return smu_cmn_send_smc_msg_with_param(smu,
					       SMU_MSG_NumOfDisplays,
					       count,
					       NULL);
}


int smu_v11_0_set_allowed_mask(struct smu_context *smu)
{
	struct smu_feature *feature = &smu->smu_feature;
	int ret = 0;
	uint32_t feature_mask[2];

	if (bitmap_empty(feature->allowed, SMU_FEATURE_MAX) || feature->feature_num < 64)
		goto failed;

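	/* The 64-bit allowed-feature bitmap is sent as two 32-bit halves. */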
	bitmap_copy((unsigned long *)feature_mask, feature->allowed, 64);

	ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_SetAllowedFeaturesMaskHigh,
					  feature_mask[1], NULL);
	if (ret)
		goto failed;

	ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_SetAllowedFeaturesMaskLow,
					  feature_mask[0], NULL);
	if (ret)
		goto failed;

failed:
	return ret;
}

int smu_v11_0_system_features_control(struct smu_context *smu,
				      bool en)
{
	struct smu_feature *feature = &smu->smu_feature;
	uint32_t feature_mask[2];
	int ret = 0;

	ret = smu_cmn_send_smc_msg(smu, (en ? SMU_MSG_EnableAllSmuFeatures :
				     SMU_MSG_DisableAllSmuFeatures), NULL);
	if (ret)
		return ret;

	bitmap_zero(feature->enabled, feature->feature_num);
	bitmap_zero(feature->supported, feature->feature_num);

	if (en) {
		ret = smu_cmn_get_enabled_mask(smu, feature_mask, 2);
		if (ret)
			return ret;

		bitmap_copy(feature->enabled, (unsigned long *)&feature_mask,
			    feature->feature_num);
		bitmap_copy(feature->supported, (unsigned long *)&feature_mask,
			    feature->feature_num);
	}

	return ret;
}

int smu_v11_0_notify_display_change(struct smu_context *smu)
{
	int ret = 0;

	if (smu_cmn_feature_is_enabled(smu, SMU_FEATURE_DPM_UCLK_BIT) &&
	    smu->adev->gmc.vram_type == AMDGPU_VRAM_TYPE_HBM)
		ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_SetUclkFastSwitch, 1, NULL);

	return ret;
}

static int
smu_v11_0_get_max_sustainable_clock(struct smu_context *smu, uint32_t *clock,
				    enum smu_clk_type clock_select)
{
	int ret = 0;
	int clk_id;

	if ((smu_cmn_to_asic_specific_index(smu, CMN2ASIC_MAPPING_MSG, SMU_MSG_GetDcModeMaxDpmFreq) < 0) ||
	    (smu_cmn_to_asic_specific_index(smu, CMN2ASIC_MAPPING_MSG, SMU_MSG_GetMaxDpmFreq) < 0))
		return 0;

	clk_id = smu_cmn_to_asic_specific_index(smu,
						CMN2ASIC_MAPPING_CLK,
						clock_select);
	if (clk_id < 0)
		return -EINVAL;

	ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_GetDcModeMaxDpmFreq,
					  clk_id << 16, clock);
	if (ret) {
		dev_err(smu->adev->dev, "[GetMaxSustainableClock] Failed to get max DC clock from SMC!");
		return ret;
	}

	if (*clock != 0)
		return 0;

	/* if DC limit is zero, return AC limit */
	ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_GetMaxDpmFreq,
					  clk_id << 16, clock);
	if (ret) {
		dev_err(smu->adev->dev, "[GetMaxSustainableClock] failed to get max AC clock from SMC!");
		return ret;
	}

	return 0;
}

int smu_v11_0_init_max_sustainable_clocks(struct smu_context *smu)
{
	struct smu_11_0_max_sustainable_clocks *max_sustainable_clocks =
			smu->smu_table.max_sustainable_clocks;
	int ret = 0;

	max_sustainable_clocks->uclock = smu->smu_table.boot_values.uclk / 100;
	max_sustainable_clocks->soc_clock = smu->smu_table.boot_values.socclk / 100;
	max_sustainable_clocks->dcef_clock = smu->smu_table.boot_values.dcefclk / 100;
	max_sustainable_clocks->display_clock = 0xFFFFFFFF;
	max_sustainable_clocks->phy_clock = 0xFFFFFFFF;
	max_sustainable_clocks->pixel_clock = 0xFFFFFFFF;

	if (smu_cmn_feature_is_enabled(smu, SMU_FEATURE_DPM_UCLK_BIT)) {
		ret = smu_v11_0_get_max_sustainable_clock(smu,
							  &(max_sustainable_clocks->uclock),
							  SMU_UCLK);
		if (ret) {
			dev_err(smu->adev->dev, "[%s] failed to get max UCLK from SMC!",
			       __func__);
			return ret;
		}
	}

	if (smu_cmn_feature_is_enabled(smu, SMU_FEATURE_DPM_SOCCLK_BIT)) {
		ret = smu_v11_0_get_max_sustainable_clock(smu,
							  &(max_sustainable_clocks->soc_clock),
							  SMU_SOCCLK);
		if (ret) {
			dev_err(smu->adev->dev, "[%s] failed to get max SOCCLK from SMC!",
			       __func__);
			return ret;
		}
	}

	if (smu_cmn_feature_is_enabled(smu, SMU_FEATURE_DPM_DCEFCLK_BIT)) {
		ret = smu_v11_0_get_max_sustainable_clock(smu,
							  &(max_sustainable_clocks->dcef_clock),
							  SMU_DCEFCLK);
		if (ret) {
			dev_err(smu->adev->dev, "[%s] failed to get max DCEFCLK from SMC!",
			       __func__);
			return ret;
		}

		ret = smu_v11_0_get_max_sustainable_clock(smu,
							  &(max_sustainable_clocks->display_clock),
							  SMU_DISPCLK);
		if (ret) {
			dev_err(smu->adev->dev, "[%s] failed to get max DISPCLK from SMC!",
			       __func__);
			return ret;
		}
		ret = smu_v11_0_get_max_sustainable_clock(smu,
							  &(max_sustainable_clocks->phy_clock),
							  SMU_PHYCLK);
		if (ret) {
			dev_err(smu->adev->dev, "[%s] failed to get max PHYCLK from SMC!",
			       __func__);
			return ret;
		}
		ret = smu_v11_0_get_max_sustainable_clock(smu,
							  &(max_sustainable_clocks->pixel_clock),
							  SMU_PIXCLK);
		if (ret) {
			dev_err(smu->adev->dev, "[%s] failed to get max PIXCLK from SMC!",
			       __func__);
			return ret;
		}
	}

	if (max_sustainable_clocks->soc_clock < max_sustainable_clocks->uclock)
		max_sustainable_clocks->uclock = max_sustainable_clocks->soc_clock;

	return 0;
}

int smu_v11_0_get_current_power_limit(struct smu_context *smu,
				      uint32_t *power_limit)
{
	int power_src;
	int ret = 0;

	if (!smu_cmn_feature_is_enabled(smu, SMU_FEATURE_PPT_BIT))
		return -EINVAL;

	power_src = smu_cmn_to_asic_specific_index(smu,
					CMN2ASIC_MAPPING_PWR,
					smu->adev->pm.ac_power ?
					SMU_POWER_SOURCE_AC :
					SMU_POWER_SOURCE_DC);
	if (power_src < 0)
		return -EINVAL;

	/*
	 * BIT 24-31: ControllerId (only PPT0 is supported for now)
	 * BIT 16-23: PowerSource
	 */
	ret = smu_cmn_send_smc_msg_with_param(smu,
					  SMU_MSG_GetPptLimit,
					  (0 << 24) | (power_src << 16),
					  power_limit);
	if (ret)
		dev_err(smu->adev->dev, "[%s] get PPT limit failed!", __func__);

	return ret;
}

int smu_v11_0_set_power_limit(struct smu_context *smu, uint32_t n)
{
	int power_src;
	int ret = 0;

	if (!smu_cmn_feature_is_enabled(smu, SMU_FEATURE_PPT_BIT)) {
		dev_err(smu->adev->dev, "Setting new power limit is not supported!\n");
		return -EOPNOTSUPP;
	}

	power_src = smu_cmn_to_asic_specific_index(smu,
					CMN2ASIC_MAPPING_PWR,
					smu->adev->pm.ac_power ?
					SMU_POWER_SOURCE_AC :
					SMU_POWER_SOURCE_DC);
	if (power_src < 0)
		return -EINVAL;

	/*
	 * BIT 24-31: ControllerId (only PPT0 is supported for now)
	 * BIT 16-23: PowerSource
	 * BIT 0-15: PowerLimit
	 */
	n &= 0xFFFF;
	n |= 0 << 24;
	n |= (power_src) << 16;
	ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_SetPptLimit, n, NULL);
	if (ret) {
		dev_err(smu->adev->dev, "[%s] Set power limit Failed!\n", __func__);
		return ret;
	}

	smu->current_power_limit = n;

	return 0;
}

static int smu_v11_0_ack_ac_dc_interrupt(struct smu_context *smu)
{
	return smu_cmn_send_smc_msg(smu,
				SMU_MSG_ReenableAcDcInterrupt,
				NULL);
}

static int smu_v11_0_process_pending_interrupt(struct smu_context *smu)
{
	int ret = 0;

	if (smu->dc_controlled_by_gpio &&
	    smu_cmn_feature_is_enabled(smu, SMU_FEATURE_ACDC_BIT))
		ret = smu_v11_0_ack_ac_dc_interrupt(smu);

	return ret;
}

void smu_v11_0_interrupt_work(struct smu_context *smu)
{
	if (smu_v11_0_ack_ac_dc_interrupt(smu))
		dev_err(smu->adev->dev, "Ack AC/DC interrupt Failed!\n");
}

int smu_v11_0_enable_thermal_alert(struct smu_context *smu)
{
	int ret = 0;

	if (smu->smu_table.thermal_controller_type) {
		ret = amdgpu_irq_get(smu->adev, &smu->irq_source, 0);
		if (ret)
			return ret;
	}

	/*
	 * After init there might be missed interrupts that were triggered
	 * before the driver registered for them (e.g. AC/DC).
	 */
	return smu_v11_0_process_pending_interrupt(smu);
}

int smu_v11_0_disable_thermal_alert(struct smu_context *smu)
{
	return amdgpu_irq_put(smu->adev, &smu->irq_source, 0);
}

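/*
 * Decode an SVI voltage ID: VDDC(mV) = (6200 - VID * 25) / SMU11_VOLTAGE_SCALE,
 * i.e. 1550 mV max in 6.25 mV steps.
 */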
static uint16_t convert_to_vddc(uint8_t vid)
{
	return (uint16_t) ((6200 - (vid * 25)) / SMU11_VOLTAGE_SCALE);
}

int smu_v11_0_get_gfx_vdd(struct smu_context *smu, uint32_t *value)
{
	struct amdgpu_device *adev = smu->adev;
	uint32_t vdd = 0, val_vid = 0;

	if (!value)
		return -EINVAL;
	val_vid = (RREG32_SOC15(SMUIO, 0, mmSMUSVI0_TEL_PLANE0) &
		SMUSVI0_TEL_PLANE0__SVI0_PLANE0_VDDCOR_MASK) >>
		SMUSVI0_TEL_PLANE0__SVI0_PLANE0_VDDCOR__SHIFT;

	vdd = (uint32_t)convert_to_vddc((uint8_t)val_vid);

	*value = vdd;

	return 0;

}

int
smu_v11_0_display_clock_voltage_request(struct smu_context *smu,
					struct pp_display_clock_request
					*clock_req)
{
	enum amd_pp_clock_type clk_type = clock_req->clock_type;
	int ret = 0;
	enum smu_clk_type clk_select = 0;
	uint32_t clk_freq = clock_req->clock_freq_in_khz / 1000;

	if (smu_cmn_feature_is_enabled(smu, SMU_FEATURE_DPM_DCEFCLK_BIT) ||
		smu_cmn_feature_is_enabled(smu, SMU_FEATURE_DPM_UCLK_BIT)) {
		switch (clk_type) {
		case amd_pp_dcef_clock:
			clk_select = SMU_DCEFCLK;
			break;
		case amd_pp_disp_clock:
			clk_select = SMU_DISPCLK;
			break;
		case amd_pp_pixel_clock:
			clk_select = SMU_PIXCLK;
			break;
		case amd_pp_phy_clock:
			clk_select = SMU_PHYCLK;
			break;
		case amd_pp_mem_clock:
			clk_select = SMU_UCLK;
			break;
		default:
			dev_info(smu->adev->dev, "[%s] Invalid Clock Type!", __func__);
			ret = -EINVAL;
			break;
		}

		if (ret)
			goto failed;

		if (clk_select == SMU_UCLK && smu->disable_uclk_switch)
			return 0;

		ret = smu_v11_0_set_hard_freq_limited_range(smu, clk_select, clk_freq, 0);

		if (clk_select == SMU_UCLK)
			smu->hard_min_uclk_req_from_dal = clk_freq;
	}

failed:
	return ret;
}

int smu_v11_0_gfx_off_control(struct smu_context *smu, bool enable)
{
	int ret = 0;
	struct amdgpu_device *adev = smu->adev;

	switch (adev->asic_type) {
	case CHIP_NAVI10:
	case CHIP_NAVI14:
	case CHIP_NAVI12:
	case CHIP_SIENNA_CICHLID:
	case CHIP_NAVY_FLOUNDER:
	case CHIP_DIMGREY_CAVEFISH:
	case CHIP_VANGOGH:
		if (!(adev->pm.pp_feature & PP_GFXOFF_MASK))
			return 0;
		if (enable)
			ret = smu_cmn_send_smc_msg(smu, SMU_MSG_AllowGfxOff, NULL);
		else
			ret = smu_cmn_send_smc_msg(smu, SMU_MSG_DisallowGfxOff, NULL);
		break;
	default:
		break;
	}

	return ret;
}

uint32_t
smu_v11_0_get_fan_control_mode(struct smu_context *smu)
{
	if (smu_cmn_feature_is_enabled(smu, SMU_FEATURE_FAN_CONTROL_BIT))
		return AMD_FAN_CTRL_AUTO;
	else
		return smu->user_dpm_profile.fan_mode;
}

static int
smu_v11_0_auto_fan_control(struct smu_context *smu, bool auto_fan_control)
{
	int ret = 0;

	if (!smu_cmn_feature_is_supported(smu, SMU_FEATURE_FAN_CONTROL_BIT))
		return 0;

	ret = smu_cmn_feature_set_enabled(smu, SMU_FEATURE_FAN_CONTROL_BIT, auto_fan_control);
	if (ret)
		dev_err(smu->adev->dev, "[%s]%s smc FAN CONTROL feature failed!",
		       __func__, (auto_fan_control ? "Start" : "Stop"));

	return ret;
}

static int
smu_v11_0_set_fan_static_mode(struct smu_context *smu, uint32_t mode)
{
	struct amdgpu_device *adev = smu->adev;

	WREG32_SOC15(THM, 0, mmCG_FDO_CTRL2,
		     REG_SET_FIELD(RREG32_SOC15(THM, 0, mmCG_FDO_CTRL2),
				   CG_FDO_CTRL2, TMIN, 0));
	WREG32_SOC15(THM, 0, mmCG_FDO_CTRL2,
		     REG_SET_FIELD(RREG32_SOC15(THM, 0, mmCG_FDO_CTRL2),
				   CG_FDO_CTRL2, FDO_PWM_MODE, mode));

	return 0;
}

int
smu_v11_0_set_fan_speed_percent(struct smu_context *smu, uint32_t speed)
{
	struct amdgpu_device *adev = smu->adev;
	uint32_t duty100, duty;
	uint64_t tmp64;

	if (speed > 100)
		speed = 100;

	if (smu_v11_0_auto_fan_control(smu, 0))
		return -EINVAL;

	duty100 = REG_GET_FIELD(RREG32_SOC15(THM, 0, mmCG_FDO_CTRL1),
				CG_FDO_CTRL1, FMAX_DUTY100);
	if (!duty100)
		return -EINVAL;

	tmp64 = (uint64_t)speed * duty100;
	do_div(tmp64, 100);
	duty = (uint32_t)tmp64;

	WREG32_SOC15(THM, 0, mmCG_FDO_CTRL0,
		     REG_SET_FIELD(RREG32_SOC15(THM, 0, mmCG_FDO_CTRL0),
				   CG_FDO_CTRL0, FDO_STATIC_DUTY, duty));

	return smu_v11_0_set_fan_static_mode(smu, FDO_PWM_MODE_STATIC);
}

int
smu_v11_0_set_fan_control_mode(struct smu_context *smu,
			       uint32_t mode)
{
	int ret = 0;

	switch (mode) {
	case AMD_FAN_CTRL_NONE:
		ret = smu_v11_0_set_fan_speed_percent(smu, 100);
		break;
	case AMD_FAN_CTRL_MANUAL:
		ret = smu_v11_0_auto_fan_control(smu, 0);
		break;
	case AMD_FAN_CTRL_AUTO:
		ret = smu_v11_0_auto_fan_control(smu, 1);
		break;
	default:
		break;
	}

	if (ret) {
		dev_err(smu->adev->dev, "[%s]Set fan control mode failed!", __func__);
		return -EINVAL;
	}

	return ret;
}

int smu_v11_0_set_xgmi_pstate(struct smu_context *smu,
			      uint32_t pstate)
{
	return smu_cmn_send_smc_msg_with_param(smu,
					       SMU_MSG_SetXgmiMode,
					       pstate ? XGMI_MODE_PSTATE_D0 : XGMI_MODE_PSTATE_D3,
					       NULL);
}

static int smu_v11_0_set_irq_state(struct amdgpu_device *adev,
				   struct amdgpu_irq_src *source,
				   unsigned type,
				   enum amdgpu_interrupt_state state)
{
	struct smu_context *smu = &adev->smu;
	uint32_t low, high;
	uint32_t val = 0;

	switch (state) {
	case AMDGPU_IRQ_STATE_DISABLE:
		/* For THM irqs */
		val = RREG32_SOC15(THM, 0, mmTHM_THERMAL_INT_CTRL);
		val = REG_SET_FIELD(val, THM_THERMAL_INT_CTRL, THERM_INTH_MASK, 1);
		val = REG_SET_FIELD(val, THM_THERMAL_INT_CTRL, THERM_INTL_MASK, 1);
		WREG32_SOC15(THM, 0, mmTHM_THERMAL_INT_CTRL, val);

		WREG32_SOC15(THM, 0, mmTHM_THERMAL_INT_ENA, 0);

		/* For MP1 SW irqs */
		val = RREG32_SOC15(MP1, 0, mmMP1_SMN_IH_SW_INT_CTRL);
		val = REG_SET_FIELD(val, MP1_SMN_IH_SW_INT_CTRL, INT_MASK, 1);
		WREG32_SOC15(MP1, 0, mmMP1_SMN_IH_SW_INT_CTRL, val);

		break;
	case AMDGPU_IRQ_STATE_ENABLE:
		/* For THM irqs */
		low = max(SMU_THERMAL_MINIMUM_ALERT_TEMP,
				smu->thermal_range.min / SMU_TEMPERATURE_UNITS_PER_CENTIGRADES);
		high = min(SMU_THERMAL_MAXIMUM_ALERT_TEMP,
				smu->thermal_range.software_shutdown_temp);

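		/* Alert thresholds are programmed in whole degrees Celsius (8 bits). */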
		val = RREG32_SOC15(THM, 0, mmTHM_THERMAL_INT_CTRL);
		val = REG_SET_FIELD(val, THM_THERMAL_INT_CTRL, MAX_IH_CREDIT, 5);
		val = REG_SET_FIELD(val, THM_THERMAL_INT_CTRL, THERM_IH_HW_ENA, 1);
		val = REG_SET_FIELD(val, THM_THERMAL_INT_CTRL, THERM_INTH_MASK, 0);
		val = REG_SET_FIELD(val, THM_THERMAL_INT_CTRL, THERM_INTL_MASK, 0);
		val = REG_SET_FIELD(val, THM_THERMAL_INT_CTRL, DIG_THERM_INTH, (high & 0xff));
		val = REG_SET_FIELD(val, THM_THERMAL_INT_CTRL, DIG_THERM_INTL, (low & 0xff));
		val = val & (~THM_THERMAL_INT_CTRL__THERM_TRIGGER_MASK_MASK);
		WREG32_SOC15(THM, 0, mmTHM_THERMAL_INT_CTRL, val);

		val = (1 << THM_THERMAL_INT_ENA__THERM_INTH_CLR__SHIFT);
		val |= (1 << THM_THERMAL_INT_ENA__THERM_INTL_CLR__SHIFT);
		val |= (1 << THM_THERMAL_INT_ENA__THERM_TRIGGER_CLR__SHIFT);
		WREG32_SOC15(THM, 0, mmTHM_THERMAL_INT_ENA, val);

		/* For MP1 SW irqs */
		val = RREG32_SOC15(MP1, 0, mmMP1_SMN_IH_SW_INT);
		val = REG_SET_FIELD(val, MP1_SMN_IH_SW_INT, ID, 0xFE);
		val = REG_SET_FIELD(val, MP1_SMN_IH_SW_INT, VALID, 0);
		WREG32_SOC15(MP1, 0, mmMP1_SMN_IH_SW_INT, val);

		val = RREG32_SOC15(MP1, 0, mmMP1_SMN_IH_SW_INT_CTRL);
		val = REG_SET_FIELD(val, MP1_SMN_IH_SW_INT_CTRL, INT_MASK, 0);
		WREG32_SOC15(MP1, 0, mmMP1_SMN_IH_SW_INT_CTRL, val);

		break;
	default:
		break;
	}

	return 0;
}

#define THM_11_0__SRCID__THM_DIG_THERM_L2H		0		/* ASIC_TEMP > CG_THERMAL_INT.DIG_THERM_INTH  */
#define THM_11_0__SRCID__THM_DIG_THERM_H2L		1		/* ASIC_TEMP < CG_THERMAL_INT.DIG_THERM_INTL  */

#define SMUIO_11_0__SRCID__SMUIO_GPIO19			83

static int smu_v11_0_irq_process(struct amdgpu_device *adev,
				 struct amdgpu_irq_src *source,
				 struct amdgpu_iv_entry *entry)
{
	struct smu_context *smu = &adev->smu;
	uint32_t client_id = entry->client_id;
	uint32_t src_id = entry->src_id;
	/*
	 * ctxid is used to distinguish different
	 * events for SMCToHost interrupt.
	 */
	uint32_t ctxid = entry->src_data[0];
	uint32_t data;

	if (client_id == SOC15_IH_CLIENTID_THM) {
		switch (src_id) {
		case THM_11_0__SRCID__THM_DIG_THERM_L2H:
			dev_emerg(adev->dev, "ERROR: GPU over temperature range(SW CTF) detected!\n");
			/*
			 * SW CTF just occurred.
			 * Try to do a graceful shutdown to prevent further damage.
			 */
			dev_emerg(adev->dev, "ERROR: System is going to shutdown due to GPU SW CTF!\n");
			orderly_poweroff(true);
			break;
		case THM_11_0__SRCID__THM_DIG_THERM_H2L:
			dev_emerg(adev->dev, "ERROR: GPU under temperature range detected\n");
			break;
		default:
			dev_emerg(adev->dev, "ERROR: unknown THM interrupt src id (%d)\n",
				src_id);
1349                 break;
1350                 }
1351         } else if (client_id == SOC15_IH_CLIENTID_ROM_SMUIO) {
1352                 dev_emerg(adev->dev, "ERROR: GPU HW Critical Temperature Fault(aka CTF) detected!\n");
1353                 /*
1354                  * HW CTF just occurred. Shutdown to prevent further damage.
1355                  */
1356                 dev_emerg(adev->dev, "ERROR: System is going to shutdown due to GPU HW CTF!\n");
1357                 orderly_poweroff(true);
1358         } else if (client_id == SOC15_IH_CLIENTID_MP1) {
1359                 if (src_id == 0xfe) {
1360                         /* ACK SMUToHost interrupt */
1361                         data = RREG32_SOC15(MP1, 0, mmMP1_SMN_IH_SW_INT_CTRL);
1362                         data = REG_SET_FIELD(data, MP1_SMN_IH_SW_INT_CTRL, INT_ACK, 1);
1363                         WREG32_SOC15(MP1, 0, mmMP1_SMN_IH_SW_INT_CTRL, data);
1364
1365                         switch (ctxid) {
1366                         case 0x3:
1367                                 dev_dbg(adev->dev, "Switched to AC mode!\n");
1368                                 schedule_work(&smu->interrupt_work);
1369                                 break;
1370                         case 0x4:
1371                                 dev_dbg(adev->dev, "Switched to DC mode!\n");
1372                                 schedule_work(&smu->interrupt_work);
1373                                 break;
1374                         case 0x7:
1375                                 /*
1376                                  * Increment the throttle interrupt counter
1377                                  */
1378                                 atomic64_inc(&smu->throttle_int_counter);
1379
1380                                 if (!atomic_read(&adev->throttling_logging_enabled))
1381                                         return 0;
1382
1383                                 if (__ratelimit(&adev->throttling_logging_rs))
1384                                         schedule_work(&smu->throttling_logging_work);
1385
1386                                 break;
1387                         }
1388                 }
1389         }
1390
1391         return 0;
1392 }
1393
1394 static const struct amdgpu_irq_src_funcs smu_v11_0_irq_funcs =
1395 {
1396         .set = smu_v11_0_set_irq_state,
1397         .process = smu_v11_0_irq_process,
1398 };
1399
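/*
 * Register the single smu irq source for all four interrupt ids
 * handled above: the two THM digital thermal crossings, the SMUIO
 * GPIO19 hardware CTF line and the MP1 software interrupt (0xfe).
 */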
1400 int smu_v11_0_register_irq_handler(struct smu_context *smu)
1401 {
1402         struct amdgpu_device *adev = smu->adev;
1403         struct amdgpu_irq_src *irq_src = &smu->irq_source;
1404         int ret = 0;
1405
1406         irq_src->num_types = 1;
1407         irq_src->funcs = &smu_v11_0_irq_funcs;
1408
1409         ret = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_THM,
1410                                 THM_11_0__SRCID__THM_DIG_THERM_L2H,
1411                                 irq_src);
1412         if (ret)
1413                 return ret;
1414
1415         ret = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_THM,
1416                                 THM_11_0__SRCID__THM_DIG_THERM_H2L,
1417                                 irq_src);
1418         if (ret)
1419                 return ret;
1420
1421         /* Register the CTF (GPIO_19) interrupt */
1422         ret = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_ROM_SMUIO,
1423                                 SMUIO_11_0__SRCID__SMUIO_GPIO19,
1424                                 irq_src);
1425         if (ret)
1426                 return ret;
1427
1428         ret = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_MP1,
1429                                 0xfe,
1430                                 irq_src);
1431         if (ret)
1432                 return ret;
1433
1434         return ret;
1435 }
1436
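/*
 * Export the cached max sustainable clocks to the display code. The
 * cached values are in MHz while pp_smu_nv_clock_table expects kHz,
 * hence the *1000 conversions; clocks with no SMU-side equivalent
 * (dsc/dpp/fabric) are reported as 0.
 */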
1437 int smu_v11_0_get_max_sustainable_clocks_by_dc(struct smu_context *smu,
1438                 struct pp_smu_nv_clock_table *max_clocks)
1439 {
1440         struct smu_table_context *table_context = &smu->smu_table;
1441         struct smu_11_0_max_sustainable_clocks *sustainable_clocks = NULL;
1442
1443         if (!max_clocks || !table_context->max_sustainable_clocks)
1444                 return -EINVAL;
1445
1446         sustainable_clocks = table_context->max_sustainable_clocks;
1447
1448         max_clocks->dcfClockInKhz =
1449                         (unsigned int) sustainable_clocks->dcef_clock * 1000;
1450         max_clocks->displayClockInKhz =
1451                         (unsigned int) sustainable_clocks->display_clock * 1000;
1452         max_clocks->phyClockInKhz =
1453                         (unsigned int) sustainable_clocks->phy_clock * 1000;
1454         max_clocks->pixelClockInKhz =
1455                         (unsigned int) sustainable_clocks->pixel_clock * 1000;
1456         max_clocks->uClockInKhz =
1457                         (unsigned int) sustainable_clocks->uclock * 1000;
1458         max_clocks->socClockInKhz =
1459                         (unsigned int) sustainable_clocks->soc_clock * 1000;
1460         max_clocks->dscClockInKhz = 0;
1461         max_clocks->dppClockInKhz = 0;
1462         max_clocks->fabricClockInKhz = 0;
1463
1464         return 0;
1465 }
1466
1467 int smu_v11_0_set_azalia_d3_pme(struct smu_context *smu)
1468 {
1469         return smu_cmn_send_smc_msg(smu, SMU_MSG_BacoAudioD3PME, NULL);
1470 }
1471
1472 static int smu_v11_0_baco_set_armd3_sequence(struct smu_context *smu, enum smu_v11_0_baco_seq baco_seq)
1473 {
1474         return smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_ArmD3, baco_seq, NULL);
1475 }
1476
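/*
 * BACO is usable only when the platform flags it as supported and,
 * on ASICs that expose SMU_FEATURE_BACO_BIT, when that feature is
 * actually enabled. Arcturus does not implement the feature bit, so
 * it relies on the platform_support flag alone.
 */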
1477 bool smu_v11_0_baco_is_support(struct smu_context *smu)
1478 {
1479         struct smu_baco_context *smu_baco = &smu->smu_baco;
1480
1481         if (!smu_baco->platform_support)
1482                 return false;
1483
1484         /* Arcturus does not support this bit mask */
1485         if (smu_cmn_feature_is_supported(smu, SMU_FEATURE_BACO_BIT) &&
1486            !smu_cmn_feature_is_enabled(smu, SMU_FEATURE_BACO_BIT))
1487                 return false;
1488
1489         return true;
1490 }
1491
1492 enum smu_baco_state smu_v11_0_baco_get_state(struct smu_context *smu)
1493 {
1494         struct smu_baco_context *smu_baco = &smu->smu_baco;
1495         enum smu_baco_state baco_state;
1496
1497         mutex_lock(&smu_baco->mutex);
1498         baco_state = smu_baco->state;
1499         mutex_unlock(&smu_baco->mutex);
1500
1501         return baco_state;
1502 }
1503
1504 #define D3HOT_BACO_SEQUENCE 0
1505 #define D3HOT_BAMACO_SEQUENCE 2
1506
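/*
 * The EnterBaco argument selects the D3hot sequence on sienna
 * cichlid and friends: 0 for plain BACO, 2 for BAMACO (presumably
 * BACO with memory retention), the latter used when runtime PM is
 * forced to mode 2. On the older default path the argument appears
 * to tell the firmware whether RAS is active (1) or not (0) instead.
 */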
1507 int smu_v11_0_baco_set_state(struct smu_context *smu, enum smu_baco_state state)
1508 {
1509         struct smu_baco_context *smu_baco = &smu->smu_baco;
1510         struct amdgpu_device *adev = smu->adev;
1511         struct amdgpu_ras *ras = amdgpu_ras_get_context(adev);
1512         uint32_t data;
1513         int ret = 0;
1514
1515         if (smu_v11_0_baco_get_state(smu) == state)
1516                 return 0;
1517
1518         mutex_lock(&smu_baco->mutex);
1519
1520         if (state == SMU_BACO_STATE_ENTER) {
1521                 switch (adev->asic_type) {
1522                 case CHIP_SIENNA_CICHLID:
1523                 case CHIP_NAVY_FLOUNDER:
1524                 case CHIP_DIMGREY_CAVEFISH:
1525                         if (amdgpu_runtime_pm == 2)
1526                                 ret = smu_cmn_send_smc_msg_with_param(smu,
1527                                                                       SMU_MSG_EnterBaco,
1528                                                                       D3HOT_BAMACO_SEQUENCE,
1529                                                                       NULL);
1530                         else
1531                                 ret = smu_cmn_send_smc_msg_with_param(smu,
1532                                                                       SMU_MSG_EnterBaco,
1533                                                                       D3HOT_BACO_SEQUENCE,
1534                                                                       NULL);
1535                         break;
1536                 default:
1537                         if (!ras || !ras->supported) {
1538                                 if (adev->asic_type == CHIP_ARCTURUS) {
1539                                         data = RREG32_SOC15(THM, 0, mmTHM_BACO_CNTL_ARCT);
1540                                         data |= 0x80000000;
1541                                         WREG32_SOC15(THM, 0, mmTHM_BACO_CNTL_ARCT, data);
1542                                 } else {
1543                                         data = RREG32_SOC15(THM, 0, mmTHM_BACO_CNTL);
1544                                         data |= 0x80000000;
1545                                         WREG32_SOC15(THM, 0, mmTHM_BACO_CNTL, data);
1546                                 }
1547
1548                                 ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_EnterBaco, 0, NULL);
1549                         } else {
1550                                 ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_EnterBaco, 1, NULL);
1551                         }
1552                         break;
1553                 }
1554
1555         } else {
1556                 ret = smu_cmn_send_smc_msg(smu, SMU_MSG_ExitBaco, NULL);
1557                 if (ret)
1558                         goto out;
1559
1560                 /* clear vbios scratch 6 and 7 for the coming asic reinit */
1561                 WREG32(adev->bios_scratch_reg_offset + 6, 0);
1562                 WREG32(adev->bios_scratch_reg_offset + 7, 0);
1563         }
1564         if (ret)
1565                 goto out;
1566
1567         smu_baco->state = state;
1568 out:
1569         mutex_unlock(&smu_baco->mutex);
1570         return ret;
1571 }
1572
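/*
 * BACO entry sequence: arm the D3 sequence first (skipped on
 * Arcturus, which does not need the audio workaround), request the
 * state switch, then give the firmware 10ms to settle.
 */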
1573 int smu_v11_0_baco_enter(struct smu_context *smu)
1574 {
1575         struct amdgpu_device *adev = smu->adev;
1576         int ret = 0;
1577
1578         /* Arcturus does not need this audio workaround */
1579         if (adev->asic_type != CHIP_ARCTURUS) {
1580                 ret = smu_v11_0_baco_set_armd3_sequence(smu, BACO_SEQ_BACO);
1581                 if (ret)
1582                         return ret;
1583         }
1584
1585         ret = smu_v11_0_baco_set_state(smu, SMU_BACO_STATE_ENTER);
1586         if (ret)
1587                 return ret;
1588
1589         msleep(10);
1590
1591         return ret;
1592 }
1593
1594 int smu_v11_0_baco_exit(struct smu_context *smu)
1595 {
1596         return smu_v11_0_baco_set_state(smu, SMU_BACO_STATE_EXIT);
1597 }
1598
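/*
 * Mode1 reset is fire-and-forget from the driver side: send the
 * message, then wait the fixed 500ms defined by
 * SMU11_MODE1_RESET_WAIT_TIME_IN_MS for the SMU to finish.
 */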
1599 int smu_v11_0_mode1_reset(struct smu_context *smu)
1600 {
1601         int ret = 0;
1602
1603         ret = smu_cmn_send_smc_msg(smu, SMU_MSG_Mode1Reset, NULL);
1604         if (!ret)
1605                 msleep(SMU11_MODE1_RESET_WAIT_TIME_IN_MS);
1606
1607         return ret;
1608 }
1609
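/*
 * Query the absolute min/max of a clock domain. With DPM disabled
 * for the domain, the boot values (stored in 10KHz units, hence the
 * /100 to get MHz) are returned; otherwise the range is read back
 * from the firmware with the clock id packed into the upper 16 bits
 * of the message parameter.
 */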
1610 int smu_v11_0_get_dpm_ultimate_freq(struct smu_context *smu, enum smu_clk_type clk_type,
1611                                                  uint32_t *min, uint32_t *max)
1612 {
1613         int ret = 0, clk_id = 0;
1614         uint32_t param = 0;
1615         uint32_t clock_limit;
1616
1617         if (!smu_cmn_clk_dpm_is_enabled(smu, clk_type)) {
1618                 switch (clk_type) {
1619                 case SMU_MCLK:
1620                 case SMU_UCLK:
1621                         clock_limit = smu->smu_table.boot_values.uclk;
1622                         break;
1623                 case SMU_GFXCLK:
1624                 case SMU_SCLK:
1625                         clock_limit = smu->smu_table.boot_values.gfxclk;
1626                         break;
1627                 case SMU_SOCCLK:
1628                         clock_limit = smu->smu_table.boot_values.socclk;
1629                         break;
1630                 default:
1631                         clock_limit = 0;
1632                         break;
1633                 }
1634
1635                 /* boot values are in 10KHz units, return the clock in MHz */
1636                 if (min)
1637                         *min = clock_limit / 100;
1638                 if (max)
1639                         *max = clock_limit / 100;
1640
1641                 return 0;
1642         }
1643
1644         clk_id = smu_cmn_to_asic_specific_index(smu,
1645                                                 CMN2ASIC_MAPPING_CLK,
1646                                                 clk_type);
1647         if (clk_id < 0) {
1648                 ret = -EINVAL;
1649                 goto failed;
1650         }
1651         param = (clk_id & 0xffff) << 16;
1652
1653         if (max) {
1654                 ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_GetMaxDpmFreq, param, max);
1655                 if (ret)
1656                         goto failed;
1657         }
1658
1659         if (min) {
1660                 ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_GetMinDpmFreq, param, min);
1661                 if (ret)
1662                         goto failed;
1663         }
1664
1665 failed:
1666         return ret;
1667 }
1668
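/*
 * Apply soft min/max limits. The message parameter packs the clock
 * id into the upper 16 bits and the frequency in MHz into the lower
 * 16. GFXOFF must be disallowed while GFXCLK limit messages are in
 * flight, hence the amdgpu_gfx_off_ctrl() bracketing.
 */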
1669 int smu_v11_0_set_soft_freq_limited_range(struct smu_context *smu,
1670                                           enum smu_clk_type clk_type,
1671                                           uint32_t min,
1672                                           uint32_t max)
1673 {
1674         struct amdgpu_device *adev = smu->adev;
1675         int ret = 0, clk_id = 0;
1676         uint32_t param;
1677
1678         if (!smu_cmn_clk_dpm_is_enabled(smu, clk_type))
1679                 return 0;
1680
1681         clk_id = smu_cmn_to_asic_specific_index(smu,
1682                                                 CMN2ASIC_MAPPING_CLK,
1683                                                 clk_type);
1684         if (clk_id < 0)
1685                 return clk_id;
1686
1687         if (clk_type == SMU_GFXCLK)
1688                 amdgpu_gfx_off_ctrl(adev, false);
1689
1690         if (max > 0) {
1691                 param = (uint32_t)((clk_id << 16) | (max & 0xffff));
1692                 ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_SetSoftMaxByFreq,
1693                                                   param, NULL);
1694                 if (ret)
1695                         goto out;
1696         }
1697
1698         if (min > 0) {
1699                 param = (uint32_t)((clk_id << 16) | (min & 0xffff));
1700                 ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_SetSoftMinByFreq,
1701                                                   param, NULL);
1702                 if (ret)
1703                         goto out;
1704         }
1705
1706 out:
1707         if (clk_type == SMU_GFXCLK)
1708                 amdgpu_gfx_off_ctrl(adev, true);
1709
1710         return ret;
1711 }
1712
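/*
 * Hard limit counterpart of the soft limit helper above: same
 * parameter encoding, but at least one bound must be non-zero and
 * GFXOFF is not toggled here.
 */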
1713 int smu_v11_0_set_hard_freq_limited_range(struct smu_context *smu,
1714                                           enum smu_clk_type clk_type,
1715                                           uint32_t min,
1716                                           uint32_t max)
1717 {
1718         int ret = 0, clk_id = 0;
1719         uint32_t param;
1720
1721         if (!min && !max)
1722                 return -EINVAL;
1723
1724         if (!smu_cmn_clk_dpm_is_enabled(smu, clk_type))
1725                 return 0;
1726
1727         clk_id = smu_cmn_to_asic_specific_index(smu,
1728                                                 CMN2ASIC_MAPPING_CLK,
1729                                                 clk_type);
1730         if (clk_id < 0)
1731                 return clk_id;
1732
1733         if (max > 0) {
1734                 param = (uint32_t)((clk_id << 16) | (max & 0xffff));
1735                 ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_SetHardMaxByFreq,
1736                                                   param, NULL);
1737                 if (ret)
1738                         return ret;
1739         }
1740
1741         if (min > 0) {
1742                 param = (uint32_t)((clk_id << 16) | (min & 0xffff));
1743                 ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_SetHardMinByFreq,
1744                                                   param, NULL);
1745                 if (ret)
1746                         return ret;
1747         }
1748
1749         return ret;
1750 }
1751
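/*
 * Translate a forced performance level into soft min/max ranges for
 * GFXCLK, MCLK and SOCCLK: table bounds for low/high/auto, cached
 * pstate values for the profiling levels. Arcturus cannot take
 * separate MCLK/SOCCLK soft limits, so those are zeroed (and thus
 * skipped) there.
 */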
1752 int smu_v11_0_set_performance_level(struct smu_context *smu,
1753                                     enum amd_dpm_forced_level level)
1754 {
1755         struct smu_11_0_dpm_context *dpm_context =
1756                                 smu->smu_dpm.dpm_context;
1757         struct smu_11_0_dpm_table *gfx_table =
1758                                 &dpm_context->dpm_tables.gfx_table;
1759         struct smu_11_0_dpm_table *mem_table =
1760                                 &dpm_context->dpm_tables.uclk_table;
1761         struct smu_11_0_dpm_table *soc_table =
1762                                 &dpm_context->dpm_tables.soc_table;
1763         struct smu_umd_pstate_table *pstate_table =
1764                                 &smu->pstate_table;
1765         struct amdgpu_device *adev = smu->adev;
1766         uint32_t sclk_min = 0, sclk_max = 0;
1767         uint32_t mclk_min = 0, mclk_max = 0;
1768         uint32_t socclk_min = 0, socclk_max = 0;
1769         int ret = 0;
1770
1771         switch (level) {
1772         case AMD_DPM_FORCED_LEVEL_HIGH:
1773                 sclk_min = sclk_max = gfx_table->max;
1774                 mclk_min = mclk_max = mem_table->max;
1775                 socclk_min = socclk_max = soc_table->max;
1776                 break;
1777         case AMD_DPM_FORCED_LEVEL_LOW:
1778                 sclk_min = sclk_max = gfx_table->min;
1779                 mclk_min = mclk_max = mem_table->min;
1780                 socclk_min = socclk_max = soc_table->min;
1781                 break;
1782         case AMD_DPM_FORCED_LEVEL_AUTO:
1783                 sclk_min = gfx_table->min;
1784                 sclk_max = gfx_table->max;
1785                 mclk_min = mem_table->min;
1786                 mclk_max = mem_table->max;
1787                 socclk_min = soc_table->min;
1788                 socclk_max = soc_table->max;
1789                 break;
1790         case AMD_DPM_FORCED_LEVEL_PROFILE_STANDARD:
1791                 sclk_min = sclk_max = pstate_table->gfxclk_pstate.standard;
1792                 mclk_min = mclk_max = pstate_table->uclk_pstate.standard;
1793                 socclk_min = socclk_max = pstate_table->socclk_pstate.standard;
1794                 break;
1795         case AMD_DPM_FORCED_LEVEL_PROFILE_MIN_SCLK:
1796                 sclk_min = sclk_max = pstate_table->gfxclk_pstate.min;
1797                 break;
1798         case AMD_DPM_FORCED_LEVEL_PROFILE_MIN_MCLK:
1799                 mclk_min = mclk_max = pstate_table->uclk_pstate.min;
1800                 break;
1801         case AMD_DPM_FORCED_LEVEL_PROFILE_PEAK:
1802                 sclk_min = sclk_max = pstate_table->gfxclk_pstate.peak;
1803                 mclk_min = mclk_max = pstate_table->uclk_pstate.peak;
1804                 socclk_min = socclk_max = pstate_table->socclk_pstate.peak;
1805                 break;
1806         case AMD_DPM_FORCED_LEVEL_MANUAL:
1807         case AMD_DPM_FORCED_LEVEL_PROFILE_EXIT:
1808                 return 0;
1809         default:
1810                 dev_err(adev->dev, "Invalid performance level %d\n", level);
1811                 return -EINVAL;
1812         }
1813
1814         /*
1815          * Separate MCLK and SOCCLK soft min/max settings are not allowed
1816          * on Arcturus.
1817          */
1818         if (adev->asic_type == CHIP_ARCTURUS) {
1819                 mclk_min = mclk_max = 0;
1820                 socclk_min = socclk_max = 0;
1821         }
1822
1823         if (sclk_min && sclk_max) {
1824                 ret = smu_v11_0_set_soft_freq_limited_range(smu,
1825                                                             SMU_GFXCLK,
1826                                                             sclk_min,
1827                                                             sclk_max);
1828                 if (ret)
1829                         return ret;
1830         }
1831
1832         if (mclk_min && mclk_max) {
1833                 ret = smu_v11_0_set_soft_freq_limited_range(smu,
1834                                                             SMU_MCLK,
1835                                                             mclk_min,
1836                                                             mclk_max);
1837                 if (ret)
1838                         return ret;
1839         }
1840
1841         if (socclk_min && socclk_max) {
1842                 ret = smu_v11_0_set_soft_freq_limited_range(smu,
1843                                                             SMU_SOCCLK,
1844                                                             socclk_min,
1845                                                             socclk_max);
1846                 if (ret)
1847                         return ret;
1848         }
1849
1850         return ret;
1851 }
1852
1853 int smu_v11_0_set_power_source(struct smu_context *smu,
1854                                enum smu_power_src_type power_src)
1855 {
1856         int pwr_source;
1857
1858         pwr_source = smu_cmn_to_asic_specific_index(smu,
1859                                                     CMN2ASIC_MAPPING_PWR,
1860                                                     (uint32_t)power_src);
1861         if (pwr_source < 0)
1862                 return -EINVAL;
1863
1864         return smu_cmn_send_smc_msg_with_param(smu,
1865                                         SMU_MSG_NotifyPowerSource,
1866                                         pwr_source,
1867                                         NULL);
1868 }
1869
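/*
 * Fetch the frequency of a single DPM level. The parameter packs
 * the clock id and the level index; the magic index 0xff asks the
 * firmware for the number of levels instead, which is how
 * smu_v11_0_get_dpm_level_count() below is implemented.
 */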
1870 int smu_v11_0_get_dpm_freq_by_index(struct smu_context *smu,
1871                                     enum smu_clk_type clk_type,
1872                                     uint16_t level,
1873                                     uint32_t *value)
1874 {
1875         int ret = 0, clk_id = 0;
1876         uint32_t param;
1877
1878         if (!value)
1879                 return -EINVAL;
1880
1881         if (!smu_cmn_clk_dpm_is_enabled(smu, clk_type))
1882                 return 0;
1883
1884         clk_id = smu_cmn_to_asic_specific_index(smu,
1885                                                 CMN2ASIC_MAPPING_CLK,
1886                                                 clk_type);
1887         if (clk_id < 0)
1888                 return clk_id;
1889
1890         param = (uint32_t)(((clk_id & 0xffff) << 16) | (level & 0xffff));
1891
1892         ret = smu_cmn_send_smc_msg_with_param(smu,
1893                                           SMU_MSG_GetDpmFreqByIndex,
1894                                           param,
1895                                           value);
1896         if (ret)
1897                 return ret;
1898
1899         /*
1900          * BIT31: 0 - Fine grained DPM, 1 - Discrete DPM
1901          * We do not support this flag for now, so mask it off.
1902          */
1903         *value = *value & 0x7fffffff;
1904
1905         return ret;
1906 }
1907
1908 int smu_v11_0_get_dpm_level_count(struct smu_context *smu,
1909                                   enum smu_clk_type clk_type,
1910                                   uint32_t *value)
1911 {
1912         return smu_v11_0_get_dpm_freq_by_index(smu,
1913                                                clk_type,
1914                                                0xff,
1915                                                value);
1916 }
1917
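/*
 * Populate a smu_11_0_dpm_table from firmware: read the level count
 * first, then walk the levels filling in each frequency, recording
 * level 0 as the table minimum and the last level as the maximum.
 */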
1918 int smu_v11_0_set_single_dpm_table(struct smu_context *smu,
1919                                    enum smu_clk_type clk_type,
1920                                    struct smu_11_0_dpm_table *single_dpm_table)
1921 {
1922         int ret = 0;
1923         uint32_t clk;
1924         int i;
1925
1926         ret = smu_v11_0_get_dpm_level_count(smu,
1927                                             clk_type,
1928                                             &single_dpm_table->count);
1929         if (ret) {
1930                 dev_err(smu->adev->dev, "[%s] failed to get dpm levels!\n", __func__);
1931                 return ret;
1932         }
1933
1934         for (i = 0; i < single_dpm_table->count; i++) {
1935                 ret = smu_v11_0_get_dpm_freq_by_index(smu,
1936                                                       clk_type,
1937                                                       i,
1938                                                       &clk);
1939                 if (ret) {
1940                         dev_err(smu->adev->dev, "[%s] failed to get dpm freq by index!\n", __func__);
1941                         return ret;
1942                 }
1943
1944                 single_dpm_table->dpm_levels[i].value = clk;
1945                 single_dpm_table->dpm_levels[i].enabled = true;
1946
1947                 if (i == 0)
1948                         single_dpm_table->min = clk;
1949                 else if (i == single_dpm_table->count - 1)
1950                         single_dpm_table->max = clk;
1951         }
1952
1953         return 0;
1954 }
1955
1956 int smu_v11_0_get_dpm_level_range(struct smu_context *smu,
1957                                   enum smu_clk_type clk_type,
1958                                   uint32_t *min_value,
1959                                   uint32_t *max_value)
1960 {
1961         uint32_t level_count = 0;
1962         int ret = 0;
1963
1964         if (!min_value && !max_value)
1965                 return -EINVAL;
1966
1967         if (min_value) {
1968                 /* by default, use the level 0 clock value as the min value */
1969                 ret = smu_v11_0_get_dpm_freq_by_index(smu,
1970                                                       clk_type,
1971                                                       0,
1972                                                       min_value);
1973                 if (ret)
1974                         return ret;
1975         }
1976
1977         if (max_value) {
1978                 ret = smu_v11_0_get_dpm_level_count(smu,
1979                                                     clk_type,
1980                                                     &level_count);
1981                 if (ret)
1982                         return ret;
1983
1984                 ret = smu_v11_0_get_dpm_freq_by_index(smu,
1985                                                       clk_type,
1986                                                       level_count - 1,
1987                                                       max_value);
1988                 if (ret)
1989                         return ret;
1990         }
1991
1992         return ret;
1993 }
1994
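/*
 * The current PCIe link width/speed are read from the LC registers
 * defined at the top of this file and decoded through the
 * link_width[]/link_speed[] tables (lane count and GT/s x10
 * respectively); out-of-range register values fall back to index 0.
 */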
1995 int smu_v11_0_get_current_pcie_link_width_level(struct smu_context *smu)
1996 {
1997         struct amdgpu_device *adev = smu->adev;
1998
1999         return (RREG32_PCIE(smnPCIE_LC_LINK_WIDTH_CNTL) &
2000                 PCIE_LC_LINK_WIDTH_CNTL__LC_LINK_WIDTH_RD_MASK)
2001                 >> PCIE_LC_LINK_WIDTH_CNTL__LC_LINK_WIDTH_RD__SHIFT;
2002 }
2003
2004 int smu_v11_0_get_current_pcie_link_width(struct smu_context *smu)
2005 {
2006         uint32_t width_level;
2007
2008         width_level = smu_v11_0_get_current_pcie_link_width_level(smu);
2009         if (width_level > LINK_WIDTH_MAX)
2010                 width_level = 0;
2011
2012         return link_width[width_level];
2013 }
2014
2015 int smu_v11_0_get_current_pcie_link_speed_level(struct smu_context *smu)
2016 {
2017         struct amdgpu_device *adev = smu->adev;
2018
2019         return (RREG32_PCIE(smnPCIE_LC_SPEED_CNTL) &
2020                 PCIE_LC_SPEED_CNTL__LC_CURRENT_DATA_RATE_MASK)
2021                 >> PCIE_LC_SPEED_CNTL__LC_CURRENT_DATA_RATE__SHIFT;
2022 }
2023
2024 int smu_v11_0_get_current_pcie_link_speed(struct smu_context *smu)
2025 {
2026         uint32_t speed_level;
2027
2028         speed_level = smu_v11_0_get_current_pcie_link_speed_level(smu);
2029         if (speed_level > LINK_SPEED_MAX)
2030                 speed_level = 0;
2031
2032         return link_speed[speed_level];
2033 }
2034
2035 int smu_v11_0_gfx_ulv_control(struct smu_context *smu,
2036                               bool enablement)
2037 {
2038         int ret = 0;
2039
2040         if (smu_cmn_feature_is_supported(smu, SMU_FEATURE_GFX_ULV_BIT))
2041                 ret = smu_cmn_feature_set_enabled(smu, SMU_FEATURE_GFX_ULV_BIT, enablement);
2042
2043         return ret;
2044 }
2045
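/*
 * Deep sleep control walks every DS feature bit (GFXCLK, UCLK,
 * FCLK, SOCCLK, LCLK) and enables or disables each one the ASIC
 * supports, bailing out on the first failure.
 */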
2046 int smu_v11_0_deep_sleep_control(struct smu_context *smu,
2047                                  bool enablement)
2048 {
2049         struct amdgpu_device *adev = smu->adev;
2050         int ret = 0;
2051
2052         if (smu_cmn_feature_is_supported(smu, SMU_FEATURE_DS_GFXCLK_BIT)) {
2053                 ret = smu_cmn_feature_set_enabled(smu, SMU_FEATURE_DS_GFXCLK_BIT, enablement);
2054                 if (ret) {
2055                         dev_err(adev->dev, "Failed to %s GFXCLK DS!\n", enablement ? "enable" : "disable");
2056                         return ret;
2057                 }
2058         }
2059
2060         if (smu_cmn_feature_is_supported(smu, SMU_FEATURE_DS_UCLK_BIT)) {
2061                 ret = smu_cmn_feature_set_enabled(smu, SMU_FEATURE_DS_UCLK_BIT, enablement);
2062                 if (ret) {
2063                         dev_err(adev->dev, "Failed to %s UCLK DS!\n", enablement ? "enable" : "disable");
2064                         return ret;
2065                 }
2066         }
2067
2068         if (smu_cmn_feature_is_supported(smu, SMU_FEATURE_DS_FCLK_BIT)) {
2069                 ret = smu_cmn_feature_set_enabled(smu, SMU_FEATURE_DS_FCLK_BIT, enablement);
2070                 if (ret) {
2071                         dev_err(adev->dev, "Failed to %s FCLK DS!\n", enablement ? "enable" : "disable");
2072                         return ret;
2073                 }
2074         }
2075
2076         if (smu_cmn_feature_is_supported(smu, SMU_FEATURE_DS_SOCCLK_BIT)) {
2077                 ret = smu_cmn_feature_set_enabled(smu, SMU_FEATURE_DS_SOCCLK_BIT, enablement);
2078                 if (ret) {
2079                         dev_err(adev->dev, "Failed to %s SOCCLK DS!\n", enablement ? "enable" : "disable");
2080                         return ret;
2081                 }
2082         }
2083
2084         if (smu_cmn_feature_is_supported(smu, SMU_FEATURE_DS_LCLK_BIT)) {
2085                 ret = smu_cmn_feature_set_enabled(smu, SMU_FEATURE_DS_LCLK_BIT, enablement);
2086                 if (ret) {
2087                         dev_err(adev->dev, "Failed to %s LCLK DS!\n", enablement ? "enable" : "disable");
2088                         return ret;
2089                 }
2090         }
2091
2092         return ret;
2093 }