drm/amd/powerplay: add suspend and resume function for smu
drivers/gpu/drm/amd/powerplay/smu_v11_0.c
1 /*
2  * Copyright 2019 Advanced Micro Devices, Inc.
3  *
4  * Permission is hereby granted, free of charge, to any person obtaining a
5  * copy of this software and associated documentation files (the "Software"),
6  * to deal in the Software without restriction, including without limitation
7  * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8  * and/or sell copies of the Software, and to permit persons to whom the
9  * Software is furnished to do so, subject to the following conditions:
10  *
11  * The above copyright notice and this permission notice shall be included in
12  * all copies or substantial portions of the Software.
13  *
14  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
17  * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18  * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19  * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20  * OTHER DEALINGS IN THE SOFTWARE.
21  */
22
23 #include "pp_debug.h"
24 #include <linux/firmware.h>
25 #include "amdgpu.h"
26 #include "amdgpu_smu.h"
27 #include "atomfirmware.h"
28 #include "amdgpu_atomfirmware.h"
29 #include "smu_v11_0.h"
30 #include "smu11_driver_if.h"
31 #include "soc15_common.h"
32 #include "atom.h"
33 #include "vega20_ppt.h"
34 #include "pp_thermal.h"
35
36 #include "asic_reg/thm/thm_11_0_2_offset.h"
37 #include "asic_reg/thm/thm_11_0_2_sh_mask.h"
38 #include "asic_reg/mp/mp_9_0_offset.h"
39 #include "asic_reg/mp/mp_9_0_sh_mask.h"
40 #include "asic_reg/nbio/nbio_7_4_offset.h"
41 #include "asic_reg/smuio/smuio_9_0_offset.h"
42 #include "asic_reg/smuio/smuio_9_0_sh_mask.h"
43
44 MODULE_FIRMWARE("amdgpu/vega20_smc.bin");
45
46 #define SMU11_TOOL_SIZE         0x19000
47 #define SMU11_THERMAL_MINIMUM_ALERT_TEMP      0
48 #define SMU11_THERMAL_MAXIMUM_ALERT_TEMP      255
49
50 #define SMU11_TEMPERATURE_UNITS_PER_CENTIGRADES 1000
51 #define SMU11_VOLTAGE_SCALE 4
52
53 #define SMC_DPM_FEATURE (FEATURE_DPM_PREFETCHER_MASK | \
54                          FEATURE_DPM_GFXCLK_MASK | \
55                          FEATURE_DPM_UCLK_MASK | \
56                          FEATURE_DPM_SOCCLK_MASK | \
57                          FEATURE_DPM_UVD_MASK | \
58                          FEATURE_DPM_VCE_MASK | \
59                          FEATURE_DPM_MP0CLK_MASK | \
60                          FEATURE_DPM_LINK_MASK | \
61                          FEATURE_DPM_DCEFCLK_MASK)
62
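/*
 * Driver <-> SMU messaging goes through the MP1 C2PMSG mailbox registers:
 * C2PMSG_66 carries the message index, C2PMSG_82 the optional argument or
 * return value, and C2PMSG_90 the response status (0x1 on success).
 */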
63 static int smu_v11_0_send_msg_without_waiting(struct smu_context *smu,
64                                               uint16_t msg)
65 {
66         struct amdgpu_device *adev = smu->adev;
67         WREG32_SOC15(MP1, 0, mmMP1_SMN_C2PMSG_66, msg);
68         return 0;
69 }
70
71 static int smu_v11_0_read_arg(struct smu_context *smu, uint32_t *arg)
72 {
73         struct amdgpu_device *adev = smu->adev;
74
75         *arg = RREG32_SOC15(MP1, 0, mmMP1_SMN_C2PMSG_82);
76         return 0;
77 }
78
79 static int smu_v11_0_wait_for_response(struct smu_context *smu)
80 {
81         struct amdgpu_device *adev = smu->adev;
82         uint32_t cur_value, i;
83
84         for (i = 0; i < adev->usec_timeout; i++) {
85                 cur_value = RREG32_SOC15(MP1, 0, mmMP1_SMN_C2PMSG_90);
86                 if ((cur_value & MP1_C2PMSG_90__CONTENT_MASK) != 0)
87                         break;
88                 udelay(1);
89         }
90
91         /* timeout means wrong logic */
92         if (i == adev->usec_timeout)
93                 return -ETIME;
94
95         return RREG32_SOC15(MP1, 0, mmMP1_SMN_C2PMSG_90) == 0x1 ? 0 : -EIO;
96 }
97
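/*
 * Send a message with no argument: wait for any outstanding response,
 * clear the response register, write the message index and poll
 * C2PMSG_90 until the SMU acknowledges it.
 */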
98 static int smu_v11_0_send_msg(struct smu_context *smu, uint16_t msg)
99 {
100         struct amdgpu_device *adev = smu->adev;
101         int ret = 0, index = 0;
102
103         index = smu_msg_get_index(smu, msg);
104         if (index < 0)
105                 return index;
106
107         smu_v11_0_wait_for_response(smu);
108
109         WREG32_SOC15(MP1, 0, mmMP1_SMN_C2PMSG_90, 0);
110
111         smu_v11_0_send_msg_without_waiting(smu, (uint16_t)index);
112
113         ret = smu_v11_0_wait_for_response(smu);
114
115         if (ret)
116                 pr_err("Failed to send message 0x%x, response 0x%x\n", msg,
117                        ret);
118
119         return ret;
120
121 }
122
123 static int
124 smu_v11_0_send_msg_with_param(struct smu_context *smu, uint16_t msg,
125                               uint32_t param)
126 {
127
128         struct amdgpu_device *adev = smu->adev;
129         int ret = 0, index = 0;
130
131         index = smu_msg_get_index(smu, msg);
132         if (index < 0)
133                 return index;
134
135         ret = smu_v11_0_wait_for_response(smu);
136         if (ret)
137                 pr_err("Mailbox not ready before sending message 0x%x, error 0x%x\n",
138                        msg, ret);
139
140         WREG32_SOC15(MP1, 0, mmMP1_SMN_C2PMSG_90, 0);
141
142         WREG32_SOC15(MP1, 0, mmMP1_SMN_C2PMSG_82, param);
143
144         smu_v11_0_send_msg_without_waiting(smu, (uint16_t)index);
145
146         ret = smu_v11_0_wait_for_response(smu);
147         if (ret)
148                 pr_err("Failed to send message 0x%x, response 0x%x\n", msg,
149                        ret);
150
151         return ret;
152 }
153
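/*
 * Request and validate the SMC firmware image for this ASIC and, when
 * firmware loading is handled by the PSP, register it in the common
 * ucode list so it is uploaded along with the other firmwares.
 */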
154 static int smu_v11_0_init_microcode(struct smu_context *smu)
155 {
156         struct amdgpu_device *adev = smu->adev;
157         const char *chip_name;
158         char fw_name[30];
159         int err = 0;
160         const struct smc_firmware_header_v1_0 *hdr;
161         const struct common_firmware_header *header;
162         struct amdgpu_firmware_info *ucode = NULL;
163
164         switch (adev->asic_type) {
165         case CHIP_VEGA20:
166                 chip_name = "vega20";
167                 break;
168         default:
169                 BUG();
170         }
171
172         snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_smc.bin", chip_name);
173
174         err = request_firmware(&adev->pm.fw, fw_name, adev->dev);
175         if (err)
176                 goto out;
177         err = amdgpu_ucode_validate(adev->pm.fw);
178         if (err)
179                 goto out;
180
181         hdr = (const struct smc_firmware_header_v1_0 *) adev->pm.fw->data;
182         amdgpu_ucode_print_smc_hdr(&hdr->header);
183         adev->pm.fw_version = le32_to_cpu(hdr->header.ucode_version);
184
185         if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) {
186                 ucode = &adev->firmware.ucode[AMDGPU_UCODE_ID_SMC];
187                 ucode->ucode_id = AMDGPU_UCODE_ID_SMC;
188                 ucode->fw = adev->pm.fw;
189                 header = (const struct common_firmware_header *)ucode->fw->data;
190                 adev->firmware.fw_size +=
191                         ALIGN(le32_to_cpu(header->ucode_size_bytes), PAGE_SIZE);
192         }
193
194 out:
195         if (err) {
196                 DRM_ERROR("smu_v11_0: Failed to load firmware \"%s\"\n",
197                           fw_name);
198                 release_firmware(adev->pm.fw);
199                 adev->pm.fw = NULL;
200         }
201         return err;
202 }
203
204 static int smu_v11_0_load_microcode(struct smu_context *smu)
205 {
206         return 0;
207 }
208
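/*
 * Read the MP1 firmware flags through the PCIE_INDEX2/PCIE_DATA2
 * indirect register pair and report the firmware as ready once it
 * signals that interrupts are enabled.
 */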
209 static int smu_v11_0_check_fw_status(struct smu_context *smu)
210 {
211         struct amdgpu_device *adev = smu->adev;
212         uint32_t mp1_fw_flags;
213
214         WREG32_SOC15(NBIF, 0, mmPCIE_INDEX2,
215                      (MP1_Public | (smnMP1_FIRMWARE_FLAGS & 0xffffffff)));
216
217         mp1_fw_flags = RREG32_SOC15(NBIF, 0, mmPCIE_DATA2);
218
219         if ((mp1_fw_flags & MP1_FIRMWARE_FLAGS__INTERRUPTS_ENABLED_MASK) >>
220             MP1_FIRMWARE_FLAGS__INTERRUPTS_ENABLED__SHIFT)
221                 return 0;
222         return -EIO;
223 }
224
225 static int smu_v11_0_check_fw_version(struct smu_context *smu)
226 {
227         uint32_t smu_version = 0xff;
228         int ret = 0;
229
230         ret = smu_send_smc_msg(smu, SMU_MSG_GetDriverIfVersion);
231         if (ret)
232                 goto err;
233
234         ret = smu_read_smc_arg(smu, &smu_version);
235         if (ret)
236                 goto err;
237
238         if (smu_version != SMU11_DRIVER_IF_VERSION)
239                 ret = -EINVAL;
240 err:
241         return ret;
242 }
243
244 static int smu_v11_0_read_pptable_from_vbios(struct smu_context *smu)
245 {
246         int ret, index;
247         uint16_t size;
248         uint8_t frev, crev;
249         void *table;
250
251         index = get_index_into_master_table(atom_master_list_of_data_tables_v2_1,
252                                             powerplayinfo);
253
254         ret = smu_get_atom_data_table(smu, index, &size, &frev, &crev,
255                                       (uint8_t **)&table);
256         if (ret)
257                 return ret;
258
259         if (!smu->smu_table.power_play_table)
260                 smu->smu_table.power_play_table = table;
261         if (!smu->smu_table.power_play_table_size)
262                 smu->smu_table.power_play_table_size = size;
263
264         return 0;
265 }
266
267 static int smu_v11_0_init_dpm_context(struct smu_context *smu)
268 {
269         struct smu_dpm_context *smu_dpm = &smu->smu_dpm;
270
271         if (smu_dpm->dpm_context || smu_dpm->dpm_context_size != 0)
272                 return -EINVAL;
273
274         return smu_alloc_dpm_context(smu);
275 }
276
277 static int smu_v11_0_fini_dpm_context(struct smu_context *smu)
278 {
279         struct smu_dpm_context *smu_dpm = &smu->smu_dpm;
280
281         if (!smu_dpm->dpm_context || smu_dpm->dpm_context_size == 0)
282                 return -EINVAL;
283
284         kfree(smu_dpm->dpm_context);
285         kfree(smu_dpm->golden_dpm_context);
286         kfree(smu_dpm->dpm_current_power_state);
287         kfree(smu_dpm->dpm_request_power_state);
288         smu_dpm->dpm_context = NULL;
289         smu_dpm->golden_dpm_context = NULL;
290         smu_dpm->dpm_context_size = 0;
291         smu_dpm->dpm_current_power_state = NULL;
292         smu_dpm->dpm_request_power_state = NULL;
293
294         return 0;
295 }
296
297 static int smu_v11_0_init_smc_tables(struct smu_context *smu)
298 {
299         struct smu_table_context *smu_table = &smu->smu_table;
300         struct smu_table *tables = NULL;
301         int ret = 0;
302
303         if (smu_table->tables || smu_table->table_count != 0)
304                 return -EINVAL;
305
306         tables = kcalloc(TABLE_COUNT, sizeof(struct smu_table), GFP_KERNEL);
307         if (!tables)
308                 return -ENOMEM;
309
310         smu_table->tables = tables;
311         smu_table->table_count = TABLE_COUNT;
312
313         SMU_TABLE_INIT(tables, TABLE_PPTABLE, sizeof(PPTable_t),
314                        PAGE_SIZE, AMDGPU_GEM_DOMAIN_VRAM);
315         SMU_TABLE_INIT(tables, TABLE_WATERMARKS, sizeof(Watermarks_t),
316                        PAGE_SIZE, AMDGPU_GEM_DOMAIN_VRAM);
317         SMU_TABLE_INIT(tables, TABLE_SMU_METRICS, sizeof(SmuMetrics_t),
318                        PAGE_SIZE, AMDGPU_GEM_DOMAIN_VRAM);
319         SMU_TABLE_INIT(tables, TABLE_OVERDRIVE, sizeof(OverDriveTable_t),
320                        PAGE_SIZE, AMDGPU_GEM_DOMAIN_VRAM);
321         SMU_TABLE_INIT(tables, TABLE_PMSTATUSLOG, SMU11_TOOL_SIZE, PAGE_SIZE,
322                        AMDGPU_GEM_DOMAIN_VRAM);
323         SMU_TABLE_INIT(tables, TABLE_ACTIVITY_MONITOR_COEFF,
324                        sizeof(DpmActivityMonitorCoeffInt_t),
325                        PAGE_SIZE,
326                        AMDGPU_GEM_DOMAIN_VRAM);
327
328         ret = smu_v11_0_init_dpm_context(smu);
329         if (ret)
330                 return ret;
331
332         return 0;
333 }
334
335 static int smu_v11_0_fini_smc_tables(struct smu_context *smu)
336 {
337         struct smu_table_context *smu_table = &smu->smu_table;
338         int ret = 0;
339
340         if (!smu_table->tables || smu_table->table_count == 0)
341                 return -EINVAL;
342
343         kfree(smu_table->tables);
344         smu_table->tables = NULL;
345         smu_table->table_count = 0;
346
347         ret = smu_v11_0_fini_dpm_context(smu);
348         if (ret)
349                 return ret;
350         return 0;
351 }
352
353 static int smu_v11_0_init_power(struct smu_context *smu)
354 {
355         struct smu_power_context *smu_power = &smu->smu_power;
356
357         if (smu_power->power_context || smu_power->power_context_size != 0)
358                 return -EINVAL;
359
360         smu_power->power_context = kzalloc(sizeof(struct smu_11_0_dpm_context),
361                                            GFP_KERNEL);
362         if (!smu_power->power_context)
363                 return -ENOMEM;
364         smu_power->power_context_size = sizeof(struct smu_11_0_dpm_context);
365
366         return 0;
367 }
368
369 static int smu_v11_0_fini_power(struct smu_context *smu)
370 {
371         struct smu_power_context *smu_power = &smu->smu_power;
372
373         if (!smu_power->power_context || smu_power->power_context_size == 0)
374                 return -EINVAL;
375
376         kfree(smu_power->power_context);
377         smu_power->power_context = NULL;
378         smu_power->power_context_size = 0;
379
380         return 0;
381 }
382
383 int smu_v11_0_get_vbios_bootup_values(struct smu_context *smu)
384 {
385         int ret, index;
386         uint16_t size;
387         uint8_t frev, crev;
388         struct atom_common_table_header *header;
389         struct atom_firmware_info_v3_3 *v_3_3;
390         struct atom_firmware_info_v3_1 *v_3_1;
391
392         index = get_index_into_master_table(atom_master_list_of_data_tables_v2_1,
393                                             firmwareinfo);
394
395         ret = smu_get_atom_data_table(smu, index, &size, &frev, &crev,
396                                       (uint8_t **)&header);
397         if (ret)
398                 return ret;
399
400         if (header->format_revision != 3) {
401                 pr_err("unknown atom_firmware_info version for smu11!\n");
402                 return -EINVAL;
403         }
404
405         switch (header->content_revision) {
406         case 0:
407         case 1:
408         case 2:
409                 v_3_1 = (struct atom_firmware_info_v3_1 *)header;
410                 smu->smu_table.boot_values.revision = v_3_1->firmware_revision;
411                 smu->smu_table.boot_values.gfxclk = v_3_1->bootup_sclk_in10khz;
412                 smu->smu_table.boot_values.uclk = v_3_1->bootup_mclk_in10khz;
413                 smu->smu_table.boot_values.socclk = 0;
414                 smu->smu_table.boot_values.dcefclk = 0;
415                 smu->smu_table.boot_values.vddc = v_3_1->bootup_vddc_mv;
416                 smu->smu_table.boot_values.vddci = v_3_1->bootup_vddci_mv;
417                 smu->smu_table.boot_values.mvddc = v_3_1->bootup_mvddc_mv;
418                 smu->smu_table.boot_values.vdd_gfx = v_3_1->bootup_vddgfx_mv;
419                 smu->smu_table.boot_values.cooling_id = v_3_1->coolingsolution_id;
420                 smu->smu_table.boot_values.pp_table_id = 0;
421                 break;
422         case 3:
423         default:
424                 v_3_3 = (struct atom_firmware_info_v3_3 *)header;
425                 smu->smu_table.boot_values.revision = v_3_3->firmware_revision;
426                 smu->smu_table.boot_values.gfxclk = v_3_3->bootup_sclk_in10khz;
427                 smu->smu_table.boot_values.uclk = v_3_3->bootup_mclk_in10khz;
428                 smu->smu_table.boot_values.socclk = 0;
429                 smu->smu_table.boot_values.dcefclk = 0;
430                 smu->smu_table.boot_values.vddc = v_3_3->bootup_vddc_mv;
431                 smu->smu_table.boot_values.vddci = v_3_3->bootup_vddci_mv;
432                 smu->smu_table.boot_values.mvddc = v_3_3->bootup_mvddc_mv;
433                 smu->smu_table.boot_values.vdd_gfx = v_3_3->bootup_vddgfx_mv;
434                 smu->smu_table.boot_values.cooling_id = v_3_3->coolingsolution_id;
435                 smu->smu_table.boot_values.pp_table_id = v_3_3->pplib_pptable_id;
436         }
437
438         return 0;
439 }
440
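/*
 * Query the bootup SOCCLK, DCEFCLK, ECLK, VCLK and DCLK from the VBIOS
 * via the getsmuclockinfo command table. The VBIOS reports frequencies
 * in Hz; they are stored in 10 kHz units to match the firmwareinfo values.
 */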
441 static int smu_v11_0_get_clk_info_from_vbios(struct smu_context *smu)
442 {
443         int ret, index;
444         struct amdgpu_device *adev = smu->adev;
445         struct atom_get_smu_clock_info_parameters_v3_1 input = {0};
446         struct atom_get_smu_clock_info_output_parameters_v3_1 *output;
447
448         input.clk_id = SMU11_SYSPLL0_SOCCLK_ID;
449         input.command = GET_SMU_CLOCK_INFO_V3_1_GET_CLOCK_FREQ;
450         index = get_index_into_master_table(atom_master_list_of_command_functions_v2_1,
451                                             getsmuclockinfo);
452
453         ret = amdgpu_atom_execute_table(adev->mode_info.atom_context, index,
454                                         (uint32_t *)&input);
455         if (ret)
456                 return -EINVAL;
457
458         output = (struct atom_get_smu_clock_info_output_parameters_v3_1 *)&input;
459         smu->smu_table.boot_values.socclk = le32_to_cpu(output->atom_smu_outputclkfreq.smu_clock_freq_hz) / 10000;
460
461         memset(&input, 0, sizeof(input));
462         input.clk_id = SMU11_SYSPLL0_DCEFCLK_ID;
463         input.command = GET_SMU_CLOCK_INFO_V3_1_GET_CLOCK_FREQ;
464         index = get_index_into_master_table(atom_master_list_of_command_functions_v2_1,
465                                             getsmuclockinfo);
466
467         ret = amdgpu_atom_execute_table(adev->mode_info.atom_context, index,
468                                         (uint32_t *)&input);
469         if (ret)
470                 return -EINVAL;
471
472         output = (struct atom_get_smu_clock_info_output_parameters_v3_1 *)&input;
473         smu->smu_table.boot_values.dcefclk = le32_to_cpu(output->atom_smu_outputclkfreq.smu_clock_freq_hz) / 10000;
474
475         memset(&input, 0, sizeof(input));
476         input.clk_id = SMU11_SYSPLL0_ECLK_ID;
477         input.command = GET_SMU_CLOCK_INFO_V3_1_GET_CLOCK_FREQ;
478         index = get_index_into_master_table(atom_master_list_of_command_functions_v2_1,
479                                             getsmuclockinfo);
480
481         ret = amdgpu_atom_execute_table(adev->mode_info.atom_context, index,
482                                         (uint32_t *)&input);
483         if (ret)
484                 return -EINVAL;
485
486         output = (struct atom_get_smu_clock_info_output_parameters_v3_1 *)&input;
487         smu->smu_table.boot_values.eclk = le32_to_cpu(output->atom_smu_outputclkfreq.smu_clock_freq_hz) / 10000;
488
489         memset(&input, 0, sizeof(input));
490         input.clk_id = SMU11_SYSPLL0_VCLK_ID;
491         input.command = GET_SMU_CLOCK_INFO_V3_1_GET_CLOCK_FREQ;
492         index = get_index_into_master_table(atom_master_list_of_command_functions_v2_1,
493                                             getsmuclockinfo);
494
495         ret = amdgpu_atom_execute_table(adev->mode_info.atom_context, index,
496                                         (uint32_t *)&input);
497         if (ret)
498                 return -EINVAL;
499
500         output = (struct atom_get_smu_clock_info_output_parameters_v3_1 *)&input;
501         smu->smu_table.boot_values.vclk = le32_to_cpu(output->atom_smu_outputclkfreq.smu_clock_freq_hz) / 10000;
502
503         memset(&input, 0, sizeof(input));
504         input.clk_id = SMU11_SYSPLL0_DCLK_ID;
505         input.command = GET_SMU_CLOCK_INFO_V3_1_GET_CLOCK_FREQ;
506         index = get_index_into_master_table(atom_master_list_of_command_functions_v2_1,
507                                             getsmuclockinfo);
508
509         ret = amdgpu_atom_execute_table(adev->mode_info.atom_context, index,
510                                         (uint32_t *)&input);
511         if (ret)
512                 return -EINVAL;
513
514         output = (struct atom_get_smu_clock_info_output_parameters_v3_1 *)&input;
515         smu->smu_table.boot_values.dclk = le32_to_cpu(output->atom_smu_outputclkfreq.smu_clock_freq_hz) / 10000;
516
517         return 0;
518 }
519
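/*
 * Hand the driver-allocated memory pool over to the SMU: both the CPU
 * virtual address and the MC address are sent as high/low 32-bit halves,
 * followed by the pool size for the DRAM log.
 */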
520 static int smu_v11_0_notify_memory_pool_location(struct smu_context *smu)
521 {
522         struct smu_table_context *smu_table = &smu->smu_table;
523         struct smu_table *memory_pool = &smu_table->memory_pool;
524         int ret = 0;
525         uint64_t address;
526         uint32_t address_low, address_high;
527
528         if (memory_pool->size == 0 || memory_pool->cpu_addr == NULL)
529                 return ret;
530
531         address = (uint64_t)memory_pool->cpu_addr;
532         address_high = (uint32_t)upper_32_bits(address);
533         address_low  = (uint32_t)lower_32_bits(address);
534
535         ret = smu_send_smc_msg_with_param(smu,
536                                           SMU_MSG_SetSystemVirtualDramAddrHigh,
537                                           address_high);
538         if (ret)
539                 return ret;
540         ret = smu_send_smc_msg_with_param(smu,
541                                           SMU_MSG_SetSystemVirtualDramAddrLow,
542                                           address_low);
543         if (ret)
544                 return ret;
545
546         address = memory_pool->mc_address;
547         address_high = (uint32_t)upper_32_bits(address);
548         address_low  = (uint32_t)lower_32_bits(address);
549
550         ret = smu_send_smc_msg_with_param(smu, SMU_MSG_DramLogSetDramAddrHigh,
551                                           address_high);
552         if (ret)
553                 return ret;
554         ret = smu_send_smc_msg_with_param(smu, SMU_MSG_DramLogSetDramAddrLow,
555                                           address_low);
556         if (ret)
557                 return ret;
558         ret = smu_send_smc_msg_with_param(smu, SMU_MSG_DramLogSetDramSize,
559                                           (uint32_t)memory_pool->size);
560         if (ret)
561                 return ret;
562
563         return ret;
564 }
565
566 static int smu_v11_0_check_pptable(struct smu_context *smu)
567 {
568         int ret;
569
570         ret = smu_check_powerplay_table(smu);
571         return ret;
572 }
573
574 static int smu_v11_0_parse_pptable(struct smu_context *smu)
575 {
576         int ret;
577
578         struct smu_table_context *table_context = &smu->smu_table;
579
580         if (table_context->driver_pptable)
581                 return -EINVAL;
582
583         table_context->driver_pptable = kzalloc(sizeof(PPTable_t), GFP_KERNEL);
584
585         if (!table_context->driver_pptable)
586                 return -ENOMEM;
587
588         ret = smu_store_powerplay_table(smu);
589         if (ret)
590                 return -EINVAL;
591
592         ret = smu_append_powerplay_table(smu);
593
594         return ret;
595 }
596
597 static int smu_v11_0_populate_smc_pptable(struct smu_context *smu)
598 {
599         int ret;
600
601         ret = smu_set_default_dpm_table(smu);
602
603         return ret;
604 }
605
606 static int smu_v11_0_write_pptable(struct smu_context *smu)
607 {
608         struct smu_table_context *table_context = &smu->smu_table;
609         int ret = 0;
610
611         ret = smu_update_table(smu, TABLE_PPTABLE, table_context->driver_pptable, true);
612
613         return ret;
614 }
615
616 static int smu_v11_0_write_watermarks_table(struct smu_context *smu)
617 {
618         return smu_update_table(smu, TABLE_WATERMARKS,
619                                 smu->smu_table.tables[TABLE_WATERMARKS].cpu_addr, true);
620 }
621
622 static int smu_v11_0_set_deep_sleep_dcefclk(struct smu_context *smu, uint32_t clk)
623 {
624         int ret;
625
626         ret = smu_send_smc_msg_with_param(smu,
627                                           SMU_MSG_SetMinDeepSleepDcefclk, clk);
628         if (ret)
629                 pr_err("SMU11 attempt to set min deep sleep DCEFCLK failed!");
630
631         return ret;
632 }
633
634 static int smu_v11_0_set_min_dcef_deep_sleep(struct smu_context *smu)
635 {
636         struct smu_table_context *table_context = &smu->smu_table;
637
638         if (!table_context)
639                 return -EINVAL;
640
641         return smu_set_deep_sleep_dcefclk(smu,
642                                           table_context->boot_values.dcefclk / 100);
643 }
644
645 static int smu_v11_0_set_tool_table_location(struct smu_context *smu)
646 {
647         int ret = 0;
648         struct smu_table *tool_table = &smu->smu_table.tables[TABLE_PMSTATUSLOG];
649
650         if (tool_table->mc_address) {
651                 ret = smu_send_smc_msg_with_param(smu,
652                                 SMU_MSG_SetToolsDramAddrHigh,
653                                 upper_32_bits(tool_table->mc_address));
654                 if (!ret)
655                         ret = smu_send_smc_msg_with_param(smu,
656                                 SMU_MSG_SetToolsDramAddrLow,
657                                 lower_32_bits(tool_table->mc_address));
658         }
659
660         return ret;
661 }
662
663 static int smu_v11_0_init_display(struct smu_context *smu)
664 {
665         int ret = 0;
666         ret = smu_send_smc_msg_with_param(smu, SMU_MSG_NumOfDisplays, 0);
667         return ret;
668 }
669
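/*
 * Enable or disable a single SMU feature. The 64-bit feature space is
 * split across two 32-bit masks, so bits 0-31 go through the
 * *SmuFeaturesLow messages and bits 32-63 through *SmuFeaturesHigh.
 */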
670 static int smu_v11_0_update_feature_enable_state(struct smu_context *smu, uint32_t feature_id, bool enabled)
671 {
672         uint32_t feature_low = 0, feature_high = 0;
673         int ret = 0;
674
675         if (feature_id >= 0 && feature_id < 32)
676                 feature_low = (1 << feature_id);
677         else if (feature_id > 31 && feature_id < 64)
678                 feature_high = (1 << (feature_id - 32));
679         else
680                 return -EINVAL;
681
682         if (enabled) {
683                 ret = smu_send_smc_msg_with_param(smu, SMU_MSG_EnableSmuFeaturesLow,
684                                                   feature_low);
685                 if (ret)
686                         return ret;
687                 ret = smu_send_smc_msg_with_param(smu, SMU_MSG_EnableSmuFeaturesHigh,
688                                                   feature_high);
689                 if (ret)
690                         return ret;
691
692         } else {
693                 ret = smu_send_smc_msg_with_param(smu, SMU_MSG_DisableSmuFeaturesLow,
694                                                   feature_low);
695                 if (ret)
696                         return ret;
697                 ret = smu_send_smc_msg_with_param(smu, SMU_MSG_DisableSmuFeaturesHigh,
698                                                   feature_high);
699                 if (ret)
700                         return ret;
701
702         }
703
704         return ret;
705 }
706
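/*
 * Push the driver's allowed-feature bitmap to the SMU as two 32-bit
 * halves (high word first), under the feature mutex.
 */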
707 static int smu_v11_0_set_allowed_mask(struct smu_context *smu)
708 {
709         struct smu_feature *feature = &smu->smu_feature;
710         int ret = 0;
711         uint32_t feature_mask[2];
712
713         mutex_lock(&feature->mutex);
714         if (bitmap_empty(feature->allowed, SMU_FEATURE_MAX) || feature->feature_num < 64)
715                 goto failed;
716
717         bitmap_copy((unsigned long *)feature_mask, feature->allowed, 64);
718
719         ret = smu_send_smc_msg_with_param(smu, SMU_MSG_SetAllowedFeaturesMaskHigh,
720                                           feature_mask[1]);
721         if (ret)
722                 goto failed;
723
724         ret = smu_send_smc_msg_with_param(smu, SMU_MSG_SetAllowedFeaturesMaskLow,
725                                           feature_mask[0]);
726         if (ret)
727                 goto failed;
728
729 failed:
730         mutex_unlock(&feature->mutex);
731         return ret;
732 }
733
734 static int smu_v11_0_get_enabled_mask(struct smu_context *smu,
735                                       uint32_t *feature_mask, uint32_t num)
736 {
737         uint32_t feature_mask_high = 0, feature_mask_low = 0;
738         int ret = 0;
739
740         if (!feature_mask || num < 2)
741                 return -EINVAL;
742
743         ret = smu_send_smc_msg(smu, SMU_MSG_GetEnabledSmuFeaturesHigh);
744         if (ret)
745                 return ret;
746         ret = smu_read_smc_arg(smu, &feature_mask_high);
747         if (ret)
748                 return ret;
749
750         ret = smu_send_smc_msg(smu, SMU_MSG_GetEnabledSmuFeaturesLow);
751         if (ret)
752                 return ret;
753         ret = smu_read_smc_arg(smu, &feature_mask_low);
754         if (ret)
755                 return ret;
756
757         feature_mask[0] = feature_mask_low;
758         feature_mask[1] = feature_mask_high;
759
760         return ret;
761 }
762
763 static bool smu_v11_0_is_dpm_running(struct smu_context *smu)
764 {
765         int ret = 0;
766         uint32_t feature_mask[2];
767         unsigned long feature_enabled;
768         ret = smu_v11_0_get_enabled_mask(smu, feature_mask, 2);
769         feature_enabled = (unsigned long)((uint64_t)feature_mask[0] |
770                            ((uint64_t)feature_mask[1] << 32));
771         return !!(feature_enabled & SMC_DPM_FEATURE);
772 }
773
774 static int smu_v11_0_enable_all_mask(struct smu_context *smu)
775 {
776         struct smu_feature *feature = &smu->smu_feature;
777         uint32_t feature_mask[2];
778         int ret = 0;
779
780         ret = smu_send_smc_msg(smu, SMU_MSG_EnableAllSmuFeatures);
781         if (ret)
782                 return ret;
783         ret = smu_feature_get_enabled_mask(smu, feature_mask, 2);
784         if (ret)
785                 return ret;
786
787         bitmap_copy(feature->enabled, (unsigned long *)&feature_mask,
788                     feature->feature_num);
789         bitmap_copy(feature->supported, (unsigned long *)&feature_mask,
790                     feature->feature_num);
791
792         return ret;
793 }
794
795 static int smu_v11_0_disable_all_mask(struct smu_context *smu)
796 {
797         struct smu_feature *feature = &smu->smu_feature;
798         uint32_t feature_mask[2];
799         int ret = 0;
800
801         ret = smu_send_smc_msg(smu, SMU_MSG_DisableAllSmuFeatures);
802         if (ret)
803                 return ret;
804         ret = smu_feature_get_enabled_mask(smu, feature_mask, 2);
805         if (ret)
806                 return ret;
807
808         bitmap_copy(feature->enabled, (unsigned long *)&feature_mask,
809                     feature->feature_num);
810         bitmap_copy(feature->supported, (unsigned long *)&feature_mask,
811                     feature->feature_num);
812
813         return ret;
814 }
815
816 static int smu_v11_0_notify_display_change(struct smu_context *smu)
817 {
818         int ret = 0;
819
820         if (smu_feature_is_enabled(smu, FEATURE_DPM_UCLK_BIT))
821             ret = smu_send_smc_msg_with_param(smu, SMU_MSG_SetUclkFastSwitch, 1);
822
823         return ret;
824 }
825
826 static int
827 smu_v11_0_get_max_sustainable_clock(struct smu_context *smu, uint32_t *clock,
828                                     PPCLK_e clock_select)
829 {
830         int ret = 0;
831
832         ret = smu_send_smc_msg_with_param(smu, SMU_MSG_GetDcModeMaxDpmFreq,
833                                           clock_select << 16);
834         if (ret) {
835                 pr_err("[GetMaxSustainableClock] Failed to get max DC clock from SMC!");
836                 return ret;
837         }
838
839         ret = smu_read_smc_arg(smu, clock);
840         if (ret)
841                 return ret;
842
843         if (*clock != 0)
844                 return 0;
845
846         /* if DC limit is zero, return AC limit */
847         ret = smu_send_smc_msg_with_param(smu, SMU_MSG_GetMaxDpmFreq,
848                                           clock_select << 16);
849         if (ret) {
850                 pr_err("[GetMaxSustainableClock] failed to get max AC clock from SMC!");
851                 return ret;
852         }
853
854         ret = smu_read_smc_arg(smu, clock);
855
856         return ret;
857 }
858
859 static int smu_v11_0_init_max_sustainable_clocks(struct smu_context *smu)
860 {
861         struct smu_11_0_max_sustainable_clocks *max_sustainable_clocks;
862         int ret = 0;
863
864         max_sustainable_clocks = kzalloc(sizeof(struct smu_11_0_max_sustainable_clocks),
865                                          GFP_KERNEL);
        if (!max_sustainable_clocks)
                return -ENOMEM;
866         smu->smu_table.max_sustainable_clocks = (void *)max_sustainable_clocks;
867
868         max_sustainable_clocks->uclock = smu->smu_table.boot_values.uclk / 100;
869         max_sustainable_clocks->soc_clock = smu->smu_table.boot_values.socclk / 100;
870         max_sustainable_clocks->dcef_clock = smu->smu_table.boot_values.dcefclk / 100;
871         max_sustainable_clocks->display_clock = 0xFFFFFFFF;
872         max_sustainable_clocks->phy_clock = 0xFFFFFFFF;
873         max_sustainable_clocks->pixel_clock = 0xFFFFFFFF;
874
875         if (smu_feature_is_enabled(smu, FEATURE_DPM_UCLK_BIT)) {
876                 ret = smu_v11_0_get_max_sustainable_clock(smu,
877                                                           &(max_sustainable_clocks->uclock),
878                                                           PPCLK_UCLK);
879                 if (ret) {
880                         pr_err("[%s] failed to get max UCLK from SMC!",
881                                __func__);
882                         return ret;
883                 }
884         }
885
886         if (smu_feature_is_enabled(smu, FEATURE_DPM_SOCCLK_BIT)) {
887                 ret = smu_v11_0_get_max_sustainable_clock(smu,
888                                                           &(max_sustainable_clocks->soc_clock),
889                                                           PPCLK_SOCCLK);
890                 if (ret) {
891                         pr_err("[%s] failed to get max SOCCLK from SMC!",
892                                __func__);
893                         return ret;
894                 }
895         }
896
897         if (smu_feature_is_enabled(smu, FEATURE_DPM_DCEFCLK_BIT)) {
898                 ret = smu_v11_0_get_max_sustainable_clock(smu,
899                                                           &(max_sustainable_clocks->dcef_clock),
900                                                           PPCLK_DCEFCLK);
901                 if (ret) {
902                         pr_err("[%s] failed to get max DCEFCLK from SMC!",
903                                __func__);
904                         return ret;
905                 }
906
907                 ret = smu_v11_0_get_max_sustainable_clock(smu,
908                                                           &(max_sustainable_clocks->display_clock),
909                                                           PPCLK_DISPCLK);
910                 if (ret) {
911                         pr_err("[%s] failed to get max DISPCLK from SMC!",
912                                __func__);
913                         return ret;
914                 }
915                 ret = smu_v11_0_get_max_sustainable_clock(smu,
916                                                           &(max_sustainable_clocks->phy_clock),
917                                                           PPCLK_PHYCLK);
918                 if (ret) {
919                         pr_err("[%s] failed to get max PHYCLK from SMC!",
920                                __func__);
921                         return ret;
922                 }
923                 ret = smu_v11_0_get_max_sustainable_clock(smu,
924                                                           &(max_sustainable_clocks->pixel_clock),
925                                                           PPCLK_PIXCLK);
926                 if (ret) {
927                         pr_err("[%s] failed to get max PIXCLK from SMC!",
928                                __func__);
929                         return ret;
930                 }
931         }
932
933         if (max_sustainable_clocks->soc_clock < max_sustainable_clocks->uclock)
934                 max_sustainable_clocks->uclock = max_sustainable_clocks->soc_clock;
935
936         return 0;
937 }
938
939 static int smu_v11_0_get_power_limit(struct smu_context *smu,
940                                      uint32_t *limit,
941                                      bool get_default)
942 {
943         int ret = 0;
944
945         if (get_default) {
946                 mutex_lock(&smu->mutex);
947                 *limit = smu->default_power_limit;
948                 mutex_unlock(&smu->mutex);
949         } else {
950                 ret = smu_send_smc_msg_with_param(smu, SMU_MSG_GetPptLimit,
951                                                   POWER_SOURCE_AC << 16);
952                 if (ret) {
953                         pr_err("[%s] get PPT limit failed!", __func__);
954                         return ret;
955                 }
956                 smu_read_smc_arg(smu, limit);
957                 smu->power_limit = *limit;
958         }
959
960         return ret;
961 }
962
963 static int smu_v11_0_set_power_limit(struct smu_context *smu, uint32_t n)
964 {
965         int ret = 0;
966
967         if (smu_feature_is_enabled(smu, FEATURE_PPT_BIT))
968                 ret = smu_send_smc_msg_with_param(smu, SMU_MSG_SetPptLimit, n);
969         if (ret) {
970                 pr_err("[%s] Set power limit Failed!", __func__);
971                 return ret;
972         }
973
974         return ret;
975 }
976
977 static int smu_v11_0_get_current_clk_freq(struct smu_context *smu, uint32_t clk_id, uint32_t *value)
978 {
979         int ret = 0;
980         uint32_t freq;
981
982         if (clk_id >= PPCLK_COUNT || !value)
983                 return -EINVAL;
984
985         ret = smu_send_smc_msg_with_param(smu,
986                         SMU_MSG_GetDpmClockFreq, (clk_id << 16));
987         if (ret)
988                 return ret;
989
990         ret = smu_read_smc_arg(smu, &freq);
991         if (ret)
992                 return ret;
993
994         freq *= 100;
995         *value = freq;
996
997         return ret;
998 }
999
1000 static int smu_v11_0_get_thermal_range(struct smu_context *smu,
1001                                 struct PP_TemperatureRange *range)
1002 {
1003         memcpy(range, &SMU7ThermalWithDelayPolicy[0], sizeof(struct PP_TemperatureRange));
1004
1005         range->max = smu->smu_table.software_shutdown_temp *
1006                 PP_TEMPERATURE_UNITS_PER_CENTIGRADES;
1007
1008         return 0;
1009 }
1010
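/*
 * Program the THM thermal interrupt thresholds: the requested range is
 * clamped to the SMU11 alert limits and DIG_THERM_INTH/INTL are written
 * in degrees Celsius.
 */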
1011 static int smu_v11_0_set_thermal_range(struct smu_context *smu,
1012                         struct PP_TemperatureRange *range)
1013 {
1014         struct amdgpu_device *adev = smu->adev;
1015         int low = SMU11_THERMAL_MINIMUM_ALERT_TEMP *
1016                 PP_TEMPERATURE_UNITS_PER_CENTIGRADES;
1017         int high = SMU11_THERMAL_MAXIMUM_ALERT_TEMP *
1018                 PP_TEMPERATURE_UNITS_PER_CENTIGRADES;
1019         uint32_t val;
1020
1021         if (low < range->min)
1022                 low = range->min;
1023         if (high > range->max)
1024                 high = range->max;
1025
1026         if (low > high)
1027                 return -EINVAL;
1028
1029         val = RREG32_SOC15(THM, 0, mmTHM_THERMAL_INT_CTRL);
1030         val = REG_SET_FIELD(val, THM_THERMAL_INT_CTRL, MAX_IH_CREDIT, 5);
1031         val = REG_SET_FIELD(val, THM_THERMAL_INT_CTRL, THERM_IH_HW_ENA, 1);
1032         val = REG_SET_FIELD(val, THM_THERMAL_INT_CTRL, DIG_THERM_INTH, (high / PP_TEMPERATURE_UNITS_PER_CENTIGRADES));
1033         val = REG_SET_FIELD(val, THM_THERMAL_INT_CTRL, DIG_THERM_INTL, (low / PP_TEMPERATURE_UNITS_PER_CENTIGRADES));
1034         val = val & (~THM_THERMAL_INT_CTRL__THERM_TRIGGER_MASK_MASK);
1035
1036         WREG32_SOC15(THM, 0, mmTHM_THERMAL_INT_CTRL, val);
1037
1038         return 0;
1039 }
1040
1041 static int smu_v11_0_enable_thermal_alert(struct smu_context *smu)
1042 {
1043         struct amdgpu_device *adev = smu->adev;
1044         uint32_t val = 0;
1045
1046         val |= (1 << THM_THERMAL_INT_ENA__THERM_INTH_CLR__SHIFT);
1047         val |= (1 << THM_THERMAL_INT_ENA__THERM_INTL_CLR__SHIFT);
1048         val |= (1 << THM_THERMAL_INT_ENA__THERM_TRIGGER_CLR__SHIFT);
1049
1050         WREG32_SOC15(THM, 0, mmTHM_THERMAL_INT_ENA, val);
1051
1052         return 0;
1053 }
1054
1055 static int smu_v11_0_set_thermal_fan_table(struct smu_context *smu)
1056 {
1057         int ret;
1058         struct smu_table_context *table_context = &smu->smu_table;
1059         PPTable_t *pptable = table_context->driver_pptable;
1060
1061         ret = smu_send_smc_msg_with_param(smu, SMU_MSG_SetFanTemperatureTarget,
1062                         (uint32_t)pptable->FanTargetTemperature);
1063
1064         return ret;
1065 }
1066
1067 static int smu_v11_0_start_thermal_control(struct smu_context *smu)
1068 {
1069         int ret = 0;
1070         struct PP_TemperatureRange range;
1071         struct amdgpu_device *adev = smu->adev;
1072
1073         smu_v11_0_get_thermal_range(smu, &range);
1074
1075         if (smu->smu_table.thermal_controller_type) {
1076                 ret = smu_v11_0_set_thermal_range(smu, &range);
1077                 if (ret)
1078                         return ret;
1079
1080                 ret = smu_v11_0_enable_thermal_alert(smu);
1081                 if (ret)
1082                         return ret;
1083                 ret = smu_v11_0_set_thermal_fan_table(smu);
1084                 if (ret)
1085                         return ret;
1086         }
1087
1088         adev->pm.dpm.thermal.min_temp = range.min;
1089         adev->pm.dpm.thermal.max_temp = range.max;
1090
1091         return ret;
1092 }
1093
1094 static int smu_v11_0_get_current_activity_percent(struct smu_context *smu,
1095                                                   uint32_t *value)
1096 {
1097         int ret = 0;
1098         SmuMetrics_t metrics;
1099
1100         if (!value)
1101                 return -EINVAL;
1102
1103         ret = smu_update_table(smu, TABLE_SMU_METRICS, (void *)&metrics, false);
1104         if (ret)
1105                 return ret;
1106
1107         *value = metrics.AverageGfxActivity;
1108
1109         return 0;
1110 }
1111
1112 static int smu_v11_0_thermal_get_temperature(struct smu_context *smu, uint32_t *value)
1113 {
1114         struct amdgpu_device *adev = smu->adev;
1115         uint32_t temp = 0;
1116
1117         if (!value)
1118                 return -EINVAL;
1119
1120         temp = RREG32_SOC15(THM, 0, mmCG_MULT_THERMAL_STATUS);
1121         temp = (temp & CG_MULT_THERMAL_STATUS__CTF_TEMP_MASK) >>
1122                         CG_MULT_THERMAL_STATUS__CTF_TEMP__SHIFT;
1123
1124         temp = temp & 0x1ff;
1125         temp *= SMU11_TEMPERATURE_UNITS_PER_CENTIGRADES;
1126
1127         *value = temp;
1128
1129         return 0;
1130 }
1131
1132 static int smu_v11_0_get_gpu_power(struct smu_context *smu, uint32_t *value)
1133 {
1134         int ret = 0;
1135         SmuMetrics_t metrics;
1136
1137         if (!value)
1138                 return -EINVAL;
1139
1140         ret = smu_update_table(smu, TABLE_SMU_METRICS, (void *)&metrics, false);
1141         if (ret)
1142                 return ret;
1143
1144         *value = metrics.CurrSocketPower << 8;
1145
1146         return 0;
1147 }
1148
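/* Convert an SVI0 telemetry VID code to VDDC: (6200 - vid * 25) / 4, in mV. */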
1149 static uint16_t convert_to_vddc(uint8_t vid)
1150 {
1151         return (uint16_t) ((6200 - (vid * 25)) / SMU11_VOLTAGE_SCALE);
1152 }
1153
1154 static int smu_v11_0_get_gfx_vdd(struct smu_context *smu, uint32_t *value)
1155 {
1156         struct amdgpu_device *adev = smu->adev;
1157         uint32_t vdd = 0, val_vid = 0;
1158
1159         if (!value)
1160                 return -EINVAL;
1161         val_vid = (RREG32_SOC15(SMUIO, 0, mmSMUSVI0_TEL_PLANE0) &
1162                 SMUSVI0_TEL_PLANE0__SVI0_PLANE0_VDDCOR_MASK) >>
1163                 SMUSVI0_TEL_PLANE0__SVI0_PLANE0_VDDCOR__SHIFT;
1164
1165         vdd = (uint32_t)convert_to_vddc((uint8_t)val_vid);
1166
1167         *value = vdd;
1168
1169         return 0;
1170
1171 }
1172
1173 static int smu_v11_0_read_sensor(struct smu_context *smu,
1174                                  enum amd_pp_sensors sensor,
1175                                  void *data, uint32_t *size)
1176 {
1177         struct smu_table_context *table_context = &smu->smu_table;
1178         PPTable_t *pptable = table_context->driver_pptable;
1179         int ret = 0;
1180         switch (sensor) {
1181         case AMDGPU_PP_SENSOR_GPU_LOAD:
1182                 ret = smu_v11_0_get_current_activity_percent(smu,
1183                                                              (uint32_t *)data);
1184                 *size = 4;
1185                 break;
1186         case AMDGPU_PP_SENSOR_GFX_MCLK:
1187                 ret = smu_get_current_clk_freq(smu, PPCLK_UCLK, (uint32_t *)data);
1188                 *size = 4;
1189                 break;
1190         case AMDGPU_PP_SENSOR_GFX_SCLK:
1191                 ret = smu_get_current_clk_freq(smu, PPCLK_GFXCLK, (uint32_t *)data);
1192                 *size = 4;
1193                 break;
1194         case AMDGPU_PP_SENSOR_GPU_TEMP:
1195                 ret = smu_v11_0_thermal_get_temperature(smu, (uint32_t *)data);
1196                 *size = 4;
1197                 break;
1198         case AMDGPU_PP_SENSOR_GPU_POWER:
1199                 ret = smu_v11_0_get_gpu_power(smu, (uint32_t *)data);
1200                 *size = 4;
1201                 break;
1202         case AMDGPU_PP_SENSOR_VDDGFX:
1203                 ret = smu_v11_0_get_gfx_vdd(smu, (uint32_t *)data);
1204                 *size = 4;
1205                 break;
1206         case AMDGPU_PP_SENSOR_UVD_POWER:
1207                 *(uint32_t *)data = smu_feature_is_enabled(smu, FEATURE_DPM_UVD_BIT) ? 1 : 0;
1208                 *size = 4;
1209                 break;
1210         case AMDGPU_PP_SENSOR_VCE_POWER:
1211                 *(uint32_t *)data = smu_feature_is_enabled(smu, FEATURE_DPM_VCE_BIT) ? 1 : 0;
1212                 *size = 4;
1213                 break;
1214         case AMDGPU_PP_SENSOR_MIN_FAN_RPM:
1215                 *(uint32_t *)data = 0;
1216                 *size = 4;
1217                 break;
1218         case AMDGPU_PP_SENSOR_MAX_FAN_RPM:
1219                 *(uint32_t *)data = pptable->FanMaximumRpm;
1220                 *size = 4;
1221                 break;
1222         default:
1223                 ret = smu_common_read_sensor(smu, sensor, data, size);
1224                 break;
1225         }
1226
1227         if (ret)
1228                 *size = 0;
1229
1230         return ret;
1231 }
1232
1233 static int
1234 smu_v11_0_display_clock_voltage_request(struct smu_context *smu,
1235                                         struct pp_display_clock_request
1236                                         *clock_req)
1237 {
1238         enum amd_pp_clock_type clk_type = clock_req->clock_type;
1239         int ret = 0;
1240         PPCLK_e clk_select = 0;
1241         uint32_t clk_freq = clock_req->clock_freq_in_khz / 1000;
1242
1243         if (smu_feature_is_enabled(smu, FEATURE_DPM_DCEFCLK_BIT)) {
1244                 switch (clk_type) {
1245                 case amd_pp_dcef_clock:
1246                         clk_select = PPCLK_DCEFCLK;
1247                         break;
1248                 case amd_pp_disp_clock:
1249                         clk_select = PPCLK_DISPCLK;
1250                         break;
1251                 case amd_pp_pixel_clock:
1252                         clk_select = PPCLK_PIXCLK;
1253                         break;
1254                 case amd_pp_phy_clock:
1255                         clk_select = PPCLK_PHYCLK;
1256                         break;
1257                 default:
1258                         pr_info("[%s] Invalid Clock Type!", __func__);
1259                         ret = -EINVAL;
1260                         break;
1261                 }
1262
1263                 if (ret)
1264                         goto failed;
1265
1266                 ret = smu_send_smc_msg_with_param(smu, SMU_MSG_SetHardMinByFreq,
1267                                                   (clk_select << 16) | clk_freq);
1268         }
1269
1270 failed:
1271         return ret;
1272 }
1273
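/*
 * Translate the DC watermark ranges into the SMU Watermarks_t layout:
 * DMIF sets fill WatermarkRow[1], MCIF sets fill WatermarkRow[0], and
 * all clocks are converted from kHz to MHz.
 */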
1274 static int smu_v11_0_set_watermarks_table(struct smu_context *smu,
1275                                           Watermarks_t *table, struct
1276                                           dm_pp_wm_sets_with_clock_ranges_soc15
1277                                           *clock_ranges)
1278 {
1279         int i;
1280
1281         if (!table || !clock_ranges)
1282                 return -EINVAL;
1283
1284         if (clock_ranges->num_wm_dmif_sets > 4 ||
1285             clock_ranges->num_wm_mcif_sets > 4)
1286                 return -EINVAL;
1287
1288         for (i = 0; i < clock_ranges->num_wm_dmif_sets; i++) {
1289                 table->WatermarkRow[1][i].MinClock =
1290                         cpu_to_le16((uint16_t)
1291                         (clock_ranges->wm_dmif_clocks_ranges[i].wm_min_dcfclk_clk_in_khz /
1292                         1000));
1293                 table->WatermarkRow[1][i].MaxClock =
1294                         cpu_to_le16((uint16_t)
1295                         (clock_ranges->wm_dmif_clocks_ranges[i].wm_max_dcfclk_clk_in_khz /
1296                         1000));
1297                 table->WatermarkRow[1][i].MinUclk =
1298                         cpu_to_le16((uint16_t)
1299                         (clock_ranges->wm_dmif_clocks_ranges[i].wm_min_mem_clk_in_khz /
1300                         1000));
1301                 table->WatermarkRow[1][i].MaxUclk =
1302                         cpu_to_le16((uint16_t)
1303                         (clock_ranges->wm_dmif_clocks_ranges[i].wm_max_mem_clk_in_khz /
1304                         1000));
1305                 table->WatermarkRow[1][i].WmSetting = (uint8_t)
1306                                 clock_ranges->wm_dmif_clocks_ranges[i].wm_set_id;
1307         }
1308
1309         for (i = 0; i < clock_ranges->num_wm_mcif_sets; i++) {
1310                 table->WatermarkRow[0][i].MinClock =
1311                         cpu_to_le16((uint16_t)
1312                         (clock_ranges->wm_mcif_clocks_ranges[i].wm_min_socclk_clk_in_khz /
1313                         1000));
1314                 table->WatermarkRow[0][i].MaxClock =
1315                         cpu_to_le16((uint16_t)
1316                         (clock_ranges->wm_mcif_clocks_ranges[i].wm_max_socclk_clk_in_khz /
1317                         1000));
1318                 table->WatermarkRow[0][i].MinUclk =
1319                         cpu_to_le16((uint16_t)
1320                         (clock_ranges->wm_mcif_clocks_ranges[i].wm_min_mem_clk_in_khz /
1321                         1000));
1322                 table->WatermarkRow[0][i].MaxUclk =
1323                         cpu_to_le16((uint16_t)
1324                         (clock_ranges->wm_mcif_clocks_ranges[i].wm_max_mem_clk_in_khz /
1325                         1000));
1326                 table->WatermarkRow[0][i].WmSetting = (uint8_t)
1327                                 clock_ranges->wm_mcif_clocks_ranges[i].wm_set_id;
1328         }
1329
1330         return 0;
1331 }
1332
1333 static int
1334 smu_v11_0_set_watermarks_for_clock_ranges(struct smu_context *smu, struct
1335                                           dm_pp_wm_sets_with_clock_ranges_soc15
1336                                           *clock_ranges)
1337 {
1338         int ret = 0;
1339         struct smu_table *watermarks = &smu->smu_table.tables[TABLE_WATERMARKS];
1340         Watermarks_t *table = watermarks->cpu_addr;
1341
1342         if (!smu->disable_watermark &&
1343             smu_feature_is_enabled(smu, FEATURE_DPM_DCEFCLK_BIT) &&
1344             smu_feature_is_enabled(smu, FEATURE_DPM_SOCCLK_BIT)) {
1345                 smu_v11_0_set_watermarks_table(smu, table, clock_ranges);
1346                 smu->watermarks_bitmap |= WATERMARKS_EXIST;
1347                 smu->watermarks_bitmap &= ~WATERMARKS_LOADED;
1348         }
1349
1350         return ret;
1351 }
1352
1353 static int smu_v11_0_get_clock_ranges(struct smu_context *smu,
1354                                       uint32_t *clock,
1355                                       PPCLK_e clock_select,
1356                                       bool max)
1357 {
1358         int ret;
1359         *clock = 0;
1360         if (max) {
1361                 ret = smu_send_smc_msg_with_param(smu, SMU_MSG_GetMaxDpmFreq,
1362                                             (clock_select << 16));
1363                 if (ret) {
1364                         pr_err("[GetClockRanges] Failed to get max clock from SMC!\n");
1365                         return ret;
1366                 }
1367                 smu_read_smc_arg(smu, clock);
1368         } else {
1369                 ret = smu_send_smc_msg_with_param(smu, SMU_MSG_GetMinDpmFreq,
1370                                             (clock_select << 16));
1371                 if (ret) {
1372                         pr_err("[GetClockRanges] Failed to get min clock from SMC!\n");
1373                         return ret;
1374                 }
1375                 smu_read_smc_arg(smu, clock);
1376         }
1377
1378         return 0;
1379 }
1380
1381 static uint32_t smu_v11_0_dpm_get_sclk(struct smu_context *smu, bool low)
1382 {
1383         uint32_t gfx_clk;
1384         int ret;
1385
1386         if (!smu_feature_is_enabled(smu, FEATURE_DPM_GFXCLK_BIT)) {
1387                 pr_err("[GetSclks]: gfxclk dpm not enabled!\n");
1388                 return -EPERM;
1389         }
1390
1391         if (low) {
1392                 ret = smu_v11_0_get_clock_ranges(smu, &gfx_clk, PPCLK_GFXCLK, false);
1393                 if (ret) {
1394                         pr_err("[GetSclks]: fail to get min PPCLK_GFXCLK\n");
1395                         return ret;
1396                 }
1397         } else {
1398                 ret = smu_v11_0_get_clock_ranges(smu, &gfx_clk, PPCLK_GFXCLK, true);
1399                 if (ret) {
1400                         pr_err("[GetSclks]: fail to get max PPCLK_GFXCLK\n");
1401                         return ret;
1402                 }
1403         }
1404
1405         return (gfx_clk * 100);
1406 }
1407
1408 static uint32_t smu_v11_0_dpm_get_mclk(struct smu_context *smu, bool low)
1409 {
1410         uint32_t mem_clk;
1411         int ret;
1412
1413         if (!smu_feature_is_enabled(smu, FEATURE_DPM_UCLK_BIT)) {
1414                 pr_err("[GetMclks]: memclk dpm not enabled!\n");
1415                 return -EPERM;
1416         }
1417
1418         if (low) {
1419                 ret = smu_v11_0_get_clock_ranges(smu, &mem_clk, PPCLK_UCLK, false);
1420                 if (ret) {
1421                         pr_err("[GetMclks]: fail to get min PPCLK_UCLK\n");
1422                         return ret;
1423                 }
1424         } else {
1425                 ret = smu_v11_0_get_clock_ranges(smu, &mem_clk, PPCLK_UCLK, true);
1426                 if (ret) {
1427                         pr_err("[GetMclks]: fail to get max PPCLK_UCLK\n");
1428                         return ret;
1429                 }
1430         }
1431
1432         return (mem_clk * 100);
1433 }
1434
1435 static int smu_v11_0_set_od8_default_settings(struct smu_context *smu,
1436                                               bool initialize)
1437 {
1438         struct smu_table_context *table_context = &smu->smu_table;
1439         int ret;
1440
1441         if (initialize) {
1442                 if (table_context->overdrive_table)
1443                         return -EINVAL;
1444
1445                 table_context->overdrive_table = kzalloc(sizeof(OverDriveTable_t), GFP_KERNEL);
1446
1447                 if (!table_context->overdrive_table)
1448                         return -ENOMEM;
1449
1450                 ret = smu_update_table(smu, TABLE_OVERDRIVE, table_context->overdrive_table, false);
1451                 if (ret) {
1452                         pr_err("Failed to export over drive table!\n");
1453                         return ret;
1454                 }
1455
1456                 smu_set_default_od8_settings(smu);
1457         }
1458
1459         ret = smu_update_table(smu, TABLE_OVERDRIVE, table_context->overdrive_table, true);
1460         if (ret) {
1461                 pr_err("Failed to import over drive table!\n");
1462                 return ret;
1463         }
1464
1465         return 0;
1466 }
1467
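     /*
      * Push an activity monitor coefficient table for @workload_type to the
      * SMU: copy it into the driver-owned DRAM buffer, program the buffer
      * address into the SMU and ask it to pull the table from DRAM.
      */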
1468 static int smu_v11_0_set_activity_monitor_coeff(struct smu_context *smu,
1469                                       uint8_t *table, uint16_t workload_type)
1470 {
1471         int ret = 0;
1472         memcpy(smu->smu_table.tables[TABLE_ACTIVITY_MONITOR_COEFF].cpu_addr,
1473                table, smu->smu_table.tables[TABLE_ACTIVITY_MONITOR_COEFF].size);
1474         ret = smu_send_smc_msg_with_param(smu, SMU_MSG_SetDriverDramAddrHigh,
1475                                           upper_32_bits(smu->smu_table.tables[TABLE_ACTIVITY_MONITOR_COEFF].mc_address));
1476         if (ret) {
1477                 pr_err("[%s] Attempt to Set Dram Addr High Failed!", __func__);
1478                 return ret;
1479         }
1480         ret = smu_send_smc_msg_with_param(smu, SMU_MSG_SetDriverDramAddrLow,
1481                                           lower_32_bits(smu->smu_table.tables[TABLE_ACTIVITY_MONITOR_COEFF].mc_address));
1482         if (ret) {
1483                 pr_err("[%s] Attempt to Set Dram Addr Low Failed!", __func__);
1484                 return ret;
1485         }
1486         ret = smu_send_smc_msg_with_param(smu, SMU_MSG_TransferTableDram2Smu,
1487                                           TABLE_ACTIVITY_MONITOR_COEFF | (workload_type << 16));
1488         if (ret) {
1489                 pr_err("[%s] Attempt to Transfer Table To SMU Failed!", __func__);
1490                 return ret;
1491         }
1492
1493         return ret;
1494 }
1495
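     /*
      * Fetch the activity monitor coefficient table for @workload_type from
      * the SMU: have the SMU dump it into the driver-owned DRAM buffer and
      * copy the result into the caller's buffer.
      */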
1496 static int smu_v11_0_get_activity_monitor_coeff(struct smu_context *smu,
1497                                       uint8_t *table, uint16_t workload_type)
1498 {
1499         int ret = 0;
1500         ret = smu_send_smc_msg_with_param(smu, SMU_MSG_SetDriverDramAddrHigh,
1501                                           upper_32_bits(smu->smu_table.tables[TABLE_ACTIVITY_MONITOR_COEFF].mc_address));
1502         if (ret) {
1503                 pr_err("[%s] Attempt to Set Dram Addr High Failed!", __func__);
1504                 return ret;
1505         }
1506
1507         ret = smu_send_smc_msg_with_param(smu, SMU_MSG_SetDriverDramAddrLow,
1508                                           lower_32_bits(smu->smu_table.tables[TABLE_ACTIVITY_MONITOR_COEFF].mc_address));
1509         if (ret) {
1510                 pr_err("[%s] Attempt to Set Dram Addr Low Failed!", __func__);
1511                 return ret;
1512         }
1513
1514         ret = smu_send_smc_msg_with_param(smu, SMU_MSG_TransferTableSmu2Dram,
1515                                           TABLE_ACTIVITY_MONITOR_COEFF | (workload_type << 16));
1516         if (ret) {
1517                 pr_err("[%s] Attempt to Transfer Table From SMU Failed!", __func__);
1518                 return ret;
1519         }
             /* copy the table the SMU just dumped to DRAM back to the caller */
             memcpy(table, smu->smu_table.tables[TABLE_ACTIVITY_MONITOR_COEFF].cpu_addr,
                    smu->smu_table.tables[TABLE_ACTIVITY_MONITOR_COEFF].size);
1520
1521         return ret;
1522 }
1523
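     /* Map a PP_SMC_POWER_PROFILE_* index to the matching WORKLOAD_*_BIT. */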
1524 static int smu_v11_0_conv_power_profile_to_pplib_workload(int power_profile)
1525 {
1526         int pplib_workload = 0;
1527
1528         switch (power_profile) {
1529         case PP_SMC_POWER_PROFILE_BOOTUP_DEFAULT:
1530                 pplib_workload = WORKLOAD_DEFAULT_BIT;
1531                 break;
1532         case PP_SMC_POWER_PROFILE_FULLSCREEN3D:
1533                 pplib_workload = WORKLOAD_PPLIB_FULL_SCREEN_3D_BIT;
1534                 break;
1535         case PP_SMC_POWER_PROFILE_POWERSAVING:
1536                 pplib_workload = WORKLOAD_PPLIB_POWER_SAVING_BIT;
1537                 break;
1538         case PP_SMC_POWER_PROFILE_VIDEO:
1539                 pplib_workload = WORKLOAD_PPLIB_VIDEO_BIT;
1540                 break;
1541         case PP_SMC_POWER_PROFILE_VR:
1542                 pplib_workload = WORKLOAD_PPLIB_VR_BIT;
1543                 break;
1544         case PP_SMC_POWER_PROFILE_COMPUTE:
1545                 pplib_workload = WORKLOAD_PPLIB_COMPUTE_BIT;
1546                 break;
1547         case PP_SMC_POWER_PROFILE_CUSTOM:
1548                 pplib_workload = WORKLOAD_PPLIB_CUSTOM_BIT;
1549                 break;
1550         }
1551
1552         return pplib_workload;
1553 }
1554
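     /*
      * Format the power profile table for the pp_power_profile_mode sysfs
      * interface: one header line, then for every profile the activity
      * monitor coefficients of GFXCLK, SOCCLK, UCLK and FCLK. Returns the
      * number of bytes written to @buf.
      */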
1555 static int smu_v11_0_get_power_profile_mode(struct smu_context *smu, char *buf)
1556 {
1557         DpmActivityMonitorCoeffInt_t activity_monitor;
1558         uint32_t i, size = 0;
1559         uint16_t workload_type = 0;
1560         static const char *profile_name[] = {
1561                                         "BOOTUP_DEFAULT",
1562                                         "3D_FULL_SCREEN",
1563                                         "POWER_SAVING",
1564                                         "VIDEO",
1565                                         "VR",
1566                                         "COMPUTE",
1567                                         "CUSTOM"};
1568         static const char *title[] = {
1569                         "PROFILE_INDEX(NAME)",
1570                         "CLOCK_TYPE(NAME)",
1571                         "FPS",
1572                         "UseRlcBusy",
1573                         "MinActiveFreqType",
1574                         "MinActiveFreq",
1575                         "BoosterFreqType",
1576                         "BoosterFreq",
1577                         "PD_Data_limit_c",
1578                         "PD_Data_error_coeff",
1579                         "PD_Data_error_rate_coeff"};
1580         int result = 0;
1581
1582         if (!buf)
1583                 return -EINVAL;
1584
1585         size += sprintf(buf + size, "%16s %s %s %s %s %s %s %s %s %s %s\n",
1586                         title[0], title[1], title[2], title[3], title[4], title[5],
1587                         title[6], title[7], title[8], title[9], title[10]);
1588
1589         for (i = 0; i <= PP_SMC_POWER_PROFILE_CUSTOM; i++) {
1590                 /* conv PP_SMC_POWER_PROFILE* to WORKLOAD_PPLIB_*_BIT */
1591                 workload_type = smu_v11_0_conv_power_profile_to_pplib_workload(i);
1592                 result = smu_v11_0_get_activity_monitor_coeff(smu,
1593                                                               (uint8_t *)(&activity_monitor),
1594                                                               workload_type);
1595                 if (result) {
1596                         pr_err("[%s] Failed to get activity monitor!", __func__);
1597                         return result;
1598                 }
1599
1600                 size += sprintf(buf + size, "%2d %14s%s:\n",
1601                         i, profile_name[i], (i == smu->power_profile_mode) ? "*" : " ");
1602
1603                 size += sprintf(buf + size, "%19s %d(%13s) %7d %7d %7d %7d %7d %7d %7d %7d %7d\n",
1604                         " ",
1605                         0,
1606                         "GFXCLK",
1607                         activity_monitor.Gfx_FPS,
1608                         activity_monitor.Gfx_UseRlcBusy,
1609                         activity_monitor.Gfx_MinActiveFreqType,
1610                         activity_monitor.Gfx_MinActiveFreq,
1611                         activity_monitor.Gfx_BoosterFreqType,
1612                         activity_monitor.Gfx_BoosterFreq,
1613                         activity_monitor.Gfx_PD_Data_limit_c,
1614                         activity_monitor.Gfx_PD_Data_error_coeff,
1615                         activity_monitor.Gfx_PD_Data_error_rate_coeff);
1616
1617                 size += sprintf(buf + size, "%19s %d(%13s) %7d %7d %7d %7d %7d %7d %7d %7d %7d\n",
1618                         " ",
1619                         1,
1620                         "SOCCLK",
1621                         activity_monitor.Soc_FPS,
1622                         activity_monitor.Soc_UseRlcBusy,
1623                         activity_monitor.Soc_MinActiveFreqType,
1624                         activity_monitor.Soc_MinActiveFreq,
1625                         activity_monitor.Soc_BoosterFreqType,
1626                         activity_monitor.Soc_BoosterFreq,
1627                         activity_monitor.Soc_PD_Data_limit_c,
1628                         activity_monitor.Soc_PD_Data_error_coeff,
1629                         activity_monitor.Soc_PD_Data_error_rate_coeff);
1630
1631                 size += sprintf(buf + size, "%19s %d(%13s) %7d %7d %7d %7d %7d %7d %7d %7d %7d\n",
1632                         " ",
1633                         2,
1634                         "UCLK",
1635                         activity_monitor.Mem_FPS,
1636                         activity_monitor.Mem_UseRlcBusy,
1637                         activity_monitor.Mem_MinActiveFreqType,
1638                         activity_monitor.Mem_MinActiveFreq,
1639                         activity_monitor.Mem_BoosterFreqType,
1640                         activity_monitor.Mem_BoosterFreq,
1641                         activity_monitor.Mem_PD_Data_limit_c,
1642                         activity_monitor.Mem_PD_Data_error_coeff,
1643                         activity_monitor.Mem_PD_Data_error_rate_coeff);
1644
1645                 size += sprintf(buf + size, "%19s %d(%13s) %7d %7d %7d %7d %7d %7d %7d %7d %7d\n",
1646                         " ",
1647                         3,
1648                         "FCLK",
1649                         activity_monitor.Fclk_FPS,
1650                         activity_monitor.Fclk_UseRlcBusy,
1651                         activity_monitor.Fclk_MinActiveFreqType,
1652                         activity_monitor.Fclk_MinActiveFreq,
1653                         activity_monitor.Fclk_BoosterFreqType,
1654                         activity_monitor.Fclk_BoosterFreq,
1655                         activity_monitor.Fclk_PD_Data_limit_c,
1656                         activity_monitor.Fclk_PD_Data_error_coeff,
1657                         activity_monitor.Fclk_PD_Data_error_rate_coeff);
1658         }
1659
1660         return size;
1661 }
1662
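     /*
      * Select a power profile. The requested mode is input[size]; for the
      * CUSTOM profile input[0] picks the clock domain (0 gfxclk, 1 socclk,
      * 2 uclk, 3 fclk) and input[1..9] are the new activity monitor
      * coefficients. Finally the SMU is told which workload is active via
      * SetWorkloadMask.
      */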
1663 static int smu_v11_0_set_power_profile_mode(struct smu_context *smu, long *input, uint32_t size)
1664 {
1665         DpmActivityMonitorCoeffInt_t activity_monitor;
1666         int workload_type, ret = 0;
1667
1668         smu->power_profile_mode = input[size];
1669
1670         if (smu->power_profile_mode > PP_SMC_POWER_PROFILE_CUSTOM) {
1671                 pr_err("Invalid power profile mode %d\n", smu->power_profile_mode);
1672                 return -EINVAL;
1673         }
1674
1675         if (smu->power_profile_mode == PP_SMC_POWER_PROFILE_CUSTOM) {
                     /* input[0..9] are consumed below, so at least 10 parameters are required */
1676                 if (size < 10)
1677                         return -EINVAL;
1678
1679                 ret = smu_v11_0_get_activity_monitor_coeff(smu,
1680                                                            (uint8_t *)(&activity_monitor),
1681                                                            WORKLOAD_PPLIB_CUSTOM_BIT);
1682                 if (ret) {
1683                         pr_err("[%s] Failed to get activity monitor!", __func__);
1684                         return ret;
1685                 }
1686
1687                 switch (input[0]) {
1688                 case 0: /* Gfxclk */
1689                         activity_monitor.Gfx_FPS = input[1];
1690                         activity_monitor.Gfx_UseRlcBusy = input[2];
1691                         activity_monitor.Gfx_MinActiveFreqType = input[3];
1692                         activity_monitor.Gfx_MinActiveFreq = input[4];
1693                         activity_monitor.Gfx_BoosterFreqType = input[5];
1694                         activity_monitor.Gfx_BoosterFreq = input[6];
1695                         activity_monitor.Gfx_PD_Data_limit_c = input[7];
1696                         activity_monitor.Gfx_PD_Data_error_coeff = input[8];
1697                         activity_monitor.Gfx_PD_Data_error_rate_coeff = input[9];
1698                         break;
1699                 case 1: /* Socclk */
1700                         activity_monitor.Soc_FPS = input[1];
1701                         activity_monitor.Soc_UseRlcBusy = input[2];
1702                         activity_monitor.Soc_MinActiveFreqType = input[3];
1703                         activity_monitor.Soc_MinActiveFreq = input[4];
1704                         activity_monitor.Soc_BoosterFreqType = input[5];
1705                         activity_monitor.Soc_BoosterFreq = input[6];
1706                         activity_monitor.Soc_PD_Data_limit_c = input[7];
1707                         activity_monitor.Soc_PD_Data_error_coeff = input[8];
1708                         activity_monitor.Soc_PD_Data_error_rate_coeff = input[9];
1709                         break;
1710                 case 2: /* Uclk */
1711                         activity_monitor.Mem_FPS = input[1];
1712                         activity_monitor.Mem_UseRlcBusy = input[2];
1713                         activity_monitor.Mem_MinActiveFreqType = input[3];
1714                         activity_monitor.Mem_MinActiveFreq = input[4];
1715                         activity_monitor.Mem_BoosterFreqType = input[5];
1716                         activity_monitor.Mem_BoosterFreq = input[6];
1717                         activity_monitor.Mem_PD_Data_limit_c = input[7];
1718                         activity_monitor.Mem_PD_Data_error_coeff = input[8];
1719                         activity_monitor.Mem_PD_Data_error_rate_coeff = input[9];
1720                         break;
1721                 case 3: /* Fclk */
1722                         activity_monitor.Fclk_FPS = input[1];
1723                         activity_monitor.Fclk_UseRlcBusy = input[2];
1724                         activity_monitor.Fclk_MinActiveFreqType = input[3];
1725                         activity_monitor.Fclk_MinActiveFreq = input[4];
1726                         activity_monitor.Fclk_BoosterFreqType = input[5];
1727                         activity_monitor.Fclk_BoosterFreq = input[6];
1728                         activity_monitor.Fclk_PD_Data_limit_c = input[7];
1729                         activity_monitor.Fclk_PD_Data_error_coeff = input[8];
1730                         activity_monitor.Fclk_PD_Data_error_rate_coeff = input[9];
1731                         break;
1732                 }
1733
1734                 ret = smu_v11_0_set_activity_monitor_coeff(smu,
1735                                                            (uint8_t *)(&activity_monitor),
1736                                                            WORKLOAD_PPLIB_CUSTOM_BIT);
1737                 if (ret) {
1738                         pr_err("[%s] Failed to set activity monitor!", __func__);
1739                         return ret;
1740                 }
1741         }
1742
1743         /* conv PP_SMC_POWER_PROFILE* to WORKLOAD_PPLIB_*_BIT */
1744         workload_type =
1745                 smu_v11_0_conv_power_profile_to_pplib_workload(smu->power_profile_mode);
1746         ret = smu_send_smc_msg_with_param(smu, SMU_MSG_SetWorkloadMask,
1747                                           1 << workload_type);
1748
1749         return ret;
1750 }
1751
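     /*
      * Change a single OD8 setting: export the current overdrive table from
      * the SMU, patch the field selected by @index and import the table back.
      */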
1752 static int smu_v11_0_update_od8_settings(struct smu_context *smu,
1753                                         uint32_t index,
1754                                         uint32_t value)
1755 {
1756         struct smu_table_context *table_context = &smu->smu_table;
1757         int ret;
1758
1759         ret = smu_update_table(smu, TABLE_OVERDRIVE,
1760                                table_context->overdrive_table, false);
1761         if (ret) {
1762                 pr_err("Failed to export over drive table!\n");
1763                 return ret;
1764         }
1765
1766         smu_update_specified_od8_value(smu, index, value);
1767
1768         ret = smu_update_table(smu, TABLE_OVERDRIVE,
1769                                table_context->overdrive_table, true);
1770         if (ret) {
1771                 pr_err("Failed to import over drive table!\n");
1772                 return ret;
1773         }
1774
1775         return 0;
1776 }
1777
1778 static int smu_v11_0_dpm_set_uvd_enable(struct smu_context *smu, bool enable)
1779 {
1780         if (!smu_feature_is_supported(smu, FEATURE_DPM_UVD_BIT))
1781                 return 0;
1782
1783         if (enable == smu_feature_is_enabled(smu, FEATURE_DPM_UVD_BIT))
1784                 return 0;
1785
1786         return smu_feature_set_enabled(smu, FEATURE_DPM_UVD_BIT, enable);
1787 }
1788
1789 static int smu_v11_0_dpm_set_vce_enable(struct smu_context *smu, bool enable)
1790 {
1791         if (!smu_feature_is_supported(smu, FEATURE_DPM_VCE_BIT))
1792                 return 0;
1793
1794         if (enable == smu_feature_is_enabled(smu, FEATURE_DPM_VCE_BIT))
1795                 return 0;
1796
1797         return smu_feature_set_enabled(smu, FEATURE_DPM_VCE_BIT, enable);
1798 }
1799
1800 static int smu_v11_0_get_current_rpm(struct smu_context *smu,
1801                                      uint32_t *current_rpm)
1802 {
1803         int ret;
1804
1805         ret = smu_send_smc_msg(smu, SMU_MSG_GetCurrentRpm);
1806
1807         if (ret) {
1808                 pr_err("Attempt to get current RPM from SMC Failed!\n");
1809                 return ret;
1810         }
1811
1812         smu_read_smc_arg(smu, current_rpm);
1813
1814         return 0;
1815 }
1816
1817 static uint32_t
1818 smu_v11_0_get_fan_control_mode(struct smu_context *smu)
1819 {
1820         if (!smu_feature_is_enabled(smu, FEATURE_FAN_CONTROL_BIT))
1821                 return AMD_FAN_CTRL_MANUAL;
1822         else
1823                 return AMD_FAN_CTRL_AUTO;
1824 }
1825
1826 static int
1827 smu_v11_0_get_fan_speed_percent(struct smu_context *smu,
1828                                            uint32_t *speed)
1829 {
1830         int ret = 0;
1831         uint32_t percent = 0;
1832         uint32_t current_rpm;
1833         PPTable_t *pptable = smu->smu_table.driver_pptable;
1834
1835         ret = smu_v11_0_get_current_rpm(smu, &current_rpm);
             if (ret)
                     return ret;

1836         percent = current_rpm * 100 / pptable->FanMaximumRpm;
1837         *speed = percent > 100 ? 100 : percent;
1838
1839         return ret;
1840 }
1841
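     /* Enable (@start == true) or disable the SMU's automatic fan control feature. */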
1842 static int
1843 smu_v11_0_smc_fan_control(struct smu_context *smu, bool start)
1844 {
1845         int ret = 0;
1846
1847         if (!smu_feature_is_supported(smu, FEATURE_FAN_CONTROL_BIT))
1848                 return 0;
1849
1850         ret = smu_feature_set_enabled(smu, FEATURE_FAN_CONTROL_BIT, start);
1851         if (ret)
1852                 pr_err("[%s]%s smc FAN CONTROL feature failed!",
1853                        __func__, (start ? "Start" : "Stop"));
1854
1855         return ret;
1856 }
1857
1858 static int
1859 smu_v11_0_set_fan_static_mode(struct smu_context *smu, uint32_t mode)
1860 {
1861         struct amdgpu_device *adev = smu->adev;
1862
1863         WREG32_SOC15(THM, 0, mmCG_FDO_CTRL2,
1864                      REG_SET_FIELD(RREG32_SOC15(THM, 0, mmCG_FDO_CTRL2),
1865                                    CG_FDO_CTRL2, TMIN, 0));
1866         WREG32_SOC15(THM, 0, mmCG_FDO_CTRL2,
1867                      REG_SET_FIELD(RREG32_SOC15(THM, 0, mmCG_FDO_CTRL2),
1868                                    CG_FDO_CTRL2, FDO_PWM_MODE, mode));
1869
1870         return 0;
1871 }
1872
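     /*
      * Force a static fan duty cycle: stop SMU fan control, convert @speed
      * (in percent) into an FDO duty value based on FMAX_DUTY100 and switch
      * the fan to static PWM mode.
      */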
1873 static int
1874 smu_v11_0_set_fan_speed_percent(struct smu_context *smu, uint32_t speed)
1875 {
1876         struct amdgpu_device *adev = smu->adev;
1877         uint32_t duty100;
1878         uint32_t duty;
1879         uint64_t tmp64;
1880         bool stop = false;
1881
1882         if (speed > 100)
1883                 speed = 100;
1884
1885         if (smu_v11_0_smc_fan_control(smu, stop))
1886                 return -EINVAL;
1887         duty100 = REG_GET_FIELD(RREG32_SOC15(THM, 0, mmCG_FDO_CTRL1),
1888                                 CG_FDO_CTRL1, FMAX_DUTY100);
1889         if (!duty100)
1890                 return -EINVAL;
1891
1892         tmp64 = (uint64_t)speed * duty100;
1893         do_div(tmp64, 100);
1894         duty = (uint32_t)tmp64;
1895
1896         WREG32_SOC15(THM, 0, mmCG_FDO_CTRL0,
1897                      REG_SET_FIELD(RREG32_SOC15(THM, 0, mmCG_FDO_CTRL0),
1898                                    CG_FDO_CTRL0, FDO_STATIC_DUTY, duty));
1899
1900         return smu_v11_0_set_fan_static_mode(smu, FDO_PWM_MODE_STATIC);
1901 }
1902
1903 static int
1904 smu_v11_0_set_fan_control_mode(struct smu_context *smu,
1905                                uint32_t mode)
1906 {
1907         int ret = 0;
1908         bool start = true;
1909         bool stop  = false;
1910
1911         switch (mode) {
1912         case AMD_FAN_CTRL_NONE:
1913                 ret = smu_v11_0_set_fan_speed_percent(smu, 100);
1914                 break;
1915         case AMD_FAN_CTRL_MANUAL:
1916                 ret = smu_v11_0_smc_fan_control(smu, stop);
1917                 break;
1918         case AMD_FAN_CTRL_AUTO:
1919                 ret = smu_v11_0_smc_fan_control(smu, start);
1920                 break;
1921         default:
1922                 break;
1923         }
1924
1925         if (ret) {
1926                 pr_err("[%s]Set fan control mode failed!", __func__);
1927                 return -EINVAL;
1928         }
1929
1930         return ret;
1931 }
1932
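     /*
      * Force a static fan speed in RPM: stop SMU fan control, program the
      * tachometer target period derived from the crystal clock and @speed,
      * and switch the fan to static RPM mode.
      */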
1933 static int smu_v11_0_set_fan_speed_rpm(struct smu_context *smu,
1934                                        uint32_t speed)
1935 {
1936         struct amdgpu_device *adev = smu->adev;
1937         int ret;
1938         uint32_t tach_period, crystal_clock_freq;
1939         bool stop = false;
1940
1941         if (!speed)
1942                 return -EINVAL;
1943
1944         mutex_lock(&(smu->mutex));
1945         ret = smu_v11_0_smc_fan_control(smu, stop);
1946         if (ret)
1947                 goto set_fan_speed_rpm_failed;
1948
1949         crystal_clock_freq = amdgpu_asic_get_xclk(adev);
1950         tach_period = 60 * crystal_clock_freq * 10000 / (8 * speed);
1951         WREG32_SOC15(THM, 0, mmCG_TACH_CTRL,
1952                      REG_SET_FIELD(RREG32_SOC15(THM, 0, mmCG_TACH_CTRL),
1953                                    CG_TACH_CTRL, TARGET_PERIOD,
1954                                    tach_period));
1955
1956         ret = smu_v11_0_set_fan_static_mode(smu, FDO_PWM_MODE_STATIC_RPM);
1957
1958 set_fan_speed_rpm_failed:
1959         mutex_unlock(&(smu->mutex));
1960         return ret;
1961 }
1962
1963 static const struct smu_funcs smu_v11_0_funcs = {
1964         .init_microcode = smu_v11_0_init_microcode,
1965         .load_microcode = smu_v11_0_load_microcode,
1966         .check_fw_status = smu_v11_0_check_fw_status,
1967         .check_fw_version = smu_v11_0_check_fw_version,
1968         .send_smc_msg = smu_v11_0_send_msg,
1969         .send_smc_msg_with_param = smu_v11_0_send_msg_with_param,
1970         .read_smc_arg = smu_v11_0_read_arg,
1971         .read_pptable_from_vbios = smu_v11_0_read_pptable_from_vbios,
1972         .init_smc_tables = smu_v11_0_init_smc_tables,
1973         .fini_smc_tables = smu_v11_0_fini_smc_tables,
1974         .init_power = smu_v11_0_init_power,
1975         .fini_power = smu_v11_0_fini_power,
1976         .get_vbios_bootup_values = smu_v11_0_get_vbios_bootup_values,
1977         .get_clk_info_from_vbios = smu_v11_0_get_clk_info_from_vbios,
1978         .notify_memory_pool_location = smu_v11_0_notify_memory_pool_location,
1979         .check_pptable = smu_v11_0_check_pptable,
1980         .parse_pptable = smu_v11_0_parse_pptable,
1981         .populate_smc_pptable = smu_v11_0_populate_smc_pptable,
1982         .write_pptable = smu_v11_0_write_pptable,
1983         .write_watermarks_table = smu_v11_0_write_watermarks_table,
1984         .set_min_dcef_deep_sleep = smu_v11_0_set_min_dcef_deep_sleep,
1985         .set_tool_table_location = smu_v11_0_set_tool_table_location,
1986         .init_display = smu_v11_0_init_display,
1987         .set_allowed_mask = smu_v11_0_set_allowed_mask,
1988         .get_enabled_mask = smu_v11_0_get_enabled_mask,
1989         .is_dpm_running = smu_v11_0_is_dpm_running,
1990         .enable_all_mask = smu_v11_0_enable_all_mask,
1991         .disable_all_mask = smu_v11_0_disable_all_mask,
1992         .update_feature_enable_state = smu_v11_0_update_feature_enable_state,
1993         .notify_display_change = smu_v11_0_notify_display_change,
1994         .get_power_limit = smu_v11_0_get_power_limit,
1995         .set_power_limit = smu_v11_0_set_power_limit,
1996         .get_current_clk_freq = smu_v11_0_get_current_clk_freq,
1997         .init_max_sustainable_clocks = smu_v11_0_init_max_sustainable_clocks,
1998         .start_thermal_control = smu_v11_0_start_thermal_control,
1999         .read_sensor = smu_v11_0_read_sensor,
2000         .set_deep_sleep_dcefclk = smu_v11_0_set_deep_sleep_dcefclk,
2001         .display_clock_voltage_request = smu_v11_0_display_clock_voltage_request,
2002         .set_watermarks_for_clock_ranges = smu_v11_0_set_watermarks_for_clock_ranges,
2003         .get_sclk = smu_v11_0_dpm_get_sclk,
2004         .get_mclk = smu_v11_0_dpm_get_mclk,
2005         .set_od8_default_settings = smu_v11_0_set_od8_default_settings,
2006         .get_activity_monitor_coeff = smu_v11_0_get_activity_monitor_coeff,
2007         .set_activity_monitor_coeff = smu_v11_0_set_activity_monitor_coeff,
2008         .conv_power_profile_to_pplib_workload = smu_v11_0_conv_power_profile_to_pplib_workload,
2009         .get_power_profile_mode = smu_v11_0_get_power_profile_mode,
2010         .set_power_profile_mode = smu_v11_0_set_power_profile_mode,
2011         .update_od8_settings = smu_v11_0_update_od8_settings,
2012         .dpm_set_uvd_enable = smu_v11_0_dpm_set_uvd_enable,
2013         .dpm_set_vce_enable = smu_v11_0_dpm_set_vce_enable,
2014         .get_current_rpm = smu_v11_0_get_current_rpm,
2015         .get_fan_control_mode = smu_v11_0_get_fan_control_mode,
2016         .set_fan_control_mode = smu_v11_0_set_fan_control_mode,
2017         .get_fan_speed_percent = smu_v11_0_get_fan_speed_percent,
2018         .set_fan_speed_percent = smu_v11_0_set_fan_speed_percent,
2019         .set_fan_speed_rpm = smu_v11_0_set_fan_speed_rpm,
2020 };
2021
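     /* Install the common SMU11 callbacks and the ASIC specific ppt functions. */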
2022 void smu_v11_0_set_smu_funcs(struct smu_context *smu)
2023 {
2024         struct amdgpu_device *adev = smu->adev;
2025
2026         smu->funcs = &smu_v11_0_funcs;
2027
2028         switch (adev->asic_type) {
2029         case CHIP_VEGA20:
2030                 vega20_set_ppt_funcs(smu);
2031                 break;
2032         default:
2033                 pr_warn("Unknown asic for smu11\n");
2034         }
2035 }