/*
 * Copyright 2019 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 */
24 #include <linux/firmware.h>
26 #include "amdgpu_smu.h"
27 #include "atomfirmware.h"
28 #include "amdgpu_atomfirmware.h"
29 #include "smu_v11_0.h"
30 #include "smu11_driver_if.h"
31 #include "soc15_common.h"
33 #include "vega20_ppt.h"
34 #include "pp_thermal.h"
36 #include "asic_reg/thm/thm_11_0_2_offset.h"
37 #include "asic_reg/thm/thm_11_0_2_sh_mask.h"
38 #include "asic_reg/mp/mp_9_0_offset.h"
39 #include "asic_reg/mp/mp_9_0_sh_mask.h"
40 #include "asic_reg/nbio/nbio_7_4_offset.h"
41 #include "asic_reg/smuio/smuio_9_0_offset.h"
42 #include "asic_reg/smuio/smuio_9_0_sh_mask.h"
44 MODULE_FIRMWARE("amdgpu/vega20_smc.bin");
46 #define SMU11_TOOL_SIZE 0x19000
47 #define SMU11_THERMAL_MINIMUM_ALERT_TEMP 0
48 #define SMU11_THERMAL_MAXIMUM_ALERT_TEMP 255
50 #define SMU11_TEMPERATURE_UNITS_PER_CENTIGRADES 1000
51 #define SMU11_VOLTAGE_SCALE 4
53 #define SMC_DPM_FEATURE (FEATURE_DPM_PREFETCHER_MASK | \
54 FEATURE_DPM_GFXCLK_MASK | \
55 FEATURE_DPM_UCLK_MASK | \
56 FEATURE_DPM_SOCCLK_MASK | \
57 FEATURE_DPM_UVD_MASK | \
58 FEATURE_DPM_VCE_MASK | \
59 FEATURE_DPM_MP0CLK_MASK | \
60 FEATURE_DPM_LINK_MASK | \
61 FEATURE_DPM_DCEFCLK_MASK)
63 static int smu_v11_0_send_msg_without_waiting(struct smu_context *smu,
66 struct amdgpu_device *adev = smu->adev;
67 WREG32_SOC15(MP1, 0, mmMP1_SMN_C2PMSG_66, msg);
71 static int smu_v11_0_read_arg(struct smu_context *smu, uint32_t *arg)
73 struct amdgpu_device *adev = smu->adev;
75 *arg = RREG32_SOC15(MP1, 0, mmMP1_SMN_C2PMSG_82);
79 static int smu_v11_0_wait_for_response(struct smu_context *smu)
81 struct amdgpu_device *adev = smu->adev;
82 uint32_t cur_value, i;
84 for (i = 0; i < adev->usec_timeout; i++) {
85 cur_value = RREG32_SOC15(MP1, 0, mmMP1_SMN_C2PMSG_90);
86 if ((cur_value & MP1_C2PMSG_90__CONTENT_MASK) != 0)
91 /* timeout means wrong logic */
92 if (i == adev->usec_timeout)
95 return RREG32_SOC15(MP1, 0, mmMP1_SMN_C2PMSG_90) == 0x1 ? 0 : -EIO;
98 static int smu_v11_0_send_msg(struct smu_context *smu, uint16_t msg)
100 struct amdgpu_device *adev = smu->adev;
101 int ret = 0, index = 0;
103 index = smu_msg_get_index(smu, msg);
107 smu_v11_0_wait_for_response(smu);
109 WREG32_SOC15(MP1, 0, mmMP1_SMN_C2PMSG_90, 0);
111 smu_v11_0_send_msg_without_waiting(smu, (uint16_t)index);
113 ret = smu_v11_0_wait_for_response(smu);
116 pr_err("Failed to send message 0x%x, response 0x%x\n", msg,
124 smu_v11_0_send_msg_with_param(struct smu_context *smu, uint16_t msg,
128 struct amdgpu_device *adev = smu->adev;
129 int ret = 0, index = 0;
131 index = smu_msg_get_index(smu, msg);
135 ret = smu_v11_0_wait_for_response(smu);
137 pr_err("Failed to send message 0x%x, response 0x%x\n", msg,
140 WREG32_SOC15(MP1, 0, mmMP1_SMN_C2PMSG_90, 0);
142 WREG32_SOC15(MP1, 0, mmMP1_SMN_C2PMSG_82, param);
144 smu_v11_0_send_msg_without_waiting(smu, (uint16_t)index);
146 ret = smu_v11_0_wait_for_response(smu);
148 pr_err("Failed to send message 0x%x, response 0x%x\n", msg,
154 static int smu_v11_0_init_microcode(struct smu_context *smu)
156 struct amdgpu_device *adev = smu->adev;
157 const char *chip_name;
160 const struct smc_firmware_header_v1_0 *hdr;
161 const struct common_firmware_header *header;
162 struct amdgpu_firmware_info *ucode = NULL;
164 switch (adev->asic_type) {
166 chip_name = "vega20";
172 snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_smc.bin", chip_name);
174 err = request_firmware(&adev->pm.fw, fw_name, adev->dev);
177 err = amdgpu_ucode_validate(adev->pm.fw);
181 hdr = (const struct smc_firmware_header_v1_0 *) adev->pm.fw->data;
182 amdgpu_ucode_print_smc_hdr(&hdr->header);
183 adev->pm.fw_version = le32_to_cpu(hdr->header.ucode_version);
185 if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) {
186 ucode = &adev->firmware.ucode[AMDGPU_UCODE_ID_SMC];
187 ucode->ucode_id = AMDGPU_UCODE_ID_SMC;
188 ucode->fw = adev->pm.fw;
189 header = (const struct common_firmware_header *)ucode->fw->data;
190 adev->firmware.fw_size +=
191 ALIGN(le32_to_cpu(header->ucode_size_bytes), PAGE_SIZE);
196 DRM_ERROR("smu_v11_0: Failed to load firmware \"%s\"\n",
198 release_firmware(adev->pm.fw);
204 static int smu_v11_0_load_microcode(struct smu_context *smu)
209 static int smu_v11_0_check_fw_status(struct smu_context *smu)
211 struct amdgpu_device *adev = smu->adev;
212 uint32_t mp1_fw_flags;
214 WREG32_SOC15(NBIF, 0, mmPCIE_INDEX2,
215 (MP1_Public | (smnMP1_FIRMWARE_FLAGS & 0xffffffff)));
217 mp1_fw_flags = RREG32_SOC15(NBIF, 0, mmPCIE_DATA2);
219 if ((mp1_fw_flags & MP1_FIRMWARE_FLAGS__INTERRUPTS_ENABLED_MASK) >>
220 MP1_FIRMWARE_FLAGS__INTERRUPTS_ENABLED__SHIFT)
225 static int smu_v11_0_check_fw_version(struct smu_context *smu)
227 uint32_t smu_version = 0xff;
230 ret = smu_send_smc_msg(smu, SMU_MSG_GetDriverIfVersion);
234 ret = smu_read_smc_arg(smu, &smu_version);
238 if (smu_version == SMU11_DRIVER_IF_VERSION)
244 static int smu_v11_0_read_pptable_from_vbios(struct smu_context *smu)
251 index = get_index_into_master_table(atom_master_list_of_data_tables_v2_1,
254 ret = smu_get_atom_data_table(smu, index, &size, &frev, &crev,
259 if (!smu->smu_table.power_play_table)
260 smu->smu_table.power_play_table = table;
261 if (!smu->smu_table.power_play_table_size)
262 smu->smu_table.power_play_table_size = size;
267 static int smu_v11_0_init_dpm_context(struct smu_context *smu)
269 struct smu_dpm_context *smu_dpm = &smu->smu_dpm;
271 if (smu_dpm->dpm_context || smu_dpm->dpm_context_size != 0)
274 return smu_alloc_dpm_context(smu);
277 static int smu_v11_0_fini_dpm_context(struct smu_context *smu)
279 struct smu_dpm_context *smu_dpm = &smu->smu_dpm;
281 if (!smu_dpm->dpm_context || smu_dpm->dpm_context_size == 0)
284 kfree(smu_dpm->dpm_context);
285 kfree(smu_dpm->golden_dpm_context);
286 kfree(smu_dpm->dpm_current_power_state);
287 kfree(smu_dpm->dpm_request_power_state);
288 smu_dpm->dpm_context = NULL;
289 smu_dpm->golden_dpm_context = NULL;
290 smu_dpm->dpm_context_size = 0;
291 smu_dpm->dpm_current_power_state = NULL;
292 smu_dpm->dpm_request_power_state = NULL;
297 static int smu_v11_0_init_smc_tables(struct smu_context *smu)
299 struct smu_table_context *smu_table = &smu->smu_table;
300 struct smu_table *tables = NULL;
303 if (smu_table->tables || smu_table->table_count != 0)
306 tables = kcalloc(TABLE_COUNT, sizeof(struct smu_table), GFP_KERNEL);
310 smu_table->tables = tables;
311 smu_table->table_count = TABLE_COUNT;
313 SMU_TABLE_INIT(tables, TABLE_PPTABLE, sizeof(PPTable_t),
314 PAGE_SIZE, AMDGPU_GEM_DOMAIN_VRAM);
315 SMU_TABLE_INIT(tables, TABLE_WATERMARKS, sizeof(Watermarks_t),
316 PAGE_SIZE, AMDGPU_GEM_DOMAIN_VRAM);
317 SMU_TABLE_INIT(tables, TABLE_SMU_METRICS, sizeof(SmuMetrics_t),
318 PAGE_SIZE, AMDGPU_GEM_DOMAIN_VRAM);
319 SMU_TABLE_INIT(tables, TABLE_OVERDRIVE, sizeof(OverDriveTable_t),
320 PAGE_SIZE, AMDGPU_GEM_DOMAIN_VRAM);
321 SMU_TABLE_INIT(tables, TABLE_PMSTATUSLOG, SMU11_TOOL_SIZE, PAGE_SIZE,
322 AMDGPU_GEM_DOMAIN_VRAM);
323 SMU_TABLE_INIT(tables, TABLE_ACTIVITY_MONITOR_COEFF,
324 sizeof(DpmActivityMonitorCoeffInt_t),
326 AMDGPU_GEM_DOMAIN_VRAM);
328 ret = smu_v11_0_init_dpm_context(smu);
335 static int smu_v11_0_fini_smc_tables(struct smu_context *smu)
337 struct smu_table_context *smu_table = &smu->smu_table;
340 if (!smu_table->tables || smu_table->table_count == 0)
343 kfree(smu_table->tables);
344 smu_table->tables = NULL;
345 smu_table->table_count = 0;
347 ret = smu_v11_0_fini_dpm_context(smu);
353 static int smu_v11_0_init_power(struct smu_context *smu)
355 struct smu_power_context *smu_power = &smu->smu_power;
357 if (smu_power->power_context || smu_power->power_context_size != 0)
360 smu_power->power_context = kzalloc(sizeof(struct smu_11_0_dpm_context),
362 if (!smu_power->power_context)
364 smu_power->power_context_size = sizeof(struct smu_11_0_dpm_context);
369 static int smu_v11_0_fini_power(struct smu_context *smu)
371 struct smu_power_context *smu_power = &smu->smu_power;
373 if (!smu_power->power_context || smu_power->power_context_size == 0)
376 kfree(smu_power->power_context);
377 smu_power->power_context = NULL;
378 smu_power->power_context_size = 0;
383 int smu_v11_0_get_vbios_bootup_values(struct smu_context *smu)
388 struct atom_common_table_header *header;
389 struct atom_firmware_info_v3_3 *v_3_3;
390 struct atom_firmware_info_v3_1 *v_3_1;
392 index = get_index_into_master_table(atom_master_list_of_data_tables_v2_1,
395 ret = smu_get_atom_data_table(smu, index, &size, &frev, &crev,
396 (uint8_t **)&header);
400 if (header->format_revision != 3) {
401 pr_err("unknown atom_firmware_info version! for smu11\n");
405 switch (header->content_revision) {
409 v_3_1 = (struct atom_firmware_info_v3_1 *)header;
410 smu->smu_table.boot_values.revision = v_3_1->firmware_revision;
411 smu->smu_table.boot_values.gfxclk = v_3_1->bootup_sclk_in10khz;
412 smu->smu_table.boot_values.uclk = v_3_1->bootup_mclk_in10khz;
413 smu->smu_table.boot_values.socclk = 0;
414 smu->smu_table.boot_values.dcefclk = 0;
415 smu->smu_table.boot_values.vddc = v_3_1->bootup_vddc_mv;
416 smu->smu_table.boot_values.vddci = v_3_1->bootup_vddci_mv;
417 smu->smu_table.boot_values.mvddc = v_3_1->bootup_mvddc_mv;
418 smu->smu_table.boot_values.vdd_gfx = v_3_1->bootup_vddgfx_mv;
419 smu->smu_table.boot_values.cooling_id = v_3_1->coolingsolution_id;
420 smu->smu_table.boot_values.pp_table_id = 0;
424 v_3_3 = (struct atom_firmware_info_v3_3 *)header;
425 smu->smu_table.boot_values.revision = v_3_3->firmware_revision;
426 smu->smu_table.boot_values.gfxclk = v_3_3->bootup_sclk_in10khz;
427 smu->smu_table.boot_values.uclk = v_3_3->bootup_mclk_in10khz;
428 smu->smu_table.boot_values.socclk = 0;
429 smu->smu_table.boot_values.dcefclk = 0;
430 smu->smu_table.boot_values.vddc = v_3_3->bootup_vddc_mv;
431 smu->smu_table.boot_values.vddci = v_3_3->bootup_vddci_mv;
432 smu->smu_table.boot_values.mvddc = v_3_3->bootup_mvddc_mv;
433 smu->smu_table.boot_values.vdd_gfx = v_3_3->bootup_vddgfx_mv;
434 smu->smu_table.boot_values.cooling_id = v_3_3->coolingsolution_id;
435 smu->smu_table.boot_values.pp_table_id = v_3_3->pplib_pptable_id;
441 static int smu_v11_0_get_clk_info_from_vbios(struct smu_context *smu)
444 struct amdgpu_device *adev = smu->adev;
445 struct atom_get_smu_clock_info_parameters_v3_1 input = {0};
446 struct atom_get_smu_clock_info_output_parameters_v3_1 *output;
448 input.clk_id = SMU11_SYSPLL0_SOCCLK_ID;
449 input.command = GET_SMU_CLOCK_INFO_V3_1_GET_CLOCK_FREQ;
450 index = get_index_into_master_table(atom_master_list_of_command_functions_v2_1,
453 ret = amdgpu_atom_execute_table(adev->mode_info.atom_context, index,
458 output = (struct atom_get_smu_clock_info_output_parameters_v3_1 *)&input;
459 smu->smu_table.boot_values.socclk = le32_to_cpu(output->atom_smu_outputclkfreq.smu_clock_freq_hz) / 10000;
461 memset(&input, 0, sizeof(input));
462 input.clk_id = SMU11_SYSPLL0_DCEFCLK_ID;
463 input.command = GET_SMU_CLOCK_INFO_V3_1_GET_CLOCK_FREQ;
464 index = get_index_into_master_table(atom_master_list_of_command_functions_v2_1,
467 ret = amdgpu_atom_execute_table(adev->mode_info.atom_context, index,
472 output = (struct atom_get_smu_clock_info_output_parameters_v3_1 *)&input;
473 smu->smu_table.boot_values.dcefclk = le32_to_cpu(output->atom_smu_outputclkfreq.smu_clock_freq_hz) / 10000;
475 memset(&input, 0, sizeof(input));
476 input.clk_id = SMU11_SYSPLL0_ECLK_ID;
477 input.command = GET_SMU_CLOCK_INFO_V3_1_GET_CLOCK_FREQ;
478 index = get_index_into_master_table(atom_master_list_of_command_functions_v2_1,
481 ret = amdgpu_atom_execute_table(adev->mode_info.atom_context, index,
486 output = (struct atom_get_smu_clock_info_output_parameters_v3_1 *)&input;
487 smu->smu_table.boot_values.eclk = le32_to_cpu(output->atom_smu_outputclkfreq.smu_clock_freq_hz) / 10000;
489 memset(&input, 0, sizeof(input));
490 input.clk_id = SMU11_SYSPLL0_VCLK_ID;
491 input.command = GET_SMU_CLOCK_INFO_V3_1_GET_CLOCK_FREQ;
492 index = get_index_into_master_table(atom_master_list_of_command_functions_v2_1,
495 ret = amdgpu_atom_execute_table(adev->mode_info.atom_context, index,
500 output = (struct atom_get_smu_clock_info_output_parameters_v3_1 *)&input;
501 smu->smu_table.boot_values.vclk = le32_to_cpu(output->atom_smu_outputclkfreq.smu_clock_freq_hz) / 10000;
503 memset(&input, 0, sizeof(input));
504 input.clk_id = SMU11_SYSPLL0_DCLK_ID;
505 input.command = GET_SMU_CLOCK_INFO_V3_1_GET_CLOCK_FREQ;
506 index = get_index_into_master_table(atom_master_list_of_command_functions_v2_1,
509 ret = amdgpu_atom_execute_table(adev->mode_info.atom_context, index,
514 output = (struct atom_get_smu_clock_info_output_parameters_v3_1 *)&input;
515 smu->smu_table.boot_values.dclk = le32_to_cpu(output->atom_smu_outputclkfreq.smu_clock_freq_hz) / 10000;
520 static int smu_v11_0_notify_memory_pool_location(struct smu_context *smu)
522 struct smu_table_context *smu_table = &smu->smu_table;
523 struct smu_table *memory_pool = &smu_table->memory_pool;
526 uint32_t address_low, address_high;
528 if (memory_pool->size == 0 || memory_pool->cpu_addr == NULL)
531 address = (uint64_t)memory_pool->cpu_addr;
532 address_high = (uint32_t)upper_32_bits(address);
533 address_low = (uint32_t)lower_32_bits(address);
535 ret = smu_send_smc_msg_with_param(smu,
536 SMU_MSG_SetSystemVirtualDramAddrHigh,
540 ret = smu_send_smc_msg_with_param(smu,
541 SMU_MSG_SetSystemVirtualDramAddrLow,
546 address = memory_pool->mc_address;
547 address_high = (uint32_t)upper_32_bits(address);
548 address_low = (uint32_t)lower_32_bits(address);
550 ret = smu_send_smc_msg_with_param(smu, SMU_MSG_DramLogSetDramAddrHigh,
554 ret = smu_send_smc_msg_with_param(smu, SMU_MSG_DramLogSetDramAddrLow,
558 ret = smu_send_smc_msg_with_param(smu, SMU_MSG_DramLogSetDramSize,
559 (uint32_t)memory_pool->size);
/* Thin wrapper: sanity-check the powerplay table via the ASIC callback. */
static int smu_v11_0_check_pptable(struct smu_context *smu)
{
	int ret;

	ret = smu_check_powerplay_table(smu);
	return ret;
}
574 static int smu_v11_0_parse_pptable(struct smu_context *smu)
578 struct smu_table_context *table_context = &smu->smu_table;
580 if (table_context->driver_pptable)
583 table_context->driver_pptable = kzalloc(sizeof(PPTable_t), GFP_KERNEL);
585 if (!table_context->driver_pptable)
588 ret = smu_store_powerplay_table(smu);
592 ret = smu_append_powerplay_table(smu);
/* Thin wrapper: build the default DPM table via the ASIC callback. */
static int smu_v11_0_populate_smc_pptable(struct smu_context *smu)
{
	int ret;

	ret = smu_set_default_dpm_table(smu);

	return ret;
}
606 static int smu_v11_0_write_pptable(struct smu_context *smu)
608 struct smu_table_context *table_context = &smu->smu_table;
611 ret = smu_update_table(smu, TABLE_PPTABLE, table_context->driver_pptable, true);
616 static int smu_v11_0_write_watermarks_table(struct smu_context *smu)
618 return smu_update_table(smu, TABLE_WATERMARKS,
619 smu->smu_table.tables[TABLE_WATERMARKS].cpu_addr, true);
622 static int smu_v11_0_set_deep_sleep_dcefclk(struct smu_context *smu, uint32_t clk)
626 ret = smu_send_smc_msg_with_param(smu,
627 SMU_MSG_SetMinDeepSleepDcefclk, clk);
629 pr_err("SMU11 attempt to set divider for DCEFCLK Failed!");
634 static int smu_v11_0_set_min_dcef_deep_sleep(struct smu_context *smu)
636 struct smu_table_context *table_context = &smu->smu_table;
641 return smu_set_deep_sleep_dcefclk(smu,
642 table_context->boot_values.dcefclk / 100);
645 static int smu_v11_0_set_tool_table_location(struct smu_context *smu)
648 struct smu_table *tool_table = &smu->smu_table.tables[TABLE_PMSTATUSLOG];
650 if (tool_table->mc_address) {
651 ret = smu_send_smc_msg_with_param(smu,
652 SMU_MSG_SetToolsDramAddrHigh,
653 upper_32_bits(tool_table->mc_address));
655 ret = smu_send_smc_msg_with_param(smu,
656 SMU_MSG_SetToolsDramAddrLow,
657 lower_32_bits(tool_table->mc_address));
663 static int smu_v11_0_init_display(struct smu_context *smu)
666 ret = smu_send_smc_msg_with_param(smu, SMU_MSG_NumOfDisplays, 0);
670 static int smu_v11_0_update_feature_enable_state(struct smu_context *smu, uint32_t feature_id, bool enabled)
672 uint32_t feature_low = 0, feature_high = 0;
675 if (feature_id >= 0 && feature_id < 31)
676 feature_low = (1 << feature_id);
677 else if (feature_id > 31 && feature_id < 63)
678 feature_high = (1 << feature_id);
683 ret = smu_send_smc_msg_with_param(smu, SMU_MSG_EnableSmuFeaturesLow,
687 ret = smu_send_smc_msg_with_param(smu, SMU_MSG_EnableSmuFeaturesHigh,
693 ret = smu_send_smc_msg_with_param(smu, SMU_MSG_DisableSmuFeaturesLow,
697 ret = smu_send_smc_msg_with_param(smu, SMU_MSG_DisableSmuFeaturesHigh,
707 static int smu_v11_0_set_allowed_mask(struct smu_context *smu)
709 struct smu_feature *feature = &smu->smu_feature;
711 uint32_t feature_mask[2];
713 mutex_lock(&feature->mutex);
714 if (bitmap_empty(feature->allowed, SMU_FEATURE_MAX) || feature->feature_num < 64)
717 bitmap_copy((unsigned long *)feature_mask, feature->allowed, 64);
719 ret = smu_send_smc_msg_with_param(smu, SMU_MSG_SetAllowedFeaturesMaskHigh,
724 ret = smu_send_smc_msg_with_param(smu, SMU_MSG_SetAllowedFeaturesMaskLow,
730 mutex_unlock(&feature->mutex);
734 static int smu_v11_0_get_enabled_mask(struct smu_context *smu,
735 uint32_t *feature_mask, uint32_t num)
737 uint32_t feature_mask_high = 0, feature_mask_low = 0;
740 if (!feature_mask || num < 2)
743 ret = smu_send_smc_msg(smu, SMU_MSG_GetEnabledSmuFeaturesHigh);
746 ret = smu_read_smc_arg(smu, &feature_mask_high);
750 ret = smu_send_smc_msg(smu, SMU_MSG_GetEnabledSmuFeaturesLow);
753 ret = smu_read_smc_arg(smu, &feature_mask_low);
757 feature_mask[0] = feature_mask_low;
758 feature_mask[1] = feature_mask_high;
763 static bool smu_v11_0_is_dpm_running(struct smu_context *smu)
766 uint32_t feature_mask[2];
767 unsigned long feature_enabled;
768 ret = smu_v11_0_get_enabled_mask(smu, feature_mask, 2);
769 feature_enabled = (unsigned long)((uint64_t)feature_mask[0] |
770 ((uint64_t)feature_mask[1] << 32));
771 return !!(feature_enabled & SMC_DPM_FEATURE);
774 static int smu_v11_0_enable_all_mask(struct smu_context *smu)
776 struct smu_feature *feature = &smu->smu_feature;
777 uint32_t feature_mask[2];
780 ret = smu_send_smc_msg(smu, SMU_MSG_EnableAllSmuFeatures);
783 ret = smu_feature_get_enabled_mask(smu, feature_mask, 2);
787 bitmap_copy(feature->enabled, (unsigned long *)&feature_mask,
788 feature->feature_num);
789 bitmap_copy(feature->supported, (unsigned long *)&feature_mask,
790 feature->feature_num);
795 static int smu_v11_0_disable_all_mask(struct smu_context *smu)
797 struct smu_feature *feature = &smu->smu_feature;
798 uint32_t feature_mask[2];
801 ret = smu_send_smc_msg(smu, SMU_MSG_DisableAllSmuFeatures);
804 ret = smu_feature_get_enabled_mask(smu, feature_mask, 2);
808 bitmap_copy(feature->enabled, (unsigned long *)&feature_mask,
809 feature->feature_num);
810 bitmap_copy(feature->supported, (unsigned long *)&feature_mask,
811 feature->feature_num);
816 static int smu_v11_0_notify_display_change(struct smu_context *smu)
820 if (smu_feature_is_enabled(smu, FEATURE_DPM_UCLK_BIT))
821 ret = smu_send_smc_msg_with_param(smu, SMU_MSG_SetUclkFastSwitch, 1);
827 smu_v11_0_get_max_sustainable_clock(struct smu_context *smu, uint32_t *clock,
828 PPCLK_e clock_select)
832 ret = smu_send_smc_msg_with_param(smu, SMU_MSG_GetDcModeMaxDpmFreq,
835 pr_err("[GetMaxSustainableClock] Failed to get max DC clock from SMC!");
839 ret = smu_read_smc_arg(smu, clock);
846 /* if DC limit is zero, return AC limit */
847 ret = smu_send_smc_msg_with_param(smu, SMU_MSG_GetMaxDpmFreq,
850 pr_err("[GetMaxSustainableClock] failed to get max AC clock from SMC!");
854 ret = smu_read_smc_arg(smu, clock);
859 static int smu_v11_0_init_max_sustainable_clocks(struct smu_context *smu)
861 struct smu_11_0_max_sustainable_clocks *max_sustainable_clocks;
864 max_sustainable_clocks = kzalloc(sizeof(struct smu_11_0_max_sustainable_clocks),
866 smu->smu_table.max_sustainable_clocks = (void *)max_sustainable_clocks;
868 max_sustainable_clocks->uclock = smu->smu_table.boot_values.uclk / 100;
869 max_sustainable_clocks->soc_clock = smu->smu_table.boot_values.socclk / 100;
870 max_sustainable_clocks->dcef_clock = smu->smu_table.boot_values.dcefclk / 100;
871 max_sustainable_clocks->display_clock = 0xFFFFFFFF;
872 max_sustainable_clocks->phy_clock = 0xFFFFFFFF;
873 max_sustainable_clocks->pixel_clock = 0xFFFFFFFF;
875 if (smu_feature_is_enabled(smu, FEATURE_DPM_UCLK_BIT)) {
876 ret = smu_v11_0_get_max_sustainable_clock(smu,
877 &(max_sustainable_clocks->uclock),
880 pr_err("[%s] failed to get max UCLK from SMC!",
886 if (smu_feature_is_enabled(smu, FEATURE_DPM_SOCCLK_BIT)) {
887 ret = smu_v11_0_get_max_sustainable_clock(smu,
888 &(max_sustainable_clocks->soc_clock),
891 pr_err("[%s] failed to get max SOCCLK from SMC!",
897 if (smu_feature_is_enabled(smu, FEATURE_DPM_DCEFCLK_BIT)) {
898 ret = smu_v11_0_get_max_sustainable_clock(smu,
899 &(max_sustainable_clocks->dcef_clock),
902 pr_err("[%s] failed to get max DCEFCLK from SMC!",
907 ret = smu_v11_0_get_max_sustainable_clock(smu,
908 &(max_sustainable_clocks->display_clock),
911 pr_err("[%s] failed to get max DISPCLK from SMC!",
915 ret = smu_v11_0_get_max_sustainable_clock(smu,
916 &(max_sustainable_clocks->phy_clock),
919 pr_err("[%s] failed to get max PHYCLK from SMC!",
923 ret = smu_v11_0_get_max_sustainable_clock(smu,
924 &(max_sustainable_clocks->pixel_clock),
927 pr_err("[%s] failed to get max PIXCLK from SMC!",
933 if (max_sustainable_clocks->soc_clock < max_sustainable_clocks->uclock)
934 max_sustainable_clocks->uclock = max_sustainable_clocks->soc_clock;
939 static int smu_v11_0_get_power_limit(struct smu_context *smu,
946 mutex_lock(&smu->mutex);
947 *limit = smu->default_power_limit;
948 mutex_unlock(&smu->mutex);
950 ret = smu_send_smc_msg_with_param(smu, SMU_MSG_GetPptLimit,
951 POWER_SOURCE_AC << 16);
953 pr_err("[%s] get PPT limit failed!", __func__);
956 smu_read_smc_arg(smu, limit);
957 smu->power_limit = *limit;
963 static int smu_v11_0_set_power_limit(struct smu_context *smu, uint32_t n)
967 if (smu_feature_is_enabled(smu, FEATURE_PPT_BIT))
968 ret = smu_send_smc_msg_with_param(smu, SMU_MSG_SetPptLimit, n);
970 pr_err("[%s] Set power limit Failed!", __func__);
977 static int smu_v11_0_get_current_clk_freq(struct smu_context *smu, uint32_t clk_id, uint32_t *value)
982 if (clk_id >= PPCLK_COUNT || !value)
985 ret = smu_send_smc_msg_with_param(smu,
986 SMU_MSG_GetDpmClockFreq, (clk_id << 16));
990 ret = smu_read_smc_arg(smu, &freq);
1000 static int smu_v11_0_get_thermal_range(struct smu_context *smu,
1001 struct PP_TemperatureRange *range)
1003 memcpy(range, &SMU7ThermalWithDelayPolicy[0], sizeof(struct PP_TemperatureRange));
1005 range->max = smu->smu_table.software_shutdown_temp *
1006 PP_TEMPERATURE_UNITS_PER_CENTIGRADES;
1011 static int smu_v11_0_set_thermal_range(struct smu_context *smu,
1012 struct PP_TemperatureRange *range)
1014 struct amdgpu_device *adev = smu->adev;
1015 int low = SMU11_THERMAL_MINIMUM_ALERT_TEMP *
1016 PP_TEMPERATURE_UNITS_PER_CENTIGRADES;
1017 int high = SMU11_THERMAL_MAXIMUM_ALERT_TEMP *
1018 PP_TEMPERATURE_UNITS_PER_CENTIGRADES;
1021 if (low < range->min)
1023 if (high > range->max)
1029 val = RREG32_SOC15(THM, 0, mmTHM_THERMAL_INT_CTRL);
1030 val = REG_SET_FIELD(val, THM_THERMAL_INT_CTRL, MAX_IH_CREDIT, 5);
1031 val = REG_SET_FIELD(val, THM_THERMAL_INT_CTRL, THERM_IH_HW_ENA, 1);
1032 val = REG_SET_FIELD(val, THM_THERMAL_INT_CTRL, DIG_THERM_INTH, (high / PP_TEMPERATURE_UNITS_PER_CENTIGRADES));
1033 val = REG_SET_FIELD(val, THM_THERMAL_INT_CTRL, DIG_THERM_INTL, (low / PP_TEMPERATURE_UNITS_PER_CENTIGRADES));
1034 val = val & (~THM_THERMAL_INT_CTRL__THERM_TRIGGER_MASK_MASK);
1036 WREG32_SOC15(THM, 0, mmTHM_THERMAL_INT_CTRL, val);
1041 static int smu_v11_0_enable_thermal_alert(struct smu_context *smu)
1043 struct amdgpu_device *adev = smu->adev;
1046 val |= (1 << THM_THERMAL_INT_ENA__THERM_INTH_CLR__SHIFT);
1047 val |= (1 << THM_THERMAL_INT_ENA__THERM_INTL_CLR__SHIFT);
1048 val |= (1 << THM_THERMAL_INT_ENA__THERM_TRIGGER_CLR__SHIFT);
1050 WREG32_SOC15(THM, 0, mmTHM_THERMAL_INT_ENA, val);
1055 static int smu_v11_0_set_thermal_fan_table(struct smu_context *smu)
1058 struct smu_table_context *table_context = &smu->smu_table;
1059 PPTable_t *pptable = table_context->driver_pptable;
1061 ret = smu_send_smc_msg_with_param(smu, SMU_MSG_SetFanTemperatureTarget,
1062 (uint32_t)pptable->FanTargetTemperature);
1067 static int smu_v11_0_start_thermal_control(struct smu_context *smu)
1070 struct PP_TemperatureRange range;
1071 struct amdgpu_device *adev = smu->adev;
1073 smu_v11_0_get_thermal_range(smu, &range);
1075 if (smu->smu_table.thermal_controller_type) {
1076 ret = smu_v11_0_set_thermal_range(smu, &range);
1080 ret = smu_v11_0_enable_thermal_alert(smu);
1083 ret = smu_v11_0_set_thermal_fan_table(smu);
1088 adev->pm.dpm.thermal.min_temp = range.min;
1089 adev->pm.dpm.thermal.max_temp = range.max;
1094 static int smu_v11_0_get_current_activity_percent(struct smu_context *smu,
1098 SmuMetrics_t metrics;
1103 ret = smu_update_table(smu, TABLE_SMU_METRICS, (void *)&metrics, false);
1107 *value = metrics.AverageGfxActivity;
1112 static int smu_v11_0_thermal_get_temperature(struct smu_context *smu, uint32_t *value)
1114 struct amdgpu_device *adev = smu->adev;
1120 temp = RREG32_SOC15(THM, 0, mmCG_MULT_THERMAL_STATUS);
1121 temp = (temp & CG_MULT_THERMAL_STATUS__CTF_TEMP_MASK) >>
1122 CG_MULT_THERMAL_STATUS__CTF_TEMP__SHIFT;
1124 temp = temp & 0x1ff;
1125 temp *= SMU11_TEMPERATURE_UNITS_PER_CENTIGRADES;
1132 static int smu_v11_0_get_gpu_power(struct smu_context *smu, uint32_t *value)
1135 SmuMetrics_t metrics;
1140 ret = smu_update_table(smu, TABLE_SMU_METRICS, (void *)&metrics, false);
1144 *value = metrics.CurrSocketPower << 8;
1149 static uint16_t convert_to_vddc(uint8_t vid)
1151 return (uint16_t) ((6200 - (vid * 25)) / SMU11_VOLTAGE_SCALE);
1154 static int smu_v11_0_get_gfx_vdd(struct smu_context *smu, uint32_t *value)
1156 struct amdgpu_device *adev = smu->adev;
1157 uint32_t vdd = 0, val_vid = 0;
1161 val_vid = (RREG32_SOC15(SMUIO, 0, mmSMUSVI0_TEL_PLANE0) &
1162 SMUSVI0_TEL_PLANE0__SVI0_PLANE0_VDDCOR_MASK) >>
1163 SMUSVI0_TEL_PLANE0__SVI0_PLANE0_VDDCOR__SHIFT;
1165 vdd = (uint32_t)convert_to_vddc((uint8_t)val_vid);
1173 static int smu_v11_0_read_sensor(struct smu_context *smu,
1174 enum amd_pp_sensors sensor,
1175 void *data, uint32_t *size)
1177 struct smu_table_context *table_context = &smu->smu_table;
1178 PPTable_t *pptable = table_context->driver_pptable;
1181 case AMDGPU_PP_SENSOR_GPU_LOAD:
1182 ret = smu_v11_0_get_current_activity_percent(smu,
1186 case AMDGPU_PP_SENSOR_GFX_MCLK:
1187 ret = smu_get_current_clk_freq(smu, PPCLK_UCLK, (uint32_t *)data);
1190 case AMDGPU_PP_SENSOR_GFX_SCLK:
1191 ret = smu_get_current_clk_freq(smu, PPCLK_GFXCLK, (uint32_t *)data);
1194 case AMDGPU_PP_SENSOR_GPU_TEMP:
1195 ret = smu_v11_0_thermal_get_temperature(smu, (uint32_t *)data);
1198 case AMDGPU_PP_SENSOR_GPU_POWER:
1199 ret = smu_v11_0_get_gpu_power(smu, (uint32_t *)data);
1202 case AMDGPU_PP_SENSOR_VDDGFX:
1203 ret = smu_v11_0_get_gfx_vdd(smu, (uint32_t *)data);
1206 case AMDGPU_PP_SENSOR_UVD_POWER:
1207 *(uint32_t *)data = smu_feature_is_enabled(smu, FEATURE_DPM_UVD_BIT) ? 1 : 0;
1210 case AMDGPU_PP_SENSOR_VCE_POWER:
1211 *(uint32_t *)data = smu_feature_is_enabled(smu, FEATURE_DPM_VCE_BIT) ? 1 : 0;
1214 case AMDGPU_PP_SENSOR_MIN_FAN_RPM:
1215 *(uint32_t *)data = 0;
1218 case AMDGPU_PP_SENSOR_MAX_FAN_RPM:
1219 *(uint32_t *)data = pptable->FanMaximumRpm;
1223 ret = smu_common_read_sensor(smu, sensor, data, size);
1234 smu_v11_0_display_clock_voltage_request(struct smu_context *smu,
1235 struct pp_display_clock_request
1238 enum amd_pp_clock_type clk_type = clock_req->clock_type;
1240 PPCLK_e clk_select = 0;
1241 uint32_t clk_freq = clock_req->clock_freq_in_khz / 1000;
1243 if (smu_feature_is_enabled(smu, FEATURE_DPM_DCEFCLK_BIT)) {
1245 case amd_pp_dcef_clock:
1246 clk_select = PPCLK_DCEFCLK;
1248 case amd_pp_disp_clock:
1249 clk_select = PPCLK_DISPCLK;
1251 case amd_pp_pixel_clock:
1252 clk_select = PPCLK_PIXCLK;
1254 case amd_pp_phy_clock:
1255 clk_select = PPCLK_PHYCLK;
1258 pr_info("[%s] Invalid Clock Type!", __func__);
1266 ret = smu_send_smc_msg_with_param(smu, SMU_MSG_SetHardMinByFreq,
1267 (clk_select << 16) | clk_freq);
1274 static int smu_v11_0_set_watermarks_table(struct smu_context *smu,
1275 Watermarks_t *table, struct
1276 dm_pp_wm_sets_with_clock_ranges_soc15
1281 if (!table || !clock_ranges)
1284 if (clock_ranges->num_wm_dmif_sets > 4 ||
1285 clock_ranges->num_wm_mcif_sets > 4)
1288 for (i = 0; i < clock_ranges->num_wm_dmif_sets; i++) {
1289 table->WatermarkRow[1][i].MinClock =
1290 cpu_to_le16((uint16_t)
1291 (clock_ranges->wm_dmif_clocks_ranges[i].wm_min_dcfclk_clk_in_khz /
1293 table->WatermarkRow[1][i].MaxClock =
1294 cpu_to_le16((uint16_t)
1295 (clock_ranges->wm_dmif_clocks_ranges[i].wm_max_dcfclk_clk_in_khz /
1297 table->WatermarkRow[1][i].MinUclk =
1298 cpu_to_le16((uint16_t)
1299 (clock_ranges->wm_dmif_clocks_ranges[i].wm_min_mem_clk_in_khz /
1301 table->WatermarkRow[1][i].MaxUclk =
1302 cpu_to_le16((uint16_t)
1303 (clock_ranges->wm_dmif_clocks_ranges[i].wm_max_mem_clk_in_khz /
1305 table->WatermarkRow[1][i].WmSetting = (uint8_t)
1306 clock_ranges->wm_dmif_clocks_ranges[i].wm_set_id;
1309 for (i = 0; i < clock_ranges->num_wm_mcif_sets; i++) {
1310 table->WatermarkRow[0][i].MinClock =
1311 cpu_to_le16((uint16_t)
1312 (clock_ranges->wm_mcif_clocks_ranges[i].wm_min_socclk_clk_in_khz /
1314 table->WatermarkRow[0][i].MaxClock =
1315 cpu_to_le16((uint16_t)
1316 (clock_ranges->wm_mcif_clocks_ranges[i].wm_max_socclk_clk_in_khz /
1318 table->WatermarkRow[0][i].MinUclk =
1319 cpu_to_le16((uint16_t)
1320 (clock_ranges->wm_mcif_clocks_ranges[i].wm_min_mem_clk_in_khz /
1322 table->WatermarkRow[0][i].MaxUclk =
1323 cpu_to_le16((uint16_t)
1324 (clock_ranges->wm_mcif_clocks_ranges[i].wm_max_mem_clk_in_khz /
1326 table->WatermarkRow[0][i].WmSetting = (uint8_t)
1327 clock_ranges->wm_mcif_clocks_ranges[i].wm_set_id;
1334 smu_v11_0_set_watermarks_for_clock_ranges(struct smu_context *smu, struct
1335 dm_pp_wm_sets_with_clock_ranges_soc15
1339 struct smu_table *watermarks = &smu->smu_table.tables[TABLE_WATERMARKS];
1340 Watermarks_t *table = watermarks->cpu_addr;
1342 if (!smu->disable_watermark &&
1343 smu_feature_is_enabled(smu, FEATURE_DPM_DCEFCLK_BIT) &&
1344 smu_feature_is_enabled(smu, FEATURE_DPM_SOCCLK_BIT)) {
1345 smu_v11_0_set_watermarks_table(smu, table, clock_ranges);
1346 smu->watermarks_bitmap |= WATERMARKS_EXIST;
1347 smu->watermarks_bitmap &= ~WATERMARKS_LOADED;
1353 static int smu_v11_0_get_clock_ranges(struct smu_context *smu,
1355 PPCLK_e clock_select,
1361 ret = smu_send_smc_msg_with_param(smu, SMU_MSG_GetMaxDpmFreq,
1362 (clock_select << 16));
1364 pr_err("[GetClockRanges] Failed to get max clock from SMC!\n");
1367 smu_read_smc_arg(smu, clock);
1369 ret = smu_send_smc_msg_with_param(smu, SMU_MSG_GetMinDpmFreq,
1370 (clock_select << 16));
1372 pr_err("[GetClockRanges] Failed to get min clock from SMC!\n");
1375 smu_read_smc_arg(smu, clock);
/*
 * smu_v11_0_dpm_get_sclk - return the min (low == true) or max
 * (low == false) gfx clock supported by GFXCLK DPM.  The SMC value is
 * multiplied by 100 before returning — presumably MHz scaled to 10 kHz
 * units for the powerplay interface; confirm.  Requires
 * FEATURE_DPM_GFXCLK_BIT to be enabled, errors out otherwise.
 */
1381 static uint32_t smu_v11_0_dpm_get_sclk(struct smu_context *smu, bool low)
1386 if (!smu_feature_is_enabled(smu, FEATURE_DPM_GFXCLK_BIT)) {
1387 pr_err("[GetSclks]: gfxclk dpm not enabled!\n");
/* 'false' = query the minimum DPM frequency. */
1392 ret = smu_v11_0_get_clock_ranges(smu, &gfx_clk, PPCLK_GFXCLK, false);
1394 pr_err("[GetSclks]: fail to get min PPCLK_GFXCLK\n");
/* 'true' = query the maximum DPM frequency. */
1398 ret = smu_v11_0_get_clock_ranges(smu, &gfx_clk, PPCLK_GFXCLK, true);
1400 pr_err("[GetSclks]: fail to get max PPCLK_GFXCLK\n");
1405 return (gfx_clk * 100);
1408 static uint32_t smu_v11_0_dpm_get_mclk(struct smu_context *smu, bool low)
1413 if (!smu_feature_is_enabled(smu, FEATURE_DPM_UCLK_BIT)) {
1414 pr_err("[GetMclks]: memclk dpm not enabled!\n");
1419 ret = smu_v11_0_get_clock_ranges(smu, &mem_clk, PPCLK_UCLK, false);
1421 pr_err("[GetMclks]: fail to get min PPCLK_UCLK\n");
1425 ret = smu_v11_0_get_clock_ranges(smu, &mem_clk, PPCLK_GFXCLK, true);
1427 pr_err("[GetMclks]: fail to get max PPCLK_UCLK\n");
1432 return (mem_clk * 100);
/*
 * smu_v11_0_set_od8_default_settings - initialise the driver's overdrive
 * (OD8) cache: allocate an OverDriveTable_t if not already present, read
 * the current table from the SMC, let the ASIC code apply its default
 * OD8 settings, then write the table back to the SMC.
 */
1435 static int smu_v11_0_set_od8_default_settings(struct smu_context *smu,
1438 struct smu_table_context *table_context = &smu->smu_table;
/* Already allocated by an earlier call — guard body elided in this extract. */
1442 if (table_context->overdrive_table)
1445 table_context->overdrive_table = kzalloc(sizeof(OverDriveTable_t), GFP_KERNEL);
1447 if (!table_context->overdrive_table)
/* 'false' presumably means SMC -> driver (export) — confirm smu_update_table's arg. */
1450 ret = smu_update_table(smu, TABLE_OVERDRIVE, table_context->overdrive_table, false);
1452 pr_err("Failed to export over drive table!\n");
1456 smu_set_default_od8_settings(smu);
/* 'true' presumably means driver -> SMC (import) of the now-defaulted table. */
1459 ret = smu_update_table(smu, TABLE_OVERDRIVE, table_context->overdrive_table, true);
1461 pr_err("Failed to import over drive table!\n");
1468 static int smu_v11_0_set_activity_monitor_coeff(struct smu_context *smu,
1469 uint8_t *table, uint16_t workload_type)
1472 memcpy(smu->smu_table.tables[TABLE_ACTIVITY_MONITOR_COEFF].cpu_addr,
1473 table, smu->smu_table.tables[TABLE_ACTIVITY_MONITOR_COEFF].size);
1474 ret = smu_send_smc_msg_with_param(smu, SMU_MSG_SetDriverDramAddrHigh,
1475 upper_32_bits(smu->smu_table.tables[TABLE_ACTIVITY_MONITOR_COEFF].mc_address));
1477 pr_err("[%s] Attempt to Set Dram Addr High Failed!", __func__);
1480 ret = smu_send_smc_msg_with_param(smu, SMU_MSG_SetDriverDramAddrLow,
1481 lower_32_bits(smu->smu_table.tables[TABLE_ACTIVITY_MONITOR_COEFF].mc_address));
1483 pr_err("[%s] Attempt to Set Dram Addr Low Failed!", __func__);
1486 ret = smu_send_smc_msg_with_param(smu, SMU_MSG_TransferTableSmu2Dram,
1487 TABLE_ACTIVITY_MONITOR_COEFF | (workload_type << 16));
1489 pr_err("[%s] Attempt to Transfer Table From SMU Failed!", __func__);
/*
 * smu_v11_0_get_activity_monitor_coeff - fetch one workload's
 * activity-monitor coefficient table from the SMC: program the shared
 * DRAM buffer's address (high/low halves), then ask the SMC to copy the
 * table SMU -> DRAM (TransferTableSmu2Dram), with the workload index in
 * the upper 16 bits of the message argument.
 * NOTE(review): no memcpy from the shared buffer into 'table' is visible
 * in this extract — presumably on an elided trailing line; confirm.
 */
1496 static int smu_v11_0_get_activity_monitor_coeff(struct smu_context *smu,
1497 uint8_t *table, uint16_t workload_type)
1500 ret = smu_send_smc_msg_with_param(smu, SMU_MSG_SetDriverDramAddrHigh,
1501 upper_32_bits(smu->smu_table.tables[TABLE_ACTIVITY_MONITOR_COEFF].mc_address))
1503 pr_err("[%s] Attempt to Set Dram Addr High Failed!", __func__);
1507 ret = smu_send_smc_msg_with_param(smu, SMU_MSG_SetDriverDramAddrLow,
1508 lower_32_bits(smu->smu_table.tables[TABLE_ACTIVITY_MONITOR_COEFF].mc_address))
1510 pr_err("[%s] Attempt to Set Dram Addr Low Failed!", __func__);
/* SMU -> DRAM: correct direction for a getter. */
1514 ret = smu_send_smc_msg_with_param(smu, SMU_MSG_TransferTableSmu2Dram,
1515 TABLE_ACTIVITY_MONITOR_COEFF | (workload_type << 16));
1517 pr_err("[%s] Attempt to Transfer Table From SMU Failed!", __func__);
/*
 * smu_v11_0_conv_power_profile_to_pplib_workload - map a
 * PP_SMC_POWER_PROFILE_* index to the corresponding WORKLOAD_*_BIT used
 * in the PPLib/SMC workload mask; unknown profiles fall through to the
 * initial value 0.
 * NOTE(review): no 'break;' statements are visible between cases in this
 * extract — presumably elided by the extraction; confirm before editing.
 */
1524 static int smu_v11_0_conv_power_profile_to_pplib_workload(int power_profile)
1526 int pplib_workload = 0;
1528 switch (power_profile) {
1529 case PP_SMC_POWER_PROFILE_BOOTUP_DEFAULT:
1530 pplib_workload = WORKLOAD_DEFAULT_BIT;
1532 case PP_SMC_POWER_PROFILE_FULLSCREEN3D:
1533 pplib_workload = WORKLOAD_PPLIB_FULL_SCREEN_3D_BIT;
1535 case PP_SMC_POWER_PROFILE_POWERSAVING:
1536 pplib_workload = WORKLOAD_PPLIB_POWER_SAVING_BIT;
1538 case PP_SMC_POWER_PROFILE_VIDEO:
1539 pplib_workload = WORKLOAD_PPLIB_VIDEO_BIT;
1541 case PP_SMC_POWER_PROFILE_VR:
1542 pplib_workload = WORKLOAD_PPLIB_VR_BIT;
1544 case PP_SMC_POWER_PROFILE_COMPUTE:
1545 pplib_workload = WORKLOAD_PPLIB_COMPUTE_BIT;
1547 case PP_SMC_POWER_PROFILE_CUSTOM:
1548 pplib_workload = WORKLOAD_PPLIB_CUSTOM_BIT;
1552 return pplib_workload;
/*
 * smu_v11_0_get_power_profile_mode - format a human-readable table of
 * activity-monitor coefficients for every profile up to
 * PP_SMC_POWER_PROFILE_CUSTOM into 'buf' (sysfs pp_power_profile_mode
 * style).  Each profile section prints four coefficient rows (Gfx, Soc,
 * Mem, Fclk); the currently selected profile is marked with '*'.
 * Returns the number of bytes written via the accumulated 'size'.
 */
1555 static int smu_v11_0_get_power_profile_mode(struct smu_context *smu, char *buf)
1557 DpmActivityMonitorCoeffInt_t activity_monitor;
1558 uint32_t i, size = 0;
1559 uint16_t workload_type = 0;
1560 static const char *profile_name[] = {
1568 static const char *title[] = {
1569 "PROFILE_INDEX(NAME)",
1573 "MinActiveFreqType",
1578 "PD_Data_error_coeff",
1579 "PD_Data_error_rate_coeff"};
/* Header row: 11 column titles. */
1585 size += sprintf(buf + size, "%16s %s %s %s %s %s %s %s %s %s %s\n",
1586 title[0], title[1], title[2], title[3], title[4], title[5],
1587 title[6], title[7], title[8], title[9], title[10]);
1589 for (i = 0; i <= PP_SMC_POWER_PROFILE_CUSTOM; i++) {
1590 /* conv PP_SMC_POWER_PROFILE* to WORKLOAD_PPLIB_*_BIT */
1591 workload_type = smu_v11_0_conv_power_profile_to_pplib_workload(i);
1592 result = smu_v11_0_get_activity_monitor_coeff(smu,
1593 (uint8_t *)(&activity_monitor),
1596 pr_err("[%s] Failed to get activity monitor!", __func__);
/* Section header; '*' marks the active profile. */
1600 size += sprintf(buf + size, "%2d %14s%s:\n",
1601 i, profile_name[i], (i == smu->power_profile_mode) ? "*" : " ");
/* Row 1: gfxclk coefficients. */
1603 size += sprintf(buf + size, "%19s %d(%13s) %7d %7d %7d %7d %7d %7d %7d %7d %7d\n",
1607 activity_monitor.Gfx_FPS,
1608 activity_monitor.Gfx_UseRlcBusy,
1609 activity_monitor.Gfx_MinActiveFreqType,
1610 activity_monitor.Gfx_MinActiveFreq,
1611 activity_monitor.Gfx_BoosterFreqType,
1612 activity_monitor.Gfx_BoosterFreq,
1613 activity_monitor.Gfx_PD_Data_limit_c,
1614 activity_monitor.Gfx_PD_Data_error_coeff,
1615 activity_monitor.Gfx_PD_Data_error_rate_coeff);
/* Row 2: socclk coefficients. */
1617 size += sprintf(buf + size, "%19s %d(%13s) %7d %7d %7d %7d %7d %7d %7d %7d %7d\n",
1621 activity_monitor.Soc_FPS,
1622 activity_monitor.Soc_UseRlcBusy,
1623 activity_monitor.Soc_MinActiveFreqType,
1624 activity_monitor.Soc_MinActiveFreq,
1625 activity_monitor.Soc_BoosterFreqType,
1626 activity_monitor.Soc_BoosterFreq,
1627 activity_monitor.Soc_PD_Data_limit_c,
1628 activity_monitor.Soc_PD_Data_error_coeff,
1629 activity_monitor.Soc_PD_Data_error_rate_coeff);
/* Row 3: memclk coefficients. */
1631 size += sprintf(buf + size, "%19s %d(%13s) %7d %7d %7d %7d %7d %7d %7d %7d %7d\n",
1635 activity_monitor.Mem_FPS,
1636 activity_monitor.Mem_UseRlcBusy,
1637 activity_monitor.Mem_MinActiveFreqType,
1638 activity_monitor.Mem_MinActiveFreq,
1639 activity_monitor.Mem_BoosterFreqType,
1640 activity_monitor.Mem_BoosterFreq,
1641 activity_monitor.Mem_PD_Data_limit_c,
1642 activity_monitor.Mem_PD_Data_error_coeff,
1643 activity_monitor.Mem_PD_Data_error_rate_coeff);
/* Row 4: fclk coefficients. */
1645 size += sprintf(buf + size, "%19s %d(%13s) %7d %7d %7d %7d %7d %7d %7d %7d %7d\n",
1649 activity_monitor.Fclk_FPS,
1650 activity_monitor.Fclk_UseRlcBusy,
1651 activity_monitor.Fclk_MinActiveFreqType,
1652 activity_monitor.Fclk_MinActiveFreq,
1653 activity_monitor.Fclk_BoosterFreqType,
1654 activity_monitor.Fclk_BoosterFreq,
1655 activity_monitor.Fclk_PD_Data_limit_c,
1656 activity_monitor.Fclk_PD_Data_error_coeff,
1657 activity_monitor.Fclk_PD_Data_error_rate_coeff);
/*
 * smu_v11_0_set_power_profile_mode - select a power profile; for the
 * CUSTOM profile, input[0] picks the clock domain (0 gfx, 1 soc, 2 mem,
 * 3 fclk) and input[1..9] supply its nine activity-monitor coefficients,
 * which are read-modify-written through the SMC coefficient table.
 * Finally the profile is converted to a WORKLOAD_*_BIT and sent via
 * SMU_MSG_SetWorkloadMask.
 * NOTE(review): smu->power_profile_mode is assigned from input[size]
 * *before* the range check below — the stale/invalid value remains
 * stored on the error path; worth confirming against the full file.
 */
1663 static int smu_v11_0_set_power_profile_mode(struct smu_context *smu, long *input, uint32_t size)
1665 DpmActivityMonitorCoeffInt_t activity_monitor;
1666 int workload_type, ret = 0;
1668 smu->power_profile_mode = input[size];
1670 if (smu->power_profile_mode > PP_SMC_POWER_PROFILE_CUSTOM) {
1671 pr_err("Invalid power profile mode %d\n", smu->power_profile_mode);
1675 if (smu->power_profile_mode == PP_SMC_POWER_PROFILE_CUSTOM) {
/* Fetch the current CUSTOM coefficients so unrelated fields are preserved. */
1679 ret = smu_v11_0_get_activity_monitor_coeff(smu,
1680 (uint8_t *)(&activity_monitor),
1681 WORKLOAD_PPLIB_CUSTOM_BIT);
1683 pr_err("[%s] Failed to get activity monitor!", __func__);
1688 case 0: /* Gfxclk */
1689 activity_monitor.Gfx_FPS = input[1];
1690 activity_monitor.Gfx_UseRlcBusy = input[2];
1691 activity_monitor.Gfx_MinActiveFreqType = input[3];
1692 activity_monitor.Gfx_MinActiveFreq = input[4];
1693 activity_monitor.Gfx_BoosterFreqType = input[5];
1694 activity_monitor.Gfx_BoosterFreq = input[6];
1695 activity_monitor.Gfx_PD_Data_limit_c = input[7];
1696 activity_monitor.Gfx_PD_Data_error_coeff = input[8];
1697 activity_monitor.Gfx_PD_Data_error_rate_coeff = input[9];
1699 case 1: /* Socclk */
1700 activity_monitor.Soc_FPS = input[1];
1701 activity_monitor.Soc_UseRlcBusy = input[2];
1702 activity_monitor.Soc_MinActiveFreqType = input[3];
1703 activity_monitor.Soc_MinActiveFreq = input[4];
1704 activity_monitor.Soc_BoosterFreqType = input[5];
1705 activity_monitor.Soc_BoosterFreq = input[6];
1706 activity_monitor.Soc_PD_Data_limit_c = input[7];
1707 activity_monitor.Soc_PD_Data_error_coeff = input[8];
1708 activity_monitor.Soc_PD_Data_error_rate_coeff = input[9];
/* case 2 (Memclk) — case label presumably on an elided line. */
1711 activity_monitor.Mem_FPS = input[1];
1712 activity_monitor.Mem_UseRlcBusy = input[2];
1713 activity_monitor.Mem_MinActiveFreqType = input[3];
1714 activity_monitor.Mem_MinActiveFreq = input[4];
1715 activity_monitor.Mem_BoosterFreqType = input[5];
1716 activity_monitor.Mem_BoosterFreq = input[6];
1717 activity_monitor.Mem_PD_Data_limit_c = input[7];
1718 activity_monitor.Mem_PD_Data_error_coeff = input[8];
1719 activity_monitor.Mem_PD_Data_error_rate_coeff = input[9];
/* case 3 (Fclk) — case label presumably on an elided line. */
1722 activity_monitor.Fclk_FPS = input[1];
1723 activity_monitor.Fclk_UseRlcBusy = input[2];
1724 activity_monitor.Fclk_MinActiveFreqType = input[3];
1725 activity_monitor.Fclk_MinActiveFreq = input[4];
1726 activity_monitor.Fclk_BoosterFreqType = input[5];
1727 activity_monitor.Fclk_BoosterFreq = input[6];
1728 activity_monitor.Fclk_PD_Data_limit_c = input[7];
1729 activity_monitor.Fclk_PD_Data_error_coeff = input[8];
1730 activity_monitor.Fclk_PD_Data_error_rate_coeff = input[9];
/* Write the modified CUSTOM coefficients back to the SMC. */
1734 ret = smu_v11_0_set_activity_monitor_coeff(smu,
1735 (uint8_t *)(&activity_monitor),
1736 WORKLOAD_PPLIB_CUSTOM_BIT);
1738 pr_err("[%s] Failed to set activity monitor!", __func__);
1743 /* conv PP_SMC_POWER_PROFILE* to WORKLOAD_PPLIB_*_BIT */
1745 smu_v11_0_conv_power_profile_to_pplib_workload(smu->power_profile_mode);
/* NOTE(review): the SetWorkloadMask return value is not checked here. */
1746 smu_send_smc_msg_with_param(smu, SMU_MSG_SetWorkloadMask,
1747 1 << workload_type);
/*
 * smu_v11_0_update_od8_settings - change one overdrive (OD8) value:
 * read the current TABLE_OVERDRIVE from the SMC into the driver cache,
 * update the specified index via the ASIC hook, then write the table
 * back to the SMC.
 */
1752 static int smu_v11_0_update_od8_settings(struct smu_context *smu,
1756 struct smu_table_context *table_context = &smu->smu_table;
/* SMC -> driver (export). */
1759 ret = smu_update_table(smu, TABLE_OVERDRIVE,
1760 table_context->overdrive_table, false);
1762 pr_err("Failed to export over drive table!\n");
1766 smu_update_specified_od8_value(smu, index, value);
/* driver -> SMC (import) with the single value updated. */
1768 ret = smu_update_table(smu, TABLE_OVERDRIVE,
1769 table_context->overdrive_table, true);
1771 pr_err("Failed to import over drive table!\n");
1778 static int smu_v11_0_dpm_set_uvd_enable(struct smu_context *smu, bool enable)
1780 if (!smu_feature_is_supported(smu, FEATURE_DPM_VCE_BIT))
1783 if (enable == smu_feature_is_enabled(smu, FEATURE_DPM_VCE_BIT))
1786 return smu_feature_set_enabled(smu, FEATURE_DPM_VCE_BIT, enable);
1789 static int smu_v11_0_dpm_set_vce_enable(struct smu_context *smu, bool enable)
1791 if (!smu_feature_is_supported(smu, FEATURE_DPM_UVD_BIT))
1794 if (enable == smu_feature_is_enabled(smu, FEATURE_DPM_UVD_BIT))
1797 return smu_feature_set_enabled(smu, FEATURE_DPM_UVD_BIT, enable);
/*
 * smu_v11_0_get_current_rpm - read the current fan speed from the SMC
 * (SMU_MSG_GetCurrentRpm); the value is returned through
 * smu_read_smc_arg() into *current_rpm.
 */
1800 static int smu_v11_0_get_current_rpm(struct smu_context *smu,
1801 uint32_t *current_rpm)
1805 ret = smu_send_smc_msg(smu, SMU_MSG_GetCurrentRpm);
1808 pr_err("Attempt to get current RPM from SMC Failed!\n");
1812 smu_read_smc_arg(smu, current_rpm);
/*
 * smu_v11_0_get_fan_control_mode - AMD_FAN_CTRL_AUTO when the SMC's
 * FAN_CONTROL feature is enabled, AMD_FAN_CTRL_MANUAL otherwise.
 */
1818 smu_v11_0_get_fan_control_mode(struct smu_context *smu)
1820 if (!smu_feature_is_enabled(smu, FEATURE_FAN_CONTROL_BIT))
1821 return AMD_FAN_CTRL_MANUAL;
1823 return AMD_FAN_CTRL_AUTO;
1827 smu_v11_0_get_fan_speed_percent(struct smu_context *smu,
1831 uint32_t percent = 0;
1832 uint32_t current_rpm;
1833 PPTable_t *pptable = smu->smu_table.driver_pptable;
1835 ret = smu_v11_0_get_current_rpm(smu, ¤t_rpm);
1836 percent = current_rpm * 100 / pptable->FanMaximumRpm;
1837 *speed = percent > 100 ? 100 : percent;
1843 smu_v11_0_smc_fan_control(struct smu_context *smu, bool start)
1847 if (smu_feature_is_supported(smu, FEATURE_FAN_CONTROL_BIT))
1850 ret = smu_feature_set_enabled(smu, FEATURE_FAN_CONTROL_BIT, start);
1852 pr_err("[%s]%s smc FAN CONTROL feature failed!",
1853 __func__, (start ? "Start" : "Stop"));
/*
 * smu_v11_0_set_fan_static_mode - put the FDO PWM block in a fixed,
 * driver-controlled mode: clear CG_FDO_CTRL2.TMIN, then program
 * CG_FDO_CTRL2.FDO_PWM_MODE with 'mode'.
 */
1859 smu_v11_0_set_fan_static_mode(struct smu_context *smu, uint32_t mode)
1861 struct amdgpu_device *adev = smu->adev;
/* Clear the minimum-temperature field first. */
1863 WREG32_SOC15(THM, 0, mmCG_FDO_CTRL2,
1864 REG_SET_FIELD(RREG32_SOC15(THM, 0, mmCG_FDO_CTRL2),
1865 CG_FDO_CTRL2, TMIN, 0));
/* Then select the requested static PWM mode. */
1866 WREG32_SOC15(THM, 0, mmCG_FDO_CTRL2,
1867 REG_SET_FIELD(RREG32_SOC15(THM, 0, mmCG_FDO_CTRL2),
1868 CG_FDO_CTRL2, FDO_PWM_MODE, mode));
/*
 * smu_v11_0_set_fan_speed_percent - force the fan to a fixed duty cycle.
 * Stops SMC fan control, reads FMAX_DUTY100 (the register value for 100%
 * duty), scales 'speed' (percent) into a duty value, programs
 * CG_FDO_CTRL0.FDO_STATIC_DUTY and switches to FDO_PWM_MODE_STATIC.
 * NOTE(review): lines between the multiply and 'duty = tmp64' are elided
 * in this extract — presumably a do_div(tmp64, 100); and a clamp of
 * 'speed' to 100 likely precedes it; confirm against the full file.
 */
1874 smu_v11_0_set_fan_speed_percent(struct smu_context *smu, uint32_t speed)
1876 struct amdgpu_device *adev = smu->adev;
/* Take fan control away from the SMC before driving it manually. */
1885 if (smu_v11_0_smc_fan_control(smu, stop))
1887 duty100 = REG_GET_FIELD(RREG32_SOC15(THM, 0, mmCG_FDO_CTRL1),
1888 CG_FDO_CTRL1, FMAX_DUTY100);
/* 64-bit intermediate avoids overflow of speed * duty100. */
1892 tmp64 = (uint64_t)speed * duty100;
1894 duty = (uint32_t)tmp64;
1896 WREG32_SOC15(THM, 0, mmCG_FDO_CTRL0,
1897 REG_SET_FIELD(RREG32_SOC15(THM, 0, mmCG_FDO_CTRL0),
1898 CG_FDO_CTRL0, FDO_STATIC_DUTY, duty));
1900 return smu_v11_0_set_fan_static_mode(smu, FDO_PWM_MODE_STATIC);
/*
 * smu_v11_0_set_fan_control_mode - dispatch on the requested AMD fan
 * mode: NONE -> force 100% static speed, MANUAL -> stop SMC fan control,
 * AUTO -> start SMC fan control.  Logs and returns an error on failure.
 * NOTE(review): 'start'/'stop' are presumably local booleans declared on
 * elided lines — confirm against the full file.
 */
1904 smu_v11_0_set_fan_control_mode(struct smu_context *smu,
1912 case AMD_FAN_CTRL_NONE:
1913 ret = smu_v11_0_set_fan_speed_percent(smu, 100);
1915 case AMD_FAN_CTRL_MANUAL:
1916 ret = smu_v11_0_smc_fan_control(smu, stop);
1918 case AMD_FAN_CTRL_AUTO:
1919 ret = smu_v11_0_smc_fan_control(smu, start);
1926 pr_err("[%s]Set fan control mode failed!", __func__);
/*
 * smu_v11_0_set_fan_speed_rpm - force a fixed fan speed in RPM.
 * Under smu->mutex: stop SMC fan control, derive the tachometer target
 * period from the crystal clock (xclk) and the requested RPM, program
 * CG_TACH_CTRL.TARGET_PERIOD, then switch to FDO_PWM_MODE_STATIC_RPM.
 * NOTE(review): the tach_period expression divides by 'speed'; a
 * zero-speed guard is not visible in this extract — presumably on an
 * elided line; confirm before relying on this path.
 */
1933 static int smu_v11_0_set_fan_speed_rpm(struct smu_context *smu,
1936 struct amdgpu_device *adev = smu->adev;
1938 uint32_t tach_period, crystal_clock_freq;
1944 mutex_lock(&(smu->mutex));
1945 ret = smu_v11_0_smc_fan_control(smu, stop);
1947 goto set_fan_speed_rpm_failed;
1949 crystal_clock_freq = amdgpu_asic_get_xclk(adev);
1950 tach_period = 60 * crystal_clock_freq * 10000 / (8 * speed);
1951 WREG32_SOC15(THM, 0, mmCG_TACH_CTRL,
1952 REG_SET_FIELD(RREG32_SOC15(THM, 0, mmCG_TACH_CTRL),
1953 CG_TACH_CTRL, TARGET_PERIOD,
1956 ret = smu_v11_0_set_fan_static_mode(smu, FDO_PWM_MODE_STATIC_RPM);
/* Single unlock point for both the error and the success path. */
1958 set_fan_speed_rpm_failed:
1959 mutex_unlock(&(smu->mutex));
/*
 * smu_v11_0_funcs - generic SMU v11 callback table shared by SMU11
 * ASICs; smu_v11_0_set_smu_funcs() installs it and layers the
 * ASIC-specific ppt funcs (e.g. vega20_set_ppt_funcs) on top.
 */
1963 static const struct smu_funcs smu_v11_0_funcs = {
1964 .init_microcode = smu_v11_0_init_microcode,
1965 .load_microcode = smu_v11_0_load_microcode,
1966 .check_fw_status = smu_v11_0_check_fw_status,
1967 .check_fw_version = smu_v11_0_check_fw_version,
1968 .send_smc_msg = smu_v11_0_send_msg,
1969 .send_smc_msg_with_param = smu_v11_0_send_msg_with_param,
1970 .read_smc_arg = smu_v11_0_read_arg,
1971 .read_pptable_from_vbios = smu_v11_0_read_pptable_from_vbios,
1972 .init_smc_tables = smu_v11_0_init_smc_tables,
1973 .fini_smc_tables = smu_v11_0_fini_smc_tables,
1974 .init_power = smu_v11_0_init_power,
1975 .fini_power = smu_v11_0_fini_power,
1976 .get_vbios_bootup_values = smu_v11_0_get_vbios_bootup_values,
1977 .get_clk_info_from_vbios = smu_v11_0_get_clk_info_from_vbios,
1978 .notify_memory_pool_location = smu_v11_0_notify_memory_pool_location,
1979 .check_pptable = smu_v11_0_check_pptable,
1980 .parse_pptable = smu_v11_0_parse_pptable,
1981 .populate_smc_pptable = smu_v11_0_populate_smc_pptable,
1982 .write_pptable = smu_v11_0_write_pptable,
1983 .write_watermarks_table = smu_v11_0_write_watermarks_table,
1984 .set_min_dcef_deep_sleep = smu_v11_0_set_min_dcef_deep_sleep,
1985 .set_tool_table_location = smu_v11_0_set_tool_table_location,
1986 .init_display = smu_v11_0_init_display,
1987 .set_allowed_mask = smu_v11_0_set_allowed_mask,
1988 .get_enabled_mask = smu_v11_0_get_enabled_mask,
1989 .is_dpm_running = smu_v11_0_is_dpm_running,
1990 .enable_all_mask = smu_v11_0_enable_all_mask,
1991 .disable_all_mask = smu_v11_0_disable_all_mask,
1992 .update_feature_enable_state = smu_v11_0_update_feature_enable_state,
1993 .notify_display_change = smu_v11_0_notify_display_change,
1994 .get_power_limit = smu_v11_0_get_power_limit,
1995 .set_power_limit = smu_v11_0_set_power_limit,
1996 .get_current_clk_freq = smu_v11_0_get_current_clk_freq,
1997 .init_max_sustainable_clocks = smu_v11_0_init_max_sustainable_clocks,
1998 .start_thermal_control = smu_v11_0_start_thermal_control,
1999 .read_sensor = smu_v11_0_read_sensor,
2000 .set_deep_sleep_dcefclk = smu_v11_0_set_deep_sleep_dcefclk,
2001 .display_clock_voltage_request = smu_v11_0_display_clock_voltage_request,
2002 .set_watermarks_for_clock_ranges = smu_v11_0_set_watermarks_for_clock_ranges,
2003 .get_sclk = smu_v11_0_dpm_get_sclk,
2004 .get_mclk = smu_v11_0_dpm_get_mclk,
2005 .set_od8_default_settings = smu_v11_0_set_od8_default_settings,
2006 .get_activity_monitor_coeff = smu_v11_0_get_activity_monitor_coeff,
2007 .set_activity_monitor_coeff = smu_v11_0_set_activity_monitor_coeff,
2008 .conv_power_profile_to_pplib_workload = smu_v11_0_conv_power_profile_to_pplib_workload,
2009 .get_power_profile_mode = smu_v11_0_get_power_profile_mode,
2010 .set_power_profile_mode = smu_v11_0_set_power_profile_mode,
2011 .update_od8_settings = smu_v11_0_update_od8_settings,
2012 .dpm_set_uvd_enable = smu_v11_0_dpm_set_uvd_enable,
2013 .dpm_set_vce_enable = smu_v11_0_dpm_set_vce_enable,
2014 .get_current_rpm = smu_v11_0_get_current_rpm,
2015 .get_fan_control_mode = smu_v11_0_get_fan_control_mode,
2016 .set_fan_control_mode = smu_v11_0_set_fan_control_mode,
2017 .get_fan_speed_percent = smu_v11_0_get_fan_speed_percent,
2018 .set_fan_speed_percent = smu_v11_0_set_fan_speed_percent,
2019 .set_fan_speed_rpm = smu_v11_0_set_fan_speed_rpm,
2022 void smu_v11_0_set_smu_funcs(struct smu_context *smu)
2024 struct amdgpu_device *adev = smu->adev;
2026 smu->funcs = &smu_v11_0_funcs;
2028 switch (adev->asic_type) {
2030 vega20_set_ppt_funcs(smu);
2033 pr_warn("Unknow asic for smu11\n");