/*
 * Copyright 2019 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 */
#include <linux/firmware.h>
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/reboot.h>

#define SMU_11_0_PARTIAL_PPTABLE
#define SWSMU_CODE_LAYER_L3

#include "amdgpu_smu.h"
#include "atomfirmware.h"
#include "amdgpu_atomfirmware.h"
#include "amdgpu_atombios.h"
#include "smu_v11_0.h"
#include "soc15_common.h"
#include "amdgpu_ras.h"

#include "asic_reg/thm/thm_11_0_2_offset.h"
#include "asic_reg/thm/thm_11_0_2_sh_mask.h"
#include "asic_reg/mp/mp_11_0_offset.h"
#include "asic_reg/mp/mp_11_0_sh_mask.h"
#include "asic_reg/smuio/smuio_11_0_0_offset.h"
#include "asic_reg/smuio/smuio_11_0_0_sh_mask.h"
/*
 * DO NOT use these for err/warn/info/debug messages.
 * Use dev_err, dev_warn, dev_info and dev_dbg instead.
 * They are more MGPU friendly.
 */
MODULE_FIRMWARE("amdgpu/arcturus_smc.bin");
MODULE_FIRMWARE("amdgpu/navi10_smc.bin");
MODULE_FIRMWARE("amdgpu/navi14_smc.bin");
MODULE_FIRMWARE("amdgpu/navi12_smc.bin");
MODULE_FIRMWARE("amdgpu/sienna_cichlid_smc.bin");
MODULE_FIRMWARE("amdgpu/navy_flounder_smc.bin");
MODULE_FIRMWARE("amdgpu/dimgrey_cavefish_smc.bin");
#define SMU11_VOLTAGE_SCALE 4

#define SMU11_MODE1_RESET_WAIT_TIME_IN_MS 500  //500ms

#define LINK_WIDTH_MAX	6
#define LINK_SPEED_MAX	3

#define smnPCIE_LC_LINK_WIDTH_CNTL		0x11140288
#define PCIE_LC_LINK_WIDTH_CNTL__LC_LINK_WIDTH_RD_MASK 0x00000070L
#define PCIE_LC_LINK_WIDTH_CNTL__LC_LINK_WIDTH_RD__SHIFT 0x4
#define smnPCIE_LC_SPEED_CNTL			0x11140290
#define PCIE_LC_SPEED_CNTL__LC_CURRENT_DATA_RATE_MASK 0xC000
#define PCIE_LC_SPEED_CNTL__LC_CURRENT_DATA_RATE__SHIFT 0xE
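/*
 * Decode tables for the PCIE status fields defined above: link_width[]
 * maps LC_LINK_WIDTH_RD to a lane count (x0..x16) and link_speed[]
 * maps LC_CURRENT_DATA_RATE to the link rate in 0.1 GT/s units
 * (25 = 2.5 GT/s Gen1 ... 160 = 16 GT/s Gen4).
 */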
static int link_width[] = {0, 1, 2, 4, 8, 12, 16};
static int link_speed[] = {25, 50, 80, 160};
int smu_v11_0_init_microcode(struct smu_context *smu)
	struct amdgpu_device *adev = smu->adev;
	const char *chip_name;
	char fw_name[SMU_FW_NAME_LEN];
	const struct smc_firmware_header_v1_0 *hdr;
	const struct common_firmware_header *header;
	struct amdgpu_firmware_info *ucode = NULL;

	switch (adev->asic_type) {
		chip_name = "arcturus";
		chip_name = "navi14";
		chip_name = "navi12";
	case CHIP_SIENNA_CICHLID:
		chip_name = "sienna_cichlid";
	case CHIP_NAVY_FLOUNDER:
		chip_name = "navy_flounder";
	case CHIP_DIMGREY_CAVEFISH:
		chip_name = "dimgrey_cavefish";
		dev_err(adev->dev, "Unsupported ASIC type %d\n", adev->asic_type);

	snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_smc.bin", chip_name);

	err = request_firmware(&adev->pm.fw, fw_name, adev->dev);
	err = amdgpu_ucode_validate(adev->pm.fw);

	hdr = (const struct smc_firmware_header_v1_0 *) adev->pm.fw->data;
	amdgpu_ucode_print_smc_hdr(&hdr->header);
	adev->pm.fw_version = le32_to_cpu(hdr->header.ucode_version);

	if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) {
		ucode = &adev->firmware.ucode[AMDGPU_UCODE_ID_SMC];
		ucode->ucode_id = AMDGPU_UCODE_ID_SMC;
		ucode->fw = adev->pm.fw;
		header = (const struct common_firmware_header *)ucode->fw->data;
		adev->firmware.fw_size +=
			ALIGN(le32_to_cpu(header->ucode_size_bytes), PAGE_SIZE);

	DRM_ERROR("smu_v11_0: Failed to load firmware \"%s\"\n",
	release_firmware(adev->pm.fw);

void smu_v11_0_fini_microcode(struct smu_context *smu)
	struct amdgpu_device *adev = smu->adev;

	release_firmware(adev->pm.fw);
	adev->pm.fw_version = 0;
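/*
 * Copy the SMC firmware image into MP1 SRAM through the indirect PCIE
 * register aperture, take MP1 out of reset by toggling MP1_SMN_PUB_CTRL,
 * then poll the firmware-flags register until INTERRUPTS_ENABLED is set
 * (or the usec timeout expires).
 */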
int smu_v11_0_load_microcode(struct smu_context *smu)
	struct amdgpu_device *adev = smu->adev;
	const struct smc_firmware_header_v1_0 *hdr;
	uint32_t addr_start = MP1_SRAM;
	uint32_t smc_fw_size;
	uint32_t mp1_fw_flags;

	hdr = (const struct smc_firmware_header_v1_0 *) adev->pm.fw->data;
	src = (const uint32_t *)(adev->pm.fw->data +
		le32_to_cpu(hdr->header.ucode_array_offset_bytes));
	smc_fw_size = hdr->header.ucode_size_bytes;

	for (i = 1; i < smc_fw_size/4 - 1; i++) {
		WREG32_PCIE(addr_start, src[i]);

	WREG32_PCIE(MP1_Public | (smnMP1_PUB_CTRL & 0xffffffff),
		1 & MP1_SMN_PUB_CTRL__RESET_MASK);
	WREG32_PCIE(MP1_Public | (smnMP1_PUB_CTRL & 0xffffffff),
		1 & ~MP1_SMN_PUB_CTRL__RESET_MASK);

	for (i = 0; i < adev->usec_timeout; i++) {
		mp1_fw_flags = RREG32_PCIE(MP1_Public |
			(smnMP1_FIRMWARE_FLAGS & 0xffffffff));
		if ((mp1_fw_flags & MP1_FIRMWARE_FLAGS__INTERRUPTS_ENABLED_MASK) >>
			MP1_FIRMWARE_FLAGS__INTERRUPTS_ENABLED__SHIFT)

	if (i == adev->usec_timeout)
int smu_v11_0_check_fw_status(struct smu_context *smu)
	struct amdgpu_device *adev = smu->adev;
	uint32_t mp1_fw_flags;

	mp1_fw_flags = RREG32_PCIE(MP1_Public |
		(smnMP1_FIRMWARE_FLAGS & 0xffffffff));

	if ((mp1_fw_flags & MP1_FIRMWARE_FLAGS__INTERRUPTS_ENABLED_MASK) >>
		MP1_FIRMWARE_FLAGS__INTERRUPTS_ENABLED__SHIFT)

int smu_v11_0_check_fw_version(struct smu_context *smu)
	struct amdgpu_device *adev = smu->adev;
	uint32_t if_version = 0xff, smu_version = 0xff;
	uint8_t smu_minor, smu_debug;

	ret = smu_cmn_get_smc_version(smu, &if_version, &smu_version);
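	/*
	 * smu_version is packed as major (bits 31:16), minor (bits 15:8)
	 * and debug (bits 7:0), unpacked below.
	 */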
	smu_major = (smu_version >> 16) & 0xffff;
	smu_minor = (smu_version >> 8) & 0xff;
	smu_debug = (smu_version >> 0) & 0xff;

	adev->pm.fw_version = smu_version;

	switch (smu->adev->asic_type) {
		smu->smc_driver_if_version = SMU11_DRIVER_IF_VERSION_ARCT;
		smu->smc_driver_if_version = SMU11_DRIVER_IF_VERSION_NV10;
		smu->smc_driver_if_version = SMU11_DRIVER_IF_VERSION_NV12;
		smu->smc_driver_if_version = SMU11_DRIVER_IF_VERSION_NV14;
	case CHIP_SIENNA_CICHLID:
		smu->smc_driver_if_version = SMU11_DRIVER_IF_VERSION_Sienna_Cichlid;
	case CHIP_NAVY_FLOUNDER:
		smu->smc_driver_if_version = SMU11_DRIVER_IF_VERSION_Navy_Flounder;
		smu->smc_driver_if_version = SMU11_DRIVER_IF_VERSION_VANGOGH;
	case CHIP_DIMGREY_CAVEFISH:
		smu->smc_driver_if_version = SMU11_DRIVER_IF_VERSION_Dimgrey_Cavefish;
		dev_err(smu->adev->dev, "smu unsupported asic type:%d.\n", smu->adev->asic_type);
		smu->smc_driver_if_version = SMU11_DRIVER_IF_VERSION_INV;

	/*
	 * 1. if_version mismatch is not critical as our fw is designed
	 * to be backward compatible.
	 * 2. New fw usually brings some optimizations. But that's visible
	 * only on the paired driver.
	 * Considering the above, we just print a warning message instead
	 * of halting driver loading.
	 */
	if (if_version != smu->smc_driver_if_version) {
		dev_info(smu->adev->dev, "smu driver if version = 0x%08x, smu fw if version = 0x%08x, "
			"smu fw version = 0x%08x (%d.%d.%d)\n",
			smu->smc_driver_if_version, if_version,
			smu_version, smu_major, smu_minor, smu_debug);
		dev_warn(smu->adev->dev, "SMU driver if version not matched\n");
static int smu_v11_0_set_pptable_v2_0(struct smu_context *smu, void **table, uint32_t *size)
	struct amdgpu_device *adev = smu->adev;
	uint32_t ppt_offset_bytes;
	const struct smc_firmware_header_v2_0 *v2;

	v2 = (const struct smc_firmware_header_v2_0 *) adev->pm.fw->data;

	ppt_offset_bytes = le32_to_cpu(v2->ppt_offset_bytes);
	*size = le32_to_cpu(v2->ppt_size_bytes);
	*table = (uint8_t *)v2 + ppt_offset_bytes;

static int smu_v11_0_set_pptable_v2_1(struct smu_context *smu, void **table,
				      uint32_t *size, uint32_t pptable_id)
	struct amdgpu_device *adev = smu->adev;
	const struct smc_firmware_header_v2_1 *v2_1;
	struct smc_soft_pptable_entry *entries;
	uint32_t pptable_count = 0;

	v2_1 = (const struct smc_firmware_header_v2_1 *) adev->pm.fw->data;
	entries = (struct smc_soft_pptable_entry *)
		((uint8_t *)v2_1 + le32_to_cpu(v2_1->pptable_entry_offset));
	pptable_count = le32_to_cpu(v2_1->pptable_count);
	for (i = 0; i < pptable_count; i++) {
		if (le32_to_cpu(entries[i].id) == pptable_id) {
			*table = ((uint8_t *)v2_1 + le32_to_cpu(entries[i].ppt_offset_bytes));
			*size = le32_to_cpu(entries[i].ppt_size_bytes);

	if (i == pptable_count)

int smu_v11_0_setup_pptable(struct smu_context *smu)
	struct amdgpu_device *adev = smu->adev;
	const struct smc_firmware_header_v1_0 *hdr;
	uint16_t atom_table_size;
	uint16_t version_major, version_minor;

	if (!amdgpu_sriov_vf(adev)) {
		hdr = (const struct smc_firmware_header_v1_0 *) adev->pm.fw->data;
		version_major = le16_to_cpu(hdr->header.header_version_major);
		version_minor = le16_to_cpu(hdr->header.header_version_minor);
		if (version_major == 2 && smu->smu_table.boot_values.pp_table_id > 0) {
			dev_info(adev->dev, "use driver provided pptable %d\n", smu->smu_table.boot_values.pp_table_id);
			switch (version_minor) {
				ret = smu_v11_0_set_pptable_v2_0(smu, &table, &size);
				ret = smu_v11_0_set_pptable_v2_1(smu, &table, &size,
								 smu->smu_table.boot_values.pp_table_id);

		dev_info(adev->dev, "use vbios provided pptable\n");
		index = get_index_into_master_table(atom_master_list_of_data_tables_v2_1,

		ret = amdgpu_atombios_get_data_table(adev, index, &atom_table_size, &frev, &crev,
		size = atom_table_size;

	if (!smu->smu_table.power_play_table)
		smu->smu_table.power_play_table = table;
	if (!smu->smu_table.power_play_table_size)
		smu->smu_table.power_play_table_size = size;
int smu_v11_0_init_smc_tables(struct smu_context *smu)
	struct smu_table_context *smu_table = &smu->smu_table;
	struct smu_table *tables = smu_table->tables;

	smu_table->driver_pptable =
		kzalloc(tables[SMU_TABLE_PPTABLE].size, GFP_KERNEL);
	if (!smu_table->driver_pptable) {

	smu_table->max_sustainable_clocks =
		kzalloc(sizeof(struct smu_11_0_max_sustainable_clocks), GFP_KERNEL);
	if (!smu_table->max_sustainable_clocks) {

	/* Arcturus does not support OVERDRIVE */
	if (tables[SMU_TABLE_OVERDRIVE].size) {
		smu_table->overdrive_table =
			kzalloc(tables[SMU_TABLE_OVERDRIVE].size, GFP_KERNEL);
		if (!smu_table->overdrive_table) {

		smu_table->boot_overdrive_table =
			kzalloc(tables[SMU_TABLE_OVERDRIVE].size, GFP_KERNEL);
		if (!smu_table->boot_overdrive_table) {

	kfree(smu_table->overdrive_table);
	kfree(smu_table->max_sustainable_clocks);
	kfree(smu_table->driver_pptable);

int smu_v11_0_fini_smc_tables(struct smu_context *smu)
	struct smu_table_context *smu_table = &smu->smu_table;
	struct smu_dpm_context *smu_dpm = &smu->smu_dpm;

	kfree(smu_table->gpu_metrics_table);
	kfree(smu_table->boot_overdrive_table);
	kfree(smu_table->overdrive_table);
	kfree(smu_table->max_sustainable_clocks);
	kfree(smu_table->driver_pptable);
	kfree(smu_table->clocks_table);
	smu_table->gpu_metrics_table = NULL;
	smu_table->boot_overdrive_table = NULL;
	smu_table->overdrive_table = NULL;
	smu_table->max_sustainable_clocks = NULL;
	smu_table->driver_pptable = NULL;
	smu_table->clocks_table = NULL;

	kfree(smu_table->hardcode_pptable);
	smu_table->hardcode_pptable = NULL;

	kfree(smu_table->metrics_table);
	kfree(smu_table->watermarks_table);
	smu_table->metrics_table = NULL;
	smu_table->watermarks_table = NULL;
	smu_table->metrics_time = 0;

	kfree(smu_dpm->dpm_context);
	kfree(smu_dpm->golden_dpm_context);
	kfree(smu_dpm->dpm_current_power_state);
	kfree(smu_dpm->dpm_request_power_state);
	smu_dpm->dpm_context = NULL;
	smu_dpm->golden_dpm_context = NULL;
	smu_dpm->dpm_context_size = 0;
	smu_dpm->dpm_current_power_state = NULL;
	smu_dpm->dpm_request_power_state = NULL;
int smu_v11_0_init_power(struct smu_context *smu)
	struct smu_power_context *smu_power = &smu->smu_power;

	smu_power->power_context = kzalloc(sizeof(struct smu_11_0_power_context),
	if (!smu_power->power_context)
	smu_power->power_context_size = sizeof(struct smu_11_0_power_context);

int smu_v11_0_fini_power(struct smu_context *smu)
	struct smu_power_context *smu_power = &smu->smu_power;

	kfree(smu_power->power_context);
	smu_power->power_context = NULL;
	smu_power->power_context_size = 0;
static int smu_v11_0_atom_get_smu_clockinfo(struct amdgpu_device *adev,
	struct atom_get_smu_clock_info_parameters_v3_1 input = {0};
	struct atom_get_smu_clock_info_output_parameters_v3_1 *output;

	input.clk_id = clk_id;
	input.syspll_id = syspll_id;
	input.command = GET_SMU_CLOCK_INFO_V3_1_GET_CLOCK_FREQ;
	index = get_index_into_master_table(atom_master_list_of_command_functions_v2_1,

	ret = amdgpu_atom_execute_table(adev->mode_info.atom_context, index,

	output = (struct atom_get_smu_clock_info_output_parameters_v3_1 *)&input;
	*clk_freq = le32_to_cpu(output->atom_smu_outputclkfreq.smu_clock_freq_hz) / 10000;

int smu_v11_0_get_vbios_bootup_values(struct smu_context *smu)
	struct atom_common_table_header *header;
	struct atom_firmware_info_v3_3 *v_3_3;
	struct atom_firmware_info_v3_1 *v_3_1;

	index = get_index_into_master_table(atom_master_list_of_data_tables_v2_1,

	ret = amdgpu_atombios_get_data_table(smu->adev, index, &size, &frev, &crev,
					     (uint8_t **)&header);

	if (header->format_revision != 3) {
		dev_err(smu->adev->dev, "unknown atom_firmware_info version! for smu11\n");

	switch (header->content_revision) {
		v_3_1 = (struct atom_firmware_info_v3_1 *)header;
		smu->smu_table.boot_values.revision = v_3_1->firmware_revision;
		smu->smu_table.boot_values.gfxclk = v_3_1->bootup_sclk_in10khz;
		smu->smu_table.boot_values.uclk = v_3_1->bootup_mclk_in10khz;
		smu->smu_table.boot_values.socclk = 0;
		smu->smu_table.boot_values.dcefclk = 0;
		smu->smu_table.boot_values.vddc = v_3_1->bootup_vddc_mv;
		smu->smu_table.boot_values.vddci = v_3_1->bootup_vddci_mv;
		smu->smu_table.boot_values.mvddc = v_3_1->bootup_mvddc_mv;
		smu->smu_table.boot_values.vdd_gfx = v_3_1->bootup_vddgfx_mv;
		smu->smu_table.boot_values.cooling_id = v_3_1->coolingsolution_id;
		smu->smu_table.boot_values.pp_table_id = 0;
		smu->smu_table.boot_values.firmware_caps = v_3_1->firmware_capability;

		v_3_3 = (struct atom_firmware_info_v3_3 *)header;
		smu->smu_table.boot_values.revision = v_3_3->firmware_revision;
		smu->smu_table.boot_values.gfxclk = v_3_3->bootup_sclk_in10khz;
		smu->smu_table.boot_values.uclk = v_3_3->bootup_mclk_in10khz;
		smu->smu_table.boot_values.socclk = 0;
		smu->smu_table.boot_values.dcefclk = 0;
		smu->smu_table.boot_values.vddc = v_3_3->bootup_vddc_mv;
		smu->smu_table.boot_values.vddci = v_3_3->bootup_vddci_mv;
		smu->smu_table.boot_values.mvddc = v_3_3->bootup_mvddc_mv;
		smu->smu_table.boot_values.vdd_gfx = v_3_3->bootup_vddgfx_mv;
		smu->smu_table.boot_values.cooling_id = v_3_3->coolingsolution_id;
		smu->smu_table.boot_values.pp_table_id = v_3_3->pplib_pptable_id;
		smu->smu_table.boot_values.firmware_caps = v_3_3->firmware_capability;

	smu->smu_table.boot_values.format_revision = header->format_revision;
	smu->smu_table.boot_values.content_revision = header->content_revision;

	smu_v11_0_atom_get_smu_clockinfo(smu->adev,
					 (uint8_t)SMU11_SYSPLL0_SOCCLK_ID,
					 &smu->smu_table.boot_values.socclk);

	smu_v11_0_atom_get_smu_clockinfo(smu->adev,
					 (uint8_t)SMU11_SYSPLL0_DCEFCLK_ID,
					 &smu->smu_table.boot_values.dcefclk);

	smu_v11_0_atom_get_smu_clockinfo(smu->adev,
					 (uint8_t)SMU11_SYSPLL0_ECLK_ID,
					 &smu->smu_table.boot_values.eclk);

	smu_v11_0_atom_get_smu_clockinfo(smu->adev,
					 (uint8_t)SMU11_SYSPLL0_VCLK_ID,
					 &smu->smu_table.boot_values.vclk);

	smu_v11_0_atom_get_smu_clockinfo(smu->adev,
					 (uint8_t)SMU11_SYSPLL0_DCLK_ID,
					 &smu->smu_table.boot_values.dclk);

	if ((smu->smu_table.boot_values.format_revision == 3) &&
	    (smu->smu_table.boot_values.content_revision >= 2))
		smu_v11_0_atom_get_smu_clockinfo(smu->adev,
						 (uint8_t)SMU11_SYSPLL1_0_FCLK_ID,
						 (uint8_t)SMU11_SYSPLL1_2_ID,
						 &smu->smu_table.boot_values.fclk);

	smu_v11_0_atom_get_smu_clockinfo(smu->adev,
					 (uint8_t)SMU11_SYSPLL3_1_LCLK_ID,
					 (uint8_t)SMU11_SYSPLL3_1_ID,
					 &smu->smu_table.boot_values.lclk);
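/*
 * Report the driver-allocated memory pool to the SMU: the CPU virtual
 * address and the GPU (MC) address are each split into high/low 32-bit
 * halves and sent via the SetSystemVirtualDramAddrHigh/Low and
 * DramLogSetDramAddrHigh/Low messages, followed by the pool size.
 */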
int smu_v11_0_notify_memory_pool_location(struct smu_context *smu)
	struct smu_table_context *smu_table = &smu->smu_table;
	struct smu_table *memory_pool = &smu_table->memory_pool;
	uint32_t address_low, address_high;

	if (memory_pool->size == 0 || memory_pool->cpu_addr == NULL)

	address = (uintptr_t)memory_pool->cpu_addr;
	address_high = (uint32_t)upper_32_bits(address);
	address_low = (uint32_t)lower_32_bits(address);

	ret = smu_cmn_send_smc_msg_with_param(smu,
					      SMU_MSG_SetSystemVirtualDramAddrHigh,
	ret = smu_cmn_send_smc_msg_with_param(smu,
					      SMU_MSG_SetSystemVirtualDramAddrLow,

	address = memory_pool->mc_address;
	address_high = (uint32_t)upper_32_bits(address);
	address_low = (uint32_t)lower_32_bits(address);

	ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_DramLogSetDramAddrHigh,
	ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_DramLogSetDramAddrLow,
	ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_DramLogSetDramSize,
					      (uint32_t)memory_pool->size, NULL);

int smu_v11_0_set_min_deep_sleep_dcefclk(struct smu_context *smu, uint32_t clk)
	ret = smu_cmn_send_smc_msg_with_param(smu,
					      SMU_MSG_SetMinDeepSleepDcefclk, clk, NULL);
		dev_err(smu->adev->dev, "SMU11 attempt to set divider for DCEFCLK Failed!");

int smu_v11_0_set_driver_table_location(struct smu_context *smu)
	struct smu_table *driver_table = &smu->smu_table.driver_table;

	if (driver_table->mc_address) {
		ret = smu_cmn_send_smc_msg_with_param(smu,
						      SMU_MSG_SetDriverDramAddrHigh,
						      upper_32_bits(driver_table->mc_address),
		ret = smu_cmn_send_smc_msg_with_param(smu,
						      SMU_MSG_SetDriverDramAddrLow,
						      lower_32_bits(driver_table->mc_address),

int smu_v11_0_set_tool_table_location(struct smu_context *smu)
	struct smu_table *tool_table = &smu->smu_table.tables[SMU_TABLE_PMSTATUSLOG];

	if (tool_table->mc_address) {
		ret = smu_cmn_send_smc_msg_with_param(smu,
						      SMU_MSG_SetToolsDramAddrHigh,
						      upper_32_bits(tool_table->mc_address),
		ret = smu_cmn_send_smc_msg_with_param(smu,
						      SMU_MSG_SetToolsDramAddrLow,
						      lower_32_bits(tool_table->mc_address),

int smu_v11_0_init_display_count(struct smu_context *smu, uint32_t count)
	struct amdgpu_device *adev = smu->adev;

	/*
	 * Navy_Flounder/Dimgrey_Cavefish do not support changing the
	 * display count currently.
	 */
	if (adev->asic_type >= CHIP_NAVY_FLOUNDER &&
	    adev->asic_type <= CHIP_DIMGREY_CAVEFISH)

	return smu_cmn_send_smc_msg_with_param(smu,
					       SMU_MSG_NumOfDisplays,
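/*
 * The allowed-feature bitmap is 64 bits wide and is handed to the SMU
 * as two 32-bit halves: SetAllowedFeaturesMaskHigh first, then
 * SetAllowedFeaturesMaskLow.
 */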
int smu_v11_0_set_allowed_mask(struct smu_context *smu)
	struct smu_feature *feature = &smu->smu_feature;
	uint32_t feature_mask[2];

	if (bitmap_empty(feature->allowed, SMU_FEATURE_MAX) || feature->feature_num < 64)

	bitmap_copy((unsigned long *)feature_mask, feature->allowed, 64);

	ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_SetAllowedFeaturesMaskHigh,
					      feature_mask[1], NULL);

	ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_SetAllowedFeaturesMaskLow,
					      feature_mask[0], NULL);

int smu_v11_0_system_features_control(struct smu_context *smu,
	struct smu_feature *feature = &smu->smu_feature;
	uint32_t feature_mask[2];

	ret = smu_cmn_send_smc_msg(smu, (en ? SMU_MSG_EnableAllSmuFeatures :
					 SMU_MSG_DisableAllSmuFeatures), NULL);

	bitmap_zero(feature->enabled, feature->feature_num);
	bitmap_zero(feature->supported, feature->feature_num);

	ret = smu_cmn_get_enabled_mask(smu, feature_mask, 2);

	bitmap_copy(feature->enabled, (unsigned long *)&feature_mask,
		    feature->feature_num);
	bitmap_copy(feature->supported, (unsigned long *)&feature_mask,
		    feature->feature_num);

int smu_v11_0_notify_display_change(struct smu_context *smu)
	if (smu_cmn_feature_is_enabled(smu, SMU_FEATURE_DPM_UCLK_BIT) &&
	    smu->adev->gmc.vram_type == AMDGPU_VRAM_TYPE_HBM)
		ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_SetUclkFastSwitch, 1, NULL);

smu_v11_0_get_max_sustainable_clock(struct smu_context *smu, uint32_t *clock,
				    enum smu_clk_type clock_select)
	if ((smu_cmn_to_asic_specific_index(smu, CMN2ASIC_MAPPING_MSG, SMU_MSG_GetDcModeMaxDpmFreq) < 0) ||
	    (smu_cmn_to_asic_specific_index(smu, CMN2ASIC_MAPPING_MSG, SMU_MSG_GetMaxDpmFreq) < 0))

	clk_id = smu_cmn_to_asic_specific_index(smu,
						CMN2ASIC_MAPPING_CLK,

	ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_GetDcModeMaxDpmFreq,
					      clk_id << 16, clock);
		dev_err(smu->adev->dev, "[GetMaxSustainableClock] Failed to get max DC clock from SMC!");

	/* if DC limit is zero, return AC limit */
	ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_GetMaxDpmFreq,
					      clk_id << 16, clock);
		dev_err(smu->adev->dev, "[GetMaxSustainableClock] failed to get max AC clock from SMC!");

int smu_v11_0_init_max_sustainable_clocks(struct smu_context *smu)
	struct smu_11_0_max_sustainable_clocks *max_sustainable_clocks =
		smu->smu_table.max_sustainable_clocks;

	max_sustainable_clocks->uclock = smu->smu_table.boot_values.uclk / 100;
	max_sustainable_clocks->soc_clock = smu->smu_table.boot_values.socclk / 100;
	max_sustainable_clocks->dcef_clock = smu->smu_table.boot_values.dcefclk / 100;
	max_sustainable_clocks->display_clock = 0xFFFFFFFF;
	max_sustainable_clocks->phy_clock = 0xFFFFFFFF;
	max_sustainable_clocks->pixel_clock = 0xFFFFFFFF;

	if (smu_cmn_feature_is_enabled(smu, SMU_FEATURE_DPM_UCLK_BIT)) {
		ret = smu_v11_0_get_max_sustainable_clock(smu,
							  &(max_sustainable_clocks->uclock),
			dev_err(smu->adev->dev, "[%s] failed to get max UCLK from SMC!",

	if (smu_cmn_feature_is_enabled(smu, SMU_FEATURE_DPM_SOCCLK_BIT)) {
		ret = smu_v11_0_get_max_sustainable_clock(smu,
							  &(max_sustainable_clocks->soc_clock),
			dev_err(smu->adev->dev, "[%s] failed to get max SOCCLK from SMC!",

	if (smu_cmn_feature_is_enabled(smu, SMU_FEATURE_DPM_DCEFCLK_BIT)) {
		ret = smu_v11_0_get_max_sustainable_clock(smu,
							  &(max_sustainable_clocks->dcef_clock),
			dev_err(smu->adev->dev, "[%s] failed to get max DCEFCLK from SMC!",

		ret = smu_v11_0_get_max_sustainable_clock(smu,
							  &(max_sustainable_clocks->display_clock),
			dev_err(smu->adev->dev, "[%s] failed to get max DISPCLK from SMC!",

		ret = smu_v11_0_get_max_sustainable_clock(smu,
							  &(max_sustainable_clocks->phy_clock),
			dev_err(smu->adev->dev, "[%s] failed to get max PHYCLK from SMC!",

		ret = smu_v11_0_get_max_sustainable_clock(smu,
							  &(max_sustainable_clocks->pixel_clock),
			dev_err(smu->adev->dev, "[%s] failed to get max PIXCLK from SMC!",

	if (max_sustainable_clocks->soc_clock < max_sustainable_clocks->uclock)
		max_sustainable_clocks->uclock = max_sustainable_clocks->soc_clock;
int smu_v11_0_get_current_power_limit(struct smu_context *smu,
				      uint32_t *power_limit)
	if (!smu_cmn_feature_is_enabled(smu, SMU_FEATURE_PPT_BIT))

	power_src = smu_cmn_to_asic_specific_index(smu,
						   CMN2ASIC_MAPPING_PWR,
						   smu->adev->pm.ac_power ?
						   SMU_POWER_SOURCE_AC :
						   SMU_POWER_SOURCE_DC);

	/*
	 * BIT 24-31: ControllerId (only PPT0 is supported for now)
	 * BIT 16-23: PowerSource
	 */
	ret = smu_cmn_send_smc_msg_with_param(smu,
					      (0 << 24) | (power_src << 16),
		dev_err(smu->adev->dev, "[%s] get PPT limit failed!", __func__);

int smu_v11_0_set_power_limit(struct smu_context *smu, uint32_t n)
	if (!smu_cmn_feature_is_enabled(smu, SMU_FEATURE_PPT_BIT)) {
		dev_err(smu->adev->dev, "Setting new power limit is not supported!\n");

	power_src = smu_cmn_to_asic_specific_index(smu,
						   CMN2ASIC_MAPPING_PWR,
						   smu->adev->pm.ac_power ?
						   SMU_POWER_SOURCE_AC :
						   SMU_POWER_SOURCE_DC);

	/*
	 * BIT 24-31: ControllerId (only PPT0 is supported for now)
	 * BIT 16-23: PowerSource
	 * BIT 0-15: PowerLimit
	 */
	n |= (power_src) << 16;
	ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_SetPptLimit, n, NULL);
		dev_err(smu->adev->dev, "[%s] Set power limit Failed!\n", __func__);

	smu->current_power_limit = n;

static int smu_v11_0_ack_ac_dc_interrupt(struct smu_context *smu)
	return smu_cmn_send_smc_msg(smu,
				    SMU_MSG_ReenableAcDcInterrupt,

static int smu_v11_0_process_pending_interrupt(struct smu_context *smu)
	if (smu->dc_controlled_by_gpio &&
	    smu_cmn_feature_is_enabled(smu, SMU_FEATURE_ACDC_BIT))
		ret = smu_v11_0_ack_ac_dc_interrupt(smu);

void smu_v11_0_interrupt_work(struct smu_context *smu)
	if (smu_v11_0_ack_ac_dc_interrupt(smu))
		dev_err(smu->adev->dev, "Ack AC/DC interrupt Failed!\n");

int smu_v11_0_enable_thermal_alert(struct smu_context *smu)
	if (smu->smu_table.thermal_controller_type) {
		ret = amdgpu_irq_get(smu->adev, &smu->irq_source, 0);

	/*
	 * After init, there may be missed interrupts that were triggered
	 * before the driver registered for them (e.g. AC/DC).
	 */
	return smu_v11_0_process_pending_interrupt(smu);

int smu_v11_0_disable_thermal_alert(struct smu_context *smu)
	return amdgpu_irq_put(smu->adev, &smu->irq_source, 0);
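/*
 * SVI0 telemetry VID decode: (6200 - vid * 25) is the core voltage in
 * 0.25 mV units (1.55 V at VID 0, stepping down 6.25 mV per code, the
 * usual SVI2 encoding); dividing by SMU11_VOLTAGE_SCALE (4) yields
 * millivolts.
 */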
static uint16_t convert_to_vddc(uint8_t vid)
	return (uint16_t) ((6200 - (vid * 25)) / SMU11_VOLTAGE_SCALE);

int smu_v11_0_get_gfx_vdd(struct smu_context *smu, uint32_t *value)
	struct amdgpu_device *adev = smu->adev;
	uint32_t vdd = 0, val_vid = 0;

	val_vid = (RREG32_SOC15(SMUIO, 0, mmSMUSVI0_TEL_PLANE0) &
		SMUSVI0_TEL_PLANE0__SVI0_PLANE0_VDDCOR_MASK) >>
		SMUSVI0_TEL_PLANE0__SVI0_PLANE0_VDDCOR__SHIFT;

	vdd = (uint32_t)convert_to_vddc((uint8_t)val_vid);

smu_v11_0_display_clock_voltage_request(struct smu_context *smu,
					struct pp_display_clock_request
	enum amd_pp_clock_type clk_type = clock_req->clock_type;
	enum smu_clk_type clk_select = 0;
	uint32_t clk_freq = clock_req->clock_freq_in_khz / 1000;

	if (smu_cmn_feature_is_enabled(smu, SMU_FEATURE_DPM_DCEFCLK_BIT) ||
	    smu_cmn_feature_is_enabled(smu, SMU_FEATURE_DPM_UCLK_BIT)) {
		case amd_pp_dcef_clock:
			clk_select = SMU_DCEFCLK;
		case amd_pp_disp_clock:
			clk_select = SMU_DISPCLK;
		case amd_pp_pixel_clock:
			clk_select = SMU_PIXCLK;
		case amd_pp_phy_clock:
			clk_select = SMU_PHYCLK;
		case amd_pp_mem_clock:
			clk_select = SMU_UCLK;
			dev_info(smu->adev->dev, "[%s] Invalid Clock Type!", __func__);

		if (clk_select == SMU_UCLK && smu->disable_uclk_switch)

		ret = smu_v11_0_set_hard_freq_limited_range(smu, clk_select, clk_freq, 0);

		if (clk_select == SMU_UCLK)
			smu->hard_min_uclk_req_from_dal = clk_freq;

int smu_v11_0_gfx_off_control(struct smu_context *smu, bool enable)
	struct amdgpu_device *adev = smu->adev;

	switch (adev->asic_type) {
	case CHIP_SIENNA_CICHLID:
	case CHIP_NAVY_FLOUNDER:
	case CHIP_DIMGREY_CAVEFISH:
		if (!(adev->pm.pp_feature & PP_GFXOFF_MASK))
			ret = smu_cmn_send_smc_msg(smu, SMU_MSG_AllowGfxOff, NULL);
			ret = smu_cmn_send_smc_msg(smu, SMU_MSG_DisallowGfxOff, NULL);

smu_v11_0_get_fan_control_mode(struct smu_context *smu)
	if (!smu_cmn_feature_is_enabled(smu, SMU_FEATURE_FAN_CONTROL_BIT))
		return AMD_FAN_CTRL_MANUAL;
		return AMD_FAN_CTRL_AUTO;

smu_v11_0_auto_fan_control(struct smu_context *smu, bool auto_fan_control)
	if (!smu_cmn_feature_is_supported(smu, SMU_FEATURE_FAN_CONTROL_BIT))

	ret = smu_cmn_feature_set_enabled(smu, SMU_FEATURE_FAN_CONTROL_BIT, auto_fan_control);
		dev_err(smu->adev->dev, "[%s]%s smc FAN CONTROL feature failed!",
			__func__, (auto_fan_control ? "Start" : "Stop"));

smu_v11_0_set_fan_static_mode(struct smu_context *smu, uint32_t mode)
	struct amdgpu_device *adev = smu->adev;

	WREG32_SOC15(THM, 0, mmCG_FDO_CTRL2,
		     REG_SET_FIELD(RREG32_SOC15(THM, 0, mmCG_FDO_CTRL2),
				   CG_FDO_CTRL2, TMIN, 0));
	WREG32_SOC15(THM, 0, mmCG_FDO_CTRL2,
		     REG_SET_FIELD(RREG32_SOC15(THM, 0, mmCG_FDO_CTRL2),
				   CG_FDO_CTRL2, FDO_PWM_MODE, mode));

smu_v11_0_set_fan_control_mode(struct smu_context *smu,
	case AMD_FAN_CTRL_NONE:
		ret = smu_v11_0_set_fan_speed_rpm(smu, smu->fan_max_rpm);
	case AMD_FAN_CTRL_MANUAL:
		ret = smu_v11_0_auto_fan_control(smu, 0);
	case AMD_FAN_CTRL_AUTO:
		ret = smu_v11_0_auto_fan_control(smu, 1);

		dev_err(smu->adev->dev, "[%s]Set fan control mode failed!", __func__);
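/*
 * Fixed-RPM fan control: automatic fan control is disabled first, the
 * requested RPM is converted into a tachometer target period in
 * crystal-clock ticks (60 * crystal_clock_freq * 10000 / (8 * speed)),
 * programmed through CG_TACH_CTRL, and the fan is left in static RPM
 * mode. smu_v11_0_get_fan_speed_rpm() applies the inverse conversion.
 */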
int smu_v11_0_set_fan_speed_rpm(struct smu_context *smu,
	struct amdgpu_device *adev = smu->adev;
	uint32_t tach_period, crystal_clock_freq;

	ret = smu_v11_0_auto_fan_control(smu, 0);

	/*
	 * crystal_clock_freq is divided by 4 because the fan control
	 * module works off a 25 MHz reference clock.
	 */
	crystal_clock_freq = amdgpu_asic_get_xclk(adev) / 4;
	tach_period = 60 * crystal_clock_freq * 10000 / (8 * speed);
	WREG32_SOC15(THM, 0, mmCG_TACH_CTRL,
		     REG_SET_FIELD(RREG32_SOC15(THM, 0, mmCG_TACH_CTRL),
				   CG_TACH_CTRL, TARGET_PERIOD,

	ret = smu_v11_0_set_fan_static_mode(smu, FDO_PWM_MODE_STATIC_RPM);

int smu_v11_0_get_fan_speed_rpm(struct smu_context *smu,
	struct amdgpu_device *adev = smu->adev;
	uint32_t tach_period, crystal_clock_freq;

	tach_period = REG_GET_FIELD(RREG32_SOC15(THM, 0, mmCG_TACH_CTRL),
				    CG_TACH_CTRL, TARGET_PERIOD);

	crystal_clock_freq = amdgpu_asic_get_xclk(adev);

	tmp64 = (uint64_t)crystal_clock_freq * 60 * 10000;
	do_div(tmp64, (tach_period * 8));
	*speed = (uint32_t)tmp64;

int smu_v11_0_set_xgmi_pstate(struct smu_context *smu,
	return smu_cmn_send_smc_msg_with_param(smu,
					       SMU_MSG_SetXgmiMode,
					       pstate ? XGMI_MODE_PSTATE_D0 : XGMI_MODE_PSTATE_D3,

static int smu_v11_0_set_irq_state(struct amdgpu_device *adev,
				   struct amdgpu_irq_src *source,
				   enum amdgpu_interrupt_state state)
	struct smu_context *smu = &adev->smu;

	case AMDGPU_IRQ_STATE_DISABLE:
		val = RREG32_SOC15(THM, 0, mmTHM_THERMAL_INT_CTRL);
		val = REG_SET_FIELD(val, THM_THERMAL_INT_CTRL, THERM_INTH_MASK, 1);
		val = REG_SET_FIELD(val, THM_THERMAL_INT_CTRL, THERM_INTL_MASK, 1);
		WREG32_SOC15(THM, 0, mmTHM_THERMAL_INT_CTRL, val);

		WREG32_SOC15(THM, 0, mmTHM_THERMAL_INT_ENA, 0);

		/* For MP1 SW irqs */
		val = RREG32_SOC15(MP1, 0, mmMP1_SMN_IH_SW_INT_CTRL);
		val = REG_SET_FIELD(val, MP1_SMN_IH_SW_INT_CTRL, INT_MASK, 1);
		WREG32_SOC15(MP1, 0, mmMP1_SMN_IH_SW_INT_CTRL, val);

	case AMDGPU_IRQ_STATE_ENABLE:
		low = max(SMU_THERMAL_MINIMUM_ALERT_TEMP,
			  smu->thermal_range.min / SMU_TEMPERATURE_UNITS_PER_CENTIGRADES);
		high = min(SMU_THERMAL_MAXIMUM_ALERT_TEMP,
			   smu->thermal_range.software_shutdown_temp);

		val = RREG32_SOC15(THM, 0, mmTHM_THERMAL_INT_CTRL);
		val = REG_SET_FIELD(val, THM_THERMAL_INT_CTRL, MAX_IH_CREDIT, 5);
		val = REG_SET_FIELD(val, THM_THERMAL_INT_CTRL, THERM_IH_HW_ENA, 1);
		val = REG_SET_FIELD(val, THM_THERMAL_INT_CTRL, THERM_INTH_MASK, 0);
		val = REG_SET_FIELD(val, THM_THERMAL_INT_CTRL, THERM_INTL_MASK, 0);
		val = REG_SET_FIELD(val, THM_THERMAL_INT_CTRL, DIG_THERM_INTH, (high & 0xff));
		val = REG_SET_FIELD(val, THM_THERMAL_INT_CTRL, DIG_THERM_INTL, (low & 0xff));
		val = val & (~THM_THERMAL_INT_CTRL__THERM_TRIGGER_MASK_MASK);
		WREG32_SOC15(THM, 0, mmTHM_THERMAL_INT_CTRL, val);

		val = (1 << THM_THERMAL_INT_ENA__THERM_INTH_CLR__SHIFT);
		val |= (1 << THM_THERMAL_INT_ENA__THERM_INTL_CLR__SHIFT);
		val |= (1 << THM_THERMAL_INT_ENA__THERM_TRIGGER_CLR__SHIFT);
		WREG32_SOC15(THM, 0, mmTHM_THERMAL_INT_ENA, val);

		/* For MP1 SW irqs */
		val = RREG32_SOC15(MP1, 0, mmMP1_SMN_IH_SW_INT);
		val = REG_SET_FIELD(val, MP1_SMN_IH_SW_INT, ID, 0xFE);
		val = REG_SET_FIELD(val, MP1_SMN_IH_SW_INT, VALID, 0);
		WREG32_SOC15(MP1, 0, mmMP1_SMN_IH_SW_INT, val);

		val = RREG32_SOC15(MP1, 0, mmMP1_SMN_IH_SW_INT_CTRL);
		val = REG_SET_FIELD(val, MP1_SMN_IH_SW_INT_CTRL, INT_MASK, 0);
		WREG32_SOC15(MP1, 0, mmMP1_SMN_IH_SW_INT_CTRL, val);
#define THM_11_0__SRCID__THM_DIG_THERM_L2H	0	/* ASIC_TEMP > CG_THERMAL_INT.DIG_THERM_INTH */
#define THM_11_0__SRCID__THM_DIG_THERM_H2L	1	/* ASIC_TEMP < CG_THERMAL_INT.DIG_THERM_INTL */

#define SMUIO_11_0__SRCID__SMUIO_GPIO19		83
static int smu_v11_0_irq_process(struct amdgpu_device *adev,
				 struct amdgpu_irq_src *source,
				 struct amdgpu_iv_entry *entry)
	struct smu_context *smu = &adev->smu;
	uint32_t client_id = entry->client_id;
	uint32_t src_id = entry->src_id;
	/*
	 * ctxid is used to distinguish different
	 * events for SMCToHost interrupt.
	 */
	uint32_t ctxid = entry->src_data[0];

	if (client_id == SOC15_IH_CLIENTID_THM) {
		case THM_11_0__SRCID__THM_DIG_THERM_L2H:
			dev_emerg(adev->dev, "ERROR: GPU over temperature range(SW CTF) detected!\n");
			/*
			 * SW CTF just occurred.
			 * Try to do a graceful shutdown to prevent further damage.
			 */
			dev_emerg(adev->dev, "ERROR: System is going to shutdown due to GPU SW CTF!\n");
			orderly_poweroff(true);
		case THM_11_0__SRCID__THM_DIG_THERM_H2L:
			dev_emerg(adev->dev, "ERROR: GPU under temperature range detected\n");
			dev_emerg(adev->dev, "ERROR: GPU under temperature range unknown src id (%d)\n",
	} else if (client_id == SOC15_IH_CLIENTID_ROM_SMUIO) {
		dev_emerg(adev->dev, "ERROR: GPU HW Critical Temperature Fault(aka CTF) detected!\n");
		/*
		 * HW CTF just occurred. Shutdown to prevent further damage.
		 */
		dev_emerg(adev->dev, "ERROR: System is going to shutdown due to GPU HW CTF!\n");
		orderly_poweroff(true);
	} else if (client_id == SOC15_IH_CLIENTID_MP1) {
		if (src_id == 0xfe) {
			/* ACK SMUToHost interrupt */
			data = RREG32_SOC15(MP1, 0, mmMP1_SMN_IH_SW_INT_CTRL);
			data = REG_SET_FIELD(data, MP1_SMN_IH_SW_INT_CTRL, INT_ACK, 1);
			WREG32_SOC15(MP1, 0, mmMP1_SMN_IH_SW_INT_CTRL, data);

				dev_dbg(adev->dev, "Switched to AC mode!\n");
				schedule_work(&smu->interrupt_work);
				dev_dbg(adev->dev, "Switched to DC mode!\n");
				schedule_work(&smu->interrupt_work);
				/*
				 * Increment the throttle interrupt counter
				 */
				atomic64_inc(&smu->throttle_int_counter);

				if (!atomic_read(&adev->throttling_logging_enabled))

				if (__ratelimit(&adev->throttling_logging_rs))
					schedule_work(&smu->throttling_logging_work);
static const struct amdgpu_irq_src_funcs smu_v11_0_irq_funcs =
	.set = smu_v11_0_set_irq_state,
	.process = smu_v11_0_irq_process,

int smu_v11_0_register_irq_handler(struct smu_context *smu)
	struct amdgpu_device *adev = smu->adev;
	struct amdgpu_irq_src *irq_src = &smu->irq_source;

	irq_src->num_types = 1;
	irq_src->funcs = &smu_v11_0_irq_funcs;

	ret = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_THM,
				THM_11_0__SRCID__THM_DIG_THERM_L2H,

	ret = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_THM,
				THM_11_0__SRCID__THM_DIG_THERM_H2L,

	/* Register CTF(GPIO_19) interrupt */
	ret = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_ROM_SMUIO,
				SMUIO_11_0__SRCID__SMUIO_GPIO19,

	ret = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_MP1,

int smu_v11_0_get_max_sustainable_clocks_by_dc(struct smu_context *smu,
					       struct pp_smu_nv_clock_table *max_clocks)
	struct smu_table_context *table_context = &smu->smu_table;
	struct smu_11_0_max_sustainable_clocks *sustainable_clocks = NULL;

	if (!max_clocks || !table_context->max_sustainable_clocks)

	sustainable_clocks = table_context->max_sustainable_clocks;

	max_clocks->dcfClockInKhz =
		(unsigned int) sustainable_clocks->dcef_clock * 1000;
	max_clocks->displayClockInKhz =
		(unsigned int) sustainable_clocks->display_clock * 1000;
	max_clocks->phyClockInKhz =
		(unsigned int) sustainable_clocks->phy_clock * 1000;
	max_clocks->pixelClockInKhz =
		(unsigned int) sustainable_clocks->pixel_clock * 1000;
	max_clocks->uClockInKhz =
		(unsigned int) sustainable_clocks->uclock * 1000;
	max_clocks->socClockInKhz =
		(unsigned int) sustainable_clocks->soc_clock * 1000;
	max_clocks->dscClockInKhz = 0;
	max_clocks->dppClockInKhz = 0;
	max_clocks->fabricClockInKhz = 0;

int smu_v11_0_set_azalia_d3_pme(struct smu_context *smu)
	return smu_cmn_send_smc_msg(smu, SMU_MSG_BacoAudioD3PME, NULL);

static int smu_v11_0_baco_set_armd3_sequence(struct smu_context *smu, enum smu_v11_0_baco_seq baco_seq)
	return smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_ArmD3, baco_seq, NULL);

bool smu_v11_0_baco_is_support(struct smu_context *smu)
	struct smu_baco_context *smu_baco = &smu->smu_baco;

	if (!smu_baco->platform_support)

	/* Arcturus does not support this bit mask */
	if (smu_cmn_feature_is_supported(smu, SMU_FEATURE_BACO_BIT) &&
	    !smu_cmn_feature_is_enabled(smu, SMU_FEATURE_BACO_BIT))

enum smu_baco_state smu_v11_0_baco_get_state(struct smu_context *smu)
	struct smu_baco_context *smu_baco = &smu->smu_baco;
	enum smu_baco_state baco_state;

	mutex_lock(&smu_baco->mutex);
	baco_state = smu_baco->state;
	mutex_unlock(&smu_baco->mutex);
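/*
 * BACO entry sequence selection (see smu_v11_0_baco_set_state() below):
 * when amdgpu_runtime_pm == 2 the BAMACO sequence is armed instead of
 * plain BACO; BAMACO is understood here to be BACO with memory kept
 * powered (assumption, based on the naming).
 */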
#define D3HOT_BACO_SEQUENCE 0
#define D3HOT_BAMACO_SEQUENCE 2

int smu_v11_0_baco_set_state(struct smu_context *smu, enum smu_baco_state state)
	struct smu_baco_context *smu_baco = &smu->smu_baco;
	struct amdgpu_device *adev = smu->adev;
	struct amdgpu_ras *ras = amdgpu_ras_get_context(adev);

	if (smu_v11_0_baco_get_state(smu) == state)

	mutex_lock(&smu_baco->mutex);

	if (state == SMU_BACO_STATE_ENTER) {
		switch (adev->asic_type) {
		case CHIP_SIENNA_CICHLID:
		case CHIP_NAVY_FLOUNDER:
		case CHIP_DIMGREY_CAVEFISH:
			if (amdgpu_runtime_pm == 2)
				ret = smu_cmn_send_smc_msg_with_param(smu,
								      D3HOT_BAMACO_SEQUENCE,
				ret = smu_cmn_send_smc_msg_with_param(smu,
								      D3HOT_BACO_SEQUENCE,
			if (!ras || !ras->supported) {
				data = RREG32_SOC15(THM, 0, mmTHM_BACO_CNTL);
				WREG32_SOC15(THM, 0, mmTHM_BACO_CNTL, data);

				ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_EnterBaco, 0, NULL);
				ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_EnterBaco, 1, NULL);

		ret = smu_cmn_send_smc_msg(smu, SMU_MSG_ExitBaco, NULL);

		/* clear vbios scratch 6 and 7 for coming asic reinit */
		WREG32(adev->bios_scratch_reg_offset + 6, 0);
		WREG32(adev->bios_scratch_reg_offset + 7, 0);

	smu_baco->state = state;

	mutex_unlock(&smu_baco->mutex);

int smu_v11_0_baco_enter(struct smu_context *smu)
	struct amdgpu_device *adev = smu->adev;

	/* Arcturus does not need this audio workaround */
	if (adev->asic_type != CHIP_ARCTURUS) {
		ret = smu_v11_0_baco_set_armd3_sequence(smu, BACO_SEQ_BACO);

	ret = smu_v11_0_baco_set_state(smu, SMU_BACO_STATE_ENTER);

int smu_v11_0_baco_exit(struct smu_context *smu)
	return smu_v11_0_baco_set_state(smu, SMU_BACO_STATE_EXIT);

int smu_v11_0_mode1_reset(struct smu_context *smu)
	ret = smu_cmn_send_smc_msg(smu, SMU_MSG_Mode1Reset, NULL);
		msleep(SMU11_MODE1_RESET_WAIT_TIME_IN_MS);
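/*
 * For the DPM frequency queries and limit setters below, the SMU message
 * parameter packs the ASIC-specific clock id into bits 31:16 and the
 * frequency in MHz (or the DPM level index) into bits 15:0.
 */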
int smu_v11_0_get_dpm_ultimate_freq(struct smu_context *smu, enum smu_clk_type clk_type,
				    uint32_t *min, uint32_t *max)
	int ret = 0, clk_id = 0;
	uint32_t clock_limit;

	if (!smu_cmn_clk_dpm_is_enabled(smu, clk_type)) {
			clock_limit = smu->smu_table.boot_values.uclk;
			clock_limit = smu->smu_table.boot_values.gfxclk;
			clock_limit = smu->smu_table.boot_values.socclk;

		/* clock in MHz */
			*min = clock_limit / 100;
			*max = clock_limit / 100;

	clk_id = smu_cmn_to_asic_specific_index(smu,
						CMN2ASIC_MAPPING_CLK,

	param = (clk_id & 0xffff) << 16;

		ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_GetMaxDpmFreq, param, max);

		ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_GetMinDpmFreq, param, min);

int smu_v11_0_set_soft_freq_limited_range(struct smu_context *smu,
					  enum smu_clk_type clk_type,
	struct amdgpu_device *adev = smu->adev;
	int ret = 0, clk_id = 0;

	if (!smu_cmn_clk_dpm_is_enabled(smu, clk_type))

	clk_id = smu_cmn_to_asic_specific_index(smu,
						CMN2ASIC_MAPPING_CLK,

	if (clk_type == SMU_GFXCLK)
		amdgpu_gfx_off_ctrl(adev, false);

		param = (uint32_t)((clk_id << 16) | (max & 0xffff));
		ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_SetSoftMaxByFreq,

		param = (uint32_t)((clk_id << 16) | (min & 0xffff));
		ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_SetSoftMinByFreq,

	if (clk_type == SMU_GFXCLK)
		amdgpu_gfx_off_ctrl(adev, true);

int smu_v11_0_set_hard_freq_limited_range(struct smu_context *smu,
					  enum smu_clk_type clk_type,
	int ret = 0, clk_id = 0;

	if (min <= 0 && max <= 0)

	if (!smu_cmn_clk_dpm_is_enabled(smu, clk_type))

	clk_id = smu_cmn_to_asic_specific_index(smu,
						CMN2ASIC_MAPPING_CLK,

		param = (uint32_t)((clk_id << 16) | (max & 0xffff));
		ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_SetHardMaxByFreq,

		param = (uint32_t)((clk_id << 16) | (min & 0xffff));
		ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_SetHardMinByFreq,
int smu_v11_0_set_performance_level(struct smu_context *smu,
				    enum amd_dpm_forced_level level)
	struct smu_11_0_dpm_context *dpm_context =
		smu->smu_dpm.dpm_context;
	struct smu_11_0_dpm_table *gfx_table =
		&dpm_context->dpm_tables.gfx_table;
	struct smu_11_0_dpm_table *mem_table =
		&dpm_context->dpm_tables.uclk_table;
	struct smu_11_0_dpm_table *soc_table =
		&dpm_context->dpm_tables.soc_table;
	struct smu_umd_pstate_table *pstate_table =
	struct amdgpu_device *adev = smu->adev;
	uint32_t sclk_min = 0, sclk_max = 0;
	uint32_t mclk_min = 0, mclk_max = 0;
	uint32_t socclk_min = 0, socclk_max = 0;

	case AMD_DPM_FORCED_LEVEL_HIGH:
		sclk_min = sclk_max = gfx_table->max;
		mclk_min = mclk_max = mem_table->max;
		socclk_min = socclk_max = soc_table->max;
	case AMD_DPM_FORCED_LEVEL_LOW:
		sclk_min = sclk_max = gfx_table->min;
		mclk_min = mclk_max = mem_table->min;
		socclk_min = socclk_max = soc_table->min;
	case AMD_DPM_FORCED_LEVEL_AUTO:
		sclk_min = gfx_table->min;
		sclk_max = gfx_table->max;
		mclk_min = mem_table->min;
		mclk_max = mem_table->max;
		socclk_min = soc_table->min;
		socclk_max = soc_table->max;
	case AMD_DPM_FORCED_LEVEL_PROFILE_STANDARD:
		sclk_min = sclk_max = pstate_table->gfxclk_pstate.standard;
		mclk_min = mclk_max = pstate_table->uclk_pstate.standard;
		socclk_min = socclk_max = pstate_table->socclk_pstate.standard;
	case AMD_DPM_FORCED_LEVEL_PROFILE_MIN_SCLK:
		sclk_min = sclk_max = pstate_table->gfxclk_pstate.min;
	case AMD_DPM_FORCED_LEVEL_PROFILE_MIN_MCLK:
		mclk_min = mclk_max = pstate_table->uclk_pstate.min;
	case AMD_DPM_FORCED_LEVEL_PROFILE_PEAK:
		sclk_min = sclk_max = pstate_table->gfxclk_pstate.peak;
		mclk_min = mclk_max = pstate_table->uclk_pstate.peak;
		socclk_min = socclk_max = pstate_table->socclk_pstate.peak;
	case AMD_DPM_FORCED_LEVEL_MANUAL:
	case AMD_DPM_FORCED_LEVEL_PROFILE_EXIT:
		dev_err(adev->dev, "Invalid performance level %d\n", level);

	/*
	 * Separate MCLK and SOCCLK soft min/max settings are not allowed
	 * on Arcturus.
	 */
	if (adev->asic_type == CHIP_ARCTURUS) {
		mclk_min = mclk_max = 0;
		socclk_min = socclk_max = 0;

	if (sclk_min && sclk_max) {
		ret = smu_v11_0_set_soft_freq_limited_range(smu,

	if (mclk_min && mclk_max) {
		ret = smu_v11_0_set_soft_freq_limited_range(smu,

	if (socclk_min && socclk_max) {
		ret = smu_v11_0_set_soft_freq_limited_range(smu,
int smu_v11_0_set_power_source(struct smu_context *smu,
			       enum smu_power_src_type power_src)
	pwr_source = smu_cmn_to_asic_specific_index(smu,
						    CMN2ASIC_MAPPING_PWR,
						    (uint32_t)power_src);

	return smu_cmn_send_smc_msg_with_param(smu,
					       SMU_MSG_NotifyPowerSource,

int smu_v11_0_get_dpm_freq_by_index(struct smu_context *smu,
				    enum smu_clk_type clk_type,
	int ret = 0, clk_id = 0;

	if (!smu_cmn_clk_dpm_is_enabled(smu, clk_type))

	clk_id = smu_cmn_to_asic_specific_index(smu,
						CMN2ASIC_MAPPING_CLK,

	param = (uint32_t)(((clk_id & 0xffff) << 16) | (level & 0xffff));

	ret = smu_cmn_send_smc_msg_with_param(smu,
					      SMU_MSG_GetDpmFreqByIndex,

	/*
	 * BIT31: 0 - fine grained DPM, 1 - discrete DPM;
	 * discrete DPM is not supported for now.
	 */
	*value = *value & 0x7fffffff;
int smu_v11_0_get_dpm_level_count(struct smu_context *smu,
				  enum smu_clk_type clk_type,
	return smu_v11_0_get_dpm_freq_by_index(smu,

int smu_v11_0_set_single_dpm_table(struct smu_context *smu,
				   enum smu_clk_type clk_type,
				   struct smu_11_0_dpm_table *single_dpm_table)
	ret = smu_v11_0_get_dpm_level_count(smu,
					    &single_dpm_table->count);
		dev_err(smu->adev->dev, "[%s] failed to get dpm levels!\n", __func__);

	for (i = 0; i < single_dpm_table->count; i++) {
		ret = smu_v11_0_get_dpm_freq_by_index(smu,
			dev_err(smu->adev->dev, "[%s] failed to get dpm freq by index!\n", __func__);

		single_dpm_table->dpm_levels[i].value = clk;
		single_dpm_table->dpm_levels[i].enabled = true;

			single_dpm_table->min = clk;
		else if (i == single_dpm_table->count - 1)
			single_dpm_table->max = clk;

int smu_v11_0_get_dpm_level_range(struct smu_context *smu,
				  enum smu_clk_type clk_type,
				  uint32_t *min_value,
				  uint32_t *max_value)
	uint32_t level_count = 0;

	if (!min_value && !max_value)

		/* by default, level 0 clock value as min value */
		ret = smu_v11_0_get_dpm_freq_by_index(smu,

		ret = smu_v11_0_get_dpm_level_count(smu,

		ret = smu_v11_0_get_dpm_freq_by_index(smu,

int smu_v11_0_get_current_pcie_link_width_level(struct smu_context *smu)
	struct amdgpu_device *adev = smu->adev;

	return (RREG32_PCIE(smnPCIE_LC_LINK_WIDTH_CNTL) &
		PCIE_LC_LINK_WIDTH_CNTL__LC_LINK_WIDTH_RD_MASK)
		>> PCIE_LC_LINK_WIDTH_CNTL__LC_LINK_WIDTH_RD__SHIFT;

int smu_v11_0_get_current_pcie_link_width(struct smu_context *smu)
	uint32_t width_level;

	width_level = smu_v11_0_get_current_pcie_link_width_level(smu);
	if (width_level > LINK_WIDTH_MAX)

	return link_width[width_level];

int smu_v11_0_get_current_pcie_link_speed_level(struct smu_context *smu)
	struct amdgpu_device *adev = smu->adev;

	return (RREG32_PCIE(smnPCIE_LC_SPEED_CNTL) &
		PCIE_LC_SPEED_CNTL__LC_CURRENT_DATA_RATE_MASK)
		>> PCIE_LC_SPEED_CNTL__LC_CURRENT_DATA_RATE__SHIFT;

int smu_v11_0_get_current_pcie_link_speed(struct smu_context *smu)
	uint32_t speed_level;

	speed_level = smu_v11_0_get_current_pcie_link_speed_level(smu);
	if (speed_level > LINK_SPEED_MAX)

	return link_speed[speed_level];
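/*
 * The gpu_metrics buffers below are pre-filled with 0xFF so that fields
 * a given ASIC does not populate read back as an obviously-invalid
 * sentinel value (assumption, inferred from the memset pattern).
 */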
void smu_v11_0_init_gpu_metrics_v1_0(struct gpu_metrics_v1_0 *gpu_metrics)
	memset(gpu_metrics, 0xFF, sizeof(struct gpu_metrics_v1_0));

	gpu_metrics->common_header.structure_size =
		sizeof(struct gpu_metrics_v1_0);
	gpu_metrics->common_header.format_revision = 1;
	gpu_metrics->common_header.content_revision = 0;

	gpu_metrics->system_clock_counter = ktime_get_boottime_ns();

void smu_v11_0_init_gpu_metrics_v2_0(struct gpu_metrics_v2_0 *gpu_metrics)
	memset(gpu_metrics, 0xFF, sizeof(struct gpu_metrics_v2_0));

	gpu_metrics->common_header.structure_size =
		sizeof(struct gpu_metrics_v2_0);
	gpu_metrics->common_header.format_revision = 2;
	gpu_metrics->common_header.content_revision = 0;

	gpu_metrics->system_clock_counter = ktime_get_boottime_ns();

int smu_v11_0_gfx_ulv_control(struct smu_context *smu,
	if (smu_cmn_feature_is_supported(smu, SMU_FEATURE_GFX_ULV_BIT))
		ret = smu_cmn_feature_set_enabled(smu, SMU_FEATURE_GFX_ULV_BIT, enablement);

int smu_v11_0_deep_sleep_control(struct smu_context *smu,
	struct amdgpu_device *adev = smu->adev;

	if (smu_cmn_feature_is_supported(smu, SMU_FEATURE_DS_GFXCLK_BIT)) {
		ret = smu_cmn_feature_set_enabled(smu, SMU_FEATURE_DS_GFXCLK_BIT, enablement);
			dev_err(adev->dev, "Failed to %s GFXCLK DS!\n", enablement ? "enable" : "disable");

	if (smu_cmn_feature_is_supported(smu, SMU_FEATURE_DS_SOCCLK_BIT)) {
		ret = smu_cmn_feature_set_enabled(smu, SMU_FEATURE_DS_SOCCLK_BIT, enablement);
			dev_err(adev->dev, "Failed to %s SOCCLK DS!\n", enablement ? "enable" : "disable");

	if (smu_cmn_feature_is_supported(smu, SMU_FEATURE_DS_LCLK_BIT)) {
		ret = smu_cmn_feature_set_enabled(smu, SMU_FEATURE_DS_LCLK_BIT, enablement);
			dev_err(adev->dev, "Failed to %s LCLK DS!\n", enablement ? "enable" : "disable");