2 * Copyright 2017 Advanced Micro Devices, Inc.
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
23 #include <linux/module.h>
24 #include <linux/slab.h>
26 #include "linux/delay.h"
27 #include <linux/types.h>
31 #include "ci_smumgr.h"
33 #include "smu7_hwmgr.h"
34 #include "hardwaremanager.h"
35 #include "ppatomctrl.h"
36 #include "cgs_common.h"
38 #include "pppcielanes.h"
40 #include "smu/smu_7_0_1_d.h"
41 #include "smu/smu_7_0_1_sh_mask.h"
43 #include "dce/dce_8_0_d.h"
44 #include "dce/dce_8_0_sh_mask.h"
46 #include "bif/bif_4_1_d.h"
47 #include "bif/bif_4_1_sh_mask.h"
49 #include "gca/gfx_7_2_d.h"
50 #include "gca/gfx_7_2_sh_mask.h"
52 #include "gmc/gmc_7_1_d.h"
53 #include "gmc/gmc_7_1_sh_mask.h"
55 #include "processpptables.h"
57 #define MC_CG_ARB_FREQ_F0 0x0a
58 #define MC_CG_ARB_FREQ_F1 0x0b
59 #define MC_CG_ARB_FREQ_F2 0x0c
60 #define MC_CG_ARB_FREQ_F3 0x0d
62 #define SMC_RAM_END 0x40000
64 #define VOLTAGE_SCALE 4
65 #define VOLTAGE_VID_OFFSET_SCALE1 625
66 #define VOLTAGE_VID_OFFSET_SCALE2 100
67 #define CISLAND_MINIMUM_ENGINE_CLOCK 800
68 #define CISLAND_MAX_DEEPSLEEP_DIVIDER_ID 5
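/*
 * Note: SMC_RAM_END is used below as the upper bound for all indirect SMC
 * SRAM accesses.  VOLTAGE_VID_OFFSET_SCALE2 / VOLTAGE_VID_OFFSET_SCALE1
 * (100 / 625) converts a voltage delta into SVI2 VID steps, i.e. one VID
 * step per 6.25 mV, assuming the pptable voltages are expressed in mV.
 */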
70 static const struct ci_pt_defaults defaults_hawaii_xt = {
71 1, 0xF, 0xFD, 0x19, 5, 0x14, 0, 0xB0000,
72 { 0x2E, 0x00, 0x00, 0x88, 0x00, 0x00, 0x72, 0x60, 0x51, 0xA7, 0x79, 0x6B, 0x90, 0xBD, 0x79 },
73 { 0x217, 0x217, 0x217, 0x242, 0x242, 0x242, 0x269, 0x269, 0x269, 0x2A1, 0x2A1, 0x2A1, 0x2C9, 0x2C9, 0x2C9 }
76 static const struct ci_pt_defaults defaults_hawaii_pro = {
77 1, 0xF, 0xFD, 0x19, 5, 0x14, 0, 0x65062,
78 { 0x2E, 0x00, 0x00, 0x88, 0x00, 0x00, 0x72, 0x60, 0x51, 0xA7, 0x79, 0x6B, 0x90, 0xBD, 0x79 },
79 { 0x217, 0x217, 0x217, 0x242, 0x242, 0x242, 0x269, 0x269, 0x269, 0x2A1, 0x2A1, 0x2A1, 0x2C9, 0x2C9, 0x2C9 }
82 static const struct ci_pt_defaults defaults_bonaire_xt = {
83 1, 0xF, 0xFD, 0x19, 5, 45, 0, 0xB0000,
84 { 0x79, 0x253, 0x25D, 0xAE, 0x72, 0x80, 0x83, 0x86, 0x6F, 0xC8, 0xC9, 0xC9, 0x2F, 0x4D, 0x61 },
85 { 0x17C, 0x172, 0x180, 0x1BC, 0x1B3, 0x1BD, 0x206, 0x200, 0x203, 0x25D, 0x25A, 0x255, 0x2C3, 0x2C5, 0x2B4 }
89 static const struct ci_pt_defaults defaults_saturn_xt = {
90 1, 0xF, 0xFD, 0x19, 5, 55, 0, 0x70000,
91 { 0x8C, 0x247, 0x249, 0xA6, 0x80, 0x81, 0x8B, 0x89, 0x86, 0xC9, 0xCA, 0xC9, 0x4D, 0x4D, 0x4D },
92 { 0x187, 0x187, 0x187, 0x1C7, 0x1C7, 0x1C7, 0x210, 0x210, 0x210, 0x266, 0x266, 0x266, 0x2C9, 0x2C9, 0x2C9 }
96 static int ci_set_smc_sram_address(struct pp_hwmgr *hwmgr,
97 uint32_t smc_addr, uint32_t limit)
99 if ((0 != (3 & smc_addr))
100 || ((smc_addr + 3) >= limit)) {
101 pr_err("smc_addr invalid \n");
105 cgs_write_register(hwmgr->device, mmSMC_IND_INDEX_0, smc_addr);
106 PHM_WRITE_FIELD(hwmgr->device, SMC_IND_ACCESS_CNTL, AUTO_INCREMENT_IND_0, 0);
110 static int ci_copy_bytes_to_smc(struct pp_hwmgr *hwmgr, uint32_t smc_start_address,
111 const uint8_t *src, uint32_t byte_count, uint32_t limit)
115 uint32_t original_data;
117 uint32_t extra_shift;
119 if ((3 & smc_start_address)
120 || ((smc_start_address + byte_count) >= limit)) {
121 pr_err("smc_start_address invalid \n");
125 addr = smc_start_address;
127 while (byte_count >= 4) {
128 /* Bytes are written into the SMC address space with the MSB first. */
129 data = src[0] * 0x1000000 + src[1] * 0x10000 + src[2] * 0x100 + src[3];
131 result = ci_set_smc_sram_address(hwmgr, addr, limit);
136 cgs_write_register(hwmgr->device, mmSMC_IND_DATA_0, data);
143 if (0 != byte_count) {
147 result = ci_set_smc_sram_address(hwmgr, addr, limit);
153 original_data = cgs_read_register(hwmgr->device, mmSMC_IND_DATA_0);
155 extra_shift = 8 * (4 - byte_count);
157 while (byte_count > 0) {
158 /* Bytes are written into the SMC address space with the MSB first. */
159 data = (0x100 * data) + *src++;
163 data <<= extra_shift;
165 data |= (original_data & ~((~0UL) << extra_shift));
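/*
 * Example: with byte_count == 2, extra_shift is 16, so the two trailing
 * bytes land in the upper half of the word while the mask above keeps the
 * original low 16 bits that were read back from SMC SRAM.
 */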
167 result = ci_set_smc_sram_address(hwmgr, addr, limit);
172 cgs_write_register(hwmgr->device, mmSMC_IND_DATA_0, data);
179 static int ci_program_jump_on_start(struct pp_hwmgr *hwmgr)
181 static const unsigned char data[4] = { 0xE0, 0x00, 0x80, 0x40 };
183 ci_copy_bytes_to_smc(hwmgr, 0x0, data, 4, sizeof(data)+1);
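/*
 * The SMC is treated as running when its clock is not gated (ck_disable == 0)
 * and its program counter has advanced past 0x20100, i.e. it is executing its
 * RAM image rather than sitting at reset (interpretation assumed from the
 * register names).
 */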
188 bool ci_is_smc_ram_running(struct pp_hwmgr *hwmgr)
190 return ((0 == PHM_READ_VFPF_INDIRECT_FIELD(hwmgr->device,
191 CGS_IND_REG__SMC, SMC_SYSCON_CLOCK_CNTL_0, ck_disable))
192 && (0x20100 <= cgs_read_ind_register(hwmgr->device,
193 CGS_IND_REG__SMC, ixSMC_PC_C)));
196 static int ci_read_smc_sram_dword(struct pp_hwmgr *hwmgr, uint32_t smc_addr,
197 uint32_t *value, uint32_t limit)
201 result = ci_set_smc_sram_address(hwmgr, smc_addr, limit);
206 *value = cgs_read_register(hwmgr->device, mmSMC_IND_DATA_0);
210 static int ci_send_msg_to_smc(struct pp_hwmgr *hwmgr, uint16_t msg)
214 if (!ci_is_smc_ram_running(hwmgr))
217 cgs_write_register(hwmgr->device, mmSMC_MESSAGE_0, msg);
219 PHM_WAIT_FIELD_UNEQUAL(hwmgr, SMC_RESP_0, SMC_RESP, 0);
221 ret = PHM_READ_FIELD(hwmgr->device, SMC_RESP_0, SMC_RESP);
224 pr_info("\n failed to send message %x ret is %d\n", msg, ret);
229 static int ci_send_msg_to_smc_with_parameter(struct pp_hwmgr *hwmgr,
230 uint16_t msg, uint32_t parameter)
232 cgs_write_register(hwmgr->device, mmSMC_MSG_ARG_0, parameter);
233 return ci_send_msg_to_smc(hwmgr, msg);
236 static void ci_initialize_power_tune_defaults(struct pp_hwmgr *hwmgr)
238 struct ci_smumgr *smu_data = (struct ci_smumgr *)(hwmgr->smu_backend);
239 struct cgs_system_info sys_info = {0};
242 sys_info.size = sizeof(struct cgs_system_info);
243 sys_info.info_id = CGS_SYSTEM_INFO_PCIE_DEV;
244 cgs_query_system_info(hwmgr->device, &sys_info);
245 dev_id = (uint32_t)sys_info.value;
250 smu_data->power_tune_defaults = &defaults_hawaii_pro;
254 smu_data->power_tune_defaults = &defaults_hawaii_xt;
260 smu_data->power_tune_defaults = &defaults_saturn_xt;
277 smu_data->power_tune_defaults = &defaults_bonaire_xt;
282 static int ci_get_dependency_volt_by_clk(struct pp_hwmgr *hwmgr,
283 struct phm_clock_voltage_dependency_table *allowed_clock_voltage_table,
284 uint32_t clock, uint32_t *vol)
288 if (allowed_clock_voltage_table->count == 0)
291 for (i = 0; i < allowed_clock_voltage_table->count; i++) {
292 if (allowed_clock_voltage_table->entries[i].clk >= clock) {
293 *vol = allowed_clock_voltage_table->entries[i].v;
298 *vol = allowed_clock_voltage_table->entries[i - 1].v;
302 static int ci_calculate_sclk_params(struct pp_hwmgr *hwmgr,
303 uint32_t clock, struct SMU7_Discrete_GraphicsLevel *sclk)
305 const struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
306 struct pp_atomctrl_clock_dividers_vi dividers;
307 uint32_t spll_func_cntl = data->clock_registers.vCG_SPLL_FUNC_CNTL;
308 uint32_t spll_func_cntl_3 = data->clock_registers.vCG_SPLL_FUNC_CNTL_3;
309 uint32_t spll_func_cntl_4 = data->clock_registers.vCG_SPLL_FUNC_CNTL_4;
310 uint32_t cg_spll_spread_spectrum = data->clock_registers.vCG_SPLL_SPREAD_SPECTRUM;
311 uint32_t cg_spll_spread_spectrum_2 = data->clock_registers.vCG_SPLL_SPREAD_SPECTRUM_2;
313 uint32_t ref_divider;
317 /* get the engine clock dividers for this clock value */
318 result = atomctrl_get_engine_pll_dividers_vi(hwmgr, clock, &dividers);
320 PP_ASSERT_WITH_CODE(result == 0,
321 "Error retrieving Engine Clock dividers from VBIOS.",
324 /* To get FBDIV we need to multiply this by 16384 and divide it by Fref. */
325 ref_clock = atomctrl_get_reference_clock(hwmgr);
326 ref_divider = 1 + dividers.uc_pll_ref_div;
328 /* the low 14 bits are the fraction and the high 12 bits are the divider */
329 fbdiv = dividers.ul_fb_div.ul_fb_divider & 0x3FFFFFF;
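/*
 * Sanity check (not used by the code): the resulting engine clock is roughly
 * (ref_clock / ref_divider) * (fbdiv / 16384) / post_divider, matching the
 * FBDIV derivation described in the comment above.
 */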
331 /* SPLL_FUNC_CNTL setup */
332 spll_func_cntl = PHM_SET_FIELD(spll_func_cntl, CG_SPLL_FUNC_CNTL,
333 SPLL_REF_DIV, dividers.uc_pll_ref_div);
334 spll_func_cntl = PHM_SET_FIELD(spll_func_cntl, CG_SPLL_FUNC_CNTL,
335 SPLL_PDIV_A, dividers.uc_pll_post_div);
337 /* SPLL_FUNC_CNTL_3 setup*/
338 spll_func_cntl_3 = PHM_SET_FIELD(spll_func_cntl_3, CG_SPLL_FUNC_CNTL_3,
341 /* set to use fractional accumulation*/
342 spll_func_cntl_3 = PHM_SET_FIELD(spll_func_cntl_3, CG_SPLL_FUNC_CNTL_3,
345 if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
346 PHM_PlatformCaps_EngineSpreadSpectrumSupport)) {
347 struct pp_atomctrl_internal_ss_info ss_info;
348 uint32_t vco_freq = clock * dividers.uc_pll_post_div;
350 if (!atomctrl_get_engine_clock_spread_spectrum(hwmgr,
351 vco_freq, &ss_info)) {
352 uint32_t clk_s = ref_clock * 5 /
353 (ref_divider * ss_info.speed_spectrum_rate);
354 uint32_t clk_v = 4 * ss_info.speed_spectrum_percentage *
355 fbdiv / (clk_s * 10000);
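/*
 * clk_s scales the spread spectrum rate against the reference clock and
 * clk_v derives the modulation depth from the feedback divider; the exact
 * encoding is taken on trust from the CLKS/CLKV register field names below.
 */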
357 cg_spll_spread_spectrum = PHM_SET_FIELD(cg_spll_spread_spectrum,
358 CG_SPLL_SPREAD_SPECTRUM, CLKS, clk_s);
359 cg_spll_spread_spectrum = PHM_SET_FIELD(cg_spll_spread_spectrum,
360 CG_SPLL_SPREAD_SPECTRUM, SSEN, 1);
361 cg_spll_spread_spectrum_2 = PHM_SET_FIELD(cg_spll_spread_spectrum_2,
362 CG_SPLL_SPREAD_SPECTRUM_2, CLKV, clk_v);
366 sclk->SclkFrequency = clock;
367 sclk->CgSpllFuncCntl3 = spll_func_cntl_3;
368 sclk->CgSpllFuncCntl4 = spll_func_cntl_4;
369 sclk->SpllSpreadSpectrum = cg_spll_spread_spectrum;
370 sclk->SpllSpreadSpectrum2 = cg_spll_spread_spectrum_2;
371 sclk->SclkDid = (uint8_t)dividers.pll_post_divider;
376 static void ci_populate_phase_value_based_on_sclk(struct pp_hwmgr *hwmgr,
377 const struct phm_phase_shedding_limits_table *pl,
378 uint32_t sclk, uint32_t *p_shed)
382 /* use the minimum phase shedding */
385 for (i = 0; i < pl->count; i++) {
386 if (sclk < pl->entries[i].Sclk) {
393 static uint8_t ci_get_sleep_divider_id_from_clock(uint32_t clock,
398 uint32_t min = min_t(uint32_t, clock_insr, CISLAND_MINIMUM_ENGINE_CLOCK);
401 pr_info("Engine clock can't satisfy stutter requirement!\n");
404 for (i = CISLAND_MAX_DEEPSLEEP_DIVIDER_ID; ; i--) {
407 if (temp >= min || i == 0)
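/*
 * The loop walks divider IDs from the deepest (CISLAND_MAX_DEEPSLEEP_DIVIDER_ID)
 * down to 0 and stops at the first one whose down-divided clock still meets
 * the minimum engine clock, so i ends up as the deepest usable divider ID.
 */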
413 static int ci_populate_single_graphic_level(struct pp_hwmgr *hwmgr,
414 uint32_t clock, uint16_t sclk_al_threshold,
415 struct SMU7_Discrete_GraphicsLevel *level)
418 struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
421 result = ci_calculate_sclk_params(hwmgr, clock, level);
423 /* populate graphics levels */
424 result = ci_get_dependency_volt_by_clk(hwmgr,
425 hwmgr->dyn_state.vddc_dependency_on_sclk, clock,
426 (uint32_t *)(&level->MinVddc));
428 pr_err("vdd_dep_on_sclk table is NULL\n");
432 level->SclkFrequency = clock;
433 level->MinVddcPhases = 1;
435 if (data->vddc_phase_shed_control)
436 ci_populate_phase_value_based_on_sclk(hwmgr,
437 hwmgr->dyn_state.vddc_phase_shed_limits_table,
439 &level->MinVddcPhases);
441 level->ActivityLevel = sclk_al_threshold;
442 level->CcPwrDynRm = 0;
443 level->CcPwrDynRm1 = 0;
444 level->EnabledForActivity = 0;
445 /* this level can be used for throttling.*/
446 level->EnabledForThrottle = 1;
449 level->VoltageDownH = 0;
450 level->PowerThrottle = 0;
453 if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
454 PHM_PlatformCaps_SclkDeepSleep))
455 level->DeepSleepDivId =
456 ci_get_sleep_divider_id_from_clock(clock,
457 CISLAND_MINIMUM_ENGINE_CLOCK);
459 /* Default to slow; the highest DPM level will be set to PPSMC_DISPLAY_WATERMARK_HIGH later.*/
460 level->DisplayWatermark = PPSMC_DISPLAY_WATERMARK_LOW;
463 level->MinVddc = PP_HOST_TO_SMC_UL(level->MinVddc * VOLTAGE_SCALE);
464 CONVERT_FROM_HOST_TO_SMC_UL(level->MinVddcPhases);
465 CONVERT_FROM_HOST_TO_SMC_UL(level->SclkFrequency);
466 CONVERT_FROM_HOST_TO_SMC_US(level->ActivityLevel);
467 CONVERT_FROM_HOST_TO_SMC_UL(level->CgSpllFuncCntl3);
468 CONVERT_FROM_HOST_TO_SMC_UL(level->CgSpllFuncCntl4);
469 CONVERT_FROM_HOST_TO_SMC_UL(level->SpllSpreadSpectrum);
470 CONVERT_FROM_HOST_TO_SMC_UL(level->SpllSpreadSpectrum2);
471 CONVERT_FROM_HOST_TO_SMC_UL(level->CcPwrDynRm);
472 CONVERT_FROM_HOST_TO_SMC_UL(level->CcPwrDynRm1);
478 static int ci_populate_all_graphic_levels(struct pp_hwmgr *hwmgr)
480 struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
481 struct ci_smumgr *smu_data = (struct ci_smumgr *)(hwmgr->smu_backend);
482 struct smu7_dpm_table *dpm_table = &data->dpm_table;
484 uint32_t array = smu_data->dpm_table_start +
485 offsetof(SMU7_Discrete_DpmTable, GraphicsLevel);
486 uint32_t array_size = sizeof(struct SMU7_Discrete_GraphicsLevel) *
487 SMU7_MAX_LEVELS_GRAPHICS;
488 struct SMU7_Discrete_GraphicsLevel *levels =
489 smu_data->smc_state_table.GraphicsLevel;
492 for (i = 0; i < dpm_table->sclk_table.count; i++) {
493 result = ci_populate_single_graphic_level(hwmgr,
494 dpm_table->sclk_table.dpm_levels[i].value,
495 (uint16_t)smu_data->activity_target[i],
500 smu_data->smc_state_table.GraphicsLevel[i].DeepSleepDivId = 0;
501 if (i == (dpm_table->sclk_table.count - 1))
502 smu_data->smc_state_table.GraphicsLevel[i].DisplayWatermark =
503 PPSMC_DISPLAY_WATERMARK_HIGH;
506 smu_data->smc_state_table.GraphicsLevel[0].EnabledForActivity = 1;
508 smu_data->smc_state_table.GraphicsDpmLevelCount = (u8)dpm_table->sclk_table.count;
509 data->dpm_level_enable_mask.sclk_dpm_enable_mask =
510 phm_get_dpm_level_enable_mask_value(&dpm_table->sclk_table);
512 result = ci_copy_bytes_to_smc(hwmgr, array,
513 (u8 *)levels, array_size,
520 static int ci_populate_svi_load_line(struct pp_hwmgr *hwmgr)
522 struct ci_smumgr *smu_data = (struct ci_smumgr *)(hwmgr->smu_backend);
523 const struct ci_pt_defaults *defaults = smu_data->power_tune_defaults;
525 smu_data->power_tune_table.SviLoadLineEn = defaults->svi_load_line_en;
526 smu_data->power_tune_table.SviLoadLineVddC = defaults->svi_load_line_vddc;
527 smu_data->power_tune_table.SviLoadLineTrimVddC = 3;
528 smu_data->power_tune_table.SviLoadLineOffsetVddC = 0;
533 static int ci_populate_tdc_limit(struct pp_hwmgr *hwmgr)
536 struct ci_smumgr *smu_data = (struct ci_smumgr *)(hwmgr->smu_backend);
537 const struct ci_pt_defaults *defaults = smu_data->power_tune_defaults;
539 tdc_limit = (uint16_t)(hwmgr->dyn_state.cac_dtp_table->usTDC * 256);
540 smu_data->power_tune_table.TDC_VDDC_PkgLimit =
541 CONVERT_FROM_HOST_TO_SMC_US(tdc_limit);
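/*
 * usTDC is multiplied by 256, i.e. stored as an 8.8 fixed-point value, which
 * appears to be the format the SMC power tune table expects for this limit.
 */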
542 smu_data->power_tune_table.TDC_VDDC_ThrottleReleaseLimitPerc =
543 defaults->tdc_vddc_throttle_release_limit_perc;
544 smu_data->power_tune_table.TDC_MAWt = defaults->tdc_mawt;
549 static int ci_populate_dw8(struct pp_hwmgr *hwmgr, uint32_t fuse_table_offset)
551 struct ci_smumgr *smu_data = (struct ci_smumgr *)(hwmgr->smu_backend);
552 const struct ci_pt_defaults *defaults = smu_data->power_tune_defaults;
555 if (ci_read_smc_sram_dword(hwmgr,
557 offsetof(SMU7_Discrete_PmFuses, TdcWaterfallCtl),
558 (uint32_t *)&temp, SMC_RAM_END))
559 PP_ASSERT_WITH_CODE(false,
560 "Attempt to read PmFuses.DW6 (SviLoadLineEn) from SMC Failed!",
563 smu_data->power_tune_table.TdcWaterfallCtl = defaults->tdc_waterfall_ctl;
568 static int ci_populate_fuzzy_fan(struct pp_hwmgr *hwmgr, uint32_t fuse_table_offset)
571 struct ci_smumgr *smu_data = (struct ci_smumgr *)(hwmgr->smu_backend);
573 if ((hwmgr->thermal_controller.advanceFanControlParameters.usFanOutputSensitivity & (1 << 15))
574 || 0 == hwmgr->thermal_controller.advanceFanControlParameters.usFanOutputSensitivity)
575 tmp = hwmgr->thermal_controller.advanceFanControlParameters.usFanOutputSensitivity;
577 tmp = hwmgr->thermal_controller.advanceFanControlParameters.usDefaultFanOutputSensitivity;
579 smu_data->power_tune_table.FuzzyFan_PwmSetDelta = CONVERT_FROM_HOST_TO_SMC_US(tmp);
584 static int ci_populate_bapm_vddc_vid_sidd(struct pp_hwmgr *hwmgr)
587 struct ci_smumgr *smu_data = (struct ci_smumgr *)(hwmgr->smu_backend);
588 uint8_t *hi_vid = smu_data->power_tune_table.BapmVddCVidHiSidd;
589 uint8_t *lo_vid = smu_data->power_tune_table.BapmVddCVidLoSidd;
590 uint8_t *hi2_vid = smu_data->power_tune_table.BapmVddCVidHiSidd2;
592 PP_ASSERT_WITH_CODE(NULL != hwmgr->dyn_state.cac_leakage_table,
593 "The CAC Leakage table does not exist!", return -EINVAL);
594 PP_ASSERT_WITH_CODE(hwmgr->dyn_state.cac_leakage_table->count <= 8,
595 "There should never be more than 8 entries for BapmVddcVid!!!", return -EINVAL);
596 PP_ASSERT_WITH_CODE(hwmgr->dyn_state.cac_leakage_table->count == hwmgr->dyn_state.vddc_dependency_on_sclk->count,
597 "CACLeakageTable->count and VddcDependencyOnSCLk->count not equal", return -EINVAL);
599 for (i = 0; (uint32_t) i < hwmgr->dyn_state.cac_leakage_table->count; i++) {
600 if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_EVV)) {
601 lo_vid[i] = convert_to_vid(hwmgr->dyn_state.cac_leakage_table->entries[i].Vddc1);
602 hi_vid[i] = convert_to_vid(hwmgr->dyn_state.cac_leakage_table->entries[i].Vddc2);
603 hi2_vid[i] = convert_to_vid(hwmgr->dyn_state.cac_leakage_table->entries[i].Vddc3);
605 lo_vid[i] = convert_to_vid(hwmgr->dyn_state.cac_leakage_table->entries[i].Vddc);
606 hi_vid[i] = convert_to_vid(hwmgr->dyn_state.cac_leakage_table->entries[i].Leakage);
613 static int ci_populate_vddc_vid(struct pp_hwmgr *hwmgr)
616 struct ci_smumgr *smu_data = (struct ci_smumgr *)(hwmgr->smu_backend);
617 uint8_t *vid = smu_data->power_tune_table.VddCVid;
618 struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
620 PP_ASSERT_WITH_CODE(data->vddc_voltage_table.count <= 8,
621 "There should never be more than 8 entries for VddcVid!!!",
624 for (i = 0; i < (int)data->vddc_voltage_table.count; i++)
625 vid[i] = convert_to_vid(data->vddc_voltage_table.entries[i].value);
630 static int ci_min_max_v_gnbl_pm_lid_from_bapm_vddc(struct pp_hwmgr *hwmgr)
632 struct ci_smumgr *smu_data = (struct ci_smumgr *)(hwmgr->smu_backend);
633 u8 *hi_vid = smu_data->power_tune_table.BapmVddCVidHiSidd;
634 u8 *lo_vid = smu_data->power_tune_table.BapmVddCVidLoSidd;
637 min = max = hi_vid[0];
638 for (i = 0; i < 8; i++) {
639 if (0 != hi_vid[i]) {
646 if (0 != lo_vid[i]) {
654 if ((min == 0) || (max == 0))
656 smu_data->power_tune_table.GnbLPMLMaxVid = (u8)max;
657 smu_data->power_tune_table.GnbLPMLMinVid = (u8)min;
662 static int ci_populate_bapm_vddc_base_leakage_sidd(struct pp_hwmgr *hwmgr)
664 struct ci_smumgr *smu_data = (struct ci_smumgr *)(hwmgr->smu_backend);
665 uint16_t HiSidd = smu_data->power_tune_table.BapmVddCBaseLeakageHiSidd;
666 uint16_t LoSidd = smu_data->power_tune_table.BapmVddCBaseLeakageLoSidd;
667 struct phm_cac_tdp_table *cac_table = hwmgr->dyn_state.cac_dtp_table;
669 HiSidd = (uint16_t)(cac_table->usHighCACLeakage / 100 * 256);
670 LoSidd = (uint16_t)(cac_table->usLowCACLeakage / 100 * 256);
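/*
 * The CAC leakage percentages are converted to 8.8 fixed-point fractions
 * (value / 100 * 256).  Because this is integer arithmetic the division by
 * 100 happens first, so any sub-percent precision in the table is dropped.
 */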
672 smu_data->power_tune_table.BapmVddCBaseLeakageHiSidd =
673 CONVERT_FROM_HOST_TO_SMC_US(HiSidd);
674 smu_data->power_tune_table.BapmVddCBaseLeakageLoSidd =
675 CONVERT_FROM_HOST_TO_SMC_US(LoSidd);
680 static int ci_populate_pm_fuses(struct pp_hwmgr *hwmgr)
682 struct ci_smumgr *smu_data = (struct ci_smumgr *)(hwmgr->smu_backend);
683 uint32_t pm_fuse_table_offset;
686 if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
687 PHM_PlatformCaps_PowerContainment)) {
688 if (ci_read_smc_sram_dword(hwmgr,
689 SMU7_FIRMWARE_HEADER_LOCATION +
690 offsetof(SMU7_Firmware_Header, PmFuseTable),
691 &pm_fuse_table_offset, SMC_RAM_END)) {
692 pr_err("Attempt to get pm_fuse_table_offset Failed!\n");
697 ret = ci_populate_bapm_vddc_vid_sidd(hwmgr);
699 ret |= ci_populate_vddc_vid(hwmgr);
701 ret |= ci_populate_svi_load_line(hwmgr);
703 ret |= ci_populate_tdc_limit(hwmgr);
705 ret |= ci_populate_dw8(hwmgr, pm_fuse_table_offset);
707 ret |= ci_populate_fuzzy_fan(hwmgr, pm_fuse_table_offset);
709 ret |= ci_min_max_v_gnbl_pm_lid_from_bapm_vddc(hwmgr);
711 ret |= ci_populate_bapm_vddc_base_leakage_sidd(hwmgr);
715 ret = ci_copy_bytes_to_smc(hwmgr, pm_fuse_table_offset,
716 (uint8_t *)&smu_data->power_tune_table,
717 sizeof(struct SMU7_Discrete_PmFuses), SMC_RAM_END);
722 static int ci_populate_bapm_parameters_in_dpm_table(struct pp_hwmgr *hwmgr)
724 struct ci_smumgr *smu_data = (struct ci_smumgr *)(hwmgr->smu_backend);
725 struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
726 const struct ci_pt_defaults *defaults = smu_data->power_tune_defaults;
727 SMU7_Discrete_DpmTable *dpm_table = &(smu_data->smc_state_table);
728 struct phm_cac_tdp_table *cac_dtp_table = hwmgr->dyn_state.cac_dtp_table;
729 struct phm_ppm_table *ppm = hwmgr->dyn_state.ppm_parameter_table;
730 const uint16_t *def1, *def2;
733 dpm_table->DefaultTdp = PP_HOST_TO_SMC_US((uint16_t)(cac_dtp_table->usTDP * 256));
734 dpm_table->TargetTdp = PP_HOST_TO_SMC_US((uint16_t)(cac_dtp_table->usConfigurableTDP * 256));
736 dpm_table->DTETjOffset = 0;
737 dpm_table->GpuTjMax = (uint8_t)(data->thermal_temp_setting.temperature_high / PP_TEMPERATURE_UNITS_PER_CENTIGRADES);
738 dpm_table->GpuTjHyst = 8;
740 dpm_table->DTEAmbientTempBase = defaults->dte_ambient_temp_base;
743 dpm_table->PPM_PkgPwrLimit = (uint16_t)ppm->dgpu_tdp * 256 / 1000;
744 dpm_table->PPM_TemperatureLimit = (uint16_t)ppm->tj_max * 256;
746 dpm_table->PPM_PkgPwrLimit = 0;
747 dpm_table->PPM_TemperatureLimit = 0;
750 CONVERT_FROM_HOST_TO_SMC_US(dpm_table->PPM_PkgPwrLimit);
751 CONVERT_FROM_HOST_TO_SMC_US(dpm_table->PPM_TemperatureLimit);
753 dpm_table->BAPM_TEMP_GRADIENT = PP_HOST_TO_SMC_UL(defaults->bapm_temp_gradient);
754 def1 = defaults->bapmti_r;
755 def2 = defaults->bapmti_rc;
757 for (i = 0; i < SMU7_DTE_ITERATIONS; i++) {
758 for (j = 0; j < SMU7_DTE_SOURCES; j++) {
759 for (k = 0; k < SMU7_DTE_SINKS; k++) {
760 dpm_table->BAPMTI_R[i][j][k] = PP_HOST_TO_SMC_US(*def1);
761 dpm_table->BAPMTI_RC[i][j][k] = PP_HOST_TO_SMC_US(*def2);
771 static int ci_get_std_voltage_value_sidd(struct pp_hwmgr *hwmgr,
772 pp_atomctrl_voltage_table_entry *tab, uint16_t *hi,
776 bool vol_found = false;
777 *hi = tab->value * VOLTAGE_SCALE;
778 *lo = tab->value * VOLTAGE_SCALE;
780 PP_ASSERT_WITH_CODE(NULL != hwmgr->dyn_state.vddc_dependency_on_sclk,
781 "The SCLK/VDDC Dependency Table does not exist.\n",
784 if (NULL == hwmgr->dyn_state.cac_leakage_table) {
785 pr_warn("CAC Leakage Table does not exist, using vddc.\n");
789 for (v_index = 0; (uint32_t)v_index < hwmgr->dyn_state.vddc_dependency_on_sclk->count; v_index++) {
790 if (tab->value == hwmgr->dyn_state.vddc_dependency_on_sclk->entries[v_index].v) {
792 if ((uint32_t)v_index < hwmgr->dyn_state.cac_leakage_table->count) {
793 *lo = hwmgr->dyn_state.cac_leakage_table->entries[v_index].Vddc * VOLTAGE_SCALE;
794 *hi = (uint16_t)(hwmgr->dyn_state.cac_leakage_table->entries[v_index].Leakage * VOLTAGE_SCALE);
796 pr_warn("Index from SCLK/VDDC Dependency Table exceeds the CAC Leakage Table index, using maximum index from CAC table.\n");
797 *lo = hwmgr->dyn_state.cac_leakage_table->entries[hwmgr->dyn_state.cac_leakage_table->count - 1].Vddc * VOLTAGE_SCALE;
798 *hi = (uint16_t)(hwmgr->dyn_state.cac_leakage_table->entries[hwmgr->dyn_state.cac_leakage_table->count - 1].Leakage * VOLTAGE_SCALE);
805 for (v_index = 0; (uint32_t)v_index < hwmgr->dyn_state.vddc_dependency_on_sclk->count; v_index++) {
806 if (tab->value <= hwmgr->dyn_state.vddc_dependency_on_sclk->entries[v_index].v) {
808 if ((uint32_t)v_index < hwmgr->dyn_state.cac_leakage_table->count) {
809 *lo = hwmgr->dyn_state.cac_leakage_table->entries[v_index].Vddc * VOLTAGE_SCALE;
810 *hi = (uint16_t)(hwmgr->dyn_state.cac_leakage_table->entries[v_index].Leakage) * VOLTAGE_SCALE;
812 pr_warn("Index from SCLK/VDDC Dependency Table exceeds the CAC Leakage Table index in second look up, using maximum index from CAC table.");
813 *lo = hwmgr->dyn_state.cac_leakage_table->entries[hwmgr->dyn_state.cac_leakage_table->count - 1].Vddc * VOLTAGE_SCALE;
814 *hi = (uint16_t)(hwmgr->dyn_state.cac_leakage_table->entries[hwmgr->dyn_state.cac_leakage_table->count - 1].Leakage * VOLTAGE_SCALE);
821 pr_warn("Unable to get std_vddc from SCLK/VDDC Dependency Table, using vddc.\n");
827 static int ci_populate_smc_voltage_table(struct pp_hwmgr *hwmgr,
828 pp_atomctrl_voltage_table_entry *tab,
829 SMU7_Discrete_VoltageLevel *smc_voltage_tab)
833 result = ci_get_std_voltage_value_sidd(hwmgr, tab,
834 &smc_voltage_tab->StdVoltageHiSidd,
835 &smc_voltage_tab->StdVoltageLoSidd);
837 smc_voltage_tab->StdVoltageHiSidd = tab->value * VOLTAGE_SCALE;
838 smc_voltage_tab->StdVoltageLoSidd = tab->value * VOLTAGE_SCALE;
841 smc_voltage_tab->Voltage = PP_HOST_TO_SMC_US(tab->value * VOLTAGE_SCALE);
842 CONVERT_FROM_HOST_TO_SMC_US(smc_voltage_tab->StdVoltageHiSidd);
843 CONVERT_FROM_HOST_TO_SMC_US(smc_voltage_tab->StdVoltageLoSidd);
848 static int ci_populate_smc_vddc_table(struct pp_hwmgr *hwmgr,
849 SMU7_Discrete_DpmTable *table)
853 struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
855 table->VddcLevelCount = data->vddc_voltage_table.count;
856 for (count = 0; count < table->VddcLevelCount; count++) {
857 result = ci_populate_smc_voltage_table(hwmgr,
858 &(data->vddc_voltage_table.entries[count]),
859 &(table->VddcLevel[count]));
860 PP_ASSERT_WITH_CODE(0 == result, "cannot populate SMC VDDC voltage table", return -EINVAL);
862 /* GPIO voltage control */
863 if (SMU7_VOLTAGE_CONTROL_BY_GPIO == data->voltage_control)
864 table->VddcLevel[count].Smio |= data->vddc_voltage_table.entries[count].smio_low;
866 table->VddcLevel[count].Smio = 0;
869 CONVERT_FROM_HOST_TO_SMC_UL(table->VddcLevelCount);
874 static int ci_populate_smc_vdd_ci_table(struct pp_hwmgr *hwmgr,
875 SMU7_Discrete_DpmTable *table)
877 struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
881 table->VddciLevelCount = data->vddci_voltage_table.count;
883 for (count = 0; count < table->VddciLevelCount; count++) {
884 result = ci_populate_smc_voltage_table(hwmgr,
885 &(data->vddci_voltage_table.entries[count]),
886 &(table->VddciLevel[count]));
887 PP_ASSERT_WITH_CODE(result == 0, "cannot populate SMC VDDCI voltage table", return -EINVAL);
888 if (SMU7_VOLTAGE_CONTROL_BY_GPIO == data->vddci_control)
889 table->VddciLevel[count].Smio |= data->vddci_voltage_table.entries[count].smio_low;
891 table->VddciLevel[count].Smio |= 0;
894 CONVERT_FROM_HOST_TO_SMC_UL(table->VddciLevelCount);
899 static int ci_populate_smc_mvdd_table(struct pp_hwmgr *hwmgr,
900 SMU7_Discrete_DpmTable *table)
902 struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
906 table->MvddLevelCount = data->mvdd_voltage_table.count;
908 for (count = 0; count < table->MvddLevelCount; count++) {
909 result = ci_populate_smc_voltage_table(hwmgr,
910 &(data->mvdd_voltage_table.entries[count]),
911 &table->MvddLevel[count]);
912 PP_ASSERT_WITH_CODE(result == 0, "cannot populate SMC MVDD voltage table", return -EINVAL);
913 if (SMU7_VOLTAGE_CONTROL_BY_GPIO == data->mvdd_control)
914 table->MvddLevel[count].Smio |= data->mvdd_voltage_table.entries[count].smio_low;
916 table->MvddLevel[count].Smio |= 0;
919 CONVERT_FROM_HOST_TO_SMC_UL(table->MvddLevelCount);
925 static int ci_populate_smc_voltage_tables(struct pp_hwmgr *hwmgr,
926 SMU7_Discrete_DpmTable *table)
930 result = ci_populate_smc_vddc_table(hwmgr, table);
931 PP_ASSERT_WITH_CODE(0 == result,
932 "can not populate VDDC voltage table to SMC", return -EINVAL);
934 result = ci_populate_smc_vdd_ci_table(hwmgr, table);
935 PP_ASSERT_WITH_CODE(0 == result,
936 "can not populate VDDCI voltage table to SMC", return -EINVAL);
938 result = ci_populate_smc_mvdd_table(hwmgr, table);
939 PP_ASSERT_WITH_CODE(0 == result,
940 "can not populate MVDD voltage table to SMC", return -EINVAL);
945 static int ci_populate_ulv_level(struct pp_hwmgr *hwmgr,
946 struct SMU7_Discrete_Ulv *state)
948 uint32_t voltage_response_time, ulv_voltage;
950 struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
952 state->CcPwrDynRm = 0;
953 state->CcPwrDynRm1 = 0;
955 result = pp_tables_get_response_times(hwmgr, &voltage_response_time, &ulv_voltage);
956 PP_ASSERT_WITH_CODE((0 == result), "can not get ULV voltage value", return result;);
958 if (ulv_voltage == 0) {
959 data->ulv_supported = false;
963 if (data->voltage_control != SMU7_VOLTAGE_CONTROL_BY_SVID2) {
964 /* use minimum voltage if ulv voltage in pptable is bigger than minimum voltage */
965 if (ulv_voltage > hwmgr->dyn_state.vddc_dependency_on_sclk->entries[0].v)
966 state->VddcOffset = 0;
968 /* Used in SMIO mode; not implemented for now. This is only a backup path for CI. */
969 state->VddcOffset = (uint16_t)(hwmgr->dyn_state.vddc_dependency_on_sclk->entries[0].v - ulv_voltage);
971 /* use minimum voltage if ulv voltage in pptable is bigger than minimum voltage */
972 if (ulv_voltage > hwmgr->dyn_state.vddc_dependency_on_sclk->entries[0].v)
973 state->VddcOffsetVid = 0;
974 else /* used in SVI2 Mode */
975 state->VddcOffsetVid = (uint8_t)(
976 (hwmgr->dyn_state.vddc_dependency_on_sclk->entries[0].v - ulv_voltage)
977 * VOLTAGE_VID_OFFSET_SCALE2
978 / VOLTAGE_VID_OFFSET_SCALE1);
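/*
 * Worked example: if the first SCLK/VDDC entry is 50 mV above the ULV
 * voltage, the offset becomes 50 * 100 / 625 = 8 VID steps (6.25 mV each),
 * assuming the pptable voltages are in mV.
 */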
980 state->VddcPhase = 1;
982 CONVERT_FROM_HOST_TO_SMC_UL(state->CcPwrDynRm);
983 CONVERT_FROM_HOST_TO_SMC_UL(state->CcPwrDynRm1);
984 CONVERT_FROM_HOST_TO_SMC_US(state->VddcOffset);
989 static int ci_populate_ulv_state(struct pp_hwmgr *hwmgr,
990 SMU7_Discrete_Ulv *ulv_level)
992 return ci_populate_ulv_level(hwmgr, ulv_level);
995 static int ci_populate_smc_link_level(struct pp_hwmgr *hwmgr, SMU7_Discrete_DpmTable *table)
997 struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
998 struct smu7_dpm_table *dpm_table = &data->dpm_table;
999 struct ci_smumgr *smu_data = (struct ci_smumgr *)(hwmgr->smu_backend);
1002 /* Index dpm_table->pcie_speed_table.count is reserved for PCIE boot level.*/
1003 for (i = 0; i <= dpm_table->pcie_speed_table.count; i++) {
1004 table->LinkLevel[i].PcieGenSpeed =
1005 (uint8_t)dpm_table->pcie_speed_table.dpm_levels[i].value;
1006 table->LinkLevel[i].PcieLaneCount =
1007 (uint8_t)encode_pcie_lane_width(dpm_table->pcie_speed_table.dpm_levels[i].param1);
1008 table->LinkLevel[i].EnabledForActivity = 1;
1009 table->LinkLevel[i].DownT = PP_HOST_TO_SMC_UL(5);
1010 table->LinkLevel[i].UpT = PP_HOST_TO_SMC_UL(30);
1013 smu_data->smc_state_table.LinkLevelCount =
1014 (uint8_t)dpm_table->pcie_speed_table.count;
1015 data->dpm_level_enable_mask.pcie_dpm_enable_mask =
1016 phm_get_dpm_level_enable_mask_value(&dpm_table->pcie_speed_table);
1021 static int ci_calculate_mclk_params(
1022 struct pp_hwmgr *hwmgr,
1023 uint32_t memory_clock,
1024 SMU7_Discrete_MemoryLevel *mclk,
1029 struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
1030 uint32_t dll_cntl = data->clock_registers.vDLL_CNTL;
1031 uint32_t mclk_pwrmgt_cntl = data->clock_registers.vMCLK_PWRMGT_CNTL;
1032 uint32_t mpll_ad_func_cntl = data->clock_registers.vMPLL_AD_FUNC_CNTL;
1033 uint32_t mpll_dq_func_cntl = data->clock_registers.vMPLL_DQ_FUNC_CNTL;
1034 uint32_t mpll_func_cntl = data->clock_registers.vMPLL_FUNC_CNTL;
1035 uint32_t mpll_func_cntl_1 = data->clock_registers.vMPLL_FUNC_CNTL_1;
1036 uint32_t mpll_func_cntl_2 = data->clock_registers.vMPLL_FUNC_CNTL_2;
1037 uint32_t mpll_ss1 = data->clock_registers.vMPLL_SS1;
1038 uint32_t mpll_ss2 = data->clock_registers.vMPLL_SS2;
1040 pp_atomctrl_memory_clock_param mpll_param;
1043 result = atomctrl_get_memory_pll_dividers_si(hwmgr,
1044 memory_clock, &mpll_param, strobe_mode);
1045 PP_ASSERT_WITH_CODE(0 == result,
1046 "Error retrieving Memory Clock Parameters from VBIOS.", return result);
1048 mpll_func_cntl = PHM_SET_FIELD(mpll_func_cntl, MPLL_FUNC_CNTL, BWCTRL, mpll_param.bw_ctrl);
1050 mpll_func_cntl_1 = PHM_SET_FIELD(mpll_func_cntl_1,
1051 MPLL_FUNC_CNTL_1, CLKF, mpll_param.mpll_fb_divider.cl_kf);
1052 mpll_func_cntl_1 = PHM_SET_FIELD(mpll_func_cntl_1,
1053 MPLL_FUNC_CNTL_1, CLKFRAC, mpll_param.mpll_fb_divider.clk_frac);
1054 mpll_func_cntl_1 = PHM_SET_FIELD(mpll_func_cntl_1,
1055 MPLL_FUNC_CNTL_1, VCO_MODE, mpll_param.vco_mode);
1057 mpll_ad_func_cntl = PHM_SET_FIELD(mpll_ad_func_cntl,
1058 MPLL_AD_FUNC_CNTL, YCLK_POST_DIV, mpll_param.mpll_post_divider);
1060 if (data->is_memory_gddr5) {
1061 mpll_dq_func_cntl = PHM_SET_FIELD(mpll_dq_func_cntl,
1062 MPLL_DQ_FUNC_CNTL, YCLK_SEL, mpll_param.yclk_sel);
1063 mpll_dq_func_cntl = PHM_SET_FIELD(mpll_dq_func_cntl,
1064 MPLL_DQ_FUNC_CNTL, YCLK_POST_DIV, mpll_param.mpll_post_divider);
1067 if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
1068 PHM_PlatformCaps_MemorySpreadSpectrumSupport)) {
1069 pp_atomctrl_internal_ss_info ss_info;
1072 uint32_t reference_clock = atomctrl_get_mpll_reference_clock(hwmgr);
1074 /* for GDDR5 for all modes and DDR3 */
1075 if (1 == mpll_param.qdr)
1076 freq_nom = memory_clock * 4 * (1 << mpll_param.mpll_post_divider);
1078 freq_nom = memory_clock * 2 * (1 << mpll_param.mpll_post_divider);
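/*
 * In QDR (GDDR5) mode the memory runs four data transfers per clock, hence
 * the 4x factor above; otherwise 2x.  Both are scaled by the post divider to
 * get the nominal frequency used for the spread spectrum lookup below.
 */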
1080 /* tmp = (freq_nom / reference_clock * reference_divider) ^ 2 Note: S.I. reference_divider = 1*/
1081 tmp = (freq_nom / reference_clock);
1084 if (0 == atomctrl_get_memory_clock_spread_spectrum(hwmgr, freq_nom, &ss_info)) {
1085 uint32_t clks = reference_clock * 5 / ss_info.speed_spectrum_rate;
1087 (uint32_t)((((131 * ss_info.speed_spectrum_percentage *
1088 ss_info.speed_spectrum_rate) / 100) * tmp) / freq_nom);
1090 mpll_ss1 = PHM_SET_FIELD(mpll_ss1, MPLL_SS1, CLKV, clkv);
1091 mpll_ss2 = PHM_SET_FIELD(mpll_ss2, MPLL_SS2, CLKS, clks);
1095 mclk_pwrmgt_cntl = PHM_SET_FIELD(mclk_pwrmgt_cntl,
1096 MCLK_PWRMGT_CNTL, DLL_SPEED, mpll_param.dll_speed);
1097 mclk_pwrmgt_cntl = PHM_SET_FIELD(mclk_pwrmgt_cntl,
1098 MCLK_PWRMGT_CNTL, MRDCK0_PDNB, dllStateOn);
1099 mclk_pwrmgt_cntl = PHM_SET_FIELD(mclk_pwrmgt_cntl,
1100 MCLK_PWRMGT_CNTL, MRDCK1_PDNB, dllStateOn);
1103 mclk->MclkFrequency = memory_clock;
1104 mclk->MpllFuncCntl = mpll_func_cntl;
1105 mclk->MpllFuncCntl_1 = mpll_func_cntl_1;
1106 mclk->MpllFuncCntl_2 = mpll_func_cntl_2;
1107 mclk->MpllAdFuncCntl = mpll_ad_func_cntl;
1108 mclk->MpllDqFuncCntl = mpll_dq_func_cntl;
1109 mclk->MclkPwrmgtCntl = mclk_pwrmgt_cntl;
1110 mclk->DllCntl = dll_cntl;
1111 mclk->MpllSs1 = mpll_ss1;
1112 mclk->MpllSs2 = mpll_ss2;
1117 static uint8_t ci_get_mclk_frequency_ratio(uint32_t memory_clock,
1120 uint8_t mc_para_index;
1123 if (memory_clock < 12500)
1124 mc_para_index = 0x00;
1125 else if (memory_clock > 47500)
1126 mc_para_index = 0x0f;
1128 mc_para_index = (uint8_t)((memory_clock - 10000) / 2500);
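/*
 * Example: in strobe mode a memory clock of 40000 (10 kHz units, i.e.
 * 400 MHz) maps to (40000 - 10000) / 2500 = 12, i.e. ratio index 0xC.
 */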
1130 if (memory_clock < 65000)
1131 mc_para_index = 0x00;
1132 else if (memory_clock > 135000)
1133 mc_para_index = 0x0f;
1135 mc_para_index = (uint8_t)((memory_clock - 60000) / 5000);
1138 return mc_para_index;
1141 static uint8_t ci_get_ddr3_mclk_frequency_ratio(uint32_t memory_clock)
1143 uint8_t mc_para_index;
1145 if (memory_clock < 10000)
1147 else if (memory_clock >= 80000)
1148 mc_para_index = 0x0f;
1150 mc_para_index = (uint8_t)((memory_clock - 10000) / 5000 + 1);
1152 return mc_para_index;
1155 static int ci_populate_phase_value_based_on_mclk(struct pp_hwmgr *hwmgr, const struct phm_phase_shedding_limits_table *pl,
1156 uint32_t memory_clock, uint32_t *p_shed)
1162 for (i = 0; i < pl->count; i++) {
1163 if (memory_clock < pl->entries[i].Mclk) {
1172 static int ci_populate_single_memory_level(
1173 struct pp_hwmgr *hwmgr,
1174 uint32_t memory_clock,
1175 SMU7_Discrete_MemoryLevel *memory_level
1178 struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
1181 struct cgs_display_info info = {0};
1182 uint32_t mclk_edc_wr_enable_threshold = 40000;
1183 uint32_t mclk_edc_enable_threshold = 40000;
1184 uint32_t mclk_strobe_mode_threshold = 40000;
1186 if (hwmgr->dyn_state.vddc_dependency_on_mclk != NULL) {
1187 result = ci_get_dependency_volt_by_clk(hwmgr,
1188 hwmgr->dyn_state.vddc_dependency_on_mclk, memory_clock, &memory_level->MinVddc);
1189 PP_ASSERT_WITH_CODE((0 == result),
1190 "can not find MinVddc voltage value from memory VDDC voltage dependency table", return result);
1193 if (NULL != hwmgr->dyn_state.vddci_dependency_on_mclk) {
1194 result = ci_get_dependency_volt_by_clk(hwmgr,
1195 hwmgr->dyn_state.vddci_dependency_on_mclk,
1197 &memory_level->MinVddci);
1198 PP_ASSERT_WITH_CODE((0 == result),
1199 "can not find MinVddci voltage value from memory VDDCI voltage dependency table", return result);
1202 if (NULL != hwmgr->dyn_state.mvdd_dependency_on_mclk) {
1203 result = ci_get_dependency_volt_by_clk(hwmgr,
1204 hwmgr->dyn_state.mvdd_dependency_on_mclk,
1206 &memory_level->MinMvdd);
1207 PP_ASSERT_WITH_CODE((0 == result),
1208 "can not find MinVddci voltage value from memory MVDD voltage dependency table", return result);
1211 memory_level->MinVddcPhases = 1;
1213 if (data->vddc_phase_shed_control) {
1214 ci_populate_phase_value_based_on_mclk(hwmgr, hwmgr->dyn_state.vddc_phase_shed_limits_table,
1215 memory_clock, &memory_level->MinVddcPhases);
1218 memory_level->EnabledForThrottle = 1;
1219 memory_level->EnabledForActivity = 1;
1220 memory_level->UpH = 0;
1221 memory_level->DownH = 100;
1222 memory_level->VoltageDownH = 0;
1224 /* Indicates maximum activity level for this performance level.*/
1225 memory_level->ActivityLevel = (uint16_t)data->mclk_activity_target;
1226 memory_level->StutterEnable = 0;
1227 memory_level->StrobeEnable = 0;
1228 memory_level->EdcReadEnable = 0;
1229 memory_level->EdcWriteEnable = 0;
1230 memory_level->RttEnable = 0;
1232 /* default set to low watermark. Highest level will be set to high later.*/
1233 memory_level->DisplayWatermark = PPSMC_DISPLAY_WATERMARK_LOW;
1235 cgs_get_active_displays_info(hwmgr->device, &info);
1236 data->display_timing.num_existing_displays = info.display_count;
1238 /* stutter mode is not supported on CI */
1240 /* decide strobe mode*/
1241 memory_level->StrobeEnable = (mclk_strobe_mode_threshold != 0) &&
1242 (memory_clock <= mclk_strobe_mode_threshold);
1244 /* decide EDC mode and memory clock ratio*/
1245 if (data->is_memory_gddr5) {
1246 memory_level->StrobeRatio = ci_get_mclk_frequency_ratio(memory_clock,
1247 memory_level->StrobeEnable);
1249 if ((mclk_edc_enable_threshold != 0) &&
1250 (memory_clock > mclk_edc_enable_threshold)) {
1251 memory_level->EdcReadEnable = 1;
1254 if ((mclk_edc_wr_enable_threshold != 0) &&
1255 (memory_clock > mclk_edc_wr_enable_threshold)) {
1256 memory_level->EdcWriteEnable = 1;
1259 if (memory_level->StrobeEnable) {
1260 if (ci_get_mclk_frequency_ratio(memory_clock, 1) >=
1261 ((cgs_read_register(hwmgr->device, mmMC_SEQ_MISC7) >> 16) & 0xf))
1262 dll_state_on = ((cgs_read_register(hwmgr->device, mmMC_SEQ_MISC5) >> 1) & 0x1) ? 1 : 0;
1264 dll_state_on = ((cgs_read_register(hwmgr->device, mmMC_SEQ_MISC6) >> 1) & 0x1) ? 1 : 0;
1266 dll_state_on = data->dll_default_on;
1268 memory_level->StrobeRatio =
1269 ci_get_ddr3_mclk_frequency_ratio(memory_clock);
1270 dll_state_on = ((cgs_read_register(hwmgr->device, mmMC_SEQ_MISC5) >> 1) & 0x1) ? 1 : 0;
1273 result = ci_calculate_mclk_params(hwmgr,
1274 memory_clock, memory_level, memory_level->StrobeEnable, dll_state_on);
1277 memory_level->MinVddc = PP_HOST_TO_SMC_UL(memory_level->MinVddc * VOLTAGE_SCALE);
1278 CONVERT_FROM_HOST_TO_SMC_UL(memory_level->MinVddcPhases);
1279 memory_level->MinVddci = PP_HOST_TO_SMC_UL(memory_level->MinVddci * VOLTAGE_SCALE);
1280 memory_level->MinMvdd = PP_HOST_TO_SMC_UL(memory_level->MinMvdd * VOLTAGE_SCALE);
1281 /* MCLK frequency in units of 10KHz*/
1282 CONVERT_FROM_HOST_TO_SMC_UL(memory_level->MclkFrequency);
1283 /* Indicates maximum activity level for this performance level.*/
1284 CONVERT_FROM_HOST_TO_SMC_US(memory_level->ActivityLevel);
1285 CONVERT_FROM_HOST_TO_SMC_UL(memory_level->MpllFuncCntl);
1286 CONVERT_FROM_HOST_TO_SMC_UL(memory_level->MpllFuncCntl_1);
1287 CONVERT_FROM_HOST_TO_SMC_UL(memory_level->MpllFuncCntl_2);
1288 CONVERT_FROM_HOST_TO_SMC_UL(memory_level->MpllAdFuncCntl);
1289 CONVERT_FROM_HOST_TO_SMC_UL(memory_level->MpllDqFuncCntl);
1290 CONVERT_FROM_HOST_TO_SMC_UL(memory_level->MclkPwrmgtCntl);
1291 CONVERT_FROM_HOST_TO_SMC_UL(memory_level->DllCntl);
1292 CONVERT_FROM_HOST_TO_SMC_UL(memory_level->MpllSs1);
1293 CONVERT_FROM_HOST_TO_SMC_UL(memory_level->MpllSs2);
1299 static int ci_populate_all_memory_levels(struct pp_hwmgr *hwmgr)
1301 struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
1302 struct ci_smumgr *smu_data = (struct ci_smumgr *)(hwmgr->smu_backend);
1303 struct smu7_dpm_table *dpm_table = &data->dpm_table;
1305 struct cgs_system_info sys_info = {0};
1308 uint32_t level_array_address = smu_data->dpm_table_start + offsetof(SMU7_Discrete_DpmTable, MemoryLevel);
1309 uint32_t level_array_size = sizeof(SMU7_Discrete_MemoryLevel) * SMU7_MAX_LEVELS_MEMORY;
1310 SMU7_Discrete_MemoryLevel *levels = smu_data->smc_state_table.MemoryLevel;
1313 memset(levels, 0x00, level_array_size);
1315 for (i = 0; i < dpm_table->mclk_table.count; i++) {
1316 PP_ASSERT_WITH_CODE((0 != dpm_table->mclk_table.dpm_levels[i].value),
1317 "can not populate memory level as memory clock is zero", return -EINVAL);
1318 result = ci_populate_single_memory_level(hwmgr, dpm_table->mclk_table.dpm_levels[i].value,
1319 &(smu_data->smc_state_table.MemoryLevel[i]));
1324 smu_data->smc_state_table.MemoryLevel[0].EnabledForActivity = 1;
1326 sys_info.size = sizeof(struct cgs_system_info);
1327 sys_info.info_id = CGS_SYSTEM_INFO_PCIE_DEV;
1328 cgs_query_system_info(hwmgr->device, &sys_info);
1329 dev_id = (uint32_t)sys_info.value;
1331 if ((dpm_table->mclk_table.count >= 2)
1332 && ((dev_id == 0x67B0) || (dev_id == 0x67B1))) {
1333 smu_data->smc_state_table.MemoryLevel[1].MinVddci =
1334 smu_data->smc_state_table.MemoryLevel[0].MinVddci;
1335 smu_data->smc_state_table.MemoryLevel[1].MinMvdd =
1336 smu_data->smc_state_table.MemoryLevel[0].MinMvdd;
1338 smu_data->smc_state_table.MemoryLevel[0].ActivityLevel = 0x1F;
1339 CONVERT_FROM_HOST_TO_SMC_US(smu_data->smc_state_table.MemoryLevel[0].ActivityLevel);
1341 smu_data->smc_state_table.MemoryDpmLevelCount = (uint8_t)dpm_table->mclk_table.count;
1342 data->dpm_level_enable_mask.mclk_dpm_enable_mask = phm_get_dpm_level_enable_mask_value(&dpm_table->mclk_table);
1343 smu_data->smc_state_table.MemoryLevel[dpm_table->mclk_table.count-1].DisplayWatermark = PPSMC_DISPLAY_WATERMARK_HIGH;
1345 result = ci_copy_bytes_to_smc(hwmgr,
1346 level_array_address, (uint8_t *)levels, (uint32_t)level_array_size,
1352 static int ci_populate_mvdd_value(struct pp_hwmgr *hwmgr, uint32_t mclk,
1353 SMU7_Discrete_VoltageLevel *voltage)
1355 const struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
1359 if (SMU7_VOLTAGE_CONTROL_NONE != data->mvdd_control) {
1360 /* find the first MVDD entry whose clock is greater than or equal to the requested clock */
1361 for (i = 0; i < hwmgr->dyn_state.mvdd_dependency_on_mclk->count; i++) {
1362 if (mclk <= hwmgr->dyn_state.mvdd_dependency_on_mclk->entries[i].clk) {
1363 /* Always round to higher voltage. */
1364 voltage->Voltage = data->mvdd_voltage_table.entries[i].value;
1369 PP_ASSERT_WITH_CODE(i < hwmgr->dyn_state.mvdd_dependency_on_mclk->count,
1370 "MVDD Voltage is outside the supported range.", return -EINVAL);
1379 static int ci_populate_smc_acpi_level(struct pp_hwmgr *hwmgr,
1380 SMU7_Discrete_DpmTable *table)
1383 const struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
1384 struct pp_atomctrl_clock_dividers_vi dividers;
1386 SMU7_Discrete_VoltageLevel voltage_level;
1387 uint32_t spll_func_cntl = data->clock_registers.vCG_SPLL_FUNC_CNTL;
1388 uint32_t spll_func_cntl_2 = data->clock_registers.vCG_SPLL_FUNC_CNTL_2;
1389 uint32_t dll_cntl = data->clock_registers.vDLL_CNTL;
1390 uint32_t mclk_pwrmgt_cntl = data->clock_registers.vMCLK_PWRMGT_CNTL;
1393 /* The ACPI state should not do DPM on DC (or ever).*/
1394 table->ACPILevel.Flags &= ~PPSMC_SWSTATE_FLAG_DC;
1396 if (data->acpi_vddc)
1397 table->ACPILevel.MinVddc = PP_HOST_TO_SMC_UL(data->acpi_vddc * VOLTAGE_SCALE);
1399 table->ACPILevel.MinVddc = PP_HOST_TO_SMC_UL(data->min_vddc_in_pptable * VOLTAGE_SCALE);
1401 table->ACPILevel.MinVddcPhases = data->vddc_phase_shed_control ? 0 : 1;
1402 /* use the reference clock as the ACPI level SCLK */
1403 table->ACPILevel.SclkFrequency = atomctrl_get_reference_clock(hwmgr);
1405 /* get the engine clock dividers for this clock value*/
1406 result = atomctrl_get_engine_pll_dividers_vi(hwmgr,
1407 table->ACPILevel.SclkFrequency, &dividers);
1409 PP_ASSERT_WITH_CODE(result == 0,
1410 "Error retrieving Engine Clock dividers from VBIOS.", return result);
1412 /* divider ID for required SCLK*/
1413 table->ACPILevel.SclkDid = (uint8_t)dividers.pll_post_divider;
1414 table->ACPILevel.DisplayWatermark = PPSMC_DISPLAY_WATERMARK_LOW;
1415 table->ACPILevel.DeepSleepDivId = 0;
1417 spll_func_cntl = PHM_SET_FIELD(spll_func_cntl,
1418 CG_SPLL_FUNC_CNTL, SPLL_PWRON, 0);
1419 spll_func_cntl = PHM_SET_FIELD(spll_func_cntl,
1420 CG_SPLL_FUNC_CNTL, SPLL_RESET, 1);
1421 spll_func_cntl_2 = PHM_SET_FIELD(spll_func_cntl_2,
1422 CG_SPLL_FUNC_CNTL_2, SCLK_MUX_SEL, 4);
1424 table->ACPILevel.CgSpllFuncCntl = spll_func_cntl;
1425 table->ACPILevel.CgSpllFuncCntl2 = spll_func_cntl_2;
1426 table->ACPILevel.CgSpllFuncCntl3 = data->clock_registers.vCG_SPLL_FUNC_CNTL_3;
1427 table->ACPILevel.CgSpllFuncCntl4 = data->clock_registers.vCG_SPLL_FUNC_CNTL_4;
1428 table->ACPILevel.SpllSpreadSpectrum = data->clock_registers.vCG_SPLL_SPREAD_SPECTRUM;
1429 table->ACPILevel.SpllSpreadSpectrum2 = data->clock_registers.vCG_SPLL_SPREAD_SPECTRUM_2;
1430 table->ACPILevel.CcPwrDynRm = 0;
1431 table->ACPILevel.CcPwrDynRm1 = 0;
1433 /* For various features to be enabled/disabled while this level is active.*/
1434 CONVERT_FROM_HOST_TO_SMC_UL(table->ACPILevel.Flags);
1435 /* SCLK frequency in units of 10KHz*/
1436 CONVERT_FROM_HOST_TO_SMC_UL(table->ACPILevel.SclkFrequency);
1437 CONVERT_FROM_HOST_TO_SMC_UL(table->ACPILevel.CgSpllFuncCntl);
1438 CONVERT_FROM_HOST_TO_SMC_UL(table->ACPILevel.CgSpllFuncCntl2);
1439 CONVERT_FROM_HOST_TO_SMC_UL(table->ACPILevel.CgSpllFuncCntl3);
1440 CONVERT_FROM_HOST_TO_SMC_UL(table->ACPILevel.CgSpllFuncCntl4);
1441 CONVERT_FROM_HOST_TO_SMC_UL(table->ACPILevel.SpllSpreadSpectrum);
1442 CONVERT_FROM_HOST_TO_SMC_UL(table->ACPILevel.SpllSpreadSpectrum2);
1443 CONVERT_FROM_HOST_TO_SMC_UL(table->ACPILevel.CcPwrDynRm);
1444 CONVERT_FROM_HOST_TO_SMC_UL(table->ACPILevel.CcPwrDynRm1);
1447 /* table->MemoryACPILevel.MinVddcPhases = table->ACPILevel.MinVddcPhases;*/
1448 table->MemoryACPILevel.MinVddc = table->ACPILevel.MinVddc;
1449 table->MemoryACPILevel.MinVddcPhases = table->ACPILevel.MinVddcPhases;
1451 if (SMU7_VOLTAGE_CONTROL_NONE == data->vddci_control)
1452 table->MemoryACPILevel.MinVddci = table->MemoryACPILevel.MinVddc;
1454 if (data->acpi_vddci != 0)
1455 table->MemoryACPILevel.MinVddci = PP_HOST_TO_SMC_UL(data->acpi_vddci * VOLTAGE_SCALE);
1457 table->MemoryACPILevel.MinVddci = PP_HOST_TO_SMC_UL(data->min_vddci_in_pptable * VOLTAGE_SCALE);
1460 if (0 == ci_populate_mvdd_value(hwmgr, 0, &voltage_level))
1461 table->MemoryACPILevel.MinMvdd =
1462 PP_HOST_TO_SMC_UL(voltage_level.Voltage * VOLTAGE_SCALE);
1464 table->MemoryACPILevel.MinMvdd = 0;
1466 /* Force reset on DLL*/
1467 mclk_pwrmgt_cntl = PHM_SET_FIELD(mclk_pwrmgt_cntl,
1468 MCLK_PWRMGT_CNTL, MRDCK0_RESET, 0x1);
1469 mclk_pwrmgt_cntl = PHM_SET_FIELD(mclk_pwrmgt_cntl,
1470 MCLK_PWRMGT_CNTL, MRDCK1_RESET, 0x1);
1472 /* Disable DLL in ACPIState*/
1473 mclk_pwrmgt_cntl = PHM_SET_FIELD(mclk_pwrmgt_cntl,
1474 MCLK_PWRMGT_CNTL, MRDCK0_PDNB, 0);
1475 mclk_pwrmgt_cntl = PHM_SET_FIELD(mclk_pwrmgt_cntl,
1476 MCLK_PWRMGT_CNTL, MRDCK1_PDNB, 0);
1478 /* Enable DLL bypass signal*/
1479 dll_cntl = PHM_SET_FIELD(dll_cntl,
1480 DLL_CNTL, MRDCK0_BYPASS, 0);
1481 dll_cntl = PHM_SET_FIELD(dll_cntl,
1482 DLL_CNTL, MRDCK1_BYPASS, 0);
1484 table->MemoryACPILevel.DllCntl =
1485 PP_HOST_TO_SMC_UL(dll_cntl);
1486 table->MemoryACPILevel.MclkPwrmgtCntl =
1487 PP_HOST_TO_SMC_UL(mclk_pwrmgt_cntl);
1488 table->MemoryACPILevel.MpllAdFuncCntl =
1489 PP_HOST_TO_SMC_UL(data->clock_registers.vMPLL_AD_FUNC_CNTL);
1490 table->MemoryACPILevel.MpllDqFuncCntl =
1491 PP_HOST_TO_SMC_UL(data->clock_registers.vMPLL_DQ_FUNC_CNTL);
1492 table->MemoryACPILevel.MpllFuncCntl =
1493 PP_HOST_TO_SMC_UL(data->clock_registers.vMPLL_FUNC_CNTL);
1494 table->MemoryACPILevel.MpllFuncCntl_1 =
1495 PP_HOST_TO_SMC_UL(data->clock_registers.vMPLL_FUNC_CNTL_1);
1496 table->MemoryACPILevel.MpllFuncCntl_2 =
1497 PP_HOST_TO_SMC_UL(data->clock_registers.vMPLL_FUNC_CNTL_2);
1498 table->MemoryACPILevel.MpllSs1 =
1499 PP_HOST_TO_SMC_UL(data->clock_registers.vMPLL_SS1);
1500 table->MemoryACPILevel.MpllSs2 =
1501 PP_HOST_TO_SMC_UL(data->clock_registers.vMPLL_SS2);
1503 table->MemoryACPILevel.EnabledForThrottle = 0;
1504 table->MemoryACPILevel.EnabledForActivity = 0;
1505 table->MemoryACPILevel.UpH = 0;
1506 table->MemoryACPILevel.DownH = 100;
1507 table->MemoryACPILevel.VoltageDownH = 0;
1508 /* Indicates maximum activity level for this performance level.*/
1509 table->MemoryACPILevel.ActivityLevel = PP_HOST_TO_SMC_US((uint16_t)data->mclk_activity_target);
1511 table->MemoryACPILevel.StutterEnable = 0;
1512 table->MemoryACPILevel.StrobeEnable = 0;
1513 table->MemoryACPILevel.EdcReadEnable = 0;
1514 table->MemoryACPILevel.EdcWriteEnable = 0;
1515 table->MemoryACPILevel.RttEnable = 0;
1520 static int ci_populate_smc_uvd_level(struct pp_hwmgr *hwmgr,
1521 SMU7_Discrete_DpmTable *table)
1525 struct pp_atomctrl_clock_dividers_vi dividers;
1526 struct phm_uvd_clock_voltage_dependency_table *uvd_table =
1527 hwmgr->dyn_state.uvd_clock_voltage_dependency_table;
1529 table->UvdLevelCount = (uint8_t)(uvd_table->count);
1531 for (count = 0; count < table->UvdLevelCount; count++) {
1532 table->UvdLevel[count].VclkFrequency =
1533 uvd_table->entries[count].vclk;
1534 table->UvdLevel[count].DclkFrequency =
1535 uvd_table->entries[count].dclk;
1536 table->UvdLevel[count].MinVddc =
1537 uvd_table->entries[count].v * VOLTAGE_SCALE;
1538 table->UvdLevel[count].MinVddcPhases = 1;
1540 result = atomctrl_get_dfs_pll_dividers_vi(hwmgr,
1541 table->UvdLevel[count].VclkFrequency, &dividers);
1542 PP_ASSERT_WITH_CODE((0 == result),
1543 "can not find divide id for Vclk clock", return result);
1545 table->UvdLevel[count].VclkDivider = (uint8_t)dividers.pll_post_divider;
1547 result = atomctrl_get_dfs_pll_dividers_vi(hwmgr,
1548 table->UvdLevel[count].DclkFrequency, &dividers);
1549 PP_ASSERT_WITH_CODE((0 == result),
1550 "can not find divide id for Dclk clock", return result);
1552 table->UvdLevel[count].DclkDivider = (uint8_t)dividers.pll_post_divider;
1553 CONVERT_FROM_HOST_TO_SMC_UL(table->UvdLevel[count].VclkFrequency);
1554 CONVERT_FROM_HOST_TO_SMC_UL(table->UvdLevel[count].DclkFrequency);
1555 CONVERT_FROM_HOST_TO_SMC_US(table->UvdLevel[count].MinVddc);
1561 static int ci_populate_smc_vce_level(struct pp_hwmgr *hwmgr,
1562 SMU7_Discrete_DpmTable *table)
1564 int result = -EINVAL;
1566 struct pp_atomctrl_clock_dividers_vi dividers;
1567 struct phm_vce_clock_voltage_dependency_table *vce_table =
1568 hwmgr->dyn_state.vce_clock_voltage_dependency_table;
1570 table->VceLevelCount = (uint8_t)(vce_table->count);
1571 table->VceBootLevel = 0;
1573 for (count = 0; count < table->VceLevelCount; count++) {
1574 table->VceLevel[count].Frequency = vce_table->entries[count].evclk;
1575 table->VceLevel[count].MinVoltage =
1576 vce_table->entries[count].v * VOLTAGE_SCALE;
1577 table->VceLevel[count].MinPhases = 1;
1579 result = atomctrl_get_dfs_pll_dividers_vi(hwmgr,
1580 table->VceLevel[count].Frequency, &dividers);
1581 PP_ASSERT_WITH_CODE((0 == result),
1582 "can not find divide id for VCE engine clock",
1585 table->VceLevel[count].Divider = (uint8_t)dividers.pll_post_divider;
1587 CONVERT_FROM_HOST_TO_SMC_UL(table->VceLevel[count].Frequency);
1588 CONVERT_FROM_HOST_TO_SMC_US(table->VceLevel[count].MinVoltage);
1593 static int ci_populate_smc_acp_level(struct pp_hwmgr *hwmgr,
1594 SMU7_Discrete_DpmTable *table)
1596 int result = -EINVAL;
1598 struct pp_atomctrl_clock_dividers_vi dividers;
1599 struct phm_acp_clock_voltage_dependency_table *acp_table =
1600 hwmgr->dyn_state.acp_clock_voltage_dependency_table;
1602 table->AcpLevelCount = (uint8_t)(acp_table->count);
1603 table->AcpBootLevel = 0;
1605 for (count = 0; count < table->AcpLevelCount; count++) {
1606 table->AcpLevel[count].Frequency = acp_table->entries[count].acpclk;
1607 table->AcpLevel[count].MinVoltage = acp_table->entries[count].v;
1608 table->AcpLevel[count].MinPhases = 1;
1610 result = atomctrl_get_dfs_pll_dividers_vi(hwmgr,
1611 table->AcpLevel[count].Frequency, &dividers);
1612 PP_ASSERT_WITH_CODE((0 == result),
1613 "can not find divide id for engine clock", return result);
1615 table->AcpLevel[count].Divider = (uint8_t)dividers.pll_post_divider;
1617 CONVERT_FROM_HOST_TO_SMC_UL(table->AcpLevel[count].Frequency);
1618 CONVERT_FROM_HOST_TO_SMC_US(table->AcpLevel[count].MinVoltage);
1623 static int ci_populate_smc_samu_level(struct pp_hwmgr *hwmgr,
1624 SMU7_Discrete_DpmTable *table)
1626 int result = -EINVAL;
1628 struct pp_atomctrl_clock_dividers_vi dividers;
1629 struct phm_samu_clock_voltage_dependency_table *samu_table =
1630 hwmgr->dyn_state.samu_clock_voltage_dependency_table;
1632 table->SamuBootLevel = 0;
1633 table->SamuLevelCount = (uint8_t)(samu_table->count);
1635 for (count = 0; count < table->SamuLevelCount; count++) {
1636 table->SamuLevel[count].Frequency = samu_table->entries[count].samclk;
1637 table->SamuLevel[count].MinVoltage = samu_table->entries[count].v * VOLTAGE_SCALE;
1638 table->SamuLevel[count].MinPhases = 1;
1640 /* retrieve divider value from VBIOS */
1641 result = atomctrl_get_dfs_pll_dividers_vi(hwmgr,
1642 table->SamuLevel[count].Frequency, &dividers);
1643 PP_ASSERT_WITH_CODE((0 == result),
1644 "can not find divide id for samu clock", return result);
1646 table->SamuLevel[count].Divider = (uint8_t)dividers.pll_post_divider;
1648 CONVERT_FROM_HOST_TO_SMC_UL(table->SamuLevel[count].Frequency);
1649 CONVERT_FROM_HOST_TO_SMC_US(table->SamuLevel[count].MinVoltage);
1654 static int ci_populate_memory_timing_parameters(
1655 struct pp_hwmgr *hwmgr,
1656 uint32_t engine_clock,
1657 uint32_t memory_clock,
1658 struct SMU7_Discrete_MCArbDramTimingTableEntry *arb_regs
1661 uint32_t dramTiming;
1662 uint32_t dramTiming2;
1666 result = atomctrl_set_engine_dram_timings_rv770(hwmgr,
1667 engine_clock, memory_clock);
1669 PP_ASSERT_WITH_CODE(result == 0,
1670 "Error calling VBIOS to set DRAM_TIMING.", return result);
1672 dramTiming = cgs_read_register(hwmgr->device, mmMC_ARB_DRAM_TIMING);
1673 dramTiming2 = cgs_read_register(hwmgr->device, mmMC_ARB_DRAM_TIMING2);
1674 burstTime = PHM_READ_FIELD(hwmgr->device, MC_ARB_BURST_TIME, STATE0);
1676 arb_regs->McArbDramTiming = PP_HOST_TO_SMC_UL(dramTiming);
1677 arb_regs->McArbDramTiming2 = PP_HOST_TO_SMC_UL(dramTiming2);
1678 arb_regs->McArbBurstTime = (uint8_t)burstTime;
1683 static int ci_program_memory_timing_parameters(struct pp_hwmgr *hwmgr)
1685 struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
1686 struct ci_smumgr *smu_data = (struct ci_smumgr *)(hwmgr->smu_backend);
1688 SMU7_Discrete_MCArbDramTimingTable arb_regs;
1691 memset(&arb_regs, 0x00, sizeof(SMU7_Discrete_MCArbDramTimingTable));
1693 for (i = 0; i < data->dpm_table.sclk_table.count; i++) {
1694 for (j = 0; j < data->dpm_table.mclk_table.count; j++) {
1695 result = ci_populate_memory_timing_parameters
1696 (hwmgr, data->dpm_table.sclk_table.dpm_levels[i].value,
1697 data->dpm_table.mclk_table.dpm_levels[j].value,
1698 &arb_regs.entries[i][j]);
1706 result = ci_copy_bytes_to_smc(
1708 smu_data->arb_table_start,
1709 (uint8_t *)&arb_regs,
1710 sizeof(SMU7_Discrete_MCArbDramTimingTable),
1718 static int ci_populate_smc_boot_level(struct pp_hwmgr *hwmgr,
1719 SMU7_Discrete_DpmTable *table)
1722 struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
1723 struct ci_smumgr *smu_data = (struct ci_smumgr *)(hwmgr->smu_backend);
1725 table->GraphicsBootLevel = 0;
1726 table->MemoryBootLevel = 0;
1728 /* find boot level from dpm table*/
1729 result = phm_find_boot_level(&(data->dpm_table.sclk_table),
1730 data->vbios_boot_state.sclk_bootup_value,
1731 (uint32_t *)&(smu_data->smc_state_table.GraphicsBootLevel));
1734 smu_data->smc_state_table.GraphicsBootLevel = 0;
1735 pr_err("VBIOS did not find boot engine clock value in dependency table. Using Graphics DPM level 0!\n");
1739 result = phm_find_boot_level(&(data->dpm_table.mclk_table),
1740 data->vbios_boot_state.mclk_bootup_value,
1741 (uint32_t *)&(smu_data->smc_state_table.MemoryBootLevel));
1744 smu_data->smc_state_table.MemoryBootLevel = 0;
1745 pr_err("VBIOS did not find boot engine clock value in dependency table. Using Memory DPM level 0!\n");
1749 table->BootVddc = data->vbios_boot_state.vddc_bootup_value;
1750 table->BootVddci = data->vbios_boot_state.vddci_bootup_value;
1751 table->BootMVdd = data->vbios_boot_state.mvdd_bootup_value;
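/*
 * Copy the SMC-visible (s0/s1) addresses of every valid MC register into
 * the SMC MC register table header, bounded by
 * SMU7_DISCRETE_MC_REGISTER_ARRAY_SIZE.
 */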
1756 static int ci_populate_mc_reg_address(struct pp_hwmgr *hwmgr,
1757 SMU7_Discrete_MCRegisters *mc_reg_table)
1759 const struct ci_smumgr *smu_data = (struct ci_smumgr *)hwmgr->smu_backend;
1763 for (i = 0, j = 0; j < smu_data->mc_reg_table.last; j++) {
1764 if (smu_data->mc_reg_table.validflag & (1 << j)) {
1765 PP_ASSERT_WITH_CODE(i < SMU7_DISCRETE_MC_REGISTER_ARRAY_SIZE,
1766 "Index of mc_reg_table->address[] array out of boundary", return -EINVAL);
1767 mc_reg_table->address[i].s0 =
1768 PP_HOST_TO_SMC_US(smu_data->mc_reg_table.mc_reg_address[j].s0);
1769 mc_reg_table->address[i].s1 =
1770 PP_HOST_TO_SMC_US(smu_data->mc_reg_table.mc_reg_address[j].s1);
1775 mc_reg_table->last = (uint8_t)i;
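/*
 * Copy the valid (per valid_flag) MC data words of one table entry into an
 * SMC MCRegisterSet, converting each value to SMC byte order.
 */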
1780 static void ci_convert_mc_registers(
1781 const struct ci_mc_reg_entry *entry,
1782 SMU7_Discrete_MCRegisterSet *data,
1783 uint32_t num_entries, uint32_t valid_flag)
1787 for (i = 0, j = 0; j < num_entries; j++) {
1788 if (valid_flag & (1 << j)) {
1789 data->value[i] = PP_HOST_TO_SMC_UL(entry->mc_data[j]);
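/*
 * Find the first MC register entry whose mclk_max covers the requested
 * memory clock (falling back to the last entry) and convert its valid
 * registers into the given SMC register set.
 */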
1795 static int ci_convert_mc_reg_table_entry_to_smc(
1796 struct pp_hwmgr *hwmgr,
1797 const uint32_t memory_clock,
1798 SMU7_Discrete_MCRegisterSet *mc_reg_table_data
1801 struct ci_smumgr *smu_data = (struct ci_smumgr *)(hwmgr->smu_backend);
1804 for (i = 0; i < smu_data->mc_reg_table.num_entries; i++) {
1806 smu_data->mc_reg_table.mc_reg_table_entry[i].mclk_max) {
1811 if ((i == smu_data->mc_reg_table.num_entries) && (i > 0))
1814 ci_convert_mc_registers(&smu_data->mc_reg_table.mc_reg_table_entry[i],
1815 mc_reg_table_data, smu_data->mc_reg_table.last,
1816 smu_data->mc_reg_table.validflag);
1821 static int ci_convert_mc_reg_table_to_smc(struct pp_hwmgr *hwmgr,
1822 SMU7_Discrete_MCRegisters *mc_regs)
1825 struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
1829 for (i = 0; i < data->dpm_table.mclk_table.count; i++) {
1830 res = ci_convert_mc_reg_table_entry_to_smc(
1832 data->dpm_table.mclk_table.dpm_levels[i].value,
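/*
 * Regenerate the per-mclk MC register sets and upload only the data
 * portion (not the address header) to SMC RAM; skipped unless an
 * overdrive MCLK update is pending.
 */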
1843 static int ci_update_and_upload_mc_reg_table(struct pp_hwmgr *hwmgr)
1845 struct ci_smumgr *smu_data = (struct ci_smumgr *)(hwmgr->smu_backend);
1846 struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
1850 if (0 == (data->need_update_smu7_dpm_table & DPMTABLE_OD_UPDATE_MCLK))
1854 memset(&smu_data->mc_regs, 0, sizeof(SMU7_Discrete_MCRegisters));
1856 result = ci_convert_mc_reg_table_to_smc(hwmgr, &(smu_data->mc_regs));
1861 address = smu_data->mc_reg_table_start + (uint32_t)offsetof(SMU7_Discrete_MCRegisters, data[0]);
1863 return ci_copy_bytes_to_smc(hwmgr, address,
1864 (uint8_t *)&smu_data->mc_regs.data[0],
1865 sizeof(SMU7_Discrete_MCRegisterSet) * data->dpm_table.mclk_table.count,
1869 static int ci_populate_initial_mc_reg_table(struct pp_hwmgr *hwmgr)
1872 struct ci_smumgr *smu_data = (struct ci_smumgr *)(hwmgr->smu_backend);
1874 memset(&smu_data->mc_regs, 0x00, sizeof(SMU7_Discrete_MCRegisters));
1875 result = ci_populate_mc_reg_address(hwmgr, &(smu_data->mc_regs));
1876 PP_ASSERT_WITH_CODE(0 == result,
1877 "Failed to initialize MCRegTable for the MC register addresses!", return result;);
1879 result = ci_convert_mc_reg_table_to_smc(hwmgr, &smu_data->mc_regs);
1880 PP_ASSERT_WITH_CODE(0 == result,
1881 "Failed to initialize MCRegTable for driver state!", return result;);
1883 return ci_copy_bytes_to_smc(hwmgr, smu_data->mc_reg_table_start,
1884 (uint8_t *)&smu_data->mc_regs, sizeof(SMU7_Discrete_MCRegisters), SMC_RAM_END);
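/*
 * Derive the graphics and memory boot DPM levels from the vddc dependency
 * tables: pick the lowest entry whose clock is at or above the VBIOS boot
 * clock.
 */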
1887 static int ci_populate_smc_initial_state(struct pp_hwmgr *hwmgr)
1889 struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
1890 struct ci_smumgr *smu_data = (struct ci_smumgr *)(hwmgr->smu_backend);
1891 uint8_t count, level;
1893 count = (uint8_t)(hwmgr->dyn_state.vddc_dependency_on_sclk->count);
1895 for (level = 0; level < count; level++) {
1896 if (hwmgr->dyn_state.vddc_dependency_on_sclk->entries[level].clk
1897 >= data->vbios_boot_state.sclk_bootup_value) {
1898 smu_data->smc_state_table.GraphicsBootLevel = level;
1903 count = (uint8_t)(hwmgr->dyn_state.vddc_dependency_on_mclk->count);
1905 for (level = 0; level < count; level++) {
1906 if (hwmgr->dyn_state.vddc_dependency_on_mclk->entries[level].clk
1907 >= data->vbios_boot_state.mclk_bootup_value) {
1908 smu_data->smc_state_table.MemoryBootLevel = level;
1916 static int ci_populate_smc_svi2_config(struct pp_hwmgr *hwmgr,
1917 SMU7_Discrete_DpmTable *table)
1919 struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
1921 if (SMU7_VOLTAGE_CONTROL_BY_SVID2 == data->voltage_control)
1922 table->SVI2Enable = 1;
1924 table->SVI2Enable = 0;
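/*
 * Release the SMC from reset with its clock enabled and wait for the
 * firmware to report that interrupts are enabled.
 */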
1928 static int ci_start_smc(struct pp_hwmgr *hwmgr)
1930 /* set SMC instruction start point at 0x0 */
1931 ci_program_jump_on_start(hwmgr);
1933 /* enable smc clock */
1934 PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, SMC_SYSCON_CLOCK_CNTL_0, ck_disable, 0);
1936 PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, SMC_SYSCON_RESET_CNTL, rst_reg, 0);
1938 PHM_WAIT_INDIRECT_FIELD(hwmgr, SMC_IND, FIRMWARE_FLAGS,
1939 INTERRUPTS_ENABLED, 1);
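/*
 * Build the complete SMU7 discrete DPM table (voltage tables, graphics,
 * memory, link, ACPI, UVD/VCE/ACP/SAMU levels, boot state, BAPM and
 * thermal limits), convert it to SMC byte order, upload it together with
 * the MC register table and PM fuses, then start the SMC.
 */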
1944 static int ci_init_smc_table(struct pp_hwmgr *hwmgr)
1947 struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
1948 struct ci_smumgr *smu_data = (struct ci_smumgr *)(hwmgr->smu_backend);
1949 SMU7_Discrete_DpmTable *table = &(smu_data->smc_state_table);
1950 struct pp_atomctrl_gpio_pin_assignment gpio_pin;
1953 ci_initialize_power_tune_defaults(hwmgr);
1954 memset(&(smu_data->smc_state_table), 0x00, sizeof(smu_data->smc_state_table));
1956 if (SMU7_VOLTAGE_CONTROL_NONE != data->voltage_control)
1957 ci_populate_smc_voltage_tables(hwmgr, table);
1959 if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
1960 PHM_PlatformCaps_AutomaticDCTransition))
1961 table->SystemFlags |= PPSMC_SYSTEMFLAG_GPIO_DC;
1964 if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
1965 PHM_PlatformCaps_StepVddc))
1966 table->SystemFlags |= PPSMC_SYSTEMFLAG_STEPVDDC;
1968 if (data->is_memory_gddr5)
1969 table->SystemFlags |= PPSMC_SYSTEMFLAG_GDDR5;
1971 if (data->ulv_supported) {
1972 result = ci_populate_ulv_state(hwmgr, &(table->Ulv));
1973 PP_ASSERT_WITH_CODE(0 == result,
1974 "Failed to initialize ULV state!", return result);
1976 cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC,
1977 ixCG_ULV_PARAMETER, 0x40035);
1980 result = ci_populate_all_graphic_levels(hwmgr);
1981 PP_ASSERT_WITH_CODE(0 == result,
1982 "Failed to initialize Graphics Level!", return result);
1984 result = ci_populate_all_memory_levels(hwmgr);
1985 PP_ASSERT_WITH_CODE(0 == result,
1986 "Failed to initialize Memory Level!", return result);
1988 result = ci_populate_smc_link_level(hwmgr, table);
1989 PP_ASSERT_WITH_CODE(0 == result,
1990 "Failed to initialize Link Level!", return result);
1992 result = ci_populate_smc_acpi_level(hwmgr, table);
1993 PP_ASSERT_WITH_CODE(0 == result,
1994 "Failed to initialize ACPI Level!", return result);
1996 result = ci_populate_smc_vce_level(hwmgr, table);
1997 PP_ASSERT_WITH_CODE(0 == result,
1998 "Failed to initialize VCE Level!", return result);
2000 result = ci_populate_smc_acp_level(hwmgr, table);
2001 PP_ASSERT_WITH_CODE(0 == result,
2002 "Failed to initialize ACP Level!", return result);
2004 result = ci_populate_smc_samu_level(hwmgr, table);
2005 PP_ASSERT_WITH_CODE(0 == result,
2006 "Failed to initialize SAMU Level!", return result);
2008 /* Since only the initial state is completely set up at this point (the other states are just copies of the boot state) we only */
2009 /* need to populate the ARB settings for the initial state. */
2010 result = ci_program_memory_timing_parameters(hwmgr);
2011 PP_ASSERT_WITH_CODE(0 == result,
2012 "Failed to Write ARB settings for the initial state.", return result);
2014 result = ci_populate_smc_uvd_level(hwmgr, table);
2015 PP_ASSERT_WITH_CODE(0 == result,
2016 "Failed to initialize UVD Level!", return result);
2018 table->UvdBootLevel = 0;
2019 table->VceBootLevel = 0;
2020 table->AcpBootLevel = 0;
2021 table->SamuBootLevel = 0;
2023 table->GraphicsBootLevel = 0;
2024 table->MemoryBootLevel = 0;
2026 result = ci_populate_smc_boot_level(hwmgr, table);
2027 PP_ASSERT_WITH_CODE(0 == result,
2028 "Failed to initialize Boot Level!", return result);
2030 result = ci_populate_smc_initial_state(hwmgr);
2031 PP_ASSERT_WITH_CODE(0 == result, "Failed to initialize Boot State!", return result);
2033 result = ci_populate_bapm_parameters_in_dpm_table(hwmgr);
2034 PP_ASSERT_WITH_CODE(0 == result, "Failed to populate BAPM Parameters!", return result);
2036 table->UVDInterval = 1;
2037 table->VCEInterval = 1;
2038 table->ACPInterval = 1;
2039 table->SAMUInterval = 1;
2040 table->GraphicsVoltageChangeEnable = 1;
2041 table->GraphicsThermThrottleEnable = 1;
2042 table->GraphicsInterval = 1;
2043 table->VoltageInterval = 1;
2044 table->ThermalInterval = 1;
2046 table->TemperatureLimitHigh =
2047 (data->thermal_temp_setting.temperature_high *
2048 SMU7_Q88_FORMAT_CONVERSION_UNIT) / PP_TEMPERATURE_UNITS_PER_CENTIGRADES;
2049 table->TemperatureLimitLow =
2050 (data->thermal_temp_setting.temperature_low *
2051 SMU7_Q88_FORMAT_CONVERSION_UNIT) / PP_TEMPERATURE_UNITS_PER_CENTIGRADES;
2053 table->MemoryVoltageChangeEnable = 1;
2054 table->MemoryInterval = 1;
2055 table->VoltageResponseTime = 0;
2056 table->VddcVddciDelta = 4000;
2057 table->PhaseResponseTime = 0;
2058 table->MemoryThermThrottleEnable = 1;
2060 PP_ASSERT_WITH_CODE((1 <= data->dpm_table.pcie_speed_table.count),
2061 "There must be 1 or more PCIE levels defined in PPTable.",
2064 table->PCIeBootLinkLevel = (uint8_t)data->dpm_table.pcie_speed_table.count;
2065 table->PCIeGenInterval = 1;
2067 ci_populate_smc_svi2_config(hwmgr, table);
2069 for (i = 0; i < SMU7_MAX_ENTRIES_SMIO; i++)
2070 CONVERT_FROM_HOST_TO_SMC_UL(table->Smio[i]);
2072 table->ThermGpio = 17;
2073 table->SclkStepSize = 0x4000;
2074 if (atomctrl_get_pp_assign_pin(hwmgr, VDDC_VRHOT_GPIO_PINID, &gpio_pin)) {
2075 table->VRHotGpio = gpio_pin.uc_gpio_pin_bit_shift;
2076 phm_cap_set(hwmgr->platform_descriptor.platformCaps,
2077 PHM_PlatformCaps_RegulatorHot);
2079 table->VRHotGpio = SMU7_UNUSED_GPIO_PIN;
2080 phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
2081 PHM_PlatformCaps_RegulatorHot);
2084 table->AcDcGpio = SMU7_UNUSED_GPIO_PIN;
2086 CONVERT_FROM_HOST_TO_SMC_UL(table->SystemFlags);
2087 CONVERT_FROM_HOST_TO_SMC_UL(table->SmioMaskVddcVid);
2088 CONVERT_FROM_HOST_TO_SMC_UL(table->SmioMaskVddcPhase);
2089 CONVERT_FROM_HOST_TO_SMC_UL(table->SmioMaskVddciVid);
2090 CONVERT_FROM_HOST_TO_SMC_UL(table->SmioMaskMvddVid);
2091 CONVERT_FROM_HOST_TO_SMC_UL(table->SclkStepSize);
2092 CONVERT_FROM_HOST_TO_SMC_US(table->TemperatureLimitHigh);
2093 CONVERT_FROM_HOST_TO_SMC_US(table->TemperatureLimitLow);
2094 table->VddcVddciDelta = PP_HOST_TO_SMC_US(table->VddcVddciDelta);
2095 CONVERT_FROM_HOST_TO_SMC_US(table->VoltageResponseTime);
2096 CONVERT_FROM_HOST_TO_SMC_US(table->PhaseResponseTime);
2098 table->BootVddc = PP_HOST_TO_SMC_US(table->BootVddc * VOLTAGE_SCALE);
2099 table->BootVddci = PP_HOST_TO_SMC_US(table->BootVddci * VOLTAGE_SCALE);
2100 table->BootMVdd = PP_HOST_TO_SMC_US(table->BootMVdd * VOLTAGE_SCALE);
2102 /* Upload all dpm data to SMC memory.(dpm level, dpm level count etc) */
2103 result = ci_copy_bytes_to_smc(hwmgr, smu_data->dpm_table_start +
2104 offsetof(SMU7_Discrete_DpmTable, SystemFlags),
2105 (uint8_t *)&(table->SystemFlags),
2106 sizeof(SMU7_Discrete_DpmTable) - 3 * sizeof(SMU7_PIDController),
2109 PP_ASSERT_WITH_CODE(0 == result,
2110 "Failed to upload dpm data to SMC memory!", return result;);
2112 result = ci_populate_initial_mc_reg_table(hwmgr);
2113 PP_ASSERT_WITH_CODE((0 == result),
2114 "Failed to populate initialize MC Reg table!", return result);
2116 result = ci_populate_pm_fuses(hwmgr);
2117 PP_ASSERT_WITH_CODE(0 == result,
2118 "Failed to populate PM fuses to SMC memory!", return result);
2120 ci_start_smc(hwmgr);
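/*
 * Translate the platform fan control parameters (PWM min/med/high and the
 * matching temperatures) into the SMC fan table and upload it; clears
 * PHM_PlatformCaps_MicrocodeFanControl when there is no fan, no fan table
 * address, or FMAX_DUTY100 reads back zero.
 */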
2125 static int ci_thermal_setup_fan_table(struct pp_hwmgr *hwmgr)
2127 struct ci_smumgr *ci_data = (struct ci_smumgr *)(hwmgr->smu_backend);
2128 SMU7_Discrete_FanTable fan_table = { FDO_MODE_HARDWARE };
2130 uint32_t t_diff1, t_diff2, pwm_diff1, pwm_diff2;
2131 uint16_t fdo_min, slope1, slope2;
2132 uint32_t reference_clock;
2136 if (!phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_MicrocodeFanControl))
2139 if (hwmgr->thermal_controller.fanInfo.bNoFan) {
2140 phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
2141 PHM_PlatformCaps_MicrocodeFanControl);
2145 if (0 == ci_data->fan_table_start) {
2146 phm_cap_unset(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_MicrocodeFanControl);
2150 duty100 = PHM_READ_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, CG_FDO_CTRL1, FMAX_DUTY100);
2153 phm_cap_unset(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_MicrocodeFanControl);
2157 tmp64 = hwmgr->thermal_controller.advanceFanControlParameters.usPWMMin * duty100;
2158 do_div(tmp64, 10000);
2159 fdo_min = (uint16_t)tmp64;
2161 t_diff1 = hwmgr->thermal_controller.advanceFanControlParameters.usTMed - hwmgr->thermal_controller.advanceFanControlParameters.usTMin;
2162 t_diff2 = hwmgr->thermal_controller.advanceFanControlParameters.usTHigh - hwmgr->thermal_controller.advanceFanControlParameters.usTMed;
2164 pwm_diff1 = hwmgr->thermal_controller.advanceFanControlParameters.usPWMMed - hwmgr->thermal_controller.advanceFanControlParameters.usPWMMin;
2165 pwm_diff2 = hwmgr->thermal_controller.advanceFanControlParameters.usPWMHigh - hwmgr->thermal_controller.advanceFanControlParameters.usPWMMed;
2167 slope1 = (uint16_t)((50 + ((16 * duty100 * pwm_diff1) / t_diff1)) / 100);
2168 slope2 = (uint16_t)((50 + ((16 * duty100 * pwm_diff2) / t_diff2)) / 100);
2170 fan_table.TempMin = cpu_to_be16((50 + hwmgr->thermal_controller.advanceFanControlParameters.usTMin) / 100);
2171 fan_table.TempMed = cpu_to_be16((50 + hwmgr->thermal_controller.advanceFanControlParameters.usTMed) / 100);
2172 fan_table.TempMax = cpu_to_be16((50 + hwmgr->thermal_controller.advanceFanControlParameters.usTMax) / 100);
2174 fan_table.Slope1 = cpu_to_be16(slope1);
2175 fan_table.Slope2 = cpu_to_be16(slope2);
2177 fan_table.FdoMin = cpu_to_be16(fdo_min);
2179 fan_table.HystDown = cpu_to_be16(hwmgr->thermal_controller.advanceFanControlParameters.ucTHyst);
2181 fan_table.HystUp = cpu_to_be16(1);
2183 fan_table.HystSlope = cpu_to_be16(1);
2185 fan_table.TempRespLim = cpu_to_be16(5);
2187 reference_clock = smu7_get_xclk(hwmgr);
2189 fan_table.RefreshPeriod = cpu_to_be32((hwmgr->thermal_controller.advanceFanControlParameters.ulCycleDelay * reference_clock) / 1600);
2191 fan_table.FdoMax = cpu_to_be16((uint16_t)duty100);
2193 fan_table.TempSrc = (uint8_t)PHM_READ_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, CG_MULT_THERMAL_CTRL, TEMP_SEL);
2195 res = ci_copy_bytes_to_smc(hwmgr, ci_data->fan_table_start, (uint8_t *)&fan_table, (uint32_t)sizeof(fan_table), SMC_RAM_END);
2200 static int ci_program_mem_timing_parameters(struct pp_hwmgr *hwmgr)
2202 struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
2204 if (data->need_update_smu7_dpm_table &
2205 (DPMTABLE_OD_UPDATE_SCLK | DPMTABLE_OD_UPDATE_MCLK))
2206 return ci_program_memory_timing_parameters(hwmgr);
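/*
 * Push an updated low-SCLK interrupt threshold to SMC RAM when it has
 * changed, then refresh the MC register and ARB timing tables as needed.
 */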
2211 static int ci_update_sclk_threshold(struct pp_hwmgr *hwmgr)
2213 struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
2214 struct ci_smumgr *smu_data = (struct ci_smumgr *)(hwmgr->smu_backend);
2217 uint32_t low_sclk_interrupt_threshold = 0;
2219 if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
2220 PHM_PlatformCaps_SclkThrottleLowNotification)
2221 && (hwmgr->gfx_arbiter.sclk_threshold !=
2222 data->low_sclk_interrupt_threshold)) {
2223 data->low_sclk_interrupt_threshold =
2224 hwmgr->gfx_arbiter.sclk_threshold;
2225 low_sclk_interrupt_threshold =
2226 data->low_sclk_interrupt_threshold;
2228 CONVERT_FROM_HOST_TO_SMC_UL(low_sclk_interrupt_threshold);
2230 result = ci_copy_bytes_to_smc(
2232 smu_data->dpm_table_start +
2233 offsetof(SMU7_Discrete_DpmTable,
2235 (uint8_t *)&low_sclk_interrupt_threshold,
2240 result = ci_update_and_upload_mc_reg_table(hwmgr);
2242 PP_ASSERT_WITH_CODE((0 == result), "Failed to upload MC reg table!", return result);
2244 result = ci_program_mem_timing_parameters(hwmgr);
2245 PP_ASSERT_WITH_CODE((result == 0),
2246 "Failed to program memory timing parameters!",
2252 static uint32_t ci_get_offsetof(uint32_t type, uint32_t member)
2255 case SMU_SoftRegisters:
2257 case HandshakeDisables:
2258 return offsetof(SMU7_SoftRegisters, HandshakeDisables);
2259 case VoltageChangeTimeout:
2260 return offsetof(SMU7_SoftRegisters, VoltageChangeTimeout);
2261 case AverageGraphicsActivity:
2262 return offsetof(SMU7_SoftRegisters, AverageGraphicsA);
2264 return offsetof(SMU7_SoftRegisters, PreVBlankGap);
2266 return offsetof(SMU7_SoftRegisters, VBlankTimeout);
2267 case DRAM_LOG_ADDR_H:
2268 return offsetof(SMU7_SoftRegisters, DRAM_LOG_ADDR_H);
2269 case DRAM_LOG_ADDR_L:
2270 return offsetof(SMU7_SoftRegisters, DRAM_LOG_ADDR_L);
2271 case DRAM_LOG_PHY_ADDR_H:
2272 return offsetof(SMU7_SoftRegisters, DRAM_LOG_PHY_ADDR_H);
2273 case DRAM_LOG_PHY_ADDR_L:
2274 return offsetof(SMU7_SoftRegisters, DRAM_LOG_PHY_ADDR_L);
2275 case DRAM_LOG_BUFF_SIZE:
2276 return offsetof(SMU7_SoftRegisters, DRAM_LOG_BUFF_SIZE);
2278 case SMU_Discrete_DpmTable:
2280 case LowSclkInterruptThreshold:
2281 return offsetof(SMU7_Discrete_DpmTable, LowSclkInterruptT);
2284 pr_debug("can't get the offset of type %x member %x\n", type, member);
2288 static uint32_t ci_get_mac_definition(uint32_t value)
2291 case SMU_MAX_LEVELS_GRAPHICS:
2292 return SMU7_MAX_LEVELS_GRAPHICS;
2293 case SMU_MAX_LEVELS_MEMORY:
2294 return SMU7_MAX_LEVELS_MEMORY;
2295 case SMU_MAX_LEVELS_LINK:
2296 return SMU7_MAX_LEVELS_LINK;
2297 case SMU_MAX_ENTRIES_SMIO:
2298 return SMU7_MAX_ENTRIES_SMIO;
2299 case SMU_MAX_LEVELS_VDDC:
2300 return SMU7_MAX_LEVELS_VDDC;
2301 case SMU_MAX_LEVELS_VDDCI:
2302 return SMU7_MAX_LEVELS_VDDCI;
2303 case SMU_MAX_LEVELS_MVDD:
2304 return SMU7_MAX_LEVELS_MVDD;
2307 pr_debug("can't get the macro definition of %x\n", value);
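/*
 * Copy the SMC microcode image into SMC RAM through the auto-incrementing
 * indirect register interface; the image must fit in SMC RAM and be a
 * multiple of 4 bytes.
 */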
2311 static int ci_load_smc_ucode(struct pp_hwmgr *hwmgr)
2313 uint32_t byte_count, start_addr;
2317 struct cgs_firmware_info info = {0};
2319 cgs_get_firmware_info(hwmgr->device, CGS_UCODE_ID_SMU, &info);
2321 hwmgr->is_kicker = info.is_kicker;
2322 byte_count = info.image_size;
2323 src = (uint8_t *)info.kptr;
2324 start_addr = info.ucode_start_address;
2326 if (byte_count > SMC_RAM_END) {
2327 pr_err("SMC address is beyond the SMC RAM area.\n");
2331 cgs_write_register(hwmgr->device, mmSMC_IND_INDEX_0, start_addr);
2332 PHM_WRITE_FIELD(hwmgr->device, SMC_IND_ACCESS_CNTL, AUTO_INCREMENT_IND_0, 1);
2334 for (; byte_count >= 4; byte_count -= 4) {
2335 data = (src[0] << 24) | (src[1] << 16) | (src[2] << 8) | src[3];
2336 cgs_write_register(hwmgr->device, mmSMC_IND_DATA_0, data);
2339 PHM_WRITE_FIELD(hwmgr->device, SMC_IND_ACCESS_CNTL, AUTO_INCREMENT_IND_0, 0);
2341 if (0 != byte_count) {
2342 pr_err("SMC firmware size must be divisible by 4\n");
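/*
 * Hold the SMC in reset with its clock gated and (re)load the microcode,
 * unless the SMC is already running.
 */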
2349 static int ci_upload_firmware(struct pp_hwmgr *hwmgr)
2351 if (ci_is_smc_ram_running(hwmgr)) {
2352 pr_info("smc is running, no need to load smc firmware\n");
2355 PHM_WAIT_INDIRECT_FIELD(hwmgr, SMC_IND, RCU_UC_EVENTS,
2357 PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, SMC_SYSCON_MISC_CNTL,
2360 PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, SMC_SYSCON_CLOCK_CNTL_0, ck_disable, 1);
2361 PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, SMC_SYSCON_RESET_CNTL, rst_reg, 1);
2362 return ci_load_smc_ucode(hwmgr);
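/*
 * Upload the firmware if needed, then read the table offsets (DPM table,
 * soft registers, MC registers, fan table, ARB timing table) and the SMC
 * version out of the firmware header in SMC RAM.
 */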
2365 static int ci_process_firmware_header(struct pp_hwmgr *hwmgr)
2367 struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
2368 struct ci_smumgr *ci_data = (struct ci_smumgr *)(hwmgr->smu_backend);
2374 if (ci_upload_firmware(hwmgr))
2377 result = ci_read_smc_sram_dword(hwmgr,
2378 SMU7_FIRMWARE_HEADER_LOCATION +
2379 offsetof(SMU7_Firmware_Header, DpmTable),
2383 ci_data->dpm_table_start = tmp;
2385 error |= (0 != result);
2387 result = ci_read_smc_sram_dword(hwmgr,
2388 SMU7_FIRMWARE_HEADER_LOCATION +
2389 offsetof(SMU7_Firmware_Header, SoftRegisters),
2393 data->soft_regs_start = tmp;
2394 ci_data->soft_regs_start = tmp;
2397 error |= (0 != result);
2399 result = ci_read_smc_sram_dword(hwmgr,
2400 SMU7_FIRMWARE_HEADER_LOCATION +
2401 offsetof(SMU7_Firmware_Header, mcRegisterTable),
2405 ci_data->mc_reg_table_start = tmp;
2407 result = ci_read_smc_sram_dword(hwmgr,
2408 SMU7_FIRMWARE_HEADER_LOCATION +
2409 offsetof(SMU7_Firmware_Header, FanTable),
2413 ci_data->fan_table_start = tmp;
2415 error |= (0 != result);
2417 result = ci_read_smc_sram_dword(hwmgr,
2418 SMU7_FIRMWARE_HEADER_LOCATION +
2419 offsetof(SMU7_Firmware_Header, mcArbDramTimingTable),
2423 ci_data->arb_table_start = tmp;
2425 error |= (0 != result);
2427 result = ci_read_smc_sram_dword(hwmgr,
2428 SMU7_FIRMWARE_HEADER_LOCATION +
2429 offsetof(SMU7_Firmware_Header, Version),
2433 hwmgr->microcode_version_info.SMC = tmp;
2435 error |= (0 != result);
2437 return error ? 1 : 0;
2440 static uint8_t ci_get_memory_modile_index(struct pp_hwmgr *hwmgr)
2442 return (uint8_t) (0xFF & (cgs_read_register(hwmgr->device, mmBIOS_SCRATCH_4) >> 16));
2445 static bool ci_check_s0_mc_reg_index(uint16_t in_reg, uint16_t *out_reg)
2450 case mmMC_SEQ_RAS_TIMING:
2451 *out_reg = mmMC_SEQ_RAS_TIMING_LP;
2454 case mmMC_SEQ_DLL_STBY:
2455 *out_reg = mmMC_SEQ_DLL_STBY_LP;
2458 case mmMC_SEQ_G5PDX_CMD0:
2459 *out_reg = mmMC_SEQ_G5PDX_CMD0_LP;
2462 case mmMC_SEQ_G5PDX_CMD1:
2463 *out_reg = mmMC_SEQ_G5PDX_CMD1_LP;
2466 case mmMC_SEQ_G5PDX_CTRL:
2467 *out_reg = mmMC_SEQ_G5PDX_CTRL_LP;
2470 case mmMC_SEQ_CAS_TIMING:
2471 *out_reg = mmMC_SEQ_CAS_TIMING_LP;
2474 case mmMC_SEQ_MISC_TIMING:
2475 *out_reg = mmMC_SEQ_MISC_TIMING_LP;
2478 case mmMC_SEQ_MISC_TIMING2:
2479 *out_reg = mmMC_SEQ_MISC_TIMING2_LP;
2482 case mmMC_SEQ_PMG_DVS_CMD:
2483 *out_reg = mmMC_SEQ_PMG_DVS_CMD_LP;
2486 case mmMC_SEQ_PMG_DVS_CTL:
2487 *out_reg = mmMC_SEQ_PMG_DVS_CTL_LP;
2490 case mmMC_SEQ_RD_CTL_D0:
2491 *out_reg = mmMC_SEQ_RD_CTL_D0_LP;
2494 case mmMC_SEQ_RD_CTL_D1:
2495 *out_reg = mmMC_SEQ_RD_CTL_D1_LP;
2498 case mmMC_SEQ_WR_CTL_D0:
2499 *out_reg = mmMC_SEQ_WR_CTL_D0_LP;
2502 case mmMC_SEQ_WR_CTL_D1:
2503 *out_reg = mmMC_SEQ_WR_CTL_D1_LP;
2506 case mmMC_PMG_CMD_EMRS:
2507 *out_reg = mmMC_SEQ_PMG_CMD_EMRS_LP;
2510 case mmMC_PMG_CMD_MRS:
2511 *out_reg = mmMC_SEQ_PMG_CMD_MRS_LP;
2514 case mmMC_PMG_CMD_MRS1:
2515 *out_reg = mmMC_SEQ_PMG_CMD_MRS1_LP;
2518 case mmMC_SEQ_PMG_TIMING:
2519 *out_reg = mmMC_SEQ_PMG_TIMING_LP;
2522 case mmMC_PMG_CMD_MRS2:
2523 *out_reg = mmMC_SEQ_PMG_CMD_MRS2_LP;
2526 case mmMC_SEQ_WR_CTL_2:
2527 *out_reg = mmMC_SEQ_WR_CTL_2_LP;
2538 static int ci_set_s0_mc_reg_index(struct ci_mc_reg_table *table)
2543 for (i = 0; i < table->last; i++) {
2544 table->mc_reg_address[i].s0 =
2545 ci_check_s0_mc_reg_index(table->mc_reg_address[i].s1, &address)
2546 ? address : table->mc_reg_address[i].s1;
2551 static int ci_copy_vbios_smc_reg_table(const pp_atomctrl_mc_reg_table *table,
2552 struct ci_mc_reg_table *ni_table)
2556 PP_ASSERT_WITH_CODE((table->last <= SMU7_DISCRETE_MC_REGISTER_ARRAY_SIZE),
2557 "Invalid VramInfo table.", return -EINVAL);
2558 PP_ASSERT_WITH_CODE((table->num_entries <= MAX_AC_TIMING_ENTRIES),
2559 "Invalid VramInfo table.", return -EINVAL);
2561 for (i = 0; i < table->last; i++)
2562 ni_table->mc_reg_address[i].s1 = table->mc_reg_address[i].s1;
2564 ni_table->last = table->last;
2566 for (i = 0; i < table->num_entries; i++) {
2567 ni_table->mc_reg_table_entry[i].mclk_max =
2568 table->mc_reg_table_entry[i].mclk_max;
2569 for (j = 0; j < table->last; j++) {
2570 ni_table->mc_reg_table_entry[i].mc_data[j] =
2571 table->mc_reg_table_entry[i].mc_data[j];
2575 ni_table->num_entries = table->num_entries;
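/*
 * Append derived MC registers (EMRS/MRS/MRS1 mirrors and, for non-GDDR5
 * memory, MC_PMG_AUTO_CMD) whose per-entry values are computed from the
 * MC_SEQ_MISC1 and MC_SEQ_RESERVE_M data already present in the table.
 */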
2580 static int ci_set_mc_special_registers(struct pp_hwmgr *hwmgr,
2581 struct ci_mc_reg_table *table)
2585 struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
2587 for (i = 0, j = table->last; i < table->last; i++) {
2588 PP_ASSERT_WITH_CODE((j < SMU7_DISCRETE_MC_REGISTER_ARRAY_SIZE),
2589 "Invalid VramInfo table.", return -EINVAL);
2591 switch (table->mc_reg_address[i].s1) {
2593 case mmMC_SEQ_MISC1:
2594 temp_reg = cgs_read_register(hwmgr->device, mmMC_PMG_CMD_EMRS);
2595 table->mc_reg_address[j].s1 = mmMC_PMG_CMD_EMRS;
2596 table->mc_reg_address[j].s0 = mmMC_SEQ_PMG_CMD_EMRS_LP;
2597 for (k = 0; k < table->num_entries; k++) {
2598 table->mc_reg_table_entry[k].mc_data[j] =
2599 ((temp_reg & 0xffff0000)) |
2600 ((table->mc_reg_table_entry[k].mc_data[i] & 0xffff0000) >> 16);
2603 PP_ASSERT_WITH_CODE((j < SMU7_DISCRETE_MC_REGISTER_ARRAY_SIZE),
2604 "Invalid VramInfo table.", return -EINVAL);
2606 temp_reg = cgs_read_register(hwmgr->device, mmMC_PMG_CMD_MRS);
2607 table->mc_reg_address[j].s1 = mmMC_PMG_CMD_MRS;
2608 table->mc_reg_address[j].s0 = mmMC_SEQ_PMG_CMD_MRS_LP;
2609 for (k = 0; k < table->num_entries; k++) {
2610 table->mc_reg_table_entry[k].mc_data[j] =
2611 (temp_reg & 0xffff0000) |
2612 (table->mc_reg_table_entry[k].mc_data[i] & 0x0000ffff);
2614 if (!data->is_memory_gddr5)
2615 table->mc_reg_table_entry[k].mc_data[j] |= 0x100;
2618 PP_ASSERT_WITH_CODE((j <= SMU7_DISCRETE_MC_REGISTER_ARRAY_SIZE),
2619 "Invalid VramInfo table.", return -EINVAL);
2621 if (!data->is_memory_gddr5 && j < SMU7_DISCRETE_MC_REGISTER_ARRAY_SIZE) {
2622 table->mc_reg_address[j].s1 = mmMC_PMG_AUTO_CMD;
2623 table->mc_reg_address[j].s0 = mmMC_PMG_AUTO_CMD;
2624 for (k = 0; k < table->num_entries; k++) {
2625 table->mc_reg_table_entry[k].mc_data[j] =
2626 (table->mc_reg_table_entry[k].mc_data[i] & 0xffff0000) >> 16;
2629 PP_ASSERT_WITH_CODE((j <= SMU7_DISCRETE_MC_REGISTER_ARRAY_SIZE),
2630 "Invalid VramInfo table.", return -EINVAL);
2635 case mmMC_SEQ_RESERVE_M:
2636 temp_reg = cgs_read_register(hwmgr->device, mmMC_PMG_CMD_MRS1);
2637 table->mc_reg_address[j].s1 = mmMC_PMG_CMD_MRS1;
2638 table->mc_reg_address[j].s0 = mmMC_SEQ_PMG_CMD_MRS1_LP;
2639 for (k = 0; k < table->num_entries; k++) {
2640 table->mc_reg_table_entry[k].mc_data[j] =
2641 (temp_reg & 0xffff0000) |
2642 (table->mc_reg_table_entry[k].mc_data[i] & 0x0000ffff);
2645 PP_ASSERT_WITH_CODE((j <= SMU7_DISCRETE_MC_REGISTER_ARRAY_SIZE),
2646 "Invalid VramInfo table.", return -EINVAL);
2660 static int ci_set_valid_flag(struct ci_mc_reg_table *table)
2664 for (i = 0; i < table->last; i++) {
2665 for (j = 1; j < table->num_entries; j++) {
2666 if (table->mc_reg_table_entry[j-1].mc_data[i] !=
2667 table->mc_reg_table_entry[j].mc_data[i]) {
2668 table->validflag |= (1 << i);
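/*
 * Mirror the MC sequencer registers into their LP shadows, pull the MC
 * register table from the VBIOS, and post-process it (s0 indices, special
 * registers, valid flags) into the driver's ci_mc_reg_table.
 */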
2677 static int ci_initialize_mc_reg_table(struct pp_hwmgr *hwmgr)
2680 struct ci_smumgr *smu_data = (struct ci_smumgr *)(hwmgr->smu_backend);
2681 pp_atomctrl_mc_reg_table *table;
2682 struct ci_mc_reg_table *ni_table = &smu_data->mc_reg_table;
2683 uint8_t module_index = ci_get_memory_modile_index(hwmgr);
2685 table = kzalloc(sizeof(pp_atomctrl_mc_reg_table), GFP_KERNEL);
2690 /* Program additional LP registers that are no longer programmed by VBIOS */
2691 cgs_write_register(hwmgr->device, mmMC_SEQ_RAS_TIMING_LP, cgs_read_register(hwmgr->device, mmMC_SEQ_RAS_TIMING));
2692 cgs_write_register(hwmgr->device, mmMC_SEQ_CAS_TIMING_LP, cgs_read_register(hwmgr->device, mmMC_SEQ_CAS_TIMING));
2693 cgs_write_register(hwmgr->device, mmMC_SEQ_DLL_STBY_LP, cgs_read_register(hwmgr->device, mmMC_SEQ_DLL_STBY));
2694 cgs_write_register(hwmgr->device, mmMC_SEQ_G5PDX_CMD0_LP, cgs_read_register(hwmgr->device, mmMC_SEQ_G5PDX_CMD0));
2695 cgs_write_register(hwmgr->device, mmMC_SEQ_G5PDX_CMD1_LP, cgs_read_register(hwmgr->device, mmMC_SEQ_G5PDX_CMD1));
2696 cgs_write_register(hwmgr->device, mmMC_SEQ_G5PDX_CTRL_LP, cgs_read_register(hwmgr->device, mmMC_SEQ_G5PDX_CTRL));
2697 cgs_write_register(hwmgr->device, mmMC_SEQ_PMG_DVS_CMD_LP, cgs_read_register(hwmgr->device, mmMC_SEQ_PMG_DVS_CMD));
2698 cgs_write_register(hwmgr->device, mmMC_SEQ_PMG_DVS_CTL_LP, cgs_read_register(hwmgr->device, mmMC_SEQ_PMG_DVS_CTL));
2699 cgs_write_register(hwmgr->device, mmMC_SEQ_MISC_TIMING_LP, cgs_read_register(hwmgr->device, mmMC_SEQ_MISC_TIMING));
2700 cgs_write_register(hwmgr->device, mmMC_SEQ_MISC_TIMING2_LP, cgs_read_register(hwmgr->device, mmMC_SEQ_MISC_TIMING2));
2701 cgs_write_register(hwmgr->device, mmMC_SEQ_PMG_CMD_EMRS_LP, cgs_read_register(hwmgr->device, mmMC_PMG_CMD_EMRS));
2702 cgs_write_register(hwmgr->device, mmMC_SEQ_PMG_CMD_MRS_LP, cgs_read_register(hwmgr->device, mmMC_PMG_CMD_MRS));
2703 cgs_write_register(hwmgr->device, mmMC_SEQ_PMG_CMD_MRS1_LP, cgs_read_register(hwmgr->device, mmMC_PMG_CMD_MRS1));
2704 cgs_write_register(hwmgr->device, mmMC_SEQ_WR_CTL_D0_LP, cgs_read_register(hwmgr->device, mmMC_SEQ_WR_CTL_D0));
2705 cgs_write_register(hwmgr->device, mmMC_SEQ_WR_CTL_D1_LP, cgs_read_register(hwmgr->device, mmMC_SEQ_WR_CTL_D1));
2706 cgs_write_register(hwmgr->device, mmMC_SEQ_RD_CTL_D0_LP, cgs_read_register(hwmgr->device, mmMC_SEQ_RD_CTL_D0));
2707 cgs_write_register(hwmgr->device, mmMC_SEQ_RD_CTL_D1_LP, cgs_read_register(hwmgr->device, mmMC_SEQ_RD_CTL_D1));
2708 cgs_write_register(hwmgr->device, mmMC_SEQ_PMG_TIMING_LP, cgs_read_register(hwmgr->device, mmMC_SEQ_PMG_TIMING));
2709 cgs_write_register(hwmgr->device, mmMC_SEQ_PMG_CMD_MRS2_LP, cgs_read_register(hwmgr->device, mmMC_PMG_CMD_MRS2));
2710 cgs_write_register(hwmgr->device, mmMC_SEQ_WR_CTL_2_LP, cgs_read_register(hwmgr->device, mmMC_SEQ_WR_CTL_2));
2712 memset(table, 0x00, sizeof(pp_atomctrl_mc_reg_table));
2714 result = atomctrl_initialize_mc_reg_table(hwmgr, module_index, table);
2717 result = ci_copy_vbios_smc_reg_table(table, ni_table);
2720 ci_set_s0_mc_reg_index(ni_table);
2721 result = ci_set_mc_special_registers(hwmgr, ni_table);
2725 ci_set_valid_flag(ni_table);
2732 static bool ci_is_dpm_running(struct pp_hwmgr *hwmgr)
2734 return ci_is_smc_ram_running(hwmgr);
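/*
 * Apply the requested power-profile activity threshold and hysteresis to
 * every graphics DPM level and upload the level array to SMC RAM.
 */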
2737 static int ci_populate_requested_graphic_levels(struct pp_hwmgr *hwmgr,
2738 struct amd_pp_profile *request)
2740 struct ci_smumgr *smu_data = (struct ci_smumgr *)
2741 (hwmgr->smu_backend);
2742 struct SMU7_Discrete_GraphicsLevel *levels =
2743 smu_data->smc_state_table.GraphicsLevel;
2744 uint32_t array = smu_data->dpm_table_start +
2745 offsetof(SMU7_Discrete_DpmTable, GraphicsLevel);
2746 uint32_t array_size = sizeof(struct SMU7_Discrete_GraphicsLevel) *
2747 SMU7_MAX_LEVELS_GRAPHICS;
2750 for (i = 0; i < smu_data->smc_state_table.GraphicsDpmLevelCount; i++) {
2751 levels[i].ActivityLevel =
2752 cpu_to_be16(request->activity_threshold);
2753 levels[i].EnabledForActivity = 1;
2754 levels[i].UpH = request->up_hyst;
2755 levels[i].DownH = request->down_hyst;
2758 return ci_copy_bytes_to_smc(hwmgr, array, (uint8_t *)levels,
2759 array_size, SMC_RAM_END);
2763 static int ci_smu_init(struct pp_hwmgr *hwmgr)
2766 struct ci_smumgr *ci_priv = NULL;
2768 ci_priv = kzalloc(sizeof(struct ci_smumgr), GFP_KERNEL);
2770 if (ci_priv == NULL)
2773 for (i = 0; i < SMU7_MAX_LEVELS_GRAPHICS; i++)
2774 ci_priv->activity_target[i] = 30;
2776 hwmgr->smu_backend = ci_priv;
2781 static int ci_smu_fini(struct pp_hwmgr *hwmgr)
2783 kfree(hwmgr->smu_backend);
2784 hwmgr->smu_backend = NULL;
2785 cgs_rel_firmware(hwmgr->device, CGS_UCODE_ID_SMU);
2789 static int ci_start_smu(struct pp_hwmgr *hwmgr)
2794 const struct pp_smumgr_func ci_smu_funcs = {
2795 .smu_init = ci_smu_init,
2796 .smu_fini = ci_smu_fini,
2797 .start_smu = ci_start_smu,
2798 .check_fw_load_finish = NULL,
2799 .request_smu_load_fw = NULL,
2800 .request_smu_load_specific_fw = NULL,
2801 .send_msg_to_smc = ci_send_msg_to_smc,
2802 .send_msg_to_smc_with_parameter = ci_send_msg_to_smc_with_parameter,
2803 .download_pptable_settings = NULL,
2804 .upload_pptable_settings = NULL,
2805 .get_offsetof = ci_get_offsetof,
2806 .process_firmware_header = ci_process_firmware_header,
2807 .init_smc_table = ci_init_smc_table,
2808 .update_sclk_threshold = ci_update_sclk_threshold,
2809 .thermal_setup_fan_table = ci_thermal_setup_fan_table,
2810 .populate_all_graphic_levels = ci_populate_all_graphic_levels,
2811 .populate_all_memory_levels = ci_populate_all_memory_levels,
2812 .get_mac_definition = ci_get_mac_definition,
2813 .initialize_mc_reg_table = ci_initialize_mc_reg_table,
2814 .is_dpm_running = ci_is_dpm_running,
2815 .populate_requested_graphic_levels = ci_populate_requested_graphic_levels,