2 * Copyright 2016 Advanced Micro Devices, Inc.
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
24 #include <linux/delay.h>
26 #include <linux/module.h>
27 #include <linux/slab.h>
30 #include "amd_powerplay.h"
31 #include "vega10_smumgr.h"
32 #include "hardwaremanager.h"
33 #include "ppatomfwctrl.h"
34 #include "atomfirmware.h"
35 #include "cgs_common.h"
36 #include "vega10_powertune.h"
38 #include "smu9_driver_if.h"
39 #include "vega10_inc.h"
41 #include "pppcielanes.h"
42 #include "vega10_hwmgr.h"
43 #include "vega10_processpptables.h"
44 #include "vega10_pptable.h"
45 #include "vega10_thermal.h"
48 #include "amd_pcie_helpers.h"
49 #include "cgs_linux.h"
50 #include "ppinterrupt.h"
51 #include "pp_overdriver.h"
53 #define VOLTAGE_SCALE 4
54 #define VOLTAGE_VID_OFFSET_SCALE1 625
55 #define VOLTAGE_VID_OFFSET_SCALE2 100
57 #define HBM_MEMORY_CHANNEL_WIDTH 128
59 static const uint32_t channel_number[] = {1, 2, 0, 4, 0, 8, 0, 16, 2};
61 #define MEM_FREQ_LOW_LATENCY 25000
62 #define MEM_FREQ_HIGH_LATENCY 80000
63 #define MEM_LATENCY_HIGH 245
64 #define MEM_LATENCY_LOW 35
65 #define MEM_LATENCY_ERR 0xFFFF
67 #define mmDF_CS_AON0_DramBaseAddress0 0x0044
68 #define mmDF_CS_AON0_DramBaseAddress0_BASE_IDX 0
70 //DF_CS_AON0_DramBaseAddress0
71 #define DF_CS_AON0_DramBaseAddress0__AddrRngVal__SHIFT 0x0
72 #define DF_CS_AON0_DramBaseAddress0__LgcyMmioHoleEn__SHIFT 0x1
73 #define DF_CS_AON0_DramBaseAddress0__IntLvNumChan__SHIFT 0x4
74 #define DF_CS_AON0_DramBaseAddress0__IntLvAddrSel__SHIFT 0x8
75 #define DF_CS_AON0_DramBaseAddress0__DramBaseAddr__SHIFT 0xc
76 #define DF_CS_AON0_DramBaseAddress0__AddrRngVal_MASK 0x00000001L
77 #define DF_CS_AON0_DramBaseAddress0__LgcyMmioHoleEn_MASK 0x00000002L
78 #define DF_CS_AON0_DramBaseAddress0__IntLvNumChan_MASK 0x000000F0L
79 #define DF_CS_AON0_DramBaseAddress0__IntLvAddrSel_MASK 0x00000700L
80 #define DF_CS_AON0_DramBaseAddress0__DramBaseAddr_MASK 0xFFFFF000L
81 static int vega10_force_clock_level(struct pp_hwmgr *hwmgr,
82 enum pp_clock_type type, uint32_t mask);
84 static const ULONG PhwVega10_Magic = (ULONG)(PHM_VIslands_Magic);
86 struct vega10_power_state *cast_phw_vega10_power_state(
87 struct pp_hw_power_state *hw_ps)
89 PP_ASSERT_WITH_CODE((PhwVega10_Magic == hw_ps->magic),
90 "Invalid Powerstate Type!",
93 return (struct vega10_power_state *)hw_ps;
96 const struct vega10_power_state *cast_const_phw_vega10_power_state(
97 const struct pp_hw_power_state *hw_ps)
99 PP_ASSERT_WITH_CODE((PhwVega10_Magic == hw_ps->magic),
100 "Invalid Powerstate Type!",
103 return (const struct vega10_power_state *)hw_ps;
106 static void vega10_set_default_registry_data(struct pp_hwmgr *hwmgr)
108 struct vega10_hwmgr *data =
109 (struct vega10_hwmgr *)(hwmgr->backend);
111 data->registry_data.sclk_dpm_key_disabled =
112 hwmgr->feature_mask & PP_SCLK_DPM_MASK ? false : true;
113 data->registry_data.socclk_dpm_key_disabled =
114 hwmgr->feature_mask & PP_SOCCLK_DPM_MASK ? false : true;
115 data->registry_data.mclk_dpm_key_disabled =
116 hwmgr->feature_mask & PP_MCLK_DPM_MASK ? false : true;
117 data->registry_data.pcie_dpm_key_disabled =
118 hwmgr->feature_mask & PP_PCIE_DPM_MASK ? false : true;
120 data->registry_data.dcefclk_dpm_key_disabled =
121 hwmgr->feature_mask & PP_DCEFCLK_DPM_MASK ? false : true;
123 if (hwmgr->feature_mask & PP_POWER_CONTAINMENT_MASK) {
124 data->registry_data.power_containment_support = 1;
125 data->registry_data.enable_pkg_pwr_tracking_feature = 1;
126 data->registry_data.enable_tdc_limit_feature = 1;
129 data->registry_data.clock_stretcher_support =
130 hwmgr->feature_mask & PP_CLOCK_STRETCH_MASK ? true : false;
132 data->registry_data.ulv_support =
133 hwmgr->feature_mask & PP_ULV_MASK ? true : false;
135 data->registry_data.sclk_deep_sleep_support =
136 hwmgr->feature_mask & PP_SCLK_DEEP_SLEEP_MASK ? true : false;
138 data->registry_data.disable_water_mark = 0;
140 data->registry_data.fan_control_support = 1;
141 data->registry_data.thermal_support = 1;
142 data->registry_data.fw_ctf_enabled = 1;
144 data->registry_data.avfs_support = 1;
145 data->registry_data.led_dpm_enabled = 1;
147 data->registry_data.vr0hot_enabled = 1;
148 data->registry_data.vr1hot_enabled = 1;
149 data->registry_data.regulator_hot_gpio_support = 1;
151 data->registry_data.didt_support = 1;
152 if (data->registry_data.didt_support) {
153 data->registry_data.didt_mode = 6;
154 data->registry_data.sq_ramping_support = 1;
155 data->registry_data.db_ramping_support = 0;
156 data->registry_data.td_ramping_support = 0;
157 data->registry_data.tcp_ramping_support = 0;
158 data->registry_data.dbr_ramping_support = 0;
159 data->registry_data.edc_didt_support = 1;
160 data->registry_data.gc_didt_support = 0;
161 data->registry_data.psm_didt_support = 0;
164 data->display_voltage_mode = PPVEGA10_VEGA10DISPLAYVOLTAGEMODE_DFLT;
165 data->dcef_clk_quad_eqn_a = PPREGKEY_VEGA10QUADRATICEQUATION_DFLT;
166 data->dcef_clk_quad_eqn_b = PPREGKEY_VEGA10QUADRATICEQUATION_DFLT;
167 data->dcef_clk_quad_eqn_c = PPREGKEY_VEGA10QUADRATICEQUATION_DFLT;
168 data->disp_clk_quad_eqn_a = PPREGKEY_VEGA10QUADRATICEQUATION_DFLT;
169 data->disp_clk_quad_eqn_b = PPREGKEY_VEGA10QUADRATICEQUATION_DFLT;
170 data->disp_clk_quad_eqn_c = PPREGKEY_VEGA10QUADRATICEQUATION_DFLT;
171 data->pixel_clk_quad_eqn_a = PPREGKEY_VEGA10QUADRATICEQUATION_DFLT;
172 data->pixel_clk_quad_eqn_b = PPREGKEY_VEGA10QUADRATICEQUATION_DFLT;
173 data->pixel_clk_quad_eqn_c = PPREGKEY_VEGA10QUADRATICEQUATION_DFLT;
174 data->phy_clk_quad_eqn_a = PPREGKEY_VEGA10QUADRATICEQUATION_DFLT;
175 data->phy_clk_quad_eqn_b = PPREGKEY_VEGA10QUADRATICEQUATION_DFLT;
176 data->phy_clk_quad_eqn_c = PPREGKEY_VEGA10QUADRATICEQUATION_DFLT;
178 data->gfxclk_average_alpha = PPVEGA10_VEGA10GFXCLKAVERAGEALPHA_DFLT;
179 data->socclk_average_alpha = PPVEGA10_VEGA10SOCCLKAVERAGEALPHA_DFLT;
180 data->uclk_average_alpha = PPVEGA10_VEGA10UCLKCLKAVERAGEALPHA_DFLT;
181 data->gfx_activity_average_alpha = PPVEGA10_VEGA10GFXACTIVITYAVERAGEALPHA_DFLT;
184 static int vega10_set_features_platform_caps(struct pp_hwmgr *hwmgr)
186 struct vega10_hwmgr *data =
187 (struct vega10_hwmgr *)(hwmgr->backend);
188 struct phm_ppt_v2_information *table_info =
189 (struct phm_ppt_v2_information *)hwmgr->pptable;
190 struct cgs_system_info sys_info = {0};
193 phm_cap_set(hwmgr->platform_descriptor.platformCaps,
194 PHM_PlatformCaps_SclkDeepSleep);
196 phm_cap_set(hwmgr->platform_descriptor.platformCaps,
197 PHM_PlatformCaps_DynamicPatchPowerState);
199 if (data->vddci_control == VEGA10_VOLTAGE_CONTROL_NONE)
200 phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
201 PHM_PlatformCaps_ControlVDDCI);
203 phm_cap_set(hwmgr->platform_descriptor.platformCaps,
204 PHM_PlatformCaps_EnableSMU7ThermalManagement);
206 sys_info.size = sizeof(struct cgs_system_info);
207 sys_info.info_id = CGS_SYSTEM_INFO_PG_FLAGS;
208 result = cgs_query_system_info(hwmgr->device, &sys_info);
210 if (!result && (sys_info.value & AMD_PG_SUPPORT_UVD))
211 phm_cap_set(hwmgr->platform_descriptor.platformCaps,
212 PHM_PlatformCaps_UVDPowerGating);
214 if (!result && (sys_info.value & AMD_PG_SUPPORT_VCE))
215 phm_cap_set(hwmgr->platform_descriptor.platformCaps,
216 PHM_PlatformCaps_VCEPowerGating);
218 phm_cap_set(hwmgr->platform_descriptor.platformCaps,
219 PHM_PlatformCaps_UnTabledHardwareInterface);
221 phm_cap_set(hwmgr->platform_descriptor.platformCaps,
222 PHM_PlatformCaps_FanSpeedInTableIsRPM);
224 phm_cap_set(hwmgr->platform_descriptor.platformCaps,
225 PHM_PlatformCaps_ODFuzzyFanControlSupport);
227 phm_cap_set(hwmgr->platform_descriptor.platformCaps,
228 PHM_PlatformCaps_DynamicPowerManagement);
230 phm_cap_set(hwmgr->platform_descriptor.platformCaps,
231 PHM_PlatformCaps_SMC);
233 /* power tune caps */
234 /* assume disabled */
235 phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
236 PHM_PlatformCaps_PowerContainment);
237 phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
238 PHM_PlatformCaps_DiDtSupport);
239 phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
240 PHM_PlatformCaps_SQRamping);
241 phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
242 PHM_PlatformCaps_DBRamping);
243 phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
244 PHM_PlatformCaps_TDRamping);
245 phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
246 PHM_PlatformCaps_TCPRamping);
247 phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
248 PHM_PlatformCaps_DBRRamping);
249 phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
250 PHM_PlatformCaps_DiDtEDCEnable);
251 phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
252 PHM_PlatformCaps_GCEDC);
253 phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
254 PHM_PlatformCaps_PSM);
256 if (data->registry_data.didt_support) {
257 phm_cap_set(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_DiDtSupport);
258 if (data->registry_data.sq_ramping_support)
259 phm_cap_set(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_SQRamping);
260 if (data->registry_data.db_ramping_support)
261 phm_cap_set(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_DBRamping);
262 if (data->registry_data.td_ramping_support)
263 phm_cap_set(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_TDRamping);
264 if (data->registry_data.tcp_ramping_support)
265 phm_cap_set(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_TCPRamping);
266 if (data->registry_data.dbr_ramping_support)
267 phm_cap_set(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_DBRRamping);
268 if (data->registry_data.edc_didt_support)
269 phm_cap_set(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_DiDtEDCEnable);
270 if (data->registry_data.gc_didt_support)
271 phm_cap_set(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_GCEDC);
272 if (data->registry_data.psm_didt_support)
273 phm_cap_set(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_PSM);
276 if (data->registry_data.power_containment_support)
277 phm_cap_set(hwmgr->platform_descriptor.platformCaps,
278 PHM_PlatformCaps_PowerContainment);
279 phm_cap_set(hwmgr->platform_descriptor.platformCaps,
280 PHM_PlatformCaps_CAC);
282 if (table_info->tdp_table->usClockStretchAmount &&
283 data->registry_data.clock_stretcher_support)
284 phm_cap_set(hwmgr->platform_descriptor.platformCaps,
285 PHM_PlatformCaps_ClockStretcher);
287 phm_cap_set(hwmgr->platform_descriptor.platformCaps,
288 PHM_PlatformCaps_RegulatorHot);
289 phm_cap_set(hwmgr->platform_descriptor.platformCaps,
290 PHM_PlatformCaps_AutomaticDCTransition);
292 phm_cap_set(hwmgr->platform_descriptor.platformCaps,
293 PHM_PlatformCaps_UVDDPM);
294 phm_cap_set(hwmgr->platform_descriptor.platformCaps,
295 PHM_PlatformCaps_VCEDPM);
300 static void vega10_init_dpm_defaults(struct pp_hwmgr *hwmgr)
302 struct vega10_hwmgr *data = (struct vega10_hwmgr *)(hwmgr->backend);
305 vega10_initialize_power_tune_defaults(hwmgr);
307 for (i = 0; i < GNLD_FEATURES_MAX; i++) {
308 data->smu_features[i].smu_feature_id = 0xffff;
309 data->smu_features[i].smu_feature_bitmap = 1 << i;
310 data->smu_features[i].enabled = false;
311 data->smu_features[i].supported = false;
314 data->smu_features[GNLD_DPM_PREFETCHER].smu_feature_id =
315 FEATURE_DPM_PREFETCHER_BIT;
316 data->smu_features[GNLD_DPM_GFXCLK].smu_feature_id =
317 FEATURE_DPM_GFXCLK_BIT;
318 data->smu_features[GNLD_DPM_UCLK].smu_feature_id =
319 FEATURE_DPM_UCLK_BIT;
320 data->smu_features[GNLD_DPM_SOCCLK].smu_feature_id =
321 FEATURE_DPM_SOCCLK_BIT;
322 data->smu_features[GNLD_DPM_UVD].smu_feature_id =
324 data->smu_features[GNLD_DPM_VCE].smu_feature_id =
326 data->smu_features[GNLD_DPM_MP0CLK].smu_feature_id =
327 FEATURE_DPM_MP0CLK_BIT;
328 data->smu_features[GNLD_DPM_LINK].smu_feature_id =
329 FEATURE_DPM_LINK_BIT;
330 data->smu_features[GNLD_DPM_DCEFCLK].smu_feature_id =
331 FEATURE_DPM_DCEFCLK_BIT;
332 data->smu_features[GNLD_ULV].smu_feature_id =
334 data->smu_features[GNLD_AVFS].smu_feature_id =
336 data->smu_features[GNLD_DS_GFXCLK].smu_feature_id =
337 FEATURE_DS_GFXCLK_BIT;
338 data->smu_features[GNLD_DS_SOCCLK].smu_feature_id =
339 FEATURE_DS_SOCCLK_BIT;
340 data->smu_features[GNLD_DS_LCLK].smu_feature_id =
342 data->smu_features[GNLD_PPT].smu_feature_id =
344 data->smu_features[GNLD_TDC].smu_feature_id =
346 data->smu_features[GNLD_THERMAL].smu_feature_id =
348 data->smu_features[GNLD_GFX_PER_CU_CG].smu_feature_id =
349 FEATURE_GFX_PER_CU_CG_BIT;
350 data->smu_features[GNLD_RM].smu_feature_id =
352 data->smu_features[GNLD_DS_DCEFCLK].smu_feature_id =
353 FEATURE_DS_DCEFCLK_BIT;
354 data->smu_features[GNLD_ACDC].smu_feature_id =
356 data->smu_features[GNLD_VR0HOT].smu_feature_id =
358 data->smu_features[GNLD_VR1HOT].smu_feature_id =
360 data->smu_features[GNLD_FW_CTF].smu_feature_id =
362 data->smu_features[GNLD_LED_DISPLAY].smu_feature_id =
363 FEATURE_LED_DISPLAY_BIT;
364 data->smu_features[GNLD_FAN_CONTROL].smu_feature_id =
365 FEATURE_FAN_CONTROL_BIT;
366 data->smu_features[GNLD_ACG].smu_feature_id = FEATURE_ACG_BIT;
367 data->smu_features[GNLD_DIDT].smu_feature_id = FEATURE_GFX_EDC_BIT;
369 if (!data->registry_data.prefetcher_dpm_key_disabled)
370 data->smu_features[GNLD_DPM_PREFETCHER].supported = true;
372 if (!data->registry_data.sclk_dpm_key_disabled)
373 data->smu_features[GNLD_DPM_GFXCLK].supported = true;
375 if (!data->registry_data.mclk_dpm_key_disabled)
376 data->smu_features[GNLD_DPM_UCLK].supported = true;
378 if (!data->registry_data.socclk_dpm_key_disabled)
379 data->smu_features[GNLD_DPM_SOCCLK].supported = true;
381 if (PP_CAP(PHM_PlatformCaps_UVDDPM))
382 data->smu_features[GNLD_DPM_UVD].supported = true;
384 if (PP_CAP(PHM_PlatformCaps_VCEDPM))
385 data->smu_features[GNLD_DPM_VCE].supported = true;
387 if (!data->registry_data.pcie_dpm_key_disabled)
388 data->smu_features[GNLD_DPM_LINK].supported = true;
390 if (!data->registry_data.dcefclk_dpm_key_disabled)
391 data->smu_features[GNLD_DPM_DCEFCLK].supported = true;
393 if (PP_CAP(PHM_PlatformCaps_SclkDeepSleep) &&
394 data->registry_data.sclk_deep_sleep_support) {
395 data->smu_features[GNLD_DS_GFXCLK].supported = true;
396 data->smu_features[GNLD_DS_SOCCLK].supported = true;
397 data->smu_features[GNLD_DS_LCLK].supported = true;
398 data->smu_features[GNLD_DS_DCEFCLK].supported = true;
401 if (data->registry_data.enable_pkg_pwr_tracking_feature)
402 data->smu_features[GNLD_PPT].supported = true;
404 if (data->registry_data.enable_tdc_limit_feature)
405 data->smu_features[GNLD_TDC].supported = true;
407 if (data->registry_data.thermal_support)
408 data->smu_features[GNLD_THERMAL].supported = true;
410 if (data->registry_data.fan_control_support)
411 data->smu_features[GNLD_FAN_CONTROL].supported = true;
413 if (data->registry_data.fw_ctf_enabled)
414 data->smu_features[GNLD_FW_CTF].supported = true;
416 if (data->registry_data.avfs_support)
417 data->smu_features[GNLD_AVFS].supported = true;
419 if (data->registry_data.led_dpm_enabled)
420 data->smu_features[GNLD_LED_DISPLAY].supported = true;
422 if (data->registry_data.vr1hot_enabled)
423 data->smu_features[GNLD_VR1HOT].supported = true;
425 if (data->registry_data.vr0hot_enabled)
426 data->smu_features[GNLD_VR0HOT].supported = true;
428 smum_send_msg_to_smc(hwmgr, PPSMC_MSG_GetSmuVersion);
429 vega10_read_arg_from_smc(hwmgr, &(hwmgr->smu_version));
430 /* ACG firmware has major version 5 */
431 if ((hwmgr->smu_version & 0xff000000) == 0x5000000)
432 data->smu_features[GNLD_ACG].supported = true;
434 if (data->registry_data.didt_support)
435 data->smu_features[GNLD_DIDT].supported = true;
439 #ifdef PPLIB_VEGA10_EVV_SUPPORT
440 static int vega10_get_socclk_for_voltage_evv(struct pp_hwmgr *hwmgr,
441 phm_ppt_v1_voltage_lookup_table *lookup_table,
442 uint16_t virtual_voltage_id, int32_t *socclk)
446 struct phm_ppt_v2_information *table_info =
447 (struct phm_ppt_v2_information *)(hwmgr->pptable);
449 PP_ASSERT_WITH_CODE(lookup_table->count != 0,
450 "Lookup table is empty",
453 /* search for leakage voltage ID 0xff01 ~ 0xff08 and sclk */
454 for (entry_id = 0; entry_id < table_info->vdd_dep_on_sclk->count; entry_id++) {
455 voltage_id = table_info->vdd_dep_on_socclk->entries[entry_id].vddInd;
456 if (lookup_table->entries[voltage_id].us_vdd == virtual_voltage_id)
460 PP_ASSERT_WITH_CODE(entry_id < table_info->vdd_dep_on_socclk->count,
461 "Can't find requested voltage id in vdd_dep_on_socclk table!",
464 *socclk = table_info->vdd_dep_on_socclk->entries[entry_id].clk;
469 #define ATOM_VIRTUAL_VOLTAGE_ID0 0xff01
471 * Get Leakage VDDC based on leakage ID.
473 * @param hwmgr the address of the powerplay hardware manager.
476 static int vega10_get_evv_voltages(struct pp_hwmgr *hwmgr)
478 struct vega10_hwmgr *data = (struct vega10_hwmgr *)(hwmgr->backend);
483 struct phm_ppt_v2_information *table_info =
484 (struct phm_ppt_v2_information *)hwmgr->pptable;
485 struct phm_ppt_v1_clock_voltage_dependency_table *socclk_table =
486 table_info->vdd_dep_on_socclk;
489 for (i = 0; i < VEGA10_MAX_LEAKAGE_COUNT; i++) {
490 vv_id = ATOM_VIRTUAL_VOLTAGE_ID0 + i;
492 if (!vega10_get_socclk_for_voltage_evv(hwmgr,
493 table_info->vddc_lookup_table, vv_id, &sclk)) {
494 if (PP_CAP(PHM_PlatformCaps_ClockStretcher)) {
495 for (j = 1; j < socclk_table->count; j++) {
496 if (socclk_table->entries[j].clk == sclk &&
497 socclk_table->entries[j].cks_enable == 0) {
504 PP_ASSERT_WITH_CODE(!atomctrl_get_voltage_evv_on_sclk_ai(hwmgr,
505 VOLTAGE_TYPE_VDDC, sclk, vv_id, &vddc),
506 "Error retrieving EVV voltage value!",
510 /* need to make sure vddc is less than 2v or else, it could burn the ASIC. */
511 PP_ASSERT_WITH_CODE((vddc < 2000 && vddc != 0),
512 "Invalid VDDC value", result = -EINVAL;);
514 /* the voltage should not be zero nor equal to leakage ID */
515 if (vddc != 0 && vddc != vv_id) {
516 data->vddc_leakage.actual_voltage[data->vddc_leakage.count] = (uint16_t)(vddc/100);
517 data->vddc_leakage.leakage_id[data->vddc_leakage.count] = vv_id;
518 data->vddc_leakage.count++;
527 * Change virtual leakage voltage to actual value.
529 * @param hwmgr the address of the powerplay hardware manager.
530 * @param pointer to changing voltage
531 * @param pointer to leakage table
533 static void vega10_patch_with_vdd_leakage(struct pp_hwmgr *hwmgr,
534 uint16_t *voltage, struct vega10_leakage_voltage *leakage_table)
538 /* search for leakage voltage ID 0xff01 ~ 0xff08 */
539 for (index = 0; index < leakage_table->count; index++) {
540 /* if this voltage matches a leakage voltage ID */
541 /* patch with actual leakage voltage */
542 if (leakage_table->leakage_id[index] == *voltage) {
543 *voltage = leakage_table->actual_voltage[index];
548 if (*voltage > ATOM_VIRTUAL_VOLTAGE_ID0)
549 pr_info("Voltage value looks like a Leakage ID but it's not patched\n");
553 * Patch voltage lookup table by EVV leakages.
555 * @param hwmgr the address of the powerplay hardware manager.
556 * @param pointer to voltage lookup table
557 * @param pointer to leakage table
560 static int vega10_patch_lookup_table_with_leakage(struct pp_hwmgr *hwmgr,
561 phm_ppt_v1_voltage_lookup_table *lookup_table,
562 struct vega10_leakage_voltage *leakage_table)
566 for (i = 0; i < lookup_table->count; i++)
567 vega10_patch_with_vdd_leakage(hwmgr,
568 &lookup_table->entries[i].us_vdd, leakage_table);
573 static int vega10_patch_clock_voltage_limits_with_vddc_leakage(
574 struct pp_hwmgr *hwmgr, struct vega10_leakage_voltage *leakage_table,
577 vega10_patch_with_vdd_leakage(hwmgr, (uint16_t *)vddc, leakage_table);
583 static int vega10_patch_voltage_dependency_tables_with_lookup_table(
584 struct pp_hwmgr *hwmgr)
586 uint8_t entry_id, voltage_id;
588 struct phm_ppt_v2_information *table_info =
589 (struct phm_ppt_v2_information *)(hwmgr->pptable);
590 struct phm_ppt_v1_mm_clock_voltage_dependency_table *mm_table =
591 table_info->mm_dep_table;
592 struct phm_ppt_v1_clock_voltage_dependency_table *mclk_table =
593 table_info->vdd_dep_on_mclk;
595 for (i = 0; i < 6; i++) {
596 struct phm_ppt_v1_clock_voltage_dependency_table *vdt;
598 case 0: vdt = table_info->vdd_dep_on_socclk; break;
599 case 1: vdt = table_info->vdd_dep_on_sclk; break;
600 case 2: vdt = table_info->vdd_dep_on_dcefclk; break;
601 case 3: vdt = table_info->vdd_dep_on_pixclk; break;
602 case 4: vdt = table_info->vdd_dep_on_dispclk; break;
603 case 5: vdt = table_info->vdd_dep_on_phyclk; break;
606 for (entry_id = 0; entry_id < vdt->count; entry_id++) {
607 voltage_id = vdt->entries[entry_id].vddInd;
608 vdt->entries[entry_id].vddc =
609 table_info->vddc_lookup_table->entries[voltage_id].us_vdd;
613 for (entry_id = 0; entry_id < mm_table->count; ++entry_id) {
614 voltage_id = mm_table->entries[entry_id].vddcInd;
615 mm_table->entries[entry_id].vddc =
616 table_info->vddc_lookup_table->entries[voltage_id].us_vdd;
619 for (entry_id = 0; entry_id < mclk_table->count; ++entry_id) {
620 voltage_id = mclk_table->entries[entry_id].vddInd;
621 mclk_table->entries[entry_id].vddc =
622 table_info->vddc_lookup_table->entries[voltage_id].us_vdd;
623 voltage_id = mclk_table->entries[entry_id].vddciInd;
624 mclk_table->entries[entry_id].vddci =
625 table_info->vddci_lookup_table->entries[voltage_id].us_vdd;
626 voltage_id = mclk_table->entries[entry_id].mvddInd;
627 mclk_table->entries[entry_id].mvdd =
628 table_info->vddmem_lookup_table->entries[voltage_id].us_vdd;
636 static int vega10_sort_lookup_table(struct pp_hwmgr *hwmgr,
637 struct phm_ppt_v1_voltage_lookup_table *lookup_table)
639 uint32_t table_size, i, j;
640 struct phm_ppt_v1_voltage_lookup_record tmp_voltage_lookup_record;
642 PP_ASSERT_WITH_CODE(lookup_table && lookup_table->count,
643 "Lookup table is empty", return -EINVAL);
645 table_size = lookup_table->count;
647 /* Sorting voltages */
648 for (i = 0; i < table_size - 1; i++) {
649 for (j = i + 1; j > 0; j--) {
650 if (lookup_table->entries[j].us_vdd <
651 lookup_table->entries[j - 1].us_vdd) {
652 tmp_voltage_lookup_record = lookup_table->entries[j - 1];
653 lookup_table->entries[j - 1] = lookup_table->entries[j];
654 lookup_table->entries[j] = tmp_voltage_lookup_record;
662 static int vega10_complete_dependency_tables(struct pp_hwmgr *hwmgr)
666 struct phm_ppt_v2_information *table_info =
667 (struct phm_ppt_v2_information *)(hwmgr->pptable);
668 #ifdef PPLIB_VEGA10_EVV_SUPPORT
669 struct vega10_hwmgr *data = (struct vega10_hwmgr *)(hwmgr->backend);
671 tmp_result = vega10_patch_lookup_table_with_leakage(hwmgr,
672 table_info->vddc_lookup_table, &(data->vddc_leakage));
676 tmp_result = vega10_patch_clock_voltage_limits_with_vddc_leakage(hwmgr,
677 &(data->vddc_leakage), &table_info->max_clock_voltage_on_dc.vddc);
682 tmp_result = vega10_patch_voltage_dependency_tables_with_lookup_table(hwmgr);
686 tmp_result = vega10_sort_lookup_table(hwmgr, table_info->vddc_lookup_table);
693 static int vega10_set_private_data_based_on_pptable(struct pp_hwmgr *hwmgr)
695 struct phm_ppt_v2_information *table_info =
696 (struct phm_ppt_v2_information *)(hwmgr->pptable);
697 struct phm_ppt_v1_clock_voltage_dependency_table *allowed_sclk_vdd_table =
698 table_info->vdd_dep_on_socclk;
699 struct phm_ppt_v1_clock_voltage_dependency_table *allowed_mclk_vdd_table =
700 table_info->vdd_dep_on_mclk;
702 PP_ASSERT_WITH_CODE(allowed_sclk_vdd_table,
703 "VDD dependency on SCLK table is missing. This table is mandatory", return -EINVAL);
704 PP_ASSERT_WITH_CODE(allowed_sclk_vdd_table->count >= 1,
705 "VDD dependency on SCLK table is empty. This table is mandatory", return -EINVAL);
707 PP_ASSERT_WITH_CODE(allowed_mclk_vdd_table,
708 "VDD dependency on MCLK table is missing. This table is mandatory", return -EINVAL);
709 PP_ASSERT_WITH_CODE(allowed_mclk_vdd_table->count >= 1,
710 "VDD dependency on MCLK table is empty. This table is mandatory", return -EINVAL);
712 table_info->max_clock_voltage_on_ac.sclk =
713 allowed_sclk_vdd_table->entries[allowed_sclk_vdd_table->count - 1].clk;
714 table_info->max_clock_voltage_on_ac.mclk =
715 allowed_mclk_vdd_table->entries[allowed_mclk_vdd_table->count - 1].clk;
716 table_info->max_clock_voltage_on_ac.vddc =
717 allowed_sclk_vdd_table->entries[allowed_sclk_vdd_table->count - 1].vddc;
718 table_info->max_clock_voltage_on_ac.vddci =
719 allowed_mclk_vdd_table->entries[allowed_mclk_vdd_table->count - 1].vddci;
721 hwmgr->dyn_state.max_clock_voltage_on_ac.sclk =
722 table_info->max_clock_voltage_on_ac.sclk;
723 hwmgr->dyn_state.max_clock_voltage_on_ac.mclk =
724 table_info->max_clock_voltage_on_ac.mclk;
725 hwmgr->dyn_state.max_clock_voltage_on_ac.vddc =
726 table_info->max_clock_voltage_on_ac.vddc;
727 hwmgr->dyn_state.max_clock_voltage_on_ac.vddci =
728 table_info->max_clock_voltage_on_ac.vddci;
733 static int vega10_hwmgr_backend_fini(struct pp_hwmgr *hwmgr)
735 kfree(hwmgr->dyn_state.vddc_dep_on_dal_pwrl);
736 hwmgr->dyn_state.vddc_dep_on_dal_pwrl = NULL;
738 kfree(hwmgr->backend);
739 hwmgr->backend = NULL;
744 static int vega10_hwmgr_backend_init(struct pp_hwmgr *hwmgr)
747 struct vega10_hwmgr *data;
748 uint32_t config_telemetry = 0;
749 struct pp_atomfwctrl_voltage_table vol_table;
750 struct cgs_system_info sys_info = {0};
753 data = kzalloc(sizeof(struct vega10_hwmgr), GFP_KERNEL);
757 hwmgr->backend = data;
759 vega10_set_default_registry_data(hwmgr);
761 data->disable_dpm_mask = 0xff;
762 data->workload_mask = 0xff;
764 /* need to set voltage control types before EVV patching */
765 data->vddc_control = VEGA10_VOLTAGE_CONTROL_NONE;
766 data->mvdd_control = VEGA10_VOLTAGE_CONTROL_NONE;
767 data->vddci_control = VEGA10_VOLTAGE_CONTROL_NONE;
770 if (pp_atomfwctrl_is_voltage_controlled_by_gpio_v4(hwmgr,
771 VOLTAGE_TYPE_VDDC, VOLTAGE_OBJ_SVID2)) {
772 if (!pp_atomfwctrl_get_voltage_table_v4(hwmgr,
773 VOLTAGE_TYPE_VDDC, VOLTAGE_OBJ_SVID2,
775 config_telemetry = ((vol_table.telemetry_slope << 8) & 0xff00) |
776 (vol_table.telemetry_offset & 0xff);
777 data->vddc_control = VEGA10_VOLTAGE_CONTROL_BY_SVID2;
780 kfree(hwmgr->backend);
781 hwmgr->backend = NULL;
782 PP_ASSERT_WITH_CODE(false,
783 "VDDCR_SOC is not SVID2!",
788 if (pp_atomfwctrl_is_voltage_controlled_by_gpio_v4(hwmgr,
789 VOLTAGE_TYPE_MVDDC, VOLTAGE_OBJ_SVID2)) {
790 if (!pp_atomfwctrl_get_voltage_table_v4(hwmgr,
791 VOLTAGE_TYPE_MVDDC, VOLTAGE_OBJ_SVID2,
794 ((vol_table.telemetry_slope << 24) & 0xff000000) |
795 ((vol_table.telemetry_offset << 16) & 0xff0000);
796 data->mvdd_control = VEGA10_VOLTAGE_CONTROL_BY_SVID2;
801 if (PP_CAP(PHM_PlatformCaps_ControlVDDCI)) {
802 if (pp_atomfwctrl_is_voltage_controlled_by_gpio_v4(hwmgr,
803 VOLTAGE_TYPE_VDDCI, VOLTAGE_OBJ_GPIO_LUT))
804 data->vddci_control = VEGA10_VOLTAGE_CONTROL_BY_GPIO;
807 data->config_telemetry = config_telemetry;
809 vega10_set_features_platform_caps(hwmgr);
811 vega10_init_dpm_defaults(hwmgr);
813 #ifdef PPLIB_VEGA10_EVV_SUPPORT
814 /* Get leakage voltage based on leakage ID. */
815 PP_ASSERT_WITH_CODE(!vega10_get_evv_voltages(hwmgr),
816 "Get EVV Voltage Failed. Abort Driver loading!",
820 /* Patch our voltage dependency table with actual leakage voltage
821 * We need to perform leakage translation before it's used by other functions
823 vega10_complete_dependency_tables(hwmgr);
825 /* Parse pptable data read from VBIOS */
826 vega10_set_private_data_based_on_pptable(hwmgr);
828 data->is_tlu_enabled = false;
830 hwmgr->platform_descriptor.hardwareActivityPerformanceLevels =
831 VEGA10_MAX_HARDWARE_POWERLEVELS;
832 hwmgr->platform_descriptor.hardwarePerformanceLevels = 2;
833 hwmgr->platform_descriptor.minimumClocksReductionPercentage = 50;
835 hwmgr->platform_descriptor.vbiosInterruptId = 0x20000400; /* IRQ_SOURCE1_SW_INT */
836 /* The true clock step depends on the frequency, typically 4.5 or 9 MHz. Here we use 5. */
837 hwmgr->platform_descriptor.clockStep.engineClock = 500;
838 hwmgr->platform_descriptor.clockStep.memoryClock = 500;
840 sys_info.size = sizeof(struct cgs_system_info);
841 sys_info.info_id = CGS_SYSTEM_INFO_GFX_CU_INFO;
842 result = cgs_query_system_info(hwmgr->device, &sys_info);
843 data->total_active_cus = sys_info.value;
844 /* Setup default Overdrive Fan control settings */
845 data->odn_fan_table.target_fan_speed =
846 hwmgr->thermal_controller.advanceFanControlParameters.usMaxFanRPM;
847 data->odn_fan_table.target_temperature =
848 hwmgr->thermal_controller.
849 advanceFanControlParameters.ucTargetTemperature;
850 data->odn_fan_table.min_performance_clock =
851 hwmgr->thermal_controller.advanceFanControlParameters.
852 ulMinFanSCLKAcousticLimit;
853 data->odn_fan_table.min_fan_limit =
854 hwmgr->thermal_controller.
855 advanceFanControlParameters.usFanPWMMinLimit *
856 hwmgr->thermal_controller.fanInfo.ulMaxRPM / 100;
858 reg = soc15_get_register_offset(DF_HWID, 0,
859 mmDF_CS_AON0_DramBaseAddress0_BASE_IDX,
860 mmDF_CS_AON0_DramBaseAddress0);
861 data->mem_channels = (cgs_read_register(hwmgr->device, reg) &
862 DF_CS_AON0_DramBaseAddress0__IntLvNumChan_MASK) >>
863 DF_CS_AON0_DramBaseAddress0__IntLvNumChan__SHIFT;
864 PP_ASSERT_WITH_CODE(data->mem_channels < ARRAY_SIZE(channel_number),
865 "Mem Channel Index Exceeded maximum!",
871 static int vega10_init_sclk_threshold(struct pp_hwmgr *hwmgr)
873 struct vega10_hwmgr *data =
874 (struct vega10_hwmgr *)(hwmgr->backend);
876 data->low_sclk_interrupt_threshold = 0;
881 static int vega10_setup_dpm_led_config(struct pp_hwmgr *hwmgr)
883 struct vega10_hwmgr *data =
884 (struct vega10_hwmgr *)(hwmgr->backend);
885 PPTable_t *pp_table = &(data->smc_state_table.pp_table);
887 struct pp_atomfwctrl_voltage_table table;
893 ret = pp_atomfwctrl_get_voltage_table_v4(hwmgr, VOLTAGE_TYPE_LEDDPM,
894 VOLTAGE_OBJ_GPIO_LUT, &table);
897 tmp = table.mask_low;
898 for (i = 0, j = 0; i < 32; i++) {
900 mask |= (uint32_t)(i << (8 * j));
908 pp_table->LedPin0 = (uint8_t)(mask & 0xff);
909 pp_table->LedPin1 = (uint8_t)((mask >> 8) & 0xff);
910 pp_table->LedPin2 = (uint8_t)((mask >> 16) & 0xff);
914 static int vega10_setup_asic_task(struct pp_hwmgr *hwmgr)
916 PP_ASSERT_WITH_CODE(!vega10_init_sclk_threshold(hwmgr),
917 "Failed to init sclk threshold!",
920 PP_ASSERT_WITH_CODE(!vega10_setup_dpm_led_config(hwmgr),
921 "Failed to set up led dpm config!",
927 static bool vega10_is_dpm_running(struct pp_hwmgr *hwmgr)
929 uint32_t features_enabled;
931 if (!vega10_get_smc_features(hwmgr, &features_enabled)) {
932 if (features_enabled & SMC_DPM_FEATURES)
939 * Remove repeated voltage values and create table with unique values.
941 * @param hwmgr the address of the powerplay hardware manager.
942 * @param vol_table the pointer to changing voltage table
943 * @return 0 in success
946 static int vega10_trim_voltage_table(struct pp_hwmgr *hwmgr,
947 struct pp_atomfwctrl_voltage_table *vol_table)
952 struct pp_atomfwctrl_voltage_table *table;
954 PP_ASSERT_WITH_CODE(vol_table,
955 "Voltage Table empty.", return -EINVAL);
956 table = kzalloc(sizeof(struct pp_atomfwctrl_voltage_table),
962 table->mask_low = vol_table->mask_low;
963 table->phase_delay = vol_table->phase_delay;
965 for (i = 0; i < vol_table->count; i++) {
966 vvalue = vol_table->entries[i].value;
969 for (j = 0; j < table->count; j++) {
970 if (vvalue == table->entries[j].value) {
977 table->entries[table->count].value = vvalue;
978 table->entries[table->count].smio_low =
979 vol_table->entries[i].smio_low;
984 memcpy(vol_table, table, sizeof(struct pp_atomfwctrl_voltage_table));
990 static int vega10_get_mvdd_voltage_table(struct pp_hwmgr *hwmgr,
991 phm_ppt_v1_clock_voltage_dependency_table *dep_table,
992 struct pp_atomfwctrl_voltage_table *vol_table)
996 PP_ASSERT_WITH_CODE(dep_table->count,
997 "Voltage Dependency Table empty.",
1000 vol_table->mask_low = 0;
1001 vol_table->phase_delay = 0;
1002 vol_table->count = dep_table->count;
1004 for (i = 0; i < vol_table->count; i++) {
1005 vol_table->entries[i].value = dep_table->entries[i].mvdd;
1006 vol_table->entries[i].smio_low = 0;
1009 PP_ASSERT_WITH_CODE(!vega10_trim_voltage_table(hwmgr,
1011 "Failed to trim MVDD Table!",
1017 static int vega10_get_vddci_voltage_table(struct pp_hwmgr *hwmgr,
1018 phm_ppt_v1_clock_voltage_dependency_table *dep_table,
1019 struct pp_atomfwctrl_voltage_table *vol_table)
1023 PP_ASSERT_WITH_CODE(dep_table->count,
1024 "Voltage Dependency Table empty.",
1027 vol_table->mask_low = 0;
1028 vol_table->phase_delay = 0;
1029 vol_table->count = dep_table->count;
1031 for (i = 0; i < dep_table->count; i++) {
1032 vol_table->entries[i].value = dep_table->entries[i].vddci;
1033 vol_table->entries[i].smio_low = 0;
1036 PP_ASSERT_WITH_CODE(!vega10_trim_voltage_table(hwmgr, vol_table),
1037 "Failed to trim VDDCI table.",
1043 static int vega10_get_vdd_voltage_table(struct pp_hwmgr *hwmgr,
1044 phm_ppt_v1_clock_voltage_dependency_table *dep_table,
1045 struct pp_atomfwctrl_voltage_table *vol_table)
1049 PP_ASSERT_WITH_CODE(dep_table->count,
1050 "Voltage Dependency Table empty.",
1053 vol_table->mask_low = 0;
1054 vol_table->phase_delay = 0;
1055 vol_table->count = dep_table->count;
1057 for (i = 0; i < vol_table->count; i++) {
1058 vol_table->entries[i].value = dep_table->entries[i].vddc;
1059 vol_table->entries[i].smio_low = 0;
1065 /* ---- Voltage Tables ----
1066 * If the voltage table would be bigger than
1067 * what will fit into the state table on
1068 * the SMC keep only the higher entries.
1070 static void vega10_trim_voltage_table_to_fit_state_table(
1071 struct pp_hwmgr *hwmgr,
1072 uint32_t max_vol_steps,
1073 struct pp_atomfwctrl_voltage_table *vol_table)
1075 unsigned int i, diff;
1077 if (vol_table->count <= max_vol_steps)
1080 diff = vol_table->count - max_vol_steps;
1082 for (i = 0; i < max_vol_steps; i++)
1083 vol_table->entries[i] = vol_table->entries[i + diff];
1085 vol_table->count = max_vol_steps;
1089 * Create Voltage Tables.
1091 * @param hwmgr the address of the powerplay hardware manager.
1094 static int vega10_construct_voltage_tables(struct pp_hwmgr *hwmgr)
1096 struct vega10_hwmgr *data = (struct vega10_hwmgr *)(hwmgr->backend);
1097 struct phm_ppt_v2_information *table_info =
1098 (struct phm_ppt_v2_information *)hwmgr->pptable;
1101 if (data->mvdd_control == VEGA10_VOLTAGE_CONTROL_BY_SVID2 ||
1102 data->mvdd_control == VEGA10_VOLTAGE_CONTROL_NONE) {
1103 result = vega10_get_mvdd_voltage_table(hwmgr,
1104 table_info->vdd_dep_on_mclk,
1105 &(data->mvdd_voltage_table));
1106 PP_ASSERT_WITH_CODE(!result,
1107 "Failed to retrieve MVDDC table!",
1111 if (data->vddci_control == VEGA10_VOLTAGE_CONTROL_NONE) {
1112 result = vega10_get_vddci_voltage_table(hwmgr,
1113 table_info->vdd_dep_on_mclk,
1114 &(data->vddci_voltage_table));
1115 PP_ASSERT_WITH_CODE(!result,
1116 "Failed to retrieve VDDCI_MEM table!",
1120 if (data->vddc_control == VEGA10_VOLTAGE_CONTROL_BY_SVID2 ||
1121 data->vddc_control == VEGA10_VOLTAGE_CONTROL_NONE) {
1122 result = vega10_get_vdd_voltage_table(hwmgr,
1123 table_info->vdd_dep_on_sclk,
1124 &(data->vddc_voltage_table));
1125 PP_ASSERT_WITH_CODE(!result,
1126 "Failed to retrieve VDDCR_SOC table!",
1130 PP_ASSERT_WITH_CODE(data->vddc_voltage_table.count <= 16,
1131 "Too many voltage values for VDDC. Trimming to fit state table.",
1132 vega10_trim_voltage_table_to_fit_state_table(hwmgr,
1133 16, &(data->vddc_voltage_table)));
1135 PP_ASSERT_WITH_CODE(data->vddci_voltage_table.count <= 16,
1136 "Too many voltage values for VDDCI. Trimming to fit state table.",
1137 vega10_trim_voltage_table_to_fit_state_table(hwmgr,
1138 16, &(data->vddci_voltage_table)));
1140 PP_ASSERT_WITH_CODE(data->mvdd_voltage_table.count <= 16,
1141 "Too many voltage values for MVDD. Trimming to fit state table.",
1142 vega10_trim_voltage_table_to_fit_state_table(hwmgr,
1143 16, &(data->mvdd_voltage_table)));
1150 * @fn vega10_init_dpm_state
1151 * @brief Function to initialize all Soft Min/Max and Hard Min/Max to 0xff.
1153 * @param dpm_state - the address of the DPM Table to initiailize.
1156 static void vega10_init_dpm_state(struct vega10_dpm_state *dpm_state)
1158 dpm_state->soft_min_level = 0xff;
1159 dpm_state->soft_max_level = 0xff;
1160 dpm_state->hard_min_level = 0xff;
1161 dpm_state->hard_max_level = 0xff;
1164 static void vega10_setup_default_single_dpm_table(struct pp_hwmgr *hwmgr,
1165 struct vega10_single_dpm_table *dpm_table,
1166 struct phm_ppt_v1_clock_voltage_dependency_table *dep_table)
1170 dpm_table->count = 0;
1172 for (i = 0; i < dep_table->count; i++) {
1173 if (i == 0 || dpm_table->dpm_levels[dpm_table->count - 1].value <=
1174 dep_table->entries[i].clk) {
1175 dpm_table->dpm_levels[dpm_table->count].value =
1176 dep_table->entries[i].clk;
1177 dpm_table->dpm_levels[dpm_table->count].enabled = true;
1182 static int vega10_setup_default_pcie_table(struct pp_hwmgr *hwmgr)
1184 struct vega10_hwmgr *data =
1185 (struct vega10_hwmgr *)(hwmgr->backend);
1186 struct vega10_pcie_table *pcie_table = &(data->dpm_table.pcie_table);
1187 struct phm_ppt_v2_information *table_info =
1188 (struct phm_ppt_v2_information *)(hwmgr->pptable);
1189 struct phm_ppt_v1_pcie_table *bios_pcie_table =
1190 table_info->pcie_table;
1193 PP_ASSERT_WITH_CODE(bios_pcie_table->count,
1194 "Incorrect number of PCIE States from VBIOS!",
1197 for (i = 0; i < NUM_LINK_LEVELS; i++) {
1198 if (data->registry_data.pcieSpeedOverride)
1199 pcie_table->pcie_gen[i] =
1200 data->registry_data.pcieSpeedOverride;
1202 pcie_table->pcie_gen[i] =
1203 bios_pcie_table->entries[i].gen_speed;
1205 if (data->registry_data.pcieLaneOverride)
1206 pcie_table->pcie_lane[i] = (uint8_t)encode_pcie_lane_width(
1207 data->registry_data.pcieLaneOverride);
1209 pcie_table->pcie_lane[i] = (uint8_t)encode_pcie_lane_width(
1210 bios_pcie_table->entries[i].lane_width);
1211 if (data->registry_data.pcieClockOverride)
1212 pcie_table->lclk[i] =
1213 data->registry_data.pcieClockOverride;
1215 pcie_table->lclk[i] =
1216 bios_pcie_table->entries[i].pcie_sclk;
1219 pcie_table->count = NUM_LINK_LEVELS;
1225 * This function is to initialize all DPM state tables
1226 * for SMU based on the dependency table.
1227 * Dynamic state patching function will then trim these
1228 * state tables to the allowed range based
1229 * on the power policy or external client requests,
1230 * such as UVD request, etc.
1232 static int vega10_setup_default_dpm_tables(struct pp_hwmgr *hwmgr)
1234 struct vega10_hwmgr *data =
1235 (struct vega10_hwmgr *)(hwmgr->backend);
1236 struct phm_ppt_v2_information *table_info =
1237 (struct phm_ppt_v2_information *)(hwmgr->pptable);
1238 struct vega10_single_dpm_table *dpm_table;
1241 struct phm_ppt_v1_clock_voltage_dependency_table *dep_soc_table =
1242 table_info->vdd_dep_on_socclk;
1243 struct phm_ppt_v1_clock_voltage_dependency_table *dep_gfx_table =
1244 table_info->vdd_dep_on_sclk;
1245 struct phm_ppt_v1_clock_voltage_dependency_table *dep_mclk_table =
1246 table_info->vdd_dep_on_mclk;
1247 struct phm_ppt_v1_mm_clock_voltage_dependency_table *dep_mm_table =
1248 table_info->mm_dep_table;
1249 struct phm_ppt_v1_clock_voltage_dependency_table *dep_dcef_table =
1250 table_info->vdd_dep_on_dcefclk;
1251 struct phm_ppt_v1_clock_voltage_dependency_table *dep_pix_table =
1252 table_info->vdd_dep_on_pixclk;
1253 struct phm_ppt_v1_clock_voltage_dependency_table *dep_disp_table =
1254 table_info->vdd_dep_on_dispclk;
1255 struct phm_ppt_v1_clock_voltage_dependency_table *dep_phy_table =
1256 table_info->vdd_dep_on_phyclk;
1258 PP_ASSERT_WITH_CODE(dep_soc_table,
1259 "SOCCLK dependency table is missing. This table is mandatory",
1261 PP_ASSERT_WITH_CODE(dep_soc_table->count >= 1,
1262 "SOCCLK dependency table is empty. This table is mandatory",
1265 PP_ASSERT_WITH_CODE(dep_gfx_table,
1266 "GFXCLK dependency table is missing. This table is mandatory",
1268 PP_ASSERT_WITH_CODE(dep_gfx_table->count >= 1,
1269 "GFXCLK dependency table is empty. This table is mandatory",
1272 PP_ASSERT_WITH_CODE(dep_mclk_table,
1273 "MCLK dependency table is missing. This table is mandatory",
1275 PP_ASSERT_WITH_CODE(dep_mclk_table->count >= 1,
1276 "MCLK dependency table has to have is missing. This table is mandatory",
1279 /* Initialize Sclk DPM table based on allow Sclk values */
1280 dpm_table = &(data->dpm_table.soc_table);
1281 vega10_setup_default_single_dpm_table(hwmgr,
1285 vega10_init_dpm_state(&(dpm_table->dpm_state));
1287 dpm_table = &(data->dpm_table.gfx_table);
1288 vega10_setup_default_single_dpm_table(hwmgr,
1291 vega10_init_dpm_state(&(dpm_table->dpm_state));
1293 /* Initialize Mclk DPM table based on allow Mclk values */
1294 data->dpm_table.mem_table.count = 0;
1295 dpm_table = &(data->dpm_table.mem_table);
1296 vega10_setup_default_single_dpm_table(hwmgr,
1299 vega10_init_dpm_state(&(dpm_table->dpm_state));
1301 data->dpm_table.eclk_table.count = 0;
1302 dpm_table = &(data->dpm_table.eclk_table);
1303 for (i = 0; i < dep_mm_table->count; i++) {
1304 if (i == 0 || dpm_table->dpm_levels
1305 [dpm_table->count - 1].value <=
1306 dep_mm_table->entries[i].eclk) {
1307 dpm_table->dpm_levels[dpm_table->count].value =
1308 dep_mm_table->entries[i].eclk;
1309 dpm_table->dpm_levels[dpm_table->count].enabled =
1310 (i == 0) ? true : false;
1314 vega10_init_dpm_state(&(dpm_table->dpm_state));
1316 data->dpm_table.vclk_table.count = 0;
1317 data->dpm_table.dclk_table.count = 0;
1318 dpm_table = &(data->dpm_table.vclk_table);
1319 for (i = 0; i < dep_mm_table->count; i++) {
1320 if (i == 0 || dpm_table->dpm_levels
1321 [dpm_table->count - 1].value <=
1322 dep_mm_table->entries[i].vclk) {
1323 dpm_table->dpm_levels[dpm_table->count].value =
1324 dep_mm_table->entries[i].vclk;
1325 dpm_table->dpm_levels[dpm_table->count].enabled =
1326 (i == 0) ? true : false;
1330 vega10_init_dpm_state(&(dpm_table->dpm_state));
1332 dpm_table = &(data->dpm_table.dclk_table);
1333 for (i = 0; i < dep_mm_table->count; i++) {
1334 if (i == 0 || dpm_table->dpm_levels
1335 [dpm_table->count - 1].value <=
1336 dep_mm_table->entries[i].dclk) {
1337 dpm_table->dpm_levels[dpm_table->count].value =
1338 dep_mm_table->entries[i].dclk;
1339 dpm_table->dpm_levels[dpm_table->count].enabled =
1340 (i == 0) ? true : false;
1344 vega10_init_dpm_state(&(dpm_table->dpm_state));
1346 /* Assume there is no headless Vega10 for now */
1347 dpm_table = &(data->dpm_table.dcef_table);
1348 vega10_setup_default_single_dpm_table(hwmgr,
1352 vega10_init_dpm_state(&(dpm_table->dpm_state));
1354 dpm_table = &(data->dpm_table.pixel_table);
1355 vega10_setup_default_single_dpm_table(hwmgr,
1359 vega10_init_dpm_state(&(dpm_table->dpm_state));
1361 dpm_table = &(data->dpm_table.display_table);
1362 vega10_setup_default_single_dpm_table(hwmgr,
1366 vega10_init_dpm_state(&(dpm_table->dpm_state));
1368 dpm_table = &(data->dpm_table.phy_table);
1369 vega10_setup_default_single_dpm_table(hwmgr,
1373 vega10_init_dpm_state(&(dpm_table->dpm_state));
1375 vega10_setup_default_pcie_table(hwmgr);
1377 /* save a copy of the default DPM table */
1378 memcpy(&(data->golden_dpm_table), &(data->dpm_table),
1379 sizeof(struct vega10_dpm_table));
1381 if (PP_CAP(PHM_PlatformCaps_ODNinACSupport) ||
1382 PP_CAP(PHM_PlatformCaps_ODNinDCSupport)) {
1383 data->odn_dpm_table.odn_core_clock_dpm_levels.
1384 number_of_performance_levels = data->dpm_table.gfx_table.count;
1385 for (i = 0; i < data->dpm_table.gfx_table.count; i++) {
1386 data->odn_dpm_table.odn_core_clock_dpm_levels.
1387 performance_level_entries[i].clock =
1388 data->dpm_table.gfx_table.dpm_levels[i].value;
1389 data->odn_dpm_table.odn_core_clock_dpm_levels.
1390 performance_level_entries[i].enabled = true;
1393 data->odn_dpm_table.vdd_dependency_on_sclk.count =
1394 dep_gfx_table->count;
1395 for (i = 0; i < dep_gfx_table->count; i++) {
1396 data->odn_dpm_table.vdd_dependency_on_sclk.entries[i].clk =
1397 dep_gfx_table->entries[i].clk;
1398 data->odn_dpm_table.vdd_dependency_on_sclk.entries[i].vddInd =
1399 dep_gfx_table->entries[i].vddInd;
1400 data->odn_dpm_table.vdd_dependency_on_sclk.entries[i].cks_enable =
1401 dep_gfx_table->entries[i].cks_enable;
1402 data->odn_dpm_table.vdd_dependency_on_sclk.entries[i].cks_voffset =
1403 dep_gfx_table->entries[i].cks_voffset;
1406 data->odn_dpm_table.odn_memory_clock_dpm_levels.
1407 number_of_performance_levels = data->dpm_table.mem_table.count;
1408 for (i = 0; i < data->dpm_table.mem_table.count; i++) {
1409 data->odn_dpm_table.odn_memory_clock_dpm_levels.
1410 performance_level_entries[i].clock =
1411 data->dpm_table.mem_table.dpm_levels[i].value;
1412 data->odn_dpm_table.odn_memory_clock_dpm_levels.
1413 performance_level_entries[i].enabled = true;
1416 data->odn_dpm_table.vdd_dependency_on_mclk.count = dep_mclk_table->count;
1417 for (i = 0; i < dep_mclk_table->count; i++) {
1418 data->odn_dpm_table.vdd_dependency_on_mclk.entries[i].clk =
1419 dep_mclk_table->entries[i].clk;
1420 data->odn_dpm_table.vdd_dependency_on_mclk.entries[i].vddInd =
1421 dep_mclk_table->entries[i].vddInd;
1422 data->odn_dpm_table.vdd_dependency_on_mclk.entries[i].vddci =
1423 dep_mclk_table->entries[i].vddci;
1431 * @fn vega10_populate_ulv_state
1432 * @brief Function to provide parameters for Utral Low Voltage state to SMC.
1434 * @param hwmgr - the address of the hardware manager.
1437 static int vega10_populate_ulv_state(struct pp_hwmgr *hwmgr)
1439 struct vega10_hwmgr *data =
1440 (struct vega10_hwmgr *)(hwmgr->backend);
1441 struct phm_ppt_v2_information *table_info =
1442 (struct phm_ppt_v2_information *)(hwmgr->pptable);
1444 data->smc_state_table.pp_table.UlvOffsetVid =
1445 (uint8_t)table_info->us_ulv_voltage_offset;
1447 data->smc_state_table.pp_table.UlvSmnclkDid =
1448 (uint8_t)(table_info->us_ulv_smnclk_did);
1449 data->smc_state_table.pp_table.UlvMp1clkDid =
1450 (uint8_t)(table_info->us_ulv_mp1clk_did);
1451 data->smc_state_table.pp_table.UlvGfxclkBypass =
1452 (uint8_t)(table_info->us_ulv_gfxclk_bypass);
1453 data->smc_state_table.pp_table.UlvPhaseSheddingPsi0 =
1454 (uint8_t)(data->vddc_voltage_table.psi0_enable);
1455 data->smc_state_table.pp_table.UlvPhaseSheddingPsi1 =
1456 (uint8_t)(data->vddc_voltage_table.psi1_enable);
1461 static int vega10_populate_single_lclk_level(struct pp_hwmgr *hwmgr,
1462 uint32_t lclock, uint8_t *curr_lclk_did)
1464 struct pp_atomfwctrl_clock_dividers_soc15 dividers;
1466 PP_ASSERT_WITH_CODE(!pp_atomfwctrl_get_gpu_pll_dividers_vega10(
1468 COMPUTE_GPUCLK_INPUT_FLAG_DEFAULT_GPUCLK,
1470 "Failed to get LCLK clock settings from VBIOS!",
1473 *curr_lclk_did = dividers.ulDid;
1478 static int vega10_populate_smc_link_levels(struct pp_hwmgr *hwmgr)
1481 struct vega10_hwmgr *data =
1482 (struct vega10_hwmgr *)(hwmgr->backend);
1483 PPTable_t *pp_table = &(data->smc_state_table.pp_table);
1484 struct vega10_pcie_table *pcie_table =
1485 &(data->dpm_table.pcie_table);
1488 for (i = 0; i < pcie_table->count; i++) {
1489 pp_table->PcieGenSpeed[i] = pcie_table->pcie_gen[i];
1490 pp_table->PcieLaneCount[i] = pcie_table->pcie_lane[i];
1492 result = vega10_populate_single_lclk_level(hwmgr,
1493 pcie_table->lclk[i], &(pp_table->LclkDid[i]));
1495 pr_info("Populate LClock Level %d Failed!\n", i);
1501 while (i < NUM_LINK_LEVELS) {
1502 pp_table->PcieGenSpeed[i] = pcie_table->pcie_gen[j];
1503 pp_table->PcieLaneCount[i] = pcie_table->pcie_lane[j];
1505 result = vega10_populate_single_lclk_level(hwmgr,
1506 pcie_table->lclk[j], &(pp_table->LclkDid[i]));
1508 pr_info("Populate LClock Level %d Failed!\n", i);
1518 * Populates single SMC GFXSCLK structure using the provided engine clock
1520 * @param hwmgr the address of the hardware manager
1521 * @param gfx_clock the GFX clock to use to populate the structure.
1522 * @param current_gfxclk_level location in PPTable for the SMC GFXCLK structure.
1525 static int vega10_populate_single_gfx_level(struct pp_hwmgr *hwmgr,
1526 uint32_t gfx_clock, PllSetting_t *current_gfxclk_level,
1529 struct phm_ppt_v2_information *table_info =
1530 (struct phm_ppt_v2_information *)(hwmgr->pptable);
1531 struct phm_ppt_v1_clock_voltage_dependency_table *dep_on_sclk =
1532 table_info->vdd_dep_on_sclk;
1533 struct vega10_hwmgr *data =
1534 (struct vega10_hwmgr *)(hwmgr->backend);
1535 struct pp_atomfwctrl_clock_dividers_soc15 dividers;
1536 uint32_t gfx_max_clock =
1537 hwmgr->platform_descriptor.overdriveLimit.engineClock;
1540 if (data->apply_overdrive_next_settings_mask &
1541 DPMTABLE_OD_UPDATE_VDDC)
1542 dep_on_sclk = (struct phm_ppt_v1_clock_voltage_dependency_table *)
1543 &(data->odn_dpm_table.vdd_dependency_on_sclk);
1545 PP_ASSERT_WITH_CODE(dep_on_sclk,
1546 "Invalid SOC_VDD-GFX_CLK Dependency Table!",
1549 if (data->need_update_dpm_table & DPMTABLE_OD_UPDATE_SCLK)
1550 gfx_clock = gfx_clock > gfx_max_clock ? gfx_max_clock : gfx_clock;
1552 for (i = 0; i < dep_on_sclk->count; i++) {
1553 if (dep_on_sclk->entries[i].clk == gfx_clock)
1556 PP_ASSERT_WITH_CODE(dep_on_sclk->count > i,
1557 "Cannot find gfx_clk in SOC_VDD-GFX_CLK!",
1561 PP_ASSERT_WITH_CODE(!pp_atomfwctrl_get_gpu_pll_dividers_vega10(hwmgr,
1562 COMPUTE_GPUCLK_INPUT_FLAG_GFXCLK,
1563 gfx_clock, ÷rs),
1564 "Failed to get GFX Clock settings from VBIOS!",
1567 /* Feedback Multiplier: bit 0:8 int, bit 15:12 post_div, bit 31:16 frac */
1568 current_gfxclk_level->FbMult =
1569 cpu_to_le32(dividers.ulPll_fb_mult);
1570 /* Spread FB Multiplier bit: bit 0:8 int, bit 31:16 frac */
1571 current_gfxclk_level->SsOn = dividers.ucPll_ss_enable;
1572 current_gfxclk_level->SsFbMult =
1573 cpu_to_le32(dividers.ulPll_ss_fbsmult);
1574 current_gfxclk_level->SsSlewFrac =
1575 cpu_to_le16(dividers.usPll_ss_slew_frac);
1576 current_gfxclk_level->Did = (uint8_t)(dividers.ulDid);
1578 *acg_freq = gfx_clock / 100; /* 100 Khz to Mhz conversion */
1584 * @brief Populates single SMC SOCCLK structure using the provided clock.
1586 * @param hwmgr - the address of the hardware manager.
1587 * @param soc_clock - the SOC clock to use to populate the structure.
1588 * @param current_socclk_level - location in PPTable for the SMC SOCCLK structure.
1589 * @return 0 on success..
1591 static int vega10_populate_single_soc_level(struct pp_hwmgr *hwmgr,
1592 uint32_t soc_clock, uint8_t *current_soc_did,
1593 uint8_t *current_vol_index)
1595 struct phm_ppt_v2_information *table_info =
1596 (struct phm_ppt_v2_information *)(hwmgr->pptable);
1597 struct phm_ppt_v1_clock_voltage_dependency_table *dep_on_soc =
1598 table_info->vdd_dep_on_socclk;
1599 struct pp_atomfwctrl_clock_dividers_soc15 dividers;
1602 PP_ASSERT_WITH_CODE(dep_on_soc,
1603 "Invalid SOC_VDD-SOC_CLK Dependency Table!",
1605 for (i = 0; i < dep_on_soc->count; i++) {
1606 if (dep_on_soc->entries[i].clk == soc_clock)
1609 PP_ASSERT_WITH_CODE(dep_on_soc->count > i,
1610 "Cannot find SOC_CLK in SOC_VDD-SOC_CLK Dependency Table",
1612 PP_ASSERT_WITH_CODE(!pp_atomfwctrl_get_gpu_pll_dividers_vega10(hwmgr,
1613 COMPUTE_GPUCLK_INPUT_FLAG_DEFAULT_GPUCLK,
1614 soc_clock, ÷rs),
1615 "Failed to get SOC Clock settings from VBIOS!",
1618 *current_soc_did = (uint8_t)dividers.ulDid;
1619 *current_vol_index = (uint8_t)(dep_on_soc->entries[i].vddInd);
1624 uint16_t vega10_locate_vddc_given_clock(struct pp_hwmgr *hwmgr,
1626 struct phm_ppt_v1_clock_voltage_dependency_table *dep_table)
1630 for (i = 0; i < dep_table->count; i++) {
1631 if (dep_table->entries[i].clk == clk)
1632 return dep_table->entries[i].vddc;
1635 pr_info("[LocateVddcGivenClock] Cannot locate SOC Vddc for this clock!");
1640 * Populates all SMC SCLK levels' structure based on the trimmed allowed dpm engine clock states
1642 * @param hwmgr the address of the hardware manager
1644 static int vega10_populate_all_graphic_levels(struct pp_hwmgr *hwmgr)
1646 struct vega10_hwmgr *data =
1647 (struct vega10_hwmgr *)(hwmgr->backend);
1648 struct phm_ppt_v2_information *table_info =
1649 (struct phm_ppt_v2_information *)(hwmgr->pptable);
1650 struct phm_ppt_v1_clock_voltage_dependency_table *dep_table =
1651 table_info->vdd_dep_on_socclk;
1652 PPTable_t *pp_table = &(data->smc_state_table.pp_table);
1653 struct vega10_single_dpm_table *dpm_table = &(data->dpm_table.gfx_table);
1657 for (i = 0; i < dpm_table->count; i++) {
1658 result = vega10_populate_single_gfx_level(hwmgr,
1659 dpm_table->dpm_levels[i].value,
1660 &(pp_table->GfxclkLevel[i]),
1661 &(pp_table->AcgFreqTable[i]));
1667 while (i < NUM_GFXCLK_DPM_LEVELS) {
1668 result = vega10_populate_single_gfx_level(hwmgr,
1669 dpm_table->dpm_levels[j].value,
1670 &(pp_table->GfxclkLevel[i]),
1671 &(pp_table->AcgFreqTable[i]));
1677 pp_table->GfxclkSlewRate =
1678 cpu_to_le16(table_info->us_gfxclk_slew_rate);
1680 dpm_table = &(data->dpm_table.soc_table);
1681 for (i = 0; i < dpm_table->count; i++) {
1682 pp_table->SocVid[i] =
1683 (uint8_t)convert_to_vid(
1684 vega10_locate_vddc_given_clock(hwmgr,
1685 dpm_table->dpm_levels[i].value,
1687 result = vega10_populate_single_soc_level(hwmgr,
1688 dpm_table->dpm_levels[i].value,
1689 &(pp_table->SocclkDid[i]),
1690 &(pp_table->SocDpmVoltageIndex[i]));
1696 while (i < NUM_SOCCLK_DPM_LEVELS) {
1697 pp_table->SocVid[i] = pp_table->SocVid[j];
1698 result = vega10_populate_single_soc_level(hwmgr,
1699 dpm_table->dpm_levels[j].value,
1700 &(pp_table->SocclkDid[i]),
1701 &(pp_table->SocDpmVoltageIndex[i]));
1711 * @brief Populates single SMC GFXCLK structure using the provided clock.
1713 * @param hwmgr - the address of the hardware manager.
1714 * @param mem_clock - the memory clock to use to populate the structure.
1715 * @return 0 on success..
1717 static int vega10_populate_single_memory_level(struct pp_hwmgr *hwmgr,
1718 uint32_t mem_clock, uint8_t *current_mem_vid,
1719 PllSetting_t *current_memclk_level, uint8_t *current_mem_soc_vind)
1721 struct vega10_hwmgr *data =
1722 (struct vega10_hwmgr *)(hwmgr->backend);
1723 struct phm_ppt_v2_information *table_info =
1724 (struct phm_ppt_v2_information *)(hwmgr->pptable);
1725 struct phm_ppt_v1_clock_voltage_dependency_table *dep_on_mclk =
1726 table_info->vdd_dep_on_mclk;
1727 struct pp_atomfwctrl_clock_dividers_soc15 dividers;
1728 uint32_t mem_max_clock =
1729 hwmgr->platform_descriptor.overdriveLimit.memoryClock;
1732 if (data->apply_overdrive_next_settings_mask &
1733 DPMTABLE_OD_UPDATE_VDDC)
1734 dep_on_mclk = (struct phm_ppt_v1_clock_voltage_dependency_table *)
1735 &data->odn_dpm_table.vdd_dependency_on_mclk;
1737 PP_ASSERT_WITH_CODE(dep_on_mclk,
1738 "Invalid SOC_VDD-UCLK Dependency Table!",
1741 if (data->need_update_dpm_table & DPMTABLE_OD_UPDATE_MCLK)
1742 mem_clock = mem_clock > mem_max_clock ? mem_max_clock : mem_clock;
1744 for (i = 0; i < dep_on_mclk->count; i++) {
1745 if (dep_on_mclk->entries[i].clk == mem_clock)
1748 PP_ASSERT_WITH_CODE(dep_on_mclk->count > i,
1749 "Cannot find UCLK in SOC_VDD-UCLK Dependency Table!",
1753 PP_ASSERT_WITH_CODE(!pp_atomfwctrl_get_gpu_pll_dividers_vega10(
1754 hwmgr, COMPUTE_GPUCLK_INPUT_FLAG_UCLK, mem_clock, ÷rs),
1755 "Failed to get UCLK settings from VBIOS!",
1759 (uint8_t)(convert_to_vid(dep_on_mclk->entries[i].mvdd));
1760 *current_mem_soc_vind =
1761 (uint8_t)(dep_on_mclk->entries[i].vddInd);
1762 current_memclk_level->FbMult = cpu_to_le32(dividers.ulPll_fb_mult);
1763 current_memclk_level->Did = (uint8_t)(dividers.ulDid);
1765 PP_ASSERT_WITH_CODE(current_memclk_level->Did >= 1,
1766 "Invalid Divider ID!",
1773 * @brief Populates all SMC MCLK levels' structure based on the trimmed allowed dpm memory clock states.
1775 * @param pHwMgr - the address of the hardware manager.
1776 * @return PP_Result_OK on success.
1778 static int vega10_populate_all_memory_levels(struct pp_hwmgr *hwmgr)
1780 struct vega10_hwmgr *data =
1781 (struct vega10_hwmgr *)(hwmgr->backend);
1782 PPTable_t *pp_table = &(data->smc_state_table.pp_table);
1783 struct vega10_single_dpm_table *dpm_table =
1784 &(data->dpm_table.mem_table);
1788 for (i = 0; i < dpm_table->count; i++) {
1789 result = vega10_populate_single_memory_level(hwmgr,
1790 dpm_table->dpm_levels[i].value,
1791 &(pp_table->MemVid[i]),
1792 &(pp_table->UclkLevel[i]),
1793 &(pp_table->MemSocVoltageIndex[i]));
1799 while (i < NUM_UCLK_DPM_LEVELS) {
1800 result = vega10_populate_single_memory_level(hwmgr,
1801 dpm_table->dpm_levels[j].value,
1802 &(pp_table->MemVid[i]),
1803 &(pp_table->UclkLevel[i]),
1804 &(pp_table->MemSocVoltageIndex[i]));
1810 pp_table->NumMemoryChannels = (uint16_t)(data->mem_channels);
1811 pp_table->MemoryChannelWidth =
1812 (uint16_t)(HBM_MEMORY_CHANNEL_WIDTH *
1813 channel_number[data->mem_channels]);
1815 pp_table->LowestUclkReservedForUlv =
1816 (uint8_t)(data->lowest_uclk_reserved_for_ulv);
1821 static int vega10_populate_single_display_type(struct pp_hwmgr *hwmgr,
1822 DSPCLK_e disp_clock)
1824 struct vega10_hwmgr *data =
1825 (struct vega10_hwmgr *)(hwmgr->backend);
1826 PPTable_t *pp_table = &(data->smc_state_table.pp_table);
1827 struct phm_ppt_v2_information *table_info =
1828 (struct phm_ppt_v2_information *)
1830 struct phm_ppt_v1_clock_voltage_dependency_table *dep_table;
1832 uint16_t clk = 0, vddc = 0;
1835 switch (disp_clock) {
1836 case DSPCLK_DCEFCLK:
1837 dep_table = table_info->vdd_dep_on_dcefclk;
1839 case DSPCLK_DISPCLK:
1840 dep_table = table_info->vdd_dep_on_dispclk;
1843 dep_table = table_info->vdd_dep_on_pixclk;
1846 dep_table = table_info->vdd_dep_on_phyclk;
1852 PP_ASSERT_WITH_CODE(dep_table->count <= NUM_DSPCLK_LEVELS,
1853 "Number Of Entries Exceeded maximum!",
1856 for (i = 0; i < dep_table->count; i++) {
1857 clk = (uint16_t)(dep_table->entries[i].clk / 100);
1858 vddc = table_info->vddc_lookup_table->
1859 entries[dep_table->entries[i].vddInd].us_vdd;
1860 vid = (uint8_t)convert_to_vid(vddc);
1861 pp_table->DisplayClockTable[disp_clock][i].Freq =
1863 pp_table->DisplayClockTable[disp_clock][i].Vid =
1867 while (i < NUM_DSPCLK_LEVELS) {
1868 pp_table->DisplayClockTable[disp_clock][i].Freq =
1870 pp_table->DisplayClockTable[disp_clock][i].Vid =
1878 static int vega10_populate_all_display_clock_levels(struct pp_hwmgr *hwmgr)
1882 for (i = 0; i < DSPCLK_COUNT; i++) {
1883 PP_ASSERT_WITH_CODE(!vega10_populate_single_display_type(hwmgr, i),
1884 "Failed to populate Clock in DisplayClockTable!",
1891 static int vega10_populate_single_eclock_level(struct pp_hwmgr *hwmgr,
1892 uint32_t eclock, uint8_t *current_eclk_did,
1893 uint8_t *current_soc_vol)
1895 struct phm_ppt_v2_information *table_info =
1896 (struct phm_ppt_v2_information *)(hwmgr->pptable);
1897 struct phm_ppt_v1_mm_clock_voltage_dependency_table *dep_table =
1898 table_info->mm_dep_table;
1899 struct pp_atomfwctrl_clock_dividers_soc15 dividers;
1902 PP_ASSERT_WITH_CODE(!pp_atomfwctrl_get_gpu_pll_dividers_vega10(hwmgr,
1903 COMPUTE_GPUCLK_INPUT_FLAG_DEFAULT_GPUCLK,
1905 "Failed to get ECLK clock settings from VBIOS!",
1908 *current_eclk_did = (uint8_t)dividers.ulDid;
1910 for (i = 0; i < dep_table->count; i++) {
1911 if (dep_table->entries[i].eclk == eclock)
1912 *current_soc_vol = dep_table->entries[i].vddcInd;
1918 static int vega10_populate_smc_vce_levels(struct pp_hwmgr *hwmgr)
1920 struct vega10_hwmgr *data =
1921 (struct vega10_hwmgr *)(hwmgr->backend);
1922 PPTable_t *pp_table = &(data->smc_state_table.pp_table);
1923 struct vega10_single_dpm_table *dpm_table = &(data->dpm_table.eclk_table);
1924 int result = -EINVAL;
1927 for (i = 0; i < dpm_table->count; i++) {
1928 result = vega10_populate_single_eclock_level(hwmgr,
1929 dpm_table->dpm_levels[i].value,
1930 &(pp_table->EclkDid[i]),
1931 &(pp_table->VceDpmVoltageIndex[i]));
1937 while (i < NUM_VCE_DPM_LEVELS) {
1938 result = vega10_populate_single_eclock_level(hwmgr,
1939 dpm_table->dpm_levels[j].value,
1940 &(pp_table->EclkDid[i]),
1941 &(pp_table->VceDpmVoltageIndex[i]));
1950 static int vega10_populate_single_vclock_level(struct pp_hwmgr *hwmgr,
1951 uint32_t vclock, uint8_t *current_vclk_did)
1953 struct pp_atomfwctrl_clock_dividers_soc15 dividers;
1955 PP_ASSERT_WITH_CODE(!pp_atomfwctrl_get_gpu_pll_dividers_vega10(hwmgr,
1956 COMPUTE_GPUCLK_INPUT_FLAG_DEFAULT_GPUCLK,
1958 "Failed to get VCLK clock settings from VBIOS!",
1961 *current_vclk_did = (uint8_t)dividers.ulDid;
1966 static int vega10_populate_single_dclock_level(struct pp_hwmgr *hwmgr,
1967 uint32_t dclock, uint8_t *current_dclk_did)
1969 struct pp_atomfwctrl_clock_dividers_soc15 dividers;
1971 PP_ASSERT_WITH_CODE(!pp_atomfwctrl_get_gpu_pll_dividers_vega10(hwmgr,
1972 COMPUTE_GPUCLK_INPUT_FLAG_DEFAULT_GPUCLK,
1974 "Failed to get DCLK clock settings from VBIOS!",
1977 *current_dclk_did = (uint8_t)dividers.ulDid;
1982 static int vega10_populate_smc_uvd_levels(struct pp_hwmgr *hwmgr)
1984 struct vega10_hwmgr *data =
1985 (struct vega10_hwmgr *)(hwmgr->backend);
1986 PPTable_t *pp_table = &(data->smc_state_table.pp_table);
1987 struct vega10_single_dpm_table *vclk_dpm_table =
1988 &(data->dpm_table.vclk_table);
1989 struct vega10_single_dpm_table *dclk_dpm_table =
1990 &(data->dpm_table.dclk_table);
1991 struct phm_ppt_v2_information *table_info =
1992 (struct phm_ppt_v2_information *)(hwmgr->pptable);
1993 struct phm_ppt_v1_mm_clock_voltage_dependency_table *dep_table =
1994 table_info->mm_dep_table;
1995 int result = -EINVAL;
1998 for (i = 0; i < vclk_dpm_table->count; i++) {
1999 result = vega10_populate_single_vclock_level(hwmgr,
2000 vclk_dpm_table->dpm_levels[i].value,
2001 &(pp_table->VclkDid[i]));
2007 while (i < NUM_UVD_DPM_LEVELS) {
2008 result = vega10_populate_single_vclock_level(hwmgr,
2009 vclk_dpm_table->dpm_levels[j].value,
2010 &(pp_table->VclkDid[i]));
2016 for (i = 0; i < dclk_dpm_table->count; i++) {
2017 result = vega10_populate_single_dclock_level(hwmgr,
2018 dclk_dpm_table->dpm_levels[i].value,
2019 &(pp_table->DclkDid[i]));
2025 while (i < NUM_UVD_DPM_LEVELS) {
2026 result = vega10_populate_single_dclock_level(hwmgr,
2027 dclk_dpm_table->dpm_levels[j].value,
2028 &(pp_table->DclkDid[i]));
2034 for (i = 0; i < dep_table->count; i++) {
2035 if (dep_table->entries[i].vclk ==
2036 vclk_dpm_table->dpm_levels[i].value &&
2037 dep_table->entries[i].dclk ==
2038 dclk_dpm_table->dpm_levels[i].value)
2039 pp_table->UvdDpmVoltageIndex[i] =
2040 dep_table->entries[i].vddcInd;
2046 while (i < NUM_UVD_DPM_LEVELS) {
2047 pp_table->UvdDpmVoltageIndex[i] = dep_table->entries[j].vddcInd;
2054 static int vega10_populate_clock_stretcher_table(struct pp_hwmgr *hwmgr)
2056 struct vega10_hwmgr *data =
2057 (struct vega10_hwmgr *)(hwmgr->backend);
2058 PPTable_t *pp_table = &(data->smc_state_table.pp_table);
2059 struct phm_ppt_v2_information *table_info =
2060 (struct phm_ppt_v2_information *)(hwmgr->pptable);
2061 struct phm_ppt_v1_clock_voltage_dependency_table *dep_table =
2062 table_info->vdd_dep_on_sclk;
2065 for (i = 0; i < dep_table->count; i++) {
2066 pp_table->CksEnable[i] = dep_table->entries[i].cks_enable;
2067 pp_table->CksVidOffset[i] = (uint8_t)(dep_table->entries[i].cks_voffset
2068 * VOLTAGE_VID_OFFSET_SCALE2 / VOLTAGE_VID_OFFSET_SCALE1);
/*
 * Fill the AVFS (adaptive voltage/frequency scaling) section of the SMC
 * pptable from the VBIOS ATOM firmware AVFS parameters, falling back to
 * registry-supplied quadratic coefficients for the display-clock to
 * gfxclk conversion curves when they override the defaults.
 *
 * Fix: Platform_sigma was populated from usMeanNsigmaDcTolSigma — the
 * same field already stored in DC_tol_sigma above — instead of
 * usMeanNsigmaPlatformSigma (copy-paste slip).
 *
 * If the feature is unsupported the AVFS fields are left at the safe
 * defaults (MinVoltageVid=0xff, MaxVoltageVid=0) and GNLD_AVFS support
 * is marked false on query failure.
 */
2074 static int vega10_populate_avfs_parameters(struct pp_hwmgr *hwmgr)
2076 struct vega10_hwmgr *data =
2077 (struct vega10_hwmgr *)(hwmgr->backend);
2078 PPTable_t *pp_table = &(data->smc_state_table.pp_table);
2079 struct phm_ppt_v2_information *table_info =
2080 (struct phm_ppt_v2_information *)(hwmgr->pptable);
2081 struct phm_ppt_v1_clock_voltage_dependency_table *dep_table =
2082 table_info->vdd_dep_on_sclk;
2083 struct pp_atomfwctrl_avfs_parameters avfs_params = {0};
/* defaults used when AVFS is unsupported or the VBIOS query fails */
2087 pp_table->MinVoltageVid = (uint8_t)0xff;
2088 pp_table->MaxVoltageVid = (uint8_t)0;
2090 if (data->smu_features[GNLD_AVFS].supported) {
2091 result = pp_atomfwctrl_get_avfs_information(hwmgr, &avfs_params);
2093 pp_table->MinVoltageVid = (uint8_t)
2094 convert_to_vid((uint16_t)(avfs_params.ulMinVddc));
2095 pp_table->MaxVoltageVid = (uint8_t)
2096 convert_to_vid((uint16_t)(avfs_params.ulMaxVddc));
/* mean/N-sigma statistical model constants (little-endian for the SMC) */
2098 pp_table->AConstant[0] = cpu_to_le32(avfs_params.ulMeanNsigmaAcontant0);
2099 pp_table->AConstant[1] = cpu_to_le32(avfs_params.ulMeanNsigmaAcontant1);
2100 pp_table->AConstant[2] = cpu_to_le32(avfs_params.ulMeanNsigmaAcontant2);
2101 pp_table->DC_tol_sigma = cpu_to_le16(avfs_params.usMeanNsigmaDcTolSigma);
2102 pp_table->Platform_mean = cpu_to_le16(avfs_params.usMeanNsigmaPlatformMean);
/* was usMeanNsigmaDcTolSigma (duplicate of DC_tol_sigma) — fixed */
2103 pp_table->Platform_sigma = cpu_to_le16(avfs_params.usMeanNsigmaPlatformSigma);
2104 pp_table->PSM_Age_CompFactor = cpu_to_le16(avfs_params.usPsmAgeComfactor);
/* BTC voltage-droop tables, clock-stretcher off; fixed-point shift 20 */
2106 pp_table->BtcGbVdroopTableCksOff.a0 =
2107 cpu_to_le32(avfs_params.ulGbVdroopTableCksoffA0);
2108 pp_table->BtcGbVdroopTableCksOff.a0_shift = 20;
2109 pp_table->BtcGbVdroopTableCksOff.a1 =
2110 cpu_to_le32(avfs_params.ulGbVdroopTableCksoffA1);
2111 pp_table->BtcGbVdroopTableCksOff.a1_shift = 20;
2112 pp_table->BtcGbVdroopTableCksOff.a2 =
2113 cpu_to_le32(avfs_params.ulGbVdroopTableCksoffA2);
2114 pp_table->BtcGbVdroopTableCksOff.a2_shift = 20;
/* BTC voltage-droop tables, clock-stretcher on */
2116 pp_table->OverrideBtcGbCksOn = avfs_params.ucEnableGbVdroopTableCkson;
2117 pp_table->BtcGbVdroopTableCksOn.a0 =
2118 cpu_to_le32(avfs_params.ulGbVdroopTableCksonA0);
2119 pp_table->BtcGbVdroopTableCksOn.a0_shift = 20;
2120 pp_table->BtcGbVdroopTableCksOn.a1 =
2121 cpu_to_le32(avfs_params.ulGbVdroopTableCksonA1);
2122 pp_table->BtcGbVdroopTableCksOn.a1_shift = 20;
2123 pp_table->BtcGbVdroopTableCksOn.a2 =
2124 cpu_to_le32(avfs_params.ulGbVdroopTableCksonA2);
2125 pp_table->BtcGbVdroopTableCksOn.a2_shift = 20;
/* AVFS gain-bandwidth fuse curves (m1/m2/b quadratic coefficients) */
2127 pp_table->AvfsGbCksOn.m1 =
2128 cpu_to_le32(avfs_params.ulGbFuseTableCksonM1);
2129 pp_table->AvfsGbCksOn.m2 =
2130 cpu_to_le32(avfs_params.ulGbFuseTableCksonM2);
2131 pp_table->AvfsGbCksOn.b =
2132 cpu_to_le32(avfs_params.ulGbFuseTableCksonB);
2133 pp_table->AvfsGbCksOn.m1_shift = 24;
2134 pp_table->AvfsGbCksOn.m2_shift = 12;
2135 pp_table->AvfsGbCksOn.b_shift = 0;
2137 pp_table->OverrideAvfsGbCksOn =
2138 avfs_params.ucEnableGbFuseTableCkson;
2139 pp_table->AvfsGbCksOff.m1 =
2140 cpu_to_le32(avfs_params.ulGbFuseTableCksoffM1);
2141 pp_table->AvfsGbCksOff.m2 =
2142 cpu_to_le32(avfs_params.ulGbFuseTableCksoffM2);
2143 pp_table->AvfsGbCksOff.b =
2144 cpu_to_le32(avfs_params.ulGbFuseTableCksoffB);
2145 pp_table->AvfsGbCksOff.m1_shift = 24;
2146 pp_table->AvfsGbCksOff.m2_shift = 12;
2147 pp_table->AvfsGbCksOff.b_shift = 0;
/* per-level static VID offsets from the SCLK dependency table */
2149 for (i = 0; i < dep_table->count; i++)
2150 pp_table->StaticVoltageOffsetVid[i] =
2151 convert_to_vid((uint8_t)(dep_table->entries[i].sclk_offset));
/* DISPCLK -> gfxclk curve: registry override wins over VBIOS values */
2153 if ((PPREGKEY_VEGA10QUADRATICEQUATION_DFLT !=
2154 data->disp_clk_quad_eqn_a) &&
2155 (PPREGKEY_VEGA10QUADRATICEQUATION_DFLT !=
2156 data->disp_clk_quad_eqn_b)) {
2157 pp_table->DisplayClock2Gfxclk[DSPCLK_DISPCLK].m1 =
2158 (int32_t)data->disp_clk_quad_eqn_a;
2159 pp_table->DisplayClock2Gfxclk[DSPCLK_DISPCLK].m2 =
2160 (int32_t)data->disp_clk_quad_eqn_b;
2161 pp_table->DisplayClock2Gfxclk[DSPCLK_DISPCLK].b =
2162 (int32_t)data->disp_clk_quad_eqn_c;
2164 pp_table->DisplayClock2Gfxclk[DSPCLK_DISPCLK].m1 =
2165 (int32_t)avfs_params.ulDispclk2GfxclkM1;
2166 pp_table->DisplayClock2Gfxclk[DSPCLK_DISPCLK].m2 =
2167 (int32_t)avfs_params.ulDispclk2GfxclkM2;
2168 pp_table->DisplayClock2Gfxclk[DSPCLK_DISPCLK].b =
2169 (int32_t)avfs_params.ulDispclk2GfxclkB;
2172 pp_table->DisplayClock2Gfxclk[DSPCLK_DISPCLK].m1_shift = 24;
2173 pp_table->DisplayClock2Gfxclk[DSPCLK_DISPCLK].m2_shift = 12;
2174 pp_table->DisplayClock2Gfxclk[DSPCLK_DISPCLK].b_shift = 12;
/* DCEFCLK -> gfxclk curve */
2176 if ((PPREGKEY_VEGA10QUADRATICEQUATION_DFLT !=
2177 data->dcef_clk_quad_eqn_a) &&
2178 (PPREGKEY_VEGA10QUADRATICEQUATION_DFLT !=
2179 data->dcef_clk_quad_eqn_b)) {
2180 pp_table->DisplayClock2Gfxclk[DSPCLK_DCEFCLK].m1 =
2181 (int32_t)data->dcef_clk_quad_eqn_a;
2182 pp_table->DisplayClock2Gfxclk[DSPCLK_DCEFCLK].m2 =
2183 (int32_t)data->dcef_clk_quad_eqn_b;
2184 pp_table->DisplayClock2Gfxclk[DSPCLK_DCEFCLK].b =
2185 (int32_t)data->dcef_clk_quad_eqn_c;
2187 pp_table->DisplayClock2Gfxclk[DSPCLK_DCEFCLK].m1 =
2188 (int32_t)avfs_params.ulDcefclk2GfxclkM1;
2189 pp_table->DisplayClock2Gfxclk[DSPCLK_DCEFCLK].m2 =
2190 (int32_t)avfs_params.ulDcefclk2GfxclkM2;
2191 pp_table->DisplayClock2Gfxclk[DSPCLK_DCEFCLK].b =
2192 (int32_t)avfs_params.ulDcefclk2GfxclkB;
2195 pp_table->DisplayClock2Gfxclk[DSPCLK_DCEFCLK].m1_shift = 24;
2196 pp_table->DisplayClock2Gfxclk[DSPCLK_DCEFCLK].m2_shift = 12;
2197 pp_table->DisplayClock2Gfxclk[DSPCLK_DCEFCLK].b_shift = 12;
/* PIXCLK -> gfxclk curve */
2199 if ((PPREGKEY_VEGA10QUADRATICEQUATION_DFLT !=
2200 data->pixel_clk_quad_eqn_a) &&
2201 (PPREGKEY_VEGA10QUADRATICEQUATION_DFLT !=
2202 data->pixel_clk_quad_eqn_b)) {
2203 pp_table->DisplayClock2Gfxclk[DSPCLK_PIXCLK].m1 =
2204 (int32_t)data->pixel_clk_quad_eqn_a;
2205 pp_table->DisplayClock2Gfxclk[DSPCLK_PIXCLK].m2 =
2206 (int32_t)data->pixel_clk_quad_eqn_b;
2207 pp_table->DisplayClock2Gfxclk[DSPCLK_PIXCLK].b =
2208 (int32_t)data->pixel_clk_quad_eqn_c;
2210 pp_table->DisplayClock2Gfxclk[DSPCLK_PIXCLK].m1 =
2211 (int32_t)avfs_params.ulPixelclk2GfxclkM1;
2212 pp_table->DisplayClock2Gfxclk[DSPCLK_PIXCLK].m2 =
2213 (int32_t)avfs_params.ulPixelclk2GfxclkM2;
2214 pp_table->DisplayClock2Gfxclk[DSPCLK_PIXCLK].b =
2215 (int32_t)avfs_params.ulPixelclk2GfxclkB;
2218 pp_table->DisplayClock2Gfxclk[DSPCLK_PIXCLK].m1_shift = 24;
2219 pp_table->DisplayClock2Gfxclk[DSPCLK_PIXCLK].m2_shift = 12;
2220 pp_table->DisplayClock2Gfxclk[DSPCLK_PIXCLK].b_shift = 12;
/* PHYCLK -> gfxclk curve */
2221 if ((PPREGKEY_VEGA10QUADRATICEQUATION_DFLT !=
2222 data->phy_clk_quad_eqn_a) &&
2223 (PPREGKEY_VEGA10QUADRATICEQUATION_DFLT !=
2224 data->phy_clk_quad_eqn_b)) {
2225 pp_table->DisplayClock2Gfxclk[DSPCLK_PHYCLK].m1 =
2226 (int32_t)data->phy_clk_quad_eqn_a;
2227 pp_table->DisplayClock2Gfxclk[DSPCLK_PHYCLK].m2 =
2228 (int32_t)data->phy_clk_quad_eqn_b;
2229 pp_table->DisplayClock2Gfxclk[DSPCLK_PHYCLK].b =
2230 (int32_t)data->phy_clk_quad_eqn_c;
2232 pp_table->DisplayClock2Gfxclk[DSPCLK_PHYCLK].m1 =
2233 (int32_t)avfs_params.ulPhyclk2GfxclkM1;
2234 pp_table->DisplayClock2Gfxclk[DSPCLK_PHYCLK].m2 =
2235 (int32_t)avfs_params.ulPhyclk2GfxclkM2;
2236 pp_table->DisplayClock2Gfxclk[DSPCLK_PHYCLK].b =
2237 (int32_t)avfs_params.ulPhyclk2GfxclkB;
2240 pp_table->DisplayClock2Gfxclk[DSPCLK_PHYCLK].m1_shift = 24;
2241 pp_table->DisplayClock2Gfxclk[DSPCLK_PHYCLK].m2_shift = 12;
2242 pp_table->DisplayClock2Gfxclk[DSPCLK_PHYCLK].b_shift = 12;
/*
 * NOTE(review): ACG fields below are stored without cpu_to_le32(),
 * unlike the AVFS fields above — confirm intended endianness handling.
 */
2244 pp_table->AcgBtcGbVdroopTable.a0 = avfs_params.ulAcgGbVdroopTableA0;
2245 pp_table->AcgBtcGbVdroopTable.a0_shift = 20;
2246 pp_table->AcgBtcGbVdroopTable.a1 = avfs_params.ulAcgGbVdroopTableA1;
2247 pp_table->AcgBtcGbVdroopTable.a1_shift = 20;
2248 pp_table->AcgBtcGbVdroopTable.a2 = avfs_params.ulAcgGbVdroopTableA2;
2249 pp_table->AcgBtcGbVdroopTable.a2_shift = 20;
2251 pp_table->AcgAvfsGb.m1 = avfs_params.ulAcgGbFuseTableM1;
2252 pp_table->AcgAvfsGb.m2 = avfs_params.ulAcgGbFuseTableM2;
2253 pp_table->AcgAvfsGb.b = avfs_params.ulAcgGbFuseTableB;
2254 pp_table->AcgAvfsGb.m1_shift = 0;
2255 pp_table->AcgAvfsGb.m2_shift = 0;
2256 pp_table->AcgAvfsGb.b_shift = 0;
/* VBIOS AVFS query failed: disable the feature entirely */
2259 data->smu_features[GNLD_AVFS].supported = false;
/*
 * Enable ACG (adaptive clock generator): turn on the DPM prefetcher
 * feature, run the ACG band-gap trim/calibration (BTC) on the SMC, and —
 * if the BTC reports success (response == 1) — start ACG in closed or
 * open loop per acg_loop_state before enabling the ACG SMC feature.
 */
2266 static int vega10_acg_enable(struct pp_hwmgr *hwmgr)
2268 struct vega10_hwmgr *data =
2269 (struct vega10_hwmgr *)(hwmgr->backend);
/* NOTE(review): variable spelled "agc" while the feature is "ACG" */
2270 uint32_t agc_btc_response;
2272 if (data->smu_features[GNLD_ACG].supported) {
/* prefetcher must be on before ACG; 0 == success from the SMC helper */
2273 if (0 == vega10_enable_smc_features(hwmgr, true,
2274 data->smu_features[GNLD_DPM_PREFETCHER].smu_feature_bitmap))
2275 data->smu_features[GNLD_DPM_PREFETCHER].enabled = true;
2277 smum_send_msg_to_smc(hwmgr, PPSMC_MSG_InitializeAcg);
/* NOTE(review): BTC message result is not checked before reading arg */
2279 smum_send_msg_to_smc(hwmgr, PPSMC_MSG_RunAcgBtc);
2280 vega10_read_arg_from_smc(hwmgr, &agc_btc_response);
2282 if (1 == agc_btc_response) {
/* acg_loop_state: 1 = closed loop, 2 = open loop */
2283 if (1 == data->acg_loop_state)
2284 smum_send_msg_to_smc(hwmgr, PPSMC_MSG_RunAcgInClosedLoop);
2285 else if (2 == data->acg_loop_state)
2286 smum_send_msg_to_smc(hwmgr, PPSMC_MSG_RunAcgInOpenLoop);
2287 if (0 == vega10_enable_smc_features(hwmgr, true,
2288 data->smu_features[GNLD_ACG].smu_feature_bitmap))
2289 data->smu_features[GNLD_ACG].enabled = true;
/* BTC failure path (else branch; brace line elided in this listing) */
2291 pr_info("[ACG_Enable] ACG BTC Returned Failed Status!\n");
2292 data->smu_features[GNLD_ACG].enabled = false;
/*
 * Disable the ACG SMC feature if it is both supported and currently
 * enabled; clears the cached enabled flag on success.
 */
2299 static int vega10_acg_disable(struct pp_hwmgr *hwmgr)
2301 struct vega10_hwmgr *data =
2302 (struct vega10_hwmgr *)(hwmgr->backend);
2304 if (data->smu_features[GNLD_ACG].supported &&
2305 data->smu_features[GNLD_ACG].enabled)
/* vega10_enable_smc_features() returns 0 on success */
2306 if (!vega10_enable_smc_features(hwmgr, false,
2307 data->smu_features[GNLD_ACG].smu_feature_bitmap))
2308 data->smu_features[GNLD_ACG].enabled = false;
/*
 * Populate regulator-hot (VR0/VR1) and AC/DC-switch GPIO pins and
 * polarities in the SMC pptable from the ATOM firmware GPIO info,
 * zeroing the fields when the corresponding platform cap or registry
 * support flag is absent.
 */
2313 static int vega10_populate_gpio_parameters(struct pp_hwmgr *hwmgr)
2315 struct vega10_hwmgr *data =
2316 (struct vega10_hwmgr *)(hwmgr->backend);
2317 PPTable_t *pp_table = &(data->smc_state_table.pp_table);
2318 struct pp_atomfwctrl_gpio_parameters gpio_params = {0};
2321 result = pp_atomfwctrl_get_gpio_information(hwmgr, &gpio_params);
2323 if (PP_CAP(PHM_PlatformCaps_RegulatorHot) &&
2324 data->registry_data.regulator_hot_gpio_support) {
2325 pp_table->VR0HotGpio = gpio_params.ucVR0HotGpio;
2326 pp_table->VR0HotPolarity = gpio_params.ucVR0HotPolarity;
2327 pp_table->VR1HotGpio = gpio_params.ucVR1HotGpio;
2328 pp_table->VR1HotPolarity = gpio_params.ucVR1HotPolarity;
/* else: regulator-hot GPIOs disabled (else line elided in listing) */
2330 pp_table->VR0HotGpio = 0;
2331 pp_table->VR0HotPolarity = 0;
2332 pp_table->VR1HotGpio = 0;
2333 pp_table->VR1HotPolarity = 0;
2336 if (PP_CAP(PHM_PlatformCaps_AutomaticDCTransition) &&
2337 data->registry_data.ac_dc_switch_gpio_support) {
2338 pp_table->AcDcGpio = gpio_params.ucAcDcGpio;
2339 pp_table->AcDcPolarity = gpio_params.ucAcDcPolarity;
/* else: AC/DC switch GPIO disabled */
2341 pp_table->AcDcGpio = 0;
2342 pp_table->AcDcPolarity = 0;
/*
 * Enable or disable the AVFS SMC feature (no-op when unsupported),
 * keeping the cached enabled flag in sync; asserts on SMC failure.
 *
 * @param enable  true to enable AVFS, false to disable it
 */
2349 static int vega10_avfs_enable(struct pp_hwmgr *hwmgr, bool enable)
2351 struct vega10_hwmgr *data =
2352 (struct vega10_hwmgr *)(hwmgr->backend);
2354 if (data->smu_features[GNLD_AVFS].supported) {
2356 PP_ASSERT_WITH_CODE(!vega10_enable_smc_features(hwmgr,
2358 data->smu_features[GNLD_AVFS].smu_feature_bitmap),
2359 "[avfs_control] Attempt to Enable AVFS feature Failed!",
2361 data->smu_features[GNLD_AVFS].enabled = true;
/* disable path (else branch; brace lines elided in this listing) */
2363 PP_ASSERT_WITH_CODE(!vega10_enable_smc_features(hwmgr,
2365 data->smu_features[GNLD_AVFS].smu_feature_bitmap),
2366 "[avfs_control] Attempt to Disable AVFS feature Failed!",
2368 data->smu_features[GNLD_AVFS].enabled = false;
/*
 * Read the chip serial number from the SMC, look up a per-chip AVFS fuse
 * override table, and if one exists copy its VFT0/1/2 coefficients into
 * the SMC fuse-override table and upload it.
 */
2375 static int vega10_populate_and_upload_avfs_fuse_override(struct pp_hwmgr *hwmgr)
2379 uint64_t serial_number = 0;
2380 uint32_t top32, bottom32;
2381 struct phm_fuses_default fuse;
2383 struct vega10_hwmgr *data = (struct vega10_hwmgr *)(hwmgr->backend);
2384 AvfsFuseOverride_t *avfs_fuse_table = &(data->smc_state_table.avfs_fuse_override_table);
2386 smum_send_msg_to_smc(hwmgr, PPSMC_MSG_ReadSerialNumTop32);
2387 vega10_read_arg_from_smc(hwmgr, &top32);
2389 smum_send_msg_to_smc(hwmgr, PPSMC_MSG_ReadSerialNumBottom32);
2390 vega10_read_arg_from_smc(hwmgr, &bottom32);
/*
 * NOTE(review): bottom32 is packed into the HIGH dword here — confirm
 * this matches the keying used by pp_override_get_default_fuse_value().
 */
2392 serial_number = ((uint64_t)bottom32 << 32) | top32;
/* returns 0 when an override entry exists for this serial number */
2394 if (pp_override_get_default_fuse_value(serial_number, &fuse) == 0) {
2395 avfs_fuse_table->VFT0_b = fuse.VFT0_b;
2396 avfs_fuse_table->VFT0_m1 = fuse.VFT0_m1;
2397 avfs_fuse_table->VFT0_m2 = fuse.VFT0_m2;
2398 avfs_fuse_table->VFT1_b = fuse.VFT1_b;
2399 avfs_fuse_table->VFT1_m1 = fuse.VFT1_m1;
2400 avfs_fuse_table->VFT1_m2 = fuse.VFT1_m2;
2401 avfs_fuse_table->VFT2_b = fuse.VFT2_b;
2402 avfs_fuse_table->VFT2_m1 = fuse.VFT2_m1;
2403 avfs_fuse_table->VFT2_m2 = fuse.VFT2_m2;
2404 result = vega10_copy_table_to_smc(hwmgr,
2405 (uint8_t *)avfs_fuse_table, AVFSFUSETABLE);
/* NOTE(review): runtime string typo "FuseOVerride" left unchanged */
2406 PP_ASSERT_WITH_CODE(!result,
2407 "Failed to upload FuseOVerride!",
/*
 * Capture the default graphics and compute power profiles. The compute
 * profile is restricted to the top two gfx DPM levels (when available)
 * by raising its minimum SCLK.
 */
2416 static int vega10_save_default_power_profile(struct pp_hwmgr *hwmgr)
2418 struct vega10_hwmgr *data = (struct vega10_hwmgr *)(hwmgr->backend);
2417 struct vega10_single_dpm_table *dpm_table = &(data->dpm_table.gfx_table);
2420 hwmgr->default_gfx_power_profile.type = AMD_PP_GFX_PROFILE;
2421 hwmgr->default_compute_power_profile.type = AMD_PP_COMPUTE_PROFILE;
2423 /* Optimize compute power profile: Use only highest
2424 * 2 power levels (if more than 2 are available)
2426 if (dpm_table->count > 2)
2427 min_level = dpm_table->count - 2;
2428 else if (dpm_table->count == 2)
/* remaining assignments for the 1/2-level cases elided in this listing */
2433 hwmgr->default_compute_power_profile.min_sclk =
2434 dpm_table->dpm_levels[min_level].value;
2436 hwmgr->gfx_power_profile = hwmgr->default_gfx_power_profile;
2437 hwmgr->compute_power_profile = hwmgr->default_compute_power_profile;
2443 * Initializes the SMC table and uploads it
2445 * @param hwmgr the address of the powerplay hardware manager.
2446 * @return 0 on success, otherwise an error code.
/*
 * Build the full SMC pptable — voltage modes, DPM levels for every clock
 * domain, clock stretcher, boot state, AVFS/GPIO parameters and averaging
 * alphas — then upload it to the SMC and enable AVFS/ACG.
 */
2449 static int vega10_init_smc_table(struct pp_hwmgr *hwmgr)
2452 struct vega10_hwmgr *data =
2453 (struct vega10_hwmgr *)(hwmgr->backend);
2454 struct phm_ppt_v2_information *table_info =
2455 (struct phm_ppt_v2_information *)(hwmgr->pptable);
2456 PPTable_t *pp_table = &(data->smc_state_table.pp_table);
2457 struct pp_atomfwctrl_voltage_table voltage_table;
2458 struct pp_atomfwctrl_bios_boot_up_values boot_up_values;
2460 result = vega10_setup_default_dpm_tables(hwmgr);
2461 PP_ASSERT_WITH_CODE(!result,
2462 "Failed to setup default DPM tables!",
/* query the SVID2 VDDC voltage table for MaxVidStep and PSI states */
2465 pp_atomfwctrl_get_voltage_table_v4(hwmgr, VOLTAGE_TYPE_VDDC,
2466 VOLTAGE_OBJ_SVID2, &voltage_table);
2467 pp_table->MaxVidStep = voltage_table.max_vid_step;
/* per-domain voltage-mode bytes copied straight from the pptable */
2469 pp_table->GfxDpmVoltageMode =
2470 (uint8_t)(table_info->uc_gfx_dpm_voltage_mode);
2471 pp_table->SocDpmVoltageMode =
2472 (uint8_t)(table_info->uc_soc_dpm_voltage_mode);
2473 pp_table->UclkDpmVoltageMode =
2474 (uint8_t)(table_info->uc_uclk_dpm_voltage_mode);
2475 pp_table->UvdDpmVoltageMode =
2476 (uint8_t)(table_info->uc_uvd_dpm_voltage_mode);
2477 pp_table->VceDpmVoltageMode =
2478 (uint8_t)(table_info->uc_vce_dpm_voltage_mode);
2479 pp_table->Mp0DpmVoltageMode =
2480 (uint8_t)(table_info->uc_mp0_dpm_voltage_mode);
2482 pp_table->DisplayDpmVoltageMode =
2483 (uint8_t)(table_info->uc_dcef_dpm_voltage_mode);
2485 data->vddc_voltage_table.psi0_enable = voltage_table.psi0_enable;
2486 data->vddc_voltage_table.psi1_enable = voltage_table.psi1_enable;
/* ULV only when the registry enables it and the offset is non-zero */
2488 if (data->registry_data.ulv_support &&
2489 table_info->us_ulv_voltage_offset) {
2490 result = vega10_populate_ulv_state(hwmgr);
2491 PP_ASSERT_WITH_CODE(!result,
2492 "Failed to initialize ULV state!",
2496 result = vega10_populate_smc_link_levels(hwmgr);
2497 PP_ASSERT_WITH_CODE(!result,
2498 "Failed to initialize Link Level!",
2501 result = vega10_populate_all_graphic_levels(hwmgr);
2502 PP_ASSERT_WITH_CODE(!result,
2503 "Failed to initialize Graphics Level!",
2506 result = vega10_populate_all_memory_levels(hwmgr);
2507 PP_ASSERT_WITH_CODE(!result,
2508 "Failed to initialize Memory Level!",
2511 result = vega10_populate_all_display_clock_levels(hwmgr);
2512 PP_ASSERT_WITH_CODE(!result,
2513 "Failed to initialize Display Level!",
2516 result = vega10_populate_smc_vce_levels(hwmgr);
2517 PP_ASSERT_WITH_CODE(!result,
2518 "Failed to initialize VCE Level!",
2521 result = vega10_populate_smc_uvd_levels(hwmgr);
2522 PP_ASSERT_WITH_CODE(!result,
2523 "Failed to initialize UVD Level!",
2526 if (data->registry_data.clock_stretcher_support) {
2527 result = vega10_populate_clock_stretcher_table(hwmgr);
2528 PP_ASSERT_WITH_CODE(!result,
2529 "Failed to populate Clock Stretcher Table!",
/* capture VBIOS boot-up voltages/clocks for the boot state */
2533 result = pp_atomfwctrl_get_vbios_bootup_values(hwmgr, &boot_up_values);
2535 data->vbios_boot_state.vddc = boot_up_values.usVddc;
2536 data->vbios_boot_state.vddci = boot_up_values.usVddci;
2537 data->vbios_boot_state.mvddc = boot_up_values.usMvddc;
2538 data->vbios_boot_state.gfx_clock = boot_up_values.ulGfxClk;
2539 data->vbios_boot_state.mem_clock = boot_up_values.ulUClk;
2540 data->vbios_boot_state.soc_clock = boot_up_values.ulSocClk;
2541 data->vbios_boot_state.dcef_clock = boot_up_values.ulDCEFClk;
2542 if (0 != boot_up_values.usVddc) {
/* lock the SOC voltage floor to boot VDDC (scaled by 4 for the SMC) */
2543 smum_send_msg_to_smc_with_parameter(hwmgr,
2544 PPSMC_MSG_SetFloorSocVoltage,
2545 (boot_up_values.usVddc * 4));
2546 data->vbios_boot_state.bsoc_vddc_lock = true;
2548 data->vbios_boot_state.bsoc_vddc_lock = false;
/* deep-sleep DCEF floor is expressed in 100 kHz units */
2550 smum_send_msg_to_smc_with_parameter(hwmgr,
2551 PPSMC_MSG_SetMinDeepSleepDcefclk,
2552 (uint32_t)(data->vbios_boot_state.dcef_clock / 100));
2555 result = vega10_populate_avfs_parameters(hwmgr);
2556 PP_ASSERT_WITH_CODE(!result,
2557 "Failed to initialize AVFS Parameters!",
2560 result = vega10_populate_gpio_parameters(hwmgr);
2561 PP_ASSERT_WITH_CODE(!result,
2562 "Failed to initialize GPIO Parameters!",
/* exponential-averaging alphas for telemetry smoothing */
2565 pp_table->GfxclkAverageAlpha = (uint8_t)
2566 (data->gfxclk_average_alpha);
2567 pp_table->SocclkAverageAlpha = (uint8_t)
2568 (data->socclk_average_alpha);
2569 pp_table->UclkAverageAlpha = (uint8_t)
2570 (data->uclk_average_alpha);
2571 pp_table->GfxActivityAverageAlpha = (uint8_t)
2572 (data->gfx_activity_average_alpha);
2574 vega10_populate_and_upload_avfs_fuse_override(hwmgr);
2576 result = vega10_copy_table_to_smc(hwmgr,
2577 (uint8_t *)pp_table, PPTABLE);
2578 PP_ASSERT_WITH_CODE(!result,
2579 "Failed to upload PPtable!", return result);
2581 result = vega10_avfs_enable(hwmgr, true);
2582 PP_ASSERT_WITH_CODE(!result, "Attempt to enable AVFS feature Failed!",
2584 vega10_acg_enable(hwmgr);
2585 vega10_save_default_power_profile(hwmgr);
/*
 * Enable the SMC thermal-protection feature; logs and skips if it is
 * already enabled, asserts on SMC failure.
 */
2590 static int vega10_enable_thermal_protection(struct pp_hwmgr *hwmgr)
2592 struct vega10_hwmgr *data = (struct vega10_hwmgr *)(hwmgr->backend);
2594 if (data->smu_features[GNLD_THERMAL].supported) {
2595 if (data->smu_features[GNLD_THERMAL].enabled)
2596 pr_info("THERMAL Feature Already enabled!");
2598 PP_ASSERT_WITH_CODE(
2599 !vega10_enable_smc_features(hwmgr,
2601 data->smu_features[GNLD_THERMAL].smu_feature_bitmap),
2602 "Enable THERMAL Feature Failed!",
2604 data->smu_features[GNLD_THERMAL].enabled = true;
/*
 * Disable the SMC thermal-protection feature; logs and skips if it is
 * already disabled, asserts on SMC failure.
 */
2610 static int vega10_disable_thermal_protection(struct pp_hwmgr *hwmgr)
2612 struct vega10_hwmgr *data = (struct vega10_hwmgr *)(hwmgr->backend);
2614 if (data->smu_features[GNLD_THERMAL].supported) {
2615 if (!data->smu_features[GNLD_THERMAL].enabled)
2616 pr_info("THERMAL Feature Already disabled!");
2618 PP_ASSERT_WITH_CODE(
2619 !vega10_enable_smc_features(hwmgr,
2621 data->smu_features[GNLD_THERMAL].smu_feature_bitmap),
2622 "disable THERMAL Feature Failed!",
2624 data->smu_features[GNLD_THERMAL].enabled = false;
/*
 * Enable the VR0-hot and VR1-hot (voltage-regulator over-temperature)
 * SMC features when the RegulatorHot platform cap is set, marking each
 * as enabled on success.
 *
 * Fix: the VR1HOT branch's assert message said "VR0 Hot" — corrected to
 * name the regulator it actually enables.
 */
2630 static int vega10_enable_vrhot_feature(struct pp_hwmgr *hwmgr)
2632 struct vega10_hwmgr *data =
2633 (struct vega10_hwmgr *)(hwmgr->backend);
2635 if (PP_CAP(PHM_PlatformCaps_RegulatorHot)) {
2636 if (data->smu_features[GNLD_VR0HOT].supported) {
2637 PP_ASSERT_WITH_CODE(
2638 !vega10_enable_smc_features(hwmgr,
2640 data->smu_features[GNLD_VR0HOT].smu_feature_bitmap),
2641 "Attempt to Enable VR0 Hot feature Failed!",
2643 data->smu_features[GNLD_VR0HOT].enabled = true;
2645 if (data->smu_features[GNLD_VR1HOT].supported) {
2646 PP_ASSERT_WITH_CODE(
2647 !vega10_enable_smc_features(hwmgr,
2649 data->smu_features[GNLD_VR1HOT].smu_feature_bitmap),
2650 "Attempt to Enable VR1 Hot feature Failed!",
2652 data->smu_features[GNLD_VR1HOT].enabled = true;
/*
 * Enable the ULV (ultra-low voltage) SMC feature when registry support
 * is set; asserts on SMC failure.
 */
2659 static int vega10_enable_ulv(struct pp_hwmgr *hwmgr)
2661 struct vega10_hwmgr *data =
2662 (struct vega10_hwmgr *)(hwmgr->backend);
2664 if (data->registry_data.ulv_support) {
2665 PP_ASSERT_WITH_CODE(!vega10_enable_smc_features(hwmgr,
2666 true, data->smu_features[GNLD_ULV].smu_feature_bitmap),
2667 "Enable ULV Feature Failed!",
2669 data->smu_features[GNLD_ULV].enabled = true;
/*
 * Disable the ULV SMC feature when registry support is set; asserts on
 * SMC failure.
 */
2675 static int vega10_disable_ulv(struct pp_hwmgr *hwmgr)
2677 struct vega10_hwmgr *data =
2678 (struct vega10_hwmgr *)(hwmgr->backend);
2680 if (data->registry_data.ulv_support) {
2681 PP_ASSERT_WITH_CODE(!vega10_enable_smc_features(hwmgr,
2682 false, data->smu_features[GNLD_ULV].smu_feature_bitmap),
2683 "disable ULV Feature Failed!",
2685 data->smu_features[GNLD_ULV].enabled = false;
/*
 * Enable each supported deep-sleep clock-gating feature (GFXCLK, SOCCLK,
 * LCLK, DCEFCLK) individually, asserting on any SMC failure.
 */
2691 static int vega10_enable_deep_sleep_master_switch(struct pp_hwmgr *hwmgr)
2693 struct vega10_hwmgr *data =
2694 (struct vega10_hwmgr *)(hwmgr->backend);
2696 if (data->smu_features[GNLD_DS_GFXCLK].supported) {
2697 PP_ASSERT_WITH_CODE(!vega10_enable_smc_features(hwmgr,
2698 true, data->smu_features[GNLD_DS_GFXCLK].smu_feature_bitmap),
2699 "Attempt to Enable DS_GFXCLK Feature Failed!",
2701 data->smu_features[GNLD_DS_GFXCLK].enabled = true;
2704 if (data->smu_features[GNLD_DS_SOCCLK].supported) {
2705 PP_ASSERT_WITH_CODE(!vega10_enable_smc_features(hwmgr,
2706 true, data->smu_features[GNLD_DS_SOCCLK].smu_feature_bitmap),
2707 "Attempt to Enable DS_SOCCLK Feature Failed!",
2709 data->smu_features[GNLD_DS_SOCCLK].enabled = true;
2712 if (data->smu_features[GNLD_DS_LCLK].supported) {
2713 PP_ASSERT_WITH_CODE(!vega10_enable_smc_features(hwmgr,
2714 true, data->smu_features[GNLD_DS_LCLK].smu_feature_bitmap),
2715 "Attempt to Enable DS_LCLK Feature Failed!",
2717 data->smu_features[GNLD_DS_LCLK].enabled = true;
2720 if (data->smu_features[GNLD_DS_DCEFCLK].supported) {
2721 PP_ASSERT_WITH_CODE(!vega10_enable_smc_features(hwmgr,
2722 true, data->smu_features[GNLD_DS_DCEFCLK].smu_feature_bitmap),
2723 "Attempt to Enable DS_DCEFCLK Feature Failed!",
2725 data->smu_features[GNLD_DS_DCEFCLK].enabled = true;
/*
 * Disable each supported deep-sleep clock-gating feature (GFXCLK,
 * SOCCLK, LCLK, DCEFCLK) individually, asserting on any SMC failure.
 *
 * Fix: the DS_SOCCLK branch's assert message was truncated to
 * "DS_ Feature" — corrected to name the feature being disabled.
 */
2731 static int vega10_disable_deep_sleep_master_switch(struct pp_hwmgr *hwmgr)
2733 struct vega10_hwmgr *data =
2734 (struct vega10_hwmgr *)(hwmgr->backend);
2736 if (data->smu_features[GNLD_DS_GFXCLK].supported) {
2737 PP_ASSERT_WITH_CODE(!vega10_enable_smc_features(hwmgr,
2738 false, data->smu_features[GNLD_DS_GFXCLK].smu_feature_bitmap),
2739 "Attempt to disable DS_GFXCLK Feature Failed!",
2741 data->smu_features[GNLD_DS_GFXCLK].enabled = false;
2744 if (data->smu_features[GNLD_DS_SOCCLK].supported) {
2745 PP_ASSERT_WITH_CODE(!vega10_enable_smc_features(hwmgr,
2746 false, data->smu_features[GNLD_DS_SOCCLK].smu_feature_bitmap),
2747 "Attempt to disable DS_SOCCLK Feature Failed!",
2749 data->smu_features[GNLD_DS_SOCCLK].enabled = false;
2752 if (data->smu_features[GNLD_DS_LCLK].supported) {
2753 PP_ASSERT_WITH_CODE(!vega10_enable_smc_features(hwmgr,
2754 false, data->smu_features[GNLD_DS_LCLK].smu_feature_bitmap),
2755 "Attempt to disable DS_LCLK Feature Failed!",
2757 data->smu_features[GNLD_DS_LCLK].enabled = false;
2760 if (data->smu_features[GNLD_DS_DCEFCLK].supported) {
2761 PP_ASSERT_WITH_CODE(!vega10_enable_smc_features(hwmgr,
2762 false, data->smu_features[GNLD_DS_DCEFCLK].smu_feature_bitmap),
2763 "Attempt to disable DS_DCEFCLK Feature Failed!",
2765 data->smu_features[GNLD_DS_DCEFCLK].enabled = false;
/*
 * Disable the DPM features selected by @bitmap: first turns off the LED
 * display feature, then collects every supported+enabled feature whose
 * bit is set into a mask and disables them in one SMC call.
 */
2771 static int vega10_stop_dpm(struct pp_hwmgr *hwmgr, uint32_t bitmap)
2773 struct vega10_hwmgr *data =
2774 (struct vega10_hwmgr *)(hwmgr->backend);
2775 uint32_t i, feature_mask = 0;
2778 if(data->smu_features[GNLD_LED_DISPLAY].supported == true){
2779 PP_ASSERT_WITH_CODE(!vega10_enable_smc_features(hwmgr,
2780 false, data->smu_features[GNLD_LED_DISPLAY].smu_feature_bitmap),
2781 "Attempt to disable LED DPM feature failed!", return -EINVAL);
2782 data->smu_features[GNLD_LED_DISPLAY].enabled = false;
2785 for (i = 0; i < GNLD_DPM_MAX; i++) {
2786 if (data->smu_features[i].smu_feature_bitmap & bitmap) {
2787 if (data->smu_features[i].supported) {
2788 if (data->smu_features[i].enabled) {
/* continuation (".smu_feature_bitmap;") elided in this listing */
2789 feature_mask |= data->smu_features[i].
2791 data->smu_features[i].enabled = false;
/* single batched disable for all collected features */
2797 vega10_enable_smc_features(hwmgr, false, feature_mask);
2803 * @brief Tell SMC to enable the supported DPMs.
2805 * @param hwmgr - the address of the powerplay hardware manager.
2806 * @param bitmap - bitmap of the features to enable.
2807 * @return 0 if at least one DPM is successfully enabled.
/*
 * Enable the DPM features selected by @bitmap in one batched SMC call
 * (rolling back the cached enabled flags on failure), then enable the
 * LED display feature, release the boot-time SOC voltage floor, and
 * enable the AC/DC quick-transition feature when supported.
 */
2809 static int vega10_start_dpm(struct pp_hwmgr *hwmgr, uint32_t bitmap)
2811 struct vega10_hwmgr *data =
2812 (struct vega10_hwmgr *)(hwmgr->backend);
2813 uint32_t i, feature_mask = 0;
2815 for (i = 0; i < GNLD_DPM_MAX; i++) {
2816 if (data->smu_features[i].smu_feature_bitmap & bitmap) {
2817 if (data->smu_features[i].supported) {
2818 if (!data->smu_features[i].enabled) {
/* continuation (".smu_feature_bitmap;") elided in this listing */
2819 feature_mask |= data->smu_features[i].
2821 data->smu_features[i].enabled = true;
/* on batch-enable failure, roll back the optimistic enabled flags */
2827 if (vega10_enable_smc_features(hwmgr,
2828 true, feature_mask)) {
2829 for (i = 0; i < GNLD_DPM_MAX; i++) {
2830 if (data->smu_features[i].smu_feature_bitmap &
2832 data->smu_features[i].enabled = false;
2836 if(data->smu_features[GNLD_LED_DISPLAY].supported == true){
2837 PP_ASSERT_WITH_CODE(!vega10_enable_smc_features(hwmgr,
2838 true, data->smu_features[GNLD_LED_DISPLAY].smu_feature_bitmap),
2839 "Attempt to Enable LED DPM feature Failed!", return -EINVAL);
2840 data->smu_features[GNLD_LED_DISPLAY].enabled = true;
/* DPM is up: the boot SOC voltage floor is no longer needed */
2843 if (data->vbios_boot_state.bsoc_vddc_lock) {
2844 smum_send_msg_to_smc_with_parameter(hwmgr,
2845 PPSMC_MSG_SetFloorSocVoltage, 0);
2846 data->vbios_boot_state.bsoc_vddc_lock = false;
2849 if (PP_CAP(PHM_PlatformCaps_Falcon_QuickTransition)) {
2850 if (data->smu_features[GNLD_ACDC].supported) {
2851 PP_ASSERT_WITH_CODE(!vega10_enable_smc_features(hwmgr,
2852 true, data->smu_features[GNLD_ACDC].smu_feature_bitmap),
/* NOTE(review): message says DS_GFXCLK but this enables ACDC */
2853 "Attempt to Enable DS_GFXCLK Feature Failed!",
2855 data->smu_features[GNLD_ACDC].enabled = true;
/*
 * Master DPM bring-up sequence: configure telemetry, verify DPM is not
 * already running, build and upload the SMC table, then enable thermal
 * protection, VR-hot, deep sleep, DPM features, DIDT, power containment,
 * power-control level and ULV. Each step records its failure in @result
 * but the sequence continues (PP_ASSERT_WITH_CODE does not return).
 */
2862 static int vega10_enable_dpm_tasks(struct pp_hwmgr *hwmgr)
2864 struct vega10_hwmgr *data =
2865 (struct vega10_hwmgr *)(hwmgr->backend);
2866 int tmp_result, result = 0;
2868 tmp_result = smum_send_msg_to_smc_with_parameter(hwmgr,
2869 PPSMC_MSG_ConfigureTelemetry, data->config_telemetry);
2870 PP_ASSERT_WITH_CODE(!tmp_result,
2871 "Failed to configure telemetry!",
2874 smum_send_msg_to_smc_with_parameter(hwmgr,
2875 PPSMC_MSG_NumOfDisplays, 0);
/* 0 when DPM is NOT running (the desired state before re-enablement) */
2877 tmp_result = (!vega10_is_dpm_running(hwmgr)) ? 0 : -1;
/* NOTE(review): runtime string has a garbled "right ," — left as-is */
2878 PP_ASSERT_WITH_CODE(!tmp_result,
2879 "DPM is already running right , skipping re-enablement!",
/* workaround for two specific SMU firmware versions */
2882 if ((hwmgr->smu_version == 0x001c2c00) ||
2883 (hwmgr->smu_version == 0x001c2d00)) {
2884 tmp_result = smum_send_msg_to_smc_with_parameter(hwmgr,
2885 PPSMC_MSG_UpdatePkgPwrPidAlpha, 1);
2886 PP_ASSERT_WITH_CODE(!tmp_result,
2887 "Failed to set package power PID!",
2891 tmp_result = vega10_construct_voltage_tables(hwmgr);
2892 PP_ASSERT_WITH_CODE(!tmp_result,
2893 "Failed to contruct voltage tables!",
2894 result = tmp_result);
2896 tmp_result = vega10_init_smc_table(hwmgr);
2897 PP_ASSERT_WITH_CODE(!tmp_result,
2898 "Failed to initialize SMC table!",
2899 result = tmp_result);
2901 if (PP_CAP(PHM_PlatformCaps_ThermalController)) {
2902 tmp_result = vega10_enable_thermal_protection(hwmgr);
2903 PP_ASSERT_WITH_CODE(!tmp_result,
2904 "Failed to enable thermal protection!",
2905 result = tmp_result);
2908 tmp_result = vega10_enable_vrhot_feature(hwmgr);
2909 PP_ASSERT_WITH_CODE(!tmp_result,
2910 "Failed to enable VR hot feature!",
2911 result = tmp_result);
2913 tmp_result = vega10_enable_deep_sleep_master_switch(hwmgr);
2914 PP_ASSERT_WITH_CODE(!tmp_result,
2915 "Failed to enable deep sleep master switch!",
2916 result = tmp_result);
2918 tmp_result = vega10_start_dpm(hwmgr, SMC_DPM_FEATURES);
2919 PP_ASSERT_WITH_CODE(!tmp_result,
2920 "Failed to start DPM!", result = tmp_result);
2922 /* enable didt, do not abort if failed didt */
2923 tmp_result = vega10_enable_didt_config(hwmgr);
2924 PP_ASSERT(!tmp_result,
2925 "Failed to enable didt config!");
2927 tmp_result = vega10_enable_power_containment(hwmgr);
2928 PP_ASSERT_WITH_CODE(!tmp_result,
2929 "Failed to enable power containment!",
2930 result = tmp_result);
2932 tmp_result = vega10_power_control_set_level(hwmgr);
2933 PP_ASSERT_WITH_CODE(!tmp_result,
2934 "Failed to power control set level!",
2935 result = tmp_result);
2937 tmp_result = vega10_enable_ulv(hwmgr);
2938 PP_ASSERT_WITH_CODE(!tmp_result,
2939 "Failed to enable ULV!",
2940 result = tmp_result);
/* Report the size of this backend's hardware power-state structure. */
2945 static int vega10_get_power_state_size(struct pp_hwmgr *hwmgr)
2947 return sizeof(struct vega10_power_state);
/*
 * Powerplay-table parse callback: translate one ATOM_Vega10_State entry
 * into a pp_power_state — classification/validation/display flags plus
 * two performance levels (low and high) resolved through the SOCCLK,
 * GFXCLK and MCLK dependency tables. Handles both rev-0 and rev-1
 * GFXCLK dependency record layouts for the high level.
 */
2950 static int vega10_get_pp_table_entry_callback_func(struct pp_hwmgr *hwmgr,
2951 void *state, struct pp_power_state *power_state,
2952 void *pp_table, uint32_t classification_flag)
2954 ATOM_Vega10_GFXCLK_Dependency_Record_V2 *patom_record_V2;
2955 struct vega10_power_state *vega10_power_state =
2956 cast_phw_vega10_power_state(&(power_state->hardware));
2957 struct vega10_performance_level *performance_level;
2958 ATOM_Vega10_State *state_entry = (ATOM_Vega10_State *)state;
2959 ATOM_Vega10_POWERPLAYTABLE *powerplay_table =
2960 (ATOM_Vega10_POWERPLAYTABLE *)pp_table;
/* dependency tables are located by byte offsets inside the pptable */
2961 ATOM_Vega10_SOCCLK_Dependency_Table *socclk_dep_table =
2962 (ATOM_Vega10_SOCCLK_Dependency_Table *)
2963 (((unsigned long)powerplay_table) +
2964 le16_to_cpu(powerplay_table->usSocclkDependencyTableOffset));
2965 ATOM_Vega10_GFXCLK_Dependency_Table *gfxclk_dep_table =
2966 (ATOM_Vega10_GFXCLK_Dependency_Table *)
2967 (((unsigned long)powerplay_table) +
2968 le16_to_cpu(powerplay_table->usGfxclkDependencyTableOffset));
2969 ATOM_Vega10_MCLK_Dependency_Table *mclk_dep_table =
2970 (ATOM_Vega10_MCLK_Dependency_Table *)
2971 (((unsigned long)powerplay_table) +
2972 le16_to_cpu(powerplay_table->usMclkDependencyTableOffset));
2975 /* The following fields are not initialized here:
2976 * id orderedList allStatesList
2978 power_state->classification.ui_label =
2979 (le16_to_cpu(state_entry->usClassification) &
2980 ATOM_PPLIB_CLASSIFICATION_UI_MASK) >>
2981 ATOM_PPLIB_CLASSIFICATION_UI_SHIFT;
2982 power_state->classification.flags = classification_flag;
2983 /* NOTE: There is a classification2 flag in BIOS
2984 * that is not being used right now
2986 power_state->classification.temporary_state = false;
2987 power_state->classification.to_be_deleted = false;
2989 power_state->validation.disallowOnDC =
2990 ((le32_to_cpu(state_entry->ulCapsAndSettings) &
2991 ATOM_Vega10_DISALLOW_ON_DC) != 0);
2993 power_state->display.disableFrameModulation = false;
2994 power_state->display.limitRefreshrate = false;
2995 power_state->display.enableVariBright =
2996 ((le32_to_cpu(state_entry->ulCapsAndSettings) &
2997 ATOM_Vega10_ENABLE_VARIBRIGHT) != 0);
2999 power_state->validation.supportedPowerLevels = 0;
3000 power_state->uvd_clocks.VCLK = 0;
3001 power_state->uvd_clocks.DCLK = 0;
3002 power_state->temperatures.min = 0;
3003 power_state->temperatures.max = 0;
/* first (low) performance level */
3005 performance_level = &(vega10_power_state->performance_levels
3006 [vega10_power_state->performance_level_count++]);
3008 PP_ASSERT_WITH_CODE(
3009 (vega10_power_state->performance_level_count <
3010 NUM_GFXCLK_DPM_LEVELS),
3011 "Performance levels exceeds SMC limit!",
3014 PP_ASSERT_WITH_CODE(
3015 (vega10_power_state->performance_level_count <=
3016 hwmgr->platform_descriptor.
3017 hardwareActivityPerformanceLevels),
3018 "Performance levels exceeds Driver limit!",
3021 /* Performance levels are arranged from low to high. */
3022 performance_level->soc_clock = socclk_dep_table->entries
3023 [state_entry->ucSocClockIndexLow].ulClk;
3024 performance_level->gfx_clock = gfxclk_dep_table->entries
3025 [state_entry->ucGfxClockIndexLow].ulClk;
3026 performance_level->mem_clock = mclk_dep_table->entries
3027 [state_entry->ucMemClockIndexLow].ulMemClk;
/* second (high) performance level */
3029 performance_level = &(vega10_power_state->performance_levels
3030 [vega10_power_state->performance_level_count++]);
3031 performance_level->soc_clock = socclk_dep_table->entries
3032 [state_entry->ucSocClockIndexHigh].ulClk;
/* rev 1 gfxclk tables use the larger V2 record layout */
3033 if (gfxclk_dep_table->ucRevId == 0) {
3034 performance_level->gfx_clock = gfxclk_dep_table->entries
3035 [state_entry->ucGfxClockIndexHigh].ulClk;
3036 } else if (gfxclk_dep_table->ucRevId == 1) {
3037 patom_record_V2 = (ATOM_Vega10_GFXCLK_Dependency_Record_V2 *)gfxclk_dep_table->entries;
3038 performance_level->gfx_clock = patom_record_V2[state_entry->ucGfxClockIndexHigh].ulClk;
3041 performance_level->mem_clock = mclk_dep_table->entries
3042 [state_entry->ucMemClockIndexHigh].ulMemClk;
/*
 * vega10_get_pp_table_entry - fill @state from powerplay-table entry
 * @entry_index, using vega10_get_pp_table_entry_callback_func to parse
 * the hardware-specific portion.
 * NOTE(review): this listing is missing lines (line numbers are
 * non-contiguous); braces/return elided here.
 */
3046 static int vega10_get_pp_table_entry(struct pp_hwmgr *hwmgr,
3047 unsigned long entry_index, struct pp_power_state *state)
3050 struct vega10_power_state *ps;
/* Tag the hardware state so later casts can validate it. */
3052 state->hardware.magic = PhwVega10_Magic;
3054 ps = cast_phw_vega10_power_state(&state->hardware);
3056 result = vega10_get_powerplay_table_entry(hwmgr, entry_index, state,
3057 vega10_get_pp_table_entry_callback_func);
3060 * This is the earliest time we have all the dependency table
3061 * and the VBIOS boot state
3063 /* set DC compatible flag if this state supports DC */
3064 if (!state->validation.disallowOnDC)
3065 ps->dc_compatible = true;
/* Mirror the generic UVD clocks into the vega10-specific state. */
3067 ps->uvd_clks.vclk = state->uvd_clocks.VCLK;
3068 ps->uvd_clks.dclk = state->uvd_clocks.DCLK;
/*
 * vega10_patch_boot_state - hook to adjust the boot power state.
 * NOTE(review): only the signature is visible in this extract; the body
 * is elided by the listing.
 */
3073 static int vega10_patch_boot_state(struct pp_hwmgr *hwmgr,
3074 struct pp_hw_power_state *hw_ps)
/*
 * vega10_apply_state_adjust_rules - clamp and adjust the requested power
 * state before it is programmed: cap clocks at DC limits on DC power,
 * raise level-0 clocks to the display-driven minimums, optionally pin
 * MCLK high when mclk switching must be disabled, and force stable-pstate
 * clocks when that platform cap is set.
 * NOTE(review): this listing is missing lines (closing braces, some
 * conditions and assignments); comments below describe only what is
 * visible.
 */
3079 static int vega10_apply_state_adjust_rules(struct pp_hwmgr *hwmgr,
3080 struct pp_power_state *request_ps,
3081 const struct pp_power_state *current_ps)
3083 struct vega10_power_state *vega10_ps =
3084 cast_phw_vega10_power_state(&request_ps->hardware);
3087 struct PP_Clocks minimum_clocks = {0};
3088 bool disable_mclk_switching;
3089 bool disable_mclk_switching_for_frame_lock;
3090 bool disable_mclk_switching_for_vr;
3091 bool force_mclk_high;
3092 struct cgs_display_info info = {0};
3093 const struct phm_clock_and_voltage_limits *max_limits;
3095 struct vega10_hwmgr *data = (struct vega10_hwmgr *)(hwmgr->backend);
3096 struct phm_ppt_v2_information *table_info =
3097 (struct phm_ppt_v2_information *)(hwmgr->pptable);
3099 uint32_t stable_pstate_sclk_dpm_percentage;
3100 uint32_t stable_pstate_sclk = 0, stable_pstate_mclk = 0;
/* Remember whether the UI asked for the battery profile. */
3103 data->battery_state = (PP_StateUILabel_Battery ==
3104 request_ps->classification.ui_label);
/* NOTE(review): message says "VI" but this is vega10 code — likely a
 * copy/paste leftover; confirm against upstream before relying on it. */
3106 if (vega10_ps->performance_level_count != 2)
3107 pr_info("VI should always have 2 performance levels");
/* Pick AC or DC max limits depending on the current power source. */
3109 max_limits = (PP_PowerSource_AC == hwmgr->power_source) ?
3110 &(hwmgr->dyn_state.max_clock_voltage_on_ac) :
3111 &(hwmgr->dyn_state.max_clock_voltage_on_dc);
3113 /* Cap clock DPM tables at DC MAX if it is in DC. */
3114 if (PP_PowerSource_DC == hwmgr->power_source) {
3115 for (i = 0; i < vega10_ps->performance_level_count; i++) {
3116 if (vega10_ps->performance_levels[i].mem_clock >
3118 vega10_ps->performance_levels[i].mem_clock =
3120 if (vega10_ps->performance_levels[i].gfx_clock >
3122 vega10_ps->performance_levels[i].gfx_clock =
3127 cgs_get_active_displays_info(hwmgr->device, &info);
3129 /* result = PHM_CheckVBlankTime(hwmgr, &vblankTooShort);*/
/* Display-configuration minimums become the floor for level 0. */
3130 minimum_clocks.engineClock = hwmgr->display_config.min_core_set_clock;
3131 minimum_clocks.memoryClock = hwmgr->display_config.min_mem_set_clock;
3133 if (PP_CAP(PHM_PlatformCaps_StablePState)) {
3134 stable_pstate_sclk_dpm_percentage =
3135 data->registry_data.stable_pstate_sclk_dpm_percentage;
3136 PP_ASSERT_WITH_CODE(
3137 data->registry_data.stable_pstate_sclk_dpm_percentage >= 1 &&
3138 data->registry_data.stable_pstate_sclk_dpm_percentage <= 100,
3139 "percent sclk value must range from 1% to 100%, setting default value",
3140 stable_pstate_sclk_dpm_percentage = 75);
/* Stable pstate always works against the AC limits. */
3142 max_limits = &(hwmgr->dyn_state.max_clock_voltage_on_ac);
3143 stable_pstate_sclk = (max_limits->sclk *
3144 stable_pstate_sclk_dpm_percentage) / 100;
/* Snap the percentage-derived sclk down to the nearest table entry. */
3146 for (count = table_info->vdd_dep_on_sclk->count - 1;
3147 count >= 0; count--) {
3148 if (stable_pstate_sclk >=
3149 table_info->vdd_dep_on_sclk->entries[count].clk) {
3150 stable_pstate_sclk =
3151 table_info->vdd_dep_on_sclk->entries[count].clk;
3157 stable_pstate_sclk = table_info->vdd_dep_on_sclk->entries[0].clk;
3159 stable_pstate_mclk = max_limits->mclk;
3161 minimum_clocks.engineClock = stable_pstate_sclk;
3162 minimum_clocks.memoryClock = stable_pstate_mclk;
3165 disable_mclk_switching_for_frame_lock = phm_cap_enabled(
3166 hwmgr->platform_descriptor.platformCaps,
3167 PHM_PlatformCaps_DisableMclkSwitchingForFrameLock);
3168 disable_mclk_switching_for_vr = PP_CAP(PHM_PlatformCaps_DisableMclkSwitchForVR);
3169 force_mclk_high = PP_CAP(PHM_PlatformCaps_ForceMclkHigh);
/* Multiple displays or any of the caps above forbid mclk switching
 * (trailing operand of this || chain is elided in this extract). */
3171 disable_mclk_switching = (info.display_count > 1) ||
3172 disable_mclk_switching_for_frame_lock ||
3173 disable_mclk_switching_for_vr ||
3176 sclk = vega10_ps->performance_levels[0].gfx_clock;
3177 mclk = vega10_ps->performance_levels[0].mem_clock;
/* Raise level-0 clocks to the minimums, but never above the max limits. */
3179 if (sclk < minimum_clocks.engineClock)
3180 sclk = (minimum_clocks.engineClock > max_limits->sclk) ?
3181 max_limits->sclk : minimum_clocks.engineClock;
3183 if (mclk < minimum_clocks.memoryClock)
3184 mclk = (minimum_clocks.memoryClock > max_limits->mclk) ?
3185 max_limits->mclk : minimum_clocks.memoryClock;
3187 vega10_ps->performance_levels[0].gfx_clock = sclk;
3188 vega10_ps->performance_levels[0].mem_clock = mclk;
/* Keep level 0 <= level 1 for gfx clock. */
3190 if (vega10_ps->performance_levels[1].gfx_clock <
3191 vega10_ps->performance_levels[0].gfx_clock)
3192 vega10_ps->performance_levels[0].gfx_clock =
3193 vega10_ps->performance_levels[1].gfx_clock;
3195 if (disable_mclk_switching) {
3196 /* Set Mclk the max of level 0 and level 1 */
3197 if (mclk < vega10_ps->performance_levels[1].mem_clock)
3198 mclk = vega10_ps->performance_levels[1].mem_clock;
3200 /* Find the lowest MCLK frequency that is within
3201 * the tolerable latency defined in DAL
3204 for (i = 0; i < data->mclk_latency_table.count; i++) {
3205 if ((data->mclk_latency_table.entries[i].latency <= latency) &&
3206 (data->mclk_latency_table.entries[i].frequency >=
3207 vega10_ps->performance_levels[0].mem_clock) &&
3208 (data->mclk_latency_table.entries[i].frequency <=
3209 vega10_ps->performance_levels[1].mem_clock))
3210 mclk = data->mclk_latency_table.entries[i].frequency;
3212 vega10_ps->performance_levels[0].mem_clock = mclk;
/* Keep level 0 <= level 1 for mem clock. */
3214 if (vega10_ps->performance_levels[1].mem_clock <
3215 vega10_ps->performance_levels[0].mem_clock)
3216 vega10_ps->performance_levels[0].mem_clock =
3217 vega10_ps->performance_levels[1].mem_clock;
/* Stable pstate overrides every level with the fixed clocks. */
3220 if (PP_CAP(PHM_PlatformCaps_StablePState)) {
3221 for (i = 0; i < vega10_ps->performance_level_count; i++) {
3222 vega10_ps->performance_levels[i].gfx_clock = stable_pstate_sclk;
3223 vega10_ps->performance_levels[i].mem_clock = stable_pstate_mclk;
/*
 * vega10_find_dpm_states_clocks_in_dpm_table - decide which DPM tables
 * need regeneration for the new power state. Sets bits in
 * data->need_update_dpm_table (DPMTABLE_UPDATE_* when display timing /
 * deep-sleep minimums changed, DPMTABLE_OD_UPDATE_* when the requested
 * clock is not present in the current table).
 * NOTE(review): lines are elided in this extract (loop bodies/braces);
 * comments describe only the visible logic.
 */
3230 static int vega10_find_dpm_states_clocks_in_dpm_table(struct pp_hwmgr *hwmgr, const void *input)
3232 const struct phm_set_power_state_input *states =
3233 (const struct phm_set_power_state_input *)input;
3234 const struct vega10_power_state *vega10_ps =
3235 cast_const_phw_vega10_power_state(states->pnew_state);
3236 struct vega10_hwmgr *data =
3237 (struct vega10_hwmgr *)(hwmgr->backend);
3238 struct vega10_single_dpm_table *sclk_table =
3239 &(data->dpm_table.gfx_table);
/* Target clocks are taken from the highest performance level. */
3240 uint32_t sclk = vega10_ps->performance_levels
3241 [vega10_ps->performance_level_count - 1].gfx_clock;
3242 struct vega10_single_dpm_table *mclk_table =
3243 &(data->dpm_table.mem_table);
3244 uint32_t mclk = vega10_ps->performance_levels
3245 [vega10_ps->performance_level_count - 1].mem_clock;
3246 struct PP_Clocks min_clocks = {0};
3248 struct cgs_display_info info = {0};
3250 data->need_update_dpm_table = 0;
/* Overdrive-capable path: compare against the existing table entries. */
3252 if (PP_CAP(PHM_PlatformCaps_ODNinACSupport) ||
3253 PP_CAP(PHM_PlatformCaps_ODNinDCSupport)) {
3254 for (i = 0; i < sclk_table->count; i++) {
3255 if (sclk == sclk_table->dpm_levels[i].value)
3259 if (!(data->apply_overdrive_next_settings_mask &
3260 DPMTABLE_OD_UPDATE_SCLK) && i >= sclk_table->count) {
3261 /* Check SCLK in DAL's minimum clocks
3262 * in case DeepSleep divider update is required.
3264 if (data->display_timing.min_clock_in_sr !=
3265 min_clocks.engineClockInSR &&
3266 (min_clocks.engineClockInSR >=
3267 VEGA10_MINIMUM_ENGINE_CLOCK ||
3268 data->display_timing.min_clock_in_sr >=
3269 VEGA10_MINIMUM_ENGINE_CLOCK))
3270 data->need_update_dpm_table |= DPMTABLE_UPDATE_SCLK;
3273 cgs_get_active_displays_info(hwmgr->device, &info);
/* A change in active display count forces an MCLK table update. */
3275 if (data->display_timing.num_existing_displays !=
3277 data->need_update_dpm_table |= DPMTABLE_UPDATE_MCLK;
/* Non-overdrive path: requested sclk missing from the table means OD update. */
3279 for (i = 0; i < sclk_table->count; i++) {
3280 if (sclk == sclk_table->dpm_levels[i].value)
3284 if (i >= sclk_table->count)
3285 data->need_update_dpm_table |= DPMTABLE_OD_UPDATE_SCLK;
3287 /* Check SCLK in DAL's minimum clocks
3288 * in case DeepSleep divider update is required.
3290 if (data->display_timing.min_clock_in_sr !=
3291 min_clocks.engineClockInSR &&
3292 (min_clocks.engineClockInSR >=
3293 VEGA10_MINIMUM_ENGINE_CLOCK ||
3294 data->display_timing.min_clock_in_sr >=
3295 VEGA10_MINIMUM_ENGINE_CLOCK))
3296 data->need_update_dpm_table |= DPMTABLE_UPDATE_SCLK;
3299 for (i = 0; i < mclk_table->count; i++) {
3300 if (mclk == mclk_table->dpm_levels[i].value)
3304 cgs_get_active_displays_info(hwmgr->device, &info);
3306 if (i >= mclk_table->count)
3307 data->need_update_dpm_table |= DPMTABLE_OD_UPDATE_MCLK;
3309 if (data->display_timing.num_existing_displays !=
3310 info.display_count ||
3311 i >= mclk_table->count)
3312 data->need_update_dpm_table |= DPMTABLE_UPDATE_MCLK;
/*
 * vega10_populate_and_upload_sclk_mclk_dpm_levels - regenerate the
 * SCLK/MCLK DPM tables flagged in data->need_update_dpm_table (and the
 * overdrive mask) and repopulate the SMC graphics/memory levels.
 * For OD6Plus, intermediate levels are rescaled proportionally from the
 * golden (default) table so the heatmap axis stays consistent.
 * NOTE(review): this extract is missing lines (for-loop headers, some
 * operands, returns); comments describe the visible structure only.
 */
3317 static int vega10_populate_and_upload_sclk_mclk_dpm_levels(
3318 struct pp_hwmgr *hwmgr, const void *input)
3321 const struct phm_set_power_state_input *states =
3322 (const struct phm_set_power_state_input *)input;
3323 const struct vega10_power_state *vega10_ps =
3324 cast_const_phw_vega10_power_state(states->pnew_state);
3325 struct vega10_hwmgr *data =
3326 (struct vega10_hwmgr *)(hwmgr->backend);
3327 uint32_t sclk = vega10_ps->performance_levels
3328 [vega10_ps->performance_level_count - 1].gfx_clock;
3329 uint32_t mclk = vega10_ps->performance_levels
3330 [vega10_ps->performance_level_count - 1].mem_clock;
3331 struct vega10_dpm_table *dpm_table = &data->dpm_table;
3332 struct vega10_dpm_table *golden_dpm_table =
3333 &data->golden_dpm_table;
3334 uint32_t dpm_count, clock_percent;
/* ODN-capable path: copy user overdrive levels into the live tables. */
3337 if (PP_CAP(PHM_PlatformCaps_ODNinACSupport) ||
3338 PP_CAP(PHM_PlatformCaps_ODNinDCSupport)) {
/* Nothing flagged — nothing to do. */
3340 if (!data->need_update_dpm_table &&
3341 !data->apply_optimized_settings &&
3342 !data->apply_overdrive_next_settings_mask)
3345 if (data->apply_overdrive_next_settings_mask &
3346 DPMTABLE_OD_UPDATE_SCLK) {
3348 dpm_count < dpm_table->gfx_table.count;
3350 dpm_table->gfx_table.dpm_levels[dpm_count].enabled =
3351 data->odn_dpm_table.odn_core_clock_dpm_levels.
3352 performance_level_entries[dpm_count].enabled;
3353 dpm_table->gfx_table.dpm_levels[dpm_count].value =
3354 data->odn_dpm_table.odn_core_clock_dpm_levels.
3355 performance_level_entries[dpm_count].clock;
3359 if (data->apply_overdrive_next_settings_mask &
3360 DPMTABLE_OD_UPDATE_MCLK) {
3362 dpm_count < dpm_table->mem_table.count;
3364 dpm_table->mem_table.dpm_levels[dpm_count].enabled =
3365 data->odn_dpm_table.odn_memory_clock_dpm_levels.
3366 performance_level_entries[dpm_count].enabled;
3367 dpm_table->mem_table.dpm_levels[dpm_count].value =
3368 data->odn_dpm_table.odn_memory_clock_dpm_levels.
3369 performance_level_entries[dpm_count].clock;
/* Re-upload SCLK levels when the table or optimized settings changed. */
3373 if ((data->need_update_dpm_table & DPMTABLE_UPDATE_SCLK) ||
3374 data->apply_optimized_settings ||
3375 (data->apply_overdrive_next_settings_mask &
3376 DPMTABLE_OD_UPDATE_SCLK)) {
3377 result = vega10_populate_all_graphic_levels(hwmgr);
3378 PP_ASSERT_WITH_CODE(!result,
3379 "Failed to populate SCLK during PopulateNewDPMClocksStates Function!",
3383 if ((data->need_update_dpm_table & DPMTABLE_UPDATE_MCLK) ||
3384 (data->apply_overdrive_next_settings_mask &
3385 DPMTABLE_OD_UPDATE_MCLK)){
3386 result = vega10_populate_all_memory_levels(hwmgr);
3387 PP_ASSERT_WITH_CODE(!result,
3388 "Failed to populate MCLK during PopulateNewDPMClocksStates Function!",
/* Non-ODN path starts here. */
3392 if (!data->need_update_dpm_table &&
3393 !data->apply_optimized_settings)
3396 if (data->need_update_dpm_table & DPMTABLE_OD_UPDATE_SCLK &&
3397 data->smu_features[GNLD_DPM_GFXCLK].supported) {
3399 gfx_table.dpm_levels[dpm_table->gfx_table.count - 1].
3401 if (PP_CAP(PHM_PlatformCaps_OD6PlusinACSupport) ||
3402 PP_CAP(PHM_PlatformCaps_OD6PlusinDCSupport)) {
3403 /* Need to do calculation based on the golden DPM table
3404 * as the Heatmap GPU Clock axis is also based on
3405 * the default values
3407 PP_ASSERT_WITH_CODE(
3408 golden_dpm_table->gfx_table.dpm_levels
3409 [golden_dpm_table->gfx_table.count - 1].value,
/* Rescale intermediate gfx levels by the % delta of the top level. */
3413 dpm_count = dpm_table->gfx_table.count < 2 ?
3414 0 : dpm_table->gfx_table.count - 2;
3415 for (i = dpm_count; i > 1; i--) {
3416 if (sclk > golden_dpm_table->gfx_table.dpm_levels
3417 [golden_dpm_table->gfx_table.count - 1].value) {
3419 ((sclk - golden_dpm_table->gfx_table.dpm_levels
3420 [golden_dpm_table->gfx_table.count - 1].value) *
3422 golden_dpm_table->gfx_table.dpm_levels
3423 [golden_dpm_table->gfx_table.count - 1].value;
3425 dpm_table->gfx_table.dpm_levels[i].value =
3426 golden_dpm_table->gfx_table.dpm_levels[i].value +
3427 (golden_dpm_table->gfx_table.dpm_levels[i].value *
3428 clock_percent) / 100;
3429 } else if (golden_dpm_table->
3430 gfx_table.dpm_levels[dpm_table->gfx_table.count-1].value >
3433 ((golden_dpm_table->gfx_table.dpm_levels
3434 [golden_dpm_table->gfx_table.count - 1].value -
3436 golden_dpm_table->gfx_table.dpm_levels
3437 [golden_dpm_table->gfx_table.count-1].value;
3439 dpm_table->gfx_table.dpm_levels[i].value =
3440 golden_dpm_table->gfx_table.dpm_levels[i].value -
3441 (golden_dpm_table->gfx_table.dpm_levels[i].value *
3442 clock_percent) / 100;
3444 dpm_table->gfx_table.dpm_levels[i].value =
3445 golden_dpm_table->gfx_table.dpm_levels[i].value;
/* Same rescaling scheme for the memory (UCLK) table. */
3450 if (data->need_update_dpm_table & DPMTABLE_OD_UPDATE_MCLK &&
3451 data->smu_features[GNLD_DPM_UCLK].supported) {
3453 mem_table.dpm_levels[dpm_table->mem_table.count - 1].
3456 if (PP_CAP(PHM_PlatformCaps_OD6PlusinACSupport) ||
3457 PP_CAP(PHM_PlatformCaps_OD6PlusinDCSupport)) {
3459 PP_ASSERT_WITH_CODE(
3460 golden_dpm_table->mem_table.dpm_levels
3461 [golden_dpm_table->mem_table.count - 1].value,
3465 dpm_count = dpm_table->mem_table.count < 2 ?
3466 0 : dpm_table->mem_table.count - 2;
3467 for (i = dpm_count; i > 1; i--) {
3468 if (mclk > golden_dpm_table->mem_table.dpm_levels
3469 [golden_dpm_table->mem_table.count-1].value) {
3470 clock_percent = ((mclk -
3471 golden_dpm_table->mem_table.dpm_levels
3472 [golden_dpm_table->mem_table.count-1].value) *
3474 golden_dpm_table->mem_table.dpm_levels
3475 [golden_dpm_table->mem_table.count-1].value;
3477 dpm_table->mem_table.dpm_levels[i].value =
3478 golden_dpm_table->mem_table.dpm_levels[i].value +
3479 (golden_dpm_table->mem_table.dpm_levels[i].value *
3480 clock_percent) / 100;
3481 } else if (golden_dpm_table->mem_table.dpm_levels
3482 [dpm_table->mem_table.count-1].value > mclk) {
3483 clock_percent = ((golden_dpm_table->mem_table.dpm_levels
3484 [golden_dpm_table->mem_table.count-1].value - mclk) *
3486 golden_dpm_table->mem_table.dpm_levels
3487 [golden_dpm_table->mem_table.count-1].value;
3489 dpm_table->mem_table.dpm_levels[i].value =
3490 golden_dpm_table->mem_table.dpm_levels[i].value -
3491 (golden_dpm_table->mem_table.dpm_levels[i].value *
3492 clock_percent) / 100;
3494 dpm_table->mem_table.dpm_levels[i].value =
3495 golden_dpm_table->mem_table.dpm_levels[i].value;
/* Finally, push the rebuilt levels to the SMC. */
3500 if ((data->need_update_dpm_table &
3501 (DPMTABLE_OD_UPDATE_SCLK + DPMTABLE_UPDATE_SCLK)) ||
3502 data->apply_optimized_settings) {
3503 result = vega10_populate_all_graphic_levels(hwmgr);
3504 PP_ASSERT_WITH_CODE(!result,
3505 "Failed to populate SCLK during PopulateNewDPMClocksStates Function!",
3509 if (data->need_update_dpm_table &
3510 (DPMTABLE_OD_UPDATE_MCLK + DPMTABLE_UPDATE_MCLK)) {
3511 result = vega10_populate_all_memory_levels(hwmgr);
3512 PP_ASSERT_WITH_CODE(!result,
3513 "Failed to populate MCLK during PopulateNewDPMClocksStates Function!",
/*
 * vega10_trim_single_dpm_states - enable only the DPM levels of
 * @dpm_table whose value lies within [@low_limit, @high_limit];
 * levels outside the window are disabled.
 * NOTE(review): the else branch / closing lines are elided in this
 * extract.
 */
3520 static int vega10_trim_single_dpm_states(struct pp_hwmgr *hwmgr,
3521 struct vega10_single_dpm_table *dpm_table,
3522 uint32_t low_limit, uint32_t high_limit)
3526 for (i = 0; i < dpm_table->count; i++) {
3527 if ((dpm_table->dpm_levels[i].value < low_limit) ||
3528 (dpm_table->dpm_levels[i].value > high_limit))
3529 dpm_table->dpm_levels[i].enabled = false;
3531 dpm_table->dpm_levels[i].enabled = true;
/*
 * vega10_trim_single_dpm_states_with_mask - like
 * vega10_trim_single_dpm_states, but additionally disables any level
 * whose bit is NOT set in @disable_dpm_mask (mask bit i corresponds to
 * level i; a set bit keeps the level eligible).
 */
3536 static int vega10_trim_single_dpm_states_with_mask(struct pp_hwmgr *hwmgr,
3537 struct vega10_single_dpm_table *dpm_table,
3538 uint32_t low_limit, uint32_t high_limit,
3539 uint32_t disable_dpm_mask)
3543 for (i = 0; i < dpm_table->count; i++) {
3544 if ((dpm_table->dpm_levels[i].value < low_limit) ||
3545 (dpm_table->dpm_levels[i].value > high_limit))
3546 dpm_table->dpm_levels[i].enabled = false;
3547 else if (!((1 << i) & disable_dpm_mask))
3548 dpm_table->dpm_levels[i].enabled = false;
3550 dpm_table->dpm_levels[i].enabled = true;
/*
 * vega10_trim_dpm_states - restrict the SOC/GFX/MEM DPM tables to the
 * clock window spanned by the power state's lowest and highest
 * performance levels. The gfx table additionally honours
 * data->disable_dpm_mask.
 */
3555 static int vega10_trim_dpm_states(struct pp_hwmgr *hwmgr,
3556 const struct vega10_power_state *vega10_ps)
3558 struct vega10_hwmgr *data =
3559 (struct vega10_hwmgr *)(hwmgr->backend);
3560 uint32_t high_limit_count;
3562 PP_ASSERT_WITH_CODE((vega10_ps->performance_level_count >= 1),
3563 "power state did not have any performance level",
/* With a single level, low and high limits are the same level 0. */
3566 high_limit_count = (vega10_ps->performance_level_count == 1) ? 0 : 1;
3568 vega10_trim_single_dpm_states(hwmgr,
3569 &(data->dpm_table.soc_table),
3570 vega10_ps->performance_levels[0].soc_clock,
3571 vega10_ps->performance_levels[high_limit_count].soc_clock);
3573 vega10_trim_single_dpm_states_with_mask(hwmgr,
3574 &(data->dpm_table.gfx_table),
3575 vega10_ps->performance_levels[0].gfx_clock,
3576 vega10_ps->performance_levels[high_limit_count].gfx_clock,
3577 data->disable_dpm_mask);
3579 vega10_trim_single_dpm_states(hwmgr,
3580 &(data->dpm_table.mem_table),
3581 vega10_ps->performance_levels[0].mem_clock,
3582 vega10_ps->performance_levels[high_limit_count].mem_clock);
/*
 * vega10_find_lowest_dpm_level - return the index of the first enabled
 * level in @table (break/return lines elided in this extract).
 */
3587 static uint32_t vega10_find_lowest_dpm_level(
3588 struct vega10_single_dpm_table *table)
3592 for (i = 0; i < table->count; i++) {
3593 if (table->dpm_levels[i].enabled)
/*
 * vega10_find_highest_dpm_level - return the index of the last enabled
 * level in @table, scanning from the top. If the table claims more than
 * MAX_REGULAR_DPM_NUMBER entries it is treated as corrupt and the
 * highest legal index is returned.
 */
3600 static uint32_t vega10_find_highest_dpm_level(
3601 struct vega10_single_dpm_table *table)
3605 if (table->count <= MAX_REGULAR_DPM_NUMBER) {
3606 for (i = table->count; i > 0; i--) {
3607 if (table->dpm_levels[i - 1].enabled)
3611 pr_info("DPM Table Has Too Many Entries!");
3612 return MAX_REGULAR_DPM_NUMBER - 1;
/*
 * vega10_apply_dal_minimum_voltage_request - hook for DAL minimum
 * voltage handling. NOTE(review): body is elided in this extract;
 * appears to be a no-op stub here — confirm against upstream.
 */
3618 static void vega10_apply_dal_minimum_voltage_request(
3619 struct pp_hwmgr *hwmgr)
/*
 * vega10_get_soc_index_for_max_uclk - return the SOC voltage index to
 * pair with the highest UCLK DPM level: the vddInd of the top
 * mclk-dependency entry, plus one.
 */
3624 static int vega10_get_soc_index_for_max_uclk(struct pp_hwmgr *hwmgr)
3626 struct phm_ppt_v1_clock_voltage_dependency_table *vdd_dep_table_on_mclk;
3627 struct phm_ppt_v2_information *table_info =
3628 (struct phm_ppt_v2_information *)(hwmgr->pptable);
3630 vdd_dep_table_on_mclk = table_info->vdd_dep_on_mclk;
3632 return vdd_dep_table_on_mclk->entries[NUM_UCLK_DPM_LEVELS - 1].vddInd + 1;
/*
 * vega10_upload_dpm_bootup_level - push the soft-minimum GFX and UCLK
 * DPM indices to the SMC when they differ from the cached soft_min_level,
 * then update the cache. When the requested mem level is the top UCLK
 * level, a matching soft-min SOCCLK index is also sent.
 */
3635 static int vega10_upload_dpm_bootup_level(struct pp_hwmgr *hwmgr)
3637 struct vega10_hwmgr *data =
3638 (struct vega10_hwmgr *)(hwmgr->backend);
3639 uint32_t socclk_idx;
3641 vega10_apply_dal_minimum_voltage_request(hwmgr);
3643 if (!data->registry_data.sclk_dpm_key_disabled) {
/* Only message the SMC when the soft-min actually changes. */
3644 if (data->smc_state_table.gfx_boot_level !=
3645 data->dpm_table.gfx_table.dpm_state.soft_min_level) {
3646 PP_ASSERT_WITH_CODE(!smum_send_msg_to_smc_with_parameter(
3648 PPSMC_MSG_SetSoftMinGfxclkByIndex,
3649 data->smc_state_table.gfx_boot_level),
3650 "Failed to set soft min sclk index!",
3652 data->dpm_table.gfx_table.dpm_state.soft_min_level =
3653 data->smc_state_table.gfx_boot_level;
3657 if (!data->registry_data.mclk_dpm_key_disabled) {
3658 if (data->smc_state_table.mem_boot_level !=
3659 data->dpm_table.mem_table.dpm_state.soft_min_level) {
/* Top UCLK level also needs a matching SOCCLK floor. */
3660 if (data->smc_state_table.mem_boot_level == NUM_UCLK_DPM_LEVELS - 1) {
3661 socclk_idx = vega10_get_soc_index_for_max_uclk(hwmgr);
3662 PP_ASSERT_WITH_CODE(!smum_send_msg_to_smc_with_parameter(
3664 PPSMC_MSG_SetSoftMinSocclkByIndex,
3666 "Failed to set soft min uclk index!",
3669 PP_ASSERT_WITH_CODE(!smum_send_msg_to_smc_with_parameter(
3671 PPSMC_MSG_SetSoftMinUclkByIndex,
3672 data->smc_state_table.mem_boot_level),
3673 "Failed to set soft min uclk index!",
3676 data->dpm_table.mem_table.dpm_state.soft_min_level =
3677 data->smc_state_table.mem_boot_level;
/*
 * vega10_upload_dpm_max_level - push the soft-maximum GFX and UCLK DPM
 * indices to the SMC when they differ from the cached soft_max_level,
 * then update the cache. Counterpart of vega10_upload_dpm_bootup_level.
 */
3684 static int vega10_upload_dpm_max_level(struct pp_hwmgr *hwmgr)
3686 struct vega10_hwmgr *data =
3687 (struct vega10_hwmgr *)(hwmgr->backend);
3689 vega10_apply_dal_minimum_voltage_request(hwmgr);
3691 if (!data->registry_data.sclk_dpm_key_disabled) {
3692 if (data->smc_state_table.gfx_max_level !=
3693 data->dpm_table.gfx_table.dpm_state.soft_max_level) {
3694 PP_ASSERT_WITH_CODE(!smum_send_msg_to_smc_with_parameter(
3696 PPSMC_MSG_SetSoftMaxGfxclkByIndex,
3697 data->smc_state_table.gfx_max_level),
3698 "Failed to set soft max sclk index!",
3700 data->dpm_table.gfx_table.dpm_state.soft_max_level =
3701 data->smc_state_table.gfx_max_level;
3705 if (!data->registry_data.mclk_dpm_key_disabled) {
3706 if (data->smc_state_table.mem_max_level !=
3707 data->dpm_table.mem_table.dpm_state.soft_max_level) {
3708 PP_ASSERT_WITH_CODE(!smum_send_msg_to_smc_with_parameter(
3710 PPSMC_MSG_SetSoftMaxUclkByIndex,
3711 data->smc_state_table.mem_max_level),
3712 "Failed to set soft max mclk index!",
3714 data->dpm_table.mem_table.dpm_state.soft_max_level =
3715 data->smc_state_table.mem_max_level;
/*
 * vega10_generate_dpm_level_enable_mask - trim the DPM tables to the new
 * power state's window, compute boot (lowest) and max (highest) enabled
 * level indices for GFX and MEM, upload both to the SMC, and mark every
 * level in the [boot, max) range enabled.
 */
3722 static int vega10_generate_dpm_level_enable_mask(
3723 struct pp_hwmgr *hwmgr, const void *input)
3725 struct vega10_hwmgr *data =
3726 (struct vega10_hwmgr *)(hwmgr->backend);
3727 const struct phm_set_power_state_input *states =
3728 (const struct phm_set_power_state_input *)input;
3729 const struct vega10_power_state *vega10_ps =
3730 cast_const_phw_vega10_power_state(states->pnew_state);
3733 PP_ASSERT_WITH_CODE(!vega10_trim_dpm_states(hwmgr, vega10_ps),
3734 "Attempt to Trim DPM States Failed!",
3737 data->smc_state_table.gfx_boot_level =
3738 vega10_find_lowest_dpm_level(&(data->dpm_table.gfx_table));
3739 data->smc_state_table.gfx_max_level =
3740 vega10_find_highest_dpm_level(&(data->dpm_table.gfx_table));
3741 data->smc_state_table.mem_boot_level =
3742 vega10_find_lowest_dpm_level(&(data->dpm_table.mem_table));
3743 data->smc_state_table.mem_max_level =
3744 vega10_find_highest_dpm_level(&(data->dpm_table.mem_table));
3746 PP_ASSERT_WITH_CODE(!vega10_upload_dpm_bootup_level(hwmgr),
3747 "Attempt to upload DPM Bootup Levels Failed!",
3749 PP_ASSERT_WITH_CODE(!vega10_upload_dpm_max_level(hwmgr),
3750 "Attempt to upload DPM Max Levels Failed!",
/* NOTE(review): loops use i < max_level (exclusive) — the max level
 * itself is not re-enabled here; confirm intent against upstream. */
3752 for(i = data->smc_state_table.gfx_boot_level; i < data->smc_state_table.gfx_max_level; i++)
3753 data->dpm_table.gfx_table.dpm_levels[i].enabled = true;
3756 for(i = data->smc_state_table.mem_boot_level; i < data->smc_state_table.mem_max_level; i++)
3757 data->dpm_table.mem_table.dpm_levels[i].enabled = true;
/*
 * vega10_enable_disable_vce_dpm - enable or disable the VCE DPM SMU
 * feature (no-op if the feature is unsupported) and record the new
 * state in data->smu_features.
 */
3762 int vega10_enable_disable_vce_dpm(struct pp_hwmgr *hwmgr, bool enable)
3764 struct vega10_hwmgr *data =
3765 (struct vega10_hwmgr *)(hwmgr->backend);
3767 if (data->smu_features[GNLD_DPM_VCE].supported) {
3768 PP_ASSERT_WITH_CODE(!vega10_enable_smc_features(hwmgr,
3770 data->smu_features[GNLD_DPM_VCE].smu_feature_bitmap),
3771 "Attempt to Enable/Disable DPM VCE Failed!",
3773 data->smu_features[GNLD_DPM_VCE].enabled = enable;
/*
 * vega10_update_sclk_threshold - program the low-SCLK interrupt
 * threshold into the SMC pp_table (little-endian) and send it via
 * PPSMC_MSG_SetLowGfxclkInterruptThreshold, which also enables the
 * SMC-to-host interrupt. Only acts when the platform cap is set and a
 * non-zero threshold is configured.
 */
3779 static int vega10_update_sclk_threshold(struct pp_hwmgr *hwmgr)
3781 struct vega10_hwmgr *data =
3782 (struct vega10_hwmgr *)(hwmgr->backend);
3784 uint32_t low_sclk_interrupt_threshold = 0;
3786 if (PP_CAP(PHM_PlatformCaps_SclkThrottleLowNotification) &&
3787 (data->low_sclk_interrupt_threshold != 0)) {
3788 low_sclk_interrupt_threshold =
3789 data->low_sclk_interrupt_threshold;
3791 data->smc_state_table.pp_table.LowGfxclkInterruptThreshold =
3792 cpu_to_le32(low_sclk_interrupt_threshold);
3794 /* This message will also enable SmcToHost Interrupt */
3795 result = smum_send_msg_to_smc_with_parameter(hwmgr,
3796 PPSMC_MSG_SetLowGfxclkInterruptThreshold,
3797 (uint32_t)low_sclk_interrupt_threshold);
/*
 * vega10_set_power_state_tasks - top-level sequence for applying a new
 * power state: find changed clocks, rebuild/upload DPM levels, generate
 * the enable mask, refresh the SCLK threshold, then upload the whole
 * PPTable to the SMC. Individual step failures are recorded in result
 * but do not abort the sequence (except the final table upload).
 */
3803 static int vega10_set_power_state_tasks(struct pp_hwmgr *hwmgr,
3806 int tmp_result, result = 0;
3807 struct vega10_hwmgr *data =
3808 (struct vega10_hwmgr *)(hwmgr->backend);
3809 PPTable_t *pp_table = &(data->smc_state_table.pp_table);
3811 tmp_result = vega10_find_dpm_states_clocks_in_dpm_table(hwmgr, input);
3812 PP_ASSERT_WITH_CODE(!tmp_result,
3813 "Failed to find DPM states clocks in DPM table!",
3814 result = tmp_result);
3816 tmp_result = vega10_populate_and_upload_sclk_mclk_dpm_levels(hwmgr, input);
3817 PP_ASSERT_WITH_CODE(!tmp_result,
3818 "Failed to populate and upload SCLK MCLK DPM levels!",
3819 result = tmp_result);
3821 tmp_result = vega10_generate_dpm_level_enable_mask(hwmgr, input);
3822 PP_ASSERT_WITH_CODE(!tmp_result,
3823 "Failed to generate DPM level enabled mask!",
3824 result = tmp_result);
3826 tmp_result = vega10_update_sclk_threshold(hwmgr);
3827 PP_ASSERT_WITH_CODE(!tmp_result,
3828 "Failed to update SCLK threshold!",
3829 result = tmp_result);
3831 result = vega10_copy_table_to_smc(hwmgr,
3832 (uint8_t *)pp_table, PPTABLE);
3833 PP_ASSERT_WITH_CODE(!result,
3834 "Failed to upload PPtable!", return result);
/* One-shot flags consumed by this sequence. */
3836 data->apply_optimized_settings = false;
3837 data->apply_overdrive_next_settings_mask = 0;
/*
 * vega10_dpm_get_sclk - report the requested power state's gfx clock:
 * level 0 when @low, otherwise the highest performance level.
 */
3842 static uint32_t vega10_dpm_get_sclk(struct pp_hwmgr *hwmgr, bool low)
3844 struct pp_power_state *ps;
3845 struct vega10_power_state *vega10_ps;
3850 ps = hwmgr->request_ps;
3855 vega10_ps = cast_phw_vega10_power_state(&ps->hardware);
3858 return vega10_ps->performance_levels[0].gfx_clock;
3860 return vega10_ps->performance_levels
3861 [vega10_ps->performance_level_count - 1].gfx_clock;
/*
 * vega10_dpm_get_mclk - report the requested power state's memory clock:
 * level 0 when @low, otherwise the highest performance level.
 */
3864 static uint32_t vega10_dpm_get_mclk(struct pp_hwmgr *hwmgr, bool low)
3866 struct pp_power_state *ps;
3867 struct vega10_power_state *vega10_ps;
3872 ps = hwmgr->request_ps;
3877 vega10_ps = cast_phw_vega10_power_state(&ps->hardware);
3880 return vega10_ps->performance_levels[0].mem_clock;
3882 return vega10_ps->performance_levels
3883 [vega10_ps->performance_level_count-1].mem_clock;
/*
 * vega10_get_gpu_power - query current package power from the SMC and
 * store it in @query->average_gpu_power. The SMC returns an integer
 * watt value; it is shifted left by 8 into the 24.8 fixed-point format
 * the pp_gpu_power interface expects.
 */
3886 static int vega10_get_gpu_power(struct pp_hwmgr *hwmgr,
3887 struct pp_gpu_power *query)
3891 PP_ASSERT_WITH_CODE(!smum_send_msg_to_smc(hwmgr,
3892 PPSMC_MSG_GetCurrPkgPwr),
3893 "Failed to get current package power!",
3896 vega10_read_arg_from_smc(hwmgr, &value);
3897 /* power value is an integer */
3898 query->average_gpu_power = value << 8;
/*
 * vega10_read_sensor - pp_hwmgr read_sensor backend. Resolves sensor
 * @idx into a value written through @value (and, for GPU_POWER, checks
 * and sets *size). Clock sensors translate an SMC-reported DPM index
 * into the corresponding table frequency.
 */
3903 static int vega10_read_sensor(struct pp_hwmgr *hwmgr, int idx,
3904 void *value, int *size)
3906 uint32_t sclk_idx, mclk_idx, activity_percent = 0;
3907 struct vega10_hwmgr *data = (struct vega10_hwmgr *)(hwmgr->backend);
3908 struct vega10_dpm_table *dpm_table = &data->dpm_table;
3912 case AMDGPU_PP_SENSOR_GFX_SCLK:
3913 ret = smum_send_msg_to_smc(hwmgr, PPSMC_MSG_GetCurrentGfxclkIndex);
3915 vega10_read_arg_from_smc(hwmgr, &sclk_idx);
3916 *((uint32_t *)value) = dpm_table->gfx_table.dpm_levels[sclk_idx].value;
3920 case AMDGPU_PP_SENSOR_GFX_MCLK:
3921 ret = smum_send_msg_to_smc(hwmgr, PPSMC_MSG_GetCurrentUclkIndex);
3923 vega10_read_arg_from_smc(hwmgr, &mclk_idx);
3924 *((uint32_t *)value) = dpm_table->mem_table.dpm_levels[mclk_idx].value;
3928 case AMDGPU_PP_SENSOR_GPU_LOAD:
3929 ret = smum_send_msg_to_smc_with_parameter(hwmgr, PPSMC_MSG_GetAverageGfxActivity, 0);
3931 vega10_read_arg_from_smc(hwmgr, &activity_percent);
/* Clamp firmware-reported activity to 100%. */
3932 *((uint32_t *)value) = activity_percent > 100 ? 100 : activity_percent;
3936 case AMDGPU_PP_SENSOR_GPU_TEMP:
3937 *((uint32_t *)value) = vega10_thermal_get_temperature(hwmgr);
3940 case AMDGPU_PP_SENSOR_UVD_POWER:
3941 *((uint32_t *)value) = data->uvd_power_gated ? 0 : 1;
3944 case AMDGPU_PP_SENSOR_VCE_POWER:
3945 *((uint32_t *)value) = data->vce_power_gated ? 0 : 1;
3948 case AMDGPU_PP_SENSOR_GPU_POWER:
3949 if (*size < sizeof(struct pp_gpu_power))
3952 *size = sizeof(struct pp_gpu_power);
3953 ret = vega10_get_gpu_power(hwmgr, (struct pp_gpu_power *)value);
/*
 * vega10_notify_smc_display_change - tell the SMC whether UCLK fast
 * switching is allowed (parameter value elided in this extract).
 */
3963 static int vega10_notify_smc_display_change(struct pp_hwmgr *hwmgr,
3966 return smum_send_msg_to_smc_with_parameter(hwmgr,
3967 PPSMC_MSG_SetUclkFastSwitch,
/*
 * vega10_display_clock_voltage_request - forward a display clock request
 * to the SMC. The requested frequency (converted from kHz to MHz) is
 * packed into the upper 16 bits and the DSPCLK selector into the lower
 * bits of the message parameter.
 */
3971 int vega10_display_clock_voltage_request(struct pp_hwmgr *hwmgr,
3972 struct pp_display_clock_request *clock_req)
3975 enum amd_pp_clock_type clk_type = clock_req->clock_type;
3976 uint32_t clk_freq = clock_req->clock_freq_in_khz / 1000;
3977 DSPCLK_e clk_select = 0;
3978 uint32_t clk_request = 0;
3981 case amd_pp_dcef_clock:
3982 clk_select = DSPCLK_DCEFCLK;
3984 case amd_pp_disp_clock:
3985 clk_select = DSPCLK_DISPCLK;
3987 case amd_pp_pixel_clock:
3988 clk_select = DSPCLK_PIXCLK;
3990 case amd_pp_phy_clock:
3991 clk_select = DSPCLK_PHYCLK;
3994 pr_info("[DisplayClockVoltageRequest]Invalid Clock Type!");
/* Pack: freq in MHz << 16 | clock selector. */
4000 clk_request = (clk_freq << 16) | clk_select;
4001 result = smum_send_msg_to_smc_with_parameter(hwmgr,
4002 PPSMC_MSG_RequestDisplayClockByFreq,
/*
 * vega10_get_uclk_index - return the index of the first mclk-table entry
 * whose clock is >= @frequency; returns 0 for a NULL/empty table
 * (break/return lines elided in this extract).
 */
4009 static uint8_t vega10_get_uclk_index(struct pp_hwmgr *hwmgr,
4010 struct phm_ppt_v1_clock_voltage_dependency_table *mclk_table,
4016 if (mclk_table == NULL || mclk_table->count == 0)
4019 count = (uint8_t)(mclk_table->count);
4021 for(i = 0; i < count; i++) {
4022 if(mclk_table->entries[i].clk >= frequency)
/*
 * vega10_notify_smc_display_config_after_ps_adjustment - after the power
 * state is adjusted, tell the SMC about the display configuration:
 * enable/disable UCLK fast switching based on display count, request a
 * hard-min DCEFCLK matching the display minimum plus the deep-sleep
 * DCEF divider, and set a soft-min UCLK index for the display memory
 * clock requirement.
 */
4029 static int vega10_notify_smc_display_config_after_ps_adjustment(
4030 struct pp_hwmgr *hwmgr)
4032 struct vega10_hwmgr *data =
4033 (struct vega10_hwmgr *)(hwmgr->backend);
4034 struct vega10_single_dpm_table *dpm_table =
4035 &data->dpm_table.dcef_table;
4036 struct phm_ppt_v2_information *table_info =
4037 (struct phm_ppt_v2_information *)hwmgr->pptable;
4038 struct phm_ppt_v1_clock_voltage_dependency_table *mclk_table = table_info->vdd_dep_on_mclk;
4040 uint32_t num_active_disps = 0;
4041 struct cgs_display_info info = {0};
4042 struct PP_Clocks min_clocks = {0};
4044 struct pp_display_clock_request clock_req;
4046 info.mode_info = NULL;
4048 cgs_get_active_displays_info(hwmgr->device, &info);
4050 num_active_disps = info.display_count;
/* Fast UCLK switching only with a single display. */
4052 if (num_active_disps > 1)
4053 vega10_notify_smc_display_change(hwmgr, false);
4055 vega10_notify_smc_display_change(hwmgr, true);
4057 min_clocks.dcefClock = hwmgr->display_config.min_dcef_set_clk;
4058 min_clocks.dcefClockInSR = hwmgr->display_config.min_dcef_deep_sleep_set_clk;
4059 min_clocks.memoryClock = hwmgr->display_config.min_mem_set_clock;
/* Look for an exact DCEFCLK match in the dcef DPM table. */
4061 for (i = 0; i < dpm_table->count; i++) {
4062 if (dpm_table->dpm_levels[i].value == min_clocks.dcefClock)
4066 if (i < dpm_table->count) {
4067 clock_req.clock_type = amd_pp_dcef_clock;
4068 clock_req.clock_freq_in_khz = dpm_table->dpm_levels[i].value;
4069 if (!vega10_display_clock_voltage_request(hwmgr, &clock_req)) {
/* Deep-sleep divider: value sent is dcefClockInSR / 100. */
4070 PP_ASSERT_WITH_CODE(!smum_send_msg_to_smc_with_parameter(
4071 hwmgr, PPSMC_MSG_SetMinDeepSleepDcefclk,
4072 min_clocks.dcefClockInSR /100),
4073 "Attempt to set divider for DCEFCLK Failed!",);
4075 pr_info("Attempt to set Hard Min for DCEFCLK Failed!");
4078 pr_debug("Cannot find requested DCEFCLK!");
4081 if (min_clocks.memoryClock != 0) {
4082 idx = vega10_get_uclk_index(hwmgr, mclk_table, min_clocks.memoryClock);
4083 smum_send_msg_to_smc_with_parameter(hwmgr, PPSMC_MSG_SetSoftMinUclkByIndex, idx);
4084 data->dpm_table.mem_table.dpm_state.soft_min_level= idx;
/*
 * vega10_force_dpm_highest - pin both GFX and MEM DPM to the highest
 * enabled level by setting boot and max to the same top index, then
 * uploading soft-min and soft-max to the SMC.
 */
4090 static int vega10_force_dpm_highest(struct pp_hwmgr *hwmgr)
4092 struct vega10_hwmgr *data =
4093 (struct vega10_hwmgr *)(hwmgr->backend);
4095 data->smc_state_table.gfx_boot_level =
4096 data->smc_state_table.gfx_max_level =
4097 vega10_find_highest_dpm_level(&(data->dpm_table.gfx_table));
4098 data->smc_state_table.mem_boot_level =
4099 data->smc_state_table.mem_max_level =
4100 vega10_find_highest_dpm_level(&(data->dpm_table.mem_table));
4102 PP_ASSERT_WITH_CODE(!vega10_upload_dpm_bootup_level(hwmgr),
4103 "Failed to upload boot level to highest!",
4106 PP_ASSERT_WITH_CODE(!vega10_upload_dpm_max_level(hwmgr),
4107 "Failed to upload dpm max level to highest!",
/*
 * vega10_force_dpm_lowest - pin both GFX and MEM DPM to the lowest
 * enabled level (boot == max == lowest), then upload soft-min/soft-max.
 * NOTE(review): assertion messages say "highest" — likely copied from
 * vega10_force_dpm_highest; confirm against upstream.
 */
4113 static int vega10_force_dpm_lowest(struct pp_hwmgr *hwmgr)
4115 struct vega10_hwmgr *data =
4116 (struct vega10_hwmgr *)(hwmgr->backend);
4118 data->smc_state_table.gfx_boot_level =
4119 data->smc_state_table.gfx_max_level =
4120 vega10_find_lowest_dpm_level(&(data->dpm_table.gfx_table));
4121 data->smc_state_table.mem_boot_level =
4122 data->smc_state_table.mem_max_level =
4123 vega10_find_lowest_dpm_level(&(data->dpm_table.mem_table));
4125 PP_ASSERT_WITH_CODE(!vega10_upload_dpm_bootup_level(hwmgr),
4126 "Failed to upload boot level to highest!",
4129 PP_ASSERT_WITH_CODE(!vega10_upload_dpm_max_level(hwmgr),
4130 "Failed to upload dpm max level to highest!",
/*
 * vega10_unforce_dpm_levels - restore the full DPM range.
 *
 * Re-opens the gfx and mem DPM windows: boot level is set to the lowest
 * enabled level and max level to the highest, then both limits are
 * uploaded to the SMU so the firmware can again scale across all levels.
 */
4137 static int vega10_unforce_dpm_levels(struct pp_hwmgr *hwmgr)
4139 struct vega10_hwmgr *data = (struct vega10_hwmgr *)(hwmgr->backend);
4141 data->smc_state_table.gfx_boot_level =
4142 vega10_find_lowest_dpm_level(&(data->dpm_table.gfx_table));
4143 data->smc_state_table.gfx_max_level =
4144 vega10_find_highest_dpm_level(&(data->dpm_table.gfx_table));
4145 data->smc_state_table.mem_boot_level =
4146 vega10_find_lowest_dpm_level(&(data->dpm_table.mem_table));
4147 data->smc_state_table.mem_max_level =
4148 vega10_find_highest_dpm_level(&(data->dpm_table.mem_table));
4150 PP_ASSERT_WITH_CODE(!vega10_upload_dpm_bootup_level(hwmgr),
4151 "Failed to upload DPM Bootup Levels!",
4154 PP_ASSERT_WITH_CODE(!vega10_upload_dpm_max_level(hwmgr),
4155 "Failed to upload DPM Max Levels!",
/*
 * vega10_get_profiling_clk_mask - pick sclk/mclk/socclk level indices for
 * the UMD profiling (pstate) modes.
 *
 * Defaults to the per-domain VEGA10_UMD_PSTATE_* levels when the
 * voltage-dependency tables are deep enough; PROFILE_PEAK overrides all
 * three masks to the top table entry. The MIN_SCLK/MIN_MCLK branches'
 * bodies are not visible in this view — presumably they force the
 * respective mask to the lowest level; confirm against the full source.
 */
4160 static int vega10_get_profiling_clk_mask(struct pp_hwmgr *hwmgr, enum amd_dpm_forced_level level,
4161 uint32_t *sclk_mask, uint32_t *mclk_mask, uint32_t *soc_mask)
4163 struct phm_ppt_v2_information *table_info =
4164 (struct phm_ppt_v2_information *)(hwmgr->pptable);
4166 if (table_info->vdd_dep_on_sclk->count > VEGA10_UMD_PSTATE_GFXCLK_LEVEL &&
4167 table_info->vdd_dep_on_socclk->count > VEGA10_UMD_PSTATE_SOCCLK_LEVEL &&
4168 table_info->vdd_dep_on_mclk->count > VEGA10_UMD_PSTATE_MCLK_LEVEL) {
4169 *sclk_mask = VEGA10_UMD_PSTATE_GFXCLK_LEVEL;
4170 *soc_mask = VEGA10_UMD_PSTATE_SOCCLK_LEVEL;
4171 *mclk_mask = VEGA10_UMD_PSTATE_MCLK_LEVEL;
4174 if (level == AMD_DPM_FORCED_LEVEL_PROFILE_MIN_SCLK) {
4176 } else if (level == AMD_DPM_FORCED_LEVEL_PROFILE_MIN_MCLK) {
4178 } else if (level == AMD_DPM_FORCED_LEVEL_PROFILE_PEAK) {
/* peak profile: force every domain to its highest dependency-table entry */
4179 *sclk_mask = table_info->vdd_dep_on_sclk->count - 1;
4180 *soc_mask = table_info->vdd_dep_on_socclk->count - 1;
4181 *mclk_mask = table_info->vdd_dep_on_mclk->count - 1;
/*
 * vega10_set_fan_control_mode - switch between none/manual/auto fan control.
 *
 * NONE drives the fan at 100%; MANUAL stops SMC (firmware) fan control and
 * AUTO starts it, both gated on the MicrocodeFanControl platform cap.
 */
4186 static void vega10_set_fan_control_mode(struct pp_hwmgr *hwmgr, uint32_t mode)
4189 case AMD_FAN_CTRL_NONE:
/* no control requested: pin the fan at full speed for safety */
4190 vega10_fan_ctrl_set_fan_speed_percent(hwmgr, 100);
4192 case AMD_FAN_CTRL_MANUAL:
4193 if (PP_CAP(PHM_PlatformCaps_MicrocodeFanControl))
4194 vega10_fan_ctrl_stop_smc_fan_control(hwmgr);
4196 case AMD_FAN_CTRL_AUTO:
4197 if (PP_CAP(PHM_PlatformCaps_MicrocodeFanControl))
4198 vega10_fan_ctrl_start_smc_fan_control(hwmgr);
/*
 * vega10_dpm_force_dpm_level - apply a requested forced DPM level.
 *
 * HIGH/LOW pin DPM to one end of the range, AUTO releases it, and the
 * PROFILE_* modes translate to per-domain level masks which are then
 * forced via vega10_force_clock_level(). Entering PROFILE_PEAK also
 * switches the fan to NONE (100%) and leaving it restores AUTO.
 */
4205 static int vega10_dpm_force_dpm_level(struct pp_hwmgr *hwmgr,
4206 enum amd_dpm_forced_level level)
4209 uint32_t sclk_mask = 0;
4210 uint32_t mclk_mask = 0;
4211 uint32_t soc_mask = 0;
4214 case AMD_DPM_FORCED_LEVEL_HIGH:
4215 ret = vega10_force_dpm_highest(hwmgr);
4217 case AMD_DPM_FORCED_LEVEL_LOW:
4218 ret = vega10_force_dpm_lowest(hwmgr);
4220 case AMD_DPM_FORCED_LEVEL_AUTO:
4221 ret = vega10_unforce_dpm_levels(hwmgr);
4223 case AMD_DPM_FORCED_LEVEL_PROFILE_STANDARD:
4224 case AMD_DPM_FORCED_LEVEL_PROFILE_MIN_SCLK:
4225 case AMD_DPM_FORCED_LEVEL_PROFILE_MIN_MCLK:
4226 case AMD_DPM_FORCED_LEVEL_PROFILE_PEAK:
4227 ret = vega10_get_profiling_clk_mask(hwmgr, level, &sclk_mask, &mclk_mask, &soc_mask);
/* force single levels by handing a one-hot mask per clock domain */
4230 vega10_force_clock_level(hwmgr, PP_SCLK, 1<<sclk_mask);
4231 vega10_force_clock_level(hwmgr, PP_MCLK, 1<<mclk_mask);
4233 case AMD_DPM_FORCED_LEVEL_MANUAL:
4234 case AMD_DPM_FORCED_LEVEL_PROFILE_EXIT:
/* fan policy tracks PROFILE_PEAK transitions only (edge, not state) */
4240 if (level == AMD_DPM_FORCED_LEVEL_PROFILE_PEAK && hwmgr->dpm_level != AMD_DPM_FORCED_LEVEL_PROFILE_PEAK)
4241 vega10_set_fan_control_mode(hwmgr, AMD_FAN_CTRL_NONE);
4242 else if (level != AMD_DPM_FORCED_LEVEL_PROFILE_PEAK && hwmgr->dpm_level == AMD_DPM_FORCED_LEVEL_PROFILE_PEAK)
4243 vega10_set_fan_control_mode(hwmgr, AMD_FAN_CTRL_AUTO);
4248 static uint32_t vega10_get_fan_control_mode(struct pp_hwmgr *hwmgr)
4250 struct vega10_hwmgr *data = (struct vega10_hwmgr *)(hwmgr->backend);
4252 if (data->smu_features[GNLD_FAN_CONTROL].enabled == false)
4253 return AMD_FAN_CTRL_MANUAL;
4255 return AMD_FAN_CTRL_AUTO;
4258 static int vega10_get_dal_power_level(struct pp_hwmgr *hwmgr,
4259 struct amd_pp_simple_clock_info *info)
4261 struct phm_ppt_v2_information *table_info =
4262 (struct phm_ppt_v2_information *)hwmgr->pptable;
4263 struct phm_clock_and_voltage_limits *max_limits =
4264 &table_info->max_clock_voltage_on_ac;
4266 info->engine_max_clock = max_limits->sclk;
4267 info->memory_max_clock = max_limits->mclk;
/*
 * vega10_get_sclks - collect the GFX clock levels into @clocks.
 *
 * Copies every non-zero clock from the sclk voltage-dependency table,
 * bumping clocks->num_levels per accepted entry. Zero entries are
 * treated as invalid and skipped.
 */
4272 static void vega10_get_sclks(struct pp_hwmgr *hwmgr,
4273 struct pp_clock_levels_with_latency *clocks)
4275 struct phm_ppt_v2_information *table_info =
4276 (struct phm_ppt_v2_information *)hwmgr->pptable;
4277 struct phm_ppt_v1_clock_voltage_dependency_table *dep_table =
4278 table_info->vdd_dep_on_sclk;
4281 for (i = 0; i < dep_table->count; i++) {
4282 if (dep_table->entries[i].clk) {
4283 clocks->data[clocks->num_levels].clocks_in_khz =
4284 dep_table->entries[i].clk;
4285 clocks->num_levels++;
4291 static uint32_t vega10_get_mem_latency(struct pp_hwmgr *hwmgr,
4294 if (clock >= MEM_FREQ_LOW_LATENCY &&
4295 clock < MEM_FREQ_HIGH_LATENCY)
4296 return MEM_LATENCY_HIGH;
4297 else if (clock >= MEM_FREQ_HIGH_LATENCY)
4298 return MEM_LATENCY_LOW;
4300 return MEM_LATENCY_ERR;
/*
 * vega10_get_memclocks - collect memory clock levels and their latencies.
 *
 * Fills both the caller's @clocks array and the driver-private
 * mclk_latency_table in lock step (the chained assignments write the
 * same frequency/latency into both). Zero clock entries are skipped.
 */
4303 static void vega10_get_memclocks(struct pp_hwmgr *hwmgr,
4304 struct pp_clock_levels_with_latency *clocks)
4306 struct phm_ppt_v2_information *table_info =
4307 (struct phm_ppt_v2_information *)hwmgr->pptable;
4308 struct phm_ppt_v1_clock_voltage_dependency_table *dep_table =
4309 table_info->vdd_dep_on_mclk;
4310 struct vega10_hwmgr *data = (struct vega10_hwmgr *)(hwmgr->backend);
/* rebuild both level lists from scratch on every call */
4313 clocks->num_levels = 0;
4314 data->mclk_latency_table.count = 0;
4316 for (i = 0; i < dep_table->count; i++) {
4317 if (dep_table->entries[i].clk) {
4318 clocks->data[clocks->num_levels].clocks_in_khz =
4319 data->mclk_latency_table.entries
4320 [data->mclk_latency_table.count].frequency =
4321 dep_table->entries[i].clk;
4322 clocks->data[clocks->num_levels].latency_in_us =
4323 data->mclk_latency_table.entries
4324 [data->mclk_latency_table.count].latency =
4325 vega10_get_mem_latency(hwmgr,
4326 dep_table->entries[i].clk);
4327 clocks->num_levels++;
4328 data->mclk_latency_table.count++;
/*
 * vega10_get_dcefclocks - collect DCEF clock levels into @clocks.
 *
 * Copies every dcefclk voltage-dependency entry verbatim; latency is
 * reported as 0 for this domain.
 */
4333 static void vega10_get_dcefclocks(struct pp_hwmgr *hwmgr,
4334 struct pp_clock_levels_with_latency *clocks)
4336 struct phm_ppt_v2_information *table_info =
4337 (struct phm_ppt_v2_information *)hwmgr->pptable;
4338 struct phm_ppt_v1_clock_voltage_dependency_table *dep_table =
4339 table_info->vdd_dep_on_dcefclk;
4342 for (i = 0; i < dep_table->count; i++) {
4343 clocks->data[i].clocks_in_khz = dep_table->entries[i].clk;
4344 clocks->data[i].latency_in_us = 0;
4345 clocks->num_levels++;
/*
 * vega10_get_socclocks - collect SOC clock levels into @clocks.
 *
 * Copies every socclk voltage-dependency entry verbatim; latency is
 * reported as 0 for this domain.
 */
4349 static void vega10_get_socclocks(struct pp_hwmgr *hwmgr,
4350 struct pp_clock_levels_with_latency *clocks)
4352 struct phm_ppt_v2_information *table_info =
4353 (struct phm_ppt_v2_information *)hwmgr->pptable;
4354 struct phm_ppt_v1_clock_voltage_dependency_table *dep_table =
4355 table_info->vdd_dep_on_socclk;
4358 for (i = 0; i < dep_table->count; i++) {
4359 clocks->data[i].clocks_in_khz = dep_table->entries[i].clk;
4360 clocks->data[i].latency_in_us = 0;
4361 clocks->num_levels++;
/*
 * vega10_get_clock_by_type_with_latency - dispatch a clock-levels query
 * (with latency) to the per-domain helper for @type.
 */
4365 static int vega10_get_clock_by_type_with_latency(struct pp_hwmgr *hwmgr,
4366 enum amd_pp_clock_type type,
4367 struct pp_clock_levels_with_latency *clocks)
4370 case amd_pp_sys_clock:
4371 vega10_get_sclks(hwmgr, clocks);
4373 case amd_pp_mem_clock:
4374 vega10_get_memclocks(hwmgr, clocks);
4376 case amd_pp_dcef_clock:
4377 vega10_get_dcefclocks(hwmgr, clocks);
4379 case amd_pp_soc_clock:
4380 vega10_get_socclocks(hwmgr, clocks);
/*
 * vega10_get_clock_by_type_with_voltage - report clock levels plus the
 * VDDC (mV) each level requires.
 *
 * Picks the voltage-dependency table matching @type, then copies every
 * entry's clock and resolves its voltage through the vddc lookup table
 * via the entry's vddInd index.
 */
4389 static int vega10_get_clock_by_type_with_voltage(struct pp_hwmgr *hwmgr,
4390 enum amd_pp_clock_type type,
4391 struct pp_clock_levels_with_voltage *clocks)
4393 struct phm_ppt_v2_information *table_info =
4394 (struct phm_ppt_v2_information *)hwmgr->pptable;
4395 struct phm_ppt_v1_clock_voltage_dependency_table *dep_table;
4399 case amd_pp_mem_clock:
4400 dep_table = table_info->vdd_dep_on_mclk;
4402 case amd_pp_dcef_clock:
4403 dep_table = table_info->vdd_dep_on_dcefclk;
4405 case amd_pp_disp_clock:
4406 dep_table = table_info->vdd_dep_on_dispclk;
4408 case amd_pp_pixel_clock:
4409 dep_table = table_info->vdd_dep_on_pixclk;
4411 case amd_pp_phy_clock:
4412 dep_table = table_info->vdd_dep_on_phyclk;
4418 for (i = 0; i < dep_table->count; i++) {
4419 clocks->data[i].clocks_in_khz = dep_table->entries[i].clk;
4420 clocks->data[i].voltage_in_mv = (uint32_t)(table_info->vddc_lookup_table->
4421 entries[dep_table->entries[i].vddInd].us_vdd);
4422 clocks->num_levels++;
/* loop terminated early => not all entries consumed (error path elided) */
4425 if (i < dep_table->count)
/*
 * vega10_set_watermarks_for_clocks_ranges - build the SMU watermark table
 * from DAL's DMIF and MCIF clock-range sets.
 *
 * Each DMIF set fills a WM_DCEFCLK row and each MCIF set a WM_SOCCLK row
 * with little-endian min/max clock and uclk values plus its wm_set_id.
 * Finally flags the table as existing in data->water_marks_bitmap so the
 * display-config task knows to upload it.
 */
4431 static int vega10_set_watermarks_for_clocks_ranges(struct pp_hwmgr *hwmgr,
4432 struct pp_wm_sets_with_clock_ranges_soc15 *wm_with_clock_ranges)
4434 struct vega10_hwmgr *data = (struct vega10_hwmgr *)(hwmgr->backend);
4435 Watermarks_t *table = &(data->smc_state_table.water_marks_table);
4439 if (!data->registry_data.disable_water_mark) {
4440 for (i = 0; i < wm_with_clock_ranges->num_wm_sets_dmif; i++) {
4441 table->WatermarkRow[WM_DCEFCLK][i].MinClock =
4442 cpu_to_le16((uint16_t)
4443 (wm_with_clock_ranges->wm_sets_dmif[i].wm_min_dcefclk_in_khz) /
4445 table->WatermarkRow[WM_DCEFCLK][i].MaxClock =
4446 cpu_to_le16((uint16_t)
4447 (wm_with_clock_ranges->wm_sets_dmif[i].wm_max_dcefclk_in_khz) /
4449 table->WatermarkRow[WM_DCEFCLK][i].MinUclk =
4450 cpu_to_le16((uint16_t)
4451 (wm_with_clock_ranges->wm_sets_dmif[i].wm_min_memclk_in_khz) /
4453 table->WatermarkRow[WM_DCEFCLK][i].MaxUclk =
4454 cpu_to_le16((uint16_t)
4455 (wm_with_clock_ranges->wm_sets_dmif[i].wm_max_memclk_in_khz) /
4457 table->WatermarkRow[WM_DCEFCLK][i].WmSetting = (uint8_t)
4458 wm_with_clock_ranges->wm_sets_dmif[i].wm_set_id;
4461 for (i = 0; i < wm_with_clock_ranges->num_wm_sets_mcif; i++) {
4462 table->WatermarkRow[WM_SOCCLK][i].MinClock =
4463 cpu_to_le16((uint16_t)
4464 (wm_with_clock_ranges->wm_sets_mcif[i].wm_min_socclk_in_khz) /
4466 table->WatermarkRow[WM_SOCCLK][i].MaxClock =
4467 cpu_to_le16((uint16_t)
4468 (wm_with_clock_ranges->wm_sets_mcif[i].wm_max_socclk_in_khz) /
4470 table->WatermarkRow[WM_SOCCLK][i].MinUclk =
4471 cpu_to_le16((uint16_t)
4472 (wm_with_clock_ranges->wm_sets_mcif[i].wm_min_memclk_in_khz) /
4474 table->WatermarkRow[WM_SOCCLK][i].MaxUclk =
4475 cpu_to_le16((uint16_t)
4476 (wm_with_clock_ranges->wm_sets_mcif[i].wm_max_memclk_in_khz) /
4478 table->WatermarkRow[WM_SOCCLK][i].WmSetting = (uint8_t)
4479 wm_with_clock_ranges->wm_sets_mcif[i].wm_set_id;
/* mark the table ready for upload by the display-config task */
4481 data->water_marks_bitmap = WaterMarksExist;
/*
 * vega10_force_clock_level - restrict a clock domain to the levels in @mask.
 *
 * No-op unless a manual/profile level is active (AUTO/LOW/HIGH requests
 * bail out early). For each domain the lowest set bit of @mask becomes
 * the boot level and the highest set bit the max level, and both limits
 * are uploaded to the SMU.
 */
4487 static int vega10_force_clock_level(struct pp_hwmgr *hwmgr,
4488 enum pp_clock_type type, uint32_t mask)
4490 struct vega10_hwmgr *data = (struct vega10_hwmgr *)(hwmgr->backend);
4493 if (hwmgr->request_dpm_level & (AMD_DPM_FORCED_LEVEL_AUTO |
4494 AMD_DPM_FORCED_LEVEL_LOW |
4495 AMD_DPM_FORCED_LEVEL_HIGH))
/* lowest set bit of the mask -> gfx boot level */
4500 for (i = 0; i < 32; i++) {
4501 if (mask & (1 << i))
4504 data->smc_state_table.gfx_boot_level = i;
/* highest set bit of the mask -> gfx max level */
4506 for (i = 31; i >= 0; i--) {
4507 if (mask & (1 << i))
4510 data->smc_state_table.gfx_max_level = i;
4512 PP_ASSERT_WITH_CODE(!vega10_upload_dpm_bootup_level(hwmgr),
4513 "Failed to upload boot level to lowest!",
4516 PP_ASSERT_WITH_CODE(!vega10_upload_dpm_max_level(hwmgr),
4517 "Failed to upload dpm max level to highest!",
/* same scan for the memory domain */
4522 for (i = 0; i < 32; i++) {
4523 if (mask & (1 << i))
4526 data->smc_state_table.mem_boot_level = i;
4528 for (i = 31; i >= 0; i--) {
4529 if (mask & (1 << i))
4532 data->smc_state_table.mem_max_level = i;
4534 PP_ASSERT_WITH_CODE(!vega10_upload_dpm_bootup_level(hwmgr),
4535 "Failed to upload boot level to lowest!",
4538 PP_ASSERT_WITH_CODE(!vega10_upload_dpm_max_level(hwmgr),
4539 "Failed to upload dpm max level to highest!",
/*
 * vega10_print_clock_levels - format the DPM levels of a clock domain
 * into @buf for sysfs (one "idx: value [*]" line per level, the '*'
 * marking the level the SMU reports as current).
 *
 * Reads the current index via a GetCurrent* SMC message followed by
 * vega10_read_arg_from_smc(); sclk/mclk values are stored in 10 kHz
 * units, hence the /100 to print MHz.
 */
4552 static int vega10_print_clock_levels(struct pp_hwmgr *hwmgr,
4553 enum pp_clock_type type, char *buf)
4555 struct vega10_hwmgr *data = (struct vega10_hwmgr *)(hwmgr->backend);
4556 struct vega10_single_dpm_table *sclk_table = &(data->dpm_table.gfx_table);
4557 struct vega10_single_dpm_table *mclk_table = &(data->dpm_table.mem_table);
4558 struct vega10_pcie_table *pcie_table = &(data->dpm_table.pcie_table);
4559 int i, now, size = 0;
4563 if (data->registry_data.sclk_dpm_key_disabled)
4566 PP_ASSERT_WITH_CODE(!smum_send_msg_to_smc(hwmgr,
4567 PPSMC_MSG_GetCurrentGfxclkIndex),
4568 "Attempt to get current sclk index Failed!",
4570 PP_ASSERT_WITH_CODE(!vega10_read_arg_from_smc(hwmgr,
4572 "Attempt to read sclk index Failed!",
4575 for (i = 0; i < sclk_table->count; i++)
4576 size += sprintf(buf + size, "%d: %uMhz %s\n",
4577 i, sclk_table->dpm_levels[i].value / 100,
4578 (i == now) ? "*" : "");
4581 if (data->registry_data.mclk_dpm_key_disabled)
4584 PP_ASSERT_WITH_CODE(!smum_send_msg_to_smc(hwmgr,
4585 PPSMC_MSG_GetCurrentUclkIndex),
4586 "Attempt to get current mclk index Failed!",
4588 PP_ASSERT_WITH_CODE(!vega10_read_arg_from_smc(hwmgr,
4590 "Attempt to read mclk index Failed!",
4593 for (i = 0; i < mclk_table->count; i++)
4594 size += sprintf(buf + size, "%d: %uMhz %s\n",
4595 i, mclk_table->dpm_levels[i].value / 100,
4596 (i == now) ? "*" : "");
/* PCIe levels print the link speed/width rather than a frequency */
4599 PP_ASSERT_WITH_CODE(!smum_send_msg_to_smc(hwmgr,
4600 PPSMC_MSG_GetCurrentLinkIndex),
4601 "Attempt to get current mclk index Failed!",
4603 PP_ASSERT_WITH_CODE(!vega10_read_arg_from_smc(hwmgr,
4605 "Attempt to read mclk index Failed!",
4608 for (i = 0; i < pcie_table->count; i++)
4609 size += sprintf(buf + size, "%d: %s %s\n", i,
4610 (pcie_table->pcie_gen[i] == 0) ? "2.5GT/s, x1" :
4611 (pcie_table->pcie_gen[i] == 1) ? "5.0GT/s, x16" :
4612 (pcie_table->pcie_gen[i] == 2) ? "8.0GT/s, x16" : "",
4613 (i == now) ? "*" : "");
4621 static int vega10_display_configuration_changed_task(struct pp_hwmgr *hwmgr)
4623 struct vega10_hwmgr *data = (struct vega10_hwmgr *)(hwmgr->backend);
4625 uint32_t num_turned_on_displays = 1;
4626 Watermarks_t *wm_table = &(data->smc_state_table.water_marks_table);
4627 struct cgs_display_info info = {0};
4629 if ((data->water_marks_bitmap & WaterMarksExist) &&
4630 !(data->water_marks_bitmap & WaterMarksLoaded)) {
4631 result = vega10_copy_table_to_smc(hwmgr,
4632 (uint8_t *)wm_table, WMTABLE);
4633 PP_ASSERT_WITH_CODE(result, "Failed to update WMTABLE!", return EINVAL);
4634 data->water_marks_bitmap |= WaterMarksLoaded;
4637 if (data->water_marks_bitmap & WaterMarksLoaded) {
4638 cgs_get_active_displays_info(hwmgr->device, &info);
4639 num_turned_on_displays = info.display_count;
4640 smum_send_msg_to_smc_with_parameter(hwmgr,
4641 PPSMC_MSG_NumOfDisplays, num_turned_on_displays);
/*
 * vega10_enable_disable_uvd_dpm - toggle the SMU's UVD DPM feature.
 *
 * Only acts when the feature is supported; on success the cached
 * enabled flag is updated to @enable.
 */
4647 int vega10_enable_disable_uvd_dpm(struct pp_hwmgr *hwmgr, bool enable)
4649 struct vega10_hwmgr *data =
4650 (struct vega10_hwmgr *)(hwmgr->backend);
4652 if (data->smu_features[GNLD_DPM_UVD].supported) {
4653 PP_ASSERT_WITH_CODE(!vega10_enable_smc_features(hwmgr,
4655 data->smu_features[GNLD_DPM_UVD].smu_feature_bitmap),
4656 "Attempt to Enable/Disable DPM UVD Failed!",
4658 data->smu_features[GNLD_DPM_UVD].enabled = enable;
4663 static void vega10_power_gate_vce(struct pp_hwmgr *hwmgr, bool bgate)
4665 struct vega10_hwmgr *data = (struct vega10_hwmgr *)(hwmgr->backend);
4667 data->vce_power_gated = bgate;
4668 vega10_enable_disable_vce_dpm(hwmgr, !bgate);
4671 static void vega10_power_gate_uvd(struct pp_hwmgr *hwmgr, bool bgate)
4673 struct vega10_hwmgr *data = (struct vega10_hwmgr *)(hwmgr->backend);
4675 data->uvd_power_gated = bgate;
4676 vega10_enable_disable_uvd_dpm(hwmgr, !bgate);
4679 static inline bool vega10_are_power_levels_equal(
4680 const struct vega10_performance_level *pl1,
4681 const struct vega10_performance_level *pl2)
4683 return ((pl1->soc_clock == pl2->soc_clock) &&
4684 (pl1->gfx_clock == pl2->gfx_clock) &&
4685 (pl1->mem_clock == pl2->mem_clock));
/*
 * vega10_check_states_equal - decide whether two power states are
 * interchangeable.
 *
 * States are equal when they have the same number of performance levels,
 * every level pair matches, and the UVD/VCE clocks and sclk threshold
 * agree. Result is written through @equal.
 */
4688 static int vega10_check_states_equal(struct pp_hwmgr *hwmgr,
4689 const struct pp_hw_power_state *pstate1,
4690 const struct pp_hw_power_state *pstate2, bool *equal)
4692 const struct vega10_power_state *psa;
4693 const struct vega10_power_state *psb;
4696 if (pstate1 == NULL || pstate2 == NULL || equal == NULL)
4699 psa = cast_const_phw_vega10_power_state(pstate1);
4700 psb = cast_const_phw_vega10_power_state(pstate2);
4701 /* If the two states don't even have the same number of performance levels they cannot be the same state. */
4702 if (psa->performance_level_count != psb->performance_level_count) {
4707 for (i = 0; i < psa->performance_level_count; i++) {
4708 if (!vega10_are_power_levels_equal(&(psa->performance_levels[i]), &(psb->performance_levels[i]))) {
4709 /* If we have found even one performance level pair that is different the states are different. */
4715 /* If all performance levels are the same try to use the UVD clocks to break the tie.*/
4716 *equal = ((psa->uvd_clks.vclk == psb->uvd_clks.vclk) && (psa->uvd_clks.dclk == psb->uvd_clks.dclk));
4717 *equal &= ((psa->vce_clks.evclk == psb->vce_clks.evclk) && (psa->vce_clks.ecclk == psb->vce_clks.ecclk));
4718 *equal &= (psa->sclk_threshold == psb->sclk_threshold);
4724 vega10_check_smc_update_required_for_display_configuration(struct pp_hwmgr *hwmgr)
4726 struct vega10_hwmgr *data = (struct vega10_hwmgr *)(hwmgr->backend);
4727 bool is_update_required = false;
4728 struct cgs_display_info info = {0, 0, NULL};
4730 cgs_get_active_displays_info(hwmgr->device, &info);
4732 if (data->display_timing.num_existing_displays != info.display_count)
4733 is_update_required = true;
4735 if (PP_CAP(PHM_PlatformCaps_SclkDeepSleep)) {
4736 if (data->display_timing.min_clock_in_sr != hwmgr->display_config.min_core_set_clock_in_sr)
4737 is_update_required = true;
4740 return is_update_required;
/*
 * vega10_disable_dpm_tasks - tear down all DPM-related features.
 *
 * Bails out if DPM is not running. Otherwise disables thermal
 * protection, power containment, DIDT, AVFS, the DPM features
 * themselves, deep sleep, ULV and ACG in sequence; each step records
 * its failure in @result but does not abort the remaining steps.
 */
4743 static int vega10_disable_dpm_tasks(struct pp_hwmgr *hwmgr)
4745 int tmp_result, result = 0;
4747 tmp_result = (vega10_is_dpm_running(hwmgr)) ? 0 : -1;
4748 PP_ASSERT_WITH_CODE(tmp_result == 0,
4749 "DPM is not running right now, no need to disable DPM!",
4752 if (PP_CAP(PHM_PlatformCaps_ThermalController))
4753 vega10_disable_thermal_protection(hwmgr);
4755 tmp_result = vega10_disable_power_containment(hwmgr);
4756 PP_ASSERT_WITH_CODE((tmp_result == 0),
4757 "Failed to disable power containment!", result = tmp_result);
4759 tmp_result = vega10_disable_didt_config(hwmgr);
4760 PP_ASSERT_WITH_CODE((tmp_result == 0),
4761 "Failed to disable didt config!", result = tmp_result);
4763 tmp_result = vega10_avfs_enable(hwmgr, false);
4764 PP_ASSERT_WITH_CODE((tmp_result == 0),
4765 "Failed to disable AVFS!", result = tmp_result);
4767 tmp_result = vega10_stop_dpm(hwmgr, SMC_DPM_FEATURES);
4768 PP_ASSERT_WITH_CODE((tmp_result == 0),
4769 "Failed to stop DPM!", result = tmp_result);
4771 tmp_result = vega10_disable_deep_sleep_master_switch(hwmgr);
4772 PP_ASSERT_WITH_CODE((tmp_result == 0),
4773 "Failed to disable deep sleep!", result = tmp_result);
4775 tmp_result = vega10_disable_ulv(hwmgr);
4776 PP_ASSERT_WITH_CODE((tmp_result == 0),
4777 "Failed to disable ulv!", result = tmp_result);
4779 tmp_result = vega10_acg_disable(hwmgr);
4780 PP_ASSERT_WITH_CODE((tmp_result == 0),
4781 "Failed to disable acg!", result = tmp_result);
/*
 * vega10_power_off_asic - prepare the ASIC for power-off.
 *
 * Disables all DPM tasks and clears the WaterMarksLoaded flag so the
 * watermark table is re-uploaded on the next power-up.
 */
4785 static int vega10_power_off_asic(struct pp_hwmgr *hwmgr)
4787 struct vega10_hwmgr *data = (struct vega10_hwmgr *)(hwmgr->backend);
4790 result = vega10_disable_dpm_tasks(hwmgr);
4791 PP_ASSERT_WITH_CODE((0 == result),
4792 "[disable_dpm_tasks] Failed to disable DPM!",
4794 data->water_marks_bitmap &= ~(WaterMarksLoaded);
/*
 * vega10_find_min_clock_index - find the lowest enabled gfx/mem DPM
 * levels whose clock is at least @min_sclk / @min_mclk.
 *
 * Scans each table from the bottom; the index stores (lines elided in
 * this view) presumably land in *sclk_idx / *mclk_idx — confirm against
 * the full source.
 */
4799 static void vega10_find_min_clock_index(struct pp_hwmgr *hwmgr,
4800 uint32_t *sclk_idx, uint32_t *mclk_idx,
4801 uint32_t min_sclk, uint32_t min_mclk)
4803 struct vega10_hwmgr *data = (struct vega10_hwmgr *)(hwmgr->backend);
4804 struct vega10_dpm_table *dpm_table = &(data->dpm_table);
4807 for (i = 0; i < dpm_table->gfx_table.count; i++) {
4808 if (dpm_table->gfx_table.dpm_levels[i].enabled &&
4809 dpm_table->gfx_table.dpm_levels[i].value >= min_sclk) {
4815 for (i = 0; i < dpm_table->mem_table.count; i++) {
4816 if (dpm_table->mem_table.dpm_levels[i].enabled &&
4817 dpm_table->mem_table.dpm_levels[i].value >= min_mclk) {
/*
 * vega10_set_power_profile_state - apply a power-profile's minimum
 * sclk/mclk by raising the SMU soft-min level indices.
 *
 * Only acts in AUTO dpm level. Indices are resolved via
 * vega10_find_min_clock_index(); ~0 means no matching level was found
 * and that domain is left untouched. Each request is also skipped when
 * the corresponding *_dpm_key_disabled registry flag is set.
 */
4824 static int vega10_set_power_profile_state(struct pp_hwmgr *hwmgr,
4825 struct amd_pp_profile *request)
4827 struct vega10_hwmgr *data = (struct vega10_hwmgr *)(hwmgr->backend);
4828 uint32_t sclk_idx = ~0, mclk_idx = ~0;
4830 if (hwmgr->dpm_level != AMD_DPM_FORCED_LEVEL_AUTO)
4833 vega10_find_min_clock_index(hwmgr, &sclk_idx, &mclk_idx,
4834 request->min_sclk, request->min_mclk);
4836 if (sclk_idx != ~0) {
4837 if (!data->registry_data.sclk_dpm_key_disabled)
4838 PP_ASSERT_WITH_CODE(
4839 !smum_send_msg_to_smc_with_parameter(
4841 PPSMC_MSG_SetSoftMinGfxclkByIndex,
4843 "Failed to set soft min sclk index!",
4847 if (mclk_idx != ~0) {
4848 if (!data->registry_data.mclk_dpm_key_disabled)
4849 PP_ASSERT_WITH_CODE(
4850 !smum_send_msg_to_smc_with_parameter(
4852 PPSMC_MSG_SetSoftMinUclkByIndex,
4854 "Failed to set soft min mclk index!",
/*
 * vega10_get_sclk_od - report the sclk overdrive percentage.
 *
 * Computed as the relative difference between the current top gfx DPM
 * level and the golden (factory) top level.
 */
4861 static int vega10_get_sclk_od(struct pp_hwmgr *hwmgr)
4863 struct vega10_hwmgr *data = (struct vega10_hwmgr *)(hwmgr->backend);
4864 struct vega10_single_dpm_table *sclk_table = &(data->dpm_table.gfx_table);
4865 struct vega10_single_dpm_table *golden_sclk_table =
4866 &(data->golden_dpm_table.gfx_table);
4869 value = (sclk_table->dpm_levels[sclk_table->count - 1].value -
4870 golden_sclk_table->dpm_levels
4871 [golden_sclk_table->count - 1].value) *
4873 golden_sclk_table->dpm_levels
4874 [golden_sclk_table->count - 1].value;
/*
 * vega10_set_sclk_od - apply an sclk overdrive percentage.
 *
 * Scales the golden top-level gfx clock by @value percent into the
 * requested power state's top performance level, clamped to the
 * platform's overdrive engine-clock limit.
 */
4879 static int vega10_set_sclk_od(struct pp_hwmgr *hwmgr, uint32_t value)
4881 struct vega10_hwmgr *data = (struct vega10_hwmgr *)(hwmgr->backend);
4882 struct vega10_single_dpm_table *golden_sclk_table =
4883 &(data->golden_dpm_table.gfx_table);
4884 struct pp_power_state *ps;
4885 struct vega10_power_state *vega10_ps;
4887 ps = hwmgr->request_ps;
4892 vega10_ps = cast_phw_vega10_power_state(&ps->hardware);
4894 vega10_ps->performance_levels
4895 [vega10_ps->performance_level_count - 1].gfx_clock =
4896 golden_sclk_table->dpm_levels
4897 [golden_sclk_table->count - 1].value *
4899 golden_sclk_table->dpm_levels
4900 [golden_sclk_table->count - 1].value;
/* never exceed the board's overdrive engine-clock ceiling */
4902 if (vega10_ps->performance_levels
4903 [vega10_ps->performance_level_count - 1].gfx_clock >
4904 hwmgr->platform_descriptor.overdriveLimit.engineClock)
4905 vega10_ps->performance_levels
4906 [vega10_ps->performance_level_count - 1].gfx_clock =
4907 hwmgr->platform_descriptor.overdriveLimit.engineClock;
/*
 * vega10_get_mclk_od - report the mclk overdrive percentage.
 *
 * Computed as the relative difference between the current top mem DPM
 * level and the golden (factory) top level.
 */
4912 static int vega10_get_mclk_od(struct pp_hwmgr *hwmgr)
4914 struct vega10_hwmgr *data = (struct vega10_hwmgr *)(hwmgr->backend);
4915 struct vega10_single_dpm_table *mclk_table = &(data->dpm_table.mem_table);
4916 struct vega10_single_dpm_table *golden_mclk_table =
4917 &(data->golden_dpm_table.mem_table);
4920 value = (mclk_table->dpm_levels
4921 [mclk_table->count - 1].value -
4922 golden_mclk_table->dpm_levels
4923 [golden_mclk_table->count - 1].value) *
4925 golden_mclk_table->dpm_levels
4926 [golden_mclk_table->count - 1].value;
/*
 * vega10_set_mclk_od - apply an mclk overdrive percentage.
 *
 * Scales the golden top-level memory clock by @value percent into the
 * requested power state's top performance level, clamped to the
 * platform's overdrive memory-clock limit.
 */
4931 static int vega10_set_mclk_od(struct pp_hwmgr *hwmgr, uint32_t value)
4933 struct vega10_hwmgr *data = (struct vega10_hwmgr *)(hwmgr->backend);
4934 struct vega10_single_dpm_table *golden_mclk_table =
4935 &(data->golden_dpm_table.mem_table);
4936 struct pp_power_state *ps;
4937 struct vega10_power_state *vega10_ps;
4939 ps = hwmgr->request_ps;
4944 vega10_ps = cast_phw_vega10_power_state(&ps->hardware);
4946 vega10_ps->performance_levels
4947 [vega10_ps->performance_level_count - 1].mem_clock =
4948 golden_mclk_table->dpm_levels
4949 [golden_mclk_table->count - 1].value *
4951 golden_mclk_table->dpm_levels
4952 [golden_mclk_table->count - 1].value;
/* never exceed the board's overdrive memory-clock ceiling */
4954 if (vega10_ps->performance_levels
4955 [vega10_ps->performance_level_count - 1].mem_clock >
4956 hwmgr->platform_descriptor.overdriveLimit.memoryClock)
4957 vega10_ps->performance_levels
4958 [vega10_ps->performance_level_count - 1].mem_clock =
4959 hwmgr->platform_descriptor.overdriveLimit.memoryClock;
/*
 * vega10_notify_cac_buffer_info - tell the SMU where the CAC/DRAM-log
 * buffer lives.
 *
 * Sends the virtual and MC addresses (high/low halves) and the buffer
 * size to the SMU via the corresponding PPSMC messages. Return values
 * of the individual messages are not checked here.
 */
4964 static int vega10_notify_cac_buffer_info(struct pp_hwmgr *hwmgr,
4965 uint32_t virtual_addr_low,
4966 uint32_t virtual_addr_hi,
4967 uint32_t mc_addr_low,
4968 uint32_t mc_addr_hi,
4971 smum_send_msg_to_smc_with_parameter(hwmgr,
4972 PPSMC_MSG_SetSystemVirtualDramAddrHigh,
4974 smum_send_msg_to_smc_with_parameter(hwmgr,
4975 PPSMC_MSG_SetSystemVirtualDramAddrLow,
4977 smum_send_msg_to_smc_with_parameter(hwmgr,
4978 PPSMC_MSG_DramLogSetDramAddrHigh,
4981 smum_send_msg_to_smc_with_parameter(hwmgr,
4982 PPSMC_MSG_DramLogSetDramAddrLow,
4985 smum_send_msg_to_smc_with_parameter(hwmgr,
4986 PPSMC_MSG_DramLogSetDramSize,
/*
 * vega10_register_thermal_interrupt - hook up thermal IRQ handlers.
 *
 * For supported thermal controllers, registers the high (source 0) and
 * low (source 1) temperature interrupts on the THM IH client, then
 * always registers the CTF (critical thermal fault, GPIO_19) interrupt
 * on the ROM_SMUIO client, using the set/handler pairs supplied in
 * @info (an array of cgs_irq_src_funcs).
 */
4991 static int vega10_register_thermal_interrupt(struct pp_hwmgr *hwmgr,
4994 struct cgs_irq_src_funcs *irq_src =
4995 (struct cgs_irq_src_funcs *)info;
4997 if (hwmgr->thermal_controller.ucType ==
4998 ATOM_VEGA10_PP_THERMALCONTROLLER_VEGA10 ||
4999 hwmgr->thermal_controller.ucType ==
5000 ATOM_VEGA10_PP_THERMALCONTROLLER_EMC2103_WITH_INTERNAL) {
5001 PP_ASSERT_WITH_CODE(!cgs_add_irq_source(hwmgr->device,
5002 0xf, /* AMDGPU_IH_CLIENTID_THM */
5003 0, 0, irq_src[0].set, irq_src[0].handler, hwmgr),
5004 "Failed to register high thermal interrupt!",
5006 PP_ASSERT_WITH_CODE(!cgs_add_irq_source(hwmgr->device,
5007 0xf, /* AMDGPU_IH_CLIENTID_THM */
5008 1, 0, irq_src[1].set, irq_src[1].handler, hwmgr),
5009 "Failed to register low thermal interrupt!",
5013 /* Register CTF(GPIO_19) interrupt */
5014 PP_ASSERT_WITH_CODE(!cgs_add_irq_source(hwmgr->device,
5015 0x16, /* AMDGPU_IH_CLIENTID_ROM_SMUIO, */
5016 83, 0, irq_src[2].set, irq_src[2].handler, hwmgr),
5017 "Failed to register CTF thermal interrupt!",
/*
 * vega10_hwmgr_funcs - dispatch table wiring the Vega10 implementations
 * into the generic powerplay hwmgr interface.
 */
5023 static const struct pp_hwmgr_func vega10_hwmgr_funcs = {
5024 .backend_init = vega10_hwmgr_backend_init,
5025 .backend_fini = vega10_hwmgr_backend_fini,
5026 .asic_setup = vega10_setup_asic_task,
5027 .dynamic_state_management_enable = vega10_enable_dpm_tasks,
5028 .dynamic_state_management_disable = vega10_disable_dpm_tasks,
5029 .get_num_of_pp_table_entries =
5030 vega10_get_number_of_powerplay_table_entries,
5031 .get_power_state_size = vega10_get_power_state_size,
5032 .get_pp_table_entry = vega10_get_pp_table_entry,
5033 .patch_boot_state = vega10_patch_boot_state,
5034 .apply_state_adjust_rules = vega10_apply_state_adjust_rules,
5035 .power_state_set = vega10_set_power_state_tasks,
5036 .get_sclk = vega10_dpm_get_sclk,
5037 .get_mclk = vega10_dpm_get_mclk,
5038 .notify_smc_display_config_after_ps_adjustment =
5039 vega10_notify_smc_display_config_after_ps_adjustment,
5040 .force_dpm_level = vega10_dpm_force_dpm_level,
5041 .get_temperature = vega10_thermal_get_temperature,
5042 .stop_thermal_controller = vega10_thermal_stop_thermal_controller,
5043 .get_fan_speed_info = vega10_fan_ctrl_get_fan_speed_info,
5044 .get_fan_speed_percent = vega10_fan_ctrl_get_fan_speed_percent,
5045 .set_fan_speed_percent = vega10_fan_ctrl_set_fan_speed_percent,
5046 .reset_fan_speed_to_default =
5047 vega10_fan_ctrl_reset_fan_speed_to_default,
5048 .get_fan_speed_rpm = vega10_fan_ctrl_get_fan_speed_rpm,
5049 .set_fan_speed_rpm = vega10_fan_ctrl_set_fan_speed_rpm,
5050 .uninitialize_thermal_controller =
5051 vega10_thermal_ctrl_uninitialize_thermal_controller,
5052 .set_fan_control_mode = vega10_set_fan_control_mode,
5053 .get_fan_control_mode = vega10_get_fan_control_mode,
5054 .read_sensor = vega10_read_sensor,
5055 .get_dal_power_level = vega10_get_dal_power_level,
5056 .get_clock_by_type_with_latency = vega10_get_clock_by_type_with_latency,
5057 .get_clock_by_type_with_voltage = vega10_get_clock_by_type_with_voltage,
5058 .set_watermarks_for_clocks_ranges = vega10_set_watermarks_for_clocks_ranges,
5059 .display_clock_voltage_request = vega10_display_clock_voltage_request,
5060 .force_clock_level = vega10_force_clock_level,
5061 .print_clock_levels = vega10_print_clock_levels,
5062 .display_config_changed = vega10_display_configuration_changed_task,
5063 .powergate_uvd = vega10_power_gate_uvd,
5064 .powergate_vce = vega10_power_gate_vce,
5065 .check_states_equal = vega10_check_states_equal,
5066 .check_smc_update_required_for_display_configuration =
5067 vega10_check_smc_update_required_for_display_configuration,
5068 .power_off_asic = vega10_power_off_asic,
5069 .disable_smc_firmware_ctf = vega10_thermal_disable_alert,
5070 .set_power_profile_state = vega10_set_power_profile_state,
5071 .get_sclk_od = vega10_get_sclk_od,
5072 .set_sclk_od = vega10_set_sclk_od,
5073 .get_mclk_od = vega10_get_mclk_od,
5074 .set_mclk_od = vega10_set_mclk_od,
5075 .avfs_control = vega10_avfs_enable,
5076 .notify_cac_buffer_info = vega10_notify_cac_buffer_info,
5077 .register_internal_thermal_interrupt = vega10_register_thermal_interrupt,
5078 .start_thermal_controller = vega10_start_thermal_controller,
5081 int vega10_hwmgr_init(struct pp_hwmgr *hwmgr)
5083 hwmgr->hwmgr_func = &vega10_hwmgr_funcs;
5084 hwmgr->pptable_func = &vega10_pptable_funcs;